diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
index 825e619250bf2ebb77d6c2abc9843414c8b5efd5..f2ec42949a54d7b293541810fec83c7cfa8e26a3 100644
--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
+++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
@@ -270,6 +270,12 @@ Description:	Shows the operation capability bits displayed in bitmap format
 		correlates to the operations allowed. It's visible only
 		on platforms that support the capability.
 
+What:		/sys/bus/dsa/devices/wq<m>.<n>/driver_name
+Date:		Sept 8, 2023
+KernelVersion:	6.7.0
+Contact:	dmaengine@vger.kernel.org
+Description:	Name of the driver to be bound to the wq.
+
 What:		/sys/bus/dsa/devices/engine<m>.<n>/group_id
 Date:		Oct 25, 2019
 KernelVersion:	5.6.0
diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat
index 6731ffacc5f0c6a299344667d5a01151bae080b1..bd6793760f29842b841217d4226f6d5538403a62 100644
--- a/Documentation/ABI/testing/debugfs-driver-qat
+++ b/Documentation/ABI/testing/debugfs-driver-qat
@@ -1,4 +1,4 @@
-What:		/sys/kernel/debug/qat_<device>_<BDF>/qat/fw_counters
+What:		/sys/kernel/debug/qat_<device>_<BDF>/fw_counters
 Date:		November 2023
 KernelVersion:	6.6
 Contact:	qat-linux@intel.com
@@ -59,3 +59,51 @@ Description:	(RO) Read returns the device health status.
 
 		The driver does not monitor for Heartbeat. It is left for a user
 		to poll the status periodically.
+
+What:		/sys/kernel/debug/qat_<device>_<BDF>/pm_status
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:	(RO) Read returns power management information specific to the
+		QAT device.
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/kernel/debug/qat_<device>_<BDF>/cnv_errors
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:	(RO) Read returns, for each Acceleration Engine (AE), the number
+		of errors and the type of the last error detected by the device
+		when performing verified compression.
+		Reported counters::
+
+			<N>: Number of Compress and Verify (CnV) errors and type
+			     of the last CnV error detected by Acceleration
+			     Engine N.
+
+What:		/sys/kernel/debug/qat_<device>_<BDF>/heartbeat/inject_error
+Date:		March 2024
+KernelVersion:	6.8
+Contact:	qat-linux@intel.com
+Description:	(WO) Write to inject an error that simulates a heartbeat
+		failure. This is to be used for testing purposes.
+
+		After writing this file, the driver stops arbitration on a
+		random engine and disables the fetching of heartbeat counters.
+		If a workload is running on the device, a job submitted to the
+		accelerator might not get a response and a read of the
+		`heartbeat/status` attribute might report -1, i.e. device
+		unresponsive.
+		The error is unrecoverable, thus the device must be restarted to
+		restore its functionality.
+
+		This attribute is available only when the kernel is built with
+		CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION=y.
+
+		A write of 1 enables error injection.
+
+		The following example shows how to enable error injection::
+
+			# cd /sys/kernel/debug/qat_<device>_<BDF>
+			# echo 1 > heartbeat/inject_error
diff --git a/Documentation/ABI/testing/debugfs-driver-qat_telemetry b/Documentation/ABI/testing/debugfs-driver-qat_telemetry
new file mode 100644
index 0000000000000000000000000000000000000000..eacee207208827341b367aba6f4df1eb8432aeec
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-driver-qat_telemetry
@@ -0,0 +1,228 @@
+What:		/sys/kernel/debug/qat_<device>_<BDF>/telemetry/control
+Date:		March 2024
+KernelVersion:	6.8
+Contact:	qat-linux@intel.com
+Description:	(RW) Enables/disables the reporting of telemetry metrics.
+
+		Allowed values to write:
+		========================
+		* 0: disable telemetry
+		* 1: enable telemetry
+		* 2, 3, 4: enable telemetry and calculate minimum, maximum
+		  and average for each counter over 2, 3 or 4 samples
+
+		Returned values:
+		================
+		* 1-4: telemetry is enabled and running
+		* 0: telemetry is disabled
+
+		Example.
+
+		Writing '3' to this file starts the collection of
+		telemetry metrics. Samples are collected every second and
+		stored in a circular buffer of size 3. These values are then
+		used to calculate the minimum, maximum and average for each
+		counter. After enabling, counters can be retrieved through
+		the ``device_data`` file::
+
+			echo 3 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/control
+
+		Writing '0' to this file stops the collection of telemetry
+		metrics::
+
+			echo 0 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/control
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/kernel/debug/qat_<device>_<BDF>/telemetry/device_data
+Date:		March 2024
+KernelVersion:	6.8
+Contact:	qat-linux@intel.com
+Description:	(RO) Reports device telemetry counters.
+		Reads report metrics about performance and utilization of
+		a QAT device:
+
+		======================= ========================================
+		Field			Description
+		======================= ========================================
+		sample_cnt		number of acquisitions of telemetry data
+					from the device. Reads are performed
+					every 1000 ms.
+		pci_trans_cnt		number of PCIe partial transactions
+		max_rd_lat		maximum logged read latency [ns] (could
+					be any read operation)
+		rd_lat_acc_avg		average read latency [ns]
+		max_gp_lat		max get to put latency [ns] (only takes
+					samples for AE0)
+		gp_lat_acc_avg		average get to put latency [ns]
+		bw_in			PCIe, write bandwidth [Mbps]
+		bw_out			PCIe, read bandwidth [Mbps]
+		at_page_req_lat_avg	Address Translator (AT), average page
+					request latency [ns]
+		at_trans_lat_avg	AT, average page translation latency [ns]
+		at_max_tlb_used		AT, maximum uTLB used
+		util_cpr<N>		utilization of Compression slice N [%]
+		exec_cpr<N>		execution count of Compression slice N
+		util_xlt<N>		utilization of Translator slice N [%]
+		exec_xlt<N>		execution count of Translator slice N
+		util_dcpr<N>		utilization of Decompression slice N [%]
+		exec_dcpr<N>		execution count of Decompression slice N
+		util_pke<N>		utilization of PKE N [%]
+		exec_pke<N>		execution count of PKE N
+		util_ucs<N>		utilization of UCS slice N [%]
+		exec_ucs<N>		execution count of UCS slice N
+		util_wat<N>		utilization of Wireless Authentication
+					slice N [%]
+		exec_wat<N>		execution count of Wireless Authentication
+					slice N
+		util_wcp<N>		utilization of Wireless Cipher slice N [%]
+		exec_wcp<N>		execution count of Wireless Cipher slice N
+		util_cph<N>		utilization of Cipher slice N [%]
+		exec_cph<N>		execution count of Cipher slice N
+		util_ath<N>		utilization of Authentication slice N [%]
+		exec_ath<N>		execution count of Authentication slice N
+		======================= ========================================
+
+		The telemetry report file can be read with the following command::
+
+			cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/device_data
+
+		If ``control`` is set to 1, only the current values of the
+		counters are displayed::
+
+			<counter_name> <current_value>
+
+		If ``control`` is 2, 3 or 4, counters are displayed in the
+		following format::
+
+			<counter_name> <current_value> <min> <max> <avg>
+
+		If a device lacks a specific accelerator, the corresponding
+		attribute is not reported.
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/kernel/debug/qat_<device>_<BDF>/telemetry/rp_<x>_data
+Date:		March 2024
+KernelVersion:	6.8
+Contact:	qat-linux@intel.com
+Description:	(RW) Selects up to 4 Ring Pairs (RP) to monitor, one per file,
+		and reports telemetry counters related to each.
+
+		Allowed values to write:
+		========================
+		* 0 to ``<num_rps - 1>``:
+		  Ring pair to be monitored. The value of ``num_rps`` can be
+		  retrieved through ``/sys/bus/pci/devices/<BDF>/qat/num_rps``.
+		  See Documentation/ABI/testing/sysfs-driver-qat.
+
+		Reads report metrics about performance and utilization of
+		the selected RP:
+
+		======================= ========================================
+		Field			Description
+		======================= ========================================
+		sample_cnt		number of acquisitions of telemetry data
+					from the device. Reads are performed
+					every 1000 ms
+		rp_num			RP number associated with slot <x>
+		service_type		service associated to the RP
+		pci_trans_cnt		number of PCIe partial transactions
+		gp_lat_acc_avg		average get to put latency [ns]
+		bw_in			PCIe, write bandwidth [Mbps]
+		bw_out			PCIe, read bandwidth [Mbps]
+		at_glob_devtlb_hit	Message descriptor DevTLB hit rate
+		at_glob_devtlb_miss	Message descriptor DevTLB miss rate
+		tl_at_payld_devtlb_hit	Payload DevTLB hit rate
+		tl_at_payld_devtlb_miss	Payload DevTLB miss rate
+		======================= ========================================
+
+		Example.
+
+		Writing the value '32' to the file ``rp_C_data`` starts the
+		collection of telemetry metrics for ring pair 32::
+
+			echo 32 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/rp_C_data
+
+		Once a ring pair is selected, statistics can be read by
+		accessing the file::
+
+			cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/rp_C_data
+
+		If ``control`` is set to 1, only the current values of the
+		counters are displayed::
+
+			<counter_name> <current_value>
+
+		If ``control`` is 2, 3 or 4, counters are displayed in the
+		following format::
+
+			<counter_name> <current_value> <min> <max> <avg>
+
+		On QAT GEN4 devices there are 64 RPs on a PF, so the allowed
+		values are 0..63. This number is absolute to the device.
+		If Virtual Functions (VF) are used, the ring pair number can
+		be derived from the Bus, Device, Function of the VF:
+
+		============ ====== ====== ====== ======
+		PCI BDF/VF   RP0    RP1    RP2    RP3
+		============ ====== ====== ====== ======
+		0000:6b:0.1  RP  0  RP  1  RP  2  RP  3
+		0000:6b:0.2  RP  4  RP  5  RP  6  RP  7
+		0000:6b:0.3  RP  8  RP  9  RP 10  RP 11
+		0000:6b:0.4  RP 12  RP 13  RP 14  RP 15
+		0000:6b:0.5  RP 16  RP 17  RP 18  RP 19
+		0000:6b:0.6  RP 20  RP 21  RP 22  RP 23
+		0000:6b:0.7  RP 24  RP 25  RP 26  RP 27
+		0000:6b:1.0  RP 28  RP 29  RP 30  RP 31
+		0000:6b:1.1  RP 32  RP 33  RP 34  RP 35
+		0000:6b:1.2  RP 36  RP 37  RP 38  RP 39
+		0000:6b:1.3  RP 40  RP 41  RP 42  RP 43
+		0000:6b:1.4  RP 44  RP 45  RP 46  RP 47
+		0000:6b:1.5  RP 48  RP 49  RP 50  RP 51
+		0000:6b:1.6  RP 52  RP 53  RP 54  RP 55
+		0000:6b:1.7  RP 56  RP 57  RP 58  RP 59
+		0000:6b:2.0  RP 60  RP 61  RP 62  RP 63
+		============ ====== ====== ====== ======
+
+		The mapping is only valid for the BDFs of VFs on the host.
+
+		The service provided on a ring pair varies depending on the
+		configuration. The configuration for a given device can be
+		queried and set using ``cfg_services``.
+		See Documentation/ABI/testing/sysfs-driver-qat for details.
+
+		The following table reports how ring pairs are mapped to VFs
+		on the PF 0000:6b:0.0 configured for `sym;asym` or `asym;sym`:
+
+		=========== ============ =========== ============ ===========
+		PCI BDF/VF  RP0/service  RP1/service RP2/service  RP3/service
+		=========== ============ =========== ============ ===========
+		0000:6b:0.1 RP 0 asym    RP 1 sym    RP 2 asym    RP 3 sym
+		0000:6b:0.2 RP 4 asym    RP 5 sym    RP 6 asym    RP 7 sym
+		0000:6b:0.3 RP 8 asym    RP 9 sym    RP10 asym    RP11 sym
+		...         ...          ...         ...          ...
+		=========== ============ =========== ============ ===========
+
+		All VFs follow the same pattern.
+
+		The following table reports how ring pairs are mapped to VFs on
+		the PF 0000:6b:0.0 configured for `dc`:
+
+		=========== ============ =========== ============ ===========
+		PCI BDF/VF  RP0/service  RP1/service RP2/service  RP3/service
+		=========== ============ =========== ============ ===========
+		0000:6b:0.1 RP 0 dc      RP 1 dc     RP 2 dc      RP 3 dc
+		0000:6b:0.2 RP 4 dc      RP 5 dc     RP 6 dc      RP 7 dc
+		0000:6b:0.3 RP 8 dc      RP 9 dc     RP10 dc      RP11 dc
+		...         ...          ...         ...          ...
+		=========== ============ =========== ============ ===========
+
+		The mapping of an RP to a service can be retrieved using
+		``rp2srv`` from sysfs.
+		See Documentation/ABI/testing/sysfs-driver-qat for details.
+
+		This attribute is only available for qat_4xxx devices.
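+
+		The following is an illustrative sketch, not part of the ABI,
+		showing how the BDF-to-ring-pair arithmetic above can be
+		scripted. It assumes a PF at 0000:6b:00.0 and that the four
+		monitoring slots are named ``rp_A_data`` .. ``rp_D_data``
+		(only ``rp_C_data`` is shown in the examples above)::
+
+			## Monitor the four RPs of VF 0000:6b:1.1 (VF index 8,
+			## so RPs 32..35 per the table above).
+			telemetry=/sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry
+			rp=$((4 * 8))
+			for slot in A B C D; do
+				echo $rp > "$telemetry/rp_${slot}_data"
+				rp=$((rp + 1))
+			done
+			## Dump the counters of all four selected RPs.
+			for slot in A B C D; do
+				cat "$telemetry/rp_${slot}_data"
+			done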
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps
index 8757dcf41c0825b80c43b04e377f514bedf980c0..a5f506f7d4819ecee67f4dcdb62f19d3b34e8473 100644
--- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps
@@ -16,3 +16,9 @@ Description:
 		Example output in powerpc:
 		grep . /sys/bus/event_source/devices/cpu/caps/*
 		/sys/bus/event_source/devices/cpu/caps/pmu_name:POWER9
+
+		On supported platforms, "branch_counter_nr" exposes the maximum
+		number of counters which can be logged in the u64 counters of
+		PERF_SAMPLE_BRANCH_COUNTERS, while "branch_counter_width"
+		exposes the width in bits of each counter. Both can be used by
+		the perf tool to parse the logged counters in each branch.
diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat
index 96834d103a09e2ad0f156d9b2756f274de53a24b..96020fb051c347e0f3c87a0775853be0300425a5 100644
--- a/Documentation/ABI/testing/sysfs-driver-qat
+++ b/Documentation/ABI/testing/sysfs-driver-qat
@@ -95,3 +95,69 @@ Description: (RW) This configuration option provides a way to force the device i
 		  0
 
 		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat/rp2srv
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) This attribute provides a way for a user to query a
+		specific ring pair for the type of service that it is currently
+		configured for.
+
+		When written to, the value is cached and used to perform the
+		read operation. Allowed values are in the range 0 to N-1, where
+		N is the max number of ring pairs supported by a device. This
+		can be queried using the attribute qat/num_rps.
+
+		A read returns the service associated to the ring pair queried.
+
+		The values are:
+
+		* dc: the ring pair is configured for running compression services
+		* sym: the ring pair is configured for running symmetric crypto
+		  services
+		* asym: the ring pair is configured for running asymmetric crypto
+		  services
+
+		Example usage::
+
+			# echo 1 > /sys/bus/pci/devices/<BDF>/qat/rp2srv
+			# cat /sys/bus/pci/devices/<BDF>/qat/rp2srv
+			sym
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat/num_rps
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RO) Returns the number of ring pairs that a single device has.
+
+		Example usage::
+
+			# cat /sys/bus/pci/devices/<BDF>/qat/num_rps
+			64
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat/auto_reset
+Date:		March 2024
+KernelVersion:	6.8
+Contact:	qat-linux@intel.com
+Description:	(RW) Reports the current state of the autoreset feature
+		for a QAT device.
+
+		Write to the attribute to enable or disable device auto reset.
+
+		Device auto reset is disabled by default.
+
+		The values are:
+
+		* 1/Yy/on: auto reset enabled. If the device encounters an
+		  unrecoverable error, it will be reset automatically.
+		* 0/Nn/off: auto reset disabled. If the device encounters an
+		  unrecoverable error, it will not be reset.
+
+		This attribute is only available for qat_4xxx devices.
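+
+		Example usage (illustrative; any of the accepted values listed
+		above can be written)::
+
+			## Enable auto reset
+			# echo on > /sys/bus/pci/devices/<BDF>/qat/auto_reset
+			## Disable auto reset
+			# echo off > /sys/bus/pci/devices/<BDF>/qat/auto_reset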
diff --git a/Documentation/ABI/testing/sysfs-driver-qat_ras b/Documentation/ABI/testing/sysfs-driver-qat_ras
new file mode 100644
index 0000000000000000000000000000000000000000..176dea1e9c0aa9684cfc9c15037aecba2277795f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-qat_ras
@@ -0,0 +1,41 @@
+What:		/sys/bus/pci/devices/<BDF>/qat_ras/errors_correctable
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:	(RO) Reports the number of correctable errors detected by the device.
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_ras/errors_nonfatal
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:	(RO) Reports the number of non-fatal errors detected by the device.
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_ras/errors_fatal
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:	(RO) Reports the number of fatal errors detected by the device.
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_ras/reset_error_counters
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:	(WO) Write to reset all error counters of a device.
+
+		The following example shows how to reset the counters::
+
+			# echo 1 > /sys/bus/pci/devices/<BDF>/qat_ras/reset_error_counters
+			# cat /sys/bus/pci/devices/<BDF>/qat_ras/errors_correctable
+			0
+			# cat /sys/bus/pci/devices/<BDF>/qat_ras/errors_nonfatal
+			0
+			# cat /sys/bus/pci/devices/<BDF>/qat_ras/errors_fatal
+			0
+
+		This attribute is only available for qat_4xxx devices.
diff --git a/Documentation/ABI/testing/sysfs-driver-qat_rl b/Documentation/ABI/testing/sysfs-driver-qat_rl
new file mode 100644
index 0000000000000000000000000000000000000000..8c282ae3155ddc7c96351a1a0ab2ea711ba282a8
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-qat_rl
@@ -0,0 +1,226 @@
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(WO) This attribute is used to perform an operation on an SLA.
+		The supported operations are: add, update, rm, rm_all, and get.
+
+		Input values must be filled through the associated attribute in
+		this group before a write to this file.
+		If the operation completes successfully, the associated
+		attributes will be updated.
+		The associated attributes are: cir, pir, srv, rp, and id.
+
+		Supported operations:
+
+		* add: Creates a new SLA with the provided inputs from the user.
+		  * Inputs: cir, pir, srv, and rp
+		  * Output: id
+
+		* get: Returns the configuration of the SLA specified in the id
+		  attribute.
+		  * Inputs: id
+		  * Outputs: cir, pir, srv, and rp
+
+		* update: Updates the SLA with new values set in the following
+		  attributes.
+		  * Inputs: id, cir, and pir
+
+		* rm: Removes the SLA specified in the id attribute.
+		  * Inputs: id
+
+		* rm_all: Removes all the configured SLAs.
+		  * Inputs: None
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/rp
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) When read, reports the current assigned ring pairs for the
+		queried SLA.
+		When written to, configures the ring pairs associated to a new
+		SLA.
+
+		The value is a 64-bit mask and is written/displayed in hex.
+		Each bit of this mask represents a single ring pair, i.e.
+		bit 0 == ring pair id 0; bit 2 == ring pair id 2.
+
+		Selected ring pairs must be assigned to a single service,
+		i.e. the one provided with the srv attribute. The service
+		assigned to a certain ring pair can be checked by querying
+		the attribute qat/rp2srv.
+
+		The maximum number of ring pairs is 4 per SLA.
+
+		Applicability in sla_op:
+
+		* WRITE: add operation
+		* READ: get operation
+
+		Example usage::
+
+			## Read
+			# echo 4 > /sys/bus/pci/devices/<BDF>/qat_rl/id
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/rp
+			0x5
+
+			## Write
+			# echo 0x5 > /sys/bus/pci/devices/<BDF>/qat_rl/rp
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/id
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) If written to, the value is used to retrieve a particular
+		SLA and operate on it.
+		This is valid only for the following operations: update, rm,
+		and get.
+		A read of this attribute is only guaranteed to have correct data
+		after creation of an SLA.
+
+		Applicability in sla_op:
+
+		* WRITE: rm and update operations
+		* READ: add and get operations
+
+		Example usage::
+
+			## Read
+			## Set attributes e.g. cir, pir, srv, etc
+			# echo "add" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/id
+			4
+
+			## Write
+			# echo 7 > /sys/bus/pci/devices/<BDF>/qat_rl/id
+			# echo "get" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/rp
+			0x5 ## ring pair ID 0 and ring pair ID 2
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/cir
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) Committed information rate (CIR). Rate guaranteed to be
+		achieved by a particular SLA. The value is expressed in
+		permille scale, i.e. 1000 refers to the maximum device
+		throughput for a selected service.
+
+		After sending a "get" to sla_op, this will be populated with the
+		CIR for that queried SLA.
+		Write to this file before sending an "add/update" sla_op, to set
+		the SLA to the specified value.
+
+		Applicability in sla_op:
+
+		* WRITE: add and update operations
+		* READ: get operation
+
+		Example usage::
+
+			## Write
+			# echo 500 > /sys/bus/pci/devices/<BDF>/qat_rl/cir
+			# echo "add" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+
+			## Read
+			# echo 4 > /sys/bus/pci/devices/<BDF>/qat_rl/id
+			# echo "get" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/cir
+			500
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/pir
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) Peak information rate (PIR). The maximum rate that can be
+		achieved by that particular SLA. An SLA can reach a value
+		between CIR and PIR when the device is not fully utilized by
+		requests from other users (assigned to different SLAs).
+
+		After sending a "get" to sla_op, this will be populated with the
+		PIR for that queried SLA.
+		Write to this file before sending an "add/update" sla_op, to set
+		the SLA to the specified value.
+
+		Applicability in sla_op:
+
+		* WRITE: add and update operations
+		* READ: get operation
+
+		Example usage::
+
+			## Write
+			# echo 750 > /sys/bus/pci/devices/<BDF>/qat_rl/pir
+			# echo "add" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+
+			## Read
+			# echo 4 > /sys/bus/pci/devices/<BDF>/qat_rl/id
+			# echo "get" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/pir
+			750
+
+		This attribute is only available for qat_4xxx devices.
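+
+		The rate limiting attributes above combine into a single
+		workflow. The following is an illustrative sketch of creating
+		one SLA from scratch (the ``srv`` attribute used here is
+		described in the next entry; the id returned is an example)::
+
+			## Reserve 30% (CIR) of the sym capacity, burstable to
+			## 50% (PIR), on ring pairs 0 and 2 (mask 0x5).
+			# echo "sym" > /sys/bus/pci/devices/<BDF>/qat_rl/srv
+			# echo 0x5 > /sys/bus/pci/devices/<BDF>/qat_rl/rp
+			# echo 300 > /sys/bus/pci/devices/<BDF>/qat_rl/cir
+			# echo 500 > /sys/bus/pci/devices/<BDF>/qat_rl/pir
+			# echo "add" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			## The id of the new SLA can now be read back.
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/id
+			4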
+
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/srv
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) Service (SRV). Represents the service (sym, asym, dc)
+		associated to an SLA.
+		Can be written to or queried to set/show the SRV type for an SLA.
+		The SRV attribute is used to specify the SRV type before adding
+		an SLA. After an SLA is configured, it reports the service
+		associated to that SLA.
+
+		Applicability in sla_op:
+
+		* WRITE: add and update operations
+		* READ: get operation
+
+		Example usage::
+
+			## Write
+			# echo "dc" > /sys/bus/pci/devices/<BDF>/qat_rl/srv
+			# echo "add" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/id
+			4
+
+			## Read
+			# echo 4 > /sys/bus/pci/devices/<BDF>/qat_rl/id
+			# echo "get" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/srv
+			dc
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/cap_rem
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) This file returns the remaining capability for a
+		particular service/SLA. This is the remaining value that a new
+		SLA can be set to, or that a current SLA can be increased by.
+
+		Example usage::
+
+			# echo "asym" > /sys/bus/pci/devices/<BDF>/qat_rl/cap_rem
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/cap_rem
+			250
+			# echo 250 > /sys/bus/pci/devices/<BDF>/qat_rl/cir
+			# echo "add" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/cap_rem
+			0
+
+		This attribute is only available for qat_4xxx devices.
diff --git a/Documentation/ABI/testing/sysfs-fs-virtiofs b/Documentation/ABI/testing/sysfs-fs-virtiofs
new file mode 100644
index 0000000000000000000000000000000000000000..4839dbce997e4224affbd5dfcc5d22ec79d49d86
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-fs-virtiofs
@@ -0,0 +1,11 @@
+What:		/sys/fs/virtiofs/<n>/tag
+Date:		Feb 2024
+Contact:	virtio-fs@lists.linux.dev
+Description:
+		[RO] The mount "tag" that can be used to mount this filesystem.
+
+What:		/sys/fs/virtiofs/<n>/device
+Date:		Feb 2024
+Contact:	virtio-fs@lists.linux.dev
+Description:
+		Symlink to the virtio device that exports this filesystem.
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-transparent-hugepage b/Documentation/ABI/testing/sysfs-kernel-mm-transparent-hugepage
new file mode 100644
index 0000000000000000000000000000000000000000..7bfbb9cc2c11301b1c163a80c819669039ba2b32
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-transparent-hugepage
@@ -0,0 +1,18 @@
+What:		/sys/kernel/mm/transparent_hugepage/
+Date:		April 2024
+Contact:	Linux memory management mailing list <linux-mm@kvack.org>
+Description:
+		/sys/kernel/mm/transparent_hugepage/ contains a number of files and
+		subdirectories,
+
+			- defrag
+			- enabled
+			- hpage_pmd_size
+			- khugepaged
+			- shmem_enabled
+			- use_zero_page
+			- subdirectories of the form hugepages-<size>kB, where <size>
+			  is the page size of the hugepages supported by the kernel/CPU
+			  combination.
+
+		See Documentation/admin-guide/mm/transhuge.rst for details.
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index ff456871bf4b8b74a7e74bea664e0e6fa5e3910b..90ce42fd70c364395fe86b1adb0577c99006e45d 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -109,6 +109,14 @@ Brief summary of control files.
 memory.kmem.tcp.failcnt		     show the number of tcp buf memory usage
				     hits limits
 memory.kmem.tcp.max_usage_in_bytes  show max tcp buf memory usage recorded
+ memory.wmark_ratio		     set/show water mark ratio
+ memory.wmark_low		     low limit (memory usage low water mark,
+				     read-only)
+ memory.wmark_high		     high limit (memory usage high water mark,
+				     read-only)
+ memory.wmark_scale_factor	     the gap between wmark_low and wmark_high,
+				     percentage of max limit, default is 50 or
+				     0.5% of max limit. The max value is 1000
+				     or 10% of max limit.
 ==================================== ==========================================
 
 1. History
@@ -971,7 +979,64 @@ Test:
 
 	(Expect a bunch of notifications, and eventually, the oom-killer will
 	trigger.)
 
-12. TODO
+12. Cgroup oom priority
+=======================
+
+Under memory pressure, reclaim and oom can happen. With multiple cgroups
+existing in one system, we might want the memory or tasks of some cgroups
+to survive reclaim and oom while there are other candidates.
+
+"memory.low" and "memory.min" make that happen during reclaim; this
+"memory.priority" introduces a priority-based oom to meet the above
+requirement during oom.
+
+The priority value goes from 0 to 12; the higher the number, the higher
+the priority. The priority is relative among siblings, not a global
+priority; this way the 13 priority levels can be mapped to tens of
+thousands of memcgs.
+
+When oom happens, it first chooses the lowest-priority memcg as the victim,
+then uses the kernel default algorithm (see function oom_evaluate_task())
+to select a bad process from the victim memcg.
+
+For example, consider the following hierarchy::
+
+	        root
+	       /    \
+	      A      B
+	     / \    / \
+	    C   D  E   F
+
+	priority:
+	A: 10, B: 8
+	C: 5, D: 6, E: 7, F: 8
+
+When oom happens in root, it first iterates over its two children A and B
+and selects B as the next iteration root, since B's priority is lower than
+A's; subsequent victim selection is limited to B's subtree. E is finally
+selected as the victim memcg, since its priority is lower than its
+sibling's.
+
+This priority oom works for both memcg and global oom. For global oom,
+the root is the root memcg.
+
+Meanwhile, we provide the interface memory.use_priority_oom to
+enable/disable the feature in each memcg. Write "1" to enable the
+priority oom and "0" to disable it.
+
+13. Background reclaim
+======================
+
+The user can set up a memory usage water mark by echoing a value to
+memory.wmark_ratio. Valid values are from 0 to 100, representing a
+percentage of the max limit. wmark_low and wmark_high are calculated from
+the max limit and wmark_ratio. 0 means the water mark is disabled; both
+wmark_low and wmark_high are then max, which is the default value.
+
+Once the water mark is set up correctly, when charging pages to a memcg,
+if the usage exceeds wmark_high (which means available memory is low), a
+work item is scheduled to reclaim pages in the background, trying to
+reduce memory usage to wmark_low if possible.
+
+14. TODO
 ========
 
 1. Make per-cgroup scanner reclaim not-shared pages first
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index b26b5274eaaf140ed8ccb617df2eca53a166e8bd..4c485887e1acc63740516183293d05978ee8824c 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1045,11 +1045,12 @@ All time durations are in microseconds.
 	  - user_usec
 	  - system_usec
 
-	  and the following five when the controller is enabled:
+	  and the following six when the controller is enabled:
 
 	  - nr_periods
 	  - nr_throttled
 	  - throttled_usec
+	  - current_bw
 	  - nr_bursts
 	  - burst_usec
 
@@ -1275,6 +1276,47 @@ PAGE_SIZE multiple when read back.
 	The max memory usage recorded for the cgroup and its
 	descendants since the creation of the cgroup.
 
+  memory.wmark_ratio
+	A read-write single value file which exists on non-root
+	cgroups. The default is 0.
+
+	Memory usage water mark. Valid values are from 0 to 100, which
+	represent a percentage of the max limit, or of the high limit if
+	high is set up. wmark_low and wmark_high are calculated from the
+	max limit and wmark_ratio. 0 means the water mark is disabled;
+	both wmark_low and wmark_high are then max, which is the default
+	value.
+
+	Once the water mark is set up correctly, when charging pages to a
+	memcg, if the usage exceeds wmark_high (which means available
+	memory is low), a work item is scheduled to reclaim pages in the
+	background, trying to reduce memory usage to wmark_low if
+	possible.
+
+	If memory.low is greater than memory.wmark_high, background
+	reclaim may not take effect at all due to low protection.
+
+  memory.wmark_high
+	A read-only single value file which exists on non-root cgroups.
+	The default is max.
+
+	Memory usage high water mark; usage above it means the available
+	memory is low. For details, please refer to the above wmark_ratio
+	section.
+
+  memory.wmark_low
+	A read-only single value file which exists on non-root cgroups.
+	The default is max.
+
+	Memory usage low water mark; usage below it means the available
+	memory is OK. For details, please refer to the above wmark_ratio
+	section.
+
+  memory.wmark_scale_factor
+	A read-write single value file which exists on non-root cgroups.
+	The default is 50.
+
+	The gap between wmark_low and wmark_high. The unit is in fractions
+	of 10,000. The default value of 50 means the distance between
+	wmark_high and wmark_low is 0.5% of the max limit of the cgroup.
+	The maximum value is 1000, or 10% of the max limit.
+
   memory.oom.group
 	A read-write single value file which exists on non-root
 	cgroups. The default value is "0".
@@ -1532,6 +1574,15 @@ PAGE_SIZE multiple when read back.
 	  collapsing an existing range of pages. This counter is not
 	  present when CONFIG_TRANSPARENT_HUGEPAGE is not set.
 
+	  thp_swpout (npn)
+		Number of transparent hugepages which were swapped out in
+		one piece without splitting.
+
+	  thp_swpout_fallback (npn)
+		Number of transparent hugepages which were split before
+		swapout, usually because the kernel failed to allocate
+		contiguous swap space for the huge page.
+
   memory.numa_stat
 	A read-only nested-keyed file which exists on non-root
 	cgroups.
@@ -1717,6 +1768,30 @@ IO Interface Files
 	  8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0
 	  8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252 dbytes=50331648 dios=3021
 
+  io.extstat
+	A read-only nested-keyed file.
+
+	Lines are keyed by $MAJ:$MIN device numbers and not ordered.
+	The following nested keys are defined.
+
+	  ======== =============================
+	  rwait    IO read wait time
+	  wwait    IO write wait time
+	  rserv    IO read service time
+	  wserv    IO write service time
+	  rcomp    Number of completed read IOs
+	  wcomp    Number of completed write IOs
+	  rbytesq  Bytes of queued read IOs
+	  wbytesq  Bytes of queued write IOs
+	  riosq    Number of queued read IOs
+	  wiosq    Number of queued write IOs
+	  ======== =============================
+
+	An example read output follows::
+
+	  253:16 rwait=0 wwait=3300 rserv=0 wserv=414366321956 rcomp=0 wcomp=12 rbytesq=0 wbytesq=40960000 riosq=0 wiosq=12
+	  253:0 rwait=0 wwait=0 rserv=0 wserv=0 rcomp=0 wcomp=0 rbytesq=0 wbytesq=0 riosq=0 wiosq=0
+
   io.cost.qos
 	A read-write nested-keyed file which exists only on the root
 	cgroup.
diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst
index 609a3201fd4e1ec233aa45260e94c335f01edbf7..f9af03371cc18e31aec0108745dc0db10ea8f920 100644
--- a/Documentation/admin-guide/iostats.rst
+++ b/Documentation/admin-guide/iostats.rst
@@ -131,6 +131,12 @@ Field 16 -- # of flush requests completed
 Field 17 -- # of milliseconds spent flushing
     This is the total number of milliseconds spent by all flush requests.
 
+Field 18 -- # of milliseconds spent reading on the device driver's side
+
+Field 19 -- # of milliseconds spent writing on the device driver's side
+
+Field 20 -- # of milliseconds spent discarding on the device driver's side
+
 To avoid introducing performance bottlenecks, no locks are held while
 modifying these counters. This implies that minor inaccuracies may be
 introduced when changes collide, so (for instance) adding up all the
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index d83a3f47e20074c0126dbe5df0415237a6eda6b9..11b6b92dcd5fb91039a7c723d48ff69c22b483e0 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -586,6 +586,9 @@
 			nokmem -- Disable kernel memory accounting.
 			nobpf -- Disable BPF memory accounting.
 
+	cgwb_v1		Enable cgroup writeback for the cgroup v1
+			interface.
+
 	checkreqprot=	[SELINUX] Set initial checkreqprot flag value.
 			Format: { "0" | "1" }
 			See security/selinux/Kconfig help text.
@@ -2313,6 +2316,11 @@
 	isapnp=		[ISAPNP]
 			Format: <RDP>,<reset>,<pci_scan>,<verbosity>
 
+	zhaoxin_patch_bitmask=
+			[X86] Bitmask for Zhaoxin platform patches.
+			bit 0: enable the KH-40000 DMA patch's node check
+			function.
+
 	isolcpus=	[KNL,SMP,ISOL] Isolate a given set of CPUs from disturbance.
 			[Deprecated - use cpusets instead]
 			Format: [flag-list,]<cpu-list>
@@ -3287,6 +3295,11 @@
 	mga=		[HW,DRM]
 
+	microcode.force_minrev=
+			[X86] Format: <bool>
+			Enable or disable the microcode minimal revision
+			enforcement for the runtime microcode loader.
+
 	min_addr=nn[KMG]	[KNL,BOOT,IA-64] All physical memory below this
 			physical address is ignored.
 
@@ -3439,6 +3452,13 @@
 			Note that if CONFIG_MODULE_SIG_FORCE is set, that
 			is always true, so this option does nothing.
 
+	module.sig_enforce_subsys
+			[KNL] When CONFIG_MODULE_SIG is set, modules in the
+			subsystems listed by the user will fail to load
+			without (valid) signatures. Note that if
+			CONFIG_MODULE_SIG_FORCE is set, that is always true,
+			so this option does nothing. The gpu, block and net
+			subsystems are currently supported.
+
 	module_blacklist=  [KNL] Do not load a comma-separated list of
 			modules.  Useful for debugging problem modules.
 
@@ -3932,9 +3952,10 @@
 			vulnerability. System may allow data leaks with this
 			option.
 
-	no-steal-acc	[X86,PV_OPS,ARM64,PPC/PSERIES] Disable paravirtualized
-			steal time accounting. steal time is computed, but
-			won't influence scheduler behaviour
+	no-steal-acc	[X86,PV_OPS,ARM64,PPC/PSERIES,RISCV,LOONGARCH,EARLY]
+			Disable paravirtualized steal time accounting. steal time
+			is computed, but won't influence scheduler behaviour
 
 	nosync		[HW,M68K] Disables sync negotiation for all devices.
 
@@ -4020,6 +4041,18 @@
 			NUMA balancing.
 			Allowed values are enable and disable
 
+	numa_spinlock=	[NUMA, PV_OPS] Select the NUMA-aware variant
+			of spinlock. The options are:
+			auto - Enable this variant if running on a multi-node
+			machine in a native environment. (Under this option, if
+			a paravirt spinlock is already enabled, this variant will
+			not be enabled.)
+			on  - Unconditionally enable this variant.
+			off - Unconditionally disable this variant.
+
+			Not specifying this option is equivalent to
+			numa_spinlock=off.
+
 	numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
 			'node', 'default' can be specified
 			This can be set from sysctl after boot.
@@ -4708,6 +4741,14 @@
 			[KNL] Number of legacy pty's. Overwrites compiled-in
 			default number.
 
+	qspinlock.numa_spinlock_threshold_ns=	[NUMA, PV_OPS]
+			Set the time threshold in nanoseconds for the
+			number of intra-node lock hand-offs before the
+			NUMA-aware spinlock is forced to be passed to
+			a thread on another NUMA node. Smaller values
+			result in a more fair, but less performant spinlock,
+			and vice versa. The default value is 1000000 (=1ms).
+
 	quiet		[KNL] Disable most log messages
 
 	r128=		[HW,DRM]
@@ -6406,10 +6447,37 @@
 			<deci-seconds>: poll all this frequency
 			0: no polling (default)
 
+	thp_anon=	[KNL]
+			Format: <size>[KMG],<size>[KMG]:<state>;<size>[KMG]-<size>[KMG]:<state>
+			<state> is one of "always", "madvise", "never" or
+			"inherit".
+			Control the default behavior of the system with respect
+			to anonymous transparent hugepages.
+			Can be used multiple times for multiple anon THP sizes.
+			See Documentation/admin-guide/mm/transhuge.rst for more
+			details.
+
+	thp_file=	[KNL]
+			Format: <size>[KMG]:always|always+exec|never
+			Can be used to control the default behavior of the
+			system with respect to file-backed transparent hugepages.
+			Can be used multiple times for multiple file-backed THP
+			sizes. See Documentation/admin-guide/mm/transhuge.rst
+			for more details.
+
 	threadirqs	[KNL]
 			Force threading of all interrupt handlers except those
 			marked explicitly IRQF_NO_THREAD.
 
+	thp_shmem=	[KNL]
+			Format: <size>[KMG],<size>[KMG]:<policy>;<size>[KMG]-<size>[KMG]:<policy>
+			Control the default policy of each hugepage size for the
+			internal shmem mount. <policy> is one of the policies
+			available for the shmem mount ("always", "inherit",
+			"never", "within_size", and "advise").
+			It can be used multiple times for multiple shmem THP
+			sizes.
+			See Documentation/admin-guide/mm/transhuge.rst for more
+			details.
+
 	topology=	[S390]
 			Format: {off | on}
 			Specify if the kernel should make use of the cpu
@@ -6599,6 +6667,13 @@
 			See Documentation/admin-guide/mm/transhuge.rst
 			for more details.
 
+	transparent_hugepage_shmem= [KNL]
+			Format: [always|within_size|advise|never|deny|force]
+			Can be used to control the hugepage allocation policy for
+			the internal shmem mount.
+			See Documentation/admin-guide/mm/transhuge.rst
+			for more details.
+
 	trusted.source=	[KEYS]
 			Format: <string>
 			This parameter identifies the trust source as a backend
@@ -7038,6 +7113,11 @@
 	vmpoff=		[KNL,S390] Perform z/VM CP command after power off.
 			Format: <command>
 
+	vring_force_dma_api
+			Force the virtio vring to use the DMA API. This is only
+			needed on the xdragon platform (prior to the 20181230
+			release, e.g. the 0930 release).
+
 	vsyscall=	[X86-64]
 			Controls the behavior of vsyscalls (i.e.
 			calls to fixed addresses of 0xffffffffff600x00 from legacy
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index b0cc8243e0934096508e984fa16ebc27c940277d..a44131c4765e0f3ddf74dcfd1fd9e7c4b7e8f5a8 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -45,10 +45,25 @@ components:
   the two is using hugepages just because of the fact the TLB miss is
   going to run faster.
 
+Modern kernels support "multi-size THP" (mTHP), which introduces the
+ability to allocate memory in blocks that are bigger than a base page
+but smaller than traditional PMD-size (as described above), in
+increments of a power-of-2 number of pages. mTHP can back anonymous
+memory (for example 16K, 32K, 64K, etc). These THPs continue to be
+PTE-mapped, but in many cases can still provide similar benefits to
+those outlined above: Page faults are significantly reduced (by a
+factor of e.g. 4, 8, 16, etc), but latency spikes are much less
+prominent because the size of each page isn't as huge as the PMD-sized
+variant and there is less memory to clear in each page fault. Some
+architectures also employ TLB compression mechanisms to squeeze more
+entries in when a set of PTEs are virtually and physically contiguous
+and appropriately aligned. In this case, TLB misses will occur less
+often.
+
 THP can be enabled system wide or restricted to certain tasks or even
 memory ranges inside task's address space. Unless THP is completely
 disabled, there is ``khugepaged`` daemon that scans memory and
-collapses sequences of basic pages into huge pages.
+collapses sequences of basic pages into PMD-sized huge pages.
 
 The THP behaviour is controlled via :ref:`sysfs <thp_sysfs>`
 interface and using madvise(2) and prctl(2) system calls.
@@ -95,12 +110,40 @@ Global THP controls
 Transparent Hugepage Support for anonymous memory can be entirely disabled
 (mostly for debugging purposes) or only enabled inside MADV_HUGEPAGE
 regions (to avoid the risk of consuming more memory resources) or enabled
-system wide. This can be achieved with one of::
+system wide. This can be achieved per-supported-THP-size with one of::
+
+	echo always >/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/enabled
+	echo madvise >/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/enabled
+	echo never >/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/enabled
+
+where <size> is the hugepage size being addressed, the available sizes
+for which vary by system.
+
+For example::
+
+	echo always >/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled
+
+Alternatively it is possible to specify that a given hugepage size
+will inherit the top-level "enabled" value::
+
+	echo inherit >/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/enabled
+
+For example::
+
+	echo inherit >/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled
+
+The top-level setting (for use with "inherit") can be set by issuing
+one of the following commands::
 
 	echo always >/sys/kernel/mm/transparent_hugepage/enabled
 	echo madvise >/sys/kernel/mm/transparent_hugepage/enabled
 	echo never >/sys/kernel/mm/transparent_hugepage/enabled
 
+By default, PMD-sized hugepages have enabled="inherit" and all other
+hugepage sizes have enabled="never". If enabling multiple hugepage
+sizes, the kernel will select the most appropriate enabled size for a
+given allocation.
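+
+A quick, illustrative way to inspect every per-size control exposed by a
+given kernel is to iterate over the ``hugepages-<size>kB`` directories
+(the set of available sizes varies by system)::
+
+	for f in /sys/kernel/mm/transparent_hugepage/hugepages-*kB/enabled; do
+		echo "$f: $(cat $f)"
+	done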
+
 It's also possible to limit defrag efforts in the VM to generate
 anonymous hugepages in case they're not immediately free to madvise
 regions or to never try to defrag memory and simply fallback to regular
@@ -146,25 +189,33 @@ madvise
 never
 	should be self-explanatory.
 
-By default kernel tries to use huge zero page on read page fault to
-anonymous mapping. It's possible to disable huge zero page by writing 0
-or enable it back by writing 1::
+By default kernel tries to use huge, PMD-mappable zero page on read
+page fault to anonymous mapping. It's possible to disable huge zero
+page by writing 0 or enable it back by writing 1::
 
 	echo 0 >/sys/kernel/mm/transparent_hugepage/use_zero_page
 	echo 1 >/sys/kernel/mm/transparent_hugepage/use_zero_page
 
-Some userspace (such as a test program, or an optimized memory allocation
-library) may want to know the size (in bytes) of a transparent hugepage::
+Some userspace (such as a test program, or an optimized memory
+allocation library) may want to know the size (in bytes) of a
+PMD-mappable transparent hugepage::
 
 	cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
 
-khugepaged will be automatically started when
-transparent_hugepage/enabled is set to "always" or "madvise, and it'll
-be automatically shutdown if it's set to "never".
+khugepaged will be automatically started when PMD-sized THP is enabled
+(either the per-size anon control or the top-level control is set
+to "always" or "madvise"), and it'll be automatically shutdown when
+PMD-sized THP is disabled (when both the per-size anon control and the
+top-level control are "never").
 
 Khugepaged controls
 -------------------
 
+.. note::
+   khugepaged currently only searches for opportunities to collapse to
+   PMD-sized THP and no attempt is made to collapse to other THP
+   sizes.
+
 khugepaged runs usually at low frequency so while one may not want to
 invoke defrag algorithms synchronously during the page faults, it
 should be worth invoking defrag at least in khugepaged. However it's
@@ -232,13 +283,100 @@ processes. Exceeding the number would block the collapse::
 
 A higher value may increase memory footprint for some workloads.
 
-Boot parameter
-==============
-
-You can change the sysfs boot time defaults of Transparent Hugepage
-Support by passing the parameter ``transparent_hugepage=always`` or
-``transparent_hugepage=madvise`` or ``transparent_hugepage=never``
-to the kernel command line.
+File-Backed Hugepages
+---------------------
+
+The kernel will automatically select an appropriate THP size for file-backed
+memory from a set of allowed sizes. By default all THP sizes that the page cache
+supports are allowed, but this set can be modified with one of::
+
+	echo always >/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/file_enabled
+	echo always+exec >/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/file_enabled
+	echo never >/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/file_enabled
+
+where <size> is the hugepage size being addressed, the available sizes for which
+vary by system. ``always`` adds the hugepage size to the set of allowed sizes,
+and ``never`` removes the hugepage size from the set of allowed sizes.
+
+``always+exec`` acts like ``always`` but additionally marks the hugepage size as
+the preferred hugepage size for sections of any file mapped executable. A
+maximum of one hugepage size can be marked as ``exec`` at a time, so applying it
+to a new size implicitly removes it from any size it was previously set for,
+as the example below shows.
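+
+For example (an illustrative sketch; which sizes exist varies by system)::
+
+	## Mark 64K as the preferred exec size for file-backed THP...
+	echo always+exec >/sys/kernel/mm/transparent_hugepage/hugepages-64kB/file_enabled
+	## ...then move the marking to 2M; 64K implicitly drops back to
+	## plain "always".
+	echo always+exec >/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/file_enabled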
+
+In some situations, constraining the allowed sizes can reduce memory
+fragmentation, resulting in fewer allocation fallbacks and improved system
+performance.
+
+Note that any changes to the allowed set of sizes only apply to future
+file-backed THP allocations.
+
+Boot parameters
+===============
+
+You can change the sysfs boot time default for the top-level "enabled"
+control by passing the parameter ``transparent_hugepage=always`` or
+``transparent_hugepage=madvise`` or ``transparent_hugepage=never`` to the
+kernel command line.
+
+Alternatively, each supported anonymous THP size can be controlled by
+passing ``thp_anon=<size>[KMG],<size>[KMG]:<state>;<size>[KMG]-<size>[KMG]:<state>``,
+where ``<size>`` is the THP size (must be a power of 2 of PAGE_SIZE and
+supported anonymous THP) and ``<state>`` is one of ``always``, ``madvise``,
+``never`` or ``inherit``.
+
+For example, the following will set 16K, 32K, 64K THP to ``always``,
+set 128K, 512K to ``inherit``, set 256K to ``madvise`` and 1M, 2M
+to ``never``::
+
+	thp_anon=16K-64K:always;128K,512K:inherit;256K:madvise;1M-2M:never
+
+``thp_anon=`` may be specified multiple times to configure all THP sizes as
+required. If ``thp_anon=`` is specified at least once, any anon THP sizes
+not explicitly configured on the command line are implicitly set to
+``never``.
+
+The ``transparent_hugepage`` setting only affects the global toggle. If
+``thp_anon`` is not specified, PMD_ORDER THP will default to ``inherit``.
+However, if a valid ``thp_anon`` setting is provided by the user, the
+PMD_ORDER THP policy will be overridden. If the policy for PMD_ORDER
+is not defined within a valid ``thp_anon``, its policy will default to
+``never``.
+
+Similarly to ``transparent_hugepage``, you can control the hugepage
+allocation policy for the internal shmem mount by using the kernel parameter
+``transparent_hugepage_shmem=<policy>``, where ``<policy>`` is one of the
+six valid policies for shmem (``always``, ``within_size``, ``advise``,
+``never``, ``deny``, and ``force``).
+
+In the same manner as ``thp_anon`` controls each supported anonymous THP
+size, ``thp_shmem`` controls each supported shmem THP size. ``thp_shmem``
+has the same format as ``thp_anon``, but also supports the policy
+``within_size``.
+
+``thp_shmem=`` may be specified multiple times to configure all THP sizes
+as required. If ``thp_shmem=`` is specified at least once, any shmem THP
+sizes not explicitly configured on the command line are implicitly set to
+``never``.
+
+The ``transparent_hugepage_shmem`` setting only affects the global toggle.
+If ``thp_shmem`` is not specified, PMD_ORDER hugepage will default to
+``inherit``. However, if a valid ``thp_shmem`` setting is provided by the
+user, the PMD_ORDER hugepage policy will be overridden. If the policy for
+PMD_ORDER is not defined within a valid ``thp_shmem``, its policy will
+default to ``never``.
+
+Each supported file-backed THP size can be controlled by passing
+``thp_file=<size>[KMG]:<state>``, where ``<size>`` is the THP size and
+``<state>`` is one of ``always``, ``always+exec`` or ``never``.
+
+For example, the following will set 64K THP to ``always+exec``::
+
+	thp_file=64K:always+exec
+
+``thp_file=`` may be specified multiple times to configure all THP sizes as
+required. If ``thp_file=`` is specified at least once, any file-backed THP
+sizes not explicitly configured on the command line are implicitly set to
+``never``.
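+
+For example, the following (illustrative) command line sets 64K shmem THP
+to ``within_size``, keeps PMD-sized shmem THP at the global policy via
+``inherit``, and marks 64K as the preferred exec size for file-backed THP;
+per the rules above, all other sizes are implicitly set to ``never``::
+
+	thp_shmem=64K:within_size;2M:inherit thp_file=64K:always+exec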
 
 Hugepages in tmpfs/shmem
 ========================
@@ -279,22 +417,50 @@ deny
 force
 	Force the huge option on for all - very useful for testing;
 
+Shmem can also use "multi-size THP" (mTHP) by adding a new sysfs knob to
+control mTHP allocation:
+'/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/shmem_enabled',
+and its value for each mTHP is essentially consistent with the global
+setting. An 'inherit' option is added to ensure compatibility with these
+global settings. Conversely, the options 'force' and 'deny' are dropped,
+which are rather testing artifacts from the old ages.
+
+always
+	Attempt to allocate huge pages every time we need a new page;
+
+inherit
+	Inherit the top-level "shmem_enabled" value. By default, PMD-sized hugepages
+	have enabled="inherit" and all other hugepage sizes have enabled="never";
+
+never
+	Do not allocate huge pages;
+
+within_size
+	Only allocate huge page if it will be fully within i_size.
+	Also respect fadvise()/madvise() hints;
+
+advise
+	Only allocate huge pages if requested with fadvise()/madvise();
+
 Need of application restart
 ===========================
 
-The transparent_hugepage/enabled values and tmpfs mount option only affect
-future behavior. So to make them effective you need to restart any
-application that could have been using hugepages. This also applies to the
-regions registered in khugepaged.
+The transparent_hugepage/enabled and
+transparent_hugepage/hugepages-<size>kB/enabled values and tmpfs mount
+option only affect future behavior. So to make them effective you need
+to restart any application that could have been using hugepages. This
+also applies to the regions registered in khugepaged.
 
 Monitoring usage
 ================
 
-The number of anonymous transparent huge pages currently used by the
+The number of PMD-sized anonymous transparent huge pages currently used by the
 system is available by reading the AnonHugePages field in ``/proc/meminfo``.
-To identify what applications are using anonymous transparent huge pages,
-it is necessary to read ``/proc/PID/smaps`` and count the AnonHugePages fields
-for each mapping.
+To identify what applications are using PMD-sized anonymous transparent huge
+pages, it is necessary to read ``/proc/PID/smaps`` and count the AnonHugePages
+fields for each mapping. (Note that AnonHugePages only applies to traditional
+PMD-sized THP for historical reasons and should have been called
+AnonHugePmdMapped).
 
 The number of file transparent huge pages mapped to userspace is available
 by reading ShmemPmdMapped and ShmemHugePages fields in ``/proc/meminfo``.
@@ -310,7 +476,7 @@ monitor how successfully the system is providing huge pages for use.
 
 thp_fault_alloc
 	is incremented every time a huge page is successfully
-	allocated to handle a page fault.
+	allocated and charged to handle a page fault.
 
 thp_collapse_alloc
 	is incremented by khugepaged when it has found
@@ -318,7 +484,7 @@ thp_collapse_alloc
 	successfully allocated a new huge page to store the data.
 
 thp_fault_fallback
-	is incremented if a page fault fails to allocate
+	is incremented if a page fault fails to allocate or charge
 	a huge page and instead falls back to using small pages.
 
 thp_fault_fallback_charge
@@ -332,20 +498,23 @@ thp_collapse_alloc_failed
 	the allocation.
 
 thp_file_alloc
-	is incremented every time a file huge page is successfully
-	allocated.
+	is incremented every time a shmem huge page is successfully
+	allocated (Note that despite being named after "file", the counter
+	measures only shmem).
 
 thp_file_fallback
-	is incremented if a file huge page is attempted to be allocated
-	but fails and instead falls back to using small pages.
+	is incremented if a shmem huge page is attempted to be allocated
+	but fails and instead falls back to using small pages. (Note that
+	despite being named after "file", the counter measures only shmem).
 
 thp_file_fallback_charge
-	is incremented if a file huge page cannot be charged and instead
+	is incremented if a shmem huge page cannot be charged and instead
 	falls back to using small pages even though the allocation was
-	successful.
+	successful. (Note that despite being named after "file", the
+	counter measures only shmem).
 
 thp_file_mapped
-	is incremented every time a file huge page is mapped into
+	is incremented every time a file or shmem huge page is mapped into
 	user address space.
 
 thp_split_page
@@ -388,6 +557,78 @@ thp_swpout_fallback
 	Usually because failed to allocate some continuous swap space
 	for the huge page.
 
+In /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/stats, there are
+also individual counters for each huge page size, which can be utilized to
+monitor the system's effectiveness in providing huge pages for usage. Each
+counter has its own corresponding file.
+
+anon_fault_alloc
+	is incremented every time a huge page is successfully
+	allocated and charged to handle a page fault.
+
+anon_fault_fallback
+	is incremented if a page fault fails to allocate or charge
+	a huge page and instead falls back to using huge pages with
+	lower orders or small pages.
+
+anon_fault_fallback_charge
+	is incremented if a page fault fails to charge a huge page and
+	instead falls back to using huge pages with lower orders or
+	small pages even though the allocation was successful.
+
+swpout
+	is incremented every time a huge page is swapped out in one
+	piece without splitting.
+
+swpout_fallback
+	is incremented if a huge page has to be split before swapout,
+	usually because the kernel failed to allocate contiguous swap
+	space for the huge page.
+
+shmem_alloc
+	is incremented every time a shmem huge page is successfully
+	allocated.
+
+shmem_fallback
+	is incremented if a shmem huge page is attempted to be allocated
+	but fails and instead falls back to using small pages.
+
+shmem_fallback_charge
+	is incremented if a shmem huge page cannot be charged and instead
+	falls back to using small pages even though the allocation was
+	successful.
+
+split
+	is incremented every time a huge page is successfully split into
+	smaller orders. This can happen for a variety of reasons but a
+	common reason is that a huge page is old and is being reclaimed.
+
+split_failed
+	is incremented if the kernel fails to split a huge
+	page. This can happen if the page was pinned by somebody.
+
+split_deferred
+	is incremented when a huge page is put onto the split queue.
+	This happens when a huge page is partially unmapped and splitting
+	it would free up some memory. Pages on the split queue are going
+	to be split under memory pressure, if splitting is possible.
+
+nr_anon
+	the number of anonymous THP we have in the whole system. These THPs
+	might be currently entirely mapped or have partially unmapped/unused
+	subpages.
+
+nr_anon_partially_mapped
+	the number of anonymous THP which are likely partially mapped, possibly
+	wasting memory, and have been queued for deferred memory reclamation.
+	Note that in some corner cases (e.g., failed migration), we might detect
+	an anonymous THP as "partially mapped" and count it here, even though it
+	is not actually partially mapped anymore.
+
+file_alloc
+	is incremented every time a file huge page is successfully
+	allocated.
+
 As the system ages, allocating huge pages may be expensive as the
 system uses memory compaction to copy data around memory to free a
 huge page for use. There are some counters in ``/proc/vmstat`` to help
@@ -413,7 +654,7 @@ for huge pages.
 Optimizing the applications
 ===========================
 
-To be guaranteed that the kernel will map a 2M page immediately in any
+To be guaranteed that the kernel will map a THP immediately in any
 memory region, the mmap region has to be hugepage naturally aligned.
 posix_memalign() can provide that guarantee.
diff --git a/Documentation/admin-guide/perf/dwc_pcie_pmu.rst b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d47cd229d7106f26ac47b85b66e6fc28a95479ad
--- /dev/null
+++ b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst
@@ -0,0 +1,94 @@
+======================================================================
+Synopsys DesignWare Cores (DWC) PCIe Performance Monitoring Unit (PMU)
+======================================================================
+
+DesignWare Cores (DWC) PCIe PMU
+===============================
+
+The PMU is a PCIe configuration space register block provided by each PCIe Root
+Port in a Vendor-Specific Extended Capability named RAS D.E.S (Debug, Error
+injection, and Statistics).
+
+As the name indicates, the RAS DES capability supports system level
+debugging, AER error injection, and collection of statistics. To facilitate
+collection of statistics, Synopsys DesignWare Cores PCIe controller
+provides the following two features:
+
+- one 64-bit counter for Time Based Analysis (RX/TX data throughput and
+  time spent in each low-power LTSSM state) and
+- one 32-bit counter for Event Counting (error and non-error events for
+  a specified lane)
+
+Note: There is no interrupt for counter overflow.
+
+Time Based Analysis
+-------------------
+
+Using this feature you can obtain information regarding RX/TX data
+throughput and time spent in each low-power LTSSM state by the controller.
+The PMU measures data in two categories:
+
+- Group#0: Percentage of time the controller stays in LTSSM states.
+- Group#1: Amount of data processed (Units of 16 bytes).
+
+Lane Event counters
+-------------------
+
+Using this feature you can obtain Error and Non-Error information in
+a specific lane by the controller. The PMU event is selected by all of:
+
+- Group i
+- Event j within the Group i
+- Lane k
+
+Some of the events only exist for specific configurations.
+
+DesignWare Cores (DWC) PCIe PMU Driver
+=======================================
+
+This driver adds PMU devices for each PCIe Root Port named based on the BDF of
+the Root Port. For example,
+
+    30:03.0 PCI bridge: Device 1ded:8000 (rev 01)
+
+the PMU device name for this Root Port is dwc_rootport_3018.
+
+The DWC PCIe PMU driver registers a perf PMU driver, which provides
+description of available events and configuration options in sysfs, see
+/sys/bus/event_source/devices/dwc_rootport_{bdf}.
+
+The "format" directory describes format of the config fields of the
+perf_event_attr structure. The "events" directory provides configuration
+templates for all documented events.
For example, +"Rx_PCIe_TLP_Data_Payload" is an equivalent of "eventid=0x22,type=0x1". + +The "perf list" command shall list the available events from sysfs, e.g.:: + + $# perf list | grep dwc_rootport + <...> + dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ [Kernel PMU event] + <...> + dwc_rootport_3018/rx_memory_read,lane=?/ [Kernel PMU event] + +Time Based Analysis Event Usage +------------------------------- + +Example usage of counting PCIe RX TLP data payload (Units of bytes):: + + $# perf stat -a -e dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ + +The average RX/TX bandwidth can be calculated using the following formula: + + PCIe RX Bandwidth = Rx_PCIe_TLP_Data_Payload / Measure_Time_Window + PCIe TX Bandwidth = Tx_PCIe_TLP_Data_Payload / Measure_Time_Window + +Lane Event Usage +------------------------------- + +Each lane has the same event set and to avoid generating a list of hundreds +of events, the user need to specify the lane ID explicitly, e.g.:: + + $# perf stat -a -e dwc_rootport_3018/rx_memory_read,lane=4/ + +The driver does not support sampling, therefore "perf record" will not +work. Per-task (without "-a") perf sessions are not supported. diff --git a/Documentation/admin-guide/perf/index.rst b/Documentation/admin-guide/perf/index.rst index f60be04e4e336ecfc3d00eca73b609c3d20a909d..6bc7739fddb5e0b95a83d2f15edcf8ce1a11200d 100644 --- a/Documentation/admin-guide/perf/index.rst +++ b/Documentation/admin-guide/perf/index.rst @@ -19,6 +19,7 @@ Performance monitor support arm_dsu_pmu thunderx2-pmu alibaba_pmu + dwc_pcie_pmu nvidia-pmu meson-ddr-pmu cxl diff --git a/Documentation/admin-guide/sysctl/fs.rst b/Documentation/admin-guide/sysctl/fs.rst index a321b84eccaac0f66221836250073fb20f083fbd..59174739319c077a0ebdf7ea148a4cd7a823b371 100644 --- a/Documentation/admin-guide/sysctl/fs.rst +++ b/Documentation/admin-guide/sysctl/fs.rst @@ -205,6 +205,15 @@ already own the source file, or do not have read/write access to it. This protection is based on the restrictions in Openwall and grsecurity. +hardlink_cross_projid +--------------------- + +This is a temporary workaround plan to avoid the limitation when creating +hard link cross two projids. When set to "0", hardlink creation cross +two projids is restricted. When set to "1" hardlinks can be created +cross two projids. + + protected_regular ----------------- @@ -332,3 +341,13 @@ Each "watch" costs roughly 90 bytes on a 32-bit kernel, and roughly 160 bytes on a 64-bit one. The current default value for ``max_user_watches`` is 4% of the available low memory, divided by the "watch" cost in bytes. + +5. /proc/sys/fs/fuse - Configuration options for FUSE filesystems +===================================================================== + +This directory contains the following configuration options for FUSE +filesystems: + +``/proc/sys/fs/fuse/max_pages_limit`` is a read/write file for +setting/getting the maximum number of pages that can be used for servicing +requests in FUSE. diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index cf33de56da27dc6bb1ac1e6a1929867584945926..af3fe24e938e68f590d428505c8e104cd43e629f 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -1603,6 +1603,24 @@ entry will default to 2 instead of 0. = ============================================================= +unprivileged_userns_clone +========================= + +This value controls if unprivileged users could unshare a new user +namespace. 
When the value is zero, unprivileged users are not allowed +to unshare a new user namespace. Privileged users (with CAP_SYS_ADMIN) +are not affected and are always capable of unsharing a new user +namespace. + + +userns_max_level +================ + +This value indicates the maximum nesting level of user namespaces. The +valid configuration values are 0-33. When configured to zero, user +namespaces are effectively disabled. + + warn_limit ========== diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index 45ba1f4dc004876f16a57fbcb34debd065769bc2..beabacb0fcba53713a6392b7bc5e0eec62123028 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -75,6 +75,7 @@ Currently, these files are in /proc/sys/vm: - watermark_boost_factor - watermark_scale_factor - zone_reclaim_mode +- enable_context_readahead admin_reserve_kbytes @@ -1044,3 +1045,19 @@ of other processes running on other nodes will not be affected. Allowing regular swap effectively restricts allocations to the local node unless explicitly overridden by memory policies or cpuset configurations. + + +enable_context_readahead +======================== + +For workloads whose I/O activities are mostly random, the context readahead +feature may introduce unnecessary I/O read operations, which will impact the +application's performance. + +It is enabled by default. + +To disable context readahead:: + + echo 0 > /proc/sys/vm/enable_context_readahead + +To enable context readahead again:: + + echo 1 > /proc/sys/vm/enable_context_readahead diff --git a/Documentation/arch/arm64/cpu-feature-registers.rst b/Documentation/arch/arm64/cpu-feature-registers.rst index de6d8a4790e2b6cd06f69fe5ebe72eb69c23cb3d..14ea68bcf196ed3b24f034615865d0fb31f383f1 100644 --- a/Documentation/arch/arm64/cpu-feature-registers.rst +++ b/Documentation/arch/arm64/cpu-feature-registers.rst @@ -152,6 +152,8 @@ infrastructure: +------------------------------+---------+---------+ | DIT | [51-48] | y | +------------------------------+---------+---------+ + | MPAM | [43-40] | n | + +------------------------------+---------+---------+ | SVE | [35-32] | y | +------------------------------+---------+---------+ | GIC | [27-24] | n | diff --git a/Documentation/arch/loongarch/irq-chip-model.rst b/Documentation/arch/loongarch/irq-chip-model.rst index 7988f41923639dd3e2fc93e1709b2a5f4abe3405..6dd48256e39f74ca4f36638ce8c865da6f9c3849 100644 --- a/Documentation/arch/loongarch/irq-chip-model.rst +++ b/Documentation/arch/loongarch/irq-chip-model.rst @@ -85,6 +85,38 @@ to CPUINTC directly:: | Devices | +---------+ +Advanced Extended IRQ model +=========================== + +In this model, IPI (Inter-Processor Interrupt) and CPU Local Timer interrupt go +to CPUINTC directly, CPU UARTs interrupts go to LIOINTC, PCH-MSI interrupts go +to AVECINTC, and then go to CPUINTC directly, while all other device interrupts +go to PCH-PIC/PCH-LPC and are gathered by EIOINTC, and then go to CPUINTC directly:: + + +-----+ +-----------------------+ +-------+ + | IPI | --> | CPUINTC | <-- | Timer | + +-----+ +-----------------------+ +-------+ + ^ ^ ^ + | | | + +---------+ +----------+ +---------+ +-------+ + | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs | + +---------+ +----------+ +---------+ +-------+ + ^ ^ + | | + +---------+ +---------+ + | PCH-PIC | | PCH-MSI | + +---------+ +---------+ + ^ ^ ^ + | | | + +---------+ +---------+ +---------+ + | Devices | | PCH-LPC | | Devices | + +---------+ +---------+ +---------+ + ^ + | + +---------+ + |
Devices | + +---------+ + ACPI-related definitions ======================== diff --git a/Documentation/arch/x86/hygon-secure-virtualization.rst b/Documentation/arch/x86/hygon-secure-virtualization.rst new file mode 100644 index 0000000000000000000000000000000000000000..ab94107c91f511f2e61299b7c918793e7209f6ef --- /dev/null +++ b/Documentation/arch/x86/hygon-secure-virtualization.rst @@ -0,0 +1,101 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=========================== +HYGON Secure Virtualization +=========================== + +China Secure Virtualization (CSV) is a key virtualization feature on Hygon +processors. + +The 1st generation of CSV (CSV for short) is a secure virtualization technology +that provides memory encryption for the virtual machine (VM); each VM's memory is +encrypted with a unique encryption key which is managed by the secure processor. + +The 2nd generation of CSV (CSV2 for short) provides a security enhancement to CSV +by encrypting not only the VM's memory but also the vCPU's registers of the VM. + +The 3rd generation of CSV (CSV3 for short) is a more advanced secure +virtualization technology; it integrates the secure processor, memory encryption and +memory isolation to protect the guest's private data. The CSV3 +guest's context, like CPU registers, control block and nested page table, is accessed +only by the guest itself and the secure processor. Neither other guests nor the +host can tamper with the guest's context. + +The secure processor is a separate processor inside Hygon hardware. The firmware +running inside the secure processor performs activities in a secure way, such as +OVMF encryption, VM launch, secure memory management and nested page table +management, etc. For more information, please see the CSV spec and CSV3 spec from Hygon. + +A CSV guest runs in memory that is encrypted with a dedicated encryption +key which is set by the secure processor, and each CSV guest's memory encryption key is +unique. A low latency crypto engine resides on Hygon hardware +to minimize the negative effect on memory bandwidth. In a CSV guest, a guest private +page will be automatically decrypted when read from memory and encrypted when +written to memory. + +CSV3 provides an enhancement technology named memory isolation to improve +security. Dedicated memory isolation hardware is built into Hygon hardware. Only +the secure processor has the privilege to configure the isolation hardware. At the +BIOS stage, the host reserves several memory regions as secure, which are protected +by the isolation hardware. The secure processor allocates the reserved secure +memory for a CSV3 guest and marks the memory as dedicated to the current CSV3 +guest. Any memory access (read or write) to a CSV3 guest's private memory from outside +the guest will be blocked by the isolation hardware. + +A CSV3 guest may declare some memory regions as shared to share data with the +host. When a page is set as shared, reads/writes on the page will bypass the +isolation hardware and the guest's shared memory can be accessed by the host. A +method named the CSV3 secure call command is provided, whereby the CSV3 guest sends a secure +call command to the secure processor to change private memory to shared memory. +In this method, 2 dedicated pages are reserved at an early stage of the guest. Any +read/write on the dedicated pages will trigger a nested page fault (NPF). When an NPF +happens, the host helps to issue an external command to the secure processor but +cannot tamper with the data in the guest's private memory.
Then the secure +processor checks the fault address and handles the command if the address +exactly matches the dedicated pages. + +Support for CSV can be determined through the CPUID instruction. The CPUID +function 0x8000001f reports information related to CSV:: + + 0x8000001f[eax]: + Bit[1] indicates support for CSV + Bit[3] indicates support for CSV2 + Bit[30] indicates support for CSV3 + +If CSV is supported, MSR 0xc0010131 can be used to determine if CSV is active:: + + 0xc0010131: + Bit[0] 0 = CSV is not active + 1 = CSV is active + Bit[1] 0 = CSV2 is not active + 1 = CSV2 is active + Bit[30] 0 = CSV3 is not active + 1 = CSV3 is active + +All CSV/CSV2 configurations must be enabled in CSV3. Linux can activate CSV3 by +default (CONFIG_HYGON_CSV=y, CONFIG_CMA=y). CSV3 guest's memory is managed by +CMA (Contiguous Memory Allocation). Users must specify the total CSV3 secure memory on +the Linux kernel command line with csv_mem_size or csv_mem_percentage:: + + csv_mem_size=nn[MG] + [KNL,CSV] + Reserve specified CSV3 memory size in CMA. CSV3's memory will be + allocated from these CMAs. + For instance, with csv_mem_size=40G, 40G of memory is reserved for CSV3. + + csv_mem_percentage=nn + [KNL,CSV] + Reserve specified memory size which is prorated according to the + whole system memory size. CSV3 guest's memory will be allocated + from these CMAs. + For instance, csv_mem_percentage=60 means 60% of system memory is + reserved for CSV3. + The maximum percentage is 80. And the default percentage is 0. + +Limitations +=========== +The reserved CSV3 memory within CMA cannot be used by the kernel or any application +that may pin memory using long-term GUP during the application's lifetime. +For instance, if the whole system memory is 64G and 32G is reserved for CSV3 with +kernel command line csv_mem_percentage=50, only 32G of memory is available for CSV/CSV2. +As a result, users will fail to run a CSV/CSV2 guest with a memory size that exceeds +32G. diff --git a/Documentation/arch/x86/microcode.rst b/Documentation/arch/x86/microcode.rst index b627c6f36bcf5a966b9f7ff7e34059f44ffe8f38..69c04052861df91fd452c3c172175199de37d837 100644 --- a/Documentation/arch/x86/microcode.rst +++ b/Documentation/arch/x86/microcode.rst @@ -35,6 +35,8 @@ on Intel: kernel/x86/microcode/GenuineIntel.bin on AMD : kernel/x86/microcode/AuthenticAMD.bin +on Hygon: + kernel/x86/microcode/HygonGenuine.bin During BSP (BootStrapping Processor) boot (pre-SMP), the kernel scans the microcode file in the initrd. If microcode matching the @@ -69,6 +71,10 @@ here for future reference only). cd $TMPDIR mkdir -p $DSTDIR + if [ -d /lib/firmware/hygon-ucode ]; then + cat /lib/firmware/hygon-ucode/microcode_hygon*.bin > $DSTDIR/HygonGenuine.bin + fi + if [ -d /lib/firmware/amd-ucode ]; then cat /lib/firmware/amd-ucode/microcode_amd*.bin > $DSTDIR/AuthenticAMD.bin fi @@ -217,7 +223,8 @@ currently supported. Here's an example:: - CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin" + CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 \ + amd-ucode/microcode_amd_fam15h.bin hygon-ucode/microcode_hygon_fam18h.bin" CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware" This basically means, you have the following tree structure locally:: @@ -227,6 +234,10 @@ This basically means, you have the following tree structure locally:: ... | |-- microcode_amd_fam15h.bin ... + |-- hygon-ucode + ... + | |-- microcode_hygon_fam18h.bin + ... |-- intel-ucode ...
| |-- 06-3a-09 diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst index cb05d90111b4f5d8380f9a858d5b6d7c4ffb698c..a6279df64a9db8aa69dd08d8643e9cc9b7c42da7 100644 --- a/Documentation/arch/x86/resctrl.rst +++ b/Documentation/arch/x86/resctrl.rst @@ -35,7 +35,7 @@ about the feature from resctrl's info directory. To use the feature mount the file system:: - # mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps]] /sys/fs/resctrl + # mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps][,debug]] /sys/fs/resctrl mount options are: @@ -46,6 +46,9 @@ mount options are: "mba_MBps": Enable the MBA Software Controller(mba_sc) to specify MBA bandwidth in MBps +"debug": + Make debug files accessible. Available debug files are annotated with + "Available only with debug option". L2 and L3 CDP are controlled separately. @@ -124,6 +127,13 @@ related to allocation: "P": Corresponding region is pseudo-locked. No sharing allowed. +"sparse_masks": + Indicates if non-contiguous 1s value in CBM is supported. + + "0": + Only contiguous 1s value in CBM is supported. + "1": + Non-contiguous 1s value in CBM is supported. Memory bandwidth(MB) subdirectory contains the following files with respect to allocation: @@ -299,7 +309,14 @@ All groups contain the following files: "tasks": Reading this file shows the list of all tasks that belong to this group. Writing a task id to the file will add a task to the - group. If the group is a CTRL_MON group the task is removed from + group. Multiple tasks can be added by separating the task ids + with commas. Tasks will be assigned sequentially. Multiple + failures are not supported. A single failure encountered while + attempting to assign a task will cause the operation to abort and + already added tasks before the failure will remain in the group. + Failures will be logged to /sys/fs/resctrl/info/last_cmd_status. + + If the group is a CTRL_MON group the task is removed from whichever previous CTRL_MON group owned the task and also from any MON group that owned the task. If the group is a MON group, then the task must already belong to the CTRL_MON parent of this @@ -342,6 +359,10 @@ When control is enabled all CTRL_MON groups will also contain: file. On successful pseudo-locked region creation the mode will automatically change to "pseudo-locked". +"ctrl_hw_id": + Available only with debug option. The identifier used by hardware + for the control group. On x86 this is the CLOSID. + When monitoring is enabled all MON groups will also contain: "mon_data": @@ -355,6 +376,10 @@ When monitoring is enabled all MON groups will also contain: the sum for all tasks in the CTRL_MON group and all tasks in MON groups. Please see example section for more details on usage. +"mon_hw_id": + Available only with debug option. The identifier used by hardware + for the monitor group. On x86 this is the RMID. + Resource allocation rules ------------------------- @@ -445,12 +470,13 @@ For cache resources we describe the portion of the cache that is available for allocation using a bitmask. The maximum value of the mask is defined by each cpu model (and may be different for different cache levels). It is found using CPUID, but is also provided in the "info" directory of -the resctrl file system in "info/{resource}/cbm_mask". Intel hardware +the resctrl file system in "info/{resource}/cbm_mask". Some Intel hardware requires that these masks have all the '1' bits in a contiguous block. 
So 0x3, 0x6 and 0xC are legal 4-bit masks with two bits set, but 0x5, 0x9 -and 0xA are not. On a system with a 20-bit mask each bit represents 5% -of the capacity of the cache. You could partition the cache into four -equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000. +and 0xA are not. Check /sys/fs/resctrl/info/{resource}/sparse_masks +if non-contiguous 1s value is supported. On a system with a 20-bit mask +each bit represents 5% of the capacity of the cache. You could partition +the cache into four equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000. Memory bandwidth Allocation and monitoring ========================================== diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst index 0d2647fb358d7ce1a4a85bd994d9b05e8ea0cec9..18920610ab7cea379f172f0b8684475eaf471df6 100644 --- a/Documentation/bpf/kfuncs.rst +++ b/Documentation/bpf/kfuncs.rst @@ -37,16 +37,14 @@ prototype in a header for the wrapper kfunc. An example is given below:: /* Disables missing prototype warnings */ - __diag_push(); - __diag_ignore_all("-Wmissing-prototypes", - "Global kfuncs as their definitions will be in BTF"); + __bpf_kfunc_start_defs(); __bpf_kfunc struct task_struct *bpf_find_get_task_by_vpid(pid_t nr) { return find_get_task_by_vpid(nr); } - __diag_pop(); + __bpf_kfunc_end_defs(); A wrapper kfunc is often needed when we need to annotate parameters of the kfunc. Otherwise one may directly make the kfunc visible to the BPF program by @@ -155,10 +153,10 @@ In addition to kfuncs' arguments, verifier may need more information about the type of kfunc(s) being registered with the BPF subsystem. To do so, we define flags on a set of kfuncs as follows:: - BTF_SET8_START(bpf_task_set) + BTF_KFUNCS_START(bpf_task_set) BTF_ID_FLAGS(func, bpf_get_task_pid, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_put_pid, KF_RELEASE) - BTF_SET8_END(bpf_task_set) + BTF_KFUNCS_END(bpf_task_set) This set encodes the BTF ID of each kfunc listed above, and encodes the flags along with it. Ofcourse, it is also allowed to specify no flags. @@ -325,10 +323,10 @@ Once the kfunc is prepared for use, the final step to making it visible is registering it with the BPF subsystem. Registration is done per BPF program type. An example is shown below:: - BTF_SET8_START(bpf_task_set) + BTF_KFUNCS_START(bpf_task_set) BTF_ID_FLAGS(func, bpf_get_task_pid, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_put_pid, KF_RELEASE) - BTF_SET8_END(bpf_task_set) + BTF_KFUNCS_END(bpf_task_set) static const struct btf_kfunc_id_set bpf_task_kfunc_set = { .owner = THIS_MODULE, diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst index 936f6aaa75c857bca55a7ced70b0d99490a9014d..0cfa0f667f2a3cd7c83699ef195df621e932d0e9 100644 --- a/Documentation/dev-tools/kfence.rst +++ b/Documentation/dev-tools/kfence.rst @@ -67,13 +67,142 @@ The total memory dedicated to the KFENCE memory pool can be computed as:: Using the default config, and assuming a page size of 4 KiB, results in dedicating 2 MiB to the KFENCE memory pool. +You can change the KFENCE memory pool size by setting ``kfence.num_objects`` +on the boot command line, or by writing to +``/sys/module/kfence/parameters/num_objects`` when kfence is not enabled, +and the pool size of each node will be computed and updated +in the same way as above. You can set this value arbitrarily large, so +please be careful NOT to use up all memory. +When enabling KFENCE, ``num_objects`` will be adjusted to make the pool size +aligned up to 1GiB.
That means ``num_objects`` itself will be aligned up to +131071 (unless ``num_objects`` is smaller than that, in which case it is +regarded as using the upstream mode). + +You can enable/disable KFENCE dynamically after startup by writing a proper +number to ``/sys/module/kfence/parameters/sample_interval``. Setting this value +to 0 means disabling KFENCE, and unused KFENCE pool memory will be +automatically freed. Otherwise KFENCE will be enabled, and it will try to allocate +enough memory to hold the ``num_objects`` the user has set. If this value is a +negative number, sample_interval will be invalid, and KFENCE will allocate slabs +and pages from its pool at all times if possible. + +You can change the KFENCE pool mode by setting ``kfence.pool_mode`` on the boot command +line, or by writing to ``/sys/module/kfence/parameters/pool_mode`` when kfence is +not enabled. If the value is ``global`` (the default), ``num_objects`` becomes a +global total sum. The total KFENCE pools will hold ``num_objects`` slabs/pages. +Otherwise, if the value is ``node``, ``num_objects`` becomes a per-node value, and +KFENCE pools on each node will hold ``num_objects`` slabs/pages separately. + Note: On architectures that support huge pages, KFENCE will ensure that the pool is using pages of size ``PAGE_SIZE``. This will result in additional page tables being allocated. +TLB recovery issue +~~~~~~~~~~~~~~~~~~ + +On some architectures like x86, the kernel's direct mapping of virtual to physical +addresses is mapped with huge PUD entries, which improves performance since the +kernel does not need to walk the PMD and PTE levels. Each PUD covers a 1GiB area. + +However, KFENCE needs to set guard pages and breaks this design. A PUD will be +split down to PTEs, meaning that a 1GiB area will be split into a large number of +4KiB (page size) areas. This may impact performance. + +To solve this issue, the size of each kfence pool area is forced to be 1GiB, +and one area can hold 131071 objects, calculated by:: + + 1GiB / 4KiB / 2 - 1 = 131071 + +So the user input kfence.num_objects will be aligned up to 131071 for the +convenience of splitting the pool into several 1GiB areas. + +Each KFENCE pool area will be allocated at a 1GiB-aligned address, ensuring +that only one PUD is split. When KFENCE is disabled and there are no active +slabs/pages in an area, it will be freed and the corresponding TLB will +be recovered to the original PUD (only on x86_64 now). + +An exception is a user input of less than 131071 on the boot command line. See mode 1 +of the following examples. + +Set a pool limit on various memory +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Like crashkernel, the user can limit the size of the kfence pool by setting +``kfence.booting_max`` on the boot command line. A reasonable config can be:: + + kfence.booting_max=0-128M:0,128M-256M:1M,256M-:2M + +So that: + On machines with memory of [0, 128M), kfence will not be enabled. + + On machines with memory of [128M, 256M), kfence will allocate at most 1MB + for the kfence pool. (which means num_objects = 127 on page_size = 4KB) + + On machines with memory larger than 256M, kfence will allocate at most 2MB + for the kfence pool. (which means num_objects = 255 on page_size = 4KB) + +Notes: + This config only sets the upper limit, so if the user sets num_objects = 127 + and ``kfence.booting_max=0-:2M``, kfence will still allocate 1MB for the pool. + + This config only works for upstream mode (pool_size < 1GiB and + sample_interval > 0), because users who want to use debug mode are focusing + on a specific machine and do not need this general setting.
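+
+As a quick illustration of the runtime controls described above, the
+following shell sketch (paths taken from this document; the interval value
+is an arbitrary example) enables KFENCE after startup and later disables it
+again, freeing the unused pool memory::
+
+  # Enable KFENCE with a 100ms sample interval (any positive value works)
+  echo 100 > /sys/module/kfence/parameters/sample_interval
+
+  # ... run the workload to be monitored ...
+
+  # Disable KFENCE again; unused KFENCE pool memory is freed automatically
+  echo 0 > /sys/module/kfence/parameters/sample_interval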
+ +Examples +~~~~~~~~ + +There are mainly three distribution modes. + +1. Upstream mode:: + + num_objects < 131071 + pool_mode = global (cannot be node) + sample_interval cannot be negative + + In this mode, everything behaves as it does upstream. However, if the user + enlarges ``num_objects`` after startup, it will be aligned up to 131071 + and become mode 2. + +2. Global mode:: + + num_objects >= 131071 + pool_mode = global + + For example, suppose num_objects = 131071 * 2 and there are 4 nodes in total. + Node 0 and node 1 will each allocate 1GiB of memory for KFENCE pools, and + nodes 2 and 3 are not involved. Sampling slabs and pages on + these empty nodes (2 and 3) will be mapped to the previous nodes (0 and 1). + + If num_objects = 131071 * 6, the memory usage will be [2, 2, 1, 1]GiB on + these 4 nodes. + +3. Per node mode:: + + num_objects >= 131071 + pool_mode = node + + This mode is easy to understand. If num_objects = 131071 * n, the memory + usage will be [n, n, n, n]GiB on 4 nodes. + +Monitoring specific slabs +~~~~~~~~~~~~~~~~~~~~~~~~~ + +If users want to enable or disable KFENCE for specific slabs, set the +per-slab switch at ``/sys/kernel/slab//skip_kfence``. The default +value is 0 for all slabs (meaning do not skip). + +Users can also toggle monitoring of order-0 pages by +setting ``kfence.order0_page`` on the boot command line, +or by writing to ``/sys/module/kfence/parameters/order0_page``. + Error reports ~~~~~~~~~~~~~ +By default, KFENCE will only report faults in dmesg. If users want to panic +the kernel, set ``kfence.fault=panic`` on the boot command line, or write "panic" +to ``/sys/module/kfence/parameters/fault``. + A typical out-of-bounds access looks like this:: ================================================================== @@ -258,7 +387,7 @@ object page are "guard pages", whose attributes are changed to a protected state, and cause page faults on any attempted access. Such page faults are then intercepted by KFENCE, which handles the fault gracefully by reporting an out-of-bounds access, and marking the page as accessible so that the faulting -code can (wrongly) continue executing (set ``panic_on_warn`` to panic instead). +code can (wrongly) continue executing. To detect out-of-bounds writes to memory within the object's page itself, KFENCE also uses pattern-based redzones. For each object page, a redzone is set diff --git a/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml b/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d542ecb1a7d6c3e3ec820179de29f1dd4259bb4 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml @@ -0,0 +1,227 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/arm/arm,mpam-msc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Arm Memory System Resource Partitioning and Monitoring (MPAM) + +description: | + The Arm MPAM specification can be found here: + + https://developer.arm.com/documentation/ddi0598/latest + +maintainers: + - Rob Herring + +properties: + compatible: + items: + - const: arm,mpam-msc # Further details are discoverable + - const: arm,mpam-memory-controller-msc + + reg: + maxItems: 1 + description: A memory region containing registers as defined in the MPAM + specification.
+ + interrupts: + minItems: 1 + items: + - description: error (optional) + - description: overflow (optional, only for monitoring) + + interrupt-names: + oneOf: + - items: + - enum: [ error, overflow ] + - items: + - const: error + - const: overflow + + arm,not-ready-us: + description: The maximum time in microseconds for monitoring data to be + accurate after a settings change. For more information, see the + Not-Ready (NRDY) bit description in the MPAM specification. + + numa-node-id: true # see NUMA binding + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + +patternProperties: + '^ris@[0-9a-f]$': + type: object + additionalProperties: false + description: | + RIS nodes for each RIS in an MSC. These nodes are required for each RIS + implementing known MPAM controls + + properties: + compatible: + enum: + # Bulk storage for cache + - arm,mpam-cache + # Memory bandwidth + - arm,mpam-memory + + reg: + minimum: 0 + maximum: 0xf + + cpus: + $ref: '/schemas/types.yaml#/definitions/phandle-array' + description: + Phandle(s) to the CPU node(s) this RIS belongs to. By default, the parent + device's affinity is used. + + arm,mpam-device: + $ref: '/schemas/types.yaml#/definitions/phandle' + description: + By default, the MPAM enabled device associated with a RIS is the MSC's + parent node. It is possible for each RIS to be associated with different + devices in which case 'arm,mpam-device' should be used. + + required: + - compatible + - reg + +required: + - compatible + - reg + +dependencies: + interrupts: [ interrupt-names ] + +additionalProperties: false + +examples: + - | + /* + cpus { + cpu@0 { + next-level-cache = <&L2_0>; + }; + cpu@100 { + next-level-cache = <&L2_1>; + }; + }; + */ + L2_0: cache-controller-0 { + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3>; + + }; + + L2_1: cache-controller-1 { + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3>; + + }; + + L3: cache-controller@30000000 { + compatible = "arm,dsu-l3-cache", "cache"; + cache-level = <3>; + cache-unified; + + ranges = <0x0 0x30000000 0x800000>; + #address-cells = <1>; + #size-cells = <1>; + + msc@10000 { + compatible = "arm,mpam-msc"; + + /* CPU affinity implied by parent cache node's */ + reg = <0x10000 0x2000>; + interrupts = <1>, <2>; + interrupt-names = "error", "overflow"; + arm,not-ready-us = <1>; + }; + }; + + mem: memory-controller@20000 { + compatible = "foo,a-memory-controller"; + reg = <0x20000 0x1000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + msc@21000 { + compatible = "arm,mpam-memory-controller-msc", "arm,mpam-msc"; + reg = <0x21000 0x1000>; + interrupts = <3>; + interrupt-names = "error"; + arm,not-ready-us = <1>; + numa-node-id = <1>; + }; + }; + + iommu@40000 { + reg = <0x40000 0x1000>; + + ranges; + #address-cells = <1>; + #size-cells = <1>; + + msc@41000 { + compatible = "arm,mpam-msc"; + reg = <0 0x1000>; + interrupts = <5>, <6>; + interrupt-names = "error", "overflow"; + arm,not-ready-us = <1>; + + #address-cells = <1>; + #size-cells = <0>; + + ris@2 { + compatible = "arm,mpam-cache"; + reg = <0>; + // TODO: How to map to device(s)? 
+ }; + }; + }; + + msc@80000 { + compatible = "foo,a-standalone-msc"; + reg = <0x80000 0x1000>; + + clocks = <&clks 123>; + + ranges; + #address-cells = <1>; + #size-cells = <1>; + + msc@10000 { + compatible = "arm,mpam-msc"; + + reg = <0x10000 0x2000>; + interrupts = <7>; + interrupt-names = "overflow"; + arm,not-ready-us = <1>; + + #address-cells = <1>; + #size-cells = <0>; + + ris@0 { + compatible = "arm,mpam-cache"; + reg = <0>; + arm,mpam-device = <&L2_0>; + }; + + ris@1 { + compatible = "arm,mpam-memory"; + reg = <1>; + arm,mpam-device = <&mem>; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/gpu/phytium,dc.yaml b/Documentation/devicetree/bindings/gpu/phytium,dc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5be348f6e23f7b37d345e1fe568a33d8e1239d7f --- /dev/null +++ b/Documentation/devicetree/bindings/gpu/phytium,dc.yaml @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/gpu/phytium,dc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium Display Controller + +maintainers: + - Chen Baozi + +allOf: + - $ref: /schemas/dc/display-controller.yaml# + +properties: + compatible: + const: phytium,dc + + reg: + minItems: 1 + items: + - description: Offset and length of the memory mapped registers + + interrupts: + maxItems: 1 + + clocks: + minItems: 1 + items: + - description: Display controller reference clock source + +unevaluatedProperties: false + +required: + - compatible + - reg + - interrupts + +Example: + /memreserve/ 0xf4000000 0x4000000; // (optional) + + dc0@32000000 { + compatible = "phytium,dc"; + reg = <0x0 0x32000000 0x0 0x8000>, + <0x0 0xf4000000 0x0 0x4000000>; // (optional) + interrupts = ; + pipe_mask = 0x3; + edp_mask = 0x0; + }; diff --git a/Documentation/devicetree/bindings/soc/loongson/loongson,ls3c6000se.yaml b/Documentation/devicetree/bindings/soc/loongson/loongson,ls3c6000se.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ac073de21259bc25e27cce4528a419bd1ba8afd --- /dev/null +++ b/Documentation/devicetree/bindings/soc/loongson/loongson,ls3c6000se.yaml @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/soc/loongson/loongson,ls3c6000se.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# +title: Loongson Security Module (SE) +maintainers: + - Qunqin Zhao +description: + This binding describes the Loongson Security Module which provides control for + hardware encryption acceleration devices.
+properties: + compatible: + items: + - enum: + - "loongson,ls3c6000se" + reg: + maxItems: 1 + interrupts: + minItems: 1 + maxItems: 32 + dmam_size: + $ref: /schemas/types.yaml#/definitions/uint32 +required: + - compatible + - reg + - interrupts + - dmam_size +additionalProperties: false +examples: + - | + #include + soc { + #address-cells = <2>; + #size-cells = <2>; + lsse@c00e0000000 { + compatible = "loongson,ls3c6000se"; + reg = <0xc00 0xe0000000 0x0 0x1000>; + interrupt-parent = <&liointc>; + interrupts = <0x20 IRQ_TYPE_LEVEL_HIGH>; + dmam_size = <0x800000>; + }; + }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 12a16031d7b6d010ab69bedecf6ef4ee160924ad..93258265c6b05e6adb6999bc1d26e69f86d04459 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -1047,6 +1047,8 @@ patternProperties: description: PHICOMM Co., Ltd. "^phytec,.*": description: PHYTEC Messtechnik GmbH + "^phytium,.*": + description: Phytium Technology Co., Ltd. "^picochip,.*": description: Picochip Ltd "^pine64,.*": diff --git a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst new file mode 100644 index 0000000000000000000000000000000000000000..f4fba897d931823c9f22e0ebca1ba7ff6f4ea56f --- /dev/null +++ b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst @@ -0,0 +1,846 @@ +.. SPDX-License-Identifier: GPL-2.0 + +========================================= +IAA Compression Accelerator Crypto Driver +========================================= + +Tom Zanussi + +The IAA crypto driver supports compression/decompression compatible +with the DEFLATE compression standard described in RFC 1951, which is +the compression/decompression algorithm exported by this module. + +The IAA hardware spec can be found here: + + https://cdrdv2.intel.com/v1/dl/getContent/721858 + +The iaa_crypto driver is designed to work as a layer underneath +higher-level compression devices such as zswap. + +Users can select IAA compress/decompress acceleration by specifying +one of the supported IAA compression algorithms in whatever facility +allows compression algorithms to be selected. + +For example, a zswap device can select the IAA 'fixed' mode +represented by selecting the 'deflate-iaa' crypto compression +algorithm:: + + # echo deflate-iaa > /sys/module/zswap/parameters/compressor + +This will tell zswap to use the IAA 'fixed' compression mode for all +compresses and decompresses. + +Currently, there is only one compression mode available, the 'fixed' +mode. + +The 'fixed' compression mode implements the compression scheme +specified by RFC 1951 and is given the crypto algorithm name +'deflate-iaa'. (Because the IAA hardware has a 4k history-window +limitation, only buffers <= 4k, or that have been compressed using a +<= 4k history window, are technically compliant with the deflate spec, +which allows for a window of up to 32k. Because of this limitation, +the IAA fixed mode deflate algorithm is given its own algorithm name +rather than simply 'deflate'). + + +Config options and other setup +============================== + +The IAA crypto driver is available via menuconfig using the following +path:: + + Cryptographic API -> Hardware crypto devices -> Support for Intel(R) IAA Compression Accelerator + +In the configuration file, the option is called CONFIG_CRYPTO_DEV_IAA_CRYPTO.
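+
+To check whether a given kernel build has the option enabled, grepping the
+kernel config is usually enough; a minimal sketch (the config file location
+is an assumption that varies by distribution)::
+
+  # Look for =y (built in) or =m (module) in the running kernel's config
+  grep CONFIG_CRYPTO_DEV_IAA_CRYPTO /boot/config-$(uname -r)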
+ +The IAA crypto driver also supports statistics, which are available +via menuconfig using the following path:: + + Cryptographic API -> Hardware crypto devices -> Support for Intel(R) IAA Compression -> Enable Intel(R) IAA Compression Accelerator Statistics + +In the configuration file, the option is called CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS. + +The following config options should also be enabled:: + + CONFIG_IRQ_REMAP=y + CONFIG_INTEL_IOMMU=y + CONFIG_INTEL_IOMMU_SVM=y + CONFIG_PCI_ATS=y + CONFIG_PCI_PRI=y + CONFIG_PCI_PASID=y + CONFIG_INTEL_IDXD=m + CONFIG_INTEL_IDXD_SVM=y + +IAA is one of the first Intel accelerator IPs that can work in +conjunction with the Intel IOMMU. There are multiple modes that exist +for testing. Based on IOMMU configuration, there are 3 modes:: + + - Scalable + - Legacy + - No IOMMU + + +Scalable mode +------------- + +Scalable mode supports Shared Virtual Memory (SVM or SVA). It is +entered when using the kernel boot commandline:: + + intel_iommu=on,sm_on + +with VT-d turned on in BIOS. + +With scalable mode, both shared and dedicated workqueues are available +for use. + +For scalable mode, the following BIOS settings should be enabled:: + + Socket Configuration > IIO Configuration > Intel VT for Directed I/O (VT-d) > Intel VT for Directed I/O + + Socket Configuration > IIO Configuration > PCIe ENQCMD > ENQCMDS + + +Legacy mode +----------- + +Legacy mode is entered when using the kernel boot commandline:: + + intel_iommu=off + +or VT-d is not turned on in BIOS. + +If you have booted into Linux and are not sure if VT-d is on, do a "dmesg +| grep -i dmar". If you don't see a number of DMAR devices enumerated, +most likely VT-d is not on. + +With legacy mode, only dedicated workqueues are available for use. + + +No IOMMU mode +------------- + +No IOMMU mode is entered when using the kernel boot commandline:: + + iommu=off + +With no IOMMU mode, only dedicated workqueues are available for use. + + +Usage +===== + +accel-config +------------ + +When loaded, the iaa_crypto driver automatically creates a default +configuration and enables it, and assigns default driver attributes. +If a different configuration or set of driver attributes is required, +the user must first disable the IAA devices and workqueues, reset the +configuration, and then re-register the deflate-iaa algorithm with the +crypto subsystem by removing and reinserting the iaa_crypto module. + +The :ref:`iaa_disable_script` in the 'Use Cases' +section below can be used to disable the default configuration. + +See :ref:`iaa_default_config` below for details of the default +configuration. + +More likely than not, however, and because of the complexity and +configurability of the accelerator devices, the user will want to +configure the device and manually enable the desired devices and +workqueues. + +The userspace tool that helps with this is called accel-config. Using +accel-config to configure a device or load a previously saved config +is highly recommended. The device can be controlled via sysfs +directly but comes with the warning that you should do this ONLY if +you know exactly what you are doing. The following sections will not +cover the sysfs interface but assume you will be using accel-config. + +The :ref:`iaa_sysfs_config` section in the appendix below can be +consulted for the sysfs interface details if interested.
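+
+Before configuring anything, it can be worth confirming which of the IOMMU
+modes described above the system booted in, since that determines whether
+shared workqueues are usable; a small shell sketch based on the checks
+mentioned earlier in this document::
+
+  # The mode is selected by these kernel boot parameters (see above)
+  grep -o 'intel_iommu=[^ ]*' /proc/cmdline
+
+  # With VT-d on, DMAR units are enumerated during boot
+  dmesg | grep -i dmar | head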
+ +The accel-config tool along with instructions for building it can be +found here: + + https://github.com/intel/idxd-config/#readme + +Typical usage +------------- + +In order for the iaa_crypto module to actually do any +compression/decompression work on behalf of a facility, one or more +IAA workqueues need to be bound to the iaa_crypto driver. + +For instance, here's an example of configuring an IAA workqueue and +binding it to the iaa_crypto driver (note that device names are +specified as 'iax' rather than 'iaa' - this is because upstream still +has the old 'iax' device naming in place) :: + + # configure wq1.0 + + accel-config config-wq --group-id=0 --mode=dedicated --type=kernel --priority=10 --name="iaa_crypto" --driver-name="crypto" iax1/wq1.0 + + accel-config config-engine iax1/engine1.0 --group-id=0 + + # enable IAA device iax1 + + accel-config enable-device iax1 + + # enable wq1.0 on IAX device iax1 + + accel-config enable-wq iax1/wq1.0 + +Whenever a new workqueue is bound to or unbound from the iaa_crypto +driver, the available workqueues are 'rebalanced' such that work +submitted from a particular CPU is given to the most appropriate +workqueue available. Current best practice is to configure and bind +at least one workqueue for each IAA device, but as long as there is at +least one workqueue configured and bound to any IAA device in the +system, the iaa_crypto driver will work, albeit most likely not as +efficiently. + +The IAA crypto algorithm is operational and compression and +decompression operations are fully enabled following the successful +binding of the first IAA workqueue to the iaa_crypto driver. + +Similarly, the IAA crypto algorithm is not operational and compression +and decompression operations are disabled following the unbinding of +the last IAA workqueue from the iaa_crypto driver. + +As a result, the IAA crypto algorithms and thus the IAA hardware are +only available when one or more workqueues are bound to the iaa_crypto +driver. + +When there are no IAA workqueues bound to the driver, the IAA crypto +algorithms can be unregistered by removing the module. + + +Driver attributes +----------------- + +There are a couple of user-configurable driver attributes that can be +used to configure various modes of operation. They're listed below, +along with their default values. To set any of these attributes, echo +the appropriate values to the attribute file located under +/sys/bus/dsa/drivers/crypto/ + +The attribute settings at the time the IAA algorithms are registered +are captured in each algorithm's crypto_ctx and used for all compresses +and decompresses when using that algorithm. + +The available attributes are: + + - verify_compress + + Toggle compression verification. If set, each compress will be + internally decompressed and the contents verified, returning error + codes if unsuccessful. This can be toggled with 0/1:: + + echo 0 > /sys/bus/dsa/drivers/crypto/verify_compress + + The default setting is '1' - verify all compresses. + + - sync_mode + + Select the mode to be used to wait for completion of each compress + and decompress operation. + + The crypto async interface support implemented by iaa_crypto + provides an implementation that satisfies the interface but does + so in a synchronous manner - it fills and submits the IDXD + descriptor and then loops around waiting for it to complete before + returning. This isn't a problem at the moment, since all existing + callers (e.g. zswap) wrap any asynchronous callees in a + synchronous wrapper anyway.
+ + The iaa_crypto driver does however provide true asynchronous + support for callers that can make use of it. In this mode, it + fills and submits the IDXD descriptor, then returns immediately + with -EINPROGRESS. The caller can then either poll for completion + itself, which requires specific code in the caller which currently + nothing in the upstream kernel implements, or go to sleep and wait + for an interrupt signaling completion. This latter mode is + supported by current users in the kernel such as zswap via + synchronous wrappers. Although it is supported this mode is + significantly slower than the synchronous mode that does the + polling in the iaa_crypto driver previously mentioned. + + This mode can be enabled by writing 'async_irq' to the sync_mode + iaa_crypto driver attribute:: + + echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode + + Async mode without interrupts (caller must poll) can be enabled by + writing 'async' to it:: + + echo async > /sys/bus/dsa/drivers/crypto/sync_mode + + The mode that does the polling in the iaa_crypto driver can be + enabled by writing 'sync' to it:: + + echo sync > /sys/bus/dsa/drivers/crypto/sync_mode + + The default mode is 'sync'. + +.. _iaa_default_config: + +IAA Default Configuration +------------------------- + +When the iaa_crypto driver is loaded, each IAA device has a single +work queue configured for it, with the following attributes:: + + mode "dedicated" + threshold 0 + size Total WQ Size from WQCAP + priority 10 + type IDXD_WQT_KERNEL + group 0 + name "iaa_crypto" + driver_name "crypto" + +The devices and workqueues are also enabled and therefore the driver +is ready to be used without any additional configuration. + +The default driver attributes in effect when the driver is loaded are:: + + sync_mode "sync" + verify_compress 1 + +In order to change either the device/work queue or driver attributes, +the enabled devices and workqueues must first be disabled. In order +to have the new configuration applied to the deflate-iaa crypto +algorithm, it needs to be re-registered by removing and reinserting +the iaa_crypto module. The :ref:`iaa_disable_script` in the 'Use +Cases' section below can be used to disable the default configuration. + +Statistics +========== + +If the optional debugfs statistics support is enabled, the IAA crypto +driver will generate statistics which can be accessed in debugfs at:: + + # ls -al /sys/kernel/debug/iaa-crypto/ + total 0 + drwxr-xr-x 2 root root 0 Mar 3 07:55 . + drwx------ 53 root root 0 Mar 3 07:55 .. 
+ -rw-r--r-- 1 root root 0 Mar 3 07:55 global_stats + -rw-r--r-- 1 root root 0 Mar 3 07:55 stats_reset + -rw-r--r-- 1 root root 0 Mar 3 07:55 wq_stats + +The global_stats file shows a set of global statistics collected since +the driver has been loaded or reset:: + + # cat global_stats + global stats: + total_comp_calls: 4300 + total_decomp_calls: 4164 + total_sw_decomp_calls: 0 + total_comp_bytes_out: 5993989 + total_decomp_bytes_in: 5993989 + total_completion_einval_errors: 0 + total_completion_timeout_errors: 0 + total_completion_comp_buf_overflow_errors: 136 + +The wq_stats file shows per-wq stats, a set for each iaa device and wq +in addition to some global stats:: + + # cat wq_stats + iaa device: + id: 1 + n_wqs: 1 + comp_calls: 0 + comp_bytes: 0 + decomp_calls: 0 + decomp_bytes: 0 + wqs: + name: iaa_crypto + comp_calls: 0 + comp_bytes: 0 + decomp_calls: 0 + decomp_bytes: 0 + + iaa device: + id: 3 + n_wqs: 1 + comp_calls: 0 + comp_bytes: 0 + decomp_calls: 0 + decomp_bytes: 0 + wqs: + name: iaa_crypto + comp_calls: 0 + comp_bytes: 0 + decomp_calls: 0 + decomp_bytes: 0 + + iaa device: + id: 5 + n_wqs: 1 + comp_calls: 1360 + comp_bytes: 1999776 + decomp_calls: 0 + decomp_bytes: 0 + wqs: + name: iaa_crypto + comp_calls: 1360 + comp_bytes: 1999776 + decomp_calls: 0 + decomp_bytes: 0 + + iaa device: + id: 7 + n_wqs: 1 + comp_calls: 2940 + comp_bytes: 3994213 + decomp_calls: 4164 + decomp_bytes: 5993989 + wqs: + name: iaa_crypto + comp_calls: 2940 + comp_bytes: 3994213 + decomp_calls: 4164 + decomp_bytes: 5993989 + ... + +Writing to 'stats_reset' resets all the stats, including the +per-device and per-wq stats:: + + # echo 1 > stats_reset + # cat wq_stats + global stats: + total_comp_calls: 0 + total_decomp_calls: 0 + total_comp_bytes_out: 0 + total_decomp_bytes_in: 0 + total_completion_einval_errors: 0 + total_completion_timeout_errors: 0 + total_completion_comp_buf_overflow_errors: 0 + ... + + +Use cases +========= + +Simple zswap test +----------------- + +For this example, the kernel should be configured according to the +dedicated mode options described above, and zswap should be enabled as +well:: + + CONFIG_ZSWAP=y + +This is a simple test that uses iaa_compress as the compressor for a +swap (zswap) device. It sets up the zswap device and then uses the +memory_memadvise program listed below to forcibly swap out and in a +specified number of pages, demonstrating both compress and decompress. + +The zswap test expects the work queues for each IAA device on the +system to be configured properly as a kernel workqueue with a +workqueue driver_name of "crypto". + +The first step is to make sure the iaa_crypto module is loaded:: + + modprobe iaa_crypto + +If the IAA devices and workqueues haven't previously been disabled and +reconfigured, then the default configuration should be in place and no +further IAA configuration is necessary. See :ref:`iaa_default_config` +below for details of the default configuration. 
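+
+Before checking the device state, it can be useful to confirm that the
+module and its "crypto" workqueue driver are actually present; a trivial
+sketch (sysfs layout as described elsewhere in this document)::
+
+  # The iaa_crypto module should be listed once loaded
+  lsmod | grep iaa_crypto
+
+  # The "crypto" workqueue driver should be registered on the DSA bus
+  ls /sys/bus/dsa/drivers/crypto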
+ +If the default configuration is in place, you should see the iaa +devices and wq0s enabled:: + + # cat /sys/bus/dsa/devices/iax1/state + enabled + # cat /sys/bus/dsa/devices/iax1/wq1.0/state + enabled + +To demonstrate that the following steps work as expected, these +commands can be used to enable debug output:: + + # echo -n 'module iaa_crypto +p' > /sys/kernel/debug/dynamic_debug/control + # echo -n 'module idxd +p' > /sys/kernel/debug/dynamic_debug/control + +Use the following commands to enable zswap:: + + # echo 0 > /sys/module/zswap/parameters/enabled + # echo 50 > /sys/module/zswap/parameters/max_pool_percent + # echo deflate-iaa > /sys/module/zswap/parameters/compressor + # echo zsmalloc > /sys/module/zswap/parameters/zpool + # echo 1 > /sys/module/zswap/parameters/enabled + # echo 0 > /sys/module/zswap/parameters/same_filled_pages_enabled + # echo 100 > /proc/sys/vm/swappiness + # echo never > /sys/kernel/mm/transparent_hugepage/enabled + # echo 1 > /proc/sys/vm/overcommit_memory + +Now you can run the zswap workload you want to measure. For +example, using the memory_madvise code below, the following command +will swap in and out 100 pages:: + + ./memory_madvise 100 + + Allocating 100 pages to swap in/out + Swapping out 100 pages + Swapping in 100 pages + Swapped out and in 100 pages + +You should see something like the following in the dmesg output:: + + [ 404.202972] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, src_addr 223925c000, nr_sgs 1, req->src 00000000ee7cb5e6, req->slen 4096, sg_dma_len(sg) 4096 + [ 404.202973] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, dst_addr 21dadf8000, nr_sgs 1, req->dst 000000008d6acea8, req->dlen 4096, sg_dma_len(sg) 8192 + [ 404.202975] idxd 0000:e7:02.0: iaa_compress: desc->src1_addr 223925c000, desc->src1_size 4096, desc->dst_addr 21dadf8000, desc->max_dst_size 4096, desc->src2_addr 2203543000, desc->src2_size 1568 + [ 404.202981] idxd 0000:e7:02.0: iaa_compress_verify: (verify) desc->src1_addr 21dadf8000, desc->src1_size 228, desc->dst_addr 223925c000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0 + ... + +Now that basic functionality has been demonstrated, the defaults can +be erased and replaced with a different configuration. To do that, +first disable zswap:: + + # echo lzo > /sys/module/zswap/parameters/compressor + # swapoff -a + # echo 0 > /sys/module/zswap/parameters/accept_threshold_percent + # echo 0 > /sys/module/zswap/parameters/max_pool_percent + # echo 0 > /sys/module/zswap/parameters/enabled + +Then run the :ref:`iaa_disable_script` in the 'Use Cases' section +below to disable the default configuration. + +Finally, turn swap back on:: + + # swapon -a + +Following all that, the IAA device(s) can now be re-configured and +enabled as desired for further testing. Below is one example. + +The zswap test expects the work queues for each IAA device on the +system to be configured properly as a kernel workqueue with a +workqueue driver_name of "crypto".
+ +The below script automatically does that:: + + #!/bin/bash + + echo "IAA devices:" + lspci -d:0cfe + echo "# IAA devices:" + lspci -d:0cfe | wc -l + + # + # count iaa instances + # + iaa_dev_id="0cfe" + num_iaa=$(lspci -d:${iaa_dev_id} | wc -l) + echo "Found ${num_iaa} IAA instances" + + # + # disable iaa wqs and devices + # + echo "Disable IAA" + + for ((i = 1; i < ${num_iaa} * 2; i += 2)); do + echo disable wq iax${i}/wq${i}.0 + accel-config disable-wq iax${i}/wq${i}.0 + echo disable iaa iax${i} + accel-config disable-device iax${i} + done + + echo "End Disable IAA" + + echo "Reload iaa_crypto module" + + rmmod iaa_crypto + modprobe iaa_crypto + + echo "End Reload iaa_crypto module" + + # + # configure iaa wqs and devices + # + echo "Configure IAA" + for ((i = 1; i < ${num_iaa} * 2; i += 2)); do + accel-config config-wq --group-id=0 --mode=dedicated --wq-size=128 --priority=10 --type=kernel --name="iaa_crypto" --driver-name="crypto" iax${i}/wq${i}.0 + accel-config config-engine iax${i}/engine${i}.0 --group-id=0 + done + + echo "End Configure IAA" + + # + # enable iaa wqs and devices + # + echo "Enable IAA" + + for ((i = 1; i < ${num_iaa} * 2; i += 2)); do + echo enable iaa iax${i} + accel-config enable-device iax${i} + echo enable wq iax${i}/wq${i}.0 + accel-config enable-wq iax${i}/wq${i}.0 + done + + echo "End Enable IAA" + +When the workqueues are bound to the iaa_crypto driver, you should +see something similar to the following in dmesg output if you've +enabled debug output (echo -n 'module iaa_crypto +p' > +/sys/kernel/debug/dynamic_debug/control):: + + [ 60.752344] idxd 0000:f6:02.0: add_iaa_wq: added wq 000000004068d14d to iaa 00000000c9585ba2, n_wq 1 + [ 60.752346] iaa_crypto: rebalance_wq_table: nr_nodes=2, nr_cpus 160, nr_iaa 8, cpus_per_iaa 20 + [ 60.752347] iaa_crypto: rebalance_wq_table: iaa=0 + [ 60.752349] idxd 0000:6a:02.0: request_iaa_wq: getting wq from iaa_device 0000000042d7bc52 (0) + [ 60.752350] idxd 0000:6a:02.0: request_iaa_wq: returning unused wq 00000000c8bb4452 (0) from iaa device 0000000042d7bc52 (0) + [ 60.752352] iaa_crypto: rebalance_wq_table: assigned wq for cpu=0, node=0 = wq 00000000c8bb4452 + [ 60.752354] iaa_crypto: rebalance_wq_table: iaa=0 + [ 60.752355] idxd 0000:6a:02.0: request_iaa_wq: getting wq from iaa_device 0000000042d7bc52 (0) + [ 60.752356] idxd 0000:6a:02.0: request_iaa_wq: returning unused wq 00000000c8bb4452 (0) from iaa device 0000000042d7bc52 (0) + [ 60.752358] iaa_crypto: rebalance_wq_table: assigned wq for cpu=1, node=0 = wq 00000000c8bb4452 + [ 60.752359] iaa_crypto: rebalance_wq_table: iaa=0 + [ 60.752360] idxd 0000:6a:02.0: request_iaa_wq: getting wq from iaa_device 0000000042d7bc52 (0) + [ 60.752361] idxd 0000:6a:02.0: request_iaa_wq: returning unused wq 00000000c8bb4452 (0) from iaa device 0000000042d7bc52 (0) + [ 60.752362] iaa_crypto: rebalance_wq_table: assigned wq for cpu=2, node=0 = wq 00000000c8bb4452 + [ 60.752364] iaa_crypto: rebalance_wq_table: iaa=0 + . + . + . + +Once the workqueues and devices have been enabled, the IAA crypto +algorithms are enabled and available. 
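+
+Aside from dmesg, registration can also be confirmed from userspace by
+looking the algorithm up in /proc/crypto; a minimal sketch::
+
+  # The deflate-iaa algorithm should be listed once registration succeeds
+  grep -A2 'deflate-iaa' /proc/crypto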
When the IAA crypto algorithms +have been successfully enabled, you should see the following dmesg +output:: + + [ 64.893759] iaa_crypto: iaa_crypto_enable: iaa_crypto now ENABLED + +Now run the following zswap-specific setup commands to have zswap use +the 'fixed' compression mode:: + + echo 0 > /sys/module/zswap/parameters/enabled + echo 50 > /sys/module/zswap/parameters/max_pool_percent + echo deflate-iaa > /sys/module/zswap/parameters/compressor + echo zsmalloc > /sys/module/zswap/parameters/zpool + echo 1 > /sys/module/zswap/parameters/enabled + echo 0 > /sys/module/zswap/parameters/same_filled_pages_enabled + + echo 100 > /proc/sys/vm/swappiness + echo never > /sys/kernel/mm/transparent_hugepage/enabled + echo 1 > /proc/sys/vm/overcommit_memory + +Finally, you can now run the zswap workload you want to measure. For +example, using the code below, the following command will swap in and +out 100 pages:: + + ./memory_madvise 100 + + Allocating 100 pages to swap in/out + Swapping out 100 pages + Swapping in 100 pages + Swapped out and in 100 pages + +You should see something like the following in the dmesg output if +you've enabled debug output (echo -n 'module iaa_crypto +p' > +/sys/kernel/debug/dynamic_debug/control):: + + [ 404.202972] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, src_addr 223925c000, nr_sgs 1, req->src 00000000ee7cb5e6, req->slen 4096, sg_dma_len(sg) 4096 + [ 404.202973] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, dst_addr 21dadf8000, nr_sgs 1, req->dst 000000008d6acea8, req->dlen 4096, sg_dma_len(sg) 8192 + [ 404.202975] idxd 0000:e7:02.0: iaa_compress: desc->src1_addr 223925c000, desc->src1_size 4096, desc->dst_addr 21dadf8000, desc->max_dst_size 4096, desc->src2_addr 2203543000, desc->src2_size 1568 + [ 404.202981] idxd 0000:e7:02.0: iaa_compress_verify: (verify) desc->src1_addr 21dadf8000, desc->src1_size 228, desc->dst_addr 223925c000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0 + [ 409.203227] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, src_addr 21ddd8b100, nr_sgs 1, req->src 0000000084adab64, req->slen 228, sg_dma_len(sg) 228 + [ 409.203235] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, dst_addr 21ee3dc000, nr_sgs 1, req->dst 000000004e2990d0, req->dlen 4096, sg_dma_len(sg) 4096 + [ 409.203239] idxd 0000:e7:02.0: iaa_decompress: desc->src1_addr 21ddd8b100, desc->src1_size 228, desc->dst_addr 21ee3dc000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0 + [ 409.203254] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, src_addr 21ddd8b100, nr_sgs 1, req->src 0000000084adab64, req->slen 228, sg_dma_len(sg) 228 + [ 409.203256] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, dst_addr 21f1551000, nr_sgs 1, req->dst 000000004e2990d0, req->dlen 4096, sg_dma_len(sg) 4096 + [ 409.203257] idxd 0000:e7:02.0: iaa_decompress: desc->src1_addr 21ddd8b100, desc->src1_size 228, desc->dst_addr 21f1551000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0 + +In order to unregister the IAA crypto algorithms, and register new +ones using different parameters, any users of the current algorithm +should be stopped and the IAA workqueues and devices disabled. 
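+
+One way to see whether any workqueues are still bound before disabling them
+is to list the devices attached to the iaa_crypto workqueue driver; a sketch
+assuming the standard sysfs bus layout, where bound devices show up as
+symlinks in the driver directory::
+
+  # Workqueues still bound to the driver appear as wqX.Y entries
+  ls -l /sys/bus/dsa/drivers/crypto | grep wq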
+
+In the case of zswap, remove the IAA crypto algorithm as the
+compressor and turn off swap (to remove all references to
+iaa_crypto)::
+
+  echo lzo > /sys/module/zswap/parameters/compressor
+  swapoff -a
+
+  echo 0 > /sys/module/zswap/parameters/accept_threshold_percent
+  echo 0 > /sys/module/zswap/parameters/max_pool_percent
+  echo 0 > /sys/module/zswap/parameters/enabled
+
+Once zswap is disabled and no longer using iaa_crypto, the IAA wqs and
+devices can be disabled.
+
+.. _iaa_disable_script:
+
+IAA disable script
+------------------
+
+The below script automatically does that::
+
+  #!/bin/bash
+
+  echo "IAA devices:"
+  lspci -d:0cfe
+  echo "# IAA devices:"
+  lspci -d:0cfe | wc -l
+
+  #
+  # count iaa instances
+  #
+  iaa_dev_id="0cfe"
+  num_iaa=$(lspci -d:${iaa_dev_id} | wc -l)
+  echo "Found ${num_iaa} IAA instances"
+
+  #
+  # disable iaa wqs and devices
+  #
+  echo "Disable IAA"
+
+  for ((i = 1; i < ${num_iaa} * 2; i += 2)); do
+      echo disable wq iax${i}/wq${i}.0
+      accel-config disable-wq iax${i}/wq${i}.0
+      echo disable iaa iax${i}
+      accel-config disable-device iax${i}
+  done
+
+  echo "End Disable IAA"
+
+Finally, at this point the iaa_crypto module can be removed, which
+will unregister the current IAA crypto algorithms::
+
+  rmmod iaa_crypto
+
+
+memory_madvise.c (build with gcc -o memory_madvise memory_madvise.c)::
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <stdint.h>
+  #include <string.h>
+  #include <unistd.h>
+  #include <sys/mman.h>
+
+  #ifndef MADV_PAGEOUT
+  #define MADV_PAGEOUT       21  /* force pages out immediately */
+  #endif
+
+  #define PG_SZ              4096
+
+  int main(int argc, char **argv)
+  {
+        int i, nr_pages = 1;
+        int64_t *dump_ptr;
+        char *addr, *a;
+        int loop = 1;
+
+        if (argc > 1)
+                nr_pages = atoi(argv[1]);
+
+        printf("Allocating %d pages to swap in/out\n", nr_pages);
+
+        /* allocate pages */
+        addr = mmap(NULL, nr_pages * PG_SZ, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+        if (addr == MAP_FAILED) {
+                perror("mmap");
+                return 1;
+        }
+        *addr = 1;
+
+        /* initialize data in page to all '*' chars */
+        memset(addr, '*', nr_pages * PG_SZ);
+
+        printf("Swapping out %d pages\n", nr_pages);
+
+        /* Tell kernel to swap it out */
+        madvise(addr, nr_pages * PG_SZ, MADV_PAGEOUT);
+
+        while (loop > 0) {
+                /* Wait for swap out to finish */
+                sleep(5);
+
+                a = addr;
+
+                printf("Swapping in %d pages\n", nr_pages);
+
+                /* Access the page ... this will swap it back in again */
+                for (i = 0; i < nr_pages; i++) {
+                        if (a[0] != '*') {
+                                printf("Bad data from decompress!!!!!\n");
+
+                                dump_ptr = (int64_t *)a;
+                                for (int j = 0; j < 100; j++) {
+                                        printf("  page %d data: %#llx\n", i, (unsigned long long)*dump_ptr);
+                                        dump_ptr++;
+                                }
+                        }
+
+                        a += PG_SZ;
+                }
+
+                loop--;
+        }
+
+        printf("Swapped out and in %d pages\n", nr_pages);
+
+        return 0;
+  }
+
+Appendix
+========
+
+.. _iaa_sysfs_config:
+
+IAA sysfs config interface
+--------------------------
+
+Below is a description of the IAA sysfs interface, which, as mentioned
+in the main document, should only be used if you know exactly what you
+are doing. Even then, there is no compelling reason to use it directly,
+since accel-config can do everything the sysfs interface can; in fact,
+accel-config is built on top of it under the covers.
+
+The 'IAA config path' is /sys/bus/dsa/devices and contains
+subdirectories representing each IAA device, workqueue, engine, and
+group. Note that in the sysfs interface, the IAA devices are actually
+named using iax, e.g. iax1, iax3, etc. (The IAA devices are the
+odd-numbered devices; the even-numbered devices are DSA devices and
+can be ignored for IAA.)
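+
+For orientation, listing the config path shows the per-device nodes
+described above. The listing below is a hypothetical, abridged example
+for a system with two IAA instances; the actual device numbering and
+the set of nodes vary by platform::
+
+  # ls /sys/bus/dsa/devices
+  dsa0  dsa2  engine1.0  engine3.0  group1.0  group3.0  iax1  iax3  wq1.0  wq3.0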
+
+The 'IAA device bind path' is /sys/bus/dsa/drivers/idxd/bind and is
+the file that is written to enable an IAA device.
+
+The 'IAA workqueue bind path' is /sys/bus/dsa/drivers/crypto/bind and
+is the file that is written to enable an IAA workqueue.
+
+Similarly, /sys/bus/dsa/drivers/idxd/unbind and
+/sys/bus/dsa/drivers/crypto/unbind are used to disable IAA devices and
+workqueues.
+
+The basic sequence of commands needed to set up the IAA devices and
+workqueues is, for each device:
+
+1) Disable any workqueues enabled on the device. For example, to
+   disable workqueues 0 and 1 on IAA device 3::
+
+     # echo wq3.0 > /sys/bus/dsa/drivers/crypto/unbind
+     # echo wq3.1 > /sys/bus/dsa/drivers/crypto/unbind
+
+2) Disable the device. For example, to disable IAA device 3::
+
+     # echo iax3 > /sys/bus/dsa/drivers/idxd/unbind
+
+3) Configure the desired workqueues. For example, to configure
+   workqueue 3 on IAA device 3::
+
+     # echo dedicated > /sys/bus/dsa/devices/iax3/wq3.3/mode
+     # echo 128 > /sys/bus/dsa/devices/iax3/wq3.3/size
+     # echo 0 > /sys/bus/dsa/devices/iax3/wq3.3/group_id
+     # echo 10 > /sys/bus/dsa/devices/iax3/wq3.3/priority
+     # echo "kernel" > /sys/bus/dsa/devices/iax3/wq3.3/type
+     # echo "iaa_crypto" > /sys/bus/dsa/devices/iax3/wq3.3/name
+     # echo "crypto" > /sys/bus/dsa/devices/iax3/wq3.3/driver_name
+
+4) Enable the device. For example, to enable IAA device 3::
+
+     # echo iax3 > /sys/bus/dsa/drivers/idxd/bind
+
+5) Enable the desired workqueues on the device. For example, to
+   enable workqueues 0 and 1 on IAA device 3::
+
+     # echo wq3.0 > /sys/bus/dsa/drivers/crypto/bind
+     # echo wq3.1 > /sys/bus/dsa/drivers/crypto/bind
diff --git a/Documentation/driver-api/crypto/iaa/index.rst b/Documentation/driver-api/crypto/iaa/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aa6837e272643fcba85119b2bee97d1914cf990a
--- /dev/null
+++ b/Documentation/driver-api/crypto/iaa/index.rst
@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================================
+IAA (Intel Analytics Accelerator)
+=================================
+
+IAA provides hardware compression and decompression via the crypto
+API.
+
+.. toctree::
+   :maxdepth: 1
+
+   iaa-crypto
+
+.. only:: subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/driver-api/crypto/index.rst b/Documentation/driver-api/crypto/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fb9709b98beaabd6c4f57082a33e9623006b7d6f
--- /dev/null
+++ b/Documentation/driver-api/crypto/index.rst
@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============
+Crypto Drivers
+==============
+
+Documentation for crypto drivers that may need more involved setup and
+configuration.
+
+.. toctree::
+   :maxdepth: 1
+
+   iaa/index
+
+.. only:: subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 1e16a40da3baad60577750d667fe8450e3f845e0..f0f8f521f65bcf70959f275b1a192c8a26b55420 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -114,6 +114,7 @@ available subsections can be seen below.
    zorro
    hte/index
    wmi
+   crypto/index
 
 ..
only:: subproject and html diff --git a/Documentation/filesystems/erofs.rst b/Documentation/filesystems/erofs.rst index f200d78744952825790e105f8903c44d1601e02e..c293f8e37468cddacafb5d3edc615134408d6b3b 100644 --- a/Documentation/filesystems/erofs.rst +++ b/Documentation/filesystems/erofs.rst @@ -75,7 +75,7 @@ Here are the main features of EROFS: - Support merging tail-end data into a special inode as fragments. - - Support large folios for uncompressed files. + - Support large folios to make use of THPs (Transparent Hugepages); - Support direct I/O on uncompressed files to avoid double caching for loop devices; @@ -91,6 +91,10 @@ compatibility checking tool (fsck.erofs), and a debugging tool (dump.erofs): - git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git +For more information, please also refer to the documentation site: + +- https://erofs.docs.kernel.org + Bugs and patches are welcome, please kindly help us and send to the following linux-erofs mailing list: @@ -199,7 +203,7 @@ may not. All metadatas can be now observed in two different spaces (views): | | |__________________| 64 bytes - Xattrs, extents, data inline are followed by the corresponding inode with + Xattrs, extents, data inline are placed after the corresponding inode with proper alignment, and they could be optional for different data mappings. _currently_ total 5 data layouts are supported: diff --git a/Documentation/filesystems/fuse-io.rst b/Documentation/filesystems/fuse-io.rst index 255a368fe534b4582c9be673523330e962803123..6464de4266ad504f8bdc89c834230f3c9f5219dc 100644 --- a/Documentation/filesystems/fuse-io.rst +++ b/Documentation/filesystems/fuse-io.rst @@ -15,7 +15,8 @@ The direct-io mode can be selected with the FOPEN_DIRECT_IO flag in the FUSE_OPEN reply. In direct-io mode the page cache is completely bypassed for reads and writes. -No read-ahead takes place. Shared mmap is disabled. +No read-ahead takes place. Shared mmap is disabled by default. To allow shared +mmap, the FUSE_DIRECT_IO_ALLOW_MMAP flag may be enabled in the FUSE_INIT reply. In cached mode reads may be satisfied from the page cache, and data may be read-ahead by the kernel to fill the cache. The cache is always kept consistent diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 2b59cff8be1798cf3ecb947cf3203ef1d3300ad2..6652b658ee7755e4f5610283e09ce32075dc6ead 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -528,9 +528,9 @@ replaced by copy-on-write) part of the underlying shmem object out on swap. does not take into account swapped out page of underlying shmem objects. "Locked" indicates whether the mapping is locked in memory or not. -"THPeligible" indicates whether the mapping is eligible for allocating THP -pages as well as the THP is PMD mappable or not - 1 if true, 0 otherwise. -It just shows the current status. +"THPeligible" indicates whether the mapping is eligible for allocating +naturally aligned THP pages of any currently enabled size. 1 if true, 0 +otherwise. "VmFlags" field deserves a separate description. This member represents the kernel flags associated with the particular virtual memory area in two letter diff --git a/Documentation/gpu/hydcu-fixup-header.rst b/Documentation/gpu/hydcu-fixup-header.rst new file mode 100644 index 0000000000000000000000000000000000000000..5dca3ff3a137d4d19d193736de954ecfd3053f04 --- /dev/null +++ b/Documentation/gpu/hydcu-fixup-header.rst @@ -0,0 +1,13 @@ +.. 
SPDX-License-Identifier: GPL-2.0-only
+
+===================================
+drm/hygon/hydcu-fixup-header driver
+===================================
+
+The drm/hygon/hydcu-fixup-header driver supports all HYGON DCUs.
+
+General description
+===================
+
+The drm/hygon/hydcu-fixup-header driver adds the NO_BUS_RESET flag to
+hydcu devices to disable VFIO PCI reset, as the DCU does not support
+it yet.
diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst
index 9a607059ea11cfc3765a11c98d46f11021690215..93c9239b9ebe23146c417aae0a725856c7fca230 100644
--- a/Documentation/mm/transhuge.rst
+++ b/Documentation/mm/transhuge.rst
@@ -117,7 +117,7 @@ pages:
 - map/unmap of a PMD entry for the whole THP increment/decrement
   folio->_entire_mapcount and also increment/decrement
-  folio->_nr_pages_mapped by COMPOUND_MAPPED when _entire_mapcount
+  folio->_nr_pages_mapped by ENTIRELY_MAPPED when _entire_mapcount
   goes from -1 to 0 or 0 to -1.
 
 - map/unmap of individual pages with PTE entry increment/decrement
@@ -156,7 +156,7 @@ Partial unmap and deferred_split_folio()
 
 Unmapping part of THP (with munmap() or other way) is not going to free
 memory immediately. Instead, we detect that a subpage of THP is not in use
-in page_remove_rmap() and queue the THP for splitting if memory pressure
+in folio_remove_rmap_*() and queue the THP for splitting if memory pressure
 comes. Splitting will free up unused subpages.
 
 Splitting the page right away is not an option due to locking context in
diff --git a/Documentation/mm/unevictable-lru.rst b/Documentation/mm/unevictable-lru.rst
index 67f1338440a50ab942160f6489964c41bc7c3b9e..b6a07a26b10d58ca2e0b24bcaa0739159033dbb0 100644
--- a/Documentation/mm/unevictable-lru.rst
+++ b/Documentation/mm/unevictable-lru.rst
@@ -486,7 +486,7 @@ munlock the pages if we're removing the last VM_LOCKED VMA that maps the pages.
 Before the unevictable/mlock changes, mlocking did not mark the pages in any
 way, so unmapping them required no processing.
 
-For each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+For each PTE (or PMD) being unmapped from a VMA, folio_remove_rmap_*() calls
 munlock_vma_folio(), which calls munlock_folio() when the VMA is VM_LOCKED
 (unless it was a PTE mapping of a part of a transparent huge page).
 
@@ -511,7 +511,7 @@ userspace; truncation even unmaps and deletes any private anonymous pages
 which had been Copied-On-Write from the file pages now being truncated.
 
 Mlocked pages can be munlocked and deleted in this way: like with munmap(),
-for each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+for each PTE (or PMD) being unmapped from a VMA, folio_remove_rmap_*() calls
 munlock_vma_folio(), which calls munlock_folio() when the VMA is VM_LOCKED
 (unless it was a PTE mapping of a part of a transparent huge page).
 
diff --git a/Documentation/networking/sfp-phylink.rst b/Documentation/networking/sfp-phylink.rst
index 55b65f607a640f4acf3f0edc52b489917a73a3ea..b069d34d7f5cf1436c909ad31b4f22468f932074 100644
--- a/Documentation/networking/sfp-phylink.rst
+++ b/Documentation/networking/sfp-phylink.rst
@@ -200,6 +200,13 @@ this documentation.
     when the in-band link state changes - otherwise the link will never
     come up.
 
+    The :c:func:`mac_get_caps` method is optional, and if provided should
+    return the phylink MAC capabilities that are supported for the passed
+    ``interface`` mode. In general, there is no need to implement this method.
+ Phylink will use these capabilities in combination with permissible + capabilities for ``interface`` to determine the allowable ethtool link + modes. + The :c:func:`validate` method should mask the supplied supported mask, and ``state->advertising`` with the supported ethtool link modes. These are the new ethtool link modes, so bitmask operations must be diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst index 41ed2ceafc92ee1b09148913e50c762c16542663..329b00ba40f3949a6e66f6a897f1bb59c5ed49c4 100644 --- a/Documentation/scheduler/sched-bwc.rst +++ b/Documentation/scheduler/sched-bwc.rst @@ -122,9 +122,15 @@ This is tunable via procfs:: Larger slice values will reduce transfer overheads, while smaller values allow for more fine-grained consumption. +Sometimes users might want a group to burst without accumulation. This is +tunable via:: + /proc/sys/kernel/sched_cfs_bw_burst_onset_percent (default=0) + +Up to 100% runtime of cpu.cfs_burst_us might be given on setting bandwidth. + Statistics ---------- -A group's bandwidth statistics are exported via 5 fields in cpu.stat. +A group's bandwidth statistics are exported via 6 fields in cpu.stat. cpu.stat: @@ -132,6 +138,7 @@ cpu.stat: - nr_throttled: Number of times the group has been throttled/limited. - throttled_time: The total time duration (in nanoseconds) for which entities of the group have been throttled. +- current_bw: Current runtime in global pool. - nr_bursts: Number of periods burst occurs. - burst_time: Cumulative wall-time (in nanoseconds) that any CPUs has used above quota in respective periods. diff --git a/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst b/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst index f1e9ab18206c33c5d888ed6ff8fad5bb1b845533..472761938682c0703cf18b862f82a701cd1beb3a 100644 --- a/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst +++ b/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst @@ -87,6 +87,38 @@ PCH-LPC/PCH-MSI,然后被EIOINTC统一收集,再直接到达CPUINTC:: | Devices | +---------+ +高级扩展IRQ模型 +=============== + +在这种模型里面,IPI(Inter-Processor Interrupt)和CPU本地时钟中断直接发送到CPUINTC, +CPU串口(UARTs)中断发送到LIOINTC,PCH-MSI中断发送到AVECINTC,而后通过AVECINTC直接 +送达CPUINTC,而其他所有设备的中断则分别发送到所连接的PCH-PIC/PCH-LPC,然后由EIOINTC +统一收集,再直接到达CPUINTC:: + + +-----+ +-----------------------+ +-------+ + | IPI | --> | CPUINTC | <-- | Timer | + +-----+ +-----------------------+ +-------+ + ^ ^ ^ + | | | + +---------+ +----------+ +---------+ +-------+ + | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs | + +---------+ +----------+ +---------+ +-------+ + ^ ^ + | | + +---------+ +---------+ + | PCH-PIC | | PCH-MSI | + +---------+ +---------+ + ^ ^ ^ + | | | + +---------+ +---------+ +---------+ + | Devices | | PCH-LPC | | Devices | + +---------+ +---------+ +---------+ + ^ + | + +---------+ + | Devices | + +---------+ + ACPI相关的定义 ============== diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index 4ea5b837399ad1cb93f837a0f090bc87ed1f3089..63f7ed8d0796af73b5347d97ca3cafa9f3b9c83f 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -82,8 +82,9 @@ Code Seq# Include File Comments 0x10 00-0F drivers/char/s390/vmcp.h 0x10 10-1F arch/s390/include/uapi/sclp_ctl.h 0x10 20-2F arch/s390/include/uapi/asm/hypfs.h -0x12 all linux/fs.h +0x12 all linux/fs.h BLK* ioctls linux/blkpg.h +0x15 all linux/fs.h FS_IOC_* ioctls 0x1b all 
InfiniBand Subsystem
0x20  all                                                            drivers/cdrom/cm206.h
diff --git a/Documentation/virt/coco/csv-guest.rst b/Documentation/virt/coco/csv-guest.rst
new file mode 100644
index 0000000000000000000000000000000000000000..23cba2a5fd7c093bea25d6c1cc0f1ce41846d015
--- /dev/null
+++ b/Documentation/virt/coco/csv-guest.rst
@@ -0,0 +1,33 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+CSV Guest API Documentation
+===========================
+
+1. General description
+======================
+
+The CSV guest driver exposes IOCTL interfaces via the /dev/csv-guest misc
+device to allow userspace to get certain CSV guest-specific details.
+
+2. API description
+==================
+
+In this section, for each supported IOCTL, the following information is
+provided along with a generic description.
+
+:Input parameters: Parameters passed to the IOCTL and related details.
+:Output: Details about output data and return value (with details about
+  the non-common error values).
+
+2.1 CSV_CMD_GET_REPORT
+----------------------
+
+:Input parameters: struct csv_report_req
+:Output: Upon successful execution, CSV_REPORT data is copied to
+  csv_report_req.report_data and 0 is returned. -EINVAL is returned for
+  invalid operands, -EIO on VMMCALL failure, or a standard error number
+  on other common failures.
+
+The CSV_CMD_GET_REPORT IOCTL can be used by the attestation software to get
+the CSV_REPORT from the CSV module using VMMCALL[KVM_HC_VM_ATTESTATION].
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 21a7578142a18b4ad537acbd654ba510dd16fc9f..edc682a94ca4f8a727c5cdecab763d67883de0f9 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -416,6 +416,13 @@ Reads the general purpose registers from the vcpu.
 	__u64 pc;
   };
 
+  /* LoongArch */
+  struct kvm_regs {
+	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+	unsigned long gpr[32];
+	unsigned long pc;
+  };
+
 
 4.12 KVM_SET_REGS
 -----------------
@@ -506,7 +513,7 @@ translation mode.
 ------------------
 
 :Capability: basic
-:Architectures: x86, ppc, mips, riscv
+:Architectures: x86, ppc, mips, riscv, loongarch
 :Type: vcpu ioctl
 :Parameters: struct kvm_interrupt (in)
 :Returns: 0 on success, negative on failure.
@@ -592,6 +599,14 @@ b) KVM_INTERRUPT_UNSET
 
 This is an asynchronous vcpu ioctl and can be invoked from any thread.
 
+LOONGARCH:
+^^^^^^^^^^
+
+Queues an external interrupt to be injected into the virtual CPU. A negative
+interrupt number dequeues the interrupt.
+
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
+
 4.17 KVM_DEBUG_GUEST
 --------------------
@@ -737,7 +752,7 @@ signal mask.
 ----------------
 
 :Capability: basic
-:Architectures: x86
+:Architectures: x86, loongarch
 :Type: vcpu ioctl
 :Parameters: struct kvm_fpu (out)
 :Returns: 0 on success, -1 on error
@@ -746,7 +761,7 @@ Reads the floating point state from the vcpu.
 
 ::
 
-  /* for KVM_GET_FPU and KVM_SET_FPU */
+  /* x86: for KVM_GET_FPU and KVM_SET_FPU */
   struct kvm_fpu {
 	__u8  fpr[8][16];
 	__u16 fcw;
@@ -761,12 +776,21 @@ Reads the floating point state from the vcpu.
__u32 pad2; }; + /* LoongArch: for KVM_GET_FPU and KVM_SET_FPU */ + struct kvm_fpu { + __u32 fcsr; + __u64 fcc; + struct kvm_fpureg { + __u64 val64[4]; + }fpr[32]; + }; + 4.23 KVM_SET_FPU ---------------- :Capability: basic -:Architectures: x86 +:Architectures: x86, loongarch :Type: vcpu ioctl :Parameters: struct kvm_fpu (in) :Returns: 0 on success, -1 on error @@ -775,7 +799,7 @@ Writes the floating point state to the vcpu. :: - /* for KVM_GET_FPU and KVM_SET_FPU */ + /* x86: for KVM_GET_FPU and KVM_SET_FPU */ struct kvm_fpu { __u8 fpr[8][16]; __u16 fcw; @@ -790,6 +814,15 @@ Writes the floating point state to the vcpu. __u32 pad2; }; + /* LoongArch: for KVM_GET_FPU and KVM_SET_FPU */ + struct kvm_fpu { + __u32 fcsr; + __u64 fcc; + struct kvm_fpureg { + __u64 val64[4]; + }fpr[32]; + }; + 4.24 KVM_CREATE_IRQCHIP ----------------------- @@ -1387,7 +1420,7 @@ documentation when it pops into existence). ------------------- :Capability: KVM_CAP_ENABLE_CAP -:Architectures: mips, ppc, s390, x86 +:Architectures: mips, ppc, s390, x86, loongarch :Type: vcpu ioctl :Parameters: struct kvm_enable_cap (in) :Returns: 0 on success; -1 on error @@ -1442,7 +1475,7 @@ for vm-wide capabilities. --------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm64, riscv +:Architectures: x86, s390, arm64, riscv, loongarch :Type: vcpu ioctl :Parameters: struct kvm_mp_state (out) :Returns: 0 on success; -1 on error @@ -1460,7 +1493,7 @@ Possible values are: ========================== =============================================== KVM_MP_STATE_RUNNABLE the vcpu is currently running - [x86,arm64,riscv] + [x86,arm64,riscv,loongarch] KVM_MP_STATE_UNINITIALIZED the vcpu is an application processor (AP) which has not yet received an INIT signal [x86] KVM_MP_STATE_INIT_RECEIVED the vcpu has received an INIT signal, and is @@ -1516,11 +1549,14 @@ For riscv: The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. +On LoongArch, only the KVM_MP_STATE_RUNNABLE state is used to reflect +whether the vcpu is runnable. + 4.39 KVM_SET_MP_STATE --------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm64, riscv +:Architectures: x86, s390, arm64, riscv, loongarch :Type: vcpu ioctl :Parameters: struct kvm_mp_state (in) :Returns: 0 on success; -1 on error @@ -1538,6 +1574,9 @@ For arm64/riscv: The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu should be paused or not. +On LoongArch, only the KVM_MP_STATE_RUNNABLE state is used to reflect +whether the vcpu is runnable. + 4.40 KVM_SET_IDENTITY_MAP_ADDR ------------------------------ @@ -2841,6 +2880,19 @@ Following are the RISC-V D-extension registers: 0x8020 0000 0600 0020 fcsr Floating point control and status register ======================= ========= ============================================= +LoongArch registers are mapped using the lower 32 bits. The upper 16 bits of +that is the register group type. 
+
+LoongArch CSR registers are used to control the guest CPU or to get the
+status of the guest CPU, and they have the following id bit patterns::
+
+  0x9030 0000 0001 00 (64-bit)
+
+LoongArch KVM control registers are used to implement some newly defined
+functions, such as setting the vcpu counter or resetting the vcpu, and
+they have the following id bit patterns::
+
+  0x9030 0000 0002
+
 4.69 KVM_GET_ONE_REG
 --------------------
diff --git a/Documentation/virt/kvm/index.rst b/Documentation/virt/kvm/index.rst
index ad13ec55ddfe5110ab8922a5aafe1732951209de..9ca5a45c2140a9f3dfc1dc58b28a871f4c3b844f 100644
--- a/Documentation/virt/kvm/index.rst
+++ b/Documentation/virt/kvm/index.rst
@@ -14,6 +14,7 @@ KVM
    s390/index
    ppc-pv
    x86/index
+   loongarch/index
 
    locking
    vcpu-requests
diff --git a/Documentation/virt/kvm/loongarch/hypercalls.rst b/Documentation/virt/kvm/loongarch/hypercalls.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1679e48d67d28c4ee30cdc9120d74ae2f54cda57
--- /dev/null
+++ b/Documentation/virt/kvm/loongarch/hypercalls.rst
@@ -0,0 +1,79 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================
+The LoongArch paravirtual interface
+===================================
+
+KVM hypercalls use the HVCL instruction with code 0x100. The hypercall
+number is put in a0, up to five arguments may be placed in a1-a5, and
+the return value is placed in v0 (an alias of a0).
+
+The code for this interface can be found in arch/loongarch/kvm/*
+
+Querying for existence
+======================
+
+To find out whether we are running on KVM or not, cpucfg can be used
+with index CPUCFG_KVM_BASE (0x40000000). The cpucfg range 0x40000000 -
+0x400000FF is marked as a specially reserved range; all existing and
+future processors will not implement any features in this range.
+
+When Linux is running on KVM, cpucfg with index CPUCFG_KVM_BASE
+(0x40000000) returns the magic string "KVM\0".
+
+Once you have determined that you are running under a PV-capable KVM,
+you can use hypercalls as described below.
+
+KVM hypercall ABI
+=================
+
+The hypercall ABI on KVM is simple: only one scratch register, a0 (v0),
+and at most five generic registers are used as input parameters. FP
+registers and vector registers are not used for input and should not be
+modified during a hypercall. Hypercall functions can be inlined since
+there is only one scratch register.
+
+The parameters are as follows:
+
+        ======== ================ ================
+        Register IN               OUT
+        ======== ================ ================
+        a0       function number  Return code
+        a1       1st parameter    -
+        a2       2nd parameter    -
+        a3       3rd parameter    -
+        a4       4th parameter    -
+        a5       5th parameter    -
+        ======== ================ ================
+
+Return codes can be as follows:
+
+        ==== =========================
+        Code Meaning
+        ==== =========================
+        0    Success
+        -1   Hypercall not implemented
+        -2   Hypercall parameter error
+        ==== =========================
+
+KVM Hypercalls Documentation
+============================
+
+The template for each hypercall is:
+
+1. Hypercall name
+2. Purpose
+
+1. KVM_HCALL_FUNC_PV_IPI
+------------------------
+
+:Purpose: Send IPIs to multiple vCPUs.
+
+- a0: KVM_HCALL_FUNC_PV_IPI
+- a1: lower part of the bitmap of destination physical CPUIDs
+- a2: higher part of the bitmap of destination physical CPUIDs
+- a3: the lowest physical CPUID in the bitmap
+
+The hypercall lets a guest send multicast IPIs, with at most 128
+destinations per hypercall. The destinations are represented by a bitmap
+contained in the first two arguments (a1 and a2). Bit 0 of a1 corresponds
+to the physical CPUID in the third argument (a3), bit 1 corresponds to
+the physical CPUID a3+1, and so on.
diff --git a/Documentation/virt/kvm/loongarch/index.rst b/Documentation/virt/kvm/loongarch/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..83387b4c53455033acaac2ddceb404d898ccaa39
--- /dev/null
+++ b/Documentation/virt/kvm/loongarch/index.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+KVM for LoongArch systems
+=========================
+
+.. toctree::
+   :maxdepth: 2
+
+   hypercalls.rst
diff --git a/Documentation/vm/memcg_zombie_reaper.rst b/Documentation/vm/memcg_zombie_reaper.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9542ec11d9d3ed739bfdff6285d075bbcf4ed7f3
--- /dev/null
+++ b/Documentation/vm/memcg_zombie_reaper.rst
@@ -0,0 +1,41 @@
+.. _memcg_zombie_reaper:
+
+===================
+Memcg Zombie Reaper
+===================
+
+After a memcg is deleted, page caches may still reference it, causing
+large numbers of dead (zombie) memcgs in the system. These slow down
+accesses to files such as "/sys/fs/cgroup/cpu/memory.stat" due to the
+huge number of iterations required, further causing various latencies.
+The "zombie memcgs reaper" is a tool to reclaim these dead memcgs. It
+has two modes, illustrated by the example after the lists below:
+
+"Background kthread reaper" mode
+--------------------------------
+In this mode, a kthread reaper keeps reclaiming in the background.
+Some knobs are provided to control the reaper's scan behaviour:
+
+- /sys/kernel/mm/memcg_reaper/scan_interval
+
+  The scan period in seconds. Default is 5s.
+
+- /sys/kernel/mm/memcg_reaper/pages_scan
+
+  The number of pages scanned per scan. Default is 1310720
+  (5 GiB in 4 KiB pages).
+
+- /sys/kernel/mm/memcg_reaper/verbose
+
+  Output some zombie memcg information for debugging purposes.
+  Default off.
+
+- /sys/kernel/mm/memcg_reaper/reap_background
+
+  On/off switch. The default "0" means off. Write "1" to switch it on.
+
+"One-shot trigger" mode
+-----------------------
+In this mode, there is no guarantee that the reclaim will finish; you
+may need to check and launch multiple rounds as needed.
+
+- /sys/kernel/mm/memcg_reaper/reap
+
+  Write "1" to trigger one round of zombie memcg reaping.
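+
+As a quick illustration of the two modes, using only the knobs described
+above (the values here are arbitrary examples)::
+
+  # one-shot: trigger a single round of reaping
+  echo 1 > /sys/kernel/mm/memcg_reaper/reap
+
+  # background: scan every 10 seconds, then enable the kthread reaper
+  echo 10 > /sys/kernel/mm/memcg_reaper/scan_interval
+  echo 1 > /sys/kernel/mm/memcg_reaper/reap_background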
diff --git a/MAINTAINERS b/MAINTAINERS index ae4c0cec50736048c8ba7fbd378c5c4bf8c74759..f6c91f5b2ad2ecfc130e813b2b9a78a64b86d813 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7741,6 +7741,7 @@ R: Yue Hu R: Jeffle Xu L: linux-erofs@lists.ozlabs.org S: Maintained +W: https://erofs.docs.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git F: Documentation/ABI/testing/sysfs-fs-erofs F: Documentation/filesystems/erofs.rst @@ -8162,7 +8163,6 @@ F: lib/memcpy_kunit.c F: lib/strcat_kunit.c F: lib/strscpy_kunit.c F: lib/test_fortify/* -F: scripts/test_fortify.sh K: \b__NO_FORTIFY\b FPGA DFL DRIVERS @@ -10571,6 +10571,13 @@ S: Supported Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ F: drivers/dma/ioat* +INTEL IAA CRYPTO DRIVER +M: Tom Zanussi +L: linux-crypto@vger.kernel.org +S: Supported +F: Documentation/driver-api/crypto/iaa/iaa-crypto.rst +F: drivers/crypto/intel/iaa/* + INTEL IDLE DRIVER M: Jacob Pan M: Len Brown @@ -10595,6 +10602,7 @@ R: Tony Luck S: Maintained F: drivers/platform/x86/intel/ifs F: include/trace/events/intel_ifs.h +F: tools/testing/selftests/drivers/platform/x86/intel/ifs/ INTEL INTEGRATED SENSOR HUB DRIVER M: Srinivas Pandruvada @@ -11530,6 +11538,19 @@ F: include/kvm/arm_* F: tools/testing/selftests/kvm/*/aarch64/ F: tools/testing/selftests/kvm/aarch64/ +KERNEL VIRTUAL MACHINE FOR LOONGARCH (KVM/LoongArch) +M: Tianrui Zhao +M: Bibo Mao +M: Huacai Chen +L: kvm@vger.kernel.org +L: loongarch@lists.linux.dev +S: Maintained +T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git +F: Documentation/virt/kvm/loongarch/ +F: arch/loongarch/include/asm/kvm* +F: arch/loongarch/include/uapi/asm/kvm* +F: arch/loongarch/kvm/ + KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) M: Huacai Chen L: linux-mips@vger.kernel.org @@ -12392,6 +12413,12 @@ S: Maintained F: Documentation/devicetree/bindings/hwinfo/loongson,ls2k-chipid.yaml F: drivers/soc/loongson/loongson2_guts.c +LOONGSON SECURITY MODULE DRIVER +M: Qunqin Zhao +L: loongarch@lists.linux.dev +S: Maintained +F: Documentation/devicetree/bindings/soc/loongson/loongson,ls3c6000se.yaml + LOONGSON-2 SOC SERIES PM DRIVER M: Yinbo Zhu L: linux-pm@vger.kernel.org @@ -18070,6 +18097,8 @@ S: Supported F: Documentation/arch/x86/resctrl* F: arch/x86/include/asm/resctrl.h F: arch/x86/kernel/cpu/resctrl/ +F: fs/resctrl/ +F: include/linux/resctrl*.h F: tools/testing/selftests/resctrl/ READ-COPY UPDATE (RCU) @@ -22630,6 +22659,12 @@ L: kvm@vger.kernel.org S: Maintained F: drivers/vfio/pci/mlx5/ +VFIO NVIDIA GRACE GPU DRIVER +M: Ankit Agrawal +L: kvm@vger.kernel.org +S: Supported +F: drivers/vfio/pci/nvgrace-gpu/ + VFIO PCI DEVICE SPECIFIC DRIVERS R: Jason Gunthorpe R: Yishai Hadas @@ -22653,6 +22688,14 @@ L: kvm@vger.kernel.org S: Maintained F: drivers/vfio/platform/ +VFIO QAT PCI DRIVER +M: Xin Zeng +M: Giovanni Cabiddu +L: kvm@vger.kernel.org +L: qat-linux@intel.com +S: Supported +F: drivers/vfio/pci/qat/ + VGA_SWITCHEROO R: Lukas Wunner S: Maintained @@ -23820,6 +23863,12 @@ S: Maintained F: Documentation/input/devices/yealink.rst F: drivers/input/misc/yealink.* +YUNSILICON XSC DRIVERS +M: Weihonggang +S: Maintained +F: drivers/infiniband/hw/xsc +F: drivers/net/ethernet/yunsilicon/xsc + Z3FOLD COMPRESSED PAGE ALLOCATOR M: Vitaly Wool R: Miaohe Lin @@ -23873,6 +23922,24 @@ L: linux-kernel@vger.kernel.org S: Maintained F: arch/x86/kernel/cpu/zhaoxin.c +ZHAOXIN PMU UNCORE SUPPORT +M: Leoliu-oc +S: Maintained +F: arch/x86/events/zhaoxin/core.c +F: arch/x86/events/zhaoxin/uncore.c +F: arch/x86/events/zhaoxin/uncore.h + 
+ZHAOXIN TEMPERATURE MONITORING DRIVERS +M: Leoliu-oc +L: linux-hwmon@vger.kernel.org +S: Maintained +F: drivers/hwmon/zhaoxin-cputemp.c + +ZHAOXIN ZXPAUSE INSTRUCTION SUPPORT +M: LeoLiu-oc +S: Maintained +F: arch/x86/kernel/cpu/zxpause.c + ZONEFS FILESYSTEM M: Damien Le Moal M: Naohiro Aota diff --git a/anolis/.gitignore b/anolis/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..9b1960e711fc1719053c5908bb5ff3cc321789e5 --- /dev/null +++ b/anolis/.gitignore @@ -0,0 +1 @@ +output/ \ No newline at end of file diff --git a/anolis/Makefile b/anolis/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..bcd2c8039879728fd7f254678ca0dee8b9f0186e --- /dev/null +++ b/anolis/Makefile @@ -0,0 +1,93 @@ +include Makefile.variables + +all: help examples + +dist-check: + @if [ "${DIST_BUILD_MODE}" == "official" ]; then \ + if [ "$(shell git describe --tags --exact-match HEAD 2>/dev/null)" != "${DIST_ANOLIS_VERSION}" ]; then \ + echo "Error: For official build, the tag ${DIST_ANOLIS_VERSION} should point to HEAD"; \ + exit 1; \ + fi \ + fi + @if [ "${DIST_BUILD_MODE}" == "diy" ] && [ -z "${DIST_DIY}" ]; then \ + echo "Error: For diy build, the variable DIST_DIY should not be empty"; \ + exit 1; \ + fi + +dist-genlog: + sh genlog.sh + +dist-genspec: dist-check + sh genspec.sh + +dist-genrpmtree: dist-check + sh genrpmtree.sh + +dist-rpms: dist-genrpmtree dist-check + sh buildpkg.sh + +DIST_CONFIG_TARGETS := dist-defconfig dist-debug-defconfig dist-configs dist-configs-update dist-configs-move dist-configs-import dist-configs-export dist-configs-modify dist-configs-check dist-configs-help + +$(DIST_CONFIG_TARGETS): + make -C configs/ $@ + +clean: + rm -rf $(DIST_OUTPUT) + +dist-version: + @echo $(DIST_ANOLIS_VERSION) + +examples: + @echo '' + @echo 'Build Examples:' + @echo '- *RECOMMEND* devel build with basic rpm packages' + @echo ' DIST_BUILD_MODE=devel DIST_BUILD_EXTRA=base make dist-rpms' + @echo '- *RECOMMEND* devel build with full rpm packages' + @echo ' DIST_BUILD_MODE=devel make dist-rpms' + @echo '- *RECOMMEND* nightly build with full rpm packages' + @echo ' DIST_BUILD_MODE=nightly make dist-rpms' + @echo '- diy build with basic rpm packages' + @echo ' DIST_BUILD_MODE=diy BUILD_DIY="your_diy_name" DIST_BUILD_EXTRA=base make dist-rpms' + @echo '' + @echo 'Kernel Version Examples:' + @echo '- show the kernel version of devel mode' + @echo ' DIST_BUILD_MODE=devel make dist-version' + @echo '- show the kernel version of diy mode' + @echo ' DIST_BUILD_MODE=diy BUILD_DIY="your_diy_name" make dist-version' + @echo '' + @echo 'Other Examples:' + @echo '- only generate rpm tree in devel mode, but do not build rpm packages' + @echo ' DIST_BUILD_MODE=devel make dist-genrpmtree' + @echo '- cleanup' + @echo ' make clean' + +help: + @echo 'For anolis release' + @echo '' + @echo 'RUN `make examples` for some examples' + @echo '--------------------------------' + @echo 'generic commands:' + @echo ' dist-genspec - generate kernel spec file through kernel.spec.template and changelog files' + @echo ' dist-genlog - generate changelogs' + @echo ' dist-genrpmtree - generate rpm tree' + @echo ' dist-rpms - build kernel rpm package, it will auto generated in $(DIST_SHORT_OUTPUT)' + @echo ' dist-version - show dist version' + @echo ' clean - cleanup output dir' + @echo ' examples - show some examples' + @echo '' + @echo '-------------------------------' + @echo 'the environment variables that could override:' + @echo ' DIST - the distribution suffix, eg: .an7, .an8, 
.an23' + @echo ' DIST_OUTPUT - the output directory, default: $(DIST_SHORT_OUTPUT)' + @echo ' DIST_BUILD_MODE - the build mode. optional: official/nightly/devel/diy' + @echo ' !!! NOTE: BE CAUTIOUS ABOUT USING official BUILD !!!' + @echo ' - official build. kernel version: $(DIST_KERNELVERSION)-$(DIST_OFFICIAL_PKGRELEASEVERION), with srpm' + @echo ' - nightly build. kernel version: $(DIST_KERNELVERSION)-$(DIST_UNOFFICIAL_PKGRELEASEVERION), with srpm' + @echo ' - devel build. kernel version: $(DIST_KERNELVERSION)-$(DIST_UNOFFICIAL_PKGRELEASEVERION), without srpm' + @echo ' - diy build. kernel version: $(DIST_KERNELVERSION)-$(DIST_DIY_PKGRELEASEVERION), with srpm' + @echo ' DIST_BUILD_NUMBER - the build number for unofficial build, eg: 1/2' + @echo ' DIST_DIY - the kernel version for diy build' + @echo ' DIST_BUILD_VARIANT & DIST_BUILD_EXTRA - see comments in buildpkg.sh' + @make -C configs/ dist-configs-help + +export \ No newline at end of file diff --git a/anolis/Makefile.variables b/anolis/Makefile.variables new file mode 100644 index 0000000000000000000000000000000000000000..f2598d1b0ce8b3ddca6b9eeaeef3f2fe51b3480c --- /dev/null +++ b/anolis/Makefile.variables @@ -0,0 +1,91 @@ +# the global environment variables, which will be passed to shell scripts +# all variables are start with DIST_, to avoid influences kernel build + +# the dist suffix, eg: an7, an8, an23 +DIST ?= .an23 + +# build mode: +# - official build, the kernel version looks like: 5.10.134-15.1_rc1, and also generate source rpm +# - nightly build, the kernel version looks like: 5.10.134-1.git.6235a991a61d, and also generate source rpm +# - devel build, same as nightly build, without source rpm +DIST_BUILD_MODE ?= devel + +# the package release version. +# eg: for ANCK 5.10-015.1, the major version is 15, the minor version is 1 +DIST_RELEASE_MAJOR_VERSION = 1 +DIST_RELEASE_MINOR_VERSION = + +# testing stage. 
+# eg: alpha, beta, rc +DIST_TESTING_STAGE = rc +DIST_TESTING_STAGE_MAJOR_VERSION = 1 +DIST_TESTING_STAGE_MINOR_VERSION = + +# special versions, eg: the pgo version +DIST_SPECIAL_VERSION_NAME = +DIST_SPECIAL_VERSION_MAJOR = +DIST_SPECIAL_VERSION_MINOR = + +# build number +DIST_BUILD_NUMBER ?= 1 + +# the kernel root +DIST_SRCROOT = $(shell realpath ..)/ +DIST_SOURCES = $(DIST_SRCROOT)anolis/ +DIST_RPM = $(DIST_SOURCES)rpm/ +DIST_CHANGELOG = $(DIST_SOURCES)changelog/ + +# the output directory +DIST_OUTPUT ?= $(DIST_SOURCES)output/ +DIST_RPMBUILDDIR_OUTPUT = ${DIST_OUTPUT}/rpmbuild +DIST_SHORT_OUTPUT=$(subst $(DIST_SRCROOT),,$(DIST_OUTPUT)) + +DIST_SPEC_TEMPLATE = kernel.spec.template +DIST_SPEC_FILE = kernel.spec + +# generate anolis kernel version + +# kernel version for offical build +DIST_RELEASE_VERSION = $(DIST_RELEASE_MAJOR_VERSION)$(if $(DIST_RELEASE_MINOR_VERSION),.$(DIST_RELEASE_MINOR_VERSION)) +DIST_SPECIAL_VERSION = $(if $(DIST_SPECIAL_VERSION_NAME),.$(DIST_SPECIAL_VERSION_NAME)$(if $(DIST_SPECIAL_VERSION_MAJOR),.$(DIST_SPECIAL_VERSION_MAJOR))$(if $(DIST_SPECIAL_VERSION_MINOR),.$(DIST_SPECIAL_VERSION_MINOR))) +DIST_TESTING_VERSION = $(if $(DIST_TESTING_STAGE),_$(DIST_TESTING_STAGE)$(if $(DIST_TESTING_STAGE_MAJOR_VERSION),$(DIST_TESTING_STAGE_MAJOR_VERSION))$(if $(DIST_TESTING_STAGE_MINOR_VERSION),.$(DIST_TESTING_STAGE_MINOR_VERSION))) +DIST_LINUXVERSION:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^VERSION\ =\ /{s///;p;q}') +DIST_LINUXKPATCHLEVEL:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^PATCHLEVEL\ =\ /{s///;p;q}') +DIST_LINUXKSUBLEVEL:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^SUBLEVEL\ =\ /{s///;p;q}') +DIST_KERNELVERSION = $(DIST_LINUXVERSION).$(DIST_LINUXKPATCHLEVEL).$(DIST_LINUXKSUBLEVEL) +DIST_OFFICIAL_PKGRELEASEVERION = $(DIST_RELEASE_VERSION)$(DIST_SPECIAL_VERSION)$(DIST_TESTING_VERSION) + +# kernel version for unoffical build +DIST_GIT_HEAD_SHORT_COMMIT_ID = $(shell git rev-parse --short HEAD) +DIST_GIT_HEAD_FULL_COMMIT_ID = $(shell git rev-parse HEAD) +DIST_UNOFFICIAL_PKGRELEASEVERION = ${DIST_BUILD_NUMBER}.git.$(DIST_GIT_HEAD_SHORT_COMMIT_ID) + +# kernel version for diy build +DIST_DIY_PKGRELEASEVERION = ${DIST_DIY}.diy + +# final kernel version +ifeq ("${DIST_BUILD_MODE}", "official") +DIST_PKGRELEASEVERION = $(DIST_OFFICIAL_PKGRELEASEVERION) +else ifeq ("${DIST_BUILD_MODE}", "diy") +DIST_PKGRELEASEVERION = $(DIST_DIY_PKGRELEASEVERION) +else +DIST_PKGRELEASEVERION = $(DIST_UNOFFICIAL_PKGRELEASEVERION) +endif +DIST_ANOLIS_VERSION = $(DIST_KERNELVERSION)-$(DIST_PKGRELEASEVERION) + +# the package id used for compress kernel tarball: +# for official build, we compress tarball from tag +# for unofficial build, we compress tarball from git HEAD +DIST_PKG_COMMIT_ID = $(if $(DIST_OFFICIAL_BUILD),$(DIST_ANOLIS_VERSION),$(DIST_GIT_HEAD_FULL_COMMIT_ID)) + +ifneq ("${ARCH}","") +DIST_ARCH ?= $(ARCH) +else +DIST_ARCH ?= $(shell uname -m) +endif +ifeq ($(DIST_ARCH),x86_64) + DIST_ARCH := x86 +endif +ifeq ($(DIST_ARCH),aarch64) + DIST_ARCH := arm64 +endif diff --git a/anolis/buildpkg.sh b/anolis/buildpkg.sh new file mode 100644 index 0000000000000000000000000000000000000000..34ff6a929f1de8d54b58b543457f9fb0f772bf2a --- /dev/null +++ b/anolis/buildpkg.sh @@ -0,0 +1,89 @@ +set -xe + +function do_rpmbuild() { + if [ "$DIST_BUILD_MODE" == "official" ] || \ + [ "$DIST_BUILD_MODE" == "nightly" ] || \ + [ "$DIST_BUILD_MODE" == "diy" ]; then + CMD="-ba" + else + CMD="-bb" + fi + + # Now we have: + # + variants: default, only-debug, with-debug + # + extras: base, 
with-debuginfo, full + # + modes: official, nightly, dev + #TODO: add with-gcov + # + # Matrix + # + # | BuildMode | KernelName | GenerateSrpm | + # |-----------|-----------------|--------------| + # | official | without sha id | Yes | + # | nightly | with git sha id | Yes | + # | devel | with git sha id | No | + # + # | Extra\Var | Default | Only-debug | With-debug | + # |-----------|----------|------------|------------| + # | Base | +default | -default | +default | + # | | -debug | +debug | +debug | + # | | +headers | + # |-----------|------------------------------------| + # | debuginfo | +debuginfo | + # |-----------|------------------------------------| + # | full | +tools +doc +perf | + # + # Note: pre-release mode will always be "full" and "with-debug" by default + + build_opts="--with headers --without bpftool --without signmodules" + + if [ "_${DIST_BUILD_VARIANT}" == "_only-debug" ]; then + build_opts="$build_opts --without default --with debug" + elif [ "_${DIST_BUILD_VARIANT}" == "_with-debug" ]; then + build_opts="$build_opts --with default --with debug" + else # assume default + build_opts="$build_opts --with default --without debug" + fi + + if [ "_${DIST_BUILD_EXTRA}" == "_debuginfo" ]; then + build_opts="$build_opts --with debuginfo --without tools --without doc --without perf" + elif [ "_${DIST_BUILD_EXTRA}" == "_base" ]; then + build_opts="$build_opts --without debuginfo --without tools --without doc --without perf" + else # assume full + build_opts="$build_opts --with debuginfo --with tools --with doc --with perf" + fi + + # launch a new shell to clear current environment variables passed by Makefile + rpmbuild \ + --define "%_smp_mflags -j$(nproc)" \ + --define "%packager " \ + --define "%_topdir ${DIST_RPMBUILDDIR_OUTPUT}" \ + ${build_opts} \ + ${CMD} ${DIST_RPMBUILDDIR_OUTPUT}/SPECS/kernel.spec \ + --target=$(uname -m) || exit 1 +} + +function output() { + if [ -z "$DIST_OFFICIAL_BUILD" ]; then + targetdir=${DIST_BUILD_NUMBER} + else + targetdir=${DIST_ANOLIS_VERSION} + fi + + mkdir -p ${DIST_OUTPUT}/${targetdir} + + cp ${DIST_RPMBUILDDIR_OUTPUT}/RPMS/$(uname -m)/*.rpm ${DIST_OUTPUT}/${targetdir}/ + + # copy srpm packages if and only if they exist. + if [ -f ${DIST_RPMBUILDDIR_OUTPUT}/SRPMS/*.rpm ]; then + cp ${DIST_RPMBUILDDIR_OUTPUT}/SRPMS/*.rpm ${DIST_OUTPUT}/${targetdir} + fi + + ls ${DIST_OUTPUT}/${targetdir}/*.rpm + + rpm_num=$(ls ${DIST_OUTPUT}/${targetdir}/*.rpm | wc -l) + echo "${rpm_num} rpm(s) copied." 
+} + +do_rpmbuild +output \ No newline at end of file diff --git a/anolis/changelog/000-changelog.base b/anolis/changelog/000-changelog.base new file mode 100644 index 0000000000000000000000000000000000000000..7bf7526d60c11b342db84dbb5667240420dd4686 --- /dev/null +++ b/anolis/changelog/000-changelog.base @@ -0,0 +1,2 @@ +* Fri Dec 15 2023 Qiao Ma [6.6.7-1_rc1%%DIST%%] +- anolis: bump kernel to 6.6.7 (Qiao Ma) \ No newline at end of file diff --git a/anolis/cmdline/arm64 b/anolis/cmdline/arm64 new file mode 100644 index 0000000000000000000000000000000000000000..0afaaf2e3a68bdfdbd02b0670542a5491226e53e --- /dev/null +++ b/anolis/cmdline/arm64 @@ -0,0 +1,4 @@ +cgroup.memory=nokmem +crashkernel=0M-2G:0M,2G-64G:256M,64G-:384M +iommu.passthrough=1 iommu.strict=0 +nospectre_bhb ssbd=force-off diff --git a/anolis/cmdline/loongarch64 b/anolis/cmdline/loongarch64 new file mode 100644 index 0000000000000000000000000000000000000000..a38a393148b9ca85fa15892dc4fa26b6d1b881a6 --- /dev/null +++ b/anolis/cmdline/loongarch64 @@ -0,0 +1,3 @@ +systemd.unified_cgroup_hierarchy=0 +cgroup.memory=nokmem +crashkernel=1024M diff --git a/anolis/cmdline/x86 b/anolis/cmdline/x86 new file mode 100644 index 0000000000000000000000000000000000000000..10623511d2c35acaa1508e7885fe21b0ebcada79 --- /dev/null +++ b/anolis/cmdline/x86 @@ -0,0 +1,2 @@ +cgroup.memory=nokmem +crashkernel=0M-2G:0M,2G-8G:192M,8G-:256M diff --git a/anolis/configs/How-To-Modify-Kconfig.zh.md b/anolis/configs/How-To-Modify-Kconfig.zh.md new file mode 100644 index 0000000000000000000000000000000000000000..438bf93054436b4c6678f5e42a6b3b200712788d --- /dev/null +++ b/anolis/configs/How-To-Modify-Kconfig.zh.md @@ -0,0 +1,131 @@ +本文示例如何修改和新增一个 kconfig 的配置。 + +# 一、 总体方法 +总的来说,您需要以下几步: + +1. 进入到 `anolis/` 目录: + +`cd anolis/` + +2. 修改/新增 kconfig + +假设要在所有架构中将 CONFIG_foo 都只为y,使用该命令: + +`make dist-configs-modify C=CONFIG_foo all=y L=L1` + +如果只在 x86 架构中将其置为 y,而在其他架构中保持关闭,使用该命令: + +`make dist-configs-modify C=CONFIG_foo x86=y others=n L=L1` + +这个命令执行以下动作: + +a. 查找是否已存在现有的 CONFIG_CAN 的配置项,若有,则删除。 + +b. 根据传入的参数,重新生成 CONFIG_CAN 的配置项。 + +c. 根据新的配置关系,重新计算和刷新 kconfig 的依赖关系 + +在使用时,请注意以下几点: + +a. 使用 `C=CONFIG_foo` 的方式来传递 kconfig 名称,而非使用这样的方式: + +`make dist-configs-modify CONFIG_CAN x86=y others=n L=L1` + +这是由 make 命令的语法所限制的。 + +b. 在传递 kconfig 的配置信息时,请注意必须传递层级信息,即 `L=xx` + +3. 确认结果 + +kconfig 的依赖关系是相当复杂的,因此在对单个 kconfig 调整后,可能会出现依赖条件不满足而导致该 kconfig 实际并未开启的情况。 +因此,`make dist-configs-modify`命令会重新计算依赖关系,这可能导致: + +a. 生成一系列新的 kconfig。 + +这些都是由 CONFIG_foo 通过 `select` 或者 `depends on` 关系自动使能的。 +这类新生成的 kconfig,需要人工调整它们到对应的层级(使用`make dist-configs-move`命令)。 + +b. 对特定 kconfig 的修改并未生效。 + +假设 CONFIG_foo 依赖于 CONFIG_bar,而 CONFIG_bar 之前并未打开,那么在重新计算依赖关系后,CONFIG_foo 依然会处于 `not set` 的状态,甚至是 `invisible` 状态(即在最终结果中看不到关于 CONFIG_foo 的任何配置项)。 + +这种情况,需要先定位依赖的 CONFIG_bar,并递归使用 `make dist-configs-modify` 修改 CONFIG_bar 及其依赖的 kconfig,最后再对 CONFIG_foo 进行修改。 + +具体定位的方法,推荐如下: + +1. 生成最终的 .config 文件。 +`cd /path/to/cloud-kernel/; make anolis_defconfig` + +2. 执行 `make menuconfig` 命令 + +3. 在具体的 tui 界面中,搜索 CONFIG_foo,并通过搜索结果查看对应的依赖关系。 + +# 二、 示例 +我们以使能 `CONFIG_CAN` 为例。 +# 1. 修改 kconfig +``` +cd anolis/; +make dist-configs-modify C=CONFIG_CAN all=y L=L1 +``` +这里,我们将 `CONFIG_CAN` 在所有架构中都打开,且将其层级置为 L1。 + +# 2. 
检查结果 + +在调整后,自动使能了一大堆kconfig,我们需要对这些新的 kconfig 调整层级。 +``` +$make dist-configs-modify C=CONFIG_CAN all=y L=L1 +make -C configs/ dist-configs-modify +make[1]: Entering directory '/cloud-kernel/anolis/configs' +remove old file: /cloud-kernel/anolis/configs/L2-OPTIONAL/default/CONFIG_CAN +created new file: /cloud-kernel/anolis/configs/L1-RECOMMEND/default/CONFIG_CAN +refresh configs +collect all old configs... +* generated file: /cloud-kernel/anolis/output/kernel-ANCK-generic-x86.config +* processed file: /cloud-kernel/anolis/output/kernel-ANCK-generic-x86.config +* generated file: /cloud-kernel/anolis/output/kernel-ANCK-debug-x86.config +* processed file: /cloud-kernel/anolis/output/kernel-ANCK-debug-x86.config +* generated file: /cloud-kernel/anolis/output/kernel-ANCK-generic-arm64.config +* processed file: /cloud-kernel/anolis/output/kernel-ANCK-generic-arm64.config +* generated file: /cloud-kernel/anolis/output/kernel-ANCK-debug-arm64.config +* processed file: /cloud-kernel/anolis/output/kernel-ANCK-debug-arm64.config +split new configs... +replace old configs with new configs.... + +****************************************************************************** +There are some UNKNOWN level's new configs. + +CONFIG_CAN_8DEV_USB CONFIG_CAN_DEBUG_DEVICES CONFIG_CAN_GRCAN CONFIG_CAN_ISOTP CONFIG_CAN_MCBA_USB CONFIG_CAN_PHYTIUM CONFIG_CAN_UCAN +CONFIG_CAN_BCM CONFIG_CAN_DEV CONFIG_CAN_GS_USB CONFIG_CAN_J1939 CONFIG_CAN_MCP251X CONFIG_CAN_RAW CONFIG_CAN_VCAN +CONFIG_CAN_CALC_BITTIMING CONFIG_CAN_EMS_USB CONFIG_CAN_GW CONFIG_CAN_KVASER_PCIEFD CONFIG_CAN_MCP251XFD CONFIG_CAN_SJA1000 CONFIG_CAN_VXCAN +CONFIG_CAN_CC770 CONFIG_CAN_ESD_USB2 CONFIG_CAN_HI311X CONFIG_CAN_KVASER_USB CONFIG_CAN_PEAK_PCIEFD CONFIG_CAN_SLCAN CONFIG_CAN_XILINXCAN +CONFIG_CAN_C_CAN CONFIG_CAN_FLEXCAN CONFIG_CAN_IFI_CANFD CONFIG_CAN_M_CAN CONFIG_CAN_PEAK_USB CONFIG_CAN_SOFTING CONFIG_NET_EMATCH_CANID + +Need to classify above configs manually !!! +See: /cloud-kernel/anolis/configs/UNKNOWN +HINT: `make dist-configs-move` can help you. 
+eg: make dist-configs-move C=CONFIG_CAN* L=L2 + +****************************************************************************** + +The Final Configs After Refresh +default: CONFIG_CAN=y + +****************************************************************************** +make[1]: Leaving directory '/cloud-kernel/anolis/configs' +``` +可以看到,大量与 CONFIG_CAN 有关的 kconfig 被刷新出来了,但是 `CONFIG_CAN` 的结果是符合预期的。 + +这里,我们将这些自动生效的 kconfig,都放入 L2 层级中。 +``` +make dist-configs-move C=CONFIG_CAN* L=L2 +``` + +# 结束 +到这里为止,所以的步骤已完成,可以使用 `git add` 和 `git commit` 命令记录这些变更,并发起 PR 了。 + +# 附:`make dist-configs-move`参数说明 +`make dist-configs-move` 用于在不同的层级之间移动 kconfig。 +参数如下: +- OLD 可选,表示 kconfig 原来所在的层级。默认为 UNKNOWN +- C 必选,表示需要移动的 kconfig,可使用通配符,如 `C=CONFIG_CAN*` +- L 必选,表示新的层级。 \ No newline at end of file diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS new file mode 100644 index 0000000000000000000000000000000000000000..2925d2a06226468dafe9b55b42d8dfe0d1fbe6f1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_PHYTIUM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_PHYTIUM new file mode 100644 index 0000000000000000000000000000000000000000..d49f05a3cba1d7cb912ebd706ca500826155f782 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_PHYTIUM @@ -0,0 +1 @@ +CONFIG_ARCH_PHYTIUM=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64 new file mode 100644 index 0000000000000000000000000000000000000000..e40393f9ae82681d5b960f9d34f8334a9e45f5ee --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64 @@ -0,0 +1 @@ +CONFIG_ARM64=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_16K_PAGES b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_16K_PAGES new file mode 100644 index 0000000000000000000000000000000000000000..517a9e44ba43eb96b4f6e7365ae24bf18d020e04 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_16K_PAGES @@ -0,0 +1 @@ +# CONFIG_ARM64_16K_PAGES is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_4K_PAGES b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_4K_PAGES new file mode 100644 index 0000000000000000000000000000000000000000..5df91df1aa0638b90b497208b39cdd67dd9b53e4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_4K_PAGES @@ -0,0 +1 @@ +CONFIG_ARM64_4K_PAGES=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_64K_PAGES b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_64K_PAGES new file mode 100644 index 0000000000000000000000000000000000000000..c63a3faadd975f0a945bf3d1af7abd4ef70d8f8b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_64K_PAGES @@ -0,0 +1 @@ +# CONFIG_ARM64_64K_PAGES is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_BTI b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_BTI new file mode 100644 index 0000000000000000000000000000000000000000..f2b5fedbf279090038498954ffdb14697c3dfa15 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_BTI @@ -0,0 +1 @@ +# CONFIG_ARM64_BTI is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_CNP b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_CNP new file mode 100644 index 0000000000000000000000000000000000000000..09a40aa6394ac04decce4ea6008113f0f1300f6a --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_CNP
@@ -0,0 +1 @@
+CONFIG_ARM64_CNP=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_E0PD b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_E0PD
new file mode 100644
index 0000000000000000000000000000000000000000..cba9bf0b8cd002bada2a3c6e0c25cf5f0b6cb453
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_E0PD
@@ -0,0 +1 @@
+CONFIG_ARM64_E0PD=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_HW_AFDBM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_HW_AFDBM
new file mode 100644
index 0000000000000000000000000000000000000000..4fc9f03d7411d83b6d27dbab3e92cd6dc15f6abc
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_HW_AFDBM
@@ -0,0 +1 @@
+CONFIG_ARM64_HW_AFDBM=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MPAM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MPAM
new file mode 100644
index 0000000000000000000000000000000000000000..45957b7b4ea21fbd49e5e57512cad210afbdd776
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MPAM
@@ -0,0 +1 @@
+CONFIG_ARM64_MPAM=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MTE b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MTE
new file mode 100644
index 0000000000000000000000000000000000000000..69b7778d449bc33bfedbde76f740438aac4c3372
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MTE
@@ -0,0 +1 @@
+CONFIG_ARM64_MTE=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PAN b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PAN
new file mode 100644
index 0000000000000000000000000000000000000000..ac8c85ac7fc2a5712fa09c21e95dbb6aac1d441b
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PAN
@@ -0,0 +1 @@
+CONFIG_ARM64_PAN=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PSEUDO_NMI b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PSEUDO_NMI
new file mode 100644
index 0000000000000000000000000000000000000000..9a822122078c9dc20d4f20c90090428ed478409f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PSEUDO_NMI
@@ -0,0 +1 @@
+CONFIG_ARM64_PSEUDO_NMI=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PTR_AUTH b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PTR_AUTH
new file mode 100644
index 0000000000000000000000000000000000000000..15cf70dcbed3b3b3f40547d151eb5557fc3785ac
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PTR_AUTH
@@ -0,0 +1 @@
+# CONFIG_ARM64_PTR_AUTH is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_RAS_EXTN b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_RAS_EXTN
new file mode 100644
index 0000000000000000000000000000000000000000..b664a0de1b4c1ae5e8c7ed19d0118e774edb849d
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_RAS_EXTN
@@ -0,0 +1 @@
+CONFIG_ARM64_RAS_EXTN=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_SVE b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_SVE
new file mode 100644
index 0000000000000000000000000000000000000000..cbb647e2703a820b5f5da14ea393552f755f93e7
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_SVE
@@ -0,0 +1 @@
+CONFIG_ARM64_SVE=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_TLB_RANGE b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_TLB_RANGE
new file mode 100644
index 0000000000000000000000000000000000000000..b34bf805a72d3148fda4da0c39512051223e99c7
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_TLB_RANGE
@@ -0,0 +1 @@
+CONFIG_ARM64_TLB_RANGE=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_USE_LSE_ATOMICS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_USE_LSE_ATOMICS
new file mode 100644
index 0000000000000000000000000000000000000000..bb1ab4cb28e5e002d037dac9cc7281dfc8969c86
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_USE_LSE_ATOMICS
@@ -0,0 +1 @@
+CONFIG_ARM64_USE_LSE_ATOMICS=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_VA_BITS_39 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_VA_BITS_39
new file mode 100644
index 0000000000000000000000000000000000000000..085f98ecb05888d555c41213b1d34bd6ffd7064f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_VA_BITS_39
@@ -0,0 +1 @@
+# CONFIG_ARM64_VA_BITS_39 is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CCN b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CCN
new file mode 100644
index 0000000000000000000000000000000000000000..af18c065af17e622c64714c3a92f711371f7d22f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CCN
@@ -0,0 +1 @@
+CONFIG_ARM_CCN=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CMN b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CMN
new file mode 100644
index 0000000000000000000000000000000000000000..50c015319fbc497cf9e34dd68988c526de4670b2
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CMN
@@ -0,0 +1 @@
+CONFIG_ARM_CMN=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CPU_RESCTRL b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CPU_RESCTRL
new file mode 100644
index 0000000000000000000000000000000000000000..b1c35e9ba99b60049d152fff1275430ce1c9203d
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CPU_RESCTRL
@@ -0,0 +1 @@
+CONFIG_ARM_CPU_RESCTRL=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC
new file mode 100644
index 0000000000000000000000000000000000000000..2cb25cc89b7bf35f6286a0a27862842dcbed2b9b
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC
@@ -0,0 +1 @@
+CONFIG_ARM_GIC=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_PHYTIUM_2500 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_PHYTIUM_2500
new file mode 100644
index 0000000000000000000000000000000000000000..ed757b34f23b2d73afbebe43f2ffca32462fc63f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_PHYTIUM_2500
@@ -0,0 +1 @@
+CONFIG_ARM_GIC_PHYTIUM_2500=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V2M b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V2M
new file mode 100644
index 0000000000000000000000000000000000000000..b3eb7dd653ac7182180194ee54da28d24fe8c4d7
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V2M
@@ -0,0 +1 @@
+CONFIG_ARM_GIC_V2M=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3
new file mode 100644
index 0000000000000000000000000000000000000000..424dd88e7fdff40f041d37cddc836d4a85588392
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3
@@ -0,0 +1 @@
+CONFIG_ARM_GIC_V3=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS
new file mode 100644
index 0000000000000000000000000000000000000000..d50b79f8d9ef6b3cb792e4a3a9e723bcfda23ac6
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS
@@ -0,0 +1 @@
+CONFIG_ARM_GIC_V3_ITS=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS_PCI b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..250435957adff3d6fd996a32583f37f55ee91991
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS_PCI
@@ -0,0 +1 @@
+CONFIG_ARM_GIC_V3_ITS_PCI=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU
new file mode 100644
index 0000000000000000000000000000000000000000..a9348c81752eebcef20b0732aef05c0fa17cacdb
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU
@@ -0,0 +1 @@
+CONFIG_ARM_PMU=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU_ACPI b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU_ACPI
new file mode 100644
index 0000000000000000000000000000000000000000..ec97cfb1617f49934b2025e7c3857448d490a1d7
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU_ACPI
@@ -0,0 +1 @@
+CONFIG_ARM_PMU_ACPI=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU
new file mode 100644
index 0000000000000000000000000000000000000000..920fadc10584ef72b0409d57e4eefd12f05f2d57
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU
@@ -0,0 +1 @@
+CONFIG_ARM_SMMU=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3
new file mode 100644
index 0000000000000000000000000000000000000000..83d144ae08d01d122dd5d34ebe0184f92d7ed5e3
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3
@@ -0,0 +1 @@
+CONFIG_ARM_SMMU_V3=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3_PMU b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3_PMU
new file mode 100644
index 0000000000000000000000000000000000000000..827377f8cda7d00dc0ce8ef7d7711cd45ae4a2ba
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3_PMU
@@ -0,0 +1 @@
+CONFIG_ARM_SMMU_V3_PMU=m
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SPE_PMU b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SPE_PMU
new file mode 100644
index 0000000000000000000000000000000000000000..c7f32cf49f37dabd13ef58aeb6a0fbac868a1866
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SPE_PMU
@@ -0,0 +1 @@
+CONFIG_ARM_SPE_PMU=m
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_CORESIGHT b/anolis/configs/L0-MANDATORY/arm64/CONFIG_CORESIGHT
new file mode 100644
index 0000000000000000000000000000000000000000..4d70504d87d414303db7fb9b43b345c302f450f4
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_CORESIGHT
@@ -0,0 +1 @@
+CONFIG_CORESIGHT=m
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_CPU_LITTLE_ENDIAN b/anolis/configs/L0-MANDATORY/arm64/CONFIG_CPU_LITTLE_ENDIAN
new file mode 100644
index 0000000000000000000000000000000000000000..ee43fdb3b8f4ca8257847c5a59705ac55f1e1499
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_CPU_LITTLE_ENDIAN
@@ -0,0 +1 @@
+CONFIG_CPU_LITTLE_ENDIAN=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_DEFERRED_STRUCT_PAGE_INIT b/anolis/configs/L0-MANDATORY/arm64/CONFIG_DEFERRED_STRUCT_PAGE_INIT
new file mode 100644
index 0000000000000000000000000000000000000000..c23e98f1ee4689d3821241bc9afe7978e4be42e5
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -0,0 +1 @@
+# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXTCON b/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXTCON
new file mode 100644
index 0000000000000000000000000000000000000000..bde29bcfc23f90bb3e22857256c49dca986f7b53
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXTCON
@@ -0,0 +1 @@
+CONFIG_EXTCON=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ
new file mode 100644
index 0000000000000000000000000000000000000000..dfae244722fdfae7d5888a6b4242a8bb6198657c
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ
@@ -0,0 +1 @@
+CONFIG_HZ=250
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_1000 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_1000
new file mode 100644
index 0000000000000000000000000000000000000000..c211724d6d0f57cd6d40319483340138a2e7f491
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_1000
@@ -0,0 +1 @@
+# CONFIG_HZ_1000 is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_250 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_250
new file mode 100644
index 0000000000000000000000000000000000000000..5bb56df22812498281381a5d7306fe2557b17b24
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_250
@@ -0,0 +1 @@
+CONFIG_HZ_250=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_KVM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_KVM
new file mode 100644
index 0000000000000000000000000000000000000000..14f90d8d68017cece2a4f6a6f033ce87abe4aaeb
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_KVM
@@ -0,0 +1 @@
+CONFIG_KVM=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY b/anolis/configs/L0-MANDATORY/arm64/CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
new file mode 100644
index 0000000000000000000000000000000000000000..7e44440231db1d48fbe905c3f10243aa6aaf1b3b
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
@@ -0,0 +1 @@
+CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_HOST_GENERIC b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_HOST_GENERIC
new file mode 100644
index 0000000000000000000000000000000000000000..c9ebcff723344aef6b7adf03ff030afc81890165
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_HOST_GENERIC
@@ -0,0 +1 @@
+CONFIG_PCI_HOST_GENERIC=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_ATMEL_TCB b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_ATMEL_TCB
new file mode 100644
index 0000000000000000000000000000000000000000..0b75af7f9d882c3855f5d613087bb528b957e9d5
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_ATMEL_TCB
@@ -0,0 +1 @@
+# CONFIG_PWM_ATMEL_TCB is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_XILINX b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_XILINX
new file mode 100644
index 0000000000000000000000000000000000000000..7018716d153f3ee61d95c8a45763eb45c4f3ee5d
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_XILINX
@@ -0,0 +1 @@
+# CONFIG_PWM_XILINX is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_GPI_DMA b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_GPI_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..8b26467a3104fa7ef547f2b9d747241627e0c7bc
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_GPI_DMA
@@ -0,0 +1 @@
+# CONFIG_QCOM_GPI_DMA is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_ICC_BWMON b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_ICC_BWMON
new file mode 100644
index 0000000000000000000000000000000000000000..eae24f1162d16b92d997ad51f4abc83035dea4e2
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_ICC_BWMON
@@ -0,0 +1 @@
+# CONFIG_QCOM_ICC_BWMON is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_LMH b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_LMH
new file mode 100644
index 0000000000000000000000000000000000000000..b89caa4b6306594df4fc9ea22d4aadd4b7a91779
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_LMH
@@ -0,0 +1 @@
+# CONFIG_QCOM_LMH is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_MPM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_MPM
new file mode 100644
index 0000000000000000000000000000000000000000..7daedf3de7b78b0798dce7fe253384adf8cf4911
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_MPM
@@ -0,0 +1 @@
+# CONFIG_QCOM_MPM is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RAMP_CTRL b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RAMP_CTRL
new file mode 100644
index 0000000000000000000000000000000000000000..61a3f52d300427c1203eb46d566b8b1898a9f901
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RAMP_CTRL
@@ -0,0 +1 @@
+# CONFIG_QCOM_RAMP_CTRL is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RPM_MASTER_STATS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RPM_MASTER_STATS
new file mode 100644
index 0000000000000000000000000000000000000000..07a0ad76ba607ae39806e095126f3487c83080f8
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RPM_MASTER_STATS
@@ -0,0 +1 @@
+# CONFIG_QCOM_RPM_MASTER_STATS is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM
new file mode 100644
index 0000000000000000000000000000000000000000..58e98180c4d5f22c37829a8f2722dee55bf2e6fc
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM
@@ -0,0 +1 @@
+CONFIG_QCOM_SCM=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT
new file mode 100644
index 0000000000000000000000000000000000000000..8f9c32859fb623276982fbe2aae4c2f15eeb698c
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT
@@ -0,0 +1 @@
+# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SPM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SPM
new file mode 100644
index 0000000000000000000000000000000000000000..13face25cbfa1ce5bddce6802d202d3a59641bf2
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SPM
@@ -0,0 +1 @@
+# CONFIG_QCOM_SPM is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SSC_BLOCK_BUS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SSC_BLOCK_BUS
new file mode 100644
index 0000000000000000000000000000000000000000..31f85458d145c4cbd423f1803695226f703e1fc5
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SSC_BLOCK_BUS
@@ -0,0 +1 @@
+# CONFIG_QCOM_SSC_BLOCK_BUS is not set
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNMAP_KERNEL_AT_EL0 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNMAP_KERNEL_AT_EL0
new file mode 100644
index 0000000000000000000000000000000000000000..1a577664e2b4325a14501775bb17509445d11c34
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNMAP_KERNEL_AT_EL0
@@ -0,0 +1 @@
+CONFIG_UNMAP_KERNEL_AT_EL0=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNWINDER_FRAME_POINTER b/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNWINDER_FRAME_POINTER
new file mode 100644
index 0000000000000000000000000000000000000000..0938fde11ffe2955f7c8c7e7711b51b792c6bb36
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNWINDER_FRAME_POINTER
@@ -0,0 +1 @@
+CONFIG_UNWINDER_FRAME_POINTER=y
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO
new file mode 100644
index 0000000000000000000000000000000000000000..f738f50ac231d57837cbb1faf9356b58e87312a2
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO
@@ -0,0 +1 @@
+CONFIG_VIRTIO=m
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_BLK b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_BLK
new file mode 100644
index 0000000000000000000000000000000000000000..193a208422f2731953d3e07e2acfe23c8aeb47c2
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_BLK
@@ -0,0 +1 @@
+CONFIG_VIRTIO_BLK=m
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_MMIO b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_MMIO
new file mode 100644
index 0000000000000000000000000000000000000000..be547dc13d5246acc6a10be3fa8bf9d98bc25d15
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_MMIO
@@ -0,0 +1 @@
+CONFIG_VIRTIO_MMIO=m
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_PCI b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..58505d3a58ffcaf0fe8bcfb63eb81e8b7c7c684a
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_PCI
@@ -0,0 +1 @@
+CONFIG_VIRTIO_PCI=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_64BIT b/anolis/configs/L0-MANDATORY/default/CONFIG_64BIT
new file mode 100644
index 0000000000000000000000000000000000000000..06a94e48bf6886ec4b0ce096a6b05162b70afec3
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_64BIT
@@ -0,0 +1 @@
+CONFIG_64BIT=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI
new file mode 100644
index 0000000000000000000000000000000000000000..839566bae0013592dc4adfc4097f2dc69dd48350
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI
@@ -0,0 +1 @@
+CONFIG_ACPI=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_APEI b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_APEI
new file mode 100644
index 0000000000000000000000000000000000000000..9ab33facf55a091edb0e84e8ceee61084bab7cf6
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_APEI
@@ -0,0 +1 @@
+CONFIG_ACPI_APEI=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_IPMI b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_IPMI
new file mode 100644
index 0000000000000000000000000000000000000000..e40fb9aeac03e1388b2a28c1f40e4ebae7a321a6
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_IPMI
@@ -0,0 +1 @@
+CONFIG_ACPI_IPMI=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_NUMA b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_NUMA
new file mode 100644
index 0000000000000000000000000000000000000000..19881f4402020efb866ea6037bb4f8cec6a6b06a
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_NUMA
@@ -0,0 +1 @@
+CONFIG_ACPI_NUMA=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_PROCESSOR b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_PROCESSOR
new file mode 100644
index 0000000000000000000000000000000000000000..a24416dcfc82598f9af406c95c512ef7aa2a06ff
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_PROCESSOR
@@ -0,0 +1 @@
+CONFIG_ACPI_PROCESSOR=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ADVISE_SYSCALLS b/anolis/configs/L0-MANDATORY/default/CONFIG_ADVISE_SYSCALLS
new file mode 100644
index 0000000000000000000000000000000000000000..0c60467178b89cf0d62d07eb21f6836d584e2428
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ADVISE_SYSCALLS
@@ -0,0 +1 @@
+CONFIG_ADVISE_SYSCALLS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_AIO b/anolis/configs/L0-MANDATORY/default/CONFIG_AIO
new file mode 100644
index 0000000000000000000000000000000000000000..4272502fc834137b604df84412daa6a25a596283
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_AIO
@@ -0,0 +1 @@
+CONFIG_AIO=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ALLOW_DEV_COREDUMP b/anolis/configs/L0-MANDATORY/default/CONFIG_ALLOW_DEV_COREDUMP
new file mode 100644
index 0000000000000000000000000000000000000000..ff2c37d0b6380695cd51f1c264645e8af91f6734
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ALLOW_DEV_COREDUMP
@@ -0,0 +1 @@
+CONFIG_ALLOW_DEV_COREDUMP=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_KEY_TYPE b/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_KEY_TYPE
new file mode 100644
index 0000000000000000000000000000000000000000..c7d15a4b1f090093f7e0a61d0f7872a9de5df041
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_KEY_TYPE
@@ -0,0 +1 @@
+CONFIG_ASYMMETRIC_KEY_TYPE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE b/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE
new file mode 100644
index 0000000000000000000000000000000000000000..f05823216bcf3d673bfdc39001d05b24a85cc767
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE
@@ -0,0 +1 @@
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_AUDIT b/anolis/configs/L0-MANDATORY/default/CONFIG_AUDIT
new file mode 100644
index 0000000000000000000000000000000000000000..aa15dd05b4f165e1045f8e59acffa74b3071048f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_AUDIT
@@ -0,0 +1 @@
+CONFIG_AUDIT=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_AUTOFS_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_AUTOFS_FS
new file mode 100644
index 0000000000000000000000000000000000000000..27fd9c1cf4b8520f1c856fe4651c69bb8faa42e0
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_AUTOFS_FS
@@ -0,0 +1 @@
+CONFIG_AUTOFS_FS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_AUXILIARY_BUS b/anolis/configs/L0-MANDATORY/default/CONFIG_AUXILIARY_BUS
new file mode 100644
index 0000000000000000000000000000000000000000..2a0020db1d037028d8aaac578400ad1f90bbd9a8
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_AUXILIARY_BUS
@@ -0,0 +1 @@
+CONFIG_AUXILIARY_BUS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BASE_FULL b/anolis/configs/L0-MANDATORY/default/CONFIG_BASE_FULL
new file mode 100644
index 0000000000000000000000000000000000000000..da5d20df17cd857c444b0c42902a654fda454c97
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BASE_FULL
@@ -0,0 +1 @@
+CONFIG_BASE_FULL=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BFQ_GROUP_IOSCHED b/anolis/configs/L0-MANDATORY/default/CONFIG_BFQ_GROUP_IOSCHED
new file mode 100644
index 0000000000000000000000000000000000000000..731981ca3083129885c079d177046606528b4b7f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BFQ_GROUP_IOSCHED
@@ -0,0 +1 @@
+CONFIG_BFQ_GROUP_IOSCHED=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_ELF b/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_ELF
new file mode 100644
index 0000000000000000000000000000000000000000..f9a79c64b4aa0dc60ddf16054afdfd74714c2735
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_ELF
@@ -0,0 +1 @@
+CONFIG_BINFMT_ELF=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_SCRIPT b/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_SCRIPT
new file mode 100644
index 0000000000000000000000000000000000000000..b9821f9472881847f44a4d45533c4ae5256e69e9
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_SCRIPT
@@ -0,0 +1 @@
+CONFIG_BINFMT_SCRIPT=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP
new file mode 100644
index 0000000000000000000000000000000000000000..b80f0100a923d78663ab814dc9731f53e393d74c
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP
@@ -0,0 +1 @@
+CONFIG_BLK_CGROUP=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP_IOCOST b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP_IOCOST
new file mode 100644
index 0000000000000000000000000000000000000000..b5de1617390c8c650df73ae5dc140179cb4cc534
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP_IOCOST
@@ -0,0 +1 @@
+CONFIG_BLK_CGROUP_IOCOST=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEBUG_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEBUG_FS
new file mode 100644
index 0000000000000000000000000000000000000000..71cc6e708b4045eedb20be638018bf7a3ab45ad9
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEBUG_FS
@@ -0,0 +1 @@
+CONFIG_BLK_DEBUG_FS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV
new file mode 100644
index 0000000000000000000000000000000000000000..8b43214d0ea4e10f229289c03f72a5699685af60
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV
@@ -0,0 +1 @@
+CONFIG_BLK_DEV=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_INITRD b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_INITRD
new file mode 100644
index 0000000000000000000000000000000000000000..f97f7a0a90d37da6d45f741b8ea82caefcf7116a
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_INITRD
@@ -0,0 +1 @@
+CONFIG_BLK_DEV_INITRD=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_IO_TRACE b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_IO_TRACE
new file mode 100644
index 0000000000000000000000000000000000000000..3e61f2b38920e3ee6476c101b1b25e1b1e6544ea
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_IO_TRACE
@@ -0,0 +1 @@
+CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_NVME b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_NVME
new file mode 100644
index 0000000000000000000000000000000000000000..b1e62d9adf97592d95e1548c70eecfdc36f8ed06
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_NVME
@@ -0,0 +1 @@
+CONFIG_BLK_DEV_NVME=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_THROTTLING b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_THROTTLING
new file mode 100644
index 0000000000000000000000000000000000000000..54ba3b9284042d657363d3b6d8f7089dacd9462b
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_THROTTLING
@@ -0,0 +1 @@
+CONFIG_BLK_DEV_THROTTLING=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_PCI b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..e56957b4d29113d69e3c6a40bf4c843e5902386a
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_PCI
@@ -0,0 +1 @@
+CONFIG_BLK_MQ_PCI=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_VIRTIO b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_VIRTIO
new file mode 100644
index 0000000000000000000000000000000000000000..f9e2e18b64f93580469e0f83c27cb9bc95abd41c
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_VIRTIO
@@ -0,0 +1 @@
+CONFIG_BLK_MQ_VIRTIO=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_RQ_ALLOC_TIME b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_RQ_ALLOC_TIME
new file mode 100644
index 0000000000000000000000000000000000000000..9cc328d40312893eb924a20240e97e8d96e8031d
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_RQ_ALLOC_TIME
@@ -0,0 +1 @@
+CONFIG_BLK_RQ_ALLOC_TIME=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLOCK b/anolis/configs/L0-MANDATORY/default/CONFIG_BLOCK
new file mode 100644
index 0000000000000000000000000000000000000000..4ef62773835533120d7d2cf63cf5c2565f651b74
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLOCK
@@ -0,0 +1 @@
+CONFIG_BLOCK=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BONDING b/anolis/configs/L0-MANDATORY/default/CONFIG_BONDING
new file mode 100644
index 0000000000000000000000000000000000000000..2fd2a3159f6e43fc51aa7f1d841afbac2ac8be56
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BONDING
@@ -0,0 +1 @@
+CONFIG_BONDING=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF
new file mode 100644
index 0000000000000000000000000000000000000000..63f09e9eccf59cbe785834a8d41db9507cf2e964
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF
@@ -0,0 +1 @@
+CONFIG_BPF=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_EVENTS b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_EVENTS
new file mode 100644
index 0000000000000000000000000000000000000000..28e92884696c43d29e2d2308e9189f8c39fa4f06
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_EVENTS
@@ -0,0 +1 @@
+CONFIG_BPF_EVENTS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_JIT b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_JIT
new file mode 100644
index 0000000000000000000000000000000000000000..5f9bba75323e37818f252b86613f3eeafd1df591
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_JIT
@@ -0,0 +1 @@
+CONFIG_BPF_JIT=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_LSM b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_LSM
new file mode 100644
index 0000000000000000000000000000000000000000..bf5ae0ddc861c3f3cf9659e36c2085e798427a3f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_LSM
@@ -0,0 +1 @@
+CONFIG_BPF_LSM=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_SYSCALL b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_SYSCALL
new file mode 100644
index 0000000000000000000000000000000000000000..4adb0f7e93cf7e989aabd27c619c9621a2bd4227
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_SYSCALL
@@ -0,0 +1 @@
+CONFIG_BPF_SYSCALL=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_UNPRIV_DEFAULT_OFF b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_UNPRIV_DEFAULT_OFF
new file mode 100644
index 0000000000000000000000000000000000000000..dea83415f4801a7475c3fe85340a07c5ecdd05c4
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_UNPRIV_DEFAULT_OFF
@@ -0,0 +1 @@
+CONFIG_BPF_UNPRIV_DEFAULT_OFF=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BRIDGE b/anolis/configs/L0-MANDATORY/default/CONFIG_BRIDGE
new file mode 100644
index 0000000000000000000000000000000000000000..06ef54326e6ec7305a7aa67b461082b7fa710931
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BRIDGE
@@ -0,0 +1 @@
+CONFIG_BRIDGE=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BUG b/anolis/configs/L0-MANDATORY/default/CONFIG_BUG
new file mode 100644
index 0000000000000000000000000000000000000000..7a3a7bf968804f097e351365afe1fe141e1a74d1
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BUG
@@ -0,0 +1 @@
+CONFIG_BUG=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES
new file mode 100644
index 0000000000000000000000000000000000000000..9c31a788b8a5e9f7505dba1ee65acca38ab4d194
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES
@@ -0,0 +1 @@
+CONFIG_CACHEFILES=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES_ONDEMAND b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES_ONDEMAND
new file mode 100644
index 0000000000000000000000000000000000000000..b234b86bb2cc23c4a97b98441d3ebce09e79713b
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES_ONDEMAND
@@ -0,0 +1 @@
+CONFIG_CACHEFILES_ONDEMAND=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CACHESTAT_SYSCALL b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHESTAT_SYSCALL
new file mode 100644
index 0000000000000000000000000000000000000000..a65ea33149e0444271ae56060f9c84fd6934f79d
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHESTAT_SYSCALL
@@ -0,0 +1 @@
+CONFIG_CACHESTAT_SYSCALL=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE b/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
new file mode 100644
index 0000000000000000000000000000000000000000..b4d7c4e3dc978333b7594f13f30687ff04ea74f4
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
@@ -0,0 +1 @@
+CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_SIZE b/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_SIZE
new file mode 100644
index 0000000000000000000000000000000000000000..781657e578afd7bd0d0731cccfd33505f9c41136
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_SIZE
@@ -0,0 +1 @@
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CFS_BANDWIDTH b/anolis/configs/L0-MANDATORY/default/CONFIG_CFS_BANDWIDTH
new file mode 100644
index 0000000000000000000000000000000000000000..0be30bfd50b986ba15e52110a558d2b12e11eb6f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CFS_BANDWIDTH
@@ -0,0 +1 @@
+CONFIG_CFS_BANDWIDTH=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUPS b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUPS
new file mode 100644
index 0000000000000000000000000000000000000000..de40ae788fd1c9c10e55e2bf830742703016c6b3
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUPS
@@ -0,0 +1 @@
+CONFIG_CGROUPS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_BPF b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_BPF
new file mode 100644
index 0000000000000000000000000000000000000000..659477cdb698ba5df805798b953e0d94b293e7af
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_BPF
@@ -0,0 +1 @@
+CONFIG_CGROUP_BPF=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_CPUACCT b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_CPUACCT
new file mode 100644
index 0000000000000000000000000000000000000000..43f05000a1257581331b3d955d295e87e6668556
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_CPUACCT
@@ -0,0 +1 @@
+CONFIG_CGROUP_CPUACCT=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_DEVICE b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_DEVICE
new file mode 100644
index 0000000000000000000000000000000000000000..5a233a69b057ffaaafe6b5ff9a7f18b97e3c2862
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_DEVICE
@@ -0,0 +1 @@
+CONFIG_CGROUP_DEVICE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_HUGETLB b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_HUGETLB
new file mode 100644
index 0000000000000000000000000000000000000000..0e9e34b4c5be4e4b424427c5ad19dfcf51e82c1f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_HUGETLB
@@ -0,0 +1 @@
+CONFIG_CGROUP_HUGETLB=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PERF b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PERF
new file mode 100644
index 0000000000000000000000000000000000000000..faa1d1cb71fcc99d83345733538ff5796b104768
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PERF
@@ -0,0 +1 @@
+CONFIG_CGROUP_PERF=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PIDS b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PIDS
new file mode 100644
index 0000000000000000000000000000000000000000..399a03754d53bd29e18fb63667233f1345c8be01
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PIDS
@@ -0,0 +1 @@
+CONFIG_CGROUP_PIDS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_RDMA b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_RDMA
new file mode 100644
index 0000000000000000000000000000000000000000..6d9fbd1dd16397b25fdeecf90e7e6136a01051d1
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_RDMA
@@ -0,0 +1 @@
+CONFIG_CGROUP_RDMA=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_SCHED b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_SCHED
new file mode 100644
index 0000000000000000000000000000000000000000..aa4be387efbb6071e5d0b76c70824dc0c282c0a8
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_SCHED
@@ -0,0 +1 @@
+CONFIG_CGROUP_SCHED=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CHECKPOINT_RESTORE b/anolis/configs/L0-MANDATORY/default/CONFIG_CHECKPOINT_RESTORE
new file mode 100644
index 0000000000000000000000000000000000000000..c554a09cece8d8d4703c42f75ea4b12161e07725
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CHECKPOINT_RESTORE
@@ -0,0 +1 @@
+CONFIG_CHECKPOINT_RESTORE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_RESERVE b/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_RESERVE
new file mode 100644
index 0000000000000000000000000000000000000000..3c5b7a555a9f79505ec9c6d9108f8e4ea1182dcd
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_RESERVE
@@ -0,0 +1 @@
+CONFIG_CK_KABI_RESERVE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_SIZE_ALIGN_CHECKS b/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_SIZE_ALIGN_CHECKS
new file mode 100644
index 0000000000000000000000000000000000000000..6f78e7258a77b54d8e3ddeadda4f5a9d3caffbea
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_SIZE_ALIGN_CHECKS
@@ -0,0 +1 @@
+CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_COMMON_CLK b/anolis/configs/L0-MANDATORY/default/CONFIG_COMMON_CLK
new file mode 100644
index 0000000000000000000000000000000000000000..3cbf93120f3365d32826caa2307824102f3d35de
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_COMMON_CLK
@@ -0,0 +1 @@
+CONFIG_COMMON_CLK=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_COMPACTION b/anolis/configs/L0-MANDATORY/default/CONFIG_COMPACTION
new file mode 100644
index 0000000000000000000000000000000000000000..23ab91c48b2673c467f36a77fe74b711df8185fe
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_COMPACTION
@@ -0,0 +1 @@
+CONFIG_COMPACTION=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CONFIGFS_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_CONFIGFS_FS
new file mode 100644
index 0000000000000000000000000000000000000000..1ef892062e4879ab17acbdb38f4ccdb300b2c548
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CONFIGFS_FS
@@ -0,0 +1 @@
+CONFIG_CONFIGFS_FS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CONSOLE_TRANSLATIONS b/anolis/configs/L0-MANDATORY/default/CONFIG_CONSOLE_TRANSLATIONS
new file mode 100644
index 0000000000000000000000000000000000000000..983fcc993d3650a18c8aca78c81591416120ecf5
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CONSOLE_TRANSLATIONS
@@ -0,0 +1 @@
+CONFIG_CONSOLE_TRANSLATIONS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_COREDUMP b/anolis/configs/L0-MANDATORY/default/CONFIG_COREDUMP
new file mode 100644
index 0000000000000000000000000000000000000000..b2426d3acdc2e866df8e53e03371e670c3c2cb51
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_COREDUMP
@@ -0,0 +1 @@
+CONFIG_COREDUMP=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPUSETS b/anolis/configs/L0-MANDATORY/default/CONFIG_CPUSETS
new file mode 100644
index 0000000000000000000000000000000000000000..9920b4659fcab46a8a4559d643744b1941233e78
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPUSETS
@@ -0,0 +1 @@
+CONFIG_CPUSETS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_FREQ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_FREQ
new file mode 100644
index 0000000000000000000000000000000000000000..04872f671d3002d471ca224ca8a6f8654b84a6ee
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_FREQ
@@ -0,0 +1 @@
+CONFIG_CPU_FREQ=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_IDLE b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_IDLE
new file mode 100644
index 0000000000000000000000000000000000000000..98dd7d37018533b4bb371010bb2ed761fc4b4fec
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_IDLE
@@ -0,0 +1 @@
+CONFIG_CPU_IDLE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_ISOLATION b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_ISOLATION
new file mode 100644
index 0000000000000000000000000000000000000000..da3a02c10eb96d73a95237ccb22500c019e5d29e
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_ISOLATION
@@ -0,0 +1 @@
+CONFIG_CPU_ISOLATION=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS
new file mode 100644
index 0000000000000000000000000000000000000000..3d6f96778a81718c24ec0b00635ee3071f36a5da
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS
@@ -0,0 +1 @@
+CONFIG_CPU_MITIGATIONS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRAMFS b/anolis/configs/L0-MANDATORY/default/CONFIG_CRAMFS
new file mode 100644
index 0000000000000000000000000000000000000000..99803b5a1e6267516e72f275454d331d43d3ca73
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRAMFS
@@ -0,0 +1 @@
+CONFIG_CRAMFS=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..071d9918f8b86fc37ec48c7d1e986486cb797b11
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_CORE
@@ -0,0 +1 @@
+CONFIG_CRASH_CORE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_DUMP b/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_DUMP
new file mode 100644
index 0000000000000000000000000000000000000000..84bb04c03f986f85e551148e92dfa02fcd93cb67
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_DUMP
@@ -0,0 +1 @@
+CONFIG_CRASH_DUMP=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO
new file mode 100644
index 0000000000000000000000000000000000000000..6fc752ec5216566d583f10be8d0436edae9e64f3
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO
@@ -0,0 +1 @@
+CONFIG_CRYPTO=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD
new file mode 100644
index 0000000000000000000000000000000000000000..d5733f86937c594fa66756a0570d15e0613f1301
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD
@@ -0,0 +1 @@
+CONFIG_CRYPTO_AEAD=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD2
new file mode 100644
index 0000000000000000000000000000000000000000..b1fbe1f7edd31fe9418714c55da47cb817bce723
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD2
@@ -0,0 +1 @@
+CONFIG_CRYPTO_AEAD2=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AES b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AES
new file mode 100644
index 0000000000000000000000000000000000000000..dd56b423e13e718d4206cb828521d83039bcc1b2
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AES
@@ -0,0 +1 @@
+CONFIG_CRYPTO_AES=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER
new file mode 100644
index 0000000000000000000000000000000000000000..528d61fcc1b9396451272c4f6dc530a5ee928fe2
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER
@@ -0,0 +1 @@
+CONFIG_CRYPTO_AKCIPHER=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER2
new file mode 100644
index 0000000000000000000000000000000000000000..7adade1dafb0d786faa35df456653374f440b4c9
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER2
@@ -0,0 +1 @@
+CONFIG_CRYPTO_AKCIPHER2=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI
new file mode 100644
index 0000000000000000000000000000000000000000..cb9dc1dd4603e042ff4f3b93d723c7b6d9221d45
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI
@@ -0,0 +1 @@
+CONFIG_CRYPTO_ALGAPI=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI2
new file mode 100644
index 0000000000000000000000000000000000000000..3e7c7ffca953590975ecb82e68debb0fefe9b12a
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI2
@@ -0,0 +1 @@
+CONFIG_CRYPTO_ALGAPI2=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GCM b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GCM
new file mode 100644
index 0000000000000000000000000000000000000000..8b509be56358d8dfa5920f8ebee8ca472a8588ed
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GCM
@@ -0,0 +1 @@
+CONFIG_CRYPTO_GCM=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GHASH b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GHASH
new file mode 100644
index 0000000000000000000000000000000000000000..2104f2f02998ccd4dece4b8426030592458d90ef
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GHASH
@@ -0,0 +1 @@
+CONFIG_CRYPTO_GHASH=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH
new file mode 100644
index 0000000000000000000000000000000000000000..a5e3b09910e723df2da178ae4d2d46da18f661c7
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH
@@ -0,0 +1 @@
+CONFIG_CRYPTO_HASH=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH2
new file mode 100644
index 0000000000000000000000000000000000000000..288112ec373beb75f1136dd07c29a10fe4545996
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH2
@@ -0,0 +1 @@
+CONFIG_CRYPTO_HASH2=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_AES b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_AES
new file mode 100644
index 0000000000000000000000000000000000000000..93b5da6169482f1f2870a11f3588896c8b72dfa2
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_AES
@@ -0,0 +1 @@
+CONFIG_CRYPTO_LIB_AES=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_SHA256 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_SHA256
new file mode 100644
index 0000000000000000000000000000000000000000..d952a4334ef733aef3ac78193ef49eddcc3d528f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_SHA256
@@ -0,0 +1 @@
+CONFIG_CRYPTO_LIB_SHA256=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER
new file mode 100644
index 0000000000000000000000000000000000000000..084eac591e659a7e68392611cb262c293db43c5d
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER
@@ -0,0 +1 @@
+CONFIG_CRYPTO_MANAGER=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER2
new file mode 100644
index 0000000000000000000000000000000000000000..7eb36f78b5990a70522caad91c79942a2f80eef5
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER2
@@ -0,0 +1 @@
+CONFIG_CRYPTO_MANAGER2=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG
new file mode 100644
index 0000000000000000000000000000000000000000..b7959aaec6d8e68c380d4a7a32cb089785253cf6
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG
@@ -0,0 +1 @@
+CONFIG_CRYPTO_RNG=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG2
new file mode 100644
index 0000000000000000000000000000000000000000..d6933362241271e7231cb272ecef55d6eb52e6e0
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG2
@@ -0,0 +1 @@
+CONFIG_CRYPTO_RNG2=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RSA b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RSA
new file mode 100644
index 0000000000000000000000000000000000000000..bd58f120558a3e6bbb46b9f689ef71753f88ffad
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RSA
@@ -0,0 +1 @@
+CONFIG_CRYPTO_RSA=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SHA256 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SHA256
new file mode 100644
index 0000000000000000000000000000000000000000..dcaffa2ebcb7d71cd9852d411040e9dab91fef85
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SHA256
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SHA256=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER
new file mode 100644
index 0000000000000000000000000000000000000000..3120100c23394cc03c5880cda4b39a878389e8f2
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SKCIPHER=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER2
new file mode 100644
index 0000000000000000000000000000000000000000..27565787b29cf2ce6a33587907d89a521b70443b
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER2
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SKCIPHER2=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM2
new file mode 100644
index 0000000000000000000000000000000000000000..e554f7498cee7d13bef6afe82243739673a66371
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM2
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM2=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3
new file mode 100644
index 0000000000000000000000000000000000000000..79d952c6847e1335067fc2e5a927e5d50efcbb82
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM3=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3_GENERIC b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3_GENERIC
new file mode 100644
index 0000000000000000000000000000000000000000..a9c9296a6835f5b89e8bf3e56593c292a79369da
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3_GENERIC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM3_GENERIC=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4
new file mode 100644
index 0000000000000000000000000000000000000000..8460ca1b63dbdff3910c0d603eb95c260768bb98
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM4=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4_GENERIC b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4_GENERIC
new file mode 100644
index 0000000000000000000000000000000000000000..388c7fa8a4afd85130e7b6b70203702d0173b17e
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4_GENERIC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM4_GENERIC=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CXL_BUS b/anolis/configs/L0-MANDATORY/default/CONFIG_CXL_BUS
new file mode 100644
index 0000000000000000000000000000000000000000..1a82f4dba556c99df5803c618101dc45b41abcc4
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CXL_BUS
@@ -0,0 +1 @@
+CONFIG_CXL_BUS=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DAX b/anolis/configs/L0-MANDATORY/default/CONFIG_DAX
new file mode 100644
index 0000000000000000000000000000000000000000..b756b79156281361dec6bc1b7dcdf842095cd779
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DAX
@@ -0,0 +1 @@
+CONFIG_DAX=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_BUGVERBOSE b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_BUGVERBOSE
new file mode 100644
index 0000000000000000000000000000000000000000..95a87c61430041ebd9a44d4171a16907b70c5b9d
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_BUGVERBOSE
@@ -0,0 +1 @@
+CONFIG_DEBUG_BUGVERBOSE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_FS
new file mode 100644
index 0000000000000000000000000000000000000000..39c2d26805b6f12cdda99ac880395011ac87aa92
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_FS
@@ -0,0 +1 @@
+CONFIG_DEBUG_FS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO
new file mode 100644
index 0000000000000000000000000000000000000000..4df8bd06fd8b8d37abf394f5fdb66bea080c97ae
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO
@@ -0,0 +1 @@
+CONFIG_DEBUG_INFO=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_BTF b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_BTF
new file mode 100644
index 0000000000000000000000000000000000000000..39227b4511aff8f781d8aa72657fce051a9d981f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_BTF
@@ -0,0 +1 @@
+CONFIG_DEBUG_INFO_BTF=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_DWARF4 b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_DWARF4
new file mode 100644
index 0000000000000000000000000000000000000000..571ad34e097357e217c23c42872b02ffde9f022b
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_DWARF4
@@ -0,0 +1 @@
+# CONFIG_DEBUG_INFO_DWARF4 is not set
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_KERNEL b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_KERNEL
new file mode 100644
index 0000000000000000000000000000000000000000..cc34cddf40ad56bdf59a47cdde4c9985896ccca7
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_KERNEL
@@ -0,0 +1 @@
+CONFIG_DEBUG_KERNEL=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_MISC b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_MISC
new file mode 100644
index 0000000000000000000000000000000000000000..b1c6fde1b6769496ad941fb1215dbc54fc0385e3
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_MISC
@@ -0,0 +1 @@
+CONFIG_DEBUG_MISC=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_SECTION_MISMATCH b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_SECTION_MISMATCH
new file mode 100644
index 0000000000000000000000000000000000000000..441e3464c292a6ce36596bcdba7909d523a4f5d8
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_SECTION_MISMATCH
@@ -0,0 +1 @@
+CONFIG_DEBUG_SECTION_MISMATCH=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_CUBIC b/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_CUBIC
new file mode 100644
index 0000000000000000000000000000000000000000..05dd37c064cfd3b5c7e4693386d93fa9dd8cdc71
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_CUBIC
@@ -0,0 +1 @@
+CONFIG_DEFAULT_CUBIC=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_SECURITY_DAC b/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_SECURITY_DAC
new file mode 100644
index 0000000000000000000000000000000000000000..a3af1ff70d089e8b3fa7061214b58c742ac58be5
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_SECURITY_DAC
@@ -0,0 +1 @@
+# CONFIG_DEFAULT_SECURITY_DAC is not set
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DETECT_HUNG_TASK b/anolis/configs/L0-MANDATORY/default/CONFIG_DETECT_HUNG_TASK
new file mode 100644
index 0000000000000000000000000000000000000000..28ac9ac1f98c590d25ced29c1eae5ec279eea229
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DETECT_HUNG_TASK
@@ -0,0 +1 @@
+CONFIG_DETECT_HUNG_TASK=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS b/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS
new file mode 100644
index 0000000000000000000000000000000000000000..4b70528477a1b88061a3595f5ea689863a5fc5df
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS
@@ -0,0 +1 @@
+CONFIG_DEVTMPFS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS_MOUNT b/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS_MOUNT
new file mode 100644
index 0000000000000000000000000000000000000000..e1cd3e81bb2764f62f7767f5c0076f98f0eaaabf
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS_MOUNT
@@ -0,0 +1 @@
+CONFIG_DEVTMPFS_MOUNT=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DMADEVICES b/anolis/configs/L0-MANDATORY/default/CONFIG_DMADEVICES
new file mode 100644
index 0000000000000000000000000000000000000000..169d6c3d0d4fe71079e9d6b96cc4896fc5acbaff
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DMADEVICES
@@ -0,0 +1 @@
+CONFIG_DMADEVICES=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DMI b/anolis/configs/L0-MANDATORY/default/CONFIG_DMI
new file mode 100644
index 0000000000000000000000000000000000000000..f961d1678db2b031da0329bdf1384aa0c589c329
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DMI
@@ -0,0 +1 @@
+CONFIG_DMI=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DNOTIFY b/anolis/configs/L0-MANDATORY/default/CONFIG_DNOTIFY
new file mode 100644
index 0000000000000000000000000000000000000000..1871c683bcefa3fee76a1e448b1376c4c69b9221
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DNOTIFY
@@ -0,0 +1 @@
+CONFIG_DNOTIFY=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DNS_RESOLVER b/anolis/configs/L0-MANDATORY/default/CONFIG_DNS_RESOLVER
new file mode 100644
index 0000000000000000000000000000000000000000..bf44e0eea3eeb0ef2a91b39befe4989b78933a5c
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DNS_RESOLVER
@@ -0,0 +1 @@
+CONFIG_DNS_RESOLVER=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DRM b/anolis/configs/L0-MANDATORY/default/CONFIG_DRM
new file mode 100644
index 0000000000000000000000000000000000000000..1ba603c1c7b810f5544e02cadfb11abab65d6629
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DRM
@@ -0,0 +1 @@
+CONFIG_DRM=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..5698f5ebf655fcfae0fa3ba02ed16566728890d6
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG
@@ -0,0 +1 @@
+CONFIG_DYNAMIC_DEBUG=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..1375e6ef8c3ba8b33f85e8918a2cb4f0ed9b7572
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG_CORE
@@ -0,0 +1 @@
+CONFIG_DYNAMIC_DEBUG_CORE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_FTRACE b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_FTRACE
new file mode 100644
index 0000000000000000000000000000000000000000..21f2bdd4849d88786b22fa571f001bec1addd237
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_FTRACE
@@ -0,0 +1 @@
+CONFIG_DYNAMIC_FTRACE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EDAC b/anolis/configs/L0-MANDATORY/default/CONFIG_EDAC
new file mode 100644
index 0000000000000000000000000000000000000000..dcb32adb912ec5d174e3842d0a22a8c274be189f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EDAC
@@ -0,0 +1 @@
+CONFIG_EDAC=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EFI b/anolis/configs/L0-MANDATORY/default/CONFIG_EFI
new file mode 100644
index 0000000000000000000000000000000000000000..7dcf2966dabad748da7918f14a27b69316664a13
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EFI
@@ -0,0 +1 @@
+CONFIG_EFI=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ELF_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_ELF_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..441e14118ef624eaa501d391377b312584c8d114
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ELF_CORE
@@ -0,0 +1 @@
+CONFIG_ELF_CORE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EPOLL b/anolis/configs/L0-MANDATORY/default/CONFIG_EPOLL
new file mode 100644
index 0000000000000000000000000000000000000000..eb0dd3c213f8f084d7d9cb05dee19adb05283b0c
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EPOLL
@@ -0,0 +1 @@
+CONFIG_EPOLL=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EROFS_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_EROFS_FS
new file mode 100644
index 0000000000000000000000000000000000000000..0394e57c2a07f8b2de1373093802adb427864aac
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EROFS_FS
@@ -0,0 +1 @@
+CONFIG_EROFS_FS=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ETHTOOL_NETLINK b/anolis/configs/L0-MANDATORY/default/CONFIG_ETHTOOL_NETLINK
new file mode 100644
index 0000000000000000000000000000000000000000..7ede260aaeb68fbae720761330d2de84f1a40ba5
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ETHTOOL_NETLINK
@@ -0,0 +1 @@
+CONFIG_ETHTOOL_NETLINK=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EVENTFD b/anolis/configs/L0-MANDATORY/default/CONFIG_EVENTFD
new file mode 100644
index 0000000000000000000000000000000000000000..e8f9fb5d2886d637872dc18b13266464cc7119d4
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EVENTFD
@@ -0,0 +1 @@
+CONFIG_EVENTFD=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EVM b/anolis/configs/L0-MANDATORY/default/CONFIG_EVM
new file mode 100644
index 0000000000000000000000000000000000000000..5e5b1549882a919d6e8bbd029ebe682e693d7b36
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EVM
@@ -0,0 +1 @@
+CONFIG_EVM=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EXPERT b/anolis/configs/L0-MANDATORY/default/CONFIG_EXPERT
new file mode 100644
index 0000000000000000000000000000000000000000..6643b3280f265e4aa94773eac0a76aae8bd03013
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EXPERT
@@ -0,0 +1 @@
+# CONFIG_EXPERT is not set
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EXT3_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT3_FS
new file mode 100644
index 0000000000000000000000000000000000000000..be0609953b7e2f42f634a0a515fdffe41a0ca78f
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT3_FS
@@ -0,0 +1 @@
+CONFIG_EXT3_FS=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS
new file mode 100644
index 0000000000000000000000000000000000000000..7f2f33f2758a20523109adbc67f427fd3e824f1a
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS
@@ -0,0 +1 @@
+CONFIG_EXT4_FS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_POSIX_ACL b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_POSIX_ACL
new file mode 100644
index 0000000000000000000000000000000000000000..5dd650fde83477a322b722103d9ca99999e19ac4
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_POSIX_ACL
@@ -0,0 +1 @@
+CONFIG_EXT4_FS_POSIX_ACL=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_SECURITY b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_SECURITY
new file mode 100644
index 0000000000000000000000000000000000000000..6603fbbd62a6a862b4a462d180825ba2e4f09f8c
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_SECURITY
@@ -0,0 +1 @@
+CONFIG_EXT4_FS_SECURITY=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FAIR_GROUP_SCHED b/anolis/configs/L0-MANDATORY/default/CONFIG_FAIR_GROUP_SCHED
new file mode 100644
index 0000000000000000000000000000000000000000..7c73cd02e2469e38efbe8e0b06f4344385dbec22
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FAIR_GROUP_SCHED
@@ -0,0 +1 @@
+CONFIG_FAIR_GROUP_SCHED=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FANOTIFY b/anolis/configs/L0-MANDATORY/default/CONFIG_FANOTIFY
new file mode 100644
index 0000000000000000000000000000000000000000..03964624f8cbd46700f43fa72620d5388bf64b02
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FANOTIFY
@@ -0,0 +1 @@
+CONFIG_FANOTIFY=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FAT_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_FAT_FS
new file mode 100644
index 0000000000000000000000000000000000000000..bb11abcfa290133eed3829ba90912ff2b6a366ff
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FAT_FS
@@ -0,0 +1 @@
+CONFIG_FAT_FS=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FB b/anolis/configs/L0-MANDATORY/default/CONFIG_FB
new file mode 100644
index 0000000000000000000000000000000000000000..7adf7d4970d927c421459e684f01ac8add002f81
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FB
@@ -0,0 +1 @@
+CONFIG_FB=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FCOE b/anolis/configs/L0-MANDATORY/default/CONFIG_FCOE
new file mode 100644
index 0000000000000000000000000000000000000000..0516a090373af82b52aabdc537220dad35b5501a
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FCOE
@@ -0,0 +1 @@
+CONFIG_FCOE=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FHANDLE b/anolis/configs/L0-MANDATORY/default/CONFIG_FHANDLE
new file mode 100644
index 0000000000000000000000000000000000000000..edcdc053f7298a2872053a5033bb6caa7e85fc87
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FHANDLE
@@ -0,0 +1 @@
+CONFIG_FHANDLE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FILE_LOCKING b/anolis/configs/L0-MANDATORY/default/CONFIG_FILE_LOCKING
new file mode 100644
index 0000000000000000000000000000000000000000..d77ce16e5e6e872020ae7733c5438c9057a24cae
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FILE_LOCKING
@@ -0,0 +1 @@
+CONFIG_FILE_LOCKING=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FRAMEBUFFER_CONSOLE b/anolis/configs/L0-MANDATORY/default/CONFIG_FRAMEBUFFER_CONSOLE
new file mode 100644
index 0000000000000000000000000000000000000000..84ef39c6841f146b42e0e859eb749ff4d31d37ef
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FRAMEBUFFER_CONSOLE
@@ -0,0 +1 @@
+CONFIG_FRAMEBUFFER_CONSOLE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FSCACHE b/anolis/configs/L0-MANDATORY/default/CONFIG_FSCACHE
new file mode 100644
index 0000000000000000000000000000000000000000..80b71a3729c827b967a0b9ecac149497ddff045e
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FSCACHE
@@ -0,0 +1 @@
+CONFIG_FSCACHE=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FSNOTIFY b/anolis/configs/L0-MANDATORY/default/CONFIG_FSNOTIFY
new file mode 100644
index 0000000000000000000000000000000000000000..2e3862d3aa6b80f00a948b5655cc985121c88e0c
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FSNOTIFY
@@ -0,0 +1 @@
+CONFIG_FSNOTIFY=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FS_DAX b/anolis/configs/L0-MANDATORY/default/CONFIG_FS_DAX
new file mode 100644
index 0000000000000000000000000000000000000000..141ae8514ada1f1e2bb1ed771394502c7f303ba3
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FS_DAX
@@ -0,0 +1 @@
+CONFIG_FS_DAX=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE b/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE
new file mode 100644
index 0000000000000000000000000000000000000000..ef8214661612f892070a3eaa788779e11235c600
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE
@@ -0,0 +1 @@
+CONFIG_FTRACE=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE_SYSCALLS b/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE_SYSCALLS
new file mode 100644
index 0000000000000000000000000000000000000000..f2f6f04b04b8adfc2f88298fcdd237bc9976d82a
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE_SYSCALLS
@@ -0,0 +1 @@
+CONFIG_FTRACE_SYSCALLS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_GRAPH_TRACER b/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_GRAPH_TRACER
new file mode 100644
index 0000000000000000000000000000000000000000..b1c634d001a0d5bd0fbe0cde1da1a270eca948f8
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_GRAPH_TRACER
@@ -0,0 +1 @@
+CONFIG_FUNCTION_GRAPH_TRACER=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_TRACER b/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_TRACER
new file mode 100644
index 0000000000000000000000000000000000000000..d1977efec42407ebf9cec3d340566564a574e95e
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_TRACER
@@ -0,0 +1 @@
+CONFIG_FUNCTION_TRACER=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FUSE_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_FUSE_FS
new file mode 100644
index 0000000000000000000000000000000000000000..835c7f4dadcd131b1687ed99329d22e4b42bfdeb
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FUSE_FS
@@ -0,0 +1 @@
+CONFIG_FUSE_FS=m
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FUTEX b/anolis/configs/L0-MANDATORY/default/CONFIG_FUTEX
new file mode 100644
index 0000000000000000000000000000000000000000..df59af0b6b71ba373302fd8f0a5c3deafa8e9fbe
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FUTEX
@@ -0,0 +1 @@
+CONFIG_FUTEX=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FW_LOADER b/anolis/configs/L0-MANDATORY/default/CONFIG_FW_LOADER
new file mode 100644
index 0000000000000000000000000000000000000000..c8e64ebb4589204621b4ca7a10b5f7d279d99241
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FW_LOADER
@@ -0,0 +1 @@
+CONFIG_FW_LOADER=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_GETTIMEOFDAY b/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_GETTIMEOFDAY
new file mode 100644
index 0000000000000000000000000000000000000000..492970855851d74d0c8ac10a03fa1b4b5fd323b7
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_GETTIMEOFDAY
@@ -0,0 +1 @@
+CONFIG_GENERIC_GETTIMEOFDAY=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_VDSO_TIME_NS b/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_VDSO_TIME_NS
new file mode 100644
index 0000000000000000000000000000000000000000..36652dfed18a58de38f4588f772adac359a63ab5
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_VDSO_TIME_NS
@@ -0,0 +1 @@
+CONFIG_GENERIC_VDSO_TIME_NS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HARDLOCKUP_DETECTOR b/anolis/configs/L0-MANDATORY/default/CONFIG_HARDLOCKUP_DETECTOR
new file mode 100644
index 0000000000000000000000000000000000000000..dc5ae5ce314ee7877885cc696433721ad761049d
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HARDLOCKUP_DETECTOR
@@ -0,0 +1 @@
+CONFIG_HARDLOCKUP_DETECTOR=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HDMI b/anolis/configs/L0-MANDATORY/default/CONFIG_HDMI
new file mode 100644
index 0000000000000000000000000000000000000000..c67208b21459144a0392cbcc14b679f53827d636
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HDMI
@@ -0,0 +1 @@
+CONFIG_HDMI=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HIGH_RES_TIMERS b/anolis/configs/L0-MANDATORY/default/CONFIG_HIGH_RES_TIMERS
new file mode 100644
index 0000000000000000000000000000000000000000..bf244406f03e66ded3ddb15d779f7eb862171aa1
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HIGH_RES_TIMERS
@@ -0,0 +1 @@
+CONFIG_HIGH_RES_TIMERS=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_CPU b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_CPU
new file mode 100644
index 0000000000000000000000000000000000000000..3704a7a0f205019138cc66998242f32396ecf390
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_CPU
@@ -0,0 +1 @@
+CONFIG_HOTPLUG_CPU=y
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI
new file mode 100644
index
0000000000000000000000000000000000000000..278cde73c789e95f6eaf833c047f0d84983b599e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI @@ -0,0 +1 @@ +CONFIG_HOTPLUG_PCI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI_PCIE b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI_PCIE new file mode 100644 index 0000000000000000000000000000000000000000..31781d4a918ed3bf7f026e40b7f16b71ea1e86c1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI_PCIE @@ -0,0 +1 @@ +CONFIG_HOTPLUG_PCI_PCIE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLBFS b/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLBFS new file mode 100644 index 0000000000000000000000000000000000000000..3c2dea56a54135cd09599a50ecc06473d60046da --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLBFS @@ -0,0 +1 @@ +CONFIG_HUGETLBFS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLB_PAGE b/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLB_PAGE new file mode 100644 index 0000000000000000000000000000000000000000..17c929f62cc5545827d5ba1d77a1d9f3ef9c90a0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLB_PAGE @@ -0,0 +1 @@ +CONFIG_HUGETLB_PAGE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HWMON b/anolis/configs/L0-MANDATORY/default/CONFIG_HWMON new file mode 100644 index 0000000000000000000000000000000000000000..ff2b3294f655ca7bdc1e4163aa1b61076b13acdc --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HWMON @@ -0,0 +1 @@ +CONFIG_HWMON=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HW_RANDOM b/anolis/configs/L0-MANDATORY/default/CONFIG_HW_RANDOM new file mode 100644 index 0000000000000000000000000000000000000000..971856ab74753f808b979b4ead219b051efc9d22 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HW_RANDOM @@ -0,0 +1 @@ +CONFIG_HW_RANDOM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_100 b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_100 new file mode 100644 index 0000000000000000000000000000000000000000..920c10df708d42eb5408d24b21d1395ea5d0704d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_100 @@ -0,0 +1 @@ +# CONFIG_HZ_100 is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_300 b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_300 new file mode 100644 index 0000000000000000000000000000000000000000..082ba4207cfa2f71b4d44dfb6e36fa1cea986e52 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_300 @@ -0,0 +1 @@ +# CONFIG_HZ_300 is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_PERIODIC b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_PERIODIC new file mode 100644 index 0000000000000000000000000000000000000000..55d8ff9baa54fe133be5dcaefd902386a9ac9b4a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_PERIODIC @@ -0,0 +1 @@ +# CONFIG_HZ_PERIODIC is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_I2C b/anolis/configs/L0-MANDATORY/default/CONFIG_I2C new file mode 100644 index 0000000000000000000000000000000000000000..aafb657f5b9a0a31807c087ca20e6d879ea0f406 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_I2C @@ -0,0 +1 @@ +CONFIG_I2C=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_I40E b/anolis/configs/L0-MANDATORY/default/CONFIG_I40E new file mode 100644 index 0000000000000000000000000000000000000000..c52af201eb2f8b81bf37326bee2aa187332779b8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_I40E @@ -0,0 +1 
@@ +CONFIG_I40E=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_I40EVF b/anolis/configs/L0-MANDATORY/default/CONFIG_I40EVF new file mode 100644 index 0000000000000000000000000000000000000000..21e0bf4cd205d5ddc0b250877b7f521a53432793 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_I40EVF @@ -0,0 +1 @@ +CONFIG_I40EVF=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ICE b/anolis/configs/L0-MANDATORY/default/CONFIG_ICE new file mode 100644 index 0000000000000000000000000000000000000000..855d37ea1722ad842deb3be16054a0c679e3d3fb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ICE @@ -0,0 +1 @@ +CONFIG_ICE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IDLE_PAGE_TRACKING b/anolis/configs/L0-MANDATORY/default/CONFIG_IDLE_PAGE_TRACKING new file mode 100644 index 0000000000000000000000000000000000000000..e7af620e0635339b0c1144d1c54d32b60d7f62a3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IDLE_PAGE_TRACKING @@ -0,0 +1 @@ +CONFIG_IDLE_PAGE_TRACKING=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IGB b/anolis/configs/L0-MANDATORY/default/CONFIG_IGB new file mode 100644 index 0000000000000000000000000000000000000000..1a8ee88776e1db62e70c9dfbe2d613cbf6c876d1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IGB @@ -0,0 +1 @@ +CONFIG_IGB=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IGBVF b/anolis/configs/L0-MANDATORY/default/CONFIG_IGBVF new file mode 100644 index 0000000000000000000000000000000000000000..63bb3beea6d8574171435849993c9a8d0a904d61 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IGBVF @@ -0,0 +1 @@ +CONFIG_IGBVF=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IMA b/anolis/configs/L0-MANDATORY/default/CONFIG_IMA new file mode 100644 index 0000000000000000000000000000000000000000..752982bdd9277a39b3358be9a3e5ad49de772b62 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IMA @@ -0,0 +1 @@ +CONFIG_IMA=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INET b/anolis/configs/L0-MANDATORY/default/CONFIG_INET new file mode 100644 index 0000000000000000000000000000000000000000..aac63495d8d1240210e94fb3b842082d42be0d6f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INET @@ -0,0 +1 @@ +CONFIG_INET=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INET_DIAG b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_DIAG new file mode 100644 index 0000000000000000000000000000000000000000..8814c39c6eba0e8dabdc4135d186694e53978bda --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_DIAG @@ -0,0 +1 @@ +CONFIG_INET_DIAG=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INET_MPTCP_DIAG b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_MPTCP_DIAG new file mode 100644 index 0000000000000000000000000000000000000000..9ff0e8977aa7af1f6571d501c8940e86118ab0b8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_MPTCP_DIAG @@ -0,0 +1 @@ +CONFIG_INET_MPTCP_DIAG=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INET_TCP_DIAG b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_TCP_DIAG new file mode 100644 index 0000000000000000000000000000000000000000..72fbd06a91ff11214c1fbe0036fc7e6e64bd858b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_TCP_DIAG @@ -0,0 +1 @@ +CONFIG_INET_TCP_DIAG=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INET_UDP_DIAG b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_UDP_DIAG new file mode 100644 index 
0000000000000000000000000000000000000000..6abf654838cb5be9ea56b44bb920003a254a56fb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_UDP_DIAG @@ -0,0 +1 @@ +CONFIG_INET_UDP_DIAG=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INFINIBAND b/anolis/configs/L0-MANDATORY/default/CONFIG_INFINIBAND new file mode 100644 index 0000000000000000000000000000000000000000..50f88a2829b7bef4d4aa5101be0e171a18a193aa --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INFINIBAND @@ -0,0 +1 @@ +CONFIG_INFINIBAND=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT new file mode 100644 index 0000000000000000000000000000000000000000..de103d88fedb2f687545a08d51e972d1077d467a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT @@ -0,0 +1 @@ +CONFIG_INPUT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_KEYBOARD b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_KEYBOARD new file mode 100644 index 0000000000000000000000000000000000000000..42362ad6f1dff2ecd2ca6dfaad83f78cc0c67703 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_KEYBOARD @@ -0,0 +1 @@ +CONFIG_INPUT_KEYBOARD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_MOUSE b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_MOUSE new file mode 100644 index 0000000000000000000000000000000000000000..4f6de0eef8342d7f61caefdc7a7bc93709f28fe4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_MOUSE @@ -0,0 +1 @@ +CONFIG_INPUT_MOUSE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INTEGRITY b/anolis/configs/L0-MANDATORY/default/CONFIG_INTEGRITY new file mode 100644 index 0000000000000000000000000000000000000000..a3524cb6b8f4de8c40aff5aa76a6621d8d0d71e3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INTEGRITY @@ -0,0 +1 @@ +CONFIG_INTEGRITY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IOMMU_SUPPORT b/anolis/configs/L0-MANDATORY/default/CONFIG_IOMMU_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..05bdc99f3d1124b90591e667995625a5398a628c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IOMMU_SUPPORT @@ -0,0 +1 @@ +CONFIG_IOMMU_SUPPORT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IOSCHED_BFQ b/anolis/configs/L0-MANDATORY/default/CONFIG_IOSCHED_BFQ new file mode 100644 index 0000000000000000000000000000000000000000..784fa4506e4be9a94b9f263ad131d2cdc4b4d8c5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IOSCHED_BFQ @@ -0,0 +1 @@ +CONFIG_IOSCHED_BFQ=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IO_URING b/anolis/configs/L0-MANDATORY/default/CONFIG_IO_URING new file mode 100644 index 0000000000000000000000000000000000000000..eff85c7a8f85b4cfaa7923d0cb71439e06c6d04f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IO_URING @@ -0,0 +1 @@ +CONFIG_IO_URING=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IPC_NS b/anolis/configs/L0-MANDATORY/default/CONFIG_IPC_NS new file mode 100644 index 0000000000000000000000000000000000000000..037635cb2881586ff48cf482b9342de50e1e2e32 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IPC_NS @@ -0,0 +1 @@ +CONFIG_IPC_NS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IPMI_HANDLER b/anolis/configs/L0-MANDATORY/default/CONFIG_IPMI_HANDLER new file mode 100644 index 0000000000000000000000000000000000000000..55e555c77722e62f614a07bd4bf241a6f380b9aa --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_IPMI_HANDLER @@ -0,0 +1 @@ +CONFIG_IPMI_HANDLER=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IPV6 b/anolis/configs/L0-MANDATORY/default/CONFIG_IPV6 new file mode 100644 index 0000000000000000000000000000000000000000..6450f34933e68296b5eae6f766ee85c5eac15a5d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IPV6 @@ -0,0 +1 @@ +CONFIG_IPV6=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_ARPTABLES b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_ARPTABLES new file mode 100644 index 0000000000000000000000000000000000000000..b4f356ef036d6e81e69e8a65b3ef67e14411ebfe --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_ARPTABLES @@ -0,0 +1 @@ +CONFIG_IP_NF_ARPTABLES=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_RAW b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_RAW new file mode 100644 index 0000000000000000000000000000000000000000..dbe34884bcbb42db4addf4da2984e4bc3197a3d5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_RAW @@ -0,0 +1 @@ +CONFIG_IP_NF_RAW=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_SECURITY b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_SECURITY new file mode 100644 index 0000000000000000000000000000000000000000..e23a4ded8a85d29ae0aed246380ac636abaad46e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_SECURITY @@ -0,0 +1 @@ +CONFIG_IP_NF_SECURITY=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_SET b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_SET new file mode 100644 index 0000000000000000000000000000000000000000..e4213b7088c5fbcc44f4ec6b064195ac6539b96a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_SET @@ -0,0 +1 @@ +CONFIG_IP_SET=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS new file mode 100644 index 0000000000000000000000000000000000000000..5595ef784c64af2efcef2dbc82bd507b7e4dc95c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS @@ -0,0 +1 @@ +CONFIG_IP_VS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS_IPV6 b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS_IPV6 new file mode 100644 index 0000000000000000000000000000000000000000..02fb992d6694bd62fe15351d7d46e61aeeea3f2b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS_IPV6 @@ -0,0 +1 @@ +CONFIG_IP_VS_IPV6=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IRQ_TIME_ACCOUNTING b/anolis/configs/L0-MANDATORY/default/CONFIG_IRQ_TIME_ACCOUNTING new file mode 100644 index 0000000000000000000000000000000000000000..e250776e77e277f15bc86ff849ed0f823151b13a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IRQ_TIME_ACCOUNTING @@ -0,0 +1 @@ +CONFIG_IRQ_TIME_ACCOUNTING=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ISO9660_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_ISO9660_FS new file mode 100644 index 0000000000000000000000000000000000000000..379622f04840e2496be709e8a4d10afb280b445c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ISO9660_FS @@ -0,0 +1 @@ +CONFIG_ISO9660_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBE b/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBE new file mode 100644 index 0000000000000000000000000000000000000000..a31939ee049b32cf35f3accf7702b5d79a7eb667 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBE @@ -0,0 +1 @@ +CONFIG_IXGBE=m diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBEVF b/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBEVF new file mode 100644 index 0000000000000000000000000000000000000000..ee2e70d6eb9168b2e1484183da757f19adaa9650 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBEVF @@ -0,0 +1 @@ +CONFIG_IXGBEVF=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_JBD2 b/anolis/configs/L0-MANDATORY/default/CONFIG_JBD2 new file mode 100644 index 0000000000000000000000000000000000000000..8cfd98f3b1e478f70bd1cbe8d5a12132ce550a40 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_JBD2 @@ -0,0 +1 @@ +CONFIG_JBD2=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_JUMP_LABEL b/anolis/configs/L0-MANDATORY/default/CONFIG_JUMP_LABEL new file mode 100644 index 0000000000000000000000000000000000000000..8371ec0677fb29f9b03514ae1816958e3961a5a1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_JUMP_LABEL @@ -0,0 +1 @@ +CONFIG_JUMP_LABEL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS b/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS new file mode 100644 index 0000000000000000000000000000000000000000..4701e4fa66f01df49c829a34681664df611bd18a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS @@ -0,0 +1 @@ +CONFIG_KALLSYMS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS_ALL b/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS_ALL new file mode 100644 index 0000000000000000000000000000000000000000..e5f6b9c6910f4da25ffa007985d450686551b93a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS_ALL @@ -0,0 +1 @@ +CONFIG_KALLSYMS_ALL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KCMP b/anolis/configs/L0-MANDATORY/default/CONFIG_KCMP new file mode 100644 index 0000000000000000000000000000000000000000..19e2db992267b1f01e5a2b848f3652bdf993331e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KCMP @@ -0,0 +1 @@ +CONFIG_KCMP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KERNFS b/anolis/configs/L0-MANDATORY/default/CONFIG_KERNFS new file mode 100644 index 0000000000000000000000000000000000000000..ddb8a5f451f076351319e2d503f0c0afae3bdeb9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KERNFS @@ -0,0 +1 @@ +CONFIG_KERNFS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC new file mode 100644 index 0000000000000000000000000000000000000000..b45488dbb6101ba62c366d90d01b65489f73999a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC @@ -0,0 +1 @@ +CONFIG_KEXEC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_CORE new file mode 100644 index 0000000000000000000000000000000000000000..093bbdf2650ab6d69f89e0170b74f096b44cb525 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_CORE @@ -0,0 +1 @@ +CONFIG_KEXEC_CORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_FILE b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_FILE new file mode 100644 index 0000000000000000000000000000000000000000..25862bf98de8b66dc9449ee324b9c60c966449ce --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_FILE @@ -0,0 +1 @@ +CONFIG_KEXEC_FILE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KFENCE b/anolis/configs/L0-MANDATORY/default/CONFIG_KFENCE new file mode 100644 index 0000000000000000000000000000000000000000..79ca5de4092436d927a66d2a59d4baecfa947a92 --- /dev/null 
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KFENCE @@ -0,0 +1 @@ +CONFIG_KFENCE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB new file mode 100644 index 0000000000000000000000000000000000000000..64b6ebfb4efaad3ccc092d1bbabad8b13fae521f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB @@ -0,0 +1 @@ +CONFIG_KGDB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_SERIAL_CONSOLE b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_SERIAL_CONSOLE new file mode 100644 index 0000000000000000000000000000000000000000..26a6aac707fa71b53087d8af4d87d45eb87f7f3a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_SERIAL_CONSOLE @@ -0,0 +1 @@ +CONFIG_KGDB_SERIAL_CONSOLE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_TESTS_ON_BOOT b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_TESTS_ON_BOOT new file mode 100644 index 0000000000000000000000000000000000000000..731febb628fad3e05db8b2e5f74f975ccd576d94 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_TESTS_ON_BOOT @@ -0,0 +1 @@ +# CONFIG_KGDB_TESTS_ON_BOOT is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBES b/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBES new file mode 100644 index 0000000000000000000000000000000000000000..e24be2770fd18bb8e3a8b8f62ee5af5b36e8cb03 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBES @@ -0,0 +1 @@ +CONFIG_KPROBES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBE_EVENTS b/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBE_EVENTS new file mode 100644 index 0000000000000000000000000000000000000000..2111c5d1a27bde1ce67213aa707378f7630f12cd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBE_EVENTS @@ -0,0 +1 @@ +CONFIG_KPROBE_EVENTS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KRETPROBES b/anolis/configs/L0-MANDATORY/default/CONFIG_KRETPROBES new file mode 100644 index 0000000000000000000000000000000000000000..78afe778fff8a035869e0c57e90c5e79f561bc90 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KRETPROBES @@ -0,0 +1 @@ +CONFIG_KRETPROBES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_LIVEPATCH b/anolis/configs/L0-MANDATORY/default/CONFIG_LIVEPATCH new file mode 100644 index 0000000000000000000000000000000000000000..1b05d0d1a109b049181b54fbb6fb08b303057502 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_LIVEPATCH=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD new file mode 100644 index 0000000000000000000000000000000000000000..f4ae1670697cc60017a641a90f2aaae2d5386e5f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD @@ -0,0 +1 @@ +CONFIG_LOCKD=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD_V4 b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD_V4 new file mode 100644 index 0000000000000000000000000000000000000000..0740609a259ff65e7db225a93f1def22cbcffbf8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD_V4 @@ -0,0 +1 @@ +CONFIG_LOCKD_V4=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKUP_DETECTOR b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKUP_DETECTOR new file mode 100644 index 0000000000000000000000000000000000000000..3a80a4e1b5e94e50415626f1f34dec409b2ab7ae --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKUP_DETECTOR @@ -0,0 +1 @@ +CONFIG_LOCKUP_DETECTOR=y diff 
--git a/anolis/configs/L0-MANDATORY/default/CONFIG_LRU_GEN b/anolis/configs/L0-MANDATORY/default/CONFIG_LRU_GEN new file mode 100644 index 0000000000000000000000000000000000000000..93292b3493ea9d835c0a5a5040d66a6cd3c87f53 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_LRU_GEN @@ -0,0 +1 @@ +CONFIG_LRU_GEN=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MAGIC_SYSRQ b/anolis/configs/L0-MANDATORY/default/CONFIG_MAGIC_SYSRQ new file mode 100644 index 0000000000000000000000000000000000000000..3a3a7285a70800d7536bbb2632be9686dca3c023 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MAGIC_SYSRQ @@ -0,0 +1 @@ +CONFIG_MAGIC_SYSRQ=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MD b/anolis/configs/L0-MANDATORY/default/CONFIG_MD new file mode 100644 index 0000000000000000000000000000000000000000..a11629d797726028e6f9cacb4174b7a54058ba5d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MD @@ -0,0 +1 @@ +CONFIG_MD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MEMBARRIER b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMBARRIER new file mode 100644 index 0000000000000000000000000000000000000000..7d1e33a67f7d62c844cdb89df2680035aae984ce --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMBARRIER @@ -0,0 +1 @@ +CONFIG_MEMBARRIER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MEMCG b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMCG new file mode 100644 index 0000000000000000000000000000000000000000..100384ba8db29ca3abf0412d09f73a7dc6924a0a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMCG @@ -0,0 +1 @@ +CONFIG_MEMCG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_FAILURE b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_FAILURE new file mode 100644 index 0000000000000000000000000000000000000000..af716b05c6c842c0832a0a0a88ebcb32ad977ec0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_FAILURE @@ -0,0 +1 @@ +CONFIG_MEMORY_FAILURE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG new file mode 100644 index 0000000000000000000000000000000000000000..ff75139e042e93b6629dfa37c2aca01704a72825 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG @@ -0,0 +1 @@ +CONFIG_MEMORY_HOTPLUG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE new file mode 100644 index 0000000000000000000000000000000000000000..bbbf7d364ab652973f545ac617686779c433cfaa --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE @@ -0,0 +1 @@ +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MIGRATION b/anolis/configs/L0-MANDATORY/default/CONFIG_MIGRATION new file mode 100644 index 0000000000000000000000000000000000000000..83a9d97c4f8f7ef29eeb101d08c09c2f89bde738 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MIGRATION @@ -0,0 +1 @@ +CONFIG_MIGRATION=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MISC_FILESYSTEMS b/anolis/configs/L0-MANDATORY/default/CONFIG_MISC_FILESYSTEMS new file mode 100644 index 0000000000000000000000000000000000000000..a82849e39df97e7205dcd6c3717f2b911d0eff75 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MISC_FILESYSTEMS @@ -0,0 +1 @@ +CONFIG_MISC_FILESYSTEMS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MLX4_EN 
b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX4_EN new file mode 100644 index 0000000000000000000000000000000000000000..0a147ed388218a765ffc4081fbfff23902f28390 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX4_EN @@ -0,0 +1 @@ +CONFIG_MLX4_EN=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE new file mode 100644 index 0000000000000000000000000000000000000000..f81faa5f3cf91124dcf224fa8abb0bbced4e8ea5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE @@ -0,0 +1 @@ +CONFIG_MLX5_CORE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE_EN b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE_EN new file mode 100644 index 0000000000000000000000000000000000000000..e55da7520a4620959b2efec650400d1ddfd4c176 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE_EN @@ -0,0 +1 @@ +CONFIG_MLX5_CORE_EN=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MLXSW_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_MLXSW_CORE new file mode 100644 index 0000000000000000000000000000000000000000..2a06bb59c73382adb4f1b048c9e42f090e78b604 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MLXSW_CORE @@ -0,0 +1 @@ +CONFIG_MLXSW_CORE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MMU b/anolis/configs/L0-MANDATORY/default/CONFIG_MMU new file mode 100644 index 0000000000000000000000000000000000000000..3dec296304f2c2611990a6078c6ae8b196b9dadf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MMU @@ -0,0 +1 @@ +CONFIG_MMU=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODPROBE_PATH b/anolis/configs/L0-MANDATORY/default/CONFIG_MODPROBE_PATH new file mode 100644 index 0000000000000000000000000000000000000000..d235ab2677b257f0bebef8e1175fe81af822382a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MODPROBE_PATH @@ -0,0 +1 @@ +CONFIG_MODPROBE_PATH="/sbin/modprobe" diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODULES b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULES new file mode 100644 index 0000000000000000000000000000000000000000..a83bb6e6b9a64e5ea5c33edba5c50b8b7ece1c00 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULES @@ -0,0 +1 @@ +CONFIG_MODULES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SIG b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SIG new file mode 100644 index 0000000000000000000000000000000000000000..53288b393fcc61002b9480ac121dcb3443dc0f4c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SIG @@ -0,0 +1 @@ +CONFIG_MODULE_SIG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SRCVERSION_ALL b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SRCVERSION_ALL new file mode 100644 index 0000000000000000000000000000000000000000..f0872fa329cb40a1ccea3107115585c072730c14 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SRCVERSION_ALL @@ -0,0 +1 @@ +CONFIG_MODULE_SRCVERSION_ALL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_UNLOAD b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_UNLOAD new file mode 100644 index 0000000000000000000000000000000000000000..4e69761743653394b61e48d57a332cc19156a33b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_UNLOAD @@ -0,0 +1 @@ +CONFIG_MODULE_UNLOAD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODVERSIONS b/anolis/configs/L0-MANDATORY/default/CONFIG_MODVERSIONS new file mode 100644 index 
0000000000000000000000000000000000000000..6119c683c6a8f4a5af88a865152e35c5d6e8ed5d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MODVERSIONS @@ -0,0 +1 @@ +CONFIG_MODVERSIONS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MPTCP b/anolis/configs/L0-MANDATORY/default/CONFIG_MPTCP new file mode 100644 index 0000000000000000000000000000000000000000..3bfe60494af8adb7c15d8116725856be05fb9d58 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MPTCP @@ -0,0 +1 @@ +CONFIG_MPTCP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_DEADLINE b/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_DEADLINE new file mode 100644 index 0000000000000000000000000000000000000000..ad5c7700b0f02a455d0c4b5ece8d2c329ff53af3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_DEADLINE @@ -0,0 +1 @@ +CONFIG_MQ_IOSCHED_DEADLINE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_KYBER b/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_KYBER new file mode 100644 index 0000000000000000000000000000000000000000..16623d2a0928ab30510ca608570b6dc73379eb6b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_KYBER @@ -0,0 +1 @@ +CONFIG_MQ_IOSCHED_KYBER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MULTIUSER b/anolis/configs/L0-MANDATORY/default/CONFIG_MULTIUSER new file mode 100644 index 0000000000000000000000000000000000000000..ffe93503bae4c0dca6f770539319c4e473ea4b70 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MULTIUSER @@ -0,0 +1 @@ +CONFIG_MULTIUSER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NAMESPACES b/anolis/configs/L0-MANDATORY/default/CONFIG_NAMESPACES new file mode 100644 index 0000000000000000000000000000000000000000..6c6db9d257b7cc2a57f12982fdee38d3ddb81159 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NAMESPACES @@ -0,0 +1 @@ +CONFIG_NAMESPACES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET b/anolis/configs/L0-MANDATORY/default/CONFIG_NET new file mode 100644 index 0000000000000000000000000000000000000000..bb0276068e2759f98a0e6cd15acac1459942db8b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET @@ -0,0 +1 @@ +CONFIG_NET=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NETDEVICES b/anolis/configs/L0-MANDATORY/default/CONFIG_NETDEVICES new file mode 100644 index 0000000000000000000000000000000000000000..c12b1f36af8469828483d39920cb58ed14d93148 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETDEVICES @@ -0,0 +1 @@ +CONFIG_NETDEVICES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER new file mode 100644 index 0000000000000000000000000000000000000000..7e722d1a8920ea82c0ac37f01f60f04bbce83ac0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER @@ -0,0 +1 @@ +CONFIG_NETFILTER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_ADVANCED b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_ADVANCED new file mode 100644 index 0000000000000000000000000000000000000000..c24a712070655c2b85f059a13e41c01b4a6675fc --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_ADVANCED @@ -0,0 +1 @@ +CONFIG_NETFILTER_ADVANCED=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_INGRESS b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_INGRESS new file mode 100644 index 0000000000000000000000000000000000000000..d9ad7389f7e435b3885b981623b50eb059b07c16 
--- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_INGRESS @@ -0,0 +1 @@ +CONFIG_NETFILTER_INGRESS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NETLINK_DIAG b/anolis/configs/L0-MANDATORY/default/CONFIG_NETLINK_DIAG new file mode 100644 index 0000000000000000000000000000000000000000..0c5dc18066a41a71546acc5516830573ca044197 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETLINK_DIAG @@ -0,0 +1 @@ +CONFIG_NETLINK_DIAG=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NETWORK_FILESYSTEMS b/anolis/configs/L0-MANDATORY/default/CONFIG_NETWORK_FILESYSTEMS new file mode 100644 index 0000000000000000000000000000000000000000..55e644969f3f6ccae9fe666e6839f814ee243dc5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETWORK_FILESYSTEMS @@ -0,0 +1 @@ +CONFIG_NETWORK_FILESYSTEMS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_GACT b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_GACT new file mode 100644 index 0000000000000000000000000000000000000000..0ae15de6b3606dc4c1c3704f72960cd9444610ee --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_GACT @@ -0,0 +1 @@ +CONFIG_NET_ACT_GACT=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_POLICE b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_POLICE new file mode 100644 index 0000000000000000000000000000000000000000..f0b4560c2890f37c04863a3c47c3e8ef24b6ba7c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_POLICE @@ -0,0 +1 @@ +CONFIG_NET_ACT_POLICE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS new file mode 100644 index 0000000000000000000000000000000000000000..f2125c41edbb378517330a765c9f09170110a04d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS @@ -0,0 +1 @@ +CONFIG_NET_CLS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS_ACT b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS_ACT new file mode 100644 index 0000000000000000000000000000000000000000..3f248b9138c0b496b7f8b5f3eb1a4f12964a8b00 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS_ACT @@ -0,0 +1 @@ +CONFIG_NET_CLS_ACT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CORE new file mode 100644 index 0000000000000000000000000000000000000000..104f6aed80fd3d52d6150890fe495d1b7cadf01d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CORE @@ -0,0 +1 @@ +CONFIG_NET_CORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_FAILOVER b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_FAILOVER new file mode 100644 index 0000000000000000000000000000000000000000..755a0916e7f43fdc291e158197e407238fd15bc6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_FAILOVER @@ -0,0 +1 @@ +CONFIG_NET_FAILOVER=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_KEY b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_KEY new file mode 100644 index 0000000000000000000000000000000000000000..d823a2fa3c7c92b91d89fef5f4c73f8efb82ef8a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_KEY @@ -0,0 +1 @@ +CONFIG_NET_KEY=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_NS b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_NS new file mode 100644 index 0000000000000000000000000000000000000000..6c95e6942dd3c574ddd945ecc77c9d3670009c00 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_NS @@ -0,0 +1 
@@ +CONFIG_NET_NS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCHED b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCHED new file mode 100644 index 0000000000000000000000000000000000000000..32758b597fb525a04d9fed4c0b549831c3032568 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCHED @@ -0,0 +1 @@ +CONFIG_NET_SCHED=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_FQ_CODEL b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_FQ_CODEL new file mode 100644 index 0000000000000000000000000000000000000000..1a0f8aa77debaf7ab3e0b36095c99c0b7fff3430 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_FQ_CODEL @@ -0,0 +1 @@ +CONFIG_NET_SCH_FQ_CODEL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_INGRESS b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_INGRESS new file mode 100644 index 0000000000000000000000000000000000000000..7cee4bc281ee7d63d895d52c45ce850dad06f7a9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_INGRESS @@ -0,0 +1 @@ +CONFIG_NET_SCH_INGRESS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_BROADCOM b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_BROADCOM new file mode 100644 index 0000000000000000000000000000000000000000..32cb667659df56fa18016d1c5814f64db13381ec --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_BROADCOM @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_BROADCOM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_INTEL b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_INTEL new file mode 100644 index 0000000000000000000000000000000000000000..00321eadb66ace1717cff595961211e1260ba787 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_INTEL @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_INTEL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_MELLANOX b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_MELLANOX new file mode 100644 index 0000000000000000000000000000000000000000..e33e9ccf9512c1678b85ccd43b02176958637d2e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_MELLANOX @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_MELLANOX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_WANGXUN b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_WANGXUN new file mode 100644 index 0000000000000000000000000000000000000000..e755f944fa3babffff8a13562f34ee4709b14997 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_WANGXUN @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_WANGXUN=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD b/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD new file mode 100644 index 0000000000000000000000000000000000000000..4edd3f446db59f3b75d29748a4e3633791262640 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD @@ -0,0 +1 @@ +CONFIG_NFSD=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD_V4 b/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD_V4 new file mode 100644 index 0000000000000000000000000000000000000000..a1df1fcb6f1a62f3df88bf67d5882ce2e1911a2a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD_V4 @@ -0,0 +1 @@ +CONFIG_NFSD_V4=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_COMMON b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_COMMON new file mode 100644 index 0000000000000000000000000000000000000000..19298484cd9c45b2e12dd3946a10579001c8f08d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_COMMON @@ -0,0 +1 @@ 
+CONFIG_NFS_COMMON=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..b181162e4b421b11ef594097b0ffa8fc7d32a2c1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FS @@ -0,0 +1 @@ +CONFIG_NFS_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FSCACHE b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FSCACHE new file mode 100644 index 0000000000000000000000000000000000000000..dff728512a18f83c584b7a4a0a1f12c477aba899 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FSCACHE @@ -0,0 +1 @@ +CONFIG_NFS_FSCACHE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V3 b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V3 new file mode 100644 index 0000000000000000000000000000000000000000..22442d57ae84632172175d516bf4b4ac68387f60 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V3 @@ -0,0 +1 @@ +CONFIG_NFS_V3=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4 b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4 new file mode 100644 index 0000000000000000000000000000000000000000..f18a771f0d7ee5395a01395d343d9e7fb232b343 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4 @@ -0,0 +1 @@ +CONFIG_NFS_V4=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_1 b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_1 new file mode 100644 index 0000000000000000000000000000000000000000..f131fcb574a849d3b138d56fdd0551b517ebad39 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_1 @@ -0,0 +1 @@ +CONFIG_NFS_V4_1=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_2 b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_2 new file mode 100644 index 0000000000000000000000000000000000000000..1c4ac47992d4432e290cd2b3fece560c329c2820 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_2 @@ -0,0 +1 @@ +CONFIG_NFS_V4_2=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_CONNTRACK b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_CONNTRACK new file mode 100644 index 0000000000000000000000000000000000000000..6eb10bb34ad4c99ff02de74abe172d282ee78961 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_CONNTRACK @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_NAT b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_NAT new file mode 100644 index 0000000000000000000000000000000000000000..25565b9714802abee9795280f4e5cafe7b68b46d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_NAT @@ -0,0 +1 @@ +CONFIG_NF_NAT=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES new file mode 100644 index 0000000000000000000000000000000000000000..b7803fc4026d845e89784230e64bc1c7b9713f90 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES @@ -0,0 +1 @@ +CONFIG_NF_TABLES=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_INET b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_INET new file mode 100644 index 0000000000000000000000000000000000000000..407ac412fc38eac240ede02bd315bf9eac6ec9ca --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_INET @@ -0,0 +1 @@ +CONFIG_NF_TABLES_INET=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV4 b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV4 new file mode 100644 index 
0000000000000000000000000000000000000000..4393170ed9e72917d19fe99835d3ff7fbcf15c9d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_TABLES_IPV4=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV6 b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV6 new file mode 100644 index 0000000000000000000000000000000000000000..271ac4a7a3881dee0d6791e9540a3b8d6fa0c2f7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_TABLES_IPV6=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NGBE b/anolis/configs/L0-MANDATORY/default/CONFIG_NGBE new file mode 100644 index 0000000000000000000000000000000000000000..bab5cad95e8c9b71d238f127fa9fb64de80e5317 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NGBE @@ -0,0 +1 @@ +CONFIG_NGBE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS new file mode 100644 index 0000000000000000000000000000000000000000..0ca649ce781c3b05475b1c914485e9550482cd20 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS @@ -0,0 +1 @@ +CONFIG_NLS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_ASCII b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_ASCII new file mode 100644 index 0000000000000000000000000000000000000000..4c75b84a9fcc85c926c8480b94f7e97601a43115 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_ASCII @@ -0,0 +1 @@ +CONFIG_NLS_ASCII=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_936 b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_936 new file mode 100644 index 0000000000000000000000000000000000000000..49aec3120b0551d7d1e7259d98ad0799fa7308a7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_936 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_936=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_950 b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_950 new file mode 100644 index 0000000000000000000000000000000000000000..4b6af6728a004d2b3f761a1395af38f9703e8b6f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_950 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_950=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_DEFAULT b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..bf5fe34ea9cc7e04b04eaf2c444aa32e852df8b3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_DEFAULT @@ -0,0 +1 @@ +CONFIG_NLS_DEFAULT="utf8" diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_UTF8 b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_UTF8 new file mode 100644 index 0000000000000000000000000000000000000000..c9692f4e2727d1e2fccabfb61745adc88db44d20 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_UTF8 @@ -0,0 +1 @@ +CONFIG_NLS_UTF8=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ new file mode 100644 index 0000000000000000000000000000000000000000..8c68dcd51eff856c0c5357a9f183a92e28db6ec4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ @@ -0,0 +1 @@ +CONFIG_NO_HZ=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_FULL b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_FULL new file mode 100644 index 0000000000000000000000000000000000000000..4cd3ab9ec63eb93a9d9992507189ef1d808a27d2 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_FULL @@ -0,0 +1 @@ +CONFIG_NO_HZ_FULL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_IDLE b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_IDLE new file mode 100644 index 0000000000000000000000000000000000000000..8641a52f79477ed6b9d2fa4ef2ad633fed5442f1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_IDLE @@ -0,0 +1 @@ +# CONFIG_NO_HZ_IDLE is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NR_CPUS b/anolis/configs/L0-MANDATORY/default/CONFIG_NR_CPUS new file mode 100644 index 0000000000000000000000000000000000000000..27d187f4dbfc047279e93afe349b84341090c3eb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NR_CPUS @@ -0,0 +1 @@ +CONFIG_NR_CPUS=1024 diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NTFS_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_NTFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..454b2b91384618d1e60c490a26e642d0bcb5e735 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NTFS_FS @@ -0,0 +1 @@ +# CONFIG_NTFS_FS is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NUMA b/anolis/configs/L0-MANDATORY/default/CONFIG_NUMA new file mode 100644 index 0000000000000000000000000000000000000000..e480f0c04294a121db1a40f7c2d03c324155b9c6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NUMA @@ -0,0 +1 @@ +CONFIG_NUMA=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_CORE new file mode 100644 index 0000000000000000000000000000000000000000..63c9ce1730b07643cad1288c1732a717ee18e8e0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_CORE @@ -0,0 +1 @@ +CONFIG_NVME_CORE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_FABRICS b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_FABRICS new file mode 100644 index 0000000000000000000000000000000000000000..c5c11f93402234f2a36098042aa1053f8f3de774 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_FABRICS @@ -0,0 +1 @@ +CONFIG_NVME_FABRICS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_RDMA b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_RDMA new file mode 100644 index 0000000000000000000000000000000000000000..134190973d15ad35707d953c35fa03cf9c17f5e8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_RDMA @@ -0,0 +1 @@ +CONFIG_NVME_RDMA=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_TCP b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_TCP new file mode 100644 index 0000000000000000000000000000000000000000..738cd8284b7b691b9facd891368c8094d81baae5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_TCP @@ -0,0 +1 @@ +CONFIG_NVME_TCP=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_OVERLAY_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_OVERLAY_FS new file mode 100644 index 0000000000000000000000000000000000000000..b30428db0fd8a59fbec8ed7070607164e573aa3d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_OVERLAY_FS @@ -0,0 +1 @@ +CONFIG_OVERLAY_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PACKET b/anolis/configs/L0-MANDATORY/default/CONFIG_PACKET new file mode 100644 index 0000000000000000000000000000000000000000..15b6ef5b812314c77144da72891883f790acd5e9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PACKET @@ -0,0 +1 @@ +CONFIG_PACKET=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PAGE_IDLE_FLAG 
b/anolis/configs/L0-MANDATORY/default/CONFIG_PAGE_IDLE_FLAG new file mode 100644 index 0000000000000000000000000000000000000000..5bf96f75047bade3db9f8a71c247430f4b306a94 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PAGE_IDLE_FLAG @@ -0,0 +1 @@ +CONFIG_PAGE_IDLE_FLAG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_ON_OOPS b/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_ON_OOPS new file mode 100644 index 0000000000000000000000000000000000000000..29dc6ff9860ca72dad9ba150b6282a9dfbe13db9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_ON_OOPS @@ -0,0 +1 @@ +CONFIG_PANIC_ON_OOPS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_TIMEOUT b/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_TIMEOUT new file mode 100644 index 0000000000000000000000000000000000000000..842da8779f98b792cdb9b6b40dc489242955a744 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_TIMEOUT @@ -0,0 +1 @@ +CONFIG_PANIC_TIMEOUT=1 diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PARAVIRT b/anolis/configs/L0-MANDATORY/default/CONFIG_PARAVIRT new file mode 100644 index 0000000000000000000000000000000000000000..65d95d1e2819cf7717e410175f668d4ed4468877 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PARAVIRT @@ -0,0 +1 @@ +CONFIG_PARAVIRT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PARTITION_ADVANCED b/anolis/configs/L0-MANDATORY/default/CONFIG_PARTITION_ADVANCED new file mode 100644 index 0000000000000000000000000000000000000000..313ee558609747191f6f5d3007f9a326e45a552e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PARTITION_ADVANCED @@ -0,0 +1 @@ +CONFIG_PARTITION_ADVANCED=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCI b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI new file mode 100644 index 0000000000000000000000000000000000000000..c499609c3f62d2977275f85d3df3ce62b1ef9f8e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI @@ -0,0 +1 @@ +CONFIG_PCI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCIEPORTBUS b/anolis/configs/L0-MANDATORY/default/CONFIG_PCIEPORTBUS new file mode 100644 index 0000000000000000000000000000000000000000..ead4f09ac07bc02381aea387dd83581983912a1a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PCIEPORTBUS @@ -0,0 +1 @@ +CONFIG_PCIEPORTBUS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_IOV b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_IOV new file mode 100644 index 0000000000000000000000000000000000000000..c52af465a46b11ec00844bddc3ca76fd31467d27 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_IOV @@ -0,0 +1 @@ +CONFIG_PCI_IOV=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_MSI b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_MSI new file mode 100644 index 0000000000000000000000000000000000000000..9fc3bddd0b5c7c375242ed348043604510a8f149 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_MSI @@ -0,0 +1 @@ +CONFIG_PCI_MSI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_PF_STUB b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_PF_STUB new file mode 100644 index 0000000000000000000000000000000000000000..46eee76194b0cffb73add26b2deed58cb65571d7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_PF_STUB @@ -0,0 +1 @@ +CONFIG_PCI_PF_STUB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_STUB b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_STUB new file mode 100644 index 
0000000000000000000000000000000000000000..b88db42e9c847536f0a0c512a47950b3ecb53f64 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_STUB @@ -0,0 +1 @@ +CONFIG_PCI_STUB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PERF_EVENTS b/anolis/configs/L0-MANDATORY/default/CONFIG_PERF_EVENTS new file mode 100644 index 0000000000000000000000000000000000000000..ba58ff2203e4f68c706219f3b635e4f8e3afac90 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PERF_EVENTS @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PGTABLE_LEVELS b/anolis/configs/L0-MANDATORY/default/CONFIG_PGTABLE_LEVELS new file mode 100644 index 0000000000000000000000000000000000000000..238bddf8d97c75a369ac65787edeccff5126515e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PGTABLE_LEVELS @@ -0,0 +1 @@ +CONFIG_PGTABLE_LEVELS=4 diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PID_NS b/anolis/configs/L0-MANDATORY/default/CONFIG_PID_NS new file mode 100644 index 0000000000000000000000000000000000000000..eae7bdaa3790af5d0cb68d0deff195f87a470a14 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PID_NS @@ -0,0 +1 @@ +CONFIG_PID_NS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PKCS7_MESSAGE_PARSER b/anolis/configs/L0-MANDATORY/default/CONFIG_PKCS7_MESSAGE_PARSER new file mode 100644 index 0000000000000000000000000000000000000000..49e251bf11def03283973721caf1695f917eaabe --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PKCS7_MESSAGE_PARSER @@ -0,0 +1 @@ +CONFIG_PKCS7_MESSAGE_PARSER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PM b/anolis/configs/L0-MANDATORY/default/CONFIG_PM new file mode 100644 index 0000000000000000000000000000000000000000..2df782efc711524d50f0053cc5ce5cd35b67336c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PM @@ -0,0 +1 @@ +CONFIG_PM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_POSIX_TIMERS b/anolis/configs/L0-MANDATORY/default/CONFIG_POSIX_TIMERS new file mode 100644 index 0000000000000000000000000000000000000000..f240ffc379f8e2cf6d540fb29aaf6b8678b91ce0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_POSIX_TIMERS @@ -0,0 +1 @@ +CONFIG_POSIX_TIMERS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT new file mode 100644 index 0000000000000000000000000000000000000000..38362d94a92acd97e8aa79b2c97030046f6ea2a0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT @@ -0,0 +1 @@ +# CONFIG_PREEMPT is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPTION b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPTION new file mode 100644 index 0000000000000000000000000000000000000000..38913b0e31f87bd3548e3375abab6970e3d895a7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPTION @@ -0,0 +1 @@ +CONFIG_PREEMPTION=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_BUILD b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_BUILD new file mode 100644 index 0000000000000000000000000000000000000000..ebc04961669eab12bfdfed2a8e7d452e3239764f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_BUILD @@ -0,0 +1 @@ +CONFIG_PREEMPT_BUILD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_DYNAMIC b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_DYNAMIC new file mode 100644 index 0000000000000000000000000000000000000000..6d995d6c4628cdb554dbdc07db8893f7f03e7ccd --- /dev/null 
+++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_DYNAMIC @@ -0,0 +1 @@ +CONFIG_PREEMPT_DYNAMIC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_NONE b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_NONE new file mode 100644 index 0000000000000000000000000000000000000000..0cbeb5a53cb93bde31f8849a08b3fa917927901e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_NONE @@ -0,0 +1 @@ +CONFIG_PREEMPT_NONE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_RCU b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_RCU new file mode 100644 index 0000000000000000000000000000000000000000..719b5866fee8e2dd4e9a26224a6427ddcb72d791 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_RCU @@ -0,0 +1 @@ +CONFIG_PREEMPT_RCU=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_TRACER b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_TRACER new file mode 100644 index 0000000000000000000000000000000000000000..42ab34971e786fae47367d2b467152effba6a9f0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_TRACER @@ -0,0 +1 @@ +# CONFIG_PREEMPT_TRACER is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_VOLUNTARY b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_VOLUNTARY new file mode 100644 index 0000000000000000000000000000000000000000..6ba012c36d21a1a4c1e8ca255dc458ce35653090 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_VOLUNTARY @@ -0,0 +1 @@ +# CONFIG_PREEMPT_VOLUNTARY is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK new file mode 100644 index 0000000000000000000000000000000000000000..c6baee637082b3a9ae7aaee95f0a9b86e76f3504 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK @@ -0,0 +1 @@ +CONFIG_PRINTK=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_INDEX b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_INDEX new file mode 100644 index 0000000000000000000000000000000000000000..bf385684d57dc59b99b05ed14726a62e0a67a970 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_INDEX @@ -0,0 +1 @@ +# CONFIG_PRINTK_INDEX is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_TIME b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_TIME new file mode 100644 index 0000000000000000000000000000000000000000..5d5b73e9cc8e5b27fa78dfed816b3b205cc62071 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_TIME @@ -0,0 +1 @@ +CONFIG_PRINTK_TIME=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROBE_EVENTS_BTF_ARGS b/anolis/configs/L0-MANDATORY/default/CONFIG_PROBE_EVENTS_BTF_ARGS new file mode 100644 index 0000000000000000000000000000000000000000..8f99f97d3740b5065bebea94adcc90a1cf3f683e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PROBE_EVENTS_BTF_ARGS @@ -0,0 +1 @@ +CONFIG_PROBE_EVENTS_BTF_ARGS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_FS new file mode 100644 index 0000000000000000000000000000000000000000..68fbd2b358843c904eac4c9ecaa9f5bab3b685b9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_FS @@ -0,0 +1 @@ +CONFIG_PROC_FS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_KCORE b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_KCORE new file mode 100644 index 0000000000000000000000000000000000000000..eb475c0a7d7cc02390dc4995327fd9e6711ababe --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_KCORE @@ -0,0 +1 @@ +CONFIG_PROC_KCORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_PAGE_MONITOR b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_PAGE_MONITOR new file mode 100644 index 0000000000000000000000000000000000000000..e728c17ad20822a6e1409da8aff8a2d5f8165f3a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_PAGE_MONITOR @@ -0,0 +1 @@ +CONFIG_PROC_PAGE_MONITOR=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_SYSCTL b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_SYSCTL new file mode 100644 index 0000000000000000000000000000000000000000..eccf86024e96aa1e61c10107a4b12717e7618f89 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_SYSCTL @@ -0,0 +1 @@ +CONFIG_PROC_SYSCTL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_VMCORE b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_VMCORE new file mode 100644 index 0000000000000000000000000000000000000000..c864e6b0b39230eb4fc1c271fab4866654ec67fe --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_VMCORE @@ -0,0 +1 @@ +CONFIG_PROC_VMCORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PSE_CONTROLLER b/anolis/configs/L0-MANDATORY/default/CONFIG_PSE_CONTROLLER new file mode 100644 index 0000000000000000000000000000000000000000..d29dad9c7ffb80fec644558e9735a3cc8b8ac971 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PSE_CONTROLLER @@ -0,0 +1 @@ +# CONFIG_PSE_CONTROLLER is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_BLK b/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_BLK new file mode 100644 index 0000000000000000000000000000000000000000..c227a05adc7f0f76b21e1e8c73865606a60a38bd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_BLK @@ -0,0 +1 @@ +# CONFIG_PSTORE_BLK is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_DEFAULT_KMSG_BYTES b/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_DEFAULT_KMSG_BYTES new file mode 100644 index 0000000000000000000000000000000000000000..84340b2550e1cb03cc9009bdfec48f0d0b06e26d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_DEFAULT_KMSG_BYTES @@ -0,0 +1 @@ +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_MOCK b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_MOCK new file mode 100644 index 0000000000000000000000000000000000000000..ecf8c4a4473fbcd6c8de7ba8a2af712b5b81f38f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_MOCK @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_MOCK is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OCP b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OCP new file mode 100644 index 0000000000000000000000000000000000000000..466cead8f0a5027a754f93741dc90359ec44224d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OCP @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_OCP is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OPTIONAL b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OPTIONAL new file mode 100644 index 0000000000000000000000000000000000000000..b494f9af4eb7a2d3fc537a71a8f96e96b793c055 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OPTIONAL @@ -0,0 +1 @@ +CONFIG_PTP_1588_CLOCK_OPTIONAL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC 
b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC new file mode 100644 index 0000000000000000000000000000000000000000..ae30596cbc3d4ec202558994f9087b9a2af41365 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC @@ -0,0 +1 @@ +CONFIG_PVPANIC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_MMIO b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_MMIO new file mode 100644 index 0000000000000000000000000000000000000000..4cc1414eb4b6b81abd52d7fbc803b1fca471eedf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_MMIO @@ -0,0 +1 @@ +# CONFIG_PVPANIC_MMIO is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_PCI b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_PCI new file mode 100644 index 0000000000000000000000000000000000000000..99efce3fb5da90007072d7577539f9219eb16720 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_PCI @@ -0,0 +1 @@ +# CONFIG_PVPANIC_PCI is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_CLK b/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_CLK new file mode 100644 index 0000000000000000000000000000000000000000..640d5daf7b6de07c14145846f3b29aa69d2933be --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_CLK @@ -0,0 +1 @@ +# CONFIG_PWM_CLK is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_DWC b/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_DWC new file mode 100644 index 0000000000000000000000000000000000000000..8aea5b09615e6d6638fe00e9f25523c78cc435ef --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_DWC @@ -0,0 +1 @@ +# CONFIG_PWM_DWC is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_QUOTA b/anolis/configs/L0-MANDATORY/default/CONFIG_QUOTA new file mode 100644 index 0000000000000000000000000000000000000000..7ae6b6fbaa0d4b8899ade73f62a2d11e7260edc0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_QUOTA @@ -0,0 +1 @@ +CONFIG_QUOTA=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RANDOMIZE_BASE b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDOMIZE_BASE new file mode 100644 index 0000000000000000000000000000000000000000..20610a95a1879cdb78437ecdc8d13ad1b08f6d53 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDOMIZE_BASE @@ -0,0 +1 @@ +CONFIG_RANDOMIZE_BASE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_FULL b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_FULL new file mode 100644 index 0000000000000000000000000000000000000000..5d26c21e2535ef770fa1cc3f506ab5720a95dfe7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_FULL @@ -0,0 +1 @@ +# CONFIG_RANDSTRUCT_FULL is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_NONE b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_NONE new file mode 100644 index 0000000000000000000000000000000000000000..e2aaf1b5dacd35ed2847912c5dbdb5c3dd838c43 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_NONE @@ -0,0 +1 @@ +CONFIG_RANDSTRUCT_NONE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_PERFORMANCE b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_PERFORMANCE new file mode 100644 index 0000000000000000000000000000000000000000..790181ab9f5c79d3158a58bd6463225bf3533935 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_PERFORMANCE @@ -0,0 +1 @@ +# CONFIG_RANDSTRUCT_PERFORMANCE is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RAS 
b/anolis/configs/L0-MANDATORY/default/CONFIG_RAS new file mode 100644 index 0000000000000000000000000000000000000000..15fb268c3b80ee63e0405eee281a06ff83f77ede --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RAS @@ -0,0 +1 @@ +CONFIG_RAS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RELAY b/anolis/configs/L0-MANDATORY/default/CONFIG_RELAY new file mode 100644 index 0000000000000000000000000000000000000000..146eff5b478df7bdc0c148e87bab8843b4c64f2e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RELAY @@ -0,0 +1 @@ +CONFIG_RELAY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RELOCATABLE b/anolis/configs/L0-MANDATORY/default/CONFIG_RELOCATABLE new file mode 100644 index 0000000000000000000000000000000000000000..36808edb3af70766a119f2787d6cc75d4ed7ac1b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RELOCATABLE @@ -0,0 +1 @@ +CONFIG_RELOCATABLE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RESCTRL_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_RESCTRL_FS new file mode 100644 index 0000000000000000000000000000000000000000..4d7ca33dcdc4fbeef2e86bef8a544c1656d266f5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RESCTRL_FS @@ -0,0 +1 @@ +CONFIG_RESCTRL_FS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RESET_CONTROLLER b/anolis/configs/L0-MANDATORY/default/CONFIG_RESET_CONTROLLER new file mode 100644 index 0000000000000000000000000000000000000000..a268c67a9b48d9b09b794495b835d0454af489a1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RESET_CONTROLLER @@ -0,0 +1 @@ +CONFIG_RESET_CONTROLLER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RPS b/anolis/configs/L0-MANDATORY/default/CONFIG_RPS new file mode 100644 index 0000000000000000000000000000000000000000..0c947aea2415b3ceab009e368936aba161ac39fb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RPS @@ -0,0 +1 @@ +CONFIG_RPS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RSEQ b/anolis/configs/L0-MANDATORY/default/CONFIG_RSEQ new file mode 100644 index 0000000000000000000000000000000000000000..adc7767df654e61da10c3abbcb6297215fb0fd04 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RSEQ @@ -0,0 +1 @@ +CONFIG_RSEQ=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RTC_CLASS b/anolis/configs/L0-MANDATORY/default/CONFIG_RTC_CLASS new file mode 100644 index 0000000000000000000000000000000000000000..70c9c29c03970c4ec7fa6773d6411faa1da343ec --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RTC_CLASS @@ -0,0 +1 @@ +CONFIG_RTC_CLASS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCHEDSTATS b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHEDSTATS new file mode 100644 index 0000000000000000000000000000000000000000..a9ef3bd78e3035af77e1e6d999499ce7700701c5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHEDSTATS @@ -0,0 +1 @@ +CONFIG_SCHEDSTATS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_AUTOGROUP b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_AUTOGROUP new file mode 100644 index 0000000000000000000000000000000000000000..6f615c29f0447ad9c067095f20bece7e4865c887 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_AUTOGROUP @@ -0,0 +1 @@ +CONFIG_SCHED_AUTOGROUP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_CORE new file mode 100644 index 0000000000000000000000000000000000000000..cc3f25e8f01b5955cd350878f3a76de0f84a6941 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_CORE @@ -0,0 +1 @@ +CONFIG_SCHED_CORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_DEBUG b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..e8b09aa7c0c4c4e6eb3116eaa062e1741654268c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_DEBUG @@ -0,0 +1 @@ +CONFIG_SCHED_DEBUG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_MC b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_MC new file mode 100644 index 0000000000000000000000000000000000000000..348674403b1bde35c3085e665da5d86b46e7c967 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_MC @@ -0,0 +1 @@ +CONFIG_SCHED_MC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCSI b/anolis/configs/L0-MANDATORY/default/CONFIG_SCSI new file mode 100644 index 0000000000000000000000000000000000000000..2e9142f106128b7b2132af1a9bda0f6d57a37b94 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SCSI @@ -0,0 +1 @@ +CONFIG_SCSI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP b/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP new file mode 100644 index 0000000000000000000000000000000000000000..eb9e150920efe80cfb6307afcfdf88d5aab4feeb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP @@ -0,0 +1 @@ +CONFIG_SECCOMP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP_FILTER b/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP_FILTER new file mode 100644 index 0000000000000000000000000000000000000000..0814ba30a8264f6e4cfe8a2a4d427f4d7040311f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP_FILTER @@ -0,0 +1 @@ +CONFIG_SECCOMP_FILTER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECONDARY_TRUSTED_KEYRING b/anolis/configs/L0-MANDATORY/default/CONFIG_SECONDARY_TRUSTED_KEYRING new file mode 100644 index 0000000000000000000000000000000000000000..6f8a21900f956eb85232aef7c2ee4d672f8973bc --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECONDARY_TRUSTED_KEYRING @@ -0,0 +1 @@ +CONFIG_SECONDARY_TRUSTED_KEYRING=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY new file mode 100644 index 0000000000000000000000000000000000000000..56c9d84012017a67f0445579303cd311022c42a2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY @@ -0,0 +1 @@ +CONFIG_SECURITY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITYFS b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITYFS new file mode 100644 index 0000000000000000000000000000000000000000..a108272d735d1cd9316eaa85fc9efaf22a7974d8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITYFS @@ -0,0 +1 @@ +CONFIG_SECURITYFS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_INFINIBAND b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_INFINIBAND new file mode 100644 index 0000000000000000000000000000000000000000..393c3f5f8e26896202f23f6b2378d26f1459a51e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_INFINIBAND @@ -0,0 +1 @@ +CONFIG_SECURITY_INFINIBAND=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK new file mode 100644 index 0000000000000000000000000000000000000000..cfde232dc88c2940773efa40b812652019de2521 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK @@ -0,0 +1 @@ +CONFIG_SECURITY_NETWORK=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK_XFRM b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK_XFRM new file mode 100644 index 0000000000000000000000000000000000000000..4920b268cdd76e7d9101b8c1b36d15996e706109 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK_XFRM @@ -0,0 +1 @@ +CONFIG_SECURITY_NETWORK_XFRM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_PATH b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_PATH new file mode 100644 index 0000000000000000000000000000000000000000..dcabac67e585bbe6f900c10de39c321a88ea70b8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_PATH @@ -0,0 +1 @@ +CONFIG_SECURITY_PATH=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX new file mode 100644 index 0000000000000000000000000000000000000000..377ec83aaaf2e29fee4d84a4fb17357b502ed6f8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX @@ -0,0 +1 @@ +CONFIG_SECURITY_SELINUX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX_BOOTPARAM b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX_BOOTPARAM new file mode 100644 index 0000000000000000000000000000000000000000..e39bcd52e6f2c451890c3f246e23bafe8e0e4e7f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX_BOOTPARAM @@ -0,0 +1 @@ +CONFIG_SECURITY_SELINUX_BOOTPARAM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SERIO b/anolis/configs/L0-MANDATORY/default/CONFIG_SERIO new file mode 100644 index 0000000000000000000000000000000000000000..e0e5093355b5475fcbdfc327cd127158e888c88f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SERIO @@ -0,0 +1 @@ +CONFIG_SERIO=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SHMEM b/anolis/configs/L0-MANDATORY/default/CONFIG_SHMEM new file mode 100644 index 0000000000000000000000000000000000000000..5867be65ea6839eb90d49de603c311247c9904a8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SHMEM @@ -0,0 +1 @@ +CONFIG_SHMEM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SIGNALFD b/anolis/configs/L0-MANDATORY/default/CONFIG_SIGNALFD new file mode 100644 index 0000000000000000000000000000000000000000..2237664d4d8be8d8e870a5071a95d248e13a00e1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SIGNALFD @@ -0,0 +1 @@ +CONFIG_SIGNALFD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB new file mode 100644 index 0000000000000000000000000000000000000000..05f729d8b5a2ef496ac185460eb5903a2462a5e3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB @@ -0,0 +1 @@ +CONFIG_SLUB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_CPU_PARTIAL b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_CPU_PARTIAL new file mode 100644 index 0000000000000000000000000000000000000000..bc41c67abcb06a5175b5eb73c1fe34bbbb1c7422 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_CPU_PARTIAL @@ -0,0 +1 @@ +CONFIG_SLUB_CPU_PARTIAL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..b19a5f05c484b80103a07560825016a0dc6ccb1d --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG @@ -0,0 +1 @@ +CONFIG_SLUB_DEBUG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG_ON b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG_ON new file mode 100644 index 0000000000000000000000000000000000000000..ed8690f187c3fb931e0f77ef57b2dd1a48affc8d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG_ON @@ -0,0 +1 @@ +# CONFIG_SLUB_DEBUG_ON is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_STATS b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_STATS new file mode 100644 index 0000000000000000000000000000000000000000..349cf04499fb1bd4739e868a6aa51b98d944ec1e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_STATS @@ -0,0 +1 @@ +# CONFIG_SLUB_STATS is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SMC b/anolis/configs/L0-MANDATORY/default/CONFIG_SMC new file mode 100644 index 0000000000000000000000000000000000000000..56a7452afaf0a8d3271a061e69109754b14aad72 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SMC @@ -0,0 +1 @@ +CONFIG_SMC=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SMP b/anolis/configs/L0-MANDATORY/default/CONFIG_SMP new file mode 100644 index 0000000000000000000000000000000000000000..1cbf7ec071dae3edd694af151bb37dfa910f5f78 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SMP @@ -0,0 +1 @@ +CONFIG_SMP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SOFTLOCKUP_DETECTOR b/anolis/configs/L0-MANDATORY/default/CONFIG_SOFTLOCKUP_DETECTOR new file mode 100644 index 0000000000000000000000000000000000000000..1ff04a43b96309f76096fb1c91fa6faf89b67da3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SOFTLOCKUP_DETECTOR @@ -0,0 +1 @@ +CONFIG_SOFTLOCKUP_DETECTOR=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM b/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM new file mode 100644 index 0000000000000000000000000000000000000000..04c45b45045d6609d80dea239bd1f80dc6d797f0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM @@ -0,0 +1 @@ +CONFIG_SPARSEMEM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM_VMEMMAP b/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM_VMEMMAP new file mode 100644 index 0000000000000000000000000000000000000000..a2826dacd1f9d4f4da0d2a4ed00d7f8f01c022c0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM_VMEMMAP @@ -0,0 +1 @@ +CONFIG_SPARSEMEM_VMEMMAP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SPI b/anolis/configs/L0-MANDATORY/default/CONFIG_SPI new file mode 100644 index 0000000000000000000000000000000000000000..5616bfc48d779a601451844879e1243756a66781 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SPI @@ -0,0 +1 @@ +CONFIG_SPI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SQUASHFS b/anolis/configs/L0-MANDATORY/default/CONFIG_SQUASHFS new file mode 100644 index 0000000000000000000000000000000000000000..931ff1ddd5a165b0fac267efbecd1e302e9095fb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SQUASHFS @@ -0,0 +1 @@ +CONFIG_SQUASHFS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_STACKTRACE b/anolis/configs/L0-MANDATORY/default/CONFIG_STACKTRACE new file mode 100644 index 0000000000000000000000000000000000000000..3947d556f7f29db49e33dc8dd72b1c36c31c8071 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_STACKTRACE @@ -0,0 +1 @@ +CONFIG_STACKTRACE=y diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_STM b/anolis/configs/L0-MANDATORY/default/CONFIG_STM new file mode 100644 index 0000000000000000000000000000000000000000..3542730b9e820aa97ae1faf8545843b0bf5da715 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_STM @@ -0,0 +1 @@ +CONFIG_STM=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_STREAM_PARSER b/anolis/configs/L0-MANDATORY/default/CONFIG_STREAM_PARSER new file mode 100644 index 0000000000000000000000000000000000000000..6086b5e508ec197298af463c69dfce80c40ba189 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_STREAM_PARSER @@ -0,0 +1 @@ +CONFIG_STREAM_PARSER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_KERNEL_RWX b/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_KERNEL_RWX new file mode 100644 index 0000000000000000000000000000000000000000..8c57b454ad2611f1294efd484b7827ad380dc85d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_KERNEL_RWX @@ -0,0 +1 @@ +CONFIG_STRICT_KERNEL_RWX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_MODULE_RWX b/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_MODULE_RWX new file mode 100644 index 0000000000000000000000000000000000000000..2f1f100d72415b961f0e46e3e8cf894b2b29142c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_MODULE_RWX @@ -0,0 +1 @@ +CONFIG_STRICT_MODULE_RWX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SUNRPC b/anolis/configs/L0-MANDATORY/default/CONFIG_SUNRPC new file mode 100644 index 0000000000000000000000000000000000000000..1c5b2e2407456d1d801675540a193fb92fd09726 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SUNRPC @@ -0,0 +1 @@ +CONFIG_SUNRPC=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SWAP b/anolis/configs/L0-MANDATORY/default/CONFIG_SWAP new file mode 100644 index 0000000000000000000000000000000000000000..38565471d0e39011bcfe8788ab231cbe130298cc --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SWAP @@ -0,0 +1 @@ +CONFIG_SWAP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYN_COOKIES b/anolis/configs/L0-MANDATORY/default/CONFIG_SYN_COOKIES new file mode 100644 index 0000000000000000000000000000000000000000..5fec45b2930799dbbd3694fe5ac36ba6085d8cf9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYN_COOKIES @@ -0,0 +1 @@ +CONFIG_SYN_COOKIES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYSCTL b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSCTL new file mode 100644 index 0000000000000000000000000000000000000000..dd53c266bf527983a599c88cf80bc000ccd1a0c5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSCTL @@ -0,0 +1 @@ +CONFIG_SYSCTL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYSFS b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSFS new file mode 100644 index 0000000000000000000000000000000000000000..54827af2b5c3d930076cff7b93347efb9e8b97ef --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSFS @@ -0,0 +1 @@ +CONFIG_SYSFS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYRING b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYRING new file mode 100644 index 0000000000000000000000000000000000000000..9cb63f099a137e70e06dcf817fe6c138f4f82cab --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYRING @@ -0,0 +1 @@ +CONFIG_SYSTEM_TRUSTED_KEYRING=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYS 
b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYS new file mode 100644 index 0000000000000000000000000000000000000000..db0105d049007753f66bfa93bd9f00ada54b1f17 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYS @@ -0,0 +1 @@ +CONFIG_SYSTEM_TRUSTED_KEYS="" diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYSVIPC b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSVIPC new file mode 100644 index 0000000000000000000000000000000000000000..3d67b9bacb9ed6a889f4d097aabfb0b8654b5d69 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSVIPC @@ -0,0 +1 @@ +CONFIG_SYSVIPC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TAP b/anolis/configs/L0-MANDATORY/default/CONFIG_TAP new file mode 100644 index 0000000000000000000000000000000000000000..7604449e6d919e6c7d08794dab1bdf78c379f777 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TAP @@ -0,0 +1 @@ +CONFIG_TAP=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TARGET_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_TARGET_CORE new file mode 100644 index 0000000000000000000000000000000000000000..437db1bb0d2f40ff606c0e6aa289c2cd7f102a5c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TARGET_CORE @@ -0,0 +1 @@ +CONFIG_TARGET_CORE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TIS b/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TIS new file mode 100644 index 0000000000000000000000000000000000000000..eb9a4ccaca40e72b37a6b936f3dc379b2e90d72e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TIS @@ -0,0 +1 @@ +CONFIG_TCG_TIS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TPM b/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TPM new file mode 100644 index 0000000000000000000000000000000000000000..07d9499c121f5bb24df88c75e2e522cea4cea8cf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TPM @@ -0,0 +1 @@ +CONFIG_TCG_TPM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_ADVANCED b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_ADVANCED new file mode 100644 index 0000000000000000000000000000000000000000..86f996b95d6db80ff33a476a5e4436ed6bff6c6d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_ADVANCED @@ -0,0 +1 @@ +CONFIG_TCP_CONG_ADVANCED=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_BBR b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_BBR new file mode 100644 index 0000000000000000000000000000000000000000..55ff89cb627f5e4f59aa8b026345cf5efcc60ebb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_BBR @@ -0,0 +1 @@ +CONFIG_TCP_CONG_BBR=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_CUBIC b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_CUBIC new file mode 100644 index 0000000000000000000000000000000000000000..7be0dc4241ef8b7acd91a70af14c4546ff462888 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_CUBIC @@ -0,0 +1 @@ +CONFIG_TCP_CONG_CUBIC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_THERMAL b/anolis/configs/L0-MANDATORY/default/CONFIG_THERMAL new file mode 100644 index 0000000000000000000000000000000000000000..42356be7d00aa265f4f89d8be00ddd3a21addc6e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_THERMAL @@ -0,0 +1 @@ +CONFIG_THERMAL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TIMERFD b/anolis/configs/L0-MANDATORY/default/CONFIG_TIMERFD new file mode 100644 index 
0000000000000000000000000000000000000000..e6f99e2dd67c15a9e1ce28a49640d14d1d3fb1c9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TIMERFD @@ -0,0 +1 @@ +CONFIG_TIMERFD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TLS b/anolis/configs/L0-MANDATORY/default/CONFIG_TLS new file mode 100644 index 0000000000000000000000000000000000000000..1d627c36a363eae9729b69bfeb3f002bcf2b81da --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TLS @@ -0,0 +1 @@ +CONFIG_TLS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TMPFS b/anolis/configs/L0-MANDATORY/default/CONFIG_TMPFS new file mode 100644 index 0000000000000000000000000000000000000000..417f7b76b34a98b3e38e67813e0f0eca62bf14f1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TMPFS @@ -0,0 +1 @@ +CONFIG_TMPFS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TRACEPOINTS b/anolis/configs/L0-MANDATORY/default/CONFIG_TRACEPOINTS new file mode 100644 index 0000000000000000000000000000000000000000..510725c37a8ec20f92adf766e9b677c52675749d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TRACEPOINTS @@ -0,0 +1 @@ +CONFIG_TRACEPOINTS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TRANSPARENT_HUGEPAGE b/anolis/configs/L0-MANDATORY/default/CONFIG_TRANSPARENT_HUGEPAGE new file mode 100644 index 0000000000000000000000000000000000000000..75d999c665c9c86c792be2ed828e5c2704f2129d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TRANSPARENT_HUGEPAGE @@ -0,0 +1 @@ +CONFIG_TRANSPARENT_HUGEPAGE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TTY b/anolis/configs/L0-MANDATORY/default/CONFIG_TTY new file mode 100644 index 0000000000000000000000000000000000000000..f21b4a108f335f906098874c3b656833c8432e15 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TTY @@ -0,0 +1 @@ +CONFIG_TTY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TUN b/anolis/configs/L0-MANDATORY/default/CONFIG_TUN new file mode 100644 index 0000000000000000000000000000000000000000..12009e34d34e5ee38b5fd66685e8aa8d795e62ca --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TUN @@ -0,0 +1 @@ +CONFIG_TUN=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TXGBE b/anolis/configs/L0-MANDATORY/default/CONFIG_TXGBE new file mode 100644 index 0000000000000000000000000000000000000000..415e2083637ad915c5eab18fefe02d6d395961be --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TXGBE @@ -0,0 +1 @@ +CONFIG_TXGBE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UIO b/anolis/configs/L0-MANDATORY/default/CONFIG_UIO new file mode 100644 index 0000000000000000000000000000000000000000..109e559666635d1e290abb9fb468cbdec2b52736 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_UIO @@ -0,0 +1 @@ +CONFIG_UIO=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX b/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX new file mode 100644 index 0000000000000000000000000000000000000000..07b000ef4ddb799ada2b3c6ddd86eaac99c45013 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX @@ -0,0 +1 @@ +CONFIG_UNIX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX98_PTYS b/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX98_PTYS new file mode 100644 index 0000000000000000000000000000000000000000..9e23599229b8ed2dc0d5ab036e2b77d00bc6af73 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX98_PTYS @@ -0,0 +1 @@ +CONFIG_UNIX98_PTYS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UNWINDER_ORC 
b/anolis/configs/L0-MANDATORY/default/CONFIG_UNWINDER_ORC new file mode 100644 index 0000000000000000000000000000000000000000..6b6908419acbf37b0c23a92b55d816eb5ffda505 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_UNWINDER_ORC @@ -0,0 +1 @@ +CONFIG_UNWINDER_ORC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UPROBE_EVENTS b/anolis/configs/L0-MANDATORY/default/CONFIG_UPROBE_EVENTS new file mode 100644 index 0000000000000000000000000000000000000000..81f7be965605ca1af7d5efb440ca3065741dd12d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_UPROBE_EVENTS @@ -0,0 +1 @@ +CONFIG_UPROBE_EVENTS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_USB b/anolis/configs/L0-MANDATORY/default/CONFIG_USB new file mode 100644 index 0000000000000000000000000000000000000000..45e19309d49cb939a2b4fb83f1fd8c3afe91efeb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_USB @@ -0,0 +1 @@ +CONFIG_USB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_USB_SUPPORT b/anolis/configs/L0-MANDATORY/default/CONFIG_USB_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..c4310e0b7237515ad2aeec443fc342c6aefb3af0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_USB_SUPPORT @@ -0,0 +1 @@ +CONFIG_USB_SUPPORT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_USERFAULTFD b/anolis/configs/L0-MANDATORY/default/CONFIG_USERFAULTFD new file mode 100644 index 0000000000000000000000000000000000000000..698c7ed28a26719878b28131c7070adee2ff26fd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_USERFAULTFD @@ -0,0 +1 @@ +CONFIG_USERFAULTFD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UTS_NS b/anolis/configs/L0-MANDATORY/default/CONFIG_UTS_NS new file mode 100644 index 0000000000000000000000000000000000000000..d6c1f3443d8b45cf202e920ee6cd4cb8a653db30 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_UTS_NS @@ -0,0 +1 @@ +CONFIG_UTS_NS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VETH b/anolis/configs/L0-MANDATORY/default/CONFIG_VETH new file mode 100644 index 0000000000000000000000000000000000000000..80311f71266d25bb4d36bee9c2e6e0c8212b5f04 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VETH @@ -0,0 +1 @@ +CONFIG_VETH=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VFAT_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_VFAT_FS new file mode 100644 index 0000000000000000000000000000000000000000..3204b85ef3fd7efcfcbc016389b5f6230727fc77 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VFAT_FS @@ -0,0 +1 @@ +CONFIG_VFAT_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO b/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO new file mode 100644 index 0000000000000000000000000000000000000000..3cdbde3d6df7876bca3a2f70210e81bbfdbdc3f6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO @@ -0,0 +1 @@ +CONFIG_VFIO=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO_PCI b/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO_PCI new file mode 100644 index 0000000000000000000000000000000000000000..ace58fff3e600f8c83d82a6ec191c618bb60d344 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO_PCI @@ -0,0 +1 @@ +CONFIG_VFIO_PCI=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VGA_ARB b/anolis/configs/L0-MANDATORY/default/CONFIG_VGA_ARB new file mode 100644 index 0000000000000000000000000000000000000000..c6d1681b354cfbd486278cb951795bd671055e5f --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_VGA_ARB @@ -0,0 +1 @@ +CONFIG_VGA_ARB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_NET b/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_NET new file mode 100644 index 0000000000000000000000000000000000000000..4dd9712dc3dea42eccdcaa0c5d132c380f095ea6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_NET @@ -0,0 +1 @@ +CONFIG_VHOST_NET=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_VSOCK b/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_VSOCK new file mode 100644 index 0000000000000000000000000000000000000000..935594a5a07d4177a73b603552aaf1d66315b257 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_VSOCK @@ -0,0 +1 @@ +CONFIG_VHOST_VSOCK=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_BALLOON b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_BALLOON new file mode 100644 index 0000000000000000000000000000000000000000..f3aec11c1598f96d17ff7c3f4e07c74fd4451dde --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_BALLOON @@ -0,0 +1 @@ +CONFIG_VIRTIO_BALLOON=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_CONSOLE b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_CONSOLE new file mode 100644 index 0000000000000000000000000000000000000000..92643f4c4b6ab0736e0e4063e2faa0d23b944851 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_CONSOLE @@ -0,0 +1 @@ +CONFIG_VIRTIO_CONSOLE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_FS new file mode 100644 index 0000000000000000000000000000000000000000..9fe6466163ed2aaf2bf4c9c354aa894ccc4f5314 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_FS @@ -0,0 +1 @@ +CONFIG_VIRTIO_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MEM b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MEM new file mode 100644 index 0000000000000000000000000000000000000000..3780f32af7be9f672e24a84ac2b9ff1b256e6e5d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MEM @@ -0,0 +1 @@ +CONFIG_VIRTIO_MEM=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MENU b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MENU new file mode 100644 index 0000000000000000000000000000000000000000..ce9f283a857d3b45e164f16ddb4befa7a29a21b1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MENU @@ -0,0 +1 @@ +CONFIG_VIRTIO_MENU=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_NET b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_NET new file mode 100644 index 0000000000000000000000000000000000000000..170da19d65dee5a6fa12945c3393c7fe2b4f2826 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_NET @@ -0,0 +1 @@ +CONFIG_VIRTIO_NET=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_PMEM b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_PMEM new file mode 100644 index 0000000000000000000000000000000000000000..b870f02c619698072383810f098c9f396b2f3e44 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_PMEM @@ -0,0 +1 @@ +CONFIG_VIRTIO_PMEM=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTUALIZATION b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTUALIZATION new file mode 100644 index 0000000000000000000000000000000000000000..097e8b93583bfb7e7f0f5e54165cb2c59dad98fd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTUALIZATION @@ -0,0 +1 @@ 
+CONFIG_VIRTUALIZATION=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRT_CPU_ACCOUNTING_GEN b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRT_CPU_ACCOUNTING_GEN new file mode 100644 index 0000000000000000000000000000000000000000..16aaf1a83d5848f148afc59d9dd096261d78df56 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRT_CPU_ACCOUNTING_GEN @@ -0,0 +1 @@ +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VMAP_STACK b/anolis/configs/L0-MANDATORY/default/CONFIG_VMAP_STACK new file mode 100644 index 0000000000000000000000000000000000000000..8bd986875fc7521b2510045525faf3222d192baa --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VMAP_STACK @@ -0,0 +1 @@ +CONFIG_VMAP_STACK=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VM_EVENT_COUNTERS b/anolis/configs/L0-MANDATORY/default/CONFIG_VM_EVENT_COUNTERS new file mode 100644 index 0000000000000000000000000000000000000000..de44b20ecc6b91ec9489638f27749b8b30948b5a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VM_EVENT_COUNTERS @@ -0,0 +1 @@ +CONFIG_VM_EVENT_COUNTERS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VSOCKETS b/anolis/configs/L0-MANDATORY/default/CONFIG_VSOCKETS new file mode 100644 index 0000000000000000000000000000000000000000..da4a1880654dcb757a7f3f491d854057a34efa61 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VSOCKETS @@ -0,0 +1 @@ +CONFIG_VSOCKETS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VT b/anolis/configs/L0-MANDATORY/default/CONFIG_VT new file mode 100644 index 0000000000000000000000000000000000000000..4842a998051251c0b72996a96f68014d2b0cfe8a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VT @@ -0,0 +1 @@ +CONFIG_VT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VT_CONSOLE b/anolis/configs/L0-MANDATORY/default/CONFIG_VT_CONSOLE new file mode 100644 index 0000000000000000000000000000000000000000..c3fcafd5f5d40b8f71359987893e8676364a65c9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VT_CONSOLE @@ -0,0 +1 @@ +CONFIG_VT_CONSOLE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_WATCHDOG b/anolis/configs/L0-MANDATORY/default/CONFIG_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..80e211dcedd2ee483850985aed09120d0c168b25 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_WATCHDOG @@ -0,0 +1 @@ +CONFIG_WATCHDOG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_X509_CERTIFICATE_PARSER b/anolis/configs/L0-MANDATORY/default/CONFIG_X509_CERTIFICATE_PARSER new file mode 100644 index 0000000000000000000000000000000000000000..4376b3a12b66b8aba820bf7f4195c8952a581a42 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_X509_CERTIFICATE_PARSER @@ -0,0 +1 @@ +CONFIG_X509_CERTIFICATE_PARSER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_XDP_SOCKETS b/anolis/configs/L0-MANDATORY/default/CONFIG_XDP_SOCKETS new file mode 100644 index 0000000000000000000000000000000000000000..061b65a41476d490b888453a1fdda772447b294c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_XDP_SOCKETS @@ -0,0 +1 @@ +CONFIG_XDP_SOCKETS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_XFRM_USER b/anolis/configs/L0-MANDATORY/default/CONFIG_XFRM_USER new file mode 100644 index 0000000000000000000000000000000000000000..bfb1935c16911b422cf4ae730daff56352d637d5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_XFRM_USER @@ -0,0 +1 @@ +CONFIG_XFRM_USER=y diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_XFS_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_XFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..52766aeca40d107b3c8adc8806a61fbca1495f43 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_XFS_FS @@ -0,0 +1 @@ +CONFIG_XFS_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_XPS b/anolis/configs/L0-MANDATORY/default/CONFIG_XPS new file mode 100644 index 0000000000000000000000000000000000000000..357db44258fcc79c248392671a5481463874ef57 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_XPS @@ -0,0 +1 @@ +CONFIG_XPS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_XZ_DEC b/anolis/configs/L0-MANDATORY/default/CONFIG_XZ_DEC new file mode 100644 index 0000000000000000000000000000000000000000..42ed64a29056aa71a5af2f4301a7bf041b8ba9e7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_XZ_DEC @@ -0,0 +1 @@ +CONFIG_XZ_DEC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DEVICE b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DEVICE new file mode 100644 index 0000000000000000000000000000000000000000..7d0942fcf12a9c1f9aba959ecc481ed488d0816f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DEVICE @@ -0,0 +1 @@ +CONFIG_ZONE_DEVICE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA new file mode 100644 index 0000000000000000000000000000000000000000..c1b5f84a5b4050c58da12f3c669afd7db94dda8a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA @@ -0,0 +1 @@ +CONFIG_ZONE_DMA=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA32 b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA32 new file mode 100644 index 0000000000000000000000000000000000000000..8e7948af4e8cfe61737b58cab355d74a44121856 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA32 @@ -0,0 +1 @@ +CONFIG_ZONE_DMA32=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZRAM b/anolis/configs/L0-MANDATORY/default/CONFIG_ZRAM new file mode 100644 index 0000000000000000000000000000000000000000..32adeab9e5ddcefa0cfe63a5d0453eadba907ad8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZRAM @@ -0,0 +1 @@ +CONFIG_ZRAM=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_COMPRESS b/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_COMPRESS new file mode 100644 index 0000000000000000000000000000000000000000..b50376457fb01ac59130261e82709499433c71ff --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_COMPRESS @@ -0,0 +1 @@ +CONFIG_ZSTD_COMPRESS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_DECOMPRESS b/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_DECOMPRESS new file mode 100644 index 0000000000000000000000000000000000000000..c9a4b9bc3639ecd314bad5f07bdb38d53ecf4458 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_DECOMPRESS @@ -0,0 +1 @@ +CONFIG_ZSTD_DECOMPRESS=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_AMD_NUMA b/anolis/configs/L0-MANDATORY/x86/CONFIG_AMD_NUMA new file mode 100644 index 0000000000000000000000000000000000000000..2ff0db092fda3e1df088fc96ee5294ea5b3208f3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_AMD_NUMA @@ -0,0 +1 @@ +CONFIG_AMD_NUMA=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS b/anolis/configs/L0-MANDATORY/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS new file mode 100644 index 
0000000000000000000000000000000000000000..f572df8b6f043e2dc212fd0fd0611a1ec5c0dfac --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_COMPAT_VDSO b/anolis/configs/L0-MANDATORY/x86/CONFIG_COMPAT_VDSO new file mode 100644 index 0000000000000000000000000000000000000000..a79844faa70846a4067338f1d4e6b119ecaa8e88 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_COMPAT_VDSO @@ -0,0 +1 @@ +# CONFIG_COMPAT_VDSO is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_AMD b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_AMD new file mode 100644 index 0000000000000000000000000000000000000000..d5e1923eee950cabdddc749b46fef6e0e5659b86 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_AMD @@ -0,0 +1 @@ +CONFIG_CPU_SUP_AMD=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_HYGON b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_HYGON new file mode 100644 index 0000000000000000000000000000000000000000..21b46d0551af6e30de975dbd4f0dd5088b81142f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_HYGON @@ -0,0 +1 @@ +CONFIG_CPU_SUP_HYGON=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_INTEL b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_INTEL new file mode 100644 index 0000000000000000000000000000000000000000..7eadcb81665570a584b5333cd9f8a25dfdd8a725 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_INTEL @@ -0,0 +1 @@ +CONFIG_CPU_SUP_INTEL=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_ZHAOXIN b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_ZHAOXIN new file mode 100644 index 0000000000000000000000000000000000000000..04e96754b00bf16ba349ba2382069a465b843334 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_ZHAOXIN @@ -0,0 +1 @@ +CONFIG_CPU_SUP_ZHAOXIN=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SIMD b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SIMD new file mode 100644 index 0000000000000000000000000000000000000000..91fac0ebe36ad2c06c36f061ef46052e41793077 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SIMD @@ -0,0 +1 @@ +CONFIG_CRYPTO_SIMD=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..179f5f1fa240202f4cf53be5ead78e782fa5d3cf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM3_AVX_X86_64=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64 b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..8a091c4c4f0185e4439363de7d41d200c6d0c401 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64 b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..857d6494dee5d6e688e6e71cd35aee0ae2de9f6b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_DEFERRED_STRUCT_PAGE_INIT 
b/anolis/configs/L0-MANDATORY/x86/CONFIG_DEFERRED_STRUCT_PAGE_INIT new file mode 100644 index 0000000000000000000000000000000000000000..ce9faa20fc1882be26f71ea72d68045ae4a1d782 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_DEFERRED_STRUCT_PAGE_INIT @@ -0,0 +1 @@ +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_EXTCON b/anolis/configs/L0-MANDATORY/x86/CONFIG_EXTCON new file mode 100644 index 0000000000000000000000000000000000000000..efa6c7e6f1e821f1cab16da1f426b97997af1f4c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_EXTCON @@ -0,0 +1 @@ +# CONFIG_EXTCON is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_HPET_TIMER b/anolis/configs/L0-MANDATORY/x86/CONFIG_HPET_TIMER new file mode 100644 index 0000000000000000000000000000000000000000..381266e17a1d4d16944ce8d68d389523dd01a8df --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_HPET_TIMER @@ -0,0 +1 @@ +CONFIG_HPET_TIMER=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ new file mode 100644 index 0000000000000000000000000000000000000000..b2857157e15fd122ce542386fe808acfbaa2c1f6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ @@ -0,0 +1 @@ +CONFIG_HZ=1000 diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_1000 b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_1000 new file mode 100644 index 0000000000000000000000000000000000000000..81777c737e09ab82a97285f3b3ba2f90d77c76b0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_1000 @@ -0,0 +1 @@ +CONFIG_HZ_1000=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_250 b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_250 new file mode 100644 index 0000000000000000000000000000000000000000..fde8748650f4c6a118bea9aa1be77dafc652a1b0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_250 @@ -0,0 +1 @@ +# CONFIG_HZ_250 is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_IA32_FEAT_CTL b/anolis/configs/L0-MANDATORY/x86/CONFIG_IA32_FEAT_CTL new file mode 100644 index 0000000000000000000000000000000000000000..75afb0cb1de0b2a38bd72b8c5cb2e975b0aebd28 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_IA32_FEAT_CTL @@ -0,0 +1 @@ +CONFIG_IA32_FEAT_CTL=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_INSTRUCTION_DECODER b/anolis/configs/L0-MANDATORY/x86/CONFIG_INSTRUCTION_DECODER new file mode 100644 index 0000000000000000000000000000000000000000..46610d00bcb341b76f9a478f1c2d9399803a85d4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_INSTRUCTION_DECODER @@ -0,0 +1 @@ +CONFIG_INSTRUCTION_DECODER=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IDLE b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IDLE new file mode 100644 index 0000000000000000000000000000000000000000..11c398412921d05d6832dc35c9151edbb01b1031 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IDLE @@ -0,0 +1 @@ +CONFIG_INTEL_IDLE=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU new file mode 100644 index 0000000000000000000000000000000000000000..b21af5c551f074516f82ab085e60cbab9fd87e28 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU @@ -0,0 +1 @@ +CONFIG_INTEL_IOMMU=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU_SVM b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU_SVM new file mode 100644 index 0000000000000000000000000000000000000000..eb03ccae7899634e1cea58ff14b24640c75cf131 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU_SVM @@ -0,0 +1 @@ +CONFIG_INTEL_IOMMU_SVM=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_TDX_GUEST b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_TDX_GUEST new file mode 100644 index 0000000000000000000000000000000000000000..1340073a4abf7388cd76bbac0d587dc12605fa29 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_TDX_GUEST @@ -0,0 +1 @@ +CONFIG_INTEL_TDX_GUEST=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_REMAP b/anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_REMAP new file mode 100644 index 0000000000000000000000000000000000000000..dfe559c5e88c57d9cbe718454a34aad86a314abb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_REMAP @@ -0,0 +1 @@ +CONFIG_IRQ_REMAP=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_KPROBES_ON_FTRACE b/anolis/configs/L0-MANDATORY/x86/CONFIG_KPROBES_ON_FTRACE new file mode 100644 index 0000000000000000000000000000000000000000..de3ae48570c692f271b8b7ac6158d00017a05a91 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_KPROBES_ON_FTRACE @@ -0,0 +1 @@ +CONFIG_KPROBES_ON_FTRACE=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM new file mode 100644 index 0000000000000000000000000000000000000000..cf9bf67a6a49065a23c5147d76c5b389917c8a28 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM @@ -0,0 +1 @@ +CONFIG_KVM=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_AMD b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_AMD new file mode 100644 index 0000000000000000000000000000000000000000..b96224f70affbcf5366314c20a573d793b33afc5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_AMD @@ -0,0 +1 @@ +CONFIG_KVM_AMD=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_GUEST b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_GUEST new file mode 100644 index 0000000000000000000000000000000000000000..133da04247ee04089bb72c050d809edfea289097 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_GUEST @@ -0,0 +1 @@ +CONFIG_KVM_GUEST=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_INTEL b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_INTEL new file mode 100644 index 0000000000000000000000000000000000000000..7f5fa445299924423b33dbd5db87cd4e4ef0aebf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_INTEL @@ -0,0 +1 @@ +CONFIG_KVM_INTEL=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_MTRR b/anolis/configs/L0-MANDATORY/x86/CONFIG_MTRR new file mode 100644 index 0000000000000000000000000000000000000000..744162bca1b8ac234e59270ce16210585148d594 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_MTRR @@ -0,0 +1 @@ +CONFIG_MTRR=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_OPTPROBES b/anolis/configs/L0-MANDATORY/x86/CONFIG_OPTPROBES new file mode 100644 index 0000000000000000000000000000000000000000..a604dda96dbd715ed8ab1b65301caac1d6a30e86 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_OPTPROBES @@ -0,0 +1 @@ +CONFIG_OPTPROBES=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PAGE_TABLE_ISOLATION b/anolis/configs/L0-MANDATORY/x86/CONFIG_PAGE_TABLE_ISOLATION new file mode 100644 index 0000000000000000000000000000000000000000..6881a7757248e3183464823633a43c26e86536ac --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_PAGE_TABLE_ISOLATION @@ -0,0 +1 @@ +CONFIG_PAGE_TABLE_ISOLATION=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PHYSICAL_START b/anolis/configs/L0-MANDATORY/x86/CONFIG_PHYSICAL_START new file mode 100644 index 
0000000000000000000000000000000000000000..197ff1f912721f70546e0b754a8e756f1267dd92 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_PHYSICAL_START @@ -0,0 +1 @@ +CONFIG_PHYSICAL_START=0x1000000 diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_RANDOMIZE_MEMORY b/anolis/configs/L0-MANDATORY/x86/CONFIG_RANDOMIZE_MEMORY new file mode 100644 index 0000000000000000000000000000000000000000..cb4fdaaaca38e2774669fa0f8d8512f868704900 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_RANDOMIZE_MEMORY @@ -0,0 +1 @@ +CONFIG_RANDOMIZE_MEMORY=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_RAS_CEC b/anolis/configs/L0-MANDATORY/x86/CONFIG_RAS_CEC new file mode 100644 index 0000000000000000000000000000000000000000..d1b75f27c7d8b67c47dc06b27248f095a1fd0f55 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_RAS_CEC @@ -0,0 +1 @@ +# CONFIG_RAS_CEC is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_RETHUNK b/anolis/configs/L0-MANDATORY/x86/CONFIG_RETHUNK new file mode 100644 index 0000000000000000000000000000000000000000..dd6882f9adb57735e5d7fb5fee61a717aabcc21d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_RETHUNK @@ -0,0 +1 @@ +CONFIG_RETHUNK=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_RETPOLINE b/anolis/configs/L0-MANDATORY/x86/CONFIG_RETPOLINE new file mode 100644 index 0000000000000000000000000000000000000000..c46e12644718396c86092f3ffaadac901c8f924c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_RETPOLINE @@ -0,0 +1 @@ +CONFIG_RETPOLINE=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_SEV_GUEST b/anolis/configs/L0-MANDATORY/x86/CONFIG_SEV_GUEST new file mode 100644 index 0000000000000000000000000000000000000000..2917a4256f6bf5060854975acd78f6975b3694d2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_SEV_GUEST @@ -0,0 +1 @@ +CONFIG_SEV_GUEST=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_FRAME_POINTER b/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_FRAME_POINTER new file mode 100644 index 0000000000000000000000000000000000000000..abdba63a50448e3daa6e7d63b477c88d3b7ab812 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_FRAME_POINTER @@ -0,0 +1 @@ +# CONFIG_UNWINDER_FRAME_POINTER is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_VGA_CONSOLE b/anolis/configs/L0-MANDATORY/x86/CONFIG_VGA_CONSOLE new file mode 100644 index 0000000000000000000000000000000000000000..461d0b1d4e82f5c25b89bfa9f1d33a59a81dc9bd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_VGA_CONSOLE @@ -0,0 +1 @@ +CONFIG_VGA_CONSOLE=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO new file mode 100644 index 0000000000000000000000000000000000000000..30057292047c1a905095f0ba2172ea78362a26aa --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO @@ -0,0 +1 @@ +CONFIG_VIRTIO=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_BLK b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_BLK new file mode 100644 index 0000000000000000000000000000000000000000..a98570e7985736d990ad7b452a9d2cfd0fc24dd4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_BLK @@ -0,0 +1 @@ +CONFIG_VIRTIO_BLK=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_MMIO b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_MMIO new file mode 100644 index 0000000000000000000000000000000000000000..8d3f6df40a387b31dcb03a8ad09f1e3edfe63710 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_MMIO @@ -0,0 +1 @@ 
+CONFIG_VIRTIO_MMIO=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_PCI b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_PCI new file mode 100644 index 0000000000000000000000000000000000000000..902a03720ac5eb5d4eb20f35e2111d9aafd9fa6b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_PCI @@ -0,0 +1 @@ +CONFIG_VIRTIO_PCI=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86 b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86 new file mode 100644 index 0000000000000000000000000000000000000000..083f4ef4358071500e9ba1a388cc2a5977331a00 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86 @@ -0,0 +1 @@ +CONFIG_X86=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_5LEVEL b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_5LEVEL new file mode 100644 index 0000000000000000000000000000000000000000..db301f396452141d88ddd8ff4d35ed65f2695943 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_5LEVEL @@ -0,0 +1 @@ +# CONFIG_X86_5LEVEL is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64 b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..0f9a5b591c63c7d2ec7f3afb8adbc153f76999fa --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64 @@ -0,0 +1 @@ +CONFIG_X86_64=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_ACPI_NUMA b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_ACPI_NUMA new file mode 100644 index 0000000000000000000000000000000000000000..9ed4791dca112ef099bb851337bbb5e425a2f10f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_ACPI_NUMA @@ -0,0 +1 @@ +CONFIG_X86_64_ACPI_NUMA=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_SMP b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_SMP new file mode 100644 index 0000000000000000000000000000000000000000..40d5ce7ec16b496d21e2fb02bd40375b3e46e8c9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_SMP @@ -0,0 +1 @@ +CONFIG_X86_64_SMP=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CMPXCHG64 b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CMPXCHG64 new file mode 100644 index 0000000000000000000000000000000000000000..de479387b87731df5b3cf31cb8827ae96710ea3d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CMPXCHG64 @@ -0,0 +1 @@ +CONFIG_X86_CMPXCHG64=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CPU_RESCTRL b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CPU_RESCTRL new file mode 100644 index 0000000000000000000000000000000000000000..0388f23848bff5b817c1ac76129deffca5e255b8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CPU_RESCTRL @@ -0,0 +1 @@ +CONFIG_X86_CPU_RESCTRL=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IOPL_IOPERM b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IOPL_IOPERM new file mode 100644 index 0000000000000000000000000000000000000000..cb77e518a53398ee16bbc4f054e4d46acb187988 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IOPL_IOPERM @@ -0,0 +1 @@ +CONFIG_X86_IOPL_IOPERM=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IO_APIC b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IO_APIC new file mode 100644 index 0000000000000000000000000000000000000000..4e82cbf6613b4064d1fd1f3849f69881c9a90796 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IO_APIC @@ -0,0 +1 @@ +CONFIG_X86_IO_APIC=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_LOCAL_APIC b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_LOCAL_APIC new file mode 100644 index 
0000000000000000000000000000000000000000..a4be2f8427cbce538c7b8dfe16e2d9e9f7c3f09d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_LOCAL_APIC @@ -0,0 +1 @@ +CONFIG_X86_LOCAL_APIC=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE new file mode 100644 index 0000000000000000000000000000000000000000..de8e48077fc415d3c62d200d58629620bbb40b7b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE @@ -0,0 +1 @@ +CONFIG_X86_MCE=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_AMD b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_AMD new file mode 100644 index 0000000000000000000000000000000000000000..7c1f05da9c25d3335f362b1a1db00f372aef11f6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_AMD @@ -0,0 +1 @@ +CONFIG_X86_MCE_AMD=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_INTEL b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_INTEL new file mode 100644 index 0000000000000000000000000000000000000000..17f52def25f4e416cc005caadd780d32038d2fb8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_INTEL @@ -0,0 +1 @@ +CONFIG_X86_MCE_INTEL=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PAT b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PAT new file mode 100644 index 0000000000000000000000000000000000000000..09f062ec2b29c7227d59429f9837b1b4cf56db05 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PAT @@ -0,0 +1 @@ +CONFIG_X86_PAT=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PLATFORM_DEVICES b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PLATFORM_DEVICES new file mode 100644 index 0000000000000000000000000000000000000000..9f3b3e4361555507043dffbf3e7fa2515f49ce38 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PLATFORM_DEVICES @@ -0,0 +1 @@ +CONFIG_X86_PLATFORM_DEVICES=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_SGX b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_SGX new file mode 100644 index 0000000000000000000000000000000000000000..afd56e8184aecfbb7827ed65c4a4a4339cbe7977 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_SGX @@ -0,0 +1 @@ +CONFIG_X86_SGX=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_TSC b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_TSC new file mode 100644 index 0000000000000000000000000000000000000000..0b00e79015368058936e3c8226db856412dca5ca --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_TSC @@ -0,0 +1 @@ +CONFIG_X86_TSC=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_UMIP b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_UMIP new file mode 100644 index 0000000000000000000000000000000000000000..1fc309a652088ad657ae4004d6ebb3310b12eefb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_UMIP @@ -0,0 +1 @@ +CONFIG_X86_UMIP=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_VSYSCALL_EMULATION b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_VSYSCALL_EMULATION new file mode 100644 index 0000000000000000000000000000000000000000..6c8bee87eed79f61c0925fc1758e6c7fb45cf285 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_VSYSCALL_EMULATION @@ -0,0 +1 @@ +CONFIG_X86_VSYSCALL_EMULATION=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_X2APIC b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_X2APIC new file mode 100644 index 0000000000000000000000000000000000000000..8bee94deca3e0874b971db972aa11a703fc03072 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_X2APIC @@ -0,0 +1 @@ +CONFIG_X86_X2APIC=y diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_AGDI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_AGDI new file mode 100644 index 0000000000000000000000000000000000000000..5f0eaeae8aebcda69e5f846e4b440f6349f83058 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_AGDI @@ -0,0 +1 @@ +CONFIG_ACPI_AGDI=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_BGRT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_BGRT new file mode 100644 index 0000000000000000000000000000000000000000..0eb2d78651d3d4794bd1dd8b3d53c3202fd583eb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_BGRT @@ -0,0 +1 @@ +# CONFIG_ACPI_BGRT is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ new file mode 100644 index 0000000000000000000000000000000000000000..701191bf6ecf86d1cde3ebec89de854169b06926 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ @@ -0,0 +1 @@ +CONFIG_ACPI_CPPC_CPUFREQ=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_DOCK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_DOCK new file mode 100644 index 0000000000000000000000000000000000000000..39ad17e2b7c12a7ba928ae0c7af2c1c91279c407 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_DOCK @@ -0,0 +1 @@ +# CONFIG_ACPI_DOCK is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_EC_DEBUGFS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_EC_DEBUGFS new file mode 100644 index 0000000000000000000000000000000000000000..ee9b2fca882f7e3456b2515ad5250951f9b32b01 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_EC_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_ACPI_EC_DEBUGFS is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_REDUCED_HARDWARE_ONLY b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_REDUCED_HARDWARE_ONLY new file mode 100644 index 0000000000000000000000000000000000000000..5b4c7c472ba03bff707658852b6c656a514b5ab6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_REDUCED_HARDWARE_ONLY @@ -0,0 +1 @@ +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_TAD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_TAD new file mode 100644 index 0000000000000000000000000000000000000000..fb9c9ae81ac578db1940988cf1fb5d3e17b80623 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_TAD @@ -0,0 +1 @@ +# CONFIG_ACPI_TAD is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_TEGRA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_TEGRA new file mode 100644 index 0000000000000000000000000000000000000000..30ac5b5e32c0333f65eacb52001c4cbbb521a270 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_TEGRA @@ -0,0 +1 @@ +# CONFIG_AHCI_TEGRA is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_XGENE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_XGENE new file mode 100644 index 0000000000000000000000000000000000000000..d8d54113b6ef7a6b5af62bc9b36184e42f905b53 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_XGENE @@ -0,0 +1 @@ +CONFIG_AHCI_XGENE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ALIBABA_UNCORE_DRW_PMU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ALIBABA_UNCORE_DRW_PMU new file mode 100644 index 0000000000000000000000000000000000000000..abe6d16f12a3542e68886b845d0cf5b207605251 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ALIBABA_UNCORE_DRW_PMU @@ -0,0 +1 @@ +CONFIG_ALIBABA_UNCORE_DRW_PMU=m diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_FORCE_MAX_ORDER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_FORCE_MAX_ORDER new file mode 100644 index 0000000000000000000000000000000000000000..87511982f8e30cc5c48eeaffece04c143a13c7c1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_FORCE_MAX_ORDER @@ -0,0 +1 @@ +CONFIG_ARCH_FORCE_MAX_ORDER=10 diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA new file mode 100644 index 0000000000000000000000000000000000000000..3b74d56f4c1173b59679eec863f92dfa1a61869f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA @@ -0,0 +1 @@ +CONFIG_ARCH_TEGRA=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA_241_SOC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA_241_SOC new file mode 100644 index 0000000000000000000000000000000000000000..253aee154ee7f9092655208e8e9a1bbf48d0b182 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA_241_SOC @@ -0,0 +1 @@ +CONFIG_ARCH_TEGRA_241_SOC=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ACPI_PARKING_PROTOCOL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ACPI_PARKING_PROTOCOL new file mode 100644 index 0000000000000000000000000000000000000000..bdb51423da776f6da031e246e7b11d6957900bfd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ACPI_PARKING_PROTOCOL @@ -0,0 +1 @@ +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_AMU_EXTN b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_AMU_EXTN new file mode 100644 index 0000000000000000000000000000000000000000..e16916ceca225668ac29593f518c54bb86dd6e81 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_AMU_EXTN @@ -0,0 +1 @@ +CONFIG_ARM64_AMU_EXTN=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE new file mode 100644 index 0000000000000000000000000000000000000000..23a09e20f027b625cfe11cac4b1b234bdb88854f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE @@ -0,0 +1 @@ +CONFIG_ARM64_CONTPTE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_DEBUG_PRIORITY_MASKING b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_DEBUG_PRIORITY_MASKING new file mode 100644 index 0000000000000000000000000000000000000000..3eb3dbd290a8cd86da0fbfba445469efd71b58cf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_DEBUG_PRIORITY_MASKING @@ -0,0 +1 @@ +# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_EPAN b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_EPAN new file mode 100644 index 0000000000000000000000000000000000000000..625b39cb938103bb3b86309d9b02fb77dd8f9397 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_EPAN @@ -0,0 +1 @@ +CONFIG_ARM64_EPAN=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1024718 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1024718 new file mode 100644 index 0000000000000000000000000000000000000000..1c8f6ffc8379980f80335db28d4bc564087d2968 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1024718 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1024718=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1165522 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1165522 new file mode 100644 index 
0000000000000000000000000000000000000000..454fb1ecca77db3bfc6b5345713a1ae77b13572b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1165522 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1165522=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1286807 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1286807 new file mode 100644 index 0000000000000000000000000000000000000000..e35f8566910eb6573d5ab9575879b630cf8f7c95 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1286807 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1286807=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1319367 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1319367 new file mode 100644 index 0000000000000000000000000000000000000000..3c76daab59ebc23011590851310533e9ada83da8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1319367 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1319367=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1418040 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1418040 new file mode 100644 index 0000000000000000000000000000000000000000..06e9dc0ef82101c30feecd93d5a9991007b3106d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1418040 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1418040=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1463225 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1463225 new file mode 100644 index 0000000000000000000000000000000000000000..3e0b01e04788ce20f0a6e840e14da0e921f7d9cb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1463225 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1463225=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1508412 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1508412 new file mode 100644 index 0000000000000000000000000000000000000000..241640b0fee7104eecc1b9b484d5293f5b706001 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1508412 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1508412=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1530923 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1530923 new file mode 100644 index 0000000000000000000000000000000000000000..4cc427c4a91b2ff7140d99fa610324561a5cd7a2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1530923 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1530923=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1542419 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1542419 new file mode 100644 index 0000000000000000000000000000000000000000..fc1543d3ad9057130682fee43774d3889f7793e9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1542419 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1542419=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1742098 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1742098 new file mode 100644 index 0000000000000000000000000000000000000000..906c998532df0297e3e779fc0995e4e4aef20cd7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1742098 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1742098=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2051678 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2051678 new file mode 100644 index 0000000000000000000000000000000000000000..f511d7ca5d3a4b38b149bde37464a6c52c8a9250 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2051678 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2051678=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2054223 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2054223 new file mode 100644 index 0000000000000000000000000000000000000000..7c24dc2ce9cce798b7609da708e357dfda868f2c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2054223 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2054223=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2067961 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2067961 new file mode 100644 index 0000000000000000000000000000000000000000..abae3e10fe1d1d9a2db3b1147feffcf0a2725bcb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2067961 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2067961=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2077057 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2077057 new file mode 100644 index 0000000000000000000000000000000000000000..fdbc56319ebe3e434abc85e2f6a01a6f00ab695e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2077057 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2077057=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441007 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441007 new file mode 100644 index 0000000000000000000000000000000000000000..6525de1c21423026afcf76c3ecc44cea8c09d7f1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441007 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2441007=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441009 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441009 new file mode 100644 index 0000000000000000000000000000000000000000..206bfce41c51be417bddf41a2a42b923271f74f5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441009 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2441009=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2457168 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2457168 new file mode 100644 index 0000000000000000000000000000000000000000..8fa26ac1ec03f83bbf83c3b1c6c41e982e84c644 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2457168 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2457168=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2645198 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2645198 new file mode 100644 index 0000000000000000000000000000000000000000..a908ff25e11dc3b68a05a50a800734ec9207a9d4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2645198 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2645198=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2658417 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2658417 new file mode 100644 index 0000000000000000000000000000000000000000..6caad365482a7bbace84ebacad033a28107ff3ed --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2658417 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2658417=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2966298 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2966298 new file mode 100644 index 0000000000000000000000000000000000000000..830700ac32cb011ebe9037c4475503c1a982ded2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2966298 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2966298=y diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3117295 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3117295 new file mode 100644 index 0000000000000000000000000000000000000000..953b476f9b301442ef5c1dc51b19de95e0dcaea3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3117295 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_3117295=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 new file mode 100644 index 0000000000000000000000000000000000000000..f6f6f286638d98a5af6ad13933603ae54440f053 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_3194386=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_819472 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_819472 new file mode 100644 index 0000000000000000000000000000000000000000..e53fbbbe50a4d5b134e59e54b0801bb9dbe6bc33 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_819472 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_819472=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_824069 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_824069 new file mode 100644 index 0000000000000000000000000000000000000000..2b4897f441f35c6707fbe8f4342138f230b8cde1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_824069 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_824069=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_826319 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_826319 new file mode 100644 index 0000000000000000000000000000000000000000..8a3b9aaaf871311add77fb29096ee60bf0bd2c1c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_826319 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_826319=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_827319 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_827319 new file mode 100644 index 0000000000000000000000000000000000000000..d341b420c3fb6f69d41442ec38350123e7d65a22 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_827319 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_827319=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_832075 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_832075 new file mode 100644 index 0000000000000000000000000000000000000000..9d1f8fcd7c01aa63adb66ab40c84a42c02976170 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_832075 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_832075=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_834220 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_834220 new file mode 100644 index 0000000000000000000000000000000000000000..a4bf47d176aa7a0bb7c26cc19ed914a3f899a065 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_834220 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_834220=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_843419 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_843419 new file mode 100644 index 0000000000000000000000000000000000000000..303cae7624da92e9cb596e12ae08e9da1aa25618 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_843419 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_843419=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_845719 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_845719 new file mode 100644 index 
0000000000000000000000000000000000000000..c223b74d76ac3137b16e40df118854caaf2c8012 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_845719 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_845719=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_PMEM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_PMEM new file mode 100644 index 0000000000000000000000000000000000000000..f2e1b130ff17f47981660351556ac459d1c0a830 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_PMEM @@ -0,0 +1 @@ +CONFIG_ARM64_PMEM=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SW_TTBR0_PAN b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SW_TTBR0_PAN new file mode 100644 index 0000000000000000000000000000000000000000..3b878e832c908cc3f254780d2ebf4baa3231f1bd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SW_TTBR0_PAN @@ -0,0 +1 @@ +# CONFIG_ARM64_SW_TTBR0_PAN is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_TAGGED_ADDR_ABI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_TAGGED_ADDR_ABI new file mode 100644 index 0000000000000000000000000000000000000000..478c311de7aac565fa05c4c44246125569937956 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_TAGGED_ADDR_ABI @@ -0,0 +1 @@ +CONFIG_ARM64_TAGGED_ADDR_ABI=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARMV8_DEPRECATED b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARMV8_DEPRECATED new file mode 100644 index 0000000000000000000000000000000000000000..ed87b7c75bfa22ce5058b973faafe68a12992127 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARMV8_DEPRECATED @@ -0,0 +1 @@ +# CONFIG_ARMV8_DEPRECATED is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU new file mode 100644 index 0000000000000000000000000000000000000000..3d62948536517999f177513cc4a425b2aa535c51 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU @@ -0,0 +1 @@ +# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DMC620_PMU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DMC620_PMU new file mode 100644 index 0000000000000000000000000000000000000000..a7163f24fe2d9aa9722d2e01d3a19fe09fcbf36f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DMC620_PMU @@ -0,0 +1 @@ +# CONFIG_ARM_DMC620_PMU is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DSU_PMU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DSU_PMU new file mode 100644 index 0000000000000000000000000000000000000000..9c56fdf217b6c6464c2949a4b2f4e1bf8fa7f816 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DSU_PMU @@ -0,0 +1 @@ +CONFIG_ARM_DSU_PMU=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_MHU_V2 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_MHU_V2 new file mode 100644 index 0000000000000000000000000000000000000000..3cf36797cdd4d348b44f555a114c1599d0abe233 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_MHU_V2 @@ -0,0 +1 @@ +# CONFIG_ARM_MHU_V2 is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_PMUV3 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_PMUV3 new file mode 100644 index 0000000000000000000000000000000000000000..7183292c347edf548d621ab6276a1351bd93a80c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_PMUV3 @@ -0,0 +1 @@ +CONFIG_ARM_PMUV3=y diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_CPUFREQ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_CPUFREQ new file mode 100644 index 0000000000000000000000000000000000000000..f2b7060d62abd0c63d7fee941b15235e33205ccd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_CPUFREQ @@ -0,0 +1 @@ +CONFIG_ARM_SCPI_CPUFREQ=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_POWER_DOMAIN b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_POWER_DOMAIN new file mode 100644 index 0000000000000000000000000000000000000000..b1f92c0d44e7ef1aa80bfdc1fea0bb6365f58fcf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_POWER_DOMAIN @@ -0,0 +1 @@ +CONFIG_ARM_SCPI_POWER_DOMAIN=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_PROTOCOL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_PROTOCOL new file mode 100644 index 0000000000000000000000000000000000000000..29ef036c5e67c76933ea8b2ffadb06075fa97c3f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_PROTOCOL @@ -0,0 +1 @@ +CONFIG_ARM_SCPI_PROTOCOL=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SDE_INTERFACE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SDE_INTERFACE new file mode 100644 index 0000000000000000000000000000000000000000..b33609158a6ab14add5dcb27c7a941a007a8e48e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SDE_INTERFACE @@ -0,0 +1 @@ +CONFIG_ARM_SDE_INTERFACE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMC_WATCHDOG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMC_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..5292578ce994b82860035f45dd426a06b99f84f1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMC_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_ARM_SMC_WATCHDOG is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..1f0c3b533ef91d45d08187af1fef43e2cd92b365 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT @@ -0,0 +1 @@ +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SP805_WATCHDOG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SP805_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..24d9b92d0c9b336b2bdbabb422ca47112206eb37 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SP805_WATCHDOG @@ -0,0 +1 @@ +CONFIG_ARM_SP805_WATCHDOG=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ATA_OVER_ETH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ATA_OVER_ETH new file mode 100644 index 0000000000000000000000000000000000000000..4742a627864d02714544445d9772c0086c2f40ae --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ATA_OVER_ETH @@ -0,0 +1 @@ +# CONFIG_ATA_OVER_ETH is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_BLK_DEV_PCIESSD_MTIP32XX b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_BLK_DEV_PCIESSD_MTIP32XX new file mode 100644 index 0000000000000000000000000000000000000000..82024346d771b7fdf3027188febf0e0a4fbcb274 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_BLK_DEV_PCIESSD_MTIP32XX @@ -0,0 +1 @@ +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_22375 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_22375 new file mode 100644 index 
0000000000000000000000000000000000000000..e173734bd7ff4298d36619c30372f9568b989e8d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_22375 @@ -0,0 +1 @@ +CONFIG_CAVIUM_ERRATUM_22375=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23144 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23144 new file mode 100644 index 0000000000000000000000000000000000000000..59e70bcee7fbf23ecd484c8f813305d8e2ee8cc9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23144 @@ -0,0 +1 @@ +CONFIG_CAVIUM_ERRATUM_23144=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23154 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23154 new file mode 100644 index 0000000000000000000000000000000000000000..04080e787ef6ebf380f1bde19c862fc29cdfdd87 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23154 @@ -0,0 +1 @@ +CONFIG_CAVIUM_ERRATUM_23154=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_27456 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_27456 new file mode 100644 index 0000000000000000000000000000000000000000..540c843070abc7ff7888162efd5e00a64aad2114 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_27456 @@ -0,0 +1 @@ +CONFIG_CAVIUM_ERRATUM_27456=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_30115 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_30115 new file mode 100644 index 0000000000000000000000000000000000000000..e3f4218af66520c3e8a2e63903c1982c810949df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_30115 @@ -0,0 +1 @@ +CONFIG_CAVIUM_ERRATUM_30115=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_TX2_ERRATUM_219 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_TX2_ERRATUM_219 new file mode 100644 index 0000000000000000000000000000000000000000..b7f72514b8327ff2ad065280ed3538706f7f20eb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_TX2_ERRATUM_219 @@ -0,0 +1 @@ +CONFIG_CAVIUM_TX2_ERRATUM_219=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMA_SIZE_MBYTES b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMA_SIZE_MBYTES new file mode 100644 index 0000000000000000000000000000000000000000..cc0c4e7eefc572fb21121d465ba5078dda7a207b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMA_SIZE_MBYTES @@ -0,0 +1 @@ +CONFIG_CMA_SIZE_MBYTES=64 diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE new file mode 100644 index 0000000000000000000000000000000000000000..f218410dcca613b90f9696572968c92a46990a5d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE @@ -0,0 +1 @@ +CONFIG_CMDLINE="console=ttyAMA0" diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FORCE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FORCE new file mode 100644 index 0000000000000000000000000000000000000000..db59f377052d65effaf15a5d7cdf9ca04dae871b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FORCE @@ -0,0 +1 @@ +# CONFIG_CMDLINE_FORCE is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FROM_BOOTLOADER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FROM_BOOTLOADER new file mode 100644 index 0000000000000000000000000000000000000000..251fe61ac21a9fc64d6fd97082e9cd0d81acc540 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FROM_BOOTLOADER @@ -0,0 +1 @@ +CONFIG_CMDLINE_FROM_BOOTLOADER=y diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CATU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CATU new file mode 100644 index 0000000000000000000000000000000000000000..160c1a367badf920669144fe2fc059ad8cb17616 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CATU @@ -0,0 +1 @@ +CONFIG_CORESIGHT_CATU=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..05ee4b1530f6150092f16c6dcf93a331abf0c387 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG @@ -0,0 +1 @@ +CONFIG_CORESIGHT_CPU_DEBUG=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON new file mode 100644 index 0000000000000000000000000000000000000000..9fda80dcc73957cd05f41abfc33ac33911d6333a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON @@ -0,0 +1 @@ +# CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI new file mode 100644 index 0000000000000000000000000000000000000000..da3d7a8beae5847c1c6f958783909643c4d8a303 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI @@ -0,0 +1 @@ +CONFIG_CORESIGHT_CTI=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI_INTEGRATION_REGS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI_INTEGRATION_REGS new file mode 100644 index 0000000000000000000000000000000000000000..d65a71a185cef589d5c71045c5445479dec4d147 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI_INTEGRATION_REGS @@ -0,0 +1 @@ +CONFIG_CORESIGHT_CTI_INTEGRATION_REGS=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINKS_AND_SINKS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINKS_AND_SINKS new file mode 100644 index 0000000000000000000000000000000000000000..c1885e26676df0a872602e3b604eaa97ff606a01 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINKS_AND_SINKS @@ -0,0 +1 @@ +CONFIG_CORESIGHT_LINKS_AND_SINKS=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINK_AND_SINK_TMC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINK_AND_SINK_TMC new file mode 100644 index 0000000000000000000000000000000000000000..af3cefcef6b2d5800251ea3f6d0b5bf36e3a2aff --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINK_AND_SINK_TMC @@ -0,0 +1 @@ +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_ETBV10 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_ETBV10 new file mode 100644 index 0000000000000000000000000000000000000000..e2179b9b003ba14d245e8463d78a461a6156b686 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_ETBV10 @@ -0,0 +1 @@ +CONFIG_CORESIGHT_SINK_ETBV10=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_TPIU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_TPIU new file mode 100644 index 0000000000000000000000000000000000000000..3875bb704ffe3029e0d3fc429eb0ea1f35c3dd3c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_TPIU @@ -0,0 +1 @@ +CONFIG_CORESIGHT_SINK_TPIU=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SOURCE_ETM4X 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SOURCE_ETM4X new file mode 100644 index 0000000000000000000000000000000000000000..7989081534a0aa808a9b283956588a0841e67274 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SOURCE_ETM4X @@ -0,0 +1 @@ +CONFIG_CORESIGHT_SOURCE_ETM4X=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_STM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_STM new file mode 100644 index 0000000000000000000000000000000000000000..742eeaa7521c805b0c57af1cbee2f5a8d7d65254 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_STM @@ -0,0 +1 @@ +CONFIG_CORESIGHT_STM=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPUMASK_OFFSTACK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPUMASK_OFFSTACK new file mode 100644 index 0000000000000000000000000000000000000000..6e4f61ead8625043db224c5339e73f539f779f12 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPUMASK_OFFSTACK @@ -0,0 +1 @@ +# CONFIG_CPUMASK_OFFSTACK is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_BIG_ENDIAN b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_BIG_ENDIAN new file mode 100644 index 0000000000000000000000000000000000000000..be479cf3825039a2f59d50d4087529a0ed5ffbc2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_BIG_ENDIAN @@ -0,0 +1 @@ +# CONFIG_CPU_BIG_ENDIAN is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE new file mode 100644 index 0000000000000000000000000000000000000000..87da942a3967d40a7aa2dd934c864c12b83ef0c3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE @@ -0,0 +1 @@ +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND new file mode 100644 index 0000000000000000000000000000000000000000..39bec58842ddd3509d80ac8635eaf67f9204f49b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND @@ -0,0 +1 @@ +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_THERMAL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_THERMAL new file mode 100644 index 0000000000000000000000000000000000000000..25d6ba24864b6561e7602abc7345439e317c10c8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_THERMAL @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_THERMAL=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_THERMAL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_THERMAL new file mode 100644 index 0000000000000000000000000000000000000000..fa1c22af3fa8419515b7f460911036a36f9f24a3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_THERMAL @@ -0,0 +1 @@ +CONFIG_CPU_THERMAL=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64 new file mode 100644 index 0000000000000000000000000000000000000000..dd0ae2c1dc82824f8f1f8568445bc51d7ee53905 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_BS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_BS new file mode 100644 index 
0000000000000000000000000000000000000000..6e04273531a289e9754843cc612dbb825797241c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_BS @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64_BS=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE new file mode 100644 index 0000000000000000000000000000000000000000..720099599c6b191c531cc96b1eb3885be2cf04dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64_CE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_BLK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_BLK new file mode 100644 index 0000000000000000000000000000000000000000..b07b273c0819b876daf20758c537008f1c769511 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_BLK @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_CCM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_CCM new file mode 100644 index 0000000000000000000000000000000000000000..20ef5afff9d85735519174d29787a92446cb5e2a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_CCM @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_NEON_BLK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_NEON_BLK new file mode 100644 index 0000000000000000000000000000000000000000..2ac813860483e6e166e771eb1b57e2475303cd3d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_NEON_BLK @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CHACHA20_NEON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CHACHA20_NEON new file mode 100644 index 0000000000000000000000000000000000000000..c680f1a05b3a59ec833cb80098e3d1e033fcc0d1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CHACHA20_NEON @@ -0,0 +1 @@ +CONFIG_CRYPTO_CHACHA20_NEON=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CRCT10DIF_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CRCT10DIF_ARM64_CE new file mode 100644 index 0000000000000000000000000000000000000000..c59d53cfb35c35f49c811ec568af1201941084d3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CRCT10DIF_ARM64_CE @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CURVE25519 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CURVE25519 new file mode 100644 index 0000000000000000000000000000000000000000..7ddf015cc516e799a76d643079600c052d8bec15 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CURVE25519 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CURVE25519=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXX b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXX new file mode 100644 index 0000000000000000000000000000000000000000..4af70dce5453ce751b51a5f14a339de78bc9c90f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXX @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXXVF b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXXVF new file mode 100644 index 0000000000000000000000000000000000000000..be5e5622359094d6ea3f41ad13902768d61f537f --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXXVF
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62X b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62X
new file mode 100644
index 0000000000000000000000000000000000000000..358ca4222b3c2bc65a68c003f4bc51f861124e26
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62X
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_DEV_QAT_C62X is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62XVF b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62XVF
new file mode 100644
index 0000000000000000000000000000000000000000..6b24e553f52ff98913077a6798d75c452344042a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62XVF
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCC
new file mode 100644
index 0000000000000000000000000000000000000000..f7cb83cd2f37e4aeae4003a3881c390d17abb3c0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCC
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF
new file mode 100644
index 0000000000000000000000000000000000000000..9e540c16040d7302dbacdcf3e455127b1c2a4468
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_ECDSA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_ECDSA
new file mode 100644
index 0000000000000000000000000000000000000000..3fb9c1b9e1f4fbd89a8a1635d22f43468dbf40f2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_ECDSA
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_ECDSA is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_GHASH_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_GHASH_ARM64_CE
new file mode 100644
index 0000000000000000000000000000000000000000..f2db2f21a866b17f1c69aa1f941176e84409f781
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_GHASH_ARM64_CE
@@ -0,0 +1 @@
+CONFIG_CRYPTO_GHASH_ARM64_CE=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_POLY1305_NEON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_POLY1305_NEON
new file mode 100644
index 0000000000000000000000000000000000000000..9dba23aee2d7fc3e63d476f556ddc6340b024abe
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_POLY1305_NEON
@@ -0,0 +1 @@
+CONFIG_CRYPTO_POLY1305_NEON=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA1_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA1_ARM64_CE
new file mode 100644
index 0000000000000000000000000000000000000000..8923fcedfa9859aacd0b9e4d907220745a19b501
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA1_ARM64_CE
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SHA1_ARM64_CE=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA256_ARM64 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA256_ARM64
new file mode 100644
index 0000000000000000000000000000000000000000..ba32f8501a377176a41624d68c490b153ca70910
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA256_ARM64
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SHA256_ARM64=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA2_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA2_ARM64_CE
new file mode 100644
index 0000000000000000000000000000000000000000..7c87d7f4cc4c250397e928ac4069cf8e96e0d4f1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA2_ARM64_CE
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SHA2_ARM64_CE=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_ARM64_CE
new file mode 100644
index 0000000000000000000000000000000000000000..93a6a00f40e6fddfadfaa5411af45c1ed87f07e6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_ARM64_CE
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM3_ARM64_CE=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_NEON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_NEON
new file mode 100644
index 0000000000000000000000000000000000000000..d4345460d87f389fb2e0d9b9c1aef9127c1188d5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_NEON
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM3_NEON=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE
new file mode 100644
index 0000000000000000000000000000000000000000..5aa28cca8cec55a49b410099161a727a2abac922
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM4_ARM64_CE=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_BLK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_BLK
new file mode 100644
index 0000000000000000000000000000000000000000..979299a7a83381c1171e6b69108aa27def61ce48
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_BLK
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM4_ARM64_CE_BLK=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_CCM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_CCM
new file mode 100644
index 0000000000000000000000000000000000000000..7ea8b7e6eb6536b48dd3c77bdc553b355de34b5e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_CCM
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_GCM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_GCM
new file mode 100644
index 0000000000000000000000000000000000000000..4bde9d25faec23981f4e3703f2a28c5b3ad92093
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_GCM
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_NEON_BLK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_NEON_BLK
new file mode 100644
index 0000000000000000000000000000000000000000..f1f668a9fb3cd5757b84c4f9a6c9e6a19baf07ce
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_NEON_BLK
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM4_ARM64_NEON_BLK=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEBUG_PERF_USE_VMALLOC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEBUG_PERF_USE_VMALLOC
new file mode 100644
index 0000000000000000000000000000000000000000..3aeeb5cd4251b1cc28985885892bb056d7291af5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEBUG_PERF_USE_VMALLOC
@@ -0,0 +1 @@
+CONFIG_DEBUG_PERF_USE_VMALLOC=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVPORT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVPORT
new file mode 100644
index 0000000000000000000000000000000000000000..555cf4be4d7745348f948502e8230ef45d86740f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVPORT
@@ -0,0 +1 @@
+# CONFIG_DEVPORT is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX
new file mode 100644
index 0000000000000000000000000000000000000000..77478a2130758ac76e353bba19af827c7e041fda
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX
@@ -0,0 +1 @@
+CONFIG_DEV_DAX=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_KMEM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_KMEM
new file mode 100644
index 0000000000000000000000000000000000000000..755061fbf2ebb734d75ccbded8fccd9d56c0148b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_KMEM
@@ -0,0 +1 @@
+# CONFIG_DEV_DAX_KMEM is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_PMEM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_PMEM
new file mode 100644
index 0000000000000000000000000000000000000000..8c7fd6732ccda5a2e068db242d83b851dcb55932
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_PMEM
@@ -0,0 +1 @@
+CONFIG_DEV_DAX_PMEM=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DLM_DEBUG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DLM_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..6d3eed26fd28e54c78f373c425dc87318ca727e8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DLM_DEBUG
@@ -0,0 +1 @@
+# CONFIG_DLM_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS
new file mode 100644
index 0000000000000000000000000000000000000000..f4f69ff9b0ca40b646fe8dedfe044827be6037d6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS
@@ -0,0 +1 @@
+CONFIG_DMABUF_HEAPS=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS_SYSTEM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS_SYSTEM
new file mode 100644
index 0000000000000000000000000000000000000000..f89629641a9c63dce19b4ec171aa056da8de88cf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS_SYSTEM
@@ -0,0 +1 @@
+CONFIG_DMABUF_HEAPS_SYSTEM=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_CIK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_CIK
new file mode 100644
index 0000000000000000000000000000000000000000..6f3da0bab76830c0dac53108e57be69c2ab2d8a1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_CIK
@@ -0,0 +1 @@
+CONFIG_DRM_AMDGPU_CIK=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_USERPTR b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_USERPTR
new file mode 100644
index 0000000000000000000000000000000000000000..4134231af34a2b01fcd0f537778710ab473bb292
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_USERPTR
@@ -0,0 +1 @@
+CONFIG_DRM_AMDGPU_USERPTR=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMD_ACP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMD_ACP
new file mode 100644
index 0000000000000000000000000000000000000000..dfff6b592b1892fa2b2dd081da714bf8133fb71e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMD_ACP
@@ -0,0 +1 @@
+CONFIG_DRM_AMD_ACP=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_DP_AUX_CHARDEV b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_DP_AUX_CHARDEV
new file mode 100644
index 0000000000000000000000000000000000000000..d875280a04a19396fdf22e3823ed5f69e3dafbc9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_DP_AUX_CHARDEV
@@ -0,0 +1 @@
+CONFIG_DRM_DP_AUX_CHARDEV=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_PHYTIUM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_PHYTIUM
new file mode 100644
index 0000000000000000000000000000000000000000..a663e2800867b350cdc161cbc5a72d79bfd8d640
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_PHYTIUM
@@ -0,0 +1 @@
+CONFIG_DRM_PHYTIUM=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_VMWGFX b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_VMWGFX
new file mode 100644
index 0000000000000000000000000000000000000000..4e22be4d92e64f005a14e9957498608136b72fd7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_VMWGFX
@@ -0,0 +1 @@
+# CONFIG_DRM_VMWGFX is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DWC_PCIE_PMU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DWC_PCIE_PMU
new file mode 100644
index 0000000000000000000000000000000000000000..041de175cfc074d9dea7dd0dbd7cb93a694d0736
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DWC_PCIE_PMU
@@ -0,0 +1 @@
+CONFIG_DWC_PCIE_PMU=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DW_DMAC_PCI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DW_DMAC_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..c8fcf14854980534f1bfe73a929ebc8229ed6d2f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DW_DMAC_PCI
@@ -0,0 +1 @@
+CONFIG_DW_DMAC_PCI=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EDAC_XGENE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EDAC_XGENE
new file mode 100644
index 0000000000000000000000000000000000000000..dbadc3572072843107096b6930755835b671b2b1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EDAC_XGENE
@@ -0,0 +1 @@
+CONFIG_EDAC_XGENE=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_ARMSTUB_DTB_LOADER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_ARMSTUB_DTB_LOADER
new file mode 100644
index 0000000000000000000000000000000000000000..d729d29d3261b5049ab50906a31afaae010153b2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_ARMSTUB_DTB_LOADER
@@ -0,0 +1 @@
+CONFIG_EFI_ARMSTUB_DTB_LOADER=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_COCO_SECRET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_COCO_SECRET
new file mode 100644
index 0000000000000000000000000000000000000000..58c96a0ad9b737754ba12476f3ea94c17c9849a8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_COCO_SECRET
@@ -0,0 +1 @@
+# CONFIG_EFI_COCO_SECRET is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ETM4X_IMPDEF_FEATURE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ETM4X_IMPDEF_FEATURE
new file mode 100644
index 0000000000000000000000000000000000000000..bcdf7b1f5218fdaa806f98e473b8dfb11132ec60
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ETM4X_IMPDEF_FEATURE
@@ -0,0 +1 @@
+CONFIG_ETM4X_IMPDEF_FEATURE=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EXTCON_GPIO b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EXTCON_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..543d0e0d5880b50e8ad5c1170558ab1b55a7c89a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EXTCON_GPIO
@@ -0,0 +1 @@
+CONFIG_EXTCON_GPIO=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_HYPERV b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_HYPERV
new file mode 100644
index 0000000000000000000000000000000000000000..5f8bd917319bb5e18c334a78dc0595c3062e5e12
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_HYPERV
@@ -0,0 +1 @@
+# CONFIG_FB_HYPERV is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SIMPLE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SIMPLE
new file mode 100644
index 0000000000000000000000000000000000000000..0eb3d04225f1530afe4d1714297ccd0d3291527d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SIMPLE
@@ -0,0 +1 @@
+CONFIG_FB_SIMPLE=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SSD1307 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SSD1307
new file mode 100644
index 0000000000000000000000000000000000000000..efd995bdfe8a304c7ee0cf3fa4b4a61ed030aa99
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SSD1307
@@ -0,0 +1 @@
+CONFIG_FB_SSD1307=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FIRMWARE_EDID b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FIRMWARE_EDID
new file mode 100644
index 0000000000000000000000000000000000000000..7c3c659ce6bd2fe8c70b0f996ecb765c713c90bc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FIRMWARE_EDID
@@ -0,0 +1 @@
+# CONFIG_FIRMWARE_EDID is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION
new file mode 100644
index 0000000000000000000000000000000000000000..cc041e55918212ee6c761ed60b330c56b8856fb3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION
@@ -0,0 +1 @@
+CONFIG_FRAME_POINTER_VALIDATION=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUJITSU_ERRATUM_010001 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUJITSU_ERRATUM_010001
new file mode 100644
index 0000000000000000000000000000000000000000..122d7ac6ee73099066eaf07e5f2dac2397a9f8e6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUJITSU_ERRATUM_010001
@@ -0,0 +1 @@
+CONFIG_FUJITSU_ERRATUM_010001=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUNCTION_PROFILER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUNCTION_PROFILER
new file mode 100644
index 0000000000000000000000000000000000000000..5c1b6a2a73940562590467d609c47cf4a7c0486c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUNCTION_PROFILER
@@ -0,0 +1 @@
+# CONFIG_FUNCTION_PROFILER is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUSION_CTL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUSION_CTL
new file mode 100644
index 0000000000000000000000000000000000000000..f72467bd7804a4a87745a527e90461230828b3f2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUSION_CTL
@@ -0,0 +1 @@
+# CONFIG_FUSION_CTL is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_COMPRESS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_COMPRESS
new file mode 100644
index 0000000000000000000000000000000000000000..f95f93620f7096b71e7cf1352a6bf4b49cb1a823
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_COMPRESS
@@ -0,0 +1 @@
+# CONFIG_FW_LOADER_COMPRESS is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_USER_HELPER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_USER_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..686f9318790717d9fcaf0f757a9d204c69202268
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_USER_HELPER
@@ -0,0 +1 @@
+# CONFIG_FW_LOADER_USER_HELPER is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_ARCH_NUMA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_ARCH_NUMA
new file mode 100644
index 0000000000000000000000000000000000000000..d4e2f44b0d31b94e26c4a19392160212d0030c1f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_ARCH_NUMA
@@ -0,0 +1 @@
+CONFIG_GENERIC_ARCH_NUMA=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_IOREMAP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_IOREMAP
new file mode 100644
index 0000000000000000000000000000000000000000..3b77a1ac1f433129e67a2e73b8bc9360472e8368
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_IOREMAP
@@ -0,0 +1 @@
+CONFIG_GENERIC_IOREMAP=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED
new file mode 100644
index 0000000000000000000000000000000000000000..523d35dd4f5efd73b038b3236171226b4390cbb8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED
@@ -0,0 +1 @@
+CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GFS2_FS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GFS2_FS
new file mode 100644
index 0000000000000000000000000000000000000000..5f660149cb44357c4f022fcbab7396c8aa54773b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GFS2_FS
@@ -0,0 +1 @@
+# CONFIG_GFS2_FS is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_GENERIC_PLATFORM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_GENERIC_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..3ff4618e8f54eb0be18febe2ae98402a9c394a75
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_GENERIC_PLATFORM
@@ -0,0 +1 @@
+CONFIG_GPIO_GENERIC_PLATFORM=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_HISI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_HISI
new file mode 100644
index 0000000000000000000000000000000000000000..03090da19d95489d23d70a292c266e9689a36435
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_HISI
@@ -0,0 +1 @@
+CONFIG_GPIO_HISI=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_TEGRA186 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_TEGRA186
new file mode 100644
index 0000000000000000000000000000000000000000..55ab719f65ddaf9f6578959b5a32513074ae61ec
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_TEGRA186
@@ -0,0 +1 @@
+CONFIG_GPIO_TEGRA186=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISILICON_ERRATUM_161600802 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISILICON_ERRATUM_161600802
new file mode 100644
index 0000000000000000000000000000000000000000..86ef85f436087a2419381bb46e5353de8461ea7c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISILICON_ERRATUM_161600802
@@ -0,0 +1 @@
+CONFIG_HISILICON_ERRATUM_161600802=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_DMA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..57350600f3339107c73c5e0127459e6bb7e505ae
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_DMA
@@ -0,0 +1 @@
+# CONFIG_HISI_DMA is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL
new file mode 100644
index 0000000000000000000000000000000000000000..fe367d4ff6b59c12e2bbcd9cb60fb7487c764d70
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL
@@ -0,0 +1 @@
+CONFIG_HISI_THERMAL=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3
new file mode 100644
index 0000000000000000000000000000000000000000..80d338e39f2d9378f953184b7e986fdcc0dcbbdb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3
@@ -0,0 +1 @@
+CONFIG_HNS3=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_DCB b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_DCB
new file mode 100644
index 0000000000000000000000000000000000000000..fb3b91604256dbe2b326c54116adb5b05b786550
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_DCB
@@ -0,0 +1 @@
+CONFIG_HNS3_DCB=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_ENET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_ENET
new file mode 100644
index 0000000000000000000000000000000000000000..cf9aa0ff62f1ff64f6feda6bed93d5347e4f931e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_ENET
@@ -0,0 +1 @@
+CONFIG_HNS3_ENET=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGE
new file mode 100644
index 0000000000000000000000000000000000000000..dbbea3f5bc2dd72b0292303c78c5937d3979bed7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGE
@@ -0,0 +1 @@
+CONFIG_HNS3_HCLGE=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGEVF b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGEVF
new file mode 100644
index 0000000000000000000000000000000000000000..57ab09961c49c3300ee441db20f4213c78db0bd8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGEVF
@@ -0,0 +1 @@
+CONFIG_HNS3_HCLGEVF=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_DSAF b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_DSAF
new file mode 100644
index 0000000000000000000000000000000000000000..754ba14348bd57f167cd58429a4113c22a472951
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_DSAF
@@ -0,0 +1 @@
+CONFIG_HNS_DSAF=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_ENET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_ENET
new file mode 100644
index 0000000000000000000000000000000000000000..304f8cab460377b477647aa3ac804bbba82cc380
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_ENET
@@ -0,0 +1 @@
+CONFIG_HNS_ENET=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_MDIO b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_MDIO
new file mode 100644
index 0000000000000000000000000000000000000000..e34372a357bb60dca614e45eb930363c9db723e2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_MDIO
@@ -0,0 +1 @@
+CONFIG_HNS_MDIO=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HSA_AMD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HSA_AMD
new file mode 100644
index 0000000000000000000000000000000000000000..7a5701ba9ca4e4a3d2ebdf856907d14aab07615e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HSA_AMD
@@ -0,0 +1 @@
+CONFIG_HSA_AMD=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_BALLOON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_BALLOON
new file mode 100644
index 0000000000000000000000000000000000000000..c2575260ff601ff4d10eebdda393a06b201c8600
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_BALLOON
@@ -0,0 +1 @@
+# CONFIG_HYPERV_BALLOON is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_UTILS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_UTILS
new file mode 100644
index 0000000000000000000000000000000000000000..fc3232dbe55610d303e1ab4b53d51b58d023be2a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_UTILS
@@ -0,0 +1 @@
+# CONFIG_HYPERV_UTILS is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_VSOCKETS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_VSOCKETS
new file mode 100644
index 0000000000000000000000000000000000000000..ad138e65d22629840a49b26d60a308dee7d6b611
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_VSOCKETS
@@ -0,0 +1 @@
+# CONFIG_HYPERV_VSOCKETS is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE
new file mode 100644
index 0000000000000000000000000000000000000000..e17d76517bd149e7baf85663a37401a3a25da489
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE
@@ -0,0 +1 @@
+CONFIG_I2C_SLAVE=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE_EEPROM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE_EEPROM
new file mode 100644
index 0000000000000000000000000000000000000000..682e5476b4f4ebe6818bfdc885972e8af7dd4df2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE_EEPROM
@@ -0,0 +1 @@
+CONFIG_I2C_SLAVE_EEPROM=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I40E_DCB b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I40E_DCB
new file mode 100644
index 0000000000000000000000000000000000000000..b65fe813f69ad29c3b71e18b8358267878a513ca
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I40E_DCB
@@ -0,0 +1 @@
+# CONFIG_I40E_DCB is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INDIRECT_PIO b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INDIRECT_PIO
new file mode 100644
index 0000000000000000000000000000000000000000..56e0675cefa549c5c7243311ea27de7d8fe61621
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INDIRECT_PIO
@@ -0,0 +1 @@
+CONFIG_INDIRECT_PIO=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INFINIBAND_MTHCA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INFINIBAND_MTHCA
new file mode 100644
index 0000000000000000000000000000000000000000..a134e36a320a2a259079555d655dee43a0012e92
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INFINIBAND_MTHCA
@@ -0,0 +1 @@
+# CONFIG_INFINIBAND_MTHCA is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_IDMA64 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_IDMA64
new file mode 100644
index 0000000000000000000000000000000000000000..52e4d562a66c577114cc9472344d1a97298eef65
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_IDMA64
@@ -0,0 +1 @@
+# CONFIG_INTEL_IDMA64 is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_TH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_TH
new file mode 100644
index 0000000000000000000000000000000000000000..4b2285f565b08a3c1ea6a01d64d20de89ccf080d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_TH
@@ -0,0 +1 @@
+# CONFIG_INTEL_TH is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_DMA_STRICT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_DMA_STRICT
new file mode 100644
index 0000000000000000000000000000000000000000..173fa6d4dfa60fc396c48d517a57ac83a9ac01ae
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_DMA_STRICT
@@ -0,0 +1 @@
+CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_PASSTHROUGH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_PASSTHROUGH
new file mode 100644
index 0000000000000000000000000000000000000000..b71df81edd20449d674033134db1116d54da13a2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_PASSTHROUGH
@@ -0,0 +1 @@
+# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE
new file mode 100644
index 0000000000000000000000000000000000000000..73494e9d2af01a36a41c78278ce3aff1744e2a05
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE
@@ -0,0 +1 @@
+CONFIG_IOMMU_IO_PGTABLE_LPAE=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IP6_NF_TARGET_HL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IP6_NF_TARGET_HL
new file mode 100644
index 0000000000000000000000000000000000000000..d094bc611c561027407cbca09fea1b8b8e64f291
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IP6_NF_TARGET_HL
@@ -0,0 +1 @@
+# CONFIG_IP6_NF_TARGET_HL is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IP_DCCP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IP_DCCP
new file mode 100644
index 0000000000000000000000000000000000000000..6ecb43a3e349240ef01585717b0ee1ec8f853077
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IP_DCCP
@@ -0,0 +1 @@
+# CONFIG_IP_DCCP is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ISCSI_IBFT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ISCSI_IBFT
new file mode 100644
index 0000000000000000000000000000000000000000..0ca6169eeea2987a94b38ecf23e1281beb24a270
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ISCSI_IBFT
@@ -0,0 +1 @@
+# CONFIG_ISCSI_IBFT is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEXEC_IMAGE_VERIFY_SIG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEXEC_IMAGE_VERIFY_SIG
new file mode 100644
index 0000000000000000000000000000000000000000..1eeee81d686d58ffb64000d03686f752386fd061
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEXEC_IMAGE_VERIFY_SIG
@@ -0,0 +1 @@
+CONFIG_KEXEC_IMAGE_VERIFY_SIG=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUNPENG_HCCS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUNPENG_HCCS
new file mode 100644
index 0000000000000000000000000000000000000000..c8644d0f3c1bdae5a76d941f0bf39b3504e5d030
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUNPENG_HCCS
@@ -0,0 +1 @@
+CONFIG_KUNPENG_HCCS=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUSER_HELPERS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUSER_HELPERS
new file mode 100644
index 0000000000000000000000000000000000000000..b61b9dd53a35284982cc723c5a407ec208be6957
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUSER_HELPERS
@@ -0,0 +1 @@
+CONFIG_KUSER_HELPERS=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LOG_BUF_SHIFT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LOG_BUF_SHIFT
new file mode 100644
index 0000000000000000000000000000000000000000..7ed0134a53fef46dceffaa0012bc10badf6e957f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LOG_BUF_SHIFT
@@ -0,0 +1 @@
+CONFIG_LOG_BUF_SHIFT=20
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MELLANOX_PLATFORM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MELLANOX_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..900d0304defd0cced9b98232eb18d66754344f5e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MELLANOX_PLATFORM
@@ -0,0 +1 @@
+# CONFIG_MELLANOX_PLATFORM is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MLX5_CORE_IPOIB b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MLX5_CORE_IPOIB
new file mode 100644
index 0000000000000000000000000000000000000000..d78d82d65ae1db0987a9be48538b1f60a278e12d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MLX5_CORE_IPOIB
@@ -0,0 +1 @@
+CONFIG_MLX5_CORE_IPOIB=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD
new file mode 100644
index 0000000000000000000000000000000000000000..eab98dd5c438cbb69d42e599b7d32e8f2a98449d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD
@@ -0,0 +1 @@
+CONFIG_MTD=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD_SPI_NOR b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD_SPI_NOR
new file mode 100644
index 0000000000000000000000000000000000000000..5c6d557f10798e10346ddb2174d4401a92d9f89a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD_SPI_NOR
@@ -0,0 +1 @@
+CONFIG_MTD_SPI_NOR=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NET_VENDOR_HISILICON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NET_VENDOR_HISILICON
new file mode 100644
index 0000000000000000000000000000000000000000..0a881cc22b52d172149097293653996b60521ccc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NET_VENDOR_HISILICON
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_HISILICON=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NFS_V2 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NFS_V2
new file mode 100644
index 0000000000000000000000000000000000000000..f0f45e1802b6e67b0cf9ed9d38eee0743a0d8cd9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NFS_V2
@@ -0,0 +1 @@
+# CONFIG_NFS_V2 is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NVGRACE_GPU_VFIO_PCI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NVGRACE_GPU_VFIO_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..06d7e70c3aabfd877e66e483e3b435480aff6735
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NVGRACE_GPU_VFIO_PCI
@@ -0,0 +1 @@
+CONFIG_NVGRACE_GPU_VFIO_PCI=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_PID_IN_CONTEXTIDR b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_PID_IN_CONTEXTIDR
new file mode 100644
index 0000000000000000000000000000000000000000..29cefd2bda14cd454b4b9168a049e5d6d9ef74a7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_PID_IN_CONTEXTIDR
@@ -0,0 +1 @@
+CONFIG_PID_IN_CONTEXTIDR=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_POWERCAP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_POWERCAP
new file mode 100644
index 0000000000000000000000000000000000000000..24baf5a1b45fb11a2cbfcd16a9cde2cb85a08b9b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_POWERCAP
@@ -0,0 +1 @@
+# CONFIG_POWERCAP is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1003 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1003
new file mode 100644
index 0000000000000000000000000000000000000000..41768200c145547986e6d09a3f690f1e43a17b07
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1003
@@ -0,0 +1 @@
+CONFIG_QCOM_FALKOR_ERRATUM_1003=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1009 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1009
new file mode 100644
index 0000000000000000000000000000000000000000..1449efafbbd3c56234662e85876c85054a643797
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1009
@@ -0,0 +1 @@
+CONFIG_QCOM_FALKOR_ERRATUM_1009=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_E1041 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_E1041
new file mode 100644
index 0000000000000000000000000000000000000000..f0d31a87b34ddb42e3f06fb3bebabded5cd49ffe
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_E1041
@@ -0,0 +1 @@
+CONFIG_QCOM_FALKOR_ERRATUM_E1041=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_QDF2400_ERRATUM_0065 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_QDF2400_ERRATUM_0065
new file mode 100644
index 0000000000000000000000000000000000000000..dec9be97084bd9f6b6a2a0bed887a74ac5c36207
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_QDF2400_ERRATUM_0065
@@ -0,0 +1 @@
+CONFIG_QCOM_QDF2400_ERRATUM_0065=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_MODULE_REGION_FULL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_MODULE_REGION_FULL
new file mode 100644
index 0000000000000000000000000000000000000000..7645a371e7ef504dec714de319af4aa085565197
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_MODULE_REGION_FULL
@@ -0,0 +1 @@
+CONFIG_RANDOMIZE_MODULE_REGION_FULL=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RELR b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RELR
new file mode 100644
index 0000000000000000000000000000000000000000..a30c007c1182bdd725bb6ae427774a912ca0112d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RELR
@@ -0,0 +1 @@
+CONFIG_RELR=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RODATA_FULL_DEFAULT_ENABLED b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RODATA_FULL_DEFAULT_ENABLED
new file mode 100644
index 0000000000000000000000000000000000000000..4615f4e257b67761ee822ac927f94d404b80ffe1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RODATA_FULL_DEFAULT_ENABLED
@@ -0,0 +1 @@
+# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RTC_DRV_EFI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RTC_DRV_EFI
new file mode 100644
index 0000000000000000000000000000000000000000..e2a2dcd64d43bcbe51b78afb1b0822967537ec8a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RTC_DRV_EFI
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_EFI=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SATA_AHCI_SEATTLE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SATA_AHCI_SEATTLE
new file mode 100644
index 0000000000000000000000000000000000000000..dfcf03f079e0374fec27244f37069caf22de0bdb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SATA_AHCI_SEATTLE
@@ -0,0 +1 @@
+CONFIG_SATA_AHCI_SEATTLE=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SCSI_UFSHCD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SCSI_UFSHCD
new file mode 100644
index 0000000000000000000000000000000000000000..542c89fc7a886d124f50bbd5022d492b8529ebab
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SCSI_UFSHCD
@@ -0,0 +1 @@
+# CONFIG_SCSI_UFSHCD is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SDEI_WATCHDOG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SDEI_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..7c8fc508c4093398c617f3f115ee683dfc688362
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SDEI_WATCHDOG
@@ -0,0 +1 @@
+CONFIG_SDEI_WATCHDOG=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SERIO_AMBAKMI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SERIO_AMBAKMI
new file mode 100644
index 0000000000000000000000000000000000000000..ff9bebcb321ac58ddafed6422793df83d88c5e91
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SERIO_AMBAKMI
@@ -0,0 +1 @@
+CONFIG_SERIO_AMBAKMI=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SND b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SND
new file mode 100644
index 0000000000000000000000000000000000000000..5dfa0106a95205f99cda1057a6f0828494e3a1de
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SND
@@ -0,0 +1 @@
+# CONFIG_SND is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SOCIONEXT_SYNQUACER_PREITS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SOCIONEXT_SYNQUACER_PREITS
new file mode 100644
index 0000000000000000000000000000000000000000..ded5c358ea6105d3fd176f4030be4670d96b8a76
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SOCIONEXT_SYNQUACER_PREITS
@@ -0,0 +1 @@
+CONFIG_SOCIONEXT_SYNQUACER_PREITS=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_CADENCE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_CADENCE
new file mode 100644
index 0000000000000000000000000000000000000000..3a8bb168c76ce61a89676d93f9b543fb9cc125d9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_CADENCE
@@ -0,0 +1 @@
+CONFIG_SPI_CADENCE=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE
new file mode 100644
index 0000000000000000000000000000000000000000..990578fbfa5d4873d01490edcb72c9883c721d1f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE
@@ -0,0 +1 @@
+CONFIG_SPI_DESIGNWARE=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DW_MMIO b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DW_MMIO
new file mode 100644
index 0000000000000000000000000000000000000000..12b55415569766dcceaa73a7898a88d58776b10e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DW_MMIO
@@ -0,0 +1 @@
+CONFIG_SPI_DW_MMIO=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_HISI_KUNPENG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_HISI_KUNPENG
new file mode 100644
index 0000000000000000000000000000000000000000..6ad4401a4accd73fa75e0ce9a03ff292c8eeed03
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_HISI_KUNPENG
@@ -0,0 +1 @@
+CONFIG_SPI_HISI_KUNPENG=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_PL022 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_PL022
new file mode 100644
index 0000000000000000000000000000000000000000..67bd507db5562e67781bcd364f8fd8ba8ec18887
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_PL022
@@ -0,0 +1 @@
+CONFIG_SPI_PL022=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_QUP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_QUP
new file mode 100644
index 0000000000000000000000000000000000000000..55cd5d2b86a229ed10fb3cefd2006855b6757f65
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_QUP
@@ -0,0 +1 @@
+CONFIG_SPI_QUP=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_TEGRA210_QUAD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_TEGRA210_QUAD
new file mode 100644
index 0000000000000000000000000000000000000000..7e9fa5a882f30d38045bc30bd41d9ebb99e008ce
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_TEGRA210_QUAD
@@ -0,0 +1 @@
+CONFIG_SPI_TEGRA210_QUAD=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_XLP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_XLP
new file mode 100644
index 0000000000000000000000000000000000000000..6026d5f511d8f46240beea1aa06b8c6644dcd45c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_XLP
@@ -0,0 +1 @@
+CONFIG_SPI_XLP=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SQUASHFS_LZ4 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SQUASHFS_LZ4
new file mode 100644
index 0000000000000000000000000000000000000000..f45773f0c015ec955d4c4688c43e4524656627b3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SQUASHFS_LZ4
@@ -0,0 +1 @@
+CONFIG_SQUASHFS_LZ4=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION
new file mode 100644
index 0000000000000000000000000000000000000000..e335fefdd9bea889bf914a02f356084d8eaf7773
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION
@@ -0,0 +1 @@
+CONFIG_STACK_VALIDATION=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STAGING b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STAGING
new file mode 100644
index 0000000000000000000000000000000000000000..c53ae30fa9b48f57e37646c5ba93ee80f7435311
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STAGING
@@ -0,0 +1 @@
+CONFIG_STAGING=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_DUMMY b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_DUMMY
new file mode 100644
index 0000000000000000000000000000000000000000..704a19ecec34e6d122f00b229ce927fedb34e854
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_DUMMY
@@ -0,0 +1 @@
+# CONFIG_STM_DUMMY is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_BASIC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_BASIC
new file mode 100644
index 0000000000000000000000000000000000000000..7aed3f091d141640cd656fe4c877b65cadec97fa
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_BASIC
@@ -0,0 +1 @@
+# CONFIG_STM_PROTO_BASIC is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_SYS_T b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_SYS_T
new file mode 100644
index 0000000000000000000000000000000000000000..2dfea9b0aebfca8a2329552c48accf8958eeae54
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_SYS_T
@@ -0,0 +1 @@
+# CONFIG_STM_PROTO_SYS_T is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_CONSOLE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_CONSOLE
new file mode 100644
index 0000000000000000000000000000000000000000..aa10fd4769d8d601381714a6d9757eff09400861
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_CONSOLE
@@ -0,0 +1 @@
+# CONFIG_STM_SOURCE_CONSOLE is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_FTRACE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_FTRACE
new file mode 100644
index 0000000000000000000000000000000000000000..db5d7c1dcb8157c2b05e8584ad30079e753b0d2a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_FTRACE
@@ -0,0 +1 @@
+# CONFIG_STM_SOURCE_FTRACE is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_HEARTBEAT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_HEARTBEAT
new file mode 100644
index 0000000000000000000000000000000000000000..70814f3b7bce82e9e4c37e12982c308db63573f4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_HEARTBEAT
@@ -0,0 +1 @@
+# CONFIG_STM_SOURCE_HEARTBEAT is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_INFINEON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_INFINEON
new file mode 100644
index 0000000000000000000000000000000000000000..e2e45880fd094cceda8812614ad9f9c093e9377d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_INFINEON
@@ -0,0 +1 @@
+# CONFIG_TCG_INFINEON is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_TIS_SPI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_TIS_SPI
new file mode 100644
index 0000000000000000000000000000000000000000..79fbc0cc6d946dc0fce3fc896877a294b9502b45
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_TIS_SPI
@@ -0,0 +1 @@
+CONFIG_TCG_TIS_SPI=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCP_CONG_CDG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCP_CONG_CDG
new file mode 100644
index 0000000000000000000000000000000000000000..30b467e167031cba43afa092871cbf88cfb2f447
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCP_CONG_CDG
@@ -0,0 +1 @@
+# CONFIG_TCP_CONG_CDG is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_GOV_BANG_BANG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_GOV_BANG_BANG
new file mode 100644
index 0000000000000000000000000000000000000000..63f90615aed62f6f9b24ee13e40210c37efffe29
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_GOV_BANG_BANG
@@ -0,0 +1 @@
+# CONFIG_THERMAL_GOV_BANG_BANG is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_MMIO b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_MMIO
new file mode 100644
index 0000000000000000000000000000000000000000..004769c5f15465866e7d498cc4934bce9885e8e4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_MMIO
@@ -0,0 +1 @@
+# CONFIG_THERMAL_MMIO is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_NETLINK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_NETLINK
new file mode 100644
index 0000000000000000000000000000000000000000..8abdc692a524f213cdee28672b0686ab12cd8a6c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_NETLINK
@@ -0,0 +1 @@
+# CONFIG_THERMAL_NETLINK is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_OF b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_OF
new file mode 100644
index 0000000000000000000000000000000000000000..e8ba034f7ce8e4929cf9b548d61e4d8d7ae5dba1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_OF
@@ -0,0 +1 @@
+CONFIG_THERMAL_OF=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_WRITABLE_TRIPS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_WRITABLE_TRIPS
new file mode 100644
index 0000000000000000000000000000000000000000..abc3c076e1d62e5dbd0a286034a4bd84580ee0a7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_WRITABLE_TRIPS
@@ -0,0 +1 @@
+# CONFIG_THERMAL_WRITABLE_TRIPS is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UEVENT_HELPER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UEVENT_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..262a0f05487295c75c10ceee758715e51bf59185
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UEVENT_HELPER
@@ -0,0 +1 @@
+# CONFIG_UEVENT_HELPER is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UIO_HV_GENERIC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UIO_HV_GENERIC
new file mode 100644
index 0000000000000000000000000000000000000000..ba62e18258ee382832cdec174b737d2f4bfe2e79
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UIO_HV_GENERIC
@@ -0,0 +1 @@
+# CONFIG_UIO_HV_GENERIC is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..c05b69e841a60febd2437ebfab380865ef310e75
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM
@@ -0,0 +1 @@
+CONFIG_VFIO_PLATFORM=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM_BASE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM_BASE
new file mode 100644
index 0000000000000000000000000000000000000000..993fade9832faf510c70fa2b186899fb255815d9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM_BASE
@@ -0,0 +1 @@
+CONFIG_VFIO_PLATFORM_BASE=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB
new file mode 100644
index 0000000000000000000000000000000000000000..fc5d87b7666e5a2ec2a1145ea61efdb484b61421
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB
@@ -0,0 +1 @@
+CONFIG_VIRTIO_PCI_LIB=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB_LEGACY b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB_LEGACY
new file mode 100644
index 0000000000000000000000000000000000000000..fce6f51c8b190736dd3eb527158fa942f203704e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB_LEGACY
@@ -0,0 +1 @@
+CONFIG_VIRTIO_PCI_LIB_LEGACY=m
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRT_DRIVERS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRT_DRIVERS
new file mode 100644
index 0000000000000000000000000000000000000000..8cc1125d46ee8b614a3dc818953b089d093d76ca
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRT_DRIVERS
@@ -0,0 +1 @@
+# CONFIG_VIRT_DRIVERS is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMWARE_VMCI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMWARE_VMCI
new file mode 100644
index 0000000000000000000000000000000000000000..db263c420e93b6c4477d5a57862c7bd01cecd17c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMWARE_VMCI
@@ -0,0 +1 @@
+# CONFIG_VMWARE_VMCI is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMXNET3 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMXNET3
new file mode 100644
index 0000000000000000000000000000000000000000..5055f163e3932bf7ae81fb31b7babefccb24a4fd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMXNET3
@@ -0,0 +1 @@
+# CONFIG_VMXNET3 is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_WDAT_WDT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_WDAT_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..cf7b00dd987885014ddd97b0300fe165e1756d50
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_WDAT_WDT
@@ -0,0 +1 @@
+# CONFIG_WDAT_WDT is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_XEN b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_XEN
new file mode 100644
index 0000000000000000000000000000000000000000..f154fee42a657df768c05bd12cf993994a820feb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_XEN
@@ -0,0 +1 @@
+# CONFIG_XEN is not set
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA
new file mode 100644
index 0000000000000000000000000000000000000000..09fe8d346af39b01d63d5eda3a08bf94377f3820
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA
@@ -0,0 +1 @@
+CONFIG_YITIAN_CPER_RAWDATA=y
diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ZRAM_MEMORY_TRACKING b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ZRAM_MEMORY_TRACKING
new file mode 100644
index 0000000000000000000000000000000000000000..a45c396a428c341a5d70da42dd874925c0185c2b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ZRAM_MEMORY_TRACKING
@@ -0,0 +1 @@
+# CONFIG_ZRAM_MEMORY_TRACKING is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_AC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_AC
new file mode 100644
index 0000000000000000000000000000000000000000..5dbaee1a85df3b43c9d4e85c7d0072a6cb6bde4e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_AC
@@ -0,0 +1 @@
+CONFIG_ACPI_AC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_EINJ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_EINJ
new file mode 100644
index 0000000000000000000000000000000000000000..66e425d5b77019ea5d8f9746fcb7728ed7872362
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_EINJ
@@ -0,0 +1 @@
+CONFIG_ACPI_APEI_EINJ=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_ERST_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_ERST_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..235760d408f4796425431f96435a17da5a3c6435
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_ERST_DEBUG
@@ -0,0 +1 @@
+# CONFIG_ACPI_APEI_ERST_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_GHES b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_GHES
new file mode 100644
index 0000000000000000000000000000000000000000..8fd037d8f12b505e46be3893f076578b7821adca
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_GHES
@@ -0,0 +1 @@
+CONFIG_ACPI_APEI_GHES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_MEMORY_FAILURE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_MEMORY_FAILURE
new file mode 100644
index 0000000000000000000000000000000000000000..46aa1579fd2a6e704a46e50389cb6c540618a1b6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_MEMORY_FAILURE
@@ -0,0 +1 @@
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_PCIEAER b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_PCIEAER
new file mode 100644
index 0000000000000000000000000000000000000000..26f3e912912fd863e7adf870f075957d856274ff
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_PCIEAER
@@ -0,0 +1 @@
+CONFIG_ACPI_APEI_PCIEAER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BATTERY b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BATTERY
new file mode 100644
index 0000000000000000000000000000000000000000..eb3286698d343ca57f1b4bd4df832d7229f05d71
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BATTERY
@@ -0,0 +1 @@
+CONFIG_ACPI_BATTERY=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BUTTON b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BUTTON
new file mode 100644
index 0000000000000000000000000000000000000000..1f552016b1da4f41e3e22008f64f8ee3f5bf9fcf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BUTTON
@@ -0,0 +1 @@
+CONFIG_ACPI_BUTTON=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONFIGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONFIGFS
new file mode 100644
index 0000000000000000000000000000000000000000..4214adc8cf3f8774470f413b16973581d3fe42ed
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONFIGFS
@@ -0,0 +1 @@
+# CONFIG_ACPI_CONFIGFS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONTAINER b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONTAINER
new file mode 100644
index 0000000000000000000000000000000000000000..24287daf8421dafbc75a6cce11d0b659553537fc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONTAINER
@@ -0,0 +1 @@
+CONFIG_ACPI_CONTAINER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD
new file mode 100644
index 0000000000000000000000000000000000000000..76853b6d822410dd6480cbd61f6f731f84fab62e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD
@@ -0,0 +1 @@
+CONFIG_ACPI_CUSTOM_METHOD=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..aee5969aa48dc0c93e01194c985e01bc33d863f0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUG
@@ -0,0 +1 @@
+# CONFIG_ACPI_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUGGER b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUGGER
new file mode 100644
index 0000000000000000000000000000000000000000..3f59992004061e6a2dbefef456efe4a682fa41e4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUGGER
@@ -0,0 +1 @@
+# CONFIG_ACPI_DEBUGGER is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_FAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_FAN
new file mode 100644
index 0000000000000000000000000000000000000000..0feefba7b3f0c863605e4e31959b1de4caaa8b77
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_FAN
@@ -0,0 +1 @@
+CONFIG_ACPI_FAN=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HED b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HED
new file mode 100644
index 0000000000000000000000000000000000000000..06042df5ab2e958c5a626411dc1918ba9136ee79
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HED
@@ -0,0 +1 @@
+CONFIG_ACPI_HED=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HMAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HMAT
new file mode 100644
index 0000000000000000000000000000000000000000..34e2b9331ffea6ce32fa1c984972d17d10fc5d96
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HMAT
@@ -0,0 +1 @@
+CONFIG_ACPI_HMAT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HOTPLUG_MEMORY b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HOTPLUG_MEMORY
new file mode 100644
index 0000000000000000000000000000000000000000..d2933e65ff918bfda0e4f20748578b6fbd446e4d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HOTPLUG_MEMORY
@@ -0,0 +1 @@
+CONFIG_ACPI_HOTPLUG_MEMORY=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_NFIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_NFIT
new file mode 100644
index 0000000000000000000000000000000000000000..922b719bf78b8eec187c7b4a585cd917f09b0301
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_NFIT
@@ -0,0 +1 @@
+CONFIG_ACPI_NFIT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCC
new file mode 100644
index 0000000000000000000000000000000000000000..741c7152a4c609f8e5cae5f6bc8363a0caa106b9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCC
@@ -0,0 +1 @@
+CONFIG_ACPI_PCC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCI_SLOT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCI_SLOT
new file mode 100644
index 0000000000000000000000000000000000000000..6ef14483b6d975090f0a2ca1a58b2796265facb1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCI_SLOT
@@ -0,0 +1 @@
+CONFIG_ACPI_PCI_SLOT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PRMT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PRMT
new file mode 100644
index 0000000000000000000000000000000000000000..416b5bab52269604c8725eec39e82765a0f4b61f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PRMT
@@ -0,0 +1 @@
+CONFIG_ACPI_PRMT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_SPCR_TABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_SPCR_TABLE
new file mode 100644
index 0000000000000000000000000000000000000000..f8a19253c3b09b3687f4153a5b5b83c1788edf6d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_SPCR_TABLE
@@ -0,0 +1 @@
+CONFIG_ACPI_SPCR_TABLE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_TABLE_UPGRADE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_TABLE_UPGRADE
new file mode 100644
index 0000000000000000000000000000000000000000..276233fba1dd147755d00dcc01a65a2628afbdc9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_TABLE_UPGRADE
@@ -0,0 +1 @@
+CONFIG_ACPI_TABLE_UPGRADE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_THERMAL b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_THERMAL
new file mode 100644
index 0000000000000000000000000000000000000000..1bd7964b474e74ca60f6782c217ff3d72e22dcce
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_THERMAL
@@ -0,0 +1 @@
+CONFIG_ACPI_THERMAL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_VIDEO b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_VIDEO
new file mode 100644
index 0000000000000000000000000000000000000000..b642d06e3ad43647acf1353c924e5485ec36b91c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_VIDEO
@@ -0,0 +1 @@
+CONFIG_ACPI_VIDEO=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO
new file mode 100644
index 0000000000000000000000000000000000000000..6a7ffe559b94cdfe188641c29026476a83b4b3e5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO
@@ -0,0 +1 @@
+CONFIG_AHCI_ZHAOXIN_SGPIO=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_CORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..0d809fccc867d7e835a334e0c9fa893af7a02fab
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_CORE
@@ -0,0 +1 @@
+CONFIG_ASYNC_CORE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_FORK b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_FORK
new file mode 100644
index 0000000000000000000000000000000000000000..16d0f9953bbfb323e1b42213c3308851d06d93f1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_FORK
@@ -0,0 +1 @@
+CONFIG_ASYNC_FORK=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_MEMCPY b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_MEMCPY
new file mode 100644
index 0000000000000000000000000000000000000000..80c35b93679ef58fdb9e0505d6d3b08f17c0ed35
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_MEMCPY
@@ -0,0 +1 @@
+CONFIG_ASYNC_MEMCPY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_PQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_PQ
new file mode 100644
index 0000000000000000000000000000000000000000..645846af2bc58f23e006faf3c1cf0247f4899259
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_PQ
@@ -0,0 +1 @@
+CONFIG_ASYNC_PQ=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_RECOV b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_RECOV
new file mode 100644
index 0000000000000000000000000000000000000000..72ab56e75f3fea8f0d8f24edc5418e861f865896
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_RECOV
@@ -0,0 +1 @@
+CONFIG_ASYNC_RAID6_RECOV=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_TEST
new file mode 100644
index 0000000000000000000000000000000000000000..8fb25dd27814b76e5b22db39b9524b46b7b8cd32
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_TEST
@@ -0,0 +1 @@
+CONFIG_ASYNC_RAID6_TEST=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_TX_DMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_TX_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..c62f8624b5d6f3ed00c168a794f8f78bd0f4b76a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_TX_DMA
@@ -0,0 +1 @@
+CONFIG_ASYNC_TX_DMA=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_XOR b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_XOR
new file mode 100644
index 0000000000000000000000000000000000000000..7c2396c384938addb269f2c0de8548c92922885a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_XOR
@@ -0,0 +1 @@
+CONFIG_ASYNC_XOR=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA
new file mode 100644
index 0000000000000000000000000000000000000000..14102c1ab11b3d31add6ab947cf0b7096410b3e5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA
@@ -0,0 +1 @@
+CONFIG_ATA=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_ACPI b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_ACPI
new file mode 100644
index 0000000000000000000000000000000000000000..a0be76e566680ed29b6056032bc64f9f74b5e9d9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_ACPI
@@ -0,0 +1 @@
+CONFIG_ATA_ACPI=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_BMDMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_BMDMA
new file mode 100644
index 0000000000000000000000000000000000000000..06163193f34955a45c60916bd6e03253b98189d4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_BMDMA
@@ -0,0 +1 @@
+CONFIG_ATA_BMDMA=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_FORCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_FORCE
new file mode 100644
index 0000000000000000000000000000000000000000..aacf2b3c7eab46f1934dbe5dede5959f55441731
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_FORCE
@@ -0,0 +1 @@
+CONFIG_ATA_FORCE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_GENERIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_GENERIC
new file mode 100644
index 0000000000000000000000000000000000000000..eb3093a119397816ece441c6e250a95b5cebfdbd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_GENERIC
@@ -0,0 +1 @@
+CONFIG_ATA_GENERIC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_PIIX b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_PIIX
new file mode 100644
index 0000000000000000000000000000000000000000..45bfefead8a50cedba29971bd17bf62b3c7edd4b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_PIIX
@@ -0,0 +1 @@
+CONFIG_ATA_PIIX=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_SFF b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_SFF
new file mode 100644
index 0000000000000000000000000000000000000000..8631ad6d7d6a243169323c7bf462c38b7b037049
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_SFF
@@ -0,0 +1 @@
+CONFIG_ATA_SFF=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_VERBOSE_ERROR b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_VERBOSE_ERROR
new file mode 100644
index 0000000000000000000000000000000000000000..cc5b03f8d342a053d28212796e1c7313943bcb07
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_VERBOSE_ERROR
@@ -0,0 +1 @@
+CONFIG_ATA_VERBOSE_ERROR=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATM_DRIVERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATM_DRIVERS
new file mode 100644
index 0000000000000000000000000000000000000000..9ef2a621becb8eeb912a4e384f0fe1118e3f9617
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATM_DRIVERS
@@ -0,0 +1 @@
+# CONFIG_ATM_DRIVERS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATOMIC64_SELFTEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATOMIC64_SELFTEST
new file mode 100644
index 0000000000000000000000000000000000000000..26a054e271604f312fb4c2222cd3ba61f665bfba
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATOMIC64_SELFTEST
@@ -0,0 +1 @@
+CONFIG_ATOMIC64_SELFTEST=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BALLOON_COMPACTION b/anolis/configs/L1-RECOMMEND/default/CONFIG_BALLOON_COMPACTION
new file mode 100644
index 0000000000000000000000000000000000000000..930afec9df3e0a95f3734b7a9d8feeec13266e69
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BALLOON_COMPACTION
@@ -0,0 +1 @@
+CONFIG_BALLOON_COMPACTION=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE
new file mode 100644
index 0000000000000000000000000000000000000000..c7813b08191a498cad8f84483af8fb0310aa4f56
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE
@@ -0,0 +1 @@
+CONFIG_BCACHE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BCMGENET b/anolis/configs/L1-RECOMMEND/default/CONFIG_BCMGENET
new file mode 100644
index 0000000000000000000000000000000000000000..674bcb0ddd0c3d2d91893b02b5a40ffe7d0edc26
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BCMGENET
@@ -0,0 +1 @@
+# CONFIG_BCMGENET is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BFQ_CGROUP_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BFQ_CGROUP_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..90c958fa573aa9c72fb1be5893502759c8a10ee8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BFQ_CGROUP_DEBUG
@@ -0,0 +1 @@
+# CONFIG_BFQ_CGROUP_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BINFMT_MISC b/anolis/configs/L1-RECOMMEND/default/CONFIG_BINFMT_MISC
new file mode 100644
index 0000000000000000000000000000000000000000..20754804bf5055194b0c26563c0eda56e8a03c91
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BINFMT_MISC
@@ -0,0 +1 @@
+CONFIG_BINFMT_MISC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLKDEV_UBLK_LEGACY_OPCODES b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLKDEV_UBLK_LEGACY_OPCODES
new
file mode 100644 index 0000000000000000000000000000000000000000..5abaa4d2046e44ef0958b586e3dca7739561167c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLKDEV_UBLK_LEGACY_OPCODES @@ -0,0 +1 @@ +CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEBUG_FS_ZONED b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEBUG_FS_ZONED new file mode 100644 index 0000000000000000000000000000000000000000..15ef78eaa592c0d524b8a6c28256e50b040c77f6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEBUG_FS_ZONED @@ -0,0 +1 @@ +CONFIG_BLK_DEBUG_FS_ZONED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSG new file mode 100644 index 0000000000000000000000000000000000000000..8acabf1ac6973856e32c97a0fdd9e6ddc3448611 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSG @@ -0,0 +1 @@ +CONFIG_BLK_DEV_BSG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSGLIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSGLIB new file mode 100644 index 0000000000000000000000000000000000000000..5e1a78b5e007c5e5eb2d9824f200e061341816a1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSGLIB @@ -0,0 +1 @@ +CONFIG_BLK_DEV_BSGLIB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DM b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DM new file mode 100644 index 0000000000000000000000000000000000000000..e6cd5d106e179ac35e215379faa73fcca720164a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DM @@ -0,0 +1 @@ +CONFIG_BLK_DEV_DM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DRBD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DRBD new file mode 100644 index 0000000000000000000000000000000000000000..5ce59aee1b7ec8cac843a2358b03dc7b11f30675 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DRBD @@ -0,0 +1 @@ +# CONFIG_BLK_DEV_DRBD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY new file mode 100644 index 0000000000000000000000000000000000000000..1ef600f47b4259d55adf89e7123347ca957e0303 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY @@ -0,0 +1 @@ +CONFIG_BLK_DEV_INTEGRITY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY_T10 b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY_T10 new file mode 100644 index 0000000000000000000000000000000000000000..e1ad0a7a8a16bb8df88bac3051fafeeb8e86d011 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY_T10 @@ -0,0 +1 @@ +CONFIG_BLK_DEV_INTEGRITY_T10=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP new file mode 100644 index 0000000000000000000000000000000000000000..72437e0c0fc1cb91f77b60f7138046b83ab23c2b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP @@ -0,0 +1 @@ +CONFIG_BLK_DEV_LOOP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP_MIN_COUNT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP_MIN_COUNT new file mode 100644 index 0000000000000000000000000000000000000000..e816906491069659bd1f6d6d8a7c9303bb7f696e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP_MIN_COUNT @@ -0,0 +1 @@ +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_MD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_MD new file mode 100644 index 0000000000000000000000000000000000000000..791d32f4588c127dd1f9c1d8e07dddd968b96d77 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_MD @@ -0,0 +1 @@ +CONFIG_BLK_DEV_MD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NBD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NBD new file mode 100644 index 0000000000000000000000000000000000000000..be2735d4538c0e6dc944a8b0472fe785af36d877 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NBD @@ -0,0 +1 @@ +CONFIG_BLK_DEV_NBD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NULL_BLK b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NULL_BLK new file mode 100644 index 0000000000000000000000000000000000000000..09340ef3dde8c83491f9adbf7e7feb6308d73d52 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NULL_BLK @@ -0,0 +1 @@ +CONFIG_BLK_DEV_NULL_BLK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM new file mode 100644 index 0000000000000000000000000000000000000000..834088f95a0ed22ee7fc52444a78fe393d64a8a6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM @@ -0,0 +1 @@ +CONFIG_BLK_DEV_RAM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_COUNT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_COUNT new file mode 100644 index 0000000000000000000000000000000000000000..cefe13e1483d0441b5dea76124db023f34f61039 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_COUNT @@ -0,0 +1 @@ +CONFIG_BLK_DEV_RAM_COUNT=16 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_SIZE new file mode 100644 index 0000000000000000000000000000000000000000..ac4916f29ab4fdf5d3d1e1e8d2c4c47bba05e505 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_SIZE @@ -0,0 +1 @@ +CONFIG_BLK_DEV_RAM_SIZE=16384 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RBD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RBD new file mode 100644 index 0000000000000000000000000000000000000000..156a099704bc94e3dcd76e5a817c1b05180cae03 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RBD @@ -0,0 +1 @@ +CONFIG_BLK_DEV_RBD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SD new file mode 100644 index 0000000000000000000000000000000000000000..f283d1a826dc5c466f4480f1156bab3abf04c70f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SD @@ -0,0 +1 @@ +CONFIG_BLK_DEV_SD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SR b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SR new file mode 100644 index 0000000000000000000000000000000000000000..fff43e8802d5fe757c03499e5737d09f99475aaa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SR @@ -0,0 +1 @@ +CONFIG_BLK_DEV_SR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_THROTTLING_LOW b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_THROTTLING_LOW new file mode 100644 index 0000000000000000000000000000000000000000..802bc55b4c5ef9477f096c7e2a28b748e143f770 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_THROTTLING_LOW @@ -0,0 +1 @@ +# CONFIG_BLK_DEV_THROTTLING_LOW is 
not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_UBLK b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_UBLK new file mode 100644 index 0000000000000000000000000000000000000000..592b0ba4d66180bf338801df45a3184cf17a9d77 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_UBLK @@ -0,0 +1 @@ +CONFIG_BLK_DEV_UBLK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_ZONED b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_ZONED new file mode 100644 index 0000000000000000000000000000000000000000..529b0b105c2ab6426dbd72ae915d0e082be2f4af --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_ZONED @@ -0,0 +1 @@ +CONFIG_BLK_DEV_ZONED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_WBT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_WBT new file mode 100644 index 0000000000000000000000000000000000000000..1e5381167dad20960fe483274b48ba70ab113e1f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_WBT @@ -0,0 +1 @@ +# CONFIG_BLK_WBT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2 new file mode 100644 index 0000000000000000000000000000000000000000..ff541ef7fb47dd44b74888557f2a530229de9152 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2 @@ -0,0 +1 @@ +CONFIG_BNX2=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X new file mode 100644 index 0000000000000000000000000000000000000000..d32b37750388526f595243d76e2a8c64f10d71ad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X @@ -0,0 +1 @@ +CONFIG_BNX2X=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X_SRIOV b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X_SRIOV new file mode 100644 index 0000000000000000000000000000000000000000..73521e4aad84d40e86761335020c049b91634e62 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X_SRIOV @@ -0,0 +1 @@ +CONFIG_BNX2X_SRIOV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT new file mode 100644 index 0000000000000000000000000000000000000000..3305b042d1777143c39194167e9a3dd11bf04e77 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT @@ -0,0 +1 @@ +CONFIG_BNXT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_DCB b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_DCB new file mode 100644 index 0000000000000000000000000000000000000000..fd4061d58f4179581bfd6a1bbe26dd5c54035ae4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_DCB @@ -0,0 +1 @@ +CONFIG_BNXT_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_FLOWER_OFFLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_FLOWER_OFFLOAD new file mode 100644 index 0000000000000000000000000000000000000000..170bbf312ca2c3e22ce2d3452cb6cd70e07867f2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_FLOWER_OFFLOAD @@ -0,0 +1 @@ +CONFIG_BNXT_FLOWER_OFFLOAD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_HWMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_HWMON new file mode 100644 index 0000000000000000000000000000000000000000..4de524b9613dd5cb2c17749280774a99a3510420 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_HWMON @@ -0,0 +1 @@ +CONFIG_BNXT_HWMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_SRIOV b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_SRIOV new file mode 100644 
index 0000000000000000000000000000000000000000..92285090d0982ccac6b6661351c61072e1150294 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_SRIOV @@ -0,0 +1 @@ +CONFIG_BNXT_SRIOV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_HARDLOCKUP_PANIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_HARDLOCKUP_PANIC new file mode 100644 index 0000000000000000000000000000000000000000..ea640e553409cd1a3ec74eea666f34a188d22b1c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_HARDLOCKUP_PANIC @@ -0,0 +1 @@ +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC new file mode 100644 index 0000000000000000000000000000000000000000..20270b1c6ee0b2d1370c9258569b4e8be286592f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC @@ -0,0 +1 @@ +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTTIME_TRACING b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTTIME_TRACING new file mode 100644 index 0000000000000000000000000000000000000000..d2bc2c14e5739a872e913b3a7ee6a34784385977 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTTIME_TRACING @@ -0,0 +1 @@ +# CONFIG_BOOTTIME_TRACING is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_CONFIG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_CONFIG new file mode 100644 index 0000000000000000000000000000000000000000..947b159c3805216260732cb896963f9cf4de855a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_CONFIG @@ -0,0 +1 @@ +# CONFIG_BOOT_CONFIG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_PRINTK_DELAY b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_PRINTK_DELAY new file mode 100644 index 0000000000000000000000000000000000000000..081352b0ce8113492a3e0ed683c1ed2ebb8f26be --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_PRINTK_DELAY @@ -0,0 +1 @@ +CONFIG_BOOT_PRINTK_DELAY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BPFILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPFILTER new file mode 100644 index 0000000000000000000000000000000000000000..5da614fb247f70b0ba01fc8fddd8f763ef1576c9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPFILTER @@ -0,0 +1 @@ +# CONFIG_BPFILTER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_JIT_ALWAYS_ON b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_JIT_ALWAYS_ON new file mode 100644 index 0000000000000000000000000000000000000000..45aacb43476b5763dc12ea347e86e419a87d2a03 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_JIT_ALWAYS_ON @@ -0,0 +1 @@ +CONFIG_BPF_JIT_ALWAYS_ON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_KPROBE_OVERRIDE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_KPROBE_OVERRIDE new file mode 100644 index 0000000000000000000000000000000000000000..573604162bfb02e092cb39b9e86c4ad2dc7e4d04 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_KPROBE_OVERRIDE @@ -0,0 +1 @@ +# CONFIG_BPF_KPROBE_OVERRIDE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_STREAM_PARSER b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_STREAM_PARSER new file mode 100644 index 0000000000000000000000000000000000000000..7cf78350603306499a1b7111250137d160118678 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_STREAM_PARSER @@ -0,0 +1 @@ +CONFIG_BPF_STREAM_PARSER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRANCH_PROFILE_NONE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRANCH_PROFILE_NONE new file mode 100644 index 0000000000000000000000000000000000000000..a741dd6151f3bab75182321d070b8316670d93b3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRANCH_PROFILE_NONE @@ -0,0 +1 @@ +CONFIG_BRANCH_PROFILE_NONE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_802_3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_802_3 new file mode 100644 index 0000000000000000000000000000000000000000..a7a385edfa31ef92d5d580635e642659f46d000a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_802_3 @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_802_3=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_AMONG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_AMONG new file mode 100644 index 0000000000000000000000000000000000000000..5b854f904018ba8a5cde3f76cae4cdcafe278291 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_AMONG @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_AMONG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARP b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARP new file mode 100644 index 0000000000000000000000000000000000000000..a8781c379b0fa1ab29653f03be4b475622fe17bb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARP @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_ARP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARPREPLY b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARPREPLY new file mode 100644 index 0000000000000000000000000000000000000000..f1bd17c9d1245a5a31f8d3b6fa8a0a5ebbd9cdf6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARPREPLY @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_ARPREPLY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_BROUTE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_BROUTE new file mode 100644 index 0000000000000000000000000000000000000000..d8c6c59adb86640da143a3a8568775875ad66cd8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_BROUTE @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_BROUTE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_DNAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_DNAT new file mode 100644 index 0000000000000000000000000000000000000000..309f316d12568a68c09544553c034f0b403059b3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_DNAT @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_DNAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP new file mode 100644 index 0000000000000000000000000000000000000000..5a60e537e6d1ea395e84fb50bd05ccaa311f5a68 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_IP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP6 new file mode 100644 index 0000000000000000000000000000000000000000..df1578dc274bcc05aaafd84a9639cec6229c8c75 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP6 @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_IP6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LIMIT new file mode 100644 
index 0000000000000000000000000000000000000000..5c74954a0399b9dcfed70545c578c4bf3a716cc6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LIMIT @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_LIMIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LOG new file mode 100644 index 0000000000000000000000000000000000000000..33821290e398aebbcaafac4639b75e4766d037fc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LOG @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_LOG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK new file mode 100644 index 0000000000000000000000000000000000000000..63b5e4b0abf59d5f7621e2fbf559dee62d3ecafd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_MARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK_T b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK_T new file mode 100644 index 0000000000000000000000000000000000000000..b89d06ee31cc689ecb52fc7b9e2b7b4bdbb1c65b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK_T @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_MARK_T=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_NFLOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_NFLOG new file mode 100644 index 0000000000000000000000000000000000000000..0419263a0cd8f86fe072372e0dedb42daa80fa76 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_NFLOG @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_NFLOG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_PKTTYPE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_PKTTYPE new file mode 100644 index 0000000000000000000000000000000000000000..c29e3e03036d28fb564b77cac8f08095e95cba45 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_PKTTYPE @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_PKTTYPE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_REDIRECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_REDIRECT new file mode 100644 index 0000000000000000000000000000000000000000..c40b497d184ad8e1cada45c71e0ee17ed1ba8f61 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_REDIRECT @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_REDIRECT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_SNAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_SNAT new file mode 100644 index 0000000000000000000000000000000000000000..1bd963216eecd036b882823e5e463f2e45f345f2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_SNAT @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_SNAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_STP b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_STP new file mode 100644 index 0000000000000000000000000000000000000000..f1c9e454d219dd7f210b000e157d872e520e8e3e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_STP @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_STP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_FILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_FILTER new file mode 100644 index 0000000000000000000000000000000000000000..e2c7f477221daf6907a522c694bab001e4b0a38d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_FILTER @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_T_FILTER=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_NAT new file mode 100644 index 0000000000000000000000000000000000000000..a64d08d8d8f36e8112352a4467306282879577d3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_NAT @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_T_NAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_VLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_VLAN new file mode 100644 index 0000000000000000000000000000000000000000..bde07d6ecf29a5ad6e13ae34a99617d6f7ddc854 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_VLAN @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_VLAN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_IGMP_SNOOPING b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_IGMP_SNOOPING new file mode 100644 index 0000000000000000000000000000000000000000..a7abc6bb4ed7194a1de227d24f0daca6122fd8e8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_IGMP_SNOOPING @@ -0,0 +1 @@ +CONFIG_BRIDGE_IGMP_SNOOPING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_MRP b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_MRP new file mode 100644 index 0000000000000000000000000000000000000000..38f0a923d22d16ac66e161a16c0befae08ad822f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_MRP @@ -0,0 +1 @@ +# CONFIG_BRIDGE_MRP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NETFILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NETFILTER new file mode 100644 index 0000000000000000000000000000000000000000..d052fbcce3a17e9966bf2dbf9ccc9981070698c2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NETFILTER @@ -0,0 +1 @@ +CONFIG_BRIDGE_NETFILTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NF_EBTABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NF_EBTABLES new file mode 100644 index 0000000000000000000000000000000000000000..f68518f59f3de9dbc056a9fcca7a7e796b2c1cf0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NF_EBTABLES @@ -0,0 +1 @@ +CONFIG_BRIDGE_NF_EBTABLES=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_VLAN_FILTERING b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_VLAN_FILTERING new file mode 100644 index 0000000000000000000000000000000000000000..0792e0d510d2f8bf28a72b3e734985ca15581fbb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_VLAN_FILTERING @@ -0,0 +1 @@ +CONFIG_BRIDGE_VLAN_FILTERING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT new file mode 100644 index 0000000000000000000000000000000000000000..b9a4966014fe2909d6f62f7c914f83c9d0735b49 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT @@ -0,0 +1 @@ +CONFIG_BSD_PROCESS_ACCT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT_V3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT_V3 new file mode 100644 index 0000000000000000000000000000000000000000..bf334a252c19dbb49fce2168228ffb9a26582e26 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT_V3 @@ -0,0 +1 @@ +CONFIG_BSD_PROCESS_ACCT_V3=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BTRFS_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_BTRFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..3b4d4254c153f93f0f3ac46f1369b0f588c962c5 --- 
/dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BTRFS_FS @@ -0,0 +1 @@ +CONFIG_BTRFS_FS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BUG_ON_DATA_CORRUPTION b/anolis/configs/L1-RECOMMEND/default/CONFIG_BUG_ON_DATA_CORRUPTION new file mode 100644 index 0000000000000000000000000000000000000000..5ebeba7b200d9970f6020db99482dcc5753142fa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BUG_ON_DATA_CORRUPTION @@ -0,0 +1 @@ +CONFIG_BUG_ON_DATA_CORRUPTION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BUILD_SALT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BUILD_SALT new file mode 100644 index 0000000000000000000000000000000000000000..6cf55b283df797602dff6dd7c9de764cc53eb502 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BUILD_SALT @@ -0,0 +1 @@ +CONFIG_BUILD_SALT="" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CACHEFILES_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CACHEFILES_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..81ad37f9789a739ed6ee91d8bdd3b666acfaf4a7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CACHEFILES_DEBUG @@ -0,0 +1 @@ +# CONFIG_CACHEFILES_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM new file mode 100644 index 0000000000000000000000000000000000000000..2bf814b0a776b88d27a1ee426c639bf00551c8bd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM @@ -0,0 +1 @@ +CONFIG_CDROM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD new file mode 100644 index 0000000000000000000000000000000000000000..509827e58aba6727b6af062c32d205463f888902 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD @@ -0,0 +1 @@ +CONFIG_CDROM_PKTCDVD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_BUFFERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_BUFFERS new file mode 100644 index 0000000000000000000000000000000000000000..8dafda73512c4085e18f82ad64f1fb9c253b227b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_BUFFERS @@ -0,0 +1 @@ +CONFIG_CDROM_PKTCDVD_BUFFERS=8 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_WCACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_WCACHE new file mode 100644 index 0000000000000000000000000000000000000000..ce8fec39d57fb372901791b1e4e4bbecf8875809 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_WCACHE @@ -0,0 +1 @@ +# CONFIG_CDROM_PKTCDVD_WCACHE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS new file mode 100644 index 0000000000000000000000000000000000000000..25623fa9739944f5f3d9d24411d51de6135f04eb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS @@ -0,0 +1 @@ +CONFIG_CEPH_FS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FSCACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FSCACHE new file mode 100644 index 0000000000000000000000000000000000000000..7c46162890d5154bd9ad8ac9a846407c3fb21730 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FSCACHE @@ -0,0 +1 @@ +# CONFIG_CEPH_FSCACHE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_POSIX_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_POSIX_ACL new file mode 100644 index 
0000000000000000000000000000000000000000..680364368a16974e6278f3701940d533973175cf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_POSIX_ACL @@ -0,0 +1 @@ +CONFIG_CEPH_FS_POSIX_ACL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_SECURITY_LABEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_SECURITY_LABEL new file mode 100644 index 0000000000000000000000000000000000000000..3c5d082372140f46e8bf416b194b7a4dd84054c1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_SECURITY_LABEL @@ -0,0 +1 @@ +# CONFIG_CEPH_FS_SECURITY_LABEL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..3d57cb63b39455c91601b6d999ed74140b867945 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_DEBUG @@ -0,0 +1 @@ +# CONFIG_CGROUP_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_FREEZER b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_FREEZER new file mode 100644 index 0000000000000000000000000000000000000000..d7e06d250f4028296b33b5d84a7494c1d48c4f41 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_FREEZER @@ -0,0 +1 @@ +CONFIG_CGROUP_FREEZER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_CLASSID b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_CLASSID new file mode 100644 index 0000000000000000000000000000000000000000..73e4e4b3f0245529fa99204b703c1ca91294dcab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_CLASSID @@ -0,0 +1 @@ +CONFIG_CGROUP_NET_CLASSID=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_PRIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_PRIO new file mode 100644 index 0000000000000000000000000000000000000000..b4e8e68d514002510cf4ae0f4186f02e083a5d72 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_PRIO @@ -0,0 +1 @@ +CONFIG_CGROUP_NET_PRIO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_WRITEBACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_WRITEBACK new file mode 100644 index 0000000000000000000000000000000000000000..baf2252de5ff1698cecfba9e64169d1fa6ec69ef --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_WRITEBACK @@ -0,0 +1 @@ +CONFIG_CGROUP_WRITEBACK=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SCH new file mode 100644 index 0000000000000000000000000000000000000000..06017f05f6ab6389f52098a65978e220afc0e287 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SCH @@ -0,0 +1 @@ +CONFIG_CHR_DEV_SCH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SG new file mode 100644 index 0000000000000000000000000000000000000000..7350041d12c6894565b0415cab351afcb655283e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SG @@ -0,0 +1 @@ +CONFIG_CHR_DEV_SG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_ST b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_ST new file mode 100644 index 0000000000000000000000000000000000000000..430ba8718a20363a734c87528c3d4e36afa915cd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_ST @@ -0,0 +1 @@ +CONFIG_CHR_DEV_ST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_MARK 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_MARK new file mode 100644 index 0000000000000000000000000000000000000000..5fb0654ba12ff5c16e409c00686d64064ba22f18 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_MARK @@ -0,0 +1 @@ +CONFIG_CLS_U32_MARK=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_PERF b/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_PERF new file mode 100644 index 0000000000000000000000000000000000000000..44646ed873f0063f2c3e6174387f013e359db63f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_PERF @@ -0,0 +1 @@ +CONFIG_CLS_U32_PERF=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA new file mode 100644 index 0000000000000000000000000000000000000000..309c9e771d6b00cb800d4f79e322c04a51cbe05f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA @@ -0,0 +1 @@ +CONFIG_CMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_ALIGNMENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_ALIGNMENT new file mode 100644 index 0000000000000000000000000000000000000000..7941445e73ae7d8f1232f1fab5d58c15e4e8adcf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_ALIGNMENT @@ -0,0 +1 @@ +CONFIG_CMA_ALIGNMENT=8 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_AREAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_AREAS new file mode 100644 index 0000000000000000000000000000000000000000..9aac2ce735e51f16ef47b309c49a0d1a2b1ced65 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_AREAS @@ -0,0 +1 @@ +CONFIG_CMA_AREAS=19 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..64ff80c566819751ecda91f993f9251aa333b0b9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUG @@ -0,0 +1 @@ +# CONFIG_CMA_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUGFS new file mode 100644 index 0000000000000000000000000000000000000000..fba89903a06c5fbdd863603aecd21230e9696ed2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_CMA_DEBUGFS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MAX b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MAX new file mode 100644 index 0000000000000000000000000000000000000000..e8ad8cf05cdcf53f167a1b2f95c9f2d14d867c82 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MAX @@ -0,0 +1 @@ +# CONFIG_CMA_SIZE_SEL_MAX is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MBYTES b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MBYTES new file mode 100644 index 0000000000000000000000000000000000000000..2a76a105cd03f01d7aa7066783406673a2b1b62b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MBYTES @@ -0,0 +1 @@ +CONFIG_CMA_SIZE_SEL_MBYTES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MIN b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MIN new file mode 100644 index 0000000000000000000000000000000000000000..2748b1eb698a112033581327d45803ee39f20065 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MIN @@ -0,0 +1 @@ +# CONFIG_CMA_SIZE_SEL_MIN is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_PERCENTAGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_PERCENTAGE new file mode 100644 index 0000000000000000000000000000000000000000..a23118a96938f279685a9ce8227aa456aa618a10 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_PERCENTAGE @@ -0,0 +1 @@ +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CNIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CNIC new file mode 100644 index 0000000000000000000000000000000000000000..b32c2cc2af81a52ad5c8d8f7d120958a1e5b8e51 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CNIC @@ -0,0 +1 @@ +CONFIG_CNIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT new file mode 100644 index 0000000000000000000000000000000000000000..9b072bae787ef066b6e2c7025d4db8bdffbe47b3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT @@ -0,0 +1 @@ +CONFIG_COMPAT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_32BIT_TIME b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_32BIT_TIME new file mode 100644 index 0000000000000000000000000000000000000000..da143b00f8bccf7025c9c0eb045db9b6f2798485 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_32BIT_TIME @@ -0,0 +1 @@ +CONFIG_COMPAT_32BIT_TIME=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_BRK b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_BRK new file mode 100644 index 0000000000000000000000000000000000000000..e05246612c550930f77f68b85f8caee1b684be3b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_BRK @@ -0,0 +1 @@ +# CONFIG_COMPAT_BRK is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPILE_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPILE_TEST new file mode 100644 index 0000000000000000000000000000000000000000..bcee8efc30e2f5882fd67cbb2c486285f0b02d27 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPILE_TEST @@ -0,0 +1 @@ +# CONFIG_COMPILE_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CONNECTOR b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONNECTOR new file mode 100644 index 0000000000000000000000000000000000000000..ea191496dbdae4d8671af210c5ab51fe29584162 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONNECTOR @@ -0,0 +1 @@ +CONFIG_CONNECTOR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..7f3f03e39dafbc6bf74c84e590404be9244f2877 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_DEFAULT @@ -0,0 +1 @@ +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_QUIET b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_QUIET new file mode 100644 index 0000000000000000000000000000000000000000..0c5771bca6ff4609ee42a9ff03d6aac45ede68cc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_QUIET @@ -0,0 +1 @@ +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS new file mode 100644 index 0000000000000000000000000000000000000000..db11fb1a7fa8d55017d1dee008c342bfc4ea97c5 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS @@ -0,0 +1 @@ +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE new file mode 100644 index 0000000000000000000000000000000000000000..f99021fe213769708763f393a0312ee080709932 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE new file mode 100644 index 0000000000000000000000000000000000000000..e8723b1ef09e8d80fde41bdc7c6109fb2c719a25 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE @@ -0,0 +1 @@ +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL new file mode 100644 index 0000000000000000000000000000000000000000..c08cd0d5dd7232fb2428325dab54a7fef3b789ed --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL @@ -0,0 +1 @@ +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE new file mode 100644 index 0000000000000000000000000000000000000000..896fd6dbff5ef5c013fc2686f2dbd410dd15dc6e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE @@ -0,0 +1 @@ +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_CONSERVATIVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_CONSERVATIVE new file mode 100644 index 0000000000000000000000000000000000000000..ff981a945db741fef63116e6fb89ac14bae7e9b6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_CONSERVATIVE @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_ONDEMAND b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_ONDEMAND new file mode 100644 index 0000000000000000000000000000000000000000..fb152cb920432c76f14220b733875c7ccaef0642 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_ONDEMAND @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_ONDEMAND=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_PERFORMANCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_PERFORMANCE new file mode 100644 index 0000000000000000000000000000000000000000..2bf548fc409a16b1141bfda0bf6402a1eb324166 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_PERFORMANCE @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_POWERSAVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_POWERSAVE new file mode 100644 index 0000000000000000000000000000000000000000..a22c379626bb7eac540c337ab7817a6f8b217032 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_POWERSAVE @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_POWERSAVE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_SCHEDUTIL 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_SCHEDUTIL new file mode 100644 index 0000000000000000000000000000000000000000..0aec996431ace27e6dc908060004fec3d86c1d63 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_SCHEDUTIL @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_USERSPACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_USERSPACE new file mode 100644 index 0000000000000000000000000000000000000000..8c1bc6848c91cb5d970e34a1bd852e391ab4192a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_USERSPACE @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_USERSPACE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_STAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_STAT new file mode 100644 index 0000000000000000000000000000000000000000..ea0bc7f5397f5626d306e2bff63e1b940a3bef60 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_STAT @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_STAT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_LADDER b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_LADDER new file mode 100644 index 0000000000000000000000000000000000000000..776a3b20b233ca07d52119bf6d02a52ab69ccccc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_LADDER @@ -0,0 +1 @@ +# CONFIG_CPU_IDLE_GOV_LADDER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_MENU b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_MENU new file mode 100644 index 0000000000000000000000000000000000000000..38d24a2762fcabe9ce466c4672518a5d574a47f1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_MENU @@ -0,0 +1 @@ +CONFIG_CPU_IDLE_GOV_MENU=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_TEO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_TEO new file mode 100644 index 0000000000000000000000000000000000000000..6bb1788aa034a4ca74c61ccdf68cf86bb5eb78f7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_TEO @@ -0,0 +1 @@ +# CONFIG_CPU_IDLE_GOV_TEO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_BLOCKDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_BLOCKDEV new file mode 100644 index 0000000000000000000000000000000000000000..c5433919b939b4b16da6540928ee18359f1cc8c2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_BLOCKDEV @@ -0,0 +1 @@ +CONFIG_CRAMFS_BLOCKDEV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_MTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_MTD new file mode 100644 index 0000000000000000000000000000000000000000..d5c14020ff22a843d3d9649a200986ca6de1bc90 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_MTD @@ -0,0 +1 @@ +# CONFIG_CRAMFS_MTD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CROSS_MEMORY_ATTACH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CROSS_MEMORY_ATTACH new file mode 100644 index 0000000000000000000000000000000000000000..e960b10202c44155eed133ee3872011bf4c72e4a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CROSS_MEMORY_ATTACH @@ -0,0 +1 @@ +CONFIG_CROSS_MEMORY_ATTACH=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_842 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_842 new file mode 100644 index 0000000000000000000000000000000000000000..a5e4ffef2dcf1a3b67a12b97b9f691b8575d6eb3 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_842 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_842 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ACOMP2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ACOMP2 new file mode 100644 index 0000000000000000000000000000000000000000..87f3fab93216314a4804ddfbc44d0518be64fbd3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ACOMP2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_ACOMP2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ADIANTUM b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ADIANTUM new file mode 100644 index 0000000000000000000000000000000000000000..ef4db40e328709f30d1ad46a6a61f5f2dca3832e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ADIANTUM @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ADIANTUM is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AEGIS128 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AEGIS128 new file mode 100644 index 0000000000000000000000000000000000000000..d5748cf4c3e850307aad4ab00093b5d0f003c456 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AEGIS128 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_AEGIS128 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AES_TI b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AES_TI new file mode 100644 index 0000000000000000000000000000000000000000..de13d3ebe487994f89117c2985301db47401f382 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AES_TI @@ -0,0 +1 @@ +# CONFIG_CRYPTO_AES_TI is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANSI_CPRNG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANSI_CPRNG new file mode 100644 index 0000000000000000000000000000000000000000..eca113843b6cf523a37ece527cb3fafb9b2722ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANSI_CPRNG @@ -0,0 +1 @@ +CONFIG_CRYPTO_ANSI_CPRNG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANUBIS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANUBIS new file mode 100644 index 0000000000000000000000000000000000000000..f7884852a984061915a4761a9b1c7264b1245b24 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANUBIS @@ -0,0 +1 @@ +CONFIG_CRYPTO_ANUBIS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ARC4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ARC4 new file mode 100644 index 0000000000000000000000000000000000000000..08d710ec50795c9891ff9062c6814346a501f9fd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ARC4 @@ -0,0 +1 @@ +CONFIG_CRYPTO_ARC4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AUTHENC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AUTHENC new file mode 100644 index 0000000000000000000000000000000000000000..07d7a88a55644d4c52fb04806fe8484a22fa68b3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AUTHENC @@ -0,0 +1 @@ +CONFIG_CRYPTO_AUTHENC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLAKE2B b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLAKE2B new file mode 100644 index 0000000000000000000000000000000000000000..13f92b91f5b6cc852145c6fe6e875f155c10bca1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLAKE2B @@ -0,0 +1 @@ +CONFIG_CRYPTO_BLAKE2B=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH new file mode 100644 index 
0000000000000000000000000000000000000000..5145b0c977ec8eb22cded5692b014f5aca314ea9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH
@@ -0,0 +1 @@
+CONFIG_CRYPTO_BLOWFISH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH_COMMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..5a8ded61a18b3b9c154655302a224eccc6ae8b1e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH_COMMON
@@ -0,0 +1 @@
+CONFIG_CRYPTO_BLOWFISH_COMMON=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAMELLIA b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAMELLIA
new file mode 100644
index 0000000000000000000000000000000000000000..5781d7da1d33dd33411f39e0fc562691c9b26460
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAMELLIA
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CAMELLIA=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST5 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST5
new file mode 100644
index 0000000000000000000000000000000000000000..3dd03560f85dabb980b09d2d3586dc6768547c94
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST5
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CAST5=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST6
new file mode 100644
index 0000000000000000000000000000000000000000..6c52c80f3e7c59ee9080c087f775b0d93e4a2264
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST6
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CAST6=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST_COMMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..f89da04cc6fd465591315f267c6dd59d72d2e64e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST_COMMON
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CAST_COMMON=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CBC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CBC
new file mode 100644
index 0000000000000000000000000000000000000000..c501e8e03ffa876362847161382e7b61807e4d50
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CBC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CBC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CCM b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CCM
new file mode 100644
index 0000000000000000000000000000000000000000..f552c9a96c5eea527cf61c97b0360a3b8245f765
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CCM
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CCM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CFB b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CFB
new file mode 100644
index 0000000000000000000000000000000000000000..6f257b4545d49546957c3b06757cd22e545fe9c3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CFB
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CFB=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20
new file mode 100644
index 0000000000000000000000000000000000000000..1eb6ab8d33e86853fe2f7d7244b4c133710f2fed
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CHACHA20=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20POLY1305 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20POLY1305
new file mode 100644
index 0000000000000000000000000000000000000000..09126b7b2ac9491639b01d4405efa9b82d1d2e19
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20POLY1305
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CHACHA20POLY1305=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CMAC
new file mode 100644
index 0000000000000000000000000000000000000000..587b31509430a6d2e7bab1288bb11491dbdf7d4d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CMAC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CMAC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32
new file mode 100644
index 0000000000000000000000000000000000000000..9ab72d65224853ca34a40463871bd3888cb35eac
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CRC32=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32C b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32C
new file mode 100644
index 0000000000000000000000000000000000000000..9323d8397fe8e2dc1b00156c73c5433b0e890b68
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32C
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CRC32C=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRCT10DIF b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRCT10DIF
new file mode 100644
index 0000000000000000000000000000000000000000..eb632d3f0de8cbd979365145787d0ac25a6a63bb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRCT10DIF
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CRCT10DIF=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRYPTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRYPTD
new file mode 100644
index 0000000000000000000000000000000000000000..36649cff393d01e585756c08053f10701c029200
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRYPTD
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CRYPTD=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTR
new file mode 100644
index 0000000000000000000000000000000000000000..63e15a0d8da78d88b4233e4d7e9166159142d221
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTR
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CTR=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTS
new file mode 100644
index 0000000000000000000000000000000000000000..e7ff7297caed7e93b1260d470b077914defc1b32
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTS
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CTS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEFLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEFLATE
new file mode 100644
index 0000000000000000000000000000000000000000..071307ba639c4cb5138667037381f905a5403809
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEFLATE
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEFLATE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DES b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DES
new file mode 100644
index 0000000000000000000000000000000000000000..e8145a59a8845abcb91737a663b3feaf8ea786d2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DES
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DES=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP
new file mode 100644
index 0000000000000000000000000000000000000000..d2f5497f2d95cd535072352381c2c1017095e3e1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_CCP=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_CRYPTO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_CRYPTO
new file mode 100644
index 0000000000000000000000000000000000000000..4887c6d76436ee5336899d0c9694032dc336d461
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_CRYPTO
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_DD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_DD
new file mode 100644
index 0000000000000000000000000000000000000000..371645117adf61f54e5d4abff63883cb2bfa3b47
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_DD
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_CCP_DD=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_DEBUGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_DEBUGFS
new file mode 100644
index 0000000000000000000000000000000000000000..fe46585daea7ca223ac15af4bcc3df81ae2cc422
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_DEBUGFS
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_SP_CCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_SP_CCP
new file mode 100644
index 0000000000000000000000000000000000000000..413ad9c2adce95456c7831d5a646fd0b5128d89b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_SP_CCP
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_SP_CCP=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO
new file mode 100644
index 0000000000000000000000000000000000000000..2997f10e21740ee3b664e7c3d7a3a5259e54875b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_VIRTIO=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DH
new file mode 100644
index 0000000000000000000000000000000000000000..ea06ab3c25c5752912def80c0afcee58af17ae5e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DH
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG
new file mode 100644
index 0000000000000000000000000000000000000000..c31236d2472b858d625a4548ee7461f712f3cb4e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DRBG=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_CTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_CTR
new file mode 100644
index 0000000000000000000000000000000000000000..1c292fe5ca6685d680c4c66e21316671fe24a169
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_CTR
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HASH
new file mode 100644
index 0000000000000000000000000000000000000000..2ec6b20ee10f4879d8d0714302b475ade8c207c1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HASH
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DRBG_HASH=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HMAC
new file mode 100644
index 0000000000000000000000000000000000000000..d61f0434fc03f5a7fa081499a7d9072ee14630b3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HMAC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DRBG_HMAC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_MENU b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_MENU
new file mode 100644
index 0000000000000000000000000000000000000000..f11183adbf558703f434f1e61edafb1bf62da171
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_MENU
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DRBG_MENU=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECB b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECB
new file mode 100644
index 0000000000000000000000000000000000000000..bc645abc9053b35a12ee235bef6ea8f21197e335
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECB
@@ -0,0 +1 @@
+CONFIG_CRYPTO_ECB=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECC
new file mode 100644
index 0000000000000000000000000000000000000000..0e9e5705f32c9b1d42ac3f9855ea10ef7ca520f7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_ECC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDH
new file mode 100644
index 0000000000000000000000000000000000000000..0244ab63e71a63fb4fa10840d55e8850334aa541
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDH
@@ -0,0 +1 @@
+CONFIG_CRYPTO_ECDH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECHAINIV b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECHAINIV
new file mode 100644
index 0000000000000000000000000000000000000000..f6f8d76f37cb685b811f5f70be77ffc9eb7eb65e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECHAINIV
@@ -0,0 +1 @@
+CONFIG_CRYPTO_ECHAINIV=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ESSIV b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ESSIV
new file mode 100644
index 0000000000000000000000000000000000000000..9c914d4467a00c2e3f1915298576f4852475fa78
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ESSIV
@@ -0,0 +1 @@
+CONFIG_CRYPTO_ESSIV=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FCRYPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FCRYPT
new file mode 100644
index 0000000000000000000000000000000000000000..e3905ab84c573212f4afb7f4fbd5f23f47154c2a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FCRYPT
@@ -0,0 +1 @@
+CONFIG_CRYPTO_FCRYPT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FIPS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FIPS
new file mode 100644
index 0000000000000000000000000000000000000000..a948c820830752a8d9b99118b50fa9ce74e5bf34
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FIPS
@@ -0,0 +1 @@
+CONFIG_CRYPTO_FIPS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HASH_INFO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HASH_INFO
new file mode 100644
index 0000000000000000000000000000000000000000..d511eacd5eeb14e6ff08a1180c1abe7e93fdb7d9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HASH_INFO
@@ -0,0 +1 @@
+CONFIG_CRYPTO_HASH_INFO=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HMAC
new file mode 100644
index 0000000000000000000000000000000000000000..2aa4086a0a70d55ca105a7a7140cadb52b3d9ccd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HMAC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_HMAC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HW b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HW
new file mode 100644
index 0000000000000000000000000000000000000000..245062d86f4331b8724311624d2ed25b625f1235
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HW
@@ -0,0 +1 @@
+CONFIG_CRYPTO_HW=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_JITTERENTROPY b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_JITTERENTROPY
new file mode 100644
index 0000000000000000000000000000000000000000..bee3ca16f7dc950e3a54ef9df2173e8286f544f6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_JITTERENTROPY
@@ -0,0 +1 @@
+CONFIG_CRYPTO_JITTERENTROPY=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KEYWRAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KEYWRAP
new file mode 100644
index 0000000000000000000000000000000000000000..75daba15358e5d8989e3e8c2fb822e6982ebbe4e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KEYWRAP
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_KEYWRAP is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KHAZAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KHAZAD
new file mode 100644
index 0000000000000000000000000000000000000000..23d94b45f5fdfa0fa2a101cf24e44a52cf2b94d0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KHAZAD
@@ -0,0 +1 @@
+CONFIG_CRYPTO_KHAZAD=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP
new file mode 100644
index 0000000000000000000000000000000000000000..47c6b01604c6ac196548fa6b6e071dff05f87924
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP
@@ -0,0 +1 @@
+CONFIG_CRYPTO_KPP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP2
new file mode 100644
index 0000000000000000000000000000000000000000..005f1b813d15d400a4faf9f363e0394a3c5eca0f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP2
@@ -0,0 +1 @@
+CONFIG_CRYPTO_KPP2=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LRW b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LRW
new file mode 100644
index 0000000000000000000000000000000000000000..22ce862b83e95dccce11ba2b154b738b9eff825d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LRW
@@ -0,0 +1 @@
+CONFIG_CRYPTO_LRW=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4
new file mode 100644
index 0000000000000000000000000000000000000000..2f1d09a8a1733fa10ec8259f337ea33edd3a485c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4
@@ -0,0 +1 @@
+CONFIG_CRYPTO_LZ4=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4HC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4HC
new file mode 100644
index 0000000000000000000000000000000000000000..9f1a3d6a2d9bf512ae025329eec2dfe01063562b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4HC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_LZ4HC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZO
new file mode 100644
index 0000000000000000000000000000000000000000..418ab2f794d3e95ad287237c789b804a10206f9b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZO
@@ -0,0 +1 @@
+CONFIG_CRYPTO_LZO=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_DISABLE_TESTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
new file mode 100644
index 0000000000000000000000000000000000000000..2b7116fd210a2a435260249a74501ba1872dc738
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_EXTRA_TESTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
new file mode 100644
index 0000000000000000000000000000000000000000..b27d3dbb75662e224217b6ae253564c05b052203
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD4
new file mode 100644
index 0000000000000000000000000000000000000000..52b52847546ac53c82b5040025d2e8b6c85e780e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD4
@@ -0,0 +1 @@
+CONFIG_CRYPTO_MD4=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD5 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD5
new file mode 100644
index 0000000000000000000000000000000000000000..4ae53bd159f4c113173fd2d076f8b1c80ffd2260
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD5
@@ -0,0 +1 @@
+CONFIG_CRYPTO_MD5=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MICHAEL_MIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MICHAEL_MIC
new file mode 100644
index 0000000000000000000000000000000000000000..7075e63ecffa8973092bc3092eb8796f6df994a7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MICHAEL_MIC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL
new file mode 100644
index 0000000000000000000000000000000000000000..0747b1197264073c55dcfb597175ff1db028aeea
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL
@@ -0,0 +1 @@
+CONFIG_CRYPTO_NULL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL2
new file mode 100644
index 0000000000000000000000000000000000000000..471900dda2455bd0a0f7b6ae159478867fcebca4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL2
@@ -0,0 +1 @@
+CONFIG_CRYPTO_NULL2=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_OFB b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_OFB
new file mode 100644
index 0000000000000000000000000000000000000000..2874ba2d83f3d821e0e4136ea640ff9f04573fba
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_OFB
@@ -0,0 +1 @@
+CONFIG_CRYPTO_OFB=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCBC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCBC
new file mode 100644
index 0000000000000000000000000000000000000000..b59ce6d78181a45c07e385193f982d7a49397829
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCBC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_PCBC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCRYPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCRYPT
new file mode 100644
index 0000000000000000000000000000000000000000..59772cc021a88f35d4f0a71e0259edc2e90f657e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCRYPT
@@ -0,0 +1 @@
+CONFIG_CRYPTO_PCRYPT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_POLY1305 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_POLY1305
new file mode 100644
index 0000000000000000000000000000000000000000..71c5d4b3865a3fe824faa455d4eaf1e1946dc095
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_POLY1305
@@ -0,0 +1 @@
+CONFIG_CRYPTO_POLY1305=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RMD160 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RMD160
new file mode 100644
index 0000000000000000000000000000000000000000..7fd4b816b0cf6b1f80844662d9875d1203ad5919
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RMD160
@@ -0,0 +1 @@
+CONFIG_CRYPTO_RMD160=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT
new file mode 100644
index 0000000000000000000000000000000000000000..fb0dd802353e5897a667adfa41abd3980371b122
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT
@@ -0,0 +1 @@
+CONFIG_CRYPTO_RNG_DEFAULT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEED b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEED
new file mode 100644
index 0000000000000000000000000000000000000000..09ec68ec6ad03bc024add24b47e252d8874cae15
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEED
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SEED=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV
new file mode 100644
index 0000000000000000000000000000000000000000..7be12018f24f9833c71ecd6382cb6641f2978e4b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SEQIV=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SERPENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SERPENT
new file mode 100644
index 0000000000000000000000000000000000000000..f8a7bfbb59779ba70bc1aa90b4fce50b6d081b21
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SERPENT
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SERPENT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA1 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA1
new file mode 100644
index 0000000000000000000000000000000000000000..82e74b118961b304772056a7221922b1c219c127
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA1
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SHA1=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA3
new file mode 100644
index 0000000000000000000000000000000000000000..c98ae6eb65e1e8218facb20dd79750f5c85b7a25
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA3
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SHA3=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA512 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA512
new file mode 100644
index 0000000000000000000000000000000000000000..5c25197e538bc8697805cae8868e9ee248080820
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA512
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SHA512=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STATS
new file mode 100644
index 0000000000000000000000000000000000000000..058c5ef85f1d6bc3014f0fb94f1c2cb9d9c51d48
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STATS
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_STATS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STREEBOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STREEBOG
new file mode 100644
index 0000000000000000000000000000000000000000..67ae425b2da18ce8650fdc081591015b2a6307f3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STREEBOG
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_STREEBOG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEA b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEA
new file mode 100644
index 0000000000000000000000000000000000000000..53982fa7c62d1dab4292e22d1e965c8a5bb51a2c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEA
@@ -0,0 +1 @@
+CONFIG_CRYPTO_TEA=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEST
new file mode 100644
index 0000000000000000000000000000000000000000..a2e883781cae90eb594f5078345101a457c55276
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEST
@@ -0,0 +1 @@
+CONFIG_CRYPTO_TEST=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH
new file mode 100644
index 0000000000000000000000000000000000000000..41e8b34b85d0d306ff3d8c55fd8160a250568ce8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH
@@ -0,0 +1 @@
+CONFIG_CRYPTO_TWOFISH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH_COMMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..92d3fd0bd3ddd98da3a1d3e1a1164fbb84604173
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH_COMMON
@@ -0,0 +1 @@
+CONFIG_CRYPTO_TWOFISH_COMMON=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER
new file mode 100644
index 0000000000000000000000000000000000000000..9bd523374490a9606db440c4a3b3325ad1c434bc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER
@@ -0,0 +1 @@
+CONFIG_CRYPTO_USER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API
new file mode 100644
index 0000000000000000000000000000000000000000..e99d1bf5c683d56e9d167070732dc025794a05a6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API
@@ -0,0 +1 @@
+CONFIG_CRYPTO_USER_API=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_AEAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_AEAD
new file mode 100644
index 0000000000000000000000000000000000000000..01f9c504d242092085cf0f19fa101a7466de7ead
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_AEAD
@@ -0,0 +1 @@
+CONFIG_CRYPTO_USER_API_AEAD=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE
new file mode 100644
index 0000000000000000000000000000000000000000..21d316c28741bbd4c3a9249c5a878f9145c088c9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE
@@ -0,0 +1 @@
+CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_HASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_HASH
new file mode 100644
index 0000000000000000000000000000000000000000..d5d61c6547f4a7fbe070f3148c4b50ef3f0d8697
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_HASH
@@ -0,0 +1 @@
+CONFIG_CRYPTO_USER_API_HASH=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG
new file mode 100644
index 0000000000000000000000000000000000000000..3562fafe7bb68b7701bd1c66a3b4655cbc5e11e2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG
@@ -0,0 +1 @@
+CONFIG_CRYPTO_USER_API_RNG=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG_CAVP b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG_CAVP
new file mode 100644
index 0000000000000000000000000000000000000000..7826178972a973c9c73f106685b4d3f3aedef45b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG_CAVP
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_SKCIPHER b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_SKCIPHER
new file mode 100644
index 0000000000000000000000000000000000000000..4a7da08a5ec1160e9eaad7a698ef655e7ca1ea20
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_SKCIPHER
@@ -0,0 +1 @@
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_VMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_VMAC
new file mode 100644
index 0000000000000000000000000000000000000000..eb719b78e850ad2aa7b3f4e61e0df89444a7e4a5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_VMAC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_VMAC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_WP512 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_WP512
new file mode 100644
index 0000000000000000000000000000000000000000..d907a1abecbb51008567ec8066a5e51cfad0add5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_WP512
@@ -0,0 +1 @@
+CONFIG_CRYPTO_WP512=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XCBC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XCBC
new file mode 100644
index 0000000000000000000000000000000000000000..8984fb2e7c5fe46a94d66089e19c47a203f6ac80
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XCBC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_XCBC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XTS
new file mode 100644
index 0000000000000000000000000000000000000000..b7e82261c0f310d599c6fa40fd398a451da0541a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XTS
@@ -0,0 +1 @@
+CONFIG_CRYPTO_XTS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XXHASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XXHASH
new file mode 100644
index 0000000000000000000000000000000000000000..95c76019f410b0b1c4f106970b94ca1a3576c0b0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XXHASH
@@ -0,0 +1 @@
+CONFIG_CRYPTO_XXHASH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ZSTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ZSTD
new file mode 100644
index 0000000000000000000000000000000000000000..3c80ecf2ad2f3c7589d141db7d3c608937016f54
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ZSTD
@@ -0,0 +1 @@
+CONFIG_CRYPTO_ZSTD=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CUSE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CUSE
new file mode 100644
index 0000000000000000000000000000000000000000..9796e51dabc3de9fa16bc392f557f56cc8990d46
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CUSE
@@ -0,0 +1 @@
+CONFIG_CUSE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_ACPI b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_ACPI
new file mode 100644
index 0000000000000000000000000000000000000000..8e96f120a919a0e83cb9d735a2d5ed67bed3687b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_ACPI
@@ -0,0 +1 @@
+CONFIG_CXL_ACPI=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM
new file mode 100644
index 0000000000000000000000000000000000000000..25be7c22471afa2994d2be694fcd4517c6c4b591
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM
@@ -0,0 +1 @@
+CONFIG_CXL_MEM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM_RAW_COMMANDS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM_RAW_COMMANDS
new file mode 100644
index 0000000000000000000000000000000000000000..7349bf47042e0e0655baada2d0aebd311ca4a539
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM_RAW_COMMANDS
@@ -0,0 +1 @@
+# CONFIG_CXL_MEM_RAW_COMMANDS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PCI b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..6f46cf249a8eba56bcf75ef754b9530bf0402d92
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PCI
@@ -0,0 +1 @@
+CONFIG_CXL_PCI=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMEM
new file mode 100644
index 0000000000000000000000000000000000000000..8ada02548d151595d4df0b4e0806dd7cc7bb201a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMEM
@@ -0,0 +1 @@
+CONFIG_CXL_PMEM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMU b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMU
new file mode 100644
index 0000000000000000000000000000000000000000..0ad05f80c2ab7b010331dffe6e09cb7ba92262f1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMU
@@ -0,0 +1 @@
+CONFIG_CXL_PMU=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PORT
new file mode 100644
index 0000000000000000000000000000000000000000..20d91ef81d80b8ba56cd7c63688bb88680e114af
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PORT
@@ -0,0 +1 @@
+CONFIG_CXL_PORT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_REGION b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_REGION
new file mode 100644
index 0000000000000000000000000000000000000000..3a4b4b45bb7d68776945dc750a29ff1b5343af7e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_REGION
@@ -0,0 +1 @@
+CONFIG_CXL_REGION=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_SUSPEND b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_SUSPEND
new file mode 100644
index 0000000000000000000000000000000000000000..b9f6e99529ec75d3e92a5daec7b42b31cf000a67
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_SUSPEND
@@ -0,0 +1 @@
+CONFIG_CXL_SUSPEND=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON
new file mode 100644
index 0000000000000000000000000000000000000000..05e0ca4a8df42df0f5137bc0e7b3b5958550222d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON
@@ -0,0 +1 @@
+CONFIG_DAMON=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_DBGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_DBGFS
new file mode 100644
index 0000000000000000000000000000000000000000..ab10113a5f985aa9798698e2dc5aed33068cab36
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_DBGFS
@@ -0,0 +1 @@
+CONFIG_DAMON_DBGFS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_PADDR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_PADDR
new file mode 100644
index 0000000000000000000000000000000000000000..8941ac88a9da5214cc6e835173a71acad653feb3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_PADDR
@@ -0,0 +1 @@
+CONFIG_DAMON_PADDR=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_VADDR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_VADDR
new file mode 100644
index 0000000000000000000000000000000000000000..ee67a052d7a914aa6c29912940f40a7ef5d2e98f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_VADDR
@@ -0,0 +1 @@
+CONFIG_DAMON_VADDR=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DCB b/anolis/configs/L1-RECOMMEND/default/CONFIG_DCB
new file mode 100644
index 0000000000000000000000000000000000000000..01340a052800f09734863fc284f32c052f10a13a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DCB
@@ -0,0 +1 @@
+CONFIG_DCB=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FORCE_WEAK_PER_CPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FORCE_WEAK_PER_CPU
new file mode 100644
index 0000000000000000000000000000000000000000..2d64b977959eb8c5c70e47b2b559d95a849785a9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FORCE_WEAK_PER_CPU
@@ -0,0 +1 @@
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_ALL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_ALL
new file mode 100644
index 0000000000000000000000000000000000000000..69490c3a53d55ffcce7c81bdbbdf018b4d3b5da8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_ALL
@@ -0,0 +1 @@
+CONFIG_DEBUG_FS_ALLOW_ALL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_NONE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_NONE
new file mode 100644
index 0000000000000000000000000000000000000000..2d2a249b14c98cc770fd40f25e837b40fce20a97
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_NONE
@@ -0,0 +1 @@
+# CONFIG_DEBUG_FS_ALLOW_NONE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_DISALLOW_MOUNT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_DISALLOW_MOUNT
new file mode 100644
index 0000000000000000000000000000000000000000..d70cb272a1006a4fcf10337c4a1190db1f2c91f0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_DISALLOW_MOUNT
@@ -0,0 +1 @@
+# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_BTF_MODULES b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_BTF_MODULES
new file mode 100644
index 0000000000000000000000000000000000000000..bc7bbace27c7995376053d7c49a1b01c45a6b041
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_BTF_MODULES
@@ -0,0 +1 @@
+CONFIG_DEBUG_INFO_BTF_MODULES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
new file mode 100644
index 0000000000000000000000000000000000000000..f0c49fae6b0cf30338d6b67bbd9718364765c533
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
@@ -0,0 +1 @@
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_REDUCED b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_REDUCED
new file mode 100644
index 0000000000000000000000000000000000000000..e78eada40b6e47bf71c75f485f3c3e9bf8bec28a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_REDUCED
@@ -0,0 +1 @@
+# CONFIG_DEBUG_INFO_REDUCED is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_SPLIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_SPLIT
new file mode 100644
index 0000000000000000000000000000000000000000..dbce5882ebaf68bf33159d399b91559e5addf683
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_SPLIT
@@ -0,0 +1 @@
+# CONFIG_DEBUG_INFO_SPLIT is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KERNEL_DC b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KERNEL_DC
new file mode 100644
index 0000000000000000000000000000000000000000..8cd6a5085e79c388cf43e1e78fe9992cff953ce2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KERNEL_DC
@@ -0,0 +1 @@
+# CONFIG_DEBUG_KERNEL_DC is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KMEMLEAK b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KMEMLEAK
new file mode 100644
index 0000000000000000000000000000000000000000..40fa8633d035925e456fa824a6b684b1f884534c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KMEMLEAK
@@ -0,0 +1 @@
+# CONFIG_DEBUG_KMEMLEAK is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_LIST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_LIST
new file mode 100644
index 0000000000000000000000000000000000000000..b5386ce11f168111c018e84b68156906d790af2c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_LIST
@@ -0,0 +1 @@
+CONFIG_DEBUG_LIST=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_MEMORY_INIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_MEMORY_INIT
new file mode 100644
index 0000000000000000000000000000000000000000..22b5d941923344e4a1d42a8301ae8bb7f4b278d0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_MEMORY_INIT
@@ -0,0 +1 @@
+CONFIG_DEBUG_MEMORY_INIT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_NOTIFIERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_NOTIFIERS
new file mode 100644
index 0000000000000000000000000000000000000000..4f6c1fe4c5b08e1f6415f4519ad119301e81c561
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_NOTIFIERS
@@ -0,0 +1 @@
+# CONFIG_DEBUG_NOTIFIERS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGEALLOC b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGEALLOC
new file mode 100644
index 0000000000000000000000000000000000000000..0e1c7855b0dd102183924639f7f0d0b845a4a44e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGEALLOC
@@ -0,0 +1 @@
+# CONFIG_DEBUG_PAGEALLOC is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGE_REF b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGE_REF
new file mode 100644
index 0000000000000000000000000000000000000000..c35abbd89164215c81209a5969b8afb20df6b6bd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGE_REF
@@ -0,0 +1 @@
+# CONFIG_DEBUG_PAGE_REF is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PLIST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PLIST
new file mode 100644
index 0000000000000000000000000000000000000000..602b2be892788062dbdfcc6c5fc16cde09b475a9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PLIST
@@ -0,0 +1 @@
+# CONFIG_DEBUG_PLIST is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_RODATA_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_RODATA_TEST
new file mode 100644
index 0000000000000000000000000000000000000000..c56477430b900128b3bebf3ef875c466ae279519
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_RODATA_TEST
@@ -0,0 +1 @@
+# CONFIG_DEBUG_RODATA_TEST is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_SG b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_SG
new file mode 100644
index 0000000000000000000000000000000000000000..8c3ab98dd6fbfd21a211146323dbe10f424209c9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_SG
@@ -0,0 +1 @@
+# CONFIG_DEBUG_SG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_WX b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_WX
new file mode 100644
index 0000000000000000000000000000000000000000..932dfd61d8f27db95900369f6e30c7e23ce22f70
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_WX
@@ -0,0 +1 @@
+# CONFIG_DEBUG_WX is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_CODEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_CODEL
new file mode 100644
index 0000000000000000000000000000000000000000..4f8ee1d8124c89087abe3415819351b4bed6f7e3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_CODEL
@@ -0,0 +1 @@
+# CONFIG_DEFAULT_CODEL is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ
new file mode 100644
index 0000000000000000000000000000000000000000..d2b368f2b7e01a97c73f9cb65c3089841db3b081
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ
@@ -0,0 +1 @@
+# CONFIG_DEFAULT_FQ is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ_CODEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ_CODEL
new file mode 100644
index 0000000000000000000000000000000000000000..e648072672bae550f3fb12e992f8eaac69d4f250
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ_CODEL
@@ -0,0 +1 @@
+CONFIG_DEFAULT_FQ_CODEL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_HUNG_TASK_TIMEOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_HUNG_TASK_TIMEOUT
new file mode 100644
index 0000000000000000000000000000000000000000..715091634ea052f56ed4250e13e01e07cc8d4cce
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_HUNG_TASK_TIMEOUT
@@ -0,0 +1 @@
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_MMAP_MIN_ADDR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_MMAP_MIN_ADDR
new file mode 100644
index 0000000000000000000000000000000000000000..68e78b7aa230f42a98911908223a1a9265001324
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_MMAP_MIN_ADDR
@@ -0,0 +1 @@
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_NET_SCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_NET_SCH
new file mode 100644
index 0000000000000000000000000000000000000000..26ffbc18bf7698808f766dc4a18d91273195fac8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_NET_SCH
@@ -0,0 +1 @@
+CONFIG_DEFAULT_NET_SCH="fq_codel"
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_PFIFO_FAST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_PFIFO_FAST
new file mode 100644
index 0000000000000000000000000000000000000000..4e4f4f3d61d9b38e351913c24aec3811c63c1a1c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_PFIFO_FAST
@@ -0,0 +1 @@
+# CONFIG_DEFAULT_PFIFO_FAST is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_RENO b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_RENO
new file mode 100644
index 0000000000000000000000000000000000000000..b284b6c3ea8405b46b3c79b531624365c12fdfa8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_RENO
@@ -0,0 +1 @@
+# CONFIG_DEFAULT_RENO is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SECURITY_SELINUX b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SECURITY_SELINUX
new file mode 100644
index 0000000000000000000000000000000000000000..ef2f9974a530bead2f90cb0c1478c0e1b2b34b47
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SECURITY_SELINUX
@@ -0,0 +1 @@
+CONFIG_DEFAULT_SECURITY_SELINUX=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SFQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SFQ
new file mode 100644
index 0000000000000000000000000000000000000000..be80a9cf8ab76bee9c65c7d00fc75a5e75fead26
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SFQ
@@ -0,0 +1 @@
+# CONFIG_DEFAULT_SFQ is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_TCP_CONG b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_TCP_CONG
new file mode 100644
index 0000000000000000000000000000000000000000..6e69eeae9d31f3ee56467d952392b4868166b567
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_TCP_CONG
@@ -0,0 +1 @@
+CONFIG_DEFAULT_TCP_CONG="cubic"
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEVICE_PRIVATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEVICE_PRIVATE
new file mode 100644
index 0000000000000000000000000000000000000000..ef0a4ad5b9f8d6fb524420103c5c711901db67fe
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEVICE_PRIVATE
@@ -0,0 +1 @@
+CONFIG_DEVICE_PRIVATE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEVMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEVMEM
new file mode 100644
index 0000000000000000000000000000000000000000..174a3f2030123ef1e857670dfd6fe322135b3a30
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEVMEM
@@ -0,0 +1 @@
+CONFIG_DEVMEM=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_CXL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_CXL
new file mode 100644
index 0000000000000000000000000000000000000000..377ea24aa0e6fdd71a06c2b502589a26d25e6599
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_CXL
@@ -0,0 +1 @@
+CONFIG_DEV_DAX_CXL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM
new file mode 100644
index 0000000000000000000000000000000000000000..beb328c9bc42efce39236f690ffa520b8c15bba7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM
@@ -0,0 +1 @@
+CONFIG_DEV_DAX_HMEM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM_DEVICES b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM_DEVICES
new file mode 100644
index 0000000000000000000000000000000000000000..817d302f0e7d7cd93a9fc1c98ca6b71d2c9950a4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM_DEVICES
@@ -0,0 +1 @@
+CONFIG_DEV_DAX_HMEM_DEVICES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DLM b/anolis/configs/L1-RECOMMEND/default/CONFIG_DLM
new file mode 100644
index 0000000000000000000000000000000000000000..5a1c1ed7c1a31caaada55bb1036d61061eb416f5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DLM
@@ -0,0 +1 @@
+CONFIG_DLM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMADEVICES_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMADEVICES_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..7cd4fec866f78579fb0600d1a1c9432d270a34a9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMADEVICES_DEBUG
@@ -0,0 +1 @@
+# CONFIG_DMADEVICES_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMATEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMATEST
new file mode 100644
index 0000000000000000000000000000000000000000..d11f7746f8dbf01e50d70e5ea90aec13068035fd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMATEST
@@ -0,0 +1 @@
+CONFIG_DMATEST=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_API_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_API_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..cf9e6050ec290fcb7a069183fdc3b531501b6c45
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_API_DEBUG
@@ -0,0 +1 @@
+# CONFIG_DMA_API_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_CMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_CMA
new file mode 100644
index 0000000000000000000000000000000000000000..c7c1c2c4378715e524cbb3c825a332fa9c8d72cd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_CMA
@@ -0,0 +1 @@
+CONFIG_DMA_CMA=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_ENGINE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_ENGINE
new file mode 100644
index 0000000000000000000000000000000000000000..167354c015748009f461b6ce6aac6d8079a1a588
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_ENGINE
@@ -0,0 +1 @@
+CONFIG_DMA_ENGINE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMIID b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMIID
new file mode 100644
index 0000000000000000000000000000000000000000..d0ea3622a47b205af01f628a8bed7f3d52b3193a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMIID
@@ -0,0 +1 @@
+CONFIG_DMIID=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMI_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMI_SYSFS
new file mode 100644
index 0000000000000000000000000000000000000000..76565caf88f7d5ec1940eedd73c721568726b47e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMI_SYSFS
@@ -0,0 +1 @@
+CONFIG_DMI_SYSFS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE
new file mode 100644
index 0000000000000000000000000000000000000000..24a42667d2044f310acbdd9b17b776a387b64ffd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE
@@ -0,0 +1 @@
+CONFIG_DM_CACHE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE_SMQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE_SMQ
new file mode 100644
index 0000000000000000000000000000000000000000..41f26854fea096a6cfa430854dfb534f3bb0c957
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE_SMQ
@@ -0,0 +1 @@
+CONFIG_DM_CACHE_SMQ=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CRYPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CRYPT
new file mode 100644
index 0000000000000000000000000000000000000000..2ca34ebc75c2c75966b2b161239f17baeec29f98
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CRYPT
@@ -0,0 +1 @@
+CONFIG_DM_CRYPT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..1f45a5797411c9d3d180f489440cdbd2768517dd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DEBUG
@@ -0,0 +1 @@
+CONFIG_DM_DEBUG=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DELAY b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DELAY
new file mode 100644
index 0000000000000000000000000000000000000000..95fbe6249d29f3c56d8d9282d900fbe6a5c3cf3c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DELAY
@@ -0,0 +1 @@
+CONFIG_DM_DELAY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ERA b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ERA
new file mode 100644
index 0000000000000000000000000000000000000000..62543add45dfd3422a9c929870d10570485429f5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ERA
@@ -0,0 +1 @@
+CONFIG_DM_ERA=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_FLAKEY b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_FLAKEY
new file mode 100644
index 0000000000000000000000000000000000000000..a68a41332e012d3ee6f1fa4acb121b7565415e47
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_FLAKEY
@@ -0,0 +1 @@
+CONFIG_DM_FLAKEY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_INTEGRITY b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_INTEGRITY
new file mode 100644
index 0000000000000000000000000000000000000000..ee953fd2dfe398b5c14e721ad8fb11b69427ecda
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_INTEGRITY
@@ -0,0 +1 @@
+CONFIG_DM_INTEGRITY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_USERSPACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_USERSPACE
new file mode 100644
index 0000000000000000000000000000000000000000..085b4e385e4f4f7b41d75fb18241230a2274b046
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_USERSPACE
@@ -0,0 +1 @@
+CONFIG_DM_LOG_USERSPACE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_WRITES b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_WRITES
new file mode 100644
index 0000000000000000000000000000000000000000..f9030b9f4c12b5172679cbf4a628930500d3ec74
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_WRITES
@@ -0,0 +1 @@
+CONFIG_DM_LOG_WRITES=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MIRROR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MIRROR
new file mode 100644
index 0000000000000000000000000000000000000000..65d6ea60287eb064f019f8b2c2a7b6d12e6651cf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MIRROR
@@ -0,0 +1 @@
+CONFIG_DM_MIRROR=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH
new file mode 100644
index 0000000000000000000000000000000000000000..3613fcce0d1607518bf03358b165778968c33e9c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH
@@ -0,0 +1 @@
+CONFIG_DM_MULTIPATH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_QL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_QL
new file mode 100644
index 0000000000000000000000000000000000000000..d220fcab620a049b1932ea0085d20513c068f587
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_QL
@@ -0,0 +1 @@
+CONFIG_DM_MULTIPATH_QL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_ST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_ST
new file mode 100644
index 0000000000000000000000000000000000000000..6bd64251b6ebfc0c38c7c245beb69a962e8d71cc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_ST
@@ -0,0 +1 @@
+CONFIG_DM_MULTIPATH_ST=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_RAID b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_RAID
new file mode 100644
index 0000000000000000000000000000000000000000..c2387b555c52ec0a9b700b29fe34f027eec8758c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_RAID
@@ -0,0 +1 @@
+CONFIG_DM_RAID=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SNAPSHOT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SNAPSHOT
new file mode 100644
index 0000000000000000000000000000000000000000..cd20f0919f5335cb5f3d4580dc81527029505e0d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SNAPSHOT
@@ -0,0 +1 @@
+CONFIG_DM_SNAPSHOT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SWITCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SWITCH
new file mode 100644
index 0000000000000000000000000000000000000000..4dc3ff401d8b8b648eac2a5ffa5ce52410ba32f7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SWITCH
@@ -0,0 +1 @@
+CONFIG_DM_SWITCH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_THIN_PROVISIONING b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_THIN_PROVISIONING
new file mode 100644
index 0000000000000000000000000000000000000000..b5e94cbd68aa6dd5ede806dc74b7077cdda45f3c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_THIN_PROVISIONING
@@ -0,0 +1 @@
+CONFIG_DM_THIN_PROVISIONING=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_UEVENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_UEVENT
new file mode 100644
index 0000000000000000000000000000000000000000..6d88d4893efe8e6492bf8641d6114ab7161bc1be
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_UEVENT
@@ -0,0 +1 @@
+CONFIG_DM_UEVENT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_VERITY b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_VERITY
new file mode 100644
index 0000000000000000000000000000000000000000..3906845c16144a9e5399b9ef4a7a63cae3314cc9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_VERITY
@@ -0,0 +1 @@
+CONFIG_DM_VERITY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_WRITECACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_WRITECACHE
new file mode 100644
index 0000000000000000000000000000000000000000..6a1a639c882ad52b680bc26bc7685cc5524ef1df
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_WRITECACHE
@@ -0,0 +1 @@
+CONFIG_DM_WRITECACHE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZERO b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZERO
new file mode 100644
index 0000000000000000000000000000000000000000..1751792de332738ec47c9ec12fcd25c4a93999b8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZERO
@@ -0,0 +1 @@
+CONFIG_DM_ZERO=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZONED b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZONED
new file mode 100644
index 0000000000000000000000000000000000000000..8924814fb9a7bf4ded814a65a99792a72a2a51fe
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZONED
@@ -0,0 +1 @@
+# CONFIG_DM_ZONED is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU
new file mode 100644
index 0000000000000000000000000000000000000000..a7c6b65b63c0d12307ca99fd6907c5333d3be36c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU
@@ -0,0 +1 @@
+CONFIG_DRM_AMDGPU=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU_SI b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU_SI
new file mode 100644
index 0000000000000000000000000000000000000000..78dd0a3c6d27a648e593a429923fb992f20cdbf2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU_SI
@@ -0,0 +1 @@
+# CONFIG_DRM_AMDGPU_SI is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMD_DC b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMD_DC
new file mode 100644
index 0000000000000000000000000000000000000000..a642bb05ada299c1d8a44d4b97491149d250b01c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMD_DC
@@ -0,0 +1 @@
+CONFIG_DRM_AMD_DC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AST
new file mode 100644
index 0000000000000000000000000000000000000000..d427867f1336ee4d6306f98e530a38945d13e4d2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AST
@@ -0,0 +1 @@
+CONFIG_DRM_AST=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_BOCHS b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_BOCHS
new file mode 100644
index 0000000000000000000000000000000000000000..8dff9db0645fb5217cb43c15db8f226ff705e1cf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_BOCHS
@@ -0,0 +1 @@
+CONFIG_DRM_BOCHS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_CIRRUS_QEMU b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_CIRRUS_QEMU
new file mode 100644
index 0000000000000000000000000000000000000000..75df6271a7169f47af40fa6171c5b63f1b036583
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_CIRRUS_QEMU
@@ -0,0 +1 @@
+CONFIG_DRM_CIRRUS_QEMU=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_EMULATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_EMULATION
new file mode 100644
index 0000000000000000000000000000000000000000..16ef2c82ec882c5a0552ce87e9ba56a776a8c8e2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_EMULATION
@@ -0,0 +1 @@
+CONFIG_DRM_FBDEV_EMULATION=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_OVERALLOC b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_OVERALLOC
new file mode 100644
index 0000000000000000000000000000000000000000..32e5c45207e6c5f0d0696746ae7aa2d2c4c3bd42
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_OVERALLOC
@@ -0,0 +1 @@
+CONFIG_DRM_FBDEV_OVERALLOC=100
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_I2C_CH7006 b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_I2C_CH7006
new file mode 100644
index 0000000000000000000000000000000000000000..0e04442bf1b5936e5eff200b085212926e9e8c42
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_I2C_CH7006
@@ -0,0 +1 @@
+CONFIG_DRM_I2C_CH7006=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_INSPUR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_INSPUR
new file mode 100644
index 0000000000000000000000000000000000000000..b6105f9482beacd110c009b3c55aca38c5b28bcb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_INSPUR
@@ -0,0 +1 @@
+CONFIG_DRM_INSPUR=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_LOAD_EDID_FIRMWARE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_LOAD_EDID_FIRMWARE
new file mode 100644
index 0000000000000000000000000000000000000000..cbc53f636d0df6962131d8c307a0ba177c43af6a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_LOAD_EDID_FIRMWARE
@@ -0,0 +1 @@
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_MGAG200 b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_MGAG200
new file mode 100644
index 0000000000000000000000000000000000000000..48b6c6106fe073b3943eb1b59123c5d272bbf6b8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_MGAG200
@@ -0,0 +1 @@
+CONFIG_DRM_MGAG200=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU
new file mode 100644
index 0000000000000000000000000000000000000000..9375fdfc7a1f16c8ea3cca2a386044dec8c9a481
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU
@@ -0,0 +1 @@
+CONFIG_DRM_NOUVEAU=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU_BACKLIGHT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU_BACKLIGHT
new file mode 100644
index 0000000000000000000000000000000000000000..bb06e545858dba1f1f1ddc2f8685baa813afc7f6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU_BACKLIGHT
@@ -0,0 +1 @@
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_QXL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_QXL
new file mode 100644
index 0000000000000000000000000000000000000000..cff18896bc528b2f542b43ec40d3f2d7464953c1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_QXL
@@ -0,0 +1 @@
+CONFIG_DRM_QXL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON
new file mode 100644
index 0000000000000000000000000000000000000000..12dfb1eb31df0ebe1bacdb9bfb85bbfd7a6cc6a0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON
@@ -0,0 +1 @@
+CONFIG_DRM_RADEON=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON_USERPTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON_USERPTR
new file mode 100644
index 0000000000000000000000000000000000000000..feecc185370b1a5dda594144731847e65453e321
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON_USERPTR
@@ -0,0 +1 @@
+CONFIG_DRM_RADEON_USERPTR=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_UDL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_UDL
new file mode 100644
index 0000000000000000000000000000000000000000..6b64ab832bf9b8874278018f8448af537f33f67a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_UDL
@@ -0,0 +1 @@
+CONFIG_DRM_UDL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_VIRTIO_GPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_VIRTIO_GPU
new file mode 100644
index 0000000000000000000000000000000000000000..3f7fd91d7799ec63dd590d5bfdb4fc4e3b5f3370
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_VIRTIO_GPU
@@ -0,0 +1 @@
+CONFIG_DRM_VIRTIO_GPU=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DUMMY b/anolis/configs/L1-RECOMMEND/default/CONFIG_DUMMY
new file mode 100644
index 0000000000000000000000000000000000000000..5c3261758a81c47b2051aa5394d61d7c9b9d9d28
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DUMMY
@@ -0,0 +1 @@
+CONFIG_DUMMY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DW_DMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_DW_DMAC
new file mode 100644
index 0000000000000000000000000000000000000000..5c8179948d3658b07922bc2731a93a4cad6a00ea
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DW_DMAC
@@ -0,0 +1 @@
+CONFIG_DW_DMAC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_E100 b/anolis/configs/L1-RECOMMEND/default/CONFIG_E100
new file mode 100644
index 0000000000000000000000000000000000000000..ab8dce967f68794fe1acb3527c4d6df6343a36f9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_E100
@@ -0,0 +1 @@
+# CONFIG_E100 is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000 b/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000
new file mode 100644
index 0000000000000000000000000000000000000000..7aebd6baf17d782b4ce515e6f2b1e6a3dc54f4d3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000
@@ -0,0 +1 @@
+CONFIG_E1000=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000E b/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000E
new file mode 100644
index 0000000000000000000000000000000000000000..5c5726365578f53e29794faa231d5e811b9dffe4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000E
@@ -0,0 +1 @@
+CONFIG_E1000E=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..78248bea8a2d8427bf5f2dc4f9d01bed3db0f244
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_DEBUG
@@ -0,0 +1 @@
+# CONFIG_EDAC_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_GHES b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_GHES
new file mode 100644
index 0000000000000000000000000000000000000000..e68c7c4c27764812b7defcb93937770e2018afa1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_GHES
@@ -0,0 +1 @@
+CONFIG_EDAC_GHES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_LEGACY_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_LEGACY_SYSFS
new file mode 100644
index 0000000000000000000000000000000000000000..5d389a2740038dff63d4cfe8b17c52e794c33260
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_LEGACY_SYSFS
@@ -0,0 +1 @@
+CONFIG_EDAC_LEGACY_SYSFS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFIVAR_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFIVAR_FS
new file mode 100644
index 0000000000000000000000000000000000000000..4e151f1005b2b2a61f8bb504df99f46f5e44106d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFIVAR_FS
@@ -0,0 +1 @@
+CONFIG_EFIVAR_FS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_CUSTOM_SSDT_OVERLAYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
new file mode 100644
index 0000000000000000000000000000000000000000..e2c7e30e6b058d218d17558fe96017a74d6c4ef3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
@@ -0,0 +1 @@
+CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_SOFT_RESERVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_SOFT_RESERVE
new file mode 100644
index 0000000000000000000000000000000000000000..f6b5ec7c7269c34ad13d2d527717b0dbe389e79f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_SOFT_RESERVE
@@ -0,0 +1 @@
+CONFIG_EFI_SOFT_RESERVE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_STUB b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_STUB
new file mode 100644
index 0000000000000000000000000000000000000000..c8859686c10ab16b70ba9b62c3fd8d6141f25e27
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_STUB
@@ -0,0 +1 @@
+CONFIG_EFI_STUB=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE
new file mode 100644
index 0000000000000000000000000000000000000000..231576abfac46a1aefd5d72b5e344533b9791263
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE
@@ -0,0 +1 @@
+CONFIG_EFI_VARS_PSTORE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE
new file mode 100644
index 0000000000000000000000000000000000000000..b26ce1fb9a85125d8f4fa54aa2e923ac2fb7b914
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE
@@ -0,0 +1 @@
+CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ENCRYPTED_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ENCRYPTED_KEYS
new file mode 100644
index 0000000000000000000000000000000000000000..09d264daff2b79c90ecb454be5ae5634fbef8822
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ENCRYPTED_KEYS
@@ -0,0 +1 @@
+CONFIG_ENCRYPTED_KEYS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_BACKED_BY_FILE b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_BACKED_BY_FILE
new file mode 100644
index 0000000000000000000000000000000000000000..88e0babd0932b9a12d51f90f98bac16987fb86a2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_BACKED_BY_FILE
@@ -0,0 +1 @@
+CONFIG_EROFS_FS_BACKED_BY_FILE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..5363c0f0a759e85edc7fd4b9bdf9ff69bbdddf14
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_DEBUG
@@ -0,0 +1 @@
+# CONFIG_EROFS_FS_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ONDEMAND b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ONDEMAND
new file mode 100644
index 0000000000000000000000000000000000000000..c738efed2f789d837ac28d58d5dd7b95df3bd904
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ONDEMAND
@@ -0,0 +1 @@
+CONFIG_EROFS_FS_ONDEMAND=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_POSIX_ACL
b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_POSIX_ACL new file mode 100644 index 0000000000000000000000000000000000000000..fe4f9a82613fa8089a75222ed0fa06e833ca1053 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_POSIX_ACL @@ -0,0 +1 @@ +CONFIG_EROFS_FS_POSIX_ACL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_SECURITY b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_SECURITY new file mode 100644 index 0000000000000000000000000000000000000000..f24b07262b0e3c49218aae6cb54e481b0e2137ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_SECURITY @@ -0,0 +1 @@ +CONFIG_EROFS_FS_SECURITY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_XATTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_XATTR new file mode 100644 index 0000000000000000000000000000000000000000..751034acd74d450817131f7849b7a61e28141416 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_XATTR @@ -0,0 +1 @@ +CONFIG_EROFS_FS_XATTR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP new file mode 100644 index 0000000000000000000000000000000000000000..fe558502b3015e4f3efe88f7c6fc126d80b15f5c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP @@ -0,0 +1 @@ +CONFIG_EROFS_FS_ZIP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_DEFLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_DEFLATE new file mode 100644 index 0000000000000000000000000000000000000000..a5b66f23823f8ea50d12ce945f776441b14d6d86 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_DEFLATE @@ -0,0 +1 @@ +CONFIG_EROFS_FS_ZIP_DEFLATE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_LZMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_LZMA new file mode 100644 index 0000000000000000000000000000000000000000..843dc4e89d4ebe063f03b8094efff358e1dddc34 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_LZMA @@ -0,0 +1 @@ +CONFIG_EROFS_FS_ZIP_LZMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ADD_XATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ADD_XATTRS new file mode 100644 index 0000000000000000000000000000000000000000..687632a21f2b5ae13d895d06341ca0f3681862ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ADD_XATTRS @@ -0,0 +1 @@ +# CONFIG_EVM_ADD_XATTRS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ATTR_FSUUID b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ATTR_FSUUID new file mode 100644 index 0000000000000000000000000000000000000000..559a1dad3497892302379110e0707bf28c6a23c7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ATTR_FSUUID @@ -0,0 +1 @@ +CONFIG_EVM_ATTR_FSUUID=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_LOAD_X509 b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_LOAD_X509 new file mode 100644 index 0000000000000000000000000000000000000000..0dd95a176560a8fcc68f440c2ecd0a8243fc604e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_LOAD_X509 @@ -0,0 +1 @@ +CONFIG_EVM_LOAD_X509=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_X509_PATH b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_X509_PATH new file mode 100644 index 0000000000000000000000000000000000000000..11b63bed0287c436b8a134a14b74565a6b708390 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_X509_PATH @@ -0,0 +1 @@ 
+CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXFAT_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXFAT_FS new file mode 100644 index 0000000000000000000000000000000000000000..2113d81064a9cc33cd9079b08a5d032cd26aa264 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXFAT_FS @@ -0,0 +1 @@ +# CONFIG_EXFAT_FS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXPORTFS_BLOCK_OPS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXPORTFS_BLOCK_OPS new file mode 100644 index 0000000000000000000000000000000000000000..a796344a723a6eefb7c52cc8864527871b0fdbb9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXPORTFS_BLOCK_OPS @@ -0,0 +1 @@ +CONFIG_EXPORTFS_BLOCK_OPS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT2_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT2_FS new file mode 100644 index 0000000000000000000000000000000000000000..95332e711c4c84c32fe05b738667e1500711a335 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT2_FS @@ -0,0 +1 @@ +# CONFIG_EXT2_FS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..cade17e50e658dc87891af4708ec622a320834b2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_DEBUG @@ -0,0 +1 @@ +# CONFIG_EXT4_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_USE_FOR_EXT2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_USE_FOR_EXT2 new file mode 100644 index 0000000000000000000000000000000000000000..05d6610ffde887f62041209d254c4ab7f7ca9b24 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_USE_FOR_EXT2 @@ -0,0 +1 @@ +CONFIG_EXT4_USE_FOR_EXT2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXTRA_FIRMWARE b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXTRA_FIRMWARE new file mode 100644 index 0000000000000000000000000000000000000000..46a0a270c15dde8ea69de5e62c54a97cd56f389c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXTRA_FIRMWARE @@ -0,0 +1 @@ +CONFIG_EXTRA_FIRMWARE="" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FAILOVER b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAILOVER new file mode 100644 index 0000000000000000000000000000000000000000..2c85d6ab76e317ccb85e690621bb723bed5cc6c9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAILOVER @@ -0,0 +1 @@ +CONFIG_FAILOVER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FANOTIFY_ACCESS_PERMISSIONS b/anolis/configs/L1-RECOMMEND/default/CONFIG_FANOTIFY_ACCESS_PERMISSIONS new file mode 100644 index 0000000000000000000000000000000000000000..197a02ec791ffed840e8de1cd6d5cd0f2eca4b00 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FANOTIFY_ACCESS_PERMISSIONS @@ -0,0 +1 @@ +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_CODEPAGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_CODEPAGE new file mode 100644 index 0000000000000000000000000000000000000000..280e26edb30883913ca5a8d285a51b5a4ca06bdb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_CODEPAGE @@ -0,0 +1 @@ +CONFIG_FAT_DEFAULT_CODEPAGE=437 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_IOCHARSET b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_IOCHARSET new file mode 100644 index 
0000000000000000000000000000000000000000..f4e49e2a3dd4ee7b0a05ae8d99d891985070b70d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_IOCHARSET @@ -0,0 +1 @@ +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_UTF8 b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_UTF8 new file mode 100644 index 0000000000000000000000000000000000000000..ee85a55f00dd3061c6fdd8ede4e3f20577cf80e6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_UTF8 @@ -0,0 +1 @@ +# CONFIG_FAT_DEFAULT_UTF8 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FAULT_INJECTION b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAULT_INJECTION new file mode 100644 index 0000000000000000000000000000000000000000..288866e72b304e4598d6fb7bf472bd6083045a3d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAULT_INJECTION @@ -0,0 +1 @@ +# CONFIG_FAULT_INJECTION is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_EFI b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_EFI new file mode 100644 index 0000000000000000000000000000000000000000..62adf192c62a17188166620e17d34daa7ce3ce74 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_EFI @@ -0,0 +1 @@ +CONFIG_FB_EFI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_LS2K500 b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_LS2K500 new file mode 100644 index 0000000000000000000000000000000000000000..d34e5b98ecaf4b8dc1e2045530cdfe7745f7d2d3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_LS2K500 @@ -0,0 +1 @@ +# CONFIG_FB_LS2K500 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_TILEBLITTING b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_TILEBLITTING new file mode 100644 index 0000000000000000000000000000000000000000..f3b6635cff9790a1ef3dbe3d0c8edc0e53a6fda6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_TILEBLITTING @@ -0,0 +1 @@ +CONFIG_FB_TILEBLITTING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FIX_EARLYCON_MEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_FIX_EARLYCON_MEM new file mode 100644 index 0000000000000000000000000000000000000000..10b715239bab66d1290d33d6a43e1a03537e8b65 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FIX_EARLYCON_MEM @@ -0,0 +1 @@ +CONFIG_FIX_EARLYCON_MEM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FM10K b/anolis/configs/L1-RECOMMEND/default/CONFIG_FM10K new file mode 100644 index 0000000000000000000000000000000000000000..c9b11d9bd88401070c2656acc5059af9c742200d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FM10K @@ -0,0 +1 @@ +CONFIG_FM10K=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FORTIFY_SOURCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_FORTIFY_SOURCE new file mode 100644 index 0000000000000000000000000000000000000000..926b56799e78c13acc4a2dfc6dc83060eb4893e2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FORTIFY_SOURCE @@ -0,0 +1 @@ +CONFIG_FORTIFY_SOURCE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY new file mode 100644 index 0000000000000000000000000000000000000000..3153802cdb9e9ff5ce754a9f71f228e288cf1d09 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY @@ -0,0 +1 @@ +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_ROTATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_ROTATION new file mode 100644 index 0000000000000000000000000000000000000000..3887f86bbd231469068eeaa0a8f70888b8164b66 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_ROTATION @@ -0,0 +1 @@ +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAME_WARN b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAME_WARN new file mode 100644 index 0000000000000000000000000000000000000000..6826578df976180635c337adc14a3700f79fd8c7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAME_WARN @@ -0,0 +1 @@ +CONFIG_FRAME_WARN=2048 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..4da10ce3554947496d8faf8b813fe17f87ef63fc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_DEBUG @@ -0,0 +1 @@ +# CONFIG_FSCACHE_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_STATS new file mode 100644 index 0000000000000000000000000000000000000000..a65dcfa31c0a430e7367070c9b8dbfebcc468916 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_STATS @@ -0,0 +1 @@ +CONFIG_FSCACHE_STATS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FTRACE_RECORD_RECURSION b/anolis/configs/L1-RECOMMEND/default/CONFIG_FTRACE_RECORD_RECURSION new file mode 100644 index 0000000000000000000000000000000000000000..613dd9d01aef2da84cdb9b49284955b939821a65 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FTRACE_RECORD_RECURSION @@ -0,0 +1 @@ +# CONFIG_FTRACE_RECORD_RECURSION is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSE_DAX b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSE_DAX new file mode 100644 index 0000000000000000000000000000000000000000..cc453d1c5bc7f4ad404863aae1eb3ca4576b29da --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSE_DAX @@ -0,0 +1 @@ +CONFIG_FUSE_DAX=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION new file mode 100644 index 0000000000000000000000000000000000000000..6c920d6b1e7f2de98d2a0d9c512c1f31edc3a638 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION @@ -0,0 +1 @@ +CONFIG_FUSION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_FC b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_FC new file mode 100644 index 0000000000000000000000000000000000000000..ce3b17f6cdcc57528ea45d2e0310bcb64776d738 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_FC @@ -0,0 +1 @@ +# CONFIG_FUSION_FC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_LOGGING b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_LOGGING new file mode 100644 index 0000000000000000000000000000000000000000..0c2d45eff71332fd599572bea8935c685945d87c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_LOGGING @@ -0,0 +1 @@ +CONFIG_FUSION_LOGGING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_MAX_SGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_MAX_SGE new file mode 100644 index 0000000000000000000000000000000000000000..7e3440002f1c4e11a75e757914b4a98dca58164d --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_MAX_SGE @@ -0,0 +1 @@ +CONFIG_FUSION_MAX_SGE=128 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SAS new file mode 100644 index 0000000000000000000000000000000000000000..fa9006f153d716f7ff90e84ced7d07d65ffa3c7f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SAS @@ -0,0 +1 @@ +CONFIG_FUSION_SAS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SPI b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SPI new file mode 100644 index 0000000000000000000000000000000000000000..938c1127599c1620ef4ab19379416998598c0052 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SPI @@ -0,0 +1 @@ +CONFIG_FUSION_SPI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CACHE new file mode 100644 index 0000000000000000000000000000000000000000..374610244e84aeccc84f9babbbce5d27f1527b9d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CACHE @@ -0,0 +1 @@ +CONFIG_FW_CACHE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS new file mode 100644 index 0000000000000000000000000000000000000000..5c41f98a0b275dd25e289fd352bcfa5c28de1c0a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS @@ -0,0 +1 @@ +CONFIG_FW_CFG_SYSFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS_CMDLINE b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS_CMDLINE new file mode 100644 index 0000000000000000000000000000000000000000..ffca1920fe6748f66c84453c120637d6d761e1d9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS_CMDLINE @@ -0,0 +1 @@ +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GACT_PROB b/anolis/configs/L1-RECOMMEND/default/CONFIG_GACT_PROB new file mode 100644 index 0000000000000000000000000000000000000000..aaec40095e7f668e8888a4a0c6a151124115f5b5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GACT_PROB @@ -0,0 +1 @@ +CONFIG_GACT_PROB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GCOV_KERNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_GCOV_KERNEL new file mode 100644 index 0000000000000000000000000000000000000000..05f92777b7aad42fb84a86c1e8f67c4977f3c554 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GCOV_KERNEL @@ -0,0 +1 @@ +# CONFIG_GCOV_KERNEL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GDB_SCRIPTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_GDB_SCRIPTS new file mode 100644 index 0000000000000000000000000000000000000000..72774e7079308d339f6261b1c94b95cce8d0a3b4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GDB_SCRIPTS @@ -0,0 +1 @@ +# CONFIG_GDB_SCRIPTS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_DEBUGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_DEBUGFS new file mode 100644 index 0000000000000000000000000000000000000000..539bb6640b8225dc62ab7c21155aec1c31467fcf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_GENERIC_IRQ_DEBUGFS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_INJECTION b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_INJECTION new file mode 100644 index 
0000000000000000000000000000000000000000..11c9853455ea5565cb63b3a1f74fbc5ded1ce259 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_INJECTION @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_INJECTION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_PROBE b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_PROBE new file mode 100644 index 0000000000000000000000000000000000000000..99ffc502cc26cd6621718389e47ca137fa6c8932 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_PROBE @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_PROBE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_SHOW b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_SHOW new file mode 100644 index 0000000000000000000000000000000000000000..6de8d0b4de3f9670e67385a9ed0d7095c3bc9053 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_SHOW @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_SHOW=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_PHY b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_PHY new file mode 100644 index 0000000000000000000000000000000000000000..40cd1a4f556ecb03f1bfe9a1c115a87e100b7937 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_PHY @@ -0,0 +1 @@ +CONFIG_GENERIC_PHY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENEVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENEVE new file mode 100644 index 0000000000000000000000000000000000000000..99344bcd9e12f56f5a0fd074044dafc6e7147fe7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENEVE @@ -0,0 +1 @@ +CONFIG_GENEVE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GET_FREE_REGION b/anolis/configs/L1-RECOMMEND/default/CONFIG_GET_FREE_REGION new file mode 100644 index 0000000000000000000000000000000000000000..b64cd9e90b37c773a3c8a200082dcd6bd2cf109c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GET_FREE_REGION @@ -0,0 +1 @@ +CONFIG_GET_FREE_REGION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_DS4520 b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_DS4520 new file mode 100644 index 0000000000000000000000000000000000000000..efc77941e54cf25d18c22735a85b7fdd34e0f4c9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_DS4520 @@ -0,0 +1 @@ +# CONFIG_GPIO_DS4520 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_FXL6408 b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_FXL6408 new file mode 100644 index 0000000000000000000000000000000000000000..6a63066040c51642ac064e904cd5efd4d9bafe45 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_FXL6408 @@ -0,0 +1 @@ +# CONFIG_GPIO_FXL6408 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_GENERIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_GENERIC new file mode 100644 index 0000000000000000000000000000000000000000..a1dfb641912fd1e03a6bdd07bc23b139cbe5a5db --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_GENERIC @@ -0,0 +1 @@ +CONFIG_GPIO_GENERIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_LATCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_LATCH new file mode 100644 index 0000000000000000000000000000000000000000..4fcea037be64554ae4ec84626a958931612e147f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_LATCH @@ -0,0 +1 @@ +# CONFIG_GPIO_LATCH is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_SIM b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_SIM new file mode 100644 index 
0000000000000000000000000000000000000000..177dc4b4d1d4556de76c79b96006417e9eee7315 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_SIM @@ -0,0 +1 @@ +# CONFIG_GPIO_SIM is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_VIRTIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_VIRTIO new file mode 100644 index 0000000000000000000000000000000000000000..5524fe6a6c1f59023c1178a1118503811e363cc4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_VIRTIO @@ -0,0 +1 @@ +# CONFIG_GPIO_VIRTIO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GP_PCI1XXXX b/anolis/configs/L1-RECOMMEND/default/CONFIG_GP_PCI1XXXX new file mode 100644 index 0000000000000000000000000000000000000000..5cd838999bc4d892f2f142fec5ef7016cfd3cfc6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GP_PCI1XXXX @@ -0,0 +1 @@ +# CONFIG_GP_PCI1XXXX is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GUEST_PERF_EVENTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_GUEST_PERF_EVENTS new file mode 100644 index 0000000000000000000000000000000000000000..11cfcba0b0d28ac2d939551993ed816754e9e35b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GUEST_PERF_EVENTS @@ -0,0 +1 @@ +CONFIG_GUEST_PERF_EVENTS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GUP_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_GUP_TEST new file mode 100644 index 0000000000000000000000000000000000000000..5df4896bb4470ef784b238308e04820218075e45 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GUP_TEST @@ -0,0 +1 @@ +# CONFIG_GUP_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HARDENED_USERCOPY b/anolis/configs/L1-RECOMMEND/default/CONFIG_HARDENED_USERCOPY new file mode 100644 index 0000000000000000000000000000000000000000..d2dcc857f2dc5b8f9381da4e096db2c233a5566f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HARDENED_USERCOPY @@ -0,0 +1 @@ +CONFIG_HARDENED_USERCOPY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_LIVEPATCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_LIVEPATCH new file mode 100644 index 0000000000000000000000000000000000000000..7ebdb924703e83bf53d1cf65d694cc6c3f2435ee --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_HAVE_LIVEPATCH=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_RELIABLE_STACKTRACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_RELIABLE_STACKTRACE new file mode 100644 index 0000000000000000000000000000000000000000..2ce8faabc4cf8d22f2e1240b50fe5779c48d7bfb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_RELIABLE_STACKTRACE @@ -0,0 +1 @@ +CONFIG_HAVE_RELIABLE_STACKTRACE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_STACK_VALIDATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_STACK_VALIDATION new file mode 100644 index 0000000000000000000000000000000000000000..6f36a32d84ae3d13712676f7140d81a67343ce55 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_STACK_VALIDATION @@ -0,0 +1 @@ +CONFIG_HAVE_STACK_VALIDATION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HEADERS_INSTALL b/anolis/configs/L1-RECOMMEND/default/CONFIG_HEADERS_INSTALL new file mode 100644 index 0000000000000000000000000000000000000000..5b30575118a3adfa6f17be587b09ccd3c811b689 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HEADERS_INSTALL @@ -0,0 +1 @@ +# CONFIG_HEADERS_INSTALL is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATE_CALLBACKS b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATE_CALLBACKS new file mode 100644 index 0000000000000000000000000000000000000000..563709ddefb644e2342315142bf59049c1656045 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATE_CALLBACKS @@ -0,0 +1 @@ +CONFIG_HIBERNATE_CALLBACKS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION new file mode 100644 index 0000000000000000000000000000000000000000..8df6f5c694d2aa5e43e7149a310d51b1cd5a8efb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION @@ -0,0 +1 @@ +CONFIG_HIBERNATION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION_SNAPSHOT_DEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION_SNAPSHOT_DEV new file mode 100644 index 0000000000000000000000000000000000000000..c4627aa7e564a56f4035f2fb1b4fc4bb18ad472d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION_SNAPSHOT_DEV @@ -0,0 +1 @@ +CONFIG_HIBERNATION_SNAPSHOT_DEV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HID b/anolis/configs/L1-RECOMMEND/default/CONFIG_HID new file mode 100644 index 0000000000000000000000000000000000000000..b82f510fc4be3cce2112992c6f3a51bcd801a12a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HID @@ -0,0 +1 @@ +CONFIG_HID=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HID_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_HID_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..27e01b36f61b2fa8aba968c01ba7334c732bcbc6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HID_SUPPORT @@ -0,0 +1 @@ +CONFIG_HID_SUPPORT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HINIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_HINIC new file mode 100644 index 0000000000000000000000000000000000000000..5a9004f4a1c4bb6a131a2ac108e85fb32a46a46b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HINIC @@ -0,0 +1 @@ +CONFIG_HINIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HIST_TRIGGERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIST_TRIGGERS new file mode 100644 index 0000000000000000000000000000000000000000..93ab853e5a46d9c139fc0132a0b0862d05ed3b3d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIST_TRIGGERS @@ -0,0 +1 @@ +CONFIG_HIST_TRIGGERS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HOTPLUG_PCI_ACPI b/anolis/configs/L1-RECOMMEND/default/CONFIG_HOTPLUG_PCI_ACPI new file mode 100644 index 0000000000000000000000000000000000000000..8086e293ad0a237129d2cbc5623d9ed93b621154 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HOTPLUG_PCI_ACPI @@ -0,0 +1 @@ +CONFIG_HOTPLUG_PCI_ACPI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HWLAT_TRACER b/anolis/configs/L1-RECOMMEND/default/CONFIG_HWLAT_TRACER new file mode 100644 index 0000000000000000000000000000000000000000..cfdda978444b075f647999f2c2c7e748a3211173 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HWLAT_TRACER @@ -0,0 +1 @@ +CONFIG_HWLAT_TRACER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HWPOISON_INJECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_HWPOISON_INJECT new file mode 100644 index 0000000000000000000000000000000000000000..ae646aa80fd11cc6320ba732f9eac8bc3d054366 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HWPOISON_INJECT @@ -0,0 +1 @@ +CONFIG_HWPOISON_INJECT=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_HW_RANDOM_TPM b/anolis/configs/L1-RECOMMEND/default/CONFIG_HW_RANDOM_TPM new file mode 100644 index 0000000000000000000000000000000000000000..d991b3c93ca4ff8c4aa5569fca0b4c187c95a9dc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HW_RANDOM_TPM @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_TPM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HYDCU_FIXUP_HEADER b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYDCU_FIXUP_HEADER new file mode 100644 index 0000000000000000000000000000000000000000..5a7a369f9fdb96bd02dbcaf536c0db7eaeaa8963 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYDCU_FIXUP_HEADER @@ -0,0 +1 @@ +# CONFIG_HYDCU_FIXUP_HEADER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV new file mode 100644 index 0000000000000000000000000000000000000000..586091822f18819dd79920a2a92f9d0b30a75a9e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV @@ -0,0 +1 @@ +CONFIG_HYPERV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV_KEYBOARD b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV_KEYBOARD new file mode 100644 index 0000000000000000000000000000000000000000..25dccab6b17377088d725ddf06a95a9c10dc8ab2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV_KEYBOARD @@ -0,0 +1 @@ +CONFIG_HYPERV_KEYBOARD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV_STORAGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV_STORAGE new file mode 100644 index 0000000000000000000000000000000000000000..6782fa59b524fa5838b4481fa6e6444445a2e8b7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV_STORAGE @@ -0,0 +1 @@ +CONFIG_HYPERV_STORAGE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_CHARDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_CHARDEV new file mode 100644 index 0000000000000000000000000000000000000000..d843de200b5aa6cd7add1de0781c40bab4f9a353 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_CHARDEV @@ -0,0 +1 @@ +CONFIG_I2C_CHARDEV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_MUX b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_MUX new file mode 100644 index 0000000000000000000000000000000000000000..6982ed98a06fb922f5cfb82bd4160d0cd0e79e38 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_MUX @@ -0,0 +1 @@ +CONFIG_I2C_MUX=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_SMBUS b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_SMBUS new file mode 100644 index 0000000000000000000000000000000000000000..1a756e77bd86a0a15aac88f4bbe8033032ace4aa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_SMBUS @@ -0,0 +1 @@ +CONFIG_I2C_SMBUS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_I6300ESB_WDT b/anolis/configs/L1-RECOMMEND/default/CONFIG_I6300ESB_WDT new file mode 100644 index 0000000000000000000000000000000000000000..e65f0870cc213a7ce54183de0c7e6d6558652b8a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_I6300ESB_WDT @@ -0,0 +1 @@ +CONFIG_I6300ESB_WDT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IFB b/anolis/configs/L1-RECOMMEND/default/CONFIG_IFB new file mode 100644 index 0000000000000000000000000000000000000000..738210cd4c13ccc21d019e4e6e3db90fdb42f4e6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IFB @@ -0,0 +1 @@ +CONFIG_IFB=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IGB_HWMON 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IGB_HWMON new file mode 100644 index 0000000000000000000000000000000000000000..d47de32e622513b135c9fb3a84f21fd6d70a6372 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IGB_HWMON @@ -0,0 +1 @@ +CONFIG_IGB_HWMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IGC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IGC new file mode 100644 index 0000000000000000000000000000000000000000..f744b9900faa5a04cb37275e3c740689427e1540 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IGC @@ -0,0 +1 @@ +CONFIG_IGC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG new file mode 100644 index 0000000000000000000000000000000000000000..3d80bd86a19673e37250f96ebe626c384c7f9daf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG @@ -0,0 +1 @@ +CONFIG_IKCONFIG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG_PROC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG_PROC new file mode 100644 index 0000000000000000000000000000000000000000..7d45f8bfb8572d12ea6b550e41e27a19659bea33 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG_PROC @@ -0,0 +1 @@ +CONFIG_IKCONFIG_PROC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IKHEADERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKHEADERS new file mode 100644 index 0000000000000000000000000000000000000000..e214495e0279a3b351fe06265a1564e806ef9313 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKHEADERS @@ -0,0 +1 @@ +# CONFIG_IKHEADERS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ILLEGAL_POINTER_VALUE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ILLEGAL_POINTER_VALUE new file mode 100644 index 0000000000000000000000000000000000000000..5fa2f045f2be35abb9bc3684c5c0b3dfb6b69bcf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ILLEGAL_POINTER_VALUE @@ -0,0 +1 @@ +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE new file mode 100644 index 0000000000000000000000000000000000000000..da04fd67d6a60616f8a6ae7e838d0fa7b2827418 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE @@ -0,0 +1 @@ +CONFIG_IMA_APPRAISE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BOOTPARAM b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BOOTPARAM new file mode 100644 index 0000000000000000000000000000000000000000..000a58fb65a3b3f0481b0f3cfb6df3885d5b1907 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BOOTPARAM @@ -0,0 +1 @@ +CONFIG_IMA_APPRAISE_BOOTPARAM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BUILD_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BUILD_POLICY new file mode 100644 index 0000000000000000000000000000000000000000..b89ec93a48ad97a90d31f3a53281ee406687eb64 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BUILD_POLICY @@ -0,0 +1 @@ +CONFIG_IMA_APPRAISE_BUILD_POLICY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_MODSIG b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_MODSIG new file mode 100644 index 0000000000000000000000000000000000000000..2718d45137c729ae1749cdad85ab83fd09f477e2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_MODSIG @@ -0,0 +1 @@ +# CONFIG_IMA_APPRAISE_MODSIG is not 
set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS new file mode 100644 index 0000000000000000000000000000000000000000..64c5f2bf79fd17bd3c71be93463ae66111fe6d7e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS @@ -0,0 +1 @@ +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS new file mode 100644 index 0000000000000000000000000000000000000000..bf15301d4ca97b5d25afba1a3ddccd57f79a8632 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS @@ -0,0 +1 @@ +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS new file mode 100644 index 0000000000000000000000000000000000000000..6596de713c571f88c68341f4c59aa5ff6717f440 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS @@ -0,0 +1 @@ +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS new file mode 100644 index 0000000000000000000000000000000000000000..3abafe60ec9f91cf6266b8db0e0d11f01a389555 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS @@ -0,0 +1 @@ +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_SIGNED_INIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_SIGNED_INIT new file mode 100644 index 0000000000000000000000000000000000000000..2c92177c72d61d74d834cf1527f3f9e04ac30ea9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_SIGNED_INIT @@ -0,0 +1 @@ +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_ARCH_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_ARCH_POLICY new file mode 100644 index 0000000000000000000000000000000000000000..7187ae0dce9d530a887654278b7849ee09bc4529 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_ARCH_POLICY @@ -0,0 +1 @@ +# CONFIG_IMA_ARCH_POLICY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_BLACKLIST_KEYRING b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_BLACKLIST_KEYRING new file mode 100644 index 0000000000000000000000000000000000000000..7457767336f45546788011fe269d293425d43129 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_BLACKLIST_KEYRING @@ -0,0 +1 @@ +CONFIG_IMA_BLACKLIST_KEYRING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH new file mode 100644 index 0000000000000000000000000000000000000000..35a36af692eadbeda92e801894f06397832b5ec5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH @@ -0,0 +1 @@ +CONFIG_IMA_DEFAULT_HASH="sha256" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA1 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA1 new file mode 100644 index 
0000000000000000000000000000000000000000..b51889849965e396f5ff836d1fb6562d011009d3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA1 @@ -0,0 +1 @@ +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA256 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA256 new file mode 100644 index 0000000000000000000000000000000000000000..e627fd9e9a2fa967d013d6a47f839e25cbb04f62 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA256 @@ -0,0 +1 @@ +CONFIG_IMA_DEFAULT_HASH_SHA256=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA512 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA512 new file mode 100644 index 0000000000000000000000000000000000000000..63c78568591ff824bdd78e58542cd3464f83d10c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA512 @@ -0,0 +1 @@ +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SM3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SM3 new file mode 100644 index 0000000000000000000000000000000000000000..d00f8cdc20aa5e7ef74c57161bc1968ba53860c5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SM3 @@ -0,0 +1 @@ +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_TEMPLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_TEMPLATE new file mode 100644 index 0000000000000000000000000000000000000000..0d38cb6c5fe048d5b18ae150feec63a7fb8f923f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_TEMPLATE @@ -0,0 +1 @@ +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY new file mode 100644 index 0000000000000000000000000000000000000000..08056234d1d9de333ecacfd8680ef043b4af8b78 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY @@ -0,0 +1 @@ +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LOAD_X509 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LOAD_X509 new file mode 100644 index 0000000000000000000000000000000000000000..37c785db29b38b86cda62a61dea4c574b9bf0670 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LOAD_X509 @@ -0,0 +1 @@ +CONFIG_IMA_LOAD_X509=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LSM_RULES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LSM_RULES new file mode 100644 index 0000000000000000000000000000000000000000..97d7dd6429a430859180b23d489a11554b8ef75c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LSM_RULES @@ -0,0 +1 @@ +CONFIG_IMA_LSM_RULES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS new file mode 100644 index 0000000000000000000000000000000000000000..3682c2753d205d4f24893a1da82adfbb70c3f9da --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS @@ -0,0 +1 @@ +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_PCR_IDX 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_PCR_IDX new file mode 100644 index 0000000000000000000000000000000000000000..685377b3111d6356fc4b6e4fdac80d4c2c67802d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_PCR_IDX @@ -0,0 +1 @@ +CONFIG_IMA_MEASURE_PCR_IDX=10 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_NG_TEMPLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_NG_TEMPLATE new file mode 100644 index 0000000000000000000000000000000000000000..970afd0203f062d9bef2158f3c3a1efbf472361e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_NG_TEMPLATE @@ -0,0 +1 @@ +# CONFIG_IMA_NG_TEMPLATE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS new file mode 100644 index 0000000000000000000000000000000000000000..eb31e05a08b79032e544d89245071fd0f9bedc84 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS @@ -0,0 +1 @@ +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_READ_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_READ_POLICY new file mode 100644 index 0000000000000000000000000000000000000000..8f280d80334d6701507beb0ebc1b62a206a1027a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_READ_POLICY @@ -0,0 +1 @@ +CONFIG_IMA_READ_POLICY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT new file mode 100644 index 0000000000000000000000000000000000000000..3dbf4221f830c67bd2dd6c00df88b44ee65bc37c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT @@ -0,0 +1 @@ +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SIG_TEMPLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SIG_TEMPLATE new file mode 100644 index 0000000000000000000000000000000000000000..f1d95dcd96716a04d47a976c00cacc8ad057e24e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SIG_TEMPLATE @@ -0,0 +1 @@ +CONFIG_IMA_SIG_TEMPLATE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_WRITE_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_WRITE_POLICY new file mode 100644 index 0000000000000000000000000000000000000000..e54ce85d7ff0ea98d83df5d696b69d134d07f82c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_WRITE_POLICY @@ -0,0 +1 @@ +CONFIG_IMA_WRITE_POLICY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_X509_PATH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_X509_PATH new file mode 100644 index 0000000000000000000000000000000000000000..2b2332402234c75e3a2631450565feae75c99aad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_X509_PATH @@ -0,0 +1 @@ +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_AH b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_AH new file mode 100644 index 0000000000000000000000000000000000000000..9e4fbe002bb529a46ec1e68de6338c75f34918c0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_AH @@ -0,0 +1 @@ +CONFIG_INET6_AH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP new file mode 100644 index 0000000000000000000000000000000000000000..e3a4a08d6174ff1dbaf2fe7cf2bda8b328dd7caf --- 
/dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP
@@ -0,0 +1 @@
+CONFIG_INET6_ESP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESPINTCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESPINTCP
new file mode 100644
index 0000000000000000000000000000000000000000..1b4ebe140e273ef2d6637ed935ac35b06ab90d2f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESPINTCP
@@ -0,0 +1 @@
+# CONFIG_INET6_ESPINTCP is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP_OFFLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP_OFFLOAD
new file mode 100644
index 0000000000000000000000000000000000000000..24c35000494dd92f9b44823f88a2c0e5529bea6a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP_OFFLOAD
@@ -0,0 +1 @@
+CONFIG_INET6_ESP_OFFLOAD=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_IPCOMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_IPCOMP
new file mode 100644
index 0000000000000000000000000000000000000000..40e0dde512e634c6db5a1910aee5bde5c0a22614
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_IPCOMP
@@ -0,0 +1 @@
+CONFIG_INET6_IPCOMP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_TUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_TUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..78cd37a0a3b77ae8aa483e06a18a847be2164c9e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_TUNNEL
@@ -0,0 +1 @@
+CONFIG_INET6_TUNNEL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_XFRM_TUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_XFRM_TUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..0b2416447fc63057cf464475f8c9b1c4e3af1ba0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_XFRM_TUNNEL
@@ -0,0 +1 @@
+CONFIG_INET6_XFRM_TUNNEL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_AH b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_AH
new file mode 100644
index 0000000000000000000000000000000000000000..89b662d112369283a6fcf73164d8db2281f7bffb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_AH
@@ -0,0 +1 @@
+CONFIG_INET_AH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_DIAG_DESTROY b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_DIAG_DESTROY
new file mode 100644
index 0000000000000000000000000000000000000000..6b50163279b225bca77f174a31da10ae9e31ea75
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_DIAG_DESTROY
@@ -0,0 +1 @@
+# CONFIG_INET_DIAG_DESTROY is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP
new file mode 100644
index 0000000000000000000000000000000000000000..36f69aa2a72a33c7a23a2ed96b20c1eb9caae886
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP
@@ -0,0 +1 @@
+CONFIG_INET_ESP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESPINTCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESPINTCP
new file mode 100644
index 0000000000000000000000000000000000000000..04ac14c89384a8cd4f790cd4b171b0063048e1a7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESPINTCP
@@ -0,0 +1 @@
+# CONFIG_INET_ESPINTCP is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP_OFFLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP_OFFLOAD
new file mode 100644
index 0000000000000000000000000000000000000000..d5cca6d1daeb0c0a85c14c42a9e891ab81422f83
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP_OFFLOAD
@@ -0,0 +1 @@
+CONFIG_INET_ESP_OFFLOAD=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_IPCOMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_IPCOMP
new file mode 100644
index 0000000000000000000000000000000000000000..27b6ba9d8fa919b205467df1d375759a6ed35c44
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_IPCOMP
@@ -0,0 +1 @@
+CONFIG_INET_IPCOMP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_RAW_DIAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_RAW_DIAG
new file mode 100644
index 0000000000000000000000000000000000000000..b1932e98a8e82c47bfb473ecef52cc09a79b2fc6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_RAW_DIAG
@@ -0,0 +1 @@
+CONFIG_INET_RAW_DIAG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ADDR_TRANS b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ADDR_TRANS
new file mode 100644
index 0000000000000000000000000000000000000000..b68f9bbd9837c44abf635cc79fa46239dec26fac
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ADDR_TRANS
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_ADDR_TRANS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ERDMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ERDMA
new file mode 100644
index 0000000000000000000000000000000000000000..a83695df816c66c12d6e526ac1b456243a08fc51
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ERDMA
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_ERDMA=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB
new file mode 100644
index 0000000000000000000000000000000000000000..8548857aa9bbc5e3bb1af147c106504bcf207ccf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_IPOIB=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_CM b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_CM
new file mode 100644
index 0000000000000000000000000000000000000000..e4086829975d319d129b23abafbf395bee3c3084
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_CM
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_IPOIB_CM=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..d86370d98b24b0137a304fe965066b1c87b73263
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_DEBUG
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_IPOIB_DEBUG=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISER b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISER
new file mode 100644
index 0000000000000000000000000000000000000000..c963877f5e7c77e576d994e74d2a2344c36150f0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISER
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_ISER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISERT b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISERT
new file mode 100644
index 0000000000000000000000000000000000000000..62b0f926c6ec6b50fa5e5aa19abec221105860c1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISERT
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_ISERT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ON_DEMAND_PAGING b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ON_DEMAND_PAGING
new file mode 100644
index 0000000000000000000000000000000000000000..ea5a3d8d8b6fd8fdfa060bd3335f93acf842297e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ON_DEMAND_PAGING
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_CLIENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_CLIENT
new file mode 100644
index 0000000000000000000000000000000000000000..4d8db5cda5a843df78e213478b3a0026e110f9e9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_CLIENT
@@ -0,0 +1 @@
+# CONFIG_INFINIBAND_RTRS_CLIENT is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_SERVER b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_SERVER
new file mode 100644
index 0000000000000000000000000000000000000000..dfdaaad5e995f7418dbf28faf99222c21ec310d4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_SERVER
@@ -0,0 +1 @@
+# CONFIG_INFINIBAND_RTRS_SERVER is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRP
new file mode 100644
index 0000000000000000000000000000000000000000..b382bf65665920c146992b23980ac9448f0a486d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRP
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_SRP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRPT
new file mode 100644
index 0000000000000000000000000000000000000000..ff4989b54945eed4a6fb6edbdb79a985869c8062
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRPT
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_SRPT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_ACCESS b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_ACCESS
new file mode 100644
index 0000000000000000000000000000000000000000..1b11b6f4aefc175606ae171c0475e9dc11a25347
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_ACCESS
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_USER_ACCESS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_MAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_MAD
new file mode 100644
index 0000000000000000000000000000000000000000..36e7eb29d29ff2051866d8b00435027f50b1bae8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_MAD
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_USER_MAD=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_XSC b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_XSC
new file mode 100644
index 0000000000000000000000000000000000000000..734ca6c9dfe0942cb749ff8ed8cd823aca34a51e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_XSC
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_XSC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INITRAMFS_SOURCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_INITRAMFS_SOURCE
new file mode 100644
index 0000000000000000000000000000000000000000..becad4d7ed34f3e15538ca54447d26c5e00538af
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INITRAMFS_SOURCE
@@ -0,0 +1 @@
+CONFIG_INITRAMFS_SOURCE=""
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_ALLOC_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_ALLOC_DEFAULT_ON
new file mode 100644
index 0000000000000000000000000000000000000000..67917dc22f18075c36e7bce9809c145fa46325f9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_ALLOC_DEFAULT_ON
@@ -0,0 +1 @@
+# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_FREE_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_FREE_DEFAULT_ON
new file mode 100644
index 0000000000000000000000000000000000000000..5fd7392f7dbecb9419cb4636ed961a2494542291
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_FREE_DEFAULT_ON
@@ -0,0 +1 @@
+# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INOTIFY_USER b/anolis/configs/L1-RECOMMEND/default/CONFIG_INOTIFY_USER
new file mode 100644
index 0000000000000000000000000000000000000000..eb2e0c9a8145f48f91ebd774f09c26024baf285a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INOTIFY_USER
@@ -0,0 +1 @@
+CONFIG_INOTIFY_USER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_EVDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_EVDEV
new file mode 100644
index 0000000000000000000000000000000000000000..b738491e804639f9a98f4bab688d4c33fdd42243
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_EVDEV
@@ -0,0 +1 @@
+CONFIG_INPUT_EVDEV=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_MOUSEDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_MOUSEDEV
new file mode 100644
index 0000000000000000000000000000000000000000..53cd13a609a7d9ff1aa81f917d704f4cc555a68b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_MOUSEDEV
@@ -0,0 +1 @@
+CONFIG_INPUT_MOUSEDEV=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_ASYMMETRIC_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_ASYMMETRIC_KEYS
new file mode 100644
index 0000000000000000000000000000000000000000..a1485b903d08fe3f2709ca70b2eb9a8b7317821a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_ASYMMETRIC_KEYS
@@ -0,0 +1 @@
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_AUDIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_AUDIT
new file mode 100644
index 0000000000000000000000000000000000000000..09d5db2b6a8c6a3e144270acea084145b0af1f80
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_AUDIT
@@ -0,0 +1 @@
+CONFIG_INTEGRITY_AUDIT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_PLATFORM_KEYRING b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_PLATFORM_KEYRING
new file mode 100644
index 0000000000000000000000000000000000000000..a7b1b167b4796842c9acde8927896755950aef46
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_PLATFORM_KEYRING
@@ -0,0 +1 @@
+CONFIG_INTEGRITY_PLATFORM_KEYRING=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_SIGNATURE b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_SIGNATURE
new file mode 100644
index 0000000000000000000000000000000000000000..2d104809dd91619c4969ab969222a0445bd5eafd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_SIGNATURE
@@ -0,0 +1 @@
+CONFIG_INTEGRITY_SIGNATURE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_TRUSTED_KEYRING b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_TRUSTED_KEYRING
new file mode 100644
index 0000000000000000000000000000000000000000..cfb23d479a99cffe4da477b2b6cf840e7c92a378
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_TRUSTED_KEYRING
@@ -0,0 +1 @@
+CONFIG_INTEGRITY_TRUSTED_KEYRING=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_API b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_API
new file mode 100644
index 0000000000000000000000000000000000000000..0d7838e89dc515dead39f138adb84db6443e62d8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_API
@@ -0,0 +1 @@
+CONFIG_IOMMU_API=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEBUGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEBUGFS
new file mode 100644
index 0000000000000000000000000000000000000000..ac8aa1de6e72f68bc75d3ebf1976897a53ff8b16
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEBUGFS
@@ -0,0 +1 @@
+# CONFIG_IOMMU_DEBUGFS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEFAULT_DMA_LAZY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEFAULT_DMA_LAZY
new file mode 100644
index 0000000000000000000000000000000000000000..8d9990cfabf46b00ada4abf6a8d6babc2cb117ef
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEFAULT_DMA_LAZY
@@ -0,0 +1 @@
+# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..a9155fba14ea15709a3eae9d2412990eec923b81
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DMA
@@ -0,0 +1 @@
+CONFIG_IOMMU_DMA=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IOVA b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IOVA
new file mode 100644
index 0000000000000000000000000000000000000000..70bdc8af504fc29084e7af13d785c95ba0ae84c9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IOVA
@@ -0,0 +1 @@
+CONFIG_IOMMU_IOVA=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IO_PGTABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IO_PGTABLE
new file mode 100644
index 0000000000000000000000000000000000000000..bef737908c5b694ba4651ce31fd97e8d8ea70d21
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IO_PGTABLE
@@ -0,0 +1 @@
+CONFIG_IOMMU_IO_PGTABLE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IO_STRICT_DEVMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_IO_STRICT_DEVMEM
new file mode 100644
index 0000000000000000000000000000000000000000..f95505f7cc9cdc6d4ae7d92598873905b4077314
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IO_STRICT_DEVMEM
@@ -0,0 +1 @@
+# CONFIG_IO_STRICT_DEVMEM is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_FILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_FILTER
new file mode 100644
index 0000000000000000000000000000000000000000..40c33ab1f627d97215ff4793c5231d40d50ed8fa
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_FILTER
@@ -0,0 +1 @@
+CONFIG_IP6_NF_FILTER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_IPTABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_IPTABLES
new file mode 100644
index 0000000000000000000000000000000000000000..6505ac4fe894d11914516e8d18f3018a050222e9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_IPTABLES
@@ -0,0 +1 @@
+CONFIG_IP6_NF_IPTABLES=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MANGLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MANGLE
new file mode 100644
index 0000000000000000000000000000000000000000..20221d52326eccc2545a38a5c06d00fe4397054e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MANGLE
@@ -0,0 +1 @@
+CONFIG_IP6_NF_MANGLE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_AH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_AH
new file mode 100644
index 0000000000000000000000000000000000000000..c3a8d4bbfd13942b96a618e6a3b9b0d6db8c3532
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_AH
@@ -0,0 +1 @@
+CONFIG_IP6_NF_MATCH_AH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_EUI64 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_EUI64
new file mode 100644
index 0000000000000000000000000000000000000000..caa47b591cbf87f8a693566eedf5a273a8e4063b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_EUI64
@@ -0,0 +1 @@
+CONFIG_IP6_NF_MATCH_EUI64=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_FRAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_FRAG
new file mode 100644
index 0000000000000000000000000000000000000000..e288cb5386dc4ec6d7a4a5f047445829705f0e92
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_FRAG
@@ -0,0 +1 @@
+CONFIG_IP6_NF_MATCH_FRAG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_HL b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_HL
new file mode 100644
index 0000000000000000000000000000000000000000..179ce8d4763f314212407154a8214410c29e599d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_HL
@@ -0,0 +1 @@
+CONFIG_IP6_NF_MATCH_HL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_IPV6HEADER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_IPV6HEADER
new file mode 100644
index 0000000000000000000000000000000000000000..e5c78739634e3d71d9db2f411d847bf62e3fb2b8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_IPV6HEADER
@@ -0,0 +1 @@
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_MH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_MH
new file mode 100644
index 0000000000000000000000000000000000000000..81b6caa1f7ea62b05eb5cc4b42f5b371d82ba4bd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_MH
@@ -0,0 +1 @@
+CONFIG_IP6_NF_MATCH_MH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_OPTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_OPTS
new file mode 100644
index 0000000000000000000000000000000000000000..ff6c9a495effc35ea013dfd155ae72ff12379db1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_OPTS
@@ -0,0 +1 @@
+CONFIG_IP6_NF_MATCH_OPTS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RPFILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RPFILTER
new file mode 100644
index 0000000000000000000000000000000000000000..7b0204783acd33148b1a30fc362f2ef9809f5154
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RPFILTER
@@ -0,0 +1 @@
+CONFIG_IP6_NF_MATCH_RPFILTER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RT
new file mode 100644
index 0000000000000000000000000000000000000000..4f7509d4fcce0f3dc58cd2dddf9e4f9236bae281
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RT
@@ -0,0 +1 @@
+CONFIG_IP6_NF_MATCH_RT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_SRH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_SRH
new file mode 100644
index 0000000000000000000000000000000000000000..5f5f25c4d9c25e72a30501be20d8b6d3b69d8683
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_SRH
@@ -0,0 +1 @@
+# CONFIG_IP6_NF_MATCH_SRH is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_NAT
new file mode 100644
index 0000000000000000000000000000000000000000..9796b0b059a8115c79b63ca4b6dd0143edabe05f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_NAT
@@ -0,0 +1 @@
+CONFIG_IP6_NF_NAT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_RAW b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_RAW
new file mode 100644
index 0000000000000000000000000000000000000000..d84b4a62d5d8de0f9be324b4f52062805e93253d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_RAW
@@ -0,0 +1 @@
+CONFIG_IP6_NF_RAW=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_SECURITY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_SECURITY
new file mode 100644
index 0000000000000000000000000000000000000000..01c6144f893cc9f865ee870dff8b5cd03e89ae48
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_SECURITY
@@ -0,0 +1 @@
+CONFIG_IP6_NF_SECURITY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_MASQUERADE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_MASQUERADE
new file mode 100644
index 0000000000000000000000000000000000000000..4b63a260a877b609e7d86bc3496192820476cf33
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_MASQUERADE
@@ -0,0 +1 @@
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_NPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_NPT
new file mode 100644
index 0000000000000000000000000000000000000000..e9a67d4fe963ce84d3f2352c8af007a6efbead30
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_NPT
@@ -0,0 +1 @@
+CONFIG_IP6_NF_TARGET_NPT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_REJECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_REJECT
new file mode 100644
index 0000000000000000000000000000000000000000..2de07d601e9a08d1e5200b4b3269b506ee46bd86
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_REJECT
@@ -0,0 +1 @@
+CONFIG_IP6_NF_TARGET_REJECT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_SYNPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_SYNPROXY
new file mode 100644
index 0000000000000000000000000000000000000000..6a3245b1f85c6e0d6840c6bd7db3db21ddb4a3b1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_SYNPROXY
@@ -0,0 +1 @@
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DEVICE_INTERFACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DEVICE_INTERFACE
new file mode 100644
index 0000000000000000000000000000000000000000..3d0a94d76b3a052c6b5b012fb065caf1624e96b8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DEVICE_INTERFACE
@@ -0,0 +1 @@
+CONFIG_IPMI_DEVICE_INTERFACE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DMI_DECODE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DMI_DECODE
new file mode 100644
index 0000000000000000000000000000000000000000..7444b769c2fa980890c5a4b1759bb9ad51e20376
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DMI_DECODE
@@ -0,0 +1 @@
+CONFIG_IPMI_DMI_DECODE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_EVENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_EVENT
new file mode 100644
index 0000000000000000000000000000000000000000..c95560a24c1f6469a9cd3bfd2b6ea4c23a5e7aea
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_EVENT
@@ -0,0 +1 @@
+CONFIG_IPMI_PANIC_EVENT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_STRING b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_STRING
new file mode 100644
index 0000000000000000000000000000000000000000..c560a1299414fe1975874d06f8a127b388c2b43a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_STRING
@@ -0,0 +1 @@
+CONFIG_IPMI_PANIC_STRING=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PLAT_DATA b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PLAT_DATA
new file mode 100644
index 0000000000000000000000000000000000000000..ae2c67ead77de43a5630cd21214f63f4cb589ffa
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PLAT_DATA
@@ -0,0 +1 @@
+CONFIG_IPMI_PLAT_DATA=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_POWEROFF b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_POWEROFF
new file mode 100644
index 0000000000000000000000000000000000000000..e37543efcac95b805507d087eb3e96c1ff8578ed
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_POWEROFF
@@ -0,0 +1 @@
+CONFIG_IPMI_POWEROFF=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SI b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SI
new file mode 100644
index 0000000000000000000000000000000000000000..ba6bb31db42cb934587b38d93e7fc796db4c9e8a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SI
@@ -0,0 +1 @@
+CONFIG_IPMI_SI=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SSIF b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SSIF
new file mode 100644
index 0000000000000000000000000000000000000000..d563156fbda65cba3836918e5becdbea22966dc2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SSIF
@@ -0,0 +1 @@
+CONFIG_IPMI_SSIF=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_WATCHDOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..4feb657b8ec228ca3543ebb88189ea11dee19f44
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_WATCHDOG
@@ -0,0 +1 @@
+CONFIG_IPMI_WATCHDOG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_GRE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_GRE
new file mode 100644
index 0000000000000000000000000000000000000000..d182bc16193e113b108f7c68d31c762120ef904c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_GRE
@@ -0,0 +1 @@
+CONFIG_IPV6_GRE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ILA b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ILA
new file mode 100644
index 0000000000000000000000000000000000000000..c9fa4cf9d9a7b596e0268eefad7dd10af5ab9ae7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ILA
@@ -0,0 +1 @@
+# CONFIG_IPV6_ILA is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MIP6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MIP6
new file mode 100644
index 0000000000000000000000000000000000000000..c3c95b5570d9a9da407b188cb4a09a9784d726d4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MIP6
@@ -0,0 +1 @@
+CONFIG_IPV6_MIP6=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE
new file mode 100644
index 0000000000000000000000000000000000000000..5dcf23403f69dbb9520d051165e6c02b4a2b0eb2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE
@@ -0,0 +1 @@
+CONFIG_IPV6_MROUTE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE_MULTIPLE_TABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
new file mode 100644
index 0000000000000000000000000000000000000000..0e6ac689574c0f35d50f41186dd9d84e043d3ead
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
@@ -0,0 +1 @@
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MULTIPLE_TABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MULTIPLE_TABLES
new file mode 100644
index 0000000000000000000000000000000000000000..18c565e1b99e934312045bc613dd679a23a92eab
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MULTIPLE_TABLES
@@ -0,0 +1 @@
+CONFIG_IPV6_MULTIPLE_TABLES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_NDISC_NODETYPE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_NDISC_NODETYPE
new file mode 100644
index 0000000000000000000000000000000000000000..46c0fcea2165cea8e333cc6086388647a46bfdaf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_NDISC_NODETYPE
@@ -0,0 +1 @@
+CONFIG_IPV6_NDISC_NODETYPE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_OPTIMISTIC_DAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_OPTIMISTIC_DAD
new file mode 100644
index 0000000000000000000000000000000000000000..a60dd2fc2b92e2e4935359d24004821fe0fc3c19
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_OPTIMISTIC_DAD
@@ -0,0 +1 @@
+CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_PIMSM_V2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_PIMSM_V2
new file mode 100644
index 0000000000000000000000000000000000000000..f0b06614627dc7e6c20b341943a8ea6010c7733e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_PIMSM_V2
@@ -0,0 +1 @@
+CONFIG_IPV6_PIMSM_V2=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTER_PREF b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTER_PREF
new file mode 100644
index 0000000000000000000000000000000000000000..8f5958c8f1f2a27b42cb1d03c39199e61837de82
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTER_PREF
@@ -0,0 +1 @@
+CONFIG_IPV6_ROUTER_PREF=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTE_INFO b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTE_INFO
new file mode 100644
index 0000000000000000000000000000000000000000..842b4d775f9536b75de9df01b69056241fb43fad
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTE_INFO
@@ -0,0 +1 @@
+CONFIG_IPV6_ROUTE_INFO=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_RPL_LWTUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_RPL_LWTUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..695e2a10a10d7e72d8fc05abad5c39d8a86d72a6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_RPL_LWTUNNEL
@@ -0,0 +1 @@
+# CONFIG_IPV6_RPL_LWTUNNEL is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_HMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_HMAC
new file mode 100644
index 0000000000000000000000000000000000000000..b98fbf937d1dab8eba2438a95202ed395abfc8c0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_HMAC
@@ -0,0 +1 @@
+# CONFIG_IPV6_SEG6_HMAC is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_LWTUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_LWTUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..cedc498cb1b8a7f3f9a7f8554c2a1342bc9f5b92
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_LWTUNNEL
@@ -0,0 +1 @@
+# CONFIG_IPV6_SEG6_LWTUNNEL is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT
new file mode 100644
index 0000000000000000000000000000000000000000..af75f60dd325e3fa0f093bc3ab971e5d201e930b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT
@@ -0,0 +1 @@
+CONFIG_IPV6_SIT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT_6RD b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT_6RD
new file mode 100644
index 0000000000000000000000000000000000000000..ab337b29b09e9cf911b0fb40276ee07dcdbf4136
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT_6RD
@@ -0,0 +1 @@
+CONFIG_IPV6_SIT_6RD=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SUBTREES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SUBTREES
new file mode 100644
index 0000000000000000000000000000000000000000..e79ed983094028a36a8f460a279946354190abab
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SUBTREES
@@ -0,0 +1 @@
+CONFIG_IPV6_SUBTREES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_TUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_TUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..2665c69ef3b3eec256b15360c6afd01bb38e258c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_TUNNEL
@@ -0,0 +1 @@
+CONFIG_IPV6_TUNNEL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_VTI b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_VTI
new file mode 100644
index 0000000000000000000000000000000000000000..cb7d072de381ac7c5cde368ecaba830a86e7ea0c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_VTI
@@ -0,0 +1 @@
+CONFIG_IPV6_VTI=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVLAN
new file mode 100644
index 0000000000000000000000000000000000000000..7926522454e5f3b9b3bc81a54f0be0d40929709d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVLAN
@@ -0,0 +1 @@
+CONFIG_IPVLAN=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVTAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVTAP
new file mode 100644
index 0000000000000000000000000000000000000000..4bb3856734cd9bdec630d97025504967baf665fe
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVTAP
@@ -0,0 +1 @@
+CONFIG_IPVTAP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ADVANCED_ROUTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ADVANCED_ROUTER
new file mode 100644
index 0000000000000000000000000000000000000000..d8f8f5b4ee781c650c4f53d6eca69fead33cfa02
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ADVANCED_ROUTER
@@ -0,0 +1 @@
+CONFIG_IP_ADVANCED_ROUTER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_FIB_TRIE_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_FIB_TRIE_STATS
new file mode 100644
index 0000000000000000000000000000000000000000..c6bc9c42c1d671fd666ab1c67d789faffdf2cb66
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_FIB_TRIE_STATS
@@ -0,0 +1 @@
+CONFIG_IP_FIB_TRIE_STATS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE
new file mode 100644
index 0000000000000000000000000000000000000000..56fa1578de217a63a798e5fd333d4abd0b7fedc7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE
@@ -0,0 +1 @@
+CONFIG_IP_MROUTE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE_MULTIPLE_TABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE_MULTIPLE_TABLES
new file mode 100644
index 0000000000000000000000000000000000000000..450a74bd67883c0864389c6d69cb92a05898e437
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -0,0 +1 @@
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTICAST b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTICAST
new file mode 100644
index 0000000000000000000000000000000000000000..15dbdffe14f9e5dcb57c7654d37fd5c67cefa06b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTICAST
@@ -0,0 +1 @@
+CONFIG_IP_MULTICAST=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTIPLE_TABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTIPLE_TABLES
new file mode 100644
index 0000000000000000000000000000000000000000..b68058a302d73ec0ea3c12c9cfdbd5c8d8b1acea
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTIPLE_TABLES
@@ -0,0 +1 @@
+CONFIG_IP_MULTIPLE_TABLES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARPFILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARPFILTER
new file mode 100644
index 0000000000000000000000000000000000000000..06b00645c72a651015b3398e5ccb7b8bbaf63646
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARPFILTER
@@ -0,0 +1 @@
+CONFIG_IP_NF_ARPFILTER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARP_MANGLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARP_MANGLE
new file mode 100644
index 0000000000000000000000000000000000000000..2af015359f7f50e5a9f596d6ce2dd08b290564a9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARP_MANGLE
@@ -0,0 +1 @@
+CONFIG_IP_NF_ARP_MANGLE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_FILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_FILTER
new file mode 100644
index 0000000000000000000000000000000000000000..24e5d7ee297a52b0e3dcf15683d6ca4518b204c5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_FILTER
@@ -0,0 +1 @@
+CONFIG_IP_NF_FILTER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_IPTABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_IPTABLES
new file mode 100644
index 0000000000000000000000000000000000000000..5238d9e0b5636691abc5dcb98b7bb7dbe706125b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_IPTABLES
@@ -0,0 +1 @@
+CONFIG_IP_NF_IPTABLES=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MANGLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MANGLE
new file mode 100644
index 0000000000000000000000000000000000000000..f0082088892a8b3821af8f866a96f4d0f72ef1ce
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MANGLE
@@ -0,0 +1 @@
+CONFIG_IP_NF_MANGLE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_AH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_AH
new file mode 100644
index 0000000000000000000000000000000000000000..e9277f2bb7a3bad4ca323587561a0ed06c2d2e61
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_AH
@@ -0,0 +1 @@
+CONFIG_IP_NF_MATCH_AH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_ECN b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_ECN
new file mode 100644
index 0000000000000000000000000000000000000000..15cb5557877c3e39a7103b480c4bbff74dd4231d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_ECN
@@ -0,0 +1 @@
+CONFIG_IP_NF_MATCH_ECN=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_RPFILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_RPFILTER
new file mode 100644
index 0000000000000000000000000000000000000000..cce5cf1e41ded367e36ed135db176cf9903dc4cf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_RPFILTER
@@ -0,0 +1 @@
+CONFIG_IP_NF_MATCH_RPFILTER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_TTL b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_TTL
new file mode 100644
index 0000000000000000000000000000000000000000..82e731606e7df6499167a0505ed4929e92930afa
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_TTL
@@ -0,0 +1 @@
+CONFIG_IP_NF_MATCH_TTL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_NAT
new file mode 100644
index 0000000000000000000000000000000000000000..d2aa272407dc16051dcf50fbe0399c6f522050c1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_NAT
@@ -0,0 +1 @@
+CONFIG_IP_NF_NAT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_ECN b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_ECN
new file mode 100644
index 0000000000000000000000000000000000000000..3f81954ca20bd179ee68ef35dab6546d8e78b2a4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_ECN
@@ -0,0 +1 @@
+CONFIG_IP_NF_TARGET_ECN=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_MASQUERADE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_MASQUERADE
new file mode 100644
index 0000000000000000000000000000000000000000..a72c7fad593db63ce662364f3e5924b309de4dde
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_MASQUERADE
@@ -0,0 +1 @@
+CONFIG_IP_NF_TARGET_MASQUERADE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_NETMAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_NETMAP
new file mode 100644
index 0000000000000000000000000000000000000000..7b44a7a7617b51383c73a33d07df7802037abf20
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_NETMAP
@@ -0,0 +1 @@
+CONFIG_IP_NF_TARGET_NETMAP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REDIRECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REDIRECT
new file mode 100644
index 0000000000000000000000000000000000000000..0f42816a902342559116d003a008658785b4dafc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REDIRECT
@@ -0,0 +1 @@
+CONFIG_IP_NF_TARGET_REDIRECT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REJECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REJECT
new file mode 100644
index 0000000000000000000000000000000000000000..7db5e7783a78b280115b9408cfea5dbe147fc795
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REJECT
@@ -0,0 +1 @@
+CONFIG_IP_NF_TARGET_REJECT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_SYNPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_SYNPROXY
new file mode 100644
index 0000000000000000000000000000000000000000..8fc66aff47a4f145b4e2b232e39706cb6ab0c22e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_SYNPROXY
@@ -0,0 +1 @@
+CONFIG_IP_NF_TARGET_SYNPROXY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_TTL b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_TTL
new file mode 100644
index 0000000000000000000000000000000000000000..f45533e024c3682a1127fafeb64521a243077d59
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_TTL
@@ -0,0 +1 @@
+CONFIG_IP_NF_TARGET_TTL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V1 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V1
new file mode 100644
index 0000000000000000000000000000000000000000..8d52cd2eb204631d444ffbaf622c8b1b1c63810f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V1
@@ -0,0 +1 @@
+CONFIG_IP_PIMSM_V1=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V2
new file mode 100644
index 0000000000000000000000000000000000000000..a475102ea8d68baedfc0546d8213093eaee767dc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V2
@@ -0,0 +1 @@
+CONFIG_IP_PIMSM_V2=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_MULTIPATH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_MULTIPATH
new file mode 100644
index 0000000000000000000000000000000000000000..9daaed3b6824e0e4f7f2b6154a98fe7035516501
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_MULTIPATH
@@ -0,0 +1 @@
+CONFIG_IP_ROUTE_MULTIPATH=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_VERBOSE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_VERBOSE
new file mode 100644
index 0000000000000000000000000000000000000000..f616d8ad1608da4d2ad4cdef98191bffc3426cea
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_VERBOSE
@@ -0,0 +1 @@
+CONFIG_IP_ROUTE_VERBOSE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IP
new file mode 100644
index 0000000000000000000000000000000000000000..194e715a0606f969934a2c6660b1c6c07aef1a71
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IP
@@ -0,0 +1 @@
+CONFIG_IP_SET_BITMAP_IP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IPMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IPMAC
new file mode 100644
index 0000000000000000000000000000000000000000..26361aab6480609fde5d0a2cdb8a532373435aa5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IPMAC
@@ -0,0 +1 @@
+CONFIG_IP_SET_BITMAP_IPMAC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_PORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_PORT
new file mode 100644
index 0000000000000000000000000000000000000000..b251fecbf6ff89f57bbbd88da8d09f44a6a89bd5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_PORT
@@ -0,0 +1 @@
+CONFIG_IP_SET_BITMAP_PORT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IP
new file mode 100644
index 0000000000000000000000000000000000000000..ed55391a02480f7ccfa5ee1645a80b60bad4baa3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IP
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_IP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMAC
new file mode 100644
index 0000000000000000000000000000000000000000..5e50cb0d7724d9c35beb1bc826a4b3d0cba958c6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMAC
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_IPMAC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMARK
new file mode 100644
index 0000000000000000000000000000000000000000..f93dde91427d9b96f23f0a8e18158a8008972b30
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMARK
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_IPMARK=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..8f2368eb11d0c053f5cfefc5c51d48672360e7ad
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORT
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_IPPORT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTIP
new file mode 100644
index 0000000000000000000000000000000000000000..ce51dedf7db228c8f1273811f42a14afc8db9fb6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTIP
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_IPPORTIP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTNET b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTNET
new file mode 100644
index 0000000000000000000000000000000000000000..755daa4ab43939d6604d9584120b9b51abe88afb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTNET
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_IPPORTNET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_MAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_MAC
new file mode 100644
index 0000000000000000000000000000000000000000..f0f2f1eab422d4f8840e7605d6fc36c1d393ea1c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_MAC
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_MAC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NET b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NET
new file mode 100644
index 0000000000000000000000000000000000000000..2f1258e6a1bf37f6ca27d46256c758054ead4fab
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NET
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_NET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETIFACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETIFACE
new file mode 100644
index 0000000000000000000000000000000000000000..3f37e0275537bcd842caf79ce29c4d7c5a1cce1b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETIFACE
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_NETIFACE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETNET b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETNET
new file mode 100644
index 0000000000000000000000000000000000000000..c644763e4ff565089034e6e8414ddcc8c814d167
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETNET
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_NETNET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORT
new file mode 100644
index 0000000000000000000000000000000000000000..ad2168816a794621ffa0fc975f646c2b67ad49ce
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORT
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_NETPORT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORTNET b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORTNET
new file mode 100644
index 0000000000000000000000000000000000000000..416de30b8d396f934e136df03f109a7d7325016c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORTNET
@@ -0,0 +1 @@
+CONFIG_IP_SET_HASH_NETPORTNET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_LIST_SET b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_LIST_SET
new file mode 100644
index 0000000000000000000000000000000000000000..71cafb46d3b08546755d12c58f195a708d78649c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_LIST_SET
@@ -0,0 +1 @@
+CONFIG_IP_SET_LIST_SET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_MAX b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_MAX
new file mode 100644
index 0000000000000000000000000000000000000000..e1ae2701a2a01f7e8e7089f6e3941877235ae7e0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_MAX
@@ -0,0 +1 @@
+CONFIG_IP_SET_MAX=256
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..19dd826277df0753b0c63fc4ae24101e0bf1e2fc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DEBUG
@@ -0,0 +1 @@
+# CONFIG_IP_VS_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DH
new file mode 100644
index 0000000000000000000000000000000000000000..0740f4f2f9fa434b148439966356b3b4c843e667
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DH
@@ -0,0 +1 @@
+CONFIG_IP_VS_DH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FO b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FO
new file mode 100644
index 0000000000000000000000000000000000000000..335f1003d574cc9489c43e04172b877bf2d2d53a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FO
@@ -0,0 +1 @@
+CONFIG_IP_VS_FO=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FTP
new file mode 100644
index 0000000000000000000000000000000000000000..9bea42ef42301f714cc71b4afa7e28875fc83b15
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FTP
@@ -0,0 +1 @@
+CONFIG_IP_VS_FTP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLC
new file mode 100644
index 0000000000000000000000000000000000000000..e2e9dda1110873bce67f73ab2a5edb5263376f4c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLC
@@ -0,0 +1 @@
+CONFIG_IP_VS_LBLC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLCR b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLCR
new file mode 100644
index 0000000000000000000000000000000000000000..4e8c2296024643b916d0ababae0935819b114e6c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLCR
@@ -0,0 +1 @@
+CONFIG_IP_VS_LBLCR=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LC
new file mode 100644
index 0000000000000000000000000000000000000000..bde490d673e168b885c77aeecc520fc92a266529
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LC
@@ -0,0 +1 @@
+CONFIG_IP_VS_LC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH
new file mode 100644
index 0000000000000000000000000000000000000000..1d07e9a6114bec3c49d3ed97497739d94792949e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH
@@ -0,0 +1 @@
+CONFIG_IP_VS_MH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH_TAB_INDEX b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH_TAB_INDEX
new file mode 100644
index 0000000000000000000000000000000000000000..5e864011428285cbefab82dc8c539012158b68d9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH_TAB_INDEX
@@ -0,0 +1 @@
+CONFIG_IP_VS_MH_TAB_INDEX=12
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NFCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NFCT
new file mode 100644
index 0000000000000000000000000000000000000000..96260705f201c9890bf51f8362dd4e6d6c0c1672
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NFCT
@@ -0,0 +1 @@
+CONFIG_IP_VS_NFCT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NQ
new file mode 100644
index 0000000000000000000000000000000000000000..9ac21c3a57d40fe3e6e18d174eb5be604970122d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NQ
@@ -0,0 +1 @@
+CONFIG_IP_VS_NQ=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_OVF b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_OVF
new file mode 100644
index 0000000000000000000000000000000000000000..1adeebe14ddea2cea03f70df4d46c48030d5def1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_OVF
@@ -0,0 +1 @@
+CONFIG_IP_VS_OVF=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_PE_SIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_PE_SIP
new file mode 100644
index 0000000000000000000000000000000000000000..29b770f0ffd808cf854cb407d00de316718a0869
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_PE_SIP
@@ -0,0 +1 @@
+CONFIG_IP_VS_PE_SIP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_RR b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_RR
new file mode 100644
index 0000000000000000000000000000000000000000..0f146f62e7821bc9c629cb1eab3788bbc9da3c89
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_RR
@@ -0,0 +1 @@
+CONFIG_IP_VS_RR=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SED b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SED
new file mode 100644
index 0000000000000000000000000000000000000000..9689e6f0c8941e36b19f96990f3a9e53cb539c4f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SED
@@ -0,0 +1 @@
+CONFIG_IP_VS_SED=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH
new file mode 100644
index 0000000000000000000000000000000000000000..17b4fd13ebbdb4800b83b3c58a62c95376ad5dc8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH
@@ -0,0 +1 @@
+CONFIG_IP_VS_SH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH_TAB_BITS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH_TAB_BITS
new file mode 100644
index 0000000000000000000000000000000000000000..0a0f326b82c9da19d42b4f2c9342f93de3265e6e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH_TAB_BITS
@@ -0,0 +1 @@
+CONFIG_IP_VS_SH_TAB_BITS=8
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_TAB_BITS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_TAB_BITS
new file mode 100644
index 0000000000000000000000000000000000000000..b41927ca0bc355b7ac7d5b3fec1e6f0d175a80dc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_TAB_BITS
@@ -0,0 +1 @@
+CONFIG_IP_VS_TAB_BITS=12
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WLC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WLC
new file mode 100644
index 0000000000000000000000000000000000000000..79fb718ad18231a423252418a7d78179057e5168
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WLC
@@ -0,0 +1 @@
+CONFIG_IP_VS_WLC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WRR b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WRR
new file mode 100644
index 0000000000000000000000000000000000000000..3dabf2db5991175b97387e52eb82d411132722dc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WRR
@@ -0,0 +1 @@
+CONFIG_IP_VS_WRR=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_BOOT_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_BOOT_SYSFS
new file mode 100644
index 0000000000000000000000000000000000000000..1b9880a01972a130d7bcf95e7abeb5ebeb817a33
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_BOOT_SYSFS
@@ -0,0 +1 @@
+CONFIG_ISCSI_BOOT_SYSFS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TARGET b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TARGET
new file mode 100644
index 0000000000000000000000000000000000000000..78b66a50858e888d650885cdd72529470560ad4a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TARGET
@@ -0,0 +1 @@
+CONFIG_ISCSI_TARGET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TCP
new file mode 100644
index 0000000000000000000000000000000000000000..fefe32a01b122b2ddc3fbb67f767818b55aeb56f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TCP
@@ -0,0 +1 @@
+CONFIG_ISCSI_TCP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBEVF_IPSEC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBEVF_IPSEC
new file mode 100644
index 0000000000000000000000000000000000000000..38828328ee613df4bb932ed893d82bede2629c1c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBEVF_IPSEC
@@ -0,0 +1 @@
+CONFIG_IXGBEVF_IPSEC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_DCB b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_DCB
new file mode 100644
index 0000000000000000000000000000000000000000..aacb4453ac5ee018dd5fb8900e1f8cd2c238a5b6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_DCB
@@ -0,0 +1 @@
+CONFIG_IXGBE_DCB=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_HWMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_HWMON
new file mode 100644
index 0000000000000000000000000000000000000000..27194b4ae5239054ec39cecee0382959073424b1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_HWMON
@@ -0,0 +1 @@
+CONFIG_IXGBE_HWMON=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_IPSEC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_IPSEC
new file mode 100644
index 0000000000000000000000000000000000000000..17c87410fac030aba471c22973bed3784e338b5f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_IPSEC
@@ -0,0 +1 @@
+CONFIG_IXGBE_IPSEC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_JBD2_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_JBD2_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..bce5ddaf272d0d6fd598339ebe88a2f05f1172b4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_JBD2_DEBUG
@@ -0,0 +1 @@
+# CONFIG_JBD2_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_JOLIET b/anolis/configs/L1-RECOMMEND/default/CONFIG_JOLIET
new file mode 100644
index 0000000000000000000000000000000000000000..4a9f8fd47c6d7d5fa34eef9f25b5f357df70a02d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_JOLIET
@@ -0,0 +1 @@
+CONFIG_JOLIET=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KASAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_KASAN
new file mode 100644
index 0000000000000000000000000000000000000000..31767b1fe4e281f1cd45084b60bcea5425ff4373
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KASAN
@@ -0,0 +1 @@
+# CONFIG_KASAN is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KCOV b/anolis/configs/L1-RECOMMEND/default/CONFIG_KCOV
new file mode 100644
index 0000000000000000000000000000000000000000..736eb752a23f6b096043082629678622f2a61ea3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KCOV
@@ -0,0 +1 @@
+# CONFIG_KCOV is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_CONTINUE_CATASTROPHIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_CONTINUE_CATASTROPHIC
new file mode 100644
index 0000000000000000000000000000000000000000..47466928f766198838fc3420966a6b2acdc5b618
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_CONTINUE_CATASTROPHIC
@@ -0,0 +1 @@
+CONFIG_KDB_CONTINUE_CATASTROPHIC=0
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_DEFAULT_ENABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_DEFAULT_ENABLE
new file mode 100644
index 0000000000000000000000000000000000000000..aeef61ee9433f80c34a97c049d6debe1f2a3675d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_DEFAULT_ENABLE
@@ -0,0 +1 @@
+CONFIG_KDB_DEFAULT_ENABLE=0x0
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_KEYBOARD b/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_KEYBOARD
new file mode 100644
index 0000000000000000000000000000000000000000..afe601d90f0656cfebe65de19b4831a38f805c5b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_KEYBOARD
@@ -0,0 +1 @@
+CONFIG_KDB_KEYBOARD=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KEXEC_SIG b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEXEC_SIG
new file mode 100644
index 0000000000000000000000000000000000000000..67b68865886cd9ef174cb5a247ffd1ffebf409e3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEXEC_SIG
@@ -0,0 +1 @@
+CONFIG_KEXEC_SIG=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYBOARD_ATKBD b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYBOARD_ATKBD
new file mode 100644
index 0000000000000000000000000000000000000000..54a1bd12bfd8132e6b52e53dc85c23e8d59ab648
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYBOARD_ATKBD
@@ -0,0 +1 @@
+CONFIG_KEYBOARD_ATKBD=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS
new file mode 100644
index 0000000000000000000000000000000000000000..957ee122d27594aa478d2075a4c9cc6519be2018
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS
@@ -0,0 +1 @@
+CONFIG_KEYS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS_REQUEST_CACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS_REQUEST_CACHE
new file mode 100644
index 0000000000000000000000000000000000000000..7d5a6bb6127c22408689e67bacc6fb1fadd77400
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS_REQUEST_CACHE
@@ -0,0 +1 @@
+# CONFIG_KEYS_REQUEST_CACHE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KEY_DH_OPERATIONS b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEY_DH_OPERATIONS
new file mode 100644
index 0000000000000000000000000000000000000000..87d8b646df85f6f4bc681bc91528c47dc319b71f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEY_DH_OPERATIONS
@@ -0,0 +1 @@
+# CONFIG_KEY_DH_OPERATIONS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_DEFERRABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_DEFERRABLE
new file mode 100644
index 0000000000000000000000000000000000000000..6f6a6a279aea0a063cc84cf18aa140d41576e39e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_DEFERRABLE
@@ -0,0 +1 @@
+CONFIG_KFENCE_DEFERRABLE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_NUM_OBJECTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_NUM_OBJECTS
new file mode 100644
index 0000000000000000000000000000000000000000..d3f46787a4dd39c9e1227c7c93db5dd8cd98233a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_NUM_OBJECTS
@@ -0,0 +1 @@
+CONFIG_KFENCE_NUM_OBJECTS=255
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_SAMPLE_INTERVAL b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_SAMPLE_INTERVAL
new file mode 100644
index 0000000000000000000000000000000000000000..d5ab530e803a783e50d602f671c17e2737716192
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_SAMPLE_INTERVAL
@@ -0,0 +1 @@
+CONFIG_KFENCE_SAMPLE_INTERVAL=0
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_STRESS_TEST_FAULTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_STRESS_TEST_FAULTS
new file mode 100644
index 0000000000000000000000000000000000000000..6cdbcfa610e324a136651edd199f3c0b3d9ee06f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_STRESS_TEST_FAULTS
@@ -0,0 +1 @@
+CONFIG_KFENCE_STRESS_TEST_FAULTS=0
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_HONOUR_BLOCKLIST b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_HONOUR_BLOCKLIST
new file mode 100644
index 0000000000000000000000000000000000000000..7da79f6618838c94040b1f7f54cbbc875766b961
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_HONOUR_BLOCKLIST
@@ -0,0 +1 @@
+CONFIG_KGDB_HONOUR_BLOCKLIST=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_KDB b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_KDB
new file mode 100644
index 0000000000000000000000000000000000000000..3bb4d48c4395552d3a98bad08352c76c2626c57a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_KDB
@@ -0,0 +1 @@
+CONFIG_KGDB_KDB=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_TESTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_TESTS
new file mode 100644
index 0000000000000000000000000000000000000000..2defea72ef13d96d96e05eb25657edb89de61630
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_TESTS
@@ -0,0 +1 @@
+CONFIG_KGDB_TESTS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KPROBE_EVENTS_ON_NOTRACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_KPROBE_EVENTS_ON_NOTRACE
new file mode 100644
index 0000000000000000000000000000000000000000..d36844fdb504486d15cf95817b7bb45aaef887b7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KPROBE_EVENTS_ON_NOTRACE
@@ -0,0 +1 @@
+# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KSM b/anolis/configs/L1-RECOMMEND/default/CONFIG_KSM
new file mode 100644
index 0000000000000000000000000000000000000000..757efcb905c27db62b8a609936966bc71d382381
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KSM
@@ -0,0 +1 @@
+CONFIG_KSM=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LDISC_AUTOLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_LDISC_AUTOLOAD
new file mode 100644
index 0000000000000000000000000000000000000000..4f92dceabf3aff1d922ea4abb07106ac9eb1bfc2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LDISC_AUTOLOAD
@@ -0,0 +1 @@
+CONFIG_LDISC_AUTOLOAD=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFC b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFC
new file mode 100644
index 0000000000000000000000000000000000000000..af692f8d7b564ab02fd526d94fb84bec5ccf3830
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFC
@@ -0,0 +1 @@
+CONFIG_LIBFC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFCOE b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFCOE
new file mode 100644
index 0000000000000000000000000000000000000000..8c693951bbad20a95f434a473db666502708960a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFCOE
@@ -0,0 +1 @@
+CONFIG_LIBFCOE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LIST_HARDENED b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIST_HARDENED
new file mode 100644
index 0000000000000000000000000000000000000000..99cee0814645120a45258a03e0d87c20c67e510b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIST_HARDENED
@@ -0,0 +1 @@
+CONFIG_LIST_HARDENED=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOAD_UEFI_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOAD_UEFI_KEYS
new file mode 100644
index 0000000000000000000000000000000000000000..22502e98174875563db776913fa2b9287f483e13
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOAD_UEFI_KEYS
@@ -0,0 +1 @@
+CONFIG_LOAD_UEFI_KEYS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION
new file mode 100644
index 0000000000000000000000000000000000000000..22833e328af2bf3ec925d4517e09976516ea7a98
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION
@@ -0,0 +1 @@
+CONFIG_LOCALVERSION=""
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION_AUTO b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION_AUTO
new file mode 100644
index 0000000000000000000000000000000000000000..3addafb9eabe6542b50f05d9c89eab2c4fc6a837
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION_AUTO
@@ -0,0 +1 @@
+# CONFIG_LOCALVERSION_AUTO is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCKDEP_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCKDEP_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..b2f2705c18ca7c576736c21cf7973c143aa200e2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCKDEP_SUPPORT
@@ -0,0 +1 @@
+CONFIG_LOCKDEP_SUPPORT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCK_EVENT_COUNTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCK_EVENT_COUNTS
new file mode 100644
index 0000000000000000000000000000000000000000..8fb759417d49bbf461fdbc4c6cefde4a02cd2c12
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCK_EVENT_COUNTS
@@ -0,0 +1 @@
+# CONFIG_LOCK_EVENT_COUNTS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOG_CPU_MAX_BUF_SHIFT b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOG_CPU_MAX_BUF_SHIFT
new file mode 100644
index 0000000000000000000000000000000000000000..a3c44ae926ec46bc3191cd0ec5d5d40c835bfae0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOG_CPU_MAX_BUF_SHIFT
@@ -0,0 +1 @@
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOOPBACK_TARGET b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOOPBACK_TARGET
new file mode 100644
index 0000000000000000000000000000000000000000..8d0415cbf0db29317a74ea83c1103d28fb5ca085
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOOPBACK_TARGET
@@ -0,0 +1 @@
+CONFIG_LOOPBACK_TARGET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_ENABLED b/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_ENABLED
new file mode 100644
index 0000000000000000000000000000000000000000..d32c5afd6e3964c793defab8955592cca12628dc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_ENABLED
@@ -0,0 +1 @@
+# CONFIG_LRU_GEN_ENABLED is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_STATS
new file mode 100644
index 0000000000000000000000000000000000000000..44b5a476d4cb971fdec17efbd22cba251f81abf1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_STATS
@@ -0,0 +1 @@
+# CONFIG_LRU_GEN_STATS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM b/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM
new file mode 100644
index 0000000000000000000000000000000000000000..e9af105b6c2432df437597a8cdd8ea513497dc9e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM
@@ -0,0 +1 @@
+CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor"
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM_MMAP_MIN_ADDR b/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM_MMAP_MIN_ADDR
new file mode 100644
index 0000000000000000000000000000000000000000..8a24c1f03fe6d73f1cea720f5906bbb61ebcd622
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM_MMAP_MIN_ADDR
@@ -0,0 +1 @@
+CONFIG_LSM_MMAP_MIN_ADDR=65535
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LTO_NONE b/anolis/configs/L1-RECOMMEND/default/CONFIG_LTO_NONE
new file mode 100644
index 0000000000000000000000000000000000000000..8e9a8bbdac2caf0529090e09348e6bfccf1b3f95
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LTO_NONE
@@ -0,0 +1 @@
+CONFIG_LTO_NONE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..1bd2e1c28eab55bc80110adf66aa122ec556ad5b
--- /dev/null
+++
b/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL @@ -0,0 +1 @@ +CONFIG_LWTUNNEL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL_BPF b/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL_BPF new file mode 100644 index 0000000000000000000000000000000000000000..ecd9c79a2f9f1f40f63de02055ca1d7c3ef4cd56 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL_BPF @@ -0,0 +1 @@ +CONFIG_LWTUNNEL_BPF=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MACSEC b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACSEC new file mode 100644 index 0000000000000000000000000000000000000000..678c117d07fe3e154df2be1cd0078a1fd8f190fb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACSEC @@ -0,0 +1 @@ +CONFIG_MACSEC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVLAN new file mode 100644 index 0000000000000000000000000000000000000000..76ead66855f9217fec7bfd2809e8b052c72ed6e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVLAN @@ -0,0 +1 @@ +CONFIG_MACVLAN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVTAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVTAP new file mode 100644 index 0000000000000000000000000000000000000000..ee7d8996d470d0f806837c91a119fd8caa94fd09 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVTAP @@ -0,0 +1 @@ +CONFIG_MACVTAP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE new file mode 100644 index 0000000000000000000000000000000000000000..6d80e2cddcae40f840dc4c14622cadf682f40126 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE @@ -0,0 +1 @@ +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL new file mode 100644 index 0000000000000000000000000000000000000000..649399011f5fbda1755dd5a289493f28007aeae8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL @@ -0,0 +1 @@ +CONFIG_MAGIC_SYSRQ_SERIAL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE new file mode 100644 index 0000000000000000000000000000000000000000..3b2b7e8deee7fa58f5a68fdc8fd8fa94724d629c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE @@ -0,0 +1 @@ +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MAX_SKB_FRAGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAX_SKB_FRAGS new file mode 100644 index 0000000000000000000000000000000000000000..10682c4542d534ea3570e902aa203dcf80af9755 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAX_SKB_FRAGS @@ -0,0 +1 @@ +CONFIG_MAX_SKB_FRAGS=17 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_AUTODETECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_AUTODETECT new file mode 100644 index 0000000000000000000000000000000000000000..29191f7e1ed226d031e3da0339feb8711a3c0b31 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_AUTODETECT @@ -0,0 +1 @@ +CONFIG_MD_AUTODETECT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_CLUSTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_CLUSTER new file mode 100644 index 
0000000000000000000000000000000000000000..1750ff8ca8370de296f1c3a2f9ca340a54812695 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_CLUSTER @@ -0,0 +1 @@ +CONFIG_MD_CLUSTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_FAULTY b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_FAULTY new file mode 100644 index 0000000000000000000000000000000000000000..d332a7499c80a7b104ac551d477869ebc2187bcc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_FAULTY @@ -0,0 +1 @@ +CONFIG_MD_FAULTY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_LINEAR b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_LINEAR new file mode 100644 index 0000000000000000000000000000000000000000..7ea8e58fb564b6ef8bedc780e5620311ff507611 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_LINEAR @@ -0,0 +1 @@ +CONFIG_MD_LINEAR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_MULTIPATH b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_MULTIPATH new file mode 100644 index 0000000000000000000000000000000000000000..c95cb9cb3e6f0a58b17bc42cbb9605fe36d90747 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_MULTIPATH @@ -0,0 +1 @@ +# CONFIG_MD_MULTIPATH is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID0 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID0 new file mode 100644 index 0000000000000000000000000000000000000000..bc197d6db237254c0ae1a52cc716e7f1aba1e44c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID0 @@ -0,0 +1 @@ +CONFIG_MD_RAID0=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID1 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID1 new file mode 100644 index 0000000000000000000000000000000000000000..b54611532c51b6b9ac6c86f98edd1c06e9a0eb36 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID1 @@ -0,0 +1 @@ +CONFIG_MD_RAID1=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID10 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID10 new file mode 100644 index 0000000000000000000000000000000000000000..fbfc466709ac84f3247fc0ce95b6637100e70bbf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID10 @@ -0,0 +1 @@ +CONFIG_MD_RAID10=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID456 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID456 new file mode 100644 index 0000000000000000000000000000000000000000..f5f9b93e1c86ea9e13688fb1ef440a627a2c4da7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID456 @@ -0,0 +1 @@ +CONFIG_MD_RAID456=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_LEGACY b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_LEGACY new file mode 100644 index 0000000000000000000000000000000000000000..3109de7c1fe0bc7f525ad37e60ba4e6eaad3031a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_LEGACY @@ -0,0 +1 @@ +# CONFIG_MEGARAID_LEGACY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_NEWGEN b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_NEWGEN new file mode 100644 index 0000000000000000000000000000000000000000..a132e5f39ca8bde28011b9419615ed5fbf89bcfa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_NEWGEN @@ -0,0 +1 @@ +# CONFIG_MEGARAID_NEWGEN is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_SAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_SAS new file mode 100644 index 
0000000000000000000000000000000000000000..c32b82a357a5be04c6931673440573c98862f13c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_SAS @@ -0,0 +1 @@ +CONFIG_MEGARAID_SAS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY new file mode 100644 index 0000000000000000000000000000000000000000..0e3aceb9597ab03024700e45ef6703175311d7fc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY @@ -0,0 +1 @@ +# CONFIG_MEMORY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_BALLOON b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_BALLOON new file mode 100644 index 0000000000000000000000000000000000000000..d3b2adc6b9718bed499cfd8b7b87b5bc4fbba4ec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_BALLOON @@ -0,0 +1 @@ +CONFIG_MEMORY_BALLOON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_HOTREMOVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_HOTREMOVE new file mode 100644 index 0000000000000000000000000000000000000000..362150e6923c20be612db487248fe913e1d5bbb7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_HOTREMOVE @@ -0,0 +1 @@ +CONFIG_MEMORY_HOTREMOVE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMSLI b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMSLI new file mode 100644 index 0000000000000000000000000000000000000000..ae1f861749f0c2a3711160ccc278987f1e782004 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMSLI @@ -0,0 +1 @@ +CONFIG_MEMSLI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MESSAGE_LOGLEVEL_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_MESSAGE_LOGLEVEL_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..ca39b7f7a35d978de754f077b695ec4306d169c1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MESSAGE_LOGLEVEL_DEFAULT @@ -0,0 +1 @@ +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE new file mode 100644 index 0000000000000000000000000000000000000000..1b4c55bdd30c6a834cd065fc90ea57ae41b332d3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE @@ -0,0 +1 @@ +CONFIG_MLX4_CORE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE_GEN2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE_GEN2 new file mode 100644 index 0000000000000000000000000000000000000000..e3653e88b67b73c1ff7b0057592769ca29fa80a1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE_GEN2 @@ -0,0 +1 @@ +# CONFIG_MLX4_CORE_GEN2 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..8c1ea389b39d905f142f28922d95888ee090c958 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_DEBUG @@ -0,0 +1 @@ +CONFIG_MLX4_DEBUG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_EN_DCB b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_EN_DCB new file mode 100644 index 0000000000000000000000000000000000000000..33382c1ac42ae881cbe57d99bddd15553946eed9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_EN_DCB @@ -0,0 +1 @@ +CONFIG_MLX4_EN_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_BRIDGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_BRIDGE new file mode 100644 index 
0000000000000000000000000000000000000000..4e7033e48f95a5c54cfab84fe2732a2c367eb763 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_BRIDGE @@ -0,0 +1 @@ +CONFIG_MLX5_BRIDGE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CLS_ACT b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CLS_ACT new file mode 100644 index 0000000000000000000000000000000000000000..2ff38eef774792d0525a26e926ffcfea140e703c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CLS_ACT @@ -0,0 +1 @@ +CONFIG_MLX5_CLS_ACT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CORE_EN_DCB b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CORE_EN_DCB new file mode 100644 index 0000000000000000000000000000000000000000..6607070b8d09952f24cde2e9ee9ce27f5f57274f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CORE_EN_DCB @@ -0,0 +1 @@ +CONFIG_MLX5_CORE_EN_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_ARFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_ARFS new file mode 100644 index 0000000000000000000000000000000000000000..0a220b0e7cc057ed85ecb0733b9bd7c5872c5ac0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_ARFS @@ -0,0 +1 @@ +CONFIG_MLX5_EN_ARFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_RXNFC b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_RXNFC new file mode 100644 index 0000000000000000000000000000000000000000..0a64be4dbcf000e1ca003aa4c417fb8b26a21c09 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_RXNFC @@ -0,0 +1 @@ +CONFIG_MLX5_EN_RXNFC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_ESWITCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_ESWITCH new file mode 100644 index 0000000000000000000000000000000000000000..8a69e0671e37bbb7421c4e095e2e799581169e16 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_ESWITCH @@ -0,0 +1 @@ +CONFIG_MLX5_ESWITCH=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_FPGA b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_FPGA new file mode 100644 index 0000000000000000000000000000000000000000..bac6b305da116d64bc9452b1ae95655a3138d694 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_FPGA @@ -0,0 +1 @@ +CONFIG_MLX5_FPGA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_INFINIBAND b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_INFINIBAND new file mode 100644 index 0000000000000000000000000000000000000000..a81a552d1fb3d3e4d4198db9dd9c784fd6de85bd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_INFINIBAND @@ -0,0 +1 @@ +CONFIG_MLX5_INFINIBAND=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_MPFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_MPFS new file mode 100644 index 0000000000000000000000000000000000000000..6799ed484a57f1326cf9cfc7667e77d9e68a4476 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_MPFS @@ -0,0 +1 @@ +CONFIG_MLX5_MPFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SF b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SF new file mode 100644 index 0000000000000000000000000000000000000000..3375a7223f73b94dc18f1826684f91b59a9dfddc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SF @@ -0,0 +1 @@ +# CONFIG_MLX5_SF is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SW_STEERING b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SW_STEERING new file mode 100644 index 
0000000000000000000000000000000000000000..28a9bc46aae52d172b2c5b1203c0baf0fcbd9b66 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SW_STEERING @@ -0,0 +1 @@ +CONFIG_MLX5_SW_STEERING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_CT b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_CT new file mode 100644 index 0000000000000000000000000000000000000000..0651a412ecd76c29c8d300ff684dcf31c6e04c06 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_CT @@ -0,0 +1 @@ +CONFIG_MLX5_TC_CT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_SAMPLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_SAMPLE new file mode 100644 index 0000000000000000000000000000000000000000..777a6403729c37d8a460e83c6ceed79517de317a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_SAMPLE @@ -0,0 +1 @@ +CONFIG_MLX5_TC_SAMPLE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXFW b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXFW new file mode 100644 index 0000000000000000000000000000000000000000..5b475180673b8439166ff0771223b17bb3e43479 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXFW @@ -0,0 +1 @@ +CONFIG_MLXFW=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_HWMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_HWMON new file mode 100644 index 0000000000000000000000000000000000000000..a0a37fc5281fe8cc9bc77fab69a033c152172d85 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_HWMON @@ -0,0 +1 @@ +CONFIG_MLXSW_CORE_HWMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_THERMAL b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_THERMAL new file mode 100644 index 0000000000000000000000000000000000000000..b3b3b2fc50494c22a4ecd099744b91ee40df2598 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_THERMAL @@ -0,0 +1 @@ +CONFIG_MLXSW_CORE_THERMAL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_I2C b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_I2C new file mode 100644 index 0000000000000000000000000000000000000000..57927c23e4f2a60a4d8c9dfec6919c7353193c55 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_I2C @@ -0,0 +1 @@ +CONFIG_MLXSW_I2C=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_MINIMAL b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_MINIMAL new file mode 100644 index 0000000000000000000000000000000000000000..5cc296afcafb985f87c25acc37418bb1c3999e1f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_MINIMAL @@ -0,0 +1 @@ +CONFIG_MLXSW_MINIMAL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_PCI b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_PCI new file mode 100644 index 0000000000000000000000000000000000000000..f1cbb9173b78d2dbdf453d97dbc34e85202c0abe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_PCI @@ -0,0 +1 @@ +CONFIG_MLXSW_PCI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM new file mode 100644 index 0000000000000000000000000000000000000000..0bdac0c197079fae897d65633230af2a10469d1c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM @@ -0,0 +1 @@ +CONFIG_MLXSW_SPECTRUM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM_DCB b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM_DCB new file mode 100644 index 
0000000000000000000000000000000000000000..3c8c38d0bc0e739854af65dcf532708be4f802e2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM_DCB @@ -0,0 +1 @@ +CONFIG_MLXSW_SPECTRUM_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS new file mode 100644 index 0000000000000000000000000000000000000000..35c9fe219b3a8a47fa39c98185153124f2d2546e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS @@ -0,0 +1 @@ +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_LOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_LOAD new file mode 100644 index 0000000000000000000000000000000000000000..8583b86bd57feaaad590d0f6a73e03512e7f1d1b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_LOAD @@ -0,0 +1 @@ +CONFIG_MODULE_FORCE_LOAD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_UNLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_UNLOAD new file mode 100644 index 0000000000000000000000000000000000000000..757f81d34bcdd0e0a66c89c4ceb0ea02b5823950 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_UNLOAD @@ -0,0 +1 @@ +# CONFIG_MODULE_FORCE_UNLOAD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_ALL b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_ALL new file mode 100644 index 0000000000000000000000000000000000000000..4a502b6f02df3f52336793f6a6903a15bb524222 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_ALL @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_ALL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_FORCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_FORCE new file mode 100644 index 0000000000000000000000000000000000000000..80b1d0c4b4360cf4a6d22d878ec38dd2d84bcaa1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_FORCE @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_FORCE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_HASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_HASH new file mode 100644 index 0000000000000000000000000000000000000000..04ae06b2dafb12d57b6150b1c1ab59caa71092e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_HASH @@ -0,0 +1 @@ +CONFIG_MODULE_SIG_HASH="sha256" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_KEY b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_KEY new file mode 100644 index 0000000000000000000000000000000000000000..80339e3427c3ef8725a3a47877abef4bf102434c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_KEY @@ -0,0 +1 @@ +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA1 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA1 new file mode 100644 index 0000000000000000000000000000000000000000..d96584a87e50f4817e1c4c1ca9703697d28ca08b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA1 @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_SHA1 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA224 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA224 new file mode 100644 index 0000000000000000000000000000000000000000..d49245d2291be013b949a0970d312bcb48f49074 --- 
/dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA224 @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_SHA224 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA256 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA256 new file mode 100644 index 0000000000000000000000000000000000000000..b350aa05ab58923400f9cce29d1431952058e104 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA256 @@ -0,0 +1 @@ +CONFIG_MODULE_SIG_SHA256=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA384 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA384 new file mode 100644 index 0000000000000000000000000000000000000000..ac52049ea819cee9f6f29e5840880f8f301ed05e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA384 @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_SHA384 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA512 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA512 new file mode 100644 index 0000000000000000000000000000000000000000..2910d833029c88a9d17f037e869c4d18683c529b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA512 @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_SHA512 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MOUSE_PS2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MOUSE_PS2 new file mode 100644 index 0000000000000000000000000000000000000000..ed68613bee8bd233193c961479a706da84aeedcf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MOUSE_PS2 @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS new file mode 100644 index 0000000000000000000000000000000000000000..a709ae7d93f7b9ba45f2244b9de1a4ea777a2df3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS @@ -0,0 +1 @@ +CONFIG_MPLS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_IPTUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_IPTUNNEL new file mode 100644 index 0000000000000000000000000000000000000000..229af61b854fa6d06b566ace873a40d1271ee910 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_IPTUNNEL @@ -0,0 +1 @@ +CONFIG_MPLS_IPTUNNEL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_ROUTING b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_ROUTING new file mode 100644 index 0000000000000000000000000000000000000000..50248380eb7b8eff00b0e09ea6577338bcb23fb3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_ROUTING @@ -0,0 +1 @@ +CONFIG_MPLS_ROUTING=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MPTCP_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPTCP_IPV6 new file mode 100644 index 0000000000000000000000000000000000000000..d0780145de593958669cc4e242e39c1386d8509e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPTCP_IPV6 @@ -0,0 +1 @@ +CONFIG_MPTCP_IPV6=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MSDOS_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MSDOS_FS new file mode 100644 index 0000000000000000000000000000000000000000..841afeb52f736de1b97a87d57c740a570e6ccefb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MSDOS_FS @@ -0,0 +1 @@ +CONFIG_MSDOS_FS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE new file mode 100644 index 0000000000000000000000000000000000000000..37ec75b06d39fdbbbbd3f7c27e0e003b99c64ff6 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE @@ -0,0 +1 @@ +CONFIG_NETCONSOLE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE_DYNAMIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE_DYNAMIC new file mode 100644 index 0000000000000000000000000000000000000000..764af964b571e093a5aed282aa08465a112f7146 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE_DYNAMIC @@ -0,0 +1 @@ +CONFIG_NETCONSOLE_DYNAMIC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETDEVSIM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETDEVSIM new file mode 100644 index 0000000000000000000000000000000000000000..96004592ade407d275d5b5ea06956d47fe7915e6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETDEVSIM @@ -0,0 +1 @@ +CONFIG_NETDEVSIM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_CONNCOUNT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_CONNCOUNT new file mode 100644 index 0000000000000000000000000000000000000000..354fd1be45bbfd2e6802d4c9f658a5bef1f7311d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_CONNCOUNT @@ -0,0 +1 @@ +CONFIG_NETFILTER_CONNCOUNT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_EGRESS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_EGRESS new file mode 100644 index 0000000000000000000000000000000000000000..df0daaae81dc792fb8b7b2959390555320938cae --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_EGRESS @@ -0,0 +1 @@ +CONFIG_NETFILTER_EGRESS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_ARP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_ARP new file mode 100644 index 0000000000000000000000000000000000000000..f778280ee529ad71fb0fe3d39d3f1a4bf4da0ae7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_ARP @@ -0,0 +1 @@ +CONFIG_NETFILTER_FAMILY_ARP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_BRIDGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_BRIDGE new file mode 100644 index 0000000000000000000000000000000000000000..3767186695e4a08d477a6a3d32c3eb4b8f70eb96 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_BRIDGE @@ -0,0 +1 @@ +CONFIG_NETFILTER_FAMILY_BRIDGE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK new file mode 100644 index 0000000000000000000000000000000000000000..cb5690897c81ccaffa8884ee50e9c871c6c1d529 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK @@ -0,0 +1 @@ +CONFIG_NETFILTER_NETLINK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_ACCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_ACCT new file mode 100644 index 0000000000000000000000000000000000000000..2ad7ea565da2b79b02011d8af11b2584c7e30afb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_ACCT @@ -0,0 +1 @@ +CONFIG_NETFILTER_NETLINK_ACCT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_GLUE_CT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_GLUE_CT new file mode 100644 index 0000000000000000000000000000000000000000..e413357f9eb8961a9871fa3f584e56557c85691e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_GLUE_CT @@ -0,0 +1 @@ +CONFIG_NETFILTER_NETLINK_GLUE_CT=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_LOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_LOG new file mode 100644 index 0000000000000000000000000000000000000000..3f8d3d07ccad5f24c05b65521be88ed17e1fa672 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_LOG @@ -0,0 +1 @@ +CONFIG_NETFILTER_NETLINK_LOG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_OSF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_OSF new file mode 100644 index 0000000000000000000000000000000000000000..aa7e3042f0af9cfde2b263567a0e70c84c854fc0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_OSF @@ -0,0 +1 @@ +CONFIG_NETFILTER_NETLINK_OSF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_QUEUE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_QUEUE new file mode 100644 index 0000000000000000000000000000000000000000..c31c5b848f02909cdbdb292c223f223750fea4b4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_QUEUE @@ -0,0 +1 @@ +CONFIG_NETFILTER_NETLINK_QUEUE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_SYNPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_SYNPROXY new file mode 100644 index 0000000000000000000000000000000000000000..6bf87167fa6a1012ebec1d8028abef196e2be095 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_SYNPROXY @@ -0,0 +1 @@ +CONFIG_NETFILTER_SYNPROXY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XTABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XTABLES new file mode 100644 index 0000000000000000000000000000000000000000..a97d971dcb5dc16e6b3ad932669ecc06906140de --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XTABLES @@ -0,0 +1 @@ +CONFIG_NETFILTER_XTABLES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_CONNMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_CONNMARK new file mode 100644 index 0000000000000000000000000000000000000000..5a7391f6b92d5884b33a2331201caf4ba8cb5ca2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_CONNMARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_CONNMARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MARK new file mode 100644 index 0000000000000000000000000000000000000000..a65aee77e712de80eab71a503c429cbb6e61c79e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ADDRTYPE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ADDRTYPE new file mode 100644 index 0000000000000000000000000000000000000000..d0ebb666a7239dfeb002d3d257d51ebfb93282e5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ADDRTYPE @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_BPF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_BPF new file mode 100644 index 0000000000000000000000000000000000000000..dd51452a876a987046426b6cc964502afb96163b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_BPF @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_BPF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CGROUP 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CGROUP new file mode 100644 index 0000000000000000000000000000000000000000..d8a9d8646820c395fb04c95ef5f59c43800cabe5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CGROUP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CGROUP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CLUSTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CLUSTER new file mode 100644 index 0000000000000000000000000000000000000000..020fa47f483dfb4bd10cb5fb21e64632d0904ea8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CLUSTER @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_COMMENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_COMMENT new file mode 100644 index 0000000000000000000000000000000000000000..a3c59620f18cfd3c97d686708ecfdd13227bc9bd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_COMMENT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_COMMENT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNBYTES b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNBYTES new file mode 100644 index 0000000000000000000000000000000000000000..9b3a056b7b1f8453a22ea4ea68fbfcb784f02285 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNBYTES @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLABEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLABEL new file mode 100644 index 0000000000000000000000000000000000000000..29f3433131120076ab710f700efa5270630bb236 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLABEL @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLIMIT new file mode 100644 index 0000000000000000000000000000000000000000..ee136fa468f020a95becee18f7bec90b5a71a99a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLIMIT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNMARK new file mode 100644 index 0000000000000000000000000000000000000000..8ad13363923b989c47302db7a2d5019931688081 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNMARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNTRACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNTRACK new file mode 100644 index 0000000000000000000000000000000000000000..45db821ffa97ccaedd5338b93d495add782ca597 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNTRACK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CPU new file mode 100644 index 0000000000000000000000000000000000000000..556ebfac9a436e8300ab54f6acf5ee260b44e3ae --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CPU @@ -0,0 +1 @@ 
+CONFIG_NETFILTER_XT_MATCH_CPU=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DCCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DCCP new file mode 100644 index 0000000000000000000000000000000000000000..8b4224d8ce432fa914ecacaac3694dc731b88ece --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DCCP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_DCCP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DEVGROUP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DEVGROUP new file mode 100644 index 0000000000000000000000000000000000000000..2f50c144f73c19292e69e999c77e940bb0d83c68 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DEVGROUP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DSCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DSCP new file mode 100644 index 0000000000000000000000000000000000000000..d4f55cb4fe15cca8cbcbcaab3a314f046949ff53 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DSCP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_DSCP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ECN b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ECN new file mode 100644 index 0000000000000000000000000000000000000000..3ac23e5f8573db8a965d5d31959403cb3f92aa66 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ECN @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_ECN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ESP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ESP new file mode 100644 index 0000000000000000000000000000000000000000..16ce1ab2e5e387c6f6527be5f972bbc49878cda6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ESP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_ESP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HASHLIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HASHLIMIT new file mode 100644 index 0000000000000000000000000000000000000000..3112cfca2dc729ca07f99776a66590a43b1aa424 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HASHLIMIT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HELPER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HELPER new file mode 100644 index 0000000000000000000000000000000000000000..1c79d125ae2ef56b495e33162fe074409c0208bc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HELPER @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_HELPER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HL new file mode 100644 index 0000000000000000000000000000000000000000..fe60ffc566b4ad575c8d1ed6e2ce0d63574715bb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HL @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_HL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP new file mode 100644 index 0000000000000000000000000000000000000000..5df60f906719482f4b1eb78b9a9af66814a191cd --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPRANGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPRANGE new file mode 100644 index 0000000000000000000000000000000000000000..42bf14df38f4b01b9b85f04f96f321df189a9609 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPRANGE @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPVS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPVS new file mode 100644 index 0000000000000000000000000000000000000000..db117714a5cecc676e983bfccb6a82a128e3a72b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPVS @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_IPVS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_L2TP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_L2TP new file mode 100644 index 0000000000000000000000000000000000000000..b1115825322e8735850d53a0e05e3396513b8593 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_L2TP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_L2TP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LENGTH b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LENGTH new file mode 100644 index 0000000000000000000000000000000000000000..22f3920af106cf4075f325d737e4c638911cef6b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LENGTH @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_LENGTH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LIMIT new file mode 100644 index 0000000000000000000000000000000000000000..69d2d65dcb7c222771d15864db25ed9a2cfb1e4d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LIMIT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_LIMIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MAC new file mode 100644 index 0000000000000000000000000000000000000000..2175aa582900b2fbb891c5b481ec882012b4c4f8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MAC @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_MAC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MARK new file mode 100644 index 0000000000000000000000000000000000000000..6c20543fba3b332f9a20a965ab072b9486638b44 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_MARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MULTIPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MULTIPORT new file mode 100644 index 0000000000000000000000000000000000000000..c3d9c38a6e1451cf36eceb2a7e85d8d6cce1af45 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MULTIPORT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_NFACCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_NFACCT new file mode 100644 index 
0000000000000000000000000000000000000000..81ec63eb68a0f790b8c03a53d3d24a0529a3163e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_NFACCT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_NFACCT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OSF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OSF new file mode 100644 index 0000000000000000000000000000000000000000..fc51bc9866ea0c147b16ea653de622ce65ba038c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OSF @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_OSF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OWNER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OWNER new file mode 100644 index 0000000000000000000000000000000000000000..34be704c15fe06e9f05e1bd7c441a6b86560de92 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OWNER @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_OWNER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PHYSDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PHYSDEV new file mode 100644 index 0000000000000000000000000000000000000000..756371f03a642fb137cb94921380e3b5b6921f72 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PHYSDEV @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PKTTYPE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PKTTYPE new file mode 100644 index 0000000000000000000000000000000000000000..d1412b2ac2494c401aaaded2010e0ca7f0af841b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PKTTYPE @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_POLICY new file mode 100644 index 0000000000000000000000000000000000000000..05e378a49f70ad9282a4c49f635446d2efe52a3a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_POLICY @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_POLICY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_QUOTA b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_QUOTA new file mode 100644 index 0000000000000000000000000000000000000000..5977d4d970c645067f9464b48d2d13308b732ebd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_QUOTA @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_QUOTA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RATEEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RATEEST new file mode 100644 index 0000000000000000000000000000000000000000..a7bec3652661b5cd604ef999161399456cf037c1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RATEEST @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_RATEEST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_REALM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_REALM new file mode 100644 index 0000000000000000000000000000000000000000..e7ce1f51ac37be7ee7d914e31dea6a4828244b0b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_REALM @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_REALM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RECENT 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RECENT
new file mode 100644
index 0000000000000000000000000000000000000000..0f092d603487bb1d7647fb328f3e04cfd653fbe0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RECENT
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SCTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SCTP
new file mode 100644
index 0000000000000000000000000000000000000000..61a2485946b91e6cefa273ea6810cebd151e5d6f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SCTP
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SOCKET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SOCKET
new file mode 100644
index 0000000000000000000000000000000000000000..ec4886c06a6e72b01b19fd7ef53191c9eea153cc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SOCKET
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATE
new file mode 100644
index 0000000000000000000000000000000000000000..87f0e3fe7499387f254ff0cfde0d822bfa19ea60
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATE
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_MATCH_STATE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATISTIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATISTIC
new file mode 100644
index 0000000000000000000000000000000000000000..79f06effbf8fc1cb0c7920cd0f9f476e71148ff0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATISTIC
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STRING b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STRING
new file mode 100644
index 0000000000000000000000000000000000000000..b7bf31333ced467b0d25468fd968349c6e83aa91
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STRING
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_MATCH_STRING=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TCPMSS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TCPMSS
new file mode 100644
index 0000000000000000000000000000000000000000..abc5cdb59604a63607597604fd45198b54e9f990
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TCPMSS
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TIME b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TIME
new file mode 100644
index 0000000000000000000000000000000000000000..d4eb9fcb212c8bb3fd6b8766d659c1679ec187d2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TIME
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_MATCH_TIME=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_U32 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_U32
new file mode 100644
index 0000000000000000000000000000000000000000..3033733e5a1c59fc8f8d3e8ab894ec43a1feca56
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_U32
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_MATCH_U32=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_NAT
new file mode 100644
index 0000000000000000000000000000000000000000..79773333441367c89244c2305413d578fd064314
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_NAT
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_NAT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_SET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_SET
new file mode 100644
index 0000000000000000000000000000000000000000..422aad468050e0cd8a03b56365bb2b62523e8efd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_SET
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_SET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_AUDIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_AUDIT
new file mode 100644
index 0000000000000000000000000000000000000000..9b8ef0d6d69755ba2fb476f0dd1424249a1f8682
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_AUDIT
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CHECKSUM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CHECKSUM
new file mode 100644
index 0000000000000000000000000000000000000000..58afbff6d9ca74405211af542a70af9285498705
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CHECKSUM
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CLASSIFY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CLASSIFY
new file mode 100644
index 0000000000000000000000000000000000000000..595deedceaa9fb84cd768ed012f27619e1586bb2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CLASSIFY
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNMARK
new file mode 100644
index 0000000000000000000000000000000000000000..b64202c157f3fcb2bf658916fee164223a846a81
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNMARK
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNSECMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNSECMARK
new file mode 100644
index 0000000000000000000000000000000000000000..6cd3b71b7abe9cc3d594fa19ccc93262076d41b2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNSECMARK
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CT
new file mode 100644
index 0000000000000000000000000000000000000000..e777799b0677903e244b257d83597c317ac409b0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CT
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_CT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_DSCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_DSCP
new file mode 100644
index 0000000000000000000000000000000000000000..2b7b2177260ebded050947989e0362fe26a66b0b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_DSCP
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HL
new file mode 100644
index 0000000000000000000000000000000000000000..72cf5b3e5cd7e4ff7106d6b0afa10aeb6aa2094d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HL
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_HL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HMARK
new file mode 100644
index 0000000000000000000000000000000000000000..e75e82b04a351e8203ac9444140fea31cecb9ae1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HMARK
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_IDLETIMER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_IDLETIMER
new file mode 100644
index 0000000000000000000000000000000000000000..ecb2856c0e31073439c0a22e4b3e2c03de5dc008
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_IDLETIMER
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LED b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LED
new file mode 100644
index 0000000000000000000000000000000000000000..819ec668cc90aee103eb3e71a1758832f2546e68
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LED
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_LED=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LOG
new file mode 100644
index 0000000000000000000000000000000000000000..a4e073d6f30deb3d932206184c217a9b650a648c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LOG
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_LOG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MARK
new file mode 100644
index 0000000000000000000000000000000000000000..077ea52954814b3a95f167089dd3a21416aeda2d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MARK
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_MARK=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MASQUERADE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MASQUERADE
new file mode 100644
index 0000000000000000000000000000000000000000..37960b6b1f457f81d9465dc7b6b9128c0356ded0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MASQUERADE
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NETMAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NETMAP
new file mode 100644
index 0000000000000000000000000000000000000000..ca635aa2a1c2f593c398df65c62d7bceb054cbac
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NETMAP
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFLOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFLOG
new file mode 100644
index 0000000000000000000000000000000000000000..c4c331a568fd5b4cf872a22f942442872a8e9d90
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFLOG
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFQUEUE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFQUEUE
new file mode 100644
index 0000000000000000000000000000000000000000..9885488c8cf0d8c85218e05ee420373c6971713a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFQUEUE
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NOTRACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NOTRACK
new file mode 100644
index 0000000000000000000000000000000000000000..2c0abc17f2e38ba8fa0a54f4924d8f074e9e02dd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NOTRACK
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_RATEEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_RATEEST
new file mode 100644
index 0000000000000000000000000000000000000000..140be4a026f6b7653e858cdeff4e2937a524f565
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_RATEEST
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_REDIRECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_REDIRECT
new file mode 100644
index 0000000000000000000000000000000000000000..e31cca97a78411cc53e18d64a7fb213b40b7707c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_REDIRECT
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_SECMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_SECMARK
new file mode 100644
index 0000000000000000000000000000000000000000..4d6dd7ff477487ddc78f445c54ade2ac6e186021
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_SECMARK
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPMSS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPMSS
new file mode 100644
index 0000000000000000000000000000000000000000..fa3ac174dfd35bf625f33a78ed123e1e22568099
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPMSS
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP
new file mode 100644
index 0000000000000000000000000000000000000000..72c208d325d17d99eae767b5673c73da5ab45c60
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TEE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TEE
new file mode 100644
index 0000000000000000000000000000000000000000..1ed57ae5143dc6528b3c9ab4dfef215233164b27
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TEE
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_TEE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TPROXY
new file mode 100644
index 0000000000000000000000000000000000000000..89f3b7d2b0da49200c4adfffdde0a1df3c7bd179
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TPROXY
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TRACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TRACE
new file mode 100644
index 0000000000000000000000000000000000000000..dc5f43fb316b1bf31ff60a91cec67073686f24c6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TRACE
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_STATS
new file mode 100644
index 0000000000000000000000000000000000000000..253ef3a36d069946a521c6a5688a4c086b85e7da
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_STATS
@@ -0,0 +1 @@
+CONFIG_NETFS_STATS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..0a7f76b994b0bbca9842e504e6b98f07679ab9e3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_SUPPORT
@@ -0,0 +1 @@
+CONFIG_NETFS_SUPPORT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETLABEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETLABEL
new file mode 100644
index 0000000000000000000000000000000000000000..b6e2bb09024ddb0380c2c7b73c9da0a1d0362f0f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETLABEL
@@ -0,0 +1 @@
+CONFIG_NETLABEL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_PHY_TIMESTAMPING b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_PHY_TIMESTAMPING
new file mode 100644
index 0000000000000000000000000000000000000000..3ef3fad28bb6106bdebc604829c8b708e3b87c2e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_PHY_TIMESTAMPING
@@ -0,0 +1 @@
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_SECMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_SECMARK
new file mode 100644
index 0000000000000000000000000000000000000000..8218703c0bd709adbd6fd2461a4c7eb95c60342f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_SECMARK
@@ -0,0 +1 @@
+CONFIG_NETWORK_SECMARK=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_BPF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_BPF
new file mode 100644
index 0000000000000000000000000000000000000000..b66cfa276d66dc046638510a8885943f1ea5424c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_BPF
@@ -0,0 +1 @@
+CONFIG_NET_ACT_BPF=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CONNMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CONNMARK
new file mode 100644
index 0000000000000000000000000000000000000000..eb651d1910aa3158d2dfb9cee4805c26e85e35a2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CONNMARK
@@ -0,0 +1 @@
+# CONFIG_NET_ACT_CONNMARK is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CSUM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CSUM
new file mode 100644
index 0000000000000000000000000000000000000000..4e94094060704f3ad3c327ae0f34296899c6abd1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CSUM
@@ -0,0 +1 @@
+CONFIG_NET_ACT_CSUM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CT
new file mode 100644
index 0000000000000000000000000000000000000000..93f4c486eb29231f83276b442d797dd4f8b6a27d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CT
@@ -0,0 +1 @@
+CONFIG_NET_ACT_CT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CTINFO b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CTINFO
new file mode 100644
index 0000000000000000000000000000000000000000..0becc0cc7fe4ed1197acb7cedbe6ae2341d88160
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CTINFO
@@ -0,0 +1 @@
+# CONFIG_NET_ACT_CTINFO is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_GATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_GATE
new file mode 100644
index 0000000000000000000000000000000000000000..39e73e5da996921a1d4e14088511461758409d5d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_GATE
@@ -0,0 +1 @@
+# CONFIG_NET_ACT_GATE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IFE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IFE
new file mode 100644
index 0000000000000000000000000000000000000000..a466bca7be094373edde7e4c05cdd50d01851728
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IFE
@@ -0,0 +1 @@
+# CONFIG_NET_ACT_IFE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IPT
new file mode 100644
index 0000000000000000000000000000000000000000..3b03e8356ba42caa7880ef319b4a1b4003281ee0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IPT
@@ -0,0 +1 @@
+CONFIG_NET_ACT_IPT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MIRRED b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MIRRED
new file mode 100644
index 0000000000000000000000000000000000000000..e16b33a223d730e6d3ef7274bfc795201b004d26
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MIRRED
@@ -0,0 +1 @@
+CONFIG_NET_ACT_MIRRED=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MPLS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MPLS
new file mode 100644
index 0000000000000000000000000000000000000000..2f486dbf828f5fdcdad13c611666d287d57e376c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MPLS
@@ -0,0 +1 @@
+# CONFIG_NET_ACT_MPLS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_NAT
new file mode 100644
index 0000000000000000000000000000000000000000..e6812fbdc9ae9c02456e12fc9be3f9ebb107efa8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_NAT
@@ -0,0 +1 @@
+CONFIG_NET_ACT_NAT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_PEDIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_PEDIT
new file mode 100644
index 0000000000000000000000000000000000000000..8ac1123aaa30407e261d4ae64cfae9643d92e315
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_PEDIT
@@ -0,0 +1 @@
+CONFIG_NET_ACT_PEDIT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SAMPLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SAMPLE
new file mode 100644
index 0000000000000000000000000000000000000000..4582be19d0a8a2e8dbc6ee880ddee6290e27ab7b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SAMPLE
@@ -0,0 +1 @@
+CONFIG_NET_ACT_SAMPLE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SIMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SIMP
new file mode 100644
index 0000000000000000000000000000000000000000..7203e8d682d1a69be575d7c0fea04db323b32b8b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SIMP
@@ -0,0 +1 @@
+CONFIG_NET_ACT_SIMP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBEDIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBEDIT
new file mode 100644
index 0000000000000000000000000000000000000000..f3da96aa3dfa847aecde5fd10d0221cbdb96bd20
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBEDIT
@@ -0,0 +1 @@
+CONFIG_NET_ACT_SKBEDIT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBMOD b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBMOD
new file mode 100644
index 0000000000000000000000000000000000000000..90fa7304404beb5227bc8488ff9c8ea8561890aa
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBMOD
@@ -0,0 +1 @@
+CONFIG_NET_ACT_SKBMOD=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_TUNNEL_KEY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_TUNNEL_KEY
new file mode 100644
index 0000000000000000000000000000000000000000..a030419554a9df1f2047f8c1a39be0c46cbec87a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_TUNNEL_KEY
@@ -0,0 +1 @@
+CONFIG_NET_ACT_TUNNEL_KEY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_VLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_VLAN
new file mode 100644
index 0000000000000000000000000000000000000000..b47e5e427b0d0f179bfda6b4d99c5198d74fb8c9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_VLAN
@@ -0,0 +1 @@
+CONFIG_NET_ACT_VLAN=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BASIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BASIC
new file mode 100644
index 0000000000000000000000000000000000000000..9f9628713237838024cb39772e3050eae032d5b5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BASIC
@@ -0,0 +1 @@
+CONFIG_NET_CLS_BASIC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BPF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BPF
new file mode 100644
index 0000000000000000000000000000000000000000..5645a27b56e7fe83d4f6409fb330ca708ef04b33
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BPF
@@ -0,0 +1 @@
+CONFIG_NET_CLS_BPF=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_CGROUP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_CGROUP
new file mode 100644
index 0000000000000000000000000000000000000000..43802195c9dfbfdb7192052cc25f5db67b434796
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_CGROUP
@@ -0,0 +1 @@
+CONFIG_NET_CLS_CGROUP=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOW b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOW
new file mode 100644
index 0000000000000000000000000000000000000000..0f3fee0898a6c0cceb0cb7358f35ecdec0173436
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOW
@@ -0,0 +1 @@
+CONFIG_NET_CLS_FLOW=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOWER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOWER
new file mode 100644
index 0000000000000000000000000000000000000000..d26b4ef821e4eabf74ee86851067523693fd25fa
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOWER
@@ -0,0 +1 @@
+CONFIG_NET_CLS_FLOWER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FW b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FW
new file mode 100644
index 0000000000000000000000000000000000000000..fb88679bbe3f31cc21a851a6f20cc4349f439e80
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FW
@@ -0,0 +1 @@
+CONFIG_NET_CLS_FW=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_MATCHALL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_MATCHALL
new file mode 100644
index 0000000000000000000000000000000000000000..a365ce8703026feaff8d64aba4c42b5facc1490f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_MATCHALL
@@ -0,0 +1 @@
+CONFIG_NET_CLS_MATCHALL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_ROUTE4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_ROUTE4
new file mode 100644
index 0000000000000000000000000000000000000000..1260f8c9a136c0ad725d02e88faf060626b8af0c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_ROUTE4
@@ -0,0 +1 @@
+CONFIG_NET_CLS_ROUTE4=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_U32 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_U32
new file mode 100644
index 0000000000000000000000000000000000000000..ebbda24969adc50d7e4eab41d7d5013e45131cf1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_U32
@@ -0,0 +1 @@
+CONFIG_NET_CLS_U32=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_DROP_MONITOR b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_DROP_MONITOR
new file mode 100644
index 0000000000000000000000000000000000000000..9c5a23a5f1694cb0fb44e3ee12525fc215cb4fcb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_DROP_MONITOR
@@ -0,0 +1 @@
+CONFIG_NET_DROP_MONITOR=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH
new file mode 100644
index 0000000000000000000000000000000000000000..f6171a72f06e0e14fa56ba54802f17cde4c22a79
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH
@@ -0,0 +1 @@
+CONFIG_NET_EMATCH=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_CMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_CMP
new file mode 100644
index 0000000000000000000000000000000000000000..086dbaf24fecd5fba379f9c223391b055c730d9a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_CMP
@@ -0,0 +1 @@
+CONFIG_NET_EMATCH_CMP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPSET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPSET
new file mode 100644
index 0000000000000000000000000000000000000000..2b0a70570acec6e5c5a7a1487c31ce4f300d16de
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPSET
@@ -0,0 +1 @@
+CONFIG_NET_EMATCH_IPSET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPT
new file mode 100644
index 0000000000000000000000000000000000000000..194717561a6666fcfee3188fbee71e7196b3f388
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPT
@@ -0,0 +1 @@
+# CONFIG_NET_EMATCH_IPT is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_META b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_META
new file mode 100644
index 0000000000000000000000000000000000000000..0b395db767cd04c3bcdbfd695c771cd265bfe516
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_META
@@ -0,0 +1 @@
+CONFIG_NET_EMATCH_META=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_NBYTE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_NBYTE
new file mode 100644
index 0000000000000000000000000000000000000000..4567128fc90ead37a25f4a6d55b52d1e2e9168a6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_NBYTE
@@ -0,0 +1 @@
+CONFIG_NET_EMATCH_NBYTE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_STACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_STACK
new file mode 100644
index 0000000000000000000000000000000000000000..e37337192580dd55f8eeda681f805f3214422fdf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_STACK
@@ -0,0 +1 @@
+CONFIG_NET_EMATCH_STACK=32
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_TEXT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_TEXT
new file mode 100644
index 0000000000000000000000000000000000000000..6fd4727362f08b2bdc4efcd391734d6a4d999b6c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_TEXT
@@ -0,0 +1 @@
+CONFIG_NET_EMATCH_TEXT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_U32 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_U32
new file mode 100644
index 0000000000000000000000000000000000000000..4bdebd3eea34596ca8e0dbc7cd7c5221ddde528b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_U32
@@ -0,0 +1 @@
+CONFIG_NET_EMATCH_U32=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_FC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_FC
new file mode 100644
index 0000000000000000000000000000000000000000..e232d912f7fe2033844d68811a2baaea7e6a2de1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_FC
@@ -0,0 +1 @@
+CONFIG_NET_FC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE
new file mode 100644
index 0000000000000000000000000000000000000000..ad527835c120067d9da563c1977d54a2e9193443
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE
@@ -0,0 +1 @@
+CONFIG_NET_IPGRE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_BROADCAST b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_BROADCAST
new file mode 100644
index 0000000000000000000000000000000000000000..dfb259c45d6b9530783eaca24eaa2dcd0a95e7ac
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_BROADCAST
@@ -0,0 +1 @@
+CONFIG_NET_IPGRE_BROADCAST=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_DEMUX b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_DEMUX
new file mode 100644
index 0000000000000000000000000000000000000000..787db526330e7263043364c4639f64d7535c918d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_DEMUX
@@ -0,0 +1 @@
+CONFIG_NET_IPGRE_DEMUX=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPIP
new file mode 100644
index 0000000000000000000000000000000000000000..3f1247bfc9d45bdab33de6712c38616f73910bbf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPIP
@@ -0,0 +1 @@
+CONFIG_NET_IPIP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_KEY_MIGRATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_KEY_MIGRATE
new file mode 100644
index 0000000000000000000000000000000000000000..2168c1a29a9d3d9664379bb97e85f98554973026
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_KEY_MIGRATE
@@ -0,0 +1 @@
+CONFIG_NET_KEY_MIGRATE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_L3_MASTER_DEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_L3_MASTER_DEV
new file mode 100644
index 0000000000000000000000000000000000000000..bdf39009ea657464d18d3b47fc80e02b998a0fb7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_L3_MASTER_DEV
@@ -0,0 +1 @@
+CONFIG_NET_L3_MASTER_DEV=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_MPLS_GSO b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_MPLS_GSO
new file mode 100644
index 0000000000000000000000000000000000000000..a1aedf04c34888cb4b8b489f3e7121ad8b9dfdae
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_MPLS_GSO
@@ -0,0 +1 @@
+CONFIG_NET_MPLS_GSO=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_NSH b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_NSH
new file mode 100644
index 0000000000000000000000000000000000000000..67f7ca8f68ee2247289a7328ef898bd0b6ef61ac
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_NSH
@@ -0,0 +1 @@
+CONFIG_NET_NSH=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PKTGEN b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PKTGEN
new file mode 100644
index 0000000000000000000000000000000000000000..b12bf03e93339e54afeb3dcda6b758098eb3d749
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PKTGEN
@@ -0,0 +1 @@
+CONFIG_NET_PKTGEN=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PTP_CLASSIFY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PTP_CLASSIFY
new file mode 100644
index 0000000000000000000000000000000000000000..3b024145c3d27e001f5d25456bd3e5a3dd9e2c23
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PTP_CLASSIFY
@@ -0,0 +1 @@
+CONFIG_NET_PTP_CLASSIFY=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CAKE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CAKE
new file mode 100644
index 0000000000000000000000000000000000000000..1413c498e3a7342b95198c56561811a6504b3927
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CAKE
@@ -0,0 +1 @@
+# CONFIG_NET_SCH_CAKE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CBS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CBS
new file mode 100644
index 0000000000000000000000000000000000000000..96a7f28e56d293809a7ef3e57c22173b64d50aca
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CBS
@@ -0,0 +1 @@
+# CONFIG_NET_SCH_CBS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CHOKE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CHOKE
new file mode 100644
index 0000000000000000000000000000000000000000..6ab0baf140f7479639c2dd5e42452316184a0963
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CHOKE
@@ -0,0 +1 @@
+CONFIG_NET_SCH_CHOKE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CODEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CODEL
new file mode 100644
index 0000000000000000000000000000000000000000..817865e082b35d4944737c492205fa9e669269b4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CODEL
@@ -0,0 +1 @@
+CONFIG_NET_SCH_CODEL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DEFAULT
new file mode 100644
index 0000000000000000000000000000000000000000..268f6d95793643ef9c059262de31729aca27c1d1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DEFAULT
@@ -0,0 +1 @@
+CONFIG_NET_SCH_DEFAULT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DRR b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DRR
new file mode 100644
index 0000000000000000000000000000000000000000..4577d16029435545878ece7e6c79600382d91a42
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DRR
@@ -0,0 +1 @@
+CONFIG_NET_SCH_DRR=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETF
new file mode 100644
index 0000000000000000000000000000000000000000..81be382999ac7b43dc9251fe6c66eb43a7164a0d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETF
@@ -0,0 +1 @@
+# CONFIG_NET_SCH_ETF is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETS
new file mode 100644
index 0000000000000000000000000000000000000000..95ea61e437bee556f65658a9e6626b8838b123a9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETS
@@ -0,0 +1 @@
+# CONFIG_NET_SCH_ETS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ
new file mode 100644
index 0000000000000000000000000000000000000000..a8feeea079b7b785c5e89bf072db2a36c65f6069
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ
@@ -0,0 +1 @@
+CONFIG_NET_SCH_FQ=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ_PIE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ_PIE
new file mode 100644
index 0000000000000000000000000000000000000000..71241b274a8c3f760da59c1ad607f424de73ee0b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ_PIE
@@ -0,0 +1 @@
+# CONFIG_NET_SCH_FQ_PIE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_GRED b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_GRED
new file mode 100644
index 0000000000000000000000000000000000000000..6c050d77049ae9013b3499c3999cb97c2896c86d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_GRED
@@ -0,0 +1 @@
+CONFIG_NET_SCH_GRED=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HFSC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HFSC
new file mode 100644
index 0000000000000000000000000000000000000000..6c5361abd1548404cf044fca7336cc1666180315
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HFSC
@@ -0,0 +1 @@
+CONFIG_NET_SCH_HFSC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HHF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HHF
new file mode 100644
index 0000000000000000000000000000000000000000..784312a46be44f47014949fd0e5b7895d542fc24
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HHF
@@ -0,0 +1 @@
+CONFIG_NET_SCH_HHF=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HTB b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HTB
new file mode 100644
index 0000000000000000000000000000000000000000..046d4d9fb05b4ad1b6de9036a8eeb20fe7272053
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HTB
@@ -0,0 +1 @@
+CONFIG_NET_SCH_HTB=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MQPRIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MQPRIO
new file mode 100644
index 0000000000000000000000000000000000000000..4acab89150fab5f65c38aaba67798707b92be234
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MQPRIO
@@ -0,0 +1 @@
+CONFIG_NET_SCH_MQPRIO=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MULTIQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MULTIQ
new file mode 100644
index 0000000000000000000000000000000000000000..118b13e09ed823cb665578ce04884f8be69a1ddd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MULTIQ
@@ -0,0 +1 @@
+CONFIG_NET_SCH_MULTIQ=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_NETEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_NETEM
new file mode 100644
index 0000000000000000000000000000000000000000..37799e653ed1ef8f9f11f4f35411e51bcc7d7216
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_NETEM
@@ -0,0 +1 @@
+CONFIG_NET_SCH_NETEM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PIE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PIE
new file mode 100644
index 0000000000000000000000000000000000000000..dc9f48fe535cb6e431c1ee629030825011582068
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PIE
@@ -0,0 +1 @@
+CONFIG_NET_SCH_PIE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PLUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PLUG
new file mode 100644
index 0000000000000000000000000000000000000000..b27bf6ac099f9109bba4f26a017ec573cf600bc9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PLUG
@@ -0,0 +1 @@
+CONFIG_NET_SCH_PLUG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PRIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PRIO
new file mode 100644
index 0000000000000000000000000000000000000000..d7c3ad33d97d47d3a3499a47a98c4f53a032b70d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PRIO
@@ -0,0 +1 @@
+CONFIG_NET_SCH_PRIO=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_QFQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_QFQ
new file mode 100644
index 0000000000000000000000000000000000000000..a4000beb94cf499266e41f17cd480617b6c4aaaf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_QFQ
@@ -0,0 +1 @@
+CONFIG_NET_SCH_QFQ=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_RED b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_RED
new file mode 100644
index 0000000000000000000000000000000000000000..c08d04d940401f09329baebb40e08d10dac3ea25
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_RED
@@ -0,0 +1 @@
+CONFIG_NET_SCH_RED=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFB b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFB
new file mode 100644
index 0000000000000000000000000000000000000000..72985ce86c6be1bffc39a94afa749063c15462e1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFB
@@ -0,0 +1 @@
+CONFIG_NET_SCH_SFB=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFQ
new file mode 100644
index 0000000000000000000000000000000000000000..66b3ebbae60035e7248e1d295831b2c52c4cbb76
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFQ
@@ -0,0 +1 @@
+CONFIG_NET_SCH_SFQ=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SKBPRIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SKBPRIO
new file mode 100644
index 0000000000000000000000000000000000000000..9805c0149c6aa4db4c67c52d0a6c11f3a20edd71
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SKBPRIO
@@ -0,0 +1 @@
+# CONFIG_NET_SCH_SKBPRIO is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TAPRIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TAPRIO
new file mode 100644
index 0000000000000000000000000000000000000000..fd5e3d773e09c1cf0bc0aee792113a6e0ef9f088
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TAPRIO
@@ -0,0 +1 @@
+# CONFIG_NET_SCH_TAPRIO is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TBF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TBF
new file mode 100644
index 0000000000000000000000000000000000000000..b45dd0213a2b78acf7220727998649e314ea204e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TBF
@@ -0,0 +1 @@
+CONFIG_NET_SCH_TBF=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TEQL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TEQL
new file mode 100644
index 0000000000000000000000000000000000000000..2c7a73b04e79722e575e8bc2a8d6eaab950d7064
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TEQL
@@ -0,0 +1 @@
+CONFIG_NET_SCH_TEQL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TC_SKB_EXT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TC_SKB_EXT
new file mode 100644
index 0000000000000000000000000000000000000000..3290f992f5ac8a5ff591350e3ec7e47dec2e2c4e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TC_SKB_EXT
@@ -0,0 +1 @@
+CONFIG_NET_TC_SKB_EXT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM
new file mode 100644
index 0000000000000000000000000000000000000000..1577c67e3e728f4781e6815ba2567d5f2a38bc7b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM
@@ -0,0 +1 @@
+CONFIG_NET_TEAM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ACTIVEBACKUP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ACTIVEBACKUP
new file mode 100644
index 0000000000000000000000000000000000000000..bcfc4d09f970f38c33802852b28e128bccef8d10
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ACTIVEBACKUP
@@ -0,0 +1 @@
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_BROADCAST b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_BROADCAST
new file mode 100644
index 0000000000000000000000000000000000000000..ec786b43112ddd5192d0437a46d02f740fe7efef
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_BROADCAST
@@ -0,0 +1 @@
+CONFIG_NET_TEAM_MODE_BROADCAST=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_LOADBALANCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_LOADBALANCE
new file mode 100644
index 0000000000000000000000000000000000000000..018ec31ed1e24ea6a3721565c0e4ab77e30bdd4e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_LOADBALANCE
@@ -0,0 +1 @@
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_RANDOM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_RANDOM
new file mode 100644
index 0000000000000000000000000000000000000000..f4ed7452ea7282c2299a7c37a42fcf26e8f6aa81
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_RANDOM
@@ -0,0 +1 @@
+CONFIG_NET_TEAM_MODE_RANDOM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ROUNDROBIN b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ROUNDROBIN
new file mode 100644
index 0000000000000000000000000000000000000000..c7d4ad7aec2d1e6163b593126a16627e4ce235f0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ROUNDROBIN
@@ -0,0 +1 @@
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_HUAWEI b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_HUAWEI
new file mode 100644
index 0000000000000000000000000000000000000000..b3af680f0a0ef8eea9bccaf0f0e4fa6309839308
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_HUAWEI
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_HUAWEI=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_SOLARFLARE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_SOLARFLARE
new file mode 100644
index 0000000000000000000000000000000000000000..8b1f0b6bfe6180a722f0651879142d5790da8340
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_SOLARFLARE
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_SOLARFLARE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_YUNSILICON b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_YUNSILICON
new file mode 100644
index 0000000000000000000000000000000000000000..f6aca2a290f7ffebb4f6c127a967acca60ded17d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_YUNSILICON
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_YUNSILICON=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VRF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VRF
new file mode 100644
index 0000000000000000000000000000000000000000..8c9e84febcaddd5269274d1f0ea590f26ab052d8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VRF
@@ -0,0 +1 @@
+CONFIG_NET_VRF=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_XGRESS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_XGRESS
new file mode 100644
index 0000000000000000000000000000000000000000..e0441ff5017ac91c3b018d541fbda0041b199592
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_XGRESS
@@ -0,0 +1 @@
+CONFIG_NET_XGRESS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFIT_SECURITY_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFIT_SECURITY_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..fff1bd54155efaad1db575e48d8adbbaabb72b36
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFIT_SECURITY_DEBUG
@@ -0,0 +1 @@
+# CONFIG_NFIT_SECURITY_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_BLOCKLAYOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_BLOCKLAYOUT
new file mode 100644
index 0000000000000000000000000000000000000000..60150858f87ad75e064216712d58ae4a47033bca
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_BLOCKLAYOUT
@@ -0,0 +1 @@
+# CONFIG_NFSD_BLOCKLAYOUT is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_FLEXFILELAYOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_FLEXFILELAYOUT
new file mode 100644
index 0000000000000000000000000000000000000000..fd0c215381f298200dcb4279a267a41de14e7696
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_FLEXFILELAYOUT
@@ -0,0 +1 @@
+# CONFIG_NFSD_FLEXFILELAYOUT is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_PNFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_PNFS
new file mode 100644
index 0000000000000000000000000000000000000000..d934bd18ce5724567aa675cc855be9e8d6c7d565
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_PNFS
@@ -0,0 +1 @@
+CONFIG_NFSD_PNFS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_SCSILAYOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_SCSILAYOUT
new file mode 100644
index 0000000000000000000000000000000000000000..1d94de7162797a1385f9e3974c92668b51385b49
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_SCSILAYOUT
@@ -0,0 +1 @@
+CONFIG_NFSD_SCSILAYOUT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V2_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V2_ACL
new file mode 100644
index 0000000000000000000000000000000000000000..e48b468c77548ba413d6e726b1c851a6b3dd82b3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V2_ACL
@@ -0,0 +1 @@
+CONFIG_NFSD_V2_ACL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V3_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V3_ACL
new file mode 100644
index 0000000000000000000000000000000000000000..451933884fb0faea14cf0a587df1be5c4c8ffc0d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V3_ACL
@@ -0,0 +1 @@
+CONFIG_NFSD_V3_ACL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_2_INTER_SSC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_2_INTER_SSC
new file mode 100644
index 0000000000000000000000000000000000000000..6a5dff80979ff307b06a2a78febab1e2c3e2c06f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_2_INTER_SSC
@@ -0,0 +1 @@
+# CONFIG_NFSD_V4_2_INTER_SSC is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_SECURITY_LABEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_SECURITY_LABEL
new file mode 100644
index 0000000000000000000000000000000000000000..16f3ee380e6681c04041c808e7ce016c869b5886
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_SECURITY_LABEL
@@ -0,0 +1 @@
+CONFIG_NFSD_V4_SECURITY_LABEL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_ACL_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_ACL_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..62f13210429b38aec96f5905358688c07d12b5b6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_ACL_SUPPORT
@@ -0,0 +1 @@
+CONFIG_NFS_ACL_SUPPORT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_DISABLE_UDP_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_DISABLE_UDP_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..9537c47e325d8d44c25bbed0aed137e999cc0882
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_DISABLE_UDP_SUPPORT
@@ -0,0 +1 @@
+CONFIG_NFS_DISABLE_UDP_SUPPORT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_SWAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_SWAP
new file mode 100644
index 0000000000000000000000000000000000000000..6618a3d86be9530f948acea9c1d01e545c934d04
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_SWAP
@@ -0,0 +1 @@
+# CONFIG_NFS_SWAP is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_USE_LEGACY_DNS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_USE_LEGACY_DNS
new file mode 100644
index 0000000000000000000000000000000000000000..6919929412c8eeec9aeafee6357ea646032db608
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_USE_LEGACY_DNS
@@ -0,0 +1 @@
+# CONFIG_NFS_USE_LEGACY_DNS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V3_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V3_ACL
new file mode 100644
index 0000000000000000000000000000000000000000..d4c6a260e7479fd96e23a080a16c53d310dd3977
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V3_ACL
@@ -0,0 +1 @@
+CONFIG_NFS_V3_ACL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_1_MIGRATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_1_MIGRATION
new file mode 100644
index 0000000000000000000000000000000000000000..c64757144a865d195f7954d318c0cdea644d840b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_1_MIGRATION
@@ -0,0 +1 @@
+# CONFIG_NFS_V4_1_MIGRATION is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_2_READ_PLUS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_2_READ_PLUS
new file mode 100644
index 0000000000000000000000000000000000000000..4d62001f1f91fbc1d44e6c03ebeab35b1a9661f0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_2_READ_PLUS
@@ -0,0 +1 @@
+# CONFIG_NFS_V4_2_READ_PLUS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_META b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_META
new file mode 100644
index 0000000000000000000000000000000000000000..3e5759b849e4d67e423384fcb55459cd4284e415
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_META
@@ -0,0 +1 @@
+# CONFIG_NFT_BRIDGE_META is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_REJECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_REJECT
new file mode 100644
index 0000000000000000000000000000000000000000..1ec341fc3e4bc32c5cb3e6a42611410fe37842ee
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_REJECT
@@ -0,0 +1 @@
+CONFIG_NFT_BRIDGE_REJECT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_COMPAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_COMPAT
new file mode 100644
index 0000000000000000000000000000000000000000..3b5eab9dfda298863d6781017ea604324a55489b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_COMPAT
@@ -0,0 +1 @@
+CONFIG_NFT_COMPAT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CONNLIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CONNLIMIT
new file mode 100644
index 0000000000000000000000000000000000000000..dfa4af5dbfec3925800c5cb013df6ecd22d85cd4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CONNLIMIT
@@ -0,0 +1 @@
+CONFIG_NFT_CONNLIMIT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CT
new file mode 100644
index 0000000000000000000000000000000000000000..33c2f017d44d8d61ba76b63094fd41f208c737be
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CT
@@ -0,0 +1 @@
+CONFIG_NFT_CT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV4
new file mode 100644
index 0000000000000000000000000000000000000000..fb9ab921d4342b1d020569496bfa441a716f17b1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV4
@@ -0,0 +1 @@
+CONFIG_NFT_DUP_IPV4=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV6
new file mode 100644
index 0000000000000000000000000000000000000000..391a7f256e50c35b6583fe993d72b21e001ab082
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV6
@@ -0,0 +1 @@
+CONFIG_NFT_DUP_IPV6=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_NETDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_NETDEV
new file mode 100644
index 0000000000000000000000000000000000000000..4cab8e0306743c0d8a2932c22dfd12e90ae1a54c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_NETDEV
@@ -0,0 +1 @@
+CONFIG_NFT_DUP_NETDEV=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB
new file mode 100644
index 0000000000000000000000000000000000000000..82fa162bbfb1982ce729ba68f5929d58ca5b8398
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB
@@ -0,0 +1 @@
+CONFIG_NFT_FIB=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_INET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_INET
new file mode 100644
index 0000000000000000000000000000000000000000..fe7a8f35b4b5faf2b390c2d299be04dc3a52ec98
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_INET
@@ -0,0 +1 @@
+CONFIG_NFT_FIB_INET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV4
new file mode 100644
index 0000000000000000000000000000000000000000..210fb695a4cafbf64fa492dcdf9c34b9d958b380
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV4
@@ -0,0 +1 @@
+CONFIG_NFT_FIB_IPV4=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV6
new file mode 100644
index 0000000000000000000000000000000000000000..26b95dea82e75ab0f9aade3d1906ad36823062d9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV6
@@ -0,0 +1 @@
+CONFIG_NFT_FIB_IPV6=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_NETDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_NETDEV
new file mode 100644
index 0000000000000000000000000000000000000000..273bfeb6a70605c618d5325f497a514049e55dbe
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_NETDEV
@@ -0,0 +1 @@
+CONFIG_NFT_FIB_NETDEV=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FLOW_OFFLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FLOW_OFFLOAD
new file mode 100644
index 0000000000000000000000000000000000000000..2960ce593de2ad5bf50daec65663949638bb467c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FLOW_OFFLOAD
@@ -0,0 +1 @@
+CONFIG_NFT_FLOW_OFFLOAD=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FWD_NETDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FWD_NETDEV
new file mode 100644
index 0000000000000000000000000000000000000000..2f10f39e2b64a0f09443a5f0107948c42bdc3fdb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FWD_NETDEV
@@ -0,0 +1 @@
+CONFIG_NFT_FWD_NETDEV=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_HASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_HASH
new file mode 100644
index 0000000000000000000000000000000000000000..581bd38e130f022b5fc828714bbfca0eb3f2a740
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_HASH
@@ -0,0 +1 @@
+CONFIG_NFT_HASH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LIMIT
new file mode 100644
index 0000000000000000000000000000000000000000..ab64be55f2666f67cb045d9f43d76042d39a0d26
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LIMIT
@@ -0,0 +1 @@
+CONFIG_NFT_LIMIT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LOG
new file mode 100644
index 0000000000000000000000000000000000000000..beb1a00ccbf4caa4468293fab06a06d741b55f5d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LOG
@@ -0,0 +1 @@
+CONFIG_NFT_LOG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_MASQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_MASQ
new file mode 100644
index 0000000000000000000000000000000000000000..ab4234885e8332c867dd7a2e138a42a1b8b26d39
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_MASQ
@@ -0,0 +1 @@
+CONFIG_NFT_MASQ=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NAT
new file mode 100644
index 0000000000000000000000000000000000000000..6fc5795d258f8b9d81a2891196263db9aaed906a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NAT
@@ -0,0 +1 @@
+CONFIG_NFT_NAT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NUMGEN b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NUMGEN
new file mode 100644
index 0000000000000000000000000000000000000000..243007744b52ecf5a1cabf36ed5989d19facf906
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NUMGEN
@@ -0,0 +1 @@
+CONFIG_NFT_NUMGEN=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_OSF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_OSF
new file mode 100644
index 0000000000000000000000000000000000000000..beadca8f51d00c7222dbee1b29739fbab255a951
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_OSF
@@ -0,0 +1 @@
+CONFIG_NFT_OSF=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUEUE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUEUE
new file mode 100644
index 0000000000000000000000000000000000000000..b8aea55e813c7f51b34e1ffd45065ae9e2802e23
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUEUE
@@ -0,0 +1 @@
+CONFIG_NFT_QUEUE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUOTA b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUOTA
new file mode 100644
index 0000000000000000000000000000000000000000..1984d0ec795b38144fa43b287158f56966d0b132
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUOTA
@@ -0,0 +1 @@
+CONFIG_NFT_QUOTA=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REDIR b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REDIR
new file mode 100644
index 0000000000000000000000000000000000000000..d7f2d7c76a69027ba65d71a363068227942bff01
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REDIR
@@ -0,0 +1 @@
+CONFIG_NFT_REDIR=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT
new file mode 100644
index 0000000000000000000000000000000000000000..3f0167e83a623b94bf0ed1916d40c5788cffaa15
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT
@@ -0,0 +1 @@
+CONFIG_NFT_REJECT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_INET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_INET
new file mode 100644
index 0000000000000000000000000000000000000000..62ebfe6e850c5919bb7a6a5545263bbc9e1db44f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_INET
@@ -0,0 +1 @@
+CONFIG_NFT_REJECT_INET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_IPV6
new file mode 100644
index 0000000000000000000000000000000000000000..2e8750b56c81b6df32c0ba44c7a92c8fbc9811e2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_IPV6
@@ -0,0 +1 @@
+CONFIG_NFT_REJECT_IPV6=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SOCKET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SOCKET
new file mode 100644
index 0000000000000000000000000000000000000000..84aa8fd92b7d527d9ea0f0b40ae45369c413c0f5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SOCKET
@@ -0,0 +1 @@
+CONFIG_NFT_SOCKET=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SYNPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SYNPROXY
new file mode 100644
index 0000000000000000000000000000000000000000..a8e9cdca8e5fb4f6275a299d446f4a4d5fde7d27
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SYNPROXY
@@ -0,0 +1 @@
+# CONFIG_NFT_SYNPROXY is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TPROXY
new file mode 100644
index 0000000000000000000000000000000000000000..d43e8c5f4bbbbb4647891d1c2b5575dc2099284c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TPROXY
@@ -0,0 +1 @@
+CONFIG_NFT_TPROXY=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..30f2b484f6fe1b23073a3cd472f418d00b013e5b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TUNNEL
@@ -0,0 +1 @@
+CONFIG_NFT_TUNNEL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_XFRM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_XFRM
new file mode 100644
index 0000000000000000000000000000000000000000..9147adfb0fa9a358cb5f09da36fc68aa521f7831
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_XFRM
@@ -0,0 +1 @@
+CONFIG_NFT_XFRM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_AMANDA b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_AMANDA
new file mode 100644
index 0000000000000000000000000000000000000000..07c3990d1f788f120c7a1622e96c6e5b7c367461
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_AMANDA
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_AMANDA=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BRIDGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BRIDGE
new file mode 100644
index 0000000000000000000000000000000000000000..1a7916a68faf120e0c924b88ed10851ff7b539ea
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BRIDGE
@@ -0,0 +1 @@
+# CONFIG_NF_CONNTRACK_BRIDGE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BROADCAST b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BROADCAST
new file mode 100644
index 0000000000000000000000000000000000000000..04a20201b044180571f4b93115bd0e0fcd224136
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BROADCAST
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_BROADCAST=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_EVENTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_EVENTS
new file mode 100644
index 0000000000000000000000000000000000000000..711ab9d5d354281aa507f0754f357479ce3b45ef
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_EVENTS
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_EVENTS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_FTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_FTP
new file mode 100644
index 0000000000000000000000000000000000000000..7a0409d0500234e73e09acc0e1d44b990f8409dd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_FTP
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_FTP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_H323 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_H323
new file mode 100644
index 0000000000000000000000000000000000000000..a8b54d4baa04515982daef810b9e841411a84c7f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_H323
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_H323=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_IRC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_IRC
new file mode 100644
index 0000000000000000000000000000000000000000..5034694f2754c46678f815b1d0beef8686b7c366
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_IRC
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_IRC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_LABELS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_LABELS
new file mode 100644
index 0000000000000000000000000000000000000000..d1b4da6338a239d2cbee3839e9247ba181970f04
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_LABELS
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_LABELS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_MARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_MARK
new file mode 100644
index 0000000000000000000000000000000000000000..e8915b4a5f99641e0a6bef8759431f09dbed4685
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_MARK
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_MARK=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_NETBIOS_NS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_NETBIOS_NS
new file mode 100644
index 0000000000000000000000000000000000000000..dc0343f638d5bbf2b8d25c6ec8f97d482829c13f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_NETBIOS_NS
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PPTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PPTP
new file mode 100644
index 0000000000000000000000000000000000000000..a1a813aaf23b29cdcdcdfd005a569ed4600833dd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PPTP
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_PPTP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PROCFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PROCFS
new file mode 100644
index 0000000000000000000000000000000000000000..11ac5f67b2945bb68b5547e2943377bdc66e4e71
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PROCFS
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_PROCFS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SANE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SANE
new file mode 100644
index 0000000000000000000000000000000000000000..a17c92be62f98a1c2c21163ba67c2d23df33e392
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SANE
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_SANE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SECMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SECMARK
new file mode 100644
index 0000000000000000000000000000000000000000..9490d56ed23a62fc0e6b4ae19ff72f6ab6f1b4b0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SECMARK
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_SECMARK=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SIP
new file mode 100644
index 0000000000000000000000000000000000000000..82f089e80c8153baf904b56a8cc60ca43dc575b9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SIP
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_SIP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SNMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SNMP
new file mode 100644
index 0000000000000000000000000000000000000000..44c5aec3708f7f2894c8a40cf3ef71a841c23385
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SNMP
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_SNMP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TFTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TFTP
new file mode 100644
index 0000000000000000000000000000000000000000..161b2f0077a8a3117382d0b8d350e62c7d777124
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TFTP
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_TFTP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMEOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMEOUT
new file mode 100644
index 0000000000000000000000000000000000000000..24a71e9388a5a5704a476235b905f5118c33fb76
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMEOUT
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_TIMEOUT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMESTAMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMESTAMP
new file mode 100644
index 0000000000000000000000000000000000000000..09d9e909bb903ee4b1ad1b035b5d83f18ae42536
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMESTAMP
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_ZONES b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_ZONES
new file mode 100644
index 0000000000000000000000000000000000000000..e4b2f40cdffbbcdb9ab4020dce6e5a2d7603fe2b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_ZONES
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_ZONES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK
new file mode 100644
index 0000000000000000000000000000000000000000..805a8bc939a990354735371aaa01c31aae56adb2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK
@@ -0,0 +1 @@
+CONFIG_NF_CT_NETLINK=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_HELPER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..6dc2ad6c7021e7f5191da9f0c15eb2d86f6919c3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_HELPER
@@ -0,0 +1 @@
+CONFIG_NF_CT_NETLINK_HELPER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_TIMEOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_TIMEOUT
new file mode 100644
index 0000000000000000000000000000000000000000..57a6aac8509a4083602e01705ac8f6171591eb82
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_TIMEOUT
@@ -0,0 +1 @@
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_DCCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_DCCP
new file mode
100644 index 0000000000000000000000000000000000000000..87cb5eaf980a2921cce88cb9cdddac60b2865e13 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_DCCP @@ -0,0 +1 @@ +CONFIG_NF_CT_PROTO_DCCP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_GRE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_GRE new file mode 100644 index 0000000000000000000000000000000000000000..acbbf595f8671689b4fab04d9f91e68c455269dc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_GRE @@ -0,0 +1 @@ +CONFIG_NF_CT_PROTO_GRE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_SCTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_SCTP new file mode 100644 index 0000000000000000000000000000000000000000..c882bb26292cce0933fab33ed3d45bc375b3f194 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_SCTP @@ -0,0 +1 @@ +CONFIG_NF_CT_PROTO_SCTP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_UDPLITE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_UDPLITE new file mode 100644 index 0000000000000000000000000000000000000000..25c61d43064183a6b2bac294e740930e4b54cda6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_UDPLITE @@ -0,0 +1 @@ +CONFIG_NF_CT_PROTO_UDPLITE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DEFRAG_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DEFRAG_IPV6 new file mode 100644 index 0000000000000000000000000000000000000000..6540eb96d8c3e123ab76dea484d4b9d9df154377 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DEFRAG_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_DEFRAG_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV4 new file mode 100644 index 0000000000000000000000000000000000000000..7c349df6c16cef946fdef715e195d4e5c8c1604f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_DUP_IPV4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV6 new file mode 100644 index 0000000000000000000000000000000000000000..a130933ed9948ccf85eeccbc22add5e85c3ea247 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_DUP_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_NETDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_NETDEV new file mode 100644 index 0000000000000000000000000000000000000000..d058a0ab802d9fc039e274d42994c698bf3237af --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_NETDEV @@ -0,0 +1 @@ +CONFIG_NF_DUP_NETDEV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE new file mode 100644 index 0000000000000000000000000000000000000000..a2b5e03a7d8cc1d372e70a190f746fe118685251 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE @@ -0,0 +1 @@ +CONFIG_NF_FLOW_TABLE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE_INET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE_INET new file mode 100644 index 0000000000000000000000000000000000000000..f41129cf14a64e6ec71b50cc5751dd0dd0864bd6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE_INET @@ -0,0 +1 @@ +CONFIG_NF_FLOW_TABLE_INET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_ARP 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_ARP new file mode 100644 index 0000000000000000000000000000000000000000..561b42dbb05d3adfde243bb607e29528198ab949 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_ARP @@ -0,0 +1 @@ +CONFIG_NF_LOG_ARP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV4 new file mode 100644 index 0000000000000000000000000000000000000000..97d73f968877a523277613d2c9b9ebf20fdeceec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_LOG_IPV4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV6 new file mode 100644 index 0000000000000000000000000000000000000000..dbfa7adfbb58d8faeb42c53b156bfaed3e64c06a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_LOG_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_AMANDA b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_AMANDA new file mode 100644 index 0000000000000000000000000000000000000000..4e670b07247082dd6f61c7935e802ca8f1597201 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_AMANDA @@ -0,0 +1 @@ +CONFIG_NF_NAT_AMANDA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_FTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_FTP new file mode 100644 index 0000000000000000000000000000000000000000..f760b7fff4c995af04ad163d5e8dfd6c84012ad4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_FTP @@ -0,0 +1 @@ +CONFIG_NF_NAT_FTP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_H323 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_H323 new file mode 100644 index 0000000000000000000000000000000000000000..0af2054a49b9635418f850f6ed0f393d4c24b653 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_H323 @@ -0,0 +1 @@ +CONFIG_NF_NAT_H323=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_IRC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_IRC new file mode 100644 index 0000000000000000000000000000000000000000..7db14c9ce12848400c78339518331abc469a2918 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_IRC @@ -0,0 +1 @@ +CONFIG_NF_NAT_IRC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_MASQUERADE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_MASQUERADE new file mode 100644 index 0000000000000000000000000000000000000000..2039d997069a2eda0780478030284306afa2aca4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_MASQUERADE @@ -0,0 +1 @@ +CONFIG_NF_NAT_MASQUERADE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_PPTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_PPTP new file mode 100644 index 0000000000000000000000000000000000000000..6026fcc8753426488f6036f736dc1d16cdbb5ec5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_PPTP @@ -0,0 +1 @@ +CONFIG_NF_NAT_PPTP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_REDIRECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_REDIRECT new file mode 100644 index 0000000000000000000000000000000000000000..4fa55b7848caa1b09b97158d5a0906d963b311a5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_REDIRECT @@ -0,0 +1 @@ +CONFIG_NF_NAT_REDIRECT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SIP 
new file mode 100644 index 0000000000000000000000000000000000000000..390f1fce80daf6e36d18c6ea44c74d332c7e07ad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SIP @@ -0,0 +1 @@ +CONFIG_NF_NAT_SIP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SNMP_BASIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SNMP_BASIC new file mode 100644 index 0000000000000000000000000000000000000000..b73738613728d29307f79ae27b2e07f8b9cc43a4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SNMP_BASIC @@ -0,0 +1 @@ +CONFIG_NF_NAT_SNMP_BASIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_TFTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_TFTP new file mode 100644 index 0000000000000000000000000000000000000000..099ed3e59cc8df53e174a21ac584a82b1ca655de --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_TFTP @@ -0,0 +1 @@ +CONFIG_NF_NAT_TFTP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV4 new file mode 100644 index 0000000000000000000000000000000000000000..8223fd9009ed197ee8f5748e78a46721848c49a8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_REJECT_IPV4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV6 new file mode 100644 index 0000000000000000000000000000000000000000..efa588cd51a5f7655849fd09dff57f881151c0f5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_REJECT_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV4 new file mode 100644 index 0000000000000000000000000000000000000000..330b7cf66edbfe09e66daa5b77368275007fdad2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_SOCKET_IPV4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV6 new file mode 100644 index 0000000000000000000000000000000000000000..f3ec9e0fda4832c5f03a3393f3e907c6675ab00f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_SOCKET_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_ARP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_ARP new file mode 100644 index 0000000000000000000000000000000000000000..5e3cf6e4b3112b89d84b0c7c9caab89af0acc0d7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_ARP @@ -0,0 +1 @@ +CONFIG_NF_TABLES_ARP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_BRIDGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_BRIDGE new file mode 100644 index 0000000000000000000000000000000000000000..bfdd627528406ca46c0bb4a619dede529d342cc6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_BRIDGE @@ -0,0 +1 @@ +CONFIG_NF_TABLES_BRIDGE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_NETDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_NETDEV new file mode 100644 index 0000000000000000000000000000000000000000..2fd27f8e9ad788cfc03f655e9e65b24cc99373ed --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_NETDEV @@ -0,0 +1 @@ +CONFIG_NF_TABLES_NETDEV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TPROXY_IPV6 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TPROXY_IPV6 new file mode 100644 index 0000000000000000000000000000000000000000..d8785321cd61db79fe836cde0df05101e894b12f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TPROXY_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_TPROXY_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NLMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_NLMON new file mode 100644 index 0000000000000000000000000000000000000000..53e8b774262756bdc5eaf9a5de90ddc09c5e4a33 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NLMON @@ -0,0 +1 @@ +CONFIG_NLMON=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NODES_SHIFT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NODES_SHIFT new file mode 100644 index 0000000000000000000000000000000000000000..59f7bcc001580d622c4718851b65eab4777dfdd9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NODES_SHIFT @@ -0,0 +1 @@ +CONFIG_NODES_SHIFT=6 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..6a31e2d26c9deec4c32c7da58f7c731db6fbe31f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG @@ -0,0 +1 @@ +CONFIG_NOUVEAU_DEBUG=5 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..521d58d7d5ce7e6a4ae20f79964d8786a9e96802 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG_DEFAULT @@ -0,0 +1 @@ +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NTB b/anolis/configs/L1-RECOMMEND/default/CONFIG_NTB new file mode 100644 index 0000000000000000000000000000000000000000..f9d9791f4855122b2a9f486582a1641b34ca7c2d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NTB @@ -0,0 +1 @@ +CONFIG_NTB=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING b/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING new file mode 100644 index 0000000000000000000000000000000000000000..9de89d51d8df546ae308191b56edf85d5161ed72 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING @@ -0,0 +1 @@ +CONFIG_NUMA_BALANCING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING_DEFAULT_ENABLED b/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING_DEFAULT_ENABLED new file mode 100644 index 0000000000000000000000000000000000000000..5f7402180579dbcc8d0635a233459497ca86af60 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING_DEFAULT_ENABLED @@ -0,0 +1 @@ +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NVDIMM_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVDIMM_KEYS new file mode 100644 index 0000000000000000000000000000000000000000..a844847b2f389c87b7a93a2d8b8fac0be17ff69c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVDIMM_KEYS @@ -0,0 +1 @@ +CONFIG_NVDIMM_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM new file mode 100644 index 0000000000000000000000000000000000000000..df779d8ecac5240e26ba31fcf9061f4067375d22 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM @@ -0,0 +1 @@ +CONFIG_NVMEM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM_SYSFS 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM_SYSFS new file mode 100644 index 0000000000000000000000000000000000000000..decd343e67fbbd3da3a523d1edd47b63f3b422e1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM_SYSFS @@ -0,0 +1 @@ +CONFIG_NVMEM_SYSFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NVME_FC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVME_FC new file mode 100644 index 0000000000000000000000000000000000000000..2152575d9dbac53fac7e1204d53e0a14913e8bab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVME_FC @@ -0,0 +1 @@ +CONFIG_NVME_FC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OBJTOOL b/anolis/configs/L1-RECOMMEND/default/CONFIG_OBJTOOL new file mode 100644 index 0000000000000000000000000000000000000000..cf3a9f20f93d2cfe8baa006f1f94c41c819e9162 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OBJTOOL @@ -0,0 +1 @@ +CONFIG_OBJTOOL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH new file mode 100644 index 0000000000000000000000000000000000000000..82b61f8a1688743996aeca4919c8daf345281e0b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH @@ -0,0 +1 @@ +CONFIG_OPENVSWITCH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GENEVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GENEVE new file mode 100644 index 0000000000000000000000000000000000000000..158138e6595b8f1d6c7a4f63d6fc6297145aee51 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GENEVE @@ -0,0 +1 @@ +CONFIG_OPENVSWITCH_GENEVE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GRE b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GRE new file mode 100644 index 0000000000000000000000000000000000000000..462594567347430ecfeb1b532bf5c05528dcde73 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GRE @@ -0,0 +1 @@ +CONFIG_OPENVSWITCH_GRE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_VXLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_VXLAN new file mode 100644 index 0000000000000000000000000000000000000000..c572c1296987845a72208fa840d7f8791c98ef38 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_VXLAN @@ -0,0 +1 @@ +CONFIG_OPENVSWITCH_VXLAN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OSNOISE_TRACER b/anolis/configs/L1-RECOMMEND/default/CONFIG_OSNOISE_TRACER new file mode 100644 index 0000000000000000000000000000000000000000..07e643608e53280b3a93f9a0be6360cc400bbbbd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OSNOISE_TRACER @@ -0,0 +1 @@ +CONFIG_OSNOISE_TRACER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_INDEX b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_INDEX new file mode 100644 index 0000000000000000000000000000000000000000..c61cab2ace0780755bc1a5f51842e283fe0442c2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_INDEX @@ -0,0 +1 @@ +CONFIG_OVERLAY_FS_INDEX=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_METACOPY b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_METACOPY new file mode 100644 index 0000000000000000000000000000000000000000..6b4564cf17749e7d9ba9cce798430ffbc6eaf0dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_METACOPY @@ -0,0 +1 @@ +# CONFIG_OVERLAY_FS_METACOPY is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_NFS_EXPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_NFS_EXPORT new file mode 100644 index 0000000000000000000000000000000000000000..6b3609a328944fa5a21316d919feeb446e19d2d9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_NFS_EXPORT @@ -0,0 +1 @@ +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW new file mode 100644 index 0000000000000000000000000000000000000000..23e8ade165b69f02ae0d5cd60b260d1bdd98f2c9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW @@ -0,0 +1 @@ +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_DIR b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_DIR new file mode 100644 index 0000000000000000000000000000000000000000..80cb207a956f380b31d4d23fb43c5a05b91d4d0d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_DIR @@ -0,0 +1 @@ +CONFIG_OVERLAY_FS_REDIRECT_DIR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_XINO_AUTO b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_XINO_AUTO new file mode 100644 index 0000000000000000000000000000000000000000..3a73c51ee49e5a610ba31ab1ae7aa54ca446f15e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_XINO_AUTO @@ -0,0 +1 @@ +# CONFIG_OVERLAY_FS_XINO_AUTO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PACKET_DIAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_PACKET_DIAG new file mode 100644 index 0000000000000000000000000000000000000000..f9e9bf42594464d54965c679feb7bd6a06a1f4c6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PACKET_DIAG @@ -0,0 +1 @@ +CONFIG_PACKET_DIAG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGECACHE_LIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGECACHE_LIMIT new file mode 100644 index 0000000000000000000000000000000000000000..5727f56a0b3ebc2ee2a0c66d1a385cb5c6b2ac6f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGECACHE_LIMIT @@ -0,0 +1 @@ +CONFIG_PAGECACHE_LIMIT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_EXTENSION b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_EXTENSION new file mode 100644 index 0000000000000000000000000000000000000000..f55df4f4e26450c64345ce3c691cbda5052fdd1a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_EXTENSION @@ -0,0 +1 @@ +# CONFIG_PAGE_EXTENSION is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_OWNER b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_OWNER new file mode 100644 index 0000000000000000000000000000000000000000..441661dcfade027b691552e10019f5e7ba137db3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_OWNER @@ -0,0 +1 @@ +# CONFIG_PAGE_OWNER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_POISONING b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_POISONING new file mode 100644 index 0000000000000000000000000000000000000000..0602168f36e632f75b75bd2a7a2212bb35e3c0ea --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_POISONING @@ -0,0 +1 @@ +# CONFIG_PAGE_POISONING is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_REPORTING b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_REPORTING new file mode 
100644 index 0000000000000000000000000000000000000000..454926ab42efdaa3dcf3fe72454e02b0861015f6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_REPORTING @@ -0,0 +1 @@ +CONFIG_PAGE_REPORTING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PARAVIRT_TIME_ACCOUNTING b/anolis/configs/L1-RECOMMEND/default/CONFIG_PARAVIRT_TIME_ACCOUNTING new file mode 100644 index 0000000000000000000000000000000000000000..176320ad58962be65b64abec45d843e5a9ae8511 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PARAVIRT_TIME_ACCOUNTING @@ -0,0 +1 @@ +CONFIG_PARAVIRT_TIME_ACCOUNTING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEAER b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEAER new file mode 100644 index 0000000000000000000000000000000000000000..47b8ec3cad545cfef47fde11196ce0d592796536 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEAER @@ -0,0 +1 @@ +CONFIG_PCIEAER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM new file mode 100644 index 0000000000000000000000000000000000000000..5233234fe94957c7f69c658a738b1ed96a8ccc6e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM @@ -0,0 +1 @@ +CONFIG_PCIEASPM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..cc46215a4b21d76841ca2d166f69c215b370b4c5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM_DEFAULT @@ -0,0 +1 @@ +CONFIG_PCIEASPM_DEFAULT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_DPC b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_DPC new file mode 100644 index 0000000000000000000000000000000000000000..e45e816fc8b25241ee150a1fdc07b11a11af4758 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_DPC @@ -0,0 +1 @@ +CONFIG_PCIE_DPC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_ECRC b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_ECRC new file mode 100644 index 0000000000000000000000000000000000000000..e44624ce1b3d3fdcf5df9f1ca2651813b73e56f6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_ECRC @@ -0,0 +1 @@ +CONFIG_PCIE_ECRC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_EDR b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_EDR new file mode 100644 index 0000000000000000000000000000000000000000..9c6ee7bc6bdba137cd4442aa2da01448a870b5cd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_EDR @@ -0,0 +1 @@ +CONFIG_PCIE_EDR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_HYPERV b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_HYPERV new file mode 100644 index 0000000000000000000000000000000000000000..baf87f9b9c5377d65fa846638dbde85d3b504131 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_HYPERV @@ -0,0 +1 @@ +CONFIG_PCI_HYPERV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_HYPERV_INTERFACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_HYPERV_INTERFACE new file mode 100644 index 0000000000000000000000000000000000000000..20446aad4a67767c9a4010338c8f3a4e2afe2f9c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_HYPERV_INTERFACE @@ -0,0 +1 @@ +CONFIG_PCI_HYPERV_INTERFACE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PASID b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PASID new file mode 100644 index 
0000000000000000000000000000000000000000..33abee91ca0a0fcfeeef163d1475964309440919 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PASID @@ -0,0 +1 @@ +CONFIG_PCI_PASID=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PRI b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PRI new file mode 100644 index 0000000000000000000000000000000000000000..4f25cd0ae505afeb4715c93c26c763a2cc853617 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PRI @@ -0,0 +1 @@ +CONFIG_PCI_PRI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_QUIRKS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_QUIRKS new file mode 100644 index 0000000000000000000000000000000000000000..ddc2d4cb1149893b2e8a1df9526fcc6ffeccf4f1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_QUIRKS @@ -0,0 +1 @@ +CONFIG_PCI_QUIRKS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX new file mode 100644 index 0000000000000000000000000000000000000000..8c42e3567daa42e477994ca37502f7320a4b13b9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX @@ -0,0 +1 @@ +CONFIG_PCP_BATCH_SCALE_MAX=5 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_STATS new file mode 100644 index 0000000000000000000000000000000000000000..873749756446fae4a7ddd730779b1a8c2ef9f31c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_STATS @@ -0,0 +1 @@ +# CONFIG_PERCPU_STATS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_TEST new file mode 100644 index 0000000000000000000000000000000000000000..f3d441c00e2b27997e0855f633663ebf2628b1eb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_TEST @@ -0,0 +1 @@ +# CONFIG_PERCPU_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PERSISTENT_KEYRINGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERSISTENT_KEYRINGS new file mode 100644 index 0000000000000000000000000000000000000000..6894726868c0b9bcc001f76af5966811d71402ba --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERSISTENT_KEYRINGS @@ -0,0 +1 @@ +CONFIG_PERSISTENT_KEYRINGS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PHYLIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_PHYLIB new file mode 100644 index 0000000000000000000000000000000000000000..64ef522339b4cdfaabf1cb631571ec2e2a5d3425 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PHYLIB @@ -0,0 +1 @@ +CONFIG_PHYLIB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS7_TEST_KEY b/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS7_TEST_KEY new file mode 100644 index 0000000000000000000000000000000000000000..ba15fabfb2df574fa4eda7c409c4233e1ce0a4d6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS7_TEST_KEY @@ -0,0 +1 @@ +# CONFIG_PKCS7_TEST_KEY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS8_PRIVATE_KEY_PARSER b/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS8_PRIVATE_KEY_PARSER new file mode 100644 index 0000000000000000000000000000000000000000..f7a7c853989218c7d7a28705b9ec50f3cbaa0652 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS8_PRIVATE_KEY_PARSER @@ -0,0 +1 @@ +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PMBUS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PMBUS 
new file mode 100644 index 0000000000000000000000000000000000000000..8c904bea1883d1f55bbf5894d453c39f5073733f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PMBUS @@ -0,0 +1 @@ +CONFIG_PMBUS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_ADVANCED_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_ADVANCED_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..5ebe2ad863434679d3f8d7b2baae096453af3ed6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_ADVANCED_DEBUG @@ -0,0 +1 @@ +# CONFIG_PM_ADVANCED_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_AUTOSLEEP b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_AUTOSLEEP new file mode 100644 index 0000000000000000000000000000000000000000..f79c502c36192bd4ce40ba4a831cb6d96b9bfd41 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_AUTOSLEEP @@ -0,0 +1 @@ +# CONFIG_PM_AUTOSLEEP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..7bdf35967d06b9c8f51df05c0aef049d9a61a0c1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_DEBUG @@ -0,0 +1 @@ +CONFIG_PM_DEBUG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP new file mode 100644 index 0000000000000000000000000000000000000000..94359f9ce90c57e1505b1a5a982f92fcd8a58d9d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP @@ -0,0 +1 @@ +CONFIG_PM_SLEEP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..2af0abe869c71dd72632ba6d723f0ec9c72aa3b5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_DEBUG @@ -0,0 +1 @@ +CONFIG_PM_SLEEP_DEBUG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_SMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_SMP new file mode 100644 index 0000000000000000000000000000000000000000..2927603759bb07072bc1c04fe3348f1def49dcab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_SMP @@ -0,0 +1 @@ +CONFIG_PM_SLEEP_SMP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_STD_PARTITION b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_STD_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..f4866f0ea2e0016d2622376c207bdaced810c1c2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_STD_PARTITION @@ -0,0 +1 @@ +CONFIG_PM_STD_PARTITION="" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_TEST_SUSPEND b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_TEST_SUSPEND new file mode 100644 index 0000000000000000000000000000000000000000..f0311233a3509a3318578338512f41dd6f8709b9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_TEST_SUSPEND @@ -0,0 +1 @@ +# CONFIG_PM_TEST_SUSPEND is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_WAKELOCKS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_WAKELOCKS new file mode 100644 index 0000000000000000000000000000000000000000..8c4739d32b88894cc46ba9f78f2dc0acfd41aedc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_WAKELOCKS @@ -0,0 +1 @@ +# CONFIG_PM_WAKELOCKS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE b/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE 
new file mode 100644 index 0000000000000000000000000000000000000000..6991d0bcbb21d57b7818750fbea36572fb5f15f3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE @@ -0,0 +1 @@ +CONFIG_POSIX_MQUEUE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE_SYSCTL b/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE_SYSCTL new file mode 100644 index 0000000000000000000000000000000000000000..23783f9a63bc7d505f0a9b656c835240c5a67003 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE_SYSCTL @@ -0,0 +1 @@ +CONFIG_POSIX_MQUEUE_SYSCTL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_POWER_RESET b/anolis/configs/L1-RECOMMEND/default/CONFIG_POWER_RESET new file mode 100644 index 0000000000000000000000000000000000000000..453890b18e26d7cfeb0c943f0657e52fa4581e1b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_POWER_RESET @@ -0,0 +1 @@ +CONFIG_POWER_RESET=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PPP b/anolis/configs/L1-RECOMMEND/default/CONFIG_PPP new file mode 100644 index 0000000000000000000000000000000000000000..f2e4de8c9216b00f378c7e5413760ba9fc7b2505 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PPP @@ -0,0 +1 @@ +CONFIG_PPP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PPPOE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PPPOE new file mode 100644 index 0000000000000000000000000000000000000000..3451811b54fa614820c0ba683cd807d5320ceb9c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PPPOE @@ -0,0 +1 @@ +CONFIG_PPPOE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_CHILDREN b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_CHILDREN new file mode 100644 index 0000000000000000000000000000000000000000..e0d900831e351b9cd0ab4ab329482fea12278479 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_CHILDREN @@ -0,0 +1 @@ +CONFIG_PROC_CHILDREN=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_EVENTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_EVENTS new file mode 100644 index 0000000000000000000000000000000000000000..3755504b7dd2b8778060fccfa1eff3bfa34862ad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_EVENTS @@ -0,0 +1 @@ +CONFIG_PROC_EVENTS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_MEM_ALWAYS_FORCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_MEM_ALWAYS_FORCE new file mode 100644 index 0000000000000000000000000000000000000000..44da3b23f1d1e9b9c0bb8523a7d3aeda3e6b7fe5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_MEM_ALWAYS_FORCE @@ -0,0 +1 @@ +CONFIG_PROC_MEM_ALWAYS_FORCE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_VMCORE_DEVICE_DUMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_VMCORE_DEVICE_DUMP new file mode 100644 index 0000000000000000000000000000000000000000..1a63c6ae7a45b6552197aaa49b2fb69eac27b45d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_VMCORE_DEVICE_DUMP @@ -0,0 +1 @@ +CONFIG_PROC_VMCORE_DEVICE_DUMP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILE_ANNOTATED_BRANCHES b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILE_ANNOTATED_BRANCHES new file mode 100644 index 0000000000000000000000000000000000000000..ea7229988789da72c09eeea1b50e2bfdd06ee21b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILE_ANNOTATED_BRANCHES @@ -0,0 +1 @@ +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILING 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILING new file mode 100644 index 0000000000000000000000000000000000000000..5c7124d6011964bb49228ed8851fdacad7d2b50f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILING @@ -0,0 +1 @@ +CONFIG_PROFILING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSAMPLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSAMPLE new file mode 100644 index 0000000000000000000000000000000000000000..fca0860ea69fd331b26e5b681c27378958b9394f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSAMPLE @@ -0,0 +1 @@ +CONFIG_PSAMPLE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI new file mode 100644 index 0000000000000000000000000000000000000000..72452cf33af886adccb28ddf73f99bd1fc50d711 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI @@ -0,0 +1 @@ +CONFIG_PSI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI_DEFAULT_DISABLED b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI_DEFAULT_DISABLED new file mode 100644 index 0000000000000000000000000000000000000000..35dd99eec0618ddb13209c9185f22f312b5b950e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI_DEFAULT_DISABLED @@ -0,0 +1 @@ +CONFIG_PSI_DEFAULT_DISABLED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE new file mode 100644 index 0000000000000000000000000000000000000000..e7ffca20825c92e2afe76b3c2f85187136312e37 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE @@ -0,0 +1 @@ +CONFIG_PSTORE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_COMPRESS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_COMPRESS new file mode 100644 index 0000000000000000000000000000000000000000..92d9fa1aad2e146d9f3280b4f8ad4d868c013df8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_COMPRESS @@ -0,0 +1 @@ +CONFIG_PSTORE_COMPRESS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_CONSOLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_CONSOLE new file mode 100644 index 0000000000000000000000000000000000000000..c740961f5cc81f57fbc34287bb0fba5ff5d5d5de --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_CONSOLE @@ -0,0 +1 @@ +CONFIG_PSTORE_CONSOLE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_RAM b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_RAM new file mode 100644 index 0000000000000000000000000000000000000000..cf887d29571e81eb74a2a072c9cbc54b669a2399 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_RAM @@ -0,0 +1 @@ +CONFIG_PSTORE_RAM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V1 b/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V1 new file mode 100644 index 0000000000000000000000000000000000000000..ca782388a577de9f4b4dbeeb0f1c4383ce9d8ded --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V1 @@ -0,0 +1 @@ +# CONFIG_QFMT_V1 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V2 new file mode 100644 index 0000000000000000000000000000000000000000..961af550cac3461102477722257a3dca8b14b1be --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V2 @@ -0,0 +1 @@ +CONFIG_QFMT_V2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_DEBUG new file mode 100644 index 
0000000000000000000000000000000000000000..f7312a8411be90fa78a4183c54a68e84ddba062b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_DEBUG @@ -0,0 +1 @@ +# CONFIG_QUOTA_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_NETLINK_INTERFACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_NETLINK_INTERFACE new file mode 100644 index 0000000000000000000000000000000000000000..83abecebb84192ce881cc6f29292567a626bb5de --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_NETLINK_INTERFACE @@ -0,0 +1 @@ +CONFIG_QUOTA_NETLINK_INTERFACE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ new file mode 100644 index 0000000000000000000000000000000000000000..3b150e43bdd0e6fd05f1e8e240ddfb2756332066 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ @@ -0,0 +1 @@ +CONFIG_RAID6_PQ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ_BENCHMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ_BENCHMARK new file mode 100644 index 0000000000000000000000000000000000000000..1f010f386359884b683ac66af828e6484b719b0c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ_BENCHMARK @@ -0,0 +1 @@ +CONFIG_RAID6_PQ_BENCHMARK=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID_ATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID_ATTRS new file mode 100644 index 0000000000000000000000000000000000000000..6ba5f1cc4726f9fbcff44a47456da533115a6905 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID_ATTRS @@ -0,0 +1 @@ +CONFIG_RAID_ATTRS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_CPU_STALL_TIMEOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_CPU_STALL_TIMEOUT new file mode 100644 index 0000000000000000000000000000000000000000..472dbad6eadf63f05d51ea9697c5003d0c8fa8db --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_CPU_STALL_TIMEOUT @@ -0,0 +1 @@ +CONFIG_RCU_CPU_STALL_TIMEOUT=60 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EQS_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EQS_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..225c6bd3a6ee18bb8d20cb448a5bbf08933df39e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EQS_DEBUG @@ -0,0 +1 @@ +# CONFIG_RCU_EQS_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EXPERT b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EXPERT new file mode 100644 index 0000000000000000000000000000000000000000..8c9922cf3d2e4d660e3b8f71bffedc5b72827ec2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EXPERT @@ -0,0 +1 @@ +# CONFIG_RCU_EXPERT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_NOCB_CPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_NOCB_CPU new file mode 100644 index 0000000000000000000000000000000000000000..19a9621357b9c47f09260cffe1942d5c7a69dc38 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_NOCB_CPU @@ -0,0 +1 @@ +CONFIG_RCU_NOCB_CPU=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_REF_SCALE_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_REF_SCALE_TEST new file mode 100644 index 0000000000000000000000000000000000000000..50d7d874be7a7164bfe84526d1f1fdcfd25fa46f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_REF_SCALE_TEST @@ -0,0 +1 @@ +# CONFIG_RCU_REF_SCALE_TEST is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_SCALE_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_SCALE_TEST new file mode 100644 index 0000000000000000000000000000000000000000..af6860852e899b290d1ce6633792e2b60308df03 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_SCALE_TEST @@ -0,0 +1 @@ +# CONFIG_RCU_SCALE_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TORTURE_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TORTURE_TEST new file mode 100644 index 0000000000000000000000000000000000000000..ee84473477c4cc6752f07142ec95d5e5fc6e765c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TORTURE_TEST @@ -0,0 +1 @@ +# CONFIG_RCU_TORTURE_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TRACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TRACE new file mode 100644 index 0000000000000000000000000000000000000000..4acae1aab8c762a5aa4a1da7eef613c75893ef18 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TRACE @@ -0,0 +1 @@ +# CONFIG_RCU_TRACE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_RXE b/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_RXE new file mode 100644 index 0000000000000000000000000000000000000000..53cd17fc227e328bc0984413ff9d854b6750201b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_RXE @@ -0,0 +1 @@ +CONFIG_RDMA_RXE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_SIW b/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_SIW new file mode 100644 index 0000000000000000000000000000000000000000..3bc1ec48a5a07c9d94d5217861ba9571a59e55ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_SIW @@ -0,0 +1 @@ +CONFIG_RDMA_SIW=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_BZIP2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_BZIP2 new file mode 100644 index 0000000000000000000000000000000000000000..9d25ca8111852f7d4ed1a7bd7d309ab1c2c01e15 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_BZIP2 @@ -0,0 +1 @@ +CONFIG_RD_BZIP2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_GZIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_GZIP new file mode 100644 index 0000000000000000000000000000000000000000..d38a59a2bdb25e9d78c7ed529d34c33de027a7e5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_GZIP @@ -0,0 +1 @@ +CONFIG_RD_GZIP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZ4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZ4 new file mode 100644 index 0000000000000000000000000000000000000000..ecc2a70486a378cd90df31dc3b26187a076a57d4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZ4 @@ -0,0 +1 @@ +CONFIG_RD_LZ4=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZMA new file mode 100644 index 0000000000000000000000000000000000000000..02827c69bec1e2afe70af319facd3998e8711718 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZMA @@ -0,0 +1 @@ +CONFIG_RD_LZMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZO b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZO new file mode 100644 index 0000000000000000000000000000000000000000..4734aec1da081f23794def0f08e458b29f36dfd7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZO @@ -0,0 +1 @@ +CONFIG_RD_LZO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_XZ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_XZ new file mode 100644 index 
0000000000000000000000000000000000000000..bbd13fc54c3cd12c4018f8fcaef2455a9fd4eaa7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_XZ
@@ -0,0 +1 @@
+CONFIG_RD_XZ=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_ZSTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_ZSTD
new file mode 100644
index 0000000000000000000000000000000000000000..da1496ccb2e27731305993404e6b43bb0453d688
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_ZSTD
@@ -0,0 +1 @@
+CONFIG_RD_ZSTD=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_READABLE_ASM b/anolis/configs/L1-RECOMMEND/default/CONFIG_READABLE_ASM
new file mode 100644
index 0000000000000000000000000000000000000000..147b2c061836f848588592fdb691c4da8fead696
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_READABLE_ASM
@@ -0,0 +1 @@
+# CONFIG_READABLE_ASM is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_READ_ONLY_THP_FOR_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_READ_ONLY_THP_FOR_FS
new file mode 100644
index 0000000000000000000000000000000000000000..9b7a59151f7dc74db8470ca4cff6b0a8f0aad580
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_READ_ONLY_THP_FOR_FS
@@ -0,0 +1 @@
+CONFIG_READ_ONLY_THP_FOR_FS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RICH_CONTAINER b/anolis/configs/L1-RECOMMEND/default/CONFIG_RICH_CONTAINER
new file mode 100644
index 0000000000000000000000000000000000000000..18c58f3058f3d7efaace1f60b2f89d73a3d3aa9e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RICH_CONTAINER
@@ -0,0 +1 @@
+CONFIG_RICH_CONTAINER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RPCSEC_GSS_KRB5 b/anolis/configs/L1-RECOMMEND/default/CONFIG_RPCSEC_GSS_KRB5
new file mode 100644
index 0000000000000000000000000000000000000000..81fb2fe892e0432719cbef519149b132ec0822b3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RPCSEC_GSS_KRB5
@@ -0,0 +1 @@
+CONFIG_RPCSEC_GSS_KRB5=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS
new file mode 100644
index 0000000000000000000000000000000000000000..bea3441816b61c0973f5aa7456ef32e64c24d73f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS
@@ -0,0 +1 @@
+CONFIG_RTC_HCTOSYS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS_DEVICE b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS_DEVICE
new file mode 100644
index 0000000000000000000000000000000000000000..0e6f28849f89c332220f16fcdd88838731e2fee5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS_DEVICE
@@ -0,0 +1 @@
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_DEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_DEV
new file mode 100644
index 0000000000000000000000000000000000000000..9481b9dfa59f0ce012053ec6a9b0165b3a6795ea
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_DEV
@@ -0,0 +1 @@
+CONFIG_RTC_INTF_DEV=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_PROC b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_PROC
new file mode 100644
index 0000000000000000000000000000000000000000..12f03421672d402678d4e39694511cf4cc142101
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_PROC
@@ -0,0 +1 @@
+CONFIG_RTC_INTF_PROC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_SYSFS
new file mode 100644
index 0000000000000000000000000000000000000000..7c60145874f2f711b455d854b8e5f9f447dd1623
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_SYSFS
@@ -0,0 +1 @@
+CONFIG_RTC_INTF_SYSFS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_NVMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_NVMEM
new file mode 100644
index 0000000000000000000000000000000000000000..bee8859105a416da829cae73861a58d732249fdf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_NVMEM
@@ -0,0 +1 @@
+CONFIG_RTC_NVMEM=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC
new file mode 100644
index 0000000000000000000000000000000000000000..f3581c76b8c67f3cca4983fd67afe88f6037635f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC
@@ -0,0 +1 @@
+CONFIG_RTC_SYSTOHC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC_DEVICE b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC_DEVICE
new file mode 100644
index 0000000000000000000000000000000000000000..031950602e96c8e41b577b6fc11b99e2c2537638
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC_DEVICE
@@ -0,0 +1 @@
+CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RT_GROUP_SCHED b/anolis/configs/L1-RECOMMEND/default/CONFIG_RT_GROUP_SCHED
new file mode 100644
index 0000000000000000000000000000000000000000..15dbb458364ad19b607a1437501ce2c9ce67b169
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RT_GROUP_SCHED
@@ -0,0 +1 @@
+CONFIG_RT_GROUP_SCHED=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RUNTIME_TESTING_MENU b/anolis/configs/L1-RECOMMEND/default/CONFIG_RUNTIME_TESTING_MENU
new file mode 100644
index 0000000000000000000000000000000000000000..7d54fe1195e4ebc079673f306848a4749b8707a4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RUNTIME_TESTING_MENU
@@ -0,0 +1 @@
+CONFIG_RUNTIME_TESTING_MENU=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI
new file mode 100644
index 0000000000000000000000000000000000000000..18bfae1bc2841bef28296b83f7c2df26a4fd2b3c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI
@@ -0,0 +1 @@
+CONFIG_SATA_AHCI=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI_PLATFORM b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..ff2a626abffbf4a7afa73ad532d1643d2d091b51
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI_PLATFORM
@@ -0,0 +1 @@
+CONFIG_SATA_AHCI_PLATFORM=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_MOBILE_LPM_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_MOBILE_LPM_POLICY
new file mode 100644
index 0000000000000000000000000000000000000000..a510f6d0b50be4943d90921c84d77f94b103c7ba
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_MOBILE_LPM_POLICY
@@ -0,0 +1 @@
+CONFIG_SATA_MOBILE_LPM_POLICY=0
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_PMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_PMP
new file mode 100644
index 0000000000000000000000000000000000000000..11f39dd5035e0c528ba5da91235bd7c1935eb3a4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_PMP
@@ -0,0 +1 @@
+CONFIG_SATA_PMP=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_ACPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_ACPU
new file mode 100644
index 0000000000000000000000000000000000000000..770ca7798e77d2147f275065de4b64c44a404388
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_ACPU
@@ -0,0 +1 @@
+CONFIG_SCHED_ACPU=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_CLUSTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_CLUSTER
new file mode 100644
index 0000000000000000000000000000000000000000..046feb27d436a582b7acecd47a891bbcf3a492b3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_CLUSTER
@@ -0,0 +1 @@
+CONFIG_SCHED_CLUSTER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_INFO b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_INFO
new file mode 100644
index 0000000000000000000000000000000000000000..aea94368094a0c8dc087c23218105d481e70d252
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_INFO
@@ -0,0 +1 @@
+CONFIG_SCHED_INFO=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SLI b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SLI
new file mode 100644
index 0000000000000000000000000000000000000000..31147745cfef8d2ecef164a56089c5e820fdc44a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SLI
@@ -0,0 +1 @@
+CONFIG_SCHED_SLI=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SMT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SMT
new file mode 100644
index 0000000000000000000000000000000000000000..ed3f04a207ad72df30033a3595de5c00297aef66
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SMT
@@ -0,0 +1 @@
+CONFIG_SCHED_SMT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_TRACER b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_TRACER
new file mode 100644
index 0000000000000000000000000000000000000000..413447ad6ec15c25fea955c891453f323daa67a8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_TRACER
@@ -0,0 +1 @@
+CONFIG_SCHED_TRACER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_CONSTANTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_CONSTANTS
new file mode 100644
index 0000000000000000000000000000000000000000..7d1c6cb2226c821d365ed898358906e0bf51555e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_CONSTANTS
@@ -0,0 +1 @@
+CONFIG_SCSI_CONSTANTS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..86b328498d36c3434300e9f0c8d79e3903196c31
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DEBUG
@@ -0,0 +1 @@
+CONFIG_SCSI_DEBUG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DH b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DH
new file mode 100644
index 0000000000000000000000000000000000000000..b73df00a21cf940bd4a56ff77d72e9442761b3f4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DH
@@ -0,0 +1 @@
+CONFIG_SCSI_DH=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..55138cc94c49ec734f0b81669cc3b13b76e8c70a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DMA
@@ -0,0 +1 @@
+CONFIG_SCSI_DMA=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ENCLOSURE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ENCLOSURE
new file mode 100644
index 0000000000000000000000000000000000000000..adc36eab068cd63957feaee194e7f29e3ff2553d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ENCLOSURE
@@ -0,0 +1 @@
+CONFIG_SCSI_ENCLOSURE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_FC_ATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_FC_ATTRS
new file mode 100644
index 0000000000000000000000000000000000000000..71f848d2d5c87a59bd7cd5a2c426836313b66168
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_FC_ATTRS
@@ -0,0 +1 @@
+CONFIG_SCSI_FC_ATTRS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ISCSI_ATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ISCSI_ATTRS
new file mode 100644
index 0000000000000000000000000000000000000000..e75a2fb48df8794359910dfc3adfe81f9711060b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ISCSI_ATTRS
@@ -0,0 +1 @@
+CONFIG_SCSI_ISCSI_ATTRS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LEAPIORAID b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LEAPIORAID
new file mode 100644
index 0000000000000000000000000000000000000000..55062a39cfe4184c93744a4977dd90d4945b7300
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LEAPIORAID
@@ -0,0 +1 @@
+CONFIG_SCSI_LEAPIORAID=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOGGING b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOGGING
new file mode 100644
index 0000000000000000000000000000000000000000..5739436c0d895402f168b03ea2da24d219d8bb76
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOGGING
@@ -0,0 +1 @@
+CONFIG_SCSI_LOGGING=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOWLEVEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOWLEVEL
new file mode 100644
index 0000000000000000000000000000000000000000..0aa35b3047d42e179102c9b9030e915853c1b15f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOWLEVEL
@@ -0,0 +1 @@
+CONFIG_SCSI_LOWLEVEL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPI3MR b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPI3MR
new file mode 100644
index 0000000000000000000000000000000000000000..ba2223b78c9043ee0017c67ae9a740083e66bb9c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPI3MR
@@ -0,0 +1 @@
+CONFIG_SCSI_MPI3MR=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS
new file mode 100644
index 0000000000000000000000000000000000000000..b9b08c3fb1dc7edf4523781fbcc9f8d3075914f6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS
@@ -0,0 +1 @@
+CONFIG_SCSI_MPT2SAS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS_MAX_SGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS_MAX_SGE
new file mode 100644
index 0000000000000000000000000000000000000000..b0633b38ddd9c928d8dfe9b8cc9deb49610e9c76
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS_MAX_SGE
@@ -0,0 +1 @@
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS
new file mode 100644
index 0000000000000000000000000000000000000000..dc5f4d8ffb772bfdff499f34bdad8b893bcbec6c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS
@@ -0,0 +1 @@
+CONFIG_SCSI_MPT3SAS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS_MAX_SGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS_MAX_SGE
new file mode 100644
index 0000000000000000000000000000000000000000..b5f29773f06c3482e45bc2110a3fce23d8238f05
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS_MAX_SGE
@@ -0,0 +1 @@
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_PROC_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_PROC_FS
new file mode 100644
index 0000000000000000000000000000000000000000..565a44b8b920259c89d4f8af128a79f3e328918e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_PROC_FS
@@ -0,0 +1 @@
+CONFIG_SCSI_PROC_FS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATA b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATA
new file mode 100644
index 0000000000000000000000000000000000000000..5bd5a74d7dd3873ce18fde8aff31a35c6b1316cb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATA
@@ -0,0 +1 @@
+CONFIG_SCSI_SAS_ATA=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATTRS
new file mode 100644
index 0000000000000000000000000000000000000000..11e4323abaaecb9d30c2b01acd88682872c19b94
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATTRS
@@ -0,0 +1 @@
+CONFIG_SCSI_SAS_ATTRS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_HOST_SMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_HOST_SMP
new file mode 100644
index 0000000000000000000000000000000000000000..8227d7063f123bb8ef148564c804892f28bc4df5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_HOST_SMP
@@ -0,0 +1 @@
+CONFIG_SCSI_SAS_HOST_SMP=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_LIBSAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_LIBSAS
new file mode 100644
index 0000000000000000000000000000000000000000..31d5483879fa5d9051062ba14a66773c8eeaa0a1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_LIBSAS
@@ -0,0 +1 @@
+CONFIG_SCSI_SAS_LIBSAS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SCAN_ASYNC b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SCAN_ASYNC
new file mode 100644
index 0000000000000000000000000000000000000000..7de665f4b8310eb9e94b39205a17bd04e27ca28b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SCAN_ASYNC
@@ -0,0 +1 @@
+CONFIG_SCSI_SCAN_ASYNC=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SMARTPQI b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SMARTPQI
new file mode 100644
index 0000000000000000000000000000000000000000..da9632f9abd45edebe14e1515f3cb5e0a28b1221
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SMARTPQI
@@ -0,0 +1 @@
+CONFIG_SCSI_SMARTPQI=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SPI_ATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SPI_ATTRS
new file mode 100644
index 0000000000000000000000000000000000000000..fab2e0b91b577eb89f00e8fb6afa9158351eab5a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SPI_ATTRS
@@ -0,0 +1 @@
+CONFIG_SCSI_SPI_ATTRS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SRP_ATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SRP_ATTRS
new file mode 100644
index 0000000000000000000000000000000000000000..b1f7492d328f4243f3a1bfcdabcdf5fef5a85da0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SRP_ATTRS
@@ -0,0 +1 @@
+CONFIG_SCSI_SRP_ATTRS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_VIRTIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_VIRTIO
new file mode 100644
index 0000000000000000000000000000000000000000..6fadaf0f2d8e9b46c7fb13736d4a1870cb03656a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_VIRTIO
@@ -0,0 +1 @@
+CONFIG_SCSI_VIRTIO=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECRETMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECRETMEM
new file mode 100644
index 0000000000000000000000000000000000000000..440f8bc232893b9bb3a80eccb2655a4ce6980bcf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECRETMEM
@@ -0,0 +1 @@
+CONFIG_SECRETMEM=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECTION_MISMATCH_WARN_ONLY b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECTION_MISMATCH_WARN_ONLY
new file mode 100644
index 0000000000000000000000000000000000000000..0a6814b814cbedc387f5643b314e5b720096164b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECTION_MISMATCH_WARN_ONLY
@@ -0,0 +1 @@
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_DMESG_RESTRICT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_DMESG_RESTRICT
new file mode 100644
index 0000000000000000000000000000000000000000..353ad62886faa53345aa572b6d02a63654b31f6a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_DMESG_RESTRICT
@@ -0,0 +1 @@
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_AVC_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_AVC_STATS
new file mode 100644
index 0000000000000000000000000000000000000000..8596c96eb9249b4ccf614cce644727d30fb975b0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_AVC_STATS
@@ -0,0 +1 @@
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_DEVELOP b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_DEVELOP
new file mode 100644
index 0000000000000000000000000000000000000000..b9559b49fa200dfe6460e7f78fb993ff2371d395
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_DEVELOP
@@ -0,0 +1 @@
+CONFIG_SECURITY_SELINUX_DEVELOP=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE
new file mode 100644
index 0000000000000000000000000000000000000000..e8e9e3ea8c1e85b798c12c85efceecb08757bff2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE
@@ -0,0 +1 @@
+CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS
new file mode 100644
index 0000000000000000000000000000000000000000..5e8c57ec512e1d8ba94f5dce46d59619385f0846
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS
@@ -0,0 +1 @@
+CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SMACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SMACK
new file mode 100644
index 0000000000000000000000000000000000000000..32bdd7cd55494a9fb9b29135e0e90c18c78d2706
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SMACK
@@ -0,0 +1 @@
+# CONFIG_SECURITY_SMACK is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ALTERA_PS2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ALTERA_PS2
new file mode 100644
index 0000000000000000000000000000000000000000..9e366c11a8ac784b4a94941456b978c4e87b4294
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ALTERA_PS2
@@ -0,0 +1 @@
+CONFIG_SERIO_ALTERA_PS2=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ARC_PS2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ARC_PS2
new file mode 100644
index 0000000000000000000000000000000000000000..7535bbef9cf2f659cde6f079387c8be742fef7be
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ARC_PS2
@@ -0,0 +1 @@
+CONFIG_SERIO_ARC_PS2=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_LIBPS2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_LIBPS2
new file mode 100644
index 0000000000000000000000000000000000000000..0db209ef285eda79a44190aae0370a84029dc630
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_LIBPS2
@@ -0,0 +1 @@
+CONFIG_SERIO_LIBPS2=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_RAW b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_RAW
new file mode 100644
index 0000000000000000000000000000000000000000..29fd33a295fd7938e19e3b7b81fe4477fee3c6f5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_RAW
@@ -0,0 +1 @@
+CONFIG_SERIO_RAW=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_SERPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_SERPORT
new file mode 100644
index 0000000000000000000000000000000000000000..341b61290d260d72451d2eac79b03447d306bc53
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_SERPORT
@@ -0,0 +1 @@
+CONFIG_SERIO_SERPORT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SFC_FALCON b/anolis/configs/L1-RECOMMEND/default/CONFIG_SFC_FALCON
new file mode 100644
index 0000000000000000000000000000000000000000..95bfee80fb8ba6daf86ca33dd91619ce2f4e574e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SFC_FALCON
@@ -0,0 +1 @@
+# CONFIG_SFC_FALCON is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SHUFFLE_PAGE_ALLOCATOR b/anolis/configs/L1-RECOMMEND/default/CONFIG_SHUFFLE_PAGE_ALLOCATOR
new file mode 100644
index 0000000000000000000000000000000000000000..cff3a0b960e963d4f6c5012b14d13f533778d744
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SHUFFLE_PAGE_ALLOCATOR
@@ -0,0 +1 @@
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SIGNED_PE_FILE_VERIFICATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_SIGNED_PE_FILE_VERIFICATION
new file mode 100644
index 0000000000000000000000000000000000000000..d7551a5891274d186232118c9f9c9d2a18d97528
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SIGNED_PE_FILE_VERIFICATION
@@ -0,0 +1 @@
+CONFIG_SIGNED_PE_FILE_VERIFICATION=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_HARDENED b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_HARDENED
new file mode 100644
index 0000000000000000000000000000000000000000..630a3ed8150ea3b7b620b702efdb4ee768ac0e77
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_HARDENED
@@ -0,0 +1 @@
+# CONFIG_SLAB_FREELIST_HARDENED is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_RANDOM b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_RANDOM
new file mode 100644
index 0000000000000000000000000000000000000000..2ec53d02bc4f7c1ee56eef99dd3b655e08e31503
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_RANDOM
@@ -0,0 +1 @@
+CONFIG_SLAB_FREELIST_RANDOM=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_MERGE_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_MERGE_DEFAULT
new file mode 100644
index 0000000000000000000000000000000000000000..2baeb5938e8fad91b1214b1f009c3cda6617c9db
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_MERGE_DEFAULT
@@ -0,0 +1 @@
+# CONFIG_SLAB_MERGE_DEFAULT is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SLIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLIP
new file mode 100644
index 0000000000000000000000000000000000000000..86e448e492ee24a1695b9c605f36b45aab07fd08
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLIP
@@ -0,0 +1 @@
+CONFIG_SLIP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SMC_DIAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_SMC_DIAG
new file mode 100644
index 0000000000000000000000000000000000000000..728cb1e4f36e720b59abcd26a0a5146f125c040d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SMC_DIAG
@@ -0,0 +1 @@
+CONFIG_SMC_DIAG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SOFT_WATCHDOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_SOFT_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..0e0d36a4806dea7d28e04d788449f515c90e0ce2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SOFT_WATCHDOG
@@ -0,0 +1 @@
+CONFIG_SOFT_WATCHDOG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SPI_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_SPI_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..eb5fcb37be3d7eeea553fb535f82ce9092687ae4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SPI_DEBUG
@@ -0,0 +1 @@
+# CONFIG_SPI_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_4K_DEVBLK_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_4K_DEVBLK_SIZE
new file mode 100644
index 0000000000000000000000000000000000000000..575ec850104727557a6efa5a0ff7528362f6cdb5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_4K_DEVBLK_SIZE
@@ -0,0 +1 @@
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU
new file mode 100644
index 0000000000000000000000000000000000000000..79f750f7e14e1a583e7c6765e8121d07b365779b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU
@@ -0,0 +1 @@
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_EMBEDDED b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_EMBEDDED
new file mode 100644
index 0000000000000000000000000000000000000000..a28449ed1fc2fc17578834f9f72186bab037572a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_EMBEDDED
@@ -0,0 +1 @@
+# CONFIG_SQUASHFS_EMBEDDED is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_CACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_CACHE
new file mode 100644
index 0000000000000000000000000000000000000000..fc22b2a90f809bde29e9334d1f5ca7f781901bed
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_CACHE
@@ -0,0 +1 @@
+# CONFIG_SQUASHFS_FILE_CACHE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_DIRECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_DIRECT
new file mode 100644
index 0000000000000000000000000000000000000000..dee95e704627d8b79fb95031db4d3c6e3128be6a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_DIRECT
@@ -0,0 +1 @@
+CONFIG_SQUASHFS_FILE_DIRECT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE
new file mode 100644
index 0000000000000000000000000000000000000000..16cdab759a61c2da3aa829b7160bed581a172f30
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE
@@ -0,0 +1 @@
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_LZO b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_LZO
new file mode 100644
index 0000000000000000000000000000000000000000..8b3e6f9333055070d89670196830b093326bbbc3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_LZO
@@ -0,0 +1 @@
+CONFIG_SQUASHFS_LZO=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XATTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XATTR
new file mode 100644
index 0000000000000000000000000000000000000000..81b21b94c54f854f4c2295d38e937032abfcf595
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XATTR
@@ -0,0 +1 @@
+CONFIG_SQUASHFS_XATTR=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XZ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XZ
new file mode 100644
index 0000000000000000000000000000000000000000..2a9b9bd461e34dee8d50920ef37f4153f677b164
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XZ
@@ -0,0 +1 @@
+CONFIG_SQUASHFS_XZ=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZLIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZLIB
new file mode 100644
index 0000000000000000000000000000000000000000..dcb246a8ef265956cc513a374f59b82d1925fe1b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZLIB
@@ -0,0 +1 @@
+CONFIG_SQUASHFS_ZLIB=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZSTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZSTD
new file mode 100644
index 0000000000000000000000000000000000000000..e4707f831f67b87ccaa36dbbac8d32152aad9f1f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZSTD
@@ -0,0 +1 @@
+# CONFIG_SQUASHFS_ZSTD is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR
new file mode 100644
index 0000000000000000000000000000000000000000..b5942a55150872617b9c6a60c269e456ef9a0d44
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR
@@ -0,0 +1 @@
+CONFIG_STACKPROTECTOR=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR_STRONG b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR_STRONG
new file mode 100644
index 0000000000000000000000000000000000000000..6c885445ee6871ae9fe4d2f18a5e1136c346f322
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR_STRONG
@@ -0,0 +1 @@
+CONFIG_STACKPROTECTOR_STRONG=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKTRACE_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKTRACE_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..d7f613555a9036ced26730ee52bf20d091c0279c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKTRACE_SUPPORT
@@ -0,0 +1 @@
+CONFIG_STACKTRACE_SUPPORT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STACK_TRACER b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACK_TRACER
new file mode 100644
index 0000000000000000000000000000000000000000..643bacdcd269006a74ea570d88141c44d9ede257
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACK_TRACER
@@ -0,0 +1 @@
+CONFIG_STACK_TRACER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_KEYS_SELFTEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_KEYS_SELFTEST
new file mode 100644
index 0000000000000000000000000000000000000000..09446dc77d7776abf75a131be92d5d4908302fa6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_KEYS_SELFTEST
@@ -0,0 +1 @@
+# CONFIG_STATIC_KEYS_SELFTEST is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_USERMODEHELPER b/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_USERMODEHELPER
new file mode 100644
index 0000000000000000000000000000000000000000..9cfc00a5ffb82f6b7015e52156da6698a4c30fef
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_USERMODEHELPER
@@ -0,0 +1 @@
+# CONFIG_STATIC_USERMODEHELPER is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STRICT_DEVMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_STRICT_DEVMEM
new file mode 100644
index 0000000000000000000000000000000000000000..f2c0c3a6122c97f7b9be294b75d0ee04d1ebf5bb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STRICT_DEVMEM
@@ -0,0 +1 @@
+CONFIG_STRICT_DEVMEM=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STRIP_ASM_SYMS b/anolis/configs/L1-RECOMMEND/default/CONFIG_STRIP_ASM_SYMS
new file mode 100644
index 0000000000000000000000000000000000000000..274455389126a13c96b7637ef3c41776aa57e997
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STRIP_ASM_SYMS
@@ -0,0 +1 @@
+CONFIG_STRIP_ASM_SYMS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..448eb69eaa31a7443ce77d316395f928d44a97a1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_DEBUG
@@ -0,0 +1 @@
+CONFIG_SUNRPC_DEBUG=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_XPRT_RDMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_XPRT_RDMA
new file mode 100644
index 0000000000000000000000000000000000000000..b85162d8f7f0a370beb8ab68fccbe9b26f7b179a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_XPRT_RDMA
@@ -0,0 +1 @@
+CONFIG_SUNRPC_XPRT_RDMA=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND
new file mode 100644
index 0000000000000000000000000000000000000000..68d22d8bdce546582f4e3adbb0fc44f24d79f195
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND
@@ -0,0 +1 @@
+CONFIG_SUSPEND=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND_FREEZER b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND_FREEZER
new file mode 100644
index 0000000000000000000000000000000000000000..f816da7cd0bad40a043474ab00d1884351b3287c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND_FREEZER
@@ -0,0 +1 @@
+CONFIG_SUSPEND_FREEZER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYMBOLIC_ERRNAME b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYMBOLIC_ERRNAME
new file mode 100644
index 0000000000000000000000000000000000000000..237264c75aa9860295394309856040084befaae1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYMBOLIC_ERRNAME
@@ -0,0 +1 @@
+CONFIG_SYMBOLIC_ERRNAME=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNC_FILE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNC_FILE
new file mode 100644
index 0000000000000000000000000000000000000000..391ab547b458dc5fb10af03881ed660afaf2afbd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNC_FILE
@@ -0,0 +1 @@
+CONFIG_SYNC_FILE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNTH_EVENTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNTH_EVENTS
new file mode 100644
index 0000000000000000000000000000000000000000..6d49aea46556812a7ab13865e90784a1e676eb31
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNTH_EVENTS
@@ -0,0 +1 @@
+CONFIG_SYNTH_EVENTS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFB_SIMPLEFB b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFB_SIMPLEFB
new file mode 100644
index 0000000000000000000000000000000000000000..6ae3e10a489ec242535a7c7a950ae6a8369e47d5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFB_SIMPLEFB
@@ -0,0 +1 @@
+# CONFIG_SYSFB_SIMPLEFB is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFS_SYSCALL b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFS_SYSCALL
new file mode 100644
index 0000000000000000000000000000000000000000..0876b76976804db7ed4ef1c38a4fd0677b5adeb2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFS_SYSCALL
@@ -0,0 +1 @@
+CONFIG_SYSFS_SYSCALL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEMPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEMPORT
new file mode 100644
index 0000000000000000000000000000000000000000..edb77a634b9212562d45d534997190111c5dc6d8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEMPORT
@@ -0,0 +1 @@
+# CONFIG_SYSTEMPORT is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_HASH_LIST b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_HASH_LIST
new file mode 100644
index 0000000000000000000000000000000000000000..858e87e78a9cef2264bff1e4ea70fb6fef584c26
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_HASH_LIST
@@ -0,0 +1 @@
+CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_KEYRING b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_KEYRING
new file mode 100644
index 0000000000000000000000000000000000000000..4fcc4b31e966967f757cbf83f20fc2c14643195e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_KEYRING
@@ -0,0 +1 @@
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_DATA_VERIFICATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_DATA_VERIFICATION
new file mode 100644
index 0000000000000000000000000000000000000000..0c264f7d40ee19bd7cb5186a510023c0920cf930
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_DATA_VERIFICATION
@@ -0,0 +1 @@
+CONFIG_SYSTEM_DATA_VERIFICATION=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE
new file mode 100644
index 0000000000000000000000000000000000000000..a831f7ab182011d033d5e4c140de495bd3b60e31
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE
@@ -0,0 +1 @@
+CONFIG_SYSTEM_EXTRA_CERTIFICATE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE
new file mode 100644
index 0000000000000000000000000000000000000000..a270cb857dfeb9aab256eeacc35961154fab926c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE
@@ -0,0 +1 @@
+CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_REVOCATION_LIST b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_REVOCATION_LIST
new file mode 100644
index 0000000000000000000000000000000000000000..8e8438fd1a83e169c4f2ef97d1b545b5957c67b2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_REVOCATION_LIST
@@ -0,0 +1 @@
+# CONFIG_SYSTEM_REVOCATION_LIST is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSVIPC_SYSCTL b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSVIPC_SYSCTL
new file mode 100644
index 0000000000000000000000000000000000000000..ac42a5fcc8b4f6e0320ba3ef10ffd16679322c4a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSVIPC_SYSCTL
@@ -0,0 +1 @@
+CONFIG_SYSVIPC_SYSCTL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TASKSTATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASKSTATS
new file mode 100644
index 0000000000000000000000000000000000000000..2e4f141d57891cd40c31da16bfbb90a2776cd154
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASKSTATS
@@ -0,0 +1 @@
+CONFIG_TASKSTATS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_DELAY_ACCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_DELAY_ACCT
new file mode 100644
index 0000000000000000000000000000000000000000..11205fe20ac2a7ba516a796ebd5c39c822ae64db
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_DELAY_ACCT
@@ -0,0 +1 @@
+CONFIG_TASK_DELAY_ACCT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_IO_ACCOUNTING b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_IO_ACCOUNTING
new file mode 100644
index 0000000000000000000000000000000000000000..7c36f88980aa0138a42b70341eb2d0778b2539f5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_IO_ACCOUNTING
@@ -0,0 +1 @@
+CONFIG_TASK_IO_ACCOUNTING=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_XACCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_XACCT
new file mode 100644
index 0000000000000000000000000000000000000000..8b61b9cb0b12386a91bf4e84f98e9ba29f9347fd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_XACCT
@@ -0,0 +1 @@
+CONFIG_TASK_XACCT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_ATMEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_ATMEL
new file mode 100644
index 0000000000000000000000000000000000000000..05d7d17d4c7dbcffeafc2d604ce494636701ecd5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_ATMEL
@@ -0,0 +1 @@
+CONFIG_TCG_ATMEL=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_CRB b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_CRB
new file mode 100644
index 0000000000000000000000000000000000000000..a36cd309dc2e69f758943e250f8ebaff0285cf9d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_CRB
@@ -0,0 +1 @@
+CONFIG_TCG_CRB=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_TIS_CORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_TIS_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..fc7a623eee39aae38ab1122c5eeb19278151ebd8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_TIS_CORE
@@ -0,0 +1 @@
+CONFIG_TCG_TIS_CORE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_FILEIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_FILEIO
new file mode 100644
index 0000000000000000000000000000000000000000..cab5895c1dcca8cba60acb0123869b2e2b033aca
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_FILEIO
@@ -0,0 +1 @@
+CONFIG_TCM_FILEIO=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_IBLOCK b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_IBLOCK
new file mode 100644
index 0000000000000000000000000000000000000000..0eccf352f957630f7a447a8f842017ef30aab4c8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_IBLOCK
@@ -0,0 +1 @@
+CONFIG_TCM_IBLOCK=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_PSCSI b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_PSCSI
new file mode 100644
index 0000000000000000000000000000000000000000..5fa5e2b3a720e2fa47bbd4b9ac24e13892d4808b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_PSCSI
@@ -0,0 +1 @@
+CONFIG_TCM_PSCSI=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_USER2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_USER2
new file mode 100644
index 0000000000000000000000000000000000000000..df07ef1d435a1d4b2135a92737c7de170a2fc7f4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_USER2
@@ -0,0 +1 @@
+CONFIG_TCM_USER2=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_BIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_BIC
new file mode 100644
index 0000000000000000000000000000000000000000..82e23b7f3f14475693d0ff57f026e4aca33f7df9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_BIC
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_BIC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_DCTCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_DCTCP
new file mode 100644
index 0000000000000000000000000000000000000000..f9aa892d883fa349091a0cbae0ef1c37f3b226f7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_DCTCP
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_DCTCP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HSTCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HSTCP
new file mode 100644
index 0000000000000000000000000000000000000000..6f546faad4733351df4262ec7948f559429f5b32
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HSTCP
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_HSTCP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HTCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HTCP
new file mode 100644
index 0000000000000000000000000000000000000000..c68bb11a6f3efe7025bd0455a67b2143104d9590
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HTCP
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_HTCP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HYBLA b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HYBLA
new file mode 100644
index 0000000000000000000000000000000000000000..82892dbfdc1931a587a986b9a0ebd5091eba2c86
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HYBLA
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_HYBLA=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_ILLINOIS b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_ILLINOIS
new file mode 100644
index 0000000000000000000000000000000000000000..7dbbe73774e9b6154bc883cadd745484a7eda8e4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_ILLINOIS
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_ILLINOIS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_LP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_LP
new file mode 100644
index 0000000000000000000000000000000000000000..1136111b9aca424a63ffeec717d11106c6125730
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_LP
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_LP=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_NV b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_NV
new file mode 100644
index 0000000000000000000000000000000000000000..2994a802500919eaddf7f20a17f12511cac12394
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_NV
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_NV=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_SCALABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_SCALABLE
new file mode 100644
index 0000000000000000000000000000000000000000..d5b5bf020c0ac7addd3a5d3585def170ef31db57
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_SCALABLE
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_SCALABLE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VEGAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VEGAS
new file mode 100644
index 0000000000000000000000000000000000000000..a584ed4f125d2dc4e8b5a8cac652744ddce8a834
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VEGAS
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_VEGAS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VENO b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VENO
new file mode 100644
index 0000000000000000000000000000000000000000..e450df2f939a87f1306d117b06792efbf20bccae
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VENO
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_VENO=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_WESTWOOD b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_WESTWOOD
new file mode 100644
index 0000000000000000000000000000000000000000..ca233b9df5b7d826ced0b52b337b4fe9c1b7673a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_WESTWOOD
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_WESTWOOD=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_YEAH b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_YEAH
new file mode 100644
index 0000000000000000000000000000000000000000..c2db3694198bd202da8d7e8ce2e51778d91fa9e0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_YEAH
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_YEAH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_MD5SIG b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_MD5SIG
new file mode 100644
index 0000000000000000000000000000000000000000..b92ad2fb56b63f24db935f987ff540116d700334
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_MD5SIG
@@ -0,0 +1 @@
+CONFIG_TCP_MD5SIG=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TEE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEE
new file mode 100644
index 0000000000000000000000000000000000000000..85be673c532e0f367c48f1cae7cf286d8c2102d4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEE
@@ -0,0 +1 @@
+CONFIG_TEE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_BPF b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_BPF
new file mode 100644
index 0000000000000000000000000000000000000000..c5ddc0080fbad9b75d11de5d659be6227fd55a40
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_BPF
@@ -0,0 +1 @@
+CONFIG_TEST_BPF=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_LIVEPATCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_LIVEPATCH
new file mode 100644
index 0000000000000000000000000000000000000000..0dd7700464a819795bdb3ac2e74acff1012eabf4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_LIVEPATCH
@@ -0,0 +1 @@
+CONFIG_TEST_LIVEPATCH=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE
new file mode 100644
index 0000000000000000000000000000000000000000..83801238f6ada4727ec7984185f8b8c837744ac3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE
@@ -0,0 +1 @@
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE
new file mode 100644
index 0000000000000000000000000000000000000000..4acf93b3675dea8d2db5b23a64574bda4e579a51
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE
@@ -0,0 +1 @@
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE
new file mode 100644
index 0000000000000000000000000000000000000000..e70c564b44dc9500c272cbaa13193c42ea5f0fec
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE
@@ -0,0 +1 @@
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS
new file mode 100644
index 0000000000000000000000000000000000000000..9288765d6192c1c20d2c00cd936e06d4c722ba6f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS
@@ -0,0 +1 @@
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_FAIR_SHARE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_FAIR_SHARE
new file mode 100644
index 0000000000000000000000000000000000000000..b32c155900636ec6040478de1cc343358133da56
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_FAIR_SHARE
@@ -0,0 +1 @@
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_STEP_WISE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_STEP_WISE
new file mode 100644
index 0000000000000000000000000000000000000000..614bc305eed9686e0c07153762e1b5c80b36b10d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_STEP_WISE
@@ -0,0 +1 @@
+CONFIG_THERMAL_GOV_STEP_WISE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_USER_SPACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_USER_SPACE
new file mode 100644
index 0000000000000000000000000000000000000000..040cf394731899f9f5130a2181ea328857927a49
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_USER_SPACE
@@ -0,0 +1 @@
+CONFIG_THERMAL_GOV_USER_SPACE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_HWMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_HWMON
new file mode 100644
index 0000000000000000000000000000000000000000..3404084e04751c1e67cc979f0d3b24d1f853cf76
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_HWMON
@@ -0,0 +1 @@
+CONFIG_THERMAL_HWMON=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3
new file mode 100644
index 0000000000000000000000000000000000000000..673c01b6a2bbb42d9c734c6ac1ab0cfa26866b60
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3
@@ -0,0 +1 @@
+CONFIG_TIGON3=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3_HWMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3_HWMON
new file mode 100644
index 0000000000000000000000000000000000000000..31215b5556b1b841bbe046c8691be308c264e9f4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3_HWMON
@@ -0,0 +1 @@
+CONFIG_TIGON3_HWMON=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TIMERLAT_TRACER b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIMERLAT_TRACER
new file mode 100644
index 0000000000000000000000000000000000000000..560c834542ecfe035b72b20b2c1a437c34ac1297
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIMERLAT_TRACER
@@ -0,0 +1 @@
+CONFIG_TIMERLAT_TRACER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TIME_NS b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIME_NS
new file mode 100644
index 0000000000000000000000000000000000000000..4480620f6f4972d993cedf3470ab22ac948033b0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIME_NS
@@ -0,0 +1 @@
+CONFIG_TIME_NS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_DEVICE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_DEVICE
new file mode 100644
index 0000000000000000000000000000000000000000..7a19aef6ae49264b050118792ac2ef8141a21246
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_DEVICE
@@ -0,0 +1 @@
+CONFIG_TLS_DEVICE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_TOE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_TOE
new file mode 100644
index 0000000000000000000000000000000000000000..a6c7df399b4ac014e1d5a657484f84e59fbdd46a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_TOE
@@ -0,0 +1 @@
+# CONFIG_TLS_TOE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_INODE64 b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_INODE64
new file mode 100644
index 0000000000000000000000000000000000000000..cce8a2274ff4d7f7b4ddc77c20248f74a766b2c9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_INODE64
@@ -0,0 +1 @@
+# CONFIG_TMPFS_INODE64 is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_POSIX_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_POSIX_ACL
new file mode 100644
index 0000000000000000000000000000000000000000..2c4c8f4e8efc20609c7fdacfb7dc9042f2d1b3cb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_POSIX_ACL
@@ -0,0 +1 @@
+CONFIG_TMPFS_POSIX_ACL=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_XATTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_XATTR
new file mode 100644
index 0000000000000000000000000000000000000000..c83e77775eab6b6bbffc99dbf0cf8634cfbf3283
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_XATTR
@@ -0,0 +1 @@
+CONFIG_TMPFS_XATTR=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT
new file mode 100644
index 0000000000000000000000000000000000000000..31429b63de4670cf5bfe3411821974d1a40c14fc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT
@@ -0,0 +1 @@
+CONFIG_TRACER_SNAPSHOT=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP
new file mode 100644
index 0000000000000000000000000000000000000000..83708605ff2517386a88bac9a027fab4e435d068
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP
@@ -0,0 +1 @@
+# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACING_MAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACING_MAP
new file mode 100644
index 0000000000000000000000000000000000000000..e463a5ad9ef8817a136c176d3ede328dbaddd373
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACING_MAP
@@ -0,0 +1 @@
+CONFIG_TRACING_MAP=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
new file mode 100644
index 0000000000000000000000000000000000000000..65f6432d9c1555e927a23fe15ed109d6908eaf27
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
@@ -0,0 +1 @@
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_MADVISE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
new file mode 100644
index 0000000000000000000000000000000000000000..018db0a0f4e215d898a8c6d64e81725c71217eb8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
@@ -0,0 +1 @@
+# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS
new file mode 100644
index 0000000000000000000000000000000000000000..921580124211b2aabead5830df14ae5bce10c3cd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS
@@ -0,0 +1 @@
+CONFIG_TRUSTED_KEYS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS_TPM b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS_TPM
new file mode 100644
index 0000000000000000000000000000000000000000..66895051f2137b584ca05364428b200ce30ab3b2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS_TPM
@@ -0,0 +1 @@
+CONFIG_TRUSTED_KEYS_TPM=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UBSAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_UBSAN
new file mode 100644
index 0000000000000000000000000000000000000000..ef973c71d610c13e9e596c7d8af4b09179dff878
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UBSAN
@@ -0,0 +1 @@
+# CONFIG_UBSAN is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UCLAMP_TASK b/anolis/configs/L1-RECOMMEND/default/CONFIG_UCLAMP_TASK
new file mode 100644
index 0000000000000000000000000000000000000000..aea06191db20f4eee347c722c6a286155fe24954
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UCLAMP_TASK
@@ -0,0 +1 @@
+# CONFIG_UCLAMP_TASK is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UDF_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_UDF_FS
new file mode 100644
index 0000000000000000000000000000000000000000..4c8e1e829b2fb7b618117fb2f201acfd47f82754
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UDF_FS
@@ -0,0 +1 @@
+CONFIG_UDF_FS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UID16 b/anolis/configs/L1-RECOMMEND/default/CONFIG_UID16
new file mode 100644
index 0000000000000000000000000000000000000000..c766b9089ef7f7da2f9d1470e28caaa7d610df2f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UID16
@@ -0,0 +1 @@
+CONFIG_UID16=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_AEC b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_AEC
new file mode 100644
index 0000000000000000000000000000000000000000..2ee61ddb0a6f7f843b6cfa324ddcbac779a8577e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_AEC
@@ -0,0 +1 @@
+CONFIG_UIO_AEC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_CIF b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_CIF
new file mode 100644
index 0000000000000000000000000000000000000000..6ed58ca58c930fed06ac63275942822edf9387b4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_CIF
@@ -0,0 +1 @@
+CONFIG_UIO_CIF=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_DMEM_GENIRQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_DMEM_GENIRQ
new file mode 100644
index 0000000000000000000000000000000000000000..2ad111cbe32a509f53b19e9e6f038712909edc81
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_DMEM_GENIRQ
@@ -0,0 +1 @@
+# CONFIG_UIO_DMEM_GENIRQ is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_MF624 b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_MF624
new file mode 100644
index 0000000000000000000000000000000000000000..4a912c48646807ea0d0359062788b9b92bfb9b59
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_MF624
@@ -0,0 +1 @@
+# CONFIG_UIO_MF624 is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_NETX b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_NETX
new file mode 100644
index 0000000000000000000000000000000000000000..faca8ad872227b88aa83243a9022ccce5495224b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_NETX
@@ -0,0 +1 @@
+# CONFIG_UIO_NETX is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PCI_GENERIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PCI_GENERIC
new file mode 100644
index 0000000000000000000000000000000000000000..46f7ab71c13b62c2c41a7abeb4c0f630c822feb4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PCI_GENERIC
@@ -0,0 +1 @@
+CONFIG_UIO_PCI_GENERIC=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PDRV_GENIRQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PDRV_GENIRQ
new file mode 100644
index 0000000000000000000000000000000000000000..393d57f1d8a7c621d5695ca2a96fecbc6d2cfd06
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PDRV_GENIRQ
@@ -0,0 +1 @@
+CONFIG_UIO_PDRV_GENIRQ=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PRUSS b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PRUSS
new file mode 100644
index 0000000000000000000000000000000000000000..e1549ea24f0f63d135c60c9fd0faa550761990ba
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PRUSS
@@ -0,0 +1 @@
+# CONFIG_UIO_PRUSS is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_SERCOS3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_SERCOS3
new file mode 100644
index 0000000000000000000000000000000000000000..287c23a643c0155c45b9a9209f17db8b52602b92
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_SERCOS3
@@ -0,0 +1 @@
+CONFIG_UIO_SERCOS3=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UNIX_DIAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_UNIX_DIAG
new file mode 100644
index 0000000000000000000000000000000000000000..51b222763d7862132006f3c29fb9469c5f9aac22
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UNIX_DIAG
@@ -0,0 +1 @@
+CONFIG_UNIX_DIAG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UPROBES b/anolis/configs/L1-RECOMMEND/default/CONFIG_UPROBES
new file mode 100644
index 0000000000000000000000000000000000000000..4822a082f41bc25c6bfedebafeaeaf3dc5c4278e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UPROBES
@@ -0,0 +1 @@
+CONFIG_UPROBES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_NET_DRIVERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_NET_DRIVERS
new file mode 100644
index 0000000000000000000000000000000000000000..954a18989f3b06f604a17b6c613684720aae02a7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_NET_DRIVERS
@@ -0,0 +1 @@
+CONFIG_USB_NET_DRIVERS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_PCI b/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..26c372a3a8fb18eef2f8be964ea40368ef58f774
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_PCI
@@ -0,0 +1 @@
+CONFIG_USB_PCI=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_USELIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_USELIB
new file mode 100644
index 0000000000000000000000000000000000000000..a7491a18a14234f1454b36386f0183e697495bc1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_USELIB
@@ -0,0 +1 @@
+# CONFIG_USELIB is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_USER_NS b/anolis/configs/L1-RECOMMEND/default/CONFIG_USER_NS
new file mode 100644
index 0000000000000000000000000000000000000000..416bd53ce982874948c3e84a2d12d952bf1c1977
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_USER_NS
@@ -0,0 +1 @@
+CONFIG_USER_NS=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VDPA b/anolis/configs/L1-RECOMMEND/default/CONFIG_VDPA
new file mode 100644
index 0000000000000000000000000000000000000000..1cf31b0878988959e819cce25d9ae3e7b639f973
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VDPA
@@ -0,0 +1 @@
+# CONFIG_VDPA is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_CONTAINER b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_CONTAINER
new file mode 100644
index 0000000000000000000000000000000000000000..72cb59edff824677cc72430129ddb5c8b8a39050
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_CONTAINER
@@ -0,0 +1 @@
+CONFIG_VFIO_CONTAINER=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_GROUP b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_GROUP
new file mode 100644
index 0000000000000000000000000000000000000000..eaa917e807e370c1481fee03489b99991928acbe
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_GROUP
@@ -0,0 +1 @@
+CONFIG_VFIO_GROUP=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_NOIOMMU b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_NOIOMMU
new file mode 100644
index 0000000000000000000000000000000000000000..09ba4d1cd5b6ef8dfae256012e0072fdaac7cc23
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_NOIOMMU
@@ -0,0 +1 @@
+CONFIG_VFIO_NOIOMMU=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_PCI_CORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_PCI_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..ebc0606bd5c0a2278579afe270c00a363b774830
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_PCI_CORE
@@ -0,0 +1 @@
+CONFIG_VFIO_PCI_CORE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VGA_ARB_MAX_GPUS b/anolis/configs/L1-RECOMMEND/default/CONFIG_VGA_ARB_MAX_GPUS
new file mode 100644
index 0000000000000000000000000000000000000000..e66aea99da8d54ab315a29fbf867c9af2f50d23a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VGA_ARB_MAX_GPUS
@@ -0,0 +1 @@
+CONFIG_VGA_ARB_MAX_GPUS=64
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_MENU b/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_MENU
new file mode 100644
index 0000000000000000000000000000000000000000..00536a2b000fe4668c6fa104efbf5fd348094074
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_MENU
@@ -0,0 +1 @@
+CONFIG_VHOST_MENU=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_SCSI b/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_SCSI
new file mode 100644
index 0000000000000000000000000000000000000000..3f733834e424b3771e86979c5648a9ecc7593349
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_SCSI
@@ -0,0 +1 @@
+CONFIG_VHOST_SCSI=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_DMA_SHARED_BUFFER b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_DMA_SHARED_BUFFER
new file mode 100644
index 0000000000000000000000000000000000000000..fe206fc5b16747f0ffe4446266534b4a7eb62c6c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_DMA_SHARED_BUFFER
@@ -0,0 +1 @@
+CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_INPUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_INPUT
new file mode 100644
index 0000000000000000000000000000000000000000..87130e4039d60924d9309fcfaabf60df5d693cab
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_INPUT
@@ -0,0 +1 @@
+CONFIG_VIRTIO_INPUT=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES
new file mode 100644
index 0000000000000000000000000000000000000000..4066b9c11b29d7c07a79c5d8b854a0338361738d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES
@@ -0,0 +1 @@
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_PCI_LEGACY b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_PCI_LEGACY
new file mode 100644
index 0000000000000000000000000000000000000000..dc31c9947a653e469f4b758ad65ceb01560064da
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_PCI_LEGACY
@@ -0,0 +1 @@
+CONFIG_VIRTIO_PCI_LEGACY=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS
new file mode 100644
index 0000000000000000000000000000000000000000..a14419e0fdab9d1d85f7833df64f0fa07c679902
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS
@@ -0,0 +1 @@
+CONFIG_VIRTIO_VSOCKETS=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS_COMMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..3ae644bb11783f4ce6e62ab6696773f4d83e6ae2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS_COMMON
@@ -0,0 +1 @@
+CONFIG_VIRTIO_VSOCKETS_COMMON=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRT_FUSE b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRT_FUSE
new file mode 100644
index 0000000000000000000000000000000000000000..ebd7105d198e3f14aba079a158248a5f64ca6b15
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRT_FUSE
@@ -0,0 +1 @@
+CONFIG_VIRT_FUSE=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_DIAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_DIAG
new file mode 100644
index 0000000000000000000000000000000000000000..89432b02b65ffd37096f73a35647807ea5040f87
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_DIAG
@@ -0,0 +1 @@
+CONFIG_VSOCKETS_DIAG=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_LOOPBACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_LOOPBACK
new file mode 100644
index 0000000000000000000000000000000000000000..e07891f5bb9f823fec30e4020f680cfd24cf8f56
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_LOOPBACK
@@ -0,0 +1 @@
+CONFIG_VSOCKETS_LOOPBACK=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKMON
new file mode 100644
index 0000000000000000000000000000000000000000..82594c48836939fdad8a8f5267befecf6a23d497
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKMON
@@ -0,0 +1 @@
+CONFIG_VSOCKMON=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VT_HW_CONSOLE_BINDING b/anolis/configs/L1-RECOMMEND/default/CONFIG_VT_HW_CONSOLE_BINDING
new file mode 100644
index 0000000000000000000000000000000000000000..04f0917d8871831c0855644616118f1d1ed38be5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VT_HW_CONSOLE_BINDING
@@ -0,0 +1 @@
+CONFIG_VT_HW_CONSOLE_BINDING=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VXLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_VXLAN
new file mode 100644
index 0000000000000000000000000000000000000000..2aa404d8dabaf92c3fda238bcde7c3443216e904
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VXLAN
@@ -0,0 +1 @@
+CONFIG_VXLAN=m
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_WAN
new file mode 100644
index 0000000000000000000000000000000000000000..215b93ea87c69975417dc49e0998c5ffb7bfd3be
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WAN
@@ -0,0 +1 @@
+CONFIG_WAN=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WARN_ALL_UNSEEDED_RANDOM b/anolis/configs/L1-RECOMMEND/default/CONFIG_WARN_ALL_UNSEEDED_RANDOM
new file mode 100644
index 0000000000000000000000000000000000000000..5244e5664f1a1ca5c158e4c56c8ad61ccc247594
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WARN_ALL_UNSEEDED_RANDOM
@@ -0,0 +1 @@
+# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_CORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..e70e50b705621f5d8b988355a33f522e57631cf9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_CORE
@@ -0,0 +1 @@
+CONFIG_WATCHDOG_CORE=y
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED
new file mode 100644
index 0000000000000000000000000000000000000000..2cdeb93aa9651a38fa89e5a9fb66b59e081768f4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED
@@ -0,0 +1 @@
+CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_NOWAYOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_NOWAYOUT new file mode 100644 index 0000000000000000000000000000000000000000..0eb79411b060fa652147a547d67943b88623712b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_NOWAYOUT @@ -0,0 +1 @@ +# CONFIG_WATCHDOG_NOWAYOUT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_OPEN_TIMEOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_OPEN_TIMEOUT new file mode 100644 index 0000000000000000000000000000000000000000..3f1d15f6e29346e29da4a694acc9adf2949d0f07 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_OPEN_TIMEOUT @@ -0,0 +1 @@ +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_PRETIMEOUT_GOV b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_PRETIMEOUT_GOV new file mode 100644 index 0000000000000000000000000000000000000000..9002114dfe15b883da294b9ec21e9db6fb343bc1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_PRETIMEOUT_GOV @@ -0,0 +1 @@ +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_SYSFS new file mode 100644 index 0000000000000000000000000000000000000000..7eb5ab9fc39fd355b687c88f964a419478af1073 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_SYSFS @@ -0,0 +1 @@ +CONFIG_WATCHDOG_SYSFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD b/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD new file mode 100644 index 0000000000000000000000000000000000000000..f4bb670b4a41ab2a73dc7cce6c9c7379de5db0a7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD @@ -0,0 +1 @@ +CONFIG_WIREGUARD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..bcd81132829bd2cdf34b1f27b4616183401e8633 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD_DEBUG @@ -0,0 +1 @@ +# CONFIG_WIREGUARD_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WQ_WATCHDOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_WQ_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..459eb0f7989bfeafb6c741eb20c3ed49b012f029 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WQ_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_WQ_WATCHDOG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XDP_SOCKETS_DIAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_XDP_SOCKETS_DIAG new file mode 100644 index 0000000000000000000000000000000000000000..99fbcd0958fb31c2cdb42716199e7c19e0c6ca15 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XDP_SOCKETS_DIAG @@ -0,0 +1 @@ +CONFIG_XDP_SOCKETS_DIAG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_INTERFACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_INTERFACE new file mode 100644 index 0000000000000000000000000000000000000000..d808c272dad077e18cfda0e23ac9b9ebdf550f15 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_INTERFACE @@ -0,0 +1 @@ +CONFIG_XFRM_INTERFACE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_MIGRATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_MIGRATE new file mode 100644 index 
0000000000000000000000000000000000000000..a0e21902312bf0e223c6ff62973831c3c180b73c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_MIGRATE @@ -0,0 +1 @@ +CONFIG_XFRM_MIGRATE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_STATISTICS b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_STATISTICS new file mode 100644 index 0000000000000000000000000000000000000000..27c7ef7a527f731bf05a141a67b3f6c4e9ba962e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_STATISTICS @@ -0,0 +1 @@ +CONFIG_XFRM_STATISTICS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_SUB_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_SUB_POLICY new file mode 100644 index 0000000000000000000000000000000000000000..75c36f7a8f922b7d259998c8b2e0518ae97fc460 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_SUB_POLICY @@ -0,0 +1 @@ +CONFIG_XFRM_SUB_POLICY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..63b3c69b30081a1c7557b5d563eea34b073fd91c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_DEBUG @@ -0,0 +1 @@ +# CONFIG_XFS_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_ONLINE_SCRUB b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_ONLINE_SCRUB new file mode 100644 index 0000000000000000000000000000000000000000..dd4ff1f484a6325d4f9b8a41b4d709fe02fb1072 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_ONLINE_SCRUB @@ -0,0 +1 @@ +# CONFIG_XFS_ONLINE_SCRUB is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_POSIX_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_POSIX_ACL new file mode 100644 index 0000000000000000000000000000000000000000..3ec55ecb1320dcd83963357066f4d11029e0d01f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_POSIX_ACL @@ -0,0 +1 @@ +CONFIG_XFS_POSIX_ACL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_QUOTA b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_QUOTA new file mode 100644 index 0000000000000000000000000000000000000000..34757192adde5286bc5fdc61a6016064952a2976 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_QUOTA @@ -0,0 +1 @@ +CONFIG_XFS_QUOTA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_RT b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_RT new file mode 100644 index 0000000000000000000000000000000000000000..c6b8fc0c559f20b814cb109fa7e0ee5d06f07ea1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_RT @@ -0,0 +1 @@ +# CONFIG_XFS_RT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_SUPPORT_V4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_SUPPORT_V4 new file mode 100644 index 0000000000000000000000000000000000000000..12315e1fff2b9f51fed7cfa6867fdb1eb49334d3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_SUPPORT_V4 @@ -0,0 +1 @@ +CONFIG_XFS_SUPPORT_V4=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_WARN b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_WARN new file mode 100644 index 0000000000000000000000000000000000000000..abdb2fd861226ce770b22016b07707e6bae3676c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_WARN @@ -0,0 +1 @@ +# CONFIG_XFS_WARN is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XOR_BLOCKS b/anolis/configs/L1-RECOMMEND/default/CONFIG_XOR_BLOCKS new file mode 100644 index 
0000000000000000000000000000000000000000..584c49c1818d6b0b92a7f9afac57b11aaee20430 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XOR_BLOCKS @@ -0,0 +1 @@ +CONFIG_XOR_BLOCKS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARM b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARM new file mode 100644 index 0000000000000000000000000000000000000000..52cbc2d1097d94f794665eaed4ab7f460088070a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARM @@ -0,0 +1 @@ +CONFIG_XZ_DEC_ARM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARMTHUMB b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARMTHUMB new file mode 100644 index 0000000000000000000000000000000000000000..50b05d1159cda8896dc0800e01637869e23ff58c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARMTHUMB @@ -0,0 +1 @@ +CONFIG_XZ_DEC_ARMTHUMB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_BCJ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_BCJ new file mode 100644 index 0000000000000000000000000000000000000000..c7d5e04283b1b54abdac750094f8c36fda0798dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_BCJ @@ -0,0 +1 @@ +CONFIG_XZ_DEC_BCJ=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_IA64 b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_IA64 new file mode 100644 index 0000000000000000000000000000000000000000..34a4cb72e9a9a202c8d3faabc3647e96249fbdb8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_IA64 @@ -0,0 +1 @@ +CONFIG_XZ_DEC_IA64=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_MICROLZMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_MICROLZMA new file mode 100644 index 0000000000000000000000000000000000000000..514ce6c253d7b6694217fa079ef1125edbfca923 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_MICROLZMA @@ -0,0 +1 @@ +CONFIG_XZ_DEC_MICROLZMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_POWERPC b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_POWERPC new file mode 100644 index 0000000000000000000000000000000000000000..118f59ba1638c052713d52e1458df131765350d1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_POWERPC @@ -0,0 +1 @@ +CONFIG_XZ_DEC_POWERPC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_SPARC b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_SPARC new file mode 100644 index 0000000000000000000000000000000000000000..328ae24e23b6cc8555200f1ee86af19d3ab06d2c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_SPARC @@ -0,0 +1 @@ +CONFIG_XZ_DEC_SPARC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_TEST new file mode 100644 index 0000000000000000000000000000000000000000..bc04be3e6cfc5187ce8bddac193399fa7ecb7f61 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_TEST @@ -0,0 +1 @@ +# CONFIG_XZ_DEC_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_X86 b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_X86 new file mode 100644 index 0000000000000000000000000000000000000000..1be802334c8bf4d40e879de33b4f63511b4fcde6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_X86 @@ -0,0 +1 @@ +CONFIG_XZ_DEC_X86=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_ETH b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_ETH new file mode 100644 index 
0000000000000000000000000000000000000000..343284c7c0de1b02f4b8edc32c91ee4abf953064 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_ETH @@ -0,0 +1 @@ +CONFIG_YUNSILICON_XSC_ETH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_PCI b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_PCI new file mode 100644 index 0000000000000000000000000000000000000000..3a3fbc36325a9d9f48e10e158b197dc936d0924e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_PCI @@ -0,0 +1 @@ +CONFIG_YUNSILICON_XSC_PCI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZBUD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZBUD new file mode 100644 index 0000000000000000000000000000000000000000..87b4f7fecc0f0566102911e1f4db8c40981b39d5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZBUD @@ -0,0 +1 @@ +CONFIG_ZBUD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZERO_CALL_USED_REGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZERO_CALL_USED_REGS new file mode 100644 index 0000000000000000000000000000000000000000..edba7cc04736e1a5edafe4b8b60c6ab177340532 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZERO_CALL_USED_REGS @@ -0,0 +1 @@ +# CONFIG_ZERO_CALL_USED_REGS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZISOFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZISOFS new file mode 100644 index 0000000000000000000000000000000000000000..ff0b58565dc27fd2902be8d1b80aaaaf764f17f3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZISOFS @@ -0,0 +1 @@ +CONFIG_ZISOFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZPOOL b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZPOOL new file mode 100644 index 0000000000000000000000000000000000000000..a4b814c14924071e45c8ddd9283077260879668a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZPOOL @@ -0,0 +1 @@ +CONFIG_ZPOOL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP new file mode 100644 index 0000000000000000000000000000000000000000..b611bad3a790d3b06184b5e994612d888ef13f97 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP @@ -0,0 +1 @@ +CONFIG_ZRAM_DEF_COMP="lzo-rle" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4 new file mode 100644 index 0000000000000000000000000000000000000000..92b2902592cd200cc96d55a776fef13ff324a4e1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4 @@ -0,0 +1 @@ +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4HC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4HC new file mode 100644 index 0000000000000000000000000000000000000000..24855f5243ea3f6fd1ed5fe7839aa3fc8516e64c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4HC @@ -0,0 +1 @@ +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZO b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZO new file mode 100644 index 0000000000000000000000000000000000000000..d07adc06e60969ea52c740b92f28553b315fafee --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZO @@ -0,0 +1 @@ +# CONFIG_ZRAM_DEF_COMP_LZO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZORLE 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZORLE new file mode 100644 index 0000000000000000000000000000000000000000..2da229a019387dc2e39d2790f7c28b48e65791bd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZORLE @@ -0,0 +1 @@ +CONFIG_ZRAM_DEF_COMP_LZORLE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_ZSTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_ZSTD new file mode 100644 index 0000000000000000000000000000000000000000..16073be8e0822ca33c98b8fbacb39532f0b78ac5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_ZSTD @@ -0,0 +1 @@ +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MULTI_COMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MULTI_COMP new file mode 100644 index 0000000000000000000000000000000000000000..121ae124dbbf8584b60c15092734a3f3639da6dc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MULTI_COMP @@ -0,0 +1 @@ +# CONFIG_ZRAM_MULTI_COMP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_WRITEBACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_WRITEBACK new file mode 100644 index 0000000000000000000000000000000000000000..b1fcb086bf736df8839c0052fe41a9556c2c89de --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_WRITEBACK @@ -0,0 +1 @@ +CONFIG_ZRAM_WRITEBACK=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC new file mode 100644 index 0000000000000000000000000000000000000000..c716bc70c4f28b5d988f261d6aa374aec100de0f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC @@ -0,0 +1 @@ +CONFIG_ZSMALLOC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_CHAIN_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_CHAIN_SIZE new file mode 100644 index 0000000000000000000000000000000000000000..8924c75fa1b33d11489d1ad4ea6eebfbbe20a201 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_CHAIN_SIZE @@ -0,0 +1 @@ +CONFIG_ZSMALLOC_CHAIN_SIZE=8 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_STAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_STAT new file mode 100644 index 0000000000000000000000000000000000000000..3e024e092627c9ecbbd7bb9377670a6a6feeebe0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_STAT @@ -0,0 +1 @@ +CONFIG_ZSMALLOC_STAT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSTD_COMMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSTD_COMMON new file mode 100644 index 0000000000000000000000000000000000000000..8dd27a340d8188a6d7c770fdcbaa11c7309708d1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSTD_COMMON @@ -0,0 +1 @@ +CONFIG_ZSTD_COMMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP new file mode 100644 index 0000000000000000000000000000000000000000..64b92172dfd07a78b9b358463a4452a913284bc0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP @@ -0,0 +1 @@ +CONFIG_ZSWAP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..7a61b6fcde048768574fca923faa97fa2e93ce3a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT @@ -0,0 +1 @@ 
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 new file mode 100644 index 0000000000000000000000000000000000000000..3f2f042dc7b47a497cd03f1e8fede2a4c9a9a53c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 @@ -0,0 +1 @@ +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE new file mode 100644 index 0000000000000000000000000000000000000000..73b9be5b05999c25e04fa58eb31ae546a60cbf79 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE @@ -0,0 +1 @@ +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 new file mode 100644 index 0000000000000000000000000000000000000000..d15eadf94d124d0038732fa20879dd034aea0260 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 @@ -0,0 +1 @@ +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC new file mode 100644 index 0000000000000000000000000000000000000000..96763a4c4ecfaac7bfdb9b59de01a3e278d10689 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC @@ -0,0 +1 @@ +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO new file mode 100644 index 0000000000000000000000000000000000000000..c6af1aeb90fa7c2bd33e70cc09fd1197acac248b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO @@ -0,0 +1 @@ +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD new file mode 100644 index 0000000000000000000000000000000000000000..adcc3b94723e1d274478b09c02fb79df8a0b12f7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD @@ -0,0 +1 @@ +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_DEFAULT_ON new file mode 100644 index 0000000000000000000000000000000000000000..93a95edd6d53536b32422dfad738b9dbc7f9b420 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_DEFAULT_ON @@ -0,0 +1 @@ +# CONFIG_ZSWAP_DEFAULT_ON is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON new file mode 100644 index 0000000000000000000000000000000000000000..1792514c9b45d019f35c301537dcff13044fbe5c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON @@ -0,0 +1 @@ +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..753d56a0c343202e1dbaa56b017197dfc1a267c4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT @@ -0,0 +1 @@ +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD new file mode 100644 index 0000000000000000000000000000000000000000..15a8e22be2a02ca1d089280913a59fe25f4a5097 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD @@ -0,0 +1 @@ +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC new file mode 100644 index 0000000000000000000000000000000000000000..cb1392399f584479da1730c6187a030c6bcc7ed6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC @@ -0,0 +1 @@ +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_BGRT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_BGRT new file mode 100644 index 0000000000000000000000000000000000000000..13035dd8275b91f77429126211222e45656ba48c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_BGRT @@ -0,0 +1 @@ +CONFIG_ACPI_BGRT=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_DOCK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_DOCK new file mode 100644 index 0000000000000000000000000000000000000000..e4d916353f68c4f9fecc993415c7ae2536909ece --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_DOCK @@ -0,0 +1 @@ +CONFIG_ACPI_DOCK=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EC_DEBUGFS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EC_DEBUGFS new file mode 100644 index 0000000000000000000000000000000000000000..dfc6b278f4214412513b05823812b75f4175d4ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EC_DEBUGFS @@ -0,0 +1 @@ +CONFIG_ACPI_EC_DEBUGFS=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EXTLOG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EXTLOG new file mode 100644 index 0000000000000000000000000000000000000000..f1c760c4566cfa7adb8a75b198df92ba11bf016c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EXTLOG @@ -0,0 +1 @@ +CONFIG_ACPI_EXTLOG=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_PROCESSOR_AGGREGATOR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_PROCESSOR_AGGREGATOR new file mode 100644 index 0000000000000000000000000000000000000000..67779d4893c10b3051c2d9198530d8a898591ae8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_PROCESSOR_AGGREGATOR @@ -0,0 +1 @@ +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_REV_OVERRIDE_POSSIBLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_REV_OVERRIDE_POSSIBLE new file mode 100644 index 0000000000000000000000000000000000000000..021ea2f25134f6fa53263ec38fe26d4ad3816c3e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_REV_OVERRIDE_POSSIBLE @@ -0,0 +1 @@ +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SBS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SBS new file mode 100644 index 0000000000000000000000000000000000000000..82848bd17fc2dc2b3d60ed251eaf38340757548a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SBS @@ -0,0 +1 @@ 
+CONFIG_ACPI_SBS=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SLEEP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SLEEP new file mode 100644 index 0000000000000000000000000000000000000000..68612464dbe0f095fce96fbcdfdb7b30859a25ad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SLEEP @@ -0,0 +1 @@ +CONFIG_ACPI_SLEEP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_TAD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_TAD new file mode 100644 index 0000000000000000000000000000000000000000..9588df9878721a8bebe0c4b8322f390fd5582cfe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_TAD @@ -0,0 +1 @@ +CONFIG_ACPI_TAD=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_THERMAL_REL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_THERMAL_REL new file mode 100644 index 0000000000000000000000000000000000000000..e4794b0ebb4fa752b6c3017bbe904c20bcf762ed --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_THERMAL_REL @@ -0,0 +1 @@ +CONFIG_ACPI_THERMAL_REL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_WATCHDOG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..d82966e07f87de20339708db2d5bbe5d8276bd0a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_WATCHDOG @@ -0,0 +1 @@ +CONFIG_ACPI_WATCHDOG=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACQUIRE_WDT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACQUIRE_WDT new file mode 100644 index 0000000000000000000000000000000000000000..400222bb07ee9031e29e54a5bfd55cce452adee2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACQUIRE_WDT @@ -0,0 +1 @@ +# CONFIG_ACQUIRE_WDT is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ADVANTECH_WDT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ADVANTECH_WDT new file mode 100644 index 0000000000000000000000000000000000000000..07d7f5d17cf9af0dd0b676becca2196c8d8f0c47 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ADVANTECH_WDT @@ -0,0 +1 @@ +# CONFIG_ADVANTECH_WDT is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMDTEE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMDTEE new file mode 100644 index 0000000000000000000000000000000000000000..9f091c9f5fa1439b0d671c8c8f5ae85274c55b11 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMDTEE @@ -0,0 +1 @@ +CONFIG_AMDTEE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_HSMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_HSMP new file mode 100644 index 0000000000000000000000000000000000000000..bf5761a2d85136449d919efe0703d1197af6be91 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_HSMP @@ -0,0 +1 @@ +# CONFIG_AMD_HSMP is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU new file mode 100644 index 0000000000000000000000000000000000000000..ede0caaa0e3f4b54139d73a5888abba023007484 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU @@ -0,0 +1 @@ +CONFIG_AMD_IOMMU=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU_V2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU_V2 new file mode 100644 index 0000000000000000000000000000000000000000..bc103a7b9c35b2e163531cc57d9421e88a71fc45 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU_V2 @@ -0,0 +1 @@ +CONFIG_AMD_IOMMU_V2=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_MEM_ENCRYPT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_MEM_ENCRYPT new file mode 100644 index 
0000000000000000000000000000000000000000..f9eacfabc8062514cb8a90f25493743be09cecdb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_MEM_ENCRYPT @@ -0,0 +1 @@ +CONFIG_AMD_MEM_ENCRYPT=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_PTDMA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_PTDMA new file mode 100644 index 0000000000000000000000000000000000000000..227504d2134848465a54873caa204e71f1d3ed3f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_PTDMA @@ -0,0 +1 @@ +CONFIG_AMD_PTDMA=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_CPUIDLE_HALTPOLL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_CPUIDLE_HALTPOLL new file mode 100644 index 0000000000000000000000000000000000000000..87ff0c771e505e0054a614e8868efaffbf896e82 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_CPUIDLE_HALTPOLL @@ -0,0 +1 @@ +CONFIG_ARCH_CPUIDLE_HALTPOLL=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_MEMORY_PROBE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_MEMORY_PROBE new file mode 100644 index 0000000000000000000000000000000000000000..9b51745063dcaf5535f64245ddd514807144689c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_MEMORY_PROBE @@ -0,0 +1 @@ +# CONFIG_ARCH_MEMORY_PROBE is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ATA_OVER_ETH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ATA_OVER_ETH new file mode 100644 index 0000000000000000000000000000000000000000..f9a30b59a373373e773d68e809ea132982b83306 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ATA_OVER_ETH @@ -0,0 +1 @@ +CONFIG_ATA_OVER_ETH=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_BLK_DEV_PCIESSD_MTIP32XX b/anolis/configs/L1-RECOMMEND/x86/CONFIG_BLK_DEV_PCIESSD_MTIP32XX new file mode 100644 index 0000000000000000000000000000000000000000..4efb94f64d5c04cc1a602cba76ed699449cecfb9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_BLK_DEV_PCIESSD_MTIP32XX @@ -0,0 +1 @@ +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CALL_DEPTH_TRACKING b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CALL_DEPTH_TRACKING new file mode 100644 index 0000000000000000000000000000000000000000..2e0554f3cc89f71d651df96ba0a347dfe53ba11c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CALL_DEPTH_TRACKING @@ -0,0 +1 @@ +CONFIG_CALL_DEPTH_TRACKING=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CMA_SIZE_MBYTES b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CMA_SIZE_MBYTES new file mode 100644 index 0000000000000000000000000000000000000000..2fb4d4a552cb070a75c054dcde4402c946b8173b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CMA_SIZE_MBYTES @@ -0,0 +1 @@ +CONFIG_CMA_SIZE_MBYTES=0 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPUMASK_OFFSTACK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPUMASK_OFFSTACK new file mode 100644 index 0000000000000000000000000000000000000000..aa9b7f32309da950e7fc6a4cdb65814e23297d0b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPUMASK_OFFSTACK @@ -0,0 +1 @@ +CONFIG_CPUMASK_OFFSTACK=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBPB_ENTRY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBPB_ENTRY new file mode 100644 index 0000000000000000000000000000000000000000..64efeec23f9c75582276477cff1f30e692ba71e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBPB_ENTRY @@ -0,0 +1 @@ +CONFIG_CPU_IBPB_ENTRY=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBRS_ENTRY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBRS_ENTRY new file mode 100644 
index 0000000000000000000000000000000000000000..9ab5b99170aafe48238b30ff80e083140eda63f9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBRS_ENTRY @@ -0,0 +1 @@ +CONFIG_CPU_IBRS_ENTRY=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IDLE_GOV_HALTPOLL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IDLE_GOV_HALTPOLL new file mode 100644 index 0000000000000000000000000000000000000000..4e01ab97cd89bcf8395889d40b92ab8c3b501861 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IDLE_GOV_HALTPOLL @@ -0,0 +1 @@ +CONFIG_CPU_IDLE_GOV_HALTPOLL=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_SRSO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_SRSO new file mode 100644 index 0000000000000000000000000000000000000000..1512ec8e976608b9c73ed34447bdac42efb7dcdb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_SRSO @@ -0,0 +1 @@ +CONFIG_CPU_SRSO=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_UNRET_ENTRY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_UNRET_ENTRY new file mode 100644 index 0000000000000000000000000000000000000000..aa4a0eb5b6a46a8e3701b85a5225f07fc3f69303 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_UNRET_ENTRY @@ -0,0 +1 @@ +CONFIG_CPU_UNRET_ENTRY=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRASH_HOTPLUG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRASH_HOTPLUG new file mode 100644 index 0000000000000000000000000000000000000000..a723b9e5b45c9673c69da8b8153b502238834b17 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRASH_HOTPLUG @@ -0,0 +1 @@ +CONFIG_CRASH_HOTPLUG=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 new file mode 100644 index 0000000000000000000000000000000000000000..7eb1a4c0299a01b4cccb349589d2bd22a70871c4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AES_NI_INTEL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AES_NI_INTEL new file mode 100644 index 0000000000000000000000000000000000000000..7f29bd9bcf861825959131adf12f37d6d89f11b7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AES_NI_INTEL @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_NI_INTEL=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLAKE2S_X86 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLAKE2S_X86 new file mode 100644 index 0000000000000000000000000000000000000000..a9c552040aa0ec913995d411fda86a141e61765c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLAKE2S_X86 @@ -0,0 +1 @@ +CONFIG_CRYPTO_BLAKE2S_X86=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLOWFISH_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLOWFISH_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..52bdc5fdde9ecb02eb744a8c116bfb6117112e86 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLOWFISH_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_BLOWFISH_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..67edf8ddeb7348e5cfa058bd8f3c2efe3696b2b2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..6b7b873157d9d036537f318e2b5bf951e9c1f2a4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..b60b238f480c2cdd6fb6333d723c2863184f6435 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAMELLIA_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST5_AVX_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST5_AVX_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..a8afa374dad334467ee7b3135e400f8c4977d881 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST5_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAST5_AVX_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST6_AVX_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST6_AVX_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..5dbd020a95d894cb6783ef17174681c07b2c9280 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST6_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAST6_AVX_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CHACHA20_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CHACHA20_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..6131a7b56e90ac9633651324ec36f85c5ac7c517 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CHACHA20_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CHACHA20_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32C_INTEL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32C_INTEL new file mode 100644 index 0000000000000000000000000000000000000000..c500b4f55bf2f0166234f18e25a5a26b5e8e9895 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32C_INTEL @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRC32C_INTEL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32_PCLMUL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32_PCLMUL new file mode 100644 index 0000000000000000000000000000000000000000..c503f02e39440d7deb680f4a4af55ee511c865a0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32_PCLMUL @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRC32_PCLMUL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRCT10DIF_PCLMUL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRCT10DIF_PCLMUL new file mode 100644 index 0000000000000000000000000000000000000000..14e5d708b70aacefbe8a0055ce55b502f0eb13b1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRCT10DIF_PCLMUL @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519 new file mode 100644 index 0000000000000000000000000000000000000000..fc4c61ee3fc535fac1ebb8062287434a247b300f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_CURVE25519 is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519_X86 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519_X86 new 
file mode 100644 index 0000000000000000000000000000000000000000..19b41bb62262e79311ccea89a6d1ecf4161e38f4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519_X86 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CURVE25519_X86=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DES3_EDE_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DES3_EDE_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..fac68c6a1d23ce5c9b2db1033b6afacdf0da4822 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DES3_EDE_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_DES3_EDE_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_HCT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_HCT new file mode 100644 index 0000000000000000000000000000000000000000..e135e9e50ec761d5737b7be6f75d23d854cc478c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_HCT @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_HCT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO new file mode 100644 index 0000000000000000000000000000000000000000..3204078bac0bb68d00e5876b5dc31e48c00ca39c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_IAA_CRYPTO=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS new file mode 100644 index 0000000000000000000000000000000000000000..34817183f2daa237ebadf0d2422757bda2d57bec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT new file mode 100644 index 0000000000000000000000000000000000000000..06ec9df1867277def554d98204914e60b0bba737 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXX b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXX new file mode 100644 index 0000000000000000000000000000000000000000..9fcd620a9899f3e667a36e06990418d310eb0029 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXX @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_C3XXX=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXXVF b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXXVF new file mode 100644 index 0000000000000000000000000000000000000000..9e5c620530a06e3dc891938d687b95ac060d7646 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXXVF @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62X b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62X new file mode 100644 index 0000000000000000000000000000000000000000..2583c47f29eaaadf7e5b288faa58e6a92bd7f86a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62X @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_C62X=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62XVF b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62XVF new file mode 100644 index 0000000000000000000000000000000000000000..589fd67c5ab1034782ca3e6e54d1dccdab7e0a14 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62XVF @@ -0,0 +1 @@ 
+CONFIG_CRYPTO_DEV_QAT_C62XVF=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCC new file mode 100644 index 0000000000000000000000000000000000000000..3d37c7af56586b1cf13e1371ada6815e0384e8ee --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCC @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF new file mode 100644 index 0000000000000000000000000000000000000000..8035712989f136aa2e5d916cd26187e469e97a3b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_PSP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_PSP new file mode 100644 index 0000000000000000000000000000000000000000..7b0c6490a36cadd82000e10121df5c7f55074341 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_PSP @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_SP_PSP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_TSSE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_TSSE new file mode 100644 index 0000000000000000000000000000000000000000..110860ed4b4a0910896d01cc612914ae0e4b7d90 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_TSSE @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_TSSE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN new file mode 100644 index 0000000000000000000000000000000000000000..b17515fdcbce4c4d4b02c23941fc0d34b34489e7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_ZHAOXIN=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_AES b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_AES new file mode 100644 index 0000000000000000000000000000000000000000..3619496e7f7086166f63776adf36d8a05e0c24e2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_AES @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_ZHAOXIN_AES=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_SHA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_SHA new file mode 100644 index 0000000000000000000000000000000000000000..1d4629abb0493891cb279254585f8897878abf2e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_SHA @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_ZHAOXIN_SHA=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_ECDSA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_ECDSA new file mode 100644 index 0000000000000000000000000000000000000000..efbe82ad8c8bd20272fb5dfc718d993a9dbe3023 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_ECDSA @@ -0,0 +1 @@ +CONFIG_CRYPTO_ECDSA=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL new file mode 100644 index 0000000000000000000000000000000000000000..3c9561aee00bda5d5c90927c22d0f5b539ce6da5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL @@ -0,0 +1 @@ +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_AVX2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_AVX2 new file mode 100644 index 
0000000000000000000000000000000000000000..c15f094b1d1f38f7ee7cd781c4e6418fca03d864
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_AVX2
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_SSE2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_SSE2
new file mode 100644
index 0000000000000000000000000000000000000000..69f3444b40dddb484836e675cd0653c0fc2eead1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_SSE2
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_POLY1305_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_POLY1305_X86_64
new file mode 100644
index 0000000000000000000000000000000000000000..b444d23171f7eff11afa9d8084ff4cedc59d7c71
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_POLY1305_X86_64
@@ -0,0 +1 @@
+CONFIG_CRYPTO_POLY1305_X86_64=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX2_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX2_X86_64
new file mode 100644
index 0000000000000000000000000000000000000000..604ebff651db4ae992c5f57b626f49923e092dff
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX2_X86_64
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX_X86_64
new file mode 100644
index 0000000000000000000000000000000000000000..e3ab517c6dd67ec13170cd0b26e6cc86f13342f9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX_X86_64
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_SSE2_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_SSE2_X86_64
new file mode 100644
index 0000000000000000000000000000000000000000..6706b3873b46f6dba8b3c43c12c2fe068f3796e8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_SSE2_X86_64
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA1_SSSE3 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA1_SSSE3
new file mode 100644
index 0000000000000000000000000000000000000000..dc0e1b2a6ac580fdbb3fbad8324c51a7ba5f1cb4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA1_SSSE3
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SHA1_SSSE3=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA256_SSSE3 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA256_SSSE3
new file mode 100644
index 0000000000000000000000000000000000000000..4969f8f458d4d712e7450a7d7616493c3f409dc4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA256_SSSE3
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SHA256_SSSE3=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA512_SSSE3 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA512_SSSE3
new file mode 100644
index 0000000000000000000000000000000000000000..227c91b5bfd9f0d9b4bf463de42c29781f0f5daf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA512_SSSE3
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SHA512_SSSE3=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM2_ZHAOXIN_GMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM2_ZHAOXIN_GMI
new file mode 100644
index 0000000000000000000000000000000000000000..6242432eb58bdda284ecc05c018ae1258e730b05
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM2_ZHAOXIN_GMI
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM3_ZHAOXIN_GMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM3_ZHAOXIN_GMI
new file mode 100644
index 0000000000000000000000000000000000000000..98554908a1fe139bbe2bf29d90a1cd986b12c722
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM3_ZHAOXIN_GMI
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM4_ZHAOXIN_GMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM4_ZHAOXIN_GMI
new file mode 100644
index 0000000000000000000000000000000000000000..327d40661bd433337cc6aac78f9fabbfa61fd135
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM4_ZHAOXIN_GMI
@@ -0,0 +1 @@
+CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_AVX_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_AVX_X86_64
new file mode 100644
index 0000000000000000000000000000000000000000..abdc67677392007189f28f92f846ae23feefbad6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_AVX_X86_64
@@ -0,0 +1 @@
+CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64
new file mode 100644
index 0000000000000000000000000000000000000000..cf51269aa37091f3752b9cc1d83619685a9818bd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64
@@ -0,0 +1 @@
+CONFIG_CRYPTO_TWOFISH_X86_64=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64_3WAY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64_3WAY
new file mode 100644
index 0000000000000000000000000000000000000000..3dbb7102f7372c0fafd0c02bc8ec6de1c66d487c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64_3WAY
@@ -0,0 +1 @@
+CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CSV_GUEST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CSV_GUEST
new file mode 100644
index 0000000000000000000000000000000000000000..df53c0727d75368a47be6850a66269428c395e10
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CSV_GUEST
@@ -0,0 +1 @@
+CONFIG_CSV_GUEST=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_BOOT_PARAMS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_BOOT_PARAMS
new file mode 100644
index 0000000000000000000000000000000000000000..fa10c0ea793edafbd07da9822754a50c74ae0a74
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_BOOT_PARAMS
@@ -0,0 +1 @@
+CONFIG_DEBUG_BOOT_PARAMS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_PERF_USE_VMALLOC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_PERF_USE_VMALLOC
new file mode 100644
index 0000000000000000000000000000000000000000..a1d865d212ae0a73087d99cd387a7fe72db06a31
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_PERF_USE_VMALLOC
@@ -0,0 +1 @@
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVPORT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVPORT
new file mode 100644
index 0000000000000000000000000000000000000000..ff170aad12a642d9d4a3ffa9679b950f93d79f50
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVPORT
@@ -0,0 +1 @@
+CONFIG_DEVPORT=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX
new file mode 100644
index 0000000000000000000000000000000000000000..e7bd7d00db515605cd91363eafdab6b95ccd04b6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX
@@ -0,0 +1 @@
+CONFIG_DEV_DAX=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM
new file mode 100644
index 0000000000000000000000000000000000000000..e301a496b274dcc7319728951107a2692a093111
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM
@@ -0,0 +1 @@
+CONFIG_DEV_DAX_KMEM=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_PMEM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_PMEM
new file mode 100644
index 0000000000000000000000000000000000000000..603159c2b2ef75704c79cf9dd8769a5b32cf9274
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_PMEM
@@ -0,0 +1 @@
+CONFIG_DEV_DAX_PMEM=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DLM_DEBUG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DLM_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..002a38454b3b4f92f135491fc39fa7def91f781b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DLM_DEBUG
@@ -0,0 +1 @@
+CONFIG_DLM_DEBUG=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DMABUF_HEAPS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DMABUF_HEAPS
new file mode 100644
index 0000000000000000000000000000000000000000..06c5f4cf1acc9c2dba846188956afd5efee52bc8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DMABUF_HEAPS
@@ -0,0 +1 @@
+# CONFIG_DMABUF_HEAPS is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_CIK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_CIK
new file mode 100644
index 0000000000000000000000000000000000000000..e184e53af3c04706b37384f9fb039f931bd8fc9c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_CIK
@@ -0,0 +1 @@
+# CONFIG_DRM_AMDGPU_CIK is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_USERPTR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_USERPTR
new file mode 100644
index 0000000000000000000000000000000000000000..06dba06e3d32739f1afc6d656f03d3b05653f4b5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_USERPTR
@@ -0,0 +1 @@
+# CONFIG_DRM_AMDGPU_USERPTR is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMD_ACP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMD_ACP
new file mode 100644
index 0000000000000000000000000000000000000000..6d5ecf147b34e386c721d51b73f4e82ad5675198
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMD_ACP
@@ -0,0 +1 @@
+# CONFIG_DRM_AMD_ACP is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_DP_AUX_CHARDEV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_DP_AUX_CHARDEV
new file mode 100644
index 0000000000000000000000000000000000000000..9f59149e02bc638e91d970b6f79431dccf76dfc9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_DP_AUX_CHARDEV
@@ -0,0 +1 @@
+# CONFIG_DRM_DP_AUX_CHARDEV is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_GMA500 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_GMA500
new file mode 100644
index 0000000000000000000000000000000000000000..38ba652000c6f169756d6c61941ec3d44e874553
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_GMA500
@@ -0,0 +1 @@
+CONFIG_DRM_GMA500=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915
new file mode 100644
index 0000000000000000000000000000000000000000..1034adf42222a0ff996fe5ef14f303dbbe6e8ce9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915
@@ -0,0 +1 @@
+CONFIG_DRM_I915=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_CAPTURE_ERROR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_CAPTURE_ERROR
new file mode 100644
index 0000000000000000000000000000000000000000..d85c7203563a9f447d437b7eaf1275e655082bb7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_CAPTURE_ERROR
@@ -0,0 +1 @@
+CONFIG_DRM_I915_CAPTURE_ERROR=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_COMPRESS_ERROR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_COMPRESS_ERROR
new file mode 100644
index 0000000000000000000000000000000000000000..6d6c129f5d27ef84d2eb246d19a0d821dfc7b2db
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_COMPRESS_ERROR
@@ -0,0 +1 @@
+CONFIG_DRM_I915_COMPRESS_ERROR=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FENCE_TIMEOUT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FENCE_TIMEOUT
new file mode 100644
index 0000000000000000000000000000000000000000..04c3d575ecdbd8d376965d4481e85dcaa1c6e0ed
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FENCE_TIMEOUT
@@ -0,0 +1 @@
+CONFIG_DRM_I915_FENCE_TIMEOUT=10000
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FORCE_PROBE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FORCE_PROBE
new file mode 100644
index 0000000000000000000000000000000000000000..660b7a1eabdcebf0ac2449fea9028d91fd9341a7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FORCE_PROBE
@@ -0,0 +1 @@
+CONFIG_DRM_I915_FORCE_PROBE=""
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT
new file mode 100644
index 0000000000000000000000000000000000000000..c6af3c3ccb96749788e7bcc1de475cb135e73101
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT
@@ -0,0 +1 @@
+CONFIG_DRM_I915_GVT=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT_KVMGT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT_KVMGT
new file mode 100644
index 0000000000000000000000000000000000000000..016a41e8a679017c2a582f6a5a1560ee75cad779
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT_KVMGT
@@ -0,0 +1 @@
+CONFIG_DRM_I915_GVT_KVMGT=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_HEARTBEAT_INTERVAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_HEARTBEAT_INTERVAL
new file mode 100644
index 0000000000000000000000000000000000000000..6db87334d09d53072e401c752574a9510d64ec88
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_HEARTBEAT_INTERVAL
@@ -0,0 +1 @@
+CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT
new file mode 100644
index 0000000000000000000000000000000000000000..150c2e288609a61ee8344ad535c1573ea450d264
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT
@@ -0,0 +1 @@
+CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT
new file mode 100644
index 0000000000000000000000000000000000000000..455aa7c8427ff0094b30de6458d2bbc2b6deeadb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT
@@ -0,0 +1 @@
+CONFIG_DRM_I915_PREEMPT_TIMEOUT=640
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_STOP_TIMEOUT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_STOP_TIMEOUT
new file mode 100644
index 0000000000000000000000000000000000000000..f987ddca213d8fcd0186347a12b2554005393c21
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_STOP_TIMEOUT
@@ -0,0 +1 @@
+CONFIG_DRM_I915_STOP_TIMEOUT=100
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_TIMESLICE_DURATION b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_TIMESLICE_DURATION
new file mode 100644
index 0000000000000000000000000000000000000000..67b628a5b97e4e34b5e716056918c52bdcbed94e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_TIMESLICE_DURATION
@@ -0,0 +1 @@
+CONFIG_DRM_I915_TIMESLICE_DURATION=1
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND
new file mode 100644
index 0000000000000000000000000000000000000000..90b96974302b3df8454d9109036ac859874b4bb0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND
@@ -0,0 +1 @@
+CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERPTR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERPTR
new file mode 100644
index 0000000000000000000000000000000000000000..4f253abc1402d2191ef6021b574fc8a63dedcdec
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERPTR
@@ -0,0 +1 @@
+CONFIG_DRM_I915_USERPTR=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_VMWGFX b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_VMWGFX
new file mode 100644
index 0000000000000000000000000000000000000000..12fe6b15f13e4b2a50d0dc27aa7337b142cd4c60
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_VMWGFX
@@ -0,0 +1 @@
+CONFIG_DRM_VMWGFX=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DWC_PCIE_PMU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DWC_PCIE_PMU
new file mode 100644
index 0000000000000000000000000000000000000000..66e35fcc546e08e194f1931162c07bf37ecd9831
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DWC_PCIE_PMU
@@ -0,0 +1 @@
+# CONFIG_DWC_PCIE_PMU is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DW_DMAC_PCI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DW_DMAC_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..2a2239b2440f9b3be218246e6703dfcef6882eb3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DW_DMAC_PCI
@@ -0,0 +1 @@
+CONFIG_DW_DMAC_PCI=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_PHYSICAL_MASK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_PHYSICAL_MASK
new file mode 100644
index 0000000000000000000000000000000000000000..1d9f0653942f432189e12a0c8f8bcb9afebb6ffa
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_PHYSICAL_MASK
@@ -0,0 +1 @@
+CONFIG_DYNAMIC_PHYSICAL_MASK=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_SIGFRAME b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_SIGFRAME
new file mode 100644
index 0000000000000000000000000000000000000000..0f0491574115cc5c43ccf547e9320150d7a35e28
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_SIGFRAME
@@ -0,0 +1 @@
+CONFIG_DYNAMIC_SIGFRAME=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_E1000E_HWTS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_E1000E_HWTS
new file mode 100644
index 0000000000000000000000000000000000000000..9b00c96b49a93a653b9ba124ebf6572b246d87b1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_E1000E_HWTS
@@ -0,0 +1 @@
+CONFIG_E1000E_HWTS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK
new file mode 100644
index 0000000000000000000000000000000000000000..d8c2487be3e2daee94a91089e9982f3166341cbc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK
@@ -0,0 +1 @@
+CONFIG_EARLY_PRINTK=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_DBGP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_DBGP
new file mode 100644
index 0000000000000000000000000000000000000000..b8016d0cb890c10402b1912152cf6c251d9e8138
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_DBGP
@@ -0,0 +1 @@
+CONFIG_EARLY_PRINTK_DBGP=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_USB_XDBC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_USB_XDBC
new file mode 100644
index 0000000000000000000000000000000000000000..47e8f409068454ad5c365af9070cdfdbaffa9b26
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_USB_XDBC
@@ -0,0 +1 @@
+CONFIG_EARLY_PRINTK_USB_XDBC=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_AMD64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_AMD64
new file mode 100644
index 0000000000000000000000000000000000000000..667e1c0abd323b9582a7c83ad825b1202849c863
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_AMD64
@@ -0,0 +1 @@
+CONFIG_EDAC_AMD64=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_DECODE_MCE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_DECODE_MCE
new file mode 100644
index 0000000000000000000000000000000000000000..e408064da251e3fd8636f1253b3187d423bb5203
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_DECODE_MCE
@@ -0,0 +1 @@
+CONFIG_EDAC_DECODE_MCE=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_E752X b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_E752X
new file mode 100644
index 0000000000000000000000000000000000000000..4340af994e25b4c5fc8e2b7e313e62f4309fe9e8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_E752X
@@ -0,0 +1 @@
+CONFIG_EDAC_E752X=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I10NM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I10NM
new file mode 100644
index 0000000000000000000000000000000000000000..8214abca5a094c8012a81d6edf24a891d7b2094c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I10NM
@@ -0,0 +1 @@
+CONFIG_EDAC_I10NM=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3000 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3000
new file mode 100644
index 0000000000000000000000000000000000000000..4f30c3dbb607df397c47b771f6090b483183799e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3000
@@ -0,0 +1 @@
+CONFIG_EDAC_I3000=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3200 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3200
new file mode 100644
index 0000000000000000000000000000000000000000..eaf5b33007195660691b01d1c6e085ee1fec84ac
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3200
@@ -0,0 +1 @@
+CONFIG_EDAC_I3200=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5100 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5100
new file mode 100644
index 0000000000000000000000000000000000000000..255b23ed32dfe0c59327634a405cc63b2dddc033
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5100
@@ -0,0 +1 @@
+CONFIG_EDAC_I5100=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5400 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5400
new file mode 100644
index 0000000000000000000000000000000000000000..5cd55fc1266034f4f1e5eaebffc3c30e96d0a2c0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5400
@@ -0,0 +1 @@
+CONFIG_EDAC_I5400=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7300 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7300
new file mode 100644
index 0000000000000000000000000000000000000000..d1c3314d74b0a112697059996eee6849b4df7c1e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7300
@@ -0,0 +1 @@
+CONFIG_EDAC_I7300=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7CORE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7CORE
new file mode 100644
index 0000000000000000000000000000000000000000..b49e129797259316a7e0d102967f862de08a7305
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7CORE
@@ -0,0 +1 @@
+CONFIG_EDAC_I7CORE=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I82975X b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I82975X
new file mode 100644
index 0000000000000000000000000000000000000000..0fff85c4a34c6a64d9561ea00c05529f608601ee
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I82975X
@@ -0,0 +1 @@
+CONFIG_EDAC_I82975X=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SBRIDGE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SBRIDGE
new file mode 100644
index 0000000000000000000000000000000000000000..18dc9b53a86af08237e5584910c84de98d6bc386
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SBRIDGE
@@ -0,0 +1 @@
+CONFIG_EDAC_SBRIDGE=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SKX b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SKX
new file mode 100644
index 0000000000000000000000000000000000000000..33f417f1e3a93c225a76c00e2bd80f899c87335f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SKX
@@ -0,0 +1 @@
+CONFIG_EDAC_SKX=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDD
new file mode 100644
index 0000000000000000000000000000000000000000..9b8a635dea5b87631c2e778a9b021b5e4f64f9bd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDD
@@ -0,0 +1 @@
+CONFIG_EDD=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_COCO_SECRET b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_COCO_SECRET
new file mode 100644
index 0000000000000000000000000000000000000000..c2da2e0bfdb276f80beec953afa86ecf8c2f5d96
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_COCO_SECRET
@@ -0,0 +1 @@
+CONFIG_EFI_COCO_SECRET=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_MIXED b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_MIXED
new file mode 100644
index 0000000000000000000000000000000000000000..3eb4a43bf55b19e98f74357d5491dc9bc1729196
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_MIXED
@@ -0,0 +1 @@
+CONFIG_EFI_MIXED=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RCI2_TABLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RCI2_TABLE
new file mode 100644
index 0000000000000000000000000000000000000000..083461929710e10c0ee135069d1b3f5580821ba0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RCI2_TABLE
@@ -0,0 +1 @@
+CONFIG_EFI_RCI2_TABLE=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RUNTIME_MAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RUNTIME_MAP
new file mode 100644
index 0000000000000000000000000000000000000000..3a4462f3949bf8c3860a5bcac93ddd576b57d207
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RUNTIME_MAP
@@ -0,0 +1 @@
+CONFIG_EFI_RUNTIME_MAP=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_SECRET b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_SECRET
new file mode 100644
index 0000000000000000000000000000000000000000..7c4a3fbc212f1e194e201b8134c14869d8b8cfb0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_SECRET
@@ -0,0 +1 @@
+CONFIG_EFI_SECRET=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_HYPERV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_HYPERV
new file mode 100644
index 0000000000000000000000000000000000000000..06c30d0e72cb0d9b6e5fed7b7cac1821392df71f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_HYPERV
@@ -0,0 +1 @@
+CONFIG_FB_HYPERV=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SIMPLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SIMPLE
new file mode 100644
index 0000000000000000000000000000000000000000..3ae9be22cdf974218fe55b89647fd8cd51028645
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SIMPLE
@@ -0,0 +1 @@
+# CONFIG_FB_SIMPLE is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SSD1307 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SSD1307
new file mode 100644
index 0000000000000000000000000000000000000000..cddd16bb73980f4526af0bf3e8536fe4185b3ebc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SSD1307
@@ -0,0 +1 @@
+# CONFIG_FB_SSD1307 is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_VESA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_VESA
new file mode 100644
index 0000000000000000000000000000000000000000..3ef695ad37e99f2d657a34209b1d5d07fb2fbb9a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_VESA
@@ -0,0 +1 @@
+CONFIG_FB_VESA=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FCOE_FNIC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FCOE_FNIC
new file mode 100644
index 0000000000000000000000000000000000000000..197c5e7045e98a925155bff131591d9d0dcd5dcb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FCOE_FNIC
@@ -0,0 +1 @@
+CONFIG_FCOE_FNIC=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_EDID b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_EDID
new file mode 100644
index 0000000000000000000000000000000000000000..ad5ccc9592e99e7946e6e7ccf6602f762a181262
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_EDID
@@ -0,0 +1 @@
+CONFIG_FIRMWARE_EDID=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_MEMMAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_MEMMAP
new file mode 100644
index 0000000000000000000000000000000000000000..6d06dab85813ea7c1cb34b8ba91c29752d613f5b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_MEMMAP
@@ -0,0 +1 @@
+CONFIG_FIRMWARE_MEMMAP=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUNCTION_PROFILER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUNCTION_PROFILER
new file mode 100644
index 0000000000000000000000000000000000000000..de1cfdb543b9dc76191d14bc1d31b17f45b1ec89
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUNCTION_PROFILER
@@ -0,0 +1 @@
+CONFIG_FUNCTION_PROFILER=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUSION_CTL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUSION_CTL
new file mode 100644
index 0000000000000000000000000000000000000000..82bf50128277c0204f445df13ccc68ef6c526a8b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUSION_CTL
@@ -0,0 +1 @@
+CONFIG_FUSION_CTL=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS
new file mode 100644
index 0000000000000000000000000000000000000000..da865e89877f8352fa341a20415f9d9a1f7069b5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS
@@ -0,0 +1 @@
+CONFIG_FW_LOADER_COMPRESS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_XZ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_XZ
new file mode 100644
index 0000000000000000000000000000000000000000..fc7cc884ba613d9eabc058e8b79bd0f201fe163a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_XZ
@@ -0,0 +1 @@
+CONFIG_FW_LOADER_COMPRESS_XZ=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_ZSTD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_ZSTD
new file mode 100644
index 0000000000000000000000000000000000000000..71aa01bf1211ef30a972aab494dea97656d12c10
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_ZSTD
@@ -0,0 +1 @@
+CONFIG_FW_LOADER_COMPRESS_ZSTD=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_USER_HELPER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_USER_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..8ce27a4393153f190608de749e3df66383406f4b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_USER_HELPER
@@ -0,0 +1 @@
+CONFIG_FW_LOADER_USER_HELPER=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GART_IOMMU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GART_IOMMU
new file mode 100644
index 0000000000000000000000000000000000000000..c735af30c5df0e435f19645b6d4682c58d8617bd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GART_IOMMU
@@ -0,0 +1 @@
+# CONFIG_GART_IOMMU is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ADC_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ADC_THERMAL
new file mode 100644
index 0000000000000000000000000000000000000000..7719b8960eef84492ab784a3670cacc3f3bdf741
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ADC_THERMAL
@@ -0,0 +1 @@
+# CONFIG_GENERIC_ADC_THERMAL is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_CPU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_CPU
new file mode 100644
index 0000000000000000000000000000000000000000..9cd8d3177e2d7a3e9e54969b93d6292a7d07d918
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_CPU
@@ -0,0 +1 @@
+CONFIG_GENERIC_CPU=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ISA_DMA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ISA_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..01c1798573ce8f1584770e9bfd0d4b3d99a1a0be
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ISA_DMA
@@ -0,0 +1 @@
+CONFIG_GENERIC_ISA_DMA=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PENDING_IRQ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PENDING_IRQ
new file mode 100644
index 0000000000000000000000000000000000000000..b534c7167ab51ee8886701d98c4c414e1d5d5af2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PENDING_IRQ
@@ -0,0 +1 @@
+CONFIG_GENERIC_PENDING_IRQ=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS
new file mode 100644
index 0000000000000000000000000000000000000000..0ddd7115391abf82e55e1fd7ac9aa00907541b99
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS
@@ -0,0 +1 @@
+CONFIG_GFS2_FS=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS_LOCKING_DLM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS_LOCKING_DLM
new file mode 100644
index 0000000000000000000000000000000000000000..424a3046b157e1e0d40161c1e9be3502fac5b4f6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS_LOCKING_DLM
@@ -0,0 +1 @@
+CONFIG_GFS2_FS_LOCKING_DLM=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GPIO_GENERIC_PLATFORM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GPIO_GENERIC_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..d45d5bae2a11c6b1faa416f4c527ce3f4ec3ade2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GPIO_GENERIC_PLATFORM
@@ -0,0 +1 @@
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HALTPOLL_CPUIDLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HALTPOLL_CPUIDLE
new file mode 100644
index 0000000000000000000000000000000000000000..2a48c8bdc78ff93c36fee1173282b8107966887b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HALTPOLL_CPUIDLE
@@ -0,0 +1 @@
+CONFIG_HALTPOLL_CPUIDLE=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HANGCHECK_TIMER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HANGCHECK_TIMER
new file mode 100644
index 0000000000000000000000000000000000000000..74d6f3e7a5a4ca58a4b47d26fa51d78baa800511
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HANGCHECK_TIMER
@@ -0,0 +1 @@
+CONFIG_HANGCHECK_TIMER=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
new file mode 100644
index 0000000000000000000000000000000000000000..0c8134ea4cdc8273ef413d79258a83cd51fc9e9c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET
new file mode 100644
index 0000000000000000000000000000000000000000..9ac1b11db4c12f048dc993d00ff38ea7ba9d5a37
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET
@@ -0,0 +1 @@
+CONFIG_HPET=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET_MMAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET_MMAP
new file mode 100644
index 0000000000000000000000000000000000000000..d6eb1d36e01f944cff8729559f2ca38cb01b23ed
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET_MMAP
@@ -0,0 +1 @@
+CONFIG_HPET_MMAP=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HSA_AMD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HSA_AMD
new file mode 100644
index 0000000000000000000000000000000000000000..2515e018011e1783f24fdf4487208f47443ea5d9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HSA_AMD
@@ -0,0 +1 @@
+# CONFIG_HSA_AMD is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
new file mode 100644
index 0000000000000000000000000000000000000000..c4938b4fb501dca36db73c8b689b49b41feaefc7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
@@ -0,0 +1 @@
+CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON
new file mode 100644
index 0000000000000000000000000000000000000000..641af94e1b1bcaedca23c460ff2aa648ccb0050d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON
@@ -0,0 +1 @@
+CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_AMD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_AMD
new file mode 100644
index 0000000000000000000000000000000000000000..dd7a51f3d484ef6934151d849dc8f9d33c588578
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_AMD
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_AMD=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_INTEL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_INTEL
new file mode 100644
index 0000000000000000000000000000000000000000..8929685e6a4405a68554b929f950ad06b4227364
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_INTEL
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_INTEL=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_ZHAOXIN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_ZHAOXIN
new file mode 100644
index 0000000000000000000000000000000000000000..61ccb3bff141ad36c70ff0a8a94f4171de0721d2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_ZHAOXIN
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_ZHAOXIN=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_CSV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_CSV
new file mode 100644
index 0000000000000000000000000000000000000000..631fc6896012213bd5ba421abc5d7cc4a3072944
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_CSV
@@ -0,0 +1 @@
+CONFIG_HYGON_CSV=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_GM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_GM
new file mode 100644
index 0000000000000000000000000000000000000000..0266c4f60b322b427d54f9a19e4ad581901aec8c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_GM
@@ -0,0 +1 @@
+CONFIG_HYGON_GM=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_PSP2CPU_CMD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_PSP2CPU_CMD
new file mode 100644
index 0000000000000000000000000000000000000000..1c6ee94d90413d11131b822c68bac930da9e4f11
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_PSP2CPU_CMD
@@ -0,0 +1 @@
+CONFIG_HYGON_PSP2CPU_CMD=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERVISOR_GUEST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERVISOR_GUEST
new file mode 100644
index 0000000000000000000000000000000000000000..2770560d56a0c6710b637166d477621faf1450ed
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERVISOR_GUEST
@@ -0,0 +1 @@
+CONFIG_HYPERVISOR_GUEST=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_BALLOON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_BALLOON
new file mode 100644
index 0000000000000000000000000000000000000000..b1f01cbe4591c6e0c6d50546751e170e9dfa467a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_BALLOON
@@ -0,0 +1 @@
+CONFIG_HYPERV_BALLOON=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_IOMMU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_IOMMU
new file mode 100644
index 0000000000000000000000000000000000000000..2f259f2f3f42852838d2fc9cec215e8bc473f058
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_IOMMU
@@ -0,0 +1 @@
+CONFIG_HYPERV_IOMMU=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_UTILS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_UTILS
new file mode 100644
index 0000000000000000000000000000000000000000..9b8c0d2a473c7c5f34ffdaac6681ac96c00b4b6e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_UTILS
@@ -0,0 +1 @@
+CONFIG_HYPERV_UTILS=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_VSOCKETS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_VSOCKETS
new file mode 100644
index 0000000000000000000000000000000000000000..bd21cd6753b4d56c8ec50aeaca7da728892747de
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_VSOCKETS
@@ -0,0 +1 @@
+CONFIG_HYPERV_VSOCKETS=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_I2C_SLAVE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I2C_SLAVE
new file mode 100644
index 0000000000000000000000000000000000000000..364b36792f70eb1ec1fde2670f2e91a2b3752b8a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I2C_SLAVE
@@ -0,0 +1 @@
+# CONFIG_I2C_SLAVE is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_I40E_DCB b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I40E_DCB
new file mode 100644
index 0000000000000000000000000000000000000000..55eb7892030b290739491a73c4fcbd67ea9da2ee
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I40E_DCB
@@ -0,0 +1 @@
+CONFIG_I40E_DCB=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_I8K b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I8K
new file mode 100644
index 0000000000000000000000000000000000000000..fe2362aceea1127bf665ee0cf84539253be83cf7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I8K
@@ -0,0 +1 @@
+# CONFIG_I8K is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IA32_EMULATION b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IA32_EMULATION
new file mode 100644
index 0000000000000000000000000000000000000000..66c73dad00d7c77d27c16f6b96b9ced8ab0091de
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IA32_EMULATION
@@ -0,0 +1 @@
+CONFIG_IA32_EMULATION=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IGB_DCA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IGB_DCA
new file mode 100644
index 0000000000000000000000000000000000000000..c2fe0c3f6a504b3a82dc46f86a7096d73676efdf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IGB_DCA
@@ -0,0 +1 @@
+CONFIG_IGB_DCA=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_MTHCA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_MTHCA
new file mode 100644
index 0000000000000000000000000000000000000000..b5f09c38c0b7ab3b9451f704f4baafb2aab048b1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_MTHCA
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_MTHCA=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_OPA_VNIC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_OPA_VNIC
new file mode 100644
index 0000000000000000000000000000000000000000..d79565e481517f36ba28e891f5391d54272c353f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_OPA_VNIC
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_OPA_VNIC=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_RDMAVT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_RDMAVT
new file mode 100644
index 0000000000000000000000000000000000000000..ce4854611a3356fd07c3655a7a7677f226ce6f71
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_RDMAVT
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_RDMAVT=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT3406_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT3406_THERMAL
new file mode 100644
index 0000000000000000000000000000000000000000..ba7fc04e6983c91e0d5bb62f6bba0820b2cd494f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT3406_THERMAL
@@ -0,0 +1 @@
+# CONFIG_INT3406_THERMAL is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT340X_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT340X_THERMAL
new file mode 100644
index 0000000000000000000000000000000000000000..1df71f851b4fc3ada8c9e4108868a46e88400211
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT340X_THERMAL
@@ -0,0 +1 @@
+CONFIG_INT340X_THERMAL=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL
new file mode 100644
index 0000000000000000000000000000000000000000..e410d3f983d3a5203f53729332e43e3c5260852b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL
@@ -0,0 +1 @@
+CONFIG_INTEL_HFI_THERMAL=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDMA64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDMA64
new file mode 100644
index 0000000000000000000000000000000000000000..599b2317ee4f9cce4ff53fea36a483789a2eeb22
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDMA64
@@ -0,0 +1 @@
+CONFIG_INTEL_IDMA64=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD
new file mode 100644
index 0000000000000000000000000000000000000000..5ca68a398eb8fbee73f9c401ec3a305445dbef2e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD
@@ -0,0 +1 @@
+CONFIG_INTEL_IDXD=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_BUS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_BUS
new file mode 100644
index 0000000000000000000000000000000000000000..66f94d13c622e24aca20050aafcb4d072b5d3d2c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_BUS
@@ -0,0 +1 @@
+CONFIG_INTEL_IDXD_BUS=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON
new file mode 100644
index 0000000000000000000000000000000000000000..f21c240492b1f2abbece397298f4537faea828d3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON
@@ -0,0 +1 @@
+CONFIG_INTEL_IDXD_PERFMON=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_SVM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_SVM
new file mode 100644
index 0000000000000000000000000000000000000000..930a6b9ca482d5fb7c1e5ac57e9d22db2802ad5c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_SVM
@@ -0,0 +1 @@
+CONFIG_INTEL_IDXD_SVM=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOATDMA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOATDMA
new file mode 100644
index 0000000000000000000000000000000000000000..916ea17276b1865e2dece522256dd7fa0a5c562b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOATDMA
@@ -0,0 +1 @@
+CONFIG_INTEL_IOATDMA=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_PERF_EVENTS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_PERF_EVENTS
new file mode 100644
index 0000000000000000000000000000000000000000..f2574d5938c44b86bded99e3e7f823c972690c92
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_PERF_EVENTS
@@ -0,0 +1 @@
+CONFIG_INTEL_IOMMU_PERF_EVENTS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
new file mode 100644
index 0000000000000000000000000000000000000000..e0046c4d9212c9f4eb1357f464225ade413b0148
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
@@ -0,0 +1 @@
+CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI
new file mode 100644
index 0000000000000000000000000000000000000000..15212b9f87edee19ed45bf0bd14bd32a4b090743
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI
@@ -0,0 +1 @@
+CONFIG_INTEL_MEI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_ME b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_ME
new file mode 100644
index 0000000000000000000000000000000000000000..1592e963b34f4ec68c08f29116fe3bb4a75da26f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_ME
@@ -0,0 +1 @@
+CONFIG_INTEL_MEI_ME=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_WDT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..a1d5fc1bff9e19bcd15a61052dcaf751b5f06676
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_WDT
@@ -0,0 +1 @@
+CONFIG_INTEL_MEI_WDT=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PCH_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PCH_THERMAL
new file mode 100644
index 0000000000000000000000000000000000000000..626785458c1de09b0270e258a6005b19088bfb92
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PCH_THERMAL
@@ -0,0 +1 @@
+CONFIG_INTEL_PCH_THERMAL=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMC_CORE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMC_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..55f71325a996f0911f8b84e1df4a2e6b83f1b5c1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMC_CORE
@@ -0,0 +1 @@
+CONFIG_INTEL_PMC_CORE=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CLASS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CLASS
new file mode 100644
index 0000000000000000000000000000000000000000..166f5cbbe49d2816d31fedfc388ebdae8dfb5eb5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CLASS
@@ -0,0 +1 @@
+CONFIG_INTEL_PMT_CLASS=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CRASHLOG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CRASHLOG
new file mode 100644
index 0000000000000000000000000000000000000000..4b31113c6a4a7a7e37a2e7dd1af03b0450034d1d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CRASHLOG
@@ -0,0 +1 @@
+CONFIG_INTEL_PMT_CRASHLOG=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_TELEMETRY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_TELEMETRY
new file mode 100644
index 0000000000000000000000000000000000000000..25a382862e09679bf1a012fbdbffb743fcb66137
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_TELEMETRY
@@ -0,0 +1 @@
+CONFIG_INTEL_PMT_TELEMETRY=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_POWERCLAMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_POWERCLAMP
new file mode 100644
index 0000000000000000000000000000000000000000..84f4db21f22d770237d0ad82c8c5def93278565f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_POWERCLAMP
@@ -0,0 +1 @@
+CONFIG_INTEL_POWERCLAMP=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL
new file mode 100644
index 0000000000000000000000000000000000000000..c894934f73dc978c6659823444d16f92b0e21575
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL
@@ -0,0 +1 @@
+CONFIG_INTEL_RAPL=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_CORE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..0b06ec1dd119dd33c2926e7a8455324dd348068c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_CORE
@@ -0,0 +1 @@
+CONFIG_INTEL_RAPL_CORE=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI
new file mode 100644
index 0000000000000000000000000000000000000000..9acd69083d77cc8d9fb6c5e4a900d418b78b26f8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI
@@ -0,0 +1 @@
+CONFIG_INTEL_RAPL_TPMI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RST
new file mode 100644
index 0000000000000000000000000000000000000000..3a471adf04eee75871d74d849b36f4b8546806af
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RST
@@ -0,0 +1 @@
+CONFIG_INTEL_RST=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_INTERFACE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_INTERFACE
new file mode 100644
index 0000000000000000000000000000000000000000..293d4d2eb3e6370e5f59475fdf165f5ce3cde1b0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_INTERFACE
@@ -0,0 +1 @@
+CONFIG_INTEL_SPEED_SELECT_INTERFACE=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_TPMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_TPMI
new file mode 100644
index 0000000000000000000000000000000000000000..27dc766e7858ecc8ae73aaf1d80306c1f3538aa2
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_TPMI
@@ -0,0 +1 @@
+CONFIG_INTEL_SPEED_SELECT_TPMI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH
new file mode 100644
index 0000000000000000000000000000000000000000..27419eb298aacee5de4463c39f4807d9b5d2959c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH
@@ -0,0 +1 @@
+CONFIG_INTEL_TH=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_ACPI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_ACPI
new file mode 100644
index 0000000000000000000000000000000000000000..06eaf9127685297557758b47ba0d69984cf6a6b4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_ACPI
@@ -0,0 +1 @@
+CONFIG_INTEL_TH_ACPI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_DEBUG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..dc2f37af726ae180db3b7550f3d41eb4297c27a8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_DEBUG
@@ -0,0 +1 @@
+# CONFIG_INTEL_TH_DEBUG is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_GTH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_GTH
new file mode 100644
index 0000000000000000000000000000000000000000..f3574b9e7ce8fad2af299d43d267a37171b03dc4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_GTH
@@ -0,0 +1 @@
+CONFIG_INTEL_TH_GTH=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_MSU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_MSU
new file mode 100644
index 0000000000000000000000000000000000000000..e3a95a9b805ff439d6e1d4715fa5c854f11a2430
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_MSU
@@ -0,0 +1 @@
+CONFIG_INTEL_TH_MSU=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PCI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..dcb9cb074efff8ea207336021af1a8e331e00dfa
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PCI
@@ -0,0 +1 @@
+CONFIG_INTEL_TH_PCI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PTI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PTI
new file mode 100644
index 0000000000000000000000000000000000000000..1db8f6b3501285b766ca0e172eb5ba5c219eb5fd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PTI
@@ -0,0 +1 @@
+CONFIG_INTEL_TH_PTI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_STH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_STH
new file mode 100644
index 0000000000000000000000000000000000000000..68bdb4399f281b85e9dc17580f31b3f35a9d046e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_STH
@@ -0,0 +1 @@
+CONFIG_INTEL_TH_STH=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TPMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TPMI
new file mode 100644
index 0000000000000000000000000000000000000000..0e4c0ec3308ed24eb069769be59b517d57266b58
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TPMI
@@ -0,0 +1 @@
+CONFIG_INTEL_TPMI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TURBO_MAX_3 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TURBO_MAX_3
new file mode 100644
index 0000000000000000000000000000000000000000..9516cd8f6999319aa45c6e0a1191b64ccfdcc804
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TURBO_MAX_3
@@ -0,0 +1 @@
+CONFIG_INTEL_TURBO_MAX_3=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TXT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TXT
new file mode 100644
index 0000000000000000000000000000000000000000..f5428a4ebce2b06ebbb7385f16e750d06c7f07b8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TXT
@@ -0,0 +1 @@
+CONFIG_INTEL_TXT=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL
new file mode 100644
index 0000000000000000000000000000000000000000..f8e5172cfb5d668f4b2b1ac6b2dc24afdf13c938
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL
@@ -0,0 +1 @@
+CONFIG_INTEL_UNCORE_FREQ_CONTROL=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI
new file mode 100644
index 0000000000000000000000000000000000000000..786925e1f88395be2616a0b85c1c229b69114b7e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI
@@ -0,0 +1 @@
+CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC
new file mode 100644
index 0000000000000000000000000000000000000000..e399ee6fe52082ed04e9aa7ac25a21e856778e79
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC
@@ -0,0 +1 @@
+CONFIG_INTEL_VSEC=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_DMA_STRICT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_DMA_STRICT
new file mode 100644
index 0000000000000000000000000000000000000000..33c82672246dfad985b1607aa125a2d959c76d4f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_DMA_STRICT
@@ -0,0 +1 @@
+# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_PASSTHROUGH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_PASSTHROUGH
new file mode 100644
index 0000000000000000000000000000000000000000..8c9db2c8ff5a2b5740b6f6394ecc31dc67c507af
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_PASSTHROUGH
@@ -0,0 +1 @@
+CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOSF_MBI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOSF_MBI
new file mode 100644
index 0000000000000000000000000000000000000000..27b224a5631eec1737ddb3d79897f34e10cb4d5a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOSF_MBI
@@ -0,0 +1 @@
+CONFIG_IOSF_MBI=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IO_DELAY_0X80 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IO_DELAY_0X80
new file mode 100644
index 0000000000000000000000000000000000000000..4acbe19706c1b2fa42c63b099309ad73ad01780d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IO_DELAY_0X80
@@ -0,0 +1 @@
+CONFIG_IO_DELAY_0X80=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP6_NF_TARGET_HL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP6_NF_TARGET_HL
new file mode 100644
index 0000000000000000000000000000000000000000..96b2c947041c9497237058b1e0858031c4a11358
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP6_NF_TARGET_HL
@@ -0,0 +1 @@
+CONFIG_IP6_NF_TARGET_HL=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP_DCCP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP_DCCP
new file mode 100644
index 0000000000000000000000000000000000000000..26ba41376ee8d2fe382929628cbfcd104eac91b9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP_DCCP
@@ -0,0 +1 @@
+CONFIG_IP_DCCP=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SERIAL_TRANSMITTER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SERIAL_TRANSMITTER
new file mode 100644
index 0000000000000000000000000000000000000000..aec1b1f271201dfbb6d7fb2654d8dcb6bd3e290f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SERIAL_TRANSMITTER
@@ -0,0 +1 @@
+# CONFIG_IR_SERIAL_TRANSMITTER is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER
new file mode 100644
index 0000000000000000000000000000000000000000..ecbacc50aee61829dd8d2bec37a66805cde68526
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER
@@ -0,0 +1 @@
+CONFIG_IR_SHARP_DECODER=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER
new file mode 100644
index 0000000000000000000000000000000000000000..e5368826d93ac66de73de96fd68f6cb48a740284
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER
@@ -0,0 +1 @@
+CONFIG_IR_XMP_DECODER=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISA_DMA_API b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISA_DMA_API
new file mode 100644
index 0000000000000000000000000000000000000000..22d7b84ab12d9bd774c89ecbe6bc96f469fdf751
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISA_DMA_API
@@ -0,0 +1 @@
+CONFIG_ISA_DMA_API=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT
new file mode 100644
index 0000000000000000000000000000000000000000..de0808095c46a3c0b027f7028c223126a80458cd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT
@@ -0,0 +1 @@
+CONFIG_ISCSI_IBFT=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT_FIND b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT_FIND
new file mode 100644
index 0000000000000000000000000000000000000000..4737a1e892f221f2ad608a0e3a2c851cf927e53f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT_FIND
@@ -0,0 +1 @@
+CONFIG_ISCSI_IBFT_FIND=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_VENDOR_SUPPORT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_VENDOR_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..f563e52c7b1bdd2e2b7de478ae12184c28c2de35
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_VENDOR_SUPPORT
@@ -0,0 +1 @@
+CONFIG_ITCO_VENDOR_SUPPORT=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_WDT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..c18e387e35e41bfed43efbe5e1fd93173c6da67e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_WDT
@@ -0,0 +1 @@
+CONFIG_ITCO_WDT=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IXGBE_DCA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IXGBE_DCA
new file mode 100644
index 0000000000000000000000000000000000000000..36c6076d317b03df0b78a256395edbc5de363e1f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IXGBE_DCA
@@ -0,0 +1 @@
+CONFIG_IXGBE_DCA=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KCSAN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KCSAN
new file mode 100644
index 0000000000000000000000000000000000000000..f9c0456c344ce86093c5baea4e12419139b9109c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KCSAN
@@ -0,0 +1 @@
+# CONFIG_KCSAN is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_BZIP2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_BZIP2
new file mode 100644
index 0000000000000000000000000000000000000000..8beb2ec344875e45c9bbb7144fe7d80f2e9b40b8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_BZIP2
@@ -0,0 +1 @@
+# CONFIG_KERNEL_BZIP2 is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_GZIP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_GZIP
new file mode 100644
index 0000000000000000000000000000000000000000..e6689c725c9a013aa1f6db87c6732f4193642e86
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_GZIP
@@ -0,0 +1 @@
+# CONFIG_KERNEL_GZIP is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZ4 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZ4
new file mode 100644
index 0000000000000000000000000000000000000000..9a61eec25d85fc27130a94b0530e4f01c8f01111
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZ4
@@ -0,0 +1 @@
+# CONFIG_KERNEL_LZ4 is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZMA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZMA
new file mode 100644
index 0000000000000000000000000000000000000000..149abd49577793743b0cba3a40877e1cf647f9cf
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZMA
@@ -0,0 +1 @@
+# CONFIG_KERNEL_LZMA is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZO
new file mode 100644
index 0000000000000000000000000000000000000000..5810f4138dc0c7ac1724f44949ec8b159abcb778
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZO
@@ -0,0 +1 @@
+# CONFIG_KERNEL_LZO is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_XZ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_XZ
new file mode 100644
index 0000000000000000000000000000000000000000..4129549c260a3e792ea5c190271326608a5b4d72
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_XZ
@@ -0,0 +1 @@
+# CONFIG_KERNEL_XZ is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_ZSTD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_ZSTD
new file mode 100644
index 0000000000000000000000000000000000000000..dfaf8e6a9ae8b7423347ecc181111f9e106bc77c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_ZSTD
@@ -0,0 +1 @@
+CONFIG_KERNEL_ZSTD=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_BZIMAGE_VERIFY_SIG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
new file mode 100644
index 0000000000000000000000000000000000000000..e740740657e7a9f801ed3babea4a2ba681b207d3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
@@ -0,0 +1 @@
+CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_JUMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_JUMP
new file mode 100644
index 0000000000000000000000000000000000000000..e87b72ed17ab6e90cadcf7f40c3e88119085b443
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_JUMP
@@ -0,0 +1 @@
+CONFIG_KEXEC_JUMP=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_SIG_FORCE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_SIG_FORCE
new file mode 100644
index 0000000000000000000000000000000000000000..21d707af1ae3cbeb1beca376fc5cd271b5585fe6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_SIG_FORCE
@@ -0,0 +1 @@
+# CONFIG_KEXEC_SIG_FORCE is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ADC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ADC
new file mode 100644
index 0000000000000000000000000000000000000000..be5cd27d685748201f3aaceba3a31d6a632810a0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ADC
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_ADC is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KGDB_LOW_LEVEL_TRAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KGDB_LOW_LEVEL_TRAP
new file mode 100644
index 0000000000000000000000000000000000000000..18fdda1977f675f3fe0e714170d0f4276654dd7f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KGDB_LOW_LEVEL_TRAP
@@ -0,0 +1 @@
+CONFIG_KGDB_LOW_LEVEL_TRAP=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_AMD_SEV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_AMD_SEV
new file mode 100644
index 0000000000000000000000000000000000000000..de33426a5a81e35cca3fdd8de9ec2071153e4721
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_AMD_SEV
@@ -0,0 +1 @@
+CONFIG_KVM_AMD_SEV=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
new file mode 100644
index 0000000000000000000000000000000000000000..e09de32dc399e9e21fd81eb871bed869a43954ce
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
@@ -0,0 +1 @@
+CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_LEGACY_VSYSCALL_NONE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_LEGACY_VSYSCALL_NONE
new file mode 100644
index 0000000000000000000000000000000000000000..d3697026578d0722c347c566582cccda1609b1f3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_LEGACY_VSYSCALL_NONE
@@ -0,0 +1 @@
+# CONFIG_LEGACY_VSYSCALL_NONE is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_LOG_BUF_SHIFT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_LOG_BUF_SHIFT
new file mode 100644
index 0000000000000000000000000000000000000000..d57cc3d2d84ab463efbce5bcc8d7f40df91c0361
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_LOG_BUF_SHIFT
@@ -0,0 +1 @@
+CONFIG_LOG_BUF_SHIFT=21
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAPPING_DIRTY_HELPERS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAPPING_DIRTY_HELPERS
new file mode 100644
index 0000000000000000000000000000000000000000..22e6d62645c51f126cda9981001bd974a99124d8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAPPING_DIRTY_HELPERS
@@ -0,0 +1 @@
+CONFIG_MAPPING_DIRTY_HELPERS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAXSMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAXSMP
new file mode 100644
index 0000000000000000000000000000000000000000..d0d71de5336d98c16e1b6f737b5d4b30982ce74f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAXSMP
@@ -0,0 +1 @@
+# CONFIG_MAXSMP is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MCORE2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MCORE2
new file mode 100644
index 0000000000000000000000000000000000000000..5d6819c2c7628dcaa014f8a66c8b879db2ba495f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MCORE2
@@ -0,0 +1 @@
+# CONFIG_MCORE2 is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MELLANOX_PLATFORM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MELLANOX_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..8bd7b2e548c8bafa80253589259c7332a476938e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MELLANOX_PLATFORM
@@ -0,0 +1 @@
+CONFIG_MELLANOX_PLATFORM=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MEM_SOFT_DIRTY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MEM_SOFT_DIRTY
new file mode 100644
index 0000000000000000000000000000000000000000..356f2edd8522a4e30202c1799aa37eb47efb2f53
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MEM_SOFT_DIRTY
@@ -0,0 +1 @@
+CONFIG_MEM_SOFT_DIRTY=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MICROCODE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MICROCODE
new file mode 100644
index 0000000000000000000000000000000000000000..bbf6abac40e2264b8b450a459e4555d6cb866611
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MICROCODE
@@ -0,0 +1 @@
+CONFIG_MICROCODE=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_RFDS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_RFDS
new file mode 100644
index 0000000000000000000000000000000000000000..01a4bd8d895571d3f4cebfb752236ef33fff5023
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_RFDS
@@ -0,0 +1 @@
+CONFIG_MITIGATION_RFDS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI
new file mode 100644
index 0000000000000000000000000000000000000000..71b428227384df2334f71ea829b3156aa83e2f17
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI
@@ -0,0 +1 @@
+CONFIG_MITIGATION_SPECTRE_BHI=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX5_CORE_IPOIB b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX5_CORE_IPOIB
new file mode 100644
index 0000000000000000000000000000000000000000..0b04de7574c6f326675bed79dbd5fd5a65920b9c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX5_CORE_IPOIB
@@ -0,0 +1 @@
+# CONFIG_MLX5_CORE_IPOIB is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLXREG_HOTPLUG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLXREG_HOTPLUG
new file mode 100644
index 0000000000000000000000000000000000000000..44d9d37713a4f157fd64c107efe5dfe97664bdfe
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLXREG_HOTPLUG
@@ -0,0 +1 @@
+CONFIG_MLXREG_HOTPLUG=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX_PLATFORM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..54e7906ec22b7e891dd7e54f1b85628c1d05e602
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX_PLATFORM
@@ -0,0 +1 @@
+CONFIG_MLX_PLATFORM=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MODIFY_LDT_SYSCALL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MODIFY_LDT_SYSCALL
new file mode 100644
index 0000000000000000000000000000000000000000..769222e7e83431a5f392399e628261387c2b633a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MODIFY_LDT_SYSCALL
@@ -0,0 +1 @@
+CONFIG_MODIFY_LDT_SYSCALL=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTD
new file mode 100644
index 0000000000000000000000000000000000000000..5a861e745c1f73239151b90d099bb511007c9435
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTD
@@ -0,0 +1 @@
+CONFIG_MTD=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTD_SPI_NOR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTD_SPI_NOR
new file mode 100644
index 0000000000000000000000000000000000000000..7d0f3cae3b913a7a15e1cec0d0ebe7500a316744
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTD_SPI_NOR
@@ -0,0 +1 @@
+# CONFIG_MTD_SPI_NOR is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER
new file mode 100644
index 0000000000000000000000000000000000000000..2a32885dd6de364b3d1f8e0c2da24d116994dd0c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER
@@ -0,0 +1 @@
+CONFIG_MTRR_SANITIZER=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT
new file mode 100644
index 0000000000000000000000000000000000000000..80cb642a332f98862da1bf0f7738534c42e386ce
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT
@@ -0,0 +1 @@
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
new file mode 100644
index 0000000000000000000000000000000000000000..f400e9c4437e1032949f2579af51ecb355875546
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
@@ -0,0 +1 @@
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP
new file mode 100644
index 0000000000000000000000000000000000000000..1191c38531ee0bc8a558b9cc2b11587e9e513414
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP
@@ -0,0 +1 @@
+CONFIG_NET_TULIP=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NFS_V2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NFS_V2
new file mode 100644
index 0000000000000000000000000000000000000000..3bad5613f6e3375bde043fd2e3256a2c1d10279d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NFS_V2
@@ -0,0 +1 @@
+CONFIG_NFS_V2=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_DEFAULT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_DEFAULT
new file mode 100644
index
0000000000000000000000000000000000000000..b6b78b619e42947fc2790878ef030b48f88d447f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_DEFAULT @@ -0,0 +1 @@ +CONFIG_NR_CPUS_DEFAULT=64 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_BEGIN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_BEGIN new file mode 100644 index 0000000000000000000000000000000000000000..5ba0de00121d0e4f8d9bdaa6d03b18e76cdde549 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_BEGIN @@ -0,0 +1 @@ +CONFIG_NR_CPUS_RANGE_BEGIN=2 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_END b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_END new file mode 100644 index 0000000000000000000000000000000000000000..61a5647996842cad081e922dba8dccbfa39f286e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_END @@ -0,0 +1 @@ +CONFIG_NR_CPUS_RANGE_END=8192 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_AWARE_SPINLOCKS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_AWARE_SPINLOCKS new file mode 100644 index 0000000000000000000000000000000000000000..cec88812022a00d8b870e6727902bd635b79cac5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_AWARE_SPINLOCKS @@ -0,0 +1 @@ +CONFIG_NUMA_AWARE_SPINLOCKS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_EMU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_EMU new file mode 100644 index 0000000000000000000000000000000000000000..a444d47bb6da38fbc2b3e6b6d5ad075f7337fc9f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_EMU @@ -0,0 +1 @@ +CONFIG_NUMA_EMU=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NVRAM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NVRAM new file mode 100644 index 0000000000000000000000000000000000000000..a296f91340f707636e86d998d5a37e7679f62dbe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NVRAM @@ -0,0 +1 @@ +CONFIG_NVRAM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NV_TCO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NV_TCO new file mode 100644 index 0000000000000000000000000000000000000000..984c1ff6594431c561c15403ee271aa08ebbc1fe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NV_TCO @@ -0,0 +1 @@ +CONFIG_NV_TCO=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PARAVIRT_SPINLOCKS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PARAVIRT_SPINLOCKS new file mode 100644 index 0000000000000000000000000000000000000000..14b4c8d8d785313e4bbeddbca6ccff168ce9ad09 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PARAVIRT_SPINLOCKS @@ -0,0 +1 @@ +CONFIG_PARAVIRT_SPINLOCKS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_MMCONFIG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_MMCONFIG new file mode 100644 index 0000000000000000000000000000000000000000..cd749582cb1615dff6199a36f3c6499ac40f6d81 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_MMCONFIG @@ -0,0 +1 @@ +CONFIG_PCI_MMCONFIG=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCSPKR_PLATFORM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCSPKR_PLATFORM new file mode 100644 index 0000000000000000000000000000000000000000..3da062fee8cc6baf1ba264aa6fee18bd817ef08a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCSPKR_PLATFORM @@ -0,0 +1 @@ +CONFIG_PCSPKR_PLATFORM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_BRS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_BRS new file mode 100644 index 0000000000000000000000000000000000000000..aed94fdcd2ce300de9cb91ae5074d2542bad7901 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_BRS @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS_AMD_BRS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_POWER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_POWER new file mode 100644 index 0000000000000000000000000000000000000000..481bdca2953bd2d76984a1284da90f257d61f144 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_POWER @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS_AMD_POWER=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_UNCORE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_UNCORE new file mode 100644 index 0000000000000000000000000000000000000000..93401465991612e290be3cd918762fcd8217922a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_UNCORE @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS_AMD_UNCORE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_CSTATE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_CSTATE new file mode 100644 index 0000000000000000000000000000000000000000..aca4e0481c580bed97d447063464eba3af8c185b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_CSTATE @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS_INTEL_CSTATE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_RAPL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_RAPL new file mode 100644 index 0000000000000000000000000000000000000000..b4c5123f1063adb2d8bae2f8a084e8a203e18230 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_RAPL @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS_INTEL_RAPL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_UNCORE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_UNCORE new file mode 100644 index 0000000000000000000000000000000000000000..c96e92f02580be0c288ff799c846fe2fc4cb0dd7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_UNCORE @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS_INTEL_UNCORE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PHYSICAL_ALIGN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PHYSICAL_ALIGN new file mode 100644 index 0000000000000000000000000000000000000000..6a12c860e655840292926d3d23a96cb79ff387f4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PHYSICAL_ALIGN @@ -0,0 +1 @@ +CONFIG_PHYSICAL_ALIGN=0x1000000 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_KX7000 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_KX7000 new file mode 100644 index 0000000000000000000000000000000000000000..5aa54c945709c38a1237d0cf20692b56ccba153a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_KX7000 @@ -0,0 +1 @@ +CONFIG_PINCTRL_KX7000=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_ZHAOXIN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_ZHAOXIN new file mode 100644 index 0000000000000000000000000000000000000000..82e13600e54687bd9c29bd3f865016a6c9936b88 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_ZHAOXIN @@ -0,0 +1 @@ +CONFIG_PINCTRL_ZHAOXIN=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PM_TRACE_RTC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PM_TRACE_RTC new file mode 100644 index 0000000000000000000000000000000000000000..56d77e177ee95bd5c31d8a862d7d9300d1df7708 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PM_TRACE_RTC @@ -0,0 +1 @@ +# CONFIG_PM_TRACE_RTC is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_POWERCAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_POWERCAP new file mode 100644 index 
0000000000000000000000000000000000000000..279fe368fceaacf7327832e36bf20cf6a36e3aff --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_POWERCAP @@ -0,0 +1 @@ +CONFIG_POWERCAP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PROC_THERMAL_MMIO_RAPL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PROC_THERMAL_MMIO_RAPL new file mode 100644 index 0000000000000000000000000000000000000000..7460c13275f12e65a296efbb892f066e69f6c9c2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PROC_THERMAL_MMIO_RAPL @@ -0,0 +1 @@ +CONFIG_PROC_THERMAL_MMIO_RAPL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PTE_MARKER_UFFD_WP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PTE_MARKER_UFFD_WP new file mode 100644 index 0000000000000000000000000000000000000000..644df34574dd99c175ccb82ba586767cc34d7494 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PTE_MARKER_UFFD_WP @@ -0,0 +1 @@ +CONFIG_PTE_MARKER_UFFD_WP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_QAT_VFIO_PCI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_QAT_VFIO_PCI new file mode 100644 index 0000000000000000000000000000000000000000..6a4f45288b3df580ae74890a19dd98a55025d97e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_QAT_VFIO_PCI @@ -0,0 +1 @@ +CONFIG_QAT_VFIO_PCI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET new file mode 100644 index 0000000000000000000000000000000000000000..c08960cff891a5d26d443d72c1fc2736340f2a57 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET @@ -0,0 +1 @@ +CONFIG_RANDOMIZE_KSTACK_OFFSET=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..d680659c170364c8a31f4791368afd3e3c419620 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT @@ -0,0 +1 @@ +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING new file mode 100644 index 0000000000000000000000000000000000000000..2063d2ecfdcbf1b7081953fab13720a800f7d988 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING @@ -0,0 +1 @@ +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RESCTRL_FS_PSEUDO_LOCK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RESCTRL_FS_PSEUDO_LOCK new file mode 100644 index 0000000000000000000000000000000000000000..731e801f5294b607e96d27ab9be36f6134fd66dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RESCTRL_FS_PSEUDO_LOCK @@ -0,0 +1 @@ +CONFIG_RESCTRL_FS_PSEUDO_LOCK=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RTC_DRV_CMOS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RTC_DRV_CMOS new file mode 100644 index 0000000000000000000000000000000000000000..2051e4afebd5cde72a489ea72ec412c3ae26b36c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RTC_DRV_CMOS @@ -0,0 +1 @@ +CONFIG_RTC_DRV_CMOS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_MC_PRIO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_MC_PRIO new file mode 100644 index 0000000000000000000000000000000000000000..893581e346bcc8a44a302fa65b5d98028621a671 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_MC_PRIO @@ -0,0 +1 @@ +CONFIG_SCHED_MC_PRIO=y 
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_OMIT_FRAME_POINTER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_OMIT_FRAME_POINTER
new file mode 100644
index 0000000000000000000000000000000000000000..c567a751378e1eaba6db5bb0312c0099a55683b7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_OMIT_FRAME_POINTER
@@ -0,0 +1 @@
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCSI_UFSHCD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCSI_UFSHCD
new file mode 100644
index 0000000000000000000000000000000000000000..041b8209b69c2a001087d9219a8471aa810b6637
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCSI_UFSHCD
@@ -0,0 +1 @@
+CONFIG_SCSI_UFSHCD=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SDEI_WATCHDOG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SDEI_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..01cbe8e321afce34e8dd8c2abad2bc9e7779227c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SDEI_WATCHDOG
@@ -0,0 +1 @@
+# CONFIG_SDEI_WATCHDOG is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SENSORS_ZHAOXIN_CPUTEMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SENSORS_ZHAOXIN_CPUTEMP
new file mode 100644
index 0000000000000000000000000000000000000000..c8c7e99082e4b02832e981f56509b367b02aeb12
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SENSORS_ZHAOXIN_CPUTEMP
@@ -0,0 +1 @@
+CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SERIO_I8042 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SERIO_I8042
new file mode 100644
index 0000000000000000000000000000000000000000..8e5a28dd35c89cae95a9d9683770379fc2858058
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SERIO_I8042
@@ -0,0 +1 @@
+CONFIG_SERIO_I8042=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SGETMASK_SYSCALL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SGETMASK_SYSCALL
new file mode 100644
index 0000000000000000000000000000000000000000..61bf135dd3f91088279ae1f8d09665de0f13fb38
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SGETMASK_SYSCALL
@@ -0,0 +1 @@
+CONFIG_SGETMASK_SYSCALL=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SLS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SLS
new file mode 100644
index 0000000000000000000000000000000000000000..96eccd587468e24e178e533a54708d8fe611ed2d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SLS
@@ -0,0 +1 @@
+# CONFIG_SLS is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SND b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SND
new file mode 100644
index 0000000000000000000000000000000000000000..1f2dde914637c25f281fec6e95c4462577e7a701
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SND
@@ -0,0 +1 @@
+CONFIG_SND=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SP5100_TCO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SP5100_TCO
new file mode 100644
index 0000000000000000000000000000000000000000..db59115bfab4beb6c78d908949f44d3439329a8d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SP5100_TCO
@@ -0,0 +1 @@
+CONFIG_SP5100_TCO=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_CADENCE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_CADENCE
new file mode 100644
index 0000000000000000000000000000000000000000..78e16ec974b89222040f7cbd7c268db49cdfd65c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_CADENCE
@@ -0,0 +1 @@
+# CONFIG_SPI_CADENCE is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_DESIGNWARE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_DESIGNWARE
new file mode 100644
index 0000000000000000000000000000000000000000..de58a1341d27c45536ae39afdc064d383ce55a67
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_DESIGNWARE
@@ -0,0 +1 @@
+# CONFIG_SPI_DESIGNWARE is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SQUASHFS_LZ4 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SQUASHFS_LZ4
new file mode 100644
index 0000000000000000000000000000000000000000..27d171277c77b74922dad11061fc27625bed3a4b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SQUASHFS_LZ4
@@ -0,0 +1 @@
+# CONFIG_SQUASHFS_LZ4 is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STAGING b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STAGING
new file mode 100644
index 0000000000000000000000000000000000000000..9f033d229aac60da6de3042930fc58889745dfa5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STAGING
@@ -0,0 +1 @@
+# CONFIG_STAGING is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STATIC_CALL_SELFTEST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STATIC_CALL_SELFTEST
new file mode 100644
index 0000000000000000000000000000000000000000..ac1a3526ff7a94d7de1aa691d7747e682a0a4c18
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STATIC_CALL_SELFTEST
@@ -0,0 +1 @@
+# CONFIG_STATIC_CALL_SELFTEST is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_DUMMY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_DUMMY
new file mode 100644
index 0000000000000000000000000000000000000000..309ca390e91ee7217af86f4192b669213d31702b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_DUMMY
@@ -0,0 +1 @@
+CONFIG_STM_DUMMY=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_BASIC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_BASIC
new file mode 100644
index 0000000000000000000000000000000000000000..76be4de38a822e59034a89fe1ea027240ec2314e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_BASIC
@@ -0,0 +1 @@
+CONFIG_STM_PROTO_BASIC=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_SYS_T b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_SYS_T
new file mode 100644
index 0000000000000000000000000000000000000000..c0bbf7e78067d7f507b3a2c4fa70d88df019f404
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_SYS_T
@@ -0,0 +1 @@
+CONFIG_STM_PROTO_SYS_T=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_CONSOLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_CONSOLE
new file mode 100644
index 0000000000000000000000000000000000000000..6a4b15b5e598e70d8ac3735cbcd7d972b6b702b0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_CONSOLE
@@ -0,0 +1 @@
+CONFIG_STM_SOURCE_CONSOLE=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_FTRACE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_FTRACE
new file mode 100644
index 0000000000000000000000000000000000000000..d832097801ca20b4313897f6f0fa43e0c77e25ca
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_FTRACE
@@ -0,0 +1 @@
+CONFIG_STM_SOURCE_FTRACE=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_HEARTBEAT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_HEARTBEAT
new file mode 100644
index 0000000000000000000000000000000000000000..0df073d48dcd8e39913e8dc56686752afc1921c5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_HEARTBEAT
@@ -0,0 +1 @@
+CONFIG_STM_SOURCE_HEARTBEAT=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STRICT_SIGALTSTACK_SIZE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STRICT_SIGALTSTACK_SIZE
new file mode 100644
index 0000000000000000000000000000000000000000..042170f3b2a79213ca5889368fa8e83051bb379d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STRICT_SIGALTSTACK_SIZE
@@ -0,0 +1 @@
+# CONFIG_STRICT_SIGALTSTACK_SIZE is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON
new file mode 100644
index 0000000000000000000000000000000000000000..bf7d85b6af8fdfe9fde9580426855cceb5027b84
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON
@@ -0,0 +1 @@
+CONFIG_TCG_HYGON=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_INFINEON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_INFINEON
new file mode 100644
index 0000000000000000000000000000000000000000..d73a2c668a557e60c372f216d32f1540ee3f5903
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_INFINEON
@@ -0,0 +1 @@
+CONFIG_TCG_INFINEON=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_NSC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_NSC
new file mode 100644
index 0000000000000000000000000000000000000000..21b0d68655825fb6b795671b30be70d56eb97794
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_NSC
@@ -0,0 +1 @@
+CONFIG_TCG_NSC=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_TIS_SPI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_TIS_SPI
new file mode 100644
index 0000000000000000000000000000000000000000..3b66237980be8889bd40bd891ff9c13040cf31af
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_TIS_SPI
@@ -0,0 +1 @@
+# CONFIG_TCG_TIS_SPI is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCM_HYGON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCM_HYGON
new file mode 100644
index 0000000000000000000000000000000000000000..432adad10f93089426a6163ed2d587762462dd7f
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCM_HYGON
@@ -0,0 +1 @@
+CONFIG_TCM_HYGON=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCP_CONG_CDG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCP_CONG_CDG
new file mode 100644
index 0000000000000000000000000000000000000000..fb074cdd525714018e3b335e7a0cc8c790025995
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCP_CONG_CDG
@@ -0,0 +1 @@
+CONFIG_TCP_CONG_CDG=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_DEV_HYGON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_DEV_HYGON
new file mode 100644
index 0000000000000000000000000000000000000000..ba303419c7cb03aacfcb14f50a1aafc16ad77613
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_DEV_HYGON
@@ -0,0 +1 @@
+CONFIG_TDM_DEV_HYGON=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_KERNEL_GUARD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_KERNEL_GUARD
new file mode 100644
index 0000000000000000000000000000000000000000..4498c082785f94e7fa7908ce82cfd0fb20ba87eb
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_KERNEL_GUARD
@@ -0,0 +1 @@
+CONFIG_TDM_KERNEL_GUARD=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDX_GUEST_DRIVER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDX_GUEST_DRIVER
new file mode 100644
index 0000000000000000000000000000000000000000..eb5121298e70b3211a624ebf08c2f277a449cf2a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDX_GUEST_DRIVER
@@ -0,0 +1 @@
+CONFIG_TDX_GUEST_DRIVER=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_GOV_BANG_BANG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_GOV_BANG_BANG
new file mode 100644
index 0000000000000000000000000000000000000000..7f6f73a466ab154149ad914cb8d2d335e9781055
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_GOV_BANG_BANG
@@ -0,0 +1 @@
+CONFIG_THERMAL_GOV_BANG_BANG=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_NETLINK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_NETLINK
new file mode 100644
index 0000000000000000000000000000000000000000..a7a857579e1acff052e7adc9ca210e635f1c7839
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_NETLINK
@@ -0,0 +1 @@
+CONFIG_THERMAL_NETLINK=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_WRITABLE_TRIPS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_WRITABLE_TRIPS
new file mode 100644
index 0000000000000000000000000000000000000000..a0a8924e042b55edecb3aabbcc33ba6c7271f378
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_WRITABLE_TRIPS
@@ -0,0 +1 @@
+CONFIG_THERMAL_WRITABLE_TRIPS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TOUCHSCREEN_ADC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TOUCHSCREEN_ADC
new file mode 100644
index 0000000000000000000000000000000000000000..e20c7d85a490a3e6a2e5c917d65488cf937f698e
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TOUCHSCREEN_ADC
@@ -0,0 +1 @@
+# CONFIG_TOUCHSCREEN_ADC is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_UEVENT_HELPER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UEVENT_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..6091d5635e432e3e6e12623f2f9cc54d028a4411
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UEVENT_HELPER
@@ -0,0 +1 @@
+CONFIG_UEVENT_HELPER=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_UIO_HV_GENERIC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UIO_HV_GENERIC
new file mode 100644
index 0000000000000000000000000000000000000000..12f0b2b2a4039e34dbaccb2f88200d8ee93753f6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UIO_HV_GENERIC
@@ -0,0 +1 @@
+CONFIG_UIO_HV_GENERIC=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_UNACCEPTED_MEMORY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UNACCEPTED_MEMORY
new file mode 100644
index 0000000000000000000000000000000000000000..2c1ae834f15f647836dbb574bad83f2fcc36b5d3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UNACCEPTED_MEMORY
@@ -0,0 +1 @@
+CONFIG_UNACCEPTED_MEMORY=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VBOXGUEST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VBOXGUEST
new file mode 100644
index 0000000000000000000000000000000000000000..790186610b101edaf2c84796a0089652b3fd1cd6
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VBOXGUEST
@@ -0,0 +1 @@
+# CONFIG_VBOXGUEST is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_MDEV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_MDEV
new file mode 100644
index 0000000000000000000000000000000000000000..6657966d948d2f7d726048dff73c0537d00785e0
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_MDEV
@@ -0,0 +1 @@
+CONFIG_VFIO_MDEV=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD
new file mode 100644
index 0000000000000000000000000000000000000000..88a6f32c19be24af3b9b7ff67ccc86a8e179e1dd
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD
@@ -0,0 +1 @@
+CONFIG_VFIO_PCI_IGD=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VGA_SWITCHEROO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VGA_SWITCHEROO
new file mode 100644
index 0000000000000000000000000000000000000000..1bfcb26d6e76c4a6bc368a1efe300d8a08aad757
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VGA_SWITCHEROO
@@ -0,0 +1 @@
+CONFIG_VGA_SWITCHEROO=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB
new file mode 100644
index 0000000000000000000000000000000000000000..ec44dcaec92a2ffe3f0151c8467bdb5f5260f779
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB
@@ -0,0 +1 @@
+CONFIG_VIRTIO_PCI_LIB=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB_LEGACY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB_LEGACY
new file mode 100644
index 0000000000000000000000000000000000000000..03baf7192cc726171d609cebc63182f2cf6be3db
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB_LEGACY
@@ -0,0 +1 @@
+CONFIG_VIRTIO_PCI_LIB_LEGACY=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRT_DRIVERS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRT_DRIVERS
new file mode 100644
index 0000000000000000000000000000000000000000..7173b9c64eebac7633ee9cca9cc591278f1f79ae
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRT_DRIVERS
@@ -0,0 +1 @@
+CONFIG_VIRT_DRIVERS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMD
new file mode 100644
index 0000000000000000000000000000000000000000..7434a552c9e4ca532e1190a78806e3e5763869ae
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMD
@@ -0,0 +1 @@
+CONFIG_VMD=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_BALLOON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_BALLOON
new file mode 100644
index 0000000000000000000000000000000000000000..324a0cebc0e36fae40a732a0cfb33c59d896dd66
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_BALLOON
@@ -0,0 +1 @@
+CONFIG_VMWARE_BALLOON=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_PVSCSI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_PVSCSI
new file mode 100644
index 0000000000000000000000000000000000000000..3568e9188595fa7a5a730c6c578bda7e7914dcf7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_PVSCSI
@@ -0,0 +1 @@
+CONFIG_VMWARE_PVSCSI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI
new file mode 100644
index 0000000000000000000000000000000000000000..e54667c66b594615e63063c8aadbc6dd7f098b6d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI
@@ -0,0 +1 @@
+CONFIG_VMWARE_VMCI=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI_VSOCKETS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI_VSOCKETS
new file mode 100644
index 0000000000000000000000000000000000000000..e49bd5cf214c5278088e4c00d631f9550809f43b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI_VSOCKETS
@@ -0,0 +1 @@
+CONFIG_VMWARE_VMCI_VSOCKETS=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMXNET3 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMXNET3
new file mode 100644
index 0000000000000000000000000000000000000000..a5a8f9fa7eebeb0488f1ceeee1fc642fb3e60410
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMXNET3
@@ -0,0 +1 @@
+CONFIG_VMXNET3=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_WDAT_WDT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_WDAT_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..0aeb123ab6fb32388523b182d6ad8366ab97d8e7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_WDAT_WDT
@@ -0,0 +1 @@
+CONFIG_WDAT_WDT=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_16BIT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_16BIT
new file mode 100644
index 0000000000000000000000000000000000000000..471298885d6598271e1978807e6e2f71b1ae2c38
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_16BIT
@@ -0,0 +1 @@
+CONFIG_X86_16BIT=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ACPI_CPUFREQ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ACPI_CPUFREQ
new file mode 100644
index 0000000000000000000000000000000000000000..95aca65354c3c8351ee3fb61e9da3e28a1562b07
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ACPI_CPUFREQ
@@ -0,0 +1 @@
+CONFIG_X86_ACPI_CPUFREQ=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_FREQ_SENSITIVITY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_FREQ_SENSITIVITY
new file mode 100644
index 0000000000000000000000000000000000000000..30b7900268427c3ed975b1516a30748a25ab9303
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_FREQ_SENSITIVITY
@@ -0,0 +1 @@
+CONFIG_X86_AMD_FREQ_SENSITIVITY=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PLATFORM_DEVICE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PLATFORM_DEVICE
new file mode 100644
index 0000000000000000000000000000000000000000..4da780aa721a9e41d5446ba21021ebcaa17d99d3
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PLATFORM_DEVICE
@@ -0,0 +1 @@
+CONFIG_X86_AMD_PLATFORM_DEVICE=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE
new file mode 100644
index 0000000000000000000000000000000000000000..377cfefb7f94a44801eb9a935456042a61295b33
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE
@@ -0,0 +1 @@
+CONFIG_X86_AMD_PSTATE=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE_DEFAULT_MODE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE_DEFAULT_MODE
new file mode 100644
index 0000000000000000000000000000000000000000..c72fef3fc669915b4d5cadd5d287b268b393e2f5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE_DEFAULT_MODE
@@ -0,0 +1 @@
+CONFIG_X86_AMD_PSTATE_DEFAULT_MODE=3
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
new file mode 100644
index 0000000000000000000000000000000000000000..9f86fd4f5ee8d4beaf3cfe86c93e1645ca0bb602
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
@@ -0,0 +1 @@
+# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CHECK_BIOS_CORRUPTION b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CHECK_BIOS_CORRUPTION
new file mode 100644
index 0000000000000000000000000000000000000000..be693cd811ffe075effe1e9ef7d2f6b0648323fa
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CHECK_BIOS_CORRUPTION
@@ -0,0 +1 @@
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPA_STATISTICS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPA_STATISTICS
new file mode 100644
index 0000000000000000000000000000000000000000..7aa847ecbd82f7a21742d104ff078bed5c185320
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPA_STATISTICS
@@ -0,0 +1 @@
+CONFIG_X86_CPA_STATISTICS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPUID b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPUID
new file mode 100644
index 0000000000000000000000000000000000000000..165a101605bdf70dabb601288766a9e4bf9b7501
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPUID
@@ -0,0 +1 @@
+CONFIG_X86_CPUID=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DEBUG_FPU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DEBUG_FPU
new file mode 100644
index 0000000000000000000000000000000000000000..26258a1147640e3bdee53d5b45bf96f0227e8bf9
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DEBUG_FPU
@@ -0,0 +1 @@
+# CONFIG_X86_DEBUG_FPU is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DECODER_SELFTEST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DECODER_SELFTEST
new file mode 100644
index 0000000000000000000000000000000000000000..4452e43ddc1ad7da8028ede33731216803cb5507
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DECODER_SELFTEST
@@ -0,0 +1 @@
+CONFIG_X86_DECODER_SELFTEST=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DIRECT_GBPAGES b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DIRECT_GBPAGES
new file mode 100644
index 0000000000000000000000000000000000000000..4cf6e4f59733864a52d9573a86cdad4d2f4bbc03
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DIRECT_GBPAGES
@@ -0,0 +1 @@
+CONFIG_X86_DIRECT_GBPAGES=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ESPFIX64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ESPFIX64
new file mode 100644
index 0000000000000000000000000000000000000000..30aedc033a67b4d06b874ab04cdc1068f4e3bebc
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ESPFIX64
@@ -0,0 +1 @@
+CONFIG_X86_ESPFIX64=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_EXTENDED_PLATFORM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_EXTENDED_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..8000cbb78a009a5da756b8f94e6bd159966867da
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_EXTENDED_PLATFORM
@@ -0,0 +1 @@
+CONFIG_X86_EXTENDED_PLATFORM=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_GOLDFISH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_GOLDFISH
new file mode 100644
index 0000000000000000000000000000000000000000..e509890ff03990dab2d5ca40a0ed6770f946be6d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_GOLDFISH
@@ -0,0 +1 @@
+# CONFIG_X86_GOLDFISH is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_LPSS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_LPSS
new file mode 100644
index 0000000000000000000000000000000000000000..2d20612f80dbef5b655bd842a1805fc94209546b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_LPSS
@@ -0,0 +1 @@
+CONFIG_X86_INTEL_LPSS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
new file mode 100644
index 0000000000000000000000000000000000000000..9d135af1dee37d16cdcc2b027e082d37f6645f4a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -0,0 +1 @@
+CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MID b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MID
new file mode 100644
index 0000000000000000000000000000000000000000..9c338605e6df3fdebb52a5b7f22c360970284017
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MID
@@ -0,0 +1 @@
+# CONFIG_X86_INTEL_MID is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_PSTATE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_PSTATE
new file mode 100644
index 0000000000000000000000000000000000000000..35365aa7095406a2ddb5fdda1be304b24234d392
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_PSTATE
@@ -0,0 +1 @@
+CONFIG_X86_INTEL_PSTATE=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_AUTO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_AUTO
new file mode 100644
index 0000000000000000000000000000000000000000..60d980ced4bf078941c86b547d86986340926ca4
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_AUTO
@@ -0,0 +1 @@
+CONFIG_X86_INTEL_TSX_MODE_AUTO=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_OFF b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_OFF
new file mode 100644
index 0000000000000000000000000000000000000000..7a9d3e959359b179cbca677f971fdd787a19273b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_OFF
@@ -0,0 +1 @@
+# CONFIG_X86_INTEL_TSX_MODE_OFF is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_ON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_ON
new file mode 100644
index 0000000000000000000000000000000000000000..b6471915c1501a9efe2ae87b5f47eab47cb5a4c1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_ON
@@ -0,0 +1 @@
+# CONFIG_X86_INTEL_TSX_MODE_ON is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_KERNEL_IBT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_KERNEL_IBT
new file mode 100644
index 0000000000000000000000000000000000000000..a2a71465fadc73cb3981ba11bebb2ccdd40a27e1
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_KERNEL_IBT
@@ -0,0 +1 @@
+CONFIG_X86_KERNEL_IBT=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCELOG_LEGACY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCELOG_LEGACY
new file mode 100644
index 0000000000000000000000000000000000000000..2a4755640b4abbf3fd7269983243f33187bd5243
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCELOG_LEGACY
@@ -0,0 +1 @@
+CONFIG_X86_MCELOG_LEGACY=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCE_INJECT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCE_INJECT
new file mode 100644
index 0000000000000000000000000000000000000000..1e3d328432bfbf0196375ef83f7db15984982981
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCE_INJECT
@@ -0,0 +1 @@
+CONFIG_X86_MCE_INJECT=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MPPARSE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MPPARSE
new file mode 100644
index 0000000000000000000000000000000000000000..1e4f55144dbb6d4471528491830d97c01b064bc7
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MPPARSE
@@ -0,0 +1 @@
+CONFIG_X86_MPPARSE=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MSR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MSR
new file mode 100644
index 0000000000000000000000000000000000000000..18d5b64ddd08d863537cc71962e1fd87f74a75b8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MSR
@@ -0,0 +1 @@
+CONFIG_X86_MSR=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_NUMACHIP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_NUMACHIP
new file mode 100644
index 0000000000000000000000000000000000000000..13b9221418fa7f9bad2a96beef94556bced834ca
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_NUMACHIP
@@ -0,0 +1 @@
+# CONFIG_X86_NUMACHIP is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_P4_CLOCKMOD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_P4_CLOCKMOD
new file mode 100644
index 0000000000000000000000000000000000000000..714e722dd085d93b5430551443fa9da60361850a
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_P4_CLOCKMOD
@@ -0,0 +1 @@
+CONFIG_X86_P4_CLOCKMOD=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ
new file mode 100644
index 0000000000000000000000000000000000000000..10c283d7dc48bf63bea8ea24b7489c9fb2f20a96
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ
@@ -0,0 +1 @@
+CONFIG_X86_PCC_CPUFREQ=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PKG_TEMP_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PKG_TEMP_THERMAL
new file mode 100644
index 0000000000000000000000000000000000000000..8fb87c3a602882563e20413070e36fb083902a5d
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PKG_TEMP_THERMAL
@@ -0,0 +1 @@
+CONFIG_X86_PKG_TEMP_THERMAL=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PM_TIMER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PM_TIMER
new file mode 100644
index 0000000000000000000000000000000000000000..75c2da2a426e01eb13bb5a4c1f5f33ad6788f50b
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PM_TIMER
@@ -0,0 +1 @@
+CONFIG_X86_PM_TIMER=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
new file mode 100644
index 0000000000000000000000000000000000000000..d7dc1147e9ab1a76c0230acc106d188fda1ea8c8
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
@@ -0,0 +1 @@
+CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SGX_KVM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SGX_KVM
new file mode 100644
index 0000000000000000000000000000000000000000..3737e6294a8cd8f89bf198388ae706a3604f0a39
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SGX_KVM
@@ -0,0 +1 @@
+CONFIG_X86_SGX_KVM=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SUPPORTS_MEMORY_FAILURE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SUPPORTS_MEMORY_FAILURE
new file mode 100644
index 0000000000000000000000000000000000000000..ec1cc6f384553de24717b7f4c793f043ce7ffa35
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SUPPORTS_MEMORY_FAILURE
@@ -0,0 +1 @@
+CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_UV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_UV
new file mode 100644
index 0000000000000000000000000000000000000000..48e2726e3fbe0afc7743f7db1caeb980afcada71
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_UV
@@ -0,0 +1 @@
+CONFIG_X86_UV=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VERBOSE_BOOTUP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VERBOSE_BOOTUP
new file mode 100644
index 0000000000000000000000000000000000000000..e209c212bc461526f0fd45ec4a22a383f0b687df
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VERBOSE_BOOTUP
@@ -0,0 +1 @@
+# CONFIG_X86_VERBOSE_BOOTUP is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VSMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VSMP
new file mode 100644
index 0000000000000000000000000000000000000000..808ee39bfccb1023bff15ac2b31feb6147827bd5
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VSMP
@@ -0,0 +1 @@
+# CONFIG_X86_VSMP is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN
new file mode 100644
index 0000000000000000000000000000000000000000..a4985b44e2d6ab26f13e0816c5597fe4ceeb599c
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN
@@ -0,0 +1 @@
+CONFIG_XEN=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_NETDEV_FRONTEND b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_NETDEV_FRONTEND
new file mode 100644
index 0000000000000000000000000000000000000000..eb86ad0f82d8ead279e0d491039931a864de4f31
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_NETDEV_FRONTEND
@@ -0,0 +1 @@
+CONFIG_XEN_NETDEV_FRONTEND=m
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_PVHVM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_PVHVM
new file mode 100644
index 0000000000000000000000000000000000000000..be722d2200ef349c3c98750ce8ae1d699c8dec61
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_PVHVM
@@ -0,0 +1 @@
+CONFIG_XEN_PVHVM=y
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_XFRM_USER_COMPAT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XFRM_USER_COMPAT
new file mode 100644
index 0000000000000000000000000000000000000000..798d10e8e93e0c05bb284ed69c4e0f0be8ff7706
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XFRM_USER_COMPAT
@@ -0,0 +1 @@
+# CONFIG_XFRM_USER_COMPAT is not set
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ZRAM_MEMORY_TRACKING b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ZRAM_MEMORY_TRACKING
new file mode 100644
index 0000000000000000000000000000000000000000..fb0483ea5dd21b338139089ff3d8a1656093e530
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ZRAM_MEMORY_TRACKING
@@ -0,0 +1 @@
+CONFIG_ZRAM_MEMORY_TRACKING=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_A64FX_DIAG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_A64FX_DIAG
new file mode 100644
index 0000000000000000000000000000000000000000..feaee255ff055435cba438816973b0a38947e067
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_A64FX_DIAG
@@ -0,0 +1 @@
+# CONFIG_A64FX_DIAG is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APEI_SEA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APEI_SEA
new file mode 100644
index 0000000000000000000000000000000000000000..db573ffb867bc1ef48ffbd71668214de7a24db48
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APEI_SEA
@@ -0,0 +1 @@
+CONFIG_ACPI_APEI_SEA=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APMT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APMT
new file mode 100644
index 0000000000000000000000000000000000000000..844ccb4d36d2e30cb87ad5652443f4c3fa5961c1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APMT
@@ -0,0 +1 @@
+CONFIG_ACPI_APMT=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CCA_REQUIRED b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CCA_REQUIRED
new file mode 100644
index 0000000000000000000000000000000000000000..341bde47989e86bda950ff4f8e779c84981b5efd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CCA_REQUIRED
@@ -0,0 +1 @@
+CONFIG_ACPI_CCA_REQUIRED=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CPPC_CPUFREQ_FIE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CPPC_CPUFREQ_FIE
new file mode 100644
index 0000000000000000000000000000000000000000..da007c591e66b1658b75148befab92c058f2a574
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CPPC_CPUFREQ_FIE
@@ -0,0 +1 @@
+CONFIG_ACPI_CPPC_CPUFREQ_FIE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GENERIC_GSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GENERIC_GSI
new file mode 100644
index 0000000000000000000000000000000000000000..b65d19be0a03069785049d3b08ea2e8b29ee4d71
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GENERIC_GSI
@@ -0,0 +1 @@
+CONFIG_ACPI_GENERIC_GSI=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GTDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GTDT
new file mode 100644
index 0000000000000000000000000000000000000000..c8fd21b4d5e089101d37f0a800b03a65c46ab463
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GTDT
@@ -0,0 +1 @@
+CONFIG_ACPI_GTDT=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_IORT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_IORT
new file mode 100644
index 0000000000000000000000000000000000000000..447dcd823407ec05ff33acbcd23feb66816905ce
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_IORT
@@ -0,0 +1 @@
+CONFIG_ACPI_IORT=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MCFG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MCFG
new file mode 100644
index 0000000000000000000000000000000000000000..26b4dba417bb6a559b454717851f6cd5b191d98d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MCFG
@@ -0,0 +1 @@
+CONFIG_ACPI_MCFG=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MPAM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MPAM
new file mode 100644
index 0000000000000000000000000000000000000000..e93cbd36cedc1b687c9c2d09124388fc11798580
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MPAM
@@ -0,0 +1 @@
+CONFIG_ACPI_MPAM=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_PPTT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_PPTT
new file mode 100644
index 0000000000000000000000000000000000000000..bfd01f155d3c66e1d0584071f2fb6400562b4dde
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_PPTT
@@ -0,0 +1 @@
+CONFIG_ACPI_PPTT=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AHCI_CEVA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AHCI_CEVA
new file mode 100644
index 0000000000000000000000000000000000000000..d9279dda2974d392f65d425105b74970bd5e1ef7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AHCI_CEVA
@@ -0,0 +1 @@
+# CONFIG_AHCI_CEVA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ALTERA_STAPL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ALTERA_STAPL
new file mode 100644
index 0000000000000000000000000000000000000000..f454734fa68e581cad02d4edff5bac4646b982f3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ALTERA_STAPL
@@ -0,0 +1 @@
+# CONFIG_ALTERA_STAPL is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AL_FIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AL_FIC
new file mode 100644
index 0000000000000000000000000000000000000000..9e85b9bdae31dfc51d1db6153a55cabc5586bb90
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AL_FIC
@@ -0,0 +1 @@
+# CONFIG_AL_FIC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMBA_PL08X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMBA_PL08X
new file mode 100644
index 0000000000000000000000000000000000000000..a13d91b90d19d803cb8c240fd7d72bf7251d00e2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMBA_PL08X
@@ -0,0 +1 @@
+# CONFIG_AMBA_PL08X is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD8111_ETH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD8111_ETH
new file mode 100644
index 0000000000000000000000000000000000000000..a8d56c027174560379898da37b55e848600cde7b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD8111_ETH
@@ -0,0 +1 @@
+# CONFIG_AMD8111_ETH is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE
new file mode 100644
index 0000000000000000000000000000000000000000..27be1a7ee2c788cafe69f469aff4c8547317d774
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE
@@ -0,0 +1 @@
+CONFIG_AMD_XGBE=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE_DCB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE_DCB
new file mode 100644
index 0000000000000000000000000000000000000000..f76ed0831cd6dcdb25c466fc5e395364722e6cee
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE_DCB
@@ -0,0 +1 @@
+# CONFIG_AMD_XGBE_DCB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMIGA_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMIGA_PARTITION
new file mode 100644
index 0000000000000000000000000000000000000000..300a97b3270fa38afcc648cc0409c899dc9e2c8d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMIGA_PARTITION
@@ -0,0 +1 @@
+# CONFIG_AMIGA_PARTITION is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMPERE_ERRATUM_AC03_CPU_38 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMPERE_ERRATUM_AC03_CPU_38
new file mode 100644
index 0000000000000000000000000000000000000000..fa29e9c838d16f8ebc7de21b665964dc408f1ca9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMPERE_ERRATUM_AC03_CPU_38
@@ -0,0 +1 @@
+CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_APDS9802ALS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_APDS9802ALS
new file mode 100644
index 0000000000000000000000000000000000000000..c40795bfa26f0310affb513e918c1edef7ea86fd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_APDS9802ALS
@@ -0,0 +1 @@
+# CONFIG_APDS9802ALS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AQTION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AQTION
new file mode 100644
index 0000000000000000000000000000000000000000..42dea55ccf3bf8efa0dc5176e66e41a3d1f5f85d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AQTION
@@ -0,0 +1 @@
+# CONFIG_AQTION is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ACTIONS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ACTIONS
new file mode 100644
index 0000000000000000000000000000000000000000..760663b505483c94851e8611e3f463f82de92bcb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ACTIONS
@@ -0,0 +1 @@
+# CONFIG_ARCH_ACTIONS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ALPINE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ALPINE
new file mode 100644
index 0000000000000000000000000000000000000000..a347a04fc89924a1edecbb0d69a48e5bea1ac71e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ALPINE
@@ -0,0 +1 @@
+# CONFIG_ARCH_ALPINE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_APPLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_APPLE
new file mode 100644
index 0000000000000000000000000000000000000000..793ee5ebd4f598ad6b712110d7eae7ebca73b9b2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_APPLE
@@ -0,0 +1 @@
+# CONFIG_ARCH_APPLE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BCM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BCM
new file mode 100644
index 0000000000000000000000000000000000000000..3d8a993e6fd729550a783bf25ab5318484812548
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BCM
@@ -0,0 +1 @@
+# CONFIG_ARCH_BCM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BERLIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BERLIN
new file mode 100644
index 0000000000000000000000000000000000000000..9a647a4a160cd08028b0d62bdf0d2d54c67a26b3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BERLIN
@@ -0,0 +1 @@
+# CONFIG_ARCH_BERLIN is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS
new file mode 100644
index 0000000000000000000000000000000000000000..df9c31a7d8530988a530270c8f83c0e59f309a08
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS
@@ -0,0 +1 @@
+CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_STATE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_STATE
new file mode 100644
index 0000000000000000000000000000000000000000..5f20719ea1d6860a7e497089f238f0588bbb85f8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_STATE
@@ -0,0 +1 @@
+CONFIG_ARCH_BINFMT_ELF_STATE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BITMAIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BITMAIN
new file mode 100644
index 0000000000000000000000000000000000000000..ac0bc6480373262300d3616afae07a6838a62d37
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BITMAIN
@@ -0,0 +1 @@
+# CONFIG_ARCH_BITMAIN is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG
new file mode 100644
index 0000000000000000000000000000000000000000..cf025a741a9016242572ce190fe97ef941739660
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG
@@ -0,0 +1 @@
+CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_EXYNOS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_EXYNOS
new file mode 100644
index 0000000000000000000000000000000000000000..1ee894dd8194da7aecb71cdaff4f00111627be2c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_EXYNOS
@@ -0,0 +1 @@
+# CONFIG_ARCH_EXYNOS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_DMA_PREP_COHERENT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_DMA_PREP_COHERENT
new file mode 100644
index 0000000000000000000000000000000000000000..2c6e5013bfe22aa2564e90320499d85b30e42d28
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_DMA_PREP_COHERENT
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_KEEPINITRD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_KEEPINITRD
new file mode 100644
index 0000000000000000000000000000000000000000..84f3bdc5074000ea757e1f6ad0e02bb891fe5a45
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_KEEPINITRD
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_KEEPINITRD=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RELR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RELR
new file mode 100644
index 0000000000000000000000000000000000000000..701f9cc08991ec0b1e12d6f5141fe8783c5a5421
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RELR
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_RELR=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RESET_CONTROLLER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RESET_CONTROLLER
new file mode 100644
index 0000000000000000000000000000000000000000..bf8f541016ca004670186e400f5a1911eb72063d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RESET_CONTROLLER
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_RESET_CONTROLLER=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SETUP_DMA_OPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SETUP_DMA_OPS
new file mode 100644
index 0000000000000000000000000000000000000000..e371dd95954dc4edbc924434b6a1fc98f4928672
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SETUP_DMA_OPS
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_SETUP_DMA_OPS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SUBPAGE_FAULTS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SUBPAGE_FAULTS
new file mode 100644
index 0000000000000000000000000000000000000000..907b4c6e5f4e1e1f8caa695fc16066c02ecf75b8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SUBPAGE_FAULTS
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
new file mode 100644
index 0000000000000000000000000000000000000000..d3f3f32bdbcae16eec5d895089acc20a9e5ad98b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
new file mode 100644
index 0000000000000000000000000000000000000000..131ace1f9533bc9b045c9bfa3f79f8088637555e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
new file mode 100644
index 0000000000000000000000000000000000000000..31ff9e21830a35168830e865a8bfeb9d51ac2ba7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TICK_BROADCAST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TICK_BROADCAST
new file mode 100644
index 0000000000000000000000000000000000000000..869e3ebc6380892fa472dd30858d47b79e4394e3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TICK_BROADCAST
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_TICK_BROADCAST=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_ELF_PROT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_ELF_PROT
new file mode 100644
index 0000000000000000000000000000000000000000..54b8c8c2f4573d0f23e3f7bc245c01556756553b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_ELF_PROT
@@ -0,0 +1 @@
+CONFIG_ARCH_HAVE_ELF_PROT=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS
new file mode 100644
index 0000000000000000000000000000000000000000..a321411bd03ec9b129d6dd8d2ddf5aae414d7ab4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS
@@ -0,0 +1 @@
+CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y
diff --git
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HISI new file mode 100644 index 0000000000000000000000000000000000000000..9afa5dcee23331566750f492943507019c0098c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HISI @@ -0,0 +1 @@ +CONFIG_ARCH_HISI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_INTEL_SOCFPGA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_INTEL_SOCFPGA new file mode 100644 index 0000000000000000000000000000000000000000..2a600d0dc26bb02e4988ddcf420e1d8bebf00b57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_INTEL_SOCFPGA @@ -0,0 +1 @@ +# CONFIG_ARCH_INTEL_SOCFPGA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_K3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_K3 new file mode 100644 index 0000000000000000000000000000000000000000..6929420f8d0f650e3fbe7b8dd907155cc10fe587 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_K3 @@ -0,0 +1 @@ +# CONFIG_ARCH_K3 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEMBAY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEMBAY new file mode 100644 index 0000000000000000000000000000000000000000..08875182cdd8c54b356361090346f938dc71a7b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEMBAY @@ -0,0 +1 @@ +# CONFIG_ARCH_KEEMBAY is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEP_MEMBLOCK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEP_MEMBLOCK new file mode 100644 index 0000000000000000000000000000000000000000..20aa4070bcf1a56fb35429d8931893d1322cc2ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEP_MEMBLOCK @@ -0,0 +1 @@ +CONFIG_ARCH_KEEP_MEMBLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_LG1K b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_LG1K new file mode 100644 index 0000000000000000000000000000000000000000..0d73af4e2246ca4eda08c2e52928207211a5e6e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_LG1K @@ -0,0 +1 @@ +# CONFIG_ARCH_LG1K is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MA35 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MA35 new file mode 100644 index 0000000000000000000000000000000000000000..2f00524de4cef428d21861f26dc8a3294ad9ef9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MA35 @@ -0,0 +1 @@ +# CONFIG_ARCH_MA35 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MEDIATEK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MEDIATEK new file mode 100644 index 0000000000000000000000000000000000000000..62b0a1f19a0f12ee1e97dc315c397d12e0f7a80b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MEDIATEK @@ -0,0 +1 @@ +# CONFIG_ARCH_MEDIATEK is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MESON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MESON new file mode 100644 index 0000000000000000000000000000000000000000..849319f60afb8e3d42e8f49a8e3a0149be78016b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MESON @@ -0,0 +1 @@ +# CONFIG_ARCH_MESON is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS new file mode 100644 index 0000000000000000000000000000000000000000..5d8b9fd796191b25b19ae712da4e667538ec14de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_BITS=18 diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MAX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MAX new file mode 100644 index 0000000000000000000000000000000000000000..9b8aa49b0b7c7e254ded617d0819cb5e3dddb523 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MAX @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MIN new file mode 100644 index 0000000000000000000000000000000000000000..d409ee8284f038eb46e80ea66a57d65df51733b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MIN @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN new file mode 100644 index 0000000000000000000000000000000000000000..d4c9b09089a891189811adf9ceba5669bd68abb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MVEBU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MVEBU new file mode 100644 index 0000000000000000000000000000000000000000..d7a6da1f2e2b50bc9474feb19cd1f3e2c0708bf2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MVEBU @@ -0,0 +1 @@ +# CONFIG_ARCH_MVEBU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NPCM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NPCM new file mode 100644 index 0000000000000000000000000000000000000000..4117554db7f52d4d2605e828b6ea472d4169ce7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NPCM @@ -0,0 +1 @@ +# CONFIG_ARCH_NPCM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NXP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NXP new file mode 100644 index 0000000000000000000000000000000000000000..4e89cddb9cd241422fc2e89e722fa88603d1818a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NXP @@ -0,0 +1 @@ +# CONFIG_ARCH_NXP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_QCOM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_QCOM new file mode 100644 index 0000000000000000000000000000000000000000..e51a38aa67b884e418f498585c7ec556a8f1a31b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_QCOM @@ -0,0 +1 @@ +CONFIG_ARCH_QCOM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_REALTEK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_REALTEK new file mode 100644 index 0000000000000000000000000000000000000000..49536f6d57803a6ec2e3530c5fdbf8d46bf8ffae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_REALTEK @@ -0,0 +1 @@ +# CONFIG_ARCH_REALTEK is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_RENESAS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_RENESAS new file mode 100644 index 0000000000000000000000000000000000000000..0fa3a2ec3d4cab7456dea9d78ed4ad0e4b164bca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_RENESAS @@ -0,0 +1 @@ +# CONFIG_ARCH_RENESAS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ROCKCHIP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ROCKCHIP new file mode 100644 index 0000000000000000000000000000000000000000..f0df52228bd855042340656464a5955d0f6b39eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ROCKCHIP @@ -0,0 +1 @@ +# 
CONFIG_ARCH_ROCKCHIP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SEATTLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SEATTLE new file mode 100644 index 0000000000000000000000000000000000000000..83c1e8bc4b6d37e633690a9cc8749e35053363c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SEATTLE @@ -0,0 +1 @@ +CONFIG_ARCH_SEATTLE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPARX5 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPARX5 new file mode 100644 index 0000000000000000000000000000000000000000..cb416e9915a027b711443e19f675c7ca26e90f96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPARX5 @@ -0,0 +1 @@ +# CONFIG_ARCH_SPARX5 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPRD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPRD new file mode 100644 index 0000000000000000000000000000000000000000..a090576dec9afab9ea2b69141e83105554e05bec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPRD @@ -0,0 +1 @@ +# CONFIG_ARCH_SPRD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_STM32 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_STM32 new file mode 100644 index 0000000000000000000000000000000000000000..cb609deb9c1087633b664060492eeda5073be5e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_STM32 @@ -0,0 +1 @@ +# CONFIG_ARCH_STM32 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUNXI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUNXI new file mode 100644 index 0000000000000000000000000000000000000000..e802c1d4f8275bb8eac9ebc29a77699e1afb8afd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUNXI @@ -0,0 +1 @@ +# CONFIG_ARCH_SUNXI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_HUGETLBFS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_HUGETLBFS new file mode 100644 index 0000000000000000000000000000000000000000..d17b5b55577f02cec12b80f598a3df3692d35af0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_HUGETLBFS @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG new file mode 100644 index 0000000000000000000000000000000000000000..b202bfb6ca3c69586bd061d72f7d5d2a2ebd4511 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK new file mode 100644 index 0000000000000000000000000000000000000000..289fd1ea24df0b34f7b375d08b22f8619fe73eff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SYNQUACER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SYNQUACER new file mode 100644 index 0000000000000000000000000000000000000000..a73cda5a18e885edfec8eec2dabb131704d0ffa0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SYNQUACER @@ -0,0 +1 @@ +# CONFIG_ARCH_SYNQUACER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_132_SOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_132_SOC new file mode 100644 index 
0000000000000000000000000000000000000000..16eca84f92afdfd8a37a5ffc3cf67c1df12f5ca4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_132_SOC @@ -0,0 +1 @@ +# CONFIG_ARCH_TEGRA_132_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_186_SOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_186_SOC new file mode 100644 index 0000000000000000000000000000000000000000..1cafdb24fa7dc5075c964cc92d1e97826d327152 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_186_SOC @@ -0,0 +1 @@ +CONFIG_ARCH_TEGRA_186_SOC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_194_SOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_194_SOC new file mode 100644 index 0000000000000000000000000000000000000000..719c9f05fa669090ab13e29b3bfe967ef2a2f373 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_194_SOC @@ -0,0 +1 @@ +# CONFIG_ARCH_TEGRA_194_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_210_SOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_210_SOC new file mode 100644 index 0000000000000000000000000000000000000000..76117199a477f0bc5eb9dfcedb8fd3d3704d2a65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_210_SOC @@ -0,0 +1 @@ +# CONFIG_ARCH_TEGRA_210_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_234_SOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_234_SOC new file mode 100644 index 0000000000000000000000000000000000000000..4c94a03094e34d41c8a4f774540b9760189b447b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_234_SOC @@ -0,0 +1 @@ +# CONFIG_ARCH_TEGRA_234_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER new file mode 100644 index 0000000000000000000000000000000000000000..fc1527e5ece976f28ae85a3f7731c99b07ed94ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER @@ -0,0 +1 @@ +CONFIG_ARCH_THUNDER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER2 new file mode 100644 index 0000000000000000000000000000000000000000..aa0f1f3199dc9efdeb5a805d4bb3a79873304e1e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER2 @@ -0,0 +1 @@ +CONFIG_ARCH_THUNDER2=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_UNIPHIER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_UNIPHIER new file mode 100644 index 0000000000000000000000000000000000000000..2c110333793a61cdca6cd7975daffb6459eec34a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_UNIPHIER @@ -0,0 +1 @@ +# CONFIG_ARCH_UNIPHIER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USES_PG_ARCH_X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USES_PG_ARCH_X new file mode 100644 index 0000000000000000000000000000000000000000..b762db4030baa087d75f98c07161b156c4f6d1cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USES_PG_ARCH_X @@ -0,0 +1 @@ +CONFIG_ARCH_USES_PG_ARCH_X=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USE_GNU_PROPERTY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USE_GNU_PROPERTY new file mode 100644 index 0000000000000000000000000000000000000000..f773a78edbb55fa1550e7eaa42fb6bef99a063c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USE_GNU_PROPERTY @@ -0,0 +1 @@ +CONFIG_ARCH_USE_GNU_PROPERTY=y diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VEXPRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VEXPRESS new file mode 100644 index 0000000000000000000000000000000000000000..aa238e6be0a1dd159b10ce54951c409f56e9b906 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VEXPRESS @@ -0,0 +1 @@ +CONFIG_ARCH_VEXPRESS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VISCONTI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VISCONTI new file mode 100644 index 0000000000000000000000000000000000000000..099b60922ebfbedcf01b9d7433dc6b479a3e767a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VISCONTI @@ -0,0 +1 @@ +# CONFIG_ARCH_VISCONTI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT new file mode 100644 index 0000000000000000000000000000000000000000..b4d0a5cbcd79f04444b3829f2b03ff8b8037dabd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_FRAME_POINTERS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_FRAME_POINTERS new file mode 100644 index 0000000000000000000000000000000000000000..614f6979533c088c9b166691d65c83da2fea926a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_FRAME_POINTERS @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_FRAME_POINTERS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_XGENE new file mode 100644 index 0000000000000000000000000000000000000000..c1bd4d11639c66fbdae4f98a39ae4265fc1a9534 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_XGENE @@ -0,0 +1 @@ +CONFIG_ARCH_XGENE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ZYNQMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ZYNQMP new file mode 100644 index 0000000000000000000000000000000000000000..f92a386c63a328d06c0a7ca9dce380c8259a8887 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ZYNQMP @@ -0,0 +1 @@ +# CONFIG_ARCH_ZYNQMP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_AS_HAS_MTE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_AS_HAS_MTE new file mode 100644 index 0000000000000000000000000000000000000000..5bb9a4675eb4104af396b59f28b8754d85a813be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_AS_HAS_MTE @@ -0,0 +1 @@ +CONFIG_ARM64_AS_HAS_MTE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PMD_SHIFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PMD_SHIFT new file mode 100644 index 0000000000000000000000000000000000000000..5216b165ec13c09a7e7badb70ebd64252673b322 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PMD_SHIFT @@ -0,0 +1 @@ +CONFIG_ARM64_CONT_PMD_SHIFT=4 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PTE_SHIFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PTE_SHIFT new file mode 100644 index 0000000000000000000000000000000000000000..d5e455df2a2e7c080a9de0777340cb8f29a69aad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PTE_SHIFT @@ -0,0 +1 @@ +CONFIG_ARM64_CONT_PTE_SHIFT=4 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_ERRATUM_858921 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_ERRATUM_858921 new file mode 100644 index 0000000000000000000000000000000000000000..055a6880cdc4c1056f9608698dd55c3111e81e95 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_ERRATUM_858921 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_858921=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419 new file mode 100644 index 0000000000000000000000000000000000000000..28e186e92a27c8e61ca0bf4564de4d2e42c43b30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419 @@ -0,0 +1 @@ +CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LSE_ATOMICS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LSE_ATOMICS new file mode 100644 index 0000000000000000000000000000000000000000..a54d60033875867715ad9e806f6877b2a9dff985 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LSE_ATOMICS @@ -0,0 +1 @@ +CONFIG_ARM64_LSE_ATOMICS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PAGE_SHIFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PAGE_SHIFT new file mode 100644 index 0000000000000000000000000000000000000000..71ee2d57a0798066c705bfe711af1c511e47837f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PAGE_SHIFT @@ -0,0 +1 @@ +CONFIG_ARM64_PAGE_SHIFT=12 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS new file mode 100644 index 0000000000000000000000000000000000000000..8d200cc60779e0b4533c67655e5b21ac422beb50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS @@ -0,0 +1 @@ +CONFIG_ARM64_PA_BITS=48 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS_48 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS_48 new file mode 100644 index 0000000000000000000000000000000000000000..742d9411da74a5524b468ee6469cc4205f451353 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS_48 @@ -0,0 +1 @@ +CONFIG_ARM64_PA_BITS_48=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_RELOC_TEST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_RELOC_TEST new file mode 100644 index 0000000000000000000000000000000000000000..864fc6a6bb73739d812b73476a4ca73656bb8f50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_RELOC_TEST @@ -0,0 +1 @@ +# CONFIG_ARM64_RELOC_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS new file mode 100644 index 0000000000000000000000000000000000000000..3a2764df2ed1efea6330569466b7e76b0b9905eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS @@ -0,0 +1 @@ +CONFIG_ARM64_VA_BITS=48 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS_48 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS_48 new file mode 100644 index 0000000000000000000000000000000000000000..1bafe6e581ed8ab9b4bb63633ec0f62d751bdd94 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS_48 @@ -0,0 +1 @@ +CONFIG_ARM64_VA_BITS_48=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_CLEAN_CACHE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_CLEAN_CACHE new file mode 100644 index 0000000000000000000000000000000000000000..66d54a8595c8acaa0b7e2262957819cb6082762e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_CLEAN_CACHE @@ -0,0 +1 @@ +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_REPEAT_TLBI 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_REPEAT_TLBI new file mode 100644 index 0000000000000000000000000000000000000000..404622b8cd010595082685a611bdbc5a670ca846 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_REPEAT_TLBI @@ -0,0 +1 @@ +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT new file mode 100644 index 0000000000000000000000000000000000000000..e30fdea517966ff46498cb40793be1bad9d47464 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT @@ -0,0 +1 @@ +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD new file mode 100644 index 0000000000000000000000000000000000000000..df3d7de5eef4cbd312ccc1d93d24a0dcea468c9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD @@ -0,0 +1 @@ +CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE new file mode 100644 index 0000000000000000000000000000000000000000..5900d7737840b8cd2afba46d9d9cdba8da3f7022 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE @@ -0,0 +1 @@ +CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_AMBA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_AMBA new file mode 100644 index 0000000000000000000000000000000000000000..ed20b0276309ce3ccca2ca12d099c4e02a419b3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_AMBA @@ -0,0 +1 @@ +CONFIG_ARM_AMBA=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER new file mode 100644 index 0000000000000000000000000000000000000000..f30148a82030f3d7d2b43ee5e88cd39d67d75825 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER @@ -0,0 +1 @@ +CONFIG_ARM_ARCH_TIMER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_EVTSTREAM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_EVTSTREAM new file mode 100644 index 0000000000000000000000000000000000000000..1073ce86bfc64f1b233635a29713f01ef10f4a9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_EVTSTREAM @@ -0,0 +1 @@ +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND new file mode 100644 index 0000000000000000000000000000000000000000..a76e926050fce858cb975cd811c4e32318b22703 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND @@ -0,0 +1 @@ +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_CCI_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_CCI_PMU new file mode 100644 index 0000000000000000000000000000000000000000..1fea9928288b326727006a69d86563e6620f1ee4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_CCI_PMU @@ -0,0 +1 @@ +# CONFIG_ARM_CCI_PMU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_FFA_TRANSPORT 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_FFA_TRANSPORT new file mode 100644 index 0000000000000000000000000000000000000000..8914e84510f29ce6c9d5641686bcac23adc84ac1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_FFA_TRANSPORT @@ -0,0 +1 @@ +# CONFIG_ARM_FFA_TRANSPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_MAX_NR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_MAX_NR new file mode 100644 index 0000000000000000000000000000000000000000..ed911917bf166d3c0717038c2bb215c43fa287c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_MAX_NR @@ -0,0 +1 @@ +CONFIG_ARM_GIC_MAX_NR=1 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_PM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_PM new file mode 100644 index 0000000000000000000000000000000000000000..8c4e75c2f5b650195b0c3779b3f7323b44dd8cd1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_PM @@ -0,0 +1 @@ +CONFIG_ARM_GIC_PM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_MHU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_MHU new file mode 100644 index 0000000000000000000000000000000000000000..fd6e8cc15f2c6a504913f2a3e0978cc2bcb797f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_MHU @@ -0,0 +1 @@ +CONFIG_ARM_MHU=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CHECKER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CHECKER new file mode 100644 index 0000000000000000000000000000000000000000..8d66ce36a339d7897c6b2f2927d6cc9030188c3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CHECKER @@ -0,0 +1 @@ +# CONFIG_ARM_PSCI_CHECKER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CPUIDLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CPUIDLE new file mode 100644 index 0000000000000000000000000000000000000000..5d304af486e64385ddcb56f951ca85b08b535b9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CPUIDLE @@ -0,0 +1 @@ +# CONFIG_ARM_PSCI_CPUIDLE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_FW b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_FW new file mode 100644 index 0000000000000000000000000000000000000000..8f3a935754bc84dcd48410f37cd7671dceeab8a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_FW @@ -0,0 +1 @@ +CONFIG_ARM_PSCI_FW=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_QCOM_CPUFREQ_HW b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_QCOM_CPUFREQ_HW new file mode 100644 index 0000000000000000000000000000000000000000..cc99ba49a528f1314425568c981abaef99fdb255 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_QCOM_CPUFREQ_HW @@ -0,0 +1 @@ +# CONFIG_ARM_QCOM_CPUFREQ_HW is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SBSA_WATCHDOG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SBSA_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..01aa8f1525d934652e4451efe03743cdf8777cdf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SBSA_WATCHDOG @@ -0,0 +1 @@ +CONFIG_ARM_SBSA_WATCHDOG=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SCMI_PROTOCOL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SCMI_PROTOCOL new file mode 100644 index 0000000000000000000000000000000000000000..8e99d7695567a1c9639a659b5ad6f19431860cff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SCMI_PROTOCOL @@ -0,0 +1 @@ +# CONFIG_ARM_SCMI_PROTOCOL is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMCCC_SOC_ID b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMCCC_SOC_ID new file mode 100644 index 0000000000000000000000000000000000000000..c53e28f0b5996cc6ba368a787ece80fd9c9e2ad1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMCCC_SOC_ID @@ -0,0 +1 @@ +CONFIG_ARM_SMCCC_SOC_ID=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS new file mode 100644 index 0000000000000000000000000000000000000000..a8cf80f896c3c8687d6740ad7e9b41aa043092c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS @@ -0,0 +1 @@ +# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM new file mode 100644 index 0000000000000000000000000000000000000000..dbd9c71b903388890734a2badf0b7d71966e7b80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM @@ -0,0 +1 @@ +CONFIG_ARM_SMMU_QCOM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM_DEBUG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..898fa4177cd7d7c96442e9a2364798f3db8c4862 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM_DEBUG @@ -0,0 +1 @@ +# CONFIG_ARM_SMMU_QCOM_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_V3_SVA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_V3_SVA new file mode 100644 index 0000000000000000000000000000000000000000..73b0c35772c4ccffa782b0d30b19fef6d942e8e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_V3_SVA @@ -0,0 +1 @@ +CONFIG_ARM_SMMU_V3_SVA=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TEGRA186_CPUFREQ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TEGRA186_CPUFREQ new file mode 100644 index 0000000000000000000000000000000000000000..9e4de8815d4304787a8ae26dabbe7c82c6ca61a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TEGRA186_CPUFREQ @@ -0,0 +1 @@ +# CONFIG_ARM_TEGRA186_CPUFREQ is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TIMER_SP804 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TIMER_SP804 new file mode 100644 index 0000000000000000000000000000000000000000..94c9ede0a24425e36ebf880ad766171fe8c02bbd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TIMER_SP804 @@ -0,0 +1 @@ +CONFIG_ARM_TIMER_SP804=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_2 new file mode 100644 index 0000000000000000000000000000000000000000..abbc5ca641a0bbc8e374f11f612bed4180c90da1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_2 @@ -0,0 +1 @@ +CONFIG_AS_HAS_ARMV8_2=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_3 new file mode 100644 index 0000000000000000000000000000000000000000..33b9b5ba161fefdf84c8526bda9c1cda2f49b123 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_3 @@ -0,0 +1 @@ +CONFIG_AS_HAS_ARMV8_3=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_4 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_4 new file mode 100644 index 0000000000000000000000000000000000000000..7f650f221e5ab37b936d8d0eeab11028bf6d026d --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_4 @@ -0,0 +1 @@ +CONFIG_AS_HAS_ARMV8_4=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_5 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_5 new file mode 100644 index 0000000000000000000000000000000000000000..70144b1c117e559f83f37d99414ee141c3b007a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_5 @@ -0,0 +1 @@ +CONFIG_AS_HAS_ARMV8_5=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_CFI_NEGATE_RA_STATE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_CFI_NEGATE_RA_STATE new file mode 100644 index 0000000000000000000000000000000000000000..12b3f6ebe3de1f406b79cf893640c23468a05b9b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_CFI_NEGATE_RA_STATE @@ -0,0 +1 @@ +CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LDAPR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LDAPR new file mode 100644 index 0000000000000000000000000000000000000000..b5a93d0b7c3ec0d9e18a083825adfbe4a3443d65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LDAPR @@ -0,0 +1 @@ +CONFIG_AS_HAS_LDAPR=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LSE_ATOMICS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LSE_ATOMICS new file mode 100644 index 0000000000000000000000000000000000000000..dbd7bfb4577062b58ddc23532acf7e10c2781645 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LSE_ATOMICS @@ -0,0 +1 @@ +CONFIG_AS_HAS_LSE_ATOMICS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_SHA3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_SHA3 new file mode 100644 index 0000000000000000000000000000000000000000..4c8d5b3d9ae238480aa4749b1594167dd092ac46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_SHA3 @@ -0,0 +1 @@ +CONFIG_AS_HAS_SHA3=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AT803X_PHY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AT803X_PHY new file mode 100644 index 0000000000000000000000000000000000000000..93e86302bb00fef887d97b446488e5f2f860148a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AT803X_PHY @@ -0,0 +1 @@ +CONFIG_AT803X_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ATL2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ATL2 new file mode 100644 index 0000000000000000000000000000000000000000..f5b426720e085a1f0a288bde01ffb1b7da18ac83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ATL2 @@ -0,0 +1 @@ +# CONFIG_ATL2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_ARCH_COMPAT_GENERIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_ARCH_COMPAT_GENERIC new file mode 100644 index 0000000000000000000000000000000000000000..ec86bf5ac29ac45063e6b073203a2a873ac31707 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_ARCH_COMPAT_GENERIC @@ -0,0 +1 @@ +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_COMPAT_GENERIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_COMPAT_GENERIC new file mode 100644 index 0000000000000000000000000000000000000000..60219cf94e578364a262a889009f2130d9c09478 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_COMPAT_GENERIC @@ -0,0 +1 @@ +CONFIG_AUDIT_COMPAT_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_GENERIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_GENERIC new file mode 100644 index 0000000000000000000000000000000000000000..b20e404a08aa5a044471759d46fd535129319ad3 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_GENERIC @@ -0,0 +1 @@ +CONFIG_AUDIT_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..e4776be5a75682b7a15a55f921e9f336f3a7b7b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_GPIO @@ -0,0 +1 @@ +CONFIG_BACKLIGHT_GPIO=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_LED b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_LED new file mode 100644 index 0000000000000000000000000000000000000000..70fef3d5d54983d67b91165e0a572cb0a1ddb108 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_LED @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_LED is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_PWM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_PWM new file mode 100644 index 0000000000000000000000000000000000000000..44737e2148ffb1bc8e8d1589c6e183203ed69255 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_PWM @@ -0,0 +1 @@ +CONFIG_BACKLIGHT_PWM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCACHE_ASYNC_REGISTRATION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCACHE_ASYNC_REGISTRATION new file mode 100644 index 0000000000000000000000000000000000000000..3f41639fd68f1fe7df49e16d61eb48d526a6b904 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCACHE_ASYNC_REGISTRATION @@ -0,0 +1 @@ +CONFIG_BCACHE_ASYNC_REGISTRATION=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCM_SBA_RAID b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCM_SBA_RAID new file mode 100644 index 0000000000000000000000000000000000000000..ea5dba56e282b67a43cc124f847b96bbe748adab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCM_SBA_RAID @@ -0,0 +1 @@ +# CONFIG_BCM_SBA_RAID is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_CGROUP_IOLATENCY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_CGROUP_IOLATENCY new file mode 100644 index 0000000000000000000000000000000000000000..59e1ad44df4ef43dd8d81af9195c7e8ace0be962 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_CGROUP_IOLATENCY @@ -0,0 +1 @@ +# CONFIG_BLK_CGROUP_IOLATENCY is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_DEV_PMEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_DEV_PMEM new file mode 100644 index 0000000000000000000000000000000000000000..04cb6f83afed9f545d90cc4572903dcdec0530cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_DEV_PMEM @@ -0,0 +1 @@ +CONFIG_BLK_DEV_PMEM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BRCMSTB_GISB_ARB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BRCMSTB_GISB_ARB new file mode 100644 index 0000000000000000000000000000000000000000..36e31edd24fcf16d642df1bfc32fd2e72162269d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BRCMSTB_GISB_ARB @@ -0,0 +1 @@ +# CONFIG_BRCMSTB_GISB_ARB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BT new file mode 100644 index 0000000000000000000000000000000000000000..ce6ddb435863314ca2884b81708f572d4bbc5f79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BT @@ -0,0 +1 @@ +# CONFIG_BT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC new file mode 100644 index 
0000000000000000000000000000000000000000..2a6b95e058c11e5afc11b4eacc8da24b37f712b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC @@ -0,0 +1 @@ +CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CAVIUM_CPT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CAVIUM_CPT new file mode 100644 index 0000000000000000000000000000000000000000..18380568726d0906fa42d13bfc63f5efe98097a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CAVIUM_CPT @@ -0,0 +1 @@ +CONFIG_CAVIUM_CPT=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET new file mode 100644 index 0000000000000000000000000000000000000000..df222f914e4e84ca2108e13b8acc861a79cb7863 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET @@ -0,0 +1 @@ +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI new file mode 100644 index 0000000000000000000000000000000000000000..4270ef8ea7823bab2d7d26ddecbff2a255cdb079 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI @@ -0,0 +1 @@ +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_SIGN_RETURN_ADDRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_SIGN_RETURN_ADDRESS new file mode 100644 index 0000000000000000000000000000000000000000..4a4787f56440f3eca6b324512bf6c92fd7260276 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_SIGN_RETURN_ADDRESS @@ -0,0 +1 @@ +CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_SHADOW_CALL_STACK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_SHADOW_CALL_STACK new file mode 100644 index 0000000000000000000000000000000000000000..a8b721605f61630e4eef406d84e8552a517eff84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_SHADOW_CALL_STACK @@ -0,0 +1 @@ +CONFIG_CC_HAVE_SHADOW_CALL_STACK=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG new file mode 100644 index 0000000000000000000000000000000000000000..39c7dc935e4371a0be7288204e801ef121cd3bc0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG @@ -0,0 +1 @@ +CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CDX_BUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CDX_BUS new file mode 100644 index 0000000000000000000000000000000000000000..78899124383294a5127c6c2524d8a2a97a07216d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CDX_BUS @@ -0,0 +1 @@ +# CONFIG_CDX_BUS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_BQ24190 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_BQ24190 new file mode 100644 index 0000000000000000000000000000000000000000..676283f06119b1eb6c82bd2d8ee93fab1ff528ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_BQ24190 @@ -0,0 +1 @@ +# CONFIG_CHARGER_BQ24190 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_DETECTOR_MAX14656 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_DETECTOR_MAX14656 new file mode 100644 index 0000000000000000000000000000000000000000..434d9466ffc3b29dd985c1e4e0765342feb1cab1 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_DETECTOR_MAX14656 @@ -0,0 +1 @@ +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_MANAGER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_MANAGER new file mode 100644 index 0000000000000000000000000000000000000000..51fe252eb85e377dd2891c31f0eefc0fe7f83198 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_MANAGER @@ -0,0 +1 @@ +# CONFIG_CHARGER_MANAGER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9467 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9467 new file mode 100644 index 0000000000000000000000000000000000000000..46b07d722b213863fb868e7a6a623d8049177979 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9467 @@ -0,0 +1 @@ +# CONFIG_CHARGER_RT9467 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9471 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9471 new file mode 100644 index 0000000000000000000000000000000000000000..cd3acdd16199b90df8929f61b25510bf78edec6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9471 @@ -0,0 +1 @@ +# CONFIG_CHARGER_RT9471 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_SMB347 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_SMB347 new file mode 100644 index 0000000000000000000000000000000000000000..b388a2f6e9cdc2e12009800f18d376cdf3249628 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_SMB347 @@ -0,0 +1 @@ +CONFIG_CHARGER_SMB347=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_UCS1002 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_UCS1002 new file mode 100644 index 0000000000000000000000000000000000000000..22ed3b9c32adc0fd077e2a5d18f331eb908ffddd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_UCS1002 @@ -0,0 +1 @@ +# CONFIG_CHARGER_UCS1002 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_ACPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_ACPI new file mode 100644 index 0000000000000000000000000000000000000000..b8b2ba79c7e4ed7781db349a548a8ba2282c82d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_ACPI @@ -0,0 +1 @@ +# CONFIG_CHROMEOS_ACPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_PRIVACY_SCREEN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_PRIVACY_SCREEN new file mode 100644 index 0000000000000000000000000000000000000000..2e8ede7801207ca75e3bfc8f4bdf621569e8b396 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_PRIVACY_SCREEN @@ -0,0 +1 @@ +# CONFIG_CHROMEOS_PRIVACY_SCREEN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_TBMC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_TBMC new file mode 100644 index 0000000000000000000000000000000000000000..9252aafa4d955eff64531665e349847feea00ddb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_TBMC @@ -0,0 +1 @@ +# CONFIG_CHROMEOS_TBMC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROME_PLATFORMS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROME_PLATFORMS new file mode 100644 index 0000000000000000000000000000000000000000..c9336a3aaa0add841899dc7c36e026fdde19bcba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROME_PLATFORMS @@ -0,0 +1 @@ +CONFIG_CHROME_PLATFORMS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLKSRC_MMIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLKSRC_MMIO new file mode 100644 index 
0000000000000000000000000000000000000000..9f20297c5420353279874b13b9bcfa67cfbd1e47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLKSRC_MMIO @@ -0,0 +1 @@ +CONFIG_CLKSRC_MMIO=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_ICST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_ICST new file mode 100644 index 0000000000000000000000000000000000000000..0816de9818159228478a029c0756258d8002ff1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_ICST @@ -0,0 +1 @@ +# CONFIG_CLK_ICST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_SP810 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_SP810 new file mode 100644 index 0000000000000000000000000000000000000000..ff341b3ab47cd15e92cedc39116e702bdc37a3f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_SP810 @@ -0,0 +1 @@ +CONFIG_CLK_SP810=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_TEGRA_BPMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_TEGRA_BPMP new file mode 100644 index 0000000000000000000000000000000000000000..f6c6f3d59eff52224b02546a707e2c8725866cc2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_TEGRA_BPMP @@ -0,0 +1 @@ +CONFIG_CLK_TEGRA_BPMP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_VEXPRESS_OSC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_VEXPRESS_OSC new file mode 100644 index 0000000000000000000000000000000000000000..867fff294d10e26e9e029e2fea17835156d4e8c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_VEXPRESS_OSC @@ -0,0 +1 @@ +CONFIG_CLK_VEXPRESS_OSC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLONE_BACKWARDS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLONE_BACKWARDS new file mode 100644 index 0000000000000000000000000000000000000000..ef87d0e3bb7444a88e1fc15b47551160a7fbee14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLONE_BACKWARDS @@ -0,0 +1 @@ +CONFIG_CLONE_BACKWARDS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_AXI_CLKGEN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_AXI_CLKGEN new file mode 100644 index 0000000000000000000000000000000000000000..3db5c0c087ab6251b8b3dd560f84141a230cd254 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_AXI_CLKGEN @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_CDCE925 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_CDCE925 new file mode 100644 index 0000000000000000000000000000000000000000..ea85e587618fbd67e5c7e48687efbc9155921ee7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_CDCE925 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_CDCE925 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_FIXED_MMIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_FIXED_MMIO new file mode 100644 index 0000000000000000000000000000000000000000..85e803318d762c872b1317461b1af76bb231fe1f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_FIXED_MMIO @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_FIXED_MMIO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3516CV300 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3516CV300 new file mode 100644 index 0000000000000000000000000000000000000000..efc11711dfa46d2514a720e845822976f63beb58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3516CV300 @@ -0,0 +1 @@ +CONFIG_COMMON_CLK_HI3516CV300=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3519 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3519
new file mode 100644
index 0000000000000000000000000000000000000000..7e847bb7ad2c86502024457c5e23d3017930860a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3519
@@ -0,0 +1 @@
+CONFIG_COMMON_CLK_HI3519=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3559A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3559A
new file mode 100644
index 0000000000000000000000000000000000000000..d2a7dff8958a6fc483dc62c851255425c5c57eb7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3559A
@@ -0,0 +1 @@
+CONFIG_COMMON_CLK_HI3559A=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3660 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3660
new file mode 100644
index 0000000000000000000000000000000000000000..44693544d35b1431efdca7e84bf71ccfbc7dbe36
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3660
@@ -0,0 +1 @@
+CONFIG_COMMON_CLK_HI3660=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3670 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3670
new file mode 100644
index 0000000000000000000000000000000000000000..a2d57c82d98b705a88977ecdb8acbe97f49597d4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3670
@@ -0,0 +1 @@
+CONFIG_COMMON_CLK_HI3670=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3798CV200 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3798CV200
new file mode 100644
index 0000000000000000000000000000000000000000..b5e428f1536c2172501939519ca55edbcbaf8b80
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3798CV200
@@ -0,0 +1 @@
+CONFIG_COMMON_CLK_HI3798CV200=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI6220 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI6220
new file mode 100644
index 0000000000000000000000000000000000000000..7704eee260abd57af83c39a2fd110d3b5f7fc553
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI6220
@@ -0,0 +1 @@
+# CONFIG_COMMON_CLK_HI6220 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_QCOM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_QCOM
new file mode 100644
index 0000000000000000000000000000000000000000..e2adf60aa609e95fdbc48644606d4baebeab2ba3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_QCOM
@@ -0,0 +1 @@
+# CONFIG_COMMON_CLK_QCOM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_RS9_PCIE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_RS9_PCIE
new file mode 100644
index 0000000000000000000000000000000000000000..aaf8755ddc810887ca7ba2ecfacd899664e3e15c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_RS9_PCIE
@@ -0,0 +1 @@
+# CONFIG_COMMON_CLK_RS9_PCIE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SCPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SCPI
new file mode 100644
index 0000000000000000000000000000000000000000..b6694a09cbf05264e22422081d72c9bc45878b08
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SCPI
@@ -0,0 +1 @@
+CONFIG_COMMON_CLK_SCPI=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI514 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI514
new file mode 100644
index 0000000000000000000000000000000000000000..04b8fe1dcfedcf99cc22c04cd7d611344498038e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI514
@@ -0,0 +1 @@
+# CONFIG_COMMON_CLK_SI514 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI521XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI521XX
new file mode 100644
index 0000000000000000000000000000000000000000..0d95f4d4830031a37cac1e4a485fb41bf331086d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI521XX
@@ -0,0 +1 @@
+# CONFIG_COMMON_CLK_SI521XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI570 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI570
new file mode 100644
index 0000000000000000000000000000000000000000..aa746413aeeb60c0482537738f74837516300a7e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI570
@@ -0,0 +1 @@
+# CONFIG_COMMON_CLK_SI570 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC3
new file mode 100644
index 0000000000000000000000000000000000000000..fbf1e1b65afec6a268640d5b3edb7862778e2f9e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC3
@@ -0,0 +1 @@
+# CONFIG_COMMON_CLK_VC3 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC5 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC5
new file mode 100644
index 0000000000000000000000000000000000000000..9aaf6ae19368b1919ed2059a054ec1e187755a4b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC5
@@ -0,0 +1 @@
+# CONFIG_COMMON_CLK_VC5 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC7 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC7
new file mode 100644
index 0000000000000000000000000000000000000000..7d244219685d1956ee0bdbe4e157037f28fba123
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC7
@@ -0,0 +1 @@
+# CONFIG_COMMON_CLK_VC7 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XGENE
new file mode 100644
index 0000000000000000000000000000000000000000..44d0a9d9bdf8cf252fda7a2074ccdba2fb8f2ccf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XGENE
@@ -0,0 +1 @@
+CONFIG_COMMON_CLK_XGENE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XLNX_CLKWZRD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XLNX_CLKWZRD
new file mode 100644
index 0000000000000000000000000000000000000000..de088b678b1c2f205f9569ca9537ddf08b18aacc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XLNX_CLKWZRD
@@ -0,0 +1 @@
+# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI3660 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI3660
new file mode 100644
index 0000000000000000000000000000000000000000..0d13136e0265f948ec72ee734ab91f57fbba795d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI3660
@@ -0,0 +1 @@
+# CONFIG_COMMON_RESET_HI3660 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI6220 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI6220
new file mode 100644
index 0000000000000000000000000000000000000000..a603d7319ea240565d42491f5c2598228d31c434
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI6220
@@ -0,0 +1 @@
+CONFIG_COMMON_RESET_HI6220=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMPAT_ALIGNMENT_FIXUPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMPAT_ALIGNMENT_FIXUPS
new file mode 100644
index 0000000000000000000000000000000000000000..8107deaf0449bc290c5d38d8cc43972d2388e407
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMPAT_ALIGNMENT_FIXUPS
@@ -0,0 +1 @@
+# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_DUMMY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_DUMMY
new file mode 100644
index 0000000000000000000000000000000000000000..349cd167642410b22429af5489cf484349280d76
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_DUMMY
@@ -0,0 +1 @@
+# CONFIG_CORESIGHT_DUMMY is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDA
new file mode 100644
index 0000000000000000000000000000000000000000..4d36d06d8e9529de8398a87bba028d1ac9207067
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDA
@@ -0,0 +1 @@
+# CONFIG_CORESIGHT_TPDA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDM
new file mode 100644
index 0000000000000000000000000000000000000000..92de2396b7a602bbc8b21bb1519fb765e74124ab
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDM
@@ -0,0 +1 @@
+# CONFIG_CORESIGHT_TPDM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TRBE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TRBE
new file mode 100644
index 0000000000000000000000000000000000000000..4f6158a7e82614a8d6c562a6fdfe56f12fbc7163
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TRBE
@@ -0,0 +1 @@
+# CONFIG_CORESIGHT_TRBE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT
new file mode 100644
index 0000000000000000000000000000000000000000..5a49a05308e83e8566cac94a6fb96f316972e318
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT
@@ -0,0 +1 @@
+# CONFIG_CPUFREQ_DT is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT_PLATDEV b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT_PLATDEV
new file mode 100644
index 0000000000000000000000000000000000000000..62fd7906ad408cba15f1efb7597472f8484dc869
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT_PLATDEV
@@ -0,0 +1 @@
+# CONFIG_CPUFREQ_DT_PLATDEV is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPU_PM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPU_PM
new file mode 100644
index 0000000000000000000000000000000000000000..aa877af41c758896d096accc697dd83182b0a031
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPU_PM
@@ -0,0 +1 @@
+CONFIG_CPU_PM=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64
new file mode 100644
index 0000000000000000000000000000000000000000..5bc3a058626aa1ec3a21121792dcc805432ddbbc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64
@@ -0,0 +1 @@
+CONFIG_CRC64=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64_ROCKSOFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64_ROCKSOFT
new file mode 100644
index 0000000000000000000000000000000000000000..223c14ad37fe8e82222147236f58653466d63f3f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64_ROCKSOFT
@@ -0,0 +1 @@
+CONFIG_CRC64_ROCKSOFT=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_EC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_EC
new file mode 100644
index 0000000000000000000000000000000000000000..bd6dd449ba8e86809064ef0eb027ee229e229c38
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_EC
@@ -0,0 +1 @@
+# CONFIG_CROS_EC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_HPS_I2C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_HPS_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..19dd510cacb7d1a499682fe3041a129b28f58605
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_HPS_I2C
@@ -0,0 +1 @@
+# CONFIG_CROS_HPS_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_KBD_LED_BACKLIGHT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_KBD_LED_BACKLIGHT
new file mode 100644
index 0000000000000000000000000000000000000000..95f043d6889e75050027e8d15317880482024101
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_KBD_LED_BACKLIGHT
@@ -0,0 +1 @@
+# CONFIG_CROS_KBD_LED_BACKLIGHT is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_CRC64_ROCKSOFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_CRC64_ROCKSOFT
new file mode 100644
index 0000000000000000000000000000000000000000..1f6b24e092c6ff124c9f6178acdd7e980f815534
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_CRC64_ROCKSOFT
@@ -0,0 +1 @@
+CONFIG_CRYPTO_CRC64_ROCKSOFT=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CAVIUM_ZIP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CAVIUM_ZIP
new file mode 100644
index 0000000000000000000000000000000000000000..d5226e15704988e184cb10eb59203797c4030a19
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CAVIUM_ZIP
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CCREE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CCREE
new file mode 100644
index 0000000000000000000000000000000000000000..fe4fcee59846fd373995a5740137657f5dbed1cd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CCREE
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_DEV_CCREE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CPT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CPT
new file mode 100644
index 0000000000000000000000000000000000000000..364ba089fbf57fcda56b619e09da5a845fd02d71
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CPT
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_CPT=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_HPRE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_HPRE
new file mode 100644
index 0000000000000000000000000000000000000000..5c97a6853454835c328e1e1bd66aa988e26ccf8f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_HPRE
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_HISI_HPRE=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_QM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_QM
new file mode 100644
index 0000000000000000000000000000000000000000..6c75485a207ac276f199c74c3afac3bf1af2d73e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_QM
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_HISI_QM=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC
new file mode 100644
index 0000000000000000000000000000000000000000..517b45df771e94a8f06247d28163f0cd2f7015c8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_HISI_SEC=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC2
new file mode 100644
index 0000000000000000000000000000000000000000..59b3e238e20116565cfc43bb0c9f0d0ea664c689
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC2
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_HISI_SEC2=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_TRNG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_TRNG
new file mode 100644
index 0000000000000000000000000000000000000000..f203ee55d4b6ebd22eea997457b29bbf6d212c4b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_TRNG
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_HISI_TRNG=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_ZIP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_ZIP
new file mode 100644
index 0000000000000000000000000000000000000000..53d7f44de0a63dd07e069edf021ff0b87f2a752b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_ZIP
@@ -0,0 +1 @@
+CONFIG_CRYPTO_DEV_HISI_ZIP=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_OCTEONTX_CPT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_OCTEONTX_CPT
new file mode 100644
index 0000000000000000000000000000000000000000..19e3dc42c46c5862d472d116ccc1faba558cd23e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_OCTEONTX_CPT
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QAT_4XXX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QAT_4XXX
new file mode 100644
index 0000000000000000000000000000000000000000..54fd0faec1c8ae226cf3d0d9c951cc57fcd04c47
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QAT_4XXX
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_DEV_QAT_4XXX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCE
new file mode 100644
index 0000000000000000000000000000000000000000..206220e544d440981bf1d0d3cd6c4881f14c4d63
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCE
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_DEV_QCE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCOM_RNG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCOM_RNG
new file mode 100644
index 0000000000000000000000000000000000000000..cde300803e4c7d285f8096593945dde94db7af1c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCOM_RNG
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_DEV_QCOM_RNG is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_LIB_POLY1305_RSIZE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_LIB_POLY1305_RSIZE
new file mode 100644
index 0000000000000000000000000000000000000000..7eab5761438f4b5506c2bff6d9248bda61534c11
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_LIB_POLY1305_RSIZE
@@ -0,0 +1 @@
+CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_NHPOLY1305_NEON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_NHPOLY1305_NEON
new file mode 100644
index 0000000000000000000000000000000000000000..4dc5989cc3ac749245c5e718ef364e17352f439c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_NHPOLY1305_NEON
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_NHPOLY1305_NEON is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_POLYVAL_ARM64_CE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_POLYVAL_ARM64_CE
new file mode 100644
index 0000000000000000000000000000000000000000..611ebb23a0696da0669e80c211217062171df1b5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_POLYVAL_ARM64_CE
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA3_ARM64 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA3_ARM64
new file mode 100644
index 0000000000000000000000000000000000000000..dfbbf4fa5c2208ec0f0048a713a2bc6cf1ccce19
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA3_ARM64
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_SHA3_ARM64 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64
new file mode 100644
index 0000000000000000000000000000000000000000..ed3f38d42132f714dd247ee286e7a15a669ebb0a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_SHA512_ARM64 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64_CE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64_CE
new file mode 100644
index 0000000000000000000000000000000000000000..26a1ababe45c06fec87b475eb09855aadbf2d85c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64_CE
@@ -0,0 +1 @@
+# CONFIG_CRYPTO_SHA512_ARM64_CE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DEBUG_EFI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DEBUG_EFI
new file mode 100644
index 0000000000000000000000000000000000000000..68b7b8a1e5a767903de32c6f6ade04fdcebce8f7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DEBUG_EFI
@@ -0,0 +1 @@
+# CONFIG_DEBUG_EFI is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMABUF_HEAPS_CMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMABUF_HEAPS_CMA
new file mode 100644
index 0000000000000000000000000000000000000000..ce7b3dc64bc3cfe21ef48cff0833237498769c01
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMABUF_HEAPS_CMA
@@ -0,0 +1 @@
+# CONFIG_DMABUF_HEAPS_CMA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC
new file mode 100644
index 0000000000000000000000000000000000000000..f8a01763fe23d75b6e2c2bc2d4f271a65cfa6b12
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC
@@ -0,0 +1 @@
+CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DECLARE_COHERENT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DECLARE_COHERENT
new file mode 100644
index 0000000000000000000000000000000000000000..77abed5ca11ccf28ac7dc1fb7ee56adc1a1987df
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DECLARE_COHERENT
@@ -0,0 +1 @@
+CONFIG_DMA_DECLARE_COHERENT=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DIRECT_REMAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DIRECT_REMAP
new file mode 100644
index 0000000000000000000000000000000000000000..d918392b946440dfcac2c3d03648362980bacdd6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DIRECT_REMAP
@@ -0,0 +1 @@
+CONFIG_DMA_DIRECT_REMAP=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_NONCOHERENT_MMAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_NONCOHERENT_MMAP
new file mode 100644
index 0000000000000000000000000000000000000000..b0dd4928f15716b7de8c0b4c37a343bc5629fdf6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_NONCOHERENT_MMAP
@@ -0,0 +1 @@
+CONFIG_DMA_NONCOHERENT_MMAP=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_OF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_OF
new file mode 100644
index 0000000000000000000000000000000000000000..ffc03612089fd36908af7fabd4f237771c0c2146
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_OF
@@ -0,0 +1 @@
+CONFIG_DMA_OF=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_RESTRICTED_POOL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_RESTRICTED_POOL
new file mode 100644
index 0000000000000000000000000000000000000000..2f680768ebef136e6f821c00e8f2c8278fdf6692
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_RESTRICTED_POOL
@@ -0,0 +1 @@
+# CONFIG_DMA_RESTRICTED_POOL is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX6345 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX6345
new file mode 100644
index 0000000000000000000000000000000000000000..4597ef1bb5adda37ef7969f258ec12d3034d9eb0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX6345
@@ -0,0 +1 @@
+# CONFIG_DRM_ANALOGIX_ANX6345 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX7625 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX7625
new file mode 100644
index 0000000000000000000000000000000000000000..8399c28a8c180314cef1d744ea542fd2457b5a5f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX7625
@@ -0,0 +1 @@
+# CONFIG_DRM_ANALOGIX_ANX7625 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ARCPGU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ARCPGU
new file mode 100644
index 0000000000000000000000000000000000000000..d7d5d576abf628ca534c91a6794a865442cf2985
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ARCPGU
@@ -0,0 +1 @@
+# CONFIG_DRM_ARCPGU is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_DSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_DSI
new file mode 100644
index 0000000000000000000000000000000000000000..c20551264c473908369dc75f282e3e376784e035
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_DSI
@@ -0,0 +1 @@
+# CONFIG_DRM_CDNS_DSI is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_MHDP8546 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_MHDP8546
new file mode 100644
index 0000000000000000000000000000000000000000..2f08c639144cb821fcb592e2ac71a70cdb9c9e19
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_MHDP8546
@@ -0,0 +1 @@
+# CONFIG_DRM_CDNS_MHDP8546 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHIPONE_ICN6211 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHIPONE_ICN6211
new file mode 100644
index 0000000000000000000000000000000000000000..1c971f5c7a81e9a2ba7ef56f0de15d96bd945289
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHIPONE_ICN6211
@@ -0,0 +1 @@
+# CONFIG_DRM_CHIPONE_ICN6211 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHRONTEL_CH7033 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHRONTEL_CH7033
new file mode 100644
index 0000000000000000000000000000000000000000..5cfc88342d4f92a7efb28b6369f1f419dc531a8f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHRONTEL_CH7033
@@ -0,0 +1 @@
+# CONFIG_DRM_CHRONTEL_CH7033 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DISPLAY_CONNECTOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DISPLAY_CONNECTOR
new file mode 100644
index 0000000000000000000000000000000000000000..e39ee39f440385feb29c83fbec3466edc38c48cc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DISPLAY_CONNECTOR
@@ -0,0 +1 @@
+# CONFIG_DRM_DISPLAY_CONNECTOR is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DP_CEC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DP_CEC
new file mode 100644
index 0000000000000000000000000000000000000000..5f9b385554c1e1a4f674f250fef1065eaa202904
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DP_CEC
@@ -0,0 +1 @@
+CONFIG_DRM_DP_CEC=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HDLCD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HDLCD
new file mode 100644
index 0000000000000000000000000000000000000000..36ae461a61cc12dd41ba39879dca3e6bcccb249e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HDLCD
@@ -0,0 +1 @@
+# CONFIG_DRM_HDLCD is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_HIBMC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_HIBMC
new file mode 100644
index 0000000000000000000000000000000000000000..3138ee3c9ad752a8f6fee3ee307b3c63447ecc7a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_HIBMC
@@ -0,0 +1 @@
+CONFIG_DRM_HISI_HIBMC=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_KIRIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_KIRIN
new file mode 100644
index 0000000000000000000000000000000000000000..23c7279b82b571e48e066b63728de8d3a287c7ba
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_KIRIN
@@ -0,0 +1 @@
+# CONFIG_DRM_HISI_KIRIN is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_ADV7511 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_ADV7511
new file mode 100644
index 0000000000000000000000000000000000000000..18208c93f5003bde27f0e1260566b8663124cda9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_ADV7511
@@ -0,0 +1 @@
+# CONFIG_DRM_I2C_ADV7511 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_NXP_TDA998X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_NXP_TDA998X
new file mode 100644
index 0000000000000000000000000000000000000000..a816d583e51051d8bebabfa8621fd0c4f822b98b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_NXP_TDA998X
@@ -0,0 +1 @@
+CONFIG_DRM_I2C_NXP_TDA998X=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_SIL164 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_SIL164
new file mode 100644
index 0000000000000000000000000000000000000000..44078d9b63cb295fb0c392079d2b046afdae1206
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_SIL164
@@ -0,0 +1 @@
+# CONFIG_DRM_I2C_SIL164 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT6505 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT6505
new file mode 100644
index 0000000000000000000000000000000000000000..9575170f0c4f7c1232364a1e14c32aa897e70038
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT6505
@@ -0,0 +1 @@
+# CONFIG_DRM_ITE_IT6505 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT66121 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT66121
new file mode 100644
index 0000000000000000000000000000000000000000..4e7581a9507cb5bd153b8545467912a6c57ace87
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT66121
@@ -0,0 +1 @@
+# CONFIG_DRM_ITE_IT66121 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_KOMEDA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_KOMEDA
new file mode 100644
index 0000000000000000000000000000000000000000..14369562c667d5ced99588d2a61782b05d09b417
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_KOMEDA
@@ -0,0 +1 @@
+# CONFIG_DRM_KOMEDA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LIMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LIMA
new file mode 100644
index 0000000000000000000000000000000000000000..eb1331ceaeeeba44f71f2f315c39fb25814430c4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LIMA
@@ -0,0 +1 @@
+# CONFIG_DRM_LIMA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LOGICVC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LOGICVC
new file mode 100644
index 0000000000000000000000000000000000000000..30b6af31130e092720f5cc5f08a962574fd97d98
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LOGICVC
@@ -0,0 +1 @@
+# CONFIG_DRM_LOGICVC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT8912B b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT8912B
new file mode 100644
index 0000000000000000000000000000000000000000..3ea4cd1709251ae89cda9eec6250a46ee1a0742e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT8912B
@@ -0,0 +1 @@
+# CONFIG_DRM_LONTIUM_LT8912B is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9211 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9211
new file mode 100644
index 0000000000000000000000000000000000000000..8ab179f6394ec831d34d2b27dcfddf0eb62ebb4e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9211
@@ -0,0 +1 @@
+# CONFIG_DRM_LONTIUM_LT9211 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611
new file mode 100644
index 0000000000000000000000000000000000000000..0ee3b259ff15af3945e702d8c3cd98857c3d12bd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611
@@ -0,0 +1 @@
+# CONFIG_DRM_LONTIUM_LT9611 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611UXC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611UXC
new file mode 100644
index 0000000000000000000000000000000000000000..d9827b457ccc48ebb395c585952d39cdb66c846a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611UXC
@@ -0,0 +1 @@
+# CONFIG_DRM_LONTIUM_LT9611UXC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LVDS_CODEC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LVDS_CODEC
new file mode 100644
index 0000000000000000000000000000000000000000..9b5f29038f3939a626c6d1b6f58935e43878e9e7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LVDS_CODEC
@@ -0,0 +1 @@
+# CONFIG_DRM_LVDS_CODEC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MALI_DISPLAY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MALI_DISPLAY
new file mode 100644
index 0000000000000000000000000000000000000000..af3d09fbe2eb018050472c656b125504c8d78acf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MALI_DISPLAY
@@ -0,0 +1 @@
+# CONFIG_DRM_MALI_DISPLAY is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
new file mode 100644
index 0000000000000000000000000000000000000000..7aa3826f4d4c77f2ad0cd28415edf37b27717b9f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
@@ -0,0 +1 @@
+# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MSM
new file mode 100644
index 0000000000000000000000000000000000000000..fc038c98bf410f06b0e81189454bb32267a03a35
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MSM
@@ -0,0 +1 @@
+# CONFIG_DRM_MSM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NOUVEAU_SVM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NOUVEAU_SVM
new file mode 100644
index 0000000000000000000000000000000000000000..dbd2333b2894bd6c2335efb267826f17ee963aa5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NOUVEAU_SVM
@@ -0,0 +1 @@
+# CONFIG_DRM_NOUVEAU_SVM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NWL_MIPI_DSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NWL_MIPI_DSI
new file mode 100644
index 0000000000000000000000000000000000000000..6f4e4e857904b4dd8b07e131b0648e26272d5585
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NWL_MIPI_DSI
@@ -0,0 +1 @@
+# CONFIG_DRM_NWL_MIPI_DSI is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NXP_PTN3460 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NXP_PTN3460
new file mode 100644
index 0000000000000000000000000000000000000000..be8b96f7918c977bba9c875b17c1aa4fb64aef0f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NXP_PTN3460
@@ -0,0 +1 @@
+# CONFIG_DRM_NXP_PTN3460 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ABT_Y030XX067A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ABT_Y030XX067A
new file mode 100644
index 0000000000000000000000000000000000000000..9aea79539571eab232b39f933f932dacee81dd14
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ABT_Y030XX067A
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ARM_VERSATILE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ARM_VERSATILE
new file mode 100644
index 0000000000000000000000000000000000000000..f5ffd8d31741cc726eee934c020baf83b63fdd66
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ARM_VERSATILE
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_ARM_VERSATILE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_EDP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_EDP
new file mode 100644
index 0000000000000000000000000000000000000000..92a647ca83f2ad8870feaf4a0e353aa8158f598e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_EDP
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_EDP is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_IL9322 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_IL9322
new file mode 100644
index 0000000000000000000000000000000000000000..4a9fd454c2b86badbdc96f6b4b907a0e7b0338de
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_IL9322
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_INNOLUX_EJ030NA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_INNOLUX_EJ030NA
new file mode 100644
index 0000000000000000000000000000000000000000..8713ccc27e1ef75f066d2453a923968f8a5eaff4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_INNOLUX_EJ030NA
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LB035Q02 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LB035Q02
new file mode 100644
index 0000000000000000000000000000000000000000..fe60a1992da450d10f30f2aeb690477203683821
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LB035Q02
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_LG_LB035Q02 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LG4573 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LG4573
new file mode 100644
index 0000000000000000000000000000000000000000..35ecd6eba8e6ab56f0c71cb35a2ba3a88573e4b7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LG4573
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_LG_LG4573 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LVDS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LVDS
new file mode 100644
index 0000000000000000000000000000000000000000..af4bf6e016a36f9df220ed8ee00ac51b479f9d74
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LVDS
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_LVDS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEC_NL8048HL11 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEC_NL8048HL11
new file mode 100644
index 0000000000000000000000000000000000000000..339ff848fea429df82a83576816e2114ccc717ef
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEC_NL8048HL11
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEWVISION_NV3052C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEWVISION_NV3052C
new file mode 100644
index 0000000000000000000000000000000000000000..16c72b0ef3ec67638a84c3a77cfdd500cb2f3d24
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEWVISION_NV3052C
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NOVATEK_NT39016 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NOVATEK_NT39016
new file mode 100644
index 0000000000000000000000000000000000000000..edce2272337d0fc2138fbe0ad2602bbd9a172f3f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NOVATEK_NT39016
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO
new file mode 100644
index 0000000000000000000000000000000000000000..14e8c8bb288dab31f91933ab7410047752f5a167
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20
new file mode 100644
index 0000000000000000000000000000000000000000..5d2b9f2ef7ffcb785c757602b27ad1d5c6db4f57
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_DB7430 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_DB7430
new file mode 100644
index 0000000000000000000000000000000000000000..f1c456d76050fc85334e7c04e4e65dfb784365cc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_DB7430
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_LD9040 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_LD9040
new file mode 100644
index 0000000000000000000000000000000000000000..8aae098881b99d604238a22df3b004aa9ade247b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_LD9040
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D27A1
new file mode 100644
index 0000000000000000000000000000000000000000..b9c43be811c1bbf4d313415f18cd64e340ca937a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D27A1
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0
new file mode 100644
index 0000000000000000000000000000000000000000..44ebe3ed950420e7eabfac62bda659728048e4ce
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E63M0
new file mode 100644
index 0000000000000000000000000000000000000000..1dd8218ba968123dd90c0d2dffb6747651dc13f4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E63M0
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
new file mode 100644
index 0000000000000000000000000000000000000000..8572cb504b43113894442e4623223e57657fdffe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0
new file mode 100644
index 0000000000000000000000000000000000000000..253f569ccd123223fcfa843dc012072e948bd3ed
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SEIKO_43WVF1G b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SEIKO_43WVF1G
new file mode 100644
index 0000000000000000000000000000000000000000..b32cceac2f1dec88e57f3023f8fb6772343dc866
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SEIKO_43WVF1G
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SHARP_LS037V7DW01 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SHARP_LS037V7DW01
new file mode 100644
index 0000000000000000000000000000000000000000..26cceaa221e2ca96d0c544acca53cd3dd7967a2c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SHARP_LS037V7DW01
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SIMPLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SIMPLE
new file mode 100644
index 0000000000000000000000000000000000000000..a1ed63eca094ae8ee6eec3719887b14302672b20
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SIMPLE
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SIMPLE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SITRONIX_ST7789V b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SITRONIX_ST7789V
new file mode 100644
index 0000000000000000000000000000000000000000..712cb79a7b320be56d3fb23d64071633ad42b839
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SITRONIX_ST7789V
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SONY_ACX565AKM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SONY_ACX565AKM
new file mode 100644
index 0000000000000000000000000000000000000000..de1d34f6bd0152cc0c41faf0327c381dddb00e2c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SONY_ACX565AKM
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD028TTEC1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD028TTEC1
new file mode 100644
index 0000000000000000000000000000000000000000..8c0af5fb484ea3720ad679ab21cd229f779a03f8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD028TTEC1
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD043MTEA1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD043MTEA1
new file mode 100644
index 0000000000000000000000000000000000000000..4494ef96f0cdec03ab403911968464b8dd9e7a90
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD043MTEA1
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TPG110 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TPG110
new file mode 100644
index 0000000000000000000000000000000000000000..df2c43c266f155b068bdb2a4fa456e98915593e4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TPG110
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_TPO_TPG110 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANFROST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANFROST
new file mode 100644
index 0000000000000000000000000000000000000000..2f4bbc3243e48bb3db595513495d0e0f214105a8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANFROST
@@ -0,0 +1 @@
+# CONFIG_DRM_PANFROST is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8622 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8622
new file mode 100644
index 0000000000000000000000000000000000000000..e541cb69375f4001a3b343da3b7c21e84e49236a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8622
@@ -0,0 +1 @@
+# CONFIG_DRM_PARADE_PS8622 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8640 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8640
new file mode 100644
index 0000000000000000000000000000000000000000..fe2aea0dcf319923abc1940fd77db5dffa534291
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8640
@@ -0,0 +1 @@
+# CONFIG_DRM_PARADE_PS8640 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PL111 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PL111
new file mode 100644
index 0000000000000000000000000000000000000000..770564391dc06004840ab4e0612dc4e8a94c80a6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PL111
@@ -0,0 +1 @@
+# CONFIG_DRM_PL111 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SAMSUNG_DSIM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SAMSUNG_DSIM
new file mode 100644
index 0000000000000000000000000000000000000000..ab2a01510d370094e5ed6bd9487e5d0522c24c4f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SAMSUNG_DSIM
@@ -0,0 +1 @@
+# CONFIG_DRM_SAMSUNG_DSIM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII902X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII902X
new file mode 100644
index 0000000000000000000000000000000000000000..18a102e07d1ba3608d2170608c70b9caa63969fe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII902X
@@ -0,0 +1 @@
+# CONFIG_DRM_SII902X is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII9234 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII9234
new file mode 100644
index 0000000000000000000000000000000000000000..b0bfbfb7afd9b938a79019be882e4502dabf90ff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII9234
@@ -0,0 +1 @@
+# CONFIG_DRM_SII9234 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIL_SII8620 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIL_SII8620
new file mode 100644
index 0000000000000000000000000000000000000000..651ddf007090a358f05a8830ba10f2a39e1a58e0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIL_SII8620
@@ -0,0 +1 @@
+# CONFIG_DRM_SIL_SII8620 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIMPLE_BRIDGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIMPLE_BRIDGE
new file mode 100644
index 0000000000000000000000000000000000000000..263c76996e194e6588ff5bfa9c3ee55ca69ba932
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIMPLE_BRIDGE
@@ -0,0 +1 @@
+# CONFIG_DRM_SIMPLE_BRIDGE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TEGRA
new file mode 100644
index 0000000000000000000000000000000000000000..37ca811bfebcc60f129b5bcf0238e93e6c3ffbbe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TEGRA
@@ -0,0 +1 @@
+# CONFIG_DRM_TEGRA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_THINE_THC63LVD1024 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_THINE_THC63LVD1024
new file mode 100644
index 0000000000000000000000000000000000000000..d62fd2aa222ce424f3093a3a1b09a3dfc9155e2c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_THINE_THC63LVD1024
@@ -0,0 +1 @@
+# CONFIG_DRM_THINE_THC63LVD1024 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TIDSS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TIDSS
new file mode 100644
index 0000000000000000000000000000000000000000..8afce19a3e0c5e9222e7e63acbc80f3a3fbdee5f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TIDSS
@@ -0,0 +1 @@
+# CONFIG_DRM_TIDSS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_DLPC3433 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_DLPC3433
new file mode 100644
index 0000000000000000000000000000000000000000..2ae917aa6c2e9c1a974cdd53e6cb18b04405a855
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_DLPC3433
@@ -0,0 +1 @@
+# CONFIG_DRM_TI_DLPC3433 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI83 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI83
new file mode 100644
index 0000000000000000000000000000000000000000..7e47d0baf490b2e90902a29e3c4759571ebcecfc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI83
@@ -0,0 +1 @@
+# CONFIG_DRM_TI_SN65DSI83 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI86 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI86
new file mode 100644
index 0000000000000000000000000000000000000000..d3088d3a9d5f7f0ce749fc63d57fdeb813ccfb09
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI86
@@ -0,0 +1 @@
+# CONFIG_DRM_TI_SN65DSI86 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TFP410 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TFP410
new file mode 100644
index 0000000000000000000000000000000000000000..b4f765e302535d018a06beaff07c376a0cd411c8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TFP410
@@ -0,0 +1 @@
+# CONFIG_DRM_TI_TFP410 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TPD12S015 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TPD12S015
new file mode 100644
index 0000000000000000000000000000000000000000..038aad3b72ba92c0483d8a678898b6bea771bba5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TPD12S015
@@ -0,0 +1 @@
+# CONFIG_DRM_TI_TPD12S015 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358762 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358762
new file mode 100644
index 0000000000000000000000000000000000000000..a99c1adf0ba5ee764bff95c0c38d8d56e2a15dc3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358762
@@ -0,0 +1 @@
+# CONFIG_DRM_TOSHIBA_TC358762 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358764 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358764
new file mode 100644
index 0000000000000000000000000000000000000000..67c629a9d438ba5dd27cdd89999a3ac8defcf43b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358764
@@ -0,0 +1 @@
+# CONFIG_DRM_TOSHIBA_TC358764 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358767 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358767
new file mode 100644
index 0000000000000000000000000000000000000000..cdb5218c62f3fcba47e1e4eeddbc89856a83ab36
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358767
@@ -0,0 +1 @@
+# CONFIG_DRM_TOSHIBA_TC358767 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358768 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358768
new file mode 100644
index 0000000000000000000000000000000000000000..5aef40e4d497753c0ed1f5fa5b36179c3e63e389
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358768
@@ -0,0 +1 @@
+# CONFIG_DRM_TOSHIBA_TC358768 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358775 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358775
new file mode 100644
index 0000000000000000000000000000000000000000..5090a0707875217410cacdf9aa54060218b639fe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358775
@@ -0,0 +1 @@
+# CONFIG_DRM_TOSHIBA_TC358775 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DTC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DTC
new file mode 100644
index 0000000000000000000000000000000000000000..64dbfdd901d71bf5c4d7ca94830d0f630ef73679
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DTC
@@ -0,0 +1 @@
+CONFIG_DTC=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_AXI_DMAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_AXI_DMAC
new file mode 100644
index 0000000000000000000000000000000000000000..0d88fd60aa04f65e73a4c75f75a8637abe3f0ab7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_AXI_DMAC
@@ -0,0 +1 @@
+# CONFIG_DW_AXI_DMAC is not set
0000000000000000000000000000000000000000..0d88fd60aa04f65e73a4c75f75a8637abe3f0ab7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_AXI_DMAC @@ -0,0 +1 @@ +# CONFIG_DW_AXI_DMAC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_DMAC_CORE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_DMAC_CORE new file mode 100644 index 0000000000000000000000000000000000000000..bac83695b10f1b3d36478147dacad67af8a4c7fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_DMAC_CORE @@ -0,0 +1 @@ +CONFIG_DW_DMAC_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS new file mode 100644 index 0000000000000000000000000000000000000000..080a3fc455b679f7d279029478773d553f669a01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS @@ -0,0 +1 @@ +CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_DMC520 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_DMC520 new file mode 100644 index 0000000000000000000000000000000000000000..c93ee4727e98e28b6a68296c2dd6e05b728e1d36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_DMC520 @@ -0,0 +1 @@ +# CONFIG_EDAC_DMC520 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_THUNDERX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_THUNDERX new file mode 100644 index 0000000000000000000000000000000000000000..dae44bb0212587c1b82e7a3c736a16d47c5bf7f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_THUNDERX @@ -0,0 +1 @@ +CONFIG_EDAC_THUNDERX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_GENERIC_STUB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_GENERIC_STUB new file mode 100644 index 0000000000000000000000000000000000000000..7e8f6b07561c953021f082d027ef15c0b023cd6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_GENERIC_STUB @@ -0,0 +1 @@ +CONFIG_EFI_GENERIC_STUB=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_PARAMS_FROM_FDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_PARAMS_FROM_FDT new file mode 100644 index 0000000000000000000000000000000000000000..2324b275770ec2f977ee89c385c647012773fd6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_PARAMS_FROM_FDT @@ -0,0 +1 @@ +CONFIG_EFI_PARAMS_FROM_FDT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_ZBOOT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_ZBOOT new file mode 100644 index 0000000000000000000000000000000000000000..dd98202456bb0aae743a62b3161c65491284c8eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_ZBOOT @@ -0,0 +1 @@ +# CONFIG_EFI_ZBOOT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_FSA9480 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_FSA9480 new file mode 100644 index 0000000000000000000000000000000000000000..d1cee17ac1b927a8aabb28c863a73c04a3803850 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_FSA9480 @@ -0,0 +1 @@ +# CONFIG_EXTCON_FSA9480 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_MAX3355 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_MAX3355 new file mode 100644 index 0000000000000000000000000000000000000000..680b5a774265bc04a732106535078d18c6d7b542 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_MAX3355 @@ -0,0 +1 @@ +# CONFIG_EXTCON_MAX3355 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_PTN5150 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_PTN5150 
new file mode 100644 index 0000000000000000000000000000000000000000..092a8419c9ff41df87a6bf6d348c30583ad30059 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_PTN5150 @@ -0,0 +1 @@ +# CONFIG_EXTCON_PTN5150 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_QCOM_SPMI_MISC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_QCOM_SPMI_MISC new file mode 100644 index 0000000000000000000000000000000000000000..5f2508da3ffc28c89a9026735fd36cbd47dcbb35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_QCOM_SPMI_MISC @@ -0,0 +1 @@ +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_RT8973A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_RT8973A new file mode 100644 index 0000000000000000000000000000000000000000..e5f7236c9c69195645eab8f4a9347b7655bce614 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_RT8973A @@ -0,0 +1 @@ +# CONFIG_EXTCON_RT8973A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_SM5502 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_SM5502 new file mode 100644 index 0000000000000000000000000000000000000000..916994aa9714a68cc5e1c8efa2a5893bd3825e5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_SM5502 @@ -0,0 +1 @@ +# CONFIG_EXTCON_SM5502 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USBC_TUSB320 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USBC_TUSB320 new file mode 100644 index 0000000000000000000000000000000000000000..4eedae83091b20281d04db15c368c4060d827890 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USBC_TUSB320 @@ -0,0 +1 @@ +# CONFIG_EXTCON_USBC_TUSB320 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USB_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USB_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..7a0c9af305f06d0bbaf17615a4a9248801007ce5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USB_GPIO @@ -0,0 +1 @@ +# CONFIG_EXTCON_USB_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_ARMCLCD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_ARMCLCD new file mode 100644 index 0000000000000000000000000000000000000000..7d58051b7869b0e3aae74ae7950983a165111d59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_ARMCLCD @@ -0,0 +1 @@ +# CONFIG_FB_ARMCLCD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_BACKLIGHT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_BACKLIGHT new file mode 100644 index 0000000000000000000000000000000000000000..cbd70c3ee0c70ef8b68935574d5f353d8c03772e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_BACKLIGHT @@ -0,0 +1 @@ +CONFIG_FB_BACKLIGHT=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_SM750 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_SM750 new file mode 100644 index 0000000000000000000000000000000000000000..2275940beb47412ede8cd6c6385e440b071f00b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_SM750 @@ -0,0 +1 @@ +# CONFIG_FB_SM750 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_TFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_TFT new file mode 100644 index 0000000000000000000000000000000000000000..ec64dd1894d34739eccd9a37611b58e9b36c837f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_TFT @@ -0,0 +1 @@ +# CONFIG_FB_TFT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIELDBUS_DEV b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIELDBUS_DEV 
new file mode 100644 index 0000000000000000000000000000000000000000..7c61724580d652e718cdc347edd3cc477ac2c569 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIELDBUS_DEV @@ -0,0 +1 @@ +# CONFIG_FIELDBUS_DEV is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIREWIRE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIREWIRE new file mode 100644 index 0000000000000000000000000000000000000000..d75b839c4d30778626f2a0fba4fa32a595132b21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIREWIRE @@ -0,0 +1 @@ +# CONFIG_FIREWIRE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FRAME_POINTER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FRAME_POINTER new file mode 100644 index 0000000000000000000000000000000000000000..90a6184b155abc438a7aa2564b408ca6dba62fbe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FRAME_POINTER @@ -0,0 +1 @@ +CONFIG_FRAME_POINTER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSI new file mode 100644 index 0000000000000000000000000000000000000000..da7d313294159e1af76b5817f619f4f6d8327779 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSI @@ -0,0 +1 @@ +# CONFIG_FSI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_EDMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_EDMA new file mode 100644 index 0000000000000000000000000000000000000000..d9673ee72c68154bf9c47797a5c620b6191fcdda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_EDMA @@ -0,0 +1 @@ +# CONFIG_FSL_EDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_ERRATUM_A008585 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_ERRATUM_A008585 new file mode 100644 index 0000000000000000000000000000000000000000..8dce0afd8c311faf0b7448b7911c88a8bb35710b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_ERRATUM_A008585 @@ -0,0 +1 @@ +CONFIG_FSL_ERRATUM_A008585=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_QDMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_QDMA new file mode 100644 index 0000000000000000000000000000000000000000..f26803f1e4559573f9c7c2b49a99853697a952e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_QDMA @@ -0,0 +1 @@ +# CONFIG_FSL_QDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_RCPM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_RCPM new file mode 100644 index 0000000000000000000000000000000000000000..aab0d456e6a1b267cefcfffaf56f76b0e0c09ee0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_RCPM @@ -0,0 +1 @@ +# CONFIG_FSL_RCPM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY new file mode 100644 index 0000000000000000000000000000000000000000..d1f89cd7802f2a5ab2703841a139443ebc8a0073 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY @@ -0,0 +1 @@ +CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUJITSU_ES b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUJITSU_ES new file mode 100644 index 0000000000000000000000000000000000000000..39b50c904155dbb9dfb10acb8ac99ed1548395a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUJITSU_ES @@ -0,0 +1 @@ +# CONFIG_FUJITSU_ES is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT new file 
index 0000000000000000000000000000000000000000..711e4b76e221d3cfea7f155a0efd3c2ab3ef091e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT
@@ -0,0 +1 @@
+CONFIG_FUNCTION_ALIGNMENT=8
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT_8B b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT_8B
new file mode 100644
index 0000000000000000000000000000000000000000..0fe9f0d17b10be25ecbe92b8a77d8130282eadf6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT_8B
@@ -0,0 +1 @@
+CONFIG_FUNCTION_ALIGNMENT_8B=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
new file mode 100644
index 0000000000000000000000000000000000000000..9f843f7e3aa47f0796f6fe25a257ab9c8dbe404e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
@@ -0,0 +1 @@
+CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_ARCH_TOPOLOGY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_ARCH_TOPOLOGY
new file mode 100644
index 0000000000000000000000000000000000000000..e40dd4f28e2dd3f4106d053629f8ed305fda2be9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_ARCH_TOPOLOGY
@@ -0,0 +1 @@
+CONFIG_GENERIC_ARCH_TOPOLOGY=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_CSUM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_CSUM
new file mode 100644
index 0000000000000000000000000000000000000000..b93b55b27d643c3fffc8dc0e9deacacd27dceb95
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_CSUM
@@ -0,0 +1 @@
+CONFIG_GENERIC_CSUM=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_HWEIGHT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_HWEIGHT
new file mode 100644
index 0000000000000000000000000000000000000000..6b9eb552a3de0a096e8c99c709ae5ddd143bec44
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_HWEIGHT
@@ -0,0 +1 @@
+CONFIG_GENERIC_HWEIGHT=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IDLE_POLL_SETUP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IDLE_POLL_SETUP
new file mode 100644
index 0000000000000000000000000000000000000000..e509206c959f5ed6ca479d1bdcfa9a779c3be732
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IDLE_POLL_SETUP
@@ -0,0 +1 @@
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_IPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_IPI
new file mode 100644
index 0000000000000000000000000000000000000000..64496f3114935140f39fb0a38de8d00babf0c77c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_IPI
@@ -0,0 +1 @@
+CONFIG_GENERIC_IRQ_IPI=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_SHOW_LEVEL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_SHOW_LEVEL
new file mode 100644
index 0000000000000000000000000000000000000000..b5f8b58f38a9eeb9731a1cf8e0fde5efb5c4a25d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_SHOW_LEVEL
@@ -0,0 +1 @@
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_SCHED_CLOCK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_SCHED_CLOCK
new file mode 100644
index 0000000000000000000000000000000000000000..3f66970f695784b3bf38d9f5946066a46335eeff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_SCHED_CLOCK
@@ -0,0 +1 @@
+CONFIG_GENERIC_SCHED_CLOCK=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74X164 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74X164
new file mode 100644
index 0000000000000000000000000000000000000000..543197b0bab16c86abca3919621c017bf19e2786
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74X164
@@ -0,0 +1 @@
+# CONFIG_GPIO_74X164 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74XX_MMIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74XX_MMIO
new file mode 100644
index 0000000000000000000000000000000000000000..b761ec18af4a092f057b78e71167f0b8ae4dc7fe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74XX_MMIO
@@ -0,0 +1 @@
+# CONFIG_GPIO_74XX_MMIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ADNP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ADNP
new file mode 100644
index 0000000000000000000000000000000000000000..5f2bab733e6704f0501ae49e88ec2a857d616f10
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ADNP
@@ -0,0 +1 @@
+# CONFIG_GPIO_ADNP is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ALTERA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ALTERA
new file mode 100644
index 0000000000000000000000000000000000000000..621d4475bd0ac32c3cb87271b590759b4d2d87f6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ALTERA
@@ -0,0 +1 @@
+# CONFIG_GPIO_ALTERA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_CADENCE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_CADENCE
new file mode 100644
index 0000000000000000000000000000000000000000..fd915b0e90d9fdc9beaad5c73376a0e1e38ba87d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_CADENCE
@@ -0,0 +1 @@
+# CONFIG_GPIO_CADENCE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_DWAPB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_DWAPB
new file mode 100644
index 0000000000000000000000000000000000000000..eabf56effa8af71c8e8b97e0da321178bfede1ad
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_DWAPB
@@ -0,0 +1 @@
+CONFIG_GPIO_DWAPB=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_FTGPIO010 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_FTGPIO010
new file mode 100644
index 0000000000000000000000000000000000000000..3a1fb41ea8694d1508aab57a87cda558d184e4c2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_FTGPIO010
@@ -0,0 +1 @@
+# CONFIG_GPIO_FTGPIO010 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GRGPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GRGPIO
new file mode 100644
index 0000000000000000000000000000000000000000..826d836bf5af2f94ba25d410a3e796798e06ff7c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GRGPIO
@@ -0,0 +1 @@
+# CONFIG_GPIO_GRGPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GW_PLD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GW_PLD
new file mode 100644
index 0000000000000000000000000000000000000000..a521799fe070401d57637df0cad813dea2e021e1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GW_PLD
@@ -0,0 +1 @@
+# CONFIG_GPIO_GW_PLD is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_HLWD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_HLWD
new file mode 100644
index 0000000000000000000000000000000000000000..85864554caa380e2210e8502c53eaa7de380b254
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_HLWD
@@ -0,0 +1 @@
+# CONFIG_GPIO_HLWD is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_LOGICVC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_LOGICVC
new file mode 100644
index 0000000000000000000000000000000000000000..1ee4eb11a7995c92de0d08c549055f1adfd7ee46
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_LOGICVC
@@ -0,0 +1 @@
+# CONFIG_GPIO_LOGICVC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_PL061 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_PL061
new file mode 100644
index 0000000000000000000000000000000000000000..36d6b513b17631c52160c62c3dbaed4612b8a9c0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_PL061
@@ -0,0 +1 @@
+CONFIG_GPIO_PL061=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SIFIVE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SIFIVE
new file mode 100644
index 0000000000000000000000000000000000000000..461cb49e425c2c9e94f33fce9f0a1f6fe2e5b818
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SIFIVE
@@ -0,0 +1 @@
+# CONFIG_GPIO_SIFIVE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SYSCON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SYSCON
new file mode 100644
index 0000000000000000000000000000000000000000..714d52d62085d401dc9d8b6ba808a58fc8517d62
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SYSCON
@@ -0,0 +1 @@
+# CONFIG_GPIO_SYSCON is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_TEGRA
new file mode 100644
index 0000000000000000000000000000000000000000..72b59399c32c56769665f0f0ce0b1ac64380c32b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_TEGRA
@@ -0,0 +1 @@
+CONFIG_GPIO_TEGRA=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_THUNDERX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_THUNDERX
new file mode 100644
index 0000000000000000000000000000000000000000..7a45d6a752153e820855db363a73e1babd39d5db
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_THUNDERX
@@ -0,0 +1 @@
+# CONFIG_GPIO_THUNDERX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_WATCHDOG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..37cf2150c1fe26f75f131dc97835e26231059e09
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_WATCHDOG
@@ -0,0 +1 @@
+CONFIG_GPIO_WATCHDOG=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE
new file mode 100644
index 0000000000000000000000000000000000000000..f210ebcce6a6851c17711e8870dd5a1a7978b4bd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE
@@ -0,0 +1 @@
+CONFIG_GPIO_XGENE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE_SB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE_SB
new file mode 100644
index 0000000000000000000000000000000000000000..01e2940751f2614d405b1d221483372a8a8f0bd0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE_SB
@@ -0,0 +1 @@
+CONFIG_GPIO_XGENE_SB=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XILINX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XILINX
new file mode 100644
index 0000000000000000000000000000000000000000..d7769012beb7d42e5b5e979675421c20cec69af6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XILINX
@@ -0,0 +1 @@
+# CONFIG_GPIO_XILINX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XLP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XLP
new file mode 100644
index 0000000000000000000000000000000000000000..f99cd41f3bc59420382852bc29fc905c62f2d8fd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XLP
@@ -0,0 +1 @@
+CONFIG_GPIO_XLP=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_BITREVERSE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_BITREVERSE
new file mode 100644
index 0000000000000000000000000000000000000000..e13003cb82ac8aecf2af432a27442b63f27f4bb2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_BITREVERSE
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_BITREVERSE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_COMPILER_H b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_COMPILER_H
new file mode 100644
index 0000000000000000000000000000000000000000..f6287de8ae2143c389fe4fa3fb4ae09aa2637150
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_COMPILER_H
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_COMPILER_H=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_HW_TAGS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_HW_TAGS
new file mode 100644
index 0000000000000000000000000000000000000000..c6dd2f170ad4eacf3af9c8bbddfad0dcddcf0a54
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_HW_TAGS
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_SW_TAGS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_SW_TAGS
new file mode 100644
index 0000000000000000000000000000000000000000..18b5a02c7b7c454b8346ec310bfa55daeeb074d9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_SW_TAGS
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC
new file mode 100644
index 0000000000000000000000000000000000000000..34c80e3eacfb2b7d7829493b4a7c7c6dea429553
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC
@@ -0,0 +1 @@
+CONFIG_HAVE_ARM_SMCCC=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC_DISCOVERY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC_DISCOVERY
new file mode 100644
index 0000000000000000000000000000000000000000..a156962aec957a41892b1a6b00765066be1b639b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC_DISCOVERY
@@ -0,0 +1 @@
+CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
new file mode 100644
index 0000000000000000000000000000000000000000..a4bf5edc053507076af77a99d994b51416f95b68
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
@@ -0,0 +1 @@
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
new file mode 100644
index 0000000000000000000000000000000000000000..fd2a1e088d72863ee00a4d968a08859e18d27c17
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
@@ -0,0 +1 @@
+CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_PREEMPT_DYNAMIC_KEY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_PREEMPT_DYNAMIC_KEY
new file mode 100644
index 0000000000000000000000000000000000000000..c0dea18ed0a7f8bd95a5fd2bf8b0fae2c4a1a070
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_PREEMPT_DYNAMIC_KEY
@@ -0,0 +1 @@
+CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI3660_MBOX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI3660_MBOX
new file mode 100644
index 0000000000000000000000000000000000000000..f41b9a6d575b029f9a8e9421e2772aecbba7a29b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI3660_MBOX
@@ -0,0 +1 @@
+CONFIG_HI3660_MBOX=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI6220_MBOX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI6220_MBOX
new file mode 100644
index 0000000000000000000000000000000000000000..e8313e8a8fa38eff11b38a48387f3503d448a278
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI6220_MBOX
@@ -0,0 +1 @@
+CONFIG_HI6220_MBOX=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ALPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ALPS
new file mode 100644
index 0000000000000000000000000000000000000000..a0a848d703f035407e7977a5d069be4a4bdefd69
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ALPS
@@ -0,0 +1 @@
+# CONFIG_HID_ALPS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ASUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ASUS
new file mode 100644
index 0000000000000000000000000000000000000000..b5df524a7a25e2fa35078dd5a3df6bc78ee9febe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ASUS
@@ -0,0 +1 @@
+# CONFIG_HID_ASUS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_CMEDIA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_CMEDIA
new file mode 100644
index 0000000000000000000000000000000000000000..63e6eb1cdf1c7a75e563927e6b4c41e3ee748fa3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_CMEDIA
@@ -0,0 +1 @@
+# CONFIG_HID_CMEDIA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_HYPERV_MOUSE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_HYPERV_MOUSE
new file mode 100644
index 0000000000000000000000000000000000000000..4207656134b8d500b8995097b5336c91a681be1b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_HYPERV_MOUSE
@@ -0,0 +1 @@
+# CONFIG_HID_HYPERV_MOUSE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_CUSTOM_SENSOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_CUSTOM_SENSOR
new file mode 100644
index 0000000000000000000000000000000000000000..ff4111852e8ca20cba511dec52a0c7aebfd7e7b3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_CUSTOM_SENSOR
@@ -0,0 +1 @@
+# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_HUB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_HUB
new file mode 100644
index 0000000000000000000000000000000000000000..bfb17e4031e30f8d439a0de3f83140255b411a00
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_HUB
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_HUB=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIP04_ETH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIP04_ETH
new file mode 100644
index 0000000000000000000000000000000000000000..0c4541ec0626bef3fda50264be628e8e250c2845
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIP04_ETH
@@ -0,0 +1 @@
+# CONFIG_HIP04_ETH is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_ERRATUM_161010101 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_ERRATUM_161010101
new file mode 100644
index 0000000000000000000000000000000000000000..b5914254a5aaedd0973c8e1654e58b719eabe90e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_ERRATUM_161010101
@@ -0,0 +1 @@
+CONFIG_HISILICON_ERRATUM_161010101=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_IRQ_MBIGEN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_IRQ_MBIGEN
new file mode 100644
index 0000000000000000000000000000000000000000..5ca6d30d145a508cacd915e719c8069f88b7fc7c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_IRQ_MBIGEN
@@ -0,0 +1 @@
+CONFIG_HISILICON_IRQ_MBIGEN=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_LPC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_LPC
new file mode 100644
index 0000000000000000000000000000000000000000..fff86879cd66317e9bf101aad6a12decc30c35bb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_LPC
@@ -0,0 +1 @@
+CONFIG_HISILICON_LPC=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_ACC_VFIO_PCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_ACC_VFIO_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..ac59d0074877ef4a26651977230085e6237e8caa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_ACC_VFIO_PCI
@@ -0,0 +1 @@
+# CONFIG_HISI_ACC_VFIO_PCI is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_FEMAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_FEMAC
new file mode 100644
index 0000000000000000000000000000000000000000..42a71fa39fcbd1ff9b1ea9a1139a72351872cf04
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_FEMAC
@@ -0,0 +1 @@
+# CONFIG_HISI_FEMAC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_HIKEY_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_HIKEY_USB
new file mode 100644
index 0000000000000000000000000000000000000000..9e6126e97046916e1b45488eb4108033c6719435
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_HIKEY_USB
@@ -0,0 +1 @@
+# CONFIG_HISI_HIKEY_USB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PCIE_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PCIE_PMU
new file mode 100644
index 0000000000000000000000000000000000000000..6f10dd0dbed961e03eb7f06104e013bcc4d432c2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PCIE_PMU
@@ -0,0 +1 @@
+CONFIG_HISI_PCIE_PMU=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PMU
new file mode 100644
index 0000000000000000000000000000000000000000..f9809111ad969b31ed6cf1713b0e9effeb829c2a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PMU
@@ -0,0 +1 @@
+CONFIG_HISI_PMU=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PTT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PTT
new file mode 100644
index 0000000000000000000000000000000000000000..790d8f3547781e0dbd646bb034a0ab2c44448085
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PTT
@@ -0,0 +1 @@
+# CONFIG_HISI_PTT is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIX5HD2_GMAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIX5HD2_GMAC
new file mode 100644
index 0000000000000000000000000000000000000000..94b80b5d701a8c2a28229aa3dcfd6ed8d252a14a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIX5HD2_GMAC
@@ -0,0 +1 @@
+# CONFIG_HIX5HD2_GMAC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS
new file mode 100644
index 0000000000000000000000000000000000000000..b4839e93f57f06ecb8d55c764c5107929f76ccb0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS
@@ -0,0 +1 @@
+CONFIG_HNS=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS3_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS3_PMU
new file mode 100644
index 0000000000000000000000000000000000000000..513062961f770a130f8b51484e1d058f03ddceb6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS3_PMU
@@ -0,0 +1 @@
+# CONFIG_HNS3_PMU is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HOTPLUG_PCI_SHPC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HOTPLUG_PCI_SHPC
new file mode 100644
index 0000000000000000000000000000000000000000..f03be8a8f294f6eeeff0c6dbb9b52d1d969f259b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HOTPLUG_PCI_SHPC
@@ -0,0 +1 @@
+# CONFIG_HOTPLUG_PCI_SHPC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_ILO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_ILO
new file mode 100644
index 0000000000000000000000000000000000000000..74de63a688846394fe3c096c76874c9264d9879e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_ILO
@@ -0,0 +1 @@
+# CONFIG_HP_ILO is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_WATCHDOG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..d2bf24e6b5cdf738e07d97c7c40608b0cdee7e14
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_WATCHDOG
@@ -0,0 +1 @@
+# CONFIG_HP_WATCHDOG is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HSA_AMD_SVM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HSA_AMD_SVM
new file mode 100644
index 0000000000000000000000000000000000000000..59b1db6bcfbbcdbd017ed3e542728c1c95ae0ef7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HSA_AMD_SVM
@@ -0,0 +1 @@
+CONFIG_HSA_AMD_SVM=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HVC_DCC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HVC_DCC
new file mode 100644
index 0000000000000000000000000000000000000000..b2e800764a497dad92c01199bea5cdead13a58f2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HVC_DCC
@@ -0,0 +1 @@
+# CONFIG_HVC_DCC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HWSPINLOCK_QCOM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HWSPINLOCK_QCOM
new file mode 100644
index 0000000000000000000000000000000000000000..ccfa8e92649e3e7ab4750f7e7a934b0f73d918ce
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HWSPINLOCK_QCOM
@@ -0,0 +1 @@
+# CONFIG_HWSPINLOCK_QCOM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_PERF_EVENTS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_PERF_EVENTS
new file mode 100644
index 0000000000000000000000000000000000000000..10c0008c175e073c4a45e92fcf9b3e4f17221ea3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_PERF_EVENTS
@@ -0,0 +1 @@
+CONFIG_HW_PERF_EVENTS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_ARM_SMCCC_TRNG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_ARM_SMCCC_TRNG
new file mode 100644
index 0000000000000000000000000000000000000000..bf0e4ee3a9dc4c84e515a016396f7e0f555eb643
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_ARM_SMCCC_TRNG
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CAVIUM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CAVIUM
new file mode 100644
index 0000000000000000000000000000000000000000..e3bec9744b535c7bd6bcae678de493beb9bb9f92
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CAVIUM
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_CAVIUM=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CCTRNG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CCTRNG
new file mode 100644
index 0000000000000000000000000000000000000000..97f5363c7233e2627135df18a8fb540182d874b2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CCTRNG
@@ -0,0 +1 @@
+# CONFIG_HW_RANDOM_CCTRNG is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CN10K b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CN10K
new file mode 100644
index 0000000000000000000000000000000000000000..84a1cd4682d0f38a802933e1b056079c0dd76216
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CN10K
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_CN10K=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISI
new file mode 100644
index 0000000000000000000000000000000000000000..727609690c50a3ea21321b065f50ae5326b56c31
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISI
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_HISI=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISTB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISTB
new file mode 100644
index 0000000000000000000000000000000000000000..621d496a1abad933f79e27d7c3af149a965829f9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISTB
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_HISTB=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_VIRTIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_VIRTIO
new file mode 100644
index 0000000000000000000000000000000000000000..1b31e553522e78342fcdb58561c00eaaa92577af
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_VIRTIO
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_VIRTIO=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_XGENE
new file mode 100644
index 0000000000000000000000000000000000000000..85af60d35a4143443fbf759baae23b2173c92708
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_XGENE
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_XGENE=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HYPERV_NET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HYPERV_NET
new file mode 100644
index 0000000000000000000000000000000000000000..4868361bd13ddbe8571dfae32553589dcb30336c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HYPERV_NET
@@ -0,0 +1 @@
+# CONFIG_HYPERV_NET is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ALGOPCF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ALGOPCF
new file mode 100644
index 0000000000000000000000000000000000000000..98e1f0c7a1ee30afe46663ec2738468e08ab826e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ALGOPCF
@@ -0,0 +1 @@
+CONFIG_I2C_ALGOPCF=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD756 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD756
new file mode 100644
index 0000000000000000000000000000000000000000..d30b6d5d4827f3649f7043f6e24cbdf59ca38f25
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD756
@@ -0,0 +1 @@
+# CONFIG_I2C_AMD756 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD8111 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD8111
new file mode 100644
index 0000000000000000000000000000000000000000..f200ec046b480899023009fe21e2ccb79fbe9697
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD8111
@@ -0,0 +1 @@
+# CONFIG_I2C_AMD8111 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ARB_GPIO_CHALLENGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ARB_GPIO_CHALLENGE
new file mode 100644
index 0000000000000000000000000000000000000000..b828d65082c1b32194ddea6e83efd7b217732afb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ARB_GPIO_CHALLENGE
@@ -0,0 +1 @@
+CONFIG_I2C_ARB_GPIO_CHALLENGE=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_CADENCE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_CADENCE
new file mode 100644
index 0000000000000000000000000000000000000000..f18079afe06f6c9f0368b6fad4146c832f102411
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_CADENCE
@@ -0,0 +1 @@
+# CONFIG_I2C_CADENCE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_DEMUX_PINCTRL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_DEMUX_PINCTRL
new file mode 100644
index 0000000000000000000000000000000000000000..92132d0e44d9b5d7839c6a40e9269399713ccc3a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_DEMUX_PINCTRL
@@ -0,0 +1 @@
+# CONFIG_I2C_DEMUX_PINCTRL is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..99bcd1824491c750d78bc3ac568a9b8339eaf5eb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO
@@ -0,0 +1 @@
+CONFIG_I2C_GPIO=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO_FAULT_INJECTOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO_FAULT_INJECTOR
new file mode 100644
index 0000000000000000000000000000000000000000..711cee4c93fb536e9a03f13f779dfbc91e782889
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO_FAULT_INJECTOR
@@ -0,0 +1 @@
+# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HELPER_AUTO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HELPER_AUTO
new file mode 100644
index 0000000000000000000000000000000000000000..335f5749903c29f9e08263e8296a9b0a1f93a48c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HELPER_AUTO
@@ -0,0 +1 @@
+# CONFIG_I2C_HELPER_AUTO is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_ELAN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_ELAN
new file mode 100644
index 0000000000000000000000000000000000000000..26857f6f7ebed11be72ca94415bb4bdce708e113
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_ELAN
@@ -0,0 +1 @@
+# CONFIG_I2C_HID_OF_ELAN is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_GOODIX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_GOODIX
new file mode 100644
index 0000000000000000000000000000000000000000..ad8ec4bc1fb7a4603118956882d5bfe19f8fc1f8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_GOODIX
@@ -0,0 +1 @@
+# CONFIG_I2C_HID_OF_GOODIX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HISI
new file mode 100644
index 0000000000000000000000000000000000000000..47a85722ea5fea44c45a87a4ff674a8ed1fe0d0e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HISI
@@ -0,0 +1 @@
+CONFIG_I2C_HISI=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HIX5HD2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HIX5HD2
new file mode 100644
index 0000000000000000000000000000000000000000..7b16fe2e5038f48a150c1ce23f7daf9c583784c1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HIX5HD2
@@ -0,0 +1 @@
+# CONFIG_I2C_HIX5HD2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_I801 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_I801
new file mode 100644
index 0000000000000000000000000000000000000000..2663365cdb830eb2ea52a55ecf75c73e42f7e923
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_I801
@@ -0,0 +1 @@
+# CONFIG_I2C_I801 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ISCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ISCH
new file mode 100644
index 0000000000000000000000000000000000000000..083275fd959490e910ba4d80319ca2d8152a766f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ISCH
@@ -0,0 +1 @@
+# CONFIG_I2C_ISCH is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MLXCPLD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MLXCPLD
new file mode 100644
index 0000000000000000000000000000000000000000..5f09745e3fc5f14cd0f05cfbc7636793e888a5ee
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MLXCPLD
@@ -0,0 +1 @@
+# CONFIG_I2C_MLXCPLD is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..f9fcb2b2fe9742f5ea3755c929d22a87f40b3a1b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPIO
@@ -0,0 +1 @@
+CONFIG_I2C_MUX_GPIO=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPMUX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPMUX
new file mode 100644
index 0000000000000000000000000000000000000000..62f68a33512900a86c60cd56b616aee8b66257fd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPMUX
@@ -0,0 +1 @@
+# CONFIG_I2C_MUX_GPMUX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA9541 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA9541
new file mode 100644
index 0000000000000000000000000000000000000000..5b5fe60520758493dc9a675de64228041b8fa5b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA9541
@@ -0,0 +1 @@
+CONFIG_I2C_MUX_PCA9541=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA954x b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA954x
new file mode 100644
index 0000000000000000000000000000000000000000..24c8bfd0fcb61c9b9f150d6a0a208c2e06a16083
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA954x
@@ -0,0 +1 @@
+CONFIG_I2C_MUX_PCA954x=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PINCTRL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PINCTRL
new file mode 100644
index 0000000000000000000000000000000000000000..eadba88bd9866716cafa514e21880ae136fcf5df
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PINCTRL
@@ -0,0 +1 @@
+CONFIG_I2C_MUX_PINCTRL=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_NOMADIK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_NOMADIK
new file mode 100644
index 0000000000000000000000000000000000000000..244fb5321433ae5ac778ca2c30ceed49b0f8e4d5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_NOMADIK
@@ -0,0 +1 @@
+# CONFIG_I2C_NOMADIK is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_PIIX4 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_PIIX4
new file mode 100644
index 0000000000000000000000000000000000000000..ed534b4739a52b1f59fb67b7175649ebbe9abe8e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_PIIX4
@@ -0,0 +1 @@
+# CONFIG_I2C_PIIX4 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QCOM_CCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QCOM_CCI
new file mode 100644
index 0000000000000000000000000000000000000000..b66c46744f33bfd6fc8189ec5e4c8d7659524dd2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QCOM_CCI
@@ -0,0 +1 @@
+# CONFIG_I2C_QCOM_CCI is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QUP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QUP
new file mode 100644
index 0000000000000000000000000000000000000000..7fb7cd350d2c7fb9e91984f631ab722ac1d9ac97
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QUP
@@ -0,0 +1 @@
+CONFIG_I2C_QUP=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_RK3X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_RK3X
new file mode 100644
index 0000000000000000000000000000000000000000..b11d3db83351f856afe240d79536d990891bce8f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_RK3X
@@ -0,0 +1 @@
+# CONFIG_I2C_RK3X is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SCMI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SCMI
new file mode 100644
index 0000000000000000000000000000000000000000..fcaad2a3a490c9f615193db85a688d06eff01c2d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SCMI
@@ -0,0 +1 @@
+# CONFIG_I2C_SCMI is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SIS96X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SIS96X
new file mode 100644
index 0000000000000000000000000000000000000000..d896b4d2fbb54b8ad3ab97dbf308aa99e84ff840
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SIS96X
@@ -0,0 +1 @@
+# CONFIG_I2C_SIS96X is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SLAVE_TESTUNIT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SLAVE_TESTUNIT
new file mode 100644
index 0000000000000000000000000000000000000000..14e66f89c8a8ce7e087580a1cc18901d52d54c7f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SLAVE_TESTUNIT
@@ -0,0 +1 @@
+# CONFIG_I2C_SLAVE_TESTUNIT is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA
new file mode 100644
index 0000000000000000000000000000000000000000..eb085793b5290d2489ab11f2a6be0f61bb729bcb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA
@@ -0,0 +1 @@
+# CONFIG_I2C_TEGRA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA_BPMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA_BPMP
new file mode 100644
index 0000000000000000000000000000000000000000..1357b8f6b829bc7d1680ba890ab12fd201186719
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA_BPMP
@@ -0,0 +1 @@
+CONFIG_I2C_TEGRA_BPMP=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_THUNDERX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_THUNDERX
new file mode 100644
index 0000000000000000000000000000000000000000..c544a8c6863a96c2bbfca3f86f94a672ddf5a9af
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_THUNDERX
@@ -0,0 +1 @@
+CONFIG_I2C_THUNDERX=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VERSATILE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VERSATILE
new file mode 100644
index 0000000000000000000000000000000000000000..3591505da8710ff50908550fb84135971a9cd239
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VERSATILE
@@ -0,0 +1 @@
+CONFIG_I2C_VERSATILE=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIA
new file mode 100644
index 0000000000000000000000000000000000000000..7fa98de9de28ec6f518b9aeb505d8d63e6b7d23e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIA
@@ -0,0 +1 @@
+# CONFIG_I2C_VIA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIAPRO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIAPRO
new file mode 100644
index 0000000000000000000000000000000000000000..a1e152bbe6d657bf5fb2ecde1185ca9ed2b22c2b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIAPRO
@@ -0,0 +1 @@
+# CONFIG_I2C_VIAPRO is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XGENE_SLIMPRO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XGENE_SLIMPRO
new file mode 100644
index 0000000000000000000000000000000000000000..44af582d4b80d3bc7d5887c17cf72c67547f0590
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XGENE_SLIMPRO
@@ -0,0 +1 @@
+CONFIG_I2C_XGENE_SLIMPRO=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XLP9XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XLP9XX
new file mode 100644
index 0000000000000000000000000000000000000000..bcc41c37608fe00b262a13bb2a4133e9145ffb31
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XLP9XX
@@ -0,0 +1 @@
+CONFIG_I2C_XLP9XX=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN
new file mode 100644
index 0000000000000000000000000000000000000000..b8667abedd33e81633d0bc1eafc2198136156819
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN
@@ -0,0 +1 @@
+# CONFIG_I2C_ZHAOXIN is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN_SMBUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN_SMBUS
new file mode 100644
index 0000000000000000000000000000000000000000..65414ab9fc9c8e6d6c55cbbd429d5d61afadd0af
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN_SMBUS
@@ -0,0 +1 @@
+# CONFIG_I2C_ZHAOXIN_SMBUS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IEEE802154_FAKELB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IEEE802154_FAKELB
new file mode 100644
index 0000000000000000000000000000000000000000..a6386c72c6c1287da774b9cc766fe972aa325649
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IEEE802154_FAKELB
@@ -0,0 +1 @@
+# CONFIG_IEEE802154_FAKELB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IIO
new file mode 100644
index 0000000000000000000000000000000000000000..80d289815272656dd688c4429f6565640bc3e49e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IIO
@@ -0,0 +1 @@
+# CONFIG_IIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS
new file mode 100644
index 0000000000000000000000000000000000000000..7b67654797c7e019c8e420717c5a113a42542e00
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_HNS=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS_HIP08 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS_HIP08
new file mode 100644
index 0000000000000000000000000000000000000000..bc6e71eae8b612ebab8c1375a9703c99124d7e6a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS_HIP08
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_HNS_HIP08=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_JOYDEV b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_JOYDEV
new file mode 100644
index 0000000000000000000000000000000000000000..f0730fa0823faa4fcdbe431bc4f991daa5f0a6e5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_JOYDEV
@@ -0,0 +1 @@
+# CONFIG_INPUT_JOYDEV is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_MISC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_MISC
new file mode 100644
index 0000000000000000000000000000000000000000..f9ad3d4b752dbf9fd68266cb0b4dd29b72ca9c6b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_MISC
@@ -0,0 +1 @@
+# CONFIG_INPUT_MISC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TABLET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TABLET
new file mode 100644
index 0000000000000000000000000000000000000000..ea8f01dfa4c62b17ca9c6cb8790c78ed7527dca4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TABLET
@@ -0,0 +1 @@
+# CONFIG_INPUT_TABLET is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TOUCHSCREEN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TOUCHSCREEN
new file mode 100644
index 0000000000000000000000000000000000000000..e74a7dfb04e848c398018aede43ee3c1d0cf30b8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TOUCHSCREEN
@@ -0,0 +1 @@
+# CONFIG_INPUT_TOUCHSCREEN is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMUFD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMUFD
new file mode 100644
index 0000000000000000000000000000000000000000..e8af97f060942d206a6e7bb4bbbda6055d34f3bb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMUFD
@@ -0,0 +1 @@
+# CONFIG_IOMMUFD is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_ARMV7S b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_ARMV7S
new file mode 100644
index 0000000000000000000000000000000000000000..9f8a9a2ca806994a0b0d170cf6d8aac985c22922
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_ARMV7S
@@ -0,0 +1 @@
+# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_DART b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_DART
new file mode 100644
index 0000000000000000000000000000000000000000..2a03ed16f53b2e690abeaacff38b092ab43aedda
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_DART
@@ -0,0 +1 @@
+# CONFIG_IOMMU_IO_PGTABLE_DART is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
new file mode 100644
index 0000000000000000000000000000000000000000..b6f514a3f1aca626ce08ef798124c1e5ca11a353
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
@@ -0,0 +1 @@
+# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMB_DEVICE_INTERFACE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMB_DEVICE_INTERFACE
new file mode 100644
index 0000000000000000000000000000000000000000..ec241f3218b3046893e107cf3272aa9bc70eb5dd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMB_DEVICE_INTERFACE
@@ -0,0 +1 @@
+# CONFIG_IPMB_DEVICE_INTERFACE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMI_IPMB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMI_IPMB
new file mode 100644
index 0000000000000000000000000000000000000000..a76add6debefd1ad7fe9e70f6ea40cdc247b7285
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMI_IPMB
@@ -0,0 +1 @@
+# CONFIG_IPMI_IPMB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQCHIP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQCHIP
new file mode 100644
index 0000000000000000000000000000000000000000..fd0d005fc8548b65ddcb325d82c0c693ba7f6902
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQCHIP
@@ -0,0 +1 @@
+CONFIG_IRQCHIP=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_BYPASS_MANAGER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_BYPASS_MANAGER
new file mode 100644
index 0000000000000000000000000000000000000000..b10c64ccdb3acb41d575cee1df14601318846771
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_BYPASS_MANAGER
@@ -0,0 +1 @@
+CONFIG_IRQ_BYPASS_MANAGER=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
new file mode 100644
index 0000000000000000000000000000000000000000..76712fca39729ad40957bef670ae269dfb4e596e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
@@ -0,0 +1 @@
+CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISDN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISDN
new file mode 100644
index 0000000000000000000000000000000000000000..7cf059484bb36384d4a2e50e1c8a03eee88b6f73
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISDN
@@ -0,0 +1 @@
+# CONFIG_ISDN is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29003 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29003
new file mode 100644
index 0000000000000000000000000000000000000000..26672531904838c2b68d41ad086f6dc93554f7ac
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29003
@@ -0,0 +1 @@
+# CONFIG_ISL29003 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29020 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29020
new file mode 100644
index 0000000000000000000000000000000000000000..abf4d72ad7713903aca5d52a570668d920459f9c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29020
@@ -0,0 +1 @@
+# CONFIG_ISL29020 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_K3_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_K3_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..b698e7e5e1e9643fd86f1dc484ae14b68446f4ab
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_K3_DMA
@@ -0,0 +1 @@
+# CONFIG_K3_DMA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KARMA_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KARMA_PARTITION
new file mode 100644
index 0000000000000000000000000000000000000000..ea284b410a9099811829f0ad0e113c0a0e4d395b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KARMA_PARTITION
@@ -0,0 +1 @@
+# CONFIG_KARMA_PARTITION is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KERNEL_MODE_NEON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KERNEL_MODE_NEON
new file mode 100644
index 0000000000000000000000000000000000000000..9f6b7232f00f94e8b8628e202be1712d089a07a9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KERNEL_MODE_NEON
@@ -0,0 +1 @@
+CONFIG_KERNEL_MODE_NEON=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_BCM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_BCM
new file mode 100644
index 0000000000000000000000000000000000000000..d904364b517f6dec12ee74cd742e5e97d52cb840
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_BCM
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_BCM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_CAP11XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_CAP11XX
new file mode 100644
index 0000000000000000000000000000000000000000..add2537e2901901bbc1edcf33502d6d9a40aaa57
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_CAP11XX
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_CAP11XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..46a9f62efb08b9532854a69b69b9e09486f6d132
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_GPIO
@@ -0,0 +1 @@
+CONFIG_KEYBOARD_GPIO=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_OMAP4 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_OMAP4
new file mode 100644
index 0000000000000000000000000000000000000000..e3ce3f315af23779f547c63767f6f3a5dba35639
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_OMAP4
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_OMAP4 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_PINEPHONE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_PINEPHONE
new file mode 100644
index 0000000000000000000000000000000000000000..714e0bec40bc648ce9cf965820a9187bf86690a7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_PINEPHONE
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_PINEPHONE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_TEGRA
new file mode 100644
index 0000000000000000000000000000000000000000..e9a849277c1bc70f7614e95ac1fb64629c34135d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_TEGRA
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_TEGRA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KS7010 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KS7010
new file mode 100644
index 0000000000000000000000000000000000000000..169ffed69d682a2ecfa149fbfb03ed38228e14be
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KS7010
@@ -0,0 +1 @@
+# CONFIG_KS7010 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AAT1290 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AAT1290
new file mode 100644
index 0000000000000000000000000000000000000000..ab2a5b89a146eeaae80f4691c8e93d85bf38c305
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AAT1290
@@ -0,0 +1 @@
+# CONFIG_LEDS_AAT1290 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AN30259A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AN30259A
new file mode 100644
index 0000000000000000000000000000000000000000..7732da37590d183af8d7708c8cd53ae061a0b18f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AN30259A
@@ -0,0 +1 @@
+# CONFIG_LEDS_AN30259A is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AS3645A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AS3645A
new file mode 100644
index 0000000000000000000000000000000000000000..b5ebf4ccf3419de16fca02ad21e81cced23a8ad9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AS3645A
@@ -0,0 +1 @@
+# CONFIG_LEDS_AS3645A is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AW2013 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AW2013
new file mode 100644
index 0000000000000000000000000000000000000000..9a84d9f6bb8659adca6416eade35a782009e1dcc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AW2013
@@ -0,0 +1 @@
+# CONFIG_LEDS_AW2013 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6328 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6328
new file mode 100644
index 0000000000000000000000000000000000000000..b4c1a1e368a87edf0fce2b1a810cf0aaec52bce4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6328
@@ -0,0 +1 @@
+# CONFIG_LEDS_BCM6328 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6358 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6358
new file mode 100644
index 0000000000000000000000000000000000000000..98e396e48929c5a342584812fc0591868d0e5bbb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6358
@@ -0,0 +1 @@
+# CONFIG_LEDS_BCM6358 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CLASS_FLASH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CLASS_FLASH
new file mode 100644
index 0000000000000000000000000000000000000000..fef225ff2d538f47b7215887e9e73e8d3d40f9d5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CLASS_FLASH
@@ -0,0 +1 @@
+CONFIG_LEDS_CLASS_FLASH=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CR0014114 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CR0014114
new file mode 100644
index 0000000000000000000000000000000000000000..41b6f059691477efa41a24388d72de7ef3c6a313
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CR0014114
@@ -0,0 +1 @@
+# CONFIG_LEDS_CR0014114 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_EL15203000 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_EL15203000
new file mode 100644
index 0000000000000000000000000000000000000000..0345007181703469d1902dd31333bda67a6b0a3e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_EL15203000
@@ -0,0 +1 @@
+# CONFIG_LEDS_EL15203000 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_IS31FL32XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_IS31FL32XX
new file mode 100644
index 0000000000000000000000000000000000000000..bc726f797e2c7c8d80a79df6c014046ba703b97b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_IS31FL32XX
@@ -0,0 +1 @@
+# CONFIG_LEDS_IS31FL32XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_KTD2692 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_KTD2692
new file mode 100644
index 0000000000000000000000000000000000000000..69f58992613b1edff86e630efac44a45182da6a3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_KTD2692
@@ -0,0 +1 @@
+# CONFIG_LEDS_KTD2692 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3601X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3601X
new file mode 100644
index 0000000000000000000000000000000000000000..86560369f43db36e7e2c49cc2df67e4c6d5923d2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3601X
@@ -0,0 +1 @@
+# CONFIG_LEDS_LM3601X is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3692X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3692X
new file mode 100644
index 0000000000000000000000000000000000000000..00fbe48a707078af92f3666c62ec27dd91c0f44c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3692X
@@ -0,0 +1 @@
+# CONFIG_LEDS_LM3692X is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3697 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3697
new file mode 100644
index 0000000000000000000000000000000000000000..fe1bb37742b087539efca0d4005848eb8d327047
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3697
@@ -0,0 +1 @@
+# CONFIG_LEDS_LM3697 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP55XX_COMMON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP55XX_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..a90d7885b2a335ab587e8515af83d38bd9b8094e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP55XX_COMMON
@@ -0,0 +1 @@
+# CONFIG_LEDS_LP55XX_COMMON is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP8860 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP8860
new file mode 100644
index 0000000000000000000000000000000000000000..cc362679de0dd70d8fac528f6783941b8cb84c69
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP8860
@@ -0,0 +1 @@
+# CONFIG_LEDS_LP8860 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LT3593 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LT3593
new file mode 100644
index 0000000000000000000000000000000000000000..50bf0613fe3c6db5cede7106502b86c573efc4da
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LT3593
@@ -0,0 +1 @@
+CONFIG_LEDS_LT3593=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_REGULATOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_REGULATOR
new file mode 100644
index 0000000000000000000000000000000000000000..b190ec3a8404df916358c847ebcda0f4f924d655
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_REGULATOR
@@ -0,0 +1 @@
+# CONFIG_LEDS_REGULATOR is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT4505 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT4505
new file mode 100644
index 0000000000000000000000000000000000000000..04841c7d3df02fbb779712c467117ba4e664d29e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT4505
@@ -0,0 +1 @@
+# CONFIG_LEDS_RT4505 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT8515 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT8515
new file mode 100644
index 0000000000000000000000000000000000000000..e8f8a1a5aec48856beb6bd3d471a648960cba4b2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT8515
@@ -0,0 +1 @@
+# CONFIG_LEDS_RT8515 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SGM3140 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SGM3140
new file mode 100644
index 0000000000000000000000000000000000000000..38c3598c855278328960d8acbd1ecea80f7754ac
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SGM3140
@@ -0,0 +1 @@
+# CONFIG_LEDS_SGM3140 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SPI_BYTE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SPI_BYTE
new file mode 100644
index 0000000000000000000000000000000000000000..60b8283165f50733ed03f1ebb884932e3808af33
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SPI_BYTE
@@ -0,0 +1 @@
+# CONFIG_LEDS_SPI_BYTE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SYSCON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SYSCON
new file mode 100644
index 0000000000000000000000000000000000000000..cc80700c53995c0eee4b35d62d2038e9b9f7508b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SYSCON
@@ -0,0 +1 @@
+# CONFIG_LEDS_SYSCON is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_AUDIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_AUDIO
new file mode 100644
0000000000000000000000000000000000000000..6004ddbe5727ac6cd417d279753e98e0cddad7dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_AUDIO @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_AUDIO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_DISK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_DISK new file mode 100644 index 0000000000000000000000000000000000000000..5c1e5bef601f51370dcc9a860f4a939793c0a7f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_DISK @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_DISK is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBFDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBFDT new file mode 100644 index 0000000000000000000000000000000000000000..ff6b4da46e590354d4b8bb28df90bc12b069762f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBFDT @@ -0,0 +1 @@ +CONFIG_LIBFDT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBNVDIMM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBNVDIMM new file mode 100644 index 0000000000000000000000000000000000000000..aeaaefec7061451e316164119abb1e00268d7a4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBNVDIMM @@ -0,0 +1 @@ +CONFIG_LIBNVDIMM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LINEAR_RANGES b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LINEAR_RANGES new file mode 100644 index 0000000000000000000000000000000000000000..7b41203e91d9b7e833b549bbd7a884af602c7699 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LINEAR_RANGES @@ -0,0 +1 @@ +CONFIG_LINEAR_RANGES=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_LITEETH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_LITEETH new file mode 100644 index 0000000000000000000000000000000000000000..39dafe4de68530d6b41c886cb7f0832e45c7e020 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_LITEETH @@ -0,0 +1 @@ +# CONFIG_LITEX_LITEETH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_SOC_CONTROLLER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_SOC_CONTROLLER new file mode 100644 index 0000000000000000000000000000000000000000..0070e1494baea222236bd805fd69cacdf1ca6be7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_SOC_CONTROLLER @@ -0,0 +1 @@ +# CONFIG_LITEX_SOC_CONTROLLER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_ICH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_ICH new file mode 100644 index 0000000000000000000000000000000000000000..117d89d785c40193d5afef25ed11e292a264adf5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_ICH @@ -0,0 +1 @@ +# CONFIG_LPC_ICH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_SCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_SCH new file mode 100644 index 0000000000000000000000000000000000000000..13ca80491f485406f5d74fecbeb6d36b6f51a5c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_SCH @@ -0,0 +1 @@ +# CONFIG_LPC_SCH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LTE_GDM724X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LTE_GDM724X new file mode 100644 index 0000000000000000000000000000000000000000..ae10a4a61f7cae2e12a129f5f27ae26597ed8c05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LTE_GDM724X @@ -0,0 +1 @@ +# CONFIG_LTE_GDM724X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAC_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAC_PARTITION new file mode 100644 index 
0000000000000000000000000000000000000000..b6cee505757c43d401dc9da8b37ee8716006a58d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAC_PARTITION @@ -0,0 +1 @@ +# CONFIG_MAC_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAILBOX_TEST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAILBOX_TEST new file mode 100644 index 0000000000000000000000000000000000000000..6b2b9888d55cea38214a359a3f60638456c79aa5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAILBOX_TEST @@ -0,0 +1 @@ +# CONFIG_MAILBOX_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_DDR_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_DDR_PMU new file mode 100644 index 0000000000000000000000000000000000000000..e9beb55d69ad8d1cf9b2cf8f6b71ce63dcaff162 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_DDR_PMU @@ -0,0 +1 @@ +# CONFIG_MARVELL_CN10K_DDR_PMU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_TAD_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_TAD_PMU new file mode 100644 index 0000000000000000000000000000000000000000..2341fc1eb2b9d0a9ad84967fdcfc293bcba02947 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_TAD_PMU @@ -0,0 +1 @@ +# CONFIG_MARVELL_CN10K_TAD_PMU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_GTI_WDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_GTI_WDT new file mode 100644 index 0000000000000000000000000000000000000000..deac0785918c62af14a7cac61013b625c253c16b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_GTI_WDT @@ -0,0 +1 @@ +CONFIG_MARVELL_GTI_WDT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..27b93466f9fd635908ddd50f6b5132546cff30c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_GPIO @@ -0,0 +1 @@ +# CONFIG_MDIO_BUS_MUX_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MMIOREG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MMIOREG new file mode 100644 index 0000000000000000000000000000000000000000..4d738fd985dff3985792900592cc775773554e56 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MMIOREG @@ -0,0 +1 @@ +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MULTIPLEXER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MULTIPLEXER new file mode 100644 index 0000000000000000000000000000000000000000..c2786fc089f0f53ee7a0476b1a7d036082d66942 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MULTIPLEXER @@ -0,0 +1 @@ +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..a317fa125fc72cb7b9f8c2d01df5f355668c077f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_GPIO @@ -0,0 +1 @@ +CONFIG_MDIO_GPIO=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_HISI_FEMAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_HISI_FEMAC new file mode 100644 index 0000000000000000000000000000000000000000..6e4c16cf9d0d820f6a99482c2aba27e4f68a8f0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_HISI_FEMAC @@ -0,0 +1 @@ +CONFIG_MDIO_HISI_FEMAC=m diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ4019 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ4019 new file mode 100644 index 0000000000000000000000000000000000000000..7bdf4704b65188f7e8aa406e2b65df092916778a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ4019 @@ -0,0 +1 @@ +# CONFIG_MDIO_IPQ4019 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ8064 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ8064 new file mode 100644 index 0000000000000000000000000000000000000000..3104618fc456cdae43827acb1ad0c90b9f7dc769 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ8064 @@ -0,0 +1 @@ +# CONFIG_MDIO_IPQ8064 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_MSCC_MIIM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_MSCC_MIIM new file mode 100644 index 0000000000000000000000000000000000000000..7ed03900e08f795e98f9163fd67e13d72789119d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_MSCC_MIIM @@ -0,0 +1 @@ +CONFIG_MDIO_MSCC_MIIM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_OCTEON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_OCTEON new file mode 100644 index 0000000000000000000000000000000000000000..28cd01876971a081981a7ce5682d283b96051d3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_OCTEON @@ -0,0 +1 @@ +CONFIG_MDIO_OCTEON=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_XGENE new file mode 100644 index 0000000000000000000000000000000000000000..8a6b53a5b7094b446f4fa4e299a705a40fd24cc8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_XGENE @@ -0,0 +1 @@ +CONFIG_MDIO_XGENE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_CEC_SUPPORT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_CEC_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..99e81383fc8f9b016cedae1e5ebd1396499714ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_CEC_SUPPORT @@ -0,0 +1 @@ +# CONFIG_MEDIA_CEC_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_SUPPORT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..0348bacd103f188a0afde090d9c6a50b3d40adc0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_SUPPORT @@ -0,0 +1 @@ +# CONFIG_MEDIA_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ACT8945A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ACT8945A new file mode 100644 index 0000000000000000000000000000000000000000..d13164db622c63efc75a37f4560bbe10c586a9b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ACT8945A @@ -0,0 +1 @@ +# CONFIG_MFD_ACT8945A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_AS3722 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_AS3722 new file mode 100644 index 0000000000000000000000000000000000000000..eeaa8ff75763f5249cff343371714f14e6cb5c49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_AS3722 @@ -0,0 +1 @@ +# CONFIG_MFD_AS3722 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_FLEXCOM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_FLEXCOM new file mode 100644 index 0000000000000000000000000000000000000000..e41ced06262a4c7fea69b8cf01e10bcb9eaa32bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_FLEXCOM @@ -0,0 +1 @@ +# CONFIG_MFD_ATMEL_FLEXCOM is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_HLCDC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_HLCDC new file mode 100644 index 0000000000000000000000000000000000000000..cfd9b42ae1bf73607da35c37a8c2e3122c5f7f7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_HLCDC @@ -0,0 +1 @@ +# CONFIG_MFD_ATMEL_HLCDC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CORE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CORE new file mode 100644 index 0000000000000000000000000000000000000000..c8855e8a0a4bb21350da3c74ab747f09a243e408 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CORE @@ -0,0 +1 @@ +CONFIG_MFD_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CPCAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CPCAP new file mode 100644 index 0000000000000000000000000000000000000000..0f04081818e05e67b4a02e951b9c812da47a856a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CPCAP @@ -0,0 +1 @@ +# CONFIG_MFD_CPCAP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_GATEWORKS_GSC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_GATEWORKS_GSC new file mode 100644 index 0000000000000000000000000000000000000000..19ea1a015197472f82afc8910b83f06af7bc3fe0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_GATEWORKS_GSC @@ -0,0 +1 @@ +# CONFIG_MFD_GATEWORKS_GSC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI6421_PMIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI6421_PMIC new file mode 100644 index 0000000000000000000000000000000000000000..5b380187f3375d28ce4f1151b8490e02df1ae0e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI6421_PMIC @@ -0,0 +1 @@ +# CONFIG_MFD_HI6421_PMIC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI655X_PMIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI655X_PMIC new file mode 100644 index 0000000000000000000000000000000000000000..b45244e7282e9e69b304eac945e524e4a096a69d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI655X_PMIC @@ -0,0 +1 @@ +# CONFIG_MFD_HI655X_PMIC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_LOCHNAGAR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_LOCHNAGAR new file mode 100644 index 0000000000000000000000000000000000000000..400a681aba05b8472d4192d82a6e830a87dfb2c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_LOCHNAGAR @@ -0,0 +1 @@ +# CONFIG_MFD_LOCHNAGAR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX5970 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX5970 new file mode 100644 index 0000000000000000000000000000000000000000..0c7e2967be08918e33971b7a53e3fec0aca5a634 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX5970 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX5970 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77620 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77620 new file mode 100644 index 0000000000000000000000000000000000000000..a4fa3073f7a495fcca9543f04f3e8463dcf2b02d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77620 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77620 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77650 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77650 new file mode 100644 index 0000000000000000000000000000000000000000..3ebe2fe0fd31013a3b69bb97f9d7972f7b43d65a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77650 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77650 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77686 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77686 new file mode 100644 index 0000000000000000000000000000000000000000..9cf9bc1ee4c8c92c2ade74ac7544751a28f6b485 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77686 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77686 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77714 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77714 new file mode 100644 index 0000000000000000000000000000000000000000..3702f1749e6676de81f1f0af1e8b987164c9340c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77714 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77714 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NTXEC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NTXEC new file mode 100644 index 0000000000000000000000000000000000000000..5f7ec3cee22512b52631fb7b31df04e4c6b67488 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NTXEC @@ -0,0 +1 @@ +# CONFIG_MFD_NTXEC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NVEC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NVEC new file mode 100644 index 0000000000000000000000000000000000000000..a17523130a73c39dc9284becdf0398d819740585 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NVEC @@ -0,0 +1 @@ +# CONFIG_MFD_NVEC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_PM8008 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_PM8008 new file mode 100644 index 0000000000000000000000000000000000000000..090e632199b3e45660a966db3b10c3ede639f1bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_PM8008 @@ -0,0 +1 @@ +# CONFIG_MFD_QCOM_PM8008 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_RPM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_RPM new file mode 100644 index 0000000000000000000000000000000000000000..a5c3c02d4dd8b0ac6c532654408f9dfa1142142a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_RPM @@ -0,0 +1 @@ +# CONFIG_MFD_QCOM_RPM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_I2C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_I2C new file mode 100644 index 0000000000000000000000000000000000000000..ae6e69800ca5626368d0a0f26ed7e55cb16c4fab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_RK8XX_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_SPI new file mode 100644 index 0000000000000000000000000000000000000000..4f39f1a9cd6d9ab3d35b0646ce2288ae316da3d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_RK8XX_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RN5T618 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RN5T618 new file mode 100644 index 0000000000000000000000000000000000000000..752630240df1169bb797257a49bbe489b6bf462b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RN5T618 @@ -0,0 +1 @@ +# CONFIG_MFD_RN5T618 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD71828 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD71828 new file mode 100644 index 0000000000000000000000000000000000000000..e60c1237e5ee611481f801605383ee6a6a6a200b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD71828 @@ -0,0 +1 @@ +# CONFIG_MFD_ROHM_BD71828 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD718XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD718XX new file mode 100644 index 0000000000000000000000000000000000000000..3132a5664b891ccf608e047a1d97a4e68d1c7461 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD718XX @@ -0,0 +1 @@ +# CONFIG_MFD_ROHM_BD718XX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD957XMUF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD957XMUF new file mode 100644 index 0000000000000000000000000000000000000000..f24e7b524dd9f1dbc34d1ff7116f1974d7a03c16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD957XMUF @@ -0,0 +1 @@ +# CONFIG_MFD_ROHM_BD957XMUF is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_I2C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_I2C new file mode 100644 index 0000000000000000000000000000000000000000..ad3004be1be53b578da448f7e7ba3787dca447a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_RSMU_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_SPI new file mode 100644 index 0000000000000000000000000000000000000000..4721dc83707b15e6c1aa7c825fe07d92baf6afa2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_RSMU_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SEC_CORE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SEC_CORE new file mode 100644 index 0000000000000000000000000000000000000000..d6bc4de4d02b86734ca1f7ccce207625e309efb2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SEC_CORE @@ -0,0 +1 @@ +# CONFIG_MFD_SEC_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SM501 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SM501 new file mode 100644 index 0000000000000000000000000000000000000000..1a0ec3f74af9ff3238675ba20e919eaa8f09b11e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SM501 @@ -0,0 +1 @@ +# CONFIG_MFD_SM501 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMFX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMFX new file mode 100644 index 0000000000000000000000000000000000000000..480542e982838248e5e3c7bd0e34cb884a9db221 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMFX @@ -0,0 +1 @@ +# CONFIG_MFD_STMFX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMPE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMPE new file mode 100644 index 0000000000000000000000000000000000000000..a0f17335366bd75f3231a9e18338f9e35dc137d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMPE @@ -0,0 +1 @@ +# CONFIG_MFD_STMPE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STPMIC1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STPMIC1 new file mode 100644 index 0000000000000000000000000000000000000000..d146574b6fdb058db1057e0a241bf58f94dbdd82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STPMIC1 @@ -0,0 +1 @@ +# CONFIG_MFD_STPMIC1 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SYSCON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SYSCON new file mode 100644 index 0000000000000000000000000000000000000000..9890ebdb9fa06238b3fd8be452ebc38fffdbdf87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SYSCON @@ -0,0 +1 @@ +CONFIG_MFD_SYSCON=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TC3589X 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TC3589X new file mode 100644 index 0000000000000000000000000000000000000000..a11f656fc99bc530bb7a73bafcf1ebe351f57415 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TC3589X @@ -0,0 +1 @@ +# CONFIG_MFD_TC3589X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TI_LP87565 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TI_LP87565 new file mode 100644 index 0000000000000000000000000000000000000000..112b4154ba3757a8c223d50f473675deac92e0c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TI_LP87565 @@ -0,0 +1 @@ +# CONFIG_MFD_TI_LP87565 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65217 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65217 new file mode 100644 index 0000000000000000000000000000000000000000..2b2ee453ffc24e3353e2d89fb23a33086f5ba264 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65217 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65217 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65218 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65218 new file mode 100644 index 0000000000000000000000000000000000000000..ebc4bf3c005218ef2de04d1a823e9a1eba80656a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65218 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65218 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65219 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65219 new file mode 100644 index 0000000000000000000000000000000000000000..dbd6423b3d6a9519935dbd94f6775c0f3cae49dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65219 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65219 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VEXPRESS_SYSREG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VEXPRESS_SYSREG new file mode 100644 index 0000000000000000000000000000000000000000..0b40c91a58ecf37449ac5506736a615d1c748f99 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VEXPRESS_SYSREG @@ -0,0 +1 @@ +# CONFIG_MFD_VEXPRESS_SYSREG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VIPERBOARD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VIPERBOARD new file mode 100644 index 0000000000000000000000000000000000000000..1d4d00579b90645dc9cdd671f462d4d06cbc9129 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VIPERBOARD @@ -0,0 +1 @@ +# CONFIG_MFD_VIPERBOARD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VX855 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VX855 new file mode 100644 index 0000000000000000000000000000000000000000..8c01f8ec191d866e0e5218758518d64da4e673e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VX855 @@ -0,0 +1 @@ +# CONFIG_MFD_VX855 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MICREL_KS8995MA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MICREL_KS8995MA new file mode 100644 index 0000000000000000000000000000000000000000..cba7a233f9e1924adec144286d448ddf44008bbd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MICREL_KS8995MA @@ -0,0 +1 @@ +CONFIG_MICREL_KS8995MA=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MINIX_SUBPARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MINIX_SUBPARTITION new file mode 100644 index 0000000000000000000000000000000000000000..341becea35ae680fc4c9e35745d86edea7dca1c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MINIX_SUBPARTITION @@ -0,0 +1 @@ +# CONFIG_MINIX_SUBPARTITION is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_PCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_PCI new file mode 100644 index 0000000000000000000000000000000000000000..eeec3f096a3a0b3f329f13a1a600016d1d2d0436 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_PCI @@ -0,0 +1 @@ +# CONFIG_MISC_RTSX_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_USB new file mode 100644 index 0000000000000000000000000000000000000000..f41dad5dd03c114888cc5a8b70ab8ba6deefab55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_USB @@ -0,0 +1 @@ +# CONFIG_MISC_RTSX_USB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MLXBF_GIGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MLXBF_GIGE new file mode 100644 index 0000000000000000000000000000000000000000..3b96f758d9f22316a4789059e71774e6aa8b30a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MLXBF_GIGE @@ -0,0 +1 @@ +# CONFIG_MLXBF_GIGE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_ARMMMCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_ARMMMCI new file mode 100644 index 0000000000000000000000000000000000000000..0b49cccef1332b114592cab9ddf61a83c4474941 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_ARMMMCI @@ -0,0 +1 @@ +CONFIG_MMC_ARMMMCI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW new file mode 100644 index 0000000000000000000000000000000000000000..163b7bed55b1d60cb9e6a92a2ee9725b3b8932dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW @@ -0,0 +1 @@ +CONFIG_MMC_DW=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_BLUEFIELD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_BLUEFIELD new file mode 100644 index 0000000000000000000000000000000000000000..c18a0e09be3e801b63f5eeabe9eb82ed6461ee5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_BLUEFIELD @@ -0,0 +1 @@ +CONFIG_MMC_DW_BLUEFIELD=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_EXYNOS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_EXYNOS new file mode 100644 index 0000000000000000000000000000000000000000..36913703dae6ffc5df4415a03d007f163db12d90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_EXYNOS @@ -0,0 +1 @@ +# CONFIG_MMC_DW_EXYNOS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_HI3798CV200 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_HI3798CV200 new file mode 100644 index 0000000000000000000000000000000000000000..27bb58f91d1916c26caa529a659052c7133b51ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_HI3798CV200 @@ -0,0 +1 @@ +# CONFIG_MMC_DW_HI3798CV200 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_K3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_K3 new file mode 100644 index 0000000000000000000000000000000000000000..9ab75ac118e4ab1bb70d21a0c1c518610a6ae556 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_K3 @@ -0,0 +1 @@ +# CONFIG_MMC_DW_K3 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PCI new file mode 100644 index 0000000000000000000000000000000000000000..29336885e28a728096a6d56e32a2545b80a08343 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PCI @@ -0,0 +1 @@ +# CONFIG_MMC_DW_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PLTFM 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PLTFM new file mode 100644 index 0000000000000000000000000000000000000000..c9318bc4fe47976deffcec1805ca87507331b52d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PLTFM @@ -0,0 +1 @@ +CONFIG_MMC_DW_PLTFM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_MTK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_MTK new file mode 100644 index 0000000000000000000000000000000000000000..8d1b25c489e48798c29437d8e413f013a44c47cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_MTK @@ -0,0 +1 @@ +CONFIG_MMC_MTK=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_CADENCE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_CADENCE new file mode 100644 index 0000000000000000000000000000000000000000..db3e24055f6836207f9bdb58b8acf753fdf47ed7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_CADENCE @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_CADENCE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MILBEAUT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MILBEAUT new file mode 100644 index 0000000000000000000000000000000000000000..c67ef179b9be8033542f4af844e52122340db82b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MILBEAUT @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_MILBEAUT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MSM new file mode 100644 index 0000000000000000000000000000000000000000..e9997a15fb0f2ae69d6d911a4039f1124e74417a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MSM @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_MSM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_ARASAN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_ARASAN new file mode 100644 index 0000000000000000000000000000000000000000..c8021b0083e6b445f5085350ac22843eacdc0df7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_ARASAN @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_OF_ARASAN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_AT91 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_AT91 new file mode 100644 index 0000000000000000000000000000000000000000..933195ce7f1bc9c08e03e1d95dce9d030847d2a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_AT91 @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_OF_AT91 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_DWCMSHC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_DWCMSHC new file mode 100644 index 0000000000000000000000000000000000000000..a0ac36500753f9de4a2b2534a6225cebeeaab8a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_DWCMSHC @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_TEGRA new file mode 100644 index 0000000000000000000000000000000000000000..9b576da7b61a5cde5493fd975241fcaffdd573ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_TEGRA @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_TEGRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_STM32_SDMMC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_STM32_SDMMC new file mode 100644 index 0000000000000000000000000000000000000000..a4209fe9ce12dd9e86288477d95b37db8ab7ff55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_STM32_SDMMC @@ -0,0 +1 @@ +CONFIG_MMC_STM32_SDMMC=y 
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_TOSHIBA_PCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_TOSHIBA_PCI new file mode 100644 index 0000000000000000000000000000000000000000..9372cd4fe4326f9c45bea79d26b3edd08ff69a50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_TOSHIBA_PCI @@ -0,0 +1 @@ +CONFIG_MMC_TOSHIBA_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_APPLETOUCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_APPLETOUCH new file mode 100644 index 0000000000000000000000000000000000000000..57f14f837286a3aa96b34d08c84795dcd4bdc8d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_APPLETOUCH @@ -0,0 +1 @@ +# CONFIG_MOUSE_APPLETOUCH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_BCM5974 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_BCM5974 new file mode 100644 index 0000000000000000000000000000000000000000..6b4472cc7be78bbe26580f6dd22bcaee9f43d7ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_BCM5974 @@ -0,0 +1 @@ +# CONFIG_MOUSE_BCM5974 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_CYAPA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_CYAPA new file mode 100644 index 0000000000000000000000000000000000000000..e577ccf187eefba7c9be9dc501705c2135e92afc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_CYAPA @@ -0,0 +1 @@ +# CONFIG_MOUSE_CYAPA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_ELAN_I2C_SMBUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_ELAN_I2C_SMBUS new file mode 100644 index 0000000000000000000000000000000000000000..14ce73f7565b6548bc7ee281782a808701456376 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_ELAN_I2C_SMBUS @@ -0,0 +1 @@ +CONFIG_MOUSE_ELAN_I2C_SMBUS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_ELANTECH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_ELANTECH new file mode 100644 index 0000000000000000000000000000000000000000..b84ff05dc82beabdbd3e6a366ef870f7329f56d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_ELANTECH @@ -0,0 +1 @@ +# CONFIG_MOUSE_PS2_ELANTECH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_SENTELIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_SENTELIC new file mode 100644 index 0000000000000000000000000000000000000000..1ecdbe98e5ef7b2eda0a30a97f52abe2bb24b667 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_SENTELIC @@ -0,0 +1 @@ +# CONFIG_MOUSE_PS2_SENTELIC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_SERIAL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_SERIAL new file mode 100644 index 0000000000000000000000000000000000000000..9e443d06f91cd20817a162aef609ee4ce7bae03f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_SERIAL @@ -0,0 +1 @@ +# CONFIG_MOUSE_SERIAL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_VSXXXAA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_VSXXXAA new file mode 100644 index 0000000000000000000000000000000000000000..8e411d15c47e85f3f1e4450778f99e098eee358e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_VSXXXAA @@ -0,0 +1 @@ +# CONFIG_MOUSE_VSXXXAA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOXTET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOXTET new file mode 100644 index 0000000000000000000000000000000000000000..d141565b64b159c09f4a3970af8fe273e55ff494 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOXTET @@ -0,0 +1 
@@ +# CONFIG_MOXTET is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_AFS_PARTS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_AFS_PARTS new file mode 100644 index 0000000000000000000000000000000000000000..0abf6cc8eaadc0a92d998def637c3f3d26ef9fa2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_AFS_PARTS @@ -0,0 +1 @@ +# CONFIG_MTD_AFS_PARTS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI new file mode 100644 index 0000000000000000000000000000000000000000..bc8c1b81571210bf037700f32809b219b5fe0e31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI @@ -0,0 +1 @@ +CONFIG_MTD_CFI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_ADV_OPTIONS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_ADV_OPTIONS new file mode 100644 index 0000000000000000000000000000000000000000..29d7bb0b3251fa9f2781f1ba1b60220d05f2dcf3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_ADV_OPTIONS @@ -0,0 +1 @@ +# CONFIG_MTD_CFI_ADV_OPTIONS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_AMDSTD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_AMDSTD new file mode 100644 index 0000000000000000000000000000000000000000..cbc688f2f21881680d8fc231fa0a2049bb7d286f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_AMDSTD @@ -0,0 +1 @@ +CONFIG_MTD_CFI_AMDSTD=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_INTELEXT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_INTELEXT new file mode 100644 index 0000000000000000000000000000000000000000..01c8ff426c97099ecae08987e9b4459e6e3cba5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_INTELEXT @@ -0,0 +1 @@ +CONFIG_MTD_CFI_INTELEXT=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_STAA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_STAA new file mode 100644 index 0000000000000000000000000000000000000000..67ee4aed7b14fa2cd39f8af7f282d4de4da1e71c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_STAA @@ -0,0 +1 @@ +CONFIG_MTD_CFI_STAA=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_UTIL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_UTIL new file mode 100644 index 0000000000000000000000000000000000000000..7610710766f33a7ba4121a5d09b89882b0dbc551 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_UTIL @@ -0,0 +1 @@ +CONFIG_MTD_CFI_UTIL=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_GEN_PROBE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_GEN_PROBE new file mode 100644 index 0000000000000000000000000000000000000000..17a50bd6ddf98f0b4cabbc9c1d30a37f8d4b5e7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_GEN_PROBE @@ -0,0 +1 @@ +CONFIG_MTD_GEN_PROBE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_OF_PARTS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_OF_PARTS new file mode 100644 index 0000000000000000000000000000000000000000..bddcc3b5e83eb237649794f981a6cc3737e8fc38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_OF_PARTS @@ -0,0 +1 @@ +CONFIG_MTD_OF_PARTS=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP new file mode 100644 index 0000000000000000000000000000000000000000..8b99e3061f889d48c650a6bb6ce685b32ed3dfd0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP @@ -0,0 +1 @@ +CONFIG_MTD_PHYSMAP=m diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_COMPAT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_COMPAT new file mode 100644 index 0000000000000000000000000000000000000000..bbb6119b5cd63903edb567b344ef4c9fa0617aa9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_COMPAT @@ -0,0 +1 @@ +# CONFIG_MTD_PHYSMAP_COMPAT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_OF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_OF new file mode 100644 index 0000000000000000000000000000000000000000..20f59ebc7e4d1c220d1d2cb7ff24e32f4e5f17f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_OF @@ -0,0 +1 @@ +# CONFIG_MTD_PHYSMAP_OF is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE new file mode 100644 index 0000000000000000000000000000000000000000..7c05500dc235a5636eac75cc13e257821d2c05fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE @@ -0,0 +1 @@ +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE new file mode 100644 index 0000000000000000000000000000000000000000..baab16dac50cf7a26740893001cf7698bd1fc0f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE @@ -0,0 +1 @@ +CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_KEEP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_KEEP new file mode 100644 index 0000000000000000000000000000000000000000..598f82ddcb8890d73a8f358e08563b97ab758268 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_KEEP @@ -0,0 +1 @@ +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_USE_4K_SECTORS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_USE_4K_SECTORS new file mode 100644 index 0000000000000000000000000000000000000000..740f5c1f6b650d36c7f2f85d09cf5250dad72afd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_USE_4K_SECTORS @@ -0,0 +1 @@ +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MV_XOR_V2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MV_XOR_V2 new file mode 100644 index 0000000000000000000000000000000000000000..a6e590eb888450b46676d37765d0a057346dc745 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MV_XOR_V2 @@ -0,0 +1 @@ +# CONFIG_MV_XOR_V2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MYRI10GE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MYRI10GE new file mode 100644 index 0000000000000000000000000000000000000000..231d963dbb19a5b6ede5877f9f89441f9c6d6f50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MYRI10GE @@ -0,0 +1 @@ +# CONFIG_MYRI10GE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_BTT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_BTT new file mode 100644 index 0000000000000000000000000000000000000000..d4e20e237e30295d8a52c9f6ceaaa9b0df73231f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_BTT @@ -0,0 +1 @@ +CONFIG_ND_BTT=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_PFN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_PFN new file mode 100644 index 0000000000000000000000000000000000000000..8dd69e19b404a7cdfb3dbda1cb9c0b76ff168be9 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_PFN @@ -0,0 +1 @@ +CONFIG_ND_PFN=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP new file mode 100644 index 0000000000000000000000000000000000000000..ad5ff3969571a023dd17d53f8f10fb458386e0f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP @@ -0,0 +1 @@ +CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_AMD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_AMD new file mode 100644 index 0000000000000000000000000000000000000000..f67eb8fad7093b72e84ccb7e162e34130ff3eff7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_AMD @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_AMD=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_BROCADE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_BROCADE new file mode 100644 index 0000000000000000000000000000000000000000..fed5e48a410a57e0ef99f08a870abd93410cfe4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_BROCADE @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_BROCADE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_CISCO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_CISCO new file mode 100644 index 0000000000000000000000000000000000000000..7b4bdaa25a95c1a91dd526630c334966fab26dec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_CISCO @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_CISCO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_DEC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_DEC new file mode 100644 index 0000000000000000000000000000000000000000..7c3a697fb78d87f73c84cbe90b53af96562c22ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_DEC @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_DEC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_EMULEX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_EMULEX new file mode 100644 index 0000000000000000000000000000000000000000..e48620d8d36d7f5a40fa64f351ae22ce54c53ed1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_EMULEX @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_EMULEX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_QUALCOMM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_QUALCOMM new file mode 100644 index 0000000000000000000000000000000000000000..46be71a9047ea1d243512d6fad0511a12c90a357 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_QUALCOMM @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_QUALCOMM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE new file mode 100644 index 0000000000000000000000000000000000000000..57527a6168ba738e60b3dbfe10bb4c0e504e75b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE @@ -0,0 +1 @@ +CONFIG_NET_XGENE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE_V2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE_V2 new file mode 100644 index 0000000000000000000000000000000000000000..7d5cbcdbf8b5e0fc9da8b28c03e00c32bc48b339 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE_V2 @@ -0,0 +1 @@ +CONFIG_NET_XGENE_V2=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOUVEAU_PLATFORM_DRIVER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOUVEAU_PLATFORM_DRIVER new file mode 100644 index 
0000000000000000000000000000000000000000..5f06f4c60e8ec3b3d297c6a4de270c5a4bfd3473 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOUVEAU_PLATFORM_DRIVER @@ -0,0 +1 @@ +CONFIG_NOUVEAU_PLATFORM_DRIVER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOZOMI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOZOMI new file mode 100644 index 0000000000000000000000000000000000000000..35ef1afd52b69f49bbe5ff44f6e8f6eee48e5541 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOZOMI @@ -0,0 +1 @@ +# CONFIG_NOZOMI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVHE_EL2_DEBUG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVHE_EL2_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..4b6b822e4808c321f3f0463f5ab9d31fb19d94ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVHE_EL2_DEBUG @@ -0,0 +1 @@ +# CONFIG_NVHE_EL2_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVIDIA_CARMEL_CNP_ERRATUM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVIDIA_CARMEL_CNP_ERRATUM new file mode 100644 index 0000000000000000000000000000000000000000..36cdd53cf483c2bbec76cbd7db72c6a8689b8a7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVIDIA_CARMEL_CNP_ERRATUM @@ -0,0 +1 @@ +CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_QFPROM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_QFPROM new file mode 100644 index 0000000000000000000000000000000000000000..fbed6bf6b9a273b9ba309833db640049d045199a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_QFPROM @@ -0,0 +1 @@ +# CONFIG_NVMEM_QCOM_QFPROM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_SEC_QFPROM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_SEC_QFPROM new file mode 100644 index 0000000000000000000000000000000000000000..2844f81892cbb86d152fce1cb54ef475fdc0e385 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_SEC_QFPROM @@ -0,0 +1 @@ +# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_REBOOT_MODE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_REBOOT_MODE new file mode 100644 index 0000000000000000000000000000000000000000..44f3649815ba69a0e908e41bd891138dde960b1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_REBOOT_MODE @@ -0,0 +1 @@ +# CONFIG_NVMEM_REBOOT_MODE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_U_BOOT_ENV b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_U_BOOT_ENV new file mode 100644 index 0000000000000000000000000000000000000000..8db0f4878b026b8b0ccbb87cefdd11ca0649c1b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_U_BOOT_ENV @@ -0,0 +1 @@ +# CONFIG_NVMEM_U_BOOT_ENV is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF new file mode 100644 index 0000000000000000000000000000000000000000..b7345dd59430cb3ac4e187653f184e7e81a813c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF @@ -0,0 +1 @@ +CONFIG_OF=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_ADDRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_ADDRESS new file mode 100644 index 0000000000000000000000000000000000000000..1c5bd9918b59832cd0d44a8bde6bd4bf7058c043 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_ADDRESS @@ -0,0 +1 @@ +CONFIG_OF_ADDRESS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_DYNAMIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_DYNAMIC new 
file mode 100644 index 0000000000000000000000000000000000000000..b5c03535f542aae1adea6672df432e47556310c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_DYNAMIC @@ -0,0 +1 @@ +CONFIG_OF_DYNAMIC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_EARLY_FLATTREE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_EARLY_FLATTREE new file mode 100644 index 0000000000000000000000000000000000000000..f71bca86c30d8a06aba180081adaf9d9fe8d8466 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_EARLY_FLATTREE @@ -0,0 +1 @@ +CONFIG_OF_EARLY_FLATTREE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_FLATTREE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_FLATTREE new file mode 100644 index 0000000000000000000000000000000000000000..5738a15c0cac65988aa43e66448495aa66fbafda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_FLATTREE @@ -0,0 +1 @@ +CONFIG_OF_FLATTREE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..13fbd11f9ab40331b6325747039b092ba4520796 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_GPIO @@ -0,0 +1 @@ +CONFIG_OF_GPIO=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IOMMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IOMMU new file mode 100644 index 0000000000000000000000000000000000000000..994e558eded05fcdea74c96d117c24a3a0488deb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IOMMU @@ -0,0 +1 @@ +CONFIG_OF_IOMMU=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IRQ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IRQ new file mode 100644 index 0000000000000000000000000000000000000000..aa09892b46e446168baa2b779fd8261d8eced304 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IRQ @@ -0,0 +1 @@ +CONFIG_OF_IRQ=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_KOBJ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_KOBJ new file mode 100644 index 0000000000000000000000000000000000000000..e0923313484efb5953ab9ea489d1818e0cb80555 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_KOBJ @@ -0,0 +1 @@ +CONFIG_OF_KOBJ=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_MDIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_MDIO new file mode 100644 index 0000000000000000000000000000000000000000..cbdf2ee2f6c392f26e82922bcbd746931a9a6618 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_MDIO @@ -0,0 +1 @@ +CONFIG_OF_MDIO=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_NUMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_NUMA new file mode 100644 index 0000000000000000000000000000000000000000..288c3c8f1a9efde5f751adcd86c0d1d8c3dc3cd7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_NUMA @@ -0,0 +1 @@ +CONFIG_OF_NUMA=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_OVERLAY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_OVERLAY new file mode 100644 index 0000000000000000000000000000000000000000..30ae0afc56dddb94366e3271e7bbd41264286c91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_OVERLAY @@ -0,0 +1 @@ +CONFIG_OF_OVERLAY=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_PMEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_PMEM new file mode 100644 index 0000000000000000000000000000000000000000..71309bbab176dc8cd7ecc72dd74ce932bf0b9e3b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_PMEM @@ -0,0 +1 @@ +CONFIG_OF_PMEM=m diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESERVED_MEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESERVED_MEM new file mode 100644 index 0000000000000000000000000000000000000000..b826a1f71630d3df3f7169a3e1be209ce17624be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESERVED_MEM @@ -0,0 +1 @@
+CONFIG_OF_RESERVED_MEM=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESOLVE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESOLVE new file mode 100644 index 0000000000000000000000000000000000000000..2b68a809d7b25f83b71c65d0da88ca1b7026d448 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESOLVE @@ -0,0 +1 @@
+CONFIG_OF_RESOLVE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_UNITTEST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_UNITTEST new file mode 100644 index 0000000000000000000000000000000000000000..f9773f73dec653114583374cd4379cd9a427e9fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_UNITTEST @@ -0,0 +1 @@
+# CONFIG_OF_UNITTEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPEN_DICE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPEN_DICE new file mode 100644 index 0000000000000000000000000000000000000000..65a55add3c129b9a52859668c0b7575e794e91ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPEN_DICE @@ -0,0 +1 @@
+# CONFIG_OPEN_DICE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPTEE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPTEE new file mode 100644 index 0000000000000000000000000000000000000000..1f66abf9737b5ec675b0b203ddc9186d6eb10f12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPTEE @@ -0,0 +1 @@
+# CONFIG_OPTEE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OSF_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OSF_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..50b3cb64eff631900c237129864882c1d86bd6e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OSF_PARTITION @@ -0,0 +1 @@
+# CONFIG_OSF_PARTITION is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARPORT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARPORT new file mode 100644 index 0000000000000000000000000000000000000000..9dd8f33af36e94bd5d99ace7f7b3497ab6cd047a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARPORT @@ -0,0 +1 @@
+# CONFIG_PARPORT is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARTITION_PERCPU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARTITION_PERCPU new file mode 100644 index 0000000000000000000000000000000000000000..7cc3f61416975ad4ca053faa3d50cc8a843cd983 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARTITION_PERCPU @@ -0,0 +1 @@
+CONFIG_PARTITION_PERCPU=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PATA_OF_PLATFORM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PATA_OF_PLATFORM new file mode 100644 index 0000000000000000000000000000000000000000..4df88fc889690ed40f5bb00a9cc33415f0457efa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PATA_OF_PLATFORM @@ -0,0 +1 @@
+# CONFIG_PATA_OF_PLATFORM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_AL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_AL new file mode 100644 index 0000000000000000000000000000000000000000..4cfae7f6a2e8d51812754a016ea839455872297c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_AL @@ -0,0 +1 @@
+# CONFIG_PCIE_AL is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_ALTERA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_ALTERA new file mode 100644 index 0000000000000000000000000000000000000000..98cb6dd64d36e5ba933e313055cb10a371c61f5a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_ALTERA @@ -0,0 +1 @@
+# CONFIG_PCIE_ALTERA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_CADENCE_PLAT_HOST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_CADENCE_PLAT_HOST new file mode 100644 index 0000000000000000000000000000000000000000..23a74e2eb0f1edd20ce3e05b26f2d21d0ff4da14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_CADENCE_PLAT_HOST @@ -0,0 +1 @@
+# CONFIG_PCIE_CADENCE_PLAT_HOST is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW new file mode 100644 index 0000000000000000000000000000000000000000..8c266b96d02fc43f31d2be4184d3bbee2e187d9e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW @@ -0,0 +1 @@
+CONFIG_PCIE_DW=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW_HOST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW_HOST new file mode 100644 index 0000000000000000000000000000000000000000..6aecdd9c1e44410572d90167a51be80b99dbe87b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW_HOST @@ -0,0 +1 @@
+CONFIG_PCIE_DW_HOST=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_ERR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_ERR new file mode 100644 index 0000000000000000000000000000000000000000..be5d9b1ba76aa678aa1b8b958ee9e5dc1f0792dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_ERR @@ -0,0 +1 @@
+# CONFIG_PCIE_HISI_ERR is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_STB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_STB new file mode 100644 index 0000000000000000000000000000000000000000..6504604cc4b20a6a6689fb76bb6e373fba5227bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_STB @@ -0,0 +1 @@
+# CONFIG_PCIE_HISI_STB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_KIRIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_KIRIN new file mode 100644 index 0000000000000000000000000000000000000000..18352d4b3f7eae6570ee27c7bc1f0109ad4a026d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_KIRIN @@ -0,0 +1 @@
+# CONFIG_PCIE_KIRIN is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_MICROCHIP_HOST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_MICROCHIP_HOST new file mode 100644 index 0000000000000000000000000000000000000000..e19d7651202a9028b1937818c63cb2c749aa8bb1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_MICROCHIP_HOST @@ -0,0 +1 @@
+# CONFIG_PCIE_MICROCHIP_HOST is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_QCOM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_QCOM new file mode 100644 index 0000000000000000000000000000000000000000..363ef92c1fa44c3a30ec9f47dfbd8ff7216cb18c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_QCOM @@ -0,0 +1 @@
+# CONFIG_PCIE_QCOM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_XILINX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_XILINX new file mode 100644 index 0000000000000000000000000000000000000000..510085b1fb2ff083ccefee1255e2af682d7f9f49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_XILINX @@ -0,0 +1 @@
+# CONFIG_PCIE_XILINX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DOMAINS_GENERIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DOMAINS_GENERIC new file mode 100644 index 0000000000000000000000000000000000000000..eee89e99220a7a25ab2ca8545fc8653d82bb2c1f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DOMAINS_GENERIC @@ -0,0 +1 @@
+CONFIG_PCI_DOMAINS_GENERIC=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DYNAMIC_OF_NODES b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DYNAMIC_OF_NODES new file mode 100644 index 0000000000000000000000000000000000000000..1b8c0f8adcc289dc67c0b9c15927c04ef52f0fdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DYNAMIC_OF_NODES @@ -0,0 +1 @@
+# CONFIG_PCI_DYNAMIC_OF_NODES is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_ECAM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_ECAM new file mode 100644 index 0000000000000000000000000000000000000000..cf0869e9a1d67f0b6b624db1d07980850ddb62ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_ECAM @@ -0,0 +1 @@
+CONFIG_PCI_ECAM=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_FTPCI100 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_FTPCI100 new file mode 100644 index 0000000000000000000000000000000000000000..f9fe5b6ea0fa845eb86398f417449535c3b1d34f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_FTPCI100 @@ -0,0 +1 @@
+# CONFIG_PCI_FTPCI100 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HISI new file mode 100644 index 0000000000000000000000000000000000000000..468f3e431142128205aea2281176047622988816 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HISI @@ -0,0 +1 @@
+CONFIG_PCI_HISI=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_COMMON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_COMMON new file mode 100644 index 0000000000000000000000000000000000000000..7269a6fb8a33bec851b80b9b465c08b1553e260d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_COMMON @@ -0,0 +1 @@
+CONFIG_PCI_HOST_COMMON=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_ECAM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_ECAM new file mode 100644 index 0000000000000000000000000000000000000000..a812c3179353dc7b411ab04d4571002a27321e30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_ECAM @@ -0,0 +1 @@
+CONFIG_PCI_HOST_THUNDER_ECAM=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_PEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_PEM new file mode 100644 index 0000000000000000000000000000000000000000..6c45e0930606080ae2f14d498ef6eb01272b60e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_PEM @@ -0,0 +1 @@
+CONFIG_PCI_HOST_THUNDER_PEM=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_J721E_HOST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_J721E_HOST new file mode 100644 index 0000000000000000000000000000000000000000..11ea915c1b274ae9d9c212dfc67fce8d2784bad0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_J721E_HOST @@ -0,0 +1 @@
+# CONFIG_PCI_J721E_HOST is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_SYSCALL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_SYSCALL new file mode 100644 index 0000000000000000000000000000000000000000..fc3b9475ac107022cc373d810ba1ac428637cbbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_SYSCALL @@ -0,0 +1 @@
+CONFIG_PCI_SYSCALL=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_TEGRA new file mode 100644 index 0000000000000000000000000000000000000000..74e5837a4fb1654dc0fc5e80a83563dda92dbd2a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_TEGRA @@ -0,0 +1 @@
+# CONFIG_PCI_TEGRA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE new file mode 100644 index 0000000000000000000000000000000000000000..85317304c8ca9f36267b8cd4c8dca385dbe3eb39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE @@ -0,0 +1 @@
+CONFIG_PCI_XGENE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE_MSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE_MSI new file mode 100644 index 0000000000000000000000000000000000000000..ce07c62c8c42c6a822571da33f6815308c35f98a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE_MSI @@ -0,0 +1 @@
+CONFIG_PCI_XGENE_MSI=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCNET32 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCNET32 new file mode 100644 index 0000000000000000000000000000000000000000..fc9f806ab79ae73afb85d4ebea5aee23f09ffa0d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCNET32 @@ -0,0 +1 @@
+# CONFIG_PCNET32 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PDS_CORE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PDS_CORE new file mode 100644 index 0000000000000000000000000000000000000000..3ed21ba14b1ff4c4a32bb09133cc772a2a0f79d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PDS_CORE @@ -0,0 +1 @@
+# CONFIG_PDS_CORE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PERF_USE_VMALLOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PERF_USE_VMALLOC new file mode 100644 index 0000000000000000000000000000000000000000..d166723c2229d87560f02fd624c267fb3c0c4fd1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PERF_USE_VMALLOC @@ -0,0 +1 @@
+CONFIG_PERF_USE_VMALLOC=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHYLIB_LEDS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHYLIB_LEDS new file mode 100644 index 0000000000000000000000000000000000000000..24479deda0da810a706fe573a526a05fa0fd0052 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHYLIB_LEDS @@ -0,0 +1 @@
+CONFIG_PHYLIB_LEDS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY new file mode 100644 index 0000000000000000000000000000000000000000..7b45af693a12efbf891bf76f219e7722bea8c48b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY @@ -0,0 +1 @@
+# CONFIG_PHY_CADENCE_DPHY is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY_RX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY_RX new file mode 100644 index 0000000000000000000000000000000000000000..3db4592f5bda73e4b5d9238244db222a8fac9277 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY_RX @@ -0,0 +1 @@
+# CONFIG_PHY_CADENCE_DPHY_RX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SALVO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SALVO new file mode 100644 index 0000000000000000000000000000000000000000..bb0551527d7ee1df2a0b5d443686bd5f0f54f700 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SALVO @@ -0,0 +1 @@
+# CONFIG_PHY_CADENCE_SALVO is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SIERRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SIERRA new file mode 100644 index 0000000000000000000000000000000000000000..672fe76013abd964f4ac7a9a5b43c46c43a962aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SIERRA @@ -0,0 +1 @@
+# CONFIG_PHY_CADENCE_SIERRA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_TORRENT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_TORRENT new file mode 100644 index 0000000000000000000000000000000000000000..070906386a71d80e04879c180c50e638315d567b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_TORRENT @@ -0,0 +1 @@
+# CONFIG_PHY_CADENCE_TORRENT is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3660_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3660_USB new file mode 100644 index 0000000000000000000000000000000000000000..bdb7df17f7b90ed6070f19e6e710bb806e569e13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3660_USB @@ -0,0 +1 @@
+# CONFIG_PHY_HI3660_USB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_PCIE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_PCIE new file mode 100644 index 0000000000000000000000000000000000000000..f17343f12ff918f595641ba1d40bae1d5046a1a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_PCIE @@ -0,0 +1 @@
+# CONFIG_PHY_HI3670_PCIE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_USB new file mode 100644 index 0000000000000000000000000000000000000000..9d80fd69422449c15c5bd896f64fd89eb44e1d90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_USB @@ -0,0 +1 @@
+# CONFIG_PHY_HI3670_USB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI6220_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI6220_USB new file mode 100644 index 0000000000000000000000000000000000000000..462bb7c31671f59f622b4cde3fc29f9d55f47505 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI6220_USB @@ -0,0 +1 @@
+CONFIG_PHY_HI6220_USB=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISI_INNO_USB2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISI_INNO_USB2 new file mode 100644 index 0000000000000000000000000000000000000000..fb2367b44f5be7dd3f844cf766836e06f73c87f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISI_INNO_USB2 @@ -0,0 +1 @@
+# CONFIG_PHY_HISI_INNO_USB2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISTB_COMBPHY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISTB_COMBPHY new file mode 100644 index 0000000000000000000000000000000000000000..2d83cdd2368fcffb4c9257d053a86b3dd6914f2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISTB_COMBPHY @@ -0,0 +1 @@
+# CONFIG_PHY_HISTB_COMBPHY is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_LAN966X_SERDES b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_LAN966X_SERDES new file mode 100644 index 0000000000000000000000000000000000000000..ae926852ada204e4f486afde8064c29373a591f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_LAN966X_SERDES @@ -0,0 +1 @@
+# CONFIG_PHY_LAN966X_SERDES is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_MAPPHONE_MDM6600 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_MAPPHONE_MDM6600 new file mode 100644 index 0000000000000000000000000000000000000000..e6ad9bd4c45adc6ea60a733344eff532718129e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_MAPPHONE_MDM6600 @@ -0,0 +1 @@
+# CONFIG_PHY_MAPPHONE_MDM6600 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_OCELOT_SERDES b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_OCELOT_SERDES new file mode 100644 index 0000000000000000000000000000000000000000..58eceea1dec19748a58641e71cfc06cb46378fbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_OCELOT_SERDES @@ -0,0 +1 @@
+# CONFIG_PHY_OCELOT_SERDES is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_APQ8064_SATA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_APQ8064_SATA new file mode 100644 index 0000000000000000000000000000000000000000..bb25b4634d9734ad00df7d00e69861dea0e8a760 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_APQ8064_SATA @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_APQ8064_SATA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EDP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EDP new file mode 100644 index 0000000000000000000000000000000000000000..51417886dd82dea66c4b3b2d97110fe7aef8ccc0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EDP @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_EDP is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EUSB2_REPEATER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EUSB2_REPEATER new file mode 100644 index 0000000000000000000000000000000000000000..beac208b79c72f415f3a82b08fcb899809fa658e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EUSB2_REPEATER @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ4019_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ4019_USB new file mode 100644 index 0000000000000000000000000000000000000000..feefe01f66947571362256c84c9dd1090999c809 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ4019_USB @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_IPQ4019_USB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_SATA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_SATA new file mode 100644 index 0000000000000000000000000000000000000000..360f739273e59243f28f3ea32708a7dbb21e091d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_SATA @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_IPQ806X_SATA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_USB new file mode 100644 index 0000000000000000000000000000000000000000..de880c0fb3a627ce17228f6ffbc72a3e9fb4b7da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_USB @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_IPQ806X_USB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_M31_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_M31_USB new file mode 100644 index 0000000000000000000000000000000000000000..fb97792a3dc88ee63580ba3f547b880bdcd34f71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_M31_USB @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_M31_USB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_PCIE2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_PCIE2 new file mode 100644 index 0000000000000000000000000000000000000000..69c59e99a8275ff9fa7a0edef98347f4b83cbe9e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_PCIE2 @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_PCIE2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QMP new file mode 100644 index 0000000000000000000000000000000000000000..a0f4ab70489c7c5687d8c7af9c8d7c7902768d03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QMP @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_QMP is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QUSB2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QUSB2 new file mode 100644 index 0000000000000000000000000000000000000000..9c957ac9a79c54f29c39872ad606c59d70c7e6bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QUSB2 @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_QUSB2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SGMII_ETH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SGMII_ETH new file mode 100644 index 0000000000000000000000000000000000000000..310805c0707c125725f90dfe4045c9d3be98013c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SGMII_ETH @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_SGMII_ETH is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SNPS_EUSB2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SNPS_EUSB2 new file mode 100644 index 0000000000000000000000000000000000000000..ac6ac96c6479cc23a0712eb8965e88c61d45b083 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SNPS_EUSB2 @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS new file mode 100644 index 0000000000000000000000000000000000000000..ce9808509ffbcafe1bd9beece5595dc27bbde670 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_USB_HS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HSIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HSIC new file mode 100644 index 0000000000000000000000000000000000000000..2a37d673c0b0ace93d4bef907e6eb654f7901db0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HSIC @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_USB_HSIC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS_28NM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS_28NM new file mode 100644 index 0000000000000000000000000000000000000000..1f67fe251dd0352d7ddf81a2c724c1ca5ce76171 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS_28NM @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_USB_HS_28NM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 new file mode 100644 index 0000000000000000000000000000000000000000..7f13bc31554a4947f57a61a52b1e343924d21935 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SS new file mode 100644 index 0000000000000000000000000000000000000000..0213a42d748885ceea83a9ad1e013d21ee97b395 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SS @@ -0,0 +1 @@
+# CONFIG_PHY_QCOM_USB_SS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TEGRA_XUSB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TEGRA_XUSB new file mode 100644 index 0000000000000000000000000000000000000000..66baf7a802e256e27dc08ef4d9de9ecd6d06068d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TEGRA_XUSB @@ -0,0 +1 @@
+# CONFIG_PHY_TEGRA_XUSB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TUSB1210 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TUSB1210 new file mode 100644 index 0000000000000000000000000000000000000000..39d68df37d77652cc73cc13cf2e63ae8e49f4a2b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TUSB1210 @@ -0,0 +1 @@
+# CONFIG_PHY_TUSB1210 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_XGENE new file mode 100644 index 0000000000000000000000000000000000000000..8d9f368f3d65f2bc4e221b7a1c34521ace8b3989 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_XGENE @@ -0,0 +1 @@
+CONFIG_PHY_XGENE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PI433 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PI433 new file mode 100644 index 0000000000000000000000000000000000000000..b275e1e6ae91429f0248b3e0c09270215d947e18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PI433 @@ -0,0 +1 @@
+# CONFIG_PI433 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5018 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5018 new file mode 100644 index 0000000000000000000000000000000000000000..2cceeac3b451d60d1e65ad8afb57971435861642 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5018 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_IPQ5018 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5332 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5332 new file mode 100644 index 0000000000000000000000000000000000000000..055666b1f0aa61f6452b3d87a305ac75f1e2f09b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5332 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_IPQ5332 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ6018 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ6018 new file mode 100644 index 0000000000000000000000000000000000000000..2a4b31643e77b9f666b989928eb51bf96cff52bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ6018 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_IPQ6018 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ8074 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ8074 new file mode 100644 index 0000000000000000000000000000000000000000..3cb74ba4b2be96560e2370e2384fc2910d77122d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ8074 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_IPQ8074 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ9574 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ9574 new file mode 100644 index 0000000000000000000000000000000000000000..2d6a81f4fe09ee44f446faf93b7c0404da04fd97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ9574 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_IPQ9574 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_LPASS_LPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_LPASS_LPI new file mode 100644 index 0000000000000000000000000000000000000000..e76dab69c9e34021130e05d37e2c24accf63d6db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_LPASS_LPI @@ -0,0 +1 @@
+# CONFIG_PINCTRL_LPASS_LPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MDM9607 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MDM9607 new file mode 100644 index 0000000000000000000000000000000000000000..e2d4a4343360f733ae73e8ffc3e183013b4561fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MDM9607 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_MDM9607 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MICROCHIP_SGPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MICROCHIP_SGPIO new file mode 100644 index 0000000000000000000000000000000000000000..e8878348968c3cead304e17e3bbfc8b6fab0cb92 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MICROCHIP_SGPIO @@ -0,0 +1 @@
+# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM new file mode 100644 index 0000000000000000000000000000000000000000..b042dc93b7b4633a90fe16112bfbb7d55ab4febf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM @@ -0,0 +1 @@
+CONFIG_PINCTRL_MSM=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8916 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8916 new file mode 100644 index 0000000000000000000000000000000000000000..faea54e94d41bcf13fea2c8843a7306bfff45c25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8916 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_MSM8916 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8953 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8953 new file mode 100644 index 0000000000000000000000000000000000000000..fa88033ad5bea715084c81740493e70ad4199aef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8953 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_MSM8953 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8976 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8976 new file mode 100644 index 0000000000000000000000000000000000000000..05e860247d208200fa1f0045799f21a3eca37306 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8976 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_MSM8976 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8994 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8994 new file mode 100644 index 0000000000000000000000000000000000000000..977b1c3c4304ed2ecbef5b05043bd38de95c3b45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8994 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_MSM8994 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8996 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8996 new file mode 100644 index 0000000000000000000000000000000000000000..dc49ba08c0f7a16983d5fb831d6902b6e93b609e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8996 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_MSM8996 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8998 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8998 new file mode 100644 index 0000000000000000000000000000000000000000..29cb6660e724eb22333089bf9e8e96d327fee579 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8998 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_MSM8998 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_OCELOT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_OCELOT new file mode 100644 index 0000000000000000000000000000000000000000..60a20dba75d9a6de970fd5c15dea816203ead92a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_OCELOT @@ -0,0 +1 @@
+# CONFIG_PINCTRL_OCELOT is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCM2290 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCM2290 new file mode 100644 index 0000000000000000000000000000000000000000..9e5ce28f36d41a4ebe863236415cb9a161ca0d3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCM2290 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_QCM2290 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCOM_SSBI_PMIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCOM_SSBI_PMIC new file mode 100644 index 0000000000000000000000000000000000000000..bc00fae73a5a68d02b4e9e806c4f9b8d6698fa15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCOM_SSBI_PMIC @@ -0,0 +1 @@
+# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCS404 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCS404 new file mode 100644 index 0000000000000000000000000000000000000000..5ac31f57a446cee507b6338623967697ac2badd0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCS404 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_QCS404 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDF2XXX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDF2XXX new file mode 100644 index 0000000000000000000000000000000000000000..e8dca820de4d4d060e18e4317f58310a7e89c533 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDF2XXX @@ -0,0 +1 @@
+CONFIG_PINCTRL_QDF2XXX=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDU1000 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDU1000 new file mode 100644 index 0000000000000000000000000000000000000000..dccaf90c6dc6324d7f6f58681c023e7524127dac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDU1000 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_QDU1000 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SA8775P b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SA8775P new file mode 100644 index 0000000000000000000000000000000000000000..76ee372169edb559d4d9ad2719d68396795a152c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SA8775P @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SA8775P is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7180 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7180 new file mode 100644 index 0000000000000000000000000000000000000000..797cd04fb8636b7bb3b48ecc66bcf19594eb8c15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7180 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SC7180 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7280 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7280 new file mode 100644 index 0000000000000000000000000000000000000000..13cf0a83eca4364243e8d943904b73040f3a5cbc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7280 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SC7280 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8180X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8180X new file mode 100644 index 0000000000000000000000000000000000000000..764aa990ee8d6a365052939ff072349399f62d23 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8180X @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SC8180X is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8280XP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8280XP new file mode 100644 index 0000000000000000000000000000000000000000..323becbcf94c72fc934569bc350accc4ce6ccdb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8280XP @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SC8280XP is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM660 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM660 new file mode 100644 index 0000000000000000000000000000000000000000..e646188fa0a469796b64bfb7913825e55078b6f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM660 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SDM660 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM670 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM670 new file mode 100644 index 0000000000000000000000000000000000000000..f7825f00655812119f0d02a8502a5449865e0561 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM670 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SDM670 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM845 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM845 new file mode 100644 index 0000000000000000000000000000000000000000..425ac0b4d6e05861f136886be5d2b390fd6863b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM845 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SDM845 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDX75 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDX75 new file mode 100644 index 0000000000000000000000000000000000000000..a147a5432090a0a300e03893b3c304e85444d1dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDX75 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SDX75 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SINGLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SINGLE new file mode 100644 index 0000000000000000000000000000000000000000..4142920af3e1fd8a6d79a4c29c91a39ff23af100 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SINGLE @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SINGLE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6115 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6115 new file mode 100644 index 0000000000000000000000000000000000000000..d82fbc40228575799d1ed4b58fca9dd40d20285e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6115 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SM6115 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6125 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6125 new file mode 100644 index 0000000000000000000000000000000000000000..c6e13a9ae9bef6162b273d8b313ce5e6bbea89fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6125 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SM6125 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6350 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6350 new file mode 100644 index 0000000000000000000000000000000000000000..6d097b1a825b94e0a46cfc8b7a86244d4c1c28c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6350 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SM6350 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6375 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6375 new file mode 100644 index 0000000000000000000000000000000000000000..b0269d0465e3a569c4766d3244a205d5d252ec82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6375 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SM6375 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM7150 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM7150 new file mode 100644 index 0000000000000000000000000000000000000000..1780f3f3cebdf50c5c3f2da6ca0ff418fe2f137f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM7150 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SM7150 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8150 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8150 new file mode 100644 index 0000000000000000000000000000000000000000..b4bb081d9385e765304a8a27648f35e4581d6d38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8150 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SM8150 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8250 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8250 new file mode 100644 index 0000000000000000000000000000000000000000..490872420c0ea1d0e684a5b058d5028c96c20383 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8250 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SM8250 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8350 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8350 new file mode 100644 index 0000000000000000000000000000000000000000..002dd3fbd3c228a00a879371e73b77800ec03ea8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8350 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SM8350 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8450 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8450 new file mode 100644 index 0000000000000000000000000000000000000000..601239bd2d1ab8387bb0de5a7db2ea8169be8e18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8450 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SM8450 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8550 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8550 new file mode 100644 index 0000000000000000000000000000000000000000..48641b755f6bb8665336d0b2ee4a3d67573290c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8550 @@ -0,0 +1 @@
+# CONFIG_PINCTRL_SM8550 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_STMFX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_STMFX new file mode 100644 index 0000000000000000000000000000000000000000..dd3a3a31f2ff268987e3dbdb4703470b56b7aa9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_STMFX @@ -0,0 +1 @@
+# CONFIG_PINCTRL_STMFX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_TEGRA_XUSB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_TEGRA_XUSB new file mode 100644 index 0000000000000000000000000000000000000000..a707d08020c659301f56f8f0d4fbfe88e1345358 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_TEGRA_XUSB @@ -0,0 +1 @@
+CONFIG_PINCTRL_TEGRA_XUSB=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL320_MBOX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL320_MBOX new file mode 100644 index 0000000000000000000000000000000000000000..1cdbb24bbdee0b74a9fa6c12d427d3f6412b82bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL320_MBOX @@ -0,0 +1 @@
+# CONFIG_PL320_MBOX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL330_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL330_DMA new file mode 100644 index 0000000000000000000000000000000000000000..0e0863a5c0d7261e8bff8e865d8f3ed3b1fcf82d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL330_DMA @@ -0,0 +1 @@
+# CONFIG_PL330_DMA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PLATFORM_MHU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PLATFORM_MHU new file mode 100644 index 0000000000000000000000000000000000000000..b30b5761332b1a44e31608ead865440c21886b23 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PLATFORM_MHU @@ -0,0 +1 @@
+# CONFIG_PLATFORM_MHU is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PMIC_OPREGION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PMIC_OPREGION new file mode 100644 index 0000000000000000000000000000000000000000..15102fe450acf872a43f1487e434b3915ac0798b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PMIC_OPREGION @@ -0,0 +1 @@
+# CONFIG_PMIC_OPREGION is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_DEVFREQ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_DEVFREQ new file mode 100644 index 0000000000000000000000000000000000000000..ada3814f1c27238405c5f67d4b98567b89e39186 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_DEVFREQ @@ -0,0 +1 @@
+# CONFIG_PM_DEVFREQ is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS new file mode 100644 index 0000000000000000000000000000000000000000..1b1ea25d197a8fb85ede7695ed48ca37cc888739 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS @@ -0,0 +1 @@
+CONFIG_PM_GENERIC_DOMAINS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_OF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_OF new file mode 100644 index 0000000000000000000000000000000000000000..e878dafb6029f629789da23cde846cc1d7e00a1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_OF @@ -0,0 +1 @@
+CONFIG_PM_GENERIC_DOMAINS_OF=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_SLEEP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_SLEEP new file mode 100644 index 0000000000000000000000000000000000000000..279fc3496158f6d5b20c971150931fcd4d6b8783 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_SLEEP @@ -0,0 +1 @@
+CONFIG_PM_GENERIC_DOMAINS_SLEEP=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PNP_DEBUG_MESSAGES b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PNP_DEBUG_MESSAGES new file mode 100644 index 0000000000000000000000000000000000000000..227307038cffe8893390ed8fb7d735ce1b8a506f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PNP_DEBUG_MESSAGES @@ -0,0 +1 @@
+CONFIG_PNP_DEBUG_MESSAGES=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_BRCMSTB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_BRCMSTB new file mode 100644 index 0000000000000000000000000000000000000000..35f35e595ebb3cd2fb17790d10b43b19288b7bca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_BRCMSTB @@ -0,0 +1 @@
+# CONFIG_POWER_RESET_BRCMSTB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..dac784d6c530c4c0751b070fad752a9c74644a50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO @@ -0,0 +1 @@
+CONFIG_POWER_RESET_GPIO=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO_RESTART b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO_RESTART new file mode 100644 index 0000000000000000000000000000000000000000..cb0a3228ba04203f52516939a9ffbcff65141b59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO_RESTART @@ -0,0 +1 @@
+CONFIG_POWER_RESET_GPIO_RESTART=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_HISI new file mode 100644 index 0000000000000000000000000000000000000000..c5516db40ad18d166adee65ae1e19fce9444bbef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_HISI @@ -0,0 +1 @@
+CONFIG_POWER_RESET_HISI=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_LTC2952 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_LTC2952 new file mode 100644 index 0000000000000000000000000000000000000000..33d2e69bd0e143af08ca01d87dad5e6c80c6c2b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_LTC2952 @@ -0,0 +1 @@
+# CONFIG_POWER_RESET_LTC2952 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_MSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_MSM new file mode 100644 index 0000000000000000000000000000000000000000..b03b124a53ab1b6ff6d86177af990f29c96fe09c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_MSM @@ -0,0 +1 @@
+# CONFIG_POWER_RESET_MSM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_REGULATOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_REGULATOR new file mode 100644 index 0000000000000000000000000000000000000000..712536cfc88f7aa93dec40c4e06eb9c0f28acc01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_REGULATOR @@ -0,0 +1 @@
+# CONFIG_POWER_RESET_REGULATOR is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_RESTART b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_RESTART new file mode 100644 index 0000000000000000000000000000000000000000..5b8c6398dff6d257125559458f6bffebde396ee6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_RESTART @@ -0,0 +1 @@
+CONFIG_POWER_RESET_RESTART=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON new file mode 100644 index 0000000000000000000000000000000000000000..d0db157033ea04e24a2ac9c2bdea64e70728bde8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON @@ -0,0 +1 @@
+CONFIG_POWER_RESET_SYSCON=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON_POWEROFF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON_POWEROFF new file mode 100644 index 0000000000000000000000000000000000000000..72673e06d02fea29cc16612f5d75d78f05fb987e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON_POWEROFF @@ -0,0 +1 @@
+# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_VEXPRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_VEXPRESS new file mode 100644 index 0000000000000000000000000000000000000000..6b285d5c913a04e6cc7966ace5c8036e5e94928c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_VEXPRESS @@ -0,0 +1 @@
+# CONFIG_POWER_RESET_VEXPRESS is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_XGENE new file mode 100644 index 0000000000000000000000000000000000000000..70d1b925462b916926ae16105861a9605a8273ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_XGENE @@ -0,0 +1 @@
+# CONFIG_POWER_RESET_XGENE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PTP_1588_CLOCK_KVM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PTP_1588_CLOCK_KVM new file mode 100644 index 0000000000000000000000000000000000000000..647dbe3b5529ed4679de0ebf4ed2d640e10de0a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PTP_1588_CLOCK_KVM @@ -0,0 +1 @@
+CONFIG_PTP_1588_CLOCK_KVM=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_FSL_FTM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_FSL_FTM new file mode 100644 index 0000000000000000000000000000000000000000..8bd1025eeae00d9cfb02b8641d47c57958c1580d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_FSL_FTM @@ -0,0 +1 @@
+# CONFIG_PWM_FSL_FTM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_HIBVT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_HIBVT new file mode 100644 index 0000000000000000000000000000000000000000..7527f89ecc04b09c73ef8e71c430187d96d71eba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_HIBVT @@ -0,0 +1 @@
+# CONFIG_PWM_HIBVT is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_TEGRA new file mode 100644 index 0000000000000000000000000000000000000000..212beb44c7b7587bd38c7d67470ef10110bfdc64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_TEGRA @@ -0,0 +1 @@
+# CONFIG_PWM_TEGRA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_EMMC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_EMMC new file mode 100644 index 0000000000000000000000000000000000000000..4f3ddc111d4e78992aabd890e38eefe291684bc0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_EMMC @@ -0,0 +1 @@
+# CONFIG_PWRSEQ_EMMC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_SIMPLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_SIMPLE new file mode 100644 index 0000000000000000000000000000000000000000..b15e391d3d0bac1c50deca3c38c2c157454ed823 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_SIMPLE @@ -0,0 +1 @@
+# CONFIG_PWRSEQ_SIMPLE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCA7000_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCA7000_SPI new file mode 100644 index 0000000000000000000000000000000000000000..9e9088849d9077eade5321050c9145433bc2c39e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCA7000_SPI @@ -0,0 +1 @@
+# CONFIG_QCA7000_SPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_AOSS_QMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_AOSS_QMP new file mode 100644 index 0000000000000000000000000000000000000000..a51484e8f4534fbabf914fd4d831279b921d924c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_AOSS_QMP @@ -0,0 +1 @@
+# CONFIG_QCOM_AOSS_QMP is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_APCS_IPC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_APCS_IPC new file mode 100644 index 0000000000000000000000000000000000000000..2d27dfbee0a2d5394c6b9487a4e38392f36f22bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_APCS_IPC @@ -0,0 +1 @@
+# CONFIG_QCOM_APCS_IPC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_BAM_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_BAM_DMA new file mode 100644 index 0000000000000000000000000000000000000000..a36fb42914ed0123f2d5e508cebf1833eff1f6a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_BAM_DMA @@ -0,0 +1 @@
+# CONFIG_QCOM_BAM_DMA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_COMMAND_DB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_COMMAND_DB new file mode 100644 index 0000000000000000000000000000000000000000..50311c7b5a776bf12c076dc0ddd1dbe1cd50b37a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_COMMAND_DB @@ -0,0 +1 @@
+# CONFIG_QCOM_COMMAND_DB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_CPR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_CPR new file mode 100644 index 0000000000000000000000000000000000000000..96eea8d43019b3d2210f3f8fab9bb565afbeed51 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_CPR @@ -0,0 +1 @@
+# CONFIG_QCOM_CPR is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EBI2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EBI2 new file mode 100644 index 0000000000000000000000000000000000000000..e96b3544e9647335cdcce3834bf8b60128a9855f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EBI2 @@ -0,0 +1 @@
+# CONFIG_QCOM_EBI2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EMAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EMAC new file mode 100644 index 0000000000000000000000000000000000000000..4e3fa019bd8f1811c4ce1ddea1881bd28291b79a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EMAC @@ -0,0 +1 @@
+CONFIG_QCOM_EMAC=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GENI_SE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GENI_SE new file mode 100644 index 0000000000000000000000000000000000000000..ac6e626794eb5cc6c181c350419275f53eb4c90a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GENI_SE @@ -0,0 +1 @@
+# CONFIG_QCOM_GENI_SE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GSBI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GSBI new file mode 100644 index 0000000000000000000000000000000000000000..43946f18a1b1bf4cbb619c25d97b6d4dbf007fbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GSBI @@ -0,0 +1 @@
+# CONFIG_QCOM_GSBI is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA new file mode 100644 index 0000000000000000000000000000000000000000..a5442952ffbb2b71a18712a5d2fe4fab9e44cfb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA @@ -0,0 +1 @@
+CONFIG_QCOM_HIDMA=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA_MGMT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA_MGMT new file mode 100644 index 0000000000000000000000000000000000000000..8085b2ae4b3f7ff32e1542f2c19d3b9515b4c197 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA_MGMT @@ -0,0 +1 @@
+CONFIG_QCOM_HIDMA_MGMT=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IOMMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IOMMU new file mode 100644 index 0000000000000000000000000000000000000000..04124422783a945901bd7a48e637dbdf06064e34 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IOMMU @@ -0,0 +1 @@
+# CONFIG_QCOM_IOMMU is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IPCC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IPCC new file mode 100644 index 0000000000000000000000000000000000000000..a799e3d9b6e6698ab94fd680b713d773a33ac75e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IPCC @@ -0,0 +1 @@
+# CONFIG_QCOM_IPCC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IRQ_COMBINER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IRQ_COMBINER new file mode 100644 index 0000000000000000000000000000000000000000..6c23d15f753b5f974c4eba3ad80f7fa61939aa84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IRQ_COMBINER @@ -0,0 +1 @@
+CONFIG_QCOM_IRQ_COMBINER=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_KRYO_L2_ACCESSORS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_KRYO_L2_ACCESSORS new file mode 100644 index 0000000000000000000000000000000000000000..ce5f413638be99f9a48a139485f44531cae48ac5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_KRYO_L2_ACCESSORS @@ -0,0 +1 @@
+CONFIG_QCOM_KRYO_L2_ACCESSORS=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L2_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L2_PMU new file mode 100644 index 0000000000000000000000000000000000000000..2a553c8b17fafd54dbe3c79cea81835ba88134ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L2_PMU @@ -0,0 +1 @@
+CONFIG_QCOM_L2_PMU=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L3_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L3_PMU new file mode 100644 index 0000000000000000000000000000000000000000..ed899d66bc2155a8ea0e4af1c711927a6eb703cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L3_PMU @@ -0,0 +1 @@
+CONFIG_QCOM_L3_PMU=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_LLCC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_LLCC new file mode 100644 index 0000000000000000000000000000000000000000..5372311adc73e4f937eb90f4ad1b9f3d6ac2403e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_LLCC @@ -0,0 +1 @@
+# CONFIG_QCOM_LLCC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_OCMEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_OCMEM new file mode 100644 index 0000000000000000000000000000000000000000..21e1c3dcdfe253fa0b0ec471570664ae433c57f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_OCMEM @@ -0,0 +1 @@
+# CONFIG_QCOM_OCMEM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_PDC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_PDC new file mode 100644 index 0000000000000000000000000000000000000000..1e7c24135e20b4c207791dfbca91c27f2071a9c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_PDC @@ -0,0 +1 @@
+# CONFIG_QCOM_PDC is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RMTFS_MEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RMTFS_MEM new file mode 100644 index 0000000000000000000000000000000000000000..87425e1037885240545e6db43107c211fb9ff94a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RMTFS_MEM @@ -0,0 +1 @@
+# CONFIG_QCOM_RMTFS_MEM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RPMH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RPMH new file mode 100644 index 0000000000000000000000000000000000000000..f4736e82325849a6fb93afb49deab1bd1d2bd6cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RPMH @@ -0,0 +1 @@
+# CONFIG_QCOM_RPMH is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_SMEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_SMEM new file mode 100644 index 0000000000000000000000000000000000000000..59069977efe0e13770bc2a12f05aa736bc222402 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_SMEM @@ -0,0 +1 @@
+# CONFIG_QCOM_SMEM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_WDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_WDT new file mode 100644 index 0000000000000000000000000000000000000000..ca08cee25d93b9260824265be6dfb1b3b58c2ca2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_WDT @@ -0,0 +1 @@
+# CONFIG_QCOM_WDT is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QLGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QLGE new file mode 100644 index 0000000000000000000000000000000000000000..7cf2571c81df8989442f4f1d17e1c13625f9ddc9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QLGE @@ -0,0 +1 @@
+# CONFIG_QLGE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QUICC_ENGINE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QUICC_ENGINE new file mode 100644 index 0000000000000000000000000000000000000000..b340a0279cfdca558946f900c3f79884f28ad9c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QUICC_ENGINE @@ -0,0 +1 @@
+# CONFIG_QUICC_ENGINE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RC_CORE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RC_CORE new file mode 100644 index 0000000000000000000000000000000000000000..d44890bc9dfbb96e3b7b933a4a118e5ddb058951 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RC_CORE @@ -0,0 +1 @@
+# CONFIG_RC_CORE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGMAP_MMIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGMAP_MMIO new file mode 100644 index 0000000000000000000000000000000000000000..2e7e1299d83894caf579e921e681d15490a5273e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGMAP_MMIO @@ -0,0 +1 @@
+CONFIG_REGMAP_MMIO=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR new file mode 100644 index 0000000000000000000000000000000000000000..5b7c35c8f7bc6d5ab30c43db810f5270df4e74f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR @@ -0,0 +1 @@
+CONFIG_REGULATOR=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_88PG86X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_88PG86X new file mode 100644 index 0000000000000000000000000000000000000000..ba92dea72eadfa1dba7b9216bebb6e41049696bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_88PG86X @@ -0,0 +1 @@
+# CONFIG_REGULATOR_88PG86X is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ACT8865 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ACT8865 new file mode 100644 index 0000000000000000000000000000000000000000..f1e82abd5b0eb317bb6f422314250b7c2a5e1df7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ACT8865 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_ACT8865 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AD5398 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AD5398 new file mode 100644 index 0000000000000000000000000000000000000000..83b5968236a753892e9e84744618036fe0ebde7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AD5398 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_AD5398 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AW37503 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AW37503 new file mode 100644 index 0000000000000000000000000000000000000000..f5500cf71c87733c0de3cb241a0fb4ba5fc8f884 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AW37503 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_AW37503 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9121 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9121 new file mode 100644 index 0000000000000000000000000000000000000000..3ebf366febfa42cd6165eac04ecc189469056516 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9121 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_DA9121 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9210 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9210 new file mode 100644 index 0000000000000000000000000000000000000000..ed858d92d50b9c2e7f854caeeed0f89fd84a9184 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9210 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_DA9210 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9211 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9211 new file mode 100644 index 0000000000000000000000000000000000000000..5f4b883da55a1bc26fd2b6da0275ee24cdb94a19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9211 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_DA9211 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DEBUG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..2894d490943d67658e0d31b80c677037bbc537b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DEBUG @@ -0,0 +1 @@
+# CONFIG_REGULATOR_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53555 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53555 new file mode 100644 index 0000000000000000000000000000000000000000..d62314c7dda4895d134b056c04e1095419746a70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53555 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_FAN53555 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53880 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53880 new file mode 100644 index 0000000000000000000000000000000000000000..9c63fafa097fd5258e5a9804f1368d3828e8af1f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53880 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_FAN53880 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FIXED_VOLTAGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FIXED_VOLTAGE new file mode 100644 index 0000000000000000000000000000000000000000..63c1bd929762b713b6630c5b10b5ed2084dc33db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FIXED_VOLTAGE @@ -0,0 +1 @@
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..91c099fda2f77713b38816fc85e54065c6025163 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_GPIO @@ -0,0 +1 @@
+# CONFIG_REGULATOR_GPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL6271A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL6271A new file mode 100644 index 0000000000000000000000000000000000000000..f2507c882743d8f417a1625cc3938596e2379dff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL6271A @@ -0,0 +1 @@
+# CONFIG_REGULATOR_ISL6271A is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL9305 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL9305 new file mode 100644 index 0000000000000000000000000000000000000000..70ade2e4dab8de150d363d4d7bb2c9250281e312 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL9305 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_ISL9305 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3971 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3971 new file mode 100644 index 0000000000000000000000000000000000000000..e5bd8a9e8330343ddefb4b7705cd924e490a7151 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3971 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_LP3971 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3972 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3972 new file mode 100644 index 0000000000000000000000000000000000000000..3820f4be8e382b87cf00430780aa3a47a66b2762 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3972 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_LP3972 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP872X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP872X new file mode 100644 index 0000000000000000000000000000000000000000..a41e5d369a04686a61018d170b8d1653b38417b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP872X @@ -0,0 +1 @@
+# CONFIG_REGULATOR_LP872X is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP8755 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP8755 new file mode 100644 index 0000000000000000000000000000000000000000..3d3d38b77b396abeb207a7362203bc2805478b51 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP8755 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_LP8755 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3589 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3589 new file mode 100644 index 0000000000000000000000000000000000000000..d14c63b54e1fae6eb70c1dadf042a7cdc03ac802 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3589 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_LTC3589 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3676 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3676 new file mode 100644 index 0000000000000000000000000000000000000000..a8f50af1c9124d52f96c09b727605cc33997e5c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3676 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_LTC3676 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX1586 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX1586 new file mode 100644 index 0000000000000000000000000000000000000000..a975396135812c59c45df5f26576e33b9b1226e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX1586 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MAX1586 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20086 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20086 new file mode 100644 index 0000000000000000000000000000000000000000..cfa7f164bf7dad637f5bae940c1d66c5c75d043e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20086 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MAX20086 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20411 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20411 new file mode 100644 index 0000000000000000000000000000000000000000..177d58b5804076a5a772085be82422ff8ad90508 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20411 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MAX20411 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77826 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77826 new file mode 100644 index 0000000000000000000000000000000000000000..64d512ed576524dec6bd90ffdac109341997bf2b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77826 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MAX77826 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77857 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77857 new file mode 100644 index 0000000000000000000000000000000000000000..cc3902f60bd2290c0258df2e1b0493c577b21343 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77857 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MAX77857 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8649 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8649 new file mode 100644 index 0000000000000000000000000000000000000000..79620946aaa814dd951639399865e5525e1e2153 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8649 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MAX8649 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8660 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8660 new file mode 100644 index 0000000000000000000000000000000000000000..6b033e3b6a38c75fe388278939abb3c88d21bba6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8660 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MAX8660 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8893 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8893 new file mode 100644 index 0000000000000000000000000000000000000000..38ec09a45308143e9941bfa36fe03d6ba2434b54 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8893 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MAX8893 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8952 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8952 new file mode 100644 index 0000000000000000000000000000000000000000..f8346c0fcc73283e241aa70d5a24f7be3ccc7e95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8952 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MAX8952 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8973 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8973 new file mode 100644 index 0000000000000000000000000000000000000000..27d82cb7815aa86c6479501d49a83eb70ab8a4f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8973 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MAX8973 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MCP16502 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MCP16502 new file mode 100644 index 0000000000000000000000000000000000000000..778d3559d6623d7cc4e0a90b2627a9c30f5766ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MCP16502 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MCP16502 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP5416 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP5416 new file mode 100644 index 0000000000000000000000000000000000000000..1e382f58472a70f50f10ce4794c7f09ef2c97675 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP5416 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MP5416 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP8859 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP8859 new file mode 100644 index 0000000000000000000000000000000000000000..754940abfaf32a07d0c2668e777961af273eff5a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP8859 @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MP8859 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP886X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP886X new file mode 100644 index 0000000000000000000000000000000000000000..1f7822e56962eca4a1d181370ec03c53de28f511 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP886X @@ -0,0 +1 @@
+# CONFIG_REGULATOR_MP886X is not
set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MPQ7920 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MPQ7920 new file mode 100644 index 0000000000000000000000000000000000000000..c1a23686cdbfcb606bb6f21ebbd0f9fbec7d8bab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MPQ7920 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MPQ7920 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MT6311 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MT6311 new file mode 100644 index 0000000000000000000000000000000000000000..884c0d452dd8c88fcd7c36a4ec14006f002bbedc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MT6311 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MT6311 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PCA9450 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PCA9450 new file mode 100644 index 0000000000000000000000000000000000000000..8545b10b30c95c626042a75a6d7484e59103a817 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PCA9450 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PCA9450 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PF8X00 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PF8X00 new file mode 100644 index 0000000000000000000000000000000000000000..f75129b3d561d1683e89ad164034e2a830e02935 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PF8X00 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PF8X00 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PFUZE100 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PFUZE100 new file mode 100644 index 0000000000000000000000000000000000000000..7265415981b28bc37ffec10cdcc9f65ce939f6af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PFUZE100 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PFUZE100 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88060 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88060 new file mode 100644 index 0000000000000000000000000000000000000000..6c69caa243208e33fea6c213cd43028fee363344 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88060 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PV88060 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88080 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88080 new file mode 100644 index 0000000000000000000000000000000000000000..4b024f4ba59faf794b4079d206dde628708a6e7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88080 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PV88080 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88090 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88090 new file mode 100644 index 0000000000000000000000000000000000000000..009707021ef510f73a15f859c7210c8a723e5cdd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88090 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PV88090 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PWM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PWM new file mode 100644 index 0000000000000000000000000000000000000000..81698143a0224fcae45fb1859c3b6fb0b101fc2d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PWM @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PWM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_QCOM_REFGEN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_QCOM_REFGEN new file mode 100644 index 
0000000000000000000000000000000000000000..53876721e0768b34ee200d50dc798125873592d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_QCOM_REFGEN @@ -0,0 +1 @@ +# CONFIG_REGULATOR_QCOM_REFGEN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RAA215300 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RAA215300 new file mode 100644 index 0000000000000000000000000000000000000000..98ec4ce12504183f542b3e84ae878394ac558056 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RAA215300 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RAA215300 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY new file mode 100644 index 0000000000000000000000000000000000000000..7dbbfb6b412522f4b091900325ea42b26d6aa5e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4801 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4801 new file mode 100644 index 0000000000000000000000000000000000000000..f60258af4beefabd471b678d89ad04c59163ae77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4801 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT4801 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4803 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4803 new file mode 100644 index 0000000000000000000000000000000000000000..f1388d814937e6df177971503d300e639c2c8679 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4803 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT4803 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5190A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5190A new file mode 100644 index 0000000000000000000000000000000000000000..35e23fba5f0437cb224154ce2a38c070dfdb0146 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5190A @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT5190A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5739 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5739 new file mode 100644 index 0000000000000000000000000000000000000000..b8c0b5e1575af318d28ce2c3e836279aaa457f3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5739 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT5739 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5759 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5759 new file mode 100644 index 0000000000000000000000000000000000000000..9928ef5452327fb05e3599950732a6448cc6f4eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5759 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT5759 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6160 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6160 new file mode 100644 index 0000000000000000000000000000000000000000..7107fd311c81ffcf11f453ee1958ee7ef396da48 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6160 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT6160 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6190 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6190 new file mode 100644 index 0000000000000000000000000000000000000000..5fe4661c58bb0925bfc7a108c64765a68c93531f --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6190 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT6190 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6245 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6245 new file mode 100644 index 0000000000000000000000000000000000000000..d240a99fcb5ff9ba4274135cf68d56cbc051ee24 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6245 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT6245 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTMV20 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTMV20 new file mode 100644 index 0000000000000000000000000000000000000000..680603f486160981c6bf91ad30fff432af754bf5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTMV20 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RTMV20 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2134 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2134 new file mode 100644 index 0000000000000000000000000000000000000000..13a439c360b3c0eb31837abc0b5e37f1e33a2a2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2134 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RTQ2134 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2208 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2208 new file mode 100644 index 0000000000000000000000000000000000000000..d879bc88ae0ebc48bc371c5a9ede10c51b9ef244 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2208 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RTQ2208 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ6752 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ6752 new file mode 100644 index 0000000000000000000000000000000000000000..5e9c27e6facad6aa07785ffc7f8aa6a071328c68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ6752 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RTQ6752 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SLG51000 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SLG51000 new file mode 100644 index 0000000000000000000000000000000000000000..b65742cc97a9874bd9102b13b6c8a372a9a928f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SLG51000 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_SLG51000 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8106A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8106A new file mode 100644 index 0000000000000000000000000000000000000000..619b463298838e58d8a78caae17fffe7b388dda5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8106A @@ -0,0 +1 @@ +# CONFIG_REGULATOR_SY8106A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8824X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8824X new file mode 100644 index 0000000000000000000000000000000000000000..42053ce4cc2e1d5a5877259ac096fa8361e0ace0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8824X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_SY8824X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8827N b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8827N new file mode 100644 index 0000000000000000000000000000000000000000..fafd2495eb5eafc1a0ce49f0ea22f5d97c216cbe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8827N @@ -0,0 +1 @@ +# CONFIG_REGULATOR_SY8827N is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS51632 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS51632 new file mode 100644 index 0000000000000000000000000000000000000000..b586678e320de8b2fc1e403f4c7f4e83e7b335df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS51632 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS51632 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS62360 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS62360 new file mode 100644 index 0000000000000000000000000000000000000000..b6904c247850bb6a864ceff7a76e1980ce31795b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS62360 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS62360 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6286X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6286X new file mode 100644 index 0000000000000000000000000000000000000000..ddd62b4bd224eccffd947c6ba35d6ea257c1b212 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6286X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS6286X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6287X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6287X new file mode 100644 index 0000000000000000000000000000000000000000..07d63aa3547d44a8fb79b5ed1706b5ac3fec6579 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6287X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS6287X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65023 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65023 new file mode 100644 index 0000000000000000000000000000000000000000..7e5697b53d1ae2167ac5e8bccaf9528bc0480109 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65023 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS65023 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6507X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6507X new file mode 100644 index 0000000000000000000000000000000000000000..bcb7b9d409f743e3452f8a344867c904637267ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6507X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS6507X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65132 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65132 new file mode 100644 index 0000000000000000000000000000000000000000..b82a99f6c238be387d42d804d7f6be37a183f2ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65132 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS65132 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6524X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6524X new file mode 100644 index 0000000000000000000000000000000000000000..a7363878b1e6edf5c307c3f740640f47457dfea7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6524X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS6524X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_USERSPACE_CONSUMER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_USERSPACE_CONSUMER new file mode 100644 index 0000000000000000000000000000000000000000..f6a6e11df1671c4872854ab37e088e05484d7f69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_USERSPACE_CONSUMER @@ -0,0 +1 @@ +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VCTRL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VCTRL new file mode 100644 index 
0000000000000000000000000000000000000000..e27e9024520b91f063e82450b9ab228c7fdcdd93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VCTRL @@ -0,0 +1 @@ +# CONFIG_REGULATOR_VCTRL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VEXPRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VEXPRESS new file mode 100644 index 0000000000000000000000000000000000000000..d73432b3362eaffd101b38e61be197b7abfba1ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VEXPRESS @@ -0,0 +1 @@ +# CONFIG_REGULATOR_VEXPRESS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VIRTUAL_CONSUMER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VIRTUAL_CONSUMER new file mode 100644 index 0000000000000000000000000000000000000000..cfdfe491c4df7a197a73c6b2d16e19620a49a3aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VIRTUAL_CONSUMER @@ -0,0 +1 @@ +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VQMMC_IPQ4019 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VQMMC_IPQ4019 new file mode 100644 index 0000000000000000000000000000000000000000..2fe47853da49ad01dbd097ac72ef59c9ea96c588 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VQMMC_IPQ4019 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID new file mode 100644 index 0000000000000000000000000000000000000000..8cddb03cb13512f3d86acc0333c8bd8e45932152 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID @@ -0,0 +1 @@ +CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_HISI new file mode 100644 index 0000000000000000000000000000000000000000..af17d8a85fb1b906b96a3c0784e005439895c3a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_HISI @@ -0,0 +1 @@ +CONFIG_RESET_HISI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_AOSS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_AOSS new file mode 100644 index 0000000000000000000000000000000000000000..7213d3d92ce03d368c4bd0dd9a10ff2b37115511 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_AOSS @@ -0,0 +1 @@ +# CONFIG_RESET_QCOM_AOSS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_PDC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_PDC new file mode 100644 index 0000000000000000000000000000000000000000..e7a1af4466e858a4203f4b897aec5b97f1a3c72d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_PDC @@ -0,0 +1 @@ +# CONFIG_RESET_QCOM_PDC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_TEGRA_BPMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_TEGRA_BPMP new file mode 100644 index 0000000000000000000000000000000000000000..89fb5cfd7d5f25c1b570e6f079099463f71bc05f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_TEGRA_BPMP @@ -0,0 +1 @@ +CONFIG_RESET_TEGRA_BPMP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RFKILL_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RFKILL_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..1665e0ed4f92d420da374c83aac3d825faa1a096 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RFKILL_GPIO @@ -0,0 +1 @@ +CONFIG_RFKILL_GPIO=m 
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_F34 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_F34
new file mode 100644
index 0000000000000000000000000000000000000000..9cf1d1384961466bb215f529f87c46d0fbcd5681
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_F34
@@ -0,0 +1 @@
+CONFIG_RMI4_F34=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_SPI
new file mode 100644
index 0000000000000000000000000000000000000000..805037dfdcfb0934d8cdc07a62e543ae83aecf09
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_SPI
@@ -0,0 +1 @@
+CONFIG_RMI4_SPI=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMNET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMNET
new file mode 100644
index 0000000000000000000000000000000000000000..5e8c115b5e5da2066a5ba6c10d452de86462211d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMNET
@@ -0,0 +1 @@
+# CONFIG_RMNET is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ROCKCHIP_ERRATUM_3588001 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ROCKCHIP_ERRATUM_3588001
new file mode 100644
index 0000000000000000000000000000000000000000..289875384303446e7b038a5770a721af20b758ff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ROCKCHIP_ERRATUM_3588001
@@ -0,0 +1 @@
+CONFIG_ROCKCHIP_ERRATUM_3588001=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABB5ZES3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABB5ZES3
new file mode 100644
index 0000000000000000000000000000000000000000..4342c2d802e743f35286e7f27f9237d6a2c0c068
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABB5ZES3
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_ABB5ZES3=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABX80X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABX80X
new file mode 100644
index 0000000000000000000000000000000000000000..8a32953e60179464281a500f80c4643bac9507d4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABX80X
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_ABX80X=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_CADENCE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_CADENCE
new file mode 100644
index 0000000000000000000000000000000000000000..e5d78054ebafd81cec63b7f528d7ca41b83a3650
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_CADENCE
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_CADENCE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1305 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1305
new file mode 100644
index 0000000000000000000000000000000000000000..37b8971b4084bc85a728c6a742edb23802dfdb0c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1305
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_DS1305=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1343 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1343
new file mode 100644
index 0000000000000000000000000000000000000000..452567d49b4b6f0b27f95d90550f94049e136a86
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1343
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_DS1343=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1347 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1347
new file mode 100644
index 0000000000000000000000000000000000000000..c0dec754cb7cf5d12d06359b690afc820f67750b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1347
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_DS1347=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1374_WDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1374_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..025d8e33a337ef7007734a1edc3b99ec57e586cd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1374_WDT
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_DS1374_WDT=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1390 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1390
new file mode 100644
index 0000000000000000000000000000000000000000..9c4133771bb34505890d41990880e0a85c1e7358
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1390
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_DS1390=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685
new file mode 100644
index 0000000000000000000000000000000000000000..2784655cb23792a12ef1c2a3d9ac63f69d265ed7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_DS1685=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685_FAMILY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685_FAMILY
new file mode 100644
index 0000000000000000000000000000000000000000..c780040dbc235752bdb4a2f841cea9c0d6e7dccf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685_FAMILY
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_DS1685_FAMILY=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1689 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1689
new file mode 100644
index 0000000000000000000000000000000000000000..d6b20ab72748bc43663cc930c447b5a706a146b1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1689
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_DS1689 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17285 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17285
new file mode 100644
index 0000000000000000000000000000000000000000..76bf35382247a2e149efa7f2aa402ccb342ae246
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17285
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_DS17285 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17485 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17485
new file mode 100644
index 0000000000000000000000000000000000000000..67bda4bbed53605a1af91194b9a0933f4e70aa18
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17485
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_DS17485 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17885 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17885
new file mode 100644
index 0000000000000000000000000000000000000000..a38b8f58c2782395beb6fa7efa78d84fd6004c0a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17885
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_DS17885 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_HYM8563 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_HYM8563
new file mode 100644
index 0000000000000000000000000000000000000000..2c04b57d9561dcd85c318c212307705d3f080e96
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_HYM8563
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_HYM8563 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ISL12026 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ISL12026
new file mode 100644
index 0000000000000000000000000000000000000000..81aad46d322b607515af6a53d063a431a9ca86c1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ISL12026
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_ISL12026 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T93 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T93
new file mode 100644
index 0000000000000000000000000000000000000000..c2255ff15792536ae5168f59131741c76991a553
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T93
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_M41T93=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T94 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T94
new file mode 100644
index 0000000000000000000000000000000000000000..85abd7c6a65d5c4c5937c3881a7dc6f994a0cd59
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T94
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_M41T94=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MAX6902 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MAX6902
new file mode 100644
index 0000000000000000000000000000000000000000..4bfa40bf096a931d18d463568761f716436ceaf4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MAX6902
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_MAX6902=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MCP795 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MCP795
new file mode 100644
index 0000000000000000000000000000000000000000..498c0614beb35c9cc162b6ad658617304b766b3d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MCP795
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_MCP795=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_NCT3018Y b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_NCT3018Y
new file mode 100644
index 0000000000000000000000000000000000000000..6601f1c36a987ac754c99159e76020f976c59c34
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_NCT3018Y
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_NCT3018Y is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2123 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2123
new file mode 100644
index 0000000000000000000000000000000000000000..3a3a2e87b571822cb4c434dbe31f030c1c3c3afc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2123
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_PCF2123=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2127 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2127
new file mode 100644
index 0000000000000000000000000000000000000000..20c191fb7422b185984f0315bfb3764bcbe276fd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2127
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_PCF2127=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF85063 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF85063
new file mode 100644
index 0000000000000000000000000000000000000000..acad89a939fe40ab034c12024e1f40ed00b92186
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF85063
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_PCF85063=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL030 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL030
new file mode 100644
index 0000000000000000000000000000000000000000..6f6f4ec88b1821809e7898318222c9f673e417c9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL030
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_PL030 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL031 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL031
new file mode 100644
index 0000000000000000000000000000000000000000..42990f0defdffa17b70a6ed97479168a779bd902
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL031
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_PL031=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R7301 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R7301
new file mode 100644
index 0000000000000000000000000000000000000000..9b16e0a154a360887b1cb4102bc1787e2b0e0aff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R7301
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_R7301 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R9701 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R9701
new file mode 100644
index 0000000000000000000000000000000000000000..3a1cb4f3a06cf6f5b97c1c26bcb8c3fdb4a47373
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R9701
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_R9701=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RS5C348 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RS5C348
new file mode 100644
index 0000000000000000000000000000000000000000..56e73e950ff910f4bff4eca5a75a61a0896d1252
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RS5C348
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_RS5C348=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX4581 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX4581
new file mode 100644
index 0000000000000000000000000000000000000000..9d14898946b71c69aefa4476e9d49f2fa9d7bd4c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX4581
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_RX4581=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX8010 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX8010
new file mode 100644
index 0000000000000000000000000000000000000000..334d51df6d90f0ca2f6f6eee7fd4d309155dba0c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX8010
@@ -0,0 +1 @@
+CONFIG_RTC_DRV_RX8010=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_TEGRA
new file mode 100644
index 0000000000000000000000000000000000000000..6e2e438ff20d741916c056c5d75501fe90aa780a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_TEGRA
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_TEGRA is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_XGENE
new file mode 100644
index 0000000000000000000000000000000000000000..35c7bf14084765e019d8a1a32763c876b1e9123e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_XGENE
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_XGENE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ZYNQMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ZYNQMP
new file mode 100644
index 0000000000000000000000000000000000000000..0125f992ec48ddf39d0d5768778b8e75babfdbf0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ZYNQMP
@@ -0,0 +1 @@
+# CONFIG_RTC_DRV_ZYNQMP is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTS5208 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTS5208
new file mode 100644
index 0000000000000000000000000000000000000000..7d01cbf8fc1387ba0778b2869880dcfe6ac36788
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTS5208
@@ -0,0 +1 @@
+# CONFIG_RTS5208 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SATA_ZHAOXIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SATA_ZHAOXIN
new file mode 100644
index 0000000000000000000000000000000000000000..68dba87c9a8867b4672ae80d7c14631b48191dae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SATA_ZHAOXIN
@@ -0,0 +1 @@
+# CONFIG_SATA_ZHAOXIN is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCHED_THERMAL_PRESSURE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCHED_THERMAL_PRESSURE
new file mode 100644
index 0000000000000000000000000000000000000000..cf16318c1d29350da24267d5698e6a843523d28d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCHED_THERMAL_PRESSURE
@@ -0,0 +1 @@
+CONFIG_SCHED_THERMAL_PRESSURE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_AACRAID b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_AACRAID
new file mode 100644
index 0000000000000000000000000000000000000000..5686e689d7018dd69e7aae7faeeadb5c5b527a69
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_AACRAID
@@ -0,0 +1 @@
+# CONFIG_SCSI_AACRAID is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2X_FCOE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2X_FCOE
new file mode 100644
index 0000000000000000000000000000000000000000..5d36d085f2f66d9a599afb3cb9e9fec327642de5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2X_FCOE
@@ -0,0 +1 @@
+# CONFIG_SCSI_BNX2X_FCOE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2_ISCSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2_ISCSI
new file mode 100644
index 0000000000000000000000000000000000000000..d400977dfcb3d683edb2993cd67f58087911349e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2_ISCSI
@@ -0,0 +1 @@
+# CONFIG_SCSI_BNX2_ISCSI is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS
new file mode 100644
index 0000000000000000000000000000000000000000..778ee9b80f58c82ea3f6a06ce994cf127d4b126e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS
@@ -0,0 +1 @@
+CONFIG_SCSI_HISI_SAS=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE
new file mode 100644
index 0000000000000000000000000000000000000000..9b9d7981e19db45460e0a72b78fa75fb63955c16
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE
@@ -0,0 +1 @@
+# CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_PCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..601e09bf13805ef8125cc58ea5cde1b81de27584
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_PCI
@@ -0,0 +1 @@
+CONFIG_SCSI_HISI_SAS_PCI=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR
new file mode 100644
index 0000000000000000000000000000000000000000..ce2ee561899b674123242f5a0c5eab53b237385e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR
@@ -0,0 +1 @@
+CONFIG_SCSI_IPR=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_DUMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_DUMP
new file mode 100644
index 0000000000000000000000000000000000000000..e9892cabfb12bf91b3cae9e42bd13eb04d9765ab
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_DUMP
@@ -0,0 +1 @@
+CONFIG_SCSI_IPR_DUMP=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_TRACE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_TRACE
new file mode 100644
index 0000000000000000000000000000000000000000..126c699b69c7753a2aa845a9ff7d2b0a5d9c6fe0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_TRACE
@@ -0,0 +1 @@
+CONFIG_SCSI_IPR_TRACE=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ACPI_POWER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ACPI_POWER
new file mode 100644
index 0000000000000000000000000000000000000000..6d637e9f8873dbc73c62065f6a17cc924f8c174e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ACPI_POWER
@@ -0,0 +1 @@
+CONFIG_SENSORS_ACPI_POWER=y
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7314 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7314
new file mode 100644
index 0000000000000000000000000000000000000000..b5581b7983d10f8a9f998cd336ae7a4c4556a5a5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7314
@@ -0,0 +1 @@
+CONFIG_SENSORS_AD7314=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7414 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7414
new file mode 100644
index 0000000000000000000000000000000000000000..e80d53b990277a0ca60178f9275a8044447a9ed7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7414
@@ -0,0 +1 @@
+# CONFIG_SENSORS_AD7414 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7418 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7418
new file mode 100644
index 0000000000000000000000000000000000000000..d1c3ba2ddea167e1ca8514161fae5d101575fa15
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7418
@@ -0,0 +1 @@
+# CONFIG_SENSORS_AD7418 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADC128D818 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADC128D818
new file mode 100644
index 0000000000000000000000000000000000000000..9f24634fc1f6902501b163babd4173ed72d91b64
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADC128D818
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADC128D818=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADCXX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADCXX
new file mode 100644
index 0000000000000000000000000000000000000000..78229786bc918b807f346943d210e82bee8fe9b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADCXX
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADCXX=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1021 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1021
new file mode 100644
index 0000000000000000000000000000000000000000..7c7cbf394b5e05cfc205316cc250946f554afc54
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1021
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADM1021 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1025 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1025
new file mode 100644
index 0000000000000000000000000000000000000000..3ecefab88ec37a083c1c36db1f75f7f4b2452ba2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1025
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADM1025 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1026 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1026
new file mode 100644
index 0000000000000000000000000000000000000000..e81f9ea44a010830bc2d4a0d10113e354b180cb3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1026
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADM1026 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1029 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1029
new file mode 100644
index 0000000000000000000000000000000000000000..0f6b6b3b2f69c6481062be71842ce6294b456584
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1029
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADM1029 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1031 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1031
new file mode 100644
index 0000000000000000000000000000000000000000..d4b3ab743b9f89dc50e8e8c4bf838d5d1c73058f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1031
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADM1031 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1275 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1275
new file mode 100644
index 0000000000000000000000000000000000000000..c0a4008cec90fbc08111a9fdc9df60c60113dee0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1275
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADM1275 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM9240 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM9240
new file mode 100644
index 0000000000000000000000000000000000000000..919188fe84f05ac439a07878f79fb5943011e501
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM9240
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADM9240 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7828 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7828
new file mode 100644
index 0000000000000000000000000000000000000000..4d1bb6f1483cb60666339018261ea0d1972ea9a3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7828
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADS7828 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7871 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7871
new file mode 100644
index 0000000000000000000000000000000000000000..9a4091fb891baef18070059dc46688fbe11a1e10
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7871
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADS7871=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7410 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7410
new file mode 100644
index 0000000000000000000000000000000000000000..c30e7ff6cf22f2db331f1f5fbcbf04bab0c72284
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7410
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADT7410 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7411 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7411
new file mode 100644
index 0000000000000000000000000000000000000000..619c6e260e191d805115c367c133c3ad15f2a2f8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7411
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADT7411 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7462 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7462
new file mode 100644
index 0000000000000000000000000000000000000000..a3ad4ef509c9c0c8abfcc5869973c8a933b7446d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7462
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADT7462 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7470 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7470
new file mode 100644
index 0000000000000000000000000000000000000000..1983d0385144e82e426e7870d0f15e006e924ddb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7470
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADT7470 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7475 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7475
new file mode 100644
index 0000000000000000000000000000000000000000..6fb9e97ebcaca8715df922d156122364e8cb946e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7475
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADT7475 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AMC6821 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AMC6821
new file mode 100644
index 0000000000000000000000000000000000000000..54cb7710554ad47b30e270e49140dfc00355f96b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AMC6821
@@ -0,0 +1 @@
+# CONFIG_SENSORS_AMC6821 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_APDS990X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_APDS990X
new file mode 100644
index 0000000000000000000000000000000000000000..3aa738be2dbf3aebf2850eb48ceb78365b6fe561
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_APDS990X
@@ -0,0 +1 @@
+# CONFIG_SENSORS_APDS990X is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ARM_SCPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ARM_SCPI
new file mode 100644
index 0000000000000000000000000000000000000000..37da653835074aaa680a23bb9bfca23f4f3d7a1d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ARM_SCPI
@@ -0,0 +1 @@
+CONFIG_SENSORS_ARM_SCPI=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ASC7621 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ASC7621
new file mode 100644
index 0000000000000000000000000000000000000000..a9d200ccee5e53c44b23eb28d4b1bd463d049c99
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ASC7621
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ASC7621 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ATXP1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ATXP1
new file mode 100644
index 0000000000000000000000000000000000000000..3928000a463dc8d3b0ae31e7116967e1055d1e18
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ATXP1
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ATXP1 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_BH1770 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_BH1770
new file mode 100644
index 0000000000000000000000000000000000000000..f6d8bfafabda4795046bc03f53c7877049989bcf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_BH1770
@@ -0,0 +1 @@
+# CONFIG_SENSORS_BH1770 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DME1737 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DME1737
new file mode 100644
index 0000000000000000000000000000000000000000..8a86374249ef049776c41ea3bdc973c4ba291050
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DME1737
@@ -0,0 +1 @@
+# CONFIG_SENSORS_DME1737 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS1621 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS1621
new file mode 100644
index 0000000000000000000000000000000000000000..9cd0e4ac1442b9232819048c556b894d1dbbfc55
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS1621
@@ -0,0 +1 @@
+# CONFIG_SENSORS_DS1621 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS620 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS620
new file mode 100644
index 0000000000000000000000000000000000000000..45ee72f7b399d970f858e399409e66b803b5c10e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS620
@@ -0,0 +1 @@
+# CONFIG_SENSORS_DS620 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC1403 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC1403
new file mode 100644
index 0000000000000000000000000000000000000000..e49ca1d978f455bbc56ccd30829afcf117c48bcf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC1403
@@ -0,0 +1 @@
+# CONFIG_SENSORS_EMC1403 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC6W201 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC6W201
new file mode 100644
index 0000000000000000000000000000000000000000..ccf86b7b428fcbedd510d254609afc48fcadf700
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC6W201
@@ -0,0 +1 @@
+# CONFIG_SENSORS_EMC6W201 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71805F b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71805F
new file mode 100644
index 0000000000000000000000000000000000000000..f7176cb54a18aad2ada42c9d7c71d59b646fc32e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71805F
@@ -0,0 +1 @@
+# CONFIG_SENSORS_F71805F is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71882FG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71882FG
new file mode 100644
index 0000000000000000000000000000000000000000..69aef1e8f5edf8dd93fc082a045946799015b1d8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71882FG
@@ -0,0 +1 @@
+# CONFIG_SENSORS_F71882FG is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F75375S b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F75375S
new file mode 100644
index 0000000000000000000000000000000000000000..1db7725af385bd7fd3890ea1958ddeaaa898b831
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F75375S
@@ -0,0 +1 @@
+# CONFIG_SENSORS_F75375S is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G760A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G760A
new file mode 100644
index 0000000000000000000000000000000000000000..2ee6a3d0a05c0045099767218c0b2d71e5d5caa1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G760A
@@ -0,0 +1 @@
+# CONFIG_SENSORS_G760A is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G762 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G762
new file mode 100644
index 0000000000000000000000000000000000000000..c036656b3e4145c314191a9c0fc0a14b19c3090c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G762
@@ -0,0 +1 @@
+CONFIG_SENSORS_G762=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL518SM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL518SM
new file mode 100644
index 0000000000000000000000000000000000000000..86045f87f425dce48132f6ae902cb6f6b68572a0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL518SM
@@ -0,0 +1 @@
+# CONFIG_SENSORS_GL518SM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL520SM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL520SM
new file mode 100644
index 0000000000000000000000000000000000000000..45c088381d8ca1d6f1d587f1f748b2b8cf1ad482
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL520SM
@@ -0,0 +1 @@
+# CONFIG_SENSORS_GL520SM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GPIO_FAN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GPIO_FAN
new file mode 100644
index 0000000000000000000000000000000000000000..7211f3f577119c3813e768ab3017979064d5a2ba
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GPIO_FAN
@@ -0,0 +1 @@
+# CONFIG_SENSORS_GPIO_FAN is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_I5K_AMB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_I5K_AMB
new file mode 100644
index 0000000000000000000000000000000000000000..e68ba7b46e792b31d44f1ec35c9f58c49197e5ff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_I5K_AMB
@@ -0,0 +1 @@
+# CONFIG_SENSORS_I5K_AMB is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMAEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMAEM
new file mode 100644
index 0000000000000000000000000000000000000000..ecbf82750806b3daee218a6ccb0ee2844c282863
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMAEM
@@ -0,0 +1 @@
+# CONFIG_SENSORS_IBMAEM is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMPEX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMPEX
new file mode 100644
index 0000000000000000000000000000000000000000..dcd70ee1e07644ebbbeb637f51473257f66362dc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMPEX
@@ -0,0 +1 @@
+# CONFIG_SENSORS_IBMPEX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA209 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA209
new file mode 100644
index 0000000000000000000000000000000000000000..32f735340f04460fcd633801ddbc1b295ae7efd9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA209
@@ -0,0 +1 @@
+# CONFIG_SENSORS_INA209 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA2XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA2XX
new file mode 100644
index 0000000000000000000000000000000000000000..cd7741d38ee65a46517f8cfee0d52126ee89a010
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA2XX
@@ -0,0 +1 @@
+# CONFIG_SENSORS_INA2XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IT87 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IT87
new file mode 100644
index 0000000000000000000000000000000000000000..c5d87c024bb03424cdbcc0317fd643eba7edf0c1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IT87
@@ -0,0 +1 @@
+# CONFIG_SENSORS_IT87 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_JC42 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_JC42
new file mode 100644
index 0000000000000000000000000000000000000000..02894fb8f1c44a8cb06248794011445773e00fab
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_JC42
@@ -0,0 +1 @@
+# CONFIG_SENSORS_JC42 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LINEAGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LINEAGE
new file mode 100644
index 0000000000000000000000000000000000000000..0e9e3f66257b6e422003a1774599511ae39a939e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LINEAGE
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LINEAGE is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LIS3_I2C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LIS3_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..3086c9ed52c91161efc02bff67006891b695ee86
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LIS3_I2C
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LIS3_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM25066 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM25066
new file mode 100644
index 0000000000000000000000000000000000000000..f5dcf76364b887e84aea65a7daadc64e11ad053e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM25066
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM25066 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM63 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM63
new file mode 100644
index 0000000000000000000000000000000000000000..251b62540224765c89447866c687f01594de219f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM63
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM63 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM70 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM70
new file mode 100644
index 0000000000000000000000000000000000000000..d8626cc07adb19521758b78b4e1e399ff1dbe997
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM70
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM70=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM73 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM73
new file mode 100644
index 0000000000000000000000000000000000000000..9d0010922e2278560c86085f55bbbd633795cb5a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM73
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM73 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM75 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM75
new file mode 100644
index 0000000000000000000000000000000000000000..07e8eb7f7a418f46459d582822201c3b64181007
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM75
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM75 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM77 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM77
new file mode 100644
index 0000000000000000000000000000000000000000..601df6402927e0327d11ca43ca71772849de83f2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM77
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM77 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM78 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM78
new file mode 100644
index 0000000000000000000000000000000000000000..2bc678f9f0b734217574c67cdb02f031ababffed
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM78
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM78 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM80 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM80
new file mode 100644
index 0000000000000000000000000000000000000000..9625f3de7f8e5dd2087dc1df5d817de75f0e8cd8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM80
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM80 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM83 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM83
new file mode 100644
index 0000000000000000000000000000000000000000..d9836cd3fa5489957e42d6aaa28b12423cd857cc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM83
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM83 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM85 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM85
new file mode 100644
index 0000000000000000000000000000000000000000..b2811fbafe87ac8cc5c434a0aec5d7f3142c3d7a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM85
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM85 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM87 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM87
new file mode 100644
index 0000000000000000000000000000000000000000..6deeb8c262669cd164e67b92eef6ade886d96d86
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM87
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM87 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM90 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM90
new file mode 100644
index 0000000000000000000000000000000000000000..9400a5b9ff455ff61496316ac922700fbd8cf662
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM90
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM90 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM92 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM92
new file mode 100644
index 0000000000000000000000000000000000000000..09f46ba0f37f8169f450e6af035218997015c83c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM92
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM92 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM93 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM93
new file mode 100644
index 0000000000000000000000000000000000000000..b5a1f74bda1a20b582efe5118698b030f27ac05c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM93
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM93 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95234 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95234
new file mode 100644
index 0000000000000000000000000000000000000000..eab1dd4ed6020a0557b6f6f06d0d6c54f852d26d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95234
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM95234 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95241 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95241
new file mode 100644
index 0000000000000000000000000000000000000000..098b2e5e36cda437398e23e5676c1e8e5dcdca0c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95241
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM95241 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95245 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95245
new file mode 100644
index 0000000000000000000000000000000000000000..7e37a1c415731e3ddbf4bb7332d595001e689a1e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95245
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM95245 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2945 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2945
new file mode 100644
index 0000000000000000000000000000000000000000..4df4be93d6473291b9af2e3af3a327fad6fe7cbe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2945
@@ -0,0 +1 @@
+CONFIG_SENSORS_LTC2945=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2978 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2978
new file mode 100644
index 0000000000000000000000000000000000000000..b9f38acfd97311a60100a06c000d15a92eb6a7da
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2978
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LTC2978 is not set
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC3815 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC3815
new file mode 100644
index 0000000000000000000000000000000000000000..af91d3915fc40497e588c6460a1ec38463d955f7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC3815
@@ -0,0 +1 @@
+CONFIG_SENSORS_LTC3815=m
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4151 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4151
new
file mode 100644 index 0000000000000000000000000000000000000000..c3fe846a242d014cb8d7796f16563ceefab5973e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4151 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC4151 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4215 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4215 new file mode 100644 index 0000000000000000000000000000000000000000..752ec8bd9af04b61475b62549e7ef0b89d61e8a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4215 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC4215 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4222 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4222 new file mode 100644 index 0000000000000000000000000000000000000000..e7e172810cb7898eae2315ddd3c7b29a85da1212 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4222 @@ -0,0 +1 @@ +CONFIG_SENSORS_LTC4222=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4245 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4245 new file mode 100644 index 0000000000000000000000000000000000000000..301686b547ff675faf5091637ef839c82ec1407e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4245 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC4245 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4260 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4260 new file mode 100644 index 0000000000000000000000000000000000000000..fa5a33de1f69383fc6c7434ff43704b7bc71b804 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4260 @@ -0,0 +1 @@ +CONFIG_SENSORS_LTC4260=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4261 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4261 new file mode 100644 index 0000000000000000000000000000000000000000..77fd1e970952154fd3ef2b40646096a2b716fd4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4261 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC4261 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1111 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1111 new file mode 100644 index 0000000000000000000000000000000000000000..fdefcc26b6360506452057205717c138bad4683d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1111 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX1111=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16064 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16064 new file mode 100644 index 0000000000000000000000000000000000000000..fbcb65964b70126ff78c93186b279eeb15d4297c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16064 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX16064 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16065 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16065 new file mode 100644 index 0000000000000000000000000000000000000000..5ccb15648f8c18db7ad4936b7c1ea839bc9d7420 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16065 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX16065 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1619 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1619 new file mode 100644 index 0000000000000000000000000000000000000000..a55124be98b74abd8e21cc238e1119fdd11016e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1619 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX1619 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1668 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1668 new file mode 100644 index 0000000000000000000000000000000000000000..46c334a6f6b806a028c3f73ab633c262f8b75797 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1668 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX1668 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX197 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX197 new file mode 100644 index 0000000000000000000000000000000000000000..7199862491f76e1a6ada5a5f80455237af4739e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX197 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX197 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX20751 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX20751 new file mode 100644 index 0000000000000000000000000000000000000000..c1b89ff071b6c781c3efc0681dfcb079a16d0b55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX20751 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX20751=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX31790 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX31790 new file mode 100644 index 0000000000000000000000000000000000000000..fb5b83c53482ef9d13b190d5bc1f92db85c00e15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX31790 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX31790=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX34440 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX34440 new file mode 100644 index 0000000000000000000000000000000000000000..ec48b85dbec275bfee826700085ca11a9132ead3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX34440 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX34440 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6639 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6639 new file mode 100644 index 0000000000000000000000000000000000000000..ca9883a4296cf7a91258733bb60e3ebda048b741 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6639 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX6639 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6642 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6642 new file mode 100644 index 0000000000000000000000000000000000000000..8bacba0f745ed735e52f534417a6a788a5a5f6a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6642 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX6642 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6650 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6650 new file mode 100644 index 0000000000000000000000000000000000000000..01aaa1a3e7b76c88f5bb6ad1c5fcc6253e204e9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6650 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX6650 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6697 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6697 new file mode 100644 index 0000000000000000000000000000000000000000..27dc23f0a43ba967b1359283c878f83724e5875c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6697 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX6697 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX8688 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX8688 new file mode 100644 index 0000000000000000000000000000000000000000..ad2279f08f89618477b97776ad2cf34e0e4b3fcd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX8688 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX8688 is not 
set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MCP3021 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MCP3021 new file mode 100644 index 0000000000000000000000000000000000000000..0baeaead8278b9846fdc6bdb5014a135037319b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MCP3021 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MCP3021 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6683 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6683 new file mode 100644 index 0000000000000000000000000000000000000000..3dfe7f818252e08a96966598b705425088ab7323 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6683 @@ -0,0 +1 @@ +CONFIG_SENSORS_NCT6683=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6775 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6775 new file mode 100644 index 0000000000000000000000000000000000000000..63986c5b2cd2bc6467554cdaf679791692693b71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6775 @@ -0,0 +1 @@ +# CONFIG_SENSORS_NCT6775 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7802 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7802 new file mode 100644 index 0000000000000000000000000000000000000000..63cdd409c758a58bc15e88de8d5007b09d53b552 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7802 @@ -0,0 +1 @@ +CONFIG_SENSORS_NCT7802=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7904 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7904 new file mode 100644 index 0000000000000000000000000000000000000000..adf8136578262899aeb6d816caaa80d6bb6bec45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7904 @@ -0,0 +1 @@ +CONFIG_SENSORS_NCT7904=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87360 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87360 new file mode 100644 index 0000000000000000000000000000000000000000..741effe11296e5a6ddc9b6544e1f983f6b04eedf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87360 @@ -0,0 +1 @@ +# CONFIG_SENSORS_PC87360 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87427 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87427 new file mode 100644 index 0000000000000000000000000000000000000000..ff6fc05635af70ff6b8693255f5d212e79f9dc1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87427 @@ -0,0 +1 @@ +# CONFIG_SENSORS_PC87427 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PCF8591 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PCF8591 new file mode 100644 index 0000000000000000000000000000000000000000..6f628a342c6c5ba1dbdf9fb579968b4134669c3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PCF8591 @@ -0,0 +1 @@ +# CONFIG_SENSORS_PCF8591 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PMBUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PMBUS new file mode 100644 index 0000000000000000000000000000000000000000..7e4dc7661235cba0f64cd77a20552ca2a88e6a2d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PMBUS @@ -0,0 +1 @@ +# CONFIG_SENSORS_PMBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_POWR1220 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_POWR1220 new file mode 100644 index 0000000000000000000000000000000000000000..5524b22aae4f960e2a946e863a2c679367356c7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_POWR1220 
@@ -0,0 +1 @@ +CONFIG_SENSORS_POWR1220=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PWM_FAN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PWM_FAN new file mode 100644 index 0000000000000000000000000000000000000000..4d8a381c52e5c791f448e96a68c6f57d06b341da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PWM_FAN @@ -0,0 +1 @@ +CONFIG_SENSORS_PWM_FAN=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5627 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5627 new file mode 100644 index 0000000000000000000000000000000000000000..c024c82e7c498bf75e90c2cbe62ed4cf26e85f0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5627 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SCH5627 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5636 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5636 new file mode 100644 index 0000000000000000000000000000000000000000..4752ebfcd8d90204e9b9eb5ac1c0505135204a71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5636 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SCH5636 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT15 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT15 new file mode 100644 index 0000000000000000000000000000000000000000..be3633e224da5569132b56e4335bdffd05ad55ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT15 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SHT15 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT21 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT21 new file mode 100644 index 0000000000000000000000000000000000000000..c07100a989b40815eb2f9dbc277b1f85f0e7f898 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT21 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SHT21 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHTC1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHTC1 new file mode 100644 index 0000000000000000000000000000000000000000..7eeb26cc71dd45f9370fb1fa05e156c779ea4287 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHTC1 @@ -0,0 +1 @@ +CONFIG_SENSORS_SHTC1=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SIS5595 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SIS5595 new file mode 100644 index 0000000000000000000000000000000000000000..d6dab578fd89e2ead17e2bf46e3b9adda34febd0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SIS5595 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SIS5595 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47B397 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47B397 new file mode 100644 index 0000000000000000000000000000000000000000..745ae49869bd467688d608246092dde18ec549ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47B397 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SMSC47B397 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M1 new file mode 100644 index 0000000000000000000000000000000000000000..150f310153c36b1c0701fff88f858387bbfc86c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M1 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SMSC47M1 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M192 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M192 new file mode 100644 index 0000000000000000000000000000000000000000..2a306f5f02232c2401b006b4da837eda155984ce --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M192 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SMSC47M192 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TC74 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TC74 new file mode 100644 index 0000000000000000000000000000000000000000..32f0b8ea495eb69f7a12ec3507ff2bda7dda00f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TC74 @@ -0,0 +1 @@ +CONFIG_SENSORS_TC74=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_THMC50 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_THMC50 new file mode 100644 index 0000000000000000000000000000000000000000..7fc6f1ee39b96af758d723ec5d2f6ac4dabb56a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_THMC50 @@ -0,0 +1 @@ +# CONFIG_SENSORS_THMC50 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP102 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP102 new file mode 100644 index 0000000000000000000000000000000000000000..48f4e8447cfed6058147c8195c795352cc807a8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP102 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TMP102 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP103 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP103 new file mode 100644 index 0000000000000000000000000000000000000000..89c6eca5e525772643d7d25165e368d6cb05b0c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP103 @@ -0,0 +1 @@ +CONFIG_SENSORS_TMP103=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP401 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP401 new file mode 100644 index 0000000000000000000000000000000000000000..565627932698f9c5a2758b21dd3e1b0d3da85fd4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP401 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TMP401 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP421 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP421 new file mode 100644 index 0000000000000000000000000000000000000000..865e47e2a848fe674a23f3ed2207c02f730f11e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP421 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TMP421 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TPS40422 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TPS40422 new file mode 100644 index 0000000000000000000000000000000000000000..1abcf89704030e64b8427557afbe28fe871f18fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TPS40422 @@ -0,0 +1 @@ +CONFIG_SENSORS_TPS40422=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TSL2550 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TSL2550 new file mode 100644 index 0000000000000000000000000000000000000000..c2702cc2c8e69a9a5afcdc7c042c4577b502044b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TSL2550 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TSL2550 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9000 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9000 new file mode 100644 index 0000000000000000000000000000000000000000..1b3c69391580079923c3bcfcb9088e6a5ea321dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9000 @@ -0,0 +1 @@ +# CONFIG_SENSORS_UCD9000 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9200 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9200 new file mode 100644 index 
0000000000000000000000000000000000000000..2e9678aa26093b89ae4f968b7cbb332312181c6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9200 @@ -0,0 +1 @@ +# CONFIG_SENSORS_UCD9200 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VEXPRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VEXPRESS new file mode 100644 index 0000000000000000000000000000000000000000..a24ba02a891c7a8d910c6d862c7f7b4e137942a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VEXPRESS @@ -0,0 +1 @@ +CONFIG_SENSORS_VEXPRESS=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VIA686A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VIA686A new file mode 100644 index 0000000000000000000000000000000000000000..70903ff3806b9b794677580f7ec45cb480b66b8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VIA686A @@ -0,0 +1 @@ +# CONFIG_SENSORS_VIA686A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT1211 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT1211 new file mode 100644 index 0000000000000000000000000000000000000000..e6495b5ae58553d0f74ed27790aba4bd17a3d003 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT1211 @@ -0,0 +1 @@ +# CONFIG_SENSORS_VT1211 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT8231 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT8231 new file mode 100644 index 0000000000000000000000000000000000000000..9c3442298f85530e0bc2434ef06faedba47745a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT8231 @@ -0,0 +1 @@ +# CONFIG_SENSORS_VT8231 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627EHF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627EHF new file mode 100644 index 0000000000000000000000000000000000000000..cf28c425430513fe499e49118606856a1e9c447b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627EHF @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83627EHF is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627HF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627HF new file mode 100644 index 0000000000000000000000000000000000000000..3fe5afab024aa81ca798be0322ac46faf43ebf2b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627HF @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83627HF is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83781D b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83781D new file mode 100644 index 0000000000000000000000000000000000000000..1b25f4725b72e6e99095ec95b676e2951ca1d0fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83781D @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83781D is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83791D b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83791D new file mode 100644 index 0000000000000000000000000000000000000000..9b7e1cf9b64c57d9efdefad85410df4edfb26104 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83791D @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83791D is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83792D b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83792D new file mode 100644 index 0000000000000000000000000000000000000000..79866eaf88e3403fbf84e15915e715bdc834ab43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83792D @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83792D is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83793 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83793 new file mode 100644 index 0000000000000000000000000000000000000000..55d6678c1a55f43c19289a134f09632f37562bb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83793 @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83793 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83795 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83795 new file mode 100644 index 0000000000000000000000000000000000000000..7396aa9ec3f285980c551281b3096d60bd965e9b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83795 @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83795 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L785TS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L785TS new file mode 100644 index 0000000000000000000000000000000000000000..9c053cfdb1c7592ac5366299d3b7b69d84f31485 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L785TS @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83L785TS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L786NG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L786NG new file mode 100644 index 0000000000000000000000000000000000000000..c2209730bf93165d7f81465fb22c00b85d7ae076 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L786NG @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83L786NG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_XGENE new file mode 100644 index 0000000000000000000000000000000000000000..84ffdf1e91bbbfea9df916a9905196f4369cf0fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_XGENE @@ -0,0 +1 @@ +CONFIG_SENSORS_XGENE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ZL6100 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ZL6100 new file mode 100644 index 0000000000000000000000000000000000000000..00681ebdc0e905e5a14b57dca431c435c5b03f6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ZL6100 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ZL6100 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_16550A_VARIANTS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_16550A_VARIANTS new file mode 100644 index 0000000000000000000000000000000000000000..689bc1f75a123e976c6786e5b9ab04e2b9dd35ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_16550A_VARIANTS @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_16550A_VARIANTS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_FSL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_FSL new file mode 100644 index 0000000000000000000000000000000000000000..2aa5263f2832123c0ff43e26239e639b1ead3853 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_FSL @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_FSL=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_RT288X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_RT288X new file mode 100644 index 0000000000000000000000000000000000000000..8d92a9a3fc83a8571420d1da2ffff4c773eb5d71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_RT288X @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_RT288X=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_TEGRA new file mode 100644 index 0000000000000000000000000000000000000000..b9ebe42687a39d9c3b2a901c0acc7a117d5efe15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_TEGRA @@ -0,0 +1 @@ 
+CONFIG_SERIAL_8250_TEGRA=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL010 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL010 new file mode 100644 index 0000000000000000000000000000000000000000..ee4b9d52a1d754abc5fd70e53eed009b08f90e94 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL010 @@ -0,0 +1 @@ +# CONFIG_SERIAL_AMBA_PL010 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011 new file mode 100644 index 0000000000000000000000000000000000000000..1e7631992213780ca870c1af24a23f8820dea408 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011 @@ -0,0 +1 @@ +CONFIG_SERIAL_AMBA_PL011=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011_CONSOLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011_CONSOLE new file mode 100644 index 0000000000000000000000000000000000000000..498816601edbf8552b635386364505dc5020968b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011_CONSOLE @@ -0,0 +1 @@ +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_ARC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_ARC new file mode 100644 index 0000000000000000000000000000000000000000..48427664c8dd8e3085b72d1f11b466883f29e393 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_ARC @@ -0,0 +1 @@ +# CONFIG_SERIAL_ARC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_CONEXANT_DIGICOLOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_CONEXANT_DIGICOLOR new file mode 100644 index 0000000000000000000000000000000000000000..b7ae4d4a234b0eaea9244e2b825b20047cd79504 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_CONEXANT_DIGICOLOR @@ -0,0 +1 @@ +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_EARLYCON_SEMIHOST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_EARLYCON_SEMIHOST new file mode 100644 index 0000000000000000000000000000000000000000..c7042c27e68ab26bf78f9683a4a1bc9707b9aaae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_EARLYCON_SEMIHOST @@ -0,0 +1 @@ +# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_JSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_JSM new file mode 100644 index 0000000000000000000000000000000000000000..d9de0605a54f0b66cd82ce0a6d306bb872c2f892 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_JSM @@ -0,0 +1 @@ +# CONFIG_SERIAL_JSM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_MSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_MSM new file mode 100644 index 0000000000000000000000000000000000000000..9835c5271da117d5767ce69fc7f659e9472704cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_MSM @@ -0,0 +1 @@ +# CONFIG_SERIAL_MSM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_OF_PLATFORM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_OF_PLATFORM new file mode 100644 index 0000000000000000000000000000000000000000..837a43b2e89446cb50b727ee91811ffa5e67045f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_OF_PLATFORM @@ -0,0 +1 @@ +CONFIG_SERIAL_OF_PLATFORM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_SIFIVE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_SIFIVE new file mode 100644 index 
0000000000000000000000000000000000000000..95657f51a5868813a0ca38b1bf654fd8b4e03593 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_SIFIVE @@ -0,0 +1 @@ +# CONFIG_SERIAL_SIFIVE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_TEGRA_TCU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_TEGRA_TCU new file mode 100644 index 0000000000000000000000000000000000000000..4999de05d2dbe55b8e8d46ac257bb133023e6cc6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_TEGRA_TCU @@ -0,0 +1 @@ +# CONFIG_SERIAL_TEGRA_TCU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_XILINX_PS_UART b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_XILINX_PS_UART new file mode 100644 index 0000000000000000000000000000000000000000..3d6ecfc5e05cbe625712b13d16c5f1f9da829835 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_XILINX_PS_UART @@ -0,0 +1 @@ +# CONFIG_SERIAL_XILINX_PS_UART is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIO_APBPS2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIO_APBPS2 new file mode 100644 index 0000000000000000000000000000000000000000..9e86f27a9954b33946620aabc53ae7a618125a24 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIO_APBPS2 @@ -0,0 +1 @@ +# CONFIG_SERIO_APBPS2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SFC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SFC new file mode 100644 index 0000000000000000000000000000000000000000..fdc60b90f768591827dec34cb6d95e96bc083d82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SFC @@ -0,0 +1 @@ +# CONFIG_SFC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SGI_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SGI_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..49a58fc9bc968a372464bb0ecaf8be744fa0abe6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SGI_PARTITION @@ -0,0 +1 @@ +# CONFIG_SGI_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SG_SPLIT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SG_SPLIT new file mode 100644 index 0000000000000000000000000000000000000000..b6f8e64dcf48b50742c01fb90fc54c58722fab4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SG_SPLIT @@ -0,0 +1 @@ +CONFIG_SG_SPLIT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SHADOW_CALL_STACK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SHADOW_CALL_STACK new file mode 100644 index 0000000000000000000000000000000000000000..08f4cf8044ea422b17eaacab5824229f0492febf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SHADOW_CALL_STACK @@ -0,0 +1 @@ +# CONFIG_SHADOW_CALL_STACK is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BRCMSTB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BRCMSTB new file mode 100644 index 0000000000000000000000000000000000000000..7b8f8dcbb66f5b8a0c475d585649f7a0bc1d58b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BRCMSTB @@ -0,0 +1 @@ +# CONFIG_SOC_BRCMSTB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BUS new file mode 100644 index 0000000000000000000000000000000000000000..de561f5b4ec65a95c4047d8c6b0d389703bd2f26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BUS @@ -0,0 +1 @@ +CONFIG_SOC_BUS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_FUSE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_FUSE new file mode 100644 index 
0000000000000000000000000000000000000000..36d2d3822def5580542b3a61dbf360013a7edc0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_FUSE @@ -0,0 +1 @@ +CONFIG_SOC_TEGRA_FUSE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_PMC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_PMC new file mode 100644 index 0000000000000000000000000000000000000000..a6d72efba8fbb403dece877c63c98495ead74b58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_PMC @@ -0,0 +1 @@ +CONFIG_SOC_TEGRA_PMC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_POWERGATE_BPMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_POWERGATE_BPMP new file mode 100644 index 0000000000000000000000000000000000000000..81d94f8cc3bf3102e1934893af47087aac63cf2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_POWERGATE_BPMP @@ -0,0 +1 @@ +CONFIG_SOC_TEGRA_POWERGATE_BPMP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOLARIS_X86_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOLARIS_X86_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..7b428330f6659e83543b23a04e064beb4329e520 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOLARIS_X86_PARTITION @@ -0,0 +1 @@ +# CONFIG_SOLARIS_X86_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_QUADSPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_QUADSPI new file mode 100644 index 0000000000000000000000000000000000000000..7c242fd0db53de9bb258d64486054a929638b04c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_QUADSPI @@ -0,0 +1 @@ +# CONFIG_SPI_CADENCE_QUADSPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_XSPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_XSPI new file mode 100644 index 0000000000000000000000000000000000000000..382ba298b7d39ab01bcb1989b0adf22f1e42bc08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_XSPI @@ -0,0 +1 @@ +# CONFIG_SPI_CADENCE_XSPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_DMA new file mode 100644 index 0000000000000000000000000000000000000000..cf996d71fdd4a9c29e55a96446475eac37576fa7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_DMA @@ -0,0 +1 @@ +# CONFIG_SPI_DW_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_PCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_PCI new file mode 100644 index 0000000000000000000000000000000000000000..ef5d048b4cf4b59d4e52f83f80bf2133dc3338c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_PCI @@ -0,0 +1 @@ +# CONFIG_SPI_DW_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_FSL_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_FSL_SPI new file mode 100644 index 0000000000000000000000000000000000000000..aa09f75414c50b09b944f93a8eed32966575ff9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_FSL_SPI @@ -0,0 +1 @@ +# CONFIG_SPI_FSL_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC new file mode 100644 index 0000000000000000000000000000000000000000..37a685730420bac50755a57121c2803947d63338 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC @@ -0,0 +1 @@ +# CONFIG_SPI_HISI_SFC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC_V3XX 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC_V3XX new file mode 100644 index 0000000000000000000000000000000000000000..5705c51bd0572e853503c073421f3e45628402cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC_V3XX @@ -0,0 +1 @@ +CONFIG_SPI_HISI_SFC_V3XX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_MEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_MEM new file mode 100644 index 0000000000000000000000000000000000000000..118458c27a84010349ee138c0e18936c350d28c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_MEM @@ -0,0 +1 @@ +CONFIG_SPI_MEM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_QCOM_QSPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_QCOM_QSPI new file mode 100644 index 0000000000000000000000000000000000000000..aaed52ccea75463f9914d7950a37aaebf0849d17 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_QCOM_QSPI @@ -0,0 +1 @@ +# CONFIG_SPI_QCOM_QSPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_SN_F_OSPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_SN_F_OSPI new file mode 100644 index 0000000000000000000000000000000000000000..6f2ee22f2c03a8b4c6913cb3d1274399dd6de893 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_SN_F_OSPI @@ -0,0 +1 @@ +# CONFIG_SPI_SN_F_OSPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_TEGRA20_SFLASH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_TEGRA20_SFLASH new file mode 100644 index 0000000000000000000000000000000000000000..446ce07282cdd214c8d58e9cc52c63933b158852 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_TEGRA20_SFLASH @@ -0,0 +1 @@ +# CONFIG_SPI_TEGRA20_SFLASH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_THUNDERX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_THUNDERX new file mode 100644 index 0000000000000000000000000000000000000000..2f95cc2861f7db3b3def86dd8d7248fbec57c52a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_THUNDERX @@ -0,0 +1 @@ +# CONFIG_SPI_THUNDERX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_ZYNQMP_GQSPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_ZYNQMP_GQSPI new file mode 100644 index 0000000000000000000000000000000000000000..48e520fd68c575c6822f9ec47e31b84bc5708fdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_ZYNQMP_GQSPI @@ -0,0 +1 @@ +# CONFIG_SPI_ZYNQMP_GQSPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SSIF_IPMI_BMC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SSIF_IPMI_BMC new file mode 100644 index 0000000000000000000000000000000000000000..ce7b4d44146aae356f103d2d1000b42b00865933 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SSIF_IPMI_BMC @@ -0,0 +1 @@ +# CONFIG_SSIF_IPMI_BMC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STACKPROTECTOR_PER_TASK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STACKPROTECTOR_PER_TASK new file mode 100644 index 0000000000000000000000000000000000000000..2d53b16b601a4e49d6f646f9b6623a348597bce2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STACKPROTECTOR_PER_TASK @@ -0,0 +1 @@ +CONFIG_STACKPROTECTOR_PER_TASK=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_BOARD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_BOARD new file mode 100644 index 0000000000000000000000000000000000000000..16496bf2bf3a53921a50a5d2b7b589ac26a69dd0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_BOARD @@ -0,0 +1 @@ +# CONFIG_STAGING_BOARD is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_MEDIA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_MEDIA new file mode 100644 index 0000000000000000000000000000000000000000..59c987cafcfea1e94cc9338b08a26e0df4f23498 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_MEDIA @@ -0,0 +1 @@ +# CONFIG_STAGING_MEDIA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STUB_CLK_HI3660 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STUB_CLK_HI3660 new file mode 100644 index 0000000000000000000000000000000000000000..99ffc024c0deac2a9c6692fc57f6ab5ae631dfe8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STUB_CLK_HI3660 @@ -0,0 +1 @@ +CONFIG_STUB_CLK_HI3660=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SUN_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SUN_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..ba52703282bd83c04941d9361341a2cd2d19a876 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SUN_PARTITION @@ -0,0 +1 @@ +# CONFIG_SUN_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SYSCON_REBOOT_MODE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SYSCON_REBOOT_MODE new file mode 100644 index 0000000000000000000000000000000000000000..0f871191b0f5e543481ba45f74533a62f17039ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SYSCON_REBOOT_MODE @@ -0,0 +1 @@ +# CONFIG_SYSCON_REBOOT_MODE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_ATMEL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_ATMEL new file mode 100644 index 0000000000000000000000000000000000000000..2d7f3fd83e2dc3269950f52287f9ae0eee330dd6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_ATMEL @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_I2C_ATMEL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_INFINEON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_INFINEON new file mode 100644 index 0000000000000000000000000000000000000000..0514455b18772f48528389df2d2aa5e9f810ff7b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_INFINEON @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_I2C_INFINEON is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_NUVOTON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_NUVOTON new file mode 100644 index 0000000000000000000000000000000000000000..84d30bc2c0f16b648715e34ac47d119d281c6c32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_NUVOTON @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_I2C_NUVOTON is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI_CR50 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI_CR50 new file mode 100644 index 0000000000000000000000000000000000000000..734199d2e8e0d557da63a420be93cd5e51083217 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI_CR50 @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_SPI_CR50 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_ST33ZP24_I2C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_ST33ZP24_I2C new file mode 100644 index 0000000000000000000000000000000000000000..1ff9e8cfa9f55ec97ff08b1c7af9f3c12016d903 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_ST33ZP24_I2C @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_GPC_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_GPC_DMA new file mode 100644 index 
0000000000000000000000000000000000000000..d83d1f1a08182c50ea054f85c2059e62fdd00b12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_GPC_DMA @@ -0,0 +1 @@ +# CONFIG_TEGRA186_GPC_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_TIMER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_TIMER new file mode 100644 index 0000000000000000000000000000000000000000..0013fe53336cf025a8e7d756f56bcc7d000feafa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_TIMER @@ -0,0 +1 @@ +# CONFIG_TEGRA186_TIMER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA20_APB_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA20_APB_DMA new file mode 100644 index 0000000000000000000000000000000000000000..9b8efaf504118129dc422255539f3b0243325577 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA20_APB_DMA @@ -0,0 +1 @@ +# CONFIG_TEGRA20_APB_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA210_ADMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA210_ADMA new file mode 100644 index 0000000000000000000000000000000000000000..634ab1ad7fcd88efd0dc84faeb3e01049c94d651 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA210_ADMA @@ -0,0 +1 @@ +# CONFIG_TEGRA210_ADMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_ACONNECT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_ACONNECT new file mode 100644 index 0000000000000000000000000000000000000000..beeba67136125ec79ba16ebe647433b711a14be7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_ACONNECT @@ -0,0 +1 @@ +# CONFIG_TEGRA_ACONNECT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_AHB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_AHB new file mode 100644 index 0000000000000000000000000000000000000000..e11cec707a4435df0440e32951c80cb588cef390 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_AHB @@ -0,0 +1 @@ +CONFIG_TEGRA_AHB=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP new file mode 100644 index 0000000000000000000000000000000000000000..348002b0bc68cfa08484777ae69a9f82f19dc7de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP @@ -0,0 +1 @@ +CONFIG_TEGRA_BPMP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP_THERMAL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP_THERMAL new file mode 100644 index 0000000000000000000000000000000000000000..e1e1b80f43b35b7fea61ebe730164992cb212f8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP_THERMAL @@ -0,0 +1 @@ +# CONFIG_TEGRA_BPMP_THERMAL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_GMI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_GMI new file mode 100644 index 0000000000000000000000000000000000000000..c062dcb429c945dbf7f256e11703b833b15a0261 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_GMI @@ -0,0 +1 @@ +# CONFIG_TEGRA_GMI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HOST1X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HOST1X new file mode 100644 index 0000000000000000000000000000000000000000..3e42b2b35a00d06bb3583d6af5887d44ec44b193 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HOST1X @@ -0,0 +1 @@ +# CONFIG_TEGRA_HOST1X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HSP_MBOX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HSP_MBOX new file mode 100644 index 
0000000000000000000000000000000000000000..3d46b7c93a15d07d0b4acc067f0f855669df86d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HSP_MBOX @@ -0,0 +1 @@ +CONFIG_TEGRA_HSP_MBOX=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_IVC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_IVC new file mode 100644 index 0000000000000000000000000000000000000000..cdcacbec25a4a05df0f300a0592bceb353ffeeb8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_IVC @@ -0,0 +1 @@ +CONFIG_TEGRA_IVC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_SOCTHERM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_SOCTHERM new file mode 100644 index 0000000000000000000000000000000000000000..b7444627edca9d141598a4871124dd9bddcdef93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_SOCTHERM @@ -0,0 +1 @@ +# CONFIG_TEGRA_SOCTHERM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_WATCHDOG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..024c0dc62e33771d0475936dbd9d6da30f2a32ca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_TEGRA_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDERX2_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDERX2_PMU new file mode 100644 index 0000000000000000000000000000000000000000..e42dde2ddf847263dbb3e7d23dc38e37c5cfa2a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDERX2_PMU @@ -0,0 +1 @@ +CONFIG_THUNDERX2_PMU=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_BGX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_BGX new file mode 100644 index 0000000000000000000000000000000000000000..123fe6f48ad32e518a0d368de53de0d5d023bfb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_BGX @@ -0,0 +1 @@ +CONFIG_THUNDER_NIC_BGX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_PF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_PF new file mode 100644 index 0000000000000000000000000000000000000000..6b047a13cf72795e8081d9b1500b26515a2ce473 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_PF @@ -0,0 +1 @@ +CONFIG_THUNDER_NIC_PF=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_RGX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_RGX new file mode 100644 index 0000000000000000000000000000000000000000..c50ae013ff8bce5bd12ffebbc264a0fbc7d0e524 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_RGX @@ -0,0 +1 @@ +CONFIG_THUNDER_NIC_RGX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_VF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_VF new file mode 100644 index 0000000000000000000000000000000000000000..5fdfca5dbde7b4ef3590a1cd74e498f04ebc8c04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_VF @@ -0,0 +1 @@ +CONFIG_THUNDER_NIC_VF=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIFM_7XX1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIFM_7XX1 new file mode 100644 index 0000000000000000000000000000000000000000..7cbf187100907f73ba317533ac5efc91ddcd45fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIFM_7XX1 @@ -0,0 +1 @@ +# CONFIG_TIFM_7XX1 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_ACPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_ACPI new file mode 100644 index 
0000000000000000000000000000000000000000..4eadf73929f25f36fa340e15ead17e4eae9b21cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_ACPI @@ -0,0 +1 @@ +CONFIG_TIMER_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_OF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_OF new file mode 100644 index 0000000000000000000000000000000000000000..99fc54e6a614e00af1665e8d615c21bbdfce298c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_OF @@ -0,0 +1 @@ +CONFIG_TIMER_OF=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_PROBE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_PROBE new file mode 100644 index 0000000000000000000000000000000000000000..b2aa1db30036ae7e80db5d6860de6c1a75a06d87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_PROBE @@ -0,0 +1 @@ +CONFIG_TIMER_PROBE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRACE_MMIO_ACCESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRACE_MMIO_ACCESS new file mode 100644 index 0000000000000000000000000000000000000000..ff22d56979aa6b7c05052e2fa7642621d6ba60f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRACE_MMIO_ACCESS @@ -0,0 +1 @@ +# CONFIG_TRACE_MMIO_ACCESS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRANS_TABLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRANS_TABLE new file mode 100644 index 0000000000000000000000000000000000000000..4fa677f2e6b12f3acdf287bb69c03f0e74284d51 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRANS_TABLE @@ -0,0 +1 @@ +CONFIG_TRANS_TABLE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_FUSB302 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_FUSB302 new file mode 100644 index 0000000000000000000000000000000000000000..c3f23ea16bf752ed9c65c486124ea41eed8b5f46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_FUSB302 @@ -0,0 +1 @@ +# CONFIG_TYPEC_FUSB302 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_QCOM_PMIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_QCOM_PMIC new file mode 100644 index 0000000000000000000000000000000000000000..b04c909a37fd441647ba644b24b9d32b82a7c1a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_QCOM_PMIC @@ -0,0 +1 @@ +# CONFIG_TYPEC_QCOM_PMIC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UEFI_CPER_ARM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UEFI_CPER_ARM new file mode 100644 index 0000000000000000000000000000000000000000..b387eada34cb9ab4609c2fc5e0c6090a677445de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UEFI_CPER_ARM @@ -0,0 +1 @@ +CONFIG_UEFI_CPER_ARM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ULTRASOC_SMB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ULTRASOC_SMB new file mode 100644 index 0000000000000000000000000000000000000000..820ee82fd9dbf3740f9d2bdecc3fa4344cc69ca0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ULTRASOC_SMB @@ -0,0 +1 @@ +# CONFIG_ULTRASOC_SMB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UNIXWARE_DISKLABEL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UNIXWARE_DISKLABEL new file mode 100644 index 0000000000000000000000000000000000000000..2af4dbd9b63cc3138a4aa358181cedafa1f0f443 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UNIXWARE_DISKLABEL @@ -0,0 +1 @@ +# CONFIG_UNIXWARE_DISKLABEL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_CHAOSKEY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_CHAOSKEY new file mode 100644 index 
0000000000000000000000000000000000000000..e92d4cfb0a5e21071fc536a3f9f869540972050d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_CHAOSKEY @@ -0,0 +1 @@ +CONFIG_USB_CHAOSKEY=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_HCD_PLATFORM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_HCD_PLATFORM new file mode 100644 index 0000000000000000000000000000000000000000..b8939d990a5e178f81936f9dc5e291521e42dbe5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_HCD_PLATFORM @@ -0,0 +1 @@ +CONFIG_USB_EHCI_HCD_PLATFORM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_TEGRA new file mode 100644 index 0000000000000000000000000000000000000000..43a204ef511f5240b6b71c00ba7cdba6d209369b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_TEGRA @@ -0,0 +1 @@ +# CONFIG_USB_EHCI_TEGRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_NET_SR9700 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_NET_SR9700 new file mode 100644 index 0000000000000000000000000000000000000000..f2b8724f956460e3b17a00adc5b6da2359332afb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_NET_SR9700 @@ -0,0 +1 @@ +CONFIG_USB_NET_SR9700=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ONBOARD_HUB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ONBOARD_HUB new file mode 100644 index 0000000000000000000000000000000000000000..79ccaa2177efd6d64421f44605b3786562f3b0ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ONBOARD_HUB @@ -0,0 +1 @@ +# CONFIG_USB_ONBOARD_HUB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_QCOM_EUD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_QCOM_EUD new file mode 100644 index 0000000000000000000000000000000000000000..38c147a013393a6c3c3a041bdb602be8c19470dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_QCOM_EUD @@ -0,0 +1 @@ +# CONFIG_USB_QCOM_EUD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_CONSOLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_CONSOLE new file mode 100644 index 0000000000000000000000000000000000000000..753c9f2232647391464c319b4371355dcb2a0d41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_CONSOLE @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_CONSOLE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_SIMPLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_SIMPLE new file mode 100644 index 0000000000000000000000000000000000000000..6ffd8f60b27b7e773cd287e05089d9b01cdb8bbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_SIMPLE @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SIMPLE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SPEEDTOUCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SPEEDTOUCH new file mode 100644 index 0000000000000000000000000000000000000000..e34bebc6254d1bbde7c78b77810e89fd4faa92aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SPEEDTOUCH @@ -0,0 +1 @@ +# CONFIG_USB_SPEEDTOUCH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_TEGRA_PHY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_TEGRA_PHY new file mode 100644 index 0000000000000000000000000000000000000000..084266b2f48fb2aa98ca5c87827fdf98e6187e03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_TEGRA_PHY @@ -0,0 +1 @@ +# CONFIG_USB_TEGRA_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_UHCI_HCD 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_UHCI_HCD new file mode 100644 index 0000000000000000000000000000000000000000..9c9d8c2f84c7710104060f1a9015b79ae12822c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_UHCI_HCD @@ -0,0 +1 @@ +CONFIG_USB_UHCI_HCD=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI new file mode 100644 index 0000000000000000000000000000000000000000..63568d53655a42fffe135794f89a095e93162c13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI @@ -0,0 +1 @@ +# CONFIG_USB_ULPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI_BUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI_BUS new file mode 100644 index 0000000000000000000000000000000000000000..2e81d95ff4f562f81c53a6d60ffd63bc6039f736 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI_BUS @@ -0,0 +1 @@ +CONFIG_USB_ULPI_BUS=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_DBGCAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_DBGCAP new file mode 100644 index 0000000000000000000000000000000000000000..195a33d45d0e2329b008875885e79bb9d817d5e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_DBGCAP @@ -0,0 +1 @@ +# CONFIG_USB_XHCI_DBGCAP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_HISTB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_HISTB new file mode 100644 index 0000000000000000000000000000000000000000..c1785f1996422f55a4c7bf84784039a12171a189 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_HISTB @@ -0,0 +1 @@ +# CONFIG_USB_XHCI_HISTB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_PLATFORM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_PLATFORM new file mode 100644 index 0000000000000000000000000000000000000000..f0dce4b3184568e51ca0604f90b1ce1e3ede4b33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_PLATFORM @@ -0,0 +1 @@ +CONFIG_USB_XHCI_PLATFORM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VCPU_STALL_DETECTOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VCPU_STALL_DETECTOR new file mode 100644 index 0000000000000000000000000000000000000000..06a28199bd34aea89013b07e43e838f5f2adf6c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VCPU_STALL_DETECTOR @@ -0,0 +1 @@ +# CONFIG_VCPU_STALL_DETECTOR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VEXPRESS_CONFIG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VEXPRESS_CONFIG new file mode 100644 index 0000000000000000000000000000000000000000..389660309f56db26b253b4f16e4977d36a0927b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VEXPRESS_CONFIG @@ -0,0 +1 @@ +CONFIG_VEXPRESS_CONFIG=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_AMBA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_AMBA new file mode 100644 index 0000000000000000000000000000000000000000..3b202f4c64ef947f22bc5c4775e1c06564255899 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_AMBA @@ -0,0 +1 @@ +# CONFIG_VFIO_AMBA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_AMDXGBE_RESET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_AMDXGBE_RESET new file mode 100644 index 0000000000000000000000000000000000000000..1662b48c3be6f0e3c33785f07214b76400faefc9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_AMDXGBE_RESET @@ -0,0 +1 @@ +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET new file mode 100644 index 0000000000000000000000000000000000000000..7c71659cf7d8ad53b7f18da58143159778c70793 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET @@ -0,0 +1 @@ +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VME_BUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VME_BUS new file mode 100644 index 0000000000000000000000000000000000000000..4523e4e1dde9b09c78a12c9c5050e54b6b000a4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VME_BUS @@ -0,0 +1 @@ +# CONFIG_VME_BUS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VT6655 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VT6655 new file mode 100644 index 0000000000000000000000000000000000000000..d13d9caef68e9ceffd07fb6213e095263882fd81 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VT6655 @@ -0,0 +1 @@ +# CONFIG_VT6655 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_WLAN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_WLAN new file mode 100644 index 0000000000000000000000000000000000000000..35667b211faedee52bed068365f6c3a276a2b8dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_WLAN @@ -0,0 +1 @@ +# CONFIG_WLAN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_DMA new file mode 100644 index 0000000000000000000000000000000000000000..7c67d5b57e94e624ad08ae03d4ba96f46f87cb85 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_DMA @@ -0,0 +1 @@ +# CONFIG_XGENE_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_PMU new file mode 100644 index 0000000000000000000000000000000000000000..df5afee9fef3dd695ec1eb404eef62e8190e78c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_PMU @@ -0,0 +1 @@ +CONFIG_XGENE_PMU=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_SLIMPRO_MBOX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_SLIMPRO_MBOX new file mode 100644 index 0000000000000000000000000000000000000000..cbc486395bb3170720b4216aa118c8bbe544aa22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_SLIMPRO_MBOX @@ -0,0 +1 @@ +CONFIG_XGENE_SLIMPRO_MBOX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_INTC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_INTC new file mode 100644 index 0000000000000000000000000000000000000000..f5d8ef7f1b7af71150886433d30173015fb490b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_INTC @@ -0,0 +1 @@ +# CONFIG_XILINX_INTC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_WINDOW_WATCHDOG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_WINDOW_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..818d251e7f0898a39b70e4353b11ef481d15309e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_WINDOW_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_XILINX_WINDOW_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DMA new file mode 100644 index 0000000000000000000000000000000000000000..462b390f4043be5af3098396fe95066820f0a238 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DMA @@ -0,0 +1 @@ +# CONFIG_XILINX_ZYNQMP_DMA is not set diff 
--git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DPDMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DPDMA new file mode 100644 index 0000000000000000000000000000000000000000..70851f72d1e7f6d181d9c7a6215ffb3dd1465a6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DPDMA @@ -0,0 +1 @@ +# CONFIG_XILINX_ZYNQMP_DPDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XIL_AXIS_FIFO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XIL_AXIS_FIFO new file mode 100644 index 0000000000000000000000000000000000000000..0e5adab18aa75f3f0eb3094b96251b469d19ef2a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XIL_AXIS_FIFO @@ -0,0 +1 @@ +# CONFIG_XIL_AXIS_FIFO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ZRAM_TRACK_ENTRY_ACTIME b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ZRAM_TRACK_ENTRY_ACTIME new file mode 100644 index 0000000000000000000000000000000000000000..d986e38c4fadd01ac85120b2db77304c8b9c0794 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ZRAM_TRACK_ENTRY_ACTIME @@ -0,0 +1 @@ +# CONFIG_ZRAM_TRACK_ENTRY_ACTIME is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN new file mode 100644 index 0000000000000000000000000000000000000000..4b6ead8298f950f8f2fd0dba69d91f50a5f797a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN @@ -0,0 +1 @@ +CONFIG_6LOWPAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_DEBUGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_DEBUGFS new file mode 100644 index 0000000000000000000000000000000000000000..16bf7160507639ef0b8674d0958bce89252593cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_6LOWPAN_DEBUGFS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_NHC b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_NHC new file mode 100644 index 0000000000000000000000000000000000000000..002da8af2aa7bffe547c115366a0ed5e4efda391 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_NHC @@ -0,0 +1 @@ +# CONFIG_6LOWPAN_NHC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139CP b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139CP new file mode 100644 index 0000000000000000000000000000000000000000..7688cd89347102d67ae16cb647eb203215eb9a98 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139CP @@ -0,0 +1 @@ +CONFIG_8139CP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO new file mode 100644 index 0000000000000000000000000000000000000000..fe01332954f68624e724a1439da44be2aaf724a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO @@ -0,0 +1 @@ +CONFIG_8139TOO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_8129 b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_8129 new file mode 100644 index 0000000000000000000000000000000000000000..b8b8ef3d78f6e6a2876551c0bcc4d3b53a1e915a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_8129 @@ -0,0 +1 @@ +CONFIG_8139TOO_8129=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_PIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_PIO new file mode 100644 index 0000000000000000000000000000000000000000..28b3b851a57048295e6fb159e78fb55ebd33fcb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_PIO @@ -0,0 +1 @@ +# CONFIG_8139TOO_PIO is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_TUNE_TWISTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_TUNE_TWISTER new file mode 100644 index 0000000000000000000000000000000000000000..ba1c1bd0dd8d1d766bb6482476b18ec5b938d083 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_TUNE_TWISTER @@ -0,0 +1 @@ +# CONFIG_8139TOO_TUNE_TWISTER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139_OLD_RX_RESET b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139_OLD_RX_RESET new file mode 100644 index 0000000000000000000000000000000000000000..d30a504dae554043c123d6540962e1ac02f65a3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139_OLD_RX_RESET @@ -0,0 +1 @@ +# CONFIG_8139_OLD_RX_RESET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACCESSIBILITY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACCESSIBILITY new file mode 100644 index 0000000000000000000000000000000000000000..9047179a277ed8c410c6a23e46f16ad732244967 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACCESSIBILITY @@ -0,0 +1 @@ +# CONFIG_ACCESSIBILITY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACORN_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACORN_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..91e1dd62196f0d8bc67faa127411225966710be7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACORN_PARTITION @@ -0,0 +1 @@ +# CONFIG_ACORN_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_CPPC_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_CPPC_LIB new file mode 100644 index 0000000000000000000000000000000000000000..0b8ca34adade2a84f464ccd35e7e5b6f7b4cada2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_CPPC_LIB @@ -0,0 +1 @@ +CONFIG_ACPI_CPPC_LIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FFH b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FFH new file mode 100644 index 0000000000000000000000000000000000000000..7961cdc194067bade77b07e41f75071eca97623d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FFH @@ -0,0 +1 @@ +# CONFIG_ACPI_FFH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FPDT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FPDT new file mode 100644 index 0000000000000000000000000000000000000000..385ec923a2cbff851bdbcc102d07e508242676c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FPDT @@ -0,0 +1 @@ +# CONFIG_ACPI_FPDT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_HOTPLUG_CPU b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_HOTPLUG_CPU new file mode 100644 index 0000000000000000000000000000000000000000..ef79411654e0f3ed089ea20cda437b4212f87ba9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_HOTPLUG_CPU @@ -0,0 +1 @@ +CONFIG_ACPI_HOTPLUG_CPU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_I2C_OPREGION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_I2C_OPREGION new file mode 100644 index 0000000000000000000000000000000000000000..92036b9757c836a1b0c313971e5da7a017583a08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_I2C_OPREGION @@ -0,0 +1 @@ +CONFIG_ACPI_I2C_OPREGION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_MDIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_MDIO new file mode 100644 index 0000000000000000000000000000000000000000..b24aedf501254dd7dca8ca27e0d8eb341ea91040 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_MDIO @@ -0,0 +1 @@ +CONFIG_ACPI_MDIO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PFRUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PFRUT new file mode 100644 index 0000000000000000000000000000000000000000..870f63f352336d9ff12d15b46dade9dedf7c62d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PFRUT @@ -0,0 +1 @@ +# CONFIG_ACPI_PFRUT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PROCESSOR_IDLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PROCESSOR_IDLE new file mode 100644 index 0000000000000000000000000000000000000000..e6e1026db37b2cec842830304d96a6135240fa07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PROCESSOR_IDLE @@ -0,0 +1 @@ +CONFIG_ACPI_PROCESSOR_IDLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_TABLE_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_TABLE_LIB new file mode 100644 index 0000000000000000000000000000000000000000..0744bee411678fc6e633cbd691189232f51e5b66 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_TABLE_LIB @@ -0,0 +1 @@ +CONFIG_ACPI_TABLE_LIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AD525X_DPOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_AD525X_DPOT new file mode 100644 index 0000000000000000000000000000000000000000..789ddbbca427464c5d8b5190694878b8f96b6563 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AD525X_DPOT @@ -0,0 +1 @@ +# CONFIG_AD525X_DPOT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ADFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..5d5a3e9c8db6815687ad4cac44df7b7bf183a5ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADFS_FS @@ -0,0 +1 @@ +# CONFIG_ADFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1100_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1100_PHY new file mode 100644 index 0000000000000000000000000000000000000000..128f41d6a2b9d57b5274c0d32499978bfcb2a039 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1100_PHY @@ -0,0 +1 @@ +# CONFIG_ADIN1100_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1110 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1110 new file mode 100644 index 0000000000000000000000000000000000000000..38c918ed04afd365b59b5e1d838b8f59a1ed2f13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1110 @@ -0,0 +1 @@ +# CONFIG_ADIN1110 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN_PHY new file mode 100644 index 0000000000000000000000000000000000000000..7523e9d1f9c070d7a6b8016d677dbbc4dc1c73a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN_PHY @@ -0,0 +1 @@ +# CONFIG_ADIN_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AFFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_AFFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..20d90af1cd464d3c9137413b15bfe897a3fa21c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AFFS_FS @@ -0,0 +1 @@ +# CONFIG_AFFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_AFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..b622cdb8ef06fea75d0fe03d4ac15facd3bbb56b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AFS_FS @@ -0,0 +1 @@ +# 
CONFIG_AFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_KCM b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_KCM new file mode 100644 index 0000000000000000000000000000000000000000..b26e5261615240209c7afb04f6e030150f629651 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_KCM @@ -0,0 +1 @@ +# CONFIG_AF_KCM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_RXRPC b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_RXRPC new file mode 100644 index 0000000000000000000000000000000000000000..b703c0366e67ecb83dd9ab2ba9b97a45cbb6dcfb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_RXRPC @@ -0,0 +1 @@ +# CONFIG_AF_RXRPC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_UNIX_OOB b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_UNIX_OOB new file mode 100644 index 0000000000000000000000000000000000000000..be4aa77487b256a705a77f4cb234eda5f3903e91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_UNIX_OOB @@ -0,0 +1 @@ +CONFIG_AF_UNIX_OOB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AHCI_DWC b/anolis/configs/L2-OPTIONAL/default/CONFIG_AHCI_DWC new file mode 100644 index 0000000000000000000000000000000000000000..d5d0496905ed8d84388d34b93440661d90783015 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AHCI_DWC @@ -0,0 +1 @@ +# CONFIG_AHCI_DWC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AIX_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_AIX_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..6e03c59c70e819f27ad8e1168d94f564c31191af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AIX_PARTITION @@ -0,0 +1 @@ +# CONFIG_AIX_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ALIM7101_WDT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALIM7101_WDT new file mode 100644 index 0000000000000000000000000000000000000000..6ded5139183835ab6cc07afbe5849beb16e6ded1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALIM7101_WDT @@ -0,0 +1 @@ +CONFIG_ALIM7101_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MBOX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MBOX new file mode 100644 index 0000000000000000000000000000000000000000..62cd998d230f12b4b27fd97237eb97ee1b2a7293 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MBOX @@ -0,0 +1 @@ +# CONFIG_ALTERA_MBOX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MSGDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MSGDMA new file mode 100644 index 0000000000000000000000000000000000000000..88d345ac66b91445a843cf3b4675ad0ce0f2f27f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MSGDMA @@ -0,0 +1 @@ +# CONFIG_ALTERA_MSGDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_TSE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_TSE new file mode 100644 index 0000000000000000000000000000000000000000..f803036e892d395a463d96d3bad1ce006752d64e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_TSE @@ -0,0 +1 @@ +# CONFIG_ALTERA_TSE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ALX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALX new file mode 100644 index 0000000000000000000000000000000000000000..00298a2d451dd62c11c60b2240eecb9582280327 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALX @@ -0,0 +1 @@ +CONFIG_ALX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AMD_PHY 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_AMD_PHY new file mode 100644 index 0000000000000000000000000000000000000000..6675832f2c5b77b3982eaef746b536e05abeef43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AMD_PHY @@ -0,0 +1 @@ +CONFIG_AMD_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AMT b/anolis/configs/L2-OPTIONAL/default/CONFIG_AMT new file mode 100644 index 0000000000000000000000000000000000000000..55399527e3efc382379ba0956e2e4b5011cd0d4c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AMT @@ -0,0 +1 @@ +# CONFIG_AMT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ANDROID_BINDER_IPC b/anolis/configs/L2-OPTIONAL/default/CONFIG_ANDROID_BINDER_IPC new file mode 100644 index 0000000000000000000000000000000000000000..538c5f8c1c6a68b975b9bf8165678ff38dadf8f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ANDROID_BINDER_IPC @@ -0,0 +1 @@ +# CONFIG_ANDROID_BINDER_IPC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ANON_VMA_NAME b/anolis/configs/L2-OPTIONAL/default/CONFIG_ANON_VMA_NAME new file mode 100644 index 0000000000000000000000000000000000000000..73bfbfec5b304de5145845db43d291ced190d37e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ANON_VMA_NAME @@ -0,0 +1 @@ +# CONFIG_ANON_VMA_NAME is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_APERTURE_HELPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_APERTURE_HELPERS new file mode 100644 index 0000000000000000000000000000000000000000..87b1eca12171bf640649e05aa6c96155fbd83459 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_APERTURE_HELPERS @@ -0,0 +1 @@ +CONFIG_APERTURE_HELPERS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLE_MFI_FASTCHARGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLE_MFI_FASTCHARGE new file mode 100644 index 0000000000000000000000000000000000000000..978db90d7ec1117a3a1328d94f0e67afbea07d03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLE_MFI_FASTCHARGE @@ -0,0 +1 @@ +# CONFIG_APPLE_MFI_FASTCHARGE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLICOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLICOM new file mode 100644 index 0000000000000000000000000000000000000000..0b3abc60a2b4e352ea320dfc919f1b59726057fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLICOM @@ -0,0 +1 @@ +# CONFIG_APPLICOM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AQUANTIA_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_AQUANTIA_PHY new file mode 100644 index 0000000000000000000000000000000000000000..81c48619ef1d6919226177441342cd556faab7e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AQUANTIA_PHY @@ -0,0 +1 @@ +CONFIG_AQUANTIA_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE new file mode 100644 index 0000000000000000000000000000000000000000..f26f165028cb5795cbe981b560e7ff627c0f1f57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE @@ -0,0 +1 @@ +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_DMA_ADDR_T_64BIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_DMA_ADDR_T_64BIT new file mode 100644 index 0000000000000000000000000000000000000000..595f734a0ec011c441201ee71d52fd5e8304303c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_DMA_ADDR_T_64BIT @@ 
-0,0 +1 @@ +CONFIG_ARCH_DMA_ADDR_T_64BIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION new file mode 100644 index 0000000000000000000000000000000000000000..fae543d1a0c637fa74caa2f32e4b87182932da43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION @@ -0,0 +1 @@ +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG new file mode 100644 index 0000000000000000000000000000000000000000..10b01946ac51a91b083982a70866c83d05a4628a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG @@ -0,0 +1 @@ +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE new file mode 100644 index 0000000000000000000000000000000000000000..9be2daab643ae3ee281a52834ab69c26716d8281 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE @@ -0,0 +1 @@ +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK new file mode 100644 index 0000000000000000000000000000000000000000..418099763e477d1d98957153c19055c430bcfd00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK @@ -0,0 +1 @@ +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_THP_MIGRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_THP_MIGRATION new file mode 100644 index 0000000000000000000000000000000000000000..400a50b711c1715a29060a1d12610b3d89348207 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_THP_MIGRATION @@ -0,0 +1 @@ +CONFIG_ARCH_ENABLE_THP_MIGRATION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE new file mode 100644 index 0000000000000000000000000000000000000000..921cab65c036ba7f4c08fc5470b8910f783941c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CACHE_LINE_SIZE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CACHE_LINE_SIZE new file mode 100644 index 0000000000000000000000000000000000000000..196fda086baa8566072fb4adc83f1cc7fbca2755 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CACHE_LINE_SIZE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_COPY_MC b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_COPY_MC new file mode 100644 index 0000000000000000000000000000000000000000..83b47f0f952021955edba115767f133136f962ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_COPY_MC @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_COPY_MC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CPU_RESCTRL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CPU_RESCTRL new file mode 100644 index 0000000000000000000000000000000000000000..6cd1474b8d3299bc79e4f29e3de07765d80b1727 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CPU_RESCTRL @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_CPU_RESCTRL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CURRENT_STACK_POINTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CURRENT_STACK_POINTER new file mode 100644 index 0000000000000000000000000000000000000000..83d84d3440413ee5f6dad79361193a097dafd027 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CURRENT_STACK_POINTER @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VIRTUAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VIRTUAL new file mode 100644 index 0000000000000000000000000000000000000000..77cf47530ad30e6d07d617086174b6cd6b013575 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VIRTUAL @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE new file mode 100644 index 0000000000000000000000000000000000000000..b2191b6b813ec46dfce3ebeb7eef2fc6d06090f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_WX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_WX new file mode 100644 index 0000000000000000000000000000000000000000..f6c9583f6153252950226b64928d5e01ea7f9f2b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_WX @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_DEBUG_WX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ELF_RANDOMIZE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ELF_RANDOMIZE new file mode 100644 index 0000000000000000000000000000000000000000..7a97d06d8c7ff34516ce49ff1651f1806575de79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ELF_RANDOMIZE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FAST_MULTIPLIER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FAST_MULTIPLIER new file mode 100644 index 0000000000000000000000000000000000000000..25a951619ac240fba5e79d12ba40d58a45040ed7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FAST_MULTIPLIER @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FORTIFY_SOURCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FORTIFY_SOURCE new file mode 100644 index 0000000000000000000000000000000000000000..90349d67d49247b8fec8ca0630e700a63e100dfd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FORTIFY_SOURCE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GCOV_PROFILE_ALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GCOV_PROFILE_ALL new file mode 100644 index 0000000000000000000000000000000000000000..ad8dc5ba8b130e0e70b456aaef389ddfbddc6a0e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GCOV_PROFILE_ALL @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GIGANTIC_PAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GIGANTIC_PAGE new file mode 100644 index 0000000000000000000000000000000000000000..a99d8fc5735f425517b8340a046d51494a2ef8b7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GIGANTIC_PAGE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_KCOV b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_KCOV new file mode 100644 index 0000000000000000000000000000000000000000..ccdeb82355b7c669275a2ddf3ea553e20b73cfc7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_KCOV @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_KCOV=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE new file mode 100644 index 0000000000000000000000000000000000000000..5dfa4eb1546fb45ee311e374a9835e37bfd07e14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS new file mode 100644 index 0000000000000000000000000000000000000000..29771664237314613cd166f7b77f885477288547 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE new file mode 100644 index 0000000000000000000000000000000000000000..8efdf990ab6806924710117f030d752a0f066388 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PMEM_API b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PMEM_API new file mode 100644 index 0000000000000000000000000000000000000000..fa1d33faebf0907a63753283127f656c852e2eea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PMEM_API @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_PMEM_API=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_DEVMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_DEVMAP new file mode 100644 index 0000000000000000000000000000000000000000..4b170a7c5dbd952826f8c6d73c4f8fa588bf447b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_DEVMAP @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_PTE_DEVMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_SPECIAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_SPECIAL new file mode 100644 index 0000000000000000000000000000000000000000..571587567b8ea80398a9f6d59123f38bc5fc6741 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_SPECIAL @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_PTE_SPECIAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_DIRECT_MAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_DIRECT_MAP new file mode 100644 index 0000000000000000000000000000000000000000..9530949b5df109ed99bedcb09d5888639d8b2ea5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_DIRECT_MAP @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_MEMORY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_MEMORY new file mode 100644 index 0000000000000000000000000000000000000000..18f175619de459be29d4c26d39fd405320c711cd --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_MEMORY @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_SET_MEMORY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_KERNEL_RWX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_KERNEL_RWX new file mode 100644 index 0000000000000000000000000000000000000000..20f381cf9c0ae0062461f9e7021f83226518372d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_KERNEL_RWX @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_MODULE_RWX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_MODULE_RWX new file mode 100644 index 0000000000000000000000000000000000000000..569e82edce5afff21f670f0b85268579c261fbcb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_MODULE_RWX @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SYSCALL_WRAPPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SYSCALL_WRAPPER new file mode 100644 index 0000000000000000000000000000000000000000..de5d0e76570f303b5f199fba1eb8160fa01650d5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SYSCALL_WRAPPER @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE new file mode 100644 index 0000000000000000000000000000000000000000..e0e96531f64f0ce709681670190bc9a1a63a1b57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL new file mode 100644 index 0000000000000000000000000000000000000000..c2e0af4313503e0abc80f8f4bfccc3a4c9aef311 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG new file mode 100644 index 0000000000000000000000000000000000000000..4f2494e6144140e83021f5f84790c3b320b9ffbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG @@ -0,0 +1 @@ +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_HEADER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_HEADER new file mode 100644 index 0000000000000000000000000000000000000000..14a5d8705579271e399140054589cfe2dcdb99fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_HEADER @@ -0,0 +1 @@ +CONFIG_ARCH_HIBERNATION_HEADER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_POSSIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_POSSIBLE new file mode 100644 index 0000000000000000000000000000000000000000..db0d6e6eb38deba54ad20a26a91005570874405a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_POSSIBLE @@ -0,0 +1 @@ +CONFIG_ARCH_HIBERNATION_POSSIBLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE new file mode 100644 index 
0000000000000000000000000000000000000000..5d65abe41431361bb0a22784ea3813f53e8455ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE @@ -0,0 +1 @@ +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX new file mode 100644 index 0000000000000000000000000000000000000000..f8be112fd91fc798fe0b2d7135f737c70e2a5a69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_PROC_KCORE_TEXT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_PROC_KCORE_TEXT new file mode 100644 index 0000000000000000000000000000000000000000..67d0d787bf08c6108cd068e47e445c443ea942ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_PROC_KCORE_TEXT @@ -0,0 +1 @@ +CONFIG_ARCH_PROC_KCORE_TEXT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SELECTS_KEXEC_FILE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SELECTS_KEXEC_FILE new file mode 100644 index 0000000000000000000000000000000000000000..29027c6066ec2978cdb36d5762e5b48273cd6dbc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SELECTS_KEXEC_FILE @@ -0,0 +1 @@ +CONFIG_ARCH_SELECTS_KEXEC_FILE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SPARSEMEM_ENABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SPARSEMEM_ENABLE new file mode 100644 index 0000000000000000000000000000000000000000..0ff8e294ce87278820c7a9f47dd5d524095c4f38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SPARSEMEM_ENABLE @@ -0,0 +1 @@ +CONFIG_ARCH_SPARSEMEM_ENABLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_STACKWALK b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_STACKWALK new file mode 100644 index 0000000000000000000000000000000000000000..a78d75b4de0a4a004d5b32d84884b11747c5b5f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_STACKWALK @@ -0,0 +1 @@ +CONFIG_ARCH_STACKWALK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ACPI new file mode 100644 index 0000000000000000000000000000000000000000..e9edfd8a8836ae9c90c8cd07698ee9075a5412da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ACPI @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ATOMIC_RMW b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ATOMIC_RMW new file mode 100644 index 0000000000000000000000000000000000000000..285625cdcc9d74f2860192a5e86d9549c7eaeab9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ATOMIC_RMW @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CFI_CLANG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CFI_CLANG new file mode 100644 index 0000000000000000000000000000000000000000..f6bc5a7b3e6683d9e930787745349042f21a2bcd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CFI_CLANG @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CRASH_DUMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CRASH_DUMP new file mode 100644 index 
0000000000000000000000000000000000000000..88b6c6a18b85bc72ac18f430935b2e7b723c2218 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CRASH_DUMP @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC new file mode 100644 index 0000000000000000000000000000000000000000..6f142bb2a4b5c83c708df034da305bbde85238ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_INT128 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_INT128 new file mode 100644 index 0000000000000000000000000000000000000000..c570896730010662d80f2bd4173e82f4849ba847 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_INT128 @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_INT128=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC new file mode 100644 index 0000000000000000000000000000000000000000..599138ae4cd03576a8d9d4d4206ab30dcffbfc40 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KEXEC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_FILE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_FILE new file mode 100644 index 0000000000000000000000000000000000000000..e3b5a0f4d62728d9dd429a9e68eab0f58bd20da9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_FILE @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_SIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_SIG new file mode 100644 index 0000000000000000000000000000000000000000..0f90d8751280b74aefda112b8e270c0eb10464ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_SIG @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG new file mode 100644 index 0000000000000000000000000000000000000000..29dd600988c997ff1ccf20116c240553396ae914 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN new file mode 100644 index 0000000000000000000000000000000000000000..86a79b3a911f3709ba140df2f042050f1c7d9920 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE new file mode 100644 index 0000000000000000000000000000000000000000..08c47ee88379b0b714238158611630426f3a02b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_NUMA_BALANCING b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_NUMA_BALANCING new file 
mode 100644 index 0000000000000000000000000000000000000000..5880157a74078d8e8c8776bb66c78a44b6c86d7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_NUMA_BALANCING @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK new file mode 100644 index 0000000000000000000000000000000000000000..766a7a058d0fad8909a8ec9dd14f93b9b8ee1ae8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK new file mode 100644 index 0000000000000000000000000000000000000000..8f104442a665c2ebe53b0c3bbd14a1cfad23ac16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_UPROBES b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_UPROBES new file mode 100644 index 0000000000000000000000000000000000000000..197bae34d98bb60f696ee357eab467e1e97f8040 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_UPROBES @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_UPROBES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUSPEND_POSSIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUSPEND_POSSIBLE new file mode 100644 index 0000000000000000000000000000000000000000..d90dc4432ce3ffeecb0d2903f01746bc7b63a256 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUSPEND_POSSIBLE @@ -0,0 +1 @@ +CONFIG_ARCH_SUSPEND_POSSIBLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USES_HIGH_VMA_FLAGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USES_HIGH_VMA_FLAGS new file mode 100644 index 0000000000000000000000000000000000000000..32a337c700fff5226e897d267a97b4c320449911 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USES_HIGH_VMA_FLAGS @@ -0,0 +1 @@ +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_CMPXCHG_LOCKREF b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_CMPXCHG_LOCKREF new file mode 100644 index 0000000000000000000000000000000000000000..9af3231c35878edfd8249686f67cbfff36f8bc85 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_CMPXCHG_LOCKREF @@ -0,0 +1 @@ +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMREMAP_PROT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMREMAP_PROT new file mode 100644 index 0000000000000000000000000000000000000000..b52fb384d06847a474c25addf7016adb77d321fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMREMAP_PROT @@ -0,0 +1 @@ +CONFIG_ARCH_USE_MEMREMAP_PROT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMTEST new file mode 100644 index 0000000000000000000000000000000000000000..d6699602bd952706e181a6c99d2729c682d3a23c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMTEST @@ -0,0 +1 @@ +CONFIG_ARCH_USE_MEMTEST=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_RWLOCKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_RWLOCKS new file mode 100644 index 
0000000000000000000000000000000000000000..5c3a81a683feaec621eaf41bf8135a1f82292f6c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_RWLOCKS
@@ -0,0 +1 @@
+CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_SPINLOCKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_SPINLOCKS
new file mode 100644
index 0000000000000000000000000000000000000000..3c2e0bd4dd52d2e2a809e8a7cccddb2900492b85
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_SPINLOCKS
@@ -0,0 +1 @@
+CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_SYM_ANNOTATIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_SYM_ANNOTATIONS
new file mode 100644
index 0000000000000000000000000000000000000000..20828e090abda77520dd269aa98743d64868de76
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_SYM_ANNOTATIONS
@@ -0,0 +1 @@
+CONFIG_ARCH_USE_SYM_ANNOTATIONS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_NO_INSTR b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_NO_INSTR
new file mode 100644
index 0000000000000000000000000000000000000000..c43bbe810ca447b95df113bd890605ad49e227d6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_NO_INSTR
@@ -0,0 +1 @@
+CONFIG_ARCH_WANTS_NO_INSTR=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_THP_SWAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_THP_SWAP
new file mode 100644
index 0000000000000000000000000000000000000000..5bc9663c1ebe3652fb138dd81d59af5fc222e267
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_THP_SWAP
@@ -0,0 +1 @@
+CONFIG_ARCH_WANTS_THP_SWAP=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
new file mode 100644
index 0000000000000000000000000000000000000000..22d97ed5e0fb19c11d4c1c3f8601082777976108
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
@@ -0,0 +1 @@
+CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
new file mode 100644
index 0000000000000000000000000000000000000000..ad11043d07873ae97b5b56da386571b041af9b53
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
@@ -0,0 +1 @@
+CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_DEFAULT_BPF_JIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_DEFAULT_BPF_JIT
new file mode 100644
index 0000000000000000000000000000000000000000..22d98b1329fe25c42a4bf3e044bc414367833867
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_DEFAULT_BPF_JIT
@@ -0,0 +1 @@
+CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_HUGE_PMD_SHARE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_HUGE_PMD_SHARE
new file mode 100644
index 0000000000000000000000000000000000000000..a7682c3bd51f798fab4d1a905834c7895a7f2f08
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -0,0 +1 @@
+CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_LD_ORPHAN_WARN b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_LD_ORPHAN_WARN
new file mode 100644
index 0000000000000000000000000000000000000000..83e0c287372de570ce4e30b13e55411bed54e22a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_LD_ORPHAN_WARN
@@ -0,0 +1 @@
+CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_PMD_MKWRITE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_PMD_MKWRITE
new file mode 100644
index 0000000000000000000000000000000000000000..43207356f5a7826dbcf6bbdee18435a2dbf0b9ec
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_PMD_MKWRITE
@@ -0,0 +1 @@
+CONFIG_ARCH_WANT_PMD_MKWRITE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCNET
new file mode 100644
index 0000000000000000000000000000000000000000..8c988b7ca7d1b30d5cdb3e2543fbd43ebdda82d7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCNET
@@ -0,0 +1 @@
+# CONFIG_ARCNET is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ASM_MODVERSIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASM_MODVERSIONS
new file mode 100644
index 0000000000000000000000000000000000000000..a731c3aa8a26464d70b3de809b472cdc67cb5173
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASM_MODVERSIONS
@@ -0,0 +1 @@
+CONFIG_ASM_MODVERSIONS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1
new file mode 100644
index 0000000000000000000000000000000000000000..f414b61f85ef523003048605944558ebfea379cb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1
@@ -0,0 +1 @@
+CONFIG_ASN1=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1_ENCODER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1_ENCODER
new file mode 100644
index 0000000000000000000000000000000000000000..b388e0b7c9b497fadbe0677733ba22869046e290
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1_ENCODER
@@ -0,0 +1 @@
+CONFIG_ASN1_ENCODER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ASSOCIATIVE_ARRAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASSOCIATIVE_ARRAY
new file mode 100644
index 0000000000000000000000000000000000000000..fa19bf447739c4054546d2b6816479485a6fc200
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASSOCIATIVE_ARRAY
@@ -0,0 +1 @@
+CONFIG_ASSOCIATIVE_ARRAY=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_HAS_NON_CONST_LEB128 b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_HAS_NON_CONST_LEB128
new file mode 100644
index 0000000000000000000000000000000000000000..7aad62d92b3b0b031bd824b5f5b0aed29734474d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_HAS_NON_CONST_LEB128
@@ -0,0 +1 @@
+CONFIG_AS_HAS_NON_CONST_LEB128=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_IS_GNU b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_IS_GNU
new file mode 100644
index 0000000000000000000000000000000000000000..17f9e6ef9e1fbf5db109eb3c5de505f5bb80bb91
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_IS_GNU
@@ -0,0 +1 @@
+CONFIG_AS_IS_GNU=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_VERSION
new file mode 100644
index 0000000000000000000000000000000000000000..c228e3f926da17858615f76dec5257e3e8b85822
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_VERSION
@@ -0,0 +1 @@
+CONFIG_AS_VERSION=25000
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATALK b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATALK
new file mode 100644
index 0000000000000000000000000000000000000000..577083a7c02d754ca1d93c114f96b5805f5941f6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATALK
@@ -0,0 +1 @@
+# CONFIG_ATALK is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATARI_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATARI_PARTITION
new file mode 100644
index 0000000000000000000000000000000000000000..c4f07279cad8a58adcbdda2c302f1588e588df8b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATARI_PARTITION
@@ -0,0 +1 @@
+# CONFIG_ATARI_PARTITION is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1
new file mode 100644
index 0000000000000000000000000000000000000000..ed8b8ff63db8d1dca98f161e4f175aff95bdf379
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1
@@ -0,0 +1 @@
+CONFIG_ATL1=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1C b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1C
new file mode 100644
index 0000000000000000000000000000000000000000..391eb51ee43c3dc001a64a10bbbe2c2762967de7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1C
@@ -0,0 +1 @@
+CONFIG_ATL1C=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1E b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1E
new file mode 100644
index 0000000000000000000000000000000000000000..e2ce74691bfe074128bf074718440464beaac3bf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1E
@@ -0,0 +1 @@
+CONFIG_ATL1E=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM
new file mode 100644
index 0000000000000000000000000000000000000000..70dd368fa146f54fe8276cabe500870b1d4a6155
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM
@@ -0,0 +1 @@
+CONFIG_ATM=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684
new file mode 100644
index 0000000000000000000000000000000000000000..65ae8b6ac70c2cefcb145bf6ae4a3e40a328b2ac
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684
@@ -0,0 +1 @@
+CONFIG_ATM_BR2684=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684_IPFILTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684_IPFILTER
new file mode 100644
index 0000000000000000000000000000000000000000..655294fd4cd150f9f539ba68aab6196f52a803d0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684_IPFILTER
@@ -0,0 +1 @@
+# CONFIG_ATM_BR2684_IPFILTER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP
new file mode 100644
index 0000000000000000000000000000000000000000..1474a19781ac8019b8dfa19329b8b333ad715c3b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP
@@ -0,0 +1 @@
+CONFIG_ATM_CLIP=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP_NO_ICMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP_NO_ICMP
new file mode 100644
index 0000000000000000000000000000000000000000..011e40340c41d22ef8937cf1feab61624f0a9a8b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP_NO_ICMP
@@ -0,0 +1 @@
+# CONFIG_ATM_CLIP_NO_ICMP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_LANE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_LANE
new file mode 100644
index 0000000000000000000000000000000000000000..b7578abbd53503fd486009e403293d568470c8fc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_LANE
@@ -0,0 +1 @@
+CONFIG_ATM_LANE=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_MPOA b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_MPOA
new file mode 100644
index 0000000000000000000000000000000000000000..75378a2c566074dcccfba24252a39836426952ad
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_MPOA
@@ -0,0 +1 @@
+# CONFIG_ATM_MPOA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AUDITSYSCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_AUDITSYSCALL
new file mode 100644
index 0000000000000000000000000000000000000000..7ae4f9cd89af8b7540ea812e3b41859e3ac269cf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AUDITSYSCALL
@@ -0,0 +1 @@
+CONFIG_AUDITSYSCALL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AUXDISPLAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_AUXDISPLAY
new file mode 100644
index 0000000000000000000000000000000000000000..36da27ae2bbd3426c4c7e06b7ce2cde67bb9463b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AUXDISPLAY
@@ -0,0 +1 @@
+# CONFIG_AUXDISPLAY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AX88796B_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_AX88796B_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..ee2e42ff289b9ad9a4d9cd6f5b1a2211b1f9283f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AX88796B_PHY
@@ -0,0 +1 @@
+CONFIG_AX88796B_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_B44 b/anolis/configs/L2-OPTIONAL/default/CONFIG_B44
new file mode 100644
index 0000000000000000000000000000000000000000..f1e41b3072bc9fc4e161191e322497f41cab835b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_B44
@@ -0,0 +1 @@
+# CONFIG_B44 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8860 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8860
new file mode 100644
index 0000000000000000000000000000000000000000..401ceb236ffeeb2bfaf5b2b9b0591a5242972562
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8860
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_ADP8860 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8870 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8870
new file mode 100644
index 0000000000000000000000000000000000000000..6d0c88caf8020d933393a834ae962c6cc23ce369
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8870
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_ADP8870 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ARCXCNN b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ARCXCNN
new file mode 100644
index 0000000000000000000000000000000000000000..9371c26efa3cc2580191f55935cd1905749decca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ARCXCNN
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_ARCXCNN is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_BD6107 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_BD6107
new file mode 100644
index 0000000000000000000000000000000000000000..07df4bf1541c79a7f93c442feb39c5e4cd53011f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_BD6107
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_BD6107 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_CLASS_DEVICE b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_CLASS_DEVICE
new file mode 100644
index 0000000000000000000000000000000000000000..c37961b4c8f9d2b53944a167c14afdf9d98d59ca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -0,0 +1 @@
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTD253 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTD253
new file mode 100644
index 0000000000000000000000000000000000000000..5f66e1f9d6174e787054a93f40e9939064d24405
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTD253
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_KTD253 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTZ8866 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTZ8866
new file mode 100644
index 0000000000000000000000000000000000000000..f9355b9ee024d47de8b68fbad185193a7bf44a7f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTZ8866
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_KTZ8866 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3630A b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3630A
new file mode 100644
index 0000000000000000000000000000000000000000..1cc4fc66b60ab728d00a3a5157fa89f958bb1fa6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3630A
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_LM3630A is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3639 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3639
new file mode 100644
index 0000000000000000000000000000000000000000..cf301c4f996a8ccd075aa65d1ba88d11e1ab42b6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3639
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_LM3639 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LP855X b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LP855X
new file mode 100644
index 0000000000000000000000000000000000000000..bf846f83255a857c321f83b152866c186bfd39cc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LP855X
@@ -0,0 +1 @@
+CONFIG_BACKLIGHT_LP855X=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LV5207LP b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LV5207LP
new file mode 100644
index 0000000000000000000000000000000000000000..ed80d7e6601ec9ea492098369f87b8220e8e65ec
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LV5207LP
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_LV5207LP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_QCOM_WLED b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_QCOM_WLED
new file mode 100644
index 0000000000000000000000000000000000000000..d1e1fdbc7d6b487ef5b410b5fb6c498a5a02c546
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_QCOM_WLED
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_QCOM_WLED is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKTRACE_SELF_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKTRACE_SELF_TEST
new file mode 100644
index 0000000000000000000000000000000000000000..d8da786506b2875928fdfa50589089863cc9307a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKTRACE_SELF_TEST
@@ -0,0 +1 @@
+# CONFIG_BACKTRACE_SELF_TEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BAREUDP b/anolis/configs/L2-OPTIONAL/default/CONFIG_BAREUDP
new file mode 100644
index 0000000000000000000000000000000000000000..ee554a97a7b4794f0042b4c89a6221c9b862e9f2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BAREUDP
@@ -0,0 +1 @@
+# CONFIG_BAREUDP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BASE_SMALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_BASE_SMALL
new file mode 100644
index 0000000000000000000000000000000000000000..ae5e778e0a495c6c19ac0836bca39f4a6db398a9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BASE_SMALL
@@ -0,0 +1 @@
+CONFIG_BASE_SMALL=0
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATMAN_ADV b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATMAN_ADV
new file mode 100644
index 0000000000000000000000000000000000000000..8d45d826f6cbd2547a1fd9bfcfda37f2d90c153b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATMAN_ADV
@@ -0,0 +1 @@
+# CONFIG_BATMAN_ADV is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_BQ27XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_BQ27XXX
new file mode 100644
index 0000000000000000000000000000000000000000..4ddb243bae97b1f3400d046df711f29381bcee93
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_BQ27XXX
@@ -0,0 +1 @@
+# CONFIG_BATTERY_BQ27XXX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_CW2015 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_CW2015
new file mode 100644
index 0000000000000000000000000000000000000000..2c8831e4d2c1c8a5acce9b476aad4f2b4d002795
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_CW2015
@@ -0,0 +1 @@
+# CONFIG_BATTERY_CW2015 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2780 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2780
new file mode 100644
index 0000000000000000000000000000000000000000..508ab19d9c9346b548d07f09bc782cc8fba983d0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2780
@@ -0,0 +1 @@
+# CONFIG_BATTERY_DS2780 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2781 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2781
new file mode 100644
index 0000000000000000000000000000000000000000..f35c00807c9a44ea477ee205658eaee050d02d6f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2781
@@ -0,0 +1 @@
+# CONFIG_BATTERY_DS2781 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2782 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2782
new file mode 100644
index 0000000000000000000000000000000000000000..126e3105af7365b981e8c577a19f6790930be079
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2782
@@ -0,0 +1 @@
+# CONFIG_BATTERY_DS2782 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GAUGE_LTC2941 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GAUGE_LTC2941
new file mode 100644
index 0000000000000000000000000000000000000000..a4a35e3374ee3234d021daa645a4a5b51459af53
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GAUGE_LTC2941
@@ -0,0 +1 @@
+# CONFIG_BATTERY_GAUGE_LTC2941 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GOLDFISH b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GOLDFISH
new file mode 100644
index 0000000000000000000000000000000000000000..75b9c00e9bfceac460cf7f30efe4cf32221428ca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GOLDFISH
@@ -0,0 +1 @@
+# CONFIG_BATTERY_GOLDFISH is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17040 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17040
new file mode 100644
index 0000000000000000000000000000000000000000..074c2386b1cf1f75f6a0ad934b2bd4ced5f15b28
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17040
@@ -0,0 +1 @@
+# CONFIG_BATTERY_MAX17040 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17042 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17042
new file mode 100644
index 0000000000000000000000000000000000000000..19ff6af7a07988516fc39a22b5e81a00c9985f41
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17042
@@ -0,0 +1 @@
+# CONFIG_BATTERY_MAX17042 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_RT5033 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_RT5033
new file mode 100644
index 0000000000000000000000000000000000000000..a86c2de54c5cd3ff1abb331d478ed7512aac2e4f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_RT5033
@@ -0,0 +1 @@
+# CONFIG_BATTERY_RT5033 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SAMSUNG_SDI b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SAMSUNG_SDI
new file mode 100644
index 0000000000000000000000000000000000000000..b68d543593238a2982817f4e7063a0eee1c47ae1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SAMSUNG_SDI
@@ -0,0 +1 @@
+# CONFIG_BATTERY_SAMSUNG_SDI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SBS b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SBS
new file mode 100644
index 0000000000000000000000000000000000000000..c624a2f7fa606cef2fda7d1e16a5d028b1299b13
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SBS
@@ -0,0 +1 @@
+# CONFIG_BATTERY_SBS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_UG3105 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_UG3105
new file mode 100644
index 0000000000000000000000000000000000000000..2ef40f78c7dfed9e9798d2fadc16a4ede8becd09
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_UG3105
@@ -0,0 +1 @@
+# CONFIG_BATTERY_UG3105 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..eb3f1af90e6cdc42c991522389887e40f204a960
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG
@@ -0,0 +1 @@
+# CONFIG_BCACHE_CLOSURES_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..36426027b2d10d39b3dcee18fdfcc421c5914d70
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG
@@ -0,0 +1 @@
+# CONFIG_BCACHE_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM54140_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM54140_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..ebd2ac9225ca45f89185dea805458a35564ff31f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM54140_PHY
@@ -0,0 +1 @@
+# CONFIG_BCM54140_PHY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM7XXX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM7XXX_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..09c4987ca6c9ff384e3c4c7e78103e9846f3874d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM7XXX_PHY
@@ -0,0 +1 @@
+CONFIG_BCM7XXX_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM84881_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM84881_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..6f472549d5a74b774aa914920a22ea743be0b097
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM84881_PHY
@@ -0,0 +1 @@
+# CONFIG_BCM84881_PHY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM87XX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM87XX_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..b9dd7faba7dd9a56a083e25dc9ae7ea788d5b96f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM87XX_PHY
@@ -0,0 +1 @@
+CONFIG_BCM87XX_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA
new file mode 100644
index 0000000000000000000000000000000000000000..d9d0a9bbc4aab4c1b31608fd5277dfd18e533a4c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA
@@ -0,0 +1 @@
+CONFIG_BCMA=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..c5cc6a5cfd19d69fc83fa23f5ec98e497fef708c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DEBUG
@@ -0,0 +1 @@
+# CONFIG_BCMA_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GMAC_CMN b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GMAC_CMN
new file mode 100644
index 0000000000000000000000000000000000000000..501bc2aecb6e11ccd90912441fddcca96b250290
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GMAC_CMN
@@ -0,0 +1 @@
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..ae73e2287533125041bd126b58c325bd35f1a50d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GPIO
@@ -0,0 +1 @@
+CONFIG_BCMA_DRIVER_GPIO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..92d35164df86b324e792250678ecbae7238081cd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_PCI
@@ -0,0 +1 @@
+CONFIG_BCMA_DRIVER_PCI=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..a37624a12da888354b5fe156543c3e7e7d73b2b2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI
@@ -0,0 +1 @@
+CONFIG_BCMA_HOST_PCI=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI_POSSIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI_POSSIBLE
new file mode 100644
index 0000000000000000000000000000000000000000..204a2cca7a6e7ad3f5c2e77719ebc5757febca81
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI_POSSIBLE
@@ -0,0 +1 @@
+CONFIG_BCMA_HOST_PCI_POSSIBLE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_SOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_SOC
new file mode 100644
index 0000000000000000000000000000000000000000..0c3f2477f3cb3e17b0c27722153dbc6b7fc6cf4b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_SOC
@@ -0,0 +1 @@
+# CONFIG_BCMA_HOST_SOC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_POSSIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_POSSIBLE
new file mode 100644
index 0000000000000000000000000000000000000000..aa4a9269007a909f011bccda29ae48209025ebab
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_POSSIBLE
@@ -0,0 +1 @@
+CONFIG_BCMA_POSSIBLE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_KONA_USB2_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_KONA_USB2_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..787233648ceddec75030d50ae2ab00ed0dfe0f4f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_KONA_USB2_PHY
@@ -0,0 +1 @@
+# CONFIG_BCM_KONA_USB2_PHY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYLIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYLIB
new file mode 100644
index 0000000000000000000000000000000000000000..a31aeb4ac26b1903620c3cd9ba3a182cd45c9997
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYLIB
@@ -0,0 +1 @@
+CONFIG_BCM_NET_PHYLIB=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYPTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYPTP
new file mode 100644
index 0000000000000000000000000000000000000000..2946b54059ce384e531640f836d690b99512dc46
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYPTP
@@ -0,0 +1 @@
+CONFIG_BCM_NET_PHYPTP=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_VK b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_VK
new file mode 100644
index 0000000000000000000000000000000000000000..17a577df57bd04b537c5584ec7b861656762b915
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_VK
@@ -0,0 +1 @@
+# CONFIG_BCM_VK is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BE2ISCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_BE2ISCSI
new file mode 100644
index 0000000000000000000000000000000000000000..d27a7a0582f098053f65f1a931812c51f2fda8df
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BE2ISCSI
@@ -0,0 +1 @@
+CONFIG_BE2ISCSI=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BEFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_BEFS_FS
new file mode 100644
index 0000000000000000000000000000000000000000..cdabc728df98601377958ddfaf36f777f2e0fef1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BEFS_FS
@@ -0,0 +1 @@
+# CONFIG_BEFS_FS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_BFS_FS
new file mode 100644
index 0000000000000000000000000000000000000000..e667a937b15e009686c025497c5d3d32cffb8723
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BFS_FS
@@ -0,0 +1 @@
+# CONFIG_BFS_FS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BINARY_PRINTF b/anolis/configs/L2-OPTIONAL/default/CONFIG_BINARY_PRINTF
new file mode 100644
index 0000000000000000000000000000000000000000..3dd24e0191b4c75e4f5b1edf4e393c324d576a4a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BINARY_PRINTF
@@ -0,0 +1 @@
+CONFIG_BINARY_PRINTF=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BITREVERSE b/anolis/configs/L2-OPTIONAL/default/CONFIG_BITREVERSE
new file mode 100644
index 0000000000000000000000000000000000000000..22507f9b83bc50c2c345cfa1d5875a4b7f0ced86
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BITREVERSE
@@ -0,0 +1 @@
+CONFIG_BITREVERSE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_FC_APPID b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_FC_APPID
new file mode 100644
index 0000000000000000000000000000000000000000..a3c81418b4e040bc811b80c15921c7ef3987b82b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_FC_APPID
@@ -0,0 +1 @@
+# CONFIG_BLK_CGROUP_FC_APPID is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_IOPRIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_IOPRIO
new file mode 100644
index 0000000000000000000000000000000000000000..39c0d13ff4b2e5271a87ef6f00c8cc2e5b576ca3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_IOPRIO
@@ -0,0 +1 @@
+# CONFIG_BLK_CGROUP_IOPRIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_PUNT_BIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_PUNT_BIO
new file mode 100644
index 0000000000000000000000000000000000000000..5e25b9cba09653dc500857f1052293c89e52e016
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_PUNT_BIO
@@ -0,0 +1 @@
+CONFIG_BLK_CGROUP_PUNT_BIO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_RWSTAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_RWSTAT
new file mode 100644
index 0000000000000000000000000000000000000000..5e0f1c36ddeb0d0c6a936b3ac2a8022c448dd681
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_RWSTAT
@@ -0,0 +1 @@
+CONFIG_BLK_CGROUP_RWSTAT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_3W_XXXX_RAID b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_3W_XXXX_RAID
new file mode 100644
index 0000000000000000000000000000000000000000..0fbe95ebc3764778801db539a025f146c30096ca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_3W_XXXX_RAID
@@ -0,0 +1 @@
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_BSG_COMMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_BSG_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..b645b9f74708e1b26001584b8225b2413dc4a06e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_BSG_COMMON
@@ -0,0 +1 @@
+CONFIG_BLK_DEV_BSG_COMMON=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_DM_BUILTIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_DM_BUILTIN
new file mode 100644
index 0000000000000000000000000000000000000000..2cd73581f8eef981c51a2d9bfb7e9ef99a9b543f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_DM_BUILTIN
@@ -0,0 +1 @@
+CONFIG_BLK_DEV_DM_BUILTIN=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_ICQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_ICQ
new file mode 100644
index 0000000000000000000000000000000000000000..e76683c61f0f80b34172f3c3ea8edbcdb76dc9ca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_ICQ
@@ -0,0 +1 @@
+CONFIG_BLK_ICQ=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_INLINE_ENCRYPTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_INLINE_ENCRYPTION
new file mode 100644
index 0000000000000000000000000000000000000000..3f642705f0e448ec022efe40985e3d738458fd1e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_INLINE_ENCRYPTION
@@ -0,0 +1 @@
+# CONFIG_BLK_INLINE_ENCRYPTION is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_MQ_STACKING b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_MQ_STACKING
new file mode 100644
index 0000000000000000000000000000000000000000..6caf8dfba3030f3a69c72d89885d6d82941abc10
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_MQ_STACKING
@@ -0,0 +1 @@
+CONFIG_BLK_MQ_STACKING=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_PM b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_PM
new file mode 100644
index 0000000000000000000000000000000000000000..279d2c44a337379c5a45e64eed37d7e936d262dd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_PM
@@ -0,0 +1 @@
+CONFIG_BLK_PM=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_SED_OPAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_SED_OPAL
new file mode 100644
index 0000000000000000000000000000000000000000..e0c2a4ef6657943101576062804fbeae847ac141
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_SED_OPAL
@@ -0,0 +1 @@
+# CONFIG_BLK_SED_OPAL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_HOLDER_DEPRECATED b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_HOLDER_DEPRECATED
new file mode 100644
index 0000000000000000000000000000000000000000..7dde8926431833b0f0b8111c20e81ab80b5e52d4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_HOLDER_DEPRECATED
@@ -0,0 +1 @@
+CONFIG_BLOCK_HOLDER_DEPRECATED=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_LEGACY_AUTOLOAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_LEGACY_AUTOLOAD
new file mode 100644
index 0000000000000000000000000000000000000000..3e7433ac6ee1712abe96a50e7729d606139f2c6f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_LEGACY_AUTOLOAD
@@ -0,0 +1 @@
+CONFIG_BLOCK_LEGACY_AUTOLOAD=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BOOTPARAM_HUNG_TASK_PANIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_BOOTPARAM_HUNG_TASK_PANIC
new file mode 100644
index 0000000000000000000000000000000000000000..93452648f2f125ea48eb5e349648e19ec110e124
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BOOTPARAM_HUNG_TASK_PANIC
@@ -0,0 +1 @@
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_JIT_DEFAULT_ON b/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_JIT_DEFAULT_ON
new file mode 100644
index 0000000000000000000000000000000000000000..cb9ab4f3b6ab83f76a2d3ac269decd3b351ef4a6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_JIT_DEFAULT_ON
@@ -0,0 +1 @@
+CONFIG_BPF_JIT_DEFAULT_ON=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_PRELOAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_PRELOAD
new file mode 100644
index 0000000000000000000000000000000000000000..71cb0ca98e19b5e18ea8ea4aab4b5cba5584e893
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_PRELOAD
@@ -0,0 +1 @@
+# CONFIG_BPF_PRELOAD is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BQL b/anolis/configs/L2-OPTIONAL/default/CONFIG_BQL
new file mode 100644
index 0000000000000000000000000000000000000000..726b6e8e2bd2920c690790a633e0366055dfac6e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BQL
@@ -0,0 +1 @@
+CONFIG_BQL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BRIDGE_CFM b/anolis/configs/L2-OPTIONAL/default/CONFIG_BRIDGE_CFM
new file mode 100644
index 0000000000000000000000000000000000000000..d3175e47af57a72a02379a7522b2f2a755dd15ce
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BRIDGE_CFM
@@ -0,0 +1 @@
+# CONFIG_BRIDGE_CFM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BROADCOM_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BROADCOM_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..a4c3dcde035c52b21af0771e44865953b83b9dd3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BROADCOM_PHY
@@ -0,0 +1 @@
+CONFIG_BROADCOM_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BSD_DISKLABEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_BSD_DISKLABEL
new file mode 100644
index 0000000000000000000000000000000000000000..980fa2b75f3d530d03902ac5cfb201daddee297a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BSD_DISKLABEL
@@ -0,0 +1 @@
+CONFIG_BSD_DISKLABEL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTREE
new file mode 100644
index 0000000000000000000000000000000000000000..592dbd5ee8df02701ee405622127687b5836efa8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTREE
@@ -0,0 +1 @@
+CONFIG_BTREE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_ASSERT b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_ASSERT
new file mode 100644
index 0000000000000000000000000000000000000000..a4f8b08173a87419be6b64f218b1f9708b0a055c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_ASSERT
@@ -0,0 +1 @@
+# CONFIG_BTRFS_ASSERT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..3e69c0f7dfef6b91ff5dd376f3ca14170687b7f7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_DEBUG
@@ -0,0 +1 @@
+# CONFIG_BTRFS_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_CHECK_INTEGRITY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_CHECK_INTEGRITY
new file mode 100644
index 0000000000000000000000000000000000000000..018480b8bc38224ee27a02b866f037a5ce7db4e4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_CHECK_INTEGRITY
@@ -0,0 +1 @@
+# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_POSIX_ACL b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_POSIX_ACL
new file mode 100644
index 0000000000000000000000000000000000000000..c416c3fff14712b133c1f1d31543268f5eb03cd0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_POSIX_ACL
@@ -0,0 +1 @@
+# CONFIG_BTRFS_FS_POSIX_ACL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_REF_VERIFY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_REF_VERIFY
new file mode 100644
index 0000000000000000000000000000000000000000..2fb0f884e4b49188988f00bef5d1ec6b376f6ef8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_REF_VERIFY
@@ -0,0 +1 @@
+# CONFIG_BTRFS_FS_REF_VERIFY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_RUN_SANITY_TESTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_RUN_SANITY_TESTS
new file mode 100644
index 0000000000000000000000000000000000000000..b3ae62702352776c353b4f88396db4ae1848040b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_RUN_SANITY_TESTS
@@ -0,0 +1 @@
+# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTT b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTT
new file mode 100644
index 0000000000000000000000000000000000000000..9cfddf1b90f8e6658950cbbdf9c92549f711c358
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTT
@@ -0,0 +1 @@
+CONFIG_BTT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BUFFER_HEAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_BUFFER_HEAD
new file mode 100644
index 0000000000000000000000000000000000000000..dc047db31705b19792a6e3cae9a6f75e0f81e0f5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BUFFER_HEAD
@@ -0,0 +1 @@
+CONFIG_BUFFER_HEAD=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BUILDTIME_TABLE_SORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_BUILDTIME_TABLE_SORT
new file mode 100644
index 0000000000000000000000000000000000000000..9c2116a57e92cda4354e2518b2dad8ed2fd4e82e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BUILDTIME_TABLE_SORT
@@ -0,0 +1 @@
+CONFIG_BUILDTIME_TABLE_SORT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_C2PORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_C2PORT
new file mode 100644
index 0000000000000000000000000000000000000000..98a99aa3e28fbddfaf8174fe866e2efcff63ecf1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_C2PORT
@@ -0,0 +1 @@
+# CONFIG_C2PORT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CACHEFILES_ERROR_INJECTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_CACHEFILES_ERROR_INJECTION
new file mode 100644
index 0000000000000000000000000000000000000000..d1256b3e941a6a256a398f4aea2b1c962c79f7b7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CACHEFILES_ERROR_INJECTION
@@ -0,0 +1 @@
+# CONFIG_CACHEFILES_ERROR_INJECTION is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CADENCE_WATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CADENCE_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..3892db3284e1467eec62a3dab63bc0766811503d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CADENCE_WATCHDOG
@@ -0,0 +1 @@
+# CONFIG_CADENCE_WATCHDOG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CAIF b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAIF
new file mode 100644
index 0000000000000000000000000000000000000000..e484697633a896e7b416a7bc6fa7d2c61d5a661b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAIF
@@ -0,0 +1 @@
+# CONFIG_CAIF is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAN
new file mode 100644
index 0000000000000000000000000000000000000000..37ca11c9522745069b227e724087633727e15d2f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAN
@@ -0,0 +1 @@
+# CONFIG_CAN is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CARDBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CARDBUS
new file mode 100644
index 0000000000000000000000000000000000000000..398c236738e4e087e0f0e3d9771de1f23eee15b2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CARDBUS
@@ -0,0 +1 @@
+CONFIG_CARDBUS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CAVIUM_PTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAVIUM_PTP
new file mode 100644
index 0000000000000000000000000000000000000000..61fb6b41a947f307e82e69caa29f3ecca4cd226e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAVIUM_PTP
@@ -0,0 +1 @@
+CONFIG_CAVIUM_PTP=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..88c0c9f9c81e5ae17359f8ec8711b259e8c0a68c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_CORE
@@ -0,0 +1 @@
+CONFIG_CB710_CORE=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..fc70a51d0ac8f63711c2c292e9cd1ffb7dc156e7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG
@@ -0,0 +1 @@
+# CONFIG_CB710_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG_ASSUMPTIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG_ASSUMPTIONS
new file mode 100644
index 0000000000000000000000000000000000000000..8bf7e308822a2142f7720197b302f1e3b620d525
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG_ASSUMPTIONS
@@ -0,0 +1 @@
+CONFIG_CB710_DEBUG_ASSUMPTIONS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK
new file mode 100644
index 0000000000000000000000000000000000000000..e962c4f2a47ce9a6ec8fc6f5d17646810d57db71
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK
@@ -0,0 +1 @@
+CONFIG_CC_CAN_LINK=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK_STATIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK_STATIC
new file mode 100644
index 0000000000000000000000000000000000000000..a359402c7cba042823e01ac54dd136637be84a30
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK_STATIC
@@ -0,0 +1 @@
+CONFIG_CC_CAN_LINK_STATIC=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_OUTPUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_OUTPUT
new file mode 100644
index 0000000000000000000000000000000000000000..9a33905ada1b7701efd71f6a152df7995f09a227
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_OUTPUT
@@ -0,0 +1 @@
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
new file mode 100644
index 0000000000000000000000000000000000000000..77f479fa9bbbcdfa65fa010ed6d430093520aded
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
@@ -0,0 +1 @@
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_INLINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_INLINE
new file mode 100644
index 0000000000000000000000000000000000000000..4b72546c10f7dafdfc9f7377c753a2aa3b33b33f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_INLINE
@@ -0,0 +1 @@
+CONFIG_CC_HAS_ASM_INLINE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN
new file mode 100644
index 0000000000000000000000000000000000000000..4bc8c8dcdf971ee2cbb264c8584ca1acb46a58c5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN
@@ -0,0 +1 @@
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO
new file mode 100644
index 0000000000000000000000000000000000000000..21bfd6db4efc1012d26d007ffaf7993a3c272213
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO
@@ -0,0 +1 @@
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE
new file mode 100644
index 0000000000000000000000000000000000000000..f5ec022f0d153256b4b0ff0107e88b64e7d4b194
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE
@@ -0,0 +1 @@
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_COUNTED_BY b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_COUNTED_BY
new file mode 100644
index 0000000000000000000000000000000000000000..0305414244a7448abb12374c021cf39fd88ffa79
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_COUNTED_BY
@@ -0,0 +1 @@
+CONFIG_CC_HAS_COUNTED_BY=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_INT128 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_INT128
new file mode 100644
index 0000000000000000000000000000000000000000..1b98764bfea385fa3866b126c1d935e748433308
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_INT128
@@ -0,0 +1 @@
+CONFIG_CC_HAS_INT128=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_GENERIC
new file mode 100644
index 0000000000000000000000000000000000000000..e0e19735edd6721729168a99a4cae57310747531
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_GENERIC
@@ -0,0 +1 @@
+CONFIG_CC_HAS_KASAN_GENERIC=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_SW_TAGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_SW_TAGS
new file mode 100644
index 0000000000000000000000000000000000000000..530edb74379adc87abc5aa0210e8235b5eedd8df
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_SW_TAGS
@@ -0,0 +1 @@
+CONFIG_CC_HAS_KASAN_SW_TAGS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_NO_PROFILE_FN_ATTR b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_NO_PROFILE_FN_ATTR
new file mode 100644
index 0000000000000000000000000000000000000000..c3356979cd40e91c368e4e54a0db7c454167884e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_NO_PROFILE_FN_ATTR
@@ -0,0 +1 @@
+CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_RANDSTRUCT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_RANDSTRUCT
new file mode 100644
index 0000000000000000000000000000000000000000..f28e37e100192e53c353e041b3484bd7c3f52e40
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_RANDSTRUCT
@@ -0,0 +1 @@
+CONFIG_CC_HAS_RANDSTRUCT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_SANCOV_TRACE_PC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_SANCOV_TRACE_PC
new file mode 100644
index 0000000000000000000000000000000000000000..a0988149d815756d2fe5a436a3b69b518db62caf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_SANCOV_TRACE_PC
@@ -0,0 +1 @@
+CONFIG_CC_HAS_SANCOV_TRACE_PC=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS
new file mode 100644
index 0000000000000000000000000000000000000000..1f4a4f60cfd9bc8fec5fc7d2ad779e80081630a1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS
@@ -0,0 +1 @@
+CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ZERO_CALL_USED_REGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ZERO_CALL_USED_REGS
new file mode 100644
index 0000000000000000000000000000000000000000..d1d0c3a9658fe343f84ca4531a495ed3b201094a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ZERO_CALL_USED_REGS
@@ -0,0 +1 @@
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IMPLICIT_FALLTHROUGH b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IMPLICIT_FALLTHROUGH
new file mode 100644
index 0000000000000000000000000000000000000000..aa8a121fae8d5427389a363b96420719997aee4b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IMPLICIT_FALLTHROUGH
@@ -0,0 +1 @@
+CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IS_GCC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IS_GCC
new file mode 100644
index 0000000000000000000000000000000000000000..7698d68c208a5d7a2f7d0cd032780fed6f3aebc9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IS_GCC
@@ -0,0 +1 @@
+CONFIG_CC_IS_GCC=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_NO_ARRAY_BOUNDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_NO_ARRAY_BOUNDS
new file mode 100644
index 0000000000000000000000000000000000000000..aba54617fc731368cf9e46f9989098e450eea850
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_NO_ARRAY_BOUNDS
@@ -0,0 +1 @@
+CONFIG_CC_NO_ARRAY_BOUNDS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_VERSION_TEXT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_VERSION_TEXT
new file mode 100644
index 0000000000000000000000000000000000000000..e9f8043b92e6c900d0a70c9943668d96e8485c86
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_VERSION_TEXT
@@ -0,0 +1 @@
+CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)"
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CEC_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEC_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..3b38aad569c5ae9304bb1d298430e09089319ff8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEC_CORE
@@ -0,0 +1 @@
+CONFIG_CEC_CORE=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB
new file mode 100644
index 0000000000000000000000000000000000000000..5f539b4b69857ca069722c806487cd27e844c5d4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB
@@ -0,0 +1 @@
+CONFIG_CEPH_LIB=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_PRETTYDEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_PRETTYDEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..57958e0f5ae5fb46f2feba4b98fcc694445921f3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_PRETTYDEBUG
@@ -0,0 +1 @@
+# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_USE_DNS_RESOLVER b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_USE_DNS_RESOLVER
new file mode 100644
index 0000000000000000000000000000000000000000..876075b46f7741376256f99897514e7eb7040659
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_USE_DNS_RESOLVER
@@ -0,0 +1 @@
+CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211
new file mode 100644
index 0000000000000000000000000000000000000000..c61c420dda9de663de24fff20b1ee6916adb03ea
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211
@@ -0,0 +1 @@
+CONFIG_CFG80211=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_CRDA_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_CRDA_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..51f113cca60f57bc3c2ed11f5e0c0cb3cd752176
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_CRDA_SUPPORT
@@ -0,0 +1 @@
+CONFIG_CFG80211_CRDA_SUPPORT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEBUGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEBUGFS
new file mode 100644
index 0000000000000000000000000000000000000000..fbc90925d6673d80b070d68fb195c855e5dcd9e9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEBUGFS
@@ -0,0 +1 @@
+# CONFIG_CFG80211_DEBUGFS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEFAULT_PS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEFAULT_PS
new file mode 100644
index 0000000000000000000000000000000000000000..89fd54c1bed8f31ff3a06e68b403673fb3f813d9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEFAULT_PS
@@ -0,0 +1 @@
+CONFIG_CFG80211_DEFAULT_PS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEVELOPER_WARNINGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEVELOPER_WARNINGS
new file mode 100644
index 0000000000000000000000000000000000000000..92c3d1dcd9dd7f46f1a06e5e4cf128f74655da21
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEVELOPER_WARNINGS
@@ -0,0 +1 @@
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_REQUIRE_SIGNED_REGDB b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_REQUIRE_SIGNED_REGDB
new file mode 100644
index 0000000000000000000000000000000000000000..03b978a85f56887481fc03a0a3f4399c6fdfbf6c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_REQUIRE_SIGNED_REGDB
@@ -0,0 +1 @@
+CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS
new file mode 100644
index 0000000000000000000000000000000000000000..6849c7d54c31e6f046e35e6133f82b3268824037
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS
@@ -0,0 +1 @@
+CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_WEXT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_WEXT
new file mode 100644
index 0000000000000000000000000000000000000000..5cbaa6e679ec0ba9321c2935eaafd9c849b3e42b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_WEXT
@@ -0,0 +1 @@
+# CONFIG_CFG80211_WEXT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFI_CLANG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFI_CLANG
new file mode 100644
index 0000000000000000000000000000000000000000..f1b68b1b18ef1059e3cd1e61131f08f0a6cd6e46
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFI_CLANG
@@ -0,0 +1 @@
+# CONFIG_CFI_CLANG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_FAVOR_DYNMODS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_FAVOR_DYNMODS
new file mode 100644
index 0000000000000000000000000000000000000000..138558a8e10ef84013399bc93da2ab09d74bcf09
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_FAVOR_DYNMODS
@@ -0,0 +1 @@
+# CONFIG_CGROUP_FAVOR_DYNMODS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_MISC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_MISC
new file mode 100644
index 0000000000000000000000000000000000000000..39276f477c64b9e276f0a4f28e84901f14b2498e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_MISC
@@ -0,0 +1 @@
+# CONFIG_CGROUP_MISC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_ADP5061 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_ADP5061
new file mode 100644
index 0000000000000000000000000000000000000000..f4086ccd21d9246678804e14c4094aeac79c4a1a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_ADP5061
@@ -0,0 +1 @@
+# CONFIG_CHARGER_ADP5061 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BD99954 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BD99954
new file mode 100644
index 0000000000000000000000000000000000000000..50cfb7bcf1162d0f11bbbaf105580820b531d968
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BD99954
@@ -0,0 +1 @@
+# CONFIG_CHARGER_BD99954 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2415X b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2415X
new file mode 100644
index 0000000000000000000000000000000000000000..bd6cf1aefb6b9d8e2f8585c1080f56e24a3d0250
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2415X
@@ -0,0 +1 @@
+# CONFIG_CHARGER_BQ2415X is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24257 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24257
new file mode 100644
index 0000000000000000000000000000000000000000..b4a8aea456323faf4918af92fa426dadee1f240b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24257
@@ -0,0 +1 @@
+# CONFIG_CHARGER_BQ24257 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24735 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24735
new file mode 100644
index 0000000000000000000000000000000000000000..bc9915c842bd102f5a647914d1c59a4d4dfb366d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24735
@@ -0,0 +1 @@
+# CONFIG_CHARGER_BQ24735 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2515X b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2515X
new file mode 100644
index 0000000000000000000000000000000000000000..abd8044a92e35f756c6853c13726f48ae3cb739b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2515X
@@ -0,0 +1 @@
+# CONFIG_CHARGER_BQ2515X is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ256XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ256XX
new file mode 100644
index 0000000000000000000000000000000000000000..6e6842a5d6ce9762927472f8a612591cee3dbc76
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ256XX
@@ -0,0 +1 @@
+# CONFIG_CHARGER_BQ256XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25890 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25890
new file mode 100644
index 0000000000000000000000000000000000000000..a5d1bc4b359b5dcb2e2c05dacb2a15f727573eaf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25890
@@ -0,0 +1 @@
+# CONFIG_CHARGER_BQ25890 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25980 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25980
new file mode 100644
index 0000000000000000000000000000000000000000..65e06f37c92d2612fc436205bfc0afe549db26d5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25980
@@ -0,0 +1 @@
+# CONFIG_CHARGER_BQ25980 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..436dad60a92e13798eb82eb51fff3d06884ad862
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_GPIO
@@ -0,0 +1 @@
+# CONFIG_CHARGER_GPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LP8727 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LP8727
new file mode 100644
index 0000000000000000000000000000000000000000..cf915198f094f262c0b1aa16037846261c2d6f6d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LP8727
@@ -0,0 +1 @@
+# CONFIG_CHARGER_LP8727 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LT3651 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LT3651
new file mode 100644
index 0000000000000000000000000000000000000000..16a7e801ae9d01df18d42c097bd8fffc1501e7ed
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LT3651
@@ -0,0 +1 @@
+# CONFIG_CHARGER_LT3651 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LTC4162L b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LTC4162L
new file mode 100644
index 0000000000000000000000000000000000000000..911950ca80a7c2c5a5d78da68eb6e73f7ef771d7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LTC4162L
@@ -0,0 +1 @@
+# CONFIG_CHARGER_LTC4162L is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX77976 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX77976
new file mode 100644
index 0000000000000000000000000000000000000000..143efa3a86542bac3f6c0fc27dd4d0b81ed37a08
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX77976
@@ -0,0 +1 @@
+# CONFIG_CHARGER_MAX77976 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX8903 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX8903
new file mode 100644
index 0000000000000000000000000000000000000000..6e62d27d753ec1f008b0e5a3f184471f265f5821
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX8903
@@ -0,0 +1 @@
+# CONFIG_CHARGER_MAX8903 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_RT9455 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_RT9455
new file mode 100644
index 0000000000000000000000000000000000000000..e9ffbe4d172ac360a36bd9518435f53095931666
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_RT9455
@@ -0,0 +1 @@
+# CONFIG_CHARGER_RT9455 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_SBS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_SBS
new file mode 100644
index 0000000000000000000000000000000000000000..afb3f2f1dd8a17b34db3af6e09638e86e82aed0d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_SBS
@@ -0,0 +1 @@
+# CONFIG_CHARGER_SBS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHECK_SIGNATURE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHECK_SIGNATURE
new file mode 100644
index 0000000000000000000000000000000000000000..7d82876083447e159cf3c878ab559321ca434276
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHECK_SIGNATURE
@@ -0,0 +1 @@
+CONFIG_CHECK_SIGNATURE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_INLINE_CRYPTO b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_INLINE_CRYPTO
new file mode 100644
index 0000000000000000000000000000000000000000..bd32df3059f24ffebb549dc32aab47168c439819
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_INLINE_CRYPTO
@@ -0,0 +1 @@
+CONFIG_CHELSIO_INLINE_CRYPTO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_IPSEC_INLINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_IPSEC_INLINE
new file mode 100644
index 0000000000000000000000000000000000000000..ee972e70f2ef7316a6530f670a80f3d7bd6dd0c9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_IPSEC_INLINE
@@ -0,0 +1 @@
+CONFIG_CHELSIO_IPSEC_INLINE=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_LIB
new file mode 100644
index 0000000000000000000000000000000000000000..73c420b672914d6a6d4fd46a906ca995ab6c74ae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_LIB
@@ -0,0 +1 @@
+CONFIG_CHELSIO_LIB=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T1
new file mode 100644
index 0000000000000000000000000000000000000000..78f33d433ce014eb35230745b755a1e5c4e00fa4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T1
@@ -0,0 +1 @@
+# CONFIG_CHELSIO_T1 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T3
new file mode 100644
index 0000000000000000000000000000000000000000..bfa32e65389bd5b58120e2b7f66c00eb1f9c025f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T3
@@ -0,0 +1 @@
+# CONFIG_CHELSIO_T3 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4
new file mode 100644
index 0000000000000000000000000000000000000000..a9f70238b466dee2a656a183bd62aac9b63d9180
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4
@@ -0,0 +1 @@
+CONFIG_CHELSIO_T4=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4VF b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4VF
new file mode 100644
index 0000000000000000000000000000000000000000..f1805956847e91f1a923ac86c1224a4db794638a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4VF
@@ -0,0 +1 @@
+CONFIG_CHELSIO_T4VF=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4_DCB b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4_DCB
new file mode 100644
index 0000000000000000000000000000000000000000..90621bdd912e5170c117553f51408ea5361b3033
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4_DCB
@@ -0,0 +1 @@
+# CONFIG_CHELSIO_T4_DCB is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_TLS_DEVICE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_TLS_DEVICE
new file mode 100644
index 0000000000000000000000000000000000000000..a9b5fef19981dabf71fa05f82b1398aff67c808b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_TLS_DEVICE
@@ -0,0 +1 @@
+# CONFIG_CHELSIO_TLS_DEVICE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CICADA_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_CICADA_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..4e90d6243a1622102f5a042111d52454a3b9f3de
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CICADA_PHY
@@ -0,0 +1 @@
+CONFIG_CICADA_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS
new file mode 100644
index 0000000000000000000000000000000000000000..bc37a8fc6a8caa6acae92820f2bb3e6ebbf1d3b5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS
@@ -0,0 +1 @@
+CONFIG_CIFS=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_ALLOW_INSECURE_LEGACY b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_ALLOW_INSECURE_LEGACY
new file mode 100644
index 0000000000000000000000000000000000000000..b6531cac300e7242b293ddb7fcf0eff81403c1df
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_ALLOW_INSECURE_LEGACY
@@ -0,0 +1 @@
+CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..4fc95add98e2687a42adcc6101296a13b22ea6a9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG
@@ -0,0 +1 @@
+CONFIG_CIFS_DEBUG=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG2
new file mode 100644
index 0000000000000000000000000000000000000000..5f930487324dbb910ae65c6277dabd91347430ab
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG2
@@ -0,0 +1 @@
+# CONFIG_CIFS_DEBUG2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG_DUMP_KEYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG_DUMP_KEYS
new file mode 100644
index 0000000000000000000000000000000000000000..03f554dbac0706aead48298e893c50944cdfeaa6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG_DUMP_KEYS
@@ -0,0 +1 @@
+# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DFS_UPCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DFS_UPCALL
new file mode 100644
index 0000000000000000000000000000000000000000..5f96d08c0e76bc4cabf1673aaa596cfd5958ad5a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DFS_UPCALL
@@ -0,0 +1 @@
+CONFIG_CIFS_DFS_UPCALL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_FSCACHE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_FSCACHE
new file mode 100644
index 0000000000000000000000000000000000000000..48901f8ee434a386051f5c0ad8cc93117838423f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_FSCACHE
@@ -0,0 +1 @@
+# CONFIG_CIFS_FSCACHE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_POSIX b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_POSIX
new file mode 100644
index 0000000000000000000000000000000000000000..1737fa4cd1edb2111dc4f763ce505bd42fefac97
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_POSIX
@@ -0,0 +1 @@
+CONFIG_CIFS_POSIX=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SMB_DIRECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SMB_DIRECT
new file mode 100644
index 0000000000000000000000000000000000000000..849bffb38ecd6ab848e0d2a86ab02d4f56d71524
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SMB_DIRECT
@@ -0,0 +1 @@
+# CONFIG_CIFS_SMB_DIRECT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_STATS2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_STATS2
new file mode 100644
index 0000000000000000000000000000000000000000..16763446cdeb19e7a80b8cc6ceed50dc286fb77d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_STATS2
@@ -0,0 +1 @@
+# CONFIG_CIFS_STATS2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SWN_UPCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SWN_UPCALL
new file mode 100644
index
0000000000000000000000000000000000000000..895af94605100aaee539095bfcf3d8bebf27d3d5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SWN_UPCALL @@ -0,0 +1 @@ +# CONFIG_CIFS_SWN_UPCALL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_UPCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_UPCALL new file mode 100644 index 0000000000000000000000000000000000000000..d41540f2313cc3f001f190f074e7eddb1127a6bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_UPCALL @@ -0,0 +1 @@ +CONFIG_CIFS_UPCALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_XATTR b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_XATTR new file mode 100644 index 0000000000000000000000000000000000000000..7e35cc630fcc6e44107e862a9824baed441e522f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_XATTR @@ -0,0 +1 @@ +CONFIG_CIFS_XATTR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CLANG_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_CLANG_VERSION new file mode 100644 index 0000000000000000000000000000000000000000..9e328f41dd0c6f6c88be13e37a3be9801db582f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CLANG_VERSION @@ -0,0 +1 @@ +CONFIG_CLANG_VERSION=0 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CLZ_TAB b/anolis/configs/L2-OPTIONAL/default/CONFIG_CLZ_TAB new file mode 100644 index 0000000000000000000000000000000000000000..dc08dce1f4b1c38e98a3b5a587bd7b232129af5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CLZ_TAB @@ -0,0 +1 @@ +CONFIG_CLZ_TAB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CMA_SYSFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CMA_SYSFS new file mode 100644 index 0000000000000000000000000000000000000000..20d58f7e81173b1facbdce16298e85ef97a77118 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CMA_SYSFS @@ -0,0 +1 @@ +# CONFIG_CMA_SYSFS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CMDLINE_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_CMDLINE_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..2de3d9b7e82473cc264636d126c2bf261c842785 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CMDLINE_PARTITION @@ -0,0 +1 @@ +# CONFIG_CMDLINE_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CODA_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CODA_FS new file mode 100644 index 0000000000000000000000000000000000000000..f8fb39996766e90792db27959751ca5487501a17 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CODA_FS @@ -0,0 +1 @@ +# CONFIG_CODA_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMEDI b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMEDI new file mode 100644 index 0000000000000000000000000000000000000000..2d36fa996f9158b5454d08c91d7e284c3341fae0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMEDI @@ -0,0 +1 @@ +# CONFIG_COMEDI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CDCE706 b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CDCE706 new file mode 100644 index 0000000000000000000000000000000000000000..518e96d457bdf19ef6bcc95bc4972606c4620fd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CDCE706 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_CDCE706 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CS2000_CP b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CS2000_CP new file mode 100644 index 
0000000000000000000000000000000000000000..25982987f7a763cac26f6fb5a2b3f4e29f61a4b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CS2000_CP @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_CS2000_CP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_MAX9485 b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_MAX9485 new file mode 100644 index 0000000000000000000000000000000000000000..162d666b289107ac67657e5a8fe405438be584f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_MAX9485 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_MAX9485 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_PWM b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_PWM new file mode 100644 index 0000000000000000000000000000000000000000..8e67308395e4ffa8a723060499202edccf064d79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_PWM @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_PWM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5341 b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5341 new file mode 100644 index 0000000000000000000000000000000000000000..1496845d78f16dd65ddba3cf7203408073695cc1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5341 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_SI5341 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5351 b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5351 new file mode 100644 index 0000000000000000000000000000000000000000..3951baf0494ccebddfedfa9a8667f860875fda36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5351 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_SI5351 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI544 b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI544 new file mode 100644 index 0000000000000000000000000000000000000000..718d5db93170728d54c1286d48e04117e332158f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI544 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_SI544 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPACT_UNEVICTABLE_DEFAULT b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPACT_UNEVICTABLE_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..4f32a29f7eeab3efd9cd0053fea00102e6ca2a23 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPACT_UNEVICTABLE_DEFAULT @@ -0,0 +1 @@ +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_BINFMT_ELF b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_BINFMT_ELF new file mode 100644 index 0000000000000000000000000000000000000000..de53e58267e3380b2d23a9e656572ae08a658adb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_BINFMT_ELF @@ -0,0 +1 @@ +CONFIG_COMPAT_BINFMT_ELF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_OLD_SIGACTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_OLD_SIGACTION new file mode 100644 index 0000000000000000000000000000000000000000..ef0c1b3718a6f65db809d0f339a6a17246f4db77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_OLD_SIGACTION @@ -0,0 +1 @@ +CONFIG_COMPAT_OLD_SIGACTION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONSOLE_POLL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONSOLE_POLL new file mode 100644 index 0000000000000000000000000000000000000000..3b674a92553998384cf0679b985e3ee0ba96e3ff --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONSOLE_POLL @@ -0,0 +1 @@ +CONFIG_CONSOLE_POLL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_SWITCH_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_SWITCH_TRACER new file mode 100644 index 0000000000000000000000000000000000000000..72ba91c11a7e8ae8c348b927b27416e1eda1440b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_SWITCH_TRACER @@ -0,0 +1 @@ +CONFIG_CONTEXT_SWITCH_TRACER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING new file mode 100644 index 0000000000000000000000000000000000000000..b295430906e2ab17a68ec50ccc0aedc9e6b28490 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING @@ -0,0 +1 @@ +CONFIG_CONTEXT_TRACKING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_IDLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_IDLE new file mode 100644 index 0000000000000000000000000000000000000000..4d0d66aa892b2da191c8c728e82b14689779b72c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_IDLE @@ -0,0 +1 @@ +CONFIG_CONTEXT_TRACKING_IDLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER new file mode 100644 index 0000000000000000000000000000000000000000..db4d6c58fd594d78fbed6bbb56f681e2d7b2d616 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER @@ -0,0 +1 @@ +CONFIG_CONTEXT_TRACKING_USER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER_FORCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER_FORCE new file mode 100644 index 0000000000000000000000000000000000000000..5f1671cdd1302e60057772629d4aa657fb771409 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER_FORCE @@ -0,0 +1 @@ +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTIG_ALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTIG_ALLOC new file mode 100644 index 0000000000000000000000000000000000000000..6e91a33ce90b61e1debd8fbfa35affc8c3de007a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTIG_ALLOC @@ -0,0 +1 @@ +CONFIG_CONTIG_ALLOC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CORDIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CORDIC new file mode 100644 index 0000000000000000000000000000000000000000..1e5e51d4fdfcc6d5c5e6f8e4d0c70ce990154f49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CORDIC @@ -0,0 +1 @@ +CONFIG_CORDIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CORTINA_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_CORTINA_PHY new file mode 100644 index 0000000000000000000000000000000000000000..87341d40e91bed41838e40457a433e7edd0b52fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CORTINA_PHY @@ -0,0 +1 @@ +CONFIG_CORTINA_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COUNTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_COUNTER new file mode 100644 index 0000000000000000000000000000000000000000..7321d72e3f90e5e60d61ca60c98248a214eaa1aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COUNTER @@ -0,0 +1 @@ +# CONFIG_COUNTER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_ATTR_SET b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_ATTR_SET new file mode 100644 index 
0000000000000000000000000000000000000000..2bdd9c9b59c354018dc2ca5b9de2262a9a9f24bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_ATTR_SET @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_ATTR_SET=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_COMMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_COMMON new file mode 100644 index 0000000000000000000000000000000000000000..2fbd9f776c8f252d2d4a1a1a18ca33bf1bb2b2c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_COMMON @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_COMMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_HOTPLUG_STATE_CONTROL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_HOTPLUG_STATE_CONTROL new file mode 100644 index 0000000000000000000000000000000000000000..4b0b6ab02b16002243c0c498d866e2f1f5b5516e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_HOTPLUG_STATE_CONTROL @@ -0,0 +1 @@ +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_RMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_RMAP new file mode 100644 index 0000000000000000000000000000000000000000..8ab51fcba81bb583ff208bd3039d3723d6052db4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_RMAP @@ -0,0 +1 @@ +CONFIG_CPU_RMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC16 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC16 new file mode 100644 index 0000000000000000000000000000000000000000..3c20a511ae16e9424a0ac850b9fa5ac276985859 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC16 @@ -0,0 +1 @@ +CONFIG_CRC16=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32 new file mode 100644 index 0000000000000000000000000000000000000000..1333300126d85d23295a39f7e342e8b33972aa3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32 @@ -0,0 +1 @@ +CONFIG_CRC32=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_BIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_BIT new file mode 100644 index 0000000000000000000000000000000000000000..efaa0ffc1472d334de1eae4768dfc425bd38475a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_BIT @@ -0,0 +1 @@ +# CONFIG_CRC32_BIT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SARWATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SARWATE new file mode 100644 index 0000000000000000000000000000000000000000..6f9aa0b2716261272a82bffc590eb1ca27b5cd10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SARWATE @@ -0,0 +1 @@ +# CONFIG_CRC32_SARWATE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SELFTEST new file mode 100644 index 0000000000000000000000000000000000000000..ddea70b11a874b78aab7908b2fda3b7c3f85808f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SELFTEST @@ -0,0 +1 @@ +# CONFIG_CRC32_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY4 new file mode 100644 index 0000000000000000000000000000000000000000..3741ea9300d964ef55c15354b466c50f7cdadb70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY4 @@ -0,0 +1 @@ +# CONFIG_CRC32_SLICEBY4 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY8 new file mode 
100644 index 0000000000000000000000000000000000000000..9af267ff7e91b7c64a6ec9c83ea20751118c508c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY8 @@ -0,0 +1 @@ +CONFIG_CRC32_SLICEBY8=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC4 new file mode 100644 index 0000000000000000000000000000000000000000..7cd25b4af4c3c50a4674a7aa4b0f58875bfadadd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC4 @@ -0,0 +1 @@ +# CONFIG_CRC4 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC7 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC7 new file mode 100644 index 0000000000000000000000000000000000000000..0e8b98dc4f3d50e00b292d3e97e24f7bd51dc2fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC7 @@ -0,0 +1 @@ +CONFIG_CRC7=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC8 new file mode 100644 index 0000000000000000000000000000000000000000..2fd408e1bbb8afd46d6375289ead9e36f1bdfe36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC8 @@ -0,0 +1 @@ +CONFIG_CRC8=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_CCITT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_CCITT new file mode 100644 index 0000000000000000000000000000000000000000..e5498da09ee7a761d879ca542bb286071b552116 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_CCITT @@ -0,0 +1 @@ +CONFIG_CRC_CCITT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_ITU_T b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_ITU_T new file mode 100644 index 0000000000000000000000000000000000000000..b69e01140c8bbca0c7dba34c67618ba42c2319d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_ITU_T @@ -0,0 +1 @@ +CONFIG_CRC_ITU_T=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_T10DIF b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_T10DIF new file mode 100644 index 0000000000000000000000000000000000000000..0f16713a685526073204d8c10afc0542b652ee9e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_T10DIF @@ -0,0 +1 @@ +CONFIG_CRC_T10DIF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA new file mode 100644 index 0000000000000000000000000000000000000000..d9d7604f76951bea16a39d96754425b5b7fe55dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA @@ -0,0 +1 @@ +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305 new file mode 100644 index 0000000000000000000000000000000000000000..537df62b40fc2c8a345a63ba0b98094c40ad21ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305 @@ -0,0 +1 @@ +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARIA new file mode 100644 index 0000000000000000000000000000000000000000..2ff8e14fcfa91ad7433cd38cd29d9a162955b515 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARIA @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ARIA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_AMLOGIC_GXL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_AMLOGIC_GXL new file mode 100644 index 
0000000000000000000000000000000000000000..bd91573298a8e8965124f97a40867d7fbd6bf895 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_AMLOGIC_GXL @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_ECC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_ECC new file mode 100644 index 0000000000000000000000000000000000000000..cc0194c3ca8e30fc4aecae32ce5616c117c59e23 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_ECC @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_SHA204A b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_SHA204A new file mode 100644 index 0000000000000000000000000000000000000000..c2d37b3dabb998246f329771ca89e144fac988fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_SHA204A @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_CHELSIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_CHELSIO new file mode 100644 index 0000000000000000000000000000000000000000..42606535d64826a28dbcc4ec6513cfc7f55540f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_CHELSIO @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_CHELSIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX new file mode 100644 index 0000000000000000000000000000000000000000..4f48a626fa2179949a8e51c1827a6d348ac2dea5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_NITROX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX_CNN55XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX_CNN55XX new file mode 100644 index 0000000000000000000000000000000000000000..47ee7d9bc6d425a30ad8484be8233b21b27b1231 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX_CNN55XX @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_QAT_420XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_QAT_420XX new file mode 100644 index 0000000000000000000000000000000000000000..077d8661509b0f52bfc753d2e866eeeee2f18353 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_QAT_420XX @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_420XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_SAFEXCEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_SAFEXCEL new file mode 100644 index 0000000000000000000000000000000000000000..6bd1d5baa694a4bac5aeb1d6d581ec16cc2d451f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_SAFEXCEL @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DH_RFC7919_GROUPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DH_RFC7919_GROUPS new file mode 100644 index 0000000000000000000000000000000000000000..ca071066c02baf1d0adf0e8451a2bd636047f2e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DH_RFC7919_GROUPS @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ECRDSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ECRDSA new file mode 100644 index 
0000000000000000000000000000000000000000..63f9e139a11becfa6bc286354fa4a17cff18ddd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ECRDSA @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ECRDSA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ENGINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ENGINE new file mode 100644 index 0000000000000000000000000000000000000000..947c8fcc13ed5827152fa3a0116ab311b2bd65d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ENGINE @@ -0,0 +1 @@ +CONFIG_CRYPTO_ENGINE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_CUSTOM_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_CUSTOM_VERSION new file mode 100644 index 0000000000000000000000000000000000000000..ea35dbd137cc1adba8188858538f5d4296ff314e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_CUSTOM_VERSION @@ -0,0 +1 @@ +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_NAME b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_NAME new file mode 100644 index 0000000000000000000000000000000000000000..c5396d0eb7d26e11c9f67da896c40275eae102c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_NAME @@ -0,0 +1 @@ +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV new file mode 100644 index 0000000000000000000000000000000000000000..86cb18c3b821e9dbddddda6a6df1089ca77f88b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV @@ -0,0 +1 @@ +CONFIG_CRYPTO_GENIV=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_HCTR2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_HCTR2 new file mode 100644 index 0000000000000000000000000000000000000000..d6d6e5acef0bf35be3b895c67686a5540e63394d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_HCTR2 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_HCTR2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE new file mode 100644 index 0000000000000000000000000000000000000000..6c0d8f6fef335d02a8f242aff754432c0c7bccca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE @@ -0,0 +1 @@ +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_ARC4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_ARC4 new file mode 100644 index 0000000000000000000000000000000000000000..a7ab5dc3b8f93ee498e7042ef348c435858ec47e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_ARC4 @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_ARC4=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC new file mode 100644 index 0000000000000000000000000000000000000000..4669430bdf36bcca50effc05e19b1a91bf079011 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA new file mode 100644 index 0000000000000000000000000000000000000000..7259b16414a0ec60bbf62c99beb50e1fc9360523 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_CHACHA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA20POLY1305 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA20POLY1305 new file mode 100644 index 0000000000000000000000000000000000000000..cbd06835522ef9b2b07bcba87548c6877ed72e35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA20POLY1305 @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA_GENERIC new file mode 100644 index 0000000000000000000000000000000000000000..55269579a77b46aca7ff0cd960b90d4caae76268 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA_GENERIC @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519 new file mode 100644 index 0000000000000000000000000000000000000000..3b1931064346f354fad67747a8004436ba6e7705 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519 @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_CURVE25519=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519_GENERIC new file mode 100644 index 0000000000000000000000000000000000000000..d2afd62f53e95e64670fa712cc6c44869cfc960d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519_GENERIC @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_DES b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_DES new file mode 100644 index 0000000000000000000000000000000000000000..fa076d710943f4f797f3d4a1b4feffe5c4c08bde --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_DES @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_DES=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_GF128MUL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_GF128MUL new file mode 100644 index 0000000000000000000000000000000000000000..38fce81f8e9a00ce6ec17144c3f12ddc205fbaea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_GF128MUL @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_GF128MUL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305 new file mode 100644 index 0000000000000000000000000000000000000000..3d2420f667a5ccd095c27fdcad7890b0f816bc93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305 @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_POLY1305=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305_GENERIC new file mode 100644 index 0000000000000000000000000000000000000000..d0e733171567f4c6bce1154569d1494c404b636e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305_GENERIC @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_SHA1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_SHA1 new file mode 100644 index 0000000000000000000000000000000000000000..33d50c29937383faa9d2206e0e2f805ed0be7a68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_SHA1 @@ -0,0 +1 @@ 
+CONFIG_CRYPTO_LIB_SHA1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_UTILS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_UTILS new file mode 100644 index 0000000000000000000000000000000000000000..4f2879497171108573a8978e80090c07371da6d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_UTILS @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_UTILS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG new file mode 100644 index 0000000000000000000000000000000000000000..01ee2034fbdcbf1b861226829cd5b11a72865469 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG @@ -0,0 +1 @@ +CONFIG_CRYPTO_SIG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG2 new file mode 100644 index 0000000000000000000000000000000000000000..e67083ef0815bafdc89c91a4deca0fecdd61ca3e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SIG2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CSD_LOCK_WAIT_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CSD_LOCK_WAIT_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..086e41bbfcf836e0bfb71dd869317f6bb4a79061 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CSD_LOCK_WAIT_DEBUG @@ -0,0 +1 @@ +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CXL_REGION_INVALIDATION_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_CXL_REGION_INVALIDATION_TEST new file mode 100644 index 0000000000000000000000000000000000000000..a07a1943aa56b126369bda60389b1e1a9dc6fb6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CXL_REGION_INVALIDATION_TEST @@ -0,0 +1 @@ +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_LRU_SORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_LRU_SORT new file mode 100644 index 0000000000000000000000000000000000000000..773ad5d68b1d740b8acca79a86b924bece4ba3d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_LRU_SORT @@ -0,0 +1 @@ +# CONFIG_DAMON_LRU_SORT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_RECLAIM b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_RECLAIM new file mode 100644 index 0000000000000000000000000000000000000000..e3e14ea37a2225b7f6e922832a8dc69fd8ec590e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_RECLAIM @@ -0,0 +1 @@ +# CONFIG_DAMON_RECLAIM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_SYSFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_SYSFS new file mode 100644 index 0000000000000000000000000000000000000000..9cd6a1d32eb540affef6f6d756dbddd95f714a49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_SYSFS @@ -0,0 +1 @@ +# CONFIG_DAMON_SYSFS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DAVICOM_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAVICOM_PHY new file mode 100644 index 0000000000000000000000000000000000000000..064b2bebafc14cc15cfa2a18b083fb532f640e07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAVICOM_PHY @@ -0,0 +1 @@ +CONFIG_DAVICOM_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DCACHE_WORD_ACCESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DCACHE_WORD_ACCESS new file mode 100644 index 0000000000000000000000000000000000000000..7e6e1f9f172518e46eb5f31414385886f8ce8d09 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DCACHE_WORD_ACCESS @@ -0,0 +1 @@ +CONFIG_DCACHE_WORD_ACCESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_ATOMIC_SLEEP b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_ATOMIC_SLEEP new file mode 100644 index 0000000000000000000000000000000000000000..cfd25ccf58f041fbeb751fd73e10a0d26b3290ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_ATOMIC_SLEEP @@ -0,0 +1 @@ +# CONFIG_DEBUG_ATOMIC_SLEEP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_CGROUP_REF b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_CGROUP_REF new file mode 100644 index 0000000000000000000000000000000000000000..0019796d66941bbf2d62f4cb8545dd60cf37cc2b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_CGROUP_REF @@ -0,0 +1 @@ +# CONFIG_DEBUG_CGROUP_REF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DEVRES b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DEVRES new file mode 100644 index 0000000000000000000000000000000000000000..f54e7fd39acddd32e0bb6f9566119d175dea2f1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DEVRES @@ -0,0 +1 @@ +# CONFIG_DEBUG_DEVRES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DRIVER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DRIVER new file mode 100644 index 0000000000000000000000000000000000000000..84f5416f9116085da5e1d75be2651e10456a74f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DRIVER @@ -0,0 +1 @@ +# CONFIG_DEBUG_DRIVER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..c278d8cce41e0631508e64c53dc9a72b6c8181ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_GPIO @@ -0,0 +1 @@ +# CONFIG_DEBUG_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_NONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_NONE new file mode 100644 index 0000000000000000000000000000000000000000..d6a7951bf8be7d4aae8c2994ad792fe995cb24b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_NONE @@ -0,0 +1 @@ +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZLIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZLIB new file mode 100644 index 0000000000000000000000000000000000000000..426fd1faeb00930fe2273b22612ab1a27d26d40c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZLIB @@ -0,0 +1 @@ +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZSTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZSTD new file mode 100644 index 0000000000000000000000000000000000000000..815ade5f15a5507e59d4e67d7d98332b5bea0d47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZSTD @@ -0,0 +1 @@ +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_DWARF5 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_DWARF5 new file mode 100644 index 0000000000000000000000000000000000000000..e31a11334fce7d322f0da694f29b38108eefa119 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_DWARF5 @@ -0,0 +1 @@ +# CONFIG_DEBUG_INFO_DWARF5 is not set 
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_NONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_NONE new file mode 100644 index 0000000000000000000000000000000000000000..3e8d195ebbfa1533dfc485b2f31bd754cacc5864 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_NONE @@ -0,0 +1 @@ +# CONFIG_DEBUG_INFO_NONE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_IRQFLAGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_IRQFLAGS new file mode 100644 index 0000000000000000000000000000000000000000..87c713339cef37e2f1e3e6316a1ec9ab83623be3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_IRQFLAGS @@ -0,0 +1 @@ +# CONFIG_DEBUG_IRQFLAGS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_KOBJECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_KOBJECT new file mode 100644 index 0000000000000000000000000000000000000000..c62a3581c0104d4acf14adfde427ba27b7e5bdce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_KOBJECT @@ -0,0 +1 @@ +# CONFIG_DEBUG_KOBJECT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCKING_API_SELFTESTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCKING_API_SELFTESTS new file mode 100644 index 0000000000000000000000000000000000000000..bc7067b6bd58e8731e166c0e685d794ce936788e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCKING_API_SELFTESTS @@ -0,0 +1 @@ +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCK_ALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCK_ALLOC new file mode 100644 index 0000000000000000000000000000000000000000..ea3c2dc126d7ce609027de9eb40e36ad55827876 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCK_ALLOC @@ -0,0 +1 @@ +# CONFIG_DEBUG_LOCK_ALLOC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MAPLE_TREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MAPLE_TREE new file mode 100644 index 0000000000000000000000000000000000000000..a41698b8ec24bd07dd0082b29bb12d1d2622e75d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MAPLE_TREE @@ -0,0 +1 @@ +# CONFIG_DEBUG_MAPLE_TREE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MUTEXES b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MUTEXES new file mode 100644 index 0000000000000000000000000000000000000000..92a6a5feabe5d61753fc5355fe5eaf639ce43eb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MUTEXES @@ -0,0 +1 @@ +# CONFIG_DEBUG_MUTEXES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_NET b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_NET new file mode 100644 index 0000000000000000000000000000000000000000..03304d546a5d14a32a4a3969cc23482774d950a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_NET @@ -0,0 +1 @@ +# CONFIG_DEBUG_NET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_OBJECTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_OBJECTS new file mode 100644 index 0000000000000000000000000000000000000000..0846705c973910ac970207c8a54c7c5c8f000a71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_OBJECTS @@ -0,0 +1 @@ +# CONFIG_DEBUG_OBJECTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PER_CPU_MAPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PER_CPU_MAPS new file mode 100644 index 
0000000000000000000000000000000000000000..01ead72817e107bfb6d5ba018b6ec812d6f940b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PER_CPU_MAPS @@ -0,0 +1 @@ +# CONFIG_DEBUG_PER_CPU_MAPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PINCTRL b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PINCTRL new file mode 100644 index 0000000000000000000000000000000000000000..82a04e826bfa71d17cb8a7a7a1229c402d06125f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PINCTRL @@ -0,0 +1 @@ +# CONFIG_DEBUG_PINCTRL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PREEMPT b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PREEMPT new file mode 100644 index 0000000000000000000000000000000000000000..a1f73ed5c17796528b731541c22783ea2ab330a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PREEMPT @@ -0,0 +1 @@ +# CONFIG_DEBUG_PREEMPT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RT_MUTEXES b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RT_MUTEXES new file mode 100644 index 0000000000000000000000000000000000000000..742d62fb459f5a344bcfa049557fdbe79fba24a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RT_MUTEXES @@ -0,0 +1 @@ +# CONFIG_DEBUG_RT_MUTEXES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RWSEMS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RWSEMS new file mode 100644 index 0000000000000000000000000000000000000000..7572af4fc0680922af98c1b9d1a6f1f985ed5821 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RWSEMS @@ -0,0 +1 @@ +# CONFIG_DEBUG_RWSEMS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SHIRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SHIRQ new file mode 100644 index 0000000000000000000000000000000000000000..ab64f3007d1b7a4a7db9d94aea4ec5da394472f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SHIRQ @@ -0,0 +1 @@ +CONFIG_DEBUG_SHIRQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SPINLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SPINLOCK new file mode 100644 index 0000000000000000000000000000000000000000..1440abeb319491c1b34f909b0272a53dab4c730f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SPINLOCK @@ -0,0 +1 @@ +# CONFIG_DEBUG_SPINLOCK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_STACK_USAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_STACK_USAGE new file mode 100644 index 0000000000000000000000000000000000000000..50688e28a552e0b3570e80d36364626a1563033f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_STACK_USAGE @@ -0,0 +1 @@ +# CONFIG_DEBUG_STACK_USAGE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TEST_DRIVER_REMOVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TEST_DRIVER_REMOVE new file mode 100644 index 0000000000000000000000000000000000000000..a7886e4ac04b9bf6cbfd1cac893eaba12a06bfa0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TEST_DRIVER_REMOVE @@ -0,0 +1 @@ +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TIMEKEEPING b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TIMEKEEPING new file mode 100644 index 0000000000000000000000000000000000000000..2c0dd678c6c7f71c2e8b7c419b3c8d6af5b7e57b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TIMEKEEPING @@ -0,0 +1 @@ +# CONFIG_DEBUG_TIMEKEEPING is not set diff 
--git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VIRTUAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VIRTUAL new file mode 100644 index 0000000000000000000000000000000000000000..c06eb53bede5dcfc9c28131aae4d6273542a1f65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VIRTUAL @@ -0,0 +1 @@ +# CONFIG_DEBUG_VIRTUAL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM new file mode 100644 index 0000000000000000000000000000000000000000..5dc25e1154591bf67bae5ac07418e13751881d7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM @@ -0,0 +1 @@ +# CONFIG_DEBUG_VM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM_PGTABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM_PGTABLE new file mode 100644 index 0000000000000000000000000000000000000000..2dabf3429c813d21db42a8cfcacf8958280094ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM_PGTABLE @@ -0,0 +1 @@ +# CONFIG_DEBUG_VM_PGTABLE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WQ_FORCE_RR_CPU b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WQ_FORCE_RR_CPU new file mode 100644 index 0000000000000000000000000000000000000000..f5952cf0b4903a402b80d03fad0b0d51f5129118 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WQ_FORCE_RR_CPU @@ -0,0 +1 @@ +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WW_MUTEX_SLOWPATH b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WW_MUTEX_SLOWPATH new file mode 100644 index 0000000000000000000000000000000000000000..f4151e3d5e7645ca33ecda2bf0d0dc7e30c3ec9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WW_MUTEX_SLOWPATH @@ -0,0 +1 @@ +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_BZIP2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_BZIP2 new file mode 100644 index 0000000000000000000000000000000000000000..73f457e49f5ed05eab043f56b4d4efdc338ee556 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_BZIP2 @@ -0,0 +1 @@ +CONFIG_DECOMPRESS_BZIP2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_GZIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_GZIP new file mode 100644 index 0000000000000000000000000000000000000000..7bb30c7d993560ea4f6683f776f25ee1329f113b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_GZIP @@ -0,0 +1 @@ +CONFIG_DECOMPRESS_GZIP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZ4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZ4 new file mode 100644 index 0000000000000000000000000000000000000000..1dc3df164bfa57e7ee17a3427bff6205798e4d57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZ4 @@ -0,0 +1 @@ +CONFIG_DECOMPRESS_LZ4=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZMA new file mode 100644 index 0000000000000000000000000000000000000000..dbec9f244d635ddbfb1032c976ace11185fa07e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZMA @@ -0,0 +1 @@ +CONFIG_DECOMPRESS_LZMA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZO b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZO new file mode 100644 index 0000000000000000000000000000000000000000..9b21e6b9987d9a4c8b6cc77ed5d1f209533496a2 --- 
/dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZO
@@ -0,0 +1 @@
+CONFIG_DECOMPRESS_LZO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_XZ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_XZ
new file mode 100644
index 0000000000000000000000000000000000000000..ee04f1dca51d64a5c2bf99b95438915ee1a5cd47
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_XZ
@@ -0,0 +1 @@
+CONFIG_DECOMPRESS_XZ=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_ZSTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_ZSTD
new file mode 100644
index 0000000000000000000000000000000000000000..4cad492ad6c47dfd805747b7107d9f781c45c328
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_ZSTD
@@ -0,0 +1 @@
+CONFIG_DECOMPRESS_ZSTD=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_HOSTNAME b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_HOSTNAME
new file mode 100644
index 0000000000000000000000000000000000000000..2991d2f1191ae6a7eedf1eae131b8591ef3028c0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_HOSTNAME
@@ -0,0 +1 @@
+CONFIG_DEFAULT_HOSTNAME="(none)"
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_INIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_INIT
new file mode 100644
index 0000000000000000000000000000000000000000..f89ac0249ea042be9cabe5e18682c11d36e18387
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_INIT
@@ -0,0 +1 @@
+CONFIG_DEFAULT_INIT=""
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVICE_MIGRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVICE_MIGRATION
new file mode 100644
index 0000000000000000000000000000000000000000..6b188826054a12a55f9829a5ba534c6da8f796e3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVICE_MIGRATION
@@ -0,0 +1 @@
+CONFIG_DEVICE_MIGRATION=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVTMPFS_SAFE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVTMPFS_SAFE
new file mode 100644
index 0000000000000000000000000000000000000000..29951ab12ab39da6cfd5e89899ee0b1eb2665999
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVTMPFS_SAFE
@@ -0,0 +1 @@
+# CONFIG_DEVTMPFS_SAFE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DIMLIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_DIMLIB
new file mode 100644
index 0000000000000000000000000000000000000000..a082ff1adc015fdae1338229d10a218e8594d348
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DIMLIB
@@ -0,0 +1 @@
+CONFIG_DIMLIB=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM9051 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM9051
new file mode 100644
index 0000000000000000000000000000000000000000..b0a87854d9bb1b1b502200f271c5f2ad62234a43
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM9051
@@ -0,0 +1 @@
+# CONFIG_DM9051 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..15e748ca8bebb371274c547196bd3a4ce3f39a65
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_DEBUG
@@ -0,0 +1 @@
+# CONFIG_DMABUF_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_MOVE_NOTIFY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_MOVE_NOTIFY
new file mode 100644
index 0000000000000000000000000000000000000000..33c2fe87688e846a7e4feca9e85351ba48ca32e7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_MOVE_NOTIFY
@@ -0,0 +1 @@
+# CONFIG_DMABUF_MOVE_NOTIFY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SELFTESTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SELFTESTS
new file mode 100644
index 0000000000000000000000000000000000000000..6943ce41c58ffb2ef4960e718ee992402dde5076
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SELFTESTS
@@ -0,0 +1 @@
+# CONFIG_DMABUF_SELFTESTS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SYSFS_STATS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SYSFS_STATS
new file mode 100644
index 0000000000000000000000000000000000000000..2a616d744d11cb71ba95c64ec66616dfc5cf778b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SYSFS_STATS
@@ -0,0 +1 @@
+# CONFIG_DMABUF_SYSFS_STATS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMAPOOL_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMAPOOL_TEST
new file mode 100644
index 0000000000000000000000000000000000000000..8049f96f03224c64cd3a0088c6d07f010dc94f9d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMAPOOL_TEST
@@ -0,0 +1 @@
+# CONFIG_DMAPOOL_TEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ACPI
new file mode 100644
index 0000000000000000000000000000000000000000..cfc6819153b95663cdffca21b10778f025be5fb3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ACPI
@@ -0,0 +1 @@
+CONFIG_DMA_ACPI=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_COHERENT_POOL b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_COHERENT_POOL
new file mode 100644
index 0000000000000000000000000000000000000000..4350357375fedd347d26996967877665ce4fc8cf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_COHERENT_POOL
@@ -0,0 +1 @@
+CONFIG_DMA_COHERENT_POOL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ENGINE_RAID b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ENGINE_RAID
new file mode 100644
index 0000000000000000000000000000000000000000..074c691a93474a7c8729fa2b6b045c2497162afb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ENGINE_RAID
@@ -0,0 +1 @@
+CONFIG_DMA_ENGINE_RAID=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_FENCE_TRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_FENCE_TRACE
new file mode 100644
index 0000000000000000000000000000000000000000..bb21d9c521b1ffc0b1103ccfe5e7abbaaa0a52e2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_FENCE_TRACE
@@ -0,0 +1 @@
+# CONFIG_DMA_FENCE_TRACE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_MAP_BENCHMARK b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_MAP_BENCHMARK
new file mode 100644
index 0000000000000000000000000000000000000000..c168b4b6cd69051dcc9bc5783709971e55f5dccf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_MAP_BENCHMARK
@@ -0,0 +1 @@
+# CONFIG_DMA_MAP_BENCHMARK is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_NUMA_CMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_NUMA_CMA
new file mode 100644
index 0000000000000000000000000000000000000000..e4146244134d601a94997f1caefbdebc189f58ff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_NUMA_CMA
@@ -0,0 +1 @@
+# CONFIG_DMA_NUMA_CMA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_OPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_OPS
new file mode 100644
index 0000000000000000000000000000000000000000..c18773fcb27ffb66db7a28753f013ae03e0359e8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_OPS
@@ -0,0 +1 @@
+CONFIG_DMA_OPS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_SHARED_BUFFER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_SHARED_BUFFER
new file mode 100644
index 0000000000000000000000000000000000000000..1d2691cb1de418faf736af1b51379b1b636fc94d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_SHARED_BUFFER
@@ -0,0 +1 @@
+CONFIG_DMA_SHARED_BUFFER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_AUDIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_AUDIT
new file mode 100644
index 0000000000000000000000000000000000000000..318a255494f6aba7cfc588815fa945725afea927
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_AUDIT
@@ -0,0 +1 @@
+CONFIG_DM_AUDIT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BIO_PRISON b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BIO_PRISON
new file mode 100644
index 0000000000000000000000000000000000000000..17aa67d71984a047465a025faeeff1d63f290f22
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BIO_PRISON
@@ -0,0 +1 @@
+CONFIG_DM_BIO_PRISON=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BUFIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BUFIO
new file mode 100644
index 0000000000000000000000000000000000000000..93af8fc46ce56e3fc9e972157af44da2ad135548
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BUFIO
@@ -0,0 +1 @@
+CONFIG_DM_BUFIO=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_CLONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_CLONE
new file mode 100644
index 0000000000000000000000000000000000000000..03f992664487e2a2cd0c1303d22f1698e14f31f9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_CLONE
@@ -0,0 +1 @@
+# CONFIG_DM_CLONE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
new file mode 100644
index 0000000000000000000000000000000000000000..52dd34960278bfeea29b33b26e602d59c273ae6b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
@@ -0,0 +1 @@
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DUST b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DUST
new file mode 100644
index 0000000000000000000000000000000000000000..ffac78bb5d3093a237c4c3ff5f56f3206a99650c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DUST
@@ -0,0 +1 @@
+# CONFIG_DM_DUST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_EBS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_EBS
new file mode 100644
index 0000000000000000000000000000000000000000..fef2f5dccd1274169984e55a420ed7df44398dec
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_EBS
@@ -0,0 +1 @@
+# CONFIG_DM_EBS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_HST b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_HST
new file mode 100644
index 0000000000000000000000000000000000000000..a9939aa97bdecf3a731e7fd950bfde152604b1c3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_HST
@@ -0,0 +1 @@
+# CONFIG_DM_MULTIPATH_HST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_IOA b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_IOA
new file mode 100644
index 0000000000000000000000000000000000000000..9dbcf92fa81e4e9895bfbcd5ee01e09316de6ecb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_IOA
@@ -0,0 +1 @@
+# CONFIG_DM_MULTIPATH_IOA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_PERSISTENT_DATA b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_PERSISTENT_DATA
new file mode 100644
index 0000000000000000000000000000000000000000..529f0c35b9244cda7ee05e482ffd4efbe3cc3482
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_PERSISTENT_DATA
@@ -0,0 +1 @@
+CONFIG_DM_PERSISTENT_DATA=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_UNSTRIPED b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_UNSTRIPED
new file mode 100644
index 0000000000000000000000000000000000000000..f3d4f533b1b8aef64b2638beeb867fa09a64d06e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_UNSTRIPED
@@ -0,0 +1 @@
+# CONFIG_DM_UNSTRIPED is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_FEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_FEC
new file mode 100644
index 0000000000000000000000000000000000000000..955cb360f27b8def370e58003a650f7557baeb5c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_FEC
@@ -0,0 +1 @@
+# CONFIG_DM_VERITY_FEC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG
new file mode 100644
index 0000000000000000000000000000000000000000..4d2da6ce4dda3d994e845327587739b0e1c099ac
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG
@@ -0,0 +1 @@
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_DNET
new file mode 100644
index 0000000000000000000000000000000000000000..55807a9e0450c59b3a6463452bf92385a8f9f9b6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DNET
@@ -0,0 +1 @@
+CONFIG_DNET=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83640_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83640_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..15f62f80290b1462dd829ef746ba0ab5887b4889
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83640_PHY
@@ -0,0 +1 @@
+CONFIG_DP83640_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83822_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83822_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..e97e5ab0da2900ee88264f9016556227e493fc0e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83822_PHY
@@ -0,0 +1 @@
+CONFIG_DP83822_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83848_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83848_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..86d916a4aafc972cae3be266573e4fc316eb7aa8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83848_PHY
@@ -0,0 +1 @@
+CONFIG_DP83848_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83867_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83867_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..5ba3d57bd52c31148c350b9ba9c300b067d676e4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83867_PHY
@@ -0,0 +1 @@
+CONFIG_DP83867_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83869_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83869_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..100b19b53f5687bd3a75a261669723ef2c676c73
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83869_PHY
@@ -0,0 +1 @@
+# CONFIG_DP83869_PHY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TC811_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TC811_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..5c750e473f530f79388cc34b8541d15ada55b558
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TC811_PHY
@@ -0,0 +1 @@
+CONFIG_DP83TC811_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TD510_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TD510_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..e12a8fbdfb7a3c97a55b46ec8d69bcbb87779107
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TD510_PHY
@@ -0,0 +1 @@
+# CONFIG_DP83TD510_PHY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DQL b/anolis/configs/L2-OPTIONAL/default/CONFIG_DQL
new file mode 100644
index 0000000000000000000000000000000000000000..a7120cf432d9bc87879bcce83076df152befb6ab
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DQL
@@ -0,0 +1 @@
+CONFIG_DQL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRAGONRISE_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRAGONRISE_FF
new file mode 100644
index 0000000000000000000000000000000000000000..c1951e201a132ba321981c3701d999f63ab4632e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRAGONRISE_FF
@@ -0,0 +1 @@
+# CONFIG_DRAGONRISE_FF is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ACCEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ACCEL
new file mode 100644
index 0000000000000000000000000000000000000000..2cf0b91ca5daa3e9f21cbeccb9141e62c7758268
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ACCEL
@@ -0,0 +1 @@
+# CONFIG_DRM_ACCEL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_DC_FP b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_DC_FP
new file mode 100644
index 0000000000000000000000000000000000000000..eb6a593afa3c8f054e9d531b69cde57cedbf86fc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_DC_FP
@@ -0,0 +1 @@
+CONFIG_DRM_AMD_DC_FP=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_SECURE_DISPLAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_SECURE_DISPLAY
new file mode 100644
index 0000000000000000000000000000000000000000..1159df7e69cd3643709dd1b2aed5206ad049651a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_SECURE_DISPLAY
@@ -0,0 +1 @@
+# CONFIG_DRM_AMD_SECURE_DISPLAY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ANALOGIX_ANX78XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ANALOGIX_ANX78XX
new file mode 100644
index 0000000000000000000000000000000000000000..5593305c317886cd05afbbccf674d8c84168575e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ANALOGIX_ANX78XX
@@ -0,0 +1 @@
+# CONFIG_DRM_ANALOGIX_ANX78XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BRIDGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BRIDGE
new file mode 100644
index 0000000000000000000000000000000000000000..243fa7dfa527e79084d1745d2c8c08058184120c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BRIDGE
@@ -0,0 +1 @@
+CONFIG_DRM_BRIDGE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BUDDY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BUDDY
new file mode 100644
index 0000000000000000000000000000000000000000..3f817c6c3525d663ed2750890fbd493c7c395344
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BUDDY
@@ -0,0 +1 @@
+CONFIG_DRM_BUDDY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_DP_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_DP_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..119df919c13d96bd2e61e3fcd51e29cceb69d11c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_DP_HELPER
@@ -0,0 +1 @@
+CONFIG_DRM_DISPLAY_DP_HELPER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDCP_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDCP_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..bc9c3a681decfdff7685198ad47b4129e2e893f0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDCP_HELPER
@@ -0,0 +1 @@
+CONFIG_DRM_DISPLAY_HDCP_HELPER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDMI_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDMI_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..84ff83cac27511ffc3eb81ae839de0b0b5d75777
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDMI_HELPER
@@ -0,0 +1 @@
+CONFIG_DRM_DISPLAY_HDMI_HELPER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..e19cd5d255e0596b3cd3d82757e0a6ee3f46e142
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HELPER
@@ -0,0 +1 @@
+CONFIG_DRM_DISPLAY_HELPER=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ETNAVIV b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ETNAVIV
new file mode 100644
index 0000000000000000000000000000000000000000..e9f67ce86810d8a6650584d178477df836304644
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ETNAVIV
@@ -0,0 +1 @@
+# CONFIG_DRM_ETNAVIV is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_EXEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_EXEC
new file mode 100644
index 0000000000000000000000000000000000000000..1d9dffe557e44ca4e44bb478cfd631f18edf2124
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_EXEC
@@ -0,0 +1 @@
+CONFIG_DRM_EXEC=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GEM_SHMEM_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GEM_SHMEM_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..256f244966f1c0e6c611a1a37e8011a3164b24a6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GEM_SHMEM_HELPER
@@ -0,0 +1 @@
+CONFIG_DRM_GEM_SHMEM_HELPER=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GM12U320 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GM12U320
new file mode 100644
index 0000000000000000000000000000000000000000..68cc5925e030872fc90d5ead4eee267aa0c42dbd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GM12U320
@@ -0,0 +1 @@
+# CONFIG_DRM_GM12U320 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GUD b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GUD
new file mode 100644
index 0000000000000000000000000000000000000000..824b3830a0c5247cef4d9e3978cd5d011bc8679a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GUD
@@ -0,0 +1 @@
+# CONFIG_DRM_GUD is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_HYPERV b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_HYPERV
new file mode 100644
index 0000000000000000000000000000000000000000..b263ea50239c39481b72c0ed31aebdafccaa0cab
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_HYPERV
@@ -0,0 +1 @@
+# CONFIG_DRM_HYPERV is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_I2C_NXP_TDA9950 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_I2C_NXP_TDA9950
new file mode 100644
index 0000000000000000000000000000000000000000..e077c7537236009931b31deb1784a0e3a0b73247
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_I2C_NXP_TDA9950
@@ -0,0 +1 @@
+# CONFIG_DRM_I2C_NXP_TDA9950 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_KMS_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_KMS_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..b35bf4c66a106c6d6f69033dea7535926eb9fefa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_KMS_HELPER
@@ -0,0 +1 @@
+CONFIG_DRM_KMS_HELPER=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LEGACY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LEGACY
new file mode 100644
index 0000000000000000000000000000000000000000..2c74b876fb48e719e80c6233f7420960c7a404fa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LEGACY
@@ -0,0 +1 @@
+# CONFIG_DRM_LEGACY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LOONGSON b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LOONGSON
new file mode 100644
index 0000000000000000000000000000000000000000..2c481701cea887a30e8b1e3593eca27815617c32
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LOONGSON
@@ -0,0 +1 @@
+# CONFIG_DRM_LOONGSON is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL
new file mode 100644
index 0000000000000000000000000000000000000000..de8a9c247d1c03b95c0eee99f06a8fa1cac5562f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL
@@ -0,0 +1 @@
+CONFIG_DRM_PANEL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_AUO_A030JTN01 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_AUO_A030JTN01
new file mode 100644
index 0000000000000000000000000000000000000000..80eb7d860091510400744950d3f43b2eeb886a0f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_AUO_A030JTN01
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_BRIDGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_BRIDGE
new file mode 100644
index 0000000000000000000000000000000000000000..38e0dfae76bcbc90400af0bd512d6c3a880193d0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_BRIDGE
@@ -0,0 +1 @@
+CONFIG_DRM_PANEL_BRIDGE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ILITEK_ILI9341 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ILITEK_ILI9341
new file mode 100644
index 0000000000000000000000000000000000000000..8b5a455d4f26c9801fa4cbf81892b3bdb52977ac
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ILITEK_ILI9341
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_MIPI_DBI b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_MIPI_DBI
new file mode 100644
index 0000000000000000000000000000000000000000..036e60ec65493e1b7bdf3fae63fa299ae4e90ea1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_MIPI_DBI
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_MIPI_DBI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORIENTATION_QUIRKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORIENTATION_QUIRKS
new file mode 100644
index 0000000000000000000000000000000000000000..8e68b27635a44efd3eb79112664af246898a51d9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORIENTATION_QUIRKS
@@ -0,0 +1 @@
+CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORISETECH_OTA5601A b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORISETECH_OTA5601A
new file mode 100644
index 0000000000000000000000000000000000000000..e07f56f0bf7f692a59d729c2a737a90d5158f92f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORISETECH_OTA5601A
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_WIDECHIPS_WS2401 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_WIDECHIPS_WS2401
new file mode 100644
index 0000000000000000000000000000000000000000..3886d75b0cdef2742f2b18f4b1fc8e1beebf9554
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_WIDECHIPS_WS2401
@@ -0,0 +1 @@
+# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SCHED b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SCHED
new file mode 100644
index 0000000000000000000000000000000000000000..e13621fecffaf19526708f3997daaf6f39f4464d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SCHED
@@ -0,0 +1 @@
+CONFIG_DRM_SCHED=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SIMPLEDRM b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SIMPLEDRM
new file mode 100644
index 0000000000000000000000000000000000000000..2bd12280c07427c49d61c76cafe24d01beca1b21
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SIMPLEDRM
@@ -0,0 +1 @@
+# CONFIG_DRM_SIMPLEDRM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SSD130X b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SSD130X
new file mode 100644
index 0000000000000000000000000000000000000000..8ebb4ae752ee054ea834e7fca1a57df414f485b5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SSD130X
@@ -0,0 +1 @@
+# CONFIG_DRM_SSD130X is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SUBALLOC_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SUBALLOC_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..9edd082ef9c2da3d67d72f647b20c47655a80c38
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SUBALLOC_HELPER
@@ -0,0 +1 @@
+CONFIG_DRM_SUBALLOC_HELPER=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM
new file mode 100644
index 0000000000000000000000000000000000000000..0b3c61dd1f619e01ab22ae24aaf3a77ed93545ed
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM
@@ -0,0 +1 @@
+CONFIG_DRM_TTM=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..2b1b32c6c831930601e3ac497073c7c00e829304
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM_HELPER
@@ -0,0 +1 @@
+CONFIG_DRM_TTM_HELPER=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VGEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VGEM
new file mode 100644
index 0000000000000000000000000000000000000000..66aeb12990bee66cf61c3e92a2168f4407414ac6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VGEM
@@ -0,0 +1 @@
+# CONFIG_DRM_VGEM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VIRTIO_GPU_KMS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VIRTIO_GPU_KMS
new file mode 100644
index 0000000000000000000000000000000000000000..4e81ebc2f73c2ffcbd4a2c5fad9404a241e762be
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VIRTIO_GPU_KMS
@@ -0,0 +1 @@
+CONFIG_DRM_VIRTIO_GPU_KMS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VKMS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VKMS
new file mode 100644
index 0000000000000000000000000000000000000000..5e10197e7c2669a5f8480738e1c99402fc04ca6c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VKMS
@@ -0,0 +1 @@
+CONFIG_DRM_VKMS=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VRAM_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VRAM_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..c9ca0b26147fb4269aba55bb761d5778e190048d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VRAM_HELPER
@@ -0,0 +1 @@
+CONFIG_DRM_VRAM_HELPER=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DS1682 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DS1682
new file mode 100644
index 0000000000000000000000000000000000000000..7266bceb40171a23851d85d6c08aba9a0dfc3bcc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DS1682
@@ -0,0 +1 @@
+# CONFIG_DS1682 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DST_CACHE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DST_CACHE
new file mode 100644
index 0000000000000000000000000000000000000000..989343b204af23be7b05161c7aa8771cf17ab182
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DST_CACHE
@@ -0,0 +1 @@
+CONFIG_DST_CACHE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE
new file mode 100644
index 0000000000000000000000000000000000000000..4a7a56908765a012f06d9190df63c23fb56d2ad8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE
@@ -0,0 +1 @@
+CONFIG_DUMMY_CONSOLE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_COLUMNS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_COLUMNS
new file mode 100644
index 0000000000000000000000000000000000000000..e05b288c432238c79a669b32e8fcb5b27cef71f8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_COLUMNS
@@ -0,0 +1 @@
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_ROWS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_ROWS
new file mode 100644
index 0000000000000000000000000000000000000000..4b42476d88af9ea89ecb7dc954f0fd30c0ec5b39
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_ROWS
@@ -0,0 +1 @@
+CONFIG_DUMMY_CONSOLE_ROWS=25
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_IRQ
new file mode 100644
index 0000000000000000000000000000000000000000..d24642867df86fbacfbdd2d4a656de7bdea15e42
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_IRQ
@@ -0,0 +1 @@
+# CONFIG_DUMMY_IRQ is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_EDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_EDMA
new file mode 100644
index 0000000000000000000000000000000000000000..dc2c1589af2df6e54f9c20408b2922c815ed3a8d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_EDMA
@@ -0,0 +1 @@
+# CONFIG_DW_EDMA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_WATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..e4db55403e04931dd0879e0399c68fd85cf873b6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_WATCHDOG
@@ -0,0 +1 @@
+# CONFIG_DW_WATCHDOG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_XDATA_PCIE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_XDATA_PCIE
new file mode 100644
index 0000000000000000000000000000000000000000..d5aecfc82d79e642d2c215430636087db341eef6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_XDATA_PCIE
@@ -0,0 +1 @@
+# CONFIG_DW_XDATA_PCIE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_EVENTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_EVENTS
new file mode 100644
index 0000000000000000000000000000000000000000..08f1910943e48827980a90be16600f359889fd74
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_EVENTS
@@ -0,0 +1 @@
+CONFIG_DYNAMIC_EVENTS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_ARGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_ARGS
new file mode 100644
index 0000000000000000000000000000000000000000..28e0ae95aa58acd7f140000d3fd558fe26660a68
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_ARGS
@@ -0,0 +1 @@
+CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
new file mode 100644
index 0000000000000000000000000000000000000000..bdcba5638e8ad2619b54917d250becd640d899e2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
@@ -0,0 +1 @@
+CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ECHO b/anolis/configs/L2-OPTIONAL/default/CONFIG_ECHO
new file mode 100644
index 0000000000000000000000000000000000000000..b84a07b07a76c79908c323db7efe8016c2c3ad07
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ECHO
@@ -0,0 +1 @@
+# CONFIG_ECHO is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ECRYPT_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ECRYPT_FS
new file mode 100644
index 0000000000000000000000000000000000000000..5d2468d89d642ae32ff709a37731ce5a7fc0843e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ECRYPT_FS
@@ -0,0 +1 @@
+# CONFIG_ECRYPT_FS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EDAC_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_EDAC_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..ff1c03ae44e2b6eb3d369b618961d917b01cb0fd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EDAC_SUPPORT
@@ -0,0 +1 @@
+CONFIG_EDAC_SUPPORT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93CX6 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93CX6
new file mode 100644
index 0000000000000000000000000000000000000000..9e93b9eb029e3e3eef81643f4ef10ce658d21d32
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93CX6
@@ -0,0 +1 @@
+CONFIG_EEPROM_93CX6=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93XX46 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93XX46
new file mode 100644
index 0000000000000000000000000000000000000000..483f0eb88ebb11d477fcbc282a5271ae5cca1abe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93XX46
@@ -0,0 +1 @@
+# CONFIG_EEPROM_93XX46 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT24 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT24
new file mode 100644
index 0000000000000000000000000000000000000000..c19b44bc963980746bf238de5f1ea80aca1d047d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT24
@@ -0,0 +1 @@
+# CONFIG_EEPROM_AT24 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT25 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT25
new file mode 100644
index 0000000000000000000000000000000000000000..5fe1f07dee3d539a7d29e03c5acd99100f6ccd78
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT25
@@ -0,0 +1 @@
+# CONFIG_EEPROM_AT25 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_EE1004 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_EE1004
new file mode 100644
index 0000000000000000000000000000000000000000..7bb8b65f09ff55fba0cbe65a0d3023a487596786
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_EE1004
@@ -0,0 +1 @@
+# CONFIG_EEPROM_EE1004 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_IDT_89HPESX b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_IDT_89HPESX
new file mode 100644
index 0000000000000000000000000000000000000000..7f11cc3038f164662e1af5b5dfd015fb684bfb01
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_IDT_89HPESX
@@ -0,0 +1 @@
+# CONFIG_EEPROM_IDT_89HPESX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_LEGACY b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_LEGACY
new file mode 100644
index 0000000000000000000000000000000000000000..c53217d4d47218c1ed7486986388dbbaf9ea603f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_LEGACY
@@ -0,0 +1 @@
+CONFIG_EEPROM_LEGACY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_MAX6875 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_MAX6875
new file mode 100644
index 0000000000000000000000000000000000000000..9fc04b971031310f5b5f8f422536bf24bc0dac6a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_MAX6875
@@ -0,0 +1 @@
+CONFIG_EEPROM_MAX6875=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_BOOTLOADER_CONTROL b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_BOOTLOADER_CONTROL
new file mode 100644
index 0000000000000000000000000000000000000000..ca42dfb428c2eb0604c2451c8b90b2352e81c7b7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_BOOTLOADER_CONTROL
@@ -0,0 +1 @@
+# CONFIG_EFI_BOOTLOADER_CONTROL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_CAPSULE_LOADER b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_CAPSULE_LOADER
new file mode 100644
index 0000000000000000000000000000000000000000..2cc06321635b8d3f0ba67acde4689e11f7335b7c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_CAPSULE_LOADER
@@ -0,0 +1 @@
+# CONFIG_EFI_CAPSULE_LOADER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_PCI_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_PCI_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..db43b223469e9fd999bc1921ae1b3e883af41a6e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_PCI_DMA
@@ -0,0 +1 @@
+# CONFIG_EFI_DISABLE_PCI_DMA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_RUNTIME b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_RUNTIME
new file mode 100644
index 0000000000000000000000000000000000000000..a406d6669210e03a9659d2a9528d50edd197409d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_RUNTIME
@@ -0,0 +1 @@
+# CONFIG_EFI_DISABLE_RUNTIME is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_EARLYCON b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_EARLYCON
new file mode 100644
index 0000000000000000000000000000000000000000..dcc91b9ff6a45aac5de2e9264240d7b75bae4f28
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_EARLYCON
@@ -0,0 +1 @@
+CONFIG_EFI_EARLYCON=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_ESRT b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_ESRT
new file mode 100644
index 0000000000000000000000000000000000000000..b0c1d889512d38e639d2e4a1c0bc016d80f331c1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_ESRT
@@ -0,0 +1 @@
+CONFIG_EFI_ESRT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_PARTITION
new file mode 100644
index 0000000000000000000000000000000000000000..a7a899356a31680d3e73f70d649e73e509b47df6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_PARTITION
@@ -0,0 +1 @@
+CONFIG_EFI_PARTITION=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_RUNTIME_WRAPPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_RUNTIME_WRAPPERS
new file mode 100644
index 0000000000000000000000000000000000000000..417cfeda4afd35d94a3389c61c5fa608b5ff5329
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_RUNTIME_WRAPPERS
@@ -0,0 +1 @@
+CONFIG_EFI_RUNTIME_WRAPPERS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_TEST
new file mode 100644
index 0000000000000000000000000000000000000000..455eb306151d16c8d1efe4f4a447029232601647
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_TEST
@@ -0,0 +1 @@
+# CONFIG_EFI_TEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFS_FS
new file mode 100644
index 0000000000000000000000000000000000000000..8b1f1cbecb2a89ff73256f618389125cae399fbb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFS_FS
@@ -0,0 +1 @@
+# CONFIG_EFS_FS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ELFCORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ELFCORE
new file mode 100644
index 0000000000000000000000000000000000000000..55854ef005f5f976e9ef95daf8da0aa453220966
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ELFCORE
@@ -0,0 +1 @@
+CONFIG_ELFCORE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ENA_ETHERNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENA_ETHERNET
new file mode 100644
index 0000000000000000000000000000000000000000..64c0a73c1959254edee68ad8e2a8d5795736314b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENA_ETHERNET
@@ -0,0 +1 @@
+CONFIG_ENA_ETHERNET=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ENCLOSURE_SERVICES b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENCLOSURE_SERVICES
new file mode 100644
index 0000000000000000000000000000000000000000..040c8ef0085a260da3a528b69bb8eb57e5b966c3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENCLOSURE_SERVICES
@@ -0,0 +1 @@
+CONFIG_ENCLOSURE_SERVICES=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ENERGY_MODEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENERGY_MODEL
new file mode 100644
index 0000000000000000000000000000000000000000..ae61c90f2ecca05290c66ba9aa30aa71b32b1670
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENERGY_MODEL
@@ -0,0 +1 @@
+# CONFIG_ENERGY_MODEL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EQUALIZER b/anolis/configs/L2-OPTIONAL/default/CONFIG_EQUALIZER
new file mode 100644
index 0000000000000000000000000000000000000000..5c3282d3cdd2ba2457791e08299b628eeafa169a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EQUALIZER
@@ -0,0 +1 @@
+# CONFIG_EQUALIZER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_PCPU_KTHREAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_PCPU_KTHREAD
new file mode 100644
index 0000000000000000000000000000000000000000..76ef583e60b1a374f84ac3df7cc02287d3241c76
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_PCPU_KTHREAD
@@ -0,0 +1 @@
+# CONFIG_EROFS_FS_PCPU_KTHREAD is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_ZIP_ZSTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_ZIP_ZSTD
new file mode 100644
index 0000000000000000000000000000000000000000..fdfca6dcc2622c9841b45b5d4418b8287f594736
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_ZIP_ZSTD
@@ -0,0 +1 @@
+# CONFIG_EROFS_FS_ZIP_ZSTD is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHERNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHERNET
new file mode 100644
index 0000000000000000000000000000000000000000..62a7778f150b17e2dedaea483b799ea0da5fb519
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHERNET
@@ -0,0 +1 @@
+CONFIG_ETHERNET=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHOC
new file mode 100644
index 0000000000000000000000000000000000000000..e78d27772937351ba63e51a789d59c52ac8459cd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHOC
@@ -0,0 +1 @@
+CONFIG_ETHOC=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EVENT_TRACING b/anolis/configs/L2-OPTIONAL/default/CONFIG_EVENT_TRACING
new file mode 100644
index 0000000000000000000000000000000000000000..c411df81986ad99dd938e17addf00f3d9422d8cc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EVENT_TRACING
@@ -0,0 +1 @@
+CONFIG_EVENT_TRACING=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EXCLUSIVE_SYSTEM_RAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXCLUSIVE_SYSTEM_RAM
new file mode 100644
index 0000000000000000000000000000000000000000..a06b7c7a995aed7c25497464d4f3372308f050a6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXCLUSIVE_SYSTEM_RAM
@@ -0,0 +1 @@
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EXPORTFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXPORTFS
new file mode 100644
index 0000000000000000000000000000000000000000..21b4cdebaf32fe4bd22a4d9db0c7d97ec0ecd2b7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXPORTFS
@@ -0,0 +1 @@
+CONFIG_EXPORTFS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_POSIX_ACL b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_POSIX_ACL
new file mode 100644
index 0000000000000000000000000000000000000000..ab615c293cc0d801db4af18a9aca1c7cd24844a5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_POSIX_ACL
@@ -0,0 +1 @@
+# CONFIG_EXT3_FS_POSIX_ACL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_SECURITY b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_SECURITY
new file mode 100644
index 0000000000000000000000000000000000000000..e8c96ad0b63974b0c443ac080015734520b9de0e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_SECURITY
@@ -0,0 +1 @@
+# CONFIG_EXT3_FS_SECURITY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EZX_PCAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_EZX_PCAP
new file mode 100644
index 0000000000000000000000000000000000000000..f4ac470bdaea0abb32609c1124c4aba6c9dbdb35
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EZX_PCAP
@@ -0,0 +1 @@
+# CONFIG_EZX_PCAP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_F2FS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_F2FS_FS
new file mode 100644
index 0000000000000000000000000000000000000000..e71bcee61e39e42282f55dfee08dc676c366866a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_F2FS_FS
@@ -0,0 +1 @@
+# CONFIG_F2FS_FS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FARSYNC b/anolis/configs/L2-OPTIONAL/default/CONFIG_FARSYNC
new file mode 100644
index 0000000000000000000000000000000000000000..1dd0929a4e2f91f2b5665ca563aefe82a6974b04
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FARSYNC
@@ -0,0 +1 @@
+# CONFIG_FARSYNC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_3DFX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_3DFX
new file mode 100644
index 0000000000000000000000000000000000000000..63214cf1b92a8014945966ed9f9f54b8e7d5ce95
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_3DFX
@@ -0,0 +1 @@
+# CONFIG_FB_3DFX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ARK b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ARK
new file mode 100644
index 0000000000000000000000000000000000000000..3ed9dfc30b5019ba30ea664388ee637b0540101c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ARK
@@ -0,0 +1 @@
+# CONFIG_FB_ARK is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ASILIANT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ASILIANT
new file mode 100644
index 0000000000000000000000000000000000000000..34148fdc4befa4d036c8d1405714930ebd2d88f5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ASILIANT
@@ -0,0 +1 @@
+# CONFIG_FB_ASILIANT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY
new file mode 100644
index 0000000000000000000000000000000000000000..cae8eee36d9040239d4586b44e781d0d01950a78
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY
@@ -0,0 +1 @@
+# CONFIG_FB_ATY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY128 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY128
new file mode 100644
index 0000000000000000000000000000000000000000..da69465555ae90ee2a89ee0ca56da5bd67467871
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY128
@@ -0,0 +1 @@
+# CONFIG_FB_ATY128 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CARMINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CARMINE
new file mode 100644
index 0000000000000000000000000000000000000000..4710f2f333c621251bb079337ecbd32090b2c233
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CARMINE
@@ -0,0 +1 @@
+# CONFIG_FB_CARMINE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_COPYAREA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_COPYAREA
new file mode 100644
index 0000000000000000000000000000000000000000..c7e361de651117188400dfae5d8704fc784d9785
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_COPYAREA
@@ -0,0 +1 @@
+CONFIG_FB_CFB_COPYAREA=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_FILLRECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_FILLRECT
new file mode 100644
index 0000000000000000000000000000000000000000..704d65343e0779c3a74ad47d96be8ca43bfd2e65
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_FILLRECT
@@ -0,0 +1 @@
+CONFIG_FB_CFB_FILLRECT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_IMAGEBLIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_IMAGEBLIT
new file mode 100644
index 0000000000000000000000000000000000000000..86440cdbf6e91694460c87fcfc5a6e59ce65a368
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_IMAGEBLIT
@@ -0,0 +1 @@
+CONFIG_FB_CFB_IMAGEBLIT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CIRRUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CIRRUS
new file mode 100644
index 0000000000000000000000000000000000000000..e4a9f519acf3c3eeaaddfe607f7c87b18679a9aa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CIRRUS
@@ -0,0 +1 @@
+# CONFIG_FB_CIRRUS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..22cafa667b23ec84e8f58196a8408edfa3d79175
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CORE
@@ -0,0 +1 @@
+CONFIG_FB_CORE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CYBER2000 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CYBER2000
new file mode 100644
index 0000000000000000000000000000000000000000..27a73fa27136f72afce63ded091d6c9ba4011e03
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CYBER2000
@@ -0,0 +1 @@
+# CONFIG_FB_CYBER2000 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEFERRED_IO b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEFERRED_IO
new file mode 100644
index 0000000000000000000000000000000000000000..0b29411b8b17318a189c98aadecb404dc7bb3e7a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEFERRED_IO
@@ -0,0 +1 @@
+CONFIG_FB_DEFERRED_IO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEVICE b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEVICE
new file mode 100644
index 0000000000000000000000000000000000000000..a01b5849f85fe90d24e89d74337270d26f56f5ef
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEVICE
@@ -0,0 +1 @@
+CONFIG_FB_DEVICE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_FOREIGN_ENDIAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_FOREIGN_ENDIAN
new file mode 100644
index 0000000000000000000000000000000000000000..583ddc4f9bf21b9aa5c2d5043bf2561294d07ed3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_FOREIGN_ENDIAN
@@ -0,0 +1 @@
+# CONFIG_FB_FOREIGN_ENDIAN is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_I740 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_I740
new file mode 100644
index 0000000000000000000000000000000000000000..4a3cb7cebd5030fde90b2566774e97d5b99c04ea
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_I740
@@ -0,0 +1 @@
+# CONFIG_FB_I740 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IBM_GXT4500 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IBM_GXT4500
new file mode 100644
index 0000000000000000000000000000000000000000..628c9a85e0123f1e06b8b7ec8d40bcb11a735cb7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IBM_GXT4500
@@ -0,0 +1 @@
+# CONFIG_FB_IBM_GXT4500 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IMSTT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IMSTT
new file mode 100644
index 0000000000000000000000000000000000000000..4ca1a915d60ac9f5d7af68797a620e01a1d41cd7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IMSTT
@@ -0,0 +1 @@
+# CONFIG_FB_IMSTT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS
new file mode 100644
index 0000000000000000000000000000000000000000..485cf9b71de5bc92aa9662c4ce05178444f036c0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS
@@ -0,0 +1 @@
+CONFIG_FB_IOMEM_FOPS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_HELPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_HELPERS
new file mode 100644
index 0000000000000000000000000000000000000000..762245fb865d2260cb17e3d0b10cce5b3c9d0354
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_HELPERS
@@ -0,0 +1 @@
+CONFIG_FB_IOMEM_HELPERS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_KYRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_KYRO
new file mode 100644
index 0000000000000000000000000000000000000000..812aad0b8f360d989ef95d26f6c16afbdfb8bf13
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_KYRO
@@ -0,0 +1 @@
+# CONFIG_FB_KYRO is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MATROX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MATROX
new file mode 100644
index 0000000000000000000000000000000000000000..ff6a83e395f1e4241891432ee8893ab7333107f9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MATROX
@@ -0,0 +1 @@
+# CONFIG_FB_MATROX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MB862XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MB862XX
new file mode 100644
index 0000000000000000000000000000000000000000..b2ea119f36a69a6420baa829a9d9d8a82fd6480c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MB862XX
@@ -0,0 +1 @@
+# CONFIG_FB_MB862XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_METRONOME b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_METRONOME
new file mode 100644
index 0000000000000000000000000000000000000000..7eb7b58caeb0f702781cbf8081cb887978f29382
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_METRONOME
@@ -0,0 +1 @@
+# CONFIG_FB_METRONOME is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MODE_HELPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MODE_HELPERS
new file mode 100644
index 0000000000000000000000000000000000000000..e9eb6ec4ffe99cda83b05fd1fea0254894d5527b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MODE_HELPERS
@@ -0,0 +1 @@
+# CONFIG_FB_MODE_HELPERS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NEOMAGIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NEOMAGIC
new file mode 100644
index 0000000000000000000000000000000000000000..c40b63c27f98ffdabd4fca56d728adcfe0a0da83
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NEOMAGIC
@@ -0,0 +1 @@
+# CONFIG_FB_NEOMAGIC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NOTIFY b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NOTIFY
new file mode 100644
index 0000000000000000000000000000000000000000..cf08f4c4db5712929979a8afd1a5504afe1ad761
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NOTIFY
@@ -0,0 +1 @@
+CONFIG_FB_NOTIFY=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NVIDIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NVIDIA
new file mode 100644
index 0000000000000000000000000000000000000000..00e8d12d5055a4b2457c558a07518542bab0b4c2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NVIDIA
@@ -0,0 +1 @@
+# CONFIG_FB_NVIDIA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_OPENCORES b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_OPENCORES
new file mode 100644
index 0000000000000000000000000000000000000000..af7bd5b489861fdb50cbb592a0cb3aef6942b205
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_OPENCORES
@@ -0,0 +1 @@
+# CONFIG_FB_OPENCORES is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM2
new file mode 100644
index 0000000000000000000000000000000000000000..402b162af8e4badc5df789ed0267ad8d5869a769
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM2
@@ -0,0 +1 @@
+# CONFIG_FB_PM2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM3
new file mode 100644
index 0000000000000000000000000000000000000000..c0e9092e33629fbe8386b71d042ea500621b4d5b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM3
@@ -0,0 +1 @@
+# CONFIG_FB_PM3 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RADEON b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RADEON
new file mode 100644
index 0000000000000000000000000000000000000000..844570e81d789bdbf6f65640f69f6f7178ac42a9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RADEON
@@ -0,0 +1 @@
+# CONFIG_FB_RADEON is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RIVA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RIVA
new file mode 100644
index 0000000000000000000000000000000000000000..51d3df8ebfa2738826425ebc40bcdd25b9929721
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RIVA
@@ -0,0 +1 @@
+# CONFIG_FB_RIVA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S1D13XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S1D13XXX
new file mode 100644
index 0000000000000000000000000000000000000000..723326189585efab8319989630b0371fdd7284cd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S1D13XXX
@@ -0,0 +1 @@
+# CONFIG_FB_S1D13XXX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S3
new file mode 100644
index 0000000000000000000000000000000000000000..5e8d701d41f986270027de4ffe3b0acaf2c14205
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S3
@@ -0,0 +1 @@
+# CONFIG_FB_S3 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SAVAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SAVAGE
new file mode 100644
index 0000000000000000000000000000000000000000..7512c54c647dcd50b54d58739f6a50cec61515d4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SAVAGE
@@ -0,0 +1 @@
+# CONFIG_FB_SAVAGE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SIS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SIS
new file mode 100644
index 0000000000000000000000000000000000000000..3ad07d50ad15da82b1ea676cf4c543e862acd2df
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SIS
@@ -0,0 +1 @@
+# CONFIG_FB_SIS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SM712 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SM712
new file mode 100644
index 0000000000000000000000000000000000000000..78188e33d5c10ffe8067c0e8d52ea89886157b10
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SM712
@@ -0,0 +1 @@
+# CONFIG_FB_SM712 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SMSCUFX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SMSCUFX
new file mode 100644
index 0000000000000000000000000000000000000000..f80de74b87dee07ab598e4fe51c726480ba5f996
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SMSCUFX
@@ -0,0 +1 @@
+# CONFIG_FB_SMSCUFX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS
new file mode 100644
index 0000000000000000000000000000000000000000..4296fa8b9b921f76c48c7ca386435744d88469d7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS
@@ -0,0 +1 @@
+CONFIG_FB_SYSMEM_HELPERS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS_DEFERRED b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS_DEFERRED
new file mode 100644
index 0000000000000000000000000000000000000000..d2ea0b42899d3bcb3f4a56781817097e5e66327b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS_DEFERRED
@@ -0,0 +1 @@
+CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_COPYAREA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_COPYAREA
new file mode 100644
index 0000000000000000000000000000000000000000..d165742d50fe287173e2a43e940cb0fa8e1d87aa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_COPYAREA
@@ -0,0 +1 @@
+CONFIG_FB_SYS_COPYAREA=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FILLRECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FILLRECT
new file mode 100644
index 0000000000000000000000000000000000000000..ae245540bcceadf0ee89f1c6b845d6ca73a81876
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FILLRECT
@@ -0,0 +1 @@
+CONFIG_FB_SYS_FILLRECT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FOPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FOPS
new file mode 100644
index 0000000000000000000000000000000000000000..5d67337536f9c90b32aff60da9d1a10ba2f8ec36
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FOPS
@@ -0,0 +1 @@
+CONFIG_FB_SYS_FOPS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_IMAGEBLIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_IMAGEBLIT
new file mode 100644
index 0000000000000000000000000000000000000000..910e47f5e45d2c094ebee1a4e0529ab67cb1fc2d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_IMAGEBLIT
@@ -0,0 +1 @@
+CONFIG_FB_SYS_IMAGEBLIT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_TRIDENT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_TRIDENT
new file mode 100644
index 0000000000000000000000000000000000000000..d5a885fb9fb92e06795783d1e3703e396028c673
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_TRIDENT
@@ -0,0 +1 @@
+# CONFIG_FB_TRIDENT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UDL b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UDL
new file mode 100644
index 0000000000000000000000000000000000000000..b61aece886b77dba1546d90a2a821611f8bb26a1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UDL
@@ -0,0 +1 @@
+# CONFIG_FB_UDL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UVESA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UVESA
new file mode 100644
index 0000000000000000000000000000000000000000..b677212dcfe2402698ee8ebb6615259b702d1980
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UVESA
@@ -0,0 +1 @@
+# CONFIG_FB_UVESA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VIRTUAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VIRTUAL
new file mode 100644
index 0000000000000000000000000000000000000000..79dd529e9a79ae38a435a09e3219a27c237db070
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VIRTUAL
@@ -0,0 +1 @@
+# CONFIG_FB_VIRTUAL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VOODOO1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VOODOO1
new file mode 100644
index 0000000000000000000000000000000000000000..230c5eca286128cb05c72d541ee4301bd85ec42e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VOODOO1
@@ -0,0 +1 @@
+# CONFIG_FB_VOODOO1 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VT8623 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VT8623
new file mode 100644
index 0000000000000000000000000000000000000000..e8ebebc5fe678452b1765a4b0a5c031ae0b32153
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VT8623
@@ -0,0 +1 @@
+# CONFIG_FB_VT8623 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FDDI b/anolis/configs/L2-OPTIONAL/default/CONFIG_FDDI
new file mode 100644
index 0000000000000000000000000000000000000000..e13f968a5a790d2b40035bbcb0cc755d4bff265e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FDDI
@@ -0,0 +1 @@
+# CONFIG_FDDI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FEALNX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FEALNX
new file mode 100644
index 0000000000000000000000000000000000000000..4cca6a26d04f757450f624497fe112f93f0f4d36
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FEALNX
@@ -0,0 +1 @@
+# CONFIG_FEALNX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FIB_RULES b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIB_RULES
new file mode 100644
index 0000000000000000000000000000000000000000..0c0fd787522f94068d11481758087c78314824d2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIB_RULES
@@ -0,0 +1 @@
+CONFIG_FIB_RULES=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FIND_BIT_BENCHMARK b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIND_BIT_BENCHMARK
new file mode 100644
index 0000000000000000000000000000000000000000..93fe7e0390481a5cd50c1c8e161bc9a65cf49f14
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIND_BIT_BENCHMARK
@@ -0,0 +1 @@
+# CONFIG_FIND_BIT_BENCHMARK is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FIPS_SIGNATURE_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIPS_SIGNATURE_SELFTEST
new file mode 100644
index 0000000000000000000000000000000000000000..bd5282b3ceb16c3b6c851b3805e2861a38dda621
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIPS_SIGNATURE_SELFTEST
@@ -0,0 +1 @@
+# CONFIG_FIPS_SIGNATURE_SELFTEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FIREWIRE_NOSY b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIREWIRE_NOSY
new file mode 100644
index 0000000000000000000000000000000000000000..0b48c48d17962e11243dfb2ec62a2dc18d694f41
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIREWIRE_NOSY
@@ -0,0 +1 @@
+# CONFIG_FIREWIRE_NOSY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FIXED_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIXED_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..80379efd3cf99cd2a8b841081bf41c14d4e65abf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIXED_PHY
@@ -0,0 +1 @@
+CONFIG_FIXED_PHY=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FONTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONTS
new file mode 100644
index 0000000000000000000000000000000000000000..abe0213f932ffcb32a3aac32200ccecf4d349f0c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONTS
@@ -0,0 +1 @@
+# CONFIG_FONTS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x16 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x16
new file mode 100644
index 0000000000000000000000000000000000000000..aecbfb2b1ed7254f30fdb8cd6af5d14beb96c7c6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x16
@@ -0,0 +1 @@
+CONFIG_FONT_8x16=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x8
new file mode 100644
index 0000000000000000000000000000000000000000..6efb90c1daa74647231ff52acca537fb156dcb8a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x8
@@ -0,0 +1 @@
+CONFIG_FONT_8x8=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..89f62253375ff9fc68942a828559182a81f730ad
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_SUPPORT
@@ -0,0 +1 @@
+CONFIG_FONT_SUPPORT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FPGA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FPGA
new file mode 100644
index 0000000000000000000000000000000000000000..8bb6ca99e3bf360420bd36d23fbe8a49ddf5ad98
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FPGA
@@ -0,0 +1 @@
+# CONFIG_FPGA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER b/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
new file mode 100644
index 0000000000000000000000000000000000000000..91eda6cd3a62424bf1dc5a3ecf4ed5b605e9f657
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
@@ -0,0 +1 @@
+# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
new file mode 100644
index 0000000000000000000000000000000000000000..725b06c7d8e24357681f33a74e0f19bb8cdb0b26
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
@@ -0,0 +1 @@
+# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FREEZER b/anolis/configs/L2-OPTIONAL/default/CONFIG_FREEZER
new file mode 100644
index 0000000000000000000000000000000000000000..db6aab2a5722067aa6ad5615cca1224e2eb1a875
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FREEZER
@@ -0,0 +1 @@
+CONFIG_FREEZER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_DAX_PMD b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_DAX_PMD
new file mode 100644
index 0000000000000000000000000000000000000000..2345dbe2f7eb96f9dbc28b5258e01f28a417a13f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_DAX_PMD
@@ -0,0 +1 @@
+CONFIG_FS_DAX_PMD=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_ENCRYPTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_ENCRYPTION
new file mode 100644
index 0000000000000000000000000000000000000000..b36ec94f07604b8be2a99869d27de96367e9f9eb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_ENCRYPTION
@@ -0,0 +1 @@
+# CONFIG_FS_ENCRYPTION is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_IOMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_IOMAP
new file mode 100644
index 0000000000000000000000000000000000000000..d21093b7bcc5602141428fd301563de3cde644de
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_IOMAP
@@ -0,0 +1 @@
+CONFIG_FS_IOMAP=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_MBCACHE b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_MBCACHE
new file mode 100644
index 0000000000000000000000000000000000000000..6bfe922de885dff671f66c512286976b7f5b4dcc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_MBCACHE
@@ -0,0 +1 @@
+CONFIG_FS_MBCACHE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_POSIX_ACL b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_POSIX_ACL
new file mode 100644
index 0000000000000000000000000000000000000000..ac587b8e48d29fabf7bcdbda00ec692e8fe5696a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_POSIX_ACL
@@ -0,0 +1 @@
+CONFIG_FS_POSIX_ACL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_VERITY b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_VERITY
new file mode 100644
index 0000000000000000000000000000000000000000..1c1298830047391d71a996b848e1fca888543e51
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_VERITY
@@ -0,0 +1 @@
+# CONFIG_FS_VERITY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTL
new file mode 100644
index 0000000000000000000000000000000000000000..cc37e0b2bfb64115256b486a6f1acd1e9c2a9c21
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTL
@@ -0,0 +1 @@
+# CONFIG_FTL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_MCOUNT_RECORD b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_MCOUNT_RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..5849b3c45b70b3e7aa0a5f506514c871ef745a95
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_MCOUNT_RECORD
@@ -0,0 +1 @@
+CONFIG_FTRACE_MCOUNT_RECORD=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_STARTUP_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_STARTUP_TEST
new file mode 100644
index 0000000000000000000000000000000000000000..8e7db27579a8206e6972060f14db519720b4da49
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_STARTUP_TEST
@@ -0,0 +1 @@
+# CONFIG_FTRACE_STARTUP_TEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ALIGNMENT_4B b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ALIGNMENT_4B
new file mode 100644
index 0000000000000000000000000000000000000000..4e48d80c8f64a7a888a1b7cb6d67ef0bb0bbaa73
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ALIGNMENT_4B
@@ -0,0 +1 @@
+CONFIG_FUNCTION_ALIGNMENT_4B=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ERROR_INJECTION
b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ERROR_INJECTION new file mode 100644 index 0000000000000000000000000000000000000000..f6b81a84aff32f3b410b6a89c1716521ddc5ce13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ERROR_INJECTION @@ -0,0 +1 @@ +CONFIG_FUNCTION_ERROR_INJECTION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_GRAPH_RETVAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_GRAPH_RETVAL new file mode 100644 index 0000000000000000000000000000000000000000..6161ea515619a1716663873fcd4120f34c4eb799 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_GRAPH_RETVAL @@ -0,0 +1 @@ +# CONFIG_FUNCTION_GRAPH_RETVAL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FUN_ETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUN_ETH new file mode 100644 index 0000000000000000000000000000000000000000..ff715bf7dbe889d12ae3351b2306b1a5c95c1acc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUN_ETH @@ -0,0 +1 @@ +# CONFIG_FUN_ETH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FUTEX_PI b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUTEX_PI new file mode 100644 index 0000000000000000000000000000000000000000..80d8966e7a9a877cecfce3cf3ad0201b8f762336 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUTEX_PI @@ -0,0 +1 @@ +CONFIG_FUTEX_PI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FWNODE_MDIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_FWNODE_MDIO new file mode 100644 index 0000000000000000000000000000000000000000..c9e5bf427515969cdb98bfcd624493f7fa5eb355 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FWNODE_MDIO @@ -0,0 +1 @@ +CONFIG_FWNODE_MDIO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT new file mode 100644 index 0000000000000000000000000000000000000000..0f3f509c640cfa3db7131a75f1d6175603929ddb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT @@ -0,0 +1 @@ +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..87cc900a515238d2af3911b6e3bbb1c6045ae796 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_DEBUG @@ -0,0 +1 @@ +CONFIG_FW_LOADER_DEBUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_PAGED_BUF b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_PAGED_BUF new file mode 100644 index 0000000000000000000000000000000000000000..f515939aa3f7953949e58bf80ba93e85417f822b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_PAGED_BUF @@ -0,0 +1 @@ +CONFIG_FW_LOADER_PAGED_BUF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_SYSFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_SYSFS new file mode 100644 index 0000000000000000000000000000000000000000..b6548de023d3ce244fee33d63a16b0f249506769 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_SYSFS @@ -0,0 +1 @@ +CONFIG_FW_LOADER_SYSFS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_UPLOAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_UPLOAD new file mode 100644 index 0000000000000000000000000000000000000000..733915f1756acf07372557055a8ca344d1b32865 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_UPLOAD @@ -0,0 +1 @@ 
+CONFIG_FW_UPLOAD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GAMEPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_GAMEPORT new file mode 100644 index 0000000000000000000000000000000000000000..03c782bf39e70d8d9d0a36c880deb06131a5794f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GAMEPORT @@ -0,0 +1 @@ +# CONFIG_GAMEPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GARP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GARP new file mode 100644 index 0000000000000000000000000000000000000000..37a1a1b697eabdceec081857c15155c2e55d8ae7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GARP @@ -0,0 +1 @@ +CONFIG_GARP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC10_NO_ARRAY_BOUNDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC10_NO_ARRAY_BOUNDS new file mode 100644 index 0000000000000000000000000000000000000000..c4f1555e19d852cc8c127e12f238ad1f03652961 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC10_NO_ARRAY_BOUNDS @@ -0,0 +1 @@ +CONFIG_GCC10_NO_ARRAY_BOUNDS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGINS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGINS new file mode 100644 index 0000000000000000000000000000000000000000..178242a97f0b5d041f08834af525a4372dd94270 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGINS @@ -0,0 +1 @@ +CONFIG_GCC_PLUGINS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_LATENT_ENTROPY b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_LATENT_ENTROPY new file mode 100644 index 0000000000000000000000000000000000000000..4775521e9792f7d9e78783816da1743993730926 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_LATENT_ENTROPY @@ -0,0 +1 @@ +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_STACKLEAK b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_STACKLEAK new file mode 100644 index 0000000000000000000000000000000000000000..701c497716913923526bb4ec4d08d506997a0170 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_STACKLEAK @@ -0,0 +1 @@ +# CONFIG_GCC_PLUGIN_STACKLEAK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_VERSION new file mode 100644 index 0000000000000000000000000000000000000000..0b34ca5bf12c583477b7b7962703d4b49b365b4d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_VERSION @@ -0,0 +1 @@ +CONFIG_GCC_VERSION=200000 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_ALLOCATOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_ALLOCATOR new file mode 100644 index 0000000000000000000000000000000000000000..532e98c5134b0f2308889bf905078a39a04506e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_ALLOCATOR @@ -0,0 +1 @@ +CONFIG_GENERIC_ALLOCATOR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG new file mode 100644 index 0000000000000000000000000000000000000000..2c5f5f1da54ed1f114aafbb3f368cdbb3e1e0b22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG @@ -0,0 +1 @@ +CONFIG_GENERIC_BUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG_RELATIVE_POINTERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG_RELATIVE_POINTERS new file mode 100644 index 0000000000000000000000000000000000000000..13714e8647a1be74856dc98c116899abc23cc884 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG_RELATIVE_POINTERS @@ -0,0 +1 @@ +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CALIBRATE_DELAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CALIBRATE_DELAY new file mode 100644 index 0000000000000000000000000000000000000000..7670ace05c835824432b9dc6113e5870657c2139 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CALIBRATE_DELAY @@ -0,0 +1 @@ +CONFIG_GENERIC_CALIBRATE_DELAY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS new file mode 100644 index 0000000000000000000000000000000000000000..156cf712f0b9eaa50f4ef5c9dc4cde4128b674cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS @@ -0,0 +1 @@ +CONFIG_GENERIC_CLOCKEVENTS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS_BROADCAST b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS_BROADCAST new file mode 100644 index 0000000000000000000000000000000000000000..ffe0d1894806b9d9bb8f9516ae47b1a58e39a9cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS_BROADCAST @@ -0,0 +1 @@ +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_AUTOPROBE b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_AUTOPROBE new file mode 100644 index 0000000000000000000000000000000000000000..30af7a839fe1cd1e72bdc3361717fc1817423487 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_AUTOPROBE @@ -0,0 +1 @@ +CONFIG_GENERIC_CPU_AUTOPROBE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_VULNERABILITIES b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_VULNERABILITIES new file mode 100644 index 0000000000000000000000000000000000000000..66a48cca71078b14c8d6b093dc7da04cf4074430 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_VULNERABILITIES @@ -0,0 +1 @@ +CONFIG_GENERIC_CPU_VULNERABILITIES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_EARLY_IOREMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_EARLY_IOREMAP new file mode 100644 index 0000000000000000000000000000000000000000..56c8df711663c863b4a4e7674c56d1fff71b2c6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_EARLY_IOREMAP @@ -0,0 +1 @@ +CONFIG_GENERIC_EARLY_IOREMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK new file mode 100644 index 0000000000000000000000000000000000000000..69743b49cb15606b7a21cb2c17d7a22d141fdc03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_MIGRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_MIGRATION new file mode 100644 index 0000000000000000000000000000000000000000..79cc3d3473f9fd07ca19491c653760ea49092838 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_MIGRATION @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_MIGRATION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_MSI_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_MSI_IRQ new file mode 100644 index 0000000000000000000000000000000000000000..3c8d8e76789349fbed454226b87d928fd2b56036 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_MSI_IRQ @@ -0,0 +1 @@ +CONFIG_GENERIC_MSI_IRQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_NET_UTILS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_NET_UTILS new file mode 100644 index 0000000000000000000000000000000000000000..e4d79181a5b3fae042312fac45772b943fb3d268 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_NET_UTILS @@ -0,0 +1 @@ +CONFIG_GENERIC_NET_UTILS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PCI_IOMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PCI_IOMAP new file mode 100644 index 0000000000000000000000000000000000000000..31aaf92b387bdc8d5087b37c5e9e426c4c5db920 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PCI_IOMAP @@ -0,0 +1 @@ +CONFIG_GENERIC_PCI_IOMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PINCONF b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PINCONF new file mode 100644 index 0000000000000000000000000000000000000000..dededed2ddc5053b8ca451b7bc45c6ae4f9389d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PINCONF @@ -0,0 +1 @@ +CONFIG_GENERIC_PINCONF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PTDUMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PTDUMP new file mode 100644 index 0000000000000000000000000000000000000000..8924484ad9c57989767d25c8df2c29aede4a96a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PTDUMP @@ -0,0 +1 @@ +CONFIG_GENERIC_PTDUMP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_SMP_IDLE_THREAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_SMP_IDLE_THREAD new file mode 100644 index 0000000000000000000000000000000000000000..3af93e91c286bc2b74825535653730fe62bf3ead --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_SMP_IDLE_THREAD @@ -0,0 +1 @@ +CONFIG_GENERIC_SMP_IDLE_THREAD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNCPY_FROM_USER b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNCPY_FROM_USER new file mode 100644 index 0000000000000000000000000000000000000000..da6133f23c71dfba43a1dba906e16c963317628e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNCPY_FROM_USER @@ -0,0 +1 @@ +CONFIG_GENERIC_STRNCPY_FROM_USER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNLEN_USER b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNLEN_USER new file mode 100644 index 0000000000000000000000000000000000000000..3479709f71f8cde961dddbdcd9af4b4e85b68155 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNLEN_USER @@ -0,0 +1 @@ +CONFIG_GENERIC_STRNLEN_USER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TIME_VSYSCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TIME_VSYSCALL new file mode 100644 index 0000000000000000000000000000000000000000..e28ded4ea4c46be039392964f0fc68d902e481a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TIME_VSYSCALL @@ -0,0 +1 @@ +CONFIG_GENERIC_TIME_VSYSCALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TRACER new file mode 100644 index 0000000000000000000000000000000000000000..3597bf1a85773c1c767cb1964d35882b9bd37632 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TRACER @@ -0,0 +1 @@ +CONFIG_GENERIC_TRACER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENWQE 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENWQE new file mode 100644 index 0000000000000000000000000000000000000000..0d695246418945f7412c7c507fe1a2f792ac4ef8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENWQE @@ -0,0 +1 @@ +# CONFIG_GENWQE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB b/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB new file mode 100644 index 0000000000000000000000000000000000000000..7ad953de48eb4f93df1d49cf17909376a143e63c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB @@ -0,0 +1 @@ +CONFIG_GLOB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB_SELFTEST new file mode 100644 index 0000000000000000000000000000000000000000..8ee3343208185ad38052e73e60ff0ac23a63e969 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB_SELFTEST @@ -0,0 +1 @@ +# CONFIG_GLOB_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GNSS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GNSS new file mode 100644 index 0000000000000000000000000000000000000000..07c7233bf96c4bcacc149d7ad5533b9bfd8577cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GNSS @@ -0,0 +1 @@ +# CONFIG_GNSS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GOLDFISH b/anolis/configs/L2-OPTIONAL/default/CONFIG_GOLDFISH new file mode 100644 index 0000000000000000000000000000000000000000..570eec3f0716e9ce7eb478910f7bcf0435348690 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GOLDFISH @@ -0,0 +1 @@ +# CONFIG_GOLDFISH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GOOGLE_FIRMWARE b/anolis/configs/L2-OPTIONAL/default/CONFIG_GOOGLE_FIRMWARE new file mode 100644 index 0000000000000000000000000000000000000000..a9a15cf4139806d59ab7d0ddcbe0ec582d8bc0ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GOOGLE_FIRMWARE @@ -0,0 +1 @@ +# CONFIG_GOOGLE_FIRMWARE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB new file mode 100644 index 0000000000000000000000000000000000000000..7c7603d68e3e79aa776b00a3c01d2190112341fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB @@ -0,0 +1 @@ +CONFIG_GPIOLIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_FASTPATH_LIMIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_FASTPATH_LIMIT new file mode 100644 index 0000000000000000000000000000000000000000..09425777796ac76d67152bba6c4eacff385893c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_FASTPATH_LIMIT @@ -0,0 +1 @@ +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_IRQCHIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_IRQCHIP new file mode 100644 index 0000000000000000000000000000000000000000..48c78b42cd6192514f636e46504712aa6e670273 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_IRQCHIP @@ -0,0 +1 @@ +CONFIG_GPIOLIB_IRQCHIP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_ACPI new file mode 100644 index 0000000000000000000000000000000000000000..4c39fc0dde338daeadef981dc88fa53b5509c405 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_ACPI @@ -0,0 +1 @@ +CONFIG_GPIO_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AGGREGATOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AGGREGATOR new file mode 100644 index 
0000000000000000000000000000000000000000..71bc3505a35fc6884b4eeb55f25056923fdaf0a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AGGREGATOR @@ -0,0 +1 @@ +# CONFIG_GPIO_AGGREGATOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMDPT b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMDPT new file mode 100644 index 0000000000000000000000000000000000000000..04ac1ad2c10b5fa7ebf5ec307893d02bb6b5ab29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMDPT @@ -0,0 +1 @@ +CONFIG_GPIO_AMDPT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMD_FCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMD_FCH new file mode 100644 index 0000000000000000000000000000000000000000..6a7a2f22328cb9e116209b01db45061748d664bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMD_FCH @@ -0,0 +1 @@ +# CONFIG_GPIO_AMD_FCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_BT8XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_BT8XX new file mode 100644 index 0000000000000000000000000000000000000000..dbc524d7ced0ea1d716bff79826a5f3554ea07ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_BT8XX @@ -0,0 +1 @@ +# CONFIG_GPIO_BT8XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV new file mode 100644 index 0000000000000000000000000000000000000000..eb3c0e436697210a539b5880e73b5c2f955e8422 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV @@ -0,0 +1 @@ +CONFIG_GPIO_CDEV=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV_V1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV_V1 new file mode 100644 index 0000000000000000000000000000000000000000..2a4e60a576ae01345637bdc887dfff4316d8cf8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV_V1 @@ -0,0 +1 @@ +CONFIG_GPIO_CDEV_V1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_EXAR b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_EXAR new file mode 100644 index 0000000000000000000000000000000000000000..895a0888b304e7b2e0b18ddd187bfaafeacd4a7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_EXAR @@ -0,0 +1 @@ +# CONFIG_GPIO_EXAR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX3191X b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX3191X new file mode 100644 index 0000000000000000000000000000000000000000..b0cd128e84c560ca46a2b6ddce8ec0184fde4a05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX3191X @@ -0,0 +1 @@ +# CONFIG_GPIO_MAX3191X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7300 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7300 new file mode 100644 index 0000000000000000000000000000000000000000..27e781e577f8b5f66c294b572568c1f6636deb10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7300 @@ -0,0 +1 @@ +# CONFIG_GPIO_MAX7300 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7301 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7301 new file mode 100644 index 0000000000000000000000000000000000000000..cd127dd6beb345ee964323362a2f7814e95d14db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7301 @@ -0,0 +1 @@ +# CONFIG_GPIO_MAX7301 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX732X b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX732X new file mode 100644 index 
0000000000000000000000000000000000000000..690f870476ce2d92e89d1ca164b44043def3fd6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX732X @@ -0,0 +1 @@ +# CONFIG_GPIO_MAX732X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MB86S7X b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MB86S7X new file mode 100644 index 0000000000000000000000000000000000000000..1b3c1acc10e378f8fad5353337e253b29756147c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MB86S7X @@ -0,0 +1 @@ +# CONFIG_GPIO_MB86S7X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MC33880 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MC33880 new file mode 100644 index 0000000000000000000000000000000000000000..17f039cfc3b7833d05e29db0361db6b9b8cc558e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MC33880 @@ -0,0 +1 @@ +# CONFIG_GPIO_MC33880 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MOCKUP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MOCKUP new file mode 100644 index 0000000000000000000000000000000000000000..7f135a6ef04e0f2babc9297d586cbd6f5c464d48 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MOCKUP @@ -0,0 +1 @@ +# CONFIG_GPIO_MOCKUP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA953X b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA953X new file mode 100644 index 0000000000000000000000000000000000000000..ca0543fbb7842b3e1bd622223b18604985e5ef0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA953X @@ -0,0 +1 @@ +# CONFIG_GPIO_PCA953X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA9570 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA9570 new file mode 100644 index 0000000000000000000000000000000000000000..ab334325aed69541058509c3558eb5bf1f19f8d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA9570 @@ -0,0 +1 @@ +# CONFIG_GPIO_PCA9570 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCF857X b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCF857X new file mode 100644 index 0000000000000000000000000000000000000000..d58fab0742d654eb32b85ed2fa748b2289340740 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCF857X @@ -0,0 +1 @@ +# CONFIG_GPIO_PCF857X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCIE_IDIO_24 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCIE_IDIO_24 new file mode 100644 index 0000000000000000000000000000000000000000..6313cc61d8e3b44a732d2113b24de3d7afe0b76a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCIE_IDIO_24 @@ -0,0 +1 @@ +# CONFIG_GPIO_PCIE_IDIO_24 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCI_IDIO_16 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCI_IDIO_16 new file mode 100644 index 0000000000000000000000000000000000000000..2228f07877c4041db69df9ca77f5d9babba90e2d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCI_IDIO_16 @@ -0,0 +1 @@ +# CONFIG_GPIO_PCI_IDIO_16 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PISOSR b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PISOSR new file mode 100644 index 0000000000000000000000000000000000000000..0026e4baf77618448638cd9ccfd941f68f05fb57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PISOSR @@ -0,0 +1 @@ +# CONFIG_GPIO_PISOSR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_RDC321X 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_RDC321X new file mode 100644 index 0000000000000000000000000000000000000000..241293f19d339b3a04975a897c0780c34c8900df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_RDC321X @@ -0,0 +1 @@ +# CONFIG_GPIO_RDC321X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_TPIC2810 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_TPIC2810 new file mode 100644 index 0000000000000000000000000000000000000000..a522f61016badfd7b6393ac117d5329231fea1b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_TPIC2810 @@ -0,0 +1 @@ +# CONFIG_GPIO_TPIC2810 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_XRA1403 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_XRA1403 new file mode 100644 index 0000000000000000000000000000000000000000..c9567433fedb781b0ad45bb3885092b152837f74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_XRA1403 @@ -0,0 +1 @@ +# CONFIG_GPIO_XRA1403 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GRACE_PERIOD b/anolis/configs/L2-OPTIONAL/default/CONFIG_GRACE_PERIOD new file mode 100644 index 0000000000000000000000000000000000000000..eeb72ba895833a8f0e00cd581a711e1fb6279a6e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GRACE_PERIOD @@ -0,0 +1 @@ +CONFIG_GRACE_PERIOD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GREENASIA_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_GREENASIA_FF new file mode 100644 index 0000000000000000000000000000000000000000..def920e42e88d09cdedda91da00d0765c8556874 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GREENASIA_FF @@ -0,0 +1 @@ +# CONFIG_GREENASIA_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GREYBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GREYBUS new file mode 100644 index 0000000000000000000000000000000000000000..7fa945c3364f7a7f8e8e9756d77a18414af60c49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GREYBUS @@ -0,0 +1 @@ +# CONFIG_GREYBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GRO_CELLS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GRO_CELLS new file mode 100644 index 0000000000000000000000000000000000000000..1ffae25c20de2c6379015d26e8b5f8f1256690e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GRO_CELLS @@ -0,0 +1 @@ +CONFIG_GRO_CELLS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GTP new file mode 100644 index 0000000000000000000000000000000000000000..ec01f6d28b7d2162645ac6e7416f1a1623249931 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GTP @@ -0,0 +1 @@ +# CONFIG_GTP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_GVE new file mode 100644 index 0000000000000000000000000000000000000000..26aadc46d3c8244a7eb3c3ffe0193cc72415aba0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GVE @@ -0,0 +1 @@ +CONFIG_GVE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAMRADIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAMRADIO new file mode 100644 index 0000000000000000000000000000000000000000..477ae40dbb27aad6f64f6635959a7e0f3565f25e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAMRADIO @@ -0,0 +1 @@ +# CONFIG_HAMRADIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDIRQS_SW_RESEND b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDIRQS_SW_RESEND new file mode 100644 index 
0000000000000000000000000000000000000000..4e6810c6ee17513cdfe78a2501927c0309eacd85 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDIRQS_SW_RESEND @@ -0,0 +1 @@ +CONFIG_HARDIRQS_SW_RESEND=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_ARCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_ARCH new file mode 100644 index 0000000000000000000000000000000000000000..39ea0c40bd9c99a58e6a758370434c41df2810a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_ARCH @@ -0,0 +1 @@ +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_BUDDY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_BUDDY new file mode 100644 index 0000000000000000000000000000000000000000..6cab318853368147eca98c94c78919f16363e8bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_BUDDY @@ -0,0 +1 @@ +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER new file mode 100644 index 0000000000000000000000000000000000000000..5737b3d6e445a2f8e07910b39478273a829dc053 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER @@ -0,0 +1 @@ +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PERF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PERF new file mode 100644 index 0000000000000000000000000000000000000000..e2ce4db33460023ec6cfbab5ad4bf0573857d645 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PERF @@ -0,0 +1 @@ +CONFIG_HARDLOCKUP_DETECTOR_PERF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY new file mode 100644 index 0000000000000000000000000000000000000000..e0fcf6e9163c2c0df1d60109f6da59f38e6c16f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY @@ -0,0 +1 @@ +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_DMA new file mode 100644 index 0000000000000000000000000000000000000000..074fcade6e38ca1fadc7e6a6cdc200a4fc7def0d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_DMA @@ -0,0 +1 @@ +CONFIG_HAS_DMA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOMEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOMEM new file mode 100644 index 0000000000000000000000000000000000000000..2e73c44ae51dc720a3d76bee78cecf185c7cc01b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOMEM @@ -0,0 +1 @@ +CONFIG_HAS_IOMEM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT new file mode 100644 index 0000000000000000000000000000000000000000..3fb32343e428c380d510d96b88f5e62448f4f014 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT @@ -0,0 +1 @@ +CONFIG_HAS_IOPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT_MAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT_MAP new file mode 100644 index 0000000000000000000000000000000000000000..26e978eb9599b93f36aaeb0042820070aacf0c06 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT_MAP @@ -0,0 +1 @@ +CONFIG_HAS_IOPORT_MAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ACPI_APEI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ACPI_APEI new file mode 100644 index 0000000000000000000000000000000000000000..f6e2adf17547b1accd0cdc9585c55cf949d13355 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ACPI_APEI @@ -0,0 +1 @@ +CONFIG_HAVE_ACPI_APEI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ALIGNED_STRUCT_PAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ALIGNED_STRUCT_PAGE new file mode 100644 index 0000000000000000000000000000000000000000..c7b4aa5012062b91b829c20f150f50af13bc0a9b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ALIGNED_STRUCT_PAGE @@ -0,0 +1 @@ +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_AUDITSYSCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_AUDITSYSCALL new file mode 100644 index 0000000000000000000000000000000000000000..a7f74c1e403f046c025750f58221fb3b50d587a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_AUDITSYSCALL @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_AUDITSYSCALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMALLOC new file mode 100644 index 0000000000000000000000000000000000000000..99b8b5b62fa0a26b76267f92746ab0d65c4d07b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMALLOC @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMAP new file mode 100644 index 0000000000000000000000000000000000000000..424d3e46c3bd7c377db3bddd943a3dbe00663c05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMAP @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_HUGE_VMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL new file mode 100644 index 0000000000000000000000000000000000000000..f8e9b05d5647316f172afdf92c82acea38c7c1b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_JUMP_LABEL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE new file mode 100644 index 0000000000000000000000000000000000000000..cf9ecf703edb31ab571f3898ee19d1787d9c386a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN new file mode 100644 index 0000000000000000000000000000000000000000..c91df1bf0c700d2e0e157c842fe89fcf8cd6070c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_KASAN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN_VMALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN_VMALLOC new file mode 100644 index 0000000000000000000000000000000000000000..74fe2bd0b7f1f6cd2c256972bd6f2cb7cec1c879 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN_VMALLOC @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KFENCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KFENCE new file mode 100644 index 0000000000000000000000000000000000000000..30e3e70d573a848bed6dff391f21bfd9e8257898 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KFENCE @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_KFENCE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KGDB b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KGDB new file mode 100644 index 0000000000000000000000000000000000000000..bab9449945db3b1663ac5a99254921cc4d4aa585 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KGDB @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_KGDB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_BITS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_BITS new file mode 100644 index 0000000000000000000000000000000000000000..7ad15d3c444ee19ec1190b0cf3402bb5cf90bbb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_BITS @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS new file mode 100644 index 0000000000000000000000000000000000000000..c603b80ac3392b4631c0ee4332d0c7d229327c6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_PREL32_RELOCATIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_PREL32_RELOCATIONS new file mode 100644 index 0000000000000000000000000000000000000000..c7b2979290b6c6ca3e916dd478b951c113120274 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_PREL32_RELOCATIONS @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP new file mode 100644 index 0000000000000000000000000000000000000000..3e2f16ecb858d3c75d3f7c1f3d5512b6ffafe7b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_SECCOMP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP_FILTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP_FILTER new file mode 100644 index 0000000000000000000000000000000000000000..c3a4f1bce6646dfaaf066fc990b9f7b86da533b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP_FILTER @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_STACKLEAK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_STACKLEAK new file mode 100644 index 0000000000000000000000000000000000000000..cf87c8026f6dc6909914fe3c6ddbd0400e7acf7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_STACKLEAK @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_STACKLEAK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST new file mode 100644 index 0000000000000000000000000000000000000000..328ddf968b73a487b8b55f59100a792c22157f2f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRACEHOOK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRACEHOOK new file mode 100644 index 0000000000000000000000000000000000000000..d834130d89f1f8327d9b6979ffe3171ba6b24c1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRACEHOOK @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_TRACEHOOK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE new file mode 100644 index 0000000000000000000000000000000000000000..6e840a347b8a8810418ad1f380fc4fa7f6ce4842 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_USERFAULTFD_MINOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_USERFAULTFD_MINOR new file mode 100644 index 0000000000000000000000000000000000000000..250507e2d4e68e1d0d448fcea2e461c38c791cf2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_USERFAULTFD_MINOR @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_VMAP_STACK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_VMAP_STACK new file mode 100644 index 0000000000000000000000000000000000000000..6fdf79325aa48b38c3166a7fe47ef8143fb24abb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_VMAP_STACK @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_VMAP_STACK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ASM_MODVERSIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ASM_MODVERSIONS new file mode 100644 index 0000000000000000000000000000000000000000..3965241145021bb0ff0664791104fa30f1c45bc1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ASM_MODVERSIONS @@ -0,0 +1 @@ +CONFIG_HAVE_ASM_MODVERSIONS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK new file mode 100644 index 0000000000000000000000000000000000000000..d1e4deb60dacf17f1eba6bfe22979e1af28ce585 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK @@ -0,0 +1 @@ +CONFIG_HAVE_CLK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK_PREPARE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK_PREPARE new file mode 100644 index 0000000000000000000000000000000000000000..1ffe5a9b6a73b976a3a4175f55948d3b08be180b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK_PREPARE @@ -0,0 +1 @@ +CONFIG_HAVE_CLK_PREPARE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_DOUBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_DOUBLE new file mode 100644 index 0000000000000000000000000000000000000000..bbb12794a56ba9feb8d924b648a66b633c899568 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_DOUBLE @@ -0,0 +1 @@ +CONFIG_HAVE_CMPXCHG_DOUBLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_LOCAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_LOCAL new file mode 100644 index 0000000000000000000000000000000000000000..924e3b1cecb545a484f6e132e29f6965fd3788a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_LOCAL @@ -0,0 +1 @@ +CONFIG_HAVE_CMPXCHG_LOCAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CONTEXT_TRACKING_USER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CONTEXT_TRACKING_USER new file mode 
100644 index 0000000000000000000000000000000000000000..50c186f9d4177e6517e673b4db5936092c04d221 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CONTEXT_TRACKING_USER @@ -0,0 +1 @@ +CONFIG_HAVE_CONTEXT_TRACKING_USER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_C_RECORDMCOUNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_C_RECORDMCOUNT new file mode 100644 index 0000000000000000000000000000000000000000..e5e5b7aea3b2b369c712fe6480a6e8ac8d742b54 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_C_RECORDMCOUNT @@ -0,0 +1 @@ +CONFIG_HAVE_C_RECORDMCOUNT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DEBUG_KMEMLEAK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DEBUG_KMEMLEAK new file mode 100644 index 0000000000000000000000000000000000000000..fc41260656e48f8a48bf6e11c2ccfae55ee92ba5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DEBUG_KMEMLEAK @@ -0,0 +1 @@ +CONFIG_HAVE_DEBUG_KMEMLEAK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DMA_CONTIGUOUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DMA_CONTIGUOUS new file mode 100644 index 0000000000000000000000000000000000000000..2d0cab411cc2d7c8cd97a0c4bf5d13a48eae837e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DMA_CONTIGUOUS @@ -0,0 +1 @@ +CONFIG_HAVE_DMA_CONTIGUOUS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE new file mode 100644 index 0000000000000000000000000000000000000000..392a9fcedbf5a1a8c4ea8c26dac3191db82ca785 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE @@ -0,0 +1 @@ +CONFIG_HAVE_DYNAMIC_FTRACE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS new file mode 100644 index 0000000000000000000000000000000000000000..97a77a3b1f2766116837518d5005844f6ca61d3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS @@ -0,0 +1 @@ +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS new file mode 100644 index 0000000000000000000000000000000000000000..79483bdeefca399f65a493da13aff1e447ac833c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS @@ -0,0 +1 @@ +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EBPF_JIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EBPF_JIT new file mode 100644 index 0000000000000000000000000000000000000000..29ba64074d88786f78c062f83f79addacde88430 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EBPF_JIT @@ -0,0 +1 @@ +CONFIG_HAVE_EBPF_JIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS new file mode 100644 index 0000000000000000000000000000000000000000..eb414b6fa153b0ab2814b73e5589c3fb4b02718a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS @@ -0,0 +1 @@ +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FAST_GUP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FAST_GUP new file mode 100644 index 
0000000000000000000000000000000000000000..042e83df6c47c02f372e0cf350f974e7558c72d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FAST_GUP @@ -0,0 +1 @@ +CONFIG_HAVE_FAST_GUP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FTRACE_MCOUNT_RECORD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FTRACE_MCOUNT_RECORD new file mode 100644 index 0000000000000000000000000000000000000000..f3fbc8cd56a65b368e3f6c55a9318d69a5fd087d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FTRACE_MCOUNT_RECORD @@ -0,0 +1 @@ +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ARG_ACCESS_API b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ARG_ACCESS_API new file mode 100644 index 0000000000000000000000000000000000000000..f6cc5601c93b31fcb02095214344d203f7a76f38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ARG_ACCESS_API @@ -0,0 +1 @@ +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ERROR_INJECTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ERROR_INJECTION new file mode 100644 index 0000000000000000000000000000000000000000..148288801d65a6f9ebe89b1f8a3e40a30460f639 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ERROR_INJECTION @@ -0,0 +1 @@ +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_RETVAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_RETVAL new file mode 100644 index 0000000000000000000000000000000000000000..b4ff36b365d9cdfe4190fe11a1758adcb8202dfa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_RETVAL @@ -0,0 +1 @@ +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_TRACER new file mode 100644 index 0000000000000000000000000000000000000000..2c1cd9480b32aff549f0480967c242795cbacf13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_TRACER @@ -0,0 +1 @@ +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_TRACER new file mode 100644 index 0000000000000000000000000000000000000000..856fbeef149959d1779b4cdec431b773e82fdc90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_TRACER @@ -0,0 +1 @@ +CONFIG_HAVE_FUNCTION_TRACER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GCC_PLUGINS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GCC_PLUGINS new file mode 100644 index 0000000000000000000000000000000000000000..e6906ae85050f8787b030790fe1d66b0482b3d55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GCC_PLUGINS @@ -0,0 +1 @@ +CONFIG_HAVE_GCC_PLUGINS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GENERIC_VDSO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GENERIC_VDSO new file mode 100644 index 0000000000000000000000000000000000000000..6e6d74a3d352a7f98ba0eb408419c6b4be4adfe4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GENERIC_VDSO @@ -0,0 +1 @@ +CONFIG_HAVE_GENERIC_VDSO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY new file mode 100644 index 
0000000000000000000000000000000000000000..7b43eb3981d21aa3f2f159562062696938399d68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY @@ -0,0 +1 @@ +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF new file mode 100644 index 0000000000000000000000000000000000000000..55bb31912950dedbf06bbc6ed512f6e21c4f24c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF @@ -0,0 +1 @@ +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HW_BREAKPOINT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HW_BREAKPOINT new file mode 100644 index 0000000000000000000000000000000000000000..def43335c4fa711bef43739d711be3a6da3aa782 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HW_BREAKPOINT @@ -0,0 +1 @@ +CONFIG_HAVE_HW_BREAKPOINT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IMA_KEXEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IMA_KEXEC new file mode 100644 index 0000000000000000000000000000000000000000..04187ebe8f32865b98a4a9c48391505b488d5cde --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IMA_KEXEC @@ -0,0 +1 @@ +CONFIG_HAVE_IMA_KEXEC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IOREMAP_PROT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IOREMAP_PROT new file mode 100644 index 0000000000000000000000000000000000000000..77192b88488a239cb7d875679aaace416ec80c3b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IOREMAP_PROT @@ -0,0 +1 @@ +CONFIG_HAVE_IOREMAP_PROT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IRQ_TIME_ACCOUNTING b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IRQ_TIME_ACCOUNTING new file mode 100644 index 0000000000000000000000000000000000000000..b8327b498ddc32fae8db33765590aa9c3704a928 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IRQ_TIME_ACCOUNTING @@ -0,0 +1 @@ +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KCSAN_COMPILER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KCSAN_COMPILER new file mode 100644 index 0000000000000000000000000000000000000000..875afb1d43d5b27ae017c0b3bc5c5419b7d236f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KCSAN_COMPILER @@ -0,0 +1 @@ +CONFIG_HAVE_KCSAN_COMPILER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KPROBES b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KPROBES new file mode 100644 index 0000000000000000000000000000000000000000..92c5cb62f5dfb23b12ee5a10c692f9a926c6562b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KPROBES @@ -0,0 +1 @@ +CONFIG_HAVE_KPROBES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KRETPROBES b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KRETPROBES new file mode 100644 index 0000000000000000000000000000000000000000..c15a04c9f32b7aed752e66580a3014dbcfa3d50e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KRETPROBES @@ -0,0 +1 @@ +CONFIG_HAVE_KRETPROBES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM new file mode 100644 index 0000000000000000000000000000000000000000..0b2f8920a3a8c43cfd8caa306fb2670afd66999d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM @@ -0,0 +1 @@ +CONFIG_HAVE_KVM=y diff 
--git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT new file mode 100644 index 0000000000000000000000000000000000000000..552732f475e1669c40fade467b85a04385c2c251 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING new file mode 100644 index 0000000000000000000000000000000000000000..d0cfa603f0964b5b535debb6d19787279e8ca9b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_DIRTY_RING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL new file mode 100644 index 0000000000000000000000000000000000000000..fd0862c5da33bff062e5feea05f0dfa733062e4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_EVENTFD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_EVENTFD new file mode 100644 index 0000000000000000000000000000000000000000..c64e0b862e8c47319ba061f797eff1d1ecabf4de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_EVENTFD @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_EVENTFD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQCHIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQCHIP new file mode 100644 index 0000000000000000000000000000000000000000..520c556af7ff017d35061d697506ab1ac1e82584 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQCHIP @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_IRQCHIP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQFD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQFD new file mode 100644 index 0000000000000000000000000000000000000000..7a18c8d5c636e6b07636bacf2bfd8f2555a6611d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQFD @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_IRQFD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_BYPASS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_BYPASS new file mode 100644 index 0000000000000000000000000000000000000000..937b9171e08486ec8a26a8e61981978d24d70f1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_BYPASS @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_IRQ_BYPASS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_ROUTING b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_ROUTING new file mode 100644 index 0000000000000000000000000000000000000000..5beab811773d436a29edd84c956d17df00595406 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_ROUTING @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_IRQ_ROUTING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_MSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_MSI new file mode 100644 index 0000000000000000000000000000000000000000..57f155750e49ad9dcb5b4b911bb51cfbc7a36047 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_MSI @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_MSI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOD_ARCH_SPECIFIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOD_ARCH_SPECIFIC new file mode 100644 index 
0000000000000000000000000000000000000000..d110694044a2732265cc273f5d869321ae55dad4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOD_ARCH_SPECIFIC @@ -0,0 +1 @@ +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PMD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PMD new file mode 100644 index 0000000000000000000000000000000000000000..004d4f599f6f656159fcc079b27f412a0460d750 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PMD @@ -0,0 +1 @@ +CONFIG_HAVE_MOVE_PMD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PUD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PUD new file mode 100644 index 0000000000000000000000000000000000000000..12381366d52037090d1b622d5858bbc1ec77e3ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PUD @@ -0,0 +1 @@ +CONFIG_HAVE_MOVE_PUD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_NMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_NMI new file mode 100644 index 0000000000000000000000000000000000000000..e5c6c28a242dfbeecaf76a5a76925361ad352179 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_NMI @@ -0,0 +1 @@ +CONFIG_HAVE_NMI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PCI new file mode 100644 index 0000000000000000000000000000000000000000..963b18208caa2ce68c859299b08e227821ffbe95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PCI @@ -0,0 +1 @@ +CONFIG_HAVE_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS new file mode 100644 index 0000000000000000000000000000000000000000..1ac8ca88d415274a3ca2469d756204e29978b4cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS @@ -0,0 +1 @@ +CONFIG_HAVE_PERF_EVENTS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS_NMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS_NMI new file mode 100644 index 0000000000000000000000000000000000000000..90edb6ad071525fbd741cdf96f3adf5663fa489b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS_NMI @@ -0,0 +1 @@ +CONFIG_HAVE_PERF_EVENTS_NMI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_REGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_REGS new file mode 100644 index 0000000000000000000000000000000000000000..300ba387b8924fc0dfef42636dd9e606d56ad9dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_REGS @@ -0,0 +1 @@ +CONFIG_HAVE_PERF_REGS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_USER_STACK_DUMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_USER_STACK_DUMP new file mode 100644 index 0000000000000000000000000000000000000000..64793851e749cf0653cbbc21c3d0fcc152a2ee4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_USER_STACK_DUMP @@ -0,0 +1 @@ +CONFIG_HAVE_PERF_USER_STACK_DUMP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK new file mode 100644 index 0000000000000000000000000000000000000000..2fc3d0500feda12e95ef647345308f88123d97fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK @@ -0,0 +1 @@ +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PREEMPT_DYNAMIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PREEMPT_DYNAMIC new file mode 100644 index 0000000000000000000000000000000000000000..61099041ff0218799b63c59fc3da4810abf3987a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PREEMPT_DYNAMIC @@ -0,0 +1 @@ +CONFIG_HAVE_PREEMPT_DYNAMIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_REGS_AND_STACK_ACCESS_API b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_REGS_AND_STACK_ACCESS_API new file mode 100644 index 0000000000000000000000000000000000000000..0e0c363403e51010f7948319e60fcfd92e71ed06 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_REGS_AND_STACK_ACCESS_API @@ -0,0 +1 @@ +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_RSEQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_RSEQ new file mode 100644 index 0000000000000000000000000000000000000000..8a24f5d4f59deb5af7449b8b10c0a51077a317f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_RSEQ @@ -0,0 +1 @@ +CONFIG_HAVE_RSEQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT new file mode 100644 index 0000000000000000000000000000000000000000..dc29cb2e93bc96fb310bd71826ea8680b4e47759 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT @@ -0,0 +1 @@ +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI new file mode 100644 index 0000000000000000000000000000000000000000..e162169a9cc04637a2e72ed2820b8d974ba9d9a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI @@ -0,0 +1 @@ +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SCHED_AVG_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SCHED_AVG_IRQ new file mode 100644 index 0000000000000000000000000000000000000000..b6cf7a74d5fde27ee91e96abfbf51a14e1b565f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SCHED_AVG_IRQ @@ -0,0 +1 @@ +CONFIG_HAVE_SCHED_AVG_IRQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SETUP_PER_CPU_AREA b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SETUP_PER_CPU_AREA new file mode 100644 index 0000000000000000000000000000000000000000..355eade795d74ccaef7604e3ec25f469ec874db6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SETUP_PER_CPU_AREA @@ -0,0 +1 @@ +CONFIG_HAVE_SETUP_PER_CPU_AREA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK new file mode 100644 index 0000000000000000000000000000000000000000..7cd0e74f6347c8eaefd7f0baee4bdb582c885c6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK @@ -0,0 +1 @@ +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_STACKPROTECTOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_STACKPROTECTOR new file mode 100644 index 0000000000000000000000000000000000000000..f79989b20543a8f970bf2183f62d9de0c4abd2e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_STACKPROTECTOR @@ -0,0 +1 @@ +CONFIG_HAVE_STACKPROTECTOR=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SYSCALL_TRACEPOINTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SYSCALL_TRACEPOINTS new file mode 100644 index 0000000000000000000000000000000000000000..85483c6bea7d94735e471a212bbe14f9c3aec54e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SYSCALL_TRACEPOINTS @@ -0,0 +1 @@ +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_UID16 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_UID16 new file mode 100644 index 0000000000000000000000000000000000000000..48d2bdb947dc71deae53354f7696bd3f6261ad45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_UID16 @@ -0,0 +1 @@ +CONFIG_HAVE_UID16=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN new file mode 100644 index 0000000000000000000000000000000000000000..1fdb561060ecf9eff21b7cad9f0137b9f7822c39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN @@ -0,0 +1 @@ +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC new file mode 100644 index 0000000000000000000000000000000000000000..c9224b642b17e7f77423245459404121783466bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC @@ -0,0 +1 @@ +CONFIG_HDLC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_CISCO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_CISCO new file mode 100644 index 0000000000000000000000000000000000000000..4319985ec7b7ef360d591277a319020934cc5de4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_CISCO @@ -0,0 +1 @@ +CONFIG_HDLC_CISCO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_FR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_FR new file mode 100644 index 0000000000000000000000000000000000000000..8eba85d222d0b38409bc1bfcaba709870628d535 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_FR @@ -0,0 +1 @@ +CONFIG_HDLC_FR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_PPP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_PPP new file mode 100644 index 0000000000000000000000000000000000000000..509257a9b1a743eaf218eec3481b6e0170c40a73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_PPP @@ -0,0 +1 @@ +CONFIG_HDLC_PPP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW new file mode 100644 index 0000000000000000000000000000000000000000..890580afb83ec3c19ebc58236a79b5e4074ea66c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW @@ -0,0 +1 @@ +CONFIG_HDLC_RAW=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW_ETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW_ETH new file mode 100644 index 0000000000000000000000000000000000000000..0ff29fb976a4ba4436c3ab1a0c6709f465f1eabe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW_ETH @@ -0,0 +1 @@ +# CONFIG_HDLC_RAW_ETH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HFSPLUS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HFSPLUS_FS new file mode 100644 index 0000000000000000000000000000000000000000..18720556cc7199c8d1d6a62fd06a3c2c6e632d9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HFSPLUS_FS @@ -0,0 +1 @@ +# CONFIG_HFSPLUS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HFS_FS 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..0e53a5809041797299beb07371a1895fbdbca506 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HFS_FS @@ -0,0 +1 @@ +# CONFIG_HFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HIDRAW b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIDRAW new file mode 100644 index 0000000000000000000000000000000000000000..bbd271ab1129e045cf288642dc320f4d9a7fd691 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIDRAW @@ -0,0 +1 @@ +CONFIG_HIDRAW=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_A4TECH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_A4TECH new file mode 100644 index 0000000000000000000000000000000000000000..bc5b3baabf1fc47fa76d5b887d904317319be5fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_A4TECH @@ -0,0 +1 @@ +CONFIG_HID_A4TECH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACCUTOUCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACCUTOUCH new file mode 100644 index 0000000000000000000000000000000000000000..8fa02a6c7142dd0bd3cd2e2f6ab17b6c099cbe37 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACCUTOUCH @@ -0,0 +1 @@ +# CONFIG_HID_ACCUTOUCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX new file mode 100644 index 0000000000000000000000000000000000000000..4964a93c2a035d39835da2cd495ebadec2260dc9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX @@ -0,0 +1 @@ +CONFIG_HID_ACRUX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX_FF new file mode 100644 index 0000000000000000000000000000000000000000..60ec856234ea62c6f7bf8c5d650a6d148216225d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX_FF @@ -0,0 +1 @@ +# CONFIG_HID_ACRUX_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLE new file mode 100644 index 0000000000000000000000000000000000000000..cf025b274cc15cf14b5d3d5e715b30ad68acb970 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLE @@ -0,0 +1 @@ +CONFIG_HID_APPLE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLEIR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLEIR new file mode 100644 index 0000000000000000000000000000000000000000..6e8cc4a50ce221cdec667690dd51ca36f026195b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLEIR @@ -0,0 +1 @@ +CONFIG_HID_APPLEIR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_AUREAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_AUREAL new file mode 100644 index 0000000000000000000000000000000000000000..c639c7db12da9f30c64eeb5b26f8b11f8f2cd1fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_AUREAL @@ -0,0 +1 @@ +CONFIG_HID_AUREAL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BATTERY_STRENGTH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BATTERY_STRENGTH new file mode 100644 index 0000000000000000000000000000000000000000..46efe6a265f9c873f2b52906676c64255d1a053a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BATTERY_STRENGTH @@ -0,0 +1 @@ +CONFIG_HID_BATTERY_STRENGTH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BELKIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BELKIN new file mode 100644 index 
0000000000000000000000000000000000000000..62b7e7ed7c57cd7471b9cf17884318984e18121e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BELKIN @@ -0,0 +1 @@ +CONFIG_HID_BELKIN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BETOP_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BETOP_FF new file mode 100644 index 0000000000000000000000000000000000000000..61c9a90cfecadd3a5d96a7e0202fe9fe324367f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BETOP_FF @@ -0,0 +1 @@ +CONFIG_HID_BETOP_FF=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BIGBEN_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BIGBEN_FF new file mode 100644 index 0000000000000000000000000000000000000000..e210fd5e837de1eb71f9a7bbb95f55d88f95d27f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BIGBEN_FF @@ -0,0 +1 @@ +# CONFIG_HID_BIGBEN_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BPF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BPF new file mode 100644 index 0000000000000000000000000000000000000000..c2590759eeba0695299d0df83742fc26fc7e91bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BPF @@ -0,0 +1 @@ +# CONFIG_HID_BPF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHERRY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHERRY new file mode 100644 index 0000000000000000000000000000000000000000..160f0d2006dcf57ca4a60f4dcd0eeff4510c43a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHERRY @@ -0,0 +1 @@ +CONFIG_HID_CHERRY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHICONY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHICONY new file mode 100644 index 0000000000000000000000000000000000000000..c1d156579b8248e8de4527a53d55fa0b21a04a87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHICONY @@ -0,0 +1 @@ +CONFIG_HID_CHICONY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CORSAIR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CORSAIR new file mode 100644 index 0000000000000000000000000000000000000000..a169a44cecaa7f7134f7cddbbacd78fca9eb5bf0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CORSAIR @@ -0,0 +1 @@ +CONFIG_HID_CORSAIR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_COUGAR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_COUGAR new file mode 100644 index 0000000000000000000000000000000000000000..20f10861eb9366e0baf625ffb6167d4717ac2f6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_COUGAR @@ -0,0 +1 @@ +# CONFIG_HID_COUGAR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CP2112 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CP2112 new file mode 100644 index 0000000000000000000000000000000000000000..3f9425d1d955779599ac9f716bc0af9fd14bf37b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CP2112 @@ -0,0 +1 @@ +# CONFIG_HID_CP2112 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CREATIVE_SB0540 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CREATIVE_SB0540 new file mode 100644 index 0000000000000000000000000000000000000000..ce52dd6a4540eb240f3a3b4b140299089a68949a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CREATIVE_SB0540 @@ -0,0 +1 @@ +# CONFIG_HID_CREATIVE_SB0540 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CYPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CYPRESS new file mode 100644 index 
0000000000000000000000000000000000000000..8e9d3427e513be86bc24427b2455e7e5764ff8f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CYPRESS @@ -0,0 +1 @@ +CONFIG_HID_CYPRESS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_DRAGONRISE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_DRAGONRISE new file mode 100644 index 0000000000000000000000000000000000000000..284f173cb235442ca72dc4da375dedb9f2fdeaea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_DRAGONRISE @@ -0,0 +1 @@ +CONFIG_HID_DRAGONRISE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELAN new file mode 100644 index 0000000000000000000000000000000000000000..78d5680c8175ff415251d0afb4a3459c359928ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELAN @@ -0,0 +1 @@ +CONFIG_HID_ELAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELECOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELECOM new file mode 100644 index 0000000000000000000000000000000000000000..6257669d8fee8e6e410e369362c644686c32b76c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELECOM @@ -0,0 +1 @@ +CONFIG_HID_ELECOM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELO new file mode 100644 index 0000000000000000000000000000000000000000..96a4baae71b523d4141106257a734be8fd91dad0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELO @@ -0,0 +1 @@ +CONFIG_HID_ELO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EMS_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EMS_FF new file mode 100644 index 0000000000000000000000000000000000000000..6c3035d84aeb92684d4c3ed06e6ffcb52021fe97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EMS_FF @@ -0,0 +1 @@ +# CONFIG_HID_EMS_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EVISION b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EVISION new file mode 100644 index 0000000000000000000000000000000000000000..47e6dace37ab6e59d34b34a7e958695f490cf932 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EVISION @@ -0,0 +1 @@ +# CONFIG_HID_EVISION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EZKEY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EZKEY new file mode 100644 index 0000000000000000000000000000000000000000..ddb013273c187ffadbd543216fc28547b802fabe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EZKEY @@ -0,0 +1 @@ +CONFIG_HID_EZKEY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_FT260 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_FT260 new file mode 100644 index 0000000000000000000000000000000000000000..23db7a2548030c35903d25b8baf725138d3b8694 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_FT260 @@ -0,0 +1 @@ +# CONFIG_HID_FT260 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GEMBIRD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GEMBIRD new file mode 100644 index 0000000000000000000000000000000000000000..30a71336e2fadffb0a319925e9ff0f2923b75fbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GEMBIRD @@ -0,0 +1 @@ +CONFIG_HID_GEMBIRD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GENERIC new file mode 100644 index 0000000000000000000000000000000000000000..a2b8a6c4a44f1ef267aac077a55ff41a7fc36208 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GENERIC @@ -0,0 +1 @@ +CONFIG_HID_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GFRM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GFRM new file mode 100644 index 0000000000000000000000000000000000000000..7d79dc791837c3d41d6ec80dd35492ae27f0cadd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GFRM @@ -0,0 +1 @@ +CONFIG_HID_GFRM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GLORIOUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GLORIOUS new file mode 100644 index 0000000000000000000000000000000000000000..2619055d095eb25fe89cd5bc85c8226fefcac0dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GLORIOUS @@ -0,0 +1 @@ +# CONFIG_HID_GLORIOUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GOOGLE_STADIA_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GOOGLE_STADIA_FF new file mode 100644 index 0000000000000000000000000000000000000000..52519b024e8a95b863e47c70038340322306895c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GOOGLE_STADIA_FF @@ -0,0 +1 @@ +# CONFIG_HID_GOOGLE_STADIA_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GREENASIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GREENASIA new file mode 100644 index 0000000000000000000000000000000000000000..faed7bf9d9619ac86a942982622a6208b34e0c26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GREENASIA @@ -0,0 +1 @@ +CONFIG_HID_GREENASIA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GT683R b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GT683R new file mode 100644 index 0000000000000000000000000000000000000000..c65fcc22fcb3c5027324d2995082ab365178b8cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GT683R @@ -0,0 +1 @@ +CONFIG_HID_GT683R=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GYRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GYRATION new file mode 100644 index 0000000000000000000000000000000000000000..6d4a9391f67f75be73a7ca97ff4eb4a99bbe6ea3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GYRATION @@ -0,0 +1 @@ +CONFIG_HID_GYRATION=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_HOLTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_HOLTEK new file mode 100644 index 0000000000000000000000000000000000000000..2118385dd6ea22f055b391b7bcada2b803cb4b58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_HOLTEK @@ -0,0 +1 @@ +CONFIG_HID_HOLTEK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ICADE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ICADE new file mode 100644 index 0000000000000000000000000000000000000000..c9d3d1edd3e15d31d30648284dcfef6eda0414c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ICADE @@ -0,0 +1 @@ +CONFIG_HID_ICADE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ITE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ITE new file mode 100644 index 0000000000000000000000000000000000000000..b4af4b45e2eb17c582a9c5733b23387e8aed9b1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ITE @@ -0,0 +1 @@ +CONFIG_HID_ITE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_JABRA b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_JABRA new file mode 100644 index 0000000000000000000000000000000000000000..c93fae8a607912223a681d5e2da3e0d3466aabc2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_JABRA @@ -0,0 +1 @@ +CONFIG_HID_JABRA=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KENSINGTON b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KENSINGTON new file mode 100644 index 0000000000000000000000000000000000000000..4fbf2d237c303b3f0de5365e3d5f80ee5da86352 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KENSINGTON @@ -0,0 +1 @@ +CONFIG_HID_KENSINGTON=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KEYTOUCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KEYTOUCH new file mode 100644 index 0000000000000000000000000000000000000000..39d8ced6043d1f9a12b7f6720db2e8a504906a64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KEYTOUCH @@ -0,0 +1 @@ +CONFIG_HID_KEYTOUCH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KYE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KYE new file mode 100644 index 0000000000000000000000000000000000000000..0987d5b063f61da23b6e1bd45891d322477676c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KYE @@ -0,0 +1 @@ +CONFIG_HID_KYE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LCPOWER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LCPOWER new file mode 100644 index 0000000000000000000000000000000000000000..4ccf9204d3ffaedcd3c03ecbb901304b70d629e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LCPOWER @@ -0,0 +1 @@ +CONFIG_HID_LCPOWER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LED b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LED new file mode 100644 index 0000000000000000000000000000000000000000..dfe3a9a1eb2668627d55279572a7fd66e8b8ea25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LED @@ -0,0 +1 @@ +CONFIG_HID_LED=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LENOVO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LENOVO new file mode 100644 index 0000000000000000000000000000000000000000..32888ff64e297b3dbaf399a125fe7cb716914c92 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LENOVO @@ -0,0 +1 @@ +CONFIG_HID_LENOVO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LETSKETCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LETSKETCH new file mode 100644 index 0000000000000000000000000000000000000000..e987810bd4f1f83889eacd236c2b873c2068f6f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LETSKETCH @@ -0,0 +1 @@ +# CONFIG_HID_LETSKETCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH new file mode 100644 index 0000000000000000000000000000000000000000..dc04d70e82d3c1ec8b9861ee257399ae732c3f2d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH @@ -0,0 +1 @@ +CONFIG_HID_LOGITECH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_DJ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_DJ new file mode 100644 index 0000000000000000000000000000000000000000..5973b1e78f84c9b9dac91e79bdfeb0ef9723525b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_DJ @@ -0,0 +1 @@ +CONFIG_HID_LOGITECH_DJ=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_HIDPP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_HIDPP new file mode 100644 index 0000000000000000000000000000000000000000..965f6aa0f83d441f5250dfaaf7a81ea7c9d58495 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_HIDPP @@ -0,0 +1 @@ +CONFIG_HID_LOGITECH_HIDPP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MACALLY 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MACALLY new file mode 100644 index 0000000000000000000000000000000000000000..6b38c1f2101a5b6313a949165a8b922db1394913 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MACALLY @@ -0,0 +1 @@ +# CONFIG_HID_MACALLY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAGICMOUSE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAGICMOUSE new file mode 100644 index 0000000000000000000000000000000000000000..524303e2407008e08314eabab0f267d35c734207 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAGICMOUSE @@ -0,0 +1 @@ +CONFIG_HID_MAGICMOUSE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MALTRON b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MALTRON new file mode 100644 index 0000000000000000000000000000000000000000..2b17c227352aacb70a6fe2b879f431baad4832e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MALTRON @@ -0,0 +1 @@ +# CONFIG_HID_MALTRON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAYFLASH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAYFLASH new file mode 100644 index 0000000000000000000000000000000000000000..eaee56db96e60886ce6b1556f159973b5d2c6af8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAYFLASH @@ -0,0 +1 @@ +# CONFIG_HID_MAYFLASH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2200 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2200 new file mode 100644 index 0000000000000000000000000000000000000000..cc0d5e686b078bc33c866d173f5338c0afc11f21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2200 @@ -0,0 +1 @@ +# CONFIG_HID_MCP2200 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2221 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2221 new file mode 100644 index 0000000000000000000000000000000000000000..556c46a013d93cbfebd979113debfc63e3326195 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2221 @@ -0,0 +1 @@ +# CONFIG_HID_MCP2221 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MEGAWORLD_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MEGAWORLD_FF new file mode 100644 index 0000000000000000000000000000000000000000..3f697747afa785d8ca81eafa9e3e44627364937a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MEGAWORLD_FF @@ -0,0 +1 @@ +# CONFIG_HID_MEGAWORLD_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MICROSOFT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MICROSOFT new file mode 100644 index 0000000000000000000000000000000000000000..48983ea3b17514391d27adb583ec8033a550102c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MICROSOFT @@ -0,0 +1 @@ +CONFIG_HID_MICROSOFT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MONTEREY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MONTEREY new file mode 100644 index 0000000000000000000000000000000000000000..087b000d38672d5dd6e596b2cffd678919a68382 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MONTEREY @@ -0,0 +1 @@ +CONFIG_HID_MONTEREY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MULTITOUCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MULTITOUCH new file mode 100644 index 0000000000000000000000000000000000000000..7902798886b15832c704cb10ca2ee06c6fbf9c54 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MULTITOUCH @@ -0,0 +1 @@ +CONFIG_HID_MULTITOUCH=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NINTENDO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NINTENDO new file mode 100644 index 0000000000000000000000000000000000000000..c2269e5bfbfadf0e665ab633be686dc992ef3ba9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NINTENDO @@ -0,0 +1 @@ +# CONFIG_HID_NINTENDO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTI new file mode 100644 index 0000000000000000000000000000000000000000..c239c7052d12b183ca726954625a9fae2fc004d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTI @@ -0,0 +1 @@ +CONFIG_HID_NTI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTRIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTRIG new file mode 100644 index 0000000000000000000000000000000000000000..9b0ac9189e22474b09da568a14968824e6a1f743 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTRIG @@ -0,0 +1 @@ +CONFIG_HID_NTRIG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ORTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ORTEK new file mode 100644 index 0000000000000000000000000000000000000000..a0030810c2650d1658f956b919b8bae2086aa16b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ORTEK @@ -0,0 +1 @@ +CONFIG_HID_ORTEK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PANTHERLORD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PANTHERLORD new file mode 100644 index 0000000000000000000000000000000000000000..e12ea738a67343fcf95072888521d1cabacc7908 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PANTHERLORD @@ -0,0 +1 @@ +CONFIG_HID_PANTHERLORD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PENMOUNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PENMOUNT new file mode 100644 index 0000000000000000000000000000000000000000..e8ce0dab6ba9c0fccd119f6e057ef7f14ac26546 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PENMOUNT @@ -0,0 +1 @@ +CONFIG_HID_PENMOUNT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PETALYNX b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PETALYNX new file mode 100644 index 0000000000000000000000000000000000000000..260ea69bd68483c01f89dbe1dd511e1e26a163a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PETALYNX @@ -0,0 +1 @@ +CONFIG_HID_PETALYNX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD new file mode 100644 index 0000000000000000000000000000000000000000..fb20d5ce3a324f29b042366530f638e2426628c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD @@ -0,0 +1 @@ +CONFIG_HID_PICOLCD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_BACKLIGHT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_BACKLIGHT new file mode 100644 index 0000000000000000000000000000000000000000..9accd76906a86c7a71f268b9dc4315179499cd79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_BACKLIGHT @@ -0,0 +1 @@ +CONFIG_HID_PICOLCD_BACKLIGHT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_FB b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_FB new file mode 100644 index 0000000000000000000000000000000000000000..38631826cdaf1c4cdef5f4e38071a93a8273ef29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_FB @@ -0,0 +1 @@ +CONFIG_HID_PICOLCD_FB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LCD 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LCD new file mode 100644 index 0000000000000000000000000000000000000000..84d52ea84e2eb347ad66ff0c8ed9f5e8f58e39d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LCD @@ -0,0 +1 @@ +CONFIG_HID_PICOLCD_LCD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LEDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LEDS new file mode 100644 index 0000000000000000000000000000000000000000..bf6539eaaba794369aa2600587a6260f38c3d44f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LEDS @@ -0,0 +1 @@ +CONFIG_HID_PICOLCD_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PID b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PID new file mode 100644 index 0000000000000000000000000000000000000000..d4c80820d1699dd4125ca3940b39fe7db47ddb6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PID @@ -0,0 +1 @@ +CONFIG_HID_PID=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PLANTRONICS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PLANTRONICS new file mode 100644 index 0000000000000000000000000000000000000000..f99242de7c4669820af9a2d1bd42db5f6d802fe1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PLANTRONICS @@ -0,0 +1 @@ +CONFIG_HID_PLANTRONICS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PRIMAX b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PRIMAX new file mode 100644 index 0000000000000000000000000000000000000000..1c60e2cdc9c7043725ef96bdb7cf357b74046c86 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PRIMAX @@ -0,0 +1 @@ +CONFIG_HID_PRIMAX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PXRC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PXRC new file mode 100644 index 0000000000000000000000000000000000000000..c1be11caef0095165742a2f39d7d21b73d7713ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PXRC @@ -0,0 +1 @@ +# CONFIG_HID_PXRC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RAZER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RAZER new file mode 100644 index 0000000000000000000000000000000000000000..55369bcd061cc74e2a69eef998f80160fef617ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RAZER @@ -0,0 +1 @@ +# CONFIG_HID_RAZER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_REDRAGON b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_REDRAGON new file mode 100644 index 0000000000000000000000000000000000000000..b3aa83efaa3e1bea6fcbab0a9becc80e15a916d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_REDRAGON @@ -0,0 +1 @@ +# CONFIG_HID_REDRAGON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RETRODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RETRODE new file mode 100644 index 0000000000000000000000000000000000000000..18cf5fa2b0696d4683d1b0649c8f3d7f683323f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RETRODE @@ -0,0 +1 @@ +# CONFIG_HID_RETRODE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RMI new file mode 100644 index 0000000000000000000000000000000000000000..287d4bc0f354b0fb0a766afb8d72303fcac754f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RMI @@ -0,0 +1 @@ +CONFIG_HID_RMI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ROCCAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ROCCAT new file mode 100644 index 
0000000000000000000000000000000000000000..9b2f14d42e923795409f7cad059227638f2634fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ROCCAT @@ -0,0 +1 @@ +CONFIG_HID_ROCCAT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAITEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAITEK new file mode 100644 index 0000000000000000000000000000000000000000..fecfba018cd86d4551ec10e93ea89a4e4cf87565 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAITEK @@ -0,0 +1 @@ +CONFIG_HID_SAITEK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAMSUNG b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAMSUNG new file mode 100644 index 0000000000000000000000000000000000000000..2540e9c9a58cb9e9b73f1ad27a622d9b62c5bffd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAMSUNG @@ -0,0 +1 @@ +CONFIG_HID_SAMSUNG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SEMITEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SEMITEK new file mode 100644 index 0000000000000000000000000000000000000000..4bc807d8b642c60c2742d1d7ba418d190d8df463 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SEMITEK @@ -0,0 +1 @@ +# CONFIG_HID_SEMITEK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SIGMAMICRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SIGMAMICRO new file mode 100644 index 0000000000000000000000000000000000000000..255a346890867595967d995bab55c2f58e765a07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SIGMAMICRO @@ -0,0 +1 @@ +# CONFIG_HID_SIGMAMICRO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SMARTJOYPLUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SMARTJOYPLUS new file mode 100644 index 0000000000000000000000000000000000000000..9bbdeb0e6b192eaf42d8be192ba7850b78de26cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SMARTJOYPLUS @@ -0,0 +1 @@ +CONFIG_HID_SMARTJOYPLUS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SONY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SONY new file mode 100644 index 0000000000000000000000000000000000000000..6ee0f6474bd0f75851a4accc4174db7cc088d739 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SONY @@ -0,0 +1 @@ +CONFIG_HID_SONY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SPEEDLINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SPEEDLINK new file mode 100644 index 0000000000000000000000000000000000000000..6019addf92837e16a66ca0149e7a0df43b15bd8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SPEEDLINK @@ -0,0 +1 @@ +CONFIG_HID_SPEEDLINK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEAM new file mode 100644 index 0000000000000000000000000000000000000000..dcc1ee8519e94d943c85896bffa07df5c6e5db4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEAM @@ -0,0 +1 @@ +# CONFIG_HID_STEAM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEELSERIES b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEELSERIES new file mode 100644 index 0000000000000000000000000000000000000000..77f65e9e779da6438125a42cd0cf9c8511ec05d5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEELSERIES @@ -0,0 +1 @@ +CONFIG_HID_STEELSERIES=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SUNPLUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SUNPLUS new file mode 100644 index 
0000000000000000000000000000000000000000..724c2c737416084865a195bf614c7f812c2eb4db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SUNPLUS @@ -0,0 +1 @@ +CONFIG_HID_SUNPLUS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THINGM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THINGM new file mode 100644 index 0000000000000000000000000000000000000000..a41c6e458cc3ed6edbdf6b02c8a63d561f92fd15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THINGM @@ -0,0 +1 @@ +CONFIG_HID_THINGM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THRUSTMASTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THRUSTMASTER new file mode 100644 index 0000000000000000000000000000000000000000..4dde89479a33c3a1758a2eacb3067fd1ac6578e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THRUSTMASTER @@ -0,0 +1 @@ +CONFIG_HID_THRUSTMASTER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TIVO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TIVO new file mode 100644 index 0000000000000000000000000000000000000000..a33d735dd3b93b3e04a4d1b04f4f16def5e67214 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TIVO @@ -0,0 +1 @@ +CONFIG_HID_TIVO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPRE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPRE new file mode 100644 index 0000000000000000000000000000000000000000..926ac41cd2cec58225257a9053dc2b751e8e305f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPRE @@ -0,0 +1 @@ +# CONFIG_HID_TOPRE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPSEED b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPSEED new file mode 100644 index 0000000000000000000000000000000000000000..8e1879f7cdfd08a12fe392bf0e2c2b74a645a310 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPSEED @@ -0,0 +1 @@ +CONFIG_HID_TOPSEED=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TWINHAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TWINHAN new file mode 100644 index 0000000000000000000000000000000000000000..0f930a425df0e0160cb25368b233617986391e16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TWINHAN @@ -0,0 +1 @@ +CONFIG_HID_TWINHAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_U2FZERO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_U2FZERO new file mode 100644 index 0000000000000000000000000000000000000000..2b337170093973e1a48fa1f9eef1cfeb17cf3119 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_U2FZERO @@ -0,0 +1 @@ +# CONFIG_HID_U2FZERO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UCLOGIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UCLOGIC new file mode 100644 index 0000000000000000000000000000000000000000..9fa1b889bf9b22f4957afc49804593b5761baa2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UCLOGIC @@ -0,0 +1 @@ +CONFIG_HID_UCLOGIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UDRAW_PS3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UDRAW_PS3 new file mode 100644 index 0000000000000000000000000000000000000000..ae8a9040a8f3d9c06b92e999fa9e00774ffde8a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UDRAW_PS3 @@ -0,0 +1 @@ +# CONFIG_HID_UDRAW_PS3 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIEWSONIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIEWSONIC new file mode 100644 index 0000000000000000000000000000000000000000..dd2c624e2cb9a4ba8d44f4529f6ee192c2c014ec 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIEWSONIC @@ -0,0 +1 @@ +# CONFIG_HID_VIEWSONIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIVALDI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIVALDI new file mode 100644 index 0000000000000000000000000000000000000000..289b0f0090f3e77dfc5759db0804ffb349572260 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIVALDI @@ -0,0 +1 @@ +# CONFIG_HID_VIVALDI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VRC2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VRC2 new file mode 100644 index 0000000000000000000000000000000000000000..d76c4dc2e477bee5f139953622db1a3e859d5e6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VRC2 @@ -0,0 +1 @@ +# CONFIG_HID_VRC2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WACOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WACOM new file mode 100644 index 0000000000000000000000000000000000000000..9da42d4d3ed586035a89fc7f5058ce2366a848b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WACOM @@ -0,0 +1 @@ +CONFIG_HID_WACOM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WALTOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WALTOP new file mode 100644 index 0000000000000000000000000000000000000000..be9f1ac7df00f7ee4536318721c1c247dc853309 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WALTOP @@ -0,0 +1 @@ +CONFIG_HID_WALTOP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WIIMOTE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WIIMOTE new file mode 100644 index 0000000000000000000000000000000000000000..1f69eb18fa0b624ac91928250d7ea7723fce0769 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WIIMOTE @@ -0,0 +1 @@ +CONFIG_HID_WIIMOTE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XIAOMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XIAOMI new file mode 100644 index 0000000000000000000000000000000000000000..31d23ec9ab643d5352ed7f0fbbf1edca4c3bd99c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XIAOMI @@ -0,0 +1 @@ +# CONFIG_HID_XIAOMI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XINMO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XINMO new file mode 100644 index 0000000000000000000000000000000000000000..05c54c7a9cf6c517b56e161e20f43ef33d99f98e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XINMO @@ -0,0 +1 @@ +CONFIG_HID_XINMO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZEROPLUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZEROPLUS new file mode 100644 index 0000000000000000000000000000000000000000..204a0cdf5298b2d3ebcccdb8997312ada8a79de2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZEROPLUS @@ -0,0 +1 @@ +CONFIG_HID_ZEROPLUS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZYDACRON b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZYDACRON new file mode 100644 index 0000000000000000000000000000000000000000..c145906db5b8ad3e9b702afccd7071ba8c01cc1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZYDACRON @@ -0,0 +1 @@ +CONFIG_HID_ZYDACRON=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HIPPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIPPI new file mode 100644 index 0000000000000000000000000000000000000000..8f4d0983a853d4549ae18349fc464d58847a8230 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIPPI @@ -0,0 +1 @@ +# CONFIG_HIPPI is not set diff 
--git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HIST_TRIGGERS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIST_TRIGGERS_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..a6a06ed369a089987aada20dc08a9ef7c423deae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIST_TRIGGERS_DEBUG
@@ -0,0 +1 @@
+# CONFIG_HIST_TRIGGERS_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HMC6352 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMC6352
new file mode 100644
index 0000000000000000000000000000000000000000..c283a00bb465ff38126f214e8fd5091a60c66278
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMC6352
@@ -0,0 +1 @@
+# CONFIG_HMC6352 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HMEM_REPORTING b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMEM_REPORTING
new file mode 100644
index 0000000000000000000000000000000000000000..e4a0b396ae6db6e821b06f060be5b774194d5bce
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMEM_REPORTING
@@ -0,0 +1 @@
+CONFIG_HMEM_REPORTING=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HMM_MIRROR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMM_MIRROR
new file mode 100644
index 0000000000000000000000000000000000000000..11dfee6c1576550f23d19fb87420cd456d331c99
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMM_MIRROR
@@ -0,0 +1 @@
+CONFIG_HMM_MIRROR=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HOLTEK_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOLTEK_FF
new file mode 100644
index 0000000000000000000000000000000000000000..f19776110e2575d3733a037dcc45b7cee2351173
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOLTEK_FF
@@ -0,0 +1 @@
+# CONFIG_HOLTEK_FF is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC
new file mode 100644
index 0000000000000000000000000000000000000000..bfbc290642bd3ce779cc6e409df69807509beded
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC
@@ -0,0 +1 @@
+CONFIG_HOTPLUG_CORE_SYNC=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC_DEAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC_DEAD
new file mode 100644
index 0000000000000000000000000000000000000000..545a0b721c2caa013e21e98e85cb2b15b7e62fca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC_DEAD
@@ -0,0 +1 @@
+CONFIG_HOTPLUG_CORE_SYNC_DEAD=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_ACPI_IBM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_ACPI_IBM
new file mode 100644
index 0000000000000000000000000000000000000000..c2e8de4e5d266336aadda37262071feb4a2402ec
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_ACPI_IBM
@@ -0,0 +1 @@
+CONFIG_HOTPLUG_PCI_ACPI_IBM=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_CPCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_CPCI
new file mode 100644
index 0000000000000000000000000000000000000000..dcd07008eb2c058343a5b4cac9645b5d393a4b06
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_CPCI
@@ -0,0 +1 @@
+# CONFIG_HOTPLUG_PCI_CPCI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HPFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HPFS_FS
new file mode 100644
index 0000000000000000000000000000000000000000..e5ad7579ffb6800f7ea9d5132cdad5de647f5373
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HPFS_FS
@@ -0,0 +1 @@
+# CONFIG_HPFS_FS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HSI
new file mode 100644
index 0000000000000000000000000000000000000000..1581f53f92e22c941119a3c255bde2b4994cbd12
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HSI
@@ -0,0 +1 @@
+# CONFIG_HSI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HSR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HSR
new file mode 100644
index 0000000000000000000000000000000000000000..9a440220af2820d34b1e760e772d537fcd2eb973
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HSR
@@ -0,0 +1 @@
+# CONFIG_HSR is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HTE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HTE
new file mode 100644
index 0000000000000000000000000000000000000000..9603fbdc3b298d6b28f3101362f274983ac8a765
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HTE
@@ -0,0 +1 @@
+# CONFIG_HTE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HVC_DRIVER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HVC_DRIVER
new file mode 100644
index 0000000000000000000000000000000000000000..2b13a0cc9b595eb41ab2193fa0e86710025d67a2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HVC_DRIVER
@@ -0,0 +1 @@
+CONFIG_HVC_DRIVER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HWMON_DEBUG_CHIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HWMON_DEBUG_CHIP
new file mode 100644
index 0000000000000000000000000000000000000000..700df44029ea03b70fa37be4ac9e1e22b59d5bdd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HWMON_DEBUG_CHIP
@@ -0,0 +1 @@
+# CONFIG_HWMON_DEBUG_CHIP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HWSPINLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HWSPINLOCK
new file mode 100644
index 0000000000000000000000000000000000000000..6b531e93f5bee3ce4cdb56283247ae140fdde0c5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HWSPINLOCK
@@ -0,0 +1 @@
+CONFIG_HWSPINLOCK=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_CONSOLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_CONSOLE
new file mode 100644
index 0000000000000000000000000000000000000000..1a03cd404e588699a3883c71aad4d3562458ddaa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_CONSOLE
@@ -0,0 +1 @@
+CONFIG_HW_CONSOLE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_BA431 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_BA431
new file mode 100644
index 0000000000000000000000000000000000000000..ccafb50a2a93574f30335fd15a428355cdc492d3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_BA431
@@ -0,0 +1 @@
+# CONFIG_HW_RANDOM_BA431 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_TIMERIOMEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_TIMERIOMEM
new file mode 100644
index 0000000000000000000000000000000000000000..b1963489bf7fd21045859e692151b3ba3a99076c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_TIMERIOMEM
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_TIMERIOMEM=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_XIPHERA b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_XIPHERA
new file mode 100644
index 0000000000000000000000000000000000000000..779befaec438761ed4d71e67947c46041cd22f02
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_XIPHERA
@@ -0,0 +1 @@
+# CONFIG_HW_RANDOM_XIPHERA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HYPERV_TESTING b/anolis/configs/L2-OPTIONAL/default/CONFIG_HYPERV_TESTING
new file mode 100644
index 0000000000000000000000000000000000000000..d763bef97e276682949108fe869aa92f30692599
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HYPERV_TESTING
@@ -0,0 +1 @@
+# CONFIG_HYPERV_TESTING is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOBIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOBIT
new file mode 100644
index 0000000000000000000000000000000000000000..75e39f8b6df1fdb2b1f386c63d4fdf5f0a47f6b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOBIT
@@ -0,0 +1 @@
+CONFIG_I2C_ALGOBIT=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOPCA b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOPCA
new file mode 100644
index 0000000000000000000000000000000000000000..2c5a407690f23e3c6fb01ebfa69135e3fbd99d2e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOPCA
@@ -0,0 +1 @@
+CONFIG_I2C_ALGOPCA=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1535 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1535
new file mode 100644
index 0000000000000000000000000000000000000000..3f9e2fef11fbd700a15575c03ed518de6706ad80
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1535
@@ -0,0 +1 @@
+# CONFIG_I2C_ALI1535 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1563 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1563
new file mode 100644
index 0000000000000000000000000000000000000000..035045f8b56d35db67b180fb2842fb1b9c243fde
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1563
@@ -0,0 +1 @@
+# CONFIG_I2C_ALI1563 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI15X3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI15X3
new file mode 100644
index 0000000000000000000000000000000000000000..1621ea3814e02639bb744c9006d38714607a8abc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI15X3
@@ -0,0 +1 @@
+# CONFIG_I2C_ALI15X3 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_AMD_MP2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_AMD_MP2
new file mode 100644
index 0000000000000000000000000000000000000000..e128a6c99bdba96e483e0568233abd43cc063a74
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_AMD_MP2
@@ -0,0 +1 @@
+# CONFIG_I2C_AMD_MP2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_BOARDINFO b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_BOARDINFO
new file mode 100644
index 0000000000000000000000000000000000000000..8bb991a444daea524f97b55e289cfe61a2ffea73
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_BOARDINFO
@@ -0,0 +1 @@
+CONFIG_I2C_BOARDINFO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CBUS_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CBUS_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..d626de063b6b09982b557c2c77268141bb561449
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CBUS_GPIO
@@ -0,0 +1 @@
+# CONFIG_I2C_CBUS_GPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_COMPAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_COMPAT
new file mode 100644
index 0000000000000000000000000000000000000000..e24cc28d31eeee60ca1b0fd9c23e7d8a069b0224
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_COMPAT
@@ -0,0 +1 @@
+CONFIG_I2C_COMPAT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CP2615 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CP2615
new file mode 100644
index 0000000000000000000000000000000000000000..099aa14b27a4d9f534818262c6e96dcbecf346b5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CP2615
@@ -0,0 +1 @@
+# CONFIG_I2C_CP2615 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_ALGO b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_ALGO
new file mode 100644
index 0000000000000000000000000000000000000000..6c95613697a64802908152557aab585763564183
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_ALGO
@@ -0,0 +1 @@
+# CONFIG_I2C_DEBUG_ALGO is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_BUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_BUS
new file mode 100644
index 0000000000000000000000000000000000000000..944e060939ec7a5801fcfaf411f162248197d7a7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_BUS
@@ -0,0 +1 @@
+# CONFIG_I2C_DEBUG_BUS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..6b0f751b1caff87a9b0bb921d14c4cda39caed7a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_CORE
@@ -0,0 +1 @@
+# CONFIG_I2C_DEBUG_CORE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..661ffb01a393de1e5f3e8e99499fb4ea9ff33bce
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_CORE
@@ -0,0 +1 @@
+CONFIG_I2C_DESIGNWARE_CORE=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..7f371b4dc4dd7baf6b60cc5f95b8d7c918dfb1e9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PCI
@@ -0,0 +1 @@
+# CONFIG_I2C_DESIGNWARE_PCI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PLATFORM b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..cec2f863359089e3f4a32f02f2a3456aa0e60d2a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PLATFORM
@@ -0,0 +1 @@
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_SLAVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_SLAVE
new file mode 100644
index 0000000000000000000000000000000000000000..b4ae47a7eaec9708b64967c909c0e2a12297661d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_SLAVE
@@ -0,0 +1 @@
+# CONFIG_I2C_DESIGNWARE_SLAVE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DIOLAN_U2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DIOLAN_U2C
new file mode 100644
index 0000000000000000000000000000000000000000..7cd85b9d7ced65bfa08b172ca66b3a636a2b3384
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DIOLAN_U2C
@@ -0,0 +1 @@
+CONFIG_I2C_DIOLAN_U2C=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_EMEV2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_EMEV2
new file mode 100644
index 0000000000000000000000000000000000000000..9121ff3a70b0e522bc808d1aa31d8c59d1c0d35d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_EMEV2
@@ -0,0 +1 @@
+# CONFIG_I2C_EMEV2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID
new file mode 100644
index 0000000000000000000000000000000000000000..d4b9febdd17def3a3f102bea592ab79f36ff317a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID
@@ -0,0 +1 @@
+CONFIG_I2C_HID=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_ACPI
new file mode 100644
index 0000000000000000000000000000000000000000..540ca3a21518a13e10c431e5f1d4e5ed0b35c417
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_ACPI
@@ -0,0 +1 @@
+# CONFIG_I2C_HID_ACPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_OF b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_OF
new file mode 100644
index 0000000000000000000000000000000000000000..847537d7b616e28b67e0b9a55723efb7ce5e591f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_OF
@@ -0,0 +1 @@
+# CONFIG_I2C_HID_OF is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_LTC4306 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_LTC4306
new file mode 100644
index 0000000000000000000000000000000000000000..84a6a3221c5664306283367af35e7edb8fc0122a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_LTC4306
@@ -0,0 +1 @@
+# CONFIG_I2C_MUX_LTC4306 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_MLXCPLD b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_MLXCPLD
new file mode 100644
index 0000000000000000000000000000000000000000..d4e6b7b4ff8e071b4255b245472e40ee0e1613c1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_MLXCPLD
@@ -0,0 +1 @@
+CONFIG_I2C_MUX_MLXCPLD=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_REG b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_REG
new file mode 100644
index 0000000000000000000000000000000000000000..f16a809e12816ae33bf2165373688e9132ec0f42
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_REG
@@ -0,0 +1 @@
+# CONFIG_I2C_MUX_REG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NFORCE2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NFORCE2
new file mode 100644
index 0000000000000000000000000000000000000000..50bbe34f292c26c6fae348aff8ff07d255f9dc30
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NFORCE2
@@ -0,0 +1 @@
+CONFIG_I2C_NFORCE2=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NVIDIA_GPU b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NVIDIA_GPU
new file mode 100644
index 0000000000000000000000000000000000000000..fde08e31378b2fa5e05d6e34f494b5c23363f133
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NVIDIA_GPU
@@ -0,0 +1 @@
+# CONFIG_I2C_NVIDIA_GPU is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_OCORES b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_OCORES
new file mode 100644
index 0000000000000000000000000000000000000000..302dfede4bee6f696d9368efdd3516cd57b59a6c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_OCORES
@@ -0,0 +1 @@
+# CONFIG_I2C_OCORES is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCA_PLATFORM b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCA_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..f01485df45914816f4c452a077615b95d25b4644
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCA_PLATFORM
@@ -0,0 +1 @@
+CONFIG_I2C_PCA_PLATFORM=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCI1XXXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCI1XXXX
new file mode 100644
index 0000000000000000000000000000000000000000..7a3160209795d23b0ef853da34f595eedddbdf61
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCI1XXXX
@@ -0,0 +1 @@
+# CONFIG_I2C_PCI1XXXX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ROBOTFUZZ_OSIF b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ROBOTFUZZ_OSIF
new file mode 100644
index 0000000000000000000000000000000000000000..f9b66870a9a83e4ddb0f7f21661636d0aed417bd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ROBOTFUZZ_OSIF
@@ -0,0 +1 @@
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIMTEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIMTEC
new file mode 100644
index 0000000000000000000000000000000000000000..9cfdf4eec6aaedb124e4d9d0f3f7cd74724dcd1f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIMTEC
@@ -0,0 +1 @@
+CONFIG_I2C_SIMTEC=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS5595 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS5595
new file mode 100644
index 0000000000000000000000000000000000000000..881262cd6223bca0814ee6540e1d563bc674be49
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS5595
@@ -0,0 +1 @@
+# CONFIG_I2C_SIS5595 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS630 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS630
new file mode 100644
index 0000000000000000000000000000000000000000..daa5891514109d5a3c8a3532fed605301d41097b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS630
@@ -0,0 +1 @@
+# CONFIG_I2C_SIS630 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_STUB b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_STUB
new file mode 100644
index 0000000000000000000000000000000000000000..4966820791d68c342eca7fefbf394a357658299c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_STUB
@@ -0,0 +1 @@
+CONFIG_I2C_STUB=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TAOS_EVM b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TAOS_EVM
new file mode 100644
index 0000000000000000000000000000000000000000..3db7fabfb248abc46067e61c68dfa207731e4baf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TAOS_EVM
@@ -0,0 +1 @@
+# CONFIG_I2C_TAOS_EVM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TINY_USB b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TINY_USB
new file mode 100644
index 0000000000000000000000000000000000000000..ae818d4e9b08334fb40216b4240bf58d1a4ffabf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TINY_USB
@@ -0,0 +1 @@
+CONFIG_I2C_TINY_USB=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_VIRTIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_VIRTIO
new file mode 100644
index 0000000000000000000000000000000000000000..e6a1fcb2844de3a03c8ce73805edaa1e7542f7e6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_VIRTIO
@@ -0,0 +1 @@
+# CONFIG_I2C_VIRTIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_XILINX b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_XILINX
new file mode 100644
index 0000000000000000000000000000000000000000..9526c6adf237a7be13fac3f5a9088e0c4fa12cef
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_XILINX
@@ -0,0 +1 @@
+# CONFIG_I2C_XILINX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I3C b/anolis/configs/L2-OPTIONAL/default/CONFIG_I3C
new file mode 100644
index 0000000000000000000000000000000000000000..387a138c15d8027e64dded91ec197d144060995e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I3C
@@ -0,0 +1 @@
+# CONFIG_I3C is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IAVF b/anolis/configs/L2-OPTIONAL/default/CONFIG_IAVF
new file mode 100644
index 0000000000000000000000000000000000000000..8c6e509e9e43618355bf0e06af3f70c962904ee2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IAVF
@@ -0,0 +1 @@
+CONFIG_IAVF=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ICE_SWITCHDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICE_SWITCHDEV
new file mode 100644
index 0000000000000000000000000000000000000000..d0e4f7610f0ce8086db13c81bae3da0a260459de
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICE_SWITCHDEV
@@ -0,0 +1 @@
+CONFIG_ICE_SWITCHDEV=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ICPLUS_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICPLUS_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..f58838d2fb907dfa9d3069998f9b32d177ee802a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICPLUS_PHY
@@ -0,0 +1 @@
+CONFIG_ICPLUS_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ICS932S401 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICS932S401
new file mode 100644
index 0000000000000000000000000000000000000000..e8020befdd28e180ce0c983e1c489ea30ce74d7d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICS932S401
@@ -0,0 +1 @@
+# CONFIG_ICS932S401 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154 b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154
new file mode 100644
index 0000000000000000000000000000000000000000..bff5041fdd69bad7528b8dcde3c8c0c934ec8464
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154
@@ -0,0 +1 @@
+CONFIG_IEEE802154=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_6LOWPAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_6LOWPAN
new file mode 100644
index 0000000000000000000000000000000000000000..4c23edbf740bee6aec83b1b07f2559997472f1ca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_6LOWPAN
@@ -0,0 +1 @@
+CONFIG_IEEE802154_6LOWPAN=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ADF7242 b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ADF7242
new file mode 100644
index 0000000000000000000000000000000000000000..57df1ab7f9c2d3f12d429f891fd723bef800c70a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ADF7242
@@ -0,0 +1 @@
+# CONFIG_IEEE802154_ADF7242 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_AT86RF230 b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_AT86RF230
new file mode 100644
index 0000000000000000000000000000000000000000..d46e811585089b38c689206a7a3c516446469a1d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_AT86RF230
@@ -0,0 +1 @@
+# CONFIG_IEEE802154_AT86RF230 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ATUSB b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ATUSB
new file mode 100644
index 0000000000000000000000000000000000000000..ab561ab9e2646715b6dde24e3a0044d0914b1cd5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ATUSB
@@ -0,0 +1 @@
+# CONFIG_IEEE802154_ATUSB is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CA8210 b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CA8210
new file mode 100644
index 0000000000000000000000000000000000000000..462211e30813a99b03d6b1555b81e52f86c807b9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CA8210
@@ -0,0 +1 @@
+# CONFIG_IEEE802154_CA8210 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CC2520 b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CC2520
new file mode 100644
index 0000000000000000000000000000000000000000..037cabb8459a2e097bd7f0e18c8bd95e74c05e6e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CC2520
@@ -0,0 +1 @@
+# CONFIG_IEEE802154_CC2520 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_DRIVERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_DRIVERS
new file mode 100644
index 0000000000000000000000000000000000000000..caa6c2f8765fbd419eb5b60e7f4ff9c4268990e5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_DRIVERS
@@ -0,0 +1 @@
+CONFIG_IEEE802154_DRIVERS=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_HWSIM b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_HWSIM
new file mode 100644
index 0000000000000000000000000000000000000000..02d9827475044ba0c41dd2c91a441cae53312e70
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_HWSIM
@@ -0,0 +1 @@
+# CONFIG_IEEE802154_HWSIM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MCR20A b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MCR20A
new file mode 100644
index 0000000000000000000000000000000000000000..a574a9660520b498d42cb3225e7d0bb59170ba17
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MCR20A
@@ -0,0 +1 @@
+# CONFIG_IEEE802154_MCR20A is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MRF24J40 b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MRF24J40
new file mode 100644
index 0000000000000000000000000000000000000000..48d1236eb3262a357ec23a5c268a946c49be7954
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MRF24J40
@@ -0,0 +1 @@
+# CONFIG_IEEE802154_MRF24J40 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_NL802154_EXPERIMENTAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_NL802154_EXPERIMENTAL
new file mode 100644
index 0000000000000000000000000000000000000000..9a637d779be06f72edb36ec22aa604057650a213
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_NL802154_EXPERIMENTAL
@@ -0,0 +1 @@
+# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_SOCKET b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_SOCKET
new file mode 100644
index 0000000000000000000000000000000000000000..e7bfdca2d383bb8cfdf1a67aabc5ffcc68c80f68
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_SOCKET
@@ -0,0 +1 @@
+CONFIG_IEEE802154_SOCKET=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_DISABLE_HTABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_DISABLE_HTABLE
new file mode 100644
index 0000000000000000000000000000000000000000..c749fd4869ad93e2b7de681f27ceb0216661b139
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_DISABLE_HTABLE
@@ -0,0 +1 @@
+# CONFIG_IMA_DISABLE_HTABLE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_KEXEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_KEXEC
new file mode 100644
index 0000000000000000000000000000000000000000..20a119416adb3a9a0fa526f5d2954a3c4b515243
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_KEXEC
@@ -0,0 +1 @@
+# CONFIG_IMA_KEXEC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_SCTP_DIAG b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_SCTP_DIAG
new file mode 100644
index 0000000000000000000000000000000000000000..7ab0f4e2b8fe75435cec05400e1c221e907d75d6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_SCTP_DIAG
@@ -0,0 +1 @@
+CONFIG_INET_SCTP_DIAG=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TABLE_PERTURB_ORDER b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TABLE_PERTURB_ORDER
new file mode 100644
index 0000000000000000000000000000000000000000..61b701a25a3fd27f007d5713e450ea6751d3a1e4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TABLE_PERTURB_ORDER
@@ -0,0 +1 @@
+CONFIG_INET_TABLE_PERTURB_ORDER=16
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TUNNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..ab07f30096dfc547c4c68f20b5f4ef53e437158f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TUNNEL
@@ -0,0 +1 @@
+CONFIG_INET_TUNNEL=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_XFRM_TUNNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_XFRM_TUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..795cbac7a8d83a812193d53abaac3cd5d4216e00
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_XFRM_TUNNEL
@@ -0,0 +1 @@
+CONFIG_INET_XFRM_TUNNEL=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS
new file mode 100644
index 0000000000000000000000000000000000000000..cc04097ebc125262d91e6ed127defc35d8c729d7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_BNXT_RE b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_BNXT_RE
new file mode 100644
index 0000000000000000000000000000000000000000..23a29a3f1ff34dd422dfbcd1acaf03c7fc146549
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_BNXT_RE
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_BNXT_RE=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_CXGB4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_CXGB4
new file mode 100644
index 0000000000000000000000000000000000000000..5968a3ee27d691eefd31c44a85e624a70e97c602
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_CXGB4
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_CXGB4=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_EFA b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_EFA
new file mode 100644
index 0000000000000000000000000000000000000000..b6635f12bc3837f4ba762f80f6e2ebe4f1004b17
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_EFA
@@ -0,0 +1 @@
+# CONFIG_INFINIBAND_EFA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IPOIB_DEBUG_DATA b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
new file mode 100644
index 0000000000000000000000000000000000000000..00e419c17777bfc672fd907706838bee1faccd84
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
@@ -0,0 +1 @@
+# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA
new file mode 100644
index 0000000000000000000000000000000000000000..967fdb3b13a17b34fa57560f7e5d444d4fa99659
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_IRDMA=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_OCRDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_OCRDMA
new file mode 100644
index 0000000000000000000000000000000000000000..12ff35161153da9bd9aa8ba7b067847cc503e7b8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_OCRDMA
@@ -0,0 +1 @@
+# CONFIG_INFINIBAND_OCRDMA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_QEDR b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_QEDR
new file mode 100644
index 0000000000000000000000000000000000000000..657c87e5c03f1e0e084078eec5339d46749e5221
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_QEDR
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_QEDR=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_USER_MEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_USER_MEM
new file mode 100644
index 0000000000000000000000000000000000000000..f26ba36a5f4e7b66ed38c69277eb614dda051899
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_USER_MEM
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_USER_MEM=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_VIRT_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_VIRT_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..139371233296a30a19029493e1907acd5407af19
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_VIRT_DMA
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_VIRT_DMA=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFTL
new file mode 100644
index 0000000000000000000000000000000000000000..bb3c8b3c5a9df9c669d5985c46ef15a0467a54bf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFTL
@@ -0,0 +1 @@
+# CONFIG_INFTL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INITRAMFS_PRESERVE_MTIME b/anolis/configs/L2-OPTIONAL/default/CONFIG_INITRAMFS_PRESERVE_MTIME
new file mode 100644
index 0000000000000000000000000000000000000000..3095ee146ae8e20a503030ea6d7a2b2afaf73b79
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INITRAMFS_PRESERVE_MTIME
@@ -0,0 +1 @@
+CONFIG_INITRAMFS_PRESERVE_MTIME=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_ENV_ARG_LIMIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_ENV_ARG_LIMIT
new file mode 100644
index 0000000000000000000000000000000000000000..11eb3976e766cf5237bcdd5697a6f85c7b7ef671
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_ENV_ARG_LIMIT
@@ -0,0 +1 @@
+CONFIG_INIT_ENV_ARG_LIMIT=32
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_PATTERN b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_PATTERN
new file mode 100644
index 0000000000000000000000000000000000000000..5a246dc0fc42040dc5238e76f6a24c1a09903c99
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_PATTERN
@@ -0,0 +1 @@
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_ZERO b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_ZERO
new file mode 100644
index 0000000000000000000000000000000000000000..06b00f4c7b1de75dedd7652221aef8c13df4638f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_ZERO
@@ -0,0 +1 @@
+# CONFIG_INIT_STACK_ALL_ZERO is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_NONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_NONE
new file mode 100644
index 0000000000000000000000000000000000000000..16e74023a91899b92453015b2ddf6e415e41ec9a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_NONE
@@ -0,0 +1 @@
+CONFIG_INIT_STACK_NONE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_EVBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_EVBUG
new file mode 100644
index 0000000000000000000000000000000000000000..93144f90acec347506e23f7889efb8a8062c61ff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_EVBUG
@@ -0,0 +1 @@
+# CONFIG_INPUT_EVBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_FF_MEMLESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_FF_MEMLESS
new file mode 100644
index 0000000000000000000000000000000000000000..817e3e1ed3468580ba9d6bd5c5e17207e374a224
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_FF_MEMLESS
@@ -0,0 +1 @@
+CONFIG_INPUT_FF_MEMLESS=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_JOYSTICK b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_JOYSTICK
new file mode 100644
index 0000000000000000000000000000000000000000..6f75cf8151efe2d14922f41031eb9d288f4c36a2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_JOYSTICK
@@ -0,0 +1 @@
+# CONFIG_INPUT_JOYSTICK is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_LEDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_LEDS
new file mode 100644
index 0000000000000000000000000000000000000000..4fd79ff1b455044155908957831126814b0f5c83
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_LEDS
@@ -0,0 +1 @@
+CONFIG_INPUT_LEDS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MATRIXKMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MATRIXKMAP
new file mode 100644
index 0000000000000000000000000000000000000000..2662c28f0b4a300b14a73625872162fec4b94f82
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MATRIXKMAP
@@ -0,0 +1 @@
+# CONFIG_INPUT_MATRIXKMAP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_PSAUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_PSAUX
new file mode 100644
index 0000000000000000000000000000000000000000..9f3efab31946f2fa48deefc7e58a543aec17ce7c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_PSAUX
@@ -0,0 +1 @@
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_X b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_X
new file mode 100644
index 0000000000000000000000000000000000000000..2d2125c5e2d6608331bcad9925cdd2d99c3ab6ae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_X
@@ -0,0 +1 @@
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_Y b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_Y
new file mode 100644
index 0000000000000000000000000000000000000000..661c803b9298f19b288644e92dfdfd1ef9aba506
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_Y
@@ -0,0 +1 @@
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_SPARSEKMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_SPARSEKMAP
new file mode 100644
index 0000000000000000000000000000000000000000..5b212431a67ff8d621627614fe35c64ff09ff0db
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_SPARSEKMAP
@@ -0,0 +1 @@
+CONFIG_INPUT_SPARSEKMAP=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_VIVALDIFMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_VIVALDIFMAP
new file mode 100644
index 0000000000000000000000000000000000000000..3cec2d822f911c78c55e3087ce577a5da94b3d63
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_VIVALDIFMAP
@@ -0,0 +1 @@
+CONFIG_INPUT_VIVALDIFMAP=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEGRITY_MACHINE_KEYRING b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEGRITY_MACHINE_KEYRING
new file mode 100644
index 0000000000000000000000000000000000000000..513e1f806df5372012993e180c4e4c70716f6ed8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEGRITY_MACHINE_KEYRING
@@ -0,0 +1 @@
+# CONFIG_INTEGRITY_MACHINE_KEYRING is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEL_XWAY_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEL_XWAY_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..a21e3fff7af365f0d970f6a36afdf70e4b940e12
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEL_XWAY_PHY
@@ -0,0 +1 @@
+CONFIG_INTEL_XWAY_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERCONNECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERCONNECT
new file mode 100644
index 0000000000000000000000000000000000000000..44680093ffc2a73f44795de394ab12bba8035d31
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERCONNECT
@@ -0,0 +1 @@
+# CONFIG_INTERCONNECT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE
new file mode 100644
index 0000000000000000000000000000000000000000..d41cedd8495f4d8dea6d443a996d4101098671d7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE
@@ -0,0 +1 @@
+CONFIG_INTERVAL_TREE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE_TEST
new file mode 100644
index 0000000000000000000000000000000000000000..16bcd6d9dd4c6eb1999062efa956aa9ee56e1bd3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE_TEST
@@ -0,0 +1 @@
+# CONFIG_INTERVAL_TREE_TEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IOMMU_SVA b/anolis/configs/L2-OPTIONAL/default/CONFIG_IOMMU_SVA
new file mode 100644
index 0000000000000000000000000000000000000000..406b9486746da137bcd61e93f38f3a60ea5db27b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IOMMU_SVA
@@ -0,0 +1 @@
+CONFIG_IOMMU_SVA=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IONIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_IONIC
new file mode 100644
index 0000000000000000000000000000000000000000..d363a092732f8ff595f57c5eb8f4ad63ec2a4614
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IONIC
@@ -0,0 +1 @@
+# CONFIG_IONIC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IO_WQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IO_WQ
new file mode 100644
index 0000000000000000000000000000000000000000..b08ae18ca6c3ab4d9d4d19a537c6aeb561660997
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IO_WQ
@@ -0,0 +1 @@
+CONFIG_IO_WQ=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP5XXX_POWER b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP5XXX_POWER
new file mode 100644
index 0000000000000000000000000000000000000000..6198a67bfbb2af0b335cd897e1fecd8f300071ee
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP5XXX_POWER
@@ -0,0 +1 @@
+# CONFIG_IP5XXX_POWER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IPACK_BUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPACK_BUS
new file mode 100644
index 0000000000000000000000000000000000000000..6c582c83a872066f9e3616301bb5d9a1bff51456
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPACK_BUS
@@ -0,0 +1 @@
+# CONFIG_IPACK_BUS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IPV6_IOAM6_LWTUNNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPV6_IOAM6_LWTUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..c1839dbecffd9e8f47c75a6dfc2e24889102c42e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPV6_IOAM6_LWTUNNEL
@@ -0,0 +1 @@
+# CONFIG_IPV6_IOAM6_LWTUNNEL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IPVLAN_L3S b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPVLAN_L3S
new file mode 100644
index 0000000000000000000000000000000000000000..1beb2a49fd646d18c4538bc17939fa4ee06b44c9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPVLAN_L3S
@@ -0,0 +1 @@
+CONFIG_IPVLAN_L3S=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_MROUTE_COMMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_MROUTE_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..48d772ed5a33bbc1e419908d5ae5c9502fefde6c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_MROUTE_COMMON
@@ -0,0 +1 @@
+CONFIG_IP_MROUTE_COMMON=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_PNP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_PNP
new file mode 100644
index 0000000000000000000000000000000000000000..43f68080b72f56e52b0b06787ecfa7d399f56efd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_PNP
@@ -0,0 +1 @@
+# CONFIG_IP_PNP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_ROUTE_CLASSID b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_ROUTE_CLASSID
new file mode 100644
index 0000000000000000000000000000000000000000..5e45007dacf5bab8bd24e0f3d74fd4116d1e1d5d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_ROUTE_CLASSID
@@ -0,0 +1 @@
+CONFIG_IP_ROUTE_CLASSID=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_SCTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_SCTP
new file mode 100644
index 0000000000000000000000000000000000000000..cf97ee4f7028b2d1dda3432a25cfbc5711900133
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_SCTP
@@ -0,0 +1 @@
+CONFIG_IP_SCTP=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH
new file mode 100644
index 0000000000000000000000000000000000000000..7fd4c93204af6481484ee7efe6c04b756da01439
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH
@@ -0,0 +1 @@
+CONFIG_IP_VS_PROTO_AH=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH_ESP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH_ESP
new file mode 100644
index 0000000000000000000000000000000000000000..1264051d41c9dec2ac2a689686bf295b5f083bdc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH_ESP
@@ -0,0 +1 @@
+CONFIG_IP_VS_PROTO_AH_ESP=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_ESP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_ESP
new file mode 100644
index 0000000000000000000000000000000000000000..7f109769c0bf723f14628f8f0cc17f49f9413443
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_ESP
@@ -0,0 +1 @@
+CONFIG_IP_VS_PROTO_ESP=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_SCTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_SCTP
new file mode 100644
index 0000000000000000000000000000000000000000..2fae9319d6a6ea744bc2b2125d45318488348344
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_SCTP
@@ -0,0 +1 @@
+CONFIG_IP_VS_PROTO_SCTP=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_TCP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_TCP
new file mode 100644
index 0000000000000000000000000000000000000000..cd43245e2343a71b46dee94251823aaa65cbf20b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_TCP
@@ -0,0 +1 @@
+CONFIG_IP_VS_PROTO_TCP=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_UDP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_UDP
new file mode 100644
index 0000000000000000000000000000000000000000..bf6d4c1b1d7d3079980b95aba5f1c07697b54ac0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_UDP
@@ -0,0 +1 @@
+CONFIG_IP_VS_PROTO_UDP=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_TWOS b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_TWOS
new file mode 100644
index 0000000000000000000000000000000000000000..fa92cefd835ea244ab28bad4043bcb6449c7b39e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_TWOS
@@ -0,0 +1 @@
+# CONFIG_IP_VS_TWOS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQSOFF_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQSOFF_TRACER
new file mode 100644
index 0000000000000000000000000000000000000000..e04443c539ef5389420620b52d06a0d392f9616f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQSOFF_TRACER
@@ -0,0 +1 @@
+# CONFIG_IRQSOFF_TRACER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN
new file mode 100644
index 0000000000000000000000000000000000000000..88e18f18c92c7ebc091c29f79c3f9e269d8567eb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN
@@ -0,0 +1 @@
+CONFIG_IRQ_DOMAIN=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN_HIERARCHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN_HIERARCHY
new file mode 100644
index 0000000000000000000000000000000000000000..a7c5b6f50438065926759efb9274c4e7f14814a1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -0,0 +1 @@
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_FORCED_THREADING b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_FORCED_THREADING
new file mode 100644
index 0000000000000000000000000000000000000000..c69c8b694f3fdff1d72d2fe51bafd0d5cff2200a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_FORCED_THREADING
@@ -0,0 +1 @@
+CONFIG_IRQ_FORCED_THREADING=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_MSI_IOMMU b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_MSI_IOMMU
new file mode 100644
index 0000000000000000000000000000000000000000..aafc7eeb7bd65bb7dd5c2c61e93f7785a92aebb1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_MSI_IOMMU
@@ -0,0 +1 @@
+CONFIG_IRQ_MSI_IOMMU=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_POLL b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_POLL
new file mode 100644
index 0000000000000000000000000000000000000000..57deb13287cd33612ae357fb9f1feeea126dd429
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_POLL
@@ -0,0 +1 @@
+CONFIG_IRQ_POLL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_WORK b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_WORK
new file mode 100644
index 0000000000000000000000000000000000000000..375211307ad2e9fc9c7bae27d2982a0b48aa1867
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_WORK
@@ -0,0 +1 @@
+CONFIG_IRQ_WORK=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ISCSI_TARGET_CXGB4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ISCSI_TARGET_CXGB4
new file mode 100644
index 0000000000000000000000000000000000000000..6b68bab4148386c165ed66482499e6b3f0ad0bec
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ISCSI_TARGET_CXGB4
@@ -0,0 +1 @@
+CONFIG_ISCSI_TARGET_CXGB4=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_JFFS2_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_JFFS2_FS
new file mode 100644
index 0000000000000000000000000000000000000000..1093b23294446ccf2f98e7224c550505a451147c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_JFFS2_FS
@@ -0,0 +1 @@
+# CONFIG_JFFS2_FS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_JFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_JFS_FS
new file mode 100644
index 0000000000000000000000000000000000000000..b92ea893cb8e1fb751b3ee36dd8fd527108aeeaf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_JFS_FS
@@ -0,0 +1 @@
+# CONFIG_JFS_FS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_JME b/anolis/configs/L2-OPTIONAL/default/CONFIG_JME
new file mode 100644
index 0000000000000000000000000000000000000000..63c5aeb653fcaba155391084d798c6b626870af2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_JME
@@ -0,0 +1 @@
+# CONFIG_JME is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_BASE_RELATIVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_BASE_RELATIVE
new file mode 100644
index 0000000000000000000000000000000000000000..a90301e39dc2d23eea3fc5523ebfd652a35aa5af
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_BASE_RELATIVE
@@ -0,0 +1 @@
+CONFIG_KALLSYMS_BASE_RELATIVE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_SELFTEST
new file mode 100644
index 0000000000000000000000000000000000000000..6782a881cb6d2e5f11038658e61388ec5c93f1f2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_SELFTEST
@@ -0,0 +1 @@
+# CONFIG_KALLSYMS_SELFTEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5588 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5588
new file mode 100644
index 0000000000000000000000000000000000000000..8249429fa112fb551786329b55047bd6c8ab1547
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5588
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_ADP5588 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5589 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5589
new file mode 100644
index 0000000000000000000000000000000000000000..d4c770f47d2095178741b73cb3952c5766b481f9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5589
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_ADP5589 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_CYPRESS_SF b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_CYPRESS_SF
new file mode 100644
index 0000000000000000000000000000000000000000..368ac620faafd1ae0c173eb5069205d05933a64c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_CYPRESS_SF
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_CYPRESS_SF is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_DLINK_DIR685 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_DLINK_DIR685
new file mode 100644
index 0000000000000000000000000000000000000000..9f273ca53db41db22b63d31441022a2437543020
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_DLINK_DIR685
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_DLINK_DIR685 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_GPIO_POLLED b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_GPIO_POLLED
new file mode 100644
index 0000000000000000000000000000000000000000..33c4b140dfe2f9fbce4350d52b5ff4dd9c5f039f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_GPIO_POLLED
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LKKBD b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LKKBD
new file mode 100644
index 0000000000000000000000000000000000000000..2f71c1a03f9150ab045c700c749c87b9232b547f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LKKBD
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_LKKBD is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8323 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8323
new file mode 100644
index 0000000000000000000000000000000000000000..be40c9e2c9575f897d6eaaa907cea7f56b490f7b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8323
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_LM8323 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8333 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8333
new file mode 100644
index 0000000000000000000000000000000000000000..0c8d88d48bb66622c874f7d79a24e9421646f749
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8333
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_LM8333 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MATRIX b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MATRIX
new file mode 100644
index 0000000000000000000000000000000000000000..47a15a46386316f418281e1f40df1a641a0903a6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MATRIX
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_MATRIX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MAX7359 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MAX7359
new file mode 100644
index 0000000000000000000000000000000000000000..c2ac1e60a40534d35812aa848b15ae2b5f8206b9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MAX7359
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_MAX7359 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MCS b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MCS
new file mode 100644
index 0000000000000000000000000000000000000000..ea7fe67f8715fd5f934dd64f8a3cb58d6e07ff16
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MCS
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_MCS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MPR121 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MPR121
new file mode 100644
index 0000000000000000000000000000000000000000..7f335d05fd30daa861080ce985a6c968ebe4414b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MPR121
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_MPR121 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_NEWTON b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_NEWTON
new file mode 100644
index 0000000000000000000000000000000000000000..3f4e6520893c755111cd30fae3550a48ab4b2f60
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_NEWTON
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_NEWTON is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_OPENCORES b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_OPENCORES
new file mode 100644
index 0000000000000000000000000000000000000000..36e0f18f2407ef7ea2995d916bec5defd3a1eb79
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_OPENCORES
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_OPENCORES is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1050 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1050
new file mode 100644
index 0000000000000000000000000000000000000000..517dc46ebd7caf9db24c0f5b007f6f05006133ff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1050
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_QT1050 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1070 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1070
new file mode 100644
index 0000000000000000000000000000000000000000..7deb75f9af86b2a124d3528907806701f9081c52
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1070
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_QT1070 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT2160 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT2160
new file mode 100644
index 0000000000000000000000000000000000000000..4a7aaa4934c6632b528360c1adad6baf06242da4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT2160
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_QT2160 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SAMSUNG b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SAMSUNG
new file mode 100644
index 0000000000000000000000000000000000000000..591faaee08210c4d68881e4321eac06e83100b2d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SAMSUNG
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_SAMSUNG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_STOWAWAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_STOWAWAY
new file mode 100644
index 0000000000000000000000000000000000000000..1b8d883b9d2a7a6702e2711dddf6c42d5f47dbcc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_STOWAWAY
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_STOWAWAY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SUNKBD b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SUNKBD
new file mode 100644
index 0000000000000000000000000000000000000000..7ace2dc5334b1fa388c813726edcb3f729e99efd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SUNKBD
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_SUNKBD is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA6416 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA6416
new file mode 100644
index 0000000000000000000000000000000000000000..f2db3acca1581d377678522e8a48aa38bf89e9a4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA6416
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_TCA6416 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA8418 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA8418
new file mode 100644
index 0000000000000000000000000000000000000000..b853fc9da25c9ff70eda0ec35e9b70c6f538777e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA8418
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_TCA8418 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TM2_TOUCHKEY b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TM2_TOUCHKEY
new file mode 100644
index 0000000000000000000000000000000000000000..92da2d28489312fae4a4929fc69ecf86d39ad8f8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TM2_TOUCHKEY
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_XTKBD b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_XTKBD
new file mode 100644
index 0000000000000000000000000000000000000000..5e70361bcbfea0e87e1ff502a63ceab8536ea3b2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_XTKBD
@@ -0,0 +1 @@
+# CONFIG_KEYBOARD_XTKBD is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KPROBE_EVENT_GEN_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_KPROBE_EVENT_GEN_TEST
new file mode 100644
index 0000000000000000000000000000000000000000..01e6cc50de61469b54d7c16d62efbbac6827c586
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KPROBE_EVENT_GEN_TEST
@@ -0,0 +1 @@
+# CONFIG_KPROBE_EVENT_GEN_TEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KUNIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_KUNIT
new file mode 100644
index 0000000000000000000000000000000000000000..ce06a7ad37a74ac2b0f4ddba2d114149d631a2ba
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KUNIT
@@ -0,0 +1 @@
+# CONFIG_KUNIT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
new file mode 100644
index 0000000000000000000000000000000000000000..61cfae823b72d73268def8f114520a4a99a51e26
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
@@ -0,0 +1 @@
+CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_HARDWARE_ENABLING b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_HARDWARE_ENABLING
new file mode 100644
index 0000000000000000000000000000000000000000..43e7a9aaad721b5da4eff198bac8dd91c6709845
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_HARDWARE_ENABLING
@@ -0,0 +1 @@
+CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_MMIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_MMIO
new file mode 100644
index 0000000000000000000000000000000000000000..930fda45aa75b10ac5669dbb7c28e99d65254e2b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_MMIO
@@ -0,0 +1 @@
+CONFIG_KVM_MMIO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_VFIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_VFIO
new file mode 100644
index 0000000000000000000000000000000000000000..80cc4b9d23d2707493385d5b232a459d7c9469f4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_VFIO
@@ -0,0 +1 @@
+CONFIG_KVM_VFIO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_XFER_TO_GUEST_WORK b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_XFER_TO_GUEST_WORK
new file mode 100644
index 0000000000000000000000000000000000000000..f17cdfae993504564e2cb027386c0a938445da78
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -0,0 +1 @@
+CONFIG_KVM_XFER_TO_GUEST_WORK=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP
new file mode 100644
index 0000000000000000000000000000000000000000..04ed675319d6999838f5ec66ec0a3fcc7f520abf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP
@@ -0,0 +1 @@
+CONFIG_L2TP=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_DEBUGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_DEBUGFS
new file mode 100644
index 0000000000000000000000000000000000000000..776213a3b5fc988e2d0d85d38c937b40ab9979b7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_DEBUGFS
@@ -0,0 +1 @@
+CONFIG_L2TP_DEBUGFS=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_ETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_ETH
new file mode 100644
index 0000000000000000000000000000000000000000..ecca78c0d1d9f927650b5700c3ef017686eb8276
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_ETH
@@ -0,0 +1 @@
+CONFIG_L2TP_ETH=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_IP b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_IP
new file mode 100644
index 0000000000000000000000000000000000000000..00bcedf83cdff13840d0c32ff6abe8cb4deefa36
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_IP
@@ -0,0 +1 @@
+CONFIG_L2TP_IP=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_V3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_V3
new file mode 100644
index 0000000000000000000000000000000000000000..aaa7e44e9c1ca341fc884e3971819821c5642a68
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_V3
@@ -0,0 +1 @@
+CONFIG_L2TP_V3=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LAPB b/anolis/configs/L2-OPTIONAL/default/CONFIG_LAPB
new file mode 100644
index 0000000000000000000000000000000000000000..4279431a3aaf63dc3d85e0a2499dc67397765fe0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LAPB
@@ -0,0 +1 @@
+# CONFIG_LAPB is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LATENCYTOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_LATENCYTOP
new file mode 100644
index 0000000000000000000000000000000000000000..9d1f9b49d3a5d4847004822b048bad3f6b9f8e74
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LATENCYTOP
@@ -0,0 +1 @@
+# CONFIG_LATENCYTOP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LATTICE_ECP3_CONFIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_LATTICE_ECP3_CONFIG
new file mode 100644
index 0000000000000000000000000000000000000000..7212e8899b781b76719a080df0f004622ef64d16
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LATTICE_ECP3_CONFIG
@@ -0,0 +1 @@
+# CONFIG_LATTICE_ECP3_CONFIG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_AMS369FG06 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_AMS369FG06
new file mode 100644
index 0000000000000000000000000000000000000000..9fdcb8e361d7617c9b38d66d337a63705e30bf4c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_AMS369FG06
@@ -0,0 +1 @@
+# CONFIG_LCD_AMS369FG06 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_CLASS_DEVICE b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_CLASS_DEVICE
new file mode 100644
index 0000000000000000000000000000000000000000..b558d099950f2131178b7e0556967831f5dc103f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_CLASS_DEVICE
@@ -0,0 +1 @@
+CONFIG_LCD_CLASS_DEVICE=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_HX8357 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_HX8357
new file mode 100644
index 0000000000000000000000000000000000000000..26e06206449e2b63d7012b09fdd59423da84b86c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_HX8357
@@ -0,0 +1 @@
+# CONFIG_LCD_HX8357 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI922X b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI922X
new file mode 100644
index 0000000000000000000000000000000000000000..540e27d99c4dcb3cf140c39f111fa73088cc51a9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI922X
@@ -0,0 +1 @@
+# CONFIG_LCD_ILI922X is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI9320 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI9320
new file mode 100644
index 0000000000000000000000000000000000000000..a3beda73c5be8333a4a9370598f06fbd30885ed2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI9320
@@ -0,0 +1 @@
+# CONFIG_LCD_ILI9320 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_L4F00242T03 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_L4F00242T03
new file mode 100644
index 0000000000000000000000000000000000000000..393941bfaf100868ecce1e33662817593cab6a8d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_L4F00242T03
@@ -0,0 +1 @@
+# CONFIG_LCD_L4F00242T03 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS283GF05 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS283GF05
new file mode 100644
index 0000000000000000000000000000000000000000..dd444aac3305ce80b8dbc6321693d95a6161d727
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS283GF05
@@ -0,0 +1 @@
+# CONFIG_LCD_LMS283GF05 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS501KF03 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS501KF03
new file mode 100644
index 0000000000000000000000000000000000000000..1daabfaefba64ce4480407d79d8569004965526b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS501KF03
@@ -0,0 +1 @@
+# CONFIG_LCD_LMS501KF03 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LTV350QV b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LTV350QV
new file mode 100644
index 0000000000000000000000000000000000000000..d6b328870ad1ba877b7d8168b227a84cdec46ffb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LTV350QV
@@ -0,0 +1 @@
+# CONFIG_LCD_LTV350QV is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_OTM3225A b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_OTM3225A
new file mode 100644
index 0000000000000000000000000000000000000000..8ace97ec132d62f5450400027c25e44fc2fe8972
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_OTM3225A
@@ -0,0
+1 @@ +# CONFIG_LCD_OTM3225A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_PLATFORM b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_PLATFORM new file mode 100644 index 0000000000000000000000000000000000000000..b607ce508b5ffceb6e1328cb700f785f6bbefbfb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_PLATFORM @@ -0,0 +1 @@ +CONFIG_LCD_PLATFORM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_TDO24M b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_TDO24M new file mode 100644 index 0000000000000000000000000000000000000000..d75934134bed23e107887461c830322e0558cb41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_TDO24M @@ -0,0 +1 @@ +# CONFIG_LCD_TDO24M is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_VGG2432A4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_VGG2432A4 new file mode 100644 index 0000000000000000000000000000000000000000..812eb1cf3bdc0c0bd4cec91e8bf5863b20e60c3e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_VGG2432A4 @@ -0,0 +1 @@ +# CONFIG_LCD_VGG2432A4 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LDM_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_LDM_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..39c9fed31f2d5bd47c186cc199fb5ca31a8d6176 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LDM_PARTITION @@ -0,0 +1 @@ +# CONFIG_LDM_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_IS_BFD b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_IS_BFD new file mode 100644 index 0000000000000000000000000000000000000000..4805d9b35f03a4e105a3c856b19c4a9046f64011 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_IS_BFD @@ -0,0 +1 @@ +CONFIG_LD_IS_BFD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN new file mode 100644 index 0000000000000000000000000000000000000000..797671dc0ea164e054eec68e62071fce34fa464c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN @@ -0,0 +1 @@ +CONFIG_LD_ORPHAN_WARN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN_LEVEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN_LEVEL new file mode 100644 index 0000000000000000000000000000000000000000..111581a8888da5eaa755d84f97ec4efcad43c206 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN_LEVEL @@ -0,0 +1 @@ +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_VERSION new file mode 100644 index 0000000000000000000000000000000000000000..06cd1617130c223b92e699c45cf6917c0bff9c55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_VERSION @@ -0,0 +1 @@ +CONFIG_LD_VERSION=25000 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_AW200XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_AW200XX new file mode 100644 index 0000000000000000000000000000000000000000..7acfb44a9aa5b606152a76520b85416b15dcb5d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_AW200XX @@ -0,0 +1 @@ +# CONFIG_LEDS_AW200XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2606MVV b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2606MVV new file mode 100644 index 0000000000000000000000000000000000000000..4bad376c6aaffde1b067b63063c3c36b6cb178b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2606MVV @@ 
-0,0 +1 @@ +# CONFIG_LEDS_BD2606MVV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2802 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2802 new file mode 100644 index 0000000000000000000000000000000000000000..e8bc68ba9ed784a5da416f92310e980e82d569ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2802 @@ -0,0 +1 @@ +# CONFIG_LEDS_BD2802 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BLINKM b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BLINKM new file mode 100644 index 0000000000000000000000000000000000000000..31e5d7658fa5e1278b04548987c8d3e047712ae4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BLINKM @@ -0,0 +1 @@ +CONFIG_LEDS_BLINKM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BRIGHTNESS_HW_CHANGED b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BRIGHTNESS_HW_CHANGED new file mode 100644 index 0000000000000000000000000000000000000000..d2f9256b468fed750ce8dcf822e13b1ce16a6146 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BRIGHTNESS_HW_CHANGED @@ -0,0 +1 @@ +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS new file mode 100644 index 0000000000000000000000000000000000000000..6ab3be8d5dc71da44aa5c31593aa03ba1f073a34 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS @@ -0,0 +1 @@ +CONFIG_LEDS_CLASS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS_MULTICOLOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS_MULTICOLOR new file mode 100644 index 0000000000000000000000000000000000000000..159fd0d5c0c7e3ad86c6840769cc9b7b5a280afc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS_MULTICOLOR @@ -0,0 +1 @@ +# CONFIG_LEDS_CLASS_MULTICOLOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_DAC124S085 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_DAC124S085 new file mode 100644 index 0000000000000000000000000000000000000000..a26e77d7fd4178f9ab5566f889993ecab8cd3f8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_DAC124S085 @@ -0,0 +1 @@ +# CONFIG_LEDS_DAC124S085 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..f7c11aa949564e93ec37ce5061f660a9a970401b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_GPIO @@ -0,0 +1 @@ +# CONFIG_LEDS_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_IS31FL319X b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_IS31FL319X new file mode 100644 index 0000000000000000000000000000000000000000..1fa853d1c4c9cd959e6a5e946391380ea432c95f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_IS31FL319X @@ -0,0 +1 @@ +# CONFIG_LEDS_IS31FL319X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3530 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3530 new file mode 100644 index 0000000000000000000000000000000000000000..4ecc73b3498e1f005aecc086efd003922431c553 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3530 @@ -0,0 +1 @@ +CONFIG_LEDS_LM3530=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3532 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3532 new file mode 100644 index 0000000000000000000000000000000000000000..eb788f96e5a3b1211f1af299b492ddf8b0492168 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3532 @@ -0,0 +1 @@ +# CONFIG_LEDS_LM3532 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM355x b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM355x new file mode 100644 index 0000000000000000000000000000000000000000..4824c7640496ba0f144abcadd54970c43ac4a675 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM355x @@ -0,0 +1 @@ +# CONFIG_LEDS_LM355x is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3642 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3642 new file mode 100644 index 0000000000000000000000000000000000000000..0d6f2ea9a7308906e965900f47848ea4046d84c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3642 @@ -0,0 +1 @@ +# CONFIG_LEDS_LM3642 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3944 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3944 new file mode 100644 index 0000000000000000000000000000000000000000..25af7786239a798aaba98af11cdd11bdbba5b474 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3944 @@ -0,0 +1 @@ +CONFIG_LEDS_LP3944=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3952 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3952 new file mode 100644 index 0000000000000000000000000000000000000000..be5fbf7a9804bc37e808848a2175c7d7036d9a0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3952 @@ -0,0 +1 @@ +# CONFIG_LEDS_LP3952 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP50XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP50XX new file mode 100644 index 0000000000000000000000000000000000000000..99ee0f5990b4b6797007c2d49a26756d375148eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP50XX @@ -0,0 +1 @@ +# CONFIG_LEDS_LP50XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_MLXREG b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_MLXREG new file mode 100644 index 0000000000000000000000000000000000000000..30420c7833ae728c28f608fff2c23f543e8b2047 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_MLXREG @@ -0,0 +1 @@ +# CONFIG_LEDS_MLXREG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA9532 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA9532 new file mode 100644 index 0000000000000000000000000000000000000000..1c43dff3333e8d13c59ae74e32fb55acc85e2e69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA9532 @@ -0,0 +1 @@ +# CONFIG_LEDS_PCA9532 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA955X b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA955X new file mode 100644 index 0000000000000000000000000000000000000000..7660d64c170a499ef7d7d737ba5fb46f28f8628c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA955X @@ -0,0 +1 @@ +# CONFIG_LEDS_PCA955X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA963X b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA963X new file mode 100644 index 0000000000000000000000000000000000000000..4e351d6541af899835a8660491be882b2c1b8d52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA963X @@ -0,0 +1 @@ +# CONFIG_LEDS_PCA963X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA995X b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA995X new file mode 100644 index 0000000000000000000000000000000000000000..d5bca711d915326b79dad529a9db1573cfb77c18 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA995X @@ -0,0 +1 @@ +# CONFIG_LEDS_PCA995X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PWM b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PWM new file mode 100644 index 0000000000000000000000000000000000000000..da45a39e0af6d3c0dd0764c427195addf3bc3118 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PWM @@ -0,0 +1 @@ +# CONFIG_LEDS_PWM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TCA6507 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TCA6507 new file mode 100644 index 0000000000000000000000000000000000000000..074d795d8fd928f672759fe80660d3ab90bfdb41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TCA6507 @@ -0,0 +1 @@ +# CONFIG_LEDS_TCA6507 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TLC591XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TLC591XX new file mode 100644 index 0000000000000000000000000000000000000000..31a66a3fc8a9615d0c0a4d49d7bb94f3d179d639 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TLC591XX @@ -0,0 +1 @@ +# CONFIG_LEDS_TLC591XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGERS new file mode 100644 index 0000000000000000000000000000000000000000..5793ba35438178aa79782636ae3d32e77f1c8a2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGERS @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGERS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ACTIVITY b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ACTIVITY new file mode 100644 index 0000000000000000000000000000000000000000..2a67a187f2082e8d652fa1eed6b2f8bfbff60c90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ACTIVITY @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_BACKLIGHT b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_BACKLIGHT new file mode 100644 index 0000000000000000000000000000000000000000..714cf78df87123b9533c7d1dc74e681fd92e68bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_BACKLIGHT @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_BACKLIGHT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CAMERA b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CAMERA new file mode 100644 index 0000000000000000000000000000000000000000..09cfe4e9dd707c5e28071c5175668514b6bcabcc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CAMERA @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_CAMERA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CPU b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CPU new file mode 100644 index 0000000000000000000000000000000000000000..de9ab8230a9e100f666244d999a3214a6210e43a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CPU @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_CPU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_DEFAULT_ON b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_DEFAULT_ON new file mode 100644 index 0000000000000000000000000000000000000000..28cf72daa467695b1565962f5f852b67846a5fc1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_DEFAULT_ON @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_HEARTBEAT 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_HEARTBEAT new file mode 100644 index 0000000000000000000000000000000000000000..f1aa15726333247b1490b01f89e233754a9d2ab7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_HEARTBEAT @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_HEARTBEAT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_MTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_MTD new file mode 100644 index 0000000000000000000000000000000000000000..12e4f7f857f2cba4098137a92e0aa01398d42ac8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_MTD @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_MTD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_NETDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_NETDEV new file mode 100644 index 0000000000000000000000000000000000000000..cbd7cccc7bdf8fd648b58d9d4ba4f2613d2f42ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_NETDEV @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_NETDEV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ONESHOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ONESHOT new file mode 100644 index 0000000000000000000000000000000000000000..5f108ca76140a7b7c3e9c4087f184e0dfdc60643 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ONESHOT @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_ONESHOT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PANIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PANIC new file mode 100644 index 0000000000000000000000000000000000000000..f9e36c96e267c1a560a4fbbf020267af5d02347e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PANIC @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_PANIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PATTERN b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PATTERN new file mode 100644 index 0000000000000000000000000000000000000000..21b9a789c38870fb0c698beb9c5f2eab4c7fc6da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PATTERN @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_PATTERN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TIMER b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TIMER new file mode 100644 index 0000000000000000000000000000000000000000..b274bc418c72fd61e8977b6e829b456c010b8bf4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TIMER @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_TIMER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TRANSIENT b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TRANSIENT new file mode 100644 index 0000000000000000000000000000000000000000..0fb49a106797815594d0ed7c80d7ca07d6c6f45f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TRANSIENT @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_TRANSIENT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TTY b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TTY new file mode 100644 index 0000000000000000000000000000000000000000..0f997924116551be6b9f93c370d776f33e5d4868 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TTY @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_TTY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_USER b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_USER new file mode 100644 index 
0000000000000000000000000000000000000000..15eaa9cbba966e398f819da14f724bedf5406b36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_USER @@ -0,0 +1 @@ +# CONFIG_LEDS_USER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LED_TRIGGER_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_LED_TRIGGER_PHY new file mode 100644 index 0000000000000000000000000000000000000000..fb63bf4f8150e85d0fb81de04cbca521e52ed17c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LED_TRIGGER_PHY @@ -0,0 +1 @@ +CONFIG_LED_TRIGGER_PHY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_DIRECT_IO b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_DIRECT_IO new file mode 100644 index 0000000000000000000000000000000000000000..4ee74e5ffa6be7b8190d01d9f64a4e52787cb494 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_DIRECT_IO @@ -0,0 +1 @@ +CONFIG_LEGACY_DIRECT_IO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_PTYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_PTYS new file mode 100644 index 0000000000000000000000000000000000000000..ddc9171d6189c4bb1aaeef1ca960f2ca809ddc36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_PTYS @@ -0,0 +1 @@ +# CONFIG_LEGACY_PTYS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_TIOCSTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_TIOCSTI new file mode 100644 index 0000000000000000000000000000000000000000..c6373aba66366c82435bb26c019eb360eb6310eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_TIOCSTI @@ -0,0 +1 @@ +CONFIG_LEGACY_TIOCSTI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBCRC32C b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBCRC32C new file mode 100644 index 0000000000000000000000000000000000000000..ed0c7dfc719104156d60b578752aa8ba610e62fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBCRC32C @@ -0,0 +1 @@ +CONFIG_LIBCRC32C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBWX b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBWX new file mode 100644 index 0000000000000000000000000000000000000000..6d784c86d22f420f3f3c98a759e8d098b9174ea9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBWX @@ -0,0 +1 @@ +CONFIG_LIBWX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO new file mode 100644 index 0000000000000000000000000000000000000000..9a815fc0627e2297fe9bfbd7f4c5f3cf7e704269 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO @@ -0,0 +1 @@ +CONFIG_LIQUIDIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_CORE new file mode 100644 index 0000000000000000000000000000000000000000..2f9d0a1c6429432da00193b9995be38288959268 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_CORE @@ -0,0 +1 @@ +CONFIG_LIQUIDIO_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_VF b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_VF new file mode 100644 index 0000000000000000000000000000000000000000..c32f5e60c81e7c0b6049cd68a8145f6555d372ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_VF @@ -0,0 +1 @@ +CONFIG_LIQUIDIO_VF=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LKDTM b/anolis/configs/L2-OPTIONAL/default/CONFIG_LKDTM new file mode 100644 index 0000000000000000000000000000000000000000..5ac6135a922603c1663dab8a7929e589cee7a52f --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_LKDTM @@ -0,0 +1 @@ +# CONFIG_LKDTM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC new file mode 100644 index 0000000000000000000000000000000000000000..1e58a6325dd6e345b5b0c97db11f13755acbb7ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC @@ -0,0 +1 @@ +CONFIG_LLC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC2 new file mode 100644 index 0000000000000000000000000000000000000000..7b1143ffc782746921f2db7f51b9dac348fbebba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC2 @@ -0,0 +1 @@ +# CONFIG_LLC2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LLD_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLD_VERSION new file mode 100644 index 0000000000000000000000000000000000000000..339de241324e9f1fa231f0b1976c822ca6c3df72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLD_VERSION @@ -0,0 +1 @@ +CONFIG_LLD_VERSION=0 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LMK04832 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LMK04832 new file mode 100644 index 0000000000000000000000000000000000000000..563084c7c873d6690b1804b2d32e716313ff3c90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LMK04832 @@ -0,0 +1 @@ +# CONFIG_LMK04832 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_DEBUGGING_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_DEBUGGING_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..b7ff683eb3a059e7a07754849c5cc610d53608e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_DEBUGGING_SUPPORT @@ -0,0 +1 @@ +CONFIG_LOCK_DEBUGGING_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_MM_AND_FIND_VMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_MM_AND_FIND_VMA new file mode 100644 index 0000000000000000000000000000000000000000..2b43310193c37c0e5da266ebef762b680ff3f2a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_MM_AND_FIND_VMA @@ -0,0 +1 @@ +CONFIG_LOCK_MM_AND_FIND_VMA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_SPIN_ON_OWNER b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_SPIN_ON_OWNER new file mode 100644 index 0000000000000000000000000000000000000000..c00775144ca597ed55f89cdbe482ddcc74464a25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_SPIN_ON_OWNER @@ -0,0 +1 @@ +CONFIG_LOCK_SPIN_ON_OWNER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_STAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_STAT new file mode 100644 index 0000000000000000000000000000000000000000..ab733ddc26ab29e3caf1acea234e6dcd1a29faa2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_STAT @@ -0,0 +1 @@ +# CONFIG_LOCK_STAT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_TORTURE_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_TORTURE_TEST new file mode 100644 index 0000000000000000000000000000000000000000..8d2ede5fbd19fa431e00f4051ed105e501fc6f4d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_TORTURE_TEST @@ -0,0 +1 @@ +# CONFIG_LOCK_TORTURE_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIG940_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIG940_FF new file mode 100644 index 0000000000000000000000000000000000000000..c4201b052bb403a188529082d6d6a194efe859de --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIG940_FF @@ -0,0 +1 @@ +# CONFIG_LOGIG940_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIRUMBLEPAD2_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIRUMBLEPAD2_FF new file mode 100644 index 0000000000000000000000000000000000000000..07bc15222d58125ed6f496a672eaf17c2a424685 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIRUMBLEPAD2_FF @@ -0,0 +1 @@ +# CONFIG_LOGIRUMBLEPAD2_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGITECH_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGITECH_FF new file mode 100644 index 0000000000000000000000000000000000000000..0f258d20be17eb532ee5ec0d3b13c2a9aafb38d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGITECH_FF @@ -0,0 +1 @@ +# CONFIG_LOGITECH_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIWHEELS_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIWHEELS_FF new file mode 100644 index 0000000000000000000000000000000000000000..5730edeb0ffb01d8d68e2ba4a4b572980b7a1bff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIWHEELS_FF @@ -0,0 +1 @@ +# CONFIG_LOGIWHEELS_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO new file mode 100644 index 0000000000000000000000000000000000000000..9772c12e8197a648e2f8b58393c2edc1f6fc7aa1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO @@ -0,0 +1 @@ +CONFIG_LOGO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_CLUT224 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_CLUT224 new file mode 100644 index 0000000000000000000000000000000000000000..53fbc2986a7fe9c0b060daed46840cf365279209 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_CLUT224 @@ -0,0 +1 @@ +CONFIG_LOGO_LINUX_CLUT224=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_MONO b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_MONO new file mode 100644 index 0000000000000000000000000000000000000000..98033fe35bf719a85e35107b7bdb53bbf1b2138c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_MONO @@ -0,0 +1 @@ +# CONFIG_LOGO_LINUX_MONO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_VGA16 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_VGA16 new file mode 100644 index 0000000000000000000000000000000000000000..28fc94e9b8cb35d86de985dc81c64b7c604348cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_VGA16 @@ -0,0 +1 @@ +# CONFIG_LOGO_LINUX_VGA16 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LSI_ET1011C_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_LSI_ET1011C_PHY new file mode 100644 index 0000000000000000000000000000000000000000..d1c03f3a1fc77c49c76e8b13c6560292de306586 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LSI_ET1011C_PHY @@ -0,0 +1 @@ +CONFIG_LSI_ET1011C_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LXT_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_LXT_PHY new file mode 100644 index 0000000000000000000000000000000000000000..11a8eb8d2e58d7339700829d8d187554071d4fa5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LXT_PHY @@ -0,0 +1 @@ +CONFIG_LXT_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4HC_COMPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4HC_COMPRESS new file mode 100644 index 0000000000000000000000000000000000000000..5e792ce7701cccb674bff705a2bc1e8bde14d1e2 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4HC_COMPRESS @@ -0,0 +1 @@ +CONFIG_LZ4HC_COMPRESS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_COMPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_COMPRESS new file mode 100644 index 0000000000000000000000000000000000000000..d16694035c1e932ab4faef5ac35be3d87d3c12e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_COMPRESS @@ -0,0 +1 @@ +CONFIG_LZ4_COMPRESS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_DECOMPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_DECOMPRESS new file mode 100644 index 0000000000000000000000000000000000000000..f7f4d65e2b0371be5b7cea328b30f0d7e16d7106 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_DECOMPRESS @@ -0,0 +1 @@ +CONFIG_LZ4_DECOMPRESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_COMPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_COMPRESS new file mode 100644 index 0000000000000000000000000000000000000000..3da61c520b50d7b3614a7f29f2b9fdfb0f9100d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_COMPRESS @@ -0,0 +1 @@ +CONFIG_LZO_COMPRESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_DECOMPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_DECOMPRESS new file mode 100644 index 0000000000000000000000000000000000000000..322d2185e406d92808b6724d7b690f0c4d1c8cb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_DECOMPRESS @@ -0,0 +1 @@ +CONFIG_LZO_DECOMPRESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211 new file mode 100644 index 0000000000000000000000000000000000000000..eaf3124108b7dea226dad8f1456ad5153b19fddf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211 @@ -0,0 +1 @@ +CONFIG_MAC80211=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUGFS new file mode 100644 index 0000000000000000000000000000000000000000..704e88c8b0f86c18c7eb6d58c25bbb3faeae7d72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUGFS @@ -0,0 +1 @@ +CONFIG_MAC80211_DEBUGFS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUG_MENU b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUG_MENU new file mode 100644 index 0000000000000000000000000000000000000000..3d7fd5e3c9562648605087f8f02a668251b2f594 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUG_MENU @@ -0,0 +1 @@ +# CONFIG_MAC80211_DEBUG_MENU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_HAS_RC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_HAS_RC new file mode 100644 index 0000000000000000000000000000000000000000..8491b8f3093e597f07a95f010e79cfbdda68dcac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_HAS_RC @@ -0,0 +1 @@ +CONFIG_MAC80211_HAS_RC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_LEDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_LEDS new file mode 100644 index 0000000000000000000000000000000000000000..d4e90cabcb600d3861ce6f754171136ded25e0cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_LEDS @@ -0,0 +1 @@ +CONFIG_MAC80211_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESH b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESH new file mode 100644 index 0000000000000000000000000000000000000000..b322ebd5c158ce76a2dce9cec99ff836a760810e --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESH @@ -0,0 +1 @@ +# CONFIG_MAC80211_MESH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESSAGE_TRACING b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESSAGE_TRACING new file mode 100644 index 0000000000000000000000000000000000000000..5e52e182b7918e31ddc7d142ced392d7b9ae778b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESSAGE_TRACING @@ -0,0 +1 @@ +# CONFIG_MAC80211_MESSAGE_TRACING is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..e0c92ab2c8dbdf2ea4949f785b43fb3d97ab9d9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT @@ -0,0 +1 @@ +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT_MINSTREL b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT_MINSTREL new file mode 100644 index 0000000000000000000000000000000000000000..62bbca7615b47e6081051d8be9a66882e68b27b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT_MINSTREL @@ -0,0 +1 @@ +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_MINSTREL b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_MINSTREL new file mode 100644 index 0000000000000000000000000000000000000000..99bb0c8359607f1a8418e959dc7dca764fe37521 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_MINSTREL @@ -0,0 +1 @@ +CONFIG_MAC80211_RC_MINSTREL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_STA_HASH_MAX_SIZE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_STA_HASH_MAX_SIZE new file mode 100644 index 0000000000000000000000000000000000000000..c8e7c16b850bb2f916136d3066b1cf5b57dea827 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_STA_HASH_MAX_SIZE @@ -0,0 +1 @@ +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC802154 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC802154 new file mode 100644 index 0000000000000000000000000000000000000000..d63984cd58be28ce5f21d38d8f91aa211aa8fd18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC802154 @@ -0,0 +1 @@ +CONFIG_MAC802154=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAILBOX b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAILBOX new file mode 100644 index 0000000000000000000000000000000000000000..f1a6342b456a9222608699f27894890edf73449b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAILBOX @@ -0,0 +1 @@ +CONFIG_MAILBOX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MANAGER_SBS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MANAGER_SBS new file mode 100644 index 0000000000000000000000000000000000000000..aae8258e3e3690d0fd83a6cc6043cee4a4a8994b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MANAGER_SBS @@ -0,0 +1 @@ +# CONFIG_MANAGER_SBS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_10G_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_10G_PHY new file mode 100644 index 0000000000000000000000000000000000000000..6dadd98ffcdb17fb26c6bf1ee16e30084c16f710 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_10G_PHY @@ -0,0 +1 @@ +CONFIG_MARVELL_10G_PHY=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88Q2XXX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88Q2XXX_PHY new file mode 100644 index 0000000000000000000000000000000000000000..f5aeed40f24702bcdafd460ebddd7de380c1aab1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88Q2XXX_PHY @@ -0,0 +1 @@ +# CONFIG_MARVELL_88Q2XXX_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88X2222_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88X2222_PHY new file mode 100644 index 0000000000000000000000000000000000000000..dbcfd730e375c37c3aefd2c29b9660641ac838a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88X2222_PHY @@ -0,0 +1 @@ +# CONFIG_MARVELL_88X2222_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_PHY new file mode 100644 index 0000000000000000000000000000000000000000..e6aeb4de19f9527f863b945441115434a8d7ffea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_PHY @@ -0,0 +1 @@ +CONFIG_MARVELL_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX31827 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX31827 new file mode 100644 index 0000000000000000000000000000000000000000..6a6d25b02030005e1cf79dc7a53436274e7aa3d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX31827 @@ -0,0 +1 @@ +# CONFIG_MAX31827 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX63XX_WATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX63XX_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..cb71a58bd18ce31e71e2cfa895e43fa770c5d2f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX63XX_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_MAX63XX_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAXLINEAR_GPHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAXLINEAR_GPHY new file mode 100644 index 0000000000000000000000000000000000000000..c69d4261e8d003a4207829459a386fbd54e3b395 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAXLINEAR_GPHY @@ -0,0 +1 @@ +# CONFIG_MAXLINEAR_GPHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MCB b/anolis/configs/L2-OPTIONAL/default/CONFIG_MCB new file mode 100644 index 0000000000000000000000000000000000000000..775421e9629c85b4efd60e8085dddaba934837fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MCB @@ -0,0 +1 @@ +# CONFIG_MCB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MCTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MCTP new file mode 100644 index 0000000000000000000000000000000000000000..c0ad73c2d1137afa977b235382b2f60fd1c3647f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MCTP @@ -0,0 +1 @@ +# CONFIG_MCTP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO new file mode 100644 index 0000000000000000000000000000000000000000..c2915e9c3a4636b0ea04f0649dc2994f74d35336 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO @@ -0,0 +1 @@ +CONFIG_MDIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BCM_UNIMAC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BCM_UNIMAC new file mode 100644 index 0000000000000000000000000000000000000000..a0c92ceeb2e791d49392fa7d737774494100ff8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BCM_UNIMAC @@ -0,0 +1 @@ +CONFIG_MDIO_BCM_UNIMAC=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BITBANG b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BITBANG new file mode 100644 index 0000000000000000000000000000000000000000..06f4ddebc84d29a47988227a3c1c89685daf5f9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BITBANG @@ -0,0 +1 @@ +CONFIG_MDIO_BITBANG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BUS new file mode 100644 index 0000000000000000000000000000000000000000..00e812243dcc504c3325a6007e62a2a3802ca74b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BUS @@ -0,0 +1 @@ +CONFIG_MDIO_BUS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_CAVIUM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_CAVIUM new file mode 100644 index 0000000000000000000000000000000000000000..6ebe615162ae18694c992e21523120f759e18925 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_CAVIUM @@ -0,0 +1 @@ +CONFIG_MDIO_CAVIUM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVICE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVICE new file mode 100644 index 0000000000000000000000000000000000000000..07444b0be6920bb5afb8f33eb4e7f79585a7ba9b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVICE @@ -0,0 +1 @@ +CONFIG_MDIO_DEVICE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVRES b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVRES new file mode 100644 index 0000000000000000000000000000000000000000..6f2079183fb789f4d53203c841d83adc6cfe8c51 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVRES @@ -0,0 +1 @@ +CONFIG_MDIO_DEVRES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_I2C new file mode 100644 index 0000000000000000000000000000000000000000..df7d9e02511a2d616fefb2512973b731f9998e53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_I2C @@ -0,0 +1 @@ +CONFIG_MDIO_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_MVUSB b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_MVUSB new file mode 100644 index 0000000000000000000000000000000000000000..dd417ba959f35d83532f88835aa860f5c3923b65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_MVUSB @@ -0,0 +1 @@ +# CONFIG_MDIO_MVUSB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_THUNDER b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_THUNDER new file mode 100644 index 0000000000000000000000000000000000000000..00eb9a6baf8058a0206728dabdb9a102d155d37e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_THUNDER @@ -0,0 +1 @@ +CONFIG_MDIO_THUNDER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MD_BITMAP_FILE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MD_BITMAP_FILE new file mode 100644 index 0000000000000000000000000000000000000000..08867772ddd5820284de01f1b54d7294afa3d458 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MD_BITMAP_FILE @@ -0,0 +1 @@ +CONFIG_MD_BITMAP_FILE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEDIATEK_GE_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEDIATEK_GE_PHY new file mode 100644 index 0000000000000000000000000000000000000000..a28e8edef0d799d2fad1fa87bfa40f790364ae43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEDIATEK_GE_PHY @@ -0,0 +1 @@ +# CONFIG_MEDIATEK_GE_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMCG_KMEM 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMCG_KMEM new file mode 100644 index 0000000000000000000000000000000000000000..6304e9d807b9be00774cde3a8b6e3c1d755911db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMCG_KMEM @@ -0,0 +1 @@ +CONFIG_MEMCG_KMEM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMFD_CREATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMFD_CREATE new file mode 100644 index 0000000000000000000000000000000000000000..31a75bff9601b5ff74e20b11ace8dc5bfeeea2ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMFD_CREATE @@ -0,0 +1 @@ +CONFIG_MEMFD_CREATE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMORY_ISOLATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMORY_ISOLATION new file mode 100644 index 0000000000000000000000000000000000000000..9a288f900bfff739411227cfc57e5e4a9540c047 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMORY_ISOLATION @@ -0,0 +1 @@ +CONFIG_MEMORY_ISOLATION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMREGION b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMREGION new file mode 100644 index 0000000000000000000000000000000000000000..741d86477cd261c65c8a74fdc8c719d2e1f14c64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMREGION @@ -0,0 +1 @@ +CONFIG_MEMREGION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK new file mode 100644 index 0000000000000000000000000000000000000000..9ed5ed5f5c6515e193bdcbac8b7eae2f65d2d2ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK @@ -0,0 +1 @@ +CONFIG_MEMSTICK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..31f989fb93395fa3e4e884ca504e1d1dde9f1c96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_DEBUG @@ -0,0 +1 @@ +# CONFIG_MEMSTICK_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_JMICRON_38X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_JMICRON_38X new file mode 100644 index 0000000000000000000000000000000000000000..15743ca31997a526a7bda198c2415752b201ed50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_JMICRON_38X @@ -0,0 +1 @@ +CONFIG_MEMSTICK_JMICRON_38X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_R592 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_R592 new file mode 100644 index 0000000000000000000000000000000000000000..8aec8b70b2d4ab1e0cf8249c39b2e34a94e2b4a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_R592 @@ -0,0 +1 @@ +CONFIG_MEMSTICK_R592=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_TIFM_MS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_TIFM_MS new file mode 100644 index 0000000000000000000000000000000000000000..d6757770ac1b5f93fe120411bedf545304354037 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_TIFM_MS @@ -0,0 +1 @@ +CONFIG_MEMSTICK_TIFM_MS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_UNSAFE_RESUME b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_UNSAFE_RESUME new file mode 100644 index 0000000000000000000000000000000000000000..940b4c5a698a3abea049e42b09a0a8709160c1bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_UNSAFE_RESUME @@ -0,0 +1 @@ +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMTEST new file mode 100644 index 0000000000000000000000000000000000000000..27d7daee67340dd25cfdd843abd1f744bf276f00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMTEST @@ -0,0 +1 @@ +# CONFIG_MEMTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEN_A21_WDT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEN_A21_WDT new file mode 100644 index 0000000000000000000000000000000000000000..bc360bfab56cc6030801000ba4d0239d79494249 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEN_A21_WDT @@ -0,0 +1 @@ +# CONFIG_MEN_A21_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM800 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM800 new file mode 100644 index 0000000000000000000000000000000000000000..858dc2db1f28f1515e34aa59e1b22d140f582055 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM800 @@ -0,0 +1 @@ +# CONFIG_MFD_88PM800 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM805 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM805 new file mode 100644 index 0000000000000000000000000000000000000000..f3cd46c908e72acbb47bbf94ff1ef5d49af3aa33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM805 @@ -0,0 +1 @@ +# CONFIG_MFD_88PM805 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM860X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM860X new file mode 100644 index 0000000000000000000000000000000000000000..8591283715af4dc6d0ec2fa9658f69c361f5040b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM860X @@ -0,0 +1 @@ +# CONFIG_MFD_88PM860X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AAT2870_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AAT2870_CORE new file mode 100644 index 0000000000000000000000000000000000000000..9d4cf9056ea288081b261d7011324aed2b4d4cef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AAT2870_CORE @@ -0,0 +1 @@ +# CONFIG_MFD_AAT2870_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_I2C new file mode 100644 index 0000000000000000000000000000000000000000..63be91cb39471dde65f61feabbdc891c5e1b9fd4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_ARIZONA_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_SPI new file mode 100644 index 0000000000000000000000000000000000000000..e3e9c675871e1af6c808bf859646e5d1e8047c2d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_ARIZONA_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AS3711 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AS3711 new file mode 100644 index 0000000000000000000000000000000000000000..53b3b04f1e3013fa8a8138392e60040308d57147 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AS3711 @@ -0,0 +1 @@ +# CONFIG_MFD_AS3711 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ATC260X_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ATC260X_I2C new file mode 100644 index 0000000000000000000000000000000000000000..f24eab65148b1d4406fab18c114f958174b39836 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ATC260X_I2C @@ -0,0 +1 @@ +# 
CONFIG_MFD_ATC260X_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AXP20X_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AXP20X_I2C new file mode 100644 index 0000000000000000000000000000000000000000..e9bcb19b142b2490a988bc36da3e3379b73ee2a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AXP20X_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_AXP20X_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BCM590XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BCM590XX new file mode 100644 index 0000000000000000000000000000000000000000..104736d3861a514871748e77395328c24aa29e60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BCM590XX @@ -0,0 +1 @@ +# CONFIG_MFD_BCM590XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BD9571MWV b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BD9571MWV new file mode 100644 index 0000000000000000000000000000000000000000..d321ad3c62d6128872ce824a56f65f0b4e418d3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BD9571MWV @@ -0,0 +1 @@ +# CONFIG_MFD_BD9571MWV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_CS42L43_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_CS42L43_I2C new file mode 100644 index 0000000000000000000000000000000000000000..20a2316bfad7a52b2faa6ba479f609b812529e07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_CS42L43_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_CS42L43_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_I2C new file mode 100644 index 0000000000000000000000000000000000000000..c294bc24d5cc5d1244ee0d6094aa0787a78628d5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_DA9052_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_SPI new file mode 100644 index 0000000000000000000000000000000000000000..095062683cd70fc1f350196180bae85d1becff66 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_DA9052_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9055 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9055 new file mode 100644 index 0000000000000000000000000000000000000000..3f7ae6423011ae1090fac6150b325a90b2ced7f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9055 @@ -0,0 +1 @@ +# CONFIG_MFD_DA9055 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9062 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9062 new file mode 100644 index 0000000000000000000000000000000000000000..d605877c79afa65b860a08c2fcc7352f759962f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9062 @@ -0,0 +1 @@ +# CONFIG_MFD_DA9062 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9063 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9063 new file mode 100644 index 0000000000000000000000000000000000000000..ba3cfa7cbfffdd710fdf5f1f586a23676092ba31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9063 @@ -0,0 +1 @@ +# CONFIG_MFD_DA9063 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9150 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9150 new file mode 100644 index 0000000000000000000000000000000000000000..33d4eb529453065f29da2a91f29bc4109c559bb5 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9150 @@ -0,0 +1 @@ +# CONFIG_MFD_DA9150 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DLN2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DLN2 new file mode 100644 index 0000000000000000000000000000000000000000..b0c8ca366c64b86b706f4df79166c36410c9b9ca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DLN2 @@ -0,0 +1 @@ +# CONFIG_MFD_DLN2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_INTEL_M10_BMC_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_INTEL_M10_BMC_SPI new file mode 100644 index 0000000000000000000000000000000000000000..fa028d4fc507a8057f18db348c17726269ac1849 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_INTEL_M10_BMC_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_IQS62X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_IQS62X new file mode 100644 index 0000000000000000000000000000000000000000..c1f3356ac1e943bbb5e9e77432bb6d95f54c0819 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_IQS62X @@ -0,0 +1 @@ +# CONFIG_MFD_IQS62X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_JANZ_CMODIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_JANZ_CMODIO new file mode 100644 index 0000000000000000000000000000000000000000..c6926b8416da7332e4628bcae9492f32fdfa86f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_JANZ_CMODIO @@ -0,0 +1 @@ +# CONFIG_MFD_JANZ_CMODIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_KEMPLD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_KEMPLD new file mode 100644 index 0000000000000000000000000000000000000000..a94ded357c4eb74cfc021687b5a2d150088361a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_KEMPLD @@ -0,0 +1 @@ +# CONFIG_MFD_KEMPLD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LM3533 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LM3533 new file mode 100644 index 0000000000000000000000000000000000000000..0ab890140d9d0580a4460c003543d8c3a4c1876c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LM3533 @@ -0,0 +1 @@ +# CONFIG_MFD_LM3533 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP3943 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP3943 new file mode 100644 index 0000000000000000000000000000000000000000..de711a681b2fbf78239d83c3e1007b715bb94539 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP3943 @@ -0,0 +1 @@ +# CONFIG_MFD_LP3943 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP8788 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP8788 new file mode 100644 index 0000000000000000000000000000000000000000..4600a154f8847fa7b0db8f803af85fd5a21c7931 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP8788 @@ -0,0 +1 @@ +# CONFIG_MFD_LP8788 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MADERA b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MADERA new file mode 100644 index 0000000000000000000000000000000000000000..71aa85920f505b8d87856eda87ab97a5b62bea60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MADERA @@ -0,0 +1 @@ +# CONFIG_MFD_MADERA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX14577 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX14577 new file mode 100644 index 0000000000000000000000000000000000000000..ebbae19974622b28a1c7cbeb4d76fd0655c419dd --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX14577 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX14577 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77541 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77541 new file mode 100644 index 0000000000000000000000000000000000000000..9d07de075613e40cd01940c532367307ee4d103c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77541 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77541 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77693 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77693 new file mode 100644 index 0000000000000000000000000000000000000000..35769bd83e79d9e3821bccb8773f9803e95a7d07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77693 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77693 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77843 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77843 new file mode 100644 index 0000000000000000000000000000000000000000..487219b461580d2067261e89c46e07a34698ee44 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77843 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77843 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8907 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8907 new file mode 100644 index 0000000000000000000000000000000000000000..7921e7b735a3a9db9a7b1a57719a068c4cb14972 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8907 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX8907 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8925 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8925 new file mode 100644 index 0000000000000000000000000000000000000000..302fe8314b707798736f596b92a8fb819511ff24 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8925 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX8925 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8997 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8997 new file mode 100644 index 0000000000000000000000000000000000000000..bb749db3274f0efc15248e058addff434ffefbed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8997 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX8997 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8998 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8998 new file mode 100644 index 0000000000000000000000000000000000000000..9478aeba04a111bc143daa0274d974d510b05e89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8998 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX8998 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_I2C new file mode 100644 index 0000000000000000000000000000000000000000..cf8c1df5cf19e32f7a204c50c7dc5a3d150d8628 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_MC13XXX_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_SPI new file mode 100644 index 0000000000000000000000000000000000000000..a368bc8e68a473b8e5f7b88f8f67216704e1a914 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_MC13XXX_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MENF21BMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MENF21BMC new file mode 100644 index 
0000000000000000000000000000000000000000..378b77170c3fdd0f2c4e958f153cbf9c671387b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MENF21BMC @@ -0,0 +1 @@ +# CONFIG_MFD_MENF21BMC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MP2629 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MP2629 new file mode 100644 index 0000000000000000000000000000000000000000..2bbfa6798c28185191b09cbaabefa1118a2ba817 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MP2629 @@ -0,0 +1 @@ +# CONFIG_MFD_MP2629 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6360 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6360 new file mode 100644 index 0000000000000000000000000000000000000000..34ddea555002b83abb95fc12c7dbbebd34b1b3d5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6360 @@ -0,0 +1 @@ +# CONFIG_MFD_MT6360 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6370 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6370 new file mode 100644 index 0000000000000000000000000000000000000000..590822965b683172f25beb2a1d9792350e87e0c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6370 @@ -0,0 +1 @@ +# CONFIG_MFD_MT6370 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6397 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6397 new file mode 100644 index 0000000000000000000000000000000000000000..312fa668fe3abb8f8d7cba8ea7f25f0d4308704e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6397 @@ -0,0 +1 @@ +# CONFIG_MFD_MT6397 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_OCELOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_OCELOT new file mode 100644 index 0000000000000000000000000000000000000000..e3179a571497ded11d75cd3d268b9328e69713c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_OCELOT @@ -0,0 +1 @@ +# CONFIG_MFD_OCELOT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PALMAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PALMAS new file mode 100644 index 0000000000000000000000000000000000000000..e09bcbf18b90df7b58de1728cde4f73b1726dc1c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PALMAS @@ -0,0 +1 @@ +# CONFIG_MFD_PALMAS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PCF50633 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PCF50633 new file mode 100644 index 0000000000000000000000000000000000000000..d79315e31a59cd01cc3ec2e2af1b7031932d5b7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PCF50633 @@ -0,0 +1 @@ +# CONFIG_MFD_PCF50633 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RC5T583 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RC5T583 new file mode 100644 index 0000000000000000000000000000000000000000..f3976be092cb447f8e8a02d9a9ca1bd6f35bed28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RC5T583 @@ -0,0 +1 @@ +# CONFIG_MFD_RC5T583 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RDC321X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RDC321X new file mode 100644 index 0000000000000000000000000000000000000000..000bbf18d6e13b179a36837a9c35a0b163fdb0ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RDC321X @@ -0,0 +1 @@ +# CONFIG_MFD_RDC321X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RETU b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RETU new file mode 100644 index 
0000000000000000000000000000000000000000..bc6621f3301725bb06fc4b729d05ee733ccf4b6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RETU @@ -0,0 +1 @@ +# CONFIG_MFD_RETU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT4831 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT4831 new file mode 100644 index 0000000000000000000000000000000000000000..ac3a6f05e883041e63bedc327f56887b1a1a92c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT4831 @@ -0,0 +1 @@ +# CONFIG_MFD_RT4831 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5033 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5033 new file mode 100644 index 0000000000000000000000000000000000000000..6e156c85b1e0e1247b30afbd8bad802d6554d7c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5033 @@ -0,0 +1 @@ +# CONFIG_MFD_RT5033 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5120 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5120 new file mode 100644 index 0000000000000000000000000000000000000000..51ad42893eb0c879eb2fb70fb1843caeb5f6a917 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5120 @@ -0,0 +1 @@ +# CONFIG_MFD_RT5120 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SI476X_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SI476X_CORE new file mode 100644 index 0000000000000000000000000000000000000000..677f708982e6e3a32530d4b50de375ef7dd1e11d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SI476X_CORE @@ -0,0 +1 @@ +# CONFIG_MFD_SI476X_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SKY81452 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SKY81452 new file mode 100644 index 0000000000000000000000000000000000000000..37cbb32d4ca4159729c726d767464fd6b83acaf2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SKY81452 @@ -0,0 +1 @@ +# CONFIG_MFD_SKY81452 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SMPRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SMPRO new file mode 100644 index 0000000000000000000000000000000000000000..42478cf33f2b45a76ef7aa30e09172118a244a7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SMPRO @@ -0,0 +1 @@ +# CONFIG_MFD_SMPRO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SY7636A b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SY7636A new file mode 100644 index 0000000000000000000000000000000000000000..ad3a242f2e9c7c7b12d7182f3c8e3081e8d3c9ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SY7636A @@ -0,0 +1 @@ +# CONFIG_MFD_SY7636A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LMU b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LMU new file mode 100644 index 0000000000000000000000000000000000000000..4a84e3fd01f9d7c5846d0ab93a487553dd4d18e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LMU @@ -0,0 +1 @@ +# CONFIG_MFD_TI_LMU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LP873X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LP873X new file mode 100644 index 0000000000000000000000000000000000000000..55cab8a6a0b19459c1ddcce64e6f31fce6437963 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LP873X @@ -0,0 +1 @@ +# CONFIG_MFD_TI_LP873X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65086 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65086 new file mode 100644 index 
0000000000000000000000000000000000000000..8257e007649c4f2a717cc133bdfcc63529bdb991 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65086 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65086 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65090 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65090 new file mode 100644 index 0000000000000000000000000000000000000000..50e4f86dac2864cd053ff84b09a40a341158ddf1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65090 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65090 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6586X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6586X new file mode 100644 index 0000000000000000000000000000000000000000..142ccf9dd12c19a9c788042e89cc825eab5313b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6586X @@ -0,0 +1 @@ +# CONFIG_MFD_TPS6586X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65910 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65910 new file mode 100644 index 0000000000000000000000000000000000000000..d4a3d009110ea5c68c42c207e4e30ad83643ec2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65910 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65910 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_I2C new file mode 100644 index 0000000000000000000000000000000000000000..a5cb98430f9cabc5b17f051ded80d588f94fd563 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65912_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_SPI new file mode 100644 index 0000000000000000000000000000000000000000..99856458b5b6ff90468fc57d6a43f53ad59e2e21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65912_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_I2C new file mode 100644 index 0000000000000000000000000000000000000000..e86265f9648e834a1ff673ddd36305d30ebb8b9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_TPS6594_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_SPI new file mode 100644 index 0000000000000000000000000000000000000000..0386f325e0a2b5edfc7f4a5f252dfb4bfb0259bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_TPS6594_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TQMX86 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TQMX86 new file mode 100644 index 0000000000000000000000000000000000000000..af8cb255f1963b0f9d1586ce1cedcdfb6ccce3f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TQMX86 @@ -0,0 +1 @@ +# CONFIG_MFD_TQMX86 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WL1273_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WL1273_CORE new file mode 100644 index 0000000000000000000000000000000000000000..cfdc6c17b12e86eaa5f9214ad0455966d1ddade6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WL1273_CORE @@ -0,0 +1 @@ +# CONFIG_MFD_WL1273_CORE is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_I2C new file mode 100644 index 0000000000000000000000000000000000000000..5134deeb3d161949b2a8c5c109f37dfc28e08044 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_WM831X_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_SPI new file mode 100644 index 0000000000000000000000000000000000000000..443939c2d4a3a8cf68c8f922497b38b9becf8d77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_WM831X_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8350_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8350_I2C new file mode 100644 index 0000000000000000000000000000000000000000..9da9a94d8e9f5dfd0e784505905cfd5250406592 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8350_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_WM8350_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8400 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8400 new file mode 100644 index 0000000000000000000000000000000000000000..555799f97c84088cb8825d1f9790a41898d3cb26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8400 @@ -0,0 +1 @@ +# CONFIG_MFD_WM8400 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8994 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8994 new file mode 100644 index 0000000000000000000000000000000000000000..678e15d01d5cd4b6875e7554109bcb13ec0a6ff3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8994 @@ -0,0 +1 @@ +# CONFIG_MFD_WM8994 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS new file mode 100644 index 0000000000000000000000000000000000000000..bdccdbabcb0626d8b96f2a722739d913ed78cf75 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS @@ -0,0 +1 @@ +# CONFIG_MHI_BUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS_EP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS_EP new file mode 100644 index 0000000000000000000000000000000000000000..5aba2828c381bb4024719849455f4bbc4355f62d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS_EP @@ -0,0 +1 @@ +# CONFIG_MHI_BUS_EP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MHP_MEMMAP_ON_MEMORY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHP_MEMMAP_ON_MEMORY new file mode 100644 index 0000000000000000000000000000000000000000..5674a00848380ca41eb4a3b51c509799e2811851 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHP_MEMMAP_ON_MEMORY @@ -0,0 +1 @@ +CONFIG_MHP_MEMMAP_ON_MEMORY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MICREL_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICREL_PHY new file mode 100644 index 0000000000000000000000000000000000000000..f82ae918420f45602c66a47c27dae6ffde6560f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICREL_PHY @@ -0,0 +1 @@ +CONFIG_MICREL_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_PHY new file mode 100644 index 0000000000000000000000000000000000000000..6b800d4d4b14d0c8329e4f3ba3abc0c8e27205ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_PHY @@ -0,0 +1 @@ +CONFIG_MICROCHIP_PHY=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1S_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1S_PHY new file mode 100644 index 0000000000000000000000000000000000000000..601562cd991b1018d9cb82c1f9e13d0f6fc3ecb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1S_PHY @@ -0,0 +1 @@ +# CONFIG_MICROCHIP_T1S_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1_PHY new file mode 100644 index 0000000000000000000000000000000000000000..39cad732d644693f16cc143113e5e212bd4e524c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1_PHY @@ -0,0 +1 @@ +CONFIG_MICROCHIP_T1_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSEMI_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSEMI_PHY new file mode 100644 index 0000000000000000000000000000000000000000..0e171f614fefae8d880800323ba64f84df2c1262 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSEMI_PHY @@ -0,0 +1 @@ +CONFIG_MICROSEMI_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSOFT_MANA b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSOFT_MANA new file mode 100644 index 0000000000000000000000000000000000000000..ad0b10509f9250a6f2dc0252bbb8206b637bf8d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSOFT_MANA @@ -0,0 +1 @@ +# CONFIG_MICROSOFT_MANA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MII b/anolis/configs/L2-OPTIONAL/default/CONFIG_MII new file mode 100644 index 0000000000000000000000000000000000000000..b7d4f657c118debe8d41d6946189e6b7c80f2f50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MII @@ -0,0 +1 @@ +CONFIG_MII=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MINIX_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MINIX_FS new file mode 100644 index 0000000000000000000000000000000000000000..d407e6e09318b8e97c6659770aafe834286243e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MINIX_FS @@ -0,0 +1 @@ +# CONFIG_MINIX_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MISC_ALCOR_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MISC_ALCOR_PCI new file mode 100644 index 0000000000000000000000000000000000000000..6d2cb18a77f977ccdf91c6f3505dbef5d77254ca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MISC_ALCOR_PCI @@ -0,0 +1 @@ +# CONFIG_MISC_ALCOR_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX4_INFINIBAND b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX4_INFINIBAND new file mode 100644 index 0000000000000000000000000000000000000000..4f7e86412b01597b97049320830023e4dd0e113a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX4_INFINIBAND @@ -0,0 +1 @@ +CONFIG_MLX4_INFINIBAND=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_IPSEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_IPSEC new file mode 100644 index 0000000000000000000000000000000000000000..ba2658e5ae521cd2deeba199f5519a0ef91368c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_IPSEC @@ -0,0 +1 @@ +# CONFIG_MLX5_EN_IPSEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_TLS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_TLS new file mode 100644 index 0000000000000000000000000000000000000000..75b19d32aa4238552bdbe66bf3a231b333c9bf10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_TLS @@ -0,0 +1 @@ +# CONFIG_MLX5_EN_TLS is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_MACSEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_MACSEC new file mode 100644 index 0000000000000000000000000000000000000000..c1e8375c10fb67b2d45c920b0f26acf5a421b2e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_MACSEC @@ -0,0 +1 @@ +# CONFIG_MLX5_MACSEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_VFIO_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_VFIO_PCI new file mode 100644 index 0000000000000000000000000000000000000000..2ee229cc030b09e059b19743d6a5bfb823ca19aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_VFIO_PCI @@ -0,0 +1 @@ +# CONFIG_MLX5_VFIO_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC new file mode 100644 index 0000000000000000000000000000000000000000..af952ae6546e560a9c94ce615c07642459a27fb2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC @@ -0,0 +1 @@ +CONFIG_MMC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK new file mode 100644 index 0000000000000000000000000000000000000000..c89e86066e724b190f3fb4ce5d19d1f18b81c554 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK @@ -0,0 +1 @@ +CONFIG_MMC_BLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK_MINORS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK_MINORS new file mode 100644 index 0000000000000000000000000000000000000000..9aad9741a7018ca5062659a0b1538697f2a36770 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK_MINORS @@ -0,0 +1 @@ +CONFIG_MMC_BLOCK_MINORS=8 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CB710 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CB710 new file mode 100644 index 0000000000000000000000000000000000000000..b315628e437ad3383608a918a7c7170300753d8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CB710 @@ -0,0 +1 @@ +CONFIG_MMC_CB710=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CQHCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CQHCI new file mode 100644 index 0000000000000000000000000000000000000000..3ecbe65d00998f537f9c15e1af7b1cc0f5b7d229 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CQHCI @@ -0,0 +1 @@ +CONFIG_MMC_CQHCI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..678bd75ed43d82d033f6b9546232cb85f8a2f631 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_DEBUG @@ -0,0 +1 @@ +# CONFIG_MMC_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_HSQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_HSQ new file mode 100644 index 0000000000000000000000000000000000000000..d3a1fc7067fa9df6abbd144c047d586d05aff64e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_HSQ @@ -0,0 +1 @@ +# CONFIG_MMC_HSQ is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_RICOH_MMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_RICOH_MMC new file mode 100644 index 0000000000000000000000000000000000000000..1899ff9a0b78cbd25f862c30b8bd29428c955f8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_RICOH_MMC @@ -0,0 +1 @@ +CONFIG_MMC_RICOH_MMC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI new file mode 
100644 index 0000000000000000000000000000000000000000..dd2edd0c647bd2af2fdff5801f7fe971ab4f3087 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI @@ -0,0 +1 @@ +CONFIG_MMC_SDHCI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_ACPI new file mode 100644 index 0000000000000000000000000000000000000000..065143892306188a2de491e2fc5f94b52740efdf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_ACPI @@ -0,0 +1 @@ +CONFIG_MMC_SDHCI_ACPI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_F_SDH30 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_F_SDH30 new file mode 100644 index 0000000000000000000000000000000000000000..00c6ab2f58ab058a10f310a27fc41153c81d0b4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_F_SDH30 @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_F_SDH30 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_IO_ACCESSORS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_IO_ACCESSORS new file mode 100644 index 0000000000000000000000000000000000000000..2395a1e3e41c5424c216ccbbcdc12132b7426319 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_IO_ACCESSORS @@ -0,0 +1 @@ +CONFIG_MMC_SDHCI_IO_ACCESSORS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PCI new file mode 100644 index 0000000000000000000000000000000000000000..8800540d35933aa00aeaefdb6bd4139f3ef64c33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PCI @@ -0,0 +1 @@ +CONFIG_MMC_SDHCI_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PLTFM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PLTFM new file mode 100644 index 0000000000000000000000000000000000000000..59c122bf5e5c5c9b9c420d2b0f26359af6e211da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PLTFM @@ -0,0 +1 @@ +CONFIG_MMC_SDHCI_PLTFM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_XENON b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_XENON new file mode 100644 index 0000000000000000000000000000000000000000..19a13aadeee0af5521bb143c96b69d4b2377dac4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_XENON @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_XENON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SPI new file mode 100644 index 0000000000000000000000000000000000000000..30837d93e98f7dfe731fa27a8bad9af5e07a4b83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SPI @@ -0,0 +1 @@ +# CONFIG_MMC_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TEST new file mode 100644 index 0000000000000000000000000000000000000000..6032d28f91b3e3e7f699248d58a399074cdd7c33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TEST @@ -0,0 +1 @@ +# CONFIG_MMC_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TIFM_SD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TIFM_SD new file mode 100644 index 0000000000000000000000000000000000000000..6e0f4ae1eab632bc5ddd6e49c7b8b2bda9a9fcdb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TIFM_SD @@ -0,0 +1 @@ +CONFIG_MMC_TIFM_SD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USDHI6ROL0 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USDHI6ROL0 
new file mode 100644 index 0000000000000000000000000000000000000000..94d6a41394f7b1fc0f94dfafba59e2691e55c05a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USDHI6ROL0 @@ -0,0 +1 @@ +# CONFIG_MMC_USDHI6ROL0 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USHC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USHC new file mode 100644 index 0000000000000000000000000000000000000000..1baab6557946e86898ea99e1eade8bfa1688cc70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USHC @@ -0,0 +1 @@ +CONFIG_MMC_USHC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VIA_SDMMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VIA_SDMMC new file mode 100644 index 0000000000000000000000000000000000000000..88b7c20e7deab8b4ffea2e3eae1f72f82b3b86cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VIA_SDMMC @@ -0,0 +1 @@ +CONFIG_MMC_VIA_SDMMC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VUB300 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VUB300 new file mode 100644 index 0000000000000000000000000000000000000000..72839ca5f13ee9dd7db5f8ab908d6fc7249ef348 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VUB300 @@ -0,0 +1 @@ +CONFIG_MMC_VUB300=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_RCU_TABLE_FREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_RCU_TABLE_FREE new file mode 100644 index 0000000000000000000000000000000000000000..c8fc27c15961bac14af2d6db06c8f8a7b608322d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_RCU_TABLE_FREE @@ -0,0 +1 @@ +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_TABLE_FREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_TABLE_FREE new file mode 100644 index 0000000000000000000000000000000000000000..eacf2dc4696f8d4f2719050c4c2d2afbf56d5b64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_TABLE_FREE @@ -0,0 +1 @@ +CONFIG_MMU_GATHER_TABLE_FREE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_LAZY_TLB_REFCOUNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_LAZY_TLB_REFCOUNT new file mode 100644 index 0000000000000000000000000000000000000000..c8fbb01fffb4f40bb0cb880a6df25a90b578d4e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_LAZY_TLB_REFCOUNT @@ -0,0 +1 @@ +CONFIG_MMU_LAZY_TLB_REFCOUNT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_NOTIFIER b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_NOTIFIER new file mode 100644 index 0000000000000000000000000000000000000000..2c93a2aaa1113ed97fcb3739a056dec31b123def --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_NOTIFIER @@ -0,0 +1 @@ +CONFIG_MMU_NOTIFIER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_TREE_LOOKUP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_TREE_LOOKUP new file mode 100644 index 0000000000000000000000000000000000000000..d24f8dbaf270c922b6ad9f8838e941936fc8da6c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_TREE_LOOKUP @@ -0,0 +1 @@ +CONFIG_MODULES_TREE_LOOKUP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_USE_ELF_RELA b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_USE_ELF_RELA new file mode 100644 index 0000000000000000000000000000000000000000..8f1882ca43e8b607264adb7c407a20990b8ec315 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_USE_ELF_RELA @@ -0,0 +1 @@ +CONFIG_MODULES_USE_ELF_RELA=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_ALLOW_BTF_MISMATCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_ALLOW_BTF_MISMATCH new file mode 100644 index 0000000000000000000000000000000000000000..18722eb4707e4f0f81a5a5305577f385fdc89ac2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_ALLOW_BTF_MISMATCH @@ -0,0 +1 @@ +# CONFIG_MODULE_ALLOW_BTF_MISMATCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_GZIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_GZIP new file mode 100644 index 0000000000000000000000000000000000000000..01c9c5435ecb0f5d2670ee3adebab6639dbd816b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_GZIP @@ -0,0 +1 @@ +# CONFIG_MODULE_COMPRESS_GZIP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_NONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_NONE new file mode 100644 index 0000000000000000000000000000000000000000..2168cd5666da7a32d1c24931ecc545f5f40c3a4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_NONE @@ -0,0 +1 @@ +CONFIG_MODULE_COMPRESS_NONE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_XZ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_XZ new file mode 100644 index 0000000000000000000000000000000000000000..d554be9bc551f723be4922a0f96a537ed3513c34 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_XZ @@ -0,0 +1 @@ +# CONFIG_MODULE_COMPRESS_XZ is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_ZSTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_ZSTD new file mode 100644 index 0000000000000000000000000000000000000000..4f5f059b8d8d0ec5d94671d5e5ca615327c641d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_ZSTD @@ -0,0 +1 @@ +# CONFIG_MODULE_COMPRESS_ZSTD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..2cdde2aa1df39d3780f42056c89b4c147db5e912 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_DEBUG @@ -0,0 +1 @@ +# CONFIG_MODULE_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_FORMAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_FORMAT new file mode 100644 index 0000000000000000000000000000000000000000..96c6d879efa046906737c8916df19a817c4af0d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_FORMAT @@ -0,0 +1 @@ +CONFIG_MODULE_SIG_FORMAT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_ECDSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_ECDSA new file mode 100644 index 0000000000000000000000000000000000000000..a436b46985a330bca559a1bd52d4d430bca417c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_ECDSA @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_RSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_RSA new file mode 100644 index 0000000000000000000000000000000000000000..ba723e51c9d83f4ffdb5d3853e68036e7316e585 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_RSA @@ -0,0 +1 @@ +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_UNLOAD_TAINT_TRACKING 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_UNLOAD_TAINT_TRACKING new file mode 100644 index 0000000000000000000000000000000000000000..63575ab294f03ecc043eb438c6f93cb4d3767ede --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_UNLOAD_TAINT_TRACKING @@ -0,0 +1 @@ +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOST b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOST new file mode 100644 index 0000000000000000000000000000000000000000..db207dfc5d9f4c5304d5889297cca9d13c1f045c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOST @@ -0,0 +1 @@ +# CONFIG_MOST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOTORCOMM_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOTORCOMM_PHY new file mode 100644 index 0000000000000000000000000000000000000000..6bbd457ada248d8a9cb8dde5c71d8a81ef7eab6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOTORCOMM_PHY @@ -0,0 +1 @@ +# CONFIG_MOTORCOMM_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C new file mode 100644 index 0000000000000000000000000000000000000000..cab50daec803e238a6e8b36b7404fadd198be269 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C @@ -0,0 +1 @@ +CONFIG_MOUSE_ELAN_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C_I2C new file mode 100644 index 0000000000000000000000000000000000000000..3bfdd5743c644595d31b98a2ebdc6f3a06b7b138 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C_I2C @@ -0,0 +1 @@ +CONFIG_MOUSE_ELAN_I2C_I2C=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..9aefee3c8caf107c5237d61795b3ae890a6f9304 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_GPIO @@ -0,0 +1 @@ +# CONFIG_MOUSE_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_ALPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_ALPS new file mode 100644 index 0000000000000000000000000000000000000000..a05fe8474326e91a42b2d026696023c5b925203e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_ALPS @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_ALPS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_BYD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_BYD new file mode 100644 index 0000000000000000000000000000000000000000..11d592e3cde6119c439e138004601243185667cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_BYD @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_BYD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_CYPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_CYPRESS new file mode 100644 index 0000000000000000000000000000000000000000..794b2cd427b515b98bd5d82433a5a0c031c8c01e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_CYPRESS @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_CYPRESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_FOCALTECH b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_FOCALTECH new file mode 100644 index 0000000000000000000000000000000000000000..69ec669317211d872ecfc47bc5e74d4be1414b5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_FOCALTECH @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_FOCALTECH=y diff 
--git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_LOGIPS2PP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_LOGIPS2PP new file mode 100644 index 0000000000000000000000000000000000000000..9edffa280c718211535d53f4f600f7ae485b7964 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_LOGIPS2PP @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_LOGIPS2PP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SMBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SMBUS new file mode 100644 index 0000000000000000000000000000000000000000..5aa556188ac7e910f1509183008cd4a9b731e83b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SMBUS @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_SMBUS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SYNAPTICS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SYNAPTICS new file mode 100644 index 0000000000000000000000000000000000000000..2ca9a3519561de1b711710b503d5c513d779ef5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SYNAPTICS @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_SYNAPTICS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS new file mode 100644 index 0000000000000000000000000000000000000000..cf7fad2095975fa655887a72d1c5be375db0e33b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_TOUCHKIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_TOUCHKIT new file mode 100644 index 0000000000000000000000000000000000000000..10b1f05ac2907ab64014ce8e927f99c418aff34f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_TOUCHKIT @@ -0,0 +1 @@ +# CONFIG_MOUSE_PS2_TOUCHKIT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_TRACKPOINT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_TRACKPOINT new file mode 100644 index 0000000000000000000000000000000000000000..9316b0c6a3d3b19f774e1afd1266827950bbc942 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_TRACKPOINT @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_TRACKPOINT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_I2C new file mode 100644 index 0000000000000000000000000000000000000000..7368088e6b3df259ffdb2034b85fe28ef0bfba36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_I2C @@ -0,0 +1 @@ +CONFIG_MOUSE_SYNAPTICS_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_USB b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_USB new file mode 100644 index 0000000000000000000000000000000000000000..175daf73793e2fbf87e35a038f200fc22f1f838e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_USB @@ -0,0 +1 @@ +CONFIG_MOUSE_SYNAPTICS_USB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_INTELLIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_INTELLIO new file mode 100644 index 0000000000000000000000000000000000000000..cad28e8d19a45af757636933b63218430aa201c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_INTELLIO @@ -0,0 +1 @@ +# CONFIG_MOXA_INTELLIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_SMARTIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_SMARTIO new file mode 100644 index 
0000000000000000000000000000000000000000..cd82be48cbb1f851b4a6ee7ebeaf3293effb1e2f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_SMARTIO @@ -0,0 +1 @@ +# CONFIG_MOXA_SMARTIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MPILIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_MPILIB new file mode 100644 index 0000000000000000000000000000000000000000..a41ce5d0eaf7e85158ee0f0af57c29ce421dee3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MPILIB @@ -0,0 +1 @@ +CONFIG_MPILIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MRP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MRP new file mode 100644 index 0000000000000000000000000000000000000000..515a828b329d140d8de222ad81ebf8b0c1994c3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MRP @@ -0,0 +1 @@ +CONFIG_MRP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MSDOS_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSDOS_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..dda33929172c81cd8b5ca7805771afbedc98f46a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSDOS_PARTITION @@ -0,0 +1 @@ +CONFIG_MSDOS_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MSE102X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSE102X new file mode 100644 index 0000000000000000000000000000000000000000..198777735ec2d5caa5b26e61050f3499275bd550 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSE102X @@ -0,0 +1 @@ +# CONFIG_MSE102X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MSPRO_BLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSPRO_BLOCK new file mode 100644 index 0000000000000000000000000000000000000000..94c242c790c00db40b22108b393d0e5133eb4944 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSPRO_BLOCK @@ -0,0 +1 @@ +CONFIG_MSPRO_BLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MS_BLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_MS_BLOCK new file mode 100644 index 0000000000000000000000000000000000000000..5628a82fc6f4487e69203d25cad1ab6721627e72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MS_BLOCK @@ -0,0 +1 @@ +# CONFIG_MS_BLOCK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ABSENT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ABSENT new file mode 100644 index 0000000000000000000000000000000000000000..243168a3440fd204ca920cf0f0589be3185f88e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ABSENT @@ -0,0 +1 @@ +# CONFIG_MTD_ABSENT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_AR7_PARTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_AR7_PARTS new file mode 100644 index 0000000000000000000000000000000000000000..a9369c2a1abc0567ead8a707c66bcc23907014cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_AR7_PARTS @@ -0,0 +1 @@ +# CONFIG_MTD_AR7_PARTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLKDEVS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLKDEVS new file mode 100644 index 0000000000000000000000000000000000000000..b59074744f6e3d31e27c807ead8661bd2e203cd7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLKDEVS @@ -0,0 +1 @@ +CONFIG_MTD_BLKDEVS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK new file mode 100644 index 0000000000000000000000000000000000000000..74f992fdaf8d13c97962193bc232b6dc953162ce --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK @@ -0,0 +1 @@ +CONFIG_MTD_BLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK2MTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK2MTD new file mode 100644 index 0000000000000000000000000000000000000000..4a942ccb5fb2acdf94dc8831eeb5f131043a02da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK2MTD @@ -0,0 +1 @@ +# CONFIG_MTD_BLOCK2MTD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK_RO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK_RO new file mode 100644 index 0000000000000000000000000000000000000000..d49910a2738fe9762d122e67a6035f73ebcee994 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK_RO @@ -0,0 +1 @@ +# CONFIG_MTD_BLOCK_RO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I1 new file mode 100644 index 0000000000000000000000000000000000000000..8976f930f33f1e50b0f7acf043931534717bdbf3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I1 @@ -0,0 +1 @@ +CONFIG_MTD_CFI_I1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I2 new file mode 100644 index 0000000000000000000000000000000000000000..24fdbdfe22fc9e04d6aa86364fb5f5c4b8694a91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I2 @@ -0,0 +1 @@ +CONFIG_MTD_CFI_I2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CMDLINE_PARTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CMDLINE_PARTS new file mode 100644 index 0000000000000000000000000000000000000000..63ac9245bb534cf78d281a9020e58095461ca563 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CMDLINE_PARTS @@ -0,0 +1 @@ +# CONFIG_MTD_CMDLINE_PARTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_COMPLEX_MAPPINGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_COMPLEX_MAPPINGS new file mode 100644 index 0000000000000000000000000000000000000000..650cf2c1e99a93fdbf2c9354a3952430f54cdd6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_COMPLEX_MAPPINGS @@ -0,0 +1 @@ +# CONFIG_MTD_COMPLEX_MAPPINGS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DATAFLASH b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DATAFLASH new file mode 100644 index 0000000000000000000000000000000000000000..c31b6910b45996c3586d32cf338a96bf78085c76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DATAFLASH @@ -0,0 +1 @@ +# CONFIG_MTD_DATAFLASH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DOCG3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DOCG3 new file mode 100644 index 0000000000000000000000000000000000000000..4e4b41f4ab46240dc655ed0c14535187db0f91a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DOCG3 @@ -0,0 +1 @@ +# CONFIG_MTD_DOCG3 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_HYPERBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_HYPERBUS new file mode 100644 index 0000000000000000000000000000000000000000..e9e0a97331d98091fcbce93caee918ff0747afc8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_HYPERBUS @@ -0,0 +1 @@ +# CONFIG_MTD_HYPERBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_INTEL_VR_NOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_INTEL_VR_NOR new file mode 100644 index 
0000000000000000000000000000000000000000..7099c7895f8f20f8e34880695344184e7fbf74ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_INTEL_VR_NOR @@ -0,0 +1 @@ +# CONFIG_MTD_INTEL_VR_NOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_JEDECPROBE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_JEDECPROBE new file mode 100644 index 0000000000000000000000000000000000000000..1af3e12625799b654b8e9edb9fe5eb53a6722479 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_JEDECPROBE @@ -0,0 +1 @@ +# CONFIG_MTD_JEDECPROBE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_LPDDR b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_LPDDR new file mode 100644 index 0000000000000000000000000000000000000000..eebb90ae3858ed501cf38d1d6a8b27b40d1f1dc1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_LPDDR @@ -0,0 +1 @@ +# CONFIG_MTD_LPDDR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_1 new file mode 100644 index 0000000000000000000000000000000000000000..bcfdd7b7a451c45e5c9ffe302fba8081a6028226 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_1 @@ -0,0 +1 @@ +CONFIG_MTD_MAP_BANK_WIDTH_1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_2 new file mode 100644 index 0000000000000000000000000000000000000000..21d0f95b273c51c4a307b5fd2c5ba1cb36b09788 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_2 @@ -0,0 +1 @@ +CONFIG_MTD_MAP_BANK_WIDTH_2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_4 new file mode 100644 index 0000000000000000000000000000000000000000..e98e1687e1fc80346390033cf2e44139450ee67e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_4 @@ -0,0 +1 @@ +CONFIG_MTD_MAP_BANK_WIDTH_4=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP23K256 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP23K256 new file mode 100644 index 0000000000000000000000000000000000000000..ed6627e35c871db7e514c1ffd75cbefbe7200d05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP23K256 @@ -0,0 +1 @@ +# CONFIG_MTD_MCHP23K256 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP48L640 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP48L640 new file mode 100644 index 0000000000000000000000000000000000000000..e496f9040955318439044b09c3d387be64149107 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP48L640 @@ -0,0 +1 @@ +# CONFIG_MTD_MCHP48L640 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MTDRAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MTDRAM new file mode 100644 index 0000000000000000000000000000000000000000..70da5d08cd4559630a0168062433171922fa0ce7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MTDRAM @@ -0,0 +1 @@ +# CONFIG_MTD_MTDRAM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_MXIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_MXIC new file mode 100644 index 0000000000000000000000000000000000000000..4e6b42035febe1e20f93eb578bed486fbcda4c4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_MXIC @@ -0,0 +1 @@ +# CONFIG_MTD_NAND_ECC_MXIC is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_BCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_BCH
new file mode 100644
index 0000000000000000000000000000000000000000..68691d93530ac7d1464bb5e88e1ea0385fe7597a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_BCH
@@ -0,0 +1 @@
+# CONFIG_MTD_NAND_ECC_SW_BCH is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_HAMMING b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_HAMMING
new file mode 100644
index 0000000000000000000000000000000000000000..81d0f3e93ba85c50d20f7a972d6ad6ffeb1ddc47
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_HAMMING
@@ -0,0 +1 @@
+# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ONENAND b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ONENAND
new file mode 100644
index 0000000000000000000000000000000000000000..7f0bdb85a5f10a6e0243b5a286779af3c77214f8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ONENAND
@@ -0,0 +1 @@
+# CONFIG_MTD_ONENAND is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_OOPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_OOPS
new file mode 100644
index 0000000000000000000000000000000000000000..b6c42a73aeae55a0b961d0b71c115a19a53f30d6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_OOPS
@@ -0,0 +1 @@
+# CONFIG_MTD_OOPS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PARTITIONED_MASTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PARTITIONED_MASTER
new file mode 100644
index 0000000000000000000000000000000000000000..118d5ecd72b6f9298fef164f8bb594b4c8fabea6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PARTITIONED_MASTER
@@ -0,0 +1 @@
+# CONFIG_MTD_PARTITIONED_MASTER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PHRAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PHRAM
new file mode 100644
index 0000000000000000000000000000000000000000..5ce19c74da920afe4c17dd0920b5e7f30d4abdd8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PHRAM
@@ -0,0 +1 @@
+# CONFIG_MTD_PHRAM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PLATRAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PLATRAM
new file mode 100644
index 0000000000000000000000000000000000000000..dd004c169a494d0b002d1c9820a6790a7339c305
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PLATRAM
@@ -0,0 +1 @@
+# CONFIG_MTD_PLATRAM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PMC551 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PMC551
new file mode 100644
index 0000000000000000000000000000000000000000..6bf4ad0b570dc0ffddb3e2ba22ebe075a7099fe7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PMC551
@@ -0,0 +1 @@
+# CONFIG_MTD_PMC551 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAM
new file mode 100644
index 0000000000000000000000000000000000000000..151de3c5249c2999b86dd8b5f7a86ea3a55a7d4c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAM
@@ -0,0 +1 @@
+# CONFIG_MTD_RAM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAW_NAND b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAW_NAND
new file mode 100644
index 0000000000000000000000000000000000000000..cbd84d732dece074fbd1f347da03d7598e4a4198
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAW_NAND
@@ -0,0 +1 @@
+# CONFIG_MTD_RAW_NAND is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_REDBOOT_PARTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_REDBOOT_PARTS
new file mode 100644
index 0000000000000000000000000000000000000000..8e87e0d6fd9d9dbb339088f2d226f35567bddc6a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_REDBOOT_PARTS
@@ -0,0 +1 @@
+# CONFIG_MTD_REDBOOT_PARTS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ROM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ROM
new file mode 100644
index 0000000000000000000000000000000000000000..1efb84d75cbf8168750f12153e0b4b35f367c6b8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ROM
@@ -0,0 +1 @@
+# CONFIG_MTD_ROM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SLRAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SLRAM
new file mode 100644
index 0000000000000000000000000000000000000000..09db4e8e749c76fc2950c8ceb8c981c1d738bd76
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SLRAM
@@ -0,0 +1 @@
+# CONFIG_MTD_SLRAM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NAND b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NAND
new file mode 100644
index 0000000000000000000000000000000000000000..6df5264428b7dbc06122e79508817b4b61e3da5f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NAND
@@ -0,0 +1 @@
+# CONFIG_MTD_SPI_NAND is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SST25L b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SST25L
new file mode 100644
index 0000000000000000000000000000000000000000..0a42bba26ffc85a70c3a7a590647ba13d86a1bb1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SST25L
@@ -0,0 +1 @@
+# CONFIG_MTD_SST25L is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SWAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SWAP
new file mode 100644
index 0000000000000000000000000000000000000000..d5892dbb7fb989668895a9723481f68e8f730be0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SWAP
@@ -0,0 +1 @@
+# CONFIG_MTD_SWAP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_TESTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_TESTS
new file mode 100644
index 0000000000000000000000000000000000000000..bca68acd69fae78c5758bcebb61ea9117bc05801
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_TESTS
@@ -0,0 +1 @@
+# CONFIG_MTD_TESTS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI
new file mode 100644
index 0000000000000000000000000000000000000000..d610c466a3fd92827fb58762e740b03ed02707a7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI
@@ -0,0 +1 @@
+CONFIG_MTD_UBI=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BEB_LIMIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BEB_LIMIT
new file mode 100644
index 0000000000000000000000000000000000000000..69372005c4dad10e8f4e38d50ee590276416dc36
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BEB_LIMIT
@@ -0,0 +1 @@
+CONFIG_MTD_UBI_BEB_LIMIT=20
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BLOCK
new file mode 100644
index 0000000000000000000000000000000000000000..3b7526c293f9fbbed8bcc15dc9c1aeb901747851
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BLOCK
@@ -0,0 +1 @@
+# CONFIG_MTD_UBI_BLOCK is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_FASTMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_FASTMAP
new file mode 100644
index 0000000000000000000000000000000000000000..f7a8769e73f2c04bd3bf2bb3e4006b7c4260bf2e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_FASTMAP
@@ -0,0 +1 @@
+# CONFIG_MTD_UBI_FASTMAP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_GLUEBI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_GLUEBI
new file mode 100644
index 0000000000000000000000000000000000000000..8b3dd1030f6dc6975bae6e10f99c0d6bc7786ce9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_GLUEBI
@@ -0,0 +1 @@
+# CONFIG_MTD_UBI_GLUEBI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_WL_THRESHOLD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_WL_THRESHOLD
new file mode 100644
index 0000000000000000000000000000000000000000..184d7026df8505b3474a4bda617b9a72780f2312
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_WL_THRESHOLD
@@ -0,0 +1 @@
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MUTEX_SPIN_ON_OWNER b/anolis/configs/L2-OPTIONAL/default/CONFIG_MUTEX_SPIN_ON_OWNER
new file mode 100644
index 0000000000000000000000000000000000000000..46806d5badde92e1978d7d99c57d3a40867216da
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MUTEX_SPIN_ON_OWNER
@@ -0,0 +1 @@
+CONFIG_MUTEX_SPIN_ON_OWNER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NATIONAL_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NATIONAL_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..7bd6621d80aa706bf6001c95226b4cb7ab5a53ce
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NATIONAL_PHY
@@ -0,0 +1 @@
+CONFIG_NATIONAL_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NCE
new file mode 100644
index 0000000000000000000000000000000000000000..2ea655b13d63276b2c3596c4943b01655d49fbff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NCE
@@ -0,0 +1 @@
+CONFIG_NCE=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NCN26000_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NCN26000_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..d16d92953dc0ea44f84959b7c11cfe21e297b510
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NCN26000_PHY
@@ -0,0 +1 @@
+# CONFIG_NCN26000_PHY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ND_CLAIM b/anolis/configs/L2-OPTIONAL/default/CONFIG_ND_CLAIM
new file mode 100644
index 0000000000000000000000000000000000000000..79464b155706776edd2b2f21486916c0827463d8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ND_CLAIM
@@ -0,0 +1 @@
+CONFIG_ND_CLAIM=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6X b/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6X
new file mode 100644
index 0000000000000000000000000000000000000000..46fca559be2ffcb7d69d0a2db12006bf2dd7639c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6X
@@ -0,0 +1 @@
+CONFIG_NE6X=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6XVF b/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6XVF
new file mode 100644
index 0000000000000000000000000000000000000000..e2f1ffded64622b918a43709fa8037fa53c76b65
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6XVF
@@ -0,0 +1 @@
+CONFIG_NE6XVF=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_DMA_MAP_STATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_DMA_MAP_STATE
new file mode 100644
index 0000000000000000000000000000000000000000..086b740b0c0e1a77818a351c76317c5245a92253
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_DMA_MAP_STATE
@@ -0,0 +1 @@
+CONFIG_NEED_DMA_MAP_STATE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
new file mode 100644
index 0000000000000000000000000000000000000000..b795ff30a196037e93c26ec66a9fe8b7473a5737
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
@@ -0,0 +1 @@
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
new file mode 100644
index 0000000000000000000000000000000000000000..aff172e8e109a012adf10f23c1272744e508c26e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
@@ -0,0 +1 @@
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_FLAGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_FLAGS
new file mode 100644
index 0000000000000000000000000000000000000000..46c8245b45bae1cdf9324bf0de20960a51e86f20
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_FLAGS
@@ -0,0 +1 @@
+CONFIG_NEED_SG_DMA_FLAGS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_LENGTH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_LENGTH
new file mode 100644
index 0000000000000000000000000000000000000000..63f7327a1fbc590270007afdb91a9f9962aa3014
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_LENGTH
@@ -0,0 +1 @@
+CONFIG_NEED_SG_DMA_LENGTH=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETCONSOLE_EXTENDED_LOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETCONSOLE_EXTENDED_LOG
new file mode 100644
index 0000000000000000000000000000000000000000..0791a32f491c901ba67a4e7cfd47ca37c6963d87
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETCONSOLE_EXTENDED_LOG
@@ -0,0 +1 @@
+# CONFIG_NETCONSOLE_EXTENDED_LOG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_BPF_LINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_BPF_LINK
new file mode 100644
index 0000000000000000000000000000000000000000..551bef0dacc47cb2e088b53e8ef6ee67f14a01dc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_BPF_LINK
@@ -0,0 +1 @@
+CONFIG_NETFILTER_BPF_LINK=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_NETLINK_HOOK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_NETLINK_HOOK
new file mode 100644
index 0000000000000000000000000000000000000000..9f6748bb605d63cd05648cf5b09159500d08a794
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_NETLINK_HOOK
@@ -0,0 +1 @@
+# CONFIG_NETFILTER_NETLINK_HOOK is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_SKIP_EGRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_SKIP_EGRESS
new file mode 100644
index 0000000000000000000000000000000000000000..e1be1fcebc63194d4839b0a7032447f1eba4b7fe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_SKIP_EGRESS
@@ -0,0 +1 @@
+CONFIG_NETFILTER_SKIP_EGRESS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_XTABLES_COMPAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_XTABLES_COMPAT
new file mode 100644
index 0000000000000000000000000000000000000000..95501c0673138f83bf95db5c1ae3d43a09975e99
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_XTABLES_COMPAT
@@ -0,0 +1 @@
+# CONFIG_NETFILTER_XTABLES_COMPAT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETPOLL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETPOLL
new file mode 100644
index 0000000000000000000000000000000000000000..1740767a7b96a39e5de67b7f926bc352e505e9ee
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETPOLL
@@ -0,0 +1 @@
+CONFIG_NETPOLL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETXEN_NIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETXEN_NIC
new file mode 100644
index 0000000000000000000000000000000000000000..0ecefb6637271af5f63736df1cd617bc171b955d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETXEN_NIC
@@ -0,0 +1 @@
+CONFIG_NETXEN_NIC=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_9P b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_9P
new file mode 100644
index 0000000000000000000000000000000000000000..2fe70dcce82d2b74a44cd0b1163acd97f377254e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_9P
@@ -0,0 +1 @@
+# CONFIG_NET_9P is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEVLINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEVLINK
new file mode 100644
index 0000000000000000000000000000000000000000..a27d13004ba0bf172d08ac3ca3027e6b33ece140
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEVLINK
@@ -0,0 +1 @@
+CONFIG_NET_DEVLINK=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEV_REFCNT_TRACKER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEV_REFCNT_TRACKER
new file mode 100644
index 0000000000000000000000000000000000000000..e13c9133b3d9efcd138a53a0aa54397b925c1062
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEV_REFCNT_TRACKER
@@ -0,0 +1 @@
+# CONFIG_NET_DEV_REFCNT_TRACKER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DSA
new file mode 100644
index 0000000000000000000000000000000000000000..afa1d60e979d2c28d0a6cc3e2fcf954d929de41b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DSA
@@ -0,0 +1 @@
+# CONFIG_NET_DSA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_EGRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_EGRESS
new file mode 100644
index 0000000000000000000000000000000000000000..67aba9ef8f9e58a861dc9390b9290a6ee8056cec
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_EGRESS
@@ -0,0 +1 @@
+CONFIG_NET_EGRESS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FLOW_LIMIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FLOW_LIMIT
new file mode 100644
index 0000000000000000000000000000000000000000..466100548f7dcd2dd8b7324fa84fdae7512b551f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FLOW_LIMIT
@@ -0,0 +1 @@
+CONFIG_NET_FLOW_LIMIT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU
new file mode 100644
index 0000000000000000000000000000000000000000..12f5c083633ab9e8bf18025857557f4ad7360886
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU
@@ -0,0 +1 @@
+# CONFIG_NET_FOU is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU_IP_TUNNELS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU_IP_TUNNELS
new file mode 100644
index 0000000000000000000000000000000000000000..b2cb0a55c76377876165b46b0d5dd6fd278eace5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU_IP_TUNNELS
@@ -0,0 +1 @@
+# CONFIG_NET_FOU_IP_TUNNELS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_HANDSHAKE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_HANDSHAKE
new file mode 100644
index 0000000000000000000000000000000000000000..1b0aced97004c422c0674c0d995c7641cb901604
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_HANDSHAKE
@@ -0,0 +1 @@
+CONFIG_NET_HANDSHAKE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IFE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IFE
new file mode 100644
index 0000000000000000000000000000000000000000..3925520244b26e72e926f7750eaed51a37e8e264
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IFE
@@ -0,0 +1 @@
+# CONFIG_NET_IFE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_INGRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_INGRESS
new file mode 100644
index 0000000000000000000000000000000000000000..541df22a00797bf240c928973e898e488101fd5f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_INGRESS
@@ -0,0 +1 @@
+CONFIG_NET_INGRESS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IPVTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IPVTI
new file mode 100644
index 0000000000000000000000000000000000000000..e6268e22d85e46a8084f9186eb7310772d45f107
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IPVTI
@@ -0,0 +1 @@
+CONFIG_NET_IPVTI=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IP_TUNNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IP_TUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..e8d57916ba1ea875967f192a9af38f899368ed79
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IP_TUNNEL
@@ -0,0 +1 @@
+CONFIG_NET_IP_TUNNEL=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NCSI
new file mode 100644
index 0000000000000000000000000000000000000000..53eaf483628ab12b81a97954efb0d1f995d2b762
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NCSI
@@ -0,0 +1 @@
+# CONFIG_NET_NCSI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NS_REFCNT_TRACKER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NS_REFCNT_TRACKER
new file mode 100644
index 0000000000000000000000000000000000000000..152a143df5298616a64b3795d04065393ae6fd8f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NS_REFCNT_TRACKER
@@ -0,0 +1 @@
+# CONFIG_NET_NS_REFCNT_TRACKER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_POLL_CONTROLLER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_POLL_CONTROLLER
new file mode 100644
index 0000000000000000000000000000000000000000..681844676a79937720ce8b59873d8f6656b72610
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_POLL_CONTROLLER
@@ -0,0 +1 @@
+CONFIG_NET_POLL_CONTROLLER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_REDIRECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_REDIRECT
new file mode 100644
index 0000000000000000000000000000000000000000..089417665917f2b2bd026b04c7de826ce7bd6336
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_REDIRECT
@@ -0,0 +1 @@
+CONFIG_NET_REDIRECT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_RX_BUSY_POLL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_RX_BUSY_POLL
new file mode 100644
index 0000000000000000000000000000000000000000..7c8ec0f83b8c4876ebc292b0f876ab25ebc1f70f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_RX_BUSY_POLL
@@ -0,0 +1 @@
+CONFIG_NET_RX_BUSY_POLL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SB1000 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SB1000
new file mode 100644
index 0000000000000000000000000000000000000000..c776a1581c0498bfdcbc58697b2479b4a85b05c9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SB1000
@@ -0,0 +1 @@
+# CONFIG_NET_SB1000 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_FIFO b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_FIFO
new file mode 100644
index 0000000000000000000000000000000000000000..315c5d560e6391754f7c48b976e8cee969b5013a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_FIFO
@@ -0,0 +1 @@
+CONFIG_NET_SCH_FIFO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_MQPRIO_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_MQPRIO_LIB
new file mode 100644
index 0000000000000000000000000000000000000000..78e5df30d76b0c2e0b880d2a0ae0bbf61c55ab1b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_MQPRIO_LIB
@@ -0,0 +1 @@
+CONFIG_NET_SCH_MQPRIO_LIB=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SELFTESTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SELFTESTS
new file mode 100644
index 0000000000000000000000000000000000000000..0018559cc43026db5430cb09353bf226ae492bc0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SELFTESTS
@@ -0,0 +1 @@
+CONFIG_NET_SELFTESTS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SOCK_MSG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SOCK_MSG
new file mode 100644
index 0000000000000000000000000000000000000000..188ef3975f3fa9028b37b07977827b1171c46300
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SOCK_MSG
@@ -0,0 +1 @@
+CONFIG_NET_SOCK_MSG=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SWITCHDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SWITCHDEV
new file mode 100644
index 0000000000000000000000000000000000000000..f4f578e755fce9fc1ca2adc009ae52faaa0858eb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SWITCHDEV
@@ -0,0 +1 @@
+CONFIG_NET_SWITCHDEV=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_UDP_TUNNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_UDP_TUNNEL
new file mode 100644
index 0000000000000000000000000000000000000000..1a84ce40f09d237b7ae21961d70bf7bce231ca68
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_UDP_TUNNEL
@@ -0,0 +1 @@
+CONFIG_NET_UDP_TUNNEL=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_3COM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_3COM
new file mode 100644
index 0000000000000000000000000000000000000000..ed6108c188cb2a08eede1ee8de00a6c9de0b2d5c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_3COM
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_3COM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADAPTEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADAPTEC
new file mode 100644
index 0000000000000000000000000000000000000000..c96958732776ad4f8dad914cc8d92ef0ae3179e9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADAPTEC
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_ADAPTEC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADI
new file mode 100644
index 0000000000000000000000000000000000000000..1316ed732394021ee96c8e8a43a10d9a2785b5b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADI
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_ADI=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AGERE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AGERE
new file mode 100644
index 0000000000000000000000000000000000000000..3647271ce91156e7fecf1529b3aa349feace64f9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AGERE
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_AGERE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALACRITECH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALACRITECH
new file mode 100644
index 0000000000000000000000000000000000000000..f1a98f8bb81615e3ced8a77e1d9dad3aaa1c94e8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALACRITECH
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_ALACRITECH is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALTEON b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALTEON
new file mode 100644
index 0000000000000000000000000000000000000000..88beca48ee98b33e33b19566b23fc4ce705d0cb5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALTEON
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_ALTEON is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AMAZON b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AMAZON
new file mode 100644
index 0000000000000000000000000000000000000000..71e03d0794acf32edf37091e2914b0afa3f6a09c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AMAZON
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_AMAZON=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AQUANTIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AQUANTIA
new file mode 100644
index 0000000000000000000000000000000000000000..f8ae0ca05dd846496e5e8774a167204d5ef4a2a6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AQUANTIA
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_AQUANTIA=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ARC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ARC
new file mode 100644
index 0000000000000000000000000000000000000000..98de351367a91640021131e5b32a1b4426df9811
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ARC
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_ARC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ASIX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ASIX
new file mode 100644
index 0000000000000000000000000000000000000000..ce6db20d292132231f0bcbc788a5f9833c1ade17
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ASIX
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_ASIX=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ATHEROS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ATHEROS
new file mode 100644
index 0000000000000000000000000000000000000000..96c98bc6a5fbd111ec673cf750dc20faf501511d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ATHEROS
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_ATHEROS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_BZWX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_BZWX
new file mode 100644
index 0000000000000000000000000000000000000000..cec1d47ff35d25b194b76eb953f21bcdb2f7bb64
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_BZWX
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_BZWX=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CADENCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CADENCE
new file mode 100644
index 0000000000000000000000000000000000000000..e5f7e015d474f8c895c543942501f7bd82bc581d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CADENCE
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_CADENCE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CAVIUM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CAVIUM
new file mode 100644
index 0000000000000000000000000000000000000000..5ae580523a423270474fb2505b5064bd909de205
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CAVIUM
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_CAVIUM=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CHELSIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CHELSIO
new file mode 100644
index 0000000000000000000000000000000000000000..b039e40c2ece4d1c147a07594dedc549703ec989
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CHELSIO
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_CHELSIO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CORTINA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CORTINA
new file mode 100644
index 0000000000000000000000000000000000000000..3e188cd812b65bfd4b162fa2b8ed19c8b8d5b15b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CORTINA
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_CORTINA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DAVICOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DAVICOM
new file mode 100644
index 0000000000000000000000000000000000000000..acb536eaa595fa0b5c29972fa4d9363aada84340
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DAVICOM
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_DAVICOM=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DLINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DLINK
new file mode 100644
index 0000000000000000000000000000000000000000..181e6a008c54d2ce918a41bd626c1c14c8d2908b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DLINK
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_DLINK is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ENGLEDER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ENGLEDER
new file mode 100644
index 0000000000000000000000000000000000000000..e25daf9651f90078a36c17a186b2ac52eded0b22
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ENGLEDER
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_ENGLEDER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_EZCHIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_EZCHIP
new file mode 100644
index 0000000000000000000000000000000000000000..6b7cb989253dc31d14f47a148f03f4c38a5b39ff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_EZCHIP
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_EZCHIP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_FUNGIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_FUNGIBLE
new file mode 100644
index 0000000000000000000000000000000000000000..42e0b6a45b82ffab3e3cb457581ace41bf1eeb46
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_FUNGIBLE
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_FUNGIBLE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_GOOGLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_GOOGLE
new file mode 100644
index 0000000000000000000000000000000000000000..e9bdb7cf1188ea54e02248ff1809a53f01e3a956
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_GOOGLE
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_GOOGLE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_I825XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_I825XX
new file mode 100644
index 0000000000000000000000000000000000000000..0647bca02a1aa1742bcdd60045649d7cd52c2a13
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_I825XX
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_I825XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_LITEX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_LITEX
new file mode 100644
index 0000000000000000000000000000000000000000..b1a296f33ea0ee97fffc62a522bbc75484c42fe7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_LITEX
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_LITEX=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MARVELL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MARVELL
new file mode 100644
index 0000000000000000000000000000000000000000..ebe703be225323e6034f5f6dd9ecda3c0f674ebd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MARVELL
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_MARVELL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICREL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICREL
new file mode 100644
index 0000000000000000000000000000000000000000..d359479e318fa9c32ffc0b8b00a34e2de4c42c8d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICREL
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_MICREL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROCHIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROCHIP
new file mode 100644
index 0000000000000000000000000000000000000000..8a7b69b77c49b68869914a0a5e158cef47465498
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROCHIP
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_MICROCHIP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSEMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSEMI
new file mode 100644
index 0000000000000000000000000000000000000000..e39610d5165e3b209bbf647e714b7d864130a944
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSEMI
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_MICROSEMI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSOFT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSOFT
new file mode 100644
index 0000000000000000000000000000000000000000..f873fb19897fa89a94b00dcf9b75cfc7dae3107c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSOFT
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_MICROSOFT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MOTORCOMM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MOTORCOMM
new file mode 100644
index 0000000000000000000000000000000000000000..3256085a639415ce3876748ff1b13b28e7d62dac
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MOTORCOMM
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_MOTORCOMM=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MYRI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MYRI
new file mode 100644
index 0000000000000000000000000000000000000000..9a7d24c542ce3510ccd917380ffece1904bfa8a4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MYRI
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_MYRI=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NATSEMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NATSEMI
new file mode 100644
index 0000000000000000000000000000000000000000..f2bf134ab6c45859b9f6445ca2e18dde6757b8d2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NATSEMI
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_NATSEMI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETERION b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETERION
new file mode 100644
index 0000000000000000000000000000000000000000..12910e8303305a61c9d537d5cfaf73a7bbd90709
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETERION
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_NETERION is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETRONOME b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETRONOME
new file mode 100644
index 0000000000000000000000000000000000000000..fc775de95e41e4363acf76971d398902071223f2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETRONOME
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_NETRONOME=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NI
new file mode 100644
index 0000000000000000000000000000000000000000..8504bfca5d824b7843105f70ab9cd1af14829c52
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NI
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_NI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NVIDIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NVIDIA
new file mode 100644
index 0000000000000000000000000000000000000000..27ee4d58cc11998f7fefb6b9a142e27e076fb2f5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NVIDIA
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_NVIDIA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_OKI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_OKI
new file mode 100644
index 0000000000000000000000000000000000000000..e5b745dc609a44d99f7044ca257c36bac090208d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_OKI
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_OKI=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PACKET_ENGINES b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PACKET_ENGINES
new file mode 100644
index 0000000000000000000000000000000000000000..7a9eec2cfd29ec1fd8e6a66f662ad6709312fcb0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PACKET_ENGINES
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PENSANDO b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PENSANDO
new file mode 100644
index 0000000000000000000000000000000000000000..aba7e5839300cb5ad309c2401aba001c4e394c7b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PENSANDO
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_PENSANDO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_QLOGIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_QLOGIC
new file mode 100644
index 0000000000000000000000000000000000000000..868b452abfbc2b08d91552dad4ee69ecbc2bc5e5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_QLOGIC
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_QLOGIC=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RDC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RDC
new file mode 100644
index 0000000000000000000000000000000000000000..4d84f77dc971572a2ae00a1e9ebefcf0d741d36d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RDC
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_RDC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_REALTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_REALTEK
new file mode 100644
index 0000000000000000000000000000000000000000..1e65bbda6bfb80980c5163340cc23e4ad7ef49a7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_REALTEK
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_REALTEK=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RENESAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RENESAS
new file mode 100644
index 0000000000000000000000000000000000000000..c190891099ebe88bc210244220a2f74096512df8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RENESAS
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ROCKER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ROCKER
new file mode 100644
index 0000000000000000000000000000000000000000..c946a85756382c96e5133c12bf9109c2a3ece2d4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ROCKER
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_ROCKER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SAMSUNG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SAMSUNG
new file mode 100644
index 0000000000000000000000000000000000000000..2542b3e8828052c89b95342c73368f432b4ae3d6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SAMSUNG
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_SAMSUNG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SEEQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SEEQ
new file mode 100644
index 0000000000000000000000000000000000000000..6fe8245eb13081f87ac8fbdc36a1569859cbeb47
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SEEQ
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_SEEQ is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SILAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SILAN
new file mode 100644
index 0000000000000000000000000000000000000000..07a129493b779c1ac12ac757ac5a961ac24e5d75
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SILAN
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_SILAN is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SIS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SIS
new file mode 100644
index 0000000000000000000000000000000000000000..5ff2e309b3f1aa24a25241f04602008ff31d85b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SIS
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_SIS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SMSC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SMSC
new file mode 100644
index 0000000000000000000000000000000000000000..4e3e7aec851b30c0cf4a74afeb653943d90783e6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SMSC
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_SMSC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SOCIONEXT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SOCIONEXT
new file mode 100644
index 0000000000000000000000000000000000000000..7e4d43ba55e04611d0278cb955bba5661b853b98
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SOCIONEXT
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_STMICRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_STMICRO
new file mode 100644
index 0000000000000000000000000000000000000000..040f835c6095fe683ce2caf98b3baa1fb3760628
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_STMICRO
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_STMICRO is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SUN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SUN
new file mode 100644
index 0000000000000000000000000000000000000000..a4ce7787579a1a53f500e90ab79449cbbd43ed79
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SUN
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_SUN is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SYNOPSYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SYNOPSYS
new file mode 100644
index 0000000000000000000000000000000000000000..979e832d6943a3d01beb4e4f0c689af0fa7c647c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SYNOPSYS
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TEHUTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TEHUTI
new file mode 100644
index 0000000000000000000000000000000000000000..7c0c6ee39d1a809effc5e86d609c3a4115926a7c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TEHUTI
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_TEHUTI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TI
new file mode 100644
index 0000000000000000000000000000000000000000..a9395e8cca7d15d9778c906fa282f03bae9a1bf0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TI
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_TI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VERTEXCOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VERTEXCOM
new file mode 100644
index 0000000000000000000000000000000000000000..1005473aa017e5dc47b739e4134dcda51abb1f7a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VERTEXCOM
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_VERTEXCOM=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VIA
new file mode 100644
index 0000000000000000000000000000000000000000..ddca33bb41ea468e8f5f674671c888a4139704e1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VIA
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_VIA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_WIZNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_WIZNET
new file mode 100644
index 0000000000000000000000000000000000000000..08efd75ae50ae3304a6683d10a432ec111a2a967
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_WIZNET
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_WIZNET is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_XILINX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_XILINX
new file mode 100644
index 0000000000000000000000000000000000000000..46ccaba73f7081a10d8ed7bf69f3ca7fe6cfeae5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_XILINX
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_XILINX=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEW_LEDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEW_LEDS
new file mode 100644
index 0000000000000000000000000000000000000000..a5eb3c0a2d62e581039be2f4f66f4fa4357247d8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEW_LEDS
@@ -0,0 +1 @@
+CONFIG_NEW_LEDS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFC
new file mode 100644
index 0000000000000000000000000000000000000000..73cd9395ef11d2dec42d4f3b124176c944ce4c0e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFC
@@ -0,0 +1 @@
+# CONFIG_NFC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP
new file mode 100644
index 0000000000000000000000000000000000000000..14f22b12c9498b160a2c10e82f44bf571afa9661
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP
@@ -0,0 +1 @@
+CONFIG_NFP=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_ABM_NIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_ABM_NIC
new file mode 100644
index 0000000000000000000000000000000000000000..cbb99c4e940f5659aeb0612ee82812533d76c9f5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_ABM_NIC
@@ -0,0 +1 @@
+CONFIG_NFP_APP_ABM_NIC=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_FLOWER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_FLOWER
new file mode 100644
index 0000000000000000000000000000000000000000..d9ff8a1781416e41f3540051e1c7b6a4f10732b9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_FLOWER
@@ -0,0 +1 @@
+CONFIG_NFP_APP_FLOWER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..b7f75b3f3e46bde2b9364aa56934cd59ad4d92ef
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_DEBUG
@@ -0,0 +1 @@
+# CONFIG_NFP_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_NET_IPSEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_NET_IPSEC
new file mode 100644
index 0000000000000000000000000000000000000000..680092ad5e628ee43c8b0e2ed3bbbcdcc1a6b95e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_NET_IPSEC
@@ -0,0 +1 @@
+CONFIG_NFP_NET_IPSEC=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2
new file mode 100644
index 0000000000000000000000000000000000000000..22cf77e2874b4eb67f8aec612ef192d9d14e696f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2
@@ -0,0 +1 @@
+CONFIG_NFSD_V2=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..fd9df13faca8102e8ff5da3c543fcaa472bdc361
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_DEBUG
@@ -0,0 +1 @@
+CONFIG_NFS_DEBUG=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_USE_KERNEL_DNS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_USE_KERNEL_DNS
new file mode 100644
index 0000000000000000000000000000000000000000..159758ff0791409a88a7ec50140d3d212079abf2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_USE_KERNEL_DNS
@@ -0,0 +1 @@
+CONFIG_NFS_USE_KERNEL_DNS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
new file mode 100644
index 0000000000000000000000000000000000000000..1a02c09043b4bfeb36e7ec37145a00e5abfd3a68
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
@@ -0,0 +1 @@
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_2_SSC_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_2_SSC_HELPER
new file mode 100644
index 0000000000000000000000000000000000000000..1e4a2c7471a5d4036c50877d5498a284bbef1d39
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_2_SSC_HELPER
@@ -0,0 +1 @@
+CONFIG_NFS_V4_2_SSC_HELPER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_SECURITY_LABEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_SECURITY_LABEL
new file mode 100644
index 0000000000000000000000000000000000000000..aa664ac8be56d8327c8fa7a1790648385c46aee9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_SECURITY_LABEL
@@ -0,0 +1 @@
+CONFIG_NFS_V4_SECURITY_LABEL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFTL
new file mode 100644
index 0000000000000000000000000000000000000000..c834b8376648c3d9ea9c1a8baaf25d9c73733341
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFTL
@@ -0,0 +1 @@
+# CONFIG_NFTL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_IPV4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_IPV4
new file mode 100644
index 0000000000000000000000000000000000000000..dd70ddc432d296198ca8033ac0fdf7cf6a978313
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_IPV4
@@ -0,0 +1 @@
+CONFIG_NFT_REJECT_IPV4=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_NETDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_NETDEV
new file mode 100644
index 0000000000000000000000000000000000000000..2b94f900d859b01881b8ba7691770bca4ee38a93
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_NETDEV
@@ -0,0 +1 @@
+# CONFIG_NFT_REJECT_NETDEV is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_CONNTRACK_OVS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_CONNTRACK_OVS
new file mode 100644
index 0000000000000000000000000000000000000000..9ef5988c4b8278672f4ea15ce015ae0a7d865352
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_CONNTRACK_OVS
@@ -0,0 +1 @@
+CONFIG_NF_CONNTRACK_OVS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_DEFRAG_IPV4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_DEFRAG_IPV4
new file mode 100644
index 0000000000000000000000000000000000000000..cfe1cb42cac6a4393d011ed1cb10b95fd096c5e2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_DEFRAG_IPV4
@@ -0,0 +1 @@
+CONFIG_NF_DEFRAG_IPV4=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_FLOW_TABLE_PROCFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_FLOW_TABLE_PROCFS
new file mode 100644
index 0000000000000000000000000000000000000000..16da187039d0cddf329e69619274e0d2776f2666
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_FLOW_TABLE_PROCFS
@@ -0,0 +1 @@
+# CONFIG_NF_FLOW_TABLE_PROCFS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_LOG_SYSLOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_LOG_SYSLOG
new file mode 100644
index 0000000000000000000000000000000000000000..920e389bf1a5f39c05337e5db52006ce7fd73611
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_LOG_SYSLOG
@@ -0,0 +1 @@
+CONFIG_NF_LOG_SYSLOG=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_NAT_OVS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_NAT_OVS
new file mode 100644
index 0000000000000000000000000000000000000000..dc07edb9e1a2818e5b9cc3858ce275bb293a575b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_NAT_OVS
@@ -0,0 +1 @@
+CONFIG_NF_NAT_OVS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_TPROXY_IPV4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_TPROXY_IPV4
new file mode 100644
index 0000000000000000000000000000000000000000..995fadff97fda40a7ee74cdfa192a561b83cad31
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_TPROXY_IPV4
@@ -0,0 +1 @@
+CONFIG_NF_TPROXY_IPV4=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NILFS2_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NILFS2_FS
new file mode 100644
index 0000000000000000000000000000000000000000..23c53bef0372542e9fcffd859862c47df73b074e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NILFS2_FS
@@ -0,0 +1 @@
+# CONFIG_NILFS2_FS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NL80211_TESTMODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NL80211_TESTMODE
new file mode 100644
index 0000000000000000000000000000000000000000..fc4435e08a5702d5003cff1320332e6447a576e9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NL80211_TESTMODE
@@ -0,0 +1 @@
+# CONFIG_NL80211_TESTMODE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLATTR b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLATTR
new file mode 100644
index 0000000000000000000000000000000000000000..8aee114c268cf498e0f79e19309a4996894e57ad
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLATTR
@@ -0,0 +1 @@
+CONFIG_NLATTR=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1250 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1250
new file mode 100644
index 0000000000000000000000000000000000000000..2ab2aa02648d1ed61b991bfe0c068181958acb75
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1250
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_1250=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1251 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1251
new file mode 100644
index 0000000000000000000000000000000000000000..2c23320ef53db577ded0480adcc025aa11874fd1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1251
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_1251=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_437 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_437
new file mode 100644
index 0000000000000000000000000000000000000000..28e6c82d3ef5c4c4015800d6bc09eef7489e2b03
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_437
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_437=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_737 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_737
new file mode 100644
index 0000000000000000000000000000000000000000..7fbb3690430293c7023e4bca3aa3646790a7fb42
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_737
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_737=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_775 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_775
new file mode 100644
index 0000000000000000000000000000000000000000..a6dbd8ecff53da4c6e4a468ffea95483b6d0905e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_775
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_775=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_850 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_850
new file mode 100644
index 0000000000000000000000000000000000000000..37a7de91b8038b3a048f974174c2dfa6d8b1474b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_850
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_850=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_852 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_852
new file mode 100644
index 0000000000000000000000000000000000000000..6154f8d3d76c1af2abf752e1cd80429ab13984be
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_852
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_852=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_855 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_855
new file mode 100644
index 0000000000000000000000000000000000000000..347b0df13fc34705f4f283c90f41b659c8c68bb2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_855
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_855=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_857 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_857
new file mode 100644
index 0000000000000000000000000000000000000000..f44591925e08823a24e4198224b55559089513f4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_857
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_857=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_860 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_860
new file mode 100644
index 0000000000000000000000000000000000000000..5e8d61ab1b307bd493c6f5b36d3aa02830f3618d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_860
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_860=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_861 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_861
new file mode 100644
index 0000000000000000000000000000000000000000..39d141b6b1d550b35cad4de344a2ed228d8f7bfe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_861
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_861=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_862 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_862
new file mode 100644
index 0000000000000000000000000000000000000000..cbdc58462bebe686100f1599eb4bfa5f9966eec6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_862
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_862=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_863 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_863
new file mode 100644
index 0000000000000000000000000000000000000000..f0274505ce0622224293bf50216f455a70a641ec
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_863
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_863=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_864 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_864
new file mode 100644
index 0000000000000000000000000000000000000000..de63e9d614a75ddbc80028e35fa922e3c65410ca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_864
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_864=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_865 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_865
new file mode 100644
index 0000000000000000000000000000000000000000..ebbe1cdbf095c971fd9d9a350538571a66139cd2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_865
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_865=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_866 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_866
new file mode 100644
index 0000000000000000000000000000000000000000..694549836f6d0f60a45955b117203441c53f0588
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_866
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_866=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_869 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_869
new file mode 100644
index 0000000000000000000000000000000000000000..3b4b8bf73e26830313a189f3fa8ab6d0a4cc9afd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_869
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_869=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_874 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_874
new file mode 100644
index 0000000000000000000000000000000000000000..eead1363babbfaa0e46b3242b795f3a5a2c79791
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_874
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_874=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_932 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_932
new file mode 100644
index 0000000000000000000000000000000000000000..f423190f8232e3687bdfc9d5b44539658fc4b071
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_932
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_932=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_949 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_949
new file mode 100644
index 0000000000000000000000000000000000000000..f67e8c400ed6ecf1791547b767ae29d55d74f5e2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_949
@@ -0,0 +1 @@
+CONFIG_NLS_CODEPAGE_949=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_1
new file mode 100644
index 0000000000000000000000000000000000000000..883240974b12fe27301469b633b54fc9464a3772
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_1
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_1=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_13 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_13
new file mode 100644
index 0000000000000000000000000000000000000000..78ad020a6fa86ea25a5ab1a8ce4ddac023f7debd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_13
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_13=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_14 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_14
new file mode 100644
index 0000000000000000000000000000000000000000..94002d5309252293670684772e333756a9ba9c2b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_14
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_14=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_15 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_15
new file mode 100644
index 0000000000000000000000000000000000000000..19eb61c9c4ae72713cf53d3b8018c263b4914573
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_15
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_15=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_2
new file mode 100644
index 0000000000000000000000000000000000000000..13f70956ddd11e28c53baebe5bb79d04b6cffb6e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_2
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_2=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_3
new file mode 100644
index 0000000000000000000000000000000000000000..73b4a2e7d5d4a443bb3f2ebbd7ef05447cee9d97
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_3
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_3=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_4
new file mode 100644
index 0000000000000000000000000000000000000000..bd726a9a473ad74cc6b1bf36cfe808240cc0a61b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_4
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_4=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_5 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_5
new file mode 100644
index 0000000000000000000000000000000000000000..5f1dc8d9332141cc4f296164ac138ff62104fdc7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_5
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_5=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_6 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_6
new file mode 100644
index 0000000000000000000000000000000000000000..f2a2982482429b1a48439d762574859159cd1929
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_6
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_6=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_7 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_7
new file mode 100644
index 0000000000000000000000000000000000000000..27e788c91019b7dfe523cfd88d80bfcfe5f23adb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_7
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_7=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_8
new file mode 100644
index 0000000000000000000000000000000000000000..23288bc773fa8d7342f6bdea1ca07487b51f3ff4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_8
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_8=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_9 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_9
new file mode 100644
index 0000000000000000000000000000000000000000..155b6f80c6c4ec6094733cd0481de98a04fedddb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_9
@@ -0,0 +1 @@
+CONFIG_NLS_ISO8859_9=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_R b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_R
new file mode 100644
index 0000000000000000000000000000000000000000..55956c2df32725cb9bdf1ee7d4120a38247f4ac1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_R
@@ -0,0 +1 @@
+CONFIG_NLS_KOI8_R=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_U b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_U
new file mode 100644
index 0000000000000000000000000000000000000000..81ce86b0f724adcad84bd42e6cc7dfea589ae1b2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_U
@@ -0,0 +1 @@
+CONFIG_NLS_KOI8_U=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CELTIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CELTIC
new file mode 100644
index 0000000000000000000000000000000000000000..2eeb972185af4e9d2a9bf55c3801270c2ebe7a7e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CELTIC
@@ -0,0 +1 @@
+CONFIG_NLS_MAC_CELTIC=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CENTEURO b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CENTEURO
new file mode 100644
index 0000000000000000000000000000000000000000..8f378fa0b1f6fdba3c8ca9ab6833c950438b9fe8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CENTEURO
@@ -0,0 +1 @@
+CONFIG_NLS_MAC_CENTEURO=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CROATIAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CROATIAN
new file mode 100644
index 0000000000000000000000000000000000000000..450569f5a628d0b5a65d1ecbb8d4eceda81888da
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CROATIAN
@@ -0,0 +1 @@
+CONFIG_NLS_MAC_CROATIAN=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CYRILLIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CYRILLIC
new file mode 100644
index 0000000000000000000000000000000000000000..027531c4490409516bc63fabf7ad24646b370b90
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CYRILLIC
@@ -0,0 +1 @@
+CONFIG_NLS_MAC_CYRILLIC=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GAELIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GAELIC
new file mode 100644
index 0000000000000000000000000000000000000000..d3617cd61298f03cd667321d14225c7f79eea840
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GAELIC
@@ -0,0 +1 @@
+CONFIG_NLS_MAC_GAELIC=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GREEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GREEK
new file mode 100644
index 0000000000000000000000000000000000000000..3d52cb40d1b05d40c89b576a6cdaa746496953f1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GREEK
@@ -0,0 +1 @@
+CONFIG_NLS_MAC_GREEK=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ICELAND b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ICELAND
new file mode 100644
index 0000000000000000000000000000000000000000..edb7121c9e653f444761a3a310dc681eac4a784f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ICELAND
@@ -0,0 +1 @@
+CONFIG_NLS_MAC_ICELAND=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_INUIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_INUIT
new file mode 100644
index 0000000000000000000000000000000000000000..877602ad2dbadc047be3b8498497b981cf231781
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_INUIT
@@ -0,0 +1 @@
+CONFIG_NLS_MAC_INUIT=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMAN
new file mode 100644
index 0000000000000000000000000000000000000000..256f7e8a83da313928f2254999ca6ad8f72e4dd7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMAN
@@ -0,0 +1 @@
+CONFIG_NLS_MAC_ROMAN=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMANIAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMANIAN
new file mode 100644
index 0000000000000000000000000000000000000000..1b9b8506afd6bcdbcca068698a99e694e6548527
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMANIAN
@@ -0,0 +1 @@
+CONFIG_NLS_MAC_ROMANIAN=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_TURKISH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_TURKISH
new file mode 100644
index 0000000000000000000000000000000000000000..f2b486a5b53daa9fac5133ed7789fc776966431c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_TURKISH
@@ -0,0 +1 @@
+CONFIG_NLS_MAC_TURKISH=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_UCS2_UTILS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_UCS2_UTILS
new file mode 100644
index 0000000000000000000000000000000000000000..632113ac85923a6c63c7d195fe36e2aee8a09023
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_UCS2_UTILS
@@ -0,0 +1 @@
+CONFIG_NLS_UCS2_UTILS=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_TRACER
new file mode 100644
index 0000000000000000000000000000000000000000..bdae48953dc38076332193cef6f99551222bb4e3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_TRACER
@@ -0,0 +1 @@
+CONFIG_NOP_TRACER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_USB_XCEIV b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_USB_XCEIV
new file mode 100644
index 0000000000000000000000000000000000000000..647c7293495396a5324eb4dea2476a0cba38d164
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_USB_XCEIV
@@ -0,0 +1 @@
+# CONFIG_NOP_USB_XCEIV is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NOTIFIER_ERROR_INJECTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOTIFIER_ERROR_INJECTION
new file mode 100644
index 0000000000000000000000000000000000000000..44e141c457ec5df8f8e032324e8cf3e357ab3629
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOTIFIER_ERROR_INJECTION
@@ -0,0 +1 @@
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_MMU b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_MMU
new file mode 100644
index 0000000000000000000000000000000000000000..5d30f458862f96e3accff9c8ba851a3b4520969a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_MMU
@@ -0,0 +1 @@
+# CONFIG_NOUVEAU_DEBUG_MMU is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_PUSH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_PUSH
new file mode 100644
index 0000000000000000000000000000000000000000..5614c2c49809d5301847dae215984dfdd385ce61
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_PUSH
@@ -0,0 +1 @@
+# CONFIG_NOUVEAU_DEBUG_PUSH is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NO_HZ_COMMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_NO_HZ_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..8ad203b24298b9a4a65b55e2c77d2bfae0206944
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NO_HZ_COMMON
@@ -0,0 +1 @@
+CONFIG_NO_HZ_COMMON=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_EPF b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_EPF
new file mode 100644
index 0000000000000000000000000000000000000000..b9df2cd6a6a7f4291eff4350625e772789d73c92
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_EPF
@@ -0,0 +1 @@
+# CONFIG_NTB_EPF is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_IDT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_IDT
new file mode 100644
index
0000000000000000000000000000000000000000..1a39fe8de83d6df39c3aca0e88fa87d93048b33e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_IDT @@ -0,0 +1 @@ +# CONFIG_NTB_IDT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_MSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_MSI new file mode 100644 index 0000000000000000000000000000000000000000..62a7410ff3bf6a32dd94d6904ea15fd064c2dfca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_MSI @@ -0,0 +1 @@ +# CONFIG_NTB_MSI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PERF b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PERF new file mode 100644 index 0000000000000000000000000000000000000000..8d7ecb6852eb688246a6e22900e86f23eb143b14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PERF @@ -0,0 +1 @@ +# CONFIG_NTB_PERF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PINGPONG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PINGPONG new file mode 100644 index 0000000000000000000000000000000000000000..c6b36e57d4234563ce5b78a64dfdfb0bf71dfde0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PINGPONG @@ -0,0 +1 @@ +# CONFIG_NTB_PINGPONG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_SWITCHTEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_SWITCHTEC new file mode 100644 index 0000000000000000000000000000000000000000..b5760cb7a1580756f37bf942d537b24d607edde0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_SWITCHTEC @@ -0,0 +1 @@ +# CONFIG_NTB_SWITCHTEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TOOL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TOOL new file mode 100644 index 0000000000000000000000000000000000000000..93ba98f5d0c729f0dc7de8a430c2267fcf17f53f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TOOL @@ -0,0 +1 @@ +# CONFIG_NTB_TOOL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TRANSPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TRANSPORT new file mode 100644 index 0000000000000000000000000000000000000000..167e3e650217ce4f112f8ac7098e906989876d42 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TRANSPORT @@ -0,0 +1 @@ +# CONFIG_NTB_TRANSPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_64BIT_CLUSTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_64BIT_CLUSTER new file mode 100644 index 0000000000000000000000000000000000000000..65a74b806f0bc837bec0f91281e4983e29e77a5b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_64BIT_CLUSTER @@ -0,0 +1 @@ +# CONFIG_NTFS3_64BIT_CLUSTER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS new file mode 100644 index 0000000000000000000000000000000000000000..280b2a549ce8578ef32084dba210d386253c8ad0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS @@ -0,0 +1 @@ +CONFIG_NTFS3_FS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS_POSIX_ACL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS_POSIX_ACL new file mode 100644 index 0000000000000000000000000000000000000000..08340880dd06c08184e9844b7c70a910983018e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS_POSIX_ACL @@ -0,0 +1 @@ +# CONFIG_NTFS3_FS_POSIX_ACL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_LZX_XPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_LZX_XPRESS new file mode 100644 
index 0000000000000000000000000000000000000000..6379df2d35788769ffdb0539c407cc1b9dc913c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_LZX_XPRESS @@ -0,0 +1 @@ +# CONFIG_NTFS3_LZX_XPRESS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NULL_TTY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NULL_TTY new file mode 100644 index 0000000000000000000000000000000000000000..d09e7fbb821468f9fc8eee6d85fb222b5614f423 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NULL_TTY @@ -0,0 +1 @@ +# CONFIG_NULL_TTY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NUMA_KEEP_MEMINFO b/anolis/configs/L2-OPTIONAL/default/CONFIG_NUMA_KEEP_MEMINFO new file mode 100644 index 0000000000000000000000000000000000000000..0b9ad21db132212fc84ef210278e72070ae62272 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NUMA_KEEP_MEMINFO @@ -0,0 +1 @@ +CONFIG_NUMA_KEEP_MEMINFO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_DAX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_DAX new file mode 100644 index 0000000000000000000000000000000000000000..947636ca8c1d8516ecb74f3f9389a6941f73ddd3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_DAX @@ -0,0 +1 @@ +CONFIG_NVDIMM_DAX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_PFN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_PFN new file mode 100644 index 0000000000000000000000000000000000000000..9db164ae565ae9ca70ea4a7aa96fd370232379df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_PFN @@ -0,0 +1 @@ +CONFIG_NVDIMM_PFN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_SECURITY_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_SECURITY_TEST new file mode 100644 index 0000000000000000000000000000000000000000..903b6332e9b93eb04dc470de9bcef3f2bec81b1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_SECURITY_TEST @@ -0,0 +1 @@ +# CONFIG_NVDIMM_SECURITY_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_ONIE_TLV b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_ONIE_TLV new file mode 100644 index 0000000000000000000000000000000000000000..bc4dfbc18f085faad5d16fcb26db4e56fd8b500e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_ONIE_TLV @@ -0,0 +1 @@ +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_SL28_VPD b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_SL28_VPD new file mode 100644 index 0000000000000000000000000000000000000000..f4248d286bf30d5b85b6e6268046c4f51bea15d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_SL28_VPD @@ -0,0 +1 @@ +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_RMEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_RMEM new file mode 100644 index 0000000000000000000000000000000000000000..d0ea9507bad82784205991f1e96e558d99694ff0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_RMEM @@ -0,0 +1 @@ +# CONFIG_NVMEM_RMEM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_AUTH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_AUTH new file mode 100644 index 0000000000000000000000000000000000000000..a0a7178528c93eedebb05e41fe54370a98ffdde2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_AUTH @@ -0,0 +1 @@ +# CONFIG_NVME_AUTH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_HWMON 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_HWMON new file mode 100644 index 0000000000000000000000000000000000000000..6e5917e9a4b7b9b792c9020cf87e1d3e7dc32cd5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_HWMON @@ -0,0 +1 @@ +# CONFIG_NVME_HWMON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_MULTIPATH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_MULTIPATH new file mode 100644 index 0000000000000000000000000000000000000000..7eb14dd07a013739527b43cedb336597daf2522f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_MULTIPATH @@ -0,0 +1 @@ +CONFIG_NVME_MULTIPATH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET new file mode 100644 index 0000000000000000000000000000000000000000..03d6079ba2313d911db1880218d1b2a92a6a1d0c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET @@ -0,0 +1 @@ +CONFIG_NVME_TARGET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_AUTH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_AUTH new file mode 100644 index 0000000000000000000000000000000000000000..d2d0c7cd7c682fad395ccabfc148965a026f0d3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_AUTH @@ -0,0 +1 @@ +# CONFIG_NVME_TARGET_AUTH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FC new file mode 100644 index 0000000000000000000000000000000000000000..5d264040c7c85efec84ba428c1ea800d9245c1d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FC @@ -0,0 +1 @@ +CONFIG_NVME_TARGET_FC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FCLOOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FCLOOP new file mode 100644 index 0000000000000000000000000000000000000000..7334e95ad60b43257c57a62b95ab7aa31f3c2c9b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FCLOOP @@ -0,0 +1 @@ +CONFIG_NVME_TARGET_FCLOOP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_LOOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_LOOP new file mode 100644 index 0000000000000000000000000000000000000000..31f358bca1040c5160a9e62329b89bac3c057c6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_LOOP @@ -0,0 +1 @@ +CONFIG_NVME_TARGET_LOOP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_PASSTHRU b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_PASSTHRU new file mode 100644 index 0000000000000000000000000000000000000000..12bf02e23a4104eb3714d609b492a2e589d89a6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_PASSTHRU @@ -0,0 +1 @@ +# CONFIG_NVME_TARGET_PASSTHRU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_RDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_RDMA new file mode 100644 index 0000000000000000000000000000000000000000..0f1470c53a9d6dcb485069218788559814d820e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_RDMA @@ -0,0 +1 @@ +CONFIG_NVME_TARGET_RDMA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_TCP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_TCP new file mode 100644 index 0000000000000000000000000000000000000000..46078d0a7f4919ad572824810317f81e625856ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_TCP @@ -0,0 +1 @@ 
+CONFIG_NVME_TARGET_TCP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_VERBOSE_ERRORS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_VERBOSE_ERRORS new file mode 100644 index 0000000000000000000000000000000000000000..76bd67d16c3977c88c15f11f9c579e5f231b2129 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_VERBOSE_ERRORS @@ -0,0 +1 @@ +# CONFIG_NVME_VERBOSE_ERRORS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_C45_TJA11XX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_C45_TJA11XX_PHY new file mode 100644 index 0000000000000000000000000000000000000000..58d46266f892572aacddc6900ee6c6c1805bddda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_C45_TJA11XX_PHY @@ -0,0 +1 @@ +# CONFIG_NXP_C45_TJA11XX_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_CBTX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_CBTX_PHY new file mode 100644 index 0000000000000000000000000000000000000000..7f675ff13a34885652bb5f2ee77112ce191fc705 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_CBTX_PHY @@ -0,0 +1 @@ +# CONFIG_NXP_CBTX_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_TJA11XX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_TJA11XX_PHY new file mode 100644 index 0000000000000000000000000000000000000000..37d64a89189c97bb914f9db553ab116b5166b66a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_TJA11XX_PHY @@ -0,0 +1 @@ +# CONFIG_NXP_TJA11XX_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_N_GSM b/anolis/configs/L2-OPTIONAL/default/CONFIG_N_GSM new file mode 100644 index 0000000000000000000000000000000000000000..333f7243e3de2167ab75bfafc9469853206adf74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_N_GSM @@ -0,0 +1 @@ +CONFIG_N_GSM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_N_HDLC b/anolis/configs/L2-OPTIONAL/default/CONFIG_N_HDLC new file mode 100644 index 0000000000000000000000000000000000000000..cac70b3e4b1b8b874a69785d5a7699ba20f5156f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_N_HDLC @@ -0,0 +1 @@ +CONFIG_N_HDLC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_OBJAGG b/anolis/configs/L2-OPTIONAL/default/CONFIG_OBJAGG new file mode 100644 index 0000000000000000000000000000000000000000..d44f2ff1c65331b880ed890e4d338b0cbd1a6cbe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_OBJAGG @@ -0,0 +1 @@ +CONFIG_OBJAGG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_OCFS2_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_OCFS2_FS new file mode 100644 index 0000000000000000000000000000000000000000..99fe2608d7243a4c0e37aa7b323d8a995422c4a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_OCFS2_FS @@ -0,0 +1 @@ +# CONFIG_OCFS2_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_OID_REGISTRY b/anolis/configs/L2-OPTIONAL/default/CONFIG_OID_REGISTRY new file mode 100644 index 0000000000000000000000000000000000000000..4a755a3f3394497e39267af1b03cbbb9bbaecfff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_OID_REGISTRY @@ -0,0 +1 @@ +CONFIG_OID_REGISTRY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_OLD_SIGSUSPEND3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_OLD_SIGSUSPEND3 new file mode 100644 index 0000000000000000000000000000000000000000..7432702d28585ed2b3e5ef67f4e6cf962494c6a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_OLD_SIGSUSPEND3 @@ -0,0 +1 @@ +CONFIG_OLD_SIGSUSPEND3=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_OMFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_OMFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..0aecb6226b9ace623320fb249f823e633d314de0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_OMFS_FS @@ -0,0 +1 @@ +# CONFIG_OMFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ORANGEFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ORANGEFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..f7edc3b2af3aff555988b35017e012da2b9b32f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ORANGEFS_FS @@ -0,0 +1 @@ +# CONFIG_ORANGEFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_OVERLAY_FS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_OVERLAY_FS_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..c08ef0299332f93caa5cf0669437ae5f72b7edc6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_OVERLAY_FS_DEBUG @@ -0,0 +1 @@ +# CONFIG_OVERLAY_FS_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PACKING b/anolis/configs/L2-OPTIONAL/default/CONFIG_PACKING new file mode 100644 index 0000000000000000000000000000000000000000..6af3d64ddbaae152d34ecbdab19be018d4200e37 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PACKING @@ -0,0 +1 @@ +# CONFIG_PACKING is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PADATA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PADATA new file mode 100644 index 0000000000000000000000000000000000000000..d044574ca11ab110073a47b4ed3a64d03da61e4c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PADATA @@ -0,0 +1 @@ +CONFIG_PADATA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_COUNTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_COUNTER new file mode 100644 index 0000000000000000000000000000000000000000..45bb7b51460e540b07dd424b551373e545da6aa7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_COUNTER @@ -0,0 +1 @@ +CONFIG_PAGE_COUNTER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL new file mode 100644 index 0000000000000000000000000000000000000000..0eb23c7cba9cd9259013040476d1a6fe30561c5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL @@ -0,0 +1 @@ +CONFIG_PAGE_POOL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL_STATS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL_STATS new file mode 100644 index 0000000000000000000000000000000000000000..8b2d7f8c1355ada21a885014b7f05accfebfdd76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL_STATS @@ -0,0 +1 @@ +# CONFIG_PAGE_POOL_STATS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_256KB b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_256KB new file mode 100644 index 0000000000000000000000000000000000000000..12c87bd99979489cbb43dbc084dff60c271e0418 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_256KB @@ -0,0 +1 @@ +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_64KB b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_64KB new file mode 100644 index 0000000000000000000000000000000000000000..22ea98060768194243eb0f796721ed1ddf5b10f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_64KB @@ -0,0 +1 @@ 
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_TABLE_CHECK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_TABLE_CHECK new file mode 100644 index 0000000000000000000000000000000000000000..2949da69ef219be9edb2e1a69698399bfb45f562 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_TABLE_CHECK @@ -0,0 +1 @@ +# CONFIG_PAGE_TABLE_CHECK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_LANG_EXCLUDE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_LANG_EXCLUDE new file mode 100644 index 0000000000000000000000000000000000000000..2cb7741fd132694f844376096b2414a62b4b3ced --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_LANG_EXCLUDE @@ -0,0 +1 @@ +CONFIG_PAHOLE_HAS_LANG_EXCLUDE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_SPLIT_BTF b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_SPLIT_BTF new file mode 100644 index 0000000000000000000000000000000000000000..d2ef15e398d346bd33dacbbde1d9bd3730bd2f11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_SPLIT_BTF @@ -0,0 +1 @@ +CONFIG_PAHOLE_HAS_SPLIT_BTF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION new file mode 100644 index 0000000000000000000000000000000000000000..d21e5c7d60c50ebae1d0a2a4082807c93bdbe922 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION @@ -0,0 +1 @@ +CONFIG_PAHOLE_VERSION=9999 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PANIC_ON_OOPS_VALUE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PANIC_ON_OOPS_VALUE new file mode 100644 index 0000000000000000000000000000000000000000..165233f748756a0e126da2cbcaafddbc21a38a9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PANIC_ON_OOPS_VALUE @@ -0,0 +1 @@ +CONFIG_PANIC_ON_OOPS_VALUE=1 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PANTHERLORD_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_PANTHERLORD_FF new file mode 100644 index 0000000000000000000000000000000000000000..7b66d03b9ffd18963de817a0646357ba9a386702 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PANTHERLORD_FF @@ -0,0 +1 @@ +# CONFIG_PANTHERLORD_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PARMAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_PARMAN new file mode 100644 index 0000000000000000000000000000000000000000..1e5d4dd9125e612e7103b1c049fb16e9ca68210a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PARMAN @@ -0,0 +1 @@ +CONFIG_PARMAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI new file mode 100644 index 0000000000000000000000000000000000000000..c3bc7866f76d9cb02a2404f6cf681292a64e5519 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI @@ -0,0 +1 @@ +CONFIG_PATA_ACPI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI new file mode 100644 index 0000000000000000000000000000000000000000..fbd742a5709857da891cd43f9e49035598e4ae99 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI @@ -0,0 +1 @@ +CONFIG_PATA_ALI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD new file mode 100644 index 0000000000000000000000000000000000000000..acf7db321ac7d73ec1a8daca2726a0954b7b7aad --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD @@ -0,0 +1 @@ +CONFIG_PATA_AMD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP new file mode 100644 index 0000000000000000000000000000000000000000..db2c10844088a9156879ba2716f786f175c7442b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP @@ -0,0 +1 @@ +CONFIG_PATA_ARTOP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP new file mode 100644 index 0000000000000000000000000000000000000000..0089d45c97a87caea8b11dcac5ffdf29513f65c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP @@ -0,0 +1 @@ +CONFIG_PATA_ATIIXP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X new file mode 100644 index 0000000000000000000000000000000000000000..f5a03f81db1abaafcc832db64fab86dfee866118 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X @@ -0,0 +1 @@ +CONFIG_PATA_ATP867X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD640_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD640_PCI new file mode 100644 index 0000000000000000000000000000000000000000..9244705d255c2ab666ebf2ef810eae5e55607b12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD640_PCI @@ -0,0 +1 @@ +# CONFIG_PATA_CMD640_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X new file mode 100644 index 0000000000000000000000000000000000000000..b065f35ad51ad4e00383deb85d5fea1a24c8fbfb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X @@ -0,0 +1 @@ +CONFIG_PATA_CMD64X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CYPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CYPRESS new file mode 100644 index 0000000000000000000000000000000000000000..901fb21a9d58a854acc4137544ab36b079f1fa5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CYPRESS @@ -0,0 +1 @@ +# CONFIG_PATA_CYPRESS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_EFAR b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_EFAR new file mode 100644 index 0000000000000000000000000000000000000000..d1957bf57c19b5bdb998701ce92f7936d32aa0c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_EFAR @@ -0,0 +1 @@ +# CONFIG_PATA_EFAR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 new file mode 100644 index 0000000000000000000000000000000000000000..0f87284135d0096b45962076acd1e8beedb675a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 @@ -0,0 +1 @@ +CONFIG_PATA_HPT366=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X new file mode 100644 index 0000000000000000000000000000000000000000..c0f919942e5cf5f5adf45be38b6e8f7de649a260 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X @@ -0,0 +1 @@ +CONFIG_PATA_HPT37X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N new file mode 100644 index 0000000000000000000000000000000000000000..ab1bf6eb11d16e2a4fa38240989f8f3bb30de1b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N @@ -0,0 +1 @@ 
+CONFIG_PATA_HPT3X2N=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 new file mode 100644 index 0000000000000000000000000000000000000000..aba3749b399dc9fbfb4d39d6c63bb34723b4f734 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 @@ -0,0 +1 @@ +CONFIG_PATA_HPT3X3=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA new file mode 100644 index 0000000000000000000000000000000000000000..723cb8bb73ac173caa7e6b2e06f67b09282d6d07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA @@ -0,0 +1 @@ +# CONFIG_PATA_HPT3X3_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 new file mode 100644 index 0000000000000000000000000000000000000000..1118a468f51d9d676e0870b12b117a9ab2939dbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 @@ -0,0 +1 @@ +CONFIG_PATA_IT8213=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X new file mode 100644 index 0000000000000000000000000000000000000000..3b64260864a5b37aae42b1853ee7ae995ece4cb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X @@ -0,0 +1 @@ +CONFIG_PATA_IT821X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON new file mode 100644 index 0000000000000000000000000000000000000000..b2fb1f796515c5995c23f12e60b20d3c6e572902 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON @@ -0,0 +1 @@ +CONFIG_PATA_JMICRON=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_LEGACY b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_LEGACY new file mode 100644 index 0000000000000000000000000000000000000000..c8c3b44f26716af5782777606d85d9a960e37a0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_LEGACY @@ -0,0 +1 @@ +# CONFIG_PATA_LEGACY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL new file mode 100644 index 0000000000000000000000000000000000000000..7e569fb2947cba4109bc53cba432d08f8efaadda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL @@ -0,0 +1 @@ +CONFIG_PATA_MARVELL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MPIIX b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MPIIX new file mode 100644 index 0000000000000000000000000000000000000000..241149b47c43d25689fdb6adee479c15f6d3a937 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MPIIX @@ -0,0 +1 @@ +# CONFIG_PATA_MPIIX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL new file mode 100644 index 0000000000000000000000000000000000000000..d3ad10131bd2e13edbd83f89ebbaa9e9e80c2268 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL @@ -0,0 +1 @@ +CONFIG_PATA_NETCELL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 new file mode 100644 index 0000000000000000000000000000000000000000..018ab5bca4d56ea7a5355f011fdf65cdcca12b03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 @@ -0,0 +1 @@ +CONFIG_PATA_NINJA32=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87410 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87410 new file mode 100644 index 0000000000000000000000000000000000000000..32f9c72e036eb4e5f95b0c3a02d30551481c3c70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87410 @@ -0,0 +1 @@ +# CONFIG_PATA_NS87410 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87415 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87415 new file mode 100644 index 0000000000000000000000000000000000000000..d12f9010de640280243b0842de8804e8cad5b70d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87415 @@ -0,0 +1 @@ +# CONFIG_PATA_NS87415 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX new file mode 100644 index 0000000000000000000000000000000000000000..a3a6f6f6fe64eacd42165a968c91e06469d28656 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX @@ -0,0 +1 @@ +CONFIG_PATA_OLDPIIX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTI new file mode 100644 index 0000000000000000000000000000000000000000..f95b6c966e0b87ac34321ee99d1b63e9b89267a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTI @@ -0,0 +1 @@ +# CONFIG_PATA_OPTI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTIDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTIDMA new file mode 100644 index 0000000000000000000000000000000000000000..6bf1d2e8aea611d9ea05aed3bc4cf71dfd337e88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTIDMA @@ -0,0 +1 @@ +# CONFIG_PATA_OPTIDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X new file mode 100644 index 0000000000000000000000000000000000000000..30d5e6f20f889490037cd91a950296d4abd31c44 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X @@ -0,0 +1 @@ +CONFIG_PATA_PDC2027X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD new file mode 100644 index 0000000000000000000000000000000000000000..10f98240cbbcf4ac02baf6a416784d54d069bc48 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD @@ -0,0 +1 @@ +CONFIG_PATA_PDC_OLD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RADISYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RADISYS new file mode 100644 index 0000000000000000000000000000000000000000..30316e774e95828e2036df501d9069f23b3789f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RADISYS @@ -0,0 +1 @@ +# CONFIG_PATA_RADISYS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC new file mode 100644 index 0000000000000000000000000000000000000000..011a98d5ac728ded91aa99fa5f0d532b049d968d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC @@ -0,0 +1 @@ +CONFIG_PATA_RDC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RZ1000 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RZ1000 new file mode 100644 index 0000000000000000000000000000000000000000..7da7e0970466eb008404d3941a9f4f21d8e4026d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RZ1000 @@ -0,0 +1 @@ +# CONFIG_PATA_RZ1000 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH new file mode 100644 index 0000000000000000000000000000000000000000..2b0924997583f5f76f9570a70749afa65b53fa94 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH @@ -0,0 +1 @@ +CONFIG_PATA_SCH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS new file mode 100644 index 0000000000000000000000000000000000000000..3cded75960c52712ca9d6b6227b4a5424d1a3006 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS @@ -0,0 +1 @@ +CONFIG_PATA_SERVERWORKS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 new file mode 100644 index 0000000000000000000000000000000000000000..2558da4e3845717305cf58dfb11ac66f595e8e4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 @@ -0,0 +1 @@ +CONFIG_PATA_SIL680=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS new file mode 100644 index 0000000000000000000000000000000000000000..238622337136976b9d11e9635a9062ad1be2d618 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS @@ -0,0 +1 @@ +CONFIG_PATA_SIS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TIMINGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TIMINGS new file mode 100644 index 0000000000000000000000000000000000000000..61e1a8d684c78027e74ad5c1877ce841e021ae47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TIMINGS @@ -0,0 +1 @@ +CONFIG_PATA_TIMINGS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA new file mode 100644 index 0000000000000000000000000000000000000000..1fe4524d61532daae8ee4171089d7889bdf95b4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA @@ -0,0 +1 @@ +CONFIG_PATA_TOSHIBA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TRIFLEX b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TRIFLEX new file mode 100644 index 0000000000000000000000000000000000000000..5da0e57c4b293029c1738464e54fe6e14f2ff577 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TRIFLEX @@ -0,0 +1 @@ +# CONFIG_PATA_TRIFLEX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA new file mode 100644 index 0000000000000000000000000000000000000000..b86e4cc69ab062031137f986cd1f714666944b08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA @@ -0,0 +1 @@ +CONFIG_PATA_VIA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_WINBOND b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_WINBOND new file mode 100644 index 0000000000000000000000000000000000000000..9b3eb62898cafc57cbb07350ea52a79dee397999 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_WINBOND @@ -0,0 +1 @@ +# CONFIG_PATA_WINBOND is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PC300TOO b/anolis/configs/L2-OPTIONAL/default/CONFIG_PC300TOO new file mode 100644 index 0000000000000000000000000000000000000000..00bad4065b40ae96df51b298c45a9011d3d80a25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PC300TOO @@ -0,0 +1 @@ +# CONFIG_PC300TOO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCC new 
file mode 100644 index 0000000000000000000000000000000000000000..36eb8c2abf40d0d39db76b42e3c3555c3ae095a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCC @@ -0,0 +1 @@ +CONFIG_PCC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCCARD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCCARD new file mode 100644 index 0000000000000000000000000000000000000000..44b97544dc6e0467797d090f768b70fde0067fdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCCARD @@ -0,0 +1 @@ +CONFIG_PCCARD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI200SYN b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI200SYN new file mode 100644 index 0000000000000000000000000000000000000000..95dcd9b5401b5d6fa331f966847e82caf1bb2ee9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI200SYN @@ -0,0 +1 @@ +# CONFIG_PCI200SYN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEAER_INJECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEAER_INJECT new file mode 100644 index 0000000000000000000000000000000000000000..3e2f994f4140c39bee25677564f462b3e06282df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEAER_INJECT @@ -0,0 +1 @@ +CONFIG_PCIEAER_INJECT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_PERFORMANCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_PERFORMANCE new file mode 100644 index 0000000000000000000000000000000000000000..22e8072b9f9eddf662461795233246f454592015 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_PERFORMANCE @@ -0,0 +1 @@ +# CONFIG_PCIEASPM_PERFORMANCE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWERSAVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWERSAVE new file mode 100644 index 0000000000000000000000000000000000000000..ac6efe4b078b854c5c4dc6d3d70529ff66b487ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWERSAVE @@ -0,0 +1 @@ +# CONFIG_PCIEASPM_POWERSAVE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWER_SUPERSAVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWER_SUPERSAVE new file mode 100644 index 0000000000000000000000000000000000000000..5f5d7b13575f77bde69ba1fa12156257e2a42e47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWER_SUPERSAVE @@ -0,0 +1 @@ +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_DW_PLAT_HOST b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_DW_PLAT_HOST new file mode 100644 index 0000000000000000000000000000000000000000..02bf1662b42ba3b15d204a0d438d7dd66344693d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_DW_PLAT_HOST @@ -0,0 +1 @@ +# CONFIG_PCIE_DW_PLAT_HOST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PME b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PME new file mode 100644 index 0000000000000000000000000000000000000000..4b9611bfb3fd9561d05c0d999e22b30b7e7773e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PME @@ -0,0 +1 @@ +CONFIG_PCIE_PME=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PTM b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PTM new file mode 100644 index 0000000000000000000000000000000000000000..aad8440d67991c3e515f767c8e4da0adf70fcf24 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PTM @@ -0,0 +1 @@ +# CONFIG_PCIE_PTM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIPCWATCHDOG 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIPCWATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..fedb6b37ba28b718e949b55b513a5c9d3da2587f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIPCWATCHDOG @@ -0,0 +1 @@ +CONFIG_PCIPCWATCHDOG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ATS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ATS new file mode 100644 index 0000000000000000000000000000000000000000..3d08025de233afee82da19f59e0bc23d513489c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ATS @@ -0,0 +1 @@ +CONFIG_PCI_ATS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..6162d029fd616f472d2b5dbd1624a9b4bf92f59a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DEBUG @@ -0,0 +1 @@ +# CONFIG_PCI_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOE new file mode 100644 index 0000000000000000000000000000000000000000..94c5a31f45dc08539940219ed6533a6b6daf9ded --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOE @@ -0,0 +1 @@ +CONFIG_PCI_DOE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOMAINS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOMAINS new file mode 100644 index 0000000000000000000000000000000000000000..115ead1d588a3c6c3f46f086efc30db6100c9a20 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOMAINS @@ -0,0 +1 @@ +CONFIG_PCI_DOMAINS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT new file mode 100644 index 0000000000000000000000000000000000000000..d90e2a4f79b4643ad4461bbb102c1af4f6560ca1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT @@ -0,0 +1 @@ +# CONFIG_PCI_ENDPOINT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT_TEST new file mode 100644 index 0000000000000000000000000000000000000000..ac8854da9d653fcab23a08a211c3d711dd29a0a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT_TEST @@ -0,0 +1 @@ +# CONFIG_PCI_ENDPOINT_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_LABEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_LABEL new file mode 100644 index 0000000000000000000000000000000000000000..6d8608b8087063c4c23e94d1cacc12197c5fff39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_LABEL @@ -0,0 +1 @@ +CONFIG_PCI_LABEL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_MESON b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_MESON new file mode 100644 index 0000000000000000000000000000000000000000..22158fded46373261101f8b8943a9577b705399c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_MESON @@ -0,0 +1 @@ +# CONFIG_PCI_MESON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_P2PDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_P2PDMA new file mode 100644 index 0000000000000000000000000000000000000000..8898dbd7961d50de0e810fb2ba85c2f6295a49d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_P2PDMA @@ -0,0 +1 @@ +# CONFIG_PCI_P2PDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_REALLOC_ENABLE_AUTO b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_REALLOC_ENABLE_AUTO new file mode 
100644 index 0000000000000000000000000000000000000000..55501f103c93dab905d21a878f051d19e2b41045 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_REALLOC_ENABLE_AUTO @@ -0,0 +1 @@ +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_SW_SWITCHTEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_SW_SWITCHTEC new file mode 100644 index 0000000000000000000000000000000000000000..cc2e5e8ba5d3964aa44dffac4fbd0c8baf21475e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_SW_SWITCHTEC @@ -0,0 +1 @@ +# CONFIG_PCI_SW_SWITCHTEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCMCIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCMCIA new file mode 100644 index 0000000000000000000000000000000000000000..3cf9bfbdce64e50b3406f1da6555dbf119d7e406 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCMCIA @@ -0,0 +1 @@ +# CONFIG_PCMCIA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCPU_DEV_REFCNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCPU_DEV_REFCNT new file mode 100644 index 0000000000000000000000000000000000000000..235a1ec3a68ce16eedf7b6694a8f7f06d1bde741 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCPU_DEV_REFCNT @@ -0,0 +1 @@ +CONFIG_PCPU_DEV_REFCNT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCS_XPCS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCS_XPCS new file mode 100644 index 0000000000000000000000000000000000000000..a50391efc8d12fc099691cc27ee064e633068607 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCS_XPCS @@ -0,0 +1 @@ +CONFIG_PCS_XPCS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PDC_ADMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PDC_ADMA new file mode 100644 index 0000000000000000000000000000000000000000..108646b90ec55c3a586973a08db3b59fda51ec67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PDC_ADMA @@ -0,0 +1 @@ +# CONFIG_PDC_ADMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PECI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PECI new file mode 100644 index 0000000000000000000000000000000000000000..44ed21553769823c5a7b626207c72010d5d18cbc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PECI @@ -0,0 +1 @@ +# CONFIG_PECI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK new file mode 100644 index 0000000000000000000000000000000000000000..fad3e9927d5db1fb2e5eb726d5c09aadcff0a188 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK @@ -0,0 +1 @@ +CONFIG_PER_VMA_LOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK_STATS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK_STATS new file mode 100644 index 0000000000000000000000000000000000000000..d552e777c90401e5b4ab102ce96b4bca5ca3ad30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK_STATS @@ -0,0 +1 @@ +# CONFIG_PER_VMA_LOCK_STATS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHANTOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHANTOM new file mode 100644 index 0000000000000000000000000000000000000000..dc10468e6b790cc1f7ae0f9a82003124455e30f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHANTOM @@ -0,0 +1 @@ +# CONFIG_PHANTOM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHONET b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHONET new file mode 100644 index 
0000000000000000000000000000000000000000..093c18313d172a53ee6cc38b78465f1468324c54 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHONET @@ -0,0 +1 @@ +# CONFIG_PHONET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYLINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYLINK new file mode 100644 index 0000000000000000000000000000000000000000..cc1e23e0b2aac82d832be5bf4d2d3668a14c3eb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYLINK @@ -0,0 +1 @@ +CONFIG_PHYLINK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYS_ADDR_T_64BIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYS_ADDR_T_64BIT new file mode 100644 index 0000000000000000000000000000000000000000..ec6ffd47fc56e6b76934b73f3c4b94be6775b071 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYS_ADDR_T_64BIT @@ -0,0 +1 @@ +CONFIG_PHYS_ADDR_T_64BIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_CAN_TRANSCEIVER b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_CAN_TRANSCEIVER new file mode 100644 index 0000000000000000000000000000000000000000..dc960daa801119a4ffc45c695702924414017664 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_CAN_TRANSCEIVER @@ -0,0 +1 @@ +# CONFIG_PHY_CAN_TRANSCEIVER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_HSIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_HSIC new file mode 100644 index 0000000000000000000000000000000000000000..450c0fddf2c81b5d35e8066030fc6aec52f5ee6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_HSIC @@ -0,0 +1 @@ +# CONFIG_PHY_PXA_28NM_HSIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_USB2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_USB2 new file mode 100644 index 0000000000000000000000000000000000000000..02e05a7915d1742e2f793a8cf16d4d2761bc45ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_USB2 @@ -0,0 +1 @@ +# CONFIG_PHY_PXA_28NM_USB2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCONF b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCONF new file mode 100644 index 0000000000000000000000000000000000000000..2f04b24a495956c8d5948fe3f4d8a380cd7c7976 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCONF @@ -0,0 +1 @@ +CONFIG_PINCONF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL new file mode 100644 index 0000000000000000000000000000000000000000..d8d83d561de7c6be7555073035cfdb363f343d51 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL @@ -0,0 +1 @@ +CONFIG_PINCTRL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_AMD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_AMD new file mode 100644 index 0000000000000000000000000000000000000000..02626b83511bbb8cef62dda7e223d46813691250 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_AMD @@ -0,0 +1 @@ +# CONFIG_PINCTRL_AMD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_CY8C95X0 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_CY8C95X0 new file mode 100644 index 0000000000000000000000000000000000000000..179d4861c81e32c42b5bd94637e7ecd99ef83473 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_CY8C95X0 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_CY8C95X0 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_MCP23S08 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_MCP23S08 new file 
mode 100644 index 0000000000000000000000000000000000000000..948eb60576b371ada73a85c74ca3f29666fee541 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_MCP23S08 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_MCP23S08 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_SX150X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_SX150X new file mode 100644 index 0000000000000000000000000000000000000000..4416b9f36a3de0b38cded76ff91e8ad2097e256b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_SX150X @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SX150X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINMUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINMUX new file mode 100644 index 0000000000000000000000000000000000000000..2e704c683cc714fb85baadb9d8075873aa99b5e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINMUX @@ -0,0 +1 @@ +CONFIG_PINMUX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PLDMFW b/anolis/configs/L2-OPTIONAL/default/CONFIG_PLDMFW new file mode 100644 index 0000000000000000000000000000000000000000..8f49f56d565126a6bc564f3b5dbed1f0764e6d07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PLDMFW @@ -0,0 +1 @@ +CONFIG_PLDMFW=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PLX_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PLX_DMA new file mode 100644 index 0000000000000000000000000000000000000000..61c623f9c8fd218e01a0d49cc1aff0b1d78650ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PLX_DMA @@ -0,0 +1 @@ +# CONFIG_PLX_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_ADP5520 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_ADP5520 new file mode 100644 index 0000000000000000000000000000000000000000..8664194c8f70f003c957061724abf5539873f63a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_ADP5520 @@ -0,0 +1 @@ +# CONFIG_PMIC_ADP5520 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_DA903X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_DA903X new file mode 100644 index 0000000000000000000000000000000000000000..875cd25808a315ba74c66dfeeed574c8b6a130b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_DA903X @@ -0,0 +1 @@ +# CONFIG_PMIC_DA903X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_CLK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_CLK new file mode 100644 index 0000000000000000000000000000000000000000..eba55a7afd61bef44996a7aee44e3b29d6f87e0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_CLK @@ -0,0 +1 @@ +CONFIG_PM_CLK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_OPP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_OPP new file mode 100644 index 0000000000000000000000000000000000000000..bbe2b56ba5ff048da5cf289ac7ae227c979a97b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_OPP @@ -0,0 +1 @@ +CONFIG_PM_OPP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_USERSPACE_AUTOSLEEP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_USERSPACE_AUTOSLEEP new file mode 100644 index 0000000000000000000000000000000000000000..5ef0a4ef953b78b0e3025b14f9e84fb76bd31acb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_USERSPACE_AUTOSLEEP @@ -0,0 +1 @@ +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_BLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_BLOCK new file mode 100644 index 
0000000000000000000000000000000000000000..e3d84638baf1516556de37739fa0d30bf718b827 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_BLOCK @@ -0,0 +1 @@ +CONFIG_PNFS_BLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FILE_LAYOUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FILE_LAYOUT new file mode 100644 index 0000000000000000000000000000000000000000..3fe33fa512f2d6352b9e65d977d3f77d3671f655 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FILE_LAYOUT @@ -0,0 +1 @@ +CONFIG_PNFS_FILE_LAYOUT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FLEXFILE_LAYOUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FLEXFILE_LAYOUT new file mode 100644 index 0000000000000000000000000000000000000000..94dbe98593c8cc057be5e5322678e0fca09e56f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FLEXFILE_LAYOUT @@ -0,0 +1 @@ +CONFIG_PNFS_FLEXFILE_LAYOUT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PNP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNP new file mode 100644 index 0000000000000000000000000000000000000000..5c70e41cdb6b842fdfaeecac3bb57169470e92fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNP @@ -0,0 +1 @@ +CONFIG_PNP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PNPACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNPACPI new file mode 100644 index 0000000000000000000000000000000000000000..02d6f696ebc4b956e2eaa8f0feca97d9f8c1dcc3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNPACPI @@ -0,0 +1 @@ +CONFIG_PNPACPI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_POSIX_CPU_TIMERS_TASK_WORK b/anolis/configs/L2-OPTIONAL/default/CONFIG_POSIX_CPU_TIMERS_TASK_WORK new file mode 100644 index 0000000000000000000000000000000000000000..656863ecc1205f195f7193e02ea8e2c95d0998a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_POSIX_CPU_TIMERS_TASK_WORK @@ -0,0 +1 @@ +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY new file mode 100644 index 0000000000000000000000000000000000000000..5f28cbaf0dbd52e285dace3a7c1557517568554b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY @@ -0,0 +1 @@ +CONFIG_POWER_SUPPLY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..23ed22a2e0aeecb532abd91f415da5c396cce64b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_DEBUG @@ -0,0 +1 @@ +# CONFIG_POWER_SUPPLY_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_HWMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_HWMON new file mode 100644 index 0000000000000000000000000000000000000000..b9bd3a83dc0d5f7937b66e0b32fb298493768112 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_HWMON @@ -0,0 +1 @@ +CONFIG_POWER_SUPPLY_HWMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOATM b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOATM new file mode 100644 index 0000000000000000000000000000000000000000..d2b11b3b60466fb6e9e0150ec862c0011087dc0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOATM @@ -0,0 +1 @@ +CONFIG_PPPOATM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS new file mode 100644 
index 0000000000000000000000000000000000000000..19200f9dbcc2b219623324a69e9cb6d599010f53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS @@ -0,0 +1 @@ +CONFIG_PPPOE_HASH_BITS=4 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_1 new file mode 100644 index 0000000000000000000000000000000000000000..075eaaab96cf3eb3f043b8662eb8f87ee9d0556a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_1 @@ -0,0 +1 @@ +# CONFIG_PPPOE_HASH_BITS_1 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_2 new file mode 100644 index 0000000000000000000000000000000000000000..ce4762a2e49941ea5cbbd6ef0058bd42ebf7a6b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_2 @@ -0,0 +1 @@ +# CONFIG_PPPOE_HASH_BITS_2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_4 new file mode 100644 index 0000000000000000000000000000000000000000..7db351db43136475c838888a5f34d621ab49da5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_4 @@ -0,0 +1 @@ +CONFIG_PPPOE_HASH_BITS_4=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_8 new file mode 100644 index 0000000000000000000000000000000000000000..8e1d3a4148be17f1dd03460c379df5029570b231 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_8 @@ -0,0 +1 @@ +# CONFIG_PPPOE_HASH_BITS_8 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOL2TP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOL2TP new file mode 100644 index 0000000000000000000000000000000000000000..cb8272d91d792e093192aef194af04597f67183d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOL2TP @@ -0,0 +1 @@ +CONFIG_PPPOL2TP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_ASYNC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_ASYNC new file mode 100644 index 0000000000000000000000000000000000000000..822b86ae94f9ce5ba944ae9f24cd5b19cbbe08c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_ASYNC @@ -0,0 +1 @@ +CONFIG_PPP_ASYNC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_BSDCOMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_BSDCOMP new file mode 100644 index 0000000000000000000000000000000000000000..da4db48b45d4a654febdd2b3def6acb432893841 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_BSDCOMP @@ -0,0 +1 @@ +CONFIG_PPP_BSDCOMP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_DEFLATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_DEFLATE new file mode 100644 index 0000000000000000000000000000000000000000..a292110b3eb071064725c9fa142213246555011b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_DEFLATE @@ -0,0 +1 @@ +CONFIG_PPP_DEFLATE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_FILTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_FILTER new file mode 100644 index 0000000000000000000000000000000000000000..0a7a25c0811949df7163333d5f656ec1b2d5b5a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_FILTER @@ -0,0 +1 @@ +CONFIG_PPP_FILTER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MPPE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MPPE new file mode 100644 index 
0000000000000000000000000000000000000000..534e6d2b197659b368cef0a8f2958c91962d5b6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MPPE @@ -0,0 +1 @@ +CONFIG_PPP_MPPE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MULTILINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MULTILINK new file mode 100644 index 0000000000000000000000000000000000000000..815360d7a946ad17b9073a7541b43bd16c3d3e6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MULTILINK @@ -0,0 +1 @@ +CONFIG_PPP_MULTILINK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_SYNC_TTY b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_SYNC_TTY new file mode 100644 index 0000000000000000000000000000000000000000..e50b04a0309634ccc3059afc9819175964a53256 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_SYNC_TTY @@ -0,0 +1 @@ +CONFIG_PPP_SYNC_TTY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS new file mode 100644 index 0000000000000000000000000000000000000000..192e8c5cf15f49b5a782335b66c5adcf3e6d84e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS @@ -0,0 +1 @@ +CONFIG_PPS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..70ddbfa66e258cbbfcf4b269802610aec73c2df8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_GPIO @@ -0,0 +1 @@ +CONFIG_PPS_CLIENT_GPIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_KTIMER b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_KTIMER new file mode 100644 index 0000000000000000000000000000000000000000..58cc6a5c15b2d85c3a3b327030fb3439880d99b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_KTIMER @@ -0,0 +1 @@ +# CONFIG_PPS_CLIENT_KTIMER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_LDISC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_LDISC new file mode 100644 index 0000000000000000000000000000000000000000..856b2b85ba9e69017dcced6b66c88331cded3061 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_LDISC @@ -0,0 +1 @@ +CONFIG_PPS_CLIENT_LDISC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..68357ea56eee6dbf9e3d0f9e39b26871ea6fafdd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_DEBUG @@ -0,0 +1 @@ +# CONFIG_PPS_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPTP new file mode 100644 index 0000000000000000000000000000000000000000..cbe2708765cc278e743c0d05fb9d9290a79f3aa3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPTP @@ -0,0 +1 @@ +CONFIG_PPTP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPTIRQ_DELAY_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPTIRQ_DELAY_TEST new file mode 100644 index 0000000000000000000000000000000000000000..6c650bba671a651c03e5a4dd5c881d1d26dc507f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPTIRQ_DELAY_TEST @@ -0,0 +1 @@ +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_COUNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_COUNT new file mode 100644 index 
0000000000000000000000000000000000000000..607b3560847f0b20c55d1dd5dff05670c9279f98 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_COUNT @@ -0,0 +1 @@ +CONFIG_PREEMPT_COUNT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_NOTIFIERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_NOTIFIERS new file mode 100644 index 0000000000000000000000000000000000000000..d9c869d3f5131f4fe17770a083867fa61c050813 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_NOTIFIERS @@ -0,0 +1 @@ +CONFIG_PREEMPT_NOTIFIERS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PREVENT_FIRMWARE_BUILD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREVENT_FIRMWARE_BUILD new file mode 100644 index 0000000000000000000000000000000000000000..3793f37f1d895e6cdd679be2a8941cd51fed7cc6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREVENT_FIRMWARE_BUILD @@ -0,0 +1 @@ +CONFIG_PREVENT_FIRMWARE_BUILD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PRIME_NUMBERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PRIME_NUMBERS new file mode 100644 index 0000000000000000000000000000000000000000..86f0c393140b47810e19bfcaf76d3eb8d5cfebbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PRIME_NUMBERS @@ -0,0 +1 @@ +# CONFIG_PRIME_NUMBERS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PRINTK_CALLER b/anolis/configs/L2-OPTIONAL/default/CONFIG_PRINTK_CALLER new file mode 100644 index 0000000000000000000000000000000000000000..470a071e3b58765082851d9b748c1fac75e36cb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PRINTK_CALLER @@ -0,0 +1 @@ +# CONFIG_PRINTK_CALLER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROBE_EVENTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROBE_EVENTS new file mode 100644 index 0000000000000000000000000000000000000000..5bc5f69f71767321b0a828b203b690c5b2adfe62 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROBE_EVENTS @@ -0,0 +1 @@ +CONFIG_PROBE_EVENTS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_CPU_RESCTRL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_CPU_RESCTRL new file mode 100644 index 0000000000000000000000000000000000000000..8f76128c1853e092c36f5017fc2f66245cbac9c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_CPU_RESCTRL @@ -0,0 +1 @@ +CONFIG_PROC_CPU_RESCTRL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_FORCE_PTRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_FORCE_PTRACE new file mode 100644 index 0000000000000000000000000000000000000000..bd7b76112fa42b8e4aeeab4f82dace2e9066c8ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_FORCE_PTRACE @@ -0,0 +1 @@ +# CONFIG_PROC_MEM_FORCE_PTRACE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_NO_FORCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_NO_FORCE new file mode 100644 index 0000000000000000000000000000000000000000..bbdc2449cd55ea05581d8e073a732538573db1b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_NO_FORCE @@ -0,0 +1 @@ +# CONFIG_PROC_MEM_NO_FORCE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_PID_CPUSET b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_PID_CPUSET new file mode 100644 index 0000000000000000000000000000000000000000..1d12cb14d2708baa153ed27b84475eb6d88a295c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_PID_CPUSET @@ -0,0 +1 @@ +CONFIG_PROC_PID_CPUSET=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROVE_LOCKING b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROVE_LOCKING new file mode 100644 index 0000000000000000000000000000000000000000..adea6cc66ded0b252763eb1f82b812fbfdfe2150 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROVE_LOCKING @@ -0,0 +1 @@ +# CONFIG_PROVE_LOCKING is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_FTRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_FTRACE new file mode 100644 index 0000000000000000000000000000000000000000..e8e51dd1ea14f14fdecf2e762a2eda5daadf0d01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_FTRACE @@ -0,0 +1 @@ +# CONFIG_PSTORE_FTRACE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_PMSG b/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_PMSG new file mode 100644 index 0000000000000000000000000000000000000000..06350590dff19148412289f85506f4bc83b853e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_PMSG @@ -0,0 +1 @@ +# CONFIG_PSTORE_PMSG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PTDUMP_DEBUGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTDUMP_DEBUGFS new file mode 100644 index 0000000000000000000000000000000000000000..57785aa591d29fa8755bdfee24eac1381fdf606f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTDUMP_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_PTDUMP_DEBUGFS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK new file mode 100644 index 0000000000000000000000000000000000000000..120e79f0f9241f7bee17a85b63d362a4d064ceb9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK @@ -0,0 +1 @@ +CONFIG_PTP_1588_CLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDT82P33 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDT82P33 new file mode 100644 index 0000000000000000000000000000000000000000..f0dda628f592788ec62557fe71e6599f92eff145 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDT82P33 @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDTCM b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDTCM new file mode 100644 index 0000000000000000000000000000000000000000..49444182dab822a95d3f8a2489133d144925c71b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDTCM @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_IDTCM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_INES b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_INES new file mode 100644 index 0000000000000000000000000000000000000000..ccff0ec88e76d89ebca405a6de4ee196e4eb38cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_INES @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_INES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM new file mode 100644 index 0000000000000000000000000000000000000000..346c909b60cef80ddf356fb5ad093e4f68021df3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM @@ -0,0 +1 @@ +CONFIG_PWM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..2c6acd1d009913a648911189a8133fa3af2df97a --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_DEBUG @@ -0,0 +1 @@ +# CONFIG_PWM_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_PCA9685 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_PCA9685 new file mode 100644 index 0000000000000000000000000000000000000000..87e2b86aecc6698dc079165bb8d8af06e6225256 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_PCA9685 @@ -0,0 +1 @@ +# CONFIG_PWM_PCA9685 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_SYSFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_SYSFS new file mode 100644 index 0000000000000000000000000000000000000000..27e3020762902031660c433b3be21c160df2c807 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_SYSFS @@ -0,0 +1 @@ +CONFIG_PWM_SYSFS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED new file mode 100644 index 0000000000000000000000000000000000000000..c39c087a9604ea47f9dabeadc7c94cf69706a884 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED @@ -0,0 +1 @@ +CONFIG_QED=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDE b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDE new file mode 100644 index 0000000000000000000000000000000000000000..3e672b16687159d66d3daf69e1dabaaac5ed072e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDE @@ -0,0 +1 @@ +CONFIG_QEDE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDF b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDF new file mode 100644 index 0000000000000000000000000000000000000000..33c24865a5147a2dc403255f185f74ab5ca4932d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDF @@ -0,0 +1 @@ +CONFIG_QEDF=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDI b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDI new file mode 100644 index 0000000000000000000000000000000000000000..c751aba20d3b5b928cf3f0abf5c0ecfbffb0a2b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDI @@ -0,0 +1 @@ +CONFIG_QEDI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_FCOE b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_FCOE new file mode 100644 index 0000000000000000000000000000000000000000..5681a40e94dbe11217b5c2dc0471a9542d3962e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_FCOE @@ -0,0 +1 @@ +CONFIG_QED_FCOE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_ISCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_ISCSI new file mode 100644 index 0000000000000000000000000000000000000000..46c1c743e83b2f69c339ca35d5f462e76fafa9e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_ISCSI @@ -0,0 +1 @@ +CONFIG_QED_ISCSI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_LL2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_LL2 new file mode 100644 index 0000000000000000000000000000000000000000..f574d32c6d192193e3c2a4c89415f8c51e392381 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_LL2 @@ -0,0 +1 @@ +CONFIG_QED_LL2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_OOO b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_OOO new file mode 100644 index 0000000000000000000000000000000000000000..b17caa3fb20c8138c94e55f25818851751236081 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_OOO @@ -0,0 +1 @@ +CONFIG_QED_OOO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_RDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_RDMA new file mode 100644 index 
0000000000000000000000000000000000000000..98cb65f0108d31ff7a12bb97b0794ede66d49255 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_RDMA @@ -0,0 +1 @@ +CONFIG_QED_RDMA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_SRIOV b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_SRIOV new file mode 100644 index 0000000000000000000000000000000000000000..35fa4ac2ff911fe58e035e698e65b85feb2c2dd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_SRIOV @@ -0,0 +1 @@ +CONFIG_QED_SRIOV=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QLA3XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_QLA3XXX new file mode 100644 index 0000000000000000000000000000000000000000..718c130d4f844a44dc6516f1b000e2240107a358 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QLA3XXX @@ -0,0 +1 @@ +CONFIG_QLA3XXX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QLCNIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_QLCNIC new file mode 100644 index 0000000000000000000000000000000000000000..1ba4defc930ea1563ccf44aa33de841ad6ac5889 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QLCNIC @@ -0,0 +1 @@ +# CONFIG_QLCNIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX4FS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX4FS_FS new file mode 100644 index 0000000000000000000000000000000000000000..43a604f47a826fb437b29d65df293460ac0be84c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX4FS_FS @@ -0,0 +1 @@ +# CONFIG_QNX4FS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX6FS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX6FS_FS new file mode 100644 index 0000000000000000000000000000000000000000..9fba608a836f0804097ff412a9deb81cc5839763 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX6FS_FS @@ -0,0 +1 @@ +# CONFIG_QNX6FS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QRTR b/anolis/configs/L2-OPTIONAL/default/CONFIG_QRTR new file mode 100644 index 0000000000000000000000000000000000000000..19f911ca5043340c11b93b8e25a47c43dc90747e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QRTR @@ -0,0 +1 @@ +# CONFIG_QRTR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QSEMI_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_QSEMI_PHY new file mode 100644 index 0000000000000000000000000000000000000000..460334246d9562331f21e4fe93e1414f5cf1b8a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QSEMI_PHY @@ -0,0 +1 @@ +CONFIG_QSEMI_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_RWLOCKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_RWLOCKS new file mode 100644 index 0000000000000000000000000000000000000000..e671b310df7cc224ab8c262ea50a63902a0272e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_RWLOCKS @@ -0,0 +1 @@ +CONFIG_QUEUED_RWLOCKS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_SPINLOCKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_SPINLOCKS new file mode 100644 index 0000000000000000000000000000000000000000..b0cb4b31e1796c95e5693b3a33c597fd077d60d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_SPINLOCKS @@ -0,0 +1 @@ +CONFIG_QUEUED_SPINLOCKS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTACTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTACTL new file mode 100644 index 0000000000000000000000000000000000000000..d473401f10a46c8160815b554ac0c81824761f22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTACTL @@ 
-0,0 +1 @@ +CONFIG_QUOTACTL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTA_TREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTA_TREE new file mode 100644 index 0000000000000000000000000000000000000000..afac1fbe946842d951e723bf33fa893f388a8e29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTA_TREE @@ -0,0 +1 @@ +CONFIG_QUOTA_TREE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_R8169 b/anolis/configs/L2-OPTIONAL/default/CONFIG_R8169 new file mode 100644 index 0000000000000000000000000000000000000000..3f47885c13bcc992c25df3c6e862d8f4d55605a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_R8169 @@ -0,0 +1 @@ +CONFIG_R8169=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM32_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM32_SELFTEST new file mode 100644 index 0000000000000000000000000000000000000000..5c9c3b98675d28b8c0b44be05c495dd5b765f817 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM32_SELFTEST @@ -0,0 +1 @@ +# CONFIG_RANDOM32_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM_KMALLOC_CACHES b/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM_KMALLOC_CACHES new file mode 100644 index 0000000000000000000000000000000000000000..04abd34672244b54f3cad831abda8cd6e04d3953 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM_KMALLOC_CACHES @@ -0,0 +1 @@ +# CONFIG_RANDOM_KMALLOC_CACHES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RAPIDIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_RAPIDIO new file mode 100644 index 0000000000000000000000000000000000000000..79892b190b054f0990a2af37a7c0d1924234eeb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RAPIDIO @@ -0,0 +1 @@ +# CONFIG_RAPIDIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RATIONAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RATIONAL new file mode 100644 index 0000000000000000000000000000000000000000..5be225b5331e1e6ed255db97218400058aed3275 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RATIONAL @@ -0,0 +1 @@ +CONFIG_RATIONAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RBTREE_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_RBTREE_TEST new file mode 100644 index 0000000000000000000000000000000000000000..51b8db7b7813f00af1746f006dfc29febf13a805 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RBTREE_TEST @@ -0,0 +1 @@ +# CONFIG_RBTREE_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_CPU_STALL_CPUTIME b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_CPU_STALL_CPUTIME new file mode 100644 index 0000000000000000000000000000000000000000..caef7fbece1174eb932c5a645ce81ba795f737c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_CPU_STALL_CPUTIME @@ -0,0 +1 @@ +# CONFIG_RCU_CPU_STALL_CPUTIME is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_EXP_CPU_STALL_TIMEOUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_EXP_CPU_STALL_TIMEOUT new file mode 100644 index 0000000000000000000000000000000000000000..3012f3c3e8493bc76541507595344995530906a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_EXP_CPU_STALL_TIMEOUT @@ -0,0 +1 @@ +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_LAZY b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_LAZY new file mode 100644 index 0000000000000000000000000000000000000000..545896a7b18c0a8a764216aa3804267c38815ef0 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_LAZY @@ -0,0 +1 @@ +# CONFIG_RCU_LAZY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NEED_SEGCBLIST b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NEED_SEGCBLIST new file mode 100644 index 0000000000000000000000000000000000000000..fa4469bf85435c825dd07bc2d1093272db9173df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NEED_SEGCBLIST @@ -0,0 +1 @@ +CONFIG_RCU_NEED_SEGCBLIST=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NOCB_CPU_DEFAULT_ALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NOCB_CPU_DEFAULT_ALL new file mode 100644 index 0000000000000000000000000000000000000000..e53819b90f6a98fd7ba7157bbc1d5476601a46bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NOCB_CPU_DEFAULT_ALL @@ -0,0 +1 @@ +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_STALL_COMMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_STALL_COMMON new file mode 100644 index 0000000000000000000000000000000000000000..181afd39fa717cd7913add8a5c1dace396e30945 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_STALL_COMMON @@ -0,0 +1 @@ +CONFIG_RCU_STALL_COMMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_RDS new file mode 100644 index 0000000000000000000000000000000000000000..ba0653e704deb09caead3ad874422e31e56b3106 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RDS @@ -0,0 +1 @@ +# CONFIG_RDS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_AUTOPM b/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_AUTOPM new file mode 100644 index 0000000000000000000000000000000000000000..ea952943f8542c74aa9d85e9faa15d7b6dda22e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_AUTOPM @@ -0,0 +1 @@ +CONFIG_REALTEK_AUTOPM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_PHY new file mode 100644 index 0000000000000000000000000000000000000000..051e7cc45b00791aa3955d11f54f6e2e8164ee3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_PHY @@ -0,0 +1 @@ +CONFIG_REALTEK_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON new file mode 100644 index 0000000000000000000000000000000000000000..d9b33b6cfd4eca894314d45a70af3a23c5d0cb3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON @@ -0,0 +1 @@ +CONFIG_REED_SOLOMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_DEC8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_DEC8 new file mode 100644 index 0000000000000000000000000000000000000000..e153f1356a7328357a7a896f46a7366ca66fbc01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_DEC8 @@ -0,0 +1 @@ +CONFIG_REED_SOLOMON_DEC8=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_ENC8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_ENC8 new file mode 100644 index 0000000000000000000000000000000000000000..7624f7bb2759b74a03e4d74154f56e63506c1623 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_ENC8 @@ -0,0 +1 @@ +CONFIG_REED_SOLOMON_ENC8=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_TEST new file mode 100644 index 
0000000000000000000000000000000000000000..6ca2a5270b78a6defa1ef337f9476b7319145256 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_TEST @@ -0,0 +1 @@ +# CONFIG_REED_SOLOMON_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP new file mode 100644 index 0000000000000000000000000000000000000000..5e40b3c4b1223fdfb6766b1ddb644b091306ce75 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP @@ -0,0 +1 @@ +CONFIG_REGMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_I2C new file mode 100644 index 0000000000000000000000000000000000000000..8440b2cafcb2b61b453f8e9c2ae2e0f48c6a434d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_I2C @@ -0,0 +1 @@ +CONFIG_REGMAP_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_SPI new file mode 100644 index 0000000000000000000000000000000000000000..adb8804fabf59fa69ebd0984bb54a5676f1c7d12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_SPI @@ -0,0 +1 @@ +CONFIG_REGMAP_SPI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REISERFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_REISERFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..d11f952ac942e0e28d070dedd0d9323e4734c859 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REISERFS_FS @@ -0,0 +1 @@ +# CONFIG_REISERFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTEPROC b/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTEPROC new file mode 100644 index 0000000000000000000000000000000000000000..5a54c293659949bd7c493132939e558aa0f5583c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTEPROC @@ -0,0 +1 @@ +# CONFIG_REMOTEPROC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTE_TARGET b/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTE_TARGET new file mode 100644 index 0000000000000000000000000000000000000000..91c6f1c28c2ef070bc78f2ffd65040d5823907a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTE_TARGET @@ -0,0 +1 @@ +# CONFIG_REMOTE_TARGET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RENESAS_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_RENESAS_PHY new file mode 100644 index 0000000000000000000000000000000000000000..2314e0cf2d9aeca81e4fe0623a59c9de21852b3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RENESAS_PHY @@ -0,0 +1 @@ +CONFIG_RENESAS_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_ATTACK_MITIGATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_ATTACK_MITIGATION new file mode 100644 index 0000000000000000000000000000000000000000..eea15dd529c2fb61c0b2a16a90e405a36255c0e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_ATTACK_MITIGATION @@ -0,0 +1 @@ +# CONFIG_RESET_ATTACK_MITIGATION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_SYSCON b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_SYSCON new file mode 100644 index 0000000000000000000000000000000000000000..1e76bd1354eebc19af572a8ebe46a7323564771d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_SYSCON @@ -0,0 +1 @@ +# CONFIG_RESET_TI_SYSCON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_TPS380X b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_TPS380X new file mode 100644 
index 0000000000000000000000000000000000000000..4f44ddf912b9daa59b234324e4012540e4786ae3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_TPS380X @@ -0,0 +1 @@ +# CONFIG_RESET_TI_TPS380X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RFD_FTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFD_FTL new file mode 100644 index 0000000000000000000000000000000000000000..3d4b2f0de2c677f9e67274845da8736214ab9caa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFD_FTL @@ -0,0 +1 @@ +# CONFIG_RFD_FTL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL new file mode 100644 index 0000000000000000000000000000000000000000..7ec901c5331d5ead174f1406b9d9e9d3eed276e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL @@ -0,0 +1 @@ +CONFIG_RFKILL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_INPUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_INPUT new file mode 100644 index 0000000000000000000000000000000000000000..15fc0f8ad8c49863c294ed07e58f98fad5143cb1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_INPUT @@ -0,0 +1 @@ +CONFIG_RFKILL_INPUT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_LEDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_LEDS new file mode 100644 index 0000000000000000000000000000000000000000..35a45201d109377c29c79a834964422d9e977db6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_LEDS @@ -0,0 +1 @@ +CONFIG_RFKILL_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RFS_ACCEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFS_ACCEL new file mode 100644 index 0000000000000000000000000000000000000000..3cdf9be6ba1a324afaa526c065e1ced766422ef4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFS_ACCEL @@ -0,0 +1 @@ +CONFIG_RFS_ACCEL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER new file mode 100644 index 0000000000000000000000000000000000000000..624958564c91b6b6e6a1ba8516ef58d0eac2dcf2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER @@ -0,0 +1 @@ +CONFIG_RING_BUFFER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_BENCHMARK b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_BENCHMARK new file mode 100644 index 0000000000000000000000000000000000000000..4116ef243b9e7c069a6bf58ca571a587bce01b88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_BENCHMARK @@ -0,0 +1 @@ +CONFIG_RING_BUFFER_BENCHMARK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_STARTUP_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_STARTUP_TEST new file mode 100644 index 0000000000000000000000000000000000000000..999c92369404e016eedf90b074c45f3b8e29ba7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_STARTUP_TEST @@ -0,0 +1 @@ +# CONFIG_RING_BUFFER_STARTUP_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS new file mode 100644 index 0000000000000000000000000000000000000000..062dee43468f2f1309ccb90e7bca7c2ccf164b4c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS @@ -0,0 +1 @@ +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_2D_SENSOR 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_2D_SENSOR new file mode 100644 index 0000000000000000000000000000000000000000..1e7576254ae56c8517392a3c83edf14bbe868e05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_2D_SENSOR @@ -0,0 +1 @@ +CONFIG_RMI4_2D_SENSOR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_CORE new file mode 100644 index 0000000000000000000000000000000000000000..2e5716d66f12695ab96a27b75b9754f62b206682 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_CORE @@ -0,0 +1 @@ +CONFIG_RMI4_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03 new file mode 100644 index 0000000000000000000000000000000000000000..08ae820b188d72f655bc891288cf7fbc821d1ba1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03 @@ -0,0 +1 @@ +CONFIG_RMI4_F03=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03_SERIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03_SERIO new file mode 100644 index 0000000000000000000000000000000000000000..a1bdeb7d83fc1aa61ad3e58af3efb4c0b63e3975 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03_SERIO @@ -0,0 +1 @@ +CONFIG_RMI4_F03_SERIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F11 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F11 new file mode 100644 index 0000000000000000000000000000000000000000..94542f838bb055ae9809555f1e25f14e9bbc9ae9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F11 @@ -0,0 +1 @@ +CONFIG_RMI4_F11=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F12 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F12 new file mode 100644 index 0000000000000000000000000000000000000000..c1bf5103b420ea329390dc13f0653f6e31b86f75 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F12 @@ -0,0 +1 @@ +CONFIG_RMI4_F12=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F30 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F30 new file mode 100644 index 0000000000000000000000000000000000000000..2efcd29cff5d3d30a76904534e2bcf10d692a7ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F30 @@ -0,0 +1 @@ +CONFIG_RMI4_F30=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F3A b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F3A new file mode 100644 index 0000000000000000000000000000000000000000..7daf3f63cac2cdb64952df0287821dbc51b58177 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F3A @@ -0,0 +1 @@ +# CONFIG_RMI4_F3A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F55 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F55 new file mode 100644 index 0000000000000000000000000000000000000000..893bb030d1a07fc3d760d15c458e8f8feaa6fcd1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F55 @@ -0,0 +1 @@ +CONFIG_RMI4_F55=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_I2C new file mode 100644 index 0000000000000000000000000000000000000000..7310058090517461b635a2cef21374eb8fc33398 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_I2C @@ -0,0 +1 @@ +CONFIG_RMI4_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_SMB b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_SMB new file mode 100644 index 0000000000000000000000000000000000000000..9c2ab3f29e745a4b314c9cdd92a92cbdc0d02059 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_SMB @@ -0,0 +1 @@ +CONFIG_RMI4_SMB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKCHIP_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKCHIP_PHY new file mode 100644 index 0000000000000000000000000000000000000000..e49faf8f93ad524de72c324056c2fc81828db6a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKCHIP_PHY @@ -0,0 +1 @@ +CONFIG_ROCKCHIP_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKER new file mode 100644 index 0000000000000000000000000000000000000000..4e61c5a5ef0a9d0a8b3f7f44fb2a6a1d9458a0a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKER @@ -0,0 +1 @@ +CONFIG_ROCKER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ROMFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROMFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..3591977a24c8b9a47efca75d2d10c35fb875f633 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROMFS_FS @@ -0,0 +1 @@ +# CONFIG_ROMFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1 new file mode 100644 index 0000000000000000000000000000000000000000..3050e4496023c6125209f3eefc591405907c62ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1 @@ -0,0 +1 @@ +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 new file mode 100644 index 0000000000000000000000000000000000000000..a25e20873924849a7b031a9bfa610ec7ff31c9ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 @@ -0,0 +1 @@ +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA new file mode 100644 index 0000000000000000000000000000000000000000..8253c2cb6fe8c9f739e086553e7829d650dda79e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA @@ -0,0 +1 @@ +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_QCOM_GLINK_RPM b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_QCOM_GLINK_RPM new file mode 100644 index 0000000000000000000000000000000000000000..df2fa18d0f733d0dab22be9efcaeab8d1fe1ff1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_QCOM_GLINK_RPM @@ -0,0 +1 @@ +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_VIRTIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_VIRTIO new file mode 100644 index 0000000000000000000000000000000000000000..04b624a7ccc33449913d331e189d8c1c2380f378 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_VIRTIO @@ -0,0 +1 @@ +# CONFIG_RPMSG_VIRTIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..13664a344820d3cac67be302376eaf10fbcf4677 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DEBUG @@ -0,0 +1 @@ +# CONFIG_RTC_DEBUG is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ABEOZ9 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ABEOZ9 new file mode 100644 index 0000000000000000000000000000000000000000..6e67aa0eec5f12377b6aa84ae5ecae7fabb50b0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ABEOZ9 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_ABEOZ9 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_BQ32K b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_BQ32K new file mode 100644 index 0000000000000000000000000000000000000000..a5b9fe1c6a53e3fd3b5e5c2802951e6401566481 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_BQ32K @@ -0,0 +1 @@ +CONFIG_RTC_DRV_BQ32K=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1286 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1286 new file mode 100644 index 0000000000000000000000000000000000000000..26ed4fb2144cbf07cc0477865b21df1363dd363f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1286 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1286=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1302 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1302 new file mode 100644 index 0000000000000000000000000000000000000000..053d2126a95579e38437b71a805c348a987b5b68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1302 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1302 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307 new file mode 100644 index 0000000000000000000000000000000000000000..2d3d02090bc881d4aede65a58300255639b71575 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1307=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307_CENTURY b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307_CENTURY new file mode 100644 index 0000000000000000000000000000000000000000..dd3202f455d349016786af31d352eadf80d1b2b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307_CENTURY @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1307_CENTURY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1374 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1374 new file mode 100644 index 0000000000000000000000000000000000000000..39c837c5e59880b64d11323f871a767c8c335c5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1374 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1374=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1511 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1511 new file mode 100644 index 0000000000000000000000000000000000000000..a36656c78ef1995f5faa652a5a65923a09ae5467 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1511 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1511=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1553 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1553 new file mode 100644 index 0000000000000000000000000000000000000000..a069d4ba2dade6cd153b864d6f3f22ce7b84bc3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1553 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1553=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1672 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1672 new file mode 100644 index 0000000000000000000000000000000000000000..2b12e3905cd5ba51c015f4300b76e8ff30a07396 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1672 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1672=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1742 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1742 new file mode 100644 index 0000000000000000000000000000000000000000..ab8422c1b0a4d29cc3edb66870a610b5c45bc733 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1742 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1742=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS2404 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS2404 new file mode 100644 index 0000000000000000000000000000000000000000..88909128bd78212209c8f7369a0014e08fc747d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS2404 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS2404=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232 new file mode 100644 index 0000000000000000000000000000000000000000..f891d41b863a6e2b48806539f963507a8187056f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS3232=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232_HWMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232_HWMON new file mode 100644 index 0000000000000000000000000000000000000000..616fe7d3bf0ef682f7132d73ab0f826f1634d5e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232_HWMON @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS3232_HWMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_EM3027 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_EM3027 new file mode 100644 index 0000000000000000000000000000000000000000..5045a2f143aebc16a1d64fe5339ba4eb00172a4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_EM3027 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_EM3027=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FM3130 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FM3130 new file mode 100644 index 0000000000000000000000000000000000000000..32065e365a4b0b7b3cb6130f10593c3de4faddd3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FM3130 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_FM3130=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FTRTC010 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FTRTC010 new file mode 100644 index 0000000000000000000000000000000000000000..bbb608e4f5621302adcddf59b890acd3487578c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FTRTC010 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_FTRTC010 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_GOLDFISH b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_GOLDFISH new file mode 100644 index 0000000000000000000000000000000000000000..4b7e561b61de8a999925bd7bc56498cf043c2501 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_GOLDFISH @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_GOLDFISH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL12022 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL12022 new file mode 100644 index 0000000000000000000000000000000000000000..e12f2be210b38768e76709de785206e4ea4b45c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL12022 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_ISL12022=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL1208 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL1208 new file mode 100644 index 
0000000000000000000000000000000000000000..b3f7280e14fc752f4832702d6d47f7fd6442ccfa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL1208 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_ISL1208=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80 new file mode 100644 index 0000000000000000000000000000000000000000..0654b656ca287d8755a73b52a80e02a590d34e07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_M41T80=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80_WDT b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80_WDT new file mode 100644 index 0000000000000000000000000000000000000000..7b134a9a8741f640de80bddf7f560474b9a90ab5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80_WDT @@ -0,0 +1 @@ +CONFIG_RTC_DRV_M41T80_WDT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T35 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T35 new file mode 100644 index 0000000000000000000000000000000000000000..6bbe79a050e88e270f5ad3d013f57d600da78181 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T35 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_M48T35=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T59 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T59 new file mode 100644 index 0000000000000000000000000000000000000000..62a856ff3a64a2834cd37011ee0b15f8cbbea236 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T59 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_M48T59=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T86 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T86 new file mode 100644 index 0000000000000000000000000000000000000000..d7a81799326b5631990607d944c94bf778439805 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T86 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_M48T86 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6900 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6900 new file mode 100644 index 0000000000000000000000000000000000000000..bed716d84938dc4f94ef7b59c760358a3ca8e0a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6900 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_MAX6900=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6916 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6916 new file mode 100644 index 0000000000000000000000000000000000000000..23c78cae6849b364e03b8f677111429954435226 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6916 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_MAX6916 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MSM6242 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MSM6242 new file mode 100644 index 0000000000000000000000000000000000000000..3f567e9991a2a0a0164fda0ba67995a05341f668 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MSM6242 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_MSM6242=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8523 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8523 new file mode 100644 index 0000000000000000000000000000000000000000..a4f00644851c8d64f54ef73b86532ef206dd3733 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8523 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_PCF8523=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF85363 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF85363 new file mode 100644 index 0000000000000000000000000000000000000000..115d491a470fe645a65394c61fd9139484e8ab46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF85363 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_PCF85363 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8563 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8563 new file mode 100644 index 0000000000000000000000000000000000000000..f3654f9d7c19b92c2f659d811e708a730b8262f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8563 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_PCF8563=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8583 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8583 new file mode 100644 index 0000000000000000000000000000000000000000..06e61bf3b136ea519d47d2695c44cbc4a5b48a20 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8583 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_PCF8583=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RP5C01 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RP5C01 new file mode 100644 index 0000000000000000000000000000000000000000..e13e06b65575129befc483ae67fef95debcba675 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RP5C01 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RP5C01=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RS5C372 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RS5C372 new file mode 100644 index 0000000000000000000000000000000000000000..52e9cb50f79434847c4c92540b35e1a0cd2c0123 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RS5C372 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RS5C372=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3028 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3028 new file mode 100644 index 0000000000000000000000000000000000000000..909476a51041f1898b59640013f2cbc56265a4a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3028 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RV3028 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029C2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029C2 new file mode 100644 index 0000000000000000000000000000000000000000..4c0b36c3326566480e1261fbc84a2b08d768a15a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029C2 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RV3029C2=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029_HWMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029_HWMON new file mode 100644 index 0000000000000000000000000000000000000000..5b6c908f74b54ee5d35d15826e8c7892ebd768d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029_HWMON @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RV3029_HWMON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3032 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3032 new file mode 100644 index 0000000000000000000000000000000000000000..e34dbb84b1f61ba5c2093eabb1b991c46889af3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3032 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RV3032 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV8803 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV8803 new file mode 100644 index 0000000000000000000000000000000000000000..413de6727d1d58aac8b59b7a027d6af2b3149624 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV8803 @@ -0,0 +1 @@ +# 
CONFIG_RTC_DRV_RV8803 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX6110 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX6110 new file mode 100644 index 0000000000000000000000000000000000000000..a7fc05a8e0cbd923dc7965528f4e402bc44bc2c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX6110 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RX6110 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8025 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8025 new file mode 100644 index 0000000000000000000000000000000000000000..62d78291898c43fc13fb9ecde7cb0bd9389e8852 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8025 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RX8025=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8581 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8581 new file mode 100644 index 0000000000000000000000000000000000000000..051a55f2a27e985701de6d3eed12a99bede39d49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8581 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RX8581=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_S35390A b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_S35390A new file mode 100644 index 0000000000000000000000000000000000000000..5c810d5654f617c61ef93a4c88424bc643128b43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_S35390A @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_S35390A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_SD3078 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_SD3078 new file mode 100644 index 0000000000000000000000000000000000000000..0d3e4c43e4299c663482eb1d5a2fd5759f9406f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_SD3078 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_SD3078 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_STK17TA8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_STK17TA8 new file mode 100644 index 0000000000000000000000000000000000000000..ace13585227c1d09ffe794f2cada4b7e67369484 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_STK17TA8 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_STK17TA8=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_TEST new file mode 100644 index 0000000000000000000000000000000000000000..70982b01205b31d347855961ca802479baa46733 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_TEST @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_X1205 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_X1205 new file mode 100644 index 0000000000000000000000000000000000000000..444d4d32c80edf088a51e783339b477f1e700a5b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_X1205 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_X1205=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_I2C_AND_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_I2C_AND_SPI new file mode 100644 index 0000000000000000000000000000000000000000..b16b2084499d25458780c49713919716a6c0159b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_I2C_AND_SPI @@ -0,0 +1 @@ +CONFIG_RTC_I2C_AND_SPI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_INTF_DEV_UIE_EMUL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_INTF_DEV_UIE_EMUL new file mode 100644 index 0000000000000000000000000000000000000000..80e6b2a9b8e9625071bfdaef62c981ad53577ca4 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_INTF_DEV_UIE_EMUL @@ -0,0 +1 @@ +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_LIB new file mode 100644 index 0000000000000000000000000000000000000000..cfdd517579cfec5f173db1cdbe06b4d3fb7d2032 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_LIB @@ -0,0 +1 @@ +CONFIG_RTC_LIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RT_MUTEXES b/anolis/configs/L2-OPTIONAL/default/CONFIG_RT_MUTEXES new file mode 100644 index 0000000000000000000000000000000000000000..4402573d0a55c38cdaf61bd3e9a104b70ea440e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RT_MUTEXES @@ -0,0 +1 @@ +CONFIG_RT_MUTEXES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RV b/anolis/configs/L2-OPTIONAL/default/CONFIG_RV new file mode 100644 index 0000000000000000000000000000000000000000..5514d15cb949a8f834fcee45abcfd6909fc208a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RV @@ -0,0 +1 @@ +# CONFIG_RV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RWSEM_SPIN_ON_OWNER b/anolis/configs/L2-OPTIONAL/default/CONFIG_RWSEM_SPIN_ON_OWNER new file mode 100644 index 0000000000000000000000000000000000000000..6585a2376fa844a25c81b792cdb317b39d0965e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RWSEM_SPIN_ON_OWNER @@ -0,0 +1 @@ +CONFIG_RWSEM_SPIN_ON_OWNER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SAMPLES b/anolis/configs/L2-OPTIONAL/default/CONFIG_SAMPLES new file mode 100644 index 0000000000000000000000000000000000000000..6e026deba11288f5bcbe064421bf97a0a93f880b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SAMPLES @@ -0,0 +1 @@ +# CONFIG_SAMPLES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ACARD_AHCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ACARD_AHCI new file mode 100644 index 0000000000000000000000000000000000000000..7ee7391a2fbb1743a5946c702de95fb4ec801b60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ACARD_AHCI @@ -0,0 +1 @@ +# CONFIG_SATA_ACARD_AHCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_DWC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_DWC new file mode 100644 index 0000000000000000000000000000000000000000..57c2836483dad59cefb541ade7f47fd29be60127 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_DWC @@ -0,0 +1 @@ +# CONFIG_SATA_DWC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_HOST b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_HOST new file mode 100644 index 0000000000000000000000000000000000000000..4216567c05ddacd7881fdb52cee5a40d586f2aa7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_HOST @@ -0,0 +1 @@ +CONFIG_SATA_HOST=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_INIC162X b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_INIC162X new file mode 100644 index 0000000000000000000000000000000000000000..6abf495a0521a8d6ef69d44f4518e421e652061b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_INIC162X @@ -0,0 +1 @@ +# CONFIG_SATA_INIC162X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV new file mode 100644 index 0000000000000000000000000000000000000000..cb1877ac78c122cdcbd047a7f179ad268eccf3cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV @@ -0,0 +1 @@ 
+CONFIG_SATA_MV=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV new file mode 100644 index 0000000000000000000000000000000000000000..47109d86c90b93c74399c24abe90fcf35686d7c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV @@ -0,0 +1 @@ +CONFIG_SATA_NV=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE new file mode 100644 index 0000000000000000000000000000000000000000..0376859cb9adb36a42b0f7e68d02cce4f2d0f44c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE @@ -0,0 +1 @@ +CONFIG_SATA_PROMISE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR new file mode 100644 index 0000000000000000000000000000000000000000..e49e1046976ffd71bb3bbdaa8d73a163fff2ebf1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR @@ -0,0 +1 @@ +CONFIG_SATA_QSTOR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL new file mode 100644 index 0000000000000000000000000000000000000000..831bdc200600ff208bc8a0d99ad2e395d06b3e0e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL @@ -0,0 +1 @@ +CONFIG_SATA_SIL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 new file mode 100644 index 0000000000000000000000000000000000000000..999a46eb59c55659efe0095921d20342ec62f98e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 @@ -0,0 +1 @@ +CONFIG_SATA_SIL24=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS new file mode 100644 index 0000000000000000000000000000000000000000..fa6474deeae5871cf595817807d07c9bc6b938b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS @@ -0,0 +1 @@ +CONFIG_SATA_SIS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW new file mode 100644 index 0000000000000000000000000000000000000000..9eac60e9c2a25ab7f4a21c55bd8d5b2f3c4ad92b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW @@ -0,0 +1 @@ +CONFIG_SATA_SVW=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 new file mode 100644 index 0000000000000000000000000000000000000000..2423653ce6c6b9d4b6fe82d6cda90dc49ef28b2a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 @@ -0,0 +1 @@ +CONFIG_SATA_SX4=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI new file mode 100644 index 0000000000000000000000000000000000000000..51607fa855951f97dcc983f8fb0c546893f811f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI @@ -0,0 +1 @@ +CONFIG_SATA_ULI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA new file mode 100644 index 0000000000000000000000000000000000000000..f7822f142fc4cb7001856531cfd775496b757f60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA @@ -0,0 +1 @@ +CONFIG_SATA_VIA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE new file mode 100644 index 
0000000000000000000000000000000000000000..ca2d317e5e44fbc1b2dda13cb62ed56e644366cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE @@ -0,0 +1 @@ +CONFIG_SATA_VITESSE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ZPODD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ZPODD new file mode 100644 index 0000000000000000000000000000000000000000..61d1a70b3a7a507dfffbeba99df82d3d150fe12a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ZPODD @@ -0,0 +1 @@ +# CONFIG_SATA_ZPODD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SBITMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SBITMAP new file mode 100644 index 0000000000000000000000000000000000000000..da987bc7011caa92f40a9958a20027ee3da31179 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SBITMAP @@ -0,0 +1 @@ +CONFIG_SBITMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCF_TORTURE_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCF_TORTURE_TEST new file mode 100644 index 0000000000000000000000000000000000000000..53ebe3b18974ec8b54df812909ab8521d9779e10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCF_TORTURE_TEST @@ -0,0 +1 @@ +# CONFIG_SCF_TORTURE_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_HRTICK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_HRTICK new file mode 100644 index 0000000000000000000000000000000000000000..3c856f61af10585743df0fec2ddbe07860bad328 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_HRTICK @@ -0,0 +1 @@ +CONFIG_SCHED_HRTICK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_MM_CID b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_MM_CID new file mode 100644 index 0000000000000000000000000000000000000000..1ea865fce3807d47a529cb912bf2f9c8dcbc6034 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_MM_CID @@ -0,0 +1 @@ +CONFIG_SCHED_MM_CID=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_STACK_END_CHECK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_STACK_END_CHECK new file mode 100644 index 0000000000000000000000000000000000000000..976ab2b507917a8b074e71eb06edcb6b6bc1b158 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_STACK_END_CHECK @@ -0,0 +1 @@ +# CONFIG_SCHED_STACK_END_CHECK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO new file mode 100644 index 0000000000000000000000000000000000000000..e5a12d9c60dbf35555f6d07b22dd67e91b1d6e82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO @@ -0,0 +1 @@ +CONFIG_SCREEN_INFO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_9XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_9XXX new file mode 100644 index 0000000000000000000000000000000000000000..00de6f158f638ecde2425ef70ae071ad0b1a1a30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_9XXX @@ -0,0 +1 @@ +# CONFIG_SCSI_3W_9XXX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_SAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_SAS new file mode 100644 index 0000000000000000000000000000000000000000..06a89f9183a99f3e1e6d2427bb4cac6d9ffde589 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_SAS @@ -0,0 +1 @@ +# CONFIG_SCSI_3W_SAS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ACARD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ACARD new file mode 100644 index 
0000000000000000000000000000000000000000..d4002ddd0634e7cf32ff36b4875c19a851bec256 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ACARD @@ -0,0 +1 @@ +# CONFIG_SCSI_ACARD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ADVANSYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ADVANSYS new file mode 100644 index 0000000000000000000000000000000000000000..9727b0b930e6d3e5ffebd33e2e95760bd6e168a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ADVANSYS @@ -0,0 +1 @@ +# CONFIG_SCSI_ADVANSYS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC79XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC79XX new file mode 100644 index 0000000000000000000000000000000000000000..801fade388997138275ec4dcb293b1b0a5673dc9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC79XX @@ -0,0 +1 @@ +# CONFIG_SCSI_AIC79XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC7XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC7XXX new file mode 100644 index 0000000000000000000000000000000000000000..f305031d08f607be92268e987a66823112392ae3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC7XXX @@ -0,0 +1 @@ +# CONFIG_SCSI_AIC7XXX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC94XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC94XX new file mode 100644 index 0000000000000000000000000000000000000000..ec2405137293b14846a59c5b9f2ffa0ceaa049ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC94XX @@ -0,0 +1 @@ +# CONFIG_SCSI_AIC94XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AM53C974 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AM53C974 new file mode 100644 index 0000000000000000000000000000000000000000..ce64864a7087b960b11c40b39e4e392890700b83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AM53C974 @@ -0,0 +1 @@ +# CONFIG_SCSI_AM53C974 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ARCMSR b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ARCMSR new file mode 100644 index 0000000000000000000000000000000000000000..425f89f74fbe35092bfb25e6640d0bf28e25c6e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ARCMSR @@ -0,0 +1 @@ +# CONFIG_SCSI_ARCMSR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BFA_FC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BFA_FC new file mode 100644 index 0000000000000000000000000000000000000000..8568e1ec84c22720d9abb430d5b1ad0542d51b10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BFA_FC @@ -0,0 +1 @@ +# CONFIG_SCSI_BFA_FC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BUSLOGIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BUSLOGIC new file mode 100644 index 0000000000000000000000000000000000000000..c1a96e3d8505ea92d686b4eb9051f1d3cb717798 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BUSLOGIC @@ -0,0 +1 @@ +# CONFIG_SCSI_BUSLOGIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CHELSIO_FCOE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CHELSIO_FCOE new file mode 100644 index 0000000000000000000000000000000000000000..8acf63ea341f43d73a67d3a37d8bf9ce456a5550 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CHELSIO_FCOE @@ -0,0 +1 @@ +CONFIG_SCSI_CHELSIO_FCOE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_COMMON 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_COMMON new file mode 100644 index 0000000000000000000000000000000000000000..c26141e329f8d1c87e32c5c8b7d276c85a2bee53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_COMMON @@ -0,0 +1 @@ +CONFIG_SCSI_COMMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB3_ISCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB3_ISCSI new file mode 100644 index 0000000000000000000000000000000000000000..d1e803258f4aa0abb8ce90ca01b34320ded12b1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB3_ISCSI @@ -0,0 +1 @@ +# CONFIG_SCSI_CXGB3_ISCSI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB4_ISCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB4_ISCSI new file mode 100644 index 0000000000000000000000000000000000000000..3cbb0b57d96a1f36ab0a9fa65e532acf5ccf5281 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB4_ISCSI @@ -0,0 +1 @@ +CONFIG_SCSI_CXGB4_ISCSI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DC395x b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DC395x new file mode 100644 index 0000000000000000000000000000000000000000..04438189f3c0667ec0439ff7ec150fb316c6d74e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DC395x @@ -0,0 +1 @@ +# CONFIG_SCSI_DC395x is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_ALUA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_ALUA new file mode 100644 index 0000000000000000000000000000000000000000..85e9ad83c76ec24d44ca028c2dc257915165acbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_ALUA @@ -0,0 +1 @@ +CONFIG_SCSI_DH_ALUA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_EMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_EMC new file mode 100644 index 0000000000000000000000000000000000000000..23170c8921443085086edb152b5cba99af37f9a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_EMC @@ -0,0 +1 @@ +CONFIG_SCSI_DH_EMC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_HP_SW b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_HP_SW new file mode 100644 index 0000000000000000000000000000000000000000..a5feace2a94dd99905e6836a7fcc8181571de8a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_HP_SW @@ -0,0 +1 @@ +CONFIG_SCSI_DH_HP_SW=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_RDAC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_RDAC new file mode 100644 index 0000000000000000000000000000000000000000..ef1739ea01039095bc0a25f6dea47f1a4e5872d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_RDAC @@ -0,0 +1 @@ +CONFIG_SCSI_DH_RDAC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DMX3191D b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DMX3191D new file mode 100644 index 0000000000000000000000000000000000000000..291b9196e10c968261ee89f12998e0239020277f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DMX3191D @@ -0,0 +1 @@ +# CONFIG_SCSI_DMX3191D is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_EFCT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_EFCT new file mode 100644 index 0000000000000000000000000000000000000000..e78712e8f025c6f4d53252a19f405fff3d6ba415 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_EFCT @@ -0,0 +1 @@ +# CONFIG_SCSI_EFCT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ESAS2R 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ESAS2R new file mode 100644 index 0000000000000000000000000000000000000000..190cb39db74e9b146216d5f828a4968dc3b20d44 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ESAS2R @@ -0,0 +1 @@ +# CONFIG_SCSI_ESAS2R is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_FDOMAIN_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_FDOMAIN_PCI new file mode 100644 index 0000000000000000000000000000000000000000..706b0549dbf28471642ddb9b26e0b8f1d4c4927e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_FDOMAIN_PCI @@ -0,0 +1 @@ +# CONFIG_SCSI_FDOMAIN_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPSA new file mode 100644 index 0000000000000000000000000000000000000000..11cf299e0857979fd26d8f9604c7ff756c617950 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPSA @@ -0,0 +1 @@ +CONFIG_SCSI_HPSA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPTIOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPTIOP new file mode 100644 index 0000000000000000000000000000000000000000..e92d4a91f8c40939547e4baee33d798b85b34e35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPTIOP @@ -0,0 +1 @@ +# CONFIG_SCSI_HPTIOP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INIA100 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INIA100 new file mode 100644 index 0000000000000000000000000000000000000000..50f5dd42b71d434344d1561052f09c4f7111f1ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INIA100 @@ -0,0 +1 @@ +# CONFIG_SCSI_INIA100 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INITIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INITIO new file mode 100644 index 0000000000000000000000000000000000000000..7abe6c2d9278b67f5f35fdbb406a0085355d5409 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INITIO @@ -0,0 +1 @@ +# CONFIG_SCSI_INITIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_IPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_IPS new file mode 100644 index 0000000000000000000000000000000000000000..32e4abe67df7194ffb197322cb3babd012cbedc0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_IPS @@ -0,0 +1 @@ +# CONFIG_SCSI_IPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC new file mode 100644 index 0000000000000000000000000000000000000000..c7cbc90fe66eacf23d59891a720d9b244c4817c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC @@ -0,0 +1 @@ +CONFIG_SCSI_LPFC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC_DEBUG_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC_DEBUG_FS new file mode 100644 index 0000000000000000000000000000000000000000..52fa94e2793e205ba9fd88851d649fcd0f9983c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC_DEBUG_FS @@ -0,0 +1 @@ +# CONFIG_SCSI_LPFC_DEBUG_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MOD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MOD new file mode 100644 index 0000000000000000000000000000000000000000..7049073c114ce7442daa6aaa82a37fbd860510af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MOD @@ -0,0 +1 @@ +CONFIG_SCSI_MOD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS new file mode 100644 index 0000000000000000000000000000000000000000..1576ebf4984baffb8d8678feb4c88f745e1cc206 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS @@ -0,0 +1 @@ +CONFIG_SCSI_MVSAS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..aa295ebbb545f15dea010d3e28bc1aeec3a9374f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG @@ -0,0 +1 @@ +CONFIG_SCSI_MVSAS_DEBUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET new file mode 100644 index 0000000000000000000000000000000000000000..028f7d8e3d2556789f083574c2f8275e9240d770 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET @@ -0,0 +1 @@ +# CONFIG_SCSI_MVSAS_TASKLET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVUMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVUMI new file mode 100644 index 0000000000000000000000000000000000000000..a9b158a8553eb316e7533848737adbb6df838b7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVUMI @@ -0,0 +1 @@ +# CONFIG_SCSI_MVUMI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRB new file mode 100644 index 0000000000000000000000000000000000000000..efe4d49c4568520713a8935985cd3d9736e39dd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRB @@ -0,0 +1 @@ +# CONFIG_SCSI_MYRB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRS new file mode 100644 index 0000000000000000000000000000000000000000..b7fc26a5995faac6540c8af023b1a3bf6f1fba10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRS @@ -0,0 +1 @@ +# CONFIG_SCSI_MYRS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_NETLINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_NETLINK new file mode 100644 index 0000000000000000000000000000000000000000..7e3736192530bf41cda43dc4c6cd8683d6ba688e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_NETLINK @@ -0,0 +1 @@ +CONFIG_SCSI_NETLINK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PM8001 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PM8001 new file mode 100644 index 0000000000000000000000000000000000000000..ddafafd46e6e80217dcdfa6f018b41f3e36f7405 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PM8001 @@ -0,0 +1 @@ +# CONFIG_SCSI_PM8001 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PMCRAID b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PMCRAID new file mode 100644 index 0000000000000000000000000000000000000000..91c798c43fee7f687180d997bb32d5e779f5e34b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PMCRAID @@ -0,0 +1 @@ +# CONFIG_SCSI_PMCRAID is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_FC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_FC new file mode 100644 index 0000000000000000000000000000000000000000..68581bbb0e64314d0bb38de2a392f9cbb456d370 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_FC @@ -0,0 +1 @@ +CONFIG_SCSI_QLA_FC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_ISCSI 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_ISCSI new file mode 100644 index 0000000000000000000000000000000000000000..4c654831b7578b7dddf76ac5ed303a63c7c80b57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_ISCSI @@ -0,0 +1 @@ +CONFIG_SCSI_QLA_ISCSI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLOGIC_1280 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLOGIC_1280 new file mode 100644 index 0000000000000000000000000000000000000000..98bf0179a1dda683fe191bc3a45ab972663dff88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLOGIC_1280 @@ -0,0 +1 @@ +# CONFIG_SCSI_QLOGIC_1280 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SNIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SNIC new file mode 100644 index 0000000000000000000000000000000000000000..5fa55adc91b2f2a0add60fc5481f3d2b9319eaa5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SNIC @@ -0,0 +1 @@ +# CONFIG_SCSI_SNIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_STEX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_STEX new file mode 100644 index 0000000000000000000000000000000000000000..51be1d1ebd47622d4bf9876dc92914408c5e7025 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_STEX @@ -0,0 +1 @@ +# CONFIG_SCSI_STEX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SYM53C8XX_2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SYM53C8XX_2 new file mode 100644 index 0000000000000000000000000000000000000000..1cb5921057ed693ecb9f3dbb426e19c943fbd2d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SYM53C8XX_2 @@ -0,0 +1 @@ +# CONFIG_SCSI_SYM53C8XX_2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_WD719X b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_WD719X new file mode 100644 index 0000000000000000000000000000000000000000..96273e30305f0714e6d5c8915b8a228553856b83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_WD719X @@ -0,0 +1 @@ +# CONFIG_SCSI_WD719X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_MD5 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_MD5 new file mode 100644 index 0000000000000000000000000000000000000000..fabd74383373088d71374181908a08901a120770 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_MD5 @@ -0,0 +1 @@ +CONFIG_SCTP_COOKIE_HMAC_MD5=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_SHA1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_SHA1 new file mode 100644 index 0000000000000000000000000000000000000000..7b2efb49bca1ac30f42a7364612462fa22b7360f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_SHA1 @@ -0,0 +1 @@ +CONFIG_SCTP_COOKIE_HMAC_SHA1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DBG_OBJCNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DBG_OBJCNT new file mode 100644 index 0000000000000000000000000000000000000000..c9b5228397ce5670fe3b94dbee98a90fbc9e6830 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DBG_OBJCNT @@ -0,0 +1 @@ +# CONFIG_SCTP_DBG_OBJCNT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 new file mode 100644 index 0000000000000000000000000000000000000000..39efc2075d1a6842c1e935fae1760b41f1935698 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 @@ -0,0 +1 @@ +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE new file mode 100644 index 0000000000000000000000000000000000000000..e5bb9ecfe3186a98956d75e5024e96a387059689 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE @@ -0,0 +1 @@ +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 new file mode 100644 index 0000000000000000000000000000000000000000..ecf09aef8ab9d7a887324555f1a420ce7d512003 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 @@ -0,0 +1 @@ +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SDIO_UART b/anolis/configs/L2-OPTIONAL/default/CONFIG_SDIO_UART new file mode 100644 index 0000000000000000000000000000000000000000..11f20bf33fa4278337398d5e50d0fa09112834e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SDIO_UART @@ -0,0 +1 @@ +CONFIG_SDIO_UART=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECCOMP_CACHE_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECCOMP_CACHE_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..af600e9beaec9f3e1e9dbc6c943b24c518409eb8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECCOMP_CACHE_DEBUG @@ -0,0 +1 @@ +# CONFIG_SECCOMP_CACHE_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_APPARMOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_APPARMOR new file mode 100644 index 0000000000000000000000000000000000000000..4e207e1acf0a6974ff469882cc2fd23f06949c6c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_APPARMOR @@ -0,0 +1 @@ +# CONFIG_SECURITY_APPARMOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LANDLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LANDLOCK new file mode 100644 index 0000000000000000000000000000000000000000..6cd907f1bc3c086bf5c71636da08c51af5ebc85a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LANDLOCK @@ -0,0 +1 @@ +# CONFIG_SECURITY_LANDLOCK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOADPIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOADPIN new file mode 100644 index 0000000000000000000000000000000000000000..3276f3e87fdf78477cc2b05bf6d9b266ad0facea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOADPIN @@ -0,0 +1 @@ +# CONFIG_SECURITY_LOADPIN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOCKDOWN_LSM b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOCKDOWN_LSM new file mode 100644 index 0000000000000000000000000000000000000000..cacea5434060db5da748947255db179057946cd0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOCKDOWN_LSM @@ -0,0 +1 @@ +# CONFIG_SECURITY_LOCKDOWN_LSM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SAFESETID b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SAFESETID new file mode 100644 index 0000000000000000000000000000000000000000..ad72247ef42f7587cbe2bd43d0c0bc90995e0a2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SAFESETID @@ 
-0,0 +1 @@ +# CONFIG_SECURITY_SAFESETID is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SELINUX_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SELINUX_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..1affa2db54dccdc3fef5dfb2804e2254b4360534 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SELINUX_DEBUG @@ -0,0 +1 @@ +# CONFIG_SECURITY_SELINUX_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_TOMOYO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_TOMOYO new file mode 100644 index 0000000000000000000000000000000000000000..e0785195f16987dd9d3bf112e07370268c754d1f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_TOMOYO @@ -0,0 +1 @@ +# CONFIG_SECURITY_TOMOYO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_YAMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_YAMA new file mode 100644 index 0000000000000000000000000000000000000000..3b55731fa516410436813af7ab5288afbe89b678 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_YAMA @@ -0,0 +1 @@ +CONFIG_SECURITY_YAMA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ACBEL_FSG032 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ACBEL_FSG032 new file mode 100644 index 0000000000000000000000000000000000000000..96a3ee0837fd072e1c1b0a3168bb7b79c8570c67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ACBEL_FSG032 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ACBEL_FSG032 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1177 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1177 new file mode 100644 index 0000000000000000000000000000000000000000..73f1d35f4955f35ebc1dfb765f0c5c85980645d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1177 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADM1177 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1266 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1266 new file mode 100644 index 0000000000000000000000000000000000000000..fac63d365677707f4c74b36fa24c7b1c6771092c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1266 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADM1266 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADT7310 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADT7310 new file mode 100644 index 0000000000000000000000000000000000000000..61225fbce3f4a96b745ff6867d0f204bb21f0883 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADT7310 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADT7310 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AHT10 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AHT10 new file mode 100644 index 0000000000000000000000000000000000000000..b955f391ead127cf714f50cc4009ad61f7c8aab4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AHT10 @@ -0,0 +1 @@ +# CONFIG_SENSORS_AHT10 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AQUACOMPUTER_D5NEXT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AQUACOMPUTER_D5NEXT new file mode 100644 index 0000000000000000000000000000000000000000..2040fdd8c6476d99dd1bbb8009b6b9586169cd55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AQUACOMPUTER_D5NEXT @@ -0,0 +1 @@ +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AS370 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AS370 new file mode 100644 index 0000000000000000000000000000000000000000..f76a8def3d94844bbb1aa6b786776a9baa0e2a39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AS370 @@ -0,0 +1 @@ +# CONFIG_SENSORS_AS370 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AXI_FAN_CONTROL b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AXI_FAN_CONTROL new file mode 100644 index 0000000000000000000000000000000000000000..8f02a9e62750f7746719336e6e0a0120a22c3881 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AXI_FAN_CONTROL @@ -0,0 +1 @@ +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BEL_PFE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BEL_PFE new file mode 100644 index 0000000000000000000000000000000000000000..20f8ac71d4c6e5a00d8805d2466a74cd8fbf191b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BEL_PFE @@ -0,0 +1 @@ +# CONFIG_SENSORS_BEL_PFE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BPA_RS600 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BPA_RS600 new file mode 100644 index 0000000000000000000000000000000000000000..633b6240457857b6dfec413397b00fa7b7175c68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BPA_RS600 @@ -0,0 +1 @@ +# CONFIG_SENSORS_BPA_RS600 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_CPRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_CPRO new file mode 100644 index 0000000000000000000000000000000000000000..ffe4c7ee219bcb214849adf35a269f6aaf986f7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_CPRO @@ -0,0 +1 @@ +# CONFIG_SENSORS_CORSAIR_CPRO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_PSU b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_PSU new file mode 100644 index 0000000000000000000000000000000000000000..4bc9dc6f6bfd2955056b168505d38a6f11d6350d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_PSU @@ -0,0 +1 @@ +# CONFIG_SENSORS_CORSAIR_PSU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DELTA_AHE50DC_FAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DELTA_AHE50DC_FAN new file mode 100644 index 0000000000000000000000000000000000000000..cd332db679357cd0ad24cdb90dd2d039ec6eb5db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DELTA_AHE50DC_FAN @@ -0,0 +1 @@ +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DPS920AB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DPS920AB new file mode 100644 index 0000000000000000000000000000000000000000..4bae136656fad81880fdbdb7a7cb04e32da9ba86 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DPS920AB @@ -0,0 +1 @@ +# CONFIG_SENSORS_DPS920AB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DRIVETEMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DRIVETEMP new file mode 100644 index 0000000000000000000000000000000000000000..34bc561d5c6c56293468fe115a8dce93a79e2b34 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DRIVETEMP @@ -0,0 +1 @@ +# CONFIG_SENSORS_DRIVETEMP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2103 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2103 new file mode 100644 index 
0000000000000000000000000000000000000000..17ff741c58d6981ce49575d769d75945a950e8f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2103 @@ -0,0 +1 @@ +# CONFIG_SENSORS_EMC2103 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2305 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2305 new file mode 100644 index 0000000000000000000000000000000000000000..8e1d208a7e4261f8c288c18555509571c5486302 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2305 @@ -0,0 +1 @@ +# CONFIG_SENSORS_EMC2305 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FSP_3Y b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FSP_3Y new file mode 100644 index 0000000000000000000000000000000000000000..3b73f0d67240476f31b03e40b80253e362f0af6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FSP_3Y @@ -0,0 +1 @@ +# CONFIG_SENSORS_FSP_3Y is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FTSTEUTATES b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FTSTEUTATES new file mode 100644 index 0000000000000000000000000000000000000000..b3d0d4c87a2df6ae787c7beeaa3758d67e8a9ae6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FTSTEUTATES @@ -0,0 +1 @@ +# CONFIG_SENSORS_FTSTEUTATES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HIH6130 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HIH6130 new file mode 100644 index 0000000000000000000000000000000000000000..ee050b4acca37134988b469d9bacc3f2262ec3c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HIH6130 @@ -0,0 +1 @@ +# CONFIG_SENSORS_HIH6130 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HS3001 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HS3001 new file mode 100644 index 0000000000000000000000000000000000000000..035b60da4cd35051515cb625af949e1698c0b33f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HS3001 @@ -0,0 +1 @@ +# CONFIG_SENSORS_HS3001 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IBM_CFFPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IBM_CFFPS new file mode 100644 index 0000000000000000000000000000000000000000..a217d973e740ffc7c4bf12a4164dd02ae7ea34bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IBM_CFFPS @@ -0,0 +1 @@ +# CONFIG_SENSORS_IBM_CFFPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA238 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA238 new file mode 100644 index 0000000000000000000000000000000000000000..19a35ab442f271d134a4c0027b04ab0ea9806c69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA238 @@ -0,0 +1 @@ +# CONFIG_SENSORS_INA238 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA3221 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA3221 new file mode 100644 index 0000000000000000000000000000000000000000..21b6a947e3faddeccd491bbf4f9417b377be11df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA3221 @@ -0,0 +1 @@ +# CONFIG_SENSORS_INA3221 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INSPUR_IPSPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INSPUR_IPSPS new file mode 100644 index 0000000000000000000000000000000000000000..ac589f45a46c531eaa9306d62aea74273c7ca3b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INSPUR_IPSPS @@ -0,0 +1 @@ +# 
CONFIG_SENSORS_INSPUR_IPSPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR35221 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR35221 new file mode 100644 index 0000000000000000000000000000000000000000..788fcfa05cdc286cbc02ecc883fa6cff4461460e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR35221 @@ -0,0 +1 @@ +# CONFIG_SENSORS_IR35221 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR36021 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR36021 new file mode 100644 index 0000000000000000000000000000000000000000..09c870fa43bd89123b67f9e4c53e30887bd0c0a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR36021 @@ -0,0 +1 @@ +# CONFIG_SENSORS_IR36021 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR38064 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR38064 new file mode 100644 index 0000000000000000000000000000000000000000..3ccb6f2bbfbf78b2a84cdee31c867ebfbddfe166 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR38064 @@ -0,0 +1 @@ +# CONFIG_SENSORS_IR38064 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IRPS5401 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IRPS5401 new file mode 100644 index 0000000000000000000000000000000000000000..c1624388012108c638730b735fb2ef485ee845d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IRPS5401 @@ -0,0 +1 @@ +# CONFIG_SENSORS_IRPS5401 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ISL68137 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ISL68137 new file mode 100644 index 0000000000000000000000000000000000000000..b7586e0dc135df4eb870f5040ae0c24d05168ace --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ISL68137 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ISL68137 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LT7182S b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LT7182S new file mode 100644 index 0000000000000000000000000000000000000000..8bb204de2d7e76c1c14a84cc13162b6fe9987323 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LT7182S @@ -0,0 +1 @@ +# CONFIG_SENSORS_LT7182S is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_I2C new file mode 100644 index 0000000000000000000000000000000000000000..c1a98db590e4b98e585c223151778229f4980bfa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_I2C @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC2947_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_SPI new file mode 100644 index 0000000000000000000000000000000000000000..9bfd69b12089162d9111683ff46358f2d3962419 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_SPI @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC2947_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2990 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2990 new file mode 100644 index 0000000000000000000000000000000000000000..8e84521a9c730490b0cc2f545f57e921ffa6d0c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2990 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC2990 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2992 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2992 new file mode 
100644 index 0000000000000000000000000000000000000000..ea1d267ab8e1ce174eca3a543b1414715860578f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2992 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC2992 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX127 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX127 new file mode 100644 index 0000000000000000000000000000000000000000..2886282bde7d97a194027735424a6be6f4e812f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX127 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX127 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX15301 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX15301 new file mode 100644 index 0000000000000000000000000000000000000000..b76611b07e9a3dd8342d01c7fc88704eef04942a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX15301 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX15301 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX16601 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX16601 new file mode 100644 index 0000000000000000000000000000000000000000..4fa6afb577fa8ab891096a17b6c0ea9d1cf4d127 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX16601 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX16601 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX20730 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX20730 new file mode 100644 index 0000000000000000000000000000000000000000..38f8a8dd9dbe0292a526d0acbd78361ada121a33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX20730 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX20730 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31722 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31722 new file mode 100644 index 0000000000000000000000000000000000000000..362fa9a5ad26433856a573ad5629f3a9f4aaec9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31722 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX31722 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31730 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31730 new file mode 100644 index 0000000000000000000000000000000000000000..463fd89b12dd67a3797e603767e74361d744606c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31730 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX31730 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31760 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31760 new file mode 100644 index 0000000000000000000000000000000000000000..f374857bd4f0075fab095e127981429ec2c79c65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31760 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX31760 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31785 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31785 new file mode 100644 index 0000000000000000000000000000000000000000..071991fe56f325d76a9d522c37bad458c7fb5fd6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31785 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX31785 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6620 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6620 new file mode 100644 index 0000000000000000000000000000000000000000..9196a2cb2e706346ee1edae62d9de0aa4b8c06c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6620 @@ -0,0 +1 @@ +# 
CONFIG_SENSORS_MAX6620 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6621 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6621
new file mode 100644
index 0000000000000000000000000000000000000000..cc3cdefc6e316880cdd3d2052cdaa71ffcef5dc2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6621
@@ -0,0 +1 @@
+# CONFIG_SENSORS_MAX6621 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MC34VR500 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MC34VR500
new file mode 100644
index 0000000000000000000000000000000000000000..0f1d0e6061e846fff5302a176485ee4f40b1afa8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MC34VR500
@@ -0,0 +1 @@
+# CONFIG_SENSORS_MC34VR500 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2888 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2888
new file mode 100644
index 0000000000000000000000000000000000000000..b16ee9089de6abf2ef9917e053a99a55c9e991d1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2888
@@ -0,0 +1 @@
+# CONFIG_SENSORS_MP2888 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2975 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2975
new file mode 100644
index 0000000000000000000000000000000000000000..018a1d2ea21784a29dd75513828208756c1ea3e4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2975
@@ -0,0 +1 @@
+# CONFIG_SENSORS_MP2975 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP5023 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP5023
new file mode 100644
index 0000000000000000000000000000000000000000..0cbd87152f9f3e6f0bd0886583092fca9e0ebf21
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP5023
@@ -0,0 +1 @@
+# CONFIG_SENSORS_MP5023 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MPQ7932 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MPQ7932
new file mode 100644
index 0000000000000000000000000000000000000000..4c84a460a9009b02bd7aecd9e32b704d7ffb1777
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MPQ7932
@@ -0,0 +1 @@
+# CONFIG_SENSORS_MPQ7932 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MR75203 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MR75203
new file mode 100644
index 0000000000000000000000000000000000000000..6edfde475b0c4c5e390fdf53a9dad668caaddaa2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MR75203
@@ -0,0 +1 @@
+# CONFIG_SENSORS_MR75203 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NCT6775_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NCT6775_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..663f31a268a53c65feb68673a0b88523cb5d38ab
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NCT6775_I2C
@@ -0,0 +1 @@
+# CONFIG_SENSORS_NCT6775_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NPCM7XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NPCM7XX
new file mode 100644
index 0000000000000000000000000000000000000000..5f3e62e2486df3fb4cd9692ca4a2804b3097aede
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NPCM7XX
@@ -0,0 +1 @@
+# CONFIG_SENSORS_NPCM7XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_KRAKEN2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_KRAKEN2
new file mode 100644
index 0000000000000000000000000000000000000000..fac10c23da1459c0515004b250da1646d70171e5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_KRAKEN2
@@ -0,0 +1 @@
+# CONFIG_SENSORS_NZXT_KRAKEN2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_SMART2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_SMART2
new file mode 100644
index 0000000000000000000000000000000000000000..f942ec4e0d3dcb4a817d2faab7b60600296c23cc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_SMART2
@@ -0,0 +1 @@
+# CONFIG_SENSORS_NZXT_SMART2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_OCC_P8_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_OCC_P8_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..2b2e556867f325a096d7cd8a74af0948cc1af79e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_OCC_P8_I2C
@@ -0,0 +1 @@
+# CONFIG_SENSORS_OCC_P8_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PIM4328 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PIM4328
new file mode 100644
index 0000000000000000000000000000000000000000..71f7cda75cc47eb04207c7598676c62f26d81240
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PIM4328
@@ -0,0 +1 @@
+# CONFIG_SENSORS_PIM4328 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PLI1209BC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PLI1209BC
new file mode 100644
index 0000000000000000000000000000000000000000..672c2ae853aeefceaac5811b0777bc16a561f2ce
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PLI1209BC
@@ -0,0 +1 @@
+# CONFIG_SENSORS_PLI1209BC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PM6764TR b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PM6764TR
new file mode 100644
index 0000000000000000000000000000000000000000..0af0b3e7870e2f81bc94e4cfec7263cc60ea43ae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PM6764TR
@@ -0,0 +1 @@
+# CONFIG_SENSORS_PM6764TR is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PXE1610 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PXE1610
new file mode 100644
index 0000000000000000000000000000000000000000..7d12a457c9502fa87b54cb571fec5a2192b34ef3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PXE1610
@@ -0,0 +1 @@
+# CONFIG_SENSORS_PXE1610 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_Q54SJ108A2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_Q54SJ108A2
new file mode 100644
index 0000000000000000000000000000000000000000..0033b87cbac9be7585a27fcfa4c3f9b4d78d4f05
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_Q54SJ108A2
@@ -0,0 +1 @@
+# CONFIG_SENSORS_Q54SJ108A2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBRMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBRMI
new file mode 100644
index 0000000000000000000000000000000000000000..ee2be310e7f5474cc241721be38c0e2b7f30e22b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBRMI
@@ -0,0 +1 @@
+# CONFIG_SENSORS_SBRMI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBTSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBTSI
new file mode 100644
index 0000000000000000000000000000000000000000..434ec0ad9b6ed1e0a2bf98b9eec321ddf6b18960
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBTSI
@@ -0,0 +1 @@
+# CONFIG_SENSORS_SBTSI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT3x b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT3x
new file mode 100644
index 0000000000000000000000000000000000000000..487a3c998bdaa4a9f40eb6b242f89b709baa62db
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT3x
@@ -0,0 +1 @@
+# CONFIG_SENSORS_SHT3x is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT4x b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT4x
new file mode 100644
index 0000000000000000000000000000000000000000..6ba610dcfce56e66ab9386530357ea796fceeead
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT4x
@@ -0,0 +1 @@
+# CONFIG_SENSORS_SHT4x is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STPDDC60 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STPDDC60
new file mode 100644
index 0000000000000000000000000000000000000000..d2c14b35d5d48e2bd0aa0ce4f23ff52bf200f535
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STPDDC60
@@ -0,0 +1 @@
+# CONFIG_SENSORS_STPDDC60 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STTS751 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STTS751
new file mode 100644
index 0000000000000000000000000000000000000000..f2f8006af2817c5abd3f846e5b4dce361a32a70a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STTS751
@@ -0,0 +1 @@
+# CONFIG_SENSORS_STTS751 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TC654 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TC654
new file mode 100644
index 0000000000000000000000000000000000000000..cd9dfa1518e07af426cf33c07d5e89f72e2a0bdc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TC654
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TC654 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TDA38640 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TDA38640
new file mode 100644
index 0000000000000000000000000000000000000000..423510d94081dc7d7ef7cc341c8190b489d2e6e2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TDA38640
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TDA38640 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP108 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP108
new file mode 100644
index 0000000000000000000000000000000000000000..5fc9166021b4eef6d8d2a8542a662e804e8ea0db
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP108
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TMP108 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP464 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP464
new file mode 100644
index 0000000000000000000000000000000000000000..ff2a270bcc67d8bc743b160d4107165265552515
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP464
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TMP464 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP513 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP513
new file mode 100644
index 0000000000000000000000000000000000000000..906f493fca45a2b3b99a351bb3ee1f7284b1acda
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP513
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TMP513 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS23861 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS23861
new file mode 100644
index 0000000000000000000000000000000000000000..70f166c164e6eca114062b8c772003b1f7d67c35
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS23861
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TPS23861 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS53679 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS53679
new file mode 100644
index 0000000000000000000000000000000000000000..861aa87d38c57ec08ddeedcddec106f04f30e79f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS53679
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TPS53679 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS546D24 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS546D24
new file mode 100644
index 0000000000000000000000000000000000000000..eac2bb021f0465ed0d96dd61700002f654e45898
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS546D24
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TPS546D24 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_W83773G b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_W83773G
new file mode 100644
index 0000000000000000000000000000000000000000..665b8bfb1e603d692063c583a9326c1401597ba6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_W83773G
@@ -0,0 +1 @@
+# CONFIG_SENSORS_W83773G is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE122 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE122
new file mode 100644
index 0000000000000000000000000000000000000000..4de67bfd07680eeebf8a5a7ab18e9ea9db425502
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE122
@@ -0,0 +1 @@
+# CONFIG_SENSORS_XDPE122 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE152 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE152
new file mode 100644
index 0000000000000000000000000000000000000000..b84e16fd152b4a12161c08933b8a9a879bbd8f5e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE152
@@ -0,0 +1 @@
+# CONFIG_SENSORS_XDPE152 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250
new file mode 100644
index 0000000000000000000000000000000000000000..6896f6a62d0f12df066e6442179139d3214a9a2a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_CONSOLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_CONSOLE
new file mode 100644
index 0000000000000000000000000000000000000000..49dda6ea764db1090e63903159b936b469866e8d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_CONSOLE
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_CONSOLE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DEPRECATED_OPTIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DEPRECATED_OPTIONS
new file mode 100644
index 0000000000000000000000000000000000000000..bec6e28227134fe2a28e127a3a25e73dc55c60cb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DEPRECATED_OPTIONS
@@ -0,0 +1 @@
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DETECT_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DETECT_IRQ
new file mode 100644
index 0000000000000000000000000000000000000000..3fc7f91eaad16d2d165708351c401c8e32031933
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DETECT_IRQ
@@ -0,0 +1 @@
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..333f0636a9db961340ead45ac061e8521f97a717
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DMA
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_DMA=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DW b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DW
new file mode 100644
index 0000000000000000000000000000000000000000..43894bea0c6703ddf70a700d7c4ca1eaac5f7d5f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DW
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_DW=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DWLIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DWLIB
new file mode 100644
index 0000000000000000000000000000000000000000..3dcf20ed9b3e49e534616ae40677ba2f7cc4ef3e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DWLIB
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_DWLIB=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXAR b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXAR
new file mode 100644
index 0000000000000000000000000000000000000000..3df024acdc5b9a0f152bd6e1510ce0b7135eb8c7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXAR
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_EXAR=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXTENDED b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXTENDED
new file mode 100644
index 0000000000000000000000000000000000000000..91881b19569ee19b6469f663e61e6c2270b40333
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXTENDED
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_EXTENDED=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_FINTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_FINTEK
new file mode 100644
index 0000000000000000000000000000000000000000..b6cf08b714de4fbe13f00a9d0f979a195f9b28af
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_FINTEK
@@ -0,0 +1 @@
+# CONFIG_SERIAL_8250_FINTEK is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_MANY_PORTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_MANY_PORTS
new file mode 100644
index 0000000000000000000000000000000000000000..ded898381acc4ccecb6d22c0782862d475af0472
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_MANY_PORTS
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_MANY_PORTS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_NR_UARTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_NR_UARTS
new file mode 100644
index 0000000000000000000000000000000000000000..c30ccb26bb819dfec887cb21a71cb219b468c422
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_NR_UARTS
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_NR_UARTS=32
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..c0ac5637fef6d76eac7a8e4629be6b6b1f4cec0a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_PCI=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI1XXXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI1XXXX
new file mode 100644
index 0000000000000000000000000000000000000000..134ab6721ff3825c49746a8333322c00b3e0fc44
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI1XXXX
@@ -0,0 +1 @@
+# CONFIG_SERIAL_8250_PCI1XXXX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCILIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCILIB
new file mode 100644
index 0000000000000000000000000000000000000000..25b02aa103a850da8db09cee09e9e55a454199b8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCILIB
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_PCILIB=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PERICOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PERICOM
new file mode 100644
index 0000000000000000000000000000000000000000..7bd924eace19f937d57bdc97dd860e64566c87b0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PERICOM
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_PERICOM=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PNP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PNP
new file mode 100644
index 0000000000000000000000000000000000000000..09e4c12bd6cc3e29d394bc97eecbffa19665d7fe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PNP
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_PNP=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RSA
new file mode 100644
index 0000000000000000000000000000000000000000..2e1143ad48d1805dd69e0d8112b503cd2b0bca24
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RSA
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_RSA=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RUNTIME_UARTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RUNTIME_UARTS
new file mode 100644
index 0000000000000000000000000000000000000000..2bde688add5446f847b601da850dbd30bfad7d49
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RUNTIME_UARTS
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_SHARE_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_SHARE_IRQ
new file mode 100644
index 0000000000000000000000000000000000000000..d346f0d47003329d2466dbf3257d089ac17786c9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_SHARE_IRQ
@@ -0,0 +1 @@
+CONFIG_SERIAL_8250_SHARE_IRQ=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_JTAGUART b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_JTAGUART
new file mode 100644
index 0000000000000000000000000000000000000000..b21939fa7602a0e462e99b3056927fc9c96bb4cb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_JTAGUART
@@ -0,0 +1 @@
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_UART b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_UART
new file mode 100644
index 0000000000000000000000000000000000000000..a4c4bd74281ad4228ef070b3405e981d10761e12
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_UART
@@ -0,0 +1 @@
+# CONFIG_SERIAL_ALTERA_UART is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..a9e727ef6fc1627109314d47f2c3d05788f1a373
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE
@@ -0,0 +1 @@
+CONFIG_SERIAL_CORE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE_CONSOLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE_CONSOLE
new file mode 100644
index 0000000000000000000000000000000000000000..7a5650c97f4834f1cab6d17b9c8abe9627c73dfb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE_CONSOLE
@@ -0,0 +1 @@
+CONFIG_SERIAL_CORE_CONSOLE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_DEV_BUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_DEV_BUS
new file mode 100644
index 0000000000000000000000000000000000000000..4390febac668eadae033a225a5ad77f44377cc59
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_DEV_BUS
@@ -0,0 +1 @@
+# CONFIG_SERIAL_DEV_BUS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_EARLYCON b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_EARLYCON
new file mode 100644
index 0000000000000000000000000000000000000000..387f53de3e972b6462d391278e04c5a848f5c602
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_EARLYCON
@@ -0,0 +1 @@
+CONFIG_SERIAL_EARLYCON=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LINFLEXUART b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LINFLEXUART
new file mode 100644
index 0000000000000000000000000000000000000000..68d716e9dea2140bea557fff0b29c4abf7da8110
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LINFLEXUART
@@ -0,0 +1 @@
+# CONFIG_SERIAL_FSL_LINFLEXUART is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LPUART b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LPUART
new file mode 100644
index 0000000000000000000000000000000000000000..345c6ea2aedb8cc5b2b2cd45e46f9c6029cf5308
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LPUART
@@ -0,0 +1 @@
+# CONFIG_SERIAL_FSL_LPUART is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_KGDB_NMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_KGDB_NMI
new file mode 100644
index 0000000000000000000000000000000000000000..617006684052a22e7ac01aa28528001e73de0d7b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_KGDB_NMI
@@ -0,0 +1 @@
+# CONFIG_SERIAL_KGDB_NMI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX3100 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX3100
new file mode 100644
index 0000000000000000000000000000000000000000..5fb197ebc3c7a3e20cf81d0e1ad2521d022eaa38
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX3100
@@ -0,0 +1 @@
+# CONFIG_SERIAL_MAX3100 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX310X b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX310X
new file mode 100644
index 0000000000000000000000000000000000000000..10f39f7fa3bb0cfe3003a56fe9f3acdc25890df1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX310X
@@ -0,0 +1 @@
+# CONFIG_SERIAL_MAX310X is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MCTRL_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MCTRL_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..32c2ac585277103ac1b8205ba16fd5dd1ae6c5e9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MCTRL_GPIO
@@ -0,0 +1 @@
+CONFIG_SERIAL_MCTRL_GPIO=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_NONSTANDARD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_NONSTANDARD
new file mode 100644
index 0000000000000000000000000000000000000000..f371b2fdcf041ed1cf3fd9be965a958dc9eb55d8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_NONSTANDARD
@@ -0,0 +1 @@
+CONFIG_SERIAL_NONSTANDARD=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_RP2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_RP2
new file mode 100644
index 0000000000000000000000000000000000000000..2645fe9c765cbac0697a85d485e4397e11be321c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_RP2
@@ -0,0 +1 @@
+# CONFIG_SERIAL_RP2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SC16IS7XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SC16IS7XX
new file mode 100644
index 0000000000000000000000000000000000000000..29deacf2a23fc214a54b3c8d203bacabefdc3c3d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SC16IS7XX
@@ -0,0 +1 @@
+# CONFIG_SERIAL_SC16IS7XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SCCNXP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SCCNXP
new file mode 100644
index 0000000000000000000000000000000000000000..1ef04c2082dc7da3d2c95c81df1dde0ef832b7d9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SCCNXP
@@ -0,0 +1 @@
+# CONFIG_SERIAL_SCCNXP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SPRD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SPRD
new file mode 100644
index 0000000000000000000000000000000000000000..c1831502fe7451d92ea4d8df5ceb0a782eea0e43
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SPRD
@@ -0,0 +1 @@
+# CONFIG_SERIAL_SPRD is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_UARTLITE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_UARTLITE
new file mode 100644
index 0000000000000000000000000000000000000000..5c730f9cf93c48744d513082adec56628e0218ae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_UARTLITE
@@ -0,0 +1 @@
+# CONFIG_SERIAL_UARTLITE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_GPIO_PS2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_GPIO_PS2
new file mode 100644
index 0000000000000000000000000000000000000000..22c1adbfbf034ac746225bab6ed6ee8c33268458
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_GPIO_PS2
@@ -0,0 +1 @@
+# CONFIG_SERIO_GPIO_PS2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PCIPS2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PCIPS2
new file mode 100644
index 0000000000000000000000000000000000000000..81c68994feb64bd1f2654e1f775fcfb3cd91a324
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PCIPS2
@@ -0,0 +1 @@
+# CONFIG_SERIO_PCIPS2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PS2MULT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PS2MULT
new file mode 100644
index 0000000000000000000000000000000000000000..ecaa8cce173b7b572d1401a153e997670a205019
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PS2MULT
@@ -0,0 +1 @@
+# CONFIG_SERIO_PS2MULT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SFC_SIENA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SFC_SIENA
new file mode 100644
index 0000000000000000000000000000000000000000..345587e7256caa4f04d537dd4b76d2e0f4152a43
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SFC_SIENA
@@ -0,0 +1 @@
+# CONFIG_SFC_SIENA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SFP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SFP
new file mode 100644
index 0000000000000000000000000000000000000000..db57db12e9df19151b5e6f3e1cab2ca5be3b85c5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SFP
@@ -0,0 +1 @@
+CONFIG_SFP=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SF_PDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SF_PDMA
new file mode 100644
index 0000000000000000000000000000000000000000..87ab2c1f07c21f4d27f2250f39023f978a0a0afc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SF_PDMA
@@ -0,0 +1 @@
+# CONFIG_SF_PDMA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SGL_ALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SGL_ALLOC
new file mode 100644
index 0000000000000000000000000000000000000000..299ad6f713683f5cf7cd6826a667b6ef21c2f5bc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SGL_ALLOC
@@ -0,0 +1 @@
+CONFIG_SGL_ALLOC=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SG_POOL b/anolis/configs/L2-OPTIONAL/default/CONFIG_SG_POOL
new file mode 100644
index 0000000000000000000000000000000000000000..c2cfa29ce56ae46df29fa4c7bb7ccae974712a8c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SG_POOL
@@ -0,0 +1 @@
+CONFIG_SG_POOL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SHRINKER_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SHRINKER_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..1f64821da2a83729569e12eb96f458dc6325e283
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SHRINKER_DEBUG
@@ -0,0 +1 @@
+# CONFIG_SHRINKER_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SIGNATURE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SIGNATURE
new file mode 100644
index 0000000000000000000000000000000000000000..cfefd18705cf44269df678ae515e3b5d5cd438f7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SIGNATURE
@@ -0,0 +1 @@
+CONFIG_SIGNATURE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SIOX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SIOX
new file mode 100644
index 0000000000000000000000000000000000000000..89578e7ae70c9eabede8e286d0a01a842a814cf2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SIOX
@@ -0,0 +1 @@
+# CONFIG_SIOX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SKB_EXTENSIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SKB_EXTENSIONS
new file mode 100644
index 0000000000000000000000000000000000000000..7dd2c37be08013e886a245ccea50ea14b2a964b6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SKB_EXTENSIONS
@@ -0,0 +1 @@
+CONFIG_SKB_EXTENSIONS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLAB_DEPRECATED b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLAB_DEPRECATED
new file mode 100644
index 0000000000000000000000000000000000000000..5b0f444ee136a41026a9909dd1537ac94751b942
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLAB_DEPRECATED
@@ -0,0 +1 @@
+# CONFIG_SLAB_DEPRECATED is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLHC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLHC
new file mode 100644
index 0000000000000000000000000000000000000000..cb09fa7d2f09db81ea1747ad11d8063e521119e1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLHC
@@ -0,0 +1 @@
+CONFIG_SLHC=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIMBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIMBUS
new file mode 100644
index 0000000000000000000000000000000000000000..63141c29d4c537beecdc169890a02066a4a2ea74
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIMBUS
@@ -0,0 +1 @@
+# CONFIG_SLIMBUS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_COMPRESSED b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_COMPRESSED
new file mode 100644
index 0000000000000000000000000000000000000000..a67a13f7403a9fd48d9eec8a43a94eaa6bdecb60
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_COMPRESSED
@@ -0,0 +1 @@
+CONFIG_SLIP_COMPRESSED=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_MODE_SLIP6 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_MODE_SLIP6
new file mode 100644
index 0000000000000000000000000000000000000000..8ddc7389f0dfd8b23a09a4b6fe0b5db3f6135534
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_MODE_SLIP6
@@ -0,0 +1 @@
+# CONFIG_SLIP_MODE_SLIP6 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_SMART b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_SMART
new file mode 100644
index 0000000000000000000000000000000000000000..b6e3b7cbadda79d168e63311b949723c471b5836
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_SMART
@@ -0,0 +1 @@
+CONFIG_SLIP_SMART=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SMARTJOYPLUS_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMARTJOYPLUS_FF
new file mode 100644
index 0000000000000000000000000000000000000000..8644453aab07b774856b68152b609b3fe202639f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMARTJOYPLUS_FF
@@ -0,0 +1 @@
+# CONFIG_SMARTJOYPLUS_FF is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SMBFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMBFS
new file mode 100644
index 0000000000000000000000000000000000000000..6211f79279b31fb34a47fb46e7ba60a7e15b4d78
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMBFS
@@ -0,0 +1 @@
+CONFIG_SMBFS=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SMB_SERVER b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMB_SERVER
new file mode 100644
index 0000000000000000000000000000000000000000..0cb54ecd7dca8b1531c92db50235568c41ffebd1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMB_SERVER
@@ -0,0 +1 @@
+# CONFIG_SMB_SERVER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SMSC_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMSC_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..12a6f39b18b8d559c8c3fd0e859ffc9a10ec43b1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMSC_PHY
@@ -0,0 +1 @@
+CONFIG_SMSC_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SM_FTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_SM_FTL
new file mode 100644
index 0000000000000000000000000000000000000000..da9ab33b453b8756b8230886551c1ae4168fc419
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SM_FTL
@@ -0,0 +1 @@
+# CONFIG_SM_FTL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_CGROUP_DATA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_CGROUP_DATA
new file mode 100644
index 0000000000000000000000000000000000000000..4c3788456e67bb8241bfb4720c3e8b5866251a23
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_CGROUP_DATA
@@ -0,0 +1 @@
+CONFIG_SOCK_CGROUP_DATA=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_RX_QUEUE_MAPPING b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_RX_QUEUE_MAPPING
new file mode 100644
index 0000000000000000000000000000000000000000..7c6f5f44fcbfc36b338e5bb6484f4a8dd8981dcc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_RX_QUEUE_MAPPING
@@ -0,0 +1 @@
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_VALIDATE_XMIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_VALIDATE_XMIT
new file mode 100644
index 0000000000000000000000000000000000000000..5645b5adfbe0a7ca8a924f6c7f91849bb1b055a4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_VALIDATE_XMIT
@@ -0,0 +1 @@
+CONFIG_SOCK_VALIDATE_XMIT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOC_TI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOC_TI
new file mode 100644
index 0000000000000000000000000000000000000000..88248080d408eb101dd53cc7a4f1a1b3a1100c75
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOC_TI
@@ -0,0 +1 @@
+# CONFIG_SOC_TI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOFTIRQ_ON_OWN_STACK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOFTIRQ_ON_OWN_STACK
new file mode 100644
index 0000000000000000000000000000000000000000..18f68f67c552b8804c1f50641e00f9f0adc812ed
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOFTIRQ_ON_OWN_STACK
@@ -0,0 +1 @@
+CONFIG_SOFTIRQ_ON_OWN_STACK=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SONY_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_SONY_FF
new file mode 100644
index 0000000000000000000000000000000000000000..f3a327bba2abb0780a0eb905fde4f26f279bd260
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SONY_FF
@@ -0,0 +1 @@
+CONFIG_SONY_FF=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUND b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUND
new file mode 100644
index 0000000000000000000000000000000000000000..b62f24440c36825e4b7c0759abe0a0defebebaac
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUND
@@ -0,0 +1 @@
+CONFIG_SOUND=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUNDWIRE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUNDWIRE
new file mode 100644
index 0000000000000000000000000000000000000000..7ddeacfd995651688d4394209074ef3251831e9b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUNDWIRE
@@ -0,0 +1 @@
+# CONFIG_SOUNDWIRE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_EXTREME b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_EXTREME
new file mode 100644
index 0000000000000000000000000000000000000000..0e87090ab4426fc444b76754f16b9604b2506ba8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_EXTREME
@@ -0,0 +1 @@
+CONFIG_SPARSEMEM_EXTREME=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_VMEMMAP_ENABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_VMEMMAP_ENABLE
new file mode 100644
index 0000000000000000000000000000000000000000..f7d1d81f20c583f469c0cbf407e3acd6f02bc851
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_VMEMMAP_ENABLE
@@ -0,0 +1 @@
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSE_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSE_IRQ
new file mode 100644
index 0000000000000000000000000000000000000000..943900f6318eca59c036bd06d21c5ccbd02f4f57
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSE_IRQ
@@ -0,0 +1 @@
+CONFIG_SPARSE_IRQ=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_ALTERA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_ALTERA
new file mode 100644
index 0000000000000000000000000000000000000000..2f6b419d6c748c2a292a0560dcf6e82a50d5405d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_ALTERA
@@ -0,0 +1 @@
+# CONFIG_SPI_ALTERA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AMD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AMD
new file mode 100644
index 0000000000000000000000000000000000000000..0dc059dce49633d03345b552828b0962fca10520
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AMD
@@ -0,0 +1 @@
+# CONFIG_SPI_AMD is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AX88796C b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AX88796C
new file mode 100644
index 0000000000000000000000000000000000000000..c9ba73ea33c5526c2914918be0fb55203796d9ae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AX88796C
@@ -0,0 +1 @@
+# CONFIG_SPI_AX88796C is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AXI_SPI_ENGINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AXI_SPI_ENGINE
new file mode 100644
index 0000000000000000000000000000000000000000..2f54b92cdc1f66ef2e36739f2788ed74e3fa5310
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AXI_SPI_ENGINE
@@ -0,0 +1 @@
+# CONFIG_SPI_AXI_SPI_ENGINE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_BITBANG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_BITBANG
new file mode 100644
index 0000000000000000000000000000000000000000..07bad2e562cd4fdf925d460b986c8fdfd3848380
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_BITBANG
@@ -0,0 +1 @@
+# CONFIG_SPI_BITBANG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_DYNAMIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_DYNAMIC
new file mode 100644
index 0000000000000000000000000000000000000000..516a1b41fa98af2a280ef18938e311d0cba4e7ad
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_DYNAMIC
@@ -0,0 +1 @@
+CONFIG_SPI_DYNAMIC=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..ebdaac3fe2f4d63740dddbbea86569785f1c4143
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_GPIO
@@ -0,0 +1 @@
+# CONFIG_SPI_GPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_LOOPBACK_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_LOOPBACK_TEST
new file mode 100644
index 0000000000000000000000000000000000000000..c3e9782b13cbd9f7794803b10f2252b0b7acab0b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_LOOPBACK_TEST
@@ -0,0 +1 @@
+# CONFIG_SPI_LOOPBACK_TEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MASTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MASTER
new file mode 100644
index 0000000000000000000000000000000000000000..63f789d5b226c07cdb39461b355f2fc09b826018
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MASTER
@@ -0,0 +1 @@
+CONFIG_SPI_MASTER=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..38b69b2031dec4dbd120ab3b23e32fb5b56a56a3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE
@@ -0,0 +1 @@
+# CONFIG_SPI_MICROCHIP_CORE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE_QSPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE_QSPI
new file mode 100644
index 0000000000000000000000000000000000000000..23bd027bb0f3a6ed269d2d195944108143d98596
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE_QSPI
@@ -0,0 +1 @@
+# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MUX
new file mode 100644
index 0000000000000000000000000000000000000000..d1caa5aa793ff3cae48ff6fa75cb47819da8577f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MUX
@@ -0,0 +1 @@
+# CONFIG_SPI_MUX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MXIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MXIC
new file mode 100644
index 0000000000000000000000000000000000000000..2564a03db4c9b1abc0d6ed43d6bd58793035fa67
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MXIC
@@ -0,0 +1 @@
+# CONFIG_SPI_MXIC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_OC_TINY b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_OC_TINY
new file mode 100644
index 0000000000000000000000000000000000000000..742e9abe78b57f3cbb5bc2ee05d7a56a680e9c25
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_OC_TINY
@@ -0,0 +1 @@
+# CONFIG_SPI_OC_TINY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PCI1XXXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PCI1XXXX
new file mode 100644
index 0000000000000000000000000000000000000000..356a3951461ff15fb04ccef7c0c0c5cf10e9a2cc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PCI1XXXX
@@ -0,0 +1 @@
+# CONFIG_SPI_PCI1XXXX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PXA2XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PXA2XX
new file mode 100644
index 0000000000000000000000000000000000000000..266e686b84595a72c4167061bb5440a8110d2164
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PXA2XX
@@ -0,0 +1 @@
+# CONFIG_SPI_PXA2XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SC18IS602 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SC18IS602
new file mode 100644
index 0000000000000000000000000000000000000000..5d8f72c2312321cd95e63e254c353977c697ea1a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SC18IS602
@@ -0,0 +1 @@
+# CONFIG_SPI_SC18IS602 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SIFIVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SIFIVE
new file mode 100644
index 0000000000000000000000000000000000000000..f24362cbf756a466bd6fe118af62279a8e1f8180
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SIFIVE
@@ -0,0 +1 @@
+# CONFIG_SPI_SIFIVE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SLAVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SLAVE
new file mode 100644
index 0000000000000000000000000000000000000000..663aaaaf305bdfc480dec7ab456abde433e98d6c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SLAVE
@@ -0,0 +1 @@
+# CONFIG_SPI_SLAVE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SPIDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SPIDEV
new file mode 100644
index 0000000000000000000000000000000000000000..c785b6d7ed019d888649f142ffad8f6e4ef1918e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SPIDEV
@@ -0,0 +1 @@
+# CONFIG_SPI_SPIDEV is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_TLE62X0 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_TLE62X0
new file mode 100644
index 0000000000000000000000000000000000000000..f9c7a26af8a4f4f951690439fb8b25dd56981efd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_TLE62X0
@@ -0,0 +1 @@
+# CONFIG_SPI_TLE62X0 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XCOMM b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XCOMM
new file mode 100644
index 0000000000000000000000000000000000000000..c3da8e9c3e8b9c5762b07d24ad12b4bb39e47bc0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XCOMM
@@ -0,0 +1 @@
+# CONFIG_SPI_XCOMM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XILINX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XILINX
new file mode 100644
index 0000000000000000000000000000000000000000..47ea00e62a9f8d43c87896236cf58132da701fa8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XILINX
@@ -0,0 +1 @@
+# CONFIG_SPI_XILINX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPLIT_PTLOCK_CPUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPLIT_PTLOCK_CPUS
new file mode 100644
index 0000000000000000000000000000000000000000..412fc198ef2e080fa1cafc10f9e8e902769ad2dd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPLIT_PTLOCK_CPUS
@@ -0,0 +1 @@
+CONFIG_SPLIT_PTLOCK_CPUS=4
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPMI
new file mode 100644
index 0000000000000000000000000000000000000000..35e1ee706a11b7a248e9c3bebdeda8ab5b2b9144
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPMI
@@ -0,0 +1 @@
+# CONFIG_SPMI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT
new file mode 100644
index 0000000000000000000000000000000000000000..2b95cc6b9b39b4df3eef64053cceb3c7a42a5db1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT
@@ -0,0 +1 @@
+# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI
new file mode 100644
index 0000000000000000000000000000000000000000..8a049d3d94a1f507b7f1a5fc7859ef2c9ae08619
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI
@@ -0,0 +1 @@
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU
new file mode 100644
index 0000000000000000000000000000000000000000..fc9de7069126d01758216f1316f6fe9f9ae2331c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU
@@ -0,0 +1 @@
+CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE
new file mode 100644
index 0000000000000000000000000000000000000000..6e030a19b32139d0f0127a0a405d427ce1b38f4b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE
@@ -0,0 +1 @@
+# CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SRAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_SRAM
new file mode 100644
index 0000000000000000000000000000000000000000..29a3ef2ac3781960ea47f19f43db2f5b09f9e2f5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SRAM
@@ -0,0 +1 @@
+# CONFIG_SRAM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB
new file mode 100644
index 0000000000000000000000000000000000000000..7b1e39107257b961508bef292fb1fdf75a23bd37
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB
@@ -0,0 +1 @@
+# CONFIG_SSB is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB_POSSIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB_POSSIBLE
new file mode 100644
index 0000000000000000000000000000000000000000..90707d7854fda8d7dfff58bb0bedb2e6a36dfbc0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB_POSSIBLE
@@ -0,0 +1 @@
+CONFIG_SSB_POSSIBLE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SSFDC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSFDC
new file mode 100644
index 0000000000000000000000000000000000000000..29dfb19eddafc74ab01bd05da26549154ff84561
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSFDC
@@ -0,0 +1 @@
+# CONFIG_SSFDC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKDEPOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKDEPOT
new file mode 100644
index 0000000000000000000000000000000000000000..49872bcb2fe61b7853ff316323db4ba74d3b18e3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKDEPOT
@@ -0,0 +1 @@
+CONFIG_STACKDEPOT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKTRACE_BUILD_ID b/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKTRACE_BUILD_ID
new file mode 100644
index 0000000000000000000000000000000000000000..832ef97a06345376d61db7239787ecd8657ad5b2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKTRACE_BUILD_ID
@@ -0,0 +1 @@
+# CONFIG_STACKTRACE_BUILD_ID is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STANDALONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_STANDALONE
new file mode 100644
index 0000000000000000000000000000000000000000..c3d25b3d6731b76ecbe23fe9210d3041e08ddc70
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STANDALONE
@@ -0,0 +1 @@
+CONFIG_STANDALONE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STE10XP b/anolis/configs/L2-OPTIONAL/default/CONFIG_STE10XP
new file mode 100644
index 0000000000000000000000000000000000000000..e6a1c2bcacb03f45bbbc3634d9021789427753a3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STE10XP
@@ -0,0 +1 @@
+CONFIG_STE10XP=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STP b/anolis/configs/L2-OPTIONAL/default/CONFIG_STP
new file mode 100644
index 0000000000000000000000000000000000000000..7c878c8f9c0f3e9783b113e7545381023f236413
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STP
@@ -0,0 +1 @@
+CONFIG_STP=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STRING_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_STRING_SELFTEST
new file mode 100644
index 0000000000000000000000000000000000000000..dbff6d7faa3ab9a9bf5e9af5017b60ba0a9bfece
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STRING_SELFTEST
@@ -0,0 +1 @@
+# CONFIG_STRING_SELFTEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_BACKCHANNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_BACKCHANNEL
new file mode 100644
index 0000000000000000000000000000000000000000..9dc73284e4f7001a55d96f4a0e066d37c6c0ea1f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_BACKCHANNEL
@@ -0,0 +1 @@
+CONFIG_SUNRPC_BACKCHANNEL=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_GSS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_GSS
new file mode 100644
index 0000000000000000000000000000000000000000..4a4e93eed9c74df30d90d75085d03e22965fee43
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_GSS
@@ -0,0 +1 @@
+CONFIG_SUNRPC_GSS=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_3_POWER_OPREGION b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_3_POWER_OPREGION
new file mode 100644
index 0000000000000000000000000000000000000000..aaa37ad19c5da4fedd6736346dd329e6cf38862d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_3_POWER_OPREGION
@@ -0,0 +1 @@
+# CONFIG_SURFACE_3_POWER_OPREGION is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_GPE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_GPE
new file mode 100644
index 0000000000000000000000000000000000000000..160153290de55f7560284e150ca86bef38bc3da2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_GPE
@@ -0,0 +1 @@
+# CONFIG_SURFACE_GPE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_HOTPLUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_HOTPLUG
new file mode 100644
index 0000000000000000000000000000000000000000..f303a3f6e90b08a46ab13310ee86de4606aec74f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_HOTPLUG
@@ -0,0 +1 @@
+# CONFIG_SURFACE_HOTPLUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PLATFORMS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PLATFORMS
new file mode 100644
index 0000000000000000000000000000000000000000..4cc37929c7e50c1add898474dfbe69dbf7fb7c4d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PLATFORMS
@@ -0,0 +1 @@
+CONFIG_SURFACE_PLATFORMS=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PRO3_BUTTON b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PRO3_BUTTON
new file mode 100644
index 0000000000000000000000000000000000000000..b649d5976e661e49026a455d51d5e15f9e8764e9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PRO3_BUTTON
@@ -0,0 +1 @@
+# CONFIG_SURFACE_PRO3_BUTTON is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB
new file mode 100644
index 0000000000000000000000000000000000000000..5405b65b4d688796791436f55b5e4b1b193ef9b9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB
@@ -0,0 +1 @@
+CONFIG_SWIOTLB=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB_DYNAMIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB_DYNAMIC
new file mode 100644
index 0000000000000000000000000000000000000000..2e7793f6000762cbbdf5ce31af52fc2493278ba0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB_DYNAMIC
@@ -0,0 +1 @@
+# CONFIG_SWIOTLB_DYNAMIC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SWPHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWPHY
new file mode 100644
index 0000000000000000000000000000000000000000..32139c70bf019be97b8efd51eab3385be8fa8e7a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWPHY
@@ -0,0 +1 @@
+CONFIG_SWPHY=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SW_SYNC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SW_SYNC
new file mode 100644
index 0000000000000000000000000000000000000000..6afd35b6ba1cbb77835f9391ae310998f716f0dd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SW_SYNC
@@ -0,0 +1 @@
+# CONFIG_SW_SYNC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYNTH_EVENT_GEN_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYNTH_EVENT_GEN_TEST
new file mode 100644
index 0000000000000000000000000000000000000000..23019ea4cf6db25612efb57dc06b96f8a803cbf0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYNTH_EVENT_GEN_TEST
@@ -0,0 +1 @@
+# CONFIG_SYNTH_EVENT_GEN_TEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSCTL_EXCEPTION_TRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSCTL_EXCEPTION_TRACE
new file mode 100644
index 0000000000000000000000000000000000000000..a8beb0f1bc7722641641abc0b868ea0c7aa56737
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSCTL_EXCEPTION_TRACE
@@ -0,0 +1 @@
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSFB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSFB
new file mode 100644
index 0000000000000000000000000000000000000000..880e56bea101fc0bd8a158846626f13202520b76
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSFB
@@ -0,0 +1 @@
+CONFIG_SYSFB=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE
new file mode 100644
index 0000000000000000000000000000000000000000..f56884877ee3c8c0779916a7262d71bd62711fdd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE
@@ -0,0 +1 @@
+# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV68_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV68_PARTITION
new file mode 100644
index 0000000000000000000000000000000000000000..90936c779bdac7d1f695bfddb3ac9dcd2f279b64
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV68_PARTITION
@@ -0,0 +1 @@
+# CONFIG_SYSV68_PARTITION is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSVIPC_COMPAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSVIPC_COMPAT
new file mode 100644
index 0000000000000000000000000000000000000000..6adabb9102953f7c82d538385576dc973437cf1d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSVIPC_COMPAT
@@ -0,0 +1 @@
+CONFIG_SYSVIPC_COMPAT=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV_FS
new file mode 100644
index 0000000000000000000000000000000000000000..1304fb157f36e462f2c2d14991f002503e8919e4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV_FS
@@ -0,0 +1 @@
+# CONFIG_SYSV_FS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU
new file mode 100644
index 0000000000000000000000000000000000000000..fb944bc3b88ec6a874375914d9a83f7e0748404a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU
@@ -0,0 +1 @@
+CONFIG_TASKS_RCU=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU_GENERIC
new file mode 100644
index 0000000000000000000000000000000000000000..0cdcfef08436414754a00708572823f6b436a88a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU_GENERIC
@@ -0,0 +1 @@
+CONFIG_TASKS_RCU_GENERIC=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RUDE_RCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RUDE_RCU
new file mode 100644
index 0000000000000000000000000000000000000000..e3ac552ae7a95919192825a3e08c1c8e9feb000b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RUDE_RCU
@@ -0,0 +1 @@
+CONFIG_TASKS_RUDE_RCU=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_TRACE_RCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_TRACE_RCU
new file mode 100644
index 0000000000000000000000000000000000000000..72d905396b7f29c6b7e72e1f7619cff5b59927ff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_TRACE_RCU
@@ -0,0 +1 @@
+CONFIG_TASKS_TRACE_RCU=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..7b8f0996b6d23f765e465832d11712bf4d8c9d1e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C
@@ -0,0 +1 @@
+# CONFIG_TCG_TIS_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C_CR50 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C_CR50
new file mode 100644
index 0000000000000000000000000000000000000000..78e78b8e701b3023bf5c3b2305292dd6b96e4bcd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C_CR50
@@ -0,0 +1 @@
+# CONFIG_TCG_TIS_I2C_CR50 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_ST33ZP24_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_ST33ZP24_SPI
new file mode 100644
index 0000000000000000000000000000000000000000..5d54489c9fe9704aeeb10342481090dca072e6e6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_ST33ZP24_SPI
@@ -0,0 +1 @@
+# CONFIG_TCG_TIS_ST33ZP24_SPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_VTPM_PROXY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_VTPM_PROXY
new file mode 100644
index 0000000000000000000000000000000000000000..c5a7ebd133f1aeb2001f9aa0e32fb6730cf68722
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_VTPM_PROXY
@@ -0,0 +1 @@
+# CONFIG_TCG_VTPM_PROXY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_FC b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_FC
new file mode 100644
index 0000000000000000000000000000000000000000..ecef5ff8b1706f72ccb84fea098763f6906741f1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_FC
@@ -0,0 +1 @@
+# CONFIG_TCM_FC is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_QLA2XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_QLA2XXX
new file mode 100644
index 0000000000000000000000000000000000000000..585dff63159db28df445e20a5f6bb10da9b1c19b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_QLA2XXX
@@ -0,0 +1 @@
+# CONFIG_TCM_QLA2XXX is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TERANETICS_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TERANETICS_PHY
new file mode 100644
index 0000000000000000000000000000000000000000..3f0ce3f88c3506f23c09e1ab2dc2cf7f586cb677
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TERANETICS_PHY
@@ -0,0 +1 @@
+CONFIG_TERANETICS_PHY=m
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_ASYNC_DRIVER_PROBE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_ASYNC_DRIVER_PROBE
new file mode 100644
index 0000000000000000000000000000000000000000..2e3d64285755f0fd149349dfdbd6905c028051ed
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_ASYNC_DRIVER_PROBE
@@ -0,0 +1 @@
+# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITMAP
new file mode 100644
index 0000000000000000000000000000000000000000..444f2cff03a1ca48e6b8ac3525ad3600b722cd9a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITMAP
@@ -0,0 +1 @@
+# CONFIG_TEST_BITMAP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITOPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITOPS
new file mode 100644
index 0000000000000000000000000000000000000000..245f24428550b14c1a016cff739dd0f67630bd36
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITOPS
@@ -0,0 +1 @@
+# CONFIG_TEST_BITOPS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BLACKHOLE_DEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BLACKHOLE_DEV
new file mode 100644
index 0000000000000000000000000000000000000000..6243b331d3963984bbc978dcf1e6ae2454236f86
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BLACKHOLE_DEV
@@ -0,0 +1 @@
+# CONFIG_TEST_BLACKHOLE_DEV is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DHRY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DHRY
new file mode 100644
index 0000000000000000000000000000000000000000..39a3905198c2bcee808ccb3a4200bee7d6a54213
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DHRY
@@ -0,0 +1 @@
+# CONFIG_TEST_DHRY is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DIV64 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DIV64
new file mode 100644
index 0000000000000000000000000000000000000000..fa139eb8d38799f38e3851b4790b2b59876cb370
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DIV64
@@ -0,0 +1 @@
+# CONFIG_TEST_DIV64 is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DYNAMIC_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DYNAMIC_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..6577d7d78d118812b8b9f0b094e2d68c817a26a5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DYNAMIC_DEBUG
@@ -0,0 +1 @@
+# CONFIG_TEST_DYNAMIC_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FIRMWARE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FIRMWARE
new file mode 100644
index 0000000000000000000000000000000000000000..e5930288950347eb2417e1b7905ea50f1c107f02
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FIRMWARE
@@ -0,0 +1 @@
+# CONFIG_TEST_FIRMWARE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FREE_PAGES b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FREE_PAGES
new file mode 100644
index 0000000000000000000000000000000000000000..246fdf83a463890acccfab588440d19849fdfbcf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FREE_PAGES
@@ -0,0 +1 @@
+# CONFIG_TEST_FREE_PAGES is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HEXDUMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HEXDUMP
new file mode 100644
index 0000000000000000000000000000000000000000..7a62932d91ad882bbfe189ff9b456698426a6a37
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HEXDUMP
@@ -0,0 +1 @@
+# CONFIG_TEST_HEXDUMP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HMM b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HMM
new file mode 100644
index 0000000000000000000000000000000000000000..5210be0d77888a1d3f72b2fa00d67b0e92eb70ac
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HMM
@@ -0,0 +1 @@
+# CONFIG_TEST_HMM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_IDA b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_IDA
new file mode 100644
index 0000000000000000000000000000000000000000..4d54b4522c82fe248647cc4b621f74e5aa1501ff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_IDA
@@ -0,0 +1 @@
+# CONFIG_TEST_IDA is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KMOD b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KMOD
new file mode 100644
index 0000000000000000000000000000000000000000..59165b345a7669a6bc6643c75d07510e1a5002a5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KMOD
@@ -0,0 +1 @@
+# CONFIG_TEST_KMOD is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KSTRTOX b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KSTRTOX
new file mode 100644
index 0000000000000000000000000000000000000000..e85f83bc9d88499785f309570e9a0a484325c255
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KSTRTOX
@@ -0,0 +1 @@
+CONFIG_TEST_KSTRTOX=y
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LKM b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LKM
new file mode 100644
index 0000000000000000000000000000000000000000..b52f756d8d3982c1755eb1a0a2985c8ad2758f76
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LKM
@@ -0,0 +1 @@
+# CONFIG_TEST_LKM is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LOCKUP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LOCKUP
new file mode 100644
index 0000000000000000000000000000000000000000..bdd84b4fbbcd32f3970994a5da2459473f89dbff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LOCKUP
@@ -0,0 +1 @@
+# CONFIG_TEST_LOCKUP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MAPLE_TREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MAPLE_TREE
new file mode 100644
index 0000000000000000000000000000000000000000..c0d4771b529d61e571d20e2df14ab511e84fd92a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MAPLE_TREE
@@ -0,0 +1 @@
+# CONFIG_TEST_MAPLE_TREE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMCAT_P b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMCAT_P
new file mode 100644
index 0000000000000000000000000000000000000000..41b9190fae3788039a8a775317af709e324ee2eb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMCAT_P
@@ -0,0 +1 @@
+# CONFIG_TEST_MEMCAT_P is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMINIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMINIT
new file mode 100644
index 0000000000000000000000000000000000000000..649e9c857c4f8fed0fd3eebcf67b19c6e6350146
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMINIT
@@ -0,0 +1 @@
+# CONFIG_TEST_MEMINIT is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MIN_HEAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MIN_HEAP
new file mode 100644
index 0000000000000000000000000000000000000000..3bafbc63608a66e299ff40fb5e15e82a64d53cf6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MIN_HEAP
@@ -0,0 +1 @@
+# CONFIG_TEST_MIN_HEAP is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_OBJAGG b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_OBJAGG
new file mode 100644
index 0000000000000000000000000000000000000000..37ec47bee3031d0deb5d3f6470f50dd83a0a95cb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_OBJAGG
@@ -0,0 +1 @@
+# CONFIG_TEST_OBJAGG is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PARMAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PARMAN
new file mode 100644
index 0000000000000000000000000000000000000000..2aade4293141cd1227fe8a081019d3a6912d19c4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PARMAN
@@ -0,0 +1 @@
+# CONFIG_TEST_PARMAN is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_POWER b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_POWER
new file mode 100644
index 0000000000000000000000000000000000000000..64e229a696940d30e8901ed75954a716da210028
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_POWER
@@ -0,0 +1 @@
+# CONFIG_TEST_POWER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PRINTF b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PRINTF
new file mode 100644
index 0000000000000000000000000000000000000000..5b3c2da9884533fd59beb84a854deedfb07b1f1a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PRINTF
@@ -0,0 +1 @@
+# CONFIG_TEST_PRINTF is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_REF_TRACKER b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_REF_TRACKER
new file mode 100644
index 0000000000000000000000000000000000000000..e2b4aa20417d9b43c545cbe53e8d0bebafbbd946
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_REF_TRACKER
@@ -0,0 +1 @@
+# CONFIG_TEST_REF_TRACKER is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_RHASHTABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_RHASHTABLE
new file mode 100644
index 0000000000000000000000000000000000000000..5d9f7d2900f19ea55fab757f6c217bbcadbe5e4d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_RHASHTABLE
@@ -0,0 +1 @@
+# CONFIG_TEST_RHASHTABLE is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SCANF b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SCANF
new file mode 100644
index 0000000000000000000000000000000000000000..d160b6ef4dd58d19bb6c78acdf3256624cb4eb52
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SCANF
@@ -0,0 +1 @@
+# CONFIG_TEST_SCANF is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STATIC_KEYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STATIC_KEYS
new file mode 100644
index 0000000000000000000000000000000000000000..1bf36ae82fb8059bd208d04a14e316bbbba03873
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STATIC_KEYS
@@ -0,0 +1 @@
+# CONFIG_TEST_STATIC_KEYS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STRING_HELPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STRING_HELPERS
new file mode 100644
index 0000000000000000000000000000000000000000..b125615670a5a3a0e673693bbecf0693203223a6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STRING_HELPERS
@@ -0,0 +1 @@
+# CONFIG_TEST_STRING_HELPERS is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SYSCTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SYSCTL
new file mode 100644
index 0000000000000000000000000000000000000000..f81589ea4698198b518be43874a887d30e460575
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SYSCTL
@@ -0,0 +1 @@
+# CONFIG_TEST_SYSCTL is not set
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UDELAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UDELAY
new file mode 100644
index
0000000000000000000000000000000000000000..95fd4596f7117214a2e311225686c2642ccbc84c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UDELAY @@ -0,0 +1 @@ +# CONFIG_TEST_UDELAY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_USER_COPY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_USER_COPY new file mode 100644 index 0000000000000000000000000000000000000000..ba777e177fd24be9ed03c0a49cd87768ca19d5a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_USER_COPY @@ -0,0 +1 @@ +# CONFIG_TEST_USER_COPY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UUID b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UUID new file mode 100644 index 0000000000000000000000000000000000000000..434dbe1004a22437f7a061cb6962137186992f7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UUID @@ -0,0 +1 @@ +# CONFIG_TEST_UUID is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_VMALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_VMALLOC new file mode 100644 index 0000000000000000000000000000000000000000..35c691714c5fd7ae563d9c916b12f7d0ba048b77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_VMALLOC @@ -0,0 +1 @@ +# CONFIG_TEST_VMALLOC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_XARRAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_XARRAY new file mode 100644 index 0000000000000000000000000000000000000000..327e557319aa7ff8b263704003f73d645692dbcc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_XARRAY @@ -0,0 +1 @@ +# CONFIG_TEST_XARRAY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH new file mode 100644 index 0000000000000000000000000000000000000000..035fa28850a0aa662f1d7cc1aac4d6ef37039210 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH @@ -0,0 +1 @@ +CONFIG_TEXTSEARCH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_BM b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_BM new file mode 100644 index 0000000000000000000000000000000000000000..6be770b24b8705e2cc86e2e1f3346a5d954e1bc9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_BM @@ -0,0 +1 @@ +CONFIG_TEXTSEARCH_BM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_FSM b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_FSM new file mode 100644 index 0000000000000000000000000000000000000000..b08348a22d9dbcc0b0921f6594835bfe7bf5f649 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_FSM @@ -0,0 +1 @@ +CONFIG_TEXTSEARCH_FSM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_KMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_KMP new file mode 100644 index 0000000000000000000000000000000000000000..2961a652465e0c06b5159f166b6c6d31867bfa20 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_KMP @@ -0,0 +1 @@ +CONFIG_TEXTSEARCH_KMP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_EMULATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_EMULATION new file mode 100644 index 0000000000000000000000000000000000000000..230517ec2f76d55bf6a9d23a198e9c2e41d7a3e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_EMULATION @@ -0,0 +1 @@ +# CONFIG_THERMAL_EMULATION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_STATISTICS b/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_STATISTICS new file 
mode 100644 index 0000000000000000000000000000000000000000..d375534fe2534d235baac52c96c3aa27b71b3443 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_STATISTICS @@ -0,0 +1 @@ +# CONFIG_THERMAL_STATISTICS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_THP_SWAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_THP_SWAP new file mode 100644 index 0000000000000000000000000000000000000000..b3e1926b376a4b5a25b7a6cccb0bce2f52022f07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_THP_SWAP @@ -0,0 +1 @@ +CONFIG_THP_SWAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_THREAD_INFO_IN_TASK b/anolis/configs/L2-OPTIONAL/default/CONFIG_THREAD_INFO_IN_TASK new file mode 100644 index 0000000000000000000000000000000000000000..e73b49f63a768454f01e8e5d8638a3b0f2b6fef5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_THREAD_INFO_IN_TASK @@ -0,0 +1 @@ +CONFIG_THREAD_INFO_IN_TASK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_THRUSTMASTER_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_THRUSTMASTER_FF new file mode 100644 index 0000000000000000000000000000000000000000..c2c1106a61db9a23be5f96688ca7fbe373b6147e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_THRUSTMASTER_FF @@ -0,0 +1 @@ +# CONFIG_THRUSTMASTER_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TICK_ONESHOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TICK_ONESHOT new file mode 100644 index 0000000000000000000000000000000000000000..96a94b70c4963a4ab777902cf9192bcdb6b4894c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TICK_ONESHOT @@ -0,0 +1 @@ +CONFIG_TICK_ONESHOT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIFM_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIFM_CORE new file mode 100644 index 0000000000000000000000000000000000000000..21d8210ff2cf766efbfdfb5469cc1393fcc2d769 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIFM_CORE @@ -0,0 +1 @@ +CONFIG_TIFM_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_HX8357D b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_HX8357D new file mode 100644 index 0000000000000000000000000000000000000000..b6034f2f36988f0b7dee30013805035078e9ad70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_HX8357D @@ -0,0 +1 @@ +# CONFIG_TINYDRM_HX8357D is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9163 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9163 new file mode 100644 index 0000000000000000000000000000000000000000..2a0b903c4dd3845e2bba3f884aa5f7c9fda780f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9163 @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ILI9163 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9225 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9225 new file mode 100644 index 0000000000000000000000000000000000000000..1371c3f6e42143f861096ea4c513e5b831e9e784 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9225 @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ILI9225 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9341 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9341 new file mode 100644 index 0000000000000000000000000000000000000000..d0fdddaeb2fadf723851b1490ce330cd7c08b190 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9341 @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ILI9341 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9486 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9486 new file mode 100644 index 0000000000000000000000000000000000000000..0728f544a3e9a9b03bcabbfbfbb458f8f8bde3cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9486 @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ILI9486 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_MI0283QT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_MI0283QT new file mode 100644 index 0000000000000000000000000000000000000000..fb0b5f045e3e4805de432064e274d353499fd523 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_MI0283QT @@ -0,0 +1 @@ +# CONFIG_TINYDRM_MI0283QT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_REPAPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_REPAPER new file mode 100644 index 0000000000000000000000000000000000000000..c4d2874faaa51278eff63ca010cfcfba073245e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_REPAPER @@ -0,0 +1 @@ +# CONFIG_TINYDRM_REPAPER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7586 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7586 new file mode 100644 index 0000000000000000000000000000000000000000..2b9e29f63216dd335caff363a6b5ba075cab3a00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7586 @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ST7586 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7735R b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7735R new file mode 100644 index 0000000000000000000000000000000000000000..365910ba13391b601f68f5caf4782a4feb7aff7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7735R @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ST7735R is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC new file mode 100644 index 0000000000000000000000000000000000000000..764e2085e37997c81615c414500ee167ceab6add --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC @@ -0,0 +1 @@ +CONFIG_TIPC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_CRYPTO b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_CRYPTO new file mode 100644 index 0000000000000000000000000000000000000000..73824dd3483bbe4ccadbb11846eed0fd2bfd6069 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_CRYPTO @@ -0,0 +1 @@ +CONFIG_TIPC_CRYPTO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_DIAG b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_DIAG new file mode 100644 index 0000000000000000000000000000000000000000..8defa28ca8eca5c2cc2ebee91107eca3c084771e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_DIAG @@ -0,0 +1 @@ +CONFIG_TIPC_DIAG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_IB b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_IB new file mode 100644 index 0000000000000000000000000000000000000000..f60ea000af52cdc75ed1b5cf2ac380a2c888be7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_IB @@ -0,0 +1 @@ +CONFIG_TIPC_MEDIA_IB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_UDP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_UDP new file mode 100644 index 0000000000000000000000000000000000000000..d25fc0f034ebeb5360d230fdedd39466b2d2e47f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_UDP @@ -0,0 +1 @@ +CONFIG_TIPC_MEDIA_UDP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TI_ST 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_TI_ST new file mode 100644 index 0000000000000000000000000000000000000000..6d11e4e2c8b23fb511c5cea72e81732445d980b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TI_ST @@ -0,0 +1 @@ +# CONFIG_TI_ST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TMPFS_QUOTA b/anolis/configs/L2-OPTIONAL/default/CONFIG_TMPFS_QUOTA new file mode 100644 index 0000000000000000000000000000000000000000..360fa99ee3c082cd5cb2b6c47ec95f719d5ccae6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TMPFS_QUOTA @@ -0,0 +1 @@ +# CONFIG_TMPFS_QUOTA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TOOLS_SUPPORT_RELR b/anolis/configs/L2-OPTIONAL/default/CONFIG_TOOLS_SUPPORT_RELR new file mode 100644 index 0000000000000000000000000000000000000000..583e5c152a3b09532f9bf97184470e689115e680 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TOOLS_SUPPORT_RELR @@ -0,0 +1 @@ +CONFIG_TOOLS_SUPPORT_RELR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6105X b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6105X new file mode 100644 index 0000000000000000000000000000000000000000..8b8423e575f2f3dbc968c16ad3871275c5ec917d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6105X @@ -0,0 +1 @@ +# CONFIG_TPS6105X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS65010 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS65010 new file mode 100644 index 0000000000000000000000000000000000000000..3be4f7b82f4fc9bc2e82ff2c4b7ce0e2d1ff75b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS65010 @@ -0,0 +1 @@ +# CONFIG_TPS65010 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6507X b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6507X new file mode 100644 index 0000000000000000000000000000000000000000..bf40879fd1f2ba33d8e1960018e6d17084ef2974 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6507X @@ -0,0 +1 @@ +# CONFIG_TPS6507X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACEPOINT_BENCHMARK b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACEPOINT_BENCHMARK new file mode 100644 index 0000000000000000000000000000000000000000..c5ad1dcc2aeda6d21f9a7da1a5ebdf8e27112600 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACEPOINT_BENCHMARK @@ -0,0 +1 @@ +# CONFIG_TRACEPOINT_BENCHMARK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACER_MAX_TRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACER_MAX_TRACE new file mode 100644 index 0000000000000000000000000000000000000000..e01d140566a3c6543b7eede57622293453be29b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACER_MAX_TRACE @@ -0,0 +1 @@ +CONFIG_TRACER_MAX_TRACE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_CLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_CLOCK new file mode 100644 index 0000000000000000000000000000000000000000..155e7b6983ba2de52e26bfdfa4fc33392352e7e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_CLOCK @@ -0,0 +1 @@ +CONFIG_TRACE_CLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVAL_MAP_FILE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVAL_MAP_FILE new file mode 100644 index 0000000000000000000000000000000000000000..8c710a18f317241d274af0565d02669475f2bc0c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVAL_MAP_FILE @@ -0,0 +1 @@ +# CONFIG_TRACE_EVAL_MAP_FILE is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVENT_INJECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVENT_INJECT new file mode 100644 index 0000000000000000000000000000000000000000..1c47143cc38ca6216b6d33b3dc1308546dd4553e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVENT_INJECT @@ -0,0 +1 @@ +# CONFIG_TRACE_EVENT_INJECT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..7a3d1a02972cd1a3d5dbea8ac5cb5c3b6efd59c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT @@ -0,0 +1 @@ +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..de7fd6ff09e5edf5d87ef58be0e763d426bf8c5b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_SUPPORT @@ -0,0 +1 @@ +CONFIG_TRACE_IRQFLAGS_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING new file mode 100644 index 0000000000000000000000000000000000000000..c2100521d9d37097d6cd0bac7d99c12911378428 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING @@ -0,0 +1 @@ +CONFIG_TRACING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..0260c7f5dedf2454fe9760c9c92a3d414aef7e4d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING_SUPPORT @@ -0,0 +1 @@ +CONFIG_TRACING_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_RCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_RCU new file mode 100644 index 0000000000000000000000000000000000000000..64ed0eab34ec659ca32f7ad6dba3bb96c5d133a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_RCU @@ -0,0 +1 @@ +CONFIG_TREE_RCU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_SRCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_SRCU new file mode 100644 index 0000000000000000000000000000000000000000..d432877a2facd30f8fe49e93cfff0f9207f5ac30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_SRCU @@ -0,0 +1 @@ +CONFIG_TREE_SRCU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TSNEP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TSNEP new file mode 100644 index 0000000000000000000000000000000000000000..8361307c4ba8d168c8e76d9efbb0d6c3a3083b71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TSNEP @@ -0,0 +1 @@ +# CONFIG_TSNEP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TUN_VNET_CROSS_LE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TUN_VNET_CROSS_LE new file mode 100644 index 0000000000000000000000000000000000000000..80f43181563d5b1cc0438494954e89202142df12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TUN_VNET_CROSS_LE @@ -0,0 +1 @@ +# CONFIG_TUN_VNET_CROSS_LE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL4030_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL4030_CORE new file mode 100644 index 0000000000000000000000000000000000000000..1f5b92782554d58965289e6695ef60b8c86940e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL4030_CORE @@ -0,0 +1 @@ +# 
CONFIG_TWL4030_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL6040_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL6040_CORE new file mode 100644 index 0000000000000000000000000000000000000000..d44fdc15f94fc9855e52d3e25d9a667f9e0eb99b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL6040_CORE @@ -0,0 +1 @@ +# CONFIG_TWL6040_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC new file mode 100644 index 0000000000000000000000000000000000000000..455b230d19d82502c5fbf31ce79b5134997a50ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC @@ -0,0 +1 @@ +CONFIG_TYPEC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_ANX7411 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_ANX7411 new file mode 100644 index 0000000000000000000000000000000000000000..44b5f6b101d4dddd2e75280a3ad3ab90a52be137 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_ANX7411 @@ -0,0 +1 @@ +# CONFIG_TYPEC_ANX7411 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_DP_ALTMODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_DP_ALTMODE new file mode 100644 index 0000000000000000000000000000000000000000..53f064aa454313f03ba6943f8efc45e44520b05d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_DP_ALTMODE @@ -0,0 +1 @@ +CONFIG_TYPEC_DP_ALTMODE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_HD3SS3220 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_HD3SS3220 new file mode 100644 index 0000000000000000000000000000000000000000..0616d7d3d35a47e2fad4cb7d118ab05eb5eb6993 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_HD3SS3220 @@ -0,0 +1 @@ +# CONFIG_TYPEC_HD3SS3220 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_FSA4480 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_FSA4480 new file mode 100644 index 0000000000000000000000000000000000000000..dbb620396ce888af6535977b14a0eba4b8e668bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_FSA4480 @@ -0,0 +1 @@ +# CONFIG_TYPEC_MUX_FSA4480 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_GPIO_SBU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_GPIO_SBU new file mode 100644 index 0000000000000000000000000000000000000000..bd5c3d8f0be656fa394e2a774ac2bfd5e5c47062 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_GPIO_SBU @@ -0,0 +1 @@ +# CONFIG_TYPEC_MUX_GPIO_SBU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_NB7VPQ904M b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_NB7VPQ904M new file mode 100644 index 0000000000000000000000000000000000000000..460d42daf2a973ab35c24df542f7de4dccd819ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_NB7VPQ904M @@ -0,0 +1 @@ +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_PI3USB30532 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_PI3USB30532 new file mode 100644 index 0000000000000000000000000000000000000000..350ebc23d14f6d0700a25df54aafecbbfd3321b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_PI3USB30532 @@ -0,0 +1 @@ +CONFIG_TYPEC_MUX_PI3USB30532=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_NVIDIA_ALTMODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_NVIDIA_ALTMODE new file mode 100644 index 
0000000000000000000000000000000000000000..4ebb1eaeca7b9d0dd2a17ffd7e94a16472e0e1c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_NVIDIA_ALTMODE @@ -0,0 +1 @@ +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_RT1719 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_RT1719 new file mode 100644 index 0000000000000000000000000000000000000000..c1291042a958a494cdb93f1e42d9d4feb1b927f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_RT1719 @@ -0,0 +1 @@ +# CONFIG_TYPEC_RT1719 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_STUSB160X b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_STUSB160X new file mode 100644 index 0000000000000000000000000000000000000000..9c9373827da528fe1afbf642bb9769677d8226a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_STUSB160X @@ -0,0 +1 @@ +# CONFIG_TYPEC_STUSB160X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPCI new file mode 100644 index 0000000000000000000000000000000000000000..4da3884e266b8df2cb47ba15f1ac96364f796ba6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPCI @@ -0,0 +1 @@ +# CONFIG_TYPEC_TCPCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPM b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPM new file mode 100644 index 0000000000000000000000000000000000000000..e38aa8b2747a14f0493807542ed5775c96eef60b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPM @@ -0,0 +1 @@ +CONFIG_TYPEC_TCPM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TPS6598X b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TPS6598X new file mode 100644 index 0000000000000000000000000000000000000000..b6ef0d699f791eca9f83e60a4f5a4587ca622534 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TPS6598X @@ -0,0 +1 @@ +CONFIG_TYPEC_TPS6598X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_UCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_UCSI new file mode 100644 index 0000000000000000000000000000000000000000..744f2f6214e041687fd575db6da83724579639b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_UCSI @@ -0,0 +1 @@ +CONFIG_TYPEC_UCSI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_WUSB3801 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_WUSB3801 new file mode 100644 index 0000000000000000000000000000000000000000..717e2b34acb8fc6713b60dea43843455c77d6c8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_WUSB3801 @@ -0,0 +1 @@ +# CONFIG_TYPEC_WUSB3801 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UACCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_UACCE new file mode 100644 index 0000000000000000000000000000000000000000..ec75233c9b3e0bc6cb006df05596c646bcac85af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UACCE @@ -0,0 +1 @@ +CONFIG_UACCE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UBIFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_UBIFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..72c1f7a49d95e58f673fed709fd6a13caf8e5197 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UBIFS_FS @@ -0,0 +1 @@ +# CONFIG_UBIFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UCS2_STRING b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCS2_STRING new file mode 100644 index 
0000000000000000000000000000000000000000..d5156e3e8df9074c9753f270fc1c2ad8819f926c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCS2_STRING @@ -0,0 +1 @@ +CONFIG_UCS2_STRING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_ACPI new file mode 100644 index 0000000000000000000000000000000000000000..6adca85bd20883b7231116ccb3e7ae712086335d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_ACPI @@ -0,0 +1 @@ +CONFIG_UCSI_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_CCG b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_CCG new file mode 100644 index 0000000000000000000000000000000000000000..00d6d3344e30a2c3b87e0385656aa6845f263f31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_CCG @@ -0,0 +1 @@ +# CONFIG_UCSI_CCG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_STM32G0 b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_STM32G0 new file mode 100644 index 0000000000000000000000000000000000000000..4f097b689de7918b84f1cb6712cce5b9f304ce1c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_STM32G0 @@ -0,0 +1 @@ +# CONFIG_UCSI_STM32G0 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UDMABUF b/anolis/configs/L2-OPTIONAL/default/CONFIG_UDMABUF new file mode 100644 index 0000000000000000000000000000000000000000..1d2a9a2f5b314fe693806d779ed3a8c6b4207633 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UDMABUF @@ -0,0 +1 @@ +# CONFIG_UDMABUF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UEFI_CPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_UEFI_CPER new file mode 100644 index 0000000000000000000000000000000000000000..6205dc54cfdc97223990d87f8131a931bee1df25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UEFI_CPER @@ -0,0 +1 @@ +CONFIG_UEFI_CPER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_UFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..768c9ac9e227252e8505eeb282f5b678c7bb8c6c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UFS_FS @@ -0,0 +1 @@ +# CONFIG_UFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UHID b/anolis/configs/L2-OPTIONAL/default/CONFIG_UHID new file mode 100644 index 0000000000000000000000000000000000000000..dc29857794f4cf16071f9a8171dd4de44abf809a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UHID @@ -0,0 +1 @@ +CONFIG_UHID=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ULTRIX_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ULTRIX_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..918b1ac2b7348d6b2f89ed084089641c45e55067 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ULTRIX_PARTITION @@ -0,0 +1 @@ +# CONFIG_ULTRIX_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UNICODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNICODE new file mode 100644 index 0000000000000000000000000000000000000000..a9c4799a491cbfc3a4cd0e6cd0a7149a632ac33c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNICODE @@ -0,0 +1 @@ +# CONFIG_UNICODE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UNINLINE_SPIN_UNLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNINLINE_SPIN_UNLOCK new file mode 100644 index 0000000000000000000000000000000000000000..e4969f9444c985e5f4f8419d37067eacab055dd7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNINLINE_SPIN_UNLOCK @@ -0,0 +1 @@ +CONFIG_UNINLINE_SPIN_UNLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UNIX_SCM b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNIX_SCM new file mode 100644 index 0000000000000000000000000000000000000000..2488ff87253e0e485483e07bcb20b23d306ad440 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNIX_SCM @@ -0,0 +1 @@ +CONFIG_UNIX_SCM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB4 new file mode 100644 index 0000000000000000000000000000000000000000..00afcdf8f05aa6201ce350aea23f959e9f3fc16a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB4 @@ -0,0 +1 @@ +# CONFIG_USB4 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USBIP_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USBIP_CORE new file mode 100644 index 0000000000000000000000000000000000000000..f23acaa70d93abe625f254e5c53cc9ab32655ef3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USBIP_CORE @@ -0,0 +1 @@ +# CONFIG_USBIP_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USBPCWATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USBPCWATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..e92c7cbba012d427ce12b423c139174bd152854c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USBPCWATCHDOG @@ -0,0 +1 @@ +CONFIG_USBPCWATCHDOG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ACM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ACM new file mode 100644 index 0000000000000000000000000000000000000000..62189f613d576d0d0a04e84cc545b8dc24b85963 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ACM @@ -0,0 +1 @@ +CONFIG_USB_ACM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ADUTUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ADUTUX new file mode 100644 index 0000000000000000000000000000000000000000..e082deeb5e7d915f71217d23696ccd729a3f864a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ADUTUX @@ -0,0 +1 @@ +CONFIG_USB_ADUTUX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ALI_M5632 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ALI_M5632 new file mode 100644 index 0000000000000000000000000000000000000000..e578e0be42fef59ba34dc3c06b116652c0aea03b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ALI_M5632 @@ -0,0 +1 @@ +CONFIG_USB_ALI_M5632=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AN2720 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AN2720 new file mode 100644 index 0000000000000000000000000000000000000000..696ce5c88317cdfce77fa681905885076d312a22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AN2720 @@ -0,0 +1 @@ +CONFIG_USB_AN2720=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ANNOUNCE_NEW_DEVICES b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ANNOUNCE_NEW_DEVICES new file mode 100644 index 0000000000000000000000000000000000000000..f6c074bb0ece48bf25bddf608735e0b0b396027b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ANNOUNCE_NEW_DEVICES @@ -0,0 +1 @@ +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_APPLEDISPLAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_APPLEDISPLAY new file mode 100644 index 0000000000000000000000000000000000000000..ec4e8a64a7a1c847253e65aad40689ba0288dd1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_APPLEDISPLAY @@ -0,0 +1 @@ 
+CONFIG_USB_APPLEDISPLAY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARCH_HAS_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARCH_HAS_HCD new file mode 100644 index 0000000000000000000000000000000000000000..4d952e2fa961b9f0d8d977c818d1d6014eff27c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARCH_HAS_HCD @@ -0,0 +1 @@ +CONFIG_USB_ARCH_HAS_HCD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARMLINUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARMLINUX new file mode 100644 index 0000000000000000000000000000000000000000..597b27f40b12b8762e4cecbc7c2201878b77a7f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARMLINUX @@ -0,0 +1 @@ +CONFIG_USB_ARMLINUX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ATM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ATM new file mode 100644 index 0000000000000000000000000000000000000000..0bdc7ed90ce0e3212805db38f342e5b6f200f52d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ATM @@ -0,0 +1 @@ +CONFIG_USB_ATM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AUTOSUSPEND_DELAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AUTOSUSPEND_DELAY new file mode 100644 index 0000000000000000000000000000000000000000..0886e4b55a21dfb8271b743bc3b07db6097fb152 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AUTOSUSPEND_DELAY @@ -0,0 +1 @@ +CONFIG_USB_AUTOSUSPEND_DELAY=2 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_BELKIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_BELKIN new file mode 100644 index 0000000000000000000000000000000000000000..2f7e9876d9de9b1282f26ca28aecca7bcd87dae3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_BELKIN @@ -0,0 +1 @@ +CONFIG_USB_BELKIN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_C67X00_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_C67X00_HCD new file mode 100644 index 0000000000000000000000000000000000000000..a0e2f42dac409f456d76ad799510e928f3447b83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_C67X00_HCD @@ -0,0 +1 @@ +# CONFIG_USB_C67X00_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CATC b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CATC new file mode 100644 index 0000000000000000000000000000000000000000..c89d8f72c9e41d122c707ddc006f81d069e01c87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CATC @@ -0,0 +1 @@ +CONFIG_USB_CATC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CDNS_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CDNS_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..c38a1200259756cd7ae99ea4592de6e8796df90f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CDNS_SUPPORT @@ -0,0 +1 @@ +# CONFIG_USB_CDNS_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CHIPIDEA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CHIPIDEA new file mode 100644 index 0000000000000000000000000000000000000000..b006254ad58e31f6c55b07d993770b93642f78e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CHIPIDEA @@ -0,0 +1 @@ +# CONFIG_USB_CHIPIDEA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_COMMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_COMMON new file mode 100644 index 0000000000000000000000000000000000000000..14bd22787685f782a7544f072df2ebbffa1d7b48 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_COMMON @@ -0,0 +1 @@ +CONFIG_USB_COMMON=y 
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CONN_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CONN_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..e91343881fcc0d3468f685d1f062800a19c84164 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CONN_GPIO @@ -0,0 +1 @@ +# CONFIG_USB_CONN_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CXACRU b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CXACRU new file mode 100644 index 0000000000000000000000000000000000000000..7650d329ea27bb19002cdc463c617364167e1449 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CXACRU @@ -0,0 +1 @@ +CONFIG_USB_CXACRU=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYPRESS_CY7C63 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYPRESS_CY7C63 new file mode 100644 index 0000000000000000000000000000000000000000..5547144d9b0e6cd55234ba83c0f6102d92938637 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYPRESS_CY7C63 @@ -0,0 +1 @@ +# CONFIG_USB_CYPRESS_CY7C63 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYTHERM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYTHERM new file mode 100644 index 0000000000000000000000000000000000000000..e8dc4c60b74471e1ba4fe954893f8f94ae17bc33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYTHERM @@ -0,0 +1 @@ +# CONFIG_USB_CYTHERM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DEFAULT_PERSIST b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DEFAULT_PERSIST new file mode 100644 index 0000000000000000000000000000000000000000..db89fa3d42814d7a0508af024dd92789e6c193bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DEFAULT_PERSIST @@ -0,0 +1 @@ +CONFIG_USB_DEFAULT_PERSIST=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC2 new file mode 100644 index 0000000000000000000000000000000000000000..94b56af6d056aa578427e4fe419307a9dc83c3d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC2 @@ -0,0 +1 @@ +# CONFIG_USB_DWC2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC3 new file mode 100644 index 0000000000000000000000000000000000000000..fc9f5c8fe1040f5bb6e3b14f474df1c6b0170ac9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC3 @@ -0,0 +1 @@ +# CONFIG_USB_DWC3 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DYNAMIC_MINORS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DYNAMIC_MINORS new file mode 100644 index 0000000000000000000000000000000000000000..88b6f1f71e27652eaefc4e6eb55bdc44cfde275f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DYNAMIC_MINORS @@ -0,0 +1 @@ +# CONFIG_USB_DYNAMIC_MINORS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_FSL b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_FSL new file mode 100644 index 0000000000000000000000000000000000000000..c2f936a3444cf8daed2ea0156ebc8017cb470037 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_FSL @@ -0,0 +1 @@ +# CONFIG_USB_EHCI_FSL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_HCD new file mode 100644 index 0000000000000000000000000000000000000000..7322dc2d519c0400d583f9e4c5874334bd0b8ffd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_HCD @@ -0,0 
+1 @@ +CONFIG_USB_EHCI_HCD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_PCI new file mode 100644 index 0000000000000000000000000000000000000000..e482ceabd85a9aacfc507acf55c16a11d718a5ca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_PCI @@ -0,0 +1 @@ +CONFIG_USB_EHCI_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_ROOT_HUB_TT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_ROOT_HUB_TT new file mode 100644 index 0000000000000000000000000000000000000000..a5c3cc477270f3db571f3622af6ea4172f9c448a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_ROOT_HUB_TT @@ -0,0 +1 @@ +CONFIG_USB_EHCI_ROOT_HUB_TT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_TT_NEWSCHED b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_TT_NEWSCHED new file mode 100644 index 0000000000000000000000000000000000000000..1eebea30348c198328806a569d334e5a16f64e78 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_TT_NEWSCHED @@ -0,0 +1 @@ +CONFIG_USB_EHCI_TT_NEWSCHED=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHSET_TEST_FIXTURE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHSET_TEST_FIXTURE new file mode 100644 index 0000000000000000000000000000000000000000..4cef71e49e33db1c9567e0c5f97b87ec27bde538 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHSET_TEST_FIXTURE @@ -0,0 +1 @@ +# CONFIG_USB_EHSET_TEST_FIXTURE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI26 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI26 new file mode 100644 index 0000000000000000000000000000000000000000..35081c0de25230326197878b2d25dafaf2e3560c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI26 @@ -0,0 +1 @@ +CONFIG_USB_EMI26=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI62 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI62 new file mode 100644 index 0000000000000000000000000000000000000000..f2e7d662899adab1018372854d32aaea6505ea7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI62 @@ -0,0 +1 @@ +CONFIG_USB_EMI62=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EPSON2888 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EPSON2888 new file mode 100644 index 0000000000000000000000000000000000000000..944ff495a26bfea589d97628e7c49700f484a998 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EPSON2888 @@ -0,0 +1 @@ +CONFIG_USB_EPSON2888=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EZUSB_FX2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EZUSB_FX2 new file mode 100644 index 0000000000000000000000000000000000000000..cbcda7399e8d7ec60b4ed8f1500b11461fa39b9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EZUSB_FX2 @@ -0,0 +1 @@ +CONFIG_USB_EZUSB_FX2=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_FEW_INIT_RETRIES b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_FEW_INIT_RETRIES new file mode 100644 index 0000000000000000000000000000000000000000..9d905e9cef4faa4d27e12fa17781ee72c71c72d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_FEW_INIT_RETRIES @@ -0,0 +1 @@ +# CONFIG_USB_FEW_INIT_RETRIES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GADGET b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GADGET new file mode 100644 index 0000000000000000000000000000000000000000..86fb660fe6d565733ed0874fc3c2ab44f613d0ca --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GADGET @@ -0,0 +1 @@ +# CONFIG_USB_GADGET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GPIO_VBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GPIO_VBUS new file mode 100644 index 0000000000000000000000000000000000000000..3fb8053360f0a6b93c6b19be2418c4e0539a68fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GPIO_VBUS @@ -0,0 +1 @@ +# CONFIG_USB_GPIO_VBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_BCMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_BCMA new file mode 100644 index 0000000000000000000000000000000000000000..645b791664a2049e0538419b7299e5a143adc729 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_BCMA @@ -0,0 +1 @@ +# CONFIG_USB_HCD_BCMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_TEST_MODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_TEST_MODE new file mode 100644 index 0000000000000000000000000000000000000000..3d8668f5e9ceb95a4b0238e742eb803990aa6061 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_TEST_MODE @@ -0,0 +1 @@ +# CONFIG_USB_HCD_TEST_MODE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HID b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HID new file mode 100644 index 0000000000000000000000000000000000000000..9f35c0af732455a5c08667802b677d6a721032c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HID @@ -0,0 +1 @@ +CONFIG_USB_HID=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HIDDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HIDDEV new file mode 100644 index 0000000000000000000000000000000000000000..4d37a25b7d86f301950e9884d3c07990069b0118 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HIDDEV @@ -0,0 +1 @@ +CONFIG_USB_HIDDEV=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB3503 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB3503 new file mode 100644 index 0000000000000000000000000000000000000000..8f6b0dd20bc4d0807d86c656d5b9cc7d25b70faf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB3503 @@ -0,0 +1 @@ +CONFIG_USB_HSIC_USB3503=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB4604 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB4604 new file mode 100644 index 0000000000000000000000000000000000000000..2e27cbba8b62745fbeadf9e645904427036c2ddc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB4604 @@ -0,0 +1 @@ +# CONFIG_USB_HSIC_USB4604 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSO b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSO new file mode 100644 index 0000000000000000000000000000000000000000..0a009eeca2a58ffe1faf64fecd83c66d0b2d4da2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSO @@ -0,0 +1 @@ +CONFIG_USB_HSO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HUB_USB251XB b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HUB_USB251XB new file mode 100644 index 0000000000000000000000000000000000000000..5507c061fd81cfdc6090981afce065f99216abcb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HUB_USB251XB @@ -0,0 +1 @@ +# CONFIG_USB_HUB_USB251XB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IDMOUSE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IDMOUSE new file mode 100644 index 0000000000000000000000000000000000000000..8b54088757d7cdddff477c8770f7e6da068e4206 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IDMOUSE @@ -0,0 +1 @@ +CONFIG_USB_IDMOUSE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IOWARRIOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IOWARRIOR new file mode 100644 index 0000000000000000000000000000000000000000..7bf2b0c653f108b63e287cc03944d4102d5fd274 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IOWARRIOR @@ -0,0 +1 @@ +CONFIG_USB_IOWARRIOR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IPHETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IPHETH new file mode 100644 index 0000000000000000000000000000000000000000..9aa5dc3442d73b94e7b994ea9c897df2a7590009 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IPHETH @@ -0,0 +1 @@ +CONFIG_USB_IPHETH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISIGHTFW b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISIGHTFW new file mode 100644 index 0000000000000000000000000000000000000000..80ead4f41ccb1cb47b1bb7fd6240c03e982b5993 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISIGHTFW @@ -0,0 +1 @@ +CONFIG_USB_ISIGHTFW=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP116X_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP116X_HCD new file mode 100644 index 0000000000000000000000000000000000000000..bd16b51652b0f757daceff3835637aa16d6ebe45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP116X_HCD @@ -0,0 +1 @@ +# CONFIG_USB_ISP116X_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1301 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1301 new file mode 100644 index 0000000000000000000000000000000000000000..8a6cb284e0b94c1aca6d696ef5d00768eeaa9b15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1301 @@ -0,0 +1 @@ +# CONFIG_USB_ISP1301 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1760 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1760 new file mode 100644 index 0000000000000000000000000000000000000000..aed47c8aa1bf9e15673018b70777ce75410bdadf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1760 @@ -0,0 +1 @@ +# CONFIG_USB_ISP1760 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KAWETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KAWETH new file mode 100644 index 0000000000000000000000000000000000000000..daed8dbd41354c91e00a937ac684fd9718583900 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KAWETH @@ -0,0 +1 @@ +CONFIG_USB_KAWETH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KC2190 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KC2190 new file mode 100644 index 0000000000000000000000000000000000000000..bd099751cb763fbab0fe451c4fc5387a1674c850 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KC2190 @@ -0,0 +1 @@ +CONFIG_USB_KC2190=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LAN78XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LAN78XX new file mode 100644 index 0000000000000000000000000000000000000000..6d409c13e26bc7dede57beb11e915d3d1d5f7121 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LAN78XX @@ -0,0 +1 @@ +CONFIG_USB_LAN78XX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LCD new file mode 100644 index 0000000000000000000000000000000000000000..b87994bf232e4515bbb60c35c62d3cd1be11fdfa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LCD @@ -0,0 +1 @@ +CONFIG_USB_LCD=m 
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LD new file mode 100644 index 0000000000000000000000000000000000000000..ba993ee02d8d94842ba05daeaabfcf975d59ae4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LD @@ -0,0 +1 @@ +CONFIG_USB_LD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEDS_TRIGGER_USBPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEDS_TRIGGER_USBPORT new file mode 100644 index 0000000000000000000000000000000000000000..9d83c48a4a2130460e531a8a2861129b3cb8b8bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEDS_TRIGGER_USBPORT @@ -0,0 +1 @@ +CONFIG_USB_LEDS_TRIGGER_USBPORT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LED_TRIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LED_TRIG new file mode 100644 index 0000000000000000000000000000000000000000..28127f785c68620ac71655e46de976abb429d6db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LED_TRIG @@ -0,0 +1 @@ +CONFIG_USB_LED_TRIG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEGOTOWER b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEGOTOWER new file mode 100644 index 0000000000000000000000000000000000000000..4f0188a9ab0859a0c98bda09daeb999fb0900b92 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEGOTOWER @@ -0,0 +1 @@ +CONFIG_USB_LEGOTOWER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LINK_LAYER_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LINK_LAYER_TEST new file mode 100644 index 0000000000000000000000000000000000000000..0e4389a3db5d75b3529668442dc37962cd78981f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LINK_LAYER_TEST @@ -0,0 +1 @@ +# CONFIG_USB_LINK_LAYER_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MAX3421_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MAX3421_HCD new file mode 100644 index 0000000000000000000000000000000000000000..51b1205bf53a8dfc506eb0f9c236cc8753a9242c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MAX3421_HCD @@ -0,0 +1 @@ +# CONFIG_USB_MAX3421_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MDC800 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MDC800 new file mode 100644 index 0000000000000000000000000000000000000000..bc7b0bfd8dc69a84531d33d03225b81302fc3fb9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MDC800 @@ -0,0 +1 @@ +CONFIG_USB_MDC800=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MICROTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MICROTEK new file mode 100644 index 0000000000000000000000000000000000000000..94f288329da25fde43df33cd00719ba98384d289 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MICROTEK @@ -0,0 +1 @@ +CONFIG_USB_MICROTEK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MON b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MON new file mode 100644 index 0000000000000000000000000000000000000000..330d7225cbf6414e36688f1effe0b16cc32c4f86 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MON @@ -0,0 +1 @@ +CONFIG_USB_MON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MUSB_HDRC b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MUSB_HDRC new file mode 100644 index 0000000000000000000000000000000000000000..ae1dea5126608167c95503ddc7092a2c2fed15eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MUSB_HDRC @@ -0,0 +1 @@ +# CONFIG_USB_MUSB_HDRC is not set diff 
--git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AQC111 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AQC111 new file mode 100644 index 0000000000000000000000000000000000000000..dd5f4e15149ef7f0ca486ae0c1ed4c6c32df5da4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AQC111 @@ -0,0 +1 @@ +# CONFIG_USB_NET_AQC111 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX88179_178A b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX88179_178A new file mode 100644 index 0000000000000000000000000000000000000000..da5aa15b2e67b7ccde410501feeeef125833a72d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX88179_178A @@ -0,0 +1 @@ +CONFIG_USB_NET_AX88179_178A=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX8817X b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX8817X new file mode 100644 index 0000000000000000000000000000000000000000..3ac11e4ec0811195b3b72444322146edbd2fe81f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX8817X @@ -0,0 +1 @@ +CONFIG_USB_NET_AX8817X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDCETHER b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDCETHER new file mode 100644 index 0000000000000000000000000000000000000000..87452e10974460fb34936c7d06e2d4a86bab2e17 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDCETHER @@ -0,0 +1 @@ +CONFIG_USB_NET_CDCETHER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_EEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_EEM new file mode 100644 index 0000000000000000000000000000000000000000..73bbe6213088aadeac6a7754862db46523849e63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_EEM @@ -0,0 +1 @@ +CONFIG_USB_NET_CDC_EEM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_MBIM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_MBIM new file mode 100644 index 0000000000000000000000000000000000000000..a728babfc784fd2bcfd8a4886e6a9dd05ca5b1bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_MBIM @@ -0,0 +1 @@ +CONFIG_USB_NET_CDC_MBIM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_NCM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_NCM new file mode 100644 index 0000000000000000000000000000000000000000..4c6e461cbc532b0e4a5af7a48e6e6c71057139d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_NCM @@ -0,0 +1 @@ +CONFIG_USB_NET_CDC_NCM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET new file mode 100644 index 0000000000000000000000000000000000000000..f54b326ead735df443ecefce832428e9fc316db5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET @@ -0,0 +1 @@ +CONFIG_USB_NET_CDC_SUBSET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET_ENABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET_ENABLE new file mode 100644 index 0000000000000000000000000000000000000000..5d3aa7ce08af1df2f23f1069da1599b2bc7c84e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET_ENABLE @@ -0,0 +1 @@ +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CH9200 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CH9200 new file mode 100644 index 0000000000000000000000000000000000000000..ce29732451a68baa40b4b161d335d86389f4fe35 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CH9200 @@ -0,0 +1 @@ +CONFIG_USB_NET_CH9200=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CX82310_ETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CX82310_ETH new file mode 100644 index 0000000000000000000000000000000000000000..13aaf6f75c304704dab6fd0c0660ffaf1be13b9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CX82310_ETH @@ -0,0 +1 @@ +CONFIG_USB_NET_CX82310_ETH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_DM9601 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_DM9601 new file mode 100644 index 0000000000000000000000000000000000000000..7227f76861b8349670a9b6aa4107d61a08ec8dc7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_DM9601 @@ -0,0 +1 @@ +CONFIG_USB_NET_DM9601=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_GL620A b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_GL620A new file mode 100644 index 0000000000000000000000000000000000000000..8a950c494acaddb82f9fabab515658533d91ddc3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_GL620A @@ -0,0 +1 @@ +CONFIG_USB_NET_GL620A=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_HUAWEI_CDC_NCM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_HUAWEI_CDC_NCM new file mode 100644 index 0000000000000000000000000000000000000000..16acb23012bd87ecb5d6eb7bc61115bd00ae15fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_HUAWEI_CDC_NCM @@ -0,0 +1 @@ +CONFIG_USB_NET_HUAWEI_CDC_NCM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_INT51X1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_INT51X1 new file mode 100644 index 0000000000000000000000000000000000000000..a2526e648818d6087741f361dbdeb1c7007dec1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_INT51X1 @@ -0,0 +1 @@ +CONFIG_USB_NET_INT51X1=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_KALMIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_KALMIA new file mode 100644 index 0000000000000000000000000000000000000000..a92ab21e5a19da33f98dc4b36d3252543c1d46cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_KALMIA @@ -0,0 +1 @@ +CONFIG_USB_NET_KALMIA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_MCS7830 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_MCS7830 new file mode 100644 index 0000000000000000000000000000000000000000..0c6fd98a509008a7cfccd1a7759c8d4a1a45d87b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_MCS7830 @@ -0,0 +1 @@ +CONFIG_USB_NET_MCS7830=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_NET1080 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_NET1080 new file mode 100644 index 0000000000000000000000000000000000000000..0ad1bc9be042f27205a6ecf19aad0a8265d74af5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_NET1080 @@ -0,0 +1 @@ +CONFIG_USB_NET_NET1080=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_PLUSB b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_PLUSB new file mode 100644 index 0000000000000000000000000000000000000000..f87e9896eafbdfc1bd632a661117a648cc3adc13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_PLUSB @@ -0,0 +1 @@ +CONFIG_USB_NET_PLUSB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_QMI_WWAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_QMI_WWAN new file mode 100644 index 
0000000000000000000000000000000000000000..6be46c7d203437e24e422a7e63cb8e83dabf5fc7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_QMI_WWAN @@ -0,0 +1 @@ +CONFIG_USB_NET_QMI_WWAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_RNDIS_HOST b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_RNDIS_HOST new file mode 100644 index 0000000000000000000000000000000000000000..58c44e88a6a5651322ceaf6886e8eb80eae275a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_RNDIS_HOST @@ -0,0 +1 @@ +CONFIG_USB_NET_RNDIS_HOST=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC75XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC75XX new file mode 100644 index 0000000000000000000000000000000000000000..3011223ad23008f76faefb653f68b0128b4b89ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC75XX @@ -0,0 +1 @@ +CONFIG_USB_NET_SMSC75XX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC95XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC95XX new file mode 100644 index 0000000000000000000000000000000000000000..479b43b1d4a310a3762334df00e99a0612a5aa10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC95XX @@ -0,0 +1 @@ +CONFIG_USB_NET_SMSC95XX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SR9800 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SR9800 new file mode 100644 index 0000000000000000000000000000000000000000..3caca281c289da435612405532773daf93e0cb2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SR9800 @@ -0,0 +1 @@ +# CONFIG_USB_NET_SR9800 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_ZAURUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_ZAURUS new file mode 100644 index 0000000000000000000000000000000000000000..00dfbaf3b103ce7e253b755b9f53ef910426df93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_ZAURUS @@ -0,0 +1 @@ +CONFIG_USB_NET_ZAURUS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD new file mode 100644 index 0000000000000000000000000000000000000000..59f6d5fd5b344d0ef13d758a459dfab3783bc65f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD @@ -0,0 +1 @@ +CONFIG_USB_OHCI_HCD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PCI new file mode 100644 index 0000000000000000000000000000000000000000..a78b62cb547ecb711b7289436dd72d206d9ad5c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PCI @@ -0,0 +1 @@ +CONFIG_USB_OHCI_HCD_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PLATFORM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PLATFORM new file mode 100644 index 0000000000000000000000000000000000000000..c35b2cdd60801c354665a37fa0335908ba1a62c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PLATFORM @@ -0,0 +1 @@ +# CONFIG_USB_OHCI_HCD_PLATFORM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_LITTLE_ENDIAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_LITTLE_ENDIAN new file mode 100644 index 0000000000000000000000000000000000000000..ae53877ea536a24d57403f585bcd58a98443ab49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_LITTLE_ENDIAN @@ -0,0 +1 @@ +CONFIG_USB_OHCI_LITTLE_ENDIAN=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG new file mode 100644 index 0000000000000000000000000000000000000000..b4ad21e4876e364bb2575037c445e637ffb420d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG @@ -0,0 +1 @@ +# CONFIG_USB_OTG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG_PRODUCTLIST b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG_PRODUCTLIST new file mode 100644 index 0000000000000000000000000000000000000000..a9ccf6a0ef8f30fca1d54cc82fe488f856c10776 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG_PRODUCTLIST @@ -0,0 +1 @@ +# CONFIG_USB_OTG_PRODUCTLIST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OXU210HP_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OXU210HP_HCD new file mode 100644 index 0000000000000000000000000000000000000000..4f1efda3771606a5b86c522b6b8260cb4c9047c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OXU210HP_HCD @@ -0,0 +1 @@ +# CONFIG_USB_OXU210HP_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PEGASUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PEGASUS new file mode 100644 index 0000000000000000000000000000000000000000..3218ed61fad0b0963a89e32c4a9278984e0a4849 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PEGASUS @@ -0,0 +1 @@ +CONFIG_USB_PEGASUS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PRINTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PRINTER new file mode 100644 index 0000000000000000000000000000000000000000..108d00bea8fe753af71f4bbf1a1c0daee32133f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PRINTER @@ -0,0 +1 @@ +CONFIG_USB_PRINTER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_R8A66597_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_R8A66597_HCD new file mode 100644 index 0000000000000000000000000000000000000000..70dcc7701792d89e5e46490dac40f7ec3673287a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_R8A66597_HCD @@ -0,0 +1 @@ +# CONFIG_USB_R8A66597_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ROLE_SWITCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ROLE_SWITCH new file mode 100644 index 0000000000000000000000000000000000000000..6a168b92696b8f8b050b19d6ecf5afa00d3cea5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ROLE_SWITCH @@ -0,0 +1 @@ +CONFIG_USB_ROLE_SWITCH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8150 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8150 new file mode 100644 index 0000000000000000000000000000000000000000..29f740fcfdbc60a59ce6eb06fb2f0069072c8861 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8150 @@ -0,0 +1 @@ +CONFIG_USB_RTL8150=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8152 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8152 new file mode 100644 index 0000000000000000000000000000000000000000..66869c0244eb666cb87ac2af1e1f2b28316f986f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8152 @@ -0,0 +1 @@ +CONFIG_USB_RTL8152=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8153_ECM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8153_ECM new file mode 100644 index 0000000000000000000000000000000000000000..3ec37682be5cbb22b955faa5a264a07d8ec82304 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8153_ECM @@ -0,0 +1 @@ +CONFIG_USB_RTL8153_ECM=m 
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL new file mode 100644 index 0000000000000000000000000000000000000000..e5f3a6d27d3f9677bfcef06d6ebe4886e7bc4916 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL @@ -0,0 +1 @@ +CONFIG_USB_SERIAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_AIRCABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_AIRCABLE new file mode 100644 index 0000000000000000000000000000000000000000..bbe37bf7578f86d0916a69c3fff6ee286ee32ebe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_AIRCABLE @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_AIRCABLE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_ARK3116 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_ARK3116 new file mode 100644 index 0000000000000000000000000000000000000000..285aeacc9d40b08fb742d54319f16eb465919de9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_ARK3116 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_ARK3116=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_BELKIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_BELKIN new file mode 100644 index 0000000000000000000000000000000000000000..313844222fdc6252d598ad942ac9633a7b65bb5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_BELKIN @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_BELKIN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CH341 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CH341 new file mode 100644 index 0000000000000000000000000000000000000000..7b09e0d904f07db8591d59286a49bd85e91d5210 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CH341 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_CH341=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CP210X b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CP210X new file mode 100644 index 0000000000000000000000000000000000000000..18e8641d54da572a09b580ce1ef4d6ec361f29d5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CP210X @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_CP210X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYBERJACK b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYBERJACK new file mode 100644 index 0000000000000000000000000000000000000000..3681a379785dba7a744ba2b805e09131ad8658b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYBERJACK @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_CYBERJACK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYPRESS_M8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYPRESS_M8 new file mode 100644 index 0000000000000000000000000000000000000000..b9f46a74260fbbd0d4f1009a48e331fae110411f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYPRESS_M8 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_CYPRESS_M8=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..896ed4ca6f7adfd56302e891e846e8260763c5dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DEBUG @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_DEBUG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DIGI_ACCELEPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DIGI_ACCELEPORT new file mode 100644 index 
0000000000000000000000000000000000000000..5617d88f807a0806b4c32001cb3c9a059dba36c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DIGI_ACCELEPORT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT new file mode 100644 index 0000000000000000000000000000000000000000..bba8a43e8566904c343a0cb26ff6f570b2f7f932 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_EDGEPORT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT_TI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT_TI new file mode 100644 index 0000000000000000000000000000000000000000..06184eb4d4e0bfa41fd2e1331cac9ae7503f8f1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT_TI @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_EDGEPORT_TI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EMPEG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EMPEG new file mode 100644 index 0000000000000000000000000000000000000000..139e9f9aaa8794cb59af0697840c150d1d2f85e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EMPEG @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_EMPEG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F81232 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F81232 new file mode 100644 index 0000000000000000000000000000000000000000..c02ccac8388a5beeed7a9fbdc5c21286756a87e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F81232 @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_F81232 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F8153X b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F8153X new file mode 100644 index 0000000000000000000000000000000000000000..94dd9f277e36c2baefd730d5604812f390c53e4c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F8153X @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_F8153X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_FTDI_SIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_FTDI_SIO new file mode 100644 index 0000000000000000000000000000000000000000..0d2907aaa19a112deabca5ea9dcaf96a5508d0b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_FTDI_SIO @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_FTDI_SIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GARMIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GARMIN new file mode 100644 index 0000000000000000000000000000000000000000..81637b4917f83308c327fe8479e9723c464bcea0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GARMIN @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_GARMIN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GENERIC new file mode 100644 index 0000000000000000000000000000000000000000..ad338db8137899ff5b8292ffd3735da6f95d15d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GENERIC @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPAQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPAQ new file mode 100644 index 0000000000000000000000000000000000000000..3a435bc5336358cd7f48151c54c04e3b644762ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPAQ @@ -0,0 +1 @@ 
+CONFIG_USB_SERIAL_IPAQ=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPW b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPW new file mode 100644 index 0000000000000000000000000000000000000000..43c1be4f98d68ee5c87b0ebc067e6ef4d3ff955d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPW @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_IPW=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IR b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IR new file mode 100644 index 0000000000000000000000000000000000000000..5e8e417681d8783611584e3a82621de797f568eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IR @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_IR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IUU b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IUU new file mode 100644 index 0000000000000000000000000000000000000000..e2b601afd731d51102c9bdb310213e0d671452a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IUU @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_IUU=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN new file mode 100644 index 0000000000000000000000000000000000000000..a09ff3f77a6d75a5568f44f7287fccf6b8eed795 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_KEYSPAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN_PDA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN_PDA new file mode 100644 index 0000000000000000000000000000000000000000..fe57af002db0e4930fa19fa2e248f7fae5b31a9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN_PDA @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_KEYSPAN_PDA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KLSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KLSI new file mode 100644 index 0000000000000000000000000000000000000000..6b920b85920b72a98093b202b67d41111d55f287 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KLSI @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_KLSI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KOBIL_SCT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KOBIL_SCT new file mode 100644 index 0000000000000000000000000000000000000000..12f7217e23fbfabfb10524463a421b66ca026f91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KOBIL_SCT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_KOBIL_SCT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MCT_U232 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MCT_U232 new file mode 100644 index 0000000000000000000000000000000000000000..ee2c0eebbe8a90e1c48aa279ae0a4542f8135706 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MCT_U232 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_MCT_U232=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_METRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_METRO new file mode 100644 index 0000000000000000000000000000000000000000..e18da6c48212bd7b2056512ab3159bd75c0cec75 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_METRO @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_METRO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7720 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7720 new file mode 100644 index 
0000000000000000000000000000000000000000..221f04db688cc75f67b92ae5cde13f4b965099a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7720 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_MOS7720=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7840 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7840 new file mode 100644 index 0000000000000000000000000000000000000000..d47711d40285e87459c0452ade24d798e7ac94b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7840 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_MOS7840=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MXUPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MXUPORT new file mode 100644 index 0000000000000000000000000000000000000000..22d9816009f5bb67c82e3efe9f70cfd38906fb6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MXUPORT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_MXUPORT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_NAVMAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_NAVMAN new file mode 100644 index 0000000000000000000000000000000000000000..76073f7f8bc819f91d6d7acb20b2fe4492f0809b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_NAVMAN @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_NAVMAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OMNINET b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OMNINET new file mode 100644 index 0000000000000000000000000000000000000000..3185812058b744b5341e36b2fb6bd3c58c80c102 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OMNINET @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_OMNINET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTICON b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTICON new file mode 100644 index 0000000000000000000000000000000000000000..697554b8bffd3838ad13fdbf59b7c34efcfb1512 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTICON @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_OPTICON=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTION new file mode 100644 index 0000000000000000000000000000000000000000..6c05eb345fef4edf3c658ec4ed066d6d257906d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTION @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_OPTION=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OTI6858 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OTI6858 new file mode 100644 index 0000000000000000000000000000000000000000..051bd0fa41c3d970a7267be1891ac3db6e34d691 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OTI6858 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_OTI6858=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_PL2303 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_PL2303 new file mode 100644 index 0000000000000000000000000000000000000000..5b99f3ed0f86c76020a25618036b2e483a65d70e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_PL2303 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_PL2303=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QCAUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QCAUX new file mode 100644 index 0000000000000000000000000000000000000000..1c898ff4ad839625f3079f38ad3ad7074dc73bf0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QCAUX @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_QCAUX=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QT2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QT2 new file mode 100644 index 0000000000000000000000000000000000000000..16c67f4eaafbd083c78c45531533dde0e73af039 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QT2 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_QT2=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QUALCOMM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QUALCOMM new file mode 100644 index 0000000000000000000000000000000000000000..26e5667b2ab54a5cae87c8c0669416cea8f5c0e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QUALCOMM @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_QUALCOMM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE new file mode 100644 index 0000000000000000000000000000000000000000..687dfe214671f1c0004fc58af073b4592dcfada6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SAFE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE_PADDED b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE_PADDED new file mode 100644 index 0000000000000000000000000000000000000000..7af538c757af32e914456b4263921104c9be909b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE_PADDED @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SAFE_PADDED=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SIERRAWIRELESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SIERRAWIRELESS new file mode 100644 index 0000000000000000000000000000000000000000..4f35e2616f082b442206f95e30d9af0f3f64b32e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SIERRAWIRELESS @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SIERRAWIRELESS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SPCP8X5 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SPCP8X5 new file mode 100644 index 0000000000000000000000000000000000000000..97176567f273c324523e22008d490ea7badca21b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SPCP8X5 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SPCP8X5=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SSU100 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SSU100 new file mode 100644 index 0000000000000000000000000000000000000000..5d35a493b08a46a4845a372cfb874c6c4d1c9911 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SSU100 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SSU100=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SYMBOL b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SYMBOL new file mode 100644 index 0000000000000000000000000000000000000000..2f3118f1233b4173bdfeaaffe0bfe0ff3b52c308 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SYMBOL @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SYMBOL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_TI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_TI new file mode 100644 index 0000000000000000000000000000000000000000..eb82f9ed79927b9a6f4d82927971e92d6aba9c8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_TI @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_TI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_UPD78F0730 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_UPD78F0730 new file mode 100644 index 
0000000000000000000000000000000000000000..8258d573c7f792d9a839d24739bd92845939c55f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_UPD78F0730 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_UPD78F0730=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_VISOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_VISOR new file mode 100644 index 0000000000000000000000000000000000000000..51a00636b74ade7871bd8189c948447ad54f949e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_VISOR @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_VISOR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WHITEHEAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WHITEHEAT new file mode 100644 index 0000000000000000000000000000000000000000..38ca17549e29839d55e23fedc6c09eb1d2c09e73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WHITEHEAT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_WHITEHEAT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WISHBONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WISHBONE new file mode 100644 index 0000000000000000000000000000000000000000..975ebe400d0b67e637a4a057162f20c07ea19ede --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WISHBONE @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_WISHBONE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WWAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WWAN new file mode 100644 index 0000000000000000000000000000000000000000..7ff3107ac9751ce26ca1fd70c4ef93cb8c66437e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WWAN @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_WWAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XR b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XR new file mode 100644 index 0000000000000000000000000000000000000000..bd445c4e83c059ce1212ee3741f7b771c0cc21e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XR @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_XR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XSENS_MT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XSENS_MT new file mode 100644 index 0000000000000000000000000000000000000000..bf74a07342a79320ede93e25d5a6520a3a138efa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XSENS_MT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_XSENS_MT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SEVSEG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SEVSEG new file mode 100644 index 0000000000000000000000000000000000000000..161fcb9b919b943d1bf946b2dfbeb9a49c2d47d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SEVSEG @@ -0,0 +1 @@ +CONFIG_USB_SEVSEG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SIERRA_NET b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SIERRA_NET new file mode 100644 index 0000000000000000000000000000000000000000..310a9f31aae418b6027eb8a8783ca578eb5e482d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SIERRA_NET @@ -0,0 +1 @@ +CONFIG_USB_SIERRA_NET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SISUSBVGA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SISUSBVGA new file mode 100644 index 0000000000000000000000000000000000000000..7efda84c1bbdb82c03b532e099ccfd14dd6f84c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SISUSBVGA @@ -0,0 +1 @@ +CONFIG_USB_SISUSBVGA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SL811_HCD 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SL811_HCD new file mode 100644 index 0000000000000000000000000000000000000000..73078fbbce444a3ac067af0e6cf1527fbf10701e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SL811_HCD @@ -0,0 +1 @@ +# CONFIG_USB_SL811_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE new file mode 100644 index 0000000000000000000000000000000000000000..c99c095dfa35730450e8805445cb2c9c571f326c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE @@ -0,0 +1 @@ +CONFIG_USB_STORAGE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ALAUDA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ALAUDA new file mode 100644 index 0000000000000000000000000000000000000000..9f4bfdeac483a1ce7fc6363ac63d37cac3382300 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ALAUDA @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_ALAUDA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_CYPRESS_ATACB b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_CYPRESS_ATACB new file mode 100644 index 0000000000000000000000000000000000000000..3e095761b3510e2dbacab421632a9d043a8b9d8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_CYPRESS_ATACB @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_CYPRESS_ATACB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DATAFAB b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DATAFAB new file mode 100644 index 0000000000000000000000000000000000000000..92eb4beb3cbe2b593b4fc171576313040651e818 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DATAFAB @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_DATAFAB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..3a8be6ac1a06259537be11687c856bcd35e3a876 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DEBUG @@ -0,0 +1 @@ +# CONFIG_USB_STORAGE_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ENE_UB6250 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ENE_UB6250 new file mode 100644 index 0000000000000000000000000000000000000000..64edcc2aca9ff6928824d0a380737551dfb52d45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ENE_UB6250 @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_ENE_UB6250=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_FREECOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_FREECOM new file mode 100644 index 0000000000000000000000000000000000000000..44ffc7edc1e7dee5015b017c5cb6f16e7644b0d5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_FREECOM @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_FREECOM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ISD200 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ISD200 new file mode 100644 index 0000000000000000000000000000000000000000..c9604f3463cc883748f2be7064e48e56972a5cd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ISD200 @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_ISD200=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_JUMPSHOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_JUMPSHOT new file mode 100644 index 0000000000000000000000000000000000000000..3753489e015d6118bca3a8ab38439610060c1bd3 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_JUMPSHOT @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_JUMPSHOT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_KARMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_KARMA new file mode 100644 index 0000000000000000000000000000000000000000..a3fcafa6348f205ade601835d662ce43a27f4e04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_KARMA @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_KARMA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ONETOUCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ONETOUCH new file mode 100644 index 0000000000000000000000000000000000000000..de105d95fd194af6ca2bc441f69267549db941f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ONETOUCH @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_ONETOUCH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_REALTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_REALTEK new file mode 100644 index 0000000000000000000000000000000000000000..d726f0eff6f1f0bea322a6f51d60903ea2e5e76c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_REALTEK @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_REALTEK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR09 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR09 new file mode 100644 index 0000000000000000000000000000000000000000..4336ce368a49af61b70f24fe7f666396a167e1b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR09 @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_SDDR09=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR55 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR55 new file mode 100644 index 0000000000000000000000000000000000000000..18e63e8218d207194d859dcc06a15a2e4e9123f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR55 @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_SDDR55=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_USBAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_USBAT new file mode 100644 index 0000000000000000000000000000000000000000..f5309cd3d21ecf63d37acaf6b62fb8ad552b069a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_USBAT @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_USBAT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TEST new file mode 100644 index 0000000000000000000000000000000000000000..d11d0fdaadbf8f1c68af3880aa9a4d804daa089a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TEST @@ -0,0 +1 @@ +# CONFIG_USB_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TMC new file mode 100644 index 0000000000000000000000000000000000000000..1215a85c275eba1895a3dd50caa6cca17c82fb89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TMC @@ -0,0 +1 @@ +CONFIG_USB_TMC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TRANCEVIBRATOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TRANCEVIBRATOR new file mode 100644 index 0000000000000000000000000000000000000000..f30a7b06687a71c1dc31f051ee73e640d532588b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TRANCEVIBRATOR @@ -0,0 +1 @@ +# CONFIG_USB_TRANCEVIBRATOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UAS new file mode 100644 index 
0000000000000000000000000000000000000000..8d98eeae11811563bf8c433597f93d80527b2a71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UAS @@ -0,0 +1 @@ +CONFIG_USB_UAS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UEAGLEATM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UEAGLEATM new file mode 100644 index 0000000000000000000000000000000000000000..42aac423b2bd9ea47534ac4f8336532a74e8fc4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UEAGLEATM @@ -0,0 +1 @@ +CONFIG_USB_UEAGLEATM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_USBNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_USBNET new file mode 100644 index 0000000000000000000000000000000000000000..587a08dc5124d064e576c38f53bce94af56d242c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_USBNET @@ -0,0 +1 @@ +CONFIG_USB_USBNET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_VL600 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_VL600 new file mode 100644 index 0000000000000000000000000000000000000000..7f1be44f559d225ff01ef16decbbd04a1bf6661b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_VL600 @@ -0,0 +1 @@ +CONFIG_USB_VL600=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_WDM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_WDM new file mode 100644 index 0000000000000000000000000000000000000000..5f264ab428c743a3542ab7d25796dcc83b190da7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_WDM @@ -0,0 +1 @@ +CONFIG_USB_WDM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_HCD new file mode 100644 index 0000000000000000000000000000000000000000..ae4294baf6118a4c3f265e3d8f8d7cbfa5f7286d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_HCD @@ -0,0 +1 @@ +CONFIG_USB_XHCI_HCD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI new file mode 100644 index 0000000000000000000000000000000000000000..69eea98dae52ba03d11c55963147e6cb4a57fcaa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI @@ -0,0 +1 @@ +CONFIG_USB_XHCI_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI_RENESAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI_RENESAS new file mode 100644 index 0000000000000000000000000000000000000000..0d81c3d4d18642df28e260f273ffa12a813c8fae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI_RENESAS @@ -0,0 +1 @@ +# CONFIG_USB_XHCI_PCI_RENESAS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XUSBATM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XUSBATM new file mode 100644 index 0000000000000000000000000000000000000000..6c567c836e8067a0906278dcb2bf952833080f77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XUSBATM @@ -0,0 +1 @@ +CONFIG_USB_XUSBATM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_YUREX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_YUREX new file mode 100644 index 0000000000000000000000000000000000000000..71cffb1a41f08fa6cd19a8b3f653f1d164c3343c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_YUREX @@ -0,0 +1 @@ +# CONFIG_USB_YUREX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USERIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_USERIO new file mode 100644 index 0000000000000000000000000000000000000000..994335413f23eb0242d08ac409ad8deceaedd329 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USERIO @@ -0,0 +1 @@ +# CONFIG_USERIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_DECRYPTED_DATA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_DECRYPTED_DATA new file mode 100644 index 0000000000000000000000000000000000000000..b2e46dd0575e2be7481ad5eff8553743b618c871 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_DECRYPTED_DATA @@ -0,0 +1 @@ +# CONFIG_USER_DECRYPTED_DATA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_EVENTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_EVENTS new file mode 100644 index 0000000000000000000000000000000000000000..3dab01b676c4dbb0abc3d04c9d331a0c7c513fea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_EVENTS @@ -0,0 +1 @@ +# CONFIG_USER_EVENTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USE_PERCPU_NUMA_NODE_ID b/anolis/configs/L2-OPTIONAL/default/CONFIG_USE_PERCPU_NUMA_NODE_ID new file mode 100644 index 0000000000000000000000000000000000000000..90c9b33d52c0ee98a5774d854602b304d268902f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USE_PERCPU_NUMA_NODE_ID @@ -0,0 +1 @@ +CONFIG_USE_PERCPU_NUMA_NODE_ID=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VALIDATE_FS_PARSER b/anolis/configs/L2-OPTIONAL/default/CONFIG_VALIDATE_FS_PARSER new file mode 100644 index 0000000000000000000000000000000000000000..e1915bb4a4e8d511142c9fca4d800048486e7539 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VALIDATE_FS_PARSER @@ -0,0 +1 @@ +# CONFIG_VALIDATE_FS_PARSER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_IOMMU_TYPE1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_IOMMU_TYPE1 new file mode 100644 index 0000000000000000000000000000000000000000..0c5602392c328f0258286da07a6a1b0d5e90d44b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_IOMMU_TYPE1 @@ -0,0 +1 @@ +CONFIG_VFIO_IOMMU_TYPE1=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_INTX b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_INTX new file mode 100644 index 0000000000000000000000000000000000000000..d58708279a2b007615ce4f17b3394770bb28f92a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_INTX @@ -0,0 +1 @@ +CONFIG_VFIO_PCI_INTX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_MMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_MMAP new file mode 100644 index 0000000000000000000000000000000000000000..4a77d0d3219e6d6992c30e634bfb287340abe750 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_MMAP @@ -0,0 +1 @@ +CONFIG_VFIO_PCI_MMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_VIRQFD b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_VIRQFD new file mode 100644 index 0000000000000000000000000000000000000000..af7e2cab380c30af29aea19cc002b5e70627a350 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_VIRQFD @@ -0,0 +1 @@ +CONFIG_VFIO_VIRQFD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST new file mode 100644 index 0000000000000000000000000000000000000000..391e6cb1a17b0c6b411d620b4b048fc9b17c9b3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST @@ -0,0 +1 @@ +CONFIG_VHOST=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_CROSS_ENDIAN_LEGACY b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_CROSS_ENDIAN_LEGACY new file mode 100644 index 
0000000000000000000000000000000000000000..e7cbfa39a4f716a6107e7964d24832592ecdec99 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_CROSS_ENDIAN_LEGACY @@ -0,0 +1 @@ +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_IOTLB b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_IOTLB new file mode 100644 index 0000000000000000000000000000000000000000..cf2867757f6a2597521b3784ffbfa2eba7fbc937 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_IOTLB @@ -0,0 +1 @@ +CONFIG_VHOST_IOTLB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_TASK b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_TASK new file mode 100644 index 0000000000000000000000000000000000000000..662156d8439a214144bf63edf3c7cc10b5f352a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_TASK @@ -0,0 +1 @@ +CONFIG_VHOST_TASK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_CMDLINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_CMDLINE new file mode 100644 index 0000000000000000000000000000000000000000..b9b499925593da3f2798a66f9726e98e2bf1c51c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_CMDLINE @@ -0,0 +1 @@ +CONFIG_VIDEO_CMDLINE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_NOMODESET b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_NOMODESET new file mode 100644 index 0000000000000000000000000000000000000000..6fa3400c72883d88c079f866c0810a33bd2ec2c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_NOMODESET @@ -0,0 +1 @@ +CONFIG_VIDEO_NOMODESET=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_ANCHOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_ANCHOR new file mode 100644 index 0000000000000000000000000000000000000000..55b32c7aef56eadb56f2522b1972c662c1a6f7d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_ANCHOR @@ -0,0 +1 @@ +CONFIG_VIRTIO_ANCHOR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_IOMMU b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_IOMMU new file mode 100644 index 0000000000000000000000000000000000000000..ecd7366a12be039b8752c70b4fef8dc4a68a1be5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_IOMMU @@ -0,0 +1 @@ +# CONFIG_VIRTIO_IOMMU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRT_CPU_ACCOUNTING b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRT_CPU_ACCOUNTING new file mode 100644 index 0000000000000000000000000000000000000000..e176b462a608ea70de7a1c419f101da00d2efc83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRT_CPU_ACCOUNTING @@ -0,0 +1 @@ +CONFIG_VIRT_CPU_ACCOUNTING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VITESSE_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_VITESSE_PHY new file mode 100644 index 0000000000000000000000000000000000000000..98b7defb93cea93df910fa4b5a65678d4b3b452c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VITESSE_PHY @@ -0,0 +1 @@ +CONFIG_VITESSE_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q new file mode 100644 index 0000000000000000000000000000000000000000..1cb92b9cd0c8f06abca70f52359c483588b3fbd5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q @@ -0,0 +1 @@ +CONFIG_VLAN_8021Q=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_GVRP b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_GVRP new file mode 100644 index 
0000000000000000000000000000000000000000..dd976efa2c973aeeb8ea140a3da4445b069b4904 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_GVRP @@ -0,0 +1 @@ +CONFIG_VLAN_8021Q_GVRP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_MVRP b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_MVRP new file mode 100644 index 0000000000000000000000000000000000000000..c0184a3f61834a48a70ed3ba6c0cbb863a79b09c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_MVRP @@ -0,0 +1 @@ +CONFIG_VLAN_8021Q_MVRP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VT_CONSOLE_SLEEP b/anolis/configs/L2-OPTIONAL/default/CONFIG_VT_CONSOLE_SLEEP new file mode 100644 index 0000000000000000000000000000000000000000..dc6d564ef6e4c86b3323f30fbb12e931b8f1a233 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VT_CONSOLE_SLEEP @@ -0,0 +1 @@ +CONFIG_VT_CONSOLE_SLEEP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VXFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_VXFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..07d6c238c947e16386d47e91b7953acfa2742640 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VXFS_FS @@ -0,0 +1 @@ +# CONFIG_VXFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_W1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_W1 new file mode 100644 index 0000000000000000000000000000000000000000..c224e25f0cb469ea5aedf8469a6578adef771409 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_W1 @@ -0,0 +1 @@ +# CONFIG_W1 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WANXL b/anolis/configs/L2-OPTIONAL/default/CONFIG_WANXL new file mode 100644 index 0000000000000000000000000000000000000000..8fb832a9d89a01ff8de2821a7ab1df9e7c27ea30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WANXL @@ -0,0 +1 @@ +# CONFIG_WANXL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT new file mode 100644 index 0000000000000000000000000000000000000000..804787a3c403982719167bd0c29c47619890037f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT @@ -0,0 +1 @@ +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCH_QUEUE b/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCH_QUEUE new file mode 100644 index 0000000000000000000000000000000000000000..4e2060b85ff3f0b50e9a7d54d27934cb6abe4ea6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCH_QUEUE @@ -0,0 +1 @@ +# CONFIG_WATCH_QUEUE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WDTPCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_WDTPCI new file mode 100644 index 0000000000000000000000000000000000000000..9cc0d4bd549efe4d5bf0103a8e6baa9c880733ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WDTPCI @@ -0,0 +1 @@ +CONFIG_WDTPCI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WERROR b/anolis/configs/L2-OPTIONAL/default/CONFIG_WERROR new file mode 100644 index 0000000000000000000000000000000000000000..2bfe4c686d742d5930297c88c6800aaca96b16bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WERROR @@ -0,0 +1 @@ +# CONFIG_WERROR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WIRELESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_WIRELESS new file mode 100644 index 
0000000000000000000000000000000000000000..150710089702d2c0d05f464b1068d49c4d5cbb3e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WIRELESS @@ -0,0 +1 @@ +CONFIG_WIRELESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WPCM450_SOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_WPCM450_SOC new file mode 100644 index 0000000000000000000000000000000000000000..5598b82c7fbf816e2be1753f422d79f01fd563e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WPCM450_SOC @@ -0,0 +1 @@ +# CONFIG_WPCM450_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_CPU_INTENSIVE_REPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_CPU_INTENSIVE_REPORT new file mode 100644 index 0000000000000000000000000000000000000000..67231fa15e301df4c6a8fea0b628e669bb3eacd5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_CPU_INTENSIVE_REPORT @@ -0,0 +1 @@ +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_POWER_EFFICIENT_DEFAULT b/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_POWER_EFFICIENT_DEFAULT new file mode 100644 index 0000000000000000000000000000000000000000..6cf98a6238b725d347615c00c605349f0f7a474c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_POWER_EFFICIENT_DEFAULT @@ -0,0 +1 @@ +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WWAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_WWAN new file mode 100644 index 0000000000000000000000000000000000000000..55b7942e0a8a0dedfe9d5dd59a3010a1101a4165 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WWAN @@ -0,0 +1 @@ +# CONFIG_WWAN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WW_MUTEX_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_WW_MUTEX_SELFTEST new file mode 100644 index 0000000000000000000000000000000000000000..014b2354ca780f483ed782ed0df0c47d1fdd8ec6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WW_MUTEX_SELFTEST @@ -0,0 +1 @@ +# CONFIG_WW_MUTEX_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_X25 b/anolis/configs/L2-OPTIONAL/default/CONFIG_X25 new file mode 100644 index 0000000000000000000000000000000000000000..92856948214a99a7c335f31d29a2fe65bdc9344f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_X25 @@ -0,0 +1 @@ +# CONFIG_X25 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XARRAY_MULTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_XARRAY_MULTI new file mode 100644 index 0000000000000000000000000000000000000000..a4f4eb42fdb3bbbb80260bfa79989fbbbc31a23c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XARRAY_MULTI @@ -0,0 +1 @@ +CONFIG_XARRAY_MULTI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM new file mode 100644 index 0000000000000000000000000000000000000000..63f3578d2421a8ff3defb77e8a61108c89928b13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM @@ -0,0 +1 @@ +CONFIG_XFRM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_AH b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_AH new file mode 100644 index 0000000000000000000000000000000000000000..9ad8fc4e61b9f8e14febbcbced30df23fb9cd551 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_AH @@ -0,0 +1 @@ +CONFIG_XFRM_AH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ALGO b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ALGO new file mode 100644 index 
0000000000000000000000000000000000000000..ad44d0de5f307710a15db7bea65e265e2063fe53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ALGO @@ -0,0 +1 @@ +CONFIG_XFRM_ALGO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ESP b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ESP new file mode 100644 index 0000000000000000000000000000000000000000..c836db65133ecb84d1211d333bf38e049dc479b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ESP @@ -0,0 +1 @@ +CONFIG_XFRM_ESP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_IPCOMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_IPCOMP new file mode 100644 index 0000000000000000000000000000000000000000..ebcbe78807bb59dda89ef2e1279a1249d8add114 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_IPCOMP @@ -0,0 +1 @@ +CONFIG_XFRM_IPCOMP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_OFFLOAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_OFFLOAD new file mode 100644 index 0000000000000000000000000000000000000000..37cb46a2f16002348ca166aca4dca49efc818b7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_OFFLOAD @@ -0,0 +1 @@ +CONFIG_XFRM_OFFLOAD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFS_SUPPORT_ASCII_CI b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFS_SUPPORT_ASCII_CI new file mode 100644 index 0000000000000000000000000000000000000000..0116c0e57265cb2f65cb001de9775e92db3bdc12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFS_SUPPORT_ASCII_CI @@ -0,0 +1 @@ +CONFIG_XFS_SUPPORT_ASCII_CI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_AXI_EMAC b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_AXI_EMAC new file mode 100644 index 0000000000000000000000000000000000000000..9b5486d39f4dbc8cb7fc057c1315fe0d8204cbb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_AXI_EMAC @@ -0,0 +1 @@ +# CONFIG_XILINX_AXI_EMAC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_DMA new file mode 100644 index 0000000000000000000000000000000000000000..67b50b087234fa02f940b2f54a7f27fb980fa2d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_DMA @@ -0,0 +1 @@ +# CONFIG_XILINX_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_EMACLITE b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_EMACLITE new file mode 100644 index 0000000000000000000000000000000000000000..ce6f11720757635758b785c2d702106a37b3feec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_EMACLITE @@ -0,0 +1 @@ +# CONFIG_XILINX_EMACLITE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_GMII2RGMII b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_GMII2RGMII new file mode 100644 index 0000000000000000000000000000000000000000..c2901c6ea78136d14f9c0d147e0e9f0d7a9e055b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_GMII2RGMII @@ -0,0 +1 @@ +CONFIG_XILINX_GMII2RGMII=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_LL_TEMAC b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_LL_TEMAC new file mode 100644 index 0000000000000000000000000000000000000000..01a4f19cca097dec20f7245f23c31913aaf5a3c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_LL_TEMAC @@ -0,0 +1 @@ +# CONFIG_XILINX_LL_TEMAC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_SDFEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_SDFEC new file mode 
100644 index 0000000000000000000000000000000000000000..5de693e9871e45fc1c0f66458fa5464a3e2727fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_SDFEC @@ -0,0 +1 @@ +# CONFIG_XILINX_SDFEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_VCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_VCU new file mode 100644 index 0000000000000000000000000000000000000000..7ed663374be94bcebe79ec63dde12bea75aee42c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_VCU @@ -0,0 +1 @@ +# CONFIG_XILINX_VCU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_WATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..b0f810054392e09bb6f03622f8229aec2324e695 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_XILINX_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_XDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_XDMA new file mode 100644 index 0000000000000000000000000000000000000000..7818eb3188dc1653103f3a1dcd67bfd4dafdb91f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_XDMA @@ -0,0 +1 @@ +# CONFIG_XILINX_XDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYBUS new file mode 100644 index 0000000000000000000000000000000000000000..ce2448ee6c705a8543dc3d5293b1ed3ff897f885 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYBUS @@ -0,0 +1 @@ +# CONFIG_XILLYBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYUSB b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYUSB new file mode 100644 index 0000000000000000000000000000000000000000..1b74c675f4e12e18ea81a7f71430d5a49e25797a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYUSB @@ -0,0 +1 @@ +# CONFIG_XILLYUSB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XXHASH b/anolis/configs/L2-OPTIONAL/default/CONFIG_XXHASH new file mode 100644 index 0000000000000000000000000000000000000000..25ccb2e89b88e147afea6c702f6fb572fb3dc100 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XXHASH @@ -0,0 +1 @@ +CONFIG_XXHASH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA new file mode 100644 index 0000000000000000000000000000000000000000..fa07f12c4899a85018875b51ab218f7737788ee0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA @@ -0,0 +1 @@ +CONFIG_YENTA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_ENE_TUNE b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_ENE_TUNE new file mode 100644 index 0000000000000000000000000000000000000000..8753860d35c0d75cb1d3c5a2c910bcf91ff85edd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_ENE_TUNE @@ -0,0 +1 @@ +CONFIG_YENTA_ENE_TUNE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_O2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_O2 new file mode 100644 index 0000000000000000000000000000000000000000..d77df1f4688b648022231024aafe6ec0d76dfd19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_O2 @@ -0,0 +1 @@ +CONFIG_YENTA_O2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_RICOH b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_RICOH new file mode 100644 index 0000000000000000000000000000000000000000..47b537c5dbf78d7af9c55161c71c7f6c0bad7126 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_RICOH @@ -0,0 +1 @@ +CONFIG_YENTA_RICOH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TI b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TI new file mode 100644 index 0000000000000000000000000000000000000000..003f693c2f51fe89de83b5b6965bcd666f1b60e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TI @@ -0,0 +1 @@ +CONFIG_YENTA_TI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TOSHIBA b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TOSHIBA new file mode 100644 index 0000000000000000000000000000000000000000..ce7555e6874ca3e547d9a34963f5bee4b7661b7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TOSHIBA @@ -0,0 +1 @@ +CONFIG_YENTA_TOSHIBA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YT6801 b/anolis/configs/L2-OPTIONAL/default/CONFIG_YT6801 new file mode 100644 index 0000000000000000000000000000000000000000..ded35b1a94284585867d7c1c7a48f8b02df5772d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YT6801 @@ -0,0 +1 @@ +# CONFIG_YT6801 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_Z3FOLD_DEPRECATED b/anolis/configs/L2-OPTIONAL/default/CONFIG_Z3FOLD_DEPRECATED new file mode 100644 index 0000000000000000000000000000000000000000..e759a23add4ac8ee67877fa32f271eec93e44f51 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_Z3FOLD_DEPRECATED @@ -0,0 +1 @@ +# CONFIG_Z3FOLD_DEPRECATED is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZEROPLUS_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZEROPLUS_FF new file mode 100644 index 0000000000000000000000000000000000000000..a3656c69a7a9c58f349f05f4d52f5b008190c2f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZEROPLUS_FF @@ -0,0 +1 @@ +# CONFIG_ZEROPLUS_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZIIRAVE_WATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZIIRAVE_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..db53f11b7e656736546361374f0061cd8c0e527f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZIIRAVE_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_ZIIRAVE_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_DEFLATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_DEFLATE new file mode 100644 index 0000000000000000000000000000000000000000..078cde4af4c46c4e1fbe44a7227110b51885b97a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_DEFLATE @@ -0,0 +1 @@ +CONFIG_ZLIB_DEFLATE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_INFLATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_INFLATE new file mode 100644 index 0000000000000000000000000000000000000000..e23856bdc9fd8e2efab19c67b62dd316d1ee292a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_INFLATE @@ -0,0 +1 @@ +CONFIG_ZLIB_INFLATE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZONEFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZONEFS_FS new file mode 100644 index 0000000000000000000000000000000000000000..557a55ca1cf89bbba307a89bb5e5b81fb97db6a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZONEFS_FS @@ -0,0 +1 @@ +# CONFIG_ZONEFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED new file mode 100644 index 0000000000000000000000000000000000000000..a9c94a578e9c414c6bafe01108cdaddf21891fe4 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED @@ -0,0 +1 @@ +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_60XX_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_60XX_WDT new file mode 100644 index 0000000000000000000000000000000000000000..4506c4b5b01456f6c04388835060bba8442e775c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_60XX_WDT @@ -0,0 +1 @@ +# CONFIG_60XX_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ABP060MG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ABP060MG new file mode 100644 index 0000000000000000000000000000000000000000..56b9bd17fcec0c410246dfe7067be89220257458 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ABP060MG @@ -0,0 +1 @@ +# CONFIG_ABP060MG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACERHDF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACERHDF new file mode 100644 index 0000000000000000000000000000000000000000..53c61f3c7150ce729840d80be380d44aada09339 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACERHDF @@ -0,0 +1 @@ +CONFIG_ACERHDF=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WIRELESS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WIRELESS new file mode 100644 index 0000000000000000000000000000000000000000..648d05da41ab1307185f5ba4015e206acc535624 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WIRELESS @@ -0,0 +1 @@ +# CONFIG_ACER_WIRELESS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WMI new file mode 100644 index 0000000000000000000000000000000000000000..06ff5c76de7bd95c1a2e82a5073cd7e827cac4c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WMI @@ -0,0 +1 @@ +CONFIG_ACER_WMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ADXL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ADXL new file mode 100644 index 0000000000000000000000000000000000000000..e76a18724d297275b6f6dd8ea19ae7c859f395c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ADXL @@ -0,0 +1 @@ +CONFIG_ACPI_ADXL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ALS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ALS new file mode 100644 index 0000000000000000000000000000000000000000..e1f9ffcf0df7aa9e8be27efbf8236af5a5bf1c6e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ALS @@ -0,0 +1 @@ +# CONFIG_ACPI_ALS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CMPC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CMPC new file mode 100644 index 0000000000000000000000000000000000000000..72ad5cede8ea0f4b596583ec42f05eb210b4f5c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CMPC @@ -0,0 +1 @@ +CONFIG_ACPI_CMPC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CPU_FREQ_PSS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CPU_FREQ_PSS new file mode 100644 index 0000000000000000000000000000000000000000..6cb5293d986947eb9e559806f4a1c33901c1eaed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CPU_FREQ_PSS @@ -0,0 +1 @@ +CONFIG_ACPI_CPU_FREQ_PSS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_DPTF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_DPTF new file mode 100644 index 0000000000000000000000000000000000000000..e1ef119ef2e35d313ac0f39eea254a58f949cd1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_DPTF @@ -0,0 +1 @@ +# CONFIG_ACPI_DPTF is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_HOTPLUG_IOAPIC 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_HOTPLUG_IOAPIC new file mode 100644 index 0000000000000000000000000000000000000000..4e82e7d4e35c4e53aca6c82398ffd8ae07330ac6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_HOTPLUG_IOAPIC @@ -0,0 +1 @@ +CONFIG_ACPI_HOTPLUG_IOAPIC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LEGACY_TABLES_LOOKUP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LEGACY_TABLES_LOOKUP new file mode 100644 index 0000000000000000000000000000000000000000..dd59ebd67c12cf82c32d3ec8b465198d116cde10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LEGACY_TABLES_LOOKUP @@ -0,0 +1 @@ +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LPIT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LPIT new file mode 100644 index 0000000000000000000000000000000000000000..7d63b33df49151014103e8c1f14728a74b7e1eed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LPIT @@ -0,0 +1 @@ +CONFIG_ACPI_LPIT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PLATFORM_PROFILE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PLATFORM_PROFILE new file mode 100644 index 0000000000000000000000000000000000000000..33f4540f3d115276a56256a6ffd1c89d207ef45f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PLATFORM_PROFILE @@ -0,0 +1 @@ +CONFIG_ACPI_PLATFORM_PROFILE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PROCESSOR_CSTATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PROCESSOR_CSTATE new file mode 100644 index 0000000000000000000000000000000000000000..32905041afe914eb81e70e9c49bc17772f531daa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PROCESSOR_CSTATE @@ -0,0 +1 @@ +CONFIG_ACPI_PROCESSOR_CSTATE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..4b19514a9a9b7a280724bdb13ba036375b4446e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT @@ -0,0 +1 @@ +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_TOSHIBA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_TOSHIBA new file mode 100644 index 0000000000000000000000000000000000000000..2504b4dc8d4f0d2eecb6cc5c39e441873e0563ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_TOSHIBA @@ -0,0 +1 @@ +# CONFIG_ACPI_TOSHIBA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_WMI new file mode 100644 index 0000000000000000000000000000000000000000..9cef4743c8cc807446ddb4bf4c4c8f184f4b8e35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_WMI @@ -0,0 +1 @@ +CONFIG_ACPI_WMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACRN_GUEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACRN_GUEST new file mode 100644 index 0000000000000000000000000000000000000000..700a0209a85690269e359e06b602bb9e4a6f601d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACRN_GUEST @@ -0,0 +1 @@ +# CONFIG_ACRN_GUEST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S1200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S1200 new file mode 100644 index 0000000000000000000000000000000000000000..4c835768054e28ffb256a1b5ebee999fb8e45873 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S1200 @@ -0,0 +1 @@ +# CONFIG_AD2S1200 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S90 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S90 new file mode 100644 index 0000000000000000000000000000000000000000..8652c581466da69456aec7060dbe27ef63867ff4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S90 @@ -0,0 +1 @@ +# CONFIG_AD2S90 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD3552R b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD3552R new file mode 100644 index 0000000000000000000000000000000000000000..3e21ea9bcf28c4f128adfa5ba6104366be7aef52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD3552R @@ -0,0 +1 @@ +# CONFIG_AD3552R is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD4130 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD4130 new file mode 100644 index 0000000000000000000000000000000000000000..34fe968a74516b11cf48ba26dbfa16fe04e04341 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD4130 @@ -0,0 +1 @@ +# CONFIG_AD4130 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5064 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5064 new file mode 100644 index 0000000000000000000000000000000000000000..d26142d73fd7a9e6c6eaea184acebfa64eb73855 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5064 @@ -0,0 +1 @@ +# CONFIG_AD5064 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5110 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5110 new file mode 100644 index 0000000000000000000000000000000000000000..3beaff0bb3e3a44a6ee62111600fe9b3159b42f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5110 @@ -0,0 +1 @@ +# CONFIG_AD5110 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5272 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5272 new file mode 100644 index 0000000000000000000000000000000000000000..66aa4460977eb9222d8e0082d081332fe2aa3ca8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5272 @@ -0,0 +1 @@ +# CONFIG_AD5272 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5360 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5360 new file mode 100644 index 0000000000000000000000000000000000000000..294533b24b1412cff3455b20019367f007162f47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5360 @@ -0,0 +1 @@ +# CONFIG_AD5360 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5380 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5380 new file mode 100644 index 0000000000000000000000000000000000000000..cc21a83aa48eb1a48152a0a752332bcf37ee1f0e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5380 @@ -0,0 +1 @@ +# CONFIG_AD5380 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5421 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5421 new file mode 100644 index 0000000000000000000000000000000000000000..9ec8d94a8d95e135be13681259085db2c2a51af9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5421 @@ -0,0 +1 @@ +# CONFIG_AD5421 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5446 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5446 new file mode 100644 index 0000000000000000000000000000000000000000..1a8d66614b5d44e18daae6ca295b1ee64995abec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5446 @@ -0,0 +1 @@ +# CONFIG_AD5446 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5449 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5449 new file mode 100644 index 0000000000000000000000000000000000000000..a370e945dce575fb35d397c19ad044e9992d5905 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5449 @@ -0,0 +1 @@ +# CONFIG_AD5449 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5504 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5504 new file mode 100644 index 0000000000000000000000000000000000000000..cad3ffe128e23a887cbfdf0da32fd1c65e1e5773 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5504 @@ -0,0 +1 @@ +# CONFIG_AD5504 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5592R b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5592R new file mode 100644 index 0000000000000000000000000000000000000000..019d6225df598571e4ad691135d2806d679a6c74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5592R @@ -0,0 +1 @@ +# CONFIG_AD5592R is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5593R b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5593R new file mode 100644 index 0000000000000000000000000000000000000000..a0a89f52b4fef45eac5f68efcaff5d0fd754fb51 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5593R @@ -0,0 +1 @@ +# CONFIG_AD5593R is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5624R_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5624R_SPI new file mode 100644 index 0000000000000000000000000000000000000000..fb8e93bb2b6aeacc13ea90eeb43bd7d5e3b242e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5624R_SPI @@ -0,0 +1 @@ +# CONFIG_AD5624R_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5686_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5686_SPI new file mode 100644 index 0000000000000000000000000000000000000000..ab255270b2b218d3b0fbbd1eedb3c9895ae0cb4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5686_SPI @@ -0,0 +1 @@ +# CONFIG_AD5686_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5696_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5696_I2C new file mode 100644 index 0000000000000000000000000000000000000000..5fd9e716619b80ca61fcc0f71f5d1511e6932a14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5696_I2C @@ -0,0 +1 @@ +# CONFIG_AD5696_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5755 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5755 new file mode 100644 index 0000000000000000000000000000000000000000..986e3bdc9249a067ac8e6624a79050be967368b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5755 @@ -0,0 +1 @@ +# CONFIG_AD5755 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5758 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5758 new file mode 100644 index 0000000000000000000000000000000000000000..244fcdfa62f4d127506ff9d581d43a89c2722d38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5758 @@ -0,0 +1 @@ +# CONFIG_AD5758 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5761 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5761 new file mode 100644 index 0000000000000000000000000000000000000000..93e281ced3b97443a50f237d7505dd2ccddb91ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5761 @@ -0,0 +1 @@ +# CONFIG_AD5761 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5764 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5764 new file mode 100644 index 0000000000000000000000000000000000000000..c6885466f205544ca96c9297a9cb0adc65b58857 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5764 @@ -0,0 +1 @@ +# CONFIG_AD5764 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5766 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5766 new file mode 100644 index 0000000000000000000000000000000000000000..5df88b41d4b9bc52c06db199f0f865b4dbce39a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5766 @@ -0,0 +1 @@ +# CONFIG_AD5766 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5770R b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5770R new file mode 100644 index 0000000000000000000000000000000000000000..3a875e6c1735a75d071209fed66ce27880f36fa9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5770R @@ -0,0 +1 @@ +# CONFIG_AD5770R is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5791 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5791 new file mode 100644 index 0000000000000000000000000000000000000000..4d143c6592190a79589981262a5eaca6ee57decc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5791 @@ -0,0 +1 @@ +# CONFIG_AD5791 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7091R5 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7091R5 new file mode 100644 index 0000000000000000000000000000000000000000..a2cd8825e209696c4ab55f40450256e3c065c836 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7091R5 @@ -0,0 +1 @@ +# CONFIG_AD7091R5 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7124 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7124 new file mode 100644 index 0000000000000000000000000000000000000000..b25a1166b6903500cccfadf5612eba93029a8bb8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7124 @@ -0,0 +1 @@ +# CONFIG_AD7124 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7150 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7150 new file mode 100644 index 0000000000000000000000000000000000000000..cd5a3edb085004024232854ad6e4817560e21b2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7150 @@ -0,0 +1 @@ +# CONFIG_AD7150 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7192 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7192 new file mode 100644 index 0000000000000000000000000000000000000000..231728de4734a0155483090609a77604d7463310 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7192 @@ -0,0 +1 @@ +# CONFIG_AD7192 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7266 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7266 new file mode 100644 index 0000000000000000000000000000000000000000..62b03da046d7ae731493eb2f8ecdd4d45ace61de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7266 @@ -0,0 +1 @@ +# CONFIG_AD7266 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7280 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7280 new file mode 100644 index 0000000000000000000000000000000000000000..6ac49feefaef9804d64e24034b8d8624d0290f78 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7280 @@ -0,0 +1 @@ +# CONFIG_AD7280 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7291 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7291 new file mode 100644 index 0000000000000000000000000000000000000000..9257aeb05d7adfc9d67ec6fc4ca421089c0de739 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7291 @@ -0,0 +1 @@ +# CONFIG_AD7291 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7292 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7292 new file mode 100644 index 0000000000000000000000000000000000000000..a9c0380b3c4b6be70e0bfca1e4e68a0572af65fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7292 @@ -0,0 +1 @@ +# CONFIG_AD7292 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7293 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7293 new file mode 100644 index 0000000000000000000000000000000000000000..f804e7bf7209b93fceaabad8130a0ca1b7b340c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7293 @@ -0,0 +1 @@ +# CONFIG_AD7293 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7298 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7298 new file mode 100644 index 0000000000000000000000000000000000000000..40ce2af3a07b19269aeb5ac22b76231610c3c9c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7298 @@ -0,0 +1 @@ +# CONFIG_AD7298 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7303 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7303 new file mode 100644 index 0000000000000000000000000000000000000000..f4909c7c0045ffe809edbd53d3c02a0c7281de3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7303 @@ -0,0 +1 @@ +# CONFIG_AD7303 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74115 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74115 new file mode 100644 index 0000000000000000000000000000000000000000..0fbf7ef087a2a5e08c500b3d7c8a20966309d6c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74115 @@ -0,0 +1 @@ +# CONFIG_AD74115 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74413R b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74413R new file mode 100644 index 0000000000000000000000000000000000000000..c22b3660b36bbfeaddef0c27f2d76885d904fea0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74413R @@ -0,0 +1 @@ +# CONFIG_AD74413R is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7476 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7476 new file mode 100644 index 0000000000000000000000000000000000000000..a8a9e9361a02d5b0f9dfa4632acca4576b4e95ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7476 @@ -0,0 +1 @@ +# CONFIG_AD7476 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_PARALLEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_PARALLEL new file mode 100644 index 0000000000000000000000000000000000000000..c04c8bcd2eb669f3bcca5cad1ef5386ff87fa8ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_PARALLEL @@ -0,0 +1 @@ +# CONFIG_AD7606_IFACE_PARALLEL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_SPI new file mode 100644 index 0000000000000000000000000000000000000000..6f532a5afc09934ec9e59057a30a045e8141730a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_SPI @@ -0,0 +1 @@ +# CONFIG_AD7606_IFACE_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7746 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7746 new file mode 100644 index 0000000000000000000000000000000000000000..49d1a7d26ae121c2c33d8385cb62f1518d6e7d69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7746 @@ -0,0 +1 @@ +# CONFIG_AD7746 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7766 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7766 new file mode 100644 index 0000000000000000000000000000000000000000..c1a1ea27718bf9beb9c8fe397d448873783385bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7766 @@ -0,0 +1 @@ +# CONFIG_AD7766 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7768_1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7768_1 new file mode 100644 index 0000000000000000000000000000000000000000..eee87cb8238e608d74e91dcd1f94200a6f3f72b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7768_1 @@ -0,0 +1 @@ +# CONFIG_AD7768_1 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7780 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7780 new file mode 100644 index 0000000000000000000000000000000000000000..ad5e89ca58a6743fb012e48244d66b41fa5f0fd2 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7780 @@ -0,0 +1 @@ +# CONFIG_AD7780 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7791 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7791 new file mode 100644 index 0000000000000000000000000000000000000000..3b0b7f0f1bd763f7fcb70fe7893e30ee123e2a8b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7791 @@ -0,0 +1 @@ +# CONFIG_AD7791 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7793 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7793 new file mode 100644 index 0000000000000000000000000000000000000000..f2cf5fc2f76e969748d153cf1eae482795d777b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7793 @@ -0,0 +1 @@ +# CONFIG_AD7793 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7887 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7887 new file mode 100644 index 0000000000000000000000000000000000000000..4746e6e69bebc39f70820641e53bbbf974a32062 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7887 @@ -0,0 +1 @@ +# CONFIG_AD7887 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7923 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7923 new file mode 100644 index 0000000000000000000000000000000000000000..cc3b9c69497ac1a06ffeb88a81c70dc86091982c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7923 @@ -0,0 +1 @@ +# CONFIG_AD7923 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7949 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7949 new file mode 100644 index 0000000000000000000000000000000000000000..a11e18aea30b79c9a17c50e5559c50b8555a4c1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7949 @@ -0,0 +1 @@ +# CONFIG_AD7949 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD799X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD799X new file mode 100644 index 0000000000000000000000000000000000000000..66e971f76cea5bd18d47330b4bdfbc074fc07ac8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD799X @@ -0,0 +1 @@ +# CONFIG_AD799X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8366 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8366 new file mode 100644 index 0000000000000000000000000000000000000000..0d9754eeb9bcc7fcf4ac4c1231a6366d723bd3b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8366 @@ -0,0 +1 @@ +# CONFIG_AD8366 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8801 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8801 new file mode 100644 index 0000000000000000000000000000000000000000..e634e592558b8a0e3bd023e5243139f957b52b08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8801 @@ -0,0 +1 @@ +# CONFIG_AD8801 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 new file mode 100644 index 0000000000000000000000000000000000000000..421ac1f25eec2765aa1eb61699b5b41152503b77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 @@ -0,0 +1 @@ +# CONFIG_AD9467 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9523 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9523 new file mode 100644 index 0000000000000000000000000000000000000000..abfd7e162c8229554133ccaeca6fc4d33db5aa9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9523 @@ -0,0 +1 @@ +# CONFIG_AD9523 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADA4250 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADA4250 new file mode 100644 index 0000000000000000000000000000000000000000..7261f710891ec8f50a766f7f98f515a5c6213686 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADA4250 @@ -0,0 +1 @@ +# CONFIG_ADA4250 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4350 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4350 new file mode 100644 index 0000000000000000000000000000000000000000..77a7e76604b58d808ed8efe9232d7c204d48bcb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4350 @@ -0,0 +1 @@ +# CONFIG_ADF4350 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4371 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4371 new file mode 100644 index 0000000000000000000000000000000000000000..0d7d09dd13dcd09f078c3d6d2ed86fc6e0fca6c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4371 @@ -0,0 +1 @@ +# CONFIG_ADF4371 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4377 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4377 new file mode 100644 index 0000000000000000000000000000000000000000..045780028d52fbeff1aab986e5a6fb1119dc838c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4377 @@ -0,0 +1 @@ +# CONFIG_ADF4377 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16080 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16080 new file mode 100644 index 0000000000000000000000000000000000000000..5718d4066600b15f5f042e9d39d4f1755013e587 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16080 @@ -0,0 +1 @@ +# CONFIG_ADIS16080 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16130 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16130 new file mode 100644 index 0000000000000000000000000000000000000000..2a9469708eb88b12b0d707e88f8933c94cd31aa5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16130 @@ -0,0 +1 @@ +# CONFIG_ADIS16130 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16136 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16136 new file mode 100644 index 0000000000000000000000000000000000000000..efe24e8bc1feedb405a1a4bb517fa5a9ef3aa087 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16136 @@ -0,0 +1 @@ +# CONFIG_ADIS16136 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16201 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16201 new file mode 100644 index 0000000000000000000000000000000000000000..87928c55476f1a0b7d35c5ac86b751fd0ec644ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16201 @@ -0,0 +1 @@ +# CONFIG_ADIS16201 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16209 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16209 new file mode 100644 index 0000000000000000000000000000000000000000..8f686138b83b6ca225c0bceb2df274487fba5fa7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16209 @@ -0,0 +1 @@ +# CONFIG_ADIS16209 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16260 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16260 new file mode 100644 index 0000000000000000000000000000000000000000..a96770bc983320dce6194083bfd5efaa2ccdb4c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16260 @@ -0,0 +1 @@ +# CONFIG_ADIS16260 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16400 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16400 new file mode 100644 index 0000000000000000000000000000000000000000..f7717e8d82c3768a590dc9821aa2ff094c1b0c35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16400 @@ -0,0 +1 @@ +# CONFIG_ADIS16400 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16460 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16460 new file mode 100644 index 
0000000000000000000000000000000000000000..e3f206072caf0a8b70bbeb934bb8c1f5570dd4c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16460 @@ -0,0 +1 @@ +# CONFIG_ADIS16460 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16475 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16475 new file mode 100644 index 0000000000000000000000000000000000000000..18dd3612a10fae784124726fd5ad7db98b31909f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16475 @@ -0,0 +1 @@ +# CONFIG_ADIS16475 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16480 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16480 new file mode 100644 index 0000000000000000000000000000000000000000..023204875e7d1ebe700ec854d546c8a010a1dc82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16480 @@ -0,0 +1 @@ +# CONFIG_ADIS16480 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC new file mode 100644 index 0000000000000000000000000000000000000000..e98b407ac85f87ca0bb365ca3e297ff949ded4c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC @@ -0,0 +1 @@ +# CONFIG_ADI_AXI_ADC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADJD_S311 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADJD_S311 new file mode 100644 index 0000000000000000000000000000000000000000..624a7bc07ad61e119fc2758b2b76d9d572c260b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADJD_S311 @@ -0,0 +1 @@ +# CONFIG_ADJD_S311 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1013 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1013 new file mode 100644 index 0000000000000000000000000000000000000000..06f76f7c122681a79b1a3662a60791501b02edcb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1013 @@ -0,0 +1 @@ +# CONFIG_ADMV1013 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1014 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1014 new file mode 100644 index 0000000000000000000000000000000000000000..983402c3c0db6ef5886e50c662026b22de4632cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1014 @@ -0,0 +1 @@ +# CONFIG_ADMV1014 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV4420 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV4420 new file mode 100644 index 0000000000000000000000000000000000000000..d5ae672e12b0e4fb01f74b82ab9c5c1436936326 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV4420 @@ -0,0 +1 @@ +# CONFIG_ADMV4420 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV8818 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV8818 new file mode 100644 index 0000000000000000000000000000000000000000..0130531ed952240afcf47d50c47ce505ab418c13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV8818 @@ -0,0 +1 @@ +# CONFIG_ADMV8818 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADRF6780 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADRF6780 new file mode 100644 index 0000000000000000000000000000000000000000..4830752d31d7082bf9275c035d5b70d98662e2a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADRF6780 @@ -0,0 +1 @@ +# CONFIG_ADRF6780 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADUX1020 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADUX1020 new file mode 100644 index 0000000000000000000000000000000000000000..3e18d3e0a4d74191a19d69996a41ce95063369f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADUX1020 @@ -0,0 +1 @@ +# CONFIG_ADUX1020 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADVANTECH_EC_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADVANTECH_EC_WDT new file mode 100644 index 0000000000000000000000000000000000000000..48d437ed84f230fbd37cfb2e418df42c55e9a794 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADVANTECH_EC_WDT @@ -0,0 +1 @@ +# CONFIG_ADVANTECH_EC_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADV_SWBUTTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADV_SWBUTTON new file mode 100644 index 0000000000000000000000000000000000000000..c359f60c6f847ca7bb3effaf3516f8e88db4e3e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADV_SWBUTTON @@ -0,0 +1 @@ +# CONFIG_ADV_SWBUTTON is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_I2C new file mode 100644 index 0000000000000000000000000000000000000000..5746dee1e8c5c619e9b35d9b46910a07b1149636 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_I2C @@ -0,0 +1 @@ +# CONFIG_ADXL313_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_SPI new file mode 100644 index 0000000000000000000000000000000000000000..2e5c52a6e312d1dda122ab32fe6b073f4e450934 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_SPI @@ -0,0 +1 @@ +# CONFIG_ADXL313_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_I2C new file mode 100644 index 0000000000000000000000000000000000000000..f6976b836c605d1281e2295809f39f518f10e209 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_I2C @@ -0,0 +1 @@ +# CONFIG_ADXL345_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_SPI new file mode 100644 index 0000000000000000000000000000000000000000..186ab0d3ad9ca55173d2e20b9ff7457a415294ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_SPI @@ -0,0 +1 @@ +# CONFIG_ADXL345_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_I2C new file mode 100644 index 0000000000000000000000000000000000000000..5d6dbfc751817ff4d08bb433ed402276dc4e83a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_I2C @@ -0,0 +1 @@ +# CONFIG_ADXL355_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_SPI new file mode 100644 index 0000000000000000000000000000000000000000..d546ccabbe0254e92065c8948219c8601715ad80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_SPI @@ -0,0 +1 @@ +# CONFIG_ADXL355_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_I2C new file mode 100644 index 0000000000000000000000000000000000000000..d876f8d2be8badaca1deb80606680b1540f85856 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_I2C @@ -0,0 +1 @@ +# CONFIG_ADXL367_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_SPI new file mode 100644 index 0000000000000000000000000000000000000000..980e9a2e1a7aaa781b2cc306bba85f5f6c63095f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_SPI @@ -0,0 +1 @@ +# CONFIG_ADXL367_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_I2C new 
file mode 100644 index 0000000000000000000000000000000000000000..dad8d9bf6fb45be53af03c079fc3c2290a186ca2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_I2C @@ -0,0 +1 @@ +# CONFIG_ADXL372_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_SPI new file mode 100644 index 0000000000000000000000000000000000000000..e9916f854444e389eaed36476767d86cfd01ad03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_SPI @@ -0,0 +1 @@ +# CONFIG_ADXL372_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS290 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS290 new file mode 100644 index 0000000000000000000000000000000000000000..8b7d877f13bcc8d6c29bf7a880ac0aabfb3c5549 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS290 @@ -0,0 +1 @@ +# CONFIG_ADXRS290 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS450 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS450 new file mode 100644 index 0000000000000000000000000000000000000000..7abb6c9c598dad722cf1123afd719ace96cbecc3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS450 @@ -0,0 +1 @@ +# CONFIG_ADXRS450 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4403 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4403 new file mode 100644 index 0000000000000000000000000000000000000000..26df5ff60250f4a0b652e2116cbd8b255f3e2bb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4403 @@ -0,0 +1 @@ +# CONFIG_AFE4403 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4404 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4404 new file mode 100644 index 0000000000000000000000000000000000000000..0244492b53ff513561c81b1f466fb6153877935d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4404 @@ -0,0 +1 @@ +# CONFIG_AFE4404 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AGP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AGP new file mode 100644 index 0000000000000000000000000000000000000000..f7332ef53c00b184637ebf84f6710d133a6adb38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AGP @@ -0,0 +1 @@ +# CONFIG_AGP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK09911 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK09911 new file mode 100644 index 0000000000000000000000000000000000000000..e4eae5d4348438cfdd973b3aacc445f8c0fca82e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK09911 @@ -0,0 +1 @@ +# CONFIG_AK09911 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8974 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8974 new file mode 100644 index 0000000000000000000000000000000000000000..c74de8fec3a7db0676577c3c916c7597577bc771 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8974 @@ -0,0 +1 @@ +# CONFIG_AK8974 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8975 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8975 new file mode 100644 index 0000000000000000000000000000000000000000..958f0bd067c737f426b154f9ab740795b6bf0d4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8975 @@ -0,0 +1 @@ +# CONFIG_AK8975 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3010 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3010 new file mode 100644 index 0000000000000000000000000000000000000000..c0ce27f464900fd97138acc1738236c1369a0c70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3010 @@ -0,0 +1 @@ +# CONFIG_AL3010 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3320A 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3320A new file mode 100644 index 0000000000000000000000000000000000000000..0b60a6dfd3c8925baf988874edad3d349533fa88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3320A @@ -0,0 +1 @@ +# CONFIG_AL3320A is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALIM1535_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALIM1535_WDT new file mode 100644 index 0000000000000000000000000000000000000000..752901cddd1d9c4caf83b8e1e433b0e8295ece50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALIM1535_WDT @@ -0,0 +1 @@ +CONFIG_ALIM1535_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALTERA_STAPL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALTERA_STAPL new file mode 100644 index 0000000000000000000000000000000000000000..7931c1e5ab18081c3c4b6ed1c3a3788a61357852 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALTERA_STAPL @@ -0,0 +1 @@ +CONFIG_ALTERA_STAPL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AM2315 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AM2315 new file mode 100644 index 0000000000000000000000000000000000000000..9acd703102e51bd315f4bac5cca156a5c449109e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AM2315 @@ -0,0 +1 @@ +# CONFIG_AM2315 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_NB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_NB new file mode 100644 index 0000000000000000000000000000000000000000..31b1f5e5f6fa1d60c3883c5ded610be6271ffb88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_NB @@ -0,0 +1 @@ +CONFIG_AMD_NB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMC new file mode 100644 index 0000000000000000000000000000000000000000..1ad2137c611fc88a295c5d69463591e852b62800 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMC @@ -0,0 +1 @@ +# CONFIG_AMD_PMC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMF new file mode 100644 index 0000000000000000000000000000000000000000..0b4aaae83f32d9f3ccf4093b0cf6a6b72f7004a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMF @@ -0,0 +1 @@ +# CONFIG_AMD_PMF is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_SFH_HID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_SFH_HID new file mode 100644 index 0000000000000000000000000000000000000000..166fa39b1cc2adb1e6354deaef5d4d2447becb19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_SFH_HID @@ -0,0 +1 @@ +# CONFIG_AMD_SFH_HID is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMIGA_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMIGA_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..5f03ffaf533774d66121549c21860974110e7e72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMIGA_PARTITION @@ -0,0 +1 @@ +CONFIG_AMIGA_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMILO_RFKILL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMILO_RFKILL new file mode 100644 index 0000000000000000000000000000000000000000..ab49a1190354a3cee0ca23be0b424cd6734bdd64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMILO_RFKILL @@ -0,0 +1 @@ +CONFIG_AMILO_RFKILL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9300 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9300 new file mode 100644 index 0000000000000000000000000000000000000000..21d37456539d6969061af380bffd4f9ae5f7f2e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9300 @@ -0,0 +1 @@ +# 
CONFIG_APDS9300 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9802ALS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9802ALS
new file mode 100644
index 0000000000000000000000000000000000000000..086fb1becf62182fd5b84db14bdd022e91f97381
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9802ALS
@@ -0,0 +1 @@
+CONFIG_APDS9802ALS=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9960 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9960
new file mode 100644
index 0000000000000000000000000000000000000000..ac56682f409cbe284bc860f9b8ea280ec6a08a62
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9960
@@ -0,0 +1 @@
+# CONFIG_APDS9960 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_GMUX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_GMUX
new file mode 100644
index 0000000000000000000000000000000000000000..9ecb754be9de1bd40e29702fbb474ec6d86442d0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_GMUX
@@ -0,0 +1 @@
+CONFIG_APPLE_GMUX=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_PROPERTIES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_PROPERTIES
new file mode 100644
index 0000000000000000000000000000000000000000..5f2035b0e91723a34ab77528a0d0caebf3f66757
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_PROPERTIES
@@ -0,0 +1 @@
+# CONFIG_APPLE_PROPERTIES is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AQTION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AQTION
new file mode 100644
index 0000000000000000000000000000000000000000..7812ca016377966b5f05ff5f99dfa85a99a551e9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AQTION
@@ -0,0 +1 @@
+CONFIG_AQTION=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AR5523 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AR5523
new file mode 100644
index 0000000000000000000000000000000000000000..731e605b7172d4fbaa5f018241a0b22abe7d12f7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AR5523
@@ -0,0 +1 @@
+# CONFIG_AR5523 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CLOCKSOURCE_INIT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CLOCKSOURCE_INIT
new file mode 100644
index 0000000000000000000000000000000000000000..0e1d8fd3bb7bbe2d23b4e762389abf3f0df11e60
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CLOCKSOURCE_INIT
@@ -0,0 +1 @@
+CONFIG_ARCH_CLOCKSOURCE_INIT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS
new file mode 100644
index 0000000000000000000000000000000000000000..a7a95432397c43ae047c217d72b1167005737b06
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS
@@ -0,0 +1 @@
+CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ADD_PAGES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ADD_PAGES
new file mode 100644
index 0000000000000000000000000000000000000000..8b8300af27aaa91b5c2adc4965429442356950d8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ADD_PAGES
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_ADD_PAGES=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CC_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CC_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..6ee641e4cf79f2e33ff5187491c6a0240ca5b13a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CC_PLATFORM
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_CC_PLATFORM=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
new file mode 100644
index 0000000000000000000000000000000000000000..c4c0018dd1ad7daaebb218664168ffcbd6fb9c2c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_FINALIZE_INIT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
new file mode 100644
index 0000000000000000000000000000000000000000..a826e586c4c8743ca0a85144767c297d6cde41ff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_RELAX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_RELAX
new file mode 100644
index 0000000000000000000000000000000000000000..1d046c958ef6d2ffcc5ebaecef8ceaf14658987f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_RELAX
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_CPU_RELAX=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED
new file mode 100644
index 0000000000000000000000000000000000000000..e94589313f21fdc211912a9a1f34e03df6cd6b6b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_EARLY_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_EARLY_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..330e0806b2ea3c07670e8808c6200ee95c77b1e6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_EARLY_DEBUG
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_EARLY_DEBUG=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ELFCORE_COMPAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ELFCORE_COMPAT
new file mode 100644
index 0000000000000000000000000000000000000000..1d94e34b5a85288c9d3dc7f20e4beefbfb31a116
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ELFCORE_COMPAT
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_ELFCORE_COMPAT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
new file mode 100644
index 0000000000000000000000000000000000000000..bf71c649fb8bc71ceff2a68900e802496feabbf1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_MEM_ENCRYPT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_MEM_ENCRYPT
new file mode 100644
index 0000000000000000000000000000000000000000..a87d5d6bb2bb1f409a3d3a2f55d62c4c3c78d981
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_MEM_ENCRYPT
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_MEM_ENCRYPT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
new file mode 100644
index 0000000000000000000000000000000000000000..817a506f6881ad809ec2823d5bac5ad47f242813
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
new file mode 100644
index 0000000000000000000000000000000000000000..01e8ba194e3d1d999fd6f75531dbe91ce2db8d72
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PKEYS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PKEYS
new file mode 100644
index 0000000000000000000000000000000000000000..ee751e45fe3947fe959d0f047d280ae1105c0b3e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PKEYS
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_PKEYS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
new file mode 100644
index 0000000000000000000000000000000000000000..402b0726b7dfa9835e338bab00cdcdc433626e5b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
@@ -0,0 +1 @@
+CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MAY_HAVE_PC_FDC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MAY_HAVE_PC_FDC
new file mode 100644
index 0000000000000000000000000000000000000000..7884f40dbed523768053ea2955b52dadd6205e1e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MAY_HAVE_PC_FDC
@@ -0,0 +1 @@
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
new file mode 100644
index 0000000000000000000000000000000000000000..2789c27e17a23fe906456ae5635fb04e32c4ac1a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
@@ -0,0 +1 @@
+CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT
new file mode 100644
index 0000000000000000000000000000000000000000..2d87e628fcce72a6b6f4756c6643ea937c178e4b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT
@@ -0,0 +1 @@
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_SERIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_SERIO
new file mode 100644
index 0000000000000000000000000000000000000000..8d49b424e2a86fc9ec0e95f7e8383df38543e205
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_SERIO
@@ -0,0 +1 @@
+CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS
new file mode 100644
index 0000000000000000000000000000000000000000..e4ce245684021cbb1f3ecdff82882982469bf2af
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS
@@ -0,0 +1 @@
+CONFIG_ARCH_MMAP_RND_BITS=28
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MAX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MAX
new file mode 100644
index 0000000000000000000000000000000000000000..9f3e6d136c588093bcabbd7b358fdcb5ee40cee4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MAX
@@ -0,0 +1 @@
+CONFIG_ARCH_MMAP_RND_BITS_MAX=32
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MIN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MIN
new file mode 100644
index 0000000000000000000000000000000000000000..8518e1303f782f46a6a9db672105e06b294b69d7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MIN
@@ -0,0 +1 @@
+CONFIG_ARCH_MMAP_RND_BITS_MIN=28
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN
new file mode 100644
index 0000000000000000000000000000000000000000..9cd428feb208c4cfcd13732b6ce7ae89d0dfb1df
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN
@@ -0,0 +1 @@
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SPARSEMEM_DEFAULT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SPARSEMEM_DEFAULT
new file mode 100644
index 0000000000000000000000000000000000000000..799122cf34aaa7117602b43beb97abb56cea1051
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SPARSEMEM_DEFAULT
@@ -0,0 +1 @@
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG
new file mode 100644
index 0000000000000000000000000000000000000000..d981b8e0e036e47e7ee24dac3817f2fb0a594d18
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG
@@ -0,0 +1 @@
+CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG
new file mode 100644
index 0000000000000000000000000000000000000000..40f0233e6b32f5a3a9bd823e63382fb344322d0b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG
@@ -0,0 +1 @@
+CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_JUMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_JUMP
new file mode 100644
index 0000000000000000000000000000000000000000..4f63af6947cb1f926ce3c01678923aa593739a56
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_JUMP
@@ -0,0 +1 @@
+CONFIG_ARCH_SUPPORTS_KEXEC_JUMP=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY
new file mode 100644
index 0000000000000000000000000000000000000000..bb22c1173c4d4fdcca60d762e578745687c674c5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY
@@ -0,0 +1 @@
+CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE
new file mode 100644
index 0000000000000000000000000000000000000000..b5c30c362399f31c73f0124fdf0dd1db62996e11
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE
@@ -0,0 +1 @@
+CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP
new file mode 100644
index 0000000000000000000000000000000000000000..b2816f365d92e737b8f8dd7f5aa22c7f2ef9ad2d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP
@@ -0,0 +1 @@
+CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USES_PG_UNCACHED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USES_PG_UNCACHED
new file mode 100644
index 0000000000000000000000000000000000000000..a07ba5e0b33a517bf1dffc32a40329c007bb0be5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USES_PG_UNCACHED
@@ -0,0 +1 @@
+CONFIG_ARCH_USES_PG_UNCACHED=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USE_BUILTIN_BSWAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USE_BUILTIN_BSWAP
new file mode 100644
index 0000000000000000000000000000000000000000..0665eb9f39909ce3b532e30761fe380dcb910d18
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USE_BUILTIN_BSWAP
@@ -0,0 +1 @@
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
new file mode 100644
index 0000000000000000000000000000000000000000..e1a56bfcc7aea83284adb7613f974e6eedd0d44b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
@@ -0,0 +1 @@
+CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_GENERAL_HUGETLB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_GENERAL_HUGETLB
new file mode 100644
index 0000000000000000000000000000000000000000..e1afff6d9573363056d690156a96256ffad1728f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_GENERAL_HUGETLB
@@ -0,0 +1 @@
+CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OLD_COMPAT_IPC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OLD_COMPAT_IPC
new file mode 100644
index 0000000000000000000000000000000000000000..9d328ef054ef103a891bbffd74738dee447cc6b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OLD_COMPAT_IPC
@@ -0,0 +1 @@
+CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
new file mode 100644
index 0000000000000000000000000000000000000000..edd57b80994b6615e00d0837536679849ef1a977
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
@@ -0,0 +1 @@
+CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
new file mode 100644
index 0000000000000000000000000000000000000000..9308f5db98cc579a545d96f6429ce252d5d584f1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
@@ -0,0 +1 @@
+CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS3935 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS3935
new file mode 100644
index 0000000000000000000000000000000000000000..3d08d5594364e04d3bfda43d27d658fce577caae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS3935
@@ -0,0 +1 @@
+# CONFIG_AS3935 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS73211 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS73211
new file mode 100644
index 0000000000000000000000000000000000000000..dd3aa86a497ae7f4316be97cd89e2c81970435c9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS73211
@@ -0,0 +1 @@
+# CONFIG_AS73211 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_LAPTOP
new file mode 100644
index 0000000000000000000000000000000000000000..40e261455a0ac610e891957f856b90eb0a766c3a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_LAPTOP
@@ -0,0 +1 @@
+CONFIG_ASUS_LAPTOP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_NB_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_NB_WMI
new file mode 100644
index 0000000000000000000000000000000000000000..16d2c7fa9b0dac2cb59ada5db50292566954318c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_NB_WMI
@@ -0,0 +1 @@
+CONFIG_ASUS_NB_WMI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_TF103C_DOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_TF103C_DOCK
new file mode 100644
index 0000000000000000000000000000000000000000..a6e76ad34038b873e21d5aca53819507520bfcf9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_TF103C_DOCK
@@ -0,0 +1 @@
+# CONFIG_ASUS_TF103C_DOCK is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WIRELESS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WIRELESS
new file mode 100644
index 0000000000000000000000000000000000000000..6e8ebd8da47c04239659a57ad956919b69063395
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WIRELESS
@@ -0,0 +1 @@
+# CONFIG_ASUS_WIRELESS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WMI
new file mode 100644
index 0000000000000000000000000000000000000000..e881859cc8c66c773981021e40952cfe2b8f3bfd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WMI
@@ -0,0 +1 @@
+CONFIG_ASUS_WMI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_AVX512 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_AVX512
new file mode 100644
index 0000000000000000000000000000000000000000..5b1d1bd8c34382e017452affe22a24d54283eccc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_AVX512
@@ -0,0 +1 @@
+CONFIG_AS_AVX512=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_GFNI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_GFNI
new file mode 100644
index 0000000000000000000000000000000000000000..96ae77780964d5e701f84182366d5120124816fd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_GFNI
@@ -0,0 +1 @@
+CONFIG_AS_GFNI=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA1_NI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA1_NI
new file mode 100644
index 0000000000000000000000000000000000000000..e0ad2b172c47406096f648d001bc5348d6873d1e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA1_NI
@@ -0,0 +1 @@
+CONFIG_AS_SHA1_NI=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA256_NI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA256_NI
new file mode 100644
index 0000000000000000000000000000000000000000..df77e5b9d949a2b1124ee629adbcd3eb4dcffb62
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA256_NI
@@ -0,0 +1 @@
+CONFIG_AS_SHA256_NI=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_TPAUSE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_TPAUSE
new file mode 100644
index 0000000000000000000000000000000000000000..e1d3f4ac127bcf186454e67db94272859d255730
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_TPAUSE
@@ -0,0 +1 @@
+CONFIG_AS_TPAUSE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_WRUSS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_WRUSS
new file mode 100644
index 0000000000000000000000000000000000000000..51b5d9ab8b709fc5becf1ed7a5909b5222da2d23
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_WRUSS
@@ -0,0 +1 @@
+CONFIG_AS_WRUSS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K
new file mode 100644
index 0000000000000000000000000000000000000000..7e304c3b61736c4756e9c53928b690a6b34700c6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K
@@ -0,0 +1 @@
+CONFIG_ATH10K=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_CE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_CE
new file mode 100644
index 0000000000000000000000000000000000000000..d07e3d10a7bb73c2b2ba3a58ca86163f4c2465bf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_CE
@@ -0,0 +1 @@
+CONFIG_ATH10K_CE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..6390cff87f173bfa05b11e0fc97b398fc86df8a3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUG
@@ -0,0 +1 @@
+# CONFIG_ATH10K_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUGFS
new file mode 100644
index 0000000000000000000000000000000000000000..a7a5d6286c3f715f9d23fb7ad590c49636fc5fae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUGFS
@@ -0,0 +1 @@
+# CONFIG_ATH10K_DEBUGFS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..888f54594ce9f81435b9794a6e47fd9c60b40344
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_PCI
@@ -0,0 +1 @@
+CONFIG_ATH10K_PCI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_SDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_SDIO
new file mode 100644
index 0000000000000000000000000000000000000000..efdf37f81fe25d9cf199d2d6a0fbec987805cfdb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_SDIO
@@ -0,0 +1 @@
+# CONFIG_ATH10K_SDIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_TRACING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_TRACING
new file mode 100644
index 0000000000000000000000000000000000000000..e6827cecbd33e0d2febd68aae9c147777aabeacf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_TRACING
@@ -0,0 +1 @@
+# CONFIG_ATH10K_TRACING is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_USB
new file mode 100644
index 0000000000000000000000000000000000000000..3db2059ec227bf269d7754453137523c9f4a7719
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_USB
@@ -0,0 +1 @@
+# CONFIG_ATH10K_USB is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH11K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH11K
new file mode 100644
index 0000000000000000000000000000000000000000..584c5e3f1ebc3916f3faf83e3324f70925ffaf73
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH11K
@@ -0,0 +1 @@
+# CONFIG_ATH11K is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH12K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH12K
new file mode 100644
index 0000000000000000000000000000000000000000..5d40742814358edf28b63d0ab8b01cbe4afa1cc6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH12K
@@ -0,0 +1 @@
+# CONFIG_ATH12K is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K
new file mode 100644
index 0000000000000000000000000000000000000000..add610882e5d5ce78ed544bd02115fc7c04fa206
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K
@@ -0,0 +1 @@
+# CONFIG_ATH5K is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..6447cbab9f1ac12f6c6d05614513413c645d1529
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K_PCI
@@ -0,0 +1 @@
+# CONFIG_ATH5K_PCI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH6KL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH6KL
new file mode 100644
index 0000000000000000000000000000000000000000..34de675fb8e3e3b5f610a1a99288de53910a37f7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH6KL
@@ -0,0 +1 @@
+# CONFIG_ATH6KL is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K
new file mode 100644
index 0000000000000000000000000000000000000000..3346478981eacd5fbfbbe17b5a411bc8183cf1c4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K
@@ -0,0 +1 @@
+CONFIG_ATH9K=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_AHB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_AHB
new file mode 100644
index 0000000000000000000000000000000000000000..b9a31aab7584bfd01246cfc18507019a60cdbabc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_AHB
@@ -0,0 +1 @@
+CONFIG_ATH9K_AHB=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_BTCOEX_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_BTCOEX_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..64cf5324c025fd9bc30668d2cfc67a0a52d6dced
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -0,0 +1 @@
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_CHANNEL_CONTEXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_CHANNEL_CONTEXT
new file mode 100644
index 0000000000000000000000000000000000000000..e63bc2c75fddc22dace54c8de4cb97bf2aa9ded4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_CHANNEL_CONTEXT
@@ -0,0 +1 @@
+# CONFIG_ATH9K_CHANNEL_CONTEXT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..cccaa9d0c22df01f2b2335cea174b09cbcdff58a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON
@@ -0,0 +1 @@
+CONFIG_ATH9K_COMMON=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..22e139de6c52828008cba0ef817a26c3b3b4c412
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_DEBUG
@@ -0,0 +1 @@
+CONFIG_ATH9K_COMMON_DEBUG=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_SPECTRAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_SPECTRAL
new file mode 100644
index 0000000000000000000000000000000000000000..48a79591472bac1f5ad78e0f44fbafbe7d743817
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_SPECTRAL
@@ -0,0 +1 @@
+# CONFIG_ATH9K_COMMON_SPECTRAL is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DEBUGFS
new file mode 100644
index 0000000000000000000000000000000000000000..87a6218f9887ee70014fe89e0ccf466f2544857c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DEBUGFS
@@ -0,0 +1 @@
+CONFIG_ATH9K_DEBUGFS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DYNACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DYNACK
new file mode 100644
index 0000000000000000000000000000000000000000..fd6896cbc6b7b60fb0fb9c932e6bbe584f433da3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DYNACK
@@ -0,0 +1 @@
+# CONFIG_ATH9K_DYNACK is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC
new file mode 100644
index 0000000000000000000000000000000000000000..604c945745e6ccdd4da8fe8110120ddba7792752
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC
@@ -0,0 +1 @@
+CONFIG_ATH9K_HTC=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC_DEBUGFS
new file mode 100644
index 0000000000000000000000000000000000000000..811b2d5bd49e45688b93bcf613986992b9c0ab3b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC_DEBUGFS
@@ -0,0 +1 @@
+# CONFIG_ATH9K_HTC_DEBUGFS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HW
new file mode 100644
index 0000000000000000000000000000000000000000..916a1f10e25e9361e818654b74ccc1c6e998d8a6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HW
@@ -0,0 +1 @@
+CONFIG_ATH9K_HW=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HWRNG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HWRNG
new file mode 100644
index 0000000000000000000000000000000000000000..7b35feaa03710c7bb850085d8384ac8ae1e8146e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HWRNG
@@ -0,0 +1 @@
+# CONFIG_ATH9K_HWRNG is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..adbd3924862cf9ecdf772513c5c45723834f28bc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI
@@ -0,0 +1 @@
+CONFIG_ATH9K_PCI=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI_NO_EEPROM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI_NO_EEPROM
new file mode 100644
index 0000000000000000000000000000000000000000..aa3549208394510568e02703b5f5e00ad97e67c8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI_NO_EEPROM
@@ -0,0 +1 @@
+# CONFIG_ATH9K_PCI_NO_EEPROM is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCOEM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCOEM
new file mode 100644
index 0000000000000000000000000000000000000000..e72b499c8f8d8160f2bd71ba6ae7ffa93b60d217
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCOEM
@@ -0,0 +1 @@
+CONFIG_ATH9K_PCOEM=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_RFKILL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_RFKILL
new file mode 100644
index 0000000000000000000000000000000000000000..2e75824b7c89e39e285451790fa5bc52a4e0f5c8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_RFKILL
@@ -0,0 +1 @@
+CONFIG_ATH9K_RFKILL=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_STATION_STATISTICS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_STATION_STATISTICS
new file mode 100644
index 0000000000000000000000000000000000000000..a8ee2a27c62344b24adf26b49b3d64c775125b53
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_STATION_STATISTICS
@@ -0,0 +1 @@
+# CONFIG_ATH9K_STATION_STATISTICS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_WOW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_WOW
new file mode 100644
index 0000000000000000000000000000000000000000..169bfaa9d78bb3a3184b00feb23baafa9160ecb6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_WOW
@@ -0,0 +1 @@
+CONFIG_ATH9K_WOW=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..98ba398b96e9ff6b6885f3fc5a17449ab214bb08
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_COMMON
@@ -0,0 +1 @@
+CONFIG_ATH_COMMON=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..a93cd5ce25186ff57af2027ff3ec0ce099208ee4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_DEBUG
@@ -0,0 +1 @@
+# CONFIG_ATH_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATL2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATL2
new file mode 100644
index 0000000000000000000000000000000000000000..789b4a0d9abe19f5d86518b2f8a3b2178cf5dcbf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATL2
@@ -0,0 +1 @@
+CONFIG_ATL2=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_EZO_SENSOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_EZO_SENSOR
new file mode 100644
index 0000000000000000000000000000000000000000..be8768690df264929da9af10657dec4b6ddf4d17
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_EZO_SENSOR
@@ -0,0 +1 @@
+# CONFIG_ATLAS_EZO_SENSOR is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_PH_SENSOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_PH_SENSOR
new file mode 100644
index 0000000000000000000000000000000000000000..1a854eb57bb44c5d1a997f66289018552388702a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_PH_SENSOR
@@ -0,0 +1 @@
+# CONFIG_ATLAS_PH_SENSOR is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATP
new file mode 100644
index 0000000000000000000000000000000000000000..3ccfa591d429e4c2b38b758ab0492cc67a8a6c63
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATP
@@ -0,0 +1 @@
+# CONFIG_ATP is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AUDIT_ARCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AUDIT_ARCH
new file mode 100644
index 0000000000000000000000000000000000000000..df22866032ac22907434e3113883256ee625a9db
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AUDIT_ARCH
@@ -0,0 +1 @@
+CONFIG_AUDIT_ARCH=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43
new file mode 100644
index 0000000000000000000000000000000000000000..9a5037afbefa1630466c64e881a346f1665d5468
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43
@@ -0,0 +1 @@
+# CONFIG_B43 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43LEGACY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43LEGACY
new file mode 100644
index 0000000000000000000000000000000000000000..be4bdc84a58566bf9d631da1bf4003b7b1f2d273
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43LEGACY
@@ -0,0 +1 @@
+# CONFIG_B43LEGACY is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_APPLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_APPLE
new file mode 100644
index 0000000000000000000000000000000000000000..5150ad41e5eaafc4050c195c5d63b31781ea6a23
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_APPLE
@@ -0,0 +1 @@
+CONFIG_BACKLIGHT_APPLE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..4ddf33e9a31d5bcda736df25536e956ce4c78641
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_GPIO
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_GPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_PWM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_PWM
new file mode 100644
index 0000000000000000000000000000000000000000..e867170c2999789521734c91461168d53472a1f5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_PWM
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_PWM is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_SAHARA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_SAHARA
new file mode 100644
index 0000000000000000000000000000000000000000..bec58af815ebd87c1af855e6159ad7f281fe1a2b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_SAHARA
@@ -0,0 +1 @@
+# CONFIG_BACKLIGHT_SAHARA is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BARCO_P50_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BARCO_P50_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..c5b5c80c7dbf76ae437554fb0a2e32bbd1b1540f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BARCO_P50_GPIO
@@ -0,0 +1 @@
+# CONFIG_BARCO_P50_GPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BCACHE_ASYNC_REGISTRATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BCACHE_ASYNC_REGISTRATION
new file mode 100644
index 0000000000000000000000000000000000000000..d966c9744d3d034845bc3b36e2ba1acfd5cb9d29
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BCACHE_ASYNC_REGISTRATION
@@ -0,0 +1 @@
+# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET
new file mode 100644
index 0000000000000000000000000000000000000000..b5f51dbbd6860472dd670000b2a109d6437ce4bb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET
@@ -0,0 +1 @@
+CONFIG_BE2NET=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE2
new file mode 100644
index 0000000000000000000000000000000000000000..feddf969fb3ad2b4141ee1929430df6fbadbf1ef
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE2
@@ -0,0 +1 @@
+# CONFIG_BE2NET_BE2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE3
new file mode 100644
index 0000000000000000000000000000000000000000..a4dd47a9430931a4e7d1d4b1f65442a8f2bdd860
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE3
@@ -0,0 +1 @@
+# CONFIG_BE2NET_BE3 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_HWMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_HWMON
new file mode 100644
index 0000000000000000000000000000000000000000..7e5e7cfe30c6b6e7ff7a17aba8746354901f615c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_HWMON
@@ -0,0 +1 @@
+CONFIG_BE2NET_HWMON=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_LANCER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_LANCER
new file mode 100644
index 0000000000000000000000000000000000000000..d63e3ec0a5989dd4b29d9cfb2a80e3fc6f202985
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_LANCER
@@ -0,0 +1 @@
+CONFIG_BE2NET_LANCER=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_SKYHAWK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_SKYHAWK
new file mode 100644
index 0000000000000000000000000000000000000000..d10755283ad575da4d6e078e559b29f90fba5a79
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_SKYHAWK
@@ -0,0 +1 @@
+CONFIG_BE2NET_SKYHAWK=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1750 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1750
new file mode 100644
index 0000000000000000000000000000000000000000..f69e2b6b1c8d5f7d33c3a477b02cc548b643183e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1750
@@ -0,0 +1 @@
+# CONFIG_BH1750 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1780 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1780
new file mode 100644
index 0000000000000000000000000000000000000000..07bbc5b6810243e9b3e3656d2a8d505b5abc1088
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1780
@@ -0,0 +1 @@
+# CONFIG_BH1780 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_CGROUP_IOLATENCY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_CGROUP_IOLATENCY
new file mode 100644
index 0000000000000000000000000000000000000000..1ea3b679b4b267fdea79e13b1da5491194932e6e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_CGROUP_IOLATENCY
@@ -0,0 +1 @@
+CONFIG_BLK_CGROUP_IOLATENCY=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_FD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_FD
new file mode 100644
index 0000000000000000000000000000000000000000..4f8c857f952eb0d55ed3c7d61e589eeb73ef8954
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_FD
@@ -0,0 +1 @@
+# CONFIG_BLK_DEV_FD is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_PMEM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_PMEM
new file mode 100644
index 0000000000000000000000000000000000000000..ea2a00056c895c7cf01ffc0977ff700ad1e18650
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_PMEM
@@ -0,0 +1 @@
+CONFIG_BLK_DEV_PMEM=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA180 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA180
new file mode 100644
index 0000000000000000000000000000000000000000..f1c41d2909f026d6642f68b3760d1391eb68b7dd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA180
@@ -0,0 +1 @@
+# CONFIG_BMA180 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA220 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA220
new file mode 100644
index 0000000000000000000000000000000000000000..21f4185459091337e7aac5d29dda6e83fe9fc11c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA220
@@ -0,0 +1 @@
+# CONFIG_BMA220 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA400 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA400
new file mode 100644
index 0000000000000000000000000000000000000000..597efc0837f18d00ba5c5a9592da9a68b846304b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA400
@@ -0,0 +1 @@
+# CONFIG_BMA400 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_ACCEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_ACCEL
new file mode 100644
index 0000000000000000000000000000000000000000..05b1b0025818d0fc61a80a32108a4ce823460c6d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_ACCEL
@@ -0,0 +1 @@
+# CONFIG_BMC150_ACCEL is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..9ee4a72054bbf6d7d1972f4daa09184283a17be2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_I2C
@@ -0,0 +1 @@
+# CONFIG_BMC150_MAGN_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_SPI
new file mode 100644
index 0000000000000000000000000000000000000000..500adb8335e285d06d4ada72d560da359029cff6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_SPI
@@ -0,0 +1 @@
+# CONFIG_BMC150_MAGN_SPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BME680 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BME680
new file mode 100644
index 0000000000000000000000000000000000000000..b14efe57a9a5ed00c0c742ee15ca6a9826e13f45
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BME680
@@ -0,0 +1 @@
+# CONFIG_BME680 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMG160 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMG160
new file mode 100644
index 0000000000000000000000000000000000000000..6406a84d57113a50b22fa5b772801119467a0558
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMG160
@@ -0,0 +1 @@
+# CONFIG_BMG160 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI088_ACCEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI088_ACCEL
new file mode 100644
index 0000000000000000000000000000000000000000..20952f3f557572e4dcf5f5a1492c0c0562f2b1cd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI088_ACCEL
@@ -0,0 +1 @@
+# CONFIG_BMI088_ACCEL is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..f58224424a93ca498f3f3d224d20f284e55c6dbc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_I2C
@@ -0,0 +1 @@
+# CONFIG_BMI160_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_SPI
new file mode 100644
index 0000000000000000000000000000000000000000..9ca7127aed0c209062383c9152ddfb086ba9c014
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_SPI
@@ -0,0 +1 @@
+# CONFIG_BMI160_SPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMP280 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMP280
new file mode 100644
index 0000000000000000000000000000000000000000..e882be513619b24e2546c054545f2e4a0bbb4853
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMP280
@@ -0,0 +1 @@
+# CONFIG_BMP280 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BNA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BNA
new file mode 100644
index 0000000000000000000000000000000000000000..3899b6f1bef1fc2785c048bf44f219d10d3c20bf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BNA
@@ -0,0 +1 @@
+# CONFIG_BNA is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOOT_VESA_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOOT_VESA_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..50aadaed14dc4498e9719bdcfcdbcc1b18cdcc08
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOOT_VESA_SUPPORT
@@ -0,0 +1 @@
+CONFIG_BOOT_VESA_SUPPORT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOSCH_BNO055_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOSCH_BNO055_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..498977ee39ccbff3f71bcc13acf0677867f2c8dd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOSCH_BNO055_I2C
@@ -0,0 +1 @@
+# CONFIG_BOSCH_BNO055_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMDBG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMDBG
new file mode 100644
index 0000000000000000000000000000000000000000..6c88b45b4dd6f82ebc3854d92d2bc76e5965fce0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMDBG
@@ -0,0 +1 @@
+# CONFIG_BRCMDBG is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC
new file mode 100644
index 0000000000000000000000000000000000000000..bd1d084d34770eded13b16895a56ea4c6c493735
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC
@@ -0,0 +1 @@
+CONFIG_BRCMFMAC=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PCIE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PCIE
new file mode 100644
index 0000000000000000000000000000000000000000..7f83fdfbd65b79195ee263c2d8bafd70643eef0a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PCIE
@@ -0,0 +1 @@
+CONFIG_BRCMFMAC_PCIE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_BCDC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_BCDC
new file mode 100644
index 0000000000000000000000000000000000000000..4e5140188d05e5ef8147d6babf85ddeba4884b23
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_BCDC
@@ -0,0 +1 @@
+CONFIG_BRCMFMAC_PROTO_BCDC=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_MSGBUF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_MSGBUF
new file mode 100644
index 0000000000000000000000000000000000000000..d66f945330458ef5e8ff91b61fa1cd0e9016aff4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_MSGBUF
@@ -0,0 +1 @@
+CONFIG_BRCMFMAC_PROTO_MSGBUF=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_SDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_SDIO
new file mode 100644
index 0000000000000000000000000000000000000000..be5c23d2e80c9be082f22a2e45445d3e25fb374d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_SDIO
@@ -0,0 +1 @@
+CONFIG_BRCMFMAC_SDIO=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_USB
new file mode 100644
index 0000000000000000000000000000000000000000..0bcd32d695395f7402faf3eae476de09c5c4c47e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_USB
@@ -0,0 +1 @@
+CONFIG_BRCMFMAC_USB=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC
new file mode 100644
index 0000000000000000000000000000000000000000..92915921ffa1827f53574886a65727f348d8708f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC
@@ -0,0 +1 @@
+CONFIG_BRCMSMAC=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC_LEDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC_LEDS
new file mode 100644
index 0000000000000000000000000000000000000000..b789e04f05ee828103aa619ef03e11644e962140
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC_LEDS
@@ -0,0 +1 @@
+CONFIG_BRCMSMAC_LEDS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMUTIL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMUTIL
new file mode 100644
index 0000000000000000000000000000000000000000..66c5eac06358419dd4fb50d3a2dcf93bc4b40262
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMUTIL
@@ -0,0 +1 @@
+CONFIG_BRCMUTIL=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCM_TRACING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCM_TRACING
new file mode 100644
index 0000000000000000000000000000000000000000..cda166d43237fa87b79b754d8a91b566a56ae048
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCM_TRACING
@@ -0,0 +1 @@
+# CONFIG_BRCM_TRACING is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT
new file mode 100644
index 0000000000000000000000000000000000000000..ec115929740687a60d0e101aaad413e6ee2d23b8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT
@@ -0,0 +1 @@
+CONFIG_BT=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_6LOWPAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_6LOWPAN
new file mode 100644
index 0000000000000000000000000000000000000000..dde565107daddabb56b3b043c84acb9d2a14317b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_6LOWPAN
@@ -0,0 +1 @@
+# CONFIG_BT_6LOWPAN is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_AOSPEXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_AOSPEXT
new file mode 100644
index 0000000000000000000000000000000000000000..e7c3f2c7d5e1ebfac85abf23095af8795068fe35
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_AOSPEXT
@@ -0,0 +1 @@
+# CONFIG_BT_AOSPEXT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_ATH3K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_ATH3K
new file mode 100644
index 0000000000000000000000000000000000000000..7dc74ada07acc6a9e4ad6628ab3c99ecd9d4614b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_ATH3K
@@ -0,0 +1 @@
+CONFIG_BT_ATH3K=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BCM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BCM
new file mode 100644
index 0000000000000000000000000000000000000000..56bb7cf8c1f7a6f98a2fc8e95e5b1372a793378f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BCM
@@ -0,0 +1 @@
+CONFIG_BT_BCM=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP
new file mode 100644
index 0000000000000000000000000000000000000000..36b8e51be3491fb14ad370e0b9050f704e3b4968
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP
@@ -0,0 +1 @@
+CONFIG_BT_BNEP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_MC_FILTER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_MC_FILTER
new file mode 100644
index 0000000000000000000000000000000000000000..fc4bf7c14ece1f192b6eca9190fb8e3137b50139
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_MC_FILTER
@@ -0,0 +1 @@
+CONFIG_BT_BNEP_MC_FILTER=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_PROTO_FILTER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_PROTO_FILTER
new file mode 100644
index 0000000000000000000000000000000000000000..0cfac59b0417ba79b3b8e6eb4b09e838060147fc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_PROTO_FILTER
@@ -0,0 +1 @@
+CONFIG_BT_BNEP_PROTO_FILTER=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BREDR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BREDR
new file mode 100644
index 0000000000000000000000000000000000000000..c7b6f3ddb64c942f032557b7e33c7f1a13448199
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BREDR
@@ -0,0 +1 @@
+CONFIG_BT_BREDR=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_CMTP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_CMTP
new file mode 100644
index 0000000000000000000000000000000000000000..6fe276815d80e71f96e6953467d8263e573c6fe1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_CMTP
@@ -0,0 +1 @@
+CONFIG_BT_CMTP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_DEBUGFS
new file mode 100644
index 0000000000000000000000000000000000000000..a133412837775c93c6d2a4ffd9fff270b7d18221
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_DEBUGFS
@@ -0,0 +1 @@
+CONFIG_BT_DEBUGFS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM203X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM203X
new file mode 100644
index 0000000000000000000000000000000000000000..08b55441f4e6723e13f6a20f8156f108ff284fc3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM203X
@@ -0,0 +1 @@
+CONFIG_BT_HCIBCM203X=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM4377 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM4377
new file mode 100644
index 0000000000000000000000000000000000000000..c1e35e11b91d6a32e1c78d3f12df19e1da350cc3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM4377
@@ -0,0 +1 @@
+# CONFIG_BT_HCIBCM4377 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBFUSB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBFUSB
new file mode 100644
index 0000000000000000000000000000000000000000..14a07fa0e9f9fc6ac1d07e4ab53deab6b7f23905
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBFUSB
@@ -0,0 +1 @@
+CONFIG_BT_HCIBFUSB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBPA10X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBPA10X
new file mode 100644
index 0000000000000000000000000000000000000000..fec109e4406989a9ed13b22b03abaffcb5d3c539
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBPA10X
@@ -0,0 +1 @@
+CONFIG_BT_HCIBPA10X=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTSDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTSDIO
new file mode 100644
index 0000000000000000000000000000000000000000..82ea86e4902a9813ac7a9eac5dfa35fddcf2a778
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTSDIO
@@ -0,0 +1 @@
+CONFIG_BT_HCIBTSDIO=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB
new file mode 100644
index 0000000000000000000000000000000000000000..87df53377e717e13cf63b3d1928584929f1e91c7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB
@@ -0,0 +1 @@
+CONFIG_BT_HCIBTUSB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_AUTOSUSPEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_AUTOSUSPEND
new file mode 100644
index 0000000000000000000000000000000000000000..76c0b541b3faf481554bf48c8ea259d90d61d26f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_AUTOSUSPEND
@@ -0,0 +1 @@
+CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_BCM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_BCM
new file mode 100644
index 0000000000000000000000000000000000000000..61e55a376fdab22f012352b0910711e13fa49ec1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_BCM
@@ -0,0 +1 @@
+CONFIG_BT_HCIBTUSB_BCM=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_MTK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_MTK
new file mode 100644
index 0000000000000000000000000000000000000000..a01298616a8c24ae957c38f9f1e71f3ef44a54db
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_MTK
@@ -0,0 +1 @@
+# CONFIG_BT_HCIBTUSB_MTK is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_POLL_SYNC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_POLL_SYNC
new file mode 100644
index 0000000000000000000000000000000000000000..d4d28bc87933f94ca2de99bc9d5aec992fedf4d7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_POLL_SYNC
@@ -0,0 +1 @@
+CONFIG_BT_HCIBTUSB_POLL_SYNC=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_RTL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_RTL
new file mode 100644
index 0000000000000000000000000000000000000000..0ea9fa578df0cb5aad6026fdc21484ab45d78996
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_RTL
@@ -0,0 +1 @@
+CONFIG_BT_HCIBTUSB_RTL=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART
new file mode 100644
index 0000000000000000000000000000000000000000..a22748bb0f2c66e04d1c635c85d550ec93ec7e92
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART
@@ -0,0 +1 @@
+CONFIG_BT_HCIUART=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_AG6XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_AG6XX
new file mode 100644
index 0000000000000000000000000000000000000000..c711a76c537fd293c8b3c3dca4ca85296f1f1783
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_AG6XX
@@ -0,0 +1 @@
+# CONFIG_BT_HCIUART_AG6XX is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_ATH3K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_ATH3K
new file mode 100644
index 0000000000000000000000000000000000000000..818b4f4f48db7a1aed57a07208b87c3bea6be8c0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_ATH3K
@@ -0,0 +1 @@
+CONFIG_BT_HCIUART_ATH3K=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_BCSP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_BCSP
new file mode 100644
index 0000000000000000000000000000000000000000..c90c17852a167540701c57428c29a33a50c5c116
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_BCSP
@@ -0,0 +1 @@
+CONFIG_BT_HCIUART_BCSP=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_H4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_H4
new file mode 100644
index 0000000000000000000000000000000000000000..813998955f94572bbc99192083703fb86ec43db8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_H4
@@ -0,0 +1 @@
+CONFIG_BT_HCIUART_H4=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_INTEL
new file mode 100644
index 0000000000000000000000000000000000000000..18f6e54ada58c2007ae0acb0bb196166f9c5010a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_INTEL
@@ -0,0 +1 @@
+# CONFIG_BT_HCIUART_INTEL is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIVHCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIVHCI
new file mode 100644
index 0000000000000000000000000000000000000000..caba0cdc8c57ce130918c03ebb2bc97fbabe5f3f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIVHCI
@@ -0,0 +1 @@
+CONFIG_BT_HCIVHCI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HIDP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HIDP
new file mode 100644
index 0000000000000000000000000000000000000000..b555d07c30fe4d672c6410740751f80d59ce76e3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HIDP
@@ -0,0 +1 @@
+CONFIG_BT_HIDP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_INTEL
new file mode 100644
index 0000000000000000000000000000000000000000..876bc4f979bc1fb27ebff3668996d85675003a6c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_INTEL
@@ -0,0 +1 @@
+CONFIG_BT_INTEL=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE
new file mode 100644
index 0000000000000000000000000000000000000000..e4bd7f9acfb751a968fb8bba1582619b73c8646a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE
@@ -0,0 +1 @@
+CONFIG_BT_LE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LEDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LEDS
new file mode 100644
index 0000000000000000000000000000000000000000..dabfa7971f3f0db711d863d271a8937af87db18b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LEDS
@@ -0,0 +1 @@
+# CONFIG_BT_LEDS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE_L2CAP_ECRED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE_L2CAP_ECRED
new file mode 100644
index 0000000000000000000000000000000000000000..290fa0aa335ec6dd240c6cace75e4b64230f67c6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE_L2CAP_ECRED
@@ -0,0 +1 @@
+CONFIG_BT_LE_L2CAP_ECRED=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL
new file mode 100644
index 0000000000000000000000000000000000000000..a22a81e3ca320b5ca6b8a2fef1c94da8eff471cd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL
@@ -0,0 +1 @@
+CONFIG_BT_MRVL=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL_SDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL_SDIO
new file mode 100644
index 0000000000000000000000000000000000000000..9b96ff020db2c72b028dd338eeddb8a0b001ebfb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL_SDIO
@@ -0,0 +1 @@
+CONFIG_BT_MRVL_SDIO=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MSFTEXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MSFTEXT
new file mode 100644
index 0000000000000000000000000000000000000000..7fae871b2426994f4a0b021d00a1bafcb54bcb28
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MSFTEXT
@@ -0,0 +1 @@
+# CONFIG_BT_MSFTEXT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MTKSDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MTKSDIO
new file mode 100644
index 0000000000000000000000000000000000000000..528c870c2a940db53858f7aa94fa399992c66c5f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MTKSDIO
@@ -0,0 +1 @@
+# CONFIG_BT_MTKSDIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM
new file mode 100644
index 0000000000000000000000000000000000000000..a203466fc7cb2b360f4e74a5b74fc87c8b814378
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM
@@ -0,0 +1 @@
+CONFIG_BT_RFCOMM=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM_TTY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM_TTY
new file mode 100644
index 0000000000000000000000000000000000000000..b3bbd06342aebafcfa38d6edfc5ee6f2dc0dc8bc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM_TTY
@@ -0,0 +1 @@
+CONFIG_BT_RFCOMM_TTY=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RTL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RTL
new file mode 100644
index 0000000000000000000000000000000000000000..7ff5bef0400b2b36a46d1d32f9dc45c87e11f3a7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RTL
@@ -0,0 +1 @@
+CONFIG_BT_RTL=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_SELFTEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_SELFTEST
new file mode 100644
index 0000000000000000000000000000000000000000..e7b42ec08708963b4159f831faad4ff1c2e1d59e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_SELFTEST
@@ -0,0 +1 @@
+# CONFIG_BT_SELFTEST is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_VIRTIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_VIRTIO
new file mode 100644
index 0000000000000000000000000000000000000000..a31bdc88d5da1b2da8d1343a5ae183e96733be2b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_VIRTIO
@@ -0,0 +1 @@
+# CONFIG_BT_VIRTIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BUILDTIME_MCOUNT_SORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BUILDTIME_MCOUNT_SORT
new file mode 100644
index 0000000000000000000000000000000000000000..2990b20d629fb84ad94644ad1836d1f97650397e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BUILDTIME_MCOUNT_SORT
@@ -0,0 +1 @@
+CONFIG_BUILDTIME_MCOUNT_SORT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_PADDING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_PADDING
new file mode 100644
index 0000000000000000000000000000000000000000..bc0c12b17fe296894324bb25d796b0eb39fee7de
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_PADDING
@@ -0,0 +1 @@
+CONFIG_CALL_PADDING=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS
new file mode 100644
index 0000000000000000000000000000000000000000..48aa2967be007ac20290eef735d188712c51de0a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS
@@ -0,0 +1 @@
+CONFIG_CALL_THUNKS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..d0cb45d63457db5da82eb11624a33e4042e0d13d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS_DEBUG
@@ -0,0 +1 @@
+# CONFIG_CALL_THUNKS_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CAPI_TRACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CAPI_TRACE
new file mode 100644
index 0000000000000000000000000000000000000000..a2de337f37e0c76919fe8b69fd0066754ab75e9e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CAPI_TRACE
@@ -0,0 +1 @@
+CONFIG_CAPI_TRACE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CARL9170 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CARL9170
new file mode 100644
index 0000000000000000000000000000000000000000..cff12d58adb8c34f92f289303dde939c4347993c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CARL9170
@@ -0,0 +1 @@
+# CONFIG_CARL9170 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CCS811 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CCS811
new file mode 100644
index 0000000000000000000000000000000000000000..931f14e2ac6f4cd9b51f9106037760e06e6c3a53
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CCS811
@@ -0,0 +1 @@
+# CONFIG_CCS811 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_ENTRY_PADDING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_ENTRY_PADDING
new file mode 100644
index 0000000000000000000000000000000000000000..e86c4e63aadf83d49a610e966fab00bb5b9d1925
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_ENTRY_PADDING
@@ -0,0 +1 @@
+CONFIG_CC_HAS_ENTRY_PADDING=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_IBT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_IBT
new file mode 100644
index 0000000000000000000000000000000000000000..eeba04d6d758ae2a810d8abc193a1c5f01798e84
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_IBT
@@ -0,0 +1 @@
+CONFIG_CC_HAS_IBT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_RETURN_THUNK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_RETURN_THUNK
new file mode 100644
index 0000000000000000000000000000000000000000..26513cd1e173c2ab9dcc46014cf31b03cf078513
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_RETURN_THUNK
@@ -0,0 +1 @@
+CONFIG_CC_HAS_RETURN_THUNK=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SANE_STACKPROTECTOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SANE_STACKPROTECTOR
new file mode 100644
index 0000000000000000000000000000000000000000..1fa5f625dec70661e2a195d1c1595021394c6bcf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SANE_STACKPROTECTOR
@@ -0,0 +1 @@
+CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SLS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SLS
new file mode 100644
index 0000000000000000000000000000000000000000..e5ea0a51769619b0a39653e4cbbee69c02a6d60f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SLS
@@ -0,0 +1 @@
+CONFIG_CC_HAS_SLS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_CH7322 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_CH7322
new file mode 100644
index 0000000000000000000000000000000000000000..7433b698fa244339b756b0e92348904a1aad15f3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_CH7322
@@ -0,0 +1 @@
+# CONFIG_CEC_CH7322 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..96304804583cf7705133cf5f33114b8391dd3f59
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_GPIO
@@ -0,0 +1 @@
+# CONFIG_CEC_GPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_SECO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_SECO
new file mode 100644
index 0000000000000000000000000000000000000000..6b713c371e945c46baacdbb97f44244f769e3343
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_SECO
@@ -0,0 +1 @@
+# CONFIG_CEC_SECO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CHROME_PLATFORMS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CHROME_PLATFORMS
new file mode 100644
index 0000000000000000000000000000000000000000..e7b940cfc3b346f783895cecc6ea3ea09cc7d412
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CHROME_PLATFORMS
@@ -0,0 +1 @@
+# CONFIG_CHROME_PLATFORMS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKBLD_I8253 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKBLD_I8253
new file mode 100644
index 0000000000000000000000000000000000000000..7c405b82f660a81817004b3ce1103d5c798ef5cb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKBLD_I8253
@@ -0,0 +1 @@
+CONFIG_CLKBLD_I8253=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKEVT_I8253 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKEVT_I8253
new file mode 100644
index 0000000000000000000000000000000000000000..3ed355dfeb40bbdec4aedc43ad6f8442a234a994
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKEVT_I8253
@@ -0,0 +1 @@
+CONFIG_CLKEVT_I8253=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
new file mode 100644
index 0000000000000000000000000000000000000000..9cefd31ed4f622f731b67385b76017f89bbaaf3a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
@@ -0,0 +1 @@
+CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..7ae919a190fefb78ee40eeedb5b10369893ef40f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG
@@ -0,0 +1 @@
+CONFIG_CLOCKSOURCE_WATCHDOG=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
new file mode 100644
index 0000000000000000000000000000000000000000..22d8ef24cbd7687cb1c5dc5396870b83f37e95ec
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
@@ -0,0 +1 @@
+CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US=125
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM32181 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM32181
new file mode 100644
index 0000000000000000000000000000000000000000..706f9fc70667ea865cd28810f5d2ee18e138f68b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM32181
@@ -0,0 +1 @@
+# CONFIG_CM32181 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3232 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3232
new file mode 100644
index 0000000000000000000000000000000000000000..12580fd2fa60afdb6b783798b81a7e4b7edf0d73
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3232
@@ -0,0 +1 @@
+# CONFIG_CM3232 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3323 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3323
new file mode 100644
index 0000000000000000000000000000000000000000..1337712578a26e87aa68965c0ec9a8aef296722a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3323
@@ -0,0 +1 @@
+# CONFIG_CM3323 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3605 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3605
new file mode 100644
index 0000000000000000000000000000000000000000..be5dca2eab02a383714bb884e021b45e5b90274c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3605
@@ -0,0 +1 @@
+# CONFIG_CM3605 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM36651 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM36651
new file mode 100644
index 0000000000000000000000000000000000000000..5ad3dd89d41ea7d9a4ce0695153f47a37d7e21c2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM36651
@@ -0,0 +1 @@
+# CONFIG_CM36651 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CMDLINE_BOOL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CMDLINE_BOOL
new file mode 100644
index 0000000000000000000000000000000000000000..ca23a06ea98dbcccc9c8b24a79c9803c437535f1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CMDLINE_BOOL
@@ -0,0 +1 @@
+# CONFIG_CMDLINE_BOOL is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAL_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAL_LAPTOP
new file mode 100644
index 0000000000000000000000000000000000000000..84994e1774e11a02dd862dd56f51191002e0ea0f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAL_LAPTOP
@@ -0,0 +1 @@
+CONFIG_COMPAL_LAPTOP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_32 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_32
new file mode 100644
index 0000000000000000000000000000000000000000..15ce6df157fa2cd0e2d79677a245d17c23973576
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_32
@@ -0,0 +1 @@
+CONFIG_COMPAT_32=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_FOR_U64_ALIGNMENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_FOR_U64_ALIGNMENT
new file mode 
100644 index 0000000000000000000000000000000000000000..9af0f279bc4b9933daadc05742c5f93ed1592f30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_FOR_U64_ALIGNMENT @@ -0,0 +1 @@ +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPA_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPA_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..d87a4d23583f10db35b15a5d7d6857092e495f8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPA_DEBUG @@ -0,0 +1 @@ +# CONFIG_CPA_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU5_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU5_WDT new file mode 100644 index 0000000000000000000000000000000000000000..14616751777491deb416761a2013bfa670d5235b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU5_WDT @@ -0,0 +1 @@ +# CONFIG_CPU5_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU_SUP_CENTAUR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU_SUP_CENTAUR new file mode 100644 index 0000000000000000000000000000000000000000..d320ee4cd6ee99243f288e6a84d9b223cef71446 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU_SUP_CENTAUR @@ -0,0 +1 @@ +CONFIG_CPU_SUP_CENTAUR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRASH_MAX_MEMORY_RANGES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRASH_MAX_MEMORY_RANGES new file mode 100644 index 0000000000000000000000000000000000000000..2c57448707cd3d4a9b428a804b9e7b04739034e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRASH_MAX_MEMORY_RANGES @@ -0,0 +1 @@ +CONFIG_CRASH_MAX_MEMORY_RANGES=8192 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64 new file mode 100644 index 0000000000000000000000000000000000000000..0da0f270722301215711794266913e9bb5708b80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64 @@ -0,0 +1 @@ +CONFIG_CRC64=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64_ROCKSOFT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64_ROCKSOFT new file mode 100644 index 0000000000000000000000000000000000000000..4b85a46ebe6f3a30e63543ca878f9dd5c75a6011 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64_ROCKSOFT @@ -0,0 +1 @@ +CONFIG_CRC64_ROCKSOFT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S new file mode 100644 index 0000000000000000000000000000000000000000..dcfccc3ddc0ab6a0dc6c16c78a2824d5d95fefb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S @@ -0,0 +1 @@ +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519 new file mode 100644 index 0000000000000000000000000000000000000000..b8d4160933c6f0ec72122ca3c788d00d5ec9801e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519 @@ -0,0 +1 @@ +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..1185030737e7ab1a397880340724766b8f218a62 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..5c153ec358f288ad12f7b441e9986bbe60cdfedf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 new file mode 100644 index 0000000000000000000000000000000000000000..a39408b1cd41a188643c9d5d3829ace5ad8a3d64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_CRC64_ROCKSOFT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_CRC64_ROCKSOFT new file mode 100644 index 0000000000000000000000000000000000000000..ee01701c7776223fe4636a578e3c240dfc007e2d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_CRC64_ROCKSOFT @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRC64_ROCKSOFT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK new file mode 100644 index 0000000000000000000000000000000000000000..8a6e8e6cb79ec26bd797cb331a1cd0f6a3abd919 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_PADLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_AES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_AES new file mode 100644 index 0000000000000000000000000000000000000000..fed2a47ff27a138492f6228349d92e53dfda6d46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_AES @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_PADLOCK_AES=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_SHA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_SHA new file mode 100644 index 0000000000000000000000000000000000000000..7f043e865ebaed3b8a34c4065784d93188c75b1f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_SHA @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_4XXX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_4XXX new file mode 100644 index 0000000000000000000000000000000000000000..64a85cdcb962cbb97a0b1b5c72e29dfa9d3b8d0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_4XXX @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_4XXX=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION new file mode 100644 index 0000000000000000000000000000000000000000..eb5cbd1e34b438a204c5324ce2d2d673cd701a50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_LIB_POLY1305_RSIZE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_LIB_POLY1305_RSIZE new file mode 100644 index 0000000000000000000000000000000000000000..ca84f9442991f105145df1f76c10ae0b2c211ceb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_LIB_POLY1305_RSIZE @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_POLYVAL_CLMUL_NI 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_POLYVAL_CLMUL_NI new file mode 100644 index 0000000000000000000000000000000000000000..cb55de79115214f7de12d0f083a98dcc5876e456 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_POLYVAL_CLMUL_NI @@ -0,0 +1 @@ +# CONFIG_CRYPTO_POLYVAL_CLMUL_NI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CX_ECAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CX_ECAT new file mode 100644 index 0000000000000000000000000000000000000000..f1dda12b0127de9d2b85c7d94587537127053743 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CX_ECAT @@ -0,0 +1 @@ +# CONFIG_CX_ECAT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA280 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA280 new file mode 100644 index 0000000000000000000000000000000000000000..deab20f930f60f933139afb0db2c71ec16b5ea5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA280 @@ -0,0 +1 @@ +# CONFIG_DA280 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA311 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA311 new file mode 100644 index 0000000000000000000000000000000000000000..f83efb28804edb768cb68c39bc4831c7e2b868b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA311 @@ -0,0 +1 @@ +# CONFIG_DA311 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DCA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DCA new file mode 100644 index 0000000000000000000000000000000000000000..6ebff19925ddb040a142f01dac9cd019c09c3f3b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DCA @@ -0,0 +1 @@ +CONFIG_DCA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DE2104X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DE2104X new file mode 100644 index 0000000000000000000000000000000000000000..e43c05e6a897d50af5b39289e040f00d4a291b60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DE2104X @@ -0,0 +1 @@ +# CONFIG_DE2104X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_ENTRY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_ENTRY new file mode 100644 index 0000000000000000000000000000000000000000..88cb6da0863dd98895fc96994370b66ddd046cba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_ENTRY @@ -0,0 +1 @@ +# CONFIG_DEBUG_ENTRY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP new file mode 100644 index 0000000000000000000000000000000000000000..3bb67a6665b4c5ed1b2e4206404522422708f950 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP @@ -0,0 +1 @@ +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_NMI_SELFTEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_NMI_SELFTEST new file mode 100644 index 0000000000000000000000000000000000000000..4ea92fec1e8bb556bc5c17d9babae4cb61b5fcc2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_NMI_SELFTEST @@ -0,0 +1 @@ +# CONFIG_DEBUG_NMI_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_TLBFLUSH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_TLBFLUSH new file mode 100644 index 0000000000000000000000000000000000000000..6544b79a85a4783f9c6de96be4c9099013319fd5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_TLBFLUSH @@ -0,0 +1 @@ +# CONFIG_DEBUG_TLBFLUSH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PASSIVE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PASSIVE new file mode 100644 index 
0000000000000000000000000000000000000000..5f80876fcda4b1e87434bc346683ca72b72376f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PASSIVE @@ -0,0 +1 @@ +# CONFIG_DEVFREQ_GOV_PASSIVE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PERFORMANCE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PERFORMANCE new file mode 100644 index 0000000000000000000000000000000000000000..b79a05cf1306d093c9b2aea83f0ab96173bb9553 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PERFORMANCE @@ -0,0 +1 @@ +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_POWERSAVE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_POWERSAVE new file mode 100644 index 0000000000000000000000000000000000000000..5b1218239c633fe32fa31831910efba863639b6c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_POWERSAVE @@ -0,0 +1 @@ +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND new file mode 100644 index 0000000000000000000000000000000000000000..ca7663b8301ed9d616fcf5d0189bd6a162663479 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND @@ -0,0 +1 @@ +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_USERSPACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_USERSPACE new file mode 100644 index 0000000000000000000000000000000000000000..001f636a572db111de5d40e24d1ce72568592cea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_USERSPACE @@ -0,0 +1 @@ +# CONFIG_DEVFREQ_GOV_USERSPACE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_THERMAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_THERMAL new file mode 100644 index 0000000000000000000000000000000000000000..5b90f90828f40c92d9565198215aec40e32c613a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_THERMAL @@ -0,0 +1 @@ +# CONFIG_DEVFREQ_THERMAL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEV_COREDUMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEV_COREDUMP new file mode 100644 index 0000000000000000000000000000000000000000..eb4e4395cb18b3e3cfed91fb851b1c39dab6ee8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEV_COREDUMP @@ -0,0 +1 @@ +CONFIG_DEV_COREDUMP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DHT11 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DHT11 new file mode 100644 index 0000000000000000000000000000000000000000..877b95de0ab56e9a10e347438ff2671da340b0a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DHT11 @@ -0,0 +1 @@ +# CONFIG_DHT11 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DLHL60D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DLHL60D new file mode 100644 index 0000000000000000000000000000000000000000..031f3d2e3a493de1c0c4d7df97fe9c5842e47835 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DLHL60D @@ -0,0 +1 @@ +# CONFIG_DLHL60D is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DM9102 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DM9102 new file mode 100644 index 0000000000000000000000000000000000000000..e9c58e76dff0549f7a7d5c81187bc10ed908d007 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DM9102 @@ -0,0 +1 @@ +# CONFIG_DM9102 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD06 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD06 new file mode 100644 
index 0000000000000000000000000000000000000000..0d22f625b6fa67d56572c402991a7ac6de88ca39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD06 @@ -0,0 +1 @@ +# CONFIG_DMARD06 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD09 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD09 new file mode 100644 index 0000000000000000000000000000000000000000..cdf8d6bad89a10e8411ddca40f15bf4a5fd0bace --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD09 @@ -0,0 +1 @@ +# CONFIG_DMARD09 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD10 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD10 new file mode 100644 index 0000000000000000000000000000000000000000..fdffba901ab84171a063d639b6a7ff14d5365b9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD10 @@ -0,0 +1 @@ +# CONFIG_DMARD10 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMAR_TABLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMAR_TABLE new file mode 100644 index 0000000000000000000000000000000000000000..97672685d8646970d40a4af73991a56bf1084e43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMAR_TABLE @@ -0,0 +1 @@ +CONFIG_DMAR_TABLE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMA_VIRTUAL_CHANNELS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMA_VIRTUAL_CHANNELS new file mode 100644 index 0000000000000000000000000000000000000000..2471fccb8c68ac198d0e555dc4e6fb2a2c2f5a81 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMA_VIRTUAL_CHANNELS @@ -0,0 +1 @@ +CONFIG_DMA_VIRTUAL_CHANNELS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK new file mode 100644 index 0000000000000000000000000000000000000000..f72f8c5e256716da25bcb253e3bd1bff2e98c0fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK @@ -0,0 +1 @@ +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPOT_DAC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPOT_DAC new file mode 100644 index 0000000000000000000000000000000000000000..78bc08ab5160464ae410109d5556668849227186 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPOT_DAC @@ -0,0 +1 @@ +# CONFIG_DPOT_DAC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPS310 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPS310 new file mode 100644 index 0000000000000000000000000000000000000000..1a46ce6c0415d07159a70df18836772936e62388 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPS310 @@ -0,0 +1 @@ +# CONFIG_DPS310 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_DP_CEC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_DP_CEC new file mode 100644 index 0000000000000000000000000000000000000000..f0dbdcca2ed468eb988633baaf75e15a9b581aa9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_DP_CEC @@ -0,0 +1 @@ +# CONFIG_DRM_DP_CEC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_NXP_TDA998X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_NXP_TDA998X new file mode 100644 index 0000000000000000000000000000000000000000..b04038c91499235a0d5ab730c6c7cf24adf579bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_NXP_TDA998X @@ -0,0 +1 @@ +# CONFIG_DRM_I2C_NXP_TDA998X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_SIL164 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_SIL164 new file mode 100644 index 
0000000000000000000000000000000000000000..fb5fc4d67153dd1535369d0fe9143b11b0ee4394 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_SIL164 @@ -0,0 +1 @@ +CONFIG_DRM_I2C_SIL164=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE new file mode 100644 index 0000000000000000000000000000000000000000..d545eec6d5d762531e27fb22a7740d89dea19741 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE @@ -0,0 +1 @@ +CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE=7500 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_REQUEST_TIMEOUT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_REQUEST_TIMEOUT new file mode 100644 index 0000000000000000000000000000000000000000..f67cc1be19b832fe824fb4813997545c557b5669 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_REQUEST_TIMEOUT @@ -0,0 +1 @@ +CONFIG_DRM_I915_REQUEST_TIMEOUT=20000 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_MIPI_DSI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_MIPI_DSI new file mode 100644 index 0000000000000000000000000000000000000000..3b3739e51aee8e974d68e849dff95281e14e3139 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_MIPI_DSI @@ -0,0 +1 @@ +CONFIG_DRM_MIPI_DSI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN new file mode 100644 index 0000000000000000000000000000000000000000..48ef8d46b8564f030ee4b22ce62f002c2b59c1b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PRIVACY_SCREEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PRIVACY_SCREEN new file mode 100644 index 0000000000000000000000000000000000000000..7823f34185753e039bed2bf2785926a8aad580c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PRIVACY_SCREEN @@ -0,0 +1 @@ +CONFIG_DRM_PRIVACY_SCREEN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VBOXVIDEO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VBOXVIDEO new file mode 100644 index 0000000000000000000000000000000000000000..8cd637a0c076044ff26f7ccd5493cda8e643a49c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VBOXVIDEO @@ -0,0 +1 @@ +# CONFIG_DRM_VBOXVIDEO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VMWGFX_MKSSTATS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VMWGFX_MKSSTATS new file mode 100644 index 0000000000000000000000000000000000000000..429edecedefa1f2733b7665dcbe073531871b25c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VMWGFX_MKSSTATS @@ -0,0 +1 @@ +# CONFIG_DRM_VMWGFX_MKSSTATS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_XEN_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_XEN_FRONTEND new file mode 100644 index 0000000000000000000000000000000000000000..47be407b1134a418c8f467ce7251bb2a2e954db1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_XEN_FRONTEND @@ -0,0 +1 @@ +# CONFIG_DRM_XEN_FRONTEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS1803 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS1803 new file mode 100644 index 0000000000000000000000000000000000000000..04e81cfe1e82621a5011be0d5e30b6c78ad19425 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS1803 @@ -0,0 +1 @@ +# CONFIG_DS1803 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS4424 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS4424 new file mode 100644 index 0000000000000000000000000000000000000000..199b37762c734c8cb358ccb721b5e9a7ff2fa01f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS4424 @@ -0,0 +1 @@ +# CONFIG_DS4424 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DW_DMAC_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DW_DMAC_CORE new file mode 100644 index 0000000000000000000000000000000000000000..110558a22025fbabced85260c1f7fcc10e9d8481 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DW_DMAC_CORE @@ -0,0 +1 @@ +CONFIG_DW_DMAC_CORE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_FTRACE_WITH_REGS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_FTRACE_WITH_REGS new file mode 100644 index 0000000000000000000000000000000000000000..49bcef1aca7f6428c692eb9fe32238693b4cfcdd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_FTRACE_WITH_REGS @@ -0,0 +1 @@ +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_MEMORY_LAYOUT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_MEMORY_LAYOUT new file mode 100644 index 0000000000000000000000000000000000000000..d05a49d8b29790530aa89ae1fe2394922be6a363 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_MEMORY_LAYOUT @@ -0,0 +1 @@ +CONFIG_DYNAMIC_MEMORY_LAYOUT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EARLY_PRINTK_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EARLY_PRINTK_USB new file mode 100644 index 0000000000000000000000000000000000000000..47eaad0217b1dd45b8d69a62066792d48353aeaa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EARLY_PRINTK_USB @@ -0,0 +1 @@ +CONFIG_EARLY_PRINTK_USB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EBC_C384_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EBC_C384_WDT new file mode 100644 index 0000000000000000000000000000000000000000..262928b23a0d48f5337e064f1d02c551e4a6a5e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EBC_C384_WDT @@ -0,0 +1 @@ +# CONFIG_EBC_C384_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_ATOMIC_SCRUB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_ATOMIC_SCRUB new file mode 100644 index 0000000000000000000000000000000000000000..0ca60f8d6371581355b6204bd94001688b4d20ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_ATOMIC_SCRUB @@ -0,0 +1 @@ +CONFIG_EDAC_ATOMIC_SCRUB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IE31200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IE31200 new file mode 100644 index 0000000000000000000000000000000000000000..1d4b071dea2b5734e59f1180194210a4d8483632 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IE31200 @@ -0,0 +1 @@ +CONFIG_EDAC_IE31200=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IGEN6 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IGEN6 new file mode 100644 index 0000000000000000000000000000000000000000..88b22f502c82d4208ea5bbb44e313d28ea1a7650 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IGEN6 @@ -0,0 +1 @@ +# CONFIG_EDAC_IGEN6 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_PND2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_PND2 new file mode 100644 index 0000000000000000000000000000000000000000..d0b59df60e9e84f34e4208428fa19a2cafa8cb95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_PND2 @@ -0,0 +1 @@ +CONFIG_EDAC_PND2=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_X38 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_X38 new file mode 100644 index 0000000000000000000000000000000000000000..710ecf317b5780973761780e3c0882edeec73b1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_X38 @@ -0,0 +1 @@ +CONFIG_EDAC_X38=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDD_OFF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDD_OFF new file mode 100644 index 0000000000000000000000000000000000000000..8e6d5587136240cb3f810d6b97079d230b82d88a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDD_OFF @@ -0,0 +1 @@ +# CONFIG_EDD_OFF is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_LAPTOP new file mode 100644 index 0000000000000000000000000000000000000000..b0fe48b006fd3e530aaea604f5ec1d5a71be1c15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_LAPTOP @@ -0,0 +1 @@ +CONFIG_EEEPC_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_WMI new file mode 100644 index 0000000000000000000000000000000000000000..bd7f00ece0d631490b3b10123b65e16250c121bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_WMI @@ -0,0 +1 @@ +CONFIG_EEEPC_WMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_DXE_MEM_ATTRIBUTES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_DXE_MEM_ATTRIBUTES new file mode 100644 index 0000000000000000000000000000000000000000..17ce2878b0bee648b88d23ba50d95d91d0b6856c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_DXE_MEM_ATTRIBUTES @@ -0,0 +1 @@ +CONFIG_EFI_DXE_MEM_ATTRIBUTES=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_FAKE_MEMMAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_FAKE_MEMMAP new file mode 100644 index 0000000000000000000000000000000000000000..316a93b97aee0c1c9f0af24312fd7abb3b67467a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_FAKE_MEMMAP @@ -0,0 +1 @@ +# CONFIG_EFI_FAKE_MEMMAP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_HANDOVER_PROTOCOL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_HANDOVER_PROTOCOL new file mode 100644 index 0000000000000000000000000000000000000000..1beee118e043bba99c2232c1c8cabaf72d00aa80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_HANDOVER_PROTOCOL @@ -0,0 +1 @@ +CONFIG_EFI_HANDOVER_PROTOCOL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_PGT_DUMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_PGT_DUMP new file mode 100644 index 0000000000000000000000000000000000000000..c7e9f9f088b96fe1f1be87604f8f7fe3272909f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_PGT_DUMP @@ -0,0 +1 @@ +# CONFIG_EFI_PGT_DUMP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EISA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EISA new file mode 100644 index 0000000000000000000000000000000000000000..01c95e39f4702ba254a6ccd92f686693f33749a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EISA @@ -0,0 +1 @@ +# CONFIG_EISA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENIC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENIC new file mode 100644 index 0000000000000000000000000000000000000000..5c206ee501398d27f344d86a567ff44a2188c9c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENIC @@ -0,0 +1 @@ +CONFIG_ENIC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENVELOPE_DETECTOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENVELOPE_DETECTOR new file mode 100644 index 0000000000000000000000000000000000000000..ac79f531ac6a119280e2891efd9a1a9a9f339ecc --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENVELOPE_DETECTOR @@ -0,0 +1 @@ +# CONFIG_ENVELOPE_DETECTOR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EUROTECH_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EUROTECH_WDT new file mode 100644 index 0000000000000000000000000000000000000000..08f35862f63f965b149a836aea12cd3171b0a523 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EUROTECH_WDT @@ -0,0 +1 @@ +# CONFIG_EUROTECH_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EXAR_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EXAR_WDT new file mode 100644 index 0000000000000000000000000000000000000000..3b782c1737cc5a4f9e003b410483b310d2a22e3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EXAR_WDT @@ -0,0 +1 @@ +# CONFIG_EXAR_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_F71808E_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_F71808E_WDT new file mode 100644 index 0000000000000000000000000000000000000000..9fc51eb65135ecf2470d2f12b460031527cd0742 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_F71808E_WDT @@ -0,0 +1 @@ +CONFIG_F71808E_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_ARC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_ARC new file mode 100644 index 0000000000000000000000000000000000000000..1d66b92f45c1a9f141b4f14ad5ef6d1cbe51ba67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_ARC @@ -0,0 +1 @@ +# CONFIG_FB_ARC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_HGA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_HGA new file mode 100644 index 0000000000000000000000000000000000000000..3c3142a1e7e66fc3bb9b299035bb359503b41c78 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_HGA @@ -0,0 +1 @@ +# CONFIG_FB_HGA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_LE80578 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_LE80578 new file mode 100644 index 0000000000000000000000000000000000000000..327044a84a9032405052599318d5cde74340d029 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_LE80578 @@ -0,0 +1 @@ +# CONFIG_FB_LE80578 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_N411 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_N411 new file mode 100644 index 0000000000000000000000000000000000000000..50f6f3584e845bfc9727b838715e887dc21fbcfe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_N411 @@ -0,0 +1 @@ +# CONFIG_FB_N411 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_SM501 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_SM501 new file mode 100644 index 0000000000000000000000000000000000000000..8ee07d09d2a11823abb8dd4c9a62e93b4f0df115 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_SM501 @@ -0,0 +1 @@ +# CONFIG_FB_SM501 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VGA16 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VGA16 new file mode 100644 index 0000000000000000000000000000000000000000..d933f13d7a84bde5681d675c8c97826455f5f627 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VGA16 @@ -0,0 +1 @@ +# CONFIG_FB_VGA16 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VIA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VIA new file mode 100644 index 0000000000000000000000000000000000000000..d34242c33dd984364c42f2c2d967120c2d1f1b86 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VIA @@ -0,0 +1 @@ +# CONFIG_FB_VIA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE new file mode 100644 
index 0000000000000000000000000000000000000000..7e1163b282bb0eee5fd0689fe5afa50f6448584f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE @@ -0,0 +1 @@ +CONFIG_FIREWIRE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_NET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_NET new file mode 100644 index 0000000000000000000000000000000000000000..5a9e50eb3b69108c424513348c110fdad9d2a5c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_NET @@ -0,0 +1 @@ +CONFIG_FIREWIRE_NET=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_OHCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_OHCI new file mode 100644 index 0000000000000000000000000000000000000000..de045fe5aa6e16b067e4543edd6b329861f11970 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_OHCI @@ -0,0 +1 @@ +CONFIG_FIREWIRE_OHCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_SBP2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_SBP2 new file mode 100644 index 0000000000000000000000000000000000000000..1255474ee93652d3cfaf4f23b554d72df44b9053 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_SBP2 @@ -0,0 +1 @@ +CONFIG_FIREWIRE_SBP2=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FPROBE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FPROBE new file mode 100644 index 0000000000000000000000000000000000000000..56010245c46ff6b0ebd2c40ca89474a43723288b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FPROBE @@ -0,0 +1 @@ +# CONFIG_FPROBE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_MCOUNT_USE_CC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_MCOUNT_USE_CC new file mode 100644 index 0000000000000000000000000000000000000000..f4727a5a44387cdadf67d8eb9f6b0bd8d1f8827e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_MCOUNT_USE_CC @@ -0,0 +1 @@ +CONFIG_FTRACE_MCOUNT_USE_CC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_SORT_STARTUP_TEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_SORT_STARTUP_TEST new file mode 100644 index 0000000000000000000000000000000000000000..aea4adb9eb14641d149ec78a10f018120ee9d89e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_SORT_STARTUP_TEST @@ -0,0 +1 @@ +# CONFIG_FTRACE_SORT_STARTUP_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_ES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_ES new file mode 100644 index 0000000000000000000000000000000000000000..2c1643a52b0b7b3122f0c87d8188f1a8435c5edf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_ES @@ -0,0 +1 @@ +CONFIG_FUJITSU_ES=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_LAPTOP new file mode 100644 index 0000000000000000000000000000000000000000..e47131e837c033daac2261a1e0fe668c84765ed1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_LAPTOP @@ -0,0 +1 @@ +CONFIG_FUJITSU_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_TABLET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_TABLET new file mode 100644 index 0000000000000000000000000000000000000000..4e9d1a0a1ea8b3808b23f2fc780b05ffb4167c24 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_TABLET @@ -0,0 +1 @@ +CONFIG_FUJITSU_TABLET=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT new file mode 100644 index 0000000000000000000000000000000000000000..54b8841aa24c7eacd335e2e0880396ec67afd664 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT @@ -0,0 +1 @@ +CONFIG_FUNCTION_ALIGNMENT=16 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT_16B b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT_16B new file mode 100644 index 0000000000000000000000000000000000000000..bfa63ab172beb837e2b246df28a5fdc64b0b69dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT_16B @@ -0,0 +1 @@ +CONFIG_FUNCTION_ALIGNMENT_16B=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_BYTES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_BYTES new file mode 100644 index 0000000000000000000000000000000000000000..2e4315d4111eddc1aa7df9b3f85fd720ad81b853 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_BYTES @@ -0,0 +1 @@ +CONFIG_FUNCTION_PADDING_BYTES=16 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_CFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_CFI new file mode 100644 index 0000000000000000000000000000000000000000..28a5f3daec77181ee83c0891e19b4a55ccbd658b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_CFI @@ -0,0 +1 @@ +CONFIG_FUNCTION_PADDING_CFI=11 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FW_LOADER_USER_HELPER_FALLBACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FW_LOADER_USER_HELPER_FALLBACK new file mode 100644 index 0000000000000000000000000000000000000000..29daf8fb65c346c477e6cc68fddd89038ccca969 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FW_LOADER_USER_HELPER_FALLBACK @@ -0,0 +1 @@ +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXAS21002C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXAS21002C new file mode 100644 index 0000000000000000000000000000000000000000..80e57d9425db0848464f4c9d42c3d4c1aa3e148b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXAS21002C @@ -0,0 +1 @@ +# CONFIG_FXAS21002C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_I2C new file mode 100644 index 0000000000000000000000000000000000000000..312ae1763d7488e24ddda3d08fb4a13d9af629f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_I2C @@ -0,0 +1 @@ +# CONFIG_FXLS8962AF_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_SPI new file mode 100644 index 0000000000000000000000000000000000000000..5721a78eb2101f5d013d5b8f9a60a4c6aa39778a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_SPI @@ -0,0 +1 @@ +# CONFIG_FXLS8962AF_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_I2C new file mode 100644 index 0000000000000000000000000000000000000000..8ad59615f73df2e2a88f7f440c99f0c8a5299f08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_I2C @@ -0,0 +1 @@ +# CONFIG_FXOS8700_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_SPI new file mode 100644 index 0000000000000000000000000000000000000000..b291b78e2c88c2ab124ac9f253c9ae3b496a2a85 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_SPI @@ -0,0 +1 @@ +# CONFIG_FXOS8700_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GDS_FORCE_MITIGATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GDS_FORCE_MITIGATION new file mode 100644 index 
0000000000000000000000000000000000000000..89396f95d3e2f3804304708d8bc95c0ecd75e1c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GDS_FORCE_MITIGATION @@ -0,0 +1 @@ +# CONFIG_GDS_FORCE_MITIGATION is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ADC_BATTERY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ADC_BATTERY new file mode 100644 index 0000000000000000000000000000000000000000..440938a36dba9d9002bb6ffeaaa13d75794dfcdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ADC_BATTERY @@ -0,0 +1 @@ +# CONFIG_GENERIC_ADC_BATTERY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST new file mode 100644 index 0000000000000000000000000000000000000000..cbe34665557ed372328435e92929cbf861887054 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST @@ -0,0 +1 @@ +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CMOS_UPDATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CMOS_UPDATE new file mode 100644 index 0000000000000000000000000000000000000000..ba31fc229ab5255ed6deeb60713cf70d9526c80c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CMOS_UPDATE @@ -0,0 +1 @@ +CONFIG_GENERIC_CMOS_UPDATE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ENTRY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ENTRY new file mode 100644 index 0000000000000000000000000000000000000000..039e303e59d7dc49cbffb365ac4e27dc8a0d1f6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ENTRY @@ -0,0 +1 @@ +CONFIG_GENERIC_ENTRY=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IOMAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IOMAP new file mode 100644 index 0000000000000000000000000000000000000000..3332e15f4e30d4aeb8ab8ce8c7f4c508e6255381 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IOMAP @@ -0,0 +1 @@ +CONFIG_GENERIC_IOMAP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR new file mode 100644 index 0000000000000000000000000000000000000000..9fbd28afe9b2042dca53a87bf4d5593e7da927c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_RESERVATION_MODE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_RESERVATION_MODE new file mode 100644 index 0000000000000000000000000000000000000000..c0bbab3e55b4ed9f487490b6fad8336106b93ae0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_RESERVATION_MODE @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GIGABYTE_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GIGABYTE_WMI new file mode 100644 index 0000000000000000000000000000000000000000..3cd2ec4144e372b95789438c25ea9d2333f453c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GIGABYTE_WMI @@ -0,0 +1 @@ +# CONFIG_GIGABYTE_WMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP002 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP002 new file mode 100644 index 0000000000000000000000000000000000000000..4be624a417b4e73fa0ffc9a34803717cd4469db1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP002 @@ -0,0 +1 @@ +# CONFIG_GP2AP002 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP020A00F b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP020A00F new file mode 100644 index 0000000000000000000000000000000000000000..15d94281d250a7d1e9165aca7aa91e27a25a5b9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP020A00F @@ -0,0 +1 @@ +# CONFIG_GP2AP020A00F is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPD_POCKET_FAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPD_POCKET_FAN new file mode 100644 index 0000000000000000000000000000000000000000..a4541a51d7b9cc21a25f37fbc0f9ca3a8f4643ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPD_POCKET_FAN @@ -0,0 +1 @@ +# CONFIG_GPD_POCKET_FAN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_AMD8111 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_AMD8111 new file mode 100644 index 0000000000000000000000000000000000000000..097d52ce198e889719ffa3d641ded3c4c3d09725 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_AMD8111 @@ -0,0 +1 @@ +# CONFIG_GPIO_AMD8111 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_DWAPB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_DWAPB new file mode 100644 index 0000000000000000000000000000000000000000..3643833287116639ab26951020bc11ff99171b6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_DWAPB @@ -0,0 +1 @@ +# CONFIG_GPIO_DWAPB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ELKHARTLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ELKHARTLAKE new file mode 100644 index 0000000000000000000000000000000000000000..d39ce656334754a9175f66c28b7d1824c94b605d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ELKHARTLAKE @@ -0,0 +1 @@ +# CONFIG_GPIO_ELKHARTLAKE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_F7188X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_F7188X new file mode 100644 index 0000000000000000000000000000000000000000..4ab26719d33b7b3f5895e336109072cf46beeb42 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_F7188X @@ -0,0 +1 @@ +# CONFIG_GPIO_F7188X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ICH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ICH new file mode 100644 index 0000000000000000000000000000000000000000..7712d05165d66898b06db3e2261d0db8b5d3d2da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ICH @@ -0,0 +1 @@ +CONFIG_GPIO_ICH=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_IT87 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_IT87 new file mode 100644 index 0000000000000000000000000000000000000000..aaeb504acabf0d0dbac73fd44d8ac4c3718d49d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_IT87 @@ -0,0 +1 @@ +# CONFIG_GPIO_IT87 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ML_IOH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ML_IOH new file mode 100644 index 0000000000000000000000000000000000000000..9155cc76a8deb78bb2f56ac3c2c635e28be52896 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ML_IOH @@ -0,0 +1 @@ +# CONFIG_GPIO_ML_IOH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH new file mode 100644 index 0000000000000000000000000000000000000000..91a3ab850cbace4b9f6e4671c3e9f6eaf23ff2c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH @@ -0,0 +1 @@ +# CONFIG_GPIO_SCH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH311X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH311X new file mode 100644 index 
0000000000000000000000000000000000000000..62c932c31b6d0267d2182f27a7e36f703becf806
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH311X
@@ -0,0 +1 @@
+# CONFIG_GPIO_SCH311X is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VIPERBOARD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VIPERBOARD
new file mode 100644
index 0000000000000000000000000000000000000000..e92c6f0d3740e07544b9d4f87ae8f9bae2c248a4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VIPERBOARD
@@ -0,0 +1 @@
+CONFIG_GPIO_VIPERBOARD=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VX855 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VX855
new file mode 100644
index 0000000000000000000000000000000000000000..07b0ce3abc052023816a5bfa9172f949f21069fa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VX855
@@ -0,0 +1 @@
+# CONFIG_GPIO_VX855 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WINBOND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WINBOND
new file mode 100644
index 0000000000000000000000000000000000000000..0e6058047c8957e1972c44791edf834209b39efa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WINBOND
@@ -0,0 +1 @@
+# CONFIG_GPIO_WINBOND is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WS16C48 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WS16C48
new file mode 100644
index 0000000000000000000000000000000000000000..75427dc0f51718aec7dc417d7f02f82d61e5f9dc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WS16C48
@@ -0,0 +1 @@
+# CONFIG_GPIO_WS16C48 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HARDLOCKUP_CHECK_TIMESTAMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
new file mode 100644
index 0000000000000000000000000000000000000000..bf5847da79f8f89ab7aa170325c038cdf66e99cb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
@@ -0,0 +1 @@
+CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ACPI_APEI_NMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ACPI_APEI_NMI
new file mode 100644
index 0000000000000000000000000000000000000000..bda00028a2bf0ea8a556fbcde5d57b39cc80e48c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ACPI_APEI_NMI
@@ -0,0 +1 @@
+CONFIG_HAVE_ACPI_APEI_NMI=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
new file mode 100644
index 0000000000000000000000000000000000000000..f6490ad53e3c9797921cf98159623ea03842b209
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KCSAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KCSAN
new file mode 100644
index 0000000000000000000000000000000000000000..72fd84bfab21b0f9bbb1789d0c8835568eaad0b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KCSAN
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_KCSAN=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KMSAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KMSAN
new file mode 100644
index 0000000000000000000000000000000000000000..c4b12d1aaf440bfa009bb657a84d5dfa492a75b5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KMSAN
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_KMSAN=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_NODE_DEV_GROUP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_NODE_DEV_GROUP
new file mode 100644
index 0000000000000000000000000000000000000000..a39a0b90ae2e9ca04fd42552d515a245bc2d453f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_NODE_DEV_GROUP
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_NODE_DEV_GROUP=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_SOFT_DIRTY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_SOFT_DIRTY
new file mode 100644
index 0000000000000000000000000000000000000000..77f3b4e3b64aecc5c198a2cae06ae04fc06ef9e4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_SOFT_DIRTY
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_SOFT_DIRTY=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
new file mode 100644
index 0000000000000000000000000000000000000000..e97355f3bf36fd07e32a74f28987ed3d96230e14
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_USERFAULTFD_WP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_USERFAULTFD_WP
new file mode 100644
index 0000000000000000000000000000000000000000..4b72ebe2ba58f542e8aa3ace5b7693a9bf9f0b0b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_USERFAULTFD_WP
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_USERFAULTFD_WP=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
new file mode 100644
index 0000000000000000000000000000000000000000..83d9554e9be27e5d862f4065b0c57096542cfdc7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
@@ -0,0 +1 @@
+CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BOOTMEM_INFO_NODE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BOOTMEM_INFO_NODE
new file mode 100644
index 0000000000000000000000000000000000000000..ee51a6582c46756c30fc8aa99520cd5574e7aaa4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BOOTMEM_INFO_NODE
@@ -0,0 +1 @@
+CONFIG_HAVE_BOOTMEM_INFO_NODE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BUILDTIME_MCOUNT_SORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BUILDTIME_MCOUNT_SORT
new file mode 100644
index 0000000000000000000000000000000000000000..657e0932379bb344466d52939a545d999d00eac9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BUILDTIME_MCOUNT_SORT
@@ -0,0 +1 @@
+CONFIG_HAVE_BUILDTIME_MCOUNT_SORT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CALL_THUNKS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CALL_THUNKS
new file mode 100644
index 0000000000000000000000000000000000000000..66d15550a4e9cdec9236a1b3609dd1dad8ebc858
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CALL_THUNKS
@@ -0,0 +1 @@
+CONFIG_HAVE_CALL_THUNKS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK
new file mode 100644
index 0000000000000000000000000000000000000000..d8e9df335d63d8929576833953f08a7314f4aa6b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK
@@ -0,0 +1 @@
+CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
new file mode 100644
index 0000000000000000000000000000000000000000..8300b2acaf7a34e49e600a0dd04b4b8ac03a9b00
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
@@ -0,0 +1 @@
+CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
new file mode 100644
index 0000000000000000000000000000000000000000..0898f1463c5cc1ccba7845003a2f58c83af3147b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
@@ -0,0 +1 @@
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EISA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EISA
new file mode 100644
index 0000000000000000000000000000000000000000..f5707a64cd4b6ba0e8cd0281b861c781e41ea839
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EISA
@@ -0,0 +1 @@
+CONFIG_HAVE_EISA=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EXIT_THREAD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EXIT_THREAD
new file mode 100644
index 0000000000000000000000000000000000000000..67b28cbe35161f3e540fb7985721d302daa7329f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EXIT_THREAD
@@ -0,0 +1 @@
+CONFIG_HAVE_EXIT_THREAD=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_FENTRY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_FENTRY
new file mode 100644
index 0000000000000000000000000000000000000000..9b6997a06e599d8d20cc32c4bb8ea6947a6425a8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_FENTRY
@@ -0,0 +1 @@
+CONFIG_HAVE_FENTRY=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_INTEL_TXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_INTEL_TXT
new file mode 100644
index 0000000000000000000000000000000000000000..0e05464bc7319e29110ede6a80540679364785fe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_INTEL_TXT
@@ -0,0 +1 @@
+CONFIG_HAVE_INTEL_TXT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
new file mode 100644
index 0000000000000000000000000000000000000000..4c8e7558df7262a5bd19c625e93d03069ef6e2fb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
@@ -0,0 +1 @@
+CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_JUMP_LABEL_HACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_JUMP_LABEL_HACK
new file mode 100644
index 0000000000000000000000000000000000000000..4588049c08a66a7b5165a1009f9e15b4a3e9f75f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_JUMP_LABEL_HACK
@@ -0,0 +1 @@
+CONFIG_HAVE_JUMP_LABEL_HACK=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_BZIP2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_BZIP2
new file mode 100644
index 0000000000000000000000000000000000000000..1b1d867b334c87035c683981e517f8e75b94bee8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_BZIP2
@@ -0,0 +1 @@
+CONFIG_HAVE_KERNEL_BZIP2=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_GZIP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_GZIP
new file mode 100644
index 0000000000000000000000000000000000000000..feb2641d88dc6677449b320038b2b501a6832a77
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_GZIP
@@ -0,0 +1 @@
+CONFIG_HAVE_KERNEL_GZIP=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZ4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZ4
new file mode 100644
index 0000000000000000000000000000000000000000..ec1e81c3c306ca44187a1935132b388b9a189ca2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZ4
@@ -0,0 +1 @@
+CONFIG_HAVE_KERNEL_LZ4=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZMA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZMA
new file mode 100644
index 0000000000000000000000000000000000000000..c50e6ad86436067477578902b08ecb5882f540aa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZMA
@@ -0,0 +1 @@
+CONFIG_HAVE_KERNEL_LZMA=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZO
new file mode 100644
index 0000000000000000000000000000000000000000..81bb9ec7ed5d6dbd22c0a5a32198959b50b1add7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZO
@@ -0,0 +1 @@
+CONFIG_HAVE_KERNEL_LZO=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_XZ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_XZ
new file mode 100644
index 0000000000000000000000000000000000000000..329229234b2635cc471750543c58976ddda3de87
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_XZ
@@ -0,0 +1 @@
+CONFIG_HAVE_KERNEL_XZ=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_ZSTD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_ZSTD
new file mode 100644
index 0000000000000000000000000000000000000000..1a47f3ca73e76179adba68a7e03c1483c377cd94
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_ZSTD
@@ -0,0 +1 @@
+CONFIG_HAVE_KERNEL_ZSTD=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KPROBES_ON_FTRACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KPROBES_ON_FTRACE
new file mode 100644
index 0000000000000000000000000000000000000000..805752949837127e6c1717424cb2e290b88a562c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KPROBES_ON_FTRACE
@@ -0,0 +1 @@
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_DIRTY_RING_TSO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_DIRTY_RING_TSO
new file mode 100644
index 0000000000000000000000000000000000000000..46fbdd90ba20e003468b8a1440312d47422605f4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_DIRTY_RING_TSO
@@ -0,0 +1 @@
+CONFIG_HAVE_KVM_DIRTY_RING_TSO=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_NO_POLL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_NO_POLL
new file mode 100644
index 0000000000000000000000000000000000000000..c620814f57dba095a916fe333fd4274e694b426c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_NO_POLL
@@ -0,0 +1 @@
+CONFIG_HAVE_KVM_NO_POLL=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PFNCACHE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PFNCACHE
new file mode 100644
index 0000000000000000000000000000000000000000..db784852153f2597ad1c12be8dfbb4c8cea22631
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PFNCACHE
@@ -0,0 +1 @@
+CONFIG_HAVE_KVM_PFNCACHE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PM_NOTIFIER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PM_NOTIFIER
new file mode 100644
index 0000000000000000000000000000000000000000..fea671aea6b71f5b27476c3fb31fb3951e431aff
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PM_NOTIFIER
@@ -0,0 +1 @@
+CONFIG_HAVE_KVM_PM_NOTIFIER=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MIXED_BREAKPOINTS_REGS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
new file mode 100644
index 0000000000000000000000000000000000000000..a54d950e2d5e91abf896c7cb1ec834061efa0735
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
@@ -0,0 +1 @@
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MMIOTRACE_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MMIOTRACE_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..91d2dc7c91c1742255302db86929478ec6fd821e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MMIOTRACE_SUPPORT
@@ -0,0 +1 @@
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_HACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_HACK
new file mode 100644
index 0000000000000000000000000000000000000000..7413f93aa6f1664956656636aad72ed649de2deb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_HACK
@@ -0,0 +1 @@
+CONFIG_HAVE_NOINSTR_HACK=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_VALIDATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_VALIDATION
new file mode 100644
index 0000000000000000000000000000000000000000..1367d6e73a4c634edd76468a9085611b59fda548
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_VALIDATION
@@ -0,0 +1 @@
+CONFIG_HAVE_NOINSTR_VALIDATION=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL
new file mode 100644
index 0000000000000000000000000000000000000000..469c7ff06eb548fd232600315561eeea69fada76
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL
@@ -0,0 +1 @@
+CONFIG_HAVE_OBJTOOL=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_MCOUNT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_MCOUNT
new file mode 100644
index 0000000000000000000000000000000000000000..4634d2d6365f68c741c5612f55b55a5a8dfb92e5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_MCOUNT
@@ -0,0 +1 @@
+CONFIG_HAVE_OBJTOOL_MCOUNT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_NOP_MCOUNT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_NOP_MCOUNT
new file mode 100644
index 0000000000000000000000000000000000000000..94191899a0557b4d2a17e13150c7b3043ece65ce
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_NOP_MCOUNT
@@ -0,0 +1 @@
+CONFIG_HAVE_OBJTOOL_NOP_MCOUNT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OPTPROBES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OPTPROBES
new file mode 100644
index 0000000000000000000000000000000000000000..06751c73a8ac8902c6e6eeb3868c3e2d32932ba0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OPTPROBES
@@ -0,0 +1 @@
+CONFIG_HAVE_OPTPROBES=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PCSPKR_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PCSPKR_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..eab8b6f8a3ab0f9163bd9ea64329195b728b5cd6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PCSPKR_PLATFORM
@@ -0,0 +1 @@
+CONFIG_HAVE_PCSPKR_PLATFORM=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PREEMPT_DYNAMIC_CALL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
new file mode 100644
index 0000000000000000000000000000000000000000..af4fcec508881f5c9246d3fd6c937e347089c4f8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
@@ -0,0 +1 @@
+CONFIG_HAVE_PREEMPT_DYNAMIC_CALL=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RETHOOK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RETHOOK
new file mode 100644
index 0000000000000000000000000000000000000000..b25116106132eb241a08db0ad9d6216e0b6758d1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RETHOOK
@@ -0,0 +1 @@
+CONFIG_HAVE_RETHOOK=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RUST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RUST
new file mode 100644
index 0000000000000000000000000000000000000000..517440189cca91176a2294a85d898589a3643e86
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RUST
@@ -0,0 +1 @@
+CONFIG_HAVE_RUST=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL
new file mode 100644
index 0000000000000000000000000000000000000000..f5d02db4c03b103296fb9ef961e8b260f9244023
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL
@@ -0,0 +1 @@
+CONFIG_HAVE_STATIC_CALL=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL_INLINE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL_INLINE
new file mode 100644
index 0000000000000000000000000000000000000000..5b16b117c7ab2a80be145560787af066f33ddfe9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL_INLINE
@@ -0,0 +1 @@
+CONFIG_HAVE_STATIC_CALL_INLINE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UACCESS_VALIDATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UACCESS_VALIDATION
new file mode 100644
index 0000000000000000000000000000000000000000..d337e94712f1396d6e5a8add8afe62d016e9cb9e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UACCESS_VALIDATION
@@ -0,0 +1 @@
+CONFIG_HAVE_UACCESS_VALIDATION=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UNSTABLE_SCHED_CLOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
new file mode 100644
index 0000000000000000000000000000000000000000..747a0312fa84f0c84225c5c5e7b59c21b48575f3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
@@ -0,0 +1 @@
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_USER_RETURN_NOTIFIER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_USER_RETURN_NOTIFIER
new file mode 100644
index 0000000000000000000000000000000000000000..448c0846095614d65d2b28e2b36ccb73752adea6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_USER_RETURN_NOTIFIER
@@ -0,0 +1 @@
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC100X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC100X
new file mode 100644
index 0000000000000000000000000000000000000000..e8e7ce278b2762ecabc715fb8c7772f82fae910d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC100X
@@ -0,0 +1 @@
+# CONFIG_HDC100X is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC2010 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC2010
new file mode 100644
index 0000000000000000000000000000000000000000..fad2fecfd916cdbe8ad17a2210f0da8e1f4b0798
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC2010
@@ -0,0 +1 @@
+# CONFIG_HDC2010 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDMI_LPE_AUDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDMI_LPE_AUDIO
new file mode 100644
index 0000000000000000000000000000000000000000..a0da57f4088cda778604ef741b3cc114f3b37ca3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDMI_LPE_AUDIO
@@ -0,0 +1 @@
+# CONFIG_HDMI_LPE_AUDIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HFI1_DEBUG_SDMA_ORDER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HFI1_DEBUG_SDMA_ORDER
new file mode 100644
index 0000000000000000000000000000000000000000..a5c5e96e64a5dc2ec1bd42ddcea7907e2f9939aa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HFI1_DEBUG_SDMA_ORDER
@@ -0,0 +1 @@
+# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HI8435 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HI8435
new file mode 100644
index 0000000000000000000000000000000000000000..21ee368cedd5a77988a2b14ff137a7825492c54e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HI8435
@@ -0,0 +1 @@
+# CONFIG_HI8435 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ALPS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ALPS
new file mode 100644
index 0000000000000000000000000000000000000000..81f929d6b8532a290fab4adecc4506ab8e0be204
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ALPS
@@ -0,0 +1 @@
+CONFIG_HID_ALPS=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ASUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ASUS
new file mode 100644
index 0000000000000000000000000000000000000000..efc1ef4f8da95500dc38b9a430bbe1790edcdf09
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ASUS
@@ -0,0 +1 @@
+CONFIG_HID_ASUS=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_CMEDIA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_CMEDIA
new file mode 100644
index 0000000000000000000000000000000000000000..c168e7ea03b37cf1f31db6c2eac4b663868c730b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_CMEDIA
@@ -0,0 +1 @@
+CONFIG_HID_CMEDIA=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_HYPERV_MOUSE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_HYPERV_MOUSE
new file mode 100644
index 0000000000000000000000000000000000000000..2089444e567c775f7038c63ca8f721d37da94fc3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_HYPERV_MOUSE
@@ -0,0 +1 @@
+CONFIG_HID_HYPERV_MOUSE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_NVIDIA_SHIELD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_NVIDIA_SHIELD
new file mode 100644
index 0000000000000000000000000000000000000000..dc7f0d9989d96b23f8afa1b1d83884f424f423c4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_NVIDIA_SHIELD
@@ -0,0 +1 @@
+# CONFIG_HID_NVIDIA_SHIELD is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PICOLCD_CIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PICOLCD_CIR
new file mode 100644
index 0000000000000000000000000000000000000000..312f304cf23dbe1aed95e3767e1b0f2c933a0c1a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PICOLCD_CIR
@@ -0,0 +1 @@
+CONFIG_HID_PICOLCD_CIR=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PRODIKEYS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PRODIKEYS
new file mode 100644
index 0000000000000000000000000000000000000000..16e17caf6190dc8e21836f4cbff13342d13d19ef
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PRODIKEYS
@@ -0,0 +1 @@
+# CONFIG_HID_PRODIKEYS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ACCEL_3D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ACCEL_3D
new file mode 100644
index 0000000000000000000000000000000000000000..58460b8bfc47c58e99079764ddfb731d3ae2e150
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ACCEL_3D
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_ACCEL_3D=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ALS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ALS
new file mode 100644
index 0000000000000000000000000000000000000000..a8a99f6b3b382970838b5a369bd66f9f0a3be6e2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ALS
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_ALS=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE
new file mode 100644
index 0000000000000000000000000000000000000000..705f7e447dc6299f7506d56aa114f3a29f726ada
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE
@@ -0,0 +1 @@
+# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_SENSOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_SENSOR
new file mode 100644
index 0000000000000000000000000000000000000000..e8a6e4971a51411537789fff86eb04fb3838f786
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_SENSOR
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_DEVICE_ROTATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_DEVICE_ROTATION
new file mode 100644
index 0000000000000000000000000000000000000000..66449de3b74ca73214047a97d6bf5406106ca7ad
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_DEVICE_ROTATION
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_DEVICE_ROTATION=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_GYRO_3D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_GYRO_3D
new file mode 100644
index 0000000000000000000000000000000000000000..b2aa13a9f4ce32f664a81fe629063f9bb4c2121d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_GYRO_3D
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_GYRO_3D=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUB
new file mode 100644
index 0000000000000000000000000000000000000000..090ecbd4043f7fbf2db2c928546b977ab6cfaf78
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUB
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_HUB=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUMIDITY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUMIDITY
new file mode 100644
index 0000000000000000000000000000000000000000..d50f5014af5cb4d02a2d637f913792153c06c574
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUMIDITY
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_HUMIDITY=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..fc396a26cbd29f1d19d271d6ecce9a499276cd1c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_COMMON
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_IIO_COMMON=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_TRIGGER
new file mode 100644
index 0000000000000000000000000000000000000000..b9cfc649f65c49f7e6c3356a796e8b65be5f06fe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_TRIGGER
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_IIO_TRIGGER=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_INCLINOMETER_3D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_INCLINOMETER_3D
new file mode 100644
index 0000000000000000000000000000000000000000..7a9399cb1aef510d24c97937c5941294b4c54b56
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_INCLINOMETER_3D
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_INCLINOMETER_3D=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_MAGNETOMETER_3D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_MAGNETOMETER_3D
new file mode 100644
index 0000000000000000000000000000000000000000..61036565fa4b06231d6f9a405f06160ceea261ad
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_MAGNETOMETER_3D
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PRESS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PRESS
new file mode 100644
index 0000000000000000000000000000000000000000..735e740b0d7046d356b6063a3cc8e149c31ce902
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PRESS
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_PRESS=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PROX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PROX
new file mode 100644
index 0000000000000000000000000000000000000000..46e212b880c2fc76047cc3bdff20c0deb569027e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PROX
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_PROX=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_TEMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_TEMP
new file mode 100644
index 0000000000000000000000000000000000000000..6f1a98bc0df18bed0aeaa499989800b2c8ac1e88
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_TEMP
@@ -0,0 +1 @@
+CONFIG_HID_SENSOR_TEMP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HMC425 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HMC425
new file mode 100644
index 0000000000000000000000000000000000000000..79128bfd7bbce715d888f9af2f683ab89efd6f1c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HMC425
@@ -0,0 +1 @@
+# CONFIG_HMC425 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_CORE_SYNC_FULL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_CORE_SYNC_FULL
new file mode 100644
index 0000000000000000000000000000000000000000..74e300e560120c265a8fc16e7ecbc4bf27ee4f05
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_CORE_SYNC_FULL
@@ -0,0 +1 @@
+CONFIG_HOTPLUG_CORE_SYNC_FULL=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PARALLEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PARALLEL
new file mode 100644
index 0000000000000000000000000000000000000000..5eac1fd8ef52fb2905477822b9d52f98b491f4bb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PARALLEL
@@ -0,0 +1 @@
+CONFIG_HOTPLUG_PARALLEL=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PCI_SHPC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PCI_SHPC
new file mode 100644
index 0000000000000000000000000000000000000000..4380a971729deccf4d572ec27ec7c38670cb571d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PCI_SHPC
@@ -0,0 +1 @@
+CONFIG_HOTPLUG_PCI_SHPC=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SMT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SMT
new file mode 100644
index 0000000000000000000000000000000000000000..ddb25fc38d3a7b2aec87b6994a36d664c023e743
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SMT
@@ -0,0 +1 @@
+CONFIG_HOTPLUG_SMT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SPLIT_STARTUP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SPLIT_STARTUP
new file mode 100644
index 0000000000000000000000000000000000000000..9ba8dd70d43d0d87e9413b9c513b5d9c67c65849
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SPLIT_STARTUP
@@ -0,0 +1 @@
+CONFIG_HOTPLUG_SPLIT_STARTUP=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP03 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP03
new file mode 100644
index 0000000000000000000000000000000000000000..bd8714af3cda9a02aef0200a1beb0b79306d3ae5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP03
@@ -0,0 +1 @@
+# CONFIG_HP03 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP206C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP206C
new file mode 100644
index 0000000000000000000000000000000000000000..6d5f0703259ee8c2e3c246fd76c902701fed251b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP206C
@@ -0,0 +1 @@
+# CONFIG_HP206C is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_EMULATE_RTC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_EMULATE_RTC
new file mode 100644
index 0000000000000000000000000000000000000000..fa706c711c31bcf39f2d99a8c092fff328172a13
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_EMULATE_RTC
@@ -0,0 +1 @@
+CONFIG_HPET_EMULATE_RTC=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_MMAP_DEFAULT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_MMAP_DEFAULT
new file mode 100644
index 0000000000000000000000000000000000000000..05bd11cb7de67936a6f123c63e8ba582c02917c3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_MMAP_DEFAULT
@@ -0,0 +1 @@
+# CONFIG_HPET_MMAP_DEFAULT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPWDT_NMI_DECODING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPWDT_NMI_DECODING
new file mode 100644
index 0000000000000000000000000000000000000000..2f21b282dfce7a0f9a6f8436d70bc150e8d0367a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPWDT_NMI_DECODING
@@ -0,0 +1 @@
+CONFIG_HPWDT_NMI_DECODING=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_ILO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_ILO
new file mode 100644
index 0000000000000000000000000000000000000000..1d3256354a5bd67727130c8ec14d6d71a9b91c0a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_ILO
@@ -0,0 +1 @@
+CONFIG_HP_ILO=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_WATCHDOG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..9a829b4e98c840610caab502580b03e1053f2cdd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_WATCHDOG
@@ -0,0 +1 @@
+CONFIG_HP_WATCHDOG=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HSU_DMA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HSU_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..0dcb85dec3f037368b3a62aa1743701d0d5a1612
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HSU_DMA
@@ -0,0 +1 @@
+CONFIG_HSU_DMA=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTS221 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTS221
new file mode 100644
index 0000000000000000000000000000000000000000..e38bd6677d074442f2ddcf67aaf439b8672fc16c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTS221
@@ -0,0 +1 @@
+# CONFIG_HTS221 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTU21 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTU21
new file mode 100644
index 0000000000000000000000000000000000000000..5ba8bbff7d763a40e07979b2a67cc29fabbc3b17
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTU21
@@ -0,0 +1 @@
+# CONFIG_HTU21 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HUAWEI_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HUAWEI_WMI
new file mode 100644
index 0000000000000000000000000000000000000000..0ad2cb4e3c5c0182f8d64b7256447a59b3fe7516
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HUAWEI_WMI
@@ -0,0 +1 @@
+# CONFIG_HUAWEI_WMI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_IRQ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_IRQ
new file mode 100644
index 0000000000000000000000000000000000000000..2064342295b0e0c84bf86a1ca94cd9ceb6f7d07c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_IRQ
@@ -0,0 +1 @@
+CONFIG_HVC_IRQ=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN
new file mode 100644
index 0000000000000000000000000000000000000000..df97a05c75eae102afef241688a58ddd82fc7560
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN
@@ -0,0 +1 @@
+CONFIG_HVC_XEN=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN_FRONTEND
new file mode 100644
index 0000000000000000000000000000000000000000..02ccaac1310ceb25646c05b7dc31d8fec883282b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN_FRONTEND
@@ -0,0 +1 @@
+CONFIG_HVC_XEN_FRONTEND=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HWMON_VID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HWMON_VID
new file mode 100644
index 0000000000000000000000000000000000000000..5cb16aa7c2fb203fdb8c613d762d2841432f8263
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HWMON_VID
@@ -0,0 +1 @@
+CONFIG_HWMON_VID=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIA
new file mode 100644
index 0000000000000000000000000000000000000000..9b9c512317b0ea65cc7ed9d1535c53ccb7c00310
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIA
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_VIA=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIRTIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIRTIO
new file mode 100644
index 0000000000000000000000000000000000000000..ec44b8453241cf2d4e6067c554111c7ae699b41f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIRTIO
@@ -0,0 +1 @@
+CONFIG_HW_RANDOM_VIRTIO=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HX711 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HX711
new file mode 100644
index 0000000000000000000000000000000000000000..5378c733942ae3d61f00bd292c6285209a05d3bb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HX711
@@ -0,0 +1 @@
+# CONFIG_HX711 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_NET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_NET
new file mode 100644
index 0000000000000000000000000000000000000000..ecacee7154d06241ee76818e398a98edd75f1384
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_NET
@@ -0,0 +1 @@
+CONFIG_HYPERV_NET=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TIMER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TIMER
new file mode 100644
index 0000000000000000000000000000000000000000..983959256fe379e9f634a379de777b8f3a1a7466
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TIMER
@@ -0,0 +1 @@
+CONFIG_HYPERV_TIMER=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_VTL_MODE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_VTL_MODE
new file mode 100644
index 0000000000000000000000000000000000000000..bc928703dc8b2382168b9da2df820df07551c44d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_VTL_MODE
@@ -0,0 +1 @@
+# CONFIG_HYPERV_VTL_MODE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756
new file mode 100644
index 0000000000000000000000000000000000000000..760d8bd4289e88018324080d5b4d59bbe7154b33
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756
@@ -0,0 +1 @@
+CONFIG_I2C_AMD756=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756_S4882 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756_S4882
new file mode 100644
index 0000000000000000000000000000000000000000..32da34a3ec9c70676224f35d0b0c402481290de4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756_S4882
@@ -0,0 +1 @@
+CONFIG_I2C_AMD756_S4882=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD8111 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD8111
new file mode 100644
index 0000000000000000000000000000000000000000..3359257cda3d943ad1db0224f7a1ea2dddee4633
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD8111
@@ -0,0 +1 @@
+CONFIG_I2C_AMD8111=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_AMDPSP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_AMDPSP
new file mode 100644
index 0000000000000000000000000000000000000000..3ae24381d8af9642e41beef37d7ef9848d265b3f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_AMDPSP
@@ -0,0 +1 @@
+# CONFIG_I2C_DESIGNWARE_AMDPSP is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_BAYTRAIL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_BAYTRAIL
new file mode 100644
index 0000000000000000000000000000000000000000..88e9c546ec065fe755d2b9862690eb5a9c892e4e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_BAYTRAIL
@@ -0,0 +1 @@
+CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..1dd3eb65409f22bd1f9655c896615bfafcf5475a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_GPIO
@@ -0,0 +1 @@
+# CONFIG_I2C_GPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_HELPER_AUTO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_HELPER_AUTO
new file mode 100644
index 0000000000000000000000000000000000000000..59be08e04c9d562f6e824d22d41f32aad30a4fe9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_HELPER_AUTO
@@ -0,0 +1 @@
+CONFIG_I2C_HELPER_AUTO=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_I801 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_I801
new file mode 100644
index 0000000000000000000000000000000000000000..5d1883a34d28be29139f922b4c178eec1b6b5a14
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_I801
@@ -0,0 +1 @@
+CONFIG_I2C_I801=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISCH
new file mode 100644
index 0000000000000000000000000000000000000000..ca084a7099493efada32ed77c8df7d6087f37a84
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISCH
@@ -0,0 +1 @@
+CONFIG_I2C_ISCH=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISMT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISMT
new file mode 100644
index 0000000000000000000000000000000000000000..f720d1bb1a2bb6ebbad2861fab52934f2011a3fc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISMT
@@ -0,0 +1 @@
+CONFIG_I2C_ISMT=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MLXCPLD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MLXCPLD
new file mode 100644
index 0000000000000000000000000000000000000000..92afd7ee96541664c9ae6f8d43a60e7ccd269358
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MLXCPLD
@@ -0,0 +1 @@
+CONFIG_I2C_MLXCPLD=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..7d3813d3461bec1694757b6d2179324ebf15ab34
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_GPIO
@@ -0,0 +1 @@
+# CONFIG_I2C_MUX_GPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA9541 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA9541
new file mode 100644
index 0000000000000000000000000000000000000000..77a2d793949a6a3f8f1ec1812276bb6d7538abb6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA9541
@@ -0,0 +1 @@
+# CONFIG_I2C_MUX_PCA9541 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA954x b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA954x
new file mode 100644
index 0000000000000000000000000000000000000000..51e48b468b58a2dbb25756df32f73297152fd3a1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA954x
@@ -0,0 +1 @@
+# CONFIG_I2C_MUX_PCA954x is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_NFORCE2_S4985 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_NFORCE2_S4985
new file mode 100644
index 0000000000000000000000000000000000000000..758a790d255858cda858d1d10da5cf128aba8092
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_NFORCE2_S4985
@@ -0,0 +1 @@
+CONFIG_I2C_NFORCE2_S4985=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PARPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PARPORT
new file mode 100644
index 0000000000000000000000000000000000000000..58827a258983cada3764d40827c05c3124153b70
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PARPORT
@@ -0,0 +1 @@
+CONFIG_I2C_PARPORT=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PIIX4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PIIX4
new file mode 100644
index 0000000000000000000000000000000000000000..ff0307146e25e3329b3641f1b76c0918882079c8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PIIX4
@@ -0,0 +1 @@
+CONFIG_I2C_PIIX4=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SCMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SCMI
new file mode 100644
index 0000000000000000000000000000000000000000..6e60bd4e1174ff34249216547e310bb6d769b4c5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SCMI
@@ -0,0 +1 @@
+CONFIG_I2C_SCMI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SIS96X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SIS96X
new file mode 100644
index 0000000000000000000000000000000000000000..e716d349283e674d2eda83137083f55822790d59
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SIS96X
@@ -0,0 +1 @@
+CONFIG_I2C_SIS96X=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIA
new file mode 100644
index 0000000000000000000000000000000000000000..4b9475a7bf95207e3afdbd61585d0e3778d9dbaf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIA
@@ -0,0 +1 @@
+CONFIG_I2C_VIA=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIAPRO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIAPRO
new file mode 100644
index 0000000000000000000000000000000000000000..93d8303dbf8fcaee61f46a37d9f1d2925ccd03ed
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIAPRO
@@ -0,0 +1 @@
+CONFIG_I2C_VIAPRO=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIPERBOARD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIPERBOARD
new file mode 100644
index 0000000000000000000000000000000000000000..a041d4d66b8a311af39c2fc58d5199a65e9d349d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIPERBOARD
@@ -0,0 +1 @@
+CONFIG_I2C_VIPERBOARD=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN
new file mode 100644
index 0000000000000000000000000000000000000000..d9333ca2495cace1ec5848e6cc1b413c26dfec51
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN
@@ -0,0 +1 @@
+CONFIG_I2C_ZHAOXIN=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN_SMBUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN_SMBUS
new file mode 100644
index 0000000000000000000000000000000000000000..12afe80402f38a5ed48b688965dd3ce593929317
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN_SMBUS
@@ -0,0 +1 @@
+CONFIG_I2C_ZHAOXIN_SMBUS=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I8253_LOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I8253_LOCK
new file mode 100644
index 0000000000000000000000000000000000000000..f5026f2f767f9484668b8916a565a9abd6393e66
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I8253_LOCK
@@ -0,0 +1 @@
+CONFIG_I8253_LOCK=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IAQCORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IAQCORE
new file mode 100644
index 0000000000000000000000000000000000000000..7d167fb8a92d36eb5457c1890d2d2f1680bd6996
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IAQCORE
@@ -0,0 +1 @@
+# CONFIG_IAQCORE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IB700_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IB700_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..3cf068abd52e4ee865ac2b6bfd6d21bc40d7e4c4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IB700_WDT
@@ -0,0 +1 @@
+CONFIG_IB700_WDT=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBMASR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBMASR
new file mode 100644
index 0000000000000000000000000000000000000000..c5d4eb2539f8339b9052fcbc97bd8599b9520380
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBMASR
@@ -0,0 +1 @@
+CONFIG_IBMASR=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_ASM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_ASM
new file mode 100644
index 0000000000000000000000000000000000000000..dd7f292eb8ff8c8a2277bac69a34f49cb88085e7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_ASM
@@ -0,0 +1 @@
+# CONFIG_IBM_ASM is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_RTL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_RTL
new file mode 100644
index 0000000000000000000000000000000000000000..3a0fee720c72b1cb86c11392b6fae82c4943b43c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_RTL
@@ -0,0 +1 @@
+# CONFIG_IBM_RTL is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICE_HWTS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICE_HWTS
new file mode 100644
index 0000000000000000000000000000000000000000..ef67132369409ae4dac9e44b67bf4c2ea7d6afec
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICE_HWTS
@@ -0,0 +1 @@
+CONFIG_ICE_HWTS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICP10100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICP10100
new file mode 100644
index 0000000000000000000000000000000000000000..4a8ee10d033593d45a6dc098ae0dadefdf827468
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICP10100
@@ -0,0 +1 @@
+# CONFIG_ICP10100 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDEAPAD_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDEAPAD_LAPTOP
new file mode 100644
index 0000000000000000000000000000000000000000..5937dca3e801496866c5b1621de571f62a716077
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDEAPAD_LAPTOP
@@ -0,0 +1 @@
+CONFIG_IDEAPAD_LAPTOP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDLE_INJECT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDLE_INJECT
new file mode 100644
index 0000000000000000000000000000000000000000..9e7af864ae548998d710d2fa2689f1b7d6bfa6c1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDLE_INJECT
@@ -0,0 +1 @@
+CONFIG_IDLE_INJECT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IE6XX_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IE6XX_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..96368c0afc4493b164963603ea65892d9c493d23
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IE6XX_WDT
@@ -0,0 +1 @@
+CONFIG_IE6XX_WDT=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IEEE802154_FAKELB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IEEE802154_FAKELB
new file mode 100644
index 0000000000000000000000000000000000000000..63f336d574da369f879692abc09ecc051f0bf928
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IEEE802154_FAKELB
@@ -0,0 +1 @@
+CONFIG_IEEE802154_FAKELB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO
new file mode 100644
index 0000000000000000000000000000000000000000..72953e4fa961cadce5ed0d7b9156292805b0bc8f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO
@@ -0,0 +1 @@
+CONFIG_IIO=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER
new file mode 100644
index 0000000000000000000000000000000000000000..e10ee5ac8c8f5f08a7bd871802d15a2d332f0a04
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER
@@ -0,0 +1 @@
+CONFIG_IIO_BUFFER=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_CB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_CB
new file mode 100644
index 0000000000000000000000000000000000000000..657cf48d33c3dd4783e778215d372ed13136c1f1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_CB
@@ -0,0 +1 @@
+# CONFIG_IIO_BUFFER_CB is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMA
new file mode 100644
index 0000000000000000000000000000000000000000..fdc2817a1429fd471c1f125c75d1b849dc7df395
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMA
@@ -0,0 +1 @@
+# CONFIG_IIO_BUFFER_DMA is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMAENGINE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMAENGINE
new file mode 100644
index 0000000000000000000000000000000000000000..7f7ca607ab89b32f399f7ed9e861fea276610624
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMAENGINE
@@ -0,0 +1 @@
+# CONFIG_IIO_BUFFER_DMAENGINE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_HW_CONSUMER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_HW_CONSUMER
new file mode 100644
index 0000000000000000000000000000000000000000..0baa9e884b246ccece49d92bbfe287da47713aa1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_HW_CONSUMER
@@ -0,0 +1 @@
+# CONFIG_IIO_BUFFER_HW_CONSUMER is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONFIGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONFIGFS
new file mode 100644
index 0000000000000000000000000000000000000000..bbf2a829d728d58b2e69433b78c29f4caf9de7d1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONFIGFS
@@ -0,0 +1 @@
+# CONFIG_IIO_CONFIGFS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONSUMERS_PER_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONSUMERS_PER_TRIGGER
new file mode 100644
index 0000000000000000000000000000000000000000..9198f59332fcec270d1b2cd6a373e5f7ea5bc3c4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONSUMERS_PER_TRIGGER
@@ -0,0 +1 @@
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_INTERRUPT_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_INTERRUPT_TRIGGER
new file mode 100644
index 0000000000000000000000000000000000000000..fd56749b1ee17f123d48ae0fcdabc9231a8f0b68
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_INTERRUPT_TRIGGER
@@ -0,0 +1 @@
+# CONFIG_IIO_INTERRUPT_TRIGGER is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KFIFO_BUF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KFIFO_BUF
new file mode 100644
index 0000000000000000000000000000000000000000..5791c27c1db6d09f4fc87fc5241f3c3b9bd59367
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KFIFO_BUF
@@ -0,0 +1 @@
+CONFIG_IIO_KFIFO_BUF=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..b2454203d3574802dc576c7a46d9f382e873f598
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_I2C
@@ -0,0 +1 @@
+# CONFIG_IIO_KX022A_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_SPI
new file mode 100644
index 0000000000000000000000000000000000000000..4630db8b4736de5d7141e89f70f8cb8e40e5a60d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_SPI
@@ -0,0 +1 @@
+# CONFIG_IIO_KX022A_SPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_MUX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_MUX
new file mode 100644
index 0000000000000000000000000000000000000000..def041a2b19694c33677a7d02ae5cd573eeb2e59
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_MUX
@@ -0,0 +1 @@
+# CONFIG_IIO_MUX is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_RESCALE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_RESCALE
new file mode 100644
index 0000000000000000000000000000000000000000..4c25d3102621b807996f75978ba4a67fb750d589
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_RESCALE
@@ -0,0 +1 @@
+# CONFIG_IIO_RESCALE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SSP_SENSORHUB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SSP_SENSORHUB
new file mode 100644
index 0000000000000000000000000000000000000000..65fe1e730198e3f1cbbd66da867c3c87b58f67df
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SSP_SENSORHUB
@@ -0,0 +1 @@
+# CONFIG_IIO_SSP_SENSORHUB is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_ACCEL_3AXIS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_ACCEL_3AXIS
new file mode 100644
index 0000000000000000000000000000000000000000..8f88bbdf50afd3146afa9fefabb7b91ca37fa6a7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_ACCEL_3AXIS
@@ -0,0 +1 @@
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_GYRO_3AXIS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_GYRO_3AXIS
new file mode 100644
index 0000000000000000000000000000000000000000..b76d81866ab4441768cfbf97556877e88b325206
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_GYRO_3AXIS
@@ -0,0 +1 @@
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM6DSX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM6DSX
new file mode 100644
index 0000000000000000000000000000000000000000..b6ec5783698fc6f67bc574fbe114e311edb50ece
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM6DSX
@@ -0,0 +1 @@
+# CONFIG_IIO_ST_LSM6DSX is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM9DS0 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM9DS0
new file mode 100644
index 0000000000000000000000000000000000000000..6c71996a504e21cf702425c7fde2c8f83122ae1b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM9DS0
@@ -0,0 +1 @@
+# CONFIG_IIO_ST_LSM9DS0 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_MAGN_3AXIS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_MAGN_3AXIS
new file mode 100644
index 0000000000000000000000000000000000000000..3019839a3550cce04806e7b4cecbc94b5acadf92
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_MAGN_3AXIS
@@ -0,0 +1 @@
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_PRESS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_PRESS
new file mode 100644
index 0000000000000000000000000000000000000000..6932cca0bc6871b9d8a6acf6b9b8c199f2f0fcb2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_PRESS
@@ -0,0 +1 @@
+# CONFIG_IIO_ST_PRESS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_DEVICE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_DEVICE
new file mode 100644
index 0000000000000000000000000000000000000000..92c6b2e2885c9155da60de496758d3e426d71eeb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_DEVICE
@@ -0,0 +1 @@
+# CONFIG_IIO_SW_DEVICE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_TRIGGER
new file mode 100644
index 0000000000000000000000000000000000000000..6373a07ab06a37f401714b9ed2ef25585af87d24
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_TRIGGER
@@ -0,0 +1 @@
+# CONFIG_IIO_SW_TRIGGER is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SYSFS_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SYSFS_TRIGGER
new file mode 100644
index 0000000000000000000000000000000000000000..3ea306080ed88e6af6850cb64080718d9080620b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SYSFS_TRIGGER
@@ -0,0 +1 @@
+# CONFIG_IIO_SYSFS_TRIGGER is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGER
new file mode 100644
index 0000000000000000000000000000000000000000..f32839d5fc982170b9008e5c40223cad94cb8a02
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGER
@@ -0,0 +1 @@
+CONFIG_IIO_TRIGGER=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_BUFFER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_BUFFER
new file mode 100644
index 0000000000000000000000000000000000000000..1707c5f486961de824adad1d5041e6f69c74e8d7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_BUFFER
@@ -0,0 +1 @@
+CONFIG_IIO_TRIGGERED_BUFFER=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_EVENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_EVENT
new file mode 100644
index 0000000000000000000000000000000000000000..d9e9ba26a23b9dc90194c01d7443848428309518
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_EVENT
@@ -0,0 +1 @@
+# CONFIG_IIO_TRIGGERED_EVENT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INA2XX_ADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INA2XX_ADC
new file mode 100644
index 0000000000000000000000000000000000000000..ee86c5485107486a75da0fd246dfdf1b31b6fa19
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INA2XX_ADC
@@ -0,0 +1 @@
+# CONFIG_INA2XX_ADC is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INET_DCCP_DIAG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INET_DCCP_DIAG
new file mode 100644
index 0000000000000000000000000000000000000000..6876f3fd266bebc036aa09c2874585dd652e23be
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INET_DCCP_DIAG
@@ -0,0 +1 @@
+CONFIG_INET_DCCP_DIAG=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_HFI1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_HFI1
new file mode 100644
index 0000000000000000000000000000000000000000..eb333c845ba12a6eef471a401b441963b4909db4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_HFI1
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_HFI1=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_MTHCA_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_MTHCA_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..2eb8392f5c845b49dd775764de7a300901dd1056
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_MTHCA_DEBUG
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_MTHCA_DEBUG=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_QIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_QIB
new file mode 100644
index 0000000000000000000000000000000000000000..591f4e962814d711e3adc8f200fa157effc0dfca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_QIB
@@ -0,0 +1 @@
+# CONFIG_INFINIBAND_QIB is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_USNIC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_USNIC
new file mode 100644
index 0000000000000000000000000000000000000000..3d8c33da50e1c9ffcd936490970d19bf560fb7ca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_USNIC
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_USNIC=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_VMWARE_PVRDMA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_VMWARE_PVRDMA
new file mode 100644
index 0000000000000000000000000000000000000000..164f3b26c12159f388c067287520336664271785
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_VMWARE_PVRDMA
@@ -0,0 +1 @@
+CONFIG_INFINIBAND_VMWARE_PVRDMA=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_AD714X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_AD714X
new file mode 100644
index 0000000000000000000000000000000000000000..ce04c7c5e73fd20ea9bf747c4bf5a305fef09afa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_AD714X
@@ -0,0 +1 @@
+# CONFIG_INPUT_AD714X is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ADXL34X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ADXL34X
new file mode 100644
index 0000000000000000000000000000000000000000..98fa0144307deb7d3840128e39d3c0c59ef06010
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ADXL34X
@@ -0,0 +1 @@
+# CONFIG_INPUT_ADXL34X is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_APANEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_APANEL
new file mode 100644
index 0000000000000000000000000000000000000000..16deb3fd37e56408a0d84977c81fcb58be29028a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_APANEL
@@ -0,0 +1 @@
+CONFIG_INPUT_APANEL=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATI_REMOTE2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATI_REMOTE2
new file mode 100644
index 0000000000000000000000000000000000000000..c60eb9e65dccec7d3a8213261c9b8dedc4ec83e5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATI_REMOTE2
@@ -0,0 +1 @@
+CONFIG_INPUT_ATI_REMOTE2=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATLAS_BTNS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATLAS_BTNS
new file mode 100644
index 0000000000000000000000000000000000000000..730f49614137ff285269aba3e52159d93e833993
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATLAS_BTNS
@@ -0,0 +1 @@
+CONFIG_INPUT_ATLAS_BTNS=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_BMA150 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_BMA150
new file mode 100644
index 0000000000000000000000000000000000000000..4dd4cb0fe1580cbed8b639c85803b05657d9c026
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_BMA150
@@ -0,0 +1 @@
+# CONFIG_INPUT_BMA150 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CM109 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CM109
new file mode 100644
index 0000000000000000000000000000000000000000..ede5a1df4c0181ad9eeda7a45afd09be41362931
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CM109
@@ -0,0 +1 @@
+CONFIG_INPUT_CM109=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CMA3000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CMA3000
new file mode 100644
index 0000000000000000000000000000000000000000..9743d20285710d2a95c9afa14ec8901aaf965eec
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CMA3000
@@ -0,0 +1 @@
+# CONFIG_INPUT_CMA3000 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DA7280_HAPTICS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DA7280_HAPTICS
new file mode 100644
index 0000000000000000000000000000000000000000..27256e8ec0870f08e69458c1214b8a9b60704207
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DA7280_HAPTICS
@@ -0,0 +1 @@
+# CONFIG_INPUT_DA7280_HAPTICS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV260X_HAPTICS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV260X_HAPTICS
new file mode 100644
index 0000000000000000000000000000000000000000..482b80c549f029bc867c26f9213a2e91aed20d70
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV260X_HAPTICS
@@ -0,0 +1 @@
+# CONFIG_INPUT_DRV260X_HAPTICS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2665_HAPTICS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2665_HAPTICS
new file mode 100644
index 0000000000000000000000000000000000000000..0ce8ccd60473e056883f6feaf5327e8d4d2d8c93
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2665_HAPTICS
@@ -0,0 +1 @@
+# CONFIG_INPUT_DRV2665_HAPTICS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2667_HAPTICS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2667_HAPTICS
new file mode 100644
index 0000000000000000000000000000000000000000..29d7ce7536be4c3249a0680b96a01dc0dce3af44
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2667_HAPTICS
@@ -0,0 +1 @@
+# CONFIG_INPUT_DRV2667_HAPTICS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_E3X0_BUTTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_E3X0_BUTTON
new file mode 100644
index 0000000000000000000000000000000000000000..70002037e5e71df2287188282d61e3eb56192bbd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_E3X0_BUTTON
@@ -0,0 +1 @@
+# CONFIG_INPUT_E3X0_BUTTON is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_BEEPER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_BEEPER
new file mode 100644
index 0000000000000000000000000000000000000000..411fd135edf64d4edd0e44641de0f6ee46ff1e30
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_BEEPER
@@ -0,0 +1 @@
+# CONFIG_INPUT_GPIO_BEEPER is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_DECODER
new file mode 100644
index 0000000000000000000000000000000000000000..47e04fe3a9e03b1ad77b64a5d032a48737714ed1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_DECODER
@@ -0,0 +1 @@
+# CONFIG_INPUT_GPIO_DECODER is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_ROTARY_ENCODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_ROTARY_ENCODER
new file mode 100644
index 0000000000000000000000000000000000000000..fd61dc3ff5cbefa662dd3daab7e3f8c6bb9f835a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_ROTARY_ENCODER
@@ -0,0 +1 @@
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_VIBRA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_VIBRA
new file mode 100644
index 0000000000000000000000000000000000000000..ef2cfbcab11a47076c1c34d0653d360469bfb65f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_VIBRA
@@ -0,0 +1 @@
+# CONFIG_INPUT_GPIO_VIBRA is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IDEAPAD_SLIDEBAR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IDEAPAD_SLIDEBAR
new file mode 100644
index 0000000000000000000000000000000000000000..16feab58f09c69ea59854d1eab19b576a93f485a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IDEAPAD_SLIDEBAR
@@ -0,0 +1 @@
+# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IMS_PCU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IMS_PCU
new file mode 100644
index 0000000000000000000000000000000000000000..476ca55ac98a1d54417eb4dd658ab8c9471c29da
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IMS_PCU
@@ -0,0 +1 @@
+# CONFIG_INPUT_IMS_PCU is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS269A b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS269A
new file mode 100644
index 0000000000000000000000000000000000000000..8b78e198494aecf70f3bbc3029b3e614f6161dcc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS269A
@@ -0,0 +1 @@
+# CONFIG_INPUT_IQS269A is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS626A b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS626A
new file mode 100644
index 0000000000000000000000000000000000000000..0091ceedf94f6a0e389253a3dbc4ac11f4df823e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS626A
@@ -0,0 +1 @@
+# CONFIG_INPUT_IQS626A is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS7222 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS7222
new file mode 100644
index 0000000000000000000000000000000000000000..6ac434ac975f3e5b9db686e555f5fec2915c9bf3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS7222
@@ -0,0 +1 @@
+# CONFIG_INPUT_IQS7222 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_JOYDEV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_JOYDEV
new file mode 100644
index 0000000000000000000000000000000000000000..d4bf799d0518a61bd3d6a8866c244140b87fb7e7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_JOYDEV
@@ -0,0 +1 @@
+CONFIG_INPUT_JOYDEV=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KEYSPAN_REMOTE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KEYSPAN_REMOTE
new file mode 100644
index 0000000000000000000000000000000000000000..5cadd6973685e892c11b742c09ef1db8b02dfe30
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KEYSPAN_REMOTE
@@ -0,0 +1 @@
+CONFIG_INPUT_KEYSPAN_REMOTE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KXTJ9 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KXTJ9
new file mode 100644
index 0000000000000000000000000000000000000000..f2d3d0cb698ea41e6e49946b9ba426fe3a6ae51b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KXTJ9
@@ -0,0 +1 @@
+# CONFIG_INPUT_KXTJ9 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MISC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MISC
new file mode 100644
index 0000000000000000000000000000000000000000..3708f5641c1e7835b00c01100652ee426891e884
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MISC
@@ -0,0 +1 @@
+CONFIG_INPUT_MISC=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MMA8450 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MMA8450
new file mode 100644
index 0000000000000000000000000000000000000000..1051809178e73c2c1ebcdd6ea68675606ad56ee4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MMA8450
@@ -0,0 +1 @@
+# CONFIG_INPUT_MMA8450 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCF8574 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCF8574
new file mode 100644
index 0000000000000000000000000000000000000000..caaa9ddb2066ea9a9dc2aaa1117702271fdb5fcf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCF8574
@@ -0,0 +1 @@
+# CONFIG_INPUT_PCF8574 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCSPKR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCSPKR
new file mode 100644
index 0000000000000000000000000000000000000000..87fa5268dfa0bc3ee376e4a86695b0c9582b6d04
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCSPKR
@@ -0,0 +1 @@
+CONFIG_INPUT_PCSPKR=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_POWERMATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_POWERMATE
new file mode 100644
index 0000000000000000000000000000000000000000..e7251e1ce29f8ef61d9b9ae65a7ae1e575e47496
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_POWERMATE
@@ -0,0 +1 @@
+CONFIG_INPUT_POWERMATE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_BEEPER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_BEEPER
new file mode 100644
index 0000000000000000000000000000000000000000..9ac1c7b316139b2e5d143a3e6b2eefd859027d28
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_BEEPER
@@ -0,0 +1 @@
+# 
CONFIG_INPUT_PWM_BEEPER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_VIBRA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_VIBRA new file mode 100644 index 0000000000000000000000000000000000000000..39a51b4903f2712af7091de5b3ebcf7bd62ab75b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_VIBRA @@ -0,0 +1 @@ +# CONFIG_INPUT_PWM_VIBRA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TABLET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TABLET new file mode 100644 index 0000000000000000000000000000000000000000..a435e97d00042688d19d928eda80c44064f7f529 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TABLET @@ -0,0 +1 @@ +CONFIG_INPUT_TABLET=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TOUCHSCREEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TOUCHSCREEN new file mode 100644 index 0000000000000000000000000000000000000000..3ee240e2b492f2d09d9bb4e2858a72ef880ed688 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TOUCHSCREEN @@ -0,0 +1 @@ +CONFIG_INPUT_TOUCHSCREEN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_UINPUT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_UINPUT new file mode 100644 index 0000000000000000000000000000000000000000..ae1ea9a9c0ed1ad815e25efee3bbf0edbbeb9435 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_UINPUT @@ -0,0 +1 @@ +CONFIG_INPUT_UINPUT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_XEN_KBDDEV_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_XEN_KBDDEV_FRONTEND new file mode 100644 index 0000000000000000000000000000000000000000..9703adaf45ab45858b6730770ecc44c086666a96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_XEN_KBDDEV_FRONTEND @@ -0,0 +1 @@ +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_YEALINK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_YEALINK new file mode 100644 index 0000000000000000000000000000000000000000..328da30ac4e83c11c364366b52bbc542c17cca39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_YEALINK @@ -0,0 +1 @@ +CONFIG_INPUT_YEALINK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ATOMISP2_PM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ATOMISP2_PM new file mode 100644 index 0000000000000000000000000000000000000000..cf052120e4118ce0fe5d51f3a3b6a705280be709 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ATOMISP2_PM @@ -0,0 +1 @@ +# CONFIG_INTEL_ATOMISP2_PM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_GTT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_GTT new file mode 100644 index 0000000000000000000000000000000000000000..a4a57e0b71ce6d8c1d49ac0c216fb461047b0af3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_GTT @@ -0,0 +1 @@ +CONFIG_INTEL_GTT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_HID_EVENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_HID_EVENT new file mode 100644 index 0000000000000000000000000000000000000000..db284f9da343f9a5bc5210ced37577a777466988 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_HID_EVENT @@ -0,0 +1 @@ +CONFIG_INTEL_HID_EVENT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IDXD_COMPAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IDXD_COMPAT new file mode 100644 index 0000000000000000000000000000000000000000..c7105ed60dee35332df7d95fff9292be1dbfb43e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IDXD_COMPAT @@ -0,0 +1 @@ +# CONFIG_INTEL_IDXD_COMPAT is not set diff 
--git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IFS new file mode 100644 index 0000000000000000000000000000000000000000..6299ba67249bb4c4b24cfe719005d35d9ac1cdef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IFS @@ -0,0 +1 @@ +CONFIG_INTEL_IFS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_INT0002_VGPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_INT0002_VGPIO new file mode 100644 index 0000000000000000000000000000000000000000..f416f2ddcdf1ed95707bfed2a35226fb6368951f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_INT0002_VGPIO @@ -0,0 +1 @@ +# CONFIG_INTEL_INT0002_VGPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_DEFAULT_ON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_DEFAULT_ON new file mode 100644 index 0000000000000000000000000000000000000000..65c519e1d8cb03efdd8fc0273c8355840e16c1c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_DEFAULT_ON @@ -0,0 +1 @@ +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_FLOPPY_WA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_FLOPPY_WA new file mode 100644 index 0000000000000000000000000000000000000000..24e7e07d86bf6c6fcdad2e57db3ed89c20277bf5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_FLOPPY_WA @@ -0,0 +1 @@ +CONFIG_INTEL_IOMMU_FLOPPY_WA=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IPS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IPS new file mode 100644 index 0000000000000000000000000000000000000000..3d5e32159cb950aaf74155dc8af9c28a05441d25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IPS @@ -0,0 +1 @@ +CONFIG_INTEL_IPS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISHTP_ECLITE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISHTP_ECLITE new file mode 100644 index 0000000000000000000000000000000000000000..83930e0edd4a987279a156f8806073d31c949c96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISHTP_ECLITE @@ -0,0 +1 @@ +# CONFIG_INTEL_ISHTP_ECLITE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER new file mode 100644 index 0000000000000000000000000000000000000000..af8d58e2b2246a36d15bd5e20fe6d20f1840f9e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER @@ -0,0 +1 @@ +# CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_HID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_HID new file mode 100644 index 0000000000000000000000000000000000000000..7f7cbea0afd5a9861da8276bfa4962e7eddbd063 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_HID @@ -0,0 +1 @@ +CONFIG_INTEL_ISH_HID=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_LDMA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_LDMA new file mode 100644 index 0000000000000000000000000000000000000000..7ff87eb77759985f546aab7151e63d9a1022b9a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_LDMA @@ -0,0 +1 @@ +# CONFIG_INTEL_LDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC new file mode 100644 index 0000000000000000000000000000000000000000..b2bd8b907376f6066831971bd5a8858a9bca3544 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC @@ -0,0 +1 @@ +# 
CONFIG_INTEL_MEI_GSC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC_PROXY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC_PROXY new file mode 100644 index 0000000000000000000000000000000000000000..2282406918d2d19aad623edab799109eb0b16b80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC_PROXY @@ -0,0 +1 @@ +# CONFIG_INTEL_MEI_GSC_PROXY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_HDCP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_HDCP new file mode 100644 index 0000000000000000000000000000000000000000..3566e3d207c7cc0c047c16ec4dff1b1e23f72a42 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_HDCP @@ -0,0 +1 @@ +# CONFIG_INTEL_MEI_HDCP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_PXP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_PXP new file mode 100644 index 0000000000000000000000000000000000000000..b9589b52478ab9ddfa7fbbac8028d1dcb5168856 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_PXP @@ -0,0 +1 @@ +# CONFIG_INTEL_MEI_PXP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_TXE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_TXE new file mode 100644 index 0000000000000000000000000000000000000000..97e40f64452592bc4dba0151bfd334be5010551f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_TXE @@ -0,0 +1 @@ +# CONFIG_INTEL_MEI_TXE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_OAKTRAIL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_OAKTRAIL new file mode 100644 index 0000000000000000000000000000000000000000..c2cbe39cd930640ded58b9a827d5d209d6bf486b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_OAKTRAIL @@ -0,0 +1 @@ +CONFIG_INTEL_OAKTRAIL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_PUNIT_IPC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_PUNIT_IPC new file mode 100644 index 0000000000000000000000000000000000000000..132df88276e57271385e8263dd666b6ff134231e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_PUNIT_IPC @@ -0,0 +1 @@ +# CONFIG_INTEL_PUNIT_IPC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SAR_INT1092 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SAR_INT1092 new file mode 100644 index 0000000000000000000000000000000000000000..6e0e28bee79a9fe76121cc4c010905604584fa5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SAR_INT1092 @@ -0,0 +1 @@ +# CONFIG_INTEL_SAR_INT1092 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PCI new file mode 100644 index 0000000000000000000000000000000000000000..0418b146247f5127ded7a6ae8f2e59515c8d39ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PCI @@ -0,0 +1 @@ +# CONFIG_INTEL_SCU_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PLATFORM new file mode 100644 index 0000000000000000000000000000000000000000..18641bd2d7d09760bc60cd83b5026ba6002fd7e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PLATFORM @@ -0,0 +1 @@ +# CONFIG_INTEL_SCU_PLATFORM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SDSI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SDSI new file mode 100644 index 0000000000000000000000000000000000000000..2e3194ab0130f3fd119e83085ecbcbe11480681c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SDSI @@ -0,0 +1 @@ 
+# CONFIG_INTEL_SDSI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SMARTCONNECT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SMARTCONNECT new file mode 100644 index 0000000000000000000000000000000000000000..f9966a58b5dd56b30b97fcf293d5c83529cde98e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SMARTCONNECT @@ -0,0 +1 @@ +# CONFIG_INTEL_SMARTCONNECT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_IOSF_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_IOSF_CORE new file mode 100644 index 0000000000000000000000000000000000000000..783efbc28d12a76b22764309b1950a3b1dce9977 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_IOSF_CORE @@ -0,0 +1 @@ +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_THERMAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_THERMAL new file mode 100644 index 0000000000000000000000000000000000000000..a9e9470522cf6d56ff32907cc575dfc5d1f12d01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_THERMAL @@ -0,0 +1 @@ +# CONFIG_INTEL_SOC_DTS_THERMAL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC new file mode 100644 index 0000000000000000000000000000000000000000..c994c6ea735c4c0a61716ea163732a1b0b9bdfe3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC @@ -0,0 +1 @@ +CONFIG_INTEL_TCC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC_COOLING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC_COOLING new file mode 100644 index 0000000000000000000000000000000000000000..c64c52e1bde43d106e58203872718cf55240ad67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC_COOLING @@ -0,0 +1 @@ +# CONFIG_INTEL_TCC_COOLING is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_VBTN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_VBTN new file mode 100644 index 0000000000000000000000000000000000000000..62234540ab5a7fa5b68d84a57a7f68be0205a017 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_VBTN @@ -0,0 +1 @@ +CONFIG_INTEL_VBTN=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI new file mode 100644 index 0000000000000000000000000000000000000000..d9b411a9ced355457bb48eeb733427ca787bff91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI @@ -0,0 +1 @@ +CONFIG_INTEL_WMI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_SBL_FW_UPDATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_SBL_FW_UPDATE new file mode 100644 index 0000000000000000000000000000000000000000..f3b82a7855607910d1f624c329db7e7d215c8726 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_SBL_FW_UPDATE @@ -0,0 +1 @@ +# CONFIG_INTEL_WMI_SBL_FW_UPDATE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_THUNDERBOLT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_THUNDERBOLT new file mode 100644 index 0000000000000000000000000000000000000000..05356f742f400715d019f7ecd7936715e0e043b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_THUNDERBOLT @@ -0,0 +1 @@ +CONFIG_INTEL_WMI_THUNDERBOLT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTERVAL_TREE_SPAN_ITER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTERVAL_TREE_SPAN_ITER new file mode 100644 index 0000000000000000000000000000000000000000..674eb3d2aa0696d35028daf4967bf5d92e40dac4 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTERVAL_TREE_SPAN_ITER @@ -0,0 +1 @@ +CONFIG_INTERVAL_TREE_SPAN_ITER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_I2C new file mode 100644 index 0000000000000000000000000000000000000000..19a326e2067b1d9860702e13c532923ba5ace247 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_I2C @@ -0,0 +1 @@ +# CONFIG_INV_ICM42600_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_SPI new file mode 100644 index 0000000000000000000000000000000000000000..610c7e0594038ffd42b77a471a8bea8c93c46f2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_SPI @@ -0,0 +1 @@ +# CONFIG_INV_ICM42600_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_I2C new file mode 100644 index 0000000000000000000000000000000000000000..75930df434a8f3b9406929b3cdec8588a1aff050 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_I2C @@ -0,0 +1 @@ +# CONFIG_INV_MPU6050_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_SPI new file mode 100644 index 0000000000000000000000000000000000000000..84886298913fffd834f927399de879e7a919af74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_SPI @@ -0,0 +1 @@ +# CONFIG_INV_MPU6050_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD new file mode 100644 index 0000000000000000000000000000000000000000..9e5b98c5119c79509512a0e00d281ff282887c63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD @@ -0,0 +1 @@ +CONFIG_IOMMUFD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD_DRIVER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD_DRIVER new file mode 100644 index 0000000000000000000000000000000000000000..14ac2301d0ee03ccbb7cd51f1bf2e919a06a8c43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD_DRIVER @@ -0,0 +1 @@ +CONFIG_IOMMUFD_DRIVER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOSF_MBI_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOSF_MBI_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..ecc6275ab1d19f6815eef8a0042a44b745bd43e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOSF_MBI_DEBUG @@ -0,0 +1 @@ +# CONFIG_IOSF_MBI_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_0XED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_0XED new file mode 100644 index 0000000000000000000000000000000000000000..07499527fa63b5e9d5081f659d19d30a9fd349ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_0XED @@ -0,0 +1 @@ +# CONFIG_IO_DELAY_0XED is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_NONE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_NONE new file mode 100644 index 0000000000000000000000000000000000000000..36604953911f7d414896fb4207214f28ec0a8219 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_NONE @@ -0,0 +1 @@ +# CONFIG_IO_DELAY_NONE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_UDELAY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_UDELAY new file mode 100644 index 0000000000000000000000000000000000000000..968788e0ea0a8ea14764fe0ccb870b2ebb9bf25e --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_UDELAY @@ -0,0 +1 @@ +# CONFIG_IO_DELAY_UDELAY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPU_BRIDGE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPU_BRIDGE new file mode 100644 index 0000000000000000000000000000000000000000..90e15a7d176073b6da2bd61601c2559a58d4de22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPU_BRIDGE @@ -0,0 +1 @@ +# CONFIG_IPU_BRIDGE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2100 new file mode 100644 index 0000000000000000000000000000000000000000..b9f94f2835774082e5015c202f0bd3a52702d26c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2100 @@ -0,0 +1 @@ +# CONFIG_IPW2100 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2200 new file mode 100644 index 0000000000000000000000000000000000000000..d1ad734749314d26adbdce6e3bc61f0d0b4416f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2200 @@ -0,0 +1 @@ +# CONFIG_IPW2200 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID2_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID2_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..cddfe2944489273d20c8360a927f98b6d27fdb01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID2_DEBUG @@ -0,0 +1 @@ +# CONFIG_IP_DCCP_CCID2_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3 new file mode 100644 index 0000000000000000000000000000000000000000..b7427d3119763ea1ea25aabbb3a3d82d2572fcf0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3 @@ -0,0 +1 @@ +CONFIG_IP_DCCP_CCID3=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..b7d849decc4bd418634d8b6ddd2a3ac0e47e9071 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3_DEBUG @@ -0,0 +1 @@ +# CONFIG_IP_DCCP_CCID3_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..d62cd2f780d011128a4e09b7378a63d6930657c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_DEBUG @@ -0,0 +1 @@ +# CONFIG_IP_DCCP_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_TFRC_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_TFRC_LIB new file mode 100644 index 0000000000000000000000000000000000000000..b0c1d75c77042f8a826266ce52518c8297e96a82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_TFRC_LIB @@ -0,0 +1 @@ +CONFIG_IP_DCCP_TFRC_LIB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRQ_BYPASS_MANAGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRQ_BYPASS_MANAGER new file mode 100644 index 0000000000000000000000000000000000000000..c7d0aceb3588f887a1a7db49e0412721004b80d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRQ_BYPASS_MANAGER @@ -0,0 +1 @@ +CONFIG_IRQ_BYPASS_MANAGER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRSD200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRSD200 new file mode 100644 index 0000000000000000000000000000000000000000..834da272499c95d3499c7dfd512530072abab26b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRSD200 @@ -0,0 +1 @@ +# 
CONFIG_IRSD200 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ENE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ENE new file mode 100644 index 0000000000000000000000000000000000000000..eff4710c000ed5206a7bf52624f0503f93527213 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ENE @@ -0,0 +1 @@ +CONFIG_IR_ENE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_FINTEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_FINTEK new file mode 100644 index 0000000000000000000000000000000000000000..d0eedcce1e15e38fbb68e1442826d5cd2dbbc275 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_FINTEK @@ -0,0 +1 @@ +CONFIG_IR_FINTEK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGORPLUGUSB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGORPLUGUSB new file mode 100644 index 0000000000000000000000000000000000000000..ce19e31eff5c034e0f7485a48d9417995c3cef27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGORPLUGUSB @@ -0,0 +1 @@ +# CONFIG_IR_IGORPLUGUSB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGUANA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGUANA new file mode 100644 index 0000000000000000000000000000000000000000..f07db57bda63ffcacc8c25812267c37a6d652f3b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGUANA @@ -0,0 +1 @@ +CONFIG_IR_IGUANA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON new file mode 100644 index 0000000000000000000000000000000000000000..6cf331742ab5998282691e24b0d9408a15cffa2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON @@ -0,0 +1 @@ +CONFIG_IR_IMON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_DECODER new file mode 100644 index 0000000000000000000000000000000000000000..0a129a166896a026fa4338c2242ec2976d399d05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_DECODER @@ -0,0 +1 @@ +CONFIG_IR_IMON_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_RAW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_RAW new file mode 100644 index 0000000000000000000000000000000000000000..d7a8ae566ff08ed312eed0453d5c144de3c3f49a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_RAW @@ -0,0 +1 @@ +CONFIG_IR_IMON_RAW=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ITE_CIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ITE_CIR new file mode 100644 index 0000000000000000000000000000000000000000..8bf06f3c929638ff4d510268041592f2208c97e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ITE_CIR @@ -0,0 +1 @@ +CONFIG_IR_ITE_CIR=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_JVC_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_JVC_DECODER new file mode 100644 index 0000000000000000000000000000000000000000..0dad4b65e0236f4862097c83bcc173d209495b57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_JVC_DECODER @@ -0,0 +1 @@ +CONFIG_IR_JVC_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCEUSB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCEUSB new file mode 100644 index 0000000000000000000000000000000000000000..d569f098563595bbc5447a54141dc50dd2bbeabf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCEUSB @@ -0,0 +1 @@ +CONFIG_IR_MCEUSB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCE_KBD_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCE_KBD_DECODER new file mode 100644 index 
0000000000000000000000000000000000000000..60bd7c9629661a78863d488acb06ac6aa960d1f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCE_KBD_DECODER @@ -0,0 +1 @@ +CONFIG_IR_MCE_KBD_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NEC_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NEC_DECODER new file mode 100644 index 0000000000000000000000000000000000000000..0d4e6b3466dca01e28b6e027b76f2f1a8dbf757f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NEC_DECODER @@ -0,0 +1 @@ +CONFIG_IR_NEC_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NUVOTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NUVOTON new file mode 100644 index 0000000000000000000000000000000000000000..667c8cea6c2259bf82154db00e7563cab3eb8712 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NUVOTON @@ -0,0 +1 @@ +CONFIG_IR_NUVOTON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC5_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC5_DECODER new file mode 100644 index 0000000000000000000000000000000000000000..ef01aaedab36b9e8f8b1492c0e827f35ad88193f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC5_DECODER @@ -0,0 +1 @@ +CONFIG_IR_RC5_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC6_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC6_DECODER new file mode 100644 index 0000000000000000000000000000000000000000..7b02e694ae07bb65d5cbaa830d45202aa3197cee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC6_DECODER @@ -0,0 +1 @@ +CONFIG_IR_RC6_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RCMM_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RCMM_DECODER new file mode 100644 index 0000000000000000000000000000000000000000..8393c01f6484e0954ced3944614a7223a9bcf0e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RCMM_DECODER @@ -0,0 +1 @@ +# CONFIG_IR_RCMM_DECODER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_REDRAT3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_REDRAT3 new file mode 100644 index 0000000000000000000000000000000000000000..a09fb6b6d1d9dd0a15ba4318af1701d98eed539a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_REDRAT3 @@ -0,0 +1 @@ +CONFIG_IR_REDRAT3=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SANYO_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SANYO_DECODER new file mode 100644 index 0000000000000000000000000000000000000000..26864c7d838477f757a5ee7dedbea865a7ec1605 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SANYO_DECODER @@ -0,0 +1 @@ +CONFIG_IR_SANYO_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SERIAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SERIAL new file mode 100644 index 0000000000000000000000000000000000000000..53c7f1d81e8e68d5a6107c6dbe3cae52e9ecad2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SERIAL @@ -0,0 +1 @@ +CONFIG_IR_SERIAL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SONY_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SONY_DECODER new file mode 100644 index 0000000000000000000000000000000000000000..0ee42a3dbce0af86d4bd4778325e91b9608bec0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SONY_DECODER @@ -0,0 +1 @@ +CONFIG_IR_SONY_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_STREAMZAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_STREAMZAP new file mode 100644 index 0000000000000000000000000000000000000000..8a21614508cd84be2a2459a3b1720217dbe6996f --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_STREAMZAP @@ -0,0 +1 @@ +CONFIG_IR_STREAMZAP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TOY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TOY new file mode 100644 index 0000000000000000000000000000000000000000..3993b5b17f2f32532b81c372696218086fccfabd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TOY @@ -0,0 +1 @@ +# CONFIG_IR_TOY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TTUSBIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TTUSBIR new file mode 100644 index 0000000000000000000000000000000000000000..d18284cc8c8a6e63457fbead09595a23a99e0dab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TTUSBIR @@ -0,0 +1 @@ +CONFIG_IR_TTUSBIR=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_WINBOND_CIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_WINBOND_CIR new file mode 100644 index 0000000000000000000000000000000000000000..a8e5cd5e7c7e72c5c06ca25b606ee841cb694b60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_WINBOND_CIR @@ -0,0 +1 @@ +CONFIG_IR_WINBOND_CIR=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN new file mode 100644 index 0000000000000000000000000000000000000000..faddbf9dd49da42da652f197275e3994eafd8eae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN @@ -0,0 +1 @@ +CONFIG_ISDN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI new file mode 100644 index 0000000000000000000000000000000000000000..b36fbdb13e9a8b20e6a703e55154c5d6b69c92cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI @@ -0,0 +1 @@ +CONFIG_ISDN_CAPI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI_MIDDLEWARE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI_MIDDLEWARE new file mode 100644 index 0000000000000000000000000000000000000000..759a175f1f7100ff33edf5f85209c260c6a7aaf2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI_MIDDLEWARE @@ -0,0 +1 @@ +CONFIG_ISDN_CAPI_MIDDLEWARE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29003 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29003 new file mode 100644 index 0000000000000000000000000000000000000000..106e426e44d0b04362838fd1ce827a7103cec7e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29003 @@ -0,0 +1 @@ +CONFIG_ISL29003=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29020 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29020 new file mode 100644 index 0000000000000000000000000000000000000000..a441bf00f77610b46085cc487e424b6e30c79af1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29020 @@ -0,0 +1 @@ +CONFIG_ISL29020=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29125 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29125 new file mode 100644 index 0000000000000000000000000000000000000000..53ed90a59b39769e42c51603b3ef7188f8e04862 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29125 @@ -0,0 +1 @@ +# CONFIG_ISL29125 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29501 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29501 new file mode 100644 index 0000000000000000000000000000000000000000..65b1e20732320d98410b35e3935f9581a5e2abe2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29501 @@ -0,0 +1 @@ +# CONFIG_ISL29501 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT8712F_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT8712F_WDT new file mode 100644 index 
0000000000000000000000000000000000000000..0b3b587bd3abb8f16e35f5ce82c479b4700b02eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT8712F_WDT @@ -0,0 +1 @@ +CONFIG_IT8712F_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT87_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT87_WDT new file mode 100644 index 0000000000000000000000000000000000000000..7c21af5170211c25e33229b9f3f21081b9277077 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT87_WDT @@ -0,0 +1 @@ +CONFIG_IT87_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ITG3200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ITG3200 new file mode 100644 index 0000000000000000000000000000000000000000..3ce7a97af61f17bd0a11f63713264c2ed0f0c077 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ITG3200 @@ -0,0 +1 @@ +# CONFIG_ITG3200 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL3945 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL3945 new file mode 100644 index 0000000000000000000000000000000000000000..488a21b4b9ece376913484d2a87bb256be2503c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL3945 @@ -0,0 +1 @@ +# CONFIG_IWL3945 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL4965 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL4965 new file mode 100644 index 0000000000000000000000000000000000000000..602216386ff17f9a8c9f9c540af5b7759c268e73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL4965 @@ -0,0 +1 @@ +# CONFIG_IWL4965 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLDVM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLDVM new file mode 100644 index 0000000000000000000000000000000000000000..3e6af879094e5483c1b324e90f64dd95767ecc65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLDVM @@ -0,0 +1 @@ +CONFIG_IWLDVM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLMVM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLMVM new file mode 100644 index 0000000000000000000000000000000000000000..c22d966fa17b663b0d7ce15906711e8be9cfa8cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLMVM @@ -0,0 +1 @@ +CONFIG_IWLMVM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI new file mode 100644 index 0000000000000000000000000000000000000000..485244aec328e30b1c6f870ed0168b8c519898a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI @@ -0,0 +1 @@ +CONFIG_IWLWIFI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..c646f91ad8683f0c5b108178ccb666a53779d075 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUG @@ -0,0 +1 @@ +# CONFIG_IWLWIFI_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUGFS new file mode 100644 index 0000000000000000000000000000000000000000..2429b837bef570f9aa708ca1ea080132ebea2efb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUGFS @@ -0,0 +1 @@ +CONFIG_IWLWIFI_DEBUGFS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEVICE_TRACING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEVICE_TRACING new file mode 100644 index 0000000000000000000000000000000000000000..31442f8985c4cd8c8153045421674a10d9eb1f08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEVICE_TRACING @@ -0,0 +1 @@ +# CONFIG_IWLWIFI_DEVICE_TRACING is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_LEDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_LEDS new file mode 100644 index 0000000000000000000000000000000000000000..0135ed037e7c3237f3ccea73c124e5ee93f7c15d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_LEDS @@ -0,0 +1 @@ +CONFIG_IWLWIFI_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_OPMODE_MODULAR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_OPMODE_MODULAR new file mode 100644 index 0000000000000000000000000000000000000000..7af1ca93eb452ebc3a1433308e3feb93b54b559a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_OPMODE_MODULAR @@ -0,0 +1 @@ +CONFIG_IWLWIFI_OPMODE_MODULAR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_JAILHOUSE_GUEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_JAILHOUSE_GUEST new file mode 100644 index 0000000000000000000000000000000000000000..aa93b53af4e97f33b1df51466bde263f9271a0e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_JAILHOUSE_GUEST @@ -0,0 +1 @@ +# CONFIG_JAILHOUSE_GUEST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_JSA1212 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_JSA1212 new file mode 100644 index 0000000000000000000000000000000000000000..2b65eeb385cd951fe285798b5f2c6b83bed36290 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_JSA1212 @@ -0,0 +1 @@ +# CONFIG_JSA1212 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KALLSYMS_ABSOLUTE_PERCPU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KALLSYMS_ABSOLUTE_PERCPU new file mode 100644 index 0000000000000000000000000000000000000000..decb2f62f288b330a35394c3da88f06e7a9769a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KALLSYMS_ABSOLUTE_PERCPU @@ -0,0 +1 @@ +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KARMA_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KARMA_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..32e78cf3929cf7de45e896fb296db2a2d81e1252 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KARMA_PARTITION @@ -0,0 +1 @@ +CONFIG_KARMA_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_APPLESPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_APPLESPI new file mode 100644 index 0000000000000000000000000000000000000000..88d59d0cd42accfd3e2b3e7ceb523c59be565cd6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_APPLESPI @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_APPLESPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..d31079cd6b1aaada5ef8487832b54f4f1e607395 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_GPIO @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KMX61 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KMX61 new file mode 100644 index 0000000000000000000000000000000000000000..5e4362ad9b3c26b392def528d940a8db3290a8cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KMX61 @@ -0,0 +1 @@ +# CONFIG_KMX61 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KRETPROBE_ON_RETHOOK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KRETPROBE_ON_RETHOOK new file mode 100644 index 0000000000000000000000000000000000000000..b29d2d70a3326d35165d0b95f245d4dc7843d202 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KRETPROBE_ON_RETHOOK @@ -0,0 +1 @@ +CONFIG_KRETPROBE_ON_RETHOOK=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_ASYNC_PF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_ASYNC_PF new file mode 100644 index 0000000000000000000000000000000000000000..fc0160b92ab6c453bc2243341b2a62d0d52b3467 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_ASYNC_PF @@ -0,0 +1 @@ +CONFIG_KVM_ASYNC_PF=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_COMPAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_COMPAT new file mode 100644 index 0000000000000000000000000000000000000000..0a783f2dfd69d3e71c667e5a74cc64900ad8afbc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_COMPAT @@ -0,0 +1 @@ +CONFIG_KVM_COMPAT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_EXTERNAL_WRITE_TRACKING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_EXTERNAL_WRITE_TRACKING new file mode 100644 index 0000000000000000000000000000000000000000..483053d535562d9bd7d20d0fc67eda15b00c372d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_EXTERNAL_WRITE_TRACKING @@ -0,0 +1 @@ +CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_SMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_SMM new file mode 100644 index 0000000000000000000000000000000000000000..db0ae8a99e90e38fffdcbd21bd4385f9f757662a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_SMM @@ -0,0 +1 @@ +CONFIG_KVM_SMM=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_XEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_XEN new file mode 100644 index 0000000000000000000000000000000000000000..de03d1010a4b31a9767876658224f2b7cfb5b5ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_XEN @@ -0,0 +1 @@ +# CONFIG_KVM_XEN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXCJK1013 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXCJK1013 new file mode 100644 index 0000000000000000000000000000000000000000..0aa734f18e46a43c1e53a18c3cb25c008e1778e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXCJK1013 @@ -0,0 +1 @@ +# CONFIG_KXCJK1013 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXSD9 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXSD9 new file mode 100644 index 0000000000000000000000000000000000000000..ea0cfd36c5870a3732b3d0f6186354262a74eb22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXSD9 @@ -0,0 +1 @@ +# CONFIG_KXSD9 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_APU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_APU new file mode 100644 index 0000000000000000000000000000000000000000..571c04c1ce2f0126b1e2ea10bfae4c8320fbc5eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_APU @@ -0,0 +1 @@ +# CONFIG_LEDS_APU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_CLASS_FLASH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_CLASS_FLASH new file mode 100644 index 0000000000000000000000000000000000000000..feacc7f5659d328841963b1259fc3f4a724bf09b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_CLASS_FLASH @@ -0,0 +1 @@ +# CONFIG_LEDS_CLASS_FLASH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_INTEL_SS4200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_INTEL_SS4200 new file mode 100644 index 0000000000000000000000000000000000000000..9937903af02d934e93531a1b19eb7d2448039f9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_INTEL_SS4200 @@ -0,0 +1 @@ +CONFIG_LEDS_INTEL_SS4200=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_LT3593 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_LT3593 new file mode 100644 index 
0000000000000000000000000000000000000000..f0784c9024d65daffbcd945003f5182c82ba8151 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_LT3593 @@ -0,0 +1 @@ +# CONFIG_LEDS_LT3593 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_MLXCPLD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_MLXCPLD new file mode 100644 index 0000000000000000000000000000000000000000..0bef0da2f826a864021de89cc24796f8ee7d9872 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_MLXCPLD @@ -0,0 +1 @@ +CONFIG_LEDS_MLXCPLD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_NIC78BX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_NIC78BX new file mode 100644 index 0000000000000000000000000000000000000000..2ac0c1c812a92f35cb4b6ffb12ad0e5aad81d8c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_NIC78BX @@ -0,0 +1 @@ +# CONFIG_LEDS_NIC78BX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_AUDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_AUDIO new file mode 100644 index 0000000000000000000000000000000000000000..bd45e0d885c1f2eb66c2b6d39a7b84fd59b336fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_AUDIO @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_AUDIO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_DISK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_DISK new file mode 100644 index 0000000000000000000000000000000000000000..65230ad6b1b52542d70eac448a87a10607bedc96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_DISK @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_DISK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEGACY_VSYSCALL_XONLY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEGACY_VSYSCALL_XONLY new file mode 100644 index 0000000000000000000000000000000000000000..65d6a69e8c1d98f97d4e9412d98bdcfd8808f7be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEGACY_VSYSCALL_XONLY @@ -0,0 +1 @@ +CONFIG_LEGACY_VSYSCALL_XONLY=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LENOVO_YMC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LENOVO_YMC new file mode 100644 index 0000000000000000000000000000000000000000..807ab67f155822504f0f22bdeb236e32b337232b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LENOVO_YMC @@ -0,0 +1 @@ +# CONFIG_LENOVO_YMC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LG_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LG_LAPTOP new file mode 100644 index 0000000000000000000000000000000000000000..b08108d5adfbc8f1f0f35380aef3bc09e6028dc3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LG_LAPTOP @@ -0,0 +1 @@ +# CONFIG_LG_LAPTOP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS new file mode 100644 index 0000000000000000000000000000000000000000..9f51a6c121ab623a5cb09a2056cfdcd7365d3df9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS @@ -0,0 +1 @@ +# CONFIG_LIBERTAS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS_THINFIRM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS_THINFIRM new file mode 100644 index 0000000000000000000000000000000000000000..7e611e155629a489ab3e4b627b173fd2228ece82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS_THINFIRM @@ -0,0 +1 @@ +# CONFIG_LIBERTAS_THINFIRM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBNVDIMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBNVDIMM new file mode 100644 index 0000000000000000000000000000000000000000..f773300dc43f207974cd143e2ff686dc7f0e5fa9 
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBNVDIMM
@@ -0,0 +1 @@
+CONFIG_LIBNVDIMM=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIDAR_LITE_V2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIDAR_LITE_V2
new file mode 100644
index 0000000000000000000000000000000000000000..731a434426c84bfd01ec00c308fa0425f86ac3f1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIDAR_LITE_V2
@@ -0,0 +1 @@
+# CONFIG_LIDAR_LITE_V2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIRC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIRC
new file mode 100644
index 0000000000000000000000000000000000000000..bc883983d070981297daa75452cbfcba4beecd2d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIRC
@@ -0,0 +1 @@
+# CONFIG_LIRC is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LMP91000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LMP91000
new file mode 100644
index 0000000000000000000000000000000000000000..aa44e35dfb354ed665e2ce49a7580c710ddede87
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LMP91000
@@ -0,0 +1 @@
+# CONFIG_LMP91000 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_ICH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_ICH
new file mode 100644
index 0000000000000000000000000000000000000000..0348d015ec043530296cd112b1eb92e6dc607085
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_ICH
@@ -0,0 +1 @@
+CONFIG_LPC_ICH=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_SCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_SCH
new file mode 100644
index 0000000000000000000000000000000000000000..5d8664a8a738b998990f89dd4852480fbd881a5f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_SCH
@@ -0,0 +1 @@
+CONFIG_LPC_SCH=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LP_CONSOLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LP_CONSOLE
new file mode 100644
index 0000000000000000000000000000000000000000..1d8b49586404e5998b855677eb9ea17aaf584309
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LP_CONSOLE
@@ -0,0 +1 @@
+# CONFIG_LP_CONSOLE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC1660 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC1660
new file mode 100644
index 0000000000000000000000000000000000000000..89f64c1aa48f0e12cbc75cd0be5b89643c2c4677
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC1660
@@ -0,0 +1 @@
+# CONFIG_LTC1660 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2471 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2471
new file mode 100644
index 0000000000000000000000000000000000000000..5d272ac29bdbafe6a2d85866c50d84595d38f297
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2471
@@ -0,0 +1 @@
+# CONFIG_LTC2471 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2485 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2485
new file mode 100644
index 0000000000000000000000000000000000000000..09a531e8b6d843013c01e31bbde6b108f648f1e3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2485
@@ -0,0 +1 @@
+# CONFIG_LTC2485 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2496 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2496
new file mode 100644
index 0000000000000000000000000000000000000000..b63c5163ce7a5a5d342df06a430d4fb9359b4cb8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2496
@@ -0,0 +1 @@
+# CONFIG_LTC2496 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2497 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2497
new file mode 100644
index 0000000000000000000000000000000000000000..312f3db17f392fd2d9fd9c9e6119f6f7047613db
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2497
@@ -0,0 +1 @@
+# CONFIG_LTC2497 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2632 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2632
new file mode 100644
index 0000000000000000000000000000000000000000..8bc2b8bc435ced35fab96829a4c90258c6fcf66a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2632
@@ -0,0 +1 @@
+# CONFIG_LTC2632 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2688 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2688
new file mode 100644
index 0000000000000000000000000000000000000000..115e34b206048636f6e037176b970b3f7e4c3deb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2688
@@ -0,0 +1 @@
+# CONFIG_LTC2688 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2983 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2983
new file mode 100644
index 0000000000000000000000000000000000000000..170afd86b7446a86591559320990356762e02a7c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2983
@@ -0,0 +1 @@
+# CONFIG_LTC2983 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTR501 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTR501
new file mode 100644
index 0000000000000000000000000000000000000000..04512139d8933433545cb77782a1bb989d378931
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTR501
@@ -0,0 +1 @@
+# CONFIG_LTR501 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTRF216A b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTRF216A
new file mode 100644
index 0000000000000000000000000000000000000000..e57ef9dc388bb39ad7119aeeac88d36802e77421
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTRF216A
@@ -0,0 +1 @@
+# CONFIG_LTRF216A is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LV0104CS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LV0104CS
new file mode 100644
index 0000000000000000000000000000000000000000..5e71853613f5ae87e52387ba14f2753da594ef72
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LV0104CS
@@ -0,0 +1 @@
+# CONFIG_LV0104CS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_M62332 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_M62332
new file mode 100644
index 0000000000000000000000000000000000000000..5b8b668eca853bf8cb338541561cfb3ec940533f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_M62332
@@ -0,0 +1 @@
+# CONFIG_M62332 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC80211_HWSIM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC80211_HWSIM
new file mode 100644
index 0000000000000000000000000000000000000000..274bf1d929ceb360fb38b71447fba0f67906caac
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC80211_HWSIM
@@ -0,0 +1 @@
+CONFIG_MAC80211_HWSIM=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACHZ_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACHZ_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..554b4e91aaa217111dac5f5faa0c8d1d26f7cdfd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACHZ_WDT
@@ -0,0 +1 @@
+CONFIG_MACHZ_WDT=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACINTOSH_DRIVERS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACINTOSH_DRIVERS
new file mode 100644
index 0000000000000000000000000000000000000000..105442f402c5fe4c51ee330f6df6d1dc1e155ff2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACINTOSH_DRIVERS
@@ -0,0 +1 @@
+CONFIG_MACINTOSH_DRIVERS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_EMUMOUSEBTN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_EMUMOUSEBTN
new file mode 100644
index 0000000000000000000000000000000000000000..76b30caa5fb9f1feceb0f9a55f8ac617129cd29b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_EMUMOUSEBTN
@@ -0,0 +1 @@
+CONFIG_MAC_EMUMOUSEBTN=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_PARTITION
new file mode 100644
index 0000000000000000000000000000000000000000..66fca5aefbe2049db1458bbd5c754f483294a77f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_PARTITION
@@ -0,0 +1 @@
+CONFIG_MAC_PARTITION=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAG3110 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAG3110
new file mode 100644
index 0000000000000000000000000000000000000000..2db4f0a851371ae29c02aaf0b8551616525381a5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAG3110
@@ -0,0 +1 @@
+# CONFIG_MAG3110 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MATOM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MATOM
new file mode 100644
index 0000000000000000000000000000000000000000..d80376d426916c2755fae3a519ae9a577abb814e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MATOM
@@ -0,0 +1 @@
+# CONFIG_MATOM is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1027 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1027
new file mode 100644
index 0000000000000000000000000000000000000000..579a537aa6f4d950591cdc5fe9b7768e5716ff9b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1027
@@ -0,0 +1 @@
+# CONFIG_MAX1027 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11100
new file mode 100644
index 0000000000000000000000000000000000000000..90819abc810f6bbb8c2bd4c6fc8e377eda1e4370
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11100
@@ -0,0 +1 @@
+# CONFIG_MAX11100 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1118 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1118
new file mode 100644
index 0000000000000000000000000000000000000000..615bda2e232578fd6eb8c6b43020876852882cda
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1118
@@ -0,0 +1 @@
+# CONFIG_MAX1118 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11205 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11205
new file mode 100644
index 0000000000000000000000000000000000000000..c9d46afb3f9396f7b194e3f79d2e1c95f7d44bdf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11205
@@ -0,0 +1 @@
+# CONFIG_MAX11205 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11410 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11410
new file mode 100644
index 0000000000000000000000000000000000000000..91912eb0263cbd5581a278a096835feeff452262
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11410
@@ -0,0 +1 @@
+# CONFIG_MAX11410 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1241 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1241
new file mode 100644
index 0000000000000000000000000000000000000000..813c3f2cd8831ddc6bff75422ad80553d020e2a4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1241
@@ -0,0 +1 @@
+# CONFIG_MAX1241 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1363 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1363
new file mode 100644
index 0000000000000000000000000000000000000000..d0090112ccab6ee9b8b1180c9e6d05ed1888d19e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1363
@@ -0,0 +1 @@
+# CONFIG_MAX1363 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30100
new file mode 100644
index 0000000000000000000000000000000000000000..04886e68f320768059b6c0f6804ae0431b8ee333
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30100
@@ -0,0 +1 @@
+# CONFIG_MAX30100 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30102 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30102
new file mode 100644
index 0000000000000000000000000000000000000000..5b4aacf3d603e2baaf7f0e839adf9f5da306dd0f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30102
@@ -0,0 +1 @@
+# CONFIG_MAX30102 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30208 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30208
new file mode 100644
index 0000000000000000000000000000000000000000..e324af9d60ea7adcff23e3ddb35e05fa26673767
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30208
@@ -0,0 +1 @@
+# CONFIG_MAX30208 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31856 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31856
new file mode 100644
index 0000000000000000000000000000000000000000..9152f83b6a41be66eb41ccb84fca1fdcd43c5f55
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31856
@@ -0,0 +1 @@
+# CONFIG_MAX31856 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31865 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31865
new file mode 100644
index 0000000000000000000000000000000000000000..ae3c7ebbb7ec783178d7ac04e9a80b6149f1fd39
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31865
@@ -0,0 +1 @@
+# CONFIG_MAX31865 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44000
new file mode 100644
index 0000000000000000000000000000000000000000..d41f59c30a90ec74a5dc1768d5a0f24356de73de
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44000
@@ -0,0 +1 @@
+# CONFIG_MAX44000 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44009 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44009
new file mode 100644
index 0000000000000000000000000000000000000000..8a008ed653d0f60a08617d49d3da94eb50302059
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44009
@@ -0,0 +1 @@
+# CONFIG_MAX44009 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX517 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX517
new file mode 100644
index 0000000000000000000000000000000000000000..5b3bda9c0a6dfdafc7948461092201dd5c111112
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX517
@@ -0,0 +1 @@
+# CONFIG_MAX517 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5432 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5432
new file mode 100644
index 0000000000000000000000000000000000000000..b6ea28de1bacd76511ed67d04a7cf10f38a17186
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5432
@@ -0,0 +1 @@
+# CONFIG_MAX5432 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5481 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5481
new file mode 100644
index 0000000000000000000000000000000000000000..36b3781d602ce4e58b5d93032f6e4c083ae74a24
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5481
@@ -0,0 +1 @@
+# CONFIG_MAX5481 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5487 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5487
new file mode 100644
index 0000000000000000000000000000000000000000..4ac669fe238aa4aa1262fadc807bd67ba0a1c28d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5487
@@ -0,0 +1 @@
+# CONFIG_MAX5487 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5522 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5522
new file mode 100644
index 0000000000000000000000000000000000000000..235413f25805032a919cb485f1b91e23e3b95573
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5522
@@ -0,0 +1 @@
+# CONFIG_MAX5522 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5821 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5821
new file mode 100644
index 0000000000000000000000000000000000000000..a308172ed0553f57a2365f63ed518bdf00913edc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5821
@@ -0,0 +1 @@
+# CONFIG_MAX5821 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX9611 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX9611
new file mode 100644
index 0000000000000000000000000000000000000000..1cbc674e0c6a1208575f7dad8e2c9f97ccfb8acd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX9611
@@ -0,0 +1 @@
+# CONFIG_MAX9611 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAXIM_THERMOCOUPLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAXIM_THERMOCOUPLE
new file mode 100644
index 0000000000000000000000000000000000000000..442d4efa6f3e71c1750239f09549286aeec90e10
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAXIM_THERMOCOUPLE
@@ -0,0 +1 @@
+# CONFIG_MAXIM_THERMOCOUPLE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MB1232 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MB1232
new file mode 100644
index 0000000000000000000000000000000000000000..522f4becb46bed8c0a704e337d3ab5f79d6ad92f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MB1232
@@ -0,0 +1 @@
+# CONFIG_MB1232 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MC3230 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MC3230
new file mode 100644
index 0000000000000000000000000000000000000000..e06bd44e29d71c805493a8d887b14ee1574db9e5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MC3230
@@ -0,0 +1 @@
+# CONFIG_MC3230 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP320X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP320X
new file mode 100644
index 0000000000000000000000000000000000000000..da2f99aaf256787d831c1d32028214223717990b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP320X
@@ -0,0 +1 @@
+# CONFIG_MCP320X is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3422 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3422
new file mode 100644
index 0000000000000000000000000000000000000000..0f00863b4f3ccd9a77c2c511fe01ed83bacf1225
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3422
@@ -0,0 +1 @@
+# CONFIG_MCP3422 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3911 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3911
new file mode 100644
index 0000000000000000000000000000000000000000..2f846d6945593bb29c809f38fe557556a7c960a4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3911
@@ -0,0 +1 @@
+# CONFIG_MCP3911 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4018 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4018
new file mode 100644
index 0000000000000000000000000000000000000000..5eb63131176f78837cdc530347aae9ded610d314
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4018
@@ -0,0 +1 @@
+# CONFIG_MCP4018 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP41010 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP41010
new file mode 100644
index 0000000000000000000000000000000000000000..a1a1bfc15678684e375cedc84a9e83685c1e2965
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP41010
@@ -0,0 +1 @@
+# CONFIG_MCP41010 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4131 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4131
new file mode 100644
index 0000000000000000000000000000000000000000..62d46e61922432a49e676af6b86c71445b5e840e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4131
@@ -0,0 +1 @@
+# CONFIG_MCP4131 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4531 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4531
new file mode 100644
index 0000000000000000000000000000000000000000..cd27ab52132c15ce558745e9b7329d9814916c32
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4531
@@ -0,0 +1 @@
+# CONFIG_MCP4531 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4725 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4725
new file mode 100644
index 0000000000000000000000000000000000000000..c88886b2191140a96bc2f7e2d8ee8102b1b116e9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4725
@@ -0,0 +1 @@
+# CONFIG_MCP4725 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4728 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4728
new file mode 100644
index 0000000000000000000000000000000000000000..59a53b7e1bdc1398e8f90641c379b3ee64134f43
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4728
@@ -0,0 +1 @@
+# CONFIG_MCP4728 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4922 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4922
new file mode 100644
index 0000000000000000000000000000000000000000..a78c7a7d12468e37c6206e138f367ae186987c8e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4922
@@ -0,0 +1 @@
+# CONFIG_MCP4922 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MDIO_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MDIO_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..9ae9783563daf820250a771da8ae81cd02e4046f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MDIO_GPIO
@@ -0,0 +1 @@
+# CONFIG_MDIO_GPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_ANALOG_TV_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_ANALOG_TV_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..48afe48b8f6335c7ba93ceadb570e272ffd4f945
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_ANALOG_TV_SUPPORT
@@ -0,0 +1 @@
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CAMERA_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CAMERA_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..7b16f1e62723e8410c007edcb7df550f6876a6e0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CAMERA_SUPPORT
@@ -0,0 +1 @@
+# CONFIG_MEDIA_CAMERA_SUPPORT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_RC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_RC
new file mode 100644
index 0000000000000000000000000000000000000000..93bb282f05dfaacb32c1f25b12cf05d0471331d6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_RC
@@ -0,0 +1 @@
+# CONFIG_MEDIA_CEC_RC is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..c23046c74f96a479c750deab8a3b681947914aa2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_SUPPORT
@@ -0,0 +1 @@
+CONFIG_MEDIA_CEC_SUPPORT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_DIGITAL_TV_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_DIGITAL_TV_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..fbaf5a88de56b60561ca05eed3b070d6fabd2749
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_DIGITAL_TV_SUPPORT
@@ -0,0 +1 @@
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV
new file mode 100644
index 0000000000000000000000000000000000000000..ba3e5ca61c3ac5cb35527d157051d9d3cc34ffc9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV
@@ -0,0 +1 @@
+CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PCI_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PCI_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..74a0751574ab86bc7ad9bd898d7a7a2810b318fc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PCI_SUPPORT
@@ -0,0 +1 @@
+CONFIG_MEDIA_PCI_SUPPORT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PLATFORM_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PLATFORM_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..bae8cf596e81ad9865793330d0910c6cedfdc0f5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PLATFORM_SUPPORT
@@ -0,0 +1 @@
+# CONFIG_MEDIA_PLATFORM_SUPPORT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_RADIO_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_RADIO_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..25cbb953d8d4a0bc134e9f508c4c54a868fd26ca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_RADIO_SUPPORT
@@ -0,0 +1 @@
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SDR_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SDR_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..e75136efaeef8ab49365604d07838c49fb47d58c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SDR_SUPPORT
@@ -0,0 +1 @@
+# CONFIG_MEDIA_SDR_SUPPORT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUBDRV_AUTOSELECT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUBDRV_AUTOSELECT
new file mode 100644
index 0000000000000000000000000000000000000000..096cc99ca22f4f4ed2897ee4d43d887308e140b6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUBDRV_AUTOSELECT
@@ -0,0 +1 @@
+CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..518972fe85d661d9057f123d5cec0b93f1ffb90a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT
@@ -0,0 +1 @@
+CONFIG_MEDIA_SUPPORT=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT_FILTER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT_FILTER
new file mode 100644
index 0000000000000000000000000000000000000000..2e98c59914aeb84a91cf399201c06a46faf80887
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT_FILTER
@@ -0,0 +1 @@
+CONFIG_MEDIA_SUPPORT_FILTER=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_TEST_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_TEST_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..da6e02be45a2f49aa454af89c2cab643f81f7d3b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_TEST_SUPPORT
@@ -0,0 +1 @@
+# CONFIG_MEDIA_TEST_SUPPORT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_USB_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_USB_SUPPORT
new file mode 100644
index 0000000000000000000000000000000000000000..49ea6a0ae7e4198fafeb7981f0ed92a331260024
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_USB_SUPPORT
@@ -0,0 +1 @@
+CONFIG_MEDIA_USB_SUPPORT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..ad0265810c10f2554ee701c39031a4890c6c77eb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_PCI
@@ -0,0 +1 @@
+CONFIG_MEMSTICK_REALTEK_PCI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_USB
new file mode 100644
index 0000000000000000000000000000000000000000..8792e797c8001a6daf7051c6947d66a4062edda7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_USB
@@ -0,0 +1 @@
+CONFIG_MEMSTICK_REALTEK_USB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MERAKI_MX100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MERAKI_MX100
new file mode 100644
index 0000000000000000000000000000000000000000..a6a10756af37da5cb0b78e873a7989838525a5f0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MERAKI_MX100
@@ -0,0 +1 @@
+# CONFIG_MERAKI_MX100 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..0282448d0276654089a56099b8fd18c4e2c7aef4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_CORE
@@ -0,0 +1 @@
+CONFIG_MFD_CORE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS
new file mode 100644
index 0000000000000000000000000000000000000000..f2e6de9b681d74f51f5c2970d83e2eb3aafff451
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS
@@ -0,0 +1 @@
+CONFIG_MFD_INTEL_LPSS=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_ACPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_ACPI
new file mode 100644
index 0000000000000000000000000000000000000000..7be3065fcac9623c153b4f2a1d4f1a4d1c425829
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_ACPI
@@ -0,0 +1 @@
+CONFIG_MFD_INTEL_LPSS_ACPI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..93a430846349053cc685ae227eb4dedbfdda651b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_PCI
@@ -0,0 +1 @@
+CONFIG_MFD_INTEL_LPSS_PCI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_PMC_BXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_PMC_BXT
new file mode 100644
index 0000000000000000000000000000000000000000..d6d6d1a92d8ea871c2c440e668631c3b80bf3ec6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_PMC_BXT
@@ -0,0 +1 @@
+# CONFIG_MFD_INTEL_PMC_BXT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_QUARK_I2C_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_QUARK_I2C_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..e0e51d57709f28f43323e6b64cd7a959fbf66e9f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_QUARK_I2C_GPIO
@@ -0,0 +1 @@
+# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501
new file mode 100644
index 0000000000000000000000000000000000000000..d056f1aad7d7f373d7fba269aceccbe25e5f99d8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501
@@ -0,0 +1 @@
+CONFIG_MFD_SM501=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501_GPIO
new file mode 100644
index 0000000000000000000000000000000000000000..2fffb094aa5a030917d27310ed652359eaac572c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501_GPIO
@@ -0,0 +1 @@
+CONFIG_MFD_SM501_GPIO=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SYSCON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SYSCON
new file mode 100644
index 0000000000000000000000000000000000000000..cab0ef0b79aef3a355c0c2db60fa98bf70ef334e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SYSCON
@@ -0,0 +1 @@
+# CONFIG_MFD_SYSCON is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VIPERBOARD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VIPERBOARD
new file mode 100644
index 0000000000000000000000000000000000000000..1b26e164db4604964322e364bd5ff0b392ebb253
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VIPERBOARD
@@ -0,0 +1 @@
+CONFIG_MFD_VIPERBOARD=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VX855 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VX855
new file mode 100644
index 0000000000000000000000000000000000000000..560a3284fd08feab8525a55bac25f3b90d77335e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VX855
@@ -0,0 +1 @@
+CONFIG_MFD_VX855=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICREL_KS8995MA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICREL_KS8995MA
new file mode 100644
index 0000000000000000000000000000000000000000..70b35891446fd33d755daab92fa1c2fb32aa272e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICREL_KS8995MA
@@ -0,0 +1 @@
+# CONFIG_MICREL_KS8995MA is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROCODE_LATE_LOADING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROCODE_LATE_LOADING
new file mode 100644
index 0000000000000000000000000000000000000000..a064646b34314722e238b099540d5e0ba42890cd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROCODE_LATE_LOADING
@@ -0,0 +1 @@
+# CONFIG_MICROCODE_LATE_LOADING is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MINIX_SUBPARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MINIX_SUBPARTITION
new file mode 100644
index 0000000000000000000000000000000000000000..65cb50d6b7da267ea9a582c8d496002733600c0b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MINIX_SUBPARTITION
@@ -0,0 +1 @@
+CONFIG_MINIX_SUBPARTITION=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX
new file mode 100644
index 0000000000000000000000000000000000000000..246e4901febe12cd521524efc5a4315dbb4fa397
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX
@@ -0,0 +1 @@
+CONFIG_MISC_RTSX=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..c05d0dda2c79fa8c693b5789626967b59ce5df8b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_PCI
@@ -0,0 +1 @@
+CONFIG_MISC_RTSX_PCI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_USB
new file mode 100644
index 0000000000000000000000000000000000000000..c22296e513de076e1130dc34c0b1f3908237748a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_USB
@@ -0,0 +1 @@
+CONFIG_MISC_RTSX_USB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN
new file mode 100644
index 0000000000000000000000000000000000000000..b1a6775949da6ea7cfb3465dae415d9d3370e9be
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN
@@ -0,0 +1 @@
+CONFIG_MISDN=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_AVMFRITZ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_AVMFRITZ
new file mode 100644
index 0000000000000000000000000000000000000000..c461708cd5abe0d4c77a6ceeda93a770e64cb12f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_AVMFRITZ
@@ -0,0 +1 @@
+CONFIG_MISDN_AVMFRITZ=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_DSP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_DSP
new file mode 100644
index 0000000000000000000000000000000000000000..897e3c7ea8385a486989364b8e4b92fbdf9c3e6a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_DSP
@@ -0,0 +1 @@
+CONFIG_MISDN_DSP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HDLC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HDLC
new file mode 100644
index 0000000000000000000000000000000000000000..01eddd0cb5152052daa3494831e3b29962e68d43
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HDLC
@@ -0,0 +1 @@
+CONFIG_MISDN_HDLC=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCMULTI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCMULTI
new file mode 100644
index 0000000000000000000000000000000000000000..61c2917c9e4bd84a057f14d942674a5cb040513a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCMULTI
@@ -0,0 +1 @@
+CONFIG_MISDN_HFCMULTI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCPCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCPCI
new file mode 100644
index 0000000000000000000000000000000000000000..f9d759bf63fe118558128ea1fa80572361d71317
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCPCI
@@ -0,0 +1 @@
+CONFIG_MISDN_HFCPCI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCUSB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCUSB
new file mode 100644
index 0000000000000000000000000000000000000000..5bf22217bbc09be4671798d879d5d818bca6f91a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCUSB
@@ -0,0 +1 @@
+CONFIG_MISDN_HFCUSB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_INFINEON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_INFINEON
new file mode 100644
index 0000000000000000000000000000000000000000..ccb4360c41b525135766d4f91f468e619f31d85f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_INFINEON
@@ -0,0 +1 @@
+CONFIG_MISDN_INFINEON=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_IPAC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_IPAC
new file mode 100644
index 0000000000000000000000000000000000000000..6a53662ae67cc7e1c103d1a81fb3b1fcbd2f55fa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_IPAC
@@ -0,0 +1 @@
+CONFIG_MISDN_IPAC=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_ISAR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_ISAR
new file mode 100644
index 0000000000000000000000000000000000000000..566a8e8a4efb6e8e7aea7d0c185142e57c0a1a1b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_ISAR
@@ -0,0 +1 @@
+CONFIG_MISDN_ISAR=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_L1OIP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_L1OIP
new file mode 100644
index 0000000000000000000000000000000000000000..3cde6368dfe0dd60407a54cf4b6f72f0aede47ae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_L1OIP
@@ -0,0 +1 @@
+CONFIG_MISDN_L1OIP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_NETJET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_NETJET
new file mode 100644
index 0000000000000000000000000000000000000000..379062a8b991e1ec14139a9ae0333d3024635253
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_NETJET
@@ -0,0 +1 @@
+CONFIG_MISDN_NETJET=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_SPEEDFAX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_SPEEDFAX
new file mode 100644
index 0000000000000000000000000000000000000000..bd52af29d3b055d5664debc3bf8831a3c38dfa43
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_SPEEDFAX
@@ -0,0 +1 @@
+CONFIG_MISDN_SPEEDFAX=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_W6692 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_W6692
new file mode 100644
index 0000000000000000000000000000000000000000..9b0b1d892d39ac56081f15437d06ce4243983cc1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_W6692
@@ -0,0 +1 @@
+CONFIG_MISDN_W6692=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MK8 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MK8
new file mode 100644
index 0000000000000000000000000000000000000000..2f448241b46dfadf29d674b92ff55ea93a71e257
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MK8
@@ -0,0 +1 @@
+# CONFIG_MK8 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90614 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90614
new file mode 100644
index 0000000000000000000000000000000000000000..886455c5d30db51dfe0c5038bfdef255b2326de3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90614
@@ -0,0 +1 @@
+# CONFIG_MLX90614 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90632 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90632
new file mode 100644
index 0000000000000000000000000000000000000000..f8285863179072951801ac54fe4bd1ba80b5f6a5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90632
@@ -0,0 +1 @@
+# CONFIG_MLX90632 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_IO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_IO
new file mode 100644
index 0000000000000000000000000000000000000000..ce97d907dc59b8449ed10fd304831e6e6ac1a8c5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_IO
@@ -0,0 +1 @@
+# CONFIG_MLXREG_IO is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_LC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_LC
new file mode 100644
index 0000000000000000000000000000000000000000..456329a2c287fa328a7b9547deb4b880a917a783
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_LC
@@ -0,0 +1 @@
+# CONFIG_MLXREG_LC is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..68597aed64984bbc974bafba9b09e632a32c1ffc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX_WDT
@@ -0,0 +1 @@
+# CONFIG_MLX_WDT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..ba1a64705aa46687e4bcb338b4a68f6f4d937d48
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_I2C
@@ -0,0 +1 @@
+# CONFIG_MMA7455_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_SPI
new file mode 100644
index 0000000000000000000000000000000000000000..5cb36e546b43ef1550616107f871d1cf67203eac
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_SPI
@@ -0,0 +1 @@
+# CONFIG_MMA7455_SPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7660 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7660
new file mode 100644
index 0000000000000000000000000000000000000000..f729209ea41711b21008ca40e7a0aeaae77253d6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7660
@@ -0,0 +1 @@
+# CONFIG_MMA7660 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA8452 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA8452
new file mode 100644
index 0000000000000000000000000000000000000000..ef5caa02aee2b419b09373ffceca88209a4313ae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA8452
@@ -0,0 +1 @@
+# CONFIG_MMA8452 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9551 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9551
new file mode 100644
index 0000000000000000000000000000000000000000..116e8e246b2464b3ce85a3836640f8a4a0e8f293
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9551
@@ -0,0 +1 @@
+# CONFIG_MMA9551 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9553 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9553
new file mode 100644
index 0000000000000000000000000000000000000000..a1a7373f27efe28607f5513d4e5217405c34f38a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9553
@@ -0,0 +1 @@
+# CONFIG_MMA9553 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC35240 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC35240
new file mode 100644
index 0000000000000000000000000000000000000000..9e1cf1c4bcacc7f444feb3733fafc824a8889207
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC35240
@@ -0,0 +1 @@
+# CONFIG_MMC35240 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMCONF_FAM10H b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMCONF_FAM10H
new file mode 100644
index 0000000000000000000000000000000000000000..8cf0a943fc052bf82de62fafbed0feaad47c20fa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMCONF_FAM10H
@@ -0,0 +1 @@
+CONFIG_MMCONF_FAM10H=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_MTK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_MTK
new file mode 100644
index 0000000000000000000000000000000000000000..d92a9072c875727ac7738f01d307e7ecebf8f931
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_MTK
@@ -0,0 +1 @@
+# CONFIG_MMC_MTK is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..90687a235689a728f3b8f88424396faba9970a13
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_PCI
@@ -0,0 +1 @@
+CONFIG_MMC_REALTEK_PCI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_USB
new file mode 100644
index 0000000000000000000000000000000000000000..7268a312edce8fe44c18d06c2c03c1a8e97a2037
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_USB
@@ -0,0 +1 @@
+CONFIG_MMC_REALTEK_USB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_TOSHIBA_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_TOSHIBA_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..2ae27a7ef1b14e8b23d0f23b53d7f0de3f05a64a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_TOSHIBA_PCI
@@ -0,0 +1 @@
+# CONFIG_MMC_TOSHIBA_PCI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_WBSD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_WBSD
new file mode 100644
index 0000000000000000000000000000000000000000..886f684b3b188edbcfc5f880c376458a743529e6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_WBSD
@@ -0,0 +1 @@
+# CONFIG_MMC_WBSD is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMIOTRACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMIOTRACE
new file mode 100644
index 0000000000000000000000000000000000000000..958d609b33c7d6865ddc75c78396adff2c6f3fc8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMIOTRACE
@@ -0,0 +1 @@
+# CONFIG_MMIOTRACE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMU_GATHER_MERGE_VMAS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMU_GATHER_MERGE_VMAS
new file mode 100644
index 0000000000000000000000000000000000000000..2219eddcd3621afba15d3e16041bc08655af7ebd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMU_GATHER_MERGE_VMAS
@@ -0,0 +1 @@
+CONFIG_MMU_GATHER_MERGE_VMAS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_APPLETOUCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_APPLETOUCH
new file mode 100644
index 0000000000000000000000000000000000000000..2aa5a2df765128cb77eceba034f91f48d5df53db
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_APPLETOUCH
@@ -0,0 +1 @@
+CONFIG_MOUSE_APPLETOUCH=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_BCM5974 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_BCM5974
new file mode 100644
index 0000000000000000000000000000000000000000..797189442ff1e3b26a1800e8af952afe7008937c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_BCM5974
@@ -0,0 +1 @@
+CONFIG_MOUSE_BCM5974=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_CYAPA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_CYAPA
new file mode 100644
index 0000000000000000000000000000000000000000..b0a9d858116b7adbc594dba035773247d7426ff7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_CYAPA
@@ -0,0 +1 @@
+CONFIG_MOUSE_CYAPA=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_ELAN_I2C_SMBUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_ELAN_I2C_SMBUS
new file mode 100644
index 0000000000000000000000000000000000000000..f1268e0a84a68d698edd93543f009fa9c6ffc3a2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_ELAN_I2C_SMBUS
@@ -0,0 +1 @@
+# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH
new file mode 100644
index 0000000000000000000000000000000000000000..15c77ec5c901489e4d4339b76c6a42482b1275b1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH
@@ -0,0 +1 @@
+CONFIG_MOUSE_PS2_ELANTECH=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH_SMBUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH_SMBUS
new file mode 100644
index 0000000000000000000000000000000000000000..1aaf34652a4017851abc5ba61d2cfbc1b7f012b9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH_SMBUS
@@ -0,0 +1 @@
+CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LIFEBOOK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LIFEBOOK
new file mode 100644
index 0000000000000000000000000000000000000000..4b0746ac16f26d71ee1b3c0be3013d74421ed8f2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LIFEBOOK
@@ -0,0 +1 @@
+CONFIG_MOUSE_PS2_LIFEBOOK=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SENTELIC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SENTELIC
new file mode 100644
index 0000000000000000000000000000000000000000..14079ba62504d5b87d9d902c68a19a0439097bbd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SENTELIC
@@ -0,0 +1 @@
+CONFIG_MOUSE_PS2_SENTELIC=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_VMMOUSE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_VMMOUSE
new file mode 100644
index 0000000000000000000000000000000000000000..d3f895d3b629970338f526d6eee986a8423c74ae
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_VMMOUSE
@@ -0,0 +1 @@
+CONFIG_MOUSE_PS2_VMMOUSE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_SERIAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_SERIAL
new file mode 100644
index 0000000000000000000000000000000000000000..b98a70fa60c2b432f7e5673e2c3764413bff0c8a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_SERIAL
@@ -0,0 +1 @@
+CONFIG_MOUSE_SERIAL=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_VSXXXAA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_VSXXXAA
new file mode 100644
index 0000000000000000000000000000000000000000..1c962b695cd55b0a3c9baa8056452ade15342f05
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_VSXXXAA
@@ -0,0 +1 @@
+CONFIG_MOUSE_VSXXXAA=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..627567714a6a3c59e88340dea37bab095bc97d39
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_I2C
@@ -0,0 +1 @@
+# CONFIG_MPL115_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_SPI
new file mode 100644
index 0000000000000000000000000000000000000000..a4be2fc22f3773364803cd9ae80a789080541a53
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_SPI
@@ -0,0 +1 @@
+# CONFIG_MPL115_SPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL3115 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL3115
new file mode 100644
index 0000000000000000000000000000000000000000..36eb331ab38fde913cafb54c516b25b419768057
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL3115
@@ -0,0 +1 @@
+# CONFIG_MPL3115 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPRLS0025PA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPRLS0025PA
new file mode 100644
index 0000000000000000000000000000000000000000..449bd8b9fd2334067f3cd28ab80acde368537952
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPRLS0025PA
@@ -0,0 +1 @@
+# CONFIG_MPRLS0025PA is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPSC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPSC
new file mode 100644
index 0000000000000000000000000000000000000000..8993bc8237a637173a7a834b6916b080bb6f7d72
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPSC
@@ -0,0 +1 @@
+# CONFIG_MPSC is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPU3050_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPU3050_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..92e6cbf51ba2f97df4d4ae2738983a3edd02020e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPU3050_I2C
@@ -0,0 +1 @@
+# CONFIG_MPU3050_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5611 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5611
new file mode 100644
index 0000000000000000000000000000000000000000..0bf3597059d610408745eddd67e8e3247cadbba9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5611
@@ -0,0 +1 @@
+# CONFIG_MS5611 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5637 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5637
new file mode 100644
index 0000000000000000000000000000000000000000..867af2e87ebd68413513217c2eb192bdaca7393f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5637
@@ -0,0 +1 @@
+# CONFIG_MS5637 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSA311 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSA311
new file mode 100644
index 0000000000000000000000000000000000000000..e74986a6b9688025c5a99d2aec5589c8294a2312
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSA311
@@ -0,0 +1 @@
+# CONFIG_MSA311 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_EC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_EC
new file mode 100644
index 0000000000000000000000000000000000000000..bc0e60f4d3370c58e2bd18651ef74feba024227f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_EC
@@ -0,0 +1 @@
+# CONFIG_MSI_EC is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_LAPTOP
new file mode 100644
index 0000000000000000000000000000000000000000..7d45b2e5e567b65e3ab2fb8a48a6798aea81eca5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_LAPTOP
@@ -0,0 +1 @@
+CONFIG_MSI_LAPTOP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_WMI
new file mode 100644
index 0000000000000000000000000000000000000000..7949ac91b539fcc821932e422919350116be904d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_WMI
@@ -0,0 +1 @@
+CONFIG_MSI_WMI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7601U b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7601U
new file mode 100644
index 0000000000000000000000000000000000000000..cd5368ee42595fbf1b24f3ff9691ac17b26700bc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7601U
@@ -0,0 +1 @@
+CONFIG_MT7601U=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7603E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7603E
new file mode 100644
index 0000000000000000000000000000000000000000..747b5499c7aa27451298b53a246f5785262cb8aa
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7603E
@@ -0,0 +1 @@
+# CONFIG_MT7603E is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7615E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7615E
new file mode 100644
index 0000000000000000000000000000000000000000..f3893bd9457bd07be7078cad4bb5d8b4aee8b40e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7615E
@@ -0,0 +1 @@
+# CONFIG_MT7615E is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663S b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663S
new file mode 100644
index 0000000000000000000000000000000000000000..0597f0e96baa9f914824bee44d0e5cb97304e155
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663S
@@ -0,0 +1 @@
+# CONFIG_MT7663S is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663U b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663U
new file mode 100644
index 0000000000000000000000000000000000000000..c4efff0c4a21a5e96605be0cdc969dea70ea7549
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663U
@@ -0,0 +1 @@
+# CONFIG_MT7663U is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..3c4b786bce82a86977eb2ee02774c4bf3993d7cd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_CORE
@@ -0,0 +1 @@
+CONFIG_MT76_CORE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_LEDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_LEDS
new file mode 100644
index 0000000000000000000000000000000000000000..38082977adbd67c09a24035d3354b9ddabed1e66
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_LEDS
@@ -0,0 +1 @@
+CONFIG_MT76_LEDS=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_USB
new file mode 100644
index 0000000000000000000000000000000000000000..ed4e177e4de802fff253b3dde6ea228b614968f4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_USB
@@ -0,0 +1 @@
+CONFIG_MT76_USB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_LIB
new file mode 100644
index 0000000000000000000000000000000000000000..95bc13d243c107e0cd7b654107c45b1b580872ef
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_LIB
@@ -0,0 +1 @@
+CONFIG_MT76x02_LIB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_USB
new file mode 100644
index 0000000000000000000000000000000000000000..5c92059065aecbec832555d993f01f619b8b85ce
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_USB
@@ -0,0 +1 @@
+CONFIG_MT76x02_USB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0E
new file mode 100644
index 0000000000000000000000000000000000000000..21e07c8c4ea86f65d9b6bfa774ffc099fbda6bf3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0E
@@ -0,0 +1 @@
+# CONFIG_MT76x0E is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0U b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0U
new file mode 100644
index 0000000000000000000000000000000000000000..b7b54019634b786d6cbda865ea95594d380b9b9e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0U
@@ -0,0 +1 @@
+CONFIG_MT76x0U=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..c8b6b3776e465fd70ed3ae8f9f07288da6a60ea1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0_COMMON
@@ -0,0 +1 @@
+CONFIG_MT76x0_COMMON=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2E
new file mode 100644
index 0000000000000000000000000000000000000000..adc8b6205a838b1d4f62cbd237750658cf96bca6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2E
@@ -0,0 +1 @@
+# CONFIG_MT76x2E is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2U b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2U
new file mode 100644
index 0000000000000000000000000000000000000000..1749e54495cecbe7c4f7f24cb431fc94c0f5f450
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2U
@@ -0,0 +1 @@
+CONFIG_MT76x2U=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..6ce98efe7a3542090d4975bc0b3eff1e6fd38a31
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2_COMMON
@@ -0,0 +1 @@
+CONFIG_MT76x2_COMMON=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7915E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7915E
new file mode 100644
index 0000000000000000000000000000000000000000..3ad870e9f43b16c2a5924e6640b661ff0bc39ec7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7915E
@@ -0,0 +1 @@
+# CONFIG_MT7915E is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921E
new file mode 100644
index 0000000000000000000000000000000000000000..b530241f063261f31ee7e14bdae60703c8a23830
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921E
@@ -0,0 +1 @@
+# CONFIG_MT7921E is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921S b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921S
new file mode 100644
index 0000000000000000000000000000000000000000..b092cbf7bf52f78ea658b0edb8bbf24ab2728229
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921S
@@ -0,0 +1 @@
+# CONFIG_MT7921S is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921U b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921U
new file mode 100644
index 0000000000000000000000000000000000000000..85c2cfd81a08540b3102ec80b061124614157cb5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921U
@@ -0,0 +1 @@
+# CONFIG_MT7921U is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7996E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7996E
new file mode 100644
index 0000000000000000000000000000000000000000..f2e9da094b9d8d3db58e2a23cf53d55257fc93c0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7996E
@@ -0,0 +1 @@
+# CONFIG_MT7996E is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MTD_CFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MTD_CFI
new file mode 100644
index 0000000000000000000000000000000000000000..67915c663d630322050aa89281c896bedc226070
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MTD_CFI
@@ -0,0 +1 @@
+# CONFIG_MTD_CFI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWAVE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWAVE
new file mode 100644
index 0000000000000000000000000000000000000000..95a67924d3fa1824eced03891953269d205ee006
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWAVE
@@ -0,0 +1 @@
+# CONFIG_MWAVE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX
new file mode 100644
index 0000000000000000000000000000000000000000..d7d6416710d72b7b755bc8f89087fc4ec0800abf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX
@@ -0,0 +1 @@
+CONFIG_MWIFIEX=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_PCIE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_PCIE
new file mode 100644
index 0000000000000000000000000000000000000000..80dd05958b8bd4f45641538d6772226048c3ff37
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_PCIE
@@ -0,0 +1 @@
+CONFIG_MWIFIEX_PCIE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_SDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_SDIO
new file mode 100644
index 0000000000000000000000000000000000000000..fa88327f350a3619ceda310939cc9d501f71b749
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_SDIO
@@ -0,0 +1 @@
+CONFIG_MWIFIEX_SDIO=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_USB
new file mode 100644
index 0000000000000000000000000000000000000000..fac2cb3d282c6f79798db09512c475b20731713b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_USB
@@ -0,0 +1 @@
+CONFIG_MWIFIEX_USB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWL8K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWL8K
new file mode 100644
index 0000000000000000000000000000000000000000..0d993e9f125057c571a00e9af12094e318dced00
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWL8K
@@ -0,0 +1 @@
+# CONFIG_MWL8K is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC4005 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC4005
new file mode 100644
index 0000000000000000000000000000000000000000..6094387320ac469edb935ceef40d1922529998b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC4005
@@ -0,0 +1 @@
+# CONFIG_MXC4005 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC6255 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC6255
new file mode 100644
index 0000000000000000000000000000000000000000..e834351d7c88c2efed462bea073bfed219852dd8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC6255
@@ -0,0 +1 @@
+# CONFIG_MXC6255 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXM_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXM_WMI
new file mode 100644
index 0000000000000000000000000000000000000000..7e2ecf29cd725d943a35101669ce99468a98a6c0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXM_WMI
@@ -0,0 +1 @@
+CONFIG_MXM_WMI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE
new file mode 100644
index 0000000000000000000000000000000000000000..a3b2d57f516d1f41be26297950f8c1f40f12fa53
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE
@@ -0,0 +1 @@
+CONFIG_MYRI10GE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE_DCA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE_DCA
new file mode 100644
index 0000000000000000000000000000000000000000..7b5904a76f5e3bd747a7fc39b27840299a044739
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE_DCA
@@ -0,0 +1 @@
+CONFIG_MYRI10GE_DCA=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NAU7802 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NAU7802
new file mode 100644
index 0000000000000000000000000000000000000000..2bff4c70af36ec920fc61ece74c7929b115758d8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NAU7802
@@ -0,0 +1 @@
+# CONFIG_NAU7802 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_BTT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_BTT
new file mode 100644
index 0000000000000000000000000000000000000000..2a3ebca583a8e879d4a1d0b97d394914e081455f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_BTT
@@ -0,0 +1 @@
+CONFIG_ND_BTT=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_PFN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_PFN
new file mode 100644
index 0000000000000000000000000000000000000000..6c96748c6c2e9c6c0a12d9b2e27410d7985ba3a5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_PFN
@@ -0,0 +1 @@
+CONFIG_ND_PFN=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_AMD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_AMD
new file mode 100644
index 0000000000000000000000000000000000000000..606381824077e0898134caec35dca3767897bf8a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_AMD
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_AMD is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_BROCADE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_BROCADE
new file mode 100644
index 0000000000000000000000000000000000000000..5ca46d872d025509a3d0158cd22627d4b9222b89
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_BROCADE
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_BROCADE=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_CISCO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_CISCO
new file mode 100644
index 0000000000000000000000000000000000000000..0c803ecf23fdd5ab52376902e416d04e70ad150e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_CISCO
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_CISCO=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_DEC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_DEC
new file mode 100644
index 0000000000000000000000000000000000000000..5d2f9d54c77553b10f358e261d785be436d8515a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_DEC
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_DEC=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_EMULEX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_EMULEX
new file mode 100644
index 0000000000000000000000000000000000000000..8751238f92558194fbc1b0738e64608e146dcce7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_EMULEX
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_EMULEX=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_QUALCOMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_QUALCOMM
new file mode 100644
index 0000000000000000000000000000000000000000..585724f65948cd032ded20102d6fea2a12d6cfd5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_QUALCOMM
@@ -0,0 +1 @@
+# CONFIG_NET_VENDOR_QUALCOMM is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NI903X_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NI903X_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..117073a493516782ad1a3097679272c009868196
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NI903X_WDT
@@ -0,0 +1 @@
+# CONFIG_NI903X_WDT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NIC7018_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NIC7018_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..8bc086748983b76d1d1f40c346595055013f834c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NIC7018_WDT
@@ -0,0 +1 @@
+# CONFIG_NIC7018_WDT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NITRO_ENCLAVES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NITRO_ENCLAVES
new file mode 100644
index 0000000000000000000000000000000000000000..517f0308e51495d7ca50b145cbcb21fbf0fb6d25
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NITRO_ENCLAVES
@@ -0,0 +1 @@
+# CONFIG_NITRO_ENCLAVES is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NMI_CHECK_CPU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NMI_CHECK_CPU
new file mode 100644
index 0000000000000000000000000000000000000000..91d403d72e6093b9b022876b51e228bc93a60312
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NMI_CHECK_CPU
@@ -0,0 +1 @@
+# CONFIG_NMI_CHECK_CPU is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOA1305 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOA1305
new file mode 100644
index 0000000000000000000000000000000000000000..2d1dd87315700160a8288e290def02cff270928e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOA1305
@@ -0,0 +1 @@
+# CONFIG_NOA1305 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOZOMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOZOMI
new file mode 100644
index 0000000000000000000000000000000000000000..e94a0abf8b3fcc93ce9c3754d7ae90b03391b2ba
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOZOMI
@@ -0,0 +1 @@
+CONFIG_NOZOMI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_AMD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_AMD
new file mode 100644
index 0000000000000000000000000000000000000000..b458f9403636bdcb377bd6a3f64c80635f4bde36
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_AMD
@@ -0,0 +1 @@
+# CONFIG_NTB_AMD is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_INTEL
new file mode 100644
index 0000000000000000000000000000000000000000..cca7a6dd7d22998b206272c2931607051e10a0b2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_INTEL
@@ -0,0 +1 @@
+# CONFIG_NTB_INTEL is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVIDIA_WMI_EC_BACKLIGHT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVIDIA_WMI_EC_BACKLIGHT
new file mode 100644
index 0000000000000000000000000000000000000000..dee8c6d517406eeae0e95835c4d1f84655981d08
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVIDIA_WMI_EC_BACKLIGHT
@@ -0,0 +1 @@
+# CONFIG_NVIDIA_WMI_EC_BACKLIGHT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVSW_SN2201 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVSW_SN2201
new file mode 100644
index 0000000000000000000000000000000000000000..e1f1d8e831e6baecb45cba34a659283b66e9490f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVSW_SN2201
@@ -0,0 +1 @@
+# CONFIG_NVSW_SN2201 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OF
new file mode 100644
index 0000000000000000000000000000000000000000..d15176e9c76291a3f5f9f4fdc6b1c7326b1c1200
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OF
@@ -0,0 +1 @@
+# CONFIG_OF is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT3001 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT3001
new file mode 100644
index 0000000000000000000000000000000000000000..359f9a14edb6feee74bb81cc2c763f51e1199bbb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT3001
@@ -0,0 +1 @@
+# CONFIG_OPT3001 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT4001 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT4001
new file mode 100644
index 0000000000000000000000000000000000000000..f438ae10f521dd150d6b03d0759e906449de83d1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT4001
@@ -0,0 +1 @@
+# CONFIG_OPT4001 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OSF_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OSF_PARTITION
new file mode 100644
index 0000000000000000000000000000000000000000..8ff387ae91713b23f06a3e3fbb38f3e7d2439d25
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OSF_PARTITION
@@ -0,0 +1 @@
+CONFIG_OSF_PARTITION=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OUTPUT_FORMAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OUTPUT_FORMAT
new file mode 100644
index 0000000000000000000000000000000000000000..30e0011c93102c8fa52db9169ea65742218984b9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OUTPUT_FORMAT
@@ -0,0 +1 @@
+CONFIG_OUTPUT_FORMAT="elf64-x86-64"
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_P2SB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_P2SB
new file mode 100644
index 0000000000000000000000000000000000000000..0cef1061a317520000cdae067197d76837931012
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_P2SB
@@ -0,0 +1 @@
+CONFIG_P2SB=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PA12203001 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PA12203001
new file mode 100644
index 0000000000000000000000000000000000000000..99737de75f87e5322949ea1453caa983e03525b5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PA12203001
@@ -0,0 +1 @@
+# CONFIG_PA12203001 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANASONIC_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANASONIC_LAPTOP
new file mode 100644
index 0000000000000000000000000000000000000000..f1ac44b84b8c6a6e28c043c24e894eddd17a9ece
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANASONIC_LAPTOP
@@ -0,0 +1 @@
+CONFIG_PANASONIC_LAPTOP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANEL
new file mode 100644
index 0000000000000000000000000000000000000000..de8051393a126d10f8a751a62b3d85ea84845ee2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANEL
@@ -0,0 +1 @@
+# CONFIG_PANEL is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_CLOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_CLOCK
new file mode 100644
index 0000000000000000000000000000000000000000..a0977b3b88fd0a7341647ecf9f7ce8813724b360
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_CLOCK
@@ -0,0 +1 @@
+CONFIG_PARAVIRT_CLOCK=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..7b0071250f80910f1ee2017d91d66d55680f1832
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_DEBUG
@@ -0,0 +1 @@
+# CONFIG_PARAVIRT_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT
new file mode 100644
index 0000000000000000000000000000000000000000..589156958a51dc35c89b3b0534bd26db700b7b01
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT
@@ -0,0 +1 @@
+CONFIG_PARPORT=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_1284 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_1284
new file mode 100644
index 0000000000000000000000000000000000000000..585684fb06a51566784736d9c80471b06f3a5322
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_1284
@@ -0,0 +1 @@
+CONFIG_PARPORT_1284=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_NOT_PC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_NOT_PC
new file mode 100644
index 0000000000000000000000000000000000000000..2f34bccd2cf5b187f8a9be03bc166623d7e5a14a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_NOT_PC
@@ -0,0 +1 @@
+CONFIG_PARPORT_NOT_PC=y
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC
new file mode 100644
index 0000000000000000000000000000000000000000..b9aa6e8cad781de265a4bbaf00d9906221e24c26
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC
@@ -0,0 +1 @@
+CONFIG_PARPORT_PC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_FIFO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_FIFO new file mode 100644 index 0000000000000000000000000000000000000000..62562af4c548f837747560aca1436cfef4c01fa6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_FIFO @@ -0,0 +1 @@ +# CONFIG_PARPORT_PC_FIFO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_SUPERIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_SUPERIO new file mode 100644 index 0000000000000000000000000000000000000000..b6858ce795a2d44dcec26551e9664c7b0c2e3d16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_SUPERIO @@ -0,0 +1 @@ +# CONFIG_PARPORT_PC_SUPERIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_SERIAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_SERIAL new file mode 100644 index 0000000000000000000000000000000000000000..8e90201908b6aca20b90896c1bba7600b371cc5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_SERIAL @@ -0,0 +1 @@ +CONFIG_PARPORT_SERIAL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PATA_PARPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PATA_PARPORT new file mode 100644 index 0000000000000000000000000000000000000000..2c3271235ae1b7c39364cf9e15a4bf80258af94e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PATA_PARPORT @@ -0,0 +1 @@ +# CONFIG_PATA_PARPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PC87413_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PC87413_WDT new file mode 100644 index 0000000000000000000000000000000000000000..5cd896accfaca367c15d9f2a008fdd5c1c050690 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PC87413_WDT @@ -0,0 +1 @@ +# CONFIG_PC87413_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCENGINES_APU2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCENGINES_APU2 new file mode 100644 index 0000000000000000000000000000000000000000..ee09db0f494e653af7161602f4659c9a1d57256d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCENGINES_APU2 @@ -0,0 +1 @@ +# CONFIG_PCENGINES_APU2 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_DIRECT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_DIRECT new file mode 100644 index 0000000000000000000000000000000000000000..1d4923bf1df4a276e791857368413ea28e3c0b54 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_DIRECT @@ -0,0 +1 @@ +CONFIG_PCI_DIRECT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_LOCKLESS_CONFIG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_LOCKLESS_CONFIG new file mode 100644 index 0000000000000000000000000000000000000000..486e469e47ca76b572bb5d9611c6b8fa5adbf93f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_LOCKLESS_CONFIG @@ -0,0 +1 @@ +CONFIG_PCI_LOCKLESS_CONFIG=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_XEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_XEN new file mode 100644 index 0000000000000000000000000000000000000000..729d7020fade0ba814e4aeacabcca7d8d30a6726 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_XEN @@ -0,0 +1 @@ +CONFIG_PCI_XEN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCMCIA_XIRCOM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCMCIA_XIRCOM new file mode 100644 index 0000000000000000000000000000000000000000..957ce8cf727ec22657bb4f7c3e02e7351643462a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCMCIA_XIRCOM @@ -0,0 +1 @@ +# CONFIG_PCMCIA_XIRCOM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_CPCAP_USB 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_CPCAP_USB new file mode 100644 index 0000000000000000000000000000000000000000..c68874d74317b0baaa4e8fe385019e600df6ebf3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_CPCAP_USB @@ -0,0 +1 @@ +# CONFIG_PHY_CPCAP_USB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_INTEL_LGM_EMMC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_INTEL_LGM_EMMC new file mode 100644 index 0000000000000000000000000000000000000000..e268046158e373b0ecb7b8ed1ac873c37c7c0608 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_INTEL_LGM_EMMC @@ -0,0 +1 @@ +# CONFIG_PHY_INTEL_LGM_EMMC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ALDERLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ALDERLAKE new file mode 100644 index 0000000000000000000000000000000000000000..fc34a4ac0016e25679d56637e611aaf6c9aba567 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ALDERLAKE @@ -0,0 +1 @@ +# CONFIG_PINCTRL_ALDERLAKE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BAYTRAIL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BAYTRAIL new file mode 100644 index 0000000000000000000000000000000000000000..706680265234fa8bc5abf50431009cdea013f732 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BAYTRAIL @@ -0,0 +1 @@ +CONFIG_PINCTRL_BAYTRAIL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BROXTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BROXTON new file mode 100644 index 0000000000000000000000000000000000000000..563eb973f8adc5028650638c3e7be16777cb9acd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BROXTON @@ -0,0 +1 @@ +CONFIG_PINCTRL_BROXTON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CANNONLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CANNONLAKE new file mode 100644 index 0000000000000000000000000000000000000000..db4143366c2a1a40830398cb1315949e131d09b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CANNONLAKE @@ -0,0 +1 @@ +CONFIG_PINCTRL_CANNONLAKE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CEDARFORK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CEDARFORK new file mode 100644 index 0000000000000000000000000000000000000000..b723950b2601186b74ee660b1ab84971099d3f20 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CEDARFORK @@ -0,0 +1 @@ +CONFIG_PINCTRL_CEDARFORK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CHERRYVIEW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CHERRYVIEW new file mode 100644 index 0000000000000000000000000000000000000000..6a7cb75e56c30c4cb54409ef9085c5a0fe587ba7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CHERRYVIEW @@ -0,0 +1 @@ +# CONFIG_PINCTRL_CHERRYVIEW is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_DENVERTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_DENVERTON new file mode 100644 index 0000000000000000000000000000000000000000..ec6b4e8e1eb9fa982b12b8378613f82b196c92f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_DENVERTON @@ -0,0 +1 @@ +CONFIG_PINCTRL_DENVERTON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ELKHARTLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ELKHARTLAKE new file mode 100644 index 0000000000000000000000000000000000000000..224fd47cc178168dc36e569b216107625275a869 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ELKHARTLAKE @@ -0,0 +1 @@ +# CONFIG_PINCTRL_ELKHARTLAKE is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_EMMITSBURG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_EMMITSBURG new file mode 100644 index 0000000000000000000000000000000000000000..c347d0b35ef09877f03567221aa316448a274841 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_EMMITSBURG @@ -0,0 +1 @@ +# CONFIG_PINCTRL_EMMITSBURG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_GEMINILAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_GEMINILAKE new file mode 100644 index 0000000000000000000000000000000000000000..fca1ee23ae246b21168dac75b9f382036ad48dbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_GEMINILAKE @@ -0,0 +1 @@ +CONFIG_PINCTRL_GEMINILAKE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ICELAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ICELAKE new file mode 100644 index 0000000000000000000000000000000000000000..f208784ed29db980aea1dd07da21de5dae2d314c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ICELAKE @@ -0,0 +1 @@ +CONFIG_PINCTRL_ICELAKE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_INTEL new file mode 100644 index 0000000000000000000000000000000000000000..bd4371168eb2721560c1bf419e3bdc00d854e085 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_INTEL @@ -0,0 +1 @@ +CONFIG_PINCTRL_INTEL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_JASPERLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_JASPERLAKE new file mode 100644 index 0000000000000000000000000000000000000000..09f84c415cfe3c14a828998d865003399a306864 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_JASPERLAKE @@ -0,0 +1 @@ +# CONFIG_PINCTRL_JASPERLAKE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LAKEFIELD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LAKEFIELD new file mode 100644 index 0000000000000000000000000000000000000000..7333be215f28324cd816fcd7b5e8a08d5f3ac19d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LAKEFIELD @@ -0,0 +1 @@ +# CONFIG_PINCTRL_LAKEFIELD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LEWISBURG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LEWISBURG new file mode 100644 index 0000000000000000000000000000000000000000..c2c51192e2ae59d5cfff241149708e565c56c7af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LEWISBURG @@ -0,0 +1 @@ +CONFIG_PINCTRL_LEWISBURG=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LYNXPOINT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LYNXPOINT new file mode 100644 index 0000000000000000000000000000000000000000..252078559e7986037fbe3b734111a9581d144e21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LYNXPOINT @@ -0,0 +1 @@ +# CONFIG_PINCTRL_LYNXPOINT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_METEORLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_METEORLAKE new file mode 100644 index 0000000000000000000000000000000000000000..82f12b6db0dfa669527df9717c3f250bc8dd2741 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_METEORLAKE @@ -0,0 +1 @@ +# CONFIG_PINCTRL_METEORLAKE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_SUNRISEPOINT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_SUNRISEPOINT new file mode 100644 index 0000000000000000000000000000000000000000..f47b9a76dab68e8d11111c528450502244d08d7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_SUNRISEPOINT 
@@ -0,0 +1 @@ +CONFIG_PINCTRL_SUNRISEPOINT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_TIGERLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_TIGERLAKE new file mode 100644 index 0000000000000000000000000000000000000000..4837dfaf5624a90bf88b566652ef4ad75def73cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_TIGERLAKE @@ -0,0 +1 @@ +# CONFIG_PINCTRL_TIGERLAKE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PING new file mode 100644 index 0000000000000000000000000000000000000000..d0dcdc8edcafb771e57f6af0a424086d198e3bda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PING @@ -0,0 +1 @@ +# CONFIG_PING is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLFXLC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLFXLC new file mode 100644 index 0000000000000000000000000000000000000000..eb04922b37227fba9fb5bb1035203c57b58026f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLFXLC @@ -0,0 +1 @@ +# CONFIG_PLFXLC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLIP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLIP new file mode 100644 index 0000000000000000000000000000000000000000..b8c35e075a951f480ea597e7ff9bcf3fcbd7c0c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLIP @@ -0,0 +1 @@ +# CONFIG_PLIP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PMIC_OPREGION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PMIC_OPREGION new file mode 100644 index 0000000000000000000000000000000000000000..4dbdfac30b9a49c1b24f302a36812b4de6ce8701 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PMIC_OPREGION @@ -0,0 +1 @@ +CONFIG_PMIC_OPREGION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ new file mode 100644 index 0000000000000000000000000000000000000000..89637109cad4480b2f3ea16320e8b16f284ca267 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ @@ -0,0 +1 @@ +CONFIG_PM_DEVFREQ=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ_EVENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ_EVENT new file mode 100644 index 0000000000000000000000000000000000000000..e8ed02e7e5c7f47f7f4df109410201e95abac081 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ_EVENT @@ -0,0 +1 @@ +# CONFIG_PM_DEVFREQ_EVENT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PNP_DEBUG_MESSAGES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PNP_DEBUG_MESSAGES new file mode 100644 index 0000000000000000000000000000000000000000..cb591885f89539403146dbbd7e536892b94b090d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PNP_DEBUG_MESSAGES @@ -0,0 +1 @@ +# CONFIG_PNP_DEBUG_MESSAGES is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_POWER_RESET_RESTART b/anolis/configs/L2-OPTIONAL/x86/CONFIG_POWER_RESET_RESTART new file mode 100644 index 0000000000000000000000000000000000000000..62520aa1d41d87176251c9d89bb6928e077fae80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_POWER_RESET_RESTART @@ -0,0 +1 @@ +# CONFIG_POWER_RESET_RESTART is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPDEV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPDEV new file mode 100644 index 0000000000000000000000000000000000000000..34c75b4ab86e72b5cb22a3ef68ec0edbbacc5e23 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPDEV @@ -0,0 +1 @@ +CONFIG_PPDEV=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPS_CLIENT_PARPORT 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPS_CLIENT_PARPORT new file mode 100644 index 0000000000000000000000000000000000000000..d11894deff1708aaccc599346abcffa13ceee21a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPS_CLIENT_PARPORT @@ -0,0 +1 @@ +CONFIG_PPS_CLIENT_PARPORT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PREFIX_SYMBOLS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PREFIX_SYMBOLS new file mode 100644 index 0000000000000000000000000000000000000000..8b56494e9fcfd4afbe0678edf2de5fb0eaf23060 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PREFIX_SYMBOLS @@ -0,0 +1 @@ +CONFIG_PREFIX_SYMBOLS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PRINTER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PRINTER new file mode 100644 index 0000000000000000000000000000000000000000..9b82c068b790f318c19adc25f1a8a31bed976a5b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PRINTER @@ -0,0 +1 @@ +CONFIG_PRINTER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROC_PID_ARCH_STATUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROC_PID_ARCH_STATUS new file mode 100644 index 0000000000000000000000000000000000000000..5498b06e9ae2f38b720fe7bae732994ffcb596c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROC_PID_ARCH_STATUS @@ -0,0 +1 @@ +CONFIG_PROC_PID_ARCH_STATUS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROVIDE_OHCI1394_DMA_INIT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROVIDE_OHCI1394_DMA_INIT new file mode 100644 index 0000000000000000000000000000000000000000..eee4011e1dee39167f40b3ecd1da2837dafce510 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROVIDE_OHCI1394_DMA_INIT @@ -0,0 +1 @@ +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_KVM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_KVM new file mode 100644 index 0000000000000000000000000000000000000000..203f6b757d8cd4466ab45c8924d42bb5a557c25f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_KVM @@ -0,0 +1 @@ +CONFIG_PTP_1588_CLOCK_KVM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_VMW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_VMW new file mode 100644 index 0000000000000000000000000000000000000000..4b0c1ae3468996825bf0ca2a2a092b004959bbd9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_VMW @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_VMW is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PUNIT_ATOM_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PUNIT_ATOM_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..0507cd497376261a62ea36664f18c68a9dbc6425 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PUNIT_ATOM_DEBUG @@ -0,0 +1 @@ +# CONFIG_PUNIT_ATOM_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PVH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PVH new file mode 100644 index 0000000000000000000000000000000000000000..2311dfd77b95a4395a367cdbc404565fbd0a8ed6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PVH @@ -0,0 +1 @@ +# CONFIG_PVH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS new file mode 100644 index 0000000000000000000000000000000000000000..7e6a48990a9a2935a0f354bde1cc4a781ecbcac6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS @@ -0,0 +1 @@ +CONFIG_PWM_LPSS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PCI new file mode 
100644 index 0000000000000000000000000000000000000000..d7ed9139ee8ae1b5e8a2559cc0345c8a307ba00b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PCI @@ -0,0 +1 @@ +CONFIG_PWM_LPSS_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PLATFORM new file mode 100644 index 0000000000000000000000000000000000000000..d9042d8b82a5b104c96cd6a6ee14262087f03ad2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PLATFORM @@ -0,0 +1 @@ +CONFIG_PWM_LPSS_PLATFORM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA new file mode 100644 index 0000000000000000000000000000000000000000..b58063523974c2180dd75ac18565ff0eb5b01d72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA @@ -0,0 +1 @@ +# CONFIG_QCOM_HIDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA_MGMT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA_MGMT new file mode 100644 index 0000000000000000000000000000000000000000..4b37c03484514fb04b21b83d4f8621c3bd79f753 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA_MGMT @@ -0,0 +1 @@ +# CONFIG_QCOM_HIDMA_MGMT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_QTNFMAC_PCIE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QTNFMAC_PCIE new file mode 100644 index 0000000000000000000000000000000000000000..da040aaf738b9a7882f41dc36f4b11a268586d83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QTNFMAC_PCIE @@ -0,0 +1 @@ +# CONFIG_QTNFMAC_PCIE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_ATI_REMOTE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_ATI_REMOTE new file mode 100644 index 0000000000000000000000000000000000000000..9912b0af8637c3bb1ffd7d8bf71c2106c517caa8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_ATI_REMOTE @@ -0,0 +1 @@ +CONFIG_RC_ATI_REMOTE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_CORE new file mode 100644 index 0000000000000000000000000000000000000000..73d773223623f756ac1935e0bdfbaf9e2b4b5aed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_CORE @@ -0,0 +1 @@ +CONFIG_RC_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DECODERS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DECODERS new file mode 100644 index 0000000000000000000000000000000000000000..ae93cb8deb27df8e40325642e027315f6fa681fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DECODERS @@ -0,0 +1 @@ +CONFIG_RC_DECODERS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DEVICES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DEVICES new file mode 100644 index 0000000000000000000000000000000000000000..fb9a9e567616dc87da9ae90e3bab32cc99646a5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DEVICES @@ -0,0 +1 @@ +CONFIG_RC_DEVICES=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_LOOPBACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_LOOPBACK new file mode 100644 index 0000000000000000000000000000000000000000..a0ee48052143a695268475d41075c6825bd7bcd3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_LOOPBACK @@ -0,0 +1 @@ +# CONFIG_RC_LOOPBACK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_MAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_MAP new file mode 100644 index 0000000000000000000000000000000000000000..910c883d4fffae145e2472fad32589b3917c57ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_MAP @@ -0,0 +1 @@ 
+CONFIG_RC_MAP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_XBOX_DVD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_XBOX_DVD new file mode 100644 index 0000000000000000000000000000000000000000..2f36a11126dbf06464a7e3952f067771ed933a6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_XBOX_DVD @@ -0,0 +1 @@ +# CONFIG_RC_XBOX_DVD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_REGULATOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_REGULATOR new file mode 100644 index 0000000000000000000000000000000000000000..3987d79a103893a5c1da2a5c8d9593ac620d49f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_REGULATOR @@ -0,0 +1 @@ +# CONFIG_REGULATOR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RETHOOK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RETHOOK new file mode 100644 index 0000000000000000000000000000000000000000..34673e12450f37f86da49d5f98f7c6ea8c60cd28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RETHOOK @@ -0,0 +1 @@ +CONFIG_RETHOOK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFD77402 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFD77402 new file mode 100644 index 0000000000000000000000000000000000000000..00383ffd289b061ee36fb0c34eecc62c647849dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFD77402 @@ -0,0 +1 @@ +# CONFIG_RFD77402 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFKILL_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFKILL_GPIO new file mode 100644 index 0000000000000000000000000000000000000000..400303dcf315e5159319c661b935b80fd3c03bee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFKILL_GPIO @@ -0,0 +1 @@ +# CONFIG_RFKILL_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RICHTEK_RTQ6056 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RICHTEK_RTQ6056 new file mode 100644 index 0000000000000000000000000000000000000000..69a728e657c48d3ef0dd0afb6f0330c7bee71138 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RICHTEK_RTQ6056 @@ -0,0 +1 @@ +# CONFIG_RICHTEK_RTQ6056 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_F34 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_F34 new file mode 100644 index 0000000000000000000000000000000000000000..d9f34d253d7d5b24eaedb80231e9f2dd472ad00d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_F34 @@ -0,0 +1 @@ +# CONFIG_RMI4_F34 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_SPI new file mode 100644 index 0000000000000000000000000000000000000000..70a3ee1732f93a20b11440de2bc286b92f6d545d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_SPI @@ -0,0 +1 @@ +# CONFIG_RMI4_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27008 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27008 new file mode 100644 index 0000000000000000000000000000000000000000..e3ef46a987c15ce7542f9d1361ba0c3d9b219921 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27008 @@ -0,0 +1 @@ +# CONFIG_ROHM_BU27008 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27034 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27034 new file mode 100644 index 0000000000000000000000000000000000000000..8596aaa4a8b77b64fd8014f52eeb21b67996e077 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27034 @@ -0,0 +1 @@ +# CONFIG_ROHM_BU27034 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RPR0521 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RPR0521 new file mode 100644 index 
0000000000000000000000000000000000000000..31f257edd382fabe93cf35e0a389ac74f8b5fe25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RPR0521 @@ -0,0 +1 @@ +# CONFIG_RPR0521 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2400PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2400PCI new file mode 100644 index 0000000000000000000000000000000000000000..1e1b2bf28509844a617963b3a17a43034561c724 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2400PCI @@ -0,0 +1 @@ +# CONFIG_RT2400PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500PCI new file mode 100644 index 0000000000000000000000000000000000000000..ac28e43e46d1db57cde44152f942d3e22da8f488 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500PCI @@ -0,0 +1 @@ +# CONFIG_RT2500PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500USB new file mode 100644 index 0000000000000000000000000000000000000000..f2fd3effd6b71360cb955756814fbdb7c2f6061b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500USB @@ -0,0 +1 @@ +# CONFIG_RT2500USB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI new file mode 100644 index 0000000000000000000000000000000000000000..6a092667a1bb7dab166f58f2f9c4b8ff6daa9730 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI @@ -0,0 +1 @@ +CONFIG_RT2800PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT3290 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT3290 new file mode 100644 index 0000000000000000000000000000000000000000..b407d7ee54e376bf60184f44f2490e2cf87dc7f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT3290 @@ -0,0 +1 @@ +CONFIG_RT2800PCI_RT3290=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT33XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT33XX new file mode 100644 index 0000000000000000000000000000000000000000..fc48a116e1e76a986e5f7edc728f10856ed8f546 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT33XX @@ -0,0 +1 @@ +CONFIG_RT2800PCI_RT33XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT35XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT35XX new file mode 100644 index 0000000000000000000000000000000000000000..f9d3762c396c3897ec61a5f1b3ddd3043cc3d640 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT35XX @@ -0,0 +1 @@ +CONFIG_RT2800PCI_RT35XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT53XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT53XX new file mode 100644 index 0000000000000000000000000000000000000000..6527124788c875b295d1606f314660de17c91b11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT53XX @@ -0,0 +1 @@ +CONFIG_RT2800PCI_RT53XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB new file mode 100644 index 0000000000000000000000000000000000000000..d6eaffd7b958400195a12a15e09a971eba732b2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB @@ -0,0 +1 @@ +CONFIG_RT2800USB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT33XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT33XX new file mode 100644 index 0000000000000000000000000000000000000000..f86c4e42f75706e3763d94f2d0ae616aba0fb5b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT33XX 
@@ -0,0 +1 @@ +CONFIG_RT2800USB_RT33XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT3573 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT3573 new file mode 100644 index 0000000000000000000000000000000000000000..df9ae1acc02dfbabb4b32464345f6bf4724d4370 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT3573 @@ -0,0 +1 @@ +CONFIG_RT2800USB_RT3573=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT35XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT35XX new file mode 100644 index 0000000000000000000000000000000000000000..52c191500246f78708c3b60959043b8994c15b0d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT35XX @@ -0,0 +1 @@ +CONFIG_RT2800USB_RT35XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT53XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT53XX new file mode 100644 index 0000000000000000000000000000000000000000..b896dc2ee99d7b270b66c42fa10cbcb2edcc4e8b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT53XX @@ -0,0 +1 @@ +CONFIG_RT2800USB_RT53XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT55XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT55XX new file mode 100644 index 0000000000000000000000000000000000000000..f585f4f02f87de4b5740fa85c6eeb7a6d9aa4df9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT55XX @@ -0,0 +1 @@ +CONFIG_RT2800USB_RT55XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_UNKNOWN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_UNKNOWN new file mode 100644 index 0000000000000000000000000000000000000000..95d862008c9611a44eb180d8996b734195b73ea9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_UNKNOWN @@ -0,0 +1 @@ +CONFIG_RT2800USB_UNKNOWN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB new file mode 100644 index 0000000000000000000000000000000000000000..7cfb7cfaf18e1b3bb82df2adc9e6af4626af51d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB @@ -0,0 +1 @@ +CONFIG_RT2800_LIB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB_MMIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB_MMIO new file mode 100644 index 0000000000000000000000000000000000000000..f2b1f406312a2d6e3b86449b62257d07e5dfe476 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB_MMIO @@ -0,0 +1 @@ +CONFIG_RT2800_LIB_MMIO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00 new file mode 100644 index 0000000000000000000000000000000000000000..b9390bdc8207e4bd550ad76009b5be2d4c1495a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00 @@ -0,0 +1 @@ +CONFIG_RT2X00=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..65f957c23762fa13790e1bec7dbf708915ea6049 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_DEBUG @@ -0,0 +1 @@ +# CONFIG_RT2X00_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB new file mode 100644 index 0000000000000000000000000000000000000000..254fa39478f1237f70b4dfc9a540aa48a745d7a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_CRYPTO 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_CRYPTO new file mode 100644 index 0000000000000000000000000000000000000000..cb52824844dbcfc2e570639268f8b06e79b405fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_CRYPTO @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_CRYPTO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_DEBUGFS new file mode 100644 index 0000000000000000000000000000000000000000..bec40f8c853838f084d41f7e61fd0df2649e096a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_DEBUGFS @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_DEBUGFS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_FIRMWARE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_FIRMWARE new file mode 100644 index 0000000000000000000000000000000000000000..9905d3c4bfde627aff7403ee8c1c5f20d538618b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_FIRMWARE @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_FIRMWARE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_LEDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_LEDS new file mode 100644 index 0000000000000000000000000000000000000000..25f61afcc5ac1b3a77d6210abdbfafdc4d4f734b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_LEDS @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_MMIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_MMIO new file mode 100644 index 0000000000000000000000000000000000000000..cde752d572bd7e50e06058926215c1007a9e6a1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_MMIO @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_MMIO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_PCI new file mode 100644 index 0000000000000000000000000000000000000000..a11f3c79264e5b5ded6be8c1a993a92551d8ba5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_PCI @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_USB new file mode 100644 index 0000000000000000000000000000000000000000..0a6599a5c40844d4d14e6a60b874a1c97e5c81ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_USB @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_USB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT61PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT61PCI new file mode 100644 index 0000000000000000000000000000000000000000..903ef5f2bcf60cbefbc9beb04b5beecb4012a372 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT61PCI @@ -0,0 +1 @@ +# CONFIG_RT61PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT73USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT73USB new file mode 100644 index 0000000000000000000000000000000000000000..bc5c783f78523fc575268e5ab86e1d87f34a09cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT73USB @@ -0,0 +1 @@ +# CONFIG_RT73USB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABB5ZES3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABB5ZES3 new file mode 100644 index 0000000000000000000000000000000000000000..e7552fd8f3731562cb46c0890031fb29e107f355 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABB5ZES3 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_ABB5ZES3 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABX80X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABX80X new file mode 100644 index 
0000000000000000000000000000000000000000..5a4765f016c3fcb57c7a10eac96015b1fac7c9cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABX80X @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_ABX80X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1305 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1305 new file mode 100644 index 0000000000000000000000000000000000000000..5d85197dc971606ffaa34c63893842ea1df10227 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1305 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1305 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1343 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1343 new file mode 100644 index 0000000000000000000000000000000000000000..ace147d2e97feef254e811c51d8f711ce99ac34a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1343 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1343 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1347 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1347 new file mode 100644 index 0000000000000000000000000000000000000000..b0b49f400be9ddac3121842146268b4aa198a798 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1347 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1347 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1374_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1374_WDT new file mode 100644 index 0000000000000000000000000000000000000000..a908c2f97590c0618a620c98d250e948f81e97d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1374_WDT @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1374_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1390 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1390 new file mode 100644 index 0000000000000000000000000000000000000000..710b4edafcedb02e708085b39030057592259258 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1390 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1390 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1685_FAMILY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1685_FAMILY new file mode 100644 index 0000000000000000000000000000000000000000..8389612ed98a3c40dbdc570132a653019a8deb9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1685_FAMILY @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1685_FAMILY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_HID_SENSOR_TIME b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_HID_SENSOR_TIME new file mode 100644 index 0000000000000000000000000000000000000000..0c15783d95fa7d39c435bd4c46f66069bd213ab9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_HID_SENSOR_TIME @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T93 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T93 new file mode 100644 index 0000000000000000000000000000000000000000..1339d6d907ebd36c801618db538b02d51732ea45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T93 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_M41T93 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T94 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T94 new file mode 100644 index 0000000000000000000000000000000000000000..d39df1187ae8ea4e313f81296d468addea301174 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T94 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_M41T94 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MAX6902 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MAX6902 new file mode 100644 index 0000000000000000000000000000000000000000..08bfbe6afff9667342c381fe492410c0e677bca0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MAX6902 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_MAX6902 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MCP795 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MCP795 new file mode 100644 index 0000000000000000000000000000000000000000..ccef499632d7a0e93a6d89dfe4f1d0b9711d0026 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MCP795 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_MCP795 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2123 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2123 new file mode 100644 index 0000000000000000000000000000000000000000..ae2d421c1b081a8f5cefd2c61eac221f41a39494 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2123 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_PCF2123 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2127 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2127 new file mode 100644 index 0000000000000000000000000000000000000000..8f90e9341ad047525cc987f4404293b4274ee99e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2127 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_PCF2127 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF85063 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF85063 new file mode 100644 index 0000000000000000000000000000000000000000..b64437deff4894fca9465d200d11b739494dbfdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF85063 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_PCF85063 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_R9701 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_R9701 new file mode 100644 index 0000000000000000000000000000000000000000..ff1d81f983c97553340df28dd68067ec52456707 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_R9701 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_R9701 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RS5C348 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RS5C348 new file mode 100644 index 0000000000000000000000000000000000000000..b24f6e702c742e24142221fbcbfff9fc5c644408 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RS5C348 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RS5C348 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX4581 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX4581 new file mode 100644 index 0000000000000000000000000000000000000000..6131953dc63a06719ba05df37d727797d94a7bfe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX4581 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RX4581 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX8010 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX8010 new file mode 100644 index 0000000000000000000000000000000000000000..8cf6aa26c288bdaa6439137999415b96b1ce9b02 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX8010 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RX8010 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_MC146818_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_MC146818_LIB new file mode 100644 index 0000000000000000000000000000000000000000..0e5b5fd2624536ff5664d92d9f69733d662dae26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_MC146818_LIB @@ -0,0 +1 @@ +CONFIG_RTC_MC146818_LIB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8180 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8180 new file mode 100644 index 0000000000000000000000000000000000000000..4fc37a64cb2a592a0e1af0ae88e647cf1241003f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8180 @@ -0,0 +1 @@ +# CONFIG_RTL8180 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8187 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8187 new file mode 100644 index 0000000000000000000000000000000000000000..fc72ad4a47ee72137980682f60f2a5598fda6e36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8187 @@ -0,0 +1 @@ +# CONFIG_RTL8187 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8188EE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8188EE new file mode 100644 index 0000000000000000000000000000000000000000..d5e7eaf4342a54fb611057b8a974b3898544ad5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8188EE @@ -0,0 +1 @@ +CONFIG_RTL8188EE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CE new file mode 100644 index 0000000000000000000000000000000000000000..d9088f1a4b530a6e903c9016bf130dfcbebf18d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CE @@ -0,0 +1 @@ +CONFIG_RTL8192CE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CU new file mode 100644 index 0000000000000000000000000000000000000000..eaf668f4ad57832912470451bfbc7e9a2a3f7f52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CU @@ -0,0 +1 @@ +CONFIG_RTL8192CU=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192C_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192C_COMMON new file mode 100644 index 0000000000000000000000000000000000000000..4642bcc37a0e3d0e9a8e8d39ca37d192e682f22a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192C_COMMON @@ -0,0 +1 @@ +CONFIG_RTL8192C_COMMON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192DE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192DE new file mode 100644 index 0000000000000000000000000000000000000000..c8c6d22412e2eeb96c7fcd03a9105821f6e51cf7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192DE @@ -0,0 +1 @@ +CONFIG_RTL8192DE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192EE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192EE new file mode 100644 index 0000000000000000000000000000000000000000..2a52d41d83dd146a5f9724a6dcdb2a887d696525 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192EE @@ -0,0 +1 @@ +CONFIG_RTL8192EE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192SE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192SE new file mode 100644 index 0000000000000000000000000000000000000000..afaa055bb69931357483abde69b2e05969ddb62e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192SE @@ -0,0 +1 @@ +CONFIG_RTL8192SE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723AE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723AE new file mode 100644 index 0000000000000000000000000000000000000000..2c83e20a28639747a04d67a991294887aeccdf10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723AE @@ -0,0 +1 @@ +CONFIG_RTL8723AE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723BE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723BE new file mode 100644 index 0000000000000000000000000000000000000000..cf446b8163466c82ab46ffc1146174dbefd71ddc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723BE @@ -0,0 +1 @@ +CONFIG_RTL8723BE=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..a3c839013adf4bdc0575bfb23dd109eb1a584f09
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723_COMMON
@@ -0,0 +1 @@
+CONFIG_RTL8723_COMMON=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8821AE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8821AE
new file mode 100644
index 0000000000000000000000000000000000000000..3498bf5db32dffed42446c1609f623fccaf79a7d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8821AE
@@ -0,0 +1 @@
+CONFIG_RTL8821AE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU
new file mode 100644
index 0000000000000000000000000000000000000000..13b2ed870b58f896d96d297cddd009525d861653
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU
@@ -0,0 +1 @@
+CONFIG_RTL8XXXU=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU_UNTESTED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU_UNTESTED
new file mode 100644
index 0000000000000000000000000000000000000000..44fccdd4f30dff694d1320615d2ee64ed0886d14
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU_UNTESTED
@@ -0,0 +1 @@
+# CONFIG_RTL8XXXU_UNTESTED is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLBTCOEXIST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLBTCOEXIST
new file mode 100644
index 0000000000000000000000000000000000000000..065d8886cd56b6cb92b9c6dedc6ababc29e3d59d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLBTCOEXIST
@@ -0,0 +1 @@
+CONFIG_RTLBTCOEXIST=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI
new file mode 100644
index 0000000000000000000000000000000000000000..61ed2c5ea8596b3e0352486f00dac739e1d8be1b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI
@@ -0,0 +1 @@
+CONFIG_RTLWIFI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..e61d486f935ebc0f55dccfc2075765ed5c988141
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_DEBUG
@@ -0,0 +1 @@
+# CONFIG_RTLWIFI_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..620478ab7281e00d8c7a9225d9aefba9dc0fac14
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_PCI
@@ -0,0 +1 @@
+CONFIG_RTLWIFI_PCI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_USB
new file mode 100644
index 0000000000000000000000000000000000000000..b41beeb8ae33eb786a41f8eace42d66112b89ddc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_USB
@@ -0,0 +1 @@
+CONFIG_RTLWIFI_USB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL_CARDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL_CARDS
new file mode 100644
index 0000000000000000000000000000000000000000..61b7c276b95d4e55ebbfc07d9a5e7175853303a0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL_CARDS
@@ -0,0 +1 @@
+CONFIG_RTL_CARDS=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88
new file mode 100644
index 0000000000000000000000000000000000000000..df8a6f6037b32cae2d835c556de58ea60b59fc60
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88
@@ -0,0 +1 @@
+CONFIG_RTW88=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DE
new file mode 100644
index 0000000000000000000000000000000000000000..78ccc3dc8d890a85e647676a23fd16dd11633049
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DE
@@ -0,0 +1 @@
+# CONFIG_RTW88_8723DE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DS
new file mode 100644
index 0000000000000000000000000000000000000000..1a5233a579374870b0f5ef24e010b7fe28270064
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DS
@@ -0,0 +1 @@
+# CONFIG_RTW88_8723DS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DU
new file mode 100644
index 0000000000000000000000000000000000000000..a23da9457d5493040b3f32f268b32824470e19f6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DU
@@ -0,0 +1 @@
+# CONFIG_RTW88_8723DU is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CE
new file mode 100644
index 0000000000000000000000000000000000000000..1f652b03a1e187095f3b7effd5ad115410ae48b0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CE
@@ -0,0 +1 @@
+# CONFIG_RTW88_8821CE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CS
new file mode 100644
index 0000000000000000000000000000000000000000..1d6799b61dda7b0be3f2487765c034bb72f441cb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CS
@@ -0,0 +1 @@
+# CONFIG_RTW88_8821CS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CU
new file mode 100644
index 0000000000000000000000000000000000000000..26c987469ce9f42a631935bb5dfebded2b1c66f0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CU
@@ -0,0 +1 @@
+# CONFIG_RTW88_8821CU is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822B b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822B
new file mode 100644
index 0000000000000000000000000000000000000000..21a486b59f5aa12a5736d401b968a4ca6602a4a6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822B
@@ -0,0 +1 @@
+CONFIG_RTW88_8822B=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BE
new file mode 100644
index 0000000000000000000000000000000000000000..d7170a75de5d8589f253d9e203c2086352b93a19
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BE
@@ -0,0 +1 @@
+CONFIG_RTW88_8822BE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BS
new file mode 100644
index 0000000000000000000000000000000000000000..50070985683ef7cb876a108c3182bdb3d4a2c000
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BS
@@ -0,0 +1 @@
+# CONFIG_RTW88_8822BS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BU
new file mode 100644
index 0000000000000000000000000000000000000000..e975fb1c76efb61d3d9c0a8cd0e4fbdf754c5572
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BU
@@ -0,0 +1 @@
+# CONFIG_RTW88_8822BU is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822C
new file mode 100644
index 0000000000000000000000000000000000000000..8cea7041df6d58e7e9f71a9eeaeaba91ebeffde2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822C
@@ -0,0 +1 @@
+CONFIG_RTW88_8822C=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CE
new file mode 100644
index 0000000000000000000000000000000000000000..2cc838e52b08db19f3c7f5d1e281571eae09df59
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CE
@@ -0,0 +1 @@
+CONFIG_RTW88_8822CE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CS
new file mode 100644
index 0000000000000000000000000000000000000000..be9f5c890868366c6b21bd87f999e8cdccf24743
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CS
@@ -0,0 +1 @@
+# CONFIG_RTW88_8822CS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CU
new file mode 100644
index 0000000000000000000000000000000000000000..90982727f8558809117569048af2b2212d48f5cf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CU
@@ -0,0 +1 @@
+# CONFIG_RTW88_8822CU is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..df829f192633c30040ce8d9ee6783d7a48c2b9c5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_CORE
@@ -0,0 +1 @@
+CONFIG_RTW88_CORE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUG
new file mode 100644
index 0000000000000000000000000000000000000000..da89c1e0a6dd0eefe9a410cbd78b4b796d0b71ad
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUG
@@ -0,0 +1 @@
+# CONFIG_RTW88_DEBUG is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUGFS
new file mode 100644
index 0000000000000000000000000000000000000000..d810b056cdb60f4d12e95019b7e72b84975feacc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUGFS
@@ -0,0 +1 @@
+# CONFIG_RTW88_DEBUGFS is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..bf6c93d5378c9ea608af2ee6fec52e0f798b4a78
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_PCI
@@ -0,0 +1 @@
+CONFIG_RTW88_PCI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW89 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW89
new file mode 100644
index 0000000000000000000000000000000000000000..9f3b4271492b82a62750726e628dc0eaa7c98572
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW89
@@ -0,0 +1 @@
+# CONFIG_RTW89 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_LAPTOP
new file mode 100644
index 0000000000000000000000000000000000000000..aaa10768947139a425b681ade2cb3d2d1007d1ef
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_LAPTOP
@@ -0,0 +1 @@
+CONFIG_SAMSUNG_LAPTOP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_Q10 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_Q10
new file mode 100644
index 0000000000000000000000000000000000000000..01423b93dba798138c70c2d920be0119ebe3c400
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_Q10
@@ -0,0 +1 @@
+CONFIG_SAMSUNG_Q10=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SATA_ZHAOXIN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SATA_ZHAOXIN
new file mode 100644
index 0000000000000000000000000000000000000000..08c1f428db3628cc486f958fc31813c17bece507
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SATA_ZHAOXIN
@@ -0,0 +1 @@
+CONFIG_SATA_ZHAOXIN=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_EPX_C3_WATCHDOG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_EPX_C3_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..2f9695c650f3ae409e920d18c23ed875cb152754
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_EPX_C3_WATCHDOG
@@ -0,0 +1 @@
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_FITPC2_WATCHDOG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_FITPC2_WATCHDOG
new file mode 100644
index 0000000000000000000000000000000000000000..7f3cf373323f75cc3e68899f333ab4e84d2f1394
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_FITPC2_WATCHDOG
@@ -0,0 +1 @@
+CONFIG_SBC_FITPC2_WATCHDOG=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBP_TARGET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBP_TARGET
new file mode 100644
index 0000000000000000000000000000000000000000..bda77b56017b126cf35d5635119a46801a8619d6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBP_TARGET
@@ -0,0 +1 @@
+# CONFIG_SBP_TARGET is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SC1200_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SC1200_WDT
new file mode 100644
index 0000000000000000000000000000000000000000..86f8c743e12f573f96135e727eafc8e10457017c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SC1200_WDT
@@ -0,0 +1 @@
+# CONFIG_SC1200_WDT is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3000
new file mode 100644
index 0000000000000000000000000000000000000000..48f4cf26be6fdd8cb516559c9a739db60ebffc0d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3000
@@ -0,0 +1 @@
+# CONFIG_SCA3000 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3300 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3300
new file mode 100644
index 0000000000000000000000000000000000000000..3d71a75abbad6e944ee9a25b7a51eabaf7f567d1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3300
@@ -0,0 +1 @@
+# CONFIG_SCA3300 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD30_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD30_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..6d3d6cbcb62ab7376ff107bfe39b8064eabbc65f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD30_CORE
@@ -0,0 +1 @@
+# CONFIG_SCD30_CORE is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD4X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD4X
new file mode 100644
index 0000000000000000000000000000000000000000..7e88cabcb490f6bb02a2d56e375f6ec203a9fbf8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD4X
@@ -0,0 +1 @@
+# CONFIG_SCD4X is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_AACRAID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_AACRAID
new file mode 100644
index 0000000000000000000000000000000000000000..c164284d69d8241601512828fbf07c4eaba68f4d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_AACRAID
@@ -0,0 +1 @@
+CONFIG_SCSI_AACRAID=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2X_FCOE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2X_FCOE
new file mode 100644
index 0000000000000000000000000000000000000000..b74e58b834044ef4b22711d77b580ec011f655e9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2X_FCOE
@@ -0,0 +1 @@
+CONFIG_SCSI_BNX2X_FCOE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2_ISCSI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2_ISCSI
new file mode 100644
index 0000000000000000000000000000000000000000..7c597f4bc7982171b59b7ed8657e1641303a790c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2_ISCSI
@@ -0,0 +1 @@
+CONFIG_SCSI_BNX2_ISCSI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IMM
new file mode 100644
index 0000000000000000000000000000000000000000..46434497d4c610655401cad583a2ccab954baded
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IMM
@@ -0,0 +1 @@
+# CONFIG_SCSI_IMM is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IPR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IPR
new file mode 100644
index 0000000000000000000000000000000000000000..8c7626c6ee37521bbe31fbcfd3d16e2af7b8ef9d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IPR
@@ -0,0 +1 @@
+# CONFIG_SCSI_IPR is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_ISCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_ISCI
new file mode 100644
index 0000000000000000000000000000000000000000..9b9e4f77a9375901012c03c38b81917c1a5e3a67
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_ISCI
@@ -0,0 +1 @@
+CONFIG_SCSI_ISCI=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_PPA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_PPA
new file mode 100644
index 0000000000000000000000000000000000000000..aa0ba6c1238bf9a83638a5bf458eb320c880837c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_PPA
@@ -0,0 +1 @@
+# CONFIG_SCSI_PPA is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PCI
new file mode 100644
index 0000000000000000000000000000000000000000..99d4f262cf70efdd1a0b573fb16e8b6866abfc86
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PCI
@@ -0,0 +1 @@
+# CONFIG_SCSI_UFSHCD_PCI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..16e9439f5f2f1a40b3ba572bfd104766e73434be
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PLATFORM
@@ -0,0 +1 @@
+# CONFIG_SCSI_UFSHCD_PLATFORM is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_BSG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_BSG
new file mode 100644
index 0000000000000000000000000000000000000000..2b0c98548c0817bd3bf388afc68ddbcd1b6c66a2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_BSG
@@ -0,0 +1 @@
+# CONFIG_SCSI_UFS_BSG is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_HWMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_HWMON
new file mode 100644
index 0000000000000000000000000000000000000000..ba9edd4cd6daa38c8f217f739b413d83b1ca8abf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_HWMON
@@ -0,0 +1 @@
+# CONFIG_SCSI_UFS_HWMON is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SDMA_VERBOSITY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SDMA_VERBOSITY
new file mode 100644
index 0000000000000000000000000000000000000000..1144ca928e6ad9df304740746d35b4c929f72333
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SDMA_VERBOSITY
@@ -0,0 +1 @@
+# CONFIG_SDMA_VERBOSITY is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SD_ADC_MODULATOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SD_ADC_MODULATOR
new file mode 100644
index 0000000000000000000000000000000000000000..ec43a0ff1d017de1b84dee2c34194b3eedcbe545
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SD_ADC_MODULATOR
@@ -0,0 +1 @@
+# CONFIG_SD_ADC_MODULATOR is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SEL3350_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SEL3350_PLATFORM
new file mode 100644
index 0000000000000000000000000000000000000000..b45fc3d318d658ee5bbe18d9c8195aca1baedaab
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SEL3350_PLATFORM
@@ -0,0 +1 @@
+# CONFIG_SEL3350_PLATFORM is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSEAIR_SUNRISE_CO2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSEAIR_SUNRISE_CO2
new file mode 100644
index 0000000000000000000000000000000000000000..2f3809636c9ecc7be7c87ad4952f2f6dd8c869e2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSEAIR_SUNRISE_CO2
@@ -0,0 +1 @@
+# CONFIG_SENSEAIR_SUNRISE_CO2 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP30 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP30
new file mode 100644
index 0000000000000000000000000000000000000000..9d25e3dbba7a4ac02fa660369653519e9adef381
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP30
@@ -0,0 +1 @@
+# CONFIG_SENSIRION_SGP30 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP40 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP40
new file mode 100644
index 0000000000000000000000000000000000000000..4cfb570367756a802941d67a045eb2325a6e6219
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP40
@@ -0,0 +1 @@
+# CONFIG_SENSIRION_SGP40 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU
new file mode 100644
index 0000000000000000000000000000000000000000..adab26252588a8042a6a9128ec14267deb587148
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU
@@ -0,0 +1 @@
+CONFIG_SENSORS_ABITUGURU=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU3
new file mode 100644
index 0000000000000000000000000000000000000000..22666cbee7f30a5454950bde38eef61a3a9bf473
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU3
@@ -0,0 +1 @@
+CONFIG_SENSORS_ABITUGURU3=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ACPI_POWER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ACPI_POWER
new file mode 100644
index 0000000000000000000000000000000000000000..dbb45c01bf2c5e130cd8f9403c8c12e97e8ba31a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ACPI_POWER
@@ -0,0 +1 @@
+CONFIG_SENSORS_ACPI_POWER=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7314 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7314
new file mode 100644
index 0000000000000000000000000000000000000000..6e7f02e40048c4519407f3831808d8a27a161b9f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7314
@@ -0,0 +1 @@
+# CONFIG_SENSORS_AD7314 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7414 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7414
new file mode 100644
index 0000000000000000000000000000000000000000..10f22ae168ddf499138b6a2cef037695fac4b285
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7414
@@ -0,0 +1 @@
+CONFIG_SENSORS_AD7414=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7418 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7418
new file mode 100644
index 0000000000000000000000000000000000000000..3df9e9e120cc5f72ad14c178aa032d351c5798d7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7418
@@ -0,0 +1 @@
+CONFIG_SENSORS_AD7418=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADC128D818 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADC128D818
new file mode 100644
index 0000000000000000000000000000000000000000..d82687c2f212926802f7dd7b9156f461007463a3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADC128D818
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADC128D818 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADCXX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADCXX
new file mode 100644
index 0000000000000000000000000000000000000000..581f6ccb69e9cccb05901398d1b874e93c7aff64
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADCXX
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADCXX is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1025 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1025
new file mode 100644
index 0000000000000000000000000000000000000000..a5617d87e3e71b381f7b9c9beac59fe9331b4a56
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1025
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADM1025=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1026 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1026
new file mode 100644
index 0000000000000000000000000000000000000000..d6cb0376df27f7fc135d0a9f821a38fec5874915
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1026
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADM1026=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1029 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1029
new file mode 100644
index 0000000000000000000000000000000000000000..0d3c99845fa6e6041584f0e7959c29f3047a9a04
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1029
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADM1029=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1031 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1031
new file mode 100644
index 0000000000000000000000000000000000000000..6109dab5efc87c483db1ad10872395f25772d0e4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1031
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADM1031=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1275 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1275
new file mode 100644
index 0000000000000000000000000000000000000000..76144de6f986a8db6abea50ec6edef2ace66d0da
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1275
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADM1275=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM9240 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM9240
new file mode 100644
index 0000000000000000000000000000000000000000..f79944a972b19d834c828c73c1b0619011212df8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM9240
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADM9240=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7828 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7828
new file mode 100644
index 0000000000000000000000000000000000000000..494099a32793d37dd695fd17c5a2298517582c65
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7828
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADS7828=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7871 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7871
new file mode 100644
index 0000000000000000000000000000000000000000..f5de6f551fcf0b29aa111561bbd325842bfdd7d8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7871
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ADS7871 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7410 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7410
new file mode 100644
index 0000000000000000000000000000000000000000..7255e3abd8dd20035271d2295efcff03db86a218
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7410
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADT7410=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7411 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7411
new file mode 100644
index 0000000000000000000000000000000000000000..a4c670ee7f81bde35c6574a90d19b19148f2d2ea
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7411
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADT7411=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7462 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7462
new file mode 100644
index 0000000000000000000000000000000000000000..0eaa2e22ff45c7daf8092c74f63216bd35190e6f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7462
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADT7462=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7470 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7470
new file mode 100644
index 0000000000000000000000000000000000000000..7fbf07427b1bdf955c22250d78368c10a29fc7eb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7470
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADT7470=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7475 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7475
new file mode 100644
index 0000000000000000000000000000000000000000..38e288230f70059f996a5b675771cb2e188960c8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7475
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADT7475=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7X10 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7X10
new file mode 100644
index 0000000000000000000000000000000000000000..74edafb847738a833b4d6af160aae67fe3a9ea7b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7X10
@@ -0,0 +1 @@
+CONFIG_SENSORS_ADT7X10=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AMC6821 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AMC6821
new file mode 100644
index 0000000000000000000000000000000000000000..c0389d18989c3dbbb83ed48cdd406e106d2468cf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AMC6821
@@ -0,0 +1 @@
+CONFIG_SENSORS_AMC6821=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APDS990X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APDS990X
new file mode 100644
index 0000000000000000000000000000000000000000..1521713d81cfb413116cab65fc94033c529b2c20
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APDS990X
@@ -0,0 +1 @@
+CONFIG_SENSORS_APDS990X=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APPLESMC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APPLESMC
new file mode 100644
index 0000000000000000000000000000000000000000..48519f04f841bb62a3c7541b1f100f583a3d721a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APPLESMC
@@ -0,0 +1 @@
+CONFIG_SENSORS_APPLESMC=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASB100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASB100
new file mode 100644
index 0000000000000000000000000000000000000000..e568b0eb67a94d1f8ba38f55468f03de60f1feb3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASB100
@@ -0,0 +1 @@
+CONFIG_SENSORS_ASB100=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASC7621 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASC7621
new file mode 100644
index 0000000000000000000000000000000000000000..5ba8f0815922fc06056550f379c96b87abe755df
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASC7621
@@ -0,0 +1 @@
+CONFIG_SENSORS_ASC7621=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_EC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_EC
new file mode 100644
index 0000000000000000000000000000000000000000..9c16493b6109d42d14f950f634aa16803c980f41
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_EC
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ASUS_EC is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_WMI
new file mode 100644
index 0000000000000000000000000000000000000000..9578d79edf13908d2f5919cbc20dc25345d0a204
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_WMI
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ASUS_WMI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATK0110 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATK0110
new file mode 100644
index 0000000000000000000000000000000000000000..a323de026e962904ba67a9184e5c76c27a065880
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATK0110
@@ -0,0 +1 @@
+CONFIG_SENSORS_ATK0110=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATXP1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATXP1
new file mode 100644
index 0000000000000000000000000000000000000000..78edcd84dfd28284b6edbe4ab46d75238ad3d34d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATXP1
@@ -0,0 +1 @@
+CONFIG_SENSORS_ATXP1=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_BH1770 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_BH1770
new file mode 100644
index 0000000000000000000000000000000000000000..78371b1b5ad2c375549da5e00cde161f76239b35
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_BH1770
@@ -0,0 +1 @@
+CONFIG_SENSORS_BH1770=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_CORETEMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_CORETEMP
new file mode 100644
index 0000000000000000000000000000000000000000..8fb4868a2372113a519d991adcacce4525b78f85
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_CORETEMP
@@ -0,0 +1 @@
+CONFIG_SENSORS_CORETEMP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DELL_SMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DELL_SMM
new file mode 100644
index 0000000000000000000000000000000000000000..d9fa9f5a2fae1f18c2c670d85c0292f2a86bdfda
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DELL_SMM
@@ -0,0 +1 @@
+CONFIG_SENSORS_DELL_SMM=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DME1737 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DME1737
new file mode 100644
index 0000000000000000000000000000000000000000..981a96d2f65047ecb8a4d0041b7c7886f2ab7484
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DME1737
@@ -0,0 +1 @@
+CONFIG_SENSORS_DME1737=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS1621 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS1621
new file mode 100644
index 0000000000000000000000000000000000000000..79f96d6ea8d0521e17526f9acfab51c746f09ac3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS1621
@@ -0,0 +1 @@
+CONFIG_SENSORS_DS1621=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS620 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS620
new file mode 100644
index 0000000000000000000000000000000000000000..4a06b361ae98df65f42d4a898ccb322c8451e8d2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS620
@@ -0,0 +1 @@
+CONFIG_SENSORS_DS620=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC1403 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC1403
new file mode 100644
index 0000000000000000000000000000000000000000..efe0b0e41cee109356a4133fee3374f9f552a757
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC1403
@@ -0,0 +1 @@
+CONFIG_SENSORS_EMC1403=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC6W201 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC6W201
new file mode 100644
index 0000000000000000000000000000000000000000..4e7513c5979318b7de32746866534dc7c07e9e4f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC6W201
@@ -0,0 +1 @@
+CONFIG_SENSORS_EMC6W201=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71805F b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71805F
new file mode 100644
index 0000000000000000000000000000000000000000..57db5db3cd8ed3f605734702f22bfb181f523a8c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71805F
@@ -0,0 +1 @@
+CONFIG_SENSORS_F71805F=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71882FG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71882FG
new file mode 100644
index 0000000000000000000000000000000000000000..67c502c7155ea11420cac99157367397bfdbab9d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71882FG
@@ -0,0 +1 @@
+CONFIG_SENSORS_F71882FG=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F75375S b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F75375S
new file mode 100644
index 0000000000000000000000000000000000000000..04d774dca67155bde76ece91330d4c5ea41a4a66
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F75375S
@@ -0,0 +1 @@
+CONFIG_SENSORS_F75375S=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FAM15H_POWER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FAM15H_POWER
new file mode 100644
index 0000000000000000000000000000000000000000..9632b4c8df9cc7e3f2d4af492fec60f7dc18a1a3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FAM15H_POWER
@@ -0,0 +1 @@
+CONFIG_SENSORS_FAM15H_POWER=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FSCHMD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FSCHMD
new file mode 100644
index 0000000000000000000000000000000000000000..0407bdcd142c28984ace14f8170cc2654a4f7a0e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FSCHMD
@@ -0,0 +1 @@
+CONFIG_SENSORS_FSCHMD=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G760A b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G760A
new file mode 100644
index 0000000000000000000000000000000000000000..17b630baad83229e26b5adb73c89ca8aaf0058cb
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G760A
@@ -0,0 +1 @@
+CONFIG_SENSORS_G760A=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G762 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G762
new file mode 100644
index 0000000000000000000000000000000000000000..4eea34146db8ade882a9013e19af5bd4f87974b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G762
@@ -0,0 +1 @@
+# CONFIG_SENSORS_G762 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL518SM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL518SM
new file mode 100644
index 0000000000000000000000000000000000000000..3942992572d48e1df936e9f89fface2d7e1237d3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL518SM
@@ -0,0 +1 @@
+CONFIG_SENSORS_GL518SM=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL520SM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL520SM
new file mode 100644
index 0000000000000000000000000000000000000000..d03316fec69968842c7b4d60b43fbd3501d04eba
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL520SM
@@ -0,0 +1 @@
+CONFIG_SENSORS_GL520SM=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HDAPS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HDAPS
new file mode 100644
index 0000000000000000000000000000000000000000..d2dba2ab1be6fa3cd131f203e31cd994001bfd50
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HDAPS
@@ -0,0 +1 @@
+CONFIG_SENSORS_HDAPS=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..8ecf97fd1e4fb39f2f121e7e38390f253287fbbe
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_I2C
@@ -0,0 +1 @@
+# CONFIG_SENSORS_HMC5843_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_SPI
new file mode 100644
index 0000000000000000000000000000000000000000..51f52f1bf8e0ff9bc9c1fb17f65eb3e9d7cfdfca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_SPI
@@ -0,0 +1 @@
+# CONFIG_SENSORS_HMC5843_SPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HP_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HP_WMI
new file mode 100644
index 0000000000000000000000000000000000000000..ee7ab1228c5b8ec63b534f0f73cd0511871c88b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HP_WMI
@@ -0,0 +1 @@
+# CONFIG_SENSORS_HP_WMI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5500 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5500
new file mode 100644
index 0000000000000000000000000000000000000000..d956e09b5f919df36868e39491945ff058a133bd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5500
@@ -0,0 +1 @@
+CONFIG_SENSORS_I5500=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5K_AMB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5K_AMB
new file mode 100644
index 0000000000000000000000000000000000000000..ad6832efb1d369525277cafa33217838384907fc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5K_AMB
@@ -0,0 +1 @@
+CONFIG_SENSORS_I5K_AMB=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMAEM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMAEM
new file mode 100644
index 0000000000000000000000000000000000000000..b291c66370c5e1d2e686ea5416a55bd9a92ad48e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMAEM
@@ -0,0 +1 @@
+CONFIG_SENSORS_IBMAEM=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMPEX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMPEX
new file mode 100644
index 0000000000000000000000000000000000000000..a82ec4e6d9c2322e15cc4a34564b01099827a23c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMPEX
@@ -0,0 +1 @@
+CONFIG_SENSORS_IBMPEX=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IIO_HWMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IIO_HWMON
new file mode 100644
index 0000000000000000000000000000000000000000..d7dbcc83cefffd5677fbd0cec86c7a6e47cbbdb8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IIO_HWMON
@@ -0,0 +1 @@
+# CONFIG_SENSORS_IIO_HWMON is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA209 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA209
new file mode 100644
index 0000000000000000000000000000000000000000..4b0197e0a423fc3e105d2fb343875e769a954f76
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA209
@@ -0,0 +1 @@
+CONFIG_SENSORS_INA209=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA2XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA2XX
new file mode 100644
index 0000000000000000000000000000000000000000..4c6a7ebe7b26b668e850caf0cc3505bb92a6b2be
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA2XX
@@ -0,0 +1 @@
+CONFIG_SENSORS_INA2XX=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29018 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29018
new file mode 100644
index 0000000000000000000000000000000000000000..131aebf71240b5acd86601b46dfd739356e74b9d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29018
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ISL29018 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29028 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29028
new file mode 100644
index 0000000000000000000000000000000000000000..53f7bb3ff531a047098ad7735560baec74609093
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29028
@@ -0,0 +1 @@
+# CONFIG_SENSORS_ISL29028 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IT87 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IT87
new file mode 100644
index 0000000000000000000000000000000000000000..f6a8baf0d7d92d29d3e6d6e042ab245d2aaf55e1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IT87
@@ -0,0 +1 @@
+CONFIG_SENSORS_IT87=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_JC42 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_JC42
new file mode 100644
index 0000000000000000000000000000000000000000..8a07c3eef08abbe5b69e79c052ecb76ebd4b737c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_JC42
@@ -0,0 +1 @@
+CONFIG_SENSORS_JC42=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K10TEMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K10TEMP
new file mode 100644
index 0000000000000000000000000000000000000000..eec8b328a6ff371c79441245e0f07e024fe08d5d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K10TEMP
@@ -0,0 +1 @@
+CONFIG_SENSORS_K10TEMP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K8TEMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K8TEMP
new file mode 100644
index 0000000000000000000000000000000000000000..f690795aadb9d464b9026a24ae40e0e7de120984
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K8TEMP
@@ -0,0 +1 @@
+CONFIG_SENSORS_K8TEMP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LINEAGE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LINEAGE
new file mode 100644
index 0000000000000000000000000000000000000000..43bc1aa07638cfd44e74a627d4d41e2ab472e3e5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LINEAGE
@@ -0,0 +1 @@
+CONFIG_SENSORS_LINEAGE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3LV02D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3LV02D
new file mode 100644
index 0000000000000000000000000000000000000000..980124650a42d40966adfe0b2c376c367797f9c8
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3LV02D
@@ -0,0 +1 @@
+CONFIG_SENSORS_LIS3LV02D=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..3f3a59ae65b385229a10ff2ff889f08166fb58b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3_I2C
@@ -0,0 +1 @@
+CONFIG_SENSORS_LIS3_I2C=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM25066 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM25066
new file mode 100644
index 0000000000000000000000000000000000000000..5c202b02f167e9350ad7eaba0dc83149b5e2bb10
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM25066
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM25066=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM63 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM63
new file mode 100644
index 0000000000000000000000000000000000000000..7d893171b59f2301edfa427b032d07fd08e0236a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM63
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM63=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM70 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM70
new file mode 100644
index 0000000000000000000000000000000000000000..c927bf635971a07618d7c584186d51dfc5179e0b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM70
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LM70 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM73 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM73
new file mode 100644
index 0000000000000000000000000000000000000000..b78a7f18d844d262175b19e7bc97eff5b0572cd6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM73
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM73=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM75 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM75
new file mode 100644
index 0000000000000000000000000000000000000000..205675fc619264a69487311dc9c1b4ea645f7369
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM75
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM75=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM77 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM77
new file mode 100644
index 0000000000000000000000000000000000000000..3fd7f3f3b6b59c412cac9d9555c76720a25d59b2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM77
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM77=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM78 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM78
new file mode 100644
index 0000000000000000000000000000000000000000..6dc289cbcfc1369d432855b78c561f8bc43f4649
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM78
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM78=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM80 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM80
new file mode 100644
index 0000000000000000000000000000000000000000..a7cfa2123d590046563b79082068ea676776cb42
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM80
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM80=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM83 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM83
new file mode 100644
index 0000000000000000000000000000000000000000..2e15080a2a010fc0dc9bc4e1015794fd67cd604e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM83
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM83=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM85 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM85
new file mode 100644
index 0000000000000000000000000000000000000000..1ce412b2953bab15300b8dfad7534f387a64af69
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM85
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM85=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM87 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM87
new file mode 100644
index 0000000000000000000000000000000000000000..8930963579e0b5d6f53863c4672855fb64c77cb7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM87
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM87=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM90 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM90
new file mode 100644
index 0000000000000000000000000000000000000000..2633ff93446646e8d8e68e05277b23daed30f01c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM90
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM90=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM92 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM92
new file mode 100644
index 0000000000000000000000000000000000000000..58b30a4e375a0bf527d542bf96fb3fca5746d7b9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM92
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM92=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM93 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM93
new file mode 100644
index 0000000000000000000000000000000000000000..46514707a4028b5a1ad536c4cd09392c6807fdd9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM93
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM93=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95234 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95234
new file mode 100644
index 0000000000000000000000000000000000000000..f8a7d302e0389839d2cadf58c3e5b919d075680c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95234
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM95234=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95241 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95241
new file mode 100644
index 0000000000000000000000000000000000000000..10e50c1c2e94917d0c3e5ddd584c589fbd6600d2
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95241
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM95241=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95245 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95245
new file mode 100644
index 0000000000000000000000000000000000000000..c66b177055471517cc19bcf6de22715bdb821d34
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95245
@@ -0,0 +1 @@
+CONFIG_SENSORS_LM95245=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2945 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2945
new file mode 100644
index 0000000000000000000000000000000000000000..79d68fd35109df705ee0898a1db52b3d1ca7eab1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2945
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LTC2945 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2978 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2978
new file mode 100644
index 0000000000000000000000000000000000000000..5b47e4c4440e940da97f015aac359ac3e8a43168
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2978
@@ -0,0 +1 @@
+CONFIG_SENSORS_LTC2978=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC3815 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC3815
new file mode 100644
index 0000000000000000000000000000000000000000..5ea0210058bbbc0561258eb4589ca679e3e873a5
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC3815
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LTC3815 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4151 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4151
new file mode 100644
index 0000000000000000000000000000000000000000..9a8d133d299bc1362e3f72cd3e90e41b60cc577c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4151
@@ -0,0 +1 @@
+CONFIG_SENSORS_LTC4151=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4215 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4215
new file mode 100644
index 0000000000000000000000000000000000000000..3734c6ff236f855559ca411dd814f230ef5f6c17
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4215
@@ -0,0 +1 @@
+CONFIG_SENSORS_LTC4215=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4222 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4222
new file mode 100644
index 0000000000000000000000000000000000000000..82af61e643ea8ef8f585215182efcfe3bbf28686
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4222
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LTC4222 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4245 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4245
new file mode 100644
index 0000000000000000000000000000000000000000..9ef758048d95bd21ea0db200b7614fbda413edab
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4245
@@ -0,0 +1 @@
+CONFIG_SENSORS_LTC4245=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4260 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4260
new file mode 100644
index 0000000000000000000000000000000000000000..a42c153cfa414c86c42c09515625fcd43d0d9c33
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4260
@@ -0,0 +1 @@
+# CONFIG_SENSORS_LTC4260 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4261 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4261
new file mode 100644
index 0000000000000000000000000000000000000000..a6cd8f7f12b0a66633344a3c7543237d652e7e1e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4261
@@ -0,0 +1 @@
+CONFIG_SENSORS_LTC4261=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1111 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1111
new file mode 100644
index 0000000000000000000000000000000000000000..082ff9e60ad37c50e08a3c89bf4b2a57846e7e46
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1111
@@ -0,0 +1 @@
+# CONFIG_SENSORS_MAX1111 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16064 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16064
new file mode 100644
index 0000000000000000000000000000000000000000..579d1c59d4bfe7354c68658d7281e82c5f880829
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16064
@@ -0,0 +1 @@
+CONFIG_SENSORS_MAX16064=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16065 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16065
new file mode 100644
index 0000000000000000000000000000000000000000..1054c96225ecd171957750d480a285afabccb3cf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16065
@@ -0,0 +1 @@
+CONFIG_SENSORS_MAX16065=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1619 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1619
new file mode 100644
index 0000000000000000000000000000000000000000..0b3146610bcb98947fa03504e98b95404d7b992a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1619
@@ -0,0 +1 @@
+CONFIG_SENSORS_MAX1619=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1668 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1668
new file mode 100644
index 0000000000000000000000000000000000000000..129708ae55ddde3c97306eef863e65d3d28cac92
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1668
@@ -0,0 +1 @@
+CONFIG_SENSORS_MAX1668=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX197 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX197
new file mode 100644
index 0000000000000000000000000000000000000000..10089e38bc0082e461aa92190935552d77fe3c14
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX197
@@ -0,0 +1 @@
+CONFIG_SENSORS_MAX197=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX20751 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX20751
new file mode 100644
index 0000000000000000000000000000000000000000..5555d692860f0e6103fdadd519bb6ce8a2262f3d
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX20751
@@ -0,0 +1 @@
+# CONFIG_SENSORS_MAX20751 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX31790 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX31790
new file mode 100644
index 0000000000000000000000000000000000000000..6f8e031631e6fc7c4a3244e1891d3bc035044daf
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX31790
@@ -0,0 +1 @@
+# CONFIG_SENSORS_MAX31790 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX34440 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX34440
new file mode 100644
index 0000000000000000000000000000000000000000..d6c798cafcf2c7bc4d114e5fad7911db12e4c155
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX34440
@@ -0,0 +1 @@
+CONFIG_SENSORS_MAX34440=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6639 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6639
new file mode 100644
index 0000000000000000000000000000000000000000..a56c9c5223355563f1a457d17ea2bed45ae6a30c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6639
@@ -0,0 +1 @@
+CONFIG_SENSORS_MAX6639=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6650 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6650
new file mode 100644
index 0000000000000000000000000000000000000000..45f92249496241dd07f7459d9abb7e410f791337
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6650
@@ -0,0 +1 @@
+CONFIG_SENSORS_MAX6650=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6697 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6697
new file mode 100644
index 0000000000000000000000000000000000000000..346dae3dfd50e529baf8ebd47b54610c786c9639
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6697
@@ -0,0 +1 @@
+CONFIG_SENSORS_MAX6697=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX8688 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX8688
new file mode 100644
index 0000000000000000000000000000000000000000..d983ad1d5cf319229cd5dcd7ccdc5afcbd6fb949
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX8688
@@ -0,0 +1 @@
+CONFIG_SENSORS_MAX8688=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MCP3021 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MCP3021
new file mode 100644
index 0000000000000000000000000000000000000000..508286fe15d1ec74f96823e9b113457beb5e5edc
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MCP3021
@@ -0,0 +1 @@
+CONFIG_SENSORS_MCP3021=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MLXREG_FAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MLXREG_FAN
new file mode 100644
index 0000000000000000000000000000000000000000..5d431cda6a42df4116d3aaaad218653a067d386f
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MLXREG_FAN
@@ -0,0 +1 @@
+# CONFIG_SENSORS_MLXREG_FAN is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6683 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6683
new file mode 100644
index 0000000000000000000000000000000000000000..677df61836002c89c09abf111bf606f92ac31cda
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6683
@@ -0,0 +1 @@
+# CONFIG_SENSORS_NCT6683 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775
new file mode 100644
index 0000000000000000000000000000000000000000..8bde1424c5c4a9f9e14d1d60d059ec816e89fd29
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775
@@ -0,0 +1 @@
+CONFIG_SENSORS_NCT6775=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..df64ea6de2d40df07bb4e4d57342af68c4a1735b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775_CORE
@@ -0,0 +1 @@
+CONFIG_SENSORS_NCT6775_CORE=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7802 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7802
new file mode 100644
index 0000000000000000000000000000000000000000..e699237af139eec4bb3ae24c1ed5d23dce90249a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7802
@@ -0,0 +1 @@
+# CONFIG_SENSORS_NCT7802 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7904 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7904
new file mode 100644
index 0000000000000000000000000000000000000000..aabc731da231cb1ada97112d465ef9f657c69557
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7904
@@ -0,0 +1 @@
+# CONFIG_SENSORS_NCT7904 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NTC_THERMISTOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NTC_THERMISTOR
new file mode 100644
index 0000000000000000000000000000000000000000..269cb4eb752250f53084bc32b1b839a545a087c9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NTC_THERMISTOR
@@ -0,0 +1 @@
+CONFIG_SENSORS_NTC_THERMISTOR=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_OXP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_OXP
new file mode 100644
index 0000000000000000000000000000000000000000..b490423ca30c2849eeaa45a6a2ee848ee409a896
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_OXP
@@ -0,0 +1 @@
+# CONFIG_SENSORS_OXP is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87360 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87360
new file mode 100644
index 0000000000000000000000000000000000000000..bbd351dbf7f87883bc4d772848fcef5987dbdb50
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87360
@@ -0,0 +1 @@
+CONFIG_SENSORS_PC87360=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87427 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87427
new file mode 100644
index 0000000000000000000000000000000000000000..11369515027797bb5ab60089945281cf54de6340
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87427
@@ -0,0 +1 @@
+CONFIG_SENSORS_PC87427=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PCF8591 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PCF8591
new file mode 100644
index 0000000000000000000000000000000000000000..685b3b3c21c7fa0c00464180baa0ed08deed7fd7
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PCF8591
@@ -0,0 +1 @@
+CONFIG_SENSORS_PCF8591=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PMBUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PMBUS
new file mode 100644
index 0000000000000000000000000000000000000000..7b11c9b8f7ecb1f5893beacc204057ea9be694f4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PMBUS
@@ -0,0 +1 @@
+CONFIG_SENSORS_PMBUS=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_POWR1220 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_POWR1220
new file mode 100644
index 0000000000000000000000000000000000000000..7f6616feb295c486a33a79ab0604cbd9d04d93b4
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_POWR1220
@@ -0,0 +1 @@
+# CONFIG_SENSORS_POWR1220 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_I2C
new file mode 100644
index 0000000000000000000000000000000000000000..68d33c0e7cb7e4773193fa4a5e89004b5404abb1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_I2C
@@ -0,0 +1 @@
+# CONFIG_SENSORS_RM3100_I2C is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_SPI
new file mode 100644
index 0000000000000000000000000000000000000000..7c4a1c0621d73e15f7ea6b2a638b2c5b4fa006b6
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_SPI
@@ -0,0 +1 @@
+# CONFIG_SENSORS_RM3100_SPI is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5627 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5627
new file mode 100644
index 0000000000000000000000000000000000000000..2a3b5924d8a864e1c7728efd8e9b8c60856d0f5c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5627
@@ -0,0 +1 @@
+CONFIG_SENSORS_SCH5627=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5636 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5636
new file mode 100644
index 0000000000000000000000000000000000000000..038ef2e1752435c850e01a68d504578c3f3c2dea
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5636
@@ -0,0 +1 @@
+CONFIG_SENSORS_SCH5636=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH56XX_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH56XX_COMMON
new file mode 100644
index 0000000000000000000000000000000000000000..b2d6ee39ddad09808dd950795fa05add83ab57f3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH56XX_COMMON
@@ -0,0 +1 @@
+CONFIG_SENSORS_SCH56XX_COMMON=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT15 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT15
new file mode 100644
index 0000000000000000000000000000000000000000..997046201a1295e1a1af867fbf690f61b97223de
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT15
@@ -0,0 +1 @@
+CONFIG_SENSORS_SHT15=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT21 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT21
new file mode 100644
index 0000000000000000000000000000000000000000..dbc5dd9f3dffde6845c987938e347532937a16a9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT21
@@ -0,0 +1 @@
+CONFIG_SENSORS_SHT21=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHTC1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHTC1
new file mode 100644
index 0000000000000000000000000000000000000000..f91426bc7c019a9d0674e52309779935f33c38f0
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHTC1
@@ -0,0 +1 @@
+# CONFIG_SENSORS_SHTC1 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SIS5595 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SIS5595
new file mode 100644
index 0000000000000000000000000000000000000000..70c05287c6ca96c37d7256e56ee224ac1f128075
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SIS5595
@@ -0,0 +1 @@
+CONFIG_SENSORS_SIS5595=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47B397 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47B397
new file mode 100644
index 0000000000000000000000000000000000000000..eba9f9c8055ce76536c1c463b2af843a1fa3ad87
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47B397
@@ -0,0 +1 @@
+CONFIG_SENSORS_SMSC47B397=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M1
new file mode 100644
index 0000000000000000000000000000000000000000..5c35e76134eafdd371d203b3d7cfd4d9d9569f89
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M1
@@ -0,0 +1 @@
+CONFIG_SENSORS_SMSC47M1=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M192 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M192
new file mode 100644
index 0000000000000000000000000000000000000000..e239c8299bad9f28231e43ca536f2a95f015dcde
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M192
@@ -0,0 +1 @@
+CONFIG_SENSORS_SMSC47M192=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TC74 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TC74
new file mode 100644
index 0000000000000000000000000000000000000000..56843784b051a759b10938701452e7e8316f06d9
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TC74
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TC74 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_THMC50 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_THMC50
new file mode 100644
index 0000000000000000000000000000000000000000..f3b849b5c2b747db70cb2ddfd02f7a26023e8502
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_THMC50
@@ -0,0 +1 @@
+CONFIG_SENSORS_THMC50=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP102 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP102
new file mode 100644
index 0000000000000000000000000000000000000000..eec18af38213412d1ecb960b83dff85fb301ca19
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP102
@@ -0,0 +1 @@
+CONFIG_SENSORS_TMP102=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP103 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP103
new file mode 100644
index 0000000000000000000000000000000000000000..b3a789182d5c49e69c3eccf1e12ea56a06b0792c
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP103
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TMP103 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP401 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP401
new file mode 100644
index 0000000000000000000000000000000000000000..de3f7de90c4a21515d2c492a0be4af014d435d52
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP401
@@ -0,0 +1 @@
+CONFIG_SENSORS_TMP401=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP421 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP421
new file mode 100644
index 0000000000000000000000000000000000000000..390515bf3c5f70d48299ab6ec06f622062cac9f3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP421
@@ -0,0 +1 @@
+CONFIG_SENSORS_TMP421=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TPS40422 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TPS40422
new file mode 100644
index 0000000000000000000000000000000000000000..111dc8c2a45ffd0d9b9bcc2779a2436ed6d4b541
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TPS40422
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TPS40422 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2550 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2550
new file mode 100644
index 0000000000000000000000000000000000000000..93ceff8107d896f55530089457d049db158a4cac
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2550
@@ -0,0 +1 @@
+CONFIG_SENSORS_TSL2550=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2563 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2563
new file mode 100644
index 0000000000000000000000000000000000000000..b94d33641dd60cb20c1f9a4fe1f48ad65d15d2b1
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2563
@@ -0,0 +1 @@
+# CONFIG_SENSORS_TSL2563 is not set
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9000
new file mode 100644
index 0000000000000000000000000000000000000000..1bce80fa9db530ab090516e6258001e387df45be
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9000
@@ -0,0 +1 @@
+CONFIG_SENSORS_UCD9000=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9200
new file mode 100644
index 0000000000000000000000000000000000000000..da028a1dfc7b96d1c2bcf7197d958a0f2206498a
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9200
@@ -0,0 +1 @@
+CONFIG_SENSORS_UCD9200=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA686A b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA686A
new file mode 100644
index 0000000000000000000000000000000000000000..d3b212f2708ba0d9643a974c4ba840e19ee3ecca
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA686A
@@ -0,0 +1 @@
+CONFIG_SENSORS_VIA686A=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA_CPUTEMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA_CPUTEMP
new file mode 100644
index 0000000000000000000000000000000000000000..f4ca4454a4fe16c0c11dad8b8e81dff75b223dfd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA_CPUTEMP
@@ -0,0 +1 @@
+CONFIG_SENSORS_VIA_CPUTEMP=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT1211 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT1211
new file mode 100644
index 0000000000000000000000000000000000000000..bfcec4313f5e354067f321c174d22242950aa4bd
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT1211
@@ -0,0 +1 @@
+CONFIG_SENSORS_VT1211=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT8231 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT8231
new file mode 100644
index 0000000000000000000000000000000000000000..7e76791749d647f49743351e9d280f54e055c5a3
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT8231
@@ -0,0 +1 @@
+CONFIG_SENSORS_VT8231=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627EHF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627EHF
new file mode 100644
index 0000000000000000000000000000000000000000..6a06ef0fac32a749c7d6c9a3d28a27803919041e
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627EHF
@@ -0,0 +1 @@
+CONFIG_SENSORS_W83627EHF=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627HF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627HF
new file mode 100644
index 0000000000000000000000000000000000000000..8c578db80a0db901fd982f8b7c8f50f666714c16
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627HF
@@ -0,0 +1 @@
+CONFIG_SENSORS_W83627HF=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83781D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83781D
new file mode 100644
index 0000000000000000000000000000000000000000..e0a34bf27f8f2abc4dfa0b4670ba95ee12dc9b28
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83781D
@@ -0,0 +1 @@
+CONFIG_SENSORS_W83781D=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83791D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83791D
new file mode 100644
index 0000000000000000000000000000000000000000..8bbb2b5b8003eab590707ecc634d728225ea1c8b
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83791D
@@ -0,0 +1 @@
+CONFIG_SENSORS_W83791D=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83792D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83792D
new file mode 100644
index 0000000000000000000000000000000000000000..9dc3fda8bb7fbc0c2bb5d6a69c27ae6a86a8f655
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83792D
@@ -0,0 +1 @@
+CONFIG_SENSORS_W83792D=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83793 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83793
new file mode 100644
index 0000000000000000000000000000000000000000..6c2fefb4566f28ecb760d78c7cfb5922f4015c58
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83793
@@ -0,0 +1 @@
+CONFIG_SENSORS_W83793=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795
new file mode 100644
index 0000000000000000000000000000000000000000..e85aa8e5e82871fc87cc16a022909257c3717971
--- /dev/null
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795
@@ -0,0 +1 @@
+CONFIG_SENSORS_W83795=m
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795_FANCTRL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795_FANCTRL
new
file mode 100644 index 0000000000000000000000000000000000000000..896ede33498c70ee10c352819657881e8a1bcfdf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795_FANCTRL @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83795_FANCTRL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L785TS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L785TS new file mode 100644 index 0000000000000000000000000000000000000000..0c220bdf89a3fa240b3c0046fe4b9cd03683660b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L785TS @@ -0,0 +1 @@ +CONFIG_SENSORS_W83L785TS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L786NG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L786NG new file mode 100644 index 0000000000000000000000000000000000000000..0dfd976647096b9f6269d52f3f2b01f4bc774a89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L786NG @@ -0,0 +1 @@ +CONFIG_SENSORS_W83L786NG=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_XGENE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_XGENE new file mode 100644 index 0000000000000000000000000000000000000000..ae549128e540c879b332bcfe5d40169303104242 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_XGENE @@ -0,0 +1 @@ +# CONFIG_SENSORS_XGENE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ZL6100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ZL6100 new file mode 100644 index 0000000000000000000000000000000000000000..8a28e5080d536534cbe704a50a1a56b776fb947e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ZL6100 @@ -0,0 +1 @@ +CONFIG_SENSORS_ZL6100=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_16550A_VARIANTS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_16550A_VARIANTS new file mode 100644 index 0000000000000000000000000000000000000000..056bf6f166c85eee9bd39a99287406139eafd83d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_16550A_VARIANTS @@ -0,0 +1 @@ +# CONFIG_SERIAL_8250_16550A_VARIANTS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_LPSS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_LPSS new file mode 100644 index 0000000000000000000000000000000000000000..e033c4db19cf972a5b0de7d2e6088ec81afcd751 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_LPSS @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_LPSS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_MID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_MID new file mode 100644 index 0000000000000000000000000000000000000000..58ee08f11f2edba8f4659d032f659c28102d9ab3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_MID @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_MID=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_RT288X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_RT288X new file mode 100644 index 0000000000000000000000000000000000000000..1f10e7574fb433b0e8e344d99b90d8ae5947436a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_RT288X @@ -0,0 +1 @@ +# CONFIG_SERIAL_8250_RT288X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC new file mode 100644 index 0000000000000000000000000000000000000000..998cbcfd789ab05a4acd02f2c02f951aa20c1b27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC @@ -0,0 +1 @@ +CONFIG_SERIAL_ARC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC_NR_PORTS 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC_NR_PORTS new file mode 100644 index 0000000000000000000000000000000000000000..02dfacf686c29fcc87731d4d26daf96d703e6340 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC_NR_PORTS @@ -0,0 +1 @@ +CONFIG_SERIAL_ARC_NR_PORTS=1 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_JSM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_JSM new file mode 100644 index 0000000000000000000000000000000000000000..7f35395cfc2808ea3fdd01c4e644ec4dfa58fab6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_JSM @@ -0,0 +1 @@ +CONFIG_SERIAL_JSM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_LANTIQ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_LANTIQ new file mode 100644 index 0000000000000000000000000000000000000000..7bd602b2fd916456b3e8bd397e98bd8bec0292ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_LANTIQ @@ -0,0 +1 @@ +# CONFIG_SERIAL_LANTIQ is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_MULTI_INSTANTIATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_MULTI_INSTANTIATE new file mode 100644 index 0000000000000000000000000000000000000000..25a0b4bdf115d02bc7dfc7a84e49965204277e8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_MULTI_INSTANTIATE @@ -0,0 +1 @@ +# CONFIG_SERIAL_MULTI_INSTANTIATE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_CT82C710 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_CT82C710 new file mode 100644 index 0000000000000000000000000000000000000000..6ba82bb7c56744e80364e5a4a504b137c471d31d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_CT82C710 @@ -0,0 +1 @@ +# CONFIG_SERIO_CT82C710 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_PARKBD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_PARKBD new file mode 100644 index 0000000000000000000000000000000000000000..8f681961795d425f14e54280f1a26b4a30134291 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_PARKBD @@ -0,0 +1 @@ +# CONFIG_SERIO_PARKBD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC new file mode 100644 index 0000000000000000000000000000000000000000..dd76039a18423a368d5b3b373f4fcb449506b079 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC @@ -0,0 +1 @@ +CONFIG_SFC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_LOGGING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_LOGGING new file mode 100644 index 0000000000000000000000000000000000000000..25cb3fd7c858848f6ea413d296cb3beb010fd7de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_LOGGING @@ -0,0 +1 @@ +CONFIG_SFC_MCDI_LOGGING=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_MON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_MON new file mode 100644 index 0000000000000000000000000000000000000000..5bb40eaeafc4ee8fe9ff65299b2319fe3d019e10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_MON @@ -0,0 +1 @@ +CONFIG_SFC_MCDI_MON=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MTD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MTD new file mode 100644 index 0000000000000000000000000000000000000000..35ab623c5d71c18eaff3117b1ea376d243b2b73a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MTD @@ -0,0 +1 @@ +CONFIG_SFC_MTD=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_SRIOV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_SRIOV new file mode 100644 index 
0000000000000000000000000000000000000000..62e41fa84744d8ee558107e4f179279f9a75b818 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_SRIOV @@ -0,0 +1 @@ +CONFIG_SFC_SRIOV=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU new file mode 100644 index 0000000000000000000000000000000000000000..5b133b2686990b306c75c92b7827b7fc492242ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU @@ -0,0 +1 @@ +CONFIG_SGI_GRU=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..5e65cd5dd8a860100f31b699db0d2cc3f1f0c2f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU_DEBUG @@ -0,0 +1 @@ +# CONFIG_SGI_GRU_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..2c96dcdff373304ab756e0249842e47b70cd16e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_PARTITION @@ -0,0 +1 @@ +CONFIG_SGI_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_XP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_XP new file mode 100644 index 0000000000000000000000000000000000000000..c27ee782f3afca93d7966c8c939f8331849c3273 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_XP @@ -0,0 +1 @@ +CONFIG_SGI_XP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1133 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1133 new file mode 100644 index 0000000000000000000000000000000000000000..8336ff08b8b753115084a288afda32ec1858c870 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1133 @@ -0,0 +1 @@ +# CONFIG_SI1133 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1145 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1145 new file mode 100644 index 0000000000000000000000000000000000000000..744fc2b7fa0c3f11f59f68f0f9f194a91f77a2a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1145 @@ -0,0 +1 @@ +# CONFIG_SI1145 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7005 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7005 new file mode 100644 index 0000000000000000000000000000000000000000..f04c186e3f0148da0f5de959375e19cfe22db7b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7005 @@ -0,0 +1 @@ +# CONFIG_SI7005 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7020 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7020 new file mode 100644 index 0000000000000000000000000000000000000000..25a811632e71616441e3154d0fc772a66334f974 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7020 @@ -0,0 +1 @@ +# CONFIG_SI7020 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SIEMENS_SIMATIC_IPC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SIEMENS_SIMATIC_IPC new file mode 100644 index 0000000000000000000000000000000000000000..91b7eb527e36a230a4d9adc90af49ff8cec4151d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SIEMENS_SIMATIC_IPC @@ -0,0 +1 @@ +# CONFIG_SIEMENS_SIMATIC_IPC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC37B787_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC37B787_WDT new file mode 100644 index 0000000000000000000000000000000000000000..5448253d665027d94413d9b4d3d69b461f16f790 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC37B787_WDT @@ -0,0 +1 @@ +# CONFIG_SMSC37B787_WDT is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC_SCH311X_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC_SCH311X_WDT new file mode 100644 index 0000000000000000000000000000000000000000..86a9a5ae1fba0be58350a779c5d6e70949c6ad01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC_SCH311X_WDT @@ -0,0 +1 @@ +CONFIG_SMSC_SCH311X_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AD1889 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AD1889 new file mode 100644 index 0000000000000000000000000000000000000000..09102d9cfc64b20ca5cea44344d6e3c04975c337 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AD1889 @@ -0,0 +1 @@ +# CONFIG_SND_AD1889 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALI5451 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALI5451 new file mode 100644 index 0000000000000000000000000000000000000000..293781cd636ab5781da260113b44848b2cc39f4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALI5451 @@ -0,0 +1 @@ +# CONFIG_SND_ALI5451 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALOOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALOOP new file mode 100644 index 0000000000000000000000000000000000000000..a7eccb6f0a122f8784f34da2d5510307dad86d87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALOOP @@ -0,0 +1 @@ +# CONFIG_SND_ALOOP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS300 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS300 new file mode 100644 index 0000000000000000000000000000000000000000..68cb96e053acd847428a22e01ecd52e458a59ba0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS300 @@ -0,0 +1 @@ +# CONFIG_SND_ALS300 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS4000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS4000 new file mode 100644 index 0000000000000000000000000000000000000000..0ede559b708c1eb98b7e08264b4b2ccec8aed566 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS4000 @@ -0,0 +1 @@ +# CONFIG_SND_ALS4000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ASIHPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ASIHPI new file mode 100644 index 0000000000000000000000000000000000000000..d58e532055fc8e743f0b2692dbf563b3f490aa79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ASIHPI @@ -0,0 +1 @@ +# CONFIG_SND_ASIHPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP new file mode 100644 index 0000000000000000000000000000000000000000..22c792ae0bfe75438424f299d32dd4d586360be0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP @@ -0,0 +1 @@ +# CONFIG_SND_ATIIXP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP_MODEM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP_MODEM new file mode 100644 index 0000000000000000000000000000000000000000..4833d797128602c5734185a007fd9024655f6535 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP_MODEM @@ -0,0 +1 @@ +# CONFIG_SND_ATIIXP_MODEM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8810 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8810 new file mode 100644 index 0000000000000000000000000000000000000000..8f3dbf45d927ecb001a4151717ae4684a051a572 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8810 @@ -0,0 +1 @@ +# CONFIG_SND_AU8810 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8820 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8820 new file mode 100644 index 
0000000000000000000000000000000000000000..7dbfee28b70159a23ec517bf308fa6b618d8f933 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8820 @@ -0,0 +1 @@ +# CONFIG_SND_AU8820 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8830 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8830 new file mode 100644 index 0000000000000000000000000000000000000000..e8a91203c5c95376801ed248421d168d25046147 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8830 @@ -0,0 +1 @@ +# CONFIG_SND_AU8830 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AW2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AW2 new file mode 100644 index 0000000000000000000000000000000000000000..0065d112f5bae8538ff4e71d20587a7b21e57b79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AW2 @@ -0,0 +1 @@ +# CONFIG_SND_AW2 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AZT3328 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AZT3328 new file mode 100644 index 0000000000000000000000000000000000000000..6375bd6ed5a67d0d39d553394d3a556a88ea0af7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AZT3328 @@ -0,0 +1 @@ +# CONFIG_SND_AZT3328 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BCD2000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BCD2000 new file mode 100644 index 0000000000000000000000000000000000000000..0a60c490b2bb7bc8a81be1802f76e164e134bb8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BCD2000 @@ -0,0 +1 @@ +# CONFIG_SND_BCD2000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BEBOB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BEBOB new file mode 100644 index 0000000000000000000000000000000000000000..5600866c12086fc2301d2d086457a648e78cfde6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BEBOB @@ -0,0 +1 @@ +# CONFIG_SND_BEBOB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BT87X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BT87X new file mode 100644 index 0000000000000000000000000000000000000000..05dbf16d52c542aa4d352cd918e73bee510bc015 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BT87X @@ -0,0 +1 @@ +# CONFIG_SND_BT87X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CA0106 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CA0106 new file mode 100644 index 0000000000000000000000000000000000000000..8ccecad9bd709d11091d09310a65eb3ffe44b231 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CA0106 @@ -0,0 +1 @@ +# CONFIG_SND_CA0106 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CMIPCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CMIPCI new file mode 100644 index 0000000000000000000000000000000000000000..15b6957c0201739cc8c21109a57687c8a38d4b9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CMIPCI @@ -0,0 +1 @@ +# CONFIG_SND_CMIPCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS4281 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS4281 new file mode 100644 index 0000000000000000000000000000000000000000..06b17f0f7999ea5fd47845efd9bb4861d4ff4417 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS4281 @@ -0,0 +1 @@ +# CONFIG_SND_CS4281 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS46XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS46XX new file mode 100644 index 0000000000000000000000000000000000000000..bdf735f0b183258eeff1ff28ce4af126f41f1c17 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS46XX @@ -0,0 +1 @@ +# CONFIG_SND_CS46XX 
is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_FAST_LOOKUP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_FAST_LOOKUP new file mode 100644 index 0000000000000000000000000000000000000000..23547bc33781ab04d90bb6b754a05660d5bd4437 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_FAST_LOOKUP @@ -0,0 +1 @@ +CONFIG_SND_CTL_FAST_LOOKUP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_INPUT_VALIDATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_INPUT_VALIDATION new file mode 100644 index 0000000000000000000000000000000000000000..55768f31aee4770f9b5454a7b6cf617566190fcd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_INPUT_VALIDATION @@ -0,0 +1 @@ +# CONFIG_SND_CTL_INPUT_VALIDATION is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTXFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTXFI new file mode 100644 index 0000000000000000000000000000000000000000..4210ca96de183b05e79561a32d3a26283b9d3479 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTXFI @@ -0,0 +1 @@ +# CONFIG_SND_CTXFI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA20 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA20 new file mode 100644 index 0000000000000000000000000000000000000000..94cfd295f5a49fc0f4d6b7b913138120a6804e7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA20 @@ -0,0 +1 @@ +# CONFIG_SND_DARLA20 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA24 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA24 new file mode 100644 index 0000000000000000000000000000000000000000..7dad111a18bae1f779dd8c696312350116b0feb1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA24 @@ -0,0 +1 @@ +# CONFIG_SND_DARLA24 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..21d131de6679e33050d4a8568654b83c58b1c5ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DEBUG @@ -0,0 +1 @@ +# CONFIG_SND_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DICE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DICE new file mode 100644 index 0000000000000000000000000000000000000000..e9bc8c38601029f4db947ca572ed8b7d34b63c64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DICE @@ -0,0 +1 @@ +# CONFIG_SND_DICE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DMA_SGBUF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DMA_SGBUF new file mode 100644 index 0000000000000000000000000000000000000000..d9563197100467150d152a99c72ad7d3cb43a63c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DMA_SGBUF @@ -0,0 +1 @@ +CONFIG_SND_DMA_SGBUF=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DRIVERS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DRIVERS new file mode 100644 index 0000000000000000000000000000000000000000..83723e9d48b3734fcfbc39b9d8e984b0fc30d027 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DRIVERS @@ -0,0 +1 @@ +CONFIG_SND_DRIVERS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DUMMY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DUMMY new file mode 100644 index 0000000000000000000000000000000000000000..2e2d9940a64e63018b7d5c31bb4f7acc820d6e35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DUMMY @@ -0,0 +1 @@ +# CONFIG_SND_DUMMY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DYNAMIC_MINORS 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DYNAMIC_MINORS new file mode 100644 index 0000000000000000000000000000000000000000..6c2911b74a73335ad99885a15aac4cac37a6d776 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DYNAMIC_MINORS @@ -0,0 +1 @@ +# CONFIG_SND_DYNAMIC_MINORS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ECHO3G b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ECHO3G new file mode 100644 index 0000000000000000000000000000000000000000..dbc71572d39be0fe22c47f2ecc1ed3a9c3ebbb93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ECHO3G @@ -0,0 +1 @@ +# CONFIG_SND_ECHO3G is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1 new file mode 100644 index 0000000000000000000000000000000000000000..5b3614bfc977d0d2f996900c107aa1db4de7b825 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1 @@ -0,0 +1 @@ +# CONFIG_SND_EMU10K1 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1X new file mode 100644 index 0000000000000000000000000000000000000000..b44b14a86ebcdfc59123211997ad025ccefc4737 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1X @@ -0,0 +1 @@ +# CONFIG_SND_EMU10K1X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1370 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1370 new file mode 100644 index 0000000000000000000000000000000000000000..8ed9cccd199d537badd5e59987456be7a0bdc95f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1370 @@ -0,0 +1 @@ +# CONFIG_SND_ENS1370 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1371 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1371 new file mode 100644 index 0000000000000000000000000000000000000000..635ec92367bc6931e8417b50b139f1f3aa9c7ec5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1371 @@ -0,0 +1 @@ +# CONFIG_SND_ENS1371 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1938 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1938 new file mode 100644 index 0000000000000000000000000000000000000000..cd9e4ba6c1d5a43c23697d163ce05ab32d21a9e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1938 @@ -0,0 +1 @@ +# CONFIG_SND_ES1938 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1968 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1968 new file mode 100644 index 0000000000000000000000000000000000000000..22231b0583fa32a4853e715452228f2e4a77842f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1968 @@ -0,0 +1 @@ +# CONFIG_SND_ES1968 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREFACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREFACE new file mode 100644 index 0000000000000000000000000000000000000000..e175acb0a098b8e956d288736ef5a9197bf1231f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREFACE @@ -0,0 +1 @@ +# CONFIG_SND_FIREFACE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE new file mode 100644 index 0000000000000000000000000000000000000000..6e0712765bdf9d461ac82387ed09b75e1e196e95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE @@ -0,0 +1 @@ +CONFIG_SND_FIREWIRE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_DIGI00X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_DIGI00X new file mode 100644 index 
0000000000000000000000000000000000000000..8cd2fa8174ef0f2307ea83b7776fbfb77254843b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_DIGI00X @@ -0,0 +1 @@ +# CONFIG_SND_FIREWIRE_DIGI00X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_MOTU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_MOTU new file mode 100644 index 0000000000000000000000000000000000000000..a9c67879cbd8528f359e1c84216dee3ac27429ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_MOTU @@ -0,0 +1 @@ +# CONFIG_SND_FIREWIRE_MOTU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_TASCAM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_TASCAM new file mode 100644 index 0000000000000000000000000000000000000000..07d6b9dbd0df0dc2fc3da86b5b8ebb92d6fc00a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_TASCAM @@ -0,0 +1 @@ +# CONFIG_SND_FIREWIRE_TASCAM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWORKS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWORKS new file mode 100644 index 0000000000000000000000000000000000000000..b9e704c9ae81b06a9ac1e3929fd09adbb3cd33b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWORKS @@ -0,0 +1 @@ +# CONFIG_SND_FIREWORKS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FM801 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FM801 new file mode 100644 index 0000000000000000000000000000000000000000..2f820d8f49ea47ece1c788bff3d240afd2b2de1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FM801 @@ -0,0 +1 @@ +# CONFIG_SND_FM801 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA20 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA20 new file mode 100644 index 0000000000000000000000000000000000000000..237adaa67cc228413c3001f5783c18e8239619b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA20 @@ -0,0 +1 @@ +# CONFIG_SND_GINA20 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA24 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA24 new file mode 100644 index 0000000000000000000000000000000000000000..c248e36cef3f9dab338fe0c88d5d398e81846b7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA24 @@ -0,0 +1 @@ +# CONFIG_SND_GINA24 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_INTEL new file mode 100644 index 0000000000000000000000000000000000000000..6f057ecfeaaadc92fc8ea9a1b057c96137c31d43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_INTEL @@ -0,0 +1 @@ +# CONFIG_SND_HDA_INTEL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_PREALLOC_SIZE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_PREALLOC_SIZE new file mode 100644 index 0000000000000000000000000000000000000000..c7493fadbec29b41d0da471bc5f7bce08d8fb365 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_PREALLOC_SIZE @@ -0,0 +1 @@ +CONFIG_SND_HDA_PREALLOC_SIZE=0 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSP new file mode 100644 index 0000000000000000000000000000000000000000..488843333c1971d028d1d6e702f0cb28ebaa165c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSP @@ -0,0 +1 @@ +# CONFIG_SND_HDSP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSPM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSPM new file mode 100644 index 
0000000000000000000000000000000000000000..591d39285e3b5dc8603a141b00d1c57a330ac601 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSPM @@ -0,0 +1 @@ +# CONFIG_SND_HDSPM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HRTIMER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HRTIMER new file mode 100644 index 0000000000000000000000000000000000000000..c05a9e7cd9bc853621a32d95d18aa3d42d5dde03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HRTIMER @@ -0,0 +1 @@ +# CONFIG_SND_HRTIMER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1712 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1712 new file mode 100644 index 0000000000000000000000000000000000000000..4bc037df56d663f2671a17e9312858b968217cf8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1712 @@ -0,0 +1 @@ +# CONFIG_SND_ICE1712 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1724 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1724 new file mode 100644 index 0000000000000000000000000000000000000000..a61f1f14b42c37a17fe80d0cf8a69071c0372562 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1724 @@ -0,0 +1 @@ +# CONFIG_SND_ICE1724 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGO new file mode 100644 index 0000000000000000000000000000000000000000..018458b13ddd13346f7bfdd74d2c418d9e9962c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGO @@ -0,0 +1 @@ +# CONFIG_SND_INDIGO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJ new file mode 100644 index 0000000000000000000000000000000000000000..1b50efe2806ec5d58c92ced73e25dcae9504a856 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJ @@ -0,0 +1 @@ +# CONFIG_SND_INDIGODJ is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJX new file mode 100644 index 0000000000000000000000000000000000000000..55d1e51b95d257ac6f47cc1d47bb6803809c3a35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJX @@ -0,0 +1 @@ +# CONFIG_SND_INDIGODJX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIO new file mode 100644 index 0000000000000000000000000000000000000000..f08d6c10ec4983b56cb837d8e50cd34adee22213 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIO @@ -0,0 +1 @@ +# CONFIG_SND_INDIGOIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIOX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIOX new file mode 100644 index 0000000000000000000000000000000000000000..5d2cf897abb444719ce5bfc080ba23ab5dd43b4c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIOX @@ -0,0 +1 @@ +# CONFIG_SND_INDIGOIOX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0 new file mode 100644 index 0000000000000000000000000000000000000000..d97191a98f68e4fc6edb31d8bb09587f49d2c1f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0 @@ -0,0 +1 @@ +# CONFIG_SND_INTEL8X0 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0M b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0M new file mode 100644 index 0000000000000000000000000000000000000000..4e04bb51cf1ebe27ba27af379dec28ed86583031 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0M @@ -0,0 +1 @@ +# CONFIG_SND_INTEL8X0M is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ISIGHT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ISIGHT new file mode 100644 index 0000000000000000000000000000000000000000..a2b5bdd76f4a890fc1f20d923080ce8bdf4a8cc5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ISIGHT @@ -0,0 +1 @@ +# CONFIG_SND_ISIGHT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_KORG1212 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_KORG1212 new file mode 100644 index 0000000000000000000000000000000000000000..4b6bb9cfd200b27e45ed4e37bb58b6a62ff5aef9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_KORG1212 @@ -0,0 +1 @@ +# CONFIG_SND_KORG1212 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA20 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA20 new file mode 100644 index 0000000000000000000000000000000000000000..d49f5af1aa1a6d66d16872bf88b8cf12b537a01e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA20 @@ -0,0 +1 @@ +# CONFIG_SND_LAYLA20 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA24 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA24 new file mode 100644 index 0000000000000000000000000000000000000000..7a1198db1825300f8d22bf4af61c55f8d7b844e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA24 @@ -0,0 +1 @@ +# CONFIG_SND_LAYLA24 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LOLA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LOLA new file mode 100644 index 0000000000000000000000000000000000000000..4687c9628fdcda526a39d8f3c81735f48a49c83b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LOLA @@ -0,0 +1 @@ +# CONFIG_SND_LOLA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LX6464ES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LX6464ES new file mode 100644 index 0000000000000000000000000000000000000000..f5ca1aa353899a58649b2e5122c3f9910204b704 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LX6464ES @@ -0,0 +1 @@ +# CONFIG_SND_LX6464ES is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MAESTRO3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MAESTRO3 new file mode 100644 index 0000000000000000000000000000000000000000..d93d1ac5fbfc31fd17d3fdd7944d5ad802f5634c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MAESTRO3 @@ -0,0 +1 @@ +# CONFIG_SND_MAESTRO3 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIA new file mode 100644 index 0000000000000000000000000000000000000000..663a8cff6c91eac5907a272e341c86e2c20813a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIA @@ -0,0 +1 @@ +# CONFIG_SND_MIA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIXART b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIXART new file mode 100644 index 0000000000000000000000000000000000000000..b1a96442ea0b682b541ea8de8ee67afc108f5299 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIXART @@ -0,0 +1 @@ +# CONFIG_SND_MIXART is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MONA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MONA new file mode 100644 index 0000000000000000000000000000000000000000..369b59f3b522515f5fd86a48730b9e2471f0d439 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MONA @@ -0,0 +1 @@ +# CONFIG_SND_MONA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MPU401 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MPU401 new file mode 100644 index 0000000000000000000000000000000000000000..5ef99d5ba1f23d074a87a72c664d425fe27d7b97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MPU401 @@ -0,0 +1 @@ +# CONFIG_SND_MPU401 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTPAV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTPAV new file mode 100644 index 0000000000000000000000000000000000000000..150befdf873284bda9b3e34ab6d203454f52d2b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTPAV @@ -0,0 +1 @@ +# CONFIG_SND_MTPAV is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTS64 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTS64 new file mode 100644 index 0000000000000000000000000000000000000000..80edee474d8ab0ae3c00e304364915fa69f2d5cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTS64 @@ -0,0 +1 @@ +# CONFIG_SND_MTS64 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_NM256 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_NM256 new file mode 100644 index 0000000000000000000000000000000000000000..84971d4fd34f85a7e02d26efe9464fc39bab3311 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_NM256 @@ -0,0 +1 @@ +# CONFIG_SND_NM256 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OSSEMUL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OSSEMUL new file mode 100644 index 0000000000000000000000000000000000000000..998d310620a17eeea62456f162f44eea9575c5cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OSSEMUL @@ -0,0 +1 @@ +# CONFIG_SND_OSSEMUL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXFW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXFW new file mode 100644 index 0000000000000000000000000000000000000000..31da39fc0faadee0e7b11bcab37e322b263bff73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXFW @@ -0,0 +1 @@ +# CONFIG_SND_OXFW is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXYGEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXYGEN new file mode 100644 index 0000000000000000000000000000000000000000..a2f71fa5c9147f8cde261be6bb877a36331d2e27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXYGEN @@ -0,0 +1 @@ +# CONFIG_SND_OXYGEN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCI new file mode 100644 index 0000000000000000000000000000000000000000..667778acc79a88b5a40e620ab8d9232d8cc9e116 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCI @@ -0,0 +1 @@ +CONFIG_SND_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCMTEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCMTEST new file mode 100644 index 0000000000000000000000000000000000000000..07232a49cf04592ae4b8db82e6f53d8a6c33afa7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCMTEST @@ -0,0 +1 @@ +# CONFIG_SND_PCMTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCM_TIMER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCM_TIMER new file mode 100644 index 0000000000000000000000000000000000000000..504115d7059127969e539b3eeb9a71f0cf8dab11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCM_TIMER @@ -0,0 +1 @@ +CONFIG_SND_PCM_TIMER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCSP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCSP new file mode 100644 index 0000000000000000000000000000000000000000..8a97d6baae09d36c6f8173e78eda10e475166fcb --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCSP @@ -0,0 +1 @@ +# CONFIG_SND_PCSP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCXHR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCXHR new file mode 100644 index 0000000000000000000000000000000000000000..fa7e3199ebb4fdaf8df91bb00ea2421036b4bb3b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCXHR @@ -0,0 +1 @@ +# CONFIG_SND_PCXHR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PORTMAN2X4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PORTMAN2X4 new file mode 100644 index 0000000000000000000000000000000000000000..7f4615b982e1a4136033484a81ca315453aa1091 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PORTMAN2X4 @@ -0,0 +1 @@ +# CONFIG_SND_PORTMAN2X4 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PROC_FS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PROC_FS new file mode 100644 index 0000000000000000000000000000000000000000..506a2835836013481e513a49036d5dedbb7df5ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PROC_FS @@ -0,0 +1 @@ +CONFIG_SND_PROC_FS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RIPTIDE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RIPTIDE new file mode 100644 index 0000000000000000000000000000000000000000..da50510fa0fd9848c71be910589d7fee7157b866 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RIPTIDE @@ -0,0 +1 @@ +# CONFIG_SND_RIPTIDE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME32 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME32 new file mode 100644 index 0000000000000000000000000000000000000000..7def0b277782d4eb19aa29f480455cfe62c2adfb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME32 @@ -0,0 +1 @@ +# CONFIG_SND_RME32 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME96 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME96 new file mode 100644 index 0000000000000000000000000000000000000000..8b3f743ec0aca8957255a199a8bd6d0ab18abfb8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME96 @@ -0,0 +1 @@ +# CONFIG_SND_RME96 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME9652 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME9652 new file mode 100644 index 0000000000000000000000000000000000000000..3c5e88f77896428335e545aff41079e4022ff3a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME9652 @@ -0,0 +1 @@ +# CONFIG_SND_RME9652 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SE6X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SE6X new file mode 100644 index 0000000000000000000000000000000000000000..3da4e9ce72bab2685d575947814a4003e2cb2b55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SE6X @@ -0,0 +1 @@ +# CONFIG_SND_SE6X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SEQUENCER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SEQUENCER new file mode 100644 index 0000000000000000000000000000000000000000..d3055ec3bd7dfc0e5cf298b7ba552a432ab8f229 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SEQUENCER @@ -0,0 +1 @@ +# CONFIG_SND_SEQUENCER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SERIAL_U16550 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SERIAL_U16550 new file mode 100644 index 0000000000000000000000000000000000000000..4b802a5283e55764a6e6c99b345a68c88c2cdfba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SERIAL_U16550 @@ -0,0 +1 @@ +# CONFIG_SND_SERIAL_U16550 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SOC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SOC new file mode 100644 index 0000000000000000000000000000000000000000..a161b8e18362337afd7df321f7a94346f9b50388 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SOC @@ -0,0 +1 @@ +# CONFIG_SND_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SONICVIBES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SONICVIBES new file mode 100644 index 0000000000000000000000000000000000000000..d9a4a21668c13c9b7c0d935f87d86ed30194f8a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SONICVIBES @@ -0,0 +1 @@ +# CONFIG_SND_SONICVIBES is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SPI new file mode 100644 index 0000000000000000000000000000000000000000..05a828ae220358355e78edd4bbc630bc54ed6a37 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SPI @@ -0,0 +1 @@ +CONFIG_SND_SPI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SUPPORT_OLD_API b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SUPPORT_OLD_API new file mode 100644 index 0000000000000000000000000000000000000000..dd74570bec64453f75d06a1a7f8d59ed7980cd6e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SUPPORT_OLD_API @@ -0,0 +1 @@ +CONFIG_SND_SUPPORT_OLD_API=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_TRIDENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_TRIDENT new file mode 100644 index 0000000000000000000000000000000000000000..abc485d574a6ce2ea3f550067290161e1a83cd3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_TRIDENT @@ -0,0 +1 @@ +# CONFIG_SND_TRIDENT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB new file mode 100644 index 0000000000000000000000000000000000000000..fb681b173d67543344e1b2c21ecb4c4b9c514494 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB @@ -0,0 +1 @@ +CONFIG_SND_USB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_6FIRE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_6FIRE new file mode 100644 index 0000000000000000000000000000000000000000..d48ebbd6c1f4a19d41ce0e7cec8cb41641ec7fe9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_6FIRE @@ -0,0 +1 @@ +# CONFIG_SND_USB_6FIRE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_AUDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_AUDIO new file mode 100644 index 0000000000000000000000000000000000000000..232911a82531bd21313869c96845bb63d99948c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_AUDIO @@ -0,0 +1 @@ +# CONFIG_SND_USB_AUDIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_CAIAQ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_CAIAQ new file mode 100644 index 0000000000000000000000000000000000000000..2352b346814bce99f97bfa6f0ae19415e539f3db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_CAIAQ @@ -0,0 +1 @@ +# CONFIG_SND_USB_CAIAQ is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_HIFACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_HIFACE new file mode 100644 index 0000000000000000000000000000000000000000..bd9be87194f56eab11f03400180556def45d7e70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_HIFACE @@ -0,0 +1 @@ +# CONFIG_SND_USB_HIFACE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_POD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_POD new file mode 100644 index 
0000000000000000000000000000000000000000..86f53fbe75ab55be765ef364f094660135bf5f71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_POD @@ -0,0 +1 @@ +# CONFIG_SND_USB_POD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_PODHD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_PODHD new file mode 100644 index 0000000000000000000000000000000000000000..c96cf752eae510d2c0268236eaddcc694e2eefb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_PODHD @@ -0,0 +1 @@ +# CONFIG_SND_USB_PODHD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_TONEPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_TONEPORT new file mode 100644 index 0000000000000000000000000000000000000000..15ecf5ac46ec88dfdf3f736d322b28f1cf785b68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_TONEPORT @@ -0,0 +1 @@ +# CONFIG_SND_USB_TONEPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_UA101 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_UA101 new file mode 100644 index 0000000000000000000000000000000000000000..4805309714ac326917cb7426003f9d9f52a96d5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_UA101 @@ -0,0 +1 @@ +# CONFIG_SND_USB_UA101 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_US122L b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_US122L new file mode 100644 index 0000000000000000000000000000000000000000..b7a20b76efc569048cf43abae439c4a0665b1b9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_US122L @@ -0,0 +1 @@ +# CONFIG_SND_USB_US122L is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_USX2Y b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_USX2Y new file mode 100644 index 0000000000000000000000000000000000000000..1e222656395d1b10a067fe6cc34cf28e1be1e9e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_USX2Y @@ -0,0 +1 @@ +# CONFIG_SND_USB_USX2Y is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_VARIAX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_VARIAX new file mode 100644 index 0000000000000000000000000000000000000000..68675ae6abbeec2a0f8887bbaa5277f79f770e19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_VARIAX @@ -0,0 +1 @@ +# CONFIG_SND_USB_VARIAX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PRINTK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PRINTK new file mode 100644 index 0000000000000000000000000000000000000000..b119c633de985d682561496328e0e48d9e35964b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PRINTK @@ -0,0 +1 @@ +# CONFIG_SND_VERBOSE_PRINTK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PROCFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PROCFS new file mode 100644 index 0000000000000000000000000000000000000000..4ae50d741fccf6d449bf1b8069875b2382e36382 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PROCFS @@ -0,0 +1 @@ +CONFIG_SND_VERBOSE_PROCFS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX new file mode 100644 index 0000000000000000000000000000000000000000..2c26735789e07dbe2c29b7983f563eef00dd9b8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX @@ -0,0 +1 @@ +# CONFIG_SND_VIA82XX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX_MODEM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX_MODEM new file mode 100644 index 
0000000000000000000000000000000000000000..53055c69427bc27bb7b26c449854fedd549bdd3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX_MODEM @@ -0,0 +1 @@ +# CONFIG_SND_VIA82XX_MODEM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTIO new file mode 100644 index 0000000000000000000000000000000000000000..506b74e9d78a200b28568571877ffac87a5f4551 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTIO @@ -0,0 +1 @@ +# CONFIG_SND_VIRTIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTUOSO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTUOSO new file mode 100644 index 0000000000000000000000000000000000000000..0d204f85d909f57a2a6343ef91b23c6013ea5305 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTUOSO @@ -0,0 +1 @@ +# CONFIG_SND_VIRTUOSO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VX222 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VX222 new file mode 100644 index 0000000000000000000000000000000000000000..8cd1e3bcdb07e1507db70d076228d1655e988354 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VX222 @@ -0,0 +1 @@ +# CONFIG_SND_VX222 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_X86 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_X86 new file mode 100644 index 0000000000000000000000000000000000000000..310c61afc0ba091a64443b012a03447bbf98334d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_X86 @@ -0,0 +1 @@ +CONFIG_SND_X86=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_XEN_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_XEN_FRONTEND new file mode 100644 index 0000000000000000000000000000000000000000..064c6b2fa66891de7130ab4ea5493403d8f0919c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_XEN_FRONTEND @@ -0,0 +1 @@ +# CONFIG_SND_XEN_FRONTEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_YMFPCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_YMFPCI new file mode 100644 index 0000000000000000000000000000000000000000..7bc69034cfad5484fe7a8586455fc93b7b3c5e16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_YMFPCI @@ -0,0 +1 @@ +# CONFIG_SND_YMFPCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SOLARIS_X86_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SOLARIS_X86_PARTITION new file mode 100644 index 0000000000000000000000000000000000000000..deb79e27e78f7dff67565ffcd62c4452fc325eff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SOLARIS_X86_PARTITION @@ -0,0 +1 @@ +CONFIG_SOLARIS_X86_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONYPI_COMPAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONYPI_COMPAT new file mode 100644 index 0000000000000000000000000000000000000000..6655c779b0ab949275ee7f508820392d916f0d9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONYPI_COMPAT @@ -0,0 +1 @@ +CONFIG_SONYPI_COMPAT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONY_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONY_LAPTOP new file mode 100644 index 0000000000000000000000000000000000000000..a31919301348f76e3cae421a889729ac0a0b8f07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONY_LAPTOP @@ -0,0 +1 @@ +CONFIG_SONY_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_BUTTERFLY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_BUTTERFLY new file mode 100644 index 0000000000000000000000000000000000000000..958604c8fe6087f034cc89c2dedcead630d6282c --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_BUTTERFLY @@ -0,0 +1 @@ +# CONFIG_SPI_BUTTERFLY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LANTIQ_SSC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LANTIQ_SSC new file mode 100644 index 0000000000000000000000000000000000000000..f93334dd32f3f7a6393e12d189d621fc229143e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LANTIQ_SSC @@ -0,0 +1 @@ +# CONFIG_SPI_LANTIQ_SSC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LM70_LLP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LM70_LLP new file mode 100644 index 0000000000000000000000000000000000000000..a3b56af5a4f93d0242b58e960c9fb980eb475346 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LM70_LLP @@ -0,0 +1 @@ +# CONFIG_SPI_LM70_LLP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_MEM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_MEM new file mode 100644 index 0000000000000000000000000000000000000000..89608229780568293451403d60dd7482152fd868 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_MEM @@ -0,0 +1 @@ +# CONFIG_SPI_MEM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPS30_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPS30_I2C new file mode 100644 index 0000000000000000000000000000000000000000..65e6b608ecac0a4a3f4fc3f5591061b0770f544b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPS30_I2C @@ -0,0 +1 @@ +# CONFIG_SPS30_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF04 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF04 new file mode 100644 index 0000000000000000000000000000000000000000..7dcc9136e884c4e141b048d8a17054a717a3f2e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF04 @@ -0,0 +1 @@ +# CONFIG_SRF04 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF08 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF08 new file mode 100644 index 0000000000000000000000000000000000000000..11f5dbcf5a3c4601814c22d116829564f83ffd03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF08 @@ -0,0 +1 @@ +# CONFIG_SRF08 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK3310 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK3310 new file mode 100644 index 0000000000000000000000000000000000000000..085e44f4d1a00986aa81897c59c4bf0b3922d715 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK3310 @@ -0,0 +1 @@ +# CONFIG_STK3310 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8312 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8312 new file mode 100644 index 0000000000000000000000000000000000000000..a6776d90eef62dbe324360ad10af024d5fab0905 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8312 @@ -0,0 +1 @@ +# CONFIG_STK8312 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8BA50 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8BA50 new file mode 100644 index 0000000000000000000000000000000000000000..7608b4ef79e6e80ec2ac027e5a56cea63832c6cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8BA50 @@ -0,0 +1 @@ +# CONFIG_STK8BA50 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ST_UVIS25 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ST_UVIS25 new file mode 100644 index 0000000000000000000000000000000000000000..c4b590934fb849605ab841707758fb92e3d5221a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ST_UVIS25 @@ -0,0 +1 @@ +# CONFIG_ST_UVIS25 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SUN_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SUN_PARTITION new file mode 100644 
index 0000000000000000000000000000000000000000..67e2deb517cdc1faeea693658a7b07fa1df40843 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SUN_PARTITION @@ -0,0 +1 @@ +CONFIG_SUN_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SURFACE3_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SURFACE3_WMI new file mode 100644 index 0000000000000000000000000000000000000000..8e45af1e32a0c23d7b79cdf6fddbfa4d9c8f812c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SURFACE3_WMI @@ -0,0 +1 @@ +# CONFIG_SURFACE3_WMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9310 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9310 new file mode 100644 index 0000000000000000000000000000000000000000..14c168021e5f0e5b7c452ef42eaec8bb671d4032 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9310 @@ -0,0 +1 @@ +# CONFIG_SX9310 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9324 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9324 new file mode 100644 index 0000000000000000000000000000000000000000..1864bc0dc6277080909000b730758ed91ee9260c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9324 @@ -0,0 +1 @@ +# CONFIG_SX9324 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9360 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9360 new file mode 100644 index 0000000000000000000000000000000000000000..1b9af0ea4056932499a535a5975f6690dd9ec587 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9360 @@ -0,0 +1 @@ +# CONFIG_SX9360 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9500 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9500 new file mode 100644 index 0000000000000000000000000000000000000000..f7899805e8a7f177b7da27a27abefad3e96dd148 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9500 @@ -0,0 +1 @@ +# CONFIG_SX9500 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYSTEM76_ACPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYSTEM76_ACPI new file mode 100644 index 0000000000000000000000000000000000000000..40471194d63c9dea4edb3e13fc7ccf8a52c40792 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYSTEM76_ACPI @@ -0,0 +1 @@ +# CONFIG_SYSTEM76_ACPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYS_HYPERVISOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYS_HYPERVISOR new file mode 100644 index 0000000000000000000000000000000000000000..a237fda7aa29c42aae4986be4c7678b37c69307c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYS_HYPERVISOR @@ -0,0 +1 @@ +CONFIG_SYS_HYPERVISOR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_T5403 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_T5403 new file mode 100644 index 0000000000000000000000000000000000000000..ef5a8130a77a6a0ab50d5d5d825de57979e42f47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_T5403 @@ -0,0 +1 @@ +# CONFIG_T5403 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_SERIAL_WACOM4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_SERIAL_WACOM4 new file mode 100644 index 0000000000000000000000000000000000000000..dc8670f60fe3d1405efb12a6921eadc406c159b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_SERIAL_WACOM4 @@ -0,0 +1 @@ +CONFIG_TABLET_SERIAL_WACOM4=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_ACECAD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_ACECAD new file mode 100644 index 0000000000000000000000000000000000000000..cc981e4af3de37db624dfc6766701202a7a44381 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_ACECAD @@ -0,0 +1 @@ 
+CONFIG_TABLET_USB_ACECAD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_AIPTEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_AIPTEK new file mode 100644 index 0000000000000000000000000000000000000000..250cea738cc69f6233da6544f86b5a0ff4ecc0a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_AIPTEK @@ -0,0 +1 @@ +CONFIG_TABLET_USB_AIPTEK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_HANWANG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_HANWANG new file mode 100644 index 0000000000000000000000000000000000000000..0e71e48a49cbe8296067f016e820fa28f180059b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_HANWANG @@ -0,0 +1 @@ +# CONFIG_TABLET_USB_HANWANG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_KBTAB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_KBTAB new file mode 100644 index 0000000000000000000000000000000000000000..2dcbea9334869c41ac763e40438d986942779a63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_KBTAB @@ -0,0 +1 @@ +CONFIG_TABLET_USB_KBTAB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_PEGASUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_PEGASUS new file mode 100644 index 0000000000000000000000000000000000000000..78f51e018cfc07b0dbd6b95e4ac3f66d07fb3497 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_PEGASUS @@ -0,0 +1 @@ +# CONFIG_TABLET_USB_PEGASUS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_ATMEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_ATMEL new file mode 100644 index 0000000000000000000000000000000000000000..d7bd6a50d1266421718de41429c5fd1d77e120b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_ATMEL @@ -0,0 +1 @@ +CONFIG_TCG_TIS_I2C_ATMEL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_INFINEON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_INFINEON new file mode 100644 index 0000000000000000000000000000000000000000..c4409f95bf96726ee232322d71711f64922c39a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_INFINEON @@ -0,0 +1 @@ +CONFIG_TCG_TIS_I2C_INFINEON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_NUVOTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_NUVOTON new file mode 100644 index 0000000000000000000000000000000000000000..bba92992521946a1b86f83431c6448a6f5e25535 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_NUVOTON @@ -0,0 +1 @@ +CONFIG_TCG_TIS_I2C_NUVOTON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24 new file mode 100644 index 0000000000000000000000000000000000000000..7dbdd343ad162612767fc1393d7d49666864f997 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24 @@ -0,0 +1 @@ +CONFIG_TCG_TIS_ST33ZP24=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24_I2C new file mode 100644 index 0000000000000000000000000000000000000000..284cac4f07e3a78016629c763b843f2a29baf5dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24_I2C @@ -0,0 +1 @@ +CONFIG_TCG_TIS_ST33ZP24_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_XEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_XEN new file mode 100644 index 0000000000000000000000000000000000000000..778bc2d900f0cf74eedd8c3f94509989340ff3b5 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_XEN @@ -0,0 +1 @@ +# CONFIG_TCG_XEN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3414 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3414 new file mode 100644 index 0000000000000000000000000000000000000000..5b04d4ee12983f34b35d5aafc94ffef1eab08fac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3414 @@ -0,0 +1 @@ +# CONFIG_TCS3414 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3472 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3472 new file mode 100644 index 0000000000000000000000000000000000000000..0d00b4785abd9a0d949ed7a399283961b9e25694 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3472 @@ -0,0 +1 @@ +# CONFIG_TCS3472 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TELCLOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TELCLOCK new file mode 100644 index 0000000000000000000000000000000000000000..0cfe9ee4aedd635343aca2e1b0ca422863df9cc2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TELCLOCK @@ -0,0 +1 @@ +CONFIG_TELCLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_CLOCKSOURCE_WATCHDOG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_CLOCKSOURCE_WATCHDOG new file mode 100644 index 0000000000000000000000000000000000000000..2f86ea497b700630a6912945cfdb829ff9237fa9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_CLOCKSOURCE_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_TEST_CLOCKSOURCE_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_FPU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_FPU new file mode 100644 index 0000000000000000000000000000000000000000..8091d4abf37995077c37a24889161b43a3dd6233 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_FPU @@ -0,0 +1 @@ +# CONFIG_TEST_FPU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_ACPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_ACPI new file mode 100644 index 0000000000000000000000000000000000000000..f0a399285ee8d0b07730ebe9db3ac317620af896 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_ACPI @@ -0,0 +1 @@ +CONFIG_THERMAL_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG new file mode 100644 index 0000000000000000000000000000000000000000..eeb8ec40aa12a0cab28be2948b4baf6094e8a2b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG @@ -0,0 +1 @@ +# CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI new file mode 100644 index 0000000000000000000000000000000000000000..b56a7701d5d1daf846b6f4333f95e20a1fe29b6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI @@ -0,0 +1 @@ +CONFIG_THINKPAD_ACPI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_ALSA_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_ALSA_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..5b406b9baa4f2ffc7a44da11852f53f04f50b8c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_ALSA_SUPPORT @@ -0,0 +1 @@ +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUG new file mode 100644 index 0000000000000000000000000000000000000000..10e6eb6a70fab5ab8597784b11991ca0691ed626 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUG @@ -0,0 +1 @@ +# CONFIG_THINKPAD_ACPI_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUGFACILITIES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUGFACILITIES new file mode 100644 index 0000000000000000000000000000000000000000..0673591f5ac32dec32de9264cc3932cb8bc21872 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUGFACILITIES @@ -0,0 +1 @@ +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_HOTKEY_POLL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_HOTKEY_POLL new file mode 100644 index 0000000000000000000000000000000000000000..cf6a85819368e55601c9b2216972af55a5907466 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_HOTKEY_POLL @@ -0,0 +1 @@ +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_UNSAFE_LEDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_UNSAFE_LEDS new file mode 100644 index 0000000000000000000000000000000000000000..535380b2780243b7ab7cfbe209caef33fc81e837 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_UNSAFE_LEDS @@ -0,0 +1 @@ +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_VIDEO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_VIDEO new file mode 100644 index 0000000000000000000000000000000000000000..a13ebc06be533138d75afb92f4cc29aece73fa55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_VIDEO @@ -0,0 +1 @@ +CONFIG_THINKPAD_ACPI_VIDEO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_LMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_LMI new file mode 100644 index 0000000000000000000000000000000000000000..71ff74ef7e7b075990bba862ecca4e464f852c15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_LMI @@ -0,0 +1 @@ +# CONFIG_THINKPAD_LMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_BGX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_BGX new file mode 100644 index 0000000000000000000000000000000000000000..99c2371d5292f69f1da638dae7381f1206469b03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_BGX @@ -0,0 +1 @@ +# CONFIG_THUNDER_NIC_BGX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_PF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_PF new file mode 100644 index 0000000000000000000000000000000000000000..7b9f749d884d0f31b182b1f4aecc5f2cf6b919be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_PF @@ -0,0 +1 @@ +# CONFIG_THUNDER_NIC_PF is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_RGX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_RGX new file mode 100644 index 0000000000000000000000000000000000000000..ee56094121d9f0976df73cfab6ee1798378a346c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_RGX @@ -0,0 +1 @@ +# CONFIG_THUNDER_NIC_RGX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_VF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_VF new file mode 100644 index 0000000000000000000000000000000000000000..44295fa0bc37592191d67ba6db02db613ffbd324 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_VF @@ -0,0 +1 @@ +# CONFIG_THUNDER_NIC_VF is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TIFM_7XX1 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TIFM_7XX1 new file mode 100644 index 0000000000000000000000000000000000000000..86734bb34b95b3f57eac638f6ff089ee149f9fbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TIFM_7XX1 @@ -0,0 +1 @@ +CONFIG_TIFM_7XX1=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC081C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC081C new file mode 100644 index 0000000000000000000000000000000000000000..58ab3469536d63e85bd63543dc715c45d72e66d5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC081C @@ -0,0 +1 @@ +# CONFIG_TI_ADC081C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC0832 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC0832 new file mode 100644 index 0000000000000000000000000000000000000000..a1e55e6c029a226cf6c76d2697c7f925057594c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC0832 @@ -0,0 +1 @@ +# CONFIG_TI_ADC0832 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC084S021 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC084S021 new file mode 100644 index 0000000000000000000000000000000000000000..4845420948d4c72502677951e67a3b07146410db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC084S021 @@ -0,0 +1 @@ +# CONFIG_TI_ADC084S021 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC108S102 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC108S102 new file mode 100644 index 0000000000000000000000000000000000000000..b70880f5bfa7021d163417f3ce8b800dea8d054a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC108S102 @@ -0,0 +1 @@ +# CONFIG_TI_ADC108S102 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC12138 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC12138 new file mode 100644 index 0000000000000000000000000000000000000000..097a76b1d61feb513a3d98b8f1b64d4fa1eb6f2b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC12138 @@ -0,0 +1 @@ +# CONFIG_TI_ADC12138 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC128S052 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC128S052 new file mode 100644 index 0000000000000000000000000000000000000000..d42dd79ca6f1e7205bee8814b27babc9870fe0f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC128S052 @@ -0,0 +1 @@ +# CONFIG_TI_ADC128S052 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC161S626 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC161S626 new file mode 100644 index 0000000000000000000000000000000000000000..016e8c552677027be8c1122798be00fe2918398f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC161S626 @@ -0,0 +1 @@ +# CONFIG_TI_ADC161S626 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1015 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1015 new file mode 100644 index 0000000000000000000000000000000000000000..8a8d511c60a82e9af7515f8a6fb0ab550e79ed8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1015 @@ -0,0 +1 @@ +# CONFIG_TI_ADS1015 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1100 new file mode 100644 index 0000000000000000000000000000000000000000..abc5533f8cde2a172f62c2c095f0f0a91e292dbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1100 @@ -0,0 +1 @@ +# CONFIG_TI_ADS1100 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS124S08 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS124S08 new file mode 100644 index 
0000000000000000000000000000000000000000..8d6d673b505b70d949f011e6d3af17a926e5e03e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS124S08 @@ -0,0 +1 @@ +# CONFIG_TI_ADS124S08 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS131E08 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS131E08 new file mode 100644 index 0000000000000000000000000000000000000000..c60c731eb2d188f414c27bbd77261d5df741c825 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS131E08 @@ -0,0 +1 @@ +# CONFIG_TI_ADS131E08 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7924 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7924 new file mode 100644 index 0000000000000000000000000000000000000000..0c6bdb1eea122bc1843b70d400ce9084bd0932e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7924 @@ -0,0 +1 @@ +# CONFIG_TI_ADS7924 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7950 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7950 new file mode 100644 index 0000000000000000000000000000000000000000..f2a68954b22c920baca627b07a82fd2e01c9e134 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7950 @@ -0,0 +1 @@ +# CONFIG_TI_ADS7950 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8344 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8344 new file mode 100644 index 0000000000000000000000000000000000000000..5c0ba0608a384144bf94906c0b2d397cd2393250 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8344 @@ -0,0 +1 @@ +# CONFIG_TI_ADS8344 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8688 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8688 new file mode 100644 index 0000000000000000000000000000000000000000..c1e7d30101b5f19c8ecd3a1c755394a5d6703420 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8688 @@ -0,0 +1 @@ +# CONFIG_TI_ADS8688 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC082S085 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC082S085 new file mode 100644 index 0000000000000000000000000000000000000000..90397f5e302ac1330662e58acf9c8123fa539fb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC082S085 @@ -0,0 +1 @@ +# CONFIG_TI_DAC082S085 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC5571 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC5571 new file mode 100644 index 0000000000000000000000000000000000000000..4c8e5798c547e9c2d89f6ee2d3bdda8b9493a6cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC5571 @@ -0,0 +1 @@ +# CONFIG_TI_DAC5571 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7311 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7311 new file mode 100644 index 0000000000000000000000000000000000000000..26dfffbc34e7cd621537d90521506bcab03d30eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7311 @@ -0,0 +1 @@ +# CONFIG_TI_DAC7311 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7612 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7612 new file mode 100644 index 0000000000000000000000000000000000000000..38e697dd813f469a69a88c37027669a8a6842890 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7612 @@ -0,0 +1 @@ +# CONFIG_TI_DAC7612 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_LMP92064 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_LMP92064 new file mode 100644 index 0000000000000000000000000000000000000000..61cc56124502f497ef4b5a8749aa04409932ede3 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_LMP92064 @@ -0,0 +1 @@ +# CONFIG_TI_LMP92064 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TLC4541 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TLC4541 new file mode 100644 index 0000000000000000000000000000000000000000..c8c8183fd385bb2f8a215fddfb301634ec9b526d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TLC4541 @@ -0,0 +1 @@ +# CONFIG_TI_TLC4541 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TMAG5273 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TMAG5273 new file mode 100644 index 0000000000000000000000000000000000000000..31297ff62f03f41c838c5d95931e767ae3d2c1f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TMAG5273 @@ -0,0 +1 @@ +# CONFIG_TI_TMAG5273 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TSC2046 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TSC2046 new file mode 100644 index 0000000000000000000000000000000000000000..32d63cd874466e13497611fec12be03d7dfa067b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TSC2046 @@ -0,0 +1 @@ +# CONFIG_TI_TSC2046 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP006 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP006 new file mode 100644 index 0000000000000000000000000000000000000000..d76f2b090ee503fec4e603af0bcc37d405fed1f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP006 @@ -0,0 +1 @@ +# CONFIG_TMP006 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP007 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP007 new file mode 100644 index 0000000000000000000000000000000000000000..a3a96d4a8b3c1fb3c24447e99d4a090d43b1346d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP007 @@ -0,0 +1 @@ +# CONFIG_TMP007 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP117 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP117 new file mode 100644 index 0000000000000000000000000000000000000000..68663fdbed8c5f038fa261ae5a4a5785976e4529 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP117 @@ -0,0 +1 @@ +# CONFIG_TMP117 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOPSTAR_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOPSTAR_LAPTOP new file mode 100644 index 0000000000000000000000000000000000000000..d38a75930deab4980afb4a28f5c76d283a71f3a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOPSTAR_LAPTOP @@ -0,0 +1 @@ +CONFIG_TOPSTAR_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_BT_RFKILL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_BT_RFKILL new file mode 100644 index 0000000000000000000000000000000000000000..07f3cf8b1f7c75a6ef406c7c7f9a9ad4e971e600 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_BT_RFKILL @@ -0,0 +1 @@ +CONFIG_TOSHIBA_BT_RFKILL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_HAPS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_HAPS new file mode 100644 index 0000000000000000000000000000000000000000..09c625d33293b4c9daa39f32c287c195c62941cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_HAPS @@ -0,0 +1 @@ +# CONFIG_TOSHIBA_HAPS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_WMI new file mode 100644 index 0000000000000000000000000000000000000000..37af276daa73693c049b132c21336933e80fde9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_WMI @@ -0,0 +1 @@ +# CONFIG_TOSHIBA_WMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7877 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7877 new file mode 100644 index 0000000000000000000000000000000000000000..822e85f151a5a0956cb7b9f4ad97bdd118522ab6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7877 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_AD7877 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7879 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7879 new file mode 100644 index 0000000000000000000000000000000000000000..c3836e69d50abccadde0fac39c6d2bbd1c8fed75 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7879 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_AD7879 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ADS7846 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ADS7846 new file mode 100644 index 0000000000000000000000000000000000000000..dd12ad15c85bd542db9eb246e1e24f43d3166d24 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ADS7846 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ADS7846 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ATMEL_MXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ATMEL_MXT new file mode 100644 index 0000000000000000000000000000000000000000..2fd6137904cfb074cb927adad3c92d78aee083d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ATMEL_MXT @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AUO_PIXCIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AUO_PIXCIR new file mode 100644 index 0000000000000000000000000000000000000000..b33f61f376bbef6b79f53bebf9543534294506b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AUO_PIXCIR @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21013 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21013 new file mode 100644 index 0000000000000000000000000000000000000000..547232cb26839c7f41f194691cdf1feec659b8a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21013 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_BU21013 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21029 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21029 new file mode 100644 index 0000000000000000000000000000000000000000..231bdaf59ee5dc29adb820c90d60e119bb09d163 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21029 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_BU21029 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 new file mode 100644 index 0000000000000000000000000000000000000000..20fb6c5e9f0ea9cbcae2f4dc74c6b256fe0e9977 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_COLIBRI_VF50 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_COLIBRI_VF50 new file mode 100644 index 0000000000000000000000000000000000000000..2629724240b65d76d72ab46c63faa548bd896697 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_COLIBRI_VF50 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMA140 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMA140 new file mode 100644 index 
0000000000000000000000000000000000000000..fa03c444e54b3d1db376fea3f4e5364ddd2ce9a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMA140 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMG110 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMG110 new file mode 100644 index 0000000000000000000000000000000000000000..0e43d53b5930f495b9b053c3b1062e70182a0881 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMG110 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP4_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP4_CORE new file mode 100644 index 0000000000000000000000000000000000000000..9a38f4fd8d0775150a42f274be6d3b5e7009b063 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP4_CORE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP5 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP5 new file mode 100644 index 0000000000000000000000000000000000000000..58375d2ed6e464e113152f0175966a42f9a71edd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP5 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP_CORE new file mode 100644 index 0000000000000000000000000000000000000000..aa92f1028348883e57cd96b6e1447b404aab4756 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP_CORE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_DYNAPRO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_DYNAPRO new file mode 100644 index 0000000000000000000000000000000000000000..a392363c08e3eab299948b7baa5c08a149ef48eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_DYNAPRO @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_DYNAPRO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EDT_FT5X06 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EDT_FT5X06 new file mode 100644 index 0000000000000000000000000000000000000000..c84fdf491d7eee5bc99038c4968e780537b405a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EDT_FT5X06 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EETI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EETI new file mode 100644 index 0000000000000000000000000000000000000000..6fdac1278637d5db5becb9462c643ce542d496aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EETI @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_EETI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EGALAX_SERIAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EGALAX_SERIAL new file mode 100644 index 0000000000000000000000000000000000000000..f3e1b7a98af7395a798b865745d6d2317308225d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EGALAX_SERIAL @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EKTF2127 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EKTF2127 new file mode 100644 index 0000000000000000000000000000000000000000..2b8f3d5944344f8c88c26e983374f326de19ba30 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EKTF2127 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_EKTF2127 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELAN new file mode 100644 index 0000000000000000000000000000000000000000..9b42854bababcd41dc8b65d4430ab4a7828bc16b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELAN @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ELAN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELO new file mode 100644 index 0000000000000000000000000000000000000000..889c6b213f9b980abbf194c72606b022cb04ba80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELO @@ -0,0 +1 @@ +CONFIG_TOUCHSCREEN_ELO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EXC3000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EXC3000 new file mode 100644 index 0000000000000000000000000000000000000000..99f4f0f0f8f16176e1b2a07137b4ad364eea0ccb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EXC3000 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_EXC3000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_FUJITSU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_FUJITSU new file mode 100644 index 0000000000000000000000000000000000000000..f9c8e6cc39f55b2c661425323368170cebb82a18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_FUJITSU @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_FUJITSU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GOODIX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GOODIX new file mode 100644 index 0000000000000000000000000000000000000000..2cd772276bedf149cd6f77ef245bbcad77603b00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GOODIX @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_GOODIX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GUNZE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GUNZE new file mode 100644 index 0000000000000000000000000000000000000000..138d24dcb40c293c93157804631276f1143d48fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GUNZE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_GUNZE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HAMPSHIRE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HAMPSHIRE new file mode 100644 index 0000000000000000000000000000000000000000..6030c1a6c8349057568f7b1973005f54967b4b38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HAMPSHIRE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIDEEP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIDEEP new file mode 100644 index 0000000000000000000000000000000000000000..95f913e6fac9f74586e62abde2c471b0c2bd5f97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIDEEP @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_HIDEEP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIMAX_HX83112B b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIMAX_HX83112B new file mode 100644 index 0000000000000000000000000000000000000000..6b38ce12978c91d39c163859f09772d669bb11ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIMAX_HX83112B @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYCON_HY46XX 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYCON_HY46XX new file mode 100644 index 0000000000000000000000000000000000000000..0514f62a88e9b819b7a682790f00691eeb175103 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYCON_HY46XX @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX new file mode 100644 index 0000000000000000000000000000000000000000..cfc752b4558656a6974f4842ec0e71a7237bb735 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILI210X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILI210X new file mode 100644 index 0000000000000000000000000000000000000000..b8045cd897bdbee369c513a8e0f31a37a4b690ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILI210X @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ILI210X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILITEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILITEK new file mode 100644 index 0000000000000000000000000000000000000000..77f5ae298ede36d9909b7cd6a86978e09b919bb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILITEK @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ILITEK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IMAGIS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IMAGIS new file mode 100644 index 0000000000000000000000000000000000000000..454a356efa2ac93ef047db6b52dd79e9730e733b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IMAGIS @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_IMAGIS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_INEXIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_INEXIO new file mode 100644 index 0000000000000000000000000000000000000000..5b2d7d9fa692074f327b3dfd81c9be36f3822a09 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_INEXIO @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_INEXIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS5XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS5XX new file mode 100644 index 0000000000000000000000000000000000000000..cf0340b28c52cb97b362b1e645915e03708061ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS5XX @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_IQS5XX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS7211 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS7211 new file mode 100644 index 0000000000000000000000000000000000000000..184ee14a0bdb4a492ef0bc6762235f56995b186a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS7211 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_IQS7211 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MAX11801 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MAX11801 new file mode 100644 index 0000000000000000000000000000000000000000..2273504dac44ab820493d7e3444b2fd7d48b0271 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MAX11801 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MAX11801 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MCS5000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MCS5000 new file mode 100644 index 
0000000000000000000000000000000000000000..5e3c796631011e29b16ad8d3f077435f82d52016 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MCS5000 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MCS5000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MELFAS_MIP4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MELFAS_MIP4 new file mode 100644 index 0000000000000000000000000000000000000000..0adb4f84a0f32a7ae3011457ea6c45acdbb6ea37 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MELFAS_MIP4 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MMS114 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MMS114 new file mode 100644 index 0000000000000000000000000000000000000000..5220b17a780c567a642010d18bf8cd478e5dad7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MMS114 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MMS114 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MSG2638 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MSG2638 new file mode 100644 index 0000000000000000000000000000000000000000..5fead293a15959f58ac09b8ce5cda287a5ba5687 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MSG2638 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MSG2638 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MTOUCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MTOUCH new file mode 100644 index 0000000000000000000000000000000000000000..1e80aceb8e0ac14128d0a589c2ff9b03709a2a98 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MTOUCH @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MTOUCH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS new file mode 100644 index 0000000000000000000000000000000000000000..1c325921ea7d1dc8fda1bc728bd4b11a663f1ec7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PENMOUNT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PENMOUNT new file mode 100644 index 0000000000000000000000000000000000000000..c86237b5a10a361e8b1176c8eeec60ad074047b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PENMOUNT @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_PENMOUNT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PIXCIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PIXCIR new file mode 100644 index 0000000000000000000000000000000000000000..30d025e2849c7e678b7f76cc6daacbd49480198b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PIXCIR @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_PIXCIR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_RM_TS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_RM_TS new file mode 100644 index 0000000000000000000000000000000000000000..ddaacfcb2c6e309c0b956e44bbdb67394161de86 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_RM_TS @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_RM_TS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ROHM_BU21023 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ROHM_BU21023 new file mode 100644 index 0000000000000000000000000000000000000000..94349fdcb4fd12021a08bee809d50774841ec191 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ROHM_BU21023 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_S6SY761 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_S6SY761 new file mode 100644 index 0000000000000000000000000000000000000000..3b9b34f031eced3a2c3b699a7ebfa8441cdbc61d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_S6SY761 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_S6SY761 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SILEAD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SILEAD new file mode 100644 index 0000000000000000000000000000000000000000..0c094b8525d30ebd515ef3eb452cfad10887f86f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SILEAD @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_SILEAD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SIS_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SIS_I2C new file mode 100644 index 0000000000000000000000000000000000000000..1522b1e2fa03ed0316729fbb07069cd4019b596e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SIS_I2C @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_SIS_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ST1232 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ST1232 new file mode 100644 index 0000000000000000000000000000000000000000..a665e9edac131ea1e08cf9170ee199d12e0e67cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ST1232 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ST1232 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_STMFTS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_STMFTS new file mode 100644 index 0000000000000000000000000000000000000000..0b1cd21e702ccfdecc98374010d90d5f663613f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_STMFTS @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_STMFTS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SURFACE3_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SURFACE3_SPI new file mode 100644 index 0000000000000000000000000000000000000000..2254ef05f9b9c8c62947e15a2223d018255645a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SURFACE3_SPI @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SX8654 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SX8654 new file mode 100644 index 0000000000000000000000000000000000000000..d63f10cfbe499d68f7e9fefef0de9979bdbf9da2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SX8654 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_SX8654 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHIT213 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHIT213 new file mode 100644 index 0000000000000000000000000000000000000000..f003709277170271cadd9a0e0675af352c21ae53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHIT213 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHRIGHT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHRIGHT new file mode 100644 index 0000000000000000000000000000000000000000..a7c81b4512cd5fa611ffc975e710b8140da3c578 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHRIGHT @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHWIN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHWIN new file mode 100644 index 0000000000000000000000000000000000000000..8ed1bea3129015e586b2d2d53217a007c4f8871e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHWIN @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TPS6507X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TPS6507X new file mode 100644 index 0000000000000000000000000000000000000000..6f56a322e032f7bca3c6d613540330cc66fe3432 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TPS6507X @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TPS6507X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2004 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2004 new file mode 100644 index 0000000000000000000000000000000000000000..6ecfb739ddb7e6c0d65ac1b01470a5499c157c0c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2004 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TSC2004 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2005 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2005 new file mode 100644 index 0000000000000000000000000000000000000000..6c1893766c8cdd97b18670289f0436247d8e64f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2005 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TSC2005 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2007 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2007 new file mode 100644 index 0000000000000000000000000000000000000000..5232f1df83a3b792cc9f8bcc5eafb8089333072d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2007 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TSC2007 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC_SERIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC_SERIO new file mode 100644 index 0000000000000000000000000000000000000000..05b67b58de9707e79610c46bd563bee2ca2b6ec6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC_SERIO @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_USB_COMPOSITE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_USB_COMPOSITE new file mode 100644 index 0000000000000000000000000000000000000000..b376949578a3e851aa29ad94217e825742591e18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_USB_COMPOSITE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_I2C new file mode 100644 index 0000000000000000000000000000000000000000..efff558f0b19c7ad0ac132798de05540d4ae5cbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_I2C @@ -0,0 +1 @@ +CONFIG_TOUCHSCREEN_WACOM_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_W8001 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_W8001 new file mode 100644 index 0000000000000000000000000000000000000000..27610e7df87aaac9bf7a1437c51ef279d7de099e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_W8001 @@ -0,0 +1 @@ +CONFIG_TOUCHSCREEN_WACOM_W8001=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WDT87XX_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WDT87XX_I2C new file mode 
100644 index 0000000000000000000000000000000000000000..66be75a9dfd8bf31e7158eb104b8c59bdadcec39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WDT87XX_I2C @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZET6223 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZET6223 new file mode 100644 index 0000000000000000000000000000000000000000..ed3563855dda8117443be7d2f75285cedc373f4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZET6223 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ZET6223 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZFORCE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZFORCE new file mode 100644 index 0000000000000000000000000000000000000000..51a44c719cf5ba29b4cb9573f57832fb9005fee8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZFORCE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ZFORCE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZINITIX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZINITIX new file mode 100644 index 0000000000000000000000000000000000000000..72fb1fc318f2911983913dbe63e4b26af02f247a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZINITIX @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ZINITIX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TPL0102 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TPL0102 new file mode 100644 index 0000000000000000000000000000000000000000..a74d18f7572f1b8ff552f6862db0bcfc919832f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TPL0102 @@ -0,0 +1 @@ +# CONFIG_TPL0102 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TQMX86_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TQMX86_WDT new file mode 100644 index 0000000000000000000000000000000000000000..38671f9df968f737dab2482ab7a3a31442621501 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TQMX86_WDT @@ -0,0 +1 @@ +# CONFIG_TQMX86_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2583 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2583 new file mode 100644 index 0000000000000000000000000000000000000000..04de2624019617137e88291d83eb82d4abd95b65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2583 @@ -0,0 +1 @@ +# CONFIG_TSL2583 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2591 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2591 new file mode 100644 index 0000000000000000000000000000000000000000..bccc3cfc94d467843582dd88d2ebb28f76107e0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2591 @@ -0,0 +1 @@ +# CONFIG_TSL2591 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2772 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2772 new file mode 100644 index 0000000000000000000000000000000000000000..5a6ef0c7ae7a3b0aea353268466b4051e8b9f6f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2772 @@ -0,0 +1 @@ +# CONFIG_TSL2772 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL4531 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL4531 new file mode 100644 index 0000000000000000000000000000000000000000..f83080df9daa3e442943884427c6d81804083d95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL4531 @@ -0,0 +1 @@ +# CONFIG_TSL4531 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS01 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS01 new file mode 100644 index 0000000000000000000000000000000000000000..278ed9b09181a6fe6130f656f5404958ce96bb3c --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS01 @@ -0,0 +1 @@ +# CONFIG_TSYS01 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS02D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS02D new file mode 100644 index 0000000000000000000000000000000000000000..a81a8031d5497beedc434130c224656c9ee10610 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS02D @@ -0,0 +1 @@ +# CONFIG_TSYS02D is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TULIP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TULIP new file mode 100644 index 0000000000000000000000000000000000000000..ddcccdbdfe8f7683b5adc7d3d7c957a8be8f73cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TULIP @@ -0,0 +1 @@ +# CONFIG_TULIP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TYPEC_FUSB302 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TYPEC_FUSB302 new file mode 100644 index 0000000000000000000000000000000000000000..9633e1956bc3a561ac77283478ecfbc96ab6ab0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TYPEC_FUSB302 @@ -0,0 +1 @@ +CONFIG_TYPEC_FUSB302=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEFI_CPER_X86 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEFI_CPER_X86 new file mode 100644 index 0000000000000000000000000000000000000000..aa26eae2bb14ed55d1fa048a25d4bb2e3b86d8cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEFI_CPER_X86 @@ -0,0 +1 @@ +CONFIG_UEFI_CPER_X86=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEVENT_HELPER_PATH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEVENT_HELPER_PATH new file mode 100644 index 0000000000000000000000000000000000000000..6f689df15b1155208e2532d1b43068be78d6e1f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEVENT_HELPER_PATH @@ -0,0 +1 @@ +CONFIG_UEVENT_HELPER_PATH="" diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ULI526X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ULI526X new file mode 100644 index 0000000000000000000000000000000000000000..9afaec20ff500c077faeae96cf8e243db2940b0d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ULI526X @@ -0,0 +1 @@ +# CONFIG_ULI526X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_UNIXWARE_DISKLABEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UNIXWARE_DISKLABEL new file mode 100644 index 0000000000000000000000000000000000000000..94a5d7bd1c339a1466031b194a0b85869f391553 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UNIXWARE_DISKLABEL @@ -0,0 +1 @@ +CONFIG_UNIXWARE_DISKLABEL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_US5182D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_US5182D new file mode 100644 index 0000000000000000000000000000000000000000..176817910fb9370302027185b30c7c4710f5eb66 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_US5182D @@ -0,0 +1 @@ +# CONFIG_US5182D is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_CHAOSKEY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_CHAOSKEY new file mode 100644 index 0000000000000000000000000000000000000000..f8d3f2f8c80f3d803ef655c66483d4c08f6a7aea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_CHAOSKEY @@ -0,0 +1 @@ +# CONFIG_USB_CHAOSKEY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_EHCI_HCD_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_EHCI_HCD_PLATFORM new file mode 100644 index 0000000000000000000000000000000000000000..28e68839c0422ab238cdd1077aba480ebbd9720b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_EHCI_HCD_PLATFORM @@ -0,0 +1 @@ +# CONFIG_USB_EHCI_HCD_PLATFORM is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_LGM_PHY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_LGM_PHY new file mode 100644 index 0000000000000000000000000000000000000000..793c45154118f119a9e9054c9346431231f47d32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_LGM_PHY @@ -0,0 +1 @@ +# CONFIG_USB_LGM_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_RNDIS_WLAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_RNDIS_WLAN new file mode 100644 index 0000000000000000000000000000000000000000..d576cda2cf5315f4ffe0af8baaf936c595cc34bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_RNDIS_WLAN @@ -0,0 +1 @@ +# CONFIG_USB_NET_RNDIS_WLAN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_SR9700 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_SR9700 new file mode 100644 index 0000000000000000000000000000000000000000..8002721e31b7696542740527b652cdb7ee3622cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_SR9700 @@ -0,0 +1 @@ +# CONFIG_USB_NET_SR9700 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_PULSE8_CEC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_PULSE8_CEC new file mode 100644 index 0000000000000000000000000000000000000000..bd9c5b9bc8cec7c2dbe6ae5029ca809316fa624f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_PULSE8_CEC @@ -0,0 +1 @@ +CONFIG_USB_PULSE8_CEC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_RAINSHADOW_CEC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_RAINSHADOW_CEC new file mode 100644 index 0000000000000000000000000000000000000000..c6605282b6d41fe4913de65d99d92bf89d3e949c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_RAINSHADOW_CEC @@ -0,0 +1 @@ +CONFIG_USB_RAINSHADOW_CEC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ROLES_INTEL_XHCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ROLES_INTEL_XHCI new file mode 100644 index 0000000000000000000000000000000000000000..4613084180a62c97f195ed78be2aeb31740e1609 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ROLES_INTEL_XHCI @@ -0,0 +1 @@ +CONFIG_USB_ROLES_INTEL_XHCI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_CONSOLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_CONSOLE new file mode 100644 index 0000000000000000000000000000000000000000..d0dc474f93df352e790f43730dc70f8ee5161180 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_CONSOLE @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_CONSOLE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_MOS7715_PARPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_MOS7715_PARPORT new file mode 100644 index 0000000000000000000000000000000000000000..87be7826c0041c5f181df17e6b298c0f88e1d60f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_MOS7715_PARPORT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_MOS7715_PARPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_SIMPLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_SIMPLE new file mode 100644 index 0000000000000000000000000000000000000000..d4a7817eecf489b9e161aece4d49b53ade60c8a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_SIMPLE @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_SIMPLE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SPEEDTOUCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SPEEDTOUCH new file mode 100644 index 0000000000000000000000000000000000000000..4d2b84780e5c7f1f93e434162d345241df178bd3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SPEEDTOUCH @@ -0,0 +1 
@@ +CONFIG_USB_SPEEDTOUCH=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_UHCI_HCD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_UHCI_HCD new file mode 100644 index 0000000000000000000000000000000000000000..4722af6171e0f7fc8fe736d7733052836e3af42c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_UHCI_HCD @@ -0,0 +1 @@ +# CONFIG_USB_UHCI_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ULPI_BUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ULPI_BUS new file mode 100644 index 0000000000000000000000000000000000000000..81571d175b5d7fb702ecbed77feae967516ca3f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ULPI_BUS @@ -0,0 +1 @@ +# CONFIG_USB_ULPI_BUS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_USS720 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_USS720 new file mode 100644 index 0000000000000000000000000000000000000000..ca903371e4ad53ca2e9393fffd13675acb14bd5b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_USS720 @@ -0,0 +1 @@ +CONFIG_USB_USS720=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XEN_HCD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XEN_HCD new file mode 100644 index 0000000000000000000000000000000000000000..cca3e16d1ab10ca409f2052240414357801083c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XEN_HCD @@ -0,0 +1 @@ +# CONFIG_USB_XEN_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_DBGCAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_DBGCAP new file mode 100644 index 0000000000000000000000000000000000000000..3cf92a71ad6f96cc5f971ced6a110feed94283d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_DBGCAP @@ -0,0 +1 @@ +CONFIG_USB_XHCI_DBGCAP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_PLATFORM new file mode 100644 index 0000000000000000000000000000000000000000..c824b0c45becde92a9c9a582940fa4d44975b397 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_PLATFORM @@ -0,0 +1 @@ +# CONFIG_USB_XHCI_PLATFORM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_RETURN_NOTIFIER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_RETURN_NOTIFIER new file mode 100644 index 0000000000000000000000000000000000000000..02295270b59a68c9567f881c1ffc83610b813e5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_RETURN_NOTIFIER @@ -0,0 +1 @@ +CONFIG_USER_RETURN_NOTIFIER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_STACKTRACE_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_STACKTRACE_SUPPORT new file mode 100644 index 0000000000000000000000000000000000000000..079153a892dc2fec217ae3cb94d9130a0518d322 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_STACKTRACE_SUPPORT @@ -0,0 +1 @@ +CONFIG_USER_STACKTRACE_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USING_FPU_IN_KERNEL_NONATOMIC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USING_FPU_IN_KERNEL_NONATOMIC new file mode 100644 index 0000000000000000000000000000000000000000..4b9bc743b271a4be5008e98b34907933937ff115 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USING_FPU_IN_KERNEL_NONATOMIC @@ -0,0 +1 @@ +# CONFIG_USING_FPU_IN_KERNEL_NONATOMIC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_MMTIMER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_MMTIMER new file mode 100644 index 0000000000000000000000000000000000000000..93fe2251b4ac3d27a4e16ff2f2cc02a1a3dc3855 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_MMTIMER 
@@ -0,0 +1 @@ +CONFIG_UV_MMTIMER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_SYSFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_SYSFS new file mode 100644 index 0000000000000000000000000000000000000000..d5ea1fa7ee09749006a4248e80833e4ff3af4708 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_SYSFS @@ -0,0 +1 @@ +# CONFIG_UV_SYSFS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL3020 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL3020 new file mode 100644 index 0000000000000000000000000000000000000000..256fb80de2f79f6922843e75dc5f8435a6a12cfd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL3020 @@ -0,0 +1 @@ +# CONFIG_VCNL3020 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4000 new file mode 100644 index 0000000000000000000000000000000000000000..ae803eceddf07f914362fec3778d9a807eb6862f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4000 @@ -0,0 +1 @@ +# CONFIG_VCNL4000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4035 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4035 new file mode 100644 index 0000000000000000000000000000000000000000..2fbd58f6d8ed18cda01e7cd2314842b8b19b7112 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4035 @@ -0,0 +1 @@ +# CONFIG_VCNL4035 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6030 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6030 new file mode 100644 index 0000000000000000000000000000000000000000..a78163437edf65c5953178ea6097a37853039f20 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6030 @@ -0,0 +1 @@ +# CONFIG_VEML6030 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6070 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6070 new file mode 100644 index 0000000000000000000000000000000000000000..ad005705fa546ac1e480eac7e3404d77d38c9fbd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6070 @@ -0,0 +1 @@ +# CONFIG_VEML6070 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_ADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_ADC new file mode 100644 index 0000000000000000000000000000000000000000..33803bea02ee348304f44de6cbd414c4fca2078f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_ADC @@ -0,0 +1 @@ +# CONFIG_VF610_ADC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_DAC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_DAC new file mode 100644 index 0000000000000000000000000000000000000000..556c9934d3ebdb6b4b156898fbcd066bfa4ca182 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_DAC @@ -0,0 +1 @@ +# CONFIG_VF610_DAC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_DEVICE_CDEV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_DEVICE_CDEV new file mode 100644 index 0000000000000000000000000000000000000000..f6c5ea150d6b35e8b28a87ce8a3e88e69ac38a59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_DEVICE_CDEV @@ -0,0 +1 @@ +# CONFIG_VFIO_DEVICE_CDEV is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_PCI_VGA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_PCI_VGA new file mode 100644 index 0000000000000000000000000000000000000000..047bcb8d655c50c5c34526364b8e73ef2ce68717 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_PCI_VGA @@ -0,0 +1 @@ +# CONFIG_VFIO_PCI_VGA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIA_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIA_WDT new file mode 100644 index 
0000000000000000000000000000000000000000..cf0dc5e89921b8ad8a583264f031fc63c7885aed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIA_WDT @@ -0,0 +1 @@ +CONFIG_VIA_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIPERBOARD_ADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIPERBOARD_ADC new file mode 100644 index 0000000000000000000000000000000000000000..6427af502ff258e6cee2bfe2630aeb752c7e2204 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIPERBOARD_ADC @@ -0,0 +1 @@ +# CONFIG_VIPERBOARD_ADC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIRT_WIFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIRT_WIFI new file mode 100644 index 0000000000000000000000000000000000000000..54a643a6503f5aa8876506b151c6c29a60b7e276 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIRT_WIFI @@ -0,0 +1 @@ +# CONFIG_VIRT_WIFI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL53L0X_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL53L0X_I2C new file mode 100644 index 0000000000000000000000000000000000000000..2ee7f58bb9125fb0df0157f5af7c78e274f4a553 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL53L0X_I2C @@ -0,0 +1 @@ +# CONFIG_VL53L0X_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL6180 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL6180 new file mode 100644 index 0000000000000000000000000000000000000000..47fbc80f53c94c07d3fb8ba6325140943cf01fb5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL6180 @@ -0,0 +1 @@ +# CONFIG_VL6180 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMAP_PFN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMAP_PFN new file mode 100644 index 0000000000000000000000000000000000000000..f522eff5bcda6c8ccecf4916bd71ac7e3a27c0bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMAP_PFN @@ -0,0 +1 @@ +CONFIG_VMAP_PFN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMGENID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMGENID new file mode 100644 index 0000000000000000000000000000000000000000..4811b5ce5b72fee2bbe6b8a3dbf450484df6de09 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMGENID @@ -0,0 +1 @@ +CONFIG_VMGENID=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VZ89X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VZ89X new file mode 100644 index 0000000000000000000000000000000000000000..3c56d573f03708c4a30082b2a08da61aa9f20830 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VZ89X @@ -0,0 +1 @@ +# CONFIG_VZ89X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83627HF_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83627HF_WDT new file mode 100644 index 0000000000000000000000000000000000000000..f2cd258ebe03ab78175f98992fa2ddcfd9fbaf84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83627HF_WDT @@ -0,0 +1 @@ +CONFIG_W83627HF_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83877F_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83877F_WDT new file mode 100644 index 0000000000000000000000000000000000000000..f72b7e392a340900e5cc7add5c9df7100a1e3baf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83877F_WDT @@ -0,0 +1 @@ +CONFIG_W83877F_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83977F_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83977F_WDT new file mode 100644 index 0000000000000000000000000000000000000000..77f20d78f4d9ca8191b78ec41c510e3979b03b23 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83977F_WDT @@ -0,0 +1 @@ +CONFIG_W83977F_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WAFER_WDT 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WAFER_WDT new file mode 100644 index 0000000000000000000000000000000000000000..40857148cc4674977faa1efd9ed98b42c082b125 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WAFER_WDT @@ -0,0 +1 @@ +# CONFIG_WAFER_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WANT_DEV_COREDUMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WANT_DEV_COREDUMP new file mode 100644 index 0000000000000000000000000000000000000000..1151440d7d750f594c6e154b9a9245ba46942597 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WANT_DEV_COREDUMP @@ -0,0 +1 @@ +CONFIG_WANT_DEV_COREDUMP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WCN36XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WCN36XX new file mode 100644 index 0000000000000000000000000000000000000000..f6863da140891279b2a440e424d8f462bffdbfa3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WCN36XX @@ -0,0 +1 @@ +# CONFIG_WCN36XX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WFX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WFX new file mode 100644 index 0000000000000000000000000000000000000000..aa136a90c6a29c03a30b8b6b42cf2d4fe0bec12d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WFX @@ -0,0 +1 @@ +# CONFIG_WFX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIL6210 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIL6210 new file mode 100644 index 0000000000000000000000000000000000000000..e15bf8c174888ba3b9b3433a259255d4c273b69f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIL6210 @@ -0,0 +1 @@ +# CONFIG_WIL6210 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SDIO new file mode 100644 index 0000000000000000000000000000000000000000..603383d16f392ff514faaecbf71a98461be1f330 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SDIO @@ -0,0 +1 @@ +# CONFIG_WILC1000_SDIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SPI new file mode 100644 index 0000000000000000000000000000000000000000..a12849b374c4eb0a748089e2cb4a468524d3f26f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SPI @@ -0,0 +1 @@ +# CONFIG_WILC1000_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINBOND_840 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINBOND_840 new file mode 100644 index 0000000000000000000000000000000000000000..61e16e73f6fb8593ccdf1dc78b5ceef6cebbf792 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINBOND_840 @@ -0,0 +1 @@ +# CONFIG_WINBOND_840 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINMATE_FM07_KEYS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINMATE_FM07_KEYS new file mode 100644 index 0000000000000000000000000000000000000000..2ad330a0001e100ed5a9d611fd50ad4b2f36f506 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINMATE_FM07_KEYS @@ -0,0 +1 @@ +# CONFIG_WINMATE_FM07_KEYS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIRELESS_HOTKEY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIRELESS_HOTKEY new file mode 100644 index 0000000000000000000000000000000000000000..8b379c4a93e6857b76490ca65da53ff9c6481d45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIRELESS_HOTKEY @@ -0,0 +1 @@ +# CONFIG_WIRELESS_HOTKEY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN new file mode 100644 index 
0000000000000000000000000000000000000000..acb27493069398f7ff41ce7e13ab4be374c19c53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN @@ -0,0 +1 @@ +CONFIG_WLAN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ADMTEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ADMTEK new file mode 100644 index 0000000000000000000000000000000000000000..b38ec2938d7100cd928ae52a60087ac6b64004be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ADMTEK @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_ADMTEK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATH new file mode 100644 index 0000000000000000000000000000000000000000..bbd0da798dc6dfde1929104101177593cc3e42a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATH @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_ATH=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATMEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATMEL new file mode 100644 index 0000000000000000000000000000000000000000..0cf5b4233396015a6f0da579bed9eda70ce834f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATMEL @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_ATMEL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_BROADCOM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_BROADCOM new file mode 100644 index 0000000000000000000000000000000000000000..24736aa895fb41853db85a1dd6a37468b1ae6630 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_BROADCOM @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_BROADCOM=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_CISCO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_CISCO new file mode 100644 index 0000000000000000000000000000000000000000..19bbb0848582da488bd46cfe87b03657a2a4bf11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_CISCO @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_CISCO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTEL new file mode 100644 index 0000000000000000000000000000000000000000..80de8dade29e2f41ae8d2a9b10239498f8e8bc0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTEL @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_INTEL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTERSIL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTERSIL new file mode 100644 index 0000000000000000000000000000000000000000..842e3e9cc744acf63516963e1327772c65f00e88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTERSIL @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_INTERSIL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MARVELL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MARVELL new file mode 100644 index 0000000000000000000000000000000000000000..46fdde2171df22cb9c40a19d0a295a9441c811c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MARVELL @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_MARVELL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MEDIATEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MEDIATEK new file mode 100644 index 0000000000000000000000000000000000000000..47b49d2cbca910e2eb598cc6881f5e28984fb654 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MEDIATEK @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_MEDIATEK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MICROCHIP 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MICROCHIP new file mode 100644 index 0000000000000000000000000000000000000000..48f5fabaecfb8c512ed310ac2e89bef43ba3db97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MICROCHIP @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_MICROCHIP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_PURELIFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_PURELIFI new file mode 100644 index 0000000000000000000000000000000000000000..2261b86f00582b902e3a64cd516650bb15f94211 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_PURELIFI @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_PURELIFI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_QUANTENNA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_QUANTENNA new file mode 100644 index 0000000000000000000000000000000000000000..94a9969d70e71297029923cfca98a659cc405078 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_QUANTENNA @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_QUANTENNA=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RALINK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RALINK new file mode 100644 index 0000000000000000000000000000000000000000..604b617c52cef9746e0a34c66ea1fbcadeb36b42 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RALINK @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_RALINK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_REALTEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_REALTEK new file mode 100644 index 0000000000000000000000000000000000000000..2f41028dd08777dff90523bcbfcee08cdcff40f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_REALTEK @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_REALTEK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RSI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RSI new file mode 100644 index 0000000000000000000000000000000000000000..255ed59d805c74fcb67f53a182ed0adb5b212c3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RSI @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_RSI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_SILABS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_SILABS new file mode 100644 index 0000000000000000000000000000000000000000..d381a7c18b034129871bb6709ba85cfbc5270e1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_SILABS @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_SILABS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ST new file mode 100644 index 0000000000000000000000000000000000000000..39df8e212faa2bf7657474bb4a6baabe22f5e1b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ST @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_ST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_TI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_TI new file mode 100644 index 0000000000000000000000000000000000000000..68c9bcef3452976adb3dc7ef2fedec4d7823712b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_TI @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_TI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ZYDAS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ZYDAS new file mode 100644 index 0000000000000000000000000000000000000000..416b6599496fc1881d802006194b930d0c6f728f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ZYDAS @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_ZYDAS is not set diff 
--git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WMI_BMOF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WMI_BMOF new file mode 100644 index 0000000000000000000000000000000000000000..61dcf543be3ae7d9439b32c3b91c8654b473e703 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WMI_BMOF @@ -0,0 +1 @@ +CONFIG_WMI_BMOF=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_ACPI_CPUFREQ_CPB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_ACPI_CPUFREQ_CPB new file mode 100644 index 0000000000000000000000000000000000000000..db5a9b22be53392f4c834869f372b954344acf52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_ACPI_CPUFREQ_CPB @@ -0,0 +1 @@ +CONFIG_X86_ACPI_CPUFREQ_CPB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_AMD_PSTATE_UT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_AMD_PSTATE_UT new file mode 100644 index 0000000000000000000000000000000000000000..27a13b9c51803c9f1db3e854c759a1cb2c7f8914 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_AMD_PSTATE_UT @@ -0,0 +1 @@ +# CONFIG_X86_AMD_PSTATE_UT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CET new file mode 100644 index 0000000000000000000000000000000000000000..6b7f2db8e7afcf6423165ee91a179885a48939d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CET @@ -0,0 +1 @@ +CONFIG_X86_CET=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CMOV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CMOV new file mode 100644 index 0000000000000000000000000000000000000000..ee7029780b0bbc92b426051ebee89b98d1054329 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CMOV @@ -0,0 +1 @@ +CONFIG_X86_CMOV=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_DEBUGCTLMSR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_DEBUGCTLMSR new file mode 100644 index 0000000000000000000000000000000000000000..15c1dd3e760b03f9fb82ac018d513ca2452d0c70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_DEBUGCTLMSR @@ -0,0 +1 @@ +CONFIG_X86_DEBUGCTLMSR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_HV_CALLBACK_VECTOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_HV_CALLBACK_VECTOR new file mode 100644 index 0000000000000000000000000000000000000000..cf8d705e4d5040e0016e4d1e35563f30c1e86377 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_HV_CALLBACK_VECTOR @@ -0,0 +1 @@ +CONFIG_X86_HV_CALLBACK_VECTOR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_INTERNODE_CACHE_SHIFT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_INTERNODE_CACHE_SHIFT new file mode 100644 index 0000000000000000000000000000000000000000..76cfa7a5ea7fd304a3d5ba02af2dfc9c67314149 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_INTERNODE_CACHE_SHIFT @@ -0,0 +1 @@ +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_L1_CACHE_SHIFT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_L1_CACHE_SHIFT new file mode 100644 index 0000000000000000000000000000000000000000..27ee8728baeac4c45b0dca7bb203787f75810015 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_L1_CACHE_SHIFT @@ -0,0 +1 @@ +CONFIG_X86_L1_CACHE_SHIFT=6 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MCE_THRESHOLD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MCE_THRESHOLD new file mode 100644 index 0000000000000000000000000000000000000000..4df8f99e88268cb3eebe66e2408ed7f0133f9003 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MCE_THRESHOLD @@ -0,0 +1 @@ +CONFIG_X86_MCE_THRESHOLD=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MEM_ENCRYPT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MEM_ENCRYPT new file mode 100644 index 0000000000000000000000000000000000000000..40d268ca36c161f963d6f454683c8d8ea390b6a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MEM_ENCRYPT @@ -0,0 +1 @@ +CONFIG_X86_MEM_ENCRYPT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MINIMUM_CPU_FAMILY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MINIMUM_CPU_FAMILY new file mode 100644 index 0000000000000000000000000000000000000000..b8b342994073cc930a99af459cbca19afe1bb73c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MINIMUM_CPU_FAMILY @@ -0,0 +1 @@ +CONFIG_X86_MINIMUM_CPU_FAMILY=64 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_NEED_RELOCS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_NEED_RELOCS new file mode 100644 index 0000000000000000000000000000000000000000..8d53b5ff155c6469e27d46f16eab3b600b96fc7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_NEED_RELOCS @@ -0,0 +1 @@ +CONFIG_X86_NEED_RELOCS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_DELL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_DELL new file mode 100644 index 0000000000000000000000000000000000000000..8b178c89ebad9165e78359faad6723a1ea35fff1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_DELL @@ -0,0 +1 @@ +# CONFIG_X86_PLATFORM_DRIVERS_DELL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_HP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_HP new file mode 100644 index 0000000000000000000000000000000000000000..5230804c7fec53d9eec0bef60b6d045e88820151 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_HP @@ -0,0 +1 @@ +# CONFIG_X86_PLATFORM_DRIVERS_HP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY new file mode 100644 index 0000000000000000000000000000000000000000..b93c1350fb30ff544781ade05a76eb657b5544bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY @@ -0,0 +1 @@ +CONFIG_X86_PMEM_LEGACY=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY_DEVICE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY_DEVICE new file mode 100644 index 0000000000000000000000000000000000000000..f55010b529b1c17f4bdcc663518b9957d35326c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY_DEVICE @@ -0,0 +1 @@ +CONFIG_X86_PMEM_LEGACY_DEVICE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_POWERNOW_K8 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_POWERNOW_K8 new file mode 100644 index 0000000000000000000000000000000000000000..07b56a8248331f4481a47e015b97ddacedfedca1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_POWERNOW_K8 @@ -0,0 +1 @@ +CONFIG_X86_POWERNOW_K8=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_CENTRINO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_CENTRINO new file mode 100644 index 0000000000000000000000000000000000000000..ea1e56da22973608fd0739259ae1ec19637dbec8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_CENTRINO @@ -0,0 +1 @@ +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_LIB new file mode 100644 index 0000000000000000000000000000000000000000..15c7b319a56665b2522ab14cea3d53e402f71ab7 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_LIB @@ -0,0 +1 @@ +CONFIG_X86_SPEEDSTEP_LIB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_THERMAL_VECTOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_THERMAL_VECTOR new file mode 100644 index 0000000000000000000000000000000000000000..df08747dda7146883805a6390773dc27c39b1308 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_THERMAL_VECTOR @@ -0,0 +1 @@ +CONFIG_X86_THERMAL_VECTOR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_USER_SHADOW_STACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_USER_SHADOW_STACK new file mode 100644 index 0000000000000000000000000000000000000000..5f911c56286b886f51fc39b826ad94b0ef01e782 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_USER_SHADOW_STACK @@ -0,0 +1 @@ +# CONFIG_X86_USER_SHADOW_STACK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_VMX_FEATURE_NAMES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_VMX_FEATURE_NAMES new file mode 100644 index 0000000000000000000000000000000000000000..124b55ac91881282bb92df68bdb72db815189edc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_VMX_FEATURE_NAMES @@ -0,0 +1 @@ +CONFIG_X86_VMX_FEATURE_NAMES=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_X32_ABI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_X32_ABI new file mode 100644 index 0000000000000000000000000000000000000000..d06854274864b98af0d7ab5ffae3c681e72a841f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_X32_ABI @@ -0,0 +1 @@ +# CONFIG_X86_X32_ABI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X9250 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X9250 new file mode 100644 index 0000000000000000000000000000000000000000..76a420beefe98576f9aa46aa2196b02609541753 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X9250 @@ -0,0 +1 @@ +# CONFIG_X9250 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XENFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XENFS new file mode 100644 index 0000000000000000000000000000000000000000..2a9db2ee750b29e65cb80e3dece94de5d25689e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XENFS @@ -0,0 +1 @@ +CONFIG_XENFS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_ACPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_ACPI new file mode 100644 index 0000000000000000000000000000000000000000..0912f9c171843764d17aa1c4f76da8a21e331f57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_ACPI @@ -0,0 +1 @@ +CONFIG_XEN_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_AUTO_XLATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_AUTO_XLATE new file mode 100644 index 0000000000000000000000000000000000000000..1401d69f15f332cddc98a9ecf7ee47ee9a2e90db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_AUTO_XLATE @@ -0,0 +1 @@ +CONFIG_XEN_AUTO_XLATE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BACKEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BACKEND new file mode 100644 index 0000000000000000000000000000000000000000..838367289aaded8fce1811c260b0245ba0944d9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BACKEND @@ -0,0 +1 @@ +# CONFIG_XEN_BACKEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BALLOON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BALLOON new file mode 100644 index 0000000000000000000000000000000000000000..1210e9672ad718ff87bafdb25ec223d94eb2d88f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BALLOON @@ -0,0 +1 @@ +# CONFIG_XEN_BALLOON is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BLKDEV_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BLKDEV_FRONTEND new file mode 100644 index 0000000000000000000000000000000000000000..5b1ec5f1480ffda3f894c62244ce907069d5ba61 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BLKDEV_FRONTEND @@ -0,0 +1 @@ +CONFIG_XEN_BLKDEV_FRONTEND=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_COMPAT_XENFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_COMPAT_XENFS new file mode 100644 index 0000000000000000000000000000000000000000..280bc95c8f956495e3104d769cb5a85f9cc82556 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_COMPAT_XENFS @@ -0,0 +1 @@ +CONFIG_XEN_COMPAT_XENFS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEBUG_FS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEBUG_FS new file mode 100644 index 0000000000000000000000000000000000000000..4346991da02fc999c6cc801f67a9ec7b890df2ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEBUG_FS @@ -0,0 +1 @@ +# CONFIG_XEN_DEBUG_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEV_EVTCHN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEV_EVTCHN new file mode 100644 index 0000000000000000000000000000000000000000..a198d3b62d56dc3b50584bc7d016630284a72fd1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEV_EVTCHN @@ -0,0 +1 @@ +CONFIG_XEN_DEV_EVTCHN=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_EFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_EFI new file mode 100644 index 0000000000000000000000000000000000000000..21644fd8e20ee55de74eadb872ba41b7beaa865c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_EFI @@ -0,0 +1 @@ +CONFIG_XEN_EFI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_FBDEV_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_FBDEV_FRONTEND new file mode 100644 index 0000000000000000000000000000000000000000..dfa002e49beea50cbcfd197b50049ebf643e0fe4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_FBDEV_FRONTEND @@ -0,0 +1 @@ +# CONFIG_XEN_FBDEV_FRONTEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GNTDEV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GNTDEV new file mode 100644 index 0000000000000000000000000000000000000000..e0e2127a67f659d3fc619e43f79cf77b9bec38d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GNTDEV @@ -0,0 +1 @@ +# CONFIG_XEN_GNTDEV is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DEV_ALLOC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DEV_ALLOC new file mode 100644 index 0000000000000000000000000000000000000000..b3d34b07e12e9e17018f477df1f8f6ae66906ba3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DEV_ALLOC @@ -0,0 +1 @@ +# CONFIG_XEN_GRANT_DEV_ALLOC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DMA_ALLOC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DMA_ALLOC new file mode 100644 index 0000000000000000000000000000000000000000..1fd236c73558e97406c10dfdc1645c3247a2a8d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DMA_ALLOC @@ -0,0 +1 @@ +# CONFIG_XEN_GRANT_DMA_ALLOC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PRIVCMD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PRIVCMD new file mode 100644 index 0000000000000000000000000000000000000000..a471c328b311f1e08f788c48bde9ba103483221a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PRIVCMD @@ -0,0 +1 @@ +CONFIG_XEN_PRIVCMD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PV 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PV new file mode 100644 index 0000000000000000000000000000000000000000..4743cb300f35f4f256dd44aae411dcc683be16cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PV @@ -0,0 +1 @@ +# CONFIG_XEN_PV is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVCALLS_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVCALLS_FRONTEND new file mode 100644 index 0000000000000000000000000000000000000000..057a413181332d43f66a01d18410ec5e5dd55747 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVCALLS_FRONTEND @@ -0,0 +1 @@ +# CONFIG_XEN_PVCALLS_FRONTEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVH new file mode 100644 index 0000000000000000000000000000000000000000..2435b87927fc3f06310c7034f4f66a0da4c4e8d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVH @@ -0,0 +1 @@ +# CONFIG_XEN_PVH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_GUEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_GUEST new file mode 100644 index 0000000000000000000000000000000000000000..d971c335c658ecea15cc31d703043702174da813 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_GUEST @@ -0,0 +1 @@ +CONFIG_XEN_PVHVM_GUEST=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_SMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_SMP new file mode 100644 index 0000000000000000000000000000000000000000..25314166b736e22009bf97c840adbe409b05b436 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_SMP @@ -0,0 +1 @@ +CONFIG_XEN_PVHVM_SMP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SAVE_RESTORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SAVE_RESTORE new file mode 100644 index 0000000000000000000000000000000000000000..b42ea51375f78a103e137a888796fe92c260bac8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SAVE_RESTORE @@ -0,0 +1 @@ +CONFIG_XEN_SAVE_RESTORE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SCSI_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SCSI_FRONTEND new file mode 100644 index 0000000000000000000000000000000000000000..5cc303352c12d45c629213034fa919968489c83f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SCSI_FRONTEND @@ -0,0 +1 @@ +# CONFIG_XEN_SCSI_FRONTEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SYS_HYPERVISOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SYS_HYPERVISOR new file mode 100644 index 0000000000000000000000000000000000000000..a8ff9c8e6bc69054c0e72aaeeb63e40c93dce230 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SYS_HYPERVISOR @@ -0,0 +1 @@ +CONFIG_XEN_SYS_HYPERVISOR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_UNPOPULATED_ALLOC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_UNPOPULATED_ALLOC new file mode 100644 index 0000000000000000000000000000000000000000..8ab61b832addf2c640f8971c4f7aba2022c8623c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_UNPOPULATED_ALLOC @@ -0,0 +1 @@ +# CONFIG_XEN_UNPOPULATED_ALLOC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_VIRTIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_VIRTIO new file mode 100644 index 0000000000000000000000000000000000000000..87b3667c853c2b6a0424c162b3b52f150d9ea720 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_VIRTIO @@ -0,0 +1 @@ +# CONFIG_XEN_VIRTIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_WDT 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_WDT new file mode 100644 index 0000000000000000000000000000000000000000..333aa5528e7d1d0fe1d90e7ab9380f0bf5c6de34 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_WDT @@ -0,0 +1 @@ +CONFIG_XEN_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_XENBUS_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_XENBUS_FRONTEND new file mode 100644 index 0000000000000000000000000000000000000000..b25d45529bf194f8e245c6b90f6490c748462247 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_XENBUS_FRONTEND @@ -0,0 +1 @@ +CONFIG_XEN_XENBUS_FRONTEND=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XIAOMI_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XIAOMI_WMI new file mode 100644 index 0000000000000000000000000000000000000000..6ecb6ffa9522bbed9285dea69612b398364d7a75 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XIAOMI_WMI @@ -0,0 +1 @@ +# CONFIG_XIAOMI_WMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XILINX_XADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XILINX_XADC new file mode 100644 index 0000000000000000000000000000000000000000..c9b967ad14c63df3d5290266097373b23d87eef9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XILINX_XADC @@ -0,0 +1 @@ +# CONFIG_XILINX_XADC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_YAMAHA_YAS530 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_YAMAHA_YAS530 new file mode 100644 index 0000000000000000000000000000000000000000..d68189626fc6a119a5ca2b66a8c5d1a8a292e9a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_YAMAHA_YAS530 @@ -0,0 +1 @@ +# CONFIG_YAMAHA_YAS530 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_YOGABOOK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_YOGABOOK new file mode 100644 index 0000000000000000000000000000000000000000..010ddb94e84588758b4995c35977eb23e3206c84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_YOGABOOK @@ -0,0 +1 @@ +# CONFIG_YOGABOOK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZOPT2201 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZOPT2201 new file mode 100644 index 0000000000000000000000000000000000000000..be622878758dba90edad0cff067ecdc9b7eee101 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZOPT2201 @@ -0,0 +1 @@ +# CONFIG_ZOPT2201 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZPA2326 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZPA2326 new file mode 100644 index 0000000000000000000000000000000000000000..a2a86f07a8ffdd1b6143ae619e5f0f86d4d4fcf8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZPA2326 @@ -0,0 +1 @@ +# CONFIG_ZPA2326 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZRAM_TRACK_ENTRY_ACTIME b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZRAM_TRACK_ENTRY_ACTIME new file mode 100644 index 0000000000000000000000000000000000000000..f7f4c4c575469acd4629ef3be2253d43881755df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZRAM_TRACK_ENTRY_ACTIME @@ -0,0 +1 @@ +CONFIG_ZRAM_TRACK_ENTRY_ACTIME=y diff --git a/anolis/configs/Makefile b/anolis/configs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a6af2befecd4868ffeb4667d9199d09d6a58e4c7 --- /dev/null +++ b/anolis/configs/Makefile @@ -0,0 +1,47 @@ +include Makefile.configs + +ifeq ($(MAKELEVEL),0) + $(error The config related target cannot be executed directly from the shell.) 
+endif
+
+dist-defconfig:
+	@sh scripts/generate_configs.sh generic-${DIST_ARCH}
+
+dist-debug-defconfig:
+	@sh scripts/generate_configs.sh debug-${DIST_ARCH}
+
+dist-configs:
+	@sh scripts/generate_configs.sh
+
+dist-configs-update:
+	@sh scripts/update_configs.sh
+
+dist-configs-move:
+	@sh scripts/move_configs.sh
+
+dist-configs-import:
+	@DO_IMPORT_CONFIGS=Y sh scripts/update_configs.sh
+
+dist-configs-export:
+	@sh scripts/export_configs.sh
+
+dist-configs-check: dist-configs
+	@sh examination/configs-check.sh
+
+dist-configs-modify:
+	@sh scripts/modify_config.sh
+
+dist-configs-help:
+	@echo ''
+	@echo '--------------------------------'
+	@echo 'Generate and update kernel configs'
+	@echo ' dist-defconfig: - generate the anolis default config file for the current ARCH, output to $${DIST_OUTPUT}'
+	@echo ' dist-debug-defconfig: - generate the anolis debug default config file for the current ARCH, output to $${DIST_OUTPUT}'
+	@echo ' dist-configs: - generate anolis config files for all arches, output to $${DIST_OUTPUT}'
+	@echo ' dist-configs-update: - refresh configs'
+	@echo ' dist-configs-modify: - modify the specified kconfig and refresh configs'
+	@echo ' dist-configs-move: - adjust the level of a kconfig'
+	@echo ' dist-configs-import: - import legacy kconfig files and split them into independent small files'
+	@echo ' dist-configs-export: - export all configs into an xlsx file'
+
+export
diff --git a/anolis/configs/Makefile.ANCK b/anolis/configs/Makefile.ANCK
new file mode 100644
index 0000000000000000000000000000000000000000..07a1ca0a88e303e165884bfaddd661e3f0fcd61f
--- /dev/null
+++ b/anolis/configs/Makefile.ANCK
@@ -0,0 +1,3 @@
+DIST_CONFIG_LAYOUTS = anolis/configs/scripts/kconfig_layout
+DIST_CONFIG_ACTIONS_REFRESH = anolis/configs/scripts/kconfig_import
+DIST_CONFIG_ACTIONS_IMPORTS = anolis/configs/scripts/kconfig_import
\ No newline at end of file
diff --git a/anolis/configs/Makefile.configs b/anolis/configs/Makefile.configs
new file mode 100644
index 0000000000000000000000000000000000000000..38b25c1332e1327f757f5143b998ee74851ad8c9
--- /dev/null
+++ b/anolis/configs/Makefile.configs
@@ -0,0 +1,11 @@
+# This file is for the kernel kconfig baseline
+
+# the config levels; please do not touch them
+DIST_LEVELS= L0-MANDATORY L1-RECOMMEND L2-OPTIONAL UNKNOWN
+
+# the default kernel name; ANCK downstream distributions may override it
+# eg:
+# DIST_CONFIG_KERNEL_NAME ?= FOO
+DIST_CONFIG_KERNEL_NAME ?= ANCK
+
+include Makefile.$(DIST_CONFIG_KERNEL_NAME)
diff --git a/anolis/configs/README.zh.md b/anolis/configs/README.zh.md
new file mode 100644
index 0000000000000000000000000000000000000000..265c05ce3baf61a8800585e7490ca9b466961ebc
--- /dev/null
+++ b/anolis/configs/README.zh.md
@@ -0,0 +1,143 @@
+# ANCK config overview
+# Why an ANCK kconfig baseline exists
+## Management used to be messy
+The old way of managing ANCK kconfigs was messy and caused many problems. For example:
+
+1. Options believed to be enabled that actually were not
+For example [CONFIG_NUMA_AWARE_SPINLOCKS](https://gitee.com/anolis/cloud-kernel/pulls/535) and [CONFIG_CK_KABI_SIZE_ALIGN_CHECKS](https://gitee.com/anolis/cloud-kernel/pulls/1627): the anolis_defconfig file was modified, but because their dependencies were not satisfied, the options were never actually enabled.
+
+2. A kconfig added in Kconfig without updating the anolis_defconfig file
+For example [CONFIG_VTOA](https://gitee.com/anolis/cloud-kernel/pulls/1749) and [CONFIG_SCHED_ACPU](https://gitee.com/anolis/cloud-kernel/pulls/2260)
+
+3. Wrong kconfig dependencies
+For example [CONFIG_YITIAN_CPER_RAWDATA](https://gitee.com/anolis/cloud-kernel/pulls/2046) is only relevant to the arm64 arch, yet it appeared in the x86 anolis_defconfig.
+
+4. Important configs changed by mistake, causing severe performance problems
+For example [CONFIG_ARM64_TLB_RANGE and CONFIG_ARM64_PTR_AUTH](https://gitee.com/anolis/cloud-kernel/pulls/1960)
+
+## Changes were hard to trace
+Many kconfig changes were not recorded in time, so deciding whether a kconfig could be modified required careful deliberation, with no historical information to consult.
+Tracing the history through git is also difficult: anolis_defconfig was constantly regenerated, so the position of many kconfigs kept shifting, and it could take several rounds of git blame to find the original commit.
+
+## Compatibility
+ANCK needs to highlight the important kconfigs as a reference for ANCK downstream derivatives, to keep those derivatives compatible with ANCK.
+
+## Increasingly complex kconfig files
+As the OpenAnolis community grew, ANCK's kconfig files expanded from the original four (defconfig and debug-defconfig, for x86 and arm64 only) to also support the Loongson (loongarch) and Sunway (sw_64) architectures, kernel code coverage (gcov), and arm64 64k pages.
+With more kconfig files, it became easy to adjust an option in one file and forget to adjust it in the others.
+A typical case: a config was updated without updating anolis_defconfig and anolis_debug-defconfig at the same time.
+For example [CONFIG_KVM_INTEL_TDX](https://gitee.com/anolis/cloud-kernel/pulls/818) and [CONFIG_AMD_PTDMA](https://gitee.com/anolis/cloud-kernel/pulls/288)
+
+# How the kconfigs are organized
+## Background
+A concrete kconfig setting is determined by the following elements:
+1. dist
+The product. Which product this kconfig setting belongs to. For example, CONFIG_ABC might be configured as y for ANCK but as m for some downstream derivative of ANCK.
+2. level
+The tier. How important this kconfig is to the product. ANCK defines three tiers (L0/L1/L2), described below.
+3. variant
+The scenario. Which scenario the setting applies to: production (generic), testing (debug), coverage testing (gcov), and so on.
+4. arch
+The architecture. Which concrete architecture the setting applies to within a given product and scenario: x86, arm64, loongarch, etc.
+5. name
+The name of the kconfig, e.g. CONFIG_EXT4_FS.
+6. value
+The value of the kconfig, e.g. `CONFIG_EXT4_FS=m`.
+
+Example:
+Suppose we have the kernel ANCK, its downstream derivative FOO, and the option CONFIG_EXT4_FS.
+The value of this option, and its importance, may differ completely across products, scenarios, and architectures.
+For instance, ANCK may require CONFIG_EXT4_FS to be y on x86 while m is enough on arm64, with the option important enough that it must not be changed casually; on the derivative FOO the filesystem may not matter at all and should therefore be `not set`.
+We can then express this as:
+> Conf[(name="CONFIG_EXT4_FS", dist="ANCK", level="L0", variant="generic", arch="x86")] = "y"
+> Conf[(name="CONFIG_EXT4_FS", dist="ANCK", level="L0", variant="generic", arch="arm64")] = "m"
+> Conf[(name="CONFIG_EXT4_FS", dist="FOO", level="L2", variant="generic", arch="default")] = "n"
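+
+To make the mapping concrete, here is a minimal sketch (hypothetical values and paths, assuming the directory layout described later in this README) of how such tuples end up on disk: the path encodes (level, arch, name), and the one-line file content encodes the value:
+
+```sh
+# Sketch only; the values and the OVERRIDE/FOO path are hypothetical.
+# variant=generic lives directly under L*/ ; other variants live
+# under custom-overrides/ (see the directory layout section below).
+cat anolis/configs/L0-MANDATORY/x86/CONFIG_EXT4_FS
+# CONFIG_EXT4_FS=y
+cat anolis/configs/L0-MANDATORY/arm64/CONFIG_EXT4_FS
+# CONFIG_EXT4_FS=m
+# A derivative (dist=FOO) shadows the ANCK baseline via OVERRIDE/:
+cat anolis/configs/OVERRIDE/FOO/L2-OPTIONAL/default/CONFIG_EXT4_FS
+# CONFIG_EXT4_FS is not set
+```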
+
+## Products
+1. ANCK (Anolis Cloud Kernel)
+The Anolis kernel; Anolis7, Anolis8 and Anolis23 ship different versions of ANCK.
+2. FOO
+You can modify and build on top of ANCK's existing code and kconfigs to form a downstream derivative of ANCK, say a new version named FOO.
+
+## Tiers
+ANCK divides all kconfigs into three tiers by importance, so that the important configs are flagged and developers have a reference when modifying kconfigs.
+### L0-MANDATORY
+The most critical kconfigs. They give the kernel its basic productization capabilities and ensure it can be used as a basic server operating system.
+Changes to these kconfigs require great care, and ANCK downstream derivatives are advised not to override them.
+
+Criteria for inclusion:
+1. Backed by national or industry standards.
+2. Kconfigs with a major impact on compatibility. Specifically, these fall into the following classes:
+- Self-evidently basic capabilities, e.g. CONFIG_NET and CONFIG_PCI.
+- Kconfigs with broad, general use cases. For example CONFIG_NFS_FS: the vast majority of server operating systems support NFS.
+- Kconfigs widely used or depended on by mainstream open-source software. For example CONFIG_USERFAULTFD, which qemu live migration requires.
+3. Kconfigs backed by real-world cases, or where users have reported that misconfiguration causes severe functional or performance problems.
+
+### L1-RECOMMEND
+Kconfigs that matter for specific scenarios; misconfiguring them causes severe productization problems in those scenarios.
+Anolis configures the L1 kconfigs from the perspective of cloud and server scenarios; downstream derivatives may override them as their actual business needs require.
+
+Criteria for inclusion:
+1. Backed by an important, specific business scenario. Note: specific scenarios only; general scenarios belong in L0.
+2. Misconfiguration has caused incidents in that specific scenario.
+
+### L2-OPTIONAL
+Kconfigs that receive no particular attention.
+
+This tier contains two kinds of kconfigs:
+1. Kconfigs that can be modified manually.
+These are currently configured, but it is unclear whether they matter to any existing scenario; for compatibility they are kept unchanged and placed in L2.
+ANCK considers that changing them will not seriously affect existing use cases, so they can be freely enabled or disabled, e.g. CONFIG_CAN and CONFIG_WIRELESS.
+Downstream derivatives may override them at will.
+If a kconfig in this tier later proves important for some scenario, a PR can be submitted to promote it to L1 or L0.
+
+2. Kconfigs that cannot be modified manually.
+Some kconfigs cannot be adjusted by hand; they can only be selected automatically through dependencies when other kconfigs change. Tracking them is of little value, so they are placed in L2.
+Typical examples: CONFIG_ARCH_WANT_XXX, CONFIG_HAVE_XXX.
+
+Criteria for inclusion:
+1. Currently configured, but with no clear use case or value.
+2. Cannot be configured manually; can only be selected automatically.
+
+### UNKNOWN
+Kconfigs whose tier has not yet been determined.
+Keeping kconfigs in this tier for a long time is discouraged.
+
+## Variants
+Typical ANCK variants include (see the merge sketch after the architecture list below):
+1. generic
+The production environment, used for live deployments.
+2. debug
+The testing environment, used during release testing; it usually enables checkers such as KASAN, KMEMLEAK and LOCKDEP so that kernel problems are caught early.
+3. gcov
+The coverage-testing environment, used during release testing.
+4. 64k
+A kernel using arm64 64k page tables.
+
+## Architectures
+Typical ANCK architectures include:
+1. x86
+2. arm64
+3. loongarch (Loongson)
+4. sw_64 (Sunway)
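+
+As an illustration of how the variant and arch deltas combine (a sketch only; the real flow is driven by the scripts behind the Makefile targets above, e.g. scripts/generate_configs.sh, and the base-config path here is illustrative), a debug/arm64 config can be thought of as the generic arm64 config plus the debug overrides, in the spirit of the upstream scripts/kconfig/merge_config.sh:
+
+```sh
+# Sketch, not the actual tooling: merge the generic baseline with the
+# debug deltas (common first, then arch-specific). -m only merges the
+# fragments and does not run "make olddefconfig" afterwards.
+scripts/kconfig/merge_config.sh -m \
+	arch/arm64/configs/anolis_defconfig \
+	anolis/configs/custom-overrides/debug/default.config \
+	anolis/configs/custom-overrides/debug/arm64.config
+```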
+
+# kconfig directory layout
+The directory structure of the kconfigs follows the elements described above. Specifically,
+the kconfig tree lives in $(srctree)/anolis/configs and is divided into the following parts:
+- scripts/: script files related to kconfig handling; developers usually do not need to care about them
+- metadata/: metadata about the kconfigs
+  - metadata/description: descriptions of the kconfigs
+  - metadata/changelog: change records of the kconfigs
+- L*/: the kconfig settings stored by tier, used for production
+  - L*/{x86,arm64,...}: the kconfig settings stored per architecture
+- custom-overrides/: delta kconfigs for scenarios other than production
+  - custom-overrides/{debug, gcov}: debug/gcov-related kconfigs that differ from production
+  - custom-overrides/{debug, gcov}/{default, x86, arm64}.config: the debug/gcov-related deltas that are common / x86-specific / arm64-specific
+- OVERRIDE/: provided for ANCK derivatives, holding configs that override the ANCK baseline
+  - OVERRIDE/FOO: derivative FOO's delta configuration relative to ANCK
+  - OVERRIDE/FOO/L*/: derivative FOO's kconfig settings stored by tier
+  ...
+
+## How to update a kconfig
+See How-To-Modify-Kconfig.zh.md
diff --git a/anolis/configs/custom-overrides/64k/arm64.config b/anolis/configs/custom-overrides/64k/arm64.config new file mode 100644 index 0000000000000000000000000000000000000000..e38bc0ebca185382e2c93e7cc3b6203cd2107629 --- /dev/null +++ b/anolis/configs/custom-overrides/64k/arm64.config @@ -0,0 +1,2 @@ +# CONFIG_ARM64_4K_PAGES is not set +CONFIG_ARM64_64K_PAGES=y \ No newline at end of file diff --git a/anolis/configs/custom-overrides/debug/arm64.config b/anolis/configs/custom-overrides/debug/arm64.config new file mode 100644 index 0000000000000000000000000000000000000000..e006806c71cf0454cb92429bf7029524764f0096 --- /dev/null +++ b/anolis/configs/custom-overrides/debug/arm64.config @@ -0,0 +1,32 @@ +# CONFIG_VMAP_STACK is not set +CONFIG_EROFS_FS_DEBUG=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_PERCPU_TEST=m +CONFIG_PM_TEST_SUSPEND=y +CONFIG_SPI_DEBUG=y +CONFIG_WQ_WATCHDOG=y +CONFIG_I2C_GPIO_FAULT_INJECTOR=y +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +CONFIG_KASAN_SHADOW_OFFSET=0xdfffa00000000000 +# CONFIG_KASAN_SW_TAGS is not set +CONFIG_MAILBOX_TEST=m +# CONFIG_UBSAN_UNREACHABLE is not set diff --git a/anolis/configs/custom-overrides/debug/default.config b/anolis/configs/custom-overrides/debug/default.config new file mode 100644 index 0000000000000000000000000000000000000000..d45df92ffa6223942831a95678c6506669b40a93 --- /dev/null +++ b/anolis/configs/custom-overrides/debug/default.config @@ -0,0 +1,111 @@ +CONFIG_ACPI_APEI_ERST_DEBUG=m +CONFIG_ACPI_CONFIGFS=m +CONFIG_ACPI_CUSTOM_METHOD=m +CONFIG_ACPI_DEBUG=y +CONFIG_ACPI_DEBUGGER=y +CONFIG_ACPI_DEBUGGER_USER=m +CONFIG_CGROUP_DEBUG=y +CONFIG_DEBUG_CREDENTIALS=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_PAGEALLOC=y +# CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT is not set +CONFIG_DEBUG_PAGE_REF=y +CONFIG_DEBUG_SG=y +CONFIG_DMA_API_DEBUG=y +CONFIG_DMA_API_DEBUG_SG=y +CONFIG_DMADEVICES_DEBUG=y +CONFIG_DMADEVICES_VDEBUG=y +CONFIG_DRM_AMDGPU_GART_DEBUGFS=y +CONFIG_EDAC_DEBUG=y +CONFIG_EXT4_DEBUG=y +CONFIG_FAIL_IO_TIMEOUT=y +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_MMC_REQUEST=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAILSLAB=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAULT_INJECTION_USERCOPY is not set +CONFIG_FSCACHE_OBJECT_LIST=y +CONFIG_FTRACE_RECORD_RECURSION=y +CONFIG_FTRACE_RECORD_RECURSION_SIZE=128 +CONFIG_GENERIC_IRQ_DEBUGFS=y +CONFIG_KASAN=y +CONFIG_KASAN_GENERIC=y +CONFIG_KASAN_INLINE=y +# CONFIG_KASAN_OUTLINE is not set +CONFIG_LOCKDEP=y +CONFIG_LOCK_EVENT_COUNTS=y +CONFIG_PAGE_EXTENSION=y +CONFIG_PM_ADVANCED_DEBUG=y +# CONFIG_PROVE_RAW_LOCK_NESTING is not set +CONFIG_QUOTA_DEBUG=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_RING_BUFFER_RECORD_RECURSION=y +CONFIG_UBSAN=y +CONFIG_UBSAN_BOOL=y +CONFIG_UBSAN_BOUNDS=y +CONFIG_UBSAN_ENUM=y +CONFIG_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN_SHIFT=y +CONFIG_XFS_WARN=y +# CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION is not set +CONFIG_CC_HAS_UBSAN_ARRAY_BOUNDS=y +CONFIG_CC_HAS_UBSAN_BOUNDS=y +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CONSTRUCTORS=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=32000 +# CONFIG_DEBUG_KMEMLEAK_TEST is not set +# CONFIG_DEBUG_KOBJECT_RELEASE is not set +CONFIG_DEBUG_LOCK_ALLOC=y +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_OBJECTS=y +# CONFIG_DEBUG_OBJECTS_SELFTEST is not set +CONFIG_DEBUG_PER_CPU_MAPS=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y +# CONFIG_FAIL_FUNCTION is not set +# CONFIG_FAIL_FUTEX is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +CONFIG_KASAN_STACK=y +CONFIG_LATENCYTOP=y +CONFIG_LOCK_TORTURE_TEST=m +CONFIG_NFP_DEBUG=y +CONFIG_NOUVEAU_DEBUG_MMU=y +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_STACKDEPOT=y +CONFIG_TASKS_RCU=y +# CONFIG_TEST_KASAN_MODULE is not set +CONFIG_TEST_LIST_SORT=y +CONFIG_TEST_STRING_HELPERS=m +# CONFIG_TEST_UBSAN is not set +CONFIG_TORTURE_TEST=m +CONFIG_TRACE_IRQFLAGS=y +# CONFIG_UBSAN_ALIGNMENT is not set +CONFIG_UBSAN_ARRAY_BOUNDS=y +# CONFIG_UBSAN_DIV_ZERO is not set +# CONFIG_UBSAN_TRAP is not set +CONFIG_UNINLINE_SPIN_UNLOCK=y diff --git a/anolis/configs/custom-overrides/debug/x86.config b/anolis/configs/custom-overrides/debug/x86.config new file mode 100644 index 0000000000000000000000000000000000000000..be7f712a99e425e14d2d30dfd0ab28cc257cb2c4 --- /dev/null +++ b/anolis/configs/custom-overrides/debug/x86.config @@ -0,0 +1,46 @@ +CONFIG_DNS_RESOLVER=y +CONFIG_AMD_PTDMA=y +CONFIG_BFQ_CGROUP_DEBUG=y +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_MD4=y +CONFIG_INTEL_IOMMU_DEBUGFS=y +CONFIG_IOMMU_DEBUGFS=y +CONFIG_IP_VS_DEBUG=y +CONFIG_KASAN_VMALLOC=y +# CONFIG_KCSAN is not set +CONFIG_KFENCE_SAMPLE_INTERVAL=100 +CONFIG_PM_TRACE=y +CONFIG_PM_TRACE_RTC=y 
+CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
+CONFIG_X86_DEBUG_FPU=y
+# CONFIG_AMD_IOMMU_DEBUGFS is not set
+CONFIG_ATH10K_DEBUG=y
+CONFIG_ATH10K_TRACING=y
+CONFIG_ATH_DEBUG=y
+# CONFIG_ATH_TRACEPOINTS is not set
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CIFS=y
+# CONFIG_CIFS_FSCACHE is not set
+# CONFIG_CIFS_SMB_DIRECT is not set
+CONFIG_CRYPTO_LIB_ARC4=y
+CONFIG_CRYPTO_LIB_DES=y
+CONFIG_DEBUG_VM=y
+# CONFIG_DEBUG_VM_PGFLAGS is not set
+CONFIG_DEBUG_VM_PGTABLE=y
+# CONFIG_DEBUG_VM_RB is not set
+# CONFIG_DEBUG_VM_VMACACHE is not set
+CONFIG_DMAR_PERF=y
+CONFIG_IWLWIFI_DEBUG=y
+CONFIG_IWLWIFI_DEVICE_TRACING=y
+CONFIG_KASAN_SHADOW_OFFSET=0xdffffc0000000000
+CONFIG_LOCK_STAT=y
+CONFIG_MAC80211_MESSAGE_TRACING=y
+CONFIG_MMIOTRACE=y
+# CONFIG_MMIOTRACE_TEST is not set
+CONFIG_RANDOM32_SELFTEST=y
+CONFIG_RC_LOOPBACK=m
+CONFIG_RTLWIFI_DEBUG=y
+CONFIG_RTW88_DEBUG=y
+CONFIG_RTW88_DEBUGFS=y
+CONFIG_TRACE_IRQFLAGS_NMI=y
diff --git a/anolis/configs/custom-overrides/gcov/default.config b/anolis/configs/custom-overrides/gcov/default.config
new file mode 100644
index 0000000000000000000000000000000000000000..6481357789fe428c9a8575a70130a20398036f27
--- /dev/null
+++ b/anolis/configs/custom-overrides/gcov/default.config
@@ -0,0 +1,2 @@
+CONFIG_GCOV_KERNEL=y
+CONFIG_GCOV_PROFILE_ALL=y
\ No newline at end of file
diff --git a/anolis/configs/custom-overrides/kvm_modulize/arm64.config b/anolis/configs/custom-overrides/kvm_modulize/arm64.config
new file mode 100644
index 0000000000000000000000000000000000000000..c221222ab1c9c641e8484e218d0728e7cb5c80da
--- /dev/null
+++ b/anolis/configs/custom-overrides/kvm_modulize/arm64.config
@@ -0,0 +1 @@
+CONFIG_KVM=m
\ No newline at end of file
diff --git a/anolis/configs/examination/EXTRA/arm64.config b/anolis/configs/examination/EXTRA/arm64.config
new file mode 100644
index 0000000000000000000000000000000000000000..58855622c402d213b0deb68edc62a679998f8129
--- /dev/null
+++ b/anolis/configs/examination/EXTRA/arm64.config
@@ -0,0 +1,2 @@
+## (ANBZ#10820)
+CONFIG_FCOE=m
diff --git a/anolis/configs/examination/EXTRA/x86.config b/anolis/configs/examination/EXTRA/x86.config
new file mode 100644
index 0000000000000000000000000000000000000000..9a309ef17a84f7dca8ba4344c8f494659dd696fe
--- /dev/null
+++ b/anolis/configs/examination/EXTRA/x86.config
@@ -0,0 +1,21 @@
+## These CXL & DEV_DAX* related kconfigs must be m to allow out-of-tree modules to override them.
+CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m +CONFIG_CXL_PORT=m +CONFIG_CXL_REGION=y +CONFIG_CXL_PMU=m +CONFIG_DEV_DAX_CXL=m +CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_KMEM=m + +## (ANBZ#10825) +# CONFIG_RAS_CEC is not set + +## (ANBZ#10820) +CONFIG_FCOE=m + +## (ANBZ#11914) +CONFIG_RETPOLINE=y diff --git a/anolis/configs/examination/L0-MANDATORY/arm64.config b/anolis/configs/examination/L0-MANDATORY/arm64.config new file mode 100644 index 0000000000000000000000000000000000000000..0f5c593d2b4d94072c53332716f49d01000041e5 --- /dev/null +++ b/anolis/configs/examination/L0-MANDATORY/arm64.config @@ -0,0 +1,400 @@ +# UNLIMITED CONFIG_LSM +# CHOICE CONFIG_NODES_SHIFT 6/8/10 +# RANGE CONFIG_NR_CPUS 1024,8192 +CONFIG_64BIT=y +CONFIG_ACPI=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_THERMAL=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_AIO=y +CONFIG_ARCH_HISI=y +CONFIG_ARM64=y +CONFIG_ARM64_CNP=y +CONFIG_ARM64_E0PD=y +CONFIG_ARM64_EPAN=y +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_ARM64_PSEUDO_NMI=y +CONFIG_ARM64_RAS_EXTN=y +# UNLIMITED CONFIG_ARM64_SME +## (https://lore.kernel.org/all/20241106164220.2789279-1-mark.rutland@arm.com/) +## Due to the issues identified with the implementation of CONFIG_ARM64_SME, the upstream has +## currently marked the SME Kconfig as BROKEN. This action is taken with the intention of +## re-enabling the Kconfig once these issues are resolved in the future. Despite this temporary +## change, the SME Kconfig remains a Level 0 (L0) configuration that we need to monitor. +## For now, we will disregard checks related to this Kconfig. Once the relevant patches have +## been merged and the issues are resolved, we will resume the checks and re-enable the +## Kconfig accordingly. 
+CONFIG_ARM64_SVE=y +CONFIG_ARM_CCN=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_ARM_PMU=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +CONFIG_ARM_SMMU_V3_PMU=m +CONFIG_ARM_SPE_PMU=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ATA=m +CONFIG_AUDIT=y +CONFIG_AUTOFS_FS=y +CONFIG_AUXILIARY_BUS=y +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BLK_CGROUP=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_DM=m +CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_BLK_DEV_NVME=m +CONFIG_BLK_DEV_SD=m +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLOCK=y +CONFIG_BONDING=m +CONFIG_BPF=y +CONFIG_BPF_JIT=y +CONFIG_BPF_SYSCALL=y +CONFIG_BRIDGE=m +CONFIG_BUG=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_SCHED=y +CONFIG_COMMON_CLK=y +CONFIG_COMPACTION=y +CONFIG_COREDUMP=y +CONFIG_CPUSETS=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_IDLE=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_PM=y +CONFIG_CRASH_CORE=y +CONFIG_CRASH_DUMP=y +CONFIG_CRYPTO=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m +CONFIG_DAX=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEVMEM=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMADEVICES=y +CONFIG_DMA_ENGINE=y +CONFIG_DMI=y +CONFIG_DNOTIFY=y +CONFIG_DNS_RESOLVER=m +CONFIG_DRM_HISI_HIBMC=m +CONFIG_DYNAMIC_FTRACE=y +CONFIG_EDAC=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_ELFCORE=y +CONFIG_ELF_CORE=y +CONFIG_EPOLL=y +CONFIG_EROFS_FS=m +CONFIG_ETHTOOL_NETLINK=y +CONFIG_EVENTFD=y +CONFIG_EVM=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_FANOTIFY=y +CONFIG_FAT_FS=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FHANDLE=y +CONFIG_FILE_LOCKING=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FREEZER=y +CONFIG_FSNOTIFY=y +CONFIG_FS_DAX=y +CONFIG_FTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_FUSE_FS=m +CONFIG_FUTEX=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_IPI=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIO_HISI=m +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_HDMI=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_HISILICON_LPC=y +CONFIG_HISI_PCIE_PMU=m +CONFIG_HISI_PMU=m +CONFIG_HOTPLUG_CPU=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HWMON=y +CONFIG_HW_RANDOM=y +CONFIG_I2C=y +CONFIG_IMA=y +CONFIG_INET=y +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INFINIBAND=m +CONFIG_INPUT=y +CONFIG_INPUT_KEYBOARD=y +CONFIG_INPUT_MOUSE=y +CONFIG_INTEGRITY=y +CONFIG_IOMMU_SUPPORT=y +CONFIG_IOSCHED_BFQ=y +CONFIG_IO_URING=y +CONFIG_IPC_NS=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_SI=m +CONFIG_IPV6=y +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_SET=m 
+CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_ISO9660_FS=m +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KERNFS=y +CONFIG_KEXEC=y +CONFIG_KEXEC_CORE=y +CONFIG_KEXEC_FILE=y +CONFIG_KEYS=y +CONFIG_KPROBES=y +CONFIG_KRETPROBES=y +CONFIG_KVM=y +CONFIG_KVM_MMIO=y +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_MD=y +CONFIG_MEMBARRIER=y +CONFIG_MEMCG=y +CONFIG_MEMFD_CREATE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MIGRATION=y +CONFIG_MISC_FILESYSTEMS=y +CONFIG_MMU=y +CONFIG_MODULES=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MULTIUSER=y +CONFIG_NAMESPACES=y +CONFIG_NET=y +CONFIG_NETDEVICES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETLINK_DIAG=m +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NET_CLS=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_CORE=y +CONFIG_NET_NS=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_TEAM=m +CONFIG_NFSD=m +CONFIG_NFSD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_FS=m +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_V4=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_NAT=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NLS=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_UTF8=m +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NTFS3_FS=m +CONFIG_NUMA=y +CONFIG_NUMA_BALANCING=y +CONFIG_NVME_CORE=m +CONFIG_NVME_TARGET=m +CONFIG_OVERLAY_FS=m +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_PAGE_COUNTER=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PARAVIRT=y +CONFIG_PCI=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_ATS=y +CONFIG_PCI_HISI=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCI_IOV=y +CONFIG_PCI_MSI=y +CONFIG_PERF_EVENTS=y +CONFIG_PID_NS=y +CONFIG_PM=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_VMCORE=y +CONFIG_PSI=y +CONFIG_PSTORE=y +CONFIG_PVPANIC=y +CONFIG_QUOTA=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RAS=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RELOCATABLE=y +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_SATA_AHCI=m +CONFIG_SCHED_MC=y +CONFIG_SCHED_SMT=y +CONFIG_SCSI=y +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SECCOMP=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIO=y +CONFIG_SHMEM=y +CONFIG_SIGNALFD=y +CONFIG_SLUB=y +CONFIG_SMC=m +CONFIG_SMP=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_SPARSEMEM=y +CONFIG_SPI=y +CONFIG_SPI_HISI_KUNPENG=m +CONFIG_SQUASHFS=m +CONFIG_STACKPROTECTOR=y +CONFIG_STACKTRACE=y +CONFIG_SUNRPC=m +CONFIG_SWAP=y +CONFIG_SYN_COOKIES=y +CONFIG_SYSCTL=y +CONFIG_SYSFS=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_TAP=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=y +CONFIG_THERMAL=y +CONFIG_THREAD_INFO_IN_TASK=y +CONFIG_TIMERFD=y +CONFIG_TLS=m +CONFIG_TMPFS=y +CONFIG_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TREE_RCU=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TTY=y +CONFIG_TUN=m +CONFIG_UIO=m +CONFIG_UNIX=y +CONFIG_UPROBES=y +CONFIG_USB=y +CONFIG_USB_SUPPORT=y +CONFIG_USERFAULTFD=y +CONFIG_USER_NS=y +CONFIG_UTS_NS=y +CONFIG_VETH=m +CONFIG_VFAT_FS=m +CONFIG_VFIO=m 
+CONFIG_VFIO_PCI=m +CONFIG_VHOST=m +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=m +CONFIG_VIRTIO=m +CONFIG_VIRTIO_FS=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_NET=m +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTUALIZATION=y +CONFIG_VLAN_8021Q=m +CONFIG_VMAP_STACK=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_VSOCKETS=m +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_VXLAN=m +CONFIG_WATCHDOG=y +CONFIG_XDP_SOCKETS=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +CONFIG_XFS_FS=m +CONFIG_XPS=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DMA=y +CONFIG_ZRAM=m diff --git a/anolis/configs/examination/L0-MANDATORY/loongarch.config b/anolis/configs/examination/L0-MANDATORY/loongarch.config new file mode 100644 index 0000000000000000000000000000000000000000..11899dd44cad6cca61dd21de8b565a2c53eac2e4 --- /dev/null +++ b/anolis/configs/examination/L0-MANDATORY/loongarch.config @@ -0,0 +1,346 @@ +# UNLIMITED CONFIG_LSM +## CONFIG_NFS_FSCACHE=y +CONFIG_NODES_SHIFT=6 +CONFIG_NR_CPUS=256 +CONFIG_64BIT=y +CONFIG_ACPI=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_THERMAL=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_AIO=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ATA=y +CONFIG_AUDIT=y +CONFIG_AUTOFS_FS=y +CONFIG_AUXILIARY_BUS=y +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BLK_CGROUP=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_DM=m +CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_BLK_DEV_NVME=m +CONFIG_BLK_DEV_SD=m +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLOCK=y +CONFIG_BONDING=m +CONFIG_BPF=y +CONFIG_BPF_JIT=y +CONFIG_BPF_SYSCALL=y +CONFIG_BRIDGE=m +CONFIG_BUG=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_SCHED=y +CONFIG_COMMON_CLK=y +CONFIG_COMPACTION=y +CONFIG_COREDUMP=y +CONFIG_CPUSETS=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_PM=y +CONFIG_CRASH_CORE=y +CONFIG_CRASH_DUMP=y +CONFIG_CRYPTO=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_DAX=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEVMEM=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMADEVICES=y +CONFIG_DMA_ENGINE=y +CONFIG_DMI=y +CONFIG_DNOTIFY=y +CONFIG_DNS_RESOLVER=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_ELFCORE=y +CONFIG_ELF_CORE=y +CONFIG_EPOLL=y +CONFIG_EROFS_FS=m +CONFIG_ETHTOOL_NETLINK=y +CONFIG_EVENTFD=y +CONFIG_EVM=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_FANOTIFY=y +CONFIG_FAT_FS=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FHANDLE=y +CONFIG_FILE_LOCKING=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FREEZER=y +CONFIG_FSNOTIFY=y +CONFIG_FTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_FUSE_FS=m +CONFIG_FUTEX=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GPIO_ACPI=y 
+CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_HDMI=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_HOTPLUG_CPU=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HWMON=y +CONFIG_HW_RANDOM=y +CONFIG_I2C=y +CONFIG_IMA=y +CONFIG_INET=y +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INFINIBAND=m +CONFIG_INPUT=y +CONFIG_INPUT_KEYBOARD=y +CONFIG_INPUT_MOUSE=y +CONFIG_INTEGRITY=y +CONFIG_IOMMU_SUPPORT=y +CONFIG_IOSCHED_BFQ=y +CONFIG_IO_URING=y +CONFIG_IPC_NS=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_SI=m +CONFIG_IPV6=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_ISO9660_FS=m +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KERNFS=y +CONFIG_KEXEC=y +CONFIG_KEXEC_CORE=y +CONFIG_KEYS=y +CONFIG_KPROBES=y +CONFIG_KRETPROBES=y +CONFIG_KVM=y +CONFIG_KVM_MMIO=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_LOONGARCH=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_MD=y +CONFIG_MEMBARRIER=y +CONFIG_MEMCG=y +CONFIG_MEMFD_CREATE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MIGRATION=y +CONFIG_MISC_FILESYSTEMS=y +CONFIG_MMU=y +CONFIG_MODULES=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MULTIUSER=y +CONFIG_NAMESPACES=y +CONFIG_NET=y +CONFIG_NETDEVICES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETLINK_DIAG=m +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NET_CLS=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_CORE=y +CONFIG_NET_NS=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_TEAM=m +CONFIG_NFSD=y +CONFIG_NFSD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_FS=y +CONFIG_NFS_V4=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_NAT=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NLS=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_UTF8=y +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NTFS3_FS=m +CONFIG_NUMA=y +CONFIG_NUMA_BALANCING=y +CONFIG_NVME_CORE=m +CONFIG_NVME_TARGET=m +CONFIG_OVERLAY_FS=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_PAGE_COUNTER=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PCI=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +CONFIG_PCI_MSI=y +CONFIG_PERF_EVENTS=y +CONFIG_PID_NS=y +CONFIG_PM=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_VMCORE=y +CONFIG_PSI=y +CONFIG_PSTORE=m +CONFIG_PVPANIC=y +CONFIG_QUOTA=y +CONFIG_RAS=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RELOCATABLE=y +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_SYSTOHC=y +CONFIG_SATA_AHCI=y +CONFIG_SCHED_SMT=y +CONFIG_SCSI=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SECCOMP=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIO=y +CONFIG_SHMEM=y +CONFIG_SIGNALFD=y +CONFIG_SLUB=y +CONFIG_SMC=m +CONFIG_SMP=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_SPARSEMEM=y +CONFIG_SPI=y +CONFIG_SQUASHFS=m +CONFIG_STACKPROTECTOR=y +CONFIG_STACKTRACE=y +CONFIG_SUNRPC=y +CONFIG_SWAP=y +CONFIG_SYN_COOKIES=y +CONFIG_SYSCTL=y +CONFIG_SYSFS=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_TAP=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=m 
+CONFIG_THERMAL=y
+CONFIG_TIMERFD=y
+CONFIG_TLS=m
+CONFIG_TMPFS=y
+CONFIG_TRACEPOINTS=y
+CONFIG_TRACING=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TREE_RCU=y
+CONFIG_TRUSTED_KEYS=y
+CONFIG_TTY=y
+CONFIG_TUN=m
+CONFIG_UIO=m
+CONFIG_UNIX=y
+CONFIG_UNWINDER_PROLOGUE=y
+CONFIG_UPROBES=y
+CONFIG_USB=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USERFAULTFD=y
+CONFIG_USER_NS=y
+CONFIG_UTS_NS=y
+CONFIG_VETH=m
+CONFIG_VFAT_FS=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
+CONFIG_VHOST=m
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_FS=m
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_NET=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_VSOCKETS=m
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VXLAN=m
+CONFIG_WATCHDOG=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+CONFIG_XFS_FS=y
+CONFIG_XPS=y
+CONFIG_ZONE_DMA32=y
+CONFIG_ZRAM=m
diff --git a/anolis/configs/examination/L0-MANDATORY/sw_64.config b/anolis/configs/examination/L0-MANDATORY/sw_64.config
new file mode 100644
index 0000000000000000000000000000000000000000..62c44debb1917be1c72ae63de623437f5911d179
--- /dev/null
+++ b/anolis/configs/examination/L0-MANDATORY/sw_64.config
@@ -0,0 +1,343 @@
+# UNLIMITED CONFIG_LSM
+CONFIG_NODES_SHIFT=7
+CONFIG_NR_CPUS=512
+CONFIG_SW64=y
+CONFIG_64BIT=y
+CONFIG_ACPI=y
+CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_NUMA=y
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ADVISE_SYSCALLS=y
+CONFIG_AIO=y
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ATA=y
+CONFIG_AUDIT=y
+CONFIG_AUTOFS_FS=y
+CONFIG_AUXILIARY_BUS=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLK_MQ_VIRTIO=y
+CONFIG_BLOCK=y
+CONFIG_BONDING=m
+CONFIG_BPF=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BRIDGE=m
+CONFIG_BUG=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_COMMON_CLK=y
+CONFIG_COMPACTION=y
+CONFIG_COREDUMP=y
+CONFIG_CPUSETS=y
+CONFIG_CPU_FREQ=y
+CONFIG_CRASH_CORE=y
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AKCIPHER2=y
+CONFIG_CRYPTO_AKCIPHER=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_GHASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_LIB_AES=y
+CONFIG_CRYPTO_LIB_SHA256=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RSA=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SKCIPHER2=y
+CONFIG_CRYPTO_SKCIPHER=y
+CONFIG_CRYPTO_SM2=y
+CONFIG_CRYPTO_SM3=y
+CONFIG_CRYPTO_SM3_GENERIC=y
+CONFIG_CRYPTO_SM4=y
+CONFIG_CRYPTO_SM4_GENERIC=y
+CONFIG_DAX=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEEP_MEMCPY=y
+CONFIG_DEEP_MEMSET=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVMEM=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_ENGINE=y
+CONFIG_DMI=y
+CONFIG_DNOTIFY=y
+CONFIG_DNS_RESOLVER=m
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_EFI=y
+CONFIG_ELFCORE=y
+CONFIG_ELF_CORE=y
+CONFIG_EPOLL=y
+CONFIG_EROFS_FS=m
+CONFIG_ETHTOOL_NETLINK=y
+CONFIG_EVENTFD=y
+CONFIG_EVM=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_FANOTIFY=y
+CONFIG_FAT_FS=m
+CONFIG_FB=y
+CONFIG_FB_EFI=y
+CONFIG_FHANDLE=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FREEZER=y +CONFIG_FSNOTIFY=y +CONFIG_FTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_FUSE_FS=m +CONFIG_FUTEX=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GPIO_ACPI=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_HDMI=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_HOTPLUG_CPU=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HWMON=y +CONFIG_HW_RANDOM=y +CONFIG_I2C=y +CONFIG_IMA=y +CONFIG_INET=y +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INFINIBAND=m +CONFIG_INPUT=y +CONFIG_INPUT_KEYBOARD=y +CONFIG_INPUT_MOUSE=y +CONFIG_INTEGRITY=y +CONFIG_IOMMU_SUPPORT=y +CONFIG_IOSCHED_BFQ=y +CONFIG_IO_URING=y +CONFIG_IPC_NS=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_SI=m +CONFIG_IPV6=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_ISO9660_FS=m +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KERNFS=y +CONFIG_KEXEC=y +CONFIG_KEXEC_CORE=y +CONFIG_KEYS=y +CONFIG_KPROBES=y +CONFIG_KRETPROBES=y +CONFIG_KVM=y +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_LOONGARCH=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_MD=y +CONFIG_MEMBARRIER=y +CONFIG_MEMCG=y +CONFIG_MEMFD_CREATE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MIGRATION=y +CONFIG_MISC_FILESYSTEMS=y +CONFIG_MMU=y +CONFIG_MODULES=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MULTIUSER=y +CONFIG_NAMESPACES=y +CONFIG_NET=y +CONFIG_NETDEVICES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETLINK_DIAG=m +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NET_CLS=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_CORE=y +CONFIG_NET_NS=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_TEAM=m +CONFIG_NFSD=m +CONFIG_NFSD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_FS=m +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_V4=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_NAT=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NLS=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_UTF8=m +CONFIG_NONCACHE_PAGE=y +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NUMA=y +CONFIG_NUMA_BALANCING=y +CONFIG_NVME_CORE=y +CONFIG_NVME_TARGET=y +CONFIG_OVERLAY_FS=m +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_PAGE_COUNTER=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PCI=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +CONFIG_PCI_MSI=y +CONFIG_PERF_EVENTS=y +CONFIG_PID_NS=y +CONFIG_PM=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_VMCORE=y +CONFIG_PSI=y +CONFIG_PSTORE=m +CONFIG_PVPANIC=y +CONFIG_QUOTA=y +CONFIG_RAS=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RELOCATABLE=y +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_SYSTOHC=y +CONFIG_SATA_AHCI=y +CONFIG_SCHED_SMT=y +CONFIG_SCSI=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SECCOMP=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIO=y +CONFIG_SHMEM=y +CONFIG_SIGNALFD=y +CONFIG_SLUB=y +CONFIG_SMC=m +CONFIG_SMP=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_SPARSEMEM=y +CONFIG_SPI=y +CONFIG_SQUASHFS=m +CONFIG_STACKTRACE=y +CONFIG_SUNRPC=m 
+CONFIG_SW64_CHIP3=y +CONFIG_SW64_CPUAUTOPLUG=y +CONFIG_SW64_CPUFREQ=y +CONFIG_SWAP=y +CONFIG_SYN_COOKIES=y +CONFIG_SYSCTL=y +CONFIG_SYSFS=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_TAP=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=m +CONFIG_THERMAL=y +CONFIG_TIMERFD=y +CONFIG_TLS=m +CONFIG_TMPFS=y +CONFIG_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TREE_RCU=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TTY=y +CONFIG_TUN=y +CONFIG_UIO=m +CONFIG_UNIX=y +CONFIG_UPROBES=y +CONFIG_USB=y +CONFIG_USB_SUPPORT=y +CONFIG_USERFAULTFD=y +CONFIG_USER_NS=y +CONFIG_UTS_NS=y +CONFIG_VETH=m +CONFIG_VFAT_FS=m +CONFIG_VFIO=m +CONFIG_VFIO_PCI=m +CONFIG_VHOST=m +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=m +CONFIG_VIRTIO=y +CONFIG_VIRTIO_FS=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_NET=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTUALIZATION=y +CONFIG_VLAN_8021Q=m +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_VSOCKETS=m +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_VXLAN=m +CONFIG_WATCHDOG=y +CONFIG_XDP_SOCKETS=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +CONFIG_XFS_FS=y +CONFIG_XPS=y +CONFIG_ZONE_DMA32=y +CONFIG_ZRAM=m diff --git a/anolis/configs/examination/L0-MANDATORY/x86.config b/anolis/configs/examination/L0-MANDATORY/x86.config new file mode 100644 index 0000000000000000000000000000000000000000..19ca9ca965b1db1e31e30226d50b58d6a57566b5 --- /dev/null +++ b/anolis/configs/examination/L0-MANDATORY/x86.config @@ -0,0 +1,395 @@ +# UNLIMITED CONFIG_LSM +# CHOICE CONFIG_NODES_SHIFT 6/8/10 +# RANGE CONFIG_NR_CPUS 1024,8192 + +## CONFIG_SPECULATION_MITIGATIONS has been renamed to CONFIG_CPU_MITIGATIONS on linux stable linux-6.6.y, +## so make these two configs exclusive. +# EXCLUSIVE y CONFIG_SPECULATION_MITIGATIONS CONFIG_CPU_MITIGATIONS + +CONFIG_64BIT=y +CONFIG_ACPI=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_THERMAL=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_AIO=y +CONFIG_AMD_MEM_ENCRYPT=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ATA=m +CONFIG_AUDIT=y +CONFIG_AUTOFS_FS=y +CONFIG_AUXILIARY_BUS=y +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BLK_CGROUP=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_DM=m +CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_BLK_DEV_NVME=m +CONFIG_BLK_DEV_SD=m +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLOCK=y +CONFIG_BONDING=m +CONFIG_BPF=y +CONFIG_BPF_JIT=y +CONFIG_BPF_SYSCALL=y +CONFIG_BRIDGE=m +CONFIG_BUG=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_SCHED=y +CONFIG_COMMON_CLK=y +CONFIG_COMPACTION=y +CONFIG_COREDUMP=y +CONFIG_CPUSETS=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_IDLE=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_CRASH_CORE=y +CONFIG_CRASH_DUMP=y +CONFIG_CRYPTO=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RSA=y 
+CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SIMD=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m +CONFIG_DAX=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEVMEM=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMADEVICES=y +CONFIG_DMA_ENGINE=y +CONFIG_DMI=y +CONFIG_DNOTIFY=y +CONFIG_DNS_RESOLVER=m +CONFIG_DYNAMIC_FTRACE=y +CONFIG_EDAC=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_ELFCORE=y +CONFIG_ELF_CORE=y +CONFIG_EPOLL=y +CONFIG_EROFS_FS=m +CONFIG_ETHTOOL_NETLINK=y +CONFIG_EVENTFD=y +CONFIG_EVM=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_FANOTIFY=y +CONFIG_FAT_FS=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FHANDLE=y +CONFIG_FILE_LOCKING=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FREEZER=y +CONFIG_FSNOTIFY=y +CONFIG_FS_DAX=y +CONFIG_FTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_FUSE_FS=m +CONFIG_FUTEX=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GPIO_ACPI=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_HDMI=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_HOTPLUG_CPU=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_HPET_TIMER=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HWMON=y +CONFIG_HW_RANDOM=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_I2C=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_IMA=y +CONFIG_INET=y +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INFINIBAND=m +CONFIG_INPUT=y +CONFIG_INPUT_KEYBOARD=y +CONFIG_INPUT_MOUSE=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_INTEGRITY=y +CONFIG_INTEL_IOMMU=y +CONFIG_IOMMU_SUPPORT=y +CONFIG_IOSCHED_BFQ=y +CONFIG_IO_URING=y +CONFIG_IPC_NS=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_SI=m +CONFIG_IPV6=y +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_IRQ_REMAP=y +CONFIG_ISO9660_FS=m +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KERNFS=y +CONFIG_KEXEC=y +CONFIG_KEXEC_CORE=y +CONFIG_KEXEC_FILE=y +CONFIG_KEYS=y +CONFIG_KPROBES=y +CONFIG_KRETPROBES=y +CONFIG_KVM=m +CONFIG_KVM_AMD=m +CONFIG_KVM_GUEST=y +CONFIG_KVM_INTEL=m +CONFIG_KVM_MMIO=y +CONFIG_LIVEPATCH=y +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_MD=y +CONFIG_MEMBARRIER=y +CONFIG_MEMCG=y +CONFIG_MEMFD_CREATE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MICROCODE=y +CONFIG_MIGRATION=y +CONFIG_MISC_FILESYSTEMS=y +CONFIG_MMU=y +CONFIG_MODULES=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MULTIUSER=y +CONFIG_NAMESPACES=y +CONFIG_NET=y +CONFIG_NETDEVICES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETLINK_DIAG=m +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NET_CLS=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_CORE=y +CONFIG_NET_NS=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_TEAM=m +CONFIG_NFSD=m +CONFIG_NFSD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_FS=m +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_V4=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_NAT=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NLS=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_UTF8=m +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NTFS3_FS=m +CONFIG_NUMA=y +CONFIG_NUMA_BALANCING=y +CONFIG_NVME_CORE=m 
+CONFIG_NVME_TARGET=m +CONFIG_OVERLAY_FS=m +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_PAGE_COUNTER=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_CLOCK=y +CONFIG_PCI=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +CONFIG_PCI_MSI=y +CONFIG_PERF_EVENTS=y +CONFIG_PID_NS=y +CONFIG_PM=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_VMCORE=y +CONFIG_PSI=y +CONFIG_PSTORE=y +CONFIG_PVPANIC=y +CONFIG_QUOTA=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RAS=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RELOCATABLE=y +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_SATA_AHCI=m +CONFIG_SCHED_MC=y +CONFIG_SCHED_SMT=y +CONFIG_SCSI=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SECCOMP=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIO=y +CONFIG_SHMEM=y +CONFIG_SIGNALFD=y +CONFIG_SLUB=y +CONFIG_SMC=m +CONFIG_SMP=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_SPARSEMEM=y +CONFIG_SPI=y +CONFIG_SQUASHFS=m +CONFIG_STACKPROTECTOR=y +CONFIG_STACKTRACE=y +CONFIG_SUNRPC=m +CONFIG_SWAP=y +CONFIG_SYN_COOKIES=y +CONFIG_SYSCTL=y +CONFIG_SYSFS=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_TAP=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=y +CONFIG_THERMAL=y +CONFIG_THREAD_INFO_IN_TASK=y +CONFIG_TIMERFD=y +CONFIG_TLS=m +CONFIG_TMPFS=y +CONFIG_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TREE_RCU=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TTY=y +CONFIG_TUN=m +CONFIG_UIO=m +CONFIG_UNIX=y +CONFIG_UPROBES=y +CONFIG_USB=y +CONFIG_USB_SUPPORT=y +CONFIG_USERFAULTFD=y +CONFIG_USER_NS=y +CONFIG_UTS_NS=y +CONFIG_VETH=m +CONFIG_VFAT_FS=m +CONFIG_VFIO=m +CONFIG_VFIO_PCI=m +CONFIG_VGA_CONSOLE=y +CONFIG_VHOST=m +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=m +CONFIG_VIRTIO=y +CONFIG_VIRTIO_FS=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_NET=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTUALIZATION=y +CONFIG_VLAN_8021Q=m +CONFIG_VMAP_STACK=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_VSOCKETS=m +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_VXLAN=m +CONFIG_WATCHDOG=y +CONFIG_X86=y +CONFIG_X86_64=y +CONFIG_X86_64_SMP=y +CONFIG_X86_CPUID=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_MCE=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MSR=y +CONFIG_X86_SGX=y +CONFIG_X86_TSC=y +CONFIG_X86_X2APIC=y +CONFIG_XDP_SOCKETS=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +CONFIG_XFS_FS=m +CONFIG_XPS=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DMA=y +CONFIG_ZRAM=m diff --git a/anolis/configs/examination/L1-RECOMMEND/arm64.config b/anolis/configs/examination/L1-RECOMMEND/arm64.config new file mode 100644 index 0000000000000000000000000000000000000000..0d625ac05fd47cfc5f9f5a66a44f3893a8cd3ddd --- /dev/null +++ b/anolis/configs/examination/L1-RECOMMEND/arm64.config @@ -0,0 +1,140 @@ +# UNLIMITED CONFIG_BUILD_SALT +# CHOICE CONFIG_HZ 100/250/1000 +# CONFIG_ARM64_64K_PAGES is not set +# EXCLUSIVE y CONFIG_ARM64_4K_PAGES CONFIG_ARM64_64K_PAGES +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PGTABLE_LEVELS=4 +# UNLIMITED CONFIG_SYSTEM_TRUSTED_KEYS +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_CPPC_CPUFREQ=m +CONFIG_ACPI_HMAT=y 
+CONFIG_ARCH_PHYTIUM=y +CONFIG_ARM64_AMU_EXTN=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM_GIC_PHYTIUM_2500=y +CONFIG_ARM_SMMU_V3_PMU=m +CONFIG_ARM_SMMU_V3_SVA=y +CONFIG_BASE_FULL=y +CONFIG_BLK_PM=y +CONFIG_BNX2=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BTRFS_FS=m +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_CHELSIO_T4=m +CONFIG_CIFS=m +CONFIG_CONFIGFS_FS=y +CONFIG_CORESIGHT=m +CONFIG_CRC16=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_DEV_HISI_HPRE=m +CONFIG_CRYPTO_DEV_HISI_QM=m +CONFIG_CRYPTO_DEV_HISI_SEC2=m +CONFIG_CRYPTO_DEV_HISI_SEC=m +CONFIG_CRYPTO_DEV_HISI_TRNG=m +CONFIG_CRYPTO_DEV_HISI_ZIP=m +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_SM3_ARM64_CE=m +CONFIG_CUSE=m +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +CONFIG_DAMON=y +CONFIG_DRM=m +CONFIG_DRM_PHYTIUM=m +CONFIG_E1000=m +CONFIG_EXT3_FS=m +CONFIG_EXT4_FS=m +CONFIG_FCOE=m +CONFIG_FSCACHE=m +CONFIG_GENERIC_PHY=y +CONFIG_HINIC=m +CONFIG_HISI_THERMAL=m +CONFIG_HNS3=m +CONFIG_HNS=m +CONFIG_I2C_HISI=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_IGB=m +CONFIG_INET_MPTCP_DIAG=m +CONFIG_INFINIBAND_HNS=m +CONFIG_INFINIBAND_HNS_HIP08=y +CONFIG_IO_STRICT_DEVMEM=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_RR=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_JBD2=m +CONFIG_KSM=y +CONFIG_KUNPENG_HCCS=m +CONFIG_LIVEPATCH=y +CONFIG_MACVLAN=m +CONFIG_MEGARAID_SAS=m +CONFIG_MEMCG_KMEM=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MPTCP=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_MTD=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NET_ACT_GACT=m +CONFIG_NET_ACT_POLICE=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_V3=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NGBE=m +CONFIG_NO_HZ_FULL=y +CONFIG_NTB=m +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_TCP=m +CONFIG_PROC_PID_CPUSET=y +CONFIG_PROFILING=y +CONFIG_RATIONAL=y +CONFIG_RESET_HISI=y +CONFIG_RSEQ=y +CONFIG_RTC_INTF_DEV=y +CONFIG_RTC_SYSTOHC=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_CLUSTER=y +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_SPI_HISI_SFC_V3XX=m +CONFIG_SPI_MASTER=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_TASKSTATS=y +CONFIG_TCP_CONG_BBR=m +CONFIG_TXGBE=m +CONFIG_UACCE=m +CONFIG_UNWINDER_ORC=y +CONFIG_USB_ACM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_STORAGE=m +CONFIG_USB_XHCI_HCD=y +CONFIG_VFIO_PLATFORM=m +CONFIG_VIRTIO_BLK=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_ZSMALLOC=y diff --git a/anolis/configs/examination/L1-RECOMMEND/loongarch.config b/anolis/configs/examination/L1-RECOMMEND/loongarch.config new file mode 100644 index 0000000000000000000000000000000000000000..09acb6eaaf436fa6fb4806bffdd76799402dfcc0 --- /dev/null +++ b/anolis/configs/examination/L1-RECOMMEND/loongarch.config @@ -0,0 +1,103 @@ +CONFIG_ARCH_FORCE_MAX_ORDER=11 +# UNLIMITED CONFIG_BUILD_SALT +CONFIG_EXT3_FS=y +CONFIG_HZ=250 +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PGTABLE_LEVELS=3 +# UNLIMITED CONFIG_RANDOMIZE_BASE +# UNLIMITED CONFIG_SYSTEM_TRUSTED_KEYS +# EXCLUSIVE y CONFIG_16KB_2LEVEL CONFIG_16KB_3LEVEL CONFIG_4KB_3LEVEL CONFIG_4KB_4LEVEL CONFIG_64KB_2LEVEL CONFIG_64KB_3LEVEL +CONFIG_ARCH_IOREMAP=y +CONFIG_ARCH_STRICT_ALIGN=y +CONFIG_ARCH_WRITECOMBINE=y +CONFIG_BASE_FULL=y 
+CONFIG_BLK_PM=y +CONFIG_BNX2=y +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BTRFS_FS=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_CHELSIO_T4=m +CONFIG_CIFS=m +CONFIG_CONFIGFS_FS=y +CONFIG_CPU_HAS_LASX=y +CONFIG_CPU_HAS_LBT=y +CONFIG_CPU_HAS_LSX=y +CONFIG_CRC16=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRC32_LOONGARCH=m +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_LZO=m +CONFIG_CUSE=m +CONFIG_DRM=y +CONFIG_DRM_LOONGSON=y +CONFIG_DWMAC_LOONGSON=m +CONFIG_E1000=m +CONFIG_EXT4_FS=y +CONFIG_FB_LS2K500=m +CONFIG_FCOE=m +CONFIG_FSCACHE=m +CONFIG_GPIO_LOONGSON_64BIT=y +CONFIG_I2C_LS2X=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_IGB=m +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_RR=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_JBD2=y +CONFIG_KSM=y +CONFIG_LIVEPATCH=y +CONFIG_MACVLAN=m +CONFIG_MEGARAID_SAS=m +CONFIG_MEMCG_KMEM=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MPTCP=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_MTD=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NET_ACT_GACT=m +CONFIG_NET_ACT_POLICE=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_V3=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NGBE=m +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_TCP=m +CONFIG_PROC_PID_CPUSET=y +CONFIG_PROFILING=y +CONFIG_RATIONAL=y +CONFIG_RSEQ=y +CONFIG_RTC_DRV_LOONGSON=y +CONFIG_RTC_INTF_DEV=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT3SAS=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_SPI_LOONGSON_PCI=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_TASKSTATS=y +CONFIG_TCP_CONG_BBR=m +CONFIG_TXGBE=m +CONFIG_USB_ACM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_STORAGE=m +CONFIG_USB_XHCI_HCD=y +CONFIG_VIRTIO_BLK=m +CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_MMIO=m +CONFIG_ZSMALLOC=y diff --git a/anolis/configs/examination/L1-RECOMMEND/sw_64.config b/anolis/configs/examination/L1-RECOMMEND/sw_64.config new file mode 100644 index 0000000000000000000000000000000000000000..09b5b503d843f654704743de37f4080c4ac86064 --- /dev/null +++ b/anolis/configs/examination/L1-RECOMMEND/sw_64.config @@ -0,0 +1,86 @@ +# UNLIMITED CONFIG_BUILD_SALT +CONFIG_HZ=250 +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PGTABLE_LEVELS=4 +# UNLIMITED CONFIG_RANDOMIZE_BASE +# UNLIMITED CONFIG_SYSTEM_TRUSTED_KEYS +CONFIG_BASE_FULL=y +CONFIG_BLK_PM=y +CONFIG_BNX2=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BTRFS_FS=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_CHELSIO_T4=m +CONFIG_CIFS=m +CONFIG_CONFIGFS_FS=y +CONFIG_CRC16=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_LZO=m +CONFIG_CUSE=m +CONFIG_DRM=y +CONFIG_E1000=m +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y +CONFIG_FCOE=m +CONFIG_FSCACHE=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_IGB=m +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_RR=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_JBD2=y +CONFIG_KSM=y +CONFIG_LIVEPATCH=y +CONFIG_MACVLAN=m +CONFIG_MEGARAID_SAS=m +CONFIG_MEMCG_KMEM=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MPTCP=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_MTD=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NET_ACT_GACT=m +CONFIG_NET_ACT_POLICE=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_V3=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NGBE=m +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_TCP=m +CONFIG_PROC_PID_CPUSET=y +CONFIG_PROFILING=y 
+CONFIG_RATIONAL=y +CONFIG_RSEQ=y +CONFIG_RTC_INTF_DEV=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_TASKSTATS=y +CONFIG_TCP_CONG_BBR=m +CONFIG_TXGBE=m +CONFIG_USB_ACM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_STORAGE=y +CONFIG_USB_XHCI_HCD=y +CONFIG_VIRTIO_BLK=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_VIRTIO_MMIO=y +CONFIG_ZSMALLOC=y diff --git a/anolis/configs/examination/L1-RECOMMEND/x86.config b/anolis/configs/examination/L1-RECOMMEND/x86.config new file mode 100644 index 0000000000000000000000000000000000000000..1bfc8b392b014e986eba7279cf418f402be58e61 --- /dev/null +++ b/anolis/configs/examination/L1-RECOMMEND/x86.config @@ -0,0 +1,139 @@ +# UNLIMITED CONFIG_BUILD_SALT +# CHOICE CONFIG_HZ 100/250/1000 +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_PGTABLE_LEVELS=5 +# UNLIMITED CONFIG_SYSTEM_TRUSTED_KEYS +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_HMAT=y +CONFIG_BASE_FULL=y +CONFIG_BLK_PM=y +CONFIG_BNX2=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BTRFS_FS=m +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_CHELSIO_T4=m +CONFIG_CIFS=m +CONFIG_COMPAT=y +CONFIG_CONFIGFS_FS=y +CONFIG_CRC16=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_ZHAOXIN=m +CONFIG_CRYPTO_DEV_ZHAOXIN_AES=m +CONFIG_CRYPTO_DEV_ZHAOXIN_SHA=m +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_SM3_AVX_X86_64=m +CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m +CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m +CONFIG_CUSE=m +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +CONFIG_DAMON=y +CONFIG_DRM=m +CONFIG_E1000=m +CONFIG_EXT3_FS=m +CONFIG_EXT4_FS=m +CONFIG_FCOE=m +CONFIG_FSCACHE=m +CONFIG_HINIC=m +CONFIG_HW_RANDOM_ZHAOXIN=m +CONFIG_I2C_ZHAOXIN=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_IGB=m +CONFIG_INET_MPTCP_DIAG=m +CONFIG_INTEL_IDLE=y +CONFIG_INTEL_IDXD_BUS=m +CONFIG_INTEL_IFS=m +CONFIG_INTEL_PMC_CORE=m +CONFIG_INTEL_PMT_CLASS=m +CONFIG_INTEL_TPMI=m +CONFIG_IO_STRICT_DEVMEM=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_RR=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_JBD2=m +CONFIG_KSM=y +CONFIG_MACVLAN=m +CONFIG_MEGARAID_SAS=m +CONFIG_MEMCG_KMEM=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MPTCP=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_MTD=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NET_ACT_GACT=m +CONFIG_NET_ACT_POLICE=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_V3=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NGBE=m +CONFIG_NO_HZ_FULL=y +CONFIG_NTB=m +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_TCP=m +CONFIG_PARAVIRT_SPINLOCKS=y +CONFIG_PINCTRL_KX7000=m +CONFIG_PINCTRL_ZHAOXIN=m +CONFIG_PROC_PID_CPUSET=y +CONFIG_PROFILING=y +CONFIG_RATIONAL=y +CONFIG_RSEQ=y +CONFIG_RTC_INTF_DEV=y +CONFIG_RTC_SYSTOHC=y +CONFIG_SATA_ZHAOXIN=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_CLUSTER=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_TASKSTATS=y +CONFIG_TCP_CONG_BBR=m +CONFIG_TXGBE=m +CONFIG_UACCE=m +CONFIG_UNWINDER_ORC=y +CONFIG_USB_ACM=m +CONFIG_USB_EHCI_HCD=y 
+CONFIG_USB_EHCI_PCI=y
+CONFIG_USB_STORAGE=m
+CONFIG_USB_XHCI_HCD=y
+CONFIG_VFIO_IOMMU_TYPE1=m
+CONFIG_VFIO_MDEV=m
+CONFIG_VIRTIO_BLK=m
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_VIRTIO_MMIO=m
+CONFIG_VIRT_CPU_ACCOUNTING=y
+CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_DIRECT_GBPAGES=y
+CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
+CONFIG_X86_INTEL_PSTATE=y
+CONFIG_X86_IOPL_IOPERM=y
+CONFIG_X86_MPPARSE=y
+CONFIG_X86_VSYSCALL_EMULATION=y
+CONFIG_ZSMALLOC=y
diff --git a/anolis/configs/examination/README.md b/anolis/configs/examination/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..78480608a03ffc78be51d7251cbc87a99eaefe93
--- /dev/null
+++ b/anolis/configs/examination/README.md
@@ -0,0 +1,33 @@
+# Background
+This directory holds the kconfig checking rules, so that kconfigs can be checked for rule violations.
+
+# Directory layout
+- L0-MANDATORY/: rules that **must** be obeyed; a violation is treated as a failure
+- L1-RECOMMEND/: rules that are **recommended**; a violation only produces a warning
+- {L0-MANDATORY, L1-RECOMMEND}/{x86/arm64/loongarch/sw_64}.config: the kconfig rules for the x86, arm64, LoongArch (Loongson), and SW64 (Sunway) platforms
+
+# Rule file format
+Each line of a rule file holds one rule, as follows:
+1. `CONFIG_FOO=value`
+CONFIG_FOO must appear in the config file, and its value must be value
+
+2. `# CONFIG_FOO is not set`
+CONFIG_FOO must appear in the config file, and its value must be "not set"
+
+3. `# UNLIMITED CONFIG_FOO`
+No constraint is placed on CONFIG_FOO
+
+4. `# CHOICE CONFIG_FOO a/b/c`
+CONFIG_FOO must appear in the config file, and its value must be one of a/b/c
+
+5. `# RANGE CONFIG_FOO a,b`
+CONFIG_FOO must appear in the config file; its value is an integer and must lie within [a, b]
+
+6. `# EXCLUSIVE value CONFIG_FOO1 [CONFIG_FOO2 ...]`
+Of CONFIG_FOO1, CONFIG_FOO2, etc., exactly one may appear in the config file, and its value must be value
+
+7. `## xxxx`
+This line is a comment
+
+# Usage
+After cloning this repository, just run `cd anolis; make dist-configs-check`.
diff --git a/anolis/configs/examination/anolis_kconfig_check.py b/anolis/configs/examination/anolis_kconfig_check.py
new file mode 100644
index 0000000000000000000000000000000000000000..de85fc6592870c86c01ce39c8d8dd2fcdfffdcdf
--- /dev/null
+++ b/anolis/configs/examination/anolis_kconfig_check.py
@@ -0,0 +1,337 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# The core script for ANCK kconfig checking.
+# It is not recommended to call it directly.
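+#
+# A minimal illustrative invocation (the file paths are hypothetical; the
+# supported entry point is `make dist-configs-check`, see configs-check.sh):
+#
+#   python3 anolis_kconfig_check.py check \
+#       --rules L0-MANDATORY/x86.config --level L0-MANDATORY \
+#       --rules L1-RECOMMEND/x86.config --level L1-RECOMMEND \
+#       path/to/the/kernel.config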
+#
+# Copyright (C) 2024 Qiao Ma
+
+import argparse, re
+from typing import List, Type, Dict, Tuple
+from enum import Enum
+
+def die(args: str):
+    print(args)
+    exit(1)
+
+def default_args_func(args):
+    pass
+
+class Config():
+    name: str
+    value: str
+
+    def __init__(self, name, value) -> None:
+        self.name = name
+        self.value = value
+
+    @staticmethod
+    def from_text(line: str) -> Type["Config"]:
+        RE_CONFIG_SET = r'^(CONFIG_\w+)=(.*)$'
+        RE_CONFIG_NOT_SET = r'^# (CONFIG_\w+) is not set$'
+
+        if re.match(RE_CONFIG_SET, line):
+            obj = re.match(RE_CONFIG_SET, line)
+            return Config(name=obj.group(1), value=obj.group(2))
+        elif re.match(RE_CONFIG_NOT_SET, line):
+            obj = re.match(RE_CONFIG_NOT_SET, line)
+            return Config(name=obj.group(1), value="n")
+        return None
+
+class ConfigList():
+    configs: Dict[str, Config]
+
+    def __init__(self) -> None:
+        self.configs = {}
+
+    @staticmethod
+    def from_file(file: str) -> Type["ConfigList"]:
+        confs = ConfigList()
+        with open(file) as f:
+            for line in f.readlines():
+                conf = Config.from_text(line)
+                if conf is None:
+                    continue
+                confs.configs[conf.name] = conf
+        return confs
+
+    def get(self, name) -> Type["Config"]:
+        return self.configs.get(name, None)
+
+ResultKind = Enum("ResultKind", ("SUCCESS", "MISS", "WRONG_VALUE", "NOT_IN_CHOICE", "NOT_IN_RANGE", "EXCLUSIVE_ERROR"))
+RuleLevel = Enum("RuleLevel", ("L0_MANDATORY", "L1_RECOMMEND"))
+
+class CheckResult():
+    name: str
+    kind: ResultKind
+    level: RuleLevel
+    text: str
+
+    def __init__(self, level: RuleLevel, kind: ResultKind, name: str, text: str) -> None:
+        self.level = level
+        self.kind = kind
+        self.name = name
+        self.text = text
+
+    def is_fatal_error(self):
+        return self.kind != ResultKind.SUCCESS and self.level == RuleLevel.L0_MANDATORY
+
+    def __str__(self) -> str:
+        if self.kind == ResultKind.SUCCESS:
+            return ""
+        if self.level == RuleLevel.L0_MANDATORY:
+            return f"ERROR: {self.text}\n"
+        return f"WARNING: {self.text}\n"
+
+    @staticmethod
+    def success():
+        return CheckResult(RuleLevel.L0_MANDATORY, ResultKind.SUCCESS, "", "")
+
+    @staticmethod
+    def miss(level: RuleLevel, name: str):
+        return CheckResult(level, ResultKind.MISS, name, f"missed: {name}")
+
+    @staticmethod
+    def group_miss(level: RuleLevel, confs: List[str]):
+        conf_list = " ".join(confs)
+        return CheckResult(level, ResultKind.MISS, "", f"missed: none of the following configs exists: {conf_list}")
+
+    @staticmethod
+    def wrong_value(level: RuleLevel, name: str, expected: str, real: str):
+        return CheckResult(level, ResultKind.WRONG_VALUE, name,
+            f"wrong_value: {name}, expected: {expected}, real: {real}")
+
+    @staticmethod
+    def not_in_choice(level: RuleLevel, name: str, real_value: str, values: List[str]):
+        str_values = ",".join(values)
+        return CheckResult(level, ResultKind.NOT_IN_CHOICE, name,
+            f"not_in_choice: {name} {real_value} not in [{str_values}]")
+
+    @staticmethod
+    def not_in_range(level: RuleLevel, name: str, real_value: int, start: int, end: int):
+        return CheckResult(level, ResultKind.NOT_IN_RANGE, name, f"not_in_range: {name} {real_value} not in range [{start}, {end}]")
+
+    @staticmethod
+    def exclusive_error(level: RuleLevel, confs: List[str]):
+        str_confs = ",".join(confs)
+        return CheckResult(level, ResultKind.EXCLUSIVE_ERROR, "", f"exclusive error: expected exactly one to appear, but the following configs appear: {str_confs}")
+
+class Rule():
+    subclasses = []
+
+    def __init_subclass__(cls, **kwargs):
+        super().__init_subclass__(**kwargs)
+        Rule.subclasses.append(cls)
+
+    @staticmethod
+    def try_parse(line: str, level: RuleLevel):
+        raise NotImplementedError
+
+    def check(self, confs: ConfigList):
+        raise NotImplementedError
+
+    @staticmethod
+    def parse(line: str, level: RuleLevel):
+        for subclass in Rule.subclasses:
+            result = subclass.try_parse(line, level)
+            if result is not None:
+                return result
+        die(f"cannot parse: {line}")
+
+class RuleList():
+    rules: List[Rule]
+
+    def __init__(self):
+        self.rules = []
+
+    @staticmethod
+    def from_file(path: str, level: RuleLevel) -> Type["RuleList"]:
+        rl = RuleList()
+        with open(path) as f:
+            for line in f.readlines():
+                line = line.strip()
+                if line == "" or line.startswith("##"):
+                    continue
+                rule = Rule.parse(line, level)
+                rl.rules.append(rule)
+        return rl
+
+    def check(self, confs: ConfigList) -> List[CheckResult]:
+        results : List[CheckResult] = []
+        for rule in self.rules:
+            res = rule.check(confs)
+            results.append(res)
+        return results
+
+    def merge(self, rhs: Type["RuleList"]):
+        self.rules.extend(rhs.rules)
+
+class ValueRule(Rule):
+    """CONFIG_FOO=value or '# CONFIG_FOO is not set': the config must have exactly this value."""
+    conf: Config
+    level: RuleLevel
+
+    @staticmethod
+    def try_parse(line: str, level: RuleLevel):
+        rule = ValueRule()
+        conf = Config.from_text(line)
+        if conf is None:
+            return None
+        rule.conf = conf
+        rule.level = level
+        return rule
+
+    def check(self, confs: ConfigList):
+        name = self.conf.name
+        conf = confs.get(name)
+        if conf is None:
+            return CheckResult.miss(self.level, self.conf.name)
+        if conf.value != self.conf.value:
+            return CheckResult.wrong_value(self.level, name, self.conf.value, conf.value)
+        return CheckResult.success()
+
+class UnlimitedRule(Rule):
+    """# UNLIMITED CONFIG_FOO: no constraint is placed on the config."""
+    @staticmethod
+    def try_parse(line: str, level: RuleLevel):
+        RE_CONF_UNLIMITED = r'^# UNLIMITED CONFIG_\w+$'
+        if not re.match(RE_CONF_UNLIMITED, line):
+            return None
+        return UnlimitedRule()
+
+    def check(self, confs: ConfigList):
+        return CheckResult.success()
+
+class ChoiceRule(Rule):
+    """# CHOICE CONFIG_FOO a/b/c: the config value must be one of a/b/c."""
+    name: str
+    values: List[str]
+
+    def __init__(self, level, name, values) -> None:
+        self.level = level
+        self.name = name
+        self.values = values
+
+    @staticmethod
+    def try_parse(line: str, level: RuleLevel):
+        RE_CONF_CHOICE = r'^#\s*CHOICE\s+(CONFIG_\w+)\s+([\w,\/]+)$'
+        obj = re.match(RE_CONF_CHOICE, line)
+        if obj is None:
+            return None
+        name = obj.group(1)
+        values = obj.group(2)
+        return ChoiceRule(level, name, values.split("/"))
+
+    def check(self, confs: ConfigList):
+        conf = confs.get(self.name)
+        if conf is None:
+            return CheckResult.miss(self.level, self.name)
+        if conf.value not in self.values:
+            return CheckResult.not_in_choice(self.level, self.name, conf.value, self.values)
+        return CheckResult.success()
+
+class RangeRule(Rule):
+    """# RANGE CONFIG_FOO a,b: the integer config value must lie within [a, b]."""
+    level: RuleLevel
+    name: str
+    start: int
+    end: int
+
+    def __init__(self, level: RuleLevel, name: str, start: int, end: int) -> None:
+        self.level = level
+        self.name = name
+        self.start = start
+        self.end = end
+
+    @staticmethod
+    def try_parse(line: str, level: RuleLevel):
+        RE_CONF_RANGE = r'^#\s*RANGE\s+(CONFIG_\w+)\s+(\d+)\,(\d+)$'
+        obj = re.match(RE_CONF_RANGE, line)
+        if obj is None:
+            return None
+        return RangeRule(level, obj.group(1), int(obj.group(2)), int(obj.group(3)))
+
+    def check(self, confs: ConfigList):
+        conf = confs.get(self.name)
+        if conf is None:
+            return CheckResult.miss(self.level, self.name)
+        val = int(conf.value)
+        if self.start <= val <= self.end:
+            return CheckResult.success()
+        return CheckResult.not_in_range(self.level, self.name, val, self.start, self.end)
+
+class ExclusiveRule(Rule):
+    level: RuleLevel
+    value: str
+    confs: List[str]
+
+    def __init__(self, level: RuleLevel, value: str, confs: List[str]) -> None:
+        self.level = level
+        self.value = value
+        self.confs = confs
+
+    @staticmethod
+    def try_parse(line: str, level: RuleLevel):
+        """# EXCLUSIVE value CONFIG_XXX [CONFIG_XXX ...]"""
+        RE_CONF_EXCLUSIVE = r'^#\s*EXCLUSIVE\s+(\w+)\s+(.*)$'
+        obj = re.match(RE_CONF_EXCLUSIVE, line)
+        if obj is None:
+            return None
+        value = obj.group(1)
+        confs = obj.group(2).split()
+        if len(confs) == 0:
+            return None
+        return ExclusiveRule(level, value, confs)
+
+    def check(self, confs: ConfigList):
+        appears : List[Config] = []
+        for name in self.confs:
+            conf = confs.get(name)
+            if conf is not None and conf.value != 'n':
+                appears.append(conf)
+        if len(appears) == 0:
+            return CheckResult.group_miss(self.level, self.confs)
+        if len(appears) != 1:
+            return CheckResult.exclusive_error(self.level, [x.name for x in appears])
+        if appears[0].value != self.value:
+            return CheckResult.wrong_value(self.level, appears[0].name, self.value, appears[0].value)
+        return CheckResult.success()
+
+def level_of(l: str) -> RuleLevel:
+    if l == "L0-MANDATORY":
+        return RuleLevel.L0_MANDATORY
+    elif l == "L1-RECOMMEND":
+        return RuleLevel.L1_RECOMMEND
+    die(f"unknown level {l}")
+
+def do_check(args):
+    confs = ConfigList.from_file(args.config)
+    rules = RuleList()
+
+    if len(args.rules) != len(args.level):
+        die("the number of --level and --rules options does not match")
+
+    for i, rule_file in enumerate(args.rules):
+        rules.merge(RuleList.from_file(rule_file, level_of(args.level[i])))
+    results = rules.check(confs)
+
+    fatal_error = False
+    result_text = ""
+    for r in results:
+        result_text += str(r)
+        fatal_error = fatal_error or r.is_fatal_error()
+
+    if result_text == "":
+        result_text = "PASS\n"
+    print(result_text)
+    exit(1 if fatal_error else 0)
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='check configs')
+    parser.set_defaults(func=default_args_func)
+    subparsers = parser.add_subparsers()
+
+    checker = subparsers.add_parser("check")
+    checker.add_argument("--rules", action='append', default=[], help="the kconfig checking rule files")
+    checker.add_argument("--level", action='append', default=[], help="the levels (L0-MANDATORY or L1-RECOMMEND) matching the --rules files, in order")
+    checker.add_argument("config", help="the config files to be checked")
+    checker.set_defaults(func=do_check)
+
+    args = parser.parse_args()
+    args.func(args)
diff --git a/anolis/configs/examination/configs-check.sh b/anolis/configs/examination/configs-check.sh
new file mode 100644
index 0000000000000000000000000000000000000000..468b8a00aa278ea62863631590eb151f1fdf6a06
--- /dev/null
+++ b/anolis/configs/examination/configs-check.sh
@@ -0,0 +1,56 @@
+#! /bin/bash
+# Check whether the kconfigs obey the constraints.
+# It is called from the Makefile; do not run it directly.
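+#
+# Output semantics (see anolis_kconfig_check.py above): violated
+# L0-MANDATORY rules are printed as "ERROR: ..." and make the check fail,
+# while violated L1-RECOMMEND rules are printed as "WARNING: ..." only;
+# this script exits non-zero if any checked arch fails an L0-MANDATORY rule.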
+#
+# usage:
+# - check only one arch:
+#     ARCH=${arch} make dist-configs-check
+#   available archs are: x86, arm64, loongarch
+# - check all archs:
+#     make dist-configs-check
+
+SCRIPT_DIR=$(realpath $(dirname $0))
+
+final_exit_status=0
+
+function check_arch() {
+    local arch=$1
+
+    local opt="--rules ${SCRIPT_DIR}/L0-MANDATORY/${arch}.config
+               --level L0-MANDATORY
+               --rules ${SCRIPT_DIR}/L1-RECOMMEND/${arch}.config
+               --level L1-RECOMMEND "
+
+    if [ -f ${SCRIPT_DIR}/EXTRA/${arch}.config ]; then
+        opt="${opt} --rules ${SCRIPT_DIR}/EXTRA/${arch}.config
+             --level L0-MANDATORY "
+    fi
+
+    if [ -f ${SCRIPT_DIR}/../../../arch/${arch}/configs/anolis_defconfig ]; then
+        opt="${opt} ${SCRIPT_DIR}/../../../arch/${arch}/configs/anolis_defconfig"
+    else
+        opt="${opt} ${DIST_OUTPUT}/kernel-ANCK-generic-${arch}.config"
+    fi
+
+    echo "* Checking configs for arch: $arch"
+    python3 ${SCRIPT_DIR}/anolis_kconfig_check.py check ${opt}
+
+    local ret=$?
+    if [ $final_exit_status -eq 0 ]; then
+        final_exit_status=$ret
+    fi
+}
+
+# the arch sw_64 is not available yet
+arch_list=("x86" "arm64" "loongarch")
+
+if [ -n "${ARCH}" ]; then
+    arch_list=(${ARCH})
+fi
+
+for arch in ${arch_list[@]}
+do
+    check_arch $arch
+done
+
+exit $final_exit_status
diff --git a/anolis/configs/metadata/changelog/CONFIG_LSM b/anolis/configs/metadata/changelog/CONFIG_LSM
new file mode 100644
index 0000000000000000000000000000000000000000..a7feeea4ae7d33e1dafd767cf224b476b2a95580
--- /dev/null
+++ b/anolis/configs/metadata/changelog/CONFIG_LSM
@@ -0,0 +1,6 @@
+Enabling lsm=bpf by default would cause a 5%~11% performance
+regression in lmbench/tcp and lmbench/syscall.
+
+Most users do not need lsm=bpf, and those who do can enable it
+via the kernel command line. Thus we do not enable lsm=bpf by
+default.
diff --git a/anolis/configs/metadata/changelog/CONFIG_PREEMPT_VOLUNTARY b/anolis/configs/metadata/changelog/CONFIG_PREEMPT_VOLUNTARY
new file mode 100644
index 0000000000000000000000000000000000000000..680ba446eb338ab3b84d7c025bbd575b45c97507
--- /dev/null
+++ b/anolis/configs/metadata/changelog/CONFIG_PREEMPT_VOLUNTARY
@@ -0,0 +1,3 @@
+For compatibility reasons, we introduced dynamic preemption, set the preempt Kconfig option
+in ANCK to Voluntary, and added 'preempt=none' to the boot cmdline to ensure that the
+preemption mode remains consistent with previous settings.
diff --git a/anolis/configs/metadata/changelog/CONFIG_VIRT_PLAT_DEV b/anolis/configs/metadata/changelog/CONFIG_VIRT_PLAT_DEV
new file mode 100644
index 0000000000000000000000000000000000000000..71f781a9b647fa73219adc3896a2904736a55408
--- /dev/null
+++ b/anolis/configs/metadata/changelog/CONFIG_VIRT_PLAT_DEV
@@ -0,0 +1,3 @@
+Backport patches from openEuler to support direct LPI irq injection for virt platform devices (ANBZ: #9398).
+
+It is required by internal users (Aone: 56141911).
diff --git a/anolis/configs/scripts/anolis_kconfig.py b/anolis/configs/scripts/anolis_kconfig.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a19d9df594580b78badbfed027f05ef61354907
--- /dev/null
+++ b/anolis/configs/scripts/anolis_kconfig.py
@@ -0,0 +1,858 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# The core script for the ANCK kconfig baseline.
+# It is not recommended to call it directly.
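+#
+# Layout of the baseline directory this script manages (an illustrative
+# sketch derived from PathManager.as_path() below; the CONFIG names are
+# placeholders):
+#
+#   anolis/configs/
+#     L0-MANDATORY/default/CONFIG_SMP      "CONFIG_SMP=y", shared by x86/arm64
+#     L0-MANDATORY/x86/CONFIG_FOO          arch-specific value
+#     L1-RECOMMEND/arm64-64k/CONFIG_BAR    {arch}-{subarch} directory
+#     UNKNOWN/CONFIG_BAZ/x86/CONFIG_BAZ    unclassified: extra per-config level
+#     OVERRIDE/<dist>/L0-MANDATORY/...     per-dist overrides of the ANCK base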
+# +# Copyright (C) 2023 Qiao Ma + +import argparse, re, os, glob, shutil, copy +from typing import List, Dict, Type, Callable, Tuple +import json +from collections import Counter +import fnmatch +import functools + +def die(*args, **kwargs): + print(*args, **kwargs) + exit(1) + +class Rules(): + @staticmethod + def levels() -> List[str]: + return ["L0-MANDATORY", "L1-RECOMMEND", "L2-OPTIONAL", "UNKNOWN"] + + @staticmethod + def base_dist(): + return "ANCK" + + @staticmethod + def as_config_text(name: str, value: str) -> str: + if value is None or value == "n": + return f"# {name} is not set\n" + else: + return f"{name}={value}\n" + +class PathIterContext(): + dist: str + level: str + arch: str + subarch: str + name: str + path: str + data: any + + def __init__(self, data: any, dist: str, level: str, arch: str, subarch: str, name: str, path: str) -> None: + self.data = data + self.dist = dist + self.level = level + self.arch = arch + self.subarch = subarch + self.name = name + self.path = path + +class PathManager(): + @staticmethod + def dists(top_dir: str, dists: List[str] = None) -> List[str]: + dist_list = [Rules.base_dist()] + override_dir = os.path.join(top_dir, "OVERRIDE") + if os.path.exists(override_dir): + dist_list.extend(os.listdir(override_dir)) + + if dists is None: + return dist_list + return list(set(dist_list).intersection(set(dists))) + + @staticmethod + def dist_to_path(top_dir: str, dist: str) -> str: + if dist == Rules.base_dist(): + return top_dir + return os.path.join(top_dir, "OVERRIDE", dist) + + @staticmethod + def levels(dist_dir: str, levels: List[str] = None) -> List[str]: + all_levels = [] + for d in os.listdir(dist_dir): + if not os.path.isdir(os.path.join(dist_dir, d)): + continue + if not re.match('^L[0-9].*|UNKNOWN', d): + continue + all_levels.append(d) + + if levels is None: + return all_levels + return list(set(all_levels).intersection(set(levels))) + + @staticmethod + def archs(variant_dir: str, archs: List[str] = None) -> List[str]: + all_archs = os.listdir(variant_dir) + return all_archs if archs is None else list(set(all_archs).intersection(set(archs))) + + @staticmethod + def __for_each_arch(level_dir: str, data: any, func: Callable[[PathIterContext], None], dist: str, level: str, archs: List[str] = None, subarchs: List[str] = None): + for arch_dir in os.listdir(level_dir): + if "-" in arch_dir: + arch, subarch = arch_dir.split("-", maxsplit=1) + else: + arch = arch_dir + subarch = None + if archs is not None and arch not in archs: + continue + if subarchs is not None and subarch is not None and subarch not in subarchs: + continue + full_arch_dir = os.path.join(level_dir, arch_dir) + for conf in os.listdir(full_arch_dir): + path = os.path.join(full_arch_dir, conf) + context = PathIterContext(data, dist, level, arch, subarch, conf, path) + func(context) + + @staticmethod + def for_each(top_dir: str, data: any, func: Callable[[PathIterContext], None], dists: List[str] = None, levels: List[str] = None, archs: List[str] = None, subarchs: List[str] = None): + for dist in PathManager.dists(top_dir, dists): + dist_dir = PathManager.dist_to_path(top_dir, dist) + for level in PathManager.levels(dist_dir, levels): + level_dir = os.path.join(dist_dir, level) + if level != "UNKNOWN": + PathManager.__for_each_arch(level_dir, data, func, dist, level, archs, subarchs) + else: + for conf in os.listdir(level_dir): + PathManager.__for_each_arch(os.path.join(level_dir, conf), data, func, dist, level, archs, subarchs) + + @staticmethod + def as_level_dir(top_dir: str, 
dist: str, level: str): + path = PathManager.dist_to_path(top_dir, dist) + path = os.path.join(path, level) + return path + + @staticmethod + def as_path(top_dir: str, dist: str, level: str, arch: str, subarch: str, name: str): + path = PathManager.as_level_dir(top_dir, dist, level) + if level == "UNKNOWN": + path = os.path.join(path, name) + if subarch is None: + path = os.path.join(path, arch, name) + else: + path = os.path.join(path, f"{arch}-{subarch}", name) + return path + + +def default_args_func(args): + pass + +class LevelInfo(): + info: Dict[str, str] + + def __init__(self) -> None: + self.info = {} + + def get(self, conf: str) -> str: + return self.info.get(conf, "UNKNOWN") + + def merge_with_base(self, base: Type["LevelInfo"]): + if base is None: + return + for name, level in base.info.items(): + if name not in self.info: + self.info[name] = level + + @staticmethod + def __collect_info(ctx: PathIterContext): + level_info: Dict[str, str] = ctx.data + level_info[ctx.name] = ctx.level + + @staticmethod + def build(path: str, dist: str) -> Type["LevelInfo"]: + info = LevelInfo() + PathManager.for_each(path, info.info, LevelInfo.__collect_info, dists=[dist]) + return info + + @staticmethod + def load(file: str): + info = LevelInfo() + with open(file) as f: + info.info = json.loads(f.read()) + return info + +class Config(): + name: str + value: str + arch: str + subarch: str + level: str + dist: str + + def __init__(self, name: str, value: str, dist: str = None, level: str = "UNKNOWN", arch: str = None, subarch: str = None) -> None: + self.name = name + self.value = value + self.dist = dist + self.level = level + self.arch = arch + self.subarch = subarch + + @staticmethod + def from_text(line: str, dist: str, arch: str, subarch: str) -> Type["Config"] : + RE_CONFIG_SET = r'^(CONFIG_\w+)=(.*)$' + RE_CONFIG_NOT_SET = r'^# (CONFIG_\w+) is not set$' + + if re.match(RE_CONFIG_SET, line): + obj = re.match(RE_CONFIG_SET, line) + return Config(name=obj.group(1), value=obj.group(2), dist=dist, arch=arch, subarch=subarch) + elif re.match(RE_CONFIG_NOT_SET, line): + obj = re.match(RE_CONFIG_NOT_SET, line) + return Config(name=obj.group(1), value="n", dist=dist, arch=arch, subarch=subarch) + return None + + def as_text(self) -> str: + return Rules.as_config_text(self.name, self.value) + + def as_path(self, top_dir: str) -> str: + return PathManager.as_path(top_dir, self.dist, self.level, self.arch, self.subarch, self.name) + + def as_file(self, top_dir: str): + text = self.as_text() + path = self.as_path(top_dir) + os.makedirs(os.path.dirname(path), exist_ok=True) + with open(path, "w") as f: + f.write(text) + +class ConfigList(): + arch: str + dist: str + subarch: str + configs: Dict[str, Config] + + def __init__(self, dist: str, arch: str, subarch: str = None) -> None: + self.dist = dist + self.arch = arch + self.subarch = subarch + self.configs = {} + + def lists(self) -> List[Config]: + return list(self.configs.values()) + + def diff_to_base(self, base: Type["ConfigList"], level_info: LevelInfo): + same_configs = [] + for name, conf in self.configs.items(): + if name not in base.configs: + continue + if conf.value != base.configs[name].value: + continue + same_configs.append(name) + + for name in base.configs: + if name not in self.configs: + self.configs[name] = Config(name, value=None, dist=self.dist, arch=self.arch, level=level_info.get(name)) + + for name in same_configs: + del self.configs[name] + + def merge_with_base(self, base: Type["ConfigList"]): + if base is None: + return + for 
name, conf in base.configs.items(): + if name not in self.configs: + self.configs[name] = conf + + def dump_as_file(self, top_dir: str): + for conf in self.configs.values(): + conf.as_file(top_dir) + + def as_text(self): + text = "" + for conf in self.configs.values(): + text = text + conf.as_text() + return text + + @staticmethod + def from_path(path: str, dist: str, arch: str, subarch: str = None, level_info: LevelInfo = None, level: str = None) -> Type["ConfigList"]: + if level_info is not None and level is not None: + die("the argument level_info and level cannot be passed together") + if level_info is None and level is None: + level = "UNKNOWN" + + conflist = ConfigList(dist, arch, subarch) + with open(path) as f: + for line in f.readlines(): + conf = Config.from_text(line, dist, arch, subarch) + if conf is None: + continue + if level_info is not None: + conf.level = level_info.get(conf.name) + else: + conf.level = level + conflist.configs[conf.name] = conf + return conflist + +class LevelCollector(): + @staticmethod + def do_collect(args): + info = LevelInfo.build(args.top_dir, args.dist) + if args.base is not None: + base_info = None + for base in args.base: + cur_base = LevelInfo.build(args.top_dir, base) + cur_base.merge_with_base(base_info) + base_info = cur_base + info.merge_with_base(base_info) + print(json.dumps(info.info, ensure_ascii=False, indent=2)) + +class Importer(): + @staticmethod + def do_import(args): + level_info = LevelInfo.load(args.level_info) + conflist = ConfigList.from_path(path=args.config, dist=args.dist, arch=args.arch, subarch=args.subarch, level_info=level_info) + conflist.dump_as_file(args.top_dir) + +class Generator(): + @staticmethod + def collect_config(ctx: PathIterContext): + conflist : ConfigList = ctx.data + cur_conf = ConfigList.from_path(path=ctx.path, dist=ctx.dist, arch=ctx.arch, subarch=ctx.subarch) + conflist.merge_with_base(cur_conf) + + @staticmethod + def do_generate(args): + dist = args.dist + archdir = args.archdir + if "-" in archdir: + arch, subarch = archdir.split("-", maxsplit=1) + else: + arch, subarch = archdir, None + conflist = ConfigList(dist, arch, subarch) + subarchs = None if subarch is None else [subarch] + PathManager.for_each(args.top_dir, conflist, Generator.collect_config, dists=[dist], archs=[arch], subarchs=subarchs) + print(conflist.as_text()) + +class Merger(): + @staticmethod + def do_merge(args): + conflist = None + for file in args.file: + cur_conflist = ConfigList.from_path(file, dist="", arch="") + cur_conflist.merge_with_base(conflist) + conflist = cur_conflist + + print(conflist.as_text()) + +class Collapser(): + # for configs, the keys are: conf_name, arch + configs: Dict[str, Dict[str, Config]] + archs: set + + def __init__(self) -> None: + self.configs = {} + self.archs = set() + + @staticmethod + def __do_collect_info(ctx: PathIterContext): + c: Collapser = ctx.data + configs: Dict[str, Dict[str, Config]] = c.configs + archs = c.archs + + full_arch = ctx.arch + if ctx.subarch is not None: + full_arch = f"{ctx.arch}-{ctx.subarch}" + archs.add(full_arch) + + conflist = ConfigList.from_path(path=ctx.path, dist=ctx.dist, arch=ctx.arch, subarch=ctx.subarch, level=ctx.level) + for conf in conflist.lists(): + if conf.name not in configs: + configs[conf.name] = {} + configs[conf.name][full_arch] = conf + + @staticmethod + def __collapse_one_config(arch_confs: Dict[str, Config], archs: set, top_dir: str): + # the default value is only depends on arch x86 and arm64. + # For example: + # 1. 
the configs "x86 y, arm64 y, sw_64 m/n" will be collpased to "default y, sw_64 m/n" + # 2. the configs "x86 y, arm64 y, sw_64 y" will be collpased to "default y" + # 3. the configs "x86 y, arm64 m, sw_64 y" will not be collpased + if "x86" not in arch_confs or "arm64" not in arch_confs: + return + if arch_confs["x86"].value != arch_confs["arm64"].value: + return + common_conf = copy.deepcopy(arch_confs["x86"]) + common_conf.arch = "default" + common_conf.subarch = None + + for arch in archs: + if arch in arch_confs: + conf = arch_confs[arch] + if conf.value == common_conf.value: + os.remove(conf.as_path(top_dir)) + else: + miss_conf = copy.deepcopy(common_conf) + miss_conf.arch = arch + miss_conf.subarch = None + miss_conf.value = "n" + miss_conf.as_file(top_dir) + common_conf.as_file(top_dir) + + @staticmethod + def do_collapse(args): + c = Collapser() + PathManager.for_each(args.top_dir, c, Collapser.__do_collect_info, dists=[args.dist]) + + for arch_confs in c.configs.values(): + Collapser.__collapse_one_config(arch_confs, c.archs, args.top_dir) + +class Striper(): + configs: Dict[str, List[str]] + file_list: List[str] + + def __init__(self, file_list: List[str]) -> None: + self.configs = {} + self.file_list = file_list + + for i, path in enumerate(file_list): + conflist = ConfigList.from_path(path, dist="", arch="") + for conf in conflist.lists(): + name = conf.name + if name not in self.configs: + self.configs[name] = [None]*i + self.configs[name].append(conf.value) + for conf_values in self.configs.values(): + if len(conf_values) != i+1: + conf_values.append(None) + + def strip(self, base: Type["Striper"]): + disappear_confs = [] + same_confs = [] + for name, conf_values in base.configs.items(): + if name not in self.configs: + disappear_confs.append(name) + continue + if conf_values == self.configs[name]: + same_confs.append(name) + + for name in same_confs: + del self.configs[name] + + num_files = len(self.file_list) + for name in disappear_confs: + self.configs[name] = [None]*num_files + + def override_files(self): + for i, path in enumerate(self.file_list): + with open(path, "w") as f: + for name, values in self.configs.items(): + f.write(Rules.as_config_text(name, values[i])) + + @staticmethod + def do_strip(args): + if len(args.base) != len(args.target): + die("the target config files do not match base") + base = Striper(args.base) + target = Striper(args.target) + target.strip(base) + target.override_files() + +class ImportOpTranslater(): + files: Dict[str, str] + files_info: Dict[Tuple[str, str, str, str], str] + level_info_path: str + input_dir: str + output_dir: str + src_root: str + + file_counter = 0 + + def __init__(self, input_dir: str, output_dir: str, src_root: str) -> None: + self.files = {} + self.files_info = {} + self.input_dir = input_dir + self.output_dir = output_dir + self.src_root = src_root + self.level_info_path = "" + + def __cmd(self, cmd: str): + return f"python3 {__file__} {cmd} " + + def __op_file(self, args: str): + # FILE dist arch variant file_path REFRESH/NOREFRESH + + # use file_counter to make file name unique + ImportOpTranslater.file_counter += 1 + dist, arch, subarch, path, refresh = args.split() + new_path = os.path.join(self.output_dir, f"{ImportOpTranslater.file_counter}-{os.path.basename(path)}") + if subarch != "null": + self.files[f"{dist}-{arch}-{subarch}"] = new_path + self.files_info[(dist, arch, subarch)] = new_path + else: + self.files[f"{dist}-{arch}"] = new_path + self.files_info[(dist, arch, None)] = new_path + cmd = f"cp {path} 
{new_path}\n" + if refresh == "REFRESH": + cmd += f"make KCONFIG_CONFIG={new_path} ARCH={arch} CROSS_COMPILE=scripts/dummy-tools/ " + cmd += f"PAHOLE=scripts/dummy-tools/pahole " + cmd += f"-C {self.src_root} olddefconfig > /dev/null\n" + cmd += f"rm -f {new_path}.old \n" + return cmd + + def __op_levelinfo(self, args: str): + #LEVELINFO target_dist [base_dist [base_dist ...]] + target_dist, base_dists = args.split(maxsplit=1) + cmd = self.__cmd("collect_level") + cmd += f"--dist {target_dist} --top_dir {self.input_dir} " + for base in base_dists.split(): + if base == "null": + continue + cmd += f"--base {base} " + self.level_info_path = os.path.join(self.output_dir, "level_info") + cmd += f"> {self.level_info_path}" + return cmd + + def __op_import(self, args: str): + # IMPORT file + file = args + subarch = None + dist, arch = file.split("-", maxsplit=1) + if "-" in arch: + arch, subarch = arch.split("-", maxsplit=1) + + cmd = self.__cmd("import") + cmd += f"--dist {dist} --arch {arch} " + if subarch is not None: + cmd += f"--subarch {subarch} " + cmd += f"--level_info {self.level_info_path} --top_dir {self.output_dir} " + cmd += f"{self.files[file]} " + return cmd + + def __op_collapse(self, args: str): + # COLLAPSE dist + dist = args + cmd = self.__cmd("collapse") + cmd += f"--dist {dist} --top_dir {self.output_dir}" + return cmd + + def __op_strip(self, args: str): + # STRIP target_dist base_dist + target_dist, base_dist = args.split() + copy_cmd = "" + cmd = self.__cmd("strip") + for (dist, arch, subarch), target_path in self.files_info.items(): + if dist != target_dist: + continue + try: + copy_cmd += f"cp {target_path} {target_path}.bak\n" + base_path = self.files_info[(base_dist, arch, subarch)] + except: + full_arch = arch + if subarch is not None: + full_arch = f"{arch}-{subarch}" + die(f"strip error. 
cannot find file {base_dist}-{full_arch} to match {target_dist}-{full_arch}") + cmd += f"--base {base_path} --target {target_path} " + return copy_cmd + cmd + + def __translate_one(self, op:str, args: str): + cmd = "" + if op == "FILE": + cmd = self.__op_file(args) + elif op == "LEVELINFO": + cmd = self.__op_levelinfo(args) + elif op == "IMPORT": + cmd = self.__op_import(args) + elif op == "COLLAPSE": + cmd = self.__op_collapse(args) + elif op == "STRIP": + cmd = self.__op_strip(args) + else: + die(f"unknown op {op}") + print(cmd) + + @staticmethod + def do_translate(args): + t = ImportOpTranslater(input_dir=args.input_dir, output_dir=args.output_dir, src_root=args.src_root) + with open(args.path) as f: + for i, line in enumerate(f.readlines()): + line = line.strip() + if line.startswith("#") or line == "": + continue + (op, action_args) = line.split(maxsplit=1) + try: + t.__translate_one(op, action_args) + except: + die(f"parse error in {args.path}:{i+1}\n> {line}") + +class KconfigLayoutEntry(): + name: str + dist: str + arch: str + subarch: str + base_dist: str + base_name: str + # (dist, variant, arch) + layout_list: List[Tuple[str, str, str]] + + def __init__(self, name: str, dist: str, arch: str, base_dist: str, base_name: str) -> None: + self.name = name + self.dist = dist + self.arch = arch + self.base_dist = base_dist + self.base_name = base_name + self.layout_list = [] + + @staticmethod + def from_text(line: str): + cur, arch, base, layouts = line.split() + dist, name = cur.split("/") + if base == "null": + base_dist = None + base_name = None + else: + base_dist, base_name = base.split("/") + entry = KconfigLayoutEntry(name, dist, arch,base_dist, base_name) + for l in layouts.split(";"): + variant, arch = l.split("/") + entry.layout_list.append((dist, variant, arch)) + return entry + +class KconfigLayout(): + # (dist, file_name) + layouts: Dict[Tuple[str, str], KconfigLayoutEntry] + + def __init__(self) -> None: + self.layouts = {} + + @staticmethod + def from_path(path: str) -> Type["KconfigLayout"]: + l = KconfigLayout() + with open(path) as f: + for line in f.readlines(): + line = line.strip() + if line.startswith("#") or line == "": + continue + e = KconfigLayoutEntry.from_text(line) + l.layouts[(e.dist, e.name)] = e + + if e.base_dist is None: + continue + if (e.base_dist, e.base_name) not in l.layouts: + die(f"cannot find {e.base_dist}/{e.base_name} while parsing {e.dist}/{e.name}") + e.layout_list = l.layouts[(e.base_dist, e.base_name)].layout_list + e.layout_list + return l + +class GenerateTranslater(): + input_dir: str + output_dir: str + src_root: str + + def __init__(self, args) -> None: + self.input_dir = args.input_dir + self.output_dir = args.output_dir + self.src_root = args.src_root + + def __cmd(self, cmd: str): + return f"python3 {__file__} {cmd} " + + def __translate_one(self, e: KconfigLayoutEntry, tmp_dir: str): + files = [] + cmd = "" + for dist, variant, arch in e.layout_list: + if variant == "generic": + # for geneic configs, generate them + file = os.path.join(tmp_dir, f"kernel-partial-{dist}-{variant}-{arch}.config") + cmd += self.__cmd("generate") + cmd += f"--top_dir {self.input_dir} --dist {dist} --archdir {arch}" + cmd += f"> {file} \n" + files.append(file) + else: + dist_path = PathManager.dist_to_path(self.input_dir, dist) + file = os.path.join(dist_path, "custom-overrides", variant, f"{arch}.config") + if os.path.exists(file): + files.append(file) + + # merge all partial configs + final_path = os.path.join(self.output_dir, 
f"kernel-{e.dist}-{e.name}.config") + cmd += self.__cmd("merge") + cmd += " ".join(files) + cmd += f" > {final_path} \n" + + # refresh configs + cmd += f"echo \"* generated file: {final_path}\"\n" + cmd += f"make KCONFIG_CONFIG={final_path} ARCH={e.arch} CROSS_COMPILE=scripts/dummy-tools/ " + cmd += f"PAHOLE=scripts/dummy-tools/pahole " + cmd += f"-C {self.src_root} olddefconfig > /dev/null\n" + cmd += f"rm -f {final_path}.old \n" + cmd += f"echo \"* processed file: {final_path}\"\n" + + return cmd + + @staticmethod + def do_translate(args): + cmd = "" + t = GenerateTranslater(args) + l = KconfigLayout.from_path(args.layout) + + tmp_dir = os.path.join(args.output_dir, "tmp") + cmd += f"mkdir -p {tmp_dir}\n" + if args.target is not None: + dist, file_name = args.target.split("/", maxsplit=1) + if (dist, file_name) not in l.layouts: + die(f"cannot find config layout info for {dist}/{file_name}") + cmd += t.__translate_one(l.layouts[((dist, file_name))], tmp_dir) + else: + for e in l.layouts.values(): + cmd += t.__translate_one(e, tmp_dir) + cmd += f"rm -rf {tmp_dir}" + print(cmd) + +class Mover(): + """move configs from old level to new level""" + config_patterns: List[str] + new_level: str + top_dir: str + + def __init__(self, top_dir: str, new_level: str, config_patterns: List[str]) -> None: + self.top_dir = top_dir + self.new_level = new_level + self.config_patterns = config_patterns + + @staticmethod + def get_level(level: str) -> str: + target_level = "" + for l in Rules.levels(): + if l.startswith(level): + if target_level != "": + die(f"the level {level} is ambiguous") + target_level = l + + if target_level == "": + die(f"unkonw level {level}") + return target_level + + @staticmethod + def __move(ctx: PathIterContext): + m : Mover = ctx.data + for config_pattern in m.config_patterns: + if fnmatch.fnmatch(ctx.name, config_pattern): + new_path = PathManager.as_path(m.top_dir, ctx.dist, m.new_level, ctx.arch, ctx.subarch, ctx.name) + os.makedirs(os.path.dirname(new_path), exist_ok=True) + shutil.move(ctx.path, new_path) + print("* move: {} -> {}".format(ctx.path.replace(m.top_dir, "", 1), new_path.replace(m.top_dir, "", 1))) + return + + @staticmethod + def __remove_empty_dirs(dir_path: str): + for root, dirs, _ in os.walk(dir_path, topdown=False): + for name in dirs: + cur_dir_path = os.path.join(root, name) + if len(os.listdir(cur_dir_path)) == 0: + os.rmdir(cur_dir_path) + + @staticmethod + def do_move(args): + old_level = Mover.get_level(args.old) + new_level = Mover.get_level(args.new_level) + m = Mover(args.top_dir, new_level, args.config_name) + PathManager.for_each(args.top_dir, m, Mover.__move, dists=[args.dist], levels=[old_level]) + level_dir = PathManager.as_level_dir(args.top_dir, args.dist, args.old) + Mover.__remove_empty_dirs(level_dir) + +class Exporter(): + # conf_name, file_name, value + configs: Dict[str, Dict[str, str]] + + def __init__(self) -> None: + self.configs = {} + + def __save_as_xlsx(self, columns: List[str], output: str): + import pandas + if not output.endswith(".xlsx"): + output+=".xlsx" + + writer = pandas.ExcelWriter(output, engine="openpyxl") + data = pandas.DataFrame.from_dict(list(self.configs.values())) + data = data[columns] + data.to_excel(writer, index=False) + writer.save() + + @staticmethod + def do_export(args): + e = Exporter() + levelinfo = LevelInfo.load(args.level_info) + columns = ["name", "level"] + for file in args.files: + file_name = os.path.basename(file) + columns.append(file_name) + with open(file) as f: + conf_list = 
ConfigList.from_path(file, dist="", arch="", level_info=levelinfo) + for c in conf_list.lists(): + if c.name not in e.configs: + e.configs[c.name] = {} + e.configs[c.name][file_name] = c.value + e.configs[c.name]["level"] = c.level + e.configs[c.name]["name"] = c.name + e.__save_as_xlsx(columns, args.output) + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='process configs') + parser.set_defaults(func=default_args_func) + subparsers = parser.add_subparsers() + + level_collector = subparsers.add_parser('collect_level', description="collect level information") + level_collector.add_argument("--dist", required=True, help="the dist") + level_collector.add_argument("--top_dir", required=True, help="the dist") + level_collector.add_argument("--base", nargs="*", help="the base dist level info") + level_collector.set_defaults(func=LevelCollector.do_collect) + + importer = subparsers.add_parser('import', description="import new configs") + importer.add_argument("--dist", required=True, help="the dist") + importer.add_argument("--arch", required=True, help="the arch") + importer.add_argument("--subarch", help="the subarch") + importer.add_argument("--level_info", required=True, help="the level info ouputed by subcmd collect_level") + importer.add_argument("--top_dir", required=True, help="the output top dir") + importer.add_argument("config", help="the config file") + importer.set_defaults(func=Importer.do_import) + + generator = subparsers.add_parser("generate", description="generate configs") + generator.add_argument("--top_dir", required=True, help="the top dir to store configs") + generator.add_argument("--dist", help="the dist") + generator.add_argument("--archdir", help="the arch directory, be like \{arch\}-\{subarch\}") + generator.set_defaults(func=Generator.do_generate) + + merger = subparsers.add_parser("merge", description="merge with configs") + merger.add_argument("file", nargs="+", help="the config files") + merger.set_defaults(func=Merger.do_merge) + + collapser = subparsers.add_parser("collapse", description="collapse configs") + collapser.add_argument("--dist", required=True, help="the dist") + collapser.add_argument("--top_dir", required=True, help="the top dir to store configs") + collapser.set_defaults(func=Collapser.do_collapse) + + striper = subparsers.add_parser("strip", description="strip repeated configs") + striper.add_argument("--base", action='append', default=[], help="the base config files") + striper.add_argument("--target", action='append', default=[], help="the target config files") + striper.set_defaults(func=Striper.do_strip) + + import_translater = subparsers.add_parser("import_tanslate", description="import operations translater") + import_translater.add_argument("--input_dir", required=True, help="the dir to store old configs, used for collect level infos") + import_translater.add_argument("--output_dir", required=True, help="the dir to store new configs") + import_translater.add_argument("--src_root", required=True, help="the dir of kernel source") + import_translater.add_argument("path", help="the import scripts") + import_translater.set_defaults(func=ImportOpTranslater.do_translate) + + generate_translater = subparsers.add_parser("generate_translate", description="generate operations translater") + generate_translater.add_argument("--input_dir", required=True, help="the dir to store old configs, used for collect level infos") + generate_translater.add_argument("--output_dir", required=True, help="the dir to store new configs") + 
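# Both translater sub-commands only print a shell script; update_configs.sh
+    # and generate_configs.sh capture that output into a file and run it with sh.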
+    generate_translater.add_argument("--src_root", required=True, help="the dir of the kernel source")
+    generate_translater.add_argument("--target", help="the target config file, like: <dist>/<config_file_name>")
+    generate_translater.add_argument("layout", help="the kconfig layout file")
+    generate_translater.set_defaults(func=GenerateTranslater.do_translate)
+
+    mover = subparsers.add_parser("move", description="move configs to a new level")
+    mover.add_argument("--old", default="UNKNOWN", help="the config's old level dir, default is UNKNOWN")
+    mover.add_argument("--dist", required=True, help="the dist")
+    mover.add_argument("--top_dir", required=True, help="the top dir to store configs")
+    mover.add_argument("config_name", nargs="+", help="the config name")
+    mover.add_argument("new_level", help="the new level")
+    mover.set_defaults(func=Mover.do_move)
+
+    exporter = subparsers.add_parser('export', description="export to excel format")
+    exporter.add_argument("files", nargs="+", help="the config files")
+    exporter.add_argument("--output", required=True, help="the output name")
+    exporter.add_argument("--level_info", required=True, help="the level info")
+    exporter.set_defaults(func=Exporter.do_export)
+
+    args = parser.parse_args()
+    args.func(args)
\ No newline at end of file
diff --git a/anolis/configs/scripts/export_configs.sh b/anolis/configs/scripts/export_configs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..67880c39fc61722233b237d5794ce005b59293eb
--- /dev/null
+++ b/anolis/configs/scripts/export_configs.sh
@@ -0,0 +1,29 @@
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# To export kconfigs in xlsx format.
+#
+# Copyright (C) 2023 Qiao Ma
+
+set -e
+
+SCRIPT_DIR=$(realpath $(dirname $0))
+BASE_CONFIG_DIR=$(realpath ${SCRIPT_DIR}/..)
+FILE_LIST=${DIST_OUTPUT}/file_list
+LEVEL_INFO=${DIST_OUTPUT}/level_info
+
+mkdir -p ${DIST_OUTPUT}
+
+sh ${SCRIPT_DIR}/generate_configs.sh | tee ${FILE_LIST}
+
+python3 ${SCRIPT_DIR}/anolis_kconfig.py collect_level --top_dir ${BASE_CONFIG_DIR} \
+    --dist ${DIST_CONFIG_KERNEL_NAME} > ${LEVEL_INFO}
+
+files=$(cat ${FILE_LIST} | grep "generated" | awk '{print $4}' | xargs)
+
+python3 ${SCRIPT_DIR}/anolis_kconfig.py export \
+    --level_info ${LEVEL_INFO} \
+    --output ${DIST_OUTPUT}/configs.xlsx \
+    ${files}
+
+echo "* file generated: ${DIST_OUTPUT}/configs.xlsx"
diff --git a/anolis/configs/scripts/generate_configs.sh b/anolis/configs/scripts/generate_configs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a101b469c51142681ba20a57ddf563735ab21100
--- /dev/null
+++ b/anolis/configs/scripts/generate_configs.sh
@@ -0,0 +1,38 @@
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate the whole set of kconfig files.
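An aside before the script body (an illustrative sketch, not part of the patch): this script drives `anolis_kconfig.py generate_translate`, which expands each line of the `kconfig_layout` file shown further below via `KconfigLayoutEntry.from_text()`. Assuming the scripts directory is on `PYTHONPATH` so that `anolis_kconfig` is importable:

```python
from anolis_kconfig import KconfigLayoutEntry

# the ANCK/debug-x86 line from anolis/configs/scripts/kconfig_layout
e = KconfigLayoutEntry.from_text(
    "ANCK/debug-x86 x86 null generic/default;generic/x86;debug/default;debug/x86")
print(e.dist, e.name, e.arch)  # -> ANCK debug-x86 x86
print(e.layout_list)
# -> [('ANCK', 'generic', 'default'), ('ANCK', 'generic', 'x86'),
#     ('ANCK', 'debug', 'default'), ('ANCK', 'debug', 'x86')]
```

The partial configs named by `layout_list` are merged in list order by the `merge` sub-command, with later fragments overriding earlier ones, so the debug overrides land on top of the generic baseline.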
+# +# Copyright (C) 2023 Qiao Ma + +set -e + +SCRIPT_DIR=$(realpath $(dirname $0)) +FILE_LIST=${DIST_OUTPUT}/file_list + +mkdir -p ${DIST_OUTPUT} + +if [ -z "$@" ]; then + python3 ${SCRIPT_DIR}/anolis_kconfig.py generate_translate \ + --input_dir ${SCRIPT_DIR}/../ \ + --output_dir ${DIST_OUTPUT} \ + --src_root ${DIST_SRCROOT} \ + ${DIST_SRCROOT}/${DIST_CONFIG_LAYOUTS} > ${DIST_OUTPUT}/generate.sh +else + for target in $@ + do + python3 ${SCRIPT_DIR}/anolis_kconfig.py generate_translate \ + --input_dir ${SCRIPT_DIR}/../ \ + --output_dir ${DIST_OUTPUT} \ + --src_root ${DIST_SRCROOT} \ + --target ${DIST_CONFIG_KERNEL_NAME}/${target} \ + ${DIST_SRCROOT}/${DIST_CONFIG_LAYOUTS} > ${DIST_OUTPUT}/generate.sh + done +fi + +sh ${DIST_OUTPUT}/generate.sh | tee ${FILE_LIST} + +if [ "x${DIST_DO_GENERATE_DOT_CONFIG}" = "xY" ]; then + file=$(cat ${FILE_LIST} | grep "generated" | awk '{print $4}' | head -1) + cp -f ${file} ${DIST_SRCROOT}.config +fi diff --git a/anolis/configs/scripts/kconfig_import b/anolis/configs/scripts/kconfig_import new file mode 100644 index 0000000000000000000000000000000000000000..b55ee6847d7dab4a807eada97cd1d11e4c15b148 --- /dev/null +++ b/anolis/configs/scripts/kconfig_import @@ -0,0 +1,15 @@ +# FILE dist arch subarch file_path REFRESH/NOREFRESH +# LEVELINFO target_dist base_dist [base_dist ...] +# IMPORT file +# COLLAPSE dist +# STRIP target_dist base_dist + +FILE ANCK x86 null %%DIST_OUTPUT%%/kernel-ANCK-generic-x86.config REFRESH +FILE ANCK arm64 null %%DIST_OUTPUT%%/kernel-ANCK-generic-arm64.config REFRESH + +# for ANCK +LEVELINFO ANCK null +IMPORT ANCK-x86 +IMPORT ANCK-arm64 + +COLLAPSE ANCK \ No newline at end of file diff --git a/anolis/configs/scripts/kconfig_layout b/anolis/configs/scripts/kconfig_layout new file mode 100644 index 0000000000000000000000000000000000000000..a1933e7412ab9b37347fc7abe783e085bbe03d26 --- /dev/null +++ b/anolis/configs/scripts/kconfig_layout @@ -0,0 +1,8 @@ +# dist/config_file_name arch base layout(variant/arch) +ANCK/generic-x86 x86 null generic/default;generic/x86 +ANCK/debug-x86 x86 null generic/default;generic/x86;debug/default;debug/x86 +ANCK/gcov-x86 x86 null generic/default;generic/x86;gcov/default +ANCK/generic-arm64 arm64 null generic/default;generic/arm64 +ANCK/debug-arm64 arm64 null generic/default;generic/arm64;debug/default;debug/arm64 +ANCK/gcov-arm64 arm64 null generic/default;generic/arm64;gcov/default +ANCK/arm64-64k arm64 null generic/default;generic/arm64;64k/arm64 diff --git a/anolis/configs/scripts/modify_config.sh b/anolis/configs/scripts/modify_config.sh new file mode 100644 index 0000000000000000000000000000000000000000..7f77b30e09663ccbdc1147f8c6a09b83621a7c8a --- /dev/null +++ b/anolis/configs/scripts/modify_config.sh @@ -0,0 +1,133 @@ +#! /bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# To modify kconfigs. +# +# Copyright (C) 2024 Qiao Ma + +set -e + +SCRIPT_DIR=$(realpath $(dirname $0)) +BASE_CONFIG_DIR=$(realpath ${SCRIPT_DIR}/..) 
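+# Illustration (CONFIG_FOO is a placeholder): running
+#     make dist-configs-modify C=CONFIG_FOO L=L1 x86=y others=n
+# removes any stale L*/.../CONFIG_FOO files and recreates
+#     L1-RECOMMEND/x86/CONFIG_FOO      containing "CONFIG_FOO=y"
+#     L1-RECOMMEND/default/CONFIG_FOO  containing "# CONFIG_FOO is not set"
+# before refreshing the generated configs via update_configs.sh.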
+DIST_CONFIG_DIR=${BASE_CONFIG_DIR}
+
+if [ "$DIST_CONFIG_KERNEL_NAME" != "ANCK" ]; then
+    DIST_CONFIG_DIR=$(realpath ${BASE_CONFIG_DIR}/OVERRIDE/${DIST_CONFIG_KERNEL_NAME}/);
+fi
+
+function die() {
+    echo ""
+    echo $@
+    echo "usage:"
+    echo "    make dist-configs-modify" \
+        "C=<config> L=<level> [x86=<value>] [arm64=<value>] [others=<value>] [all=<value>]"
+    echo "    C: the config name, must be specified"
+    echo "    L: the level of the config, must be specified"
+    echo "    x86: the value for the x86 architecture"
+    echo "    arm64: the value for the arm64 architecture"
+    echo "    others: the default value for the architectures that are not specified"
+    echo "    all: the value for all architectures"
+    echo ""
+    echo "example:"
+    echo "    - only set x86 to y"
+    echo "      make dist-configs-modify C=CONFIG_CRYPTO_ECDSA x86=y arm64=n others=n L=L1"
+    echo "    - set all archs to y"
+    echo "      make dist-configs-modify C=CONFIG_CRYPTO_ECDSA all=y L=L1"
+    echo ""
+    exit 1
+}
+
+declare -A ARCH_VALUES
+
+function collect_ARCH_VALUES() {
+    if [ -n "${x86}" ]; then ARCH_VALUES["x86"]=${x86}; fi
+    if [ -n "${arm64}" ]; then ARCH_VALUES["arm64"]=${arm64}; fi
+    if [ -n "${others}" ]; then ARCH_VALUES["default"]=${others}; fi
+    if [ -n "${all}" ]; then ARCH_VALUES["default"]=${all}; fi
+
+    if [ ${#ARCH_VALUES[@]} -eq 0 ]; then
+        die "need to specify at least one architecture's value";
+    fi
+}
+
+function set_correct_level() {
+    case $L in
+    "L0"|"L0-MANDATORY")
+        L="L0-MANDATORY"
+        ;;
+    "L1"|"L1-RECOMMEND")
+        L="L1-RECOMMEND"
+        ;;
+    "L2"|"L2-OPTIONAL")
+        L="L2-OPTIONAL"
+        ;;
+    *)
+        die "unsupported level: $L"
+        ;;
+    esac
+}
+
+function check_args() {
+    if [ -z "$C" ]; then die "the config name must be specified"; fi
+    if [ -z "$L" ]; then die "the level must be specified"; fi
+    collect_ARCH_VALUES
+    set_correct_level
+}
+
+function remove_old_configs() {
+    for f in $(find ${DIST_CONFIG_DIR}/L* -type f -name "$C")
+    do
+        echo "remove old file: $f"
+        rm -f $f
+    done
+}
+
+function add_new_configs() {
+    for arch in ${!ARCH_VALUES[@]}; do
+        local value=${ARCH_VALUES[${arch}]}
+        local text="$C=$value"
+        if [ "$value" = "n" ]; then text="# $C is not set"; fi
+
+        mkdir -p ${DIST_CONFIG_DIR}/${L}/${arch}
+        echo "$text" > ${DIST_CONFIG_DIR}/${L}/${arch}/$C;
+        echo "created new file: ${DIST_CONFIG_DIR}/${L}/${arch}/$C"
+    done
+}
+
+function refresh_configs() {
+    echo "refresh configs"
+    sh ${SCRIPT_DIR}/update_configs.sh
+}
+
+CHECK_FOUND_FILE=0
+
+function check_config_for_one_arch() {
+    local arch=$1
+    if [ -f ${DIST_CONFIG_DIR}/${L}/${arch}/$C ]; then
+        echo "$arch: $(cat ${DIST_CONFIG_DIR}/${L}/${arch}/$C)"
+        CHECK_FOUND_FILE=1
+    fi
+}
+
+function check_config() {
+    echo "The Final Configs After Refresh"
+    check_config_for_one_arch "x86"
+    check_config_for_one_arch "arm64"
+    check_config_for_one_arch "default"
+    if [ "$CHECK_FOUND_FILE" == "0" ]; then
+        echo "No valid config file was found; maybe some dependency is not satisfied"
+    fi
+    echo ""
+    echo "******************************************************************************"
+}
+
+function main() {
+    check_args
+    remove_old_configs
+    add_new_configs
+    refresh_configs
+    check_config
+}
+
+main
diff --git a/anolis/configs/scripts/move_configs.sh b/anolis/configs/scripts/move_configs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..22a8f56e7d6eeb1096fbe48ac20d716bc5f05299
--- /dev/null
+++ b/anolis/configs/scripts/move_configs.sh
@@ -0,0 +1,48 @@
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# To adjust the level of a kconfig.
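+#
+# Illustration of a move (CONFIG_CAN is the example used below; the UNKNOWN
+# level keeps an extra per-config directory, cf. PathManager.as_path()):
+#   before: anolis/configs/UNKNOWN/CONFIG_CAN/x86/CONFIG_CAN
+#   after `make dist-configs-move C=CONFIG_CAN L=L2`:
+#           anolis/configs/L2-OPTIONAL/x86/CONFIG_CAN
+# Shell-style patterns are accepted for C (Mover matches with fnmatch),
+# e.g. C=CONFIG_CAN* moves every matching config.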
+#
+# Copyright (C) 2023 Qiao Ma
+
+set -e
+
+SCRIPT_DIR=$(realpath $(dirname $0))
+BASE_CONFIG_DIR=$(realpath ${SCRIPT_DIR}/..)
+
+function die() {
+    echo ""
+    echo $@
+    echo "usage:"
+    echo "    make dist-configs-move OLD=<old_level> C=<config_name> L=<new_level>"
+    echo "    OLD: the old level, default is UNKNOWN"
+    echo "    C: the config name"
+    echo "    L: the new level"
+    echo "example:"
+    echo "    - to move CONFIG_CAN to L1"
+    echo "      make dist-configs-move OLD=L2 C=CONFIG_CAN L=L1"
+    echo ""
+    exit 1
+}
+
+function check_args() {
+    if [ -z "$OLD" ]; then
+        OLD="UNKNOWN"
+    fi
+    if [ -z "$C" ]; then
+        die "config name \$C is not specified"
+    fi
+    if [ -z "$L" ]; then
+        die "config level \$L is not specified"
+    fi
+}
+
+function do_move() {
+    python3 ${SCRIPT_DIR}/anolis_kconfig.py move \
+        --top_dir ${BASE_CONFIG_DIR} \
+        --dist ${DIST_CONFIG_KERNEL_NAME} \
+        --old "$OLD" "$C" "$L"
+}
+
+check_args
+do_move
diff --git a/anolis/configs/scripts/update_configs.sh b/anolis/configs/scripts/update_configs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b7ab61fa9e9520cd40e6272845d9065e0b78dec0
--- /dev/null
+++ b/anolis/configs/scripts/update_configs.sh
@@ -0,0 +1,113 @@
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# To update kconfigs.
+#
+# Copyright (C) 2023 Qiao Ma
+
+set -e
+
+SCRIPT_DIR=$(realpath $(dirname $0))
+BASE_CONFIG_DIR=$(realpath ${SCRIPT_DIR}/..)
+TMP_DIR=${DIST_OUTPUT}/configs
+OLD_CONFIG_DIR=${TMP_DIR}/old
+NEW_CONFIG_DIR=${TMP_DIR}/new
+BACKUP_CONFIG_DIR=${BASE_CONFIG_DIR}/configs.${DIST_CONFIG_KERNEL_NAME}.old
+
+if [ "${DIST_CONFIG_KERNEL_NAME}" != "ANCK" ]; then
+    OLD_DIST_CONFIG_DIR=${BASE_CONFIG_DIR}/OVERRIDE/${DIST_CONFIG_KERNEL_NAME}
+    NEW_DIST_CONFIG_DIR=${NEW_CONFIG_DIR}/OVERRIDE/${DIST_CONFIG_KERNEL_NAME}
+else
+    OLD_DIST_CONFIG_DIR=${BASE_CONFIG_DIR}
+    NEW_DIST_CONFIG_DIR=${NEW_CONFIG_DIR}
+fi
+
+if [ -n "$DO_IMPORT_CONFIGS" ]; then
+    IMPORT_ACTION=${DIST_SRCROOT}/${DIST_CONFIG_ACTIONS_IMPORTS}
+else
+    IMPORT_ACTION=${DIST_SRCROOT}/${DIST_CONFIG_ACTIONS_REFRESH}
+fi
+
+function log() {
+    echo $@
+}
+
+function prepare_env() {
+    rm -rf ${TMP_DIR}
+    mkdir -p ${OLD_CONFIG_DIR}
+    mkdir -p ${NEW_CONFIG_DIR}
+}
+
+function generate_configs() {
+    log "collect all old configs..."
+    # generate the old config files
+    sh ${SCRIPT_DIR}/generate_configs.sh
+}
+
+function split_new_configs() {
+    # split the new config files
+    echo "split new configs..."
+    cp ${IMPORT_ACTION} ${DIST_OUTPUT}/kconfig_import
+    sed -i "s#%%DIST_OUTPUT%%#\${DIST_OUTPUT}#" ${DIST_OUTPUT}/kconfig_import
+    sed -i "s#%%DIST_SRCROOT%%#\${DIST_SRCROOT}#" ${DIST_OUTPUT}/kconfig_import
+    python3 ${SCRIPT_DIR}/anolis_kconfig.py import_tanslate \
+        --input_dir ${BASE_CONFIG_DIR} \
+        --output_dir ${NEW_CONFIG_DIR} \
+        --src_root ${DIST_SRCROOT} ${DIST_OUTPUT}/kconfig_import > ${DIST_OUTPUT}/import.sh
+    sh -e ${DIST_OUTPUT}/import.sh
+}
+
+function replace_with_new_configs() {
+    log "replace old configs with new configs..."
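+    # The current per-level trees are moved aside to
+    # ${BASE_CONFIG_DIR}/configs.${DIST_CONFIG_KERNEL_NAME}.old first;
+    # check_configs() below removes that backup only when every new
+    # config has been classified into a known level.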
+
+    rm -rf ${BACKUP_CONFIG_DIR}
+    mkdir -p ${BACKUP_CONFIG_DIR}
+    mkdir -p ${OLD_DIST_CONFIG_DIR}
+    for level in ${DIST_LEVELS}
+    do
+        if [ -d ${OLD_DIST_CONFIG_DIR}/${level} ]; then
+            mv ${OLD_DIST_CONFIG_DIR}/${level} ${BACKUP_CONFIG_DIR}
+        fi
+    done
+
+    for level in ${DIST_LEVELS}
+    do
+        if [ -d ${NEW_DIST_CONFIG_DIR}/${level} ]; then
+            mv ${NEW_DIST_CONFIG_DIR}/${level} ${OLD_DIST_CONFIG_DIR}
+        fi
+    done
+}
+
+function check_configs() {
+    # check for config files whose level is still unknown
+    echo ""
+    echo "******************************************************************************"
+    local unknown_dir=${OLD_DIST_CONFIG_DIR}/UNKNOWN
+    if [ -d ${unknown_dir} ] && [ -n "$(ls ${unknown_dir})" ]; then
+        echo "Some new configs are still at the UNKNOWN level."
+        echo ""
+        ls ${unknown_dir}
+        echo ""
+        echo "The configs listed above need to be classified manually!"
+        echo "See: ${unknown_dir}"
+        echo "HINT: \`make dist-configs-move\` can help you."
+        echo "eg: make dist-configs-move C=CONFIG_CAN* L=L2"
+    else
+        echo ""
+        echo "Congratulations, all configs have a determined level."
+        echo "**DO NOT FORGET** to add changelogs if any config is changed"
+        rm -rf ${BACKUP_CONFIG_DIR}
+    fi
+    echo ""
+    echo "******************************************************************************"
+    echo ""
+}
+
+prepare_env
+if [ -z "$DO_IMPORT_CONFIGS" ]; then
+    generate_configs
+fi
+split_new_configs
+replace_with_new_configs
+check_configs
diff --git a/anolis/configs/specification/KCONFIG_specification.md b/anolis/configs/specification/KCONFIG_specification.md
new file mode 100644
index 0000000000000000000000000000000000000000..8082348d6c051192f9862eabc2ba88c23db45f64
--- /dev/null
+++ b/anolis/configs/specification/KCONFIG_specification.md
@@ -0,0 +1,641 @@
+# Anolis 23 KCONFIG Specification List
+
+[Notes]
+
+- 应 (shall): the option is a mandatory requirement
+- 宜 (should): the option is only a recommendation
+- N/A: no requirement is imposed on the option
+
+The first x86/arm/loongarch/sw64 column group gives the requirement level
+(应/宜/N/A); the second group gives the expected value.
+
+| No. | kconfig name | x86 (level) | arm (level) | loongarch (level) | sw64 (level) | x86 (value) | arm (value) | loongarch (value) | sw64 (value) | Remarks |
+| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- |
+| 1. | CONFIG_CPU_ISOLATION | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 2. | CONFIG_MEMCG | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 3. | CONFIG_FAIR_GROUP_SCHED | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 4. | CONFIG_CFS_BANDWIDTH | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 5. | CONFIG_CPUSETS | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 6. | CONFIG_UTS_NS | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 7. | CONFIG_IPC_NS | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 8. | CONFIG_PID_NS | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 9. | CONFIG_NET_NS | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 10. | CONFIG_BLK_DEV_INITRD | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 11. | CONFIG_SYSCTL | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 12. | CONFIG_MULTIUSER | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 13. | CONFIG_POSIX_TIMERS | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 14. | CONFIG_PRINTK | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 15. | CONFIG_BUG | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 16. | CONFIG_ELF_CORE | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 17. | CONFIG_FUTEX | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 18. | CONFIG_EPOLL | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 19. | CONFIG_SHMEM | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 20. | CONFIG_AIO | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 21. | CONFIG_IO_URING | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 22. | CONFIG_KALLSYMS | 应 | 应 | 应 | 应 | y | y | y | y | |
+| 23. 
| CONFIG_KALLSYMS_ALL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 24. | CONFIG_RCU_STALL_COMMON | 应 | 应 | 应 | 应 | y | y | y | y | | +| 25. | CONFIG_NUMA_BALANCING | 应 | 应 | 应 | 应 | y | y | y | y | | +| 26. | CONFIG_SYSFS_SYSCALL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 27. | CONFIG_FHANDLE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 28. | CONFIG_SIGNALFD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 29. | CONFIG_EVENTFD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 30. | CONFIG_TREE_RCU | 应 | 应 | 应 | 应 | y | y | y | y | | +| 31. | CONFIG_ADVISE_SYSCALLS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 32. | CONFIG_PSI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 33. | CONFIG_USER_NS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 34. | CONFIG_TIMERFD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 35. | CONFIG_PAGE_COUNTER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 36. | CONFIG_MEMBARRIER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 37. | CONFIG_NR_CPUS | 应 | 应 | 应 | 应 | [1024,8192] | [1024,8192] | 256 | 512 | | +| 38. | CONFIG_64BIT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 39. | CONFIG_MMU | 应 | 应 | 应 | 应 | y | y | y | y | | +| 40. | CONFIG_SMP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 41. | CONFIG_PARAVIRT | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 42. | CONFIG_DMI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 43. | CONFIG_SCHED_MC | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 44. | CONFIG_NUMA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 45. | CONFIG_RELOCATABLE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 46. | CONFIG_SCHED_SMT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 47. | CONFIG_GENERIC_BUG | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 48. | CONFIG_PERF_EVENTS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 49. | CONFIG_TRACEPOINTS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 50. | CONFIG_KEXEC | 应 | 应 | 应 | 应 | y | y | y | y | | +| 51. | CONFIG_KEXEC_FILE | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 52. | CONFIG_CRASH_DUMP | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 53. | CONFIG_DEBUG_KERNEL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 54. | CONFIG_MAGIC_SYSRQ | 应 | 应 | 应 | 应 | y | y | y | y | | +| 55. | CONFIG_DEBUG_FS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 56. | CONFIG_PANIC_ON_OOPS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 57. | CONFIG_LOCKUP_DETECTOR | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 58. | CONFIG_SOFTLOCKUP_DETECTOR | 应 | 应 | 应 | 应 | y | y | y | y | | +| 59. | CONFIG_HARDLOCKUP_DETECTOR | 应 | 应 | 应 | 应 | y | y | y | y | | +| 60. | CONFIG_DETECT_HUNG_TASK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 61. | CONFIG_STACKTRACE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 62. | CONFIG_FTRACE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 63. | CONFIG_DYNAMIC_FTRACE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 64. | CONFIG_FTRACE_SYSCALLS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 65. | CONFIG_BLK_DEV_IO_TRACE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 66. | CONFIG_DEBUG_INFO | 应 | 应 | 应 | 应 | y | y | y | y | | +| 67. | CONFIG_DEBUG_INFO_BTF | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 68. | CONFIG_CRASH_CORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 69. | CONFIG_KEXEC_CORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 70. | CONFIG_PVPANIC | 应 | 应 | 应 | 应 | y | y | y | y | | +| 71. | CONFIG_BPF | 应 | 应 | 应 | 应 | y | y | y | y | | +| 72. | CONFIG_BPF_SYSCALL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 73. | CONFIG_BPF_JIT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 75. | CONFIG_LSM | 应 | 应 | 应 | 应 | lockdown,yama,integrity,selinux,bpf | lockdown,yama,integrity,selinux,bpf | landlock,lockdown,yama,loadpin,safesetid,integrity,bpf | landlock,lockdown,yama,loadpin,safesetid,bpf | | +| 76. | CONFIG_CRYPTO_SM4 | 应 | 应 | 应 | 应 | m | m | y | y | | +| 77. 
| CONFIG_CRYPTO_SM4_GENERIC | 应 | 应 | 应 | 应 | m | m | y | y | | +| 78. | CONFIG_SECURITY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 79. | CONFIG_SECURITYFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 80. | CONFIG_SECURITY_NETWORK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 81. | CONFIG_SECURITY_PATH | 应 | 应 | 应 | 应 | y | y | y | y | | +| 82. | CONFIG_SECURITY_SELINUX | 应 | 应 | 应 | 应 | y | y | y | y | | +| 83. | CONFIG_INTEGRITY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 84. | CONFIG_IMA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 85. | CONFIG_EVM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 86. | CONFIG_CRYPTO | 应 | 应 | 应 | 应 | y | y | y | y | | +| 87. | CONFIG_CRYPTO_ALGAPI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 88. | CONFIG_CRYPTO_ALGAPI2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 89. | CONFIG_CRYPTO_AEAD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 90. | CONFIG_CRYPTO_AEAD2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 91. | CONFIG_CRYPTO_SKCIPHER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 92. | CONFIG_CRYPTO_SKCIPHER2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 93. | CONFIG_CRYPTO_HASH | 应 | 应 | 应 | 应 | y | y | y | y | | +| 94. | CONFIG_CRYPTO_HASH2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 95. | CONFIG_CRYPTO_RNG | 应 | 应 | 应 | 应 | y | y | y | y | | +| 96. | CONFIG_CRYPTO_RNG2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 97. | CONFIG_CRYPTO_AKCIPHER2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 98. | CONFIG_CRYPTO_AKCIPHER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 99. | CONFIG_CRYPTO_MANAGER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 100. | CONFIG_CRYPTO_MANAGER2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 101. | CONFIG_CRYPTO_RSA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 102. | CONFIG_CRYPTO_SM2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 103. | CONFIG_CRYPTO_AES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 104. | CONFIG_CRYPTO_GCM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 105. | CONFIG_CRYPTO_GHASH | 应 | 应 | 应 | 应 | y | y | y | y | | +| 106. | CONFIG_CRYPTO_SHA256 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 107. | CONFIG_CRYPTO_SM3 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 108. | CONFIG_CRYPTO_SM3_GENERIC | 应 | 应 | 应 | 应 | y | y | y | y | | +| 109. | CONFIG_ASYMMETRIC_KEY_TYPE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 110. | CONFIG_CRYPTO_LIB_AES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 111. | CONFIG_CRYPTO_LIB_SHA256 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 112. | CONFIG_KEYS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 113. | CONFIG_SYSTEM_TRUSTED_KEYRING | 应 | 应 | 应 | 应 | y | y | y | y | | +| 114. | CONFIG_TRUSTED_KEYS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 115. | CONFIG_BLOCK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 116. | CONFIG_IOSCHED_BFQ | 应 | 应 | 应 | 应 | y | y | y | y | | +| 117. | CONFIG_BLK_MQ_PCI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 118. | CONFIG_BLK_MQ_VIRTIO | 应 | 应 | 应 | 应 | y | y | y | y | | +| 119. | CONFIG_MQ_IOSCHED_DEADLINE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 120. | CONFIG_FREEZER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 121. | CONFIG_ACPI_IPMI | 应 | 应 | 应 | 应 | m | m | m | m | | +| 122. | CONFIG_PM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 123. | CONFIG_ACPI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 124. | CONFIG_ACPI_PROCESSOR | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 125. | CONFIG_ACPI_NUMA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 126. | CONFIG_CPU_FREQ | 应 | 应 | 应 | 应 | y | y | y | y | | +| 127. | CONFIG_CPU_IDLE | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 128. | CONFIG_ACPI_APEI_PCIEAER | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 130. | CONFIG_ACPI_PCI_SLOT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 131. | CONFIG_SYSVIPC | 应 | 应 | 应 | 应 | y | y | y | y | | +| 132. 
| CONFIG_AUDIT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 133. | CONFIG_HOTPLUG_CPU | 应 | 应 | 应 | 应 | y | y | y | y | | +| 134. | CONFIG_KPROBES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 135. | CONFIG_SECCOMP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 136. | CONFIG_MODULES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 137. | CONFIG_MODULE_UNLOAD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 138. | CONFIG_MODULE_SIG | 应 | 应 | 应 | 应 | y | y | y | y | | +| 139. | CONFIG_BINFMT_ELF | 应 | 应 | 应 | 应 | y | y | y | y | | +| 140. | CONFIG_BINFMT_SCRIPT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 141. | CONFIG_COREDUMP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 142. | CONFIG_SYSVIPC_SYSCTL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 143. | CONFIG_UPROBES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 144. | CONFIG_KRETPROBES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 145. | CONFIG_STACKPROTECTOR | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 146. | CONFIG_ELFCORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 147. | CONFIG_POSIX_MQUEUE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 148. | CONFIG_POSIX_MQUEUE_SYSCTL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 149. | CONFIG_THREAD_INFO_IN_TASK | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 150. | CONFIG_VMAP_STACK | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 151. | CONFIG_MODVERSIONS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 152. | CONFIG_SWAP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 153. | CONFIG_SLUB | 应 | 应 | 应 | 应 | y | y | y | y | | +| 154. | CONFIG_SPARSEMEM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 155. | CONFIG_MEMORY_HOTPLUG | 应 | 应 | 应 | 应 | y | y | y | y | | +| 156. | CONFIG_COMPACTION | 应 | 应 | 应 | 应 | y | y | y | y | | +| 157. | CONFIG_MIGRATION | 应 | 应 | 应 | 应 | y | y | y | y | | +| 158. | CONFIG_TRANSPARENT_HUGEPAGE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 159. | CONFIG_ZONE_DMA | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 160. | CONFIG_ZONE_DMA32 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 161. | CONFIG_USERFAULTFD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 162. | CONFIG_MEMFD_CREATE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 163. | CONFIG_VM_EVENT_COUNTERS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 164. | CONFIG_EFI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 165. | CONFIG_EFI_STUB | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 166. | CONFIG_TLS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 167. | CONFIG_SMC | 应 | 应 | 应 | 应 | m | m | m | m | | +| 168. | CONFIG_INET_DIAG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 169. | CONFIG_NF_CONNTRACK | 应 | 应 | 应 | 应 | m | m | m | m | | +| 170. | CONFIG_NF_NAT | 应 | 应 | 应 | 应 | m | m | m | m | | +| 171. | CONFIG_NF_TABLES | 应 | 应 | 应 | 应 | m | m | m | m | | +| 172. | CONFIG_IP_SET | 应 | 应 | 应 | 应 | m | m | m | m | | +| 173. | CONFIG_IP_VS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 174. | CONFIG_IP_NF_RAW | 应 | 应 | 应 | 应 | m | m | m | m | | +| 175. | CONFIG_IP_NF_SECURITY | 应 | 应 | 应 | 应 | m | m | m | m | | +| 176. | CONFIG_IP_NF_ARPTABLES | 应 | 应 | 应 | 应 | m | m | m | m | | +| 177. | CONFIG_BRIDGE | 应 | 应 | 应 | 应 | m | m | m | m | | +| 178. | CONFIG_NET_SCH_INGRESS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 179. | CONFIG_DNS_RESOLVER | 应 | 应 | 应 | 应 | m | m | y | m | | +| 180. | CONFIG_VSOCKETS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 181. | CONFIG_NETLINK_DIAG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 182. | CONFIG_VLAN_8021Q | 应 | 应 | 应 | 应 | m | m | m | m | | +| 183. | CONFIG_INET_TCP_DIAG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 184. | CONFIG_INET_UDP_DIAG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 185. | CONFIG_PACKET_DIAG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 186. 
| CONFIG_NETFILTER_XT_MARK | 应 | 应 | 应 | 应 | m | m | m | m | | +| 187. | CONFIG_IP_NF_IPTABLES | 应 | 应 | 应 | 应 | m | m | m | m | | +| 188. | CONFIG_NET | 应 | 应 | 应 | 应 | y | y | y | y | | +| 189. | CONFIG_PACKET | 应 | 应 | 应 | 应 | y | y | y | y | | +| 190. | CONFIG_UNIX | 应 | 应 | 应 | 应 | y | y | y | y | | +| 191. | CONFIG_XFRM_USER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 192. | CONFIG_XDP_SOCKETS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 193. | CONFIG_INET | 应 | 应 | 应 | 应 | y | y | y | y | | +| 194. | CONFIG_SYN_COOKIES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 195. | CONFIG_NETFILTER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 196. | CONFIG_NETFILTER_INGRESS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 197. | CONFIG_NF_TABLES_INET | 应 | 应 | 应 | 应 | y | y | y | y | | +| 198. | CONFIG_NET_SCHED | 应 | 应 | 应 | 应 | y | y | y | y | | +| 199. | CONFIG_NET_CLS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 200. | CONFIG_NET_CLS_ACT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 201. | CONFIG_RPS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 202. | CONFIG_XPS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 203. | CONFIG_XFRM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 204. | CONFIG_TCP_CONG_ADVANCED | 应 | 应 | 应 | 应 | y | y | y | y | | +| 205. | CONFIG_TCP_CONG_CUBIC | 应 | 应 | 应 | 应 | y | y | m | m | | +| 206. | CONFIG_IPV6 | 应 | 应 | 应 | 应 | y | y | m | m | | +| 207. | CONFIG_NETFILTER_ADVANCED | 应 | 应 | 应 | 应 | y | y | y | y | | +| 208. | CONFIG_NF_TABLES_IPV4 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 209. | CONFIG_NF_TABLES_IPV6 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 210. | CONFIG_NF_TABLES_ARP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 211. | CONFIG_NET_SCH_FQ_CODEL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 212. | CONFIG_CGROUPS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 213. | CONFIG_BLK_CGROUP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 214. | CONFIG_CGROUP_SCHED | 应 | 应 | 应 | 应 | y | y | y | y | | +| 215. | CONFIG_CGROUP_PIDS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 216. | CONFIG_CGROUP_RDMA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 217. | CONFIG_CGROUP_HUGETLB | 应 | 应 | 应 | 应 | y | y | y | y | | +| 218. | CONFIG_CGROUP_DEVICE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 219. | CONFIG_CGROUP_CPUACCT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 220. | CONFIG_CGROUP_PERF | 应 | 应 | 应 | 应 | y | y | y | y | | +| 221. | CONFIG_CGROUP_BPF | 应 | 应 | 应 | 应 | y | y | y | y | | +| 222. | CONFIG_NAMESPACES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 223. | CONFIG_CGROUP_FREEZER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 224. | CONFIG_ZRAM | 应 | 应 | 应 | 应 | m | m | m | m | | +| 225. | CONFIG_NVME_CORE | 应 | 应 | 应 | 应 | m | m | m | y | | +| 226. | CONFIG_BLK_DEV_NVME | 应 | 应 | 应 | 应 | m | m | m | m | | +| 227. | CONFIG_BONDING | 应 | 应 | 应 | 应 | m | m | m | m | | +| 228. | CONFIG_TUN | 应 | 应 | 应 | 应 | m | m | m | y | | +| 229. | CONFIG_TAP | 应 | 应 | 应 | 应 | m | m | m | m | | +| 230. | CONFIG_VETH | 应 | 应 | 应 | 应 | m | m | m | m | | +| 231. | CONFIG_VIRTIO_NET | 应 | 应 | 应 | 应 | m | m | m | m | | +| 232. | CONFIG_IPMI_HANDLER | 应 | 应 | 应 | 应 | m | m | m | m | | +| 233. | CONFIG_INFINIBAND | 应 | 应 | 应 | 应 | m | m | m | m | | +| 234. | CONFIG_UIO | 应 | 应 | 应 | 应 | m | m | m | m | | +| 235. | CONFIG_VFIO | 应 | 应 | 应 | 应 | m | m | m | m | | +| 236. | CONFIG_VFIO_PCI | 应 | 应 | 应 | 应 | m | m | m | m | | +| 237. | CONFIG_VHOST_NET | 应 | 应 | 应 | 应 | m | m | m | m | | +| 238. | CONFIG_VHOST_VSOCK | 应 | 应 | 应 | 应 | m | m | m | m | | +| 239. | CONFIG_VXLAN | 应 | 应 | 应 | 应 | m | m | m | m | | +| 240. | CONFIG_IPMI_SI | 应 | 应 | 应 | 应 | m | m | m | m | | +| 241. 
| CONFIG_SOFT_WATCHDOG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 242. | CONFIG_VHOST | 应 | 应 | 应 | 应 | m | m | m | m | | +| 243. | CONFIG_NVME_TARGET | 应 | 应 | 应 | 应 | m | m | m | y | | +| 244. | CONFIG_BLK_DEV_DM | 应 | 应 | 应 | 应 | m | m | m | m | | +| 245. | CONFIG_NET_TEAM | 应 | 应 | 应 | 应 | m | m | m | m | | +| 246. | CONFIG_SATA_AHCI | 应 | 应 | 应 | 应 | m | m | y | y | | +| 247. | CONFIG_I2C | 应 | 应 | 应 | 应 | y | y | y | y | | +| 248. | CONFIG_VIRTIO_MEM | 应 | 应 | N/A | N/A | m | m | N/A | N/A | | +| 249. | CONFIG_ATA | 应 | 应 | 应 | 应 | m | m | y | y | | +| 250. | CONFIG_ETHTOOL_NETLINK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 251. | CONFIG_BLK_DEV | 应 | 应 | 应 | 应 | y | y | y | y | | +| 252. | CONFIG_SCSI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 253. | CONFIG_MD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 254. | CONFIG_NETDEVICES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 255. | CONFIG_NET_CORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 256. | CONFIG_INPUT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 257. | CONFIG_INPUT_KEYBOARD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 258. | CONFIG_INPUT_MOUSE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 259. | CONFIG_SERIO | 应 | 应 | 应 | 应 | y | y | y | y | | +| 260. | CONFIG_TTY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 261. | CONFIG_HW_RANDOM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 262. | CONFIG_HWMON | 应 | 应 | 应 | 应 | y | y | y | y | | +| 263. | CONFIG_THERMAL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 264. | CONFIG_FB | 应 | 应 | 应 | 应 | y | y | y | y | | +| 265. | CONFIG_HDMI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 266. | CONFIG_FRAMEBUFFER_CONSOLE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 267. | CONFIG_USB_SUPPORT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 268. | CONFIG_USB | 应 | 应 | 应 | 应 | y | y | y | y | | +| 269. | CONFIG_EDAC | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 270. | CONFIG_RTC_CLASS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 271. | CONFIG_DMADEVICES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 272. | CONFIG_VIRTIO_MENU | 应 | 应 | 应 | 应 | y | y | y | y | | +| 273. | CONFIG_VIRTIO_MMIO | 宜 | 宜 | 宜 | 宜 | m | m | m | y | | +| 274. | CONFIG_COMMON_CLK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 275. | CONFIG_IOMMU_SUPPORT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 276. | CONFIG_RAS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 277. | CONFIG_SCSI_SAS_ATA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 278. | CONFIG_SERIAL_CORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 279. | CONFIG_DEVMEM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 280. | CONFIG_WATCHDOG | 应 | 应 | 应 | 应 | y | y | y | y | | +| 281. | CONFIG_DMA_ENGINE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 282. | CONFIG_VIRTIO_PCI_LEGACY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 283. | CONFIG_SERIAL_8250 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 284. | CONFIG_FB_EFI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 285. | CONFIG_VT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 286. | CONFIG_VT_CONSOLE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 287. | CONFIG_VIRTIO | 应 | 应 | 应 | 应 | y | m | y | y | | +| 288. | CONFIG_VIRTIO_PCI | 应 | 应 | 应 | 应 | y | m | y | y | | +| 289. | CONFIG_SPI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 290. | CONFIG_DEVTMPFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 291. | CONFIG_DEVTMPFS_MOUNT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 292. | CONFIG_AUXILIARY_BUS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 293. | CONFIG_PCI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 294. | CONFIG_PCIEPORTBUS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 295. | CONFIG_HOTPLUG_PCI_PCIE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 296. | CONFIG_PCI_MSI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 297. 
| CONFIG_HOTPLUG_PCI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 298. | CONFIG_PCIEAER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 299. | CONFIG_PCI_ATS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 300. | CONFIG_PCI_IOV | 应 | 应 | 应 | 应 | y | y | y | y | | +| 301. | CONFIG_PCIEASPM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 302. | CONFIG_HIGH_RES_TIMERS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 303. | CONFIG_NO_HZ_COMMON | 应 | 应 | 应 | 应 | y | y | y | y | | +| 304. | CONFIG_NO_HZ | 应 | 应 | 应 | 应 | y | y | y | y | | +| 305. | CONFIG_IP_VS_IPV6 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 306. | CONFIG_XFS_FS | 应 | 应 | 应 | 应 | m | m | y | y | | +| 307. | CONFIG_FUSE_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 308. | CONFIG_VIRTIO_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 309. | CONFIG_OVERLAY_FS | 应 | 应 | 应 | 应 | m | m | y | m | | +| 310. | CONFIG_ISO9660_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 311. | CONFIG_SQUASHFS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 312. | CONFIG_EROFS_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 313. | CONFIG_NFS_FS | 应 | 应 | 应 | 应 | m | m | y | m | | +| 314. | CONFIG_NFS_V4 | 应 | 应 | 应 | 应 | m | m | m | m | | +| 315. | CONFIG_SUNRPC | 应 | 应 | 应 | 应 | m | m | y | m | | +| 316. | CONFIG_NLS_UTF8 | 应 | 应 | 应 | 应 | m | m | y | m | | +| 317. | CONFIG_FAT_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 318. | CONFIG_VFAT_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 319. | CONFIG_NFSD | 应 | 应 | 应 | 应 | m | m | y | m | | +| 320. | CONFIG_LOCKD | 应 | 应 | 应 | 应 | m | m | y | m | | +| 321. | CONFIG_DAX | 应 | 应 | 应 | 应 | y | y | y | y | | +| 322. | CONFIG_FS_DAX | 应 | 应 | N/A | N/A | y | y | y | y | | +| 323. | CONFIG_FILE_LOCKING | 应 | 应 | 应 | 应 | y | y | y | y | | +| 324. | CONFIG_FSNOTIFY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 325. | CONFIG_DNOTIFY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 326. | CONFIG_FANOTIFY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 327. | CONFIG_QUOTA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 328. | CONFIG_PROC_FS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 329. | CONFIG_PROC_KCORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 330. | CONFIG_PROC_VMCORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 331. | CONFIG_PROC_SYSCTL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 332. | CONFIG_KERNFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 333. | CONFIG_SYSFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 334. | CONFIG_TMPFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 335. | CONFIG_HUGETLBFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 336. | CONFIG_HUGETLB_PAGE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 337. | CONFIG_MISC_FILESYSTEMS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 338. | CONFIG_NETWORK_FILESYSTEMS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 339. | CONFIG_NFS_FSCACHE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 340. | CONFIG_NFSD_V4 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 341. | CONFIG_LOCKD_V4 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 342. | CONFIG_NFS_COMMON | 应 | 应 | 应 | 应 | y | y | y | y | | +| 343. | CONFIG_NLS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 344. | CONFIG_NLS_ASCII | 应 | 应 | 应 | 应 | y | y | y | y | | +| 345. | CONFIG_PSTORE | 应 | 应 | 应 | 应 | y | y | m | m | | +| 346. | CONFIG_AUTOFS_FS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 347. | CONFIG_KVM | 应 | 应 | 应 | 应 | m | y | y | y | | +| 348. | CONFIG_VIRTUALIZATION | 应 | 应 | 应 | 应 | y | y | y | y | | +| 349. | CONFIG_KVM_MMIO | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 350. | CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 351. | CONFIG_GENERIC_MSI_IRQ | 应 | 应 | 应 | 应 | y | y | y | y | | +| 352. | CONFIG_IRQ_MSI_IOMMU | 应 | 应 | N/A | 应 | y | y | N/A | y | | +| 353. 
| CONFIG_NODES_SHIFT | 应 | 应 | 应 | 应 | 6/8/10 | 6/8/10 | 6 | 7 | | +| 354. | CONFIG_NTFS3_FS | 应 | 应 | 应 | N/A | m | m | m | N/A | | +| 355. | CONFIG_BLK_DEV_SD | 应 | 应 | 应 | 应 | m | m | m | y | | +| 356. | CONFIG_ACPI_THERMAL | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 357. | CONFIG_TRACING | 应 | 应 | 应 | 应 | y | y | y | y | | +| 358. | CONFIG_GPIO_ACPI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 359. | CONFIG_MEMORY_FAILURE | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 360. | CONFIG_LIVEPATCH | 应 | 宜 | 宜 | 宜 | y | y | y | y | | +| 361. | CONFIG_PCIE_EDR | 应 | 应 | N/A | 应 | y | y | N/A | y | | +| 362. | CONFIG_RANDOMIZE_BASE | 应 | 应 | 宜 | 宜 | y | y | N/A | N/A | | +| 363. | CONFIG_X86_64 | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 364. | CONFIG_INSTRUCTION_DECODER | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 365. | CONFIG_KVM_GUEST | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 366. | CONFIG_X86_TSC | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 367. | CONFIG_IA32_FEAT_CTL | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 368. | CONFIG_CPU_SUP_INTEL | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 369. | CONFIG_CPU_SUP_AMD | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 370. | CONFIG_CPU_SUP_HYGON | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 371. | CONFIG_CPU_SUP_ZHAOXIN | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 372. | CONFIG_X86_SGX | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 373. | CONFIG_X86 | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 374. | CONFIG_X86_X2APIC | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 375. | CONFIG_HYPERVISOR_GUEST | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 376. | CONFIG_HPET_TIMER | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 377. | CONFIG_X86_LOCAL_APIC | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 378. | CONFIG_X86_IO_APIC | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 379. | CONFIG_ARCH_CPUIDLE_HALTPOLL | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 380. | CONFIG_PARAVIRT_CLOCK | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 381. | CONFIG_X86_64_SMP | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 382. | CONFIG_X86_CPUID | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 383. | CONFIG_X86_MSR | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 384. | CONFIG_CRYPTO_SIMD | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 385. | CONFIG_CPU_MITIGATIONS | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | 同CONFIG_SPECULATION_MITIGATIONS | +| 386. | CONFIG_VGA_CONSOLE | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 387. | CONFIG_IRQ_REMAP | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 388. | CONFIG_KVM_INTEL | 应 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 389. | CONFIG_KVM_AMD | 应 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 390. | CONFIG_CPU_SUP_CENTAUR | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 391. | CONFIG_INTEL_IOMMU | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 392. | CONFIG_X86_MCE | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 393. | CONFIG_X86_MCE_INTEL | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 394. | CONFIG_MICROCODE | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 395. | CONFIG_AMD_MEM_ENCRYPT | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 396. | CONFIG_ARM_SMMU | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 397. | CONFIG_ARM64 | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 398. | CONFIG_ARM64_HW_AFDBM | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 399. 
| CONFIG_ARM64_PAN | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 400. | CONFIG_ARM64_RAS_EXTN | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 401. | CONFIG_ARM64_CNP | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 402. | CONFIG_ARM64_SVE | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 403. | CONFIG_ARM64_PSEUDO_NMI | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 404. | CONFIG_ARM_SMMU_V3 | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 405. | CONFIG_ARM_GIC | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 406. | CONFIG_ARM_GIC_V2M | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 407. | CONFIG_ARM_GIC_V3 | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 408. | CONFIG_ARM_GIC_V3_ITS | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 409. | CONFIG_ARM_GIC_V3_ITS_PCI | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 410. | CONFIG_ARM_PMU | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 411. | CONFIG_CPU_LITTLE_ENDIAN | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 412. | CONFIG_ARCH_HISI | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 413. | CONFIG_ARM64_E0PD | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 414. | CONFIG_ARM64_EPAN | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 415. | CONFIG_ARM_CCN | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 416. | CONFIG_ARM_SPE_PMU | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 417. | CONFIG_HISI_PMU | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 418. | CONFIG_CPU_PM | N/A | 应 | 应 | N/A | N/A | y | y | N/A | | +| 419. | CONFIG_HISILICON_LPC | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 420. | CONFIG_PCI_HOST_GENERIC | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 421. | CONFIG_PCI_HISI | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 422. | CONFIG_GENERIC_IRQ_IPI | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 423. | CONFIG_GPIO_HISI | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 424. | CONFIG_HISI_PCIE_PMU | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 425. | CONFIG_ARM_SMMU_V3_PMU | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 426. | CONFIG_SCSI_HISI_SAS | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 427. | CONFIG_SCSI_HISI_SAS_PCI | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 428. | CONFIG_DRM_HISI_HIBMC | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 429. | CONFIG_ARM64_SME | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 430. | CONFIG_SPI_HISI_KUNPENG | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 431. | CONFIG_LOONGARCH | N/A | N/A | 应 | 应 | N/A | N/A | y | N/A | | +| 432. | CONFIG_UNWINDER_PROLOGUE | N/A | N/A | 应 | N/A | N/A | N/A | y | N/A | | +| 433. | CONFIG_SW64 | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 434. | CONFIG_NONCACHE_PAGE | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 435. | CONFIG_SW64_CHIP3 | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 436. | CONFIG_SW64_CPUFREQ | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 437. | CONFIG_SW64_CPUAUTOPLUG | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 438. | CONFIG_DEEP_MEMCPY | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 439. | CONFIG_DEEP_MEMSET | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 440. | CONFIG_RSEQ | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 441. | CONFIG_MEMCG_KMEM | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 442. | CONFIG_CHECKPOINT_RESTORE | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 443. | CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 444. | CONFIG_BASE_FULL | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 445.
| CONFIG_TASKSTATS | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 446. | CONFIG_PROC_PID_CPUSET | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 447. | CONFIG_SCHED_AUTOGROUP | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 448. | CONFIG_PGTABLE_LEVELS | 宜 | 宜 | 宜 | 宜 | 5 | 4 | 3 | 4 | | +| 449. | CONFIG_PROFILING | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 450. | CONFIG_CRYPTO_CRC32 | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 451. | CONFIG_SECURITY_INFINIBAND | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 452. | CONFIG_SECURITY_NETWORK_XFRM | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 453. | CONFIG_CRYPTO_LZO | 宜 | 宜 | 宜 | 宜 | y | y | m | m | | +| 454. | CONFIG_CUSE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 455. | CONFIG_CRYPTO_FIPS | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 456. | CONFIG_RATIONAL | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 457. | CONFIG_SYSTEM_TRUSTED_KEYS | 宜 | 宜 | 宜 | 宜 | N/A | N/A | 0 | 0 | 需要配置一个值但不做限定 | +| 458. | CONFIG_MQ_IOSCHED_KYBER | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 459. | CONFIG_BLK_PM | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 460. | CONFIG_CRC16 | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 461. | CONFIG_BUILD_SALT | 宜 | 宜 | 宜 | 宜 | 0 | N/A | 0 | 0 | 需要配置一个值但不做限定 | +| 462. | CONFIG_HZ | 宜 | 宜 | 宜 | 宜 | 100/250/1000 | 100/250/1000 | 250 | 250 | | +| 463. | CONFIG_ZSMALLOC | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 464. | CONFIG_KSM | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 465. | CONFIG_IP_VS_PROTO_TCP | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 466. | CONFIG_IP_VS_RR | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 467. | CONFIG_TCP_CONG_BBR | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 468. | CONFIG_MPTCP | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 469. | CONFIG_NET_ACT_POLICE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 470. | CONFIG_NET_ACT_GACT | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 471. | CONFIG_NETFILTER_XTABLES | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 472. | CONFIG_CGROUP_WRITEBACK | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 473. | CONFIG_USB_ACM | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 474. | CONFIG_RTC_INTF_DEV | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 475. | CONFIG_NVME_FABRICS | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 476. | CONFIG_NVME_RDMA | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 477. | CONFIG_NVME_TCP | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 478. | CONFIG_MACVLAN | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 479. | CONFIG_USB_XHCI_HCD | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 480. | CONFIG_USB_EHCI_HCD | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 481. | CONFIG_USB_EHCI_PCI | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 482. | CONFIG_MEGARAID_SAS | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 483. | CONFIG_SCSI_MPT3SAS | 宜 | 宜 | 宜 | 宜 | m | m | y | m | | +| 484. | CONFIG_BNX2 | 宜 | 宜 | 宜 | 宜 | m | m | y | m | | +| 485. | CONFIG_BNX2X | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 486. | CONFIG_BNXT | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 487. | CONFIG_CHELSIO_T4 | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 488. | CONFIG_IGB | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 489. | CONFIG_IXGBE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 490. | CONFIG_IXGBEVF | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 491. | CONFIG_I40E | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 492. | CONFIG_I40EVF | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 493. | CONFIG_ICE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 494. | CONFIG_MLX4_EN | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 495. | CONFIG_MLX4_CORE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 496. | CONFIG_MLX5_CORE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 497. | CONFIG_MLX5_CORE_EN | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 498. | CONFIG_NGBE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 499. | CONFIG_TXGBE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 500. 
| CONFIG_MTD | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 501. | CONFIG_USB_STORAGE | 宜 | 宜 | 宜 | 宜 | m | m | m | y | | +| 502. | CONFIG_VIRTIO_CONSOLE | 宜 | 宜 | 宜 | 宜 | m | m | y | m | | +| 503. | CONFIG_DRM | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 504. | CONFIG_SCSI_MPT2SAS | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 505. | CONFIG_FCOE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 506. | CONFIG_E1000 | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 507. | CONFIG_FSCACHE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 508. | CONFIG_NFS_V3 | 宜 | 宜 | 宜 | 宜 | m | m | m | m | 推荐 | +| 509. | CONFIG_NFS_V4_1 | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 510. | CONFIG_NFS_V4_2 | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 511. | CONFIG_NFSD_V3_ACL | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 512. | CONFIG_NFS_ACL_SUPPORT | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 513. | CONFIG_CONFIGFS_FS | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 514. | CONFIG_CIFS | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 515. | CONFIG_BTRFS_FS | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 516. | CONFIG_SPARSEMEM_VMEMMAP | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 517. | CONFIG_VIRTIO_BLK | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 518. | CONFIG_EXT3_FS | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 519. | CONFIG_EXT4_FS | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 520. | CONFIG_JBD2 | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 521. | CONFIG_LOG_BUF_SHIFT | 宜 | 宜 | 宜 | 宜 | 20 | 20 | 18 | 18 | | +| 522. | CONFIG_LOG_CPU_MAX_BUF_SHIFT | 宜 | 宜 | 宜 | 宜 | 12 | 12 | 12 | 12 | | +| 523. | CONFIG_RTC_SYSTOHC | 宜 | 宜 | 应 | 应 | y | y | y | y | | +| 524. | CONFIG_ILLEGAL_POINTER_VALUE | 宜 | 宜 | N/A | N/A | 0xdead000000000000 | 0xdead000000000000 | N/A | N/A | | +| 525. | CONFIG_DAMON | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 526. | CONFIG_CXL_BUS | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 527. | CONFIG_CXL_PCI | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 528. | CONFIG_NO_HZ_FULL | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 529. | CONFIG_NTB | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 530. | CONFIG_UACCE | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 531. | CONFIG_VIRT_CPU_ACCOUNTING | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 532. | CONFIG_INET_MPTCP_DIAG | 宜 | 宜 | N/A | N/A | m | m | m | m | | +| 533. | CONFIG_VIRT_CPU_ACCOUNTING_GEN | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 534. | CONFIG_PVPANIC_MMIO | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 535. | CONFIG_HINIC | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 536. | CONFIG_SCHED_CLUSTER | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 537. | CONFIG_ACPI_HMAT | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 538. | CONFIG_ACPI_APEI | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 539. | CONFIG_ACPI_APEI_GHES | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 540. | CONFIG_ACPI_APEI_MEMORY_FAILURE | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 541. | CONFIG_STACKPROTECTOR_STRONG | 宜 | 宜 | 宜 | N/A | y | y | y | N/A | | +| 542. | CONFIG_SCHED_MC_PRIO | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 543. | CONFIG_X86_CMPXCHG64 | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 544. | CONFIG_X86_CMOV | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 545. | CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 546. | CONFIG_X86_VSYSCALL_EMULATION | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 547. | CONFIG_X86_IOPL_IOPERM | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 548. | CONFIG_X86_DIRECT_GBPAGES | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 549. | CONFIG_X86_MPPARSE | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 550. 
| CONFIG_OUTPUT_FORMAT | 宜 | N/A | N/A | N/A | elf64-x86-64 | N/A | N/A | N/A | | +| 551. | CONFIG_PARAVIRT_SPINLOCKS | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 552. | CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64 | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 553. | CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64 | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 554. | CONFIG_CRYPTO_SM3_AVX_X86_64 | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 555. | CONFIG_INTEL_IDXD_BUS | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 556. | CONFIG_INTEL_PMT_CLASS | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 557. | CONFIG_INTEL_TPMI | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 558. | CONFIG_UNWINDER_ORC | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 559. | CONFIG_VFIO_MDEV | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 560. | CONFIG_VFIO_IOMMU_TYPE1 | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 561. | CONFIG_X86_INTEL_PSTATE | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 562. | CONFIG_INTEL_IDLE | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 563. | CONFIG_COMPAT | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 564. | CONFIG_INTEL_PMC_CORE | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 565. | CONFIG_INTEL_IFS | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 566. | CONFIG_SATA_ZHAOXIN | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 567. | CONFIG_HW_RANDOM_ZHAOXIN | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 568. | CONFIG_CRYPTO_DEV_ZHAOXIN | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 569. | CONFIG_CRYPTO_DEV_ZHAOXIN_AES | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 570. | CONFIG_CRYPTO_DEV_ZHAOXIN_SHA | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 571. | CONFIG_SENSORS_ZHAOXIN_CPUTEMP | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 572. | CONFIG_I2C_ZHAOXIN | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 573. | CONFIG_CRYPTO_SM3_ZHAOXIN_GMI | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 574. | CONFIG_CRYPTO_SM4_ZHAOXIN_GMI | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 575. | CONFIG_PINCTRL_ZHAOXIN | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 576. | CONFIG_PINCTRL_KX7000 | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 577. | CONFIG_CRYPTO_DEV_CCP | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 578. | CONFIG_ARM64_PMEM | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | 同类推荐 | +| 579. | CONFIG_ARM64_4K_PAGES | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 580. | CONFIG_CRYPTO_SM3_ARM64_CE | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 581. | CONFIG_SCSI_HISI_SAS | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 582. | CONFIG_HNS | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 583. | CONFIG_HNS3 | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 584. | CONFIG_RESET_HISI | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 585. | CONFIG_I2C_HISI | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 586. | CONFIG_CRYPTO_DEV_HISI_SEC | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 587. | CONFIG_CRYPTO_DEV_HISI_HPRE | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 588. | CONFIG_CRYPTO_DEV_HISI_TRNG | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 589. | CONFIG_ARM64_AMU_EXTN | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 590. | CONFIG_HISI_THERMAL | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 591. | CONFIG_GENERIC_PHY | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 592. | CONFIG_KUNPENG_HCCS | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 593. 
| CONFIG_ARM64_64K_PAGES | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 594. | CONFIG_ARM64_VA_BITS_48 | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 595. | CONFIG_ARM64_PA_BITS_48 | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 596. | CONFIG_ARM64_LSE_ATOMICS | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 597. | CONFIG_ARCH_PHYTIUM | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 598. | CONFIG_ARM_GIC_PHYTIUM_2500 | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 599. | CONFIG_CRYPTO_DEV_HISI_QM | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 600. | CONFIG_CRYPTO_DEV_HISI_SEC2 | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 601. | CONFIG_CRYPTO_DEV_HISI_ZIP | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 602. | CONFIG_INFINIBAND_HNS | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 603. | CONFIG_INFINIBAND_HNS_HIP08 | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 604. | CONFIG_CORESIGHT | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 605. | CONFIG_SPI_HISI_SFC_V3XX | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 606. | CONFIG_SPI_MASTER | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 607. | CONFIG_ARM_SMMU_V3_PMU | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 608. | CONFIG_ARM_SMMU_V3_SVA | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 609. | CONFIG_DRM_PHYTIUM | N/A | 宜 | N/A | N/A | N/A | m | m | N/A | | +| 610. | CONFIG_ACPI_CPPC_CPUFREQ | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 611. | CONFIG_ACPI_APEI_SEA | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 612. | CONFIG_VFIO_PLATFORM | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 613. | CONFIG_ARCH_STRICT_ALIGN | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 614. | CONFIG_ARCH_FORCE_MAX_ORDER | N/A | N/A | 宜 | N/A | N/A | N/A | 11 | N/A | | +| 615. | CONFIG_ARCH_IOREMAP | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 616. | CONFIG_16KB_3LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | 同类推荐 | +| 617. | CONFIG_16KB_2LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 618. | CONFIG_64KB_3LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 619. | CONFIG_64KB_2LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 620. | CONFIG_4KB_3LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 621. | CONFIG_4KB_4LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 622. | CONFIG_ARCH_WRITECOMBINE | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 623. | CONFIG_CPU_HAS_LSX | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 624. | CONFIG_CPU_HAS_LASX | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 625. | CONFIG_CPU_HAS_LBT | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 626. | CONFIG_I2C_LS2X | N/A | N/A | 宜 | N/A | N/A | N/A | m | N/A | | +| 627. | CONFIG_SPI_LOONGSON_PCI | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 628. | CONFIG_DWMAC_LOONGSON | N/A | N/A | 宜 | N/A | N/A | N/A | m | N/A | | +| 629. | CONFIG_DRM_LOONGSON | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 630. | CONFIG_FB_LS2K500 | N/A | N/A | 宜 | N/A | N/A | N/A | m | N/A | | +| 631. | CONFIG_GPIO_LOONGSON_64BIT | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 632. | CONFIG_RTC_DRV_LOONGSON | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 633. 
| CONFIG_CRYPTO_CRC32_LOONGARCH | N/A | N/A | 宜 | N/A | N/A | N/A | m | N/A | |
\ No newline at end of file
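The baseline above can be checked mechanically against a kernel's build-time config. A minimal sketch, assuming the mandatory (应) options have been extracted into a hypothetical plain-text list with one CONFIG_ name per line::

    # flag required options that are neither built-in (y) nor modular (m);
    # baseline-required.list is a hypothetical extract of the table above
    while read -r opt; do
        grep -qE "^${opt}=[ym]" "/boot/config-$(uname -r)" || echo "missing: ${opt}"
    done < baseline-required.list

Options whose baseline value must be exactly y would need a stricter pattern; this only demonstrates the mechanics.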
diff --git a/anolis/genlog.sh b/anolis/genlog.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a11293855fa59c2ce8cd91b9960741bbac3cdd02
--- /dev/null
+++ b/anolis/genlog.sh
@@ -0,0 +1,65 @@
+# by default, it generates changelogs from the latest tag to HEAD
+function get_changelog_start_end() {
+    if [ -z "$CHANGELOG_START" ]; then
+        CHANGELOG_START=$(git describe --tags --abbrev=0)
+    fi
+    if [ -z "$CHANGELOG_START" ]; then
+        echo "cannot decide CHANGELOG_START"
+        exit 1
+    fi
+
+    if [ -z "$CHANGELOG_END" ]; then
+        CHANGELOG_END=$(git log --format="%H" -1 HEAD)
+    fi
+}
+
+function get_author_sign() {
+    if [ -z "$AUTHOR_SIGN" ]; then
+        AUTHOR_SIGN=$(git var GIT_COMMITTER_IDENT |sed 's/>.*/>/')
+    fi
+    if [ -z "$AUTHOR_SIGN" ]; then
+        echo "unknown AUTHOR_SIGN"
+        exit 1
+    fi
+}
+
+function get_changelog_file_name() {
+    local file_base_name="changelog.${DIST_ANOLIS_VERSION}"
+    local files_num=$(ls ${DIST_CHANGELOG} | grep -E '[0-9]+-changelog.*' | wc -l)
+    local file_name=$(printf "%03d-${file_base_name}\n" ${files_num})
+    CHANGELOG_FILE=${DIST_CHANGELOG}/${file_name}
+}
+
+function generate_changelog() {
+    get_changelog_start_end
+    get_author_sign
+    get_changelog_file_name
+
+    touch ${CHANGELOG_FILE}
+    echo "* $(date +"%a %b %d %Y") ${AUTHOR_SIGN} [${DIST_ANOLIS_VERSION}%%DIST%%]" > ${CHANGELOG_FILE}
+
+    # TODO:
+    # 1. if config changes, add kernel config refresh log
+    # 2. if linux upstream kernel version updated, add related log
+
+    local commits=$(git rev-list ${CHANGELOG_START}..${CHANGELOG_END})
+    for commit in $commits
+    do
+        ## eg: - anolis: net/netfilter: rename nft_expr_info (Kangjie Xu)
+        local log=$(git log --format='- %s (%an)' -1 ${commit})
+
+        ## eg: {CVE-2022-32250}
+        ## xargs is used to strip spaces
+        local cve_list=$(git log --format='%b' -1 ${commit} | grep -Eio '^[[:blank:]]*Fixes:[[:blank:]]*CVE-.*[[:blank:]]*$' | sed 's/fixes://ig' | xargs | sed 's/[[:blank:]]/,/')
+        local cve_fmt=""
+        if [ -n "${cve_list}" ]; then
+            cve_fmt=$(cat <<< "${cve_list}" | paste -sd "," -)
+            cve_fmt=" {${cve_fmt}}"
+        fi
+        ## merge them together, eg: - anolis: net/netfilter: rename nft_expr_info (Kangjie Xu) {CVE-2022-32250}
+        echo "${log}${cve_fmt}" >> ${CHANGELOG_FILE}
+    done
+    echo "" >> ${CHANGELOG_FILE}
+}
+
+generate_changelog
\ No newline at end of file
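genlog.sh is driven entirely by environment variables, so a one-off run looks roughly like the sketch below; the tag and version are hypothetical, and DIST_ANOLIS_VERSION/DIST_CHANGELOG are normally exported by the calling Makefile::

    # append a new NNN-changelog.<version> file under the changelog directory
    export DIST_ANOLIS_VERSION=5.10.134-17    # hypothetical version
    export DIST_CHANGELOG=anolis/changelog
    CHANGELOG_START=23-1 CHANGELOG_END=HEAD bash anolis/genlog.sh

If CHANGELOG_START is left unset, the script falls back to the most recent tag reported by git describe.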
diff --git a/anolis/genrpmtree.sh b/anolis/genrpmtree.sh
new file mode 100644
index 0000000000000000000000000000000000000000..65fc8d016212193c67bb5487a41a3d221afeb9bb
--- /dev/null
+++ b/anolis/genrpmtree.sh
@@ -0,0 +1,47 @@
+#! /bin/bash
+
+set -xe
+
+function do_prep() {
+    mkdir -p ${DIST_RPMBUILDDIR_OUTPUT}
+    mkdir -p ${DIST_RPMBUILDDIR_OUTPUT}/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
+
+    cp ${DIST_RPM}/cpupower* ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/
+    cp ${DIST_RPM}/generate_bls_conf.sh ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/
+
+    # for official builds, the corresponding tag must exist
+    if [ -n "$DIST_OFFICIAL_BUILD" ]; then
+        if ! git tag | grep -q -x "${DIST_PKG_COMMIT_ID}"; then
+            echo "cannot find official build tag: ${DIST_PKG_COMMIT_ID}"
+            exit 1
+        fi
+    fi
+
+    pkgname="linux-${DIST_ANOLIS_VERSION}${DIST}"
+    pushd ${DIST_SRCROOT} > /dev/null
+    git archive --format=tar --prefix="${pkgname}/" ${DIST_PKG_COMMIT_ID} | xz -T$(nproc) > ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/${pkgname}.tar.xz
+    md5sum ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/${pkgname}.tar.xz > ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/download
+    popd > /dev/null
+    DIST_OUTPUT=${DIST_RPMBUILDDIR_OUTPUT}/SPECS/ sh genspec.sh
+
+    # the kconfigs of x86 and arm64 have been moved to the kconfig baseline,
+    # so use `make dist-configs` to generate them
+    make -C ${DIST_SRCROOT}/anolis dist-configs
+    cp ${DIST_OUTPUT}/kernel-ANCK-generic-x86.config \
+        ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64.config
+    cp ${DIST_OUTPUT}/kernel-ANCK-debug-x86.config \
+        ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64-debug.config
+    cp ${DIST_OUTPUT}/kernel-ANCK-generic-arm64.config \
+        ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64.config
+    cp ${DIST_OUTPUT}/kernel-ANCK-debug-arm64.config \
+        ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64-debug.config
+
+    # the kconfigs of sw_64 and loongarch keep the legacy layout,
+    # so they are still copied from the arch/${arch}/configs/ directory.
+    cp ${DIST_SRCROOT}/arch/loongarch/configs/anolis_defconfig \
+        ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-loongarch64.config
+    cp ${DIST_SRCROOT}/arch/loongarch/configs/anolis-debug_defconfig \
+        ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-loongarch64-debug.config
+}
+
+do_prep
\ No newline at end of file
diff --git a/anolis/genspec.sh b/anolis/genspec.sh
new file mode 100644
index 0000000000000000000000000000000000000000..96c84d74d2161c76dac5cf54733ca8460139e34d
--- /dev/null
+++ b/anolis/genspec.sh
@@ -0,0 +1,33 @@
+#! /bin/bash
+# generate the kernel spec from the spec template and the changelog files.
+# it is called from the Makefile, do not run it directly.
+
+mkdir -p ${DIST_OUTPUT}
+cp -f ${DIST_RPM}/${DIST_SPEC_TEMPLATE} ${DIST_OUTPUT}/${DIST_SPEC_FILE}
+
+for changelog_file in $(ls ${DIST_CHANGELOG} | sort)
+do
+    sed -i "/%changelog/r ${DIST_CHANGELOG}/${changelog_file}" ${DIST_OUTPUT}/${DIST_SPEC_FILE}
+done
+
+sed -i -e "
+    s/%%DIST%%/$DIST/
+    s/%%DIST_KERNELVERSION%%/$DIST_KERNELVERSION/
+    s/%%DIST_PKGRELEASEVERION%%/$DIST_PKGRELEASEVERION/" ${DIST_OUTPUT}/${DIST_SPEC_FILE}
+
+function generate_cmdline() {
+    local arch=$1
+    local cmdline=""
+    for cmd in $(awk '!/^#/ && !/^[[:space:]]*$/' ${DIST_SOURCES}cmdline/${arch})
+    do
+        cmdline="${cmdline} ${cmd}"
+    done
+    echo "${cmdline}"
+}
+
+x86_cmdline=$(generate_cmdline x86)
+arm_cmdline=$(generate_cmdline arm64)
+loongarch_cmdline=$(generate_cmdline loongarch64)
+sed -i -e "s/%%X86_CMDLINE%%/$x86_cmdline/" ${DIST_OUTPUT}/${DIST_SPEC_FILE}
+sed -i -e "s/%%ARM_CMDLINE%%/$arm_cmdline/" ${DIST_OUTPUT}/${DIST_SPEC_FILE}
+sed -i -e "s/%%LOONGARCH_CMDLINE%%/$loongarch_cmdline/" ${DIST_OUTPUT}/${DIST_SPEC_FILE}
diff --git a/anolis/rpm/cpupower.config b/anolis/rpm/cpupower.config
new file mode 100644
index 0000000000000000000000000000000000000000..8629a4a3ede722c5b8c91b259441583dbe65ba6f
--- /dev/null
+++ b/anolis/rpm/cpupower.config
@@ -0,0 +1,3 @@
+# See 'cpupower help' and cpupower(1) for more info
+CPUPOWER_START_OPTS="frequency-set -g performance"
+CPUPOWER_STOP_OPTS="frequency-set -g ondemand"
diff --git a/anolis/rpm/cpupower.service b/anolis/rpm/cpupower.service
new file mode 100644
index 0000000000000000000000000000000000000000..5f10ab7ee39a271b492c2a2bd9a24bdb26db7cb7
--- /dev/null
+++ b/anolis/rpm/cpupower.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Configure CPU power related settings
+After=syslog.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+EnvironmentFile=/etc/sysconfig/cpupower
+ExecStart=/usr/bin/cpupower $CPUPOWER_START_OPTS
+ExecStop=/usr/bin/cpupower $CPUPOWER_STOP_OPTS
+
+[Install]
+WantedBy=multi-user.target
diff --git a/anolis/rpm/generate_bls_conf.sh b/anolis/rpm/generate_bls_conf.sh
new file mode 100755
index 0000000000000000000000000000000000000000..878696c12f338f947361b19266af48fc7b7b57a3
--- /dev/null
+++ b/anolis/rpm/generate_bls_conf.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+set -e
+
+. /etc/os-release
+
+if [ "${ID}" == "anolis" ]; then
+    VERSION=${VERSION%%.*}
+fi
+
+kernelver=$1 && shift
+rootfs=$1 && shift
+variant=$1 && shift
+
+output="${rootfs}/lib/modules/${kernelver}/bls.conf"
+date=$(date -u +%Y%m%d%H%M%S)
+
+if [ "${variant:-5}" = "debug" ]; then
+    debugname=" with debugging"
+    debugid="-debug"
+else
+    debugname=""
+    debugid=""
+fi
+
+cat >${output} <<EOF
+title ${NAME} (${kernelver}) ${VERSION}${debugname}
+version ${kernelver}${debugid}-${date}
+linux /boot/vmlinuz-${kernelver}
+initrd /boot/initramfs-${kernelver}.img
+options \$kernelopts
+grub_users \$grub_users
+grub_arg --unrestricted
+grub_class kernel${variant}
+EOF
+
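generate_bls_conf.sh takes the kernel version, the target root, and the variant as positional arguments and writes bls.conf under the corresponding module directory; a sketch of a manual run with hypothetical values::

    # the module directory must already exist in the target root
    mkdir -p /tmp/rootfs/lib/modules/5.10.134-17.an8.x86_64
    bash anolis/rpm/generate_bls_conf.sh 5.10.134-17.an8.x86_64 /tmp/rootfs debug
    cat /tmp/rootfs/lib/modules/5.10.134-17.an8.x86_64/bls.conf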
diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template
new file mode 100644
--- /dev/null
+++ b/anolis/rpm/kernel.spec.template
+# The following build options are enabled by default.
+# Use either --without <opt> in your rpmbuild command or force values
+# to 0 in here to disable them.
+#
+# standard kernel
+%define with_up %{?_without_up: 0} %{?!_without_up: 1}
+# kernel-debug
+%define with_debug %{?_without_debug: 0} %{?!_without_debug: 1}
+# kernel-doc
+%define with_doc %{?_without_doc: 0} %{?!_without_doc: 1}
+# kernel-headers
+%define with_headers %{?_without_headers: 0} %{?!_without_headers: 1}
+# perf
+%define with_perf %{?_without_perf: 0} %{?!_without_perf: 1}
+# tools
+%define with_tools %{?_without_tools: 0} %{?!_without_tools: 1}
+# bpf tool
+%define with_bpftool %{?_without_bpftool: 0} %{?!_without_bpftool: 1}
+# kernel-debuginfo
+%define with_debuginfo %{?_without_debuginfo: 0} %{?!_without_debuginfo: 1}
+#
+# Additional options for user-friendly one-off kernel building:
+#
+# Only build the base kernel (--with baseonly):
+%define with_baseonly %{?_with_baseonly: 1} %{?!_with_baseonly: 0}
+# Only build the debug kernel (--with dbgonly):
+%define with_dbgonly %{?_with_dbgonly: 1} %{?!_with_dbgonly: 0}
+#
+# should we do C=1 builds with sparse
+%define with_sparse %{?_with_sparse: 1} %{?!_with_sparse: 0}
+
+# For loongarch, disable kernel-debug and bpftool, as they are not supported.
+%ifarch loongarch64
+%define with_debug 0
+%define with_bpftool 0
+%endif
+
+%define with_gcov %{?_with_gcov: 1} %{?!_with_gcov: 0}
+
+# turn off debug kernel for gcov builds
+%if %{with_gcov}
+%define with_debug 0
+%endif
+
+%define make_target bzImage
+%define image_install_path boot
+
+%define KVERREL %{version}-%{release}.%{_target_cpu}
+%define KVERREL_RE %(echo %KVERREL | sed 's/+/[+]/g')
+%define hdrarch %_target_cpu
+%define asmarch %_target_cpu
+
+%if !%{with_debuginfo}
+%define _enable_debug_packages 0
+%endif
+%define debuginfodir /usr/lib/debug
+# Needed because we override almost everything involving build-ids
+# and debuginfo generation. Currently we rely on the old alldebug setting.
+%global _build_id_links alldebug
+
+# if requested, only build base kernel
+%if %{with_baseonly}
+%define with_debug 0
+%define with_perf 0
+%define with_tools 0
+%define with_bpftool 0
+%endif
+
+# if requested, only build debug kernel
+%if %{with_dbgonly}
+%define with_up 0
+%define with_tools 0
+%define with_perf 0
+%define with_bpftool 0
+%endif
+
+# Overrides for generic default options
+
+# only package docs noarch
+%ifnarch noarch
+%define with_doc 0
+%define doc_build_fail true
+%endif
+
+# don't build noarch kernels or headers (duh)
+%ifarch noarch
+%define with_up 0
+%define with_headers 0
+%define with_tools 0
+%define with_perf 0
+%define with_bpftool 0
+%define with_debug 0
+%define with_doc 0
+%define all_arch_configs %{name}-%{version}-*.config
+%endif
+
+# Per-arch tweaks
+
+%ifarch x86_64
+%define asmarch x86
+%define all_arch_configs %{name}-%{version}-x86_64*.config
+%define kernel_image arch/x86/boot/bzImage
+%endif
+
+%ifarch aarch64
+%define all_arch_configs %{name}-%{version}-aarch64*.config
+%define asmarch arm64
+%define hdrarch arm64
+%define make_target Image.gz
+%define kernel_image arch/arm64/boot/Image.gz
+%endif
+
+%ifarch loongarch64
+%define all_arch_configs %{name}-%{version}-loongarch64*.config
+%define asmarch loongarch
+%define make_target vmlinux
+%define hdrarch loongarch
+%define kernel_image vmlinux
+%endif
+
+# To temporarily exclude an architecture from being built, add it to
+# %%nobuildarches. Do _NOT_ use the ExclusiveArch: line, because if we
+# don't build kernel-headers then the new build system will no longer let
+# us use the previous build of that package -- it'll just be completely AWOL.
+# Which is a BadThing(tm).
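The with_* defines above are plain rpmbuild conditionals, so individual subpackages can be toggled from the command line without editing the spec; for example::

    # build only the base kernel variant and skip debuginfo generation
    rpmbuild -ba SPECS/kernel.spec --with baseonly --without debuginfo

As defined above, --with baseonly additionally forces with_debug, with_perf, with_tools and with_bpftool to 0.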
+ +# We only build kernel-headers on the following... +%define nobuildarches i386 i686 + +%ifarch %nobuildarches +%define with_up 0 +%define with_debug 0 +%define with_debuginfo 0 +%define with_perf 0 +%define with_tools 0 +%define with_bpftool 0 +%define _enable_debug_packages 0 +%endif + +# Architectures we build tools/cpupower on +%define cpupowerarchs x86_64 aarch64 + + +# +# Packages that need to be installed before the kernel is, because the %%post +# scripts use them. +# +%define kernel_prereq coreutils, systemd >= 203-2, /usr/bin/kernel-install +%define initrd_prereq dracut >= 027 + + +Name: kernel%{?variant} +Group: System Environment/Kernel +License: GPLv2 and Redistributable, no modification permitted +URL: http://www.kernel.org/ +Version: %{kernelversion} +Release: %{pkg_release} +Summary: The Linux kernel, based on version %{version}, heavily modified with backports +# DO NOT CHANGE THE 'ExclusiveArch' LINE TO TEMPORARILY EXCLUDE AN ARCHITECTURE BUILD. +# SET %%nobuildarches (ABOVE) INSTEAD +ExclusiveArch: noarch i686 x86_64 aarch64 loongarch64 +ExclusiveOS: Linux + + +# +# List the packages used during the kernel build +# +BuildRequires: kmod, patch, bash, coreutils, tar, git, which +BuildRequires: bzip2, xz, findutils, gzip, m4, perl-interpreter, perl-Carp, perl-devel, perl-generators, make, diffutils, gawk +BuildRequires: gcc, binutils, system-rpm-config, hmaccalc, python3-devel +BuildRequires: net-tools, hostname, bc, bison, flex, elfutils-devel, dwarves +BuildRequires: libnl3-devel +%ifarch x86_64 +BuildRequires: llvm +%endif +%if %{with_doc} +BuildRequires: xmlto, asciidoc, python3-sphinx +%endif +%if %{with_headers} +BuildRequires: rsync +%endif +%if %{with_sparse} +BuildRequires: sparse +%endif +%if %{with_perf} +BuildRequires: zlib-devel binutils-devel newt-devel perl(ExtUtils::Embed) bison flex xz-devel +BuildRequires: audit-libs-devel +BuildRequires: java-devel +BuildRequires: libbpf-devel +BuildRequires: libbabeltrace-devel +BuildRequires: libtraceevent-devel +BuildRequires: numactl-devel +%ifarch aarch64 +BuildRequires: opencsd-devel >= 1.0.0 +%endif +%endif +%if %{with_tools} +BuildRequires: gettext ncurses-devel +BuildRequires: libcap-devel libcap-ng-devel +BuildRequires: pciutils-devel +BuildRequires: openssl-devel +%endif +%if %{with_bpftool} +BuildRequires: python3-docutils +BuildRequires: zlib-devel binutils-devel +%endif +BuildConflicts: rhbuildsys(DiskFree) < 500Mb +%if %{with_debuginfo} +BuildRequires: rpm-build, elfutils +#BuildConflicts: rpm < 4.13.0.1-19 +# Most of these should be enabled after more investigation +%undefine _include_minidebuginfo +%undefine _find_debuginfo_dwz_opts +%undefine _unique_build_ids +%undefine _unique_debug_names +%undefine _unique_debug_srcs +%undefine _debugsource_packages +%undefine _debuginfo_subpackages +%global _find_debuginfo_opts -r --keep-section .BTF* +%global _missing_build_ids_terminate_build 1 +%global _no_recompute_build_ids 1 +%endif + +BuildRequires: openssl openssl-devel + +# These below are required to build man pages +%if %{with_perf} +BuildRequires: xmlto +%endif +%if %{with_perf} || %{with_tools} +BuildRequires: asciidoc +%endif + +Source0: linux-%{kernelversion}-%{pkg_release}.tar.xz + +Source20: kernel-%{version}-aarch64.config +Source21: kernel-%{version}-aarch64-debug.config +Source39: kernel-%{version}-x86_64.config +Source40: kernel-%{version}-x86_64-debug.config +Source43: generate_bls_conf.sh +Source45: kernel-%{version}-loongarch64.config +Source46: kernel-%{version}-loongarch64-debug.config + + 
+
+# Sources for kernel-tools
+Source2000: cpupower.service
+Source2001: cpupower.config
+
+## Patches needed for building this package
+
+# %%PATCH_LIST%%
+
+# END OF PATCH DEFINITIONS
+
+BuildRoot: %{_tmppath}/%{name}-%{KVERREL}-root
+
+%description
+This is the package which provides the Linux kernel for Alibaba Cloud Linux.
+It is based on upstream Linux at version %{version} and maintains kABI
+compatibility of a set of approved symbols; however, it is heavily modified with
+backports and fixes pulled from newer upstream Linux %{name} releases. This means
+this is not a %{version} kernel anymore: it includes several components which come
+from newer upstream Linux versions, while maintaining a well tested and stable
+core. Some of the components/backports that may be pulled in are: changes like
+updates to the core kernel (e.g. scheduler, cgroups, memory management, security
+fixes and features), updates to the block layer, supported filesystems, major driver
+updates for supported hardware in Alibaba Cloud Linux, enhancements for
+enterprise customers, etc.
+
+#
+# This macro does requires, provides, conflicts, obsoletes for a kernel package.
+# %%kernel_reqprovconf <subpackage>
+# It uses any kernel_<subpackage>_conflicts and kernel_<subpackage>_obsoletes
+# macros defined above.
+#
+%define kernel_reqprovconf \
+Provides: %{name} = %{kernelversion}-%{pkg_release}\
+Provides: %{name}-%{_target_cpu} = %{kernelversion}-%{pkg_release}%{?1:+%{1}}\
+Provides: kernel-drm-nouveau = 16\
+Provides: %{name}-uname-r = %{KVERREL}%{?variant}%{?1:+%{1}}\
+Requires(pre): %{kernel_prereq}\
+Requires(pre): %{initrd_prereq}\
+Requires(pre): linux-firmware >= 20190516-94.git711d3297\
+Requires(preun): systemd >= 200\
+Conflicts: xfsprogs < 4.3.0-1\
+Conflicts: xorg-x11-drv-vmmouse < 13.0.99\
+%{expand:%%{?kernel%{?1:_%{1}}_conflicts:Conflicts: %%{kernel%{?1:_%{1}}_conflicts}}}\
+%{expand:%%{?kernel%{?1:_%{1}}_obsoletes:Obsoletes: %%{kernel%{?1:_%{1}}_obsoletes}}}\
+%{expand:%%{?kernel%{?1:_%{1}}_provides:Provides: %%{kernel%{?1:_%{1}}_provides}}}\
+# We can't let RPM do the dependencies automatically because it'll then pick up\
+# a correct but undesirable perl dependency from the module headers which\
+# isn't required for the kernel proper to function\
+AutoReq: no\
+AutoProv: yes\
+%{nil}
+
+
+%package doc
+Summary: Various documentation bits found in the kernel source
+Group: Documentation
+%description doc
+This package contains documentation files from the kernel
+source. Various bits of information about the Linux kernel and the
+device drivers shipped with it are documented in these files.
+
+You'll want to install this package if you need a reference to the
+options that can be passed to Linux kernel modules at load time.
+
+
+%package headers
+Summary: Header files for the Linux kernel for use by glibc
+Group: Development/System
+Obsoletes: glibc-kernheaders < 3.0-46
+Provides: glibc-kernheaders = 3.0-46
+%if "0%{?variant}"
+Obsoletes: kernel-headers < %{kernelversion}-%{pkg_release}
+Provides: kernel-headers = %{kernelversion}-%{pkg_release}
+%endif
+%description headers
+Kernel-headers includes the C header files that specify the interface
+between the Linux kernel and userspace libraries and programs. The
+header files define structures and constants that are needed for
+building most standard programs and are also needed for rebuilding the
+glibc package.
+ +%package debuginfo-common-%{_target_cpu} +Summary: Kernel source files used by %{name}-debuginfo packages +Group: Development/Debug +Provides: installonlypkg(kernel) +%description debuginfo-common-%{_target_cpu} +This package is required by %{name}-debuginfo subpackages. +It provides the kernel source files common to all builds. + +%if %{with_perf} +%package -n perf +Summary: Performance monitoring for the Linux kernel +Group: Development/System +Requires: bzip2 +License: GPLv2 +%description -n perf +This package contains the perf tool, which enables performance monitoring +of the Linux kernel. + +%package -n perf-debuginfo +Summary: Debug information for package perf +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n perf-debuginfo +This package provides debug information for the perf package. + +# Note that this pattern only works right to match the .build-id +# symlinks because of the trailing nonmatching alternation and +# the leading .*, because of find-debuginfo.sh's buggy handling +# of matching the pattern against the symlinks file. +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_bindir}/perf(\.debug)?|.*%%{_libexecdir}/perf-core/.*|.*%%{_libdir}/libperf-jvmti.so(\.debug)?|XXX' -o perf-debuginfo.list} + +%package -n python3-perf +Summary: Python bindings for apps which will manipulate perf events +Group: Development/Libraries +%description -n python3-perf +The python3-perf package contains a module that permits applications +written in the Python programming language to use the interface +to manipulate perf events. + +%package -n python3-perf-debuginfo +Summary: Debug information for package perf python bindings +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n python3-perf-debuginfo +This package provides debug information for the perf python bindings. + +# the python_sitearch macro should already be defined from above +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{python3_sitearch}/perf.*so(\.debug)?|XXX' -o python3-perf-debuginfo.list} + +# with_perf +%endif + +%if %{with_tools} +%package -n %{name}-tools +Summary: Assortment of tools for the Linux kernel +Group: Development/System +License: GPLv2 +%ifarch %{cpupowerarchs} +Provides: cpupowerutils = 1:009-0.6.p1 +Obsoletes: cpupowerutils < 1:009-0.6.p1 +Provides: cpufreq-utils = 1:009-0.6.p1 +Provides: cpufrequtils = 1:009-0.6.p1 +Obsoletes: cpufreq-utils < 1:009-0.6.p1 +Obsoletes: cpufrequtils < 1:009-0.6.p1 +Obsoletes: cpuspeed < 1:1.5-16 +Requires: %{name}-tools-libs = %{version}-%{release} +%endif +%define __requires_exclude ^%{_bindir}/python +%description -n %{name}-tools +This package contains the tools/ directory from the kernel source +and the supporting documentation. + +%package -n %{name}-tools-libs +Summary: Libraries for the %{name}-tools +Group: Development/System +License: GPLv2 +%description -n %{name}-tools-libs +This package contains the libraries built from the tools/ directory +from the kernel source. 
+
+%package -n %{name}-tools-libs-devel
+Summary: Assortment of tools for the Linux kernel
+Group: Development/System
+License: GPLv2
+Requires: %{name}-tools = %{version}-%{release}
+%ifarch %{cpupowerarchs}
+Provides: cpupowerutils-devel = 1:009-0.6.p1
+Obsoletes: cpupowerutils-devel < 1:009-0.6.p1
+%endif
+Requires: %{name}-tools-libs = %{version}-%{release}
+Provides: %{name}-tools-devel
+%description -n %{name}-tools-libs-devel
+This package contains the development files for the tools/ directory from
+the kernel source.
+
+%package -n %{name}-tools-debuginfo
+Summary: Debug information for package %{name}-tools
+Group: Development/Debug
+Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release}
+AutoReqProv: no
+%description -n %{name}-tools-debuginfo
+This package provides debug information for package %{name}-tools.
+
+# Note that this pattern only works right to match the .build-id
+# symlinks because of the trailing nonmatching alternation and
+# the leading .*, because of find-debuginfo.sh's buggy handling
+# of matching the pattern against the symlinks file.
+%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_bindir}/centrino-decode(\.debug)?|.*%%{_bindir}/powernow-k8-decode(\.debug)?|.*%%{_bindir}/cpupower(\.debug)?|.*%%{_libdir}/libcpupower.*|.*%%{_bindir}/turbostat(\.debug)?|.*%%{_bindir}/x86_energy_perf_policy(\.debug)?|.*%%{_bindir}/tmon(\.debug)?|.*%%{_bindir}/lsgpio(\.debug)?|.*%%{_bindir}/gpio-hammer(\.debug)?|.*%%{_bindir}/gpio-event-mon(\.debug)?|.*%%{_bindir}/iio_event_monitor(\.debug)?|.*%%{_bindir}/iio_generic_buffer(\.debug)?|.*%%{_bindir}/lsiio(\.debug)?|XXX' -o %{name}-tools-debuginfo.list}
+
+# with_tools
+%endif
+
+%if %{with_bpftool}
+
+%package -n bpftool
+Summary: Inspection and simple manipulation of eBPF programs and maps
+License: GPLv2
+%description -n bpftool
+This package contains the bpftool, which allows inspection and simple
+manipulation of eBPF programs and maps.
+
+%package -n bpftool-debuginfo
+Summary: Debug information for package bpftool
+Group: Development/Debug
+Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release}
+AutoReqProv: no
+%description -n bpftool-debuginfo
+This package provides debug information for the bpftool package.
+
+%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_sbindir}/bpftool(\.debug)?|XXX' -o bpftool-debuginfo.list}
+
+# with_bpftool
+%endif
+
+%if %{with_gcov}
+%package gcov
+Summary: gcov graph and source files for coverage data collection.
+Group: Development/System
+%description gcov
+kernel-gcov includes the gcov graph and source files for gcov coverage collection.
+%endif
+
+#
+# This macro creates a kernel-<subpackage>-debuginfo package.
+# %%kernel_debuginfo_package <subpackage>
+#
+%define kernel_debuginfo_package() \
+%package %{?1:%{1}-}debuginfo\
+Summary: Debug information for package %{name}%{?1:-%{1}}\
+Group: Development/Debug\
+Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release}\
+Provides: %{name}%{?1:-%{1}}-debuginfo-%{_target_cpu} = %{version}-%{release}\
+Provides: installonlypkg(kernel)\
+AutoReqProv: no\
+%description %{?1:%{1}-}debuginfo\
+This package provides debug information for package %{name}%{?1:-%{1}}.\
+This is required to use SystemTap with %{name}%{?1:-%{1}}-%{KVERREL}.\
+%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '/.*/%%{KVERREL_RE}%{?1:[+]%{1}}/.*|/.*%%{KVERREL_RE}%{?1:\+%{1}}(\.debug)?' -o debuginfo%{?1}.list}\
+%{nil}
+
+#
+# This macro creates a kernel-<subpackage>-devel package.
+# %%kernel_devel_package <subpackage> <pretty-name>
+%define kernel_devel_package() \
+%package %{?1:%{1}-}devel\
+Summary: Development package for building kernel modules to match the %{?2:%{2} }kernel\
+Group: System Environment/Kernel\
+Provides: %{name}%{?1:-%{1}}-devel-%{_target_cpu} = %{version}-%{release}\
+Provides: %{name}-devel-%{_target_cpu} = %{version}-%{release}%{?1:+%{1}}\
+Provides: %{name}-devel-uname-r = %{KVERREL}%{?variant}%{?1:+%{1}}\
+Provides: installonlypkg(kernel)\
+AutoReqProv: no\
+Requires(pre): findutils\
+Requires: findutils\
+Requires: perl-interpreter\
+%description %{?1:%{1}-}devel\
+This package provides kernel headers and makefiles sufficient to build modules\
+against the %{?2:%{2} }kernel package.\
+%{nil}
+
+#
+# This macro creates a %%{name}-<subpackage> and its -devel and -debuginfo too.
+# %%define variant_summary The Linux kernel compiled for <configuration>
+# %%kernel_variant_package [-n <pretty-name>] <subpackage>
+#
+%define kernel_variant_package(n:) \
+%package %{?1:%{1}}\
+Summary: %{variant_summary}\
+Group: System Environment/Kernel\
+Provides: installonlypkg(kernel)\
+Requires: grubby \
+%{expand:%%kernel_reqprovconf}\
+%{expand:%%kernel_devel_package %{?1:%{1}} %{!?-n:%{?1:%{1}}}%{?-n:%{-n*}}}\
+%{expand:%%kernel_debuginfo_package %{?1:%{1}}}\
+%{nil}
+
+# First the auxiliary packages of the main kernel package.
+%kernel_devel_package
+%kernel_debuginfo_package
+
+# Now, each variant package.
+
+%define variant_summary The Linux kernel compiled with extra debugging enabled
+%kernel_variant_package debug
+%description debug
+The kernel package contains the Linux kernel (vmlinuz), the core of any
+Linux operating system. The kernel handles the basic functions
+of the operating system: memory allocation, process allocation, device
+input and output, etc.
+
+This variant of the kernel has numerous debugging options enabled.
+It should only be installed when trying to gather additional information
+on kernel bugs, as some of these options impact performance noticeably.
+
+%prep
+# do a few sanity-checks for --with *only builds
+%if %{with_baseonly}
+%if !%{with_up}
+echo "Cannot build --with baseonly, up build is disabled"
+exit 1
+%endif
+%endif
+
+# more sanity checking; do it quietly
+if [ "%{patches}" != "%%{patches}" ] ; then
+    for patch in %{patches} ; do
+        if [ ! -f $patch ] ; then
+            echo "ERROR: Patch ${patch##/*/} listed in specfile but is missing"
+            exit 1
+        fi
+    done
+fi 2>/dev/null
+
+patch_command='patch -p1 -F1 -s'
+ApplyPatch()
+{
+    local patch=$1
+    shift
+    if [ ! -f $RPM_SOURCE_DIR/$patch ]; then
+        exit 1
+    fi
+    if ! grep -E "^Patch[0-9]+: $patch\$" %{_specdir}/${RPM_PACKAGE_NAME%%%%%{?variant}}.spec ; then
+        if [ "${patch:0:8}" != "patch-4." ] ; then
+            echo "ERROR: Patch $patch not listed as a source patch in specfile"
+            exit 1
+        fi
+    fi 2>/dev/null
+    case "$patch" in
+    *.bz2) bunzip2 < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;;
+    *.gz) gunzip < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;;
+    *.xz) unxz < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;;
+    *) sed -n '/^---$/,$p' "$RPM_SOURCE_DIR/$patch" | $patch_command ${1+"$@"} ;;
+    esac
+}
+
+# don't apply patch if it's empty
+ApplyOptionalPatch()
+{
+    local patch=$1
+    shift
+    if [ ! -f $RPM_SOURCE_DIR/$patch ]; then
+        exit 1
+    fi
+    local C=$(wc -l $RPM_SOURCE_DIR/$patch | awk '{print $1}')
+    if [ "$C" -gt 9 ]; then
+        ApplyPatch $patch ${1+"$@"}
+    fi
+}
+
+%setup -q -n %{name}-%{kernelversion}-%{pkg_release} -c
+mv linux-%{kernelversion}-%{pkg_release} linux-%{KVERREL}
+
+cd linux-%{KVERREL}
+
+# Drop some necessary files from the source dir into the buildroot
+cp $RPM_SOURCE_DIR/kernel-%{version}-*.config .
+
+# %%PATCH_APPLICATION%%
+
+# END OF PATCH APPLICATIONS
+
+# Any further pre-build tree manipulations happen here.
+
+chmod +x scripts/checkpatch.pl
+mv COPYING COPYING-%{version}
+
+# This prevents scripts/setlocalversion from mucking with our version numbers.
+touch .scmversion
+
+# Do not use "ambiguous" python shebangs. RHEL 8 now has a new script
+# (/usr/lib/rpm/redhat/brp-mangle-shebangs), which forces us to specify a
+# "non-ambiguous" python shebang for scripts we ship in buildroot. This
+# script throws an error like below:
+# *** ERROR: ambiguous python shebang in /usr/bin/kvm_stat: #!/usr/bin/python. Change it to python3 (or python2) explicitly.
+# We patch all sources below for which we got a report/error.
+pathfix.py -i "%{__python3} %{py3_shbang_opts}" -p -n \
+    tools/kvm/kvm_stat/kvm_stat \
+    scripts/show_delta \
+    scripts/diffconfig \
+    scripts/bloat-o-meter \
+    scripts/jobserver-exec \
+    tools \
+    Documentation \
+    scripts/clang-tools
+
+%define make make HOSTCFLAGS="%{?build_hostcflags}" HOSTLDFLAGS="%{?build_hostldflags}"
+
+# only deal with configs if we are going to build for the arch
+%ifnarch %nobuildarches
+
+rm -rf configs
+mkdir configs
+
+# Remove configs not for the buildarch
+for cfg in kernel-%{version}-*.config; do
+    if [ `echo %{all_arch_configs} | grep -c $cfg` -eq 0 ]; then
+        rm -f $cfg
+    fi
+done
+
+# enable GCOV kernel config options if gcov is on
+%if %{with_gcov}
+for i in *.config
+do
+    sed -i 's/# CONFIG_GCOV_KERNEL is not set/CONFIG_GCOV_KERNEL=y\nCONFIG_GCOV_PROFILE_ALL=y\n/' $i
+done
+%endif
+
+# now run oldconfig over all the config files
+for i in *.config
+do
+    mv $i .config
+    Arch=`sed -n 3p .config | cut -d' ' -f2 | cut -d'/' -f2`
+    make ARCH=$Arch listnewconfig | grep -E '^CONFIG_' >.newoptions || true
+    if [ -s .newoptions ]; then
+        cat .newoptions
+        #exit 1
+    fi
+    rm -f .newoptions
+    make ARCH=$Arch olddefconfig
+    echo "# $Arch" > configs/$i
+    cat .config >> configs/$i
+done
+# end of kernel config
+%endif
+
+# # End of Configs stuff
+
+# get rid of unwanted files resulting from patch fuzz
+find . \( -name "*.orig" -o -name "*~" \) -exec rm -f {} \; >/dev/null
+
+# remove unnecessary SCM files
+find . -name .gitignore -exec rm -f {} \; >/dev/null
+
+cd ..
+
+###
+### build
+###
+%build
+
+%if %{with_sparse}
+%define sparse_mflags C=1
+%endif
+
+cp_vmlinux()
+{
+    eu-strip --remove-comment -o "$2" "$1"
+}
+
+BuildKernel() {
+    MakeTarget=$1
+    KernelImage=$2
+    Flavour=$3
+    Flav=${Flavour:++${Flavour}}
+    InstallName=${5:-vmlinuz}
+
+    DoVDSO=$4
+    DoModules=1
+
+    # Pick the right config file for the kernel we're building
+    Config=kernel-%{version}-%{_target_cpu}${Flavour:+-${Flavour}}.config
+    DevelDir=/usr/src/kernels/%{KVERREL}${Flav}
+
+    # When the bootable image is just the ELF kernel, strip it.
+    # We already copy the unstripped file into the debuginfo package.
+    if [ "$KernelImage" = vmlinux ]; then
+        CopyKernel=cp_vmlinux
+    else
+        CopyKernel=cp
+    fi
+
+    KernelVer=%{version}-%{release}.%{_target_cpu}${Flav}
+    echo BUILDING A KERNEL FOR ${Flavour} %{_target_cpu}...
+ + # make sure EXTRAVERSION says what we want it to say + perl -p -i -e "s/^EXTRAVERSION.*/EXTRAVERSION = -%{release}.%{_target_cpu}${Flav}/" Makefile + + # and now to start the build process + + %{make} -s %{?_smp_mflags} mrproper + cp configs/$Config .config + + Arch=`head -1 .config | cut -b 3-` + echo USING ARCH=$Arch + + KCFLAGS="%{?kcflags}" + + # add kpatch flags for base kernel + if [ "$Flavour" == "" ]; then + KCFLAGS="$KCFLAGS %{?kpatch_kcflags}" + fi + + %{make} -s ARCH=$Arch olddefconfig >/dev/null + %{make} -s ARCH=$Arch V=1 %{?_smp_mflags} KCFLAGS="$KCFLAGS" WITH_GCOV="%{?with_gcov}" $MakeTarget %{?sparse_mflags} %{?kernel_mflags} + if [ $DoModules -eq 1 ]; then + %{make} -s ARCH=$Arch V=1 %{?_smp_mflags} KCFLAGS="$KCFLAGS" WITH_GCOV="%{?with_gcov}" modules %{?sparse_mflags} || exit 1 + fi + + mkdir -p $RPM_BUILD_ROOT/%{image_install_path} + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer +%if %{with_debuginfo} + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/%{image_install_path} +%endif + +%ifarch aarch64 + %{make} -s ARCH=$Arch V=1 dtbs dtbs_install INSTALL_DTBS_PATH=$RPM_BUILD_ROOT/%{image_install_path}/dtb-$KernelVer + cp -r $RPM_BUILD_ROOT/%{image_install_path}/dtb-$KernelVer $RPM_BUILD_ROOT/lib/modules/$KernelVer/dtb + find arch/$Arch/boot/dts -name '*.dtb' -type f | xargs rm -f +%endif + + # Start installing the results + install -m 644 .config $RPM_BUILD_ROOT/boot/config-$KernelVer + install -m 644 .config $RPM_BUILD_ROOT/lib/modules/$KernelVer/config + install -m 644 System.map $RPM_BUILD_ROOT/boot/System.map-$KernelVer + install -m 644 System.map $RPM_BUILD_ROOT/lib/modules/$KernelVer/System.map + + # We estimate the size of the initramfs because rpm needs to take this size + # into consideration when performing disk space calculations. (See bz #530778) + dd if=/dev/zero of=$RPM_BUILD_ROOT/boot/initramfs-$KernelVer.img bs=1M count=20 + + if [ -f arch/$Arch/boot/zImage.stub ]; then + cp arch/$Arch/boot/zImage.stub $RPM_BUILD_ROOT/%{image_install_path}/zImage.stub-$KernelVer || : + cp arch/$Arch/boot/zImage.stub $RPM_BUILD_ROOT/lib/modules/$KernelVer/zImage.stub-$KernelVer || : + fi + + $CopyKernel $KernelImage \ + $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + chmod 755 $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + cp $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer $RPM_BUILD_ROOT/lib/modules/$KernelVer/$InstallName + + # hmac sign the kernel for FIPS + echo "Creating hmac file: $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac" + ls -l $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + sha512hmac $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer | sed -e "s,$RPM_BUILD_ROOT,," > $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac; + cp $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac $RPM_BUILD_ROOT/lib/modules/$KernelVer/.vmlinuz.hmac + + if [ $DoModules -eq 1 ]; then + # Override $(mod-fw) because we don't want it to install any firmware + # we'll get it from the linux-firmware package and we don't want conflicts + %{make} -s %{?_smp_mflags} ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT modules_install KERNELRELEASE=$KernelVer mod-fw= + fi + +%if %{with_gcov} + # install gcov-needed files to $BUILDROOT/$BUILD/...: + # gcov_info->filename is absolute path + # gcno references to sources can use absolute paths (e.g. in out-of-tree builds) + # sysfs symlink targets (set up at compile time) use absolute paths to BUILD dir + find . 
\( -name '*.gcno' -o -name '*.[chS]' \) -exec install -D '{}' "$RPM_BUILD_ROOT/$(pwd)/{}" \; +%endif + + if [ $DoVDSO -ne 0 ]; then + %{make} -s ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT vdso_install KERNELRELEASE=$KernelVer + if [ ! -s ldconfig-kernel.conf ]; then + echo > ldconfig-kernel.conf "\ + # Placeholder file, no vDSO hwcap entries used in this kernel." + fi + %{__install} -D -m 444 ldconfig-kernel.conf \ + $RPM_BUILD_ROOT/etc/ld.so.conf.d/%{name}-$KernelVer.conf + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/vdso/.build-id + fi + + # And save the headers/makefiles etc for building modules against + # + # This all looks scary, but the end result is supposed to be: + # * all arch relevant include/ files + # * all Makefile/Kconfig files + # * all script/ files + + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/source + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + (cd $RPM_BUILD_ROOT/lib/modules/$KernelVer ; ln -s build source) + # dirs for additional modules per module-init-tools, kbuild/modules.txt + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/updates + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/weak-updates + # first copy everything + cp --parents `find -type f -name "Makefile*" -o -name "Kconfig*"` $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp Module.symvers $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp System.map $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + if [ -s Module.markers ]; then + cp Module.markers $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + fi + + # create the kABI metadata for use in packaging + # NOTENOTE: the name symvers is used by the rpm backend + # NOTENOTE: to discover and run the /usr/lib/rpm/fileattrs/kabi.attr + # NOTENOTE: script which dynamically adds exported kernel symbol + # NOTENOTE: checksums to the rpm metadata provides list. 
+ # NOTENOTE: if you change the symvers name, update the backend too + echo "**** GENERATING kernel ABI metadata ****" + gzip -c9 < Module.symvers > $RPM_BUILD_ROOT/boot/symvers-$KernelVer.gz + cp $RPM_BUILD_ROOT/boot/symvers-$KernelVer.gz $RPM_BUILD_ROOT/lib/modules/$KernelVer/symvers.gz + + # then drop all but the needed Makefiles/Kconfig files + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Documentation + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include + cp .config $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp -a scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/tracing + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/spdxcheck.py + if [ -f tools/objtool/objtool ]; then + cp -a tools/objtool/objtool $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/tools/objtool/ || : + fi + if [ -d arch/$Arch/scripts ]; then + cp -a arch/$Arch/scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch} || : + fi + if [ -f arch/$Arch/*lds ]; then + cp -a arch/$Arch/*lds $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch}/ || : + fi + if [ -f arch/%{asmarch}/kernel/module.lds ]; then + cp -a --parents arch/%{asmarch}/kernel/module.lds $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + fi + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*.o + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*/*.o + if [ -d arch/%{asmarch}/include ]; then + cp -a --parents arch/%{asmarch}/include $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + fi +%ifarch aarch64 + # arch/arm64/include/asm/xen references arch/arm + cp -a --parents arch/arm/include/asm/xen $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + # arch/arm64/include/asm/opcodes.h references arch/arm + cp -a --parents arch/arm/include/asm/opcodes.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ +%endif + cp -a include $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include +%ifarch x86_64 + # files for 'make prepare' to succeed with kernel-devel + cp -a --parents arch/x86/entry/syscalls/syscall_32.tbl $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/entry/syscalls/syscall_64.tbl $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_32.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_64.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_common.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents tools/include/tools/le_byteshift.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/purgatory.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/stack.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/setup-x86_64.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/entry64.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/string.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/string.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/ctype.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + + cp -a --parents scripts/syscalltbl.sh 
$RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents scripts/syscallhdr.sh $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ +%endif + # Make sure the Makefile, version.h, and auto.conf have a matching + # timestamp so that external modules can be built + touch -r $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Makefile \ + $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/generated/uapi/linux/version.h \ + $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/config/auto.conf + +%if %{with_debuginfo} + eu-readelf -n vmlinux | grep "Build ID" | awk '{print $NF}' > vmlinux.id + cp vmlinux.id $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/vmlinux.id + + # + # save the vmlinux file for kernel debugging into the kernel-debuginfo rpm + # + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/$KernelVer + cp vmlinux $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/$KernelVer +%endif + + find $RPM_BUILD_ROOT/lib/modules/$KernelVer -name "*.ko" -type f >modnames + + # mark modules executable so that strip-to-file can strip them + xargs --no-run-if-empty chmod u+x < modnames + + # Generate a list of modules for block and networking. + + grep -F /drivers/ modnames | xargs --no-run-if-empty nm -upA | + sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' > drivers.undef + + collect_modules_list() + { + sed -r -n -e "s/^([^ ]+) \\.?($2)\$/\\1/p" drivers.undef | + LC_ALL=C sort -u > $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + if [ ! -z "$3" ]; then + sed -r -e "/^($3)\$/d" -i $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + fi + } + + collect_modules_list networking \ + 'register_netdev|ieee80211_register_hw|usbnet_probe|phy_driver_register|rt(l_|2x00)(pci|usb)_probe|register_netdevice' + collect_modules_list block \ + 'ata_scsi_ioctl|scsi_add_host|scsi_add_host_with_dma|blk_alloc_queue|blk_init_queue|register_mtd_blktrans|scsi_esp_register|scsi_register_device_handler|blk_queue_physical_block_size' 'pktcdvd.ko|dm-mod.ko' + collect_modules_list drm \ + 'drm_open|drm_init' + collect_modules_list modesetting \ + 'drm_crtc_init' + + # detect missing or incorrect license tags + ( find $RPM_BUILD_ROOT/lib/modules/$KernelVer -name '*.ko' | xargs /sbin/modinfo -l | \ + grep -E -v 'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' ) && exit 1 + + # remove files that will be auto generated by depmod at rpm -i time + pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ + rm -f modules.{alias*,builtin.bin,builtin.alias.bin,dep*,*map,symbols*,devname,softdep} + popd + + # Copy the System.map file for depmod to use, and create a backup of the + # full module tree so we can restore it after we're done filtering + cp System.map $RPM_BUILD_ROOT/. + pushd $RPM_BUILD_ROOT + + if [ $DoModules -eq 1 ]; then + + # Run depmod on the resulting module tree and make sure it isn't broken + depmod -b . 
-aeF ./System.map $KernelVer &> depmod.out + %ifnarch loongarch64 + if [ -s depmod.out ]; then + echo "Depmod failure" + cat depmod.out + exit 1 + else + rm depmod.out + fi + %else + rm -rf depmod.out + %endif + else + # Ensure important files/directories exist to let the packaging succeed + mkdir -p lib/modules/$KernelVer/kernel + # Add files usually created by make modules, needed to prevent errors + # thrown by depmod during package installation + touch lib/modules/$KernelVer/modules.order + touch lib/modules/$KernelVer/modules.builtin + fi + + # remove files that will be auto generated by depmod at rpm -i time + pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ + rm -f modules.{alias*,builtin.bin,builtin.alias.bin,dep*,*map,symbols*,devname,softdep} + popd + + # Cleanup + rm System.map + popd + + # Move the devel headers out of the root file system + mkdir -p $RPM_BUILD_ROOT/usr/src/kernels + mv $RPM_BUILD_ROOT/lib/modules/$KernelVer/build $RPM_BUILD_ROOT/$DevelDir + + # This is going to create a broken link during the build, but we don't use + # it after this point. We need the link to actually point to something + # when kernel-devel is installed, and a relative link doesn't work across + # the F17 UsrMove feature. + ln -sf $DevelDir $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + + # prune junk from kernel-devel + find $RPM_BUILD_ROOT/usr/src/kernels -name ".*.cmd" -exec rm -f {} \; + + # build a BLS config for this kernel + %{SOURCE43} "$KernelVer" "$RPM_BUILD_ROOT" "%{?variant}" + +%ifarch x86_64 + # ensure vmlinuz's BTF section is the same as vmlinux's + # since extracting the BTF section from the arm64 Image.bz is too difficult, + # we check for x86 only. + ./scripts/extract-vmlinux $KernelImage > tmp_vmlinux + llvm-objcopy --dump-section=.BTF=tmp_btf_vmlinuz tmp_vmlinux + llvm-objcopy --dump-section=.BTF=tmp_btf_vmlinux vmlinux + if ! cmp tmp_btf_vmlinuz tmp_btf_vmlinux ; then + echo "detected BTF section in vmlinuz is not the same as in vmlinux!" + exit 1 + fi + rm -f tmp_btf_vmlinuz tmp_btf_vmlinux tmp_vmlinux +%endif +} + +### +# DO it... +### + +# prepare directories +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT/boot +mkdir -p $RPM_BUILD_ROOT%{_libexecdir} + +cd linux-%{KVERREL} + + +%if %{with_debug} +BuildKernel %make_target %kernel_image debug +%endif + +%if %{with_up} +BuildKernel %make_target %kernel_image +%endif + +%global perf_make \ + make EXTRA_CFLAGS="${RPM_OPT_FLAGS}" LDFLAGS="%{__global_ldflags}" -C tools/perf V=1 NO_PERF_READ_VDSO32=1 NO_PERF_READ_VDSOX32=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 NO_BIONIC=1 LIBBPF_DYNAMIC=1 LIBTRACEEVENT_DYNAMIC=1 %{?perf_build_extra_opts} prefix=%{_prefix} PYTHON=%{__python3} +%if %{with_perf} +# perf +# make sure check-headers.sh is executable +chmod +x tools/perf/check-headers.sh +%{perf_make} DESTDIR=$RPM_BUILD_ROOT all +%endif + +%global tools_make \ + %{make} V=1 CFLAGS="${RPM_OPT_FLAGS}" LDFLAGS="%{__global_ldflags}" + +%if %{with_tools} +%ifarch %{cpupowerarchs} +# cpupower +# make sure version-gen.sh is executable.
+chmod +x tools/power/cpupower/utils/version-gen.sh +%{tools_make} -C tools/power/cpupower CPUFREQ_BENCH=false DEBUG=false +%ifarch x86_64 + pushd tools/power/cpupower/debug/x86_64 + %{tools_make} centrino-decode powernow-k8-decode + popd +%endif +%ifarch x86_64 + pushd tools/power/x86/x86_energy_perf_policy/ + %{tools_make} + popd + pushd tools/power/x86/turbostat + %{tools_make} + popd + pushd tools/power/x86/intel-speed-select + %{make} + popd +%endif +%endif +pushd tools/thermal/tmon/ +%{make} V=1 +popd +pushd tools/iio/ +%{make} V=1 +popd +pushd tools/gpio/ +%{make} V=1 +popd +# build MM tools +pushd tools/mm/ +%{make} V=1 slabinfo page_owner_sort page-types +popd +%endif + +%global bpftool_make \ + make EXTRA_CFLAGS="${RPM_OPT_FLAGS}" EXTRA_LDFLAGS="%{__global_ldflags}" DESTDIR=$RPM_BUILD_ROOT V=1 +%if %{with_bpftool} +pushd tools/bpf/bpftool +%{bpftool_make} +popd +%endif + +%if %{with_doc} +# Make the HTML pages. +make htmldocs || %{doc_build_fail} + +# sometimes non-world-readable files sneak into the kernel source tree +chmod -R a=rX Documentation +find Documentation -type d | xargs chmod u+w +%endif + +### +### Special hacks for debuginfo subpackages. +### + +# This macro is used by %%install, so we must redefine it before that. +%define debug_package %{nil} + +%if %{with_debuginfo} + +%ifnarch noarch +%global __debug_package 1 +%files -f debugfiles.list debuginfo-common-%{_target_cpu} +%defattr(-,root,root) +%endif + +%endif + +# +# Disgusting hack alert! We need to ensure we sign modules *after* all +# invocations of strip occur, which is in __debug_install_post if +# find-debuginfo.sh runs, and __os_install_post if not. +# +%define __spec_install_post \ + %{?__debug_package:%{__debug_install_post}}\ + %{__arch_install_post}\ + %{__os_install_post} + +### +### install +### + +%install + +cd linux-%{KVERREL} + +%if %{with_doc} +docdir=$RPM_BUILD_ROOT%{_datadir}/doc/kernel-doc-%{kernelversion} + +# copy the source over +mkdir -p $docdir +tar -h -f - --exclude=man --exclude='.*' -c Documentation | tar xf - -C $docdir + +# with_doc +%endif + +# We have to do the headers install before the tools install because the +# kernel headers_install will remove any header files in /usr/include that +# it doesn't install itself. + +%if %{with_headers} +# Install kernel headers +%{make} ARCH=%{hdrarch} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install + +find $RPM_BUILD_ROOT/usr/include \ + \( -name .install -o -name .check -o \ + -name ..install.cmd -o -name ..check.cmd \) -delete + +%endif + +%if %{with_perf} +# perf tool binary and supporting scripts/binaries +%{perf_make} DESTDIR=$RPM_BUILD_ROOT lib=%{_lib} install-bin +# remove the 'trace' symlink. +rm -f %{buildroot}%{_bindir}/trace + +# For both of the below, yes, this should be using a macro but right now +# it's hard coded and we don't actually want it anyway right now. +# Whoever wants examples can fix it up! + +# remove examples +rm -rf %{buildroot}/usr/lib/perf/examples +rm -rf %{buildroot}/usr/lib/perf/include + +# python-perf extension +%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext + +# perf man pages (note: implicit rpm magic compresses them later) +mkdir -p %{buildroot}/%{_mandir}/man1 +%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man + +# remove any traceevent files; e.g. its plugins still get built and installed +# even if we build against the system's libtraceevent during the perf build (by +# setting LIBTRACEEVENT_DYNAMIC=1 above in the perf_make macro). Those files +# should already ship with the libtraceevent package.
+rm -rf %{buildroot}%{_libdir}/traceevent +%endif + +%if %{with_tools} +%ifarch %{cpupowerarchs} +%{make} -C tools/power/cpupower DESTDIR=$RPM_BUILD_ROOT libdir=%{_libdir} mandir=%{_mandir} CPUFREQ_BENCH=false install +rm -f %{buildroot}%{_libdir}/*.{a,la} +%find_lang cpupower +mv cpupower.lang ../ +%ifarch x86_64 + pushd tools/power/cpupower/debug/x86_64 + install -m755 centrino-decode %{buildroot}%{_bindir}/centrino-decode + install -m755 powernow-k8-decode %{buildroot}%{_bindir}/powernow-k8-decode + popd +%endif +chmod 0755 %{buildroot}%{_libdir}/libcpupower.so* +mkdir -p %{buildroot}%{_unitdir} %{buildroot}%{_sysconfdir}/sysconfig +install -m644 %{SOURCE2000} %{buildroot}%{_unitdir}/cpupower.service +install -m644 %{SOURCE2001} %{buildroot}%{_sysconfdir}/sysconfig/cpupower +%endif +%ifarch x86_64 + mkdir -p %{buildroot}%{_mandir}/man8 + pushd tools/power/x86/x86_energy_perf_policy + %{tools_make} DESTDIR=%{buildroot} install + popd + pushd tools/power/x86/turbostat + %{tools_make} DESTDIR=%{buildroot} install + popd + pushd tools/power/x86/intel-speed-select + %{make} DESTDIR=%{buildroot} install + popd +%endif +pushd tools/thermal/tmon +%{make} V=1 INSTALL_ROOT=%{buildroot} install +popd +pushd tools/iio +%{make} V=1 DESTDIR=%{buildroot} install +popd +pushd tools/gpio +%{make} V=1 DESTDIR=%{buildroot} install +popd +pushd tools/kvm/kvm_stat +make INSTALL_ROOT=%{buildroot} install-tools +make INSTALL_ROOT=%{buildroot} install-man +popd +# install MM tools +pushd tools/mm/ +install -m755 slabinfo %{buildroot}%{_bindir}/slabinfo +install -m755 page_owner_sort %{buildroot}%{_bindir}/page_owner_sort +install -m755 page-types %{buildroot}%{_bindir}/page-types +popd +%endif + +%if %{with_bpftool} +pushd tools/bpf/bpftool +%{bpftool_make} prefix=%{_prefix} bash_compdir=%{_sysconfdir}/bash_completion.d/ mandir=%{_mandir} install doc-install +popd +%endif + +# We have to do the headers checksum calculation after the tools install because +# these might end up installing their own set of headers on top of the kernel's +%if %{with_headers} +# compute a content hash to export as Provides: kernel-headers-checksum +HEADERS_CHKSUM=$(export LC_ALL=C; find $RPM_BUILD_ROOT/usr/include -type f -name "*.h" \ + ! -path $RPM_BUILD_ROOT/usr/include/linux/version.h | \ + sort | xargs cat | sha1sum - | cut -f 1 -d ' '); +# export the checksum via usr/include/linux/version.h, so the dynamic +# find-provides can grab the hash to update it accordingly +echo "#define KERNEL_HEADERS_CHECKSUM \"$HEADERS_CHKSUM\"" >> $RPM_BUILD_ROOT/usr/include/linux/version.h +%endif + +### +### clean +### + +%clean +rm -rf $RPM_BUILD_ROOT + +### +### scripts +### + +%if %{with_tools} +%post -n %{name}-tools-libs +/sbin/ldconfig + +%postun -n %{name}-tools-libs +/sbin/ldconfig +%endif + +# +# This macro defines a %%post script for a kernel*-devel package. +# %%kernel_devel_post [<subpackage>] +# +%define kernel_devel_post() \ +%{expand:%%post %{?1:%{1}-}devel}\ +if [ -f /etc/sysconfig/kernel ]\ +then\ + . /etc/sysconfig/kernel || exit $?\ +fi\ +if [ "$HARDLINK" != "no" -a -x /usr/sbin/hardlink ]\ +then\ + (cd /usr/src/kernels/%{KVERREL}%{?1:+%{1}} &&\ + /usr/bin/find . -type f | while read f; do\ + hardlink -c /usr/src/kernels/*%{?dist}.*/$f $f\ + done)\ +fi\ +%{nil} + +# This macro defines a %%posttrans script for a kernel package. +# %%kernel_variant_posttrans [<subpackage>] +# More text can follow to go at the end of this variant's %%post.
+# +%define kernel_variant_posttrans() \ +%{expand:%%posttrans %{?1:%{1}}}\ +if [ -x %{_sbindir}/weak-modules ]\ +then\ + %{_sbindir}/weak-modules --add-kernel %{KVERREL}%{?1:+%{1}} || exit $?\ +fi\ +/bin/kernel-install add %{KVERREL}%{?1:+%{1}} /lib/modules/%{KVERREL}%{?1:+%{1}}/vmlinuz || exit $?\ +%ifarch aarch64 \ +grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="%%ARM_CMDLINE%%"\ +%elifarch loongarch64 \ +grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="%%LOONGARCH_CMDLINE%%"\ +%else \ +grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="%%X86_CMDLINE%%"\ +%endif \ +%{nil} + +# +# This macro defines a %%post script for a kernel package and its devel package. +# %%kernel_variant_post [-v <subpackage>] [-r <replace>] +# More text can follow to go at the end of this variant's %%post. +# +%define kernel_variant_post(v:r:) \ +%{expand:%%kernel_devel_post %{?-v*}}\ +%{expand:%%kernel_variant_posttrans %{?-v*}}\ +%{expand:%%post %{?-v*}}\ +%{-r:\ +if [ `uname -i` == "x86_64" -o `uname -i` == "i386" ] &&\ + [ -f /etc/sysconfig/kernel ]; then\ + /bin/sed -r -i -e 's/^DEFAULTKERNEL=%{-r*}$/DEFAULTKERNEL=kernel%{?-v:-%{-v*}}/' /etc/sysconfig/kernel || exit $?\ +fi}\ +%{nil} + +# +# This macro defines a %%preun script for a kernel package. +# %%kernel_variant_preun <subpackage> +# +%define kernel_variant_preun() \ +%{expand:%%preun %{?1}}\ +/bin/kernel-install remove %{KVERREL}%{?1:+%{1}} /lib/modules/%{KVERREL}%{?1:+%{1}}/vmlinuz || exit $?\ +if [ -x %{_sbindir}/weak-modules ]\ +then\ + %{_sbindir}/weak-modules --remove-kernel %{KVERREL}%{?1:+%{1}} || exit $?\ +fi\ +%{nil} + +%kernel_variant_preun +%kernel_variant_post -r kernel-smp + +%kernel_variant_preun debug +%kernel_variant_post -v debug + +if [ -x /sbin/ldconfig ] +then + /sbin/ldconfig -X || exit $?
+fi + +### +### file lists +### + +%if %{with_headers} +%files headers +%defattr(-,root,root) +/usr/include/* +%endif + +# only some architecture builds need kernel-doc +%if %{with_doc} +%files doc +%defattr(-,root,root) +%{_datadir}/doc/kernel-doc-%{kernelversion}/Documentation/* +%dir %{_datadir}/doc/kernel-doc-%{kernelversion}/Documentation +%dir %{_datadir}/doc/kernel-doc-%{kernelversion} +%endif + +%if %{with_perf} +%files -n perf +%{_bindir}/perf +%{_libdir}/libperf-jvmti.so +%dir %{_libexecdir}/perf-core +%{_libexecdir}/perf-core/* +%{_datadir}/perf-core/* +%{_mandir}/man[1-8]/perf* +%{_sysconfdir}/bash_completion.d/perf +%doc linux-%{KVERREL}/tools/perf/Documentation/examples.txt +%{_docdir}/perf-tip/tips.txt + +%files -n python3-perf +%{python3_sitearch}/* + +%if %{with_debuginfo} +%files -f perf-debuginfo.list -n perf-debuginfo + +%files -f python3-perf-debuginfo.list -n python3-perf-debuginfo +%endif +# with_perf +%endif + +%if %{with_tools} +%ifarch %{cpupowerarchs} +%defattr(-,root,root) +%files -n %{name}-tools -f cpupower.lang +%{_bindir}/cpupower +%{_datadir}/bash-completion/completions/cpupower +%ifarch x86_64 +%{_bindir}/centrino-decode +%{_bindir}/powernow-k8-decode +%endif +%{_unitdir}/cpupower.service +%{_mandir}/man[1-8]/cpupower* +%config(noreplace) %{_sysconfdir}/sysconfig/cpupower +%ifarch x86_64 +%{_bindir}/x86_energy_perf_policy +%{_mandir}/man8/x86_energy_perf_policy* +%{_bindir}/turbostat +%{_mandir}/man8/turbostat* +%{_bindir}/intel-speed-select +%endif +# !cpupowerarchs +%else +%files -n %{name}-tools +%defattr(-,root,root) +# cpupowerarchs +%endif +%{_bindir}/tmon +%{_bindir}/iio_event_monitor +%{_bindir}/iio_generic_buffer +%{_bindir}/lsiio +%{_bindir}/lsgpio +%{_bindir}/gpio-hammer +%{_bindir}/gpio-event-mon +%{_bindir}/gpio-watch +%{_mandir}/man1/kvm_stat* +%{_bindir}/kvm_stat +%{_bindir}/page_owner_sort +%{_bindir}/slabinfo +%{_bindir}/page-types + +%if %{with_debuginfo} +%files -f %{name}-tools-debuginfo.list -n %{name}-tools-debuginfo +%defattr(-,root,root) +%endif + +%ifarch %{cpupowerarchs} +%files -n %{name}-tools-libs +%{_libdir}/libcpupower.so.1 +%{_libdir}/libcpupower.so.0.0.1 + +%files -n %{name}-tools-libs-devel +%{_libdir}/libcpupower.so +%{_includedir}/cpufreq.h +%endif +# with_tools +%endif + +%if %{with_bpftool} +%files -n bpftool +%{_sbindir}/bpftool +%{_sysconfdir}/bash_completion.d/bpftool +%{_mandir}/man8/bpftool-cgroup.8.* +%{_mandir}/man8/bpftool-map.8.* +%{_mandir}/man8/bpftool-prog.8.* +%{_mandir}/man8/bpftool-perf.8.* +%{_mandir}/man8/bpftool.8.* +%{_mandir}/man8/bpftool-btf.8.* +%{_mandir}/man8/bpftool-feature.8.* +%{_mandir}/man8/bpftool-gen.8.* +%{_mandir}/man8/bpftool-iter.8.* +%{_mandir}/man8/bpftool-link.8.* +%{_mandir}/man8/bpftool-net.8.* +%{_mandir}/man8/bpftool-struct_ops.8.* + +%if %{with_debuginfo} +%files -f bpftool-debuginfo.list -n bpftool-debuginfo +%defattr(-,root,root) +%endif +%endif + +# empty meta-package +%ifnarch %nobuildarches noarch +%files +%defattr(-,root,root) +%endif + +%if %{with_gcov} +%ifarch x86_64 aarch64 +%files gcov +%defattr(-,root,root) +%{_builddir} +%endif +%endif + +# This is %%{image_install_path} on an arch where that includes ELF files, +# or empty otherwise. +%define elf_image_install_path %{?kernel_image_elf:%{image_install_path}} + +# +# This macro defines the %%files sections for a kernel package +# and its devel and debuginfo packages. 
+# %%kernel_variant_files [-k vmlinux] <condition> <subpackage> +# +%define kernel_variant_files(k:) \ +%if %{1}\ +%{expand:%%files %{?2}}\ +%defattr(-,root,root)\ +%{!?_licensedir:%global license %%doc}\ +%license linux-%{KVERREL}/COPYING-%{version}\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/%{?-k:%{-k*}}%{!?-k:vmlinuz}\ +%ghost /%{image_install_path}/%{?-k:%{-k*}}%{!?-k:vmlinuz}-%{KVERREL}%{?2:+%{2}}\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/.vmlinuz.hmac \ +%ghost /%{image_install_path}/.vmlinuz-%{KVERREL}%{?2:+%{2}}.hmac \ +%ifarch aarch64\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/dtb \ +%ghost /%{image_install_path}/dtb-%{KVERREL}%{?2:+%{2}} \ +%endif\ +%attr(0600, root, root) /lib/modules/%{KVERREL}%{?2:+%{2}}/System.map\ +%ghost %attr(0600, root, root) /boot/System.map-%{KVERREL}%{?2:+%{2}}\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/symvers.gz\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/config\ +%ghost %attr(0600, root, root) /boot/symvers-%{KVERREL}%{?2:+%{2}}.gz\ +%ghost %attr(0600, root, root) /boot/initramfs-%{KVERREL}%{?2:+%{2}}.img\ +%ghost %attr(0644, root, root) /boot/config-%{KVERREL}%{?2:+%{2}}\ +%dir /lib/modules\ +%dir /lib/modules/%{KVERREL}%{?2:+%{2}}\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/kernel\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/build\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/source\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/updates\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/weak-updates\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/bls.conf\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/modules.*\ +%{expand:%%files %{?2:%{2}-}devel}\ +%defattr(-,root,root)\ +%defverify(not mtime)\ +/usr/src/kernels/%{KVERREL}%{?2:+%{2}}\ +%if %{with_debuginfo}\ +%ifnarch noarch\ +%{expand:%%files -f debuginfo%{?2}.list %{?2:%{2}-}debuginfo}\ +%defattr(-,root,root)\ +%endif\ +%endif\ +%endif\ +%{nil} + +%kernel_variant_files %{with_up} +%kernel_variant_files %{with_debug} debug + +# plz don't put in a version string unless you're going to tag +# and build. +# +# +%changelog + diff --git a/arch/Kconfig b/arch/Kconfig index 09603e0bc2cc1655afdb4199a466b672b9462f6d..80533a75f511afa8ed16cf3332e5326fd37518e5 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1333,6 +1333,14 @@ config STRICT_MODULE_RWX config ARCH_HAS_PHYS_TO_DMA bool +config ARCH_HAS_CPU_RESCTRL + bool + help + The 'resctrl' filesystem allows CPU controls of shared resources + such as caches and memory bandwidth to be configured. An architecture + selects this if it provides the arch-specific hooks for the filesystem + and needs the per-task CLOSID/RMID properties. + config HAVE_ARCH_COMPILER_H bool help diff --git a/arch/arm/include/asm/mcs_spinlock.h b/arch/arm/include/asm/mcs_spinlock.h index 529d2cf4d06f4adf88170ca6c84f3e39d3305188..1eb4d733459c5050b7813fbab929b8c11a257dde 100644 --- a/arch/arm/include/asm/mcs_spinlock.h +++ b/arch/arm/include/asm/mcs_spinlock.h @@ -6,7 +6,7 @@ #include <asm/spinlock.h> /* MCS spin-locking. */ -#define arch_mcs_spin_lock_contended(lock) \ +#define arch_mcs_spin_wait(lock) \ do { \ /* Ensure prior stores are observed before we enter wfe.
*/ \ smp_mb(); \ @@ -14,9 +14,9 @@ do { \ wfe(); \ } while (0) \ -#define arch_mcs_spin_unlock_contended(lock) \ +#define arch_mcs_lock_handoff(lock, val) \ do { \ - smp_store_release(lock, 1); \ + smp_store_release((lock), (val)); \ dsb_sev(); \ } while (0) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index d657b84b6bf706a701d3e93a51a18e21755d0bde..be91e376df79e44c1e835eec8a762dac5e83430a 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -209,6 +209,8 @@ static inline void __sync_icache_dcache(pte_t pteval) extern void __sync_icache_dcache(pte_t pteval); #endif +#define PFN_PTE_SHIFT PAGE_SHIFT + void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval, unsigned int nr); #define set_ptes set_ptes diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 073de5b24560ddb7bc42ea48d7eeafe759fbb2d3..735cca0ccfe20053116413bafd73a26d948b40c0 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1822,6 +1822,6 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, if (--nr == 0) break; ptep++; - pte_val(pteval) += PAGE_SIZE; + pteval = pte_next_pfn(pteval); } } diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 658c6a61ab6fb8a984c568826a389b3269552cfa..c0db32c45f579c910395f70c286e252c52a39e07 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -21,6 +21,7 @@ config ARM64 select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_CURRENT_STACK_POINTER + select ARCH_HAS_COPY_MC if ACPI_APEI_GHES select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT @@ -173,7 +174,7 @@ config ARM64 select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT select HAVE_ARCH_PREL32_RELOCATIONS - select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET + select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET if !HAVE_LIVEPATCH select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_STACKLEAK select HAVE_ARCH_THREAD_STRUCT_WHITELIST @@ -256,6 +257,10 @@ config ARM64 select TRACE_IRQFLAGS_SUPPORT select TRACE_IRQFLAGS_NMI_SUPPORT select HAVE_SOFTIRQ_ON_OWN_STACK + select HAVE_STACK_VALIDATION if FRAME_POINTER_VALIDATION + select STACK_VALIDATION if HAVE_STACK_VALIDATION + select HAVE_RELIABLE_STACKTRACE if STACK_VALIDATION + select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_ARGS && HAVE_RELIABLE_STACKTRACE help ARM 64-bit (AArch64) Linux support. @@ -2013,7 +2018,27 @@ config ARM64_TLB_RANGE The feature introduces new assembly instructions, and they were support when binutils >= 2.30. -endmenu # "ARMv8.4 architectural features" +config ARM64_MPAM + bool "Enable support for MPAM" + select ACPI_MPAM if ACPI + select ARCH_HAS_CPU_RESCTRL + select RESCTRL_FS + help + Memory Partitioning and Monitoring is an optional extension + that allows the CPUs to mark load and store transactions with + labels for partition-id and performance-monitoring-group. + System components, such as the caches, can use the partition-id + to apply a performance policy. MPAM monitors can use the + partition-id and performance-monitoring-group to measure the + cache occupancy or data throughput. + + Use of this extension requires CPU support, support in the + memory system components (MSC), and a description from firmware + of where the MSC are in the address space. + + MPAM is exposed to user-space via the resctrl pseudo filesystem. 
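For illustration only (not part of the Kconfig text): a minimal sketch of the generic resctrl flow that this option plugs into. The group name and the schemata line below are hypothetical and depend on which resources the platform's MSCs actually export:

	# mount the resctrl filesystem and create a control group
	mount -t resctrl resctrl /sys/fs/resctrl
	mkdir /sys/fs/resctrl/p1
	# restrict the group's cache portions, then move a task into it;
	# its loads and stores are then tagged with the group's partition-id
	echo "L3:0=ff" > /sys/fs/resctrl/p1/schemata
	echo $$ > /sys/fs/resctrl/p1/tasks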
+ +endmenu menu "ARMv8.5 architectural features" @@ -2264,6 +2289,15 @@ config UNWIND_PATCH_PAC_INTO_SCS select UNWIND_TABLES select DYNAMIC_SCS +config ARM64_CONTPTE + bool "Contiguous PTE mappings for user memory" if EXPERT + depends on TRANSPARENT_HUGEPAGE + default y + help + When enabled, user mappings are configured using the PTE contiguous + bit, for any mappings that meet the size and alignment requirements. + This reduces TLB pressure and improves performance. + endmenu # "Kernel Features" menu "Boot options" @@ -2374,3 +2408,4 @@ source "drivers/acpi/Kconfig" source "arch/arm64/kvm/Kconfig" +source "kernel/livepatch/Kconfig" diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 265c4461031f4df457eb782eac820070710c8d06..6d5dc90a0a5202bd51719790438a6cc480d847b6 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -20,4 +20,37 @@ config ARM64_RELOC_TEST depends on m tristate "Relocation testing module" +config UNWINDER_ORC + bool "ORC unwinder" + depends on FRAME_POINTER_VALIDATION + select HAVE_MOD_ARCH_SPECIFIC + select OBJTOOL + help + This option enables ORC (Oops Rewind Capability) for ARM64. This + allows the unwinder to look up ORC data for an instruction address + and compute the frame pointer at that address. The computed frame + pointer is used to validate the actual frame pointer. + +config UNWINDER_FRAME_POINTER + bool "Frame pointer unwinder" + depends on FRAME_POINTER_VALIDATION + select FRAME_POINTER + help + ARM64 already uses the frame pointer for unwinding kernel stack + traces. We need to enable this config to enable STACK_VALIDATION. + STACK_VALIDATION is needed to get objtool to do static analysis + of kernel code. + +config FRAME_POINTER_VALIDATION + bool "Dynamic Frame pointer validation" + select UNWINDER_FRAME_POINTER + select UNWINDER_ORC + select HAVE_DYNAMIC_FTRACE_WITH_ARGS + help + This invokes objtool on every object file causing it to + generate ORC data for the object file. ORC data is in a custom + data format which is a simplified version of the DWARF + Call Frame Information standard. See UNWINDER_ORC for more + details. + source "drivers/hwtracing/coresight/Kconfig" diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 6069120199bbcaf26800641174b423ee84cc30fd..62b813d80700f22151d5c6d528a9a70c9b07c881 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -244,6 +244,12 @@ config ARCH_NPCM General support for NPCM8xx BMC (Arbel). Nuvoton NPCM8xx BMC based on the Cortex A35. +config ARCH_PHYTIUM + bool "Phytium SoC Family" + select ARM_GIC_PHYTIUM_2500 + help + This enables support for Phytium ARMv8 SoC family. 
+ config ARCH_QCOM bool "Qualcomm Platforms" select GPIOLIB diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 11782860717fae6aa8c472d260316b45d81ece7e..14ffc627e1e57bfe1a88eb3482cc4b0c6582278a 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -63,6 +63,18 @@ stack_protector_prepare: prepare0 include/generated/asm-offsets.h)) endif +ifdef CONFIG_UNWINDER_ORC +orc_hash_h := arch/$(SRCARCH)/include/generated/asm/orc_hash.h +orc_hash_sh := $(srctree)/scripts/orc_hash.sh +targets += $(orc_hash_h) +quiet_cmd_orc_hash = GEN $@ + cmd_orc_hash = mkdir -p $(dir $@); \ + $(CONFIG_SHELL) $(orc_hash_sh) < $< > $@ +$(orc_hash_h): $(srctree)/arch/arm64/include/asm/orc_types.h $(orc_hash_sh) FORCE + $(call if_changed,orc_hash) +prepare: $(orc_hash_h) +endif + ifeq ($(CONFIG_ARM64_BTI_KERNEL),y) KBUILD_CFLAGS += -mbranch-protection=pac-ret+bti else ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y) diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/crypto/poly1305-armv8.pl index cbc980fb02e3357feb25aad8f93c1ec86c11247d..22c9069c065054c5a8f0430fa3161045aca51a99 100644 --- a/arch/arm64/crypto/poly1305-armv8.pl +++ b/arch/arm64/crypto/poly1305-armv8.pl @@ -473,7 +473,8 @@ poly1305_blocks_neon: subs $len,$len,#64 ldp x9,x13,[$inp,#48] add $in2,$inp,#96 - adr $zeros,.Lzeros + adrp $zeros,.Lzeros + add $zeros,$zeros,#:lo12:.Lzeros lsl $padbit,$padbit,#24 add x15,$ctx,#48 @@ -885,10 +886,13 @@ poly1305_blocks_neon: ret .size poly1305_blocks_neon,.-poly1305_blocks_neon +.pushsection .rodata .align 5 .Lzeros: .long 0,0,0,0,0,0,0,0 .asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm" +.popsection + .align 2 #if !defined(__KERNEL__) && !defined(_WIN64) .comm OPENSSL_armcap_P,4,4 diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 5c8ee5a541d2047c10c8440e3691d6b1b9f97c25..654a4b174a361c0baddaf4d5ccd023a78b15a4fb 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -8,3 +8,4 @@ generic-y += user.h generated-y += cpucaps.h generated-y += sysreg-defs.h +generated-y += orc_hash.h diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h index 980d1dd8e1a32bfc249af74a8719bc56dfee4111..819044fefbe7641d3f1246503cff4cde5f61c1d9 100644 --- a/arch/arm64/include/asm/asm-extable.h +++ b/arch/arm64/include/asm/asm-extable.h @@ -10,6 +10,7 @@ #define EX_TYPE_UACCESS_ERR_ZERO 2 #define EX_TYPE_KACCESS_ERR_ZERO 3 #define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4 +#define EX_TYPE_COPY_MC_PAGE_ERR_ZERO 5 /* Data fields for EX_TYPE_UACCESS_ERR_ZERO */ #define EX_DATA_REG_ERR_SHIFT 0 @@ -51,6 +52,16 @@ #define _ASM_EXTABLE_UACCESS(insn, fixup) \ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr) +#define _ASM_EXTABLE_COPY_MC_PAGE_ERR_ZERO(insn, fixup, err, zero) \ + __ASM_EXTABLE_RAW(insn, fixup, \ + EX_TYPE_COPY_MC_PAGE_ERR_ZERO, \ + ( \ + EX_DATA_REG(ERR, err) | \ + EX_DATA_REG(ZERO, zero) \ + )) + +#define _ASM_EXTABLE_COPY_MC_PAGE(insn, fixup) \ + _ASM_EXTABLE_COPY_MC_PAGE_ERR_ZERO(insn, fixup, wzr, wzr) /* * Create an exception table entry for uaccess `insn`, which will branch to `fixup` * when an unhandled fault is taken. @@ -59,6 +70,10 @@ _ASM_EXTABLE_UACCESS(\insn, \fixup) .endm + .macro _asm_extable_copy_mc_page, insn, fixup + _ASM_EXTABLE_COPY_MC_PAGE(\insn, \fixup) + .endm + /* * Create an exception table entry for `insn` if `fixup` is provided. Otherwise * do nothing. 
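For context on how the EX_TYPE_COPY_MC_PAGE extable entries above are consumed: when an uncorrected memory error is taken on an annotated load, execution is diverted to the fixup instead of panicking, and the copy helper reports failure to its caller. A minimal caller-side sketch, assuming the copy_mc_user_highpage() helper declared in asm/page.h further down returns non-zero once the fixup path has run:

	/* hypothetical caller (sketch): abandon the copy rather than
	 * consume the poisoned source page */
	static int copy_page_checked(struct page *dst, struct page *src,
				     unsigned long vaddr,
				     struct vm_area_struct *vma)
	{
		if (copy_mc_user_highpage(dst, src, vaddr, vma))
			return -EHWPOISON; /* hw error fixed up mid-copy */
		return 0;
	}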
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 376a980f2bad08bb5a823ef4d3e4eccb96892080..547ab2f858886a71b0b19dca90655a41d5566d56 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -154,6 +154,10 @@ lr .req x30 // link register #define CPU_LE(code...) code #endif +#define CPY_MC(l, x...) \ +9999: x; \ + _asm_extable_copy_mc_page 9999b, l + /* * Define a macro that constructs a 64-bit value by concatenating two * 32-bit registers. Note that on big endian systems the order of the diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 5bba393760557d0b390ff39927de19d861af006d..aebd82fa854f491766a2d65701fbc767deb300e2 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -619,6 +619,13 @@ static inline bool id_aa64pfr1_sme(u64 pfr1) return val > 0; } +static inline bool id_aa64pfr0_mpam(u64 pfr0) +{ + u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT); + + return val > 0; +} + static inline bool id_aa64pfr1_mte(u64 pfr1) { u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT); @@ -855,6 +862,32 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) } } +static inline u32 id_aa64mmfr0_pa_range_bits(u64 mmfr0) +{ + u32 parange; + + parange = cpuid_feature_extract_unsigned_field(mmfr0, + ID_AA64MMFR0_EL1_PARANGE_SHIFT); + return id_aa64mmfr0_parange_to_phys_shift(parange); +} + +static inline u32 id_aa64mmfr2_varange_to_virt_shift(int varange) +{ + switch (varange) { + case ID_AA64MMFR2_EL1_VARange_48: return 48; + case ID_AA64MMFR2_EL1_VARange_52: return 52; + default: return CONFIG_ARM64_VA_BITS; + } +} + +static inline u32 id_aa64mmfr2_va_range_bits(u64 mmfr2) +{ + u32 varange; + + varange = cpuid_feature_extract_unsigned_field(mmfr2, ID_AA64MMFR2_EL1_VARange_SHIFT); + return id_aa64mmfr2_varange_to_virt_shift(varange); +} + /* Check whether hardware update of the Access flag is supported */ static inline bool cpu_has_hw_af(void) { @@ -923,6 +956,7 @@ extern struct arm64_ftr_override arm64_sw_feature_override; u32 get_kvm_ipa_limit(void); void dump_cpu_features(void); +unsigned int arch_cpufreq_get_khz(int cpu); #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 488f8e75134959f5263a61230dbde5192e8d4a58..dbafb40d051adcde0cd399f7374dd820d915c571 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -54,6 +54,8 @@ #define ARM_CPU_IMP_ARM 0x41 #define ARM_CPU_IMP_APM 0x50 #define ARM_CPU_IMP_CAVIUM 0x43 +#define ARM_CPU_IMP_PHYTIUM 0x70 + #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 #define ARM_CPU_IMP_NVIDIA 0x4E @@ -98,6 +100,11 @@ #define APM_CPU_PART_XGENE 0x000 #define APM_CPU_VAR_POTENZA 0x00 +#define PHYTIUM_CPU_PART_1500A 0X660 +#define PHYTIUM_CPU_PART_2000AHK 0X661 +#define PHYTIUM_CPU_PART_2000PLUS 0X662 +#define PHYTIUM_CPU_PART_2004 0X663 +#define PHYTIUM_CPU_PART_2500 0X663 #define CAVIUM_CPU_PART_THUNDERX 0x0A1 #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2 @@ -153,6 +160,12 @@ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) +#define MIDR_FT_1500A MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_1500A) +#define MIDR_FT_2000AHK 
MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000AHK) +#define MIDR_FT_2000PLUS MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000PLUS) +#define MIDR_FT_2004 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2004) +#define MIDR_FT_2500 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2500) + #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index b7afaa026842b7ebce94228e6031ce99f5cbb2a8..1e2181820a0afc3aefe0127dbcaf072a12c2da81 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -208,6 +208,21 @@ msr spsr_el2, x0 .endm +.macro __init_el2_mpam +#ifdef CONFIG_ARM64_MPAM + /* Memory Partitioning And Monitoring: disable EL2 traps */ + mrs x1, id_aa64pfr0_el1 + ubfx x0, x1, #ID_AA64PFR0_EL1_MPAM_SHIFT, #4 + cbz x0, .Lskip_mpam_\@ // skip if no MPAM + msr_s SYS_MPAM2_EL2, xzr // use the default partition + // and disable lower traps + mrs_s x0, SYS_MPAMIDR_EL1 + tbz x0, #17, .Lskip_mpam_\@ // skip if no MPAMHCR reg + msr_s SYS_MPAMHCR_EL2, xzr // clear TRAP_MPAMIDR_EL1 -> EL2 +.Lskip_mpam_\@: +#endif /* CONFIG_ARM64_MPAM */ +.endm + /** * Initialize EL2 registers to sane values. This should be called early on all * cores that were booted in EL2. Note that everything gets initialised as @@ -225,6 +240,7 @@ __init_el2_stage2 __init_el2_gicv3 __init_el2_hstr + __init_el2_mpam __init_el2_nvhe_idregs __init_el2_cptr __init_el2_fgt diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index 72b0e71cc3de88bbc3673d19feeb56e924ae2069..f80ebd0addfdea9af592e2c42712dd4c05dacd9a 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -46,4 +46,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex, #endif /* !CONFIG_BPF_JIT */ bool fixup_exception(struct pt_regs *regs); +bool fixup_exception_mc(struct pt_regs *regs); #endif diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index a81937fae9f6da772c6a3f0fc94fb19f019525a0..786d62839961dd56213def9fdcb1c2ffc490d0da 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -8,9 +8,26 @@ #ifndef __ASM_KFENCE_H #define __ASM_KFENCE_H +#include <linux/kfence.h> + #include <asm/set_memory.h> -static inline bool arch_kfence_init_pool(void) { return true; } +#ifdef CONFIG_KFENCE +static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa) +{ + unsigned long addr = (unsigned long)kpa->addr; + + if (!can_set_block_and_cont_map()) + return false; + + /* + * If the allocated range is block and contiguous mapping, split it + * to pte level before re-initializing kfence pages.
+ */ + split_linear_mapping_after_init(addr, kpa->pool_size, PAGE_KERNEL); + + return true; +} static inline bool kfence_protect_page(unsigned long addr, bool protect) { @@ -19,7 +36,8 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) return true; } -#ifdef CONFIG_KFENCE +static inline bool arch_kfence_free_pool(unsigned long addr) { return false; } + extern bool kfence_early_init; static inline bool arm64_kfence_can_set_direct_map(void) { diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 1095c6647e9665267e6aa67bac2dd7bb11b091f1..2d8b243a86cd4aea48744f0563996b6872f45d18 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -104,6 +104,7 @@ #define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En) #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En) +#define MPAMHCR_HOST_FLAGS 0 /* TCR_EL2 Registers bits */ #define TCR_EL2_RES1 ((1U << 31) | (1 << 23)) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b84ed3ad91a9eba3a30a6081371f2ec98963de06..a89b35070a35aa2a58888b85e24d11738c3845d9 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -277,6 +277,8 @@ struct kvm_arch { * the associated pKVM instance in the hypervisor. */ struct kvm_protected_vm pkvm; + + CK_KABI_RESERVE(1) }; struct kvm_vcpu_fault_info { diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index d3e354bb8351d7aa9762155e18cb49cffee938e0..de0b8845df7a14d5f76526e031bf62b47efe9182 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -169,6 +169,7 @@ enum kvm_pgtable_stage2_flags { * @KVM_PGTABLE_PROT_W: Write permission. * @KVM_PGTABLE_PROT_R: Read permission. * @KVM_PGTABLE_PROT_DEVICE: Device attributes. + * @KVM_PGTABLE_PROT_NORMAL_NC: Normal noncacheable attributes. * @KVM_PGTABLE_PROT_SW0: Software bit 0. * @KVM_PGTABLE_PROT_SW1: Software bit 1. * @KVM_PGTABLE_PROT_SW2: Software bit 2. 
@@ -180,6 +181,7 @@ enum kvm_pgtable_prot { KVM_PGTABLE_PROT_R = BIT(2), KVM_PGTABLE_PROT_DEVICE = BIT(3), + KVM_PGTABLE_PROT_NORMAL_NC = BIT(4), KVM_PGTABLE_PROT_SW0 = BIT(55), KVM_PGTABLE_PROT_SW1 = BIT(56), diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index fde4186cc3870894aa21f736f1bbeefdd63499c3..c247e5f29d5af58a48af51cf8bee191b8155fd07 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -147,6 +147,7 @@ * Memory types for Stage-2 translation */ #define MT_S2_NORMAL 0xf +#define MT_S2_NORMAL_NC 0x5 #define MT_S2_DEVICE_nGnRE 0x1 /* @@ -154,6 +155,7 @@ * Stage-2 enforces Normal-WB and Device-nGnRE */ #define MT_S2_FWB_NORMAL 6 +#define MT_S2_FWB_NORMAL_NC 5 #define MT_S2_FWB_DEVICE_nGnRE 1 #ifdef CONFIG_ARM64_4K_PAGES diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 94b68850cb9f009acbe513f97cfd0dae30ae8020..bad28b274467ab789ac3cd0fdb279126687d6b21 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -12,6 +12,10 @@ #define USER_ASID_FLAG (UL(1) << USER_ASID_BIT) #define TTBR_ASID_MASK (UL(0xffff) << 48) +#define NO_BLOCK_MAPPINGS BIT(0) +#define NO_CONT_MAPPINGS BIT(1) +#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ + #ifndef __ASSEMBLY__ #include @@ -72,7 +76,9 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot); extern void mark_linear_text_alias_ro(void); extern bool kaslr_requires_kpti(void); - +extern void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot); +extern void split_linear_mapping_after_init(unsigned long virt, phys_addr_t size, + pgprot_t prot); #define INIT_MM_CONTEXT(name) \ .pgd = init_pg_dir, diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index bfa6638b4c930ce11cc2b3462a802581a141fcfe..57e97c23e768e92b82857fc29dc0bd7d505673b7 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -6,6 +6,7 @@ #define __ASM_MODULE_H #include <asm-generic/module.h> +#include <asm/orc_types.h> struct mod_plt_sec { int plt_shndx; @@ -19,6 +20,12 @@ struct mod_arch_specific { /* for CONFIG_DYNAMIC_FTRACE */ struct plt_entry *ftrace_trampolines; + +#ifdef CONFIG_UNWINDER_ORC + unsigned int num_orcs; + int *orc_unwind_ip; + struct orc_entry *orc_unwind; +#endif }; u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs, diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h new file mode 100644 index 0000000000000000000000000000000000000000..9abe1fe58c34317c16bbc3f4aa6da2fabad74639 --- /dev/null +++ b/arch/arm64/include/asm/mpam.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Arm Ltd.
*/ + +#ifndef __ASM__MPAM_H +#define __ASM__MPAM_H + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/init.h> +#include <linux/jump_label.h> +#include <linux/percpu.h> +#include <linux/sched.h> +#include <linux/smp.h> + +#include <asm/cpucaps.h> +#include <asm/cpufeature.h> +#include <asm/sysreg.h> + +/* CPU Registers */ +#define MPAM_SYSREG_EN BIT_ULL(63) +#define MPAM_SYSREG_TRAP_IDR BIT_ULL(58) +#define MPAM_SYSREG_TRAP_MPAM0_EL1 BIT_ULL(49) +#define MPAM_SYSREG_TRAP_MPAM1_EL1 BIT_ULL(48) +#define MPAM_SYSREG_PMG_D GENMASK(47, 40) +#define MPAM_SYSREG_PMG_I GENMASK(39, 32) +#define MPAM_SYSREG_PARTID_D GENMASK(31, 16) +#define MPAM_SYSREG_PARTID_I GENMASK(15, 0) + +#define MPAMIDR_PMG_MAX GENMASK(40, 32) +#define MPAMIDR_PMG_MAX_SHIFT 32 +#define MPAMIDR_PMG_MAX_LEN 8 +#define MPAMIDR_VPMR_MAX GENMASK(20, 18) +#define MPAMIDR_VPMR_MAX_SHIFT 18 +#define MPAMIDR_VPMR_MAX_LEN 3 +#define MPAMIDR_HAS_HCR BIT(17) +#define MPAMIDR_HAS_HCR_SHIFT 17 +#define MPAMIDR_PARTID_MAX GENMASK(15, 0) +#define MPAMIDR_PARTID_MAX_SHIFT 0 +#define MPAMIDR_PARTID_MAX_LEN 15 + +#define MPAMHCR_EL0_VPMEN BIT_ULL(0) +#define MPAMHCR_EL1_VPMEN BIT_ULL(1) +#define MPAMHCR_GSTAPP_PLK BIT_ULL(8) +#define MPAMHCR_TRAP_MPAMIDR BIT_ULL(31) + +/* Properties of the VPM registers */ +#define MPAM_VPM_NUM_REGS 8 +#define MPAM_VPM_PARTID_LEN 16 +#define MPAM_VPM_PARTID_MASK 0xffff +#define MPAM_VPM_REG_LEN 64 +#define MPAM_VPM_PARTIDS_PER_REG (MPAM_VPM_REG_LEN / MPAM_VPM_PARTID_LEN) +#define MPAM_VPM_MAX_PARTID (MPAM_VPM_NUM_REGS * MPAM_VPM_PARTIDS_PER_REG) + + +DECLARE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); +DECLARE_STATIC_KEY_FALSE(mpam_enabled); +DECLARE_PER_CPU(u64, arm64_mpam_default); +DECLARE_PER_CPU(u64, arm64_mpam_current); + +/* check whether all CPUs have MPAM support */ +static __always_inline bool mpam_cpus_have_feature(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM)) + return cpus_have_final_cap(ARM64_MPAM); + return false; +} + +/* check whether all CPUs have MPAM virtualisation support */ +static __always_inline bool mpam_cpus_have_mpam_hcr(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM)) + return static_branch_unlikely(&arm64_mpam_has_hcr); + return false; +} + +/* enable MPAM virtualisation support */ +static inline void __init __enable_mpam_hcr(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM)) + static_branch_enable(&arm64_mpam_has_hcr); +} + +/* + * The resctrl filesystem writes to the partid/pmg values for threads and CPUs, + * which may race with reads in __mpam_sched_in(). Ensure only one of the old + * or new values is used. Particular care should be taken with the pmg field + * as __mpam_sched_in() may read a partid and pmg that don't match, causing + * this value to be stored with cache allocations, despite being considered + * 'free' by resctrl. + * + * A value in struct thread_info is used instead of struct task_struct as the + * cpu's u64 register format is used, but struct task_struct has two u32s.
+ */ + static inline void mpam_set_cpu_defaults(int cpu, u16 partid_d, u16 partid_i, + u8 pmg_d, u8 pmg_i) +{ + u64 default_val; + + default_val = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d); + default_val |= FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i); + default_val |= FIELD_PREP(MPAM_SYSREG_PMG_D, pmg_d); + default_val |= FIELD_PREP(MPAM_SYSREG_PMG_I, pmg_i); + + WRITE_ONCE(per_cpu(arm64_mpam_default, cpu), default_val); +} + +static inline void mpam_set_task_partid_pmg(struct task_struct *tsk, + u16 partid_d, u16 partid_i, + u8 pmg_d, u8 pmg_i) +{ +#ifdef CONFIG_ARM64_MPAM + u64 regval; + + regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d); + regval |= FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_D, pmg_d); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_I, pmg_i); + + WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg, regval); +#endif +} + +static inline u64 mpam_get_regval(struct task_struct *tsk) +{ +#ifdef CONFIG_ARM64_MPAM + return READ_ONCE(task_thread_info(tsk)->mpam_partid_pmg); +#else + return 0; +#endif +} + +static inline void resctrl_arch_set_rmid(struct task_struct *tsk, u32 rmid) +{ +#ifdef CONFIG_ARM64_MPAM + u64 regval = mpam_get_regval(tsk); + + regval &= ~MPAM_SYSREG_PMG_D; + regval &= ~MPAM_SYSREG_PMG_I; + regval |= FIELD_PREP(MPAM_SYSREG_PMG_D, rmid); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_I, rmid); + + WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg, regval); +#endif +} + +static inline void mpam_thread_switch(struct task_struct *tsk) +{ + u64 oldregval; + int cpu = smp_processor_id(); + u64 regval = mpam_get_regval(tsk); + + if (!IS_ENABLED(CONFIG_ARM64_MPAM) || + !static_branch_likely(&mpam_enabled)) + return; + + if (regval == READ_ONCE(mpam_resctrl_default_group)) + regval = READ_ONCE(per_cpu(arm64_mpam_default, cpu)); + + oldregval = READ_ONCE(per_cpu(arm64_mpam_current, cpu)); + if (oldregval == regval) + return; + + /* Synchronising this write is left until the ERET to EL0 */ + write_sysreg_s(regval, SYS_MPAM0_EL1); + WRITE_ONCE(per_cpu(arm64_mpam_current, cpu), regval); +} +#endif /* __ASM__MPAM_H */ diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index 4cedbaa16f419641f45c316be067947c279794c5..cc83edbaef36da6ce854293940a79ee5298da764 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -90,9 +90,10 @@ static inline bool try_page_mte_tagging(struct page *page) } void mte_zero_clear_page_tags(void *addr); -void mte_sync_tags(pte_t pte); +void mte_sync_tags(pte_t pte, unsigned int nr_pages); void mte_copy_page_tags(void *kto, const void *kfrom); void mte_thread_init_user(void); +int mte_copy_mc_page_tags(void *kto, const void *kfrom); void mte_thread_switch(struct task_struct *next); void mte_cpu_setup(void); void mte_suspend_enter(void); @@ -122,7 +123,7 @@ static inline bool try_page_mte_tagging(struct page *page) static inline void mte_zero_clear_page_tags(void *addr) { } -static inline void mte_sync_tags(pte_t pte) +static inline void mte_sync_tags(pte_t pte, unsigned int nr_pages) { } static inline void mte_copy_page_tags(void *kto, const void *kfrom) @@ -131,6 +132,10 @@ static inline void mte_copy_page_tags(void *kto, const void *kfrom) static inline void mte_thread_init_user(void) { } +static inline int mte_copy_mc_page_tags(void *kto, const void *kfrom) +{ + return 0; +} static inline void mte_thread_switch(struct task_struct *next) { } diff --git a/arch/arm64/include/asm/orc_header.h b/arch/arm64/include/asm/orc_header.h new file mode 100644 index 
0000000000000000000000000000000000000000..a7857588fb39759832bd76401d561c717ea7978d --- /dev/null +++ b/arch/arm64/include/asm/orc_header.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#ifndef _ORC_HEADER_H +#define _ORC_HEADER_H + +#include +#include +#include + +/* + * The header is currently a 20-byte hash of the ORC entry definition; see + * scripts/orc_hash.sh. + */ +#define ORC_HEADER \ + __used __section(".orc_header") __aligned(4) \ + static const u8 orc_header[] = { ORC_HASH } + +#endif /* _ORC_HEADER_H */ diff --git a/arch/arm64/include/asm/orc_lookup.h b/arch/arm64/include/asm/orc_lookup.h new file mode 100644 index 0000000000000000000000000000000000000000..b9f9763d6e585ced591c1101dfe870ca69e7d2a5 --- /dev/null +++ b/arch/arm64/include/asm/orc_lookup.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Josh Poimboeuf + */ +#ifndef _ORC_LOOKUP_H +#define _ORC_LOOKUP_H + +/* + * This is a lookup table for speeding up access to the .orc_unwind table. + * Given an input address offset, the corresponding lookup table entry + * specifies a subset of the .orc_unwind table to search. + * + * Each block represents the end of the previous range and the start of the + * next range. An extra block is added to give the last range an end. + * + * The block size should be a power of 2 to avoid a costly 'div' instruction. + * + * A block size of 256 was chosen because it roughly doubles unwinder + * performance while only adding ~5% to the ORC data footprint. + */ +#define LOOKUP_BLOCK_ORDER 8 +#define LOOKUP_BLOCK_SIZE (1 << LOOKUP_BLOCK_ORDER) + +#ifndef LINKER_SCRIPT + +#include + +extern unsigned int orc_lookup[]; +extern unsigned int orc_lookup_end[]; + +#define LOOKUP_START_IP (unsigned long)_stext +#define LOOKUP_STOP_IP (unsigned long)_etext + +#endif /* LINKER_SCRIPT */ + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_UNWINDER_ORC +void orc_lookup_init(void); +void orc_lookup_module_init(struct module *mod, + void *orc_ip, size_t orc_ip_size, + void *orc, size_t orc_size); +#else +static inline void orc_lookup_init(void) {} +static inline +void orc_lookup_module_init(struct module *mod, + void *orc_ip, size_t orc_ip_size, + void *orc, size_t orc_size) +{ +} +#endif + +struct orc_entry *arch_orc_find(unsigned long ip); + +#define orc_warn(fmt, ...) \ + printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__) + +#define orc_warn_current(args...) \ +({ \ + static bool dumped_before; \ + if (state->task == current && !state->error) { \ + orc_warn(args); \ + if (unwind_debug && !dumped_before) { \ + dumped_before = true; \ + unwind_dump(state); \ + } \ + } \ +}) + +struct orc_entry *orc_find(unsigned long ip); + +extern bool orc_init; +extern int __start_orc_unwind_ip[]; +extern int __stop_orc_unwind_ip[]; +extern struct orc_entry __start_orc_unwind[]; +extern struct orc_entry __stop_orc_unwind[]; + +#endif /* __ASSEMBLY__ */ + +#endif /* _ORC_LOOKUP_H */ diff --git a/arch/arm64/include/asm/orc_types.h b/arch/arm64/include/asm/orc_types.h new file mode 100644 index 0000000000000000000000000000000000000000..e18971fdf867fcadb2ae0c9e34d36bf77c33bbf9 --- /dev/null +++ b/arch/arm64/include/asm/orc_types.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Author: Madhavan T. 
Venkataraman (madvenka@linux.microsoft.com) + * + * Copyright (C) 2022 Microsoft Corporation + */ + +#ifndef _ORC_TYPES_H +#define _ORC_TYPES_H + +#include +#include + +/* + * The ORC_REG_* registers are base registers which are used to find other + * registers on the stack. + * + * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the + * address of the previous frame: the caller's SP before it called the current + * function. + * + * ORC_REG_UNDEFINED means the corresponding register's value didn't change in + * the current frame. + * + * We only use base registers SP and FP -- which the previous SP is based on -- + * and PREV_SP and UNDEFINED -- which the previous FP is based on. + */ +#define ORC_REG_UNDEFINED 0 +#define ORC_REG_PREV_SP 1 +#define ORC_REG_SP 2 +#define ORC_REG_FP 3 +#define ORC_REG_MAX 4 + +#define ORC_TYPE_UNDEFINED 0 +#define ORC_TYPE_END_OF_STACK 1 +#define ORC_TYPE_CALL 2 +#define ORC_TYPE_REGS 3 +#define ORC_TYPE_REGS_PARTIAL 4 + +#ifndef __ASSEMBLY__ +#include + +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. It tells the + * unwinder how to find the previous SP and FP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. + */ +struct orc_entry { + s16 sp_offset; + s16 fp_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned sp_reg:4; + unsigned fp_reg:4; + unsigned type:4; + unsigned signal:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned fp_reg:4; + unsigned sp_reg:4; + unsigned unused:3; + unsigned signal:1; + unsigned type:4; +#endif +} __packed; + +#endif /* __ASSEMBLY__ */ + +#endif /* _ORC_TYPES_H */ diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 2312e6ee595fda5b818e4ea9e2f057b44ffd735c..62bdc843e3e79cc7bde0be4767a74d86988e0761 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -36,6 +36,16 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, void tag_clear_highpage(struct page *to); #define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE +#ifdef CONFIG_ARCH_HAS_COPY_MC +int copy_mc_page(void *to, const void *from); +int copy_mc_highpage(struct page *to, struct page *from); +#define __HAVE_ARCH_COPY_MC_HIGHPAGE + +int copy_mc_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +#define __HAVE_ARCH_COPY_MC_USER_HIGHPAGE +#endif + #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 07bdf5dd8ebef5bd8bac6a3e449054a561397e48..1e4e385b6e4a3f4801667cc0396e3e4fd4687ac3 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -93,7 +93,8 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys) __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define pte_none(pte) (!pte_val(pte)) -#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) +#define __pte_clear(mm, addr, ptep) \ + __set_pte(ptep, __pte(0)) #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) /* @@ -132,12 +133,16 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys) */ #define pte_valid_not_user(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID |
PTE_UXN)) +/* + * Returns true if the pte is valid and has the contiguous bit set. + */ +#define pte_valid_cont(pte) (pte_valid(pte) && pte_cont(pte)) /* * Could the pte be present in the TLB? We must check mm_tlb_flush_pending * so that we don't erroneously return false for pages that have been * remapped as PROT_NONE but are yet to be flushed from the TLB. * Note that we can't make any assumptions based on the state of the access - * flag, since ptep_clear_flush_young() elides a DSB when invalidating the + * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the * TLB. */ #define pte_accessible(mm, pte) \ @@ -256,12 +261,17 @@ static inline pmd_t pmd_mkcont(pmd_t pmd) return __pmd(pmd_val(pmd) | PMD_SECT_CONT); } +static inline pmd_t pmd_mknoncont(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT); +} + static inline pte_t pte_mkdevmap(pte_t pte) { return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL)); } -static inline void set_pte(pte_t *ptep, pte_t pte) +static inline void __set_pte(pte_t *ptep, pte_t pte) { WRITE_ONCE(*ptep, pte); @@ -275,6 +285,11 @@ static inline void set_pte(pte_t *ptep, pte_t pte) } } +static inline pte_t __ptep_get(pte_t *ptep) +{ + return READ_ONCE(*ptep); +} + extern void __sync_icache_dcache(pte_t pteval); bool pgattr_change_is_safe(u64 old, u64 new); @@ -302,7 +317,7 @@ static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, if (!IS_ENABLED(CONFIG_DEBUG_VM)) return; - old_pte = READ_ONCE(*ptep); + old_pte = __ptep_get(ptep); if (!pte_valid(old_pte) || !pte_valid(pte)) return; @@ -311,7 +326,7 @@ static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, /* * Check for potential race with hardware updates of the pte - * (ptep_set_access_flags safely changes valid ptes without going + * (__ptep_set_access_flags safely changes valid ptes without going * through an invalid entry). 
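* Hardware can set the access flag and dirty state on its own but never * clears them, so the checks below treat a racy clearing of either as a * potential bug.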
*/ VM_WARN_ONCE(!pte_young(pte), @@ -325,8 +340,7 @@ static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, __func__, pte_val(old_pte), pte_val(pte)); } -static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) +static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages) { if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) __sync_icache_dcache(pte); @@ -339,28 +353,40 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, */ if (system_supports_mte() && pte_access_permitted(pte, false) && !pte_special(pte) && pte_tagged(pte)) - mte_sync_tags(pte); + mte_sync_tags(pte, nr_pages); +} +/* + * Select all bits except the pfn + */ +static inline pgprot_t pte_pgprot(pte_t pte) +{ + unsigned long pfn = pte_pfn(pte); - __check_safe_pte_update(mm, ptep, pte); + return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); +} - set_pte(ptep, pte); +#define pte_advance_pfn pte_advance_pfn +static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) +{ + return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte)); } -static inline void set_ptes(struct mm_struct *mm, unsigned long addr, +static inline void __set_ptes(struct mm_struct *mm, + unsigned long __always_unused addr, pte_t *ptep, pte_t pte, unsigned int nr) { page_table_check_ptes_set(mm, ptep, pte, nr); + __sync_cache_and_tags(pte, nr); for (;;) { - __set_pte_at(mm, addr, ptep, pte); + __check_safe_pte_update(mm, ptep, pte); + __set_pte(ptep, pte); if (--nr == 0) break; ptep++; - addr += PAGE_SIZE; - pte_val(pte) += PAGE_SIZE; + pte = pte_advance_pfn(pte, 1); } } -#define set_ptes set_ptes /* * Huge pte definitions. @@ -436,16 +462,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE)); } -/* - * Select all bits except the pfn - */ -static inline pgprot_t pte_pgprot(pte_t pte) -{ - unsigned long pfn = pte_pfn(pte); - - return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); -} - #ifdef CONFIG_NUMA_BALANCING /* * See the comment in include/linux/pgtable.h @@ -491,6 +507,7 @@ static inline int pmd_trans_huge(pmd_t pmd) #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) +#define pmd_exec(pmd) (!(pmd_val(pmd) & PMD_TABLE_PXN)) static inline pmd_t pmd_mkinvalid(pmd_t pmd) { @@ -531,18 +548,29 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd) #define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT) #define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) +static inline void __set_pte_at(struct mm_struct *mm, + unsigned long __always_unused addr, + pte_t *ptep, pte_t pte, unsigned int nr) +{ + __sync_cache_and_tags(pte, nr); + __check_safe_pte_update(mm, ptep, pte); + __set_pte(ptep, pte); +} + static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { page_table_check_pmd_set(mm, pmdp, pmd); - return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)); + return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), + PMD_SIZE >> PAGE_SHIFT); } static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, pud_t *pudp, pud_t pud) { page_table_check_pud_set(mm, pudp, pud); - return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud)); + return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud), + PUD_SIZE >> PAGE_SHIFT); } #define 
__p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d)) @@ -685,6 +713,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) #define pud_valid(pud) pte_valid(pud_pte(pud)) #define pud_user(pud) pte_user(pud_pte(pud)) #define pud_user_exec(pud) pte_user_exec(pud_pte(pud)) +#define pud_exec(pud) (!(pud_val(pud) & PUD_TABLE_PXN)) static inline void set_pud(pud_t *pudp, pud_t pud) { @@ -752,6 +781,7 @@ static inline pmd_t *pud_pgtable(pud_t pud) #define p4d_none(p4d) (!p4d_val(p4d)) #define p4d_bad(p4d) (!(p4d_val(p4d) & 2)) #define p4d_present(p4d) (p4d_val(p4d)) +#define p4d_exec(p4d) (!(p4d_val(p4d) & P4D_TABLE_PXN)) static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) { @@ -798,6 +828,7 @@ static inline pud_t *p4d_pgtable(p4d_t p4d) #define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;}) /* Match pud_offset folding in */ +#define pud_offset_phys(dir, addr) NULL #define pud_set_fixmap(addr) NULL #define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp) #define pud_clear_fixmap() @@ -840,8 +871,7 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); } -#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -extern int ptep_set_access_flags(struct vm_area_struct *vma, +extern int __ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty); @@ -851,7 +881,8 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty) { - return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty); + return __ptep_set_access_flags(vma, address, (pte_t *)pmdp, + pmd_pte(entry), dirty); } static inline int pud_devmap(pud_t pud) @@ -885,12 +916,13 @@ static inline bool pud_user_accessible_page(pud_t pud) /* * Atomic pte/pmd modifications. 
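* These helpers are built on xchg/cmpxchg retry loops so that concurrent * hardware updates of the access flag and dirty state cannot be lost.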
*/ -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -static inline int __ptep_test_and_clear_young(pte_t *ptep) +static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep) { pte_t old_pte, pte; - pte = READ_ONCE(*ptep); + pte = __ptep_get(ptep); do { old_pte = pte; pte = pte_mkold(pte); @@ -901,18 +933,10 @@ static inline int __ptep_test_and_clear_young(pte_t *ptep) return pte_young(pte); } -static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, - unsigned long address, - pte_t *ptep) -{ - return __ptep_test_and_clear_young(ptep); -} - -#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH -static inline int ptep_clear_flush_young(struct vm_area_struct *vma, +static inline int __ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - int young = ptep_test_and_clear_young(vma, address, ptep); + int young = __ptep_test_and_clear_young(vma, address, ptep); if (young) { /* @@ -935,12 +959,11 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp); + return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR -static inline pte_t ptep_get_and_clear(struct mm_struct *mm, +static inline pte_t __ptep_get_and_clear(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0)); @@ -950,6 +973,37 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, return pte; } +static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr, int full) +{ + for (;;) { + __ptep_get_and_clear(mm, addr, ptep); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} + +static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned int nr, int full) +{ + pte_t pte, tmp_pte; + + pte = __ptep_get_and_clear(mm, addr, ptep); + while (--nr) { + ptep++; + addr += PAGE_SIZE; + tmp_pte = __ptep_get_and_clear(mm, addr, ptep); + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } + return pte; +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, @@ -963,16 +1017,12 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -/* - * ptep_set_wrprotect - mark read-only while trasferring potential hardware - * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. - */ -#define __HAVE_ARCH_PTEP_SET_WRPROTECT -static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) +static inline void ___ptep_set_wrprotect(struct mm_struct *mm, + unsigned long address, pte_t *ptep, + pte_t pte) { - pte_t old_pte, pte; + pte_t old_pte; - pte = READ_ONCE(*ptep); do { old_pte = pte; pte = pte_wrprotect(pte); @@ -981,12 +1031,31 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres } while (pte_val(pte) != pte_val(old_pte)); } +/* + * __ptep_set_wrprotect - mark read-only while transferring potential hardware + * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
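+ * + * With hardware DBM enabled, the CPU may concurrently clear PTE_RDONLY to + * mark the pte dirty, which is why ___ptep_set_wrprotect() above retries + * the update until it observes a stable value.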
+ */ +static inline void __ptep_set_wrprotect(struct mm_struct *mm, + unsigned long address, pte_t *ptep) +{ + ___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep)); +} + +static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address, + pte_t *ptep, unsigned int nr) +{ + unsigned int i; + + for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++) + __ptep_set_wrprotect(mm, address, ptep); +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_PMDP_SET_WRPROTECT static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { - ptep_set_wrprotect(mm, address, (pte_t *)pmdp); + __ptep_set_wrprotect(mm, address, (pte_t *)pmdp); } #define pmdp_establish @@ -1064,7 +1133,7 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) #endif /* CONFIG_ARM64_MTE */ /* - * On AArch64, the cache coherency is handled via the set_pte_at() function. + * On AArch64, the cache coherency is handled via the __set_ptes() function. */ static inline void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, @@ -1116,6 +1185,282 @@ extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma, extern void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t new_pte); + +#ifdef CONFIG_ARM64_CONTPTE + +/* + * The contpte APIs are used to transparently manage the contiguous bit in ptes + * where it is possible and makes sense to do so. The PTE_CONT bit is considered + * a private implementation detail of the public ptep API (see below). + */ +extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte); +extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte); +extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte); +extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep); +extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned int nr); +extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr, int full); +extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned int nr, int full); +extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep); +extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep); +extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr); +extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t entry, int dirty); + +static __always_inline void contpte_try_fold(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, pte_t pte) +{ + /* + * Only bother trying if both the virtual and physical addresses are + * aligned and correspond to the last entry in a contig range. The core + * code mostly modifies ranges from low to high, so this is likely + * the last modification in the contig range, making it a good time to + * fold. We can't fold special mappings, because there is no associated + * folio.
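+ * + * As a concrete illustration (assuming 4K pages, where CONT_PTES is 16): + * the fold is only attempted when both ((addr >> PAGE_SHIFT) & 15) and + * (pte_pfn(pte) & 15) equal 15, i.e. when the entry being written is the + * last slot of a naturally aligned contig block.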
+ */ + + const unsigned long contmask = CONT_PTES - 1; + bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask; + + if (unlikely(valign)) { + bool palign = (pte_pfn(pte) & contmask) == contmask; + + if (unlikely(palign && + pte_valid(pte) && !pte_cont(pte) && !pte_special(pte))) + __contpte_try_fold(mm, addr, ptep, pte); + } +} + +static __always_inline void contpte_try_unfold(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, pte_t pte) +{ + if (unlikely(pte_valid_cont(pte))) + __contpte_try_unfold(mm, addr, ptep, pte); +} + +#define pte_batch_hint pte_batch_hint +static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte) +{ + if (!pte_valid_cont(pte)) + return 1; + + return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1)); +} + +/* + * The below functions constitute the public API that arm64 presents to the + * core-mm to manipulate PTE entries within their page tables (or at least this + * is the subset of the API that arm64 needs to implement). These public + * versions will automatically and transparently apply the contiguous bit where + * it makes sense to do so. Therefore any users that are contig-aware (e.g. + * hugetlb, kernel mapper) should NOT use these APIs, but instead use the + * private versions, which are prefixed with double underscore. All of these + * APIs except for ptep_get_lockless() are expected to be called with the PTL + * held. Although the contiguous bit is considered private to the + * implementation, it is deliberately allowed to leak through the getters (e.g. + * ptep_get()), back to core code. This is required so that pte_leaf_size() can + * provide an accurate size for perf_get_pgtable_size(). But this leakage means + * it's possible a pte will be passed to a setter with the contiguous bit set, so + * we explicitly clear the contiguous bit in those cases to prevent accidentally + * setting it in the pgtable. + */ + +#define ptep_get ptep_get +static inline pte_t ptep_get(pte_t *ptep) +{ + pte_t pte = __ptep_get(ptep); + + if (likely(!pte_valid_cont(pte))) + return pte; + + return contpte_ptep_get(ptep, pte); +} + +#define ptep_get_lockless ptep_get_lockless +static inline pte_t ptep_get_lockless(pte_t *ptep) +{ + pte_t pte = __ptep_get(ptep); + + if (likely(!pte_valid_cont(pte))) + return pte; + + return contpte_ptep_get_lockless(ptep); +} + +static inline void set_pte(pte_t *ptep, pte_t pte) +{ + /* + * We don't have the mm or vaddr so cannot unfold contig entries (since + * it requires tlb maintenance). set_pte() is not used in core code, so + * this should never even be called. Regardless, do our best to service + * any call and emit a warning if there is any attempt to set a pte on + * top of an existing contig range.
+ */ + pte_t orig_pte = __ptep_get(ptep); + + WARN_ON_ONCE(pte_valid_cont(orig_pte)); + __set_pte(ptep, pte_mknoncont(pte)); +} + +#define set_ptes set_ptes +static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned int nr) +{ + pte = pte_mknoncont(pte); + + if (likely(nr == 1)) { + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + __set_ptes(mm, addr, ptep, pte, 1); + contpte_try_fold(mm, addr, ptep, pte); + } else { + contpte_set_ptes(mm, addr, ptep, pte, nr); + } +} + +static inline void pte_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + __pte_clear(mm, addr, ptep); +} + +#define clear_full_ptes clear_full_ptes +static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr, int full) +{ + if (likely(nr == 1)) { + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + __clear_full_ptes(mm, addr, ptep, nr, full); + } else { + contpte_clear_full_ptes(mm, addr, ptep, nr, full); + } +} + +#define get_and_clear_full_ptes get_and_clear_full_ptes +static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned int nr, int full) +{ + pte_t pte; + + if (likely(nr == 1)) { + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full); + } else { + pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full); + } + + return pte; +} + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + return __ptep_get_and_clear(mm, addr, ptep); +} + +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + pte_t orig_pte = __ptep_get(ptep); + + if (likely(!pte_valid_cont(orig_pte))) + return __ptep_test_and_clear_young(vma, addr, ptep); + + return contpte_ptep_test_and_clear_young(vma, addr, ptep); +} + +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +static inline int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + pte_t orig_pte = __ptep_get(ptep); + + if (likely(!pte_valid_cont(orig_pte))) + return __ptep_clear_flush_young(vma, addr, ptep); + + return contpte_ptep_clear_flush_young(vma, addr, ptep); +} + +#define wrprotect_ptes wrprotect_ptes +static __always_inline void wrprotect_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, unsigned int nr) +{ + if (likely(nr == 1)) { + /* + * Optimization: wrprotect_ptes() can only be called for present + * ptes so we only need to check contig bit as condition for + * unfold, and we can remove the contig bit from the pte we read + * to avoid re-reading. This speeds up fork() which is sensitive + * for order-0 folios. Equivalent to contpte_try_unfold(). 
+ */ + pte_t orig_pte = __ptep_get(ptep); + + if (unlikely(pte_cont(orig_pte))) { + __contpte_try_unfold(mm, addr, ptep, orig_pte); + orig_pte = pte_mknoncont(orig_pte); + } + ___ptep_set_wrprotect(mm, addr, ptep, orig_pte); + } else { + contpte_wrprotect_ptes(mm, addr, ptep, nr); + } +} + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + wrprotect_ptes(mm, addr, ptep, 1); +} + +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +static inline int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t entry, int dirty) +{ + pte_t orig_pte = __ptep_get(ptep); + + entry = pte_mknoncont(entry); + + if (likely(!pte_valid_cont(orig_pte))) + return __ptep_set_access_flags(vma, addr, ptep, entry, dirty); + + return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty); +} + +#else /* CONFIG_ARM64_CONTPTE */ + +#define ptep_get __ptep_get +#define set_pte __set_pte +#define set_ptes __set_ptes +#define pte_clear __pte_clear +#define clear_full_ptes __clear_full_ptes +#define get_and_clear_full_ptes __get_and_clear_full_ptes +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +#define ptep_get_and_clear __ptep_get_and_clear +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define ptep_test_and_clear_young __ptep_test_and_clear_young +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +#define ptep_clear_flush_young __ptep_clear_flush_young +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +#define ptep_set_wrprotect __ptep_set_wrprotect +#define wrprotect_ptes __wrprotect_ptes +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +#define ptep_set_access_flags __ptep_set_access_flags + +#endif /* CONFIG_ARM64_CONTPTE */ + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_PGTABLE_H */ diff --git a/arch/arm64/include/asm/phytium_machine_types.h b/arch/arm64/include/asm/phytium_machine_types.h new file mode 100644 index 0000000000000000000000000000000000000000..8aed50daca4bce4ec55421b63b6b811446a90954 --- /dev/null +++ b/arch/arm64/include/asm/phytium_machine_types.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Authors: Wang Yinfeng . 
+ */ + +#ifndef _MACHINE_TYPE_H_ +#define _MACHINE_TYPE_H_ + +#include +#include + +static inline bool phytium_part(u32 cpuid) +{ + return ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == cpuid); +} + +#define typeof_ft1500a() phytium_part(MIDR_FT_1500A) +#define typeof_ft2000ahk() phytium_part(MIDR_FT_2000AHK) +#define typeof_ft2000plus() phytium_part(MIDR_FT_2000PLUS) +#define typeof_ft2004() phytium_part(MIDR_FT_2004) +#define typeof_s2500() phytium_part(MIDR_FT_2500) + +#endif diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h index 006946745352ef348d1d9f1525edf230abeee911..c6da8b300c126a9864aae25b81b8412c7c06571a 100644 --- a/arch/arm64/include/asm/probes.h +++ b/arch/arm64/include/asm/probes.h @@ -8,6 +8,7 @@ #define _ARM_PROBES_H #include +#include typedef u32 probe_opcode_t; typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *); @@ -24,6 +25,9 @@ struct arch_probe_insn { typedef u32 kprobe_opcode_t; struct arch_specific_insn { struct arch_probe_insn api; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #endif diff --git a/arch/arm64/include/asm/resctrl.h b/arch/arm64/include/asm/resctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..b506e95cf6e374690a71e0f1d142fe0032ae6e45 --- /dev/null +++ b/arch/arm64/include/asm/resctrl.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h index 0f740b7811871cbd5b35fc99fbe550830739738e..20fb7b1d542381cf03f4e47eb166eeb589839c28 100644 --- a/arch/arm64/include/asm/set_memory.h +++ b/arch/arm64/include/asm/set_memory.h @@ -8,6 +8,8 @@ bool can_set_direct_map(void); #define can_set_direct_map can_set_direct_map +bool can_set_block_and_cont_map(void); + int set_memory_valid(unsigned long addr, int numpages, int enable); int set_direct_map_invalid_noflush(struct page *page); diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 508f734de46ee207e9f7e28a47ad5f55624655ed..064aaf5dc3a0cf7dd860b36c2f86cc040fe166a9 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -11,6 +11,7 @@ #include #include +#include struct stack_info { unsigned long low; @@ -23,6 +24,7 @@ struct stack_info { * @fp: The fp value in the frame record (or the real fp) * @pc: The lr value in the frame record (or the real lr) * + * @prev_pc: The lr value in the previous frame record. * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance * associated with the most recently encountered replacement lr * value. @@ -32,10 +34,15 @@ struct stack_info { * @stack: The stack currently being unwound. * @stacks: An array of stacks which can be unwound. * @nr_stacks: The number of stacks in @stacks. + * + * @cfa: The sp value at the call site of the current function. + * @unwind_type: The previous frame's unwind type. + * @reliable: Stack trace is reliable.
*/ struct unwind_state { unsigned long fp; unsigned long pc; + unsigned long prev_pc; #ifdef CONFIG_KRETPROBES struct llist_node *kr_cur; #endif @@ -44,6 +51,9 @@ struct unwind_state { struct stack_info stack; struct stack_info *stacks; int nr_stacks; + unsigned long cfa; + int unwind_type; + bool reliable; }; static inline struct stack_info stackinfo_get_unknown(void) @@ -70,11 +80,15 @@ static inline void unwind_init_common(struct unwind_state *state, struct task_struct *task) { state->task = task; + state->prev_pc = 0; #ifdef CONFIG_KRETPROBES state->kr_cur = NULL; #endif state->stack = stackinfo_get_unknown(); + state->reliable = true; + state->cfa = 0; + state->unwind_type = UNWIND_HINT_TYPE_CALL; } static struct stack_info *unwind_find_next_stack(const struct unwind_state *state, @@ -167,6 +181,7 @@ unwind_next_frame_record(struct unwind_state *state) /* * Record this frame record's values. */ + state->prev_pc = state->pc; state->fp = READ_ONCE(*(unsigned long *)(fp)); state->pc = READ_ONCE(*(unsigned long *)(fp + 8)); diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 38296579a4fd54661ea7ffaa47c8ea89bffdea8d..94633246d31138eaa4caa6ea2d032d1e2910980a 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -515,6 +515,13 @@ #define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0) #define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0) +#define SYS_MPAMHCR_EL2 sys_reg(3, 4, 10, 4, 0) +#define SYS_MPAMVPMV_EL2 sys_reg(3, 4, 10, 4, 1) +#define SYS_MPAM2_EL2 sys_reg(3, 4, 10, 5, 0) + +#define __VPMn_op2(n) ((n) & 0x7) +#define SYS_MPAM_VPMn_EL2(n) sys_reg(3, 4, 10, 6, __VPMn_op2(n)) + #define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0) #define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1) #define SYS_RMR_EL2 sys_reg(3, 4, 12, 0, 2) @@ -579,6 +586,7 @@ #define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) +#define SYS_MPAM1_EL12 sys_reg(3, 5, 10, 5, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) #define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 553d1bc559c603f5d9933651463b9025a663e7d6..b7d2412a0f5f700e0dfd2d4fadce11002f0e5e55 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -41,6 +41,9 @@ struct thread_info { #ifdef CONFIG_SHADOW_CALL_STACK void *scs_base; void *scs_sp; +#endif +#ifdef CONFIG_ARM64_MPAM + u64 mpam_partid_pmg; #endif u32 cpu; }; @@ -64,6 +67,7 @@ void arch_setup_new_exec(void); #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */ #define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */ +#define TIF_PATCH_PENDING 7 /* pending live patching update */ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ @@ -96,11 +100,12 @@ void arch_setup_new_exec(void); #define _TIF_SVE (1 << TIF_SVE) #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \ - _TIF_NOTIFY_SIGNAL) + _TIF_NOTIFY_SIGNAL | _TIF_PATCH_PENDING) #define _TIF_SYSCALL_WORK 
(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index b73baaf8ae47beb58dd95cf804c7eef2253117d5..141b47351bc5ebf9751cf8007b86e49b18094c8c 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -229,6 +229,10 @@ static inline unsigned long get_trans_granule(void) * determined by 'stride' and only affect any walk-cache entries * if 'last_level' is equal to false. * + * __flush_tlb_kernel_pgtable_entry(addr) + * Invalidate a single kernel mapping for address "addr" on all + * CPUs. Must be called if the corresponding page table entry is + * a last-level entry. * * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented * on top of these routines, since that is our interface to the mmu_gather @@ -402,7 +406,7 @@ do { \ #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \ __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false) -static inline void __flush_tlb_range(struct vm_area_struct *vma, +static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long stride, bool last_level, int tlb_level) @@ -434,10 +438,19 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, else __flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true); - dsb(ish); mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end); } +static inline void __flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + unsigned long stride, bool last_level, + int tlb_level) +{ + __flush_tlb_range_nosync(vma, start, end, stride, + last_level, tlb_level); + dsb(ish); +} + static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -481,6 +494,20 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) dsb(ish); isb(); } + +/* + * Used to invalidate the TLB entries for a last-level page table entry + * (pud/pmd/pte). + */ +static inline void __flush_tlb_kernel_pgtable_entry(unsigned long kaddr) +{ + unsigned long addr = __TLBI_VADDR(kaddr, 0); + + dsb(ishst); + __tlbi(vaale1is, addr); + dsb(ish); + isb(); +} #endif #endif diff --git a/arch/arm64/include/asm/unwind_hints.h b/arch/arm64/include/asm/unwind_hints.h new file mode 100644 index 0000000000000000000000000000000000000000..e11a0586b434c436203cf1e8b430ba051e0f84ec --- /dev/null +++ b/arch/arm64/include/asm/unwind_hints.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_ARM64_UNWIND_HINTS_H +#define _ASM_ARM64_UNWIND_HINTS_H + +#include + +#include "orc_types.h" + +#ifdef CONFIG_STACK_VALIDATION + +#ifndef __ASSEMBLY__ + +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ + "987: \n\t" \ + ".pushsection .discard.unwind_hints\n\t" \ + /* struct unwind_hint */ \ + ".long 987b - .\n\t" \ + ".short " __stringify(sp_offset) "\n\t" \ + ".byte " __stringify(sp_reg) "\n\t" \ + ".byte " __stringify(type) "\n\t" \ + ".byte " __stringify(signal) "\n\t" \ + ".balign 4 \n\t" \ + ".popsection\n\t" + +#else /* __ASSEMBLY__ */ + +/* + * In asm, there are two kinds of code: normal C-type callable functions and + * the rest. The normal callable functions can be called by other code, and + * don't do anything unusual with the stack. Such normal callable functions + * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this + * category.
In this case, no special debugging annotations are needed because + * objtool can automatically generate the ORC data for the ORC unwinder to read + * at runtime. + * + * Anything which doesn't fall into the above category, such as syscall and + * interrupt handlers, tends to not be called directly by other functions, and + * often does unusual non-C-function-type things with the stack pointer. Such + * code needs to be annotated such that objtool can understand it. The + * following CFI hint macros are for this type of code. + * + * These macros provide hints to objtool about the state of the stack at each + * instruction. Objtool starts from the hints and follows the code flow, + * making automatic CFI adjustments when it sees pushes and pops, filling out + * the debuginfo as necessary. It will also warn if it sees any + * inconsistencies. + */ +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.Lhere_\@: + .pushsection .discard.unwind_hints + /* struct unwind_hint */ + .long .Lhere_\@ - . + .short \sp_offset + .byte \sp_reg + .byte \type + .byte \signal + .balign 4 + .popsection +.endm + +#endif /* __ASSEMBLY__ */ + +#else /* !CONFIG_STACK_VALIDATION */ + +#ifndef __ASSEMBLY__ + +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" +#else +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.endm +#endif + +#endif /* CONFIG_STACK_VALIDATION */ +#ifdef __ASSEMBLY__ + +.macro UNWIND_HINT_FTRACE, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_FTRACE + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +.macro UNWIND_HINT_REGS, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_REGS + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +.macro UNWIND_HINT_IRQ, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_IRQ_STACK + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_ARM64_UNWIND_HINTS_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index d95b3d6b471a7d63957c47151fd6cb404ca0f4c7..f9439f96f0ec3039b986051675a84e010461b9e6 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -65,14 +65,18 @@ obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \ obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o + obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_CRASH_CORE) += crash_core.o obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o +obj-$(CONFIG_SDEI_WATCHDOG) += watchdog_sdei.o obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o +obj-$(CONFIG_ARM64_MPAM) += mpam.o obj-$(CONFIG_ARM64_MTE) += mte.o obj-y += vdso-wrap.o obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o +obj-$(CONFIG_UNWINDER_ORC) += orc_lookup.o CFLAGS_patch-scs.o += -mbranch-protection=none # Force dependency (vdso*-wrap.S includes vdso.so through incbin) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 7e96604559004ba847d215ea7c934aeccedc8626..148986926ed90897e103fd439e9b3173a376b1c7 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2730,6 +2730,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP) }, +#ifdef CONFIG_ARM64_MPAM + { + 
.desc = "Memory Partitioning And Monitoring", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_MPAM, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1) + }, +#endif {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 98fda8500535341a063d3ee308d77788602308d6..77cd06f2144f982c2d48e11c2eacb77674fc50b5 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -178,17 +179,24 @@ static int c_show(struct seq_file *m, void *v) { int i, j; bool compat = personality(current->personality) == PER_LINUX32; + unsigned int cpu, index, total, freq; + bool rich_container = false; for_each_online_cpu(i) { struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); u32 midr = cpuinfo->reg_midr; + index = cpu = i; + + if (check_rich_container(cpu, &index, &rich_container, &total)) + continue; + /* * glibc reads /proc/cpuinfo to determine the number of * online processors, looking for lines beginning with * "processor". Give glibc what it expects. */ - seq_printf(m, "processor\t: %d\n", i); + seq_printf(m, "processor\t: %d\n", index); if (compat) seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); @@ -235,7 +243,15 @@ static int c_show(struct seq_file *m, void *v) seq_printf(m, "CPU architecture: 8\n"); seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr)); seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr)); - seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr)); + seq_printf(m, "CPU revision\t: %d\n", MIDR_REVISION(midr)); + seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", + id_aa64mmfr0_pa_range_bits(cpuinfo->reg_id_aa64mmfr0), + id_aa64mmfr2_va_range_bits(cpuinfo->reg_id_aa64mmfr2)); + + freq = arch_cpufreq_get_khz(cpu); + if (freq) + seq_printf(m, "CPU MHz\t\t: %u.%03u\n", freq / 1000, freq % 1000); + seq_puts(m, "\n"); } return 0; diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 2b478ca356b00f1f71c11d40e6bb6eb66cf9b173..89d104c0bce656baa0a3140f538bb7d5f456fe3e 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -107,7 +107,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) { struct set_perm_data *spd = data; const efi_memory_desc_t *md = spd->md; - pte_t pte = READ_ONCE(*ptep); + pte_t pte = __ptep_get(ptep); if (md->attribute & EFI_MEMORY_RO) pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); @@ -116,7 +116,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) else if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti() && spd->has_bti) pte = set_pte_bit(pte, __pgprot(PTE_GP)); - set_pte(ptep, pte); + __set_pte(ptep, pte); return 0; } diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 7fcbee0f6c0e4e316da280eb1706c6be5213dd51..b69ffa44c031d0c68f8fa24025a230f183daa346 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -28,6 +28,7 @@ #include #include #include +#include .macro clear_gp_regs .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 @@ -578,6 +579,7 @@ SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label) .if \el == 0 b ret_to_user .else + UNWIND_HINT_REGS PT_REGS_SIZE b ret_to_kernel .endif SYM_CODE_END(el\el\ht\()_\regsize\()_\label) @@ -888,6 +890,7 @@ SYM_FUNC_START(call_on_irq_stack) /* Move to the new stack and call the function there */ add sp, x16, 
#IRQ_STACK_SIZE blr x1 + UNWIND_HINT_IRQ 16 /* * Restore the SP from the FP, and restore the FP and LR from the frame diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 35f3c795951373549faad3ed2d1bd30ad427eb78..6999668e9ecf0811258a02acefabde99543bae49 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -64,6 +64,12 @@ KVM_NVHE_ALIAS(nvhe_hyp_panic_handler); /* Vectors installed by hyp-init on reset HVC. */ KVM_NVHE_ALIAS(__hyp_stub_vectors); +/* Additional static keys for cpufeatures */ +#ifdef CONFIG_ARM64_MPAM +KVM_NVHE_ALIAS(arm64_mpam_has_hcr); +KVM_NVHE_ALIAS(mpam_enabled); +#endif + /* Static keys which are set if a vGIC trap should be handled in hyp. */ KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 078910db77a41b6ffa60c665debc927dff0c7ca5..cfa6b0dafc88bed1220372547f8c9c8e8dac8339 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -262,6 +263,15 @@ void machine_crash_shutdown(struct pt_regs *regs) /* shutdown non-crashing cpus */ crash_smp_send_stop(); + /* + * When we panic in a hardlockup detected by sdei_watchdog, the secure + * timer interrupt remains active here because the firmware only clears + * the EOI after dispatch completes. This would leave the arm_arch_timer + * interrupt unable to trigger in the second kernel, so clear the EOI of + * the secure timer before booting the second kernel. + */ + sdei_watchdog_clear_eoi(); + /* for crashing cpu */ crash_save_cpu(regs, smp_processor_id()); machine_kexec_mask_interrupts(); diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index dd851297596e5e9372bf4f64f640283181e86395..8f19c7a7d65d5ee9cbdb83444d79cdd68b0b9d45 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -25,6 +25,7 @@ #include #include #include +#include static u64 module_direct_base __ro_after_init = 0; static u64 module_plt_base __ro_after_init = 0; @@ -587,7 +588,8 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { - const Elf_Shdr *s; + const Elf_Shdr *s, *orc, *orc_ip; + + s = find_section(hdr, sechdrs, ".altinstructions"); if (s) apply_alternatives_module((void *)s->sh_addr, s->sh_size); @@ -598,5 +600,14 @@ int module_finalize(const Elf_Ehdr *hdr, scs_patch((void *)s->sh_addr, s->sh_size); } + orc = find_section(hdr, sechdrs, ".orc_unwind"); + orc_ip = find_section(hdr, sechdrs, ".orc_unwind_ip"); + + if (orc && orc_ip) { + orc_lookup_module_init(me, + (void *)orc_ip->sh_addr, orc_ip->sh_size, + (void *)orc->sh_addr, orc->sh_size); + } + return module_init_ftrace_plt(hdr, sechdrs, me); } diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c new file mode 100644 index 0000000000000000000000000000000000000000..134b44118553c85ec6798380c5b1d99c9ab766ef --- /dev/null +++ b/arch/arm64/kernel/mpam.c @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Arm Ltd.
*/ + +#include + +#include +#include +#include + +DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); +DEFINE_STATIC_KEY_FALSE(mpam_enabled); +DEFINE_PER_CPU(u64, arm64_mpam_default); +DEFINE_PER_CPU(u64, arm64_mpam_current); diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 4edecaac8f919a232528806fb0daa79dc11b3e9b..cea96ee75d22d1cb1f3e63e3beaedca710322e38 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -35,10 +35,10 @@ DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode); EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode); #endif -void mte_sync_tags(pte_t pte) +void mte_sync_tags(pte_t pte, unsigned int nr_pages) { struct page *page = pte_page(pte); - long i, nr_pages = compound_nr(page); + unsigned int i; /* if PG_mte_tagged is set, tags have already been initialised */ for (i = 0; i < nr_pages; i++, page++) { @@ -67,7 +67,7 @@ int memcmp_pages(struct page *page1, struct page *page2) /* * If the page content is identical but at least one of the pages is * tagged, return non-zero to avoid KSM merging. If only one of the - * pages is tagged, set_pte_at() may zero or change the tags of the + * pages is tagged, __set_ptes() may zero or change the tags of the * other page via mte_sync_tags(). */ if (page_mte_tagged(page1) || page_mte_tagged(page2)) diff --git a/arch/arm64/kernel/orc_lookup.c b/arch/arm64/kernel/orc_lookup.c new file mode 100644 index 0000000000000000000000000000000000000000..9c062c054dcba047303e0098a53665b328bd9bfe --- /dev/null +++ b/arch/arm64/kernel/orc_lookup.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include + +bool orc_init __ro_after_init; +static unsigned int lookup_num_blocks __ro_after_init; + +static inline unsigned long orc_ip(const int *ip) +{ + return (unsigned long)ip + *ip; +} + +static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table, + unsigned int num_entries, unsigned long ip) +{ + int *first = ip_table; + int *last = ip_table + num_entries - 1; + int *mid = first, *found = first; + + if (!num_entries) + return NULL; + + /* + * Do a binary range search to find the rightmost duplicate of a given + * starting address. Some entries are section terminators which are + * "weak" entries for ensuring there are no gaps. They should be + * ignored when they conflict with a real entry. + */ + while (first <= last) { + mid = first + ((last - first) / 2); + + if (orc_ip(mid) <= ip) { + found = mid; + first = mid + 1; + } else + last = mid - 1; + } + + return u_table + (found - ip_table); +} + +#ifdef CONFIG_MODULES +static struct orc_entry *orc_module_find(unsigned long ip) +{ + struct module *mod; + + mod = __module_address(ip); + if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip) + return NULL; + return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, + mod->arch.num_orcs, ip); +} +#else +static struct orc_entry *orc_module_find(unsigned long ip) +{ + return NULL; +} +#endif + +/* + * If we crash with IP==0, the last successfully executed instruction + * was probably an indirect function call with a NULL function pointer, + * and we don't have unwind information for NULL. + * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function + * pointer into its parent and then continue normally from there. 
+ */ +static struct orc_entry null_orc_entry = { + .sp_offset = sizeof(long), + .sp_reg = ORC_REG_SP, + .fp_reg = ORC_REG_UNDEFINED, + .type = ORC_TYPE_CALL +}; + +struct orc_entry *orc_find(unsigned long ip) +{ + static struct orc_entry *orc; + + if (ip == 0) + return &null_orc_entry; + + /* For non-init vmlinux addresses, use the fast lookup table: */ + if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) { + unsigned int idx, start, stop; + + idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; + + if (unlikely((idx >= lookup_num_blocks-1))) { + orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n", + idx, lookup_num_blocks, (void *)ip); + return NULL; + } + + start = orc_lookup[idx]; + stop = orc_lookup[idx + 1] + 1; + + if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) || + (__start_orc_unwind + stop > __stop_orc_unwind))) { + orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n", + idx, lookup_num_blocks, start, stop, (void *)ip); + return NULL; + } + + return __orc_find(__start_orc_unwind_ip + start, + __start_orc_unwind + start, stop - start, ip); + } + + /* vmlinux .init slow lookup: */ + if (is_kernel_inittext(ip)) + return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, + __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); + + /* Module lookup: */ + orc = orc_module_find(ip); + if (orc) + return orc; + + return arch_orc_find(ip); +} + +#ifdef CONFIG_MODULES + +static DEFINE_MUTEX(sort_mutex); +static int *cur_orc_ip_table = __start_orc_unwind_ip; +static struct orc_entry *cur_orc_table = __start_orc_unwind; + +static void orc_sort_swap(void *_a, void *_b, int size) +{ + struct orc_entry *orc_a, *orc_b; + int *a = _a, *b = _b, tmp; + int delta = _b - _a; + + /* Swap the .orc_unwind_ip entries: */ + tmp = *a; + *a = *b + delta; + *b = tmp - delta; + + /* Swap the corresponding .orc_unwind entries: */ + orc_a = cur_orc_table + (a - cur_orc_ip_table); + orc_b = cur_orc_table + (b - cur_orc_ip_table); + swap(*orc_a, *orc_b); +} + +static int orc_sort_cmp(const void *_a, const void *_b) +{ + struct orc_entry *orc_a; + const int *a = _a, *b = _b; + unsigned long a_val = orc_ip(a); + unsigned long b_val = orc_ip(b); + + if (a_val > b_val) + return 1; + if (a_val < b_val) + return -1; + + /* + * The "weak" section terminator entries need to always be first + * to ensure the lookup code skips them in favor of real entries. + * These terminator entries exist to handle any gaps created by + * whitelisted .o files which didn't get objtool generation. + */ + orc_a = cur_orc_table + (a - cur_orc_ip_table); + return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1; +} + +void orc_lookup_module_init(struct module *mod, + void *_orc_ip, size_t orc_ip_size, + void *_orc, size_t orc_size) +{ + int *orc_ip = _orc_ip; + struct orc_entry *orc = _orc; + unsigned int num_entries = orc_ip_size / sizeof(int); + + WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 || + orc_size % sizeof(*orc) != 0 || + num_entries != orc_size / sizeof(*orc)); + + /* + * The 'cur_orc_*' globals allow the orc_sort_swap() callback to + * associate an .orc_unwind_ip table entry with its corresponding + * .orc_unwind entry so they can both be swapped. 
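+ * The sort itself runs under sort_mutex so that concurrent module loads + * cannot observe or clobber each other's 'cur_orc_*' pointers.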
+ */ + mutex_lock(&sort_mutex); + cur_orc_ip_table = orc_ip; + cur_orc_table = orc; + sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap); + mutex_unlock(&sort_mutex); + + mod->arch.orc_unwind_ip = orc_ip; + mod->arch.orc_unwind = orc; + mod->arch.num_orcs = num_entries; +} +#endif + +void __init orc_lookup_init(void) +{ + size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip; + size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind; + size_t num_entries = orc_ip_size / sizeof(int); + struct orc_entry *orc; + int i; + + if (!num_entries || orc_ip_size % sizeof(int) != 0 || + orc_size % sizeof(struct orc_entry) != 0 || + num_entries != orc_size / sizeof(struct orc_entry)) { + orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n"); + return; + } + + /* + * Note, the orc_unwind and orc_unwind_ip tables were already + * sorted at build time via the 'sorttable' tool. + * It's ready for binary search straight away, no need to sort it. + */ + + /* Initialize the fast lookup table: */ + lookup_num_blocks = orc_lookup_end - orc_lookup; + for (i = 0; i < lookup_num_blocks-1; i++) { + orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, + num_entries, + LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i)); + if (!orc) { + orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n"); + return; + } + + orc_lookup[i] = orc - __start_orc_unwind; + } + + /* Initialize the ending block: */ + orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries, + LOOKUP_STOP_IP); + if (!orc) { + orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n"); + return; + } + orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind; + + orc_init = true; +} + +__weak struct orc_entry *arch_orc_find(unsigned long ip) +{ + return NULL; +} diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 385fb78845d696fd1d0e1d6f08bcdeff4c5027c1..dcd519994395394c951da9acf9a801f20457f0a9 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -552,6 +553,12 @@ struct task_struct *__switch_to(struct task_struct *prev, if (prev->thread.sctlr_user != next->thread.sctlr_user) update_sctlr_el1(next->thread.sctlr_user); + /* + * MPAM thread switch happens after the DSB to ensure prev's accesses + * use prev's MPAM settings. 
+	 */
+	mpam_thread_switch(next);
+
 	/* the actual thread switch */
 	last = cpu_switch_to(prev, next);

diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 040b0175334c057f955e63b65c51182efb4e8f8d..7b8aed3de9466752ee5965c23d7c45715ec43e02 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -53,6 +53,7 @@
 #include
 #include
 #include
+#include

 static int num_standard_resources;
 static struct resource *standard_resources;
@@ -390,6 +391,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 			"This indicates a broken bootloader or old kernel\n",
 			boot_args[1], boot_args[2], boot_args[3]);
 	}
+	orc_lookup_init();
 }

 static inline bool cpu_can_disable(unsigned int cpu)
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 425b1bc17a3f6dc3237e81fabcb35d774f1aa9d1..27528e91b67566209761f56c33b970b44b5f511e 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include
 #include

@@ -1298,6 +1299,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 					       (void __user *)NULL, current);
 		}

+		if (thread_flags & _TIF_PATCH_PENDING)
+			klp_update_patch_state(current);
+
 		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 			do_signal(regs);

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 14365ef842440225c2da6f289afa2e3315988f48..0199ee17ef56a2bd935b59367b5090566ddb1610 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include
 #include
 #include

@@ -503,6 +504,34 @@ static bool bootcpu_valid __initdata;
 static unsigned int cpu_count = 1;

 #ifdef CONFIG_ACPI
+
+#ifdef CONFIG_ARCH_PHYTIUM
+/*
+ * On Phytium S2500 multi-socket servers, for example a 2-socket (2P) system,
+ * there are socket0 and socket1 on the server:
+ * If the storage device (such as a SAS controller and the disks that vmcore
+ * is saved to) is attached to socket1 and the second kernel brings up two
+ * CPUs, both on socket0, with nr_cpus=2, then saving the vmcore to disk
+ * fails, because interrupts such as SPIs and LPIs (but not SGIs) cannot be
+ * delivered across CPU sockets on this platform.
+ * To avoid this issue, bypass all CPUs other than each socket's cpu0 so that
+ * cpu0 on every socket can boot and handle interrupts when the second kernel
+ * boots.
+ */
+static bool __init is_phytium_kdump_cpu_need_bypass(u64 hwid)
+{
+	if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) != MIDR_FT_2500)
+		return false;
+
+	/*
+	 * Bypass all CPUs other than cpu0 so the second kernel can bring up
+	 * cpu0 on each socket.
+	 */
+	if (is_kdump_kernel() && (hwid & 0xffff) != (cpu_logical_map(0) & 0xffff))
+		return true;
+	return false;
+}
+#endif
+
 static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];

 struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
@@ -552,6 +581,11 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 	if (cpu_count >= NR_CPUS)
 		return;

+#ifdef CONFIG_ARCH_PHYTIUM
+	if (is_phytium_kdump_cpu_need_bypass(hwid))
+		return;
+#endif
+
 	/* map the logical cpu id to cpu MPIDR */
 	set_cpu_logical_map(cpu_count, hwid);

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 17f66a74c745c8570c3d4e534313e73cb990716a..ab9605aa721db0abf3c0e76a7ee997b8c4115136 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -5,6 +5,8 @@
  * Copyright (C) 2012 ARM Ltd.
*/ #include +#include +#include #include #include #include @@ -18,6 +20,122 @@ #include #include +static inline bool unwind_completed(struct unwind_state *state) +{ + if (state->fp == (unsigned long)task_pt_regs(state->task)->stackframe) { + /* Final frame; nothing to unwind */ + return true; + } + return false; +} + +#ifdef CONFIG_FRAME_POINTER_VALIDATION + +static void unwind_check_reliable(struct unwind_state *state) +{ + unsigned long pc, fp; + struct orc_entry *orc; + bool adjust_pc = false; + + if (unwind_completed(state)) + return; + + /* + * If a previous frame was unreliable, the CFA cannot be reliably + * computed anymore. + */ + if (!state->reliable) + return; + + pc = state->pc; + + /* Don't let modules unload while we're reading their ORC data. */ + preempt_disable(); + + orc = orc_find(pc); + if (!orc || (!orc->fp_offset && orc->type == UNWIND_HINT_TYPE_CALL)) { + /* + * If the final instruction in a function happens to be a call + * instruction, the return address would fall outside of the + * function. That could be the case here. This can happen, for + * instance, if the called function is a "noreturn" function. + * The compiler can optimize away the instructions after the + * call. So, adjust the PC so it falls inside the function and + * retry. + * + * We only do this if the current and the previous frames + * are call frames and not hint frames. + */ + if (state->unwind_type == UNWIND_HINT_TYPE_CALL) { + pc -= 4; + adjust_pc = true; + orc = orc_find(pc); + } + } + if (!orc) { + state->reliable = false; + goto out; + } + state->unwind_type = orc->type; + + if (!state->cfa) { + /* Set up the initial CFA and return. */ + state->cfa = state->fp - orc->fp_offset; + goto out; + } + + /* Compute the next CFA and FP. */ + switch (orc->type) { + case UNWIND_HINT_TYPE_CALL: + /* Normal call */ + state->cfa += orc->sp_offset; + fp = state->cfa + orc->fp_offset; + break; + + case UNWIND_HINT_TYPE_REGS: + /* + * pt_regs hint: The frame pointer points to either the + * synthetic frame within pt_regs or to the place where + * x29 and x30 are saved in the register save area in + * pt_regs. + */ + state->cfa += orc->sp_offset; + fp = state->cfa + offsetof(struct pt_regs, stackframe) - + sizeof(struct pt_regs); + if (state->fp != fp) { + fp = state->cfa + offsetof(struct pt_regs, regs[29]) - + sizeof(struct pt_regs); + } + break; + + case UNWIND_HINT_TYPE_IRQ_STACK: + /* Hint to unwind from the IRQ stack to the task stack. */ + state->cfa = state->fp + orc->sp_offset; + fp = state->fp; + break; + + default: + fp = 0; + break; + } + + /* Validate the actual FP with the computed one. */ + if (state->fp != fp) + state->reliable = false; +out: + if (state->reliable && adjust_pc) + state->pc = pc; + preempt_enable(); +} + +#else /* !CONFIG_FRAME_POINTER_VALIDATION */ + +static void unwind_check_reliable(struct unwind_state *state) +{ +} + +#endif /* CONFIG_FRAME_POINTER_VALIDATION */ + /* * Start an unwind from a pt_regs. 
* @@ -108,12 +226,9 @@ unwind_recover_return_address(struct unwind_state *state) static __always_inline int unwind_next(struct unwind_state *state) { - struct task_struct *tsk = state->task; - unsigned long fp = state->fp; int err; - /* Final frame; nothing to unwind */ - if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) + if (unwind_completed(state)) return -ENOENT; err = unwind_next_frame_record(state); @@ -125,22 +240,28 @@ unwind_next(struct unwind_state *state) return unwind_recover_return_address(state); } -static __always_inline void -unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry, - void *cookie) +static __always_inline int +unwind(struct unwind_state *state, bool need_reliable, + stack_trace_consume_fn consume_entry, void *cookie) { - if (unwind_recover_return_address(state)) - return; + int ret = unwind_recover_return_address(state); + + if (ret) + return ret; while (1) { - int ret; + if (need_reliable && !state->reliable) + return -EINVAL; if (!consume_entry(cookie, state->pc)) break; ret = unwind_next(state); + if (need_reliable && !ret) + unwind_check_reliable(state); if (ret < 0) break; } + return ret; } /* @@ -205,7 +326,42 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, unwind_init_from_task(&state, task); } - unwind(&state, consume_entry, cookie); + unwind(&state, false, consume_entry, cookie); +} + +noinline notrace int arch_stack_walk_reliable( + stack_trace_consume_fn consume_entry, + void *cookie, struct task_struct *task) +{ + struct stack_info stacks[] = { + stackinfo_get_task(task), + STACKINFO_CPU(irq), +#if defined(CONFIG_VMAP_STACK) + STACKINFO_CPU(overflow), +#endif +#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE) + STACKINFO_SDEI(normal), + STACKINFO_SDEI(critical), +#endif +#ifdef CONFIG_EFI + STACKINFO_EFI, +#endif + }; + struct unwind_state state = { + .stacks = stacks, + .nr_stacks = ARRAY_SIZE(stacks), + }; + int ret; + + if (task == current) + unwind_init_from_caller(&state); + else + unwind_init_from_task(&state, task); + unwind_check_reliable(&state); + + ret = unwind(&state, true, consume_entry, cookie); + + return ret == -ENOENT ? 0 : -EINVAL; } static bool dump_backtrace_entry(void *arg, unsigned long where) diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 817d788cd86669439cf4b55af45214f016cc5efb..3466387443a78e88c5d13b86a15524d029a06cf4 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -21,6 +21,7 @@ #include #include #include +#include #ifdef CONFIG_ACPI static bool __init acpi_cpu_is_threaded(int cpu) @@ -71,6 +72,48 @@ int __init parse_acpi_topology(void) } #endif +static unsigned int cpufreq_khz; + +struct arch_cpufreq_sample { + unsigned int khz; + ktime_t time; +}; + +static DEFINE_PER_CPU(struct arch_cpufreq_sample, samples); + +#define ARCH_CPUFREQ_CACHE_THRESHOLD_MS 100 + +static void arch_cpufreq_snapshot_cpu(int cpu, ktime_t now) +{ + s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu)); + struct arch_cpufreq_sample *s; + + /* Don't bother re-computing within the cache threshold time. */ + if (time_delta < ARCH_CPUFREQ_CACHE_THRESHOLD_MS) + return; + + s = per_cpu_ptr(&samples, cpu); + + s->khz = cpufreq_get(cpu); + if (s->khz) + s->time = ktime_get(); +} + +unsigned int arch_cpufreq_get_khz(int cpu) +{ + unsigned int new_cpufreq; + + arch_cpufreq_snapshot_cpu(cpu, ktime_get()); + + new_cpufreq = per_cpu(samples.khz, cpu); + + /* + * If the cpufreq driver can provide a value, use it. 
+ * Otherwise fall back to the boot-time sampled cpufreq_khz.
+ */
+	return new_cpufreq ? new_cpufreq : cpufreq_khz;
+}
+
 #ifdef CONFIG_ARM64_AMU_EXTN
 #define read_corecnt()	read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
 #define read_constcnt()	read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
@@ -81,12 +124,78 @@ int __init parse_acpi_topology(void)
 #undef pr_fmt
 #define pr_fmt(fmt) "AMU: " fmt

+#define ARCH_FREQ_THRESHOLD_MS	10
 static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
 static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
 static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
 static cpumask_var_t amu_fie_cpus;

+/*
+ * Sample the CPU frequency.
+ *
+ * The SYS_AMEVCNTR0_EL0(1) register increments at the fixed rate of
+ * arch_timer_get_cntfrq() and can be used as a timekeeper, while the
+ * SYS_AMEVCNTR0_EL0(0) register counts elapsed CPU cycles. With the two
+ * registers we can sample the CPU frequency:
+ *	delta(cycles) / delta(timekeeper)
+ *
+ * However, both registers are halted by wfe/wfi and do not enter/exit the
+ * idle state synchronously, unlike the x86 MSR_IA32_APERF/MSR_IA32_MPERF
+ * pair.
+ *
+ * NOTE:
+ * All cores are assumed to run at the same frequency by default
+ * (big.LITTLE is ignored).
+ */
+static void __init __arch_cpufreq_init(void *dummy)
+{
+	unsigned long flags;
+	u64 stable_cnt;
+	u64 nonstable_cnt;
+	u32 freq = arch_timer_get_cntfrq();
+	u64 delta = freq / 1000 * ARCH_FREQ_THRESHOLD_MS;
+	u64 counter;
+
+	local_irq_save(flags);
+	counter = stable_cnt = read_sysreg_s(SYS_AMEVCNTR0_EL0(1));
+	nonstable_cnt = read_sysreg_s(SYS_AMEVCNTR0_EL0(0));
+	local_irq_restore(flags);
+
+	/*
+	 * The reads themselves are meaningless; the loop just keeps the CPU
+	 * out of the wfe/wfi idle state while sampling.
+	 *
+	 * Sampling the core frequency this way may take more than
+	 * 10 milliseconds by default; compare the x86
+	 * APERFMPERF_CACHE_THRESHOLD_MS.
+	 */
+	while (counter - stable_cnt < delta)
+		counter = read_sysreg_s(SYS_AMEVCNTR0_EL0(1));
+
+	local_irq_save(flags);
+	stable_cnt = read_sysreg_s(SYS_AMEVCNTR0_EL0(1)) - stable_cnt;
+	nonstable_cnt = read_sysreg_s(SYS_AMEVCNTR0_EL0(0)) - nonstable_cnt;
+	local_irq_restore(flags);
+
+	cpufreq_khz = div64_u64(freq * nonstable_cnt, stable_cnt) / 1000;
+}
+
+static int __init arch_cpufreq_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (cpu_has_amu_feat(cpu)) {
+			smp_call_function_single(cpu, __arch_cpufreq_init, NULL, 1);
+			return 0;
+		}
+	}
+	return 0;
+}
+
+late_initcall(arch_cpufreq_init);
+
 void update_freq_counters_refs(void)
 {
 	this_cpu_write(arch_core_cycles_prev, read_corecnt());
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index a553dae9a0d482e8c17030b42f1e07ced62874fe..e49aba6e5d236d61b77109adf06b88dd70d60695 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -61,6 +61,7 @@
 #define RUNTIME_DISCARD_EXIT

 #include
+#include
 #include
 #include
 #include
@@ -310,6 +311,8 @@ SECTIONS
 		__mmuoff_data_end = .;
 	}

+	ORC_UNWIND_TABLE
+
 	PECOFF_EDATA_PADDING
 	__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
 	_edata = .;
diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c
new file mode 100644
index 0000000000000000000000000000000000000000..c7b12806364e0fcb5a68295ddd8bc979ecb7a179
--- /dev/null
+++ b/arch/arm64/kernel/watchdog_sdei.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Detect hard lockups on a system
+ *
+ * Note: Most of this code is borrowed heavily from the perf hardlockup
+ * detector, so thanks to Don for the initial implementation.
+ */
+
+#define pr_fmt(fmt) "SDEI NMI watchdog: " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* We use the secure physical timer as the SDEI NMI watchdog timer */
+#define SDEI_NMI_WATCHDOG_HWIRQ		29
+
+static int sdei_watchdog_event_num;
+bool disable_sdei_nmi_watchdog;
+static bool sdei_watchdog_registered;
+static DEFINE_PER_CPU(ktime_t, last_check_time);
+
+void sdei_watchdog_hardlockup_enable(unsigned int cpu)
+{
+	int ret;
+
+	if (!sdei_watchdog_registered)
+		return;
+
+	/*
+	 * Skip the first hardlockup check in case the BIOS didn't
+	 * initialize the secure timer correctly.
+	 */
+	watchdog_hardlockup_touch_cpu(cpu);
+	sdei_api_set_secure_timer_period(watchdog_thresh);
+	__this_cpu_write(last_check_time, ktime_get_mono_fast_ns());
+
+	ret = sdei_api_event_enable(sdei_watchdog_event_num);
+	if (ret) {
+		pr_err("Enable NMI Watchdog failed on cpu%d\n",
+		       smp_processor_id());
+	}
+}
+
+void sdei_watchdog_hardlockup_disable(unsigned int cpu)
+{
+	int ret;
+
+	if (!sdei_watchdog_registered)
+		return;
+
+	ret = sdei_api_event_disable(sdei_watchdog_event_num);
+	if (ret)
+		pr_err("Disable NMI Watchdog failed on cpu%d\n",
+		       smp_processor_id());
+}
+
+static int sdei_watchdog_callback(u32 event,
+				  struct pt_regs *regs, void *arg)
+{
+	ktime_t delta, now = ktime_get_mono_fast_ns();
+
+	delta = now - __this_cpu_read(last_check_time);
+	__this_cpu_write(last_check_time, now);
+
+	/*
+	 * The secure timer period is set to the watchdog threshold, so a
+	 * well-behaved timer never fires sooner than 4/5 of that period
+	 * after the previous check; an event arriving earlier than that
+	 * indicates a firmware bug rather than a lockup check.
+	 */
+	if (delta < watchdog_thresh * (u64)NSEC_PER_SEC * 4 / 5) {
+		pr_err(FW_BUG "SDEI Watchdog event triggered too soon, "
+		       "time to last check:%lld ns\n", delta);
+		return 0;
+	}
+
+	watchdog_hardlockup_check(smp_processor_id(), regs);
+
+	return 0;
+}
+NOKPROBE_SYMBOL(sdei_watchdog_callback);
+
+static void sdei_nmi_watchdog_bind(void *data)
+{
+	int ret;
+
+	ret = sdei_api_event_interrupt_bind(SDEI_NMI_WATCHDOG_HWIRQ);
+	if (ret < 0)
+		pr_err("SDEI bind failed on cpu%d, return %d\n",
+		       smp_processor_id(), ret);
+}
+
+static int __init disable_sdei_nmi_watchdog_setup(char *str)
+{
+	disable_sdei_nmi_watchdog = true;
+	return 1;
+}
+__setup("disable_sdei_nmi_watchdog", disable_sdei_nmi_watchdog_setup);
+
+void sdei_watchdog_clear_eoi(void)
+{
+	if (sdei_watchdog_registered)
+		sdei_api_clear_eoi(SDEI_NMI_WATCHDOG_HWIRQ);
+}
+
+int __init sdei_watchdog_hardlockup_probe(void)
+{
+	int ret;
+
+	if (disable_sdei_nmi_watchdog)
+		return -EINVAL;
+
+	if (!is_hyp_mode_available()) {
+		pr_err("Disable SDEI NMI Watchdog in VM\n");
+		return -EINVAL;
+	}
+
+	sdei_watchdog_event_num = sdei_api_event_interrupt_bind(SDEI_NMI_WATCHDOG_HWIRQ);
+	if (sdei_watchdog_event_num < 0) {
+		pr_err("Binding the interrupt failed. Firmware may not support SDEI!\n");
+		return sdei_watchdog_event_num;
+	}
+
+	/*
+	 * After we introduced 'sdei_api_set_secure_timer_period', we deselect
+	 * 'CONFIG_HARDLOCKUP_CHECK_TIMESTAMP'. So we need to make sure that
+	 * the firmware can set the period of the secure timer and that the
+	 * timer interrupt doesn't trigger too soon.
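+	 *
+	 * As a worked example (illustrative only): with watchdog_thresh = 10,
+	 * the firmware arms the secure timer with a 10 s period, and
+	 * sdei_watchdog_callback() treats any event arriving less than 8 s
+	 * (4/5 of the period) after the previous check as a firmware bug
+	 * rather than as a lockup check.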
+	 */
+	if (sdei_api_set_secure_timer_period(watchdog_thresh)) {
+		pr_err("Firmware doesn't support setting the secure timer period, please update your BIOS!\n");
+		return -EINVAL;
+	}
+
+	on_each_cpu(sdei_nmi_watchdog_bind, NULL, true);
+
+	ret = sdei_event_register(sdei_watchdog_event_num,
+				  sdei_watchdog_callback, NULL);
+	if (ret) {
+		pr_err("SDEI Watchdog failed to register its callback\n");
+		return ret;
+	}
+
+	sdei_watchdog_registered = true;
+	pr_info("SDEI Watchdog registered successfully\n");
+
+	return 0;
+}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index efe82cc86bd1f3fc73900c85d3170d085f9be3ed..135fcf3fc4bbe7bb6922be309d1aae69a475a94a 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -1073,7 +1073,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 	} else {
 		/*
 		 * Only locking to serialise with a concurrent
-		 * set_pte_at() in the VMM but still overriding the
+		 * __set_ptes() in the VMM but still overriding the
 		 * tags, hence ignoring the return value.
 		 */
 		try_page_mte_tagging(page);
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 9cfe6bd1dbe459cb3588bccd94359369a546947e..da30acce63082a4fff8f7d694e5ec89436e4d241 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -172,6 +173,35 @@ static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
 	write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);
 }

+static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu)
+{
+	u64 r = MPAM_SYSREG_TRAP_MPAM0_EL1 | MPAM_SYSREG_TRAP_MPAM1_EL1;
+
+	if (!mpam_cpus_have_feature() || !static_branch_likely(&mpam_enabled))
+		return;
+
+	/* trap guest access to MPAMIDR_EL1 */
+	if (mpam_cpus_have_mpam_hcr()) {
+		write_sysreg_s(MPAMHCR_TRAP_MPAMIDR, SYS_MPAMHCR_EL2);
+	} else {
+		/* From v1.1 TIDR can trap MPAMIDR, set it unconditionally */
+		r |= MPAM_SYSREG_TRAP_IDR;
+	}
+
+	write_sysreg_s(r, SYS_MPAM2_EL2);
+}
+
+static inline void __deactivate_traps_mpam(void)
+{
+	if (!mpam_cpus_have_feature() || !static_branch_likely(&mpam_enabled))
+		return;
+
+	write_sysreg_s(0, SYS_MPAM2_EL2);
+
+	if (mpam_cpus_have_mpam_hcr())
+		write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2);
+}
+
 static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 {
 	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
@@ -212,6 +242,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 	}

 	__activate_traps_hfgxtr(vcpu);
+	__activate_traps_mpam(vcpu);
 }

 static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
@@ -231,6 +262,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 		write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);

 	__deactivate_traps_hfgxtr(vcpu);
+	__deactivate_traps_mpam();
 }

 static inline void ___activate_traps(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index bb6b571ec627dede466c5fa1d05785a9c9f78764..8e99f66b377bd37f3622ae9b5c9bf881517659b8 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include

 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
@@ -243,4 +244,32 @@ static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
 	write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
 }

+/*
+ * The _EL0
value was written by the host's context switch, copy this into the + * guest's EL1. + */ +static inline void __mpam_guest_load(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) + write_sysreg_el1(read_sysreg_s(SYS_MPAM0_EL1), SYS_MPAM1); +} + +/* + * Copy the _EL2 register back to _EL1, clearing any trap bits EL2 may have set. + * nVHE world-switch copies the _EL1 register to _EL2. A VHE host writes to the + * _EL2 register as it is aliased by the hardware when TGE is set. + */ +static inline void __mpam_guest_put(void) +{ + u64 val, mask = MPAM_SYSREG_PMG_D | MPAM_SYSREG_PMG_I | + MPAM_SYSREG_PARTID_D | MPAM_SYSREG_PARTID_I; + + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) { + val = FIELD_GET(mask, read_sysreg_s(SYS_MPAM2_EL2)); + write_sysreg_el1(val, SYS_MPAM1); + } +} + #endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c353a06ee7e6d624b41997021379b7b4cf77453d..04b7f83c2ae36be1efe1c4567df1be5d5ca336c2 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -242,6 +242,14 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) } } +/* Use the host thread's partid and pmg for world switch */ +static void __mpam_copy_el1_to_el2(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) + write_sysreg_s(read_sysreg_s(SYS_MPAM1_EL1), SYS_MPAM2_EL2); +} + /* Switch to the guest for legacy non-VHE systems */ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) { @@ -251,6 +259,8 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) bool pmu_switch_needed; u64 exit_code; + __mpam_copy_el1_to_el2(); + /* * Having IRQs masked via PMR when entering the guest means the GIC * will not signal the CPU of interrupts of lower priority, and the @@ -310,6 +320,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __timer_enable_traps(vcpu); __debug_switch_to_guest(vcpu); + __mpam_guest_load(); do { /* Jump in the fire! */ @@ -320,6 +331,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __sysreg_save_state_nvhe(guest_ctxt); __sysreg32_save_state(vcpu); + __mpam_guest_put(); __timer_disable_traps(vcpu); __hyp_vgic_save_state(vcpu); diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index ca0bf0b92ca09ec0c9e96c97f32100bc0f9d30b2..b0586d79d2e075b0806adc1d5d079ba3c105c8d4 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -695,15 +695,29 @@ void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot, kvm_pte_t *ptep) { - bool device = prot & KVM_PGTABLE_PROT_DEVICE; - kvm_pte_t attr = device ? 
KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) : - KVM_S2_MEMATTR(pgt, NORMAL); + kvm_pte_t attr; u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS; + switch (prot & (KVM_PGTABLE_PROT_DEVICE | + KVM_PGTABLE_PROT_NORMAL_NC)) { + case KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_NORMAL_NC: + return -EINVAL; + case KVM_PGTABLE_PROT_DEVICE: + if (prot & KVM_PGTABLE_PROT_X) + return -EINVAL; + attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE); + break; + case KVM_PGTABLE_PROT_NORMAL_NC: + if (prot & KVM_PGTABLE_PROT_X) + return -EINVAL; + attr = KVM_S2_MEMATTR(pgt, NORMAL_NC); + break; + default: + attr = KVM_S2_MEMATTR(pgt, NORMAL); + } + if (!(prot & KVM_PGTABLE_PROT_X)) attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN; - else if (device) - return -EINVAL; if (prot & KVM_PGTABLE_PROT_R) attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R; diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c index b35a178e7e0db0571ff6aba5a4fcaa08fc70749f..6b407cd3230d4dca06dece7a9347d2f1587025ab 100644 --- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c @@ -90,6 +90,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu) __sysreg32_restore_state(vcpu); __sysreg_restore_user_state(guest_ctxt); __sysreg_restore_el1_state(guest_ctxt); + __mpam_guest_load(); vcpu_set_flag(vcpu, SYSREGS_ON_CPU); diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 482280fe22d7c59d2539b9f0e758cfc95afad0ff..68f225d354d22ac64b445deaabbc9f1239a507f8 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1398,7 +1398,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, int ret = 0; bool write_fault, writable, force_pte = false; bool exec_fault, mte_allowed; - bool device = false; + bool device = false, vfio_allow_any_uc = false; unsigned long mmu_seq; struct kvm *kvm = vcpu->kvm; struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; @@ -1490,6 +1490,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, gfn = fault_ipa >> PAGE_SHIFT; mte_allowed = kvm_vma_mte_allowed(vma); + vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED; + /* Don't use the VMA after the unlock -- it may have vanished */ vma = NULL; @@ -1576,10 +1578,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (exec_fault) prot |= KVM_PGTABLE_PROT_X; - if (device) - prot |= KVM_PGTABLE_PROT_DEVICE; - else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) + if (device) { + if (vfio_allow_any_uc) + prot |= KVM_PGTABLE_PROT_NORMAL_NC; + else + prot |= KVM_PGTABLE_PROT_DEVICE; + } else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) { prot |= KVM_PGTABLE_PROT_X; + } /* * Under the premise of getting a FSC_PERM fault, we just need to relax diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 2031703424ea14aea8cc00fab4965ef994f1fee6..b542cb9c5d1d615094a28cca114d19f8414a6683 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -417,6 +417,31 @@ static bool trap_oslar_el1(struct kvm_vcpu *vcpu, return true; } +static bool trap_mpam(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 aa64pfr0_el1 = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1); + + /* + * What did we expose to the guest? + * Earlier guests may have seen the ID bits, which can't be removed + * without breaking migration, but MPAMIDR_EL1 can advertise all-zeroes, + * indicating there are zero PARTID/PMG supported by the CPU, allowing + * the other two trapped registers (MPAM1_EL1 and MPAM0_EL1) to be + * treated as RAZ/WI. 
+ * Emulating MPAM1_EL1 as RAZ/WI means the guest sees the MPAMEN bit + * as clear, and realises MPAM isn't usable on this CPU. + */ + if (FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, aa64pfr0_el1)) { + p->regval = 0; + return true; + } + + kvm_inject_undefined(vcpu); + return false; +} + static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) @@ -1234,6 +1259,36 @@ static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp, return arm64_ftr_safe_value(&kvm_ftr, new, cur); } +static u64 kvm_arm64_ftr_max(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + u64 pfr0, val = rd->reset(vcpu, rd); + u32 field, id = reg_to_encoding(rd); + + /* + * Some values may reset to a lower value than can be supported, + * get the maximum feature value. + */ + switch (id) { + case SYS_ID_AA64PFR0_EL1: + pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + /* + * MPAM resets to 0, but migration of MPAM=1 guests is needed. + * See trap_mpam() for more. + */ + field = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT); + if (field == ID_AA64PFR0_EL1_MPAM_1) { + val &= ~ID_AA64PFR0_EL1_MPAM_MASK; + val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, ID_AA64PFR0_EL1_MPAM_1); + } + + break; + } + + return val; +} + /** * arm64_check_features() - Check if a feature register value constitutes * a subset of features indicated by the idreg's KVM sanitised limit. @@ -1254,8 +1309,7 @@ static int arm64_check_features(struct kvm_vcpu *vcpu, const struct arm64_ftr_bits *ftrp = NULL; u32 id = reg_to_encoding(rd); u64 writable_mask = rd->val; - u64 limit = rd->reset(vcpu, rd); - u64 mask = 0; + u64 limit, mask = 0; /* * Hidden and unallocated ID registers may not have a corresponding @@ -1269,6 +1323,7 @@ static int arm64_check_features(struct kvm_vcpu *vcpu, if (!ftr_reg) return -EINVAL; + limit = kvm_arm64_ftr_max(vcpu, rd); ftrp = ftr_reg->ftr_bits; for (; ftrp && ftrp->width; ftrp++) { @@ -1476,7 +1531,8 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, /* * MPAM is disabled by default as KVM also needs a set of PARTID to * program the MPAMVPMx_EL2 PARTID remapping registers with. But some - * older kernels let the guest see the ID bit. + * older kernels let the guest see the ID bit. Turning it on causes + * the registers to be emulated as RAZ/WI. See trap_mpam() for more. 
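+ * For example (illustrative only): a VM that was started on such an older
+ * kernel with MPAM=1 in ID_AA64PFR0_EL1 can still be migrated here; the
+ * VMM restores MPAM=1 and the guest's MPAM register accesses are then
+ * trapped and emulated as RAZ/WI rather than the migration being rejected.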
 */
	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

@@ -2064,7 +2120,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	  .get_user = get_id_reg,
	  .set_user = set_id_aa64pfr0_el1,
	  .reset = read_sanitised_id_aa64pfr0_el1,
-	  .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
+	  .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK |
+		 ID_AA64PFR0_EL1_MPAM_MASK, },
	{ SYS_DESC(SYS_ID_AA64PFR1_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
@@ -2184,8 +2241,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
+	{ SYS_DESC(SYS_MPAMIDR_EL1), trap_mpam },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

+	{ SYS_DESC(SYS_MPAM1_EL1), trap_mpam },
+	{ SYS_DESC(SYS_MPAM0_EL1), trap_mpam },
	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 29490be2546bfce91862735e2b66a83c8d682afe..a2fd865b816db7029ca956b70b5b4934b6baa569 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -15,6 +15,8 @@ endif

 lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o

+lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc_page.o
+
 obj-$(CONFIG_CRC32) += crc32.o
 obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o

diff --git a/arch/arm64/lib/copy_mc_page.S b/arch/arm64/lib/copy_mc_page.S
new file mode 100644
index 0000000000000000000000000000000000000000..656d831ef4b87c0813515a53680b119e3ea20603
--- /dev/null
+++ b/arch/arm64/lib/copy_mc_page.S
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * Copy a page from src to dest (both are page aligned), machine-check safe.
+ *
+ * Parameters:
+ *	x0 - dest
+ *	x1 - src
+ * Returns:
+ *	x0 - 0 if the copy succeeded, or -EFAULT if anything went wrong
+ *	     while copying.
+ */
+SYM_FUNC_START(__pi_copy_mc_page)
+alternative_if ARM64_HAS_NO_HW_PREFETCH
+	// Prefetch three cache lines ahead.
+	prfm	pldl1strm, [x1, #128]
+	prfm	pldl1strm, [x1, #256]
+	prfm	pldl1strm, [x1, #384]
+alternative_else_nop_endif
+
+CPY_MC(9998f, ldp x2, x3, [x1])
+CPY_MC(9998f, ldp x4, x5, [x1, #16])
+CPY_MC(9998f, ldp x6, x7, [x1, #32])
+CPY_MC(9998f, ldp x8, x9, [x1, #48])
+CPY_MC(9998f, ldp x10, x11, [x1, #64])
+CPY_MC(9998f, ldp x12, x13, [x1, #80])
+CPY_MC(9998f, ldp x14, x15, [x1, #96])
+CPY_MC(9998f, ldp x16, x17, [x1, #112])
+
+	add	x0, x0, #256
+	add	x1, x1, #128
+1:
+	tst	x0, #(PAGE_SIZE - 1)
+
+alternative_if ARM64_HAS_NO_HW_PREFETCH
+	prfm	pldl1strm, [x1, #384]
+alternative_else_nop_endif
+
+CPY_MC(9998f, stnp x2, x3, [x0, #-256])
+CPY_MC(9998f, ldp x2, x3, [x1])
+CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256])
+CPY_MC(9998f, ldp x4, x5, [x1, #16])
+CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256])
+CPY_MC(9998f, ldp x6, x7, [x1, #32])
+CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256])
+CPY_MC(9998f, ldp x8, x9, [x1, #48])
+CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256])
+CPY_MC(9998f, ldp x10, x11, [x1, #64])
+CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256])
+CPY_MC(9998f, ldp x12, x13, [x1, #80])
+CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256])
+CPY_MC(9998f, ldp x14, x15, [x1, #96])
+CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256])
+CPY_MC(9998f, ldp x16, x17, [x1, #112])
+
+	add	x0, x0, #128
+	add	x1, x1, #128
+
+	b.ne	1b
+
+CPY_MC(9998f, stnp x2, x3, [x0, #-256])
+CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256])
+CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256])
+CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256])
+CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256])
+CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256])
+CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256])
+CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256])
+
+	mov	x0, #0
+	ret
+
+9998:	mov	x0, #-EFAULT
+	ret
+
+SYM_FUNC_END(__pi_copy_mc_page)
+SYM_FUNC_ALIAS(copy_mc_page, __pi_copy_mc_page)
+EXPORT_SYMBOL(copy_mc_page)
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 5018ac03b6bf39330c09f686656c6f1a72fa9f42..2b748e83f6cf01359fd1b8dda0e7c1b481d4acf0 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -80,6 +80,33 @@ SYM_FUNC_START(mte_copy_page_tags)
 	ret
 SYM_FUNC_END(mte_copy_page_tags)

+/*
+ * Copy the tags from the source page to the destination page, machine-check
+ * safe.
+ *	x0 - address of the destination page
+ *	x1 - address of the source page
+ * Returns:
+ *	x0 - 0 if the copy succeeded, or -EFAULT if anything went wrong
+ *	     while copying.
+ */
+SYM_FUNC_START(mte_copy_mc_page_tags)
+	mov	x2, x0
+	mov	x3, x1
+	multitag_transfer_size x5, x6
+1:
+CPY_MC(2f, ldgm x4, [x3])
+CPY_MC(2f, stgm x4, [x2])
+	add	x2, x2, x5
+	add	x3, x3, x5
+	tst	x2, #(PAGE_SIZE - 1)
+	b.ne	1b
+
+	mov	x0, #0
+	ret
+
+2:	mov	x0, #-EFAULT
+	ret
+SYM_FUNC_END(mte_copy_mc_page_tags)
+
 /*
  * Read tags from a user buffer (one tag per byte) and set the corresponding
  * tags at the given kernel address. Used by PTRACE_POKEMTETAGS.
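For illustration, a minimal C-level sketch of what the CPY_MC() fixups above
provide to callers. The wrapper below is hypothetical; only copy_mc_page()
and its 0/-EFAULT contract come from this patch, and the prototype follows
the register convention documented in the asm comment::

	#include <linux/kernel.h>
	#include <linux/printk.h>

	int copy_mc_page(void *to, const void *from);	/* added above */

	/*
	 * Hypothetical caller: a machine check consumed by one of the
	 * CPY_MC()-annotated loads/stores branches to the 9998 fixup,
	 * which returns -EFAULT instead of bringing the machine down.
	 */
	static int copy_one_page_safe(void *dst, const void *src)
	{
		int err = copy_mc_page(dst, src);

		if (err)	/* err == -EFAULT: poison hit mid-copy */
			pr_warn("page copy aborted by uncorrected memory error\n");
		return err;
	}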
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index dbd1bc95967d00d364e27e27c1f5fff2bc48e81f..60454256945b8b3ff47bdb609b51b3d5ad9b0572 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -3,6 +3,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
 	cache.o copypage.o flush.o \
 	ioremap.o mmap.o pgd.o mmu.o \
 	context.o proc.o pageattr.o fixmap.o
+obj-$(CONFIG_ARM64_CONTPTE) += contpte.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
 obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
new file mode 100644
index 0000000000000000000000000000000000000000..1b64b4c3f8bf8af49d53d1d835ad9eaa3dda5407
--- /dev/null
+++ b/arch/arm64/mm/contpte.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 ARM Ltd.
+ */
+
+#include
+#include
+#include
+#include
+
+static inline bool mm_is_user(struct mm_struct *mm)
+{
+	/*
+	 * Don't attempt to apply the contig bit to kernel mappings, because
+	 * dynamically adding/removing the contig bit can cause page faults.
+	 * These racing faults are ok for user space, since they get serialized
+	 * on the PTL. But kernel mappings can't tolerate faults.
+	 */
+	if (unlikely(mm_is_efi(mm)))
+		return false;
+	return mm != &init_mm;
+}
+
+static inline pte_t *contpte_align_down(pte_t *ptep)
+{
+	return PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
+}
+
+static void contpte_try_unfold_partial(struct mm_struct *mm, unsigned long addr,
+				       pte_t *ptep, unsigned int nr)
+{
+	/*
+	 * Unfold any partially covered contpte block at the beginning and end
+	 * of the range.
+	 */
+
+	if (ptep != contpte_align_down(ptep) || nr < CONT_PTES)
+		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
+
+	if (ptep + nr != contpte_align_down(ptep + nr)) {
+		unsigned long last_addr = addr + PAGE_SIZE * (nr - 1);
+		pte_t *last_ptep = ptep + nr - 1;
+
+		contpte_try_unfold(mm, last_addr, last_ptep,
+				   __ptep_get(last_ptep));
+	}
+}
+
+static void contpte_convert(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, pte_t pte)
+{
+	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+	unsigned long start_addr;
+	pte_t *start_ptep;
+	int i;
+
+	start_ptep = ptep = contpte_align_down(ptep);
+	start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
+	pte = pfn_pte(ALIGN_DOWN(pte_pfn(pte), CONT_PTES), pte_pgprot(pte));
+
+	for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) {
+		pte_t ptent = __ptep_get_and_clear(mm, addr, ptep);
+
+		if (pte_dirty(ptent))
+			pte = pte_mkdirty(pte);
+
+		if (pte_young(ptent))
+			pte = pte_mkyoung(pte);
+	}
+
+	__flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);
+
+	__set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES);
+}
+
+void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
+			pte_t *ptep, pte_t pte)
+{
+	/*
+	 * We have already checked that the virtual and physical addresses are
+	 * correctly aligned for a contpte mapping in contpte_try_fold(), so
+	 * the remaining checks are to ensure that the contpte range is fully
+	 * covered by a single folio, and to ensure that all the ptes are valid
+	 * with contiguous PFNs and matching prots. We ignore the state of the
+	 * access and dirty bits for the purpose of deciding if it's a
+	 * contiguous range; the folding process will generate a single contpte
+	 * entry which has a single access and dirty bit. Those 2 bits are the
+	 * logical OR of their respective bits in the constituent pte entries.
In order to + * ensure the contpte range is covered by a single folio, we must + * recover the folio from the pfn, but special mappings don't have a + * folio backing them. Fortunately contpte_try_fold() already checked + * that the pte is not special - we never try to fold special mappings. + * Note we can't use vm_normal_page() for this since we don't have the + * vma. + */ + + unsigned long folio_start, folio_end; + unsigned long cont_start, cont_end; + pte_t expected_pte, subpte; + struct folio *folio; + struct page *page; + unsigned long pfn; + pte_t *orig_ptep; + pgprot_t prot; + + int i; + + if (!mm_is_user(mm)) + return; + + page = pte_page(pte); + folio = page_folio(page); + folio_start = addr - (page - &folio->page) * PAGE_SIZE; + folio_end = folio_start + folio_nr_pages(folio) * PAGE_SIZE; + cont_start = ALIGN_DOWN(addr, CONT_PTE_SIZE); + cont_end = cont_start + CONT_PTE_SIZE; + + if (folio_start > cont_start || folio_end < cont_end) + return; + + pfn = ALIGN_DOWN(pte_pfn(pte), CONT_PTES); + prot = pte_pgprot(pte_mkold(pte_mkclean(pte))); + expected_pte = pfn_pte(pfn, prot); + orig_ptep = ptep; + ptep = contpte_align_down(ptep); + + for (i = 0; i < CONT_PTES; i++) { + subpte = pte_mkold(pte_mkclean(__ptep_get(ptep))); + if (!pte_same(subpte, expected_pte)) + return; + expected_pte = pte_advance_pfn(expected_pte, 1); + ptep++; + } + + pte = pte_mkcont(pte); + contpte_convert(mm, addr, orig_ptep, pte); +} +EXPORT_SYMBOL_GPL(__contpte_try_fold); + +void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + /* + * We have already checked that the ptes are contiguous in + * contpte_try_unfold(), so just check that the mm is user space. + */ + if (!mm_is_user(mm)) + return; + + pte = pte_mknoncont(pte); + contpte_convert(mm, addr, ptep, pte); +} +EXPORT_SYMBOL_GPL(__contpte_try_unfold); + +pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte) +{ + /* + * Gather access/dirty bits, which may be populated in any of the ptes + * of the contig range. We are guaranteed to be holding the PTL, so any + * contiguous range cannot be unfolded or otherwise modified under our + * feet. + */ + + pte_t pte; + int i; + + ptep = contpte_align_down(ptep); + + for (i = 0; i < CONT_PTES; i++, ptep++) { + pte = __ptep_get(ptep); + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} +EXPORT_SYMBOL_GPL(contpte_ptep_get); + +pte_t contpte_ptep_get_lockless(pte_t *orig_ptep) +{ + /* + * The ptep_get_lockless() API requires us to read and return *orig_ptep + * so that it is self-consistent, without the PTL held, so we may be + * racing with other threads modifying the pte. Usually a READ_ONCE() + * would suffice, but for the contpte case, we also need to gather the + * access and dirty bits from across all ptes in the contiguous block, + * and we can't read all of those neighbouring ptes atomically, so any + * contiguous range may be unfolded/modified/refolded under our feet. + * Therefore we ensure we read a _consistent_ contpte range by checking + * that all ptes in the range are valid and have CONT_PTE set, that all + * pfns are contiguous and that all pgprots are the same (ignoring + * access/dirty). If we find a pte that is not consistent, then we must + * be racing with an update so start again. If the target pte does not + * have CONT_PTE set then that is considered consistent on its own + * because it is not part of a contpte range. 
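+ *
+ * For example (illustrative only): if another thread unfolds the block
+ * between two of our reads, at least one sub-pte will have lost its
+ * CONT bit or changed its pfn/prot, the consistency check fails, and
+ * we retry from the original pte.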
+ */ + + pgprot_t orig_prot; + unsigned long pfn; + pte_t orig_pte; + pgprot_t prot; + pte_t *ptep; + pte_t pte; + int i; + +retry: + orig_pte = __ptep_get(orig_ptep); + + if (!pte_valid_cont(orig_pte)) + return orig_pte; + + orig_prot = pte_pgprot(pte_mkold(pte_mkclean(orig_pte))); + ptep = contpte_align_down(orig_ptep); + pfn = pte_pfn(orig_pte) - (orig_ptep - ptep); + + for (i = 0; i < CONT_PTES; i++, ptep++, pfn++) { + pte = __ptep_get(ptep); + prot = pte_pgprot(pte_mkold(pte_mkclean(pte))); + + if (!pte_valid_cont(pte) || + pte_pfn(pte) != pfn || + pgprot_val(prot) != pgprot_val(orig_prot)) + goto retry; + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} +EXPORT_SYMBOL_GPL(contpte_ptep_get_lockless); + +void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned int nr) +{ + unsigned long next; + unsigned long end; + unsigned long pfn; + pgprot_t prot; + + /* + * The set_ptes() spec guarantees that when nr > 1, the initial state of + * all ptes is not-present. Therefore we never need to unfold or + * otherwise invalidate a range before we set the new ptes. + * contpte_set_ptes() should never be called for nr < 2. + */ + VM_WARN_ON(nr == 1); + + if (!mm_is_user(mm)) + return __set_ptes(mm, addr, ptep, pte, nr); + + end = addr + (nr << PAGE_SHIFT); + pfn = pte_pfn(pte); + prot = pte_pgprot(pte); + + do { + next = pte_cont_addr_end(addr, end); + nr = (next - addr) >> PAGE_SHIFT; + pte = pfn_pte(pfn, prot); + + if (((addr | next | (pfn << PAGE_SHIFT)) & ~CONT_PTE_MASK) == 0) + pte = pte_mkcont(pte); + else + pte = pte_mknoncont(pte); + + __set_ptes(mm, addr, ptep, pte, nr); + + addr = next; + ptep += nr; + pfn += nr; + + } while (addr != end); +} +EXPORT_SYMBOL_GPL(contpte_set_ptes); + +void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr, int full) +{ + contpte_try_unfold_partial(mm, addr, ptep, nr); + __clear_full_ptes(mm, addr, ptep, nr, full); +} +EXPORT_SYMBOL_GPL(contpte_clear_full_ptes); + +pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned int nr, int full) +{ + contpte_try_unfold_partial(mm, addr, ptep, nr); + return __get_and_clear_full_ptes(mm, addr, ptep, nr, full); +} +EXPORT_SYMBOL_GPL(contpte_get_and_clear_full_ptes); + +int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + /* + * ptep_clear_flush_young() technically requires us to clear the access + * flag for a _single_ pte. However, the core-mm code actually tracks + * access/dirty per folio, not per page. And since we only create a + * contig range when the range is covered by a single folio, we can get + * away with clearing young for the whole contig range here, so we avoid + * having to unfold. + */ + + int young = 0; + int i; + + ptep = contpte_align_down(ptep); + addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); + + for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) + young |= __ptep_test_and_clear_young(vma, addr, ptep); + + return young; +} +EXPORT_SYMBOL_GPL(contpte_ptep_test_and_clear_young); + +int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + int young; + + young = contpte_ptep_test_and_clear_young(vma, addr, ptep); + + if (young) { + /* + * See comment in __ptep_clear_flush_young(); same rationale for + * eliding the trailing DSB applies here. 
+		 */
+		addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
+		__flush_tlb_range_nosync(vma, addr, addr + CONT_PTE_SIZE,
+					 PAGE_SIZE, true, 3);
+	}
+
+	return young;
+}
+EXPORT_SYMBOL_GPL(contpte_ptep_clear_flush_young);
+
+void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, unsigned int nr)
+{
+	/*
+	 * If wrprotecting an entire contig range, we can avoid unfolding. Just
+	 * set wrprotect and wait for the later mmu_gather flush to invalidate
+	 * the tlb. Until the flush, the page may or may not be wrprotected.
+	 * After the flush, it is guaranteed wrprotected. If it's a partial
+	 * range though, we must unfold, because we can't have a case where
+	 * CONT_PTE is set but wrprotect applies to a subset of the PTEs; this
+	 * would cause it to continue to be unpredictable after the flush.
+	 */
+
+	contpte_try_unfold_partial(mm, addr, ptep, nr);
+	__wrprotect_ptes(mm, addr, ptep, nr);
+}
+EXPORT_SYMBOL_GPL(contpte_wrprotect_ptes);
+
+int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
+				  unsigned long addr, pte_t *ptep,
+				  pte_t entry, int dirty)
+{
+	unsigned long start_addr;
+	pte_t orig_pte;
+	int i;
+
+	/*
+	 * Gather the access/dirty bits for the contiguous range. If nothing
+	 * has changed, it's a no-op.
+	 */
+	orig_pte = pte_mknoncont(ptep_get(ptep));
+	if (pte_val(orig_pte) == pte_val(entry))
+		return 0;
+
+	/*
+	 * We can fix up access/dirty bits without having to unfold the contig
+	 * range. But if the write bit is changing, we must unfold.
+	 */
+	if (pte_write(orig_pte) == pte_write(entry)) {
+		/*
+		 * For HW access management, we technically only need to update
+		 * the flag on a single pte in the range. But for SW access
+		 * management, we need to update all the ptes to prevent extra
+		 * faults. Avoid per-page tlb flush in __ptep_set_access_flags()
+		 * and instead flush the whole range at the end.
+ */ + ptep = contpte_align_down(ptep); + start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); + + for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) + __ptep_set_access_flags(vma, addr, ptep, entry, 0); + + if (dirty) + __flush_tlb_range(vma, start_addr, addr, + PAGE_SIZE, true, 3); + } else { + __contpte_try_unfold(vma->vm_mm, addr, ptep, orig_pte); + __ptep_set_access_flags(vma, addr, ptep, entry, dirty); + } + + return 1; +} +EXPORT_SYMBOL_GPL(contpte_ptep_set_access_flags); diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index a7bb20055ce0948a6f586e29fd1a05bbbc1cc75f..b062c925daa4dcb471de37326a64a59e8d253453 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -14,6 +14,21 @@ #include #include +static int do_mte(struct page *to, struct page *from, void *kto, void *kfrom, bool mc) +{ + int ret = 0; + + if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) { + set_bit(PG_mte_tagged, &to->flags); + if (mc) + ret = mte_copy_mc_page_tags(kto, kfrom); + else + mte_copy_page_tags(kto, kfrom); + } + + return ret; +} + void copy_highpage(struct page *to, struct page *from) { void *kto = page_address(to); @@ -24,12 +39,7 @@ void copy_highpage(struct page *to, struct page *from) if (kasan_hw_tags_enabled()) page_kasan_tag_reset(to); - if (system_supports_mte() && page_mte_tagged(from)) { - /* It's a new page, shouldn't have been tagged yet */ - WARN_ON_ONCE(!try_page_mte_tagging(to)); - mte_copy_page_tags(kto, kfrom); - set_page_mte_tagged(to); - } + do_mte(to, from, kto, kfrom, false); } EXPORT_SYMBOL(copy_highpage); @@ -40,3 +50,40 @@ void copy_user_highpage(struct page *to, struct page *from, flush_dcache_page(to); } EXPORT_SYMBOL_GPL(copy_user_highpage); + +#ifdef CONFIG_ARCH_HAS_COPY_MC +/* + * Return -EFAULT if anything goes wrong while copying page or mte. 
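+ *
+ * A minimal illustrative caller (hypothetical, not part of this patch):
+ *
+ *	if (copy_mc_highpage(dst, src))
+ *		; /* src hit an uncorrected error; do not use dst */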
+ */ +int copy_mc_highpage(struct page *to, struct page *from) +{ + void *kto = page_address(to); + void *kfrom = page_address(from); + int ret; + + ret = copy_mc_page(kto, kfrom); + if (ret) + return -EFAULT; + + ret = do_mte(to, from, kto, kfrom, true); + if (ret) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL(copy_mc_highpage); + +int copy_mc_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + int ret; + + ret = copy_mc_highpage(to, from); + + if (!ret) + flush_dcache_page(to); + + return ret; +} +EXPORT_SYMBOL_GPL(copy_mc_user_highpage); +#endif diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 228d681a871594f05acea36f572b31363f8000c6..bdc81518d20788c1fc99aa5a49267ed14d957c9d 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -16,7 +16,7 @@ get_ex_fixup(const struct exception_table_entry *ex) return ((unsigned long)&ex->fixup + ex->fixup); } -static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex, +static bool ex_handler_fixup_err_zero(const struct exception_table_entry *ex, struct pt_regs *regs) { int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); @@ -69,10 +69,27 @@ bool fixup_exception(struct pt_regs *regs) return ex_handler_bpf(ex, regs); case EX_TYPE_UACCESS_ERR_ZERO: case EX_TYPE_KACCESS_ERR_ZERO: - return ex_handler_uaccess_err_zero(ex, regs); + return ex_handler_fixup_err_zero(ex, regs); case EX_TYPE_LOAD_UNALIGNED_ZEROPAD: return ex_handler_load_unaligned_zeropad(ex, regs); } BUG(); } + +bool fixup_exception_mc(struct pt_regs *regs) +{ + const struct exception_table_entry *ex; + + ex = search_exception_tables(instruction_pointer(regs)); + if (!ex) + return false; + + switch (ex->type) { + case EX_TYPE_UACCESS_ERR_ZERO: + case EX_TYPE_COPY_MC_PAGE_ERR_ZERO: + return ex_handler_fixup_err_zero(ex, regs); + } + + return false; +} diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 2e5d1e238af958e8dcdd07b498f990421b27cc02..f8b16fc10b3f5cf5c6b5f74cfee5882cc54ad2f4 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -191,7 +191,7 @@ static void show_pte(unsigned long addr) if (!ptep) break; - pte = READ_ONCE(*ptep); + pte = __ptep_get(ptep); pr_cont(", pte=%016llx", pte_val(pte)); pte_unmap(ptep); } while(0); @@ -205,16 +205,16 @@ static void show_pte(unsigned long addr) * * It needs to cope with hardware update of the accessed/dirty state by other * agents in the system and can safely skip the __sync_icache_dcache() call as, - * like set_pte_at(), the PTE is never changed from no-exec to exec here. + * like __set_ptes(), the PTE is never changed from no-exec to exec here. * * Returns whether or not the PTE actually changed. 
*/ -int ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep, - pte_t entry, int dirty) +int __ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty) { pteval_t old_pteval, pteval; - pte_t pte = READ_ONCE(*ptep); + pte_t pte = __ptep_get(ptep); if (pte_same(pte, entry)) return 0; @@ -494,25 +494,6 @@ static void do_bad_area(unsigned long far, unsigned long esr, } } -#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000) -#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000) - -static vm_fault_t __do_page_fault(struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long addr, - unsigned int mm_flags, unsigned long vm_flags, - struct pt_regs *regs) -{ - /* - * Ok, we have a good vm_area for this memory access, so we can handle - * it. - * Check that the permissions on the VMA allow for the fault which - * occurred. - */ - if (!(vma->vm_flags & vm_flags)) - return VM_FAULT_BADACCESS; - return handle_mm_fault(vma, addr, mm_flags, regs); -} - static bool is_el0_instruction_abort(unsigned long esr) { return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW; @@ -527,6 +508,9 @@ static bool is_write_abort(unsigned long esr) return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM); } +#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000) +#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000) + static int __kprobes do_page_fault(unsigned long far, unsigned long esr, struct pt_regs *regs) { @@ -596,7 +580,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, if (!(vma->vm_flags & vm_flags)) { vma_end_read(vma); - goto lock_mmap; + fault = VM_FAULT_BADACCESS; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto done; } fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs); if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) @@ -623,7 +609,10 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, goto done; } - fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs); + if (!(vma->vm_flags & vm_flags)) + fault = VM_FAULT_BADACCESS; + else + fault = handle_mm_fault(vma, addr, mm_flags, regs); /* Quick path to respond to signals */ if (fault_signal_pending(fault, regs)) { @@ -728,6 +717,31 @@ static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs) return 1; /* "fault" */ } +static bool arm64_do_kernel_sea(unsigned long addr, unsigned int esr, + struct pt_regs *regs, int sig, int code) +{ + if (!IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC)) + return false; + + if (user_mode(regs)) + return false; + + if (apei_claim_sea(regs) < 0) + return false; + + if (!fixup_exception_mc(regs)) + return false; + + if (current->flags & PF_KTHREAD) + return true; + + set_thread_esr(0, esr); + arm64_force_sig_fault(sig, code, addr, + "Uncorrected memory error on access to user memory\n"); + + return true; +} + static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf; @@ -753,7 +767,9 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) */ siaddr = untagged_addr(far); } - arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); + + if (!arm64_do_kernel_sea(siaddr, esr, regs, inf->sig, inf->code)) + arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); return 0; } diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c index c0a3301203bdf7070c33a60d126a40b556686c1e..bfc02568805aea999138fef0ef63c983c2fffaa9 100644 --- a/arch/arm64/mm/fixmap.c 
+++ b/arch/arm64/mm/fixmap.c @@ -121,9 +121,9 @@ void __set_fixmap(enum fixed_addresses idx, ptep = fixmap_pte(addr); if (pgprot_val(flags)) { - set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); + __set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); } else { - pte_clear(&init_mm, addr, ptep); + __pte_clear(&init_mm, addr, ptep); flush_tlb_kernel_range(addr, addr+PAGE_SIZE); } } diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 13fd592228b188658bd25011b66fd0420ca44d66..b1e8d8f8dae4e169fe531e8c68969a41d0aeaa05 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -152,14 +152,14 @@ pte_t huge_ptep_get(pte_t *ptep) { int ncontig, i; size_t pgsize; - pte_t orig_pte = ptep_get(ptep); + pte_t orig_pte = __ptep_get(ptep); if (!pte_present(orig_pte) || !pte_cont(orig_pte)) return orig_pte; ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize); for (i = 0; i < ncontig; i++, ptep++) { - pte_t pte = ptep_get(ptep); + pte_t pte = __ptep_get(ptep); if (pte_dirty(pte)) orig_pte = pte_mkdirty(orig_pte); @@ -184,11 +184,11 @@ static pte_t get_clear_contig(struct mm_struct *mm, unsigned long pgsize, unsigned long ncontig) { - pte_t orig_pte = ptep_get(ptep); + pte_t orig_pte = __ptep_get(ptep); unsigned long i; for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { - pte_t pte = ptep_get_and_clear(mm, addr, ptep); + pte_t pte = __ptep_get_and_clear(mm, addr, ptep); /* * If HW_AFDBM is enabled, then the HW could turn on @@ -236,7 +236,7 @@ static void clear_flush(struct mm_struct *mm, unsigned long i, saddr = addr; for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) - ptep_clear(mm, addr, ptep); + __ptep_get_and_clear(mm, addr, ptep); flush_tlb_range(&vma, saddr, addr); } @@ -254,12 +254,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, if (!pte_present(pte)) { for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) - set_pte_at(mm, addr, ptep, pte); + __set_ptes(mm, addr, ptep, pte, 1); return; } if (!pte_cont(pte)) { - set_pte_at(mm, addr, ptep, pte); + __set_ptes(mm, addr, ptep, pte, 1); return; } @@ -270,7 +270,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, clear_flush(mm, addr, ptep, pgsize, ncontig); for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); + __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); } pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, @@ -400,7 +400,7 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr, ncontig = num_contig_ptes(sz, &pgsize); for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) - pte_clear(mm, addr, ptep); + __pte_clear(mm, addr, ptep); } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, @@ -408,10 +408,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, { int ncontig; size_t pgsize; - pte_t orig_pte = ptep_get(ptep); + pte_t orig_pte = __ptep_get(ptep); if (!pte_cont(orig_pte)) - return ptep_get_and_clear(mm, addr, ptep); + return __ptep_get_and_clear(mm, addr, ptep); ncontig = find_num_contig(mm, addr, ptep, &pgsize); @@ -431,11 +431,11 @@ static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig) { int i; - if (pte_write(pte) != pte_write(ptep_get(ptep))) + if (pte_write(pte) != pte_write(__ptep_get(ptep))) return 1; for (i = 0; i < ncontig; i++) { - pte_t orig_pte = ptep_get(ptep + i); + pte_t orig_pte = __ptep_get(ptep + i); if (pte_dirty(pte) != pte_dirty(orig_pte)) return 1; @@ -459,7 +459,7 @@ int 
huge_ptep_set_access_flags(struct vm_area_struct *vma, pte_t orig_pte; if (!pte_cont(pte)) - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + return __ptep_set_access_flags(vma, addr, ptep, pte, dirty); ncontig = find_num_contig(mm, addr, ptep, &pgsize); dpfn = pgsize >> PAGE_SHIFT; @@ -478,7 +478,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, hugeprot = pte_pgprot(pte); for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); + __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); return 1; } @@ -492,8 +492,8 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, size_t pgsize; pte_t pte; - if (!pte_cont(READ_ONCE(*ptep))) { - ptep_set_wrprotect(mm, addr, ptep); + if (!pte_cont(__ptep_get(ptep))) { + __ptep_set_wrprotect(mm, addr, ptep); return; } @@ -507,7 +507,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, pfn = pte_pfn(pte); for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); + __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); } pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, @@ -517,7 +517,7 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, size_t pgsize; int ncontig; - if (!pte_cont(READ_ONCE(*ptep))) + if (!pte_cont(__ptep_get(ptep))) return ptep_clear_flush(vma, addr, ptep); ncontig = find_num_contig(mm, addr, ptep, &pgsize); @@ -551,7 +551,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr * when the permission changes from executable to non-executable * in cases where cpu is affected with errata #2645198. */ - if (pte_user_exec(READ_ONCE(*ptep))) + if (pte_user_exec(__ptep_get(ptep))) return huge_ptep_clear_flush(vma, addr, ptep); } return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index f17d066e85eb854a06a8949da954ff704137746f..28856f511fb638189e8392fff89c2f8e29426ab2 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -112,8 +112,8 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, if (!early) memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE); next = addr + PAGE_SIZE; - set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); - } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep))); + __set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); + } while (ptep++, addr = next, addr != end && pte_none(__ptep_get(ptep))); } static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, @@ -266,7 +266,7 @@ static void __init kasan_init_shadow(void) * so we should make sure that it maps the zero page read-only. 
*/ for (i = 0; i < PTRS_PER_PTE; i++) - set_pte(&kasan_early_shadow_pte[i], + __set_pte(&kasan_early_shadow_pte[i], pfn_pte(sym_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO)); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 47781bec61719dee864d216774fd3d99e3fb53d3..9c975759cff0084df60d3f5955c7329ba1f6d5bf 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -41,10 +42,6 @@ #include #include -#define NO_BLOCK_MAPPINGS BIT(0) -#define NO_CONT_MAPPINGS BIT(1) -#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ - int idmap_t0sz __ro_after_init; #if VA_BITS > 48 @@ -75,6 +72,15 @@ EXPORT_SYMBOL(empty_zero_page); static DEFINE_SPINLOCK(swapper_pgdir_lock); static DEFINE_MUTEX(fixmap_lock); +static DEFINE_MUTEX(split_linear_mapping_lock); + +static struct split_memory_params { + unsigned long virt; + phys_addr_t size; + pgprot_t prot; + + atomic_t cpu_count; +} split_memory_param; void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) { @@ -169,6 +175,49 @@ bool pgattr_change_is_safe(u64 old, u64 new) return ((old ^ new) & ~mask) == 0; } +/* + * If the physical address of a block-mapping pud/pmd or a contiguous-mapping + * pmd/pte entry lies within the physical range the entry points to, clearing + * the entry would make the corresponding physical range inaccessible, and the + * range could then never be remapped. Such a range must be mapped at PTE + * level when the page table is initialized. + */ +static bool should_clear_cont_pte(pmd_t *pmdp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pte_offset_phys(pmdp, addr); + + return (pa >> CONT_PTE_SHIFT) == (phys >> CONT_PTE_SHIFT); +} + +static bool should_clear_cont_pmd(pud_t *pudp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pmd_offset_phys(pudp, addr); + + return (pa >> CONT_PMD_SHIFT) == (phys >> CONT_PMD_SHIFT); +} + +static bool should_split_pmd(pud_t *pudp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pmd_offset_phys(pudp, addr); + + return (pa >> PMD_SHIFT) == (phys >> PMD_SHIFT); +} + +#ifndef __PAGETABLE_PUD_FOLDED +static bool should_split_pud(p4d_t *p4dp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pud_offset_phys(p4dp, addr); + + return (pa >> PUD_SHIFT) == (phys >> PUD_SHIFT); +} +#else +static bool should_split_pud(p4d_t *p4dp, unsigned long addr, phys_addr_t phys) +{ + return false; +} +#endif +
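The shift-and-compare in these helpers asks a single question: would the page that holds the page-table entries itself fall inside the naturally aligned window the entry is about to map? A minimal userspace sketch of the same test, assuming a 4K granule where CONT_PTE_SHIFT is 16 (16 contiguous PTEs, a 64KiB window); the addresses are made up for illustration::

    #include <stdint.h>
    #include <stdio.h>

    #define CONT_PTE_SHIFT 16 /* assumed: 4K pages, 16 contiguous PTEs */

    /* Mirrors should_clear_cont_pte(): true when the table page at table_pa
     * shares the CONT_PTE-sized window with the range starting at phys, so
     * clearing the entry would unmap the table itself. */
    static int would_self_map(uint64_t table_pa, uint64_t phys)
    {
        return (table_pa >> CONT_PTE_SHIFT) == (phys >> CONT_PTE_SHIFT);
    }

    int main(void)
    {
        printf("%d\n", would_self_map(0x40008000, 0x40000000)); /* 1: same 64KiB window */
        printf("%d\n", would_self_map(0x80000000, 0x40000000)); /* 0: disjoint windows */
        return 0;
    }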
static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot) { @@ -176,16 +225,16 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, ptep = pte_set_fixmap_offset(pmdp, addr); do { - pte_t old_pte = READ_ONCE(*ptep); + pte_t old_pte = __ptep_get(ptep); - set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot)); + __set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot)); /* * After the PTE entry has been populated once, we * only allow updates to the permission attributes. */ BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), - READ_ONCE(pte_val(*ptep)))); + pte_val(__ptep_get(ptep)))); phys += PAGE_SIZE; } while (ptep++, addr += PAGE_SIZE, addr != end); @@ -223,7 +272,8 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, /* use a contiguous mapping if the range is suitably aligned */ if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) && - (flags & NO_CONT_MAPPINGS) == 0) + (flags & NO_CONT_MAPPINGS) == 0 && + !should_clear_cont_pte(pmdp, addr, phys)) __prot = __pgprot(pgprot_val(prot) | PTE_CONT); init_pte(pmdp, addr, next, phys, __prot); @@ -240,6 +290,14 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, pmd_t *pmdp; pmdp = pmd_set_fixmap_offset(pudp, addr); + /* + * The physical address of a PMD with the contiguous flag set might lie + * within the physical range it points to. Clear the CONT flag up front + * to avoid making that range inaccessible. + */ + if (should_clear_cont_pmd(pudp, addr, phys)) + prot = __pgprot(pgprot_val(prot) & ~PTE_CONT); + do { pmd_t old_pmd = READ_ONCE(*pmdp); @@ -247,7 +305,8 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, /* try section mapping first */ if (((addr | next | phys) & ~PMD_MASK) == 0 && - (flags & NO_BLOCK_MAPPINGS) == 0) { + (flags & NO_BLOCK_MAPPINGS) == 0 && + !should_split_pmd(pudp, addr, phys)) { pmd_set_huge(pmdp, phys, prot); /* @@ -340,11 +399,14 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, next = pud_addr_end(addr, end); /* - * For 4K granule only, attempt to put down a 1GB block + * For 4K granule only, attempt to put down a 1GB block. If the + * physical address of the pud itself falls within the range it + * points to, split the block up front instead. */ if (pud_sect_supported() && ((addr | next | phys) & ~PUD_MASK) == 0 && - (flags & NO_BLOCK_MAPPINGS) == 0) { + (flags & NO_BLOCK_MAPPINGS) == 0 && + !should_split_pud(p4dp, addr, phys)) { pud_set_huge(pudp, phys, prot); /* @@ -511,6 +573,9 @@ void __init mark_linear_text_alias_ro(void) #ifdef CONFIG_KFENCE +static unsigned long __ro_after_init +kfence_pool_size = ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE); + bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL; /* early_param() will be parsed before map_mem() below. */ @@ -531,7 +596,7 @@ static phys_addr_t __init arm64_kfence_alloc_pool(void) if (!kfence_early_init) return 0; - kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); + kfence_pool = memblock_phys_alloc(kfence_pool_size, PAGE_SIZE); if (!kfence_pool) { pr_err("failed to allocate kfence pool\n"); kfence_early_init = false; @@ -539,7 +604,7 @@ static phys_addr_t __init arm64_kfence_alloc_pool(void) } /* Temporarily mark as NOMAP. */ - memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE); + memblock_mark_nomap(kfence_pool, kfence_pool_size); return kfence_pool; }
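The runtime pool size above mirrors KFENCE's compile-time formula: each object occupies one data page plus one guard page, and the extra object slot accounts for the pool's leading guard pages, hence ``(CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE``. A quick worked example, assuming the default of 255 objects and 4K pages::

    #include <stdio.h>

    int main(void)
    {
        unsigned long objects = 255;    /* assumed CONFIG_KFENCE_NUM_OBJECTS */
        unsigned long page_size = 4096; /* assumed 4K granule */

        /* (255 + 1) * 2 * 4096 = 2097152 bytes = 2 MiB */
        printf("%lu MiB\n", (objects + 1) * 2 * page_size >> 20);
        return 0;
    }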
@@ -550,11 +615,11 @@ static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) return; /* KFENCE pool needs page-level mapping. */ - __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE, + __map_memblock(pgdp, kfence_pool, kfence_pool + kfence_pool_size, pgprot_tagged(PAGE_KERNEL), - NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); - memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE); - __kfence_pool = phys_to_virt(kfence_pool); + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS | NO_EXEC_MAPPINGS); + memblock_clear_nomap(kfence_pool, kfence_pool_size); + __kfence_pool_early_init = phys_to_virt(kfence_pool); } #else /* CONFIG_KFENCE */ @@ -584,7 +649,7 @@ static void __init map_mem(pgd_t *pgdp) early_kfence_pool = arm64_kfence_alloc_pool(); - if (can_set_direct_map()) + if (!can_set_block_and_cont_map()) flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; /* @@ -854,12 +919,12 @@ static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr, do { ptep = pte_offset_kernel(pmdp, addr); - pte = READ_ONCE(*ptep); + pte = __ptep_get(ptep); if (pte_none(pte)) continue; WARN_ON(!pte_present(pte)); - pte_clear(&init_mm, addr, ptep); + __pte_clear(&init_mm, addr, ptep); flush_tlb_kernel_range(addr, addr + PAGE_SIZE); if (free_mapped) free_hotplug_page_range(pte_page(pte), @@ -987,7 +1052,7 @@ static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, do { ptep = pte_offset_kernel(pmdp, addr); - pte = READ_ONCE(*ptep); + pte = __ptep_get(ptep); /* * This is just a sanity check here which verifies that @@ -1006,7 +1071,7 @@ static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, */ ptep = pte_offset_kernel(pmdp, 0UL); for (i = 0; i < PTRS_PER_PTE; i++) { - if (!pte_none(READ_ONCE(ptep[i]))) + if (!pte_none(__ptep_get(&ptep[i]))) return; } @@ -1310,7 +1375,7 @@ int arch_add_memory(int nid, u64 start, u64 size, VM_BUG_ON(!mhp_range_allowed(start, size, true)); - if (can_set_direct_map()) + if (!can_set_block_and_cont_map()) flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), @@ -1476,7 +1541,7 @@ pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte * when the permission changes from executable to non-executable * in cases where cpu is affected with errata #2645198. */ - if (pte_user_exec(READ_ONCE(*ptep))) + if (pte_user_exec(ptep_get(ptep))) return ptep_clear_flush(vma, addr, ptep); } return ptep_get_and_clear(vma->vm_mm, addr, ptep); @@ -1487,3 +1552,327 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte { set_pte_at(vma->vm_mm, addr, ptep, pte); } + +static void clear_cont_pte_mapping(pmd_t *pmdp, unsigned long addr, + unsigned long end) +{ + pte_t *ptep, *sptep, pte; + unsigned long saddr, next; + int i; + + /* + * Clear the CONT flag of the ptes in the input range. The flag must be + * cleared at CONT_PTE granularity, i.e. for whole naturally aligned + * blocks at a time. + */ + addr &= CONT_PTE_MASK; + if (end & ~CONT_PTE_MASK) + end = (end + CONT_PTE_SIZE) & CONT_PTE_MASK; + + do { + pgprot_t prot; + unsigned long pfn; + + saddr = addr; + next = pte_cont_addr_end(addr, end); + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + + if (pte_none(pte)) + continue; + + if (pte_cont(READ_ONCE(*ptep))) { + sptep = ptep; + prot = pte_pgprot(pte_mknoncont(pte)); + pfn = pte_pfn(pte); + + /* + * Changing attributes of contiguous entries requires the + * Break-Before-Make approach. See ARM DDI + * 0487A.k_iss10775, "Misprogramming of the Contiguous bit", + * page D4-1762.
+ */ + for (i = 0; i < CONT_PTES; i++, ptep++) + pte_clear(&init_mm, addr, ptep); + + for (i = 0; i < CONT_PTES; i++, saddr += PAGE_SIZE) + __flush_tlb_kernel_pgtable_entry(saddr); + + for (i = 0; i < CONT_PTES; i++, sptep++, pfn++) + set_pte(sptep, pfn_pte(pfn, prot)); + } + } while (addr = next, addr < end); +} + +static void clear_cont_pmd_mapping(pud_t *pudp, unsigned long addr, + unsigned long end) +{ + pmd_t *pmdp, *spmdp, pmd; + unsigned long saddr, next; + int i; + + addr &= CONT_PMD_MASK; + if (end & ~CONT_PMD_MASK) + end = (end + CONT_PMD_SIZE) & CONT_PMD_MASK; + + do { + pgprot_t prot; + unsigned long pfn, pfn_offset = PMD_SIZE >> PAGE_SHIFT; + + saddr = addr; + next = pmd_cont_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + + if (pte_cont(pmd_pte(pmd))) { + spmdp = pmdp; + prot = pte_pgprot(pmd_pte(pmd_mknoncont(pmd))); + pfn = pmd_pfn(pmd); + + for (i = 0; i < CONT_PMDS; i++, pmdp++) + pmd_clear(pmdp); + + for (i = 0; i < CONT_PMDS; i++, saddr += PMD_SIZE) + __flush_tlb_kernel_pgtable_entry(saddr); + + for (i = 0; i < CONT_PMDS; i++, spmdp++, pfn += pfn_offset) + set_pmd(spmdp, pfn_pmd(pfn, prot)); + } + } while (addr = next, addr < end); +}
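Both clearing helpers first round the request outward to whole contiguous windows, since the CONT flag can only change for a complete, naturally aligned block. A runnable illustration of that rounding, assuming a 64KiB CONT_PTE window (4K granule); the addresses are arbitrary::

    #include <stdint.h>
    #include <stdio.h>

    #define CONT_PTE_SIZE 0x10000UL /* assumed: 16 x 4K pages */
    #define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1))

    int main(void)
    {
        uint64_t addr = 0x40012345, end = 0x40023456;

        addr &= CONT_PTE_MASK;      /* round down: 0x40010000 */
        if (end & ~CONT_PTE_MASK)   /* round up:   0x40030000 */
            end = (end + CONT_PTE_SIZE) & CONT_PTE_MASK;

        printf("0x%jx-0x%jx\n", (uintmax_t)addr, (uintmax_t)end);
        return 0;
    }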
+ +static void split_pmd_mapping(pud_t *pudp, unsigned long addr, unsigned long end, + pgprot_t prot, int flags) +{ + pmd_t *pmdp, pmd, split_pmd; + unsigned long next; + int new_flags = 0; + + /* + * If the splitting request asks for non-contiguous mappings, clear the + * contiguous flag of the pmds covering the range first. + */ + if (flags & NO_CONT_MAPPINGS) + clear_cont_pmd_mapping(pudp, addr, end); + + do { + next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + + if (!pmd_exec(pmd)) + flags |= NO_EXEC_MAPPINGS; + + if (pmd_sect(pmd)) { + phys_addr_t phys, pte_phys; + pgprot_t orig_prot; + + phys = __virt_to_phys(addr); + + /* + * Keep the original protections, but switch the + * descriptor type from section to table. + */ + orig_prot = __pgprot(pgprot_val(pte_pgprot(pmd_pte(pmd))) | + PMD_TYPE_TABLE); + + /* + * Allocate a new page to hold the ptes that re-create + * this mapping. + */ + pte_phys = pgd_pgtable_alloc(PAGE_SHIFT); + split_pmd = pfn_pmd(__phys_to_pfn(pte_phys), orig_prot); + + /* + * If addr/next is not PMD aligned, re-create the + * original mapping for the remainder of the PMD outside + * the split range. + */ + if (addr & ~PMD_MASK) + alloc_init_cont_pte(&split_pmd, addr & PMD_MASK, addr, + phys & PMD_MASK, prot, + pgd_pgtable_alloc, new_flags); + if (next & ~PMD_MASK) + alloc_init_cont_pte(&split_pmd, next, + (next + PMD_SIZE) & PMD_MASK, + phys + next - addr, prot, + pgd_pgtable_alloc, new_flags); + + alloc_init_cont_pte(&split_pmd, addr, next, phys, prot, + pgd_pgtable_alloc, flags); + + /* + * Obey the break-before-make rule when splitting the + * page table, otherwise TLB conflicts might trigger + * CONSTRAINED UNPREDICTABLE behaviour. Thus clear the + * original pmd entry and flush it, then install the + * newly allocated pmd table. + */ + pmd_clear(pmdp); + __flush_tlb_kernel_pgtable_entry(addr); + set_pmd(pmdp, split_pmd); + } else { + clear_cont_pte_mapping(pmdp, addr, next); + } + } while (addr = next, addr < end); +} + +static void split_pud_mapping(p4d_t *p4dp, unsigned long addr, unsigned long end, + pgprot_t prot, int flags) +{ + pud_t *pudp, pud, split_pud; + unsigned long next; + int new_flags = 0; + + do { + next = pud_addr_end(addr, end); + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + + if (pud_none(pud)) + continue; + + WARN_ON(!pud_present(pud)); + + if (!pud_exec(pud)) + flags |= NO_EXEC_MAPPINGS; + + if (pud_sect(pud)) { + phys_addr_t phys, pmd_phys; + pgprot_t orig_prot; + + phys = __virt_to_phys(addr); + + orig_prot = __pgprot(pgprot_val(pte_pgprot(pud_pte(pud))) + PUD_TYPE_TABLE); + + pmd_phys = pgd_pgtable_alloc(PMD_SHIFT); + split_pud = pfn_pud(__phys_to_pfn(pmd_phys), orig_prot); + + /* + * If addr/next is not PUD aligned, re-create the + * original block and contiguous mappings for the + * remainder of the PUD outside the split range. + */ + if (addr & ~PUD_MASK) + alloc_init_cont_pmd(&split_pud, addr & PUD_MASK, + addr, phys & PUD_MASK, + prot, pgd_pgtable_alloc, new_flags); + if (next & ~PUD_MASK) + alloc_init_cont_pmd(&split_pud, next, + (next + PUD_SIZE) & PUD_MASK, + phys + next - addr, + prot, pgd_pgtable_alloc, new_flags); + + alloc_init_cont_pmd(&split_pud, addr, next, phys, prot, + pgd_pgtable_alloc, flags); + + /* + * Obey the break-before-make rule when splitting the + * page table, otherwise TLB conflicts might trigger + * CONSTRAINED UNPREDICTABLE behaviour. Thus clear the + * original pud entry and flush it, then install the + * newly allocated pud table. + */ + pud_clear(pudp); + __flush_tlb_kernel_pgtable_entry(addr); + set_pud(pudp, split_pud); + } else { + split_pmd_mapping(pudp, addr, next, prot, flags); + } + } while (addr = next, addr < end); +} + +static void split_p4d_mapping(pgd_t *pgdp, unsigned long addr, unsigned long end, + pgprot_t prot, int flags) +{ + p4d_t *p4dp, p4d; + unsigned long next; + + do { + next = p4d_addr_end(addr, end); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + + if (p4d_none(p4d)) + continue; + + WARN_ON(!p4d_present(p4d)); + +#if CONFIG_PGTABLE_LEVELS > 3 + /* + * If the original p4d mapping is not executable, keep it + * non-executable after splitting as well. + */ + if (!p4d_exec(p4d)) + flags |= NO_EXEC_MAPPINGS; +#endif + + split_pud_mapping(p4dp, addr, next, prot, flags); + } while (addr = next, addr < end); +} + +void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot) +{ + pgd_t *pgdp, pgd; + unsigned long addr, next, end; + int flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + + addr = virt & PAGE_MASK; + end = PAGE_ALIGN(virt + size); + prot = pgprot_tagged(prot); + + do { + next = pgd_addr_end(addr, end); + pgdp = pgd_offset_k(addr); + pgd = READ_ONCE(*pgdp); + + if (pgd_none(pgd)) + continue; + + WARN_ON(!pgd_present(pgd)); + + split_p4d_mapping(pgdp, addr, next, prot, flags); + } while (addr = next, addr < end); +} + +static int __split_linear_mapping_after_init(void *data) +{ + struct split_memory_params *param = data; + + if (atomic_inc_return(&param->cpu_count) == 1) { + split_linear_mapping(param->virt, param->size, param->prot); + atomic_inc(&param->cpu_count); + } else { + while (atomic_read(&param->cpu_count) <= num_online_cpus()) + cpu_relax(); + } + return 0; +} +
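__split_linear_mapping_after_init() is a classic rendezvous under stop_machine(): the first CPU to bump the counter performs the split and then bumps it once more, while every other CPU spins until the count exceeds the number of online CPUs. A hedged userspace model of the same election, with threads standing in for CPUs and C11 atomics for the kernel's atomic_t::

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 4
    static atomic_int cpu_count;

    static void do_split(void) { puts("one thread does the split"); }

    static void *rendezvous(void *arg)
    {
        (void)arg;
        if (atomic_fetch_add(&cpu_count, 1) + 1 == 1) {
            do_split();                       /* first arrival does the work... */
            atomic_fetch_add(&cpu_count, 1);  /* ...then releases the spinners */
        } else {
            while (atomic_load(&cpu_count) <= NCPUS)
                ;                             /* everyone else busy-waits */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NCPUS];
        for (int i = 0; i < NCPUS; i++)
            pthread_create(&t[i], NULL, rendezvous, NULL);
        for (int i = 0; i < NCPUS; i++)
            pthread_join(t[i], NULL);
        return 0;
    }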
+/* + * When splitting the kernel page table following the Break-Before-Make + * principle, other CPUs might access an address mapped by an already + * cleared entry before it has been remapped. Thus stop_machine() is used + * to synchronize all CPUs and avoid such kernel page faults. + */ +void split_linear_mapping_after_init(unsigned long virt, phys_addr_t size, + pgprot_t prot) +{ + mutex_lock(&split_linear_mapping_lock); + + split_memory_param.virt = virt; + split_memory_param.size = size; + split_memory_param.prot = prot; + atomic_set(&split_memory_param.cpu_count, 0); + + stop_machine(__split_linear_mapping_after_init, &split_memory_param, cpu_online_mask); + + mutex_unlock(&split_linear_mapping_lock); +} diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 0a62f458c5cb023b01c894c555d120ce74941fea..a756056bbd0aff9a68d164e2976254566758adfd 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -26,22 +26,28 @@ bool can_set_direct_map(void) * rodata_full and DEBUG_PAGEALLOC require linear map to be * mapped at page granularity, so that it is possible to * protect/unprotect single pages. - * - * KFENCE pool requires page-granular mapping if initialized late. */ - return rodata_full || debug_pagealloc_enabled() || - arm64_kfence_can_set_direct_map(); + return rodata_full || debug_pagealloc_enabled(); +} + +/* + * If rodata_full or debug_pagealloc is enabled, the linear mapping range must + * not use block or contiguous mappings; this helper combines both checks. + */ +bool can_set_block_and_cont_map(void) +{ + return !rodata_full && !debug_pagealloc_enabled(); } static int change_page_range(pte_t *ptep, unsigned long addr, void *data) { struct page_change_data *cdata = data; - pte_t pte = READ_ONCE(*ptep); + pte_t pte = __ptep_get(ptep); pte = clear_pte_bit(pte, cdata->clear_mask); pte = set_pte_bit(pte, cdata->set_mask); - set_pte(ptep, pte); + __set_pte(ptep, pte); return 0; } @@ -108,6 +114,16 @@ static int change_memory_common(unsigned long addr, int numpages, if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY || pgprot_val(clear_mask) == PTE_RDONLY)) { for (i = 0; i < area->nr_pages; i++) { + unsigned long virt = (unsigned long)page_address(area->pages[i]); + + /* + * Only split the linear mapping when the attribute is + * changed to read-only; other attribute changes are not + * affected by the mapping type. + */ + if (pgprot_val(set_mask) == PTE_RDONLY && can_set_block_and_cont_map()) + split_linear_mapping_after_init(virt, PAGE_SIZE, PAGE_KERNEL); + __change_memory_common((u64)page_address(area->pages[i]), PAGE_SIZE, set_mask, clear_mask); }
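The ordering in the hunk above is the point: while a block or contiguous mapping may still cover the page, the linear-map alias must first be split down to PTE granularity; only then can one page's attributes change without disturbing its neighbours. Condensed to a sketch (the identifiers are the kernel helpers added by this patch, not literal code)::

    /* Per-page flow in change_memory_common(), condensed for illustration. */
    if (pgprot_val(set_mask) == PTE_RDONLY && can_set_block_and_cont_map())
        /* Break-Before-Make split of the covering block mapping */
        split_linear_mapping_after_init(virt, PAGE_SIZE, PAGE_KERNEL);

    /* a single PTE now backs this page, so the change is page-exact */
    __change_memory_common(virt, PAGE_SIZE, set_mask, clear_mask);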
@@ -162,6 +178,18 @@ int set_memory_valid(unsigned long addr, int numpages, int enable) __pgprot(PTE_VALID)); } +int set_memory_np(unsigned long addr, int numpages) +{ + /* + * If the addr belongs to the linear mapping range, split it to PTE + * level before changing the page table attributes. + */ + if (can_set_block_and_cont_map() && __is_lm_address(addr)) + split_linear_mapping_after_init(addr, PAGE_SIZE * numpages, PAGE_KERNEL); + + return set_memory_valid(addr, numpages, 0); +} + int set_direct_map_invalid_noflush(struct page *page) { struct page_change_data data = { @@ -219,6 +247,9 @@ bool kernel_page_present(struct page *page) pte_t *ptep; unsigned long addr = (unsigned long)page_address(page); + if (!can_set_direct_map()) + return true; + pgdp = pgd_offset_k(addr); if (pgd_none(READ_ONCE(*pgdp))) return false; @@ -242,5 +273,5 @@ bool kernel_page_present(struct page *page) return true; ptep = pte_offset_kernel(pmdp, addr); - return pte_valid(READ_ONCE(*ptep)); + return pte_valid(__ptep_get(ptep)); } diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c index 7b14df3c64776f4c61a81aa85af317344e741df2..5139a28130c0888555395d574c2b1373e840f014 100644 --- a/arch/arm64/mm/trans_pgd.c +++ b/arch/arm64/mm/trans_pgd.c @@ -33,7 +33,7 @@ static void *trans_alloc(struct trans_pgd_info *info) static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr) { - pte_t pte = READ_ONCE(*src_ptep); + pte_t pte = __ptep_get(src_ptep); if (pte_valid(pte)) { /* @@ -41,7 +41,7 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr) * read only (code, rodata). Clear the RDONLY bit from * the temporary mappings we use during restore. */ - set_pte(dst_ptep, pte_mkwrite_novma(pte)); + __set_pte(dst_ptep, pte_mkwrite_novma(pte)); } else if ((debug_pagealloc_enabled() || is_kfence_address((void *)addr)) && !pte_none(pte)) { /* @@ -55,7 +55,7 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr) */ BUG_ON(!pfn_valid(pte_pfn(pte))); - set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_novma(pte))); + __set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_novma(pte))); } } diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index c251ef3caae560ecaf83fa9d2134a889e2e2348e..bbb90102781783ccad2d4485e8058900f79ece4f 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -56,6 +56,7 @@ HW_DBM KVM_HVHE KVM_PROTECTED_MODE MISMATCHED_CACHE_TYPE +MPAM MTE MTE_ASYMM SME diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 76ce150e7347e56e2f497e905c02dc28733e68e3..0e7d7f327410ab33cbdaf090dbae92a2bf149eca 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -2530,6 +2546,22 @@ Res0 1 Field 0 EN EndSysreg +Sysreg MPAMIDR_EL1 3 0 10 4 4 +Res0 63:62 +Field 61 HAS_SDEFLT +Field 60 HAS_FORCE_NS +Field 59 SP4 +Field 58 HAS_TIDR +Field 57 HAS_ALTSP +Res0 56:40 +Field 39:32 PMG_MAX +Res0 31:21 +Field 20:18 VPMR_MAX +Field 17 HAS_HCR +Res0 16 +Field 15:0 PARTID_MAX +EndSysreg + Sysreg LORID_EL1 3 0 10 4 7 Res0 63:24 Field 23:16 LD @@ -2537,6 +2553,22 @@ Res0 15:8 Field 7:0 LR EndSysreg +Sysreg MPAM1_EL1 3 0 10 5 0 +Res0 63:48 +Field 47:40 PMG_D +Field 39:32 PMG_I +Field 31:16 PARTID_D +Field 15:0 PARTID_I +EndSysreg + +Sysreg MPAM0_EL1 3 0 10 5 1 +Res0 63:48 +Field 47:40 PMG_D +Field 39:32 PMG_I +Field 31:16 PARTID_D +Field 15:0 PARTID_I +EndSysreg +
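The new MPAM layouts are plain fixed-width bitfields, so extracting a field is a shift-and-mask over the ranges listed above. A small illustration in C using the MPAM1_EL1 layout just defined (the sample register value is made up)::

    #include <stdint.h>
    #include <stdio.h>

    /* Field ranges copied from the MPAM1_EL1 description above. */
    #define FIELD(reg, hi, lo) (((reg) >> (lo)) & ((1ULL << ((hi) - (lo) + 1)) - 1))

    int main(void)
    {
        uint64_t mpam1_el1 = 0x0000002a00050003ULL; /* made-up sample value */

        printf("PMG_I    = %llu\n", (unsigned long long)FIELD(mpam1_el1, 39, 32)); /* 42 */
        printf("PARTID_D = %llu\n", (unsigned long long)FIELD(mpam1_el1, 31, 16)); /* 5 */
        printf("PARTID_I = %llu\n", (unsigned long long)FIELD(mpam1_el1, 15, 0));  /* 3 */
        return 0;
    }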
Sysreg ISR_EL1 3 0 12 1 0 Res0 63:11 Field 10 IS diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild index b01f5cdb27e03d778dfa400e370037c39cd7abed..beb8499dd8ed84330beecbcd61977df0aa3474f8 100644 --- a/arch/loongarch/Kbuild +++ b/arch/loongarch/Kbuild @@ -3,5 +3,7 @@ obj-y += mm/ obj-y += net/ obj-y += vdso/ +obj-$(CONFIG_KVM) += kvm/ + # for cleaning subdir- += boot diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 9fd8644a9a4c6a679f81d35da9c91fff00b48b54..d31c89c87210c3b361f2a255c4f2b003406d9c06 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -75,6 +75,7 @@ config LOONGARCH select GENERIC_ENTRY select GENERIC_GETTIMEOFDAY select GENERIC_IOREMAP if !ARCH_IOREMAP + select GENERIC_IRQ_MATRIX_ALLOCATOR select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW @@ -131,19 +132,25 @@ config LOONGARCH select HAVE_KPROBES select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES + select HAVE_KVM + select HAVE_LIVEPATCH select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI + select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB select HAVE_PCI select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RELIABLE_STACKTRACE if UNWINDER_ORC select HAVE_RETHOOK select HAVE_RSEQ select HAVE_SAMPLE_FTRACE_DIRECT select HAVE_SAMPLE_FTRACE_DIRECT_MULTI select HAVE_SETUP_PER_CPU_AREA if NUMA + select HAVE_STACK_VALIDATION if HAVE_OBJTOOL select HAVE_STACKPROTECTOR + select ARCH_HAS_PHYS_TO_DMA select HAVE_SYSCALL_TRACEPOINTS select HAVE_TIF_NOHZ select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP @@ -256,6 +263,9 @@ config AS_HAS_EXPLICIT_RELOCS config AS_HAS_FCSR_CLASS def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0) +config AS_HAS_THIN_ADD_SUB + def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) || AS_IS_LLVM + config AS_HAS_LSX_EXTENSION def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0) @@ -265,6 +275,9 @@ config AS_HAS_LASX_EXTENSION config AS_HAS_LBT_EXTENSION def_bool $(as-instr,movscr2gr \$a0$(comma)\$scr0) +config AS_HAS_LVZ_EXTENSION + def_bool $(as-instr,hvcl 0) + menu "Kernel type and options" source "kernel/Kconfig.hz" @@ -423,7 +436,6 @@ config SMP config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SMP - select GENERIC_IRQ_MIGRATION help Say Y here to allow turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. @@ -561,6 +573,15 @@ config CPU_HAS_PREFETCH bool default y +config PARAVIRT + bool "Enable paravirtualization code" + depends on AS_HAS_LVZ_EXTENSION + help + This changes the kernel so it can modify itself when it is run + under a hypervisor, potentially improving performance significantly + over full virtualization. However, when run without a hypervisor + the kernel is theoretically slower and slightly larger. + config ARCH_SUPPORTS_KEXEC def_bool y @@ -605,6 +626,19 @@ config RANDOMIZE_BASE_MAX_OFFSET This is limited by the size of the lower address memory, 256MB. +config PARAVIRT_TIME_ACCOUNTING + bool "Paravirtual steal time accounting" + depends on PARAVIRT + help + Select this option to enable fine granularity task steal time + accounting. Time spent executing other tasks in parallel with + the current vCPU is discounted from the vCPU power. To account for + that, there can be a small performance impact. + + If in doubt, say N here.
+ +source "kernel/livepatch/Kconfig" + endmenu config ARCH_SELECT_MEMORY_MODEL @@ -653,7 +696,10 @@ config ARCH_SUSPEND_POSSIBLE config ARCH_HIBERNATION_POSSIBLE def_bool y +source "drivers/cpufreq/Kconfig" source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" endmenu + +source "arch/loongarch/kvm/Kconfig" diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug index 8d36aab530083b9c57355ea0dd96f69e23a64765..8b2ce5b5d43e8f7df0af14019d2c8b28a7da1511 100644 --- a/arch/loongarch/Kconfig.debug +++ b/arch/loongarch/Kconfig.debug @@ -26,4 +26,16 @@ config UNWINDER_PROLOGUE Some of the addresses it reports may be incorrect (but better than the Guess unwinder). +config UNWINDER_ORC + bool "ORC unwinder" + depends on HAVE_OBJTOOL + select OBJTOOL + help + This option enables the ORC (Oops Rewind Capability) unwinder for + unwinding kernel stack traces. It uses a custom data format which is + a simplified version of the DWARF Call Frame Information standard. + + Enabling this option will increase the kernel's runtime memory usage + by roughly 2-4MB, depending on your kernel config. + endchoice diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index 81e8089c9c4f1898cb9334856e6f96640d76ac7c..e05c10b044560278ddc8bb54e88251d139238ef4 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -25,6 +25,18 @@ endif 32bit-emul = elf32loongarch 64bit-emul = elf64loongarch +ifdef CONFIG_UNWINDER_ORC +orc_hash_h := arch/$(SRCARCH)/include/generated/asm/orc_hash.h +orc_hash_sh := $(srctree)/scripts/orc_hash.sh +targets += $(orc_hash_h) +quiet_cmd_orc_hash = GEN $@ + cmd_orc_hash = mkdir -p $(dir $@); \ + $(CONFIG_SHELL) $(orc_hash_sh) < $< > $@ +$(orc_hash_h): $(srctree)/arch/loongarch/include/asm/orc_types.h $(orc_hash_sh) FORCE + $(call if_changed,orc_hash) +archprepare: $(orc_hash_h) +endif + ifdef CONFIG_DYNAMIC_FTRACE KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY CC_FLAGS_FTRACE := -fpatchable-function-entry=2 @@ -68,8 +80,6 @@ LDFLAGS_vmlinux += -static -n -nostdlib ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS cflags-y += $(call cc-option,-mexplicit-relocs) KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access) -KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax) -KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax) else cflags-y += $(call cc-option,-mno-explicit-relocs) KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel @@ -78,6 +88,15 @@ KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs endif +KBUILD_AFLAGS += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax) +KBUILD_CFLAGS += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax) +KBUILD_AFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub) +KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub) + +ifdef CONFIG_OBJTOOL +KBUILD_CFLAGS += -fno-jump-tables +endif + ifeq ($(CONFIG_RELOCATABLE),y) KBUILD_CFLAGS_KERNEL += -fPIE LDFLAGS_vmlinux += -static -pie
--no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs) diff --git a/arch/loongarch/configs/anolis-debug_defconfig b/arch/loongarch/configs/anolis-debug_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..4765e24c2f06d30ba2612fc1df55b0405987fcfa --- /dev/null +++ b/arch/loongarch/configs/anolis-debug_defconfig @@ -0,0 +1,8853 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/loongarch 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_USERMODE_DRIVER=y +# CONFIG_BPF_PRELOAD is not set +# CONFIG_BPF_LSM is not set +# end of BPF subsystem + +CONFIG_PREEMPT_VOLUNTARY_BUILD=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +# CONFIG_SCHED_CORE is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +# 
CONFIG_PSI_DEFAULT_DISABLED is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +# CONFIG_IKCONFIG is not set +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN=y +CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +# CONFIG_DEBUG_RSEQ is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_LOONGARCH=y +CONFIG_64BIT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_L1_CACHE_SHIFT=6 +CONFIG_LOCKDEP_SUPPORT=y 
+CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MACH_LOONGSON64=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_PGTABLE_3LEVEL=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_AS_HAS_EXPLICIT_RELOCS=y +CONFIG_AS_HAS_FCSR_CLASS=y +CONFIG_AS_HAS_LSX_EXTENSION=y +CONFIG_AS_HAS_LASX_EXTENSION=y +CONFIG_AS_HAS_LBT_EXTENSION=y +CONFIG_AS_HAS_LVZ_EXTENSION=y + +# +# Kernel type and options +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +# CONFIG_4KB_3LEVEL is not set +# CONFIG_4KB_4LEVEL is not set +# CONFIG_16KB_2LEVEL is not set +CONFIG_16KB_3LEVEL=y +# CONFIG_64KB_2LEVEL is not set +# CONFIG_64KB_3LEVEL is not set +CONFIG_CMDLINE="" +CONFIG_CMDLINE_BOOTLOADER=y +# CONFIG_CMDLINE_EXTEND is not set +# CONFIG_CMDLINE_FORCE is not set +CONFIG_DMI=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_SCHED_SMT=y +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y +CONFIG_NR_CPUS=256 +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_FORCE_MAX_ORDER=11 +CONFIG_ARCH_IOREMAP=y +CONFIG_ARCH_WRITECOMBINE=y +CONFIG_ARCH_STRICT_ALIGN=y +CONFIG_CPU_HAS_FPU=y +CONFIG_CPU_HAS_LSX=y +CONFIG_CPU_HAS_LASX=y +CONFIG_CPU_HAS_LBT=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_ARCH_SELECTS_CRASH_DUMP=y +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_BASE_MAX_OFFSET=0x01000000 +CONFIG_SECCOMP=y +# end of Kernel type and options + +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=12 +CONFIG_ARCH_MMAP_RND_BITS_MAX=18 +CONFIG_ARCH_SUPPORTS_UPROBES=y + +# +# Power management options +# +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_CPUFREQ_DT_PLATDEV is not set +CONFIG_LOONGSON3_ACPI_CPUFREQ=y +# end of CPU Frequency scaling + +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_SLEEP=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=y +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=y +CONFIG_ACPI_DOCK=y 
+CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +CONFIG_ACPI_NUMA=y +# CONFIG_ACPI_HMAT is not set +CONFIG_ACPI_WATCHDOG=y +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_PPTT=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +# end of Power management options + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y + +# +# General architecture-dependent options +# +CONFIG_GENERIC_ENTRY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_KRETPROBE_ON_RETHOOK=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_MERGE_VMAS=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_TIF_NOHZ=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=12 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +# CONFIG_COMPAT_32BIT_TIME is not set +CONFIG_ARCH_HAS_PHYS_TO_DMA=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT=0 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 
+CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_WBT_MQ=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_FC_APPID is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y 
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="zstd" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLUB_TINY is not set +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y 
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ZONE_DMA32=y +CONFIG_HMM_MIRROR=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +# CONFIG_LRU_GEN is not set +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +# CONFIG_DAMON is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_XFRM_ESPINTCP=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_RENO=y +CONFIG_DEFAULT_TCP_CONG="reno" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m 
+CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +# CONFIG_NF_CONNTRACK_PROCFS is not set +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m 
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# 
+CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BPFILTER=y +CONFIG_BPFILTER_UMH=m +CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration +# +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_TFRC_LIB=y +CONFIG_IP_DCCP_TFRC_DEBUG=y +# end of DCCP CCIDs Configuration + +# +# DCCP Kernel Hacking +# +CONFIG_IP_DCCP_DEBUG=y +# end of DCCP Kernel Hacking + +CONFIG_IP_SCTP=m +CONFIG_SCTP_DBG_OBJCNT=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y 
+CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y +CONFIG_L2TP=m +# CONFIG_L2TP_DEBUGFS is not set +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +# CONFIG_BRIDGE_CFM is not set +CONFIG_NET_DSA=m +# CONFIG_NET_DSA_TAG_NONE is not set +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM_COMMON=m +CONFIG_NET_DSA_TAG_BRCM=m +# CONFIG_NET_DSA_TAG_BRCM_LEGACY is not set +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +# CONFIG_NET_DSA_TAG_HELLCREEK is not set +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA_COMMON=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +# CONFIG_NET_DSA_TAG_OCELOT_8021Q is not set +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +# CONFIG_NET_DSA_TAG_RTL8_4 is not set +# CONFIG_NET_DSA_TAG_RZN1_A5PSW is not set +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m +# CONFIG_NET_DSA_TAG_XRS700X is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_MQPRIO_LIB=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_FQ_PIE is not set +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_CANID is not set +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m 
+CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_BATMAN_V=y +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y +CONFIG_BATMAN_ADV_DEBUG=y +# CONFIG_BATMAN_ADV_TRACING is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_HSR=m +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y +# CONFIG_NCSI_OEM_CMD_KEEP_PHY is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NET_DROP_MONITOR is not set +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +# CONFIG_CAN_J1939 is not set +# CONFIG_CAN_ISOTP is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_MSFTEXT is not set +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_POLL_SYNC=y +# CONFIG_BT_HCIBTUSB_BCM is not set +# CONFIG_BT_HCIBTUSB_MTK is not set +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set +CONFIG_BT_HCIBCM203X=m +# CONFIG_BT_HCIBCM4377 is not set +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +# CONFIG_BT_MTKSDIO is not set +# CONFIG_BT_VIRTIO is not set +# end of Bluetooth device drivers + +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y 
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_FD=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_NET_9P_RDMA is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_EDR is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_ARCH_FALLBACKS=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +# CONFIG_PCIE_BUS_TUNE_OFF is not set +CONFIG_PCIE_BUS_DEFAULT=y +# CONFIG_PCIE_BUS_SAFE is not set +# CONFIG_PCIE_BUS_PERFORMANCE is not set +# CONFIG_PCIE_BUS_PEER2PEER is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# +# CONFIG_PCI_FTPCI100 is not set +# CONFIG_PCI_HOST_GENERIC is not set +CONFIG_PCI_LOONGSON=y +# CONFIG_PCIE_MICROCHIP_HOST is not set +# CONFIG_PCIE_XILINX is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set +CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_DISC_TIMEOUT=30 +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +# CONFIG_RAPIDIO_DMA_ENGINE is not set +# CONFIG_RAPIDIO_DEBUG is not set +CONFIG_RAPIDIO_ENUM_BASIC=m 
+CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m + +# +# RapidIO Switch drivers +# +# CONFIG_RAPIDIO_CPS_XX is not set +# CONFIG_RAPIDIO_CPS_GEN2 is not set +# CONFIG_RAPIDIO_RXS_GEN3 is not set +# end of RapidIO Switch drivers + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_LOADER_COMPRESS_XZ=y +# CONFIG_FW_LOADER_COMPRESS_ZSTD is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_MOXTET is not set +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# end of ARM System Control and Management Interface Protocol + +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT=m +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y +CONFIG_EFI_ZBOOT=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
+# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +CONFIG_MTD_RAM=m +CONFIG_MTD_ROM=m +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +CONFIG_MTD_BLOCK2MTD=m + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set +CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_MTD_UBI_BLOCK=y +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +CONFIG_PARPORT_PC_FIFO=y +# CONFIG_PARPORT_PC_SUPERIO is not set +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +# CONFIG_ZRAM_DEF_COMP_LZORLE is not set +CONFIG_ZRAM_DEF_COMP_ZSTD=y +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +# CONFIG_ZRAM_DEF_COMP_842 is not set +CONFIG_ZRAM_DEF_COMP="zstd" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_CDROM_PKTCDVD=m 
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_VIRTIO_BLK=m
+CONFIG_BLK_DEV_RBD=m
+# CONFIG_BLK_DEV_UBLK is not set
+
+#
+# NVME Support
+#
+CONFIG_NVME_CORE=m
+CONFIG_BLK_DEV_NVME=y
+CONFIG_NVME_MULTIPATH=y
+# CONFIG_NVME_VERBOSE_ERRORS is not set
+# CONFIG_NVME_HWMON is not set
+CONFIG_NVME_FABRICS=m
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=m
+CONFIG_NVME_TCP=m
+# CONFIG_NVME_AUTH is not set
+CONFIG_NVME_TARGET=m
+CONFIG_NVME_TARGET_PASSTHRU=y
+CONFIG_NVME_TARGET_LOOP=m
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=m
+CONFIG_NVME_TARGET_FCLOOP=m
+CONFIG_NVME_TARGET_TCP=m
+# CONFIG_NVME_TARGET_AUTH is not set
+# end of NVME Support
+
+#
+# Misc devices
+#
+CONFIG_SENSORS_LIS3LV02D=m
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_PHANTOM is not set
+CONFIG_TIFM_CORE=m
+CONFIG_TIFM_7XX1=m
+# CONFIG_ICS932S401 is not set
+CONFIG_ENCLOSURE_SERVICES=m
+# CONFIG_HP_ILO is not set
+CONFIG_APDS9802ALS=m
+CONFIG_ISL29003=m
+CONFIG_ISL29020=m
+CONFIG_SENSORS_TSL2550=m
+CONFIG_SENSORS_BH1770=m
+CONFIG_SENSORS_APDS990X=m
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+# CONFIG_DW_XDATA_PCIE is not set
+# CONFIG_PCI_ENDPOINT_TEST is not set
+# CONFIG_XILINX_SDFEC is not set
+CONFIG_MISC_RTSX=m
+# CONFIG_HISI_HIKEY_USB is not set
+# CONFIG_OPEN_DICE is not set
+# CONFIG_VCPU_STALL_DETECTOR is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+CONFIG_EEPROM_LEGACY=m
+CONFIG_EEPROM_MAX6875=m
+CONFIG_EEPROM_93CX6=m
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_EEPROM_IDT_89HPESX is not set
+# CONFIG_EEPROM_EE1004 is not set
+# end of EEPROM support
+
+CONFIG_CB710_CORE=m
+# CONFIG_CB710_DEBUG is not set
+CONFIG_CB710_DEBUG_ASSUMPTIONS=y
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# end of Texas Instruments shared transport line discipline
+
+CONFIG_SENSORS_LIS3_I2C=m
+CONFIG_ALTERA_STAPL=m
+# CONFIG_GENWQE is not set
+# CONFIG_ECHO is not set
+# CONFIG_BCM_VK is not set
+# CONFIG_MISC_ALCOR_PCI is not set
+CONFIG_MISC_RTSX_PCI=m
+CONFIG_MISC_RTSX_USB=m
+CONFIG_UACCE=m
+CONFIG_PVPANIC=y
+# CONFIG_PVPANIC_MMIO is not set
+# CONFIG_PVPANIC_PCI is not set
+# CONFIG_GP_PCI1XXXX is not set
+# end of Misc devices
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+CONFIG_RAID_ATTRS=y
+CONFIG_SCSI_COMMON=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_BLK_DEV_BSG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=y
+CONFIG_SCSI_SAS_LIBSAS=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SRP_ATTRS=m
+# end of SCSI Transports
+
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+# CONFIG_SCSI_CXGB3_ISCSI is not set
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_SCSI_BNX2X_FCOE=m
+CONFIG_BE2ISCSI=m
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+CONFIG_SCSI_HPSA=m
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_ACARD is not set
+CONFIG_SCSI_AACRAID=m
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+CONFIG_SCSI_MVSAS=y
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+CONFIG_SCSI_MVSAS_TASKLET=y
+CONFIG_SCSI_MVUMI=y
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
+CONFIG_MEGARAID_LEGACY=y
+CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_MPT3SAS=y
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
+CONFIG_SCSI_MPT2SAS=m
+# CONFIG_SCSI_MPI3MR is not set
+CONFIG_SCSI_LEAPIORAID=m
+CONFIG_SCSI_SMARTPQI=m
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_MYRB is not set
+# CONFIG_SCSI_MYRS is not set
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_FCOE=m
+# CONFIG_SCSI_SNIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_FDOMAIN_PCI is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_QLA_FC=m
+CONFIG_TCM_QLA2XXX=m
+# CONFIG_TCM_QLA2XXX_DEBUG is not set
+CONFIG_SCSI_QLA_ISCSI=m
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_EFCT is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_WD719X is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_BFA_FC is not set
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_CHELSIO_FCOE=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_ALUA=y
+# end of SCSI device support
+
+CONFIG_ATA=y
+CONFIG_SATA_HOST=y
+CONFIG_PATA_TIMINGS=y
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_ATA_FORCE=y
+CONFIG_ATA_ACPI=y
+# CONFIG_SATA_ZPODD is not set
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_MOBILE_LPM_POLICY=0
+CONFIG_SATA_AHCI_PLATFORM=y
+# CONFIG_AHCI_DWC is not set
+# CONFIG_AHCI_CEVA is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SX4 is not set
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
+CONFIG_ATA_PIIX=m
+# CONFIG_SATA_DWC is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SATA_ZHAOXIN is not set
+
+#
+# PATA SFF controllers with BMDMA
+#
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+CONFIG_PATA_ATIIXP=y
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_SCH is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OF_PLATFORM is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_PARPORT is not set
+
+#
+# Generic fallback / legacy drivers
+#
+# CONFIG_PATA_ACPI is not set
+CONFIG_ATA_GENERIC=m
+# CONFIG_PATA_LEGACY is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
+CONFIG_MD_BITMAP_FILE=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+# CONFIG_MD_CLUSTER is not set
+CONFIG_BCACHE=m
+# CONFIG_BCACHE_DEBUG is not set
+# CONFIG_BCACHE_CLOSURES_DEBUG is not set
+# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=m
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_BUFIO=m
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+CONFIG_DM_BIO_PRISON=m
+CONFIG_DM_PERSISTENT_DATA=m
+# CONFIG_DM_UNSTRIPED is not set
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_CACHE_SMQ=m
+CONFIG_DM_WRITECACHE=m
+# CONFIG_DM_EBS is not set
+CONFIG_DM_ERA=m
+# CONFIG_DM_CLONE is not set
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+# CONFIG_DM_MULTIPATH_HST is not set
+# CONFIG_DM_MULTIPATH_IOA is not set
+CONFIG_DM_DELAY=m
+# CONFIG_DM_DUST is not set
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+# CONFIG_DM_VERITY_FEC is not set
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
+# CONFIG_DM_ZONED is not set
+CONFIG_DM_AUDIT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+# CONFIG_TCM_FC is not set
+CONFIG_ISCSI_TARGET=m
+CONFIG_ISCSI_TARGET_CXGB4=m
+# CONFIG_SBP_TARGET is not set
+# CONFIG_REMOTE_TARGET is not set
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+# CONFIG_FUSION_FC is not set
+CONFIG_FUSION_SAS=m
+CONFIG_FUSION_MAX_SGE=128
+CONFIG_FUSION_CTL=m
+CONFIG_FUSION_LOGGING=y
+
+#
+# IEEE 1394 (FireWire) support
+#
+CONFIG_FIREWIRE=m
+CONFIG_FIREWIRE_OHCI=m
+CONFIG_FIREWIRE_SBP2=m
+CONFIG_FIREWIRE_NET=m
+# CONFIG_FIREWIRE_NOSY is not set
+# end of IEEE 1394 (FireWire) support
+
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_NET_CORE=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_WIREGUARD=m
+# CONFIG_WIREGUARD_DEBUG is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_NET_FC=y
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN_L3S=y
+CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
+CONFIG_VXLAN=m
+CONFIG_GENEVE=m
+# CONFIG_BAREUDP is not set
+# CONFIG_GTP is not set
+# CONFIG_AMT is not set
+CONFIG_MACSEC=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+# CONFIG_NETCONSOLE_EXTENDED_LOG is not set
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_NTB_NETDEV=m
+CONFIG_RIONET=m
+CONFIG_RIONET_TX_SIZE=128
+CONFIG_RIONET_RX_SIZE=128
+CONFIG_TUN=m
+CONFIG_TAP=m
+# CONFIG_TUN_VNET_CROSS_LE is not set
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_NET_VRF=m
+CONFIG_VSOCKMON=m
+# CONFIG_ARCNET is not set
+# CONFIG_ATM_DRIVERS is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_B53 is not set
+# CONFIG_NET_DSA_BCM_SF2 is not set
+# CONFIG_NET_DSA_LOOP is not set
+# CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK is not set
+# CONFIG_NET_DSA_LANTIQ_GSWIP is not set
+# CONFIG_NET_DSA_MT7530 is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_AR9331 is not set
+# CONFIG_NET_DSA_QCA8K is not set
+# CONFIG_NET_DSA_SJA1105 is not set
+# CONFIG_NET_DSA_XRS700X_I2C is not set
+# CONFIG_NET_DSA_XRS700X_MDIO is not set
+# CONFIG_NET_DSA_REALTEK is not set
+# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set
+# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set
+# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set
+# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set
+# end of Distributed Switch Architecture drivers
+
+CONFIG_ETHERNET=y
+CONFIG_MDIO=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_ALTERA_TSE is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+CONFIG_NET_VENDOR_ASIX=y
+# CONFIG_SPI_AX88796C is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BCMGENET is not set
+CONFIG_BNX2=y
+CONFIG_CNIC=m
+CONFIG_TIGON3=m
+CONFIG_TIGON3_HWMON=y
+CONFIG_BNX2X=m
+CONFIG_BNX2X_SRIOV=y
+# CONFIG_SYSTEMPORT is not set
+CONFIG_BNXT=m
+CONFIG_BNXT_SRIOV=y
+CONFIG_BNXT_FLOWER_OFFLOAD=y
+CONFIG_BNXT_DCB=y
+CONFIG_BNXT_HWMON=y
+CONFIG_NET_VENDOR_CADENCE=y
+# CONFIG_MACB is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+CONFIG_NET_VENDOR_CHELSIO=y
+CONFIG_CHELSIO_T1=m
+CONFIG_CHELSIO_T1_1G=y
+CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
+# CONFIG_CHELSIO_T4_DCB is not set
+CONFIG_CHELSIO_T4VF=m
+CONFIG_CHELSIO_LIB=m
+CONFIG_CHELSIO_INLINE_CRYPTO=y
+# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set
+CONFIG_CHELSIO_IPSEC_INLINE=m
+# CONFIG_CHELSIO_TLS_DEVICE is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+CONFIG_NET_VENDOR_DAVICOM=y
+# CONFIG_DM9051 is not set
+CONFIG_DNET=m
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+CONFIG_NET_VENDOR_ENGLEDER=y
+# CONFIG_TSNEP is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+CONFIG_NET_VENDOR_FUNGIBLE=y
+# CONFIG_FUN_ETH is not set
+CONFIG_NET_VENDOR_GOOGLE=y
+CONFIG_NET_VENDOR_HUAWEI=y
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100 is not set
+CONFIG_E1000=m
+CONFIG_E1000E=m
+CONFIG_IGB=m
+CONFIG_IGB_HWMON=y
+CONFIG_IGBVF=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_HWMON=y
+CONFIG_IXGBE_DCB=y
+CONFIG_IXGBE_IPSEC=y
+CONFIG_IXGBEVF=m
+CONFIG_IXGBEVF_IPSEC=y
+CONFIG_I40E=m
+CONFIG_I40E_DCB=y
+CONFIG_IAVF=m
+CONFIG_I40EVF=m
+CONFIG_ICE=m
+CONFIG_ICE_SWITCHDEV=y
+CONFIG_FM10K=m
+# CONFIG_IGC is not set
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_ADI=y
+# CONFIG_ADIN1110 is not set
+CONFIG_NET_VENDOR_LITEX=y
+# CONFIG_LITEX_LITEETH is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+CONFIG_MLX4_EN=m
+CONFIG_MLX4_EN_DCB=y
+CONFIG_MLX4_CORE=m
+CONFIG_MLX4_DEBUG=y
+# CONFIG_MLX4_CORE_GEN2 is not set
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_FPGA=y
+CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_EN_ARFS=y
+CONFIG_MLX5_EN_RXNFC=y
+CONFIG_MLX5_MPFS=y
+CONFIG_MLX5_ESWITCH=y
+CONFIG_MLX5_BRIDGE=y
+CONFIG_MLX5_CLS_ACT=y
+CONFIG_MLX5_TC_CT=y
+CONFIG_MLX5_TC_SAMPLE=y
+CONFIG_MLX5_CORE_EN_DCB=y
+CONFIG_MLX5_CORE_IPOIB=y
+# CONFIG_MLX5_MACSEC is not set
+# CONFIG_MLX5_EN_IPSEC is not set
+# CONFIG_MLX5_EN_TLS is not set
+CONFIG_MLX5_SW_STEERING=y
+# CONFIG_MLX5_SF is not set
+CONFIG_MLXSW_CORE=m
+CONFIG_MLXSW_CORE_HWMON=y
+CONFIG_MLXSW_CORE_THERMAL=y
+CONFIG_MLXSW_PCI=m
+CONFIG_MLXSW_I2C=m
+CONFIG_MLXSW_SPECTRUM=m
+CONFIG_MLXSW_SPECTRUM_DCB=y
+CONFIG_MLXSW_MINIMAL=m
+CONFIG_MLXFW=m
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+CONFIG_NET_VENDOR_MICROSOFT=y
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_NET_VENDOR_NETERION=y
+# CONFIG_S2IO is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+CONFIG_ETHOC=m
+CONFIG_NET_VENDOR_PACKET_ENGINES=y
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_NET_VENDOR_PENSANDO=y
+# CONFIG_IONIC is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_NET_VENDOR_REALTEK=y
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+CONFIG_8139TOO_8129=y
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_R8169=m
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+CONFIG_NET_VENDOR_STMICRO=y
+CONFIG_STMMAC_ETH=y
+# CONFIG_STMMAC_SELFTESTS is not set
+CONFIG_STMMAC_PLATFORM=y
+# CONFIG_DWMAC_DWC_QOS_ETH is not set
+CONFIG_DWMAC_GENERIC=y
+# CONFIG_DWMAC_INTEL_PLAT is not set
+CONFIG_DWMAC_LOONGSON=m
+# CONFIG_STMMAC_PCI is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+CONFIG_NET_VENDOR_VERTEXCOM=y
+# CONFIG_MSE102X is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NET_VENDOR_WANGXUN=y
+CONFIG_LIBWX=m
+CONFIG_NGBE=m
+CONFIG_TXGBE=m
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLINK=y
+CONFIG_PHYLIB=y
+CONFIG_SWPHY=y
+CONFIG_LED_TRIGGER_PHY=y
+CONFIG_PHYLIB_LEDS=y
+CONFIG_FIXED_PHY=y
+CONFIG_SFP=y
+
+#
+# MII PHY device drivers
+#
+CONFIG_AMD_PHY=m
+# CONFIG_ADIN_PHY is not set
+# CONFIG_ADIN1100_PHY is not set
+CONFIG_AQUANTIA_PHY=m
+# CONFIG_AX88796B_PHY is not set
+CONFIG_BROADCOM_PHY=m
+# CONFIG_BCM54140_PHY is not set
+CONFIG_BCM7XXX_PHY=m
+# CONFIG_BCM84881_PHY is not set
+CONFIG_BCM87XX_PHY=m
+CONFIG_BCM_NET_PHYLIB=m
+CONFIG_BCM_NET_PHYPTP=m
+CONFIG_CICADA_PHY=m
+CONFIG_CORTINA_PHY=m
+CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=y +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +# CONFIG_PSE_CONTROLLER is not set +CONFIG_CAN_DEV=m +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_NETLINK=y +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_CAN327 is not set +# CONFIG_CAN_FLEXCAN is not set +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_KVASER_PCIEFD is not set +CONFIG_CAN_SLCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set +CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_CTUCANFD_PCI is not set +# CONFIG_CAN_CTUCANFD_PLATFORM is not set +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +# CONFIG_CAN_F81601 is not set +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_PLX_PCI=m +# CONFIG_CAN_SJA1000_ISA is not set +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set +# CONFIG_CAN_MCP251XFD is not set +# end of CAN SPI interfaces + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +# CONFIG_CAN_ESD_USB is not set +# CONFIG_CAN_ETAS_ES58X is not set +# CONFIG_CAN_F81604 is not set +# CONFIG_CAN_GS_USB is not set +CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# end of CAN USB interfaces + +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_OF_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_GPIO is not set +# CONFIG_MDIO_HISI_FEMAC is not set +CONFIG_MDIO_I2C=y +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_MSCC_MIIM=m +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_IPQ8064 is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=y +# end of PCS device drivers + +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set 
+CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +# CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +# CONFIG_USB_BELKIN is not set +# CONFIG_USB_ARMLINUX is not set +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +# CONFIG_USB_CDC_PHONET is not set +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +CONFIG_WLAN=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +# CONFIG_ATH9K_DEBUGFS is not set +# CONFIG_ATH9K_DYNACK is not set +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +# CONFIG_ATH9K_PCI_NO_EEPROM is not set +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_AHB is not set +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +# CONFIG_ATH10K_TRACING is not set +# CONFIG_WCN36XX is not set +# CONFIG_ATH11K is not set +# CONFIG_ATH12K is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMSMAC_LEDS=y +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y + +# +# Debugging Options +# +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLWIFI_DEVICE_TRACING=y +# end of Debugging Options + +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +CONFIG_MWIFIEX=m 
+CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76x02_LIB=m +CONFIG_MT76x02_USB=m +CONFIG_MT76x0_COMMON=m +CONFIG_MT76x0U=m +# CONFIG_MT76x0E is not set +CONFIG_MT76x2_COMMON=m +# CONFIG_MT76x2E is not set +CONFIG_MT76x2U=m +# CONFIG_MT7603E is not set +# CONFIG_MT7615E is not set +# CONFIG_MT7663U is not set +# CONFIG_MT7663S is not set +# CONFIG_MT7915E is not set +# CONFIG_MT7921E is not set +# CONFIG_MT7921S is not set +# CONFIG_MT7921U is not set +# CONFIG_MT7996E is not set +CONFIG_WLAN_VENDOR_MICROCHIP=y +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +CONFIG_WLAN_VENDOR_PURELIFI=y +# CONFIG_PLFXLC is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +# CONFIG_RTW88 is not set +# CONFIG_RTW89 is not set +# CONFIG_WLAN_VENDOR_RSI is not set +CONFIG_WLAN_VENDOR_SILABS=y +# CONFIG_WFX is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +CONFIG_WLAN_VENDOR_ZYDAS=y +# CONFIG_USB_ZD1201 is not set +CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PCIE is not set +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +# CONFIG_VIRT_WIFI is not set +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +# CONFIG_HDLC_X25 is not set +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_LAPBETHER is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +CONFIG_ISDN=y +CONFIG_ISDN_CAPI=y +CONFIG_CAPI_TRACE=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_MISDN=m 
+CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m + +# +# mISDN hardware drivers +# +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_MISDN_HDLC=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=y +# CONFIG_INPUT_MATRIXKMAP is not set +CONFIG_INPUT_VIVALDIFMAP=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +CONFIG_KEYBOARD_XTKBD=m +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +# CONFIG_TABLET_USB_HANWANG is not set +CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# 
CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_ILITEK is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MSG2638 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set +# CONFIG_TOUCHSCREEN_IMAGIS is not set +# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_WM97XX is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SUR40 is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +# CONFIG_TOUCHSCREEN_IQS7211 is not set +# CONFIG_TOUCHSCREEN_ZINITIX is not set +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_DA7280_HAPTICS is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_IQS269A is not set +# CONFIG_INPUT_IQS626A is not set +# CONFIG_INPUT_IQS7222 is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is 
+# CONFIG_INPUT_DRV2665_HAPTICS is not set
+# CONFIG_INPUT_DRV2667_HAPTICS is not set
+CONFIG_RMI4_CORE=m
+CONFIG_RMI4_I2C=m
+CONFIG_RMI4_SPI=m
+CONFIG_RMI4_SMB=m
+CONFIG_RMI4_F03=y
+CONFIG_RMI4_F03_SERIO=m
+CONFIG_RMI4_2D_SENSOR=y
+CONFIG_RMI4_F11=y
+CONFIG_RMI4_F12=y
+CONFIG_RMI4_F30=y
+CONFIG_RMI4_F34=y
+# CONFIG_RMI4_F3A is not set
+# CONFIG_RMI4_F54 is not set
+CONFIG_RMI4_F55=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+# CONFIG_SERIO_PS2MULT is not set
+CONFIG_SERIO_ARC_PS2=m
+# CONFIG_SERIO_APBPS2 is not set
+# CONFIG_SERIO_GPIO_PS2 is not set
+# CONFIG_USERIO is not set
+# CONFIG_GAMEPORT is not set
+# end of Hardware I/O ports
+# end of Input device support
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_LEGACY_TIOCSTI=y
+CONFIG_LDISC_AUTOLOAD=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_16550A_VARIANTS=y
+# CONFIG_SERIAL_8250_FINTEK is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DMA=y
+CONFIG_SERIAL_8250_PCILIB=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_EXAR=y
+CONFIG_SERIAL_8250_NR_UARTS=16
+CONFIG_SERIAL_8250_RUNTIME_UARTS=16
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+# CONFIG_SERIAL_8250_PCI1XXXX is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_DWLIB=y
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_SERIAL_8250_RT288X is not set
+CONFIG_SERIAL_8250_PERICOM=y
+# CONFIG_SERIAL_OF_PLATFORM is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_JSM=m
+# CONFIG_SERIAL_SIFIVE is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+CONFIG_SERIAL_ARC=m
+CONFIG_SERIAL_ARC_NR_PORTS=1
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_SERIAL_FSL_LINFLEXUART is not set
+# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
+# CONFIG_SERIAL_SPRD is not set
+# end of Serial drivers
+
+CONFIG_SERIAL_MCTRL_GPIO=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+CONFIG_N_HDLC=m
+CONFIG_N_GSM=m
+CONFIG_NOZOMI=m
+# CONFIG_NULL_TTY is not set
+CONFIG_HVC_DRIVER=y
+# CONFIG_SERIAL_DEV_BUS is not set
+# CONFIG_TTY_PRINTK is not set
+CONFIG_PRINTER=m
+# CONFIG_LP_CONSOLE is not set
+CONFIG_PPDEV=m
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DMI_DECODE=y
+CONFIG_IPMI_PLAT_DATA=y
+CONFIG_IPMI_PANIC_EVENT=y
+CONFIG_IPMI_PANIC_STRING=y
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_SSIF=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+# CONFIG_HW_RANDOM_BA431 is not set
+CONFIG_HW_RANDOM_VIRTIO=m
+# CONFIG_HW_RANDOM_CCTRNG is not set
+# CONFIG_HW_RANDOM_XIPHERA is not set
+# CONFIG_APPLICOM is not set
+CONFIG_DEVMEM=y
+CONFIG_DEVPORT=y
+CONFIG_TCG_TPM=y
+CONFIG_HW_RANDOM_TPM=y
+CONFIG_TCG_TIS_CORE=m
+# CONFIG_TCG_TIS is not set
+CONFIG_TCG_TIS_SPI=m
+# CONFIG_TCG_TIS_SPI_CR50 is not set
+# CONFIG_TCG_TIS_I2C is not set
+# CONFIG_TCG_TIS_I2C_CR50 is not set
+CONFIG_TCG_TIS_I2C_ATMEL=m
+CONFIG_TCG_TIS_I2C_INFINEON=m
+CONFIG_TCG_TIS_I2C_NUVOTON=m
+CONFIG_TCG_ATMEL=m
+CONFIG_TCG_INFINEON=m
+CONFIG_TCG_CRB=y
+# CONFIG_TCG_VTPM_PROXY is not set
+CONFIG_TCG_TIS_ST33ZP24=m
+CONFIG_TCG_TIS_ST33ZP24_I2C=m
+CONFIG_TCG_TIS_ST33ZP24_SPI=m
+# CONFIG_XILLYBUS is not set
+# CONFIG_XILLYUSB is not set
+# end of Character devices
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_ACPI_I2C_OPREGION=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_SMBUS=m
+CONFIG_I2C_ALGOBIT=y
+CONFIG_I2C_ALGOPCA=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD8111=m
+# CONFIG_I2C_AMD_MP2 is not set
+# CONFIG_I2C_I801 is not set
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_PIIX4=y
+CONFIG_I2C_NFORCE2=m
+# CONFIG_I2C_NVIDIA_GPU is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+# CONFIG_I2C_ZHAOXIN is not set
+
+#
+# ACPI drivers
+#
+CONFIG_I2C_SCMI=m
+# CONFIG_I2C_ZHAOXIN_SMBUS is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CBUS_GPIO is not set
+CONFIG_I2C_DESIGNWARE_CORE=y
+# CONFIG_I2C_DESIGNWARE_SLAVE is not set
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EMEV2 is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
+CONFIG_I2C_LS2X=m
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PCA_PLATFORM=m
+# CONFIG_I2C_RK3X is not set
+CONFIG_I2C_SIMTEC=m
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+CONFIG_I2C_DIOLAN_U2C=m
+# CONFIG_I2C_CP2615 is not set
+CONFIG_I2C_PARPORT=m
+# CONFIG_I2C_PCI1XXXX is not set
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
+# CONFIG_I2C_TAOS_EVM is not set
+CONFIG_I2C_TINY_USB=m
+CONFIG_I2C_VIPERBOARD=m
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_VIRTIO is not set
+# end of I2C Hardware Bus support
+
+CONFIG_I2C_STUB=m
+# CONFIG_I2C_SLAVE is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# end of I2C support
+
+# CONFIG_I3C is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_AXI_SPI_ENGINE is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_BUTTERFLY is not set
+# CONFIG_SPI_CADENCE is not set
+# CONFIG_SPI_CADENCE_XSPI is not set
+# CONFIG_SPI_DESIGNWARE is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_LM70_LLP is not set
+CONFIG_SPI_LOONGSON_CORE=y
+CONFIG_SPI_LOONGSON_PCI=y
+CONFIG_SPI_LOONGSON_PLATFORM=m
+# CONFIG_SPI_FSL_SPI is not set
+# CONFIG_SPI_MICROCHIP_CORE is not set
+# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PCI1XXXX is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_SIFIVE is not set
+# CONFIG_SPI_SN_F_OSPI is not set
+# CONFIG_SPI_MXIC is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_ZYNQMP_GQSPI is not set
+# CONFIG_SPI_AMD is not set
+
+#
+# SPI Multiplexer support
+#
+# CONFIG_SPI_MUX is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_LOOPBACK_TEST is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_SPI_SLAVE is not set
+CONFIG_SPI_DYNAMIC=y
+# CONFIG_SPMI is not set
+# CONFIG_HSI is not set
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PPS_CLIENT_GPIO=m
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_DP83640_PHY=m
+# CONFIG_PTP_1588_CLOCK_INES is not set
+# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set
+# CONFIG_PTP_1588_CLOCK_IDTCM is not set
+# CONFIG_PTP_1588_CLOCK_MOCK is not set
+# CONFIG_PTP_1588_CLOCK_OCP is not set
+# end of PTP clock support
+
+CONFIG_PINCTRL=y
+CONFIG_PINMUX=y
+CONFIG_PINCONF=y
+CONFIG_GENERIC_PINCONF=y
+# CONFIG_DEBUG_PINCTRL is not set
+# CONFIG_PINCTRL_AMD is not set
+# CONFIG_PINCTRL_CY8C95X0 is not set
+CONFIG_PINCTRL_LOONGSON2=y
+# CONFIG_PINCTRL_MCP23S08 is not set
+# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set
+# CONFIG_PINCTRL_OCELOT is not set
+# CONFIG_PINCTRL_SINGLE is not set
+# CONFIG_PINCTRL_STMFX is not set
+# CONFIG_PINCTRL_SX150X is not set
+
+#
+# Renesas pinctrl drivers
+#
+# end of Renesas pinctrl drivers
+
+CONFIG_GPIOLIB=y
+CONFIG_GPIOLIB_FASTPATH_LIMIT=512
+CONFIG_OF_GPIO=y
+CONFIG_GPIO_ACPI=y
+CONFIG_GPIOLIB_IRQCHIP=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_CDEV=y
+CONFIG_GPIO_CDEV_V1=y
+CONFIG_GPIO_GENERIC=y
+
+#
+# Memory mapped GPIO drivers
+#
+# CONFIG_GPIO_74XX_MMIO is not set
+# CONFIG_GPIO_ALTERA is not set
+CONFIG_GPIO_AMDPT=m
+# CONFIG_GPIO_CADENCE is not set
+# CONFIG_GPIO_DWAPB is not set
+# CONFIG_GPIO_EXAR is not set
+# CONFIG_GPIO_FTGPIO010 is not set
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_GRGPIO is not set
+# CONFIG_GPIO_HLWD is not set
+# CONFIG_GPIO_LOGICVC is not set
+CONFIG_GPIO_LOONGSON_64BIT=y
+# CONFIG_GPIO_MB86S7X is not set
+# CONFIG_GPIO_SIFIVE is not set
+# CONFIG_GPIO_SYSCON is not set
+# CONFIG_GPIO_XILINX is not set
+# CONFIG_GPIO_AMD_FCH is not set
+# end of Memory mapped GPIO drivers
+
+#
+# I2C GPIO expanders
+#
+# CONFIG_GPIO_ADNP is not set
+# CONFIG_GPIO_FXL6408 is not set
+# CONFIG_GPIO_DS4520 is not set
+# CONFIG_GPIO_GW_PLD is not set
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCA9570 is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_TPIC2810 is not set
+# end of I2C GPIO expanders
+
+#
+# MFD GPIO expanders
+#
+# end of MFD GPIO expanders
+
+#
+# PCI GPIO expanders
+#
+# CONFIG_GPIO_PCI_IDIO_16 is not set
+# CONFIG_GPIO_PCIE_IDIO_24 is not set
+# CONFIG_GPIO_RDC321X is not set
+# end of PCI GPIO expanders
+
+#
+# SPI GPIO expanders
+#
+# CONFIG_GPIO_74X164 is not set
+# CONFIG_GPIO_MAX3191X is not set
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_PISOSR is not set
+# CONFIG_GPIO_XRA1403 is not set
+# end of SPI GPIO expanders
+
+#
+# USB GPIO expanders
+#
+CONFIG_GPIO_VIPERBOARD=m
+# end of USB GPIO expanders
+
+#
+# Virtual GPIO drivers
+#
+# CONFIG_GPIO_AGGREGATOR is not set
+# CONFIG_GPIO_LATCH is not set
+# CONFIG_GPIO_MOCKUP is not set
+# CONFIG_GPIO_VIRTIO is not set
+# CONFIG_GPIO_SIM is not set
+# end of Virtual GPIO drivers
+
+# CONFIG_W1 is not set
+CONFIG_POWER_RESET=y
+# CONFIG_POWER_RESET_GPIO is not set
+# CONFIG_POWER_RESET_GPIO_RESTART is not set
+# CONFIG_POWER_RESET_LTC2952 is not set
+# CONFIG_POWER_RESET_RESTART is not set
+# CONFIG_POWER_RESET_SYSCON is not set
+# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set
+# CONFIG_SYSCON_REBOOT_MODE is not set
+# CONFIG_NVMEM_REBOOT_MODE is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_POWER_SUPPLY_HWMON=y
+# CONFIG_GENERIC_ADC_BATTERY is not set
+# CONFIG_IP5XXX_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_CHARGER_ADP5061 is not set
+# CONFIG_BATTERY_CW2015 is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SAMSUNG_SDI is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_CHARGER_SBS is not set
+# CONFIG_BATTERY_BQ27XXX is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_LT3651 is not set
+# CONFIG_CHARGER_LTC4162L is not set
+# CONFIG_CHARGER_DETECTOR_MAX14656 is not set
+# CONFIG_CHARGER_MAX77976 is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_BQ24257 is not set
+# CONFIG_CHARGER_BQ24735 is not set
+# CONFIG_CHARGER_BQ2515X is not set
+# CONFIG_CHARGER_BQ25890 is not set
+# CONFIG_CHARGER_BQ25980 is not set
+# CONFIG_CHARGER_BQ256XX is not set
+# CONFIG_BATTERY_GAUGE_LTC2941 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_BATTERY_RT5033 is not set
+# CONFIG_CHARGER_RT9455 is not set
+# CONFIG_CHARGER_BD99954 is not set
+# CONFIG_BATTERY_UG3105 is not set
+CONFIG_HWMON=y
+CONFIG_HWMON_VID=m
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7314 is not set
+CONFIG_SENSORS_AD7414=m
+CONFIG_SENSORS_AD7418=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1026=m
+CONFIG_SENSORS_ADM1029=m
+CONFIG_SENSORS_ADM1031=m
+# CONFIG_SENSORS_ADM1177 is not set
+CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_ADT7X10=m
+# CONFIG_SENSORS_ADT7310 is not set
+CONFIG_SENSORS_ADT7410=m
+CONFIG_SENSORS_ADT7411=m
+CONFIG_SENSORS_ADT7462=m
+CONFIG_SENSORS_ADT7470=m
+CONFIG_SENSORS_ADT7475=m
+# CONFIG_SENSORS_AHT10 is not set
+# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set
+# CONFIG_SENSORS_AS370 is not set
+CONFIG_SENSORS_ASC7621=m
+# CONFIG_SENSORS_AXI_FAN_CONTROL is not set
+CONFIG_SENSORS_ATXP1=m
+# CONFIG_SENSORS_CORSAIR_CPRO is not set
+# CONFIG_SENSORS_CORSAIR_PSU is not set
+# CONFIG_SENSORS_DRIVETEMP is not set
+CONFIG_SENSORS_DS620=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71805F=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_F75375S=m
+# CONFIG_SENSORS_FTSTEUTATES is not set
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_G760A=m
+# CONFIG_SENSORS_G762 is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_HS3001 is not set
+CONFIG_SENSORS_IBMAEM=m
+CONFIG_SENSORS_IBMPEX=m
+# CONFIG_SENSORS_IIO_HWMON is not set
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_JC42=m
+# CONFIG_SENSORS_POWR1220 is not set
+CONFIG_SENSORS_LINEAGE=m
+# CONFIG_SENSORS_LTC2945 is not set
+# CONFIG_SENSORS_LTC2947_I2C is not set
+# CONFIG_SENSORS_LTC2947_SPI is not set
+# CONFIG_SENSORS_LTC2990 is not set
+# CONFIG_SENSORS_LTC2992 is not set
+CONFIG_SENSORS_LTC4151=m
+CONFIG_SENSORS_LTC4215=m
+# CONFIG_SENSORS_LTC4222 is not set
+CONFIG_SENSORS_LTC4245=m
+# CONFIG_SENSORS_LTC4260 is not set
+CONFIG_SENSORS_LTC4261=m
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX127 is not set
+CONFIG_SENSORS_MAX16065=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_MAX1668=m
+CONFIG_SENSORS_MAX197=m
+# CONFIG_SENSORS_MAX31722 is not set
+# CONFIG_SENSORS_MAX31730 is not set
+# CONFIG_SENSORS_MAX31760 is not set
+# CONFIG_MAX31827 is not set
+# CONFIG_SENSORS_MAX6620 is not set
+# CONFIG_SENSORS_MAX6621 is not set
+CONFIG_SENSORS_MAX6639=m
+CONFIG_SENSORS_MAX6650=m
+CONFIG_SENSORS_MAX6697=m
+# CONFIG_SENSORS_MAX31790 is not set
+# CONFIG_SENSORS_MC34VR500 is not set
+CONFIG_SENSORS_MCP3021=m
+# CONFIG_SENSORS_TC654 is not set
+# CONFIG_SENSORS_TPS23861 is not set
+# CONFIG_SENSORS_MR75203 is not set
+# CONFIG_SENSORS_ADCXX is not set
+CONFIG_SENSORS_LM63=m
+# CONFIG_SENSORS_LM70 is not set
+CONFIG_SENSORS_LM73=m
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM87=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM92=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_LM95234=m
+CONFIG_SENSORS_LM95241=m
+CONFIG_SENSORS_LM95245=m
+CONFIG_SENSORS_PC87360=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_NTC_THERMISTOR=m
+# CONFIG_SENSORS_NCT6683 is not set
+CONFIG_SENSORS_NCT6775_CORE=m
+CONFIG_SENSORS_NCT6775=m
+# CONFIG_SENSORS_NCT6775_I2C is not set
+# CONFIG_SENSORS_NCT7802 is not set
+# CONFIG_SENSORS_NCT7904 is not set
+# CONFIG_SENSORS_NPCM7XX is not set
+# CONFIG_SENSORS_NZXT_KRAKEN2 is not set
+# CONFIG_SENSORS_NZXT_SMART2 is not set
+# CONFIG_SENSORS_OCC_P8_I2C is not set
+CONFIG_SENSORS_PCF8591=m
+CONFIG_PMBUS=m
+CONFIG_SENSORS_PMBUS=m
+# CONFIG_SENSORS_ACBEL_FSG032 is not set
+# CONFIG_SENSORS_ADM1266 is not set
+CONFIG_SENSORS_ADM1275=m
+# CONFIG_SENSORS_BEL_PFE is not set
+# CONFIG_SENSORS_BPA_RS600 is not set
+# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set
+# CONFIG_SENSORS_FSP_3Y is not set
+# CONFIG_SENSORS_IBM_CFFPS is not set
+# CONFIG_SENSORS_DPS920AB is not set
+# CONFIG_SENSORS_INSPUR_IPSPS is not set
+# CONFIG_SENSORS_IR35221 is not set
+# CONFIG_SENSORS_IR36021 is not set
+# CONFIG_SENSORS_IR38064 is not set
+# CONFIG_SENSORS_IRPS5401 is not set
+# CONFIG_SENSORS_ISL68137 is not set
+CONFIG_SENSORS_LM25066=m
+# CONFIG_SENSORS_LT7182S is not set
+CONFIG_SENSORS_LTC2978=m
+# CONFIG_SENSORS_LTC3815 is not set
+# CONFIG_SENSORS_MAX15301 is not set
+CONFIG_SENSORS_MAX16064=m
+# CONFIG_SENSORS_MAX16601 is not set
+# CONFIG_SENSORS_MAX20730 is not set
+# CONFIG_SENSORS_MAX20751 is not set
+# CONFIG_SENSORS_MAX31785 is not set
+CONFIG_SENSORS_MAX34440=m
+CONFIG_SENSORS_MAX8688=m
+# CONFIG_SENSORS_MP2888 is not set
+# CONFIG_SENSORS_MP2975 is not set
+# CONFIG_SENSORS_MP5023 is not set
+# CONFIG_SENSORS_MPQ7932 is not set
+# CONFIG_SENSORS_PIM4328 is not set
+# CONFIG_SENSORS_PLI1209BC is not set
+# CONFIG_SENSORS_PM6764TR is not set
+# CONFIG_SENSORS_PXE1610 is not set
+# CONFIG_SENSORS_Q54SJ108A2 is not set
+# CONFIG_SENSORS_STPDDC60 is not set
+# CONFIG_SENSORS_TDA38640 is not set
+# CONFIG_SENSORS_TPS40422 is not set
+# CONFIG_SENSORS_TPS53679 is not set
+# CONFIG_SENSORS_TPS546D24 is not set
+CONFIG_SENSORS_UCD9000=m
+CONFIG_SENSORS_UCD9200=m
+# CONFIG_SENSORS_XDPE152 is not set
+# CONFIG_SENSORS_XDPE122 is not set
+CONFIG_SENSORS_ZL6100=m
+# CONFIG_SENSORS_PWM_FAN is not set
+# CONFIG_SENSORS_SBTSI is not set
+# CONFIG_SENSORS_SBRMI is not set
+CONFIG_SENSORS_SHT15=m
+CONFIG_SENSORS_SHT21=m
+# CONFIG_SENSORS_SHT3x is not set
+# CONFIG_SENSORS_SHT4x is not set
+# CONFIG_SENSORS_SHTC1 is not set
+CONFIG_SENSORS_SIS5595=m
+CONFIG_SENSORS_DME1737=m
+CONFIG_SENSORS_EMC1403=m
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC2305 is not set
+CONFIG_SENSORS_EMC6W201=m
+CONFIG_SENSORS_SMSC47M1=m
+CONFIG_SENSORS_SMSC47M192=m
+CONFIG_SENSORS_SMSC47B397=m
+CONFIG_SENSORS_SCH56XX_COMMON=m
+CONFIG_SENSORS_SCH5627=m
+CONFIG_SENSORS_SCH5636=m
+# CONFIG_SENSORS_STTS751 is not set
+# CONFIG_SENSORS_ADC128D818 is not set
+CONFIG_SENSORS_ADS7828=m
+# CONFIG_SENSORS_ADS7871 is not set
+CONFIG_SENSORS_AMC6821=m
+CONFIG_SENSORS_INA209=m
+CONFIG_SENSORS_INA2XX=m
+# CONFIG_SENSORS_INA238 is not set
+# CONFIG_SENSORS_INA3221 is not set
+# CONFIG_SENSORS_TC74 is not set
+CONFIG_SENSORS_THMC50=m
+CONFIG_SENSORS_TMP102=m
+# CONFIG_SENSORS_TMP103 is not set
+# CONFIG_SENSORS_TMP108 is not set
+CONFIG_SENSORS_TMP401=m
+CONFIG_SENSORS_TMP421=m
+# CONFIG_SENSORS_TMP464 is not set
+# CONFIG_SENSORS_TMP513 is not set
+CONFIG_SENSORS_VIA686A=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+# CONFIG_SENSORS_W83773G is not set
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83791D=m
+CONFIG_SENSORS_W83792D=m
+CONFIG_SENSORS_W83793=m
+CONFIG_SENSORS_W83795=m
+# CONFIG_SENSORS_W83795_FANCTRL is not set
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_SENSORS_W83L786NG=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_SENSORS_W83627EHF=m
+
+#
+# ACPI drivers
+#
+CONFIG_SENSORS_ACPI_POWER=m
+CONFIG_THERMAL=y
+# CONFIG_THERMAL_NETLINK is not set
+# CONFIG_THERMAL_STATISTICS is not set
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_OF=y
+# CONFIG_THERMAL_WRITABLE_TRIPS is not set
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+# CONFIG_THERMAL_GOV_BANG_BANG is not set
+# CONFIG_THERMAL_GOV_USER_SPACE is not set
+# CONFIG_CPU_THERMAL is not set
+# CONFIG_DEVFREQ_THERMAL is not set
+CONFIG_THERMAL_EMULATION=y
+# CONFIG_THERMAL_MMIO is not set
+# CONFIG_GENERIC_ADC_THERMAL is not set
+CONFIG_LOONGSON2_THERMAL=m
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
+CONFIG_WATCHDOG_OPEN_TIMEOUT=0
+CONFIG_WATCHDOG_SYSFS=y
+# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set
+
+#
+# Watchdog Pretimeout Governors
+#
+# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_GPIO_WATCHDOG=m
+CONFIG_WDAT_WDT=m
+# CONFIG_XILINX_WATCHDOG is not set
+# CONFIG_ZIIRAVE_WATCHDOG is not set
+# CONFIG_CADENCE_WATCHDOG is not set
+# CONFIG_DW_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+CONFIG_ALIM7101_WDT=m
+CONFIG_I6300ESB_WDT=m
+# CONFIG_MEN_A21_WDT is not set
+
+#
+# PCI-based Watchdog Cards
+#
+CONFIG_PCIPCWATCHDOG=m
+CONFIG_WDTPCI=m
+
+#
+# USB-based Watchdog Cards
+#
+CONFIG_USBPCWATCHDOG=m
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+CONFIG_BCMA=m
+CONFIG_BCMA_HOST_PCI_POSSIBLE=y
+CONFIG_BCMA_HOST_PCI=y
+# CONFIG_BCMA_HOST_SOC is not set
+CONFIG_BCMA_DRIVER_PCI=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
+# CONFIG_BCMA_DEBUG is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_ACT8945A is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_MFD_SMPRO is not set
+# CONFIG_MFD_AS3722 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_ATMEL_FLEXCOM is not set
+# CONFIG_MFD_ATMEL_HLCDC is not set
+# CONFIG_MFD_BCM590XX is not set
+# CONFIG_MFD_BD9571MWV is not set
+# CONFIG_MFD_AXP20X_I2C is not set
+# CONFIG_MFD_CS42L43_I2C is not set
+# CONFIG_MFD_MADERA is not set
+# CONFIG_MFD_MAX5970 is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_DA9062 is not set
+# CONFIG_MFD_DA9063 is not set
+# CONFIG_MFD_DA9150 is not set
+# CONFIG_MFD_DLN2 is not set
+# CONFIG_MFD_GATEWORKS_GSC is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_MFD_MP2629 is not set
+# CONFIG_MFD_HI6421_PMIC is not set
+# CONFIG_LPC_ICH is not set
+CONFIG_LPC_SCH=m
+# CONFIG_MFD_IQS62X is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_KEMPLD is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77541 is not set
+# CONFIG_MFD_MAX77620 is not set
+# CONFIG_MFD_MAX77650 is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX77714 is not set
+# CONFIG_MFD_MAX77843 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_MT6360 is not set
+# CONFIG_MFD_MT6370 is not set
+# CONFIG_MFD_MT6397 is not set
+# CONFIG_MFD_MENF21BMC is not set
+# CONFIG_MFD_OCELOT is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_CPCAP is not set
+CONFIG_MFD_VIPERBOARD=m
+# CONFIG_MFD_NTXEC is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_SY7636A is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RT4831 is not set
+# CONFIG_MFD_RT5033 is not set
+# CONFIG_MFD_RT5120 is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_RK8XX_I2C is not set
+# CONFIG_MFD_RK8XX_SPI is not set
+# CONFIG_MFD_RN5T618 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+CONFIG_MFD_SM501=m
+CONFIG_MFD_SM501_GPIO=y
+# CONFIG_MFD_SKY81452 is not set
+# CONFIG_MFD_STMPE is not set
+CONFIG_MFD_SYSCON=y
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP3943 is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_TI_LMU is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65086 is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TI_LP873X is not set
+# CONFIG_MFD_TI_LP87565 is not set
+# CONFIG_MFD_TPS65218 is not set
+# CONFIG_MFD_TPS65219 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS6594_I2C is not set
+# CONFIG_MFD_TPS6594_SPI is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TQMX86 is not set
+CONFIG_MFD_VX855=m
+# CONFIG_MFD_LOCHNAGAR is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_ROHM_BD718XX is not set
+# CONFIG_MFD_ROHM_BD71828 is not set
+# CONFIG_MFD_ROHM_BD957XMUF is not set
+# CONFIG_MFD_STPMIC1 is not set
+# CONFIG_MFD_STMFX is not set
+# CONFIG_MFD_ATC260X_I2C is not set
+# CONFIG_MFD_QCOM_PM8008 is not set
+# CONFIG_MFD_INTEL_M10_BMC_SPI is not set
+# CONFIG_MFD_RSMU_I2C is not set
+# CONFIG_MFD_RSMU_SPI is not set
+# end of Multifunction device drivers
+
+# CONFIG_REGULATOR is not set
+CONFIG_RC_CORE=m
+CONFIG_LIRC=y
+CONFIG_RC_MAP=m
+CONFIG_RC_DECODERS=y
+CONFIG_IR_IMON_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+# CONFIG_IR_RCMM_DECODER is not set
+CONFIG_IR_SANYO_DECODER=m
+CONFIG_IR_SHARP_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_XMP_DECODER=m
+CONFIG_RC_DEVICES=y
+CONFIG_IR_ENE=m
+CONFIG_IR_FINTEK=m
+# CONFIG_IR_GPIO_CIR is not set
+# CONFIG_IR_GPIO_TX is not set
+# CONFIG_IR_HIX5HD2 is not set
+# CONFIG_IR_IGORPLUGUSB is not set
+CONFIG_IR_IGUANA=m
+CONFIG_IR_IMON=m
+CONFIG_IR_IMON_RAW=m
+CONFIG_IR_ITE_CIR=m
+CONFIG_IR_MCEUSB=m
+CONFIG_IR_NUVOTON=m
+# CONFIG_IR_PWM_TX is not set
+CONFIG_IR_REDRAT3=m
+CONFIG_IR_SERIAL=m
+CONFIG_IR_SERIAL_TRANSMITTER=y
+# CONFIG_IR_SPI is not set
+CONFIG_IR_STREAMZAP=m
+# CONFIG_IR_TOY is not set
+CONFIG_IR_TTUSBIR=m
+CONFIG_RC_ATI_REMOTE=m
+# CONFIG_RC_LOOPBACK is not set
+# CONFIG_RC_XBOX_DVD is not set
+CONFIG_CEC_CORE=m
+
+#
+# CEC support
+#
+# CONFIG_MEDIA_CEC_RC is not set
+CONFIG_MEDIA_CEC_SUPPORT=y
+# CONFIG_CEC_CH7322 is not set
+CONFIG_USB_PULSE8_CEC=m
+CONFIG_USB_RAINSHADOW_CEC=m
+# end of CEC support
+
+CONFIG_MEDIA_SUPPORT=m
+# CONFIG_MEDIA_SUPPORT_FILTER is not set
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+
+#
+# Media device types
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+CONFIG_MEDIA_SDR_SUPPORT=y
+CONFIG_MEDIA_PLATFORM_SUPPORT=y
+CONFIG_MEDIA_TEST_SUPPORT=y
+# end of Media device types
+
+#
+# Media core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_DVB_CORE=m
+# end of Media core support
+
+#
+# Video4Linux options
+#
+CONFIG_VIDEO_V4L2_I2C=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEO_TUNER=m
+CONFIG_V4L2_FWNODE=m
+CONFIG_V4L2_ASYNC=m
+# end of Video4Linux options
+
+#
+# Media controller options
+#
+CONFIG_MEDIA_CONTROLLER_DVB=y
+# end of Media controller options
+
+#
+# Digital TV options
+#
+# CONFIG_DVB_MMAP is not set
+CONFIG_DVB_NET=y
+CONFIG_DVB_MAX_ADAPTERS=8
+CONFIG_DVB_DYNAMIC_MINORS=y
+# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
+# CONFIG_DVB_ULE_DEBUG is not set
+# end of Digital TV options
+
+#
+# Media drivers
+#
+
+#
+# Media drivers
+#
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+CONFIG_USB_GSPCA=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+# CONFIG_USB_GSPCA_DTCS033 is not set
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+# CONFIG_USB_GSPCA_KINECT is not set
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+# CONFIG_USB_GSPCA_STK1135 is not set
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+# CONFIG_USB_GSPCA_TOUPTEK is not set
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_PWC=m
+# CONFIG_USB_PWC_DEBUG is not set
+CONFIG_USB_PWC_INPUT_EVDEV=y
+CONFIG_USB_S2255=m
+# CONFIG_VIDEO_USBTV is not set
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+
+#
+# Analog TV USB devices
+#
+# CONFIG_VIDEO_GO7007 is not set
+CONFIG_VIDEO_HDPVR=m
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
+# CONFIG_VIDEO_STK1160 is not set
+
+#
+# Analog/digital TV USB devices
+#
+CONFIG_VIDEO_AU0828=m
+CONFIG_VIDEO_AU0828_V4L2=y
+# CONFIG_VIDEO_AU0828_RC is not set
+
+#
+# Digital TV USB devices
+#
+# CONFIG_DVB_AS102 is not set
+CONFIG_DVB_B2C2_FLEXCOP_USB=m
+# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
+CONFIG_DVB_USB_V2=m
+CONFIG_DVB_USB_AF9035=m
+CONFIG_DVB_USB_ANYSEE=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_AZ6007=m
+CONFIG_DVB_USB_CE6230=m
+# CONFIG_DVB_USB_DVBSKY is not set
+CONFIG_DVB_USB_EC168=m
+CONFIG_DVB_USB_GL861=m
+CONFIG_DVB_USB_LME2510=m
+CONFIG_DVB_USB_MXL111SF=m
+# CONFIG_DVB_USB_ZD1301 is not set
+CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+CONFIG_DVB_USB_AZ6027=m
+CONFIG_DVB_USB_CINERGY_T2=m
+CONFIG_DVB_USB_CXUSB=m
+# CONFIG_DVB_USB_CXUSB_ANALOG is not set
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_DIB3000MC=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_DTV5100=m
+CONFIG_DVB_USB_DW2102=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_PCTV452E=m
+CONFIG_DVB_USB_TECHNISAT_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_SMS_USB_DRV=m
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+
+#
+# Webcam, TV (analog/digital) USB devices
+#
+CONFIG_VIDEO_EM28XX=m
+# CONFIG_VIDEO_EM28XX_V4L2 is not set
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
+CONFIG_VIDEO_EM28XX_RC=m
+
+#
+# Software defined radio USB devices
+#
+# CONFIG_USB_AIRSPY is not set
+# CONFIG_USB_HACKRF is not set
+# CONFIG_USB_MSI2500 is not set
+CONFIG_MEDIA_PCI_SUPPORT=y
+
+#
+# Media capture support
+#
+# CONFIG_VIDEO_SOLO6X10 is not set
+# CONFIG_VIDEO_TW5864 is not set
+# CONFIG_VIDEO_TW68 is not set
+# CONFIG_VIDEO_TW686X is not set
+# CONFIG_VIDEO_ZORAN is not set
+
+#
+# Media capture/analog TV support
+#
+# CONFIG_VIDEO_DT3155 is not set
+CONFIG_VIDEO_IVTV=m
+# CONFIG_VIDEO_IVTV_ALSA is not set
+CONFIG_VIDEO_FB_IVTV=m
+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+# CONFIG_VIDEO_HEXIUM_ORION is not set
+# CONFIG_VIDEO_MXB is not set
+
+#
+# Media capture/analog/hybrid TV support
+#
+CONFIG_VIDEO_BT848=m
+CONFIG_DVB_BT8XX=m
+CONFIG_VIDEO_CX18=m
+# CONFIG_VIDEO_CX18_ALSA is not set
+CONFIG_VIDEO_CX23885=m
+CONFIG_MEDIA_ALTERA_CI=m
+# CONFIG_VIDEO_CX25821 is not set
+CONFIG_VIDEO_CX88=m
+CONFIG_VIDEO_CX88_ALSA=m
+CONFIG_VIDEO_CX88_BLACKBIRD=m
+CONFIG_VIDEO_CX88_DVB=m
+# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set
+CONFIG_VIDEO_CX88_MPEG=m
+CONFIG_VIDEO_SAA7134=m
+CONFIG_VIDEO_SAA7134_ALSA=m
+CONFIG_VIDEO_SAA7134_RC=y
+CONFIG_VIDEO_SAA7134_DVB=m
+CONFIG_VIDEO_SAA7164=m
+
+#
+# Media digital TV PCI Adapters
+#
+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
+# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
+CONFIG_DVB_DDBRIDGE=m
+# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set
+CONFIG_DVB_DM1105=m
+CONFIG_MANTIS_CORE=m
+CONFIG_DVB_MANTIS=m
+CONFIG_DVB_HOPPER=m
+# CONFIG_DVB_NETUP_UNIDVB is not set
+CONFIG_DVB_NGENE=m
+CONFIG_DVB_PLUTO2=m
+CONFIG_DVB_PT1=m
+# CONFIG_DVB_PT3 is not set
+# CONFIG_DVB_SMIPCIE is not set
+CONFIG_DVB_BUDGET_CORE=m
+CONFIG_DVB_BUDGET=m
+CONFIG_DVB_BUDGET_CI=m
+CONFIG_DVB_BUDGET_AV=m
+# CONFIG_IPU_BRIDGE is not set
+CONFIG_RADIO_ADAPTERS=m
+# CONFIG_RADIO_MAXIRADIO is not set
+# CONFIG_RADIO_SAA7706H is not set
+# CONFIG_RADIO_SHARK is not set
+# CONFIG_RADIO_SHARK2 is not set
+# CONFIG_RADIO_SI4713 is not set
+CONFIG_RADIO_TEA575X=m
+# CONFIG_RADIO_TEA5764 is not set
+# CONFIG_RADIO_TEF6862 is not set
+# CONFIG_RADIO_WL1273 is not set
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_KEENE is not set
+# CONFIG_USB_MA901 is not set
+# CONFIG_USB_MR800 is not set
+# CONFIG_USB_RAREMONO is not set
+# CONFIG_RADIO_SI470X is not set
+CONFIG_MEDIA_PLATFORM_DRIVERS=y
+# CONFIG_V4L_PLATFORM_DRIVERS is not set
+# CONFIG_SDR_PLATFORM_DRIVERS is not set
+# CONFIG_DVB_PLATFORM_DRIVERS is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+
+#
+# Allegro DVT media platform drivers
+#
+
+#
+# Amlogic media platform drivers
+#
+
+#
+# Amphion drivers
+#
+
+#
+# Aspeed media platform drivers
+#
+
+#
+# Atmel media platform drivers
+#
+
+#
+# Cadence media platform drivers
+#
+# CONFIG_VIDEO_CADENCE_CSI2RX is not set
+# CONFIG_VIDEO_CADENCE_CSI2TX is not set
+
+#
+# Chips&Media media platform drivers
+#
+
+#
+# Intel media platform drivers
+#
+
+#
+# Marvell media platform drivers
+#
+
+#
+# Mediatek media platform drivers
+#
+
+#
+# Microchip Technology, Inc. media platform drivers
+#
+
+#
+# NVidia media platform drivers
+#
+
+#
+# NXP media platform drivers
+#
+
+#
+# Qualcomm media platform drivers
+#
+
+#
+# Renesas media platform drivers
+#
+
+#
+# Rockchip media platform drivers
+#
+
+#
+# Samsung media platform drivers
+#
+
+#
+# STMicroelectronics media platform drivers
+#
+
+#
+# Sunxi media platform drivers
+#
+
+#
+# Texas Instruments drivers
+#
+
+#
+# Verisilicon media platform drivers
+#
+
+#
+# VIA media platform drivers
+#
+
+#
+# Xilinx media platform drivers
+#
+
+#
+# MMC/SDIO DVB adapters
+#
+CONFIG_SMS_SDIO_DRV=m
+# CONFIG_V4L_TEST_DRIVERS is not set
+# CONFIG_DVB_TEST_DRIVERS is not set
+
+#
+# FireWire (IEEE 1394) Adapters
+#
+CONFIG_DVB_FIREDTV=m
+CONFIG_DVB_FIREDTV_INPUT=y
+CONFIG_MEDIA_COMMON_OPTIONS=y
+
+#
+# common driver options
+#
+CONFIG_CYPRESS_FIRMWARE=m
+CONFIG_TTPCI_EEPROM=m
+CONFIG_UVC_COMMON=m
+CONFIG_VIDEO_CX2341X=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_DVB_B2C2_FLEXCOP=m
+CONFIG_VIDEO_SAA7146=m
+CONFIG_VIDEO_SAA7146_VV=m
+CONFIG_SMS_SIANO_MDTV=m
+CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
+CONFIG_VIDEOBUF2_CORE=m
+CONFIG_VIDEOBUF2_V4L2=m
+CONFIG_VIDEOBUF2_MEMOPS=m
+CONFIG_VIDEOBUF2_VMALLOC=m
+CONFIG_VIDEOBUF2_DMA_SG=m
+CONFIG_VIDEOBUF2_DVB=m
+# end of Media drivers
+
+#
+# Media ancillary drivers
+#
+CONFIG_MEDIA_ATTACH=y
+CONFIG_VIDEO_IR_I2C=m
+CONFIG_VIDEO_CAMERA_SENSOR=y
+# CONFIG_VIDEO_AR0521 is not set
+# CONFIG_VIDEO_HI556 is not set
+# CONFIG_VIDEO_HI846 is not set
+# CONFIG_VIDEO_HI847 is not set
+# CONFIG_VIDEO_IMX208 is not set
+# CONFIG_VIDEO_IMX214 is not set
+# CONFIG_VIDEO_IMX219 is not set
+# CONFIG_VIDEO_IMX258 is not set
+# CONFIG_VIDEO_IMX274 is not set
+# CONFIG_VIDEO_IMX290 is not set
+# CONFIG_VIDEO_IMX296 is not set
+# CONFIG_VIDEO_IMX319 is not set
+# CONFIG_VIDEO_IMX334 is not set
+# CONFIG_VIDEO_IMX335 is not set
+# CONFIG_VIDEO_IMX355 is not set
+# CONFIG_VIDEO_IMX412 is not set
+# CONFIG_VIDEO_IMX415 is not set
+# CONFIG_VIDEO_MT9M001 is not set
+# CONFIG_VIDEO_MT9M111 is not set
+# CONFIG_VIDEO_MT9P031 is not set
+# CONFIG_VIDEO_MT9T112 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_MT9V032 is not set
+# CONFIG_VIDEO_MT9V111 is not set
+# CONFIG_VIDEO_OG01A1B is not set
+# CONFIG_VIDEO_OV01A10 is not set
+# CONFIG_VIDEO_OV02A10 is not set
+# CONFIG_VIDEO_OV08D10 is not set
+# CONFIG_VIDEO_OV08X40 is not set
+# CONFIG_VIDEO_OV13858 is not set
+# CONFIG_VIDEO_OV13B10 is not set
+# CONFIG_VIDEO_OV2640 is not set
+# CONFIG_VIDEO_OV2659 is not set
+# CONFIG_VIDEO_OV2680 is not set
+# CONFIG_VIDEO_OV2685 is not set
+# CONFIG_VIDEO_OV2740 is not set
+# CONFIG_VIDEO_OV4689 is not set
+# CONFIG_VIDEO_OV5640 is not set
+# CONFIG_VIDEO_OV5645 is not set
+# CONFIG_VIDEO_OV5647 is not set
+# CONFIG_VIDEO_OV5648 is not set
+# CONFIG_VIDEO_OV5670 is not set
+# CONFIG_VIDEO_OV5675 is not set
+# CONFIG_VIDEO_OV5693 is not set
+# CONFIG_VIDEO_OV5695 is not set
+# CONFIG_VIDEO_OV6650 is not set
+# CONFIG_VIDEO_OV7251 is not set
+# CONFIG_VIDEO_OV7640 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_OV772X is not set
+# CONFIG_VIDEO_OV7740 is not set
+# CONFIG_VIDEO_OV8856 is not set
+# CONFIG_VIDEO_OV8858 is not set
+# CONFIG_VIDEO_OV8865 is not set
+# CONFIG_VIDEO_OV9282 is not set
+# CONFIG_VIDEO_OV9640 is not set
+# CONFIG_VIDEO_OV9650 is not set
+# CONFIG_VIDEO_OV9734 is not set
+# CONFIG_VIDEO_RDACM20 is not set
+# CONFIG_VIDEO_RDACM21 is not set
+# CONFIG_VIDEO_RJ54N1 is not set
+# CONFIG_VIDEO_S5C73M3 is not set
+# CONFIG_VIDEO_S5K5BAF is not set
+# CONFIG_VIDEO_S5K6A3 is not set
+# CONFIG_VIDEO_ST_VGXY61 is not set
+# CONFIG_VIDEO_CCS is not set
+# CONFIG_VIDEO_ET8EK8 is not set
+
+#
+# Lens drivers
+#
+# CONFIG_VIDEO_AD5820 is not set
+# CONFIG_VIDEO_AK7375 is not set
+# CONFIG_VIDEO_DW9714 is not set
+# CONFIG_VIDEO_DW9719 is not set
+# CONFIG_VIDEO_DW9768 is not set
+# CONFIG_VIDEO_DW9807_VCM is not set
+# end of Lens drivers
+
+#
+# Flash devices
+#
+# CONFIG_VIDEO_ADP1653 is not set
+# CONFIG_VIDEO_LM3560 is not set
+# CONFIG_VIDEO_LM3646 is not set
+# end of Flash devices
+
+#
+# Audio decoders, processors and mixers
+#
+CONFIG_VIDEO_CS3308=m
+CONFIG_VIDEO_CS5345=m
+CONFIG_VIDEO_CS53L32A=m
+CONFIG_VIDEO_MSP3400=m
+# CONFIG_VIDEO_SONY_BTF_MPX is not set
+# CONFIG_VIDEO_TDA1997X is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_UDA1342 is not set
+CONFIG_VIDEO_VP27SMPX=m
+CONFIG_VIDEO_WM8739=m
+CONFIG_VIDEO_WM8775=m
+# end of Audio decoders, processors and mixers
+
+#
+# RDS decoders
+#
+# CONFIG_VIDEO_SAA6588 is not set
+# end of RDS decoders
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_ADV7183 is not set
+# CONFIG_VIDEO_ADV748X is not set
+# CONFIG_VIDEO_ADV7604 is not set
+# CONFIG_VIDEO_ADV7842 is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_ISL7998X is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_ML86V7667 is not set
+# CONFIG_VIDEO_SAA7110 is not set
+CONFIG_VIDEO_SAA711X=m
+# CONFIG_VIDEO_TC358743 is not set
+# CONFIG_VIDEO_TC358746 is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_TW2804 is not set
+# CONFIG_VIDEO_TW9903 is not set
+# CONFIG_VIDEO_TW9906 is not set
+# CONFIG_VIDEO_TW9910 is not set
+# CONFIG_VIDEO_VPX3220 is not set
+
+#
+# Video and audio decoders
+#
+CONFIG_VIDEO_SAA717X=m
+CONFIG_VIDEO_CX25840=m
+# end of Video decoders
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_ADV7393 is not set
+# CONFIG_VIDEO_ADV7511 is not set
+# CONFIG_VIDEO_AK881X is not set
+CONFIG_VIDEO_SAA7127=m
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_THS8200 is not set
+# end of Video encoders
+
+#
+# Video improvement chips
+#
+CONFIG_VIDEO_UPD64031A=m
+CONFIG_VIDEO_UPD64083=m
+# end of Video improvement chips
+
+#
+# Audio/Video compression chips
+#
+# CONFIG_VIDEO_SAA6752HS is not set
+# end of Audio/Video compression chips
+
+#
+# SDR tuner chips
+#
+# CONFIG_SDR_MAX2175 is not set
+# end of SDR tuner chips
+
+#
+# Miscellaneous helper chips
+#
+# CONFIG_VIDEO_I2C is not set
+CONFIG_VIDEO_M52790=m
+# CONFIG_VIDEO_ST_MIPID02 is not set
+# CONFIG_VIDEO_THS7303 is not set
+# end of Miscellaneous helper chips
+
+#
+# Video serializers and deserializers
+#
+# CONFIG_VIDEO_DS90UB913 is not set
+# CONFIG_VIDEO_DS90UB953 is not set
+# CONFIG_VIDEO_DS90UB960 is not set
+# end of Video serializers and deserializers
+
+#
+# Media SPI Adapters
+#
+CONFIG_CXD2880_SPI_DRV=m
+# CONFIG_VIDEO_GS1662 is not set
+# end of Media SPI Adapters
+
+CONFIG_MEDIA_TUNER=m
+
+#
+# Customize TV tuners
+#
+CONFIG_MEDIA_TUNER_E4000=m
+CONFIG_MEDIA_TUNER_FC0011=m
+CONFIG_MEDIA_TUNER_FC0012=m
+CONFIG_MEDIA_TUNER_FC0013=m
+CONFIG_MEDIA_TUNER_FC2580=m
+CONFIG_MEDIA_TUNER_IT913X=m
+CONFIG_MEDIA_TUNER_M88RS6000T=m
+CONFIG_MEDIA_TUNER_MAX2165=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_MEDIA_TUNER_MSI001=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2063=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2131=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_MXL301RF=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_MEDIA_TUNER_MXL5007T=m
+CONFIG_MEDIA_TUNER_QM1D1B0004=m
+CONFIG_MEDIA_TUNER_QM1D1C0042=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_R820T=m
+CONFIG_MEDIA_TUNER_SI2157=m
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA18212=m
+CONFIG_MEDIA_TUNER_TDA18218=m
+CONFIG_MEDIA_TUNER_TDA18250=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_TUA9001=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC4000=m
+CONFIG_MEDIA_TUNER_XC5000=m
+# end of Customize TV tuners
+
+#
+# Customise DVB Frontends
+#
+
+#
+# Multistandard (satellite) frontends
+#
+CONFIG_DVB_MXL5XX=m
+CONFIG_DVB_STB0899=m
+CONFIG_DVB_STB6100=m
+CONFIG_DVB_STV090x=m
+CONFIG_DVB_STV0910=m
+CONFIG_DVB_STV6110x=m
+CONFIG_DVB_STV6111=m
+
+#
+# Multistandard (cable + terrestrial) frontends
+#
+CONFIG_DVB_DRXK=m
+CONFIG_DVB_MN88472=m
+CONFIG_DVB_MN88473=m
+CONFIG_DVB_SI2165=m
+CONFIG_DVB_TDA18271C2DD=m
+
+#
+# DVB-S (satellite) frontends
+#
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_CX24116=m
+CONFIG_DVB_CX24117=m
+CONFIG_DVB_CX24120=m
+CONFIG_DVB_CX24123=m
+CONFIG_DVB_DS3000=m
+CONFIG_DVB_MB86A16=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_S5H1420=m
+CONFIG_DVB_SI21XX=m
+CONFIG_DVB_STB6000=m
+CONFIG_DVB_STV0288=m
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_STV0900=m
+CONFIG_DVB_STV6110=m
+CONFIG_DVB_TDA10071=m
+CONFIG_DVB_TDA10086=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA8261=m
+CONFIG_DVB_TDA826X=m
+CONFIG_DVB_TS2020=m
+CONFIG_DVB_TUA6100=m
+CONFIG_DVB_TUNER_CX24113=m
+CONFIG_DVB_TUNER_ITD1000=m
+CONFIG_DVB_VES1X93=m
+CONFIG_DVB_ZL10036=m
+CONFIG_DVB_ZL10039=m
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+CONFIG_DVB_CXD2820R=m
+CONFIG_DVB_CXD2841ER=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_DIB9000=m
+CONFIG_DVB_DRXD=m
+CONFIG_DVB_EC100=m
+CONFIG_DVB_GP8PSK_FE=m
+CONFIG_DVB_L64781=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_S5H1432=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_STV0367=m
+CONFIG_DVB_TDA10048=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_ZD1301_DEMOD=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_CXD2880=m
+
+#
+# DVB-C (cable) frontends
+#
+CONFIG_DVB_STV0297=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_VES1820=m
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+CONFIG_DVB_AU8522=m
+CONFIG_DVB_AU8522_DTV=m
+CONFIG_DVB_AU8522_V4L=m
+CONFIG_DVB_BCM3510=m
+CONFIG_DVB_LG2160=m
+CONFIG_DVB_LGDT3305=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_MXL692=m
+CONFIG_DVB_NXT200X=m
+CONFIG_DVB_OR51132=m
+CONFIG_DVB_OR51211=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_S5H1411=m
+
+#
+# ISDB-T (terrestrial) frontends
+#
+CONFIG_DVB_DIB8000=m
+CONFIG_DVB_MB86A20S=m
+CONFIG_DVB_S921=m
+
+#
+# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
+#
+CONFIG_DVB_MN88443X=m
+CONFIG_DVB_TC90522=m
+
+#
+# Digital terrestrial only tuners/PLL
+#
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
+
+#
+# SEC control devices for DVB-S
+#
+CONFIG_DVB_A8293=m
+CONFIG_DVB_AF9033=m
+CONFIG_DVB_ASCOT2E=m
+CONFIG_DVB_ATBM8830=m
+CONFIG_DVB_HELENE=m
+CONFIG_DVB_HORUS3A=m
+CONFIG_DVB_ISL6405=m
+CONFIG_DVB_ISL6421=m
+CONFIG_DVB_ISL6423=m
+CONFIG_DVB_IX2505V=m
+CONFIG_DVB_LGS8GL5=m
+CONFIG_DVB_LGS8GXX=m
+CONFIG_DVB_LNBH25=m
+CONFIG_DVB_LNBH29=m
+CONFIG_DVB_LNBP21=m
+CONFIG_DVB_LNBP22=m
+CONFIG_DVB_M88RS2000=m
+CONFIG_DVB_TDA665x=m
+CONFIG_DVB_DRX39XYJ=m
+
+#
+# Common Interface (EN50221) controller drivers
+#
+CONFIG_DVB_CXD2099=m
+CONFIG_DVB_SP2=m
+# end of Customise DVB Frontends
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+# end of Media ancillary drivers
+
+#
+# Graphics support
+#
+CONFIG_APERTURE_HELPERS=y
+CONFIG_VIDEO_CMDLINE=y
+CONFIG_VIDEO_NOMODESET=y
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_PANEL is not set
+CONFIG_DRM=y
+# CONFIG_DRM_DEBUG_MM is not set
+CONFIG_DRM_KMS_HELPER=y
+# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
+CONFIG_DRM_FBDEV_EMULATION=y
+CONFIG_DRM_FBDEV_OVERALLOC=100
+# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_DISPLAY_HELPER=m
+CONFIG_DRM_DISPLAY_DP_HELPER=y
+CONFIG_DRM_DISPLAY_HDCP_HELPER=y
+CONFIG_DRM_DISPLAY_HDMI_HELPER=y
+CONFIG_DRM_DP_AUX_CHARDEV=y
+CONFIG_DRM_DP_CEC=y
+CONFIG_DRM_TTM=y
+CONFIG_DRM_EXEC=m
+CONFIG_DRM_BUDDY=m
+CONFIG_DRM_VRAM_HELPER=m
+CONFIG_DRM_TTM_HELPER=m
+CONFIG_DRM_GEM_SHMEM_HELPER=y
+CONFIG_DRM_SUBALLOC_HELPER=m
+CONFIG_DRM_SCHED=m
+
+#
+# I2C encoder or helper chips
+#
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
+# CONFIG_DRM_I2C_NXP_TDA998X is not set
+# CONFIG_DRM_I2C_NXP_TDA9950 is not set
+# end of I2C encoder or helper chips
+
+#
+# ARM devices
+#
+# CONFIG_DRM_KOMEDA is not set
+# end of ARM devices
+
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_AMDGPU=m
+CONFIG_DRM_AMDGPU_SI=y
+CONFIG_DRM_AMDGPU_CIK=y
+CONFIG_DRM_AMDGPU_USERPTR=y
+# CONFIG_DRM_AMDGPU_WERROR is not set
+
+#
+# ACP (Audio CoProcessor) Configuration
+#
+# CONFIG_DRM_AMD_ACP is not set
+# end of ACP (Audio CoProcessor) Configuration
+
+#
+# Display Engine Configuration
+#
+CONFIG_DRM_AMD_DC=y
+CONFIG_DRM_AMD_DC_FP=y
+# CONFIG_DRM_AMD_DC_SI is not set
+# CONFIG_DRM_AMD_SECURE_DISPLAY is not set
+# end of Display Engine Configuration
+
+CONFIG_DRM_NOUVEAU=m
+CONFIG_NOUVEAU_DEBUG=5
+CONFIG_NOUVEAU_DEBUG_DEFAULT=3
+# CONFIG_NOUVEAU_DEBUG_MMU is not set
+# CONFIG_NOUVEAU_DEBUG_PUSH is not set
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
+# CONFIG_DRM_VGEM is not set
+CONFIG_DRM_VKMS=m
+CONFIG_DRM_UDL=m
+CONFIG_DRM_AST=y
+CONFIG_DRM_MGAG200=m
+CONFIG_DRM_QXL=m
+CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_VIRTIO_GPU_KMS=y
+CONFIG_DRM_PANEL=y
+
+#
+# Display Panels
+#
+# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set
+# CONFIG_DRM_PANEL_ARM_VERSATILE is not set
+# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set
+# CONFIG_DRM_PANEL_LVDS is not set
+# CONFIG_DRM_PANEL_SIMPLE is not set
+# CONFIG_DRM_PANEL_EDP is not set
+# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set
+# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set
+# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set
+# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set
+# CONFIG_DRM_PANEL_LG_LB035Q02 is not set
+# CONFIG_DRM_PANEL_LG_LG4573 is not set
+# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set
+# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set
+# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set
+# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set
+# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set
+# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
+# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set
+# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set
+# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set
+# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set
+# CONFIG_DRM_PANEL_TPO_TPG110 is not set
+# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set
+# end of Display Panels
+
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PANEL_BRIDGE=y
+
+#
+# Display Interface Bridges
+#
+# CONFIG_DRM_CHIPONE_ICN6211 is not set
+# CONFIG_DRM_CHRONTEL_CH7033 is not set
+# CONFIG_DRM_DISPLAY_CONNECTOR is not set
+# CONFIG_DRM_ITE_IT6505 is not set
+# CONFIG_DRM_LONTIUM_LT8912B is not set
+# CONFIG_DRM_LONTIUM_LT9211 is not set
+# CONFIG_DRM_LONTIUM_LT9611 is not set
+# CONFIG_DRM_LONTIUM_LT9611UXC is not set
+# CONFIG_DRM_ITE_IT66121 is not set
+# CONFIG_DRM_LVDS_CODEC is not set
+# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set
+# CONFIG_DRM_NWL_MIPI_DSI is not set
+# CONFIG_DRM_NXP_PTN3460 is not set
+# CONFIG_DRM_PARADE_PS8622 is not set
+# CONFIG_DRM_PARADE_PS8640 is not set
+# CONFIG_DRM_SAMSUNG_DSIM is not set
+# CONFIG_DRM_SIL_SII8620 is not set
+# CONFIG_DRM_SII902X is not set
+# CONFIG_DRM_SII9234 is not set
+# CONFIG_DRM_SIMPLE_BRIDGE is not set
+# CONFIG_DRM_THINE_THC63LVD1024 is not set
+# CONFIG_DRM_TOSHIBA_TC358762 is not set
+# CONFIG_DRM_TOSHIBA_TC358764 is not set
+# CONFIG_DRM_TOSHIBA_TC358767 is not set
+# CONFIG_DRM_TOSHIBA_TC358768 is not set
+# CONFIG_DRM_TOSHIBA_TC358775 is not set
+# CONFIG_DRM_TI_DLPC3433 is not set
+# CONFIG_DRM_TI_TFP410 is not set
+# CONFIG_DRM_TI_SN65DSI83 is not set
+# CONFIG_DRM_TI_SN65DSI86 is not set
+# CONFIG_DRM_TI_TPD12S015 is not set
+# CONFIG_DRM_ANALOGIX_ANX6345 is not set
+# CONFIG_DRM_ANALOGIX_ANX78XX is not set
+# CONFIG_DRM_ANALOGIX_ANX7625 is not set
+# CONFIG_DRM_I2C_ADV7511 is not set
+# CONFIG_DRM_CDNS_DSI is not set
+# CONFIG_DRM_CDNS_MHDP8546 is not set
+# end of Display Interface Bridges
+
+CONFIG_DRM_LOONGSON=y
+# CONFIG_DRM_ETNAVIV is not set
+# CONFIG_DRM_LOGICVC is not set
+# CONFIG_DRM_ARCPGU is not set
+CONFIG_DRM_BOCHS=m
+CONFIG_DRM_CIRRUS_QEMU=m
+# CONFIG_DRM_GM12U320 is not set
+# CONFIG_DRM_PANEL_MIPI_DBI is not set
+# CONFIG_DRM_SIMPLEDRM is not set
+# CONFIG_TINYDRM_HX8357D is not set
+# CONFIG_TINYDRM_ILI9163 is not set
+# CONFIG_TINYDRM_ILI9225 is not set
+# CONFIG_TINYDRM_ILI9341 is not set
+# CONFIG_TINYDRM_ILI9486 is not set
+# CONFIG_TINYDRM_MI0283QT is not set
+# CONFIG_TINYDRM_REPAPER is not set
+# CONFIG_TINYDRM_ST7586 is not set
+# CONFIG_TINYDRM_ST7735R is not set
+# CONFIG_DRM_GUD is not set
+# CONFIG_DRM_SSD130X is not set
+# CONFIG_DRM_LEGACY is not set
+CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
+# CONFIG_HYDCU_FIXUP_HEADER is not set
+CONFIG_DRM_INSPUR=m
+
+#
+# Frame buffer Devices
+#
+CONFIG_FB=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_UVESA is not set
+CONFIG_FB_EFI=y
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_MATROX is not set
+CONFIG_FB_RADEON=y
+CONFIG_FB_RADEON_I2C=y
+CONFIG_FB_RADEON_BACKLIGHT=y
+# CONFIG_FB_RADEON_DEBUG is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SM501 is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_SIMPLE is not set
+# CONFIG_FB_SSD1307 is not set
+# CONFIG_FB_SM712 is not set
+CONFIG_FB_LS2K500=m
+CONFIG_FB_CORE=y
+CONFIG_FB_NOTIFY=y
+# CONFIG_FIRMWARE_EDID is not set
+CONFIG_FB_DEVICE=y
+CONFIG_FB_DDC=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=y
+CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_IOMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
+CONFIG_FB_BACKLIGHT=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+# end of Frame buffer Devices
+
+#
+# Backlight & LCD device support
+#
+CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_L4F00242T03 is not set
+# CONFIG_LCD_LMS283GF05 is not set
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI922X is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+CONFIG_LCD_PLATFORM=m
+# CONFIG_LCD_AMS369FG06 is not set
+# CONFIG_LCD_LMS501KF03 is not set
+# CONFIG_LCD_HX8357 is not set
+# CONFIG_LCD_OTM3225A is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_KTD253 is not set
+# CONFIG_BACKLIGHT_KTZ8866 is not set
+# CONFIG_BACKLIGHT_PWM is not set
+# CONFIG_BACKLIGHT_QCOM_WLED is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630A is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+CONFIG_BACKLIGHT_LP855X=m
+# CONFIG_BACKLIGHT_GPIO is not set
+# CONFIG_BACKLIGHT_LV5207LP is not set
+# CONFIG_BACKLIGHT_BD6107 is not set
+# CONFIG_BACKLIGHT_ARCXCNN is not set
+# CONFIG_BACKLIGHT_LED is not set
+# end of Backlight & LCD device support
+
+CONFIG_HDMI=y
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
+CONFIG_DUMMY_CONSOLE_ROWS=25
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
+# end of Console display driver support
+
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+# end of Graphics support
+
+# CONFIG_DRM_ACCEL is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_SEQ_DEVICE=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_JACK=y
+CONFIG_SND_JACK_INPUT_DEV=y
+CONFIG_SND_OSSEMUL=y
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+CONFIG_SND_PCM_TIMER=y
+CONFIG_SND_HRTIMER=m
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_MAX_CARDS=32
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_PROC_FS=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+CONFIG_SND_CTL_FAST_LOOKUP=y
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_CTL_INPUT_VALIDATION is not set
+CONFIG_SND_VMASTER=y
+CONFIG_SND_CTL_LED=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
+CONFIG_SND_SEQ_MIDI_EVENT=m
+CONFIG_SND_SEQ_MIDI=m
+CONFIG_SND_SEQ_MIDI_EMUL=m
+CONFIG_SND_SEQ_VIRMIDI=m
+# CONFIG_SND_SEQ_UMP is not set
+CONFIG_SND_MPU401_UART=m
+CONFIG_SND_OPL3_LIB=m
+CONFIG_SND_OPL3_LIB_SEQ=m
+CONFIG_SND_VX_LIB=m
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_DRIVERS=y
+CONFIG_SND_DUMMY=m
+CONFIG_SND_ALOOP=m
+# CONFIG_SND_PCMTEST is not set
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_MTPAV=m
+# CONFIG_SND_MTS64 is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+CONFIG_SND_MPU401=m
+# CONFIG_SND_PORTMAN2X4 is not set
+CONFIG_SND_AC97_POWER_SAVE=y
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
+CONFIG_SND_PCI=y
+CONFIG_SND_AD1889=m
+CONFIG_SND_ATIIXP=m
+CONFIG_SND_ATIIXP_MODEM=m
+CONFIG_SND_AU8810=m
+CONFIG_SND_AU8820=m
+CONFIG_SND_AU8830=m
+# CONFIG_SND_AW2 is not set
+CONFIG_SND_BT87X=m
+CONFIG_SND_BT87X_OVERCLOCK=y
+CONFIG_SND_CA0106=m
+CONFIG_SND_CMIPCI=m
+CONFIG_SND_OXYGEN_LIB=m
+CONFIG_SND_OXYGEN=m
+# CONFIG_SND_CS4281 is not set
+CONFIG_SND_CS46XX=m
+CONFIG_SND_CS46XX_NEW_DSP=y
+CONFIG_SND_CTXFI=m
+CONFIG_SND_DARLA20=m
+CONFIG_SND_GINA20=m
+CONFIG_SND_LAYLA20=m
+CONFIG_SND_DARLA24=m
+CONFIG_SND_GINA24=m
+CONFIG_SND_LAYLA24=m
+CONFIG_SND_MONA=m
+CONFIG_SND_MIA=m
+CONFIG_SND_ECHO3G=m
+CONFIG_SND_INDIGO=m
+CONFIG_SND_INDIGOIO=m
+CONFIG_SND_INDIGODJ=m
+CONFIG_SND_INDIGOIOX=m
+CONFIG_SND_INDIGODJX=m
+CONFIG_SND_ENS1370=m
+CONFIG_SND_ENS1371=m
+# CONFIG_SND_FM801 is not set
+CONFIG_SND_HDSP=m
+CONFIG_SND_HDSPM=m
+CONFIG_SND_ICE1724=m
+CONFIG_SND_INTEL8X0=m
+CONFIG_SND_INTEL8X0M=m
+CONFIG_SND_KORG1212=m
+CONFIG_SND_LOLA=m
+CONFIG_SND_LX6464ES=m
+CONFIG_SND_MIXART=m
+# CONFIG_SND_NM256 is not set
+CONFIG_SND_PCXHR=m
+# CONFIG_SND_RIPTIDE is not set
+CONFIG_SND_RME32=m
+CONFIG_SND_RME96=m
+CONFIG_SND_RME9652=m
+CONFIG_SND_VIA82XX=m
+CONFIG_SND_VIA82XX_MODEM=m
+CONFIG_SND_VIRTUOSO=m
+CONFIG_SND_VX222=m
+# CONFIG_SND_YMFPCI is not set
+
+#
+# HD-Audio
+#
+CONFIG_SND_HDA=m
+CONFIG_SND_HDA_GENERIC_LEDS=y
+CONFIG_SND_HDA_INTEL=m
+CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_RECONFIG=y
+CONFIG_SND_HDA_INPUT_BEEP=y
+CONFIG_SND_HDA_INPUT_BEEP_MODE=0
+CONFIG_SND_HDA_PATCH_LOADER=y
+# CONFIG_SND_HDA_SCODEC_CS35L41_I2C is not set
+# CONFIG_SND_HDA_SCODEC_CS35L41_SPI is not set
+# CONFIG_SND_HDA_SCODEC_CS35L56_I2C is not set
+# CONFIG_SND_HDA_SCODEC_CS35L56_SPI is not set
+# CONFIG_SND_HDA_SCODEC_TAS2781_I2C is not set
+CONFIG_SND_HDA_CODEC_REALTEK=m
+CONFIG_SND_HDA_CODEC_ANALOG=m
+CONFIG_SND_HDA_CODEC_SIGMATEL=m
+CONFIG_SND_HDA_CODEC_VIA=m
+CONFIG_SND_HDA_CODEC_HDMI=m
+CONFIG_SND_HDA_CODEC_CIRRUS=m
+# CONFIG_SND_HDA_CODEC_CS8409 is not set
+CONFIG_SND_HDA_CODEC_CONEXANT=m
+CONFIG_SND_HDA_CODEC_CA0110=m
+CONFIG_SND_HDA_CODEC_CA0132=m
+CONFIG_SND_HDA_CODEC_CA0132_DSP=y
+CONFIG_SND_HDA_CODEC_CMEDIA=m
+CONFIG_SND_HDA_CODEC_SI3054=m
+CONFIG_SND_HDA_GENERIC=m
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
+# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set
+# CONFIG_SND_HDA_CTL_DEV_ID is not set
+# end of HD-Audio
+
+CONFIG_SND_HDA_CORE=m
+CONFIG_SND_HDA_DSP_LOADER=y
+CONFIG_SND_HDA_COMPONENT=y
+CONFIG_SND_HDA_PREALLOC_SIZE=512
+CONFIG_SND_INTEL_NHLT=y
+CONFIG_SND_INTEL_DSP_CONFIG=m
+CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_AUDIO_MIDI_V2 is not set
+CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y
+CONFIG_SND_USB_UA101=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_USB_6FIRE=m
+CONFIG_SND_USB_HIFACE=m
+CONFIG_SND_BCD2000=m
+CONFIG_SND_USB_LINE6=m
+CONFIG_SND_USB_POD=m
+CONFIG_SND_USB_PODHD=m
+CONFIG_SND_USB_TONEPORT=m
+CONFIG_SND_USB_VARIAX=m
+CONFIG_SND_FIREWIRE=y
+CONFIG_SND_FIREWIRE_LIB=m
+CONFIG_SND_DICE=m
+CONFIG_SND_OXFW=m
+CONFIG_SND_ISIGHT=m
+CONFIG_SND_FIREWORKS=m
+CONFIG_SND_BEBOB=m
+CONFIG_SND_FIREWIRE_DIGI00X=m
+CONFIG_SND_FIREWIRE_TASCAM=m
+CONFIG_SND_FIREWIRE_MOTU=m
+CONFIG_SND_FIREFACE=m
+CONFIG_SND_SOC=m
+# CONFIG_SND_SOC_ADI is not set
+# CONFIG_SND_SOC_AMD_ACP is not set
+# CONFIG_SND_AMD_ACP_CONFIG is not set
+# CONFIG_SND_ATMEL_SOC is not set
+# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set
+# CONFIG_SND_DESIGNWARE_I2S is not set
+
+#
+# SoC Audio for Freescale CPUs
+#
+
+#
+# Common SoC Audio options for Freescale CPUs:
+#
+# CONFIG_SND_SOC_FSL_ASRC is not set
+# CONFIG_SND_SOC_FSL_SAI is not set
+# CONFIG_SND_SOC_FSL_AUDMIX is not set
+# CONFIG_SND_SOC_FSL_SSI is not set
+# CONFIG_SND_SOC_FSL_SPDIF is not set
+# CONFIG_SND_SOC_FSL_ESAI is not set
+# CONFIG_SND_SOC_FSL_MICFIL is not set
+# CONFIG_SND_SOC_FSL_XCVR is not set
+# CONFIG_SND_SOC_IMX_AUDMUX is not set
+# end of SoC Audio for Freescale CPUs
+
+# CONFIG_SND_SOC_CHV3_I2S is not set
+# CONFIG_SND_I2S_HI6210_I2S is not set
+
+#
+# SoC Audio for Loongson CPUs
+#
+# CONFIG_SND_SOC_LOONGSON_I2S_PCI is not set
+# CONFIG_SND_SOC_LOONGSON_CARD is not set
+# end of SoC Audio for Loongson CPUs
+
+# CONFIG_SND_SOC_IMG is not set
+# CONFIG_SND_SOC_MTK_BTCVSD is not set
+# CONFIG_SND_SOC_SOF_TOPLEVEL is not set
+
+#
+# STMicroelectronics STM32 SOC audio support
+#
+# end of STMicroelectronics STM32 SOC audio support
+
+# CONFIG_SND_SOC_XILINX_I2S is not set
+# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set
+# CONFIG_SND_SOC_XILINX_SPDIF is not set
+# CONFIG_SND_SOC_XTFPGA_I2S is not set
+CONFIG_SND_SOC_I2C_AND_SPI=m
+
+#
+# CODEC drivers
+#
+# CONFIG_SND_SOC_AC97_CODEC is not set
+# CONFIG_SND_SOC_ADAU1372_I2C is not set
+# CONFIG_SND_SOC_ADAU1372_SPI is not set
+# CONFIG_SND_SOC_ADAU1701 is not set
+# CONFIG_SND_SOC_ADAU1761_I2C is not set
+# CONFIG_SND_SOC_ADAU1761_SPI is not set
+# CONFIG_SND_SOC_ADAU7002 is not set
+# CONFIG_SND_SOC_ADAU7118_HW is not set
+# CONFIG_SND_SOC_ADAU7118_I2C is not set
+# CONFIG_SND_SOC_AK4104 is not set
+# CONFIG_SND_SOC_AK4118 is not set
+# CONFIG_SND_SOC_AK4375 is not set
+# CONFIG_SND_SOC_AK4458 is not set
+# CONFIG_SND_SOC_AK4554 is not set
+# CONFIG_SND_SOC_AK4613 is not set
+# CONFIG_SND_SOC_AK4642 is not set
+# CONFIG_SND_SOC_AK5386 is not set
+# CONFIG_SND_SOC_AK5558 is not set
+# CONFIG_SND_SOC_ALC5623 is not set
+# CONFIG_SND_SOC_AUDIO_IIO_AUX is not set
+# CONFIG_SND_SOC_AW8738 is not set
+# CONFIG_SND_SOC_AW88395 is not set
+# CONFIG_SND_SOC_AW88261 is not set
+# CONFIG_SND_SOC_BD28623 is not set
+# CONFIG_SND_SOC_BT_SCO is not set
+# CONFIG_SND_SOC_CHV3_CODEC is not set
+# CONFIG_SND_SOC_CS35L32 is not set
+# CONFIG_SND_SOC_CS35L33 is not set
+# CONFIG_SND_SOC_CS35L34 is not set
+# CONFIG_SND_SOC_CS35L35 is not set
+# CONFIG_SND_SOC_CS35L36 is not set
+# CONFIG_SND_SOC_CS35L41_SPI is not set
+# CONFIG_SND_SOC_CS35L41_I2C is not set
+# CONFIG_SND_SOC_CS35L45_SPI is not set
+# CONFIG_SND_SOC_CS35L45_I2C is not set
+# CONFIG_SND_SOC_CS35L56_I2C is not set
+# CONFIG_SND_SOC_CS35L56_SPI is not set
+# CONFIG_SND_SOC_CS42L42 is not set
+# CONFIG_SND_SOC_CS42L51_I2C is not set
+# CONFIG_SND_SOC_CS42L52 is not set
+# CONFIG_SND_SOC_CS42L56 is not set
+# CONFIG_SND_SOC_CS42L73 is not set
+# CONFIG_SND_SOC_CS42L83 is not set
+# CONFIG_SND_SOC_CS4234 is not set
+# CONFIG_SND_SOC_CS4265 is not set
+# CONFIG_SND_SOC_CS4270 is not set
+# CONFIG_SND_SOC_CS4271_I2C is not set
+# CONFIG_SND_SOC_CS4271_SPI is not set
+# CONFIG_SND_SOC_CS42XX8_I2C is not set
+# CONFIG_SND_SOC_CS43130 is not set
+# CONFIG_SND_SOC_CS4341 is not set
+# CONFIG_SND_SOC_CS4349 is not set
+# CONFIG_SND_SOC_CS53L30 is not set
+# CONFIG_SND_SOC_CX2072X is not set
+# CONFIG_SND_SOC_DA7213 is not set
+# CONFIG_SND_SOC_DMIC is not set
+# CONFIG_SND_SOC_ES7134 is not set
+# CONFIG_SND_SOC_ES7241 is not set
+# CONFIG_SND_SOC_ES8316 is not set
+# CONFIG_SND_SOC_ES8326 is not set
+# CONFIG_SND_SOC_ES8328_I2C is not set
+# CONFIG_SND_SOC_ES8328_SPI is not set
+# CONFIG_SND_SOC_GTM601 is not set
+# CONFIG_SND_SOC_HDA is not set
+# CONFIG_SND_SOC_ICS43432 is not set
+# CONFIG_SND_SOC_IDT821034 is not set
+# CONFIG_SND_SOC_INNO_RK3036 is not set
+# CONFIG_SND_SOC_MAX98088 is not set
+# CONFIG_SND_SOC_MAX98090 is not set
+# CONFIG_SND_SOC_MAX98357A is not set
+# CONFIG_SND_SOC_MAX98504 is not set
+# CONFIG_SND_SOC_MAX9867 is not set
+# CONFIG_SND_SOC_MAX98927 is not set
+# CONFIG_SND_SOC_MAX98520 is not set
+# CONFIG_SND_SOC_MAX98373_I2C is not set
+# CONFIG_SND_SOC_MAX98388 is not set
+# CONFIG_SND_SOC_MAX98390 is not set
+# CONFIG_SND_SOC_MAX98396 is not set
+# CONFIG_SND_SOC_MAX9860 is not set
+# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set
+# CONFIG_SND_SOC_PCM1681 is not set
+# CONFIG_SND_SOC_PCM1789_I2C is not set
+# CONFIG_SND_SOC_PCM179X_I2C is not set
+# CONFIG_SND_SOC_PCM179X_SPI is not set
+# CONFIG_SND_SOC_PCM186X_I2C is not set
+# CONFIG_SND_SOC_PCM186X_SPI is not set
+# CONFIG_SND_SOC_PCM3060_I2C is not set
+# CONFIG_SND_SOC_PCM3060_SPI is not set
+# CONFIG_SND_SOC_PCM3168A_I2C is not set
+# CONFIG_SND_SOC_PCM3168A_SPI is not set
+# CONFIG_SND_SOC_PCM5102A is not set
+# CONFIG_SND_SOC_PCM512x_I2C is not set
+# CONFIG_SND_SOC_PCM512x_SPI is not set
+# CONFIG_SND_SOC_PEB2466 is not set
+# CONFIG_SND_SOC_RK3328 is not set
+# CONFIG_SND_SOC_RT5616 is not set
+# CONFIG_SND_SOC_RT5631 is not set
+# CONFIG_SND_SOC_RT5640 is not set
+# CONFIG_SND_SOC_RT5659 is not set
+# CONFIG_SND_SOC_RT9120 is not set
+# CONFIG_SND_SOC_SGTL5000 is not set
+# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set
+# CONFIG_SND_SOC_SIMPLE_MUX is not set
+# CONFIG_SND_SOC_SMA1303 is not set
+# CONFIG_SND_SOC_SPDIF is not set
+# CONFIG_SND_SOC_SRC4XXX_I2C is not set
+# CONFIG_SND_SOC_SSM2305 is not set
+# CONFIG_SND_SOC_SSM2518 is not set
+# CONFIG_SND_SOC_SSM2602_SPI is not set
+# CONFIG_SND_SOC_SSM2602_I2C is not set
+# CONFIG_SND_SOC_SSM3515 is not set
+# CONFIG_SND_SOC_SSM4567 is not set
+# CONFIG_SND_SOC_STA32X is not set
+# CONFIG_SND_SOC_STA350 is not set
+# CONFIG_SND_SOC_STI_SAS is not set
+# CONFIG_SND_SOC_TAS2552 is not set
+# CONFIG_SND_SOC_TAS2562 is not set
+# CONFIG_SND_SOC_TAS2764 is not set
+# CONFIG_SND_SOC_TAS2770 is not set
+# CONFIG_SND_SOC_TAS2780 is not set
+# CONFIG_SND_SOC_TAS2781_I2C is not set
+# CONFIG_SND_SOC_TAS5086 is not set
+# CONFIG_SND_SOC_TAS571X is not set
+# CONFIG_SND_SOC_TAS5720 is not set
+# CONFIG_SND_SOC_TAS5805M is not set
+# CONFIG_SND_SOC_TAS6424 is not set
+# CONFIG_SND_SOC_TDA7419 is not set
+# CONFIG_SND_SOC_TFA9879 is not set
+# CONFIG_SND_SOC_TFA989X is not set
+# CONFIG_SND_SOC_TLV320ADC3XXX is not set
+# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
+# CONFIG_SND_SOC_TLV320AIC31XX is not set
+# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set
+# CONFIG_SND_SOC_TLV320AIC3X_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set
+# CONFIG_SND_SOC_TLV320ADCX140 is not set
+# CONFIG_SND_SOC_TS3A227E is not set
+# CONFIG_SND_SOC_TSCS42XX is not set
+# CONFIG_SND_SOC_TSCS454 is not set
+# CONFIG_SND_SOC_UDA1334 is not set
+# CONFIG_SND_SOC_WM8510 is not set
+# CONFIG_SND_SOC_WM8523 is not set
+# CONFIG_SND_SOC_WM8524 is not set
+# CONFIG_SND_SOC_WM8580 is not set
+# CONFIG_SND_SOC_WM8711 is not set
+# CONFIG_SND_SOC_WM8728 is not set
+# CONFIG_SND_SOC_WM8731_I2C is not set
+# CONFIG_SND_SOC_WM8731_SPI is not set
+# CONFIG_SND_SOC_WM8737 is not set
+# CONFIG_SND_SOC_WM8741 is not set
+# CONFIG_SND_SOC_WM8750 is not set
+# CONFIG_SND_SOC_WM8753 is not set
+# CONFIG_SND_SOC_WM8770 is not set
+# CONFIG_SND_SOC_WM8776 is not set
+# CONFIG_SND_SOC_WM8782 is not set
+# CONFIG_SND_SOC_WM8804_I2C is not set
+# CONFIG_SND_SOC_WM8804_SPI is not set
+# CONFIG_SND_SOC_WM8903 is not set
+# CONFIG_SND_SOC_WM8904 is not set
+# CONFIG_SND_SOC_WM8940 is not set
+# CONFIG_SND_SOC_WM8960 is not set
+# CONFIG_SND_SOC_WM8961 is not set
+# CONFIG_SND_SOC_WM8962 is not set
+# CONFIG_SND_SOC_WM8974 is not set
+# CONFIG_SND_SOC_WM8978 is not set
+# CONFIG_SND_SOC_WM8985 is not set
+# CONFIG_SND_SOC_ZL38060 is not set
+# CONFIG_SND_SOC_MAX9759 is not set
+# CONFIG_SND_SOC_MT6351 is not set
+# CONFIG_SND_SOC_MT6358 is not set
+# CONFIG_SND_SOC_MT6660 is not set
+# CONFIG_SND_SOC_NAU8315 is not set
+# CONFIG_SND_SOC_NAU8540 is not set
+# CONFIG_SND_SOC_NAU8810 is not set
+# CONFIG_SND_SOC_NAU8821 is not set
+# CONFIG_SND_SOC_NAU8822 is not set
+# CONFIG_SND_SOC_NAU8824 is not set
+# CONFIG_SND_SOC_TPA6130A2 is not set
+# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set
+# CONFIG_SND_SOC_LPASS_VA_MACRO is not set
+# CONFIG_SND_SOC_LPASS_RX_MACRO is not set
+# CONFIG_SND_SOC_LPASS_TX_MACRO is not set
+# end of CODEC drivers
+
+# CONFIG_SND_SIMPLE_CARD is not set
+# CONFIG_SND_AUDIO_GRAPH_CARD is not set
+# CONFIG_SND_AUDIO_GRAPH_CARD2 is not set
+# CONFIG_SND_TEST_COMPONENT is not set
+# CONFIG_SND_VIRTIO is not set
+CONFIG_AC97_BUS=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=m
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=m
+# CONFIG_HID_ACCUTOUCH is not set
+CONFIG_HID_ACRUX=m
+# CONFIG_HID_ACRUX_FF is not set
+CONFIG_HID_APPLE=m
+CONFIG_HID_APPLEIR=m
+CONFIG_HID_ASUS=m
+CONFIG_HID_AUREAL=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_BETOP_FF=m
+# CONFIG_HID_BIGBEN_FF is not set
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CORSAIR=m
+# CONFIG_HID_COUGAR is not set
+# CONFIG_HID_MACALLY is not set
+CONFIG_HID_PRODIKEYS=m
+CONFIG_HID_CMEDIA=m
+# CONFIG_HID_CP2112 is not set
+# CONFIG_HID_CREATIVE_SB0540 is not set
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+# CONFIG_DRAGONRISE_FF is not set
+# CONFIG_HID_EMS_FF is not set
+CONFIG_HID_ELAN=m
+CONFIG_HID_ELECOM=m
+CONFIG_HID_ELO=m
+# CONFIG_HID_EVISION is not set
+CONFIG_HID_EZKEY=m
+# CONFIG_HID_FT260 is not set
+CONFIG_HID_GEMBIRD=m
+CONFIG_HID_GFRM=m
+# CONFIG_HID_GLORIOUS is not set
+CONFIG_HID_HOLTEK=m
+# CONFIG_HOLTEK_FF is not set
+# CONFIG_HID_GOOGLE_STADIA_FF is not set
+# CONFIG_HID_VIVALDI is not set
+CONFIG_HID_GT683R=m
+CONFIG_HID_KEYTOUCH=m
+CONFIG_HID_KYE=m
+CONFIG_HID_UCLOGIC=m
+CONFIG_HID_WALTOP=m
+# CONFIG_HID_VIEWSONIC is not set
+# CONFIG_HID_VRC2 is not set
+# CONFIG_HID_XIAOMI is not set
+CONFIG_HID_GYRATION=m
+CONFIG_HID_ICADE=m
+CONFIG_HID_ITE=m
+CONFIG_HID_JABRA=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LCPOWER=m
+CONFIG_HID_LED=m
+CONFIG_HID_LENOVO=m
+# CONFIG_HID_LETSKETCH is not set
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_LOGITECH_HIDPP=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_LOGIWHEELS_FF=y
+CONFIG_HID_MAGICMOUSE=y
+# CONFIG_HID_MALTRON is not set
+# CONFIG_HID_MAYFLASH is not set
+# CONFIG_HID_MEGAWORLD_FF is not set
+# CONFIG_HID_REDRAGON is not set
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_MULTITOUCH=m
+# CONFIG_HID_NINTENDO is not set
+CONFIG_HID_NTI=m
+CONFIG_HID_NTRIG=y
+# CONFIG_HID_NVIDIA_SHIELD is not set
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PENMOUNT=m
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_PICOLCD=m
+# CONFIG_HID_PICOLCD_FB is not set
+# CONFIG_HID_PICOLCD_BACKLIGHT is not set
+# CONFIG_HID_PICOLCD_LCD is not set
+# CONFIG_HID_PICOLCD_LEDS is not set
+# CONFIG_HID_PICOLCD_CIR is not set
+CONFIG_HID_PLANTRONICS=m
+# CONFIG_HID_PXRC is not set
+# CONFIG_HID_RAZER is not set
+CONFIG_HID_PRIMAX=m
+# CONFIG_HID_RETRODE is not set
+CONFIG_HID_ROCCAT=m
+CONFIG_HID_SAITEK=m
+CONFIG_HID_SAMSUNG=m
+# CONFIG_HID_SEMITEK is not set
+# CONFIG_HID_SIGMAMICRO is not set
+CONFIG_HID_SONY=m
+CONFIG_SONY_FF=y
+CONFIG_HID_SPEEDLINK=m
+# CONFIG_HID_STEAM is not set
+CONFIG_HID_STEELSERIES=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_RMI=m
+CONFIG_HID_GREENASIA=m
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=m
+# CONFIG_SMARTJOYPLUS_FF is not set
+CONFIG_HID_TIVO=m
+CONFIG_HID_TOPSEED=m
+# CONFIG_HID_TOPRE is not set
+CONFIG_HID_THINGM=m
+CONFIG_HID_THRUSTMASTER=m
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_HID_UDRAW_PS3 is not set
+# CONFIG_HID_U2FZERO is not set
+CONFIG_HID_WACOM=m
+CONFIG_HID_WIIMOTE=m
+CONFIG_HID_XINMO=m
+CONFIG_HID_ZEROPLUS=m
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_HID_ZYDACRON=m
+CONFIG_HID_SENSOR_HUB=y
+CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
+CONFIG_HID_ALPS=m
+# CONFIG_HID_MCP2221 is not set
+# end of Special HID drivers
+
+#
+# HID-BPF support
+#
+# CONFIG_HID_BPF is not set
+# end of HID-BPF support
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+# end of USB HID support
+
+CONFIG_I2C_HID=m
+# CONFIG_I2C_HID_ACPI is not set
+# CONFIG_I2C_HID_OF is not set
+# CONFIG_I2C_HID_OF_ELAN is not set
+# CONFIG_I2C_HID_OF_GOODIX is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_LED_TRIG=y
+# CONFIG_USB_ULPI_BUS is not set
+# CONFIG_USB_CONN_GPIO is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+CONFIG_USB_PCI=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_FEW_INIT_RETRIES is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_PRODUCTLIST is not set
+# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set
+CONFIG_USB_LEDS_TRIGGER_USBPORT=m
+CONFIG_USB_AUTOSUSPEND_DELAY=2
+CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_DBGCAP=y
+CONFIG_USB_XHCI_PCI=y
+# CONFIG_USB_XHCI_PCI_RENESAS is not set
+CONFIG_USB_XHCI_PLATFORM=m
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_FSL is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_MAX3421_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PCI=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_UHCI_HCD=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HCD_BCMA is not set
+# CONFIG_USB_HCD_TEST_MODE is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_REALTEK_AUTOPM=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_ONETOUCH=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_STORAGE_ENE_UB6250=m
+CONFIG_USB_UAS=m
+
+#
+# USB Imaging devices
+#
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+# CONFIG_USBIP_CORE is not set
+
+#
+# USB dual-mode controller drivers
+#
+# CONFIG_USB_CDNS_SUPPORT is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_HOST=y
+
+#
+# Gadget/Dual-role mode requires USB Gadget support to be enabled
+#
+# CONFIG_USB_DWC2_PERIPHERAL is not set
+# CONFIG_USB_DWC2_DUAL_ROLE is not set
+# CONFIG_USB_DWC2_PCI is not set
+# CONFIG_USB_DWC2_DEBUG is not set
+# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
+# CONFIG_USB_CHIPIDEA is not set
+# CONFIG_USB_ISP1760 is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_SIMPLE is not set
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+# CONFIG_USB_SERIAL_F81232 is not set
+CONFIG_USB_SERIAL_F8153X=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_METRO is not set
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7715_PARPORT=y
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MXUPORT=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QCAUX=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_SYMBOL=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_WWAN=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_XSENS_MT=m
+# CONFIG_USB_SERIAL_WISHBONE is not set
+CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_QT2=m
+CONFIG_USB_SERIAL_UPD78F0730=m
+# CONFIG_USB_SERIAL_XR is not set
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_USS720=m
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_APPLEDISPLAY=m
+# CONFIG_APPLE_MFI_FASTCHARGE is not set
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_LD=m
+# CONFIG_USB_TRANCEVIBRATOR is not set
+CONFIG_USB_IOWARRIOR=m
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
+CONFIG_USB_ISIGHTFW=m
+# CONFIG_USB_YUREX is not set
+CONFIG_USB_EZUSB_FX2=m
+# CONFIG_USB_HUB_USB251XB is not set
+CONFIG_USB_HSIC_USB3503=m
+# CONFIG_USB_HSIC_USB4604 is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
+# CONFIG_USB_CHAOSKEY is not set
+# CONFIG_USB_ONBOARD_HUB is not set
+CONFIG_USB_ATM=m
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_CXACRU=m
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_XUSBATM=m
+
+#
+# USB Physical Layer drivers
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# end of USB Physical Layer drivers
+
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+
+#
+# USB Peripheral Controller
+#
+# CONFIG_USB_GR_UDC is not set
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_PXA27X is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_MV_U3D is not set
+# CONFIG_USB_SNP_UDC_PLAT is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_BDC_UDC is not set
+# CONFIG_USB_AMD5536UDC is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_USB_GOKU is not set
+# CONFIG_USB_EG20T is not set
+# CONFIG_USB_GADGET_XILINX is not set
+# CONFIG_USB_MAX3420_UDC is not set
+# CONFIG_USB_CDNS2_UDC is not set
+# CONFIG_USB_DUMMY_HCD is not set
+# end of USB Peripheral Controller
+
+# CONFIG_USB_CONFIGFS is not set
+
+#
+# USB Gadget precomposed configurations
+#
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_GADGET_TARGET is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+# CONFIG_USB_RAW_GADGET is not set
+# end of USB Gadget precomposed configurations
+
+CONFIG_TYPEC=m
+CONFIG_TYPEC_TCPM=m
+CONFIG_TYPEC_TCPCI=m
+CONFIG_TYPEC_RT1711H=m
+# CONFIG_TYPEC_TCPCI_MAXIM is not set
+CONFIG_TYPEC_FUSB302=m
+CONFIG_TYPEC_UCSI=m
+# CONFIG_UCSI_CCG is not set
+CONFIG_UCSI_ACPI=m
+# CONFIG_UCSI_STM32G0 is not set
+CONFIG_TYPEC_TPS6598X=m
+# CONFIG_TYPEC_ANX7411 is not set
+# CONFIG_TYPEC_RT1719 is not set
+# CONFIG_TYPEC_HD3SS3220 is not set
+# CONFIG_TYPEC_STUSB160X is not set
+# CONFIG_TYPEC_WUSB3801 is not set
+
+#
+# USB Type-C Multiplexer/DeMultiplexer Switch support
+#
+# CONFIG_TYPEC_MUX_FSA4480 is not set
+# CONFIG_TYPEC_MUX_GPIO_SBU is not set
+CONFIG_TYPEC_MUX_PI3USB30532=m
+# CONFIG_TYPEC_MUX_NB7VPQ904M is not set
+# end of USB Type-C Multiplexer/DeMultiplexer Switch support
+
+#
+# USB Type-C Alternate Mode drivers
+#
+CONFIG_TYPEC_DP_ALTMODE=m
+# CONFIG_TYPEC_NVIDIA_ALTMODE is not set
+# end of USB Type-C Alternate Mode drivers
+
+CONFIG_USB_ROLE_SWITCH=y
+CONFIG_MMC=m
+CONFIG_PWRSEQ_EMMC=m
+# CONFIG_PWRSEQ_SD8787 is not set
+CONFIG_PWRSEQ_SIMPLE=m
+CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_SDIO_UART=m
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_SDHCI=m
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
+CONFIG_MMC_SDHCI_PCI=m
+CONFIG_MMC_RICOH_MMC=y
+CONFIG_MMC_SDHCI_ACPI=m
+CONFIG_MMC_SDHCI_PLTFM=m
+# CONFIG_MMC_SDHCI_OF_ARASAN is not set
+# CONFIG_MMC_SDHCI_OF_AT91 is not set
+# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set
+# CONFIG_MMC_SDHCI_CADENCE is not set
+# CONFIG_MMC_SDHCI_F_SDH30 is not set
+# CONFIG_MMC_SDHCI_MILBEAUT is not set
+CONFIG_MMC_TIFM_SD=m
+# CONFIG_MMC_SPI is not set
+CONFIG_MMC_CB710=m
+CONFIG_MMC_VIA_SDMMC=m
+CONFIG_MMC_VUB300=m
+CONFIG_MMC_USHC=m
+# CONFIG_MMC_USDHI6ROL0 is not set
+CONFIG_MMC_REALTEK_PCI=m
+CONFIG_MMC_REALTEK_USB=m
+CONFIG_MMC_CQHCI=m
+# CONFIG_MMC_HSQ is not set
+# CONFIG_MMC_TOSHIBA_PCI is not set
+# CONFIG_MMC_MTK is not set
+CONFIG_MMC_SDHCI_XENON=m
+# CONFIG_MMC_SDHCI_OMAP is not set
+# CONFIG_MMC_SDHCI_AM654 is not set
+# CONFIG_SCSI_UFSHCD is not set
+CONFIG_MEMSTICK=m
+# CONFIG_MEMSTICK_DEBUG is not set
+
+#
+# MemoryStick drivers
+#
+# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
+CONFIG_MSPRO_BLOCK=m
+# CONFIG_MS_BLOCK is not set
+
+#
+# MemoryStick Host Controller Drivers
+#
+CONFIG_MEMSTICK_TIFM_MS=m
+CONFIG_MEMSTICK_JMICRON_38X=m
+CONFIG_MEMSTICK_R592=m
+CONFIG_MEMSTICK_REALTEK_PCI=m
+CONFIG_MEMSTICK_REALTEK_USB=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+# CONFIG_LEDS_CLASS_FLASH is not set
+# CONFIG_LEDS_CLASS_MULTICOLOR is not set
+# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_AN30259A is not set
+# CONFIG_LEDS_AW200XX is not set
+# CONFIG_LEDS_AW2013 is not set
+# CONFIG_LEDS_BCM6328 is not set
+# CONFIG_LEDS_BCM6358 is not set
+# CONFIG_LEDS_CR0014114 is not set
+# CONFIG_LEDS_EL15203000 is not set
+CONFIG_LEDS_LM3530=m
+# CONFIG_LEDS_LM3532 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_LM3692X is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+CONFIG_LEDS_LP3944=m
+# CONFIG_LEDS_LP3952 is not set
+# CONFIG_LEDS_LP50XX is not set
+# CONFIG_LEDS_LP55XX_COMMON is not set
+# CONFIG_LEDS_LP8860 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA963X is not set
+# CONFIG_LEDS_PCA995X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_BD2606MVV is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_TLC591XX is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_IS31FL319X is not set
+# CONFIG_LEDS_IS31FL32XX is not set
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
+CONFIG_LEDS_BLINKM=m
+# CONFIG_LEDS_SYSCON is not set
+# CONFIG_LEDS_MLXREG is not set
+# CONFIG_LEDS_USER is not set
+# CONFIG_LEDS_SPI_BYTE is not set
+# CONFIG_LEDS_LM3697 is not set
+
+#
+# Flash and Torch LED drivers
+#
+
+#
+# RGB LED drivers
+#
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_ONESHOT=m
+CONFIG_LEDS_TRIGGER_DISK=y
+# CONFIG_LEDS_TRIGGER_MTD is not set
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_ACTIVITY is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+CONFIG_LEDS_TRIGGER_TRANSIENT=m
+CONFIG_LEDS_TRIGGER_CAMERA=m
+# CONFIG_LEDS_TRIGGER_PANIC is not set
+# CONFIG_LEDS_TRIGGER_NETDEV is not set
+# CONFIG_LEDS_TRIGGER_PATTERN is not set
+CONFIG_LEDS_TRIGGER_AUDIO=y
+# CONFIG_LEDS_TRIGGER_TTY is not set
+
+#
+# Simple LED drivers
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_INFINIBAND_USER_MEM=y
+CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
+CONFIG_INFINIBAND_ADDR_TRANS=y
+CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
+CONFIG_INFINIBAND_VIRT_DMA=y
+CONFIG_INFINIBAND_BNXT_RE=m
+CONFIG_INFINIBAND_CXGB4=m
+# CONFIG_INFINIBAND_EFA is not set
+# CONFIG_INFINIBAND_ERDMA is not set
+# CONFIG_INFINIBAND_IRDMA is not set
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
+# CONFIG_INFINIBAND_MTHCA is not set
+# CONFIG_INFINIBAND_OCRDMA is not set
+CONFIG_INFINIBAND_VMWARE_PVRDMA=m
+CONFIG_RDMA_RXE=m
+# CONFIG_RDMA_SIW is not set
+CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INFINIBAND_IPOIB_CM=y
+CONFIG_INFINIBAND_IPOIB_DEBUG=y
+# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
+CONFIG_INFINIBAND_SRP=m
+CONFIG_INFINIBAND_SRPT=m
+CONFIG_INFINIBAND_ISER=m
+CONFIG_INFINIBAND_ISERT=m
+# CONFIG_INFINIBAND_RTRS_CLIENT is not set
+# CONFIG_INFINIBAND_RTRS_SERVER is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+CONFIG_RTC_NVMEM=y
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_ABB5ZES3 is not set
+# CONFIG_RTC_DRV_ABEOZ9 is not set
+# CONFIG_RTC_DRV_ABX80X is not set
+CONFIG_RTC_DRV_DS1307=m
+# CONFIG_RTC_DRV_DS1307_CENTURY is not set
+CONFIG_RTC_DRV_DS1374=m
+# CONFIG_RTC_DRV_DS1374_WDT is not set
+CONFIG_RTC_DRV_DS1672=m
+# CONFIG_RTC_DRV_HYM8563 is not set
+CONFIG_RTC_DRV_MAX6900=m
+# CONFIG_RTC_DRV_NCT3018Y is not set
+CONFIG_RTC_DRV_RS5C372=m
+CONFIG_RTC_DRV_ISL1208=m
+CONFIG_RTC_DRV_ISL12022=m
+# CONFIG_RTC_DRV_ISL12026 is not set
+CONFIG_RTC_DRV_X1205=m
+CONFIG_RTC_DRV_PCF8523=m
+# CONFIG_RTC_DRV_PCF85063 is not set
+# CONFIG_RTC_DRV_PCF85363 is not set
+CONFIG_RTC_DRV_PCF8563=m
+CONFIG_RTC_DRV_PCF8583=m
+CONFIG_RTC_DRV_M41T80=m
+CONFIG_RTC_DRV_M41T80_WDT=y
+CONFIG_RTC_DRV_BQ32K=m
+# CONFIG_RTC_DRV_S35390A is not set
+CONFIG_RTC_DRV_FM3130=m
+# CONFIG_RTC_DRV_RX8010 is not set
+CONFIG_RTC_DRV_RX8581=m
+CONFIG_RTC_DRV_RX8025=m
+CONFIG_RTC_DRV_EM3027=m
+# CONFIG_RTC_DRV_RV3028 is not set
+# CONFIG_RTC_DRV_RV3032 is not set
+CONFIG_RTC_DRV_RV8803=m
+# CONFIG_RTC_DRV_SD3078 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1302 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1343 is not set
+# CONFIG_RTC_DRV_DS1347 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6916 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+CONFIG_RTC_DRV_RX4581=m
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_MCP795 is not set
+CONFIG_RTC_I2C_AND_SPI=y
+
+#
+# SPI and I2C RTC drivers
+#
+CONFIG_RTC_DRV_DS3232=m
+CONFIG_RTC_DRV_DS3232_HWMON=y
+# CONFIG_RTC_DRV_PCF2127 is not set
+CONFIG_RTC_DRV_RV3029C2=m
+# CONFIG_RTC_DRV_RV3029_HWMON is not set
+# CONFIG_RTC_DRV_RX6110 is not set
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_DS1286=m
+CONFIG_RTC_DRV_DS1511=m
+CONFIG_RTC_DRV_DS1553=m
+# CONFIG_RTC_DRV_DS1685_FAMILY is not set
+CONFIG_RTC_DRV_DS1742=m
+CONFIG_RTC_DRV_DS2404=m
+CONFIG_RTC_DRV_EFI=m
+CONFIG_RTC_DRV_STK17TA8=m
+# CONFIG_RTC_DRV_M48T86 is not set
+CONFIG_RTC_DRV_M48T35=m
+CONFIG_RTC_DRV_M48T59=m
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_RP5C01=m
+# CONFIG_RTC_DRV_ZYNQMP is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_CADENCE is not set
+# CONFIG_RTC_DRV_FTRTC010 is not set
+CONFIG_RTC_DRV_LOONGSON=y
+# CONFIG_RTC_DRV_R7301 is not set
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+# CONFIG_RTC_DRV_GOLDFISH is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_ACPI=y
+CONFIG_DMA_OF=y
+# CONFIG_ALTERA_MSGDMA is not set
+# CONFIG_DW_AXI_DMAC is not set
+# CONFIG_FSL_EDMA is not set
+# CONFIG_INTEL_IDMA64 is not set
+# CONFIG_PLX_DMA is not set
+# CONFIG_XILINX_DMA is not set
+# CONFIG_XILINX_XDMA is not set
+# CONFIG_XILINX_ZYNQMP_DPDMA is not set
+# CONFIG_QCOM_HIDMA_MGMT is not set
+# CONFIG_QCOM_HIDMA is not set
+CONFIG_DW_DMAC_CORE=m
+CONFIG_DW_DMAC=m
+# CONFIG_DW_DMAC_PCI is not set
+# CONFIG_DW_EDMA is not set
+# CONFIG_SF_PDMA is not set
+
+#
+# DMA Clients
+#
+CONFIG_ASYNC_TX_DMA=y
+# CONFIG_DMATEST is not set
+
+#
+# DMABUF options
+#
+CONFIG_SYNC_FILE=y
+# CONFIG_SW_SYNC is not set
+# CONFIG_UDMABUF is not set
+# CONFIG_DMABUF_MOVE_NOTIFY is not set
+# CONFIG_DMABUF_DEBUG is not set
+# CONFIG_DMABUF_SELFTESTS is not set
+# CONFIG_DMABUF_HEAPS is not set
+# CONFIG_DMABUF_SYSFS_STATS is not set
+# end of DMABUF options
+
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+# CONFIG_UIO_NETX is not set
+# CONFIG_UIO_PRUSS is not set
+# CONFIG_UIO_MF624 is not set
+CONFIG_VFIO=m
+CONFIG_VFIO_GROUP=y
+CONFIG_VFIO_CONTAINER=y
+CONFIG_VFIO_NOIOMMU=y
+CONFIG_VFIO_VIRQFD=y
+
+#
+# VFIO support for PCI devices
+#
+CONFIG_VFIO_PCI_CORE=m
+CONFIG_VFIO_PCI_MMAP=y
+CONFIG_VFIO_PCI_INTX=y
+CONFIG_VFIO_PCI=m
+# CONFIG_MLX5_VFIO_PCI is not set
+# end of VFIO support for PCI devices
+
+CONFIG_IRQ_BYPASS_MANAGER=m
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO_ANCHOR=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_PCI_LIB=y
+CONFIG_VIRTIO_PCI_LIB_LEGACY=y
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_INPUT=m
+CONFIG_VIRTIO_MMIO=m
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
+# CONFIG_VDPA is not set
+CONFIG_VHOST_IOTLB=m
+CONFIG_VHOST_TASK=y
+CONFIG_VHOST=m
+CONFIG_VHOST_MENU=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
+# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# end of Microsoft Hyper-V guest support
+
+# CONFIG_GREYBUS is not set
+CONFIG_COMEDI=m
+# CONFIG_COMEDI_DEBUG is not set
+CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
+CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
+# CONFIG_COMEDI_MISC_DRIVERS is not set
+# CONFIG_COMEDI_ISA_DRIVERS is not set
+CONFIG_COMEDI_PCI_DRIVERS=m
+CONFIG_COMEDI_8255_PCI=m
+# CONFIG_COMEDI_ADDI_APCI_1032 is not set
+# CONFIG_COMEDI_ADDI_APCI_1500 is not set
+# CONFIG_COMEDI_ADDI_APCI_1516 is not set
+# CONFIG_COMEDI_ADDI_APCI_1564 is not set
+# CONFIG_COMEDI_ADDI_APCI_16XX is not set
+# CONFIG_COMEDI_ADDI_APCI_2032 is not set
+# CONFIG_COMEDI_ADDI_APCI_2200 is not set
+# CONFIG_COMEDI_ADDI_APCI_3120 is not set
+# CONFIG_COMEDI_ADDI_APCI_3501 is not set
+# CONFIG_COMEDI_ADDI_APCI_3XXX is not set
+CONFIG_COMEDI_ADL_PCI6208=m
+CONFIG_COMEDI_ADL_PCI7X3X=m
+CONFIG_COMEDI_ADL_PCI8164=m
+CONFIG_COMEDI_ADL_PCI9111=m
+CONFIG_COMEDI_ADL_PCI9118=m
+CONFIG_COMEDI_ADV_PCI1710=m
+CONFIG_COMEDI_ADV_PCI1720=m
+CONFIG_COMEDI_ADV_PCI1723=m
+CONFIG_COMEDI_ADV_PCI1724=m
+CONFIG_COMEDI_ADV_PCI1760=m
+CONFIG_COMEDI_ADV_PCI_DIO=m
+# CONFIG_COMEDI_AMPLC_DIO200_PCI is not set
+# CONFIG_COMEDI_AMPLC_PC236_PCI is not set
+# CONFIG_COMEDI_AMPLC_PC263_PCI is not set
+# CONFIG_COMEDI_AMPLC_PCI224 is not set
+# CONFIG_COMEDI_AMPLC_PCI230 is not set
+# CONFIG_COMEDI_CONTEC_PCI_DIO is not set
+# CONFIG_COMEDI_DAS08_PCI is not set
+# CONFIG_COMEDI_DT3000 is not set
+# CONFIG_COMEDI_DYNA_PCI10XX is not set
+# CONFIG_COMEDI_GSC_HPDI is not set
+# CONFIG_COMEDI_MF6X4 is not set
+# CONFIG_COMEDI_ICP_MULTI is not set
+# CONFIG_COMEDI_DAQBOARD2000 is not set
+# CONFIG_COMEDI_JR3_PCI is not set
+# CONFIG_COMEDI_KE_COUNTER is not set
+# CONFIG_COMEDI_CB_PCIDAS64 is not set
+# CONFIG_COMEDI_CB_PCIDAS is not set
+# CONFIG_COMEDI_CB_PCIDDA is not set
+# CONFIG_COMEDI_CB_PCIMDAS is not set
+# CONFIG_COMEDI_CB_PCIMDDA is not set
+# CONFIG_COMEDI_ME4000 is not set
+# CONFIG_COMEDI_ME_DAQ is not set
+# CONFIG_COMEDI_NI_6527 is not set
+# CONFIG_COMEDI_NI_65XX is not set
+# CONFIG_COMEDI_NI_660X is not set
+# CONFIG_COMEDI_NI_670X is not set
+CONFIG_COMEDI_NI_LABPC_PCI=m
+CONFIG_COMEDI_NI_PCIDIO=m
+CONFIG_COMEDI_NI_PCIMIO=m
+# CONFIG_COMEDI_RTD520 is not set
+# CONFIG_COMEDI_S626 is not set
+CONFIG_COMEDI_MITE=m
+CONFIG_COMEDI_NI_TIOCMD=m
+# CONFIG_COMEDI_USB_DRIVERS is not set
+CONFIG_COMEDI_8254=m
+CONFIG_COMEDI_8255=m
+# CONFIG_COMEDI_8255_SA is not set
+# CONFIG_COMEDI_KCOMEDILIB is not set
+CONFIG_COMEDI_NI_LABPC=m
+CONFIG_COMEDI_NI_TIO=m
+CONFIG_COMEDI_NI_ROUTING=m
+# CONFIG_COMEDI_TESTS is not set
+CONFIG_STAGING=y
+# CONFIG_PRISM2_USB is not set
+# CONFIG_RTL8192U is not set
+# CONFIG_RTLLIB is not set
+# CONFIG_RTL8723BS is not set
+# CONFIG_R8712U is not set
+# CONFIG_RTS5208 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16203 is not set
+# CONFIG_ADIS16240 is not set
+# end of Accelerometers
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7816 is not set
+# end of Analog to digital converters
+
+#
+# Analog digital bi-direction converters
+#
+# CONFIG_ADT7316 is not set
+# end of Analog digital bi-direction converters
+
+#
+# Direct Digital Synthesis
+#
+# CONFIG_AD9832 is not set
+# CONFIG_AD9834 is not set
+# end of Direct Digital Synthesis
+
+#
+# Network Analyzer, Impedance Converters
+#
+# CONFIG_AD5933 is not set
+# end of Network Analyzer, Impedance Converters
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S1210 is not set
+# end of Resolver to digital converters
+# end of IIO staging drivers
+
+# CONFIG_FB_SM750 is not set
+# CONFIG_STAGING_MEDIA is not set
+# CONFIG_STAGING_BOARD is not set
+# CONFIG_LTE_GDM724X is not set
+# CONFIG_FB_TFT is not set
+# CONFIG_KS7010 is not set
+# CONFIG_PI433 is not set
+# CONFIG_XIL_AXIS_FIFO is not set
+# CONFIG_FIELDBUS_DEV is not set
+# CONFIG_QLGE is not set
+# CONFIG_VME_BUS is not set
+CONFIG_LOONGARCH_PLATFORM_DEVICES=y
+CONFIG_LOONGSON_LAPTOP=y
+# CONFIG_GOLDFISH is not set
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_COMMON_CLK=y
+# CONFIG_LMK04832 is not set
+# CONFIG_COMMON_CLK_MAX9485 is not set
+# CONFIG_COMMON_CLK_SI5341 is not set
+# CONFIG_COMMON_CLK_SI5351 is not set
+# CONFIG_COMMON_CLK_SI514 is not set
+# CONFIG_COMMON_CLK_SI544 is not set
+# CONFIG_COMMON_CLK_SI570 is not set
+# CONFIG_COMMON_CLK_CDCE706 is not set
+# CONFIG_COMMON_CLK_CDCE925 is not set
+# CONFIG_COMMON_CLK_CS2000_CP is not set
+# CONFIG_COMMON_CLK_AXI_CLKGEN is not set
+CONFIG_COMMON_CLK_LOONGSON2=y
+# CONFIG_COMMON_CLK_PWM is not set
+# CONFIG_COMMON_CLK_RS9_PCIE is not set
+# CONFIG_COMMON_CLK_SI521XX is not set
+# CONFIG_COMMON_CLK_VC3 is not set
+# CONFIG_COMMON_CLK_VC5 is not set
+# CONFIG_COMMON_CLK_VC7 is not set
+# CONFIG_COMMON_CLK_FIXED_MMIO is not set
+# CONFIG_XILINX_VCU is not set
+# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
+# CONFIG_HWSPINLOCK is not set
+
+#
+# Clock Source drivers
+#
+# end of Clock Source drivers
+
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_API=y
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Generic IOMMU Pagetable Support
+#
+# end of Generic IOMMU Pagetable Support
+
+# CONFIG_IOMMU_DEBUGFS is not set
+CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
+# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set
+# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
+CONFIG_OF_IOMMU=y
+# CONFIG_IOMMUFD is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_REMOTEPROC is not set
+# end of Remoteproc drivers
+
+#
+# Rpmsg drivers
+#
+# CONFIG_RPMSG_VIRTIO is not set
+# end of Rpmsg drivers
+
+# CONFIG_SOUNDWIRE is not set
+
+#
+# SOC (System On Chip) specific Drivers
+#
+
+#
+# Amlogic SoC drivers
+#
+# end of Amlogic SoC drivers
+
+#
+# Broadcom SoC drivers
+#
+# end of Broadcom SoC drivers
+
+#
+# NXP/Freescale QorIQ SoC drivers
+#
+# end of NXP/Freescale QorIQ SoC drivers
+
+#
+# fujitsu SoC drivers
+#
+# end of fujitsu SoC drivers
+
+#
+# i.MX SoC drivers
+#
+# end of i.MX SoC drivers
+
+#
+# Enable LiteX SoC Builder specific drivers
+#
+# CONFIG_LITEX_SOC_CONTROLLER is not set
+# end of Enable LiteX SoC Builder specific drivers
+
+CONFIG_LOONGSON2_GUTS=y
+CONFIG_LOONGSON2_PM=y
+# CONFIG_WPCM450_SOC is not set
+
+#
+# Qualcomm SoC drivers
+#
+# end of Qualcomm SoC drivers
+
+# CONFIG_SOC_TI is not set
+
+#
+# Xilinx SoC drivers
+#
+# end of Xilinx SoC drivers
+# end of SOC (System On Chip) specific Drivers
+
+CONFIG_PM_DEVFREQ=y
+
+#
+# DEVFREQ Governors
+#
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+# CONFIG_DEVFREQ_GOV_PASSIVE is not set
+
+#
+# DEVFREQ Drivers
+#
+# CONFIG_PM_DEVFREQ_EVENT is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MEMORY is not set
+CONFIG_IIO=m
+CONFIG_IIO_BUFFER=y
+# CONFIG_IIO_BUFFER_CB is not set
+# CONFIG_IIO_BUFFER_DMA is not set
+# CONFIG_IIO_BUFFER_DMAENGINE is not set
+# CONFIG_IIO_BUFFER_HW_CONSUMER is not set
+CONFIG_IIO_KFIFO_BUF=m
+CONFIG_IIO_TRIGGERED_BUFFER=m
+# CONFIG_IIO_CONFIGFS is not set
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+# CONFIG_IIO_SW_DEVICE is not set
+# CONFIG_IIO_SW_TRIGGER is not set
+# CONFIG_IIO_TRIGGERED_EVENT is not set
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16201 is not set
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADXL313_I2C is not set
+# CONFIG_ADXL313_SPI is not set
+# CONFIG_ADXL345_I2C is not set
+# CONFIG_ADXL345_SPI is not set
+# CONFIG_ADXL355_I2C is not set
+# CONFIG_ADXL355_SPI is not set
+# CONFIG_ADXL367_SPI is not set
+# CONFIG_ADXL367_I2C is not set
+# CONFIG_ADXL372_SPI is not set
+# CONFIG_ADXL372_I2C is not set
+# CONFIG_BMA180 is not set
+# CONFIG_BMA220 is not set
+# CONFIG_BMA400 is not set
+# CONFIG_BMC150_ACCEL is not set
+# CONFIG_BMI088_ACCEL is not set
+# CONFIG_DA280 is not set
+# CONFIG_DA311 is not set
+# CONFIG_DMARD06 is not set
+# CONFIG_DMARD09 is not set
+# CONFIG_DMARD10 is not set
+# CONFIG_FXLS8962AF_I2C is not set
+# CONFIG_FXLS8962AF_SPI is not set
+CONFIG_HID_SENSOR_ACCEL_3D=m
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+# CONFIG_IIO_KX022A_SPI is not set
+# CONFIG_IIO_KX022A_I2C is not set
+# CONFIG_KXSD9 is not set
+# CONFIG_KXCJK1013 is not set
+# CONFIG_MC3230 is not set
+# CONFIG_MMA7455_I2C is not set
+# CONFIG_MMA7455_SPI is not set
+# CONFIG_MMA7660 is not set
+# CONFIG_MMA8452 is not set
+# CONFIG_MMA9551 is not set
+# CONFIG_MMA9553 is not set
+# CONFIG_MSA311 is not set
+# CONFIG_MXC4005 is not set
+# CONFIG_MXC6255 is not set
+# CONFIG_SCA3000 is not set
+# CONFIG_SCA3300 is not set
+# CONFIG_STK8312 is not set
+# CONFIG_STK8BA50 is not set
+# end of Accelerometers
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD4130 is not set
+# CONFIG_AD7091R5 is not set
+# CONFIG_AD7124 is not set
+# CONFIG_AD7192 is not set
+# CONFIG_AD7266 is not set
+# CONFIG_AD7280 is not set
+# CONFIG_AD7291 is not set
+# CONFIG_AD7292 is not set
+# CONFIG_AD7298 is not set
+# CONFIG_AD7476 is not set
+# CONFIG_AD7606_IFACE_PARALLEL is not set
+# CONFIG_AD7606_IFACE_SPI is not set
+# CONFIG_AD7766 is not set
+# CONFIG_AD7768_1 is not set
+# CONFIG_AD7780 is not set
+# CONFIG_AD7791 is not set
+# CONFIG_AD7793 is not set
+# CONFIG_AD7887 is not set
+# CONFIG_AD7923 is not set
+# CONFIG_AD7949 is not set
+# CONFIG_AD799X is not set
+# CONFIG_ADI_AXI_ADC is not set
+# CONFIG_ENVELOPE_DETECTOR is not set
+# CONFIG_HI8435 is not set
+# CONFIG_HX711 is not set
+# CONFIG_INA2XX_ADC is not set
+# CONFIG_LTC2471 is not set
+# CONFIG_LTC2485 is not set
+# CONFIG_LTC2496 is not set
+# CONFIG_LTC2497 is not set
+# CONFIG_MAX1027 is not set
+# CONFIG_MAX11100 is not set
+# CONFIG_MAX1118 is not set
+# CONFIG_MAX11205 is not set
+# CONFIG_MAX11410 is not set
+# CONFIG_MAX1241 is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_MAX9611 is not set
+# CONFIG_MCP320X is not set
+# CONFIG_MCP3422 is not set
+# CONFIG_MCP3911 is not set
+# CONFIG_NAU7802 is not set
+# CONFIG_RICHTEK_RTQ6056 is not set
+# CONFIG_SD_ADC_MODULATOR is not set
+# CONFIG_TI_ADC081C is not set
+# CONFIG_TI_ADC0832 is not set
+# CONFIG_TI_ADC084S021 is not set
+# CONFIG_TI_ADC12138 is not set
+# CONFIG_TI_ADC108S102 is not set
+# CONFIG_TI_ADC128S052 is not set
+# CONFIG_TI_ADC161S626 is not set
+# CONFIG_TI_ADS1015 is not set
+# CONFIG_TI_ADS7924 is not set
+# CONFIG_TI_ADS1100 is not set
+# CONFIG_TI_ADS7950 is not set
+# CONFIG_TI_ADS8344 is not set
+# CONFIG_TI_ADS8688 is not set
+# CONFIG_TI_ADS124S08 is not set
+# CONFIG_TI_ADS131E08 is not set
+# CONFIG_TI_LMP92064 is not set
+# CONFIG_TI_TLC4541 is not set
+# CONFIG_TI_TSC2046 is not set
+# CONFIG_VF610_ADC is not set
+# CONFIG_VIPERBOARD_ADC is not set
+# CONFIG_XILINX_XADC is not set
+# end of Analog to digital converters
+
+#
+# Analog to digital and digital to analog converters
+#
+# CONFIG_AD74115 is not set
+# CONFIG_AD74413R is not set
+# end of Analog to digital and digital to analog converters
+
+#
+# Analog Front Ends
+#
+# CONFIG_IIO_RESCALE is not set
+# end of Analog Front Ends
+
+#
+# Amplifiers
+#
+# CONFIG_AD8366 is not set
+# CONFIG_ADA4250 is not set
+# CONFIG_HMC425 is not set
+# end of Amplifiers
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7746 is not set
+# end of Capacitance to digital converters
+
+#
+# Chemical Sensors
+#
+# CONFIG_ATLAS_PH_SENSOR is not set
+# CONFIG_ATLAS_EZO_SENSOR is not set
+# CONFIG_BME680 is not set
+# CONFIG_CCS811 is not set
+# CONFIG_IAQCORE is not set
+# CONFIG_SCD30_CORE is not set
+# CONFIG_SCD4X is not set
+# CONFIG_SENSIRION_SGP30 is not set
+# CONFIG_SENSIRION_SGP40 is not set
+# CONFIG_SPS30_I2C is not set
+# CONFIG_SENSEAIR_SUNRISE_CO2 is not set
+# CONFIG_VZ89X is not set
+# end of Chemical Sensors
+
+#
+# Hid Sensor IIO Common
+#
+CONFIG_HID_SENSOR_IIO_COMMON=m
+CONFIG_HID_SENSOR_IIO_TRIGGER=m
+# end of Hid Sensor IIO Common
+
+#
+# IIO SCMI Sensors
+#
+# end of IIO SCMI Sensors
+
+#
+# SSP Sensor Common
+#
+# CONFIG_IIO_SSP_SENSORHUB is not set
+# end of SSP Sensor Common
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD3552R is not set
+# CONFIG_AD5064 is not set
+# CONFIG_AD5360 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5421 is not set
+# CONFIG_AD5446 is not set
+# CONFIG_AD5449 is not set
+# CONFIG_AD5592R is not set
+# CONFIG_AD5593R is not set
+# CONFIG_AD5504 is not set
+# CONFIG_AD5624R_SPI is not set
+# CONFIG_LTC2688 is not set
+# CONFIG_AD5686_SPI is not set
+# CONFIG_AD5696_I2C is not set
+# CONFIG_AD5755 is not set
+# CONFIG_AD5758 is not set
+# CONFIG_AD5761 is not set
+# CONFIG_AD5764 is not set
+# CONFIG_AD5766 is not set
+# CONFIG_AD5770R is not set
+# CONFIG_AD5791 is not set
+# CONFIG_AD7293 is not set
+# CONFIG_AD7303 is not set
+# CONFIG_AD8801 is not set
+# CONFIG_DPOT_DAC is not set
+# CONFIG_DS4424 is not set
+# CONFIG_LTC1660 is not set
+# CONFIG_LTC2632 is not set
+# CONFIG_M62332 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MAX5522 is not set
+# CONFIG_MAX5821 is not set
+# CONFIG_MCP4725 is not set
+# CONFIG_MCP4728 is not set
+# CONFIG_MCP4922 is not set
+# CONFIG_TI_DAC082S085 is not set
+# CONFIG_TI_DAC5571 is not set
+# CONFIG_TI_DAC7311 is not set
+# CONFIG_TI_DAC7612 is not set
+# CONFIG_VF610_DAC is not set
+# end of Digital to analog converters
+
+#
+# IIO dummy driver
+#
+# end of IIO dummy driver
+
+#
+# Filters
+#
+# CONFIG_ADMV8818 is not set
+# end of Filters
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+# CONFIG_AD9523 is not set
+# end of Clock Generator/Distribution
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# CONFIG_ADF4350 is not set
+# CONFIG_ADF4371 is not set
+# CONFIG_ADF4377 is not set
+# CONFIG_ADMV1013 is not set
+# CONFIG_ADMV1014 is not set
+# CONFIG_ADMV4420 is not set
+# CONFIG_ADRF6780 is not set
+# end of Phase-Locked Loop (PLL) frequency synthesizers
+# end of Frequency Synthesizers DDS/PLL
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16080 is not set
+# CONFIG_ADIS16130 is not set
+# CONFIG_ADIS16136 is not set
+# CONFIG_ADIS16260 is not set
+# CONFIG_ADXRS290 is not set
+# CONFIG_ADXRS450 is not set
+# CONFIG_BMG160 is not set
+# CONFIG_FXAS21002C is not set
+CONFIG_HID_SENSOR_GYRO_3D=m
+# CONFIG_MPU3050_I2C is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+# end of Digital gyroscope sensors
+
+#
+# Health Sensors
+#
+
+#
+# Heart Rate Monitors
+#
+# CONFIG_AFE4403 is not set
+# CONFIG_AFE4404 is not set
+# CONFIG_MAX30100 is not set
+# CONFIG_MAX30102 is not set
+# end of Heart Rate Monitors
+# end of Health Sensors
+
+#
+# Humidity sensors
+#
+# CONFIG_AM2315 is not set
+# CONFIG_DHT11 is not set
+# CONFIG_HDC100X is not set
+# CONFIG_HDC2010 is not set
+CONFIG_HID_SENSOR_HUMIDITY=m
+# CONFIG_HTS221 is not set
+# CONFIG_HTU21 is not set
+# CONFIG_SI7005 is not set
+# CONFIG_SI7020 is not set
+# end of Humidity sensors
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16400 is not set
+# CONFIG_ADIS16460 is not set
+# CONFIG_ADIS16475 is not set
+# CONFIG_ADIS16480 is not set
+# CONFIG_BMI160_I2C is not set
+# CONFIG_BMI160_SPI is not set
+# CONFIG_BOSCH_BNO055_I2C is not set
+# CONFIG_FXOS8700_I2C is not set
+# CONFIG_FXOS8700_SPI is not set
+# CONFIG_KMX61 is not set
+# CONFIG_INV_ICM42600_I2C is not set
+# CONFIG_INV_ICM42600_SPI is not set
+# CONFIG_INV_MPU6050_I2C is not set
+# CONFIG_INV_MPU6050_SPI is not set
+# CONFIG_IIO_ST_LSM6DSX is not set
+# CONFIG_IIO_ST_LSM9DS0 is not set
+# end of Inertial measurement units
+
+#
+# Light sensors
+#
+# CONFIG_ACPI_ALS is not set
+# CONFIG_ADJD_S311 is not set
+# CONFIG_ADUX1020 is not set
+# CONFIG_AL3010 is not set
+# CONFIG_AL3320A is not set
+# CONFIG_APDS9300 is not set
+# CONFIG_APDS9960 is not set
+# CONFIG_AS73211 is not set
+# CONFIG_BH1750 is not set
+# CONFIG_BH1780 is not set
+# CONFIG_CM32181 is not set
+# CONFIG_CM3232 is not set
+# CONFIG_CM3323 is not set
+# CONFIG_CM3605 is not set
+# CONFIG_CM36651 is not set
+# CONFIG_GP2AP002 is not set
+# CONFIG_GP2AP020A00F is not set
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_ISL29125 is not set
+CONFIG_HID_SENSOR_ALS=m
+CONFIG_HID_SENSOR_PROX=m
+# CONFIG_JSA1212 is not set
+# CONFIG_ROHM_BU27008 is not set
+# CONFIG_ROHM_BU27034 is not set
+# CONFIG_RPR0521 is not set
+# CONFIG_LTR501 is not set
+# CONFIG_LTRF216A is not set
+# CONFIG_LV0104CS is not set
+# CONFIG_MAX44000 is not set
+# CONFIG_MAX44009 is not set
+# CONFIG_NOA1305 is not set
+# CONFIG_OPT3001 is not set
+# CONFIG_OPT4001 is not set
+# CONFIG_PA12203001 is not set
+# CONFIG_SI1133 is not set
+# CONFIG_SI1145 is not set
+# CONFIG_STK3310 is not set
+# CONFIG_ST_UVIS25 is not set
+# CONFIG_TCS3414 is not set
+# CONFIG_TCS3472 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2591 is not set
+# CONFIG_TSL2772 is not set
+# CONFIG_TSL4531 is not set
+# CONFIG_US5182D is not set
+# CONFIG_VCNL4000 is not set
+# CONFIG_VCNL4035 is not set
+# CONFIG_VEML6030 is not set
+# CONFIG_VEML6070 is not set
+# CONFIG_VL6180 is not set
+# CONFIG_ZOPT2201 is not set
+# end of Light sensors
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8974 is not set
+# CONFIG_AK8975 is not set
+# CONFIG_AK09911 is not set
+# CONFIG_BMC150_MAGN_I2C is not set
+# CONFIG_BMC150_MAGN_SPI is not set
+# CONFIG_MAG3110 is not set
+CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
+# CONFIG_MMC35240 is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_SENSORS_HMC5843_I2C is not set
+# CONFIG_SENSORS_HMC5843_SPI is not set
+# CONFIG_SENSORS_RM3100_I2C is not set
+# CONFIG_SENSORS_RM3100_SPI is not set
+# CONFIG_TI_TMAG5273 is not set
+# CONFIG_YAMAHA_YAS530 is not set
+# end of Magnetometer sensors
+
+#
+# Multiplexers
+#
+# CONFIG_IIO_MUX is not set
+# end of Multiplexers
+
+#
+# Inclinometer sensors
+#
+CONFIG_HID_SENSOR_INCLINOMETER_3D=m
+CONFIG_HID_SENSOR_DEVICE_ROTATION=m
+# end of Inclinometer sensors
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_INTERRUPT_TRIGGER is not set
+# CONFIG_IIO_SYSFS_TRIGGER is not set
+# end of Triggers - standalone
+
+#
+# Linear and angular position sensors
+#
+# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set
+# end of Linear and angular position sensors
+
+#
+# Digital potentiometers
+#
+# CONFIG_AD5110 is not set
+# CONFIG_AD5272 is not set
+# CONFIG_DS1803 is not set
+# CONFIG_MAX5432 is not set
+# CONFIG_MAX5481 is not set
+# CONFIG_MAX5487 is not set
+# CONFIG_MCP4018 is not set
+# CONFIG_MCP4131 is not set
+# CONFIG_MCP4531 is not set
+# CONFIG_MCP41010 is not set
+# CONFIG_TPL0102 is not set
+# CONFIG_X9250 is not set
+# end of Digital potentiometers
+
+#
+# Digital potentiostats
+#
+# CONFIG_LMP91000 is not set
+# end of Digital potentiostats
+
+#
+# Pressure sensors
+#
+# CONFIG_ABP060MG is not set
+# CONFIG_BMP280 is not set
+# CONFIG_DLHL60D is not set
+# CONFIG_DPS310 is not set
+CONFIG_HID_SENSOR_PRESS=m
+# CONFIG_HP03 is not set
+# CONFIG_ICP10100 is not set
+# CONFIG_MPL115_I2C is not set
+# CONFIG_MPL115_SPI is not set
+# CONFIG_MPL3115 is not set
+# CONFIG_MPRLS0025PA is not set
+# CONFIG_MS5611 is not set
+# CONFIG_MS5637 is not set
+# CONFIG_IIO_ST_PRESS is not set
+# CONFIG_T5403 is not set
+# CONFIG_HP206C is not set
+# CONFIG_ZPA2326 is not set
+# end of Pressure sensors
+
+#
+# Lightning sensors
+#
+# CONFIG_AS3935 is not set
+# end of Lightning sensors
+
+#
+# Proximity and distance sensors
+#
+# CONFIG_IRSD200 is not set
+# CONFIG_ISL29501 is not set
+# CONFIG_LIDAR_LITE_V2 is not set
+# CONFIG_MB1232 is not set
+# CONFIG_PING is not set
+# CONFIG_RFD77402 is not set
+# CONFIG_SRF04 is not set
+# CONFIG_SX9310 is not set
+# CONFIG_SX9324 is not set
+# CONFIG_SX9360 is not set
+# CONFIG_SX9500 is not set
+# CONFIG_SRF08 is not set
+# CONFIG_VCNL3020 is not set
+# CONFIG_VL53L0X_I2C is not set
+# end of Proximity and distance sensors
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S90 is not set
+# CONFIG_AD2S1200 is not set
+# end of Resolver to digital converters
+
+#
+# Temperature sensors
+#
+# CONFIG_LTC2983 is not set
+# CONFIG_MAXIM_THERMOCOUPLE is not set
+CONFIG_HID_SENSOR_TEMP=m
+# CONFIG_MLX90614 is not set
+# CONFIG_MLX90632 is not set
+# CONFIG_TMP006 is not set
+# CONFIG_TMP007 is not set
+# CONFIG_TMP117 is not set
+# CONFIG_TSYS01 is not set
+# CONFIG_TSYS02D is not set
+# CONFIG_MAX30208 is not set
+# CONFIG_MAX31856 is not set
+# CONFIG_MAX31865 is not set
+# end of Temperature sensors
+
+CONFIG_NTB=m
+# CONFIG_NTB_MSI is not set
+# CONFIG_NTB_IDT is not set
+# CONFIG_NTB_EPF is not set
+# CONFIG_NTB_SWITCHTEC is not set
+CONFIG_NTB_PINGPONG=m
+CONFIG_NTB_TOOL=m
+CONFIG_NTB_PERF=m
+CONFIG_NTB_TRANSPORT=m
+CONFIG_PWM=y
+CONFIG_PWM_SYSFS=y
+# CONFIG_PWM_DEBUG is not set
+# CONFIG_PWM_ATMEL_TCB is not set
+# CONFIG_PWM_CLK is not set
+# CONFIG_PWM_DWC is not set
+# CONFIG_PWM_FSL_FTM is not set
+# CONFIG_PWM_PCA9685 is not set
+# CONFIG_PWM_XILINX is not set
+
+#
+# IRQ chip support
+#
+CONFIG_IRQCHIP=y
+# CONFIG_AL_FIC is not set
+# CONFIG_XILINX_INTC is not set
+CONFIG_IRQ_LOONGARCH_CPU=y
+CONFIG_LOONGSON_LIOINTC=y
+CONFIG_LOONGSON_EIOINTC=y
+CONFIG_LOONGSON_HTVEC=y
+CONFIG_LOONGSON_PCH_PIC=y
+CONFIG_LOONGSON_PCH_MSI=y
+CONFIG_LOONGSON_PCH_LPC=y
+# end of IRQ chip support
+
+# CONFIG_IPACK_BUS is not set
+CONFIG_RESET_CONTROLLER=y
+# CONFIG_RESET_SIMPLE is not set
+# CONFIG_RESET_TI_SYSCON is not set
+# CONFIG_RESET_TI_TPS380X is not set
+
+#
+# PHY Subsystem
+#
+# CONFIG_GENERIC_PHY is not set
+# CONFIG_PHY_CAN_TRANSCEIVER is not set
+
+#
+# PHY drivers for Broadcom platforms
+#
+# CONFIG_BCM_KONA_USB2_PHY is not set
+# end of PHY drivers for Broadcom platforms
+
+# CONFIG_PHY_CADENCE_TORRENT is not set
+# CONFIG_PHY_CADENCE_DPHY is not set
+# CONFIG_PHY_CADENCE_DPHY_RX is not set
+# CONFIG_PHY_CADENCE_SIERRA is not set
+# CONFIG_PHY_CADENCE_SALVO is not set
+# CONFIG_PHY_PXA_28NM_HSIC is not set
+# CONFIG_PHY_PXA_28NM_USB2 is not set
+# CONFIG_PHY_LAN966X_SERDES is not set
+# CONFIG_PHY_CPCAP_USB is not set
+# CONFIG_PHY_MAPPHONE_MDM6600 is not set
+# CONFIG_PHY_OCELOT_SERDES is not set
+# CONFIG_PHY_SAMSUNG_USB2 is not set
+# end of PHY Subsystem
+
+CONFIG_POWERCAP=y
+# CONFIG_DTPM is not set
+# CONFIG_MCB is not set
+
+#
+# Performance monitor support
+#
+# CONFIG_DWC_PCIE_PMU is not set
+# end of Performance monitor support
+
+CONFIG_RAS=y
+CONFIG_USB4=m
+# CONFIG_USB4_DEBUGFS_WRITE is not set
+# CONFIG_USB4_DMA_TEST is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID_BINDER_IPC is not set
+# end of Android
+
+# CONFIG_LIBNVDIMM is not set
+CONFIG_DAX=y
+CONFIG_DEV_DAX=m
+CONFIG_DEV_DAX_KMEM=m
+CONFIG_NVMEM=y
+CONFIG_NVMEM_SYSFS=y
+
+#
+# Layout Types
+#
+# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set
+# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set
+# end of Layout Types
+
+# CONFIG_NVMEM_RMEM is not set
+# CONFIG_NVMEM_U_BOOT_ENV is not set
+
+#
+# HW tracing support
+#
+# CONFIG_STM is not set
+# CONFIG_INTEL_TH is not set
+# end of HW tracing support
+
+# CONFIG_FPGA is not set
+# CONFIG_FSI is not set
+CONFIG_PM_OPP=y
+# CONFIG_SIOX is not set
+# CONFIG_SLIMBUS is not set
+# CONFIG_INTERCONNECT is not set
+# CONFIG_COUNTER is not set
+# CONFIG_MOST is not set
+# CONFIG_PECI is not set
+# CONFIG_HTE is not set
+# end of Device Drivers
+
+#
+# File systems
+#
+# CONFIG_VALIDATE_FS_PARSER is not set
+CONFIG_FS_IOMAP=y
+CONFIG_BUFFER_HEAD=y
+CONFIG_LEGACY_DIRECT_IO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_ONLINE_SCRUB is not set
+# CONFIG_XFS_WARN is not set
+# CONFIG_XFS_DEBUG is not set
+CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
+CONFIG_OCFS2_FS=m
+CONFIG_OCFS2_FS_O2CB=m
+CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+CONFIG_OCFS2_FS_STATS=y
+CONFIG_OCFS2_DEBUG_MASKLOG=y
+# CONFIG_OCFS2_DEBUG_FS is not set
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
+# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
+# CONFIG_BTRFS_DEBUG is not set
+# CONFIG_BTRFS_ASSERT is not set
+# CONFIG_BTRFS_FS_REF_VERIFY is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_ZONEFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_FS_ENCRYPTION is not set
+# CONFIG_FS_VERITY is not set
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+CONFIG_AUTOFS_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
+# CONFIG_VIRT_FUSE is not set
+CONFIG_OVERLAY_FS=y
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
+# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set
+CONFIG_OVERLAY_FS_INDEX=y
+CONFIG_OVERLAY_FS_XINO_AUTO=y
+CONFIG_OVERLAY_FS_METACOPY=y
+# CONFIG_OVERLAY_FS_DEBUG is not set
+
+#
+# Caches
+#
+CONFIG_NETFS_SUPPORT=y
+CONFIG_NETFS_STATS=y
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+# CONFIG_FSCACHE_DEBUG is not set
+CONFIG_CACHEFILES=m
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_ERROR_INJECTION is not set
+# CONFIG_CACHEFILES_ONDEMAND is not set
+# end of Caches
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+# end of CD-ROM/DVD Filesystems
+
+#
+# DOS/FAT/EXFAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=936
+CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+# CONFIG_FAT_DEFAULT_UTF8 is not set
+CONFIG_EXFAT_FS=m
+CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
+CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
+CONFIG_NTFS3_FS=m
+CONFIG_NTFS3_64BIT_CLUSTER=y
+CONFIG_NTFS3_LZX_XPRESS=y
+# CONFIG_NTFS3_FS_POSIX_ACL is not set
+# end of DOS/FAT/EXFAT/NT Filesystems
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE=y
+CONFIG_PROC_VMCORE_DEVICE_DUMP=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_KERNFS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_TMPFS_INODE64 is not set
+# CONFIG_TMPFS_QUOTA is not set
+CONFIG_ARCH_SUPPORTS_HUGETLBFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y
+# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+# end of Pseudo filesystems
+
+CONFIG_MISC_FILESYSTEMS=y
+CONFIG_ORANGEFS_FS=m
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_UBIFS_FS=m
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+CONFIG_UBIFS_FS_ZSTD=y
+# CONFIG_UBIFS_ATIME_SUPPORT is not set
+CONFIG_UBIFS_FS_XATTR=y
+CONFIG_UBIFS_FS_SECURITY=y
+# CONFIG_UBIFS_FS_AUTHENTICATION is not set
+CONFIG_CRAMFS=m
+CONFIG_CRAMFS_BLOCKDEV=y
+# CONFIG_CRAMFS_MTD is not set
+CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_FILE_CACHE is not set
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set
+CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_ZSTD is not set
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+CONFIG_MINIX_FS=m
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
+CONFIG_PSTORE=m
+CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
+CONFIG_PSTORE_COMPRESS=y
+# CONFIG_PSTORE_CONSOLE is not set
+# CONFIG_PSTORE_PMSG is not set
+# CONFIG_PSTORE_FTRACE is not set
+# CONFIG_PSTORE_RAM is not set
+# CONFIG_PSTORE_BLK is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_UFS_DEBUG is not set
+CONFIG_EROFS_FS=m
+# CONFIG_EROFS_FS_DEBUG is not set
+CONFIG_EROFS_FS_XATTR=y
+CONFIG_EROFS_FS_POSIX_ACL=y
+CONFIG_EROFS_FS_SECURITY=y
+CONFIG_EROFS_FS_ZIP=y
+CONFIG_EROFS_FS_ZIP_LZMA=y
+# CONFIG_EROFS_FS_ZIP_DEFLATE is not set
+CONFIG_EROFS_FS_PCPU_KTHREAD=y
+CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI=y
+CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +# CONFIG_ROOT_NFS is not set +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set +CONFIG_NFS_V4_2_READ_PLUS=y +CONFIG_NFSD=y +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=y +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CEPH_FS_SECURITY_LABEL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=y +# CONFIG_9P_FS_POSIX_ACL is not set +# CONFIG_9P_FS_SECURITY is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set 
+CONFIG_KEY_DH_OPERATIONS=y +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_SECURITY_APPARMOR=y +# CONFIG_SECURITY_APPARMOR_DEBUG is not set +CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY=y +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y +CONFIG_SECURITY_APPARMOR_EXPORT_BINARY=y +CONFIG_SECURITY_APPARMOR_PARANOID_LOAD=y +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +CONFIG_SECURITY_LOCKDOWN_LSM=y +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y +# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set +# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +# CONFIG_IMA_WRITE_POLICY is not set +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set +# CONFIG_IMA_BLACKLIST_KEYRING is not set +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_APPARMOR is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +# CONFIG_INIT_STACK_NONE is not set +# CONFIG_INIT_STACK_ALL_PATTERN is not set +CONFIG_INIT_STACK_ALL_ZERO=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# end of Hardening of kernel data 
structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_ENGINE=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=m +# CONFIG_CRYPTO_XTS is not set +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=y +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=y +# end of Hashes, digests, and 
MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=m +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=y +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +CONFIG_CRYPTO_KDF800108_CTR=y +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y + +# +# Accelerated Cryptographic Algorithms for CPU (loongarch) +# +CONFIG_CRYPTO_CRC32_LOONGARCH=m +# end of Accelerated Cryptographic Algorithms for CPU (loongarch) + +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_SYSTEM_REVOCATION_KEYS="" +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_PACKING=y +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m 
+CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=m +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=m +CONFIG_842_DECOMPRESS=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +# CONFIG_DMA_RESTRICTED_POOL is not set +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set +# CONFIG_FORCE_NR_CPUS is not set +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_GENERIC_LIB_ASHLDI3=y +CONFIG_GENERIC_LIB_ASHRDI3=y +CONFIG_GENERIC_LIB_LSHRDI3=y +CONFIG_GENERIC_LIB_CMPDI2=y +CONFIG_GENERIC_LIB_UCMPDI2=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + 
+CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=4096 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_VMLINUX_MAP is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_ARCH_DISABLE_KASAN_INLINE=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +# CONFIG_KFENCE is not set +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_SDEI_WATCHDOG is not set +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PERF is not set +CONFIG_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +# CONFIG_FUNCTION_PROFILER is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_OSNOISE_TRACER is not set +# CONFIG_TIMERLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +# CONFIG_SYNTH_EVENTS is not set +# CONFIG_USER_EVENTS is not set +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# 
CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +# CONFIG_STRICT_DEVMEM is not set + +# +# loongarch Debugging +# +# CONFIG_UNWINDER_GUESS is not set +CONFIG_UNWINDER_PROLOGUE=y +# end of loongarch Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FUNCTION_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +# CONFIG_RUNTIME_TESTING_MENU is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..274612ac70423e56a7aeb2be1de61940dfb8618a --- /dev/null +++ b/arch/loongarch/configs/anolis_defconfig @@ -0,0 +1,2221 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/loongarch 6.6.52 Kernel Configuration +# +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_NUMA_BALANCING=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PROFILING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_NR_CPUS=256 +CONFIG_NUMA=y +CONFIG_ARCH_IOREMAP=y +CONFIG_CPU_HAS_LSX=y +CONFIG_CPU_HAS_LASX=y +CONFIG_CPU_HAS_LBT=y +CONFIG_LIVEPATCH=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_HIBERNATION=y +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_TAD=y +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG_SHA256=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_BSD_DISKLABEL=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BINFMT_MISC=m +CONFIG_ZSWAP=y +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y +CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_USERFAULTFD=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m 
+CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m 
+CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m 
+CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BPFILTER=y +CONFIG_IP_DCCP=m +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_DEBUG=y +CONFIG_SCTP_DBG_OBJCNT=y +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y +CONFIG_L2TP=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +CONFIG_NET_DSA=m +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM=m +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m 
+CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_DEBUG=y +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_HSR=m +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m +CONFIG_CAN=m +CONFIG_BT=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +# CONFIG_BT_HCIBTUSB_BCM is not set +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_IOV=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_SHPC=y +CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set +CONFIG_YENTA=m +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_CONNECTOR=y +CONFIG_DMI_SYSFS=y +CONFIG_ISCSI_IBFT=m +CONFIG_EFI_ZBOOT=y +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +CONFIG_MTD=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_RAM=m +CONFIG_MTD_ROM=m +CONFIG_MTD_BLOCK2MTD=m +CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_MTD_UBI_BLOCK=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +CONFIG_PARPORT_PC_FIFO=y +CONFIG_PARPORT_1284=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_ZSTD=y +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_DRBD=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m 
+CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_PASSTHRU=y +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_UACCE=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_LEAPIORAID=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_ATA_PIIX=m +CONFIG_PATA_ATIIXP=y +CONFIG_ATA_GENERIC=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LOGGING=y +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NTB_NETDEV=m +CONFIG_RIONET=m +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ATM_DRIVERS is not set +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set 
+# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_BNX2=y +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y +# CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=m +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLXSW_CORE=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +CONFIG_8139TOO_8129=y +CONFIG_R8169=m +CONFIG_YT6801=m +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_STMMAC_ETH=y +CONFIG_DWMAC_LOONGSON=m +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_LED_TRIGGER_PHY=y +CONFIG_SFP=y +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_BCM7XXX_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=y +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +CONFIG_CAN_VCAN=m +CONFIG_CAN_SLCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_PLATFORM=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m 
+CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_KVASER_USB=m +CONFIG_CAN_PEAK_USB=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_THUNDER=m +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +# CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +# CONFIG_USB_BELKIN is not set +# CONFIG_USB_ARMLINUX is not set +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH9K=m +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_HTC=m +CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_IWLWIFI=m +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +CONFIG_MT7601U=m +CONFIG_MT76x0U=m +CONFIG_MT76x2U=m +CONFIG_RT2X00=m +CONFIG_RT2800PCI=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8XXXU=m +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +CONFIG_ZD1211RW=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_NETDEVSIM=m +CONFIG_ISDN=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_XTKBD=m +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m 
+CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F34=y +CONFIG_RMI4_F55=y +CONFIG_SERIO_SERPORT=m +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_JSM=m +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m +CONFIG_PRINTER=m +CONFIG_PPDEV=m +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_TCG_TIS_SPI=m +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TCG_TIS_ST33ZP24_SPI=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_PIIX4=y +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +CONFIG_I2C_SCMI=m +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_GPIO=y +CONFIG_I2C_LS2X=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m +CONFIG_I2C_STUB=m +CONFIG_SPI=y +CONFIG_SPI_LOONGSON_PCI=y +CONFIG_SPI_LOONGSON_PLATFORM=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_DP83640_PHY=m +CONFIG_PINCTRL=y +CONFIG_PINCTRL_LOONGSON2=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_LOONGSON_64BIT=y +CONFIG_GPIO_VIPERBOARD=m +CONFIG_POWER_RESET=y +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m 
+CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_ADM1275=m +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_EMULATION=y +CONFIG_LOONGSON2_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +CONFIG_WDAT_WDT=m +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m +CONFIG_USBPCWATCHDOG=m +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +CONFIG_MFD_VIPERBOARD=m +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +CONFIG_MFD_VX855=m +CONFIG_RC_CORE=m +CONFIG_LIRC=y +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SHARP_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_XMP_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_STREAMZAP=m +CONFIG_IR_TTUSBIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +CONFIG_MEDIA_SUPPORT=m +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m 
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_PWC=m
+CONFIG_USB_S2255=m
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_VIDEO_HDPVR=m
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_AU0828=m
+CONFIG_DVB_B2C2_FLEXCOP_USB=m
+CONFIG_DVB_USB_V2=m
+CONFIG_DVB_USB_AF9035=m
+CONFIG_DVB_USB_ANYSEE=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_AZ6007=m
+CONFIG_DVB_USB_CE6230=m
+CONFIG_DVB_USB_EC168=m
+CONFIG_DVB_USB_GL861=m
+CONFIG_DVB_USB_LME2510=m
+CONFIG_DVB_USB_MXL111SF=m
+CONFIG_DVB_USB=m
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+CONFIG_DVB_USB_AZ6027=m
+CONFIG_DVB_USB_CINERGY_T2=m
+CONFIG_DVB_USB_CXUSB=m
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_DTV5100=m
+CONFIG_DVB_USB_DW2102=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_PCTV452E=m
+CONFIG_DVB_USB_TECHNISAT_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_SMS_USB_DRV=m
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+CONFIG_VIDEO_EM28XX=m
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
+CONFIG_MEDIA_PCI_SUPPORT=y
+CONFIG_VIDEO_IVTV=m
+CONFIG_VIDEO_FB_IVTV=m
+CONFIG_VIDEO_BT848=m
+CONFIG_DVB_BT8XX=m
+CONFIG_VIDEO_CX18=m
+CONFIG_VIDEO_CX23885=m
+CONFIG_MEDIA_ALTERA_CI=m
+CONFIG_VIDEO_CX88=m
+CONFIG_VIDEO_CX88_ALSA=m
+CONFIG_VIDEO_CX88_BLACKBIRD=m
+CONFIG_VIDEO_CX88_DVB=m
+# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set
+CONFIG_VIDEO_SAA7134=m
+CONFIG_VIDEO_SAA7134_ALSA=m
+CONFIG_VIDEO_SAA7134_DVB=m
+CONFIG_VIDEO_SAA7164=m
+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
+CONFIG_DVB_DDBRIDGE=m
+CONFIG_DVB_DM1105=m
+CONFIG_MANTIS_CORE=m
+CONFIG_DVB_MANTIS=m
+CONFIG_DVB_HOPPER=m
+CONFIG_DVB_NGENE=m
+CONFIG_DVB_PLUTO2=m
+CONFIG_DVB_PT1=m
+CONFIG_DVB_BUDGET_CORE=m
+CONFIG_DVB_BUDGET=m
+CONFIG_DVB_BUDGET_CI=m
+CONFIG_DVB_BUDGET_AV=m
+CONFIG_SMS_SDIO_DRV=m
+CONFIG_DVB_FIREDTV=m
+CONFIG_DRM=y
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_DP_AUX_CHARDEV=y
+CONFIG_DRM_DP_CEC=y
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_AMDGPU=m
+CONFIG_DRM_AMDGPU_SI=y
+CONFIG_DRM_AMDGPU_CIK=y
+CONFIG_DRM_AMDGPU_USERPTR=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_VKMS=m
+CONFIG_DRM_UDL=m
+CONFIG_DRM_AST_LOONGSON=y
+CONFIG_DRM_MGAG200=m
+CONFIG_DRM_QXL=m
+CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_LOONGSON=m
+CONFIG_DRM_BOCHS=m
+CONFIG_DRM_CIRRUS_QEMU=m
+CONFIG_DRM_INSPUR=m
+CONFIG_FB=y
+CONFIG_FB_EFI=y
+CONFIG_FB_RADEON=y
+CONFIG_FB_LS2K500=m
+CONFIG_FB_TILEBLITTING=y
+CONFIG_LCD_CLASS_DEVICE=m
+CONFIG_LCD_PLATFORM=m
+CONFIG_BACKLIGHT_LP855X=m
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_HRTIMER=m
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
+CONFIG_SND_DUMMY=m
+CONFIG_SND_ALOOP=m
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_MTPAV=m
+CONFIG_SND_MPU401=m
+CONFIG_SND_AC97_POWER_SAVE=y
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
+CONFIG_SND_AD1889=m
+CONFIG_SND_ATIIXP=m
+CONFIG_SND_ATIIXP_MODEM=m
+CONFIG_SND_AU8810=m
+CONFIG_SND_AU8820=m
+CONFIG_SND_AU8830=m
+CONFIG_SND_BT87X=m
+CONFIG_SND_BT87X_OVERCLOCK=y
+CONFIG_SND_CA0106=m
+CONFIG_SND_CMIPCI=m
+CONFIG_SND_OXYGEN=m
+CONFIG_SND_CS46XX=m
+CONFIG_SND_CTXFI=m
+CONFIG_SND_DARLA20=m
+CONFIG_SND_GINA20=m
+CONFIG_SND_LAYLA20=m
+CONFIG_SND_DARLA24=m
+CONFIG_SND_GINA24=m
+CONFIG_SND_LAYLA24=m
+CONFIG_SND_MONA=m
+CONFIG_SND_MIA=m
+CONFIG_SND_ECHO3G=m
+CONFIG_SND_INDIGO=m
+CONFIG_SND_INDIGOIO=m
+CONFIG_SND_INDIGODJ=m
+CONFIG_SND_INDIGOIOX=m
+CONFIG_SND_INDIGODJX=m
+CONFIG_SND_ENS1370=m
+CONFIG_SND_ENS1371=m
+CONFIG_SND_HDSP=m
+CONFIG_SND_HDSPM=m
+CONFIG_SND_ICE1724=m
+CONFIG_SND_INTEL8X0=m
+CONFIG_SND_INTEL8X0M=m
+CONFIG_SND_KORG1212=m
+CONFIG_SND_LOLA=m
+CONFIG_SND_LX6464ES=m
+CONFIG_SND_MIXART=m
+CONFIG_SND_PCXHR=m
+CONFIG_SND_RME32=m
+CONFIG_SND_RME96=m
+CONFIG_SND_RME9652=m
+CONFIG_SND_VIA82XX=m
+CONFIG_SND_VIA82XX_MODEM=m
+CONFIG_SND_VIRTUOSO=m
+CONFIG_SND_VX222=m
+CONFIG_SND_HDA_INTEL=m
+CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_INPUT_BEEP=y
+CONFIG_SND_HDA_INPUT_BEEP_MODE=0
+CONFIG_SND_HDA_PATCH_LOADER=y
+CONFIG_SND_HDA_CODEC_REALTEK=m
+CONFIG_SND_HDA_CODEC_ANALOG=m
+CONFIG_SND_HDA_CODEC_SIGMATEL=m
+CONFIG_SND_HDA_CODEC_VIA=m
+CONFIG_SND_HDA_CODEC_HDMI=m
+CONFIG_SND_HDA_CODEC_CIRRUS=m
+CONFIG_SND_HDA_CODEC_CONEXANT=m
+CONFIG_SND_HDA_CODEC_CA0110=m
+CONFIG_SND_HDA_CODEC_CA0132=m
+CONFIG_SND_HDA_CODEC_CMEDIA=m
+CONFIG_SND_HDA_CODEC_SI3054=m
+CONFIG_SND_HDA_PREALLOC_SIZE=512
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_UA101=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_USB_6FIRE=m
+CONFIG_SND_USB_HIFACE=m
+CONFIG_SND_BCD2000=m
+CONFIG_SND_USB_POD=m
+CONFIG_SND_USB_PODHD=m
+CONFIG_SND_USB_TONEPORT=m
+CONFIG_SND_USB_VARIAX=m
+CONFIG_SND_DICE=m
+CONFIG_SND_OXFW=m
+CONFIG_SND_ISIGHT=m
+CONFIG_SND_FIREWORKS=m
+CONFIG_SND_BEBOB=m
+CONFIG_SND_FIREWIRE_DIGI00X=m
+CONFIG_SND_FIREWIRE_TASCAM=m
+CONFIG_SND_FIREWIRE_MOTU=m
+CONFIG_SND_FIREFACE=m
+CONFIG_SND_SOC=m
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=m
+CONFIG_HID_A4TECH=m
+CONFIG_HID_ACRUX=m
+CONFIG_HID_APPLE=m
+CONFIG_HID_APPLEIR=m
+CONFIG_HID_ASUS=m
+CONFIG_HID_AUREAL=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_BETOP_FF=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CORSAIR=m
+CONFIG_HID_PRODIKEYS=m
+CONFIG_HID_CMEDIA=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+CONFIG_HID_ELAN=m
+CONFIG_HID_ELECOM=m
+CONFIG_HID_ELO=m
+CONFIG_HID_EZKEY=m
+CONFIG_HID_GEMBIRD=m
+CONFIG_HID_GFRM=m
+CONFIG_HID_HOLTEK=m
+CONFIG_HID_GT683R=m
+CONFIG_HID_KEYTOUCH=m
+CONFIG_HID_KYE=m
+CONFIG_HID_UCLOGIC=m
+CONFIG_HID_WALTOP=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_ICADE=m
+CONFIG_HID_ITE=m
+CONFIG_HID_JABRA=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LCPOWER=m
+CONFIG_HID_LENOVO=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_HID_NTI=m
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_HID_PENMOUNT=m
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_PICOLCD=m
+CONFIG_HID_PLANTRONICS=m
+CONFIG_HID_PRIMAX=m
+CONFIG_HID_ROCCAT=m
+CONFIG_HID_SAITEK=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_SONY_FF=y
+CONFIG_HID_SPEEDLINK=m
+CONFIG_HID_STEELSERIES=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_RMI=m
+CONFIG_HID_GREENASIA=m
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_HID_TIVO=m
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THINGM=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_HID_WACOM=m
+CONFIG_HID_WIIMOTE=m
+CONFIG_HID_XINMO=m
+CONFIG_HID_ZEROPLUS=m
+CONFIG_HID_ZYDACRON=m
+CONFIG_HID_SENSOR_HUB=y
+CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
+CONFIG_HID_ALPS=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_I2C_HID=m
+CONFIG_USB_LED_TRIG=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_LEDS_TRIGGER_USBPORT=m
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_DBGCAP=y
+CONFIG_USB_XHCI_PLATFORM=m
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_PRINTER=m
+CONFIG_USB_TMC=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_ONETOUCH=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_STORAGE_ENE_UB6250=m
+CONFIG_USB_UAS=m
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_HOST=y
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_F8153X=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7715_PARPORT=y
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MXUPORT=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QCAUX=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_SYMBOL=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_XSENS_MT=m
+CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_QT2=m
+CONFIG_USB_SERIAL_UPD78F0730=m
+CONFIG_USB_SERIAL_DEBUG=m
+CONFIG_USB_USS720=m
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_APPLEDISPLAY=m
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_LD=m
+CONFIG_USB_IOWARRIOR=m
+CONFIG_USB_ISIGHTFW=m
+CONFIG_USB_HSIC_USB3503=m
+CONFIG_USB_ATM=m
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_CXACRU=m
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_XUSBATM=m
+CONFIG_USB_GADGET=y
+CONFIG_TYPEC=m
+CONFIG_TYPEC_TCPM=m
+CONFIG_TYPEC_TCPCI=m
+CONFIG_TYPEC_RT1711H=m
+CONFIG_TYPEC_FUSB302=m
+CONFIG_TYPEC_UCSI=m
+CONFIG_UCSI_ACPI=m
+CONFIG_TYPEC_TPS6598X=m
+CONFIG_TYPEC_MUX_PI3USB30532=m
+CONFIG_TYPEC_DP_ALTMODE=m
+CONFIG_MMC=m
+CONFIG_SDIO_UART=m
+CONFIG_MMC_SDHCI=m
+CONFIG_MMC_SDHCI_PCI=m
+CONFIG_MMC_SDHCI_ACPI=m
+CONFIG_MMC_SDHCI_PLTFM=m
+CONFIG_MMC_TIFM_SD=m
+CONFIG_MMC_CB710=m
+CONFIG_MMC_VIA_SDMMC=m
+CONFIG_MMC_VUB300=m
+CONFIG_MMC_USHC=m
+CONFIG_MMC_REALTEK_PCI=m
+CONFIG_MMC_REALTEK_USB=m
+CONFIG_MMC_SDHCI_XENON=m
+CONFIG_MEMSTICK=m
+CONFIG_MSPRO_BLOCK=m
+CONFIG_MEMSTICK_TIFM_MS=m
+CONFIG_MEMSTICK_JMICRON_38X=m
+CONFIG_MEMSTICK_R592=m
+CONFIG_MEMSTICK_REALTEK_PCI=m
+CONFIG_MEMSTICK_REALTEK_USB=m
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_LM3530=m
+CONFIG_LEDS_LP3944=m
+CONFIG_LEDS_BLINKM=m
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_ONESHOT=m
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+CONFIG_LEDS_TRIGGER_TRANSIENT=m
+CONFIG_LEDS_TRIGGER_CAMERA=m
+CONFIG_LEDS_TRIGGER_AUDIO=y
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_INFINIBAND_BNXT_RE=m
+CONFIG_INFINIBAND_CXGB4=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
+CONFIG_INFINIBAND_VMWARE_PVRDMA=m
+CONFIG_RDMA_RXE=m
+CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INFINIBAND_IPOIB_CM=y
+CONFIG_INFINIBAND_SRP=m
+CONFIG_INFINIBAND_SRPT=m
+CONFIG_INFINIBAND_ISER=m
+CONFIG_INFINIBAND_ISERT=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=m
+CONFIG_RTC_DRV_DS1374=m
+CONFIG_RTC_DRV_DS1672=m
+CONFIG_RTC_DRV_MAX6900=m
+CONFIG_RTC_DRV_RS5C372=m
+CONFIG_RTC_DRV_ISL1208=m
+CONFIG_RTC_DRV_ISL12022=m
+CONFIG_RTC_DRV_X1205=m
+CONFIG_RTC_DRV_PCF8523=m
+CONFIG_RTC_DRV_PCF8563=m
+CONFIG_RTC_DRV_PCF8583=m
+CONFIG_RTC_DRV_M41T80=m
+CONFIG_RTC_DRV_M41T80_WDT=y
+CONFIG_RTC_DRV_BQ32K=m
+CONFIG_RTC_DRV_FM3130=m
+CONFIG_RTC_DRV_RX8581=m
+CONFIG_RTC_DRV_RX8025=m
+CONFIG_RTC_DRV_EM3027=m
+CONFIG_RTC_DRV_RV8803=m
+CONFIG_RTC_DRV_RX4581=m
+CONFIG_RTC_DRV_DS3232=m
+CONFIG_RTC_DRV_RV3029C2=m
+# CONFIG_RTC_DRV_RV3029_HWMON is not set
+CONFIG_RTC_DRV_DS1286=m
+CONFIG_RTC_DRV_DS1511=m
+CONFIG_RTC_DRV_DS1553=m
+CONFIG_RTC_DRV_DS1742=m
+CONFIG_RTC_DRV_DS2404=m
+CONFIG_RTC_DRV_EFI=m
+CONFIG_RTC_DRV_STK17TA8=m
+CONFIG_RTC_DRV_M48T35=m
+CONFIG_RTC_DRV_M48T59=m
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_RP5C01=m
+CONFIG_RTC_DRV_LOONGSON=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=m
+CONFIG_ASYNC_TX_DMA=y
+CONFIG_UIO_CIF=m
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_VFIO=m
+CONFIG_VFIO_NOIOMMU=y
+CONFIG_VFIO_PCI=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_INPUT=m
+CONFIG_VIRTIO_MMIO=m
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
+CONFIG_COMEDI=m
+CONFIG_COMEDI_PCI_DRIVERS=m
+CONFIG_COMEDI_8255_PCI=m
+CONFIG_COMEDI_ADL_PCI6208=m
+CONFIG_COMEDI_ADL_PCI7X3X=m
+CONFIG_COMEDI_ADL_PCI8164=m
+CONFIG_COMEDI_ADL_PCI9111=m
+CONFIG_COMEDI_ADL_PCI9118=m
+CONFIG_COMEDI_ADV_PCI1710=m
+CONFIG_COMEDI_ADV_PCI1720=m
+CONFIG_COMEDI_ADV_PCI1723=m
+CONFIG_COMEDI_ADV_PCI1724=m
+CONFIG_COMEDI_ADV_PCI1760=m
+CONFIG_COMEDI_ADV_PCI_DIO=m
+CONFIG_COMEDI_NI_LABPC_PCI=m
+CONFIG_COMEDI_NI_PCIDIO=m
+CONFIG_COMEDI_NI_PCIMIO=m
+CONFIG_STAGING=y
+CONFIG_COMMON_CLK_LOONGSON2=y
+CONFIG_LOONGSON2_GUTS=y
+CONFIG_LOONGSON2_PM=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_IIO=m
+CONFIG_HID_SENSOR_ACCEL_3D=m
+CONFIG_HID_SENSOR_GYRO_3D=m
+CONFIG_HID_SENSOR_HUMIDITY=m
+CONFIG_HID_SENSOR_ALS=m
+CONFIG_HID_SENSOR_PROX=m
+CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
+CONFIG_HID_SENSOR_INCLINOMETER_3D=m
+CONFIG_HID_SENSOR_DEVICE_ROTATION=m
+CONFIG_HID_SENSOR_PRESS=m
+CONFIG_HID_SENSOR_TEMP=m
+CONFIG_NTB=m
+CONFIG_NTB_PINGPONG=m
+CONFIG_NTB_TOOL=m
+CONFIG_NTB_PERF=m
+CONFIG_NTB_TRANSPORT=m
+CONFIG_PWM=y
+CONFIG_POWERCAP=y
+CONFIG_USB4=m
+CONFIG_DAX=y
+CONFIG_DEV_DAX=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
+CONFIG_OVERLAY_FS=y
+# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set
+CONFIG_OVERLAY_FS_INDEX=y
+CONFIG_OVERLAY_FS_XINO_AUTO=y
+CONFIG_OVERLAY_FS_METACOPY=y
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=936
+CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+CONFIG_EXFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS3_FS=m
+CONFIG_NTFS3_64BIT_CLUSTER=y
+CONFIG_NTFS3_LZX_XPRESS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE_DEVICE_DUMP=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ORANGEFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_UBIFS_FS=m
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_PSTORE=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
+CONFIG_EROFS_FS_ZIP_LZMA=y
+CONFIG_EROFS_FS_PCPU_KTHREAD=y
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V2 is not set
+CONFIG_NFS_V3=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set
+CONFIG_NFSD=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_BLOCKLAYOUT=y
+CONFIG_NFSD_SCSILAYOUT=y
+CONFIG_NFSD_FLEXFILELAYOUT=y
+CONFIG_NFSD_V4_2_INTER_SSC=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_CEPH_FS=m
+CONFIG_CEPH_FSCACHE=y
+CONFIG_CEPH_FS_POSIX_ACL=y
+CONFIG_CEPH_FS_SECURITY_LABEL=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_9P_FS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
+CONFIG_NLS_UTF8=y
+CONFIG_DLM=m
+CONFIG_DLM_DEBUG=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_TRUSTED_KEYS=y
+CONFIG_KEY_DH_OPERATIONS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_INFINIBAND=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_LSM_MMAP_MIN_ADDR=65535
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_SECURITY_YAMA=y
+CONFIG_SECURITY_LOCKDOWN_LSM=y
+CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_INTEGRITY_PLATFORM_KEYRING=y
+CONFIG_IMA=y
+CONFIG_IMA_DEFAULT_HASH_SHA256=y
+CONFIG_IMA_READ_POLICY=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_IMA_LOAD_X509=y
+CONFIG_EVM=y
+CONFIG_EVM_LOAD_X509=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf"
+CONFIG_CRYPTO_FIPS=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_SM2=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4_GENERIC=y
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_CFB=y
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_SM3_GENERIC=y
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_USER_API_RNG=y
+CONFIG_CRYPTO_USER_API_AEAD=y
+CONFIG_CRYPTO_CRC32_LOONGARCH=m
+CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
+CONFIG_CRYPTO_DEV_CHELSIO=m
+CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_SIGNED_PE_FILE_VERIFICATION=y
+CONFIG_SECONDARY_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_SYSTEM_REVOCATION_LIST=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=m
+CONFIG_DMA_CMA=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
+CONFIG_BOOT_PRINTK_DELAY=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_FRAME_WARN=4096
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_LIST=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_TRACE is not set
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_UNWINDER_ORC=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_LOONGARCH_IOMMU=m
+CONFIG_CMDLINE_EXTEND=y
+CONFIG_CMDLINE="vfio_iommu_type1.allow_unsafe_interrupts=1 nokaslr"
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index e5f70642ed20626a40cd6215c08a09e141e726ef..1a51e214097fb930f3acc4c3fc4d82dd0bf44a98 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -5,13 +5,16 @@
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
-CONFIG_PREEMPT=y
+# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
@@ -19,6 +22,7 @@
 CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
 CONFIG_CPUSETS=y
@@ -30,78 +34,86 @@
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
-CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
-CONFIG_PERF_EVENTS=y
-CONFIG_LOONGARCH=y
-CONFIG_64BIT=y
-CONFIG_MACH_LOONGSON64=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_HZ_250=y
-CONFIG_DMI=y
-CONFIG_EFI=y
-CONFIG_SMP=y
-CONFIG_HOTPLUG_CPU=y
-CONFIG_NR_CPUS=64
+CONFIG_PROFILING=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
-CONFIG_CPU_HAS_FPU=y
+CONFIG_ARCH_IOREMAP=y
 CONFIG_CPU_HAS_LSX=y
 CONFIG_CPU_HAS_LASX=y
-CONFIG_KEXEC=y
-CONFIG_CRASH_DUMP=y
-CONFIG_RANDOMIZE_BASE=y
-CONFIG_SUSPEND=y
+CONFIG_CPU_HAS_LBT=y
+CONFIG_LIVEPATCH=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_HIBERNATION=y
-CONFIG_ACPI=y
 CONFIG_ACPI_SPCR_TABLE=y
 CONFIG_ACPI_TAD=y
 CONFIG_ACPI_DOCK=y
 CONFIG_ACPI_IPMI=m
-CONFIG_ACPI_HOTPLUG_CPU=y
 CONFIG_ACPI_PCI_SLOT=y
 CONFIG_ACPI_HOTPLUG_MEMORY=y
-CONFIG_EFI_ZBOOT=y
-CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
-CONFIG_EFI_CAPSULE_LOADER=m
-CONFIG_EFI_TEST=m
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG_SHA256=y
+CONFIG_BLK_DEV_ZONED=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_BSD_DISKLABEL=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_IOSCHED_BFQ=y
-CONFIG_BFQ_GROUP_IOSCHED=y
 CONFIG_BINFMT_MISC=m
-CONFIG_ZPOOL=y
 CONFIG_ZSWAP=y
 CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
 CONFIG_ZBUD=y
 CONFIG_ZSMALLOC=m
+CONFIG_Z3FOLD=y
+CONFIG_ZSMALLOC=y
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_SLAB_FREELIST_RANDOM=y
+# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
-CONFIG_UNIX=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_TLS_DEVICE=y
+CONFIG_TLS_TOE=y
 CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
+CONFIG_XFRM_INTERFACE=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_XDP_SOCKETS=y
-CONFIG_INET=y
+CONFIG_XDP_SOCKETS_DIAG=m
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
 CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_MULTIPATH=y
 CONFIG_IP_ROUTE_VERBOSE=y
@@ -117,27 +129,83 @@
 CONFIG_IP_MROUTE=y
 CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
+CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
-CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_ESP_OFFLOAD=m
+CONFIG_INET_ESPINTCP=y
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
+CONFIG_INET_DIAG_DESTROY=y
 CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_NV=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_TCP_CONG_DCTCP=m
+CONFIG_TCP_CONG_CDG=m
 CONFIG_TCP_CONG_BBR=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+CONFIG_INET6_ESPINTCP=y
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_IPV6_ILA=m
+CONFIG_IPV6_VTI=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_SUBTREES=y
 CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPV6_SEG6_HMAC=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
+CONFIG_NETLABEL=y
+CONFIG_MPTCP=y
 CONFIG_NETWORK_PHY_TIMESTAMPING=y
 CONFIG_NETFILTER=y
 CONFIG_BRIDGE_NETFILTER=m
-CONFIG_NETFILTER_NETLINK_LOG=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
 CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_FLOW_OFFLOAD=m
 CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -150,22 +218,35 @@
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NETFILTER_XTABLES=y
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
-CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_HMARK=m
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_LED=m
 CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
@@ -181,7 +262,6 @@
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
 CONFIG_NETFILTER_XT_MATCH_CPU=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
 CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
@@ -190,6 +270,7 @@
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
 CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_IPVS=m
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
@@ -199,10 +280,12 @@
 CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
 CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
@@ -211,8 +294,25 @@
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
 CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPMARK=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
+CONFIG_IP_SET_HASH_MAC=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_IP_VS=m
 CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_DEBUG=y
 CONFIG_IP_VS_PROTO_TCP=y
 CONFIG_IP_VS_PROTO_UDP=y
 CONFIG_IP_VS_PROTO_ESP=y
@@ -220,11 +320,24 @@
 CONFIG_IP_VS_PROTO_AH=y
 CONFIG_IP_VS_PROTO_SCTP=y
 CONFIG_IP_VS_RR=m
 CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_NFCT=y
-CONFIG_NF_TABLES_IPV4=y
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_FO=m
+CONFIG_IP_VS_OVF=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_MH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -245,18 +358,21 @@
 CONFIG_IP_NF_SECURITY=m
 CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_TABLES_IPV6=y
-CONFIG_IP6_NF_IPTABLES=y
+CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
 CONFIG_IP6_NF_MATCH_FRAG=m
 CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
 CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_MATCH_SRH=m
-CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
@@ -266,76 +382,211 @@
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
 CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
 CONFIG_BRIDGE_EBT_ARP=m
 CONFIG_BRIDGE_EBT_IP=m
 CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_BPFILTER=y
-CONFIG_IP_SCTP=m
-CONFIG_RDS=y
+CONFIG_IP_DCCP=m
+CONFIG_IP_DCCP_CCID2_DEBUG=y
+CONFIG_IP_DCCP_CCID3_DEBUG=y
+CONFIG_IP_DCCP_DEBUG=y
+CONFIG_SCTP_DBG_OBJCNT=y
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
+CONFIG_SCTP_COOKIE_HMAC_MD5=y
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_RDS_DEBUG=y
+CONFIG_TIPC=m
+CONFIG_TIPC_MEDIA_IB=y
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+CONFIG_ATM_CLIP_NO_ICMP=y
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+CONFIG_ATM_BR2684_IPFILTER=y
 CONFIG_L2TP=m
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
 CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_BRIDGE_MRP=y
+CONFIG_NET_DSA=m
+CONFIG_NET_DSA_TAG_AR9331=m
+CONFIG_NET_DSA_TAG_BRCM=m
+CONFIG_NET_DSA_TAG_BRCM_PREPEND=m
+CONFIG_NET_DSA_TAG_GSWIP=m
+CONFIG_NET_DSA_TAG_DSA=m
+CONFIG_NET_DSA_TAG_EDSA=m
+CONFIG_NET_DSA_TAG_MTK=m
+CONFIG_NET_DSA_TAG_KSZ=m
+CONFIG_NET_DSA_TAG_OCELOT=m
+CONFIG_NET_DSA_TAG_QCA=m
+CONFIG_NET_DSA_TAG_RTL4_A=m
+CONFIG_NET_DSA_TAG_LAN9303=m
+CONFIG_NET_DSA_TAG_SJA1105=m
+CONFIG_NET_DSA_TAG_TRAILER=m
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_VLAN_8021Q_MVRP=y
 CONFIG_LLC2=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_X25=m
+CONFIG_LAPB=m
+CONFIG_PHONET=m
+CONFIG_6LOWPAN=m
+# CONFIG_6LOWPAN_NHC is not set
+CONFIG_IEEE802154=m
+CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y
+CONFIG_IEEE802154_6LOWPAN=m
+CONFIG_MAC802154=m
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
 CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
 CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
 CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_CBS=m
+CONFIG_NET_SCH_ETF=m
+CONFIG_NET_SCH_TAPRIO=m
+CONFIG_NET_SCH_GRED=m
 CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_SKBPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_NET_SCH_CAKE=m
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_HHF=m
+CONFIG_NET_SCH_PIE=m
+CONFIG_NET_SCH_FQ_PIE=m
 CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_ETS=m
+CONFIG_NET_SCH_DEFAULT=y
+CONFIG_DEFAULT_FQ_CODEL=y
 CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_ROUTE4=m
 CONFIG_NET_CLS_FW=m
 CONFIG_NET_CLS_U32=m
-CONFIG_NET_CLS_CGROUP=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
 CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_EMATCH_IPSET=m
+CONFIG_NET_EMATCH_IPT=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
 CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_SAMPLE=m
 CONFIG_NET_ACT_IPT=m
 CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_MPLS=m
+CONFIG_NET_ACT_VLAN=m
 CONFIG_NET_ACT_BPF=m
+CONFIG_NET_ACT_CONNMARK=m
+CONFIG_NET_ACT_CTINFO=m
+CONFIG_NET_ACT_SKBMOD=m
+CONFIG_NET_ACT_IFE=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_ACT_CT=m
+CONFIG_NET_ACT_GATE=m
+CONFIG_NET_IFE_SKBMARK=m
+CONFIG_NET_IFE_SKBPRIO=m
+CONFIG_NET_IFE_SKBTCINDEX=m
+CONFIG_NET_TC_SKB_EXT=y
+CONFIG_DCB=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_BATMAN_ADV_DEBUG=y
 CONFIG_OPENVSWITCH=m
 CONFIG_VSOCKETS=m
 CONFIG_VIRTIO_VSOCKETS=m
-CONFIG_NETLINK_DIAG=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NET_MPLS_GSO=y
+CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=y
+CONFIG_HSR=m
+CONFIG_QRTR=m
+CONFIG_QRTR_TUN=m
+CONFIG_NET_NCSI=y
+CONFIG_NCSI_OEM_CMD_GET_MAC=y
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_STREAM_PARSER=y
+CONFIG_NET_PKTGEN=m
+CONFIG_CAN=m
 CONFIG_BT=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
 CONFIG_BT_BNEP_MC_FILTER=y
 CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_CMTP=m
 CONFIG_BT_HIDP=m
-CONFIG_BT_HS=y
 CONFIG_BT_HCIBTUSB=m
 CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
-CONFIG_BT_HCIBTUSB_MTK=y
+# CONFIG_BT_HCIBTUSB_BCM is not set
+CONFIG_BT_HCIBTSDIO=m
 CONFIG_BT_HCIUART=m
 CONFIG_BT_HCIUART_BCSP=y
 CONFIG_BT_HCIUART_ATH3K=y
-CONFIG_BT_HCIUART_INTEL=y
-CONFIG_BT_HCIUART_AG6XX=y
 CONFIG_BT_HCIBCM203X=m
 CONFIG_BT_HCIBPA10X=m
 CONFIG_BT_HCIBFUSB=m
-CONFIG_BT_HCIDTL1=m
-CONFIG_BT_HCIBT3C=m
-CONFIG_BT_HCIBLUECARD=m
 CONFIG_BT_HCIVHCI=m
 CONFIG_BT_MRVL=m
+CONFIG_BT_MRVL_SDIO=m
 CONFIG_BT_ATH3K=m
-CONFIG_BT_VIRTIO=m
 CONFIG_CFG80211=m
 CONFIG_CFG80211_WEXT=y
 CONFIG_MAC80211=m
@@ -343,15 +594,19 @@
 CONFIG_RFKILL=m
 CONFIG_RFKILL_INPUT=y
 CONFIG_NET_9P=y
 CONFIG_NET_9P_VIRTIO=y
-CONFIG_CEPH_LIB=m
-CONFIG_PCIEPORTBUS=y
-CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
 CONFIG_PCIEAER=y
-# CONFIG_PCIEASPM is not set
+CONFIG_PCIEAER_INJECT=m
+CONFIG_PCIE_ECRC=y
+CONFIG_PCIE_DPC=y
+CONFIG_PCI_STUB=y
+CONFIG_PCI_PF_STUB=m
 CONFIG_PCI_IOV=y
-CONFIG_HOTPLUG_PCI=y
+CONFIG_VGA_ARB_MAX_GPUS=64
+CONFIG_HOTPLUG_PCI_ACPI=y
 CONFIG_HOTPLUG_PCI_SHPC=y
 CONFIG_PCCARD=m
+# CONFIG_PCMCIA is not set
 CONFIG_YENTA=m
 CONFIG_RAPIDIO=y
 CONFIG_RAPIDIO_TSI721=y
@@ -363,7 +618,12 @@
 CONFIG_UEVENT_HELPER=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_FW_LOADER_COMPRESS=y
-CONFIG_FW_LOADER_COMPRESS_ZSTD=y
+CONFIG_CONNECTOR=y
+CONFIG_DMI_SYSFS=y
+CONFIG_ISCSI_IBFT=m
+CONFIG_EFI_ZBOOT=y
+CONFIG_EFI_CAPSULE_LOADER=m
+CONFIG_EFI_TEST=m
 CONFIG_MTD=m
 CONFIG_MTD_BLOCK=m
 CONFIG_MTD_CFI=m
@@ -373,20 +633,29 @@
 CONFIG_MTD_CFI_AMDSTD=m
 CONFIG_MTD_CFI_STAA=m
 CONFIG_MTD_RAM=m
 CONFIG_MTD_ROM=m
+CONFIG_MTD_BLOCK2MTD=m
+CONFIG_MTD_SPI_NOR=m
 CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
 CONFIG_MTD_UBI_BLOCK=y
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=y
-CONFIG_PARPORT_SERIAL=y
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_SERIAL=m
 CONFIG_PARPORT_PC_FIFO=y
+CONFIG_PARPORT_1284=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_BLK_DEV_NULL_BLK=m
 CONFIG_ZRAM=m
 CONFIG_ZRAM_DEF_COMP_ZSTD=y
-CONFIG_BLK_DEV_LOOP=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM=m
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_VIRTIO_BLK=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_VIRTIO_BLK=m
 CONFIG_BLK_DEV_RBD=m
 CONFIG_BLK_DEV_NVME=y
 CONFIG_NVME_MULTIPATH=y
@@ -398,18 +667,41 @@
 CONFIG_NVME_TARGET_PASSTHRU=y
 CONFIG_NVME_TARGET_LOOP=m
 CONFIG_NVME_TARGET_RDMA=m
 CONFIG_NVME_TARGET_FC=m
+CONFIG_NVME_TARGET_FCLOOP=m
 CONFIG_NVME_TARGET_TCP=m
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_APDS9802ALS=m
+CONFIG_ISL29003=m
+CONFIG_ISL29020=m
+CONFIG_SENSORS_TSL2550=m
+CONFIG_SENSORS_BH1770=m
+CONFIG_SENSORS_APDS990X=m
 CONFIG_EEPROM_AT24=m
+CONFIG_EEPROM_LEGACY=m
+CONFIG_EEPROM_MAX6875=m
+CONFIG_SENSORS_LIS3_I2C=m
+CONFIG_MISC_RTSX_PCI=m
+CONFIG_MISC_RTSX_USB=m
+CONFIG_UACCE=m
+CONFIG_PVPANIC=y
 CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
 CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_SCSI_FC_ATTRS=m
 CONFIG_SCSI_SAS_ATA=y
 CONFIG_ISCSI_TCP=m
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_SCSI_BNX2X_FCOE=m
+CONFIG_BE2ISCSI=m
+CONFIG_SCSI_HPSA=m
+CONFIG_SCSI_AACRAID=m
 CONFIG_SCSI_MVSAS=y
 # CONFIG_SCSI_MVSAS_DEBUG is not set
 CONFIG_SCSI_MVSAS_TASKLET=y
@@ -418,8 +710,11 @@
 CONFIG_MEGARAID_NEWGEN=y
 CONFIG_MEGARAID_MM=y
 CONFIG_MEGARAID_MAILBOX=y
 CONFIG_MEGARAID_LEGACY=y
-CONFIG_MEGARAID_SAS=y
-CONFIG_SCSI_MPT2SAS=y
+CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_MPT3SAS=y
+CONFIG_SCSI_MPT2SAS=m
+CONFIG_SCSI_LEAPIORAID=m
+CONFIG_SCSI_SMARTPQI=m
 CONFIG_LIBFC=m
 CONFIG_LIBFCOE=m
 CONFIG_FCOE=m
@@ -427,35 +722,46 @@
 CONFIG_SCSI_QLOGIC_1280=m
 CONFIG_SCSI_QLA_FC=m
 CONFIG_TCM_QLA2XXX=m
 CONFIG_SCSI_QLA_ISCSI=m
-CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_CHELSIO_FCOE=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_ALUA=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
-CONFIG_AHCI_DWC=y
+CONFIG_ATA_PIIX=m
 CONFIG_PATA_ATIIXP=y
-CONFIG_PATA_PCMCIA=m
+CONFIG_ATA_GENERIC=m
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
+CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID10=m
-CONFIG_MD_RAID456=m
 CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
 CONFIG_BCACHE=m
-CONFIG_BLK_DEV_DM=y
+CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
 CONFIG_DM_CACHE=m
 CONFIG_DM_WRITECACHE=m
+CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -463,18 +769,45 @@
 CONFIG_TCM_PSCSI=m
 CONFIG_TCM_USER2=m
 CONFIG_LOOPBACK_TARGET=m
 CONFIG_ISCSI_TARGET=m
-CONFIG_NETDEVICES=y
+CONFIG_ISCSI_TARGET_CXGB4=m
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+CONFIG_FUSION_SAS=m
+CONFIG_FUSION_CTL=m
+CONFIG_FUSION_LOGGING=y
+CONFIG_FIREWIRE=m
+CONFIG_FIREWIRE_OHCI=m
+CONFIG_FIREWIRE_SBP2=m
+CONFIG_FIREWIRE_NET=m
 CONFIG_BONDING=m
-CONFIG_DUMMY=y
+CONFIG_DUMMY=m
 CONFIG_WIREGUARD=m
+CONFIG_NET_FC=y
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
-CONFIG_VXLAN=y
+CONFIG_IPVTAP=m
+CONFIG_VXLAN=m
+CONFIG_GENEVE=m
+CONFIG_MACSEC=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NTB_NETDEV=m
 CONFIG_RIONET=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_NET_VRF=m
+CONFIG_VSOCKMON=m
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_NET_VENDOR_3COM is not set
 # CONFIG_NET_VENDOR_ADAPTEC is not set
 # CONFIG_NET_VENDOR_AGERE is not set
@@ -486,36 +819,64 @@
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
 CONFIG_BNX2=y
+CONFIG_TIGON3=m
+CONFIG_BNX2X=m
+CONFIG_BNXT=m
+CONFIG_BNXT_DCB=y
 # CONFIG_NET_VENDOR_CAVIUM is not set
 CONFIG_CHELSIO_T1=m
 CONFIG_CHELSIO_T1_1G=y
 CONFIG_CHELSIO_T3=m
-CONFIG_CHELSIO_T4=m
+CONFIG_CHELSIO_T4VF=m
+CONFIG_CHELSIO_IPSEC_INLINE=m
 # CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+CONFIG_DNET=m
 # CONFIG_NET_VENDOR_DEC is not set
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_I825XX is not set
-CONFIG_E1000=y
-CONFIG_E1000E=y
-CONFIG_IGB=y
-CONFIG_IXGBE=y
+CONFIG_E1000=m
+CONFIG_E1000E=m
+CONFIG_IGB=m
+CONFIG_IGBVF=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_DCB=y
+CONFIG_IXGBEVF=m
+CONFIG_I40E=m
+CONFIG_I40E_DCB=y
+CONFIG_I40EVF=m
+CONFIG_ICE=m
+CONFIG_FM10K=m
 # CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MELLANOX is not set
+CONFIG_MLX4_EN=m
+# CONFIG_MLX4_CORE_GEN2 is not set
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_FPGA=y
+CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_CORE_IPOIB=y
+CONFIG_MLXSW_CORE=m
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
+CONFIG_ETHOC=m
 # CONFIG_NET_VENDOR_QLOGIC is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RDC is not set
 CONFIG_8139CP=m
 CONFIG_8139TOO=m
-CONFIG_R8169=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_8139TOO_8129=y
+CONFIG_R8169=m
+CONFIG_YT6801=m
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
@@ -524,46 +885,151 @@
 # CONFIG_NET_VENDOR_SIS is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
 CONFIG_STMMAC_ETH=y
+CONFIG_DWMAC_LOONGSON=m
 # CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_TEHUTI is not set
 # CONFIG_NET_VENDOR_TI is not set
 # CONFIG_NET_VENDOR_VIA is not set
-CONFIG_NGBE=y
-CONFIG_TXGBE=y
+CONFIG_NGBE=m
+CONFIG_TXGBE=m
 # CONFIG_NET_VENDOR_WIZNET is not set
 # CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_LED_TRIGGER_PHY=y
+CONFIG_SFP=y
+CONFIG_AMD_PHY=m
+CONFIG_AQUANTIA_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_BCM7XXX_PHY=m
+CONFIG_BCM87XX_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_CORTINA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_INTEL_XWAY_PHY=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=y
+CONFIG_MICREL_PHY=m
+CONFIG_MICROCHIP_T1_PHY=m
+CONFIG_MICROSEMI_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_RENESAS_PHY=m
+CONFIG_ROCKCHIP_PHY=m
+CONFIG_STE10XP=m
+CONFIG_TERANETICS_PHY=m
+CONFIG_DP83822_PHY=m
+CONFIG_DP83TC811_PHY=m
+CONFIG_DP83848_PHY=m
+CONFIG_DP83867_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_XILINX_GMII2RGMII=m
+CONFIG_MICREL_KS8995MA=m
+CONFIG_CAN_VCAN=m
+CONFIG_CAN_SLCAN=m
+CONFIG_CAN_C_CAN=m
+CONFIG_CAN_C_CAN_PLATFORM=m
+CONFIG_CAN_C_CAN_PCI=m
+CONFIG_CAN_CC770=m
+CONFIG_CAN_CC770_PLATFORM=m
+CONFIG_CAN_SJA1000=m
+CONFIG_CAN_EMS_PCI=m
+CONFIG_CAN_KVASER_PCI=m
+CONFIG_CAN_PEAK_PCI=m
+CONFIG_CAN_PLX_PCI=m
+CONFIG_CAN_SJA1000_PLATFORM=m
+CONFIG_CAN_SOFTING=m
+CONFIG_CAN_8DEV_USB=m
+CONFIG_CAN_EMS_USB=m
+CONFIG_CAN_KVASER_USB=m
+CONFIG_CAN_PEAK_USB=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_MDIO_MSCC_MIIM=m
+CONFIG_MDIO_THUNDER=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
 CONFIG_PPPOE=m
 CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
 CONFIG_USB_RTL8150=m
 CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
 # CONFIG_USB_NET_AX8817X is not set
 # CONFIG_USB_NET_AX88179_178A is not set
 CONFIG_USB_NET_CDC_EEM=m
 CONFIG_USB_NET_HUAWEI_CDC_NCM=m
 CONFIG_USB_NET_CDC_MBIM=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_GL620A=m
 # CONFIG_USB_NET_NET1080 is not set
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
 # CONFIG_USB_BELKIN is not set
 # CONFIG_USB_ARMLINUX is not set
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
 # CONFIG_USB_NET_ZAURUS is not set
+CONFIG_USB_NET_CX82310_ETH=m
+CONFIG_USB_NET_KALMIA=m
+CONFIG_USB_NET_QMI_WWAN=m
+CONFIG_USB_HSO=m
+CONFIG_USB_NET_INT51X1=m
+CONFIG_USB_IPHETH=m
+CONFIG_USB_SIERRA_NET=m
+CONFIG_USB_VL600=m
+CONFIG_USB_NET_CH9200=m
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
 CONFIG_ATH9K=m
+CONFIG_ATH9K_AHB=y
+CONFIG_ATH9K_WOW=y
 CONFIG_ATH9K_HTC=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+CONFIG_BRCMSMAC=m
+CONFIG_BRCMFMAC=m
+CONFIG_BRCMFMAC_USB=y
+CONFIG_BRCMFMAC_PCIE=y
+# CONFIG_WLAN_VENDOR_CISCO is not set
 CONFIG_IWLWIFI=m
 CONFIG_IWLDVM=m
 CONFIG_IWLMVM=m
-CONFIG_HOSTAP=m
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_MWIFIEX_USB=m
 CONFIG_MT7601U=m
+CONFIG_MT76x0U=m
+CONFIG_MT76x2U=m
 CONFIG_RT2X00=m
+CONFIG_RT2800PCI=m
 CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT3573=y
+CONFIG_RT2800USB_RT53XX=y
+CONFIG_RT2800USB_RT55XX=y
+CONFIG_RT2800USB_UNKNOWN=y
 CONFIG_RTL8192CE=m
 CONFIG_RTL8192SE=m
 CONFIG_RTL8192DE=m
@@ -575,29 +1041,80 @@
 CONFIG_RTL8821AE=m
 CONFIG_RTL8192CU=m
 # CONFIG_RTLWIFI_DEBUG is not set
 CONFIG_RTL8XXXU=m
-CONFIG_RTW88=m
-CONFIG_RTW88_8822BE=m
-CONFIG_RTW88_8822CE=m
-CONFIG_RTW88_8723DE=m
-CONFIG_RTW88_8821CE=m
-CONFIG_RTW89=m
-CONFIG_RTW89_8852AE=m
-CONFIG_RTW89_8852CE=m
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
 CONFIG_ZD1211RW=m
 CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_MAC80211_HWSIM=m
+CONFIG_WAN=y
+CONFIG_HDLC=m
+CONFIG_HDLC_RAW=m
+CONFIG_HDLC_CISCO=m
+CONFIG_HDLC_FR=m
+CONFIG_HDLC_PPP=m
+CONFIG_IEEE802154_FAKELB=m
+CONFIG_VMXNET3=m
+CONFIG_FUJITSU_ES=m
+CONFIG_USB4_NET=m
+CONFIG_NETDEVSIM=m
+CONFIG_ISDN=y
+CONFIG_MISDN=m
+CONFIG_MISDN_DSP=m
+CONFIG_MISDN_L1OIP=m
+CONFIG_MISDN_HFCPCI=m
+CONFIG_MISDN_HFCMULTI=m
+CONFIG_MISDN_HFCUSB=m
+CONFIG_MISDN_AVMFRITZ=m
+CONFIG_MISDN_SPEEDFAX=m
+CONFIG_MISDN_INFINEON=m
+CONFIG_MISDN_W6692=m
+CONFIG_MISDN_NETJET=m
 CONFIG_INPUT_MOUSEDEV=y
 CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_JOYDEV=m
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_XTKBD=m
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_MOUSE_PS2_SENTELIC=y
 CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_APPLETOUCH=m
+CONFIG_MOUSE_BCM5974=m
+CONFIG_MOUSE_CYAPA=m
+CONFIG_MOUSE_ELAN_I2C=m
+CONFIG_MOUSE_ELAN_I2C_SMBUS=y
+CONFIG_MOUSE_VSXXXAA=m
+CONFIG_MOUSE_SYNAPTICS_I2C=m
+CONFIG_MOUSE_SYNAPTICS_USB=m
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=m
+CONFIG_TABLET_USB_AIPTEK=m
+CONFIG_TABLET_USB_KBTAB=m
+CONFIG_TABLET_SERIAL_WACOM4=m
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ELO=m
+CONFIG_TOUCHSCREEN_WACOM_W8001=m
+CONFIG_TOUCHSCREEN_WACOM_I2C=m
 CONFIG_INPUT_MISC=y
+CONFIG_INPUT_ATI_REMOTE2=m
+CONFIG_INPUT_KEYSPAN_REMOTE=m
+CONFIG_INPUT_POWERMATE=m
+CONFIG_INPUT_YEALINK=m
+CONFIG_INPUT_CM109=m
 CONFIG_INPUT_UINPUT=m
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+CONFIG_RMI4_I2C=m
+CONFIG_RMI4_SPI=m
+CONFIG_RMI4_SMB=m
+CONFIG_RMI4_F34=y
+CONFIG_RMI4_F55=y
 CONFIG_SERIO_SERPORT=m
 CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+CONFIG_SERIO_ARC_PS2=m
 CONFIG_LEGACY_PTY_COUNT=16
 CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=16
 CONFIG_SERIAL_8250_RUNTIME_UARTS=16
@@ -605,37 +1122,190 @@
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_SHARE_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_JSM=m
+CONFIG_SERIAL_ARC=m
 CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_N_HDLC=m
+CONFIG_N_GSM=m
+CONFIG_NOZOMI=m
 CONFIG_PRINTER=m
+CONFIG_PPDEV=m
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_PANIC_EVENT=y
+CONFIG_IPMI_PANIC_STRING=y
 CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
+CONFIG_IPMI_SSIF=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
 CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_TCG_TIS_SPI=m
+CONFIG_TCG_TIS_I2C_ATMEL=m
+CONFIG_TCG_TIS_I2C_INFINEON=m
+CONFIG_TCG_TIS_I2C_NUVOTON=m
+CONFIG_TCG_ATMEL=m
+CONFIG_TCG_INFINEON=m
+CONFIG_TCG_TIS_ST33ZP24_I2C=m
+CONFIG_TCG_TIS_ST33ZP24_SPI=m
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD8111=m
+CONFIG_I2C_ISCH=m
 CONFIG_I2C_PIIX4=y
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+CONFIG_I2C_SCMI=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
 CONFIG_I2C_GPIO=y
-CONFIG_I2C_LS2X=y
+CONFIG_I2C_LS2X=m
+CONFIG_I2C_PCA_PLATFORM=m
+CONFIG_I2C_SIMTEC=m
+CONFIG_I2C_DIOLAN_U2C=m
+CONFIG_I2C_PARPORT=m
+CONFIG_I2C_TINY_USB=m
+CONFIG_I2C_VIPERBOARD=m
+CONFIG_I2C_STUB=m
 CONFIG_SPI=y
-CONFIG_SPI_LOONGSON_PCI=m
+CONFIG_SPI_LOONGSON_PCI=y
 CONFIG_SPI_LOONGSON_PLATFORM=m
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PPS_CLIENT_GPIO=m
+CONFIG_DP83640_PHY=m
 CONFIG_PINCTRL=y
 CONFIG_PINCTRL_LOONGSON2=y
 CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_LOONGSON=y
+CONFIG_GPIO_AMDPT=m
 CONFIG_GPIO_LOONGSON_64BIT=y
+CONFIG_GPIO_VIPERBOARD=m
 CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_RESTART=y
-CONFIG_POWER_RESET_SYSCON=y
-CONFIG_POWER_RESET_SYSCON_POWEROFF=y
-CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_SENSORS_AD7414=m
+CONFIG_SENSORS_AD7418=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1026=m
+CONFIG_SENSORS_ADM1029=m
+CONFIG_SENSORS_ADM1031=m
+CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_ADT7410=m
+CONFIG_SENSORS_ADT7411=m
+CONFIG_SENSORS_ADT7462=m
+CONFIG_SENSORS_ADT7470=m
+CONFIG_SENSORS_ADT7475=m
+CONFIG_SENSORS_ASC7621=m
+CONFIG_SENSORS_ATXP1=m
+CONFIG_SENSORS_DS620=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71805F=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_F75375S=m
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_G760A=m
+CONFIG_SENSORS_IBMAEM=m
+CONFIG_SENSORS_IBMPEX=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_JC42=m
+CONFIG_SENSORS_LINEAGE=m
+CONFIG_SENSORS_LTC4151=m
+CONFIG_SENSORS_LTC4215=m
+CONFIG_SENSORS_LTC4245=m
+CONFIG_SENSORS_LTC4261=m
+CONFIG_SENSORS_MAX16065=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_MAX1668=m
+CONFIG_SENSORS_MAX197=m
+CONFIG_SENSORS_MAX6639=m
+CONFIG_SENSORS_MAX6650=m
+CONFIG_SENSORS_MAX6697=m
+CONFIG_SENSORS_MCP3021=m
+CONFIG_SENSORS_LM63=m
+CONFIG_SENSORS_LM73=m
 CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM87=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM92=m
 CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_LM95234=m
+CONFIG_SENSORS_LM95241=m
+CONFIG_SENSORS_LM95245=m
+CONFIG_SENSORS_PC87360=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_NTC_THERMISTOR=m
+CONFIG_SENSORS_NCT6775=m
+CONFIG_SENSORS_PCF8591=m
+CONFIG_PMBUS=m
+CONFIG_SENSORS_ADM1275=m
+CONFIG_SENSORS_LM25066=m
+CONFIG_SENSORS_LTC2978=m
+CONFIG_SENSORS_MAX16064=m
+CONFIG_SENSORS_MAX34440=m
+CONFIG_SENSORS_MAX8688=m
+CONFIG_SENSORS_UCD9000=m
+CONFIG_SENSORS_UCD9200=m
+CONFIG_SENSORS_ZL6100=m
+CONFIG_SENSORS_SHT15=m
+CONFIG_SENSORS_SHT21=m
+CONFIG_SENSORS_SIS5595=m
+CONFIG_SENSORS_DME1737=m
+CONFIG_SENSORS_EMC1403=m
+CONFIG_SENSORS_EMC6W201=m
+CONFIG_SENSORS_SMSC47M1=m
+CONFIG_SENSORS_SMSC47M192=m
+CONFIG_SENSORS_SMSC47B397=m
+CONFIG_SENSORS_SCH5627=m
+CONFIG_SENSORS_SCH5636=m
+CONFIG_SENSORS_ADS7828=m
+CONFIG_SENSORS_AMC6821=m
+CONFIG_SENSORS_INA209=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_SENSORS_THMC50=m
+CONFIG_SENSORS_TMP102=m
+CONFIG_SENSORS_TMP401=m
+CONFIG_SENSORS_TMP421=m
+CONFIG_SENSORS_VIA686A=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83791D=m
+CONFIG_SENSORS_W83792D=m
+CONFIG_SENSORS_W83793=m
 CONFIG_SENSORS_W83795=m
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_SENSORS_W83L786NG=m
 CONFIG_SENSORS_W83627HF=m
+CONFIG_SENSORS_W83627EHF=m
+CONFIG_SENSORS_ACPI_POWER=m
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_EMULATION=y
 CONFIG_LOONGSON2_THERMAL=m
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_WATCHDOG_SYSFS=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_GPIO_WATCHDOG=m
+CONFIG_WDAT_WDT=m
+CONFIG_ALIM7101_WDT=m
+CONFIG_I6300ESB_WDT=m
+CONFIG_PCIPCWATCHDOG=m
+CONFIG_WDTPCI=m
+CONFIG_USBPCWATCHDOG=m
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
+CONFIG_MFD_VIPERBOARD=m
+CONFIG_MFD_SM501=m
+CONFIG_MFD_SM501_GPIO=y
+CONFIG_MFD_VX855=m
 CONFIG_RC_CORE=m
 CONFIG_LIRC=y
 CONFIG_RC_DECODERS=y
@@ -649,99 +1319,580 @@
 CONFIG_IR_SANYO_DECODER=m
 CONFIG_IR_SHARP_DECODER=m
 CONFIG_IR_SONY_DECODER=m
 CONFIG_IR_XMP_DECODER=m
+CONFIG_RC_DEVICES=y
+CONFIG_IR_ENE=m
+CONFIG_IR_FINTEK=m
+CONFIG_IR_IGUANA=m
+CONFIG_IR_IMON=m
+CONFIG_IR_IMON_RAW=m
+CONFIG_IR_ITE_CIR=m
+CONFIG_IR_MCEUSB=m
+CONFIG_IR_NUVOTON=m
+CONFIG_IR_REDRAT3=m
+CONFIG_IR_SERIAL=m
+CONFIG_IR_SERIAL_TRANSMITTER=y
+CONFIG_IR_STREAMZAP=m
+CONFIG_IR_TTUSBIR=m
+CONFIG_RC_ATI_REMOTE=m
+CONFIG_USB_PULSE8_CEC=m
+CONFIG_USB_RAINSHADOW_CEC=m
 CONFIG_MEDIA_SUPPORT=m
+CONFIG_DVB_MAX_ADAPTERS=8
 CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_GSPCA=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_PWC=m
+CONFIG_USB_S2255=m
 CONFIG_USB_VIDEO_CLASS=m
+CONFIG_VIDEO_HDPVR=m
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_AU0828=m
+CONFIG_DVB_B2C2_FLEXCOP_USB=m
+CONFIG_DVB_USB_V2=m
+CONFIG_DVB_USB_AF9035=m
+CONFIG_DVB_USB_ANYSEE=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_AZ6007=m
+CONFIG_DVB_USB_CE6230=m
+CONFIG_DVB_USB_EC168=m
+CONFIG_DVB_USB_GL861=m
+CONFIG_DVB_USB_LME2510=m
+CONFIG_DVB_USB_MXL111SF=m
+CONFIG_DVB_USB=m
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+CONFIG_DVB_USB_AZ6027=m
+CONFIG_DVB_USB_CINERGY_T2=m
+CONFIG_DVB_USB_CXUSB=m
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_DTV5100=m
+CONFIG_DVB_USB_DW2102=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_PCTV452E=m
+CONFIG_DVB_USB_TECHNISAT_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_SMS_USB_DRV=m
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+CONFIG_VIDEO_EM28XX=m
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
 CONFIG_MEDIA_PCI_SUPPORT=y
+CONFIG_VIDEO_IVTV=m
+CONFIG_VIDEO_FB_IVTV=m
 CONFIG_VIDEO_BT848=m
 CONFIG_DVB_BT8XX=m
+CONFIG_VIDEO_CX18=m
+CONFIG_VIDEO_CX23885=m
+CONFIG_MEDIA_ALTERA_CI=m
+CONFIG_VIDEO_CX88=m
+CONFIG_VIDEO_CX88_ALSA=m
+CONFIG_VIDEO_CX88_BLACKBIRD=m
+CONFIG_VIDEO_CX88_DVB=m
+# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set
+CONFIG_VIDEO_SAA7134=m
+CONFIG_VIDEO_SAA7134_ALSA=m
+CONFIG_VIDEO_SAA7134_DVB=m
+CONFIG_VIDEO_SAA7164=m
+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
+CONFIG_DVB_DDBRIDGE=m
+CONFIG_DVB_DM1105=m
+CONFIG_MANTIS_CORE=m
+CONFIG_DVB_MANTIS=m
+CONFIG_DVB_HOPPER=m
+CONFIG_DVB_NGENE=m
+CONFIG_DVB_PLUTO2=m
+CONFIG_DVB_PT1=m
+CONFIG_DVB_BUDGET_CORE=m
+CONFIG_DVB_BUDGET=m
+CONFIG_DVB_BUDGET_CI=m
+CONFIG_DVB_BUDGET_AV=m
+CONFIG_SMS_SDIO_DRV=m
+CONFIG_DVB_FIREDTV=m
 CONFIG_DRM=y
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_DP_AUX_CHARDEV=y
+CONFIG_DRM_DP_CEC=y
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
 CONFIG_DRM_RADEON=m
 CONFIG_DRM_RADEON_USERPTR=y
 CONFIG_DRM_AMDGPU=m
 CONFIG_DRM_AMDGPU_SI=y
 CONFIG_DRM_AMDGPU_CIK=y
 CONFIG_DRM_AMDGPU_USERPTR=y
-CONFIG_DRM_AST=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_VKMS=m
+CONFIG_DRM_UDL=m
+CONFIG_DRM_AST_LOONGSON=y
+CONFIG_DRM_MGAG200=m
 CONFIG_DRM_QXL=m
 CONFIG_DRM_VIRTIO_GPU=m
-CONFIG_DRM_LOONGSON=y
+CONFIG_DRM_LOONGSON=m
+CONFIG_DRM_BOCHS=m
+CONFIG_DRM_CIRRUS_QEMU=m
+CONFIG_DRM_INSPUR=m
 CONFIG_FB=y
 CONFIG_FB_EFI=y
 CONFIG_FB_RADEON=y
-CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_FB_LS2K500=m
+CONFIG_FB_TILEBLITTING=y
+CONFIG_LCD_CLASS_DEVICE=m
 CONFIG_LCD_PLATFORM=m
+CONFIG_BACKLIGHT_LP855X=m
 # CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_HRTIMER=m
+# CONFIG_SND_SUPPORT_OLD_API is not set
 CONFIG_SND_SEQUENCER=m
 CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
+CONFIG_SND_DUMMY=m
+CONFIG_SND_ALOOP=m
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_MTPAV=m
+CONFIG_SND_MPU401=m
+CONFIG_SND_AC97_POWER_SAVE=y
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
+CONFIG_SND_AD1889=m
+CONFIG_SND_ATIIXP=m
+CONFIG_SND_ATIIXP_MODEM=m
+CONFIG_SND_AU8810=m
+CONFIG_SND_AU8820=m
+CONFIG_SND_AU8830=m
 CONFIG_SND_BT87X=m
 CONFIG_SND_BT87X_OVERCLOCK=y
-CONFIG_SND_HDA_INTEL=y
+CONFIG_SND_CA0106=m
+CONFIG_SND_CMIPCI=m
+CONFIG_SND_OXYGEN=m
+CONFIG_SND_CS46XX=m
+CONFIG_SND_CTXFI=m
+CONFIG_SND_DARLA20=m
+CONFIG_SND_GINA20=m
+CONFIG_SND_LAYLA20=m
+CONFIG_SND_DARLA24=m
+CONFIG_SND_GINA24=m
+CONFIG_SND_LAYLA24=m
+CONFIG_SND_MONA=m
+CONFIG_SND_MIA=m
+CONFIG_SND_ECHO3G=m
+CONFIG_SND_INDIGO=m
+CONFIG_SND_INDIGOIO=m
+CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MIXART=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_HDA_INTEL=m CONFIG_SND_HDA_HWDEP=y CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 CONFIG_SND_HDA_PATCH_LOADER=y -CONFIG_SND_HDA_CODEC_REALTEK=y -CONFIG_SND_HDA_CODEC_SIGMATEL=y -CONFIG_SND_HDA_CODEC_HDMI=y -CONFIG_SND_HDA_CODEC_CONEXANT=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_PREALLOC_SIZE=512 +# CONFIG_SND_SPI is not set CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +CONFIG_HID_BATTERY_STRENGTH=y CONFIG_HIDRAW=y CONFIG_UHID=m CONFIG_HID_A4TECH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LENOVO=m CONFIG_HID_LOGITECH=m CONFIG_HID_LOGITECH_DJ=m CONFIG_LOGITECH_FF=y CONFIG_LOGIRUMBLEPAD2_FF=y CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEELSERIES=m CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +CONFIG_HID_PID=y CONFIG_USB_HIDDEV=y +CONFIG_I2C_HID=m +CONFIG_USB_LED_TRIG=y CONFIG_USB=y -CONFIG_USB_OTG=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y 
+CONFIG_USB_LEDS_TRIGGER_USBPORT=m CONFIG_USB_MON=y CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PLATFORM=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y -CONFIG_USB_UHCI_HCD=m -CONFIG_USB_ACM=m +CONFIG_USB_UHCI_HCD=y CONFIG_USB_PRINTER=m +CONFIG_USB_TMC=m CONFIG_USB_STORAGE=m CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m CONFIG_USB_UAS=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m CONFIG_USB_DWC2=y CONFIG_USB_DWC2_HOST=y CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m CONFIG_USB_GADGET=y CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +CONFIG_TYPEC_FUSB302=m CONFIG_TYPEC_UCSI=m CONFIG_UCSI_ACPI=m +CONFIG_TYPEC_TPS6598X=m +CONFIG_TYPEC_MUX_PI3USB30532=m +CONFIG_TYPEC_DP_ALTMODE=m +CONFIG_MMC=m +CONFIG_SDIO_UART=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_SDHCI_XENON=m +CONFIG_MEMSTICK=m +CONFIG_MSPRO_BLOCK=m +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_LM3530=m +CONFIG_LEDS_LP3944=m 
+CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_AUDIO=y CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=m +CONFIG_RTC_DRV_STK17TA8=m +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m CONFIG_RTC_DRV_LOONGSON=y CONFIG_DMADEVICES=y -CONFIG_UIO=m +CONFIG_DW_DMAC=m +CONFIG_ASYNC_TX_DMA=y +CONFIG_UIO_CIF=m CONFIG_UIO_PDRV_GENIRQ=m CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m CONFIG_UIO_PCI_GENERIC=m CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y CONFIG_VFIO_PCI=m CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=m @@ -777,7 +1928,27 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y CONFIG_DEVFREQ_GOV_POWERSAVE=y CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_IIO=m +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_HID_SENSOR_GYRO_3D=m +CONFIG_HID_SENSOR_HUMIDITY=m +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +CONFIG_HID_SENSOR_PRESS=m +CONFIG_HID_SENSOR_TEMP=m +CONFIG_NTB=m +CONFIG_NTB_PINGPONG=m +CONFIG_NTB_TOOL=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m CONFIG_PWM=y +CONFIG_POWERCAP=y +CONFIG_USB4=m +CONFIG_DAX=y +CONFIG_DEV_DAX=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -798,37 +1969,41 @@ CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V1=m -CONFIG_QFMT_V2=m +CONFIG_QFMT_V2=y CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set CONFIG_OVERLAY_FS_INDEX=y CONFIG_OVERLAY_FS_XINO_AUTO=y CONFIG_OVERLAY_FS_METACOPY=y -CONFIG_FSCACHE=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y CONFIG_CACHEFILES=m -CONFIG_ISO9660_FS=y +CONFIG_ISO9660_FS=m CONFIG_JOLIET=y CONFIG_ZISOFS=y -CONFIG_UDF_FS=y +CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=936 CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" CONFIG_EXFAT_FS=m +CONFIG_NTFS_FS=m CONFIG_NTFS3_FS=m CONFIG_NTFS3_64BIT_CLUSTER=y 
CONFIG_NTFS3_LZX_XPRESS=y CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y CONFIG_ORANGEFS_FS=m CONFIG_ECRYPT_FS=m CONFIG_ECRYPT_FS_MESSAGING=y @@ -837,7 +2012,8 @@ CONFIG_HFSPLUS_FS=m CONFIG_UBIFS_FS=m CONFIG_UBIFS_FS_ADVANCED_COMPR=y CONFIG_CRAMFS=m -CONFIG_SQUASHFS=y +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y CONFIG_SQUASHFS_XATTR=y CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y @@ -845,79 +2021,199 @@ CONFIG_SQUASHFS_XZ=y CONFIG_MINIX_FS=m CONFIG_ROMFS_FS=m CONFIG_PSTORE=m -CONFIG_PSTORE_LZO_COMPRESS=m -CONFIG_PSTORE_LZ4_COMPRESS=m -CONFIG_PSTORE_LZ4HC_COMPRESS=m -CONFIG_PSTORE_842_COMPRESS=y -CONFIG_PSTORE_ZSTD_COMPRESS=y -CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_EROFS_FS=m CONFIG_EROFS_FS_ZIP_LZMA=y CONFIG_EROFS_FS_PCPU_KTHREAD=y CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y -CONFIG_ROOT_NFS=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set CONFIG_NFSD=y CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y CONFIG_CEPH_FS=m CONFIG_CEPH_FSCACHE=y CONFIG_CEPH_FS_POSIX_ACL=y CONFIG_CEPH_FS_SECURITY_LABEL=y CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y # CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y CONFIG_9P_FS=y +CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m CONFIG_NLS_UTF8=y CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y CONFIG_KEY_DH_OPERATIONS=y CONFIG_SECURITY=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_APPARMOR=y CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_LOCKDOWN_LSM=y +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y 
+CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_LOAD_X509=y +CONFIG_EVM=y +CONFIG_EVM_LOAD_X509=y CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SM2=y CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4_GENERIC=y CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SM3_GENERIC=y CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m -CONFIG_CRYPTO_USER_API_RNG=m -CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y CONFIG_CRYPTO_CRC32_LOONGARCH=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_CHELSIO=m CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=m +CONFIG_DMA_CMA=y CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_FRAME_WARN=4096 CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_FS=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_HARDLOCKUP_DETECTOR=y # CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y -# CONFIG_DEBUG_PREEMPT is not set -# CONFIG_FTRACE is not set +CONFIG_DEBUG_LIST=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_BLK_DEV_IO_TRACE=y +# CONFIG_STRICT_DEVMEM is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_RUNTIME_TESTING_MENU is not set +CONFIG_LOONGARCH_IOMMU=m +CONFIG_CMDLINE_EXTEND=y +CONFIG_CMDLINE="vfio_iommu_type1.allow_unsafe_interrupts=1 nokaslr" diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild index dede0b422cfb91dc069f18632ab3cde04487d453..aa4ab6ccc0c0faac49efb44a142954da895f55ea 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -1,10 +1,11 @@ # SPDX-License-Identifier: GPL-2.0 +generated-y += orc_hash.h + generic-y += dma-contiguous.h generic-y += mcs_spinlock.h generic-y += parport.h generic-y += early_ioremap.h generic-y += qrwlock.h -generic-y += qspinlock.h generic-y += rwsem.h generic-y += segment.h generic-y += user.h @@ -24,4 +25,3 @@ generic-y += poll.h generic-y += param.h generic-y += posix_types.h generic-y += resource.h -generic-y += kvm_para.h diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h index 
b24437e28c6eda457b2be003b51ad3809600f7cc..d9be1df3b95fc1b39dd6186c6841d1d025c57312 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -36,6 +36,10 @@ extern unsigned long vm_map_base; #define UNCACHE_BASE CSR_DMW0_BASE #endif +#ifndef WRITECOMBINE_BASE +#define WRITECOMBINE_BASE CSR_DMW2_BASE +#endif + #define DMW_PABITS 48 #define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1) @@ -124,6 +128,7 @@ extern unsigned long vm_map_base; #define PCI_IOSIZE SZ_32M #define ISA_IOSIZE SZ_16K #define IO_SPACE_LIMIT (PCI_IOSIZE - 1) +#define ISA_PHY_IOBASE LOONGSON_LIO_BASE #define PHYS_LINK_KADDR PHYSADDR(VMLINUX_LOAD_ADDRESS) diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h index e27f0c72d3242b58aec094d40199ab30160583a5..2143202cb380f1153b58e5a4b3b4a3a7d0d0387c 100644 --- a/arch/loongarch/include/asm/atomic.h +++ b/arch/loongarch/include/asm/atomic.h @@ -15,6 +15,7 @@ #define __LL "ll.w " #define __SC "sc.w " #define __AMADD "amadd.w " +#define __AMOR "amor.w " #define __AMAND_DB "amand_db.w " #define __AMOR_DB "amor_db.w " #define __AMXOR_DB "amxor_db.w " @@ -22,6 +23,7 @@ #define __LL "ll.d " #define __SC "sc.d " #define __AMADD "amadd.d " +#define __AMOR "amor.d " #define __AMAND_DB "amand_db.d " #define __AMOR_DB "amor_db.d " #define __AMXOR_DB "amxor_db.d " diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h index d4ca3ba2541885c0a7d28229b1adacadad9ec0a5..08388876ade4ce11d60d7ab0c7658a1aacffda1d 100644 --- a/arch/loongarch/include/asm/bug.h +++ b/arch/loongarch/include/asm/bug.h @@ -44,6 +44,7 @@ do { \ instrumentation_begin(); \ __BUG_FLAGS(BUGFLAG_WARNING|(flags)); \ + annotate_reachable(); \ instrumentation_end(); \ } while (0) diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h index 80bd74106985a97d7b246447a7d0482e047b2274..f8754d08a31ab07490717c31b9253871668b9a76 100644 --- a/arch/loongarch/include/asm/cacheflush.h +++ b/arch/loongarch/include/asm/cacheflush.h @@ -37,8 +37,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end); #define flush_icache_range local_flush_icache_range #define flush_icache_user_range local_flush_icache_range -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 - #define flush_cache_all() do { } while (0) #define flush_cache_mm(mm) do { } while (0) #define flush_cache_dup_mm(mm) do { } while (0) @@ -47,7 +45,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end); #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) #define flush_icache_user_page(vma, page, addr, len) do { } while (0) -#define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h index 2eafe6a6aca8189c88617c18da45c373294a6aa8..16a716f88a5ca2c2d0117184d4a6b55046fe44d7 100644 --- a/arch/loongarch/include/asm/cpu-features.h +++ b/arch/loongarch/include/asm/cpu-features.h @@ -65,5 +65,6 @@ #define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID) #define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR) #define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW) +#define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT) #endif /* __ASM_CPU_FEATURES_H */ diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h index 
48b9f7168bcca03f92a63f891a346f88055b7bfc..843f9c4ec98071b818c615272734d2cc8428b7e5 100644 --- a/arch/loongarch/include/asm/cpu.h +++ b/arch/loongarch/include/asm/cpu.h @@ -99,6 +99,7 @@ enum cpu_type_enum { #define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */ #define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */ #define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */ +#define CPU_FEATURE_AVECINT 27 /* CPU has avec interrupt */ #define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG) #define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM) @@ -127,5 +128,6 @@ enum cpu_type_enum { #define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID) #define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR) #define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW) +#define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT) #endif /* _ASM_CPU_H */ diff --git a/arch/loongarch/include/asm/device.h b/arch/loongarch/include/asm/device.h new file mode 100644 index 0000000000000000000000000000000000000000..30cc6b61033545b3c2f8a8b6078e4e1782f7f1b8 --- /dev/null +++ b/arch/loongarch/include/asm/device.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + * Copyright (C) 2020 Loongson Technology Corporation Limited + */ +#ifndef _ASM_LOONGARCH_DEVICE_H +#define _ASM_LOONGARCH_DEVICE_H + +struct dev_archdata { + /* hook for IOMMU specific extension */ + void *iommu; + struct bus_dma_region *dma_range_map; + /* + * On some old 7A chipsets, the dma address differs from the physical + * address; the main difference is the node id. For a dma address the + * node id starts from bit 36, while for a physical address it starts + * from bit 44. The remaining address bits below the node id are the same.
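+ * + * For example (illustrative values, assuming node 1): physical address + * ((1UL << 44) | 0x20000000) corresponds to dma address + * ((1UL << 36) | 0x20000000); the bits below the node id are unchanged.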
+ */ + unsigned long dma_node_mask; + unsigned int dma_node_off; +}; + +struct pdev_archdata { +}; + +struct dma_domain { + struct list_head node; + const struct dma_map_ops *dma_ops; + int domain_nr; +}; +void add_dma_domain(struct dma_domain *domain); +void del_dma_domain(struct dma_domain *domain); + +#endif /* _ASM_LOONGARCH_DEVICE_H*/ diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h new file mode 100644 index 0000000000000000000000000000000000000000..75ccd808a2af399884bac51ac3f91a9db44b2115 --- /dev/null +++ b/arch/loongarch/include/asm/dma-direct.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _LOONGARCH_DMA_DIRECT_H +#define _LOONGARCH_DMA_DIRECT_H + +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); + +#endif /* _LOONGARCH_DMA_DIRECT_H */ diff --git a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h index eddc8e79b3fae7ba6688e702164f4c2c9d204811..54b538d7b7c0ae5ac2b1212000d808b70cd580b1 100644 --- a/arch/loongarch/include/asm/efi.h +++ b/arch/loongarch/include/asm/efi.h @@ -6,6 +6,7 @@ #define _ASM_LOONGARCH_EFI_H #include +#include void __init efi_init(void); void __init efi_runtime_init(void); diff --git a/arch/loongarch/include/asm/exception.h b/arch/loongarch/include/asm/exception.h index af74a3fdcad179f7582d0234c5cfd4cdca6d5d65..c6d20736fd9270605b57c7ad9a30110bb4b47f02 100644 --- a/arch/loongarch/include/asm/exception.h +++ b/arch/loongarch/include/asm/exception.h @@ -6,6 +6,8 @@ #include #include +extern void *exception_table[]; + void show_registers(struct pt_regs *regs); asmlinkage void cache_parity_error(void); diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h index c2d8962fda00bec9b6a7fa1fee0a3ff720823a01..4d635b8e32459fa5baf05bc963d304c710022b3f 100644 --- a/arch/loongarch/include/asm/fpu.h +++ b/arch/loongarch/include/asm/fpu.h @@ -48,6 +48,10 @@ static inline void disable_lasx(void); static inline void save_lasx(struct task_struct *t); static inline void restore_lasx(struct task_struct *t); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ +DECLARE_PER_CPU(unsigned long, msa_count); +DECLARE_PER_CPU(unsigned long, lasx_count); +#endif /* * Mask the FCSR Cause bits according to the Enable bits, observing * that Unimplemented is always enabled. 
@@ -210,6 +214,9 @@ static inline void enable_lsx(void) { if (cpu_has_lsx) csr_xchg32(CSR_EUEN_LSXEN, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ + per_cpu(msa_count, raw_smp_processor_id())++; +#endif } static inline void disable_lsx(void) @@ -256,8 +263,12 @@ static inline void restore_lsx_upper(struct task_struct *t) {} static inline void enable_lasx(void) { - if (cpu_has_lasx) + if (cpu_has_lasx) { csr_xchg32(CSR_EUEN_LASXEN, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ + per_cpu(lasx_count, raw_smp_processor_id())++; +#endif + } } static inline void disable_lasx(void) diff --git a/arch/loongarch/include/asm/ftrace.h b/arch/loongarch/include/asm/ftrace.h index a11996eb5892dd169a1e5a0ba9ad20fb854f4be8..de891c2c83d4a980284cc5376dbc0934b7233a13 100644 --- a/arch/loongarch/include/asm/ftrace.h +++ b/arch/loongarch/include/asm/ftrace.h @@ -63,7 +63,7 @@ ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs) static __always_inline void ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip) { - regs_set_return_value(&fregs->regs, ip); + instruction_pointer_set(&fregs->regs, ip); } #define ftrace_regs_get_argument(fregs, n) \ diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h index 0ef3b18f89803708d6e8a96b37c9ff037cc2e264..5f70cb77b54dadcd54bf529c4e295e6353608f5b 100644 --- a/arch/loongarch/include/asm/hardirq.h +++ b/arch/loongarch/include/asm/hardirq.h @@ -12,11 +12,17 @@ extern void ack_bad_irq(unsigned int irq); #define ack_bad_irq ack_bad_irq -#define NR_IPI 2 +enum ipi_msg_type { + IPI_RESCHEDULE, + IPI_CALL_FUNCTION, + IPI_CLEAR_VECTOR, +}; +#define NR_IPI 3 typedef struct { unsigned int ipi_irqs[NR_IPI]; unsigned int __softirq_pending; + atomic_t message ____cacheline_aligned_in_smp; } ____cacheline_aligned irq_cpustat_t; DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h index 427b487fbfd658635d750108ce59943b98aa8481..376c0708e2979bf5f4988b7c65b3e5102c4c78b7 100644 --- a/arch/loongarch/include/asm/hugetlb.h +++ b/arch/loongarch/include/asm/hugetlb.h @@ -44,7 +44,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t clear; - pte_t pte = *ptep; + pte_t pte = ptep_get(ptep); pte_val(clear) = (unsigned long)invalid_pte_table; set_pte_at(mm, addr, ptep, clear); @@ -75,7 +75,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t pte, int dirty) { - int changed = !pte_same(*ptep, pte); + int changed = !pte_same(ptep_get(ptep), pte); if (changed) { set_pte_at(vma->vm_mm, addr, ptep, pte); diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 4fa53ad82efb326fe26567475c3ff0f003a35f49..1d43a781a2dde455b0b24864762ab2f53bb1381d 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -12,6 +12,7 @@ #define INSN_NOP 0x03400000 #define INSN_BREAK 0x002a0000 +#define INSN_HVCL 0x002b8000 #define ADDR_IMMMASK_LU52ID 0xFFF0000000000000 #define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000 @@ -65,6 +66,15 @@ enum reg2_op { revbd_op = 0x0f, revh2w_op = 0x10, revhd_op = 0x11, + cpucfg_op = 0x1b, + iocsrrdb_op = 0x19200, + iocsrrdh_op = 0x19201, + iocsrrdw_op = 0x19202, + iocsrrdd_op = 0x19203, + iocsrwrb_op = 0x19204, + iocsrwrh_op = 0x19205, + iocsrwrw_op = 0x19206, + iocsrwrd_op = 0x19207, }; enum reg2i5_op { @@ -318,6 
+328,13 @@ struct reg2bstrd_format { unsigned int opcode : 10; }; +struct reg2csr_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int csr : 14; + unsigned int opcode : 8; +}; + struct reg3_format { unsigned int rd : 5; unsigned int rj : 5; @@ -346,6 +363,7 @@ union loongarch_instruction { struct reg2i14_format reg2i14_format; struct reg2i16_format reg2i16_format; struct reg2bstrd_format reg2bstrd_format; + struct reg2csr_format reg2csr_format; struct reg3_format reg3_format; struct reg3sa2_format reg3sa2_format; }; diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h index 4a8adcca329b81e4f289dd7825fb15dbf2f4f7a9..838db690b723a5c6b3f89400c5fc88d0d8f1aac5 100644 --- a/arch/loongarch/include/asm/io.h +++ b/arch/loongarch/include/asm/io.h @@ -30,10 +30,16 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size); static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long prot_val) { - if (prot_val & _CACHE_CC) + switch (prot_val & _CACHE_MASK) { + case _CACHE_CC: return (void __iomem *)(unsigned long)(CACHE_BASE + offset); - else + case _CACHE_SUC: return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset); + case _CACHE_WUC: + return (void __iomem *)(unsigned long)(WRITECOMBINE_BASE + offset); + default: + return NULL; + } } #define ioremap(offset, size) \ diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index 218b4da0ea90d012199fe65f47eceae810a0a300..a43cbd2f1dd5df0194ec10a4acd299fbe7dee126 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -38,12 +38,23 @@ static inline bool on_irq_stack(int cpu, unsigned long sp) void spurious_interrupt(void); #define NR_IRQS_LEGACY 16 +/* + * 256 Vectors Mapping for AVECINTC: + * + * 0 - 15: Mapping classic IPs, e.g. IP0-12. + * 16 - 255: Mapping vectors for external IRQ. 
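+ * + * (Editorial note: slots 16 - 255 give 240 per-CPU vectors for external + * devices, which is why the NR_IRQS definition below scales with both + * NR_CPUS and MAX_IO_PICS.)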
+ * + */ +#define NR_VECTORS 256 +#define NR_LEGACY_VECTORS 16 +#define IRQ_MATRIX_BITS NR_VECTORS + #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu); -#define MAX_IO_PICS 2 -#define NR_IRQS (64 + (256 * MAX_IO_PICS)) +#define MAX_IO_PICS 16 +#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS)) struct acpi_vector_group { int node; @@ -53,6 +64,7 @@ struct acpi_vector_group { extern struct acpi_vector_group pch_group[MAX_IO_PICS]; extern struct acpi_vector_group msi_group[MAX_IO_PICS]; +#define MAX_CORES_PER_EIO_NODE 256 #define CORES_PER_EIO_NODE 4 #define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */ @@ -65,7 +77,7 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS]; #define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15) #define LOONGSON_CPU_IRQ_BASE 16 -#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14) +#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 15) #define LOONGSON_PCH_IRQ_BASE 64 #define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47) @@ -88,20 +100,8 @@ struct acpi_madt_bio_pic; struct acpi_madt_msi_pic; struct acpi_madt_lpc_pic; -int liointc_acpi_init(struct irq_domain *parent, - struct acpi_madt_lio_pic *acpi_liointc); -int eiointc_acpi_init(struct irq_domain *parent, - struct acpi_madt_eio_pic *acpi_eiointc); - -int htvec_acpi_init(struct irq_domain *parent, - struct acpi_madt_ht_pic *acpi_htvec); -int pch_lpc_acpi_init(struct irq_domain *parent, - struct acpi_madt_lpc_pic *acpi_pchlpc); -int pch_msi_acpi_init(struct irq_domain *parent, - struct acpi_madt_msi_pic *acpi_pchmsi); -int pch_pic_acpi_init(struct irq_domain *parent, - struct acpi_madt_bio_pic *acpi_pchpic); -int find_pch_pic(u32 gsi); +void complete_irq_moving(void); + struct fwnode_handle *get_pch_msi_handle(int pci_segment); extern struct acpi_madt_lio_pic *acpi_liointc; @@ -117,8 +117,18 @@ extern struct fwnode_handle *liointc_handle; extern struct fwnode_handle *pch_lpc_handle; extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; -extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev); +extern void fixup_irqs(void); + +static inline int get_percpu_irq(int vector) +{ + struct irq_domain *d; + d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); + if (d) + return irq_create_mapping(d, vector); + + return -EINVAL; +} #include #endif /* _ASM_IRQ_H */ diff --git a/arch/loongarch/include/asm/kfence.h b/arch/loongarch/include/asm/kfence.h index 6c82aea1c99398c46484a77cc28da1316799affb..2835b41d2a848e33d1ad65e3ba1c9091258b05a3 100644 --- a/arch/loongarch/include/asm/kfence.h +++ b/arch/loongarch/include/asm/kfence.h @@ -43,13 +43,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) { pte_t *pte = virt_to_kpte(addr); - if (WARN_ON(!pte) || pte_none(*pte)) + if (WARN_ON(!pte) || pte_none(ptep_get(pte))) return false; if (protect) - set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT))); + set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT))); else - set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT))); + set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT))); preempt_disable(); local_flush_tlb_one(addr); diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h new file mode 100644 index 0000000000000000000000000000000000000000..4a76ce796f1f401eba8ce246fe8fb5892aee07c4 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_csr.h @@ -0,0 +1,217 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_CSR_H__ +#define __ASM_LOONGARCH_KVM_CSR_H__ + +#include +#include +#include +#include + +#define gcsr_read(csr) \ +({ \ + register unsigned long __v; \ + __asm__ __volatile__( \ + " gcsrrd %[val], %[reg]\n\t" \ + : [val] "=r" (__v) \ + : [reg] "i" (csr) \ + : "memory"); \ + __v; \ +}) + +#define gcsr_write(v, csr) \ +({ \ + register unsigned long __v = v; \ + __asm__ __volatile__ ( \ + " gcsrwr %[val], %[reg]\n\t" \ + : [val] "+r" (__v) \ + : [reg] "i" (csr) \ + : "memory"); \ + __v; \ +}) + +#define gcsr_xchg(v, m, csr) \ +({ \ + register unsigned long __v = v; \ + __asm__ __volatile__( \ + " gcsrxchg %[val], %[mask], %[reg]\n\t" \ + : [val] "+r" (__v) \ + : [mask] "r" (m), [reg] "i" (csr) \ + : "memory"); \ + __v; \ +}) + +/* Guest CSRS read and write */ +#define read_gcsr_crmd() gcsr_read(LOONGARCH_CSR_CRMD) +#define write_gcsr_crmd(val) gcsr_write(val, LOONGARCH_CSR_CRMD) +#define read_gcsr_prmd() gcsr_read(LOONGARCH_CSR_PRMD) +#define write_gcsr_prmd(val) gcsr_write(val, LOONGARCH_CSR_PRMD) +#define read_gcsr_euen() gcsr_read(LOONGARCH_CSR_EUEN) +#define write_gcsr_euen(val) gcsr_write(val, LOONGARCH_CSR_EUEN) +#define read_gcsr_misc() gcsr_read(LOONGARCH_CSR_MISC) +#define write_gcsr_misc(val) gcsr_write(val, LOONGARCH_CSR_MISC) +#define read_gcsr_ecfg() gcsr_read(LOONGARCH_CSR_ECFG) +#define write_gcsr_ecfg(val) gcsr_write(val, LOONGARCH_CSR_ECFG) +#define read_gcsr_estat() gcsr_read(LOONGARCH_CSR_ESTAT) +#define write_gcsr_estat(val) gcsr_write(val, LOONGARCH_CSR_ESTAT) +#define read_gcsr_era() gcsr_read(LOONGARCH_CSR_ERA) +#define write_gcsr_era(val) gcsr_write(val, LOONGARCH_CSR_ERA) +#define read_gcsr_badv() gcsr_read(LOONGARCH_CSR_BADV) +#define write_gcsr_badv(val) gcsr_write(val, LOONGARCH_CSR_BADV) +#define read_gcsr_badi() gcsr_read(LOONGARCH_CSR_BADI) +#define write_gcsr_badi(val) gcsr_write(val, LOONGARCH_CSR_BADI) +#define read_gcsr_eentry() gcsr_read(LOONGARCH_CSR_EENTRY) +#define write_gcsr_eentry(val) gcsr_write(val, LOONGARCH_CSR_EENTRY) + +#define read_gcsr_asid() gcsr_read(LOONGARCH_CSR_ASID) +#define write_gcsr_asid(val) gcsr_write(val, LOONGARCH_CSR_ASID) +#define read_gcsr_pgdl() gcsr_read(LOONGARCH_CSR_PGDL) +#define write_gcsr_pgdl(val) gcsr_write(val, LOONGARCH_CSR_PGDL) +#define read_gcsr_pgdh() gcsr_read(LOONGARCH_CSR_PGDH) +#define write_gcsr_pgdh(val) gcsr_write(val, LOONGARCH_CSR_PGDH) +#define write_gcsr_pgd(val) gcsr_write(val, LOONGARCH_CSR_PGD) +#define read_gcsr_pgd() gcsr_read(LOONGARCH_CSR_PGD) +#define read_gcsr_pwctl0() gcsr_read(LOONGARCH_CSR_PWCTL0) +#define write_gcsr_pwctl0(val) gcsr_write(val, LOONGARCH_CSR_PWCTL0) +#define read_gcsr_pwctl1() gcsr_read(LOONGARCH_CSR_PWCTL1) +#define write_gcsr_pwctl1(val) gcsr_write(val, LOONGARCH_CSR_PWCTL1) +#define read_gcsr_stlbpgsize() gcsr_read(LOONGARCH_CSR_STLBPGSIZE) +#define write_gcsr_stlbpgsize(val) gcsr_write(val, LOONGARCH_CSR_STLBPGSIZE) +#define read_gcsr_rvacfg() gcsr_read(LOONGARCH_CSR_RVACFG) +#define write_gcsr_rvacfg(val) gcsr_write(val, LOONGARCH_CSR_RVACFG) + +#define read_gcsr_cpuid() gcsr_read(LOONGARCH_CSR_CPUID) +#define write_gcsr_cpuid(val) gcsr_write(val, LOONGARCH_CSR_CPUID) +#define read_gcsr_prcfg1() gcsr_read(LOONGARCH_CSR_PRCFG1) +#define write_gcsr_prcfg1(val) gcsr_write(val, LOONGARCH_CSR_PRCFG1) +#define read_gcsr_prcfg2() gcsr_read(LOONGARCH_CSR_PRCFG2) +#define write_gcsr_prcfg2(val) gcsr_write(val, LOONGARCH_CSR_PRCFG2) 
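+ +/* + * Usage sketch (editorial, not part of the patch; assumes a live guest + * CSR context): advancing the guest exception return address to skip a + * trapping instruction with the accessors defined above: + * + * unsigned long era = read_gcsr_era(); + * write_gcsr_era(era + 4); + */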
+#define read_gcsr_prcfg3() gcsr_read(LOONGARCH_CSR_PRCFG3) +#define write_gcsr_prcfg3(val) gcsr_write(val, LOONGARCH_CSR_PRCFG3) + +#define read_gcsr_kscratch0() gcsr_read(LOONGARCH_CSR_KS0) +#define write_gcsr_kscratch0(val) gcsr_write(val, LOONGARCH_CSR_KS0) +#define read_gcsr_kscratch1() gcsr_read(LOONGARCH_CSR_KS1) +#define write_gcsr_kscratch1(val) gcsr_write(val, LOONGARCH_CSR_KS1) +#define read_gcsr_kscratch2() gcsr_read(LOONGARCH_CSR_KS2) +#define write_gcsr_kscratch2(val) gcsr_write(val, LOONGARCH_CSR_KS2) +#define read_gcsr_kscratch3() gcsr_read(LOONGARCH_CSR_KS3) +#define write_gcsr_kscratch3(val) gcsr_write(val, LOONGARCH_CSR_KS3) +#define read_gcsr_kscratch4() gcsr_read(LOONGARCH_CSR_KS4) +#define write_gcsr_kscratch4(val) gcsr_write(val, LOONGARCH_CSR_KS4) +#define read_gcsr_kscratch5() gcsr_read(LOONGARCH_CSR_KS5) +#define write_gcsr_kscratch5(val) gcsr_write(val, LOONGARCH_CSR_KS5) +#define read_gcsr_kscratch6() gcsr_read(LOONGARCH_CSR_KS6) +#define write_gcsr_kscratch6(val) gcsr_write(val, LOONGARCH_CSR_KS6) +#define read_gcsr_kscratch7() gcsr_read(LOONGARCH_CSR_KS7) +#define write_gcsr_kscratch7(val) gcsr_write(val, LOONGARCH_CSR_KS7) + +#define read_gcsr_timerid() gcsr_read(LOONGARCH_CSR_TMID) +#define write_gcsr_timerid(val) gcsr_write(val, LOONGARCH_CSR_TMID) +#define read_gcsr_timercfg() gcsr_read(LOONGARCH_CSR_TCFG) +#define write_gcsr_timercfg(val) gcsr_write(val, LOONGARCH_CSR_TCFG) +#define read_gcsr_timertick() gcsr_read(LOONGARCH_CSR_TVAL) +#define write_gcsr_timertick(val) gcsr_write(val, LOONGARCH_CSR_TVAL) +#define read_gcsr_timeroffset() gcsr_read(LOONGARCH_CSR_CNTC) +#define write_gcsr_timeroffset(val) gcsr_write(val, LOONGARCH_CSR_CNTC) + +#define read_gcsr_llbctl() gcsr_read(LOONGARCH_CSR_LLBCTL) +#define write_gcsr_llbctl(val) gcsr_write(val, LOONGARCH_CSR_LLBCTL) + +#define read_gcsr_tlbidx() gcsr_read(LOONGARCH_CSR_TLBIDX) +#define write_gcsr_tlbidx(val) gcsr_write(val, LOONGARCH_CSR_TLBIDX) +#define read_gcsr_tlbrentry() gcsr_read(LOONGARCH_CSR_TLBRENTRY) +#define write_gcsr_tlbrentry(val) gcsr_write(val, LOONGARCH_CSR_TLBRENTRY) +#define read_gcsr_tlbrbadv() gcsr_read(LOONGARCH_CSR_TLBRBADV) +#define write_gcsr_tlbrbadv(val) gcsr_write(val, LOONGARCH_CSR_TLBRBADV) +#define read_gcsr_tlbrera() gcsr_read(LOONGARCH_CSR_TLBRERA) +#define write_gcsr_tlbrera(val) gcsr_write(val, LOONGARCH_CSR_TLBRERA) +#define read_gcsr_tlbrsave() gcsr_read(LOONGARCH_CSR_TLBRSAVE) +#define write_gcsr_tlbrsave(val) gcsr_write(val, LOONGARCH_CSR_TLBRSAVE) +#define read_gcsr_tlbrelo0() gcsr_read(LOONGARCH_CSR_TLBRELO0) +#define write_gcsr_tlbrelo0(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO0) +#define read_gcsr_tlbrelo1() gcsr_read(LOONGARCH_CSR_TLBRELO1) +#define write_gcsr_tlbrelo1(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO1) +#define read_gcsr_tlbrehi() gcsr_read(LOONGARCH_CSR_TLBREHI) +#define write_gcsr_tlbrehi(val) gcsr_write(val, LOONGARCH_CSR_TLBREHI) +#define read_gcsr_tlbrprmd() gcsr_read(LOONGARCH_CSR_TLBRPRMD) +#define write_gcsr_tlbrprmd(val) gcsr_write(val, LOONGARCH_CSR_TLBRPRMD) + +#define read_gcsr_directwin0() gcsr_read(LOONGARCH_CSR_DMWIN0) +#define write_gcsr_directwin0(val) gcsr_write(val, LOONGARCH_CSR_DMWIN0) +#define read_gcsr_directwin1() gcsr_read(LOONGARCH_CSR_DMWIN1) +#define write_gcsr_directwin1(val) gcsr_write(val, LOONGARCH_CSR_DMWIN1) +#define read_gcsr_directwin2() gcsr_read(LOONGARCH_CSR_DMWIN2) +#define write_gcsr_directwin2(val) gcsr_write(val, LOONGARCH_CSR_DMWIN2) +#define read_gcsr_directwin3() gcsr_read(LOONGARCH_CSR_DMWIN3) +#define 
write_gcsr_directwin3(val) gcsr_write(val, LOONGARCH_CSR_DMWIN3) + +/* Guest related CSRs */ +#define read_csr_gtlbc() csr_read64(LOONGARCH_CSR_GTLBC) +#define write_csr_gtlbc(val) csr_write64(val, LOONGARCH_CSR_GTLBC) +#define read_csr_trgp() csr_read64(LOONGARCH_CSR_TRGP) +#define read_csr_gcfg() csr_read64(LOONGARCH_CSR_GCFG) +#define write_csr_gcfg(val) csr_write64(val, LOONGARCH_CSR_GCFG) +#define read_csr_gstat() csr_read64(LOONGARCH_CSR_GSTAT) +#define write_csr_gstat(val) csr_write64(val, LOONGARCH_CSR_GSTAT) +#define read_csr_gintc() csr_read64(LOONGARCH_CSR_GINTC) +#define write_csr_gintc(val) csr_write64(val, LOONGARCH_CSR_GINTC) +#define read_csr_gcntc() csr_read64(LOONGARCH_CSR_GCNTC) +#define write_csr_gcntc(val) csr_write64(val, LOONGARCH_CSR_GCNTC) + +#define __BUILD_GCSR_OP(name) __BUILD_CSR_COMMON(gcsr_##name) + +__BUILD_CSR_OP(gcfg) +__BUILD_CSR_OP(gstat) +__BUILD_CSR_OP(gtlbc) +__BUILD_CSR_OP(gintc) +__BUILD_GCSR_OP(llbctl) +__BUILD_GCSR_OP(tlbidx) + +#define set_gcsr_estat(val) \ + gcsr_xchg(val, val, LOONGARCH_CSR_ESTAT) +#define clear_gcsr_estat(val) \ + gcsr_xchg(~(val), val, LOONGARCH_CSR_ESTAT) + +#define kvm_read_hw_gcsr(id) gcsr_read(id) +#define kvm_write_hw_gcsr(id, val) gcsr_write(val, id) + +#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid)) +#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid)) + +#define kvm_read_clear_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_write(0, gid)) + +int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu); + +static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid) +{ + return csr->csrs[gid]; +} + +static __always_inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned long val) +{ + csr->csrs[gid] = val; +} + +static __always_inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr, + int gid, unsigned long val) +{ + csr->csrs[gid] |= val; +} + +static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, + int gid, unsigned long mask, unsigned long val) +{ + unsigned long _mask = mask; + + csr->csrs[gid] &= ~_mask; + csr->csrs[gid] |= val & _mask; +} + +#define KVM_PMU_EVENT_ENABLED (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | \ + CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3) + +#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */ diff --git a/arch/loongarch/include/asm/kvm_extioi.h b/arch/loongarch/include/asm/kvm_extioi.h new file mode 100644 index 0000000000000000000000000000000000000000..c2bd295d0edcb6d0b750ba89e55fef2a4d6a1966 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_extioi.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_EXTIOI_H +#define LOONGARCH_EXTIOI_H + +#include + +#define EXTIOI_IRQS 256 +#define EXTIOI_ROUTE_MAX_VCPUS 256 +#define EXTIOI_IRQS_U8_NUMS (EXTIOI_IRQS / 8) +#define EXTIOI_IRQS_U32_NUMS (EXTIOI_IRQS_U8_NUMS / 4) +#define EXTIOI_IRQS_U64_NUMS (EXTIOI_IRQS_U32_NUMS / 2) +/* map to ipnum per 32 irqs */ +#define EXTIOI_IRQS_NODETYPE_COUNT 16 + +#define EXTIOI_BASE 0x1400 +#define EXTIOI_SIZE 0x900 + +#define EXTIOI_NODETYPE_START 0xa0 +#define EXTIOI_NODETYPE_END 0xbf +#define EXTIOI_IPMAP_START 0xc0 +#define EXTIOI_IPMAP_END 0xc7 +#define EXTIOI_ENABLE_START 0x200 +#define EXTIOI_ENABLE_END 0x21f +#define EXTIOI_BOUNCE_START 0x280 +#define EXTIOI_BOUNCE_END 0x29f +#define EXTIOI_ISR_START 0x300 +#define EXTIOI_ISR_END 0x31f +#define EXTIOI_COREISR_START 0x400 +#define EXTIOI_COREISR_END 0x71f 
+#define EXTIOI_COREMAP_START 0x800 +#define EXTIOI_COREMAP_END 0x8ff + +#define LS3A_INTC_IP 8 + +#define EXTIOI_SW_COREMAP_FLAG (1 << 0) + +struct loongarch_extioi { + spinlock_t lock; + struct kvm *kvm; + struct kvm_io_device device; + /* hardware state */ + union nodetype { + u64 reg_u64[EXTIOI_IRQS_NODETYPE_COUNT / 4]; + u32 reg_u32[EXTIOI_IRQS_NODETYPE_COUNT / 2]; + uint16_t reg_u16[EXTIOI_IRQS_NODETYPE_COUNT]; + u8 reg_u8[EXTIOI_IRQS_NODETYPE_COUNT * 2]; + } nodetype; + + /* one bit shows the state of one irq */ + union bounce { + u64 reg_u64[EXTIOI_IRQS_U64_NUMS]; + u32 reg_u32[EXTIOI_IRQS_U32_NUMS]; + u8 reg_u8[EXTIOI_IRQS_U8_NUMS]; + } bounce; + + union isr { + u64 reg_u64[EXTIOI_IRQS_U64_NUMS]; + u32 reg_u32[EXTIOI_IRQS_U32_NUMS]; + u8 reg_u8[EXTIOI_IRQS_U8_NUMS]; + } isr; + union coreisr { + u64 reg_u64[EXTIOI_ROUTE_MAX_VCPUS][EXTIOI_IRQS_U64_NUMS]; + u32 reg_u32[EXTIOI_ROUTE_MAX_VCPUS][EXTIOI_IRQS_U32_NUMS]; + u8 reg_u8[EXTIOI_ROUTE_MAX_VCPUS][EXTIOI_IRQS_U8_NUMS]; + } coreisr; + union enable { + u64 reg_u64[EXTIOI_IRQS_U64_NUMS]; + u32 reg_u32[EXTIOI_IRQS_U32_NUMS]; + u8 reg_u8[EXTIOI_IRQS_U8_NUMS]; + } enable; + + /* use one byte to config ipmap for 32 irqs at once */ + union ipmap { + u64 reg_u64; + u32 reg_u32[EXTIOI_IRQS_U32_NUMS / 4]; + u8 reg_u8[EXTIOI_IRQS_U8_NUMS / 4]; + } ipmap; + /* use one byte to config coremap for one irq */ + union coremap { + u64 reg_u64[EXTIOI_IRQS / 8]; + u32 reg_u32[EXTIOI_IRQS / 4]; + u8 reg_u8[EXTIOI_IRQS]; + } coremap; + + DECLARE_BITMAP(sw_coreisr[EXTIOI_ROUTE_MAX_VCPUS][LS3A_INTC_IP], EXTIOI_IRQS); + uint8_t sw_coremap[EXTIOI_IRQS]; +}; + +void extioi_set_irq(struct loongarch_extioi *s, int irq, int level); +int kvm_loongarch_register_extioi_device(void); +int kvm_loongarch_reset_extioi(struct kvm *kvm); +#endif /* LOONGARCH_EXTIOI_H */ diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h new file mode 100644 index 0000000000000000000000000000000000000000..d28d70fea012b6cc7b36ae82ee66296bc12645d2 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_host.h @@ -0,0 +1,366 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_HOST_H__ +#define __ASM_LOONGARCH_KVM_HOST_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* Loongarch KVM register ids */ +#define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT) +#define KVM_GET_IOC_CPUCFG_IDX(id) ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT) + +#define KVM_MAX_VCPUS 256 +#define KVM_MAX_CPUCFG_REGS 21 + +#define KVM_HALT_POLL_NS_DEFAULT 500000 +#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0) +#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1) +#define KVM_REQ_PMU KVM_ARCH_REQ(2) + +#define KVM_GUESTDBG_SW_BP_MASK \ + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP) +#define KVM_GUESTDBG_VALID_MASK \ + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP) + +/* KVM_IRQ_LINE irq field index values */ +#define KVM_LOONGARCH_IRQ_TYPE_SHIFT 24 +#define KVM_LOONGARCH_IRQ_TYPE_MASK 0xff +#define KVM_LOONGARCH_IRQ_VCPU_SHIFT 16 +#define KVM_LOONGARCH_IRQ_VCPU_MASK 0xff +#define KVM_LOONGARCH_IRQ_NUM_SHIFT 0 +#define KVM_LOONGARCH_IRQ_NUM_MASK 0xffff + +/* irq_type field */ +#define KVM_LOONGARCH_IRQ_TYPE_CPU_IP 0 +#define KVM_LOONGARCH_IRQ_TYPE_CPU_IO 1 +#define KVM_LOONGARCH_IRQ_TYPE_HT 2 +#define KVM_LOONGARCH_IRQ_TYPE_MSI 
3 +#define KVM_LOONGARCH_IRQ_TYPE_IOAPIC 4 +#define KVM_LOONGARCH_IRQ_TYPE_ROUTE 5 + +#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ + KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP) + +#define KVM_DIRTY_LOG_MANUAL_CAPS \ + (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET) + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; + u64 pages; + u64 hugepages; + u64 ipi_read_exits; + u64 ipi_write_exits; + u64 extioi_read_exits; + u64 extioi_write_exits; + u64 pch_pic_read_exits; + u64 pch_pic_write_exits; +}; + +struct kvm_vcpu_stat { + struct kvm_vcpu_stat_generic generic; + u64 int_exits; + u64 idle_exits; + u64 cpucfg_exits; + u64 signal_exits; + u64 hypercall_exits; +}; + +#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0) +#define KVM_MEM_HUGEPAGE_INCAPABLE (1UL << 1) +struct kvm_arch_memory_slot { + unsigned long flags; +}; + +#define HOST_MAX_PMNUM 16 +struct kvm_context { + unsigned long vpid_cache; + struct kvm_vcpu *last_vcpu; + /* Host PMU CSR */ + u64 perf_ctrl[HOST_MAX_PMNUM]; + u64 perf_cntr[HOST_MAX_PMNUM]; +}; + +struct kvm_world_switch { + int (*exc_entry)(void); + int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu); + unsigned long page_order; +}; + +#define MAX_PGTABLE_LEVELS 4 + +/* + * Physical CPUID is used for interrupt routing; the definition of the + * physical cpuid differs between hardware blocks. + * + * For the LOONGARCH_CSR_CPUID register, the max CPUID size is 512 + * For IPI hardware, the max destination CPUID size is 1024 + * For the extioi interrupt controller, the max destination CPUID size is 256 + * For the msgint interrupt controller, the max supported CPUID size is 65536 + * + * Currently the max CPUID is defined as 256 for the KVM hypervisor; in + * the future it will be expanded to 4096, including at most 16 packages.
And every + * package supports at most 256 vcpus + */ +#define KVM_MAX_PHYID 256 + +struct kvm_phyid_info { + struct kvm_vcpu *vcpu; + bool enabled; +}; + +struct kvm_phyid_map { + int max_phyid; + struct kvm_phyid_info phys_map[KVM_MAX_PHYID]; +}; + +struct kvm_arch { + /* Guest physical mm */ + kvm_pte_t *pgd; + unsigned long gpa_size; + unsigned long invalid_ptes[MAX_PGTABLE_LEVELS]; + unsigned int pte_shifts[MAX_PGTABLE_LEVELS]; + unsigned int root_level; + spinlock_t phyid_map_lock; + struct kvm_phyid_map *phyid_map; + /* Enabled PV features */ + unsigned long pv_features; + + s64 time_offset; + struct kvm_context __percpu *vmcs; + struct loongarch_ipi *ipi; + struct loongarch_extioi *extioi; + struct loongarch_pch_pic *pch_pic; +}; + +#define CSR_MAX_NUMS 0x800 + +struct loongarch_csrs { + unsigned long csrs[CSR_MAX_NUMS]; +}; + +/* Resume Flags */ +#define RESUME_HOST 0 +#define RESUME_GUEST 1 + +enum emulation_result { + EMULATE_DONE, /* no further processing */ + EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ + EMULATE_DO_IOCSR, /* handle IOCSR request */ + EMULATE_FAIL, /* can't emulate this instruction */ + EMULATE_EXCEPT, /* A guest exception has been generated */ +}; + +#define KVM_LARCH_FPU (0x1 << 0) +#define KVM_LARCH_LSX (0x1 << 1) +#define KVM_LARCH_LASX (0x1 << 2) +#define KVM_LARCH_LBT (0x1 << 3) +#define KVM_LARCH_PMU (0x1 << 4) +#define KVM_LARCH_SWCSR_LATEST (0x1 << 5) +#define KVM_LARCH_HWCSR_USABLE (0x1 << 6) + +#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63) +#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \ + BIT(KVM_FEATURE_STEAL_TIME) | \ + BIT(KVM_FEATURE_VIRT_EXTIOI)) + +struct kvm_vcpu_arch { + /* + * Switch pointer-to-function type to unsigned long + * for loading the value into register directly. + */ + unsigned long host_eentry; + unsigned long guest_eentry; + + /* Pointers stored here for easy accessing from assembly code */ + int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); + + /* Host registers preserved across guest mode execution */ + unsigned long host_sp; + unsigned long host_tp; + unsigned long host_pgd; + + /* Host CSRs are used when handling exits from guest */ + unsigned long badi; + unsigned long badv; + unsigned long host_ecfg; + unsigned long host_estat; + unsigned long host_percpu; + + /* GPRs */ + unsigned long gprs[32]; + unsigned long pc; + + /* Which auxiliary state is loaded (KVM_LARCH_*) */ + unsigned int aux_inuse; + + /* FPU state */ + struct loongarch_fpu fpu FPU_ALIGN; + struct loongarch_lbt lbt; + + /* CSR state */ + struct loongarch_csrs *csr; + + /* Guest max PMU CSR id */ + int max_pmu_csrid; + + /* GPR used as IO source/target */ + u32 io_gpr; + + /* KVM register to control count timer */ + u32 count_ctl; + struct hrtimer swtimer; + + /* Bitmask of intr that are pending */ + unsigned long irq_pending; + /* Bitmask of pending intr to be cleared */ + unsigned long irq_clear; + + /* Bitmask of exceptions that are pending */ + unsigned long exception_pending; + unsigned int esubcode; + + /* Cache for pages needed inside spinlock regions */ + struct kvm_mmu_memory_cache mmu_page_cache; + + /* vcpu's vpid */ + u64 vpid; + gpa_t flush_gpa; + + /* Frequency of stable timer in Hz */ + u64 timer_mhz; + ktime_t expire; + + /* Last CPU the vCPU state was loaded on */ + int last_sched_cpu; + /* mp state */ + struct kvm_mp_state mp_state; + /* ipi state */ + struct ipi_state ipi_state; + /* cpucfg */ + u32 cpucfg[KVM_MAX_CPUCFG_REGS]; + /* paravirt steal time */ + struct { + u64 guest_addr; + u64 last_steal; + 
struct gfn_to_hva_cache cache; + } st; +}; + +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg) +{ + return csr->csrs[reg]; +} + +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val) +{ + csr->csrs[reg] = val; +} + +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_FP; +} + +static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_LSX; +} + +static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_LASX; +} + +static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT); +} + +static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[6] & CPUCFG6_PMP; +} + +static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch) +{ + return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT; +} + +/* Debug: dump vcpu state */ +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); + +/* MMU handling */ +void kvm_flush_tlb_all(void); +void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa); +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write); + +#define KVM_ARCH_WANT_MMU_NOTIFIER +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); + +static inline void update_pc(struct kvm_vcpu_arch *arch) +{ + arch->pc += 4; +} + +/* + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to an ifetch fault. + * @arch: Virtual CPU architecture state. + * + * Returns: Whether the TLBL exception was likely due to an instruction + * fetch fault rather than a data load fault.
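+ * + * The heuristic: on an instruction fetch fault the bad virtual address + * (badv) is the pc itself, whereas a data access normally faults on an + * address different from the pc.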
+ */ +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch) +{ + return arch->pc == arch->badv; +} + +/* Misc */ +static inline void kvm_arch_hardware_unsetup(void) {} +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {} +void kvm_check_vpid(struct kvm_vcpu *vcpu); +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer); +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot); +void kvm_init_vmcs(struct kvm *kvm); +void kvm_exc_entry(void); +int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu); + +extern unsigned long vpid_mask; +extern const unsigned long kvm_exception_size; +extern const unsigned long kvm_enter_guest_size; +extern struct kvm_world_switch *kvm_loongarch_ops; + +#define SW_GCSR (1 << 0) +#define HW_GCSR (1 << 1) +#define INVALID_GCSR (1 << 2) + +int get_gcsr_flag(int csr); +void set_hw_gcsr(int csr_id, unsigned long val); + +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */ diff --git a/arch/loongarch/include/asm/kvm_ipi.h b/arch/loongarch/include/asm/kvm_ipi.h new file mode 100644 index 0000000000000000000000000000000000000000..729dfc1e3f401758601a9bf8673f9ee51f8156b4 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_ipi.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef __LS3A_KVM_IPI_H +#define __LS3A_KVM_IPI_H + +#include + +#define LARCH_INT_IPI 12 + +struct loongarch_ipi { + spinlock_t lock; + struct kvm *kvm; + struct kvm_io_device device; + struct kvm_io_device mail_dev; +}; + +struct ipi_state { + spinlock_t lock; + uint32_t status; + uint32_t en; + uint32_t set; + uint32_t clear; + uint64_t buf[4]; +}; + +#define SMP_MAILBOX 0x1000 +#define KVM_IOCSR_IPI_ADDR_SIZE 0x48 + +#define CORE_STATUS_OFF 0x000 +#define CORE_EN_OFF 0x004 +#define CORE_SET_OFF 0x008 +#define CORE_CLEAR_OFF 0x00c +#define CORE_BUF_20 0x020 +#define CORE_BUF_28 0x028 +#define CORE_BUF_30 0x030 +#define CORE_BUF_38 0x038 +#define IOCSR_IPI_SEND 0x040 + +#define IOCSR_MAIL_SEND 0x048 +#define IOCSR_ANY_SEND 0x158 + +#define MAIL_SEND_ADDR (SMP_MAILBOX + IOCSR_MAIL_SEND) +#define KVM_IOCSR_MAIL_ADDR_SIZE 0x118 + +#define MAIL_SEND_OFFSET 0 +#define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND) + +int kvm_loongarch_register_ipi_device(void); +#endif diff --git a/arch/loongarch/include/asm/kvm_mmu.h b/arch/loongarch/include/asm/kvm_mmu.h new file mode 100644 index 0000000000000000000000000000000000000000..099bafc6f797c960adf971147150ce5e9a580407 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_mmu.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_MMU_H__ +#define __ASM_LOONGARCH_KVM_MMU_H__ + +#include +#include +#include + +/* + * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels + * for which pages need to be cached. 
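+ * + * With CONFIG_PGTABLE_LEVELS == 4, for instance, this evaluates to 3: + * a map operation may need to allocate one table page for each + * non-root level of the walk.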
+ */ +#define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1) + +#define _KVM_FLUSH_PGTABLE 0x1 +#define _KVM_HAS_PGMASK 0x2 +#define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot)) +#define kvm_pte_pfn(x) ((phys_addr_t)(((x) & _PFN_MASK) >> PFN_PTE_SHIFT)) + +typedef unsigned long kvm_pte_t; +typedef struct kvm_ptw_ctx kvm_ptw_ctx; +typedef int (*kvm_pte_ops)(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx); + +struct kvm_ptw_ctx { + kvm_pte_ops ops; + unsigned long flag; + + /* for kvm_arch_mmu_enable_log_dirty_pt_masked use */ + unsigned long mask; + unsigned long gfn; + + /* page walk mmu info */ + unsigned int level; + unsigned long pgtable_shift; + unsigned long invalid_entry; + unsigned long *invalid_ptes; + unsigned int *pte_shifts; + void *opaque; + + /* free pte table page list */ + struct list_head list; +}; + +kvm_pte_t *kvm_pgd_alloc(void); + +static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val) +{ + WRITE_ONCE(*ptep, val); +} + +static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; } +static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; } +static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; } +static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; } + +static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte) +{ + return pte | _PAGE_ACCESSED; +} + +static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte) +{ + return pte & ~_PAGE_ACCESSED; +} + +static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte) +{ + return pte | _PAGE_DIRTY; +} + +static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte) +{ + return pte & ~_PAGE_DIRTY; +} + +static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte) +{ + return pte | _PAGE_HUGE; +} + +static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte) +{ + return pte & ~_PAGE_HUGE; +} + +static inline int kvm_need_flush(kvm_ptw_ctx *ctx) +{ + return ctx->flag & _KVM_FLUSH_PGTABLE; +} + +static inline kvm_pte_t *kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table, + phys_addr_t addr) +{ + return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1)); +} + +static inline phys_addr_t kvm_pgtable_addr_end(kvm_ptw_ctx *ctx, + phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t boundary, size; + + size = 0x1UL << ctx->pgtable_shift; + boundary = (addr + size) & ~(size - 1); + return (boundary - 1 < end - 1) ? 
boundary : end; +} + +static inline int kvm_pte_present(kvm_ptw_ctx *ctx, kvm_pte_t *entry) +{ + if (!ctx || ctx->level == 0) + return !!(*entry & _PAGE_PRESENT); + + return *entry != ctx->invalid_entry; +} + +static inline int kvm_pte_none(kvm_ptw_ctx *ctx, kvm_pte_t *entry) +{ + return *entry == ctx->invalid_entry; +} + +static inline void kvm_ptw_enter(kvm_ptw_ctx *ctx) +{ + ctx->level--; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; +} + +static inline void kvm_ptw_exit(kvm_ptw_ctx *ctx) +{ + ctx->level++; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; +} + +#endif /* __ASM_LOONGARCH_KVM_MMU_H__ */ diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h new file mode 100644 index 0000000000000000000000000000000000000000..710ca8c4b61d63567cc98495a11a118e2b17f253 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_para.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_KVM_PARA_H +#define _ASM_LOONGARCH_KVM_PARA_H + +#include + +/* + * Hypercall code field + */ +#define HYPERVISOR_KVM 1 +#define HYPERVISOR_VENDOR_SHIFT 8 +#define HYPERCALL_ENCODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code) + +#define KVM_HCALL_CODE_SERVICE 0 +#define KVM_HCALL_CODE_SWDBG 1 + +#define KVM_HCALL_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE) +#define KVM_HCALL_FUNC_IPI 1 +#define KVM_HCALL_FUNC_NOTIFY 2 + +#define KVM_HCALL_SWDBG HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG) + +/* + * LoongArch hypercall return code + */ +#define KVM_HCALL_SUCCESS 0 +#define KVM_HCALL_INVALID_CODE -1UL +#define KVM_HCALL_INVALID_PARAMETER -2UL + +#define KVM_STEAL_PHYS_VALID BIT_ULL(0) +#define KVM_STEAL_PHYS_MASK GENMASK_ULL(63, 6) + +struct kvm_steal_time { + __u64 steal; + __u32 version; + __u32 flags; + __u32 pad[12]; +}; + + +/* + * Hypercall interface for KVM hypervisor + * + * a0: function identifier + * a1-a5: args + * Return value will be placed in a0. + * Up to 5 arguments are passed in a1, a2, a3, a4, a5. 
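 + * + * Example (a sketch only; the argument layout for KVM_HCALL_FUNC_IPI is + * fixed by the hypervisor implementation, not by this header): + * + *	ret = kvm_hypercall3(KVM_HCALL_FUNC_IPI, bitmap_lo, bitmap_hi, min); + * + * where a return of KVM_HCALL_INVALID_CODE means the service is not + * implemented by the hypervisor underneath.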
+ */ +static __always_inline long kvm_hypercall0(u64 fid) +{ + register long ret asm("a0"); + register unsigned long fun asm("a0") = fid; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_SERVICE) + : "=r" (ret) + : "r" (fun) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0) +{ + register long ret asm("a0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall2(u64 fid, + unsigned long arg0, unsigned long arg1) +{ + register long ret asm("a0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1), "r" (a2) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall3(u64 fid, + unsigned long arg0, unsigned long arg1, unsigned long arg2) +{ + register long ret asm("a0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1), "r" (a2), "r" (a3) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall4(u64 fid, + unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3) +{ + register long ret asm("a0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + register unsigned long a4 asm("a4") = arg3; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_SERVICE) + : "=r" (ret) + : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall5(u64 fid, + unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, unsigned long arg4) +{ + register long ret asm("a0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + register unsigned long a4 asm("a4") = arg3; + register unsigned long a5 asm("a5") = arg4; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_SERVICE) + : "=r" (ret) + : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5) + : "memory" + ); + + return ret; +} + +#ifdef CONFIG_PARAVIRT +bool kvm_para_available(void); +unsigned int kvm_arch_para_features(void); +#else +static inline bool kvm_para_available(void) +{ + return false; +} + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} +#endif + +static inline unsigned int kvm_arch_para_hints(void) +{ + return 0; +} + +static inline bool kvm_check_and_clear_guest_paused(void) +{ + return false; +} + +#endif /* _ASM_LOONGARCH_KVM_PARA_H */ diff --git a/arch/loongarch/include/asm/kvm_pch_pic.h b/arch/loongarch/include/asm/kvm_pch_pic.h new file mode 100644 index 0000000000000000000000000000000000000000..91bd5a5ec575d23662df8e85a28ce44024841db7 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_pch_pic.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * 
Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_PCH_PIC_H +#define LOONGARCH_PCH_PIC_H + +#include + +#define PCH_PIC_SIZE 0x3e8 + +#define PCH_PIC_INT_ID_START 0x0 +#define PCH_PIC_INT_ID_END 0x7 +#define PCH_PIC_MASK_START 0x20 +#define PCH_PIC_MASK_END 0x27 +#define PCH_PIC_HTMSI_EN_START 0x40 +#define PCH_PIC_HTMSI_EN_END 0x47 +#define PCH_PIC_EDGE_START 0x60 +#define PCH_PIC_EDGE_END 0x67 +#define PCH_PIC_CLEAR_START 0x80 +#define PCH_PIC_CLEAR_END 0x87 +#define PCH_PIC_AUTO_CTRL0_START 0xc0 +#define PCH_PIC_AUTO_CTRL0_END 0xc7 +#define PCH_PIC_AUTO_CTRL1_START 0xe0 +#define PCH_PIC_AUTO_CTRL1_END 0xe7 +#define PCH_PIC_ROUTE_ENTRY_START 0x100 +#define PCH_PIC_ROUTE_ENTRY_END 0x13f +#define PCH_PIC_HTMSI_VEC_START 0x200 +#define PCH_PIC_HTMSI_VEC_END 0x23f +#define PCH_PIC_INT_IRR_START 0x380 +#define PCH_PIC_INT_IRR_END 0x38f +#define PCH_PIC_INT_ISR_START 0x3a0 +#define PCH_PIC_INT_ISR_END 0x3af +#define PCH_PIC_POLARITY_START 0x3e0 +#define PCH_PIC_POLARITY_END 0x3e7 +#define PCH_PIC_INT_ID_VAL 0x7000000UL +#define PCH_PIC_INT_ID_VER 0x1UL + +struct loongarch_pch_pic { + spinlock_t lock; + struct kvm *kvm; + struct kvm_io_device device; + uint64_t mask; /* 1:disable irq, 0:enable irq */ + uint64_t htmsi_en; /* 1:msi */ + uint64_t edge; /* 1:edge triggered, 0:level triggered */ + uint64_t auto_ctrl0; /* only use default value 00b */ + uint64_t auto_ctrl1; /* only use default value 00b */ + uint64_t last_intirr; /* edge detection */ + uint64_t irr; /* interrupt request register */ + uint64_t isr; /* interrupt service register */ + uint64_t polarity; /* 0: high level trigger, 1: low level trigger */ + uint8_t route_entry[64]; /* default value 0, route to int0: extioi */ + uint8_t htmsi_vector[64]; /* irq route table for routing to extioi */ + uint64_t pch_pic_base; +}; + +void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level); +void pch_msi_set_irq(struct kvm *kvm, int irq, int level); +int kvm_loongarch_register_pch_pic_device(void); +#endif /* LOONGARCH_PCH_PIC_H */ diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h new file mode 100644 index 0000000000000000000000000000000000000000..2fe1d4bdff66cac9e4e6703752ce1ac89fb2fd86 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_types.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_KVM_TYPES_H +#define _ASM_LOONGARCH_KVM_TYPES_H + +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40 + +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */ diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h new file mode 100644 index 0000000000000000000000000000000000000000..d7e8f7d50ee0ce13360336f6c66ba24c746989ea --- /dev/null +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_VCPU_H__ +#define __ASM_LOONGARCH_KVM_VCPU_H__ + +#include +#include + +/* Controlled by 0x5 guest estat */ +#define CPU_SIP0 (_ULCAST_(1)) +#define CPU_SIP1 (_ULCAST_(1) << 1) +#define CPU_PMU (_ULCAST_(1) << 10) +#define CPU_TIMER (_ULCAST_(1) << 11) +#define CPU_IPI (_ULCAST_(1) << 12) + +/* Controlled by 0x52 guest exception VIP aligned to estat bit 5~12 */ +#define CPU_IP0 (_ULCAST_(1)) +#define CPU_IP1 (_ULCAST_(1) << 1) +#define CPU_IP2 (_ULCAST_(1) << 2) +#define CPU_IP3 (_ULCAST_(1) << 3) 
+#define CPU_IP4 (_ULCAST_(1) << 4) +#define CPU_IP5 (_ULCAST_(1) << 5) +#define CPU_IP6 (_ULCAST_(1) << 6) +#define CPU_IP7 (_ULCAST_(1) << 7) + +#define MNSEC_PER_SEC (NSEC_PER_SEC >> 20) + +/* KVM_IRQ_LINE irq field index values */ +#define KVM_LOONGSON_IRQ_TYPE_SHIFT 24 +#define KVM_LOONGSON_IRQ_TYPE_MASK 0xff +#define KVM_LOONGSON_IRQ_VCPU_SHIFT 16 +#define KVM_LOONGSON_IRQ_VCPU_MASK 0xff +#define KVM_LOONGSON_IRQ_NUM_SHIFT 0 +#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff + +typedef union loongarch_instruction larch_inst; +typedef int (*exit_handle_fn)(struct kvm_vcpu *); + +int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst); +int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst); +int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_emu_idle(struct kvm_vcpu *vcpu); +int kvm_pending_timer(struct kvm_vcpu *vcpu); +int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault); +void kvm_deliver_intr(struct kvm_vcpu *vcpu); +void kvm_deliver_exception(struct kvm_vcpu *vcpu); + +void kvm_own_fpu(struct kvm_vcpu *vcpu); +void kvm_lose_fpu(struct kvm_vcpu *vcpu); +void kvm_save_fpu(struct loongarch_fpu *fpu); +void kvm_restore_fpu(struct loongarch_fpu *fpu); +void kvm_restore_fcsr(struct loongarch_fpu *fpu); + +#ifdef CONFIG_CPU_HAS_LSX +int kvm_own_lsx(struct kvm_vcpu *vcpu); +void kvm_save_lsx(struct loongarch_fpu *fpu); +void kvm_restore_lsx(struct loongarch_fpu *fpu); +#else +static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; } +static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { } +static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { } +#endif + +#ifdef CONFIG_CPU_HAS_LASX +int kvm_own_lasx(struct kvm_vcpu *vcpu); +void kvm_save_lasx(struct loongarch_fpu *fpu); +void kvm_restore_lasx(struct loongarch_fpu *fpu); +#else +static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; } +static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { } +static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { } +#endif + +#ifdef CONFIG_CPU_HAS_LBT +int kvm_own_lbt(struct kvm_vcpu *vcpu); +#else +static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; } +#endif + +void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); +void kvm_save_timer(struct kvm_vcpu *vcpu); +void kvm_restore_timer(struct kvm_vcpu *vcpu); + +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); +struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid); + +/* + * Loongarch KVM guest interrupt handling + */ +static inline void kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq) +{ + set_bit(irq, &vcpu->arch.irq_pending); + clear_bit(irq, &vcpu->arch.irq_clear); +} + +static inline void kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq) +{ + clear_bit(irq, &vcpu->arch.irq_pending); + set_bit(irq, &vcpu->arch.irq_clear); +} + +static inline int kvm_queue_exception(struct kvm_vcpu *vcpu, + unsigned int code, unsigned int subcode) +{ + /* only one exception can be injected */ + if (!vcpu->arch.exception_pending) { + set_bit(code, &vcpu->arch.exception_pending); + vcpu->arch.esubcode = subcode; + return 0; + } else + return -1; +} + +static inline unsigned long kvm_read_reg(struct kvm_vcpu *vcpu, int num) +{ + return vcpu->arch.gprs[num]; +} + +static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long val) +{ + vcpu->arch.gprs[num] = val; +} + +static inline bool 
kvm_pvtime_supported(void) +{ + return !!sched_info_on(); +} + +static inline bool kvm_guest_has_pv_feature(struct kvm_vcpu *vcpu, unsigned int feature) +{ + return vcpu->kvm->arch.pv_features & BIT(feature); +} + +#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 23232c7bdb9ff56ca2f37086d03deff5740ef5d9..7578a10a32ebbd0b95d653fc75c5872d8a282d3b 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -119,6 +119,7 @@ #define CPUCFG6_PMP BIT(0) #define CPUCFG6_PAMVER GENMASK(3, 1) #define CPUCFG6_PMNUM GENMASK(7, 4) +#define CPUCFG6_PMNUM_SHIFT 4 #define CPUCFG6_PMBITS GENMASK(13, 8) #define CPUCFG6_UPM BIT(14) @@ -158,6 +159,11 @@ #define CPUCFG48_VFPU_CG BIT(2) #define CPUCFG48_RAM_CG BIT(3) +/* + * cpucfg index area: 0x40000000 -- 0x400000ff + * SW emulation for KVM hypervisor, see arch/loongarch/include/uapi/asm/kvm_para.h + */ + #ifndef __ASSEMBLY__ /* CSR */ @@ -171,6 +177,7 @@ /* IOCSR */ #define iocsr_read32(reg) __iocsrrd_w(reg) #define iocsr_read64(reg) __iocsrrd_d(reg) +#define iocsr_write8(val, reg) __iocsrwr_b(val, reg) #define iocsr_write32(val, reg) __iocsrwr_w(val, reg) #define iocsr_write64(val, reg) __iocsrwr_d(val, reg) @@ -226,6 +233,7 @@ #define LOONGARCH_CSR_ECFG 0x4 /* Exception config */ #define CSR_ECFG_VS_SHIFT 16 #define CSR_ECFG_VS_WIDTH 3 +#define CSR_ECFG_VS_SHIFT_END (CSR_ECFG_VS_SHIFT + CSR_ECFG_VS_WIDTH - 1) #define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT) #define CSR_ECFG_IM_SHIFT 0 #define CSR_ECFG_IM_WIDTH 14 @@ -239,8 +247,8 @@ #define CSR_ESTAT_EXC_WIDTH 6 #define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT) #define CSR_ESTAT_IS_SHIFT 0 -#define CSR_ESTAT_IS_WIDTH 14 -#define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT) +#define CSR_ESTAT_IS_WIDTH 15 +#define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT) #define LOONGARCH_CSR_ERA 0x6 /* Exception return address */ @@ -314,13 +322,14 @@ #define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT) #define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */ -#define CSR_GTLBC_RID_SHIFT 16 -#define CSR_GTLBC_RID_WIDTH 8 -#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT) +#define CSR_GTLBC_TGID_SHIFT 16 +#define CSR_GTLBC_TGID_WIDTH 8 +#define CSR_GTLBC_TGID_SHIFT_END (CSR_GTLBC_TGID_SHIFT + CSR_GTLBC_TGID_WIDTH - 1) +#define CSR_GTLBC_TGID (_ULCAST_(0xff) << CSR_GTLBC_TGID_SHIFT) #define CSR_GTLBC_TOTI_SHIFT 13 #define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT) -#define CSR_GTLBC_USERID_SHIFT 12 -#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT) +#define CSR_GTLBC_USETGID_SHIFT 12 +#define CSR_GTLBC_USETGID (_ULCAST_(0x1) << CSR_GTLBC_USETGID_SHIFT) #define CSR_GTLBC_GMTLBSZ_SHIFT 0 #define CSR_GTLBC_GMTLBSZ_WIDTH 6 #define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT) @@ -475,6 +484,7 @@ #define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */ #define CSR_GSTAT_GID_SHIFT 16 #define CSR_GSTAT_GID_WIDTH 8 +#define CSR_GSTAT_GID_SHIFT_END (CSR_GSTAT_GID_SHIFT + CSR_GSTAT_GID_WIDTH - 1) #define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT) #define CSR_GSTAT_GIDBIT_SHIFT 4 #define CSR_GSTAT_GIDBIT_WIDTH 6 @@ -525,6 +535,12 @@ #define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF) #define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF) #define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF) +#define CSR_GCFG_MATP_NEST_SHIFT 2 +#define CSR_GCFG_MATP_NEST 
(_ULCAST_(0x1) << CSR_GCFG_MATP_NEST_SHIFT) +#define CSR_GCFG_MATP_ROOT_SHIFT 1 +#define CSR_GCFG_MATP_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATP_ROOT_SHIFT) +#define CSR_GCFG_MATP_GUEST_SHIFT 0 +#define CSR_GCFG_MATP_GUEST (_ULCAST_(0x1) << CSR_GCFG_MATP_GUEST_SHIFT) #define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */ #define CSR_GINTC_HC_SHIFT 16 @@ -627,6 +643,13 @@ #define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */ +#define LOONGARCH_CSR_ISR0 0xa0 +#define LOONGARCH_CSR_ISR1 0xa1 +#define LOONGARCH_CSR_ISR2 0xa2 +#define LOONGARCH_CSR_ISR3 0xa3 + +#define LOONGARCH_CSR_IRR 0xa4 + #define LOONGARCH_CSR_PRID 0xc0 /* Shadow MCSR : 0xc0 ~ 0xff */ @@ -856,7 +879,7 @@ #define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */ #define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */ -/* Direct Map window 0/1 */ +/* Direct Map window 0/1/2/3 */ #define CSR_DMW0_PLV0 _CONST64_(1 << 0) #define CSR_DMW0_VSEG _CONST64_(0x8000) #define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS) @@ -868,6 +891,14 @@ #define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS) #define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0) +#define CSR_DMW2_PLV0 _CONST64_(1 << 0) +#define CSR_DMW2_MAT _CONST64_(2 << 4) +#define CSR_DMW2_VSEG _CONST64_(0xa000) +#define CSR_DMW2_BASE (CSR_DMW2_VSEG << DMW_PABITS) +#define CSR_DMW2_INIT (CSR_DMW2_BASE | CSR_DMW2_MAT | CSR_DMW2_PLV0) + +#define CSR_DMW3_INIT 0x0 + /* Performance Counter registers */ #define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */ #define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */ @@ -981,7 +1012,7 @@ /* * CSR_ECFG IM */ -#define ECFG0_IM 0x00001fff +#define ECFG0_IM 0x00005fff #define ECFGB_SIP0 0 #define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0) #define ECFGB_SIP1 1 @@ -1024,6 +1055,7 @@ #define IOCSRF_EIODECODE BIT_ULL(9) #define IOCSRF_FLATMODE BIT_ULL(10) #define IOCSRF_VM BIT_ULL(11) +#define IOCSRF_AVEC BIT_ULL(15) #define LOONGARCH_IOCSR_VENDOR 0x10 @@ -1034,6 +1066,7 @@ #define LOONGARCH_IOCSR_MISC_FUNC 0x420 #define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21) #define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48) +#define IOCSR_MISC_FUNC_AVEC_EN BIT_ULL(51) #define LOONGARCH_IOCSR_CPUTEMP 0x428 @@ -1355,9 +1388,10 @@ __BUILD_CSR_OP(tlbidx) #define INT_TI 11 /* Timer */ #define INT_IPI 12 #define INT_NMI 13 +#define INT_AVEC 14 /* ExcCodes corresponding to interrupts */ -#define EXCCODE_INT_NUM (INT_NMI + 1) +#define EXCCODE_INT_NUM (INT_AVEC + 1) #define EXCCODE_INT_START 64 #define EXCCODE_INT_END (EXCCODE_INT_START + EXCCODE_INT_NUM - 1) diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h index 9f97c3453b9cebec7485322ebbe378e03b180d9d..304363bd3935242f2ce702a16adcfea1da9858a9 100644 --- a/arch/loongarch/include/asm/mmu_context.h +++ b/arch/loongarch/include/asm/mmu_context.h @@ -49,12 +49,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) /* Normal, classic get_new_mmu_context */ static inline void -get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) +get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush) { u64 asid = asid_cache(cpu); if (!((++asid) & cpu_asid_mask(&cpu_data[cpu]))) - local_flush_tlb_user(); /* start new asid cycle */ + *need_flush = true; /* start new asid cycle */ cpu_context(cpu, mm) = asid_cache(cpu) = asid; } @@ -74,21 +74,34 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) return 0; } +static inline void atomic_update_pgd_asid(unsigned long asid, 
unsigned long pgdl) +{ + __asm__ __volatile__( + "csrwr %[pgdl_val], %[pgdl_reg] \n\t" + "csrwr %[asid_val], %[asid_reg] \n\t" + : [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl) + : [asid_reg] "i" (LOONGARCH_CSR_ASID), [pgdl_reg] "i" (LOONGARCH_CSR_PGDL) + : "memory" + ); +} + static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { + bool need_flush = false; unsigned int cpu = smp_processor_id(); /* Check if our ASID is of an older version and thus invalid */ if (!asid_valid(next, cpu)) - get_new_mmu_context(next, cpu); - - write_csr_asid(cpu_asid(cpu, next)); + get_new_mmu_context(next, cpu, &need_flush); if (next != &init_mm) - csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL); + atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd); else - csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)invalid_pg_dir); + + if (need_flush) + local_flush_tlb_user(); /* Flush tlb after update ASID */ /* * Mark current->active_mm as not "active" anymore. @@ -135,9 +148,15 @@ drop_mmu_context(struct mm_struct *mm, unsigned int cpu) asid = read_csr_asid() & cpu_asid_mask(¤t_cpu_data); if (asid == cpu_asid(cpu, mm)) { + bool need_flush = false; + if (!current->mm || (current->mm == mm)) { - get_new_mmu_context(mm, cpu); + get_new_mmu_context(mm, cpu, &need_flush); + write_csr_asid(cpu_asid(cpu, mm)); + if (need_flush) + local_flush_tlb_user(); /* Flush tlb after update ASID */ + goto out; } } diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h index 2ecd82bb64e1322cec7666cfaded4d6da2421185..f33f3fd32ecc2cf3c5cf66e8c29d0f61a7b750f9 100644 --- a/arch/loongarch/include/asm/module.h +++ b/arch/loongarch/include/asm/module.h @@ -6,6 +6,7 @@ #define _ASM_MODULE_H #include +#include #include #define RELA_STACK_DEPTH 16 @@ -21,6 +22,12 @@ struct mod_arch_specific { struct mod_section plt; struct mod_section plt_idx; +#ifdef CONFIG_UNWINDER_ORC + unsigned int num_orcs; + int *orc_unwind_ip; + struct orc_entry *orc_unwind; +#endif + /* For CONFIG_DYNAMIC_FTRACE */ struct plt_entry *ftrace_trampolines; }; diff --git a/arch/loongarch/include/asm/orc_header.h b/arch/loongarch/include/asm/orc_header.h new file mode 100644 index 0000000000000000000000000000000000000000..f9d509c3fd704898ba51408f3d7ef3b66d200c72 --- /dev/null +++ b/arch/loongarch/include/asm/orc_header.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _ORC_HEADER_H +#define _ORC_HEADER_H + +#include +#include +#include + +/* + * The header is currently a 20-byte hash of the ORC entry definition; see + * scripts/orc_hash.sh. + */ +#define ORC_HEADER \ + __used __section(".orc_header") __aligned(4) \ + static const u8 orc_header[] = { ORC_HASH } + +#endif /* _ORC_HEADER_H */ diff --git a/arch/loongarch/include/asm/orc_lookup.h b/arch/loongarch/include/asm/orc_lookup.h new file mode 100644 index 0000000000000000000000000000000000000000..b02e6357def4848f19fa4f153f66364992b228ae --- /dev/null +++ b/arch/loongarch/include/asm/orc_lookup.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ORC_LOOKUP_H +#define _ORC_LOOKUP_H + +/* + * This is a lookup table for speeding up access to the .orc_unwind table. + * Given an input address offset, the corresponding lookup table entry + * specifies a subset of the .orc_unwind table to search. 
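 + * + * Roughly, for an instruction pointer ip the lookup is expected to be + * (a sketch, not the unwinder's exact code): + * + *	idx   = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; + *	begin = orc_lookup[idx]; + *	end   = orc_lookup[idx + 1]; + * + * followed by a binary search of the .orc_unwind entries in [begin, end].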
+ * + * Each block represents the end of the previous range and the start of the + * next range. An extra block is added to give the last range an end. + * + * The block size should be a power of 2 to avoid a costly 'div' instruction. + * + * A block size of 256 was chosen because it roughly doubles unwinder + * performance while only adding ~5% to the ORC data footprint. + */ +#define LOOKUP_BLOCK_ORDER 8 +#define LOOKUP_BLOCK_SIZE (1 << LOOKUP_BLOCK_ORDER) + +#ifndef LINKER_SCRIPT + +extern unsigned int orc_lookup[]; +extern unsigned int orc_lookup_end[]; + +#define LOOKUP_START_IP (unsigned long)_stext +#define LOOKUP_STOP_IP (unsigned long)_etext + +#endif /* LINKER_SCRIPT */ + +#endif /* _ORC_LOOKUP_H */ diff --git a/arch/loongarch/include/asm/orc_types.h b/arch/loongarch/include/asm/orc_types.h new file mode 100644 index 0000000000000000000000000000000000000000..caf1f71a1057b699887873c0973c1fe5d832f0c8 --- /dev/null +++ b/arch/loongarch/include/asm/orc_types.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ORC_TYPES_H +#define _ORC_TYPES_H + +#include + +/* + * The ORC_REG_* registers are base registers which are used to find other + * registers on the stack. + * + * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the + * address of the previous frame: the caller's SP before it called the current + * function. + * + * ORC_REG_UNDEFINED means the corresponding register's value didn't change in + * the current frame. + * + * The most commonly used base registers are SP and FP -- which the previous SP + * is usually based on -- and PREV_SP and UNDEFINED -- which the previous FP is + * usually based on. + * + * The rest of the base registers are needed for special cases like entry code + * and GCC realigned stacks. + */ +#define ORC_REG_UNDEFINED 0 +#define ORC_REG_PREV_SP 1 +#define ORC_REG_SP 2 +#define ORC_REG_FP 3 +#define ORC_REG_MAX 4 + +#define ORC_TYPE_UNDEFINED 0 +#define ORC_TYPE_END_OF_STACK 1 +#define ORC_TYPE_CALL 2 +#define ORC_TYPE_REGS 3 +#define ORC_TYPE_REGS_PARTIAL 4 + +#ifndef __ASSEMBLY__ +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. It tells the + * unwinder how to find the previous SP and FP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. 
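 + * + * Conceptually (a sketch): prev_sp is the value of the register named by + * sp_reg plus sp_offset; the previous FP and the return address are + * recovered from fp_reg/fp_offset and ra_reg/ra_offset in the same way, + * with ORC_REG_UNDEFINED meaning the value is not recorded here.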
+ */ +struct orc_entry { + s16 sp_offset; + s16 fp_offset; + s16 ra_offset; + unsigned int sp_reg:4; + unsigned int fp_reg:4; + unsigned int ra_reg:4; + unsigned int type:3; + unsigned int signal:1; +}; +#endif /* __ASSEMBLY__ */ + +#endif /* _ORC_TYPES_H */ diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h new file mode 100644 index 0000000000000000000000000000000000000000..dabc5aec179c4a37d330c5078b801f8f77d6aa66 --- /dev/null +++ b/arch/loongarch/include/asm/paravirt.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_PARAVIRT_H +#define _ASM_LOONGARCH_PARAVIRT_H + +#ifdef CONFIG_PARAVIRT +#include +struct static_key; +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; + +u64 dummy_steal_clock(int cpu); +DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); + +static inline u64 paravirt_steal_clock(int cpu) +{ + return static_call(pv_steal_clock)(cpu); +} + +int pv_ipi_init(void); +int __init pv_time_init(void); +int __init pv_spinlock_init(void); +#else +static inline int pv_ipi_init(void) +{ + return 0; +} + +static inline int pv_time_init(void) +{ + return 0; +} + +static inline int pv_spinlock_init(void) +{ + return 0; +} + +#endif // CONFIG_PARAVIRT +#endif diff --git a/arch/loongarch/include/asm/paravirt_api_clock.h b/arch/loongarch/include/asm/paravirt_api_clock.h new file mode 100644 index 0000000000000000000000000000000000000000..8a418f0b4fd537164e5f5fa5a0bdbc5886fcee1f --- /dev/null +++ b/arch/loongarch/include/asm/paravirt_api_clock.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ +#ifndef _ASM_API_CLOCK_H +#define _ASM_API_CLOCK_H + +#include + +#endif diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index 79470f0b4f1d8dfbd715e4d1dc2e711949175b30..c9f9895f237d974029fd9dc4242f1bff4610aa15 100644 --- a/arch/loongarch/include/asm/pgalloc.h +++ b/arch/loongarch/include/asm/pgalloc.h @@ -10,6 +10,7 @@ #define __HAVE_ARCH_PMD_ALLOC_ONE #define __HAVE_ARCH_PUD_ALLOC_ONE +#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL #include static inline void pmd_populate_kernel(struct mm_struct *mm, @@ -44,6 +45,16 @@ extern void pagetable_init(void); extern pgd_t *pgd_alloc(struct mm_struct *mm); +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) +{ + pte_t *pte = __pte_alloc_one_kernel(mm); + + if (pte) + kernel_pte_init(pte); + + return pte; +} + #define __pte_free_tlb(tlb, pte, address) \ do { \ pagetable_pte_dtor(page_ptdesc(pte)); \ diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index 29d9b12298bc843ecf24012d37f9042079d939e5..4f4498ce22550d95bbe5180b19e451b92c558ccc 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -106,6 +106,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define KFENCE_AREA_START (VMEMMAP_END + 1) #define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1) +#define ptep_get(ptep) READ_ONCE(*(ptep)) +#define pmdp_get(pmdp) READ_ONCE(*(pmdp)) + #define pte_ERROR(e) \ pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) #ifndef __PAGETABLE_PMD_FOLDED @@ -147,11 +150,6 @@ static inline int p4d_present(p4d_t p4d) return p4d_val(p4d) != (unsigned long)invalid_pud_table; } -static inline void p4d_clear(p4d_t *p4dp) -{ - p4d_val(*p4dp) = (unsigned long)invalid_pud_table; -} - static inline 
pud_t *p4d_pgtable(p4d_t p4d) { return (pud_t *)p4d_val(p4d); @@ -159,7 +157,12 @@ static inline pud_t *p4d_pgtable(p4d_t p4d) static inline void set_p4d(p4d_t *p4d, p4d_t p4dval) { - *p4d = p4dval; + WRITE_ONCE(*p4d, p4dval); +} + +static inline void p4d_clear(p4d_t *p4dp) +{ + set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table)); } #define p4d_phys(p4d) PHYSADDR(p4d_val(p4d)) @@ -193,17 +196,20 @@ static inline int pud_present(pud_t pud) return pud_val(pud) != (unsigned long)invalid_pmd_table; } -static inline void pud_clear(pud_t *pudp) +static inline pmd_t *pud_pgtable(pud_t pud) { - pud_val(*pudp) = ((unsigned long)invalid_pmd_table); + return (pmd_t *)pud_val(pud); } -static inline pmd_t *pud_pgtable(pud_t pud) +static inline void set_pud(pud_t *pud, pud_t pudval) { - return (pmd_t *)pud_val(pud); + WRITE_ONCE(*pud, pudval); } -#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0) +static inline void pud_clear(pud_t *pudp) +{ + set_pud(pudp, __pud((unsigned long)invalid_pmd_table)); +} #define pud_phys(pud) PHYSADDR(pud_val(pud)) #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT)) @@ -231,12 +237,15 @@ static inline int pmd_present(pmd_t pmd) return pmd_val(pmd) != (unsigned long)invalid_pte_table; } -static inline void pmd_clear(pmd_t *pmdp) +static inline void set_pmd(pmd_t *pmd, pmd_t pmdval) { - pmd_val(*pmdp) = ((unsigned long)invalid_pte_table); + WRITE_ONCE(*pmd, pmdval); } -#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0) +static inline void pmd_clear(pmd_t *pmdp) +{ + set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table)); +} #define pmd_phys(pmd) PHYSADDR(pmd_val(pmd)) @@ -260,6 +269,7 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pm extern void pgd_init(void *addr); extern void pud_init(void *addr); extern void pmd_init(void *addr); +extern void kernel_pte_init(void *addr); /* * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that @@ -314,46 +324,19 @@ extern void paging_init(void); static inline void set_pte(pte_t *ptep, pte_t pteval) { - *ptep = pteval; - if (pte_val(pteval) & _PAGE_GLOBAL) { - pte_t *buddy = ptep_buddy(ptep); - /* - * Make sure the buddy is global too (if it's !none, - * it better already be global) - */ + WRITE_ONCE(*ptep, pteval); + #ifdef CONFIG_SMP - /* - * For SMP, multiple CPUs can race, so we need to do - * this atomically. 
- */ - unsigned long page_global = _PAGE_GLOBAL; - unsigned long tmp; - - __asm__ __volatile__ ( - "1:" __LL "%[tmp], %[buddy] \n" - " bnez %[tmp], 2f \n" - " or %[tmp], %[tmp], %[global] \n" - __SC "%[tmp], %[buddy] \n" - " beqz %[tmp], 1b \n" - " nop \n" - "2: \n" - __WEAK_LLSC_MB - : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) - : [global] "r" (page_global)); -#else /* !CONFIG_SMP */ - if (pte_none(*buddy)) - pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; -#endif /* CONFIG_SMP */ - } + if (pte_val(pteval) & _PAGE_GLOBAL) + DBAR(0b11000); /* o_wrw = 0b11000 */ +#endif } static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - /* Preserve global status for the pair */ - if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) - set_pte(ptep, __pte(_PAGE_GLOBAL)); - else - set_pte(ptep, __pte(0)); + pte_t pte = ptep_get(ptep); + pte_val(pte) &= _PAGE_GLOBAL; + set_pte(ptep, pte); } #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) @@ -470,8 +453,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, #define update_mmu_cache(vma, addr, ptep) \ update_mmu_cache_range(NULL, vma, addr, ptep, 1) -#define __HAVE_ARCH_UPDATE_MMU_TLB -#define update_mmu_tlb update_mmu_cache +#define update_mmu_tlb_range(vma, addr, ptep, nr) \ + update_mmu_cache_range(NULL, vma, addr, ptep, nr) static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) @@ -591,7 +574,7 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd) static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { - pmd_t old = *pmdp; + pmd_t old = pmdp_get(pmdp); pmd_clear(pmdp); diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h new file mode 100644 index 0000000000000000000000000000000000000000..e76d3aa1e1ebe7dcae953bf6dc89d55802e05cfc --- /dev/null +++ b/arch/loongarch/include/asm/qspinlock.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_QSPINLOCK_H +#define _ASM_LOONGARCH_QSPINLOCK_H + +#include + +#ifdef CONFIG_PARAVIRT + +DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key); + +#define virt_spin_lock virt_spin_lock + +static inline bool virt_spin_lock(struct qspinlock *lock) +{ + int val; + + if (!static_branch_unlikely(&virt_spin_lock_key)) + return false; + + /* + * On hypervisors without PARAVIRT_SPINLOCKS support we fall + * back to a Test-and-Set spinlock, because fair locks have + * horrible lock 'holder' preemption issues. + */ + +__retry: + val = atomic_read(&lock->val); + + if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) { + cpu_relax(); + goto __retry; + } + + return true; +} + +#endif /* CONFIG_PARAVIRT */ + +#include + +#endif // _ASM_LOONGARCH_QSPINLOCK_H diff --git a/arch/loongarch/include/asm/se.h b/arch/loongarch/include/asm/se.h new file mode 100644 index 0000000000000000000000000000000000000000..a6b968d2d545556db3aca84698850ac9703a8e13 --- /dev/null +++ b/arch/loongarch/include/asm/se.h @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2012 IBM Corporation + * + * Copyright 2023 Loongson Technology, Inc. + * Yinggang Gu + * + * Device driver for Loongson SE module. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ * + */ +#ifndef __LOONGSON_SE_H__ +#define __LOONGSON_SE_H__ + +#define SE_MAILBOX_S 0x0 +#define SE_MAILBOX_L 0x20 +#define SE_S2LINT_STAT 0x88 +#define SE_S2LINT_EN 0x8c +#define SE_S2LINT_SET 0x90 +#define SE_S2LINT_CL 0x94 +#define SE_L2SINT_STAT 0x98 +#define SE_L2SINT_EN 0x9c +#define SE_L2SINT_SET 0xa0 +#define SE_L2SINT_CL 0xa4 + +/* INT bit definition */ +#define SE_INT_SETUP BIT(0) +#define SE_INT_SM2 BIT(0) +#define SE_INT_SM3 BIT(0) +#define SE_INT_SM4 BIT(0) +#define SE_INT_RNG BIT(0) +#define SE_INT_TPM BIT(5) +#define SE_INT_ALL 0xffffffff + +#define SE_CMD_START 0x0 +#define SE_CMD_STOP 0x1 +#define SE_CMD_GETVER 0x2 +#define SE_CMD_SETBUF 0x3 +#define SE_CMD_SETMSG 0x4 + +#define SE_CMD_RNG 0x100 + +#define SE_CMD_SM2_SIGN 0x200 +#define SE_CMD_SM2_VSIGN 0x201 + +#define SE_CMD_SM3_DIGEST 0x300 +#define SE_CMD_SM3_UPDATE 0x301 +#define SE_CMD_SM3_FINISH 0x302 + +#define SE_CMD_SM4_ECB_ENCRY 0x400 +#define SE_CMD_SM4_ECB_DECRY 0x401 +#define SE_CMD_SM4_CBC_ENCRY 0x402 +#define SE_CMD_SM4_CBC_DECRY 0x403 +#define SE_CMD_SM4_CTR 0x404 + +#define SE_CMD_TPM 0x500 +#define SE_CMD_ZUC_INIT_READ 0x600 +#define SE_CMD_ZUC_READ 0x601 + +#define SE_CMD_SDF 0x700 + +#define SE_CH_MAX 32 + +#define SE_CH_RNG 1 +#define SE_CH_SM2 2 +#define SE_CH_SM3 3 +#define SE_CH_SM4 4 +#define SE_CH_TPM 5 +#define SE_CH_ZUC 6 +#define SE_CH_SDF 7 + +struct se_msg { + u32 cmd; + u32 data_off; + u32 data_len; + u32 info[5]; +}; + +struct se_cmd { + u32 cmd; + u32 info[7]; +}; + +struct se_res { + u32 cmd; + u32 cmd_ret; + u32 info[6]; +}; + +struct se_mailbox_data { + u32 int_bit; + union { + u32 mailbox[8]; + struct se_cmd gcmd; + struct se_res res; + } u; +}; + +struct lsse_ch { + u32 id; + u32 int_bit; + struct loongson_se *se; + void *priv; + spinlock_t ch_lock; + void *smsg; + void *rmsg; + int msg_size; + void *data_buffer; + dma_addr_t data_addr; + int data_size; + + void (*complete)(struct lsse_ch *se_ch); +}; + +struct loongson_se { + struct device *dev; + void __iomem *base; + u32 version; + u32 ch_status; + spinlock_t cmd_lock; + spinlock_t dev_lock; + + /* Interaction memory */ + void *mem_base; + dma_addr_t mem_addr; + unsigned long *mem_map; + int mem_map_size; + void *smsg; + void *rmsg; + + /* Synchronous CMD */ + struct completion cmd_completion; + + /* Virtual Channel */ + struct lsse_ch chs[SE_CH_MAX]; +}; + +struct lsse_ch *se_init_ch(int id, int data_size, int msg_size, void *priv, + void (*complete)(struct lsse_ch *se_ch)); +void se_deinit_ch(struct lsse_ch *ch); +int se_send_ch_requeset(struct lsse_ch *ch); + +#endif diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h index ee52fb1e99631653e3e40d6998afd159a7e5986d..eefb30c33ba33e1de9153676e8653d851bfa1c6c 100644 --- a/arch/loongarch/include/asm/setup.h +++ b/arch/loongarch/include/asm/setup.h @@ -12,6 +12,7 @@ #define VECSIZE 0x200 +extern bool disable_pci_irq_limit; extern unsigned long eentry; extern unsigned long tlbrentry; extern char init_command_line[COMMAND_LINE_SIZE]; diff --git a/arch/loongarch/include/asm/shmparam.h b/arch/loongarch/include/asm/shmparam.h index c9554f48d2dfab400add2d2c4e30ff6e44f1db14..8af1e70cbf2c5acaf81f4a4cf81675f980f6bc2f 100644 --- a/arch/loongarch/include/asm/shmparam.h +++ b/arch/loongarch/include/asm/shmparam.h @@ -6,7 +6,6 @@ #define _ASM_SHMPARAM_H #define __ARCH_FORCE_SHMLBA 1 - -#define SHMLBA SZ_64K /* attach addr a multiple of this */ +#include #endif /* _ASM_SHMPARAM_H */ diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h 
index f81e5f01d61905f5b8d7da4786ba512258381acd..cc232901e4dd2373f97eb38b3955936919526d2f 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -12,6 +12,13 @@ #include #include +struct smp_ops { + void (*init_ipi)(void); + void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action); + void (*send_ipi_single)(int cpu, unsigned int action); +}; + +extern struct smp_ops smp_ops; extern int smp_num_siblings; extern int num_processors; extern int disabled_cpus; @@ -24,8 +31,6 @@ void loongson_prepare_cpus(unsigned int max_cpus); void loongson_boot_secondary(int cpu, struct task_struct *idle); void loongson_init_secondary(void); void loongson_smp_finish(void); -void loongson_send_ipi_single(int cpu, unsigned int action); -void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action); #ifdef CONFIG_HOTPLUG_CPU int loongson_cpu_disable(void); void loongson_cpu_die(unsigned int cpu); @@ -59,9 +64,14 @@ extern int __cpu_logical_map[NR_CPUS]; #define cpu_physical_id(cpu) cpu_logical_map(cpu) -#define SMP_BOOT_CPU 0x1 -#define SMP_RESCHEDULE 0x2 -#define SMP_CALL_FUNCTION 0x4 +#define ACTION_BOOT_CPU 0 +#define ACTION_RESCHEDULE 1 +#define ACTION_CALL_FUNCTION 2 +#define ACTION_CLEAR_VECTOR 3 +#define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU) +#define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE) +#define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION) +#define SMP_CLEAR_VECTOR BIT(ACTION_CLEAR_VECTOR) struct secondary_data { unsigned long stack; @@ -71,7 +81,8 @@ extern struct secondary_data cpuboot_data; extern asmlinkage void smpboot_entry(void); extern asmlinkage void start_secondary(void); - +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void calculate_cpu_foreign_map(void); /* @@ -79,16 +90,6 @@ extern void calculate_cpu_foreign_map(void); */ extern void show_ipi_list(struct seq_file *p, int prec); -static inline void arch_send_call_function_single_ipi(int cpu) -{ - loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION); -} - -static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) -{ - loongson_send_ipi_mask(mask, SMP_CALL_FUNCTION); -} - #ifdef CONFIG_HOTPLUG_CPU static inline int __cpu_disable(void) { @@ -101,4 +102,7 @@ static inline void __cpu_die(unsigned int cpu) } #endif +int topo_add_cpu(int physid); +int topo_get_cpu(int physid); + #endif /* __ASM_SMP_H */ diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h index 8d4af6aff8a8f1e149f59b4a9b34b80bf7197db2..1f331ee584ef38c0b21de9b0536ce883b9b62cee 100644 --- a/arch/loongarch/include/asm/sparsemem.h +++ b/arch/loongarch/include/asm/sparsemem.h @@ -8,7 +8,7 @@ * SECTION_SIZE_BITS 2^N: how big each section will be * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space */ -#define SECTION_SIZE_BITS 29 /* 2^29 = Largest Huge Page Size */ +#define SECTION_SIZE_BITS 28 #define MAX_PHYSMEM_BITS 48 #ifdef CONFIG_SPARSEMEM_VMEMMAP diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h index efc8c42290d0196f907827e6ebab13910d02c9a3..66736837085b61cd7c6d62461e5beb237e982fec 100644 --- a/arch/loongarch/include/asm/stackframe.h +++ b/arch/loongarch/include/asm/stackframe.h @@ -13,6 +13,7 @@ #include #include #include +#include /* Make the addition of cfi info a little easier. 
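 * (The docfi argument, defaulting to 0 in the macros below, selects whether * a real .cfi_* directive is emitted, so save and restore paths can share * one definition; this note is an inference from the macro signatures.)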
*/ .macro cfi_rel_offset reg offset=0 docfi=0 @@ -37,6 +38,17 @@ cfi_restore \reg \offset \docfi .endm + .macro SETUP_DMWINS temp + li.d \temp, CSR_DMW0_INIT # WUC, PLV0, 0x8000 xxxx xxxx xxxx + csrwr \temp, LOONGARCH_CSR_DMWIN0 + li.d \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx + csrwr \temp, LOONGARCH_CSR_DMWIN1 + li.d \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx + csrwr \temp, LOONGARCH_CSR_DMWIN2 + li.d \temp, CSR_DMW3_INIT # 0x0, unused + csrwr \temp, LOONGARCH_CSR_DMWIN3 + .endm + /* Jump to the runtime virtual address. */ .macro JUMP_VIRT_ADDR temp1 temp2 li.d \temp1, CACHE_BASE @@ -162,6 +174,7 @@ li.w t0, CSR_CRMD_WE csrxchg t0, t0, LOONGARCH_CSR_CRMD #endif + UNWIND_HINT_REGS .endm .macro SAVE_ALL docfi=0 @@ -219,6 +232,7 @@ .macro RESTORE_SP_AND_RET docfi=0 cfi_ld sp, PT_R3, \docfi + UNWIND_HINT_FUNC ertn .endm diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h index 8cb653d49a54343ebfa26814e037ddf844c98351..8bf0e6f5154668e7ea477e5182d68705ab4a4f32 100644 --- a/arch/loongarch/include/asm/thread_info.h +++ b/arch/loongarch/include/asm/thread_info.h @@ -86,6 +86,7 @@ register unsigned long current_stack_pointer __asm__("$sp"); #define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */ #define TIF_USEDLBT 19 /* LBT was used by this task this quantum (SMP) */ #define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */ +#define TIF_PATCH_PENDING 21 /* pending live patching update */ #define _TIF_SIGPENDING (1<sp = regs->regs[3]; state->pc = regs->csr_era; state->ra = regs->regs[1]; + state->fp = regs->regs[22]; } else if (task && task != current) { state->sp = thread_saved_fp(task); state->pc = thread_saved_ra(task); state->ra = 0; + state->fp = 0; } else { state->sp = (unsigned long)__builtin_frame_address(0); state->pc = (unsigned long)__builtin_return_address(0); state->ra = 0; + state->fp = 0; } state->task = task; get_stack_info(state->sp, state->task, &state->stack_info); @@ -77,6 +81,18 @@ static __always_inline void __unwind_start(struct unwind_state *state, static __always_inline unsigned long __unwind_get_return_address(struct unwind_state *state) { - return unwind_done(state) ? 0 : state->pc; + if (unwind_done(state)) + return 0; + + return __kernel_text_address(state->pc) ? 
state->pc : 0; } + +#ifdef CONFIG_UNWINDER_ORC +void unwind_init(void); +void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size); +#else +static inline void unwind_init(void) {} +static inline void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size) {} +#endif + #endif /* _ASM_UNWIND_H */ diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h new file mode 100644 index 0000000000000000000000000000000000000000..a01086ad9ddea44fb964accfcbf00bf112e798a1 --- /dev/null +++ b/arch/loongarch/include/asm/unwind_hints.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_UNWIND_HINTS_H +#define _ASM_LOONGARCH_UNWIND_HINTS_H + +#include +#include + +#ifdef __ASSEMBLY__ + +.macro UNWIND_HINT_UNDEFINED + UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED +.endm + +.macro UNWIND_HINT_END_OF_STACK + UNWIND_HINT type=UNWIND_HINT_TYPE_END_OF_STACK +.endm + +.macro UNWIND_HINT_REGS + UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_REGS +.endm + +.macro UNWIND_HINT_FUNC + UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */ diff --git a/arch/loongarch/include/uapi/asm/bitsperlong.h b/arch/loongarch/include/uapi/asm/bitsperlong.h new file mode 100644 index 0000000000000000000000000000000000000000..00b4ba1e5cdf032f81aff9f728dcffea68571880 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/bitsperlong.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_LOONGARCH_BITSPERLONG_H +#define __ASM_LOONGARCH_BITSPERLONG_H + +#define __BITS_PER_LONG (__SIZEOF_LONG__ * 8) + +#include + +#endif /* __ASM_LOONGARCH_BITSPERLONG_H */ diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h new file mode 100644 index 0000000000000000000000000000000000000000..d619b943d20d5b8be3f79b5220e3becda1d3578b --- /dev/null +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __UAPI_ASM_LOONGARCH_KVM_H +#define __UAPI_ASM_LOONGARCH_KVM_H + +#include + +/* + * KVM LoongArch specific structures and definitions. + * + * Some parts derived from the x86 version of this file. + */ + +#define __KVM_HAVE_READONLY_MEM +#define __KVM_HAVE_GUEST_DEBUG + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_DIRTY_LOG_PAGE_OFFSET 64 +#define __KVM_HAVE_IRQ_LINE + +#define KVM_GUESTDBG_USE_SW_BP 0x00010000 + +/* + * for KVM_GET_REGS and KVM_SET_REGS + */ +struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + __u64 gpr[32]; + __u64 pc; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + */ +struct kvm_fpu { + __u32 fcsr; + __u64 fcc; /* 8x8 */ + struct kvm_fpureg { + __u64 val64[4]; + } fpr[32]; +}; + +/* + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various + * registers. The id field is broken down as follows: + * + * bits[63..52] - As per linux/kvm.h + * bits[51..32] - Must be zero. + * bits[31..16] - Register set. + * + * Register set = 0: GP registers from kvm_regs (see definitions below). + * + * Register set = 1: CSR registers. + * + * Register set = 2: KVM specific registers (see definitions below). + * + * Register set = 3: FPU / SIMD registers (see definitions below). 
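 + * + * For example (derived from the helper macros below), the id of the 64-bit + * CSR numbered 0x5 is + * + *	KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | (0x5 << 3) + * + * which is exactly what KVM_IOC_CSRID(0x5) expands to.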
+ * + * Other register sets may be added in the future. Each set would + * have its own identifier in bits[31..16]. + */ + +#define KVM_REG_LOONGARCH_GPR (KVM_REG_LOONGARCH | 0x00000ULL) +#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x10000ULL) +#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL) +#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL) +#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL) +#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x50000ULL) +#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL) +#define KVM_CSR_IDX_MASK 0x7fff +#define KVM_CPUCFG_IDX_MASK 0x7fff + +/* + * KVM_REG_LOONGARCH_KVM - KVM specific control registers. + */ + +#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2) +/* Debugging: Special instruction for software breakpoint */ +#define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3) + +/* LBT registers */ +#define KVM_REG_LOONGARCH_LBT_SCR0 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LOONGARCH_LBT_SCR1 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2) +#define KVM_REG_LOONGARCH_LBT_SCR2 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3) +#define KVM_REG_LOONGARCH_LBT_SCR3 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4) +#define KVM_REG_LOONGARCH_LBT_EFLAGS (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5) +#define KVM_REG_LOONGARCH_LBT_FTOP (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6) + +#define LOONGARCH_REG_SHIFT 3 +#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) +#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) +#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) + +/* Device Control API on vm fd */ +#define KVM_LOONGARCH_VM_FEAT_CTRL 0 +#define KVM_LOONGARCH_VM_FEAT_LSX 0 +#define KVM_LOONGARCH_VM_FEAT_LASX 1 +#define KVM_LOONGARCH_VM_FEAT_X86BT 2 +#define KVM_LOONGARCH_VM_FEAT_ARMBT 3 +#define KVM_LOONGARCH_VM_FEAT_MIPSBT 4 +#define KVM_LOONGARCH_VM_FEAT_PMU 5 +#define KVM_LOONGARCH_VM_FEAT_PV_IPI 6 +#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7 + +/* Device Control API on vcpu fd */ +#define KVM_LOONGARCH_VCPU_CPUCFG 0 +#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1 +#define KVM_LOONGARCH_VCPU_PVTIME_GPA 0 + +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct kvm_iocsr_entry { + __u32 addr; + __u32 pad; + __u64 data; +}; + +#define KVM_NR_IRQCHIPS 1 +#define KVM_IRQCHIP_NUM_PINS 64 +#define KVM_MAX_CORES 256 + +#define KVM_LOONGARCH_VM_HAVE_IRQCHIP 0x40000001 + +#define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000002 + +#define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000003 + +#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000004 +#define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0 + +#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS 0x40000005 + +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */ diff --git a/arch/loongarch/include/uapi/asm/kvm_para.h b/arch/loongarch/include/uapi/asm/kvm_para.h new file mode 100644 index 0000000000000000000000000000000000000000..b0604aa9b4bbd29dfafedbdf59f571bd445c2c2c --- /dev/null +++ b/arch/loongarch/include/uapi/asm/kvm_para.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_KVM_PARA_H +#define _UAPI_ASM_KVM_PARA_H + 
+#include + +/* + * CPUCFG index area: 0x40000000 -- 0x400000ff + * SW emulation for KVM hypervisor + */ +#define CPUCFG_KVM_BASE 0x40000000 +#define CPUCFG_KVM_SIZE 0x100 +#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0) +#define KVM_SIGNATURE "KVM\0" +#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) +#define KVM_FEATURE_IPI 1 +#define KVM_FEATURE_STEAL_TIME 2 +/* Bits 24 - 31 are features configurable by the user space VMM */ +#define KVM_FEATURE_VIRT_EXTIOI 24 + +#endif /* _UAPI_ASM_KVM_PARA_H */ diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 4fcc168f07323154b4d7fc6712ab7a2298bb0d3e..caf9a0b5e62d8dbf976698a7dad6641dd0bd4a1e 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -3,12 +3,15 @@ # Makefile for the Linux/LoongArch kernel. # +OBJECT_FILES_NON_STANDARD_head.o := y + extra-y := vmlinux.lds obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \ elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \ alternative.o unwind.o +obj-y += legacy_boot.o obj-$(CONFIG_ACPI) += acpi.o obj-$(CONFIG_EFI) += efi.o @@ -21,6 +24,7 @@ obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o CFLAGS_module.o += $(call cc-option,-Wno-override-init,) CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,) +CFLAGS_traps.o += $(call cc-option,-Wno-override-init,) CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,) ifdef CONFIG_FUNCTION_TRACER @@ -48,6 +52,7 @@ obj-$(CONFIG_MODULES) += module.o module-sections.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_PROC_FS) += proc.o +obj-$(CONFIG_PARAVIRT) += paravirt.o obj-$(CONFIG_SMP) += smp.o @@ -62,6 +67,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o +obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 55d6a48c76a8211797f1020ace84aefba207324d..17dc28821a9ddb4cd8116c165100f74d4e025e35 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -17,6 +17,7 @@ #include #include #include +#include "legacy_boot.h" int acpi_disabled; EXPORT_SYMBOL(acpi_disabled); @@ -58,7 +59,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) } #ifdef CONFIG_SMP -static int set_processor_mask(u32 id, u32 flags) +int set_processor_mask(u32 id, u32 flags) { int cpu, cpuid = id; @@ -70,10 +71,10 @@ static int set_processor_mask(u32 id, u32 flags) return -ENODEV; } - if (cpuid == loongson_sysconf.boot_cpu_id) - cpu = 0; - else - cpu = cpumask_next_zero(-1, cpu_present_mask); + + cpu = topo_add_cpu(cpuid); + if (cpu < 0) + return -EEXIST; if (flags & ACPI_MADT_ENABLED) { num_processors++; @@ -132,6 +133,10 @@ static void __init acpi_process_madt(void) __cpu_logical_map[i] = -1; } #endif + + if (efi_bp && bpi_version <= BPI_VERSION_V1) + legacy_madt_table_init(); + acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC, acpi_parse_processor, MAX_CORE_PIC); @@ -192,8 +197,6 @@ void __init acpi_boot_table_init(void) goto fdt_earlycon; } - loongson_sysconf.boot_cpu_id = read_csr_cpuid(); - /* * Process the Multiple APIC Description Table (MADT), if present */ @@ -243,7 +246,7 @@ void __init numa_set_distance(int from, int to, int distance) void __init acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity 
*pa) { - int pxm, node; + int pxm, node, cpu; if (srat_disabled()) return; @@ -272,6 +275,11 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) return; } + cpu = topo_get_cpu(pa->apic_id); + /* Check whether apic_id exists in MADT table */ + if (cpu < 0) + return; + early_numa_add_cpu(pa->apic_id, node); set_cpuid_to_node(pa->apic_id, node); @@ -310,12 +318,17 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu { int cpu; - cpu = set_processor_mask(physid, ACPI_MADT_ENABLED); + cpu = topo_get_cpu(physid); + /* Check whether apic_id exists in MADT table */ if (cpu < 0) { pr_info(PREFIX "Unable to map lapic to logical cpu number\n"); return cpu; } + num_processors++; + set_cpu_present(cpu, true); + __cpu_number_map[physid] = cpu; + __cpu_logical_map[cpu] = physid; acpi_map_cpu2node(handle, cpu, physid); *pcpu = cpu; diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index 8da0726777edb41ea66d47f640308c18435f4551..173fe514fc9ecf2974c01c1e06b8f130988da9a7 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -289,3 +290,34 @@ void output_fgraph_ret_regs_defines(void) BLANK(); } #endif + +void output_kvm_defines(void) +{ + COMMENT("KVM/LoongArch Specific offsets."); + + OFFSET(VCPU_FCC, kvm_vcpu_arch, fpu.fcc); + OFFSET(VCPU_FCSR0, kvm_vcpu_arch, fpu.fcsr); + BLANK(); + + OFFSET(KVM_VCPU_ARCH, kvm_vcpu, arch); + OFFSET(KVM_VCPU_KVM, kvm_vcpu, kvm); + OFFSET(KVM_VCPU_RUN, kvm_vcpu, run); + BLANK(); + + OFFSET(KVM_ARCH_HSP, kvm_vcpu_arch, host_sp); + OFFSET(KVM_ARCH_HTP, kvm_vcpu_arch, host_tp); + OFFSET(KVM_ARCH_HPGD, kvm_vcpu_arch, host_pgd); + OFFSET(KVM_ARCH_HANDLE_EXIT, kvm_vcpu_arch, handle_exit); + OFFSET(KVM_ARCH_HEENTRY, kvm_vcpu_arch, host_eentry); + OFFSET(KVM_ARCH_GEENTRY, kvm_vcpu_arch, guest_eentry); + OFFSET(KVM_ARCH_GPC, kvm_vcpu_arch, pc); + OFFSET(KVM_ARCH_GGPR, kvm_vcpu_arch, gprs); + OFFSET(KVM_ARCH_HBADI, kvm_vcpu_arch, badi); + OFFSET(KVM_ARCH_HBADV, kvm_vcpu_arch, badv); + OFFSET(KVM_ARCH_HECFG, kvm_vcpu_arch, host_ecfg); + OFFSET(KVM_ARCH_HESTAT, kvm_vcpu_arch, host_estat); + OFFSET(KVM_ARCH_HPERCPU, kvm_vcpu_arch, host_percpu); + + OFFSET(KVM_GPGD, kvm, arch.pgd); + BLANK(); +} diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index 55320813ee0819f0f23429361b7fc9722b710d65..14f0449f54520ac70db6a7e9d8636b391933b7af 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -106,7 +106,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) elf_hwcap |= HWCAP_LOONGARCH_CRC32; } - config = read_cpucfg(LOONGARCH_CPUCFG2); if (config & CPUCFG2_LAM) { c->options |= LOONGARCH_CPU_LAM; @@ -174,6 +173,8 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) c->options |= LOONGARCH_CPU_FLATMODE; if (config & IOCSRF_EIODECODE) c->options |= LOONGARCH_CPU_EIODECODE; + if (config & IOCSRF_AVEC) + c->options |= LOONGARCH_CPU_AVECINT; if (config & IOCSRF_VM) c->options |= LOONGARCH_CPU_HYPERVISOR; diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c index 7a9c6a9dd2d01fb429b67aea02deada5e5b1f3be..cc0ccde58db87f8ddd6d5e568f40f864bd55b0c1 100644 --- a/arch/loongarch/kernel/dma.c +++ b/arch/loongarch/kernel/dma.c @@ -4,6 +4,28 @@ */ #include #include +#include + +/* + * We extract 4bit node id (bit 44~47) from Loongson-3's + * 48bit physical address space and embed it into 40bit. 
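+ *
+ * For example, assuming node_id_offset resolves to 36 (it is read from the
+ * LS7A DMA config register at boot): physical address 0x1000_0000_1000 on
+ * node 1 becomes DMA address 0x10_0000_1000, i.e. the node id moves from
+ * bit 44 down to bit 36, and dma_to_phys() performs the inverse mapping.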
+ */ + +static int node_id_offset; + +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + long nid = (paddr >> 44) & 0xf; + + return ((nid << 44) ^ paddr) | (nid << node_id_offset); +} + +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + long nid = (daddr >> node_id_offset) & 0xf; + + return ((nid << node_id_offset) ^ daddr) | (nid << 44); +} void acpi_arch_dma_setup(struct device *dev) { @@ -11,6 +33,11 @@ void acpi_arch_dma_setup(struct device *dev) u64 mask, end = 0; const struct bus_dma_region *map = NULL; + if (node_id_offset == 0) { + node_id_offset = ((readl(LS7A_DMA_CFG) & LS7A_DMA_NODE_MASK) >> LS7A_DMA_NODE_SHF); + node_id_offset += 36; + } + ret = acpi_dma_get_range(dev, &map); if (!ret && map) { const struct bus_dma_region *r = map; diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 4ae77e9300d58043a50537fb450737762e2770b3..459583c985be0f91105689049713458489d0a076 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -23,13 +23,17 @@ #include #include +#include #include +#include +#include "legacy_boot.h" static unsigned long efi_nr_tables; static unsigned long efi_config_table; static unsigned long __initdata boot_memmap = EFI_INVALID_TABLE_ADDR; static unsigned long __initdata fdt_pointer = EFI_INVALID_TABLE_ADDR; +static __initdata pgd_t *pgd_efi; static efi_system_table_t *efi_systab; static efi_config_table_type_t arch_tables[] __initdata = { @@ -49,8 +53,188 @@ void __init *efi_fdt_pointer(void) return early_memremap_ro(fdt_pointer, SZ_64K); } +static int __init efimap_populate_hugepages( + unsigned long start, unsigned long end, + pgprot_t prot) +{ + unsigned long addr; + unsigned long next; + pmd_t entry; + pud_t *pud; + pmd_t *pmd; + + for (addr = start; addr < end; addr = next) { + next = pmd_addr_end(addr, end); + pud = pud_offset((p4d_t *)pgd_efi + pgd_index(addr), addr); + if (pud_none(*pud)) { + void *p = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + + if (!p) { + pr_err("can not alloc efimap huge pages!\n"); + return -1; + } + pmd_init(p); + pud_populate(&init_mm, pud, p); + } + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + entry = pfn_pmd((addr >> PAGE_SHIFT), prot); + entry = pmd_mkhuge(entry); + set_pmd_at(&init_mm, addr, pmd, entry); + } + } + return 0; +} + +static void __init efi_map_pgt(void) +{ + unsigned long node; + unsigned long start, end; + efi_memory_desc_t *md; + u32 mem_type; + + pgd_efi = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!pgd_efi) { + pr_err("alloc efi pgd failed!\n"); + return; + } + pgd_init(pgd_efi); + csr_write64((long)pgd_efi, LOONGARCH_CSR_PGDL); + + /* Low Memory, Cached */ + efimap_populate_hugepages(0, SZ_256M, PAGE_KERNEL); + + for_each_node_mask(node, node_possible_map) { + /* MMIO Registers, Uncached */ + efimap_populate_hugepages(SZ_256M | (node << 44), + SZ_512M | (node << 44), PAGE_KERNEL_SUC); + } + + /* Parse memory information */ + for_each_efi_memory_desc(md) { + mem_type = md->type; + start = ALIGN_DOWN(md->phys_addr, PMD_SIZE); + end = ALIGN(start + (md->num_pages << EFI_PAGE_SHIFT), PMD_SIZE); + node = start >> 44; + + switch (mem_type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_PAL_CODE: + case EFI_UNUSABLE_MEMORY: + case EFI_ACPI_RECLAIM_MEMORY: + case EFI_RESERVED_TYPE: + case EFI_RUNTIME_SERVICES_CODE: + case EFI_RUNTIME_SERVICES_DATA: + efimap_populate_hugepages(node ? 
start : SZ_512M, end, PAGE_KERNEL); + break; + case EFI_MEMORY_MAPPED_IO: + case EFI_MEMORY_MAPPED_IO_PORT_SPACE: + efimap_populate_hugepages(node ? start : SZ_512M, end, PAGE_KERNEL_SUC); + break; + } + } +} + +static int __init efimap_free_pgt(unsigned long start, unsigned long end) +{ + unsigned long addr; + unsigned long next; + pud_t *pud; + pmd_t *pmd; + + for (addr = start; addr < end; addr = next) { + next = pmd_addr_end(addr, end); + + pud = pud_offset((p4d_t *)pgd_efi + pgd_index(addr), addr); + if (!pud_present(*pud)) + continue; + pmd = pmd_offset(pud, addr); + memblock_free(pmd, PAGE_SIZE); + pud_clear(pud); + } + return 0; +} + +static void __init efi_unmap_pgt(void) +{ + unsigned long node; + unsigned long start, end; + unsigned long start_pfn, end_pfn; + + for_each_node_mask(node, node_possible_map) { + get_pfn_range_for_nid(node, &start_pfn, &end_pfn); + start = ALIGN_DOWN(start_pfn << PAGE_SHIFT, PMD_SIZE); + end = ALIGN(end_pfn << PAGE_SHIFT, PMD_SIZE); + + /* Free pagetable memory */ + efimap_free_pgt(start, end); + } + + memblock_free(pgd_efi, PAGE_SIZE); + csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + local_flush_tlb_all(); +} + +/* + * set_virtual_map() - create a virtual mapping for the EFI memory map and call + * efi_set_virtual_address_map() to switch runtime services to virtual mode + * + * This function populates the virt_addr fields of all memory region descriptors + * in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors + * are also copied to @runtime_map, and their total count is returned in @count. + */ +static int __init set_virtual_map(void) +{ + efi_status_t status; + int count = 0; + unsigned int size; + unsigned long attr; + efi_runtime_services_t *rt; + efi_set_virtual_address_map_t *svam; + efi_memory_desc_t *in, runtime_map[32]; + + if (efi_bp) + return EFI_SUCCESS; + + size = sizeof(efi_memory_desc_t); + + for_each_efi_memory_desc(in) { + attr = in->attribute; + if (!(attr & EFI_MEMORY_RUNTIME)) + continue; + + if (attr & (EFI_MEMORY_WB | EFI_MEMORY_WT)) + in->virt_addr = TO_CACHE(in->phys_addr); + else + in->virt_addr = TO_UNCACHE(in->phys_addr); + + memcpy(&runtime_map[count++], in, size); + } + + rt = early_memremap_ro((unsigned long)efi_systab->runtime, sizeof(*rt)); + + /* Install the new virtual address map */ + svam = rt->set_virtual_address_map; + + efi_map_pgt(); + + status = svam(size * count, size, efi.memmap.desc_version, + (efi_memory_desc_t *)TO_PHYS((unsigned long)runtime_map)); + + efi_unmap_pgt(); + if (status != EFI_SUCCESS) + return -1; + + return 0; +} + void __init efi_runtime_init(void) { + int status; /* set_virtual_map() returns a negative int on failure */ + if (!efi_enabled(EFI_BOOT) || !efi_systab->runtime) return; @@ -59,7 +243,11 @@ void __init efi_runtime_init(void) return; } - efi.runtime = (efi_runtime_services_t *)efi_systab->runtime; + status = set_virtual_map(); + if (status < 0) + return; + + efi.runtime = READ_ONCE(efi_systab->runtime); efi.runtime_version = (unsigned int)efi.runtime->hdr.revision; efi_native_runtime_setup(); @@ -99,10 +287,12 @@ void __init efi_init(void) void *config_tables; struct efi_boot_memmap *tbl; - if (!efi_system_table) - return; + if (efi_system_table) + efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, + sizeof(*efi_systab)); + else + efi_systab = (efi_system_table_t *)efi_bp->systemtable; - efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, sizeof(*efi_systab)); if (!efi_systab) { pr_err("Can't find EFI system table.\n"); return; diff --git 
a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S index 1ec8e4c4cc2bd89d5af2492b0b31158ce8b53f9e..48e7e34e355e83eae8165957ba2eac05a8bf17df 100644 --- a/arch/loongarch/kernel/entry.S +++ b/arch/loongarch/kernel/entry.S @@ -14,11 +14,13 @@ #include #include #include +#include .text .cfi_sections .debug_frame .align 5 SYM_CODE_START(handle_syscall) + UNWIND_HINT_UNDEFINED csrrd t0, PERCPU_BASE_KS la.pcrel t1, kernelsp add.d t1, t1, t0 @@ -57,6 +59,7 @@ SYM_CODE_START(handle_syscall) cfi_st fp, PT_R22 SAVE_STATIC + UNWIND_HINT_REGS #ifdef CONFIG_KGDB li.w t1, CSR_CRMD_WE @@ -75,6 +78,7 @@ SYM_CODE_END(handle_syscall) _ASM_NOKPROBE(handle_syscall) SYM_CODE_START(ret_from_fork) + UNWIND_HINT_REGS bl schedule_tail # a0 = struct task_struct *prev move a0, sp bl syscall_exit_to_user_mode @@ -84,6 +88,7 @@ SYM_CODE_START(ret_from_fork) SYM_CODE_END(ret_from_fork) SYM_CODE_START(ret_from_kernel_thread) + UNWIND_HINT_REGS bl schedule_tail # a0 = struct task_struct *prev move a0, s1 jirl ra, s0, 0 diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 6b3bfb0092e60b34946490415ff7cd2a51287886..85dbfb1256eb260d09278108c00be564f3528749 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -12,6 +12,7 @@ #include #include #include +#include "legacy_boot.h" u64 efi_system_table; struct loongson_system_configuration loongson_sysconf; @@ -22,6 +23,11 @@ void __init init_environ(void) int efi_boot = fw_arg0; char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); + legacy_boot_init(fw_arg0, fw_arg1, fw_arg2); + + if (efi_bp) + return; + if (efi_boot) set_bit(EFI_BOOT, &efi.flags); else diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index d53ab10f464465e3f88910614afefce92b5af607..6ab640101457cc4d222716de1ab17f8b4090f9c4 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -15,6 +15,7 @@ #include #include #include +#include #define FPU_REG_WIDTH 8 #define LSX_REG_WIDTH 16 @@ -349,6 +350,7 @@ SYM_FUNC_START(_restore_lsx_upper) lsx_restore_all_upper a0 t0 t1 jr ra SYM_FUNC_END(_restore_lsx_upper) +EXPORT_SYMBOL(_restore_lsx_upper) SYM_FUNC_START(_init_lsx_upper) lsx_init_all_upper t1 @@ -384,6 +386,7 @@ SYM_FUNC_START(_restore_lasx_upper) lasx_restore_all_upper a0 t0 t1 jr ra SYM_FUNC_END(_restore_lasx_upper) +EXPORT_SYMBOL(_restore_lasx_upper) SYM_FUNC_START(_init_lasx_upper) lasx_init_all_upper t1 @@ -524,3 +527,13 @@ SYM_FUNC_END(_restore_lasx_context) .L_fpu_fault: li.w a0, -EFAULT # failure jr ra + +#ifdef CONFIG_CPU_HAS_LBT +STACK_FRAME_NON_STANDARD _restore_fp +#ifdef CONFIG_CPU_HAS_LSX +STACK_FRAME_NON_STANDARD _restore_lsx +#endif +#ifdef CONFIG_CPU_HAS_LASX +STACK_FRAME_NON_STANDARD _restore_lasx +#endif +#endif diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S index 2bb3aa2dcfcb2e67935389d76585e813c7a6e169..86d5d90ebefe5b704aaeae79f529ec3f323dcd34 100644 --- a/arch/loongarch/kernel/genex.S +++ b/arch/loongarch/kernel/genex.S @@ -32,6 +32,7 @@ SYM_FUNC_START(__arch_cpu_idle) SYM_FUNC_END(__arch_cpu_idle) SYM_CODE_START(handle_vint) + UNWIND_HINT_UNDEFINED BACKUP_T0T1 SAVE_ALL la_abs t1, __arch_cpu_idle @@ -49,6 +50,7 @@ SYM_CODE_START(handle_vint) SYM_CODE_END(handle_vint) SYM_CODE_START(except_vec_cex) + UNWIND_HINT_UNDEFINED b cache_parity_error SYM_CODE_END(except_vec_cex) @@ -67,6 +69,7 @@ SYM_CODE_END(except_vec_cex) .macro BUILD_HANDLER exception handler prep .align 5 SYM_CODE_START(handle_\exception) + UNWIND_HINT_UNDEFINED 666: BACKUP_T0T1 SAVE_ALL @@ -77,7 
+80,9 @@ SYM_CODE_END(except_vec_cex) 668: RESTORE_ALL_AND_RET SYM_CODE_END(handle_\exception) + .pushsection ".data", "aw", %progbits SYM_DATA(unwind_hint_\exception, .word 668b - 666b) + .popsection .endm BUILD_HANDLER ade ade badv @@ -94,6 +99,7 @@ SYM_CODE_END(except_vec_cex) BUILD_HANDLER reserved reserved none /* others */ SYM_CODE_START(handle_sys) + UNWIND_HINT_UNDEFINED la_abs t0, handle_syscall jr t0 SYM_CODE_END(handle_sys) diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index e336fbc4eb9675ce5d33c29d2f786833de9aaf02..841e51144945a6fcf55efebcb68b3b4171a8b4d5 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -44,11 +44,7 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize); SYM_CODE_START(kernel_entry) # kernel entry point /* Config direct window and set PG */ - li.d t0, CSR_DMW0_INIT # UC, PLV0, 0x8000 xxxx xxxx xxxx - csrwr t0, LOONGARCH_CSR_DMWIN0 - li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx - csrwr t0, LOONGARCH_CSR_DMWIN1 - + SETUP_DMWINS t0 JUMP_VIRT_ADDR t0, t1 /* Enable PG */ @@ -119,11 +115,8 @@ SYM_CODE_END(kernel_entry) * function after setting up the stack and tp registers. */ SYM_CODE_START(smpboot_entry) - li.d t0, CSR_DMW0_INIT # UC, PLV0 - csrwr t0, LOONGARCH_CSR_DMWIN0 - li.d t0, CSR_DMW1_INIT # CA, PLV0 - csrwr t0, LOONGARCH_CSR_DMWIN1 + SETUP_DMWINS t0 JUMP_VIRT_ADDR t0, t1 /* Enable PG */ diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index df42c063f6c4308bf96c13b94fdffe90a6db23b9..1b9eec76699abca4405c4d0988b483caec25ea24 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -20,6 +20,7 @@ #include #include #include +#include "legacy_boot.h" DEFINE_PER_CPU(unsigned long, irq_stack); DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); @@ -61,6 +62,12 @@ static int __init early_pci_mcfg_parse(struct acpi_table_header *header) if (header->length < sizeof(struct acpi_table_mcfg)) return -EINVAL; + for (i = 0; i < MAX_IO_PICS; i++) { + msi_group[i].pci_segment = -1; + msi_group[i].node = -1; + pch_group[i].node = -1; + } + n = (header->length - sizeof(struct acpi_table_mcfg)) / sizeof(struct acpi_mcfg_allocation); mcfg = (struct acpi_table_mcfg *)header; @@ -76,34 +83,61 @@ static int __init early_pci_mcfg_parse(struct acpi_table_header *header) static void __init init_vec_parent_group(void) { - int i; + acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse); +} - for (i = 0; i < MAX_IO_PICS; i++) { - msi_group[i].pci_segment = -1; - msi_group[i].node = -1; - pch_group[i].node = -1; +#ifdef CONFIG_HOTPLUG_CPU +static void handle_irq_affinity(void) +{ + struct irq_desc *desc; + struct irq_chip *chip; + unsigned int irq; + unsigned long flags; + struct cpumask *affinity; + + for_each_active_irq(irq) { + desc = irq_to_desc(irq); + if (!desc) + continue; + + raw_spin_lock_irqsave(&desc->lock, flags); + + affinity = desc->irq_data.common->affinity; + if (!cpumask_intersects(affinity, cpu_online_mask)) + cpumask_copy(affinity, cpu_online_mask); + + chip = irq_data_get_irq_chip(&desc->irq_data); + if (chip && chip->irq_set_affinity) + chip->irq_set_affinity(&desc->irq_data, + desc->irq_data.common->affinity, true); + raw_spin_unlock_irqrestore(&desc->lock, flags); } +} - acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse); +void fixup_irqs(void) +{ + handle_irq_affinity(); + irq_cpu_offline(); + clear_csr_ecfg(ECFG0_IM); } +#endif -static int __init get_ipi_irq(void) +int __init arch_probe_nr_irqs(void) { - struct irq_domain *d = 
irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); + int nr_io_pics = bitmap_weight(&loongson_sysconf.cores_io_master, NR_CPUS); - if (d) - return irq_create_mapping(d, INT_IPI); + if (!cpu_has_avecint) + nr_irqs = (64 + NR_VECTORS * nr_io_pics); + else + nr_irqs = (64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics)); - return -EINVAL; + return NR_IRQS_LEGACY; } + void __init init_IRQ(void) { - int i; -#ifdef CONFIG_SMP - int r, ipi_irq; - static int ipi_dummy_dev; -#endif + int i, ret; unsigned int order = get_order(IRQ_STACK_SIZE); struct page *page; @@ -111,15 +145,15 @@ void __init init_IRQ(void) clear_csr_estat(ESTATF_IP); init_vec_parent_group(); - irqchip_init(); + if (efi_bp && bpi_version <= BPI_VERSION_V1) { + ret = setup_legacy_IRQ(); + if (ret) + panic("IRQ domain init error!\n"); + } else { + irqchip_init(); + } #ifdef CONFIG_SMP - ipi_irq = get_ipi_irq(); - if (ipi_irq < 0) - panic("IPI IRQ mapping failed\n"); - irq_set_percpu_devid(ipi_irq); - r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev); - if (r < 0) - panic("IPI IRQ request failed\n"); + smp_ops.init_ipi(); #endif for_each_possible_cpu(i) { @@ -130,5 +164,5 @@ void __init init_IRQ(void) per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE); } - set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC); + set_csr_ecfg(ECFGF_SIP0 | ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC); } diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S index 9c75120a26d836d75d3bb08c2b5a0022a8c5d58d..001f061d226ab52818aaa79cca57e94c0b4d3f9d 100644 --- a/arch/loongarch/kernel/lbt.S +++ b/arch/loongarch/kernel/lbt.S @@ -11,6 +11,7 @@ #include #include #include +#include #define SCR_REG_WIDTH 8 @@ -153,3 +154,5 @@ SYM_FUNC_END(_restore_ftop_context) .L_lbt_fault: li.w a0, -EFAULT # failure jr ra + +STACK_FRAME_NON_STANDARD _restore_ftop_context diff --git a/arch/loongarch/kernel/legacy_boot.c b/arch/loongarch/kernel/legacy_boot.c new file mode 100644 index 0000000000000000000000000000000000000000..6503d5f0c034aff6f229a4769566d05a6673c2e6 --- /dev/null +++ b/arch/loongarch/kernel/legacy_boot.c @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Yun Liu, liuyun@loongson.cn + * Copyright (C) 2020 Loongson Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "legacy_boot.h" + +#define MAX_CORE_PIC 256 +#define PREFIX "ACPI: " + +#define MSI_MSG_ADDRESS 0x2FF00000 +#define MSI_MSG_DEFAULT_COUNT 0xC0 + +extern int liointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lio_pic *acpi_liointc); +extern int eiointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_eio_pic *acpi_eiointc); +extern int htvec_acpi_init(struct irq_domain *parent, + struct acpi_madt_ht_pic *acpi_htvec); +extern int pch_lpc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lpc_pic *acpi_pchlpc); + +struct boot_params *efi_bp; +struct loongsonlist_mem_map *g_mmap; +struct acpi_madt_lio_pic *acpi_liointc; +struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS]; + +struct acpi_madt_ht_pic *acpi_htintc; +struct acpi_madt_lpc_pic *acpi_pchlpc; +struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS]; +struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS]; + +struct irq_domain *cpu_domain; +struct irq_domain *liointc_domain; +struct irq_domain *pch_lpc_domain; +struct irq_domain *pch_msi_domain[MAX_IO_PICS]; +struct irq_domain *pch_pic_domain[MAX_IO_PICS]; + +char arcs_cmdline[COMMAND_LINE_SIZE]; +int nr_io_pics; +int bpi_version; + +struct acpi_madt_lio_pic liointc_default = { + .address = LOONGSON_REG_BASE + 0x1400, + .size = 256, + .cascade = {2, 3}, + .cascade_map = {0x00FFFFFF, 0xff000000}, +}; + +struct acpi_madt_lpc_pic pchlpc_default = { + .address = LS7A_LPC_REG_BASE, + .size = SZ_4K, + .cascade = 19, +}; + +struct acpi_madt_eio_pic eiointc_default[MAX_IO_PICS]; +struct acpi_madt_msi_pic pchmsi_default[MAX_IO_PICS]; +struct acpi_madt_bio_pic pchpic_default[MAX_IO_PICS]; + +static int +acpi_parse_lapic(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_local_apic *processor = NULL; + + processor = (struct acpi_madt_local_apic *)header; + if (BAD_MADT_ENTRY(processor, end)) + return -EINVAL; + + acpi_table_print_madt_entry(&header->common); + set_processor_mask(processor->id, processor->lapic_flags); + + return 0; +} + +static int bad_pch_pic(unsigned long address) +{ + if (nr_io_pics >= MAX_IO_PICS) { + pr_warn("WARNING: Max # of I/O PCH_PICs (%d) exceeded (found %d), skipping\n", + MAX_IO_PICS, nr_io_pics); + return 1; + } + if (!address) { + pr_warn("WARNING: Bogus (zero) I/O PCH_PIC address found in table, skipping!\n"); + return 1; + } + return 0; +} + +void register_default_pic(int id, u32 address, u32 irq_base) +{ + int j, idx, entries, cores; + unsigned long addr; + u64 node_map = 0; + + if (bad_pch_pic(address)) + return; + + idx = nr_io_pics; + cores = (cpu_has_hypervisor ? 
MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); + + pchpic_default[idx].address = address; + if (idx) + pchpic_default[idx].address |= nid_to_addrbase(id) | HT1LO_OFFSET; + pchpic_default[idx].id = id; + pchpic_default[idx].version = 0; + pchpic_default[idx].size = 0x1000; + pchpic_default[idx].gsi_base = irq_base; + + msi_group[nr_io_pics].pci_segment = nr_io_pics; + pch_group[nr_io_pics].node = msi_group[nr_io_pics].node = id; + + addr = pchpic_default[idx].address; + /* Read INT_ID.int_num */ + entries = (((unsigned long)ls7a_readq(addr) >> 48) & 0xff) + 1; + pchmsi_default[idx].msg_address = MSI_MSG_ADDRESS; + pchmsi_default[idx].start = entries; + pchmsi_default[idx].count = MSI_MSG_DEFAULT_COUNT; + + for_each_possible_cpu(j) { + int node = cpu_logical_map(j) / cores; + + node_map |= (1 << node); + } + eiointc_default[idx].cascade = 3 + idx; + eiointc_default[idx].node = id; + eiointc_default[idx].node_map = node_map; + + if (idx) { + int i; + + for (i = 0; i < idx + 1; i++) { + node_map = 0; + + for_each_possible_cpu(j) { + int node = cpu_logical_map(j) / cores; + + if (((node & 7) < 4) ? !i : i) + node_map |= (1 << node); + } + eiointc_default[i].node_map = node_map; + } + } + + acpi_pchpic[idx] = &pchpic_default[idx]; + acpi_pchmsi[idx] = &pchmsi_default[idx]; + acpi_eiointc[idx] = &eiointc_default[idx]; + + nr_io_pics++; +} + +static int +acpi_parse_legacy_pch_pic(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_io_apic *pch_pic = NULL; + + pch_pic = (struct acpi_madt_io_apic *)header; + + if (BAD_MADT_ENTRY(pch_pic, end)) + return -EINVAL; + + acpi_table_print_madt_entry(&header->common); + + register_default_pic(pch_pic->id, pch_pic->address, + pch_pic->global_irq_base); + + return 0; +} + +__init int legacy_madt_table_init(void) +{ + /* Parse MADT LAPIC entries */ + acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, MAX_CORE_PIC); + acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_legacy_pch_pic, MAX_IO_PICS); + + acpi_liointc = &liointc_default; + acpi_pchlpc = &pchlpc_default; + + return 0; +} + +int setup_legacy_IRQ(void) +{ + int i, ret; + struct irq_domain *pic_domain; + + if (!acpi_eiointc[0]) + cpu_data[0].options &= ~LOONGARCH_CPU_EXTIOI; + + ret = cpuintc_acpi_init(NULL, 0); + if (ret) { + pr_err("CPU domain init error!\n"); + return -1; + } + cpu_domain = get_cpudomain(); + ret = liointc_acpi_init(cpu_domain, acpi_liointc); + if (ret) { + pr_err("Liointc domain init error!\n"); + return -1; + } + liointc_domain = irq_find_matching_fwnode(liointc_handle, DOMAIN_BUS_ANY); + if (cpu_has_extioi) { + pr_info("Using EIOINTC interrupt mode\n"); + for (i = 0; i < nr_io_pics; i++) { + ret = eiointc_acpi_init(cpu_domain, acpi_eiointc[i]); + if (ret) { + pr_err("Eiointc domain init error!\n"); + return -1; + } + + pch_pic_parse_madt((union acpi_subtable_headers *)acpi_pchpic[i], 0); + pch_msi_parse_madt((union acpi_subtable_headers *)acpi_pchmsi[i], 0); + } + /* HTVECINTC may not be used */ + } else { + pr_info("Using HTVECINTC interrupt mode\n"); + ret = htvec_acpi_init(liointc_domain, acpi_htintc); + if (ret) { + pr_err("HTVECintc domain init error!\n"); + return -1; + } + pch_pic_parse_madt((union acpi_subtable_headers *)acpi_pchpic[0], 0); + pch_msi_parse_madt((union acpi_subtable_headers *)acpi_pchmsi[0], 0); + } + + pic_domain = get_pchpic_irq_domain(); + if (pic_domain && !cpu_has_hypervisor) + pch_lpc_acpi_init(pic_domain, acpi_pchlpc); + + return 0; +} + +/* + * Manage initrd + */ +#ifdef CONFIG_BLK_DEV_INITRD 
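+/*
+ * With legacy BPI firmware the initrd is described by the "rd_start=" and
+ * "rd_size=" kernel parameters, parsed below, instead of an FDT /chosen
+ * node; for example (the values are purely illustrative):
+ *
+ *	rd_start=0x9000000004000000 rd_size=0x4000000
+ */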
+static __init int rd_start_early(char *p) +{ + phys_initrd_start = __pa(memparse(p, NULL)); + + return 0; +} +early_param("rd_start", rd_start_early); + +static __init int rd_size_early(char *p) +{ + phys_initrd_size = memparse(p, NULL); + + return 0; +} +early_param("rd_size", rd_size_early); + +#endif + +__init void fw_init_cmdline(unsigned long argc, unsigned long cmdp) +{ + int i; + char **_fw_argv; + + _fw_argv = (char **)cmdp; + + arcs_cmdline[0] = '\0'; + for (i = 1; i < argc; i++) { + strlcat(arcs_cmdline, _fw_argv[i], COMMAND_LINE_SIZE); + if (i < (argc - 1)) + strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); + } + strscpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); +} + +static u8 ext_listhdr_checksum(u8 *buffer, u32 length) +{ + u8 sum = 0; + u8 *end = buffer + length; + + while (buffer < end) + sum = (u8)(sum + *(buffer++)); + + return sum; +} + +static int parse_mem(struct _extention_list_hdr *head) +{ + g_mmap = (struct loongsonlist_mem_map *)head; + if (ext_listhdr_checksum((u8 *)g_mmap, head->length)) { + pr_err("mem checksum error\n"); + return -EPERM; + } + return 0; +} + +/* The legacy firmware passes this info; use it if the VBIOS is needed */ +static int parse_vbios(struct _extention_list_hdr *head) +{ + struct loongsonlist_vbios *pvbios; + + pvbios = (struct loongsonlist_vbios *)head; + + if (ext_listhdr_checksum((u8 *)pvbios, head->length)) { + pr_err("vbios_addr checksum error\n"); + return -EPERM; + } + return 0; +} + +/* The legacy firmware passes this info; use it if screen info is needed (e.g. under KVM) */ +static int parse_screeninfo(struct _extention_list_hdr *head) +{ + struct loongsonlist_screeninfo *pscreeninfo; + + pscreeninfo = (struct loongsonlist_screeninfo *)head; + if (ext_listhdr_checksum((u8 *)pscreeninfo, head->length)) { + pr_err("screeninfo_addr checksum error\n"); + return -EPERM; + } + + memcpy(&screen_info, &pscreeninfo->si, sizeof(screen_info)); + return 0; +} + +static int list_find(struct boot_params *bp) +{ + struct _extention_list_hdr *fhead = NULL; + unsigned long index; + + fhead = bp->extlist; + if (!fhead) { + pr_err("the bp ext struct is empty!\n"); + return -1; + } + do { + if (memcmp(&(fhead->signature), LOONGSON_MEM_SIGNATURE, 3) == 0) { + if (parse_mem(fhead) != 0) { + pr_err("parse mem failed\n"); + return -EPERM; + } + } else if (memcmp(&(fhead->signature), LOONGSON_VBIOS_SIGNATURE, 5) == 0) { + if (parse_vbios(fhead) != 0) { + pr_err("parse vbios failed\n"); + return -EPERM; + } + } else if (memcmp(&(fhead->signature), LOONGSON_SCREENINFO_SIGNATURE, 5) == 0) { + if (parse_screeninfo(fhead) != 0) { + pr_err("parse screeninfo failed\n"); + return -EPERM; + } + } + fhead = (struct _extention_list_hdr *)fhead->next; + index = (unsigned long)fhead; + } while (index); + return 0; +} + +unsigned int bpi_init(void) +{ + return list_find(efi_bp); +} + +static int get_bpi_version(u64 *signature) +{ + u8 data[9]; + int version = BPI_VERSION_NONE; + + data[8] = 0; + + memcpy(data, signature, sizeof(*signature)); + if (kstrtoint(&data[3], 10, &version)) + return BPI_VERSION_NONE; + return version; +} + +static void __init parse_bpi_flags(void) +{ + if (efi_bp->flags & BPI_FLAGS_UEFI_SUPPORTED) + set_bit(EFI_BOOT, &efi.flags); + else + clear_bit(EFI_BOOT, &efi.flags); +} + +__init unsigned long legacy_boot_init(unsigned long argc, unsigned long cmdptr, unsigned long bpi) +{ + int ret; + + if (!bpi || argc < 2) + return -1; + efi_bp = (struct boot_params *)bpi; + bpi_version = get_bpi_version(&efi_bp->signature); + pr_info("BPI%d with boot flags %llx.\n", bpi_version, 
efi_bp->flags); + if (bpi_version == BPI_VERSION_NONE) { + if (cpu_has_hypervisor) + pr_err(FW_BUG "Fatal error, bpi ver NONE!\n"); + else + panic(FW_BUG "Fatal error, bpi ver NONE!\n"); + } else if (bpi_version == BPI_VERSION_V2) + parse_bpi_flags(); + + fw_init_cmdline(argc, cmdptr); + ret = bpi_init(); + if (ret) { + pr_err("init legacy firmware error!\n"); + return -1; + } + + return 0; +} + +static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, unsigned long isa_base) +{ + int ret = 0; + unsigned long vaddr; + struct logic_pio_hwaddr *range; + + range = kzalloc(sizeof(*range), GFP_ATOMIC); + if (!range) + return -ENOMEM; + + range->fwnode = fwnode; + range->size = ISA_IOSIZE; + range->hw_start = isa_base; + range->flags = LOGIC_PIO_CPU_MMIO; + + ret = logic_pio_register_range(range); + if (ret) { + kfree(range); + return ret; + } + + if (range->io_start != 0) { + logic_pio_unregister_range(range); + kfree(range); + return -EINVAL; + } + + vaddr = (unsigned long)(PCI_IOBASE + range->io_start); + ret = ioremap_page_range(vaddr, vaddr + range->size, range->hw_start, + pgprot_device(PAGE_KERNEL)); + return ret; +} + +static struct fwnode_handle * __init parse_isa_base(u64 *cpu_addr) +{ + struct device_node *np; + const __be32 *ranges = NULL; + int len; + struct device_node *node; + + for_each_node_by_name(np, "isa") { + node = of_node_get(np); + + if (!node) + break; + + ranges = of_get_property(node, "ranges", &len); + + if (!ranges || (ranges && len > 0)) + break; + } + if (ranges) { + ranges += 2; + *cpu_addr = of_translate_address(np, ranges); + return &np->fwnode; + } + + return NULL; +} + +static int __init register_legacy_isa_io(void) +{ + struct fwnode_handle *fwnode; + u64 cpu_addr; + + if (!acpi_disabled) { + cpu_addr = ISA_PHY_IOBASE; + fwnode = kzalloc(sizeof(*fwnode), GFP_ATOMIC); + } else { + fwnode = parse_isa_base(&cpu_addr); + } + + if (fwnode) + add_legacy_isa_io(fwnode, cpu_addr); + + return 0; +} +arch_initcall(register_legacy_isa_io); diff --git a/arch/loongarch/kernel/legacy_boot.h b/arch/loongarch/kernel/legacy_boot.h new file mode 100644 index 0000000000000000000000000000000000000000..104d8c53bd2dc295d2a1b97845f02cba89b376c7 --- /dev/null +++ b/arch/loongarch/kernel/legacy_boot.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LEGACY_BOOT_H_ +#define __LEGACY_BOOT_H_ +#include +#include +#include +#define ADDRESS_TYPE_SYSRAM 1 +#define ADDRESS_TYPE_RESERVED 2 +#define ADDRESS_TYPE_ACPI 3 +#define ADDRESS_TYPE_NVS 4 +#define ADDRESS_TYPE_PMEM 5 + +#define LOONGSON3_BOOT_MEM_MAP_MAX 128 +#define RT_MAP_START 100 +#define FIX_MAP_ENTRY 32 + +/* mask of the flags in bootparamsinterface */ +#define BPI_FLAGS_UEFI_SUPPORTED BIT(0) +#define BPI_FLAGS_SOC_CPU BIT(1) + +#define LOONGSON_DMA_MASK_BIT 64 +#define LOONGSON_MEM_SIGNATURE "MEM" +#define LOONGSON_VBIOS_SIGNATURE "VBIOS" +#define LOONGSON_EFIBOOT_SIGNATURE "BPI" +#define LOONGSON_SCREENINFO_SIGNATURE "SINFO" +#define LOONGSON_EFIBOOT_VERSION 1000 + +/* Values for Version firmware */ + +enum bpi_vers { + BPI_VERSION_NONE = 0, + BPI_VERSION_V1 = 1000, + BPI_VERSION_V2 = 1001, +}; + +struct boot_params { + u64 signature; /* {"BPIXXXXX"} */ + void *systemtable; + struct _extention_list_hdr *extlist; + u64 flags; +} __packed; + +struct _extention_list_hdr { + u64 signature; + u32 length; + u8 revision; + u8 checksum; + struct _extention_list_hdr *next; +} __packed; + +struct loongsonlist_mem_map { + struct _extention_list_hdr header; /*{"M", "E", "M"}*/ + u8 map_count; + struct 
_loongson_mem_map { + u32 mem_type; + u64 mem_start; + u64 mem_size; + } __packed map[LOONGSON3_BOOT_MEM_MAP_MAX]; +} __packed; + +struct loongsonlist_vbios { + struct _extention_list_hdr header; /* {VBIOS} */ + u64 vbios_addr; +} __packed; + +struct loongsonlist_screeninfo { + struct _extention_list_hdr header; + struct screen_info si; +}; +unsigned long legacy_boot_init(unsigned long argc, + unsigned long cmdptr, unsigned long bpi); +extern int bpi_version; +extern struct boot_params *efi_bp; +extern struct loongsonlist_mem_map *g_mmap; +extern int set_processor_mask(u32 id, u32 flags); +extern int __init setup_legacy_IRQ(void); +extern struct loongson_system_configuration loongson_sysconf; +extern unsigned long long smp_group[MAX_PACKAGES]; +extern int legacy_madt_table_init(void); +extern struct pch_pic *pch_pic_priv[MAX_IO_PICS]; +extern struct irq_domain *get_cpudomain(void); +extern int __init cpuintc_acpi_init(union acpi_subtable_headers *header, + const unsigned long end); +extern int __init +pch_pic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end); +extern int __init +pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end); +extern struct irq_domain *get_pchpic_irq_domain(void); + +extern __init void fw_init_cmdline(unsigned long argc, unsigned long cmdp); +#endif diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c index 2dcb9e003657c848adff71078870fe682451e416..561706cb1e6d1a25e4d41279bff3a51361cd10bb 100644 --- a/arch/loongarch/kernel/machine_kexec.c +++ b/arch/loongarch/kernel/machine_kexec.c @@ -59,6 +59,9 @@ static void kexec_image_info(const struct kimage *kimage) } } +#define MAX_ARGS 64 +#define KEXEC_CMDLINE_SIZE (COMMAND_LINE_SIZE * 2) + int machine_kexec_prepare(struct kimage *kimage) { int i; @@ -70,11 +73,49 @@ int machine_kexec_prepare(struct kimage *kimage) kimage->arch.efi_boot = fw_arg0; kimage->arch.systable_ptr = fw_arg2; + if (!fw_arg2) + pr_err("Small fdt mode is not supported!\n"); + /* Find the command line */ for (i = 0; i < kimage->nr_segments; i++) { if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) { - if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) - kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; + if (fw_arg0 < 2) { + /* New firmware */ + if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) + kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; + } else { + /* Old firmware */ + int argc = 0; + long offt; + char *ptr, *str; + unsigned long *argv; + + /* + * convert command line string to array + * of parameters (as bootloader does). 
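+ * For example, "console=ttyS0 root=/dev/sda1" is split in place into the
+ * NUL-terminated tokens "console=ttyS0" and "root=/dev/sda1", with each
+ * argv[] slot receiving the address of one token (values illustrative).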
+ */ + argv = (unsigned long *)kmalloc(KEXEC_CMDLINE_SIZE, GFP_KERNEL); + if (!argv) + return -ENOMEM; + argv[argc++] = (unsigned long)(KEXEC_CMDLINE_ADDR + KEXEC_CMDLINE_SIZE/2); + str = (char *)argv + KEXEC_CMDLINE_SIZE/2; + + if (copy_from_user(str, kimage->segment[i].buf, KEXEC_CMDLINE_SIZE/2)) { + kfree(argv); + return -EINVAL; + } + + ptr = strchr(str, ' '); + + while (ptr && (argc < MAX_ARGS)) { + *ptr = '\0'; + if (ptr[1] != ' ') { + offt = (long)(ptr - str + 1); + argv[argc++] = (unsigned long)argv + KEXEC_CMDLINE_SIZE/2 + offt; + } + ptr = strchr(ptr + 1, ' '); + } + + kimage->arch.efi_boot = argc; + kimage->arch.cmdline_ptr = (unsigned long)argv; + break; + } break; } } diff --git a/arch/loongarch/kernel/mcount_dyn.S b/arch/loongarch/kernel/mcount_dyn.S index 482aa553aa2d5eb54a460ffa1822d0524e56046b..0c65cf09110cd4cbab6ed06917c52532c3526e1f 100644 --- a/arch/loongarch/kernel/mcount_dyn.S +++ b/arch/loongarch/kernel/mcount_dyn.S @@ -73,6 +73,7 @@ SYM_FUNC_START(ftrace_stub) SYM_FUNC_END(ftrace_stub) SYM_CODE_START(ftrace_common) + UNWIND_HINT_UNDEFINED PTR_ADDI a0, ra, -8 /* arg0: ip */ move a1, t0 /* arg1: parent_ip */ la.pcrel t1, function_trace_op @@ -113,12 +114,14 @@ ftrace_common_return: SYM_CODE_END(ftrace_common) SYM_CODE_START(ftrace_caller) + UNWIND_HINT_UNDEFINED ftrace_regs_entry allregs=0 b ftrace_common SYM_CODE_END(ftrace_caller) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS SYM_CODE_START(ftrace_regs_caller) + UNWIND_HINT_UNDEFINED ftrace_regs_entry allregs=1 b ftrace_common SYM_CODE_END(ftrace_regs_caller) @@ -126,6 +129,7 @@ SYM_CODE_END(ftrace_regs_caller) #ifdef CONFIG_FUNCTION_GRAPH_TRACER SYM_CODE_START(ftrace_graph_caller) + UNWIND_HINT_UNDEFINED PTR_L a0, sp, PT_ERA PTR_ADDI a0, a0, -8 /* arg0: self_addr */ PTR_ADDI a1, sp, PT_R1 /* arg1: parent */ @@ -134,6 +138,7 @@ SYM_CODE_START(ftrace_graph_caller) SYM_CODE_END(ftrace_graph_caller) SYM_CODE_START(return_to_handler) + UNWIND_HINT_UNDEFINED /* Save return value regs */ PTR_ADDI sp, sp, -FGRET_REGS_SIZE PTR_S a0, sp, FGRET_REGS_A0 @@ -155,6 +160,7 @@ SYM_CODE_END(return_to_handler) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS SYM_CODE_START(ftrace_stub_direct_tramp) + UNWIND_HINT_UNDEFINED jr t0 SYM_CODE_END(ftrace_stub_direct_tramp) #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c index aed901c57fb439493c560de777fbc86689f827ae..5fd1bc3333bc36a808830d411b404925660833ab 100644 --- a/arch/loongarch/kernel/mem.c +++ b/arch/loongarch/kernel/mem.c @@ -9,13 +9,35 @@ #include #include #include - +#include "legacy_boot.h" void __init memblock_init(void) { - u32 mem_type; + u32 i, mem_type; u64 mem_start, mem_end, mem_size; efi_memory_desc_t *md; + if (g_mmap) { + /* parse memory information */ + for (i = 0; i < g_mmap->map_count; i++) { + mem_type = g_mmap->map[i].mem_type; + mem_start = g_mmap->map[i].mem_start; + mem_size = g_mmap->map[i].mem_size; + mem_end = mem_start + mem_size; + + switch (mem_type) { + case ADDRESS_TYPE_SYSRAM: + pr_info("add memory region memblock - base: 0x%llx size: 0x%llx\n", mem_start, mem_size); + memblock_add(mem_start, mem_size); + if (max_low_pfn < (mem_end >> PAGE_SHIFT)) + max_low_pfn = mem_end >> PAGE_SHIFT; + break; + } + } + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + memblock_reserve(__pa_symbol(&_text), + __pa_symbol(&_end) - __pa_symbol(&_text)); + return; + } /* Parse memory information */ for_each_efi_memory_desc(md) { mem_type = md->type; diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c index 
b13b2858fe392398823cb8f261665a964250a836..c7d0338d12c15bc15634f69191eb20a44a1d42c5 100644 --- a/arch/loongarch/kernel/module.c +++ b/arch/loongarch/kernel/module.c @@ -20,6 +20,7 @@ #include #include #include +#include static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top) { @@ -515,15 +516,28 @@ static void module_init_ftrace_plt(const Elf_Ehdr *hdr, int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { - const Elf_Shdr *s, *se; const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + const Elf_Shdr *s, *alt = NULL, *orc = NULL, *orc_ip = NULL, *ftrace = NULL; - for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { if (!strcmp(".altinstructions", secstrs + s->sh_name)) - apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size); + alt = s; + if (!strcmp(".orc_unwind", secstrs + s->sh_name)) + orc = s; + if (!strcmp(".orc_unwind_ip", secstrs + s->sh_name)) + orc_ip = s; if (!strcmp(".ftrace_trampoline", secstrs + s->sh_name)) - module_init_ftrace_plt(hdr, s, mod); + ftrace = s; } + if (alt) + apply_alternatives((void *)alt->sh_addr, (void *)alt->sh_addr + alt->sh_size); + + if (orc && orc_ip) + unwind_module_init(mod, (void *)orc_ip->sh_addr, orc_ip->sh_size, (void *)orc->sh_addr, orc->sh_size); + + if (ftrace) + module_init_ftrace_plt(hdr, ftrace, mod); + return 0; } diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c index 8fe21f868f72d4aa8c4b55be5937a037f514991d..97fcbf7678f6f1d2e0dd1d1c8991510bb552b9d6 100644 --- a/arch/loongarch/kernel/numa.c +++ b/arch/loongarch/kernel/numa.c @@ -25,6 +25,7 @@ #include #include #include +#include "legacy_boot.h" int numa_off; struct pglist_data *node_data[MAX_NUMNODES]; @@ -37,7 +38,6 @@ static struct numa_meminfo numa_meminfo; cpumask_t cpus_on_node[MAX_NUMNODES]; cpumask_t phys_cpus_on_node[MAX_NUMNODES]; EXPORT_SYMBOL(cpus_on_node); - /* * apicid, cpu, node mappings */ @@ -275,10 +275,45 @@ static void __init add_numamem_region(u64 start, u64 end, u32 type) static void __init init_node_memblock(void) { - u32 mem_type; + u32 i, mem_type; u64 mem_end, mem_start, mem_size; efi_memory_desc_t *md; + if (g_mmap) { + for (i = 0; i < g_mmap->map_count; i++) { + mem_type = g_mmap->map[i].mem_type; + mem_start = g_mmap->map[i].mem_start; + mem_size = g_mmap->map[i].mem_size; + mem_end = g_mmap->map[i].mem_start + mem_size; + + switch (mem_type) { + case ADDRESS_TYPE_SYSRAM: + mem_start = PFN_ALIGN(mem_start); + mem_end = PFN_ALIGN(mem_end - PAGE_SIZE + 1); + if (mem_start >= mem_end) + break; + add_numamem_region(mem_start, mem_end, EFI_PERSISTENT_MEMORY); + break; + + case ADDRESS_TYPE_ACPI: + mem_start = PFN_ALIGN(mem_start - PAGE_SIZE + 1); + mem_end = PFN_ALIGN(mem_end); + mem_size = mem_end - mem_start; + memblock_add(mem_start, mem_size); + memblock_mark_nomap(mem_start, mem_size); + memblock_set_node(mem_start, mem_size, + &memblock.memory, 0); + memblock_reserve(mem_start, mem_size); + break; + + case ADDRESS_TYPE_RESERVED: + memblock_reserve(mem_start, mem_size); + break; + } + } + return; + } + /* Parse memory information and activate */ for_each_efi_memory_desc(md) { mem_type = md->type; diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c new file mode 100644 index 0000000000000000000000000000000000000000..9cc27c3feb69a0c51b81099a5a89d255ecbb82f3 --- /dev/null +++ b/arch/loongarch/kernel/paravirt.c @@ -0,0 +1,309 @@ +// 
SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include + +static int has_steal_clock; +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; +static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); +DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key); + +static u64 native_steal_clock(int cpu) +{ + return 0; +} + +DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); + +static bool steal_acc = true; + +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} +early_param("no-steal-acc", parse_no_stealacc); + +static u64 paravt_steal_clock(int cpu) +{ + int version; + u64 steal; + struct kvm_steal_time *src; + + src = &per_cpu(steal_time, cpu); + do { + + version = src->version; + virt_rmb(); /* Make sure that the version is read before the steal */ + steal = src->steal; + virt_rmb(); /* Make sure that the steal is read before the next version */ + + } while ((version & 1) || (version != src->version)); + + return steal; +} + +#ifdef CONFIG_SMP +static void pv_send_ipi_single(int cpu, unsigned int action) +{ + int min, old; + irq_cpustat_t *info = &per_cpu(irq_stat, cpu); + + old = atomic_fetch_or(BIT(action), &info->message); + if (old) + return; + + min = cpu_logical_map(cpu); + kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min); +} + +#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) + +static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) +{ + int i, cpu, min = 0, max = 0, old; + __uint128_t bitmap = 0; + irq_cpustat_t *info; + + if (cpumask_empty(mask)) + return; + + action = BIT(action); + for_each_cpu(i, mask) { + info = &per_cpu(irq_stat, i); + old = atomic_fetch_or(action, &info->message); + if (old) + continue; + + cpu = cpu_logical_map(i); + if (!bitmap) { + min = max = cpu; + } else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) { + /* cpu < min, and bitmap still enough */ + bitmap <<= min - cpu; + min = cpu; + } else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) { + /* cpu > min, and bitmap still enough */ + max = cpu > max ? cpu : max; + } else { + /* + * With cpu, bitmap will exceed KVM_IPI_CLUSTER_SIZE, + * send IPI here directly and skip the remaining CPUs. 
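+ *
+ * For example, with KVM_IPI_CLUSTER_SIZE of 128, destination CPUs 0 and
+ * 200 cannot share one bitmap window: the bitmap anchored at CPU 0 is
+ * flushed with a hypercall first, then a new window is started at CPU
+ * 200 (CPU numbers purely illustrative).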
+ */ + kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap, + (unsigned long)(bitmap >> BITS_PER_LONG), min); + min = max = cpu; + bitmap = 0; + } + __set_bit(cpu - min, (unsigned long *)&bitmap); + } + + if (bitmap) + kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap, + (unsigned long)(bitmap >> BITS_PER_LONG), min); +} + +static irqreturn_t pv_ipi_interrupt(int irq, void *dev) +{ + u32 action; + irq_cpustat_t *info; + + /* Clear SWI interrupt */ + clear_csr_estat(1 << INT_SWI0); + info = this_cpu_ptr(&irq_stat); + action = atomic_xchg(&info->message, 0); + + if (action & SMP_RESCHEDULE) { + scheduler_ipi(); + info->ipi_irqs[IPI_RESCHEDULE]++; + } + + if (action & SMP_CALL_FUNCTION) { + generic_smp_call_function_interrupt(); + info->ipi_irqs[IPI_CALL_FUNCTION]++; + } + + return IRQ_HANDLED; +} + +static void pv_init_ipi(void) +{ + int r, swi; + + swi = get_percpu_irq(INT_SWI0); + if (swi < 0) + panic("SWI0 IRQ mapping failed\n"); + irq_set_percpu_devid(swi); + r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat); + if (r < 0) + panic("SWI0 IRQ request failed\n"); +} +#endif + +bool kvm_para_available(void) +{ + int config; + static int hypervisor_type; + + if (!cpu_has_hypervisor) + return false; + + if (!hypervisor_type) { + config = read_cpucfg(CPUCFG_KVM_SIG); + if (!memcmp(&config, KVM_SIGNATURE, 4)) + hypervisor_type = HYPERVISOR_KVM; + } + + return hypervisor_type == HYPERVISOR_KVM; +} + +unsigned int kvm_arch_para_features(void) +{ + static unsigned int feature; + + if (!kvm_para_available()) + return 0; + + if (!feature) + feature = read_cpucfg(CPUCFG_KVM_FEATURE); + + return feature; +} + +int __init pv_ipi_init(void) +{ + if (!kvm_para_has_feature(KVM_FEATURE_IPI)) + return 0; + +#ifdef CONFIG_SMP + smp_ops.init_ipi = pv_init_ipi; + smp_ops.send_ipi_single = pv_send_ipi_single; + smp_ops.send_ipi_mask = pv_send_ipi_mask; +#endif + + return 0; +} + +static int pv_enable_steal_time(void) +{ + int cpu = smp_processor_id(); + unsigned long addr; + struct kvm_steal_time *st; + + if (!has_steal_clock) + return -EPERM; + + st = &per_cpu(steal_time, cpu); + addr = per_cpu_ptr_to_phys(st); + + /* The whole structure kvm_steal_time should be in one page */ + if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) { + pr_warn("Illegal PV steal time addr %lx\n", addr); + return -EFAULT; + } + + addr |= KVM_STEAL_PHYS_VALID; + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr); + + return 0; +} + +static void pv_disable_steal_time(void) +{ + if (has_steal_clock) + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0); +} + +#ifdef CONFIG_SMP +static int pv_time_cpu_online(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_enable_steal_time(); + local_irq_restore(flags); + + return 0; +} + +static int pv_time_cpu_down_prepare(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_disable_steal_time(); + local_irq_restore(flags); + + return 0; +} +#endif + +static void pv_cpu_reboot(void *unused) +{ + pv_disable_steal_time(); +} + +static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) +{ + on_each_cpu(pv_cpu_reboot, NULL, 1); + return NOTIFY_DONE; +} + +static struct notifier_block pv_reboot_nb = { + .notifier_call = pv_reboot_notify, +}; + +int __init pv_time_init(void) +{ + int r; + + if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) + return 0; + + has_steal_clock = 1; + r = pv_enable_steal_time(); + if (r < 0) { + has_steal_clock = 0; + return 0; + 
} + register_reboot_notifier(&pv_reboot_nb); + +#ifdef CONFIG_SMP + r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "loongarch/pv_time:online", + pv_time_cpu_online, pv_time_cpu_down_prepare); + if (r < 0) { + has_steal_clock = 0; + pr_err("Failed to install cpu hotplug callbacks\n"); + return r; + } +#endif + + static_call_update(pv_steal_clock, paravt_steal_clock); + + static_key_slow_inc(¶virt_steal_enabled); +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING + if (steal_acc) + static_key_slow_inc(¶virt_steal_rq_enabled); +#endif + + pr_info("Using paravirt steal-time\n"); + + return 0; +} + +int __init pv_spinlock_init(void) +{ + if (!cpu_has_hypervisor) + return 0; + + static_branch_enable(&virt_spin_lock_key); + + return 0; +} diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c index cac7cba81b65f791cf5d3379dfda2daca01814ec..f86a4b838dd78ef1e1368e9665ec0082328ce6b5 100644 --- a/arch/loongarch/kernel/perf_event.c +++ b/arch/loongarch/kernel/perf_event.c @@ -456,16 +456,6 @@ static void loongarch_pmu_disable(struct pmu *pmu) static DEFINE_MUTEX(pmu_reserve_mutex); static atomic_t active_events = ATOMIC_INIT(0); -static int get_pmc_irq(void) -{ - struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); - - if (d) - return irq_create_mapping(d, INT_PCOV); - - return -EINVAL; -} - static void reset_counters(void *arg); static int __hw_perf_event_init(struct perf_event *event); @@ -473,7 +463,7 @@ static void hw_perf_event_destroy(struct perf_event *event) { if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { on_each_cpu(reset_counters, NULL, 1); - free_irq(get_pmc_irq(), &loongarch_pmu); + free_irq(get_percpu_irq(INT_PCOV), &loongarch_pmu); mutex_unlock(&pmu_reserve_mutex); } } @@ -562,7 +552,7 @@ static int loongarch_pmu_event_init(struct perf_event *event) if (event->cpu >= 0 && !cpu_online(event->cpu)) return -ENODEV; - irq = get_pmc_irq(); + irq = get_percpu_irq(INT_PCOV); flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED; if (!atomic_inc_not_zero(&active_events)) { mutex_lock(&pmu_reserve_mutex); diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index 0eddd4a66b87452929cceb149b2d6d3e20da691b..14e5bb8e57c3e69ad28e68551804595d1acf0211 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -16,6 +16,7 @@ #include #include #include +#include "legacy_boot.h" #define RELOCATED(x) ((void *)((long)x + reloc_offset)) #define RELOCATED_KASLR(x) ((void *)((long)x + random_offset)) @@ -173,7 +174,10 @@ unsigned long __init relocate_kernel(void) void *location_new = _text; /* Default to original kernel start */ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */ - strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); + if (fw_arg0 < 2) + strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); + else + fw_init_cmdline(fw_arg0, TO_CACHE(fw_arg1)); /* OLD BPI parameters */ #ifdef CONFIG_RANDOMIZE_BASE location_new = determine_relocation_address(); diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S index f49f6b053763d1e729b68b870b1b64ffe09d4504..84e6de2fd97354376740f52407834d108fda5fb1 100644 --- a/arch/loongarch/kernel/relocate_kernel.S +++ b/arch/loongarch/kernel/relocate_kernel.S @@ -15,6 +15,7 @@ #include SYM_CODE_START(relocate_new_kernel) + UNWIND_HINT_UNDEFINED /* * a0: EFI boot flag for the new kernel * a1: Command line 
pointer for the new kernel @@ -90,6 +91,7 @@ SYM_CODE_END(relocate_new_kernel) * then start at the entry point from LOONGARCH_IOCSR_MBUF0. */ SYM_CODE_START(kexec_smp_wait) + UNWIND_HINT_UNDEFINED 1: li.w t0, 0x100 /* wait for init loop */ 2: addi.w t0, t0, -1 /* limit mailbox access */ bnez t0, 2b @@ -106,6 +108,5 @@ SYM_CODE_END(kexec_smp_wait) relocate_new_kernel_end: -SYM_DATA_START(relocate_new_kernel_size) - PTR relocate_new_kernel_end - relocate_new_kernel -SYM_DATA_END(relocate_new_kernel_size) + .section ".data" +SYM_DATA(relocate_new_kernel_size, .long relocate_new_kernel_end - relocate_new_kernel) diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c index 1ef8c63835351ba7b04eb4838464927c1ac1c32c..e7282e8de1cda0e9bde69f4cdfc7430d3b0c319c 100644 --- a/arch/loongarch/kernel/reset.c +++ b/arch/loongarch/kernel/reset.c @@ -49,7 +49,8 @@ void machine_power_off(void) #endif do_kernel_power_off(); #ifdef CONFIG_EFI - efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); + if (efi.reset_system) + efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); #endif while (true) { diff --git a/arch/loongarch/kernel/rethook_trampoline.S b/arch/loongarch/kernel/rethook_trampoline.S index bd5772c963382f7bc475c142d4af9ca985357f65..d4ceb2fa2a5ce46372539126da4b3a777b621802 100644 --- a/arch/loongarch/kernel/rethook_trampoline.S +++ b/arch/loongarch/kernel/rethook_trampoline.S @@ -76,6 +76,7 @@ .endm SYM_CODE_START(arch_rethook_trampoline) + UNWIND_HINT_UNDEFINED addi.d sp, sp, -PT_SIZE save_all_base_regs diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 7ef1c1ff1fc44cb64dd330acb8b7c232deecf6c1..37d61e240201ee8f110a6db1cb838613061084f1 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -48,6 +48,8 @@ #include #include #include +#include +#include "legacy_boot.h" #define SMBIOS_BIOSSIZE_OFFSET 0x09 #define SMBIOS_BIOSEXTERN_OFFSET 0x13 @@ -70,6 +72,8 @@ EXPORT_SYMBOL(cpu_data); struct loongson_board_info b_info; static const char dmi_empty_string[] = " "; +static int possible_cpus; +static bool bsp_added; /* * Setup information @@ -137,9 +141,22 @@ static void __init parse_cpu_table(const struct dmi_header *dm) static void __init parse_bios_table(const struct dmi_header *dm) { + int bios_extern; char *dmi_data = (char *)dm; + bios_extern = *(dmi_data + SMBIOS_BIOSEXTERN_OFFSET); b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6; + + if (bpi_version == BPI_VERSION_V2) { + if ((!!(efi_bp->flags & BPI_FLAGS_UEFI_SUPPORTED)) != (!!(bios_extern & LOONGSON_EFI_ENABLE))) + pr_err("There is a conflict of definitions between efi_bp->flags and smbios\n"); + return; + } + + if (bios_extern & LOONGSON_EFI_ENABLE) + set_bit(EFI_BOOT, &efi.flags); + else + clear_bit(EFI_BOOT, &efi.flags); } static void __init find_tokens(const struct dmi_header *dm, void *dummy) @@ -171,12 +188,14 @@ bool wc_enabled = false; EXPORT_SYMBOL(wc_enabled); +static int wc_arg = -1; + static int __init setup_writecombine(char *p) { if (!strcmp(p, "on")) - wc_enabled = true; + wc_arg = true; else if (!strcmp(p, "off")) - wc_enabled = false; + wc_arg = false; else pr_warn("Unknown writecombine setting \"%s\".\n", p); @@ -358,10 +377,75 @@ static void __init bootcmdline_init(char **cmdline_p) *cmdline_p = boot_command_line; } +int topo_get_cpu(int physid) +{ + int i; + + for (i = 0; i < possible_cpus; i++) + if (cpu_logical_map(i) == physid) + break; + + if (i == possible_cpus) + return -ENOENT; + + return i; +} + +int 
topo_add_cpu(int physid) +{ + int cpu; + + if (!bsp_added && (physid == loongson_sysconf.boot_cpu_id)) { + bsp_added = true; + return 0; + } + + cpu = topo_get_cpu(physid); + if (cpu >= 0) { + pr_warn("Adding duplicated physical cpuid 0x%x\n", physid); + return -EEXIST; + } + + if (possible_cpus >= nr_cpu_ids) + return -ERANGE; + + __cpu_logical_map[possible_cpus] = physid; + cpu = possible_cpus++; + return cpu; +} + +static void __init topo_init(void) +{ + loongson_sysconf.boot_cpu_id = read_csr_cpuid(); + __cpu_logical_map[0] = loongson_sysconf.boot_cpu_id; + possible_cpus++; +} + +static void __init writecombine_detect(void) +{ + u64 cpuname; + + if (wc_arg >= 0) { + wc_enabled = wc_arg; + return; + } + + cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME); + cpuname &= 0x0000ffffffffffff; + switch (cpuname) { + case 0x0000303030364333: + wc_enabled = false; + break; + default: + break; + } +} + void __init platform_init(void) { arch_reserve_vmcore(); arch_parse_crashkernel(); + topo_init(); #ifdef CONFIG_ACPI_TABLE_UPGRADE acpi_table_upgrade(); @@ -381,6 +465,8 @@ void __init platform_init(void) smbios_parse(); pr_info("The BIOS Version: %s\n", b_info.bios_version); + writecombine_detect(); + pr_info("WriteCombine: %s\n", wc_enabled ? "on":"off"); efi_runtime_init(); } @@ -609,6 +695,7 @@ static void __init prefill_possible_map(void) void __init setup_arch(char **cmdline_p) { cpu_probe(); + unwind_init(); init_environ(); efi_init(); @@ -617,12 +704,16 @@ void __init setup_arch(char **cmdline_p) pagetable_init(); bootcmdline_init(cmdline_p); parse_early_param(); - reserve_initrd_mem(); + /* The small fdt method should be skipped directly to avoid two reserved operations. */ + if (fw_arg2) + reserve_initrd_mem(); platform_init(); arch_mem_init(cmdline_p); resource_init(); + jump_label_init(); /* Initialise the static keys for paravirtualization */ + #ifdef CONFIG_SMP plat_smp_setup(); prefill_possible_map(); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 9dbe7907a96124f0efeaf2c48bb152a76e4de5b5..d4717fc9e1db9cc29b2aed0afbb92f4a3eb7636d 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -29,9 +29,11 @@ #include #include #include +#include #include #include #include +#include "legacy_boot.h" int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ EXPORT_SYMBOL(__cpu_number_map); @@ -66,14 +68,10 @@ static cpumask_t cpu_core_setup_map; struct secondary_data cpuboot_data; static DEFINE_PER_CPU(int, cpu_state); -enum ipi_msg_type { - IPI_RESCHEDULE, - IPI_CALL_FUNCTION, -}; - static const char *ipi_types[NR_IPI] __tracepoint_string = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNCTION] = "Function call interrupts", + [IPI_CLEAR_VECTOR] = "Clear vector interrupts", }; void show_ipi_list(struct seq_file *p, int prec) @@ -190,24 +188,19 @@ static u32 ipi_read_clear(int cpu) static void ipi_write_action(int cpu, u32 action) { - unsigned int irq = 0; - - while ((irq = ffs(action))) { - uint32_t val = IOCSR_IPI_SEND_BLOCKING; + uint32_t val; - val |= (irq - 1); - val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT); - iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND); - action &= ~BIT(irq - 1); - } + val = IOCSR_IPI_SEND_BLOCKING | action; + val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT); + iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND); } -void loongson_send_ipi_single(int cpu, unsigned int action) +static void loongson_send_ipi_single(int cpu, unsigned int action) { ipi_write_action(cpu_logical_map(cpu), (u32)action); } -void 
loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) +static void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) { unsigned int i; @@ -215,6 +208,16 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) ipi_write_action(cpu_logical_map(i), (u32)action); } +void arch_send_call_function_single_ipi(int cpu) +{ + smp_ops.send_ipi_single(cpu, ACTION_CALL_FUNCTION); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + smp_ops.send_ipi_mask(mask, ACTION_CALL_FUNCTION); +} + /* * This function sends a 'reschedule' IPI to another CPU. * it goes straight through and wastes no time serializing @@ -222,11 +225,11 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) */ void arch_smp_send_reschedule(int cpu) { - loongson_send_ipi_single(cpu, SMP_RESCHEDULE); + smp_ops.send_ipi_single(cpu, ACTION_RESCHEDULE); } EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); -irqreturn_t loongson_ipi_interrupt(int irq, void *dev) +static irqreturn_t loongson_ipi_interrupt(int irq, void *dev) { unsigned int action; unsigned int cpu = smp_processor_id(); @@ -243,9 +246,34 @@ irqreturn_t loongson_ipi_interrupt(int irq, void *dev) per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++; } + if (action & SMP_CLEAR_VECTOR) { + complete_irq_moving(); + per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++; + } + return IRQ_HANDLED; } +static void loongson_init_ipi(void) +{ + int r, ipi_irq; + + ipi_irq = get_percpu_irq(INT_IPI); + if (ipi_irq < 0) + panic("IPI IRQ mapping failed\n"); + + irq_set_percpu_devid(ipi_irq); + r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &irq_stat); + if (r < 0) + panic("IPI IRQ request failed\n"); +} + +struct smp_ops smp_ops = { + .init_ipi = loongson_init_ipi, + .send_ipi_single = loongson_send_ipi_single, + .send_ipi_mask = loongson_send_ipi_mask, +}; + static void __init fdt_smp_setup(void) { #ifdef CONFIG_OF @@ -260,11 +288,9 @@ static void __init fdt_smp_setup(void) if (cpuid >= nr_cpu_ids) continue; - if (cpuid == loongson_sysconf.boot_cpu_id) { - cpu = 0; - } else { - cpu = cpumask_next_zero(-1, cpu_present_mask); - } + cpu = topo_add_cpu(cpuid); + if (cpu < 0) + continue; num_processors++; set_cpu_possible(cpu, true); @@ -288,6 +314,7 @@ void __init loongson_smp_setup(void) cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package; cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package; + pv_ipi_init(); iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN); pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus); } @@ -312,17 +339,18 @@ void __init loongson_prepare_cpus(unsigned int max_cpus) */ void loongson_boot_secondary(int cpu, struct task_struct *idle) { - unsigned long entry; + unsigned long entry = (unsigned long)&smpboot_entry; pr_info("Booting CPU#%d...\n", cpu); - entry = __pa_symbol((unsigned long)&smpboot_entry); + if (!efi_bp) + entry = __pa_symbol((unsigned long)&smpboot_entry); cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle); cpuboot_data.thread_info = (unsigned long)task_thread_info(idle); csr_mail_send(entry, cpu_logical_map(cpu), 0); - loongson_send_ipi_single(cpu, SMP_BOOT_CPU); + loongson_send_ipi_single(cpu, ACTION_BOOT_CPU); } /* @@ -331,7 +359,7 @@ void loongson_boot_secondary(int cpu, struct task_struct *idle) void loongson_init_secondary(void) { unsigned int cpu = smp_processor_id(); - unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | + unsigned int imask = ECFGF_SIP0 | 
ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER; change_csr_ecfg(ECFG0_IM, imask); @@ -372,8 +400,7 @@ int loongson_cpu_disable(void) clear_cpu_sibling_map(cpu); calculate_cpu_foreign_map(); local_irq_save(flags); - irq_migrate_all_off_this_cpu(); - clear_csr_ecfg(ECFG0_IM); + fixup_irqs(); local_irq_restore(flags); local_flush_tlb_all(); @@ -448,7 +475,7 @@ core_initcall(ipi_pm_init); #endif /* Preload SMP state for boot cpu */ -void smp_prepare_boot_cpu(void) +void __init smp_prepare_boot_cpu(void) { unsigned int cpu, node, rr_node; @@ -481,6 +508,8 @@ void smp_prepare_boot_cpu(void) rr_node = next_node_in(rr_node, node_online_map); } } + + pv_spinlock_init(); } /* called from main before smp_init() */ diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c index f623feb2129f12829b623d77c84d2ba6f677a689..9a038d1070d73b1efcf195ea7ba02e574bca104a 100644 --- a/arch/loongarch/kernel/stacktrace.c +++ b/arch/loongarch/kernel/stacktrace.c @@ -29,6 +29,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, regs->csr_era = thread_saved_ra(task); } regs->regs[1] = 0; + regs->regs[22] = 0; } for (unwind_start(&state, task, regs); @@ -39,6 +40,46 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, } } +int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, + void *cookie, struct task_struct *task) +{ + unsigned long addr; + struct pt_regs dummyregs; + struct pt_regs *regs = &dummyregs; + struct unwind_state state; + + if (task == current) { + regs->regs[3] = (unsigned long)__builtin_frame_address(0); + regs->csr_era = (unsigned long)__builtin_return_address(0); + } else { + regs->regs[3] = thread_saved_fp(task); + regs->csr_era = thread_saved_ra(task); + } + regs->regs[1] = 0; + regs->regs[22] = 0; + + for (unwind_start(&state, task, regs); + !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) { + addr = unwind_get_return_address(&state); + + /* + * A NULL or invalid return address probably means there's some + * generated code which __kernel_text_address() doesn't know about. 
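+ * A reliable trace must resolve every frame, so abort the walk and let + * the caller treat the trace as unreliable instead of guessing. 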
+ */ + if (!addr) + return -EINVAL; + + if (!consume_entry(cookie, addr)) + return -EINVAL; + } + + /* Check for stack corruption */ + if (unwind_error(&state)) + return -EINVAL; + + return 0; +} + static int copy_stack_frame(unsigned long fp, struct stack_frame *frame) { diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index e7015f7b70e37c4cabf736512c50a998455bbdf9..46d7d40c87e38e097af74385be1e518bf95d5251 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -15,6 +15,7 @@ #include #include +#include #include u64 cpu_clock_freq; @@ -123,16 +124,6 @@ void sync_counter(void) csr_write64(init_offset, LOONGARCH_CSR_CNTC); } -static int get_timer_irq(void) -{ - struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); - - if (d) - return irq_create_mapping(d, INT_TI); - - return -EINVAL; -} - int constant_clockevent_init(void) { unsigned int cpu = smp_processor_id(); @@ -142,7 +133,7 @@ int constant_clockevent_init(void) static int irq = 0, timer_irq_installed = 0; if (!timer_irq_installed) { - irq = get_timer_irq(); + irq = get_percpu_irq(INT_TI); if (irq < 0) pr_err("Failed to map irq %d (timer)\n", irq); } @@ -224,4 +215,5 @@ void __init time_init(void) constant_clockevent_init(); constant_clocksource_init(); + pv_time_init(); } diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index d59052c03d9b7ec32d94f0669b9c4f87d7eeaa37..c57b4134f3e84bafbd1d5ffd4c9936adfe40f217 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -53,6 +53,32 @@ #include "access-helper.h" +void *exception_table[EXCCODE_INT_START] = { + [0 ... EXCCODE_INT_START - 1] = handle_reserved, + + [EXCCODE_TLBI] = handle_tlb_load, + [EXCCODE_TLBL] = handle_tlb_load, + [EXCCODE_TLBS] = handle_tlb_store, + [EXCCODE_TLBM] = handle_tlb_modify, + [EXCCODE_TLBNR] = handle_tlb_protect, + [EXCCODE_TLBNX] = handle_tlb_protect, + [EXCCODE_TLBPE] = handle_tlb_protect, + [EXCCODE_ADE] = handle_ade, + [EXCCODE_ALE] = handle_ale, + [EXCCODE_BCE] = handle_bce, + [EXCCODE_SYS] = handle_sys, + [EXCCODE_BP] = handle_bp, + [EXCCODE_INE] = handle_ri, + [EXCCODE_IPE] = handle_ri, + [EXCCODE_FPDIS] = handle_fpu, + [EXCCODE_LSXDIS] = handle_lsx, + [EXCCODE_LASXDIS] = handle_lasx, + [EXCCODE_FPE] = handle_fpe, + [EXCCODE_WATCH] = handle_watch, + [EXCCODE_BTDIS] = handle_lbt, +}; +EXPORT_SYMBOL_GPL(exception_table); + static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, const char *loglvl, bool user) { @@ -1155,19 +1181,9 @@ void __init trap_init(void) for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++) set_handler(i * VECSIZE, handle_vint, VECSIZE); - set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE); - set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE); - set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE); - set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE); - set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE); - set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE); - set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE); - set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE); - set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE); - set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE); - set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE); - set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE); - set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE); + /* Set exception vector handler */ + for (i = EXCCODE_ADE; i <= EXCCODE_BTDIS; i++) 
+ set_handler(i * VECSIZE, exception_table[i], VECSIZE); cache_error_setup(); diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c new file mode 100644 index 0000000000000000000000000000000000000000..b25722876331792f5ac218fa4376f36318156f7f --- /dev/null +++ b/arch/loongarch/kernel/unwind_orc.c @@ -0,0 +1,528 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +ORC_HEADER; + +#define orc_warn(fmt, ...) \ + printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__) + +extern int __start_orc_unwind_ip[]; +extern int __stop_orc_unwind_ip[]; +extern struct orc_entry __start_orc_unwind[]; +extern struct orc_entry __stop_orc_unwind[]; + +static bool orc_init __ro_after_init; +static unsigned int lookup_num_blocks __ro_after_init; + +/* Fake frame pointer entry -- used as a fallback for generated code */ +static struct orc_entry orc_fp_entry = { + .sp_reg = ORC_REG_FP, + .sp_offset = 16, + .fp_reg = ORC_REG_PREV_SP, + .fp_offset = -16, + .ra_reg = ORC_REG_PREV_SP, + .ra_offset = -8, + .type = ORC_TYPE_CALL +}; + +/* + * If we crash with IP==0, the last successfully executed instruction + * was probably an indirect function call with a NULL function pointer, + * and we don't have unwind information for NULL. + * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function + * pointer into its parent and then continue normally from there. + */ +static struct orc_entry orc_null_entry = { + .sp_reg = ORC_REG_SP, + .sp_offset = sizeof(long), + .fp_reg = ORC_REG_UNDEFINED, + .type = ORC_TYPE_CALL +}; + +static inline unsigned long orc_ip(const int *ip) +{ + return (unsigned long)ip + *ip; +} + +static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table, + unsigned int num_entries, unsigned long ip) +{ + int *first = ip_table; + int *mid = first, *found = first; + int *last = ip_table + num_entries - 1; + + if (!num_entries) + return NULL; + + /* + * Do a binary range search to find the rightmost duplicate of a given + * starting address. Some entries are section terminators which are + * "weak" entries for ensuring there are no gaps. They should be + * ignored when they conflict with a real entry. + */ + while (first <= last) { + mid = first + ((last - first) / 2); + + if (orc_ip(mid) <= ip) { + found = mid; + first = mid + 1; + } else + last = mid - 1; + } + + return u_table + (found - ip_table); +} + +#ifdef CONFIG_MODULES +static struct orc_entry *orc_module_find(unsigned long ip) +{ + struct module *mod; + + mod = __module_address(ip); + if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip) + return NULL; + + return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, mod->arch.num_orcs, ip); +} +#else +static struct orc_entry *orc_module_find(unsigned long ip) +{ + return NULL; +} +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE +static struct orc_entry *orc_find(unsigned long ip); + +/* + * Ftrace dynamic trampolines do not have orc entries of their own. + * But they are copies of the ftrace entries that are static and + * defined in ftrace_*.S, which do have orc entries. + * + * If the unwinder comes across a ftrace trampoline, then find the + * ftrace function that was used to create it, and use that ftrace + * function's orc entry, as the placement of the return code in + * the stack will be identical. 
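+ * This works because the trampoline is a byte-for-byte copy, so an + * offset of ip within the trampoline equals the same offset within + * the static caller whose ORC data is looked up instead. 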
+ */ +static struct orc_entry *orc_ftrace_find(unsigned long ip) +{ + struct ftrace_ops *ops; + unsigned long tramp_addr, offset; + + ops = ftrace_ops_trampoline(ip); + if (!ops) + return NULL; + + /* Set tramp_addr to the start of the code copied by the trampoline */ + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) + tramp_addr = (unsigned long)ftrace_regs_caller; + else + tramp_addr = (unsigned long)ftrace_caller; + + /* Now place tramp_addr to the location within the trampoline ip is at */ + offset = ip - ops->trampoline; + tramp_addr += offset; + + /* Prevent unlikely recursion */ + if (ip == tramp_addr) + return NULL; + + return orc_find(tramp_addr); +} +#else +static struct orc_entry *orc_ftrace_find(unsigned long ip) +{ + return NULL; +} +#endif + +static struct orc_entry *orc_find(unsigned long ip) +{ + static struct orc_entry *orc; + + if (ip == 0) + return &orc_null_entry; + + /* For non-init vmlinux addresses, use the fast lookup table: */ + if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) { + unsigned int idx, start, stop; + + idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; + + if (unlikely((idx >= lookup_num_blocks-1))) { + orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n", + idx, lookup_num_blocks, (void *)ip); + return NULL; + } + + start = orc_lookup[idx]; + stop = orc_lookup[idx + 1] + 1; + + if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) || + (__start_orc_unwind + stop > __stop_orc_unwind))) { + orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n", + idx, lookup_num_blocks, start, stop, (void *)ip); + return NULL; + } + + return __orc_find(__start_orc_unwind_ip + start, + __start_orc_unwind + start, stop - start, ip); + } + + /* vmlinux .init slow lookup: */ + if (is_kernel_inittext(ip)) + return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, + __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); + + /* Module lookup: */ + orc = orc_module_find(ip); + if (orc) + return orc; + + return orc_ftrace_find(ip); +} + +#ifdef CONFIG_MODULES + +static DEFINE_MUTEX(sort_mutex); +static int *cur_orc_ip_table = __start_orc_unwind_ip; +static struct orc_entry *cur_orc_table = __start_orc_unwind; + +static void orc_sort_swap(void *_a, void *_b, int size) +{ + int delta = _b - _a; + int *a = _a, *b = _b, tmp; + struct orc_entry *orc_a, *orc_b; + + /* Swap the .orc_unwind_ip entries: */ + tmp = *a; + *a = *b + delta; + *b = tmp - delta; + + /* Swap the corresponding .orc_unwind entries: */ + orc_a = cur_orc_table + (a - cur_orc_ip_table); + orc_b = cur_orc_table + (b - cur_orc_ip_table); + swap(*orc_a, *orc_b); +} + +static int orc_sort_cmp(const void *_a, const void *_b) +{ + const int *a = _a, *b = _b; + unsigned long a_val = orc_ip(a); + unsigned long b_val = orc_ip(b); + struct orc_entry *orc_a; + + if (a_val > b_val) + return 1; + if (a_val < b_val) + return -1; + + /* + * The "weak" section terminator entries need to always be first + * to ensure the lookup code skips them in favor of real entries. + * These terminator entries exist to handle any gaps created by + * whitelisted .o files which didn't get objtool generation. + */ + orc_a = cur_orc_table + (a - cur_orc_ip_table); + + return orc_a->type == ORC_TYPE_UNDEFINED ? 
-1 : 1; +} + +void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size, + void *_orc, size_t orc_size) +{ + int *orc_ip = _orc_ip; + struct orc_entry *orc = _orc; + unsigned int num_entries = orc_ip_size / sizeof(int); + + WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 || + orc_size % sizeof(*orc) != 0 || + num_entries != orc_size / sizeof(*orc)); + + /* + * The 'cur_orc_*' globals allow the orc_sort_swap() callback to + * associate an .orc_unwind_ip table entry with its corresponding + * .orc_unwind entry so they can both be swapped. + */ + mutex_lock(&sort_mutex); + cur_orc_ip_table = orc_ip; + cur_orc_table = orc; + sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap); + mutex_unlock(&sort_mutex); + + mod->arch.orc_unwind_ip = orc_ip; + mod->arch.orc_unwind = orc; + mod->arch.num_orcs = num_entries; +} +#endif + +void __init unwind_init(void) +{ + int i; + size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind; + size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip; + size_t num_entries = orc_ip_size / sizeof(int); + struct orc_entry *orc; + + if (!num_entries || orc_ip_size % sizeof(int) != 0 || + orc_size % sizeof(struct orc_entry) != 0 || + num_entries != orc_size / sizeof(struct orc_entry)) { + orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n"); + return; + } + + /* + * Note, the orc_unwind and orc_unwind_ip tables were already + * sorted at build time via the 'sorttable' tool. + * It's ready for binary search straight away, no need to sort it. + */ + + /* Initialize the fast lookup table: */ + lookup_num_blocks = orc_lookup_end - orc_lookup; + for (i = 0; i < lookup_num_blocks-1; i++) { + orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, + num_entries, LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i)); + if (!orc) { + orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n"); + return; + } + + orc_lookup[i] = orc - __start_orc_unwind; + } + + /* Initialize the ending block: */ + orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries, LOOKUP_STOP_IP); + if (!orc) { + orc_warn("WARNING: Corrupt .orc_unwind table. 
Disabling unwinder.\n"); + return; + } + orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind; + + orc_init = true; +} + +static inline bool on_stack(struct stack_info *info, unsigned long addr, size_t len) +{ + unsigned long begin = info->begin; + unsigned long end = info->end; + + return (info->type != STACK_TYPE_UNKNOWN && + addr >= begin && addr < end && addr + len > begin && addr + len <= end); +} + +static bool stack_access_ok(struct unwind_state *state, unsigned long addr, size_t len) +{ + struct stack_info *info = &state->stack_info; + + if (on_stack(info, addr, len)) + return true; + + return !get_stack_info(addr, state->task, info) && on_stack(info, addr, len); +} + +unsigned long unwind_get_return_address(struct unwind_state *state) +{ + return __unwind_get_return_address(state); +} +EXPORT_SYMBOL_GPL(unwind_get_return_address); + +void unwind_start(struct unwind_state *state, struct task_struct *task, + struct pt_regs *regs) +{ + __unwind_start(state, task, regs); + state->type = UNWINDER_ORC; + if (!unwind_done(state) && !__kernel_text_address(state->pc)) + unwind_next_frame(state); +} +EXPORT_SYMBOL_GPL(unwind_start); + +static bool is_entry_func(unsigned long addr) +{ + extern u32 kernel_entry; + extern u32 kernel_entry_end; + + return addr >= (unsigned long)&kernel_entry && addr < (unsigned long)&kernel_entry_end; +} + +static inline unsigned long bt_address(unsigned long ra) +{ + extern unsigned long eentry; + + if (__kernel_text_address(ra)) + return ra; + + if (__module_text_address(ra)) + return ra; + + if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) { + unsigned long func; + unsigned long type = (ra - eentry) / VECSIZE; + unsigned long offset = (ra - eentry) % VECSIZE; + + switch (type) { + case 0 ... EXCCODE_INT_START - 1: + func = (unsigned long)exception_table[type]; + break; + case EXCCODE_INT_START ... EXCCODE_INT_END: + func = (unsigned long)handle_vint; + break; + default: + func = (unsigned long)handle_reserved; + break; + } + + return func + offset; + } + + return ra; +} + +bool unwind_next_frame(struct unwind_state *state) +{ + unsigned long *p, pc; + struct pt_regs *regs; + struct orc_entry *orc; + struct stack_info *info = &state->stack_info; + + if (unwind_done(state)) + return false; + + /* Don't let modules unload while we're reading their ORC data. */ + preempt_disable(); + + if (is_entry_func(state->pc)) + goto end; + + orc = orc_find(state->pc); + if (!orc) { + /* + * As a fallback, try to assume this code uses a frame pointer. + * This is useful for generated code, like BPF, which ORC + * doesn't know about. This is just a guess, so the rest of + * the unwind is no longer considered reliable. + */ + orc = &orc_fp_entry; + state->error = true; + } else { + if (orc->type == ORC_TYPE_UNDEFINED) + goto err; + + if (orc->type == ORC_TYPE_END_OF_STACK) + goto end; + } + + switch (orc->sp_reg) { + case ORC_REG_SP: + if (info->type == STACK_TYPE_IRQ && state->sp == info->end) + orc->type = ORC_TYPE_REGS; + else + state->sp = state->sp + orc->sp_offset; + break; + case ORC_REG_FP: + state->sp = state->fp; + break; + default: + orc_warn("unknown SP base reg %d at %pB\n", orc->sp_reg, (void *)state->pc); + goto err; + } + + switch (orc->fp_reg) { + case ORC_REG_PREV_SP: + p = (unsigned long *)(state->sp + orc->fp_offset); + if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long))) + goto err; + + state->fp = *p; + break; + case ORC_REG_UNDEFINED: + /* Nothing. 
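ORC_REG_UNDEFINED means this frame does not restore the frame pointer, so state->fp is deliberately left untouched. 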
*/ + break; + default: + orc_warn("unknown FP base reg %d at %pB\n", orc->fp_reg, (void *)state->pc); + goto err; + } + + switch (orc->type) { + case ORC_TYPE_CALL: + if (orc->ra_reg == ORC_REG_PREV_SP) { + p = (unsigned long *)(state->sp + orc->ra_offset); + if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long))) + goto err; + + pc = unwind_graph_addr(state, *p, state->sp); + pc -= LOONGARCH_INSN_SIZE; + } else if (orc->ra_reg == ORC_REG_UNDEFINED) { + if (!state->ra || state->ra == state->pc) + goto err; + + pc = unwind_graph_addr(state, state->ra, state->sp); + pc -= LOONGARCH_INSN_SIZE; + state->ra = 0; + } else { + orc_warn("unknown ra base reg %d at %pB\n", orc->ra_reg, (void *)state->pc); + goto err; + } + break; + case ORC_TYPE_REGS: + if (info->type == STACK_TYPE_IRQ && state->sp == info->end) + regs = (struct pt_regs *)info->next_sp; + else + regs = (struct pt_regs *)state->sp; + + if (!stack_access_ok(state, (unsigned long)regs, sizeof(*regs))) + goto err; + + if ((info->end == (unsigned long)regs + sizeof(*regs)) && + !regs->regs[3] && !regs->regs[1]) + goto end; + + if (user_mode(regs)) + goto end; + + pc = regs->csr_era; + if (!__kernel_text_address(pc)) + goto err; + + state->sp = regs->regs[3]; + state->ra = regs->regs[1]; + state->fp = regs->regs[22]; + get_stack_info(state->sp, state->task, info); + + break; + default: + orc_warn("unknown .orc_unwind entry type %d at %pB\n", orc->type, (void *)state->pc); + goto err; + } + + state->pc = bt_address(pc); + if (!state->pc) { + pr_err("cannot find unwind pc at %pK\n", (void *)pc); + goto err; + } + + if (!__kernel_text_address(state->pc)) + goto err; + + preempt_enable(); + return true; + +err: + state->error = true; + +end: + preempt_enable(); + state->stack_info.type = STACK_TYPE_UNKNOWN; + return false; +} +EXPORT_SYMBOL_GPL(unwind_next_frame); diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S index d5afd0c80a49990ef8c7c3b499014d033d940bf9..3c7595342730ed2c1e76c343e16553180fbd167d 100644 --- a/arch/loongarch/kernel/vmlinux.lds.S +++ b/arch/loongarch/kernel/vmlinux.lds.S @@ -2,6 +2,7 @@ #include #include #include +#include #define PAGE_SIZE _PAGE_SIZE #define RO_EXCEPTION_TABLE_ALIGN 4 @@ -123,6 +124,8 @@ SECTIONS } #endif + ORC_UNWIND_TABLE + .sdata : { *(.sdata) } diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..e899d96f4da61c923cf879d2a535defdf0061b93 --- /dev/null +++ b/arch/loongarch/kvm/Kconfig @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# KVM configuration +# + +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + help + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and + disabled. 
+ +if VIRTUALIZATION + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support" + depends on AS_HAS_LVZ_EXTENSION + depends on HAVE_KVM + select HAVE_KVM_DIRTY_RING_ACQ_REL + select HAVE_KVM_EVENTFD + select HAVE_KVM_VCPU_ASYNC_IOCTL + select HAVE_KVM_IRQ_ROUTING + select HAVE_KVM_IRQCHIP + select HAVE_KVM_IRQFD + select HAVE_KVM_MSI + select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select KVM_GENERIC_HARDWARE_ENABLING + select KVM_MMIO + select KVM_XFER_TO_GUEST_WORK + select SCHED_INFO + select MMU_NOTIFIER + select PREEMPT_NOTIFIERS + select KVM_VFIO + help + Support hosting virtualized guest machines using + hardware virtualization extensions. You will need + a processor equipped with virtualization extensions. + + If unsure, say N. + +endif # VIRTUALIZATION diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..f363e4b6fcf35ae6e615a40555ec6e744fa84687 --- /dev/null +++ b/arch/loongarch/kvm/Makefile @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for LoongArch KVM support +# + +ccflags-y += -I $(srctree)/$(src) + +include $(srctree)/virt/kvm/Makefile.kvm + +obj-$(CONFIG_KVM) += kvm.o + +kvm-y += exit.o +kvm-y += interrupt.o +kvm-y += main.o +kvm-y += mmu.o +kvm-y += switch.o +kvm-y += timer.o +kvm-y += tlb.o +kvm-y += vcpu.o +kvm-y += vm.o +kvm-y += intc/ipi.o +kvm-y += intc/extioi.o +kvm-y += intc/pch_pic.o +kvm-y += irqfd.o + +CFLAGS_exit.o += $(call cc-option,-Wno-override-init,) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c new file mode 100644 index 0000000000000000000000000000000000000000..5579ee8afedc6a9a95e96ff247f70e43bbd2d1c2 --- /dev/null +++ b/arch/loongarch/kvm/exit.c @@ -0,0 +1,926 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "trace.h" + +static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) +{ + int rd, rj; + unsigned int index, ret; + + if (inst.reg2_format.opcode != cpucfg_op) + return EMULATE_FAIL; + + rd = inst.reg2_format.rd; + rj = inst.reg2_format.rj; + ++vcpu->stat.cpucfg_exits; + index = vcpu->arch.gprs[rj]; + + /* + * By LoongArch Reference Manual 2.2.10.5 + * Return value is 0 for undefined CPUCFG index + * + * Disable preemption since hw gcsr is accessed + */ + preempt_disable(); + switch (index) { + case 0 ... 
(KVM_MAX_CPUCFG_REGS - 1): + vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; + if (cpu_has_ptw && (index == LOONGARCH_CPUCFG2)) + vcpu->arch.gprs[rd] |= CPUCFG2_PTW; + break; + case CPUCFG_KVM_SIG: + /* CPUCFG emulation between 0x40000000 -- 0x400000ff */ + vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE; + break; + case CPUCFG_KVM_FEATURE: + ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; + vcpu->arch.gprs[rd] = ret; + break; + default: + vcpu->arch.gprs[rd] = 0; + break; + } + preempt_enable(); + + return EMULATE_DONE; +} + +static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid) +{ + unsigned long val = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + /* + * From LoongArch Reference Manual Volume 1 Chapter 4.2.1 + * For undefined CSR id, return value is 0 + */ + if (get_gcsr_flag(csrid) & SW_GCSR) + val = kvm_read_sw_gcsr(csr, csrid); + else + pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return val; +} + +static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val) +{ + unsigned long old = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(csrid) & SW_GCSR) { + old = kvm_read_sw_gcsr(csr, csrid); + kvm_write_sw_gcsr(csr, csrid, val); + } else + pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return old; +} + +static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid, + unsigned long csr_mask, unsigned long val) +{ + unsigned long old = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(csrid) & SW_GCSR) { + old = kvm_read_sw_gcsr(csr, csrid); + val = (old & ~csr_mask) | (val & csr_mask); + kvm_write_sw_gcsr(csr, csrid, val); + old = old & csr_mask; + } else + pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return old; +} + +static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) +{ + unsigned int rd, rj, csrid; + unsigned long csr_mask, val = 0; + + /* + * CSR value mask imm + * rj = 0 means csrrd + * rj = 1 means csrwr + * rj != 0,1 means csrxchg + */ + rd = inst.reg2csr_format.rd; + rj = inst.reg2csr_format.rj; + csrid = inst.reg2csr_format.csr; + + if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) { + if (kvm_guest_has_pmu(&vcpu->arch)) { + vcpu->arch.pc -= 4; + kvm_make_request(KVM_REQ_PMU, vcpu); + return EMULATE_DONE; + } + } + + /* Process CSR ops */ + switch (rj) { + case 0: /* process csrrd */ + val = kvm_emu_read_csr(vcpu, csrid); + vcpu->arch.gprs[rd] = val; + break; + case 1: /* process csrwr */ + val = vcpu->arch.gprs[rd]; + val = kvm_emu_write_csr(vcpu, csrid, val); + vcpu->arch.gprs[rd] = val; + break; + default: /* process csrxchg */ + val = vcpu->arch.gprs[rd]; + csr_mask = vcpu->arch.gprs[rj]; + val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val); + vcpu->arch.gprs[rd] = val; + } + + return EMULATE_DONE; +} + +int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + int ret; + unsigned long *val; + u32 addr, rd, rj, opcode; + + /* + * Each IOCSR with different opcode + */ + rd = inst.reg2_format.rd; + rj = inst.reg2_format.rj; + opcode = inst.reg2_format.opcode; + addr = vcpu->arch.gprs[rj]; + ret = EMULATE_DO_IOCSR; + run->iocsr_io.phys_addr = addr; + run->iocsr_io.is_write = 0; + val = &vcpu->arch.gprs[rd]; + + /* LoongArch is Little endian */ + switch (opcode) { + case iocsrrdb_op: + run->iocsr_io.len = 1; + break; + case iocsrrdh_op: + run->iocsr_io.len = 2; + break; + case 
iocsrrdw_op: + run->iocsr_io.len = 4; + break; + case iocsrrdd_op: + run->iocsr_io.len = 8; + break; + case iocsrwrb_op: + run->iocsr_io.len = 1; + run->iocsr_io.is_write = 1; + break; + case iocsrwrh_op: + run->iocsr_io.len = 2; + run->iocsr_io.is_write = 1; + break; + case iocsrwrw_op: + run->iocsr_io.len = 4; + run->iocsr_io.is_write = 1; + break; + case iocsrwrd_op: + run->iocsr_io.len = 8; + run->iocsr_io.is_write = 1; + break; + case CPUCFG_KVM_FEATURE: + vcpu->arch.gprs[rd] = KVM_FEATURE_IPI; + break; + default: + ret = EMULATE_FAIL; + return ret; + } + + if (run->iocsr_io.is_write) { + if (!kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val)) + ret = EMULATE_DONE; + else + /* Save data and let user space to write it */ + memcpy(run->iocsr_io.data, val, run->iocsr_io.len); + } else { + if (!kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val)) + ret = EMULATE_DONE; + else + /* Save register id for iocsr read completion */ + vcpu->arch.io_gpr = rd; + } + + return ret; +} + +int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + enum emulation_result er = EMULATE_DONE; + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; + + switch (run->iocsr_io.len) { + case 1: + *gpr = *(s8 *)run->iocsr_io.data; + break; + case 2: + *gpr = *(s16 *)run->iocsr_io.data; + break; + case 4: + *gpr = *(s32 *)run->iocsr_io.data; + break; + case 8: + *gpr = *(s64 *)run->iocsr_io.data; + break; + default: + kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n", + run->iocsr_io.len, vcpu->arch.badv); + er = EMULATE_FAIL; + break; + } + + return er; +} + +int kvm_emu_idle(struct kvm_vcpu *vcpu) +{ + ++vcpu->stat.idle_exits; + trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE); + + if (!kvm_arch_vcpu_runnable(vcpu)) + kvm_vcpu_halt(vcpu); + + return EMULATE_DONE; +} + +static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) +{ + unsigned long curr_pc; + larch_inst inst; + enum emulation_result er = EMULATE_DONE; + struct kvm_run *run = vcpu->run; + + /* Fetch the instruction */ + inst.word = vcpu->arch.badi; + curr_pc = vcpu->arch.pc; + update_pc(&vcpu->arch); + + trace_kvm_exit_gspr(vcpu, inst.word); + er = EMULATE_FAIL; + switch (((inst.word >> 24) & 0xff)) { + case 0x0: /* CPUCFG GSPR */ + er = kvm_emu_cpucfg(vcpu, inst); + break; + case 0x4: /* CSR{RD,WR,XCHG} GSPR */ + er = kvm_handle_csr(vcpu, inst); + break; + case 0x6: /* Cache, Idle and IOCSR GSPR */ + switch (((inst.word >> 22) & 0x3ff)) { + case 0x18: /* Cache GSPR */ + er = EMULATE_DONE; + trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE); + break; + case 0x19: /* Idle/IOCSR GSPR */ + switch (((inst.word >> 15) & 0x1ffff)) { + case 0xc90: /* IOCSR GSPR */ + er = kvm_emu_iocsr(inst, run, vcpu); + break; + case 0xc91: /* Idle GSPR */ + er = kvm_emu_idle(vcpu); + break; + default: + er = EMULATE_FAIL; + break; + } + break; + default: + er = EMULATE_FAIL; + break; + } + break; + default: + er = EMULATE_FAIL; + break; + } + + /* Rollback PC only if emulation was unsuccessful */ + if (er == EMULATE_FAIL) { + kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n", + curr_pc, __func__, inst.word); + + kvm_arch_vcpu_dump_regs(vcpu); + vcpu->arch.pc = curr_pc; + } + + return er; +} + +/* + * Trigger GSPR: + * 1) Execute CPUCFG instruction; + * 2) Execute CACOP/IDLE instructions; + * 3) Access to unimplemented CSRs/IOCSRs. 
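+ * All of these raise an EXCCODE_GSPR exception, which this handler + * forwards to kvm_trap_handle_gspr() for instruction emulation. 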
+ */ +static int kvm_handle_gspr(struct kvm_vcpu *vcpu) +{ + int ret = RESUME_GUEST; + enum emulation_result er = EMULATE_DONE; + + er = kvm_trap_handle_gspr(vcpu); + + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else if (er == EMULATE_DO_MMIO) { + vcpu->run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } else if (er == EMULATE_DO_IOCSR) { + vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR; + ret = RESUME_HOST; + } else { + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + ret = RESUME_GUEST; + } + + return ret; +} + +int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) +{ + int ret; + unsigned int op8, opcode, rd; + struct kvm_run *run = vcpu->run; + + run->mmio.phys_addr = vcpu->arch.badv; + vcpu->mmio_needed = 2; /* signed */ + op8 = (inst.word >> 24) & 0xff; + ret = EMULATE_DO_MMIO; + + switch (op8) { + case 0x24 ... 0x27: /* ldptr.w/d process */ + rd = inst.reg2i14_format.rd; + opcode = inst.reg2i14_format.opcode; + + switch (opcode) { + case ldptrw_op: + run->mmio.len = 4; + break; + case ldptrd_op: + run->mmio.len = 8; + break; + default: + break; + } + break; + case 0x28 ... 0x2e: /* ld.b/h/w/d, ld.bu/hu/wu process */ + rd = inst.reg2i12_format.rd; + opcode = inst.reg2i12_format.opcode; + + switch (opcode) { + case ldb_op: + run->mmio.len = 1; + break; + case ldbu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 1; + break; + case ldh_op: + run->mmio.len = 2; + break; + case ldhu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 2; + break; + case ldw_op: + run->mmio.len = 4; + break; + case ldwu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 4; + break; + case ldd_op: + run->mmio.len = 8; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x38: /* ldx.b/h/w/d, ldx.bu/hu/wu process */ + rd = inst.reg3_format.rd; + opcode = inst.reg3_format.opcode; + + switch (opcode) { + case ldxb_op: + run->mmio.len = 1; + break; + case ldxbu_op: + run->mmio.len = 1; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxh_op: + run->mmio.len = 2; + break; + case ldxhu_op: + run->mmio.len = 2; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxw_op: + run->mmio.len = 4; + break; + case ldxwu_op: + run->mmio.len = 4; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxd_op: + run->mmio.len = 8; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + default: + ret = EMULATE_FAIL; + } + + if (ret == EMULATE_DO_MMIO) { + /* + * if mmio device such as pch pic is emulated in KVM, + * it need not return to user space to handle the mmio + * exception. 
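+ * Only accesses that no in-kernel device on KVM_MMIO_BUS claims are + * forwarded to user space via KVM_EXIT_MMIO. 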
+ */ + ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, + run->mmio.len, &vcpu->arch.gprs[rd]); + if (!ret) { + update_pc(&vcpu->arch); + vcpu->mmio_needed = 0; + return EMULATE_DONE; + } + + /* Set for kvm_complete_mmio_read() use */ + vcpu->arch.io_gpr = rd; + run->mmio.is_write = 0; + vcpu->mmio_is_write = 0; + trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, run->mmio.len, + run->mmio.phys_addr, NULL); + return EMULATE_DO_MMIO; + } + + kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + vcpu->mmio_needed = 0; + + return ret; +} + +int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + enum emulation_result er = EMULATE_DONE; + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; + + /* Update with new PC */ + update_pc(&vcpu->arch); + switch (run->mmio.len) { + case 1: + if (vcpu->mmio_needed == 2) + *gpr = *(s8 *)run->mmio.data; + else + *gpr = *(u8 *)run->mmio.data; + break; + case 2: + if (vcpu->mmio_needed == 2) + *gpr = *(s16 *)run->mmio.data; + else + *gpr = *(u16 *)run->mmio.data; + break; + case 4: + if (vcpu->mmio_needed == 2) + *gpr = *(s32 *)run->mmio.data; + else + *gpr = *(u32 *)run->mmio.data; + break; + case 8: + *gpr = *(s64 *)run->mmio.data; + break; + default: + kvm_err("Bad MMIO length: %d, addr is 0x%lx\n", + run->mmio.len, vcpu->arch.badv); + er = EMULATE_FAIL; + break; + } + + trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, + run->mmio.phys_addr, run->mmio.data); + + return er; +} + +int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) +{ + int ret; + unsigned int rd, op8, opcode; + unsigned long curr_pc, rd_val = 0; + struct kvm_run *run = vcpu->run; + void *data = run->mmio.data; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + update_pc(&vcpu->arch); + + op8 = (inst.word >> 24) & 0xff; + run->mmio.phys_addr = vcpu->arch.badv; + ret = EMULATE_DO_MMIO; + switch (op8) { + case 0x24 ... 0x27: /* stptr.w/d process */ + rd = inst.reg2i14_format.rd; + opcode = inst.reg2i14_format.opcode; + + switch (opcode) { + case stptrw_op: + run->mmio.len = 4; + *(unsigned int *)data = vcpu->arch.gprs[rd]; + break; + case stptrd_op: + run->mmio.len = 8; + *(unsigned long *)data = vcpu->arch.gprs[rd]; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x28 ... 
0x2e: /* st.b/h/w/d process */ + rd = inst.reg2i12_format.rd; + opcode = inst.reg2i12_format.opcode; + rd_val = vcpu->arch.gprs[rd]; + + switch (opcode) { + case stb_op: + run->mmio.len = 1; + *(unsigned char *)data = rd_val; + break; + case sth_op: + run->mmio.len = 2; + *(unsigned short *)data = rd_val; + break; + case stw_op: + run->mmio.len = 4; + *(unsigned int *)data = rd_val; + break; + case std_op: + run->mmio.len = 8; + *(unsigned long *)data = rd_val; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x38: /* stx.b/h/w/d process */ + rd = inst.reg3_format.rd; + opcode = inst.reg3_format.opcode; + + switch (opcode) { + case stxb_op: + run->mmio.len = 1; + *(unsigned char *)data = vcpu->arch.gprs[rd]; + break; + case stxh_op: + run->mmio.len = 2; + *(unsigned short *)data = vcpu->arch.gprs[rd]; + break; + case stxw_op: + run->mmio.len = 4; + *(unsigned int *)data = vcpu->arch.gprs[rd]; + break; + case stxd_op: + run->mmio.len = 8; + *(unsigned long *)data = vcpu->arch.gprs[rd]; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + default: + ret = EMULATE_FAIL; + } + + if (ret == EMULATE_DO_MMIO) { + /* + * if mmio device such as pch pic is emulated in KVM, + * it need not return to user space to handle the mmio + * exception. + */ + ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, + run->mmio.len, data); + if (!ret) + return EMULATE_DONE; + + run->mmio.is_write = 1; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 1; + trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, + run->mmio.phys_addr, data); + return EMULATE_DO_MMIO; + } + + vcpu->arch.pc = curr_pc; + kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + /* Rollback PC if emulation was unsuccessful */ + + return ret; +} + +static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) +{ + int ret; + larch_inst inst; + enum emulation_result er = EMULATE_DONE; + struct kvm_run *run = vcpu->run; + unsigned long badv = vcpu->arch.badv; + + ret = kvm_handle_mm_fault(vcpu, badv, write); + if (ret) { + /* Treat as MMIO */ + inst.word = vcpu->arch.badi; + if (write) { + er = kvm_emu_mmio_write(vcpu, inst); + } else { + /* A code fetch fault doesn't count as an MMIO */ + if (kvm_is_ifetch_fault(&vcpu->arch)) { + kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF); + return RESUME_GUEST; + } + + er = kvm_emu_mmio_read(vcpu, inst); + } + } + + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else if (er == EMULATE_DO_MMIO) { + run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } else { + kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM); + ret = RESUME_GUEST; + } + + return ret; +} + +static int kvm_handle_read_fault(struct kvm_vcpu *vcpu) +{ + return kvm_handle_rdwr_fault(vcpu, false); +} + +static int kvm_handle_write_fault(struct kvm_vcpu *vcpu) +{ + return kvm_handle_rdwr_fault(vcpu, true); +} + +/** + * kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use fpu which hasn't been allowed + * by the root context. + */ +static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + if (!kvm_guest_has_fpu(&vcpu->arch)) { + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + return RESUME_GUEST; + } + + /* + * If guest FPU not present, the FPU operation should have been + * treated as a reserved instruction! 
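+ * (that case is handled by the EXCCODE_INE injection above). 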
+ * If FPU already in use, we shouldn't get this at all. + */ + if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) { + kvm_err("%s internal error\n", __func__); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + return RESUME_HOST; + } + + kvm_own_fpu(vcpu); + + return RESUME_GUEST; +} + +static long kvm_save_notify(struct kvm_vcpu *vcpu) +{ + unsigned long id, data; + + id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1); + data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2); + switch (id) { + case BIT(KVM_FEATURE_STEAL_TIME): + if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID)) + return KVM_HCALL_INVALID_PARAMETER; + + vcpu->arch.st.guest_addr = data; + if (!(data & KVM_STEAL_PHYS_VALID)) + return 0; + + vcpu->arch.st.last_steal = current->sched_info.run_delay; + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + return 0; + default: + return KVM_HCALL_INVALID_CODE; + } + + return KVM_HCALL_INVALID_CODE; +} + +/* + * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root. + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use LSX when it is disabled in the root + * context. + */ +static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) +{ + if (kvm_own_lsx(vcpu)) + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + +/* + * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root. + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use LASX when it is disabled in the root + * context. + */ +static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) +{ + if (kvm_own_lasx(vcpu)) + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + +static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu) +{ + if (kvm_own_lbt(vcpu)) + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + +static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu) +{ + unsigned int min, cpu, i; + unsigned long ipi_bitmap; + struct kvm_vcpu *dest; + + min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3); + for (i = 0; i < 2; i++, min += BITS_PER_LONG) { + ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i); + if (!ipi_bitmap) + continue; + + cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG); + while (cpu < BITS_PER_LONG) { + dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min); + cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1); + if (!dest) + continue; + + /* Send SWI0 to dest vcpu to emulate IPI interrupt */ + kvm_queue_irq(dest, INT_SWI0); + kvm_vcpu_kick(dest); + } + } + + return 0; +} + +/* + * Hypercall emulation always returns to the guest; the caller should check the return value. 
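+ * The result is reported to the guest in register A0, e.g. + * KVM_HCALL_SUCCESS or KVM_HCALL_INVALID_CODE. 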
+ */ +static void kvm_handle_service(struct kvm_vcpu *vcpu) +{ + long ret = KVM_HCALL_INVALID_CODE; + unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0); + + switch (func) { + case KVM_HCALL_FUNC_IPI: + if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) { + kvm_send_pv_ipi(vcpu); + ret = KVM_HCALL_SUCCESS; + } + break; + case KVM_HCALL_FUNC_NOTIFY: + if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)) + ret = kvm_save_notify(vcpu); + break; + default: + break; + } + + kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret); +} + +static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) +{ + int ret; + larch_inst inst; + unsigned int code; + + inst.word = vcpu->arch.badi; + code = inst.reg0i15_format.immediate; + ret = RESUME_GUEST; + + switch (code) { + case KVM_HCALL_SERVICE: + vcpu->stat.hypercall_exits++; + kvm_handle_service(vcpu); + break; + case KVM_HCALL_SWDBG: + /* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */ + if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) { + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + ret = RESUME_HOST; + break; + } + fallthrough; + default: + /* Treat it as a noop instruction, only set the return value */ + kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE); + break; + } + + if (ret == RESUME_GUEST) + update_pc(&vcpu->arch); + + return ret; +} + +/* + * LoongArch KVM callback handler for unimplemented guest exits + */ +static int kvm_fault_ni(struct kvm_vcpu *vcpu) +{ + unsigned int ecode, inst; + unsigned long estat, badv; + + /* Fetch the instruction */ + inst = vcpu->arch.badi; + badv = vcpu->arch.badv; + estat = vcpu->arch.host_estat; + ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; + kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n", + ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat()); + kvm_arch_vcpu_dump_regs(vcpu); + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + +static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { + [0 ... EXCCODE_INT_START - 1] = kvm_fault_ni, + [EXCCODE_TLBI] = kvm_handle_read_fault, + [EXCCODE_TLBL] = kvm_handle_read_fault, + [EXCCODE_TLBS] = kvm_handle_write_fault, + [EXCCODE_TLBM] = kvm_handle_write_fault, + [EXCCODE_FPDIS] = kvm_handle_fpu_disabled, + [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled, + [EXCCODE_LASXDIS] = kvm_handle_lasx_disabled, + [EXCCODE_BTDIS] = kvm_handle_lbt_disabled, + [EXCCODE_GSPR] = kvm_handle_gspr, + [EXCCODE_HVC] = kvm_handle_hypercall, +}; + +int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault) +{ + return kvm_fault_tables[fault](vcpu); +} diff --git a/arch/loongarch/kvm/intc/extioi.c b/arch/loongarch/kvm/intc/extioi.c new file mode 100644 index 0000000000000000000000000000000000000000..5327066f16aec95ca383f127e1aa1f048fa73a10 --- /dev/null +++ b/arch/loongarch/kvm/intc/extioi.c @@ -0,0 +1,804 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +#define loongarch_ext_irq_lock(s, flags) spin_lock_irqsave(&s->lock, flags) +#define loongarch_ext_irq_unlock(s, flags) spin_unlock_irqrestore(&s->lock, flags) + +static void extioi_update_irq(struct loongarch_extioi *s, int irq, int level) +{ + int ipnum, cpu, found, irq_index, irq_mask; + struct kvm_interrupt vcpu_irq; + struct kvm_vcpu *vcpu; + + ipnum = s->ipmap.reg_u8[irq / 32]; + ipnum = count_trailing_zeros(ipnum); + ipnum = (ipnum >= 0 && ipnum < 4) ? 
ipnum : 0; + + cpu = s->sw_coremap[irq]; + vcpu = kvm_get_vcpu(s->kvm, cpu); + irq_index = irq / 32; + /* length of accessing core isr is 4 bytes */ + irq_mask = 1 << (irq & 0x1f); + + if (level) { + /* nothing to do if the irq is not enabled */ + if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0) + return; + s->coreisr.reg_u32[cpu][irq_index] |= irq_mask; + found = find_first_bit(s->sw_coreisr[cpu][ipnum], EXTIOI_IRQS); + set_bit(irq, s->sw_coreisr[cpu][ipnum]); + } else { + s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask; + clear_bit(irq, s->sw_coreisr[cpu][ipnum]); + found = find_first_bit(s->sw_coreisr[cpu][ipnum], EXTIOI_IRQS); + } + + if (found < EXTIOI_IRQS) + /* another irq is still pending on this ip line, the parent irq level need not change */ + return; + + vcpu_irq.irq = level ? INT_HWI0 + ipnum : -(INT_HWI0 + ipnum); + kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq); +} + +static void extioi_set_sw_coreisr(struct loongarch_extioi *s) +{ + int ipnum, cpu, irq_index, irq_mask, irq; + + for (irq = 0; irq < EXTIOI_IRQS; irq++) { + ipnum = s->ipmap.reg_u8[irq / 32]; + ipnum = count_trailing_zeros(ipnum); + ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0; + irq_index = irq / 32; + /* length of accessing core isr is 4 bytes */ + irq_mask = 1 << (irq & 0x1f); + + cpu = s->coremap.reg_u8[irq]; + if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask)) + set_bit(irq, s->sw_coreisr[cpu][ipnum]); + else + clear_bit(irq, s->sw_coreisr[cpu][ipnum]); + } +} + +void extioi_set_irq(struct loongarch_extioi *s, int irq, int level) +{ + unsigned long *isr = (unsigned long *)s->isr.reg_u8; + unsigned long flags; + + level ? set_bit(irq, isr) : clear_bit(irq, isr); + if (!level) + return; + loongarch_ext_irq_lock(s, flags); + extioi_update_irq(s, irq, level); + loongarch_ext_irq_unlock(s, flags); +} + +static inline void extioi_enable_irq(struct kvm_vcpu *vcpu, struct loongarch_extioi *s, + int index, u8 mask, int level) +{ + u8 val; + int irq; + + val = mask & s->isr.reg_u8[index]; + irq = ffs(val); + while (irq != 0) { + /* + * an enable bit changed while the pending bit is set, + * re-evaluate the irq level from the pending bits + */ + extioi_update_irq(s, irq - 1 + index * 8, level); + val &= ~(1 << (irq - 1)); + irq = ffs(val); + } +} + +static int loongarch_extioi_writeb(struct kvm_vcpu *vcpu, + struct loongarch_extioi *s, + gpa_t addr, int len, const void *val) +{ + int index, irq, ret = 0; + u8 data, old_data, cpu; + u8 coreisr, old_coreisr; + gpa_t offset; + + data = *(u8 *)val; + offset = addr - EXTIOI_BASE; + + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = (offset - EXTIOI_NODETYPE_START); + s->nodetype.reg_u8[index] = data; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END: + /* + * ipmap cannot be set at runtime, can be set only at the beginning + * of intr driver, need not update upper irq level + */ + index = (offset - EXTIOI_IPMAP_START); + s->ipmap.reg_u8[index] = data; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = (offset - EXTIOI_ENABLE_START); + old_data = s->enable.reg_u8[index]; + s->enable.reg_u8[index] = data; + /* + * 1: enable irq. + * update irq when isr is set. + */ + data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index]; + extioi_enable_irq(vcpu, s, index, data, 1); + /* + * 0: disable irq. + * update irq when isr is set. + */ + data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index]; + extioi_enable_irq(vcpu, s, index, data, 0); + break; + case EXTIOI_BOUNCE_START ... 
EXTIOI_BOUNCE_END: + /* do not emulate hw bounced irq routing */ + index = offset - EXTIOI_BOUNCE_START; + s->bounce.reg_u8[index] = data; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = (offset - EXTIOI_COREISR_START); + /* using attrs to get current cpu index */ + cpu = vcpu->vcpu_id; + coreisr = data; + old_coreisr = s->coreisr.reg_u8[cpu][index]; + /* write 1 to clear interrupt */ + s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr; + coreisr &= old_coreisr; + irq = ffs(coreisr); + while (irq != 0) { + extioi_update_irq(s, irq - 1 + index * 8, 0); + coreisr &= ~(1 << (irq - 1)); + irq = ffs(coreisr); + } + break; + case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END: + irq = offset - EXTIOI_COREMAP_START; + index = irq; + s->coremap.reg_u8[index] = data; + + cpu = data & 0xff; + cpu = ffs(cpu) - 1; + cpu = (cpu >= 4) ? 0 : cpu; + + if (s->sw_coremap[irq] == cpu) + break; + + if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) { + /* + * lower irq at old cpu and raise irq at new cpu + */ + extioi_update_irq(s, irq, 0); + s->sw_coremap[irq] = cpu; + extioi_update_irq(s, irq, 1); + } else + s->sw_coremap[irq] = cpu; + + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int loongarch_extioi_writew(struct kvm_vcpu *vcpu, + struct loongarch_extioi *s, + gpa_t addr, int len, const void *val) +{ + int i, index, irq, ret = 0; + u8 cpu; + u32 data, old_data; + u32 coreisr, old_coreisr; + gpa_t offset; + + data = *(u32 *)val; + offset = addr - EXTIOI_BASE; + + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = (offset - EXTIOI_NODETYPE_START) >> 2; + s->nodetype.reg_u32[index] = data; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END: + /* + * ipmap cannot be set at runtime, can be set only at the beginning + * of intr driver, need not update upper irq level + */ + index = (offset - EXTIOI_IPMAP_START) >> 2; + s->ipmap.reg_u32[index] = data; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = (offset - EXTIOI_ENABLE_START) >> 2; + old_data = s->enable.reg_u32[index]; + s->enable.reg_u32[index] = data; + /* + * 1: enable irq. + * update irq when isr is set. + */ + data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index]; + index = index << 2; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + + extioi_enable_irq(vcpu, s, index + i, mask, 1); + } + /* + * 0: disable irq. + * update irq when isr is set. + */ + data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index]; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + + extioi_enable_irq(vcpu, s, index, mask, 0); + } + break; + case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END: + /* do not emulate hw bounced irq routing */ + index = (offset - EXTIOI_BOUNCE_START) >> 2; + s->bounce.reg_u32[index] = data; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = (offset - EXTIOI_COREISR_START) >> 2; + /* using attrs to get current cpu index */ + cpu = vcpu->vcpu_id; + coreisr = data; + old_coreisr = s->coreisr.reg_u32[cpu][index]; + /* write 1 to clear interrupt */ + s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr; + coreisr &= old_coreisr; + irq = ffs(coreisr); + while (irq != 0) { + extioi_update_irq(s, irq - 1 + index * 32, 0); + coreisr &= ~(1 << (irq - 1)); + irq = ffs(coreisr); + } + break; + case EXTIOI_COREMAP_START ... 
EXTIOI_COREMAP_END: + irq = offset - EXTIOI_COREMAP_START; + index = irq >> 2; + + s->coremap.reg_u32[index] = data; + + for (i = 0; i < sizeof(data); i++) { + cpu = data & 0xff; + cpu = ffs(cpu) - 1; + cpu = (cpu >= 4) ? 0 : cpu; + data = data >> 8; + + if (s->sw_coremap[irq + i] == cpu) + continue; + + if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) { + /* + * lower irq at old cpu and raise irq at new cpu + */ + extioi_update_irq(s, irq + i, 0); + s->sw_coremap[irq + i] = cpu; + extioi_update_irq(s, irq + i, 1); + } else + s->sw_coremap[irq + i] = cpu; + } + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int loongarch_extioi_writel(struct kvm_vcpu *vcpu, + struct loongarch_extioi *s, + gpa_t addr, int len, const void *val) +{ + int i, index, irq, bits, ret = 0; + u8 cpu; + u64 data, old_data; + u64 coreisr, old_coreisr; + gpa_t offset; + + data = *(u64 *)val; + offset = addr - EXTIOI_BASE; + + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = (offset - EXTIOI_NODETYPE_START) >> 3; + s->nodetype.reg_u64[index] = data; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END: + /* + * ipmap cannot be set at runtime, can be set only at the beginning + * of intr driver, need not update upper irq level + */ + index = (offset - EXTIOI_IPMAP_START) >> 3; + s->ipmap.reg_u64 = data; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = (offset - EXTIOI_ENABLE_START) >> 3; + old_data = s->enable.reg_u64[index]; + s->enable.reg_u64[index] = data; + /* + * 1: enable irq. + * update irq when isr is set. + */ + data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index]; + index = index << 3; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + + extioi_enable_irq(vcpu, s, index + i, mask, 1); + } + /* + * 0: disable irq. + * update irq when isr is set. + */ + data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index]; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + + extioi_enable_irq(vcpu, s, index, mask, 0); + } + break; + case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END: + /* do not emulate hw bounced irq routing */ + index = (offset - EXTIOI_BOUNCE_START) >> 3; + s->bounce.reg_u64[index] = data; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = (offset - EXTIOI_COREISR_START) >> 3; + /* using attrs to get current cpu index */ + cpu = vcpu->vcpu_id; + coreisr = data; + old_coreisr = s->coreisr.reg_u64[cpu][index]; + /* write 1 to clear interrupt */ + s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr; + coreisr &= old_coreisr; + + bits = sizeof(u64) * 8; + irq = find_first_bit((void *)&coreisr, bits); + while (irq < bits) { + extioi_update_irq(s, irq + index * bits, 0); + bitmap_clear((void *)&coreisr, irq, 1); + irq = find_first_bit((void *)&coreisr, bits); + } + break; + case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END: + irq = offset - EXTIOI_COREMAP_START; + index = irq >> 3; + + s->coremap.reg_u64[index] = data; + + for (i = 0; i < sizeof(data); i++) { + cpu = data & 0xff; + cpu = ffs(cpu) - 1; + cpu = (cpu >= 4) ? 
0 : cpu; + data = data >> 8; + + if (s->sw_coremap[irq + i] == cpu) + continue; + + if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) { + /* + * lower irq at old cpu and raise irq at new cpu + */ + extioi_update_irq(s, irq + i, 0); + s->sw_coremap[irq + i] = cpu; + extioi_update_irq(s, irq + i, 1); + } else + s->sw_coremap[irq + i] = cpu; + } + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int kvm_loongarch_extioi_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + int ret; + struct loongarch_extioi *extioi = vcpu->kvm->arch.extioi; + unsigned long flags; + + if (!extioi) { + kvm_err("%s: extioi irqchip not valid!\n", __func__); + return -EINVAL; + } + + vcpu->kvm->stat.extioi_write_exits++; + loongarch_ext_irq_lock(extioi, flags); + + switch (len) { + case 1: + ret = loongarch_extioi_writeb(vcpu, extioi, addr, len, val); + break; + case 4: + ret = loongarch_extioi_writew(vcpu, extioi, addr, len, val); + break; + case 8: + ret = loongarch_extioi_writel(vcpu, extioi, addr, len, val); + break; + default: + WARN_ONCE(1, "%s: Abnormal address access:addr 0x%llx,size %d\n", + __func__, addr, len); + } + + loongarch_ext_irq_unlock(extioi, flags); + + + return ret; +} + +static int loongarch_extioi_readb(struct kvm_vcpu *vcpu, struct loongarch_extioi *s, + gpa_t addr, int len, void *val) +{ + int index, ret = 0; + gpa_t offset; + u64 data; + + offset = addr - EXTIOI_BASE; + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = offset - EXTIOI_NODETYPE_START; + data = s->nodetype.reg_u8[index]; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END: + index = offset - EXTIOI_IPMAP_START; + data = s->ipmap.reg_u8[index]; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = offset - EXTIOI_ENABLE_START; + data = s->enable.reg_u8[index]; + break; + case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END: + index = offset - EXTIOI_BOUNCE_START; + data = s->bounce.reg_u8[index]; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = offset - EXTIOI_COREISR_START; + data = s->coreisr.reg_u8[vcpu->vcpu_id][index]; + break; + case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END: + index = offset - EXTIOI_COREMAP_START; + data = s->coremap.reg_u8[index]; + break; + default: + ret = -EINVAL; + break; + } + + *(u8 *)val = data; + + return ret; +} + +static int loongarch_extioi_readw(struct kvm_vcpu *vcpu, struct loongarch_extioi *s, + gpa_t addr, int len, void *val) +{ + int index, ret = 0; + gpa_t offset; + u64 data; + + offset = addr - EXTIOI_BASE; + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = (offset - EXTIOI_NODETYPE_START) >> 2; + data = s->nodetype.reg_u32[index]; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END: + index = (offset - EXTIOI_IPMAP_START) >> 2; + data = s->ipmap.reg_u32[index]; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = (offset - EXTIOI_ENABLE_START) >> 2; + data = s->enable.reg_u32[index]; + break; + case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END: + index = (offset - EXTIOI_BOUNCE_START) >> 2; + data = s->bounce.reg_u32[index]; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = (offset - EXTIOI_COREISR_START) >> 2; + data = s->coreisr.reg_u32[vcpu->vcpu_id][index]; + break; + case EXTIOI_COREMAP_START ... 
EXTIOI_COREMAP_END: + index = (offset - EXTIOI_COREMAP_START) >> 2; + data = s->coremap.reg_u32[index]; + break; + default: + ret = -EINVAL; + break; + } + + *(u32 *)val = data; + + return ret; +} + +static int loongarch_extioi_readl(struct kvm_vcpu *vcpu, struct loongarch_extioi *s, + gpa_t addr, int len, void *val) +{ + int index, ret = 0; + gpa_t offset; + u64 data; + + offset = addr - EXTIOI_BASE; + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = (offset - EXTIOI_NODETYPE_START) >> 3; + data = s->nodetype.reg_u64[index]; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END: + index = (offset - EXTIOI_IPMAP_START) >> 3; + data = s->ipmap.reg_u64; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = (offset - EXTIOI_ENABLE_START) >> 3; + data = s->enable.reg_u64[index]; + break; + case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END: + index = (offset - EXTIOI_BOUNCE_START) >> 3; + data = s->bounce.reg_u64[index]; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = (offset - EXTIOI_COREISR_START) >> 3; + data = s->coreisr.reg_u64[vcpu->vcpu_id][index]; + break; + case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END: + index = (offset - EXTIOI_COREMAP_START) >> 3; + data = s->coremap.reg_u64[index]; + break; + default: + ret = -EINVAL; + break; + } + + *(u64 *)val = data; + + return ret; +} + +static int kvm_loongarch_extioi_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + int ret; + struct loongarch_extioi *extioi = vcpu->kvm->arch.extioi; + unsigned long flags; + + if (!extioi) { + kvm_err("%s: extioi irqchip not valid!\n", __func__); + return -EINVAL; + } + + vcpu->kvm->stat.extioi_read_exits++; + loongarch_ext_irq_lock(extioi, flags); + + switch (len) { + case 1: + ret = loongarch_extioi_readb(vcpu, extioi, addr, len, val); + break; + case 4: + ret = loongarch_extioi_readw(vcpu, extioi, addr, len, val); + break; + case 8: + ret = loongarch_extioi_readl(vcpu, extioi, addr, len, val); + break; + default: + WARN_ONCE(1, "%s: Abnormal address access:addr 0x%llx,size %d\n", + __func__, addr, len); + } + + loongarch_ext_irq_unlock(extioi, flags); + + return ret; +} + +static const struct kvm_io_device_ops kvm_loongarch_extioi_ops = { + .read = kvm_loongarch_extioi_read, + .write = kvm_loongarch_extioi_write, +}; + +static int kvm_loongarch_extioi_regs_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + bool is_write) +{ + int len, addr; + void __user *data; + void *p = NULL; + struct loongarch_extioi *s; + unsigned long flags; + + s = dev->kvm->arch.extioi; + addr = attr->attr; + data = (void __user *)attr->addr; + + loongarch_ext_irq_lock(s, flags); + switch (addr) { + case EXTIOI_NODETYPE_START: + p = s->nodetype.reg_u8; + len = sizeof(s->nodetype); + break; + case EXTIOI_IPMAP_START: + p = s->ipmap.reg_u8; + len = sizeof(s->ipmap); + break; + case EXTIOI_ENABLE_START: + p = s->enable.reg_u8; + len = sizeof(s->enable); + break; + case EXTIOI_BOUNCE_START: + p = s->bounce.reg_u8; + len = sizeof(s->bounce); + break; + case EXTIOI_ISR_START: + p = s->isr.reg_u8; + len = sizeof(s->isr); + break; + case EXTIOI_COREISR_START: + p = s->coreisr.reg_u8; + len = sizeof(s->coreisr); + break; + case EXTIOI_COREMAP_START: + p = s->coremap.reg_u8; + len = sizeof(s->coremap); + break; + case EXTIOI_SW_COREMAP_FLAG: + p = s->sw_coremap; + len = sizeof(s->sw_coremap); + break; + default: + loongarch_ext_irq_unlock(s, flags); + 
kvm_err("%s: unknown extioi register, addr = %d\n", __func__, addr); + return -EINVAL; + } + + loongarch_ext_irq_unlock(s, flags); + + if (is_write) { + if (copy_from_user(p, data, len)) + return -EFAULT; + } else { + if (copy_to_user(data, p, len)) + return -EFAULT; + } + + if ((addr == EXTIOI_COREISR_START) && is_write) { + loongarch_ext_irq_lock(s, flags); + extioi_set_sw_coreisr(s); + loongarch_ext_irq_unlock(s, flags); + } + + return 0; +} + +static int kvm_loongarch_extioi_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + if (attr->group == KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS) + return kvm_loongarch_extioi_regs_access(dev, attr, false); + + return -EINVAL; +} + +static int kvm_loongarch_extioi_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + if (attr->group == KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS) + return kvm_loongarch_extioi_regs_access(dev, attr, true); + + return -EINVAL; +} + +static void kvm_loongarch_extioi_destroy(struct kvm_device *dev) +{ + struct kvm *kvm; + struct loongarch_extioi *extioi; + struct kvm_io_device *device; + + if (!dev) + return; + + kvm = dev->kvm; + if (!kvm) + return; + + extioi = kvm->arch.extioi; + if (!extioi) + return; + + device = &extioi->device; + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device); + kfree(extioi); +} + +static int kvm_loongarch_extioi_create(struct kvm_device *dev, u32 type) +{ + int ret; + struct loongarch_extioi *s; + struct kvm_io_device *device; + struct kvm *kvm = dev->kvm; + + /* extioi has been created */ + if (kvm->arch.extioi) + return -EINVAL; + + s = kzalloc(sizeof(struct loongarch_extioi), GFP_KERNEL); + if (!s) + return -ENOMEM; + spin_lock_init(&s->lock); + s->kvm = kvm; + + /* + * Initialize IOCSR device + */ + device = &s->device; + kvm_iodevice_init(device, &kvm_loongarch_extioi_ops); + mutex_lock(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, EXTIOI_BASE, EXTIOI_SIZE, device); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) { + kfree(s); + return -EFAULT; + } + + kvm->arch.extioi = s; + + kvm_info("create extioi device successfully\n"); + return 0; +} + +static struct kvm_device_ops kvm_loongarch_extioi_dev_ops = { + .name = "kvm-loongarch-extioi", + .create = kvm_loongarch_extioi_create, + .destroy = kvm_loongarch_extioi_destroy, + .set_attr = kvm_loongarch_extioi_set_attr, + .get_attr = kvm_loongarch_extioi_get_attr, +}; + +int kvm_loongarch_register_extioi_device(void) +{ + return kvm_register_device_ops(&kvm_loongarch_extioi_dev_ops, + KVM_DEV_TYPE_LA_EXTIOI); +} + +int kvm_loongarch_reset_extioi(struct kvm *kvm) +{ + struct loongarch_extioi *extioi = kvm->arch.extioi; + unsigned long flags; + u8 offset, size; + u8 *pstart; + + if (!extioi) + return -EINVAL; + + pstart = (char *)&extioi->nodetype; + offset = (char *)&extioi->nodetype - (char *)extioi; + size = sizeof(struct loongarch_extioi) - offset; + + loongarch_ext_irq_lock(extioi, flags); + memset(pstart, 0, size); + loongarch_ext_irq_unlock(extioi, flags); + + return 0; +} diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c new file mode 100644 index 0000000000000000000000000000000000000000..12024d9fdd0b5a751474cedbc45bab6bf624c760 --- /dev/null +++ b/arch/loongarch/kvm/intc/ipi.c @@ -0,0 +1,538 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +static void ipi_send(struct kvm *kvm, uint64_t data) +{ + struct kvm_vcpu *vcpu; + struct kvm_interrupt irq; + int cpu, action, 
status; + + cpu = ((data & 0xffffffff) >> 16) & 0x3ff; + vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return; + } + + action = 1 << (data & 0x1f); + + spin_lock(&vcpu->arch.ipi_state.lock); + status = vcpu->arch.ipi_state.status; + vcpu->arch.ipi_state.status |= action; + if (status == 0) { + irq.irq = LARCH_INT_IPI; + kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } + spin_unlock(&vcpu->arch.ipi_state.lock); +} + +static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data) +{ + struct kvm_interrupt irq; + + spin_lock(&vcpu->arch.ipi_state.lock); + vcpu->arch.ipi_state.status &= ~data; + if (!vcpu->arch.ipi_state.status) { + irq.irq = -LARCH_INT_IPI; + kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } + spin_unlock(&vcpu->arch.ipi_state.lock); +} + +static uint64_t read_mailbox(struct kvm_vcpu *vcpu, int offset, int len) +{ + void *pbuf; + uint64_t ret = 0; + + spin_lock(&vcpu->arch.ipi_state.lock); + pbuf = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20); + if (len == 1) + ret = *(unsigned char *)pbuf; + else if (len == 2) + ret = *(unsigned short *)pbuf; + else if (len == 4) + ret = *(unsigned int *)pbuf; + else if (len == 8) + ret = *(unsigned long *)pbuf; + else + kvm_err("%s: unknown data len: %d\n", __func__, len); + spin_unlock(&vcpu->arch.ipi_state.lock); + + return ret; +} + +static void write_mailbox(struct kvm_vcpu *vcpu, int offset, + uint64_t data, int len) +{ + void *pbuf; + + spin_lock(&vcpu->arch.ipi_state.lock); + pbuf = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20); + if (len == 1) + *(unsigned char *)pbuf = (unsigned char)data; + else if (len == 2) + *(unsigned short *)pbuf = (unsigned short)data; + else if (len == 4) + *(unsigned int *)pbuf = (unsigned int)data; + else if (len == 8) + *(unsigned long *)pbuf = (unsigned long)data; + else + kvm_err("%s: unknown data len: %d\n", __func__, len); + spin_unlock(&vcpu->arch.ipi_state.lock); +} + +static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, + int len, const void *val) +{ + uint64_t data; + uint32_t offset; + int ret = 0; + + data = *(uint64_t *)val; + + offset = (uint32_t)(addr & 0xff); + WARN_ON_ONCE(offset & (len - 1)); + + switch (offset) { + case CORE_STATUS_OFF: + kvm_err("CORE_STATUS_OFF can't be written\n"); + ret = -EINVAL; + break; + case CORE_EN_OFF: + spin_lock(&vcpu->arch.ipi_state.lock); + vcpu->arch.ipi_state.en = data; + spin_unlock(&vcpu->arch.ipi_state.lock); + break; + case IOCSR_IPI_SEND: + ipi_send(vcpu->kvm, data); + break; + case CORE_SET_OFF: + kvm_info("CORE_SET_OFF simulation is required\n"); + ret = -EINVAL; + break; + case CORE_CLEAR_OFF: + /* Just clear the status of the current vcpu */ + ipi_clear(vcpu, data); + break; + case CORE_BUF_20 ... 
CORE_BUF_38 + 7: + if (offset + len > CORE_BUF_38 + 8) { + kvm_err("%s: invalid offset or len: offset = %d, len = %d\n", + __func__, offset, len); + ret = -EINVAL; + break; + } + write_mailbox(vcpu, offset, data, len); + break; + default: + kvm_err("%s: unknown addr: %llx\n", __func__, addr); + ret = -EINVAL; + break; + } + + return ret; +} + +static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, + int len, void *val) +{ + uint32_t offset; + uint64_t res = 0; + int ret = 0; + + offset = (uint32_t)(addr & 0xff); + WARN_ON_ONCE(offset & (len - 1)); + + switch (offset) { + case CORE_STATUS_OFF: + spin_lock(&vcpu->arch.ipi_state.lock); + res = vcpu->arch.ipi_state.status; + spin_unlock(&vcpu->arch.ipi_state.lock); + break; + case CORE_EN_OFF: + spin_lock(&vcpu->arch.ipi_state.lock); + res = vcpu->arch.ipi_state.en; + spin_unlock(&vcpu->arch.ipi_state.lock); + break; + case CORE_SET_OFF: + res = 0; + break; + case CORE_CLEAR_OFF: + res = 0; + break; + case CORE_BUF_20 ... CORE_BUF_38 + 7: + if (offset + len > CORE_BUF_38 + 8) { + kvm_err("%s: invalid offset or len: offset = %d, len = %d\n", + __func__, offset, len); + ret = -EINVAL; + break; + } + res = read_mailbox(vcpu, offset, len); + break; + default: + kvm_err("%s: unknown addr: %llx\n", __func__, addr); + ret = -EINVAL; + break; + } + + *(uint64_t *)val = res; + + return ret; +} + +static int kvm_loongarch_ipi_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + struct loongarch_ipi *ipi; + int ret; + + ipi = vcpu->kvm->arch.ipi; + if (!ipi) { + kvm_err("%s: ipi irqchip not valid!\n", __func__); + return -EINVAL; + } + + ipi->kvm->stat.ipi_write_exits++; + ret = loongarch_ipi_writel(vcpu, addr, len, val); + + return ret; +} + +static int kvm_loongarch_ipi_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + struct loongarch_ipi *ipi; + int ret; + + ipi = vcpu->kvm->arch.ipi; + if (!ipi) { + kvm_err("%s: ipi irqchip not valid!\n", __func__); + return -EINVAL; + } + + ipi->kvm->stat.ipi_read_exits++; + ret = loongarch_ipi_readl(vcpu, addr, len, val); + + return ret; +} + +static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) +{ + int i, ret; + uint32_t val = 0, mask = 0; + /* + * Bits 27-30 are the mask for byte writing. + * If the mask is 0, we need not do anything. 
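+ * (As implemented below: each set bit i in bits 27-30 keeps byte i of the + * current IOCSR value; the bytes actually written come from bits 63:32 of + * data.) 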
+ */ + if ((data >> 27) & 0xf) { + /* Read the old val */ + ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); + if (unlikely(ret)) { + kvm_err("%s: read data from addr %llx failed\n", __func__, addr); + return ret; + } + /* Construct the mask by scanning bits 27-30 */ + for (i = 0; i < 4; i++) { + if (data & (0x1 << (27 + i))) + mask |= (0xff << (i * 8)); + } + /* Save the old part of val */ + val &= mask; + } + + val |= ((uint32_t)(data >> 32) & ~mask); + ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); + if (unlikely(ret)) + kvm_err("%s: write data to addr %llx failed\n", __func__, addr); + + return ret; +} + +static int mail_send(struct kvm *kvm, uint64_t data) +{ + struct kvm_vcpu *vcpu; + int cpu, mailbox; + int offset, ret; + + cpu = ((data & 0xffffffff) >> 16) & 0x3ff; + vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return -EINVAL; + } + + mailbox = ((data & 0xffffffff) >> 2) & 0x7; + offset = SMP_MAILBOX + CORE_BUF_20 + mailbox * 4; + ret = send_ipi_data(vcpu, offset, data); + + return ret; +} + +static int any_send(struct kvm *kvm, uint64_t data) +{ + struct kvm_vcpu *vcpu; + int cpu, offset, ret; + + cpu = ((data & 0xffffffff) >> 16) & 0x3ff; + vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return -EINVAL; + } + + offset = data & 0xffff; + ret = send_ipi_data(vcpu, offset, data); + return ret; +} + +static int kvm_loongarch_mail_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + struct loongarch_ipi *ipi; + int ret; + + ipi = vcpu->kvm->arch.ipi; + if (!ipi) { + kvm_err("%s: ipi irqchip not valid!\n", __func__); + return -EINVAL; + } + + addr &= 0xfff; + addr -= IOCSR_MAIL_SEND; + + switch (addr) { + case MAIL_SEND_OFFSET: + ret = mail_send(vcpu->kvm, *(uint64_t *)val); + break; + case ANY_SEND_OFFSET: + ret = any_send(vcpu->kvm, *(uint64_t *)val); + break; + default: + kvm_err("%s: invalid addr %llx!\n", __func__, addr); + ret = -EINVAL; + break; + } + + return ret; +} + +static const struct kvm_io_device_ops kvm_loongarch_ipi_ops = { + .read = kvm_loongarch_ipi_read, + .write = kvm_loongarch_ipi_write, +}; + +static const struct kvm_io_device_ops kvm_loongarch_mail_ops = { + .write = kvm_loongarch_mail_write, +}; + +static int kvm_loongarch_ipi_regs_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + bool is_write) +{ + uint64_t val; + int cpu, addr; + void *p = NULL; + int len = 4; + struct kvm_vcpu *vcpu; + + cpu = (attr->attr >> 16) & 0x3ff; + addr = attr->attr & 0xff; + + vcpu = kvm_get_vcpu(dev->kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return -EINVAL; + } + switch (addr) { + case CORE_STATUS_OFF: + p = &vcpu->arch.ipi_state.status; + break; + case CORE_EN_OFF: + p = &vcpu->arch.ipi_state.en; + break; + case CORE_SET_OFF: + p = &vcpu->arch.ipi_state.set; + break; + case CORE_CLEAR_OFF: + p = &vcpu->arch.ipi_state.clear; + break; + case CORE_BUF_20: + p = &vcpu->arch.ipi_state.buf[0]; + len = 8; + break; + case CORE_BUF_28: + p = &vcpu->arch.ipi_state.buf[1]; + len = 8; + break; + case CORE_BUF_30: + p = &vcpu->arch.ipi_state.buf[2]; + len = 8; + break; + case CORE_BUF_38: + p = &vcpu->arch.ipi_state.buf[3]; + len = 8; + break; + default: + kvm_err("%s: unknown ipi register, addr = %d\n", __func__, addr); + return -EINVAL; + } + + if 
(is_write) { + if (len == 4) { + if (get_user(val, (uint32_t __user *)attr->addr)) + return -EFAULT; + *(uint32_t *)p = (uint32_t)val; + } else if (len == 8) { + if (get_user(val, (uint64_t __user *)attr->addr)) + return -EFAULT; + *(uint64_t *)p = val; + } + } else { + if (len == 4) { + val = *(uint32_t *)p; + return put_user(val, (uint32_t __user *)attr->addr); + } else if (len == 8) { + val = *(uint64_t *)p; + return put_user(val, (uint64_t __user *)attr->addr); + } + } + + return 0; +} + +static int kvm_loongarch_ipi_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_DEV_LOONGARCH_IPI_GRP_REGS: + return kvm_loongarch_ipi_regs_access(dev, attr, false); + default: + kvm_err("%s: unknown group (%d)\n", __func__, attr->group); + return -EINVAL; + } +} + +static int kvm_loongarch_ipi_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_DEV_LOONGARCH_IPI_GRP_REGS: + return kvm_loongarch_ipi_regs_access(dev, attr, true); + default: + kvm_err("%s: unknown group (%d)\n", __func__, attr->group); + return -EINVAL; + } +} + +static void kvm_loongarch_ipi_destroy(struct kvm_device *dev) +{ + struct kvm *kvm; + struct loongarch_ipi *ipi; + struct kvm_io_device *device; + + if (!dev) + return; + + kvm = dev->kvm; + if (!kvm) + return; + + ipi = kvm->arch.ipi; + if (!ipi) + return; + + device = &ipi->device; + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device); + + device = &ipi->mail_dev; + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device); + + kfree(ipi); +} + +static int kvm_loongarch_ipi_create(struct kvm_device *dev, u32 type) +{ + struct kvm *kvm; + struct loongarch_ipi *s; + unsigned long addr; + struct kvm_io_device *device; + int ret; + + kvm_info("begin create loongarch ipi in kvm ...\n"); + if (!dev) { + kvm_err("%s: kvm_device ptr is invalid!\n", __func__); + return -EINVAL; + } + + kvm = dev->kvm; + if (kvm->arch.ipi) { + kvm_err("%s: loongarch ipi has been created!\n", __func__); + return -EINVAL; + } + + s = kzalloc(sizeof(struct loongarch_ipi), GFP_KERNEL); + if (!s) + return -ENOMEM; + spin_lock_init(&s->lock); + s->kvm = kvm; + + /* + * Initialize IOCSR device + */ + device = &s->device; + kvm_iodevice_init(device, &kvm_loongarch_ipi_ops); + addr = SMP_MAILBOX; + mutex_lock(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, addr, + KVM_IOCSR_IPI_ADDR_SIZE, device); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) { + kvm_err("%s: initialize IOCSR dev failed, ret = %d\n", __func__, ret); + goto err; + } + + device = &s->mail_dev; + kvm_iodevice_init(device, &kvm_loongarch_mail_ops); + addr = MAIL_SEND_ADDR; + mutex_lock(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, addr, + KVM_IOCSR_MAIL_ADDR_SIZE, device); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) { + device = &s->device; + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device); + kvm_err("%s: initialize mail box dev failed, ret = %d\n", __func__, ret); + goto err; + } + + kvm->arch.ipi = s; + kvm_info("create loongarch ipi in kvm done!\n"); + + return 0; + +err: + kfree(s); + return -EFAULT; +} + +static struct kvm_device_ops kvm_loongarch_ipi_dev_ops = { + .name = "kvm-loongarch-ipi", + .create = kvm_loongarch_ipi_create, + .destroy = kvm_loongarch_ipi_destroy, + .set_attr = kvm_loongarch_ipi_set_attr, + .get_attr = kvm_loongarch_ipi_get_attr, +}; + +int kvm_loongarch_register_ipi_device(void) +{ + return kvm_register_device_ops(&kvm_loongarch_ipi_dev_ops, + 
KVM_DEV_TYPE_LA_IPI); +} diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c new file mode 100644 index 0000000000000000000000000000000000000000..7d053dbcd5c066829fa6c12ab15a6aac4d7957be --- /dev/null +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -0,0 +1,540 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include + +/* update the isr according to irq level and route irq to extioi */ +static void pch_pic_update_irq(struct loongarch_pch_pic *s, int irq, int level) +{ + u64 mask = (1 << irq); + + /* + * set isr and route irq to extioi and + * the route table is in htmsi_vector[] + */ + if (level) { + if (mask & s->irr & ~s->mask) { + s->isr |= mask; + irq = s->htmsi_vector[irq]; + extioi_set_irq(s->kvm->arch.extioi, irq, level); + } + } else { + if (mask & s->isr & ~s->irr) { + s->isr &= ~mask; + irq = s->htmsi_vector[irq]; + extioi_set_irq(s->kvm->arch.extioi, irq, level); + } + } +} + +/* msi irq handler */ +void pch_msi_set_irq(struct kvm *kvm, int irq, int level) +{ + extioi_set_irq(kvm->arch.extioi, irq, level); +} + +/* called when an irq is triggered in pch pic */ +void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level) +{ + u64 mask = (1 << irq); + + spin_lock(&s->lock); + if (level) + /* set irr */ + s->irr |= mask; + else { + /* + * A 0 level signal on an edge-triggered irq does not mean to + * clear the irq. The irr register variable is cleared when the + * cpu writes to the PCH_PIC_CLEAR_START address area. + */ + if (s->edge & mask) { + spin_unlock(&s->lock); + return; + } + s->irr &= ~mask; + } + pch_pic_update_irq(s, irq, level); + spin_unlock(&s->lock); +} + +/* update batch irqs, the irq_mask is a bitmap of irqs */ +static void pch_pic_update_batch_irqs(struct loongarch_pch_pic *s, u64 irq_mask, int level) +{ + int irq, bits; + + /* find each irq by irqs bitmap and update each irq */ + bits = sizeof(irq_mask) * 8; + irq = find_first_bit((void *)&irq_mask, bits); + while (irq < bits) { + pch_pic_update_irq(s, irq, level); + bitmap_clear((void *)&irq_mask, irq, 1); + irq = find_first_bit((void *)&irq_mask, bits); + } +} + +/* + * pch pic register is 64-bit, but it is accessed by 32-bit, + * so we use high to select the low or high 32 bits we want + * to read. + */ +static u32 pch_pic_read_reg(u64 *s, int high) +{ + u64 val = *s; + + /* read the high 32 bits when high is 1 */ + return high ? (u32)(val >> 32) : (u32)val; +} + +/* + * pch pic register is 64-bit, but it is accessed by 32-bit, + * so we use high to select the low or high 32 bits we want + * to write. + */ +static u32 pch_pic_write_reg(u64 *s, int high, u32 v) +{ + u64 val = *s, data = v; + + if (high) { + /* + * Clear val high 32 bits + * write the high 32 bits when high is 1 + */ + *s = (val << 32 >> 32) | (data << 32); + val >>= 32; + } else + /* + * Clear val low 32 bits + * write the low 32 bits when high is 0 + */ + *s = (val >> 32 << 32) | v; + + return (u32)val; +} + +static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr, + int len, const void *val) +{ + u32 old, data, offset, index; + u64 irq; + int ret; + + ret = 0; + data = *(u32 *)val; + offset = addr - s->pch_pic_base; + + spin_lock(&s->lock); + switch (offset) { + case PCH_PIC_MASK_START ... 
PCH_PIC_MASK_END: + offset -= PCH_PIC_MASK_START; + /* select the high or low 32 bits we want to write */ + index = offset >> 2; + old = pch_pic_write_reg(&s->mask, index, data); + + /* enable irq when mask value changes to 0 */ + irq = (old & ~data) << (32 * index); + pch_pic_update_batch_irqs(s, irq, 1); + + /* disable irq when mask value changes to 1 */ + irq = (~old & data) << (32 * index); + pch_pic_update_batch_irqs(s, irq, 0); + break; + case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END: + offset -= PCH_PIC_HTMSI_EN_START; + index = offset >> 2; + pch_pic_write_reg(&s->htmsi_en, index, data); + break; + case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END: + offset -= PCH_PIC_EDGE_START; + index = offset >> 2; + /* 1: edge triggered, 0: level triggered */ + pch_pic_write_reg(&s->edge, index, data); + break; + case PCH_PIC_CLEAR_START ... PCH_PIC_CLEAR_END: + offset -= PCH_PIC_CLEAR_START; + index = offset >> 2; + /* write 1 to clear edge irq */ + old = pch_pic_read_reg(&s->irr, index); + /* + * get the irq bitmap which is edge triggered and + * already set and to be cleared + */ + irq = old & pch_pic_read_reg(&s->edge, index) & data; + /* write irr to the new state where irqs have been cleared */ + pch_pic_write_reg(&s->irr, index, old & ~irq); + /* update cleared irqs */ + pch_pic_update_batch_irqs(s, irq, 0); + break; + case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END: + offset -= PCH_PIC_AUTO_CTRL0_START; + index = offset >> 2; + /* we only use default mode: fixed interrupt distribution mode */ + pch_pic_write_reg(&s->auto_ctrl0, index, 0); + break; + case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END: + offset -= PCH_PIC_AUTO_CTRL1_START; + index = offset >> 2; + /* we only use default mode: fixed interrupt distribution mode */ + pch_pic_write_reg(&s->auto_ctrl1, index, 0); + break; + case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END: + offset -= PCH_PIC_ROUTE_ENTRY_START; + /* only route to int0: extioi */ + s->route_entry[offset] = 1; + break; + case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END: + /* route table to extioi */ + offset -= PCH_PIC_HTMSI_VEC_START; + s->htmsi_vector[offset] = (u8)data; + break; + case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END: + offset -= PCH_PIC_POLARITY_START; + index = offset >> 2; + + /* we only use default value 0: high level triggered */ + pch_pic_write_reg(&s->polarity, index, 0); + break; + default: + ret = -EINVAL; + break; + } + + spin_unlock(&s->lock); + return ret; +} + +static int kvm_loongarch_pch_pic_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + int ret; + struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic; + + if (!s) { + kvm_err("%s: pch pic irqchip not valid!\n", __func__); + return -EINVAL; + } + + /* statistics of pch pic writing */ + vcpu->kvm->stat.pch_pic_write_exits++; + ret = loongarch_pch_pic_write(s, addr, len, val); + + return ret; +} + +static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val) +{ + int offset, index, ret = 0; + u32 data = 0; + u64 int_id = 0; + + offset = addr - s->pch_pic_base; + + spin_lock(&s->lock); + switch (offset) { + case PCH_PIC_INT_ID_START ... PCH_PIC_INT_ID_END: + /* int id version */ + int_id |= (u64)PCH_PIC_INT_ID_VER << 32; + /* irq number */ + int_id |= (u64)31 << (32 + 16); + /* int id value */ + int_id |= PCH_PIC_INT_ID_VAL; + *(u64 *)val = int_id; + break; + case PCH_PIC_MASK_START ... 
PCH_PIC_MASK_END: + offset -= PCH_PIC_MASK_START; + index = offset >> 2; + /* read mask reg */ + data = pch_pic_read_reg(&s->mask, index); + *(u32 *)val = data; + break; + case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END: + offset -= PCH_PIC_HTMSI_EN_START; + index = offset >> 2; + /* read htmsi enable reg */ + data = pch_pic_read_reg(&s->htmsi_en, index); + *(u32 *)val = data; + break; + case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END: + offset -= PCH_PIC_EDGE_START; + index = offset >> 2; + /* read edge enable reg */ + data = pch_pic_read_reg(&s->edge, index); + *(u32 *)val = data; + break; + case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END: + case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END: + /* we only use default mode: fixed interrupt distribution mode */ + *(u32 *)val = 0; + break; + case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END: + /* only route to int0: extioi */ + *(u8 *)val = 1; + break; + case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END: + offset -= PCH_PIC_HTMSI_VEC_START; + /* read htmsi vector */ + data = s->htmsi_vector[offset]; + *(u8 *)val = data; + break; + case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END: + /* we only use default value 0: high level triggered */ + *(u32 *)val = 0; + break; + default: + ret = -EINVAL; + } + spin_unlock(&s->lock); + return ret; +} + +static int kvm_loongarch_pch_pic_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + int ret; + struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic; + + if (!s) { + kvm_err("%s: pch pic irqchip not valid!\n", __func__); + return -EINVAL; + } + + /* statistics of pch pic reading */ + vcpu->kvm->stat.pch_pic_read_exits++; + ret = loongarch_pch_pic_read(s, addr, len, val); + return ret; +} + +static const struct kvm_io_device_ops kvm_loongarch_pch_pic_ops = { + .read = kvm_loongarch_pch_pic_read, + .write = kvm_loongarch_pch_pic_write, +}; + +static int kvm_loongarch_pch_pic_init(struct kvm_device *dev, u64 addr) +{ + int ret; + struct loongarch_pch_pic *s = dev->kvm->arch.pch_pic; + struct kvm_io_device *device; + struct kvm *kvm = dev->kvm; + + s->pch_pic_base = addr; + device = &s->device; + /* init device by pch pic writing and reading ops */ + kvm_iodevice_init(device, &kvm_loongarch_pch_pic_ops); + mutex_lock(&kvm->slots_lock); + /* register pch pic device */ + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, PCH_PIC_SIZE, device); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) + return -EFAULT; + + return 0; +} + +/* used by user space to get or set pch pic registers */ +static int kvm_loongarch_pch_pic_regs_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + bool is_write) +{ + int addr, len = 8, ret = 0; + void __user *data; + void *p = NULL; + struct loongarch_pch_pic *s; + + s = dev->kvm->arch.pch_pic; + addr = attr->attr; + data = (void __user *)attr->addr; + + spin_lock(&s->lock); + /* get pointer to pch pic register by addr */ + switch (addr) { + case PCH_PIC_MASK_START: + p = &s->mask; + break; + case PCH_PIC_HTMSI_EN_START: + p = &s->htmsi_en; + break; + case PCH_PIC_EDGE_START: + p = &s->edge; + break; + case PCH_PIC_AUTO_CTRL0_START: + p = &s->auto_ctrl0; + break; + case PCH_PIC_AUTO_CTRL1_START: + p = &s->auto_ctrl1; + break; + case PCH_PIC_ROUTE_ENTRY_START: + p = s->route_entry; + len = 64; + break; + case PCH_PIC_HTMSI_VEC_START: + p = s->htmsi_vector; + len = 64; + break; + case PCH_PIC_INT_IRR_START: + p = &s->irr; + break; + case PCH_PIC_INT_ISR_START: + p = &s->isr; + break; + case 
PCH_PIC_POLARITY_START: + p = &s->polarity; + break; + default: + ret = -EINVAL; + } + + /* write or read value according to is_write */ + if (is_write) { + if (copy_from_user(p, data, len)) + ret = -EFAULT; + } else { + if (copy_to_user(data, p, len)) + ret = -EFAULT; + } + + spin_unlock(&s->lock); + return ret; +} + +static int kvm_loongarch_pch_pic_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + /* only support pch pic group registers */ + if (attr->group == KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS) + return kvm_loongarch_pch_pic_regs_access(dev, attr, false); + + return -EINVAL; +} + +static int kvm_loongarch_pch_pic_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + int ret = -EINVAL; + u64 addr; + void __user *uaddr = (void __user *)(long)attr->addr; + + switch (attr->group) { + case KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL: + switch (attr->attr) { + case KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT: + if (copy_from_user(&addr, uaddr, sizeof(addr))) + return -EFAULT; + + if (!dev->kvm->arch.pch_pic) { + kvm_err("%s: please create pch_pic irqchip first!\n", __func__); + ret = -EFAULT; + break; + } + + ret = kvm_loongarch_pch_pic_init(dev, addr); + break; + default: + kvm_err("%s: unknown group (%d) attr (%lld)\n", __func__, attr->group, + attr->attr); + ret = -EINVAL; + break; + } + break; + case KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS: + ret = kvm_loongarch_pch_pic_regs_access(dev, attr, true); + break; + default: + break; + } + + return ret; +} + +static int kvm_setup_default_irq_routing(struct kvm *kvm) +{ + struct kvm_irq_routing_entry *entries; + + u32 nr = KVM_IRQCHIP_NUM_PINS; + int i, ret; + + entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL); + if (!entries) + return -ENOMEM; + + for (i = 0; i < nr; i++) { + entries[i].gsi = i; + entries[i].type = KVM_IRQ_ROUTING_IRQCHIP; + entries[i].u.irqchip.irqchip = 0; + entries[i].u.irqchip.pin = i; + } + ret = kvm_set_irq_routing(kvm, entries, nr, 0); + kfree(entries); + + return ret; +} + +static void kvm_loongarch_pch_pic_destroy(struct kvm_device *dev) +{ + struct kvm *kvm; + struct loongarch_pch_pic *s; + struct kvm_io_device *device; + + if (!dev) + return; + + kvm = dev->kvm; + if (!kvm) + return; + + s = kvm->arch.pch_pic; + if (!s) + return; + + device = &s->device; + /* unregister pch pic device and free its memory */ + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, device); + kfree(s); +} + +static int kvm_loongarch_pch_pic_create(struct kvm_device *dev, u32 type) +{ + int ret; + struct loongarch_pch_pic *s; + struct kvm *kvm = dev->kvm; + + /* pch pic must not already have been created */ + if (kvm->arch.pch_pic) + return -EINVAL; + + ret = kvm_setup_default_irq_routing(kvm); + if (ret) + return -ENOMEM; + + s = kzalloc(sizeof(struct loongarch_pch_pic), GFP_KERNEL); + if (!s) + return -ENOMEM; + + spin_lock_init(&s->lock); + s->kvm = kvm; + + kvm->arch.pch_pic = s; + + kvm_info("create pch pic device successfully\n"); + return 0; +} + +static struct kvm_device_ops kvm_loongarch_pch_pic_dev_ops = { + .name = "kvm-loongarch-pch-pic", + .create = kvm_loongarch_pch_pic_create, + .destroy = kvm_loongarch_pch_pic_destroy, + .set_attr = kvm_loongarch_pch_pic_set_attr, + .get_attr = kvm_loongarch_pch_pic_get_attr, +}; + +int kvm_loongarch_register_pch_pic_device(void) +{ + return kvm_register_device_ops(&kvm_loongarch_pch_pic_dev_ops, + KVM_DEV_TYPE_LA_IOAPIC); +} diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c new file mode 100644 index 
0000000000000000000000000000000000000000..4c3f22de4b40a321811947f2ebff47a9e7b45ea6 --- /dev/null +++ b/arch/loongarch/kvm/interrupt.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include + +static unsigned int priority_to_irq[EXCCODE_INT_NUM] = { + [INT_TI] = CPU_TIMER, + [INT_IPI] = CPU_IPI, + [INT_SWI0] = CPU_SIP0, + [INT_SWI1] = CPU_SIP1, + [INT_HWI0] = CPU_IP0, + [INT_HWI1] = CPU_IP1, + [INT_HWI2] = CPU_IP2, + [INT_HWI3] = CPU_IP3, + [INT_HWI4] = CPU_IP4, + [INT_HWI5] = CPU_IP5, + [INT_HWI6] = CPU_IP6, + [INT_HWI7] = CPU_IP7, +}; + +static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority) +{ + unsigned int irq = 0; + + clear_bit(priority, &vcpu->arch.irq_pending); + if (priority < EXCCODE_INT_NUM) + irq = priority_to_irq[priority]; + + switch (priority) { + case INT_TI: + case INT_IPI: + case INT_SWI0: + case INT_SWI1: + set_gcsr_estat(irq); + break; + + case INT_HWI0 ... INT_HWI7: + set_csr_gintc(irq); + break; + + default: + break; + } + + return 1; +} + +static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority) +{ + unsigned int irq = 0; + + clear_bit(priority, &vcpu->arch.irq_clear); + if (priority < EXCCODE_INT_NUM) + irq = priority_to_irq[priority]; + + switch (priority) { + case INT_TI: + case INT_IPI: + case INT_SWI0: + case INT_SWI1: + clear_gcsr_estat(irq); + break; + + case INT_HWI0 ... INT_HWI7: + clear_csr_gintc(irq); + break; + + default: + break; + } + + return 1; +} + +void kvm_deliver_intr(struct kvm_vcpu *vcpu) +{ + unsigned int priority; + unsigned long *pending = &vcpu->arch.irq_pending; + unsigned long *pending_clr = &vcpu->arch.irq_clear; + + if (!(*pending) && !(*pending_clr)) + return; + + if (*pending_clr) { + priority = __ffs(*pending_clr); + while (priority <= INT_IPI) { + kvm_irq_clear(vcpu, priority); + priority = find_next_bit(pending_clr, + BITS_PER_BYTE * sizeof(*pending_clr), + priority + 1); + } + } + + if (*pending) { + priority = __ffs(*pending); + while (priority <= INT_IPI) { + kvm_irq_deliver(vcpu, priority); + priority = find_next_bit(pending, + BITS_PER_BYTE * sizeof(*pending), + priority + 1); + } + } +} + +int kvm_pending_timer(struct kvm_vcpu *vcpu) +{ + return test_bit(INT_TI, &vcpu->arch.irq_pending); +} + +/* + * Only support illegal instruction or illegal Address Error exception, + * Other exceptions are injected by hardware in kvm mode + */ +static void _kvm_deliver_exception(struct kvm_vcpu *vcpu, + unsigned int code, unsigned int subcode) +{ + unsigned long val, vec_size; + + /* + * BADV is added for EXCCODE_ADE exception + * Use PC register (GVA address) if it is an instruction exception + * Else use BADV from host side (GPA address) for a data exception + */ + if (code == EXCCODE_ADE) { + if (subcode == EXSUBCODE_ADEF) + val = vcpu->arch.pc; + else + val = vcpu->arch.badv; + kvm_write_hw_gcsr(LOONGARCH_CSR_BADV, val); + } + + /* Set exception instruction */ + kvm_write_hw_gcsr(LOONGARCH_CSR_BADI, vcpu->arch.badi); + + /* + * Save CRMD in PRMD + * Set IRQ disabled and PLV0 with CRMD + */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD); + kvm_write_hw_gcsr(LOONGARCH_CSR_PRMD, val); + val = val & ~(CSR_CRMD_PLV | CSR_CRMD_IE); + kvm_write_hw_gcsr(LOONGARCH_CSR_CRMD, val); + + /* Set exception PC address */ + kvm_write_hw_gcsr(LOONGARCH_CSR_ERA, vcpu->arch.pc); + + /* + * Set exception code + * Exception and interrupt can be injected at the same time + * Hardware will handle exception first and 
then the external interrupt + * Exception code is Ecode in ESTAT[16:21] + * Interrupt code in ESTAT[0:12] + */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT); + val = (val & ~CSR_ESTAT_EXC) | code; + kvm_write_hw_gcsr(LOONGARCH_CSR_ESTAT, val); + + /* Calculate exception entry address */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_ECFG); + vec_size = (val & CSR_ECFG_VS) >> CSR_ECFG_VS_SHIFT; + if (vec_size) + vec_size = (1 << vec_size) * 4; + val = kvm_read_hw_gcsr(LOONGARCH_CSR_EENTRY); + vcpu->arch.pc = val + code * vec_size; +} + +void kvm_deliver_exception(struct kvm_vcpu *vcpu) +{ + unsigned int code; + unsigned long *pending = &vcpu->arch.exception_pending; + + if (*pending) { + code = __ffs(*pending); + _kvm_deliver_exception(vcpu, code, vcpu->arch.esubcode); + *pending = 0; + vcpu->arch.esubcode = 0; + } +} diff --git a/arch/loongarch/kvm/irqfd.c b/arch/loongarch/kvm/irqfd.c new file mode 100644 index 0000000000000000000000000000000000000000..bf67f329ebc962cc6b7e9320074c998e99713921 --- /dev/null +++ b/arch/loongarch/kvm/irqfd.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + /* ioapic pin (0 ~ 64) <---> gsi(0 ~ 64) */ + pch_pic_set_irq(kvm->arch.pch_pic, e->irqchip.pin, level); + + return 0; +} + +/* + * kvm_set_routing_entry: populate a kvm routing entry + * from a user routing entry + * + * @kvm: the VM this entry is applied to + * @e: kvm kernel routing entry handle + * @ue: user api routing entry handle + * return 0 on success, -EINVAL on errors. + */ +int kvm_set_routing_entry(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, + const struct kvm_irq_routing_entry *ue) +{ + int r = -EINVAL; + + switch (ue->type) { + case KVM_IRQ_ROUTING_IRQCHIP: + e->set = kvm_set_ioapic_irq; + + e->irqchip.irqchip = ue->u.irqchip.irqchip; + e->irqchip.pin = ue->u.irqchip.pin; + + if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) + goto out; + break; + case KVM_IRQ_ROUTING_MSI: + e->set = kvm_set_msi; + e->msi.address_lo = ue->u.msi.address_lo; + e->msi.address_hi = ue->u.msi.address_hi; + e->msi.data = ue->u.msi.data; + break; + default: + goto out; + } + r = 0; +out: + return r; +} + +int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + if (e->type == KVM_IRQ_ROUTING_MSI) { + pch_msi_set_irq(kvm, e->msi.data, 1); + return 0; + } + + return -EWOULDBLOCK; +} + +/** + * kvm_set_msi: inject the MSI corresponding to the + * MSI routing entry + * + * This is the entry point for irqfd MSI injection + * and userspace MSI injection. 
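+ * + * Returns 0 once the MSI has been passed to pch_msi_set_irq(), or -1 when + * level is 0 and there is nothing to inject. 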
+ */ +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + if (!level) + return -1; + + pch_msi_set_irq(kvm, e->msi.data, level); + return 0; +} diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c new file mode 100644 index 0000000000000000000000000000000000000000..1f50f6723739cde230b961ed429cee62b693db44 --- /dev/null +++ b/arch/loongarch/kvm/main.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "trace.h" + +unsigned long vpid_mask; +struct kvm_world_switch *kvm_loongarch_ops; +static int gcsr_flag[CSR_MAX_NUMS]; +static struct kvm_context __percpu *vmcs; + +int get_gcsr_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + return gcsr_flag[csr]; + + return INVALID_GCSR; +} + +static inline void set_gcsr_sw_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + gcsr_flag[csr] |= SW_GCSR; +} + +static inline void set_gcsr_hw_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + gcsr_flag[csr] |= HW_GCSR; +} + +/* + * The default value of gcsr_flag[CSR] is 0, and we use this + * function to set the flag to 1 (SW_GCSR) or 2 (HW_GCSR) if the + * gcsr is software or hardware. It will be used by get/set_gcsr, + * if gcsr_flag is HW we should use gcsrrd/gcsrwr to access it, + * else use software csr to emulate it. + */ +static void kvm_init_gcsr_flag(void) +{ + set_gcsr_hw_flag(LOONGARCH_CSR_CRMD); + set_gcsr_hw_flag(LOONGARCH_CSR_PRMD); + set_gcsr_hw_flag(LOONGARCH_CSR_EUEN); + set_gcsr_hw_flag(LOONGARCH_CSR_MISC); + set_gcsr_hw_flag(LOONGARCH_CSR_ECFG); + set_gcsr_hw_flag(LOONGARCH_CSR_ESTAT); + set_gcsr_hw_flag(LOONGARCH_CSR_ERA); + set_gcsr_hw_flag(LOONGARCH_CSR_BADV); + set_gcsr_hw_flag(LOONGARCH_CSR_BADI); + set_gcsr_hw_flag(LOONGARCH_CSR_EENTRY); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBIDX); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBEHI); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO0); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO1); + set_gcsr_hw_flag(LOONGARCH_CSR_ASID); + set_gcsr_hw_flag(LOONGARCH_CSR_PGDL); + set_gcsr_hw_flag(LOONGARCH_CSR_PGDH); + set_gcsr_hw_flag(LOONGARCH_CSR_PGD); + set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL0); + set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL1); + set_gcsr_hw_flag(LOONGARCH_CSR_STLBPGSIZE); + set_gcsr_hw_flag(LOONGARCH_CSR_RVACFG); + set_gcsr_hw_flag(LOONGARCH_CSR_CPUID); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG1); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG2); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG3); + set_gcsr_hw_flag(LOONGARCH_CSR_KS0); + set_gcsr_hw_flag(LOONGARCH_CSR_KS1); + set_gcsr_hw_flag(LOONGARCH_CSR_KS2); + set_gcsr_hw_flag(LOONGARCH_CSR_KS3); + set_gcsr_hw_flag(LOONGARCH_CSR_KS4); + set_gcsr_hw_flag(LOONGARCH_CSR_KS5); + set_gcsr_hw_flag(LOONGARCH_CSR_KS6); + set_gcsr_hw_flag(LOONGARCH_CSR_KS7); + set_gcsr_hw_flag(LOONGARCH_CSR_TMID); + set_gcsr_hw_flag(LOONGARCH_CSR_TCFG); + set_gcsr_hw_flag(LOONGARCH_CSR_TVAL); + set_gcsr_hw_flag(LOONGARCH_CSR_TINTCLR); + set_gcsr_hw_flag(LOONGARCH_CSR_CNTC); + set_gcsr_hw_flag(LOONGARCH_CSR_LLBCTL); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRENTRY); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRBADV); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRERA); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRSAVE); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO0); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO1); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBREHI); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRPRMD); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN0); + 
set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN1); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN2); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN3); + + set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL1); + set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL2); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRCTL); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO1); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO2); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRENTRY); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRERA); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRSAVE); + set_gcsr_sw_flag(LOONGARCH_CSR_CTAG); + set_gcsr_sw_flag(LOONGARCH_CSR_DEBUG); + set_gcsr_sw_flag(LOONGARCH_CSR_DERA); + set_gcsr_sw_flag(LOONGARCH_CSR_DESAVE); + + set_gcsr_sw_flag(LOONGARCH_CSR_FWPC); + set_gcsr_sw_flag(LOONGARCH_CSR_FWPS); + set_gcsr_sw_flag(LOONGARCH_CSR_MWPC); + set_gcsr_sw_flag(LOONGARCH_CSR_MWPS); + + set_gcsr_sw_flag(LOONGARCH_CSR_DB0ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB0MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB0CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB0ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7ASID); + + set_gcsr_sw_flag(LOONGARCH_CSR_IB0ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7CTRL); + 
set_gcsr_sw_flag(LOONGARCH_CSR_IB7ASID); + + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL0); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR0); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL1); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR1); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL2); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3); +} + +static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu) +{ + unsigned long vpid; + struct kvm_context *context; + + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + vpid = context->vpid_cache + 1; + if (!(vpid & vpid_mask)) { + /* finish round of vpid loop */ + if (unlikely(!vpid)) + vpid = vpid_mask + 1; + + ++vpid; /* vpid 0 reserved for root */ + + /* start new vpid cycle */ + kvm_flush_tlb_all(); + } + + context->vpid_cache = vpid; + vcpu->arch.vpid = vpid; +} + +void kvm_check_vpid(struct kvm_vcpu *vcpu) +{ + int cpu; + bool migrated; + unsigned long ver, old, vpid; + struct kvm_context *context; + + cpu = smp_processor_id(); + /* + * Are we entering guest context on a different CPU to last time? + * If so, the vCPU's guest TLB state on this CPU may be stale. + */ + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + migrated = (vcpu->cpu != cpu); + + /* + * Check if our vpid is of an older version + * + * We also discard the stored vpid if we've executed on + * another CPU, as the guest mappings may have changed without + * hypervisor knowledge. + */ + ver = vcpu->arch.vpid & ~vpid_mask; + old = context->vpid_cache & ~vpid_mask; + if (migrated || (ver != old)) { + kvm_update_vpid(vcpu, cpu); + trace_kvm_vpid_change(vcpu, vcpu->arch.vpid); + vcpu->cpu = cpu; + kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); + } + + /* Restore GSTAT(0x50).vpid */ + vpid = (vcpu->arch.vpid & vpid_mask) << CSR_GSTAT_GID_SHIFT; + change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid); +} + +void kvm_init_vmcs(struct kvm *kvm) +{ + kvm->arch.vmcs = vmcs; +} + +long kvm_arch_dev_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_hardware_enable(void) +{ + unsigned long env, gcfg = 0; + + env = read_csr_gcfg(); + + /* First init gcfg, gstat, gintc, gtlbc. All guests use the same config */ + write_csr_gcfg(0); + write_csr_gstat(0); + write_csr_gintc(0); + clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI); + + /* + * Enable virtualization features granting guest direct control of + * certain features: + * GCI=2: Trap on init or unimplemented cache instruction. + * TORU=0: Trap on Root Unimplemented. + * CACTRL=1: Root control cache. + * TOP=0: Trap on Privilege. + * TOE=0: Trap on Exception. + * TIT=0: Trap on Timer. 
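+ * + * Each gcfg bit below is set only when the matching capability is present + * in env, i.e. reported by read_csr_gcfg() above. 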
+ */ + if (env & CSR_GCFG_GCIP_ALL) + gcfg |= CSR_GCFG_GCI_SECURE; + if (env & CSR_GCFG_MATC_ROOT) + gcfg |= CSR_GCFG_MATC_ROOT; + + write_csr_gcfg(gcfg); + + kvm_flush_tlb_all(); + + /* Enable using TGID */ + set_csr_gtlbc(CSR_GTLBC_USETGID); + kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx", + read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc()); + + return 0; +} + +void kvm_arch_hardware_disable(void) +{ + write_csr_gcfg(0); + write_csr_gstat(0); + write_csr_gintc(0); + clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI); + + /* Flush any remaining guest TLB entries */ + kvm_flush_tlb_all(); +} + +static int kvm_loongarch_env_init(void) +{ + int cpu, order, ret; + void *addr; + struct kvm_context *context; + + vmcs = alloc_percpu(struct kvm_context); + if (!vmcs) { + pr_err("kvm: failed to allocate percpu kvm_context\n"); + return -ENOMEM; + } + + kvm_loongarch_ops = kzalloc(sizeof(*kvm_loongarch_ops), GFP_KERNEL); + if (!kvm_loongarch_ops) { + free_percpu(vmcs); + vmcs = NULL; + return -ENOMEM; + } + + /* + * PGD register is shared between root kernel and kvm hypervisor. + * So world switch entry should be in DMW area rather than TLB area + * to avoid page fault reenter. + * + * In future if hardware pagetable walking is supported, we won't + * need to copy world switch code to DMW area. + */ + order = get_order(kvm_exception_size + kvm_enter_guest_size); + addr = (void *)__get_free_pages(GFP_KERNEL, order); + if (!addr) { + free_percpu(vmcs); + vmcs = NULL; + kfree(kvm_loongarch_ops); + kvm_loongarch_ops = NULL; + return -ENOMEM; + } + + memcpy(addr, kvm_exc_entry, kvm_exception_size); + memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size); + flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size); + kvm_loongarch_ops->exc_entry = addr; + kvm_loongarch_ops->enter_guest = addr + kvm_exception_size; + kvm_loongarch_ops->page_order = order; + + vpid_mask = read_csr_gstat(); + vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT; + if (vpid_mask) + vpid_mask = GENMASK(vpid_mask - 1, 0); + + for_each_possible_cpu(cpu) { + context = per_cpu_ptr(vmcs, cpu); + context->vpid_cache = vpid_mask + 1; + context->last_vcpu = NULL; + } + + kvm_init_gcsr_flag(); + + /* Register loongarch ipi interrupt controller interface. */ + ret = kvm_loongarch_register_ipi_device(); + if (ret) + return ret; + + /* Register loongarch extioi interrupt controller interface. */ + ret = kvm_loongarch_register_extioi_device(); + if (ret) + return ret; + + /* Register loongarch pch pic interrupt controller interface. 
*/ + ret = kvm_loongarch_register_pch_pic_device(); + + return ret; +} + +static void kvm_loongarch_env_exit(void) +{ + unsigned long addr; + + if (vmcs) + free_percpu(vmcs); + + if (kvm_loongarch_ops) { + if (kvm_loongarch_ops->exc_entry) { + addr = (unsigned long)kvm_loongarch_ops->exc_entry; + free_pages(addr, kvm_loongarch_ops->page_order); + } + kfree(kvm_loongarch_ops); + } +} + +static int kvm_loongarch_init(void) +{ + int r; + + if (!cpu_has_lvz) { + kvm_info("Hardware virtualization not available\n"); + return -ENODEV; + } + r = kvm_loongarch_env_init(); + if (r) + return r; + + return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE); +} + +static void kvm_loongarch_exit(void) +{ + kvm_exit(); + kvm_loongarch_env_exit(); +} + +module_init(kvm_loongarch_init); +module_exit(kvm_loongarch_exit); + +#ifdef MODULE +static const struct cpu_feature kvm_feature[] = { + { .feature = cpu_feature(LOONGARCH_LVZ) }, + {}, +}; +MODULE_DEVICE_TABLE(cpu, kvm_feature); +#endif diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c new file mode 100644 index 0000000000000000000000000000000000000000..cb701649f56fe43efc5ebc1f72d4c5fdeabe0a53 --- /dev/null +++ b/arch/loongarch/kvm/mmu.c @@ -0,0 +1,986 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline bool kvm_hugepage_capable(struct kvm_memory_slot *slot) +{ + return slot->arch.flags & KVM_MEM_HUGEPAGE_CAPABLE; +} + +static inline bool kvm_hugepage_incapable(struct kvm_memory_slot *slot) +{ + return slot->arch.flags & KVM_MEM_HUGEPAGE_INCAPABLE; +} + +static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx) +{ + ctx->level = kvm->arch.root_level; + /* pte table */ + ctx->invalid_ptes = kvm->arch.invalid_ptes; + ctx->pte_shifts = kvm->arch.pte_shifts; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; + ctx->opaque = kvm; +} + +/* + * Mark a range of guest physical address space old (all accesses fault) in the + * VM's GPA page table to allow detection of commonly used pages. + */ +static int kvm_mkold_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx) +{ + if (kvm_pte_young(*pte)) { + *pte = kvm_pte_mkold(*pte); + return 1; + } + + return 0; +} + +/* + * Mark a range of guest physical address space clean (writes fault) in the VM's + * GPA page table to allow dirty page tracking. 
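+ * + * Returns 1 when the pte is modified, 0 when it is left unchanged. 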
+ */
+static int kvm_mkclean_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
+{
+        gfn_t offset;
+        kvm_pte_t val;
+
+        val = *pte;
+        /*
+         * When kvm_arch_mmu_enable_log_dirty_pt_masked() is called with a
+         * mask, the range from start to end may cross huge pages. For the
+         * first huge page the parameter addr equals start, but for any
+         * following huge page addr is the base address of that huge page,
+         * rather than start or end.
+         */
+        if ((ctx->flag & _KVM_HAS_PGMASK) && !kvm_pte_huge(val)) {
+                offset = (addr >> PAGE_SHIFT) - ctx->gfn;
+                if (!(BIT(offset) & ctx->mask))
+                        return 0;
+        }
+
+        /*
+         * There is no need to split the huge page now; just set the
+         * write-protect pte bit. The huge page is split on the next
+         * write fault.
+         */
+        if (kvm_pte_dirty(val)) {
+                *pte = kvm_pte_mkclean(val);
+                return 1;
+        }
+
+        return 0;
+}
+
+/*
+ * Clear pte entry
+ */
+static int kvm_flush_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
+{
+        struct kvm *kvm;
+
+        kvm = ctx->opaque;
+        if (ctx->level)
+                kvm->stat.hugepages--;
+        else
+                kvm->stat.pages--;
+
+        *pte = ctx->invalid_entry;
+
+        return 1;
+}
+
+/*
+ * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
+ *
+ * Allocate a blank KVM GPA page directory (PGD) for representing guest
+ * physical to host physical page mappings.
+ *
+ * Returns:  Pointer to new KVM GPA page directory.
+ *           NULL on allocation failure.
+ */
+kvm_pte_t *kvm_pgd_alloc(void)
+{
+        kvm_pte_t *pgd;
+
+        pgd = (kvm_pte_t *)__get_free_pages(GFP_KERNEL, 0);
+        if (pgd)
+                pgd_init((void *)pgd);
+
+        return pgd;
+}
+
+static void _kvm_pte_init(void *addr, unsigned long val)
+{
+        unsigned long *p, *end;
+
+        p = (unsigned long *)addr;
+        end = p + PTRS_PER_PTE;
+        do {
+                p[0] = val;
+                p[1] = val;
+                p[2] = val;
+                p[3] = val;
+                p[4] = val;
+                p += 8;
+                p[-3] = val;
+                p[-2] = val;
+                p[-1] = val;
+        } while (p != end);
+}
+
+/*
+ * Caller must hold kvm->mmu_lock
+ *
+ * Walk the page tables of kvm to find the PTE corresponding to the
+ * address @addr. If page tables don't exist for @addr, they will be created
+ * from the MMU cache if @cache is not NULL.
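+ *
+ * Illustrative example (an assumption for a three-level table, i.e.
+ * root_level == 2): kvm_populate_gpa(kvm, cache, gpa, 0) descends two
+ * directory levels, allocating missing tables from @cache, and returns
+ * the leaf pte slot; passing level == 1 stops one level higher so the
+ * caller can install a huge page entry there.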
+ */ +static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm, + struct kvm_mmu_memory_cache *cache, + unsigned long addr, int level) +{ + kvm_ptw_ctx ctx; + kvm_pte_t *entry, *child; + + kvm_ptw_prepare(kvm, &ctx); + child = kvm->arch.pgd; + while (ctx.level > level) { + entry = kvm_pgtable_offset(&ctx, child, addr); + if (kvm_pte_none(&ctx, entry)) { + if (!cache) + return NULL; + + child = kvm_mmu_memory_cache_alloc(cache); + _kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]); + smp_wmb(); /* Make pte visible before pmd */ + kvm_set_pte(entry, __pa(child)); + } else if (kvm_pte_huge(*entry)) { + return entry; + } else + child = (kvm_pte_t *)__va(PHYSADDR(*entry)); + kvm_ptw_enter(&ctx); + } + + entry = kvm_pgtable_offset(&ctx, child, addr); + + return entry; +} + +/* + * Page walker for VM shadow mmu at last level + * The last level is small pte page or huge pmd page + */ +static int kvm_ptw_leaf(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next, start, size; + struct list_head *list; + kvm_pte_t *entry, *child; + + ret = 0; + start = addr; + child = (kvm_pte_t *)__va(PHYSADDR(*dir)); + entry = kvm_pgtable_offset(ctx, child, addr); + do { + next = addr + (0x1UL << ctx->pgtable_shift); + if (!kvm_pte_present(ctx, entry)) + continue; + + ret |= ctx->ops(entry, addr, ctx); + } while (entry++, addr = next, addr < end); + + if (kvm_need_flush(ctx)) { + size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3); + if (start + size == end) { + list = (struct list_head *)child; + list_add_tail(list, &ctx->list); + *dir = ctx->invalid_ptes[ctx->level + 1]; + } + } + + return ret; +} + +/* + * Page walker for VM shadow mmu at page table dir level + */ +static int kvm_ptw_dir(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next, start, size; + struct list_head *list; + kvm_pte_t *entry, *child; + + ret = 0; + start = addr; + child = (kvm_pte_t *)__va(PHYSADDR(*dir)); + entry = kvm_pgtable_offset(ctx, child, addr); + do { + next = kvm_pgtable_addr_end(ctx, addr, end); + if (!kvm_pte_present(ctx, entry)) + continue; + + if (kvm_pte_huge(*entry)) { + ret |= ctx->ops(entry, addr, ctx); + continue; + } + + kvm_ptw_enter(ctx); + if (ctx->level == 0) + ret |= kvm_ptw_leaf(entry, addr, next, ctx); + else + ret |= kvm_ptw_dir(entry, addr, next, ctx); + kvm_ptw_exit(ctx); + } while (entry++, addr = next, addr < end); + + if (kvm_need_flush(ctx)) { + size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3); + if (start + size == end) { + list = (struct list_head *)child; + list_add_tail(list, &ctx->list); + *dir = ctx->invalid_ptes[ctx->level + 1]; + } + } + + return ret; +} + +/* + * Page walker for VM shadow mmu at page root table + */ +static int kvm_ptw_top(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next; + kvm_pte_t *entry; + + ret = 0; + entry = kvm_pgtable_offset(ctx, dir, addr); + do { + next = kvm_pgtable_addr_end(ctx, addr, end); + if (!kvm_pte_present(ctx, entry)) + continue; + + kvm_ptw_enter(ctx); + ret |= kvm_ptw_dir(entry, addr, next, ctx); + kvm_ptw_exit(ctx); + } while (entry++, addr = next, addr < end); + + return ret; +} + +/* + * kvm_flush_range() - Flush a range of guest physical addresses. + * @kvm: KVM pointer. + * @start_gfn: Guest frame number of first page in GPA range to flush. + * @end_gfn: Guest frame number of last page in GPA range to flush. 
+ * @lock:      Whether to hold mmu_lock or not
+ *
+ * Flushes a range of GPA mappings from the GPA page tables.
+ */
+static void kvm_flush_range(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn, int lock)
+{
+        int ret;
+        kvm_ptw_ctx ctx;
+        struct list_head *pos, *temp;
+
+        ctx.ops = kvm_flush_pte;
+        ctx.flag = _KVM_FLUSH_PGTABLE;
+        kvm_ptw_prepare(kvm, &ctx);
+        INIT_LIST_HEAD(&ctx.list);
+
+        if (lock) {
+                spin_lock(&kvm->mmu_lock);
+                ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT,
+                                end_gfn << PAGE_SHIFT, &ctx);
+                spin_unlock(&kvm->mmu_lock);
+        } else
+                ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT,
+                                end_gfn << PAGE_SHIFT, &ctx);
+
+        /* Flush vpid for each vCPU individually */
+        if (ret)
+                kvm_flush_remote_tlbs(kvm);
+
+        /*
+         * Free the pte table pages after dropping mmu_lock; the pages are
+         * linked together through ctx.list.
+         */
+        list_for_each_safe(pos, temp, &ctx.list) {
+                list_del(pos);
+                free_page((unsigned long)pos);
+        }
+}
+
+/*
+ * kvm_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
+ * @kvm:       KVM pointer.
+ * @start_gfn: Guest frame number of first page in GPA range to flush.
+ * @end_gfn:   Guest frame number of last page in GPA range to flush.
+ *
+ * Make a range of GPA mappings clean so that guest writes will fault and
+ * trigger dirty page logging.
+ *
+ * The caller must hold the @kvm->mmu_lock spinlock.
+ *
+ * Returns:   Whether any GPA mappings were modified, which would require
+ *            derived mappings (GVA page tables & TLB entries) to be
+ *            invalidated.
+ */
+static int kvm_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
+{
+        kvm_ptw_ctx ctx;
+
+        ctx.ops = kvm_mkclean_pte;
+        ctx.flag = 0;
+        kvm_ptw_prepare(kvm, &ctx);
+        return kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, end_gfn << PAGE_SHIFT, &ctx);
+}
+
+/*
+ * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
+ * @kvm:        The KVM pointer
+ * @slot:       The memory slot associated with mask
+ * @gfn_offset: The gfn offset in memory slot
+ * @mask:       The mask of dirty pages at offset 'gfn_offset' in this memory
+ *              slot to be write protected
+ *
+ * Walk the bits set in @mask and write protect the associated ptes. The
+ * caller must acquire @kvm->mmu_lock.
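+ *
+ * Illustrative example (not from the original patch): with
+ * gfn_offset == 0 and mask == 0b0101, only slot->base_gfn + 0 and
+ * slot->base_gfn + 2 are write protected; bit n of @mask selects
+ * gfn 'slot->base_gfn + gfn_offset + n', matching the offset check
+ * in kvm_mkclean_pte().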
+ */ +void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) +{ + kvm_ptw_ctx ctx; + gfn_t base_gfn = slot->base_gfn + gfn_offset; + gfn_t start = base_gfn + __ffs(mask); + gfn_t end = base_gfn + __fls(mask) + 1; + + ctx.ops = kvm_mkclean_pte; + ctx.flag = _KVM_HAS_PGMASK; + ctx.mask = mask; + ctx.gfn = base_gfn; + kvm_ptw_prepare(kvm, &ctx); + + kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx); +} + +int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) +{ + gpa_t gpa_start; + hva_t hva_start; + size_t size, gpa_offset, hva_offset; + + if ((change != KVM_MR_MOVE) && (change != KVM_MR_CREATE)) + return 0; + /* + * Prevent userspace from creating a memory region outside of the + * VM GPA address space + */ + if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT)) + return -ENOMEM; + + new->arch.flags = 0; + size = new->npages * PAGE_SIZE; + gpa_start = new->base_gfn << PAGE_SHIFT; + hva_start = new->userspace_addr; + if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(gpa_start, PMD_SIZE) + && IS_ALIGNED(hva_start, PMD_SIZE)) + new->arch.flags |= KVM_MEM_HUGEPAGE_CAPABLE; + else { + /* + * Pages belonging to memslots that don't have the same + * alignment within a PMD for userspace and GPA cannot be + * mapped with PMD entries, because we'll end up mapping + * the wrong pages. + * + * Consider a layout like the following: + * + * memslot->userspace_addr: + * +-----+--------------------+--------------------+---+ + * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| + * +-----+--------------------+--------------------+---+ + * + * memslot->base_gfn << PAGE_SIZE: + * +---+--------------------+--------------------+-----+ + * |abc|def Stage-2 block | Stage-2 block |tvxyz| + * +---+--------------------+--------------------+-----+ + * + * If we create those stage-2 blocks, we'll end up with this + * incorrect mapping: + * d -> f + * e -> g + * f -> h + */ + gpa_offset = gpa_start & (PMD_SIZE - 1); + hva_offset = hva_start & (PMD_SIZE - 1); + if (gpa_offset != hva_offset) { + new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE; + } else { + if (gpa_offset == 0) + gpa_offset = PMD_SIZE; + if ((size + gpa_offset) < (PMD_SIZE * 2)) + new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE; + } + } + + return 0; +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + int needs_flush; + u32 old_flags = old ? old->flags : 0; + u32 new_flags = new ? new->flags : 0; + bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES; + + /* Only track memslot flags changed */ + if (change != KVM_MR_FLAGS_ONLY) + return; + + /* Discard dirty page tracking on readonly memslot */ + if ((old_flags & new_flags) & KVM_MEM_READONLY) + return; + + /* + * If dirty page logging is enabled, write protect all pages in the slot + * ready for dirty logging. + * + * There is no need to do this in any of the following cases: + * CREATE: No dirty mappings will already exist. + * MOVE/DELETE: The old mappings will already have been cleaned up by + * kvm_arch_flush_shadow_memslot() + */ + if (!(old_flags & KVM_MEM_LOG_DIRTY_PAGES) && log_dirty_pages) { + /* + * Initially-all-set does not require write protecting any page + * because they're all assumed to be dirty. 
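+         * (Illustrative note, an assumption about the generic KVM flow:
+         * with KVM_DIRTY_LOG_INITIALLY_SET, write protection is instead
+         * applied lazily, mask by mask, when userspace issues
+         * KVM_CLEAR_DIRTY_LOG, which ends up in
+         * kvm_arch_mmu_enable_log_dirty_pt_masked() above.)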
+ */ + if (kvm_dirty_log_manual_protect_and_init_set(kvm)) + return; + + spin_lock(&kvm->mmu_lock); + /* Write protect GPA page table entries */ + needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn, + new->base_gfn + new->npages); + spin_unlock(&kvm->mmu_lock); + if (needs_flush) + kvm_flush_remote_tlbs(kvm); + } +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ + kvm_flush_range(kvm, 0, kvm->arch.gpa_size >> PAGE_SHIFT, 0); +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) +{ + /* + * The slot has been made invalid (ready for moving or deletion), so we + * need to ensure that it can no longer be accessed by any guest vCPUs. + */ + kvm_flush_range(kvm, slot->base_gfn, slot->base_gfn + slot->npages, 1); +} + +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) +{ + kvm_ptw_ctx ctx; + + ctx.flag = 0; + ctx.ops = kvm_flush_pte; + kvm_ptw_prepare(kvm, &ctx); + INIT_LIST_HEAD(&ctx.list); + + return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT, + range->end << PAGE_SHIFT, &ctx); +} + +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + unsigned long prot_bits; + kvm_pte_t *ptep; + kvm_pfn_t pfn = pte_pfn(range->arg.pte); + gpa_t gpa = range->start << PAGE_SHIFT; + + ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + if (!ptep) + return false; + + /* Replacing an absent or old page doesn't need flushes */ + if (!kvm_pte_present(NULL, ptep) || !kvm_pte_young(*ptep)) { + kvm_set_pte(ptep, 0); + return false; + } + + /* Fill new pte if write protected or page migrated */ + prot_bits = _PAGE_PRESENT | __READABLE; + prot_bits |= _CACHE_MASK & pte_val(range->arg.pte); + + /* + * Set _PAGE_WRITE or _PAGE_DIRTY iff old and new pte both support + * _PAGE_WRITE for map_page_fast if next page write fault + * _PAGE_DIRTY since gpa has already recorded as dirty page + */ + prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte); + kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits))); + + return true; +} + +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + kvm_ptw_ctx ctx; + + ctx.flag = 0; + ctx.ops = kvm_mkold_pte; + kvm_ptw_prepare(kvm, &ctx); + + return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT, + range->end << PAGE_SHIFT, &ctx); +} + +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + gpa_t gpa = range->start << PAGE_SHIFT; + kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + + if (ptep && kvm_pte_present(NULL, ptep) && kvm_pte_young(*ptep)) + return true; + + return false; +} + +/* + * kvm_map_page_fast() - Fast path GPA fault handler. + * @vcpu: vCPU pointer. + * @gpa: Guest physical address of fault. + * @write: Whether the fault was due to a write. + * + * Perform fast path GPA fault handling, doing all that can be done without + * calling into KVM. This handles marking old pages young (for idle page + * tracking), and dirtying of clean pages (for dirty page logging). + * + * Returns: 0 on success, in which case we can update derived mappings and + * resume guest execution. + * -EFAULT on failure due to absent GPA mapping or write to + * read-only page, in which case KVM must be consulted. 
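+ *
+ * Illustrative fast-path outcomes (not part of the original patch):
+ * a read fault on a present but old pte just marks it young; a write
+ * fault on a writable but clean pte marks it dirty and logs the gfn;
+ * a write fault on a clean huge pte with dirty logging enabled fails
+ * with -EFAULT so the slow path can split the huge page first.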
+ */ +static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +{ + int ret = 0; + kvm_pfn_t pfn = 0; + kvm_pte_t *ptep, changed, new; + gfn_t gfn = gpa >> PAGE_SHIFT; + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *slot; + struct page *page; + + spin_lock(&kvm->mmu_lock); + + /* Fast path - just check GPA page table for an existing entry */ + ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + if (!ptep || !kvm_pte_present(NULL, ptep)) { + ret = -EFAULT; + goto out; + } + + /* Track access to pages marked old */ + new = kvm_pte_mkyoung(*ptep); + /* call kvm_set_pfn_accessed() after unlock */ + + if (write && !kvm_pte_dirty(new)) { + if (!kvm_pte_write(new)) { + ret = -EFAULT; + goto out; + } + + if (kvm_pte_huge(new)) { + /* + * Do not set write permission when dirty logging is + * enabled for HugePages + */ + slot = gfn_to_memslot(kvm, gfn); + if (kvm_slot_dirty_track_enabled(slot)) { + ret = -EFAULT; + goto out; + } + } + + /* Track dirtying of writeable pages */ + new = kvm_pte_mkdirty(new); + } + + changed = new ^ (*ptep); + if (changed) { + kvm_set_pte(ptep, new); + pfn = kvm_pte_pfn(new); + page = kvm_pfn_to_refcounted_page(pfn); + if (page) + get_page(page); + } + spin_unlock(&kvm->mmu_lock); + + if (changed) { + if (kvm_pte_young(changed)) + kvm_set_pfn_accessed(pfn); + + if (kvm_pte_dirty(changed)) { + mark_page_dirty(kvm, gfn); + kvm_set_pfn_dirty(pfn); + } + if (page) + put_page(page); + } + return ret; +out: + spin_unlock(&kvm->mmu_lock); + return ret; +} + +static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot, + unsigned long hva, bool write) +{ + hva_t start, end; + + /* Disable dirty logging on HugePages */ + if (kvm_slot_dirty_track_enabled(memslot) && write) + return false; + + if (kvm_hugepage_capable(memslot)) + return true; + + if (kvm_hugepage_incapable(memslot)) + return false; + + start = memslot->userspace_addr; + end = start + memslot->npages * PAGE_SIZE; + + /* + * Next, let's make sure we're not trying to map anything not covered + * by the memslot. This means we have to prohibit block size mappings + * for the beginning and end of a non-block aligned and non-block sized + * memory slot (illustrated by the head and tail parts of the + * userspace view above containing pages 'abcde' and 'xyz', + * respectively). + * + * Note that it doesn't matter if we do the check using the + * userspace_addr or the base_gfn, as both are equally aligned (per + * the check above) and equally sized. + */ + return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE)); +} + +/* + * Lookup the mapping level for @gfn in the current mm. + * + * WARNING! Use of host_pfn_mapping_level() requires the caller and the end + * consumer to be tied into KVM's handlers for MMU notifier events! + * + * There are several ways to safely use this helper: + * + * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before + * consuming it. In this case, mmu_lock doesn't need to be held during the + * lookup, but it does need to be held while checking the MMU notifier. + * + * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation + * event for the hva. This can be done by explicit checking the MMU notifier + * or by ensuring that KVM already has a valid mapping that covers the hva. + * + * - Do not use the result to install new mappings, e.g. use the host mapping + * level only to decide whether or not to zap an entry. 
In this case, it's
+ *   not required to hold mmu_lock (though it's highly likely the caller will
+ *   want to hold mmu_lock anyways, e.g. to modify SPTEs).
+ *
+ * Note! The lookup can still race with modifications to host page tables, but
+ * the above "rules" ensure KVM will not _consume_ the result of the walk if a
+ * race with the primary MMU occurs.
+ */
+static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
+                                const struct kvm_memory_slot *slot)
+{
+        int level = 0;
+        unsigned long hva;
+        unsigned long flags;
+        pgd_t pgd;
+        p4d_t p4d;
+        pud_t pud;
+        pmd_t pmd;
+
+        /*
+         * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
+         * is not solely for performance, it's also necessary to avoid the
+         * "writable" check in __gfn_to_hva_many(), which will always fail on
+         * read-only memslots due to gfn_to_hva() assuming writes. Earlier
+         * page fault steps have already verified the guest isn't writing a
+         * read-only memslot.
+         */
+        hva = __gfn_to_hva_memslot(slot, gfn);
+
+        /*
+         * Disable IRQs to prevent concurrent tear down of host page tables,
+         * e.g. if the primary MMU promotes a P*D to a huge page and then frees
+         * the original page table.
+         */
+        local_irq_save(flags);
+
+        /*
+         * Read each entry once. As above, a non-leaf entry can be promoted to
+         * a huge page _during_ this walk. Re-reading the entry could send the
+         * walk into the weeds, e.g. p*d_large() returns false (sees the old
+         * value) and then p*d_offset() walks into the target huge page instead
+         * of the old page table (sees the new value).
+         */
+        pgd = pgdp_get(pgd_offset(kvm->mm, hva));
+        if (pgd_none(pgd))
+                goto out;
+
+        p4d = p4dp_get(p4d_offset(&pgd, hva));
+        if (p4d_none(p4d) || !p4d_present(p4d))
+                goto out;
+
+        pud = pudp_get(pud_offset(&p4d, hva));
+        if (pud_none(pud) || !pud_present(pud))
+                goto out;
+
+        pmd = pmdp_get(pmd_offset(&pud, hva));
+        if (pmd_none(pmd) || !pmd_present(pmd))
+                goto out;
+
+        if (kvm_pte_huge(pmd_val(pmd)))
+                level = 1;
+
+out:
+        local_irq_restore(flags);
+        return level;
+}
+
+/*
+ * Split huge page
+ */
+static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t gfn)
+{
+        int i;
+        kvm_pte_t val, *child;
+        struct kvm *kvm = vcpu->kvm;
+        struct kvm_mmu_memory_cache *memcache;
+
+        memcache = &vcpu->arch.mmu_page_cache;
+        child = kvm_mmu_memory_cache_alloc(memcache);
+        val = kvm_pte_mksmall(*ptep);
+        for (i = 0; i < PTRS_PER_PTE; i++) {
+                kvm_set_pte(child + i, val);
+                val += PAGE_SIZE;
+        }
+
+        smp_wmb(); /* Make pte visible before pmd */
+        /* A later kvm_flush_tlb_gpa() will flush the huge page TLB entry */
+        kvm_set_pte(ptep, __pa(child));
+
+        kvm->stat.hugepages--;
+        kvm->stat.pages += PTRS_PER_PTE;
+
+        return child + (gfn & (PTRS_PER_PTE - 1));
+}
+
+/*
+ * kvm_map_page() - Map a guest physical page.
+ * @vcpu:   vCPU pointer.
+ * @gpa:    Guest physical address of fault.
+ * @write:  Whether the fault was due to a write.
+ *
+ * Handle GPA faults by creating a new GPA mapping (or updating an existing
+ * one).
+ *
+ * This takes care of marking pages young or dirty (idle/dirty page tracking),
+ * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
+ * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
+ * caller.
+ *
+ * Returns:  0 on success
+ *           -EFAULT if there is no memory region at @gpa or a write was
+ *           attempted to a read-only memory region. This is usually handled
+ *           as an MMIO access.
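+ *
+ * Illustrative retry flow (not from the original patch): if the host
+ * invalidates the mapping between the mmu_invalidate_seq snapshot and
+ * the mmu_lock acquisition, mmu_invalidate_retry_hva() rejects the
+ * pfn, the reference is dropped and the fault is replayed; after more
+ * than 100 consecutive retries the task calls schedule() to let the
+ * invalidation finish.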
+ */
+static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
+{
+        bool writeable;
+        int srcu_idx, err, retry_no = 0, level;
+        unsigned long hva, mmu_seq, prot_bits;
+        kvm_pfn_t pfn;
+        kvm_pte_t *ptep, new_pte;
+        gfn_t gfn = gpa >> PAGE_SHIFT;
+        struct kvm *kvm = vcpu->kvm;
+        struct kvm_memory_slot *memslot;
+        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+
+        /* Try the fast path to handle old / clean pages */
+        srcu_idx = srcu_read_lock(&kvm->srcu);
+        err = kvm_map_page_fast(vcpu, gpa, write);
+        if (!err)
+                goto out;
+
+        memslot = gfn_to_memslot(kvm, gfn);
+        hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);
+        if (kvm_is_error_hva(hva) || (write && !writeable)) {
+                err = -EFAULT;
+                goto out;
+        }
+
+        /* We need a minimum of cached pages ready for page table creation */
+        err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
+        if (err)
+                goto out;
+
+retry:
+        /*
+         * Used to check for invalidations in progress of the pfn that is
+         * returned by gfn_to_pfn_prot() below.
+         */
+        mmu_seq = kvm->mmu_invalidate_seq;
+        /*
+         * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in
+         * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+         * risk the page we get a reference to getting unmapped before we have a
+         * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
+         *
+         * This smp_rmb() pairs with the effective smp_wmb() of the combination
+         * of the pte_unmap_unlock() after the PTE is zapped, and the
+         * spin_lock() in the MMU notifier invalidation path before
+         * mmu_invalidate_seq is incremented.
+         */
+        smp_rmb();
+
+        /* Slow path - ask KVM core whether we can access this GPA */
+        pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
+        if (is_error_noslot_pfn(pfn)) {
+                err = -EFAULT;
+                goto out;
+        }
+
+        /* Check if an invalidation has taken place since we got pfn */
+        spin_lock(&kvm->mmu_lock);
+        if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) {
+                /*
+                 * This can happen when mappings are changed asynchronously, but
+                 * also synchronously if a COW is triggered by
+                 * gfn_to_pfn_prot().
+                 */
+                spin_unlock(&kvm->mmu_lock);
+                kvm_release_pfn_clean(pfn);
+                if (retry_no > 100) {
+                        retry_no = 0;
+                        schedule();
+                }
+                retry_no++;
+                goto retry;
+        }
+
+        /*
+         * For emulated devices, such as a virtio device, the actual cache
+         * attribute is determined by the physical machine.
+ * For pass through physical device, it should be uncachable + */ + prot_bits = _PAGE_PRESENT | __READABLE; + if (pfn_valid(pfn)) + prot_bits |= _CACHE_CC; + else + prot_bits |= _CACHE_SUC; + + if (writeable) { + prot_bits |= _PAGE_WRITE; + if (write) + prot_bits |= __WRITEABLE; + } + + /* Disable dirty logging on HugePages */ + level = 0; + if (fault_supports_huge_mapping(memslot, hva, write)) { + /* Check page level about host mmu*/ + level = host_pfn_mapping_level(kvm, gfn, memslot); + if (level == 1) { + /* + * Check page level about secondary mmu + * Disable hugepage if it is normal page on + * secondary mmu already + */ + ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + if (ptep && !kvm_pte_huge(*ptep)) + level = 0; + } + + if (level == 1) { + gfn = gfn & ~(PTRS_PER_PTE - 1); + pfn = pfn & ~(PTRS_PER_PTE - 1); + } + } + + /* Ensure page tables are allocated */ + ptep = kvm_populate_gpa(kvm, memcache, gpa, level); + new_pte = kvm_pfn_pte(pfn, __pgprot(prot_bits)); + if (level == 1) { + new_pte = kvm_pte_mkhuge(new_pte); + /* + * previous pmd entry is invalid_pte_table + * there is invalid tlb with small page + * need flush these invalid tlbs for current vcpu + */ + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + ++kvm->stat.hugepages; + } else if (kvm_pte_huge(*ptep) && write) + ptep = kvm_split_huge(vcpu, ptep, gfn); + else + ++kvm->stat.pages; + kvm_set_pte(ptep, new_pte); + spin_unlock(&kvm->mmu_lock); + + if (prot_bits & _PAGE_DIRTY) { + mark_page_dirty_in_slot(kvm, memslot, gfn); + kvm_set_pfn_dirty(pfn); + } + + kvm_release_pfn_clean(pfn); +out: + srcu_read_unlock(&kvm->srcu, srcu_idx); + return err; +} + +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +{ + int ret; + + ret = kvm_map_page(vcpu, gpa, write); + if (ret) + return ret; + + /* Invalidate this entry in the TLB */ + vcpu->arch.flush_gpa = gpa; + kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); + + return 0; +} + +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ +} + +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, + const struct kvm_memory_slot *memslot) +{ + kvm_flush_remote_tlbs(kvm); +} diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S new file mode 100644 index 0000000000000000000000000000000000000000..0c292f81849277178110a94f09e6b37e90d0dae6 --- /dev/null +++ b/arch/loongarch/kvm/switch.S @@ -0,0 +1,286 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include + +#define HGPR_OFFSET(x) (PT_R0 + 8*x) +#define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x) + +.macro kvm_save_host_gpr base + .irp n,1,2,3,22,23,24,25,26,27,28,29,30,31 + st.d $r\n, \base, HGPR_OFFSET(\n) + .endr +.endm + +.macro kvm_restore_host_gpr base + .irp n,1,2,3,22,23,24,25,26,27,28,29,30,31 + ld.d $r\n, \base, HGPR_OFFSET(\n) + .endr +.endm + +/* + * Save and restore all GPRs except base register, + * and default value of base register is a2. + */ +.macro kvm_save_guest_gprs base + .irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + st.d $r\n, \base, GGPR_OFFSET(\n) + .endr +.endm + +.macro kvm_restore_guest_gprs base + .irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + ld.d $r\n, \base, GGPR_OFFSET(\n) + .endr +.endm + +/* + * Prepare switch to guest, save host regs and restore guest regs. 
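+ * (Illustrative call flow, not part of the original patch:
+ * kvm_enter_guest() below saves the host GPRs and stashes the vcpu
+ * pointer in KVM_VCPU_KS, then this macro switches EENTRY, ERA, PGDL
+ * and GTLBC and executes 'ertn' to enter the guest; kvm_exc_entry
+ * performs the reverse sequence on every guest exception.)
+ *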
+ * a2: kvm_vcpu_arch, don't touch it until 'ertn'
+ * t0, t1: temp registers
+ */
+.macro kvm_switch_to_guest
+        /* Set host ECFG.VS=0, all exceptions share one exception entry */
+        csrrd           t0, LOONGARCH_CSR_ECFG
+        bstrins.w       t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT
+        csrwr           t0, LOONGARCH_CSR_ECFG
+
+        /* Load up the new EENTRY */
+        ld.d            t0, a2, KVM_ARCH_GEENTRY
+        csrwr           t0, LOONGARCH_CSR_EENTRY
+
+        /* Set Guest ERA */
+        ld.d            t0, a2, KVM_ARCH_GPC
+        csrwr           t0, LOONGARCH_CSR_ERA
+
+        /* Save host PGDL */
+        csrrd           t0, LOONGARCH_CSR_PGDL
+        st.d            t0, a2, KVM_ARCH_HPGD
+
+        /* Switch to kvm */
+        ld.d            t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH
+
+        /* Load guest PGDL */
+        li.w            t0, KVM_GPGD
+        ldx.d           t0, t1, t0
+        csrwr           t0, LOONGARCH_CSR_PGDL
+
+        /* Mix GID and RID */
+        csrrd           t1, LOONGARCH_CSR_GSTAT
+        bstrpick.w      t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT
+        csrrd           t0, LOONGARCH_CSR_GTLBC
+        bstrins.w       t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
+        csrwr           t0, LOONGARCH_CSR_GTLBC
+
+        /*
+         * Enable interrupts in root mode for the coming ertn so that host
+         * interrupts can be serviced while the VM runs.
+         * The guest CRMD comes from the separate GCSR_CRMD register.
+         */
+        ori             t0, zero, CSR_PRMD_PIE
+        csrxchg         t0, t0, LOONGARCH_CSR_PRMD
+
+        /* Set the PVM bit so that ertn returns to guest context */
+        ori             t0, zero, CSR_GSTAT_PVM
+        csrxchg         t0, t0, LOONGARCH_CSR_GSTAT
+
+        /* Load Guest GPRs */
+        kvm_restore_guest_gprs a2
+        /* Load KVM_ARCH register */
+        ld.d            a2, a2, (KVM_ARCH_GGPR + 8 * REG_A2)
+
+        ertn /* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */
+.endm
+
+        /*
+         * Exception entry for general exceptions from guest mode
+         *  - IRQ is disabled
+         *  - kernel privilege in root mode
+         *  - page mode kept unchanged from the previous PRMD in root mode
+         *  - FIXME: TLB exceptions must not happen here, since TLB-related
+         *    registers such as the pgd table and vmid registers are still
+         *    in guest mode; this will be fixed once hardware page table
+         *    walking is enabled.
+         * Load kvm_vcpu from the reserved CSR KVM_VCPU_KS and save a2 to
+         * KVM_TEMP_KS.
+         */
+        .text
+        .cfi_sections   .debug_frame
+SYM_CODE_START(kvm_exc_entry)
+        UNWIND_HINT_UNDEFINED
+        csrwr           a2, KVM_TEMP_KS
+        csrrd           a2, KVM_VCPU_KS
+        addi.d          a2, a2, KVM_VCPU_ARCH
+
+        /* After saving the GPRs, any GPR is free to use */
+        kvm_save_guest_gprs a2
+        /* Save guest A2 */
+        csrrd           t0, KVM_TEMP_KS
+        st.d            t0, a2, (KVM_ARCH_GGPR + 8 * REG_A2)
+
+        /* A2 is kvm_vcpu_arch, A1 is free to use */
+        csrrd           s1, KVM_VCPU_KS
+        ld.d            s0, s1, KVM_VCPU_RUN
+
+        csrrd           t0, LOONGARCH_CSR_ESTAT
+        st.d            t0, a2, KVM_ARCH_HESTAT
+        csrrd           t0, LOONGARCH_CSR_ERA
+        st.d            t0, a2, KVM_ARCH_GPC
+        csrrd           t0, LOONGARCH_CSR_BADV
+        st.d            t0, a2, KVM_ARCH_HBADV
+        csrrd           t0, LOONGARCH_CSR_BADI
+        st.d            t0, a2, KVM_ARCH_HBADI
+
+        /* Restore host ECFG.VS */
+        csrrd           t0, LOONGARCH_CSR_ECFG
+        ld.d            t1, a2, KVM_ARCH_HECFG
+        or              t0, t0, t1
+        csrwr           t0, LOONGARCH_CSR_ECFG
+
+        /* Restore host EENTRY */
+        ld.d            t0, a2, KVM_ARCH_HEENTRY
+        csrwr           t0, LOONGARCH_CSR_EENTRY
+
+        /* Restore host pgd table */
+        ld.d            t0, a2, KVM_ARCH_HPGD
+        csrwr           t0, LOONGARCH_CSR_PGDL
+
+        /*
+         * Disable the PGM bit so that the next ertn enters root mode by
+         * default.
+         */
+        ori             t0, zero, CSR_GSTAT_PVM
+        csrxchg         zero, t0, LOONGARCH_CSR_GSTAT
+
+        /*
+         * Clear the GTLBC.TGID field:
+         *  0:      subsequent TLB instructions update root TLB entries
+         *  others: subsequent TLB instructions update guest TLB entries
+         *          (gpa to hpa)
+         */
+        csrrd           t0, LOONGARCH_CSR_GTLBC
+        bstrins.w       t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
+        csrwr           t0, LOONGARCH_CSR_GTLBC
+        ld.d            tp, a2, KVM_ARCH_HTP
+        ld.d            sp, a2, KVM_ARCH_HSP
+        /* restore the per-CPU base register
*/ + ld.d u0, a2, KVM_ARCH_HPERCPU + addi.d sp, sp, -PT_SIZE + + /* Prepare handle exception */ + or a0, s0, zero + or a1, s1, zero + ld.d t8, a2, KVM_ARCH_HANDLE_EXIT + jirl ra, t8, 0 + + or a2, s1, zero + addi.d a2, a2, KVM_VCPU_ARCH + + /* Resume host when ret <= 0 */ + blez a0, ret_to_host + + /* + * Return to guest + * Save per cpu register again, maybe switched to another cpu + */ + st.d u0, a2, KVM_ARCH_HPERCPU + + /* Save kvm_vcpu to kscratch */ + csrwr s1, KVM_VCPU_KS + kvm_switch_to_guest + +ret_to_host: + ld.d a2, a2, KVM_ARCH_HSP + addi.d a2, a2, -PT_SIZE + kvm_restore_host_gpr a2 + jr ra + +SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL) +SYM_CODE_END(kvm_exc_entry) + +/* + * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu) + * + * @register_param: + * a0: kvm_run* run + * a1: kvm_vcpu* vcpu + */ +SYM_FUNC_START(kvm_enter_guest) + /* Allocate space in stack bottom */ + addi.d a2, sp, -PT_SIZE + /* Save host GPRs */ + kvm_save_host_gpr a2 + + addi.d a2, a1, KVM_VCPU_ARCH + st.d sp, a2, KVM_ARCH_HSP + st.d tp, a2, KVM_ARCH_HTP + /* Save per cpu register */ + st.d u0, a2, KVM_ARCH_HPERCPU + + /* Save kvm_vcpu to kscratch */ + csrwr a1, KVM_VCPU_KS + kvm_switch_to_guest +SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL) +SYM_FUNC_END(kvm_enter_guest) + +SYM_FUNC_START(kvm_save_fpu) + fpu_save_csr a0 t1 + fpu_save_double a0 t1 + fpu_save_cc a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_save_fpu) + +SYM_FUNC_START(kvm_restore_fpu) + fpu_restore_double a0 t1 + fpu_restore_csr a0 t1 t2 + fpu_restore_cc a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_fpu) + +#ifdef CONFIG_CPU_HAS_LSX +SYM_FUNC_START(kvm_save_lsx) + fpu_save_csr a0 t1 + fpu_save_cc a0 t1 t2 + lsx_save_data a0 t1 + jr ra +SYM_FUNC_END(kvm_save_lsx) + +SYM_FUNC_START(kvm_restore_lsx) + lsx_restore_data a0 t1 + fpu_restore_cc a0 t1 t2 + fpu_restore_csr a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_lsx) +#endif + +#ifdef CONFIG_CPU_HAS_LASX +SYM_FUNC_START(kvm_save_lasx) + fpu_save_csr a0 t1 + fpu_save_cc a0 t1 t2 + lasx_save_data a0 t1 + jr ra +SYM_FUNC_END(kvm_save_lasx) + +SYM_FUNC_START(kvm_restore_lasx) + lasx_restore_data a0 t1 + fpu_restore_cc a0 t1 t2 + fpu_restore_csr a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_lasx) +#endif + .section ".rodata" +SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry) +SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest) + +#ifdef CONFIG_CPU_HAS_LBT +STACK_FRAME_NON_STANDARD kvm_restore_fpu +#ifdef CONFIG_CPU_HAS_LSX +STACK_FRAME_NON_STANDARD kvm_restore_lsx +#endif +#ifdef CONFIG_CPU_HAS_LASX +STACK_FRAME_NON_STANDARD kvm_restore_lasx +#endif +#endif diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c new file mode 100644 index 0000000000000000000000000000000000000000..74a4b5c272d60e99523e12e89d5e663d53009c2b --- /dev/null +++ b/arch/loongarch/kvm/timer.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +/* + * ktime_to_tick() - Scale ktime_t to timer tick value. 
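+ *
+ * Illustrative arithmetic (assuming MNSEC_PER_SEC is NSEC_PER_SEC
+ * scaled by the same 2^20 factor as timer_mhz in kvm_init_timer()):
+ * tick = ns * timer_mhz / MNSEC_PER_SEC, i.e. roughly
+ * ns * timer_hz / NSEC_PER_SEC; for a 100 MHz stable timer, 1 ms of
+ * ktime is about 100000 ticks.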
+ */
+static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
+{
+        u64 delta;
+
+        delta = ktime_to_ns(now);
+        return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
+}
+
+static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
+{
+        return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
+}
+
+/* Low level hrtimer wake routine */
+enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
+{
+        struct kvm_vcpu *vcpu;
+
+        vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
+        kvm_queue_irq(vcpu, INT_TI);
+        rcuwait_wake_up(&vcpu->wait);
+
+        return HRTIMER_NORESTART;
+}
+
+/*
+ * Initialise the timer to the specified frequency, zero it
+ */
+void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
+{
+        vcpu->arch.timer_mhz = timer_hz >> 20;
+
+        /* Starting at 0 */
+        kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
+}
+
+/*
+ * Restore soft timer state from saved context.
+ */
+void kvm_restore_timer(struct kvm_vcpu *vcpu)
+{
+        unsigned long cfg, estat;
+        unsigned long ticks, delta, period;
+        ktime_t expire, now;
+        struct loongarch_csrs *csr = vcpu->arch.csr;
+
+        /*
+         * Set the guest stable timer cfg csr.
+         * Disable the timer before restoring the ESTAT CSR register, to
+         * avoid a stale timer interrupt from the old timer cfg.
+         */
+        cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
+
+        write_gcsr_timercfg(0);
+        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
+        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
+        if (!(cfg & CSR_TCFG_EN)) {
+                /* Guest timer is disabled, just restore timer registers */
+                kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
+                return;
+        }
+
+        /*
+         * Freeze the soft-timer and sync the guest stable timer with it.
+         */
+        if (kvm_vcpu_is_blocking(vcpu))
+                hrtimer_cancel(&vcpu->arch.swtimer);
+
+        /*
+         * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
+         * If the oneshot timer has fired, CSR TVAL will be -1. There are two
+         * cases:
+         *  1) the timer fired while exiting to the host
+         *  2) the timer fired while the vm was handling a timer irq, and then
+         *     exited to the host. The host should not inject the timer irq
+         *     again, to avoid a spurious timer interrupt.
+         */
+        ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
+        estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
+        if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) {
+                /*
+                 * Writing 0 to LOONGARCH_CSR_TVAL will inject a timer irq
+                 * and set CSR TVAL to -1
+                 */
+                write_gcsr_timertick(0);
+
+                /*
+                 * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR clears the
+                 * timer interrupt while CSR TVAL keeps its -1 value, which
+                 * avoids a spurious timer interrupt
+                 */
+                if (!(estat & CPU_TIMER))
+                        gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
+                return;
+        }
+
+        /*
+         * Set the remaining tick value if not expired
+         */
+        delta = 0;
+        now = ktime_get();
+        expire = vcpu->arch.expire;
+        if (ktime_before(now, expire))
+                delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
+        else if (cfg & CSR_TCFG_PERIOD) {
+                period = cfg & CSR_TCFG_VAL;
+                delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
+                delta = period - (delta % period);
+
+                /*
+                 * Inject a timer interrupt here even though the soft timer
+                 * should already inject one asynchronously, since the soft
+                 * timer may be cancelled while the async injection is in
+                 * flight.
+                 */
+                kvm_queue_irq(vcpu, INT_TI);
+        }
+
+        write_gcsr_timertick(delta);
+}
+
+/*
+ * Save guest timer state and switch to software emulation of the guest
+ * timer. The hard timer must already be in use, so preemption should be
+ * disabled.
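+ *
+ * Illustrative example (not from the original patch): for a one-shot
+ * timer with 2000 ticks remaining (TVAL == 2000, below the initial
+ * TCFG count), the body below converts the 2000 ticks to nanoseconds
+ * and, if the vcpu is blocking, arms the hrtimer at now + delta so
+ * the software wakeup fires when the guest timer would have expired.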
+ */
+static void _kvm_save_timer(struct kvm_vcpu *vcpu)
+{
+        unsigned long ticks, delta, cfg;
+        ktime_t expire;
+        struct loongarch_csrs *csr = vcpu->arch.csr;
+
+        cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
+        ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
+
+        /*
+         * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
+         * If the periodic timer has fired, CSR TVAL is reloaded from CSR TCFG.
+         * If the oneshot timer has fired, CSR TVAL will be -1.
+         * A fired one-shot timer is therefore detected by TVAL being larger
+         * than TCFG.
+         */
+        if (ticks < cfg)
+                delta = tick_to_ns(vcpu, ticks);
+        else
+                delta = 0;
+
+        expire = ktime_add_ns(ktime_get(), delta);
+        vcpu->arch.expire = expire;
+        if (kvm_vcpu_is_blocking(vcpu)) {
+
+                /*
+                 * HRTIMER_MODE_PINNED is suggested since the vcpu is likely
+                 * to run on the same physical cpu next time
+                 */
+                hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
+        }
+}
+
+/*
+ * Save guest timer state and switch to soft guest timer if hard timer was in
+ * use.
+ */
+void kvm_save_timer(struct kvm_vcpu *vcpu)
+{
+        struct loongarch_csrs *csr = vcpu->arch.csr;
+
+        preempt_disable();
+
+        /* Save hard timer state */
+        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
+        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
+        if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
+                _kvm_save_timer(vcpu);
+
+        /* Save timer-related state to vCPU context */
+        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
+        preempt_enable();
+}
diff --git a/arch/loongarch/kvm/tlb.c b/arch/loongarch/kvm/tlb.c
new file mode 100644
index 0000000000000000000000000000000000000000..ebdbe9264e9c60f57f316545572097c0362fcbf0
--- /dev/null
+++ b/arch/loongarch/kvm/tlb.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#include
+#include
+#include
+
+/*
+ * kvm_flush_tlb_all() - Flush all root TLB entries for guests.
+ *
+ * Invalidate all entries including GVA-->GPA and GPA-->HPA mappings.
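+ *
+ * (Illustrative note, an assumption about the invtlb encoding: the
+ * INVTLB_ALLGID operation below drops entries for all guest IDs,
+ * while kvm_flush_tlb_gpa() uses INVTLB_GID_ADDR with the current
+ * GSTAT.GID and a gpa aligned with PAGE_MASK << 1, i.e. to the
+ * even/odd page pair that a single TLB entry maps.)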
+ */ +void kvm_flush_tlb_all(void) +{ + unsigned long flags; + + local_irq_save(flags); + invtlb_all(INVTLB_ALLGID, 0, 0); + local_irq_restore(flags); +} + +void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa) +{ + lockdep_assert_irqs_disabled(); + gpa &= (PAGE_MASK << 1); + invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa); +} diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h new file mode 100644 index 0000000000000000000000000000000000000000..1783397b1bc88e852b5b424549e00acc1cce0ff8 --- /dev/null +++ b/arch/loongarch/kvm/trace.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_H + +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +/* + * Tracepoints for VM enters + */ +DECLARE_EVENT_CLASS(kvm_transition, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu), + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(unsigned long, pc) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; + __entry->pc = vcpu->arch.pc; + ), + + TP_printk("vcpu %u PC: 0x%08lx", __entry->vcpu_id, __entry->pc) +); + +DEFINE_EVENT(kvm_transition, kvm_enter, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +DEFINE_EVENT(kvm_transition, kvm_reenter, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +DEFINE_EVENT(kvm_transition, kvm_out, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +/* Further exit reasons */ +#define KVM_TRACE_EXIT_IDLE 64 +#define KVM_TRACE_EXIT_CACHE 65 + +/* Tracepoints for VM exits */ +#define kvm_trace_symbol_exit_types \ + { KVM_TRACE_EXIT_IDLE, "IDLE" }, \ + { KVM_TRACE_EXIT_CACHE, "CACHE" } + +DECLARE_EVENT_CLASS(kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason), + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(unsigned long, pc) + __field(unsigned int, reason) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; + __entry->pc = vcpu->arch.pc; + __entry->reason = reason; + ), + + TP_printk("vcpu %u [%s] PC: 0x%08lx", + __entry->vcpu_id, + __print_symbolic(__entry->reason, + kvm_trace_symbol_exit_types), + __entry->pc) +); + +DEFINE_EVENT(kvm_exit, kvm_exit_idle, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +DEFINE_EVENT(kvm_exit, kvm_exit_cache, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +DEFINE_EVENT(kvm_exit, kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +TRACE_EVENT(kvm_exit_gspr, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word), + TP_ARGS(vcpu, inst_word), + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(unsigned int, inst_word) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; + __entry->inst_word = inst_word; + ), + + TP_printk("vcpu %u Inst word: 0x%08x", __entry->vcpu_id, + __entry->inst_word) +); + +#define KVM_TRACE_AUX_SAVE 0 +#define KVM_TRACE_AUX_RESTORE 1 +#define KVM_TRACE_AUX_ENABLE 2 +#define KVM_TRACE_AUX_DISABLE 3 +#define KVM_TRACE_AUX_DISCARD 4 + +#define KVM_TRACE_AUX_FPU 1 +#define KVM_TRACE_AUX_LSX 2 +#define KVM_TRACE_AUX_LASX 3 + +#define kvm_trace_symbol_aux_op \ + { KVM_TRACE_AUX_SAVE, "save" }, \ + { KVM_TRACE_AUX_RESTORE, "restore" }, \ + { KVM_TRACE_AUX_ENABLE, "enable" }, \ + { KVM_TRACE_AUX_DISABLE, "disable" }, \ + { KVM_TRACE_AUX_DISCARD, "discard" } + +#define 
kvm_trace_symbol_aux_state \ + { KVM_TRACE_AUX_FPU, "FPU" }, \ + { KVM_TRACE_AUX_LSX, "LSX" }, \ + { KVM_TRACE_AUX_LASX, "LASX" } + +TRACE_EVENT(kvm_aux, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, + unsigned int state), + TP_ARGS(vcpu, op, state), + TP_STRUCT__entry( + __field(unsigned long, pc) + __field(u8, op) + __field(u8, state) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + __entry->op = op; + __entry->state = state; + ), + + TP_printk("%s %s PC: 0x%08lx", + __print_symbolic(__entry->op, + kvm_trace_symbol_aux_op), + __print_symbolic(__entry->state, + kvm_trace_symbol_aux_state), + __entry->pc) +); + +TRACE_EVENT(kvm_vpid_change, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned long vpid), + TP_ARGS(vcpu, vpid), + TP_STRUCT__entry( + __field(unsigned long, vpid) + ), + + TP_fast_assign( + __entry->vpid = vpid; + ), + + TP_printk("VPID: 0x%08lx", __entry->vpid) +); + +#endif /* _TRACE_KVM_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/loongarch/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c new file mode 100644 index 0000000000000000000000000000000000000000..50bd40d36eb9e5eecf0c522180ff7b4b33c0cb57 --- /dev/null +++ b/arch/loongarch/kvm/vcpu.c @@ -0,0 +1,1768 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include "trace.h" + +const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { + KVM_GENERIC_VCPU_STATS(), + STATS_DESC_COUNTER(VCPU, int_exits), + STATS_DESC_COUNTER(VCPU, idle_exits), + STATS_DESC_COUNTER(VCPU, cpucfg_exits), + STATS_DESC_COUNTER(VCPU, signal_exits), + STATS_DESC_COUNTER(VCPU, hypercall_exits) +}; + +const struct kvm_stats_header kvm_vcpu_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vcpu_stats_desc), +}; + +static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu) +{ + struct kvm_context *context; + + context = this_cpu_ptr(vcpu->kvm->arch.vmcs); + context->perf_cntr[0] = read_csr_perfcntr0(); + context->perf_cntr[1] = read_csr_perfcntr1(); + context->perf_cntr[2] = read_csr_perfcntr2(); + context->perf_cntr[3] = read_csr_perfcntr3(); + context->perf_ctrl[0] = write_csr_perfctrl0(0); + context->perf_ctrl[1] = write_csr_perfctrl1(0); + context->perf_ctrl[2] = write_csr_perfctrl2(0); + context->perf_ctrl[3] = write_csr_perfctrl3(0); +} + +static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu) +{ + struct kvm_context *context; + + context = this_cpu_ptr(vcpu->kvm->arch.vmcs); + write_csr_perfcntr0(context->perf_cntr[0]); + write_csr_perfcntr1(context->perf_cntr[1]); + write_csr_perfcntr2(context->perf_cntr[2]); + write_csr_perfcntr3(context->perf_cntr[3]); + write_csr_perfctrl0(context->perf_ctrl[0]); + write_csr_perfctrl1(context->perf_ctrl[1]); + write_csr_perfctrl2(context->perf_ctrl[2]); + write_csr_perfctrl3(context->perf_ctrl[3]); +} + + +static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + 
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); +} + +static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); +} + +static int kvm_own_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + + if (!kvm_guest_has_pmu(&vcpu->arch)) + return -EINVAL; + + kvm_save_host_pmu(vcpu); + + /* Set PM0-PM(num) to guest */ + val = read_csr_gcfg() & ~CSR_GCFG_GPERF; + val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; + write_csr_gcfg(val); + + kvm_restore_guest_pmu(vcpu); + + return 0; +} + +static void kvm_lose_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU)) + return; + + kvm_save_guest_pmu(vcpu); + + /* Disable pmu access from guest */ + write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF); + + /* + * Clear KVM_LARCH_PMU if the guest is not using PMU CSRs when + * exiting the guest, so that the next time trap into the guest. + * We don't need to deal with PMU CSRs contexts. + */ + val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + if (!(val & KVM_PMU_EVENT_ENABLED)) + vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU; + + kvm_restore_host_pmu(vcpu); +} + +static void kvm_restore_pmu(struct kvm_vcpu *vcpu) +{ + if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU)) + kvm_make_request(KVM_REQ_PMU, vcpu); +} + +static void kvm_check_pmu(struct kvm_vcpu *vcpu) +{ + if (kvm_check_request(KVM_REQ_PMU, vcpu)) { + kvm_own_pmu(vcpu); + vcpu->arch.aux_inuse |= KVM_LARCH_PMU; + } +} + +static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) +{ + u32 version; + u64 steal; + gpa_t gpa; + struct kvm_memslots *slots; + struct kvm_steal_time __user *st; + struct gfn_to_hva_cache *ghc; + + ghc = &vcpu->arch.st.cache; + gpa = vcpu->arch.st.guest_addr; + if (!(gpa & KVM_STEAL_PHYS_VALID)) + return; + + gpa &= KVM_STEAL_PHYS_MASK; + slots = kvm_memslots(vcpu->kvm); + if (slots->generation != ghc->generation || gpa != ghc->gpa) { + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) { + ghc->gpa = INVALID_GPA; + return; + } + } + + st = (struct kvm_steal_time __user *)ghc->hva; + unsafe_get_user(version, &st->version, out); + if (version & 1) + version += 1; /* first time write, random junk */ + + version += 1; + unsafe_put_user(version, &st->version, out); + smp_wmb(); + + unsafe_get_user(steal, &st->steal, out); + steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; + vcpu->arch.st.last_steal = current->sched_info.run_delay; + unsafe_put_user(steal, &st->steal, out); + + smp_wmb(); + version += 1; + unsafe_put_user(version, &st->version, out); +out: + mark_page_dirty_in_slot(vcpu->kvm, 
ghc->memslot, gpa_to_gfn(ghc->gpa)); +} + +/* + * kvm_check_requests - check and handle pending vCPU requests + * + * Return: RESUME_GUEST if we should enter the guest + * RESUME_HOST if we should exit to userspace + */ +static int kvm_check_requests(struct kvm_vcpu *vcpu) +{ + if (!kvm_request_pending(vcpu)) + return RESUME_GUEST; + + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) + vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ + + if (kvm_dirty_ring_check_request(vcpu)) + return RESUME_HOST; + + if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) + kvm_update_stolen_time(vcpu); + + return RESUME_GUEST; +} + +static void kvm_late_check_requests(struct kvm_vcpu *vcpu) +{ + lockdep_assert_irqs_disabled(); + if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu)) + if (vcpu->arch.flush_gpa != INVALID_GPA) { + kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); + vcpu->arch.flush_gpa = INVALID_GPA; + } +} + +/* + * Check and handle pending signal and vCPU requests etc + * Run with irq enabled and preempt enabled + * + * Return: RESUME_GUEST if we should enter the guest + * RESUME_HOST if we should exit to userspace + * < 0 if we should exit to userspace, where the return value + * indicates an error + */ +static int kvm_enter_guest_check(struct kvm_vcpu *vcpu) +{ + int ret; + + /* + * Check conditions before entering the guest + */ + ret = xfer_to_guest_mode_handle_work(vcpu); + if (ret < 0) + return ret; + + ret = kvm_check_requests(vcpu); + + return ret; +} + +/* + * Called with irq enabled + * + * Return: RESUME_GUEST if we should enter the guest, and irq disabled + * Others if we should exit to userspace + */ +static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) +{ + int ret; + + do { + ret = kvm_enter_guest_check(vcpu); + if (ret != RESUME_GUEST) + break; + + /* + * Handle vcpu timer, interrupts, check requests and + * check vmid before vcpu enter guest + */ + local_irq_disable(); + kvm_deliver_intr(vcpu); + kvm_deliver_exception(vcpu); + /* Make sure the vcpu mode has been written */ + smp_store_mb(vcpu->mode, IN_GUEST_MODE); + kvm_check_vpid(vcpu); + kvm_check_pmu(vcpu); + + /* + * Called after function kvm_check_vpid() + * Since it updates CSR.GSTAT used by kvm_flush_tlb_gpa(), + * and it may also clear KVM_REQ_TLB_FLUSH_GPA pending bit + */ + kvm_late_check_requests(vcpu); + vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); + /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */ + vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; + + if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { + /* make sure the vcpu mode has been written */ + smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); + local_irq_enable(); + ret = -EAGAIN; + } + } while (ret != RESUME_GUEST); + + return ret; +} + +/* + * Return 1 for resume guest and "<= 0" for resume host. 
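+ * (Illustrative, assuming the usual LoongArch KVM convention of
+ * RESUME_GUEST == 1 and RESUME_HOST == 0: kvm_handle_fault() may also
+ * return a negative errno, which resumes the host with an error.)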
+ */ +static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + int ret = RESUME_GUEST; + unsigned long estat = vcpu->arch.host_estat; + u32 intr = estat & 0x1fff; /* Ignore NMI */ + u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; + + vcpu->mode = OUTSIDE_GUEST_MODE; + + /* Set a default exit reason */ + run->exit_reason = KVM_EXIT_UNKNOWN; + + kvm_lose_pmu(vcpu); + + guest_timing_exit_irqoff(); + guest_state_exit_irqoff(); + local_irq_enable(); + + trace_kvm_exit(vcpu, ecode); + if (ecode) { + ret = kvm_handle_fault(vcpu, ecode); + } else { + WARN(!intr, "vm exiting with suspicious irq\n"); + ++vcpu->stat.int_exits; + } + + if (ret == RESUME_GUEST) + ret = kvm_pre_enter_guest(vcpu); + + if (ret != RESUME_GUEST) { + local_irq_disable(); + return ret; + } + + guest_timing_enter_irqoff(); + guest_state_enter_irqoff(); + trace_kvm_reenter(vcpu); + + return RESUME_GUEST; +} + +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + return !!(vcpu->arch.irq_pending) && + vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; +} + +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return false; +} + +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + return -EINVAL; +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + int ret; + + /* Protect from TOD sync and vcpu_load/put() */ + preempt_disable(); + ret = kvm_pending_timer(vcpu) || + kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI); + preempt_enable(); + + return ret; +} + +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) +{ + int i; + + kvm_debug("vCPU Register Dump:\n"); + kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); + kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); + + for (i = 0; i < 32; i += 4) { + kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i, + vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], + vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); + } + + kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n", + kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD), + kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT)); + + kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA)); + + return 0; +} + +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + *mp_state = vcpu->arch.mp_state; + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + int ret = 0; + + switch (mp_state->mp_state) { + case KVM_MP_STATE_RUNNABLE: + vcpu->arch.mp_state = *mp_state; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) + return -EINVAL; + + if (dbg->control & KVM_GUESTDBG_ENABLE) + vcpu->guest_debug = dbg->control; + else + vcpu->guest_debug = 0; + + return 0; +} + +static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val) +{ + int cpuid; + struct kvm_phyid_map *map; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (val >= KVM_MAX_PHYID) + return -EINVAL; + + map = vcpu->kvm->arch.phyid_map; + cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID); + + spin_lock(&vcpu->kvm->arch.phyid_map_lock); + if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) { + /* Discard duplicated CPUID 
set operation */ + if (cpuid == val) { + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; + } + + /* + * CPUID is already set before + * Forbid changing to a different CPUID at runtime + */ + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return -EINVAL; + } + + if (map->phys_map[val].enabled) { + /* Discard duplicated CPUID set operation */ + if (vcpu == map->phys_map[val].vcpu) { + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; + } + + /* + * New CPUID is already set with other vcpu + * Forbid sharing the same CPUID between different vcpus + */ + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return -EINVAL; + } + + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val); + map->phys_map[val].enabled = true; + map->phys_map[val].vcpu = vcpu; + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + + return 0; +} + +static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu) +{ + int cpuid; + struct kvm_phyid_map *map; + struct loongarch_csrs *csr = vcpu->arch.csr; + + map = vcpu->kvm->arch.phyid_map; + cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID); + + if (cpuid >= KVM_MAX_PHYID) + return; + + spin_lock(&vcpu->kvm->arch.phyid_map_lock); + if (map->phys_map[cpuid].enabled) { + map->phys_map[cpuid].vcpu = NULL; + map->phys_map[cpuid].enabled = false; + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID); + } + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); +} + +struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid) +{ + struct kvm_phyid_map *map; + + if (cpuid >= KVM_MAX_PHYID) + return NULL; + + map = kvm->arch.phyid_map; + if (!map->phys_map[cpuid].enabled) + return NULL; + + return map->phys_map[cpuid].vcpu; +} + +static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) +{ + unsigned long gintc; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(id) & INVALID_GCSR) + return -EINVAL; + + if (id == LOONGARCH_CSR_ESTAT) { + preempt_disable(); + vcpu_load(vcpu); + /* + * Sync pending interrupts into ESTAT so that interrupt + * remains during VM migration stage + */ + kvm_deliver_intr(vcpu); + vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; + vcpu_put(vcpu); + preempt_enable(); + + /* ESTAT IP0~IP7 get from GINTC */ + gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff; + *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2); + return 0; + } + + /* + * Get software CSR state since software state is consistent + * with hardware for synchronous ioctl + */ + *val = kvm_read_sw_gcsr(csr, id); + + return 0; +} + +static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) +{ + int ret = 0, gintc; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(id) & INVALID_GCSR) + return -EINVAL; + + if (id == LOONGARCH_CSR_CPUID) + return kvm_set_cpuid(vcpu, val); + + if (id == LOONGARCH_CSR_ESTAT) { + /* ESTAT IP0~IP7 inject through GINTC */ + gintc = (val >> 2) & 0xff; + kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc); + + gintc = val & ~(0xffUL << 2); + kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc); + + return ret; + } + + kvm_write_sw_gcsr(csr, id, val); + + /* + * After modifying the PMU CSR register value of the vcpu. + * If the PMU CSRs are used, we need to set KVM_REQ_PMU. 
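+ *
+ * Illustrative example (not from the original patch): if userspace
+ * restores PERFCTRL0 with an enabled event during migration, the OR
+ * of the four PERFCTRL values in the code that follows has
+ * KVM_PMU_EVENT_ENABLED set, so KVM_REQ_PMU is raised and
+ * kvm_check_pmu() re-owns the hardware counters before the next
+ * guest entry.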
+ */ + if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) { + unsigned long val; + + val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + + if (val & KVM_PMU_EVENT_ENABLED) + kvm_make_request(KVM_REQ_PMU, vcpu); + } + + return ret; +} + +static int _kvm_get_cpucfg_mask(int id, u64 *v) +{ + if (id < 0 || id >= KVM_MAX_CPUCFG_REGS) + return -EINVAL; + + switch (id) { + case LOONGARCH_CPUCFG0: + *v = GENMASK(31, 0); + return 0; + case LOONGARCH_CPUCFG1: + /* CPUCFG1_MSGINT is not supported by KVM */ + *v = GENMASK(25, 0); + return 0; + case LOONGARCH_CPUCFG2: + /* CPUCFG2 features unconditionally supported by KVM */ + *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | + CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | + CPUCFG2_LSPW | CPUCFG2_LAM; + /* + * For the ISA extensions listed below, if one is supported + * by the host, then it is also supported by KVM. + */ + if (cpu_has_lsx) + *v |= CPUCFG2_LSX; + if (cpu_has_lasx) + *v |= CPUCFG2_LASX; + if (cpu_has_lbt_x86) + *v |= CPUCFG2_X86BT; + if (cpu_has_lbt_arm) + *v |= CPUCFG2_ARMBT; + if (cpu_has_lbt_mips) + *v |= CPUCFG2_MIPSBT; + + return 0; + case LOONGARCH_CPUCFG3: + *v = GENMASK(16, 0); + return 0; + case LOONGARCH_CPUCFG4: + case LOONGARCH_CPUCFG5: + *v = GENMASK(31, 0); + return 0; + case LOONGARCH_CPUCFG6: + if (cpu_has_pmp) + *v = GENMASK(14, 0); + else + *v = 0; + return 0; + case LOONGARCH_CPUCFG16: + *v = GENMASK(16, 0); + return 0; + case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20: + *v = GENMASK(30, 0); + return 0; + default: + /* + * CPUCFG bits should be zero if reserved by HW or not + * supported by KVM. + */ + *v = 0; + return 0; + } +} + +static int kvm_check_cpucfg(int id, u64 val) +{ + int ret; + u64 mask = 0; + + ret = _kvm_get_cpucfg_mask(id, &mask); + if (ret) + return ret; + + if (val & ~mask) + /* Unsupported features and/or the higher 32 bits should not be set */ + return -EINVAL; + + switch (id) { + case LOONGARCH_CPUCFG2: + if (!(val & CPUCFG2_LLFTP)) + /* Guests must have a constant timer */ + return -EINVAL; + if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP))) + /* Single and double float point must both be set when FP is enabled */ + return -EINVAL; + if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP)) + /* LSX architecturally implies FP but val does not satisfy that */ + return -EINVAL; + if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX)) + /* LASX architecturally implies LSX and FP but val does not satisfy that */ + return -EINVAL; + return 0; + case LOONGARCH_CPUCFG6: + if (val & CPUCFG6_PMP) { + u32 host = read_cpucfg(LOONGARCH_CPUCFG6); + if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS)) + return -EINVAL; + if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM)) + return -EINVAL; + if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM)) + return -EINVAL; + } + return 0; + default: + /* + * Values for the other CPUCFG IDs are not being further validated + * besides the mask check above. 
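+		 *
+		 * (Worked example under the masks above: LOONGARCH_CPUCFG3 has
+		 * mask GENMASK(16, 0), so a write of 0x1ffff is accepted, while
+		 * any value with bit 17 or above set already failed the
+		 * "val & ~mask" check earlier in this function with -EINVAL.)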
+		 */
+		return 0;
+	}
+}
+
+static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
+			   const struct kvm_one_reg *reg, u64 *v)
+{
+	int id, ret = 0;
+	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
+
+	switch (type) {
+	case KVM_REG_LOONGARCH_CSR:
+		id = KVM_GET_IOC_CSR_IDX(reg->id);
+		ret = _kvm_getcsr(vcpu, id, v);
+		break;
+	case KVM_REG_LOONGARCH_CPUCFG:
+		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
+		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
+			*v = vcpu->arch.cpucfg[id];
+		else
+			ret = -EINVAL;
+		break;
+	case KVM_REG_LOONGARCH_LBT:
+		if (!kvm_guest_has_lbt(&vcpu->arch))
+			return -ENXIO;
+
+		switch (reg->id) {
+		case KVM_REG_LOONGARCH_LBT_SCR0:
+			*v = vcpu->arch.lbt.scr0;
+			break;
+		case KVM_REG_LOONGARCH_LBT_SCR1:
+			*v = vcpu->arch.lbt.scr1;
+			break;
+		case KVM_REG_LOONGARCH_LBT_SCR2:
+			*v = vcpu->arch.lbt.scr2;
+			break;
+		case KVM_REG_LOONGARCH_LBT_SCR3:
+			*v = vcpu->arch.lbt.scr3;
+			break;
+		case KVM_REG_LOONGARCH_LBT_EFLAGS:
+			*v = vcpu->arch.lbt.eflags;
+			break;
+		case KVM_REG_LOONGARCH_LBT_FTOP:
+			*v = vcpu->arch.fpu.ftop;
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	case KVM_REG_LOONGARCH_KVM:
+		switch (reg->id) {
+		case KVM_REG_LOONGARCH_COUNTER:
+			*v = drdtime() + vcpu->kvm->arch.time_offset;
+			break;
+		case KVM_REG_LOONGARCH_DEBUG_INST:
+			*v = INSN_HVCL | KVM_HCALL_SWDBG;
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	int ret = 0;
+	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
+
+	switch (size) {
+	case KVM_REG_SIZE_U64:
+		ret = kvm_get_one_reg(vcpu, reg, &v);
+		if (ret)
+			return ret;
+		ret = put_user(v, (u64 __user *)(long)reg->addr);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
+			   const struct kvm_one_reg *reg, u64 v)
+{
+	int id, ret = 0;
+	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
+
+	switch (type) {
+	case KVM_REG_LOONGARCH_CSR:
+		id = KVM_GET_IOC_CSR_IDX(reg->id);
+		ret = _kvm_setcsr(vcpu, id, v);
+		break;
+	case KVM_REG_LOONGARCH_CPUCFG:
+		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
+		ret = kvm_check_cpucfg(id, v);
+		if (ret)
+			break;
+		vcpu->arch.cpucfg[id] = (u32)v;
+		if (id == LOONGARCH_CPUCFG6)
+			vcpu->arch.max_pmu_csrid =
+				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
+		break;
+	case KVM_REG_LOONGARCH_LBT:
+		if (!kvm_guest_has_lbt(&vcpu->arch))
+			return -ENXIO;
+
+		switch (reg->id) {
+		case KVM_REG_LOONGARCH_LBT_SCR0:
+			vcpu->arch.lbt.scr0 = v;
+			break;
+		case KVM_REG_LOONGARCH_LBT_SCR1:
+			vcpu->arch.lbt.scr1 = v;
+			break;
+		case KVM_REG_LOONGARCH_LBT_SCR2:
+			vcpu->arch.lbt.scr2 = v;
+			break;
+		case KVM_REG_LOONGARCH_LBT_SCR3:
+			vcpu->arch.lbt.scr3 = v;
+			break;
+		case KVM_REG_LOONGARCH_LBT_EFLAGS:
+			vcpu->arch.lbt.eflags = v;
+			break;
+		case KVM_REG_LOONGARCH_LBT_FTOP:
+			vcpu->arch.fpu.ftop = v;
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	case KVM_REG_LOONGARCH_KVM:
+		switch (reg->id) {
+		case KVM_REG_LOONGARCH_COUNTER:
+			/*
+			 * The counter offset is board-wide rather than per-vcpu,
+			 * so only set it once, via vcpu 0, on SMP systems.
+			 */
+			if (vcpu->vcpu_id == 0)
+				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
+			break;
+		case KVM_REG_LOONGARCH_VCPU_RESET:
+			vcpu->arch.st.guest_addr = 0;
+			if (vcpu->vcpu_id == 0)
+				kvm_loongarch_reset_extioi(vcpu->kvm);
+			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
+			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
+			break;
+		default:
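+			/* Unknown KVM pseudo-register, reject */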
ret = -EINVAL; + break; + } + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + int ret = 0; + u64 v, size = reg->id & KVM_REG_SIZE_MASK; + + switch (size) { + case KVM_REG_SIZE_U64: + ret = get_user(v, (u64 __user *)(long)reg->addr); + if (ret) + return ret; + break; + default: + return -EINVAL; + } + + return kvm_set_one_reg(vcpu, reg, v); +} + +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + regs->gpr[i] = vcpu->arch.gprs[i]; + + regs->pc = vcpu->arch.pc; + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + vcpu->arch.gprs[i] = regs->gpr[i]; + + vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ + vcpu->arch.pc = regs->pc; + + return 0; +} + +static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, + struct kvm_enable_cap *cap) +{ + /* FPU is enabled by default, will support LSX/LASX later. */ + return -EINVAL; +} + +static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case LOONGARCH_CPUCFG2: + case LOONGARCH_CPUCFG6: + return 0; + case CPUCFG_KVM_FEATURE: + return 0; + default: + return -ENXIO; + } + + return -ENXIO; +} + +static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) + || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + return -ENXIO; + + return 0; +} + +static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + case KVM_LOONGARCH_VCPU_CPUCFG: + ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr); + break; + case KVM_LOONGARCH_VCPU_PVTIME_CTRL: + ret = kvm_loongarch_pvtime_has_attr(vcpu, attr); + break; + default: + break; + } + + return ret; +} + +static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = 0; + uint64_t val; + uint64_t __user *uaddr = (uint64_t __user *)attr->addr; + + switch (attr->attr) { + case 0 ... 
(KVM_MAX_CPUCFG_REGS - 1):
+		ret = _kvm_get_cpucfg_mask(attr->attr, &val);
+		if (ret)
+			return ret;
+		break;
+	case CPUCFG_KVM_FEATURE:
+		val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
+		break;
+	default:
+		return -ENXIO;
+	}
+
+	if (put_user(val, uaddr))
+		return -EFAULT;
+
+	return ret;
+}
+
+static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
+					 struct kvm_device_attr *attr)
+{
+	u64 gpa;
+	u64 __user *user = (u64 __user *)attr->addr;
+
+	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
+			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+		return -ENXIO;
+
+	gpa = vcpu->arch.st.guest_addr;
+	if (put_user(gpa, user))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
+				       struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	case KVM_LOONGARCH_VCPU_CPUCFG:
+		ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
+		break;
+	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
+					 struct kvm_device_attr *attr)
+{
+	u64 val, valid;
+	u64 __user *user = (u64 __user *)attr->addr;
+	struct kvm *kvm = vcpu->kvm;
+
+	switch (attr->attr) {
+	case CPUCFG_KVM_FEATURE:
+		if (get_user(val, user))
+			return -EFAULT;
+
+		valid = LOONGARCH_PV_FEAT_MASK;
+		if (val & ~valid)
+			return -EINVAL;
+
+		/* All vCPUs need to set the same PV features */
+		if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
+				&& ((kvm->arch.pv_features & valid) != val))
+			return -EINVAL;
+		kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
+		return 0;
+	default:
+		return -ENXIO;
+	}
+}
+
+static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
+					 struct kvm_device_attr *attr)
+{
+	int idx, ret = 0;
+	u64 gpa, __user *user = (u64 __user *)attr->addr;
+	struct kvm *kvm = vcpu->kvm;
+
+	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
+			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+		return -ENXIO;
+
+	if (get_user(gpa, user))
+		return -EFAULT;
+
+	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
+		return -EINVAL;
+
+	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
+		vcpu->arch.st.guest_addr = gpa;
+		return 0;
+	}
+
+	/* Check the address is in a valid memslot */
+	idx = srcu_read_lock(&kvm->srcu);
+	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
+		ret = -EINVAL;
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	if (!ret) {
+		vcpu->arch.st.guest_addr = gpa;
+		vcpu->arch.st.last_steal = current->sched_info.run_delay;
+		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+	}
+
+	return ret;
+}
+
+static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
+				       struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	case KVM_LOONGARCH_VCPU_CPUCFG:
+		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
+		break;
+	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+			 unsigned int ioctl, unsigned long arg)
+{
+	long r;
+	struct kvm_device_attr attr;
+	void __user *argp = (void __user *)arg;
+	struct kvm_vcpu *vcpu = filp->private_data;
+
+	/*
+	 * Only software CSR state should be modified here.
+	 *
+	 * If any hardware CSR register is modified, a vcpu_load/vcpu_put
+	 * pair should be used. Since the CSR registers are owned by this
+	 * vcpu, if we switch to other vcpus, those vcpus need to reload
+	 * the CSR registers.
+	 *
+	 * If software CSR state is modified, bit KVM_LARCH_HWCSR_USABLE
+	 * should be cleared in vcpu->arch.aux_inuse, and vcpu_load will
+	 * check the aux_inuse flag and reload the CSR registers from
+	 * software.
+	 */
+
+	switch (ioctl) {
+	case KVM_SET_ONE_REG:
+	case KVM_GET_ONE_REG: {
+		struct kvm_one_reg reg;
+
+		r = -EFAULT;
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			break;
+		if (ioctl == KVM_SET_ONE_REG) {
+			r = kvm_set_reg(vcpu, &reg);
+			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
+		} else
+			r = kvm_get_reg(vcpu, &reg);
+		break;
+	}
+	case KVM_ENABLE_CAP: {
+		struct kvm_enable_cap cap;
+
+		r = -EFAULT;
+		if (copy_from_user(&cap, argp, sizeof(cap)))
+			break;
+		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+		break;
+	}
+	case KVM_HAS_DEVICE_ATTR: {
+		r = -EFAULT;
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			break;
+		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
+		break;
+	}
+	case KVM_GET_DEVICE_ATTR: {
+		r = -EFAULT;
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			break;
+		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
+		break;
+	}
+	case KVM_SET_DEVICE_ATTR: {
+		r = -EFAULT;
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			break;
+		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
+		break;
+	}
+	default:
+		r = -ENOIOCTLCMD;
+		break;
+	}
+
+	return r;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	int i = 0;
+
+	fpu->fcc = vcpu->arch.fpu.fcc;
+	fpu->fcsr = vcpu->arch.fpu.fcsr;
+	for (i = 0; i < NUM_FPU_REGS; i++)
+		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
+
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	int i = 0;
+
+	vcpu->arch.fpu.fcc = fpu->fcc;
+	vcpu->arch.fpu.fcsr = fpu->fcsr;
+	for (i = 0; i < NUM_FPU_REGS; i++)
+		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
+
+	return 0;
+}
+
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu)
+{
+	if (!kvm_guest_has_lbt(&vcpu->arch))
+		return -EINVAL;
+
+	preempt_disable();
+	set_csr_euen(CSR_EUEN_LBTEN);
+	_restore_lbt(&vcpu->arch.lbt);
+	vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
+	preempt_enable();
+
+	return 0;
+}
+
+static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
+		_save_lbt(&vcpu->arch.lbt);
+		clear_csr_euen(CSR_EUEN_LBTEN);
+		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
+	}
+	preempt_enable();
+}
+
+static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
+{
+	/*
+	 * If TM is enabled, the top register save/restore will trigger an
+	 * LBT exception, so enable LBT in advance here.
+	 */
+	if (fcsr & FPU_CSR_TM)
+		kvm_own_lbt(vcpu);
+}
+
+static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
+		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
+			return;
+		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
+	}
+}
+#else
+static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
+static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
+static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
+#endif
+
+/* Enable FPU and restore context */
+void kvm_own_fpu(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+
+	/*
+	 * Enable FPU for guest
+	 * Set FR and FRE according to guest context
+	 */
+	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
+	set_csr_euen(CSR_EUEN_FPEN);
+
+	kvm_restore_fpu(&vcpu->arch.fpu);
+	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
+	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
+
+	preempt_enable();
+}
+
+#ifdef CONFIG_CPU_HAS_LSX
+/* Enable LSX
and restore context */ +int kvm_own_lsx(struct kvm_vcpu *vcpu) +{ + if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + + /* Enable LSX for guest */ + kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); + set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN); + switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + case KVM_LARCH_FPU: + /* + * Guest FPU state already loaded, + * only restore upper LSX state + */ + _restore_lsx_upper(&vcpu->arch.fpu); + break; + default: + /* Neither FP or LSX already active, + * restore full LSX state + */ + kvm_restore_lsx(&vcpu->arch.fpu); + break; + } + + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX); + vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; + preempt_enable(); + + return 0; +} +#endif + +#ifdef CONFIG_CPU_HAS_LASX +/* Enable LASX and restore context */ +int kvm_own_lasx(struct kvm_vcpu *vcpu) +{ + if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + + kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); + set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); + switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { + case KVM_LARCH_LSX: + case KVM_LARCH_LSX | KVM_LARCH_FPU: + /* Guest LSX state already loaded, only restore upper LASX state */ + _restore_lasx_upper(&vcpu->arch.fpu); + break; + case KVM_LARCH_FPU: + /* Guest FP state already loaded, only restore upper LSX & LASX state */ + _restore_lsx_upper(&vcpu->arch.fpu); + _restore_lasx_upper(&vcpu->arch.fpu); + break; + default: + /* Neither FP or LSX already active, restore full LASX state */ + kvm_restore_lasx(&vcpu->arch.fpu); + break; + } + + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX); + vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; + preempt_enable(); + + return 0; +} +#endif + +/* Save context and disable FPU */ +void kvm_lose_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + + kvm_check_fcsr_alive(vcpu); + if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { + kvm_save_lasx(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX); + + /* Disable LASX & LSX & FPU */ + clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); + } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { + kvm_save_lsx(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX); + + /* Disable LSX & FPU */ + clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN); + } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + kvm_save_fpu(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); + + /* Disable FPU */ + clear_csr_euen(CSR_EUEN_FPEN); + } + kvm_lose_lbt(vcpu); + + preempt_enable(); +} + +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) +{ + int intr = (int)irq->irq; + + if (intr > 0) + kvm_queue_irq(vcpu, intr); + else if (intr < 0) + kvm_dequeue_irq(vcpu, -intr); + else { + kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq); + return -EINVAL; + } + + kvm_vcpu_kick(vcpu); + + return 0; +} + +long kvm_arch_vcpu_async_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct kvm_vcpu *vcpu = filp->private_data; + + if (ioctl == KVM_INTERRUPT) { + struct 
kvm_interrupt irq; + + if (copy_from_user(&irq, argp, sizeof(irq))) + return -EFAULT; + + kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); + + return kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } + + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + return 0; +} + +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) +{ + unsigned long timer_hz; + struct loongarch_csrs *csr; + + vcpu->arch.vpid = 0; + vcpu->arch.flush_gpa = INVALID_GPA; + + hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + vcpu->arch.swtimer.function = kvm_swtimer_wakeup; + + vcpu->arch.handle_exit = kvm_handle_exit; + vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; + vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); + if (!vcpu->arch.csr) + return -ENOMEM; + + /* + * All kvm exceptions share one exception entry, and host <-> guest + * switch also switch ECFG.VS field, keep host ECFG.VS info here. + */ + vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); + + /* Init */ + vcpu->arch.last_sched_cpu = -1; + + /* Init ipi_state lock */ + spin_lock_init(&vcpu->arch.ipi_state.lock); + + /* + * Initialize guest register state to valid architectural reset state. + */ + timer_hz = calc_const_freq(); + kvm_init_timer(vcpu, timer_hz); + + /* Set Initialize mode for guest */ + csr = vcpu->arch.csr; + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA); + + /* Set cpuid */ + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID); + + /* Start with no pending virtual guest interrupts */ + csr->csrs[LOONGARCH_CSR_GINTC] = 0; + + return 0; +} + +void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + int cpu; + struct kvm_context *context; + + hrtimer_cancel(&vcpu->arch.swtimer); + kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); + kvm_drop_cpuid(vcpu); + kfree(vcpu->arch.csr); + + /* + * If the vCPU is freed and reused as another vCPU, we don't want the + * matching pointer wrongly hanging around in last_vcpu. + */ + for_each_possible_cpu(cpu) { + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + if (context->last_vcpu == vcpu) + context->last_vcpu = NULL; + } +} + +static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + bool migrated; + struct kvm_context *context; + struct loongarch_csrs *csr = vcpu->arch.csr; + + /* + * Have we migrated to a different CPU? + * If so, any old guest TLB state may be stale. + */ + migrated = (vcpu->arch.last_sched_cpu != cpu); + + /* + * Was this the last vCPU to run on this CPU? + * If not, any old guest state from this vCPU will have been clobbered. 
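+	 *
+	 * (Sketch of the invariant: KVM_LARCH_HWCSR_USABLE set means the
+	 * hardware GCSRs still hold this vcpu's state; migrating to a new
+	 * CPU, or another vcpu having run here, drops the flag below and
+	 * forces the full guest CSR restore that follows.)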
+ */ + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + if (migrated || (context->last_vcpu != vcpu)) + vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; + context->last_vcpu = vcpu; + + /* Restore timer state regardless */ + kvm_restore_timer(vcpu); + + /* Control guest page CCA attribute */ + change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + + /* Restore hardware PMU CSRs */ + kvm_restore_pmu(vcpu); + + /* Don't bother restoring registers multiple times unless necessary */ + if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) + return 0; + + write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); + + /* Restore guest CSR registers */ + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); + + /* Restore Root.GINTC from unused Guest.GINTC register */ + write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]); + + /* + * We should clear linked load bit to break interrupted atomics. This + * prevents a SC on the next vCPU from succeeding by matching a LL on + * the previous vCPU. 
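+	 *
+	 * (Illustration: if vCPU A executes LL and is scheduled out before
+	 * its SC, then without the WCLLB write below a later SC from vCPU B
+	 * on this physical CPU could pair with A's stale LL bit and succeed
+	 * spuriously.)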
+ */ + if (vcpu->kvm->created_vcpus > 1) + set_gcsr_llbctl(CSR_LLBCTL_WCLLB); + + vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; + + return 0; +} + +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + /* Restore guest state to registers */ + _kvm_vcpu_load(vcpu, cpu); + local_irq_restore(flags); +} + +static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + kvm_lose_fpu(vcpu); + + /* + * Update CSR state from hardware if software CSR state is stale, + * most CSR registers are kept unchanged during process context + * switch except CSR registers like remaining timer tick value and + * injected interrupt state. + */ + if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) + goto out; + + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); + + vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; + +out: + kvm_save_timer(vcpu); + /* Save Root.GINTC into unused Guest.GINTC register */ + csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc(); + + return 0; +} + +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +{ + int cpu; + unsigned long flags; + + local_irq_save(flags); + cpu = smp_processor_id(); + vcpu->arch.last_sched_cpu = cpu; + + /* Save guest state in registers */ + _kvm_vcpu_put(vcpu, cpu); + local_irq_restore(flags); +} + +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +{ + int r = -EINTR; + struct 
kvm_run *run = vcpu->run; + + if (vcpu->mmio_needed) { + if (!vcpu->mmio_is_write) + kvm_complete_mmio_read(vcpu, run); + vcpu->mmio_needed = 0; + } + + if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) { + if (!run->iocsr_io.is_write) + kvm_complete_iocsr_read(vcpu, run); + } + + if (run->immediate_exit) + return r; + + /* Clear exit_reason */ + run->exit_reason = KVM_EXIT_UNKNOWN; + lose_fpu(1); + vcpu_load(vcpu); + kvm_sigset_activate(vcpu); + r = kvm_pre_enter_guest(vcpu); + if (r != RESUME_GUEST) + goto out; + + guest_timing_enter_irqoff(); + guest_state_enter_irqoff(); + trace_kvm_enter(vcpu); + r = kvm_loongarch_ops->enter_guest(run, vcpu); + + trace_kvm_out(vcpu); + /* + * Guest exit is already recorded at kvm_handle_exit() + * return value must not be RESUME_GUEST + */ + local_irq_enable(); +out: + kvm_sigset_deactivate(vcpu); + vcpu_put(vcpu); + + return r; +} diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c new file mode 100644 index 0000000000000000000000000000000000000000..5f65610aa9fce27ce7947707dd08aca0b3c21506 --- /dev/null +++ b/arch/loongarch/kvm/vm.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include + +const struct _kvm_stats_desc kvm_vm_stats_desc[] = { + KVM_GENERIC_VM_STATS(), + STATS_DESC_ICOUNTER(VM, pages), + STATS_DESC_ICOUNTER(VM, hugepages), +}; + +const struct kvm_stats_header kvm_vm_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vm_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vm_stats_desc), +}; + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) +{ + int i; + + /* Allocate page table to map GPA -> RPA */ + kvm->arch.pgd = kvm_pgd_alloc(); + if (!kvm->arch.pgd) + return -ENOMEM; + + kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), GFP_KERNEL_ACCOUNT); + if (!kvm->arch.phyid_map) { + free_page((unsigned long)kvm->arch.pgd); + kvm->arch.pgd = NULL; + return -ENOMEM; + } + spin_lock_init(&kvm->arch.phyid_map_lock); + + kvm_init_vmcs(kvm); + + /* Enable all PV features by default */ + kvm->arch.pv_features = BIT(KVM_FEATURE_IPI); + if (kvm_pvtime_supported()) + kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME); + + kvm->arch.gpa_size = BIT(cpu_vabits - 1); + kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1; + kvm->arch.invalid_ptes[0] = 0; + kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table; +#if CONFIG_PGTABLE_LEVELS > 2 + kvm->arch.invalid_ptes[2] = (unsigned long)invalid_pmd_table; +#endif +#if CONFIG_PGTABLE_LEVELS > 3 + kvm->arch.invalid_ptes[3] = (unsigned long)invalid_pud_table; +#endif + for (i = 0; i <= kvm->arch.root_level; i++) + kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3); + + return 0; +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + kvm_destroy_vcpus(kvm); + free_page((unsigned long)kvm->arch.pgd); + kvm->arch.pgd = NULL; + kvfree(kvm->arch.phyid_map); + kvm->arch.phyid_map = NULL; +} + +int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) +{ + int r; + + switch (ext) { + case KVM_CAP_IRQCHIP: + case KVM_CAP_ONE_REG: + case KVM_CAP_ENABLE_CAP: + case KVM_CAP_READONLY_MEM: + case KVM_CAP_SYNC_MMU: + case KVM_CAP_IMMEDIATE_EXIT: + case KVM_CAP_IOEVENTFD: + case KVM_CAP_MP_STATE: + case KVM_CAP_SET_GUEST_DEBUG: + case KVM_CAP_VM_ATTRIBUTES: + 
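+		/* Everything listed above is supported unconditionally */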
r = 1; + break; + case KVM_CAP_NR_VCPUS: + r = num_online_cpus(); + break; + case KVM_CAP_MAX_VCPUS: + r = KVM_MAX_VCPUS; + break; + case KVM_CAP_MAX_VCPU_ID: + r = KVM_MAX_VCPU_IDS; + break; + case KVM_CAP_NR_MEMSLOTS: + r = KVM_USER_MEM_SLOTS; + break; + default: + r = 0; + break; + } + + return r; +} + +static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_LOONGARCH_VM_FEAT_LSX: + if (cpu_has_lsx) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_LASX: + if (cpu_has_lasx) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_X86BT: + if (cpu_has_lbt_x86) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_ARMBT: + if (cpu_has_lbt_arm) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_MIPSBT: + if (cpu_has_lbt_mips) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_PMU: + if (cpu_has_pmp) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_PV_IPI: + return 0; + case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME: + if (kvm_pvtime_supported()) + return 0; + return -ENXIO; + default: + return -ENXIO; + } +} + +static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_LOONGARCH_VM_FEAT_CTRL: + return kvm_vm_feature_has_attr(kvm, attr); + case KVM_LOONGARCH_VM_HAVE_IRQCHIP: + return 0; + default: + return -ENXIO; + } +} + +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + int r; + void __user *argp = (void __user *)arg; + struct kvm *kvm = filp->private_data; + struct kvm_device_attr attr; + + switch (ioctl) { + case KVM_CREATE_IRQCHIP: { + r = 1; + break; + } + case KVM_HAS_DEVICE_ATTR: { + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + + return kvm_vm_has_attr(kvm, &attr); + } + default: + return -EINVAL; + } + + return r; +} + +int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *data, + bool line_status) +{ + bool level; + struct loongarch_pch_pic *s; + int type, vcpu, irq, vcpus, val, ret = 0; + + level = data->level; + val = data->irq; + s = kvm->arch.pch_pic; + vcpus = atomic_read(&kvm->online_vcpus); + + type = (val >> KVM_LOONGARCH_IRQ_TYPE_SHIFT) & KVM_LOONGARCH_IRQ_TYPE_MASK; + vcpu = (val >> KVM_LOONGARCH_IRQ_VCPU_SHIFT) & KVM_LOONGARCH_IRQ_VCPU_MASK; + irq = (val >> KVM_LOONGARCH_IRQ_NUM_SHIFT) & KVM_LOONGARCH_IRQ_NUM_MASK; + + switch (type) { + case KVM_LOONGARCH_IRQ_TYPE_IOAPIC: + if (irq < KVM_IRQCHIP_NUM_PINS) + pch_pic_set_irq(s, irq, level); + else if (irq < 256) + pch_msi_set_irq(kvm, irq, level); + else + ret = -EINVAL; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) +{ + return (bool)((!!kvm->arch.extioi) && (!!kvm->arch.pch_pic)); +} diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S index be741544e62bf63f7198d451c72c8f47eb0501d7..7a0db643b2866c604a916ea4f992bba30e97eace 100644 --- a/arch/loongarch/lib/clear_user.S +++ b/arch/loongarch/lib/clear_user.S @@ -10,6 +10,7 @@ #include #include #include +#include SYM_FUNC_START(__clear_user) /* @@ -204,3 +205,5 @@ SYM_FUNC_START(__clear_user_fast) _asm_extable 28b, .Lsmall_fixup _asm_extable 29b, .Lexit SYM_FUNC_END(__clear_user_fast) + +STACK_FRAME_NON_STANDARD __clear_user_fast diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S index feec3d3628032f433dbf65e006b8a07acfbc161c..095ce9181c6c04c512111671f29177ae1f1fd484 100644 --- a/arch/loongarch/lib/copy_user.S +++ b/arch/loongarch/lib/copy_user.S 
@@ -10,6 +10,7 @@ #include #include #include +#include SYM_FUNC_START(__copy_user) /* @@ -278,3 +279,5 @@ SYM_FUNC_START(__copy_user_fast) _asm_extable 58b, .Lexit _asm_extable 59b, .Lexit SYM_FUNC_END(__copy_user_fast) + +STACK_FRAME_NON_STANDARD __copy_user_fast diff --git a/arch/loongarch/lib/memcpy.S b/arch/loongarch/lib/memcpy.S index fa1148878d2b9d06ccdfcf6f2c14fd302fd14114..9517a2f961af3dd4de5ac916d0bc52c4df2c2a46 100644 --- a/arch/loongarch/lib/memcpy.S +++ b/arch/loongarch/lib/memcpy.S @@ -9,6 +9,7 @@ #include #include #include +#include .section .noinstr.text, "ax" @@ -197,3 +198,5 @@ SYM_FUNC_START(__memcpy_fast) jr ra SYM_FUNC_END(__memcpy_fast) _ASM_NOKPROBE(__memcpy_fast) + +STACK_FRAME_NON_STANDARD __memcpy_small diff --git a/arch/loongarch/lib/memset.S b/arch/loongarch/lib/memset.S index 06d3ca54cbfe7d73c6cc7a5cadeca4d558b3e6dc..df38466205531dc20d2aad086e6b9d85a44b1a65 100644 --- a/arch/loongarch/lib/memset.S +++ b/arch/loongarch/lib/memset.S @@ -9,6 +9,7 @@ #include #include #include +#include .macro fill_to_64 r0 bstrins.d \r0, \r0, 15, 8 @@ -166,3 +167,5 @@ SYM_FUNC_START(__memset_fast) jr ra SYM_FUNC_END(__memset_fast) _ASM_NOKPROBE(__memset_fast) + +STACK_FRAME_NON_STANDARD __memset_fast diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c index 97b40defde060846d95c9bc02c70b13ec53372a7..deefd9617d00857a49095de9dcf1cc3b6913e105 100644 --- a/arch/loongarch/mm/fault.c +++ b/arch/loongarch/mm/fault.c @@ -31,11 +31,52 @@ int show_unhandled_signals = 1; +static int __kprobes spurious_fault(unsigned long write, unsigned long address) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if (!(address & __UA_LIMIT)) + return 0; + + pgd = pgd_offset_k(address); + if (!pgd_present(pgdp_get(pgd))) + return 0; + + p4d = p4d_offset(pgd, address); + if (!p4d_present(p4dp_get(p4d))) + return 0; + + pud = pud_offset(p4d, address); + if (!pud_present(pudp_get(pud))) + return 0; + + pmd = pmd_offset(pud, address); + if (!pmd_present(pmdp_get(pmd))) + return 0; + + if (pmd_leaf(*pmd)) { + return write ? pmd_write(pmdp_get(pmd)) : 1; + } else { + pte = pte_offset_kernel(pmd, address); + if (!pte_present(ptep_get(pte))) + return 0; + + return write ? pte_write(ptep_get(pte)) : 1; + } +} + static void __kprobes no_context(struct pt_regs *regs, unsigned long write, unsigned long address) { const int field = sizeof(unsigned long) * 2; + if (spurious_fault(write, address)) + return; + /* Are we prepared to handle this kernel fault? 
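	 * (The spurious_fault() check added above covers a fault that raced
	 * with a kernel page-table update made visible by another core: when
	 * the walked pgd/p4d/pud/pmd/pte already grants the requested access,
	 * the fault is stale and the access can simply be retried.)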
*/ if (fixup_exception(regs)) return; diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c index 1e76fcb83093dd6892801200e6027e92d9c68d37..62ddcea0aa14672cc4906f9706f0934d2735502b 100644 --- a/arch/loongarch/mm/hugetlbpage.c +++ b/arch/loongarch/mm/hugetlbpage.c @@ -39,11 +39,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, pmd_t *pmd = NULL; pgd = pgd_offset(mm, addr); - if (pgd_present(*pgd)) { + if (pgd_present(pgdp_get(pgd))) { p4d = p4d_offset(pgd, addr); - if (p4d_present(*p4d)) { + if (p4d_present(p4dp_get(p4d))) { pud = pud_offset(p4d, addr); - if (pud_present(*pud)) + if (pud_present(pudp_get(pud))) pmd = pmd_offset(pud, addr); } } diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 4dd53427f6578531910e197503fc0ec9d7f82f5c..14f74a55baaead95f5bd69a75693f7b1018567ce 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -140,7 +140,7 @@ void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, int __meminit vmemmap_check_pmd(pmd_t *pmd, int node, unsigned long addr, unsigned long next) { - int huge = pmd_val(*pmd) & _PAGE_HUGE; + int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE; if (huge) vmemmap_verify((pte_t *)pmd, node, addr, next); @@ -172,7 +172,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr) pud_t *pud; pmd_t *pmd; - if (p4d_none(*p4d)) { + if (p4d_none(p4dp_get(p4d))) { pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!pud) panic("%s: Failed to allocate memory\n", __func__); @@ -183,7 +183,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr) } pud = pud_offset(p4d, addr); - if (pud_none(*pud)) { + if (pud_none(pudp_get(pud))) { pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!pmd) panic("%s: Failed to allocate memory\n", __func__); @@ -194,13 +194,15 @@ pte_t * __init populate_kernel_pte(unsigned long addr) } pmd = pmd_offset(pud, addr); - if (!pmd_present(*pmd)) { + if (!pmd_present(pmdp_get(pmd))) { pte_t *pte; pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!pte) panic("%s: Failed to allocate memory\n", __func__); + pmd_populate_kernel(&init_mm, pmd, pte); + kernel_pte_init(pte); } return pte_offset_kernel(pmd, addr); @@ -215,7 +217,7 @@ void __init __set_fixmap(enum fixed_addresses idx, BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); ptep = populate_kernel_pte(addr); - if (!pte_none(*ptep)) { + if (!pte_none(ptep_get(ptep))) { pte_ERROR(*ptep); return; } diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c index 082cb2a6f1ef2464539e68f07c20f5e18a447694..7277b7583e1b787d35923efaf7fafa2a24698e2e 100644 --- a/arch/loongarch/mm/kasan_init.c +++ b/arch/loongarch/mm/kasan_init.c @@ -112,7 +112,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node) static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early) { - if (__pmd_none(early, READ_ONCE(*pmdp))) { + if (__pmd_none(early, pmdp_get(pmdp))) { phys_addr_t pte_phys = early ? __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node); if (!early) @@ -125,7 +125,7 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early) { - if (__pud_none(early, READ_ONCE(*pudp))) { + if (__pud_none(early, pudp_get(pudp))) { phys_addr_t pmd_phys = early ? 
__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node); if (!early) @@ -138,7 +138,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early) { - if (__p4d_none(early, READ_ONCE(*p4dp))) { + if (__p4d_none(early, p4dp_get(p4dp))) { phys_addr_t pud_phys = early ? __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node); if (!early) @@ -174,7 +174,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, : kasan_alloc_zeroed_page(node); next = addr + PAGE_SIZE; set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); - } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep))); + } while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep))); } static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, @@ -186,7 +186,7 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, do { next = pmd_addr_end(addr, end); kasan_pte_populate(pmdp, addr, next, node, early); - } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp))); + } while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp))); } static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr, diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c index 2aae72e638713a658475e6fb82fc73eae0fc3469..3b2fbe74d7d9baca31bcc2749ddfc63e84c9f5a8 100644 --- a/arch/loongarch/mm/pgtable.c +++ b/arch/loongarch/mm/pgtable.c @@ -116,6 +116,26 @@ void pud_init(void *addr) EXPORT_SYMBOL_GPL(pud_init); #endif +void kernel_pte_init(void *addr) +{ + unsigned long *p, *end; + + p = (unsigned long *)addr; + end = p + PTRS_PER_PTE; + + do { + p[0] = _PAGE_GLOBAL; + p[1] = _PAGE_GLOBAL; + p[2] = _PAGE_GLOBAL; + p[3] = _PAGE_GLOBAL; + p[4] = _PAGE_GLOBAL; + p += 8; + p[-3] = _PAGE_GLOBAL; + p[-2] = _PAGE_GLOBAL; + p[-1] = _PAGE_GLOBAL; + } while (p != end); +} + pmd_t mk_pmd(struct page *page, pgprot_t prot) { pmd_t pmd; @@ -128,7 +148,7 @@ pmd_t mk_pmd(struct page *page, pgprot_t prot) void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { - *pmdp = pmd; + WRITE_ONCE(*pmdp, pmd); flush_tlb_all(); } diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c index 526310ec73c7e61701bb1d2a30f08a0d0ae6f5e4..5503d4e4b096a8d3f96dd7e0c8f342d31c28ce17 100644 --- a/arch/loongarch/mm/tlb.c +++ b/arch/loongarch/mm/tlb.c @@ -9,8 +9,9 @@ #include #include -#include #include +#include +#include #include #include #include @@ -266,24 +267,20 @@ static void setup_tlb_handler(int cpu) setup_ptwalker(); local_flush_tlb_all(); + if (cpu_has_ptw) { + exception_table[EXCCODE_TLBI] = handle_tlb_load_ptw; + exception_table[EXCCODE_TLBL] = handle_tlb_load_ptw; + exception_table[EXCCODE_TLBS] = handle_tlb_store_ptw; + exception_table[EXCCODE_TLBM] = handle_tlb_modify_ptw; + } + /* The tlb handlers are generated only once */ if (cpu == 0) { memcpy((void *)tlbrentry, handle_tlb_refill, 0x80); local_flush_icache_range(tlbrentry, tlbrentry + 0x80); - if (!cpu_has_ptw) { - set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE); - set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE); - set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE); - set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE); - } else { - set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load_ptw, VECSIZE); - set_handler(EXCCODE_TLBL * VECSIZE, 
handle_tlb_load_ptw, VECSIZE); - set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store_ptw, VECSIZE); - set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify_ptw, VECSIZE); - } - set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE); - set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE); - set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE); + + for (int i = EXCCODE_TLBL; i <= EXCCODE_TLBPE; i++) + set_handler(i * VECSIZE, exception_table[i], VECSIZE); } else { int vec_sz __maybe_unused; void *addr __maybe_unused; diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S index d5d682f3d29f3a808f37e1d23f2887d0ffece071..a44387b838af61b3598b734dca6c5efc6c749a55 100644 --- a/arch/loongarch/mm/tlbex.S +++ b/arch/loongarch/mm/tlbex.S @@ -18,6 +18,7 @@ .macro tlb_do_page_fault, write SYM_CODE_START(tlb_do_page_fault_\write) + UNWIND_HINT_UNDEFINED SAVE_ALL csrrd a2, LOONGARCH_CSR_BADV move a0, sp @@ -32,6 +33,7 @@ tlb_do_page_fault 1 SYM_CODE_START(handle_tlb_protect) + UNWIND_HINT_UNDEFINED BACKUP_T0T1 SAVE_ALL move a0, sp @@ -44,6 +46,7 @@ SYM_CODE_START(handle_tlb_protect) SYM_CODE_END(handle_tlb_protect) SYM_CODE_START(handle_tlb_load) + UNWIND_HINT_UNDEFINED csrwr t0, EXCEPTION_KS0 csrwr t1, EXCEPTION_KS1 csrwr ra, EXCEPTION_KS2 @@ -190,6 +193,7 @@ nopage_tlb_load: SYM_CODE_END(handle_tlb_load) SYM_CODE_START(handle_tlb_load_ptw) + UNWIND_HINT_UNDEFINED csrwr t0, LOONGARCH_CSR_KS0 csrwr t1, LOONGARCH_CSR_KS1 la_abs t0, tlb_do_page_fault_0 @@ -197,6 +201,7 @@ SYM_CODE_START(handle_tlb_load_ptw) SYM_CODE_END(handle_tlb_load_ptw) SYM_CODE_START(handle_tlb_store) + UNWIND_HINT_UNDEFINED csrwr t0, EXCEPTION_KS0 csrwr t1, EXCEPTION_KS1 csrwr ra, EXCEPTION_KS2 @@ -346,6 +351,7 @@ nopage_tlb_store: SYM_CODE_END(handle_tlb_store) SYM_CODE_START(handle_tlb_store_ptw) + UNWIND_HINT_UNDEFINED csrwr t0, LOONGARCH_CSR_KS0 csrwr t1, LOONGARCH_CSR_KS1 la_abs t0, tlb_do_page_fault_1 @@ -353,6 +359,7 @@ SYM_CODE_START(handle_tlb_store_ptw) SYM_CODE_END(handle_tlb_store_ptw) SYM_CODE_START(handle_tlb_modify) + UNWIND_HINT_UNDEFINED csrwr t0, EXCEPTION_KS0 csrwr t1, EXCEPTION_KS1 csrwr ra, EXCEPTION_KS2 @@ -500,6 +507,7 @@ nopage_tlb_modify: SYM_CODE_END(handle_tlb_modify) SYM_CODE_START(handle_tlb_modify_ptw) + UNWIND_HINT_UNDEFINED csrwr t0, LOONGARCH_CSR_KS0 csrwr t1, LOONGARCH_CSR_KS1 la_abs t0, tlb_do_page_fault_1 @@ -507,6 +515,7 @@ SYM_CODE_START(handle_tlb_modify_ptw) SYM_CODE_END(handle_tlb_modify_ptw) SYM_CODE_START(handle_tlb_refill) + UNWIND_HINT_UNDEFINED csrwr t0, LOONGARCH_CSR_TLBRSAVE csrrd t0, LOONGARCH_CSR_PGD lddir t0, t0, 3 diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index 1da4dc46df43e5b77bb0666cb849d00f0443d7f8..5ba4d3a169b21b869bba40f08f15b686ea18bac1 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -26,16 +26,17 @@ void pcibios_add_bus(struct pci_bus *bus) int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { - struct acpi_device *adev = NULL; - struct device *bus_dev = &bridge->bus->dev; - struct pci_config_window *cfg = bridge->bus->sysdata; - if (!acpi_disabled) - adev = to_acpi_device(cfg->parent); + if (!acpi_disabled) { + struct acpi_device *adev = NULL; + struct device *bus_dev = &bridge->bus->dev; + struct pci_config_window *cfg = bridge->bus->sysdata; - ACPI_COMPANION_SET(&bridge->dev, adev); - set_dev_node(bus_dev, pa_to_nid(cfg->res.start)); + adev = to_acpi_device(cfg->parent); + ACPI_COMPANION_SET(&bridge->dev, adev); + set_dev_node(bus_dev, pa_to_nid(cfg->res.start)); 
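+		/* Companion and NUMA node setup only applies when ACPI is enabled */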
+ } return 0; } @@ -58,19 +59,167 @@ static void acpi_release_root_info(struct acpi_pci_root_info *ci) kfree(info); } +static void arch_pci_root_validate_resources(struct device *dev, + struct list_head *resources, + unsigned long type) +{ + LIST_HEAD(list); + struct resource *res1, *res2, *root = NULL; + struct resource_entry *tmp, *entry, *entry2; + + WARN_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0); + root = (type & IORESOURCE_MEM) ? &iomem_resource : &ioport_resource; + + list_splice_init(resources, &list); + resource_list_for_each_entry_safe(entry, tmp, &list) { + bool free = false; + resource_size_t end; + + res1 = entry->res; + if (!(res1->flags & type)) + goto next; + + /* Exclude non-addressable range or non-addressable portion */ + end = min(res1->end, root->end); + if (end <= res1->start) { + dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n", + res1); + free = true; + goto next; + } else if (res1->end != end) { + dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n", + res1, (unsigned long long)end + 1, + (unsigned long long)res1->end); + res1->end = end; + } + + resource_list_for_each_entry(entry2, resources) { + res2 = entry2->res; + if (!(res2->flags & type)) + continue; + + /* + * I don't like throwing away windows because then + * our resources no longer match the ACPI _CRS, but + * the kernel resource tree doesn't allow overlaps. + */ + if (resource_overlaps(res1, res2)) { + res2->start = min(res1->start, res2->start); + res2->end = max(res1->end, res2->end); + dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n", + res2, res1); + free = true; + goto next; + } + } + +next: + resource_list_del(entry); + if (free) + resource_list_free_entry(entry); + else + resource_list_add_tail(entry, resources); + } +} +static void arch_pci_root_remap_iospace(struct fwnode_handle *fwnode, + struct resource_entry *entry) +{ + struct resource *res = entry->res; + resource_size_t cpu_addr = res->start; + resource_size_t pci_addr = cpu_addr - entry->offset; + resource_size_t length = resource_size(res); + unsigned long port; + + if (pci_register_io_range(fwnode, cpu_addr, length)) { + res->start += ISA_IOSIZE; + cpu_addr = res->start; + pci_addr = cpu_addr - entry->offset; + length = resource_size(res); + if (pci_register_io_range(fwnode, cpu_addr, length)) + goto err; + } + + port = pci_address_to_pio(cpu_addr); + if (port == (unsigned long)-1) + goto err; + + res->start = port; + res->end = port + length - 1; + entry->offset = port - pci_addr; + + if (pci_remap_iospace(res, cpu_addr) < 0) + goto err; + + pr_info("Remapped I/O %pa to %pR\n", &cpu_addr, res); + return; +err: + res->flags |= IORESOURCE_DISABLED; +} + +static int arch_pci_probe_root_resources(struct acpi_pci_root_info *info) +{ + int ret; + struct list_head *list = &info->resources; + struct acpi_device *device = info->bridge; + struct resource_entry *entry, *tmp; + unsigned long flags; + struct resource *res; + + flags = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT; + ret = acpi_dev_get_resources(device, list, + acpi_dev_filter_resource_type_cb, + (void *)flags); + if (ret < 0) + dev_warn(&device->dev, + "failed to parse _CRS method, error code %d\n", ret); + else if (ret == 0) + dev_dbg(&device->dev, + "no IO and memory resources present in _CRS\n"); + else { + resource_list_for_each_entry_safe(entry, tmp, list) { + if (entry->res->flags & IORESOURCE_IO) { + res = entry->res; + res->start = PFN_ALIGN(res->start); + res->end += 1; + res->end 
= PFN_ALIGN(res->end); + res->end -= 1; + if (!entry->offset) { + entry->offset = LOONGSON_LIO_BASE; + res->start |= LOONGSON_LIO_BASE; + res->end |= LOONGSON_LIO_BASE; + } + arch_pci_root_remap_iospace(&device->fwnode, + entry); + } + if (entry->res->flags & IORESOURCE_DISABLED) + resource_list_destroy_entry(entry); + else + entry->res->name = info->name; + } + arch_pci_root_validate_resources(&device->dev, list, + IORESOURCE_MEM); + arch_pci_root_validate_resources(&device->dev, list, + IORESOURCE_IO); + } + + return ret; +} + static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci) { int status; struct resource_entry *entry, *tmp; struct acpi_device *device = ci->bridge; - status = acpi_pci_probe_root_resources(ci); + status = arch_pci_probe_root_resources(ci); if (status > 0) { resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { if (entry->res->flags & IORESOURCE_MEM) { - entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40); - entry->res->start |= entry->offset; - entry->res->end |= entry->offset; + if (!entry->offset) { + entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40); + entry->res->start |= entry->offset; + entry->res->end |= entry->offset; + } } } return status; diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S index e2fc3b4e31f0019164f57de05c05324cc2bb391d..c28ad52b7bafba21fdaaae3e9aa42df36c0c0351 100644 --- a/arch/loongarch/power/suspend_asm.S +++ b/arch/loongarch/power/suspend_asm.S @@ -73,11 +73,7 @@ SYM_FUNC_START(loongarch_suspend_enter) * Reload all of the registers and return. */ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL) - li.d t0, CSR_DMW0_INIT # UC, PLV0 - csrwr t0, LOONGARCH_CSR_DMWIN0 - li.d t0, CSR_DMW1_INIT # CA, PLV0 - csrwr t0, LOONGARCH_CSR_DMWIN1 - + SETUP_DMWINS t0 JUMP_VIRT_ADDR t0, t1 /* Enable PG */ diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile index 1a0f6ca0247b4cdd0f56a80a5f57cc8236600d9f..9cdb53f2e3b00dcd5199dd363c7ecf02c1b099c4 100644 --- a/arch/loongarch/vdso/Makefile +++ b/arch/loongarch/vdso/Makefile @@ -4,6 +4,7 @@ KASAN_SANITIZE := n UBSAN_SANITIZE := n KCOV_INSTRUMENT := n +OBJECT_FILES_NON_STANDARD := y # Include the generic Makefile to check the built vdso. 
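# (Note: OBJECT_FILES_NON_STANDARD above exempts the vDSO objects from
# objtool checking, matching the STACK_FRAME_NON_STANDARD annotations
# added to the hand-written copy/clear/memcpy/memset routines.)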
include $(srctree)/lib/vdso/Makefile diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 430b208c01307b7db1ea367ee12efb2317ee81ff..daa48f28ce5e03d217480dee45ce595bab02d4bd 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -594,8 +594,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, #define update_mmu_cache(vma, address, ptep) \ update_mmu_cache_range(NULL, vma, address, ptep, 1) -#define __HAVE_ARCH_UPDATE_MMU_TLB -#define update_mmu_tlb update_mmu_cache +#define update_mmu_tlb_range(vma, address, ptep, nr) \ + update_mmu_cache_range(NULL, vma, address, ptep, nr) static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 5144506dfa693270523e52e2ae95674c311fa3fc..d052dfcbe8d3a0c54c95d845415fce1e4388caab 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -178,6 +178,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) *ptep = pteval; } +#define PFN_PTE_SHIFT 0 + static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index d0ee46de248eaf7c61f9af0e19f692dfbd2d3a96..db2fe941e4c8b92e300b0274609fb9706b7ba72f 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -41,6 +41,8 @@ struct mm_struct; #ifndef __ASSEMBLY__ +#define PFN_PTE_SHIFT PTE_RPN_SHIFT + void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr); #define set_ptes set_ptes diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index b3de6102a90779739a598d9784ab9b55ab6e1ee0..1ca7d4c4b90dbf49cb7e002376fc5e73ad65a9ba 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -19,6 +19,8 @@ #include +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, + unsigned long address); #define __tlb_remove_tlb_entry __tlb_remove_tlb_entry #define tlb_flush tlb_flush diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index d3e0f5b3ecc74d668ebd2e9b6094a638b526e10c..554547c4bbad0c6e0c71bc109f5411d82ce1857a 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -71,23 +71,26 @@ static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long add return __bad_area_nosemaphore(regs, address, SEGV_MAPERR); } -static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code) +static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code, + struct mm_struct *mm, struct vm_area_struct *vma) { - struct mm_struct *mm = current->mm; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. 
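 * (In the reworked helpers a non-NULL mm means the caller held the
 * mmap read lock, which is dropped here; a NULL mm means the fault
 * was handled under a per-VMA lock, released with vma_end_read()
 * instead.)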
*/ - mmap_read_unlock(mm); + if (mm) + mmap_read_unlock(mm); + else + vma_end_read(vma); return __bad_area_nosemaphore(regs, address, si_code); } static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, + struct mm_struct *mm, struct vm_area_struct *vma) { - struct mm_struct *mm = current->mm; int pkey; /* @@ -109,7 +112,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, */ pkey = vma_pkey(vma); - mmap_read_unlock(mm); + if (mm) + mmap_read_unlock(mm); + else + vma_end_read(vma); /* * If we are in kernel mode, bail out with a SEGV, this will @@ -124,9 +130,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, return 0; } -static noinline int bad_access(struct pt_regs *regs, unsigned long address) +static noinline int bad_access(struct pt_regs *regs, unsigned long address, + struct mm_struct *mm, struct vm_area_struct *vma) { - return __bad_area(regs, address, SEGV_ACCERR); + return __bad_area(regs, address, SEGV_ACCERR, mm, vma); } static int do_sigbus(struct pt_regs *regs, unsigned long address, @@ -484,13 +491,13 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, if (unlikely(access_pkey_error(is_write, is_exec, (error_code & DSISR_KEYFAULT), vma))) { - vma_end_read(vma); - goto lock_mmap; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + return bad_access_pkey(regs, address, NULL, vma); } if (unlikely(access_error(is_write, is_exec, vma))) { - vma_end_read(vma); - goto lock_mmap; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + return bad_access(regs, address, NULL, vma); } fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); @@ -524,10 +531,10 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, if (unlikely(access_pkey_error(is_write, is_exec, (error_code & DSISR_KEYFAULT), vma))) - return bad_access_pkey(regs, address, vma); + return bad_access_pkey(regs, address, mm, vma); if (unlikely(access_error(is_write, is_exec, vma))) - return bad_access(regs, address); + return bad_access(regs, address, mm, vma); /* * If for any reason at all we couldn't handle the fault, diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 4d69bfb9bc111e2d235aa745f7e517883c36945d..79b7b35c48991c404f9d2da1f4bf44827ebebf58 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -220,10 +220,7 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, break; ptep++; addr += PAGE_SIZE; - /* - * increment the pfn. 
- */ - pte = pfn_pte(pte_pfn(pte) + 1, pte_pgprot((pte))); + pte = pte_next_pfn(pte); } } diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 10b946e9c6e756e3d4623495a5613cdcd425e628..b7ff680cde9649a36350743d2ffe771564f4225b 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2312,7 +2312,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, struct cpu_hw_events *cpuhw; cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_bhrb_read(event, cpuhw); - perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack); + perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack, NULL); } if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h index 56bf9d69d5e38c0e185d6b392410b76bd3044ef5..8f7a6225704499a9b32ffb40c1ade6fca9d03c0c 100644 --- a/arch/riscv/include/asm/cfi.h +++ b/arch/riscv/include/asm/cfi.h @@ -7,8 +7,9 @@ * * Copyright (C) 2023 Google LLC */ +#include -#include +struct pt_regs; #ifdef CONFIG_CFI_CLANG enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 37829dab4a0a4896d020dc07228e434bdcc73172..63d8a84826e9ca1f8bbf06562af2dde4675da1fe 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -491,8 +491,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, #define update_mmu_cache(vma, addr, ptep) \ update_mmu_cache_range(NULL, vma, addr, ptep, 1) -#define __HAVE_ARCH_UPDATE_MMU_TLB -#define update_mmu_tlb update_mmu_cache +#define update_mmu_tlb_range(vma, addr, ptep, nr) \ + update_mmu_cache_range(NULL, vma, addr, ptep, nr) static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) @@ -528,6 +528,8 @@ static inline void __set_pte_at(pte_t *ptep, pte_t pteval) set_pte(ptep, pteval); } +#define PFN_PTE_SHIFT _PAGE_PFN_SHIFT + static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval, unsigned int nr) { diff --git a/arch/riscv/kernel/cfi.c b/arch/riscv/kernel/cfi.c index 820158d7a29130d9f42742019683f8c5034949ec..6ec9dbd7292eecad5d27787904ba890a37b1426f 100644 --- a/arch/riscv/kernel/cfi.c +++ b/arch/riscv/kernel/cfi.c @@ -4,7 +4,7 @@ * * Copyright (C) 2023 Google LLC */ -#include +#include #include /* diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index da2e91b5b19250c13c57265d048c16d9b017c5f0..0250073f522fe16fad5c897287793f252fb888e4 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1324,6 +1324,8 @@ pgprot_t pgprot_writecombine(pgprot_t prot); #define pgprot_writethrough pgprot_writethrough pgprot_t pgprot_writethrough(pgprot_t prot); +#define PFN_PTE_SHIFT PAGE_SHIFT + /* * Set multiple PTEs to consecutive pages with a single call. All PTEs * are within the same folio, PMD and VMA. 
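The PFN_PTE_SHIFT definitions added above for mips, nios2, powerpc, riscv, s390 and sparc all serve one purpose: they let generic mm code step a PTE to the next page frame, which is why the powerpc set_ptes() loop can replace its open-coded pfn_pte(pte_pfn(pte) + 1, ...) with pte_next_pfn(). A minimal sketch of the generic fallback this enables, modelled on include/linux/pgtable.h and simplified here (not part of this series)::

    #ifndef pte_next_pfn
    static inline pte_t pte_next_pfn(pte_t pte)
    {
    	/*
    	 * Advance the PFN field by one page. This assumes the PFN is a
    	 * plain bitfield starting at PFN_PTE_SHIFT, which is exactly what
    	 * the per-architecture definitions above assert.
    	 */
    	return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
    }
    #endif

Architectures whose PTE encoding is not a plain shifted PFN can keep providing their own pte_next_pfn() instead of defining PFN_PTE_SHIFT.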
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 383b1f91442c997b21af066a287318020a88dea2..b76c8f028badeb0f27ea7519cff1b5b847df0df2 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -25,8 +25,9 @@ void __tlb_remove_table(void *_table); static inline void tlb_flush(struct mmu_gather *tlb); static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct encoded_page *page, - int page_size); + struct page *page, bool delay_rmap, int page_size); +static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb, + struct page *page, unsigned int nr_pages, bool delay_rmap); #define tlb_flush tlb_flush #define pte_free_tlb pte_free_tlb @@ -42,14 +43,29 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page * has already been freed, so just do free_page_and_swap_cache. * - * s390 doesn't delay rmap removal, so there is nothing encoded in - * the page pointer. + * s390 doesn't delay rmap removal. */ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct encoded_page *page, - int page_size) + struct page *page, bool delay_rmap, int page_size) { - free_page_and_swap_cache(encoded_page_ptr(page)); + VM_WARN_ON_ONCE(delay_rmap); + + free_page_and_swap_cache(page); + return false; +} + +static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb, + struct page *page, unsigned int nr_pages, bool delay_rmap) +{ + struct encoded_page *encoded_pages[] = { + encode_page(page, ENCODED_PAGE_BIT_NR_PAGES_NEXT), + encode_nr_pages(nr_pages), + }; + + VM_WARN_ON_ONCE(delay_rmap); + VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1)); + + free_pages_and_swap_cache(encoded_pages, ARRAY_SIZE(encoded_pages)); return false; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 1a231181a413bde07b2afd35d48d9a6967a9b1c9..17b483c52815592dd379e56444b4eece02d7f30c 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -414,7 +414,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) goto lock_mmap; if (!(vma->vm_flags & access)) { vma_end_read(vma); - goto lock_mmap; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + return handle_fault_error_nolock(regs, SEGV_ACCERR); } fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 5e41033bf4ca4b94d64d29d4f335be59a1186e49..be9bcc50e4cbf2efdc1c145550ce7410d355fb43 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -928,6 +928,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT); } +#define PFN_PTE_SHIFT PAGE_SHIFT + static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { diff --git a/arch/sw_64/Kbuild b/arch/sw_64/Kbuild new file mode 100644 index 0000000000000000000000000000000000000000..aa0bf0507406c9790b10d0b0c0b6dd57d22286ae --- /dev/null +++ b/arch/sw_64/Kbuild @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += kernel/ mm/ platform/ +obj-$(CONFIG_NET) += net/ +obj-$(CONFIG_KVM) += kvm/ +obj-$(CONFIG_MATHEMU) += math-emu/ + +obj-$(CONFIG_BUILTIN_DTB) += boot/dts/ diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig new file mode 100644 index 
0000000000000000000000000000000000000000..62f655ceae3f709b0f151c89ed7241add7714d90 --- /dev/null +++ b/arch/sw_64/Kconfig @@ -0,0 +1,670 @@ +# SPDX-License-Identifier: GPL-2.0 +config SW64 + bool + default y + select ACPI + select ACPI_MCFG if (ACPI && PCI) + select ACPI_REDUCED_HARDWARE_ONLY + select ARCH_ATOMIC + select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI + select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_PHYS_TO_DMA + select ARCH_HAS_PMEM_API + select ARCH_HAS_PTE_DEVMAP + select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_SG_CHAIN + select ARCH_HAS_UACCESS_FLUSHCACHE + select ARCH_HAS_VM_GET_PAGE_PROT + select ARCH_HAS_ZONE_DEVICE + select ARCH_HAVE_NMI_SAFE_CMPXCHG + select ARCH_INLINE_READ_LOCK + select ARCH_INLINE_READ_LOCK_BH + select ARCH_INLINE_READ_LOCK_IRQ + select ARCH_INLINE_READ_LOCK_IRQSAVE + select ARCH_INLINE_READ_UNLOCK + select ARCH_INLINE_READ_UNLOCK_BH + select ARCH_INLINE_READ_UNLOCK_IRQ + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE + select ARCH_INLINE_SPIN_LOCK + select ARCH_INLINE_SPIN_LOCK_BH + select ARCH_INLINE_SPIN_LOCK_IRQ + select ARCH_INLINE_SPIN_LOCK_IRQSAVE + select ARCH_INLINE_SPIN_TRYLOCK + select ARCH_INLINE_SPIN_TRYLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK + select ARCH_INLINE_SPIN_UNLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK_IRQ + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE + select ARCH_INLINE_WRITE_LOCK + select ARCH_INLINE_WRITE_LOCK_BH + select ARCH_INLINE_WRITE_LOCK_IRQ + select ARCH_INLINE_WRITE_LOCK_IRQSAVE + select ARCH_INLINE_WRITE_UNLOCK + select ARCH_INLINE_WRITE_UNLOCK_BH + select ARCH_INLINE_WRITE_UNLOCK_IRQ + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + select ARCH_NO_PREEMPT + select ARCH_SUPPORTS_ACPI + select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_UPROBES + select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_USE_QUEUED_RWLOCKS + select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_WANT_DEFAULT_BPF_JIT + select ARCH_WANT_FRAME_POINTERS + select ARCH_WANT_IPC_PARSE_VERSION + select AUDIT_ARCH + select COMMON_CLK + select DMA_OPS if PCI + select GENERIC_CLOCKEVENTS + select GENERIC_IRQ_LEGACY + select GENERIC_IRQ_MIGRATION if SMP + select GENERIC_IRQ_PROBE + select GENERIC_IRQ_SHOW + select GENERIC_PCI_IOMAP if PCI + select GENERIC_SMP_IDLE_THREAD + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER + select GENERIC_TIME_VSYSCALL + select HANDLE_DOMAIN_IRQ + select HARDIRQS_SW_RESEND + select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_KGDB + select HAVE_ARCH_PREL32_RELOCATIONS + select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ASM_MODVERSIONS + select HAVE_C_RECORDMCOUNT + select HAVE_DEBUG_BUGVERBOSE + select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_EBPF_JIT + select HAVE_FAST_GUP + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER + select HAVE_IDE + select HAVE_KPROBES + select HAVE_KPROBES_ON_FTRACE + select HAVE_KRETPROBES + select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_MEMBLOCK + select HAVE_MEMBLOCK_NODE_MAP + select HAVE_MOD_ARCH_SPECIFIC + select HAVE_PCI + select HAVE_PCSPKR_PLATFORM + select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RELIABLE_STACKTRACE if STACKTRACE + select HAVE_RSEQ + select HAVE_SYSCALL_TRACEPOINTS + select IRQ_FORCED_THREADING + select LOCK_MM_AND_FIND_VMA + select MEMORY_HOTPLUG_SPARSE if 
MEMORY_HOTPLUG + select MODULES_USE_ELF_RELA + select NO_BOOTMEM + select OF_EARLY_FLATTREE if OF + select OLD_SIGSUSPEND + select PCI_DOMAINS_GENERIC if PCI + select PCI_ECAM if (ACPI && PCI) + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI + select PCI_SW64 if PCI + select SET_FS + select SPARSEMEM_EXTREME if SPARSEMEM + select SW64_IRQ_CPU + select SW64_IRQ_MSI if PCI_MSI + select SW64_IRQ_MSI_VT if PCI_MSI + select SW64_TIMER + select SWIOTLB + select THREAD_INFO_IN_TASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select IOMMU_DMA if IOMMU_SUPPORT + select ARCH_SUPPORTS_MEMORY_FAILURE + select HAVE_CONTEXT_TRACKING + select HAVE_NMI + select HAVE_DMA_CONTIGUOUS + +config LOCKDEP_SUPPORT + def_bool y + +config 64BIT + def_bool y + +config MMU + bool + default y + +config PGTABLE_LEVELS + int + default 4 + +config ARCH_SUPPORTS_HUGETLBFS + def_bool y + +config ARCH_ENABLE_MEMORY_HOTPLUG + bool + default y + +config ARCH_ENABLE_MEMORY_HOTREMOVE + bool + default y + +config ARCH_HAS_ILOG2_U32 + bool + default n + +config ARCH_HAS_ILOG2_U64 + bool + default n + +config GENERIC_GPIO + bool + +config GENERIC_CALIBRATE_DELAY + bool + default y + +config ZONE_DMA + bool "Support DMA zone" if EXPERT + default y + +config ZONE_DMA32 + bool + default y + +config NEED_DMA_MAP_STATE + def_bool y + +config NEED_SG_DMA_LENGTH + def_bool y + +config ARCH_WANT_HUGE_PMD_SHARE + def_bool y + +config GENERIC_ISA_DMA + bool + default y + +config NONCACHE_PAGE + bool + depends on SW64 + default y + +config AUDIT_ARCH + bool + +config SYS_HAS_EARLY_PRINTK + bool + +config HAVE_CSRRW + bool + +config ILLEGAL_POINTER_VALUE + hex + default 0xdead000000000000 + +menu "System setup" + +menu "Machine Configuration" + +choice + prompt "Subarchitecture Configuration" + +config SUBARCH_C3B + bool "C3B" + +config SUBARCH_C4 + bool "C4" + select HAVE_CSRRW + select GENERIC_SCHED_CLOCK +endchoice + +choice + prompt "Uncore Configuration" + +config UNCORE_XUELANG + bool "Uncore for C3B" + depends on SUBARCH_C3B + help + Sunway CPU uncore for C3B + +config UNCORE_JUNZHANG + bool "Uncore for C4" + depends on SUBARCH_C4 + help + Sunway CPU uncore for C4 +endchoice + +choice + prompt "Platform Type" + +config PLATFORM_XUELANG + bool "Xuelang" + depends on UNCORE_XUELANG + select SPARSE_IRQ + select SYS_HAS_EARLY_PRINTK + select SW64_INTC_V2 + select I2C_SUNWAY if I2C + help + Sunway board chipset for C3B + +config PLATFORM_JUNZHANG + bool "JunZhang" + depends on UNCORE_JUNZHANG + select SPARSE_IRQ + select SYS_HAS_EARLY_PRINTK + help + Sunway board chipset for C4 + +endchoice + +config MIGHT_HAVE_PC_SERIO + bool "Use PC serio device i8042" + select ARCH_MIGHT_HAVE_PC_SERIO + default n + +endmenu + +menu "CPU Power Management" +source "drivers/cpufreq/Kconfig" + +config SW64_CPUAUTOPLUG + bool "sw64 CPU Autoplug interface" + depends on SW64_CPUFREQ + default y + help + Turns on the interface for SW64 CPU autoplug. + +endmenu +# clear all implied options (don't want default values for those): +# Most of these machines have ISA slots; not exactly sure which don't, +# and this doesn't activate hordes of code, so do it always. +config ISA + bool + default y + help + Find out whether you have ISA slots on your motherboard. ISA is the + name of a bus system, i.e. the way the CPU talks to the other stuff + inside your box. Other bus systems are PCI, EISA, MicroChannel + (MCA) or VESA. ISA is an older system, now being displaced by PCI; + newer boards don't support it. If you have ISA, say Y, otherwise N.
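The three `choice` blocks above chain together: a C3B subarchitecture implies the Xuelang uncore and platform, and C4 implies JunZhang. A minimal sketch of how arch code can dispatch on these symbols at compile time; the CONFIG_* names are the real ones from this Kconfig, while setup_platform() and the two init hooks are hypothetical, for illustration only::

    #include <linux/init.h>

    /* Hypothetical dispatch on the platform choice above; only the
     * CONFIG_* symbols come from this Kconfig, the hooks do not exist. */
    static void __init setup_platform(void)
    {
    #if defined(CONFIG_PLATFORM_XUELANG)
    	xuelang_platform_init();	/* C3B boards: SW64_INTC_V2, early printk */
    #elif defined(CONFIG_PLATFORM_JUNZHANG)
    	junzhang_platform_init();	/* C4 boards: CSR access via HAVE_CSRRW */
    #endif
    }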
+ +config ISA_DMA_API + bool + default y + +config PCI_DOMAINS + def_bool PCI + +config PCI_DOMAINS_GENERIC + def_bool PCI + +config PCI_SYSCALL + def_bool PCI + +config IOMMU_HELPER + def_bool PCI + +config PHYSICAL_START + hex "Physical address where the kernel starts" + default "0x900000" + help + This gives the physical address where the kernel starts, and it + is 0x10000 before _text. If you plan to use the kernel for capturing + a crash dump, change this value to the start of the reserved region + (the "X" value as specified in the "crashkernel=YM@XM" command + line boot parameter passed to the panicked kernel). + +config KEXEC + bool "Kexec system call (EXPERIMENTAL)" + select KEXEC_CORE + help + kexec is a system call that implements the ability to shut down your + current kernel, and to start another kernel. It is like a reboot + but it is independent of the system firmware. And like a reboot + you can start any kernel with it, not just Linux. + + The name comes from the similarity to the exec system call. + + It is an ongoing process to be certain the hardware in a machine + is properly shut down, so do not be surprised if this code does not + initially work for you. As of this writing the exact hardware + interface is strongly in flux, so no good recommendation can be + made. + +config CRASH_DUMP + bool "Kernel crash dumps (EXPERIMENTAL)" + help + Generate a crash dump after being started by kexec. + This should normally only be set in special crash dump kernels + which are loaded in the main kernel with kexec-tools into + a specially reserved region and then later executed after + a crash by kdump/kexec. The crash dump kernel must be compiled + to a memory address not used by the main kernel or firmware using + PHYSICAL_START. + +config SECCOMP + def_bool y + prompt "Enable seccomp to safely compute untrusted bytecode" + help + This kernel feature is useful for number crunching applications + that may need to compute untrusted bytecode during their + execution. By using pipes or other transports made available to + the process as file descriptors supporting the read/write + syscalls, it's possible to isolate those applications in + their own address space using seccomp. Once seccomp is + enabled via prctl(PR_SET_SECCOMP), it cannot be disabled + and the task is only allowed to execute a few safe syscalls + defined by each seccomp mode. + + If unsure, say Y. Only embedded should say N here. + +config GENERIC_HWEIGHT + bool + default y + +config SMP + bool "Symmetric multi-processing support" + depends on SW64 + select USE_GENERIC_SMP_HELPERS + help + This enables support for systems with more than one CPU. If you have + a system with only one CPU, like most personal computers, say N. If + you have a system with more than one CPU, say Y. + + If you say N here, the kernel will run on single and multiprocessor + machines, but will use only one CPU of a multiprocessor machine. If + you say Y here, the kernel will run on many, but not all, + singleprocessor machines. On a singleprocessor machine, the kernel + will run faster if you say N here. + + See also the SMP-HOWTO available at + . + + If you don't know what to do here, say N.
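To make the PHYSICAL_START help above concrete, here is the "crashkernel=YM@XM" arithmetic it describes, as a small standalone program; the 2048M value is an assumed example, only the YM@XM convention comes from the help text::

    #include <stdio.h>

    int main(void)
    {
    	/* For crashkernel=256M@2048M the reserved region starts at X = 2048M,
    	 * so a capture kernel would be built with PHYSICAL_START 0x80000000. */
    	unsigned long long x_mib = 2048;	/* "X" in crashkernel=YM@XM */

    	printf("PHYSICAL_START=%#llx\n", x_mib << 20);	/* prints 0x80000000 */
    	return 0;
    }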
+ +config ARCH_PROC_KCORE_TEXT + def_bool y + +config HAVE_DEC_LOCK + bool "Use arch-specified dec_and_lock" + depends on SMP && !NUMA + default y + +config TRACE_IRQFLAGS_SUPPORT + def_bool y + +config ARCH_SUPPORTS_UPROBES + def_bool y + +config SCHED_SMT + bool "SMT scheduler support" + depends on SMP && SUBARCH_C4 + help + Improves the CPU scheduler's decision making when dealing with + MultiThreading at a cost of slightly increased overhead in some + places. If unsure say N here. + +config SCHED_MC + bool "Multi-core scheduler support" + help + Multi-core scheduler support improves the CPU scheduler's decision + making when dealing with multi-core CPU chips at a cost of slightly + increased overhead in some places. If unsure say N here. + +config NR_CPUS + int "Maximum number of CPUs (2-256)" + range 2 256 + depends on SMP + default "64" if UNCORE_XUELANG + help + SW6 support can handle a maximum of 256 CPUs. + +config HOTPLUG_CPU + bool "Support for hot-pluggable CPUs" + depends on SMP + help + Say Y here to allow turning CPUs off and on. CPUs can be + controlled through /sys/devices/system/cpu. + ( Note: power management support will enable this option + automatically on SMP systems. ) + Say N if you want to disable CPU hotplug. + +config ARCH_SPARSEMEM_ENABLE + bool "Sparse Memory Support" + depends on SMP + select SPARSEMEM_VMEMMAP_ENABLE + +source "kernel/livepatch/Kconfig" + +config NUMA + bool "NUMA Support" + depends on SMP && !FLATMEM + select ACPI_NUMA if ACPI + help + Say Y to compile the kernel to support NUMA (Non-Uniform Memory + Access). This option is for configuring high-end multiprocessor + server machines. If in doubt, say N. + +config USE_PERCPU_NUMA_NODE_ID + def_bool y + depends on NUMA + +config NODES_SHIFT + int + default "7" + depends on NUMA + +config RELOCATABLE + bool "Relocatable kernel" + help + This builds a kernel image that retains relocation information + so it can be loaded someplace besides the default 1MB. + The relocations make the kernel binary about 15% larger, + but are discarded at runtime. + +config RELOCATION_TABLE_SIZE + hex "Relocation table size" + depends on RELOCATABLE + range 0x0 0x01000000 + default "0x80000" + help + A table of relocation data will be appended to the kernel binary + and parsed at boot to fix up the relocated kernel. + + This option allows the amount of space reserved for the table to be + adjusted, although the default of 512KB should be ok in most cases. + + The build will fail and a valid size will be suggested if this is too small. + + If unsure, leave at the default value. + +config RANDOMIZE_BASE + bool "Randomize the address of the kernel image" + depends on RELOCATABLE + help + Randomizes the physical and virtual address at which the + kernel image is loaded, as a security feature that + deters exploit attempts relying on knowledge of the location + of kernel internals. + + Entropy is generated using any coprocessor 0 registers available. + + The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET. + + If unsure, say N. + +config RANDOMIZE_BASE_MAX_OFFSET + hex "Maximum kASLR offset" if EXPERT + depends on RANDOMIZE_BASE + range 0x0 0x20000000 + default "0x10000000" + help + When kASLR is active, this provides the maximum offset that will + be applied to the kernel image. It should be set according to the + amount of physical RAM available in the target system minus + PHYSICAL_START and must be a power of 2. + + This is limited by the size of KTEXT space, 512MB. The default is 256MB.
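The RANDOMIZE_BASE_MAX_OFFSET rules above (non-zero, a power of two, at most the 512MB KTEXT limit) can be checked mechanically. A small standalone sketch using the Kconfig defaults as inputs; the helper name is illustrative::

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the Kconfig range 0x0..0x20000000 and the power-of-two rule. */
    static bool valid_kaslr_offset(unsigned long long off)
    {
    	return off != 0 && (off & (off - 1)) == 0 && off <= 0x20000000ULL;
    }

    int main(void)
    {
    	printf("%d\n", valid_kaslr_offset(0x10000000ULL));	/* default: 1 */
    	printf("%d\n", valid_kaslr_offset(0x18000000ULL));	/* not a power of two: 0 */
    	return 0;
    }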
+ +config HZ + int "HZ of the short timer" + default 500 + +source "drivers/eisa/Kconfig" + +source "drivers/pcmcia/Kconfig" + +source "fs/Kconfig.binfmt" + +source "arch/sw_64/lib/Kconfig" + +endmenu + +menu "Boot options" + +config USE_OF + bool "Flattened Device Tree support" + select OF + select IRQ_DOMAIN + help + Include support for flattened device tree machine descriptions. + +config BUILTIN_DTB + bool "Embed DTB in kernel image" + depends on OF + default n + help + Embeds a device tree binary in the kernel image. + +config BUILTIN_DTB_NAME + string "Built in DTB" + depends on BUILTIN_DTB + help + Set the name of the DTB to embed, leave blank to pick one + automatically based on kernel configuration. + +config EFI + bool "UEFI runtime support" + select UCS2_STRING + select EFI_RUNTIME_WRAPPERS + default y + help + This option provides support for runtime services provided + by UEFI firmware (such as non-volatile variables, realtime + clock, and platform reset). A UEFI stub is also provided to + allow the kernel to be booted as an EFI application. This + is only useful on systems that have UEFI firmware. + +config DMI + bool "Enable support for SMBIOS (DMI) tables" + depends on EFI + default y + help + This enables SMBIOS/DMI feature for systems. + + This option is only useful on systems that have UEFI firmware. + However, even with this option, the resultant kernel should + continue to boot on existing non-UEFI platforms. + + NOTE: This does *NOT* enable or encourage the use of DMI quirks, + i.e., the practice of identifying the platform via DMI to + decide whether certain workarounds for buggy hardware and/or + firmware need to be enabled. This would require the DMI subsystem + to be enabled much earlier than we do on ARM, which is non-trivial. + +config CMDLINE_BOOL + bool "Built-in kernel command line" + help + Allow for specifying boot arguments to the kernel at + build time. On some systems (e.g. embedded ones), it is + necessary or convenient to provide some or all of the + kernel boot arguments with the kernel itself (that is, + to not rely on the boot loader to provide them.) + + To compile command line arguments into the kernel, + set this option to 'Y', then fill in the + boot arguments in CONFIG_CMDLINE. + + Systems with fully functional boot loaders (i.e. non-embedded) + should leave this option set to 'N'. + +config CMDLINE + string "Built-in kernel command string" + depends on CMDLINE_BOOL + default "" + help + Enter arguments here that should be compiled into the kernel + image and used at boot time. If the boot loader provides a + command line at boot time, it is appended to this string to + form the full kernel command line, when the system boots. + + However, you can use the CONFIG_CMDLINE_OVERRIDE option to + change this behavior. + + In most cases, the command line (whether built-in or provided + by the boot loader) should specify the device for the root + file system. + +config CMDLINE_OVERRIDE + bool "Built-in command line overrides boot loader arguments" + depends on CMDLINE_BOOL + help + Set this option to 'Y' to have the kernel ignore the boot loader + command line, and use ONLY the built-in command line. + + This is used to work around broken boot loaders. This should + be set to 'N' under normal conditions. + +config FORCE_MAX_ZONEORDER + int + default "16" if (HUGETLB_PAGE) + default "11" + help + The kernel memory allocator divides physically contiguous memory + blocks into "zones", where each zone is a power of two number of + pages. 
This option selects the largest power of two that the kernel + keeps in the memory allocator. If you need to allocate very large + blocks of physically contiguous memory, then you may need to + increase this value. + + This config option is actually maximum order plus one. For example, + a value of 11 means that the largest free memory block is 2^10 pages. + + We make sure that we can allocate up to a HugePage size for each configuration. + Hence we have: + MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2 + +endmenu + +source "drivers/firmware/Kconfig" + +menu "Power management options" + +source "kernel/power/Kconfig" + +source "drivers/acpi/Kconfig" + +config ARCH_SUSPEND_POSSIBLE + depends on SW64 + def_bool y + +config ARCH_HIBERNATION_POSSIBLE + depends on SW64 + def_bool y + +config ARCH_SELECT_MEMORY_MODEL + def_bool ARCH_SPARSEMEM_ENABLE + +source "drivers/cpuidle/Kconfig" + +source "drivers/idle/Kconfig" + +endmenu + +source "arch/sw_64/kvm/Kconfig" diff --git a/arch/sw_64/Kconfig.debug b/arch/sw_64/Kconfig.debug new file mode 100644 index 0000000000000000000000000000000000000000..6cb3c2488b368e3acc38cd32b33fe658036ac8d6 --- /dev/null +++ b/arch/sw_64/Kconfig.debug @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-2.0 +config EARLY_PRINTK + bool "Early printk" if EXPERT + depends on SYS_HAS_EARLY_PRINTK + default y + help + This option enables special console drivers which allow the kernel + to print messages very early in the bootup process. + + This is useful for kernel debugging when your machine crashes very + early before the console code is initialized. For normal operation, + it is not recommended because it looks ugly on some machines and + doesn't cooperate with an X server. You should normally say N here, + unless you want to debug such a crash. + +config UNA_PRINT + bool "Show debug info about user unaligned memory access" + default n + +config MATHEMU + tristate "Kernel FP software completion" if DEBUG_KERNEL && !SMP + default y if !DEBUG_KERNEL || SMP + help + This option is required for IEEE compliant floating point arithmetic + on the SW. The only time you would ever not say Y is to say M in + order to debug the code. Say Y unless you know what you are doing. + +config STACKTRACE_SUPPORT + bool + default y + +config SW64_RRU + bool "Enable RRU (Remote Read User)" + depends on SW64 + default n + help + Duplicate user stdout and stderr to a specific space. + Do not enable it in a production kernel. + +config SW64_RRK + bool "Enable RRK (Remote Read Kernel)" + depends on SW64 + default y + help + Duplicate the kernel log to a specific space. + Do not enable it in a production kernel. + +config DEBUG_MATCH + bool "instruction-flow and data-flow match debugfs interface" + depends on DEBUG_FS + default n + help + Turns on the DebugFS interface for instruction-flow and data-flow match. diff --git a/arch/sw_64/Makefile b/arch/sw_64/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..84f0dca5e9f710596d5b36dbff995570c6ef304d --- /dev/null +++ b/arch/sw_64/Makefile @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# sw/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details.
+# +# Copyright (C) 1994 by Linus Torvalds +# + + +archscripts: scripts_basic + $(Q)$(MAKE) $(build)=arch/sw_64/tools relocs + +archheaders: + $(Q)$(MAKE) $(build)=arch/sw_64/kernel/syscalls all + +NM := $(NM) -B +CCVERSION := $(shell $(CC) -dumpversion) +LDFLAGS_vmlinux := -static -N #-relax +CHECKFLAGS += -D__sw__ + +ifeq ($(CONFIG_RELOCATABLE),y) +LDFLAGS_vmlinux += --emit-relocs +endif + +CHECKFLAGS += -D__sw__ +cflags-y := -pipe -ffixed-8 -mno-fp-regs #-msmall-data +ifeq ($(CONFIG_SUBARCH_C4),y) + cflags-y += -fsw-rev +endif +cflags-y += $(call cc-option, -fno-jump-tables) + +cflags-y += $(cpuflags-y) + +KBUILD_CFLAGS += $(cflags-y) +KBUILD_DEFCONFIG = xuelang_defconfig + +head-y := arch/sw_64/kernel/head.o + +core-y += arch/sw_64/ +drivers-$(CONFIG_PCI) += arch/sw_64/pci/ +libs-y += arch/sw_64/lib/ + +# export what is needed by arch/sw_64/boot/Makefile +LIBS_Y := $(patsubst %/, %/lib.a, $(libs-y)) +export LIBS_Y + +boot := arch/sw_64/boot + +#Default target when executing make with no arguments +all: $(boot)/vmlinux.bin.gz + +$(boot)/vmlinux.bin.gz: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $@ + +bootimage bootpfile bootpzfile: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +archclean: + $(Q)$(MAKE) $(clean)=$(boot) + $(Q)$(MAKE) $(clean)=arch/sw_64/tools + +KBUILD_IMAGE := $(boot)/vmlinux.bin + +define archhelp + echo '* boot - Compressed kernel image (arch/sw_64/boot/vmlinux.bin.gz)' +endef diff --git a/arch/sw_64/Makefile.postlink b/arch/sw_64/Makefile.postlink new file mode 100644 index 0000000000000000000000000000000000000000..248844d141dd23098d874d219adc362ee21e99e9 --- /dev/null +++ b/arch/sw_64/Makefile.postlink @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0 +# =========================================================================== +# Post-link SW64 pass +# =========================================================================== +# +# 1. Insert relocations into vmlinux + +PHONY := __archpost +__archpost: + +-include include/config/auto.conf +include scripts/Kbuild.include + +CMD_RELOCS = arch/sw_64/tools/relocs +quiet_cmd_relocs = RELOCS $@ + cmd_relocs = $(CMD_RELOCS) $@ + +# `@true` prevents complaint when there is nothing to be done + +vmlinux: FORCE + @true +ifeq ($(CONFIG_RELOCATABLE),y) + $(call if_changed,relocs) +endif + +%.ko: FORCE + @true + +clean: + @true + +PHONY += FORCE clean + +FORCE: + +.PHONY: $(PHONY) diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/sw_64/boot/.gitignore similarity index 59% rename from arch/loongarch/include/uapi/asm/Kbuild rename to arch/sw_64/boot/.gitignore index 4aa680ca2e5fdf6407f8692264599da101d46ab3..8a90e24c76ab83a8c475375c2ecd5f070c5bbad5 100644 --- a/arch/loongarch/include/uapi/asm/Kbuild +++ b/arch/sw_64/boot/.gitignore @@ -1,2 +1,2 @@ # SPDX-License-Identifier: GPL-2.0 -generic-y += kvm_para.h +vmlinux diff --git a/arch/sw_64/boot/Makefile b/arch/sw_64/boot/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..dd09764846493072121e61e8630291d80c318232 --- /dev/null +++ b/arch/sw_64/boot/Makefile @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# arch/sw_64/boot/Makefile +# +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Based on arch/arm64/boot/Makefile. 
+# + +OBJCOPYFLAGS_vmlinux.bin := -O binary + +targets := vmlinux vmlinux.bin vmlinux.bin.gz + +quiet_cmd_strip = STRIP $@ + cmd_strip = $(STRIP) -o $@ $< + +# Compressed kernel image +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) + @echo ' Kernel $@ is ready' + +$(obj)/vmlinux: vmlinux FORCE + $(call if_changed,strip) + +$(obj)/vmlinux.bin: $(obj)/vmlinux FORCE + $(call if_changed,objcopy) diff --git a/arch/sw_64/boot/dts/Makefile b/arch/sw_64/boot/dts/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..e32c159cab641fb82d09f0e9eaeb428c7770f006 --- /dev/null +++ b/arch/sw_64/boot/dts/Makefile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0 +# Built-in dtb + +ifeq ($(CONFIG_PLATFORM_XUELANG),y) +builtindtb-y := chip3 +endif + +ifeq ($(CONFIG_PLATFORM_JUNZHANG),y) +builtindtb-y := empty +endif + +ifeq ($(CONFIG_BUILTIN_DTB), y) +ifneq ($(CONFIG_BUILTIN_DTB_NAME),"") + builtindtb-y := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_NAME)) +endif + +obj-y += $(builtindtb-y).dtb.o +dtb-y := $(builtindtb-y).dtb + +# for CONFIG_OF_ALL_DTBS test +dtstree := $(srctree)/$(src) +dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) +else +dtb-y := $(builtindtb-y).dtb +endif + +clean-files := *.dtb *.dtb.S diff --git a/arch/sw_64/boot/dts/chip3.dts b/arch/sw_64/boot/dts/chip3.dts new file mode 100644 index 0000000000000000000000000000000000000000..082506393ac98ffb1219d124029cafe8608ce4dd --- /dev/null +++ b/arch/sw_64/boot/dts/chip3.dts @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Default device tree; + */ + +/dts-v1/; +/ { + compatible = "sunway,chip3"; + model = "chip3"; + #address-cells = <2>; + #size-cells = <2>; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + clocks { + i2cclk: i2cclk { + compatible = "fixed-clock"; + clock-frequency = <25000000>; + #clock-cells = <0>; + clock-output-names = "i2cclk_25mhz"; + }; + spiclk: spiclk { + compatible = "fixed-clock"; + clock-frequency = <25000000>; + #clock-cells = <0>; + clock-output-names = "spiclk_25mhz"; + }; + + }; + + intc: interrupt-controller { + compatible = "sw64,sw6_irq_controller"; + interrupt-controller; + #interrupt-cells = <1>; + }; + + lpc_intc: interrupt-controller@0x8037 { + compatible = "sw64,lpc_intc"; + reg = <0x8037 0x40000000 0x0 0x8000>; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&intc>; + interrupts = <2>; + }; + + uart: serial0@8033 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw6,sunway-apb-uart"; + reg = <0x8033 0x0 0x0 0x1000>; + interrupt-parent=<&intc>; + interrupts = <3>; + reg-shift = <9>; + reg-io-width = <4>; + clock-frequency = <24000000>; + status = "okay"; + }; + + serial1@9033 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw6,sunway-apb-uart"; + reg = <0x9033 0x0 0x0 0x1000>; + reg-shift = <9>; + reg-io-width = <4>; + clock-frequency = <24000000>; + status = "okay"; + }; + + + i2c0@0x8031 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "snps,designware-i2c"; + reg = <0x8031 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <5>; + status = "okay"; + }; + + i2c1@0x8034 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x8034 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <6>; + status = "okay"; + }; + + i2c2@0x8035 { + #address-cells = 
<1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x8035 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <7>; + status = "okay"; + + rtc: pcf8523@68 { + compatible = "nxp,pcf8523"; + reg = <0x68>; + }; + + lm75: at30tse752a@48 { + compatible = "microchip,tcn75"; + reg = <0x48>; + }; + }; + + pvt: pvt@0x8030 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw64,pvt-vol"; + reg = <0x8030 0x0 0x0 0x7c00>; + status = "okay"; + }; + + spi: spi@0x8032 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sunway,chip3-spi"; + reg = <0x8032 0x0 0x0 0x8000>; + clocks = <&spiclk>; + interrupt-parent=<&intc>; + interrupts = <4>; + status = "okay"; + + flash@0 { + compatible = "winbond,w25q32dw", "jedec,spi-flash"; + spi-max-frequency = <25000000>; + m25p,fast-read; + spi-cpha; + spi-cpol; + poll_mode = <1>; /* poll_mode:1 interrupt mode: 0 */ + reg-io-width = <2>; + reg = <0 0 0 0 >; /* 0: flash chip selected bit */ + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "spares0"; + reg = <0 0x400000>; + }; + }; + }; + + flash@1 { + compatible = "winbond,w25q32dw", "jedec,spi-flash"; + spi-max-frequency = <25000000>; + m25p,fast-read; + spi-cpha; + spi-cpol; + poll_mode = <1>; /* poll_mode:1 interrupt mode: 0 */ + reg-io-width = <2>; + reg = <1 0 0 0 >; /* 1: flash chip selected bit */ + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "spares1"; + reg = <0 0x400000>; + }; + }; + }; + }; + + lpc: lpc@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sunway,chip3_lpc"; + reg = <0x8037 0x40000000 0x0 0x8000>; + status = "okay"; + + }; + + ipmi-kcs@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + device_type = "ipmi"; + compatible = "ipmi-kcs"; + reg = <0x8037 0x10000ca2 0x0 0x10>; + reg-size = <1>; + reg-spacing = <1>; + reg-shift = <0>; + status = "disabled"; + }; + + ipmi-bt@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + device_type = "ipmi"; + compatible = "ipmi-bt"; + reg = <0x8037 0x100000e4 0x0 0x10>; + interrupt-parent=<&lpc_intc>; + interrupts = <10>; + reg-size = <1>; + reg-spacing = <1>; + reg-shift = <0>; + status = "disabled"; + }; + + gpio: gpio@8036 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "snps,sw-gpio"; + reg = <0x8036 0x0 0x0 0x8000>; + status = "okay"; + + porta: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <0 0 0 0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent=<&intc>; + interrupts = <0>; + }; + }; + + }; +}; diff --git a/arch/sw_64/boot/dts/chip_vt.dts b/arch/sw_64/boot/dts/chip_vt.dts new file mode 100644 index 0000000000000000000000000000000000000000..f26285367f98c6715f3ff88eacb12496d12e7644 --- /dev/null +++ b/arch/sw_64/boot/dts/chip_vt.dts @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Default device tree; + */ + +/dts-v1/; +/ { + compatible = "sunway,chip3"; + model = "chip3"; + #address-cells = <2>; + #size-cells = <2>; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + intc: interrupt-controller { + compatible = "sw64,sw6_irq_vt_controller"; + interrupt-controller; + #interrupt-cells = <1>; + }; + + uart: serial0@8801 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "ns16550a"; + reg =
<0x8801 0x3f8 0x0 0x10>; + interrupt-parent=<&intc>; + interrupts = <12>; + reg-shift = <0>; + reg-io-width = <1>; + clock-frequency = <24000000>; + status = "okay"; + }; + misc: misc0@8036 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw6,sunway-ged"; + reg = <0x8036 0x0 0x0 0x20>; + interrupt-parent=<&intc>; + interrupts = <13>; + reg-shift = <0>; + reg-io-width = <8>; + clock-frequency = <24000000>; + status = "okay"; + }; + fw_cfg: fw_cfg@8049 { + dma-coherent; + reg = <0x8049 0x20000000 0x0 0x18>; + compatible = "qemu,fw-cfg-mmio"; + }; + }; +}; diff --git a/arch/sw_64/boot/dts/empty.dts b/arch/sw_64/boot/dts/empty.dts new file mode 100644 index 0000000000000000000000000000000000000000..f8fe34e29641f78dfdc1cc163b63d9becfcaeacd --- /dev/null +++ b/arch/sw_64/boot/dts/empty.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Default device tree; + */ + +/dts-v1/; +/ { + compatible = "sunway,chip3"; + model = "chip3"; + #address-cells = <2>; + #size-cells = <2>; + + soc { + }; +}; diff --git a/arch/sw_64/configs/anolis_xuelang_defconfig b/arch/sw_64/configs/anolis_xuelang_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..001dfb0187f62f4eedc903afb9bbd49c7a2b15e8 --- /dev/null +++ b/arch/sw_64/configs/anolis_xuelang_defconfig @@ -0,0 +1,1105 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_LSM=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_NUMA_BALANCING=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_PROFILING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_SMP=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=256 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_LIVEPATCH=y +CONFIG_NUMA=y +CONFIG_HZ=100 +CONFIG_BINFMT_MISC=m +CONFIG_USE_OF=y +CONFIG_DMI_SYSFS=y +CONFIG_FW_CFG_SYSFS=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_HIBERNATION=y +CONFIG_PM_DEBUG=y +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_TAD=m +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +CONFIG_ACPI_NFIT=m +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_JUMP_LABEL=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_ALL is not set +CONFIG_MODULE_SIG_SHA256=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y 
+CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_IOSCHED_BFQ=y +CONFIG_ZSWAP=y +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_COMPAT_BRK is not set +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_MEMORY_FAILURE=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_USERFAULTFD=y +CONFIG_LRU_GEN=y +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +CONFIG_DAMON_DBGFS=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m 
+CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m 
+CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_ATM=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_X25=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_NET_SWITCHDEV=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# CONFIG_WIRELESS is not set +CONFIG_RFKILL=m 
+CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_STUB=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_SHPC=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_CONNECTOR=y +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_ZRAM=m +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AHA152X=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_LEAPIORAID=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_SCSI_FDOMAIN_ISA=m +CONFIG_SCSI_STEX=m +CONFIG_SCSI_SYM53C8XX_2=m +CONFIG_SCSI_QLOGIC_FAS=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_LPFC=m +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_PMCRAID=m +CONFIG_SCSI_PM8001=m +CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=m +CONFIG_SATA_SIL24=m +CONFIG_ATA_GENERIC=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m 
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
+CONFIG_VXLAN=m
+CONFIG_GENEVE=m
+CONFIG_MACSEC=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_NET_VRF=m
+CONFIG_VSOCKMON=m
+# CONFIG_ATM_DRIVERS is not set
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_ET131X=m
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ARC is not set
+CONFIG_TIGON3=m
+CONFIG_BNX2X=m
+CONFIG_BNXT=m
+CONFIG_BNXT_DCB=y
+CONFIG_THUNDER_NIC_PF=m
+CONFIG_THUNDER_NIC_VF=m
+CONFIG_CAVIUM_PTP=y
+CONFIG_CHELSIO_T1=m
+CONFIG_CHELSIO_T1_1G=y
+# CONFIG_NET_VENDOR_CIRRUS is not set
+CONFIG_DNET=m
+# CONFIG_NET_VENDOR_EZCHIP is not set
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_DCB=y
+CONFIG_IXGBEVF=m
+CONFIG_I40E=m
+CONFIG_I40EVF=m
+CONFIG_ICE=m
+CONFIG_FM10K=m
+CONFIG_IGC=m
+CONFIG_JME=m
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_MLX4_CORE_GEN2 is not set
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_FPGA=y
+CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_CORE_IPOIB=y
+CONFIG_MLXSW_CORE=m
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_QCA7000_SPI=m
+CONFIG_QCOM_EMAC=m
+CONFIG_RMNET=m
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SFC=m
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NGBE=m
+CONFIG_TXGBE=m
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PHYLIB=y
+CONFIG_MDIO_BUS_MUX_MULTIPLEXER=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=m
+CONFIG_PPP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+# CONFIG_WLAN is not set
+CONFIG_WAN=y
+CONFIG_NETDEVSIM=m
+CONFIG_ISDN=y
+CONFIG_INPUT_LEDS=m
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+CONFIG_SERIO_ARC_PS2=m
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=m
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SUNWAY=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_PANIC_EVENT=y
+CONFIG_IPMI_PANIC_STRING=y
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_SSIF=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM=y
+CONFIG_TCG_TIS=y
+CONFIG_TCG_ATMEL=m
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_MUX=m
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_SMBUS=m
+CONFIG_SPI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_GPIO_GENERIC_PLATFORM=m
+CONFIG_POWER_RESET=y
+CONFIG_SENSORS_PVT=y
+CONFIG_SENSORS_LM75=m
+CONFIG_PMBUS=m
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_WATCHDOG_SYSFS=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_I6300ESB_WDT=m
+CONFIG_SSB=y
+CONFIG_RC_CORE=m
+CONFIG_DRM=m
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+# CONFIG_DRM_I2C_SIL164 is not set
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_AMDGPU=m
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_UDL=m
+CONFIG_DRM_AST=m
+CONFIG_DRM_MGAG200=m
+CONFIG_DRM_QXL=m
+CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_BOCHS=m
+CONFIG_DRM_CIRRUS_QEMU=m
+CONFIG_FB=y
+CONFIG_FB_EFI=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+# CONFIG_HID_ITE is not set
+CONFIG_HID_LOGITECH=m
+# CONFIG_HID_REDRAGON is not set
+# CONFIG_I2C_HID is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_INFINIBAND_ERDMA=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
+CONFIG_RDMA_RXE=m
+CONFIG_RDMA_SIW=m
+CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INFINIBAND_IPOIB_CM=y
+CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y
+CONFIG_INFINIBAND_SRP=m
+CONFIG_INFINIBAND_SRPT=m
+CONFIG_INFINIBAND_ISER=m
+CONFIG_INFINIBAND_ISERT=m
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_SYSTOHC is not set
+CONFIG_RTC_DRV_DS1307=m
+CONFIG_RTC_DRV_PCF8523=m
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=m
+CONFIG_ASYNC_TX_DMA=y
+CONFIG_DMATEST=m
+CONFIG_UIO_CIF=m
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_VFIO=m
+CONFIG_VFIO_NOIOMMU=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PMEM=m
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_INPUT=m
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y
+CONFIG_DAX=y
+CONFIG_STM=m
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_REISERFS_FS=m
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_GFS2_FS=y
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_F2FS_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
+CONFIG_VIRT_FUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
+CONFIG_OVERLAY_FS_INDEX=y
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+CONFIG_CACHEFILES=m
+CONFIG_CACHEFILES_ONDEMAND=y
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE_DEVICE_DUMP=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_EROFS_FS=m
+CONFIG_EROFS_FS_ZIP_LZMA=y
+CONFIG_EROFS_FS_ZIP_DEFLATE=y
+CONFIG_EROFS_FS_ONDEMAND=y
+CONFIG_NFS_FS=m
+# CONFIG_NFS_V2 is not set
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_NFS_FSCACHE=y
+# CONFIG_NFS_V4_2_READ_PLUS is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_SCSILAYOUT=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_CEPH_FS=m
+CONFIG_CEPH_FS_POSIX_ACL=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
+CONFIG_TRUSTED_KEYS=m
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_INFINIBAND=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_SECURITY_PATH=y
+CONFIG_LSM_MMAP_MIN_ADDR=65535
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_YAMA=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_INTEGRITY_PLATFORM_KEYRING=y
+CONFIG_IMA=y
+CONFIG_IMA_SIG_TEMPLATE=y
+CONFIG_IMA_DEFAULT_HASH_SHA256=y
+CONFIG_IMA_WRITE_POLICY=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_IMA_APPRAISE_BUILD_POLICY=y
+CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y
+CONFIG_IMA_BLACKLIST_KEYRING=y
+CONFIG_IMA_LOAD_X509=y
+CONFIG_EVM=y
+CONFIG_EVM_LOAD_X509=y
+CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf"
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_CRYPTO_FIPS=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_SM2=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4_GENERIC=y
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_CFB=y
+CONFIG_CRYPTO_CTS=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=y
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=y
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_SM3_GENERIC=y
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_USER_API_RNG=y
+CONFIG_CRYPTO_USER_API_AEAD=y
+CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_SIGNED_PE_FILE_VERIFICATION=y
+CONFIG_SYSTEM_EXTRA_CERTIFICATE=y
+CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192
+CONFIG_SECONDARY_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_DMA_CMA=y
+CONFIG_PRINTK_TIME=y
+CONFIG_BOOT_PRINTK_DELAY=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+# CONFIG_FRAME_POINTER is not set
+CONFIG_KGDB=y
+CONFIG_KGDB_TESTS=y
+CONFIG_KGDB_KDB=y
+CONFIG_KDB_KEYBOARD=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=1
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_LIST=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_TRACE is not set
+CONFIG_STACK_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_HWLAT_TRACER=y
+CONFIG_TIMERLAT_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_HIST_TRIGGERS=y
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_BPF=m
diff --git a/arch/sw_64/configs/junzhang_defconfig b/arch/sw_64/configs/junzhang_defconfig
new file mode 100644
index 0000000000000000000000000000000000000000..e202359a2936afd01eea3ce79cbf2f816be44900
--- /dev/null
+++ b/arch/sw_64/configs/junzhang_defconfig
@@ -0,0 +1,668 @@
+CONFIG_LOCALVERSION="-junzhang"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_USELIB=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_DEBUG_PERF_USE_VMALLOC=y
+CONFIG_SUBARCH_C4=y
+CONFIG_SMP=y
+CONFIG_SCHED_SMT=y
+CONFIG_NR_CPUS=64
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_NUMA=y
+CONFIG_HZ=100
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_USE_OF=y
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_DMI_SYSFS=m
+CONFIG_ACPI_TAD=y
+# CONFIG_CPU_IDLE is not set
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
+CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_SGI_PARTITION=y
+CONFIG_ULTRIX_PARTITION=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA_AREAS=7
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_TLS=m
+CONFIG_TLS_DEVICE=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_INTERFACE=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_IP_MROUTE=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=m
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_IPV6_ILA=m
+CONFIG_IPV6_VTI=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPV6_SEG6_HMAC=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_CONNLIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_L2TP=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPMARK=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
+CONFIG_IP_SET_HASH_MAC=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_LOG_ARP=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_CBS=m
+CONFIG_NET_SCH_ETF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_SKBPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_CAKE=m
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_HHF=m
+CONFIG_NET_SCH_PIE=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_DEFAULT=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_BPF=m
+CONFIG_NET_ACT_SKBMOD=m
+CONFIG_NET_ACT_IFE=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_IFE_SKBMARK=m
+CONFIG_NET_IFE_SKBPRIO=m
+CONFIG_NET_IFE_SKBTCINDEX=m
+CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_NETLINK_DIAG=m
+CONFIG_CGROUP_NET_PRIO=y
+# CONFIG_WIRELESS is not set
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIEAER=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCI_MSI=y
+CONFIG_PCI_IOV=y
+CONFIG_UEVENT_HELPER=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_ABSENT=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=5000000
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=y
+CONFIG_NVME_MULTIPATH=y
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=y
+CONFIG_NVME_TARGET=y
+CONFIG_NVME_TARGET_LOOP=y
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=y
+CONFIG_NVME_TARGET_FCLOOP=y
+CONFIG_RAID_ATTRS=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=y
+CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_MPT3SAS=m
+CONFIG_SCSI_LEAPIORAID=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_ALUA=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+# CONFIG_ATA_SFF is not set
+CONFIG_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BCACHE=m
+CONFIG_BCACHE_DEBUG=y
+CONFIG_BCACHE_CLOSURES_DEBUG=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_DEBUG=y
+CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y
+CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y
+CONFIG_DM_UNSTRIPED=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_WRITECACHE=m
+CONFIG_DM_ERA=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_NET_FC=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VIRTIO_NET=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ARC is not set
+CONFIG_CAVIUM_PTP=y
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=m
+CONFIG_IXGBE=m
+CONFIG_IXGBEVF=m
+CONFIG_I40E=y
+CONFIG_I40EVF=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=y
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_FPGA=y
+CONFIG_MLX5_CORE_EN=y
+CONFIG_MLXSW_CORE=y
+CONFIG_MLXSW_PCI=y
+CONFIG_MLXSW_I2C=y
+CONFIG_MLXSW_MINIMAL=y
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_SUNWAY=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_SPI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SENSORS_PVT=y
+CONFIG_SENSORS_LM75=y
+CONFIG_SSB=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_DRM_AST=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
+CONFIG_INFINIBAND_MTHCA=m
+# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
+CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INFINIBAND_IPOIB_CM=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
+# CONFIG_RTC_INTF_PROC is not set
+CONFIG_RTC_DRV_PCF8523=y
+CONFIG_UIO=y
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_VIRTIO_PCI=y
+# CONFIG_VIRTIO_PCI_LEGACY is not set
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_DEBUG=y
+CONFIG_XFS_FS=y
+CONFIG_GFS2_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA=y
+CONFIG_FUSE_FS=y
+CONFIG_FSCACHE=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_UTF8=y
+CONFIG_NTFS_FS=y
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_NFS_V4_1_MIGRATION=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFS_FSCACHE=y
+CONFIG_NFS_USE_LEGACY_DNS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_SCSILAYOUT=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
+CONFIG_NLS_UTF8=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_INFINIBAND=y
+CONFIG_SECURITY_PATH=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CONSOLE_LOGLEVEL_QUIET=7
+# CONFIG_FRAME_POINTER is not set
+CONFIG_SCHEDSTATS=y
+# CONFIG_RCU_TRACE is not set
diff --git a/arch/sw_64/configs/kata_guest_defconfig b/arch/sw_64/configs/kata_guest_defconfig
new file mode 100644
index 0000000000000000000000000000000000000000..8122155c127659ffdf6b0e0b0df3a2ce5f677cdf
--- /dev/null
+++ b/arch/sw_64/configs/kata_guest_defconfig
@@ -0,0 +1,633 @@
+CONFIG_LOCALVERSION="-xuelang"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_USELIB=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_DEBUG_PERF_USE_VMALLOC=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_CPUFREQ_DEBUGFS=y
+# CONFIG_LOCK_MEMB is not set
+CONFIG_SMP=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_NUMA=y
+CONFIG_HZ=100
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_USE_OF=y
+CONFIG_SW64_BUILTIN_DTB=y
+CONFIG_SW64_BUILTIN_DTB_NAME="chip_vt"
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_DMI_SYSFS=m
+CONFIG_GOOGLE_FIRMWARE=y
+CONFIG_SW64_SUSPEND_DEEPSLEEP_NONBOOT_CORE=y
+CONFIG_SW64_SUSPEND_DEEPSLEEP_BOOTCORE=y
+# CONFIG_CPU_IDLE is not set
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
+CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_SGI_PARTITION=y
+CONFIG_ULTRIX_PARTITION=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_TLS=m
+CONFIG_TLS_DEVICE=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_IP_MROUTE=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_IPV6_ILA=m
+CONFIG_IPV6_VTI=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPV6_SEG6_HMAC=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_TABLES=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_OBJREF=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_L2TP=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPMARK=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
+CONFIG_IP_SET_HASH_MAC=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_FLOW_TABLE_IPV4=m
+CONFIG_NF_LOG_ARP=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_CBS=m
+CONFIG_NET_SCH_ETF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_SKBPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_CAKE=m
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_HHF=m
+CONFIG_NET_SCH_PIE=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_DEFAULT=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_BPF=m
+CONFIG_NET_ACT_SKBMOD=m
+CONFIG_NET_ACT_IFE=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_IFE_SKBMARK=m
+CONFIG_NET_IFE_SKBPRIO=m
+CONFIG_NET_IFE_SKBTCINDEX=m
+CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=y
+CONFIG_VSOCKETS_DIAG=m
+CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_BPF_JIT=y
+# CONFIG_WIRELESS is not set
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_ABSENT=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_OF_OVERLAY=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=5000000
+CONFIG_VIRTIO_BLK=y
+CONFIG_NVME_MULTIPATH=y
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=y
+CONFIG_NVME_TARGET=y
+CONFIG_NVME_TARGET_LOOP=y
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=y
+CONFIG_NVME_TARGET_FCLOOP=y
+CONFIG_RAID_ATTRS=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=y
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_ALUA=y
+CONFIG_ATA=y
+# CONFIG_ATA_SFF is not set
+CONFIG_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BCACHE=m
+CONFIG_BCACHE_DEBUG=y
+CONFIG_BCACHE_CLOSURES_DEBUG=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_DEBUG=y
+CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y
+CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y
+CONFIG_DM_UNSTRIPED=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_WRITECACHE=m
+CONFIG_DM_ERA=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_INPUT_FF_MEMLESS=y
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_SUNWAY=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
+# CONFIG_DEVPORT is not set
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_SPI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SENSORS_PVT=y
+CONFIG_SENSORS_LM75=y
+CONFIG_SSB=y
+CONFIG_SUNWAY_SUPERIO_AST2400=y
+CONFIG_DRM=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
+# CONFIG_RTC_INTF_PROC is not set
+CONFIG_RTC_DRV_PCF8523=y
+CONFIG_UIO=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_SW64_LPC_INTC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_DEBUG=y
+CONFIG_XFS_FS=y
+CONFIG_GFS2_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_OVERLAY_FS_INDEX=y
+CONFIG_OVERLAY_FS_XINO_AUTO=y
+CONFIG_OVERLAY_FS_METACOPY=y
+CONFIG_FSCACHE=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_UTF8=y
+CONFIG_NTFS_FS=y
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_NFS_V4_1_MIGRATION=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFS_FSCACHE=y
+CONFIG_NFS_USE_LEGACY_DNS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_SCSILAYOUT=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_9P_FS=y
+CONFIG_9P_FSCACHE=y
+CONFIG_9P_FS_POSIX_ACL=y
+CONFIG_9P_FS_SECURITY=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
+CONFIG_NLS_UTF8=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_INFINIBAND=y
+CONFIG_SECURITY_PATH=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CONSOLE_LOGLEVEL_QUIET=7
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_FRAME_POINTER is not set
+CONFIG_SCHEDSTATS=y
+# CONFIG_RCU_TRACE is not set
diff --git a/arch/sw_64/configs/kata_xuelang_defconfig b/arch/sw_64/configs/kata_xuelang_defconfig
new file mode 100644
index 0000000000000000000000000000000000000000..f553f0e71dbf865ff8a49109e0843291b52a98f1
--- /dev/null
+++ b/arch/sw_64/configs/kata_xuelang_defconfig
@@ -0,0 +1,616 @@
+CONFIG_LOCALVERSION="-xuelang"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_USELIB=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_DEBUG_PERF_USE_VMALLOC=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_CPUFREQ_DEBUGFS=y
+# CONFIG_LOCK_MEMB is not set
+CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_NUMA=y
+CONFIG_HZ=100
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_USE_OF=y
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_DMI_SYSFS=m
+# CONFIG_SUSPEND is not set
+# CONFIG_CPU_IDLE is not set
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_SGI_PARTITION=y
+CONFIG_ULTRIX_PARTITION=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_TLS=m
+CONFIG_TLS_DEVICE=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_INTERFACE=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_IP_MROUTE=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=m
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_IPV6_ILA=m
+CONFIG_IPV6_VTI=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPV6_SEG6_HMAC=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_OBJREF=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_L2TP=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPMARK=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
+CONFIG_IP_SET_HASH_MAC=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_FLOW_TABLE_IPV4=m
+CONFIG_NF_LOG_ARP=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_CBS=m
+CONFIG_NET_SCH_ETF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_SKBPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_CAKE=m
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_HHF=m
+CONFIG_NET_SCH_PIE=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_DEFAULT=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_BPF=m
+CONFIG_NET_ACT_SKBMOD=m
+CONFIG_NET_ACT_IFE=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_IFE_SKBMARK=m
+CONFIG_NET_IFE_SKBPRIO=m
+CONFIG_NET_IFE_SKBTCINDEX=m
+CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=y
+CONFIG_VSOCKETS_DIAG=m
+CONFIG_NETLINK_DIAG=m
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_DROP_MONITOR=m
+# CONFIG_WIRELESS is not set
+CONFIG_CAIF=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_ABSENT=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=5000000
+CONFIG_NVME_MULTIPATH=y
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=y
+CONFIG_NVME_TARGET=y
+CONFIG_NVME_TARGET_LOOP=y
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=y
+CONFIG_NVME_TARGET_FCLOOP=y
+CONFIG_RAID_ATTRS=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=y
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_ALUA=y
+CONFIG_ATA=y
+# CONFIG_ATA_SFF is not set
+CONFIG_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BCACHE=m
+CONFIG_BCACHE_DEBUG=y
+CONFIG_BCACHE_CLOSURES_DEBUG=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_DEBUG=y
+CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y
+CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y
+CONFIG_DM_UNSTRIPED=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_WRITECACHE=m
+CONFIG_DM_ERA=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_INPUT_FF_MEMLESS=y
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_SUNWAY=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_SPI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SSB=y
+CONFIG_DRM=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
+# CONFIG_RTC_INTF_PROC is not set
+CONFIG_RTC_DRV_PCF8523=y
+CONFIG_UIO=y
+CONFIG_STAGING=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_DEBUG=y
+CONFIG_XFS_FS=y
+CONFIG_GFS2_FS=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=m
+CONFIG_FSCACHE=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_UTF8=y
+CONFIG_NTFS_FS=y
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_NFS_V4_1_MIGRATION=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFS_FSCACHE=y
+CONFIG_NFS_USE_LEGACY_DNS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_SCSILAYOUT=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
+CONFIG_NLS_UTF8=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_INFINIBAND=y
+CONFIG_SECURITY_PATH=y
+CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/configs/xuelang_defconfig b/arch/sw_64/configs/xuelang_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..9f2944cc3b4f4f6d48223328cffa93bbb244a099 --- /dev/null +++ b/arch/sw_64/configs/xuelang_defconfig @@ -0,0 +1,669 @@ +CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_SMP=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_USE_OF=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +CONFIG_ACPI_TAD=y +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +# CONFIG_COMPAT_BRK is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA_AREAS=7 +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m 
+CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y 
+CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +# CONFIG_WIRELESS is not set +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +# CONFIG_PCIEASPM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_IOV=y +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y 
+CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_LEAPIORAID=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_NET_FC=y +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_VIRTIO_NET=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_CAVIUM_PTP=y +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_I40E=y +CONFIG_I40EVF=y +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLXSW_CORE=y +CONFIG_MLXSW_PCI=y +CONFIG_MLXSW_I2C=y +CONFIG_MLXSW_MINIMAL=y +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_PCI is not set +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_CHIP3=y +CONFIG_SPI_SPIDEV=y +CONFIG_SENSORS_PVT=y 
+CONFIG_SENSORS_LM75=y +CONFIG_SSB=y +CONFIG_DRM=y +CONFIG_DRM_RADEON=y +CONFIG_DRM_AST=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_PCI_LEGACY is not set +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_SUNWAY_IOMMU=y +CONFIG_SW64_LPC_INTC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_FRAME_POINTER is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/include/asm/Kbuild b/arch/sw_64/include/asm/Kbuild new file mode 100644 index 
0000000000000000000000000000000000000000..0dd0a704d8f157b5aebd4e9c4715e28c7227d006
--- /dev/null
+++ b/arch/sw_64/include/asm/Kbuild
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+generic-y += clkdev.h
+generic-y += export.h
+generic-y += kvm_types.h
+generic-y += mcs_spinlock.h
+generic-y += param.h
+generic-y += qrwlock.h
+generic-y += qspinlock.h
+generic-y += rwsem.h
+generic-y += seccomp.h
+generic-y += segment.h
+generic-y += types.h
+generic-y += user.h
+
+generated-y += syscall_table.h
diff --git a/arch/sw_64/include/asm/acenv.h b/arch/sw_64/include/asm/acenv.h
new file mode 100644
index 0000000000000000000000000000000000000000..53b2898718fe9d08ad95facb8f8b58cd36aa674d
--- /dev/null
+++ b/arch/sw_64/include/asm/acenv.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_SW64_ACENV_H
+#define _ASM_SW64_ACENV_H
+
+#define COMPILER_DEPENDENT_INT64 long
+#define COMPILER_DEPENDENT_UINT64 unsigned long
+
+/*
+ * Calling conventions:
+ *
+ * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
+ * ACPI_EXTERNAL_XFACE - External ACPI interfaces
+ * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
+ * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
+ */
+#define ACPI_SYSTEM_XFACE
+#define ACPI_EXTERNAL_XFACE
+#define ACPI_INTERNAL_XFACE
+#define ACPI_INTERNAL_VAR_XFACE
+
+/* Asm macros */
+#define ACPI_FLUSH_CPU_CACHE()
+
+int __acpi_acquire_global_lock(unsigned int *lock);
+int __acpi_release_global_lock(unsigned int *lock);
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
+	((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
+	((Acq) = __acpi_release_global_lock(&facs->global_lock))
+
+/*
+ * Math helper asm macros
+ */
+#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32)
+
+#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo)
+#endif /* _ASM_SW64_ACENV_H */
diff --git a/arch/sw_64/include/asm/acpi.h b/arch/sw_64/include/asm/acpi.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef46f481e1fdffa89929a0857e85b3d5055c5bad
--- /dev/null
+++ b/arch/sw_64/include/asm/acpi.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_SW64_ACPI_H
+#define _ASM_SW64_ACPI_H
+
+#include
+#include
+#include
+#include
+
+#ifdef CONFIG_ACPI
+extern int acpi_noirq;
+extern int acpi_strict;
+extern int acpi_disabled;
+extern int acpi_pci_disabled;
+
+/* _ASM_SW64_PDC_H */
+#define ACPI_PDC_P_FFH (0x0001)
+#define ACPI_PDC_C_C1_HALT (0x0002)
+#define ACPI_PDC_T_FFH (0x0004)
+#define ACPI_PDC_SMP_C1PT (0x0008)
+#define ACPI_PDC_SMP_C2C3 (0x0010)
+#define ACPI_PDC_SMP_P_SWCOORD (0x0020)
+#define ACPI_PDC_SMP_C_SWCOORD (0x0040)
+#define ACPI_PDC_SMP_T_SWCOORD (0x0080)
+#define ACPI_PDC_C_C1_FFH (0x0100)
+#define ACPI_PDC_C_C2C3_FFH (0x0200)
+#define ACPI_PDC_SMP_P_HWCOORD (0x0800)
+
+#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \
+				     ACPI_PDC_C_C1_HALT | \
+				     ACPI_PDC_P_FFH)
+
+#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \
+				       ACPI_PDC_C_C1_HALT | \
+				       ACPI_PDC_SMP_P_SWCOORD | \
+				       ACPI_PDC_SMP_P_HWCOORD | \
+				       ACPI_PDC_P_FFH)
+
+#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
+				   ACPI_PDC_SMP_C1PT | \
+				   ACPI_PDC_C_C1_HALT | \
+				   ACPI_PDC_C_C1_FFH | \
+				   ACPI_PDC_C_C2C3_FFH)
+
+#define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE
+
+/*
+ * The number 64 is chosen simply because it is the value most
+ * commonly used by other architectures. SW64 does not actually
+ * have a fixmap area in its memory layout.
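+ *
+ * NR_FIX_BTMAPS is presumably kept so that generic code such as
+ * early_ioremap(), which sizes its boot-time mappings from this
+ * constant, continues to build (our reading, not stated by this
+ * patch).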
+ */ +#define NR_FIX_BTMAPS 64 + +static inline void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_pci_disabled = 1; + acpi_noirq = 1; +} + +static inline void enable_acpi(void) +{ + acpi_disabled = 0; + acpi_pci_disabled = 0; + acpi_noirq = 0; +} + +static inline void acpi_noirq_set(void) +{ + acpi_noirq = 1; +} + +static inline void acpi_disable_pci(void) +{ + acpi_pci_disabled = 1; + acpi_noirq_set(); +} + +static inline bool acpi_has_cpu_in_madt(void) +{ + return true; +} + +/* Low-level suspend routine. */ +extern int (*acpi_suspend_lowlevel)(void); +extern unsigned long long arch_acpi_wakeup_start; + +/* Physical address to resume after wakeup */ +#define acpi_wakeup_address arch_acpi_wakeup_start + +/* + * Check if the CPU can handle C2 and deeper + */ +static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) +{ + return max_cstate; +} + +static inline bool arch_has_acpi_pdc(void) +{ + return false; +} + +static inline void arch_acpi_set_pdc_bits(u32 *buf) +{ +} +#else /* !CONFIG_ACPI */ + +static inline void acpi_noirq_set(void) { } +static inline void acpi_disable_pci(void) { } +static inline void disable_acpi(void) { } + +#endif /* !CONFIG_ACPI */ + +#define acpi_unlazy_tlb(x) +#endif /* _ASM_SW64_ACPI_H */ diff --git a/arch/sw_64/include/asm/asm-offsets.h b/arch/sw_64/include/asm/asm-offsets.h new file mode 100644 index 0000000000000000000000000000000000000000..d370ee36a182ba510c28459f856b17f321bd57fc --- /dev/null +++ b/arch/sw_64/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/sw_64/include/asm/asm-prototypes.h b/arch/sw_64/include/asm/asm-prototypes.h new file mode 100644 index 0000000000000000000000000000000000000000..67746d6bffb725b41d5997f983cded406e63573a --- /dev/null +++ b/arch/sw_64/include/asm/asm-prototypes.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ASM_PROTOTYPES_H +#define _ASM_SW64_ASM_PROTOTYPES_H + +#include +#include +#include +#include +#include + +#include + +extern void __divl(void); +extern void __reml(void); +extern void __divw(void); +extern void __remw(void); +extern void __divlu(void); +extern void __remlu(void); +extern void __divwu(void); +extern void __remwu(void); + +#endif /* _ASM_SW64_ASM_PROTOTYPES_H */ diff --git a/arch/sw_64/include/asm/ast2400.h b/arch/sw_64/include/asm/ast2400.h new file mode 100644 index 0000000000000000000000000000000000000000..5f4cc84ff3a8cb729ce2d59c796274b548ccd7c9 --- /dev/null +++ b/arch/sw_64/include/asm/ast2400.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015 Weiqiang Su + * + * Both AST2400D and AST2400F package variants are supported. + */ + +#ifndef _ASM_SW64_AST2400_H +#define _ASM_SW64_AST2400_H + +#include + +/* Logical Device Numbers (LDN). 
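+ *
+ * Each logical device inside the Super I/O chip is selected by
+ * writing its LDN to configuration register 0x07 before touching
+ * the device's own registers; see pnp_set_logical_device() below.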
*/ +#define AST2400_FDC 0x00 /* Floppy */ +#define AST2400_PP 0x01 /* Parallel port */ +#define AST2400_SP1 0x02 /* Com1 */ +#define AST2400_SP2 0x03 /* Com2 & IR */ +#define AST2400_KBC 0x05 /* PS/2 keyboard and mouse */ +#define AST2400_CIR 0x06 +#define AST2400_GPIO6789_V 0x07 +#define AST2400_WDT1_GPIO01A_V 0x08 +#define AST2400_GPIO1234567_V 0x09 +#define AST2400_ACPI 0x0A +#define AST2400_HWM_FPLED 0x0B /* Hardware monitor & front LED */ +#define AST2400_VID 0x0D +#define AST2400_CIRWKUP 0x0E /* CIR wakeup */ +#define AST2400_GPIO_PP_OD 0x0F /* GPIO Push-Pull/Open drain select */ +#define AST2400_SVID 0x14 +#define AST2400_DSLP 0x16 /* Deep sleep */ +#define AST2400_GPIOA_LDN 0x17 + +/* virtual LDN for GPIO and WDT */ +#define AST2400_WDT1 ((0 << 8) | AST2400_WDT1_GPIO01A_V) + +#define AST2400_GPIOBASE ((0 << 8) | AST2400_WDT1_GPIO01A_V) //? + +#define AST2400_GPIO0 ((1 << 8) | AST2400_WDT1_GPIO01A_V) +#define AST2400_GPIO1 ((1 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO2 ((2 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO3 ((3 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO4 ((4 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO5 ((5 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO6 ((6 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO7 ((7 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO8 ((0 << 8) | AST2400_GPIO6789_V) +#define AST2400_GPIO9 ((1 << 8) | AST2400_GPIO6789_V) +#define AST2400_GPIOA ((2 << 8) | AST2400_WDT1_GPIO01A_V) + +#define SUPERIO_PNP_PORT 0x2E +#define SUPERIO_CHIPID 0xC333 + +struct device_operations; +typedef struct pnp_device { + unsigned int port; + unsigned int device; + + struct device_operations *ops; +} *device_t; + +struct pnp_mode_ops { + void (*enter_conf_mode)(device_t dev); + void (*exit_conf_mode)(device_t dev); +}; + + +struct device_operations { + void (*read_resources)(device_t dev); + void (*set_resources)(device_t dev); + void (*enable_resources)(device_t dev); + void (*init)(device_t dev); + void (*final)(device_t dev); + void (*enable)(device_t dev); + void (*disable)(device_t dev); + + const struct pnp_mode_ops *ops_pnp_mode; +}; + +/* PNP helper operations */ +struct io_info { + unsigned int mask, set; +}; + +struct pnp_info { + bool enabled; /* set if we should enable the device */ + struct pnp_device pnp_device; + unsigned int function; /* Must be at least 16 bits (virtual LDNs)! 
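+ *
+ * The virtual LDNs above pack a sub-function index into the high
+ * byte, e.g. AST2400_GPIO2 is (2 << 8) | AST2400_GPIO1234567_V,
+ * so such values no longer fit in the 8-bit hardware LDN field.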
*/ +}; + +/* Chip operations */ +struct chip_operations { + void (*enable_dev)(struct device *dev); + void (*init)(void *chip_info); + void (*final)(void *chip_info); + unsigned int initialized : 1; + unsigned int finalized : 1; + const char *name; +}; + +typedef struct superio_ast2400_device { + struct device *dev; + const char *name; + unsigned int enabled : 1; /* set if we should enable the device */ + unsigned int superio_ast2400_efir; /* extended function index register */ + unsigned int superio_ast2400_efdr; /* extended function data register */ + struct chip_operations *chip_ops; + const void *chip_info; +} *superio_device_t; + + +static inline void pnp_enter_conf_mode_a5a5(device_t dev) +{ + outb(0xa5, dev->port); + outb(0xa5, dev->port); +} + +static inline void pnp_exit_conf_mode_aa(device_t dev) +{ + outb(0xaa, dev->port); +} + +/* PNP config mode wrappers */ + +static inline void pnp_enter_conf_mode(device_t dev) +{ + if (dev->ops->ops_pnp_mode) + dev->ops->ops_pnp_mode->enter_conf_mode(dev); +} + +static inline void pnp_exit_conf_mode(device_t dev) +{ + if (dev->ops->ops_pnp_mode) + dev->ops->ops_pnp_mode->exit_conf_mode(dev); +} + +/* PNP device operations */ +static inline u8 pnp_read_config(device_t dev, u8 reg) +{ + outb(reg, dev->port); + return inb(dev->port + 1); +} + +static inline void pnp_write_config(device_t dev, u8 reg, u8 value) +{ + outb(reg, dev->port); + outb(value, dev->port + 1); +} + +static inline void pnp_set_logical_device(device_t dev) +{ + pnp_write_config(dev, 0x07, dev->device & 0xff); +// pnp_write_config(dev, 0x07, 0x3); +} + +static inline void pnp_set_enable(device_t dev, int enable) +{ + u8 tmp; + + tmp = pnp_read_config(dev, 0x30); + + if (enable) + tmp |= 1; + else + tmp &= ~1; + + pnp_write_config(dev, 0x30, tmp); +} + +#endif /* _ASM_SW64_AST2400_H */ diff --git a/arch/sw_64/include/asm/atomic.h b/arch/sw_64/include/asm/atomic.h new file mode 100644 index 0000000000000000000000000000000000000000..4a68da09722c352f136ec591df023413522fcfcc --- /dev/null +++ b/arch/sw_64/include/asm/atomic.h @@ -0,0 +1,547 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ATOMIC_H +#define _ASM_SW64_ATOMIC_H + +#include +#include +#include + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc... + * + * But use these as seldom as possible since they are much slower + * than regular operations. + */ + +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } + +#define arch_atomic_read(v) READ_ONCE((v)->counter) +#define arch_atomic64_read(v) READ_ONCE((v)->counter) + +#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i)) +#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) + +/* + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. + */ +#define arch_atomic64_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new)) +#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new)) + +#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new)) +#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) + + +#ifdef CONFIG_SUBARCH_C3B +/** + * arch_atomic_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. 
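+ *
+ * Illustrative use (hypothetical caller and field names): take a
+ * reference only while the count is still non-zero:
+ *
+ *	if (arch_atomic_fetch_add_unless(&obj->refcnt, 1, 0) == 0)
+ *		return NULL;	(old value was 0, no reference taken)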
+ */ +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " seleq %4, 1, $31, %4\n" + " wr_f %4\n" + " addw %0, %6, %1\n" + " lstw %1, 0(%3)\n" + " rd_f %1\n" + " beq %4, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} +/** + * arch_atomic64_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u) +{ + long old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " seleq %4, 1, $31, %4\n" + " wr_f %4\n" + " addl %0, %6, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %4, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} +/* + * arch_atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. + */ +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +{ + unsigned long old, temp1, addr, temp2; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %4, 0(%3)\n" + " cmple %4, 0, %0\n" + " seleq %0, 1, $31, %0\n" + " wr_f %0\n" + " subl %4, 1, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %0, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr), "=&r" (old) + : "m" (v->counter)); + return old - 1; +} + + + +#define ATOMIC_OP(op, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long temp1, temp2, addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %0, 0(%3)\n" \ + " rd_f %0\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC_OP_RETURN(op, asm_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + + +#define ATOMIC_FETCH_OP(op, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstw %1, 0(%3)\n" \ + " 
rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + +#define ATOMIC64_OP(op, asm_op) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + unsigned long temp1, temp2, addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %0, 0(%3)\n" \ + " rd_f %0\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC64_OP_RETURN(op, asm_op) \ +static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)\ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} + +#define ATOMIC64_FETCH_OP(op, asm_op) \ +static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstl %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#else /* !CONFIG_SUBARCH_C3B */ + +/** + * arch_atomic_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " bne %4, 2f\n" + " addw %0, %6, %1\n" + " lstw %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} + +/** + * arch_atomic64_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. 
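+ *
+ * Unlike the CONFIG_SUBARCH_C3B variants above, the variants in
+ * this branch need no wr_f/rd_f lock-flag sequence: lstl/lstw
+ * simply store back 0 when the lldl/lldw reservation has been
+ * lost, and the lbr in the subsection retries the whole sequence.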
+ */ +static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u) +{ + long old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " bne %4, 2f\n" + " addl %0, %6, %1\n" + " lstl %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} + +/* + * arch_atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. + */ +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +{ + unsigned long old, temp1, addr, temp2; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %4, 0(%3)\n" + " cmple %4, 0, %0\n" + " bne %0, 2f\n" + " subl %4, 1, %1\n" + " lstl %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr), "=&r" (old) + : "m" (v->counter)); + return old - 1; +} + +#define ATOMIC_OP(op, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long temp1, addr; \ + __asm__ __volatile__( \ + " ldi %2, %1\n" \ + "1: lldw %0, 0(%2)\n" \ + " " #asm_op " %0, %3, %0\n" \ + " lstw %0, 0(%2)\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC_OP_RETURN(op, asm_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#define ATOMIC_FETCH_OP(op, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstw %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + +#define ATOMIC64_OP(op, asm_op) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + unsigned long temp1, addr; \ + __asm__ __volatile__( \ + " ldi %2, %1\n" \ + "1: lldl %0, 0(%2)\n" \ + " " #asm_op " %0, %3, %0\n" \ + " lstl %0, 0(%2)\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC64_OP_RETURN(op, asm_op) \ +static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)\ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" 
(addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} + +#define ATOMIC64_FETCH_OP(op, asm_op) \ +static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstl %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#endif /* CONFIG_SUBARCH_C3B */ + +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless +#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive + +#define ATOMIC_OPS(op) \ + ATOMIC_OP(op, op##w) \ + ATOMIC_OP_RETURN(op, op##w) \ + ATOMIC_FETCH_OP(op, op##w) \ + ATOMIC64_OP(op, op##l) \ + ATOMIC64_OP_RETURN(op, op##l) \ + ATOMIC64_FETCH_OP(op, op##l) \ + +ATOMIC_OPS(add) +ATOMIC_OPS(sub) + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed + +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed + + + + +#undef ATOMIC_OPS + +#define ATOMIC_OPS(op, asm) \ + ATOMIC_OP(op, asm) \ + ATOMIC_FETCH_OP(op, asm) \ + ATOMIC64_OP(op, asm) \ + ATOMIC64_FETCH_OP(op, asm) \ + + +ATOMIC_OPS(and, and) +ATOMIC_OPS(andnot, bic) +ATOMIC_OPS(or, bis) +ATOMIC_OPS(xor, xor) + + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed + +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed + + +#undef ATOMIC_OPS +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#define arch_atomic_andnot arch_atomic_andnot +#define arch_atomic64_andnot arch_atomic64_andnot + +#endif /* _ASM_SW64_ATOMIC_H */ diff --git a/arch/sw_64/include/asm/barrier.h b/arch/sw_64/include/asm/barrier.h new file mode 100644 index 0000000000000000000000000000000000000000..bff199126c9fa49c44028fab3d4a10e0a18ce136 --- /dev/null +++ b/arch/sw_64/include/asm/barrier.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_BARRIER_H +#define _ASM_SW64_BARRIER_H + +#include + +#define mb() __asm__ __volatile__("memb" : : : "memory") + +#define rmb() __asm__ __volatile__("memb" : : : "memory") + +#if defined(CONFIG_SUBARCH_C3B) +#define wmb() __asm__ __volatile__("memb" : : : "memory") +#elif defined(CONFIG_SUBARCH_C4) +#define wmb() __asm__ __volatile__("wmemb" : : : "memory") +#endif + +#define imemb() __asm__ __volatile__("imemb" : : : "memory") + +#ifdef CONFIG_SMP 
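+/*
+ * __ASM_SMP_MB is meant to be string-pasted into inline assembly
+ * so that the barrier is emitted only on SMP builds. A minimal
+ * sketch (illustrative operand and variable names, not taken from
+ * this patch):
+ *
+ *	__asm__ __volatile__(
+ *		"stw	%1, %0\n"
+ *		__ASM_SMP_MB
+ *		: "=m" (flag) : "r" (val));
+ */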
+#define __ASM_SMP_MB "\tmemb\n" +#else +#define __ASM_SMP_MB +#endif + +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() + +#include + +#endif /* _ASM_SW64_BARRIER_H */ diff --git a/arch/sw_64/include/asm/bitops.h b/arch/sw_64/include/asm/bitops.h new file mode 100644 index 0000000000000000000000000000000000000000..b3cdabd95abfc0b4d49591d5b1f9d5e7fefe230a --- /dev/null +++ b/arch/sw_64/include/asm/bitops.h @@ -0,0 +1,566 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_BITOPS_H +#define _ASM_SW64_BITOPS_H + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include +#include + +#ifdef CONFIG_SUBARCH_C3B +/* + * These have to be done with inline assembly: that way the bit-setting + * is guaranteed to be atomic. All bit operations return 0 if the bit + * was cleared before the operation and != 0 if it was not. + * + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. + * + * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). + */ + +static inline void +set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + + +static inline void +clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bic %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline int +test_and_set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " seleq %3, 1, $31, %1\n" + " wr_f %1\n" + " bis %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " bne %3, 2f\n" // %3 is not zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. 
+ "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_set_bit_lock(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " seleq %3, 1, $31, %1\n" + " wr_f %1\n" + " bis %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " bne %3, 2f\n" // %3 is not zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " selne %3, 1, $31, %1\n" //Note: here is SELNE!!! + " wr_f %1\n" + " bic %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " beq %3, 2f\n" // %3 is zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %2, 1\n" + " wr_f %2\n" + " and %0, %4, %2\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 3f\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +#else /* !CONFIG_SUBARCH_C3B */ +static inline void +set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " bis %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " bic %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " xor %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline int +test_and_set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long 
temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " bne %2, 2f\n" // %2 is not zero, no need to set, return + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_set_bit_lock(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " bne %2, 2f\n" // %2 is not zero, no need to set, return + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " beq %2, 2f\n" // %2 is zero, no need to set, return + " bic %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + + +#endif /* CONFIG_SUBARCH_C3B */ + +/* + * WARNING: non atomic version. 
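+ *
+ * These arch___*_bit() helpers are safe only when no other CPU or
+ * interrupt handler can modify the same word concurrently, e.g. on
+ * a bitmap still private to the caller or serialized by a lock; in
+ * all other cases the lldw/lstw based variants above must be used.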
+ */ +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m |= 1 << (nr & 31); +} + +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() + +static inline void +clear_bit_unlock(unsigned long nr, volatile void *addr) +{ + smp_mb(); + clear_bit(nr, addr); +} + +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m &= ~(1 << (nr & 31)); +} + +static inline void +__clear_bit_unlock(unsigned long nr, volatile void *addr) +{ + smp_mb(); + arch___clear_bit(nr, addr); +} + +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m ^= 1 << (nr & 31); +} + +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old | mask; + return (old & mask) != 0; +} + +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old & ~mask; + return (old & mask) != 0; +} + +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old ^ mask; + return (old & mask) != 0; +} + +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire + +/* + * ffz = Find First Zero in word. Undefined if no zero exists, + * so code should check against ~0UL first.. + * + * Do a binary search on the bits. Due to the nature of large + * constants on the sw64, it is worthwhile to split the search. + */ +static inline unsigned long ffz_b(unsigned long x) +{ + unsigned long sum, x1, x2, x4; + + x = ~x & -~x; /* set first 0 bit, clear others */ + x1 = x & 0xAA; + x2 = x & 0xCC; + x4 = x & 0xF0; + sum = x2 ? 2 : 0; + sum += (x4 != 0) * 4; + sum += (x1 != 0); + + return sum; +} + +static inline unsigned long ffz(unsigned long word) +{ + return __kernel_cttz(~word); +} + +/* + * __ffs = Find First set bit in word. Undefined if no set bit exists. + */ +static inline unsigned long __ffs(unsigned long word) +{ + return __kernel_cttz(word); +} + +#ifdef __KERNEL__ + +/* + * ffs: find first bit set. This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above __ffs. + */ + +static inline int ffs(int word) +{ + int result = __ffs(word) + 1; + + return word ? result : 0; +} + +/* + * fls: find last bit set. + */ +static inline int fls64(unsigned long word) +{ + return 64 - __kernel_ctlz(word); +} + +static inline unsigned long __fls(unsigned long x) +{ + return fls64(x) - 1; +} + +static inline int fls(int x) +{ + return fls64((unsigned int) x); +} + +/* + * hweightN: returns the hamming weight (i.e. 
the number + * of bits set) of a N-bit word + */ + +static inline unsigned long __arch_hweight64(unsigned long w) +{ + return __kernel_ctpop(w); +} + +static inline unsigned int __arch_hweight32(unsigned int w) +{ + return __arch_hweight64(w); +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return __arch_hweight64(w & 0xffff); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return __arch_hweight64(w & 0xff); +} + +#include + +#endif /* __KERNEL__ */ + +#ifdef __KERNEL__ + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 100-bit bitmap. It's guaranteed that at least + * one of the 100 bits is cleared. + */ +static inline unsigned long +sched_find_first_bit(const unsigned long b[2]) +{ + unsigned long b0, b1, ofs, tmp; + + b0 = b[0]; + b1 = b[1]; + ofs = (b0 ? 0 : 64); + tmp = (b0 ? b0 : b1); + + return __ffs(tmp) + ofs; +} + +#include + +#include + +#include + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_BITOPS_H */ diff --git a/arch/sw_64/include/asm/bug.h b/arch/sw_64/include/asm/bug.h new file mode 100644 index 0000000000000000000000000000000000000000..4a179f236ccf9cff8f664ac291af4990cea25cf7 --- /dev/null +++ b/arch/sw_64/include/asm/bug.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_BUG_H +#define _ASM_SW64_BUG_H + +#include +#include + +#endif /* _ASM_SW64_BUG_H */ diff --git a/arch/sw_64/include/asm/cache.h b/arch/sw_64/include/asm/cache.h new file mode 100644 index 0000000000000000000000000000000000000000..6a6ce4e99265a02576d448ec0577d486c7b16ffe --- /dev/null +++ b/arch/sw_64/include/asm/cache.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/asm/cache.h + */ +#ifndef _ASM_SW64_CACHE_H +#define _ASM_SW64_CACHE_H + +#define L1_CACHE_SHIFT 7 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define SMP_CACHE_BYTES L1_CACHE_BYTES + +#endif /* _ASM_SW64_CACHE_H */ diff --git a/arch/sw_64/include/asm/cacheflush.h b/arch/sw_64/include/asm/cacheflush.h new file mode 100644 index 0000000000000000000000000000000000000000..0d49830b8493464a520276661cc49dca9f04a0a4 --- /dev/null +++ b/arch/sw_64/include/asm/cacheflush.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CACHEFLUSH_H +#define _ASM_SW64_CACHEFLUSH_H + +/* + * DCache: PIPT + * ICache: + * - C3B is VIVT with ICTAG, support coherence. 
+ * - C4 is VIPT + */ +#include + +#endif /* _ASM_SW64_CACHEFLUSH_H */ diff --git a/arch/sw_64/include/asm/checksum.h b/arch/sw_64/include/asm/checksum.h new file mode 100644 index 0000000000000000000000000000000000000000..7f3768290402bea31e0e5c445716012e81d6ad4e --- /dev/null +++ b/arch/sw_64/include/asm/checksum.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CHECKSUM_H +#define _ASM_SW64_CHECKSUM_H + +#include + +#define extll(x, y, z) \ + ({__asm__ __volatile__("extll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define exthl(x, y, z) \ + ({__asm__ __volatile__("exthl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskll(x, y, z) \ + ({__asm__ __volatile__("maskll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskhl(x, y, z) \ + ({__asm__ __volatile__("maskhl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define insll(x, y, z) \ + ({__asm__ __volatile__("insll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define inshl(x, y, z) \ + ({__asm__ __volatile__("inshl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); + +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +extern __wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ +#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER +#define _HAVE_ARCH_CSUM_AND_COPY +__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len); + +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len); + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +extern __sum16 ip_compute_csum(const void *buff, int len); + +/* + * Fold a partial checksum without adding pseudo headers + */ + +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + + sum = (sum & 0xffff) + (sum >> 16); + sum = (sum & 0xffff) + (sum >> 16); + return (__force __sum16)~sum; +} + +#define _HAVE_ARCH_IPV6_CSUM +extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, __u32 len, + __u8 proto, __wsum sum); + +static inline unsigned short from64to16(unsigned long x) +{ + /* + * Using extract instructions is a bit more efficient + * than the original shift/bitmask version. + */ + + union { + unsigned long ul; + unsigned int ui[2]; + unsigned short us[4]; + } in_v, tmp_v, out_v; + + in_v.ul = x; + tmp_v.ul = (unsigned long)in_v.ui[0] + (unsigned long)in_v.ui[1]; + + /* + * Since the bits of tmp_v.sh[3] are going to always be zero, + * we don't have to bother to add that in. 
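+	 *
+	 * A worked example (added for illustration), with
+	 * x = 0xffffffffffffffff:
+	 *   tmp_v.ul = 0xffffffff + 0xffffffff = 0x1fffffffe
+	 *   out_v.ul = 0xfffe + 0xffff + 0x0001 = 0x1fffe
+	 * and the final add below yields 0xfffe + 0x0001 = 0xffff,
+	 * the expected fold of an all-ones input.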
+ */ + out_v.ul = (unsigned long)tmp_v.us[0] + (unsigned long)tmp_v.us[1] + + (unsigned long)tmp_v.us[2]; + + /* Similarly, out_v.us[2] is always zero for the final add. */ + return out_v.us[0] + out_v.us[1]; +} + +#endif /* _ASM_SW64_CHECKSUM_H */ diff --git a/arch/sw_64/include/asm/cmpxchg.h b/arch/sw_64/include/asm/cmpxchg.h new file mode 100644 index 0000000000000000000000000000000000000000..9f51d035313d92c00e67ad8d1183f16fe26154be --- /dev/null +++ b/arch/sw_64/include/asm/cmpxchg.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CMPXCHG_H +#define _ASM_SW64_CMPXCHG_H + +/* + * Atomic exchange routines. + */ + +#define __ASM__MB +#define ____xchg(type, args...) __arch_xchg ## type ## _local(args) +#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args) +#include + +#define arch_xchg_local(ptr, x) \ +({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __arch_xchg_local((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg_local(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg64_local(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_local((ptr), (o), (n)); \ +}) + +#ifdef CONFIG_SMP +#undef __ASM__MB +#define __ASM__MB "\tmemb\n" +#endif +#undef ____xchg +#undef ____cmpxchg +#undef _ASM_SW64_XCHG_H +#define ____xchg(type, args...) __arch_xchg ##type(args) +#define ____cmpxchg(type, args...) __cmpxchg ##type(args) +#include + +#define arch_xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __arch_xchg((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr)));\ +}) + +#define arch_cmpxchg64(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg((ptr), (o), (n)); \ +}) + +#undef __ASM__MB +#undef ____cmpxchg + +#define __HAVE_ARCH_CMPXCHG 1 + +#endif /* _ASM_SW64_CMPXCHG_H */ diff --git a/arch/sw_64/include/asm/core.h b/arch/sw_64/include/asm/core.h new file mode 100644 index 0000000000000000000000000000000000000000..2b6748cec93d459f9e2379399c92df746c682b29 --- /dev/null +++ b/arch/sw_64/include/asm/core.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CORE_H +#define _ASM_SW64_CORE_H + +#include + +#define II_II0 0 +#define II_II1 1 +#define II_SLEEP 2 +#define II_WAKE 3 +#define II_NMII 6 + +#define II_RESET II_NMII + +#if defined(CONFIG_SUBARCH_C3B) + +#define DOMAIN_ID_BITS 2 +#define DOMAIN_ID_SHIFT 5 + +#define THREAD_ID_BITS 1 +#define THREAD_ID_SHIFT 31 + +#define CORE_ID_BITS 5 +#define CORE_ID_SHIFT 0 + +static inline bool core_is_ht(void) +{ + return 0; +} + +#elif defined(CONFIG_SUBARCH_C4) + +#define DOMAIN_ID_BITS 2 +#define DOMAIN_ID_SHIFT 12 + +#define THREAD_ID_BITS 1 +#define THREAD_ID_SHIFT 8 + +#define CORE_ID_BITS 6 +#define CORE_ID_SHIFT 0 + +static inline bool core_is_ht(void) +{ + return rdhtctl() == 0x3; +} + +#endif + +#define DOMAIN_ID_MASK (GENMASK(DOMAIN_ID_BITS - 1, 0) << DOMAIN_ID_SHIFT) +#define THREAD_ID_MASK (GENMASK(THREAD_ID_BITS - 1, 0) << THREAD_ID_SHIFT) +#define CORE_ID_MASK (GENMASK(CORE_ID_BITS - 1, 0) << CORE_ID_SHIFT) +#define MAX_CORES_PER_CPU (1 << CORE_ID_BITS) + +/* + * 0x00 ~ 0xff for hardware 
mm fault + */ + +#define MMCSR__TNV 0x0 +#define MMCSR__IACV 0x1 +#define MMCSR__FOR 0x2 +#define MMCSR__FOE 0x3 +#define MMCSR__FOW 0x4 + +#define MMCSR__BAD_DVA 0x6 +#define MMCSR__ACV1 0x7 +#define MMCSR__ACV0 0xc +#define MMCSR__BAD_IVA 0xf + +/* 0x100 ~ 0x1ff for match debug */ +#define MMCSR__DA_MATCH 0x100 +#define MMCSR__DV_MATCH 0x101 +#define MMCSR__DAV_MATCH 0x102 +#define MMCSR__IA_MATCH 0x103 +#define MMCSR__IDA_MATCH 0x104 +#define MMCSR__IV_MATCH 0x105 + + /* entry.S */ +extern void entArith(void); +extern void entIF(void); +extern void entInt(void); +extern void entMM(void); +extern void entSys(void); +extern void entUna(void); +/* head.S */ +extern void __smp_callin(unsigned long args); +#endif /* _ASM_SW64_CORE_H */ diff --git a/arch/sw_64/include/asm/cpu.h b/arch/sw_64/include/asm/cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..4da30bb91d89602e48c55c01fdff08d3f495b45c --- /dev/null +++ b/arch/sw_64/include/asm/cpu.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CPU_H +#define _ASM_SW64_CPU_H + +#endif /* _ASM_SW64_CPU_H */ diff --git a/arch/sw_64/include/asm/cpufreq.h b/arch/sw_64/include/asm/cpufreq.h new file mode 100644 index 0000000000000000000000000000000000000000..cf47f1fc6866860b56ec2112abc1a1449ff66d72 --- /dev/null +++ b/arch/sw_64/include/asm/cpufreq.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_CPUFREQ_H +#define _ASM_SW64_CPUFREQ_H + +#include +#include +#include +#include +#include + +struct clk; + +extern char curruent_policy[CPUFREQ_NAME_LEN]; + +struct clk_ops { + void (*init)(struct clk *clk); + void (*enable)(struct clk *clk); + void (*disable)(struct clk *clk); + void (*recalc)(struct clk *clk); + int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id); + long (*round_rate)(struct clk *clk, unsigned long rate); +}; + +struct clk { + struct list_head node; + const char *name; + int id; + struct module *owner; + + struct clk *parent; + const struct clk_ops *ops; + + struct kref kref; + + unsigned long rate; + unsigned long flags; +}; + +#define CLK_ALWAYS_ENABLED (1 << 0) +#define CLK_RATE_PROPAGATES (1 << 1) + +#define CLK_PRT 0x1UL +#define CORE_CLK0_V (0x1UL << 1) +#define CORE_CLK0_R (0x1UL << 2) +#define CORE_CLK2_V (0x1UL << 15) +#define CORE_CLK2_R (0x1UL << 16) + +#define CLK_LV1_SEL_PRT 0x1UL +#define CLK_LV1_SEL_MUXA (0x1UL << 2) +#define CLK_LV1_SEL_MUXB (0x1UL << 3) + +#define CORE_PLL0_CFG_SHIFT 4 +#define CORE_PLL2_CFG_SHIFT 18 + +extern struct cpufreq_frequency_table freq_table[]; + +int clk_init(void); +void sw64_set_rate(unsigned int index); + +struct clk *sw64_clk_get(struct device *dev, const char *id); + +void sw64_update_clockevents(unsigned long cpu, u32 freq); + +unsigned int __sw64_cpufreq_get(struct cpufreq_policy *policy); +#endif /* _ASM_SW64_CPUFREQ_H */ diff --git a/arch/sw_64/include/asm/cputime.h b/arch/sw_64/include/asm/cputime.h new file mode 100644 index 0000000000000000000000000000000000000000..cdd46b05e22840bbbe033ca200951269afa0b98f --- /dev/null +++ b/arch/sw_64/include/asm/cputime.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CPUTIME_H +#define _ASM_SW64_CPUTIME_H + +typedef u64 __nocast cputime64_t; + +#define jiffies64_to_cputime64(__jif) ((__force cputime64_t)(__jif)) + +#endif /* _ASM_SW64_CPUTIME_H */ diff --git a/arch/sw_64/include/asm/csr.h b/arch/sw_64/include/asm/csr.h new file mode 100644 index 0000000000000000000000000000000000000000..0610384208a460b32703ea7988ebbfa29be821c9 
--- /dev/null +++ b/arch/sw_64/include/asm/csr.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CSR_H +#define _ASM_SW64_CSR_H + +#include + +#define CSR_EXC_SUM 0xd +#define CSR_INT_EN 0x1a +#define CSR_INT_STAT 0x1b +#define CSR_PCIE_MSI0_INT 0x1d +#define CSR_PCIE_MSI1_INT 0x1e +#define CSR_PCIE_MSI2_INT 0x1f +#define CSR_PCIE_MSI3_INT 0x20 +#define CSR_INT_VEC 0x2d +#define CSR_PCIE_MSI0_INTEN 0x35 +#define CSR_PCIE_MSI1_INTEN 0x36 +#define CSR_PCIE_MSI2_INTEN 0x37 +#define CSR_PCIE_MSI3_INTEN 0x38 +#define CSR_EXC_GPA 0x3b +#define CSR_EXC_PC 0xe +#define CSR_AS_INFO 0x3c +#define CSR_DS_STAT 0x48 +#define CSR_SOFTCID 0xc9 +#define CSR_DVA 0x54 +#define CSR_PTBR_SYS 0x68 +#define CSR_PTBR_USR 0x69 +#define CSR_APTP 0x6a +#define CSR_CID 0xc4 +#define CSR_WR_FREGS 0xc8 +#define CSR_SHTCLOCK 0xca +#define CSR_SHTCLOCK_OFFSET 0xcb + +#ifdef CONFIG_SUBARCH_C4 +#define CSR_IA_VPNMATCH 0xa +#define CSR_UPCR 0x15 +#define CSR_VPCR 0x16 +#define CSR_IA_MATCH 0x17 +#define CSR_IA_MASK 0x18 +#define CSR_IV_MATCH 0x19 +#define CSR_IA_UPNMATCH 0x3a +#define CSR_DC_CTLP 0x4e +#define CSR_DA_MATCH 0x51 +#define CSR_DA_MASK 0x52 +#define CSR_DA_MATCH_MODE 0x53 +#define CSR_DV_MATCH 0x56 +#define CSR_DV_MASK 0x57 +#define CSR_IDA_MATCH 0xc5 +#define CSR_IDA_MASK 0xc6 + +#define DA_MATCH_EN_S 4 +#define DV_MATCH_EN_S 6 +#define DAV_MATCH_EN_S 7 +#define DPM_MATCH 8 +#define DPM_MATCH_EN_S 10 +#define IDA_MATCH_EN_S 53 +#define IV_PM_EN_S 61 +#define IV_MATCH_EN_S 62 +#define IA_MATCH_EN_S 63 + +#endif + + +#ifdef CONFIG_HAVE_CSRRW +#define read_csr(x) \ + ({ unsigned long __val; \ + __asm__ __volatile__("csrr %0,%1" : "=r"(__val) : "i"(x)); \ + __val; }) + +#define write_csr(x, y) \ + ({ __asm__ __volatile__("csrw %0,%1" ::"r"(x), "i"(y)); }) + +#define write_csr_imb(x, y) \ + ({ __asm__ __volatile__("csrw %0,%1; imemb" ::"r"(x), "i"(y)); }) + + +#ifndef __ASSEMBLY__ +#include +static inline void update_ptbr_sys(unsigned long ptbr) +{ + imemb(); + write_csr_imb(ptbr, CSR_PTBR_SYS); +} +#endif +#else +#define read_csr(x) (0) +#define write_csr(x, y) do { } while (0) +#define write_csr_imb(x, y) do { } while (0) + +#ifndef __ASSEMBLY__ +static inline void update_ptbr_sys(unsigned long ptbr) +{ + wrptbr(ptbr); +} +#endif + +#endif +#endif /* _ASM_SW64_CSR_H */ diff --git a/arch/sw_64/include/asm/current.h b/arch/sw_64/include/asm/current.h new file mode 100644 index 0000000000000000000000000000000000000000..862caabb9c7092f561dca2613ab977fe5cc34cf0 --- /dev/null +++ b/arch/sw_64/include/asm/current.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CURRENT_H +#define _ASM_SW64_CURRENT_H + +#ifndef __ASSEMBLY__ + +struct task_struct; +static __always_inline struct task_struct *get_current(void) +{ + register struct task_struct *tp __asm__("$8"); + + return tp; +} + +#define current get_current() + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_SW64_CURRENT_H */ diff --git a/arch/sw_64/include/asm/debug.h b/arch/sw_64/include/asm/debug.h new file mode 100644 index 0000000000000000000000000000000000000000..8db5a8bb9ab72dc1165157a02a111acd07b5c20b --- /dev/null +++ b/arch/sw_64/include/asm/debug.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Mao Minkai + * Author: Mao Minkai + * + * This code is taken from arch/mips/include/asm/debug.h + * Copyright (C) 2015 Imagination Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as 
published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _ASM_SW64_DEBUG_H +#define _ASM_SW64_DEBUG_H + +#include + +/* + * sw64_debugfs_dir corresponds to the "sw_64" directory at the top level + * of the DebugFS hierarchy. SW64-specific DebugFS entries should be + * placed beneath this directory. + */ +extern struct dentry *sw64_debugfs_dir; + +#define UNA_MAX_ENTRIES 64 + +struct unaligned_stat { + unsigned long pc; + unsigned long va; +}; + +extern char unaligned_task[]; +extern unsigned long unaligned_count; +extern struct unaligned_stat unaligned[]; + +#endif /* _ASM_SW64_DEBUG_H */ diff --git a/arch/sw_64/include/asm/delay.h b/arch/sw_64/include/asm/delay.h new file mode 100644 index 0000000000000000000000000000000000000000..f4080753e9545c1f19cc3775b4ad362cc378b32f --- /dev/null +++ b/arch/sw_64/include/asm/delay.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DELAY_H +#define _ASM_SW64_DELAY_H + +extern void __delay(unsigned long loops); +extern void udelay(unsigned long usecs); + +extern void ndelay(unsigned long nsecs); +#define ndelay ndelay + +#endif /* _ASM_SW64_DELAY_H */ diff --git a/arch/sw_64/include/asm/device.h b/arch/sw_64/include/asm/device.h new file mode 100644 index 0000000000000000000000000000000000000000..d999207e07d1e3b4a9c5eb1507710cfed18c2fc7 --- /dev/null +++ b/arch/sw_64/include/asm/device.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DEVICE_H +#define _ASM_SW64_DEVICE_H + +struct dev_archdata { +#if defined(CONFIG_SUNWAY_IOMMU) || defined(CONFIG_SUNWAY_IOMMU_V2) + void *iommu; +#endif +}; + +struct pdev_archdata { +}; +#endif /* _ASM_SW64_DEVICE_H */ diff --git a/arch/sw_64/include/asm/dma-direct.h b/arch/sw_64/include/asm/dma-direct.h new file mode 100644 index 0000000000000000000000000000000000000000..dee1680b8f6d284b709e6142c552ade27ac9f111 --- /dev/null +++ b/arch/sw_64/include/asm/dma-direct.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DMA_DIRECT_H +#define _ASM_SW64_DMA_DIRECT_H + +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return paddr; +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + return daddr; +} + +#endif /* _ASM_SW64_DMA_DIRECT_H */ diff --git a/arch/sw_64/include/asm/dma-mapping.h b/arch/sw_64/include/asm/dma-mapping.h new file mode 100644 index 0000000000000000000000000000000000000000..65795f8e57920ef34ce1544e54cd9bd0a996edc2 --- /dev/null +++ b/arch/sw_64/include/asm/dma-mapping.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DMA_MAPPING_H +#define _ASM_SW64_DMA_MAPPING_H + + +extern const struct dma_map_ops *dma_ops; + +static inline const struct dma_map_ops *get_arch_dma_ops(void) +{ + return dma_ops; +} + + +#endif /* _ASM_SW64_DMA_MAPPING_H */ diff --git a/arch/sw_64/include/asm/dma.h b/arch/sw_64/include/asm/dma.h new file mode 100644 index 0000000000000000000000000000000000000000..cf6a9cf75233878e8238d2f13e2e50c41fbb9564 --- /dev/null +++ b/arch/sw_64/include/asm/dma.h @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/asm-sw_64/dma.h + * + * This is essentially the same as the i386 DMA stuff, as the SW64PCs + * use ISA-compatible dma. The only extension is support for high-page + * registers that allow to set the top 8 bits of a 32-bit DMA address. 
+ * This register should be written last when setting up a DMA address + * as this will also enable DMA across 64 KB boundaries. + */ + +/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ + * linux/include/asm/dma.h: Defines for using and allocating dma channels. + * Written by Hennus Bergman, 1992. + * High DMA channel support & info by Hannu Savolainen + * and John Boyd, Nov. 1992. + */ + +#ifndef _ASM_SW64_DMA_H +#define _ASM_SW64_DMA_H + +#include +#include + +#define dma_outb outb +#define dma_inb inb + +/* + * NOTES about DMA transfers: + * + * controller 1: channels 0-3, byte operations, ports 00-1F + * controller 2: channels 4-7, word operations, ports C0-DF + * + * - ALL registers are 8 bits only, regardless of transfer size + * - channel 4 is not used - cascades 1 into 2. + * - channels 0-3 are byte - addresses/counts are for physical bytes + * - channels 5-7 are word - addresses/counts are for physical words + * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries + * - transfer count loaded to registers is 1 less than actual count + * - controller 2 offsets are all even (2x offsets for controller 1) + * - page registers for 5-7 don't use data bit 0, represent 128K pages + * - page registers for 0-3 use bit 0, represent 64K pages + * + * DMA transfers are limited to the lower 16MB of _physical_ memory. + * Note that addresses loaded into registers must be _physical_ addresses, + * not logical addresses (which may differ if paging is active). + * + * Address mapping for channels 0-3: + * + * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * P7 ... P0 A7 ... A0 A7 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Address mapping for channels 5-7: + * + * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) + * | ... | \ \ ... \ \ \ ... \ \ + * | ... | \ \ ... \ \ \ ... \ (not used) + * | ... | \ \ ... \ \ \ ... \ + * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses + * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at + * the hardware level, so odd-byte transfers aren't possible). + * + * Transfer count (_not # bytes_) is limited to 64K, represented as actual + * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, + * and up to 128K bytes may be transferred on channels 5-7 in one operation. + * + */ + +#define MAX_DMA_CHANNELS 8 + +/* + * ISA DMA limitations on sw64 platforms, + + * These may be due to SIO (PCI<->ISA bridge) chipset limitation, or + * just a wiring limit. + */ + +/* + * Maximum address for all the others is the complete 32-bit bus + * address space. + */ +#define MAX_ISA_DMA_ADDRESS 0x100000000UL + +#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) + +/* + * If we have the iommu, we don't have any address limitations on DMA. + * Otherwise (Nautilus, RX164), we have to have 0-16 Mb DMA zone + * like i386. 
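+ * Here the unrestricted case is assumed, which is why
+ * MAX_DMA_ADDRESS below is ~0UL rather than a 16MB cap.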
+ */ +#define MAX_DMA_ADDRESS ~0UL + +/* 8237 DMA controllers */ +#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ +#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ + +/* DMA controller registers */ +#define DMA1_CMD_REG 0x08 /* command register (w) */ +#define DMA1_STAT_REG 0x08 /* status register (r) */ +#define DMA1_REQ_REG 0x09 /* request register (w) */ +#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ +#define DMA1_MODE_REG 0x0B /* mode register (w) */ +#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ +#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ +#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ +#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ +#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ +#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG) + +#define DMA2_CMD_REG 0xD0 /* command register (w) */ +#define DMA2_STAT_REG 0xD0 /* status register (r) */ +#define DMA2_REQ_REG 0xD2 /* request register (w) */ +#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ +#define DMA2_MODE_REG 0xD6 /* mode register (w) */ +#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ +#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ +#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ +#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ +#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ +#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG) + +#define DMA_ADDR_0 0x00 /* DMA address registers */ +#define DMA_ADDR_1 0x02 +#define DMA_ADDR_2 0x04 +#define DMA_ADDR_3 0x06 +#define DMA_ADDR_4 0xC0 +#define DMA_ADDR_5 0xC4 +#define DMA_ADDR_6 0xC8 +#define DMA_ADDR_7 0xCC + +#define DMA_CNT_0 0x01 /* DMA count registers */ +#define DMA_CNT_1 0x03 +#define DMA_CNT_2 0x05 +#define DMA_CNT_3 0x07 +#define DMA_CNT_4 0xC2 +#define DMA_CNT_5 0xC6 +#define DMA_CNT_6 0xCA +#define DMA_CNT_7 0xCE + +#define DMA_PAGE_0 0x87 /* DMA page registers */ +#define DMA_PAGE_1 0x83 +#define DMA_PAGE_2 0x81 +#define DMA_PAGE_3 0x82 +#define DMA_PAGE_5 0x8B +#define DMA_PAGE_6 0x89 +#define DMA_PAGE_7 0x8A + +#define DMA_HIPAGE_0 (0x400 | DMA_PAGE_0) +#define DMA_HIPAGE_1 (0x400 | DMA_PAGE_1) +#define DMA_HIPAGE_2 (0x400 | DMA_PAGE_2) +#define DMA_HIPAGE_3 (0x400 | DMA_PAGE_3) +#define DMA_HIPAGE_4 (0x400 | DMA_PAGE_4) +#define DMA_HIPAGE_5 (0x400 | DMA_PAGE_5) +#define DMA_HIPAGE_6 (0x400 | DMA_PAGE_6) +#define DMA_HIPAGE_7 (0x400 | DMA_PAGE_7) + +#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ +#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ +#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ + +#define DMA_AUTOINIT 0x10 + +extern spinlock_t dma_spin_lock; + +static inline unsigned long claim_dma_lock(void) +{ + unsigned long flags; + + spin_lock_irqsave(&dma_spin_lock, flags); + return flags; +} + +static inline void release_dma_lock(unsigned long flags) +{ + spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +/* enable/disable a specific DMA channel */ +static inline void enable_dma(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(dmanr, DMA1_MASK_REG); + else + dma_outb(dmanr & 3, DMA2_MASK_REG); +} + +static inline void disable_dma(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(dmanr | 4, DMA1_MASK_REG); + else + dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); +} + +/* Clear the 'DMA Pointer Flip Flop'. + * Write 0 for LSB/MSB, 1 for MSB/LSB access. + * Use this once to initialize the FF to a known state. 
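+ * A typical channel setup, sketched with the helpers defined in
+ * this file (chan, phys and len are placeholders):
+ *
+ *	flags = claim_dma_lock();
+ *	disable_dma(chan);
+ *	clear_dma_ff(chan);
+ *	set_dma_mode(chan, DMA_MODE_READ);
+ *	set_dma_addr(chan, phys);
+ *	set_dma_count(chan, len);
+ *	enable_dma(chan);
+ *	release_dma_lock(flags);
+ *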
+ * After that, keep track of it. :-) + * --- In order to do that, the DMA routines below should --- + * --- only be used while interrupts are disabled! --- + */ +static inline void clear_dma_ff(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(0, DMA1_CLEAR_FF_REG); + else + dma_outb(0, DMA2_CLEAR_FF_REG); +} + +/* set mode (above) for a specific DMA channel */ +static inline void set_dma_mode(unsigned int dmanr, char mode) +{ + if (dmanr <= 3) + dma_outb(mode | dmanr, DMA1_MODE_REG); + else + dma_outb(mode | (dmanr & 3), DMA2_MODE_REG); +} + +/* set extended mode for a specific DMA channel */ +static inline void set_dma_ext_mode(unsigned int dmanr, char ext_mode) +{ + if (dmanr <= 3) + dma_outb(ext_mode | dmanr, DMA1_EXT_MODE_REG); + else + dma_outb(ext_mode | (dmanr & 3), DMA2_EXT_MODE_REG); +} + +/* Set only the page register bits of the transfer address. + * This is used for successive transfers when we know the contents of + * the lower 16 bits of the DMA current address register. + */ +static inline void set_dma_page(unsigned int dmanr, unsigned int pagenr) +{ + switch (dmanr) { + case 0: + dma_outb(pagenr, DMA_PAGE_0); + dma_outb((pagenr >> 8), DMA_HIPAGE_0); + break; + case 1: + dma_outb(pagenr, DMA_PAGE_1); + dma_outb((pagenr >> 8), DMA_HIPAGE_1); + break; + case 2: + dma_outb(pagenr, DMA_PAGE_2); + dma_outb((pagenr >> 8), DMA_HIPAGE_2); + break; + case 3: + dma_outb(pagenr, DMA_PAGE_3); + dma_outb((pagenr >> 8), DMA_HIPAGE_3); + break; + case 5: + dma_outb(pagenr & 0xfe, DMA_PAGE_5); + dma_outb((pagenr >> 8), DMA_HIPAGE_5); + break; + case 6: + dma_outb(pagenr & 0xfe, DMA_PAGE_6); + dma_outb((pagenr >> 8), DMA_HIPAGE_6); + break; + case 7: + dma_outb(pagenr & 0xfe, DMA_PAGE_7); + dma_outb((pagenr >> 8), DMA_HIPAGE_7); + break; + } +} + + +/* Set transfer address & page bits for specific DMA channel. + * Assumes dma flipflop is clear. + */ +static inline void set_dma_addr(unsigned int dmanr, unsigned int a) +{ + if (dmanr <= 3) { + dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); + dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); + } else { + dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + } + set_dma_page(dmanr, a >> 16); /* set hipage last to enable 32-bit mode */ +} + + +/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for + * a specific DMA channel. + * You must ensure the parameters are valid. + * NOTE: from a manual: "the number of transfers is one more + * than the initial word count"! This is taken into account. + * Assumes dma flip-flop is clear. + * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. + */ +static inline void set_dma_count(unsigned int dmanr, unsigned int count) +{ + count--; + if (dmanr <= 3) { + dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + } else { + dma_outb((count >> 1) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + dma_outb((count >> 9) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + } +} + + +/* Get DMA residue count. After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + * + * Assumes DMA flip-flop is clear. 
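+ * For example, a completion poll might look like (sketch only,
+ * chan is a placeholder):
+ *
+ *	flags = claim_dma_lock();
+ *	clear_dma_ff(chan);
+ *	residue = get_dma_residue(chan);
+ *	release_dma_lock(flags);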
+ */ +static inline int get_dma_residue(unsigned int dmanr) +{ + unsigned int io_port = (dmanr <= 3) ? + ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE : + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE; + + /* using short to get 16-bit wrap around */ + unsigned short count; + + count = 1 + dma_inb(io_port); + count += dma_inb(io_port) << 8; + + return (dmanr <= 3) ? count : (count << 1); +} + + +/* These are in kernel/dma.c: */ +extern int request_dma(unsigned int dmanr, const char *device_id); /* reserve a DMA channel */ +extern void free_dma(unsigned int dmanr); /* release it again */ +#define KERNEL_HAVE_CHECK_DMA +extern int check_dma(unsigned int dmanr); + +/* From PCI */ + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + + +#endif /* _ASM_SW64_DMA_H */ diff --git a/arch/sw_64/include/asm/dmi.h b/arch/sw_64/include/asm/dmi.h new file mode 100644 index 0000000000000000000000000000000000000000..05e80c9a3a76dc143e505440a41b7d150b873e3d --- /dev/null +++ b/arch/sw_64/include/asm/dmi.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/include/asm/dmi.h + * + * Copyright (C) 2019 Deepin Limited. + * Porting by: Deepin Kernel Team (kernel@deepin.com) + * + * based on arch/x864/include/asm/dmi.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_SW64_DMI_H +#define _ASM_SW64_DMI_H + +#include +#include +#include +#include + +/* Use early IO mappings for DMI because it's initialized early */ +#define dmi_early_remap(x, l) early_ioremap(x, l) +#define dmi_early_unmap(x, l) early_iounmap(x, l) +#define dmi_remap(x, l) early_ioremap(x, l) +#define dmi_unmap(x) early_iounmap(x, 0) +#define dmi_alloc(l) kzalloc(l, GFP_KERNEL) + +#endif /* _ASM_SW64_DMI_H */ diff --git a/arch/sw_64/include/asm/early_ioremap.h b/arch/sw_64/include/asm/early_ioremap.h new file mode 100644 index 0000000000000000000000000000000000000000..172b96a401cb871241e3cbd0f7e316524fe211f7 --- /dev/null +++ b/arch/sw_64/include/asm/early_ioremap.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_EARLY_IOREMAP_H +#define _ASM_SW64_EARLY_IOREMAP_H + +#include +#include + +static inline void __iomem * +early_ioremap(unsigned long phys_addr, unsigned long size) +{ + unsigned long y = 0; + + if (phys_addr >= __START_KERNEL_map) { + y = (unsigned long) phys_to_virt(__pa(phys_addr)); + } else { + y = phys_addr; + y |= PAGE_OFFSET; + } + + return (void __iomem *) y; +} +#define early_memremap(phys_addr, size) early_ioremap(phys_addr, size) + +static inline void early_iounmap(volatile void __iomem *addr, unsigned long size) +{ +} +#define early_memunmap(addr, size) early_iounmap(addr, size) + +#endif /* _ASM_SW64_EARLY_IOREMAP_H */ diff --git a/arch/sw_64/include/asm/efi.h b/arch/sw_64/include/asm/efi.h new file mode 100644 index 0000000000000000000000000000000000000000..34d5637e23c2e23045764f6f316a1f5dcb5f36fb --- /dev/null +++ b/arch/sw_64/include/asm/efi.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_EFI_H +#define _ASM_SW64_EFI_H + +#include +#include +#ifdef CONFIG_EFI +extern void efi_init(void); +extern unsigned long entSuspend; + +#define SLEEP_ENTRY_GUID EFI_GUID(0x59cb76bb, 0x9c3a, 0x4c8f, 0xbd, 0x5c, 0xc0, 0x0f, 0x20, 0x61, 0x18, 0x4b) + +#else +#define efi_init() +#define efi_idmap_init() +#endif + +#define arch_efi_call_virt_setup() +#define 
arch_efi_call_virt_teardown() + +#define ARCH_EFI_IRQ_FLAGS_MASK 0x00000001 + +/* arch specific definitions used by the stub code */ + +/* + * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from + * start of kernel and may not cross a 2MiB boundary. We set alignment to + * 2MiB so we know it won't cross a 2MiB boundary. + */ +#define EFI_FDT_ALIGN SZ_2M /* used by allocate_new_fdt_and_exit_boot() */ +#define MAX_FDT_OFFSET SZ_512M + +#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) + +#endif /* _ASM_SW64_EFI_H */ diff --git a/arch/sw_64/include/asm/elf.h b/arch/sw_64/include/asm/elf.h new file mode 100644 index 0000000000000000000000000000000000000000..95ba89a1aa9db11a8c8cae1fa1ca5b3ac67d445b --- /dev/null +++ b/arch/sw_64/include/asm/elf.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ELF_H +#define _ASM_SW64_ELF_H +#ifdef __KERNEL__ +#include +#endif +/* Special values for the st_other field in the symbol table. */ + + +#define STO_SW64_NOPV 0x80 +#define STO_SW64_STD_GPLOAD 0x88 + +/* + * SW-64 ELF relocation types + */ +#define R_SW64_NONE 0 /* No reloc */ +#define R_SW64_REFLONG 1 /* Direct 32 bit */ +#define R_SW64_REFQUAD 2 /* Direct 64 bit */ +#define R_SW64_GPREL32 3 /* GP relative 32 bit */ +#define R_SW64_LITERAL 4 /* GP relative 16 bit w/optimization */ +#define R_SW64_LITUSE 5 /* Optimization hint for LITERAL */ +#define R_SW64_GPDISP 6 /* Add displacement to GP */ +#define R_SW64_BRADDR 7 /* PC+4 relative 23 bit shifted */ +#define R_SW64_HINT 8 /* PC+4 relative 16 bit shifted */ +#define R_SW64_SREL16 9 /* PC relative 16 bit */ +#define R_SW64_SREL32 10 /* PC relative 32 bit */ +#define R_SW64_SREL64 11 /* PC relative 64 bit */ +#define R_SW64_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ +#define R_SW64_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ +#define R_SW64_GPREL16 19 /* GP relative 16 bit */ +#define R_SW64_COPY 24 /* Copy symbol at runtime */ +#define R_SW64_GLOB_DAT 25 /* Create GOT entry */ +#define R_SW64_JMP_SLOT 26 /* Create PLT entry */ +#define R_SW64_RELATIVE 27 /* Adjust by program base */ +#define R_SW64_BRSGP 28 +#define R_SW64_TLSGD 29 +#define R_SW64_TLS_LDM 30 +#define R_SW64_DTPMOD64 31 +#define R_SW64_GOTDTPREL 32 +#define R_SW64_DTPREL64 33 +#define R_SW64_DTPRELHI 34 +#define R_SW64_DTPRELLO 35 +#define R_SW64_DTPREL16 36 +#define R_SW64_GOTTPREL 37 +#define R_SW64_TPREL64 38 +#define R_SW64_TPRELHI 39 +#define R_SW64_TPRELLO 40 +#define R_SW64_TPREL16 41 +#define R_SW64_LITERAL_GOT 43 /* GP relative */ + +#define SHF_SW64_GPREL 0x10000000 + +/* Legal values for e_flags field of Elf64_Ehdr. */ + +#define EF_SW64_32BIT 1 /* All addresses are below 2GB */ + +/* + * ELF register definitions. + * + * For now, we just leave it at 33 (32 general regs + processor status word). + */ +#define ELF_NGREG 33 + +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* Same with user_fpsimd_state */ +#include +typedef struct user_fpsimd_state elf_fpregset_t; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == EM_SW64) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_SW64 + +#define CORE_DUMP_USE_REGSET +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. 
Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ + +#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) + +/* + * $0 is set by ld.so to a pointer to a function which might be + * registered using atexit. This provides a mean for the dynamic + * linker to call DT_FINI functions for shared libraries that have + * been loaded before the code runs. + + * So that we can use the same startup file with static executables, + * we start programs with a value of 0 to indicate that there is no + * such function. + */ + +#define ELF_PLAT_INIT(_r, load_addr) (_r->regs[0] = 0) + +/* + * The registers are laid out in pt_regs for HMCODE and syscall + * convenience. Re-order them for the linear elf_gregset_t. + */ + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); + +#ifdef __KERNEL__ +struct pt_regs; +struct task_struct; +extern void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *pt); +#define ELF_CORE_COPY_REGS(DEST, REGS) sw64_elf_core_copy_regs(DEST, REGS); + +/* + * This yields a mask that user programs can use to figure out what + * instruction set this CPU supports. + */ + +#define ELF_HWCAP 0 + +/* + * This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. + */ + +#define ELF_PLATFORM ("sw_64") + + +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ +#define ARCH_DLINFO \ +do { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (elf_addr_t)current->mm->context.vdso); \ +} while (0) + +struct mm_struct; +extern unsigned long arch_randomize_brk(struct mm_struct *mm); +#define arch_randomize_brk arch_randomize_brk +#endif + +#endif /* _ASM_SW64_ELF_H */ diff --git a/arch/sw_64/include/asm/extable.h b/arch/sw_64/include/asm/extable.h new file mode 100644 index 0000000000000000000000000000000000000000..42f872ce6c3bb8531c67fb8c7a1f64b6bfc0b77f --- /dev/null +++ b/arch/sw_64/include/asm/extable.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_EXTABLE_H +#define _ASM_SW64_EXTABLE_H + +/* + * About the exception table: + * + * - insn is a 32-bit pc-relative offset from the faulting insn. + * - nextinsn is a 16-bit offset off of the faulting instruction + * (not off of the *next* instruction as branches are). + * - errreg is the register in which to place -EFAULT. + * - valreg is the final target register for the load sequence + * and will be zeroed. + * + * Either errreg or valreg may be $31, in which case nothing happens. + * + * The exception fixup information "just so happens" to be arranged + * as in a MEM format instruction. 
This lets us emit our three + * values like so: + * + * lda valreg, nextinsn(errreg) + * + */ + +struct exception_table_entry { + signed int insn; + union exception_fixup { + unsigned int unit; + struct { + signed int nextinsn : 16; + unsigned int errreg : 5; + unsigned int valreg : 5; + } bits; + } fixup; +}; + +#define ARCH_HAS_RELATIVE_EXTABLE + +extern int fixup_exception(struct pt_regs *regs, unsigned long pc); + +#define swap_ex_entry_fixup(a, b, tmp, delta) \ + do { \ + (a)->fixup.unit = (b)->fixup.unit; \ + (b)->fixup.unit = (tmp).fixup.unit; \ + } while (0) + +#endif /* _ASM_SW64_EXTABLE_H */ diff --git a/arch/sw_64/include/asm/fpu.h b/arch/sw_64/include/asm/fpu.h new file mode 100644 index 0000000000000000000000000000000000000000..a0b0ff5af1689726d71ff29bbe5c14e6ce83b432 --- /dev/null +++ b/arch/sw_64/include/asm/fpu.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_FPU_H +#define _ASM_SW64_FPU_H + +#include +#ifdef __KERNEL__ + +/* + * The following two functions don't need trapb/excb instructions + * around the mf_fpcr/mt_fpcr instructions because (a) the kernel + * never generates arithmetic faults and (b) sys_call instructions + * are implied trap barriers. + */ + +static inline unsigned long +rdfpcr(void) +{ + unsigned long ret; + unsigned long fp[4] __aligned(32); + + __asm__ __volatile__ ( + " vstd $f0, %0\n\t" + " rfpcr $f0\n\t" + " fimovd $f0, %1\n\t" + " vldd $f0, %0\n\t" + : "=m"(*fp), "=r"(ret)); + + return ret; +} + +static inline void +wrfpcr(unsigned long val) +{ + unsigned long tmp; + unsigned long fp[4] __aligned(32); + + __asm__ __volatile__ ( + " vstd $f0, %0\n\t" + " ifmovd %2, $f0\n\t" + " wfpcr $f0\n\t" + " and %2, 0x3, %1\n\t" + " beq %1, 1f\n\t" + " subl %1, 1, %1\n\t" + " beq %1, 2f\n\t" + " subl %1, 1, %1\n\t" + " beq %1, 3f\n\t" + " setfpec3\n\t" + " br 6f\n\t" + "1: setfpec0\n\t" + " br 6f\n\t" + "2: setfpec1\n\t" + " br 6f\n\t" + "3: setfpec2\n\t" + "6: vldd $f0, %0\n\t" + : "=m"(*fp), "=&r"(tmp) : "r"(val)); +} + +static inline unsigned long +swcr_update_status(unsigned long swcr, unsigned long fpcr) +{ + /* + * SW64 implements most of the bits in hardware. Collect + * the acrued exception bits from the real fpcr. 
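+	 * The shift pairs below just move each hardware status group
+	 * into its swcr position: groups 0 and 1 live above their
+	 * swcr bits (hence the right shifts by 35 and 13), groups 2
+	 * and 3 below them (hence the left shifts by 14 and 36).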
+ */ + swcr &= ~(IEEE_STATUS_MASK0 | IEEE_STATUS_MASK1 + | IEEE_STATUS_MASK2 | IEEE_STATUS_MASK3); + swcr |= (fpcr >> 35) & IEEE_STATUS_MASK0; + swcr |= (fpcr >> 13) & IEEE_STATUS_MASK1; + swcr |= (fpcr << 14) & IEEE_STATUS_MASK2; + swcr |= (fpcr << 36) & IEEE_STATUS_MASK3; + return swcr; +} + +extern unsigned long sw64_read_fp_reg(unsigned long reg); +extern void sw64_write_fp_reg(unsigned long reg, unsigned long val); +extern unsigned long sw64_read_fp_reg_s(unsigned long reg); +extern void sw64_write_fp_reg_s(unsigned long reg, unsigned long val); + + +extern void sw64_write_simd_fp_reg_s(unsigned long reg, + unsigned long f0, unsigned long f1); +extern void sw64_write_simd_fp_reg_d(unsigned long reg, + unsigned long f0, unsigned long f1, + unsigned long f2, unsigned long f3); +extern void sw64_write_simd_fp_reg_ldwe(unsigned long reg, int a); +extern void sw64_read_simd_fp_m_s(unsigned long reg, unsigned long *fp_value); +extern void sw64_read_simd_fp_m_d(unsigned long reg, unsigned long *fp_value); + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_FPU_H */ diff --git a/arch/sw_64/include/asm/ftrace.h b/arch/sw_64/include/asm/ftrace.h new file mode 100644 index 0000000000000000000000000000000000000000..7ed6e3c06a333e81d96e881de91c0d1aac670ae7 --- /dev/null +++ b/arch/sw_64/include/asm/ftrace.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/include/asm/ftrace.h + * + * Copyright (C) 2019, serveros, linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_SW64_FTRACE_H +#define _ASM_SW64_FTRACE_H + +#define MCOUNT_ADDR ((unsigned long)_mcount) +#define MCOUNT_INSN_SIZE 20 /* 5 * SW64_INSN_SIZE */ +#define MCOUNT_LDGP_SIZE 8 /* 2 * SW64_INSN_SIZE */ + +#define ARCH_SUPPORTS_FTRACE_OPS 1 + +#ifndef __ASSEMBLY__ +#include +#include + + +extern void _mcount(unsigned long); + +struct dyn_arch_ftrace { + /* No extra data needed for sw64 */ +}; + +extern unsigned long ftrace_graph_call; + + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* + * addr is the address of the mcount call instruction. + * recordmcount does the necessary offset calculation. 
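+	 * No further adjustment is therefore needed; addr is
+	 * returned unchanged.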
+ */ + return addr; +} + +#endif /* ifndef __ASSEMBLY__ */ +#endif /* _ASM_SW64_FTRACE_H */ diff --git a/arch/sw_64/include/asm/futex.h b/arch/sw_64/include/asm/futex.h new file mode 100644 index 0000000000000000000000000000000000000000..78379981398053e3c7c29f1dddab934a8e44a003 --- /dev/null +++ b/arch/sw_64/include/asm/futex.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_FUTEX_H +#define _ASM_SW64_FUTEX_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +#ifdef CONFIG_SUBARCH_C3B + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg, tmp) \ + __asm__ __volatile__( \ + "1: lldw %0, 0(%3)\n" \ + " ldi %2, 1\n" \ + " wr_f %2\n" \ + insn \ + "2: lstw %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 4f\n" \ + " bis $31, $31, %1\n" \ + "3: .subsection 2\n" \ + "4: br 1b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ + " .long 1b-.\n" \ + " ldi $31, 3b-1b(%1)\n" \ + " .long 2b-.\n" \ + " ldi $31, 3b-2b(%1)\n" \ + " .previous\n" \ + : "=&r" (oldval), "=&r"(ret), "=&r"(tmp) \ + : "r" (uaddr), "r"(oparg) \ + : "memory") + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0, cmp; + u32 prev, tmp; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ( + "1: lldw %1, 0(%4)\n" + " cmpeq %1, %5, %2\n" + " wr_f %2\n" + " bis $31, %6, %3\n" + "2: lstw %3, 0(%4)\n" + " rd_f %3\n" + " beq %2, 3f\n" + " beq %3, 4f\n" + "3: .subsection 2\n" + "4: br 1b\n" + " .previous\n" + " .section __ex_table, \"a\"\n" + " .long 1b-.\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b-.\n" + " ldi $31, 3b-2b(%0)\n" + " .previous\n" + : "+r"(ret), "=&r"(prev), "=&r"(cmp), "=&r"(tmp) + : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) + : "memory"); + + *uval = prev; + return ret; +} +#else /* !CONFIG_SUBARCH_C3B */ + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg, tmp) \ + __asm__ __volatile__( \ + "1: lldw %0, 0(%3)\n" \ + insn \ + "2: lstw %1, 0(%3)\n" \ + " beq %1, 4f\n" \ + " bis $31, $31, %1\n" \ + "3: .subsection 2\n" \ + "4: lbr 1b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ + " .long 1b-.\n" \ + " ldi $31, 3b-1b(%1)\n" \ + " .long 2b-.\n" \ + " ldi $31, 3b-2b(%1)\n" \ + " .previous\n" \ + : "=&r" (oldval), "=&r"(ret), "=&r"(tmp) \ + : "r" (uaddr), "r"(oparg) \ + : "memory") + + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0, cmp; + u32 prev, tmp; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ( + "1: lldw %1, 0(%4)\n" + " cmpeq %1, %5, %2\n" + " beq %2, 3f\n" + " bis $31, %6, %3\n" + "2: lstw %3, 0(%4)\n" + " beq %3, 4f\n" + "3: .subsection 2\n" + "4: lbr 1b\n" + " .previous\n" + " .section __ex_table, \"a\"\n" + " .long 1b-.\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b-.\n" + " ldi $31, 3b-2b(%0)\n" + " .previous\n" + : "+r"(ret), "=&r"(prev), "=&r"(cmp), "=&r"(tmp) + : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) + : "memory"); + + *uval = prev; + return ret; +} +#endif /* CONFIG_SUBARCH_C3B */ + +static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, + u32 __user *uaddr) +{ + int oldval = 0, ret; + unsigned long tmp; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("mov %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("addw %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or 
%0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("andnot %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_FUTEX_H */ diff --git a/arch/sw_64/include/asm/hardirq.h b/arch/sw_64/include/asm/hardirq.h new file mode 100644 index 0000000000000000000000000000000000000000..03368c3659dd5f7b0c2789029e91e9d92d25a25d --- /dev/null +++ b/arch/sw_64/include/asm/hardirq.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HARDIRQ_H +#define _ASM_SW64_HARDIRQ_H + +void ack_bad_irq(unsigned int irq); +#define ack_bad_irq ack_bad_irq + +#include + +#define __ARCH_IRQ_STAT +typedef struct { + u16 __softirq_pending; + unsigned int timer_irqs_event; +} ____cacheline_aligned irq_cpustat_t; + +DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); + +#define inc_irq_stat(member) this_cpu_inc(irq_stat.member) +#define arch_irq_stat_cpu arch_irq_stat_cpu +#define arch_irq_stat arch_irq_stat +extern u64 arch_irq_stat_cpu(unsigned int cpu); +extern u64 arch_irq_stat(void); + +#endif /* _ASM_SW64_HARDIRQ_H */ diff --git a/arch/sw_64/include/asm/hcall.h b/arch/sw_64/include/asm/hcall.h new file mode 100644 index 0000000000000000000000000000000000000000..bded05779db748861e2aeea7b7bd0863bd82852e --- /dev/null +++ b/arch/sw_64/include/asm/hcall.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HCALL_H +#define _ASM_SW64_HCALL_H + +#define HMC_hcall 0x32 +/* HCALL must > 0 */ +enum HCALL_TYPE { + HCALL_HALT = 10, + HCALL_NOTIFY = 11, + HCALL_SHUTDOWN = 12, + HCALL_SET_CLOCKEVENT = 13, + HCALL_IVI = 14, /* interrupt between virtual cpu */ + HCALL_TBI = 15, /* tlb flush for virtual cpu */ + HCALL_STOP = 16, /* indicate virtual cpu stopped */ + HCALL_RESTART = 17, /* indicate virtual cpu restarted */ + HCALL_MSI = 18, /* guest request msi intr */ + HCALL_MSIX = 19, /* guest request msix intr */ + HCALL_SWNET = 20, /* guest request swnet service */ + HCALL_SWNET_IRQ = 21, /* guest request swnet intr */ + HCALL_FATAL_ERROR = 22, /* guest fatal error, issued by hmcode */ + HCALL_MEMHOTPLUG = 23, /* guest memory hotplug event */ + NR_HCALL +}; + +static inline unsigned long hcall(unsigned long hcall, unsigned long arg0, + unsigned long arg1, unsigned long arg2) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = hcall; + register unsigned long __r17 __asm__("$17") = arg0; + register unsigned long __r18 __asm__("$18") = arg1; + register unsigned long __r19 __asm__("$19") = arg2; + __asm__ __volatile__( + "sys_call %5 " + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r19), "=r"(__r0) + : "i"(HMC_hcall), "0"(__r16), "1"(__r17), "2"(__r18), "3"(__r19) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} + +#endif /* _ASM_SW64_HCALL_H */ diff --git a/arch/sw_64/include/asm/hmcall.h b/arch/sw_64/include/asm/hmcall.h new file mode 100644 index 0000000000000000000000000000000000000000..e3bac3016740a831b78928fef7b00c2584819fd2 --- /dev/null +++ b/arch/sw_64/include/asm/hmcall.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HMCALL_H +#define _ASM_SW64_HMCALL_H + +/* + * Common HMC-code + */ +/* 0x0 - 0x3F : Kernel Level HMC routine */ +#define HMC_halt 0x00 +#define HMC_rdio64 0x01 +#define 
HMC_rdio32 0x02 +#define HMC_cpuid 0x03 +#define HMC_sleepen 0x05 +#define HMC_rdksp 0x06 +#define HMC_wrasid 0x08 +#define HMC_rdktp 0x09 +#define HMC_wrktp 0x0A +#define HMC_rdptbr 0x0B +#define HMC_wrptbr 0x0C +#define HMC_rdhtctl 0x0D +#define HMC_wrksp 0x0E +#define HMC_mtinten 0x0F +#define HMC_load_mm 0x11 +#define HMC_tbisasid 0x14 +#define HMC_tbivpn 0x19 +#define HMC_ret 0x1A +#define HMC_wrvpcr 0x29 +#define HMC_wrfen 0x2B +#define HMC_sflush 0x2F +#define HMC_entervm 0x31 +#define HMC_hcall 0x32 +#define HMC_tbi 0x33 +#define HMC_wrent 0x34 +#define HMC_swpipl 0x35 +#define HMC_rdps 0x36 +#define HMC_wrkgp 0x37 +#define HMC_wrusp 0x38 +#define HMC_rvpcr 0x39 +#define HMC_rdusp 0x3A +#define HMC_wrtimer 0x3B +#define HMC_whami 0x3C +#define HMC_retsys 0x3D +#define HMC_sendii 0x3E +#define HMC_rti 0x3F + + +/* 0x80 - 0xBF : User Level HMC routine */ +#include + +/* Following will be deprecated from user level invocation */ +#define HMC_rwreg 0x87 +#define HMC_sz_uflush 0xA8 +#define HMC_longtime 0xB1 + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +#include +extern void __init fixup_hmcall(void); + +extern void halt(void) __noreturn; + +#define __CALL_HMC_VOID(NAME) \ +static inline void NAME(void) \ +{ \ + __asm__ __volatile__( \ + "sys_call %0 " : : "i" (HMC_ ## NAME)); \ +} + +#define __CALL_HMC_R0(NAME, TYPE) \ +static inline TYPE NAME(void) \ +{ \ + register TYPE __r0 __asm__("$0"); \ + __asm__ __volatile__( \ + "sys_call %1 # " #NAME \ + : "=r" (__r0) \ + : "i" (HMC_ ## NAME) \ + : "$1", "$16", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_HMC_W1(NAME, TYPE0) \ +static inline void NAME(TYPE0 arg0) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "sys_call %1 # "#NAME \ + : "=r"(__r16) \ + : "i"(HMC_ ## NAME), "0"(__r16) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_HMC_W2(NAME, TYPE0, TYPE1) \ +static inline void NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "sys_call %2 # "#NAME \ + : "=r"(__r16), "=r"(__r17) \ + : "i"(HMC_ ## NAME), "0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_HMC_RW1(NAME, RTYPE, TYPE0) \ +static inline RTYPE NAME(TYPE0 arg0) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "sys_call %2 # "#NAME \ + : "=r"(__r16), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_HMC_RW2(NAME, RTYPE, TYPE0, TYPE1) \ +static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "sys_call %3 # "#NAME \ + : "=r"(__r16), "=r"(__r17), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_HMC_RW3(NAME, RTYPE, TYPE0, TYPE1, TYPE2) \ +static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1, TYPE2 arg2) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + register TYPE2 __r18 __asm__("$18") = arg2; \ + __asm__ __volatile__( \ + "sys_call %4 # "#NAME \ + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16), "1"(__r17), "2"(__r18) \ + : "$1", "$22", 
"$23", "$24", "$25"); \ + return __r0; \ +} + + +__CALL_HMC_VOID(imb); +__CALL_HMC_VOID(sflush); +__CALL_HMC_VOID(wrfen); +#define fpu_enable() wrfen() + +__CALL_HMC_VOID(sleepen); +__CALL_HMC_VOID(mtinten); + +__CALL_HMC_VOID(rdktp); +#define restore_ktp() rdktp() +__CALL_HMC_VOID(wrktp); +#define save_ktp() wrktp() + +__CALL_HMC_R0(rdps, unsigned long); + +__CALL_HMC_R0(rdusp, unsigned long); +__CALL_HMC_W1(wrusp, unsigned long); + +__CALL_HMC_R0(rdksp, unsigned long); +__CALL_HMC_W1(wrksp, unsigned long); +__CALL_HMC_R0(rdhtctl, unsigned long); + +/* + * Load a mm context. This is needed when we change the page + * table pointer(CSR:PTBR) or when we update the ASID. + * load_mm(asid, ptbr) + * + */ +__CALL_HMC_W2(load_mm, unsigned long, unsigned long); + +__CALL_HMC_W1(wrasid, unsigned long); +__CALL_HMC_R0(rdptbr, unsigned long); +__CALL_HMC_W1(wrptbr, unsigned long); + +__CALL_HMC_RW1(swpipl, unsigned long, unsigned long); +__CALL_HMC_R0(whami, unsigned long); +__CALL_HMC_RW1(rdio64, unsigned long, unsigned long); +__CALL_HMC_RW1(rdio32, unsigned int, unsigned long); +__CALL_HMC_W2(wrent, void*, unsigned long); +__CALL_HMC_W2(tbisasid, unsigned long, unsigned long); +__CALL_HMC_W1(wrkgp, unsigned long); +__CALL_HMC_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); +__CALL_HMC_RW3(sendii, unsigned long, unsigned long, unsigned long, unsigned long); +__CALL_HMC_W1(wrtimer, unsigned long); +__CALL_HMC_RW3(tbivpn, unsigned long, unsigned long, unsigned long, unsigned long); +__CALL_HMC_RW2(cpuid, unsigned long, unsigned long, unsigned long); + +__CALL_HMC_W1(wrtp, unsigned long); +/* + * TB routines.. + */ +#define __tbi(nr, arg, arg1...) \ +({ \ + register unsigned long __r16 __asm__("$16") = (nr); \ + register unsigned long __r17 __asm__("$17"); arg; \ + __asm__ __volatile__( \ + "sys_call %3 #__tbi" \ + : "=r" (__r16), "=r" (__r17) \ + : "0" (__r16), "i" (HMC_tbi), ##arg1 \ + : "$0", "$1", "$22", "$23", "$24", "$25"); \ +}) + +#define tbi(x, y) __tbi(x, __r17 = (y), "1" (__r17)) + +/* Invalidate all TLB, only used by hypervisor */ +#define tbia() __tbi(-2, /* no second argument */) + +/* Invalidate TLB for all processes with currnet VPN */ +#define tbivp() __tbi(-1, /* no second argument */) + +/* Invalidate all TLB with current VPN */ +#define tbiv() __tbi(0, /* no second argument */) + +/* Invalidate ITLB of addr with current UPN and VPN */ +#define tbisi(addr) __tbi(1, __r17 = (addr), "1" (__r17)) + +/* Invalidate DTLB of addr with current UPN and VPN */ +#define tbisd(addr) __tbi(2, __r17 = (addr), "1" (__r17)) + +/* Invalidate TLB of addr with current UPN and VPN */ +#define tbis(addr) __tbi(3, __r17 = (addr), "1" (__r17)) + +/* Invalidate all user TLB with current UPN and VPN */ +#define tbiu() __tbi(4, /* no second argument */) + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_HMCALL_H */ diff --git a/arch/sw_64/include/asm/hugetlb.h b/arch/sw_64/include/asm/hugetlb.h new file mode 100644 index 0000000000000000000000000000000000000000..f4c8cbe0891a9fd138dfdd1e049a09b82b08ad44 --- /dev/null +++ b/arch/sw_64/include/asm/hugetlb.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HUGETLB_H +#define _ASM_SW64_HUGETLB_H + +#include + +#ifdef CONFIG_SUBARCH_C4 +#define __HAVE_ARCH_HUGE_PTE_CLEAR +extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz); + +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT +extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t 
*ptep, pte_t pte, unsigned long sz); + +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH +extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +extern void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS +extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, pte_t pte, int dirty); + +#define arch_make_huge_pte arch_make_huge_pte +extern pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, + vm_flags_t flags); + +#define set_huge_swap_pte_at set_huge_swap_pte_at +extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz); +#endif + +#include + +#endif /* _ASM_SW64_HUGETLB_H */ diff --git a/arch/sw_64/include/asm/hw_init.h b/arch/sw_64/include/asm/hw_init.h new file mode 100644 index 0000000000000000000000000000000000000000..2078c66d1c4fd3912392ba73790320657f5a83cc --- /dev/null +++ b/arch/sw_64/include/asm/hw_init.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HW_INIT_H +#define _ASM_SW64_HW_INIT_H +#include +#include + +#include + +#define MMSIZE __va(0x2040) + +/* + * Descriptor for a cache + */ +struct cache_desc { + unsigned int size; /* Bytes per way */ + unsigned int sets; /* Number of lines per set */ + unsigned char ways; /* Number of ways */ + unsigned char linesz; /* Size of line in bytes */ + unsigned char flags; /* Flags describing cache properties */ +}; + +struct cpuinfo_sw64 { + unsigned long last_asid; + unsigned long last_vpn; + unsigned long ipi_count; + struct cache_desc icache; /* Primary I-cache */ + struct cache_desc dcache; /* Primary D or combined I/D cache */ + struct cache_desc scache; /* Secondary cache */ + struct cache_desc tcache; /* Tertiary/split secondary cache */ +} __aligned(SMP_CACHE_BYTES); + +struct cpu_desc_t { + __u8 model; + __u8 family; + __u8 chip_var; + __u8 arch_var; + __u8 arch_rev; + __u8 pa_bits; + __u8 va_bits; + char vendor_id[16]; + char model_id[64]; + unsigned long frequency; +} __randomize_layout; + +#define MAX_NUMSOCKETS 8 +struct socket_desc_t { + bool is_online; /* 1 for online, 0 for offline */ + int numcores; + unsigned long socket_mem; +}; + +enum memmap_types { + memmap_reserved, + memmap_pci, + memmap_initrd, + memmap_kvm, + memmap_crashkernel, + memmap_acpi, + memmap_use, + memmap_protected, +}; + +#define MAX_NUMMEMMAPS 64 +struct memmap_entry { + u64 addr; /* start of memory segment */ + u64 size; /* size of memory segment */ + enum memmap_types type; +}; + +extern struct cpuinfo_sw64 cpu_data[NR_CPUS]; +extern void store_cpu_data(int cpu); + +extern struct cpu_desc_t cpu_desc; +extern struct socket_desc_t socket_desc[MAX_NUMSOCKETS]; +extern int memmap_nr; +extern struct memmap_entry memmap_map[MAX_NUMMEMMAPS]; +extern cpumask_t cpu_offline; +extern bool memblock_initialized; + +int __init add_memmap_region(u64 addr, u64 size, enum memmap_types type); +void __init process_memmap(void); + +static inline unsigned long get_cpu_freq(void) +{ + return cpu_desc.frequency; +} + +static inline void update_cpu_freq(unsigned long khz) +{ + cpu_desc.frequency = khz * 1000; +} + +#define EMUL_FLAG (0x1UL << 63) +#define MMSIZE_MASK (EMUL_FLAG - 1) + 
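+
+/*
+ * Exactly one of the run-mode keys below is expected to be enabled
+ * during early boot, so the is_in_*() helpers compile down to a
+ * single patched branch. A hypothetical timer path, for instance
+ * (delta is a placeholder):
+ *
+ *	if (is_guest_or_emul())
+ *		hcall(HCALL_SET_CLOCKEVENT, delta, 0, 0);
+ *	else
+ *		wrtimer(delta);
+ */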
+DECLARE_STATIC_KEY_TRUE(run_mode_host_key); +DECLARE_STATIC_KEY_FALSE(run_mode_guest_key); +DECLARE_STATIC_KEY_FALSE(run_mode_emul_key); + +#define is_in_host() static_branch_likely(&run_mode_host_key) +#define is_in_guest() static_branch_unlikely(&run_mode_guest_key) +#define is_in_emul() static_branch_unlikely(&run_mode_emul_key) +#define is_guest_or_emul() !static_branch_likely(&run_mode_host_key) + +#define CPU_SW3231 0x31 +#define CPU_SW831 0x32 +#define CPU_SW8A 0x41 + +#define GET_TABLE_ENTRY 1 +#define GET_VENDOR_ID 2 +#define GET_MODEL 3 +#define GET_CPU_FREQ 4 +#define GET_CACHE_INFO 5 + +#define TABLE_ENTRY_MAX 32 +#define VENDOR_ID_MAX 2 +#define MODEL_MAX 8 +#define CACHE_INFO_MAX 4 + +#define L1_ICACHE 0 +#define L1_DCACHE 1 +#define L2_CACHE 2 +#define L3_CACHE 3 + +#define CPUID_ARCH_REV_MASK 0xf +#define CPUID_ARCH_REV(val) ((val) & CPUID_ARCH_REV_MASK) +#define CPUID_ARCH_VAR_SHIFT 4 +#define CPUID_ARCH_VAR_MASK (0xf << CPUID_ARCH_VAR_SHIFT) +#define CPUID_ARCH_VAR(val) \ + (((val) & CPUID_ARCH_VAR_MASK) >> CPUID_ARCH_VAR_SHIFT) +#define CPUID_CHIP_VAR_SHIFT 8 +#define CPUID_CHIP_VAR_MASK (0xf << CPUID_CHIP_VAR_SHIFT) +#define CPUID_CHIP_VAR(val) \ + (((val) & CPUID_CHIP_VAR_MASK) >> CPUID_CHIP_VAR_SHIFT) +#define CPUID_FAMILY_SHIFT 12 +#define CPUID_FAMILY_MASK (0xf << CPUID_FAMILY_SHIFT) +#define CPUID_FAMILY(val) \ + (((val) & CPUID_FAMILY_MASK) >> CPUID_FAMILY_SHIFT) +#define CPUID_MODEL_SHIFT 24 +#define CPUID_MODEL_MASK (0xff << CPUID_MODEL_SHIFT) +#define CPUID_MODEL(val) \ + (((val) & CPUID_MODEL_MASK) >> CPUID_MODEL_SHIFT) +#define CPUID_PA_BITS_SHIFT 32 +#define CPUID_PA_BITS_MASK (0x7fUL << CPUID_PA_BITS_SHIFT) +#define CPUID_PA_BITS(val) \ + (((val) & CPUID_PA_BITS_MASK) >> CPUID_PA_BITS_SHIFT) +#define CPUID_VA_BITS_SHIFT 39 +#define CPUID_VA_BITS_MASK (0x7fUL << CPUID_VA_BITS_SHIFT) +#define CPUID_VA_BITS(val) \ + (((val) & CPUID_VA_BITS_MASK) >> CPUID_VA_BITS_SHIFT) + + +#define CACHE_SIZE_SHIFT 0 +#define CACHE_SIZE_MASK (0xffffffffUL << CACHE_SIZE_SHIFT) +#define CACHE_SIZE(val) \ + (((val) & CACHE_SIZE_MASK) >> CACHE_SIZE_SHIFT) +#define CACHE_LINE_BITS_SHIFT 32 +#define CACHE_LINE_BITS_MASK (0xfUL << CACHE_LINE_BITS_SHIFT) +#define CACHE_LINE_BITS(val) \ + (((val) & CACHE_LINE_BITS_MASK) >> CACHE_LINE_BITS_SHIFT) +#define CACHE_INDEX_BITS_SHIFT 36 +#define CACHE_INDEX_BITS_MASK (0x3fUL << CACHE_INDEX_BITS_SHIFT) +#define CACHE_INDEX_BITS(val) \ + (((val) & CACHE_INDEX_BITS_MASK) >> CACHE_INDEX_BITS_SHIFT) +#define current_cpu_data cpu_data[smp_processor_id()] + +#endif /* _ASM_SW64_HW_INIT_H */ diff --git a/arch/sw_64/include/asm/hw_irq.h b/arch/sw_64/include/asm/hw_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..3cfc725f7517dcacadeffb3f47a675aef142288e --- /dev/null +++ b/arch/sw_64/include/asm/hw_irq.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HW_IRQ_H +#define _ASM_SW64_HW_IRQ_H + +#include + +extern volatile unsigned long irq_err_count; +DECLARE_PER_CPU(unsigned long, irq_pmi_count); + +#define ACTUAL_NR_IRQS NR_IRQS + +#ifdef CONFIG_PCI_MSI +typedef unsigned int vector_irq_t[PERCPU_MSI_IRQS]; +DECLARE_PER_CPU(vector_irq_t, vector_irq); +#endif +#endif /* _ASM_SW64_HW_IRQ_H */ diff --git a/arch/sw_64/include/asm/idle.h b/arch/sw_64/include/asm/idle.h new file mode 100644 index 0000000000000000000000000000000000000000..95e145f25306ada8722f28ad9976601076658afb --- /dev/null +++ b/arch/sw_64/include/asm/idle.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IDLE_H 
+#define _ASM_SW64_IDLE_H + +extern void arch_cpu_idle(void); + +#endif /* _ASM_SW64_IDLE_H */ diff --git a/arch/sw_64/include/asm/insn.h b/arch/sw_64/include/asm/insn.h new file mode 100644 index 0000000000000000000000000000000000000000..437cb48d1e9306bc46f206b10a1b0fb176a8e65a --- /dev/null +++ b/arch/sw_64/include/asm/insn.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019, serveros, linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_INSN_H +#define _ASM_SW64_INSN_H +#include + +/* Register numbers */ +enum { + R26 = 26, + R27, + R28, + R31 = 31, +}; + +#define BR_MAX_DISP 0xfffff +/* SW64 instructions are always 32 bits. */ +#define SW64_INSN_SIZE 4 + +#define ___SW64_RA(a) (((a) & 0x1f) << 21) +#define ___SW64_RB(b) (((b) & 0x1f) << 16) +#define ___SW64_SIMP_RC(c) (((c) & 0x1f)) +#define ___SW64_ST_DISP(disp) (((disp) & 0xffff)) +#define ___SW64_SYSCALL_FUNC(func) ((func) & 0xff) +#define ___SW64_BR_DISP(disp) (((disp) & 0x1fffff)) + + +#define SW64_INSN_BIS 0x40000740 +#define SW64_INSN_CALL 0x04000000 +#define SW64_INSN_SYS_CALL 0x02000000 +#define SW64_INSN_BR 0x10000000 + +#define SW64_NOP (0x43ff075f) +#define SW64_BIS(a, b, c) (SW64_INSN_BIS | ___SW64_RA(a) | ___SW64_RB(b) | ___SW64_SIMP_RC(c)) +#define SW64_CALL(a, b, disp) (SW64_INSN_CALL | ___SW64_RA(a) | ___SW64_RB(b) | ___SW64_ST_DISP(disp)) +#define SW64_SYS_CALL(func) (SW64_INSN_SYS_CALL | ___SW64_SYSCALL_FUNC(func)) +#define SW64_BR(a, disp) (SW64_INSN_BR | ___SW64_RA(a) | ___SW64_BR_DISP(disp)) + +extern int sw64_insn_read(void *addr, u32 *insnp); +extern int sw64_insn_write(void *addr, u32 insn); +extern int sw64_insn_double_write(void *addr, u64 insn); +extern unsigned int sw64_insn_nop(void); +extern unsigned int sw64_insn_call(unsigned int ra, unsigned int rb); +extern unsigned int sw64_insn_sys_call(unsigned int num); +extern unsigned int sw64_insn_br(unsigned int ra, unsigned long pc, unsigned long new_pc); + +#define SW64_OPCODE_RA(opcode) ((opcode >> 21) & 0x1f) + +#define SW64_INSN(name, opcode, mask) \ +static inline bool sw64_insn_is_##name(u32 insn) \ +{ \ + return (insn & mask) == opcode; \ +} + +SW64_INSN(sys_call_b, 0x00000000, 0xfc000000); +SW64_INSN(sys_call, 0x00000001, 0xfc000000); +SW64_INSN(call, 0x04000000, 0xfc000000); +SW64_INSN(ret, 0x08000000, 0xfc000000); +SW64_INSN(jmp, 0x0c000000, 0xfc000000); +SW64_INSN(br, 0x10000000, 0xfc000000); +SW64_INSN(bsr, 0x14000000, 0xfc000000); +SW64_INSN(memb, 0x18000000, 0xfc00ffff); +SW64_INSN(imemb, 0x18000001, 0xfc00ffff); +SW64_INSN(rtc, 0x18000020, 0xfc00ffff); +SW64_INSN(halt, 0x18000080, 0xfc00ffff); +SW64_INSN(rd_f, 0x18001000, 0xfc00ffff); +SW64_INSN(beq, 0xc0000000, 0xfc000000); +SW64_INSN(bne, 0xc4000000, 0xfc000000); +SW64_INSN(blt, 0xc8000000, 0xfc000000); +SW64_INSN(ble, 0xcc000000, 0xfc000000); +SW64_INSN(bgt, 0xd0000000, 0xfc000000); +SW64_INSN(bge, 0xd4000000, 0xfc000000); +SW64_INSN(blbc, 0xd8000000, 0xfc000000); +SW64_INSN(blbs, 0xdc000000, 0xfc000000); +SW64_INSN(fbeq, 
0xe0000000, 0xfc000000); +SW64_INSN(fbne, 0xe4000000, 0xfc000000); +SW64_INSN(fblt, 0xe8000000, 0xfc000000); +SW64_INSN(fble, 0xec000000, 0xfc000000); +SW64_INSN(fbgt, 0xf0000000, 0xfc000000); +SW64_INSN(fbge, 0xf4000000, 0xfc000000); +SW64_INSN(lldw, 0x20000000, 0xfc00f000); +SW64_INSN(lldl, 0x20001000, 0xfc00f000); + +#endif /* _ASM_SW64_INSN_H */ diff --git a/arch/sw_64/include/asm/io.h b/arch/sw_64/include/asm/io.h new file mode 100644 index 0000000000000000000000000000000000000000..2b045be5257e0e472bc221875117bd6560db2eae --- /dev/null +++ b/arch/sw_64/include/asm/io.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IO_H +#define _ASM_SW64_IO_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +/* The generic header contains only prototypes. Including it ensures that + * the implementation we have here matches that interface. + */ +#include + +/* We don't use IO slowdowns on the sw64, but.. */ +#define __SLOW_DOWN_IO do { } while (0) +#define SLOW_DOWN_IO do { } while (0) + +#define page_to_phys(page) page_to_pa(page) + +/* Maximum PIO space address supported? */ +#define IO_SPACE_LIMIT 0xffffffffffffffff + +/* + * Generic IO read/write. These perform native-endian accesses. + */ + +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ + asm volatile("stb %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ + asm volatile("sth %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile("stw %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 val, volatile void __iomem *addr) +{ + asm volatile("stl %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_readb __raw_readb +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + u8 val; + + asm volatile("ldbu %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readw __raw_readw +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + u16 val; + + asm volatile("ldhu %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readl __raw_readl +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + u32 val; + + asm volatile("ldw %0, 0(%1)\n" + "zapnot %0, 0xf, %0\n" + : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) +{ + u64 val; + + asm volatile("ldl %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +/* IO barriers */ + +#define __iormb() rmb() +#define __iowmb() wmb() +#define mmiowb() do { } while (0) + +/* + * Relaxed I/O memory access primitives. These follow the Device memory + * ordering rules but do not guarantee any ordering relative to Normal memory + * accesses. + */ +#define readb_relaxed(c) __raw_readb(c) +#define readw_relaxed(c) __raw_readw(c) +#define readl_relaxed(c) __raw_readl(c) +#define readq_relaxed(c) __raw_readq(c) + +#define writeb_relaxed(v, c) __raw_writeb((v), (c)) +#define writew_relaxed(v, c) __raw_writew((v), (c)) +#define writel_relaxed(v, c) __raw_writel((v), (c)) +#define writeq_relaxed(v, c) __raw_writeq((v), (c)) + +/* + * I/O memory access primitives. Reads are ordered relative to any + * following Normal memory access. 
Writes are ordered relative to any prior + * Normal memory access. + */ +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; }) + +#define writeb(v, c) ({ __iowmb(); writeb_relaxed((v), (c)); }) +#define writew(v, c) ({ __iowmb(); writew_relaxed((v), (c)); }) +#define writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); }) +#define writeq(v, c) ({ __iowmb(); writeq_relaxed((v), (c)); }) +/* + * We always have external versions of these routines. + */ +extern u8 inb(unsigned long port); +extern u16 inw(unsigned long port); +extern u32 inl(unsigned long port); +extern void outb(u8 b, unsigned long port); +extern void outw(u16 b, unsigned long port); +extern void outl(u32 b, unsigned long port); +#define inb inb +#define inw inw +#define inl inl +#define outb outb +#define outw outw +#define outl outl + +static inline void __iomem *__ioremap(phys_addr_t addr, size_t size, + pgprot_t prot) +{ + unsigned long tmp = addr | PAGE_OFFSET; + + return (void __iomem *)(tmp); +} + +#define ioremap(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_nocache(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_cache(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_uc ioremap_nocache + +#define ioport_map ioport_map +#define ioport_unmap ioport_unmap + +static inline void __iounmap(volatile void __iomem *addr) +{ +} + +#define iounmap __iounmap + +#define ioread16be(p) be16_to_cpu(ioread16(p)) +#define ioread32be(p) be32_to_cpu(ioread32(p)) +#define iowrite16be(v, p) iowrite16(cpu_to_be16(v), (p)) +#define iowrite32be(v, p) iowrite32(cpu_to_be32(v), (p)) + +#define inb_p inb +#define inw_p inw +#define inl_p inl +#define outb_p outb +#define outw_p outw +#define outl_p outl + + +/* + * String version of IO memory access ops: + */ +#define memcpy_fromio memcpy_fromio +extern void memcpy_fromio(void *buffer, const volatile void __iomem *addr, long len); + +#define memcpy_toio memcpy_toio +extern void memcpy_toio(volatile void __iomem *addr, const void *buffer, long len); + +extern void _memset_c_io(volatile void __iomem *addr, unsigned long c, long len); + +#define memset_io memset_io +static inline void memset_io(volatile void __iomem *addr, u8 c, long len) +{ + _memset_c_io(addr, 0x0101010101010101UL * c, len); +} + +static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len) +{ + _memset_c_io(addr, 0x0001000100010001UL * c, len); +} + +/* + * String versions of in/out ops: + */ +extern void insb(unsigned long port, void *dst, unsigned long count); +extern void insw(unsigned long port, void *dst, unsigned long count); +extern void insl(unsigned long port, void *dst, unsigned long count); +extern void outsb(unsigned long port, const void *src, unsigned long count); +extern void outsw(unsigned long port, const void *src, unsigned long count); +extern void outsl(unsigned long port, const void *src, unsigned long count); + +#define insb insb +#define insw insw +#define insl insl +#define outsb outsb +#define outsw outsw +#define outsl outsl + +/* + * These defines will override the defaults when doing RTC queries + */ + +#define RTC_PORT(x) (0x70 + (x)) +#define RTC_ALWAYS_BCD 0 + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a 
virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +/* + * These get provided from since sw64 does not + * select GENERIC_IOMAP. + */ +#define ioread8 ioread8 +#define ioread16 ioread16 +#define ioread32 ioread32 +#define ioread64 ioread64 +#define iowrite8 iowrite8 +#define iowrite16 iowrite16 +#define iowrite32 iowrite32 +#define iowrite64 iowrite64 +#define ioread64be ioread64be +#define iowrite64be iowrite64be +#define ioread8_rep ioread8_rep +#define ioread16_rep ioread16_rep +#define ioread32_rep ioread32_rep +#define iowrite8_rep iowrite8_rep +#define iowrite16_rep iowrite16_rep +#define iowrite32_rep iowrite32_rep +#define pci_iounmap pci_iounmap + +#include + +/* + * Change addresses as seen by the kernel (virtual) to addresses as + * seen by a device (bus), and vice versa. + * + * Note that this only works for a limited range of kernel addresses, + * and very well may not span all memory. Consider this interface + * deprecated in favour of the DMA-mapping API. + */ +static inline unsigned long __deprecated virt_to_bus(void *address) +{ + return virt_to_phys(address); +} +#define isa_virt_to_bus virt_to_bus + +static inline void * __deprecated bus_to_virt(unsigned long address) +{ + void *virt; + + /* This check is a sanity check but also ensures that bus address 0 + * maps to virtual address 0 which is useful to detect null pointers + * (the NCR driver is much simpler if NULL pointers are preserved). + */ + virt = phys_to_virt(address); + return (long)address <= 0 ? NULL : virt; +} +#define isa_bus_to_virt bus_to_virt + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_IO_H */ diff --git a/arch/sw_64/include/asm/irq.h b/arch/sw_64/include/asm/irq.h new file mode 100644 index 0000000000000000000000000000000000000000..b3ac4105c29e14fc5f0cd2fde62c8db63f3cc525 --- /dev/null +++ b/arch/sw_64/include/asm/irq.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IRQ_H +#define _ASM_SW64_IRQ_H + +/* + * arch/sw_64/include/asm/irq.h + * + * (C) 2012 OSKernel JN + */ + +#include + +#define NR_VECTORS_PERCPU 256 +#define NR_IRQS_LEGACY 16 +#define NR_IRQS ((NR_VECTORS_PERCPU + NR_IRQS_LEGACY) * NR_CPUS) + +static inline int irq_canonicalize(int irq) +{ + /* + * XXX is this true for all SW64? The old serial driver + * did it this way for years without any complaints, so.... + */ + return ((irq == 2) ? 9 : irq); +} + +struct pt_regs; +extern void (*perf_irq)(unsigned long vector, struct pt_regs *regs); +extern void fixup_irqs(void); +extern void sw64_timer_interrupt(void); + +#endif /* _ASM_SW64_IRQ_H */ diff --git a/arch/sw_64/include/asm/irq_impl.h b/arch/sw_64/include/asm/irq_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..797af433a1267c8b49cfda9901baed051ff16b98 --- /dev/null +++ b/arch/sw_64/include/asm/irq_impl.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains declarations and inline functions for interfacing + * with the IRQ handling routines in irq.c.
+ */ + +#ifndef _ASM_SW64_IRQ_IMPL_H +#define _ASM_SW64_IRQ_IMPL_H + +#include +#include +#include + +#include + +#define SW64_PCIE0_INT_BASE 17 +#define SW64_PCIE0_MSI_BASE 21 + +#define SW64_PCIE1_INT_BASE 277 +#define SW64_PCIE1_MSI_BASE 281 + +#define RTC_IRQ 8 +#define SWI2C_IRQ 14 + +enum sw64_irq_type { + INT_IPI = 1, + INT_PC0 = 2, + INT_PC1 = 3, + INT_INTx = 5, + INT_MSI = 6, + INT_MT = 7, + INT_RTC = 9, + INT_FAULT = 10, + INT_VT_SERIAL = 12, + INT_VT_HOTPLUG = 13, + INT_DEV = 17, + INT_NMI = 18, + INT_LEGACY = 31, +}; + +extern struct irqaction timer_irqaction; +extern void init_rtc_irq(irq_handler_t handler); +extern void handle_irq(int irq); +extern void handle_ipi(struct pt_regs *regs); +extern void __init sw64_init_irq(void); +extern irqreturn_t timer_interrupt(int irq, void *dev); + +#endif /* _ASM_SW64_IRQ_IMPL_H */ diff --git a/arch/sw_64/include/asm/irqflags.h b/arch/sw_64/include/asm/irqflags.h new file mode 100644 index 0000000000000000000000000000000000000000..b4440f25a51d622402198e1239bcac10908ee5a5 --- /dev/null +++ b/arch/sw_64/include/asm/irqflags.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IRQFLAGS_H +#define _ASM_SW64_IRQFLAGS_H + +#include + +#define IPL_MIN 0 +#define IPL_MAX 7 + +#define getipl() (rdps() & 7) +#define setipl(ipl) ((void) swpipl(ipl)) + +static inline unsigned long arch_local_save_flags(void) +{ + return rdps(); +} + +static inline void arch_local_irq_disable(void) +{ + setipl(IPL_MAX); + barrier(); +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = swpipl(IPL_MAX); + + barrier(); + return flags; +} + +static inline void arch_local_irq_enable(void) +{ + barrier(); + setipl(IPL_MIN); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + barrier(); + setipl(flags); + barrier(); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return flags > IPL_MIN; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(getipl()); +} + +#endif /* _ASM_SW64_IRQFLAGS_H */ diff --git a/arch/sw_64/include/asm/jump_label.h b/arch/sw_64/include/asm/jump_label.h new file mode 100644 index 0000000000000000000000000000000000000000..32fbf7573b206bb2c935cc173de392b100d02010 --- /dev/null +++ b/arch/sw_64/include/asm/jump_label.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_JUMP_LABEL_H +#define _ASM_SW64_JUMP_LABEL_H + +#ifndef __ASSEMBLY__ + +#include +#include + +#define JUMP_LABEL_NOP_SIZE SW64_INSN_SIZE + +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) +{ + asm_volatile_goto("1: nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %0\n\t" + ".popsection\n\t" + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm_volatile_goto("1: br %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %0\n\t" + ".popsection\n\t" + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +typedef u64 jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_SW64_JUMP_LABEL_H */ diff --git a/arch/sw_64/include/asm/kdebug.h b/arch/sw_64/include/asm/kdebug.h new file mode 100644 index 
0000000000000000000000000000000000000000..73793057c3e87d5e9f40fa0032b8522cd1c31126 --- /dev/null +++ b/arch/sw_64/include/asm/kdebug.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KDEBUG_H +#define _ASM_SW64_KDEBUG_H + +#include + +enum die_val { + DIE_OOPS = 1, + DIE_BREAK, + DIE_SSTEPBP, + DIE_UPROBE, + DIE_UPROBE_XOL, +}; + +#endif /* _ASM_SW64_KDEBUG_H */ diff --git a/arch/sw_64/include/asm/kexec.h b/arch/sw_64/include/asm/kexec.h new file mode 100644 index 0000000000000000000000000000000000000000..25e0d8da84f8dbe98908179bb061ea5f4759aa6e --- /dev/null +++ b/arch/sw_64/include/asm/kexec.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KEXEC_H +#define _ASM_SW64_KEXEC_H + +#ifdef CONFIG_KEXEC + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +#define KEXEC_CONTROL_PAGE_SIZE 8192 + +#define KEXEC_ARCH KEXEC_ARCH_SW64 + +#define KEXEC_SW64_ATAGS_OFFSET 0x1000 +#define KEXEC_SW64_ZIMAGE_OFFSET 0x8000 + +#ifndef __ASSEMBLY__ + +/** + * crash_setup_regs() - save registers for the panic kernel + * @newregs: registers are saved here + * @oldregs: registers to be saved (may be %NULL) + * + * Function copies machine registers from @oldregs to @newregs. If @oldregs is + * %NULL then current registers are stored there. + */ +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) { + memcpy(newregs, oldregs, sizeof(*newregs)); + } else { + __asm__ __volatile__ ("stl $0, %0" : "=m" (newregs->regs[0])); + __asm__ __volatile__ ("stl $1, %0" : "=m" (newregs->regs[1])); + __asm__ __volatile__ ("stl $2, %0" : "=m" (newregs->regs[2])); + __asm__ __volatile__ ("stl $3, %0" : "=m" (newregs->regs[3])); + __asm__ __volatile__ ("stl $4, %0" : "=m" (newregs->regs[4])); + __asm__ __volatile__ ("stl $5, %0" : "=m" (newregs->regs[5])); + __asm__ __volatile__ ("stl $6, %0" : "=m" (newregs->regs[6])); + __asm__ __volatile__ ("stl $7, %0" : "=m" (newregs->regs[7])); + __asm__ __volatile__ ("stl $8, %0" : "=m" (newregs->regs[8])); + __asm__ __volatile__ ("stl $9, %0" : "=m" (newregs->regs[9])); + __asm__ __volatile__ ("stl $10, %0" : "=m" (newregs->regs[10])); + __asm__ __volatile__ ("stl $11, %0" : "=m" (newregs->regs[11])); + __asm__ __volatile__ ("stl $12, %0" : "=m" (newregs->regs[12])); + __asm__ __volatile__ ("stl $13, %0" : "=m" (newregs->regs[13])); + __asm__ __volatile__ ("stl $14, %0" : "=m" (newregs->regs[14])); + __asm__ __volatile__ ("stl $15, %0" : "=m" (newregs->regs[15])); + __asm__ __volatile__ ("stl $16, %0" : "=m" (newregs->regs[16])); + __asm__ __volatile__ ("stl $17, %0" : "=m" (newregs->regs[17])); + __asm__ __volatile__ ("stl $18, %0" : "=m" (newregs->regs[18])); + __asm__ __volatile__ ("stl $19, %0" : "=m" (newregs->regs[19])); + __asm__ __volatile__ ("stl $20, %0" : "=m" (newregs->regs[20])); + __asm__ __volatile__ ("stl $21, %0" : "=m" (newregs->regs[21])); + __asm__ __volatile__ ("stl $22, %0" : "=m" (newregs->regs[22])); + __asm__ __volatile__ ("stl $23, %0" : "=m" (newregs->regs[23])); + __asm__ __volatile__ ("stl $24, %0" : "=m" (newregs->regs[24])); + __asm__ __volatile__ ("stl $25, %0" : "=m" (newregs->regs[25])); + __asm__ __volatile__ ("stl $26, %0" : "=m" (newregs->regs[26])); + __asm__ __volatile__ ("stl $27, %0" : 
"=m" (newregs->regs[27])); + __asm__ __volatile__ ("stl $28, %0" : "=m" (newregs->regs[28])); + __asm__ __volatile__ ("stl $29, %0" : "=m" (newregs->regs[29])); + __asm__ __volatile__ ("stl $30, %0" : "=m" (newregs->regs[30])); + newregs->pc = (unsigned long)current_text_addr(); + } +} + +/* Function pointer to optional machine-specific reinitialization */ +extern void (*kexec_reinit)(void); + +#endif /* __ASSEMBLY__ */ + +struct kimage; +extern unsigned long kexec_args[4]; + +#endif /* CONFIG_KEXEC */ + +#endif /* _ASM_SW64_KEXEC_H */ diff --git a/arch/sw_64/include/asm/kgdb.h b/arch/sw_64/include/asm/kgdb.h new file mode 100644 index 0000000000000000000000000000000000000000..a00a45ce767ca74361319836d3b188db9178285b --- /dev/null +++ b/arch/sw_64/include/asm/kgdb.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * sw64 KGDB support + * + * Based on arch/arm64/include/kgdb.h + * + * Copyright (C) Xia Bin + * Author: Xia Bin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_KGDB_H +#define _ASM_SW64_KGDB_H + +#include +#include + +#ifndef __ASSEMBLY__ + + +#define GDB_ADJUSTS_BREAK_OFFSET +#define BREAK_INSTR_SIZE 4 +#define CACHE_FLUSH_IS_SAFE 0 + +static inline void arch_kgdb_breakpoint(void) +{ + __asm__ __volatile__("sys_call %0" : : "i"(HMC_bpt)); +} + +void sw64_task_to_gdb_regs(struct task_struct *task, unsigned long *regs); + +extern void kgdb_handle_bus_error(void); +extern int kgdb_fault_expected; +extern unsigned long get_reg(struct task_struct *task, unsigned long regno); + +#endif /* !__ASSEMBLY__ */ + +/* + * general purpose registers size in bytes. + */ +#define DBG_MAX_REG_NUM (67) + +/* + * Size of I/O buffer for gdb packet. + * considering to hold all register contents, size is set + */ + +#define BUFMAX 4096 + +/* + * Number of bytes required for gdb_regs buffer. 
+ * _GP_REGS: 8 bytes, _FP_REGS: 16 bytes and _EXTRA_REGS: 4 bytes each. + * GDB fails to connect for sizes beyond this with the error + * "'g' packet reply is too long" + */ +#define NUMREGBYTES (DBG_MAX_REG_NUM * 8) + +#endif /* _ASM_SW64_KGDB_H */ diff --git a/arch/sw_64/include/asm/kprobes.h b/arch/sw_64/include/asm/kprobes.h new file mode 100644 index 0000000000000000000000000000000000000000..0c7be8109ed29423cadec91e4f0ffc9d65e7ab0b --- /dev/null +++ b/arch/sw_64/include/asm/kprobes.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel Probes (KProbes) + * Based on arch/mips/include/asm/kprobes.h + */ + +#ifndef _ASM_SW64_KPROBES_H +#define _ASM_SW64_KPROBES_H + +#include + +#define BREAK_KPROBE 0x40ffffff +#define BREAK_KPROBE_SS 0x40fffeff + +#ifdef CONFIG_KPROBES +#include +#include + +#include +#include + +#define __ARCH_WANT_KPROBES_INSN_SLOT + +struct kprobe; +struct pt_regs; + +typedef u32 kprobe_opcode_t; + +#define MAX_INSN_SIZE 2 + +#define flush_insn_slot(p) \ +do { \ + if (p->addr) \ + flush_icache_range((unsigned long)p->addr, \ + (unsigned long)p->addr + \ + (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ +} while (0) + + +#define kretprobe_blacklist_size 0 + +void arch_remove_kprobe(struct kprobe *p); + +/* Architecture specific copy of original instruction */ +struct arch_specific_insn { + /* copy of the original instruction */ + kprobe_opcode_t *insn; + /* + * Set in kprobes code, initially to 0. If the instruction can be + * emulated, this is set to 1; if not, to -1. + */ + int boostable; +}; + +struct prev_kprobe { + struct kprobe *kp; + unsigned long status; +}; + +#define SKIP_DELAYSLOT 0x0001 + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned long kprobe_status; + /* Per-thread fields, used while emulating branches */ + unsigned long flags; + unsigned long target_pc; + struct prev_kprobe prev_kprobe; +}; +extern int kprobe_handler(struct pt_regs *regs); +extern int post_kprobe_handler(struct pt_regs *regs); +extern int kprobe_fault_handler(struct pt_regs *regs, unsigned long mmcsr); + + +#endif /* CONFIG_KPROBES */ +#endif /* _ASM_SW64_KPROBES_H */ diff --git a/arch/sw_64/include/asm/kvm_asm.h b/arch/sw_64/include/asm/kvm_asm.h new file mode 100644 index 0000000000000000000000000000000000000000..fd1b25018fc8c37f97a176201f0b8e444bd2ed3e --- /dev/null +++ b/arch/sw_64/include/asm/kvm_asm.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_ASM_H +#define _ASM_SW64_KVM_ASM_H + +#define SW64_KVM_EXIT_HOST_INTR 0 +#define SW64_KVM_EXIT_IO 1 +#define SW64_KVM_MIGRATION_SET_DIRTY 2 +#define SW64_KVM_MIGRATION_SET_DIRTY_HM 3 +#define SW64_KVM_EXIT_HALT 10 +#define SW64_KVM_EXIT_SHUTDOWN 12 +#define SW64_KVM_EXIT_TIMER 13 +#define SW64_KVM_EXIT_IPI 14 +#define SW64_KVM_EXIT_STOP 16 +#define SW64_KVM_EXIT_RESTART 17 +#define SW64_KVM_EXIT_APT_FAULT 18 +#define SW64_KVM_EXIT_FATAL_ERROR 22 +#define SW64_KVM_EXIT_MEMHOTPLUG 23 +#define SW64_KVM_EXIT_DEBUG 24 + + +#define kvm_sw64_exception_type \ + {0, "HOST_INTR" }, \ + {1, "IO" }, \ + {10, "HALT" }, \ + {12, "SHUTDOWN" }, \ + {13, "TIMER" }, \ + {14, "IPI" }, \ + {16, "STOP" }, \ + {17, "RESTART" }, \ + {18, "APT_FAULT" }, \ + {22, "FATAL_ERROR" }, \ + {23, "MEMHOTPLUG" }, \ + {24, "DEBUG" } + + +#include + +#endif /* _ASM_SW64_KVM_ASM_H */ diff --git a/arch/sw_64/include/asm/kvm_cma.h b/arch/sw_64/include/asm/kvm_cma.h new file mode 100644 index 0000000000000000000000000000000000000000..d50ba599ceb716ac0309122cf77c1e2ea38c70d7 --- /dev/null +++
b/arch/sw_64/include/asm/kvm_cma.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_CMA_H +#define _ASM_SW64_KVM_CMA_H + +#include + +extern int __init kvm_cma_declare_contiguous(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + const char *name, struct cma **res_cma); +#endif /* _ASM_SW64_KVM_CMA_H */ diff --git a/arch/sw_64/include/asm/kvm_emulate.h b/arch/sw_64/include/asm/kvm_emulate.h new file mode 100644 index 0000000000000000000000000000000000000000..915aa6c0bce212b8b06bb3aa19aa6aee8c04e532 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_emulate.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_EMULATE_H +#define _ASM_SW64_KVM_EMULATE_H + +#include +#include + +#define R(x) ((size_t) &((struct kvm_regs *)0)->x) + +static int reg_offsets[32] = { + R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8), + R(r9), R(r10), R(r11), R(r12), R(r13), R(r14), R(r15), + R(r16), R(r17), R(r18), + R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26), + R(r27), R(r28), R(gp), + 0, 0, +}; + + +static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, + unsigned long val) +{ + void *regs_ptr = (void *)&vcpu->arch.regs; + + regs_ptr += reg_offsets[reg_num]; + *(unsigned long *)regs_ptr = val; +} + +static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu, u8 reg_num) +{ + void *regs_ptr = (void *)&vcpu->arch.regs; + + if (reg_num == 31) + return 0; + regs_ptr += reg_offsets[reg_num]; + return *(unsigned long *)regs_ptr; +} + +void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, + struct kvm_run *run); + +unsigned int interrupt_pending(struct kvm_vcpu *vcpu, bool *more); +void clear_vcpu_irq(struct kvm_vcpu *vcpu); +void inject_vcpu_irq(struct kvm_vcpu *vcpu, unsigned int irq); +void try_deliver_interrupt(struct kvm_vcpu *vcpu, unsigned int irq, bool more); +#endif /* _ASM_SW64_KVM_EMULATE_H */ diff --git a/arch/sw_64/include/asm/kvm_host.h b/arch/sw_64/include/asm/kvm_host.h new file mode 100644 index 0000000000000000000000000000000000000000..09a995218a2cbf473933f89d45631997ad6c73bd --- /dev/null +++ b/arch/sw_64/include/asm/kvm_host.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_HOST_H +#define _ASM_SW64_KVM_HOST_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#define last_vpn(cpu) (cpu_data[cpu].last_vpn) + +#ifdef CONFIG_SUBARCH_C3B +#define VPN_BITS 8 +#define GUEST_RESET_PC 0xffffffff80011100 +#endif + +#ifdef CONFIG_SUBARCH_C4 +#define VPN_BITS 10 +#define GUEST_RESET_PC 0xfff0000000011002 +#endif + +#define VPN_FIRST_VERSION (1UL << VPN_BITS) +#define VPN_MASK ((1UL << VPN_BITS) - 1) +#define VPN_SHIFT (64 - VPN_BITS) + +#define KVM_MAX_VCPUS 64 +#define KVM_INTERNAL_MEM_SLOTS (KVM_MEM_SLOTS_NUM - 512) + +#define KVM_HALT_POLL_NS_DEFAULT 0 +#define KVM_IRQCHIP_NUM_PINS 256 +/* KVM Hugepage definitions for sw64 */ +#define KVM_NR_PAGE_SIZES 3 +#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9) +#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x)) +#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x)) +#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) +#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) + +/* + * The architecture supports 48-bit GPA as input to the additional stage
translations. + */ +#define KVM_PHYS_SHIFT (48) +#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT) +#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL)) + +struct kvm_arch_memory_slot { + unsigned long host_phys_addr; + bool valid; +}; + +struct kvm_arch { + unsigned long host_phys_addr; + unsigned long size; + + /* segment table */ + unsigned long *seg_pgd; + + struct swvm_mem mem; + /* Additional stage page table */ + pgd_t *pgd; +}; + +#define KVM_NR_MEM_OBJS 40 + +/* + * We don't want allocation failures within the mmu code, so we preallocate + * enough memory for a single page fault in a cache. + */ +struct kvm_mmu_memory_cache { + int nobjs; + void *objects[KVM_NR_MEM_OBJS]; +}; + +struct kvm_vcpu_arch { + struct kvm_regs regs __aligned(32); + struct vcpucb vcb; + struct task_struct *tsk; + unsigned int pcpu_id; /* current running pcpu id */ + + /* Virtual clock device */ + struct hrtimer hrt; + unsigned long timer_next_event; + unsigned long vtimer_freq; + + int first_run; + int halted; + int stopped; + int restart; + + /* Pending virtual interrupts */ + DECLARE_BITMAP(irqs_pending, SWVM_IRQS); + unsigned long vpnc[NR_CPUS]; + + /* Detect first run of a vcpu */ + bool has_run_once; + + /* WAIT executed */ + int wait; + + /* vcpu power-off state */ + bool power_off; + + /* Don't run the guest (internal implementation need) */ + bool pause; + + struct kvm_decode mmio_decode; + + /* Cache some mmu pages needed inside spinlock regions */ + struct kvm_mmu_memory_cache mmu_page_cache; + + /* guest live migration */ + unsigned long migration_mark; + unsigned long shtclock; +}; + +struct vmem_info { + unsigned long start; + size_t size; + atomic_t refcnt; +}; + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; +}; + +struct kvm_vcpu_stat { + struct kvm_vcpu_stat_generic generic; + u64 pid; + u64 exits; + u64 io_exits; + u64 mmio_exits; + u64 migration_set_dirty; + u64 shutdown_exits; + u64 restart_exits; + u64 stop_exits; + u64 ipi_exits; + u64 timer_exits; + u64 debug_exits; +#ifdef CONFIG_KVM_MEMHOTPLUG + u64 memhotplug_exits; +#endif + u64 fatal_error_exits; + u64 halt_exits; + u64 halt_successful_poll; + u64 halt_attempted_poll; + u64 halt_wakeup; + u64 halt_poll_success_ns; + u64 halt_poll_fail_ns; + u64 halt_poll_invalid; + u64 signal_exits; + u64 steal; + u64 st_max; + u64 utime; + u64 stime; + u64 gtime; +}; + +#ifdef CONFIG_KVM_MEMHOTPLUG +void vcpu_mem_hotplug(struct kvm_vcpu *vcpu, unsigned long start_addr); +#endif +#ifdef CONFIG_SUBARCH_C4 +#define KVM_ARCH_WANT_MMU_NOTIFIER +#endif +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); + +void update_vcpu_stat_time(struct kvm_vcpu_stat *vcpu_stat); +void check_vcpu_requests(struct kvm_vcpu *vcpu); +void sw64_kvm_switch_vpn(struct kvm_vcpu *vcpu); +int vmem_init(void); +void vmem_exit(void); +int __sw64_vcpu_run(unsigned long vcb_pa, struct kvm_regs *regs, + struct hcall_args *args); +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index, struct hcall_args *hargs); +void vcpu_send_ipi(struct kvm_vcpu *vcpu, int target_vcpuid, int type); +static inline void kvm_arch_hardware_disable(void) {} +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} +static inline void
kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arch_free_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); + +int kvm_sw64_perf_init(void); +int kvm_sw64_perf_teardown(void); +void kvm_flush_tlb_all(void); +void kvm_sw64_update_vpn(struct kvm_vcpu *vcpu, unsigned long vpn); +int kvm_sw64_init_vm(struct kvm *kvm); +void kvm_sw64_destroy_vm(struct kvm *kvm); +int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu); +long kvm_sw64_set_vcb(struct file *filp, unsigned long arg); +long kvm_sw64_get_vcb(struct file *filp, unsigned long arg); + +void update_aptp(unsigned long pgd); +void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu); +#endif /* _ASM_SW64_KVM_HOST_H */ diff --git a/arch/sw_64/include/asm/kvm_mmio.h b/arch/sw_64/include/asm/kvm_mmio.h new file mode 100644 index 0000000000000000000000000000000000000000..c87b259e9395f0943b062c4d4a6e08a433bacf1d --- /dev/null +++ b/arch/sw_64/include/asm/kvm_mmio.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_MMIO_H +#define _ASM_SW64_KVM_MMIO_H + +#include +#include + +struct kvm_decode { + unsigned long rt; + bool sign_extend; +}; + +int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct hcall_args *hargs); + +#endif /* _ASM_SW64_KVM_MMIO_H */ diff --git a/arch/sw_64/include/asm/kvm_mmu.h b/arch/sw_64/include/asm/kvm_mmu.h new file mode 100644 index 0000000000000000000000000000000000000000..f4493de934bab402a088d0deec64fe86b86a7b70 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_mmu.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_MMU_H +#define _ASM_SW64_KVM_MMU_H + +#define AF_ACCESS_TYPE_SHIFT 55 +#define AF_INV_LEVEL_SHIFT 53 +#define AF_FAULT_STATUS_SHIFT 48 + +#define AF_ACCESS_TYPE_MASK 0x3 +#define AF_INV_LEVEL_MASK 0x3 +#define AF_FAULT_STATUS_MASK 0x1f +#define AF_ENTRY_ADDR_MASK ((0x1UL << AF_FAULT_STATUS_SHIFT) - 1) + +/* access type definition */ +#define AF_READ_ACCESS_TYPE 0x1 +#define AF_WRITE_ACCESS_TYPE 0x2 +#define AF_EXEC_ACCESS_TYPE 0x3 + +/* invalid page level */ +#define AF_INV_LEVEL_1 0 +#define AF_INV_LEVEL_2 1 +#define AF_INV_LEVEL_3 2 +#define AF_INV_LEVEL_4 3 + +/* fault status */ +#define AF_STATUS_MISCONFIG 0x1 +#define AF_STATUS_FOR 0x2 +#define AF_STATUS_FOW 0x4 +#define AF_STATUS_FOE 0x8 +#define AF_STATUS_INV 0x10 + +#define KVM_MMU_CACHE_MIN_PAGES 2 + +static inline void kvm_set_aptpte_readonly(pte_t *pte) +{ + pte_val(*pte) |= _PAGE_FOW; +} + +static inline bool kvm_aptpte_readonly(pte_t *pte) +{ + return (pte_val(*pte) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline void kvm_set_aptpmd_readonly(pmd_t *pmd) +{ + pmd_val(*pmd) |= _PAGE_FOW; +} + +static inline bool kvm_aptpmd_readonly(pmd_t *pmd) +{ + return (pmd_val(*pmd) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline void kvm_set_aptpud_readonly(pud_t *pud) +{ + pud_val(*pud) |= _PAGE_FOW; +} + +static inline bool kvm_aptpud_readonly(pud_t *pud) +{ + return (pud_val(*pud) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline pte_t kvm_pte_mkwrite(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_FOW; + return pte; +} + +static inline pte_t kvm_pte_mkexec(pte_t pte) +{
+ pte_val(pte) &= ~_PAGE_FOE; + return pte; +} + +static inline bool kvm_pte_exec(pte_t *pte) +{ + return !(pte_val(*pte) & _PAGE_FOE); +} + +static inline pmd_t kvm_pmd_mkwrite(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOW; + return pmd; +} + +static inline pmd_t kvm_pmd_mkexec(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOE; + return pmd; +} + +static inline bool kvm_pmd_exec(pmd_t *pmd) +{ + return !(pmd_val(*pmd) & _PAGE_FOE); +} + +static inline pud_t kvm_pud_mkwrite(pud_t pud) +{ + pud_val(pud) &= ~_PAGE_FOW; + return pud; +} + +static inline pud_t kvm_pud_mkexec(pud_t pud) +{ + pud_val(pud) &= ~_PAGE_FOE; + return pud; +} + +static inline bool kvm_pud_exec(pud_t *pud) +{ + return !(pud_val(*pud) & _PAGE_FOE); +} + +void kvm_core4_commit_memory_region(struct kvm *kvm, + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change); +void kvm_core4_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot); +void kvm_core4_flush_shadow_all(struct kvm *kvm); +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +void kvm_handle_apt_fault(struct kvm_vcpu *vcpu); +int kvm_alloc_addtional_stage_pgd(struct kvm *kvm); +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot); +int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); +void apt_unmap_vm(struct kvm *kvm); +#endif /* _ASM_SW64_KVM_MMU_H */ diff --git a/arch/sw_64/include/asm/kvm_para.h b/arch/sw_64/include/asm/kvm_para.h new file mode 100644 index 0000000000000000000000000000000000000000..442f1c7d9f832159bcb04068b8939c1fa3b107cc --- /dev/null +++ b/arch/sw_64/include/asm/kvm_para.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_PARA_H +#define _ASM_SW64_KVM_PARA_H + +#include + +#define HMC_hcall 0x32 + +static inline unsigned long kvm_hypercall3(unsigned long num, + unsigned long arg0, + unsigned long arg1, + unsigned long arg2) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = num; + register unsigned long __r17 __asm__("$17") = arg0; + register unsigned long __r18 __asm__("$18") = arg1; + register unsigned long __r19 __asm__("$19") = arg2; + __asm__ __volatile__( + "sys_call %5" + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r19), "=r"(__r0) + : "i"(HMC_hcall), "0"(__r16), "1"(__r17), "2"(__r18), "3"(__r19) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} +#endif /* _ASM_SW64_KVM_PARA_H */ diff --git a/arch/sw_64/include/asm/kvm_timer.h b/arch/sw_64/include/asm/kvm_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..8080873c684f82b0e0c06b7e55c72ba475f497cd --- /dev/null +++ b/arch/sw_64/include/asm/kvm_timer.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_TIMER_H +#define _ASM_SW64_KVM_TIMER_H + +void set_timer(struct kvm_vcpu *vcpu, unsigned long delta); +void set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); +enum hrtimer_restart clockdev_fn(struct hrtimer *timer); + +#endif /* _ASM_SW64_KVM_TIMER_H */ diff --git a/arch/sw_64/include/asm/linkage.h b/arch/sw_64/include/asm/linkage.h new file mode 100644 index 0000000000000000000000000000000000000000..1721753b4d98a5b0d2621a448761ee18e4038b98 --- /dev/null +++ b/arch/sw_64/include/asm/linkage.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_LINKAGE_H +#define _ASM_SW64_LINKAGE_H + +#define 
cond_syscall(x) asm(".weak\t" #x "\n" #x " = sys_ni_syscall") +#define SYSCALL_ALIAS(alias, name) \ + asm (#alias " = " #name "\n\t.globl " #alias) + +#define SYM_END(name, sym_type) \ + .type name sym_type ASM_NL \ + .size name, .-name + +#endif /* _ASM_SW64_LINKAGE_H */ diff --git a/arch/sw_64/include/asm/livepatch.h b/arch/sw_64/include/asm/livepatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1feec0f6be76ddad2c1e65e0bfacf3d511510af0 --- /dev/null +++ b/arch/sw_64/include/asm/livepatch.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * livepatch.h - sw64-specific Kernel Live Patching Core + */ + +#ifndef _ASM_SW64_LIVEPATCH_H +#define _ASM_SW64_LIVEPATCH_H + +#include + +static inline int klp_check_compiler_support(void) +{ + return 0; +} + +static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + regs->regs[27] = ip; + regs->regs[28] = ip; +} + +#endif /* _ASM_SW64_LIVEPATCH_H */ diff --git a/arch/sw_64/include/asm/memory.h b/arch/sw_64/include/asm/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..b2b7492ae477d81e92bc806e093ace0c4ade9c2f --- /dev/null +++ b/arch/sw_64/include/asm/memory.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MEMORY_H +#define _ASM_SW64_MEMORY_H + +#ifdef CONFIG_NUMA +#include +#endif + +#define MIN_MEMORY_BLOCK_SIZE_VM_MEMHP (1UL << 30) +#define NODE0_START (_TEXT_START - __START_KERNEL_map) + +#define MAX_PHYSMEM_BITS 48 + +struct mem_desc_t { + unsigned long phys_base; /* start address of physical memory */ + unsigned long phys_size; /* size of physical memory */ + phys_addr_t base; /* start address of memory managed by kernel */ + phys_addr_t size; /* size of memory managed by kernel */ +}; +extern struct mem_desc_t mem_desc; + +struct numa_node_desc_t { + phys_addr_t base; + phys_addr_t size; +}; +extern struct numa_node_desc_t numa_nodes_desc[]; + +void __init callback_init(void); +void __init mem_detect(void); +void __init sw64_memblock_init(void); +void __init zone_sizes_init(void); +void __init sw64_numa_init(void); +void __init sw64_memory_present(void); + +#endif /* _ASM_SW64_MEMORY_H */ diff --git a/arch/sw_64/include/asm/mmu.h b/arch/sw_64/include/asm/mmu.h new file mode 100644 index 0000000000000000000000000000000000000000..f24219fac654bb3381ea0e85e5faeeb3486d23d3 --- /dev/null +++ b/arch/sw_64/include/asm/mmu.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMU_H +#define _ASM_SW64_MMU_H + +/* The sw64 MMU context is one "unsigned long" ASID value per CPU */ +typedef struct { + unsigned long asid[NR_CPUS]; + void *vdso; +} mm_context_t; +#endif /* _ASM_SW64_MMU_H */ diff --git a/arch/sw_64/include/asm/mmu_context.h b/arch/sw_64/include/asm/mmu_context.h new file mode 100644 index 0000000000000000000000000000000000000000..420ad5f745be49541200a9c68ff22abf1f9731b0 --- /dev/null +++ b/arch/sw_64/include/asm/mmu_context.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMU_CONTEXT_H +#define _ASM_SW64_MMU_CONTEXT_H + +#include + +#include +#include + +/* + * The maximum number of ASIDs the processor supports.
+ */ + +#if defined(CONFIG_SUBARCH_C3B) || defined(CONFIG_SUBARCH_C4) +#define ASID_BITS 10 +#endif + +#include +#define last_asid(cpu) (cpu_data[cpu].last_asid) + +#define ASID_FIRST_VERSION (1UL << ASID_BITS) +#define ASID_MASK ((1UL << ASID_BITS) - 1) + +#define cpu_asid(cpu, mm) ((mm)->context.asid[cpu] & ASID_MASK) + +static inline bool asid_valid(struct mm_struct *mm, unsigned int cpu) +{ + return !((mm->context.asid[cpu] ^ last_asid(cpu)) & ~ASID_MASK); +} + +/* + * NOTE! The way this is set up, the high bits of the "last_asid" (and + * the "mm->context.asid[cpu]") are the ASID _version_ code. A version + * of 0 is always considered invalid, so to invalidate another process + * you only need to do "p->mm->context.asid[cpu] = 0". + * + * If we need more ASIDs than the processor has, we invalidate the old + * user TLBs (tbivp()) and start a new ASID version. That will force a + * new ASID for any other processes the next time they want to run. + */ + +static inline void __get_new_mm_context(struct mm_struct *mm, long cpu) +{ + unsigned long asid = last_asid(cpu); + + if (!(++asid & ASID_MASK)) + tbivp(); + mm->context.asid[cpu] = last_asid(cpu) = asid; + +} + +static inline void +switch_mm_irqs_off(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next) +{ + /* Check if our ASID is of an older version, and thus invalid. */ + unsigned long asid, ptbr; + long cpu = smp_processor_id(); + + if (!asid_valid(next_mm, cpu)) + __get_new_mm_context(next_mm, cpu); + + /* Update CSR:UPN and CSR:PTBR. Another thread may have allocated + * a new mm->context[asid] (via flush_tlb_mm) without the ASID serial + * number wrapping. We have no way to detect when this is needed. + */ + asid = cpu_asid(cpu, next_mm); + ptbr = virt_to_pfn(next_mm->pgd); + load_mm(asid, ptbr); + cpumask_set_cpu(cpu, mm_cpumask(next_mm)); +} + +#define switch_mm_irqs_off switch_mm_irqs_off + +static inline void +switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm_irqs_off(prev_mm, next_mm, tsk); + local_irq_restore(flags); +} + +#define activate_mm(prev, next) switch_mm(prev, next, current) +#define deactivate_mm(tsk, mm) do { } while (0) + +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + int i; + + for_each_possible_cpu(i) + mm->context.asid[i] = 0; + return 0; +} + +static inline void destroy_context(struct mm_struct *mm) +{ + /* Nothing to do.
*/ +} + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ + return 0; +} + +static inline void arch_exit_mmap(struct mm_struct *mm) +{ +} + +static inline void arch_unmap(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool execute, + bool foreign) +{ + /* by default, allow everything */ + return true; +} +#endif /* _ASM_SW64_MMU_CONTEXT_H */ diff --git a/arch/sw_64/include/asm/mmzone.h b/arch/sw_64/include/asm/mmzone.h new file mode 100644 index 0000000000000000000000000000000000000000..363e2bc98a95b8b4acf2e1bb69ee19392f8d90df --- /dev/null +++ b/arch/sw_64/include/asm/mmzone.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMZONE_H +#define _ASM_SW64_MMZONE_H + +#include + +/* + * Following are macros that are specific to this numa platform. + */ + +extern pg_data_t *node_data[]; + +#ifdef CONFIG_NUMA +#define NODE_DATA(nid) (node_data[(nid)]) +#endif + +#endif /* _ASM_SW64_MMZONE_H */ diff --git a/arch/sw_64/include/asm/module.h b/arch/sw_64/include/asm/module.h new file mode 100644 index 0000000000000000000000000000000000000000..d1663aab4097ab2cca1d2bb3ae4496245c8d0ea7 --- /dev/null +++ b/arch/sw_64/include/asm/module.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MODULE_H +#define _ASM_SW64_MODULE_H + +#include + +struct mod_arch_specific { + unsigned int gotsecindex; +}; + +#define ARCH_SHF_SMALL SHF_SW64_GPREL + +#ifdef MODULE +asm(".section .got, \"aw\", @progbits; .align 3; .previous"); +#endif + +#endif /* _ASM_SW64_MODULE_H */ diff --git a/arch/sw_64/include/asm/msi.h b/arch/sw_64/include/asm/msi.h new file mode 100644 index 0000000000000000000000000000000000000000..dbf6f81843beb7ff37f9031bd5943dc09ecea698 --- /dev/null +++ b/arch/sw_64/include/asm/msi.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MSI_H +#define _ASM_SW64_MSI_H + +#include + +#define NR_VECTORS NR_IRQS +#define NR_IRQ_VECTORS NR_IRQS + +#define AUTO_ASSIGN 0 + +#define LAST_DEVICE_VECTOR 31 + +#define MSI_OFFSET 0x44 + +#define NUM_MSI_IRQS 256 + +#define PERCPU_MSI_IRQS 256 + +#define VT_MSIX_MSG_ADDR (0x8000fee00000UL) +#define VT_MSIX_ADDR_DEST_ID_SHIFT 12 +#define VT_MSIX_ADDR_DEST_ID_MASK (0xff << VT_MSIX_ADDR_DEST_ID_SHIFT) +#define VT_MSIX_ADDR_DEST_ID(dest) \ + (((dest) << VT_MSIX_ADDR_DEST_ID_SHIFT) & VT_MSIX_ADDR_DEST_ID_MASK) + + +#ifdef CONFIG_PCI_MSI +extern void vt_sw64_vector_free_irqs(unsigned int virq, unsigned int nr_irqs); +extern int sw64_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type); +extern bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector); +extern int msi_compose_msg(unsigned int irq, struct msi_msg *msg); +extern void sw64_irq_noop(struct irq_data *d); +extern struct irq_chip sw64_irq_chip; +extern void handle_pci_msi_interrupt(unsigned long type, + unsigned long vector, + unsigned long pci_msi1_addr); + +#define MSI_ADDR_BASE_HI 0 +#define MSI_ADDR_BASE_LO 0x91abc0 + +#define MSI_ADDR_SHIFT 20 +#define MSI_ADDR_DEST_ID_SHIFT 10 + +struct sw64_msi_chip_data { + spinlock_t cdata_lock; + union { + unsigned long msi_config; + unsigned long msiaddr; + }; + unsigned long rc_node; + unsigned long rc_index; + 
unsigned int msi_config_index; + unsigned int dst_cpu; + unsigned int vector; + unsigned int prev_cpu; + unsigned int prev_vector; + unsigned int multi_msi; + bool move_in_progress; +}; + +static inline int rcid_to_msicid(int rcid) +{ + int msicid = 0; + + msicid |= (rcid_to_domain_id(rcid) << 7); + msicid |= (rcid_to_thread_id(rcid) << 6); + msicid |= (rcid_to_core_id(rcid) << 0); + + return msicid; +} + +extern void arch_init_msi_domain(struct irq_domain *domain); +enum irq_alloc_type { + IRQ_ALLOC_TYPE_MSI, + IRQ_ALLOC_TYPE_MSIX, + IRQ_ALLOC_TYPE_INTX, +}; +struct irq_alloc_info { + struct msi_desc *desc; + enum irq_alloc_type type; + struct pci_dev *msi_dev; + irq_hw_number_t hwirq; +}; +typedef struct irq_alloc_info msi_alloc_info_t; +#else /* !CONFIG_PCI_MSI */ +static inline void handle_pci_msi_interrupt(unsigned long type, + unsigned long vector, unsigned long pci_msi1_addr) +{ + pr_warn("SW arch disable CONFIG_PCI_MSI option.\n"); +} +#endif /* CONFIG_PCI_MSI */ +#endif /* _ASM_SW64_MSI_H */ diff --git a/arch/sw_64/include/asm/numa.h b/arch/sw_64/include/asm/numa.h new file mode 100644 index 0000000000000000000000000000000000000000..a2e3171caff1a5d666ae2e93a05ce851405160d2 --- /dev/null +++ b/arch/sw_64/include/asm/numa.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_NUMA_H +#define _ASM_SW64_NUMA_H + +#include +#include + +#ifdef CONFIG_NUMA +extern nodemask_t numa_nodes_parsed __initdata; +extern int numa_off; + +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[NR_NODE_MEMBLKS]; +}; +extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); +extern void numa_clear_node(unsigned int cpu); +extern void __init numa_set_distance(int from, int to, int distance); +extern void __init early_map_cpu_to_node(unsigned int cpu, int nid); + +#else /* CONFIG_NUMA */ + +static inline void numa_clear_node(unsigned int cpu) { } +static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { } + +#endif /* CONFIG_NUMA */ + +#endif /* _ASM_SW64_NUMA_H */ diff --git a/arch/sw_64/include/asm/page.h b/arch/sw_64/include/asm/page.h new file mode 100644 index 0000000000000000000000000000000000000000..68b4f2fc1b488c4fe85fc9de47f54dec6d57bb86 --- /dev/null +++ b/arch/sw_64/include/asm/page.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PAGE_H +#define _ASM_SW64_PAGE_H + +#include +#include + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 13 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) + +#define HPAGE_SHIFT PMD_SHIFT +#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +#define HUGE_MAX_HSTATE 2 + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +extern void clear_page(void *page); +#define clear_user_page(page, vaddr, pg) clear_page(page) + +#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ + alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE + +extern void copy_page(void *_to, void *_from); +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +typedef struct page *pgtable_t; + +extern unsigned long __phys_addr(unsigned long addr); +#ifdef CONFIG_SUBARCH_C3B +extern unsigned long __boot_phys_addr(unsigned long addr); +#else +#define __boot_phys_addr(x) __phys_addr(x) +#endif + 
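+/* + * Worked example of the translation helpers (addresses hypothetical): + * with PAGE_SHIFT = 13 the kernel uses 8 KiB pages, so the macros + * defined below compose as + * + * virt_to_pfn(v) == __pa(v) >> PAGE_SHIFT + * pfn_to_virt(pfn) == __va(pfn << PAGE_SHIFT) + * + * and pfn_to_virt(virt_to_pfn(v)) == v for any page-aligned + * direct-mapped address v. + */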
+#endif /* !__ASSEMBLY__ */ + +#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) + +#include + +#define __START_KERNEL_map PAGE_OFFSET + +#define __pa(x) __phys_addr((unsigned long)(x)) +#define __va(x) ((void *)((unsigned long) (x) | PAGE_OFFSET)) + +#define __boot_pa(x) __boot_phys_addr((unsigned long)(x)) +#define __boot_va(x) __va(x) + +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) + +#define virt_to_pfn(vaddr) (PHYS_PFN(__pa(vaddr))) +#define pfn_to_virt(pfn) (__va(PFN_PHYS(pfn))) + +#ifdef CONFIG_FLATMEM +#define pfn_valid(pfn) ((pfn) < max_mapnr) +#endif /* CONFIG_FLATMEM */ + +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC +#include +#include +#endif + +#endif /* _ASM_SW64_PAGE_H */ diff --git a/arch/sw_64/include/asm/pci.h b/arch/sw_64/include/asm/pci.h new file mode 100644 index 0000000000000000000000000000000000000000..21bfcef21c5f4fa9c88072cea2e638e08dc4bcb0 --- /dev/null +++ b/arch/sw_64/include/asm/pci.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PCI_H +#define _ASM_SW64_PCI_H + +#ifdef __KERNEL__ + +#include +#include +#include + +/* + * The following structure is used to manage multiple PCI busses. + */ + +struct pci_dev; +struct pci_bus; +struct resource; +struct sunway_iommu; +struct page; + +struct piu_saved { + unsigned long piuconfig0; + unsigned long piuconfig1; + unsigned long epdmabar; + unsigned long msiaddr; + unsigned long msiconfig[256]; + unsigned long iommuexcpt_ctrl; + unsigned long dtbaseaddr; + unsigned long hpintconfig; + unsigned long pmeintconfig; + unsigned long aererrintconfig; + unsigned long intaconfig; + unsigned long intbconfig; + unsigned long intcconfig; + unsigned long intdconfig; +}; + +/* A controller. Used to manage multiple PCI busses. */ +struct pci_controller { + struct pci_controller *next; + struct pci_bus *bus; + struct resource *io_space; + struct resource *mem_space; + struct resource *pre_mem_space; + struct resource *busn_space; + unsigned long sparse_mem_base; + unsigned long dense_mem_base; + unsigned long sparse_io_base; + unsigned long dense_io_base; + + /* This one's for the kernel only. It's in KSEG somewhere. */ + void __iomem *ep_config_space_base; + void __iomem *rc_config_space_base; + + unsigned long index; + unsigned long node; + DECLARE_BITMAP(piu_msiconfig, 256); + int int_irq; + int service_irq; + /* For compatibility with current (as of July 2003) pciutils + * and XFree86. Eventually will be removed. + */ + unsigned int need_domain_info; + bool iommu_enable; + struct sunway_iommu *pci_iommu; + int first_busno; + int last_busno; + int self_busno; + void *sysdata; +}; + +/* Override the logic in pci_scan_bus for skipping already-configured + * bus numbers. 
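Because __va() simply ORs in the PAGE_OFFSET defined in pgtable-4level.h, a page.h round trip is pure arithmetic. A sketch, assuming __phys_addr() strips PAGE_OFFSET for direct-mapped addresses, which its pairing with __va() suggests::

    void *p = __va(0x124000UL);  /* -> 0xfff0000000124000 */

    BUG_ON(__pa(p) != 0x124000UL);                          /* back to the physical address */
    BUG_ON(virt_to_pfn(p) != (0x124000UL >> PAGE_SHIFT));   /* pfn 0x92 with 8 KiB pages    */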
+ */ + +#define pcibios_assign_all_busses() (pci_has_flag(PCI_REASSIGN_ALL_BUS)) + +#define PCIBIOS_MIN_IO 0 +#define PCIBIOS_MIN_MEM 0 + +extern void __init sw64_init_pci(void); +extern void __init sw64_device_interrupt(unsigned long vector); +extern void __init sw64_init_irq(void); +extern void __init sw64_init_arch(void); +extern struct pci_ops sw64_pci_ops; +extern int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); +extern struct pci_controller *hose_head; +#ifdef CONFIG_PCI_SW64 +extern void __init setup_chip_pci_ops(void); +#else +#define setup_chip_pci_ops() do { } while (0) +#endif + +extern struct pci_controller *pci_bus_to_pci_controller(const struct pci_bus *bus); +extern struct pci_controller *bus_num_to_pci_controller(unsigned long bus_num); + +extern void sw64_pci_root_bridge_prepare(struct pci_host_bridge *bridge); +extern void sw64_pci_root_bridge_scan_finish_up(struct pci_host_bridge *bridge); +extern int sw64_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); + +#ifdef CONFIG_PCI_DOMAINS +static inline int pci_proc_domain(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + return hose->need_domain_info; +} +#endif + +#ifdef CONFIG_NUMA +static inline int __pcibus_to_node(const struct pci_bus *bus) +{ + struct pci_controller *hose; + + hose = pci_bus_to_pci_controller(bus); + if (!node_online(hose->node)) + return next_node_in(hose->node, node_online_map); + else + return hose->node; +} +#define pcibus_to_node(bus) __pcibus_to_node(bus) +#endif + +#endif /* __KERNEL__ */ + +/* Values for the `which' argument to sys_pciconfig_iobase. */ +#define IOBASE_HOSE 0 +#define IOBASE_SPARSE_MEM 1 +#define IOBASE_DENSE_MEM 2 +#define IOBASE_SPARSE_IO 3 +#define IOBASE_DENSE_IO 4 +#define IOBASE_ROOT_BUS 5 +#define IOBASE_FROM_HOSE 0x10000 + +extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, + size_t count); +extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, + size_t count); +extern int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state); +extern void pci_adjust_legacy_attr(struct pci_bus *bus, + enum pci_mmap_state mmap_type); +#define HAVE_PCI_LEGACY 1 + +extern int pci_create_resource_files(struct pci_dev *dev); +extern void pci_remove_resource_files(struct pci_dev *dev); +extern void __init reserve_mem_for_pci(void); +extern int chip_pcie_configure(struct pci_controller *hose); + +#define PCI_VENDOR_ID_JN 0x5656 +#define PCI_DEVICE_ID_SW64_ROOT_BRIDGE 0x3231 +#define PCI_DEVICE_ID_JN_PCIESW 0x1000 +#define PCI_DEVICE_ID_JN_PCIEUSIP 0x1200 +#define PCI_DEVICE_ID_JN_PCIE2PCI 0x1314 + +#define NR_IRQ_VECTORS NR_IRQS + +#define LAST_DEVICE_VECTOR 31 + +#define PCITODMA_OFFSET 0x0 /*0 offset*/ + +#endif /* _ASM_SW64_PCI_H */ diff --git a/arch/sw_64/include/asm/pci_impl.h b/arch/sw_64/include/asm/pci_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..aa17a69b73f88330ab175d4921b662f306ab810d --- /dev/null +++ b/arch/sw_64/include/asm/pci_impl.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains declarations and inline functions for interfacing + * with the PCI initialization routines. + */ +#ifndef _SW64_KERNEL_PCI_IMPL_H +#define _SW64_KERNEL_PCI_IMPL_H + +#include + +struct pci_dev; +struct pci_controller; + +/* The hose list. 
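pci_impl.h chains every controller through its next pointer. A typical lookup over the list headed by hose_head (declared just below), as a sketch in which the index being matched is a hypothetical caller-supplied value::

    struct pci_controller *hose;

    for (hose = hose_head; hose; hose = hose->next)
        if (hose->index == index)  /* match on the logical RC index */
            break;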
*/
+extern struct pci_controller *hose_head, **hose_tail;
+
+extern void common_init_pci(void);
+extern struct pci_controller *alloc_pci_controller(void);
+extern struct resource *alloc_resource(void);
+
+extern unsigned long size_for_memory(unsigned long max);
+
+extern const struct dma_map_ops sw64_dma_direct_ops;
+
+extern struct cma *sw64_kvm_cma;
+extern struct gen_pool *sw64_kvm_pool;
+#endif /* _SW64_KERNEL_PCI_IMPL_H */
diff --git a/arch/sw_64/include/asm/percpu.h b/arch/sw_64/include/asm/percpu.h
new file mode 100644
index 0000000000000000000000000000000000000000..3acdf36bcf5590a7f148db537edbd465ea871839
--- /dev/null
+++ b/arch/sw_64/include/asm/percpu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_PERCPU_H
+#define _ASM_SW64_PERCPU_H
+
+/*
+ * To calculate addresses of locally defined variables, GCC uses
+ * 32-bit displacement from the GP, which doesn't work for per cpu
+ * variables in modules, as an offset to the kernel per cpu area is
+ * way above 4G.
+ *
+ * Always use weak definitions for percpu variables in modules.
+ */
+#if defined(MODULE) && defined(CONFIG_SMP)
+#define ARCH_NEEDS_WEAK_PER_CPU
+#endif
+
+#include
+
+#endif /* _ASM_SW64_PERCPU_H */
diff --git a/arch/sw_64/include/asm/perf_event.h b/arch/sw_64/include/asm/perf_event.h
new file mode 100644
index 0000000000000000000000000000000000000000..dc55a361babd015aa92fbf7b0387f1e2beeecc40
--- /dev/null
+++ b/arch/sw_64/include/asm/perf_event.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_PERF_EVENT_H
+#define _ASM_SW64_PERF_EVENT_H
+
+#include
+#include
+
+#ifdef CONFIG_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
+#endif
+
+#endif /* _ASM_SW64_PERF_EVENT_H */
diff --git a/arch/sw_64/include/asm/pgalloc.h b/arch/sw_64/include/asm/pgalloc.h
new file mode 100644
index 0000000000000000000000000000000000000000..1cc03e3be5b67419b80f00891cd9a27d8f0bcd40
--- /dev/null
+++ b/arch/sw_64/include/asm/pgalloc.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_PGALLOC_H
+#define _ASM_SW64_PGALLOC_H
+
+#include
+#include
+#include /* for pte_{alloc,free}_one */
+
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */ + +static inline void +pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) +{ + unsigned long pfn = page_to_pfn(pte); + + set_pmd(pmd, __pmd((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) +{ + unsigned long pfn = virt_to_pfn(pte); + + set_pmd(pmd, __pmd((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + unsigned long pfn = virt_to_pfn(pmd); + + set_pud(pud, __pud((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) +{ + unsigned long pfn = virt_to_pfn(pud); + + set_p4d(p4d, __p4d((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +extern pgd_t *pgd_alloc(struct mm_struct *mm); + +#define check_pgt_cache() do { } while (0) + +#endif /* _ASM_SW64_PGALLOC_H */ diff --git a/arch/sw_64/include/asm/pgtable-4level.h b/arch/sw_64/include/asm/pgtable-4level.h new file mode 100644 index 0000000000000000000000000000000000000000..719e2c5377e349b8765a947a57f273835f2dc52b --- /dev/null +++ b/arch/sw_64/include/asm/pgtable-4level.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PGTABLE_4LEVEL_H +#define _ASM_SW64_PGTABLE_4LEVEL_H + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +/* + * These are used to make use of C type-checking.. + */ +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pud; } pud_t; +typedef struct { unsigned long pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd) +#define pud_val(x) ((x).pud) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) }) +#define __pmd(x) ((pmd_t) { (x) }) +#define __pud(x) ((pud_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) +#endif /* !__ASSEMBLY__ */ + +#define PAGE_OFFSET 0xfff0000000000000 + +#endif +#endif /* _ASM_SW64_PGTABLE_4LEVEL_H */ diff --git a/arch/sw_64/include/asm/pgtable.h b/arch/sw_64/include/asm/pgtable.h new file mode 100644 index 0000000000000000000000000000000000000000..0b1f825eb74c6e7a01cd80ebf11785a3d2d6a665 --- /dev/null +++ b/arch/sw_64/include/asm/pgtable.h @@ -0,0 +1,789 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PGTABLE_H +#define _ASM_SW64_PGTABLE_H + + +#include + +/* + * This file contains the functions and defines necessary to modify and use + * the sw64 page table tree. + * + * This hopefully works with any standard sw64 page-size, as defined + * in (currently 8192). 
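All four populate helpers in pgalloc.h build the directory entry the same way: the child table's frame number goes above _PFN_SHIFT and _PAGE_TABLE supplies the valid/dirty/accessed software bits. A sketch of the encoding, using the C3B value of _PFN_SHIFT (28) and an arbitrary pfn::

    unsigned long pfn = 0x1234;                       /* frame holding the child table  */
    unsigned long entry = (pfn << 28) | _PAGE_TABLE;  /* what set_pmd()/set_pud() store */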
+ */
+#include
+#include
+
+#include
+#include
+#include /* For TASK_SIZE */
+#include
+
+struct mm_struct;
+struct vm_area_struct;
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmdval)
+{
+ set_pmd(pmdp, pmdval);
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+ *pudp = pud;
+}
+
+static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
+{
+ *p4dp = p4d;
+}
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT (PAGE_SHIFT + 3 * (PAGE_SHIFT - 3))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+/* PUD_SHIFT determines the size of the area a third-level page table can map */
+#define PUD_SHIFT (PAGE_SHIFT + 2 * (PAGE_SHIFT - 3))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/* PMD_SHIFT determines the size of the area a second-level page table can map */
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+
+#define CONT_PMD_SHIFT 6
+#define CONT_PMDS (1 << CONT_PMD_SHIFT)
+#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
+#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1))
+
+/*
+ * Entries per page directory level: the sw64 is four-level, with
+ * all levels having a one-page page table.
+ */
+#define PTRS_PER_PTE (1UL << (PAGE_SHIFT - 3))
+#define PTRS_PER_PMD (1UL << (PAGE_SHIFT - 3))
+#define PTRS_PER_PGD (1UL << (PAGE_SHIFT - 3))
+#define PTRS_PER_PUD (1UL << (PAGE_SHIFT - 3))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+
+/* Number of pointers that fit on a page: this will go away. */
+#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT - 3))
+
+#define VMALLOC_START (-2 * PGDIR_SIZE)
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+#define VMALLOC_END (-PGDIR_SIZE)
+#else
+#define VMEMMAP_END (-PGDIR_SIZE)
+#define vmemmap ((struct page *)VMEMMAP_END - (1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT)))
+#define VMALLOC_END ((unsigned long)vmemmap)
+#endif
+
+/*
+ * HMcode-imposed page table bits
+ */
+#if defined(CONFIG_SUBARCH_C3B)
+
+#define _PAGE_VALID 0x0001
+#define _PAGE_PRESENT _PAGE_VALID
+#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */
+#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */
+#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */
+#define _PAGE_ASM 0x0010
+#define _PAGE_CONT 0x0020 /* used for 256M page size bit */
+#define _PAGE_LEAF 0x0040 /* used for 8M page size bit */
+#define _PAGE_PROTNONE 0x0080 /* used for numa page balancing */
+#define _PAGE_SPECIAL 0x0100
+#define _PAGE_KRE 0x0400 /* xxx - see below on the "accessed" bit */
+#define _PAGE_URE 0x0800 /* xxx */
+#define _PAGE_KWE 0x4000 /* used to do the dirty bit in software */
+#define _PAGE_UWE 0x8000 /* used to do the dirty bit in software */
+
+/* .. and these are ours ... */
+#define _PAGE_DIRTY 0x20000
+#define _PAGE_ACCESSED 0x40000
+
+#define _PAGE_SPLITTING 0x200000 /* For Transparent Huge Page */
+#define _PAGE_DEVMAP 0x400000 /* For ZONE DEVICE page */
+
+#define _PAGE_BIT_FOW 2 /* bit of _PAGE_FOW */
+#define _PAGE_BIT_ACCESSED 18 /* bit of _PAGE_ACCESSED */
+#define _PAGE_BIT_SPLITTING 21 /* bit of _PAGE_SPLITTING */
+#define _PAGE_BIT_DEVMAP 22 /* bit of _PAGE_DEVMAP */
+/*
+ * NOTE!
The "accessed" bit isn't necessarily exact: it can be kept exactly + * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it. + * Under Linux/sw64, the "accessed" bit just means "read", and I'll just use + * the KRE/URE bits to watch for it. That way we don't need to overload the + * KWE/UWE bits with both handling dirty and accessed. + * + * Note that the kernel uses the accessed bit just to check whether to page + * out a page or not, so it doesn't have to be exact anyway. + */ + +/* Used for swap PTEs only. */ +#define _PAGE_SWP_EXCLUSIVE _BITUL(5) + +#define __DIRTY_BITS (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE) +#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE) + +#define _PFN_SHIFT 28 + +/* + * All the normal masks have the "page accessed" bits on, as any time they are used, + * the page is accessed. They are cleared only by the page-out routines + */ +#define PAGE_NONE __pgprot(__ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE | _PAGE_PROTNONE) +#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) +#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) +#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) +#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) +#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) + +#define page_valid_kern(x) (0) + +#elif defined(CONFIG_SUBARCH_C4) + +#define _PAGE_VALID 0x0001 +#define _PAGE_PRESENT _PAGE_VALID +#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ +#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ +#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ +#define _PAGE_FIXED 0x0010 +#define _PAGE_CONT 0x0020 /* used for 512M page size bit*/ +#define _PAGE_LEAF 0x0040 /* used for huge page bit */ +#define _PAGE_PCD 0x0080 /* used for page cache disabled */ + +/* and these are sw definition */ +#define _PAGE_WCD 0x0100 +#define _PAGE_ACCESSED 0x0200 +#define _PAGE_SPLITTING 0x0400 /* For Transparent Huge Page */ +#define _PAGE_SPECIAL 0x0800 +#define _PAGE_DEVMAP 0x1000 /* For ZONE DEVICE page */ +#define _PAGE_KERN 0x2000 +#define _PAGE_DIRTY _BITUL(62) +#define _PAGE_PROTNONE _BITUL(63) +#define _PAGE_BIT_FOW 2 /* bit of _PAGE_FOW */ +#define _PAGE_BIT_ACCESSED 9 /* bit of _PAGE_ACCESSED */ +#define _PAGE_BIT_SPLITTING 10 /* bit of _PAGE_SPLITTING */ +#define _PAGE_BIT_DEVMAP 12 /* bit of _PAGE_DEVMAP */ + +/* Used for swap PTEs only. */ +#define _PAGE_SWP_EXCLUSIVE _BITUL(5) + +#define __DIRTY_BITS _PAGE_DIRTY +#define __ACCESS_BITS _PAGE_ACCESSED + +#define _PFN_SHIFT 24 + +/* + * All the normal masks have the "page accessed" bits on, as any time they are used, + * the page is accessed. 
They are cleared only by the page-out routines + */ +#define PAGE_NONE __pgprot(__ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE | _PAGE_LEAF | _PAGE_PROTNONE) +#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_LEAF) +#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_LEAF) +#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_LEAF) +#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_KERN | _PAGE_LEAF) +#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_LEAF | (x)) + +#define page_valid_kern(x) ((x & (_PAGE_VALID | _PAGE_KERN)) == (_PAGE_VALID | _PAGE_KERN)) +#endif + +#define PFN_PTE_SHIFT _PFN_SHIFT + +#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT) +#define _PFN_MASK (GENMASK(_PFN_BITS - 1, 0) << _PFN_SHIFT) + +#define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS) +#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS | _PAGE_SPECIAL | _PAGE_LEAF | _PAGE_CONT) + +#define _PAGE_P(x) _PAGE_NORMAL((x) | _PAGE_FOW) +#define _PAGE_S(x) _PAGE_NORMAL(x) + +/* + * pgprot_noncached() is only for infiniband pci support, and a real + * implementation for RAM would be more complicated. + */ +#define pgprot_noncached(prot) (prot) + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + *ptep = pteval; + + if (page_valid_kern(pte_val(pteval))) { + mb(); + if ((pte_val(pteval) & _PAGE_FOE) == 0) + imemb(); + } +} + +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) +{ + pte_t pte; + + pte_val(pte) = (pfn << _PFN_SHIFT) | pgprot_val(prot); + return pte; +} + +static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot) +{ + pmd_t pmd; + + pmd_val(pmd) = (pfn << _PFN_SHIFT) | pgprot_val(prot); + return pmd; +} +static inline pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot) +{ + pud_t pud; + + pud_val(pud) = (pfn << _PFN_SHIFT) | pgprot_val(pgprot); + return pud; +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); + return pte; +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot); + return pmd; +} + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
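pfn_pte()/pfn_pmd()/pfn_pud() above are the encode half; the pte_pfn() family defined just below decodes the same layout, so a frame number survives a round trip unchanged. A minimal sketch (0x1234 is an arbitrary pfn)::

    pte_t pte = pfn_pte(0x1234UL, PAGE_KERNEL);

    BUG_ON(pte_pfn(pte) != 0x1234UL);  /* protection bits never reach _PFN_MASK */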
+ */ +#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT) + +#define p4d_pfn(p4d) ((p4d_val(p4d) & _PFN_MASK) >> _PFN_SHIFT) +#define pud_pfn(pud) ((pud_val(pud) & _PFN_MASK) >> _PFN_SHIFT) +#define pmd_pfn(pmd) ((pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT) +#define pte_pfn(pte) ((pte_val(pte) & _PFN_MASK) >> _PFN_SHIFT) + +#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d)) +#define pud_page(pud) pfn_to_page(pud_pfn(pud)) +#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) + +#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) + +static inline pmd_t *pud_pgtable(pud_t pud) +{ + return (pmd_t *)pfn_to_virt(pud_pfn(pud)); +} + +static inline pud_t *p4d_pgtable(p4d_t p4d) +{ + return (pud_t *)pfn_to_virt(p4d_pfn(p4d)); +} + +static inline unsigned long p4d_page_vaddr(p4d_t p4d) +{ + return (unsigned long)pfn_to_virt(p4d_pfn(p4d)); +} + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + return (unsigned long)pfn_to_virt(pud_pfn(pud)); +} + +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long)pfn_to_virt(pmd_pfn(pmd)); +} + +static inline int pte_none(pte_t pte) +{ + return !pte_val(pte); +} + +static inline int pte_valid(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_VALID); +} + +static inline int pte_present(pte_t pte) +{ + return !!(pte_val(pte) & (_PAGE_VALID | _PAGE_PROTNONE)); +} + +static inline int pte_huge(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_LEAF); +} + +static inline void pte_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_val(*ptep) = 0; +} + +#define pte_accessible(mm, pte) \ + (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) + +static inline int pmd_none(pmd_t pmd) +{ + return !pmd_val(pmd); +} + +static inline int pmd_bad(pmd_t pmd) +{ + return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int pmd_present(pmd_t pmd) +{ + /* + * Checking for _PAGE_LEAF is needed too because + * split_huge_page will temporarily clear the valid bit (but + * the _PAGE_LEAF flag will remain set at all times while the + * _PAGE_VALID bit is clear). 
+ */ + return !!(pmd_val(pmd) & (_PAGE_VALID | _PAGE_PROTNONE | _PAGE_LEAF)); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + pmd_val(*pmdp) = 0; +} + +static inline int pmd_dirty(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_DIRTY); +} + +#define pmd_young pmd_young +static inline int pmd_young(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_ACCESSED); +} + +#define __HAVE_ARCH_PMD_WRITE +#define pmd_write pmd_write +static inline int pmd_write(pmd_t pmd) +{ + return !(pmd_val(pmd) & _PAGE_FOW); +} + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkinvalid(pmd_t pmd) +{ + pmd_val(pmd) &= ~(_PAGE_VALID | _PAGE_PROTNONE); + return pmd; +} + +static inline pmd_t pmd_mkclean(pmd_t pmd) +{ + pmd_val(pmd) &= ~(__DIRTY_BITS); + pmd_val(pmd) |= _PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + pmd_val(pmd) &= ~(__ACCESS_BITS); + return pmd; +} + +static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + pmd_val(pmd) |= __DIRTY_BITS; + return pmd; +} + +static inline pmd_t pmd_mkdevmap(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_DEVMAP; + return pmd; +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + pmd_val(pmd) |= __ACCESS_BITS; + return pmd; +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_LEAF; + return pmd; +} + +static inline pmd_t pmd_mkcont(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_CONT; + return pmd; +} + +static inline int pud_none(pud_t pud) +{ + return !pud_val(pud); +} + +static inline int pud_bad(pud_t pud) +{ + return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int pud_present(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_VALID); +} + +static inline void pud_clear(pud_t *pudp) +{ + pud_val(*pudp) = 0; +} + +static inline pud_t pud_mkhuge(pud_t pud) +{ + pud_val(pud) |= _PAGE_LEAF; + return pud; +} + +static inline int p4d_none(p4d_t p4d) +{ + return !p4d_val(p4d); +} + +static inline int p4d_bad(p4d_t p4d) +{ + return (p4d_val(p4d) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int p4d_present(p4d_t p4d) +{ + return !!(p4d_val(p4d) & _PAGE_VALID); +} + +static inline void p4d_clear(p4d_t *p4dp) +{ + p4d_val(*p4dp) = 0; +} + +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + +static inline pmd_t pte_pmd(pte_t pte) +{ + return __pmd(pte_val(pte)); +} + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
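This is also why pmd_mkinvalid() above strips only _PAGE_VALID and _PAGE_PROTNONE: a huge pmd in the middle of a split must stay visible to pmd_present(). A sketch::

    pmd = pmd_mkhuge(pmd);      /* huge mappings carry _PAGE_LEAF               */
    pmd = pmd_mkinvalid(pmd);   /* split clears _PAGE_VALID and _PAGE_PROTNONE  */
    BUG_ON(!pmd_present(pmd));  /* still present: _PAGE_LEAF alone keeps it so  */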
+ */ +static inline int pte_write(pte_t pte) +{ + return !(pte_val(pte) & _PAGE_FOW); +} + +static inline int pte_dirty(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_DIRTY); +} + +static inline int pte_young(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_ACCESSED); +} + +static inline int pte_special(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_SPECIAL); +} + +static inline int pte_cont(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_CONT); +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) |= _PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(__DIRTY_BITS); + pte_val(pte) |= _PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~(__ACCESS_BITS); + return pte; +} + +static inline pte_t pte_mkwrite_novma(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= __DIRTY_BITS; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= __ACCESS_BITS; + return pte; +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + pte_val(pte) |= _PAGE_LEAF; + return pte; +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} + +static inline pte_t pte_mkdevmap(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} + +#ifdef CONFIG_NUMA_BALANCING +/* + * See the comment in include/asm-generic/pgtable.h + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PROTNONE | _PAGE_VALID)) + == _PAGE_PROTNONE; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return (pmd_val(pmd) & (_PAGE_PROTNONE | _PAGE_VALID)) + == _PAGE_PROTNONE; +} +#endif + +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP +static inline int pte_devmap(pte_t a) +{ + return (pte_val(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP; +} +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + +/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/ +#define pmdp_establish generic_pmdp_establish + +static inline int pmd_trans_splitting(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_SPLITTING); +} + +static inline int pmd_trans_cont(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_CONT); +} + +static inline int pmd_trans_huge(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_LEAF); +} + +static inline int has_transparent_hugepage(void) +{ + return 1; +} + +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP +static inline int pmd_devmap(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_DEVMAP); +} + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline int pud_devmap(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_DEVMAP); +} +#else +static inline int pud_devmap(pud_t pud) +{ + return 0; +} +#endif + +static inline int pgd_devmap(pgd_t pgd) +{ + return 0; +} +#endif +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#define __HAVE_ARCH_PMDP_GET_AND_CLEAR +static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + unsigned long pmd_val = xchg(&pmdp->pmd, 0); + pmd_t pmd = (pmd_t){pmd_val}; + return pmd; +} + +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + set_bit(_PAGE_BIT_FOW, (unsigned long *)pmdp); +} + +#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), (prot)) + +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); + +#define 
__HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH +extern void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); + +extern pgd_t swapper_pg_dir[1024]; + +/* + * The sw64 doesn't have any external MMU info: the kernel page + * tables contain all the necessary information. + */ +#define update_mmu_cache(vma, address, ptep) do { } while (0) +#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) + +static inline void update_mmu_cache_range(struct vm_fault *vmf, + struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, unsigned int nr) +{ +} + +#if defined(CONFIG_SUBARCH_C3B) + +/* + * Encode and decode a swap entry: + * + * Format of swap PTE: + * bit 0: _PAGE_VALID (must be zero) + * bit 6: _PAGE_LEAF (must be zero) + * bit 7: _PAGE_PROTNONE (must be zero) + * bits 8-15: swap type + * bits 16-63: swap offset + */ +#define __SWP_TYPE_SHIFT 8 +#define __SWP_TYPE_BITS 8 + +#elif defined(CONFIG_SUBARCH_C4) + +/* + * Encode and decode a swap entry: + * + * Format of swap PTE: + * bit 0: _PAGE_VALID (must be zero) + * bit 6: _PAGE_LEAF (must be zero) + * bits 7-11: swap type + * bits 12-58: swap offset + * bit 63: _PAGE_PROTNONE (must be zero) + */ +#define __SWP_TYPE_SHIFT 7 +#define __SWP_TYPE_BITS 5 + +#endif + +#define __SWP_OFFSET_BITS 47 +#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1) +#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) +#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1) + +#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) +#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK) +#define __swp_entry(type, offset) \ + ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +static inline int pte_swp_exclusive(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_SWP_EXCLUSIVE); +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + pte_val(pte) |= _PAGE_SWP_EXCLUSIVE; + return pte; +} + +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE; + return pte; +} + +#define kern_addr_valid(addr) (1) + +#define pte_ERROR(e) \ + pr_err("%s: %d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + pr_err("%s: %d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) +#define pud_ERROR(e) \ + pr_err("%s: %d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e)) +#define pgd_ERROR(e) \ + pr_err("%s: %d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) +extern void paging_init(void); + +/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. 
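The C3B swap layout above can be checked by hand: the type sits in bits 8-15 and the offset starts at bit 16. A round-trip sketch with arbitrary values::

    swp_entry_t e = __swp_entry(3, 0x1000);      /* type 3, offset 0x1000   */

    BUG_ON(__swp_type(e) != 3);                  /* (val >> 8) & 0xff       */
    BUG_ON(__swp_offset(e) != 0x1000);           /* val >> 16               */
    BUG_ON(pte_present(__swp_entry_to_pte(e)));  /* bits 0 and 7 stay clear */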
*/ +#define HAVE_ARCH_UNMAPPED_AREA + +#endif /* _ASM_SW64_PGTABLE_H */ diff --git a/arch/sw_64/include/asm/platform.h b/arch/sw_64/include/asm/platform.h new file mode 100644 index 0000000000000000000000000000000000000000..ad54cdc772e13e8a5f9d8430406d11064dfb7ccb --- /dev/null +++ b/arch/sw_64/include/asm/platform.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PLATFORM_H +#define _ASM_SW64_PLATFORM_H + +#include +#if defined(CONFIG_UNCORE_XUELANG) +#include +#elif defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#ifdef CONFIG_EFI +#define BIOS_VERSION_GUID EFI_GUID(0xc47a23c3, 0xcebb, 0x4cc9, 0xa5, 0xe2, 0xde, 0xd0, 0x8f, 0xe4, 0x20, 0xb5) + +#define BIOS_SUPPORT_RESET_CLALLBACK(bios_version) ((bios_version) != NULL) + +extern unsigned long bios_version; + +#endif + +extern struct boot_params *sunway_boot_params; + +extern void sw64_halt(void); +extern void sw64_poweroff(void); +extern void sw64_restart(void); +extern void (*pm_restart)(void); +extern void (*pm_halt)(void); +extern int i2c_set_adapter(void); +extern void cpld_write(uint8_t slave_addr, uint8_t reg, uint8_t data); +extern void fix_jm585_reset(void); + +#endif /* _ASM_SW64_PLATFORM_H */ diff --git a/arch/sw_64/include/asm/pmc.h b/arch/sw_64/include/asm/pmc.h new file mode 100644 index 0000000000000000000000000000000000000000..d5672dd940a791c62e0edbe1e5e2356183cdd131 --- /dev/null +++ b/arch/sw_64/include/asm/pmc.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for use with the sw64 PMC interface. + */ + +#ifndef _ASM_SW64_PMC_H +#define _ASM_SW64_PMC_H + +#define PMC_PC0 0 +#define PMC_PC1 1 + +/* Following commands are implemented on all CPUs */ +#define PMC_CMD_DISABLE 0 +#define PMC_CMD_ENABLE 1 +#define PMC_CMD_EVENT_BASE 2 +#define PMC_CMD_PM 4 +#define PMC_CMD_READ 5 +#define PMC_CMD_READ_CLEAR 6 +#define PMC_CMD_WRITE_BASE 7 + +#define PMC_DISABLE_BASE 1 + +#define PMC_ENABLE_BASE 1 + +#define PC0_RAW_BASE 0x0 +#define PC1_RAW_BASE 0x100 +#define PC0_MAX 0xF +#define PC1_MAX 0x3D + +#define SW64_PERFCTRL_KM 2 +#define SW64_PERFCTRL_UM 3 +#define SW64_PERFCTRL_AM 4 + +/* pc0 events */ +#define PC0_INSTRUCTIONS 0x0 +#define PC0_BRANCH_INSTRUCTIONS 0x3 +#define PC0_CPU_CYCLES 0x8 +#define PC0_ITB_READ 0x9 +#define PC0_DTB_READ 0xA +#define PC0_ICACHE_READ 0xB +#define PC0_DCACHE_READ 0xC +#define PC0_SCACHE_REFERENCES 0xD + +/* pc1 events */ +#define PC1_BRANCH_MISSES 0xB +#define PC1_SCACHE_MISSES 0x10 +#define PC1_ICACHE_READ_MISSES 0x16 +#define PC1_ITB_MISSES 0x17 +#define PC1_DTB_SINGLE_MISSES 0x30 +#define PC1_DCACHE_MISSES 0x32 + +#define MAX_HWEVENTS 2 +#define PMC_COUNT_MASK ((1UL << 58) - 1) + +#endif /* _ASM_SW64_PMC_H */ diff --git a/arch/sw_64/include/asm/processor.h b/arch/sw_64/include/asm/processor.h new file mode 100644 index 0000000000000000000000000000000000000000..ec68fe6cc6f220e6db5912b4916c8884a05b5502 --- /dev/null +++ b/arch/sw_64/include/asm/processor.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/asm-sw64/processor.h + * + * Copyright (C) 1994 Linus Torvalds + */ + +#ifndef _ASM_SW64_PROCESSOR_H +#define _ASM_SW64_PROCESSOR_H + +#include /* for ADDR_LIMIT_32BIT */ +#include + +#define task_pt_regs(task) \ + ((struct pt_regs *) (task->stack + THREAD_SIZE) - 1) + +/* + * Returns current instruction pointer ("program counter"). 
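pmc.h above gives each counter a raw event window: counter 0 events run from PC0_RAW_BASE up to PC0_MAX, counter 1 events are offset by PC1_RAW_BASE. A hypothetical decoder for that convention, as a sketch; the real perf driver may well differ::

    static int decode_raw_event(unsigned long config, int *counter, int *event)
    {
        if (config >= PC1_RAW_BASE && config <= PC1_RAW_BASE + PC1_MAX) {
            *counter = PMC_PC1;               /* performance counter 1 */
            *event = config - PC1_RAW_BASE;
        } else if (config <= PC0_MAX) {
            *counter = PMC_PC0;               /* performance counter 0 */
            *event = config;
        } else {
            return -EINVAL;                   /* outside both windows  */
        }
        return 0;
    }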
+ */ +#define current_text_addr() \ + ({ void *__pc; __asm__ ("br %0, .+4" : "=r"(__pc)); __pc; }) + +/* + * SW64 does have an arch_pick_mmap_layout() + */ +#define HAVE_ARCH_PICK_MMAP_LAYOUT 1 + +/* + * We have a 52-bit user address space: 4PB user VM... + */ +#define TASK_SIZE (0x10000000000000UL) +#define UNMAPPED_BASE (TASK_SIZE >> 6) +#define STACK_TOP \ + (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) + +#define STACK_TOP_MAX 0x00120000000UL + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE \ + ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : UNMAPPED_BASE) + +struct thread_struct { + struct user_fpsimd_state fpstate; + /* Callee-saved registers */ + unsigned long ra; + unsigned long sp; + unsigned long s[7]; /* s0 ~ s6 */ +}; +#define INIT_THREAD { } + +struct task_struct; +struct pt_regs; + +/* Do necessary setup to start up a newly executed thread. */ +extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp); + +/* Free all resources held by a thread. */ +extern void release_thread(struct task_struct *dead_task); + +unsigned long __get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) + +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[30]) + +#define cpu_relax() barrier() + +#define ARCH_HAS_PREFETCH +#define ARCH_HAS_PREFETCHW +#define ARCH_HAS_SPINLOCK_PREFETCH + +#ifndef CONFIG_SMP +/* Nothing to prefetch. */ +#define spin_lock_prefetch(lock) do { } while (0) +#endif + +static inline void prefetch(const void *ptr) +{ + __builtin_prefetch(ptr, 0, 3); +} + +static inline void prefetchw(const void *ptr) +{ + __builtin_prefetch(ptr, 1, 3); +} + +#ifdef CONFIG_SMP +static inline void spin_lock_prefetch(const void *ptr) +{ + __builtin_prefetch(ptr, 1, 3); +} +#endif + +static inline void wait_for_interrupt(void) +{ + __asm__ __volatile__ ("halt"); +} +#endif /* _ASM_SW64_PROCESSOR_H */ diff --git a/arch/sw_64/include/asm/ptrace.h b/arch/sw_64/include/asm/ptrace.h new file mode 100644 index 0000000000000000000000000000000000000000..964f4fc730f2c62be11a3d8fa1feb4d40e9f79d6 --- /dev/null +++ b/arch/sw_64/include/asm/ptrace.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PTRACE_H +#define _ASM_SW64_PTRACE_H + +#include +#include +#include + +#define NO_SYSCALL _AC(-1, UL) + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +/* + * This struct defines the way the registers are stored on the + * kernel stack during a system call or other kernel entry + */ + +struct pt_regs { + union { + struct user_pt_regs user_regs; + struct { + unsigned long regs[31]; + unsigned long pc; + unsigned long ps; + }; + }; + unsigned long orig_r0; + unsigned long orig_r19; + /* These are saved by HMcode: */ + unsigned long hm_ps; + unsigned long hm_pc; + unsigned long hm_gp; + unsigned long hm_r16; + unsigned long hm_r17; + unsigned long hm_r18; +}; + +#define arch_has_single_step() (1) +#define user_mode(regs) (((regs)->ps & 8) != 0) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) +#define user_stack_pointer(pt_regs) ((pt_regs)->regs[30]) +#define kernel_stack_pointer(regs) ((unsigned long)((regs) + 1)) +#define instruction_pointer_set(regs, val) ((regs)->pc = val) + +#define force_successful_syscall_return() (current_pt_regs()->orig_r0 = NO_SYSCALL) + +#define MAX_REG_OFFSET (offsetof(struct pt_regs, orig_r0)) + +extern short regoffsets[]; + +extern unsigned long 
syscall_trace_enter(void); +extern void syscall_trace_leave(void); + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten + * @offset: offset of the register. + * + * regs_get_register returns the value of a register whose offset from @regs. + * The @offset is the offset of the register in struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. + */ +static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} +extern int regs_query_register_offset(const char *name); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); + +static inline int is_syscall_success(struct pt_regs *regs) +{ + return !regs->regs[19]; +} + +static inline long regs_return_value(struct pt_regs *regs) +{ + if ((regs->orig_r0 == NO_SYSCALL) || is_syscall_success(regs)) + return regs->regs[0]; + else + return -regs->regs[0]; +} + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_PTRACE_H */ diff --git a/arch/sw_64/include/asm/setup.h b/arch/sw_64/include/asm/setup.h new file mode 100644 index 0000000000000000000000000000000000000000..2d557b3495553fc66a9c33d9f0b191983a2db279 --- /dev/null +++ b/arch/sw_64/include/asm/setup.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SETUP_H +#define _ASM_SW64_SETUP_H + +#include + +/* + * We leave one page for the initial stack page, and one page for + * the initial process structure. Also, the console eats 3 MB for + * the initial bootloader (one of which we can reclaim later). + */ +#define BOOT_PCB 0x20000000 +#define BOOT_ADDR 0x20000000 +/* Remove when official MILO sources have ELF support: */ +#define BOOT_SIZE (16 * 1024) + +#define KERNEL_START_PHYS CONFIG_PHYSICAL_START +#define KERNEL_START (__START_KERNEL_map + CONFIG_PHYSICAL_START) + +/* INIT_STACK may be used for merging lwk to kernel*/ +#define INIT_STACK (KERNEL_START + 0x02000) + +/* + * This is setup by the secondary bootstrap loader. Because + * the zero page is zeroed out as soon as the vm system is + * initialized, we need to copy things out into a more permanent + * place. + */ +#define PARAM (KERNEL_START + 0x0A000) +#define COMMAND_LINE ((char *)(KERNEL_START + 0x0B000)) +#define INITRD_START (*(unsigned long *)(PARAM + 0x100)) +#define INITRD_SIZE (*(unsigned long *)(PARAM + 0x108)) +#define DTB_START (*(unsigned long *)(PARAM + 0x118)) + +#define _TEXT_START (KERNEL_START + 0x10000) + +#define COMMAND_LINE_OFF (0x10000UL - 0xB000UL) +#define INITRD_START_OFF (0x10000UL - 0xA100UL) +#define INITRD_SIZE_OFF (0x10000UL - 0xA108UL) + +/* Motherboard Configuration Tables */ +#define MB_CONFIG_START 0x908000 +#define MB_MCLK (MB_CONFIG_START + 0x1) +#define MB_EXTCLK (MB_CONFIG_START + 0x11) + +#ifndef __ASSEMBLY__ +#include +extern struct boot_params *sunway_boot_params; +#endif + +#endif /* _ASM_SW64_SETUP_H */ diff --git a/arch/sw_64/include/asm/sfp-machine.h b/arch/sw_64/include/asm/sfp-machine.h new file mode 100644 index 0000000000000000000000000000000000000000..156bebc9c515ea038efd05713ace96d618b623e0 --- /dev/null +++ b/arch/sw_64/include/asm/sfp-machine.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Machine-dependent software floating-point definitions. + * sw64 kernel version. + * Copyright (C) 1997,1998,1999 Free Software Foundation, Inc. 
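The ptrace.h offset helpers above are meant to be used together: resolve a register name once, then read it through regs_get_register(). A usage sketch, assuming regs_query_register_offset() follows the usual kernel convention of returning a negative value for unknown names and that the regoffsets[] table (which lives in ptrace.c) names registers "r0".."r30"::

    int off = regs_query_register_offset("r16");  /* first syscall argument */
    unsigned long arg0 = 0;

    if (off >= 0)
        arg0 = regs_get_register(regs, off);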
+ * This file is part of the GNU C Library.
+ * Contributed by Richard Henderson (rth@cygnus.com),
+ * Jakub Jelinek (jakub@redhat.com) and
+ * David S. Miller (davem@redhat.com).
+ */
+
+#ifndef _ASM_SW64_SFP_MACHINE_H
+#define _ASM_SW64_SFP_MACHINE_H
+
+#define _FP_W_TYPE_SIZE 64
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE long
+
+#define _FP_MUL_MEAT_S(R, X, Y) \
+ _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S, R, X, Y)
+#define _FP_MUL_MEAT_D(R, X, Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D, R, X, Y, umul_ppmm)
+#define _FP_MUL_MEAT_Q(R, X, Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q, R, X, Y, umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R, X, Y) _FP_DIV_MEAT_1_imm(S, R, X, Y, _FP_DIV_HELP_imm)
+#define _FP_DIV_MEAT_D(R, X, Y) _FP_DIV_MEAT_1_udiv(D, R, X, Y)
+#define _FP_DIV_MEAT_Q(R, X, Y) _FP_DIV_MEAT_2_udiv(Q, R, X, Y)
+
+#define _FP_NANFRAC_S _FP_QNANBIT_S
+#define _FP_NANFRAC_D _FP_QNANBIT_D
+#define _FP_NANFRAC_Q _FP_QNANBIT_Q
+#define _FP_NANSIGN_S 1
+#define _FP_NANSIGN_D 1
+#define _FP_NANSIGN_Q 1
+
+#define _FP_KEEPNANFRACP 1
+
+/* Sw_64 Architecture Handbook, 4.7.10.4 says that
+ * we should prefer any type of NaN in Fb, then Fa.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+do { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R, X); \
+ R##_c = FP_CLS_NAN; \
+} while (0)
+
+/* Obtain the current rounding mode. */
+#define FP_ROUNDMODE mode
+#define FP_RND_NEAREST (FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT)
+#define FP_RND_ZERO (FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT)
+#define FP_RND_PINF (FPCR_DYN_PLUS >> FPCR_DYN_SHIFT)
+#define FP_RND_MINF (FPCR_DYN_MINUS >> FPCR_DYN_SHIFT)
+
+/* Exception flags. */
+#define FP_EX_INVALID IEEE_TRAP_ENABLE_INV
+#define FP_EX_OVERFLOW IEEE_TRAP_ENABLE_OVF
+#define FP_EX_UNDERFLOW IEEE_TRAP_ENABLE_UNF
+#define FP_EX_DIVZERO IEEE_TRAP_ENABLE_DZE
+#define FP_EX_INEXACT IEEE_TRAP_ENABLE_INE
+#define FP_EX_DENORM IEEE_TRAP_ENABLE_DNO
+
+#define FP_DENORM_ZERO (swcr & IEEE_MAP_DMZ)
+
+/* We always write the results */
+#define FP_INHIBIT_RESULTS 0
+
+#endif /* _ASM_SW64_SFP_MACHINE_H */
diff --git a/arch/sw_64/include/asm/signal.h b/arch/sw_64/include/asm/signal.h
new file mode 100644
index 0000000000000000000000000000000000000000..4dc3b6510b8641b7721693d15ab16aba40e47aac
--- /dev/null
+++ b/arch/sw_64/include/asm/signal.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_SIGNAL_H
+#define _ASM_SW64_SIGNAL_H
+
+#include
+
+/* Digital Unix defines 64 signals. Most things should be clean enough
+ * to redefine this at will, if care is taken to make libc match.
+ */
+
+#define _NSIG 64
+#define _NSIG_BPW 64
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+struct odd_sigaction {
+ __sighandler_t sa_handler;
+ old_sigset_t sa_mask;
+ int sa_flags;
+};
+
+#include
+#endif /* _ASM_SW64_SIGNAL_H */
diff --git a/arch/sw_64/include/asm/smp.h b/arch/sw_64/include/asm/smp.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a2fcf62b30cabe5512a85946a31852d339e62b4
--- /dev/null
+++ b/arch/sw_64/include/asm/smp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_SMP_H
+#define _ASM_SW64_SMP_H
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used..
:-( */ + +extern cpumask_t core_start; + +static inline unsigned long +read_vpcr(void) +{ + register unsigned long __r0 __asm__("$0"); + __asm__ __volatile__( + "sys_call %1 #rvpcr" + : "=r"(__r0) + : "i" (0x39) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} + +#ifdef CONFIG_SMP +/* SMP initialization hook for setup_arch */ +void __init setup_smp(void); + +#include + +/* smp reset control block */ +struct smp_rcb_struct { + void (*restart_entry)(unsigned long args); + unsigned long restart_args; + unsigned long ready; + unsigned long init_done; +}; + +#define INIT_SMP_RCB ((struct smp_rcb_struct *) __va(0x820000UL)) + + +#ifdef GENERATING_ASM_OFFSETS +#define raw_smp_processor_id() (0) +#else +#include +#define raw_smp_processor_id() (*((unsigned int *)((void *)current + TASK_CPU))) +#endif +#define hard_smp_processor_id() cpu_to_rcid(raw_smp_processor_id()) + +/* The map from sequential logical cpu number to hard cid. */ +extern int __cpu_to_rcid[NR_CPUS]; +#define cpu_to_rcid(cpu) __cpu_to_rcid[cpu] +#define cpu_physical_id(cpu) __cpu_to_rcid[cpu] + +extern unsigned long tidle_pcb[NR_CPUS]; +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void); +void __cpu_die(unsigned int cpu); +#endif /* CONFIG_HOTPLUG_CPU */ + +#else /* CONFIG_SMP */ +#define hard_smp_processor_id() 0 +#define smp_call_function_on_cpu(func, info, wait, cpu) ({ 0; }) +/* The map from sequential logical cpu number to hard cid. */ +extern int __cpu_to_rcid[NR_CPUS]; +#define cpu_to_rcid(cpu) __cpu_to_rcid[0] +#define cpu_physical_id(cpu) __cpu_to_rcid[0] +#endif /* CONFIG_SMP */ + +#define NO_PROC_ID (-1) + +static inline void send_ipi(int cpu, unsigned long type) +{ + int rcid; + + rcid = cpu_to_rcid(cpu); + + if (is_in_guest()) + hcall(HCALL_IVI, rcid, type, 0); + else + sendii(rcid, type, 0); +} + +#define reset_cpu(cpu) send_ipi((cpu), II_RESET) + +#endif /* _ASM_SW64_SMP_H */ diff --git a/arch/sw_64/include/asm/socket.h b/arch/sw_64/include/asm/socket.h new file mode 100644 index 0000000000000000000000000000000000000000..e8704346777552f53e1c8f1bcf67223d40a9e317 --- /dev/null +++ b/arch/sw_64/include/asm/socket.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SOCKET_H +#define _ASM_SW64_SOCKET_H + +#include + +/* O_NONBLOCK clashes with the bits used for socket types. Therefore we + * have to define SOCK_NONBLOCK to a different value here. + */ +#define SOCK_NONBLOCK 0x40000000 +#endif /* _ASM_SW64_SOCKET_H */ diff --git a/arch/sw_64/include/asm/sparsemem.h b/arch/sw_64/include/asm/sparsemem.h new file mode 100644 index 0000000000000000000000000000000000000000..a60e757f3838791dfb1b35245a17780d8ecf4b83 --- /dev/null +++ b/arch/sw_64/include/asm/sparsemem.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SPARSEMEM_H +#define _ASM_SW64_SPARSEMEM_H + +#include + +#define SECTION_SIZE_BITS 28 + +#endif /* _ASM_SW64_SPARSEMEM_H */ diff --git a/arch/sw_64/include/asm/spinlock.h b/arch/sw_64/include/asm/spinlock.h new file mode 100644 index 0000000000000000000000000000000000000000..64358f32cd9a80b587a023dae6d5eecb1cf270e6 --- /dev/null +++ b/arch/sw_64/include/asm/spinlock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_SPINLOCK_H +#define _ASM_SW64_SPINLOCK_H + +#include +#include + +/* See include/linux/spinlock.h */ +#define smp_mb__after_spinlock() smp_mb() + +#endif /* _ASM_SW64_SPINLOCK_H */ diff --git a/arch/sw_64/include/asm/spinlock_types.h b/arch/sw_64/include/asm/spinlock_types.h new file mode 100644 index 0000000000000000000000000000000000000000..62e554e4f48c35b2d4578072231b58c75b202a4b --- /dev/null +++ b/arch/sw_64/include/asm/spinlock_types.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SPINLOCK_TYPES_H +#define _ASM_SW64_SPINLOCK_TYPES_H + +#include +#include + +#endif /* _ASM_SW64_SPINLOCK_TYPES_H */ diff --git a/arch/sw_64/include/asm/stacktrace.h b/arch/sw_64/include/asm/stacktrace.h new file mode 100644 index 0000000000000000000000000000000000000000..958c9892fd6d0943bf78484c7e870323694fbde8 --- /dev/null +++ b/arch/sw_64/include/asm/stacktrace.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_STACKTRACE_H +#define _ASM_SW64_STACKTRACE_H + +#include +#include +#include +#include +#include + +struct stackframe { + unsigned long pc; + unsigned long fp; +}; + +enum stack_type { + STACK_TYPE_UNKNOWN, + STACK_TYPE_TASK, +}; + +struct stack_info { + unsigned long low; + unsigned long high; + enum stack_type type; +}; + +/* The form of the top of the frame on the stack */ +struct stack_frame { + unsigned long return_address; + struct stack_frame *next_frame; +}; + +extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); +extern void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data); + +static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp, + struct stack_info *info) +{ + unsigned long low = (unsigned long)task_stack_page(tsk); + unsigned long high = low + THREAD_SIZE; + + if (sp < low || sp >= high) + return false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_TASK; + } + + return true; +} + +/* + * We can only safely access per-cpu stacks from current in a non-preemptible + * context. + */ +static inline bool on_accessible_stack(struct task_struct *tsk, + unsigned long sp, + struct stack_info *info) +{ + if (on_task_stack(tsk, sp, info)) + return true; + if (tsk != current || preemptible()) + return false; + + return false; +} + +#endif /* _ASM_SW64_STACKTRACE_H */ diff --git a/arch/sw_64/include/asm/string.h b/arch/sw_64/include/asm/string.h new file mode 100644 index 0000000000000000000000000000000000000000..87d93f4cd4d5eabb964cca67d64afa5a47b05a39 --- /dev/null +++ b/arch/sw_64/include/asm/string.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_STRING_H +#define _ASM_SW64_STRING_H + +#ifdef __KERNEL__ + +/* + * GCC of any recent vintage doesn't do stupid things with bcopy. + * EGCS 1.1 knows all about expanding memcpy inline, others don't. + * + * Similarly for a memset with data = 0. + */ + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *dest, const void *src, size_t n); +/* For backward compatibility with modules. Unused otherwise. 
*/ +extern void *__memcpy(void *dest, const void *src, size_t n); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *dest, const void *src, size_t n); + +#define __HAVE_ARCH_MEMSET +extern void *__constant_c_memset(void *s, unsigned long c, size_t n); +extern void *___memset(void *s, int c, size_t n); +extern void *__memset(void *s, int c, size_t n); +extern void *memset(void *s, int c, size_t n); + +#define __HAVE_ARCH_STRCPY +extern char *strcpy(char *dest, const char *src); + +#define __HAVE_ARCH_STRNCPY +extern char *strncpy(char *dest, const char *src, size_t n); + +/* The following routine is like memset except that it writes 16-bit + * aligned values. The DEST and COUNT parameters must be even for + * correct operation. + */ + +#define __HAVE_ARCH_MEMSETW +extern void *__memsetw(void *dest, unsigned short c, size_t count); + +#define memsetw(s, c, n) \ +(__builtin_constant_p(c) \ + ? __constant_c_memset((s), 0x0001000100010001UL * (unsigned short)(c), (n)) \ + : __memsetw((s), (c), (n))) + +#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE +#define __HAVE_ARCH_MEMCPY_FLUSHCACHE +void memcpy_flushcache(void *dst, const void *src, size_t cnt); +#endif + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_STRING_H */ diff --git a/arch/sw_64/include/asm/suspend.h b/arch/sw_64/include/asm/suspend.h new file mode 100644 index 0000000000000000000000000000000000000000..833e27f9d5e14a729a285406234e90fc03afbdfe --- /dev/null +++ b/arch/sw_64/include/asm/suspend.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SUSPEND_H +#define _ASM_SW64_SUSPEND_H + +#include +#include +#include +#define SOFTINF_SLEEP_MAGIC 0x0123456789ABCDEFUL + +#ifdef CONFIG_HIBERNATION +#include +#include +#endif + +struct callee_saved_regs { + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + unsigned long ra; +}; + +struct callee_saved_fpregs { + unsigned long f2[4]; + unsigned long f3[4]; + unsigned long f4[4]; + unsigned long f5[4]; + unsigned long f6[4]; + unsigned long f7[4]; + unsigned long f8[4]; + unsigned long f9[4]; +} __aligned(32); /* 256 bits aligned for simd */ + +struct processor_state { + struct callee_saved_regs regs; + struct callee_saved_fpregs fpregs; + unsigned long fpcr; + unsigned long ktp; +#ifdef CONFIG_HIBERNATION + unsigned long sp; + struct vcpucb vcb; +#endif +}; + +extern void sw64_suspend_deep_sleep(struct processor_state *state); +extern const struct platform_suspend_ops native_suspend_ops; +#endif /* _ASM_SW64_SUSPEND_H */ diff --git a/arch/sw_64/include/asm/sw64_init.h b/arch/sw_64/include/asm/sw64_init.h new file mode 100644 index 0000000000000000000000000000000000000000..86ddd2cb65f839bfa9cc609aa6878740b4c484b3 --- /dev/null +++ b/arch/sw_64/include/asm/sw64_init.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SW64_INIT_H +#define _ASM_SW64_SW64_INIT_H + +#include +#include + +#include + +struct sw64_early_init_ops { + void (*setup_core_map)(struct cpumask *cpumask); + unsigned long (*get_node_mem)(int nodeid); + void (*get_smp_info)(void); +}; + +struct sw64_pci_init_ops { + int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); + unsigned long (*get_rc_enable)(unsigned long node); + void (*hose_init)(struct pci_controller *hose); + void (*set_rc_piu)(unsigned long node, unsigned long index); + int (*check_pci_linkup)(unsigned long node, unsigned long index); + void (*set_intx)(unsigned long node, unsigned long index, + unsigned 
long int_conf); +}; + + +struct sw64_chip_init_ops { + struct sw64_early_init_ops early_init; + struct sw64_pci_init_ops pci_init; + void (*fixup)(void); +}; + +struct sw64_chip_ops { + int (*get_cpu_num)(void); + void (*device_interrupt)(unsigned long irq_info); + void (*suspend)(bool wake); + void (*fixup)(void); +}; + +extern void sw64_init_noop(void); +extern void setup_chip_ops(void); +extern struct sw64_chip_ops *sw64_chip; +extern struct sw64_chip_init_ops *sw64_chip_init; +#ifdef CONFIG_PM +extern struct syscore_ops io_syscore_ops; +#endif + +DECLARE_PER_CPU(unsigned long, hard_node_id); + +#endif /* _ASM_SW64_SW64_INIT_H */ diff --git a/arch/sw_64/include/asm/sw64io.h b/arch/sw_64/include/asm/sw64io.h new file mode 100644 index 0000000000000000000000000000000000000000..d52cd8cc86bf24aa3a7b45356bcbbf2651cf7b01 --- /dev/null +++ b/arch/sw_64/include/asm/sw64io.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SW64IO_H +#define _ASM_SW64_SW64IO_H + +#include +#include + +#if defined(CONFIG_UNCORE_XUELANG) +#include +#endif + +#if defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#define MK_RC_CFG(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_RC_CFG) +#define MK_PIU_IOR0(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR0_BASE) +#define MK_PIU_IOR1(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR1_BASE) + +static inline unsigned int +read_rc_conf(unsigned long node, unsigned long rc, + unsigned int offset) +{ + void __iomem *addr; + + addr = __va(MK_RC_CFG(node, rc) | offset); + return readl(addr); +} + +static inline void +write_rc_conf(unsigned long node, unsigned long rc, + unsigned int offset, unsigned int data) +{ + void __iomem *addr; + + addr = __va(MK_RC_CFG(node, rc) | offset); + writel(data, addr); +} + +static inline unsigned long +read_piu_ior0(unsigned long node, unsigned long rc, + unsigned int reg) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR0(node, rc) + reg); + return readq(addr); +} + +static inline void +write_piu_ior0(unsigned long node, unsigned long rc, + unsigned int reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR0(node, rc) + reg); + writeq(data, addr); +} + +static inline unsigned long +read_piu_ior1(unsigned long node, unsigned long rc, + unsigned int reg) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR1(node, rc) + reg); + return readq(addr); +} + +static inline void +write_piu_ior1(unsigned long node, unsigned long rc, + unsigned int reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR1(node, rc) + reg); + writeq(data, addr); +} + +static inline unsigned long +sw64_io_read(unsigned long node, unsigned long reg) +{ + void __iomem *addr; + + addr = __va(SW64_IO_BASE(node) | reg); + return readq(addr); +} + +static inline void +sw64_io_write(unsigned long node, unsigned long reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(SW64_IO_BASE(node) | reg); + writeq(data, addr); +} + +#if defined(CONFIG_UNCORE_XUELANG) +#include +#endif + +#if defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#endif /* _ASM_SW64_SW64IO_H */ diff --git a/arch/sw_64/include/asm/switch_to.h b/arch/sw_64/include/asm/switch_to.h new file mode 100644 index 0000000000000000000000000000000000000000..5e2db4b9e26613645995a3dd1ee40b798b460860 --- /dev/null +++ b/arch/sw_64/include/asm/switch_to.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SWITCH_TO_H +#define _ASM_SW64_SWITCH_TO_H + +#include + +extern void __fpstate_save(struct 
task_struct *save_to); +extern void __fpstate_restore(struct task_struct *restore_from); +extern struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next); +extern void restore_da_match_after_sched(void); + +static inline void aux_save(struct task_struct *task) +{ + struct pcb_struct *pcb; + + if (likely(!(task->flags & PF_KTHREAD))) { + pcb = &task_thread_info(task)->pcb; + pcb->tp = rtid(); + __fpstate_save(task); + } +} + +static inline void aux_restore(struct task_struct *task) +{ + struct pcb_struct *pcb; + + if (likely(!(task->flags & PF_KTHREAD))) { + pcb = &task_thread_info(task)->pcb; + wrtp(pcb->tp); + __fpstate_restore(task); + } +} + +static inline void __switch_to_aux(struct task_struct *prev, + struct task_struct *next) +{ + aux_save(prev); + aux_restore(next); +} + + +#define switch_to(prev, next, last) \ +do { \ + struct task_struct *__prev = (prev); \ + struct task_struct *__next = (next); \ + __switch_to_aux(__prev, __next); \ + (last) = __switch_to(__prev, __next); \ +} while (0) + + +/* TODO: finish_arch_switch has been removed from arch-independent code. */ + +/* + * finish_arch_post_lock_switch will be called after switch_to + */ +#define finish_arch_post_lock_switch restore_da_match_after_sched + + +#endif /* _ASM_SW64_SWITCH_TO_H */ diff --git a/arch/sw_64/include/asm/syscall.h b/arch/sw_64/include/asm/syscall.h new file mode 100644 index 0000000000000000000000000000000000000000..a821bf68be1643d7f457ad738d9cde9cc1930d77 --- /dev/null +++ b/arch/sw_64/include/asm/syscall.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SYSCALL_H +#define _ASM_SW64_SYSCALL_H + +#include + +#ifndef __ASSEMBLY__ + +typedef long (*syscall_fn_t)(ulong, ulong, ulong, ulong, ulong, ulong); + +extern syscall_fn_t sys_call_table[]; + +static inline int syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[0]; +} + +static inline long +syscall_get_error(struct task_struct *task, struct pt_regs *regs) +{ + return regs->regs[19] ? 
-regs->regs[0] : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[0]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + if (error) { + regs->regs[0] = -error; + regs->regs[19] = 1; + } else { + regs->regs[0] = val; + regs->regs[19] = 0; + } +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + *args++ = regs->regs[16]; + *args++ = regs->regs[17]; + *args++ = regs->regs[18]; + *args++ = regs->regs[19]; + *args++ = regs->regs[20]; + *args = regs->regs[21]; +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + const unsigned long *args) +{ + regs->regs[16] = *args++; + regs->regs[17] = *args++; + regs->regs[18] = *args++; + regs->regs[19] = *args++; + regs->regs[20] = *args++; + regs->regs[21] = *args; +} + +static inline int syscall_get_arch(struct task_struct *task) +{ + return AUDIT_ARCH_SW64; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_SW64_SYSCALL_H */ diff --git a/arch/sw_64/include/asm/tc.h b/arch/sw_64/include/asm/tc.h new file mode 100644 index 0000000000000000000000000000000000000000..aa39c3528e3fa923022365c1db31c1ef2c61e220 --- /dev/null +++ b/arch/sw_64/include/asm/tc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TC_H +#define _ASM_SW64_TC_H + +static inline unsigned long rdtc(void) +{ + unsigned long ret; + + __asm__ __volatile__ ("rtc %0" : "=r"(ret)); + return ret; +} + +extern void tc_sync_clear(void); +extern void tc_sync_ready(void *ignored); +extern void tc_sync_set(void); +#endif /* _ASM_SW64_TC_H */ diff --git a/arch/sw_64/include/asm/thread_info.h b/arch/sw_64/include/asm/thread_info.h new file mode 100644 index 0000000000000000000000000000000000000000..4f3b837e2e908d7d74d871269a65343c13689633 --- /dev/null +++ b/arch/sw_64/include/asm/thread_info.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_THREAD_INFO_H +#define _ASM_SW64_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include + +typedef struct { + unsigned long seg; +} mm_segment_t; + + +struct pcb_struct { + unsigned long tp; + unsigned long da_match, da_mask; + unsigned long dv_match, dv_mask; + union { + unsigned long dc_ctl; + unsigned long match_ctl; + }; + unsigned long ia_match, ia_mask; + unsigned long iv_match; + unsigned long ida_match, ida_mask; +}; + +struct thread_info { + struct pcb_struct pcb; /* hmcode state */ + + unsigned int flags; /* low level flags */ + unsigned int ieee_state; /* see fpu.h */ + + mm_segment_t addr_limit; /* thread address space */ + unsigned int cpu; /* current CPU */ + int preempt_count; /* 0 => preemptible, <0 => BUG */ + unsigned int status; /* thread-synchronous flags */ + + int bpt_nsaved; + unsigned long bpt_addr[2]; /* breakpoint handling */ + unsigned int bpt_insn[2]; +#ifdef CONFIG_DYNAMIC_FTRACE + unsigned long dyn_ftrace_addr; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + unsigned long dyn_ftrace_regs_addr; +#endif +#endif +}; + +static __always_inline u64 rtid(void) +{ + u64 val; + + asm volatile("rtid %0" : "=r" (val) : :); + return val; +} + +/* + * Macros/functions for gaining access to the thread information structure. 
+ */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .addr_limit = KERNEL_DS, \ + .preempt_count = INIT_PREEMPT_COUNT, \ +} + + +#endif /* __ASSEMBLY__ */ + +/* Thread information allocation. */ +#define THREAD_SIZE_ORDER 1 +#define THREAD_SIZE (2 * PAGE_SIZE) + +/* + * Thread information flags: + * - these are process state flags used from assembly + * - pending work-to-be-done flags come first and must be assigned to be + * within bits 0 to 7 to fit in an immediate operand. + * + * TIF_SYSCALL_TRACE is known to be 0 via blbs. + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_SYSCALL_AUDIT 4 /* syscall audit active */ +#define TIF_UPROBE 5 /* uprobe breakpoint or singlestep */ +#define TIF_PATCH_PENDING 6 /* pending live patching update */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ +#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ +#define TIF_SYSCALL_TRACEPOINT 10 +#define TIF_SECCOMP 11 /* secure computing */ +#define TIF_MEMDIE 13 /* is terminating due to OOM killer */ +#define TIF_POLLING_NRFLAG 14 /* idle is polling for TIF_NEED_RESCHED */ + +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +#define _TIF_UPROBE (1 << TIF_UPROBE) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) + +/* Work to do on interrupt/exception return. */ +#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL) + +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) + +/* Work to do on any return to userspace. */ +#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_SYSCALL_TRACE) + +#define TS_UAC_NOPRINT 0x0001 /* ! Preserve the following three */ +#define TS_UAC_NOFIX 0x0002 /* ! flags as they match */ +#define TS_UAC_SIGBUS 0x0004 /* ! 
userspace part of 'prctl' */ + +#define SET_UNALIGN_CTL(task, value) ({ \ + __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ + if (value & PR_UNALIGN_NOPRINT) \ + status |= TS_UAC_NOPRINT; \ + if (value & PR_UNALIGN_SIGBUS) \ + status |= TS_UAC_SIGBUS; \ + if (value & PR_NOFIX) /* sw-specific */ \ + status |= TS_UAC_NOFIX; \ + task_thread_info(task)->status = status; \ + 0; }) + +#define GET_UNALIGN_CTL(task, value) ({ \ + __u32 status = task_thread_info(task)->status & UAC_BITMASK; \ + __u32 res = 0; \ + if (status & TS_UAC_NOPRINT) \ + res |= PR_UNALIGN_NOPRINT; \ + if (status & TS_UAC_SIGBUS) \ + res |= PR_UNALIGN_SIGBUS; \ + if (status & TS_UAC_NOFIX) \ + res |= PR_NOFIX; \ + put_user(res, (int __user *)(value)); \ + }) + +#endif /* __KERNEL__ */ +#endif /* _ASM_SW64_THREAD_INFO_H */ diff --git a/arch/sw_64/include/asm/timer.h b/arch/sw_64/include/asm/timer.h new file mode 100644 index 0000000000000000000000000000000000000000..9ea9e0a538d01f84360f0d032450062835e1b1d0 --- /dev/null +++ b/arch/sw_64/include/asm/timer.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TIMER_H +#define _ASM_SW64_TIMER_H + +extern void sw64_setup_clocksource(void); + +extern void sw64_setup_timer(void); + +extern void __init setup_sched_clock(void); + +#endif /* _ASM_SW64_TIMER_H */ diff --git a/arch/sw_64/include/asm/timex.h b/arch/sw_64/include/asm/timex.h new file mode 100644 index 0000000000000000000000000000000000000000..a5760bf8abd484628232265675d51610f2b7a959 --- /dev/null +++ b/arch/sw_64/include/asm/timex.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TIMEX_H +#define _ASM_SW64_TIMEX_H + +#include + +/* With only one or two oddballs, we use the RTC as the ticker, selecting + * the 32.768kHz reference clock, which nicely divides down to our HZ. + */ +#define CLOCK_TICK_RATE 32768 + +/* + * Standard way to access the cycle counter. 
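+ * (On SW64 this is rdtc() from asm/tc.h, i.e. a read of the core cycle counter.)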
+ */ + +typedef unsigned long cycles_t; + +static inline cycles_t get_cycles(void) +{ + return rdtc(); +} + +#endif /* _ASM_SW64_TIMEX_H */ diff --git a/arch/sw_64/include/asm/tlb.h b/arch/sw_64/include/asm/tlb.h new file mode 100644 index 0000000000000000000000000000000000000000..08c8f4f97de13a0e5fdb87e943dc03997631ddf8 --- /dev/null +++ b/arch/sw_64/include/asm/tlb.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TLB_H +#define _ASM_SW64_TLB_H + +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +#include + +#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) +#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd) +#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud) + +#endif /* _ASM_SW64_TLB_H */ diff --git a/arch/sw_64/include/asm/tlbflush.h b/arch/sw_64/include/asm/tlbflush.h new file mode 100644 index 0000000000000000000000000000000000000000..73995d9663a6e793f70db4dd33270894d9fe8672 --- /dev/null +++ b/arch/sw_64/include/asm/tlbflush.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TLBFLUSH_H +#define _ASM_SW64_TLBFLUSH_H + +#include +#include +#include +#include +#include +#include +#include + +static inline void local_flush_tlb_all(void) +{ + tbiv(); +} + +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ + int cpu; + unsigned long flags; + + local_irq_save(flags); + + cpu = smp_processor_id(); + if (!asid_valid(mm, cpu)) { + cpumask_clear_cpu(cpu, mm_cpumask(mm)); + goto out; + } + + if (current->mm == mm) { + __get_new_mm_context(mm, cpu); + wrasid(cpu_asid(cpu, mm)); + } else { + mm->context.asid[cpu] = 0; + cpumask_clear_cpu(cpu, mm_cpumask(mm)); + } +out: + local_irq_restore(flags); +} + +static inline void +local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) +{ + int cpu; + struct mm_struct *mm; + + cpu = smp_processor_id(); + mm = vma->vm_mm; + + if (asid_valid(mm, cpu)) + tbisasid(cpu_asid(cpu, mm), addr); + else + cpumask_clear_cpu(cpu, mm_cpumask(mm)); +} + +/* + * For now, this simply flushes the whole user TLB. + */ +static inline void +local_flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + local_flush_tlb_mm(vma->vm_mm); +} + +/* + * There is no way to invalidate kernel pages only, so it has to + * invalidate all mappings. 
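+ * (Hence local_flush_tlb_kernel_range() below simply falls back to + * local_flush_tlb_all(): correct, though coarser than strictly necessary.)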
+ */ +static inline void +local_flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + local_flush_tlb_all(); +} + + +#ifdef CONFIG_SMP +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +#else +#define flush_tlb_all() local_flush_tlb_all() +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr) +#define flush_tlb_range(vma, start, end) local_flush_tlb_range(vma, start, end) +#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, end) + +#endif /* CONFIG_SMP */ + +#endif /* _ASM_SW64_TLBFLUSH_H */ diff --git a/arch/sw_64/include/asm/topology.h b/arch/sw_64/include/asm/topology.h new file mode 100644 index 0000000000000000000000000000000000000000..25ec7b9e943172316fd6f66cc27ee69af914a32e --- /dev/null +++ b/arch/sw_64/include/asm/topology.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TOPOLOGY_H +#define _ASM_SW64_TOPOLOGY_H + +#include +#include +#include +#include +#include +#include + +extern struct cpu_topology cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) +#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) +#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) + +void init_cpu_topology(void); +void store_cpu_topology(int cpuid); +void remove_cpu_topology(int cpuid); +const struct cpumask *cpu_coregroup_mask(int cpu); + +static inline int rcid_to_thread_id(int rcid) +{ + return (rcid & THREAD_ID_MASK) >> THREAD_ID_SHIFT; +} + +static inline int rcid_to_core_id(int rcid) +{ + return (rcid & CORE_ID_MASK) >> CORE_ID_SHIFT; +} + +static inline int rcid_to_domain_id(int rcid) +{ + return (rcid & DOMAIN_ID_MASK) >> DOMAIN_ID_SHIFT; +} + +#ifdef CONFIG_NUMA + +#ifndef CONFIG_DEBUG_PER_CPU_MAPS +extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +/* Returns a pointer to the cpumask of CPUs on Node 'node'. */ +#define cpumask_of_node(node) ((node) == NUMA_NO_NODE ? 
\ + cpu_all_mask : \ + node_to_cpumask_map[node]) +#else +extern const struct cpumask *cpumask_of_node(int node); +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ + +extern void numa_add_cpu(unsigned int cpu); +extern void numa_remove_cpu(unsigned int cpu); +extern void numa_store_cpu_info(unsigned int cpu); +extern int __node_distance(int from, int to); +#define node_distance(a, b) __node_distance(a, b) +#define parent_node(node) (node) +#define cpumask_of_pcibus(bus) (cpu_online_mask) +#else /* !CONFIG_NUMA */ +static inline void numa_add_cpu(unsigned int cpu) { } +static inline void numa_remove_cpu(unsigned int cpu) { } +static inline void numa_store_cpu_info(unsigned int cpu) { } +#endif /* CONFIG_NUMA */ + +extern void get_vt_smp_info(void); + +#include + +static inline void arch_fix_phys_package_id(int num, u32 slot) { } + +#endif /* _ASM_SW64_TOPOLOGY_H */ diff --git a/arch/sw_64/include/asm/uaccess.h b/arch/sw_64/include/asm/uaccess.h new file mode 100644 index 0000000000000000000000000000000000000000..f6b119f7fa78c70af35312f3a4dadfbbbc02f9cf --- /dev/null +++ b/arch/sw_64/include/asm/uaccess.h @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UACCESS_H +#define _ASM_SW64_UACCESS_H + +#include + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * Or at least it did once upon a time. Nowadays it is a mask that + * defines which bits of the address space are off limits. This is a + * wee bit faster than the above. + * + * For historical reasons, these macros are grossly misnamed. + */ + +#define KERNEL_DS ((mm_segment_t) { 0UL }) +#define USER_DS ((mm_segment_t) { -0x10000000000000UL }) + +#define get_fs() (current_thread_info()->addr_limit) +#define get_ds() (KERNEL_DS) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * + * As the sw64 uses the same address space for kernel and user + * data, we can just do these as direct assignments. (Of course, the + * exception handling means that it's no longer "just"...) + * + * Careful to not + * (a) re-use the arguments for side effects (sizeof/typeof is ok) + * (b) require any knowledge of processes at this stage + */ +#define put_user(x, ptr) \ + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) + +/* + * The "__xxx" versions do not do address space checking, useful when + * doing multiple accesses to the same area (the programmer has to do the + * checks by hand with "access_ok()") + */ +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +/* + * The "ldi %1, 2b-1b(%0)" bits are magic to get the assembler to + * encode the bits we need for resolving the exception. See the + * more extensive comments with fixup_inline_exception below for + * more information. 
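+ * (Roughly: the ".long 1b - ." word records where the trapping instruction + * lives, while the "ldi" pseudo-instruction packs the fixup displacement + * (2b-1b) together with the registers that receive the zeroed result and + * the error code.)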
+ */ + +extern void __get_user_unknown(void); + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err = 0; \ + unsigned long __gu_val; \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: \ + __get_user_8(ptr); \ + break; \ + case 2: \ + __get_user_16(ptr); \ + break; \ + case 4: \ + __get_user_32(ptr); \ + break; \ + case 8: \ + __get_user_64(ptr); \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x, ptr, size) \ +({ \ + long __gu_err = -EFAULT; \ + unsigned long __gu_val = 0; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + if (__access_ok(__gu_addr, size)) { \ + __gu_err = 0; \ + switch (size) { \ + case 1: \ + __get_user_8(__gu_addr); \ + break; \ + case 2: \ + __get_user_16(__gu_addr); \ + break; \ + case 4: \ + __get_user_32(__gu_addr); \ + break; \ + case 8: \ + __get_user_64(__gu_addr); \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ + } \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct __user *)(x)) + +#define __get_user_64(addr) \ + __asm__("1: ldl %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_32(addr) \ + __asm__("1: ldw %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_16(addr) \ + __asm__("1: ldhu %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_8(addr) \ + __asm__("1: ldbu %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +extern void __put_user_unknown(void); + +#define __put_user_nocheck(x, ptr, size) \ +({ \ + long __pu_err = 0; \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: \ + __put_user_8(x, ptr); \ + break; \ + case 2: \ + __put_user_16(x, ptr); \ + break; \ + case 4: \ + __put_user_32(x, ptr); \ + break; \ + case 8: \ + __put_user_64(x, ptr); \ + break; \ + default: \ + __put_user_unknown(); \ + break; \ + } \ + __pu_err; \ +}) + +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + if (__access_ok(__pu_addr, size)) { \ + __pu_err = 0; \ + switch (size) { \ + case 1: \ + __put_user_8(x, __pu_addr); \ + break; \ + case 2: \ + __put_user_16(x, __pu_addr); \ + break; \ + case 4: \ + __put_user_32(x, __pu_addr); \ + break; \ + case 8: \ + __put_user_64(x, __pu_addr); \ + break; \ + default: \ + __put_user_unknown(); \ + break; \ + } \ + } \ + __pu_err; \ +}) + +/* + * The "__put_user_xx()" macros tell gcc they read from memory + * instead of writing: this is because they do not write to + * any memory gcc knows about, so there are no aliasing issues + */ +#define __put_user_64(x, addr) \ +__asm__ __volatile__("1: stl %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m" (__m(addr)), "rJ" 
(x), "0"(__pu_err)) + +#define __put_user_32(x, addr) \ +__asm__ __volatile__("1: stw %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +#define __put_user_16(x, addr) \ +__asm__ __volatile__("1: sth %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +#define __put_user_8(x, addr) \ +__asm__ __volatile__("1: stb %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +/* + * Complex access routines + */ + +extern long __copy_user(void *to, const void *from, long len); + +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long len) +{ + return __copy_user(to, (__force const void *)from, len); +} + +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long len) +{ + return __copy_user((__force void *)to, from, len); +} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +extern long __clear_user(void __user *to, long len); + +static inline long +clear_user(void __user *to, long len) +{ + if (__access_ok(to, len)) + len = __clear_user(to, len); + return len; +} + +#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) + +extern long strncpy_from_user(char *dest, const char __user *src, long count); +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); + +#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE +struct page; +void memcpy_page_flushcache(char *to, struct page *page, size_t offset, + size_t len); +extern unsigned long __must_check __copy_user_flushcache(void *to, + const void __user *from, unsigned long n); + +static inline int +__copy_from_user_flushcache(void *dst, const void __user *src, unsigned long size) +{ + kasan_check_write(dst, size); + return __copy_user_flushcache(dst, src, size); +} +#endif + +#include +#endif /* _ASM_SW64_UACCESS_H */ diff --git a/arch/sw_64/include/asm/uncore_io_junzhang.h b/arch/sw_64/include/asm/uncore_io_junzhang.h new file mode 100644 index 0000000000000000000000000000000000000000..37cfe1fd68070c3ec194c181e4f416a790713947 --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_junzhang.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_JUNZHANG_H +#define _ASM_SW64_UNCORE_IO_JUNZHANG_H + +#include + +#define IO_BASE (0x1UL << 47) +#define PCI_BASE (0x1UL << 43) +#define PCI_IOR0_BASE (0x2UL << 32) +#define PCI_IOR1_BASE (0x3UL << 32) + +#define PCI_RC_CFG (0x5UL << 32) + +#define PCI_EP_CFG (0x3UL << 33) +#define PCI_LEGACY_IO (0x1UL << 32) +#define PCI_LEGACY_IO_SIZE (0x100000000UL) +#define PCI_MEM_UNPRE 0x0UL +#define PCI_32BIT_VT_MEMIO (0xc0000000UL) +#define PCI_32BIT_MEMIO (0xe0000000UL) +#define PCI_32BIT_MEMIO_SIZE (0x20000000UL) +#define PCI_64BIT_MEMIO (0x1UL << 39) +#define PCI_64BIT_MEMIO_SIZE (0x8000000000UL) + +#define IO_RC_SHIFT 40 +#define IO_NODE_SHIFT 44 +#define IO_MARK_BIT 47 + +#define VT_MAX_CPUS_SHIFT 0 +#define VT_MAX_CPUS_MASK 0x3ff +#define VT_CORES_SHIFT 10 +#define VT_CORES_MASK 0x3ff +#define VT_THREADS_SHIFT 20 +#define VT_THREADS_MASK 0xfff + +#define QEMU_PRINTF_BUFF_BASE (IO_BASE | SPBU_BASE | 0x40000UL) + 
+/* MSIConfig */ +#define MSICONFIG_VALID (0x1UL << 63) +#define MSICONFIG_EN (0x1UL << 62) +#define MSICONFIG_VECTOR_SHIFT 10 + +#define MSIX_MSG_ADDR (0xfff00000UL) + +#define SW64_PCI_IO_BASE(m, n) \ + (IO_BASE | ((m) << IO_NODE_SHIFT) | PCI_BASE | ((n) << IO_RC_SHIFT)) +#define SW64_IO_BASE(x) (IO_BASE | ((x) << IO_NODE_SHIFT)) + +#define SW64_PCI0_BUS 0 +#define PCI0_BUS SW64_PCI0_BUS + +#define MAX_NR_NODES 0x2 +#define MAX_NR_RCS 0x6 + +#define SPBU_BASE (0x3UL << 36) +#define INTPU_BASE (0x3aUL << 32) +#define IIC0_BASE (0x31UL << 32) +#define SPI_BASE (0x32UL << 32) +#define UART_BASE (0x33UL << 32) +#define IIC1_BASE (0x34UL << 32) +#define IIC2_BASE (0x35UL << 32) +#define GPIO_BASE (0x36UL << 32) +#define LPC_BASE (0x37UL << 32) +#define LPC_LEGACY_IO (0x1UL << 28 | IO_BASE | LPC_BASE) +#define LPC_MEM_IO (0x2UL << 28 | IO_BASE | LPC_BASE) +#define LPC_FIRMWARE_IO (0x3UL << 28 | IO_BASE | LPC_BASE) +#define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) + +#define PME_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x8UL << 10) +#define AER_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x8UL << 10) + +#define PIUCONFIG0_INIT_VAL 0x38016 + +/*-----------------------addr-----------------------*/ +/* INTPU REG */ +enum { + DEVINT_MISS = INTPU_BASE | 0x100UL, + MT_INT_CONFIG = INTPU_BASE | 0x300UL, + DEV_INT_CONFIG = INTPU_BASE | 0x480UL, + FMT_ERR = INTPU_BASE | 0x700UL, + FAULT_INT_CONFIG = INTPU_BASE | 0x780UL, + SERR_CNTTH = INTPU_BASE | 0x880UL, + SPBUSERR_CNT = INTPU_BASE | 0x900UL, + IRUSERR_CNT = INTPU_BASE | 0xa80UL, + ERRRPT_EN = INTPU_BASE | 0xb00UL, + IINT_MISS_VECTOR0 = INTPU_BASE | 0x1080UL, + IINT_MISS_VECTOR1 = INTPU_BASE | 0x1100UL, + IINT_MISS = INTPU_BASE | 0x1180UL, + IINT_MISS_RPTEN = INTPU_BASE | 0x1200UL, + DEVINT_MISS_RPTEN = INTPU_BASE | 0x1280UL, + ECCSERR = INTPU_BASE | 0x1300UL, + ECCSERR_RPTEN = INTPU_BASE | 0x1380UL, + ECCMERR = INTPU_BASE | 0x1400UL, + ECCMERR_RPTEN = INTPU_BASE | 0x1480UL, + DEVINT_WKEN = INTPU_BASE | 0x1500UL, + ADR_INT_CONFIG = INTPU_BASE | 0x1580UL, + DEVINTWK_INTEN = INTPU_BASE | 0x1600UL, +}; + +/* SPBU CSR */ +enum { + SMP_INFO = SPBU_BASE | 0x80UL, + INIT_CTL = SPBU_BASE | 0x680UL, + CORE_ONLINE = SPBU_BASE | 0x780UL, + DLI_RLTD_FAULT = SPBU_BASE | 0x980UL, + DLI_RLTD_FAULT_EN = SPBU_BASE | 0xa00UL, + DLI_RLTD_FAULT_INTEN = SPBU_BASE | 0xa80UL, + CFG_INFO = SPBU_BASE | 0x1100UL, + IO_START = SPBU_BASE | 0x1300UL, + I2C0_SRST_L = SPBU_BASE | 0x1900UL, + I2C1_SRST_L = SPBU_BASE | 0x1980UL, + I2C2_SRST_L = SPBU_BASE | 0x1a00UL, + MCU_DVC_INT = SPBU_BASE | 0x3000UL, + MCU_DVC_INT_EN = SPBU_BASE | 0x3080UL, + SI_FAULT_STAT = SPBU_BASE | 0x3100UL, + SI_FAULT_STAT_EN = SPBU_BASE | 0x3180UL, + SI_FAULT_INT_EN = SPBU_BASE | 0x3200UL, + ADR_CTL = SPBU_BASE | 0x3600UL, + MC_ONLINE = SPBU_BASE | 0x3780UL, + PIU_TOP0_CONFIG = SPBU_BASE | 0x4c80UL, + PIU_TOP1_CONFIG = SPBU_BASE | 0x4d00UL, + SOFT_INFO0 = SPBU_BASE | 0xa000UL, +}; + +/*--------------------------offset-----------------------------------*/ +/* PIU IOR0 */ +enum { + PIUCONFIG0 = 0x0UL, + EPDMABAR = 0x80UL, + IOMMUSEGITEM0 = 0x100UL, + IOMMUEXCPT_CTRL = 0x2100UL, + MSIADDR = 0x2180UL, + MSICONFIG0 = 0x2200UL, + INTACONFIG = 0xa200UL, + INTBCONFIG = 0xa280UL, + INTCCONFIG = 0xa300UL, + INTDCONFIG = 0xa380UL, + AERERRINTCONFIG = 0xa400UL, + AERERRMSICONFIG = 0xa480UL, + PMEINTCONFIG = 0xa500UL, + PMEMSICONFIG = 0xa580UL, + HPINTCONFIG = 0xa600UL, + HPMSICONFIG = 0xa680UL, + DTBASEADDR = 0xb000UL, + DTLB_FLUSHALL = 0xb080UL, + DTLB_FLUSHDEV = 0xb100UL, + PTLB_FLUSHALL = 0xb180UL, + PTLB_FLUSHDEV = 
0xb200UL, + PTLB_FLUSHVADDR = 0xb280UL, + PCACHE_FLUSHALL = 0xb300UL, + PCACHE_FLUSHDEV = 0xb380UL, + PCACHE_FLUSHPADDR = 0xb400UL, + TIMEOUT_CONFIG = 0xb480UL, + IOMMUEXCPT_STATUS = 0xb500UL, + IOMMUPAGE_PADDR1 = 0xb580UL, + IOMMUPAGE_PADDR2 = 0xb600UL, + IOMMUPAGE_PADDR3 = 0xb680UL, + PTLB_ACCESS = 0xb700UL, + PTLB_ITEM_TAG = 0xb780UL, + PTLB_ITEM_DATA = 0xb800UL, + PCACHE_ACCESS = 0xb880UL, + PCACHE_ITEM_TAG = 0xb900UL, + PCACHE_ITEM_DATA0 = 0xb980UL, +}; + +/* PIU IOR1 */ +enum { + PIUCONFIG1 = 0x0UL, + ERRENABLE = 0x880UL, + RCDEBUGINF1 = 0xc80UL, + DCACONTROL = 0x1a00UL, + DEVICEID0 = 0x1a80UL, +}; + +/* RC */ +enum { + RC_VENDOR_ID = 0x0UL, + RC_COMMAND = 0x80UL, + RC_REVISION_ID = 0x100UL, + RC_PRIMARY_BUS = 0x300UL, + RC_MSI_CONTROL = 0xa00UL, + RC_EXP_DEVCAP = 0xe80UL, + RC_EXP_DEVCTL = 0xf00UL, + RC_SLOT_CTRL = 0x1100UL, + RC_LINK_STAT = 0x1000UL, + RC_CONTROL = 0X1180UL, + RC_STATUS = 0X1200UL, + RC_EXP_DEVCTL2 = 0x1300UL, + RC_PORT_LINK_CTL = 0xe200UL, + RC_ORDER_RULE_CTL = 0x11680UL, + RC_MISC_CONTROL_1 = 0x11780UL, + RC_PHY_INT_REG = 0x80000UL, + RC_PHY_EXT_GEN1 = 0x82400UL, + RC_PHY_EXT_GEN2 = 0x82480UL, +}; +/* GPIO */ +enum { + GPIO_SWPORTA_DR = GPIO_BASE | 0x0UL, + GPIO_SWPORTA_DDR = GPIO_BASE | 0x200UL, +}; +/*--------------------------------------------------------------------------*/ +#endif /* _ASM_SW64_UNCORE_IO_JUNZHANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_ops_junzhang.h b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h new file mode 100644 index 0000000000000000000000000000000000000000..95a3b5c8053192398cc48764ff790c4d032e7cbd --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H +#define _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H + +static inline int __get_cpu_nums(void) +{ + int cpus; + unsigned long cfg_info; + + cfg_info = sw64_io_read(0, CFG_INFO); + cfg_info = (cfg_info >> 33) & 0x3; + cpus = 1 << cfg_info; + + return cpus; +} + +static inline unsigned long __get_node_mem(int node) +{ + unsigned long node_mem; + unsigned long total_mem; + + total_mem = sw64_io_read(node, CFG_INFO) >> 3; + total_mem = (total_mem & 0xffff) << 28; + node_mem = total_mem / __get_cpu_nums(); + + return node_mem; +} + +#define __io_read_longtime(node) (0UL) +#define __io_write_longtime(node, data) do { } while (0) +#define __io_write_longtime_start_en(node, data) do { } while (0) + +static inline void +__io_write_fault_int_en(int node, unsigned long data) +{ + sw64_io_write(node, FAULT_INT_CONFIG, data); +} + +#endif /* _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_ops_xuelang.h b/arch/sw_64/include/asm/uncore_io_ops_xuelang.h new file mode 100644 index 0000000000000000000000000000000000000000..9336e473211d7ff131d54df51a3f820a4eaca4e7 --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_ops_xuelang.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_OPS_XUELANG_H +#define _ASM_SW64_UNCORE_IO_OPS_XUELANG_H + +static inline int __get_cpu_nums(void) +{ + int cpus; + unsigned long trkmode; + + trkmode = sw64_io_read(0, TRKMODE); + trkmode = (trkmode >> 6) & 0x3; + cpus = 1 << trkmode; + + return cpus; +} + +static inline unsigned long __get_node_mem(int node) +{ + unsigned long node_mem; + unsigned long mc_config; + unsigned long mc_online; + unsigned long mc_cap; + unsigned long mc_num; + + mc_config = sw64_io_read(node, MC_CAP_CFG) & 0xf; + mc_cap = (1UL << mc_config) << 28; + mc_online = 
sw64_io_read(node, MC_ONLINE) & 0xff; + mc_num = __kernel_ctpop(mc_online); + node_mem = mc_cap * mc_num; + + return node_mem; +} + +static inline unsigned long +__io_read_longtime(int node) +{ + return sw64_io_read(node, LONG_TIME); +} + +static inline void +__io_write_longtime(int node, unsigned long data) +{ + sw64_io_write(node, LONG_TIME, data); +} + +static inline void +__io_write_longtime_start_en(int node, unsigned long data) +{ + sw64_io_write(node, LONG_TIME_START_EN, data); +} + +static inline void +__io_write_fault_int_en(int node, unsigned long data) +{ + sw64_io_write(node, DUAL_CG0_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG1_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG2_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG3_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG4_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG5_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG6_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG7_FAULT_INTEN, data); +} + +#endif /* _ASM_SW64_UNCORE_IO_OPS_XUELANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_xuelang.h b/arch/sw_64/include/asm/uncore_io_xuelang.h new file mode 100644 index 0000000000000000000000000000000000000000..aeaadec5be16378e0944490319f6225b57d418be --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_xuelang.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_XUELANG_H +#define _ASM_SW64_UNCORE_IO_XUELANG_H + +#include + +#define IO_BASE (0x1UL << 47) +#define PCI_BASE (0x1UL << 43) +#define PCI_IOR0_BASE (0x2UL << 32) +#define PCI_IOR1_BASE (0x3UL << 32) + +#define PCI_RC_CFG (0x5UL << 32) + +#define PCI_EP_CFG (0x3UL << 33) +#define PCI_LEGACY_IO (0x1UL << 32) +#define PCI_LEGACY_IO_SIZE (0x100000000UL) +#define PCI_MEM_UNPRE 0x0UL +#define PCI_32BIT_MEMIO (0xe0000000UL) +#define PCI_32BIT_MEMIO_SIZE (0x20000000UL) +#define PCI_64BIT_MEMIO (0x1UL << 39) +#define PCI_64BIT_MEMIO_SIZE (0x8000000000UL) + +#define IO_RC_SHIFT 40 +#define IO_NODE_SHIFT 44 +#define IO_MARK_BIT 47 + +#define VT_MAX_CPUS_SHIFT 0 +#define VT_MAX_CPUS_MASK 0x3ff +#define VT_CORES_SHIFT 10 +#define VT_CORES_MASK 0x3ff +#define VT_THREADS_SHIFT 20 +#define VT_THREADS_MASK 0xfff + +#define QEMU_PRINTF_BUFF_BASE (IO_BASE | MCU_BASE | 0x40000UL) + +/* MSIConfig */ +#define MSICONFIG_VALID (0x1UL << 63) +#define MSICONFIG_EN (0x1UL << 62) +#define MSICONFIG_VECTOR_SHIFT 10 + +#define MSIX_MSG_ADDR (0x91abc0UL) + +#define SW64_PCI_IO_BASE(m, n) \ + (IO_BASE | ((m) << IO_NODE_SHIFT) | PCI_BASE | ((n) << IO_RC_SHIFT)) +#define SW64_IO_BASE(x) (IO_BASE | ((x) << IO_NODE_SHIFT)) + +#define SW64_PCI0_BUS 0 +#define PCI0_BUS SW64_PCI0_BUS + +#define MAX_NR_NODES 0x2 +#define MAX_NR_RCS 0x6 + +#define MCU_BASE (0x3UL << 36) +#define CAB0_BASE (0x10UL << 32) +#define INTPU_BASE (0x2aUL << 32) +#define IIC0_BASE (0x31UL << 32) +#define SPI_BASE (0x32UL << 32) +#define UART_BASE (0x33UL << 32) +#define IIC1_BASE (0x34UL << 32) +#define IIC2_BASE (0x35UL << 32) +#define GPIO_BASE (0x36UL << 32) +#define LPC_BASE (0x37UL << 32) +#define LPC_LEGACY_IO (0x1UL << 28 | IO_BASE | LPC_BASE) +#define LPC_MEM_IO (0x2UL << 28 | IO_BASE | LPC_BASE) +#define LPC_FIRMWARE_IO (0x3UL << 28 | IO_BASE | LPC_BASE) +#define DLIA_BASE (0x20UL << 32) +#define DLIB_BASE (0x21UL << 32) +#define DLIC_BASE (0x22UL << 32) +#define DLI_PHY_CTL (0x10UL << 24) +#define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) + +#define PME_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) +#define AER_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) + 
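+/* + * PME/AER_ENABLE_INTD_CORE0 above appear to reuse the MSIConfig layout: bit 62 + * (cf. MSICONFIG_EN) as the enable bit and a target field at + * MSICONFIG_VECTOR_SHIFT routing PME/AER INTD events to core 0; this reading + * is inferred from the neighbouring defines rather than spelled out. + */ +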
+#define PIUCONFIG0_INIT_VAL 0x38056 + +/*-----------------------addr-----------------------*/ +/* CAB0 REG */ +enum { + TRKMODE = CAB0_BASE | 0x80UL, +}; + +/* DLIA IO REG */ +enum { + DLIA_BWTEST_PAT = DLIA_BASE | 0x100980UL, + DLIA_PHY_VLDLANE = DLIA_BASE | DLI_PHY_CTL | 0x300UL, +}; + +/* DLIB IO REG */ +enum { + DLIB_BWTEST_PAT = DLIB_BASE | 0x100980UL, + DLIB_PHY_VLDLANE = DLIB_BASE | DLI_PHY_CTL | 0x300UL, +}; + +/* DLIC IO REG */ +enum { + DLIC_BWTEST_PAT = DLIC_BASE | 0x100980UL, + DLIC_PHY_VLDLANE = DLIC_BASE | DLI_PHY_CTL | 0x300UL, +}; +/* INTPU REG */ +enum { + LCORE_SLEEPY = INTPU_BASE | 0x0UL, + LCORE_SLEEP = INTPU_BASE | 0x80UL, + DEVICE_MISS = INTPU_BASE | 0x100UL, + LONG_TIME = INTPU_BASE | 0x180UL, + LCORE_IDLE = INTPU_BASE | 0x280UL, + MT_INT_CONFIG = INTPU_BASE | 0x300UL, + DEV_INT_CONFIG = INTPU_BASE | 0x480UL, + FMT_ERR = INTPU_BASE | 0x700UL, + FAULT_INT_CONFIG = INTPU_BASE | 0x780UL, + SERR_CNTTH = INTPU_BASE | 0x880UL, + MCUSERR_CNT = INTPU_BASE | 0x900UL, + IRUSERR_CNT = INTPU_BASE | 0xa80UL, + ERRRPT_EN = INTPU_BASE | 0xb00UL, + IINT_MISS_VECTOR = INTPU_BASE | 0x1100UL, + IINT_MIS = INTPU_BASE | 0x1180UL, + IINT_MISS_RPTEN = INTPU_BASE | 0x1200UL, + DEVINT_MISS_RPTEN = INTPU_BASE | 0x1280UL, + ECCSERR = INTPU_BASE | 0x1300UL, + ECCSERR_RPTEN = INTPU_BASE | 0x1380UL, + ECCMERR = INTPU_BASE | 0x1400UL, + ECCMERR_RPTEN = INTPU_BASE | 0x1480UL, + DEVINT_WKEN = INTPU_BASE | 0x1500UL, + NMI_INT_CONFIG = INTPU_BASE | 0x1580UL, + DEVINTWK_INTEN = INTPU_BASE | 0x1600UL, +}; + +/* MC IO REG */ +enum { + CFGDEC = 0x400UL, + CFGCR = 0x480UL, + INIT_CTRL = 0x580UL, + CFGERR = 0xd00UL, + FSMSTAT = 0xe00UL, + PUB_INTERFACE = 0x1000UL, + POWERCTRL = 0x1080UL, + CFGMR0 = 0x1280UL, + CFGMR1 = 0x1300UL, + CFGMR2 = 0x1380UL, + CFGMR3 = 0x1400UL, + PERF_CTRL = 0x1480UL, + MC_PERF0 = 0x1500UL, + CFGMR4 = 0x1800UL, + CFGMR5 = 0x1880UL, + CFGMR6 = 0x1900UL, + MC_CTRL = 0x1c00UL, + MEMSERR_P = 0x1c80UL, + MEMSERR = 0x1d00UL, +}; + +/* MCU CSR */ +enum { + SMP_INFO = MCU_BASE | 0x80UL, + INIT_CTL = MCU_BASE | 0x680UL, + MT_STATE = MCU_BASE | 0x700UL, + CORE_ONLINE = MCU_BASE | 0x780UL, + MT_INT = MCU_BASE | 0x800UL, + MT_INT_END = MCU_BASE | 0x880UL, + CPU_ID = MCU_BASE | 0x900UL, + DLI_RLTD_FAULT = MCU_BASE | 0x980UL, + DLI_RLTD_FAULT_EN = MCU_BASE | 0xa00UL, + DLI_RLTD_FAULT_INTEN = MCU_BASE | 0xa80UL, + FAULT_SOURCE = MCU_BASE | 0xb00UL, + INT_SOURCE = MCU_BASE | 0xb80UL, + CORE_STATE0 = MCU_BASE | 0xc00UL, + CORE_STATE1 = MCU_BASE | 0xc80UL, + CFG_INFO = MCU_BASE | 0x1100UL, + MC_CAP_CFG = MCU_BASE | 0x1180UL, + IO_START = MCU_BASE | 0x1300UL, + UART_ONLINE = MCU_BASE | 0x1780UL, + I2C0_SRST_L = MCU_BASE | 0x1900UL, + I2C1_SRST_L = MCU_BASE | 0x1980UL, + I2C2_SRST_L = MCU_BASE | 0x1a00UL, + MCU_DVC_INT = MCU_BASE | 0x3000UL, + MCU_DVC_INT_EN = MCU_BASE | 0x3080UL, + SI_FAULT_STAT = MCU_BASE | 0x3100UL, + SI_FAULT_EN = MCU_BASE | 0x3180UL, + SI_FAULT_INT_EN = MCU_BASE | 0x3200UL, + FIFO_SYNSEL = MCU_BASE | 0x3400UL, + CPU_INFO = MCU_BASE | 0x3480UL, + WAKEUP_CTL = MCU_BASE | 0x3500UL, + FLAGREG = MCU_BASE | 0x3580UL, + NMI_CTL = MCU_BASE | 0x3600UL, + PIUPLL_CNT = MCU_BASE | 0x3680UL, + MC_ONLINE = MCU_BASE | 0x3780UL, + FLASH_INFO = MCU_BASE | 0x3800UL, + RTPUSROMCNT = MCU_BASE | 0x3880UL, + CLU_LV1_SEL = MCU_BASE | 0x3a80UL, + CLU_LV2_SEL = MCU_BASE | 0x3b00UL, + CLK_CTL = MCU_BASE | 0x3b80UL, + SLEEP_WAIT_CNT = MCU_BASE | 0x4980UL, + CHIP_ID = MCU_BASE | 0x4b00UL, + PIU_TOP0_CONFIG = MCU_BASE | 0x4c80UL, + PIU_TOP1_CONFIG = MCU_BASE | 0x4d00UL, + LVDS_CTL = MCU_BASE | 0x4d80UL, + 
LPC_DMAREQ_TOTH = MCU_BASE | 0x5100UL, + DLI_ONLINE = MCU_BASE | 0x6180UL, + LPC_DMAREQ_HADR = MCU_BASE | 0x6200UL, + PIU_PHY_SRST_H = MCU_BASE | 0x6280UL, + CLK_SEL_PCIE0 = MCU_BASE | 0x6280UL, + CLK_SEL_PCIE1 = MCU_BASE | 0x6300UL, + CLK_SEL_PCIE2 = MCU_BASE | 0x6380UL, + CLK_SEL_PCIE3 = MCU_BASE | 0x6400UL, + CLK_SEL_PCIE4 = MCU_BASE | 0x6480UL, + CLK_SEL_PCIE5 = MCU_BASE | 0x6500UL, + PERST_N_PCIE0 = MCU_BASE | 0x6680UL, + PERST_N_PCIE1 = MCU_BASE | 0x6700UL, + PERST_N_PCIE2 = MCU_BASE | 0x6780UL, + PERST_N_PCIE3 = MCU_BASE | 0x6800UL, + PERST_N_PCIE4 = MCU_BASE | 0x6880UL, + PERST_N_PCIE5 = MCU_BASE | 0x6900UL, + BUTTON_RST_N_PCIE0 = MCU_BASE | 0x6a80UL, + BUTTON_RST_N_PCIE1 = MCU_BASE | 0x6b00UL, + BUTTON_RST_N_PCIE2 = MCU_BASE | 0x6b80UL, + BUTTON_RST_N_PCIE3 = MCU_BASE | 0x6c00UL, + BUTTON_RST_N_PCIE4 = MCU_BASE | 0x6c80UL, + BUTTON_RST_N_PCIE5 = MCU_BASE | 0x6d00UL, + DUAL_CG0_FAULT = MCU_BASE | 0x6d80UL, + DUAL_CG1_FAULT = MCU_BASE | 0x6e00UL, + DUAL_CG2_FAULT = MCU_BASE | 0x6e80UL, + DUAL_CG3_FAULT = MCU_BASE | 0x6f00UL, + DUAL_CG4_FAULT = MCU_BASE | 0x6f80UL, + DUAL_CG5_FAULT = MCU_BASE | 0x7000UL, + DUAL_CG6_FAULT = MCU_BASE | 0x7080UL, + DUAL_CG7_FAULT = MCU_BASE | 0x7100UL, + DUAL_CG0_FAULT_EN = MCU_BASE | 0x7180UL, + DUAL_CG1_FAULT_EN = MCU_BASE | 0x7200UL, + DUAL_CG2_FAULT_EN = MCU_BASE | 0x7280UL, + DUAL_CG3_FAULT_EN = MCU_BASE | 0x7300UL, + DUAL_CG4_FAULT_EN = MCU_BASE | 0x7380UL, + DUAL_CG5_FAULT_EN = MCU_BASE | 0x7400UL, + DUAL_CG6_FAULT_EN = MCU_BASE | 0x7480UL, + DUAL_CG7_FAULT_EN = MCU_BASE | 0x7500UL, + DUAL_CG0_FAULT_INTEN = MCU_BASE | 0x7580UL, + DUAL_CG1_FAULT_INTEN = MCU_BASE | 0x7600UL, + DUAL_CG2_FAULT_INTEN = MCU_BASE | 0x7680UL, + DUAL_CG3_FAULT_INTEN = MCU_BASE | 0x7700UL, + DUAL_CG4_FAULT_INTEN = MCU_BASE | 0x7780UL, + DUAL_CG5_FAULT_INTEN = MCU_BASE | 0x7800UL, + DUAL_CG6_FAULT_INTEN = MCU_BASE | 0x7880UL, + DUAL_CG7_FAULT_INTEN = MCU_BASE | 0x7900UL, + SOFT_INFO0 = MCU_BASE | 0x7f00UL, + LONG_TIME_START_EN = MCU_BASE | 0x9000UL, +}; + +/*--------------------------offset-----------------------------------*/ +/* PIU IOR0 */ +enum { + PIUCONFIG0 = 0x0UL, + EPDMABAR = 0x80UL, + IOMMUSEGITEM0 = 0x100UL, + IOMMUEXCPT_CTRL = 0x2100UL, + MSIADDR = 0x2180UL, + MSICONFIG0 = 0x2200UL, + INTACONFIG = 0xa200UL, + INTBCONFIG = 0xa280UL, + INTCCONFIG = 0xa300UL, + INTDCONFIG = 0xa380UL, + AERERRINTCONFIG = 0xa400UL, + AERERRMSICONFIG = 0xa480UL, + PMEINTCONFIG = 0xa500UL, + PMEMSICONFIG = 0xa580UL, + HPINTCONFIG = 0xa600UL, + HPMSICONFIG = 0xa680UL, + DTBASEADDR = 0xb000UL, + DTLB_FLUSHALL = 0xb080UL, + DTLB_FLUSHDEV = 0xb100UL, + PTLB_FLUSHALL = 0xb180UL, + PTLB_FLUSHDEV = 0xb200UL, + PTLB_FLUSHVADDR = 0xb280UL, + PCACHE_FLUSHALL = 0xb300UL, + PCACHE_FLUSHDEV = 0xb380UL, + PCACHE_FLUSHPADDR = 0xb400UL, + TIMEOUT_CONFIG = 0xb480UL, + IOMMUEXCPT_STATUS = 0xb500UL, + IOMMUPAGE_PADDR1 = 0xb580UL, + IOMMUPAGE_PADDR2 = 0xb600UL, + IOMMUPAGE_PADDR3 = 0xb680UL, + PTLB_ACCESS = 0xb700UL, + PTLB_ITEM_TAG = 0xb780UL, + PTLB_ITEM_DATA = 0xb800UL, + PCACHE_ACCESS = 0xb880UL, + PCACHE_ITEM_TAG = 0xb900UL, + PCACHE_ITEM_DATA0 = 0xb980UL, +}; + +/* PIU IOR1 */ +enum { + PIUCONFIG1 = 0x0UL, + ERRENABLE = 0x880UL, + RCDEBUGINF1 = 0xc80UL, + DCACONTROL = 0x1a00UL, + DEVICEID0 = 0x1a80UL, +}; + +/* RC */ +enum { + RC_VENDOR_ID = 0x0UL, + RC_COMMAND = 0x80UL, + RC_REVISION_ID = 0x100UL, + RC_PRIMARY_BUS = 0x300UL, + RC_MSI_CONTROL = 0xa00UL, + RC_EXP_DEVCAP = 0xe80UL, + RC_EXP_DEVCTL = 0xf00UL, + RC_SLOT_CTRL = 0x1100UL, + RC_LINK_STAT = 0x1000UL, + RC_CONTROL = 0X1180UL, + RC_STATUS = 
0X1200UL, + RC_EXP_DEVCTL2 = 0x1300UL, + RC_PORT_LINK_CTL = 0xe200UL, + RC_ORDER_RULE_CTL = 0x11680UL, + RC_MISC_CONTROL_1 = 0x11780UL, + RC_PHY_INT_REG = 0x80000UL, + RC_PHY_EXT_GEN1 = 0x82400UL, + RC_PHY_EXT_GEN2 = 0x82480UL, +}; +/* GPIO */ +enum { + GPIO_SWPORTA_DR = GPIO_BASE | 0x0UL, + GPIO_SWPORTA_DDR = GPIO_BASE | 0x200UL, +}; +/*--------------------------------------------------------------------------*/ +#endif /* _ASM_SW64_UNCORE_IO_XUELANG_H */ diff --git a/arch/sw_64/include/asm/unistd.h b/arch/sw_64/include/asm/unistd.h new file mode 100644 index 0000000000000000000000000000000000000000..6d1b8d1e201167d56c5e6ca8e0ecff89160b7af1 --- /dev/null +++ b/arch/sw_64/include/asm/unistd.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNISTD_H +#define _ASM_SW64_UNISTD_H + +#include + +#define NR_SYSCALLS __NR_syscalls +#define NR_syscalls NR_SYSCALLS + +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_CLONE3 + +#endif /* _ASM_SW64_UNISTD_H */ diff --git a/arch/sw_64/include/asm/uprobes.h b/arch/sw_64/include/asm/uprobes.h new file mode 100644 index 0000000000000000000000000000000000000000..fcd2026c3622e20a781107c70d414f075d1bf588 --- /dev/null +++ b/arch/sw_64/include/asm/uprobes.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef _ASM_SW64_UPROBES_H +#define _ASM_SW64_UPROBES_H + +#include +#include +#include + +/* + * We want this to be defined as union sw64_instruction but that makes the + * generic code blow up. 
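+ * (Instructions are fixed 32-bit on SW64, cf. SW64_INSN_SIZE below, so a + * plain u32 carries a probed opcode just as well.)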
+ */ +typedef u32 uprobe_opcode_t; + +#define MAX_UINSN_BYTES SW64_INSN_SIZE +#define UPROBE_XOL_SLOT_BYTES SW64_INSN_SIZE + +#define UPROBE_BRK_UPROBE 0x000d000d /* break 13 */ +#define UPROBE_BRK_UPROBE_XOL 0x000e000d /* break 14 */ + +#define UPROBE_SWBP_INSN UPROBE_BRK_UPROBE +#define UPROBE_SWBP_INSN_SIZE MAX_UINSN_BYTES + +struct arch_uprobe { + u32 insn; + u32 ixol[2]; +}; + +struct arch_uprobe_task { + unsigned long saved_trap_nr; +}; + +#ifdef CONFIG_UPROBES +void sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc); +#else +static inline void +sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc) {} +#endif + +#endif /* _ASM_SW64_UPROBES_H */ diff --git a/arch/sw_64/include/asm/vcpu.h b/arch/sw_64/include/asm/vcpu.h new file mode 100644 index 0000000000000000000000000000000000000000..c4e3caacbc70c7409a3539317d781d8997eb5d6c --- /dev/null +++ b/arch/sw_64/include/asm/vcpu.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_VCPU_H +#define _ASM_SW64_VCPU_H + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_SUBARCH_C3B + +struct vcpucb { + unsigned long go_flag; + unsigned long pcbb; + unsigned long ksp; + unsigned long usp; + unsigned long kgp; + unsigned long ent_arith; + unsigned long ent_if; + unsigned long ent_int; + unsigned long ent_mm; + unsigned long ent_sys; + unsigned long ent_una; + unsigned long stack_pc; + unsigned long new_a0; + unsigned long new_a1; + unsigned long new_a2; + unsigned long soft_cid; + unsigned long csr_save; + unsigned long wakeup_magic; + unsigned long host_vcpucb; + unsigned long upcr; + unsigned long vpcr; + unsigned long dtb_vpcr; + unsigned long guest_ksp; + unsigned long guest_usp; + unsigned long vcpu_irq_disabled; + unsigned long vcpu_irq; + unsigned long ptbr; + unsigned long soft_tid; + unsigned long int_stat1; + unsigned long int_stat2; + unsigned long int_stat3; + unsigned long reset_entry; + unsigned long pvcpu; + unsigned long exit_reason; + unsigned long ipaddr; + unsigned long vcpu_irq_vector; + unsigned long pri_base; + unsigned long stack_pc_dfault; + unsigned long guest_p20; + unsigned long guest_dfault_double; + unsigned long guest_irqs_pending; + unsigned long guest_hm_r30; + unsigned long migration_mark; + unsigned long guest_longtime; + unsigned long guest_longtime_offset; + unsigned long reserved[3]; +}; + +#else + +struct vcpucb { + unsigned long ktp; + unsigned long pcbb; + unsigned long ksp; + unsigned long usp; + unsigned long kgp; + unsigned long ent_arith; + unsigned long ent_if; + unsigned long ent_int; + unsigned long ent_mm; + unsigned long ent_sys; + unsigned long ent_una; + unsigned long stack_pc; + unsigned long new_a0; + unsigned long new_a1; + unsigned long new_a2; + unsigned long soft_cid; + unsigned long csr_save; + unsigned long wakeup_magic; + unsigned long host_vcpucb; + unsigned long upcr; + unsigned long vpcr; + unsigned long dtb_vpcr; + unsigned long dtb_upcr; + unsigned long guest_ksp; + unsigned long guest_usp; + unsigned long vcpu_irq_disabled; + unsigned long vcpu_irq; + unsigned long ptbr_usr; + unsigned long ptbr_sys; + unsigned long soft_tid; + unsigned long int_stat0; + unsigned long int_stat1; + unsigned long int_stat2; + unsigned long int_stat3; + unsigned long reset_entry; + unsigned long pvcpu; + unsigned long exit_reason; + unsigned long ipaddr; + unsigned long vcpu_pc_save; + unsigned long shtclock_offset; + unsigned long reserved[8]; +}; +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_SW64_VCPU_H */ diff --git a/arch/sw_64/include/asm/vdso.h 
b/arch/sw_64/include/asm/vdso.h new file mode 100644 index 0000000000000000000000000000000000000000..7a2e23c648f3d48c50ab72709e9d2091aee9410c --- /dev/null +++ b/arch/sw_64/include/asm/vdso.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 SW64 Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_VDSO_H +#define _ASM_SW64_VDSO_H + +#ifdef __KERNEL__ + +/* + * Default link address for the vDSO. + * Since we randomise the VDSO mapping, there's little point in trying + * to prelink this. + */ +#define VDSO_LBASE 0x0 + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#define VDSO_SYMBOL(base, name) \ +({ \ + extern const unsigned long __vdso_##name; \ + ((unsigned long)(base) + __vdso_##name); \ +}) + + +struct vdso_data { + u64 xtime_sec; + u64 xtime_nsec; + u64 wall_to_mono_sec; + u64 wall_to_mono_nsec; + u32 cs_shift; + u32 cs_mult; + u64 cs_cycle_last; + u64 cs_mask; + s32 tz_minuteswest; + s32 tz_dsttime; + u32 seq_count; +}; + +static inline unsigned long get_vdso_base(void) +{ + unsigned long addr, tmp; + __asm__ __volatile__( + " br %1, 1f\n" + "1: ldi %0, 0(%1)\n" + : "=r" (addr), "=&r" (tmp) + ::); + + addr &= ~(PAGE_SIZE - 1); + return addr; +} + +static inline const struct vdso_data *get_vdso_data(void) +{ + return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE); +} + +static inline u32 vdso_data_read_begin(const struct vdso_data *data) +{ + u32 seq; + + while (true) { + seq = READ_ONCE(data->seq_count); + if (likely(!(seq & 1))) { + /* Paired with smp_wmb() in vdso_data_write_*(). */ + smp_rmb(); + return seq; + } + + cpu_relax(); + } +} + +static inline bool vdso_data_read_retry(const struct vdso_data *data, + u32 start_seq) +{ + /* Paired with smp_wmb() in vdso_data_write_*(). */ + smp_rmb(); + return unlikely(data->seq_count != start_seq); +} + +static inline void vdso_data_write_begin(struct vdso_data *data) +{ + ++data->seq_count; + + /* Ensure sequence update is written before other data page values. */ + smp_wmb(); +} + +static inline void vdso_data_write_end(struct vdso_data *data) +{ + /* Ensure data values are written before updating sequence again. 
*/ + smp_wmb(); + ++data->seq_count; +} + + +#endif /* !__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_SW64_VDSO_H */ diff --git a/arch/sw_64/include/asm/vmalloc.h b/arch/sw_64/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..a76d1133d6c6f1851615c9546c4ca36ec3f927ee --- /dev/null +++ b/arch/sw_64/include/asm/vmalloc.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_VMALLOC_H +#define _ASM_SW64_VMALLOC_H + +#endif /* _ASM_SW64_VMALLOC_H */ diff --git a/arch/sw_64/include/asm/word-at-a-time.h b/arch/sw_64/include/asm/word-at-a-time.h new file mode 100644 index 0000000000000000000000000000000000000000..623efbec4429713d0ad20f116266b8f87cc4fc1d --- /dev/null +++ b/arch/sw_64/include/asm/word-at-a-time.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_WORD_AT_A_TIME_H +#define _ASM_SW64_WORD_AT_A_TIME_H + +#include + +/* + * word-at-a-time interface for SW64. + */ + +/* + * We do not use the word_at_a_time struct on SW64, but it needs to be + * implemented to humour the generic code. + */ +struct word_at_a_time { + const unsigned long unused; +}; + +#define WORD_AT_A_TIME_CONSTANTS { 0 } + +/* Return nonzero if val has a zero */ +static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long zero_locations = __kernel_cmpgeb(0, val); + *bits = zero_locations; + return zero_locations; +} + +static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +#define create_zero_mask(bits) (bits) + +static inline unsigned long find_zero(unsigned long bits) +{ + return __kernel_cttz(bits); +} + +#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1) + +#endif /* _ASM_SW64_WORD_AT_A_TIME_H */ diff --git a/arch/sw_64/include/asm/xchg.h b/arch/sw_64/include/asm/xchg.h new file mode 100644 index 0000000000000000000000000000000000000000..38f067d5ed04330ad206fb10673f29f6f71d5001 --- /dev/null +++ b/arch/sw_64/include/asm/xchg.h @@ -0,0 +1,485 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_XCHG_H +#define _ASM_SW64_XCHG_H + +#ifndef _ASM_SW64_CMPXCHG_H +#error Do not include xchg.h directly. Use cmpxchg.h +#endif +/* + * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code + * except that local version do not have the expensive memory barrier. + * So this file is included twice from asm/cmpxchg.h. + */ + +#if defined(CONFIG_SUBARCH_C3B) +/* + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). 
+ */ + +static inline unsigned long +____xchg(_u8, volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + + " andnot %4, 7, %3\n" + " inslb %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " ldi %0, 1\n" + " wr_f %0\n" + " extlb %2, %4, %0\n" + " masklb %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " rd_f %2\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u16, volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4, 7, %3\n" + " inslh %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " ldi %0, 1\n" + " wr_f %0\n" + " extlh %2, %4, %0\n" + " masklh %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " rd_f %2\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u32, volatile int *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis $31, %4, %1\n" + " lstw %1, 0(%3)\n" + " rd_f %1\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +____xchg(_u64, volatile long *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldl %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis $31, %4, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + * + * The memory barrier should be placed in SMP only when we actually + * make the change. If we don't change anything (so if the returned + * prev is equal to old) then we aren't acquiring anything new and + * we don't need any memory barrier as far as I can tell. 
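+ * + * (Note how the sequences below feed the cmpeq result straight into wr_f, + * so a failed comparison never arms the conditional store.)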
+ */ + +static inline unsigned long +____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslb %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlb %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " wr_f %3\n" + " masklb %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " rd_f %2\n" + " beq %3, 2f\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslh %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlh %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " wr_f %3\n" + " masklh %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " rd_f %2\n" + " beq %3, 2f\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u32, volatile int *m, int old, int new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " wr_f %1\n" + " bis $31, %6, %4\n" + " lstw %4, 0(%3)\n" + " rd_f %4\n" + " beq %1, 2f\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " wr_f %1\n" + " bis $31, %6, %4\n" + " lstl %4, 0(%3)\n" + " rd_f %4\n" + " beq %1, 2f\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +#elif defined(CONFIG_SUBARCH_C4) +/* + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). 
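+ * + * (Unlike the C3B variants above, these C4 sequences need no wr_f/rd_f pair: + * lstw/lstl leave zero in the source register on failure and lbr retries.)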
+ */
+
+static inline unsigned long
+____xchg(_u8, volatile char *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4, 7, %3\n"
+ " inslb %1, %4, %1\n"
+ "1: lldl %2, 0(%3)\n"
+ " extlb %2, %4, %0\n"
+ " masklb %2, %4, %2\n"
+ " or %1, %2, %2\n"
+ " lstl %2, 0(%3)\n"
+ " beq %2, 2f\n"
+ ".subsection 2\n"
+ "2: lbr 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long
+____xchg(_u16, volatile short *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4, 7, %3\n"
+ " inslh %1, %4, %1\n"
+ "1: lldl %2, 0(%3)\n"
+ " extlh %2, %4, %0\n"
+ " masklh %2, %4, %2\n"
+ " or %1, %2, %2\n"
+ " lstl %2, 0(%3)\n"
+ " beq %2, 2f\n"
+ ".subsection 2\n"
+ "2: lbr 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long
+____xchg(_u32, volatile int *m, unsigned long val)
+{
+ unsigned long dummy, addr;
+
+ __asm__ __volatile__(
+ " ldi %3, %5\n"
+ "1: lldw %0, 0(%3)\n"
+ " bis $31, %4, %1\n"
+ " lstw %1, 0(%3)\n"
+ " beq %1, 2f\n"
+ ".subsection 2\n"
+ "2: lbr 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+static inline unsigned long
+____xchg(_u64, volatile long *m, unsigned long val)
+{
+ unsigned long dummy, addr;
+
+ __asm__ __volatile__(
+ " ldi %3, %5\n"
+ "1: lldl %0, 0(%3)\n"
+ " bis $31, %4, %1\n"
+ " lstl %1, 0(%3)\n"
+ " beq %1, 2f\n"
+ ".subsection 2\n"
+ "2: lbr 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ *
+ * The memory barrier should be placed in SMP only when we actually
+ * make the change. If we don't change anything (so if the returned
+ * prev is equal to old) then we aren't acquiring anything new and
+ * we don't need any memory barrier as far as I can tell.
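+ *
+ * (Editorial note: in this CONFIG_SUBARCH_C4 variant the cmpeq result
+ * gates the store directly -- on a mismatch the beq skips the
+ * conditional store and the old value is returned unchanged, while a
+ * conditional store that fails, leaving 0 in its status register,
+ * retries from the locked load via the subsection-2 branch.)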
+ */ +static inline unsigned long +____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslb %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlb %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " beq %3, 2f\n" + " masklb %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslh %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlh %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " beq %3, 2f\n" + " masklh %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u32, volatile int *m, int old, int new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " beq %1, 2f\n" + " bis $31, %6, %4\n" + " lstw %4, 0(%3)\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " beq %1, 2f\n" + " bis $31, %6, %4\n" + " lstl %4, 0(%3)\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +#endif + +/* This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid xchg(). + */ +extern void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +____xchg(, volatile void *ptr, unsigned long x, int size) +{ + switch (size) { + case 1: + return ____xchg(_u8, ptr, x); + case 2: + return ____xchg(_u16, ptr, x); + case 4: + return ____xchg(_u32, ptr, x); + case 8: + return ____xchg(_u64, ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +} + +/* This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid cmpxchg(). 
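+ *
+ * Editorial sketch: since the size switch in ____cmpxchg() below is
+ * resolved at compile time, a call with an unsupported width, e.g.
+ *
+ *	struct { char b[3]; } odd;
+ *	____cmpxchg(, &odd, 0, 1, sizeof(odd));
+ *
+ * keeps the call to __cmpxchg_called_with_bad_pointer() live and the
+ * mistake surfaces as a link failure instead of a silent miscompile.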
+ */ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __always_inline unsigned long ____cmpxchg(, volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 1: + return ____cmpxchg(_u8, ptr, old, new); + case 2: + return ____cmpxchg(_u16, ptr, old, new); + case 4: + return ____cmpxchg(_u32, ptr, old, new); + case 8: + return ____cmpxchg(_u64, ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#endif /* _ASM_SW64_XCHG_H */ diff --git a/arch/sw_64/include/asm/xor.h b/arch/sw_64/include/asm/xor.h new file mode 100644 index 0000000000000000000000000000000000000000..0aff8804f503e3f50f57d625b2454142e932d0b9 --- /dev/null +++ b/arch/sw_64/include/asm/xor.h @@ -0,0 +1,857 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Optimized RAID-5 checksumming functions. + */ + +#ifndef _ASM_SW64_XOR_H +#define _ASM_SW64_XOR_H + +extern void xor_sw64_2(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2); +extern void xor_sw64_3(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3); +extern void xor_sw64_4(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4); +extern void xor_sw64_5(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5); + +extern void xor_sw64_prefetch_2(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2); +extern void xor_sw64_prefetch_3(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3); +extern void xor_sw64_prefetch_4(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4); +extern void xor_sw64_prefetch_5(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5); + +asm(" \n\ + .text \n\ + .align 3 \n\ + .ent xor_sw64_2 \n\ +xor_sw64_2: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +2: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 8($17) \n\ + ldl $3, 8($18) \n\ + \n\ + ldl $4, 16($17) \n\ + ldl $5, 16($18) \n\ + ldl $6, 24($17) \n\ + ldl $7, 24($18) \n\ + \n\ + ldl $19, 32($17) \n\ + ldl $20, 32($18) \n\ + ldl $21, 40($17) \n\ + ldl $22, 40($18) \n\ + \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + ldl $25, 56($17) \n\ + xor $0, $1, $0 # 7 cycles from $1 load \n\ + \n\ + ldl $27, 56($18) \n\ + xor $2, $3, $2 \n\ + stl $0, 0($17) \n\ + xor $4, $5, $4 \n\ + \n\ + stl $2, 8($17) \n\ + xor $6, $7, $6 \n\ + stl $4, 16($17) \n\ + xor $19, $20, $19 \n\ + \n\ + stl $6, 24($17) \n\ + xor $21, $22, $21 \n\ + stl $19, 32($17) \n\ + xor $23, $24, $23 \n\ + \n\ + stl $21, 40($17) \n\ + xor $25, $27, $25 \n\ + stl $23, 48($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $25, 56($17) \n\ + addl $17, 64, $17 \n\ + addl $18, 64, $18 \n\ + bgt $16, 2b \n\ + \n\ + ret \n\ + .end xor_sw64_2 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_3 \n\ +xor_sw64_3: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +3: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ 
+ ldl $3, 8($17) \n\ + \n\ + ldl $4, 8($18) \n\ + ldl $6, 16($17) \n\ + ldl $7, 16($18) \n\ + ldl $21, 24($17) \n\ + \n\ + ldl $22, 24($18) \n\ + ldl $24, 32($17) \n\ + ldl $25, 32($18) \n\ + ldl $5, 8($19) \n\ + \n\ + ldl $20, 16($19) \n\ + ldl $23, 24($19) \n\ + ldl $27, 32($19) \n\ + \n\ + xor $0, $1, $1 # 8 cycles from $0 load \n\ + xor $3, $4, $4 # 6 cycles from $4 load \n\ + xor $6, $7, $7 # 6 cycles from $7 load \n\ + xor $21, $22, $22 # 5 cycles from $22 load \n\ + \n\ + xor $1, $2, $2 # 9 cycles from $2 load \n\ + xor $24, $25, $25 # 5 cycles from $25 load \n\ + stl $2, 0($17) \n\ + xor $4, $5, $5 # 6 cycles from $5 load \n\ + \n\ + stl $5, 8($17) \n\ + xor $7, $20, $20 # 7 cycles from $20 load \n\ + stl $20, 16($17) \n\ + xor $22, $23, $23 # 7 cycles from $23 load \n\ + \n\ + stl $23, 24($17) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + stl $27, 32($17) \n\ + \n\ + ldl $0, 40($17) \n\ + ldl $1, 40($18) \n\ + ldl $3, 48($17) \n\ + ldl $4, 48($18) \n\ + \n\ + ldl $6, 56($17) \n\ + ldl $7, 56($18) \n\ + ldl $2, 40($19) \n\ + ldl $5, 48($19) \n\ + \n\ + ldl $20, 56($19) \n\ + xor $0, $1, $1 # 4 cycles from $1 load \n\ + xor $3, $4, $4 # 5 cycles from $4 load \n\ + xor $6, $7, $7 # 5 cycles from $7 load \n\ + \n\ + xor $1, $2, $2 # 4 cycles from $2 load \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + stl $2, 40($17) \n\ + xor $7, $20, $20 # 4 cycles from $20 load \n\ + \n\ + stl $5, 48($17) \n\ + subl $16, 1, $16 \n\ + stl $20, 56($17) \n\ + addl $19, 64, $19 \n\ + \n\ + addl $18, 64, $18 \n\ + addl $17, 64, $17 \n\ + bgt $16, 3b \n\ + ret \n\ + .end xor_sw64_3 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_4 \n\ +xor_sw64_4: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +4: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 8($17) \n\ + ldl $5, 8($18) \n\ + ldl $6, 8($19) \n\ + ldl $7, 8($20) \n\ + \n\ + ldl $21, 16($17) \n\ + ldl $22, 16($18) \n\ + ldl $23, 16($19) \n\ + ldl $24, 16($20) \n\ + \n\ + ldl $25, 24($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $27, 24($18) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 24($19) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $3, 0($17) \n\ + xor $6, $7, $7 \n\ + xor $21, $22, $22 # 7 cycles from $22 load \n\ + xor $5, $7, $7 \n\ + \n\ + stl $7, 8($17) \n\ + xor $23, $24, $24 # 7 cycles from $24 load \n\ + ldl $2, 32($17) \n\ + xor $22, $24, $24 \n\ + \n\ + ldl $3, 32($18) \n\ + ldl $4, 32($19) \n\ + ldl $5, 32($20) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + \n\ + ldl $6, 40($17) \n\ + ldl $7, 40($18) \n\ + ldl $21, 40($19) \n\ + ldl $22, 40($20) \n\ + \n\ + stl $24, 16($17) \n\ + xor $0, $1, $1 # 9 cycles from $1 load \n\ + xor $2, $3, $3 # 5 cycles from $3 load \n\ + xor $27, $1, $1 \n\ + \n\ + stl $1, 24($17) \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + \n\ + ldl $25, 48($19) \n\ + xor $3, $5, $5 \n\ + ldl $27, 48($20) \n\ + ldl $0, 56($17) \n\ + \n\ + ldl $1, 56($18) \n\ + ldl $2, 56($19) \n\ + xor $6, $7, $7 # 8 cycles from $6 load \n\ + ldl $3, 56($20) \n\ + \n\ + stl $5, 32($17) \n\ + xor $21, $22, $22 # 8 cycles from $22 load \n\ + xor $7, $22, $22 \n\ + xor $23, $24, $24 # 5 cycles from $24 load \n\ + \n\ + stl $22, 40($17) \n\ + xor $25, $27, $27 # 5 cycles from $27 load \n\ + xor $24, $27, $27 \n\ + xor $0, $1, $1 # 5 cycles from $1 load \n\ + \n\ + stl $27, 48($17) \n\ + xor $2, $3, $3 # 4 cycles from $3 
load \n\ + xor $1, $3, $3 \n\ + subl $16, 1, $16 \n\ + \n\ + stl $3, 56($17) \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 4b \n\ + ret \n\ + .end xor_sw64_4 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_5 \n\ +xor_sw64_5: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +5: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 0($21) \n\ + ldl $5, 8($17) \n\ + ldl $6, 8($18) \n\ + ldl $7, 8($19) \n\ + \n\ + ldl $22, 8($20) \n\ + ldl $23, 8($21) \n\ + ldl $24, 16($17) \n\ + ldl $25, 16($18) \n\ + \n\ + ldl $27, 16($19) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $28, 16($20) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 16($21) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($17) \n\ + xor $3, $4, $4 # 7 cycles from $4 load \n\ + \n\ + stl $4, 0($17) \n\ + xor $5, $6, $6 # 7 cycles from $6 load \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $6, $23, $23 # 7 cycles from $23 load \n\ + \n\ + ldl $2, 24($18) \n\ + xor $22, $23, $23 \n\ + ldl $3, 24($19) \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $23, 8($17) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + ldl $4, 24($20) \n\ + xor $28, $0, $0 # 7 cycles from $0 load \n\ + \n\ + ldl $5, 24($21) \n\ + xor $27, $0, $0 \n\ + ldl $6, 32($17) \n\ + ldl $7, 32($18) \n\ + \n\ + stl $0, 16($17) \n\ + xor $1, $2, $2 # 6 cycles from $2 load \n\ + ldl $22, 32($19) \n\ + xor $3, $4, $4 # 4 cycles from $4 load \n\ + \n\ + ldl $23, 32($20) \n\ + xor $2, $4, $4 \n\ + ldl $24, 32($21) \n\ + ldl $25, 40($17) \n\ + \n\ + ldl $27, 40($18) \n\ + ldl $28, 40($19) \n\ + ldl $0, 40($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $5, 24($17) \n\ + xor $6, $7, $7 # 7 cycles from $7 load \n\ + ldl $1, 40($21) \n\ + ldl $2, 48($17) \n\ + \n\ + ldl $3, 48($18) \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + ldl $4, 48($19) \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + ldl $5, 48($20) \n\ + xor $22, $24, $24 \n\ + ldl $6, 48($21) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + \n\ + stl $24, 32($17) \n\ + xor $27, $28, $28 # 8 cycles from $28 load \n\ + ldl $7, 56($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + \n\ + ldl $22, 56($18) \n\ + ldl $23, 56($19) \n\ + ldl $24, 56($20) \n\ + ldl $25, 56($21) \n\ + \n\ + xor $28, $1, $1 \n\ + xor $2, $3, $3 # 9 cycles from $3 load \n\ + xor $3, $4, $4 # 9 cycles from $4 load \n\ + xor $5, $6, $6 # 8 cycles from $6 load \n\ + \n\ + stl $1, 40($17) \n\ + xor $4, $6, $6 \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + stl $6, 48($17) \n\ + xor $22, $24, $24 \n\ + subl $16, 1, $16 \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $25, 56($17) \n\ + addl $21, 64, $21 \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + \n\ + addl $18, 64, $18 \n\ + addl $17, 64, $17 \n\ + bgt $16, 5b \n\ + ret \n\ + .end xor_sw64_5 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_2 \n\ +xor_sw64_prefetch_2: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + .align 4 \n\ +2: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 8($17) \n\ + ldl $3, 8($18) \n\ + \n\ + ldl $4, 16($17) \n\ + ldl $5, 16($18) \n\ + ldl $6, 24($17) \n\ + ldl $7, 24($18) \n\ + 
\n\ + ldl $19, 32($17) \n\ + ldl $20, 32($18) \n\ + ldl $21, 40($17) \n\ + ldl $22, 40($18) \n\ + \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + ldl $25, 56($17) \n\ + ldl $27, 56($18) \n\ + \n\ + fillde 256($17) \n\ + xor $0, $1, $0 # 8 cycles from $1 load \n\ + fillde 256($18) \n\ + xor $2, $3, $2 \n\ + \n\ + stl $0, 0($17) \n\ + xor $4, $5, $4 \n\ + stl $2, 8($17) \n\ + xor $6, $7, $6 \n\ + \n\ + stl $4, 16($17) \n\ + xor $19, $20, $19 \n\ + stl $6, 24($17) \n\ + xor $21, $22, $21 \n\ + \n\ + stl $19, 32($17) \n\ + xor $23, $24, $23 \n\ + stl $21, 40($17) \n\ + xor $25, $27, $25 \n\ + \n\ + stl $23, 48($17) \n\ + subl $16, 1, $16 \n\ + stl $25, 56($17) \n\ + addl $17, 64, $17 \n\ + \n\ + addl $18, 64, $18 \n\ + bgt $16, 2b \n\ + ret \n\ + .end xor_sw64_prefetch_2 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_3 \n\ +xor_sw64_prefetch_3: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + fillde 192($19) \n\ + .align 4 \n\ +3: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 8($17) \n\ + \n\ + ldl $4, 8($18) \n\ + ldl $6, 16($17) \n\ + ldl $7, 16($18) \n\ + ldl $21, 24($17) \n\ + \n\ + ldl $22, 24($18) \n\ + ldl $24, 32($17) \n\ + ldl $25, 32($18) \n\ + ldl $5, 8($19) \n\ + \n\ + ldl $20, 16($19) \n\ + ldl $23, 24($19) \n\ + ldl $27, 32($19) \n\ + \n\ + xor $0, $1, $1 # 8 cycles from $0 load \n\ + xor $3, $4, $4 # 7 cycles from $4 load \n\ + xor $6, $7, $7 # 6 cycles from $7 load \n\ + xor $21, $22, $22 # 5 cycles from $22 load \n\ + \n\ + xor $1, $2, $2 # 9 cycles from $2 load \n\ + xor $24, $25, $25 # 5 cycles from $25 load \n\ + stl $2, 0($17) \n\ + xor $4, $5, $5 # 6 cycles from $5 load \n\ + \n\ + stl $5, 8($17) \n\ + xor $7, $20, $20 # 7 cycles from $20 load \n\ + stl $20, 16($17) \n\ + xor $22, $23, $23 # 7 cycles from $23 load \n\ + \n\ + stl $23, 24($17) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + stl $27, 32($17) \n\ + \n\ + ldl $0, 40($17) \n\ + ldl $1, 40($18) \n\ + ldl $3, 48($17) \n\ + ldl $4, 48($18) \n\ + \n\ + ldl $6, 56($17) \n\ + ldl $7, 56($18) \n\ + ldl $2, 40($19) \n\ + ldl $5, 48($19) \n\ + \n\ + ldl $20, 56($19) \n\ + fillde 256($17) \n\ + fillde 256($18) \n\ + fillde 256($19) \n\ + \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + xor $3, $4, $4 # 5 cycles from $4 load \n\ + xor $6, $7, $7 # 5 cycles from $7 load \n\ + xor $1, $2, $2 # 4 cycles from $2 load \n\ + \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + xor $7, $20, $20 # 4 cycles from $20 load \n\ + stl $2, 40($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $5, 48($17) \n\ + addl $19, 64, $19 \n\ + stl $20, 56($17) \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 3b \n\ + ret \n\ + .end xor_sw64_prefetch_3 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_4 \n\ +xor_sw64_prefetch_4: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + fillde 0($20) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + fillde 64($20) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + fillde 128($20) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + fillde 192($19) \n\ + fillde 192($20) \n\ + .align 4 \n\ +4: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ 
+ ldl $4, 8($17) \n\ + ldl $5, 8($18) \n\ + ldl $6, 8($19) \n\ + ldl $7, 8($20) \n\ + \n\ + ldl $21, 16($17) \n\ + ldl $22, 16($18) \n\ + ldl $23, 16($19) \n\ + ldl $24, 16($20) \n\ + \n\ + ldl $25, 24($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $27, 24($18) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 24($19) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $3, 0($17) \n\ + xor $6, $7, $7 \n\ + xor $21, $22, $22 # 7 cycles from $22 load \n\ + xor $5, $7, $7 \n\ + \n\ + stl $7, 8($17) \n\ + xor $23, $24, $24 # 7 cycles from $24 load \n\ + ldl $2, 32($17) \n\ + xor $22, $24, $24 \n\ + \n\ + ldl $3, 32($18) \n\ + ldl $4, 32($19) \n\ + ldl $5, 32($20) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + \n\ + ldl $6, 40($17) \n\ + ldl $7, 40($18) \n\ + ldl $21, 40($19) \n\ + ldl $22, 40($20) \n\ + \n\ + stl $24, 16($17) \n\ + xor $0, $1, $1 # 9 cycles from $1 load \n\ + xor $2, $3, $3 # 5 cycles from $3 load \n\ + xor $27, $1, $1 \n\ + \n\ + stl $1, 24($17) \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + ldl $23, 48($17) \n\ + xor $3, $5, $5 \n\ + \n\ + ldl $24, 48($18) \n\ + ldl $25, 48($19) \n\ + ldl $27, 48($20) \n\ + ldl $0, 56($17) \n\ + \n\ + ldl $1, 56($18) \n\ + ldl $2, 56($19) \n\ + ldl $3, 56($20) \n\ + xor $6, $7, $7 # 8 cycles from $6 load \n\ + \n\ + fillde 256($17) \n\ + xor $21, $22, $22 # 8 cycles from $22 load \n\ + fillde 256($18) \n\ + xor $7, $22, $22 \n\ + \n\ + fillde 256($19) \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + fillde 256($20) \n\ + xor $25, $27, $27 # 6 cycles from $27 load \n\ + \n\ + stl $5, 32($17) \n\ + xor $24, $27, $27 \n\ + xor $0, $1, $1 # 7 cycles from $1 load \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + stl $22, 40($17) \n\ + xor $1, $3, $3 \n\ + stl $27, 48($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $3, 56($17) \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 4b \n\ + ret \n\ + .end xor_sw64_prefetch_4 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_5 \n\ +xor_sw64_prefetch_5: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + fillde 0($20) \n\ + fillde 0($21) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + fillde 64($20) \n\ + fillde 64($21) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + fillde 128($20) \n\ + fillde 128($21) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + fillde 192($19) \n\ + fillde 192($20) \n\ + fillde 192($21) \n\ + .align 4 \n\ +5: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 0($21) \n\ + ldl $5, 8($17) \n\ + ldl $6, 8($18) \n\ + ldl $7, 8($19) \n\ + \n\ + ldl $22, 8($20) \n\ + ldl $23, 8($21) \n\ + ldl $24, 16($17) \n\ + ldl $25, 16($18) \n\ + \n\ + ldl $27, 16($19) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $28, 16($20) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 16($21) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($17) \n\ + xor $3, $4, $4 # 7 cycles from $4 load \n\ + \n\ + stl $4, 0($17) \n\ + xor $5, $6, $6 # 7 cycles from $6 load \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $6, $23, $23 # 7 cycles from $23 load \n\ + \n\ + ldl $2, 24($18) \n\ + xor $22, $23, $23 \n\ + ldl $3, 24($19) \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $23, 8($17) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + 
ldl $4, 24($20) \n\ + xor $28, $0, $0 # 7 cycles from $0 load \n\ + \n\ + ldl $5, 24($21) \n\ + xor $27, $0, $0 \n\ + ldl $6, 32($17) \n\ + ldl $7, 32($18) \n\ + \n\ + stl $0, 16($17) \n\ + xor $1, $2, $2 # 6 cycles from $2 load \n\ + ldl $22, 32($19) \n\ + xor $3, $4, $4 # 4 cycles from $4 load \n\ + \n\ + ldl $23, 32($20) \n\ + xor $2, $4, $4 \n\ + ldl $24, 32($21) \n\ + ldl $25, 40($17) \n\ + \n\ + ldl $27, 40($18) \n\ + ldl $28, 40($19) \n\ + ldl $0, 40($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $5, 24($17) \n\ + xor $6, $7, $7 # 7 cycles from $7 load \n\ + ldl $1, 40($21) \n\ + ldl $2, 48($17) \n\ + \n\ + ldl $3, 48($18) \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + ldl $4, 48($19) \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + ldl $5, 48($20) \n\ + xor $22, $24, $24 \n\ + ldl $6, 48($21) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + \n\ + stl $24, 32($17) \n\ + xor $27, $28, $28 # 8 cycles from $28 load \n\ + ldl $7, 56($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + \n\ + ldl $22, 56($18) \n\ + ldl $23, 56($19) \n\ + ldl $24, 56($20) \n\ + ldl $25, 56($21) \n\ + \n\ + fillde 256($17) \n\ + xor $28, $1, $1 \n\ + fillde 256($18) \n\ + xor $2, $3, $3 # 9 cycles from $3 load \n\ + \n\ + fillde 256($19) \n\ + xor $3, $4, $4 # 9 cycles from $4 load \n\ + fillde 256($20) \n\ + xor $5, $6, $6 # 8 cycles from $6 load \n\ + \n\ + stl $1, 40($17) \n\ + xor $4, $6, $6 \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + stl $6, 48($17) \n\ + xor $22, $24, $24 \n\ + fillde 256($21) \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $25, 56($17) \n\ + subl $16, 1, $16 \n\ + addl $21, 64, $21 \n\ + addl $20, 64, $20 \n\ + \n\ + addl $19, 64, $19 \n\ + addl $18, 64, $18 \n\ + addl $17, 64, $17 \n\ + bgt $16, 5b \n\ + \n\ + ret \n\ + .end xor_sw64_prefetch_5 \n\ +"); + +static struct xor_block_template xor_block_sw64 = { + .name = "sw64", + .do_2 = xor_sw64_2, + .do_3 = xor_sw64_3, + .do_4 = xor_sw64_4, + .do_5 = xor_sw64_5, +}; + +static struct xor_block_template xor_block_sw64_prefetch = { + .name = "sw64 prefetch", + .do_2 = xor_sw64_prefetch_2, + .do_3 = xor_sw64_prefetch_3, + .do_4 = xor_sw64_prefetch_4, + .do_5 = xor_sw64_prefetch_5, +}; + +/* For grins, also test the generic routines. */ +#include + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_sw64); \ + xor_speed(&xor_block_sw64_prefetch); \ + } while (0) + +/* Force the use of sw64_prefetch as it is significantly + * faster in the cold cache case. 
+ */ +#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sw64_prefetch) + +#endif /* _ASM_SW64_XOR_H */ diff --git a/arch/sw_64/include/uapi/asm/Kbuild b/arch/sw_64/include/uapi/asm/Kbuild new file mode 100644 index 0000000000000000000000000000000000000000..15700040f13870d24902a9cb9ed60961b6144cca --- /dev/null +++ b/arch/sw_64/include/uapi/asm/Kbuild @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +# UAPI Header export list + +generic-y += kvm_para.h +generated-y += unistd_64.h diff --git a/arch/sw_64/include/uapi/asm/auxvec.h b/arch/sw_64/include/uapi/asm/auxvec.h new file mode 100644 index 0000000000000000000000000000000000000000..309a8294be7a839fac7c55fe5959d8b1ad1404fc --- /dev/null +++ b/arch/sw_64/include/uapi/asm/auxvec.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_AUXVEC_H +#define _UAPI_ASM_SW64_AUXVEC_H + +/* VDSO location. */ +#define AT_SYSINFO_EHDR 33 + +/* entries in ARCH_DLINFO */ +#define AT_VECTOR_SIZE_ARCH 1 + +#endif /* _UAPI_ASM_SW64_AUXVEC_H */ diff --git a/arch/sw_64/include/uapi/asm/bitsperlong.h b/arch/sw_64/include/uapi/asm/bitsperlong.h new file mode 100644 index 0000000000000000000000000000000000000000..712c823e23d82cc177f74a77fed021a68e35a941 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/bitsperlong.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BITSPERLONG_H +#define _UAPI_ASM_SW64_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* _UAPI_ASM_SW64_BITSPERLONG_H */ diff --git a/arch/sw_64/include/uapi/asm/bootparam.h b/arch/sw_64/include/uapi/asm/bootparam.h new file mode 100644 index 0000000000000000000000000000000000000000..6ce75d65e86e293e541a21a15e3f0a68afc96293 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/bootparam.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BOOTPARAM_H +#define _UAPI_ASM_SW64_BOOTPARAM_H + +#ifndef __ASSEMBLY__ + +#include + +struct boot_params { + __u64 initrd_start; /* logical address of initrd */ + __u64 initrd_size; /* size of initrd */ + __u64 dtb_start; /* logical address of dtb */ + __u64 efi_systab; /* logical address of EFI system table */ + __u64 efi_memmap; /* logical address of EFI memory map */ + __u64 efi_memmap_size; /* size of EFI memory map */ + __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */ + __u64 efi_memdesc_version; /* memory descriptor version */ + __u64 cmdline; /* logical address of cmdline */ +}; +#endif + +#endif /* _UAPI_ASM_SW64_BOOTPARAM_H */ diff --git a/arch/sw_64/include/uapi/asm/bpf_perf_event.h b/arch/sw_64/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 0000000000000000000000000000000000000000..52f6f1e555f162ef7668965386cc758125726224 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BPF_PERF_EVENT_H +#define _UAPI_ASM_SW64_BPF_PERF_EVENT_H + +#include + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +#endif /* _UAPI_ASM_SW64_BPF_PERF_EVENT_H */ diff --git a/arch/sw_64/include/uapi/asm/byteorder.h b/arch/sw_64/include/uapi/asm/byteorder.h new file mode 100644 index 0000000000000000000000000000000000000000..ededdd045e96b2dc915be110f6d278a5bbc58654 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/byteorder.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BYTEORDER_H +#define 
_UAPI_ASM_SW64_BYTEORDER_H + +#include + +#endif /* _UAPI_ASM_SW64_BYTEORDER_H */ diff --git a/arch/sw_64/include/uapi/asm/compiler.h b/arch/sw_64/include/uapi/asm/compiler.h new file mode 100644 index 0000000000000000000000000000000000000000..64786df0f2668734957147e7ba30dbb52feb8dd1 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/compiler.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_COMPILER_H +#define _UAPI_ASM_SW64_COMPILER_H + +/* + * Herein are macros we use when describing various patterns we want to GCC. + * In all cases we can get better schedules out of the compiler if we hide + * as little as possible inside inline assembly. However, we want to be + * able to know what we'll get out before giving up inline assembly. Thus + * these tests and macros. + */ + +#define __kernel_inslb(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("inslb %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_inslh(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("inslh %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_insll(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("insll %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_inshw(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("inshw %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_extlb(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("extlb %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_extlh(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("extlh %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_cmpgeb(a, b) \ +({ \ + unsigned long __kir; \ + __asm__("cmpgeb %r2, %1, %0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \ + __kir; \ +}) + +#define __kernel_cttz(x) \ +({ \ + unsigned long __kir; \ + __asm__("cttz %1, %0" : "=r"(__kir) : "r"(x)); \ + __kir; \ +}) + +#define __kernel_ctlz(x) \ +({ \ + unsigned long __kir; \ + __asm__("ctlz %1, %0" : "=r"(__kir) : "r"(x)); \ + __kir; \ +}) + +#define __kernel_ctpop(x) \ +({ \ + unsigned long __kir; \ + __asm__("ctpop %1, %0" : "=r"(__kir) : "r"(x)); \ + __kir; \ +}) + +#endif /* _UAPI_ASM_SW64_COMPILER_H */ diff --git a/arch/sw_64/include/uapi/asm/errno.h b/arch/sw_64/include/uapi/asm/errno.h new file mode 100644 index 0000000000000000000000000000000000000000..969ee99ee86c7a72c125a903e438a2373cf36c4a --- /dev/null +++ b/arch/sw_64/include/uapi/asm/errno.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_ERRNO_H +#define _UAPI_ASM_SW64_ERRNO_H + +#include + +#undef EAGAIN /* 11 in errno-base.h */ + +#define EDEADLK 11 /* Resource deadlock would occur */ + +#define EAGAIN 35 /* Try again */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define EINPROGRESS 36 /* Operation now in progress */ +#define EALREADY 37 /* Operation already in progress */ +#define ENOTSOCK 38 /* Socket operation on non-socket */ +#define EDESTADDRREQ 39 /* Destination address required */ +#define EMSGSIZE 40 /* Message too long */ +#define EPROTOTYPE 41 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 42 /* Protocol not available */ +#define EPROTONOSUPPORT 43 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ +#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 46 
/* Protocol family not supported */ +#define EAFNOSUPPORT 47 /* Address family not supported by protocol */ +#define EADDRINUSE 48 /* Address already in use */ +#define EADDRNOTAVAIL 49 /* Cannot assign requested address */ +#define ENETDOWN 50 /* Network is down */ +#define ENETUNREACH 51 /* Network is unreachable */ +#define ENETRESET 52 /* Network dropped connection because of reset */ +#define ECONNABORTED 53 /* Software caused connection abort */ +#define ECONNRESET 54 /* Connection reset by peer */ +#define ENOBUFS 55 /* No buffer space available */ +#define EISCONN 56 /* Transport endpoint is already connected */ +#define ENOTCONN 57 /* Transport endpoint is not connected */ +#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 59 /* Too many references: cannot splice */ +#define ETIMEDOUT 60 /* Connection timed out */ +#define ECONNREFUSED 61 /* Connection refused */ +#define ELOOP 62 /* Too many symbolic links encountered */ +#define ENAMETOOLONG 63 /* File name too long */ +#define EHOSTDOWN 64 /* Host is down */ +#define EHOSTUNREACH 65 /* No route to host */ +#define ENOTEMPTY 66 /* Directory not empty */ + +#define EUSERS 68 /* Too many users */ +#define EDQUOT 69 /* Quota exceeded */ +#define ESTALE 70 /* Stale NFS file handle */ +#define EREMOTE 71 /* Object is remote */ + +#define ENOLCK 77 /* No record locks available */ +#define ENOSYS 78 /* Function not implemented */ + +#define ENOMSG 80 /* No message of desired type */ +#define EIDRM 81 /* Identifier removed */ +#define ENOSR 82 /* Out of streams resources */ +#define ETIME 83 /* Timer expired */ +#define EBADMSG 84 /* Not a data message */ +#define EPROTO 85 /* Protocol error */ +#define ENODATA 86 /* No data available */ +#define ENOSTR 87 /* Device not a stream */ + +#define ENOPKG 92 /* Package not installed */ + +#define EILSEQ 116 /* Illegal byte sequence */ + +/* The following are just random noise.. 
*/ +#define ECHRNG 88 /* Channel number out of range */ +#define EL2NSYNC 89 /* Level 2 not synchronized */ +#define EL3HLT 90 /* Level 3 halted */ +#define EL3RST 91 /* Level 3 reset */ + +#define ELNRNG 93 /* Link number out of range */ +#define EUNATCH 94 /* Protocol driver not attached */ +#define ENOCSI 95 /* No CSI structure available */ +#define EL2HLT 96 /* Level 2 halted */ +#define EBADE 97 /* Invalid exchange */ +#define EBADR 98 /* Invalid request descriptor */ +#define EXFULL 99 /* Exchange full */ +#define ENOANO 100 /* No anode */ +#define EBADRQC 101 /* Invalid request code */ +#define EBADSLT 102 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 104 /* Bad font file format */ +#define ENONET 105 /* Machine is not on the network */ +#define ENOLINK 106 /* Link has been severed */ +#define EADV 107 /* Advertise error */ +#define ESRMNT 108 /* Srmount error */ +#define ECOMM 109 /* Communication error on send */ +#define EMULTIHOP 110 /* Multihop attempted */ +#define EDOTDOT 111 /* RFS specific error */ +#define EOVERFLOW 112 /* Value too large for defined data type */ +#define ENOTUNIQ 113 /* Name not unique on network */ +#define EBADFD 114 /* File descriptor in bad state */ +#define EREMCHG 115 /* Remote address changed */ + +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ + +#define ELIBACC 122 /* Can not access a needed shared library */ +#define ELIBBAD 123 /* Accessing a corrupted shared library */ +#define ELIBSCN 124 /* .lib section in a.out corrupted */ +#define ELIBMAX 125 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 126 /* Cannot exec a shared library directly */ +#define ERESTART 127 /* Interrupted system call should be restarted */ +#define ESTRPIPE 128 /* Streams pipe error */ + +#define ENOMEDIUM 129 /* No medium found */ +#define EMEDIUMTYPE 130 /* Wrong medium type */ +#define ECANCELED 131 /* Operation Cancelled */ +#define ENOKEY 132 /* Required key not available */ +#define EKEYEXPIRED 133 /* Key has expired */ +#define EKEYREVOKED 134 /* Key has been revoked */ +#define EKEYREJECTED 135 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 136 /* Owner died */ +#define ENOTRECOVERABLE 137 /* State not recoverable */ + +#define ERFKILL 138 /* Operation not possible due to RF-kill */ + +#define EHWPOISON 139 /* Memory page has hardware error */ + +#endif /* _UAPI_ASM_SW64_ERRNO_H */ diff --git a/arch/sw_64/include/uapi/asm/fcntl.h b/arch/sw_64/include/uapi/asm/fcntl.h new file mode 100644 index 0000000000000000000000000000000000000000..be2daae2cc4dc1a1f7d9a8b653ade486e7329db9 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/fcntl.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_FCNTL_H +#define _UAPI_ASM_SW64_FCNTL_H + +#define O_CREAT 01000 /* not fcntl */ +#define O_TRUNC 02000 /* not fcntl */ +#define O_EXCL 04000 /* not fcntl */ +#define O_NOCTTY 010000 /* not fcntl */ + +#define O_NONBLOCK 00004 +#define O_APPEND 00010 +#define O_DSYNC 040000 /* used to be O_SYNC, see below */ +#define O_DIRECTORY 0100000 /* must be a directory */ +#define O_NOFOLLOW 0200000 /* don't follow links */ +#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */ +#define O_DIRECT 02000000 /* direct disk access */ +#define 
O_NOATIME 04000000 +#define O_CLOEXEC 010000000 /* set close_on_exec */ +/* + * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using + * the O_SYNC flag. We continue to use the existing numerical value + * for O_DSYNC semantics now, but using the correct symbolic name for it. + * This new value is used to request true Posix O_SYNC semantics. It is + * defined in this strange way to make sure applications compiled against + * new headers get at least O_DSYNC semantics on older kernels. + * + * This has the nice side-effect that we can simply test for O_DSYNC + * wherever we do not care if O_DSYNC or O_SYNC is used. + * + * Note: __O_SYNC must never be used directly. + */ +#define __O_SYNC 020000000 +#define O_SYNC (__O_SYNC|O_DSYNC) + +#define O_PATH 040000000 +#define __O_TMPFILE 0100000000 + +#define F_GETLK 7 +#define F_SETLK 8 +#define F_SETLKW 9 + +#define F_SETOWN 5 /* for sockets. */ +#define F_GETOWN 6 /* for sockets. */ +#define F_SETSIG 10 /* for sockets. */ +#define F_GETSIG 11 /* for sockets. */ + +/* for posix fcntl() and lockf() */ +#define F_RDLCK 1 +#define F_WRLCK 2 +#define F_UNLCK 8 + +/* for old implementation of bsd flock () */ +#define F_EXLCK 16 /* or 3 */ +#define F_SHLCK 32 /* or 4 */ + +#include + +#endif /* _UAPI_ASM_SW64_FCNTL_H */ diff --git a/arch/sw_64/include/uapi/asm/fpu.h b/arch/sw_64/include/uapi/asm/fpu.h new file mode 100644 index 0000000000000000000000000000000000000000..8945816c542b07ac363b11fc4f9150313ebb23ad --- /dev/null +++ b/arch/sw_64/include/uapi/asm/fpu.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_FPU_H +#define _UAPI_ASM_SW64_FPU_H + +/* + * SW-64 floating-point control register defines: + */ +#define FPCR_DNOD (1UL << 47) /* denorm INV trap disable */ +#ifdef CONFIG_SUBARCH_C3B +#define FPCR_DNZ (1UL << 48) /* denorms to zero */ +#else +#define FPCR_DNOE (1UL << 48) /* hardware denormal support */ +#endif +#define FPCR_INVD (1UL << 49) /* invalid op disable (opt.) */ +#define FPCR_DZED (1UL << 50) /* division by zero disable (opt.) */ +#define FPCR_OVFD (1UL << 51) /* overflow disable (optional) */ +#define FPCR_INV (1UL << 52) /* invalid operation */ +#define FPCR_DZE (1UL << 53) /* division by zero */ +#define FPCR_OVF (1UL << 54) /* overflow */ +#define FPCR_UNF (1UL << 55) /* underflow */ +#define FPCR_INE (1UL << 56) /* inexact */ +#define FPCR_IOV (1UL << 57) /* integer overflow */ +#define FPCR_UNDZ (1UL << 60) /* underflow to zero (opt.) */ +#define FPCR_UNFD (1UL << 61) /* underflow disable (opt.) */ +#define FPCR_INED (1UL << 62) /* inexact disable (opt.) */ +#define FPCR_SUM (1UL << 63) /* summary bit */ + +#define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */ +#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */ +#define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */ +#define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */ +#define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */ +#define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT) + +#define FPCR_MASK 0xffff800000000000L + +#ifdef CONFIG_SUBARCH_C3B +#define FPCR_INIT FPCR_DYN_NORMAL +#else +#define FPCR_INIT (FPCR_DYN_NORMAL | FPCR_DNOE) +#endif + +/* status bit coming from hardware fpcr . 
defined by fire3 */
+#define FPCR_STATUS_INV0 (1UL << 52)
+#define FPCR_STATUS_DZE0 (1UL << 53)
+#define FPCR_STATUS_OVF0 (1UL << 54)
+#define FPCR_STATUS_UNF0 (1UL << 55)
+#define FPCR_STATUS_INE0 (1UL << 56)
+#define FPCR_STATUS_OVI0 (1UL << 57)
+
+#define FPCR_STATUS_INV1 (1UL << 36)
+#define FPCR_STATUS_DZE1 (1UL << 37)
+#define FPCR_STATUS_OVF1 (1UL << 38)
+#define FPCR_STATUS_UNF1 (1UL << 39)
+#define FPCR_STATUS_INE1 (1UL << 40)
+#define FPCR_STATUS_OVI1 (1UL << 41)
+
+#define FPCR_STATUS_INV2 (1UL << 20)
+#define FPCR_STATUS_DZE2 (1UL << 21)
+#define FPCR_STATUS_OVF2 (1UL << 22)
+#define FPCR_STATUS_UNF2 (1UL << 23)
+#define FPCR_STATUS_INE2 (1UL << 24)
+#define FPCR_STATUS_OVI2 (1UL << 25)
+
+#define FPCR_STATUS_INV3 (1UL << 4)
+#define FPCR_STATUS_DZE3 (1UL << 5)
+#define FPCR_STATUS_OVF3 (1UL << 6)
+#define FPCR_STATUS_UNF3 (1UL << 7)
+#define FPCR_STATUS_INE3 (1UL << 8)
+#define FPCR_STATUS_OVI3 (1UL << 9)
+
+#define FPCR_STATUS_MASK0 (FPCR_STATUS_INV0 | FPCR_STATUS_DZE0 | \
+ FPCR_STATUS_OVF0 | FPCR_STATUS_UNF0 | \
+ FPCR_STATUS_INE0 | FPCR_STATUS_OVI0)
+
+#define FPCR_STATUS_MASK1 (FPCR_STATUS_INV1 | FPCR_STATUS_DZE1 | \
+ FPCR_STATUS_OVF1 | FPCR_STATUS_UNF1 | \
+ FPCR_STATUS_INE1 | FPCR_STATUS_OVI1)
+
+#define FPCR_STATUS_MASK2 (FPCR_STATUS_INV2 | FPCR_STATUS_DZE2 | \
+ FPCR_STATUS_OVF2 | FPCR_STATUS_UNF2 | \
+ FPCR_STATUS_INE2 | FPCR_STATUS_OVI2)
+
+#define FPCR_STATUS_MASK3 (FPCR_STATUS_INV3 | FPCR_STATUS_DZE3 | \
+ FPCR_STATUS_OVF3 | FPCR_STATUS_UNF3 | \
+ FPCR_STATUS_INE3 | FPCR_STATUS_OVI3)
+
+
+/*
+ * IEEE trap enables are implemented in software. These per-thread
+ * bits are stored in the "ieee_state" field of "struct thread_info".
+ * Thus, the bits are defined so as not to conflict with the
+ * floating-point enable bit (which is architected).
+ */
+#define IEEE_TRAP_ENABLE_INV (1UL << 1) /* invalid op */
+#define IEEE_TRAP_ENABLE_DZE (1UL << 2) /* division by zero */
+#define IEEE_TRAP_ENABLE_OVF (1UL << 3) /* overflow */
+#define IEEE_TRAP_ENABLE_UNF (1UL << 4) /* underflow */
+#define IEEE_TRAP_ENABLE_INE (1UL << 5) /* inexact */
+#define IEEE_TRAP_ENABLE_DNO (1UL << 6) /* denorm */
+#define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\
+ IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\
+ IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO)
+
+/* Denorm and Underflow flushing */
+#define IEEE_MAP_DMZ (1UL << 12) /* Map denorm inputs to zero */
+#define IEEE_MAP_UMZ (1UL << 13) /* Map underflowed outputs to zero */
+
+#define IEEE_MAP_MASK (IEEE_MAP_DMZ | IEEE_MAP_UMZ)
+
+/* status bits coming from fpcr: */
+#define IEEE_STATUS_INV (1UL << 17)
+#define IEEE_STATUS_DZE (1UL << 18)
+#define IEEE_STATUS_OVF (1UL << 19)
+#define IEEE_STATUS_UNF (1UL << 20)
+#define IEEE_STATUS_INE (1UL << 21)
+#define IEEE_STATUS_DNO (1UL << 22)
+
+
+#define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \
+ IEEE_STATUS_OVF | IEEE_STATUS_UNF | \
+ IEEE_STATUS_INE | IEEE_STATUS_DNO)
+
+#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | \
+ IEEE_STATUS_MASK | IEEE_MAP_MASK)
+
+#define IEEE_CURRENT_RM_SHIFT 32
+#define IEEE_CURRENT_RM_MASK (3UL << IEEE_CURRENT_RM_SHIFT)
+
+#define IEEE_STATUS_TO_EXCSUM_SHIFT 16
+
+#define IEEE_INHERIT (1UL << 63) /* inherit on thread create? 
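+ *
+ * Editorial worked example for the conversion helpers further below:
+ * the software lane-0 status bits occupy bits 17-22 (IEEE_STATUS_INV0
+ * is 1UL << 17), so ieee_swcr_to_fpcr() shifts them left by 35 to land
+ * IEEE_STATUS_INV0 on bit 52, i.e. FPCR_STATUS_INV0 above; likewise
+ * lane 1 shifts by +13 (23 -> 36), lane 2 by -14 (34 -> 20) and
+ * lane 3 by -36 (40 -> 4).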
*/
+
+/* ieee_state expanded to support simd, added by fire3 */
+
+#define IEEE_STATUS_INV0 (1UL << 17)
+#define IEEE_STATUS_DZE0 (1UL << 18)
+#define IEEE_STATUS_OVF0 (1UL << 19)
+#define IEEE_STATUS_UNF0 (1UL << 20)
+#define IEEE_STATUS_INE0 (1UL << 21)
+#define IEEE_STATUS_DNO0 (1UL << 22)
+#define IEEE_STATUS_MASK0 (IEEE_STATUS_INV0 | IEEE_STATUS_DZE0 | \
+ IEEE_STATUS_OVF0 | IEEE_STATUS_UNF0 | \
+ IEEE_STATUS_INE0 | IEEE_STATUS_DNO0)
+
+#define IEEE_STATUS0_TO_EXCSUM_SHIFT 16
+
+#define IEEE_STATUS_INV1 (1UL << 23)
+#define IEEE_STATUS_DZE1 (1UL << 24)
+#define IEEE_STATUS_OVF1 (1UL << 25)
+#define IEEE_STATUS_UNF1 (1UL << 26)
+#define IEEE_STATUS_INE1 (1UL << 27)
+#define IEEE_STATUS_DNO1 (1UL << 28)
+#define IEEE_STATUS_MASK1 (IEEE_STATUS_INV1 | IEEE_STATUS_DZE1 | \
+ IEEE_STATUS_OVF1 | IEEE_STATUS_UNF1 | \
+ IEEE_STATUS_INE1 | IEEE_STATUS_DNO1)
+
+#define IEEE_STATUS1_TO_EXCSUM_SHIFT 22
+
+#define IEEE_STATUS_INV2 (1UL << 34)
+#define IEEE_STATUS_DZE2 (1UL << 35)
+#define IEEE_STATUS_OVF2 (1UL << 36)
+#define IEEE_STATUS_UNF2 (1UL << 37)
+#define IEEE_STATUS_INE2 (1UL << 38)
+#define IEEE_STATUS_DNO2 (1UL << 39)
+#define IEEE_STATUS_MASK2 (IEEE_STATUS_INV2 | IEEE_STATUS_DZE2 | \
+ IEEE_STATUS_OVF2 | IEEE_STATUS_UNF2 | \
+ IEEE_STATUS_INE2 | IEEE_STATUS_DNO2)
+
+#define IEEE_STATUS2_TO_EXCSUM_SHIFT 33
+
+#define IEEE_STATUS_INV3 (1UL << 40)
+#define IEEE_STATUS_DZE3 (1UL << 41)
+#define IEEE_STATUS_OVF3 (1UL << 42)
+#define IEEE_STATUS_UNF3 (1UL << 43)
+#define IEEE_STATUS_INE3 (1UL << 44)
+#define IEEE_STATUS_DNO3 (1UL << 45)
+#define IEEE_STATUS_MASK3 (IEEE_STATUS_INV3 | IEEE_STATUS_DZE3 | \
+ IEEE_STATUS_OVF3 | IEEE_STATUS_UNF3 | \
+ IEEE_STATUS_INE3 | IEEE_STATUS_DNO3)
+
+#define IEEE_STATUS3_TO_EXCSUM_SHIFT 39
+
+
+/*
+ * Convert the software IEEE trap enable and status bits into the
+ * hardware fpcr format.
+ *
+ * Digital Unix engineers receive my thanks for not defining the
+ * software bits identically to the hardware bits. The chip designers
+ * receive my thanks for making all the not-implemented fpcr bits
+ * RAZ, forcing us to use system calls to read/write this value.
+ */
+static inline unsigned long
+ieee_swcr_to_fpcr(unsigned long sw)
+{
+ unsigned long fp;
+
+ fp = (sw & IEEE_STATUS_MASK0) << 35;
+ fp |= (sw & IEEE_STATUS_MASK1) << 13;
+ fp |= (sw & IEEE_STATUS_MASK2) >> 14;
+ fp |= (sw & IEEE_STATUS_MASK3) >> 36;
+
+ fp |= (sw & IEEE_MAP_DMZ) << 36;
+ fp |= (sw & IEEE_STATUS_MASK0 ? FPCR_SUM : 0);
+ fp |= (sw & IEEE_STATUS_MASK1 ? FPCR_SUM : 0);
+ fp |= (sw & IEEE_STATUS_MASK2 ? FPCR_SUM : 0);
+ fp |= (sw & IEEE_STATUS_MASK3 ? FPCR_SUM : 0);
+ fp |= (~sw & (IEEE_TRAP_ENABLE_INV
+ | IEEE_TRAP_ENABLE_DZE
+ | IEEE_TRAP_ENABLE_OVF)) << 48;
+ fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57;
+ fp |= (sw & IEEE_MAP_UMZ ? 
FPCR_UNDZ | FPCR_UNFD : 0); + fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41; + return fp; +} + +static inline unsigned long +ieee_fpcr_to_swcr(unsigned long fp) +{ + unsigned long sw; + + sw = (fp >> 35) & IEEE_STATUS_MASK; + sw |= (fp >> 36) & IEEE_MAP_DMZ; + sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV + | IEEE_TRAP_ENABLE_DZE + | IEEE_TRAP_ENABLE_OVF); + sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE); + sw |= (fp >> 47) & IEEE_MAP_UMZ; + sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO; + return sw; +} +#endif /* _UAPI_ASM_SW64_FPU_H */ diff --git a/arch/sw_64/include/uapi/asm/gentrap.h b/arch/sw_64/include/uapi/asm/gentrap.h new file mode 100644 index 0000000000000000000000000000000000000000..3786b8b52add336464589167bf99296d59e74c65 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/gentrap.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_GENTRAP_H +#define _UAPI_ASM_SW64_GENTRAP_H + +/* + * Definitions for gentrap causes. They are generated by user-level + * programs and therefore should be compatible with the corresponding + * legacy definitions. + */ +#define GEN_INTOVF -1 /* integer overflow */ +#define GEN_INTDIV -2 /* integer division by zero */ +#define GEN_FLTOVF -3 /* fp overflow */ +#define GEN_FLTDIV -4 /* fp division by zero */ +#define GEN_FLTUND -5 /* fp underflow */ +#define GEN_FLTINV -6 /* invalid fp operand */ +#define GEN_FLTINE -7 /* inexact fp operand */ +#define GEN_DECOVF -8 /* decimal overflow (for COBOL??) */ +#define GEN_DECDIV -9 /* decimal division by zero */ +#define GEN_DECINV -10 /* invalid decimal operand */ +#define GEN_ROPRAND -11 /* reserved operand */ +#define GEN_ASSERTERR -12 /* assertion error */ +#define GEN_NULPTRERR -13 /* null pointer error */ +#define GEN_STKOVF -14 /* stack overflow */ +#define GEN_STRLENERR -15 /* string length error */ +#define GEN_SUBSTRERR -16 /* substring error */ +#define GEN_RANGERR -17 /* range error */ +#define GEN_SUBRNG -18 +#define GEN_SUBRNG1 -19 +#define GEN_SUBRNG2 -20 +#define GEN_SUBRNG3 -21 /* these report range errors for */ +#define GEN_SUBRNG4 -22 /* subscripting (indexing) at levels 0..7 */ +#define GEN_SUBRNG5 -23 +#define GEN_SUBRNG6 -24 +#define GEN_SUBRNG7 -25 + +/* the remaining codes (-26..-1023) are reserved. 
*/ + +#endif /* _UAPI_ASM_SW64_GENTRAP_H */ diff --git a/arch/sw_64/include/uapi/asm/hmcall.h b/arch/sw_64/include/uapi/asm/hmcall.h new file mode 100644 index 0000000000000000000000000000000000000000..6867fb7b4d244ee325b1d6294d1cae7453246af2 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/hmcall.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_HMCALL_H +#define _UAPI_ASM_SW64_HMCALL_H + +/* hmcall may be used in user mode */ + +#define HMC_bpt 0x80 +#define HMC_callsys 0x83 +#define HMC_imb 0x86 +#define HMC_rdtp 0x9E +#define HMC_wrtp 0x9F +#define HMC_rdunique HMC_rdtp +#define HMC_wrunique HMC_wrtp +#define HMC_gentrap 0xAA +#define HMC_wrperfmon 0xB0 + +#endif /* _UAPI_ASM_SW64_HMCALL_H */ diff --git a/arch/sw_64/include/uapi/asm/ioctl.h b/arch/sw_64/include/uapi/asm/ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..fb5267b034fca832f44e0c0794b5d0248f0e86b8 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/ioctl.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_IOCTL_H +#define _UAPI_ASM_SW64_IOCTL_H + +#define _IOC_SIZEBITS 13 +#define _IOC_DIRBITS 3 + +/* + * Direction bits _IOC_NONE could be 0, but legacy version gives it a bit. + * And this turns out useful to catch old ioctl numbers in header files for + * us. + */ +#define _IOC_NONE 1U +#define _IOC_READ 2U +#define _IOC_WRITE 4U + +#include + +#endif /* _UAPI_ASM_SW64_IOCTL_H */ diff --git a/arch/sw_64/include/uapi/asm/ioctls.h b/arch/sw_64/include/uapi/asm/ioctls.h new file mode 100644 index 0000000000000000000000000000000000000000..36a7fc205aa787fe3d44b2bdf51fd4df47466e4b --- /dev/null +++ b/arch/sw_64/include/uapi/asm/ioctls.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_IOCTLS_H +#define _UAPI_ASM_SW64_IOCTLS_H + +#include + +#define FIOCLEX _IO('f', 1) +#define FIONCLEX _IO('f', 2) +#define FIOASYNC _IOW('f', 125, int) +#define FIONBIO _IOW('f', 126, int) +#define FIONREAD _IOR('f', 127, int) +#define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) + +#define TIOCGETP _IOR('t', 8, struct sgttyb) +#define TIOCSETP _IOW('t', 9, struct sgttyb) +#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */ + +#define TIOCSETC _IOW('t', 17, struct tchars) +#define TIOCGETC _IOR('t', 18, struct tchars) +#define TCGETS _IOR('t', 19, struct termios) +#define TCSETS _IOW('t', 20, struct termios) +#define TCSETSW _IOW('t', 21, struct termios) +#define TCSETSF _IOW('t', 22, struct termios) + +#define TCGETA _IOR('t', 23, struct termio) +#define TCSETA _IOW('t', 24, struct termio) +#define TCSETAW _IOW('t', 25, struct termio) +#define TCSETAF _IOW('t', 28, struct termio) + +#define TCSBRK _IO('t', 29) +#define TCXONC _IO('t', 30) +#define TCFLSH _IO('t', 31) + +#define TCGETS2 _IOR('T', 42, struct termios2) +#define TCSETS2 _IOW('T', 43, struct termios2) +#define TCSETSW2 _IOW('T', 44, struct termios2) +#define TCSETSF2 _IOW('T', 45, struct termios2) + +#define TIOCSWINSZ _IOW('t', 103, struct winsize) +#define TIOCGWINSZ _IOR('t', 104, struct winsize) +#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ +#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ +#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ + +#define TIOCGLTC _IOR('t', 116, struct ltchars) +#define TIOCSLTC _IOW('t', 117, struct ltchars) +#define TIOCSPGRP _IOW('t', 118, int) +#define TIOCGPGRP _IOR('t', 119, int) + +#define 
TIOCEXCL 0x540C +#define TIOCNXCL 0x540D +#define TIOCSCTTY 0x540E + +#define TIOCSTI 0x5412 +#define TIOCMGET 0x5415 +#define TIOCMBIS 0x5416 +#define TIOCMBIC 0x5417 +#define TIOCMSET 0x5418 +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 + +#define TIOCGSOFTCAR 0x5419 +#define TIOCSSOFTCAR 0x541A +#define TIOCLINUX 0x541C +#define TIOCCONS 0x541D +#define TIOCGSERIAL 0x541E +#define TIOCSSERIAL 0x541F +#define TIOCPKT 0x5420 +#define TIOCPKT_DATA 0 +#define TIOCPKT_FLUSHREAD 1 +#define TIOCPKT_FLUSHWRITE 2 +#define TIOCPKT_STOP 4 +#define TIOCPKT_START 8 +#define TIOCPKT_NOSTOP 16 +#define TIOCPKT_DOSTOP 32 +#define TIOCPKT_IOCTL 64 + + +#define TIOCNOTTY 0x5422 +#define TIOCSETD 0x5423 +#define TIOCGETD 0x5424 +#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TIOCGRS485 _IOR('T', 0x2E, struct serial_rs485) +#define TIOCSRS485 _IOWR('T', 0x2F, struct serial_rs485) +#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */ +#define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */ +#define TIOCVHANGUP 0x5437 +#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */ +#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */ +#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */ +#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */ +#define TIOCGISO7816 _IOR('T', 0x42, struct serial_iso7816) +#define TIOCSISO7816 _IOWR('T', 0x43, struct serial_iso7816) + +#define TIOCSERCONFIG 0x5453 +#define TIOCSERGWILD 0x5454 +#define TIOCSERSWILD 0x5455 +#define TIOCGLCKTRMIOS 0x5456 +#define TIOCSLCKTRMIOS 0x5457 +#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TIOCSERGETLSR 0x5459 /* Get line status register */ +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ +#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#define TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ + +#endif /* _UAPI_ASM_SW64_IOCTLS_H */ diff --git a/arch/sw_64/include/uapi/asm/kvm.h b/arch/sw_64/include/uapi/asm/kvm.h new file mode 100644 index 0000000000000000000000000000000000000000..2253475deaa5a12ca3c14637573f79ea4baf02c7 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/kvm.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_KVM_H +#define _UAPI_ASM_SW64_KVM_H + +/* + * KVM SW specific structures and definitions. 
+ */ +#define SWVM_IRQS 256 +#define IRQ_PENDING_INTX_SHIFT 16 +#define IRQ_PENDING_MSI_VECTORS_SHIFT 17 + +enum SW64_KVM_IRQ { + SW64_KVM_IRQ_IPI = 27, + SW64_KVM_IRQ_TIMER = 9, + SW64_KVM_IRQ_KBD = 29, + SW64_KVM_IRQ_MOUSE = 30, +}; + +#define SWVM_VM_TYPE_DEFAULT 0 +#define SWVM_VM_TYPE_PHYVCPU 1 +#define __KVM_HAVE_IRQ_LINE + +#define SWVM_NUM_NUMA_MEMBANKS 1 +#define KVM_NR_IRQCHIPS 1 +/* + * for KVM_GET_REGS and KVM_SET_REGS + */ +struct kvm_regs { + unsigned long r0; + unsigned long r1; + unsigned long r2; + unsigned long r3; + + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + + unsigned long r19; + unsigned long r20; + unsigned long r21; + unsigned long r22; + + unsigned long r23; + unsigned long r24; + unsigned long r25; + unsigned long r26; + + unsigned long r27; + unsigned long r28; + unsigned long __padding0; + unsigned long fpcr; + + unsigned long fp[124]; + /* These are saved by HMcode: */ + unsigned long ps; + unsigned long pc; + unsigned long gp; + unsigned long r16; + unsigned long r17; + unsigned long r18; +}; + + +/* + * return stack for __sw64_vcpu_run + */ +struct vcpu_run_ret_stack { + unsigned long ra; + unsigned long r0; +}; + +struct host_int_args { + unsigned long r18; + unsigned long r17; + unsigned long r16; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + */ +struct kvm_fpu { +}; + +struct hcall_args { + unsigned long arg0, arg1, arg2; +}; + +struct phyvcpu_hcall_args { + unsigned long call; + struct hcall_args args; +}; + +struct kvm_debug_exit_arch { + unsigned long epc; +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct swvm_mem_bank { + unsigned long guest_phys_addr; + unsigned long host_phys_addr; + unsigned long host_addr; + unsigned long size; +}; + +struct swvm_mem { + struct swvm_mem_bank membank[SWVM_NUM_NUMA_MEMBANKS]; +}; + +#endif /* _UAPI_ASM_SW64_KVM_H */ diff --git a/arch/sw_64/include/uapi/asm/mman.h b/arch/sw_64/include/uapi/asm/mman.h new file mode 100644 index 0000000000000000000000000000000000000000..c83c4b50662a44c5f849990355ac763d4f210329 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/mman.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_MMAN_H +#define _UAPI_ASM_SW64_MMAN_H + +#include + +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x8 /* page may be used for atomic ops */ +#define PROT_NONE 0x0 /* page can not be accessed */ +#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ +#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ + +#define MAP_TYPE 0x0f /* Mask for type of mapping */ +#define MAP_FIXED 0x100 /* Interpret addr exactly */ +#define MAP_ANONYMOUS 0x10 /* don't use a file */ + +/* not used by linux, may be deprecated */ +#define _MAP_HASSEMAPHORE 0x0200 +#define _MAP_INHERIT 0x0400 +#define _MAP_UNALIGNED 0x0800 + +/* These are linux-specific */ +#define MAP_GROWSDOWN 0x01000 /* stack-like segment */ +#define MAP_DENYWRITE 0x02000 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x04000 /* mark it as an executable */ +#define 
MAP_LOCKED 0x08000 /* lock the mapping */ +#define MAP_NORESERVE 0x10000 /* don't check for reservations */ +#define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x40000 /* do not block on IO */ +#define MAP_STACK 0x80000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x100000 /* create a huge page mapping */ +#define MAP_FIXED_NOREPLACE 0x200000 /* MAP_FIXED which doesn't unmap underlying mapping */ + +#define MS_ASYNC 1 /* sync memory asynchronously */ +#define MS_SYNC 2 /* synchronous memory sync */ +#define MS_INVALIDATE 4 /* invalidate the caches */ + +#define MCL_CURRENT 8192 /* lock all currently mapped pages */ +#define MCL_FUTURE 16384 /* lock all additions to address space */ +#define MCL_ONFAULT 32768 /* lock all pages that are faulted in */ + +#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */ + +#define MADV_NORMAL 0 /* no further special treatment */ +#define MADV_RANDOM 1 /* expect random page references */ +#define MADV_SEQUENTIAL 2 /* expect sequential page references */ +#define MADV_WILLNEED 3 /* will need these pages */ +#define MADV_SPACEAVAIL 5 /* ensure resources are available */ +#define MADV_DONTNEED 6 /* don't need these pages */ + +/* common/generic parameters */ +#define MADV_FREE 8 /* free pages only if memory pressure */ +#define MADV_REMOVE 9 /* remove these pages & resources */ +#define MADV_DONTFORK 10 /* don't inherit across fork */ +#define MADV_DOFORK 11 /* do inherit across fork */ + +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ + +#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ +#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ + +#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump, + overrides the coredump filter bits */ +#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ + +#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ +#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ + +#define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ + +#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ +#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ + +#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ + +#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ + +/* compatibility flags */ +#define MAP_FILE 0 + + +#define PKEY_DISABLE_ACCESS 0x1 +#define PKEY_DISABLE_WRITE 0x2 +#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE) + +#endif /* _UAPI_ASM_SW64_MMAN_H */ diff --git a/arch/sw_64/include/uapi/asm/param.h b/arch/sw_64/include/uapi/asm/param.h new file mode 100644 index 0000000000000000000000000000000000000000..d38e8202dd97e8b4867e70bb3853da9440a1a0c1 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/param.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_PARAM_H +#define _UAPI_ASM_SW64_PARAM_H + +#define EXEC_PAGESIZE 8192 + +#include + +#endif /* _UAPI_ASM_SW64_PARAM_H */ diff --git a/arch/sw_64/include/uapi/asm/perf_regs.h b/arch/sw_64/include/uapi/asm/perf_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..871ad4663d1dbd29cd23395b977615323c67d81e --- /dev/null +++ b/arch/sw_64/include/uapi/asm/perf_regs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 
WITH Linux-syscall-note */ + +#ifndef _UAPI_ASM_SW64_PERF_REGS_H +#define _UAPI_ASM_SW64_PERF_REGS_H + +enum perf_event_sw64_regs { + PERF_REG_SW64_R0, + PERF_REG_SW64_R1, + PERF_REG_SW64_R2, + PERF_REG_SW64_R3, + PERF_REG_SW64_R4, + PERF_REG_SW64_R5, + PERF_REG_SW64_R6, + PERF_REG_SW64_R7, + PERF_REG_SW64_R8, + PERF_REG_SW64_R9, + PERF_REG_SW64_R10, + PERF_REG_SW64_R11, + PERF_REG_SW64_R12, + PERF_REG_SW64_R13, + PERF_REG_SW64_R14, + PERF_REG_SW64_R15, + PERF_REG_SW64_R16, + PERF_REG_SW64_R17, + PERF_REG_SW64_R18, + PERF_REG_SW64_R19, + PERF_REG_SW64_R20, + PERF_REG_SW64_R21, + PERF_REG_SW64_R22, + PERF_REG_SW64_R23, + PERF_REG_SW64_R24, + PERF_REG_SW64_R25, + PERF_REG_SW64_R26, + PERF_REG_SW64_R27, + PERF_REG_SW64_R28, + PERF_REG_SW64_GP, + PERF_REG_SW64_SP, + PERF_REG_SW64_PC, + PERF_REG_SW64_MAX, +}; +#endif /* _UAPI_ASM_SW64_PERF_REGS_H */ diff --git a/arch/sw_64/include/uapi/asm/ptrace.h b/arch/sw_64/include/uapi/asm/ptrace.h new file mode 100644 index 0000000000000000000000000000000000000000..3fd53450e418bcedc8faf45f37cb0e0bc8dca8ad --- /dev/null +++ b/arch/sw_64/include/uapi/asm/ptrace.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_PTRACE_H +#define _UAPI_ASM_SW64_PTRACE_H + +#include + +#ifndef __ASSEMBLY__ +/* + * User structures for general purpose, floating point and debug registers. + */ +struct user_pt_regs { + __u64 regs[31]; + __u64 pc; + __u64 pstate; +}; + +/* 256 bits aligned for simd */ +struct fpreg { + __u64 v[4] __attribute__((aligned(32))); +}; + +struct user_fpsimd_state { + struct fpreg fp[31]; + __u64 fpcr; + __u64 __reserved[3]; +}; +#endif + +/* PTRACE_ATTACH is 16 */ +/* PTRACE_DETACH is 17 */ + +#define PT_REG_BASE 0 +#define PT_REG_END 30 +#define PT_FPREG_BASE 32 +#define PT_FPREG_END 62 +#define PT_FPCR 63 +#define PT_PC 64 +#define PT_TP 65 +#define PT_UNIQUE PT_TP +#define PT_VECREG_BASE 67 +#define PT_VECREG_END 161 +#define PT_F31_V1 98 +#define PT_F31_V2 130 +#define PT_DA_MATCH 163 +#define PT_DA_MASK 164 +#define PT_DV_MATCH 165 +#define PT_DV_MASK 166 +#define PT_DC_CTL 167 +#define PT_MATCH_CTL 167 +#define PT_IA_MATCH 168 +#define PT_IA_MASK 169 +#define PT_IV_MATCH 170 +#define PT_IDA_MATCH 171 +#define PT_IDA_MASK 172 + +#endif /* _UAPI_ASM_SW64_PTRACE_H */ diff --git a/arch/sw_64/include/uapi/asm/regdef.h b/arch/sw_64/include/uapi/asm/regdef.h new file mode 100644 index 0000000000000000000000000000000000000000..7460a987c7267b85ffbe5238cf258944e7c0309c --- /dev/null +++ b/arch/sw_64/include/uapi/asm/regdef.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_REGDEF_H +#define _UAPI_ASM_SW64_REGDEF_H + +#define v0 $0 /* function return value */ + +#define t0 $1 /* temporary registers (caller-saved) */ +#define t1 $2 +#define t2 $3 +#define t3 $4 +#define t4 $5 +#define t5 $6 +#define t6 $7 +#define t7 $8 + +#define s0 $9 /* saved-registers (callee-saved registers) */ +#define s1 $10 +#define s2 $11 +#define s3 $12 +#define s4 $13 +#define s5 $14 +#define s6 $15 +#define fp s6 /* frame-pointer (s6 in frame-less procedures) */ + +#define a0 $16 /* argument registers (caller-saved) */ +#define a1 $17 +#define a2 $18 +#define a3 $19 +#define a4 $20 +#define a5 $21 + +#define t8 $22 /* more temps (caller-saved) */ +#define t9 $23 +#define t10 $24 +#define t11 $25 +#define ra $26 /* return address register */ +#define t12 $27 + +#define pv t12 /* procedure-variable register */ +#define AT $at /* assembler temporary */ +#define gp $29 /* 
global pointer */
+#define sp	$30	/* stack pointer */
+#define zero	$31	/* reads as zero, writes are noops */
+
+#endif /* _UAPI_ASM_SW64_REGDEF_H */
diff --git a/arch/sw_64/include/uapi/asm/resource.h b/arch/sw_64/include/uapi/asm/resource.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e1ce8f6ee64cdf82e04eed3a5a571489040bb97
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/resource.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_RESOURCE_H
+#define _UAPI_ASM_SW64_RESOURCE_H
+
+/*
+ * SW-64/Linux-specific ordering of these four resource limit IDs;
+ * the rest comes from the generic header:
+ */
+#define RLIMIT_NOFILE	6	/* max number of open files */
+#define RLIMIT_AS	7	/* address space limit */
+#define RLIMIT_NPROC	8	/* max number of processes */
+#define RLIMIT_MEMLOCK	9	/* max locked-in-memory address space */
+
+#include <asm-generic/resource.h>
+
+#endif /* _UAPI_ASM_SW64_RESOURCE_H */
diff --git a/arch/sw_64/include/uapi/asm/setup.h b/arch/sw_64/include/uapi/asm/setup.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6cca45250491ca64bfc6a6c473da19f01984bb9
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/setup.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_SETUP_H
+#define _UAPI_ASM_SW64_SETUP_H
+
+#define COMMAND_LINE_SIZE	2048
+
+#endif /* _UAPI_ASM_SW64_SETUP_H */
diff --git a/arch/sw_64/include/uapi/asm/sigcontext.h b/arch/sw_64/include/uapi/asm/sigcontext.h
new file mode 100644
index 0000000000000000000000000000000000000000..08a0814703830370efae670d59f4262b7da50dbd
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/sigcontext.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_SIGCONTEXT_H
+#define _UAPI_ASM_SW64_SIGCONTEXT_H
+
+/*
+ * Signal context structure
+ *
+ * The context is saved before a signal handler is invoked, and it is
+ * restored by sys_sigreturn / sys_rt_sigreturn.
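+ *
+ * Illustrative only: a SA_SIGINFO handler can reach this context
+ * through the ucontext pointer passed as its third argument, e.g.
+ *
+ *	void handler(int sig, siginfo_t *info, void *uc)
+ *	{
+ *		struct ucontext *ctx = uc;
+ *		unsigned long pc = ctx->uc_mcontext.sc_pc;
+ *	}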
+ */
+struct sigcontext {
+	long sc_onstack;
+	long sc_mask;
+	long sc_pc;
+	long sc_ps;
+	long sc_regs[32];
+	long sc_ownedfp;
+	long sc_fpregs[128];	/* SIMD-FP */
+	unsigned long sc_fpcr;
+	/* TODO: Following are unused, to be removed and synced with libc */
+	unsigned long sc_fp_control;
+	unsigned long sc_reserved1, sc_reserved2;
+	unsigned long sc_ssize;
+	char *sc_sbase;
+	unsigned long sc_traparg_a0;
+	unsigned long sc_traparg_a1;
+	unsigned long sc_traparg_a2;
+	unsigned long sc_fp_trap_pc;
+	unsigned long sc_fp_trigger_sum;
+	unsigned long sc_fp_trigger_inst;
+};
+
+
+#endif /* _UAPI_ASM_SW64_SIGCONTEXT_H */
diff --git a/arch/sw_64/include/uapi/asm/siginfo.h b/arch/sw_64/include/uapi/asm/siginfo.h
new file mode 100644
index 0000000000000000000000000000000000000000..f47fb917c9b2858ec3fbc9fd01517daf9edf1864
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/siginfo.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_SIGINFO_H
+#define _UAPI_ASM_SW64_SIGINFO_H
+
+#define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
+
+#include <asm-generic/siginfo.h>
+
+
+#endif /* _UAPI_ASM_SW64_SIGINFO_H */
diff --git a/arch/sw_64/include/uapi/asm/signal.h b/arch/sw_64/include/uapi/asm/signal.h
new file mode 100644
index 0000000000000000000000000000000000000000..0d7a935fe37c7827866f467c3e205e8d7b2a890a
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/signal.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_SIGNAL_H
+#define _UAPI_ASM_SW64_SIGNAL_H
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG		32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * Linux/sw64 has different signal numbers than Linux/i386.
+ */
+#define SIGHUP		1
+#define SIGINT		2
+#define SIGQUIT		3
+#define SIGILL		4
+#define SIGTRAP		5
+#define SIGABRT		6
+#define SIGEMT		7
+#define SIGFPE		8
+#define SIGKILL		9
+#define SIGBUS		10
+#define SIGSEGV		11
+#define SIGSYS		12
+#define SIGPIPE		13
+#define SIGALRM		14
+#define SIGTERM		15
+#define SIGURG		16
+#define SIGSTOP		17
+#define SIGTSTP		18
+#define SIGCONT		19
+#define SIGCHLD		20
+#define SIGTTIN		21
+#define SIGTTOU		22
+#define SIGIO		23
+#define SIGXCPU		24
+#define SIGXFSZ		25
+#define SIGVTALRM	26
+#define SIGPROF		27
+#define SIGWINCH	28
+#define SIGINFO		29
+#define SIGUSR1		30
+#define SIGUSR2		31
+
+#define SIGPOLL	SIGIO
+#define SIGPWR	SIGINFO
+#define SIGIOT	SIGABRT
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN	32
+#define SIGRTMAX	_NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
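+ *
+ * A minimal installation sketch (illustrative; assumes a handler()
+ * defined elsewhere):
+ *
+ *	struct sigaction act = {
+ *		.sa_sigaction = handler,
+ *		.sa_flags = SA_SIGINFO | SA_RESTART,
+ *	};
+ *	sigaction(SIGCHLD, &act, NULL);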
+ */ + +#define SA_ONSTACK 0x00000001 +#define SA_RESTART 0x00000002 +#define SA_NOCLDSTOP 0x00000004 +#define SA_NODEFER 0x00000008 +#define SA_RESETHAND 0x00000010 +#define SA_NOCLDWAIT 0x00000020 +#define SA_SIGINFO 0x00000040 + +#define SA_ONESHOT SA_RESETHAND +#define SA_NOMASK SA_NODEFER + +#define MINSIGSTKSZ 4096 +#define SIGSTKSZ 16384 + +#define SIG_BLOCK 1 /* for blocking signals */ +#define SIG_UNBLOCK 2 /* for unblocking signals */ +#define SIG_SETMASK 3 /* for setting the signal mask */ + +#include + +#ifndef __KERNEL__ +/* Here we must cater to libcs that poke about in kernel headers. */ + +struct sigaction { + union { + __sighandler_t _sa_handler; + void (*_sa_sigaction)(int sig, struct siginfo *info, void *ucontext); + } _u; + sigset_t sa_mask; + int sa_flags; +}; + +#define sa_handler _u._sa_handler +#define sa_sigaction _u._sa_sigaction + +#endif /* __KERNEL__ */ + +typedef struct sigaltstack { + void __user *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +#endif /* _UAPI_ASM_SW64_SIGNAL_H */ diff --git a/arch/sw_64/include/uapi/asm/socket.h b/arch/sw_64/include/uapi/asm/socket.h new file mode 100644 index 0000000000000000000000000000000000000000..1094d11fff5b7c7112935bc0f93353e7490c3dc1 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/socket.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SOCKET_H +#define _UAPI_ASM_SW64_SOCKET_H + +#include +#include + +/* For setsockopt(2) */ +/* + * Note: we only bother about making the SOL_SOCKET options + * same as legacy, as that's all that "normal" programs are + * likely to set. We don't necessarily want to be binary + * compatible with _everything_. + */ +#define SOL_SOCKET 0xffff + +#define SO_DEBUG 0x0001 +#define SO_REUSEADDR 0x0004 +#define SO_KEEPALIVE 0x0008 +#define SO_DONTROUTE 0x0010 +#define SO_BROADCAST 0x0020 +#define SO_LINGER 0x0080 +#define SO_OOBINLINE 0x0100 +#define SO_REUSEPORT 0x0200 + +#define SO_TYPE 0x1008 +#define SO_ERROR 0x1007 +#define SO_SNDBUF 0x1001 +#define SO_RCVBUF 0x1002 +#define SO_SNDBUFFORCE 0x100a +#define SO_RCVBUFFORCE 0x100b +#define SO_RCVLOWAT 0x1010 +#define SO_SNDLOWAT 0x1011 +#define SO_RCVTIMEO_OLD 0x1012 +#define SO_SNDTIMEO_OLD 0x1013 +#define SO_ACCEPTCONN 0x1014 +#define SO_PROTOCOL 0x1028 +#define SO_DOMAIN 0x1029 + +/* linux-specific, might as well be the same as on i386 */ +#define SO_NO_CHECK 11 +#define SO_PRIORITY 12 +#define SO_BSDCOMPAT 14 + +#define SO_PASSCRED 17 +#define SO_PEERCRED 18 +#define SO_BINDTODEVICE 25 + +/* Socket filtering */ +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 +#define SO_GET_FILTER SO_ATTACH_FILTER + +#define SO_PEERNAME 28 + +#define SO_PEERSEC 30 +#define SO_PASSSEC 34 + +/* Security levels - as per NRL IPv6 - don't actually do anything */ +#define SO_SECURITY_AUTHENTICATION 19 +#define SO_SECURITY_ENCRYPTION_TRANSPORT 20 +#define SO_SECURITY_ENCRYPTION_NETWORK 21 + +#define SO_MARK 36 + +#define SO_RXQ_OVFL 40 + +#define SO_WIFI_STATUS 41 +#define SCM_WIFI_STATUS SO_WIFI_STATUS +#define SO_PEEK_OFF 42 + +/* Instruct lower device to use last 4-bytes of skb data as FCS */ +#define SO_NOFCS 43 + +#define SO_LOCK_FILTER 44 +#define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 +#define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + +#define SO_ATTACH_REUSEPORT_CBPF 51 +#define SO_ATTACH_REUSEPORT_EBPF 52 + +#define SO_CNX_ADVICE 53 + +#define 
SCM_TIMESTAMPING_OPT_STATS 54 + +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + +#define SCM_TIMESTAMPING_PKTINFO 58 + +#define SO_PEERGROUPS 59 + +#define SO_ZEROCOPY 60 + +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + +#define SO_BINDTOIFINDEX 62 + +#define SO_TIMESTAMP_OLD 29 +#define SO_TIMESTAMPNS_OLD 35 +#define SO_TIMESTAMPING_OLD 37 + +#define SO_TIMESTAMP_NEW 63 +#define SO_TIMESTAMPNS_NEW 64 +#define SO_TIMESTAMPING_NEW 65 + +#define SO_RCVTIMEO_NEW 66 +#define SO_SNDTIMEO_NEW 67 + +#define SO_DETACH_REUSEPORT_BPF 68 + +#define SO_PREFER_BUSY_POLL 69 +#define SO_BUSY_POLL_BUDGET 70 + +#define SO_NETNS_COOKIE 71 + +#define SO_BUF_LOCK 72 + +#define SO_RESERVE_MEM 73 + +#define SO_TXREHASH 74 + +#define SO_RCVMARK 75 + +#define SO_PASSPIDFD 76 +#define SO_PEERPIDFD 77 + +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + +#endif /* _UAPI_ASM_SW64_SOCKET_H */ diff --git a/arch/sw_64/include/uapi/asm/sockios.h b/arch/sw_64/include/uapi/asm/sockios.h new file mode 100644 index 0000000000000000000000000000000000000000..88e89dcf8300ea66394003769c351ed91c14cd55 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/sockios.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SOCKIOS_H +#define _UAPI_ASM_SW64_SOCKIOS_H + +/* Socket-level I/O control calls. */ + +#define FIOGETOWN _IOR('f', 123, int) +#define FIOSETOWN _IOW('f', 124, int) + +#define SIOCATMARK _IOR('s', 7, int) +#define SIOCSPGRP _IOW('s', 8, pid_t) +#define SIOCGPGRP _IOR('s', 9, pid_t) + +#define SIOCGSTAMP_OLD 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS_OLD 0x8907 /* Get stamp (timespec) */ + +#endif /* _UAPI_ASM_SW64_SOCKIOS_H */ diff --git a/arch/sw_64/include/uapi/asm/stat.h b/arch/sw_64/include/uapi/asm/stat.h new file mode 100644 index 0000000000000000000000000000000000000000..677a75f1cf5bfc05b81034278ebac4b25442f2d7 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/stat.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_STAT_H +#define _UAPI_ASM_SW64_STAT_H + +struct stat { + unsigned int st_dev; + unsigned int st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_rdev; + long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; + unsigned int st_blksize; + unsigned int st_blocks; + unsigned int st_flags; + unsigned int st_gen; +}; + +/* The stat64 structure increases the size of dev_t, blkcnt_t, adds + * nanosecond resolution times, and padding for expansion. 
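+ * Timestamps are therefore carried as a seconds/nanoseconds pair
+ * (e.g. st_mtime plus st_mtime_nsec) that userspace can recombine
+ * into a struct timespec.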
+ */ + +struct stat64 { + unsigned long st_dev; + unsigned long st_ino; + unsigned long st_rdev; + long st_size; + unsigned long st_blocks; + + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_blksize; + unsigned int st_nlink; + unsigned int __pad0; + + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + long __unused[3]; +}; + +#endif /* _UAPI_ASM_SW64_STAT_H */ diff --git a/arch/sw_64/include/uapi/asm/swab.h b/arch/sw_64/include/uapi/asm/swab.h new file mode 100644 index 0000000000000000000000000000000000000000..275661b346ac202afed612cee1f75bc1a0b6209e --- /dev/null +++ b/arch/sw_64/include/uapi/asm/swab.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SWAB_H +#define _UAPI_ASM_SW64_SWAB_H + +#include +#include +#include + +#ifdef __GNUC__ + +static inline __attribute_const__ __u32 __arch_swab32(__u32 x) +{ + /* + * Unfortunately, we can't use the 6 instruction sequence + * on sw64 since the latency of the UNPKBW is 3, which is + * pretty hard to hide. Just in case a future implementation + * has a lower latency, here's the sequence (also by Mike Burrows) + * + * UNPKBW a0, v0 v0: 00AA00BB00CC00DD + * SLL v0, 24, a0 a0: BB00CC00DD000000 + * BIS v0, a0, a0 a0: BBAACCBBDDCC00DD + * EXTWL a0, 6, v0 v0: 000000000000BBAA + * ZAP a0, 0xf3, a0 a0: 00000000DDCC0000 + * ADDL a0, v0, v0 v0: ssssssssDDCCBBAA + */ + + __u64 t0, t1, t2, t3; + + t0 = __kernel_inshw(x, 7); /* t0 : 0000000000AABBCC */ + t1 = __kernel_inslh(x, 3); /* t1 : 000000CCDD000000 */ + t1 |= t0; /* t1 : 000000CCDDAABBCC */ + t2 = t1 >> 16; /* t2 : 0000000000CCDDAA */ + t0 = t1 & 0xFF00FF00; /* t0 : 00000000DD00BB00 */ + t3 = t2 & 0x00FF00FF; /* t3 : 0000000000CC00AA */ + t1 = t0 + t3; /* t1 : ssssssssDDCCBBAA */ + + return t1; +} +#define __arch_swab32 __arch_swab32 + +#endif /* __GNUC__ */ + +#endif /* _UAPI_ASM_SW64_SWAB_H */ diff --git a/arch/sw_64/include/uapi/asm/sysinfo.h b/arch/sw_64/include/uapi/asm/sysinfo.h new file mode 100644 index 0000000000000000000000000000000000000000..667405c3447cd841cda75d7e8cf624166db70d2c --- /dev/null +++ b/arch/sw_64/include/uapi/asm/sysinfo.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * include/asm/sysinfo.h + */ + +#ifndef _UAPI_ASM_SW64_SYSINFO_H +#define _UAPI_ASM_SW64_SYSINFO_H + +#define GSI_IEEE_FP_CONTROL 45 + +#define SSI_IEEE_FP_CONTROL 14 +#define SSI_IEEE_RAISE_EXCEPTION 1001 /* linux specific */ + +#define UAC_BITMASK 7 +#define UAC_NOPRINT 1 +#define UAC_NOFIX 2 +#define UAC_SIGBUS 4 +#define PR_NOFIX 4 /* do not fix up unaligned accesses */ + +#endif /* _UAPI_ASM_SW64_SYSINFO_H */ diff --git a/arch/sw_64/include/uapi/asm/termbits.h b/arch/sw_64/include/uapi/asm/termbits.h new file mode 100644 index 0000000000000000000000000000000000000000..a71aaf33c26cc9f046e65e97d694ab0ef6ee9b06 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/termbits.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_TERMBITS_H +#define _UAPI_ASM_SW64_TERMBITS_H + +#include + +typedef unsigned int tcflag_t; + +/* + * termios type and macro definitions. Be careful about adding stuff + * to this file since it's used in GNU libc and there are strict rules + * concerning namespace pollution. 
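+ *
+ * Note that struct termios, termios2 and ktermios below are kept
+ * field-for-field identical on SW64, so one layout serves both the
+ * legacy and the speed-extended (BOTHER) terminal interfaces.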
+ */ + +#define NCCS 19 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* SW64 has identical termios and termios2 */ + +struct termios2 { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* SW64 has matching termios and ktermios */ + +struct ktermios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* c_cc characters */ +#define VEOF 0 +#define VEOL 1 +#define VEOL2 2 +#define VERASE 3 +#define VWERASE 4 +#define VKILL 5 +#define VREPRINT 6 +#define VSWTC 7 +#define VINTR 8 +#define VQUIT 9 +#define VSUSP 10 +#define VSTART 12 +#define VSTOP 13 +#define VLNEXT 14 +#define VDISCARD 15 +#define VMIN 16 +#define VTIME 17 + +/* c_iflag bits */ +#define IXON 0x0200 +#define IXOFF 0x0400 +#define IUCLC 0x1000 +#define IMAXBEL 0x2000 +#define IUTF8 0x4000 + +/* c_oflag bits */ +#define ONLCR 0x00002 +#define OLCUC 0x00004 +#define NLDLY 0x00300 +#define NL0 0x00000 +#define NL1 0x00100 +#define NL2 0x00200 +#define NL3 0x00300 +#define TABDLY 0x00c00 +#define TAB0 0x00000 +#define TAB1 0x00400 +#define TAB2 0x00800 +#define TAB3 0x00c00 +#define CRDLY 0x03000 +#define CR0 0x00000 +#define CR1 0x01000 +#define CR2 0x02000 +#define CR3 0x03000 +#define FFDLY 0x04000 +#define FF0 0x00000 +#define FF1 0x04000 +#define BSDLY 0x08000 +#define BS0 0x00000 +#define BS1 0x08000 +#define VTDLY 0x10000 +#define VT0 0x00000 +#define VT1 0x10000 +/* + * Should be equivalent to TAB3, see description of TAB3 in + * POSIX.1-2008, Ch. 
11.2.3 "Output Modes" + */ +#define XTABS TAB3 + +/* c_cflag bit meaning */ +#define CBAUD 0x0000001f +#define CBAUDEX 0x00000000 +#define BOTHER 0x0000001f +#define B57600 0x00000010 +#define B115200 0x00000011 +#define B230400 0x00000012 +#define B460800 0x00000013 +#define B500000 0x00000014 +#define B576000 0x00000015 +#define B921600 0x00000016 +#define B1000000 0x00000017 +#define B1152000 0x00000018 +#define B1500000 0x00000019 +#define B2000000 0x0000001a +#define B2500000 0x0000001b +#define B3000000 0x0000001c +#define B3500000 0x0000001d +#define B4000000 0x0000001e +#define CSIZE 0x00000300 +#define CS5 0x00000000 +#define CS6 0x00000100 +#define CS7 0x00000200 +#define CS8 0x00000300 +#define CSTOPB 0x00000400 +#define CREAD 0x00000800 +#define PARENB 0x00001000 +#define PARODD 0x00002000 +#define HUPCL 0x00004000 +#define CLOCAL 0x00008000 +#define CIBAUD 0x001f0000 + +/* c_lflag bits */ +#define ISIG 0x00000080 +#define ICANON 0x00000100 +#define XCASE 0x00004000 +#define ECHO 0x00000008 +#define ECHOE 0x00000002 +#define ECHOK 0x00000004 +#define ECHONL 0x00000010 +#define NOFLSH 0x80000000 +#define TOSTOP 0x00400000 +#define ECHOCTL 0x00000040 +#define ECHOPRT 0x00000020 +#define ECHOKE 0x00000001 +#define FLUSHO 0x00800000 +#define PENDIN 0x20000000 +#define IEXTEN 0x00000400 +#define EXTPROC 0x10000000 + +/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */ +#define TCSANOW 0 +#define TCSADRAIN 1 +#define TCSAFLUSH 2 + +#endif /* _UAPI_ASM_SW64_TERMBITS_H */ diff --git a/arch/sw_64/include/uapi/asm/termios.h b/arch/sw_64/include/uapi/asm/termios.h new file mode 100644 index 0000000000000000000000000000000000000000..62f4b40551b241ff1ca7fd9c1c25e65415b976c4 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/termios.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_TERMIOS_H +#define _UAPI_ASM_SW64_TERMIOS_H + +#include +#include + +struct sgttyb { + char sg_ispeed; + char sg_ospeed; + char sg_erase; + char sg_kill; + short sg_flags; +}; + +struct tchars { + char t_intrc; + char t_quitc; + char t_startc; + char t_stopc; + char t_eofc; + char t_brkc; +}; + +struct ltchars { + char t_suspc; + char t_dsuspc; + char t_rprntc; + char t_flushc; + char t_werasc; + char t_lnextc; +}; + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +/* + * c_cc characters in the termio structure. Oh, how I love being + * backwardly compatible. Notice that character 4 and 5 are + * interpreted differently depending on whether ICANON is set in + * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise + * as _VMIN and V_TIME. This is for compatibility with sysV)... 
+ */
+#define _VINTR	0
+#define _VQUIT	1
+#define _VERASE	2
+#define _VKILL	3
+#define _VEOF	4
+#define _VMIN	4
+#define _VEOL	5
+#define _VTIME	5
+#define _VEOL2	6
+#define _VSWTC	7
+
+
+#endif /* _UAPI_ASM_SW64_TERMIOS_H */
diff --git a/arch/sw_64/include/uapi/asm/ucontext.h b/arch/sw_64/include/uapi/asm/ucontext.h
new file mode 100644
index 0000000000000000000000000000000000000000..c5d6e24e3e5fb0d8e8b9d9db8ab4c7e1a53f4c17
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/ucontext.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_UCONTEXT_H
+#define _UAPI_ASM_SW64_UCONTEXT_H
+
+struct ucontext {
+	unsigned long		uc_flags;
+	struct ucontext		*uc_link;
+	old_sigset_t		uc_old_sigmask;
+	stack_t			uc_stack;
+	struct sigcontext	uc_mcontext;
+	sigset_t		uc_sigmask;	/* mask last for extensibility */
+};
+
+#endif /* _UAPI_ASM_SW64_UCONTEXT_H */
diff --git a/arch/sw_64/include/uapi/asm/unistd.h b/arch/sw_64/include/uapi/asm/unistd.h
new file mode 100644
index 0000000000000000000000000000000000000000..be844b2be9d5591b183e6fc54ae2d682a83c89d0
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/unistd.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_UNISTD_H
+#define _UAPI_ASM_SW64_UNISTD_H
+
+/*
+ * These are traditionally the names used for generic system calls
+ */
+#define __NR_umount	__NR_umount2
+
+#include
+
+#endif /* _UAPI_ASM_SW64_UNISTD_H */
diff --git a/arch/sw_64/kernel/.gitignore b/arch/sw_64/kernel/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..46c9537c5551f777409b6933590304ab07cad4f5
--- /dev/null
+++ b/arch/sw_64/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+vmlinux.lds
diff --git a/arch/sw_64/kernel/Makefile b/arch/sw_64/kernel/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..abf27ad19a942ba4444c1ec15cda7779f2c76ca6
--- /dev/null
+++ b/arch/sw_64/kernel/Makefile
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
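+# (The obj-$(CONFIG_FOO) entries below are linked into the kernel only
+# when the corresponding config option is enabled.)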
+#
+
+extra-y		:= vmlinux.lds
+asflags-y	:= $(KBUILD_CFLAGS)
+ccflags-y	:= -Wno-sign-compare
+
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_insn.o = -pg
+CFLAGS_REMOVE_printk.o = -pg
+endif
+
+obj-y	:= entry.o fpu.o traps.o process.o sys_sw64.o irq.o \
+	   irq_sw64.o signal.o setup.o ptrace.o time.o \
+	   systbls.o dup_print.o chip_setup.o \
+	   insn.o early_init.o topology.o cacheinfo.o \
+	   vdso.o vdso/ hmcall.o stacktrace.o idle.o reset.o \
+	   head.o termios.o
+
+obj-$(CONFIG_SUBARCH_C3B)	+= tc.o
+obj-$(CONFIG_ACPI)		+= acpi.o
+obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_MODULES)		+= module.o
+obj-$(CONFIG_PM)		+= pm.o
+obj-$(CONFIG_SUSPEND)		+= suspend_asm.o suspend.o
+obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
+obj-$(CONFIG_HIBERNATION)	+= hibernate_asm.o hibernate.o
+obj-$(CONFIG_AUDIT)		+= audit.o
+obj-$(CONFIG_RELOCATABLE)	+= relocate.o
+obj-$(CONFIG_DEBUG_FS)		+= segvdbg.o unaligned.o
+obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
+obj-$(CONFIG_DEBUG_MATCH)	+= match.o
+
+ifndef CONFIG_PCI
+obj-y += pci-noop.o
+endif
+
+# Core logic support
+obj-$(CONFIG_SW64_CPUAUTOPLUG)	+= cpuautoplug.o
+
+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
+obj-$(CONFIG_KPROBES)		+= kprobes/
+obj-$(CONFIG_UPROBES)		+= uprobes.o
+obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+obj-$(CONFIG_KGDB)		+= kgdb.o
+obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
diff --git a/arch/sw_64/kernel/acpi.c b/arch/sw_64/kernel/acpi.c
new file mode 100644
index 0000000000000000000000000000000000000000..9779d4bdea0d4ca8946bf03aa2e1125b7c7b6a82
--- /dev/null
+++ b/arch/sw_64/kernel/acpi.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include
+#include
+#include
+#include
+
+#include
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+#include
+#endif
+
+int acpi_disabled = 1;
+EXPORT_SYMBOL(acpi_disabled);
+
+int acpi_noirq = 1;		/* skip ACPI IRQ initialization */
+int acpi_pci_disabled = 1;	/* skip ACPI PCI scan and IRQ initialization */
+EXPORT_SYMBOL(acpi_pci_disabled);
+
+static bool param_acpi_on __initdata;
+static bool param_acpi_off __initdata;
+
+int acpi_strict;
+u64 arch_acpi_wakeup_start;
+u64 acpi_saved_sp_s3;
+
+#define MAX_LOCAL_APIC 256
+
+#define PREFIX "ACPI: "
+/*
+ * The default interrupt routing model on SW64 is IOSAPIC.
+ */
+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
+void __iomem *__init __acpi_map_table(unsigned long phys, unsigned long size)
+{
+	if (!phys || !size)
+		return NULL;
+
+	return early_ioremap(phys, size);
+}
+void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
+{
+	if (!map || !size)
+		return;
+
+	early_iounmap(map, size);
+}
+/*
+ * The following __acpi_xx functions should be implemented for the
+ * specific CPU.
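+ *
+ * acpi_gsi_to_irq() below, for instance, resolves a Global System
+ * Interrupt to a Linux IRQ number by way of acpi_register_gsi() and
+ * the default irq domain.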
+ */
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
+{
+	if (irqp != NULL)
+		*irqp = acpi_register_gsi(NULL, gsi, -1, -1);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+int acpi_isa_irq_to_gsi(unsigned int isa_irq, u32 *gsi)
+{
+	if (gsi)
+		*gsi = isa_irq;
+
+	return 0;
+}
+
+int (*acpi_suspend_lowlevel)(void);
+
+/*
+ * success: return IRQ number (>= 0)
+ * failure: return < 0
+ */
+static struct irq_domain *irq_default_domain;
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+{
+	u32 irq;
+
+	irq = irq_find_mapping(irq_default_domain, gsi);
+
+	return irq;
+}
+EXPORT_SYMBOL_GPL(acpi_register_gsi);
+
+void acpi_unregister_gsi(u32 gsi)
+{
+
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+
+/*
+ * ACPI based hotplug support for CPU
+ */
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+/* wrapper to silence section mismatch warning */
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+{
+	return 0;
+}
+EXPORT_SYMBOL(acpi_map_lsapic);
+
+int acpi_unmap_lsapic(int cpu)
+{
+	return 0;
+}
+EXPORT_SYMBOL(acpi_unmap_lsapic);
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+
+u8 acpi_checksum(u8 *table, u32 length)
+{
+	u8 ret = 0;
+
+	while (length--) {
+		ret += *table;
+		table++;
+	}
+	return -ret;
+}
+
+static int __init parse_acpi(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	/* disable both ACPI table parsing and interpreter */
+	if (strcmp(arg, "off") == 0)
+		param_acpi_off = true;
+	else if (strcmp(arg, "on") == 0)	/* prefer ACPI over device tree */
+		param_acpi_on = true;
+	else
+		return -EINVAL;	/* Core will printk when we return error. */
+
+	return 0;
+}
+early_param("acpi", parse_acpi);
+
+/*
+ * __acpi_acquire_global_lock always returns -1, indicating that the
+ * lock is owned.
+ *
+ * __acpi_release_global_lock always returns 0, indicating that no
+ * acquiring request is pending.
+ */
+int __acpi_acquire_global_lock(unsigned int *lock)
+{
+	return -1;
+}
+
+int __acpi_release_global_lock(unsigned int *lock)
+{
+	return 0;
+}
+
+#ifdef CONFIG_ACPI_NUMA
+static int rcid_to_cpu(int physical_id)
+{
+	int i;
+
+	for (i = 0; i < NR_CPUS; ++i) {
+		if (__cpu_to_rcid[i] == physical_id)
+			return i;
+	}
+
+	/* physical id not found */
+	return -1;
+}
+
+/* Callback for Proximity Domain -> CPUID mapping */
+void __init
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+{
+	int pxm, node;
+	int cpu;	/* logical core id */
+
+	if (srat_disabled())
+		return;
+	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
+		bad_srat();
+		return;
+	}
+	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+		return;
+	pxm = pa->proximity_domain_lo;
+	if (acpi_srat_revision >= 2) {
+		pxm |= (pa->proximity_domain_hi[0] << 8);
+		pxm |= (pa->proximity_domain_hi[1] << 16);
+		pxm |= (pa->proximity_domain_hi[2] << 24);
+	}
+
+	node = acpi_map_pxm_to_node(pxm);
+	if (node < 0) {
+		pr_err("SRAT: Too many proximity domains %x\n", pxm);
+		bad_srat();
+		return;
+	}
+
+	if (pa->apic_id >= CONFIG_NR_CPUS) {
+		pr_err("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n", pxm, pa->apic_id, node);
+		return;
+	}
+
+	/* Record the mapping from logical core id to node id */
+	cpu = rcid_to_cpu(pa->apic_id);
+	if (cpu < 0) {
+		pr_err("SRAT: Cannot find the logical id for physical Core 0x%02x\n", pa->apic_id);
+		return;
+	}
+
+	early_map_cpu_to_node(cpu, node);
+
+	node_set(node, numa_nodes_parsed);
+	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static inline int save_add_info(void) { return 1; }
+#else
+static inline int save_add_info(void) { return 0; }
+#endif
+
+#endif
+
+void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+}
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+{
+#ifdef CONFIG_ACPI_NUMA
+	int nid;
+
+	nid = acpi_get_node(handle);
+	if (nid != NUMA_NO_NODE) {
+		set_cpuid_to_node(cpu, nid);
+		node_set(nid, numa_nodes_parsed);
+	}
+#endif
+	return 0;
+}
+
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
+		 int *pcpu)
+{
+	int cpu;
+	struct acpi_madt_local_apic *processor;
+
+	processor = kzalloc(sizeof(struct acpi_madt_local_apic), GFP_KERNEL);
+	if (!processor)
+		return -ENOMEM;
+
+	processor->id = physid;
+	processor->processor_id = acpi_id;
+	processor->lapic_flags = ACPI_MADT_ENABLED;
+
+	cpu = set_processor_mask(processor);
+	if (cpu < 0) {
+		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+		return cpu;
+	}
+
+	acpi_map_cpu2node(handle, cpu, physid);
+
+	*pcpu = cpu;
+	return 0;
+}
+EXPORT_SYMBOL(acpi_map_cpu);
+
+int acpi_unmap_cpu(int cpu)
+{
+#ifdef CONFIG_ACPI_NUMA
+	set_cpuid_to_node(cpu, NUMA_NO_NODE);
+#endif
+	set_cpu_present(cpu, false);
+	num_processors--;
+
+	pr_info("cpu%d hot removed\n", cpu);
+
+	return 0;
+}
+EXPORT_SYMBOL(acpi_unmap_cpu);
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+
+void __init acpi_boot_table_init(void)
+{
+	/*
+	 * ACPI is disabled by default.
+	 * ACPI is only enabled when firmware passes an ACPI table
+	 * and the boot parameter "acpi=on" is set.
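	 *
	 * e.g. booting with "acpi=on" (handled by parse_acpi() above)
	 * makes this function run acpi_table_init() and fall back to
	 * disabling ACPI if the tables cannot be parsed.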
+ */ + if (param_acpi_on) + enable_acpi(); + + /* + * If acpi_disabled, bail out + */ + if (!acpi_disabled) { + pr_warn("Currently, ACPI is an experimental feature!\n"); + if (acpi_table_init()) { + pr_err("Failed to init ACPI tables\n"); + disable_acpi(); + } else + pr_info("Successfully parsed ACPI table\n"); + } +} diff --git a/arch/sw_64/kernel/asm-offsets.c b/arch/sw_64/kernel/asm-offsets.c new file mode 100644 index 0000000000000000000000000000000000000000..41310a8a7af12a6f1a4fabb8ec52a396a7ef6403 --- /dev/null +++ b/arch/sw_64/kernel/asm-offsets.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed to extract + * and format the required data. + */ + +#define GENERATING_ASM_OFFSETS /* asm/smp.h */ +#include +#include +#include +#include + +#include +#include + +#include "traps.c" +#include "signal.c" + +void foo(void) +{ + DEFINE(ASM_THREAD_SIZE, THREAD_SIZE); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + BLANK(); + + DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); + DEFINE(TASK_CRED, offsetof(struct task_struct, cred)); + DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent)); + DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader)); + DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); + DEFINE(TASK_STACK, offsetof(struct task_struct, stack)); +#ifdef CONFIG_SMP + DEFINE(TASK_CPU, offsetof(struct task_struct, thread_info.cpu)); +#endif + BLANK(); + + OFFSET(PSTATE_REGS, processor_state, regs); + OFFSET(PSTATE_FPREGS, processor_state, fpregs); + OFFSET(PSTATE_FPCR, processor_state, fpcr); + OFFSET(PSTATE_KTP, processor_state, ktp); +#ifdef CONFIG_HIBERNATION + OFFSET(PSTATE_SP, processor_state, sp); +#endif + OFFSET(PBE_ADDR, pbe, address); + OFFSET(PBE_ORIG_ADDR, pbe, orig_address); + OFFSET(PBE_NEXT, pbe, next); + OFFSET(CALLEE_R9, callee_saved_regs, r9); + OFFSET(CALLEE_R10, callee_saved_regs, r10); + OFFSET(CALLEE_R11, callee_saved_regs, r11); + OFFSET(CALLEE_R12, callee_saved_regs, r12); + OFFSET(CALLEE_R13, callee_saved_regs, r13); + OFFSET(CALLEE_R14, callee_saved_regs, r14); + OFFSET(CALLEE_R15, callee_saved_regs, r15); + OFFSET(CALLEE_RA, callee_saved_regs, ra); + OFFSET(CALLEE_F2, callee_saved_fpregs, f2); + OFFSET(CALLEE_F3, callee_saved_fpregs, f3); + OFFSET(CALLEE_F4, callee_saved_fpregs, f4); + OFFSET(CALLEE_F5, callee_saved_fpregs, f5); + OFFSET(CALLEE_F6, callee_saved_fpregs, f6); + OFFSET(CALLEE_F7, callee_saved_fpregs, f7); + OFFSET(CALLEE_F8, callee_saved_fpregs, f8); + OFFSET(CALLEE_F9, callee_saved_fpregs, f9); + BLANK(); + DEFINE(CRED_UID, offsetof(struct cred, uid)); + DEFINE(CRED_EUID, offsetof(struct cred, euid)); + DEFINE(CRED_GID, offsetof(struct cred, gid)); + DEFINE(CRED_EGID, offsetof(struct cred, egid)); + BLANK(); + + DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); + DEFINE(PT_REGS_R0, offsetof(struct pt_regs, regs[0])); + DEFINE(PT_REGS_R1, offsetof(struct pt_regs, regs[1])); + DEFINE(PT_REGS_R2, offsetof(struct pt_regs, regs[2])); + DEFINE(PT_REGS_R3, offsetof(struct pt_regs, regs[3])); + DEFINE(PT_REGS_R4, offsetof(struct pt_regs, regs[4])); + DEFINE(PT_REGS_R5, offsetof(struct pt_regs, regs[5])); + DEFINE(PT_REGS_R6, offsetof(struct pt_regs, regs[6])); + DEFINE(PT_REGS_R7, offsetof(struct pt_regs, regs[7])); + DEFINE(PT_REGS_R8, offsetof(struct pt_regs, regs[8])); + DEFINE(PT_REGS_R9, offsetof(struct pt_regs, regs[9])); + DEFINE(PT_REGS_R10, offsetof(struct 
pt_regs, regs[10])); + DEFINE(PT_REGS_R11, offsetof(struct pt_regs, regs[11])); + DEFINE(PT_REGS_R12, offsetof(struct pt_regs, regs[12])); + DEFINE(PT_REGS_R13, offsetof(struct pt_regs, regs[13])); + DEFINE(PT_REGS_R14, offsetof(struct pt_regs, regs[14])); + DEFINE(PT_REGS_R15, offsetof(struct pt_regs, regs[15])); + DEFINE(PT_REGS_R16, offsetof(struct pt_regs, regs[16])); + DEFINE(PT_REGS_R17, offsetof(struct pt_regs, regs[17])); + DEFINE(PT_REGS_R18, offsetof(struct pt_regs, regs[18])); + DEFINE(PT_REGS_R19, offsetof(struct pt_regs, regs[19])); + DEFINE(PT_REGS_R20, offsetof(struct pt_regs, regs[20])); + DEFINE(PT_REGS_R21, offsetof(struct pt_regs, regs[21])); + DEFINE(PT_REGS_R22, offsetof(struct pt_regs, regs[22])); + DEFINE(PT_REGS_R23, offsetof(struct pt_regs, regs[23])); + DEFINE(PT_REGS_R24, offsetof(struct pt_regs, regs[24])); + DEFINE(PT_REGS_R25, offsetof(struct pt_regs, regs[25])); + DEFINE(PT_REGS_R26, offsetof(struct pt_regs, regs[26])); + DEFINE(PT_REGS_R27, offsetof(struct pt_regs, regs[27])); + DEFINE(PT_REGS_R28, offsetof(struct pt_regs, regs[28])); + DEFINE(PT_REGS_GP, offsetof(struct pt_regs, regs[29])); + DEFINE(PT_REGS_SP, offsetof(struct pt_regs, regs[30])); + DEFINE(PT_REGS_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_REGS_PS, offsetof(struct pt_regs, ps)); + DEFINE(PT_REGS_ORIG_R0, offsetof(struct pt_regs, orig_r0)); + DEFINE(PT_REGS_ORIG_R19, offsetof(struct pt_regs, orig_r19)); + DEFINE(PT_REGS_HM_PS, offsetof(struct pt_regs, hm_ps)); + DEFINE(PT_REGS_HM_PC, offsetof(struct pt_regs, hm_pc)); + DEFINE(PT_REGS_HM_GP, offsetof(struct pt_regs, hm_gp)); + DEFINE(PT_REGS_HM_R16, offsetof(struct pt_regs, hm_r16)); + DEFINE(PT_REGS_HM_R17, offsetof(struct pt_regs, hm_r17)); + DEFINE(PT_REGS_HM_R18, offsetof(struct pt_regs, hm_r18)); + BLANK(); + + DEFINE(KVM_REGS_SIZE, sizeof(struct kvm_regs)); + DEFINE(KVM_REGS_R0, offsetof(struct kvm_regs, r0)); + DEFINE(KVM_REGS_R1, offsetof(struct kvm_regs, r1)); + DEFINE(KVM_REGS_R2, offsetof(struct kvm_regs, r2)); + DEFINE(KVM_REGS_R3, offsetof(struct kvm_regs, r3)); + DEFINE(KVM_REGS_R4, offsetof(struct kvm_regs, r4)); + DEFINE(KVM_REGS_R5, offsetof(struct kvm_regs, r5)); + DEFINE(KVM_REGS_R6, offsetof(struct kvm_regs, r6)); + DEFINE(KVM_REGS_R7, offsetof(struct kvm_regs, r7)); + DEFINE(KVM_REGS_R8, offsetof(struct kvm_regs, r8)); + DEFINE(KVM_REGS_R9, offsetof(struct kvm_regs, r9)); + DEFINE(KVM_REGS_R10, offsetof(struct kvm_regs, r10)); + DEFINE(KVM_REGS_R11, offsetof(struct kvm_regs, r11)); + DEFINE(KVM_REGS_R12, offsetof(struct kvm_regs, r12)); + DEFINE(KVM_REGS_R13, offsetof(struct kvm_regs, r13)); + DEFINE(KVM_REGS_R14, offsetof(struct kvm_regs, r14)); + DEFINE(KVM_REGS_R15, offsetof(struct kvm_regs, r15)); + DEFINE(KVM_REGS_R19, offsetof(struct kvm_regs, r19)); + DEFINE(KVM_REGS_R20, offsetof(struct kvm_regs, r20)); + DEFINE(KVM_REGS_R21, offsetof(struct kvm_regs, r21)); + DEFINE(KVM_REGS_R22, offsetof(struct kvm_regs, r22)); + DEFINE(KVM_REGS_R23, offsetof(struct kvm_regs, r23)); + DEFINE(KVM_REGS_R24, offsetof(struct kvm_regs, r24)); + DEFINE(KVM_REGS_R25, offsetof(struct kvm_regs, r25)); + DEFINE(KVM_REGS_R26, offsetof(struct kvm_regs, r26)); + DEFINE(KVM_REGS_R27, offsetof(struct kvm_regs, r27)); + DEFINE(KVM_REGS_R28, offsetof(struct kvm_regs, r28)); + DEFINE(KVM_REGS_FPCR, offsetof(struct kvm_regs, fpcr)); + DEFINE(KVM_REGS_F0, offsetof(struct kvm_regs, fp[0 * 4])); + DEFINE(KVM_REGS_F1, offsetof(struct kvm_regs, fp[1 * 4])); + DEFINE(KVM_REGS_F2, offsetof(struct kvm_regs, fp[2 * 4])); + DEFINE(KVM_REGS_F3, 
offsetof(struct kvm_regs, fp[3 * 4])); + DEFINE(KVM_REGS_F4, offsetof(struct kvm_regs, fp[4 * 4])); + DEFINE(KVM_REGS_F5, offsetof(struct kvm_regs, fp[5 * 4])); + DEFINE(KVM_REGS_F6, offsetof(struct kvm_regs, fp[6 * 4])); + DEFINE(KVM_REGS_F7, offsetof(struct kvm_regs, fp[7 * 4])); + DEFINE(KVM_REGS_F8, offsetof(struct kvm_regs, fp[8 * 4])); + DEFINE(KVM_REGS_F9, offsetof(struct kvm_regs, fp[9 * 4])); + DEFINE(KVM_REGS_F10, offsetof(struct kvm_regs, fp[10 * 4])); + DEFINE(KVM_REGS_F11, offsetof(struct kvm_regs, fp[11 * 4])); + DEFINE(KVM_REGS_F12, offsetof(struct kvm_regs, fp[12 * 4])); + DEFINE(KVM_REGS_F13, offsetof(struct kvm_regs, fp[13 * 4])); + DEFINE(KVM_REGS_F14, offsetof(struct kvm_regs, fp[14 * 4])); + DEFINE(KVM_REGS_F15, offsetof(struct kvm_regs, fp[15 * 4])); + DEFINE(KVM_REGS_F16, offsetof(struct kvm_regs, fp[16 * 4])); + DEFINE(KVM_REGS_F17, offsetof(struct kvm_regs, fp[17 * 4])); + DEFINE(KVM_REGS_F18, offsetof(struct kvm_regs, fp[18 * 4])); + DEFINE(KVM_REGS_F19, offsetof(struct kvm_regs, fp[19 * 4])); + DEFINE(KVM_REGS_F20, offsetof(struct kvm_regs, fp[20 * 4])); + DEFINE(KVM_REGS_F21, offsetof(struct kvm_regs, fp[21 * 4])); + DEFINE(KVM_REGS_F22, offsetof(struct kvm_regs, fp[22 * 4])); + DEFINE(KVM_REGS_F23, offsetof(struct kvm_regs, fp[23 * 4])); + DEFINE(KVM_REGS_F24, offsetof(struct kvm_regs, fp[24 * 4])); + DEFINE(KVM_REGS_F25, offsetof(struct kvm_regs, fp[25 * 4])); + DEFINE(KVM_REGS_F26, offsetof(struct kvm_regs, fp[26 * 4])); + DEFINE(KVM_REGS_F27, offsetof(struct kvm_regs, fp[27 * 4])); + DEFINE(KVM_REGS_F28, offsetof(struct kvm_regs, fp[28 * 4])); + DEFINE(KVM_REGS_F29, offsetof(struct kvm_regs, fp[29 * 4])); + DEFINE(KVM_REGS_F30, offsetof(struct kvm_regs, fp[30 * 4])); + DEFINE(KVM_REGS_PS, offsetof(struct kvm_regs, ps)); + DEFINE(KVM_REGS_PC, offsetof(struct kvm_regs, pc)); + DEFINE(KVM_REGS_GP, offsetof(struct kvm_regs, gp)); + DEFINE(KVM_REGS_R16, offsetof(struct kvm_regs, r16)); + DEFINE(KVM_REGS_R17, offsetof(struct kvm_regs, r17)); + DEFINE(KVM_REGS_R18, offsetof(struct kvm_regs, r18)); + BLANK(); + + DEFINE(VCPU_RET_SIZE, sizeof(struct vcpu_run_ret_stack)); + DEFINE(VCPU_RET_RA, offsetof(struct vcpu_run_ret_stack, ra)); + DEFINE(VCPU_RET_R0, offsetof(struct vcpu_run_ret_stack, r0)); + BLANK(); + + DEFINE(HOST_INT_SIZE, sizeof(struct host_int_args)); + DEFINE(HOST_INT_R18, offsetof(struct host_int_args, r18)); + DEFINE(HOST_INT_R17, offsetof(struct host_int_args, r17)); + DEFINE(HOST_INT_R16, offsetof(struct host_int_args, r16)); + BLANK(); + + OFFSET(TASK_THREAD, task_struct, thread); + OFFSET(TASK_THREAD_F0, task_struct, thread.fpstate.fp[0]); + OFFSET(TASK_THREAD_F1, task_struct, thread.fpstate.fp[1]); + OFFSET(TASK_THREAD_F2, task_struct, thread.fpstate.fp[2]); + OFFSET(TASK_THREAD_F3, task_struct, thread.fpstate.fp[3]); + OFFSET(TASK_THREAD_F4, task_struct, thread.fpstate.fp[4]); + OFFSET(TASK_THREAD_F5, task_struct, thread.fpstate.fp[5]); + OFFSET(TASK_THREAD_F6, task_struct, thread.fpstate.fp[6]); + OFFSET(TASK_THREAD_F7, task_struct, thread.fpstate.fp[7]); + OFFSET(TASK_THREAD_F8, task_struct, thread.fpstate.fp[8]); + OFFSET(TASK_THREAD_F9, task_struct, thread.fpstate.fp[9]); + OFFSET(TASK_THREAD_F10, task_struct, thread.fpstate.fp[10]); + OFFSET(TASK_THREAD_F11, task_struct, thread.fpstate.fp[11]); + OFFSET(TASK_THREAD_F12, task_struct, thread.fpstate.fp[12]); + OFFSET(TASK_THREAD_F13, task_struct, thread.fpstate.fp[13]); + OFFSET(TASK_THREAD_F14, task_struct, thread.fpstate.fp[14]); + OFFSET(TASK_THREAD_F15, task_struct, thread.fpstate.fp[15]); 
+ OFFSET(TASK_THREAD_F16, task_struct, thread.fpstate.fp[16]); + OFFSET(TASK_THREAD_F17, task_struct, thread.fpstate.fp[17]); + OFFSET(TASK_THREAD_F18, task_struct, thread.fpstate.fp[18]); + OFFSET(TASK_THREAD_F19, task_struct, thread.fpstate.fp[19]); + OFFSET(TASK_THREAD_F20, task_struct, thread.fpstate.fp[20]); + OFFSET(TASK_THREAD_F21, task_struct, thread.fpstate.fp[21]); + OFFSET(TASK_THREAD_F22, task_struct, thread.fpstate.fp[22]); + OFFSET(TASK_THREAD_F23, task_struct, thread.fpstate.fp[23]); + OFFSET(TASK_THREAD_F24, task_struct, thread.fpstate.fp[24]); + OFFSET(TASK_THREAD_F25, task_struct, thread.fpstate.fp[25]); + OFFSET(TASK_THREAD_F26, task_struct, thread.fpstate.fp[26]); + OFFSET(TASK_THREAD_F27, task_struct, thread.fpstate.fp[27]); + OFFSET(TASK_THREAD_F28, task_struct, thread.fpstate.fp[28]); + OFFSET(TASK_THREAD_F29, task_struct, thread.fpstate.fp[29]); + OFFSET(TASK_THREAD_F30, task_struct, thread.fpstate.fp[30]); + OFFSET(TASK_THREAD_FPCR, task_struct, thread.fpstate.fpcr); + BLANK(); + OFFSET(TASK_THREAD_RA, task_struct, thread.ra); + OFFSET(TASK_THREAD_SP, task_struct, thread.sp); + OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]); + OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]); + OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]); + OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]); + OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]); + OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]); + OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]); + BLANK(); + DEFINE(ASM_THREAD_SIZE, THREAD_SIZE); + BLANK(); + DEFINE(RT_SIGFRAME_SIZE, sizeof(struct rt_sigframe)); + OFFSET(RT_SIGFRAME_MCTX, rt_sigframe, uc.uc_mcontext); +} diff --git a/arch/sw_64/kernel/audit.c b/arch/sw_64/kernel/audit.c new file mode 100644 index 0000000000000000000000000000000000000000..dcf58deee3e2018e0efa6cb355e8802aef04de35 --- /dev/null +++ b/arch/sw_64/kernel/audit.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include + +static unsigned int dir_class[] = { +#include +~0U +}; + +static unsigned int read_class[] = { +#include +~0U +}; + +static unsigned int write_class[] = { +#include +~0U +}; + +static unsigned int chattr_class[] = { +#include +~0U +}; + +static unsigned int signal_class[] = { +#include +~0U +}; + +int audit_classify_arch(int arch) +{ + return 0; +} + +int audit_classify_syscall(int abi, unsigned int syscall) +{ + switch (syscall) { + case __NR_open: + return 2; + case __NR_openat: + return 3; + case __NR_execve: + return 5; + default: + return 0; + } +} + +static int __init audit_classes_init(void) +{ + audit_register_class(AUDIT_CLASS_WRITE, write_class); + audit_register_class(AUDIT_CLASS_READ, read_class); + audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); + audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); + return 0; +} + +device_initcall(audit_classes_init); diff --git a/arch/sw_64/kernel/cacheinfo.c b/arch/sw_64/kernel/cacheinfo.c new file mode 100644 index 0000000000000000000000000000000000000000..e340c53690a9e486269465512e9ddf494afd1fe0 --- /dev/null +++ b/arch/sw_64/kernel/cacheinfo.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 cacheinfo support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +#include + +/* Populates leaf and increments to next leaf */ +#define populate_cache(cache, leaf, c_level, c_type, c_id) \ +do { \ + leaf->id = c_id; \ + leaf->attributes = CACHE_ID; \ + leaf->type = c_type; \ + leaf->level = c_level; \ + leaf->coherency_line_size = c->cache.linesz; \ + leaf->number_of_sets = c->cache.sets; \ + leaf->ways_of_associativity = c->cache.ways; \ + leaf->size = c->cache.size; \ + leaf++; \ +} while (0) + +int init_cache_level(unsigned int cpu) +{ + struct cpuinfo_sw64 *c = &cpu_data[cpu]; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + int levels = 0, leaves = 0; + + /* + * If Dcache is not set, we assume the cache structures + * are not properly initialized. + */ + if (c->dcache.size) + levels += 1; + else + return -ENOENT; + + + leaves += (c->icache.size) ? 2 : 1; + + if (c->scache.size) { + levels++; + leaves++; + } + + if (c->tcache.size) { + levels++; + leaves++; + } + + this_cpu_ci->num_levels = levels; + this_cpu_ci->num_leaves = leaves; + return 0; +} + +int populate_cache_leaves(unsigned int cpu) +{ + struct cpuinfo_sw64 *c = &cpu_data[cpu]; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; + struct cpu_topology *topo = &cpu_topology[cpu]; + + if (c->icache.size) { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA, cpu); + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST, cpu); + + } else { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED, cpu); + } + + if (c->scache.size) { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED, cpu); + } + + if (c->tcache.size) { + cpumask_copy(&this_leaf->shared_cpu_map, topology_llc_cpumask(cpu)); + populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED, topo->package_id); + } + + this_cpu_ci->cpu_map_populated = true; + + return 0; +} diff --git a/arch/sw_64/kernel/chip_setup.c b/arch/sw_64/kernel/chip_setup.c new file mode 100644 index 0000000000000000000000000000000000000000..b8c359db2ef67b1272600987621860cbefbfc47f --- /dev/null +++ b/arch/sw_64/kernel/chip_setup.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include + +struct sw64_chip_ops *sw64_chip; +struct sw64_chip_init_ops *sw64_chip_init; + +static int get_cpu_nums(void) +{ + if (is_guest_or_emul()) + return 1; + + return __get_cpu_nums(); +} + +static unsigned long __init get_node_mem(int nodeid) +{ + + if (is_guest_or_emul()) + return *(unsigned long *)MMSIZE & MMSIZE_MASK; + + return __get_node_mem(nodeid); +} + +static void __init setup_core_map(struct cpumask *cpumask) +{ + int i, j, cpu_num, cpuid, max_cores_per_cpu; + unsigned long coreonline; + + cpu_num = get_cpu_nums(); + cpuid = 0; + for (i = 0; i < cpu_num; i++) { + coreonline = sw64_io_read(i, CORE_ONLINE); + max_cores_per_cpu = MAX_CORES_PER_CPU; + + if (is_guest_or_emul()) + max_cores_per_cpu = 64; + + for (j = 0; j < max_cores_per_cpu; j++) { + if 
(coreonline & (1UL << j)) { + __cpu_to_rcid[cpuid] = (i << DOMAIN_ID_SHIFT) | (j << CORE_ID_SHIFT); + cpuid++; + } + } + } + + if (is_in_host() && core_is_ht()) { + for (i = 0; i < cpuid; i++) + __cpu_to_rcid[cpuid + i] = __cpu_to_rcid[i] | (1 << THREAD_ID_SHIFT); + + cpuid = cpuid + i; + } + + while (cpuid < NR_CPUS) { + __cpu_to_rcid[cpuid] = -1; + cpuid++; + } +} + +#ifdef CONFIG_PM +static void i2c_srst(void) +{ + sw64_io_write(0, I2C0_SRST_L, 0x0); + sw64_io_write(0, I2C0_SRST_L, 0x1); + + sw64_io_write(0, I2C1_SRST_L, 0x0); + sw64_io_write(0, I2C1_SRST_L, 0x1); + + sw64_io_write(0, I2C2_SRST_L, 0x0); + sw64_io_write(0, I2C2_SRST_L, 0x1); +} + +static void pcie_save(void) +{ + struct pci_controller *hose; + struct piu_saved *piu_save; + unsigned long node, index; + unsigned long i; + + for (hose = hose_head; hose; hose = hose->next) { + piu_save = kzalloc(sizeof(*piu_save), GFP_KERNEL); + + node = hose->node; + index = hose->index; + hose->sysdata = piu_save; + + piu_save->piuconfig0 = read_piu_ior0(node, index, PIUCONFIG0); + piu_save->piuconfig1 = read_piu_ior1(node, index, PIUCONFIG1); + piu_save->epdmabar = read_piu_ior0(node, index, EPDMABAR); + piu_save->msiaddr = read_piu_ior0(node, index, MSIADDR); + + if (IS_ENABLED(CONFIG_UNCORE_XUELANG)) { + for (i = 0; i < 256; i++) { + piu_save->msiconfig[i] = read_piu_ior0(node, index, + MSICONFIG0 + (i << 7)); + } + } + + piu_save->iommuexcpt_ctrl = read_piu_ior0(node, index, IOMMUEXCPT_CTRL); + piu_save->dtbaseaddr = read_piu_ior0(node, index, DTBASEADDR); + + piu_save->intaconfig = read_piu_ior0(node, index, INTACONFIG); + piu_save->intbconfig = read_piu_ior0(node, index, INTBCONFIG); + piu_save->intcconfig = read_piu_ior0(node, index, INTCCONFIG); + piu_save->intdconfig = read_piu_ior0(node, index, INTDCONFIG); + piu_save->pmeintconfig = read_piu_ior0(node, index, PMEINTCONFIG); + piu_save->aererrintconfig = read_piu_ior0(node, index, AERERRINTCONFIG); + piu_save->hpintconfig = read_piu_ior0(node, index, HPINTCONFIG); + + } +} + +static void pcie_restore(void) +{ + struct pci_controller *hose; + struct piu_saved *piu_save; + unsigned long node, index; + u32 rc_misc_ctrl; + unsigned int value; + unsigned long i; + + for (hose = hose_head; hose; hose = hose->next) { + node = hose->node; + index = hose->index; + piu_save = hose->sysdata; + + write_piu_ior0(node, index, PIUCONFIG0, piu_save->piuconfig0); + write_piu_ior1(node, index, PIUCONFIG1, piu_save->piuconfig1); + write_piu_ior0(node, index, EPDMABAR, piu_save->epdmabar); + write_piu_ior0(node, index, MSIADDR, piu_save->msiaddr); + + if (IS_ENABLED(CONFIG_UNCORE_XUELANG)) { + for (i = 0; i < 256; i++) { + write_piu_ior0(node, index, MSICONFIG0 + (i << 7), + piu_save->msiconfig[i]); + } + } + + write_piu_ior0(node, index, IOMMUEXCPT_CTRL, piu_save->iommuexcpt_ctrl); + write_piu_ior0(node, index, DTBASEADDR, piu_save->dtbaseaddr); + + write_piu_ior0(node, index, INTACONFIG, piu_save->intaconfig); + write_piu_ior0(node, index, INTBCONFIG, piu_save->intbconfig); + write_piu_ior0(node, index, INTCCONFIG, piu_save->intcconfig); + write_piu_ior0(node, index, INTDCONFIG, piu_save->intdconfig); + write_piu_ior0(node, index, PMEINTCONFIG, piu_save->pmeintconfig); + write_piu_ior0(node, index, AERERRINTCONFIG, piu_save->aererrintconfig); + write_piu_ior0(node, index, HPINTCONFIG, piu_save->hpintconfig); + + /* Enable DBI_RO_WR_EN */ + rc_misc_ctrl = read_rc_conf(node, index, RC_MISC_CONTROL_1); + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl | 0x1); + + /* Fix up 
DEVICE_ID_VENDOR_ID register */
+		value = (PCI_DEVICE_ID_SW64_ROOT_BRIDGE << 16) | PCI_VENDOR_ID_JN;
+		write_rc_conf(node, index, RC_VENDOR_ID, value);
+
+		/* Set PCI-E root class code */
+		value = read_rc_conf(node, index, RC_REVISION_ID);
+		write_rc_conf(node, index, RC_REVISION_ID, (PCI_CLASS_BRIDGE_HOST << 16) | value);
+
+		/* Disable DBI_RO_WR_EN */
+		write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl);
+	}
+
+}
+
+static unsigned long saved_dvc_int, saved_long_time;
+
+static inline void intpu_save(void)
+{
+	switch (cpu_desc.model) {
+	case CPU_SW831:
+		saved_long_time = __io_read_longtime(0);
+		break;
+	default:
+		break;
+	}
+}
+
+static inline void intpu_restore(void)
+{
+	switch (cpu_desc.model) {
+	case CPU_SW831:
+		__io_write_longtime(0, saved_long_time);
+		__io_write_longtime_start_en(0, 0x1);
+		break;
+	default:
+		pr_info("long time start is disabled\n");
+		break;
+	}
+}
+
+static inline void spbu_save(void)
+{
+	saved_dvc_int = sw64_io_read(0, MCU_DVC_INT_EN);
+}
+
+static inline void spbu_restore(void)
+{
+	i2c_srst();
+	sw64_io_write(0, MCU_DVC_INT_EN, saved_dvc_int);
+}
+
+static int io_suspend(void)
+{
+	spbu_save();
+	intpu_save();
+	pcie_save();
+
+	return 0;
+}
+
+static void io_resume(void)
+{
+	pcie_restore();
+	intpu_restore();
+	spbu_restore();
+}
+#endif /* CONFIG_PM */
+
+static struct sw64_chip_init_ops chip_init_ops = {
+	.early_init = {
+		.setup_core_map = setup_core_map,
+		.get_node_mem = get_node_mem,
+	},
+};
+
+static struct sw64_chip_ops chip_ops = {
+	.get_cpu_num = get_cpu_nums,
+};
+
+void __init setup_chip_ops(void)
+{
+	sw64_chip_init = &chip_init_ops;
+	sw64_chip = &chip_ops;
+	setup_chip_pci_ops();
+#ifdef CONFIG_PM
+	io_syscore_ops.suspend = io_suspend;
+	io_syscore_ops.resume = io_resume;
+#endif
+}
diff --git a/arch/sw_64/kernel/cpuautoplug.c b/arch/sw_64/kernel/cpuautoplug.c
new file mode 100644
index 0000000000000000000000000000000000000000..b4ea0ef080d8fae84d00af5dccab12d605aeb201
--- /dev/null
+++ b/arch/sw_64/kernel/cpuautoplug.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+int autoplug_enabled;
+int autoplug_verbose;
+int autoplug_adjusting;
+
+DEFINE_PER_CPU(int, cpu_adjusting);
+
+struct cpu_autoplug_info {
+	cputime64_t prev_idle;
+	cputime64_t prev_wall;
+	struct delayed_work work;
+	unsigned int sampling_rate;
+	int maxcpus;	/* max cpus for autoplug */
+	int mincpus;	/* min cpus for autoplug */
+	int dec_reqs;	/* continuous core-decreasing requests */
+	int inc_reqs;	/* continuous core-increasing requests */
+};
+
+struct cpu_autoplug_info ap_info;
+
+static ssize_t enabled_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", autoplug_enabled);
+}
+
+static ssize_t enabled_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	int n, err;
+
+	err = kstrtoint(buf, 0, &n);
+	if (err)
+		return err;
+
+	if (n > 1 || n < 0)
+		return -EINVAL;
+
+	autoplug_enabled = n;
+
+	return count;
+}
+
+static ssize_t verbose_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", autoplug_verbose);
+}
+
+static ssize_t verbose_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	int n, err;
+
+	err = kstrtoint(buf, 0, &n);
+	if (err)
+		return err;
+
+	if (n > 1 || n < 0)
+		return -EINVAL;
+
+	autoplug_verbose = n;
+
+	return count;
+}
+
+static ssize_t maxcpus_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", ap_info.maxcpus);
+}
+
+static ssize_t maxcpus_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	int n, err;
+
+	err = kstrtoint(buf, 0, &n);
+	if (err)
+		return err;
+
+	if (n > num_possible_cpus() || n < ap_info.mincpus)
+		return -EINVAL;
+
+	ap_info.maxcpus = n;
+
+	return count;
+}
+
+static ssize_t mincpus_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", ap_info.mincpus);
+}
+
+static ssize_t mincpus_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	int n, err;
+
+	err = kstrtoint(buf, 0, &n);
+	if (err)
+		return err;
+
+	if (n > ap_info.maxcpus || n < 1)
+		return -EINVAL;
+
+	ap_info.mincpus = n;
+
+	return count;
+}
+
+static ssize_t sampling_rate_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", ap_info.sampling_rate);
+}
+
+#define SAMPLING_RATE_MAX 1000
+#define SAMPLING_RATE_MIN 600
+
+static ssize_t sampling_rate_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	int n, err;
+
+	err = kstrtoint(buf, 0, &n);
+	if (err)
+		return err;
+
+	if (n > SAMPLING_RATE_MAX || n < SAMPLING_RATE_MIN)
+		return -EINVAL;
+
+	ap_info.sampling_rate = n;
+
+	return count;
+}
+
+static ssize_t available_value_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "enabled: 0-1\nverbose: 0-1\nmaxcpus: "
+		       "1-%d\nmincpus: 1-%d\nsampling_rate: %d-%d\n",
+		       num_possible_cpus(), num_possible_cpus(),
+		       SAMPLING_RATE_MIN, SAMPLING_RATE_MAX);
+}
+
+static DEVICE_ATTR_RW(enabled);
+static DEVICE_ATTR_RW(verbose);
+static DEVICE_ATTR_RW(maxcpus);
+static DEVICE_ATTR_RW(mincpus);
+static DEVICE_ATTR_RW(sampling_rate);
+static DEVICE_ATTR_RO(available_value);
+
+static struct attribute *cpuclass_default_attrs[] = {
+	&dev_attr_enabled.attr,
+	&dev_attr_verbose.attr,
+	&dev_attr_maxcpus.attr,
+	&dev_attr_mincpus.attr,
+	&dev_attr_sampling_rate.attr,
+	&dev_attr_available_value.attr,
+	NULL
+};
+
+static struct attribute_group cpuclass_attr_group = {
+	.attrs = cpuclass_default_attrs,
+	.name = "cpuautoplug",
+};
+
+static int __init setup_autoplug(char *str)
+{
+	if (!strcmp(str, "off"))
+		autoplug_enabled = 0;
+	else if (!strcmp(str, "on"))
+		autoplug_enabled = 1;
+	else
+		return 0;
+	return 1;
+}
+
+__setup("autoplug=", setup_autoplug);
+
+static cputime64_t calc_busy_time(unsigned int cpu)
+{
+	cputime64_t busy_time;
+
+	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+	busy_time += 1;
+
+	return busy_time;
+}
+
+static inline cputime64_t get_idle_time_jiffy(cputime64_t *wall)
+{
+	unsigned int cpu;
+	cputime64_t idle_time = 0;
+	cputime64_t cur_wall_time;
+	cputime64_t busy_time;
+
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+	for_each_online_cpu(cpu) {
+		busy_time = calc_busy_time(cpu);
+
+		idle_time += cur_wall_time - busy_time;
+	}
+
+	if (wall)
+		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+
+	return (cputime64_t)jiffies_to_usecs(idle_time);
+}
+
+static inline cputime64_t sw64_get_idle_time(cputime64_t *wall)
+{
+	unsigned int cpu;
+	u64 idle, idle_time = 0;
+
+	for_each_online_cpu(cpu) {
+		idle = get_cpu_idle_time_us(cpu, wall);
+		if (idle == -1ULL)
+			return get_idle_time_jiffy(wall);
+		idle_time += idle;
+	}
+
+	return idle_time;
+}
+
+/* Return the index of the online cpu with the smallest busy time. */
+static int get_min_busy_time(cputime64_t arr[], int size)
+{
+	int i, min_cpu_idx = 0;
+	cputime64_t min_time = arr[0];
+
+	for (i = 0; i < size; i++) {
+		if (arr[i] > 0 && arr[i] < min_time) {
+			min_time = arr[i];
+			min_cpu_idx = i;
+		}
+	}
+
+	return min_cpu_idx;
+}
+
+static int find_min_busy_cpu(void)
+{
+	int nr_all_cpus = num_possible_cpus();
+	unsigned int cpus, target_cpu;
+	cputime64_t busy_time;
+	cputime64_t b_time[NR_CPUS];
+
+	memset(b_time, 0, sizeof(b_time));
+	for_each_online_cpu(cpus) {
+		busy_time = calc_busy_time(cpus);
+		b_time[cpus] = busy_time;
+	}
+	target_cpu = get_min_busy_time(b_time, nr_all_cpus);
+	return target_cpu;
+}
+
+static void increase_cores(int cur_cpus)
+{
+	struct device *dev;
+
+	if (cur_cpus == ap_info.maxcpus)
+		return;
+
+	cur_cpus = cpumask_next_zero(0, cpu_online_mask);
+
+	dev = get_cpu_device(cur_cpus);
+
+	per_cpu(cpu_adjusting, dev->id) = 1;
+	lock_device_hotplug();
+	cpu_device_up(dev);
+	pr_info("The target_cpu is %d. After cpu_up, the cpu_num is %d\n",
+		dev->id, num_online_cpus());
+	get_cpu_device(dev->id)->offline = false;
+	unlock_device_hotplug();
+	per_cpu(cpu_adjusting, dev->id) = 0;
+}
+
+static void decrease_cores(int cur_cpus)
+{
+	struct device *dev;
+
+	if (cur_cpus == ap_info.mincpus)
+		return;
+
+	cur_cpus = find_min_busy_cpu();
+
+	dev = get_cpu_device(cur_cpus);
+
+	if (dev->id > 0) {
+		per_cpu(cpu_adjusting, dev->id) = -1;
+		lock_device_hotplug();
+		cpu_device_down(dev);
+		pr_info("The target_cpu is %d. After cpu_down, the cpu_num is %d\n",
+			cur_cpus, num_online_cpus());
+		get_cpu_device(dev->id)->offline = true;
+		unlock_device_hotplug();
+		per_cpu(cpu_adjusting, dev->id) = 0;
+	}
+}
+
+#define INC_THRESHOLD 80
+#define DEC_THRESHOLD 40
+
+static void do_autoplug_timer(struct work_struct *work)
+{
+	cputime64_t cur_wall_time = 0, cur_idle_time;
+	unsigned long idle_time, wall_time;
+	int delay, load;
+	int nr_cur_cpus = num_online_cpus();
+	int nr_all_cpus = num_possible_cpus();
+	int inc_req = 1, dec_req = 2;
+	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(smp_processor_id());
+
+	if (!policy || IS_ERR(policy->clk)) {
+		pr_err("%s: No %s associated with cpu %d\n",
+		       __func__, policy ? "clk" : "policy", 0);
+		return;
+	}
+
+	ap_info.maxcpus =
+		setup_max_cpus > nr_cpu_ids ? nr_cpu_ids : setup_max_cpus;
+	ap_info.mincpus = ap_info.maxcpus / 4;
+
+	if (strcmp(policy->governor->name, "performance") == 0) {
+		ap_info.mincpus = ap_info.maxcpus;
+	} else if (strcmp(policy->governor->name, "powersave") == 0) {
+		ap_info.maxcpus = ap_info.mincpus;
+	} else if (strcmp(policy->governor->name, "ondemand") == 0) {
+		ap_info.sampling_rate = 500;
+		inc_req = 0;
+		dec_req = 2;
+	} else if (strcmp(policy->governor->name, "conservative") == 0) {
+		inc_req = 1;
+		dec_req = 3;
+		ap_info.sampling_rate = 1000;	/* 1s */
+	}
+
+	BUG_ON(smp_processor_id() != 0);
+	delay = msecs_to_jiffies(ap_info.sampling_rate);
+	if (!autoplug_enabled || system_state != SYSTEM_RUNNING)
+		goto out;
+
+	autoplug_adjusting = 1;
+
+	if (nr_cur_cpus > ap_info.maxcpus) {
+		decrease_cores(nr_cur_cpus);
+		autoplug_adjusting = 0;
+		goto out;
+	}
+	if (nr_cur_cpus < ap_info.mincpus) {
+		increase_cores(nr_cur_cpus);
+		autoplug_adjusting = 0;
+		goto out;
+	}
+
+	cur_idle_time = sw64_get_idle_time(&cur_wall_time);
+	if (cur_wall_time == 0)
+		cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+	wall_time = (unsigned int)(cur_wall_time - ap_info.prev_wall);
+	ap_info.prev_wall = cur_wall_time;
+
+	idle_time = (unsigned int)(cur_idle_time - ap_info.prev_idle);
+	idle_time += wall_time * (nr_all_cpus - nr_cur_cpus);
+	ap_info.prev_idle = cur_idle_time;
+
+	if (unlikely(!wall_time || wall_time * nr_all_cpus < idle_time)) {
+		autoplug_adjusting = 0;
+		goto out;
+	}
+
+	load = 100 * (wall_time * nr_all_cpus - idle_time) / wall_time;
+
+	if (load < (nr_cur_cpus - 1) * 100 - DEC_THRESHOLD) {
+		ap_info.inc_reqs = 0;
+		if (ap_info.dec_reqs < dec_req) {
+			ap_info.dec_reqs++;
+		} else {
+			ap_info.dec_reqs = 0;
+			decrease_cores(nr_cur_cpus);
+		}
+	} else {
+		ap_info.dec_reqs = 0;
+		if (load > (nr_cur_cpus - 1) * 100 + INC_THRESHOLD) {
+			if (ap_info.inc_reqs < inc_req) {
+				ap_info.inc_reqs++;
+			} else {
+				ap_info.inc_reqs = 0;
+				increase_cores(nr_cur_cpus);
+			}
+		}
+	}
+
+	autoplug_adjusting = 0;
+out:
+	schedule_delayed_work_on(0, &ap_info.work, delay);
+}
+
+static struct platform_device_id platform_device_ids[] = {
+	{
+		.name = "sw64_cpuautoplug",
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(platform, platform_device_ids);
+
+static struct platform_driver platform_driver = {
+	.driver = {
+		.name = "sw64_cpuautoplug",
+		.owner = THIS_MODULE,
+	},
+	.id_table = platform_device_ids,
+};
+
+static int __init cpuautoplug_init(void)
+{
+	int i, ret, delay;
+	struct device *dev_root;
+
+	dev_root = bus_get_dev_root(&cpu_subsys);
+	if (dev_root) {
+		ret = sysfs_create_group(&dev_root->kobj,
+					 &cpuclass_attr_group);
+		put_device(dev_root);
+		if (ret)
+			return ret;
+	}
+
+	ret = platform_driver_register(&platform_driver);
+	if (ret)
+		return ret;
+
+	pr_info("cpuautoplug: SW64 CPU autoplug driver.\n");
+
+	ap_info.maxcpus =
+		setup_max_cpus > nr_cpu_ids ? nr_cpu_ids : setup_max_cpus;
+	ap_info.mincpus = ap_info.maxcpus / 4;
+	ap_info.dec_reqs = 0;
+	ap_info.inc_reqs = 0;
+	ap_info.sampling_rate = 720;	/* 720ms */
+	if (setup_max_cpus == 0) {	/* boot with nosmp */
+		ap_info.maxcpus = 1;
+		autoplug_enabled = 0;
+	}
+	if (setup_max_cpus > num_possible_cpus())
+		ap_info.maxcpus = num_possible_cpus();
+
+	pr_info("mincpu = %d, maxcpu = %d, autoplug_enabled = %d, rate = %d\n",
+		ap_info.mincpus, ap_info.maxcpus, autoplug_enabled,
+		ap_info.sampling_rate);
+
+	for_each_possible_cpu(i)
+		per_cpu(cpu_adjusting, i) = 0;
+	delay = msecs_to_jiffies(ap_info.sampling_rate * 24);
+	INIT_DEFERRABLE_WORK(&ap_info.work, do_autoplug_timer);
+	schedule_delayed_work_on(0, &ap_info.work, delay);
+
+	if (!autoplug_enabled)
+		cancel_delayed_work_sync(&ap_info.work);
+
+	return ret;
+}
+
+late_initcall(cpuautoplug_init);
diff --git a/arch/sw_64/kernel/crash_dump.c b/arch/sw_64/kernel/crash_dump.c
new file mode 100644
index 0000000000000000000000000000000000000000..4484673823b8e6065d9efb5f2299a21df67d421a
--- /dev/null
+++ b/arch/sw_64/kernel/crash_dump.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/sw_64/kernel/crash_dump.c
+ *
+ * Copyright (C) 2019 JN
+ * Author: He Sheng
+ *
+ * This code is taken from arch/x86/kernel/crash_dump_64.c
+ *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ *	Copyright (C) IBM Corporation, 2004. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+
+/**
+ * copy_oldmem_page() - copy one page from old kernel memory
+ * @pfn: page frame number to be copied
+ * @buf: buffer where the copied page is placed
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page
+ * @userbuf: if set, @buf is in the user address space
+ *
+ * This function copies one page from old kernel memory into the buffer
+ * pointed to by @buf. If @buf is in userspace, set @userbuf to %1. Returns
+ * number of bytes copied or negative error in case of failure.
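+ *
+ * The old kernel's pages are not part of the crash kernel's linear
+ * mapping, so the page is mapped with ioremap() for the duration of
+ * the copy and unmapped again afterwards.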
+ */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, + int userbuf) +{ + void *vaddr; + + if (!csize) + return 0; + + vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE); + if (!vaddr) + return -ENOMEM; + + if (userbuf) { + if (copy_to_user(buf, vaddr + offset, csize)) { + iounmap(vaddr); + return -EFAULT; + } + } else { + memcpy(buf, vaddr + offset, csize); + } + + iounmap(vaddr); + return csize; +} diff --git a/arch/sw_64/kernel/dup_print.c b/arch/sw_64/kernel/dup_print.c new file mode 100644 index 0000000000000000000000000000000000000000..439ac75feb01f23ed201eb4e4dc3ba23da8ff232 --- /dev/null +++ b/arch/sw_64/kernel/dup_print.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +#ifdef CONFIG_SW64_RRK + +#define KERNEL_PRINTK_BUFF_BASE (0x700000UL + __START_KERNEL_map) + +static DEFINE_SPINLOCK(printk_lock); + +unsigned long sw64_printk_offset; +#define PRINTK_SIZE 0x100000UL + +int sw64_printk(const char *fmt, va_list args) +{ + char *sw64_printk_buf; + int printed_len = 0; + unsigned long flags; + + spin_lock_irqsave(&printk_lock, flags); + + sw64_printk_buf = (char *)(KERNEL_PRINTK_BUFF_BASE + sw64_printk_offset); + + if (sw64_printk_offset >= (PRINTK_SIZE-1024)) { //printk wrapped + sw64_printk_offset = 0; + sw64_printk_buf = (char *)(KERNEL_PRINTK_BUFF_BASE + sw64_printk_offset); + memset(sw64_printk_buf, 0, PRINTK_SIZE); + printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); + } else { + printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); + if (is_in_emul()) { + void __iomem *addr = __va(QEMU_PRINTF_BUFF_BASE); + u64 data = ((u64)sw64_printk_buf & 0xffffffffUL) + | ((u64)printed_len << 32); + *(u64 *)addr = data; + } + } + sw64_printk_offset += printed_len; + spin_unlock_irqrestore(&printk_lock, flags); + return printed_len; +} +#endif + +#ifdef CONFIG_SW64_RRU +#include + +static DEFINE_SPINLOCK(printf_lock); +#define USER_PRINT_BUFF_BASE (0x600000UL + __START_KERNEL_map) +#define USER_PRINT_BUFF_LEN 0x100000UL +#define USER_MESSAGE_MAX_LEN 0x100000UL +unsigned long sw64_printf_offset; +int sw64_user_printf(const char __user *buf, int len) +{ + static char *user_printf_buf; + unsigned long flags; + + if (current->pid <= 0) + return 0; + + /* + * do not write large (fake) message which may not be from + * STDOUT/STDERR any more as file descriptor could be duplicated + * in a pipe. 
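+ *
+ * Messages are appended to a fixed 1 MiB buffer; when a message
+ * would overflow it, the offset wraps to the start and the buffer
+ * is cleared before writing.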
+ */
+	if (len > USER_MESSAGE_MAX_LEN)
+		return 0;
+
+	spin_lock_irqsave(&printf_lock, flags);
+	user_printf_buf = (char *)(USER_PRINT_BUFF_BASE + sw64_printf_offset);
+
+	if (sw64_printf_offset == 0)
+		memset(user_printf_buf, 0, USER_PRINT_BUFF_LEN);
+
+	if ((sw64_printf_offset + len) > USER_PRINT_BUFF_LEN) {
+		sw64_printf_offset = 0;
+		user_printf_buf = (char *)(USER_PRINT_BUFF_BASE + sw64_printf_offset);
+		memset(user_printf_buf, 0, USER_PRINT_BUFF_LEN);
+	}
+	if (copy_from_user(user_printf_buf, buf, len)) {
+		spin_unlock_irqrestore(&printf_lock, flags);
+		return -EFAULT;
+	}
+	sw64_printf_offset += len;
+	spin_unlock_irqrestore(&printf_lock, flags);
+	return 0;
+}
+#endif
diff --git a/arch/sw_64/kernel/early_init.c b/arch/sw_64/kernel/early_init.c
new file mode 100644
index 0000000000000000000000000000000000000000..2ec7a3e994436034ec5d507858d9588d0f3ed6c0
--- /dev/null
+++ b/arch/sw_64/kernel/early_init.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+
+#include
+
+asmlinkage __visible void __init sw64_start_kernel(void)
+{
+	fixup_hmcall();
+	save_ktp();
+	start_kernel();
+}
diff --git a/arch/sw_64/kernel/early_printk.c b/arch/sw_64/kernel/early_printk.c
new file mode 100644
index 0000000000000000000000000000000000000000..66af1165e89b266d1ea4ec5855da4baa1bfc67ff
--- /dev/null
+++ b/arch/sw_64/kernel/early_printk.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+
+#include
+
+static unsigned long early_serial_base;	/* ttyS0 */
+
+#define XMTRDY		0x20
+
+#define DLAB		0x80
+
+#define TXR		0	/* Transmit register (WRITE) */
+#define RXR		0	/* Receive register (READ) */
+#define IER		1	/* Interrupt Enable */
+#define IIR		2	/* Interrupt ID */
+#define FCR		2	/* FIFO control */
+#define LCR		3	/* Line control */
+#define MCR		4	/* Modem control */
+#define LSR		5	/* Line Status */
+#define MSR		6	/* Modem Status */
+#define DLL		0	/* Divisor Latch Low */
+#define DLH		1	/* Divisor latch High */
+
+static void mem32_serial_out(unsigned long addr, int offset, int value)
+{
+	void __iomem *vaddr = (void __iomem *)addr;
+
+	offset = offset << 9;
+
+	writel(value, vaddr + offset);
+}
+
+static unsigned int mem32_serial_in(unsigned long addr, int offset)
+{
+	void __iomem *vaddr = (void __iomem *)addr;
+
+	offset = offset << 9;
+
+	return readl(vaddr + offset);
+}
+
+static unsigned int (*serial_in)(unsigned long addr, int offset) = mem32_serial_in;
+static void (*serial_out)(unsigned long addr, int offset, int value) = mem32_serial_out;
+
+static int early_serial_putc(unsigned char ch)
+{
+	unsigned int timeout = 0xffff;
+
+	while ((serial_in(early_serial_base, LSR) & XMTRDY) == 0 && --timeout)
+		cpu_relax();
+	serial_out(early_serial_base, TXR, ch);
+
+	return timeout ?
0 : -1; +} + +static void early_serial_write(struct console *con, const char *s, unsigned int n) +{ + while (*s && n-- > 0) { + if (*s == '\n') + early_serial_putc('\r'); + early_serial_putc(*s); + s++; + } +} + +static unsigned int uart_get_refclk(void) +{ + return 24000000UL; +} + +static unsigned int uart_calculate_baudrate_divisor(unsigned long baudrate) +{ + unsigned int refclk = uart_get_refclk(); + + return (1 + (2 * refclk) / (baudrate * 16)) / 2; +} + +static __init void early_serial_hw_init(unsigned long baud) +{ + unsigned char c; + unsigned long divisor = uart_calculate_baudrate_divisor(baud); + + serial_out(early_serial_base, LCR, 0x3); /* 8n1 */ + serial_out(early_serial_base, IER, 0); /* no interrupt */ + serial_out(early_serial_base, FCR, 0); /* no fifo */ + serial_out(early_serial_base, MCR, 0x3); /* DTR + RTS */ + + c = serial_in(early_serial_base, LCR); + serial_out(early_serial_base, LCR, c | DLAB); + serial_out(early_serial_base, DLL, divisor & 0xff); + serial_out(early_serial_base, DLH, (divisor >> 8) & 0xff); + serial_out(early_serial_base, LCR, c & ~DLAB); +} + +#define DEFAULT_BAUD 115200 + +static __init void early_serial_init(char *s) +{ + unsigned long baud = DEFAULT_BAUD; + int err; + + if (*s == ',') + ++s; + + if (*s) { + unsigned int port; + static const long bases[] __initconst = { 0xfff0803300000000ULL, + 0xfff0903300000000ULL }; + + if (!strncmp(s, "ttyS", 4)) + s += 4; + err = kstrtouint(s, 10, &port); + if (err || port > 1) + port = 0; + early_serial_base = bases[port]; + s += strcspn(s, ","); + if (*s == ',') + s++; + } + + if (*s) { + err = kstrtoul(s, 0, &baud); + if (err || baud == 0) + baud = DEFAULT_BAUD; + } + + /* These will always be IO based ports */ + serial_in = mem32_serial_in; + serial_out = mem32_serial_out; + + /* Set up the HW */ + early_serial_hw_init(baud); +} + +static struct console early_serial_console = { + .name = "early", + .write = early_serial_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +static void early_console_register(struct console *con, int keep_early) +{ + if (con->index != -1) { + pr_crit("ERROR: earlyprintk= %s already used\n", + con->name); + return; + } + early_console = con; + + if (keep_early) + early_console->flags &= ~CON_BOOT; + else + early_console->flags |= CON_BOOT; + + register_console(early_console); +} + +static int __init setup_early_printk(char *buf) +{ + int keep; + + if (!buf) + return 0; + + if (early_console) + return 0; + + keep = (strstr(buf, "keep") != NULL); + + if (!strncmp(buf, "serial", 6)) { + buf += 6; + early_serial_init(buf); + early_console_register(&early_serial_console, keep); + if (!strncmp(buf, ",ttyS", 5)) + buf += 5; + } + + return 0; +} + +early_param("earlyprintk", setup_early_printk); diff --git a/arch/sw_64/kernel/entry-ftrace.S b/arch/sw_64/kernel/entry-ftrace.S new file mode 100644 index 0000000000000000000000000000000000000000..73e8e043fc9d14fbbaa50bb164fcc4326329001b --- /dev/null +++ b/arch/sw_64/kernel/entry-ftrace.S @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/kernel/entry-ftrace.S + * + * Author: linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
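+ *
+ * This file provides the low-level entry points used by the function
+ * tracer: _mcount and ftrace_caller() for function tracing,
+ * ftrace_regs_caller() for DYNAMIC_FTRACE_WITH_REGS, and
+ * ftrace_graph_caller()/return_to_handler() for the graph tracer.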
+ * + */ +#include +#include +#include + + .text + .set noat + .align 4 + +#define FTRACE_SP_OFF 0x50 + .macro mcount_enter + subl $sp, FTRACE_SP_OFF, $sp + stl $16, 0($sp) + stl $17, 0x8($sp) + stl $18, 0x10($sp) + stl $26, 0x18($sp) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + stl $9, 0x20($sp) +#endif + stl $28, 0x28($sp) + stl $29, 0x30($sp) + stl $19, 0x38($sp) + stl $20, 0x40($sp) + stl $21, 0x48($sp) + .endm + + .macro mcount_end + ldl $16, 0($sp) + ldl $17, 0x8($sp) + ldl $18, 0x10($sp) + ldl $26, 0x18($sp) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + ldl $9, 0x20($sp) +#endif + ldl $28, 0x28($sp) + ldl $29, 0x30($sp) + ldl $19, 0x38($sp) + ldl $20, 0x40($sp) + ldl $21, 0x48($sp) + addl $sp, FTRACE_SP_OFF, $sp + .endm + + .macro RESTORE_GRAPH_ARGS + ldi $16, 0x18($sp) /* &ra */ + bis $31, $9, $17 /* pc */ + #ifdef HAVE_FUNCTION_GRAPH_FP_TEST + bis $31, $15, $18 /* fp */ + #endif + .endm + + .macro SAVE_PT_REGS + ldi $sp, -PT_REGS_SIZE($sp) + stl $0, PT_REGS_R0($sp) + stl $1, PT_REGS_R1($sp) + stl $2, PT_REGS_R2($sp) + stl $3, PT_REGS_R3($sp) + stl $4, PT_REGS_R4($sp) + stl $5, PT_REGS_R5($sp) + stl $6, PT_REGS_R6($sp) + stl $7, PT_REGS_R7($sp) + stl $8, PT_REGS_R8($sp) + stl $9, PT_REGS_R9($sp) + stl $10, PT_REGS_R10($sp) + stl $11, PT_REGS_R11($sp) + stl $12, PT_REGS_R12($sp) + stl $13, PT_REGS_R13($sp) + stl $14, PT_REGS_R14($sp) + stl $15, PT_REGS_R15($sp) + stl $16, PT_REGS_R16($sp) + stl $17, PT_REGS_R17($sp) + stl $18, PT_REGS_R18($sp) + stl $19, PT_REGS_R19($sp) + stl $20, PT_REGS_R20($sp) + stl $21, PT_REGS_R21($sp) + stl $22, PT_REGS_R22($sp) + stl $23, PT_REGS_R23($sp) + stl $24, PT_REGS_R24($sp) + stl $25, PT_REGS_R25($sp) + stl $26, PT_REGS_R26($sp) + stl $27, PT_REGS_R27($sp) + stl $28, PT_REGS_R28($sp) + stl $29, PT_REGS_GP($sp) + ldi $0, PT_REGS_SIZE($sp) + stl $0, PT_REGS_SP($sp) + .endm + + .macro RESTORE_PT_REGS + ldl $0, PT_REGS_R0($sp) + ldl $1, PT_REGS_R1($sp) + ldl $2, PT_REGS_R2($sp) + ldl $3, PT_REGS_R3($sp) + ldl $4, PT_REGS_R4($sp) + ldl $5, PT_REGS_R5($sp) + ldl $6, PT_REGS_R6($sp) + ldl $7, PT_REGS_R7($sp) + ldl $8, PT_REGS_R8($sp) + ldl $9, PT_REGS_R9($sp) + ldl $10, PT_REGS_R10($sp) + ldl $11, PT_REGS_R11($sp) + ldl $12, PT_REGS_R12($sp) + ldl $13, PT_REGS_R13($sp) + ldl $14, PT_REGS_R14($sp) + ldl $15, PT_REGS_R15($sp) + ldl $16, PT_REGS_R16($sp) + ldl $17, PT_REGS_R17($sp) + ldl $18, PT_REGS_R18($sp) + ldl $19, PT_REGS_R19($sp) + ldl $20, PT_REGS_R20($sp) + ldl $21, PT_REGS_R21($sp) + ldl $22, PT_REGS_R22($sp) + ldl $23, PT_REGS_R23($sp) + ldl $24, PT_REGS_R24($sp) + ldl $25, PT_REGS_R25($sp) + ldl $26, PT_REGS_R26($sp) + ldl $27, PT_REGS_R27($sp) + ldl $28, PT_REGS_R28($sp) + ldl $29, PT_REGS_GP($sp) + ldi $sp, PT_REGS_SIZE($sp) + .endm + + .macro RESTORE_GRAPH_REG_ARGS + ldi $16, PT_REGS_R26($sp) + bis $31, $9, $17 +#ifdef HAVE_FUNCTION_GRAPH_FP_TEST + bis $31, $15, $18 +#endif + .endm + + /* save return value regs*/ + .macro save_return_regs + subl $sp, 0x8, $sp + stl $0, 0x0($sp) + .endm + + /* restore return value regs*/ + .macro restore_return_regs + ldl $0, 0x0($sp) + addl $sp, 0x8, $sp + .endm + + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * void ftrace_graph_caller(void) + * + * Called from ftrace_caller() or ftrace_regs_caller() when function_graph + * tracer is selected. + * This function prepare_ftrace_return() fakes ra's value on the call + * stack in order to intercept instrumented function's return path and + * run return_to_handler() later on its exit. 
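+ *
+ * The &ra argument is prepared by the caller (RESTORE_GRAPH_ARGS) and
+ * points at the return-address slot saved on entry;
+ * prepare_ftrace_return() rewrites that slot so the traced function
+ * exits through return_to_handler().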
+ */
+
+ENTRY(ftrace_graph_caller)
+	ldgp	$29, 0($27)
+	ldi	$sp, -16($sp)
+	stl	$26, 0($sp)
+	stl	$15, 8($sp)
+	bis	$31, $sp, $15
+
+	ldi	$27, prepare_ftrace_return
+ftrace_graph_call:
+	.global ftrace_graph_call
+	/*
+	 * Calling ftrace_enable/disable_ftrace_graph_caller would overwrite
+	 * the nop below.
+	 */
+	nop	/* nop, or call prepare_ftrace_return() */
+
+	ldl	$26, 0($sp)
+	ldl	$15, 8($sp)
+	ldi	$sp, 16($sp)
+	ret	$31, ($26), 1
+ENDPROC(ftrace_graph_caller)
+
+/*
+ * void return_to_handler(void)
+ *
+ * Run ftrace_return_to_handler() before going back to parent.
+ * @fp is checked against the value passed by ftrace_graph_caller()
+ * only when HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
+ *
+ * It is run by the "ret" instruction, which does not modify $27, so it
+ * has to recalculate $27 before ldgp.
+ */
+ENTRY(return_to_handler)
+	br	$27, 1f
+1:	ldgp	$29, 0($27)
+	save_return_regs
+	bis	$31, $15, $16		/* parent's fp */
+	ldi	$27, ftrace_return_to_handler
+	call	$26, ($27)
+	bis	$31, $0, $26
+	restore_return_regs
+	ret	$31, ($26), 1
+END(return_to_handler)
+
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+	.global _mcount
+	.ent _mcount
+_mcount:
+	ret	$31, ($28), 1
+	.end _mcount
+
+	.global ftrace_caller
+	.ent ftrace_caller
+ftrace_caller:
+	mcount_enter
+	br	$27, 1f
+1:	ldgp	$29, 0($27)
+
+	subl	$28, MCOUNT_INSN_SIZE, $16
+	bis	$26, $31, $17
+	ldl	$18, function_trace_op
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/*
+	 * the graph tracer (specifically, prepare_ftrace_return) needs these
+	 * arguments but for now the function tracer occupies the regs, so we
+	 * save them in callee-saved regs to recover later.
+	 */
+	bis	$31, $16, $9
+#endif
+	ldi	$4, current_tracer
+	ldl	$27, 0($4)
+
+	.global ftrace_call
+ftrace_call:				/* tracer(pc, ra); */
+	nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	RESTORE_GRAPH_ARGS
+	call	ftrace_graph_caller
+#endif
+	mcount_end
+	ret	$31, ($28), 1
+	.end ftrace_caller
+#else /* !CONFIG_DYNAMIC_FTRACE */
+
+	.global _mcount
+	.ent _mcount
+_mcount:
+	mcount_enter
+	br	$27, 1f
+1:	ldgp	$29, 0($27)
+
+	ldl	$27, ftrace_trace_function	// if (ftrace_trace_function
+	ldi	$5, ftrace_stub			//	!= ftrace_stub)
+	cmpeq	$27, $5, $6			//
+	bne	$6, skip_ftrace
+
+	subl	$28, MCOUNT_INSN_SIZE, $16	// function's pc
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	bis	$31, $16, $9
+#endif
+	bis	$26, $31, $17			// function's ra (parent's pc)
+	call	$26, ($27)			// (*ftrace_trace_function)(pc, ra);
+
+skip_ftrace:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	ldl	$4, ftrace_graph_return		// if ((ftrace_graph_return
+	cmpeq	$4, $5, $6			//	!= ftrace_stub)
+	beq	$6, 2f
+	ldl	$4, ftrace_graph_entry		//    || (ftrace_graph_entry
+	ldi	$5, ftrace_graph_entry_stub	//	!= ftrace_graph_entry_stub))
+	cmpeq	$4, $5, $6
+	bne	$6, 3f
+2:	RESTORE_GRAPH_ARGS
+	call	ftrace_graph_caller		// ftrace_graph_caller();
+#endif
+3:	mcount_end
+	ret	$31, ($28), 1
+	.end _mcount
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	.global ftrace_regs_caller
+	.ent ftrace_regs_caller
+ftrace_regs_caller:
+	SAVE_PT_REGS
+	br	$27, 1f
+1:	ldgp	$29, 0($27)
+
+	subl	$28, MCOUNT_INSN_SIZE, $16
+	bis	$26, $31, $17
+	ldi	$4, function_trace_op
+	ldl	$18, 0($4)
+	mov	$sp, $19
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	bis	$31, $16, $9
+#endif
+	ldi	$4, current_tracer
+	ldl	$27, 0($4)
+
+	.global ftrace_regs_call
+ftrace_regs_call:
+	nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	RESTORE_GRAPH_REG_ARGS
+	call	ftrace_graph_caller
+#endif
+	RESTORE_PT_REGS
+	ret	$31, ($28), 1
+	.end ftrace_regs_caller
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
+	.global ftrace_stub
+	.ent ftrace_stub
+ftrace_stub:
+	ret	$31, ($26), 1
+	.end ftrace_stub
diff --git a/arch/sw_64/kernel/entry.S b/arch/sw_64/kernel/entry.S
new file mode 100644
index 0000000000000000000000000000000000000000..59c2ff4eb91504efc802dd8994854947aea6315f
--- /dev/null
+++ b/arch/sw_64/kernel/entry.S
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Kernel entry-points.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+	.text
+	.set noat
+/*
+ * This defines the normal kernel pt_regs layout.
+ *
+ * regs 9-15 are preserved by C code; saving them to pt_regs makes
+ * them easier to access in a unified way.
+ * regs 16-18 saved by HMcode
+ * regs 29-30 saved and set up by HMcode
+ */
+
+	.macro SAVE_ALL
+	ldi	$sp, -PT_REGS_HM_PS($sp)
+	stl	$0, PT_REGS_R0($sp)
+	stl	$1, PT_REGS_R1($sp)
+	stl	$2, PT_REGS_R2($sp)
+	stl	$3, PT_REGS_R3($sp)
+	stl	$4, PT_REGS_R4($sp)
+	stl	$28, PT_REGS_R28($sp)
+	stl	$5, PT_REGS_R5($sp)
+	stl	$6, PT_REGS_R6($sp)
+	stl	$7, PT_REGS_R7($sp)
+	stl	$8, PT_REGS_R8($sp)
+	stl	$9, PT_REGS_R9($sp)
+	stl	$10, PT_REGS_R10($sp)
+	stl	$11, PT_REGS_R11($sp)
+	stl	$12, PT_REGS_R12($sp)
+	stl	$13, PT_REGS_R13($sp)
+	stl	$14, PT_REGS_R14($sp)
+	stl	$15, PT_REGS_R15($sp)
+	stl	$19, PT_REGS_R19($sp)
+	stl	$20, PT_REGS_R20($sp)
+	stl	$21, PT_REGS_R21($sp)
+	stl	$22, PT_REGS_R22($sp)
+	stl	$23, PT_REGS_R23($sp)
+	stl	$24, PT_REGS_R24($sp)
+	stl	$25, PT_REGS_R25($sp)
+	stl	$26, PT_REGS_R26($sp)
+	stl	$27, PT_REGS_R27($sp)
+	ldl	$1, PT_REGS_HM_R16($sp)
+	ldl	$2, PT_REGS_HM_R17($sp)
+	ldl	$3, PT_REGS_HM_R18($sp)
+	ldl	$4, PT_REGS_HM_GP($sp)
+	ldl	$5, PT_REGS_HM_PC($sp)
+	ldl	$6, PT_REGS_HM_PS($sp)
+	stl	$1, PT_REGS_R16($sp)
+	stl	$2, PT_REGS_R17($sp)
+	stl	$3, PT_REGS_R18($sp)
+	stl	$4, PT_REGS_GP($sp)
+	stl	$5, PT_REGS_PC($sp)
+	stl	$6, PT_REGS_PS($sp)
+	and	$6, 0x8, $7
+	beq	$7, 1f
+	sys_call HMC_rdusp
+	br	2f
+1:	ldi	$0, PT_REGS_SIZE($sp)
+2:	stl	$0, PT_REGS_SP($sp)
+	ldi	$1, NO_SYSCALL
+	stl	$1, PT_REGS_ORIG_R0($sp)
+	sys_call HMC_rdktp
+	.endm
+
+	.macro RESTORE_ALL
+	ldl	$16, PT_REGS_SP($sp)
+	/* skip wrusp if returning to kernel */
+	blt	$16, 1f
+	sys_call HMC_wrusp
+1:	ldl	$1, PT_REGS_R16($sp)
+	ldl	$2, PT_REGS_R17($sp)
+	ldl	$3, PT_REGS_R18($sp)
+	ldl	$4, PT_REGS_GP($sp)
+	ldl	$5, PT_REGS_PC($sp)
+	ldl	$6, PT_REGS_PS($sp)
+	stl	$1, PT_REGS_HM_R16($sp)
+	stl	$2, PT_REGS_HM_R17($sp)
+	stl	$3, PT_REGS_HM_R18($sp)
+	stl	$4, PT_REGS_HM_GP($sp)
+	stl	$5, PT_REGS_HM_PC($sp)
+	stl	$6, PT_REGS_HM_PS($sp)
+	ldl	$0, PT_REGS_R0($sp)
+	ldl	$1, PT_REGS_R1($sp)
+	ldl	$2, PT_REGS_R2($sp)
+	ldl	$3, PT_REGS_R3($sp)
+	ldl	$4, PT_REGS_R4($sp)
+	ldl	$5, PT_REGS_R5($sp)
+	ldl	$6, PT_REGS_R6($sp)
+	ldl	$7, PT_REGS_R7($sp)
+	ldl	$8, PT_REGS_R8($sp)
+	ldl	$9, PT_REGS_R9($sp)
+	ldl	$10, PT_REGS_R10($sp)
+	ldl	$11, PT_REGS_R11($sp)
+	ldl	$12, PT_REGS_R12($sp)
+	ldl	$13, PT_REGS_R13($sp)
+	ldl	$14, PT_REGS_R14($sp)
+	ldl	$15, PT_REGS_R15($sp)
+	ldl	$19, PT_REGS_R19($sp)
+	ldl	$20, PT_REGS_R20($sp)
+	ldl	$21, PT_REGS_R21($sp)
+	ldl	$22, PT_REGS_R22($sp)
+	ldl	$23, PT_REGS_R23($sp)
+	ldl	$24, PT_REGS_R24($sp)
+	ldl	$25, PT_REGS_R25($sp)
+	ldl	$26, PT_REGS_R26($sp)
+	ldl	$27, PT_REGS_R27($sp)
+	ldl	$28, PT_REGS_R28($sp)
+	ldi	$sp, PT_REGS_HM_PS($sp)
+	.endm
+
+/*
+ * Non-syscall kernel entry points.
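+ *
+ * entInt, entArith, entMM, entIF and entUna are entered from HMcode
+ * with $16-$18, $29 and $30 already saved; SAVE_ALL stores the rest of
+ * the register state into struct pt_regs before the C handler runs,
+ * and the common exit path is ret_from_sys_call.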
+ */ + + .align 4 + .globl entInt + .ent entInt +entInt: + SAVE_ALL + mov $sp, $19 + call $26, do_entInt + br ret_from_sys_call + .end entInt + + .align 4 + .globl entArith + .ent entArith +entArith: + SAVE_ALL + mov $sp, $18 + call $26, do_entArith + br ret_from_sys_call + .end entArith + + .align 4 + .globl entMM + .ent entMM +entMM: + SAVE_ALL + mov $sp, $19 + call $26, do_page_fault + br ret_from_sys_call + .end entMM + + .align 4 + .globl entIF + .ent entIF +entIF: + SAVE_ALL + mov $sp, $18 + call $26, do_entIF + br ret_from_sys_call + .end entIF + +/* + * Handle unalignment exception. + * We don't handle the "gp" register correctly, but if we fault on a + * gp-register unaligned load/store, something is _very_ wrong in the + * kernel anyway. + */ + .align 4 + .globl entUna + .ent entUna +entUna: + SAVE_ALL + mov $sp, $19 + ldl $0, PT_REGS_PS($sp) + and $0, 8, $0 /* user mode ? */ + beq $0, 1f + call $26, do_entUnaUser /* return to ret_from_syscall */ + br ret_from_sys_call +1: ldl $9, PT_REGS_GP($sp) + call $26, do_entUna + stl $9, PT_REGS_GP($sp) + RESTORE_ALL + sys_call HMC_rti + .end entUna + +/* + * The system call entry point is special. Most importantly, it looks + * like a function call to userspace as far as clobbered registers. We + * do preserve the argument registers (for syscall restarts) and $26 + * (for leaf syscall functions). + * + * So much for theory. We don't take advantage of this yet. + * + * Note that a0-a2 are not saved by HMcode as with the other entry points. + */ + + .align 4 + .globl entSys + .ent entSys +entSys: + SAVE_ALL + stl $16, PT_REGS_R16($sp) + stl $17, PT_REGS_R17($sp) + stl $18, PT_REGS_R18($sp) + mov $sp, $16 + call $26, do_entSys + br ret_from_sys_call + .end entSys + + .align 4 + .globl ret_from_sys_call + .ent ret_from_sys_call +ret_from_sys_call: +#ifdef CONFIG_SUBARCH_C3B + fillcs 0($sp) /* prefetch */ + fillcs 128($sp) /* prefetch */ +#endif + br $27, 1f +1: ldgp $29, 0($27) + /* Make sure need_resched and sigpending don't change between + sampling and the rti. */ + ldi $16, 7 + sys_call HMC_swpipl + ldl $0, PT_REGS_PS($sp) + and $0, 8, $0 + beq $0, restore_all +ret_to_user: + ldw $17, TI_FLAGS($8) + and $17, _TIF_WORK_MASK, $2 + beq $2, restore_all + mov $sp, $16 + call $26, do_notify_resume +restore_all: + RESTORE_ALL + sys_call HMC_rti + .end ret_from_sys_call + +/* + * Integer register context switch + * The callee-saved registers must be saved and restored. + * + * a0: previous task_struct (must be preserved across the switch) + * a1: next task_struct + * + * The value of a0 must be preserved by this function, as that's how + * arguments are passed to schedule_tail. + */ + .align 4 + .globl __switch_to + .ent __switch_to +__switch_to: + .prologue 0 + /* Save context into prev->thread */ + stl $26, TASK_THREAD_RA($16) + stl $30, TASK_THREAD_SP($16) + stl $9, TASK_THREAD_S0($16) + stl $10, TASK_THREAD_S1($16) + stl $11, TASK_THREAD_S2($16) + stl $12, TASK_THREAD_S3($16) + stl $13, TASK_THREAD_S4($16) + stl $14, TASK_THREAD_S5($16) + stl $15, TASK_THREAD_S6($16) + /* Restore context from next->thread */ + ldl $26, TASK_THREAD_RA($17) + ldl $30, TASK_THREAD_SP($17) + ldl $9, TASK_THREAD_S0($17) + ldl $10, TASK_THREAD_S1($17) + ldl $11, TASK_THREAD_S2($17) + ldl $12, TASK_THREAD_S3($17) + ldl $13, TASK_THREAD_S4($17) + ldl $14, TASK_THREAD_S5($17) + ldl $15, TASK_THREAD_S6($17) + mov $17, $8 + sys_call HMC_wrktp + mov $16, $0 + ret + .end __switch_to + +/* + * New processes begin life here. 
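+ *
+ * A forked child first runs here after __switch_to(): schedule_tail()
+ * finishes the scheduler bookkeeping for the new task, then the child
+ * leaves through the common syscall-return path.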
+ */ + + .globl ret_from_fork + .align 4 + .ent ret_from_fork +ret_from_fork: + call $26, schedule_tail + br ret_from_sys_call + .end ret_from_fork + +/* + * ... and new kernel threads - here + */ + .align 4 + .globl ret_from_kernel_thread + .ent ret_from_kernel_thread +ret_from_kernel_thread: + call $26, schedule_tail + mov $9, $27 + mov $10, $16 + call $26, ($9) + br ret_to_user + .end ret_from_kernel_thread diff --git a/arch/sw_64/kernel/fpu.S b/arch/sw_64/kernel/fpu.S new file mode 100644 index 0000000000000000000000000000000000000000..ddc988681fdd01986c6d2ed068f6b37c75e811d3 --- /dev/null +++ b/arch/sw_64/kernel/fpu.S @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include +#include + + .text + .set noat +ENTRY(__fpstate_save) + /* a0: prev task */ +#ifdef CONFIG_SUBARCH_C4 + csrr $1, CSR_WR_FREGS + beq $1, out +#endif + vstd $f0, TASK_THREAD_F0(a0) + vstd $f1, TASK_THREAD_F1(a0) + vstd $f2, TASK_THREAD_F2(a0) + vstd $f3, TASK_THREAD_F3(a0) + vstd $f4, TASK_THREAD_F4(a0) + vstd $f5, TASK_THREAD_F5(a0) + vstd $f6, TASK_THREAD_F6(a0) + vstd $f7, TASK_THREAD_F7(a0) + vstd $f8, TASK_THREAD_F8(a0) + vstd $f9, TASK_THREAD_F9(a0) + vstd $f10, TASK_THREAD_F10(a0) + vstd $f11, TASK_THREAD_F11(a0) + vstd $f12, TASK_THREAD_F12(a0) + vstd $f13, TASK_THREAD_F13(a0) + vstd $f14, TASK_THREAD_F14(a0) + vstd $f15, TASK_THREAD_F15(a0) + vstd $f16, TASK_THREAD_F16(a0) + vstd $f17, TASK_THREAD_F17(a0) + vstd $f18, TASK_THREAD_F18(a0) + vstd $f19, TASK_THREAD_F19(a0) + vstd $f20, TASK_THREAD_F20(a0) + vstd $f21, TASK_THREAD_F21(a0) + vstd $f22, TASK_THREAD_F22(a0) + vstd $f23, TASK_THREAD_F23(a0) + vstd $f24, TASK_THREAD_F24(a0) + vstd $f25, TASK_THREAD_F25(a0) + vstd $f26, TASK_THREAD_F26(a0) + vstd $f27, TASK_THREAD_F27(a0) + rfpcr $f0 + vstd $f28, TASK_THREAD_F28(a0) + vstd $f29, TASK_THREAD_F29(a0) + vstd $f30, TASK_THREAD_F30(a0) + fstd $f0, TASK_THREAD_FPCR(a0) + vldd $f0, TASK_THREAD_F0(a0) +out: + ret +END(__fpstate_save) + +ENTRY(__fpstate_restore) + /* a0: next task */ + fldd $f0, TASK_THREAD_FPCR(a0) + wfpcr $f0 + fimovd $f0, t1 + and t1, 0x3, t1 + beq t1, $setfpec_0 + subl t1, 0x1, t1 + beq t1, $setfpec_1 + subl t1, 0x1, t1 + beq t1, $setfpec_2 + setfpec3 + br $setfpec_over +$setfpec_0: + setfpec0 + br $setfpec_over +$setfpec_1: + setfpec1 + br $setfpec_over +$setfpec_2: + setfpec2 +$setfpec_over: + vldd $f0, TASK_THREAD_F0(a0) + vldd $f1, TASK_THREAD_F1(a0) + vldd $f2, TASK_THREAD_F2(a0) + vldd $f3, TASK_THREAD_F3(a0) + vldd $f4, TASK_THREAD_F4(a0) + vldd $f5, TASK_THREAD_F5(a0) + vldd $f6, TASK_THREAD_F6(a0) + vldd $f7, TASK_THREAD_F7(a0) + vldd $f8, TASK_THREAD_F8(a0) + vldd $f9, TASK_THREAD_F9(a0) + vldd $f10, TASK_THREAD_F10(a0) + vldd $f11, TASK_THREAD_F11(a0) + vldd $f12, TASK_THREAD_F12(a0) + vldd $f13, TASK_THREAD_F13(a0) + vldd $f14, TASK_THREAD_F14(a0) + vldd $f15, TASK_THREAD_F15(a0) + vldd $f16, TASK_THREAD_F16(a0) + vldd $f17, TASK_THREAD_F17(a0) + vldd $f18, TASK_THREAD_F18(a0) + vldd $f19, TASK_THREAD_F19(a0) + vldd $f20, TASK_THREAD_F20(a0) + vldd $f21, TASK_THREAD_F21(a0) + vldd $f22, TASK_THREAD_F22(a0) + vldd $f23, TASK_THREAD_F23(a0) + vldd $f24, TASK_THREAD_F24(a0) + vldd $f25, TASK_THREAD_F25(a0) + vldd $f26, TASK_THREAD_F26(a0) + vldd $f27, TASK_THREAD_F27(a0) + vldd $f28, TASK_THREAD_F28(a0) + vldd $f29, TASK_THREAD_F29(a0) + vldd $f30, TASK_THREAD_F30(a0) +#ifdef CONFIG_SUBARCH_C4 + csrw $31, CSR_WR_FREGS +#endif + ret +END(__fpstate_restore) diff --git a/arch/sw_64/kernel/ftrace.c b/arch/sw_64/kernel/ftrace.c new file mode 
100644 index 0000000000000000000000000000000000000000..fb25ffe3dbdaf4f26bf4389e63d37fd1aaaa754b --- /dev/null +++ b/arch/sw_64/kernel/ftrace.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on arch/arm64/kernel/ftrace.c + * + * Copyright (C) 2019 os kernel team + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#include + +#ifdef CONFIG_FUNCTION_TRACER +EXPORT_SYMBOL(_mcount); +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE + +#define TI_FTRACE_ADDR (offsetof(struct thread_info, dyn_ftrace_addr)) +#define TI_FTRACE_REGS_ADDR \ + (offsetof(struct thread_info, dyn_ftrace_regs_addr)) + +unsigned long current_tracer = (unsigned long)ftrace_stub; + +/* + * Replace a single instruction, which may be a branch or NOP. + */ +static int ftrace_modify_code(unsigned long pc, u32 new) +{ + if (sw64_insn_write((void *)pc, new)) + return -EPERM; + return 0; +} + +/* + * Replace tracer function in ftrace_caller() + */ +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long pc; + u32 new; + int ret; + + current_tracer = (unsigned long)func; + pc = (unsigned long)&ftrace_call; + new = SW64_CALL(R26, R27, 0); + ret = ftrace_modify_code(pc, new); + + if (!ret) { + pc = (unsigned long)&ftrace_regs_call; + new = SW64_CALL(R26, R27, 0); + ret = ftrace_modify_code(pc, new); + } + + return ret; +} + +/* + * Turn on the call to ftrace_caller() in instrumented function + */ +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int insn[3]; + unsigned long pc = rec->ip + MCOUNT_LDGP_SIZE; + unsigned long offset; + + if (addr == FTRACE_ADDR) + offset = TI_FTRACE_ADDR; + else + offset = TI_FTRACE_REGS_ADDR; + + insn[0] = SW64_NOP; + /* ldl r28,(ftrace_addr_offset)(r8) */ + insn[1] = (0x23U << 26) | (28U << 21) | (8U << 16) | offset; + insn[2] = SW64_CALL(R28, R28, 0); + + /* replace the 3 mcount instructions at once */ + return copy_to_kernel_nofault((void *)pc, insn, 3 * SW64_INSN_SIZE); +} + +/* + * Turn off the call to ftrace_caller() in instrumented function + */ +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + unsigned long pc = rec->ip + MCOUNT_LDGP_SIZE; + unsigned int insn[3] = {SW64_NOP, SW64_NOP, SW64_NOP}; + + return copy_to_kernel_nofault((void *)pc, insn, 3 * SW64_INSN_SIZE); +} + +void arch_ftrace_update_code(int command) +{ + ftrace_modify_all_code(command); +} + +int __init ftrace_dyn_arch_init(void) +{ + struct thread_info *ti = task_thread_info(&init_task); + + ti->dyn_ftrace_addr = FTRACE_ADDR; + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + ti->dyn_ftrace_regs_addr = FTRACE_REGS_ADDR; +#endif + return 0; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + return 0; +} +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * function_graph tracer expects ftrace_return_to_handler() to be called + * on the way back to parent. For this purpose, this function is called + * in _mcount() or ftrace_caller() to replace return address (*parent) on + * the call stack to return_to_handler. + * + * Note that @frame_pointer is used only for sanity check later. 
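+ *
+ * @parent points at the return-address slot saved by the ftrace entry
+ * code; if function_graph_enter() accepts the function, that slot is
+ * rewritten so the traced function "returns" into return_to_handler().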
+ */ +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long)&return_to_handler; + unsigned long old; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + /* + * Note: + * No protection against faulting at *parent, which may be seen + * on other archs. It's unlikely on AArch64. + */ + old = *parent; + + if (!function_graph_enter(old, self_addr, frame_pointer, NULL)) + *parent = return_hooker; +} + +#ifdef CONFIG_DYNAMIC_FTRACE +/* + * Turn on/off the call to ftrace_graph_caller() in ftrace_caller() + * depending on @enable. + */ +static int ftrace_modify_graph_caller(bool enable) +{ + unsigned long pc = (unsigned long)&ftrace_graph_call; + u32 new = SW64_NOP; + + if (enable) + new = SW64_CALL(R26, R27, 0); + return ftrace_modify_code(pc, new); +} + +int ftrace_enable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(true); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(false); +} +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/sw_64/kernel/head.S b/arch/sw_64/kernel/head.S new file mode 100644 index 0000000000000000000000000000000000000000..fd0fbfbcf5b6418b561d3d56265d567d36ed437b --- /dev/null +++ b/arch/sw_64/kernel/head.S @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * initial boot stuff.. At this point, the bootloader has already + * switched into HMcode, and loaded us at the correct address + * (START_ADDR). So there isn't much left for us to do: just set up + * the kernel global pointer and jump to the kernel entry-point. + */ + +#include +#include +#include +#include + +__HEAD + .globl _stext + .set noreorder + .globl __start + .ent __start +_stext: +__start: + .prologue 0 + br $27, 1f +1: ldgp $29, 0($27) + /* We need to get current_task_info loaded up... */ + ldi $8, init_task + ldl $30, TASK_STACK($8) + /* ... and find our stack ... */ + ldi $30, ASM_THREAD_SIZE($30) + + /* ... and then we can clear bss data. */ + ldi $16, __bss_start + ldi $18, __bss_stop + subl $18, $16, $18 + mov $31, $17 + call $26, __constant_c_memset +#ifdef CONFIG_RELOCATABLE + ldi $30, -8($30) + stl $29, 0($30) + /* Copy kernel and apply the relocations */ + call $26, relocate_kernel + ldl $29, 0($30) + addl $29, $0, $29 + addl $8, $0, $8 + ldi $30, 8($30) + /* Repoint the sp into the new kernel image */ + addl $30, $0, $30 +#endif + /* ... and then we can start the kernel. */ + call $26, sw64_start_kernel + sys_call HMC_halt + .end __start + +#ifdef CONFIG_SMP + .align 3 + .globl __smp_callin + .ent __smp_callin + /* On entry here the PCB of the idle task for this processor + * has been loaded. We've arranged for the tilde_pcb[x] for + * this process to contain the PCBB of the target idle task. + */ +__smp_callin: + .prologue 1 + br $27, 2f # we copy this from above "br $27 1f" +2: ldgp $29, 0($27) # First order of business, load the GP. 
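+
+	# Flush the stale TLB entries, look up our logical cpu id from
+	# the hard core id, switch to this cpu's idle task and stack,
+	# then enter C code via smp_callin().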
+ + bis $31, $31, $16 # invalidate all TLB with current VPN + sys_call HMC_tbi + +#if defined(CONFIG_SUBARCH_C3B) + sys_call HMC_whami # Get hard cid + ldi $1, __cpu_to_rcid + ldi $2, 0($31) + ldi $4, CONFIG_NR_CPUS +3: ldw $3, 0($1) + cmpeq $3, $0, $3 + bne $3, 4f + addl $1, 4, $1 + addl $2, 1, $2 + cmpeq $2, $4, $5 + bne $5, 5f + br $31, 3b +4: ldi $0, 0($2) +#else + rcid $0 +#endif + + ldi $2, idle_task_pointer + s8addl $0, $2, $2 + ldl $8, 0($2) # Get ksp of idle thread + sys_call HMC_wrktp + + ldl $30, TASK_STACK($8) + ldi $30, ASM_THREAD_SIZE($30) + + call $26, smp_callin +5: + sys_call HMC_halt + .end __smp_callin +#endif /* CONFIG_SMP */ + # + # It is handy, on occasion, to make halt actually just loop. + # Putting it here means we dont have to recompile the whole + # kernel. + # + + .align 3 + .globl halt + .ent halt +halt: + .prologue 0 + sys_call HMC_halt + .end halt diff --git a/arch/sw_64/kernel/hibernate.c b/arch/sw_64/kernel/hibernate.c new file mode 100644 index 0000000000000000000000000000000000000000..644ea85043136066c1129b059735d3feb7dc9f71 --- /dev/null +++ b/arch/sw_64/kernel/hibernate.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +struct processor_state hibernate_state; +/* Defined in hibernate_asm.S */ +extern int restore_image(void); + +void save_processor_state(void) +{ + struct vcpucb *vcb = &(hibernate_state.vcb); + + vcb->ksp = rdksp(); + vcb->usp = rdusp(); + vcb->soft_tid = rtid(); + vcb->ptbr = rdptbr(); +} + +void restore_processor_state(void) +{ + struct vcpucb *vcb = &(hibernate_state.vcb); + + wrksp(vcb->ksp); + wrusp(vcb->usp); + wrtp(vcb->soft_tid); + wrptbr(vcb->ptbr); + sflush(); + tbiv(); +} + +int swsusp_arch_resume(void) +{ + restore_image(); + return 0; +} +/* References to section boundaries */ +extern const void __nosave_begin, __nosave_end; +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); + unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end)); + + return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); +} + +struct restore_data_record { + unsigned long magic; +}; + +#define RESTORE_MAGIC 0x0123456789ABCDEFUL + +/** + * arch_hibernation_header_save - populate the architecture specific part + * of a hibernation image header + * @addr: address to save the data at + */ +int arch_hibernation_header_save(void *addr, unsigned int max_size) +{ + struct restore_data_record *rdr = addr; + + if (max_size < sizeof(struct restore_data_record)) + return -EOVERFLOW; + rdr->magic = RESTORE_MAGIC; + return 0; +} + +/** + * arch_hibernation_header_restore - read the architecture specific data + * from the hibernation image header + * @addr: address to read the data from + */ +int arch_hibernation_header_restore(void *addr) +{ + struct restore_data_record *rdr = addr; + + return (rdr->magic == RESTORE_MAGIC) ? 
0 : -EINVAL; +} diff --git a/arch/sw_64/kernel/hibernate_asm.S b/arch/sw_64/kernel/hibernate_asm.S new file mode 100644 index 0000000000000000000000000000000000000000..ff997cd76c5aef4bb9fa2eaaced2f57c21a0c631 --- /dev/null +++ b/arch/sw_64/kernel/hibernate_asm.S @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include + + .text + .set noat +ENTRY(swsusp_arch_suspend) + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + stl $9, CALLEE_R9($1) + stl $10, CALLEE_R10($1) + stl $11, CALLEE_R11($1) + stl $12, CALLEE_R12($1) + stl $13, CALLEE_R13($1) + stl $14, CALLEE_R14($1) + stl $15, CALLEE_R15($1) + stl $26, CALLEE_RA($1) + /* SIMD-FP */ + ldi $1, PSTATE_FPREGS($16) + vstd $f2, CALLEE_F2($1) + vstd $f3, CALLEE_F3($1) + vstd $f4, CALLEE_F4($1) + vstd $f5, CALLEE_F5($1) + vstd $f6, CALLEE_F6($1) + vstd $f7, CALLEE_F7($1) + vstd $f8, CALLEE_F8($1) + vstd $f9, CALLEE_F9($1) + rfpcr $f0 + fstd $f0, PSTATE_FPCR($16) + + stl $8, PSTATE_KTP($16) + stl sp, PSTATE_SP($16) + call swsusp_save + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + ldl $26, CALLEE_RA($1) + + /* save current_thread_info()->pcbb */ + ret +END(swsusp_arch_suspend) + +ENTRY(restore_image) + /* prepare to copy image data to their original locations */ + ldi t0, restore_pblist + ldl t0, 0(t0) +$loop: + beq t0, $done + + /* get addresses from the pbe and copy the page */ + ldl t1, PBE_ADDR(t0) /* source */ + ldl t2, PBE_ORIG_ADDR(t0) /* destination */ + ldi t3, PAGE_SIZE + addl t1, t3, t3 +$cpyloop: + ldl t8, 0(t1) + stl t8, 0(t2) + addl t1, 8, t1 + addl t2, 8, t2 + cmpeq t1, t3, t4 + beq t4, $cpyloop + + /* progress to the next pbe */ + ldl t0, PBE_NEXT(t0) + bne t0, $loop +$done: + + /* tell the hibernation core that we've just restored the memory */ + ldi $0, in_suspend + stl $31, 0($0) + + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + + ldl $9, CALLEE_R9($1) + ldl $10, CALLEE_R10($1) + ldl $11, CALLEE_R11($1) + ldl $12, CALLEE_R12($1) + ldl $13, CALLEE_R13($1) + ldl $14, CALLEE_R14($1) + ldl $15, CALLEE_R15($1) + ldl $26, CALLEE_RA($1) + /* SIMD-FP */ + fldd $f0, PSTATE_FPCR($16) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $hibernate_setfpec_0 + subl $2, 0x1, $2 + beq $2, $hibernate_setfpec_1 + subl $2, 0x1, $2 + beq $2, $hibernate_setfpec_2 + setfpec3 + br $hibernate_setfpec_over +$hibernate_setfpec_0: + setfpec0 + br $hibernate_setfpec_over +$hibernate_setfpec_1: + setfpec1 + br $hibernate_setfpec_over +$hibernate_setfpec_2: + setfpec2 +$hibernate_setfpec_over: + ldi $1, PSTATE_FPREGS($16) + vldd $f2, CALLEE_F2($1) + vldd $f3, CALLEE_F3($1) + vldd $f4, CALLEE_F4($1) + vldd $f5, CALLEE_F5($1) + vldd $f6, CALLEE_F6($1) + vldd $f7, CALLEE_F7($1) + vldd $f8, CALLEE_F8($1) + vldd $f9, CALLEE_F9($1) + + ldl sp, PSTATE_SP($16) + ldl $8, PSTATE_KTP($16) + sys_call HMC_wrktp + + ldi $0, 0($31) + + ret +END(restore_image) diff --git a/arch/sw_64/kernel/hmcall.c b/arch/sw_64/kernel/hmcall.c new file mode 100644 index 0000000000000000000000000000000000000000..d2054a930bd72f648c363bee4282ddddd0f36572 --- /dev/null +++ b/arch/sw_64/kernel/hmcall.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sw_64/kernel/hmcall.c + * + * Copyright (C) 2022 WXIAT + * Author: He Sheng + */ + +#include +#include + +#define A0(func) (((HMC_##func & 0xFF) >> 6) & 0x1) +#define A1(func) ((((HMC_##func & 0xFF)>>6) & 0x2) >> 1) +#define A2(func) ((HMC_##func & 0x3F) << 7) + +#define T(func) ((A0(func) ^ A1(func)) & 0x1) +#define B0(func) ((T(func) | A0(func)) << 13) +#define 
B1(func) (((~T(func) & 1) | A1(func)) << 14) + +#define PRI_BASE 0x10000UL + +#define HMCALL_ENTRY(func) (PRI_BASE | B1(func) | B0(func) | A2(func)) + + +static inline void fixup_rdtp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdtp)); + + entry[0] = 0x181ffec7; /* pri_rcsr $0, CSR__TID */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrtp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrtp)); + + entry[0] = 0x1a1fffc7; /* pri_wcsr $16, CSR__TID */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_tbiasid(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(tbisasid)); + + entry[0] = 0x18fffe47; /* pri_rcsr p7, CSR__DTB_PCR*/ + entry[1] = 0x4a05c905; /* sll r16, CSR__DTB_PCR__UPN__S, p5 */ + entry[2] = 0xf89f03ff; /* ldi p4, CSR__DTB_PCR__UPN__M */ + entry[3] = 0x4885c904; /* sll p4, CSR__DTB_PCR__UPN__S, p4 */ + entry[4] = 0x40e40724; /* bic p7, p4, p4 */ + entry[5] = 0x40850745; /* bis p4, p5, p5 */ + entry[6] = 0x18bfff47; /* pri_wcsr p5, CSR__DTB_PCR */ + entry[7] = 0x1a3fff46; /* pri_wcsr r17, CSR__DTB_IS */ + entry[8] = 0x18ffff47; /* pri_wcsr p7, CSR__DTB_PCR */ + entry[9] = 0x4a04e906; /* sll r16, CSR__UPCR_UPN__UPN__S, p6 */ + entry[10] = 0x189ffe22; /* pri_rcsr p4, CSR__UPCR_UPN */ + entry[11] = 0x18dfff22; /* pri_wcsr p6, CSR__UPCR_UPN */ + entry[12] = 0x1a3fff06; /* pri_wcsr r17, CSR__ITB_IS */ + entry[13] = 0x1bffff15; /* pri_wcsr r31, CSR__IC_FLUSH */ + entry[14] = 0x189fff22; /* pri_wcsr p4, CSR__UPCR_UPN */ + entry[15] = 0x1ef00000; /* pri_ret/b p23 */ +} + +static inline void fixup_wrasid(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrasid)); + + entry[0] = 0x18fffe47; /* pri_rcsr p7, CSR__DTB_PCR*/ + entry[1] = 0x4a05c905; /* sll r16, CSR__DTB_PCR__UPN__S, p5 */ + entry[2] = 0xf89f03ff; /* ldi p4, CSR__DTB_PCR__UPN__M */ + entry[3] = 0x4885c904; /* sll p4, CSR__DTB_PCR__UPN__S, p4 */ + entry[4] = 0x40e40724; /* bic p7, p4, p4 */ + entry[5] = 0x40850745; /* bis p4, p5, p5 */ + entry[6] = 0x18bfff47; /* pri_wcsr p5, CSR__DTB_PCR */ + entry[7] = 0x4a04e906; /* sll r16, CSR__UPCR_UPN__UPN__S, p6 */ + entry[8] = 0x18dfff22; /* pri_wcsr p4, CSR__UPCR_UPN */ + entry[9] = 0x1ef00000; /* pri_ret/b p23 */ +} + +static inline void fixup_rdktp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdktp)); + + entry[0] = 0x95161000; /* pri_ldl/p $8, VC__KTP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrktp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrktp)); + + entry[0] = 0xb5161000; /* pri_stl/p $8, VC__KTP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_rdusp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdusp)); + + entry[0] = 0x94161018; /* pri_ldl/p $0, VC__USP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrusp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrusp)); + + entry[0] = 0xb6161018; /* pri_stl/p $16, VC__USP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +void __init fixup_hmcall(void) +{ +#if defined(CONFIG_SUBARCH_C3B) + fixup_rdtp(); + fixup_wrtp(); + fixup_tbiasid(); + fixup_wrasid(); + fixup_rdktp(); + fixup_wrktp(); + fixup_rdusp(); + fixup_wrusp(); + imemb(); +#endif +} + +#undef A0 +#undef A1 +#undef A2 +#undef T +#undef B0 +#undef B1 diff --git a/arch/sw_64/kernel/idle.c b/arch/sw_64/kernel/idle.c new file mode 100644 index 0000000000000000000000000000000000000000..d26bdc405b53a946b8974ca821239490b5b9b848 --- /dev/null +++ b/arch/sw_64/kernel/idle.c @@ 
-0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sw64 idle loop support. + * + */ +#include +#include +#include +#include +#include + +void arch_cpu_idle(void) +{ + local_irq_enable(); + cpu_relax(); + + if (is_in_guest()) { + if (!need_resched()) + hcall(HCALL_HALT, 0, 0, 0); + } else { + asm( + ".globl __idle_start\n" + "__idle_start = .\n" + "ldw $1, %0($8)\n" + "srl $1, %1, $1\n" + "blbs $1, $need_resched\n" + "halt\n" + ".globl __idle_end\n" + "__idle_end = .\n" + "$need_resched:" + :: "i"(TI_FLAGS), "i"(TIF_NEED_RESCHED) + : "$1"); + } + local_irq_disable(); +} diff --git a/arch/sw_64/kernel/insn.c b/arch/sw_64/kernel/insn.c new file mode 100644 index 0000000000000000000000000000000000000000..281578e1bfc03b708be124e0e3d28644d811b512 --- /dev/null +++ b/arch/sw_64/kernel/insn.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019, serveros, linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include + +//static DEFINE_RAW_SPINLOCK(patch_lock); + +int __kprobes sw64_insn_read(void *addr, u32 *insnp) +{ + int ret; + __le32 val; + + ret = copy_from_kernel_nofault(&val, addr, SW64_INSN_SIZE); + if (!ret) + *insnp = le32_to_cpu(val); + + return ret; +} + +static int __kprobes __sw64_insn_write(void *addr, __le32 insn) +{ + void *waddr = addr; + int ret; + + //raw_spin_lock_irqsave(&patch_lock, flags); + + ret = copy_to_kernel_nofault(waddr, &insn, SW64_INSN_SIZE); + + //raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + +static int __kprobes __sw64_insn_double_write(void *addr, __le64 insn) +{ + void *waddr = addr; + //unsigned long flags = 0; + int ret; + + //raw_spin_lock_irqsave(&patch_lock, flags); + + ret = copy_to_kernel_nofault(waddr, &insn, 2 * SW64_INSN_SIZE); + + //raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + +int __kprobes sw64_insn_write(void *addr, u32 insn) +{ + u32 *tp = addr; + /* SW64 instructions must be word aligned */ + if ((uintptr_t)tp & 0x3) + return -EINVAL; + return __sw64_insn_write(addr, cpu_to_le32(insn)); +} + +int __kprobes sw64_insn_double_write(void *addr, u64 insn) +{ + u32 *tp = addr; + /* SW64 instructions must be word aligned */ + if ((uintptr_t)tp & 0x3) + return -EINVAL; + return __sw64_insn_double_write(addr, cpu_to_le64(insn)); +} +unsigned int __kprobes sw64_insn_nop(void) +{ + return SW64_BIS(R31, R31, R31); +} + +unsigned int __kprobes sw64_insn_call(unsigned int ra, unsigned int rb) +{ + return SW64_CALL(ra, rb, 0); +} + +unsigned int __kprobes sw64_insn_sys_call(unsigned int num) +{ + return SW64_SYS_CALL(num); +} + +/* 'pc' is the address of br instruction, not the +4 PC. 'new_pc' is the target address. 
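+ * The 21-bit displacement encoded into the instruction is
+ * (new_pc - pc - 4) / 4, i.e. relative to the updated PC in units of
+ * instructions; (unsigned int)-1 is returned when the target is out of
+ * branch range.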
*/ +unsigned int __kprobes sw64_insn_br(unsigned int ra, unsigned long pc, unsigned long new_pc) +{ + int offset = new_pc - pc; + unsigned int disp, minus = 0x1fffff; + + if (!(offset <= BR_MAX_DISP && offset >= -BR_MAX_DISP)) + return -1; + if (offset > 0) + disp = (offset - 4) / 4; + else + disp = ~(-offset / 4) & minus; + + return SW64_BR(ra, disp); + +} diff --git a/arch/sw_64/kernel/irq.c b/arch/sw_64/kernel/irq.c new file mode 100644 index 0000000000000000000000000000000000000000..126fe2f70495e10c9cc313dc2cdecb0e6b65516d --- /dev/null +++ b/arch/sw_64/kernel/irq.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw_64/kernel/irq.c + * + * Copyright (C) 1995 Linus Torvalds + * + * This file contains the code used by various IRQ handling routines: + * asking for different IRQ's should be done through these routines + * instead of just grabbing them. Thus setups with different IRQ numbers + * shouldn't result in any weird surprises, and installing new handlers + * should be easier. + */ + +#include +#include +#include +#include + +volatile unsigned long irq_err_count; +DEFINE_PER_CPU(unsigned long, irq_pmi_count); +DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); +EXPORT_PER_CPU_SYMBOL(irq_stat); + +void ack_bad_irq(unsigned int irq) +{ + irq_err_count++; + pr_crit("Unexpected IRQ trap at vector %u\n", irq); +} + +u64 arch_irq_stat_cpu(unsigned int cpu) +{ + u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; + + return sum; +} + +u64 arch_irq_stat(void) +{ + return 0; +} + +int arch_show_interrupts(struct seq_file *p, int prec) +{ + int j; + + seq_printf(p, "%*s: ", prec, "TIMER"); + for_each_online_cpu(j) + seq_printf(p, "%10u", per_cpu(irq_stat, j).timer_irqs_event); + seq_puts(p, "\n"); + +#ifdef CONFIG_SMP + seq_printf(p, "%*s: ", prec, "IPI"); + for_each_online_cpu(j) + seq_printf(p, "%10lu ", cpu_data[j].ipi_count); + seq_puts(p, "\n"); +#endif + seq_printf(p, "%*s: ", prec, "PMI"); + for_each_online_cpu(j) + seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); + seq_puts(p, "\n"); + + seq_printf(p, "ERR: %10lu\n", irq_err_count); + return 0; +} + +/* + * handle_irq handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). + */ + +#define MAX_ILLEGAL_IRQS 16 + +void +handle_irq(int irq) +{ + /* + * We ack quickly, we don't want the irq controller + * thinking we're snobs just because some other CPU has + * disabled global interrupts (we have already done the + * INT_ACK cycles, it's too late to try to pretend to the + * controller that we aren't taking the interrupt). + * + * 0 return value means that this irq is already being + * handled by some other CPU. (or is disabled) + */ + static unsigned int illegal_count; + struct irq_desc *desc = irq_to_desc(irq); + + if (!desc || ((unsigned int) irq > ACTUAL_NR_IRQS && + illegal_count < MAX_ILLEGAL_IRQS)) { + irq_err_count++; + illegal_count++; + pr_crit("device_interrupt: invalid interrupt %d\n", irq); + return; + } + + irq_enter(); + generic_handle_irq_desc(desc); + irq_exit(); +} + +#ifdef CONFIG_HOTPLUG_CPU +void fixup_irqs(void) +{ + irq_migrate_all_off_this_cpu(); +} +#endif diff --git a/arch/sw_64/kernel/irq_sw64.c b/arch/sw_64/kernel/irq_sw64.c new file mode 100644 index 0000000000000000000000000000000000000000..989d55ee1b1b83afd6aad586d0d29b28e48f7284 --- /dev/null +++ b/arch/sw_64/kernel/irq_sw64.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 specific irq code. 
+ */ + +#include +#include + +#include +#include + +void __init +init_IRQ(void) +{ + /* + * Just in case the platform init_irq() causes interrupts/mchecks + * (as is the case with RAWHIDE, at least). + */ + if (is_in_host()) { + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI0_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI1_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI2_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI3_INTEN); + } + + wrent(entInt, 0); + + sw64_init_irq(); + irqchip_init(); +} + +DEFINE_SPINLOCK(irq_lock); + +static void +__enable_irq(struct irq_data *d) +{ +} + +static void +__disable_irq(struct irq_data *d) +{ +} + +static unsigned int +__startup_irq(struct irq_data *d) +{ + __enable_irq(d); + return 0; +} + +static void +__mask_and_ack_irq(struct irq_data *d) +{ + spin_lock(&irq_lock); + __disable_irq(d); + spin_unlock(&irq_lock); +} + +struct irq_chip sw64_irq_chip = { + .name = "SW64_NODE", + .irq_startup = __startup_irq, + .irq_unmask = __enable_irq, + .irq_mask = __disable_irq, + .irq_mask_ack = __mask_and_ack_irq, +}; + +void __weak arch_init_msi_domain(struct irq_domain *parent) {} + +int __init arch_early_irq_init(void) +{ + int i; + + for (i = 0; i < NR_IRQS; ++i) { + irq_set_chip_and_handler(i, &sw64_irq_chip, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); + } + arch_init_msi_domain(NULL); + return 0; +} + +int __init arch_probe_nr_irqs(void) +{ + return NR_IRQS_LEGACY; +} diff --git a/arch/sw_64/kernel/jump_label.c b/arch/sw_64/kernel/jump_label.c new file mode 100644 index 0000000000000000000000000000000000000000..f3bc40370e4de9b77889343338b509d6bdcad8c6 --- /dev/null +++ b/arch/sw_64/kernel/jump_label.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include +#include + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + u32 *insnp = (u32 *)entry->code; + u32 insn; + + if (type == JUMP_LABEL_JMP) { + insn = sw64_insn_br(R31, (entry->code), entry->target); + BUG_ON(insn == -1); + } else { + insn = sw64_insn_nop(); + } + + *insnp = insn; + + flush_icache_range(entry->code, entry->code + SW64_INSN_SIZE); +} + +void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type) +{ + /* + * no need to rewrite NOP + */ +} diff --git a/arch/sw_64/kernel/kgdb.c b/arch/sw_64/kernel/kgdb.c new file mode 100644 index 0000000000000000000000000000000000000000..833f72a1577ca8f2d2f01113c0443739a9a9c025 --- /dev/null +++ b/arch/sw_64/kernel/kgdb.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sw64 KGDB support + * + * Based on arch/arm64/kernel/kgdb.c + * + * Copyright (C) Xia Bin + * Author: Xia Bin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include + +struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { + { "r0", 8, offsetof(struct pt_regs, regs[0])}, + { "r1", 8, offsetof(struct pt_regs, regs[1])}, + { "r2", 8, offsetof(struct pt_regs, regs[2])}, + { "r3", 8, offsetof(struct pt_regs, regs[3])}, + { "r4", 8, offsetof(struct pt_regs, regs[4])}, + { "r5", 8, offsetof(struct pt_regs, regs[5])}, + { "r6", 8, offsetof(struct pt_regs, regs[6])}, + { "r7", 8, offsetof(struct pt_regs, regs[7])}, + { "r8", 8, offsetof(struct pt_regs, regs[8])}, + + { "r9", 8, offsetof(struct pt_regs, regs[9])}, + { "r10", 8, offsetof(struct pt_regs, regs[10])}, + { "r11", 8, offsetof(struct pt_regs, regs[11])}, + { "r12", 8, offsetof(struct pt_regs, regs[12])}, + { "r13", 8, offsetof(struct pt_regs, regs[13])}, + { "r14", 8, offsetof(struct pt_regs, regs[14])}, + { "r15", 8, offsetof(struct pt_regs, regs[15])}, + + { "r16", 8, offsetof(struct pt_regs, regs[16])}, + { "r17", 8, offsetof(struct pt_regs, regs[17])}, + { "r18", 8, offsetof(struct pt_regs, regs[18])}, + + { "r19", 8, offsetof(struct pt_regs, regs[19])}, + { "r20", 8, offsetof(struct pt_regs, regs[20])}, + { "r21", 8, offsetof(struct pt_regs, regs[21])}, + { "r22", 8, offsetof(struct pt_regs, regs[22])}, + { "r23", 8, offsetof(struct pt_regs, regs[23])}, + { "r24", 8, offsetof(struct pt_regs, regs[24])}, + { "r25", 8, offsetof(struct pt_regs, regs[25])}, + { "r26", 8, offsetof(struct pt_regs, regs[26])}, + { "r27", 8, offsetof(struct pt_regs, regs[27])}, + { "at", 8, offsetof(struct pt_regs, regs[28])}, + { "gp", 8, offsetof(struct pt_regs, regs[29])}, + { "sp", 8, offsetof(struct pt_regs, regs[30])}, + { "zero", 8, -1 }, + + { "f0", 8, -1 }, + { "f1", 8, -1 }, + { "f2", 8, -1 }, + { "f3", 8, -1 }, + { "f4", 8, -1 }, + { "f5", 8, -1 }, + { "f6", 8, -1 }, + { "f7", 8, -1 }, + { "f8", 8, -1 }, + { "f9", 8, -1 }, + { "f10", 8, -1 }, + { "f11", 8, -1 }, + { "f12", 8, -1 }, + { "f13", 8, -1 }, + { "f14", 8, -1 }, + { "f15", 8, -1 }, + { "f16", 8, -1 }, + { "f17", 8, -1 }, + { "f18", 8, -1 }, + { "f19", 8, -1 }, + { "f20", 8, -1 }, + { "f21", 8, -1 }, + { "f22", 8, -1 }, + { "f23", 8, -1 }, + { "f24", 8, -1 }, + { "f25", 8, -1 }, + { "f26", 8, -1 }, + { "f27", 8, -1 }, + { "f28", 8, -1 }, + { "f29", 8, -1 }, + { "f30", 8, -1 }, + { "fpcr", 8, -1 }, + + { "pc", 8, offsetof(struct pt_regs, pc)}, + { "", 8, -1 }, + { "tp", 8, -1}, +}; + +char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return NULL; + + if (dbg_reg_def[regno].offset != -1) + memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, + dbg_reg_def[regno].size); + else + memset(mem, 0, dbg_reg_def[regno].size); + return dbg_reg_def[regno].name; +} + +int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return -EINVAL; + + if (dbg_reg_def[regno].offset != -1) + memcpy((void *)regs + dbg_reg_def[regno].offset, mem, + dbg_reg_def[regno].size); + return 0; +} + +void +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) +{ + int i; + /* Initialize to zero */ + memset((char *)gdb_regs, 0, NUMREGBYTES); + for (i = 0; i < DBG_MAX_REG_NUM; i++) + gdb_regs[i] = get_reg(task, i); +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) +{ + pr_info("BEFORE SET PC WITH %lx\n", pc); + instruction_pointer(regs) = pc; + pr_info("AFTER SET PC IS %lx\n", instruction_pointer(regs)); +} + +void kgdb_call_nmi_hook(void *ignored) +{ + kgdb_nmicallback(raw_smp_processor_id(), NULL); +} + 
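+/*
+ * kgdb_roundup_cpus() below briefly re-enables local interrupts around
+ * the cross-call: smp_call_function() must not be invoked with
+ * interrupts disabled, so they are switched on just long enough to kick
+ * the other CPUs into kgdb_call_nmi_hook().
+ */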
+void kgdb_roundup_cpus(void)
+{
+	local_irq_enable();
+	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+	local_irq_disable();
+}
+
+int kgdb_arch_handle_exception(int exception_vector, int signo,
+			       int err_code, char *remcom_in_buffer,
+			       char *remcom_out_buffer,
+			       struct pt_regs *linux_regs)
+{
+	char *ptr;
+	unsigned long address = -1;
+
+	switch (remcom_in_buffer[0]) {
+	case 'c':
+		ptr = &remcom_in_buffer[1];
+		if (kgdb_hex2long(&ptr, &address))
+			kgdb_arch_set_pc(linux_regs, address);
+		return 0;
+	}
+	return -1;
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+	struct pt_regs *regs = args->regs;
+
+	/* Userspace events, ignore. */
+	if (user_mode(regs))
+		return NOTIFY_DONE;
+
+	if (kgdb_handle_exception(1, args->signr, cmd, regs))
+		return NOTIFY_DONE;
+
+	return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+	unsigned long flags;
+	int ret;
+
+	local_irq_save(flags);
+	ret = __kgdb_notify(ptr, cmd);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+	.notifier_call	= kgdb_notify,
+};
+
+/*
+ * kgdb_arch_init - Perform any architecture specific initialization.
+ * This function will handle the initialization of any architecture
+ * specific callbacks.
+ */
+int kgdb_arch_init(void)
+{
+	return register_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture specific uninitialization.
+ * This function will handle the uninitialization of any architecture
+ * specific callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+	unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * sw64 instructions are always little-endian, so the break
+ * instruction is encoded in LE format.
+ */
+const struct kgdb_arch arch_kgdb_ops = {
+	.gdb_bpt_instr = {0x80, 00, 00, 00}
+};
diff --git a/arch/sw_64/kernel/kprobes/Makefile b/arch/sw_64/kernel/kprobes/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..110ba2bf7752361442022553269447ceb802d465
--- /dev/null
+++ b/arch/sw_64/kernel/kprobes/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o
+obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o
diff --git a/arch/sw_64/kernel/kprobes/common.h b/arch/sw_64/kernel/kprobes/common.h
new file mode 100644
index 0000000000000000000000000000000000000000..de10058f0376ea342c973e0e03a8ef1bd9faa72c
--- /dev/null
+++ b/arch/sw_64/kernel/kprobes/common.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SW64_KERNEL_KPROBES_COMMON_H
+#define _SW64_KERNEL_KPROBES_COMMON_H
+
+
+extern bool sw64_insn_can_kprobe(kprobe_opcode_t *addr);
+
+
+#endif /* _SW64_KERNEL_KPROBES_COMMON_H */
diff --git a/arch/sw_64/kernel/kprobes/decode-insn.c b/arch/sw_64/kernel/kprobes/decode-insn.c
new file mode 100644
index 0000000000000000000000000000000000000000..91c31111f2b73273d186d6b0c1cb9961e12dd68a
--- /dev/null
+++ b/arch/sw_64/kernel/kprobes/decode-insn.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on arch/arm64/kernel/probes/decode-insn.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include + +#include "common.h" + +static bool __kprobes sw64_insn_is_steppable(u32 insn) +{ + /* + * Branch instructions will write a new value into the PC which is + * likely to be relative to the XOL address and therefore invalid. + * Deliberate generation of an exception during stepping is also not + * currently safe. Lastly, MSR instructions can do any number of nasty + * things we can't handle during single-stepping. + */ + if (sw64_insn_is_sys_call_b(insn) || + sw64_insn_is_sys_call(insn) || + sw64_insn_is_call(insn) || + sw64_insn_is_ret(insn) || + sw64_insn_is_jmp(insn) || + sw64_insn_is_br(insn) || + sw64_insn_is_bsr(insn) || + sw64_insn_is_memb(insn) || + sw64_insn_is_imemb(insn) || + sw64_insn_is_rtc(insn) || + sw64_insn_is_lldl(insn) || + sw64_insn_is_lldw(insn) || + sw64_insn_is_beq(insn) || + sw64_insn_is_bne(insn) || + sw64_insn_is_blt(insn) || + sw64_insn_is_ble(insn) || + sw64_insn_is_bgt(insn) || + sw64_insn_is_bge(insn) || + sw64_insn_is_blbc(insn) || + sw64_insn_is_blbs(insn) || + sw64_insn_is_fbeq(insn) || + sw64_insn_is_fbne(insn) || + sw64_insn_is_fblt(insn) || + sw64_insn_is_fble(insn) || + sw64_insn_is_fbgt(insn) || + sw64_insn_is_fbge(insn)) + return false; + + return true; +} + + +#ifdef CONFIG_KPROBES +// lldl rd_f +static bool __kprobes is_probed_between_atomic(kprobe_opcode_t *addr) +{ + int count = 0; + unsigned long size = 0, offset = 0; + kprobe_opcode_t *scan_start = NULL; + + if (kallsyms_lookup_size_offset((unsigned long)addr, &size, &offset)) + scan_start = addr - (offset / sizeof(kprobe_opcode_t)); + + while (scan_start < addr) { + if (sw64_insn_is_lldl(le32_to_cpu(*scan_start)) || + sw64_insn_is_lldw(le32_to_cpu(*scan_start))) + count++; + if (sw64_insn_is_rd_f(le32_to_cpu(*scan_start))) + count--; + scan_start++; + } + if (count) + return false; + + return true; +} + +bool __kprobes sw64_insn_can_kprobe(kprobe_opcode_t *addr) +{ + u32 insn = le32_to_cpu(*addr); + + if (!sw64_insn_is_steppable(insn)) { + pr_warn("addr is not steppable\n"); + return false; + } +#ifdef CONFIG_SUBARCH_C3B + if (!is_probed_between_atomic(addr)) { + pr_warn("addr between atomic can't probe\n"); + return false; + } +#endif + return true; +} +#endif diff --git a/arch/sw_64/kernel/kprobes/kprobes-ftrace.c b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c new file mode 100644 index 0000000000000000000000000000000000000000..a0b33a52a9e4101aba6ca5e173265e90018f6ddf --- /dev/null +++ b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Dynamic Ftrace based Kprobes Optimization + */ + +#include +#include +#include +#include +#include + +/* Ftrace callback handler for kprobes */ +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + struct pt_regs *regs; + int bit; + + bit = ftrace_test_recursion_trylock(ip, parent_ip); + if (bit < 0) + return; + + regs = ftrace_get_regs(fregs); + preempt_disable_notrace(); + p = get_kprobe((kprobe_opcode_t *)ip); + if (unlikely(!p) || kprobe_disabled(p)) + goto out; + + kcb = get_kprobe_ctlblk(); + if (kprobe_running()) { + kprobes_inc_nmissed_count(p); + } else { + regs->regs[28] -= MCOUNT_INSN_SIZE; + + 
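+		/*
+		 * $28 carries the call-site address that ftrace handed in;
+		 * it was just rewound by MCOUNT_INSN_SIZE so the pre-handler
+		 * sees the address of the probed instruction itself, and it
+		 * is wound forward again below once the pre-handler has run.
+		 */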
__this_cpu_write(current_kprobe, p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + if (!p->pre_handler || !p->pre_handler(p, regs)) { + regs->regs[28] += MCOUNT_INSN_SIZE; + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + } + __this_cpu_write(current_kprobe, NULL); + } +out: + preempt_enable_notrace(); + ftrace_test_recursion_unlock(bit); +} +NOKPROBE_SYMBOL(kprobe_ftrace_handler); + +int arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.insn = NULL; + p->ainsn.boostable = -1; + return 0; +} diff --git a/arch/sw_64/kernel/kprobes/kprobes.c b/arch/sw_64/kernel/kprobes/kprobes.c new file mode 100644 index 0000000000000000000000000000000000000000..024ce7d99e61688b7b95c5120e9432a030c65735 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/kprobes.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kernel Probes (KProbes) + * arch/sw_64/kernel/kprobes.c + */ + +#include +#include +#include + +#include "common.h" + +static u32 breakpoint_insn = BREAK_KPROBE; +static u32 breakpoint2_insn = BREAK_KPROBE_SS; + +int post_kprobe_handler(struct pt_regs *regs); + +DEFINE_PER_CPU(struct kprobe *, current_kprobe); +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + int ret = 0; + extern char __start_rodata[]; + extern char __end_rodata[]; + unsigned long probe_addr = (unsigned long)p->addr; + + if (probe_addr & 0x3) + return -EINVAL; + + if (!sw64_insn_can_kprobe(p->addr)) + return -EINVAL; + /* copy instruction */ + p->opcode = le32_to_cpu(*p->addr); + + + if (probe_addr >= (unsigned long) __start_rodata && + probe_addr <= (unsigned long) __end_rodata) + return -EINVAL; + + + /* insn: must be on special executable page on mips. */ + p->ainsn.insn = get_insn_slot(); + if (!p->ainsn.insn) { + ret = -ENOMEM; + goto out; + } + /* + * In the kprobe->ainsn.insn[] array we store the original + * instruction at index zero and a break trap instruction at + * index one. 
+ */ + p->ainsn.insn[0] = p->opcode; + p->ainsn.insn[1] = breakpoint2_insn; +out: + return ret; +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + sw64_insn_write(p->addr, breakpoint_insn); + flush_insn_slot(p); +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + sw64_insn_write(p->addr, p->opcode); + flush_insn_slot(p); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; + } +} + +static void save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; +} + +static void __kprobes set_current_kprobe(struct kprobe *p) +{ + __this_cpu_write(current_kprobe, p); +} + + +static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb, int reenter) +{ + if (reenter) { + save_previous_kprobe(kcb); + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_REENTER; + } else { + kcb->kprobe_status = KPROBE_HIT_SS; + } + + /* insn simulation */ + kcb->target_pc = regs->pc; + regs->pc = (unsigned long)&p->ainsn.insn[0]; +} + +static int __kprobes reenter_kprobe(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + switch (kcb->kprobe_status) { + case KPROBE_HIT_SSDONE: + case KPROBE_HIT_ACTIVE: + kprobes_inc_nmissed_count(p); + setup_singlestep(p, regs, kcb, 1); + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: + pr_warn("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); + BUG(); + break; + default: + WARN_ON(1); + return 0; + } + return 1; +} + +int __kprobes kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + unsigned long addr = instruction_pointer(regs); + + if (user_mode(regs)) + return 0; + /* + * We don't want to be preempted for the entire + * duration of kprobe processing + */ + preempt_disable(); + kcb = get_kprobe_ctlblk(); + p = get_kprobe((kprobe_opcode_t *)(addr - 4)); + + if (p) { + if (kprobe_running()) { + if (reenter_kprobe(p, regs, kcb)) + return 1; + } else { + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + /* + * If we have no pre-handler or it returned 0, we + * continue with normal processing. If we have a + * pre-handler and it returned non-zero, that means + * user handler setup registers to exit to another + * instruction, we must skip the single stepping. + */ + if (!p->pre_handler || !p->pre_handler(p, regs)) + setup_singlestep(p, regs, kcb, 0); + else + reset_current_kprobe(); + return 1; + } + } + return 0; + +} +int __kprobes post_kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (!cur) + return 0; + + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + // resume_execution(cur, regs, kcb); + regs->pc = kcb->target_pc; + + + /* Restore back the original saved kprobes variables and continue. 
*/ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + goto out; + } + reset_current_kprobe(); +out: + preempt_enable_no_resched(); + + return 1; +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long mmcsr) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (kcb->kprobe_status & KPROBE_HIT_SS) { + regs->pc = kcb->target_pc; + + reset_current_kprobe(); + preempt_enable_no_resched(); + } + return 0; +} + +/* + * Wrapper routine for handling exceptions. + */ +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + + struct die_args *args = (struct die_args *)data; + int ret = NOTIFY_DONE; + + switch (val) { + case DIE_BREAK: + if (kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + case DIE_SSTEPBP: + if (post_kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + default: + break; + } + return ret; +} +/* + * Function return probe trampoline: + * - init_kprobes() establishes a probepoint here + * - When the probed function returns, this probe causes the + * handlers to fire + */ +static void __used kretprobe_trampoline_holder(void) +{ + asm volatile( + /* Keep the assembler from reordering and placing JR here. */ + ".set noreorder\n\t" + "nop\n\t" + ".global __kretprobe_trampoline\n" + "__kretprobe_trampoline:\n\t" + "nop\n\t" + : : : "memory"); +} + +void __kretprobe_trampoline(void); + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *) regs->regs[26]; + ri->fp = NULL; + + /* Replace the return addr with trampoline addr */ + regs->regs[26] = (unsigned long)__kretprobe_trampoline; +} + +/* + * Called when the probe at kretprobe trampoline is hit + */ +static int __kprobes trampoline_probe_handler(struct kprobe *p, + struct pt_regs *regs) +{ + unsigned long orig_ret_address; + + orig_ret_address = __kretprobe_trampoline_handler(regs, NULL); + instruction_pointer(regs) = orig_ret_address; + regs->regs[26] = orig_ret_address; + + /* + * By returning a non-zero value, we are telling + * kprobe_handler() that we don't want the post_handler + * to run (and have re-enabled preemption) + */ + return 1; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline) + return 1; + + return 0; +} + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *)__kretprobe_trampoline, + .pre_handler = trampoline_probe_handler +}; + +int __init arch_init_kprobes(void) +{ + return register_kprobe(&trampoline_p); +} diff --git a/arch/sw_64/kernel/machine_kexec.c b/arch/sw_64/kernel/machine_kexec.c new file mode 100644 index 0000000000000000000000000000000000000000..950998476cdaced4b7368cb4712a1d7081e11047 --- /dev/null +++ b/arch/sw_64/kernel/machine_kexec.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * machine_kexec.c for kexec + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
+ */ +#include +#include +#include +#include +#include + +#include + +extern void *kexec_control_page; +extern const unsigned char relocate_new_kernel[]; +extern const size_t relocate_new_kernel_size; + +extern unsigned long kexec_start_address; +extern unsigned long kexec_indirection_page; + +static atomic_t waiting_for_crash_ipi; + +#ifdef CONFIG_SMP +extern struct smp_rcb_struct *smp_rcb; + +/* + * Wait for relocation code is prepared and send + * secondary CPUs to spin until kernel is relocated. + */ +static void kexec_smp_down(void *ignored) +{ + int cpu = smp_processor_id(); + + local_irq_disable(); + while (READ_ONCE(smp_rcb->ready) != 0) + mdelay(1); + set_cpu_online(cpu, false); + reset_cpu(cpu); +} +#endif + +int machine_kexec_prepare(struct kimage *kimage) +{ + return 0; +} + +void machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void machine_shutdown(void) +{ +#ifdef CONFIG_SMP + WRITE_ONCE(smp_rcb->ready, 0); + smp_call_function(kexec_smp_down, NULL, 0); + smp_wmb(); + while (num_online_cpus() > 1) { + cpu_relax(); + mdelay(1); + } +#endif +} + +#ifdef CONFIG_SMP +static void machine_crash_nonpanic_core(void *unused) +{ + int cpu; + struct pt_regs regs; + + cpu = smp_processor_id(); + + local_irq_disable(); + crash_setup_regs(®s, NULL); + pr_debug("CPU %u will stop doing anything useful since another CPU has crashed\n", cpu); + crash_save_cpu(®s, cpu); + flush_cache_all(); + + set_cpu_online(cpu, false); + atomic_dec(&waiting_for_crash_ipi); + while (READ_ONCE(smp_rcb->ready) != 0) + mdelay(1); + if (cpu != 0) + reset_cpu(cpu); + else + machine_kexec(kexec_crash_image); +} +#else +static inline void machine_crash_nonpanic_core(void *unused) { } +#endif + +static void machine_kexec_mask_interrupts(void) +{ + unsigned int i; + struct irq_desc *desc; + + for_each_irq_desc(i, desc) { + struct irq_chip *chip; + + chip = irq_desc_get_chip(desc); + if (!chip) + continue; + + if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) + chip->irq_eoi(&desc->irq_data); + + if (chip->irq_mask) + chip->irq_mask(&desc->irq_data); + + if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) + chip->irq_disable(&desc->irq_data); + } +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ + int cpu; + unsigned long msecs; + + cpu = smp_processor_id(); + local_irq_disable(); + kernel_restart_prepare(NULL); + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + smp_call_function(machine_crash_nonpanic_core, NULL, false); + msecs = 1000; /* Wait at most a second for the other cpus to stop */ + while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { + mdelay(1); + msecs--; + } + if (atomic_read(&waiting_for_crash_ipi) > 0) + pr_warn("Non-crashing CPUs did not react to IPI\n"); + + crash_save_cpu(regs, cpu); + machine_kexec_mask_interrupts(); + pr_info("Loading crashdump kernel...\n"); +#ifdef CONFIG_SMP + WRITE_ONCE(smp_rcb->ready, 0); + if (cpu != 0) + reset_cpu(cpu); +#endif +} + +#define phys_to_ktext(pa) (__START_KERNEL_map + (pa)) + +typedef void (*noretfun_t)(void) __noreturn; + +void machine_kexec(struct kimage *image) +{ + void *reboot_code_buffer; + unsigned long entry; + unsigned long *ptr; + struct boot_params *params = sunway_boot_params; + + + reboot_code_buffer = kexec_control_page; + pr_info("reboot_code_buffer = %px\n", reboot_code_buffer); + kexec_start_address = phys_to_ktext(image->start); + pr_info("kexec_start_address = %#lx\n", kexec_start_address); + if (image->type == KEXEC_TYPE_DEFAULT) + kexec_indirection_page = + (unsigned long) 
phys_to_virt(image->head & PAGE_MASK); + else + kexec_indirection_page = (unsigned long)&image->head; + + pr_info("kexec_indirection_page = %#lx, image->head=%#lx\n", + kexec_indirection_page, image->head); + + params->cmdline = kexec_start_address - COMMAND_LINE_OFF; + params->initrd_start = *(__u64 *)(kexec_start_address - INITRD_START_OFF); + params->initrd_size = *(__u64 *)(kexec_start_address - INITRD_SIZE_OFF); + + pr_info("initrd_start = %#llx, initrd_size = %#llx\n" + "dtb_start = %#llx, efi_systab = %#llx\n" + "efi_memmap = %#llx, efi_memmap_size = %#llx\n" + "efi_memdesc_size = %#llx, efi_memdesc_version = %#llx\n" + "cmdline = %#llx\n", + params->initrd_start, params->initrd_size, + params->dtb_start, params->efi_systab, + params->efi_memmap, params->efi_memmap_size, + params->efi_memdesc_size, params->efi_memdesc_version, + params->cmdline); + + memcpy(reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); + + /* + * The generic kexec code builds a page list with physical + * addresses. they are directly accessible through KSEG0 (or + * CKSEG0 or XPHYS if on 64bit system), hence the + * phys_to_virt() call. + */ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); + ptr = (entry & IND_INDIRECTION) ? + phys_to_virt(entry & PAGE_MASK) : ptr + 1) { + if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || + *ptr & IND_DESTINATION) + *ptr = (unsigned long) phys_to_virt(*ptr); + } + + /* + * we do not want to be bothered. + */ + local_irq_disable(); + + pr_info("Will call new kernel at %08lx\n", image->start); + pr_info("Bye ...\n"); + smp_wmb(); + ((noretfun_t) reboot_code_buffer)(); +} diff --git a/arch/sw_64/kernel/match.c b/arch/sw_64/kernel/match.c new file mode 100644 index 0000000000000000000000000000000000000000..3926391270daa6d3e03ce7e34dd9b04eba111a95 --- /dev/null +++ b/arch/sw_64/kernel/match.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include +#include +#include + + +char da_match_buf[1024], dv_match_buf[1024], dav_match_buf[1024]; +char ia_match_buf[1024], iv_match_buf[1024], ida_match_buf[1024]; + +unsigned long da_match_cf1, da_match_cf2, da_match_cf3; +unsigned long dv_match_cf1, dv_match_cf2, dv_match_cf3; +unsigned long dav_match_cf1, dav_match_cf2, dav_match_cf3, + dav_match_cf4, dav_match_cf5; +unsigned long ia_match_cf1, ia_match_cf2, ia_match_cf3, ia_match_cf4; +unsigned long iv_match_cf1, iv_match_cf2; +unsigned long ida_match_cf1, ida_match_cf2; + +static int da_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", da_match_buf); + return 0; +} + +static int dv_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", dv_match_buf); + return 0; +} + +static int dav_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", dav_match_buf); + return 0; +} + +static int ia_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", ia_match_buf); + return 0; +} + +static int iv_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", iv_match_buf); + return 0; +} + +static int ida_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", ida_match_buf); + return 0; +} + +static int da_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, da_match_show, NULL); +} + +static int dv_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, dv_match_show, NULL); +} + +static int dav_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, dav_match_show, NULL); +} + 
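+/*
+ * Each *_match debugfs file accepts one line of space-separated numbers
+ * parsed with kstrtoul(), so plain decimal and 0x-prefixed hex both
+ * work: da_match and dv_match take <match> <mask> <ctl>, dav_match
+ * takes five values, ia_match four, and iv_match and ida_match two.
+ * Assuming sw64_debugfs_dir is created as /sys/kernel/debug/sw64 (the
+ * exact path depends on how that dentry is set up), a data-address
+ * match could be armed with, for example:
+ *
+ *   echo "0xfff00000 0xffff0000 0x0" > /sys/kernel/debug/sw64/da_match
+ */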
+static int ia_match_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ia_match_show, NULL);
+}
+
+static int iv_match_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, iv_match_show, NULL);
+}
+
+static int ida_match_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ida_match_show, NULL);
+}
+
+static void
+write_da_match(void *i)
+{
+	unsigned long dc_ctl;
+
+	write_csr(da_match_cf1, CSR_DA_MATCH);
+	write_csr(da_match_cf2, CSR_DA_MASK);
+	dc_ctl = read_csr(CSR_DC_CTLP);
+	dc_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S)
+			| (0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S)
+			| (0x3UL << DPM_MATCH));
+	dc_ctl |= da_match_cf3;
+	write_csr(dc_ctl, CSR_DC_CTLP);
+}
+
+static void
+write_dv_match(void *i)
+{
+	unsigned long dc_ctl;
+
+	write_csr(dv_match_cf1, CSR_DV_MATCH);
+	write_csr(dv_match_cf2, CSR_DV_MASK);
+	dc_ctl = read_csr(CSR_DC_CTLP);
+	dc_ctl &= ~((0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S)
+			| (0x3UL << DPM_MATCH));
+	dc_ctl |= ((0x1UL << DV_MATCH_EN_S) | dv_match_cf3);
+	write_csr(dc_ctl, CSR_DC_CTLP);
+}
+
+static void
+write_dav_match(void *i)
+{
+	unsigned long dc_ctl;
+
+	write_csr(dav_match_cf1, CSR_DA_MATCH);
+	write_csr(dav_match_cf2, CSR_DA_MASK);
+	write_csr(dav_match_cf3, CSR_DV_MATCH);
+	write_csr(dav_match_cf4, CSR_DV_MASK);
+	dc_ctl = read_csr(CSR_DC_CTLP);
+	dc_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S)
+			| (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH));
+	dc_ctl |= ((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)
+			| dav_match_cf5);
+	write_csr(dc_ctl, CSR_DC_CTLP);
+}
+
+static void
+write_ia_match(void *i)
+{
+	ia_match_cf1 |= (0x1UL << IA_MATCH_EN_S);
+	write_csr_imb(ia_match_cf1, CSR_IA_MATCH);
+	write_csr_imb(ia_match_cf2, CSR_IA_MASK);
+	write_csr(((0x3ffUL << 18) | ia_match_cf3), CSR_IA_VPNMATCH);
+	write_csr(((0x3ffUL << 18) | ia_match_cf4), CSR_IA_UPNMATCH);
+}
+
+static void
+write_iv_match(void *i)
+{
+	unsigned long ia_match_tmp;
+
+	ia_match_tmp = read_csr(CSR_IA_MATCH);
+	ia_match_tmp &= ~(0x1UL << IV_PM_EN_S);
+	ia_match_tmp |= ((((iv_match_cf2 >> IV_PM_EN_S) & 0x1) << IV_PM_EN_S)
+			| (iv_match_cf2 & 0x3) | (0x1UL << IV_MATCH_EN_S));
+	write_csr_imb(iv_match_cf1, CSR_IV_MATCH);
+	write_csr_imb(ia_match_tmp, CSR_IA_MATCH);
+}
+
+static void
+write_ida_match(void *i)
+{
+	ida_match_cf1 |= (0x1UL << IDA_MATCH_EN_S);
+	write_csr(ida_match_cf1, CSR_IDA_MATCH);
+	write_csr(ida_match_cf2, CSR_IDA_MASK);
+}
+
+static ssize_t da_match_set(struct file *file, const char __user *user_buf,
+			    size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[400];
+	char *p;
+	int i, m;
+	const char *sep = " ";
+	char tmp1[400];
+	int err;
+	char *ret = NULL;
+
+	/*
+	 * tmp is smaller than da_match_buf; bound the copy so the
+	 * strcpy() below cannot overflow it.
+	 */
+	size = min3(sizeof(tmp) - 1, sizeof(da_match_buf) - 1, len);
+	if (copy_from_user(da_match_buf, user_buf, size))
+		return -EFAULT;
+
+	da_match_buf[size] = '\0';
+	strcpy(tmp, da_match_buf);
+	p = tmp;
+
+	/* split the line into fixed 100-byte slots of tmp1, one token each */
+	for (i = 0; i < 4; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strcpy(&tmp1[m], ret);
+	}
+	tmp1[sizeof(tmp1) - 1] = '\0';
+
+	err = kstrtoul(&tmp1[0], 0, &da_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &da_match_cf2);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[200], 0, &da_match_cf3);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_da_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static ssize_t dv_match_set(struct file *file, const char __user *user_buf,
+			    size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[400];
+	char *p;
+	int i, m;
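+	/*
+	 * As in da_match_set() above, the input line is split into fixed
+	 * 100-byte slots of tmp1: token i lands at &tmp1[i * 100] and is
+	 * converted with kstrtoul() further down.
+	 */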
+	const char *sep = " ";
+	char tmp1[400];
+	int err;
+	char *ret = NULL;
+
+	size = min3(sizeof(tmp) - 1, sizeof(dv_match_buf) - 1, len);
+	if (copy_from_user(dv_match_buf, user_buf, size))
+		return -EFAULT;
+
+	dv_match_buf[size] = '\0';
+	strcpy(tmp, dv_match_buf);
+	p = tmp;
+
+	for (i = 0; i < 4; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strcpy(&tmp1[m], ret);
+	}
+	tmp1[sizeof(tmp1) - 1] = '\0';
+
+	err = kstrtoul(&tmp1[0], 0, &dv_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &dv_match_cf2);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[200], 0, &dv_match_cf3);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_dv_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static ssize_t dav_match_set(struct file *file, const char __user *user_buf,
+			     size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[500];
+	char *p;
+	int i, m;
+	const char *sep = " ";
+	char tmp1[500];
+	int err;
+	char *ret = NULL;
+
+	size = min3(sizeof(tmp) - 1, sizeof(dav_match_buf) - 1, len);
+	if (copy_from_user(dav_match_buf, user_buf, size))
+		return -EFAULT;
+
+	dav_match_buf[size] = '\0';
+	strcpy(tmp, dav_match_buf);
+	p = tmp;
+
+	for (i = 0; i < 5; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strcpy(&tmp1[m], ret);
+	}
+	tmp1[sizeof(tmp1) - 1] = '\0';
+
+	err = kstrtoul(&tmp1[0], 0, &dav_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &dav_match_cf2);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[200], 0, &dav_match_cf3);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[300], 0, &dav_match_cf4);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[400], 0, &dav_match_cf5);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_dav_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+	return len;
+}
+
+static ssize_t ia_match_set(struct file *file, const char __user *user_buf,
+			    size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[400];
+	char *p;
+	int i, m;
+	const char *sep = " ";
+	char tmp1[400];
+	int err;
+	char *ret = NULL;
+
+	size = min3(sizeof(tmp) - 1, sizeof(ia_match_buf) - 1, len);
+	if (copy_from_user(ia_match_buf, user_buf, size))
+		return -EFAULT;
+
+	ia_match_buf[size] = '\0';
+	strcpy(tmp, ia_match_buf);
+	p = tmp;
+
+	for (i = 0; i < 4; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strcpy(&tmp1[m], ret);
+	}
+	tmp1[sizeof(tmp1) - 1] = '\0';
+
+	err = kstrtoul(&tmp1[0], 0, &ia_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &ia_match_cf2);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[200], 0, &ia_match_cf3);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[300], 0, &ia_match_cf4);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_ia_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+	return len;
+}
+
+static ssize_t iv_match_set(struct file *file, const char __user *user_buf,
+			    size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[400];
+	char *p;
+	int i, m;
+	const char *sep = " ";
+	char tmp1[400];
+	int err;
+	char *ret = NULL;
+
+	size = min3(sizeof(tmp) - 1, sizeof(iv_match_buf) - 1, len);
+	if (copy_from_user(iv_match_buf, user_buf, size))
+		return -EFAULT;
+
+	iv_match_buf[size] = '\0';
+	strcpy(tmp, iv_match_buf);
+	p = tmp;
+
+	for (i = 0; i < 4; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strcpy(&tmp1[m], ret);
+	}
+	tmp1[sizeof(tmp1) - 1] = '\0';
+
+	err = kstrtoul(&tmp1[0], 0, &iv_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &iv_match_cf2);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_iv_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
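+
+	/*
+	 * The CSR updates above ran on every online CPU via on_each_cpu(),
+	 * so all cores now carry the same instruction-value match setup.
+	 */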
+	return len;
+}
+
+static ssize_t ida_match_set(struct file *file, const char __user *user_buf,
+			     size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[400];
+	char *p;
+	int i, m;
+	const char *sep = " ";
+	char tmp1[400];
+	int err;
+	char *ret = NULL;
+
+	size = min3(sizeof(tmp) - 1, sizeof(ida_match_buf) - 1, len);
+	if (copy_from_user(ida_match_buf, user_buf, size))
+		return -EFAULT;
+
+	ida_match_buf[size] = '\0';
+	strcpy(tmp, ida_match_buf);
+	p = tmp;
+
+	for (i = 0; i < 4; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strcpy(&tmp1[m], ret);
+	}
+	tmp1[sizeof(tmp1) - 1] = '\0';
+
+	err = kstrtoul(&tmp1[0], 0, &ida_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &ida_match_cf2);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_ida_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static const struct file_operations set_da_match_fops = {
+	.open		= da_match_open,
+	.read		= seq_read,
+	.write		= da_match_set,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static const struct file_operations set_dv_match_fops = {
+	.open		= dv_match_open,
+	.read		= seq_read,
+	.write		= dv_match_set,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static const struct file_operations set_dav_match_fops = {
+	.open		= dav_match_open,
+	.read		= seq_read,
+	.write		= dav_match_set,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static const struct file_operations set_ia_match_fops = {
+	.open		= ia_match_open,
+	.read		= seq_read,
+	.write		= ia_match_set,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static const struct file_operations set_iv_match_fops = {
+	.open		= iv_match_open,
+	.read		= seq_read,
+	.write		= iv_match_set,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static const struct file_operations set_ida_match_fops = {
+	.open		= ida_match_open,
+	.read		= seq_read,
+	.write		= ida_match_set,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init match_debugfs_init(void)
+{
+	struct dentry *match_entry;
+
+	if (!sw64_debugfs_dir)
+		return -ENODEV;
+
+	match_entry = debugfs_create_file("da_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_da_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	match_entry = debugfs_create_file("dv_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_dv_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	match_entry = debugfs_create_file("dav_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_dav_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	match_entry = debugfs_create_file("ia_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_ia_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	match_entry = debugfs_create_file("iv_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_iv_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	match_entry = debugfs_create_file("ida_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_ida_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	return 0;
+}
+late_initcall(match_debugfs_init);
diff --git a/arch/sw_64/kernel/module.c b/arch/sw_64/kernel/module.c
new file mode 100644
index 0000000000000000000000000000000000000000..67264e3644a75341b898b9419f2b89b7119c5981
--- /dev/null
+++ b/arch/sw_64/kernel/module.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+
+#define DEBUGP(fmt...)
+
+/* Allocate the GOT at the end of the core sections.
*/ + +struct got_entry { + struct got_entry *next; + Elf64_Sxword r_addend; + int got_offset; +}; + +static inline void +process_reloc_for_got(Elf64_Rela *rela, + struct got_entry *chains, Elf64_Xword *poffset) +{ + unsigned long r_sym = ELF64_R_SYM(rela->r_info); + unsigned long r_type = ELF64_R_TYPE(rela->r_info); + Elf64_Sxword r_addend = rela->r_addend; + struct got_entry *g; + + if (r_type != R_SW64_LITERAL) + return; + + for (g = chains + r_sym; g ; g = g->next) + if (g->r_addend == r_addend) { + if (g->got_offset == 0) { + g->got_offset = *poffset; + *poffset += 8; + } + goto found_entry; + } + + g = kmalloc(sizeof(*g), GFP_KERNEL); + g->next = chains[r_sym].next; + g->r_addend = r_addend; + g->got_offset = *poffset; + *poffset += 8; + chains[r_sym].next = g; + + found_entry: + /* + * Trick: most of the ELF64_R_TYPE field is unused. There are + * 42 valid relocation types, and a 32-bit field. Co-opt the + * bits above 256 to store the got offset for this reloc. + */ + rela->r_info |= g->got_offset << 8; +} + +int +module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs, + char *secstrings, struct module *me) +{ + struct got_entry *chains; + Elf64_Rela *rela; + Elf64_Shdr *esechdrs, *symtab, *s, *got; + unsigned long nsyms, nrela, i; + + esechdrs = sechdrs + hdr->e_shnum; + symtab = got = NULL; + + /* Find out how large the symbol table is. Allocate one got_entry + * head per symbol. Normally this will be enough, but not always. + * We'll chain different offsets for the symbol down each head. + */ + for (s = sechdrs; s < esechdrs; ++s) + if (s->sh_type == SHT_SYMTAB) + symtab = s; + else if (!strcmp(".got", secstrings + s->sh_name)) { + got = s; + me->arch.gotsecindex = s - sechdrs; + } + + if (!symtab) { + pr_err("module %s: no symbol table\n", me->name); + return -ENOEXEC; + } + if (!got) { + pr_err("module %s: no got section\n", me->name); + return -ENOEXEC; + } + + nsyms = symtab->sh_size / sizeof(Elf64_Sym); + chains = kcalloc(nsyms, sizeof(struct got_entry), GFP_KERNEL); + if (!chains) { + pr_err("module %s: no memory for symbol chain buffer\n", + me->name); + return -ENOMEM; + } + + got->sh_size = 0; + got->sh_addralign = 8; + got->sh_type = SHT_NOBITS; + + /* Examine all LITERAL relocations to find out what GOT entries + * are required. This sizes the GOT section as well. + */ + for (s = sechdrs; s < esechdrs; ++s) + if (s->sh_type == SHT_RELA) { + nrela = s->sh_size / sizeof(Elf64_Rela); + rela = (void *)hdr + s->sh_offset; + for (i = 0; i < nrela; ++i) + process_reloc_for_got(rela+i, chains, + &got->sh_size); + } + + /* Free the memory we allocated. */ + for (i = 0; i < nsyms; ++i) { + struct got_entry *g, *n; + + for (g = chains[i].next; g ; g = n) { + n = g->next; + kfree(g); + } + } + kfree(chains); + + return 0; +} + +int +apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr; + unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela); + Elf64_Sym *symtab, *sym; + void *base, *location; + unsigned long got, gp; + + DEBUGP("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + + base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr; + symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr; + + /* The small sections were sorted to the end of the segment. + * The following should definitely cover them. 
+ */ + got = sechdrs[me->arch.gotsecindex].sh_addr; + gp = got + 0x8000; + + for (i = 0; i < n; i++) { + unsigned long r_sym = ELF64_R_SYM(rela[i].r_info); + unsigned long r_type = ELF64_R_TYPE(rela[i].r_info); + unsigned long r_got_offset = r_type >> 8; + unsigned long value, hi, lo; + + r_type &= 0xff; + + /* This is where to make the change. */ + location = base + rela[i].r_offset; + + /* This is the symbol it is referring to. Note that all + * unresolved symbols have been resolved. + */ + sym = symtab + r_sym; + value = sym->st_value + rela[i].r_addend; + + switch (r_type) { + case R_SW64_NONE: + break; + case R_SW64_REFLONG: + *(u32 *)location = value; + break; + case R_SW64_REFQUAD: + /* BUG() can produce misaligned relocations. */ + ((u32 *)location)[0] = value; + ((u32 *)location)[1] = value >> 32; + break; + case R_SW64_GPREL32: + value -= gp; + if ((int)value != value) + goto reloc_overflow; + *(u32 *)location = value; + break; + case R_SW64_LITERAL: + hi = got + r_got_offset; + lo = hi - gp; + if ((short)lo != lo) { + unsigned long over_offset = (lo + 0x8000) >> 16; + + if ((over_offset & 0x8000) == 0) { + *(u16 *)(location - 0x4) = over_offset; + *(u16 *)location = lo - ((over_offset << 16) + gp); + *(u64 *)hi = value; + } else { + goto reloc_overflow; + } + } else { + *(u16 *)location = lo; + *(u64 *)hi = value; + } + break; + case R_SW64_LITERAL_GOT: + /* empty for now need to fill */ + break; + case R_SW64_LITUSE: + break; + case R_SW64_GPDISP: + value = gp - (u64)location; + lo = (short)value; + hi = (int)(value - lo); + if (hi + lo != value) + goto reloc_overflow; + *(u16 *)location = hi >> 16; + *(u16 *)(location + rela[i].r_addend) = lo; + break; + case R_SW64_BRSGP: + /* + * BRSGP is only allowed to bind to local symbols. + * If the section is undef, this means that the + * value was resolved from somewhere else. + */ + if (sym->st_shndx == SHN_UNDEF) + goto reloc_overflow; + if ((sym->st_other & STO_SW64_STD_GPLOAD) == + STO_SW64_STD_GPLOAD) + /* Omit the prologue. 
*/ + value += 8; + fallthrough; + case R_SW64_BRADDR: + value -= (u64)location + 4; + if (value & 3) + goto reloc_overflow; + value = (long)value >> 2; + if (value + (1<<21) >= 1<<22) + goto reloc_overflow; + value &= 0x1fffff; + value |= *(u32 *)location & ~0x1fffff; + *(u32 *)location = value; + break; + case R_SW64_HINT: + break; + case R_SW64_SREL32: + value -= (u64)location; + if ((int)value != value) + goto reloc_overflow; + *(u32 *)location = value; + break; + case R_SW64_SREL64: + value -= (u64)location; + *(u64 *)location = value; + break; + case R_SW64_GPRELHIGH: + value = (long)(value - gp + 0x8000) >> 16; + if ((short) value != value) + goto reloc_overflow; + *(u16 *)location = value; + break; + case R_SW64_GPRELLOW: + value -= gp; + *(u16 *)location = value; + break; + case R_SW64_GPREL16: + value -= gp; + if ((short) value != value) + goto reloc_overflow; + *(u16 *)location = value; + break; + default: + pr_err("module %s: Unknown relocation: %lu\n", me->name, r_type); + return -ENOEXEC; +reloc_overflow: + if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION) + pr_err("module %s: Relocation (type %lu) overflow vs section %d\n", + me->name, r_type, sym->st_shndx); + else + pr_err("module %s: Relocation (type %lu) overflow vs %s\n", + me->name, r_type, strtab + sym->st_name); + return -ENOEXEC; + } + } + + return 0; +} diff --git a/arch/sw_64/kernel/pci-noop.c b/arch/sw_64/kernel/pci-noop.c new file mode 100644 index 0000000000000000000000000000000000000000..abfba92fa6a9c388542859b69884b208a424c9c3 --- /dev/null +++ b/arch/sw_64/kernel/pci-noop.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/pci-noop.c + * + * Stub PCI interfaces for NO PCI kernels. + */ + +#include +#include +#include +#include + +/* + * The PCI controller list. 
+ */ + +struct pci_controller *hose_head, **hose_tail = &hose_head; + +struct pci_controller * __init +alloc_pci_controller(void) +{ + struct pci_controller *hose; + + hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); + + *hose_tail = hose; + hose_tail = &hose->next; + + return hose; +} + +struct resource * __init +alloc_resource(void) +{ + struct resource *res; + + res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); + + return res; +} + +asmlinkage long +sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) +{ + return -ENODEV; +} + +asmlinkage long +sys_pciconfig_read(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, void *buf) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + else + return -ENODEV; +} + +asmlinkage long +sys_pciconfig_write(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, void *buf) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + else + return -ENODEV; +} + +static void *sw64_noop_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + void *ret; + + if (!dev || *dev->dma_mask >= 0xffffffffUL) + gfp &= ~GFP_DMA; + ret = (void *)__get_free_pages(gfp, get_order(size)); + if (ret) { + memset(ret, 0, size); + *dma_handle = virt_to_phys(ret); + } + return ret; +} + +static void sw64_noop_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr, + unsigned long attrs) +{ + free_pages((unsigned long)cpu_addr, get_order(size)); +} + +static dma_addr_t sw64_noop_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + return page_to_pa(page) + offset; +} + +static int sw64_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + void *va; + + BUG_ON(!sg_page(sg)); + va = sg_virt(sg); + sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va); + sg_dma_len(sg) = sg->length; + } + + return nents; +} + +static int sw64_noop_supported(struct device *dev, u64 mask) +{ + return mask < 0x00ffffffUL ? 0 : 1; +} + +const struct dma_map_ops sw64_noop_ops = { + .alloc = sw64_noop_alloc_coherent, + .free = sw64_noop_free_coherent, + .map_page = sw64_noop_map_page, + .map_sg = sw64_noop_map_sg, + .dma_supported = sw64_noop_supported, +}; + +const struct dma_map_ops *dma_ops = &sw64_noop_ops; +EXPORT_SYMBOL(dma_ops); + +void __init common_init_pci(void) +{ +} + +void __init sw64_init_arch(void) { } +void __init sw64_init_irq(void) { } diff --git a/arch/sw_64/kernel/perf_event.c b/arch/sw_64/kernel/perf_event.c new file mode 100644 index 0000000000000000000000000000000000000000..83bb051be9de4767779d8783b31c8eda4277caa2 --- /dev/null +++ b/arch/sw_64/kernel/perf_event.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance events support for SW64 platforms. + * + * This code is based upon riscv and sparc perf event code. + */ + +#include +#include + +/* For tracking PMCs and the hw events they monitor on each CPU. */ +struct cpu_hw_events { + /* + * Set the bit (indexed by the counter number) when the counter + * is used for an event. + */ + unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; + /* Array of events current scheduled on this cpu. 
*/ + struct perf_event *event[MAX_HWEVENTS]; +}; + +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +struct sw64_perf_event { + /* pmu index */ + int counter; + /* events selector */ + int event; +}; + +/* + * A structure to hold the description of the PMCs available on a particular + * type of SW64 CPU. + */ +struct sw64_pmu_t { + /* generic hw/cache events table */ + const struct sw64_perf_event *hw_events; + const struct sw64_perf_event (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + + /* method used to map hw/cache events */ + const struct sw64_perf_event *(*map_hw_event)(u64 config); + const struct sw64_perf_event *(*map_cache_event)(u64 config); + + /* The number of entries in the hw_event_map */ + int max_events; + + /* The number of counters on this pmu */ + int num_pmcs; + + /* + * All PMC counters reside in the IBOX register PCTR. This is the + * LSB of the counter. + */ + int pmc_count_shift[MAX_HWEVENTS]; + + /* + * The mask that isolates the PMC bits when the LSB of the counter + * is shifted to bit 0. + */ + unsigned long pmc_count_mask; + + /* The maximum period the PMC can count. */ + unsigned long pmc_max_period; + + /* + * The maximum value that may be written to the counter due to + * hardware restrictions is pmc_max_period - pmc_left. + */ + long pmc_left; + + /* Subroutine for checking validity of a raw event for this PMU. */ + bool (*raw_event_valid)(u64 config); +}; + +/* + * The SW64 PMU description currently in operation. This is set during + * the boot process to the specific CPU of the machine. + */ +static const struct sw64_pmu_t *sw64_pmu; + +/* + * SW64 PMC event types + * + * There is no one-to-one mapping of the possible hw event types to the + * actual codes that are used to program the PMCs hence we introduce our + * own hw event type identifiers. 
+ */ +#define SW64_OP_UNSUP {-1, -1} + +/* Mapping of the hw event types to the perf tool interface */ +static const struct sw64_perf_event core3_hw_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = {PMC_PC0, PC0_CPU_CYCLES}, + [PERF_COUNT_HW_INSTRUCTIONS] = {PMC_PC0, PC0_INSTRUCTIONS}, + [PERF_COUNT_HW_CACHE_REFERENCES] = {PMC_PC0, PC0_SCACHE_REFERENCES}, + [PERF_COUNT_HW_CACHE_MISSES] = {PMC_PC1, PC1_SCACHE_MISSES}, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {PMC_PC0, PC0_BRANCH_INSTRUCTIONS}, + [PERF_COUNT_HW_BRANCH_MISSES] = {PMC_PC1, PC1_BRANCH_MISSES}, +}; + +/* Mapping of the hw cache event types to the perf tool interface */ +#define C(x) PERF_COUNT_HW_CACHE_##x +static const struct sw64_perf_event core3_cache_event_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_DCACHE_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_DCACHE_MISSES} + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_ICACHE_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_ICACHE_READ_MISSES}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_DTB_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_DTB_SINGLE_MISSES}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_ITB_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_ITB_MISSES}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + +}; + +static const struct sw64_perf_event *core3_map_hw_event(u64 config) +{ + return &sw64_pmu->hw_events[config]; +} + +static const struct sw64_perf_event *core3_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct sw64_perf_event *perf_event; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) 
+		return ERR_PTR(-EINVAL);
+
+	cache_op = (config >> 8) & 0xff;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return ERR_PTR(-EINVAL);
+
+	cache_result = (config >> 16) & 0xff;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	perf_event = &((*sw64_pmu->cache_events)[cache_type][cache_op][cache_result]);
+	if (perf_event->counter == -1) /* SW64_OP_UNSUP */
+		return ERR_PTR(-ENOENT);
+
+	return perf_event;
+}
+
+/*
+ * r0xx for counter0, r1yy for counter1.
+ * According to the datasheet, 00 <= xx <= 0F, 00 <= yy <= 3D
+ */
+static bool core3_raw_event_valid(u64 config)
+{
+	if ((config >= PC0_RAW_BASE && config <= (PC0_RAW_BASE + PC0_MAX)) ||
+	    (config >= PC1_RAW_BASE && config <= (PC1_RAW_BASE + PC1_MAX)))
+		return true;
+
+	pr_info("sw64 pmu: invalid raw event config %#llx\n", config);
+	return false;
+}
+
+static const struct sw64_pmu_t core3_pmu = {
+	.max_events = ARRAY_SIZE(core3_hw_event_map),
+	.hw_events = core3_hw_event_map,
+	.map_hw_event = core3_map_hw_event,
+	.cache_events = &core3_cache_event_map,
+	.map_cache_event = core3_map_cache_event,
+	.num_pmcs = MAX_HWEVENTS,
+	.pmc_count_mask = PMC_COUNT_MASK,
+	.pmc_max_period = PMC_COUNT_MASK,
+	.pmc_left = 4,
+	.raw_event_valid = core3_raw_event_valid,
+};
+
+/*
+ * Low-level functions: reading/writing counters
+ */
+static void sw64_write_pmc(int idx, unsigned long val)
+{
+	wrperfmon(PMC_CMD_WRITE_BASE + idx, val);
+}
+
+static unsigned long sw64_read_pmc(int idx)
+{
+	return wrperfmon(PMC_CMD_READ, idx);
+}
+
+/* Set a new period to sample over */
+static int sw64_perf_event_set_period(struct perf_event *event,
+				      struct hw_perf_event *hwc, int idx)
+{
+	long left = local64_read(&hwc->period_left);
+	long period = hwc->sample_period;
+	int overflow = 0;
+	unsigned long value;
+
+	if (unlikely(left <= -period)) {
+		left = period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		overflow = 1;
+	}
+
+	if (unlikely(left <= 0)) {
+		left += period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		overflow = 1;
+	}
+
+	if (left > (long)sw64_pmu->pmc_max_period)
+		left = sw64_pmu->pmc_max_period;
+
+	value = sw64_pmu->pmc_max_period - left;
+	local64_set(&hwc->prev_count, value);
+	sw64_write_pmc(idx, value);
+
+	perf_event_update_userpage(event);
+
+	return overflow;
+}
+
+/*
+ * Calculates the count (the 'delta') since the last time the PMC was read.
+ *
+ * As the PMCs' full period can easily be exceeded within the perf system
+ * sampling period, we cannot use any high-order bits as a guard bit in the
+ * PMCs to detect overflow as is done by other architectures. The code here
+ * calculates the delta on the basis that there is no overflow when ovf is
+ * zero. The value passed via ovf by the interrupt handler corrects for
+ * overflow.
+ *
+ * This can be racy on rare occasions -- a call to this routine can occur
+ * with an overflowed counter just before the PMI service routine is called.
+ * The check for a negative delta should rectify this situation.
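+ *
+ * A worked example of the correction (illustrative only, assuming a
+ * hypothetical 8-bit counter, i.e. pmc_max_period == 0xff): with
+ * prev_count == 0xfe, new_raw_count == 0x01 and ovf == 0, the counter
+ * wrapped before the PMI ran, so
+ *
+ *	delta = 0x01 - 0xfe = -0xfd	(negative: a wrap is detected)
+ *	delta += 0xff + 1 = 0x03	(the three events actually counted)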
+ */ +static unsigned long sw64_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx, long ovf) +{ + long prev_raw_count, new_raw_count; + long delta; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = sw64_read_pmc(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count - (prev_raw_count & sw64_pmu->pmc_count_mask)) + ovf; + + /* It is possible on very rare occasions that the PMC has overflowed + * but the interrupt is yet to come. Detect and fix this situation. + */ + if (unlikely(delta < 0)) + delta += sw64_pmu->pmc_max_period + 1; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +/* + * State transition functions: + * + * add()/del() & start()/stop() + * + */ + +/* + * pmu->start: start the event. + */ +static void sw64_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + sw64_perf_event_set_period(event, hwc, hwc->idx); + } + + hwc->state = 0; + + /* counting in selected modes, for both counters */ + wrperfmon(PMC_CMD_PM, hwc->config_base); + wrperfmon(PMC_CMD_EVENT_BASE + hwc->idx, hwc->event_base); + wrperfmon(PMC_CMD_ENABLE, PMC_ENABLE_BASE + hwc->idx); +} + +/* + * pmu->stop: stop the counter + */ +static void sw64_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!(hwc->state & PERF_HES_STOPPED)) { + wrperfmon(PMC_CMD_DISABLE, PMC_DISABLE_BASE + hwc->idx); + hwc->state |= PERF_HES_STOPPED; + barrier(); + } + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + sw64_perf_event_update(event, hwc, hwc->idx, 0); + hwc->state |= PERF_HES_UPTODATE; + } +} + +/* + * pmu->add: add the event to PMU. + */ +static int sw64_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int err = 0; + unsigned long irq_flags; + + local_irq_save(irq_flags); + + if (__test_and_set_bit(hwc->idx, cpuc->used_mask)) { + err = -ENOSPC; + goto out; + } + + cpuc->event[hwc->idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + sw64_pmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. */ + perf_event_update_userpage(event); + +out: + local_irq_restore(irq_flags); + + return err; +} + +/* + * pmu->del: delete the event from PMU. + */ +static void sw64_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + unsigned long irq_flags; + + local_irq_save(irq_flags); + + sw64_pmu_stop(event, PERF_EF_UPDATE); + cpuc->event[hwc->idx] = NULL; + __clear_bit(event->hw.idx, cpuc->used_mask); + + /* Absorb the final count and turn off the event. */ + perf_event_update_userpage(event); + + local_irq_restore(irq_flags); +} + +/* + * pmu->read: read and update the counter + */ +static void sw64_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + sw64_perf_event_update(event, hwc, hwc->idx, 0); +} + +static bool supported_cpu(void) +{ + return true; +} + +static void hw_perf_event_destroy(struct perf_event *event) +{ + /* Nothing to be done! 
*/
+}
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	struct hw_perf_event *hwc = &event->hw;
+	const struct sw64_perf_event *event_type;
+
+	/*
+	 * SW64 does not have per-counter usr/os/guest/host bits;
+	 * we can only distinguish exclude_user and exclude_kernel
+	 * by sample mode.
+	 */
+	if (event->attr.exclude_hv || event->attr.exclude_idle ||
+	    event->attr.exclude_host || event->attr.exclude_guest)
+		return -EINVAL;
+
+	/*
+	 * SW64 does not support the precise IP feature, and the system
+	 * hangs when userspace probes precise_ip via
+	 * perf_event_attr__set_max_precise_ip().
+	 */
+	if (attr->precise_ip != 0)
+		return -EOPNOTSUPP;
+
+	/* SW64 has a fixed counter for a given event type */
+	if (attr->type == PERF_TYPE_HARDWARE) {
+		if (attr->config >= sw64_pmu->max_events)
+			return -EINVAL;
+		event_type = sw64_pmu->map_hw_event(attr->config);
+		hwc->idx = event_type->counter;
+		hwc->event_base = event_type->event;
+	} else if (attr->type == PERF_TYPE_HW_CACHE) {
+		event_type = sw64_pmu->map_cache_event(attr->config);
+		if (IS_ERR(event_type))
+			return PTR_ERR(event_type);
+		hwc->idx = event_type->counter;
+		hwc->event_base = event_type->event;
+	} else { /* PERF_TYPE_RAW */
+		if (!sw64_pmu->raw_event_valid(attr->config))
+			return -EINVAL;
+		hwc->idx = attr->config >> 8;		/* counter selector */
+		hwc->event_base = attr->config & 0xff;	/* event selector */
+	}
+
+	hwc->config_base = SW64_PERFCTRL_AM;
+
+	if (attr->exclude_user)
+		hwc->config_base = SW64_PERFCTRL_KM;
+	if (attr->exclude_kernel)
+		hwc->config_base = SW64_PERFCTRL_UM;
+
+	hwc->config = attr->config;
+
+	if (!is_sampling_event(event))
+		pr_debug("not sampling event\n");
+
+	event->destroy = hw_perf_event_destroy;
+
+	if (!hwc->sample_period) {
+		hwc->sample_period = sw64_pmu->pmc_max_period;
+		hwc->last_period = hwc->sample_period;
+		local64_set(&hwc->period_left, hwc->sample_period);
+	}
+
+	return 0;
+}
+
+/*
+ * Main entry point to initialise a HW performance event.
+ */
+static int sw64_pmu_event_init(struct perf_event *event)
+{
+	int err;
+
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	if (!sw64_pmu)
+		return -ENODEV;
+
+	/* Do the real initialisation work. */
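+
+	/*
+	 * For PERF_TYPE_RAW the config word is decoded in
+	 * __hw_perf_event_init() as a counter selector in bits [15:8] and
+	 * an event selector in bits [7:0]. A minimal userspace sketch
+	 * (illustrative only; it assumes PC1 raw events are based at
+	 * 0x100, per the r0xx/r1yy note at core3_raw_event_valid()):
+	 *
+	 *	struct perf_event_attr attr = {
+	 *		.type = PERF_TYPE_RAW,
+	 *		.size = sizeof(attr),
+	 *		.config = 0x13d,	(counter 1, event 0x3d)
+	 *	};
+	 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
+	 */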
+	err = __hw_perf_event_init(event);
+
+	return err;
+}
+
+static struct pmu pmu = {
+	.name = "core3-base",
+	.capabilities = PERF_PMU_CAP_NO_NMI,
+	.event_init = sw64_pmu_event_init,
+	.add = sw64_pmu_add,
+	.del = sw64_pmu_del,
+	.start = sw64_pmu_start,
+	.stop = sw64_pmu_stop,
+	.read = sw64_pmu_read,
+};
+
+void perf_event_print_debug(void)
+{
+	unsigned long flags;
+	unsigned long pcr0, pcr1;
+	int cpu;
+
+	if (!supported_cpu())
+		return;
+
+	local_irq_save(flags);
+
+	cpu = smp_processor_id();
+
+	pcr0 = wrperfmon(PMC_CMD_READ, PMC_PC0);
+	pcr1 = wrperfmon(PMC_CMD_READ, PMC_PC1);
+
+	pr_info("CPU#%d: PCTR0[%lx] PCTR1[%lx]\n", cpu, pcr0, pcr1);
+
+	local_irq_restore(flags);
+}
+
+static void sw64_perf_event_irq_handler(unsigned long idx,
+					struct pt_regs *regs)
+{
+	struct cpu_hw_events *cpuc;
+	struct perf_sample_data data;
+	struct perf_event *event;
+	struct hw_perf_event *hwc;
+
+	__this_cpu_inc(irq_pmi_count);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	event = cpuc->event[idx];
+
+	if (unlikely(!event)) {
+		irq_err_count++;
+		return;
+	}
+
+	hwc = &event->hw;
+	sw64_perf_event_update(event, hwc, idx, sw64_pmu->pmc_max_period + 1);
+	perf_sample_data_init(&data, 0, hwc->last_period);
+
+	if (sw64_perf_event_set_period(event, hwc, idx)) {
+		if (perf_event_overflow(event, &data, regs)) {
+			/* Interrupts coming too quickly; "throttle" the
+			 * counter, i.e., disable it for a little while.
+			 */
+			sw64_pmu_stop(event, 0);
+		}
+	}
+}
+
+bool valid_utext_addr(unsigned long addr)
+{
+	return addr >= current->mm->start_code && addr <= current->mm->end_code;
+}
+
+bool valid_dy_addr(unsigned long addr)
+{
+	bool ret = false;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+
+	if (addr > TASK_SIZE || addr < TASK_UNMAPPED_BASE)
+		return ret;
+	vma = find_vma(mm, addr);
+	if (vma && vma->vm_start <= addr && (vma->vm_flags & VM_EXEC))
+		ret = true;
+	return ret;
+}
+
+#ifdef CONFIG_FRAME_POINTER
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+			 struct pt_regs *regs)
+{
+	struct stack_frame frame;
+	unsigned long __user *fp;
+	int err;
+
+	perf_callchain_store(entry, regs->pc);
+
+	fp = (unsigned long __user *)regs->regs[15];
+
+	while (entry->nr < entry->max_stack &&
+	       (unsigned long)fp < current->mm->start_stack) {
+		if (!access_ok(fp, sizeof(frame)))
+			break;
+
+		pagefault_disable();
+		err = __copy_from_user_inatomic(&frame, fp, sizeof(frame));
+		pagefault_enable();
+
+		if (err)
+			break;
+
+		if (valid_utext_addr(frame.return_address) ||
+		    valid_dy_addr(frame.return_address))
+			perf_callchain_store(entry, frame.return_address);
+		fp = (void __user *)frame.next_frame;
+	}
+}
+#else /* !CONFIG_FRAME_POINTER */
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+			 struct pt_regs *regs)
+{
+	unsigned long usp = rdusp();
+	unsigned long user_addr;
+	int err;
+
+	perf_callchain_store(entry, regs->pc);
+
+	while (entry->nr < entry->max_stack && usp < current->mm->start_stack) {
+		if (!access_ok((const void __user *)usp, 8))
+			break;
+
+		pagefault_disable();
+		err = __get_user(user_addr, (unsigned long *)usp);
+		pagefault_enable();
+
+		if (err)
+			break;
+
+		if (valid_utext_addr(user_addr) || valid_dy_addr(user_addr))
+			perf_callchain_store(entry, user_addr);
+		usp = usp + 8;
+	}
+}
+#endif /* CONFIG_FRAME_POINTER */
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return, so we use
+ * the PC.
+ */ +static int callchain_trace(unsigned long pc, void *data) +{ + struct perf_callchain_entry_ctx *entry = data; + + perf_callchain_store(entry, pc); + return 0; +} + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + walk_stackframe(NULL, regs, callchain_trace, entry); +} + +/* + * Gets the perf_instruction_pointer and perf_misc_flags for guest os. + */ + +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + if (perf_guest_state()) + return perf_guest_get_ip(); + + return instruction_pointer(regs); +} + +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + unsigned int guest_state = perf_guest_state(); + int misc = 0; + + if (guest_state) { + if (guest_state & PERF_GUEST_USER) + misc |= PERF_RECORD_MISC_GUEST_USER; + else + misc |= PERF_RECORD_MISC_GUEST_KERNEL; + } else { + if (user_mode(regs)) + misc |= PERF_RECORD_MISC_USER; + else + misc |= PERF_RECORD_MISC_KERNEL; + } + + return misc; +} + +/* + * Init call to initialise performance events at kernel startup. + */ +int __init init_hw_perf_events(void) +{ + if (!supported_cpu()) { + pr_info("Performance events: Unsupported CPU type!\n"); + return 0; + } + + pr_info("Performance events: Supported CPU type!\n"); + + /* Override performance counter IRQ vector */ + + perf_irq = sw64_perf_event_irq_handler; + + /* And set up PMU specification */ + sw64_pmu = &core3_pmu; + + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + + return 0; +} +early_initcall(init_hw_perf_events); diff --git a/arch/sw_64/kernel/perf_regs.c b/arch/sw_64/kernel/perf_regs.c new file mode 100644 index 0000000000000000000000000000000000000000..b036f213936bc6d79214c9b7bdf1ab9a82a40b69 --- /dev/null +++ b/arch/sw_64/kernel/perf_regs.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_SW64_MAX)) + return 0; + + return ((unsigned long *)regs)[idx]; +} + +#define REG_RESERVED (~((1ULL << PERF_REG_SW64_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_64; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/sw_64/kernel/pm.c b/arch/sw_64/kernel/pm.c new file mode 100644 index 0000000000000000000000000000000000000000..f0a35e5d0486167340b44f3bac1c80104f25649e --- /dev/null +++ b/arch/sw_64/kernel/pm.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include + +struct syscore_ops io_syscore_ops; + +static int __init sw64_pm_init(void) +{ +#ifdef CONFIG_SUSPEND + suspend_set_ops(&native_suspend_ops); +#endif + register_syscore_ops(&io_syscore_ops); + + return 0; +} +device_initcall(sw64_pm_init); diff --git a/arch/sw_64/kernel/proc_misc.c b/arch/sw_64/kernel/proc_misc.c new file mode 100644 index 0000000000000000000000000000000000000000..ca107ec1e05e96f4a8790a3216d7f0eec6458ecd --- /dev/null +++ b/arch/sw_64/kernel/proc_misc.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +extern const struct seq_operations cpu_active_mask_op; +static int cpu_active_mask_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &cpu_active_mask_op); +} + +static const struct file_operations proc_cpu_active_mask_operations = { + .open = 
cpu_active_mask_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int __init proc_cpu_active_mask_init(void)
+{
+	proc_create("cpu_active_mask", 0, NULL, &proc_cpu_active_mask_operations);
+	return 0;
+}
+fs_initcall(proc_cpu_active_mask_init);
diff --git a/arch/sw_64/kernel/process.c b/arch/sw_64/kernel/process.c
new file mode 100644
index 0000000000000000000000000000000000000000..9a887140edef19d2a490edd7ec12ef871fcd5cae
--- /dev/null
+++ b/arch/sw_64/kernel/process.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file handles the architecture-dependent parts of process handling.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "proto.h"
+
+/*
+ * Re-start a thread when doing execve()
+ */
+void
+start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+{
+	regs->pc = pc;
+	regs->ps = 8;
+	regs->regs[30] = sp;
+}
+EXPORT_SYMBOL(start_thread);
+
+void
+flush_thread(void)
+{
+	/*
+	 * Arrange for each exec'ed process to start off with a clean slate
+	 * with respect to the FPU, that is, with all exceptions disabled.
+	 */
+	current_thread_info()->ieee_state = 0;
+	wrfpcr(FPCR_INIT | ieee_swcr_to_fpcr(0));
+
+	/* Clean slate for TLS. */
+	current_thread_info()->pcb.tp = 0;
+}
+
+void
+release_thread(struct task_struct *dead_task)
+{
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	/*
+	 * aux_save() has to read the current TLS pointer from CSR:TID as it
+	 * may be out-of-sync with the saved value.
+	 */
+	aux_save(src);
+	*dst = *src;
+	return 0;
+}
+
+/*
+ * Copy architecture-specific thread state
+ */
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+{
+	unsigned long clone_flags = args->flags;
+	unsigned long usp = args->stack;
+	unsigned long tls = args->tls;
+	struct thread_info *childti = task_thread_info(p);
+	struct pt_regs *childregs = task_pt_regs(p);
+	struct pt_regs *regs = current_pt_regs();
+
+	extern void ret_from_fork(void);
+	extern void ret_from_kernel_thread(void);
+
+	p->thread.sp = (unsigned long) childregs;
+
+	if (unlikely(args->fn)) {
+		/* kernel thread */
+		memset(childregs, 0, sizeof(struct pt_regs));
+		p->thread.ra = (unsigned long) ret_from_kernel_thread;
+		p->thread.s[0] = (unsigned long) args->fn;	/* function */
+		p->thread.s[1] = (unsigned long) args->fn_arg;
+		return 0;
+	}
+
+	/*
+	 * Note: if CLONE_SETTLS is not set, then we must inherit the
+	 * value from the parent, which will have been set by the block
+	 * copy in dup_task_struct. This is non-intuitive, but is
+	 * required for proper operation in the case of a threaded
+	 * application calling fork.
+	 */
+	if (clone_flags & CLONE_SETTLS)
+		childti->pcb.tp = tls;
+	else
+		regs->regs[20] = 0;
+	*childregs = *regs;
+	if (usp)
+		childregs->regs[30] = usp;
+	syscall_set_return_value(NULL, childregs, 0, 0);
+	p->thread.ra = (unsigned long) ret_from_fork;
+	return 0;
+}
+
+/*
+ * Fill in the user structure for an ELF core dump.
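+ * The destination layout (see the function body) is dest[0..30] holding
+ * the integer registers, dest[31] the pc and dest[32] the TLS pointer.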
+ * @regs: should be signal_pt_regs() or task_pt_regs(task)
+ */
+void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *regs)
+{
+	int i;
+	struct thread_info *ti;
+
+	ti = (void *)((__u64)regs & ~(THREAD_SIZE - 1));
+
+	for (i = 0; i < 31; i++)
+		dest[i] = regs->regs[i];
+	dest[31] = regs->pc;
+	dest[32] = ti->pcb.tp;
+}
+EXPORT_SYMBOL(sw64_elf_core_copy_regs);
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+	return randomize_page(mm->brk, 0x02000000);
+}
diff --git a/arch/sw_64/kernel/proto.h b/arch/sw_64/kernel/proto.h
new file mode 100644
index 0000000000000000000000000000000000000000..d7222334d1b99ffab3e0345bebcb1ec15177290c
--- /dev/null
+++ b/arch/sw_64/kernel/proto.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SW64_KERNEL_PROTO_H
+#define _SW64_KERNEL_PROTO_H
+
+#include
+#include
+#include
+#include
+
+/* ptrace.c */
+extern int ptrace_set_bpt(struct task_struct *child);
+extern int ptrace_cancel_bpt(struct task_struct *child);
+
+/* traps.c */
+extern void show_regs(struct pt_regs *regs);
+extern void die(char *str, struct pt_regs *regs, long err);
+
+#endif /* _SW64_KERNEL_PROTO_H */
diff --git a/arch/sw_64/kernel/ptrace.c b/arch/sw_64/kernel/ptrace.c
new file mode 100644
index 0000000000000000000000000000000000000000..070e27ee256766b7d031dbc2a92bdefcedfe3676
--- /dev/null
+++ b/arch/sw_64/kernel/ptrace.c
@@ -0,0 +1,858 @@
+// SPDX-License-Identifier: GPL-2.0
+/* ptrace.c */
+/* By Ross Biro 1/23/92 */
+/* edited by Linus Torvalds */
+/* mangled further by Bob Manson (manson@santafe.edu) */
+/* more mutilation by David Mosberger (davidm@azstarnet.com) */
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include "proto.h"
+#include
+
+#define CREATE_TRACE_POINTS
+#include
+
+#define BREAKINST	0x00000080	/* sys_call bpt */
+
+/*
+ * This code does not yet catch signals sent when the child dies;
+ * that case is handled in exit.c or in signal.c.
+ */
+
+/*
+ * Processes always block with the following stack-layout:
+ *
+ *	+================================+ <---- task + 2*PAGE_SIZE
+ *	| HMcode saved frame (ps, pc,   | ^
+ *	| gp, a0, a1, a2)               | |
+ *	+================================+ | struct pt_regs
+ *	|                               | |
+ *	| frame generated by SAVE_ALL   | |
+ *	|                               | v
+ *	+================================+
+ */
+
+/*
+ * The following table maps a register index into the stack offset at
+ * which the register is saved. Register indices are 0-31 for integer
+ * regs, 32-63 for fp regs, and 64 for the pc. Notice that sp and
+ * zero have no stack-slot and need to be treated specially (see
+ * get_reg/put_reg below).
+ */
+#define PCB_OFF(var)	offsetof(struct pcb_struct, var)
+
+static int pcboff[] = {
+	[PT_TP] = PCB_OFF(tp),
+	[PT_DA_MATCH] = PCB_OFF(da_match),
+	[PT_DA_MASK] = PCB_OFF(da_mask),
+	[PT_DV_MATCH] = PCB_OFF(dv_match),
+	[PT_DV_MASK] = PCB_OFF(dv_mask),
+	[PT_DC_CTL] = PCB_OFF(dc_ctl),
+	[PT_MATCH_CTL] = PCB_OFF(match_ctl),
+	[PT_IA_MATCH] = PCB_OFF(ia_match),
+	[PT_IA_MASK] = PCB_OFF(ia_mask),
+	[PT_IV_MATCH] = PCB_OFF(iv_match),
+	[PT_IDA_MATCH] = PCB_OFF(ida_match),
+	[PT_IDA_MASK] = PCB_OFF(ida_mask)
+};
+
+static unsigned long zero;
+
+/*
+ * Get address of register REGNO in task TASK.
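+ *
+ * Vector lanes decode as fno = (regno - PT_VECREG_BASE) & 0x1f and
+ * vno = 1 + ((regno - PT_VECREG_BASE) >> 5); e.g. a hypothetical
+ * regno of PT_VECREG_BASE + 0x25 resolves to lane v2 of f5.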
+ */ + +static unsigned long * +get_reg_addr(struct task_struct *task, unsigned long regno) +{ + void *addr; + int fno, vno; + + switch (regno) { + case PT_UNIQUE: + case PT_DA_MATCH: + case PT_DA_MASK: + case PT_DV_MATCH: + case PT_DV_MASK: + case PT_MATCH_CTL: + case PT_IA_MATCH: + case PT_IA_MASK: + case PT_IV_MATCH: + case PT_IDA_MATCH: + case PT_IDA_MASK: + addr = (void *)task_thread_info(task) + pcboff[regno]; + break; + case PT_REG_BASE ... PT_REG_END: + addr = &task_pt_regs(task)->regs[regno]; + break; + case PT_FPREG_BASE ... PT_FPREG_END: + fno = regno - PT_FPREG_BASE; + addr = &task->thread.fpstate.fp[fno].v[0]; + break; + case PT_VECREG_BASE ... PT_VECREG_END: + /* + * return addr for zero value if we catch vectors of f31 + * v0 and v3 of f31 are not in this range so ignore them + */ + if (regno == PT_F31_V1 || regno == PT_F31_V2) { + addr = &zero; + break; + } + fno = (regno - PT_VECREG_BASE) & 0x1f; + vno = 1 + ((regno - PT_VECREG_BASE) >> 5); + addr = &task->thread.fpstate.fp[fno].v[vno]; + break; + case PT_FPCR: + addr = &task->thread.fpstate.fpcr; + break; + case PT_PC: + addr = (void *)task_pt_regs(task) + PT_REGS_PC; + break; + default: + addr = &zero; + } + + return addr; +} + +/* + * Get contents of register REGNO in task TASK. + */ +unsigned long +get_reg(struct task_struct *task, unsigned long regno) +{ + return *get_reg_addr(task, regno); +} + +/* + * Write contents of register REGNO in task TASK. + */ +static int +put_reg(struct task_struct *task, unsigned long regno, unsigned long data) +{ + *get_reg_addr(task, regno) = data; + return 0; +} + +static inline int +read_int(struct task_struct *task, unsigned long addr, int *data) +{ + int copied = access_process_vm(task, addr, data, sizeof(int), FOLL_FORCE); + + return (copied == sizeof(int)) ? 0 : -EIO; +} + +static inline int +write_int(struct task_struct *task, unsigned long addr, int data) +{ + int copied = access_process_vm(task, addr, &data, sizeof(int), + FOLL_FORCE | FOLL_WRITE); + return (copied == sizeof(int)) ? 0 : -EIO; +} + +/* + * Set breakpoint. + */ +int +ptrace_set_bpt(struct task_struct *child) +{ + int displ, i, res, reg_b, nsaved = 0; + unsigned int insn, op_code; + unsigned long pc; + + pc = get_reg(child, PT_PC); + res = read_int(child, pc, (int *)&insn); + if (res < 0) + return res; + + op_code = insn >> 26; + /* br bsr beq bne blt ble bgt bge blbc blbs fbeq fbne fblt fble fbgt fbge */ + if ((1UL << op_code) & 0x3fff000000000030UL) { + /* + * It's a branch: instead of trying to figure out + * whether the branch will be taken or not, we'll put + * a breakpoint at either location. This is simpler, + * more reliable, and probably not a whole lot slower + * than the alternative approach of emulating the + * branch (emulation can be tricky for fp branches). 
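+	 *
+	 * The decode below, ((s32)(insn << 11)) >> 9, shifts the 21-bit
+	 * branch displacement up to the sign bit and arithmetically
+	 * shifts it back, sign-extending it and scaling it by 4
+	 * (instruction words to bytes) in one step.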
+ */ + displ = ((s32)(insn << 11)) >> 9; + task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; + if (displ) /* guard against unoptimized code */ + task_thread_info(child)->bpt_addr[nsaved++] + = pc + 4 + displ; + /*call ret jmp*/ + } else if (op_code >= 0x1 && op_code <= 0x3) { + reg_b = (insn >> 16) & 0x1f; + task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b); + } else { + task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; + } + + /* install breakpoints: */ + for (i = 0; i < nsaved; ++i) { + res = read_int(child, task_thread_info(child)->bpt_addr[i], + (int *)&insn); + if (res < 0) + return res; + task_thread_info(child)->bpt_insn[i] = insn; + res = write_int(child, task_thread_info(child)->bpt_addr[i], + BREAKINST); + if (res < 0) + return res; + } + task_thread_info(child)->bpt_nsaved = nsaved; + return 0; +} + +/* + * Ensure no single-step breakpoint is pending. Returns non-zero + * value if child was being single-stepped. + */ +int +ptrace_cancel_bpt(struct task_struct *child) +{ + int i, nsaved = task_thread_info(child)->bpt_nsaved; + + task_thread_info(child)->bpt_nsaved = 0; + + if (nsaved > 2) { + pr_info("%s: bogus nsaved: %d!\n", __func__, nsaved); + nsaved = 2; + } + + for (i = 0; i < nsaved; ++i) { + write_int(child, task_thread_info(child)->bpt_addr[i], + task_thread_info(child)->bpt_insn[i]); + } + return (nsaved != 0); +} + +void user_enable_single_step(struct task_struct *child) +{ + /* Mark single stepping. */ + task_thread_info(child)->bpt_nsaved = -1; +} + +void user_disable_single_step(struct task_struct *child) +{ + ptrace_cancel_bpt(child); +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure the single step bit is not set. + */ +void ptrace_disable(struct task_struct *child) +{ + user_disable_single_step(child); +} + +static int gpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + return membuf_write(&to, task_pt_regs(target), sizeof(struct user_pt_regs)); +} + +static int gpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + task_pt_regs(target), 0, sizeof(struct user_pt_regs)); +} + +static int fpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + + return membuf_write(&to, &target->thread.fpstate, + sizeof(struct user_fpsimd_state)); +} + +static int fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpstate, 0, + sizeof(struct user_fpsimd_state)); +} + +enum sw64_regset { + REGSET_GPR, + REGSET_FPR, +}; + +static const struct user_regset sw64_regsets[] = { + [REGSET_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(elf_greg_t), + .align = sizeof(elf_greg_t), + .regset_get = gpr_get, + .set = gpr_set + }, + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fpsimd_state) / sizeof(u64), + .size = sizeof(u64), + .align = sizeof(u64), + .regset_get = fpr_get, + .set = fpr_set + }, +}; + +static const struct user_regset_view user_sw64_view = { + .name = "sw64", .e_machine = EM_SW64, + .regsets = sw64_regsets, .n = ARRAY_SIZE(sw64_regsets) +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return 
&user_sw64_view; +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + unsigned long tmp; + size_t copied; + long ret; + + switch (request) { + /* When I and D space are separate, these will need to be fixed. */ + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE); + ret = -EIO; + if (copied != sizeof(tmp)) + break; + + force_successful_syscall_return(); + ret = tmp; + break; + + /* Read register number ADDR. */ + case PTRACE_PEEKUSR: + force_successful_syscall_return(); + ret = get_reg(child, addr); + break; + + /* When I and D space are separate, this will have to be fixed. */ + case PTRACE_POKETEXT: /* write the word at location addr. */ + case PTRACE_POKEDATA: + ret = generic_ptrace_pokedata(child, addr, data); + break; + + case PTRACE_POKEUSR: /* write the specified register */ + ret = put_reg(child, addr, data); + break; + default: + ret = ptrace_request(child, request, addr, data); + break; + } + return ret; +} + +asmlinkage unsigned long syscall_trace_enter(void) +{ + unsigned long ret = 0; + struct pt_regs *regs = current_pt_regs(); + + if (test_thread_flag(TIF_SYSCALL_TRACE) && + ptrace_report_syscall_entry(regs)) + return NO_SYSCALL; + +#ifdef CONFIG_SECCOMP + /* Do seccomp after ptrace, to catch any tracer changes. */ + if (secure_computing() == -1) + return NO_SYSCALL; +#endif + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->regs[0]); + audit_syscall_entry(regs->regs[0], regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19]); + return ret ?: regs->regs[0]; +} + +asmlinkage void +syscall_trace_leave(void) +{ + struct pt_regs *regs = current_pt_regs(); + + audit_syscall_exit(regs); + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ptrace_report_syscall_exit(regs, 0); + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs_return_value(regs)); +} + +#ifdef CONFIG_SUBARCH_C3B +static long rwcsr(int rw, unsigned long csr, unsigned long value) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = rw; + register unsigned long __r17 __asm__("$17") = csr; + register unsigned long __r18 __asm__("$18") = value; + + __asm__ __volatile__( + "sys_call %4" + : "=r"(__r0), "=r"(__r16), "=r"(__r17), "=r"(__r18) + : "i"(HMC_rwreg), "1"(__r16), "2"(__r17), "3"(__r18) + : "$1", "$22", "$23", "$24", "$25"); + + return __r0; +} + +#define RCSR 0 +#define WCSR 1 + +#define CSR_DA_MATCH 0 +#define CSR_DA_MASK 1 +#define CSR_IA_MATCH 2 +#define CSR_IA_MASK 3 +#define CSR_IDA_MATCH 6 +#define CSR_IDA_MASK 7 +#define CSR_DC_CTL 11 +#define CSR_DV_MATCH 15 +#define CSR_DV_MASK 16 + +#define DV_MATCH_EN_S 19 +#define DAV_MATCH_EN_S 20 + +int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) +{ + unsigned long dc_ctl; + unsigned long value; + + pr_info("%s: pid %d, name = %s,cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + __func__, current->pid, current->comm, cause, mmcsr, address, regs->pc); + + switch (mmcsr) { + case MMCSR__DA_MATCH: + case MMCSR__DV_MATCH: + case MMCSR__DAV_MATCH: + show_regs(regs); + + if (!(current->ptrace & PT_PTRACED)) { + pr_notice(" pid %d %s not be ptraced, return\n", current->pid, current->comm); + if (mmcsr == MMCSR__DA_MATCH) + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + if (mmcsr == MMCSR__DV_MATCH) { + value = rwcsr(RCSR, CSR_DV_MATCH, 0); + 
pr_notice("value is %#lx\n", value); + value = rwcsr(RCSR, CSR_DV_MASK, 0); + pr_notice("value is %#lx\n", value); + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~(0x1UL << DV_MATCH_EN_S); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } + if (mmcsr == MMCSR__DAV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + } + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + task_thread_info(current)->pcb.dc_ctl = 0; + return 1; + } + + if (mmcsr == MMCSR__DA_MATCH) { + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~(0x1UL << DV_MATCH_EN_S); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } + if (mmcsr == MMCSR__DAV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + } + task_thread_info(current)->pcb.dv_match = 0; + task_thread_info(current)->pcb.dc_ctl = 0; + pr_notice("do_page_fault: want to send SIGTRAP, pid = %d\n", current->pid); + force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void *) address); + return 1; + + case MMCSR__IA_MATCH: + rwcsr(WCSR, CSR_IA_MATCH, 0); //clear ia_match + return 1; + case MMCSR__IDA_MATCH: + rwcsr(WCSR, CSR_IDA_MATCH, 0); //clear ida_match + return 1; + } + + return 0; +} + +void restore_da_match_after_sched(void) +{ + unsigned long dc_ctl_mode; + unsigned long dc_ctl; + struct pcb_struct *pcb = &task_thread_info(current)->pcb; + + rwcsr(WCSR, CSR_DA_MATCH, 0); + rwcsr(WCSR, CSR_DA_MASK, pcb->da_mask); + rwcsr(WCSR, CSR_DA_MATCH, pcb->da_match); + dc_ctl_mode = pcb->dc_ctl; + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + dc_ctl |= ((dc_ctl_mode << DV_MATCH_EN_S) & ((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S))); + if (dc_ctl_mode & 0x1) { + rwcsr(WCSR, CSR_DV_MATCH, pcb->dv_match); + rwcsr(WCSR, CSR_DV_MASK, pcb->dv_mask); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } +} + +#elif defined(CONFIG_SUBARCH_C4) +int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) +{ + kernel_siginfo_t info; + unsigned long match_ctl, ia_match; + sigval_t sw64_value; + + pr_info("%s: pid %d, name = %s, cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + __func__, current->pid, current->comm, cause, mmcsr, address, regs->pc); + + switch (mmcsr) { + case MMCSR__DA_MATCH: + case MMCSR__DV_MATCH: + case MMCSR__DAV_MATCH: + case MMCSR__IA_MATCH: + case MMCSR__IDA_MATCH: + case MMCSR__IV_MATCH: + show_regs(regs); + + if (!(current->ptrace & PT_PTRACED)) { + pr_notice(" pid %d %s not be ptraced, return\n", current->pid, current->comm); + if (mmcsr == MMCSR__DA_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x3UL << DA_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + task_thread_info(current)->pcb.match_ctl &= ~0x1; + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x1UL << DV_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 1); + task_thread_info(current)->pcb.dv_match = 0; + } + if 
(mmcsr == MMCSR__DAV_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 | (0x1 << 1) | (0x1 << 2)); + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__IA_MATCH) { + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IA_MATCH_EN_S) | (0x7ffffffffffffUL << 2)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 3); + task_thread_info(current)->pcb.ia_match = 0; + } + if (mmcsr == MMCSR__IV_MATCH) { + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + write_csr(0, CSR_IV_MATCH); // clear iv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 4); + task_thread_info(current)->pcb.ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + task_thread_info(current)->pcb.iv_match = 0; + } + if (mmcsr == MMCSR__IDA_MATCH) { + write_csr(0, CSR_IDA_MATCH); // clear ida_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 5); + task_thread_info(current)->pcb.ida_match = 0; + } + return 1; + } + + info.si_signo = SIGTRAP; + info.si_addr = (void *) address; + sw64_value.sival_ptr = (void *)(regs->pc); + info.si_value = sw64_value; + info.si_code = TRAP_HWBKPT; + + if (mmcsr == MMCSR__DA_MATCH) { + info.si_errno = 1; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x3UL << DA_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + task_thread_info(current)->pcb.match_ctl &= ~0x1; + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + info.si_errno = 2; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x1UL << DV_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 1); + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__DAV_MATCH) { + info.si_errno = 3; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 | (0x1 << 1) | (0x1 << 2)); + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__IA_MATCH) { + info.si_errno = 4; + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IA_MATCH_EN_S) | (0x7ffffffffffffUL << 2)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 3); + task_thread_info(current)->pcb.ia_match = 0; + } + if (mmcsr == MMCSR__IV_MATCH) { + info.si_errno = 5; + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + write_csr(0, CSR_IV_MATCH); // clear iv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 4); + task_thread_info(current)->pcb.ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + 
task_thread_info(current)->pcb.iv_match = 0;
+	}
+	if (mmcsr == MMCSR__IDA_MATCH) {
+		info.si_errno = 6;
+		write_csr(0, CSR_IDA_MATCH);	// clear ida_match
+		task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 5);
+		task_thread_info(current)->pcb.ida_match = 0;
+	}
+	pr_notice("do_page_fault: want to send SIGTRAP, pid = %d\n", current->pid);
+	force_sig_info(&info);
+	return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * pcb->match_ctl:
+ *	[0] DA_MATCH
+ *	[1] DV_MATCH
+ *	[2] DAV_MATCH
+ *	[3] IA_MATCH
+ *	[4] IV_MATCH
+ *	[5] IDA_MATCH
+ *	[8:9] match_ctl_mode
+ */
+#define DA_MATCH	0x1
+#define DV_MATCH	0x2
+#define DAV_MATCH	0x4
+#define IA_MATCH	0x8
+#define IV_MATCH	0x10
+#define IDA_MATCH	0x20
+
+void restore_da_match_after_sched(void)
+{
+	unsigned long match_ctl_mode;
+	unsigned long match_ctl;
+	struct pcb_struct *pcb = &task_thread_info(current)->pcb;
+	unsigned long vpn, upn;
+
+	if (!pcb->match_ctl)
+		return;
+	pr_info("Restore MATCH status, pid: %d\n", current->pid);
+
+	if (pcb->match_ctl & DA_MATCH) {
+		write_csr(pcb->da_match, CSR_DA_MATCH);
+		write_csr(pcb->da_mask, CSR_DA_MASK);
+		match_ctl_mode = (pcb->match_ctl >> 8) & 0x3;
+		match_ctl = read_csr(CSR_DC_CTLP);
+		match_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S));
+		match_ctl |= (match_ctl_mode << DA_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH);
+		write_csr(match_ctl, CSR_DC_CTLP);
+		pr_info("da_match:%#lx da_mask:%#lx match_ctl:%#lx\n", pcb->da_match, pcb->da_mask, match_ctl);
+	}
+
+	if (pcb->match_ctl & DV_MATCH) {
+		write_csr(pcb->dv_match, CSR_DV_MATCH);
+		write_csr(pcb->dv_mask, CSR_DV_MASK);
+		match_ctl = read_csr(CSR_DC_CTLP);
+		match_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S));
+		match_ctl |= (0x1UL << DV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH);
+		write_csr(match_ctl, CSR_DC_CTLP);
+		pr_info("dv_match:%#lx dv_mask:%#lx match_ctl:%#lx\n", pcb->dv_match, pcb->dv_mask, match_ctl);
+	}
+
+	if (pcb->match_ctl & DAV_MATCH) {
+		write_csr(pcb->da_match, CSR_DA_MATCH);
+		write_csr(pcb->da_mask, CSR_DA_MASK);
+		write_csr(pcb->dv_match, CSR_DV_MATCH);
+		write_csr(pcb->dv_mask, CSR_DV_MASK);
+		write_csr(0xfffffffff, CSR_DA_MATCH_MODE);
+		match_ctl_mode = (pcb->match_ctl >> 8) & 0x3;
+		match_ctl = read_csr(CSR_DC_CTLP);
+		match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S));
+		match_ctl |= (match_ctl_mode << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S)
+			| (0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S)
+			| (0x3UL << DPM_MATCH);
+		write_csr(match_ctl, CSR_DC_CTLP);
+		pr_info("da_match:%#lx da_mask:%#lx dv_match:%#lx dv_mask:%#lx match_ctl:%#lx\n",
+			pcb->da_match, pcb->da_mask, pcb->dv_match, pcb->dv_mask, match_ctl);
+	}
+
+	if (pcb->match_ctl & IA_MATCH) {
+		pcb->ia_match |= (0x1UL << IA_MATCH_EN_S) | 0x3;
+		pcb->ia_mask |= 0x3;
+		write_csr(pcb->ia_match, CSR_IA_MATCH);
+		write_csr(pcb->ia_mask, CSR_IA_MASK);
+		vpn = read_csr(CSR_VPCR) >> 44;
+		vpn &= 0x3ff;
+		upn = read_csr(CSR_UPCR);
+		upn &= 0x3ff;
+		write_csr(((0x3ff << 18) | vpn), CSR_IA_VPNMATCH);
+		write_csr(((0x3ff << 18) | upn), CSR_IA_UPNMATCH);
+		pr_info("ia_match:%#lx ia_mask:%#lx\n", pcb->ia_match, pcb->ia_mask);
+	}
+	if (pcb->match_ctl & IV_MATCH) {
+		pcb->ia_match |= (0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S) | 0x3;
+		write_csr(pcb->ia_match, CSR_IA_MATCH);
+		write_csr(pcb->iv_match, CSR_IV_MATCH);
+		pr_info("ia_match:%#lx iv_match:%#lx\n", pcb->ia_match, pcb->iv_match);
+	}
+	if (pcb->match_ctl & IDA_MATCH) {
+		pcb->ida_match |= (0x1UL << IDA_MATCH_EN_S) | 0x3;
+		pcb->ida_mask |= 0x3;
+		write_csr(pcb->ida_match, CSR_IDA_MATCH);
+		write_csr(pcb->ida_mask, CSR_IDA_MASK);
+		pr_info("ida_match:%#lx ida_mask:%#lx\n", pcb->ida_match, pcb->ida_mask);
+	}
+}
+#endif
+
+struct pt_regs_offset {
+	const char *name;
+	int offset;
+};
+
+#define GPR_OFFSET_NAME(r) { \
+	.name = "r" #r, \
+	.offset = offsetof(struct pt_regs, regs[r]) \
+}
+
+#define REG_OFFSET_NAME(r) { \
+	.name = #r, \
+	.offset = offsetof(struct pt_regs, r) \
+}
+
+#define REG_OFFSET_END { \
+	.name = NULL, \
+	.offset = 0 \
+}
+
+static const struct pt_regs_offset regoffset_table[] = {
+	GPR_OFFSET_NAME(0),
+	GPR_OFFSET_NAME(1),
+	GPR_OFFSET_NAME(2),
+	GPR_OFFSET_NAME(3),
+	GPR_OFFSET_NAME(4),
+	GPR_OFFSET_NAME(5),
+	GPR_OFFSET_NAME(6),
+	GPR_OFFSET_NAME(7),
+	GPR_OFFSET_NAME(8),
+	GPR_OFFSET_NAME(9),
+	GPR_OFFSET_NAME(10),
+	GPR_OFFSET_NAME(11),
+	GPR_OFFSET_NAME(12),
+	GPR_OFFSET_NAME(13),
+	GPR_OFFSET_NAME(14),
+	GPR_OFFSET_NAME(15),
+	GPR_OFFSET_NAME(16),
+	GPR_OFFSET_NAME(17),
+	GPR_OFFSET_NAME(18),
+	GPR_OFFSET_NAME(19),
+	GPR_OFFSET_NAME(20),
+	GPR_OFFSET_NAME(21),
+	GPR_OFFSET_NAME(22),
+	GPR_OFFSET_NAME(23),
+	GPR_OFFSET_NAME(24),
+	GPR_OFFSET_NAME(25),
+	GPR_OFFSET_NAME(26),
+	GPR_OFFSET_NAME(27),
+	GPR_OFFSET_NAME(28),
+	GPR_OFFSET_NAME(29),
+	GPR_OFFSET_NAME(30),
+	REG_OFFSET_NAME(pc),
+	REG_OFFSET_NAME(ps),
+	REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+	const struct pt_regs_offset *roff;
+
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return roff->offset;
+	return -EINVAL;
+}
+
+static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+	unsigned long ksp = kernel_stack_pointer(regs);
+
+	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+	unsigned long addr;
+
+	addr = kernel_stack_pointer(regs) + n * sizeof(long);
+	if (!regs_within_kernel_stack(regs, addr))
+		return 0;
+	return *(unsigned long *)addr;
+}
diff --git a/arch/sw_64/kernel/relocate.c b/arch/sw_64/kernel/relocate.c
new file mode 100644
index 0000000000000000000000000000000000000000..ebdf7d894805e8f2c0a1853d853d5d2ef8bf6c09
--- /dev/null
+++ b/arch/sw_64/kernel/relocate.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for kernel relocation at boot time.
+ * + * Based on arch/mips/kernel/relocate.c + * + * Copyright (C) 2019 He Sheng + * Authors: He Sheng (hesheng05@gmail.com) + */ +#include +#include +#include + +#include + +#define KTEXT_MAX 0xffffffffa0000000UL +#define RELOCATED(x) ((void *)((unsigned long)x + offset)) + +extern unsigned long _got_start[]; +extern unsigned long _got_end[]; +extern char pre_start_kernel[]; + +extern unsigned int _relocation_start[]; /* End kernel image / start relocation table */ +extern unsigned int _relocation_end[]; /* End relocation table */ + +extern unsigned long __start___ex_table; /* Start exception table */ +extern unsigned long __stop___ex_table; /* End exception table */ +extern union thread_union init_thread_union; + +/* + * This function may be defined for a platform to perform any post-relocation + * fixup necessary. + * Return non-zero to abort relocation + */ +int __weak plat_post_relocation(long offset) +{ + return 0; +} + +static int __init apply_r_sw64_refquad(unsigned long *loc_orig, unsigned long *loc_new, unsigned int offset) +{ + *(unsigned long *)loc_new += offset; + + return 0; +} + +static int (*reloc_handlers_rel[]) (unsigned long *, unsigned long *, unsigned int) __initdata = { + [R_SW64_REFQUAD] = apply_r_sw64_refquad, +}; + +int __init do_relocations(void *kbase_old, void *kbase_new, unsigned int offset) +{ + unsigned int *r; + unsigned long *loc_orig; + unsigned long *loc_new; + int type; + int res; + + for (r = _relocation_start; r < _relocation_end; r++) { + /* Sentinel for last relocation */ + if (*r == 0) + break; + + type = (*r >> 24) & 0xff; + loc_orig = kbase_old + ((*r & 0x00ffffff) << 2); + loc_new = RELOCATED(loc_orig); + + if (reloc_handlers_rel[type] == NULL) { + /* Unsupported relocation */ + pr_err("Unhandled relocation type %d at 0x%pK\n", + type, loc_orig); + return -ENOEXEC; + } + + res = reloc_handlers_rel[type](loc_orig, loc_new, offset); + if (res) + return res; + } + + return 0; +} + +static int __init relocate_got(unsigned int offset) +{ + unsigned long *got_start, *got_end, *e; + + got_start = RELOCATED(&_got_start); + got_end = RELOCATED(&_got_end); + + for (e = got_start; e < got_end; e++) + *e += offset; + + return 0; +} + +#ifdef CONFIG_RANDOMIZE_BASE + +static inline __init unsigned long rotate_xor(unsigned long hash, + const void *area, size_t size) +{ + size_t i; + unsigned long start, *ptr; + /* Make sure start is 8 byte aligned */ + start = ALIGN((unsigned long)area, 8); + size -= (start - (unsigned long)area); + ptr = (unsigned long *) start; + for (i = 0; i < size / sizeof(hash); i++) { + /* Rotate by odd number of bits and XOR. */ + hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); + hash ^= ptr[i]; + } + return hash; +} + +static inline __init unsigned long get_random_boot(void) +{ + unsigned long entropy = random_get_entropy(); + unsigned long hash = 0; + + /* Attempt to create a simple but unpredictable starting entropy. 
*/
+	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
+
+	/* Add in any runtime entropy we can get */
+	hash = rotate_xor(hash, &entropy, sizeof(entropy));
+
+	return hash;
+}
+
+static inline __init bool kaslr_disabled(void)
+{
+	char *str;
+
+	str = strstr(COMMAND_LINE, "nokaslr");
+	if (str == COMMAND_LINE || (str > COMMAND_LINE && *(str - 1) == ' '))
+		return true;
+
+	return false;
+}
+
+static unsigned long __init determine_relocation_offset(void)
+{
+	/* Choose a new address for the kernel */
+	unsigned long kernel_length;
+	unsigned long offset;
+
+	if (kaslr_disabled())
+		return 0;
+
+	kernel_length = (unsigned long)_end - (unsigned long)(&_text);
+
+	/* TODO: offset is 64KB-aligned; maybe 8KB alignment is okay. */
+	offset = get_random_boot() << 16;
+	offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
+	if (offset < kernel_length)
+		offset += ALIGN(kernel_length, 0x10000);
+
+	/*
+	 * TODO: the new location should not overlap initrd, dtb, acpi
+	 * tables, etc.
+	 */
+
+	if ((KTEXT_MAX - (unsigned long)_end) < offset)
+		offset = 0;
+
+	return offset;
+}
+
+#else
+
+static inline unsigned long __init determine_relocation_offset(void)
+{
+	/*
+	 * Choose a new address for the kernel.
+	 * For now we'll hard code the destination offset.
+	 */
+	return 0;
+}
+
+#endif
+
+static inline int __init relocation_offset_valid(unsigned long offset)
+{
+	unsigned long loc_new = (unsigned long)_text + offset;
+
+	if (loc_new & 0x0000ffff) {
+		/* Inappropriately aligned new location */
+		return 0;
+	}
+	if (loc_new < (unsigned long)&_end) {
+		/* New location overlaps original kernel */
+		return 0;
+	}
+	return 1;
+}
+
+unsigned int __init relocate_kernel(void)
+{
+	void *loc_new;
+	unsigned long kernel_length;
+	unsigned long bss_length;
+	unsigned int offset = 0;
+	int res = 1;
+
+	kernel_length = (unsigned long)(&_relocation_start) - (long)(&_text);
+	bss_length = (unsigned long)&__bss_stop - (long)&__bss_start;
+
+	offset = determine_relocation_offset();
+	/* Reset the command line now so we don't end up with a duplicate */
+
+	/* Sanity check relocation address */
+	if (offset && relocation_offset_valid(offset)) {
+
+		loc_new = RELOCATED(&_text);
+		/* Copy the kernel to its new location */
+		memcpy(loc_new, &_text, kernel_length);
+
+		/* Perform relocations on the new kernel */
+		res = do_relocations(&_text, loc_new, offset);
+		if (res < 0)
+			goto out;
+
+		res = relocate_got(offset);
+		if (res < 0)
+			goto out;
+
+		/*
+		 * The original .bss has already been cleared, and
+		 * some variables such as command line parameters
+		 * stored to it, so make a copy in the new location.
+		 */
+		memcpy(RELOCATED(&__bss_start), &__bss_start, bss_length);
+
+		/*
+		 * Last chance for the platform to abort relocation.
+		 * This may also be used by the platform to perform any
+		 * initialisation required now that the new kernel is
+		 * resident in memory and ready to be executed.
+		 */
+		if (plat_post_relocation(offset))
+			goto out;
+
+		/* Return the new kernel's offset */
+		return offset;
+	}
+out:
+	return 0;
+}
+
+/*
+ * Show relocation information on panic.
+ */
+void show_kernel_relocation(const char *level)
+{
+	unsigned long offset;
+
+	offset = __pa_symbol(_text) - __pa_symbol(_TEXT_START);
+
+	if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
+		printk(level);
+		pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
+		pr_cont(" .text @ 0x%pK\n", _text);
+		pr_cont(" .data @ 0x%pK\n", _sdata);
+		pr_cont(" .bss @ 0x%pK\n", __bss_start);
+	}
+}
+
+static int kernel_location_notifier_fn(struct notifier_block *self,
+				       unsigned long v, void *p)
+{
+	show_kernel_relocation(KERN_EMERG);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block kernel_location_notifier = {
+	.notifier_call = kernel_location_notifier_fn
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &kernel_location_notifier);
+	return 0;
+}
+device_initcall(register_kernel_offset_dumper);
diff --git a/arch/sw_64/kernel/relocate_kernel.S b/arch/sw_64/kernel/relocate_kernel.S
new file mode 100644
index 0000000000000000000000000000000000000000..f1a160636212fed8e73dd32616edaea155c51154
--- /dev/null
+++ b/arch/sw_64/kernel/relocate_kernel.S
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * relocate_kernel.S for kexec
+ * Created on Jul 2 2019
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include
+#include
+
+	.align 3
+	.globl relocate_new_kernel
+	.ent relocate_new_kernel
+
+relocate_new_kernel:
+	.prologue 0
+	ldl	a0, arg0
+	ldl	a1, arg1
+	ldl	a2, arg2
+	ldl	a3, arg3
+
+	ldl	s0, kexec_indirection_page
+	ldl	s1, kexec_start_address
+
+process_entry:
+	ldl	s2, 0(s0)
+	addl	s0, 8, s0
+
+	/*
+	 * In case of a kdump/crash kernel, the indirection page is not
+	 * populated as the kernel is directly copied to a reserved location
+	 */
+	beq	s2, done
+
+	/* destination page */
+	and	s2, 0x1, s3
+	beq	s3, 1f
+	bic	s2, 0x1, s4	/* store destination addr in s4 */
+	br	$31, process_entry
+
+1:
+	/* indirection page, update s0 */
+	and	s2, 0x2, s3
+	beq	s3, 1f
+	bic	s2, 0x2, s0
+	br	$31, process_entry
+
+1:
+	/* done page */
+	and	s2, 0x4, s3
+	beq	s3, 1f
+	br	$31, done
+1:
+	/* source page */
+	and	s2, 0x8, s3
+	beq	s3, process_entry
+	bic	s2, 0x8, s2
+	ldi	s6, 0x1
+	sll	s6, (PAGE_SHIFT - 3), s6
+
+copy_word:
+	/* copy page word by word */
+	ldl	s5, 0(s2)
+	stl	s5, 0(s4)
+	addl	s4, 8, s4
+	addl	s2, 8, s2
+	subl	s6, 1, s6
+	beq	s6, process_entry
+	br	$31, copy_word
+	br	$31, process_entry
+
+done:
+#ifdef CONFIG_CRASH_SMP	/* unsupported now!!!! */
+	/*
+	 * The kexec_flag reset is a signal to other CPUs that the kernel
+	 * was moved to its location. Note - we need the relocated address
+	 * of kexec_flag.
+	 */
+
+	br	ra, 1f
+1:	mov	ra, t1
+	ldi	t2, 1b
+	ldi	t0, kexec_flag
+	subl	t0, t2, t0
+	addl	t1, t0, t0
+	stl	zero, 0(t0)
+#endif
+	memb
+	jmp	ra, (s1)
+	.end relocate_new_kernel
+	.size relocate_new_kernel, .-relocate_new_kernel
+
+#ifdef CONFIG_CRASH_SMP
+	/*
+	 * Other CPUs should wait until code is relocated and
+	 * then start at entry (?) point.
+	 */
+	.align 3
+	.globl kexec_smp_wait
+	.ent kexec_smp_wait
+kexec_smp_wait:
+	ldl	a0, s_arg0
+	ldl	a1, s_arg1
+	ldl	a2, s_arg2
+	ldl	a3, s_arg3
+	ldl	s1, kexec_start_address
+
+	/*
+	 * Non-relocated addresses work for args and kexec_start_address
+	 * (the old kernel is not overwritten). But we need the relocated
+	 * address of kexec_flag.
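+	 * The bsr/ldi sequence below computes it: t1 receives the runtime
+	 * address of label 1, the two ldi instructions yield link-time
+	 * addresses, so t0 = t1 + (&kexec_flag - 1b) is kexec_flag's
+	 * relocated address.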
+	 */
+
+	bsr	ra, 1f
+1:	mov	ra, t1
+	ldi	t2, 1b
+	ldi	t0, kexec_flag
+	subl	t0, t2, t0
+	addl	t1, t0, t0
+
+	/* spin until relocate_new_kernel clears kexec_flag */
+1:	ldl	s0, 0(t0)
+	bne	s0, 1b
+	memb
+	jmp	ra, (s1)
+	.end kexec_smp_wait
+	.size kexec_smp_wait, .-kexec_smp_wait
+#endif
+
+	.align 3
+
+	/*
+	 * All parameters to the new kernel are passed in registers a0-a3.
+	 * kexec_args[0..3] are used to prepare register values.
+	 */
+
+kexec_args:
+	.globl kexec_args
+arg0:	.quad	0x0
+arg1:	.quad	0x0
+arg2:	.quad	0x0
+arg3:	.quad	0x0
+	.size	kexec_args, 8*4
+
+#ifdef CONFIG_CRASH_SMP
+	/*
+	 * Secondary CPUs may have different kernel parameters in
+	 * their registers a0-a3. secondary_kexec_args[0..3] are used
+	 * to prepare register values.
+	 */
+secondary_kexec_args:
+	.globl secondary_kexec_args
+s_arg0:	.quad	0x0
+s_arg1:	.quad	0x0
+s_arg2:	.quad	0x0
+s_arg3:	.quad	0x0
+	.size	secondary_kexec_args, 8*4
+
+kexec_flag:
+	.quad	0x1
+#endif
+
+kexec_start_address:
+	.globl kexec_start_address
+	.quad	0x0
+	.size	kexec_start_address, 8
+
+kexec_indirection_page:
+	.globl kexec_indirection_page
+	.quad	0
+	.size	kexec_indirection_page, 8
+
+relocate_new_kernel_end:
+
+relocate_new_kernel_size:
+	.global relocate_new_kernel_size
+	.quad	relocate_new_kernel_end - relocate_new_kernel
+	.size	relocate_new_kernel_size, 8
diff --git a/arch/sw_64/kernel/reset.c b/arch/sw_64/kernel/reset.c
new file mode 100644
index 0000000000000000000000000000000000000000..955339557a7a1d8eec504f0f85ad0241e82fc833
--- /dev/null
+++ b/arch/sw_64/kernel/reset.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Sunway Technology Corporation Limited
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+void fix_jm585_reset(void)
+{
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+	int val;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_JMICRON,
+			      0x0585, NULL);
+	if (pdev) {
+		hose = pci_bus_to_pci_controller(pdev->bus);
+		val = read_rc_conf(hose->node, hose->index,
+				   RC_PORT_LINK_CTL);
+		write_rc_conf(hose->node, hose->index,
+			      RC_PORT_LINK_CTL, val | 0x8);
+		write_rc_conf(hose->node, hose->index,
+			      RC_PORT_LINK_CTL, val);
+	}
+}
+
+static void default_halt(void)
+{
+	local_irq_disable();
+
+	pr_notice("\n\n** You can safely turn off the power now **\n\n");
+
+	while (true)
+		arch_cpu_idle();
+}
+
+static void default_poweroff(void)
+{
+	/* No point in taking interrupts anymore. */
+	local_irq_disable();
+#ifdef CONFIG_EFI
+	efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+#endif
+	while (true)
+		arch_cpu_idle();
+}
+
+static void default_restart(void)
+{
+	/* No point in taking interrupts anymore.
*/ + local_irq_disable(); + + fix_jm585_reset(); +#ifdef CONFIG_EFI + if (efi_capsule_pending(NULL)) + efi_reboot(REBOOT_WARM, NULL); + else + efi_reboot(REBOOT_COLD, NULL); +#endif + + while (true) + arch_cpu_idle(); +} + +void (*pm_restart)(void); + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void (*pm_halt)(void); + +void machine_halt(void) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + pm_halt(); +} + +void machine_power_off(void) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + pm_power_off(); +} + +void machine_restart(char *command) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + do_kernel_restart(command); + pm_restart(); +} + +static int __init sw64_reboot_setup(void) +{ + pm_restart = default_restart; + pm_power_off = default_poweroff; + pm_halt = default_halt; + + return 0; +} +arch_initcall(sw64_reboot_setup); diff --git a/arch/sw_64/kernel/segvdbg.c b/arch/sw_64/kernel/segvdbg.c new file mode 100644 index 0000000000000000000000000000000000000000..148d639a08dbb721ee3dd8a10e076937fe254f6a --- /dev/null +++ b/arch/sw_64/kernel/segvdbg.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Zhi Tongze + * Author: Zhi Tongze + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include + +#include + +extern bool segv_debug_enabled; + +static int __init segv_debug_init(void) +{ + if (!sw64_debugfs_dir) + return -ENODEV; + + debugfs_create_bool("segv_debug", 0644, + sw64_debugfs_dir, &segv_debug_enabled); + return 0; +} +late_initcall(segv_debug_init); diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c new file mode 100644 index 0000000000000000000000000000000000000000..0c1ddb9b46d7bb0e74308a97a1830730ad3a44eb --- /dev/null +++ b/arch/sw_64/kernel/setup.c @@ -0,0 +1,1061 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Bootup setup stuff. + */ + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_MAGIC_SYSRQ +#include +#include +#endif +#ifdef CONFIG_DEBUG_FS +#include +#endif +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +#undef DEBUG_DISCONTIG +#ifdef DEBUG_DISCONTIG +#define DBGDCONT(args...) pr_debug(args) +#else +#define DBGDCONT(args...) +#endif + +int __cpu_to_rcid[NR_CPUS]; /* Map logical to physical */ +EXPORT_SYMBOL(__cpu_to_rcid); + +DEFINE_PER_CPU(unsigned long, hard_node_id) = { 0 }; +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +struct cma *sw64_kvm_cma; +EXPORT_SYMBOL(sw64_kvm_cma); + +static phys_addr_t kvm_mem_size; +static phys_addr_t kvm_mem_base; + +struct gen_pool *sw64_kvm_pool; +EXPORT_SYMBOL(sw64_kvm_pool); +#endif +#endif + +static inline int phys_addr_valid(unsigned long addr) +{ + /* + * At this point memory probe has not been done such that max_pfn + * and other physical address variables cannot be used, so let's + * roughly judge physical address based on arch specific bit. 
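+	 * For example, with cpu_desc.pa_bits == 48 this rejects any
+	 * address that has bit 47 or above set.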
+ */ + return !(addr >> (cpu_desc.pa_bits - 1)); +} + +extern struct atomic_notifier_head panic_notifier_list; +static int sw64_panic_event(struct notifier_block *, unsigned long, void *); +static struct notifier_block sw64_panic_block = { + sw64_panic_event, + NULL, + INT_MAX /* try to do it first */ +}; + +/* the value is IOR: CORE_ONLIE*/ +cpumask_t core_start = CPU_MASK_NONE; + +static struct resource data_resource = { + .name = "Kernel data", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM +}; + +static struct resource code_resource = { + .name = "Kernel code", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM +}; + +static struct resource bss_resource = { + .name = "Kernel bss", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM +}; + +/* A collection of per-processor data. */ +struct cpuinfo_sw64 cpu_data[NR_CPUS]; +EXPORT_SYMBOL(cpu_data); + +DEFINE_STATIC_KEY_TRUE(run_mode_host_key); +DEFINE_STATIC_KEY_FALSE(run_mode_guest_key); +DEFINE_STATIC_KEY_FALSE(run_mode_emul_key); +struct cpu_desc_t cpu_desc; +struct socket_desc_t socket_desc[MAX_NUMSOCKETS]; +int memmap_nr; +struct memmap_entry memmap_map[MAX_NUMMEMMAPS]; +bool memblock_initialized; + +cpumask_t cpu_offline = CPU_MASK_NONE; + +static char command_line[COMMAND_LINE_SIZE] __initdata; +#ifdef CONFIG_CMDLINE_BOOL +static char builtin_cmdline[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; +#endif + +/* boot_params */ +struct boot_params *sunway_boot_params = (struct boot_params *) (PARAM + 0x100); + +/* + * The format of "screen_info" is strange, and due to early + * i386-setup code. This is just enough to make the console + * code think we're on a VGA color display. + */ + +struct screen_info screen_info = { + .orig_x = 0, + .orig_y = 25, + .orig_video_cols = 80, + .orig_video_lines = 25, + .orig_video_isVGA = 1, + .orig_video_points = 16 +}; +EXPORT_SYMBOL(screen_info); + +/* + * Move global data into per-processor storage. + */ +void store_cpu_data(int cpu) +{ + cpu_data[cpu].last_asid = ASID_FIRST_VERSION; +} + +#ifdef CONFIG_KEXEC + +void *kexec_control_page; + +#define KTEXT_MAX KERNEL_IMAGE_SIZE + +static void __init kexec_control_page_init(void) +{ + phys_addr_t addr; + + addr = memblock_phys_alloc_range(KEXEC_CONTROL_PAGE_SIZE, PAGE_SIZE, + 0, KTEXT_MAX); + kexec_control_page = (void *)(__START_KERNEL_map + addr); +} + +/* + * reserve_crashkernel() - reserves memory are for crash kernel + * + * This function reserves memory area given in "crashkernel=" kernel command + * line parameter. The memory reserved is used by a dump capture kernel when + * primary kernel is crashing. 
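+ * The expected syntax is "crashkernel=size@offset", e.g.
+ * "crashkernel=256M@64M". Both the size and the base must be given,
+ * as the checks below bail out when either is zero.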
+ */
+static void __init reserve_crashkernel(void)
+{
+	unsigned long long crash_size, crash_base;
+	int ret;
+
+	ret = parse_crashkernel(boot_command_line, mem_desc.size,
+			&crash_size, &crash_base);
+	if (ret)
+		return;
+
+	if (!crash_size) {
+		pr_warn("size of crash kernel memory unspecified, no memory reserved for crash kernel\n");
+		return;
+	}
+	if (!crash_base) {
+		pr_warn("base of crash kernel memory unspecified, no memory reserved for crash kernel\n");
+		return;
+	}
+
+	if (!memblock_is_region_memory(crash_base, crash_size))
+		memblock_add(crash_base, crash_size);
+
+	ret = memblock_reserve(crash_base, crash_size);
+	if (ret < 0) {
+		pr_warn("crashkernel reservation failed - memory is in use [mem %#018llx-%#018llx]\n",
+			crash_base, crash_base + crash_size - 1);
+		return;
+	}
+
+	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
+		(unsigned long)(crash_size >> 20),
+		(unsigned long)(crash_base >> 20),
+		(unsigned long)(mem_desc.size >> 20));
+
+	ret = add_memmap_region(crash_base, crash_size, memmap_crashkernel);
+	if (ret)
+		pr_warn("Add crash kernel area [mem %#018llx-%#018llx] to memmap region failed.\n",
+			crash_base, crash_base + crash_size - 1);
+
+	if (crash_base >= KERNEL_IMAGE_SIZE)
+		pr_warn("Crash base should be less than %#x\n", KERNEL_IMAGE_SIZE);
+
+	crashk_res.start = crash_base;
+	crashk_res.end = crash_base + crash_size - 1;
+	insert_resource(&iomem_resource, &crashk_res);
+}
+#else /* !defined(CONFIG_KEXEC) */
+static void __init reserve_crashkernel(void) {}
+static void __init kexec_control_page_init(void) {}
+#endif /* !defined(CONFIG_KEXEC) */
+
+/*
+ * I/O resources inherited from PeeCees.  Except for perhaps the
+ * turbochannel SWs, everyone has these on some sort of SuperIO chip.
+ *
+ * ??? If this becomes less standard, move the struct out into the
+ * machine vector.
+ */
+
+static void __init
+reserve_std_resources(void)
+{
+	static struct resource standard_io_resources[] = {
+		{ .name = "rtc", .start = -1, .end = -1 },
+		{ .name = "dma1", .start = 0x00, .end = 0x1f },
+		{ .name = "pic1", .start = 0x20, .end = 0x3f },
+		{ .name = "timer", .start = 0x40, .end = 0x5f },
+		{ .name = "keyboard", .start = 0x60, .end = 0x6f },
+		{ .name = "dma page reg", .start = 0x80, .end = 0x8f },
+		{ .name = "pic2", .start = 0xa0, .end = 0xbf },
+		{ .name = "dma2", .start = 0xc0, .end = 0xdf },
+	};
+
+	struct resource *io = &ioport_resource;
+	size_t i;
+
+	if (hose_head) {
+		struct pci_controller *hose;
+
+		for (hose = hose_head; hose; hose = hose->next)
+			if (hose->index == 0) {
+				io = hose->io_space;
+				break;
+			}
+	}
+
+	/* Fix up for the Jensen's queer RTC placement.
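+	 * RTC_PORT(0) is typically 0x70 on such systems, so the range is
+	 * patched in at run time rather than hard-coded in the table.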
*/ + standard_io_resources[0].start = RTC_PORT(0); + standard_io_resources[0].end = RTC_PORT(0) + 0x10; + + for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i) + request_resource(io, standard_io_resources+i); +} + +static int __init parse_memmap_one(char *p) +{ + char *oldp; + u64 start_at, mem_size; + int ret; + + if (!p) + return -EINVAL; + + if (!strncmp(p, "exactmap", 8)) { + pr_err("\"memmap=exactmap\" not valid on sw64\n"); + return 0; + } + + oldp = p; + mem_size = memparse(p, &p); + if (p == oldp) + return -EINVAL; + + if (*p == '@') { + pr_err("\"memmap=nn@ss\" invalid on sw64\n"); + } else if (*p == '#') { + pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on sw64\n"); + } else if (*p == '$') { + start_at = memparse(p + 1, &p); + ret = add_memmap_region(start_at, mem_size, memmap_reserved); + if (ret) + return ret; + } else { + return -EINVAL; + } + return *p == '\0' ? 0 : -EINVAL; +} + +static int __init setup_memmap(char *str) +{ + while (str) { + char *k = strchr(str, ','); + + if (k) + *k++ = 0; + + parse_memmap_one(str); + str = k; + } + + return 0; +} +early_param("memmap", setup_memmap); + +static int __init setup_cpuoffline(char *p) +{ + cpulist_parse(p, &cpu_offline); + cpumask_clear_cpu(0, &cpu_offline); + return 0; +} +early_param("cpuoffline", setup_cpuoffline); + +#ifdef CONFIG_BLK_DEV_INITRD +static void * __init move_initrd(unsigned long mem_limit) +{ + void *start; + unsigned long size; + + size = initrd_end - initrd_start; + start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0); + if (!start || __pa(start) + size > mem_limit) { + initrd_start = initrd_end = 0; + return NULL; + } + memmove(start, (void *)initrd_start, size); + initrd_start = (unsigned long)start; + initrd_end = initrd_start + size; + pr_info("initrd moved to 0x%px\n", start); + return start; +} +#else +static void * __init move_initrd(unsigned long mem_limit) +{ + return NULL; +} +#endif + +static bool __init memmap_range_valid(phys_addr_t base, phys_addr_t *size) +{ + if (base > memblock_end_of_DRAM()) + return false; + + if ((base + *size) > memblock_end_of_DRAM()) + *size = memblock_end_of_DRAM() - base; + + return true; +} + +void __init process_memmap(void) +{ + static int i; // Make it static so we won't start over again every time. 
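+	// add_memmap_region() re-invokes process_memmap() each time a new
+	// region is appended; resuming from i skips already handled entries.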
+ int ret; + phys_addr_t base, size; + unsigned long dma_end __maybe_unused = (MAX_DMA32_PFN << PAGE_SHIFT); + + if (!memblock_initialized) + return; + + for (; i < memmap_nr; i++) { + base = memmap_map[i].addr; + size = memmap_map[i].size; + switch (memmap_map[i].type) { + case memmap_reserved: + if (!memmap_range_valid(base, &size)) { + pr_err("reserved memmap region [mem %#018llx-%#018llx] beyond end of memory (%#018llx)\n", + base, base + size - 1, memblock_end_of_DRAM()); + } else { + pr_info("reserved memmap region [mem %#018llx-%#018llx]\n", + base, base + size - 1); + ret = memblock_mark_nomap(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (base < dma_end)) + pr_warn("memmap region [mem %#018llx-%#018llx] overlapped with DMA32 region\n", + base, base + size - 1); + } + break; + case memmap_pci: + if (!memmap_range_valid(base, &size)) { + pr_err("pci memmap region [mem %#018llx-%#018llx] beyond end of memory (%#018llx)\n", + base, base + size - 1, memblock_end_of_DRAM()); + } else { + pr_info("pci memmap region [mem %#018llx-%#018llx]\n", + base, base + size - 1); + ret = memblock_mark_nomap(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + } + break; + case memmap_initrd: + if ((base + size) > memblock_end_of_DRAM()) { + phys_addr_t old_base = base; + + base = (unsigned long) move_initrd(memblock_end_of_DRAM()); + if (!base) { + pr_err("initrd memmap region [mem %#018llx-%#018llx] extends beyond end of memory (%#018llx)\n", + old_base, old_base + size - 1, memblock_end_of_DRAM()); + break; + } + memmap_map[i].addr = base; + } + pr_info("initrd memmap region [mem %#018llx-%#018llx]\n", base, base + size - 1); + ret = memblock_reserve(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + break; + case memmap_kvm: + case memmap_crashkernel: + /* kvm and crashkernel are handled elsewhere, skip */ + break; + case memmap_acpi: + pr_err("ACPI memmap region is not supported.\n"); + break; + case memmap_use: + pr_err("Force usage memmap region is not supported.\n"); + break; + case memmap_protected: + pr_err("Protected memmap region is not supported.\n"); + break; + default: + pr_err("Unknown type of memmap region.\n"); + } + } +} + +int __init add_memmap_region(u64 addr, u64 size, enum memmap_types type) +{ + if (memmap_nr >= ARRAY_SIZE(memmap_map)) { + pr_err("Ooops! 
Too many entries in the memory map!\n"); + return -EPERM; + } + + if (addr + size <= addr) { + pr_warn("Trying to add an invalid memory region, skipped\n"); + return -EINVAL; + } + + memmap_map[memmap_nr].addr = addr; + memmap_map[memmap_nr].size = size; + memmap_map[memmap_nr].type = type; + memmap_nr++; + + process_memmap(); + + return 0; +} + +static struct resource* __init +insert_ram_resource(u64 start, u64 end, bool reserved) +{ + struct resource *res = + kzalloc(sizeof(struct resource), GFP_ATOMIC); + if (!res) + return NULL; + if (reserved) { + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + } else { + res->name = "System RAM"; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + } + res->start = start; + res->end = end; + if (insert_resource(&iomem_resource, res)) { + kfree(res); + return NULL; + } + return res; +} + +static int __init request_standard_resources(void) +{ + struct memblock_region *mblk; + + extern char _text[], _etext[]; + extern char _sdata[], _edata[]; + extern char __bss_start[], __bss_stop[]; + + for_each_mem_region(mblk) { + if (!memblock_is_nomap(mblk)) + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 0); + else + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 1); + } + + code_resource.start = __pa_symbol(_text); + code_resource.end = __pa_symbol(_etext)-1; + data_resource.start = __pa_symbol(_sdata); + data_resource.end = __pa_symbol(_edata)-1; + bss_resource.start = __pa_symbol(__bss_start); + bss_resource.end = __pa_symbol(__bss_stop)-1; + + insert_resource(&iomem_resource, &code_resource); + insert_resource(&iomem_resource, &data_resource); + insert_resource(&iomem_resource, &bss_resource); + + return 0; +} +subsys_initcall(request_standard_resources); + +#ifdef CONFIG_NUMA +extern void cpu_set_node(void); +#endif + +static void __init show_socket_mem_layout(void) +{ + int i; + phys_addr_t base, size, end; + + base = 0; + + pr_info("Socket memory layout:\n"); + for (i = 0; i < MAX_NUMSOCKETS; i++) { + if (socket_desc[i].is_online) { + size = socket_desc[i].socket_mem; + end = base + size - 1; + pr_info("Socket %d: [mem %#018llx-%#018llx], size %llu\n", + i, base, end, size); + base = end + 1; + } + } + pr_info("Reserved memory size for Socket 0: %#lx\n", NODE0_START); +} + +int page_is_ram(unsigned long pfn) +{ + pfn <<= PAGE_SHIFT; + + return pfn >= mem_desc.base && pfn < (mem_desc.base + mem_desc.size); +} + +static int __init topology_init(void) +{ + int i, ret; + + for_each_possible_cpu(i) { + struct cpu *cpu = &per_cpu(cpu_devices, i); + +#ifdef CONFIG_HOTPLUG_CPU + if (i != 0) + cpu->hotpluggable = 1; +#endif + ret = register_cpu(cpu, i); + if (unlikely(ret)) + pr_warn("Warning: %s: register_cpu %d failed (%d)\n", + __func__, i, ret); + } + + return 0; +} +subsys_initcall(topology_init); + +static void __init setup_machine_fdt(void) +{ +#ifdef CONFIG_USE_OF + void *dt_virt; + const char *name; + + /* Give a chance to select kernel builtin DTB firstly */ + if (IS_ENABLED(CONFIG_BUILTIN_DTB)) + dt_virt = (void *)__dtb_start; + else { + dt_virt = (void *)sunway_boot_params->dtb_start; + if (virt_to_phys(dt_virt) < virt_to_phys(__bss_stop)) { + pr_emerg("BUG: DTB has been corrupted by kernel image!\n"); + while (true) + cpu_relax(); + } + } + + if (!phys_addr_valid(__boot_pa(dt_virt)) || + !early_init_dt_scan(dt_virt)) { + pr_crit("\n" + "Error: invalid device tree blob at virtual address %px\n" + "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n" + "\nPlease check your bootloader.", + 
dt_virt); + + while (true) + cpu_relax(); + } + + name = of_flat_dt_get_machine_name(); + if (!name) + return; + + pr_info("Machine model: %s\n", name); +#else + pr_info("Kernel disable device tree support.\n"); + return; +#endif +} + +void __init device_tree_init(void) +{ + unflatten_and_copy_device_tree(); + sunway_boot_params->dtb_start = (__u64)initial_boot_params; +} + +static void __init setup_cpu_info(void) +{ + int i; + struct cache_desc *c; + unsigned long val; + + val = cpuid(GET_TABLE_ENTRY, 0); + cpu_desc.model = CPUID_MODEL(val); + cpu_desc.family = CPUID_FAMILY(val); + cpu_desc.chip_var = CPUID_CHIP_VAR(val); + cpu_desc.arch_var = CPUID_ARCH_VAR(val); + cpu_desc.arch_rev = CPUID_ARCH_REV(val); + cpu_desc.pa_bits = CPUID_PA_BITS(val); + cpu_desc.va_bits = CPUID_VA_BITS(val); + + for (i = 0; i < VENDOR_ID_MAX; i++) { + val = cpuid(GET_VENDOR_ID, i); + memcpy(cpu_desc.vendor_id + (i * 8), &val, 8); + } + + for (i = 0; i < MODEL_MAX; i++) { + val = cpuid(GET_MODEL, i); + memcpy(cpu_desc.model_id + (i * 8), &val, 8); + } + + cpu_desc.frequency = cpuid(GET_CPU_FREQ, 0) * 1000UL * 1000UL; + + for (i = 0; i < NR_CPUS; i++) { + c = &(cpu_data[i].icache); + val = cpuid(GET_CACHE_INFO, L1_ICACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + + c = &(cpu_data[i].dcache); + val = cpuid(GET_CACHE_INFO, L1_DCACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + + c = &(cpu_data[i].scache); + val = cpuid(GET_CACHE_INFO, L2_CACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + + c = &(cpu_data[i].tcache); + val = cpuid(GET_CACHE_INFO, L3_CACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + } +} + +static void __init setup_run_mode(void) +{ + if (*(unsigned long *)MMSIZE) { + static_branch_disable(&run_mode_host_key); + if (*(unsigned long *)MMSIZE & EMUL_FLAG) { + pr_info("run mode: emul\n"); + static_branch_disable(&run_mode_guest_key); + static_branch_enable(&run_mode_emul_key); + + } else { + pr_info("run mode: guest\n"); + static_branch_enable(&run_mode_guest_key); + static_branch_disable(&run_mode_emul_key); + } + } else { + pr_info("run mode: host\n"); + static_branch_enable(&run_mode_host_key); + static_branch_disable(&run_mode_guest_key); + static_branch_disable(&run_mode_emul_key); + } +} + +static void __init setup_socket_info(void) +{ + int i; + int numsockets = sw64_chip->get_cpu_num(); + + memset(socket_desc, 0, MAX_NUMSOCKETS * sizeof(struct socket_desc_t)); + + for (i = 0; i < numsockets; i++) { + socket_desc[i].is_online = 1; + if (sw64_chip_init->early_init.get_node_mem) + socket_desc[i].socket_mem = sw64_chip_init->early_init.get_node_mem(i); + } +} + +#ifdef CONFIG_BLK_DEV_INITRD +static void __init reserve_mem_for_initrd(void) +{ + int ret; + + initrd_start = sunway_boot_params->initrd_start; + if (initrd_start) { + initrd_start = __pa(initrd_start) + PAGE_OFFSET; + initrd_end = initrd_start + sunway_boot_params->initrd_size; + pr_info("Initial ramdisk at: 0x%px (%llu bytes)\n", + (void *)initrd_start, sunway_boot_params->initrd_size); + + ret = add_memmap_region(__pa(initrd_start), initrd_end - initrd_start, memmap_initrd); + 
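+		/* Registering the area with the memmap list lets
+		 * process_memmap() relocate it later if it extends past
+		 * the end of DRAM.
+		 */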
if (ret) + pr_err("Add initrd area [mem %#018lx-%#018lx] to memmap region failed.\n", + __pa(initrd_start), __pa(initrd_end - 1)); + } +} +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +static int __init early_kvm_reserved_mem(char *p) +{ + if (!p) { + pr_err("Config string not provided\n"); + return -EINVAL; + } + + kvm_mem_size = memparse(p, &p); + if (*p != '@') + return -EINVAL; + kvm_mem_base = memparse(p + 1, &p); + return 0; +} +early_param("kvm_mem", early_kvm_reserved_mem); + +void __init sw64_kvm_reserve(void) +{ + kvm_cma_declare_contiguous(kvm_mem_base, kvm_mem_size, 0, + PAGE_SIZE, 0, "sw64_kvm_cma", &sw64_kvm_cma); +} +#endif +#endif + +void __init +setup_arch(char **cmdline_p) +{ + /** + * Work around the unaligned access exception to parse ACPI + * tables in the following function acpi_boot_table_init(). + */ + trap_init(); + + jump_label_init(); + setup_cpu_info(); + setup_run_mode(); + setup_chip_ops(); + setup_socket_info(); + show_socket_mem_layout(); + sw64_chip_init->early_init.setup_core_map(&core_start); + if (is_guest_or_emul()) + get_vt_smp_info(); + + setup_sched_clock(); + + setup_machine_fdt(); + + /* Register a call for panic conditions. */ + atomic_notifier_chain_register(&panic_notifier_list, + &sw64_panic_block); + + callback_init(); + + /* command line */ + if (!sunway_boot_params->cmdline) + sunway_boot_params->cmdline = (unsigned long)COMMAND_LINE; + + strscpy(boot_command_line, (char *)sunway_boot_params->cmdline, COMMAND_LINE_SIZE); + +#if IS_ENABLED(CONFIG_CMDLINE_BOOL) +#if IS_ENABLED(CONFIG_CMDLINE_OVERRIDE) + strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); + strscpy((char *)sunway_boot_params->cmdline, boot_command_line, COMMAND_LINE_SIZE); +#else + if (builtin_cmdline[0]) { + /* append builtin to boot loader cmdline */ + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); + } +#endif /* CMDLINE_EXTEND */ +#endif + + strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE); + *cmdline_p = command_line; + + /* + * Process command-line arguments. + */ + parse_early_param(); + + /* Find our memory. */ + mem_detect(); + +#ifdef CONFIG_PCI + reserve_mem_for_pci(); +#endif + +#ifdef CONFIG_BLK_DEV_INITRD + reserve_mem_for_initrd(); +#endif + + sw64_memblock_init(); + + reserve_crashkernel(); + + /* Reserve large chunks of memory for use by CMA for KVM. */ +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) + sw64_kvm_reserve(); +#endif +#endif + + efi_init(); + + /* Try to upgrade ACPI tables via initrd */ + acpi_table_upgrade(); + + /* Parse the ACPI tables for possible boot-time configuration */ + acpi_boot_table_init(); + +#ifdef CONFIG_SMP + setup_smp(); +#else + store_cpu_data(0); +#endif + + sw64_numa_init(); + + memblock_dump_all(); + + sparse_init(); + + zone_sizes_init(); + + paging_init(); + + kexec_control_page_init(); + + /* + * Initialize the machine. Usually has to do with setting up + * DMA windows and the like. + */ + sw64_init_arch(); + + /* Reserve standard resources. */ + reserve_std_resources(); + + /* + * Give us a default console. TGA users will see nothing until + * chr_dev_init is called, rather late in the boot sequence. + */ + +#ifdef CONFIG_VT +#if defined(CONFIG_VGA_CONSOLE) + conswitchp = &vga_con; +#elif defined(CONFIG_DUMMY_CONSOLE) + conswitchp = &dummy_con; +#endif +#endif + + /* Default root filesystem to sda2. 
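+	 * This is only a fallback; a root= option on the kernel command
+	 * line overrides it.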
*/ + ROOT_DEV = MKDEV(SCSI_DISK0_MAJOR, 2); + + if (acpi_disabled) { +#ifdef CONFIG_NUMA + cpu_set_node(); +#endif + device_tree_init(); + } +} + + +static int +show_cpuinfo(struct seq_file *f, void *slot) +{ + int i; + unsigned long cpu_freq; + + cpu_freq = cpuid(GET_CPU_FREQ, 0); + + for_each_online_cpu(i) { + /* + * glibc reads /proc/cpuinfo to determine the number of + * online processors, looking for lines beginning with + * "processor". Give glibc what it expects. + */ + seq_printf(f, "processor\t: %u\n" + "vendor_id\t: %s\n" + "cpu family\t: %d\n" + "model\t\t: %u\n" + "model name\t: %s CPU @ %lu.%lu%luGHz\n" + "cpu variation\t: %u\n" + "cpu revision\t: %u\n", + i, cpu_desc.vendor_id, cpu_desc.family, + cpu_desc.model, cpu_desc.model_id, + cpu_freq / 1000, (cpu_freq % 1000) / 100, + (cpu_freq % 100) / 10, + cpu_desc.arch_var, cpu_desc.arch_rev); + seq_printf(f, "cpu MHz\t\t: %lu.00\n" + "cache size\t: %u KB\n" + "physical id\t: %d\n" + "bogomips\t: %lu.%02lu\n", + get_cpu_freq() / 1000 / 1000, cpu_data[i].tcache.size >> 10, + cpu_topology[i].package_id, + loops_per_jiffy / (500000/HZ), + (loops_per_jiffy / (5000/HZ)) % 100); + + seq_printf(f, "flags\t\t: fpu simd vpn upn cpuid\n"); + seq_printf(f, "page size\t: %d\n", 8192); + seq_printf(f, "cache_alignment\t: %d\n", cpu_data[i].tcache.linesz); + seq_printf(f, "address sizes\t: %u bits physical, %u bits virtual\n\n", + cpu_desc.pa_bits, cpu_desc.va_bits); + } + return 0; +} + +/* + * We show only CPU #0 info. + */ +static void * +c_start(struct seq_file *f, loff_t *pos) +{ + return *pos < 1 ? (void *)1 : NULL; +} + +static void * +c_next(struct seq_file *f, void *v, loff_t *pos) +{ + (*pos)++; + return NULL; +} + +static void +c_stop(struct seq_file *f, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + + +static int +sw64_panic_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + return NOTIFY_DONE; +} + +static __init int add_pcspkr(void) +{ + struct platform_device *pd; + int ret; + + pd = platform_device_alloc("pcspkr", -1); + if (!pd) + return -ENOMEM; + + ret = platform_device_add(pd); + if (ret) + platform_device_put(pd); + + return ret; +} +device_initcall(add_pcspkr); + +#ifdef CONFIG_DEBUG_FS +struct dentry *sw64_debugfs_dir; +EXPORT_SYMBOL(sw64_debugfs_dir); + +static int __init debugfs_sw64(void) +{ + struct dentry *d; + + d = debugfs_create_dir("sw64", NULL); + if (!d) + return -ENOMEM; + sw64_debugfs_dir = d; + return 0; +} +arch_initcall(debugfs_sw64); +#endif + +#ifdef CONFIG_OF +static int __init sw64_of_init(void) +{ + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); + return 0; +} +core_initcall(sw64_of_init); +#endif + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +static int __init sw64_kvm_pool_init(void) +{ + int status = 0; + unsigned long kvm_pool_virt; + struct page *base_page, *end_page, *p; + + if (!sw64_kvm_cma) + goto out; + + kvm_pool_virt = (unsigned long)kvm_mem_base; + + sw64_kvm_pool = gen_pool_create(PAGE_SHIFT, -1); + if (!sw64_kvm_pool) + goto out; + + status = gen_pool_add_virt(sw64_kvm_pool, kvm_pool_virt, kvm_mem_base, + kvm_mem_size, -1); + if (status < 0) { + pr_err("failed to add memory chunks to sw64 kvm pool\n"); + gen_pool_destroy(sw64_kvm_pool); + sw64_kvm_pool = NULL; + goto out; + } + gen_pool_set_algo(sw64_kvm_pool, gen_pool_best_fit, NULL); + + base_page = pfn_to_page(kvm_mem_base >> PAGE_SHIFT); + end_page = 
pfn_to_page((kvm_mem_base + kvm_mem_size - 1) >> PAGE_SHIFT); + + p = base_page; + while (p <= end_page && page_ref_count(p) == 0) { + set_page_count(p, 1); + page_mapcount_reset(p); + SetPageReserved(p); + p++; + } + + return status; + +out: + return -ENOMEM; +} +core_initcall_sync(sw64_kvm_pool_init); +#endif +#endif diff --git a/arch/sw_64/kernel/signal.c b/arch/sw_64/kernel/signal.c new file mode 100644 index 0000000000000000000000000000000000000000..496f33bb1c89f0ae39d68e6972e08321736685df --- /dev/null +++ b/arch/sw_64/kernel/signal.c @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw_64/kernel/signal.c + * + * Copyright (C) 1995 Linus Torvalds + * + * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "proto.h" + + +#define DEBUG_SIG 0 + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +SYSCALL_DEFINE2(odd_sigprocmask, int, how, unsigned long, newmask) +{ + sigset_t oldmask; + sigset_t mask; + unsigned long res; + + siginitset(&mask, newmask & _BLOCKABLE); + res = sigprocmask(how, &mask, &oldmask); + if (!res) { + force_successful_syscall_return(); + res = oldmask.sig[0]; + } + return res; +} + +SYSCALL_DEFINE3(odd_sigaction, int, sig, + const struct odd_sigaction __user *, act, + struct odd_sigaction __user *, oact) +{ + struct k_sigaction new_ka, old_ka; + old_sigset_t mask; + int ret; + + if (act) { + if (!access_ok(act, sizeof(*act)) || + __get_user(new_ka.sa.sa_handler, &act->sa_handler) || + __get_user(new_ka.sa.sa_flags, &act->sa_flags) || + __get_user(mask, &act->sa_mask)) + return -EFAULT; + siginitset(&new_ka.sa.sa_mask, mask); + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); + + if (!ret && oact) { + if (!access_ok(oact, sizeof(*oact)) || + __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || + __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) + return -EFAULT; + } + + return ret; +} + +/* + * Do a signal return; undo the signal stack. + */ + +#if _NSIG_WORDS > 1 +# error "Non SA_SIGINFO frame needs rearranging" +#endif + +struct rt_sigframe { + struct siginfo info; + struct ucontext uc; +}; + +/* + * If this changes, userland unwinders that Know Things about our signal + * frame will break. Do not undertake lightly. It also implies an ABI + * change wrt the size of siginfo_t, which may cause some pain. + */ +extern char compile_time_assert + [offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 1 : -1]; + +static long +restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) +{ + long err = __get_user(regs->pc, &sc->sc_pc); + + err |= __copy_from_user(regs, sc->sc_regs, sizeof_field(struct pt_regs, regs)); + /* simd-fp */ + err |= __copy_from_user(¤t->thread.fpstate, &sc->sc_fpregs, + offsetof(struct user_fpsimd_state, fpcr)); + err |= __get_user(current->thread.fpstate.fpcr, &sc->sc_fpcr); + + if (likely(!err)) + __fpstate_restore(current); + + return err; +} + +/* + * Note that this syscall is also used by setcontext(3) to install + * a given sigcontext. This because it's impossible to set *all* + * registers and transfer control from userland. 
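+ * A userland setcontext() can therefore build a complete sigcontext
+ * on the stack and invoke this syscall to have every register
+ * restored atomically.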
+ */ + +SYSCALL_DEFINE1(sigreturn, struct sigcontext __user *, sc) +{ + struct pt_regs *regs = current_pt_regs(); + sigset_t set; + + force_successful_syscall_return(); + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + /* Verify that it's a good sigcontext before using it */ + if (!access_ok(sc, sizeof(*sc))) + goto give_sigsegv; + if (__get_user(set.sig[0], &sc->sc_mask)) + goto give_sigsegv; + + set_current_blocked(&set); + + if (restore_sigcontext(sc, regs)) + goto give_sigsegv; + + /* Send SIGTRAP if we're single-stepping: */ + if (ptrace_cancel_bpt(current)) { + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void __user *)regs->pc); + } + return regs->regs[0]; + +give_sigsegv: + force_sig(SIGSEGV); + return 0; +} + +SYSCALL_DEFINE1(rt_sigreturn, struct rt_sigframe __user *, frame) +{ + struct pt_regs *regs = current_pt_regs(); + sigset_t set; + + force_successful_syscall_return(); + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + /* Verify that it's a good ucontext_t before using it */ + if (!access_ok(&frame->uc, sizeof(frame->uc))) + goto give_sigsegv; + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto give_sigsegv; + + set_current_blocked(&set); + + if (restore_sigcontext(&frame->uc.uc_mcontext, regs)) + goto give_sigsegv; + + if (restore_altstack(&frame->uc.uc_stack)) + goto give_sigsegv; + + /* Send SIGTRAP if we're single-stepping: */ + if (ptrace_cancel_bpt(current)) { + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void __user *)regs->pc); + } + return regs->regs[0]; + +give_sigsegv: + force_sig(SIGSEGV); + return 0; +} + + +/* + * Set up a signal frame. + */ + +static inline void __user * +get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size) +{ + return (void __user *)((sigsp(sp, ksig) - frame_size) & -32ul); +} + +static long +setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, + unsigned long mask) +{ + long err = 0; + + err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack); + err |= __put_user(mask, &sc->sc_mask); + err |= __put_user(regs->pc, &sc->sc_pc); + err |= __put_user(8, &sc->sc_ps); + + err |= __copy_to_user(sc->sc_regs, regs, sizeof_field(struct pt_regs, regs)); + err |= __put_user(0, sc->sc_regs+31); + /* simd-fp */ + __fpstate_save(current); + err |= __copy_to_user(&sc->sc_fpregs, ¤t->thread.fpstate, + offsetof(struct user_fpsimd_state, fpcr)); + err |= __put_user(current->thread.fpstate.fpcr, &sc->sc_fpcr); + + return err; +} + +static int +setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) +{ + unsigned long err = 0; + struct rt_sigframe __user *frame; + + frame = get_sigframe(ksig, regs->regs[30], sizeof(*frame)); + if (!access_ok(frame, sizeof(*frame))) + return -EFAULT; + + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + + /* Create the ucontext. 
*/ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, &frame->uc.uc_link); + err |= __put_user(set->sig[0], &frame->uc.uc_old_sigmask); + err |= __save_altstack(&frame->uc.uc_stack, regs->regs[30]); + err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + if (err) + return -EFAULT; + + /* "Return" to the handler */ + regs->regs[26] = VDSO_SYMBOL(current->mm->context.vdso, rt_sigreturn); + regs->regs[27] = regs->pc = (unsigned long) ksig->ka.sa.sa_handler; + regs->regs[16] = ksig->sig; /* a0: signal number */ + if (ksig->ka.sa.sa_flags & SA_SIGINFO) { + /* a1: siginfo pointer, a2: ucontext pointer */ + regs->regs[17] = (unsigned long) &frame->info; + regs->regs[18] = (unsigned long) &frame->uc; + } else { + /* a1: exception code, a2: sigcontext pointer */ + regs->regs[17] = 0; + regs->regs[18] = (unsigned long) &frame->uc.uc_mcontext; + } + regs->regs[30] = (unsigned long) frame; + +#if DEBUG_SIG + pr_info("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", + current->comm, current->pid, frame, regs->pc, regs->regs[26]); +#endif + + return 0; +} + +/* + * OK, we're invoking a handler. + */ +static inline void +handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + sigset_t *oldset = sigmask_to_save(); + int ret; + + rseq_signal_deliver(ksig, regs); + + ret = setup_rt_frame(ksig, oldset, regs); + + signal_setup_done(ret, ksig, 0); +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals that + * the kernel can handle, and then we build all the user-level signal handling + * stack-frames in one go after that. + */ +static void +do_signal(struct pt_regs *regs) +{ + unsigned long single_stepping = ptrace_cancel_bpt(current); + struct ksignal ksig; + + /* This lets the debugger run, ... */ + if (get_signal(&ksig)) { + /* ... so re-check the single stepping. */ + single_stepping |= ptrace_cancel_bpt(current); + /* Whee! Actually deliver the signal. */ + if (regs->orig_r0 != NO_SYSCALL) { + switch (syscall_get_error(current, regs)) { + case -ERESTARTSYS: + if (!(ksig.ka.sa.sa_flags & SA_RESTART)) { + regs->regs[0] = EINTR; + break; + } + fallthrough; + case -ERESTARTNOINTR: + /* reset v0 and a3 and replay syscall */ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; + regs->pc -= 4; + break; + case -ERESTARTNOHAND: + case -ERESTART_RESTARTBLOCK: + regs->regs[0] = EINTR; + break; + } + regs->orig_r0 = NO_SYSCALL; + } + handle_signal(&ksig, regs); + } else { + single_stepping |= ptrace_cancel_bpt(current); + if (regs->orig_r0 != NO_SYSCALL) { + switch (syscall_get_error(current, regs)) { + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTARTNOHAND: + /* Reset v0 and a3 and replay syscall. 
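+				 * Backing pc up by 4 re-executes the trapping
+				 * sys_call instruction with its original
+				 * arguments.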
*/ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; + regs->pc -= 4; + break; + case -ERESTART_RESTARTBLOCK: + /* Set v0 to the restart_syscall and replay */ + regs->regs[0] = __NR_restart_syscall; + regs->pc -= 4; + break; + } + regs->orig_r0 = NO_SYSCALL; + } + restore_saved_sigmask(); + } + if (single_stepping) + ptrace_set_bpt(current); /* re-set breakpoint */ +} + +asmlinkage void +do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) +{ + do { + local_irq_enable(); + + if (thread_flags & _TIF_NEED_RESCHED) + schedule(); + + if (thread_flags & _TIF_UPROBE) { + unsigned long pc = regs->pc; + + uprobe_notify_resume(regs); + sw64_fix_uretprobe(regs, pc - 4); + } + + if (thread_flags & _TIF_PATCH_PENDING) + klp_update_patch_state(current); + + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + + if (thread_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); + + local_irq_disable(); + thread_flags = READ_ONCE(current_thread_info()->flags); + } while (thread_flags & _TIF_WORK_MASK); +} diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c new file mode 100644 index 0000000000000000000000000000000000000000..6d1aab4be1c0c45badc962a908eed24e1e7a40dd --- /dev/null +++ b/arch/sw_64/kernel/smp.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw_64/kernel/smp.c + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +struct smp_rcb_struct *smp_rcb; + +extern struct cpuinfo_sw64 cpu_data[NR_CPUS]; + +int smp_booted; + +void *idle_task_pointer[NR_CPUS]; + +/* State of each CPU */ +DEFINE_PER_CPU(int, cpu_state) = { 0 }; + +/* A collection of single bit ipi messages. */ +static struct { + unsigned long bits ____cacheline_aligned; +} ipi_data[NR_CPUS] __cacheline_aligned; + +enum ipi_message_type { + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_CPU_STOP, +}; + +int smp_num_cpus = 1; /* Number that came online. */ +EXPORT_SYMBOL(smp_num_cpus); + +#define send_sleep_interrupt(cpu) send_ipi((cpu), II_SLEEP) +#define send_wakeup_interrupt(cpu) send_ipi((cpu), II_WAKE) + +/* + * Where secondaries begin a life of C. + */ +void smp_callin(void) +{ + int cpuid = smp_processor_id(); + + local_irq_disable(); + + if (cpu_online(cpuid)) { + pr_err("??, cpu 0x%x already present??\n", cpuid); + BUG(); + } + set_cpu_online(cpuid, true); + + /* clear ksp, usp */ + wrksp(0); + wrusp(0); + + /* Set trap vectors. */ + trap_init(); + + /* Set interrupt vector. */ + if (is_in_host()) { + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI0_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI1_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI2_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI3_INTEN); + } + wrent(entInt, 0); + + /* Get our local ticker going. */ + sw64_setup_timer(); + + /* All kernel threads share the same mm context. */ + mmgrab(&init_mm); + current->active_mm = &init_mm; + /* update csr:ptbr */ + update_ptbr_sys(virt_to_phys(init_mm.pgd)); + + /* inform the notifiers about the new cpu */ + notify_cpu_starting(cpuid); + + per_cpu(cpu_state, cpuid) = CPU_ONLINE; + per_cpu(hard_node_id, cpuid) = rcid_to_domain_id(cpu_to_rcid(cpuid)); + + /* Must have completely accurate bogos. */ + local_irq_enable(); + + /* Cpu0 init preempt_count at start_kernel, other smp cpus do here. */ + preempt_disable(); + + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + + +/* + * Set ready for secondary cpu. 
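+ * smp_rcb->ready is presumably polled by the secondary spinning at its
+ * restart entry (set to __smp_callin in smp_rcb_init() below); storing
+ * the cpuid releases exactly that cpu.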
+ */
+static inline void set_secondary_ready(int cpuid)
+{
+	smp_rcb->ready = cpuid;
+}
+
+/*
+ * Convince the hmcode to have a secondary cpu begin execution.
+ */
+static int secondary_cpu_start(int cpuid, struct task_struct *idle)
+{
+	unsigned long timeout;
+	/*
+	 * Precalculate the target ksp.
+	 */
+	idle_task_pointer[cpuid] = idle;
+
+	set_cpu_online(cpuid, false);
+	wmb();
+
+	set_secondary_ready(cpuid);
+
+	/* Wait 10 seconds for the secondary cpu. */
+	timeout = jiffies + 10*HZ;
+	while (time_before(jiffies, timeout)) {
+		if (cpu_online(cpuid))
+			goto started;
+		udelay(10);
+		barrier();
+	}
+	pr_err("SMP: Processor %d failed to start.\n", cpuid);
+	return -1;
+
+started:
+	store_cpu_topology(cpuid);
+	numa_add_cpu(cpuid);
+	return 0;
+}
+
+/*
+ * Bring one cpu online.
+ */
+static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
+{
+	per_cpu(cpu_state, cpuid) = CPU_UP_PREPARE;
+
+	return secondary_cpu_start(cpuid, idle);
+}
+
+static void __init process_nr_cpu_ids(void)
+{
+	int i;
+
+	for (i = nr_cpu_ids; i < NR_CPUS; i++) {
+		set_cpu_possible(i, false);
+		set_cpu_present(i, false);
+	}
+
+	nr_cpu_ids = num_possible_cpus();
+}
+
+void __init smp_rcb_init(void)
+{
+	smp_rcb = INIT_SMP_RCB;
+	memset(smp_rcb, 0, sizeof(struct smp_rcb_struct));
+	/* Set up the SMP_RCB fields that are used to activate secondary CPUs */
+	smp_rcb->restart_entry = __smp_callin;
+	smp_rcb->init_done = 0xDEADBEEFUL;
+	mb();
+}
+
+/*
+ * Called from setup_arch.  Detect an SMP system and which processors
+ * are present.
+ */
+void __init setup_smp(void)
+{
+	int i = 0, num = 0;
+
+	init_cpu_possible(cpu_none_mask);
+
+	/* For unified kernel, NR_CPUS is the maximum possible value */
+	for (; i < NR_CPUS; i++) {
+		if (cpu_to_rcid(i) != -1) {
+			set_cpu_possible(num, true);
+			store_cpu_data(num);
+			if (!cpumask_test_cpu(i, &cpu_offline))
+				set_cpu_present(num, true);
+			num++;
+		}
+	}
+
+	process_nr_cpu_ids();
+
+	pr_info("Detected %u possible CPU(s), %u CPU(s) are present\n",
+		nr_cpu_ids, num_present_cpus());
+
+	smp_rcb_init();
+}
+
+/*
+ * Called by smp_init to prepare the secondaries.
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned int cpu;
+
+	/* Take care of some initial bookkeeping. */
+	memset(ipi_data, 0, sizeof(ipi_data));
+
+	init_cpu_topology();
+	store_cpu_topology(smp_processor_id());
+	numa_add_cpu(smp_processor_id());
+
+	for_each_possible_cpu(cpu) {
+		numa_store_cpu_info(cpu);
+	}
+
+	/* Nothing to do on a UP box, or when told not to. */
+	if (nr_cpu_ids == 1 || max_cpus == 0) {
+		init_cpu_possible(cpumask_of(0));
+		init_cpu_present(cpumask_of(0));
+		pr_info("SMP mode deactivated.\n");
+		return;
+	}
+
+	pr_info("SMP starting up secondaries.\n");
+}
+
+void smp_prepare_boot_cpu(void)
+{
+	int me = smp_processor_id();
+
+	per_cpu(cpu_state, me) = CPU_ONLINE;
+}
+
+int vt_cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+	pr_info("%s: cpu = %d\n", __func__, cpu);
+
+	wmb();
+	smp_rcb->ready = 0;
+	if (smp_booted) {
+		/* irq must be disabled before reset vCPU */
+		reset_cpu(cpu);
+	}
+	smp_boot_one_cpu(cpu, tidle);
+
+	return cpu_online(cpu) ?
0 : -EIO; +} + +#ifdef CONFIG_SUBARCH_C3B +DECLARE_STATIC_KEY_FALSE(use_tc_as_sched_clock); +#endif + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + if (is_in_guest()) + return vt_cpu_up(cpu, tidle); + + wmb(); + smp_rcb->ready = 0; + + /* send wake up signal */ + send_wakeup_interrupt(cpu); + /* send reset signal */ + if (smp_booted) { + if (is_in_host()) { + reset_cpu(cpu); + } else { + while (1) + cpu_relax(); + } + } + smp_boot_one_cpu(cpu, tidle); + +#ifdef CONFIG_SUBARCH_C3B + if (static_branch_likely(&use_tc_as_sched_clock)) { + if (smp_booted) { + tc_sync_clear(); + smp_call_function_single(cpu, tc_sync_ready, NULL, 0); + tc_sync_set(); + } + } +#endif + + return cpu_online(cpu) ? 0 : -EIO; +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ + smp_booted = 1; + pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); +} + +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + + +static void send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) +{ + int i; + + mb(); + for_each_cpu(i, to_whom) + set_bit(operation, &ipi_data[i].bits); + + mb(); + for_each_cpu(i, to_whom) + send_ipi(i, II_II0); +} + +static void ipi_cpu_stop(int cpu) +{ + local_irq_disable(); + set_cpu_online(cpu, false); + while (1) + wait_for_interrupt(); +} + +void handle_ipi(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + unsigned long *pending_ipis = &ipi_data[cpu].bits; + unsigned long ops; + + mb(); /* Order interrupt and bit testing. */ + while ((ops = xchg(pending_ipis, 0)) != 0) { + mb(); /* Order bit clearing and data access. */ + do { + unsigned long which; + + which = ops & -ops; + ops &= ~which; + which = __ffs(which); + + switch (which) { + case IPI_RESCHEDULE: + scheduler_ipi(); + break; + + case IPI_CALL_FUNC: + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + break; + + case IPI_CPU_STOP: + ipi_cpu_stop(cpu); + break; + + default: + pr_crit("Unknown IPI on CPU %d: %lu\n", cpu, which); + break; + } + } while (ops); + + mb(); /* Order data access and bit testing. */ + } + + cpu_data[cpu].ipi_count++; +} + +void arch_smp_send_reschedule(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); +} +EXPORT_SYMBOL(arch_smp_send_reschedule); + +void smp_send_stop(void) +{ + unsigned long timeout; + + if (num_online_cpus() > 1) { + cpumask_t mask; + + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + + if (system_state <= SYSTEM_RUNNING) + pr_crit("SMP: stopping secondary CPUs\n"); + send_ipi_message(&mask, IPI_CPU_STOP); + } + + /* Wait up to one second for other CPUs to stop */ + timeout = USEC_PER_SEC; + while (num_online_cpus() > 1 && timeout--) + udelay(1); + + if (num_online_cpus() > 1) + pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", + cpumask_pr_args(cpu_online_mask)); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + send_ipi_message(mask, IPI_CALL_FUNC); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); +} + +static void ipi_flush_tlb_all(void *ignored) +{ + local_flush_tlb_all(); +} + +void flush_tlb_all(void) +{ + /* Although we don't have any data to pass, we do want to + * synchronize with the other processors. 
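+	 * The final argument of 1 makes on_each_cpu() wait until every
+	 * cpu has completed its local flush.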
+ */ + on_each_cpu(ipi_flush_tlb_all, NULL, 1); +} + +static void ipi_flush_tlb_mm(void *x) +{ + local_flush_tlb_mm((struct mm_struct *)x); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + + /* happens as a result of exit_mmap() + * Shall we clear mm->context.asid[] here? + */ + if (atomic_read(&mm->mm_users) == 0) + return; + + preempt_disable(); + + if (atomic_read(&mm->mm_users) != 1 || mm != current->mm) { + on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); + } else { + int cpu, this_cpu = smp_processor_id(); + + for_each_online_cpu(cpu) { + if (cpu != this_cpu && mm->context.asid[cpu]) + mm->context.asid[cpu] = 0; + } + local_flush_tlb_mm(mm); + } + + preempt_enable(); +} +EXPORT_SYMBOL(flush_tlb_mm); + +struct flush_tlb_info { + struct vm_area_struct *vma; + unsigned long addr; +#define start addr + unsigned long end; +}; + +static void ipi_flush_tlb_page(void *x) +{ + struct flush_tlb_info *info = x; + + local_flush_tlb_page(info->vma, info->addr); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) +{ + struct mm_struct *mm = vma->vm_mm; + + preempt_disable(); + + if (atomic_read(&mm->mm_users) != 1 || mm != current->mm) { + struct flush_tlb_info info = { + .vma = vma, + .addr = addr, + }; + on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_page, &info, 1); + } else { + int cpu, this_cpu = smp_processor_id(); + + for_each_online_cpu(cpu) { + if (cpu != this_cpu && mm->context.asid[cpu]) + mm->context.asid[cpu] = 0; + } + local_flush_tlb_page(vma, addr); + } + + preempt_enable(); +} +EXPORT_SYMBOL(flush_tlb_page); + +/* It always flush the whole user tlb by now. To be optimized. */ +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + flush_tlb_mm(vma->vm_mm); +} +EXPORT_SYMBOL(flush_tlb_range); + +static void ipi_flush_tlb_kernel_range(void *x) +{ + struct flush_tlb_info *info = x; + + local_flush_tlb_kernel_range(info->start, info->end); +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + struct flush_tlb_info info = { + .start = start, + .end = end, + }; + + on_each_cpu(ipi_flush_tlb_kernel_range, &info, 1); +} +EXPORT_SYMBOL(flush_tlb_kernel_range); + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void) +{ + int cpu = smp_processor_id(); + + set_cpu_online(cpu, false); + remove_cpu_topology(cpu); + numa_remove_cpu(cpu); + clear_tasks_mm_cpumask(cpu); + return 0; +} + +void __cpu_die(unsigned int cpu) +{ + /* We don't do anything here: idle task is faking death itself. 
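+	 * (the dying cpu runs arch_cpu_idle_dead() below); we merely wait
+	 * for it to mark itself CPU_DEAD.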
*/ + unsigned int i; + + for (i = 0; i < 10; i++) { + /* They ack this in play_dead by setting CPU_DEAD */ + if (per_cpu(cpu_state, cpu) == CPU_DEAD) { + if (system_state == SYSTEM_RUNNING) + pr_info("CPU %u is now offline\n", cpu); + smp_rcb->ready = 0; + return; + } + msleep(100); + } + pr_err("CPU %u didn't die...\n", cpu); +} + +void arch_cpu_idle_dead(void) +{ + idle_task_exit(); + mb(); + __this_cpu_write(cpu_state, CPU_DEAD); + fixup_irqs(); + local_irq_disable(); + + if (is_in_guest()) { + hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0); + hcall(HCALL_STOP, 0, 0, 0); + } else { + wrtimer(0); + } + +#ifdef CONFIG_SUSPEND + sleepen(); + send_sleep_interrupt(smp_processor_id()); + while (1) + asm("nop"); +#else + asm volatile("memb"); + asm volatile("halt"); +#endif +} +#endif diff --git a/arch/sw_64/kernel/stacktrace.c b/arch/sw_64/kernel/stacktrace.c new file mode 100644 index 0000000000000000000000000000000000000000..ff00506d5b824727161449fa8c5f3602574c1e6e --- /dev/null +++ b/arch/sw_64/kernel/stacktrace.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Stack trace management functions + * + * Copyright (C) 2018 snyh + */ +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * sw_64 PCS assigns the frame pointer to r15. + * + * A simple function prologue looks like this: + * ldi sp,-xx(sp) + * stl ra,0(sp) + * stl fp,8(sp) + * mov sp,fp + * + * A simple function epilogue looks like this: + * mov fp,sp + * ldl ra,0(sp) + * ldl fp,8(sp) + * ldi sp,+xx(sp) + */ + +#ifdef CONFIG_FRAME_POINTER + +int unwind_frame(struct task_struct *tsk, struct stackframe *frame) +{ + unsigned long fp = frame->fp; + + if (fp & 0x7) + return -EINVAL; + + if (!tsk) + tsk = current; + + if (!on_accessible_stack(tsk, fp, NULL)) + return -EINVAL; + + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); + + /* + * Frames created upon entry from user have NULL FP and PC values, so + * don't bother reporting these. Frames created by __noreturn functions + * might have a valid FP even if PC is bogus, so only terminate where + * both are NULL. 
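+	 * The two loads above match the prologue documented at the top of
+	 * this file: ra is saved at 0(sp) and the caller's fp at 8(sp).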
+ */ + if (!frame->fp && !frame->pc) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(unwind_frame); + +void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data) +{ + unsigned long pc, fp; + + struct stackframe frame; + + if (regs) { + unsigned long offset; + + pc = regs->pc; + fp = regs->regs[15]; + if (kallsyms_lookup_size_offset(pc, NULL, &offset) + && offset < 16) { + /* call stack has not been setup + * store pc first then loop from ra + */ + if (fn(pc, data)) + return; + pc = regs->regs[26]; + } + } else if (tsk == current || tsk == NULL) { + fp = (unsigned long)__builtin_frame_address(0); + pc = (unsigned long)walk_stackframe; + } else { + fp = tsk->thread.s[6]; + pc = tsk->thread.ra; + } + + if (!__kernel_text_address(pc) || fn(pc, data)) + return; + + frame.pc = pc; + frame.fp = fp; + while (1) { + int ret; + + ret = unwind_frame(tsk, &frame); + if (ret < 0) + break; + + if (fn(frame.pc, data)) + break; + } +} +EXPORT_SYMBOL_GPL(walk_stackframe); + +#else /* !CONFIG_FRAME_POINTER */ +void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data) +{ + unsigned long *ksp; + unsigned long sp, pc; + + if (regs) { + sp = (unsigned long)(regs+1); + pc = regs->pc; + } else if (tsk == current || tsk == NULL) { + register unsigned long current_sp __asm__ ("$30"); + sp = current_sp; + pc = (unsigned long)walk_stackframe; + } else { + sp = tsk->thread.sp; + pc = tsk->thread.ra; + } + + ksp = (unsigned long *)sp; + + while (!kstack_end(ksp)) { + if (__kernel_text_address(pc) && fn(pc, data)) + break; + pc = *ksp++; + } +} +EXPORT_SYMBOL_GPL(walk_stackframe); + +#endif/* CONFIG_FRAME_POINTER */ + +static int print_address_trace(unsigned long pc, void *data) +{ + print_ip_sym((const char *)data, pc); + return 0; +} + +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) +{ + pr_info("Trace:\n"); + walk_stackframe(task, NULL, print_address_trace, (void *)loglvl); +} + +#ifdef CONFIG_STACKTRACE +/* + * Save stack-backtrace addresses into a stack_trace buffer. 
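+ * When there is room left, the trace is terminated with a ULONG_MAX
+ * sentinel entry.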
+ */ +struct stack_trace_data { + struct stack_trace *trace; + unsigned int nosched; +}; + +int save_trace(unsigned long pc, void *d) +{ + struct stack_trace_data *data = d; + struct stack_trace *trace = data->trace; + + if (data->nosched && in_sched_functions(pc)) + return 0; + if (trace->skip > 0) { + trace->skip--; + return 0; + } + + trace->entries[trace->nr_entries++] = pc; + return (trace->nr_entries >= trace->max_entries); +} + +void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) +{ + struct stack_trace_data data; + + data.trace = trace; + data.nosched = 0; + + walk_stackframe(current, regs, save_trace, &data); + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + +static void __save_stack_trace(struct task_struct *tsk, + struct stack_trace *trace, unsigned int nosched) +{ + struct stack_trace_data data; + + data.trace = trace; + data.nosched = nosched; + + walk_stackframe(tsk, NULL, save_trace, &data); + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + __save_stack_trace(tsk, trace, 1); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace(struct stack_trace *trace) +{ + __save_stack_trace(current, trace, 0); +} +EXPORT_SYMBOL_GPL(save_stack_trace); +#endif + +static int save_pc(unsigned long pc, void *data) +{ + unsigned long *p = data; + *p = 0; + + if (!in_sched_functions(pc)) + *p = pc; + + return *p; +} + +unsigned long __get_wchan(struct task_struct *tsk) +{ + unsigned long pc; + + if (!tsk || tsk == current || task_is_running(tsk)) + return 0; + walk_stackframe(tsk, NULL, save_pc, &pc); + + return pc; +} + +#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE +int save_stack_trace_tsk_reliable(struct task_struct *tsk, + struct stack_trace *trace) +{ + return 0; +} +#endif diff --git a/arch/sw_64/kernel/suspend.c b/arch/sw_64/kernel/suspend.c new file mode 100644 index 0000000000000000000000000000000000000000..27a240e6614955835f7abe8c21558b956898da43 --- /dev/null +++ b/arch/sw_64/kernel/suspend.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include +#include + +struct processor_state suspend_state; + +static int native_suspend_state_valid(suspend_state_t pm_state) +{ + switch (pm_state) { + case PM_SUSPEND_ON: + case PM_SUSPEND_STANDBY: + case PM_SUSPEND_MEM: + return 1; + default: + return 0; + } +} + +void disable_local_timer(void) +{ + wrtimer(0); +} + +extern struct pci_controller *hose_head; + +/* + * Boot Core will enter suspend stat here. 
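+ * The thread pointer is saved into the pcb and written back after
+ * wakeup, since the deep-sleep path apparently does not preserve it.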
+ */ +void sw64_suspend_enter(void) +{ + /* boot processor will go to deep sleep mode from here + * After wake up boot processor, pc will go here + */ + disable_local_timer(); + current_thread_info()->pcb.tp = rtid(); + + sw64_suspend_deep_sleep(&suspend_state); + wrtp(current_thread_info()->pcb.tp); + + disable_local_timer(); +} + +static int native_suspend_enter(suspend_state_t state) +{ + if (is_in_guest()) + return 0; + /* processor specific suspend */ + sw64_suspend_enter(); + return 0; +} + +const struct platform_suspend_ops native_suspend_ops = { + .valid = native_suspend_state_valid, + .enter = native_suspend_enter, +}; diff --git a/arch/sw_64/kernel/suspend_asm.S b/arch/sw_64/kernel/suspend_asm.S new file mode 100644 index 0000000000000000000000000000000000000000..34ee349515a7c1278f24bc9c64dc3e8a6e864137 --- /dev/null +++ b/arch/sw_64/kernel/suspend_asm.S @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include + + .text + .set noat +ENTRY(sw64_suspend_deep_sleep) + /* a0 $16 will be the address of suspend_state */ + ldi $1, PSTATE_REGS($16) + stl $9, CALLEE_R9($1) + stl $10, CALLEE_R10($1) + stl $11, CALLEE_R11($1) + stl $12, CALLEE_R12($1) + stl $13, CALLEE_R13($1) + stl $14, CALLEE_R14($1) + stl $15, CALLEE_R15($1) + stl $26, CALLEE_RA($1) + /* SIMD-FP */ + ldi $1, PSTATE_FPREGS($16) + vstd $f2, CALLEE_F2($1) + vstd $f3, CALLEE_F3($1) + vstd $f4, CALLEE_F4($1) + vstd $f5, CALLEE_F5($1) + vstd $f6, CALLEE_F6($1) + vstd $f7, CALLEE_F7($1) + vstd $f8, CALLEE_F8($1) + vstd $f9, CALLEE_F9($1) + rfpcr $f0 + fstd $f0, PSTATE_FPCR($16) + stl $8, PSTATE_KTP($16) + + /* save the address of suspend_state to $18 */ + mov $16, $18 + + /* + * Now will Go to Deep Sleep + * HMcode should save pc, gp, ps, r16, r17, r18 + */ + + sys_call HMC_sleepen + sys_call HMC_whami + bis $0, $0, $16 + ldi $17, 0x2($31) + sys_call HMC_sendii + + /* wait for a while to receive interrupt */ + ldi $16, 0x1($31) + sll $16, 24, $16 +$subloop: + subl $16, 1, $16 + bis $16, $16, $16 + bis $16, $16, $16 + bne $16, $subloop + + + ldl $8, PSTATE_KTP($18) + ldi $1, PSTATE_REGS($18) + ldl $9, CALLEE_R9($1) + ldl $10, CALLEE_R10($1) + ldl $11, CALLEE_R11($1) + ldl $12, CALLEE_R12($1) + ldl $13, CALLEE_R13($1) + ldl $14, CALLEE_R14($1) + ldl $15, CALLEE_R15($1) + ldl $26, CALLEE_RA($1) + /* SIMD-FP */ + fldd $f0, PSTATE_FPCR($18) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $suspend_setfpec_0 + subl $2, 0x1, $2 + beq $2, $suspend_setfpec_1 + subl $2, 0x1, $2 + beq $2, $suspend_setfpec_2 + setfpec3 + br $suspend_setfpec_over +$suspend_setfpec_0: + setfpec0 + br $suspend_setfpec_over +$suspend_setfpec_1: + setfpec1 + br $suspend_setfpec_over +$suspend_setfpec_2: + setfpec2 +$suspend_setfpec_over: + ldi $1, PSTATE_FPREGS($18) + vldd $f2, CALLEE_F2($1) + vldd $f3, CALLEE_F3($1) + vldd $f4, CALLEE_F4($1) + vldd $f5, CALLEE_F5($1) + vldd $f6, CALLEE_F6($1) + vldd $f7, CALLEE_F7($1) + vldd $f8, CALLEE_F8($1) + vldd $f9, CALLEE_F9($1) + ret +END(sw64_suspend_deep_sleep) diff --git a/arch/sw_64/kernel/sys_sw64.c b/arch/sw_64/kernel/sys_sw64.c new file mode 100644 index 0000000000000000000000000000000000000000..d0198aef554d78a60dd4cb67acc78324a49d1b33 --- /dev/null +++ b/arch/sw_64/kernel/sys_sw64.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +SYSCALL_DEFINE5(getsysinfo, unsigned long, op, void __user *, buffer, + unsigned long, nbytes, int __user *, start, void __user *, arg) +{ + unsigned long w; + + switch (op) { + case GSI_IEEE_FP_CONTROL: + 
+		/* Return current software fp control & status bits. */
+		/* Note that DU doesn't verify available space here. */
+
+		w = current_thread_info()->ieee_state & IEEE_SW_MASK;
+		w = swcr_update_status(w, rdfpcr());
+		if (put_user(w, (unsigned long __user *) buffer))
+			return -EFAULT;
+		return 0;
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+SYSCALL_DEFINE5(setsysinfo, unsigned long, op, void __user *, buffer,
+		unsigned long, nbytes, int __user *, start, void __user *, arg)
+{
+	switch (op) {
+	case SSI_IEEE_FP_CONTROL: {
+		unsigned long swcr, fpcr;
+		unsigned int *state;
+
+		/*
+		 * Sw_64 Architecture Handbook 4.7.7.3:
+		 * To be fully IEEE compliant, we must track the current IEEE
+		 * exception state in software, because spurious bits can be
+		 * set in the trap shadow of a software-complete insn.
+		 */
+
+		if (get_user(swcr, (unsigned long __user *)buffer))
+			return -EFAULT;
+		state = &current_thread_info()->ieee_state;
+
+		/* Update software trap enable bits. */
+		*state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
+
+		/* Update the real fpcr. */
+		fpcr = rdfpcr() & FPCR_DYN_MASK;
+		fpcr |= ieee_swcr_to_fpcr(swcr);
+		wrfpcr(fpcr);
+
+		return 0;
+	}
+
+	case SSI_IEEE_RAISE_EXCEPTION: {
+		unsigned long exc, swcr, fpcr, fex;
+		unsigned int *state;
+
+		if (get_user(exc, (unsigned long __user *)buffer))
+			return -EFAULT;
+		state = &current_thread_info()->ieee_state;
+		exc &= IEEE_STATUS_MASK;
+
+		/* Update software trap enable bits. */
+		swcr = (*state & IEEE_SW_MASK) | exc;
+		*state |= exc;
+
+		/* Update the real fpcr. */
+		fpcr = rdfpcr();
+		fpcr |= ieee_swcr_to_fpcr(swcr);
+		wrfpcr(fpcr);
+
+		/*
+		 * If any exceptions were set by this call and are unmasked,
+		 * send a signal. Old exceptions are not signaled.
+		 */
+		fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr;
+		if (fex) {
+			int si_code = FPE_FLTUNK;
+
+			if (fex & IEEE_TRAP_ENABLE_DNO)
+				si_code = FPE_FLTUND;
+			if (fex & IEEE_TRAP_ENABLE_INE)
+				si_code = FPE_FLTRES;
+			if (fex & IEEE_TRAP_ENABLE_UNF)
+				si_code = FPE_FLTUND;
+			if (fex & IEEE_TRAP_ENABLE_OVF)
+				si_code = FPE_FLTOVF;
+			if (fex & IEEE_TRAP_ENABLE_DZE)
+				si_code = FPE_FLTDIV;
+			if (fex & IEEE_TRAP_ENABLE_INV)
+				si_code = FPE_FLTINV;
+
+			send_sig_fault(SIGFPE, si_code, (void __user *)NULL, current);
+		}
+		return 0;
+	}
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+SYSCALL_DEFINE2(odd_getpriority, int, which, int, who)
+{
+	int prio = sys_getpriority(which, who);
+
+	if (prio >= 0) {
+		/*
+		 * Return value is the unbiased priority, i.e. 20 - prio.
+		 * This does result in negative return values, so signal
+		 * no error.
+		 */
+		force_successful_syscall_return();
+		prio = 20 - prio;
+	}
+	return prio;
+}
+
+SYSCALL_DEFINE0(getxuid)
+{
+	current_pt_regs()->regs[20] = sys_geteuid();
+	return sys_getuid();
+}
+
+SYSCALL_DEFINE0(getxgid)
+{
+	current_pt_regs()->regs[20] = sys_getegid();
+	return sys_getgid();
+}
+
+SYSCALL_DEFINE0(getxpid)
+{
+	current_pt_regs()->regs[20] = sys_getppid();
+	return sys_getpid();
+}
+
+SYSCALL_DEFINE0(sw64_pipe)
+{
+	int fd[2];
+	int res = do_pipe_flags(fd, 0);
+
+	if (!res) {
+		/* The return values are in $0 and $20.
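+		 * Illustrative sketch only, not part of this patch: a
+		 * userspace pipe() wrapper following this convention would
+		 * do roughly
+		 *
+		 *	fds[0] = value returned in $0;
+		 *	fds[1] = value returned in $20;
+		 *
+		 * and return 0 on success.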
+		 */
+		current_pt_regs()->regs[20] = fd[1];
+		res = fd[0];
+	}
+	return res;
+}
diff --git a/arch/sw_64/kernel/syscalls/Makefile b/arch/sw_64/kernel/syscalls/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..cdfe761d728288b7d22a48f481ac39af165c2280
--- /dev/null
+++ b/arch/sw_64/kernel/syscalls/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+$(shell mkdir -p $(uapi) $(kapi))
+
+syscall := $(src)/syscall.tbl
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+      cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@
+
+quiet_cmd_systbl = SYSTBL $@
+      cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
+
+$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
+	$(call if_changed,syshdr)
+
+$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
+	$(call if_changed,systbl)
+
+uapisyshdr-y += unistd_64.h
+kapisyshdr-y += syscall_table.h
+
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
+
+PHONY += all
+all: $(uapisyshdr-y) $(kapisyshdr-y)
+	@:
diff --git a/arch/sw_64/kernel/syscalls/syscall.tbl b/arch/sw_64/kernel/syscalls/syscall.tbl
new file mode 100644
index 0000000000000000000000000000000000000000..fdf9e4cb03eb53e7263225aae82bc00dfb53a66c
--- /dev/null
+++ b/arch/sw_64/kernel/syscalls/syscall.tbl
@@ -0,0 +1,528 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for sw64
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "common" for this file
+#
+#0 is unused
+1	common	exit	sys_exit
+2	common	fork	sys_fork
+3	common	read	sys_read
+4	common	write	sys_write
+#5 is unused
+6	common	close	sys_close
+#7 is unused
+#8 is unused
+9	common	link	sys_link
+10	common	unlink	sys_unlink
+#11 is unused
+12	common	chdir	sys_chdir
+13	common	fchdir	sys_fchdir
+14	common	mknod	sys_mknod
+15	common	chmod	sys_chmod
+16	common	chown	sys_chown
+17	common	brk	sys_brk
+#18 is unused
+19	common	lseek	sys_lseek
+20	common	getxpid	sys_getxpid
+#21 is unused
+22	common	umount2	sys_umount
+23	common	setuid	sys_setuid
+24	common	getxuid	sys_getxuid
+#25 is unused
+26	common	ptrace	sys_ptrace
+#27 is unused
+#28 is unused
+#29 is unused
+#30 is unused
+#31 is unused
+#32 is unused
+33	common	access	sys_access
+#34 is unused
+#35 is unused
+36	common	sync	sys_sync
+37	common	kill	sys_kill
+#38 is unused
+39	common	setpgid	sys_setpgid
+#40 is unused
+41	common	dup	sys_dup
+42	common	pipe	sys_sw64_pipe
+#43 is unused
+#44 is unused
+45	common	open	sys_open
+#46 is unused
+47	common	getxgid	sys_getxgid
+48	common	odd_sigprocmask	sys_odd_sigprocmask
+#49 is unused
+#50 is unused
+51	common	acct	sys_acct
+52	common	sigpending	sys_sigpending
+#53 is unused
+54	common	ioctl	sys_ioctl
+#55 is unused
+#56 is unused
+57	common	symlink	sys_symlink
+58	common	readlink	sys_readlink
+59	common	execve	sys_execve
+60	common	umask	sys_umask
+61	common	chroot	sys_chroot
+#62 is unused
+63	common	getpgrp	sys_getpgrp
+#64 is unused
+#65 is unused
+66	common	vfork	sys_vfork
+67	common	stat	sys_newstat
+68	common	lstat	sys_newlstat
+#69 is unused
+#70 is unused
+71	common	mmap	sys_mmap
+#72 is unused
+73	common	munmap	sys_munmap
+74	common	mprotect	sys_mprotect
+75	common	madvise	sys_madvise
+76	common	vhangup	sys_vhangup
+#77 is unused
+#78 is unused
+79	common	getgroups
sys_getgroups +80 common setgroups sys_setgroups +#81 is unused +82 common setpgrp sys_setpgid +#83 is unused +#84 is unused +#85 is unused +#86 is unused +87 common gethostname sys_gethostname +88 common sethostname sys_sethostname +#89 is unused +90 common dup2 sys_dup2 +91 common fstat sys_newfstat +92 common fcntl sys_fcntl +#93 is unused +94 common poll sys_poll +95 common fsync sys_fsync +96 common setpriority sys_setpriority +97 common socket sys_socket +98 common connect sys_connect +99 common accept sys_accept +100 common odd_getpriority sys_odd_getpriority +101 common send sys_send +102 common recv sys_recv +103 common sigreturn sys_sigreturn +104 common bind sys_bind +105 common setsockopt sys_setsockopt +106 common listen sys_listen +#107 is unused +#108 is unused +#109 is unused +#110 is unused +111 common sigsuspend sys_sigsuspend +#112 is unused +113 common recvmsg sys_recvmsg +114 common sendmsg sys_sendmsg +#115 is unused +#116 is unused +#117 is unused +118 common getsockopt sys_getsockopt +119 common socketcall sys_socketcall +120 common readv sys_readv +121 common writev sys_writev +#122 is unused +123 common fchown sys_fchown +124 common fchmod sys_fchmod +125 common recvfrom sys_recvfrom +126 common setreuid sys_setreuid +127 common setregid sys_setregid +128 common rename sys_rename +129 common truncate sys_truncate +130 common ftruncate sys_ftruncate +131 common flock sys_flock +132 common setgid sys_setgid +133 common sendto sys_sendto +134 common shutdown sys_shutdown +135 common socketpair sys_socketpair +136 common mkdir sys_mkdir +137 common rmdir sys_rmdir +#138 is unused +#139 is unused +#140 is unused +141 common getpeername sys_getpeername +#142 is unused +#143 is unused +144 common getrlimit sys_getrlimit +145 common setrlimit sys_setrlimit +#146 is unused +147 common setsid sys_setsid +148 common quotactl sys_quotactl +#149 is unused +150 common getsockname sys_getsockname +#151 is unused +#152 is unused +#153 is unused +#154 is unused +#155 is unused +156 common sigaction sys_odd_sigaction +#157 is unused +#158 is unused +#159 is unused +#160 is unused +#161 is unused +#162 is unused +#163 is unused +#164 is unused +#165 is unused +166 common setdomainname sys_setdomainname +#167 is unused +#168 is unused +#169 is unused +170 common bpf sys_bpf +171 common userfaultfd sys_userfaultfd +172 common membarrier sys_membarrier +173 common mlock2 sys_mlock2 +174 common getpid sys_getpid +175 common getppid sys_getppid +176 common getuid sys_getuid +177 common geteuid sys_geteuid +178 common getgid sys_getgid +179 common getegid sys_getegid +180 common epoll_pwait2 sys_epoll_pwait2 +181 common mount_setattr sys_mount_setattr +182 common quotactl_fd sys_quotactl_fd +183 common landlock_create_ruleset sys_landlock_create_ruleset +184 common landlock_add_rule sys_landlock_add_rule +185 common landlock_restrict_self sys_landlock_restrict_self +# 186 reserved for memfd_secret +187 common process_mrelease sys_process_mrelease +188 common futex_waitv sys_futex_waitv +189 common set_mempolicy_home_node sys_ni_syscall +190 common cachestat sys_cachestat +191 common fchmodat2 sys_fchmodat2 +#192 is unused +#193 is unused +#194 is unused +#195 is unused +#196 is unused +#197 is unused +#198 is unused +#199 is unused +200 common msgctl sys_old_msgctl +201 common msgget sys_msgget +202 common msgrcv sys_msgrcv +203 common msgsnd sys_msgsnd +204 common semctl sys_old_semctl +205 common semget sys_semget +206 common semop sys_semop +#207 is unused +208 common lchown sys_lchown 
+209 common shmat sys_shmat +210 common shmctl sys_old_shmctl +211 common shmdt sys_shmdt +212 common shmget sys_shmget +#213 is unused +#214 is unused +#215 is unused +#216 is unused +217 common msync sys_msync +#218 is unused +#219 is unused +#220 is unused +#221 is unused +#222 is unused +#223 is unused +#224 is unused +#225 is unused +#226 is unused +#227 is unused +#228 is unused +229 common statfs64 sys_statfs64 +230 common fstatfs64 sys_fstatfs64 +#231 is unused +#232 is unused +233 common getpgid sys_getpgid +234 common getsid sys_getsid +235 common sigaltstack sys_sigaltstack +#236 is unused +#237 is unused +#238 is unused +#239 is unused +#240 is unused +#241 is unused +#242 is unused +#243 is unused +#244 is unused +#245 is unused +#246 is unused +#247 is unused +#248 is unused +#249 is unused +#250 is unused +#251 is unused +#252 is unused +#253 is unused +254 common sysfs sys_sysfs +#255 is unused +256 common getsysinfo sys_getsysinfo +257 common setsysinfo sys_setsysinfo +#258 is unused +#259 is unused +#260 is unused +#261 is unused +#262 is unused +#263 is unused +#264 is unused +#265 is unused +#266 is unused +#267 is unused +#268 is unused +#269 is unused +#270 is unused +271 common pidfd_send_signal sys_pidfd_send_signal +272 common io_uring_setup sys_io_uring_setup +273 common io_uring_enter sys_io_uring_enter +274 common io_uring_register sys_io_uring_register +275 common open_tree sys_open_tree +276 common move_mount sys_move_mount +277 common fsopen sys_fsopen +278 common fsconfig sys_fsconfig +279 common fsmount sys_fsmount +280 common fspick sys_fspick +281 common pidfd_open sys_pidfd_open +282 common clone3 sys_clone3 +283 common close_range sys_close_range +284 common openat2 sys_openat2 +285 common pidfd_getfd sys_pidfd_getfd +286 common faccessat2 sys_faccessat2 +287 common process_madvise sys_process_madvise +288 common pkey_mprotect sys_pkey_mprotect +289 common pkey_alloc sys_pkey_alloc +290 common pkey_free sys_pkey_free +#291 is unused +#292 is unused +#293 is unused +#294 is unused +#295 is unused +#296 is unused +#297 is unused +298 common getpriority sys_getpriority +299 common sigprocmask sys_sigprocmask +300 common bdflush sys_ni_syscall +#301 is unused +302 common mount sys_mount +#303 is unused +304 common swapoff sys_swapoff +305 common getdents sys_getdents +306 common create_module sys_ni_syscall +307 common init_module sys_init_module +308 common delete_module sys_delete_module +309 common get_kernel_syms sys_ni_syscall +310 common syslog sys_syslog +311 common reboot sys_reboot +312 common clone sys_clone +313 common uselib sys_uselib +314 common mlock sys_mlock +315 common munlock sys_munlock +316 common mlockall sys_mlockall +317 common munlockall sys_munlockall +318 common sysinfo sys_sysinfo +#319 is unused +#320 is unused +321 common oldumount sys_oldumount +322 common swapon sys_swapon +323 common times sys_times +324 common personality sys_personality +325 common setfsuid sys_setfsuid +326 common setfsgid sys_setfsgid +327 common ustat sys_ustat +328 common statfs sys_statfs +329 common fstatfs sys_fstatfs +330 common sched_setparam sys_sched_setparam +331 common sched_getparam sys_sched_getparam +332 common sched_setscheduler sys_sched_setscheduler +333 common sched_getscheduler sys_sched_getscheduler +334 common sched_yield sys_sched_yield +335 common sched_get_priority_max sys_sched_get_priority_max +336 common sched_get_priority_min sys_sched_get_priority_min +337 common sched_rr_get_interval sys_sched_rr_get_interval +338 common 
afs_syscall sys_ni_syscall +339 common uname sys_newuname +340 common nanosleep sys_nanosleep +341 common mremap sys_mremap +342 common nfsservctl sys_ni_syscall +343 common setresuid sys_setresuid +344 common getresuid sys_getresuid +345 common pciconfig_read sys_pciconfig_read +346 common pciconfig_write sys_pciconfig_write +347 common query_module sys_ni_syscall +348 common prctl sys_prctl +349 common pread64 sys_pread64 +350 common pwrite64 sys_pwrite64 +351 common rt_sigreturn sys_rt_sigreturn +352 common rt_sigaction sys_rt_sigaction +353 common rt_sigprocmask sys_rt_sigprocmask +354 common rt_sigpending sys_rt_sigpending +355 common rt_sigtimedwait sys_rt_sigtimedwait +356 common rt_sigqueueinfo sys_rt_sigqueueinfo +357 common rt_sigsuspend sys_rt_sigsuspend +358 common select sys_select +359 common gettimeofday sys_gettimeofday +360 common settimeofday sys_settimeofday +361 common getitimer sys_getitimer +362 common setitimer sys_setitimer +363 common utimes sys_utimes +364 common getrusage sys_getrusage +365 common wait4 sys_wait4 +366 common adjtimex sys_adjtimex +367 common getcwd sys_getcwd +368 common capget sys_capget +369 common capset sys_capset +370 common sendfile sys_sendfile64 +371 common setresgid sys_setresgid +372 common getresgid sys_getresgid +373 common dipc sys_ni_syscall +374 common pivot_root sys_pivot_root +375 common mincore sys_mincore +376 common pciconfig_iobase sys_pciconfig_iobase +377 common getdents64 sys_getdents64 +378 common gettid sys_gettid +379 common readahead sys_readahead +#380 is unused +381 common tkill sys_tkill +382 common setxattr sys_setxattr +383 common lsetxattr sys_lsetxattr +384 common fsetxattr sys_fsetxattr +385 common getxattr sys_getxattr +386 common lgetxattr sys_lgetxattr +387 common fgetxattr sys_fgetxattr +388 common listxattr sys_listxattr +389 common llistxattr sys_llistxattr +390 common flistxattr sys_flistxattr +391 common removexattr sys_removexattr +392 common lremovexattr sys_lremovexattr +393 common fremovexattr sys_fremovexattr +394 common futex sys_futex +395 common sched_setaffinity sys_sched_setaffinity +396 common sched_getaffinity sys_sched_getaffinity +397 common tuxcall sys_ni_syscall +398 common io_setup sys_io_setup +399 common io_destroy sys_io_destroy +400 common io_getevents sys_io_getevents +401 common io_submit sys_io_submit +402 common io_cancel sys_io_cancel +403 common io_pgetevents sys_io_pgetevents +404 common rseq sys_rseq +405 common exit_group sys_exit_group +406 common lookup_dcookie sys_lookup_dcookie +407 common epoll_create sys_epoll_create +408 common epoll_ctl sys_epoll_ctl +409 common epoll_wait sys_epoll_wait +410 common remap_file_pages sys_remap_file_pages +411 common set_tid_address sys_set_tid_address +412 common restart_syscall sys_restart_syscall +413 common fadvise64 sys_fadvise64 +414 common timer_create sys_timer_create +415 common timer_settime sys_timer_settime +416 common timer_gettime sys_timer_gettime +417 common timer_getoverrun sys_timer_getoverrun +418 common timer_delete sys_timer_delete +419 common clock_settime sys_clock_settime +420 common clock_gettime sys_clock_gettime +421 common clock_getres sys_clock_getres +422 common clock_nanosleep sys_clock_nanosleep +423 common semtimedop sys_semtimedop +424 common tgkill sys_tgkill +425 common stat64 sys_stat64 +426 common lstat64 sys_lstat64 +427 common fstat64 sys_fstat64 +428 common vserver sys_ni_syscall +429 common mbind sys_mbind +430 common get_mempolicy sys_get_mempolicy +431 common set_mempolicy sys_set_mempolicy 
+432 common mq_open sys_mq_open +433 common mq_unlink sys_mq_unlink +434 common mq_timedsend sys_mq_timedsend +435 common mq_timedreceive sys_mq_timedreceive +436 common mq_notify sys_mq_notify +437 common mq_getsetattr sys_mq_getsetattr +438 common waitid sys_waitid +439 common add_key sys_add_key +440 common request_key sys_request_key +441 common keyctl sys_keyctl +442 common ioprio_set sys_ioprio_set +443 common ioprio_get sys_ioprio_get +444 common inotify_init sys_inotify_init +445 common inotify_add_watch sys_inotify_add_watch +446 common inotify_rm_watch sys_inotify_rm_watch +447 common fdatasync sys_fdatasync +448 common kexec_load sys_kexec_load +449 common migrate_pages sys_migrate_pages +450 common openat sys_openat +451 common mkdirat sys_mkdirat +452 common mknodat sys_mknodat +453 common fchownat sys_fchownat +454 common futimesat sys_futimesat +455 common fstatat64 sys_fstatat64 +456 common unlinkat sys_unlinkat +457 common renameat sys_renameat +458 common linkat sys_linkat +459 common symlinkat sys_symlinkat +460 common readlinkat sys_readlinkat +461 common fchmodat sys_fchmodat +462 common faccessat sys_faccessat +463 common pselect6 sys_pselect6 +464 common ppoll sys_ppoll +465 common unshare sys_unshare +466 common set_robust_list sys_set_robust_list +467 common get_robust_list sys_get_robust_list +468 common splice sys_splice +469 common sync_file_range sys_sync_file_range +470 common tee sys_tee +471 common vmsplice sys_vmsplice +472 common move_pages sys_move_pages +473 common getcpu sys_getcpu +474 common epoll_pwait sys_epoll_pwait +475 common utimensat sys_utimensat +476 common signalfd sys_signalfd +477 common timerfd sys_ni_syscall +478 common eventfd sys_eventfd +479 common recvmmsg sys_recvmmsg +480 common fallocate sys_fallocate +481 common timerfd_create sys_timerfd_create +482 common timerfd_settime sys_timerfd_settime +483 common timerfd_gettime sys_timerfd_gettime +484 common signalfd4 sys_signalfd4 +485 common eventfd2 sys_eventfd2 +486 common epoll_create1 sys_epoll_create1 +487 common dup3 sys_dup3 +488 common pipe2 sys_pipe2 +489 common inotify_init1 sys_inotify_init1 +490 common preadv sys_preadv +491 common pwritev sys_pwritev +492 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo +493 common perf_event_open sys_perf_event_open +494 common fanotify_init sys_fanotify_init +495 common fanotify_mark sys_fanotify_mark +496 common prlimit64 sys_prlimit64 +497 common name_to_handle_at sys_name_to_handle_at +498 common open_by_handle_at sys_open_by_handle_at +499 common clock_adjtime sys_clock_adjtime +500 common syncfs sys_syncfs +501 common setns sys_setns +502 common accept4 sys_accept4 +503 common sendmmsg sys_sendmmsg +504 common process_vm_readv sys_process_vm_readv +505 common process_vm_writev sys_process_vm_writev +506 common kcmp sys_kcmp +507 common finit_module sys_finit_module +508 common sched_setattr sys_sched_setattr +509 common sched_getattr sys_sched_getattr +510 common renameat2 sys_renameat2 +511 common getrandom sys_getrandom +512 common memfd_create sys_memfd_create +513 common execveat sys_execveat +514 common seccomp sys_seccomp +515 common copy_file_range sys_copy_file_range +516 common preadv2 sys_preadv2 +517 common pwritev2 sys_pwritev2 +518 common statx sys_statx diff --git a/arch/sw_64/kernel/systbls.S b/arch/sw_64/kernel/systbls.S new file mode 100644 index 0000000000000000000000000000000000000000..010ca3f8e016b773a014aeb0bca648a3ef79b14d --- /dev/null +++ b/arch/sw_64/kernel/systbls.S @@ -0,0 +1,15 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/sw_64/kernel/systbls.S
+ *
+ * The system call table.
+ */
+
+#include
+
+#define __SYSCALL(nr, entry)	.quad entry
+	.data
+	.align 3
+	.globl sys_call_table
+sys_call_table:
+#include
diff --git a/arch/sw_64/kernel/tc.c b/arch/sw_64/kernel/tc.c
new file mode 100644
index 0000000000000000000000000000000000000000..f2de5ac3d9dc440ca8685e33354c5e0b35919f91
--- /dev/null
+++ b/arch/sw_64/kernel/tc.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, serveros, linyue
+ */
+
+
+#include
+#include
+
+/*
+ * Entry/exit counters that make sure that both CPUs
+ * run the measurement code at once:
+ */
+unsigned long time_sync;
+
+DEFINE_PER_CPU(u64, tc_offset);
+
+void tc_sync_clear(void)
+{
+	time_sync = 0;
+}
+
+void tc_sync_ready(void *ignored)
+{
+	/* make sure we can see that time_sync has been set to 0 */
+	smp_mb();
+	while (!time_sync)
+		cpu_relax();
+
+	__this_cpu_write(tc_offset, time_sync - rdtc());
+}
+
+void tc_sync_set(void)
+{
+	time_sync = rdtc() + __this_cpu_read(tc_offset);
+}
diff --git a/arch/sw_64/kernel/termios.c b/arch/sw_64/kernel/termios.c
new file mode 100644
index 0000000000000000000000000000000000000000..5c76a513c89683ac959bad926494ae58f2e41487
--- /dev/null
+++ b/arch/sw_64/kernel/termios.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+
+int user_termio_to_kernel_termios(struct ktermios *a_termios, struct termio __user *u_termio)
+{
+	struct ktermios *k_termios = a_termios;
+	struct termio k_termio;
+	int canon, ret;
+
+	ret = copy_from_user(&k_termio, u_termio, sizeof(k_termio));
+	if (!ret) {
+		/* Overwrite only the low bits. */
+		*(unsigned short *)&k_termios->c_iflag = k_termio.c_iflag;
+		*(unsigned short *)&k_termios->c_oflag = k_termio.c_oflag;
+		*(unsigned short *)&k_termios->c_cflag = k_termio.c_cflag;
+		*(unsigned short *)&k_termios->c_lflag = k_termio.c_lflag;
+		canon = k_termio.c_lflag & ICANON;
+
+		k_termios->c_cc[VINTR] = k_termio.c_cc[_VINTR];
+		k_termios->c_cc[VQUIT] = k_termio.c_cc[_VQUIT];
+		k_termios->c_cc[VERASE] = k_termio.c_cc[_VERASE];
+		k_termios->c_cc[VKILL] = k_termio.c_cc[_VKILL];
+		k_termios->c_cc[VEOL2] = k_termio.c_cc[_VEOL2];
+		k_termios->c_cc[VSWTC] = k_termio.c_cc[_VSWTC];
+		k_termios->c_cc[canon ? VEOF : VMIN] = k_termio.c_cc[_VEOF];
+		k_termios->c_cc[canon ? VEOL : VTIME] = k_termio.c_cc[_VEOL];
+	}
+	return ret;
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ *
+ * Note the "fun" _VMIN overloading.
+ */
+int kernel_termios_to_user_termio(struct termio __user *u_termio, struct ktermios *a_termios)
+{
+	struct ktermios *k_termios = a_termios;
+	struct termio k_termio;
+	int canon;
+
+	k_termio.c_iflag = k_termios->c_iflag;
+	k_termio.c_oflag = k_termios->c_oflag;
+	k_termio.c_cflag = k_termios->c_cflag;
+	canon = (k_termio.c_lflag = k_termios->c_lflag) & ICANON;
+
+	k_termio.c_line = k_termios->c_line;
+	k_termio.c_cc[_VINTR] = k_termios->c_cc[VINTR];
+	k_termio.c_cc[_VQUIT] = k_termios->c_cc[VQUIT];
+	k_termio.c_cc[_VERASE] = k_termios->c_cc[VERASE];
+	k_termio.c_cc[_VKILL] = k_termios->c_cc[VKILL];
+	k_termio.c_cc[_VEOF] = k_termios->c_cc[canon ? VEOF : VMIN];
+	k_termio.c_cc[_VEOL] = k_termios->c_cc[canon ?
VEOL : VTIME];
+	k_termio.c_cc[_VEOL2] = k_termios->c_cc[VEOL2];
+	k_termio.c_cc[_VSWTC] = k_termios->c_cc[VSWTC];
+
+	return copy_to_user(u_termio, &k_termio, sizeof(k_termio));
+}
diff --git a/arch/sw_64/kernel/time.c b/arch/sw_64/kernel/time.c
new file mode 100644
index 0000000000000000000000000000000000000000..533a6a14c20051da05469ed5bd1bf0d824c8c85d
--- /dev/null
+++ b/arch/sw_64/kernel/time.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include "proto.h"
+
+DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+/*
+ * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting
+ * by 48 gives us 16 bits for HZ while keeping the accuracy good even
+ * for large CPU clock rates.
+ */
+#define FIX_SHIFT	48
+
+unsigned long est_cycle_freq;
+
+#ifdef CONFIG_IRQ_WORK
+
+DEFINE_PER_CPU(u8, irq_work_pending);
+
+#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1)
+#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
+#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
+
+void arch_irq_work_raise(void)
+{
+	set_irq_work_pending_flag();
+}
+
+#else /* CONFIG_IRQ_WORK */
+
+#define test_irq_work_pending() 0
+#define clear_irq_work_pending()
+
+#endif /* CONFIG_IRQ_WORK */
+
+void __init
+time_init(void)
+{
+	unsigned long cycle_freq;
+
+	cycle_freq = get_cpu_freq();
+
+	pr_info("CPU Cycle frequency = %lu Hz\n", cycle_freq);
+
+	/* Register clocksource */
+	sw64_setup_clocksource();
+	of_clk_init(NULL);
+	/* Start up the timer source. */
+	sw64_setup_timer();
+	/* Calibrate the delay loop directly */
+	lpj_fine = cycle_freq / HZ;
+}
diff --git a/arch/sw_64/kernel/topology.c b/arch/sw_64/kernel/topology.c
new file mode 100644
index 0000000000000000000000000000000000000000..8371c013446fc222e2a20ae8aab79c7d252dd56c
--- /dev/null
+++ b/arch/sw_64/kernel/topology.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include
+#include
+#include
+#include
+#include
+
+static int __init parse_dt_topology(void)
+{
+	return 0;
+}
+
+/*
+ * cpu topology table
+ */
+struct cpu_topology cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+int topo_nr_threads, topo_nr_cores, topo_nr_maxcpus;
+
+static int topo_nr_cpus;
+static int topo_threads[NR_CPUS];
+static int topo_cores[NR_CPUS];
+static int topo_packages[NR_CPUS];
+
+void __init get_vt_smp_info(void)
+{
+	unsigned long smp_info;
+
+	smp_info = sw64_io_read(0, SMP_INFO);
+	if (smp_info == -1UL)
+		smp_info = 0;
+	topo_nr_threads = (smp_info >> VT_THREADS_SHIFT) & VT_THREADS_MASK;
+	topo_nr_cores = (smp_info >> VT_CORES_SHIFT) & VT_CORES_MASK;
+	topo_nr_maxcpus = (smp_info >> VT_MAX_CPUS_SHIFT) & VT_MAX_CPUS_MASK;
+}
+
+static void __init init_topo_threads(void)
+{
+	int i, j;
+
+	if (topo_nr_threads == 0)
+		topo_nr_threads = 1;
+
+	for (i = 0; i < topo_nr_cpus; i += topo_nr_threads) {
+		for (j = 0; j < topo_nr_threads; j++)
+			topo_threads[i+j] = j;
+	}
+}
+
+static void __init init_topo_cores(void)
+{
+	int i, j;
+
+	if (topo_nr_cores == 0)
+		topo_nr_cores = topo_nr_cpus;
+
+	for (i = 0; i < topo_nr_cpus; i += topo_nr_cores) {
+		for (j = 0; j < topo_nr_cores; j++)
+			topo_cores[i+j] = j;
+	}
+}
+
+static void __init init_topo_packages(void)
+{
+	int i, j, packet_index = 0;
+	int topo_nr_packages = topo_nr_cpus / (topo_nr_cores * topo_nr_threads);
+	int div_package = topo_nr_cpus / topo_nr_packages;
+
+	for (i = 0; i < topo_nr_cpus; i += div_package) {
+		for (j
= 0; j < div_package; j++)
+			topo_packages[i+j] = packet_index;
+		packet_index++;
+	}
+	if (packet_index > topo_nr_packages)
+		pr_err("topo_packages init failed.\n");
+}
+
+static void __init init_topology_array(void)
+{
+	topo_nr_cpus = num_present_cpus();
+	if (topo_nr_maxcpus > topo_nr_cpus)
+		topo_nr_cpus = topo_nr_maxcpus;
+	init_topo_threads();
+	init_topo_cores();
+	init_topo_packages();
+}
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+	return topology_llc_cpumask(cpu);
+}
+
+static void update_siblings_masks(int cpu)
+{
+	struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+	int sib;
+
+	/* update core and thread sibling masks */
+	for_each_online_cpu(sib) {
+		struct cpu_topology *sib_topo = &cpu_topology[sib];
+
+		if (cpu_topo->package_id == sib_topo->package_id) {
+			cpumask_set_cpu(cpu, &sib_topo->core_sibling);
+			cpumask_set_cpu(sib, &cpu_topo->core_sibling);
+			cpumask_set_cpu(cpu, &sib_topo->llc_sibling);
+			cpumask_set_cpu(sib, &cpu_topo->llc_sibling);
+
+			if (cpu_topo->core_id == sib_topo->core_id) {
+				cpumask_set_cpu(cpu, &sib_topo->thread_sibling);
+				cpumask_set_cpu(sib, &cpu_topo->thread_sibling);
+			}
+		}
+	}
+}
+
+void store_cpu_topology(int cpu)
+{
+	struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+	if (cpu_topo->package_id != -1)
+		goto topology_populated;
+
+	if (is_guest_or_emul()) {
+		cpu_topo->package_id = topo_packages[cpu];
+		cpu_topo->core_id = topo_cores[cpu];
+		cpu_topo->thread_id = topo_threads[cpu];
+		goto topology_populated;
+	}
+
+	cpu_topo->package_id = rcid_to_domain_id(cpu_to_rcid(cpu));
+	cpu_topo->core_id = rcid_to_core_id(cpu_to_rcid(cpu));
+	cpu_topo->thread_id = rcid_to_thread_id(cpu_to_rcid(cpu));
+
+	pr_debug("CPU%u: socket %d core %d thread %d\n",
+		 cpu, cpu_topo->package_id, cpu_topo->core_id,
+		 cpu_topo->thread_id);
+
+topology_populated:
+	update_siblings_masks(cpu);
+}
+
+static void clear_cpu_topology(int cpu)
+{
+	struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+	cpumask_clear(&cpu_topo->llc_sibling);
+	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
+
+	cpumask_clear(&cpu_topo->core_sibling);
+	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
+	cpumask_clear(&cpu_topo->thread_sibling);
+	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+}
+
+static void __init reset_cpu_topology(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+		cpu_topo->thread_id = -1;
+		cpu_topo->core_id = 0;
+		cpu_topo->package_id = -1;
+
+		clear_cpu_topology(cpu);
+	}
+}
+
+void remove_cpu_topology(int cpu)
+{
+	int sibling;
+
+	for_each_cpu(sibling, topology_core_cpumask(cpu))
+		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
+	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
+		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+	for_each_cpu(sibling, topology_llc_cpumask(cpu))
+		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
+
+	clear_cpu_topology(cpu);
+}
+
+#ifdef CONFIG_ACPI
+static int __init parse_acpi_topology(void)
+{
+	return 0;
+}
+#else
+static inline int __init parse_acpi_topology(void)
+{
+	return -EINVAL;
+}
+#endif
+
+void __init init_cpu_topology(void)
+{
+	reset_cpu_topology();
+
+	if (is_guest_or_emul())
+		init_topology_array();
+	/*
+	 * Discard anything that was parsed if we hit an error so we
+	 * don't use partial information.
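+	 *
+	 * (Note: as defined in this file, parse_dt_topology() and the
+	 * CONFIG_ACPI variant of parse_acpi_topology() are stubs that
+	 * return 0, i.e. success, so neither reset path below can
+	 * trigger until real parsers are filled in.)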
+ */ + if (!acpi_disabled && parse_acpi_topology()) + reset_cpu_topology(); + else if (of_have_populated_dt() && parse_dt_topology()) + reset_cpu_topology(); +} diff --git a/arch/sw_64/kernel/traps.c b/arch/sw_64/kernel/traps.c new file mode 100644 index 0000000000000000000000000000000000000000..a30e18ad1f00e5878264653da04db8cde14a845c --- /dev/null +++ b/arch/sw_64/kernel/traps.c @@ -0,0 +1,1542 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sw_64/kernel/traps.c + * + * (C) Copyright 1994 Linus Torvalds + */ + +/* + * This file initializes the trap entry points + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +enum SW64_IF_TYPES { + IF_BREAKPOINT = 0, + IF_RESERVED, + IF_GENTRAP, + IF_FEN, + IF_OPDEC, + IF_SIMDEMU, +}; + +void show_regs(struct pt_regs *regs) +{ + show_regs_print_info(KERN_DEFAULT); + + printk(KERN_DEFAULT "pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n", + regs->pc, regs->regs[26], regs->ps, print_tainted()); + printk(KERN_DEFAULT "pc is at %pSR\n", (void *)regs->pc); + printk(KERN_DEFAULT "ra is at %pSR\n", (void *)regs->regs[26]); + printk(KERN_DEFAULT "v0 = %016lx t0 = %016lx t1 = %016lx\n", + regs->regs[0], regs->regs[1], regs->regs[2]); + printk(KERN_DEFAULT "t2 = %016lx t3 = %016lx t4 = %016lx\n", + regs->regs[3], regs->regs[4], regs->regs[5]); + printk(KERN_DEFAULT "t5 = %016lx t6 = %016lx t7 = %016lx\n", + regs->regs[6], regs->regs[7], regs->regs[8]); + + printk(KERN_DEFAULT "s0 = %016lx s1 = %016lx s2 = %016lx\n", + regs->regs[9], regs->regs[10], regs->regs[11]); + printk(KERN_DEFAULT "s3 = %016lx s4 = %016lx s5 = %016lx\n", + regs->regs[12], regs->regs[13], regs->regs[14]); + printk(KERN_DEFAULT "s6 = %016lx\n", + regs->regs[15]); + + printk(KERN_DEFAULT "a0 = %016lx a1 = %016lx a2 = %016lx\n", + regs->regs[16], regs->regs[17], regs->regs[18]); + printk(KERN_DEFAULT "a3 = %016lx a4 = %016lx a5 = %016lx\n", + regs->regs[19], regs->regs[20], regs->regs[21]); + printk(KERN_DEFAULT "t8 = %016lx t9 = %016lx t10 = %016lx\n", + regs->regs[22], regs->regs[23], regs->regs[24]); + printk(KERN_DEFAULT "t11= %016lx pv = %016lx at = %016lx\n", + regs->regs[25], regs->regs[27], regs->regs[28]); + printk(KERN_DEFAULT "gp = %016lx sp = %016lx\n", regs->regs[29], regs->regs[30]); +} + +static void show_code(unsigned int *pc) +{ + long i; + unsigned int insn; + + printk(KERN_DEFAULT "Code:"); + for (i = -6; i < 2; i++) { + if (__get_user(insn, (unsigned int __user *)pc + i)) + break; + printk(KERN_DEFAULT "%c%08x%c", i ? ' ' : '<', insn, i ? 
' ' : '>');
+	}
+	printk(KERN_DEFAULT "\n");
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(char *str, struct pt_regs *regs, long err)
+{
+	static int die_counter;
+	unsigned long flags;
+	int ret;
+
+	oops_enter();
+
+	spin_lock_irqsave(&die_lock, flags);
+	console_verbose();
+	bust_spinlocks(1);
+
+	pr_emerg("%s [#%d]\n", str, ++die_counter);
+
+	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
+
+	print_modules();
+	show_regs(regs);
+	show_code((unsigned int *)regs->pc);
+	show_stack(current, NULL, KERN_EMERG);
+
+	bust_spinlocks(0);
+	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+	spin_unlock_irqrestore(&die_lock, flags);
+	oops_exit();
+
+	if (kexec_should_crash(current))
+		crash_kexec(regs);
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+	if (panic_on_oops)
+		panic("Fatal exception");
+
+	if (ret != NOTIFY_STOP)
+		make_task_dead(SIGSEGV);
+}
+
+#ifndef CONFIG_MATHEMU
+static long dummy_emul(void)
+{
+	return 0;
+}
+
+long (*sw64_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask) = (void *)dummy_emul;
+EXPORT_SYMBOL_GPL(sw64_fp_emul_imprecise);
+
+long (*sw64_fp_emul)(unsigned long pc) = (void *)dummy_emul;
+EXPORT_SYMBOL_GPL(sw64_fp_emul);
+#else
+long sw64_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
+long sw64_fp_emul(unsigned long pc);
+#endif
+
+asmlinkage void
+do_entArith(unsigned long summary, unsigned long write_mask,
+	    struct pt_regs *regs)
+{
+	long si_code = FPE_FLTINV;
+
+	if (summary & 1) {
+		/*
+		 * Software-completion summary bit is set, so try to
+		 * emulate the instruction. If the processor supports
+		 * precise exceptions, we don't have to search.
+		 */
+		si_code = sw64_fp_emul(regs->pc - 4);
+		if (si_code == 0)
+			return;
+	}
+
+	if (!user_mode(regs))
+		die("Arithmetic fault", regs, 0);
+
+	/* summary<39> means integer divide by zero in C4. */
+	if ((summary >> 39) & 1)
+		si_code = FPE_INTDIV;
+
+	force_sig_fault(SIGFPE, si_code, (void __user *)regs->pc);
+}
+
+void simd_emulate(unsigned int inst, unsigned long va)
+{
+	unsigned long *fp;
+	int instr_opc, reg;
+
+	instr_opc = (inst >> 26) & 0x3f;
+	reg = (inst >> 21) & 0x1f;
+	fp = (unsigned long *) va;
+
+	switch (instr_opc) {
+	case 0x0d: /* vldd */
+		sw64_write_simd_fp_reg_d(reg, fp[0], fp[1], fp[2], fp[3]);
+		return;
+
+	case 0x0f: /* vstd */
+		sw64_read_simd_fp_m_d(reg, fp);
+		return;
+	}
+}
+
+/*
+ * BPT/GENTRAP/OPDEC make regs->pc = exc_pc + 4; the debugger should
+ * compensate for this as needed to handle these traps correctly.
+ */
+asmlinkage void
+do_entIF(unsigned long inst_type, unsigned long va, struct pt_regs *regs)
+{
+	int signo, code;
+	unsigned int inst, type;
+
+	type = inst_type & 0xffffffff;
+	inst = inst_type >> 32;
+
+	if (type == IF_SIMDEMU) {
+		simd_emulate(inst, va);
+		return;
+	}
+
+	if (!user_mode(regs) && type != IF_OPDEC) {
+		if (type == IF_BREAKPOINT) {
+			/* support kgdb */
+			notify_die(0, "kgdb trap", regs, 0, 0, SIGTRAP);
+			return;
+		}
+		die((type == IF_RESERVED ?
"Kernel Bug" : "Instruction fault"), + regs, type); + } + + switch (type) { + case IF_BREAKPOINT: /* gdb do pc-4 for sigtrap */ + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc); + return; + + case IF_GENTRAP: + regs->pc -= 4; + switch ((long)regs->regs[16]) { + case GEN_INTOVF: + signo = SIGFPE; + code = FPE_INTOVF; + break; + case GEN_INTDIV: + signo = SIGFPE; + code = FPE_INTDIV; + break; + case GEN_FLTOVF: + signo = SIGFPE; + code = FPE_FLTOVF; + break; + case GEN_FLTDIV: + signo = SIGFPE; + code = FPE_FLTDIV; + break; + case GEN_FLTUND: + signo = SIGFPE; + code = FPE_FLTUND; + break; + case GEN_FLTINV: + signo = SIGFPE; + code = FPE_FLTINV; + break; + case GEN_FLTINE: + signo = SIGFPE; + code = FPE_FLTRES; + break; + case GEN_ROPRAND: + signo = SIGFPE; + code = FPE_FLTUNK; + break; + + case GEN_DECOVF: + case GEN_DECDIV: + case GEN_DECINV: + case GEN_ASSERTERR: + case GEN_NULPTRERR: + case GEN_STKOVF: + case GEN_STRLENERR: + case GEN_SUBSTRERR: + case GEN_RANGERR: + case GEN_SUBRNG: + case GEN_SUBRNG1: + case GEN_SUBRNG2: + case GEN_SUBRNG3: + case GEN_SUBRNG4: + case GEN_SUBRNG5: + case GEN_SUBRNG6: + case GEN_SUBRNG7: + default: + regs->pc += 4; + signo = SIGTRAP; + code = TRAP_UNK; + break; + } + + force_sig_fault(signo, code, (void __user *)regs->pc); + return; + + case IF_FEN: + fpu_enable(); + return; + + case IF_OPDEC: + switch (inst) { +#ifdef CONFIG_KPROBES + case BREAK_KPROBE: + if (notify_die(DIE_BREAK, "kprobe", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; + case BREAK_KPROBE_SS: + if (notify_die(DIE_SSTEPBP, "single_step", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; +#endif +#ifdef CONFIG_UPROBES + case UPROBE_BRK_UPROBE: + if (notify_die(DIE_UPROBE, "uprobe", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; + case UPROBE_BRK_UPROBE_XOL: + if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; +#endif + } + + if (user_mode(regs)) + regs->pc -= 4; + else + die("Instruction fault", regs, type); + break; + + default: /* unexpected instruction-fault type */ + regs->pc -= 4; + break; + } + + force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc); +} + +asmlinkage void +do_entUna(void *va, unsigned long opcode, unsigned long reg, + struct pt_regs *regs) +{ + long error; + unsigned long tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; + unsigned long pc = regs->pc - 4; + + /* + * We don't want to use the generic get/put unaligned macros as + * we want to trap exceptions. Only if we actually get an + * exception will we decide whether we should have caught it. 
+ */ + + switch (opcode) { + case 0x21: + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 1(%3)\n" + " extlh %1, %3, %1\n" + " exthh %2, %3, %2\n" + "3:\n" + ".section __ex_table,\"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = tmp1 | tmp2; + return; + + case 0x22: + __asm__ __volatile__( + "1: ldl_u %1,0(%3)\n" + "2: ldl_u %2,3(%3)\n" + " extlw %1,%3,%1\n" + " exthw %2,%3,%2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = (int)(tmp1 | tmp2); + return; + + case 0x23: /* ldl */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = tmp1 | tmp2; + return; + + case 0x29: /* sth */ + __asm__ __volatile__( + " zap %6, 2, %1\n" + " srl %6, 8, %2\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %2, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %1, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + + case 0x2a: /* stw */ + __asm__ __volatile__( + " zapnot %6, 0x1, %1\n" + " srl %6, 8, %2\n" + " zapnot %2, 0x1,%2\n" + " srl %6, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %6, 24, %4\n" + " zapnot %4, 0x1, %4\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3: stb %3, 0x2(%5)\n" + "4: stb %4, 0x3(%5)\n" + "5:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi $31, 5b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 5b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 5b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 5b-4b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + + case 0x2b: /* stl */ + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), 
"=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + } + + pr_warn("Bad unaligned kernel access at %016lx: %p %lx %lu\n", + pc, va, opcode, reg); + make_task_dead(SIGSEGV); + +got_exception: + /* Ok, we caught the exception, but we don't want it. Is there + * someone to pass it along to? + */ + if (fixup_exception(regs, pc)) { + pr_info("Forwarding unaligned exception at %lx (%lx)\n", + pc, regs->pc); + return; + } + + /* + * Yikes! No one to forward the exception to. + * Since the registers are in a weird format, dump them ourselves. + */ + + die("Unhandled unaligned exception", regs, error); +} + +/* + * Handle user-level unaligned fault. Handling user-level unaligned + * faults is *extremely* slow and produces nasty messages. A user + * program *should* fix unaligned faults ASAP. + * + * Notice that we have (almost) the regular kernel stack layout here, + * so finding the appropriate registers is a little more difficult + * than in the kernel case. + * + * Finally, we handle regular integer load/stores only. In + * particular, load-linked/store-conditionally and floating point + * load/stores are not supported. The former make no sense with + * unaligned faults (they are guaranteed to fail) and I don't think + * the latter will occur in any decent program. + * + * Sigh. We *do* have to handle some FP operations, because GCC will + * uses them as temporary storage for integer memory to memory copies. + * However, we need to deal with stt/ldt and sts/lds only. + */ +#define OP_INT_MASK (1L << 0x22 | 1L << 0x2a | /* ldw stw */ \ + 1L << 0x23 | 1L << 0x2b | /* ldl stl */ \ + 1L << 0x21 | 1L << 0x29 | /* ldhu sth */ \ + 1L << 0x20 | 1L << 0x28) /* ldbu stb */ + +asmlinkage void +do_entUnaUser(void __user *va, unsigned long opcode, + unsigned long reg, struct pt_regs *regs) +{ +#ifdef CONFIG_UNA_PRINT + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); +#endif + + unsigned long tmp1, tmp2, tmp3, tmp4; + unsigned long fake_reg, *reg_addr = &fake_reg; + int si_code; + long error; + unsigned long tmp, tmp5, tmp6, tmp7, tmp8, vb; + unsigned long fp[4]; + unsigned long instr, instr_op, value; + +#ifdef CONFIG_DEBUG_FS + /* + * If command name is specified, record some information + * to debugfs. + */ + if (unaligned_task[0] && !strcmp(unaligned_task, current->comm)) { + int idx; + + idx = unaligned_count % UNA_MAX_ENTRIES; + unaligned[idx].va = (unsigned long)va; + unaligned[idx].pc = regs->pc; + unaligned_count++; + } +#endif + + /* Check the UAC bits to decide what the user wants us to do + * with the unaliged access. + */ + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, + 1, regs, regs->pc - 4); + +#ifdef CONFIG_UNA_PRINT + if (!(current_thread_info()->status & TS_UAC_NOPRINT)) { + if (__ratelimit(&ratelimit)) { + pr_info("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", + current->comm, task_pid_nr(current), + regs->pc - 4, va, opcode, reg); + } + } +#endif + if ((current_thread_info()->status & TS_UAC_SIGBUS)) + goto give_sigbus; + /* Not sure why you'd want to use this, but... */ + if ((current_thread_info()->status & TS_UAC_NOFIX)) + return; + + /* Don't bother reading ds in the access check since we already + * know that this came from the user. Also rely on the fact that + * the page at TASK_SIZE is unmapped and so can't be touched anyway. 
+	 */
+	if ((unsigned long)va >= TASK_SIZE)
+		goto give_sigsegv;
+
+	if ((1L << opcode) & OP_INT_MASK) {
+		/* it's an integer load/store */
+		if (reg < 31) {
+			reg_addr = &regs->regs[reg];
+		} else {
+			/* zero "register" */
+			fake_reg = 0;
+		}
+	}
+
+	get_user(instr, (__u32 *)(regs->pc - 4));
+	instr_op = (instr >> 26) & 0x3f;
+
+	get_user(value, (__u64 *)va);
+
+	switch (instr_op) {
+
+	case 0x0c: /* vlds */
+		if ((unsigned long)va << 61 == 0) {
+			__asm__ __volatile__(
+			"1:	ldl	%1, 0(%5)\n"
+			"2:	ldl	%2, 8(%5)\n"
+			"3:\n"
+			".section __ex_table, \"a\"\n"
+			"	.long 1b - .\n"
+			"	ldi	%1, 3b-1b(%0)\n"
+			"	.long 2b - .\n"
+			"	ldi	%2, 3b-2b(%0)\n"
+			".previous"
+				: "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4)
+				: "r"(va), "0"(0));
+
+			if (error)
+				goto give_sigsegv;
+
+			sw64_write_simd_fp_reg_s(reg, tmp1, tmp2);
+
+			return;
+		} else {
+			__asm__ __volatile__(
+			"1:	ldl_u	%1, 0(%6)\n"
+			"2:	ldl_u	%2, 7(%6)\n"
+			"3:	ldl_u	%3, 15(%6)\n"
+			"	extll	%1, %6, %1\n"
+			"	extll	%2, %6, %5\n"
+			"	exthl	%2, %6, %4\n"
+			"	exthl	%3, %6, %3\n"
+			"4:\n"
+			".section __ex_table, \"a\"\n"
+			"	.long 1b - .\n"
+			"	ldi	%1, 4b-1b(%0)\n"
+			"	.long 2b - .\n"
+			"	ldi	%2, 4b-2b(%0)\n"
+			"	.long 3b - .\n"
+			"	ldi	%3, 4b-3b(%0)\n"
+			".previous"
+				: "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3),
+				  "=&r"(tmp4), "=&r"(tmp5)
+				: "r"(va), "0"(0));
+
+			if (error)
+				goto give_sigsegv;
+
+			tmp1 = tmp1 | tmp4;
+			tmp2 = tmp5 | tmp3;
+
+			sw64_write_simd_fp_reg_s(reg, tmp1, tmp2);
+
+			return;
+		}
+	case 0x0a: /* ldse */
+		__asm__ __volatile__(
+		"1:	ldl_u	%1, 0(%3)\n"
+		"2:	ldl_u	%2, 3(%3)\n"
+		"	extlw	%1, %3, %1\n"
+		"	exthw	%2, %3, %2\n"
+		"3:\n"
+		".section __ex_table, \"a\"\n"
+		"	.long 1b - .\n"
+		"	ldi	%1, 3b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	ldi	%2, 3b-2b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+			: "r"(va), "0"(0));
+
+		if (error)
+			goto give_sigsegv;
+
+		tmp = tmp1 | tmp2;
+		tmp = tmp | (tmp << 32);
+
+		sw64_write_simd_fp_reg_s(reg, tmp, tmp);
+
+		return;
+
+	case 0x0d: /* vldd */
+		if ((unsigned long)va << 61 == 0) {
+			__asm__ __volatile__(
+			"1:	ldl	%1, 0(%5)\n"
+			"2:	ldl	%2, 8(%5)\n"
+			"3:	ldl	%3, 16(%5)\n"
+			"4:	ldl	%4, 24(%5)\n"
+			"5:\n"
+			".section __ex_table, \"a\"\n"
+			"	.long 1b - .\n"
+			"	ldi	%1, 5b-1b(%0)\n"
+			"	.long 2b - .\n"
+			"	ldi	%2, 5b-2b(%0)\n"
+			"	.long 3b - .\n"
+			"	ldi	%3, 5b-3b(%0)\n"
+			"	.long 4b - .\n"
+			"	ldi	%4, 5b-4b(%0)\n"
+			".previous"
+				: "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4)
+				: "r"(va), "0"(0));
+
+			if (error)
+				goto give_sigsegv;
+
+			sw64_write_simd_fp_reg_d(reg, tmp1, tmp2, tmp3, tmp4);
+
+			return;
+		} else {
+			__asm__ __volatile__(
+			"1:	ldl_u	%1, 0(%6)\n"
+			"2:	ldl_u	%2, 7(%6)\n"
+			"3:	ldl_u	%3, 15(%6)\n"
+			"	extll	%1, %6, %1\n"
+			"	extll	%2, %6, %5\n"
+			"	exthl	%2, %6, %4\n"
+			"	exthl	%3, %6, %3\n"
+			"4:\n"
+			".section __ex_table, \"a\"\n"
+			"	.long 1b - .\n"
+			"	ldi	%1, 4b-1b(%0)\n"
+			"	.long 2b - .\n"
+			"	ldi	%2, 4b-2b(%0)\n"
+			"	.long 3b - .\n"
+			"	ldi	%3, 4b-3b(%0)\n"
+			".previous"
+				: "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3),
+				  "=&r"(tmp4), "=&r"(tmp5)
+				: "r"(va), "0"(0));
+
+			if (error)
+				goto give_sigsegv;
+
+			tmp7 = tmp1 | tmp4;	// f0
+			tmp8 = tmp5 | tmp3;	// f1
+
+			vb = ((unsigned long)(va))+16;
+
+			__asm__ __volatile__(
+			"1:	ldl_u	%1, 0(%6)\n"
+			"2:	ldl_u	%2, 7(%6)\n"
+			"3:	ldl_u	%3, 15(%6)\n"
+			"	extll	%1, %6, %1\n"
+			"	extll	%2, %6, %5\n"
+			"	exthl	%2, %6, %4\n"
+			"	exthl	%3, %6, %3\n"
+			"4:\n"
+			".section __ex_table, \"a\"\n"
+			"	.long 1b - .\n"
+			"	ldi	%1, 4b-1b(%0)\n"
+			"	.long 2b - .\n"
+			"	ldi
%2, 4b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 4b-3b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5) + : "r"(vb), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp = tmp1 | tmp4; // f2 + tmp2 = tmp5 | tmp3; // f3 + + sw64_write_simd_fp_reg_d(reg, tmp7, tmp8, tmp, tmp2); + return; + } + + case 0x0b: /* ldde */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp = tmp1 | tmp2; + + sw64_write_simd_fp_reg_d(reg, tmp, tmp, tmp, tmp); + return; + + case 0x09: /* ldwe */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + sw64_write_simd_fp_reg_ldwe(reg, (int)(tmp1 | tmp2)); + + return; + + case 0x0e: /* vsts */ + sw64_read_simd_fp_m_s(reg, fp); + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "r"(fp[0]), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } else { + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(fp[0]), "0"(0)); + + if (error) + goto give_sigsegv; + + + vb = ((unsigned long)va) + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb 
%6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } + + case 0x0f: /* vstd */ + sw64_read_simd_fp_m_d(reg, fp); + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "r"(fp[0]), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = ((unsigned long)va)+16; + + + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(vb), "r"(fp[2]), "r"(fp[3]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } else { + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(fp[0]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = ((unsigned long)va) + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi 
$31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = vb + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[2]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = vb + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[3]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } + } + switch (opcode) { + case 0x21: /* ldhu */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 1(%3)\n" + " extlh %1, %3, %1\n" + " exthh %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + *reg_addr = tmp1 | tmp2; + break; + + case 0x26: /* flds */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + 
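+ /* + * Extable fixup convention used by the handlers in this file + * (an Alpha-style encoding, which this port appears to follow): + * each pair records the offset of a faulting access and an ldi + * whose fields encode the register to zero, the error register + * to set (%0) and the resume point (label 3), so the + * "if (error)" check below can raise SIGSEGV. + */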
".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + sw64_write_fp_reg_s(reg, tmp1 | tmp2); + return; + + case 0x27: /* fldd */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + sw64_write_fp_reg(reg, tmp1 | tmp2); + return; + + case 0x22: /* ldw */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + *reg_addr = (int)(tmp1 | tmp2); + break; + + case 0x23: /* ldl */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + *reg_addr = tmp1 | tmp2; + break; + + /* Note that the store sequences do not indicate that they change + * memory because it _should_ be affecting nothing in this context. + * (Otherwise we have other, much larger, problems.) + */ + case 0x29: /* sth with stb */ + __asm__ __volatile__( + " zap %6, 2, %1\n" + " srl %6, 8, %2\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %2, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %1, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + case 0x2e: /* fsts*/ + fake_reg = sw64_read_fp_reg_s(reg); + fallthrough; + + case 0x2a: /* stw with stb*/ + __asm__ __volatile__( + " zapnot %6, 0x1, %1\n" + " srl %6, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %6, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %6, 24, %4\n" + " zapnot %4, 0x1, %4\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3: stb %3, 0x2(%5)\n" + "4: stb %4, 0x3(%5)\n" + "5:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi $31, 5b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 5b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 5b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 5b-4b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + case 0x2f: /* fstd */ + fake_reg = sw64_read_fp_reg(reg); + fallthrough; + + case 0x2b: /* stl */ + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: 
stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + default: + /* What instruction were you trying to use, exactly? */ + goto give_sigbus; + } + + return; + +give_sigsegv: + regs->pc -= 4; /* make pc point to faulting insn */ + + /* We need to replicate some of the logic in mm/fault.c, + * since we don't have access to the fault code in the + * exception handling return path. + */ + if ((unsigned long)va >= TASK_SIZE) + si_code = SEGV_ACCERR; + else { + struct mm_struct *mm = current->mm; + + down_read(&mm->mmap_lock); + if (find_vma(mm, (unsigned long)va)) + si_code = SEGV_ACCERR; + else + si_code = SEGV_MAPERR; + up_read(&mm->mmap_lock); + } + force_sig_fault(SIGSEGV, si_code, va); + return; + +give_sigbus: + regs->pc -= 4; + force_sig_fault(SIGBUS, BUS_ADRALN, va); +} + +asmlinkage void do_entSys(struct pt_regs *regs) +{ + long ret = -ENOSYS; + unsigned long nr; + unsigned long ti_flags = current_thread_info()->flags; + + regs->orig_r0 = regs->regs[0]; + regs->orig_r19 = regs->regs[19]; + nr = regs->regs[0]; + + if (ti_flags & _TIF_SYSCALL_WORK) { + nr = syscall_trace_enter(); + if (nr == NO_SYSCALL) + goto syscall_out; + regs->orig_r0 = regs->regs[0]; + regs->orig_r19 = regs->regs[19]; + } + + if (nr < __NR_syscalls) { + syscall_fn_t syscall_fn = sys_call_table[nr]; + + ret = syscall_fn(regs->regs[16], regs->regs[17], regs->regs[18], + regs->regs[19], regs->regs[20], regs->regs[21]); + } + + if ((nr != __NR_sigreturn) && (nr != __NR_rt_sigreturn)) { + if (likely((ret >= 0) || regs->orig_r0 == NO_SYSCALL)) + syscall_set_return_value(current, regs, 0, ret); + else + syscall_set_return_value(current, regs, ret, 0); + } + +syscall_out: + rseq_syscall(regs); + + if (ti_flags & _TIF_SYSCALL_WORK) + syscall_trace_leave(); +} + +void +trap_init(void) +{ + /* Tell HMcode what global pointer we want in the kernel. 
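+ * The wrent() calls below then install the exception entry points + * in HMcode: entArith (1), entMM (2), entIF (3), entUna (4) and + * entSys (5); with CONFIG_EFI, core 0 also registers entSuspend (6).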
*/ + register unsigned long gptr __asm__("$29"); + wrkgp(gptr); + + wrent(entArith, 1); + wrent(entMM, 2); + wrent(entIF, 3); + wrent(entUna, 4); + wrent(entSys, 5); +#ifdef CONFIG_EFI + if (smp_processor_id() == 0) + wrent((void *)entSuspend, 6); +#endif +} diff --git a/arch/sw_64/kernel/unaligned.c b/arch/sw_64/kernel/unaligned.c new file mode 100644 index 0000000000000000000000000000000000000000..40a17fb9cbd2c7e0ac75041644e2bd46ff30a370 --- /dev/null +++ b/arch/sw_64/kernel/unaligned.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include +#include +#include + +unsigned long unaligned_count; +char unaligned_task[TASK_COMM_LEN]; +struct unaligned_stat unaligned[UNA_MAX_ENTRIES]; + +static ssize_t unaligned_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + size_t size; + + unaligned_count = 0; + size = min(sizeof(unaligned_task), len); + if (copy_from_user(unaligned_task, user_buf, size)) + return -EFAULT; + unaligned_task[size - 1] = '\0'; + + return len; +} + +static int unaligned_show(struct seq_file *m, void *v) +{ + int i, idx, nr; + + if (!unaligned_task[0]) { + seq_puts(m, "No task traced\n"); + return 0; + } + seq_printf(m, "Task command:\t\t%s\n", unaligned_task); + seq_printf(m, "Unaligned count:\t%ld\n", unaligned_count); + if (!unaligned_count) + return 0; + nr = 0; + idx = unaligned_count % UNA_MAX_ENTRIES; + seq_printf(m, "Latest %d unaligned stat:\nNo.\tVA\t\tPC\n", UNA_MAX_ENTRIES); + if (unaligned_count >= UNA_MAX_ENTRIES) { + for (i = idx; i < UNA_MAX_ENTRIES; i++) + seq_printf(m, "%d\t%#lx\t%#lx\n", + nr++, unaligned[i].va, unaligned[i].pc); + } + for (i = 0; i < idx; i++) + seq_printf(m, "%d\t%#lx\t%#lx\n", + nr++, unaligned[i].va, unaligned[i].pc); + return 0; +} + +static int unaligned_open(struct inode *inode, struct file *file) +{ + return single_open(file, unaligned_show, NULL); +} + +static const struct file_operations unaligned_fops = { + .read = seq_read, + .write = unaligned_set, + .open = unaligned_open, + .llseek = default_llseek, +}; + +static int __init unaligned_init(void) +{ + struct dentry *unaligned; + + if (!sw64_debugfs_dir) + return -ENODEV; + + unaligned = debugfs_create_file("unaligned", 0644, + sw64_debugfs_dir, NULL, + &unaligned_fops); + if (!unaligned) + return -ENOMEM; + + return 0; +} + +late_initcall(unaligned_init); diff --git a/arch/sw_64/kernel/uprobes.c b/arch/sw_64/kernel/uprobes.c new file mode 100644 index 0000000000000000000000000000000000000000..928312d62cfd172f20edcc113fea897a5c054a56 --- /dev/null +++ b/arch/sw_64/kernel/uprobes.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +/** + * arch_uprobe_analyze_insn - instruction analysis including validity and fixups. + * @mm: the probed address space. + * @arch_uprobe: the probepoint information. + * @addr: virtual address at which to install the probepoint + * Return 0 on success or a -ve number on error. 
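+ * + * The probed address must be 4-byte aligned: sw_64 instructions are + * fixed 32-bit, so a misaligned @addr is rejected with -EINVAL.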
+ */ +int arch_uprobe_analyze_insn(struct arch_uprobe *aup, + struct mm_struct *mm, unsigned long addr) +{ + if (addr & 0x03) + return -EINVAL; + + aup->ixol[0] = aup->insn; + aup->ixol[1] = UPROBE_BRK_UPROBE_XOL; /* NOP */ + + return 0; +} + +void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + unsigned long kaddr, kstart; + + /* Initialize the slot */ + kaddr = (unsigned long)kmap_local_page(page); + kstart = kaddr + (vaddr & ~PAGE_MASK); + memcpy((void *)kstart, src, len); + flush_icache_range(kstart, kstart + len); + kunmap_local((void *)kaddr); +} + +/* + * arch_uprobe_pre_xol - prepare to execute out of line. + * @aup: the probepoint information. + * @regs: reflects the saved user state of current task. + */ +int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* Point the instruction pointer at the out-of-line slot */ + instruction_pointer_set(regs, utask->xol_vaddr); + + return 0; +} + +int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* Resume at the instruction following the probed one */ + instruction_pointer_set(regs, utask->vaddr + 4); + + return 0; +} + +/* + * If the xol insn itself traps and generates a signal (say, + * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped + * instruction jumps back to its own address. It is assumed that anything + * like do_page_fault/do_trap/etc sets thread.trap_nr != -1. + * + * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr, + * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to + * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol(). + */ +bool arch_uprobe_xol_was_trapped(struct task_struct *tsk) +{ + return false; +} + +int arch_uprobe_exception_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct die_args *args = data; + struct pt_regs *regs = args->regs; + + /* regs == NULL is a kernel bug */ + if (WARN_ON(!regs)) + return NOTIFY_DONE; + + /* We are only interested in userspace traps */ + if (!user_mode(regs)) + return NOTIFY_DONE; + + switch (val) { + case DIE_UPROBE: + if (uprobe_pre_sstep_notifier(regs)) + return NOTIFY_STOP; + break; + case DIE_UPROBE_XOL: + if (uprobe_post_sstep_notifier(regs)) + return NOTIFY_STOP; + break; + default: + break; + } + + return NOTIFY_DONE; +} + +/* + * This function gets called when the XOL instruction either gets trapped or + * the thread has a fatal signal. Reset the instruction pointer to its + * probed address for the potential restart or for post mortem analysis. + */ +void arch_uprobe_abort_xol(struct arch_uprobe *aup, + struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + instruction_pointer_set(regs, utask->vaddr); +} + +unsigned long arch_uretprobe_hijack_return_addr( + unsigned long trampoline_vaddr, struct pt_regs *regs) +{ + unsigned long ra; + + ra = regs->regs[26]; + + /* Replace the return address with the trampoline address */ + regs->regs[26] = trampoline_vaddr; + + return ra; +} + +/* + * See if the instruction can be emulated. + * Returns true if instruction was emulated, false otherwise. + * + * For now we never emulate, so this function always returns false. 
+ */ +bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + return 0; +} + +/* + * struct xol_area and get_trampoline_vaddr() are copied from + * kernel/events/uprobes.c to avoid modifying arch-independent + * code. + */ +struct xol_area { + wait_queue_head_t wq; + atomic_t slot_count; + unsigned long *bitmap; + struct vm_special_mapping xol_mapping; + struct page *pages[2]; + unsigned long vaddr; +}; + +static unsigned long get_trampoline_vaddr(void) +{ + struct xol_area *area; + unsigned long trampoline_vaddr = -1; + + area = READ_ONCE(current->mm->uprobes_state.xol_area); + if (area) + trampoline_vaddr = area->vaddr; + + return trampoline_vaddr; +} + +void sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc) +{ + /* + * regs->pc has been changed to orig_ret_vaddr in handle_trampoline(). + */ + if (exc_pc == get_trampoline_vaddr()) + regs->regs[26] = regs->pc; +} diff --git a/arch/sw_64/kernel/vdso.c b/arch/sw_64/kernel/vdso.c new file mode 100644 index 0000000000000000000000000000000000000000..b4126cbaa4bda220635ac284a6fe526ee02a923b --- /dev/null +++ b/arch/sw_64/kernel/vdso.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include +#include +#include + +#include + +extern char vdso_start, vdso_end; +static unsigned long vdso_pages; +static struct page **vdso_pagelist; + +/* + * The vDSO data page. + */ +static union { + struct vdso_data data; + u8 page[PAGE_SIZE]; +} vdso_data_store __page_aligned_data; +struct vdso_data *vdso_data = &vdso_data_store.data; + +static struct vm_special_mapping vdso_spec[2]; + +static int __init vdso_init(void) +{ + int i; + + if (memcmp(&vdso_start, "\177ELF", 4)) { + pr_err("vDSO is not a valid ELF object!\n"); + return -EINVAL; + } + + vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; + pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", + vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data); + + /* Allocate the vDSO pagelist, plus a page for the data. */ + vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), + GFP_KERNEL); + if (vdso_pagelist == NULL) + return -ENOMEM; + + /* Grab the vDSO data page. */ + vdso_pagelist[0] = virt_to_page(vdso_data); + + /* Grab the vDSO code pages. 
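+ * vdso_start and vdso_end are PAGE_SIZE aligned (vdso.S pads them + * with .balign), so each code page resolves with virt_to_page().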
*/ + for (i = 0; i < vdso_pages; i++) + vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE); + + /* Populate the special mapping structures */ + vdso_spec[0] = (struct vm_special_mapping) { + .name = "[vvar]", + .pages = vdso_pagelist, + }; + + vdso_spec[1] = (struct vm_special_mapping) { + .name = "[vdso]", + .pages = &vdso_pagelist[1], + }; + + return 0; +} +arch_initcall(vdso_init); + +int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp) +{ + struct mm_struct *mm = current->mm; + unsigned long vdso_base, vdso_text_len, vdso_mapping_len; + void *ret; + + vdso_text_len = vdso_pages << PAGE_SHIFT; + /* Be sure to map the data page */ + vdso_mapping_len = vdso_text_len + PAGE_SIZE; + + if (down_write_killable(&mm->mmap_lock)) + return -EINTR; + vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); + if (IS_ERR_VALUE(vdso_base)) { + ret = ERR_PTR(vdso_base); + goto up_fail; + } + ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, + VM_READ|VM_MAYREAD, + &vdso_spec[0]); + if (IS_ERR(ret)) + goto up_fail; + + vdso_base += PAGE_SIZE; + mm->context.vdso = (void *)vdso_base; + ret = _install_special_mapping(mm, vdso_base, vdso_text_len, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + &vdso_spec[1]); + if (IS_ERR(ret)) + goto up_fail; + + up_write(&mm->mmap_lock); + return 0; + +up_fail: + mm->context.vdso = NULL; + up_write(&mm->mmap_lock); + return PTR_ERR(ret); +} + +void update_vsyscall(struct timekeeper *tk) +{ + vdso_data_write_begin(vdso_data); + + vdso_data->xtime_sec = tk->xtime_sec; + vdso_data->xtime_nsec = tk->tkr_mono.xtime_nsec; + vdso_data->wall_to_mono_sec = tk->wall_to_monotonic.tv_sec; + vdso_data->wall_to_mono_nsec = tk->wall_to_monotonic.tv_nsec; + vdso_data->cs_shift = tk->tkr_mono.shift; + + vdso_data->cs_mult = tk->tkr_mono.mult; + vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; + vdso_data->cs_mask = tk->tkr_mono.mask; + + vdso_data_write_end(vdso_data); +} + +void update_vsyscall_tz(void) +{ + vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; + vdso_data->tz_dsttime = sys_tz.tz_dsttime; +} diff --git a/arch/sw_64/kernel/vdso/.gitignore b/arch/sw_64/kernel/vdso/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..2b6a8b0ed7ca3c7bfcf8165790cf8a2a08814ae3 --- /dev/null +++ b/arch/sw_64/kernel/vdso/.gitignore @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +vdso.lds +vdso.so.dbg.tmp +vdso-syms.S diff --git a/arch/sw_64/kernel/vdso/Makefile b/arch/sw_64/kernel/vdso/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..190cc345dbb909a618467d85aedc7761cbb5bd33 --- /dev/null +++ b/arch/sw_64/kernel/vdso/Makefile @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: GPL-2.0 +# Symbols present in the vdso +vdso-syms = rt_sigreturn gettimeofday + +# Files to link into the vdso +obj-vdso = $(patsubst %, v%.o, $(vdso-syms)) + +# Build rules +targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S +obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) + +obj-y += vdso.o vdso-syms.o +extra-y += vdso.lds +CPPFLAGS_vdso.lds += -P -C -U$(ARCH) + +# vDSO code runs in userspace and -pg doesn't help with profiling anyway. +CFLAGS_REMOVE_vdso.o = -pg +CFLAGS_REMOVE_vrt_sigreturn.o = -pg +CFLAGS_REMOVE_vgettimeofday.o = -pg + +ifdef CONFIG_FEEDBACK_COLLECT +# vDSO code runs in userspace, not collecting feedback data. 
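+# (CFLAGS_REMOVE_<object>.o is the standard Kbuild knob for dropping a +# flag from a single object's compile line, as with -pg above.)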
+CFLAGS_REMOVE_vdso.o = -ffeedback-generate +CFLAGS_REMOVE_vrt_sigreturn.o = -ffeedback-generate +CFLAGS_REMOVE_vgettimeofday.o = -ffeedback-generate +endif + +# Disable gcov profiling for VDSO code +GCOV_PROFILE := n + +# Force dependency +$(obj)/vdso.o: $(obj)/vdso.so + +# link rule for the .so file, .lds has to be first +SYSCFLAGS_vdso.so.dbg = $(c_flags) +$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,vdsold) +SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ + $(call cc-ldoption, -Wl$(comma)--hash-style=both) + +$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE + $(call if_changed,so2s) + +# strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +# actual build commands +# The DSO images are built using a special linker script +# Add -lgcc so tilepro gets static muldi3 and lshrdi3 definitions. +# Make sure only to export the intended __vdso_xxx symbol offsets. +quiet_cmd_vdsold = VDSOLD $@ + cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \ + -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \ + $(CROSS_COMPILE)objcopy \ + $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + rm $@.tmp + +# Extracts symbol offsets from the VDSO, converting them into an assembly file +# that contains the same symbols at the same offsets. +quiet_cmd_so2s = SO2S $@ + cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@ + +# install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso.so: $(obj)/vdso.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + + +vdso_install: vdso.so diff --git a/arch/sw_64/kernel/vdso/so2s.sh b/arch/sw_64/kernel/vdso/so2s.sh new file mode 100755 index 0000000000000000000000000000000000000000..e1763af8e7301a0ec8ca7e9f901c6bf438c5920d --- /dev/null +++ b/arch/sw_64/kernel/vdso/so2s.sh @@ -0,0 +1,4 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ + +grep "__vdso_" | sed 's/\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_.*\)*/.globl\t\2\n\2:\n.quad\t0x\1/' diff --git a/arch/sw_64/kernel/vdso/vdso.S b/arch/sw_64/kernel/vdso/vdso.S new file mode 100644 index 0000000000000000000000000000000000000000..edd9be27db9d5b90652553bf48196b4f0f999e3a --- /dev/null +++ b/arch/sw_64/kernel/vdso/vdso.S @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include + + __PAGE_ALIGNED_DATA + + .globl vdso_start, vdso_end + .balign PAGE_SIZE +vdso_start: + .incbin "arch/sw_64/kernel/vdso/vdso.so" + .balign PAGE_SIZE +vdso_end: + + .previous diff --git a/arch/sw_64/kernel/vdso/vdso.lds.S b/arch/sw_64/kernel/vdso/vdso.lds.S new file mode 100644 index 0000000000000000000000000000000000000000..de1782ccb7b678c44497377f2a7985355b94a4a4 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vdso.lds.S @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * GNU linker script for the VDSO library. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * Heavily based on the vDSO linker scripts for other archs. + */ + +#include +#include +#include + +OUTPUT_FORMAT("elf64-sw_64") +OUTPUT_ARCH(sw_64) + +SECTIONS +{ + PROVIDE(_vdso_data = . - PAGE_SIZE); + . = VDSO_LBASE + SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + . = ALIGN(16); + .text : { *(.text*) } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + + .dynamic : { *(.dynamic) } :text :dynamic + + .rodata : { *(.rodata*) } :text + + _end = .; + PROVIDE(end = .); + + /DISCARD/ : { + *(.note.GNU-stack) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + } +} + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. + */ +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + +/* + * This controls what symbols we export from the DSO. + */ +VERSION +{ + LINUX_2.6 { + global: + __vdso_rt_sigreturn; + __vdso_gettimeofday; + __vdso_clock_gettime; + local: *; + }; +} diff --git a/arch/sw_64/kernel/vdso/vgettimeofday.c b/arch/sw_64/kernel/vdso/vgettimeofday.c new file mode 100644 index 0000000000000000000000000000000000000000..0aa16e988e88efaeda40178f46583ee6647e9f59 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vgettimeofday.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. 
+ */ + +#include + +#include +#include +#include +#include + +static __always_inline int syscall_fallback(clockid_t clkid, struct timespec64 *ts) +{ + register int r0 asm("$0"); + register unsigned long r19 asm("$19"); + asm volatile( + " mov %0, $16\n" + " mov %1, $17\n" + " ldi $0, %2\n" + " sys_call %3\n" + :: "r"(clkid), "r"(ts), "i"(__NR_clock_gettime), "i"(HMC_callsys) + : "$0", "$16", "$17", "$19"); + if (unlikely(r19)) + return -r0; + else + return r0; +} + +static __always_inline int do_realtime_coarse(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ts->tv_nsec = data->xtime_nsec >> data->cs_shift; + } while (vdso_data_read_retry(data, start_seq)); + + return 0; +} + + +static __always_inline int do_monotonic_coarse(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 to_mono_sec; + u64 to_mono_nsec; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ts->tv_nsec = data->xtime_nsec >> data->cs_shift; + + to_mono_sec = data->wall_to_mono_sec; + to_mono_nsec = data->wall_to_mono_nsec; + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_sec += to_mono_sec; + timespec64_add_ns(ts, to_mono_nsec); + + return 0; +} + +#if defined(CONFIG_SUBARCH_C3B) +static __always_inline u64 read_longtime(void) +{ + register unsigned long __r0 __asm__("$0"); + + __asm__ __volatile__( + "sys_call %1" : "=r"(__r0) : "i" (HMC_longtime)); + + return __r0; +} +#elif defined(CONFIG_SUBARCH_C4) +static __always_inline u64 read_longtime(void) +{ + return read_csr(CSR_SHTCLOCK); +} +#endif + +static __always_inline u64 get_ns(const struct vdso_data *data) +{ + u64 cycle_now, delta, nsec; + + cycle_now = read_longtime(); + delta = (cycle_now - data->cs_cycle_last) & data->cs_mask; + + nsec = (delta * data->cs_mult) + data->xtime_nsec; + nsec >>= data->cs_shift; + + return nsec; +} + + +static __always_inline int do_realtime(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 ns; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ns = get_ns(data); + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_nsec = 0; + timespec64_add_ns(ts, ns); + + return 0; +} + +static __always_inline int do_monotonic(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 ns; + u64 to_mono_sec; + u64 to_mono_nsec; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ns = get_ns(data); + + to_mono_sec = data->wall_to_mono_sec; + to_mono_nsec = data->wall_to_mono_nsec; + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_sec += to_mono_sec; + ts->tv_nsec = 0; + timespec64_add_ns(ts, ns + to_mono_nsec); + + return 0; +} + + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) +{ + const struct vdso_data *data = get_vdso_data(); + struct timespec64 ts; + int ret; + + ret = do_realtime(&ts, data); + if (ret) + return ret; + + if (tv) { + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / 1000; + } + + if (tz) { + tz->tz_minuteswest = data->tz_minuteswest; + tz->tz_dsttime = data->tz_dsttime; + } + + return 0; +} + +int __vdso_clock_gettime(clockid_t clkid, struct timespec64 *ts) +{ + const struct vdso_data *data = get_vdso_data(); + int ret; + + switch (clkid) { + case CLOCK_REALTIME_COARSE: + ret = do_realtime_coarse(ts, data); + break; + case CLOCK_MONOTONIC_COARSE: + ret = 
do_monotonic_coarse(ts, data); + break; + case CLOCK_REALTIME: + ret = do_realtime(ts, data); + break; + case CLOCK_MONOTONIC: + ret = do_monotonic(ts, data); + break; + default: + /* fall back to a syscall */ + ret = syscall_fallback(clkid, ts); + } + + return ret; +} diff --git a/arch/sw_64/kernel/vdso/vrt_sigreturn.S b/arch/sw_64/kernel/vdso/vrt_sigreturn.S new file mode 100644 index 0000000000000000000000000000000000000000..cdbf6501ad6457799e5557da825e59e8ad658211 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vrt_sigreturn.S @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Sigreturn trampoline for returning from a signal when the SA_RESTORER + * flag is not set. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include +#include +#include + + .text + + .macro SIGCONTEXT_REGS_I base, from = 0 + .cfi_offset \from, \base + (4 + \from) * 8 + .if 30 - \from + SIGCONTEXT_REGS_I \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_F base, from = 32 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + (\from - 32) * 32 + .if 62 - \from + SIGCONTEXT_REGS_F \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_V base, from = 67 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + ((\from - 67) & 0x1f) * 32 + (((\from - 67) >> 5) + 1) * 8 + .if 161 - \from + SIGCONTEXT_REGS_V \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS base + SIGCONTEXT_REGS_I \base + SIGCONTEXT_REGS_F \base + SIGCONTEXT_REGS_V \base + .cfi_offset 63, \base + (4 + 32 + 1) * 8 + 32 * 32 + .cfi_offset 64, \base + 2 * 8 + .endm + + .cfi_startproc + .cfi_return_column 64 + .cfi_signal_frame + SIGCONTEXT_REGS (-RT_SIGFRAME_SIZE + RT_SIGFRAME_MCTX) + .cfi_def_cfa_offset RT_SIGFRAME_SIZE + + nop +ENTRY(__vdso_rt_sigreturn) + mov $sp, $16 + ldi $0, __NR_rt_sigreturn + sys_call HMC_callsys +ENDPROC(__vdso_rt_sigreturn) + .cfi_endproc diff --git a/arch/sw_64/kernel/vmlinux.lds.S b/arch/sw_64/kernel/vmlinux.lds.S new file mode 100644 index 0000000000000000000000000000000000000000..9b81b2c7afb8d69e379bc799b9dc2a930259dbfc --- /dev/null +++ b/arch/sw_64/kernel/vmlinux.lds.S @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#define RUNTIME_DISCARD_EXIT +#define EMITS_PT_NOTE +#define RO_EXCEPTION_TABLE_ALIGN 16 + +#include +#include +#include +#include +#include + +OUTPUT_FORMAT("elf64-sw_64") +OUTPUT_ARCH(sw_64) +ENTRY(__start) +PHDRS { text PT_LOAD; note PT_NOTE; } +jiffies = jiffies_64; +SECTIONS +{ + . 
= _TEXT_START; + + __start = .; + _text = .; /* Text and read-only data */ + _stext = .; + .text : { + HEAD_TEXT + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + KPROBES_TEXT + *(.fixup) + *(.gnu.warning) + } :text + _etext = .; /* End of text section */ + + RO_DATA(PAGE_SIZE) + + /* Will be freed after init */ + __init_begin = ALIGN(PAGE_SIZE); + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(16) + /* we have to discard exit text and such at runtime, not link time */ + .exit.text : + { + EXIT_TEXT + } + .exit.data : + { + EXIT_DATA + } + PERCPU_SECTION(L1_CACHE_BYTES) + + /* + * Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page + * needed for the THREAD_SIZE aligned init_task gets freed after init + */ + . = ALIGN(THREAD_SIZE); + __init_end = .; + /* Freed after init ends here */ + + _sdata = .; /* Start of rw data section */ + _data = .; + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + .got : { +#ifdef CONFIG_RELOCATABLE + _got_start = .; +#endif + *(.got) +#ifdef CONFIG_RELOCATABLE + _got_end = .; +#endif + } + .sdata : { + *(.sdata) + } + _edata = .; /* End of data section */ + +#ifdef CONFIG_RELOCATABLE + . = ALIGN(4); + .data.reloc : { + _relocation_start = .; + /* + * Space for relocation table + * This needs to be filled so that the + * relocs tool can overwrite the content. + * An invalid value is left at the start of the + * section to abort relocation if the table + * has not been filled in. + */ + LONG(0xFFFFFFFF); + FILL(0); + . += CONFIG_RELOCATION_TABLE_SIZE - 4; + _relocation_end = .; + } +#endif + BSS_SECTION(0, 0, 0) + _end = .; + + .mdebug 0 : { + *(.mdebug) + } + .note 0 : { + *(.note) + } + + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + DISCARDS +} diff --git a/arch/sw_64/kvm/Kconfig b/arch/sw_64/kvm/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..b7e43d0bae510ebe8297d60f0ff35fe30d97cbea --- /dev/null +++ b/arch/sw_64/kvm/Kconfig @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# KVM configuration +# +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + help + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if VIRTUALIZATION + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support" + select PREEMPT_NOTIFIERS + select CMA + depends on NET + select HAVE_KVM_EVENTFD + select HAVE_KVM_IRQCHIP + select HAVE_KVM_IRQ_ROUTING + select HAVE_KVM_IRQFD + select HAVE_KVM_MSI + select KVM_VFIO + select MMU_NOTIFIER + select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select TUN + select GENERIC_ALLOCATOR + help + Support for hosting Guest kernels. + We don't support KVM with 3-level page tables yet. + + If unsure, say N. + +config KVM_MEMHOTPLUG + bool "Memory hotplug support for guest" + depends on KVM && MEMORY_HOTPLUG && SUBARCH_C3B + help + Provides memory hotplug support for SW64 guest. 
+ + +source "drivers/vhost/Kconfig" + +endif # VIRTUALIZATION diff --git a/arch/sw_64/kvm/Makefile b/arch/sw_64/kvm/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..8111014c5cca82fcfbf050d751a04c7281a4c32a --- /dev/null +++ b/arch/sw_64/kvm/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Kernel-based Virtual Machine module +# + +ccflags-y += -I $(srctree)/$(src) + +include $(srctree)/virt/kvm/Makefile.kvm + +obj-$(CONFIG_KVM) += kvm.o + +kvm-y += sw64.o +kvm-y += entry.o +kvm-y += emulate.o +kvm-y += mmio.o +kvm-y += kvm_timer.o +kvm-y += handle_exit.o +kvm-y += perf.o +kvm-$(CONFIG_SUBARCH_C3B) += kvm_core3.o kvm_cma.o +kvm-$(CONFIG_SUBARCH_C4) += kvm_core4.o mmu.o diff --git a/arch/sw_64/kvm/emulate.c b/arch/sw_64/kvm/emulate.c new file mode 100644 index 0000000000000000000000000000000000000000..fc37461b97a031f3f7a26c9c208e3803975ff1d5 --- /dev/null +++ b/arch/sw_64/kvm/emulate.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ +#include +#include +#include +#include + +void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, struct kvm_run *run) +{ + int opc, ra; + +#ifdef CONFIG_SUBARCH_C3B + opc = (insn >> 26) & 0x3f; + ra = (insn >> 21) & 0x1f; +#elif defined(CONFIG_SUBARCH_C4) + unsigned long ds_stat, exc_sum; + + ds_stat = read_csr(CSR_DS_STAT); + exc_sum = read_csr(CSR_EXC_SUM); + + opc = (ds_stat >> 4) & 0x3f; + ra = (exc_sum >> 8) & 0x1f; +#endif + + switch (opc) { + case 0x20: /* LDBU */ + run->mmio.is_write = 0; + run->mmio.len = 1; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x21: /* LDHU */ + run->mmio.is_write = 0; + run->mmio.len = 2; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x22: /* LDW */ + run->mmio.is_write = 0; + run->mmio.len = 4; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x23: /* LDL */ + case 0x24: /* LDL_U */ + run->mmio.is_write = 0; + run->mmio.len = 8; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x28: /* STB */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffUL; + run->mmio.len = 1; + break; + case 0x29: /* STH */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffffUL; + run->mmio.len = 2; + break; + case 0x2a: /* STW */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffffffffUL; + run->mmio.len = 4; + break; + case 0x2b: /* STL */ + case 0x2c: /* STL_U */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra); + run->mmio.len = 8; + break; + default: + pr_info("Miss done opc %d\n", opc); + break; + } +} + +/* + * Virtual Interrupts. + */ +unsigned int interrupt_pending(struct kvm_vcpu *vcpu, bool *more) +{ + unsigned int irq; + DECLARE_BITMAP(blk, SWVM_IRQS); + + bitmap_copy(blk, vcpu->arch.irqs_pending, SWVM_IRQS); + + irq = find_last_bit(blk, SWVM_IRQS); + + return irq; +} + +void clear_vcpu_irq(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vcb.vcpu_irq = 0xffffffffffffffffUL; +} + +void inject_vcpu_irq(struct kvm_vcpu *vcpu, unsigned int irq) +{ + vcpu->arch.vcb.vcpu_irq = irq; +} + +/* + * This actually diverts the Guest to running an interrupt handler, once an + * interrupt has been identified by interrupt_pending(). + */ +void try_deliver_interrupt(struct kvm_vcpu *vcpu, unsigned int irq, bool more) +{ + BUG_ON(irq >= SWVM_IRQS); + + /* Otherwise we check if they have interrupts disabled. 
*/ + if (vcpu->arch.vcb.vcpu_irq_disabled) { + clear_vcpu_irq(vcpu); + return; + } + + /* If they don't have a handler (yet?), we just ignore it */ + if (vcpu->arch.vcb.ent_int != 0) { + /* OK, mark it no longer pending and deliver it. */ + clear_bit(irq, (vcpu->arch.irqs_pending)); + /* + * set_guest_interrupt() takes the interrupt descriptor and a + * flag to say whether this interrupt pushes an error code onto + * the stack as well: virtual interrupts never do. + */ + inject_vcpu_irq(vcpu, irq); + } +} diff --git a/arch/sw_64/kvm/entry.S b/arch/sw_64/kvm/entry.S new file mode 100644 index 0000000000000000000000000000000000000000..a61ecc387d260497bd414b588a8fac00d67d8bbe --- /dev/null +++ b/arch/sw_64/kvm/entry.S @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 + */ + .text +#include +#include +#include +#include + + .set noat + +/* + * r16: physical address of guest kvm_vcpu.arch.vcb + * r17: pointer to guest kvm_vcpu.arch.kvm_regs + * r18: pointer to hcall args + */ +ENTRY(__sw64_vcpu_run) + /* save host fpregs */ + rfpcr $f0 + fstd $f0, TASK_THREAD_FPCR($8) + vstd $f2, TASK_THREAD_F2($8) + vstd $f3, TASK_THREAD_F3($8) + vstd $f4, TASK_THREAD_F4($8) + vstd $f5, TASK_THREAD_F5($8) + vstd $f6, TASK_THREAD_F6($8) + vstd $f7, TASK_THREAD_F7($8) + vstd $f8, TASK_THREAD_F8($8) + vstd $f9, TASK_THREAD_F9($8) + + ldi sp, -VCPU_RET_SIZE(sp) + /* save host pt_regs to current kernel stack */ + ldi sp, -PT_REGS_SIZE(sp) + stl $9, PT_REGS_R9(sp) + stl $8, PT_REGS_R8(sp) + stl $10, PT_REGS_R10(sp) + stl $11, PT_REGS_R11(sp) + stl $12, PT_REGS_R12(sp) + stl $13, PT_REGS_R13(sp) + stl $14, PT_REGS_R14(sp) + stl $15, PT_REGS_R15(sp) + stl $26, PT_REGS_R26(sp) + + /* restore guest switch stack from guest kvm_regs struct */ + ldl $0, KVM_REGS_R0($17) + ldl $1, KVM_REGS_R1($17) + /* restore $2 later */ + ldl $3, KVM_REGS_R3($17) + ldl $4, KVM_REGS_R4($17) + ldl $5, KVM_REGS_R5($17) + ldl $6, KVM_REGS_R6($17) + ldl $7, KVM_REGS_R7($17) + ldl $8, KVM_REGS_R8($17) + ldl $9, KVM_REGS_R9($17) + ldl $10, KVM_REGS_R10($17) + ldl $11, KVM_REGS_R11($17) + ldl $12, KVM_REGS_R12($17) + ldl $13, KVM_REGS_R13($17) + ldl $14, KVM_REGS_R14($17) + ldl $15, KVM_REGS_R15($17) + ldl $19, KVM_REGS_R19($17) + ldl $20, KVM_REGS_R20($17) + ldl $21, KVM_REGS_R21($17) + ldl $22, KVM_REGS_R22($17) + ldl $23, KVM_REGS_R23($17) + ldl $24, KVM_REGS_R24($17) + ldl $25, KVM_REGS_R25($17) + ldl $26, KVM_REGS_R26($17) + ldl $27, KVM_REGS_R27($17) + ldl $28, KVM_REGS_R28($17) + + fldd $f0, KVM_REGS_FPCR($17) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $g_setfpec_0 + subl $2, 0x1, $2 + beq $2, $g_setfpec_1 + subl $2, 0x1, $2 + beq $2, $g_setfpec_2 + setfpec3 + br $g_setfpec_over +$g_setfpec_0: + setfpec0 + br $g_setfpec_over +$g_setfpec_1: + setfpec1 + br $g_setfpec_over +$g_setfpec_2: + setfpec2 +$g_setfpec_over: + ldl $2, KVM_REGS_R2($17) + vldd $f0, KVM_REGS_F0($17) + vldd $f1, KVM_REGS_F1($17) + vldd $f2, KVM_REGS_F2($17) + vldd $f3, KVM_REGS_F3($17) + vldd $f4, KVM_REGS_F4($17) + vldd $f5, KVM_REGS_F5($17) + vldd $f6, KVM_REGS_F6($17) + vldd $f7, KVM_REGS_F7($17) + vldd $f8, KVM_REGS_F8($17) + vldd $f9, KVM_REGS_F9($17) + vldd $f10, KVM_REGS_F10($17) + vldd $f11, KVM_REGS_F11($17) + vldd $f12, KVM_REGS_F12($17) + vldd $f13, KVM_REGS_F13($17) + vldd $f14, KVM_REGS_F14($17) + vldd $f15, KVM_REGS_F15($17) + vldd $f16, KVM_REGS_F16($17) + vldd $f17, KVM_REGS_F17($17) + vldd $f18, KVM_REGS_F18($17) + vldd $f19, KVM_REGS_F19($17) + vldd $f20, KVM_REGS_F20($17) + 
vldd $f21, KVM_REGS_F21($17) + vldd $f22, KVM_REGS_F22($17) + vldd $f23, KVM_REGS_F23($17) + vldd $f24, KVM_REGS_F24($17) + vldd $f25, KVM_REGS_F25($17) + vldd $f26, KVM_REGS_F26($17) + vldd $f27, KVM_REGS_F27($17) + vldd $f28, KVM_REGS_F28($17) + vldd $f29, KVM_REGS_F29($17) + vldd $f30, KVM_REGS_F30($17) + + ldi $17, KVM_REGS_PS($17) + + /* enter guest */ + /* r16 = guest vcpucb pointer */ + /* r17 = base of guest kvm_regs.ps, saved/restored by hmcode */ + + /* enter guest now */ + sys_call 0x31 + /* exit guest now */ + + ldi $17, -KVM_REGS_PS($17) /* r17: base of kvm_regs */ + + vstd $f0, KVM_REGS_F0($17) + vstd $f1, KVM_REGS_F1($17) + vstd $f2, KVM_REGS_F2($17) + vstd $f3, KVM_REGS_F3($17) + vstd $f4, KVM_REGS_F4($17) + vstd $f5, KVM_REGS_F5($17) + vstd $f6, KVM_REGS_F6($17) + vstd $f7, KVM_REGS_F7($17) + vstd $f8, KVM_REGS_F8($17) + vstd $f9, KVM_REGS_F9($17) + vstd $f10, KVM_REGS_F10($17) + vstd $f11, KVM_REGS_F11($17) + vstd $f12, KVM_REGS_F12($17) + vstd $f13, KVM_REGS_F13($17) + vstd $f14, KVM_REGS_F14($17) + vstd $f15, KVM_REGS_F15($17) + vstd $f16, KVM_REGS_F16($17) + vstd $f17, KVM_REGS_F17($17) + vstd $f18, KVM_REGS_F18($17) + vstd $f19, KVM_REGS_F19($17) + vstd $f20, KVM_REGS_F20($17) + vstd $f21, KVM_REGS_F21($17) + vstd $f22, KVM_REGS_F22($17) + vstd $f23, KVM_REGS_F23($17) + vstd $f24, KVM_REGS_F24($17) + vstd $f25, KVM_REGS_F25($17) + vstd $f26, KVM_REGS_F26($17) + vstd $f27, KVM_REGS_F27($17) + vstd $f28, KVM_REGS_F28($17) + vstd $f29, KVM_REGS_F29($17) + vstd $f30, KVM_REGS_F30($17) + + rfpcr $f0 + fstd $f0, KVM_REGS_FPCR($17) + + /* don't save r0 Hmcode have saved r0 for us */ + stl $1, KVM_REGS_R1($17) + stl $2, KVM_REGS_R2($17) + stl $3, KVM_REGS_R3($17) + stl $4, KVM_REGS_R4($17) + stl $5, KVM_REGS_R5($17) + stl $6, KVM_REGS_R6($17) + stl $7, KVM_REGS_R7($17) + stl $8, KVM_REGS_R8($17) + stl $9, KVM_REGS_R9($17) + stl $10, KVM_REGS_R10($17) + stl $11, KVM_REGS_R11($17) + stl $12, KVM_REGS_R12($17) + stl $13, KVM_REGS_R13($17) + stl $14, KVM_REGS_R14($17) + stl $15, KVM_REGS_R15($17) + stl $19, KVM_REGS_R19($17) + stl $20, KVM_REGS_R20($17) + stl $21, KVM_REGS_R21($17) + stl $22, KVM_REGS_R22($17) + stl $23, KVM_REGS_R23($17) + stl $24, KVM_REGS_R24($17) + stl $25, KVM_REGS_R25($17) + stl $26, KVM_REGS_R26($17) + stl $27, KVM_REGS_R27($17) + stl $28, KVM_REGS_R28($17) + + /* restore host regs from host sp */ + ldl $8, PT_REGS_R8(sp) + ldl $9, PT_REGS_R9(sp) + ldl $10, PT_REGS_R10(sp) + ldl $11, PT_REGS_R11(sp) + ldl $12, PT_REGS_R12(sp) + ldl $13, PT_REGS_R13(sp) + ldl $14, PT_REGS_R14(sp) + ldl $15, PT_REGS_R15(sp) + ldl $26, PT_REGS_R26(sp) + ldi sp, PT_REGS_SIZE(sp) + + /* restore host fpregs */ + fldd $f0, TASK_THREAD_FPCR($8) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $setfpec_0 + subl $2, 0x1, $2 + beq $2, $setfpec_1 + subl $2, 0x1, $2 + beq $2, $setfpec_2 + setfpec3 + br $setfpec_over +$setfpec_0: + setfpec0 + br $setfpec_over +$setfpec_1: + setfpec1 + br $setfpec_over +$setfpec_2: + setfpec2 +$setfpec_over: + vldd $f2, TASK_THREAD_F2($8) + vldd $f3, TASK_THREAD_F3($8) + vldd $f4, TASK_THREAD_F4($8) + vldd $f5, TASK_THREAD_F5($8) + vldd $f6, TASK_THREAD_F6($8) + vldd $f7, TASK_THREAD_F7($8) + vldd $f8, TASK_THREAD_F8($8) + vldd $f9, TASK_THREAD_F9($8) + + /* if $0 > 0, handle hcall */ + bgt $0, $ret_to + + stl $26, VCPU_RET_RA(sp) + stl $0, VCPU_RET_R0(sp) + + /* Hmcode will setup in */ + /* restore $16 $17 $18, do interrupt trick */ + ldi sp, -(HOST_INT_SIZE + PT_REGS_SIZE)(sp) + ldl $16, HOST_INT_R16(sp) + ldl $17, HOST_INT_R17(sp) + ldl $18, 
HOST_INT_R18(sp) + ldi sp, (HOST_INT_SIZE + PT_REGS_SIZE)(sp) + + ldi $19, -PT_REGS_SIZE(sp) + call $26, do_entInt + ldl $26, VCPU_RET_RA(sp) + ldl $0, VCPU_RET_R0(sp) +$ret_to: + /* ret($0) indicate hcall number */ + ldi sp, VCPU_RET_SIZE(sp) /* pop stack */ + ret diff --git a/arch/sw_64/kvm/handle_exit.c b/arch/sw_64/kvm/handle_exit.c new file mode 100644 index 0000000000000000000000000000000000000000..69b97860db88885af651aeb1c45896752832c72a --- /dev/null +++ b/arch/sw_64/kvm/handle_exit.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ +#include +#include +#include +#include +#include +#include +#include + +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index, struct hcall_args *hargs) +{ + gfn_t gfn __maybe_unused; + + switch (exception_index) { + case SW64_KVM_EXIT_IO: + vcpu->stat.io_exits++; + return io_mem_abort(vcpu, run, hargs); + case SW64_KVM_MIGRATION_SET_DIRTY_HM: + case SW64_KVM_MIGRATION_SET_DIRTY: + vcpu->stat.migration_set_dirty++; + gfn = hargs->arg2 >> 24; + mutex_lock(&vcpu->kvm->slots_lock); + kvm_vcpu_mark_page_dirty(vcpu, gfn); + mutex_unlock(&vcpu->kvm->slots_lock); + return 1; + case SW64_KVM_EXIT_HALT: + vcpu->stat.halt_exits++; + vcpu->arch.halted = 1; + kvm_vcpu_block(vcpu); + return 1; + case SW64_KVM_EXIT_SHUTDOWN: + vcpu->stat.shutdown_exits++; + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN; + return 0; + case SW64_KVM_EXIT_RESTART: + vcpu->stat.restart_exits++; + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; + return 0; + case SW64_KVM_EXIT_STOP: + vcpu->stat.stop_exits++; + vcpu->arch.halted = 1; + memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending)); + kvm_vcpu_block(vcpu); + return 1; + case SW64_KVM_EXIT_TIMER: + vcpu->stat.timer_exits++; + set_timer(vcpu, hargs->arg0); + return 1; + case SW64_KVM_EXIT_IPI: + vcpu->stat.ipi_exits++; + vcpu_send_ipi(vcpu, hargs->arg0, hargs->arg1); + return 1; + case SW64_KVM_EXIT_DEBUG: + vcpu->stat.debug_exits++; + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + vcpu->run->debug.arch.epc = vcpu->arch.regs.pc; + return 0; +#ifdef CONFIG_KVM_MEMHOTPLUG + case SW64_KVM_EXIT_MEMHOTPLUG: + vcpu->stat.memhotplug_exits++; + vcpu_mem_hotplug(vcpu, hargs->arg0); + return 1; +#endif +#ifdef CONFIG_SUBARCH_C4 + case SW64_KVM_EXIT_APT_FAULT: + return kvm_handle_guest_abort(vcpu, run); +#endif + case SW64_KVM_EXIT_FATAL_ERROR: + vcpu->stat.fatal_error_exits++; + pr_err("Guest fatal error: Reason=[%lx], EXC_PC=[%lx], DVA=[%lx]", hargs->arg0, hargs->arg1, hargs->arg2); + vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; + vcpu->run->hw.hardware_exit_reason = hargs->arg0; + return 0; + } + + return 1; +} diff --git a/arch/sw_64/kvm/irq.h b/arch/sw_64/kvm/irq.h new file mode 100644 index 0000000000000000000000000000000000000000..9268ab6af4920818566e2d39e056be795429d20d --- /dev/null +++ b/arch/sw_64/kvm/irq.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * irq.h: in kernel interrupt controller related definitions + */ + +#ifndef _SW64_KVM_IRQ_H +#define _SW64_KVM_IRQ_H +static inline int irqchip_in_kernel(struct kvm *kvm) +{ + return 1; +} +#endif /* _SW64_KVM_IRQ_H */ diff --git a/arch/sw_64/kvm/kvm_cma.c b/arch/sw_64/kvm/kvm_cma.c new file mode 100644 index 0000000000000000000000000000000000000000..de04eb5d20d746ee436ec40b449dc7385722443f --- /dev/null +++ 
b/arch/sw_64/kvm/kvm_cma.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Contiguous Memory Allocator for KVM + * + * This program is modified on the basis of CMA, to achieve cross-node + * memory reservation, as well as reserved memory information statistics. + */ + +#define pr_fmt(fmt) "kvm_cma: " fmt + +#include +#include +#include +#include +#include +#include + +#include "../../../mm/cma.h" +#include "../../../mm/internal.h" + +struct cma kvm_cma_areas[MAX_CMA_AREAS]; +unsigned int kvm_cma_area_count; + +static void __init init_kvm_cma_reserved_pageblock(struct page *page) +{ + unsigned int i = pageblock_nr_pages; + struct page *p = page; + + do { + __ClearPageReserved(p); + set_page_count(p, 0); + } while (++p, --i); + + set_pageblock_migratetype(page, MIGRATE_ISOLATE); + + if (pageblock_order >= MAX_ORDER) { + i = pageblock_nr_pages; + p = page; + do { + set_page_refcounted(p); + __free_pages(p, MAX_ORDER - 1); + p += MAX_ORDER_NR_PAGES; + } while (i -= MAX_ORDER_NR_PAGES); + } else { + set_page_refcounted(page); + __free_pages(page, pageblock_order); + } + + adjust_managed_page_count(page, pageblock_nr_pages); +} + +static int __init kvm_cma_activate_area(struct cma *cma) +{ + int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long); + unsigned long base_pfn = cma->base_pfn, pfn = base_pfn; + unsigned int i = cma->count >> pageblock_order; + + cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + + if (!cma->bitmap) { + cma->count = 0; + return -ENOMEM; + } + + WARN_ON_ONCE(!pfn_valid(pfn)); + + do { + unsigned int j; + + base_pfn = pfn; + + for (j = pageblock_nr_pages; j; --j, pfn++) + WARN_ON_ONCE(!pfn_valid(pfn)); + + init_kvm_cma_reserved_pageblock(pfn_to_page(base_pfn)); + } while (--i); + + spin_lock_init(&cma->lock); + + return 0; +} + +static int __init kvm_cma_init_reserved_areas(void) +{ + int i; + + for (i = 0; i < kvm_cma_area_count; i++) { + int ret = kvm_cma_activate_area(&kvm_cma_areas[i]); + + if (ret) + return ret; + } + + return 0; +} +core_initcall(kvm_cma_init_reserved_areas); + +/** + * kvm_cma_init_reserved_mem() - create custom contiguous area + * from reserved memory + * @base: Base address of the reserved area + * @size: Size of the reserved area (in bytes), + * @order_per_bit: Order of pages represented by one bit on bitmap. + * @name: The name of the area. If this parameter is NULL, the name of + * the area will be set to "cmaN", where N is a running counter of + * used areas. + * @res_cma: Pointer to store the created cma region. + * + * This function creates custom contiguous area from already reserved memory. + */ +int __init kvm_cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, + unsigned int order_per_bit, const char *name, + struct cma **res_cma) +{ + struct cma *cma; + phys_addr_t alignment; + + /* Sanity checks */ + if (kvm_cma_area_count == ARRAY_SIZE(kvm_cma_areas)) { + pr_err("Not enough slots for CMA reserved regions!\n"); + return -ENOSPC; + } + + if (!size || !memblock_is_region_reserved(base, size)) + return -EINVAL; + + /* ensure minimal alignment required by mm core */ + alignment = PAGE_SIZE << + max_t(unsigned long, MAX_ORDER - 1, pageblock_order); + + /* alignment should be aligned with order_per_bit */ + if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit)) + return -EINVAL; + + if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size) + return -EINVAL; + + /* + * Each reserved area must be initialised later, when more kernel + * subsystems (like slab allocator) are available. 
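+ * That is why kvm_cma_init_reserved_areas() above is registered as a + * core_initcall instead of running from the early reservation path.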
+ */ + cma = &kvm_cma_areas[kvm_cma_area_count]; + + if (name) + snprintf(cma->name, CMA_MAX_NAME, "%s", name); + else + snprintf(cma->name, CMA_MAX_NAME, "cma%d", kvm_cma_area_count); + + cma->base_pfn = PFN_DOWN(base); + cma->count = size >> PAGE_SHIFT; + cma->order_per_bit = order_per_bit; + *res_cma = cma; + kvm_cma_area_count++; + totalcma_pages += (size / PAGE_SIZE); + + return 0; +} + +/** + * kvm_cma_declare_contiguous() - reserve contiguous area for VM + * @base: Base address of the reserved area (optional), + * @size: Size of the reserved area (in bytes), + * @limit: End address of the reserved memory (optional, 0 for any). + * @alignment: Alignment for the CMA area, should be a power of 2 or zero + * @order_per_bit: Order of pages represented by one bit on bitmap. + * @name: The name of the area. See function kvm_cma_init_reserved_mem() + * @res_cma: Pointer to store the created cma region. + * + * This function reserves memory from the early allocator. It should be + * called by arch specific code once the early allocator (memblock or bootmem) + * has been activated and all other subsystems have already allocated/reserved + * memory. This function allows creating custom reserved areas. + */ +int __init kvm_cma_declare_contiguous(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + const char *name, struct cma **res_cma) +{ + phys_addr_t memblock_end = memblock_end_of_DRAM(); + phys_addr_t highmem_start; + int ret = 0; + + /* + * We can't use __pa(high_memory) directly, since high_memory + * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly) + * complain. Find the boundary by adding one to the last valid + * address. + */ + highmem_start = __pa(high_memory - 1) + 1; + + if (!size) + return -EINVAL; + + if (alignment && !is_power_of_2(alignment)) + return -EINVAL; + + /* + * Sanitise input arguments. + * Pages at both ends of the CMA area could be merged into adjacent + * unmovable migratetype pages by the page allocator's buddy algorithm; + * in that case you couldn't get contiguous memory, which is not what + * we want. + */ + alignment = max(alignment, (phys_addr_t)PAGE_SIZE << + max_t(unsigned long, MAX_ORDER - 1, pageblock_order)); + if (base & (alignment - 1)) { + ret = -EINVAL; + pr_err("Region at %pa must be aligned to %pa bytes\n", + &base, &alignment); + goto err; + } + base = ALIGN(base, alignment); + size = ALIGN(size, alignment); + limit &= ~(alignment - 1); + + if (!base) { + ret = -EINVAL; + pr_err("Base address of region must be specified\n"); + goto err; + } + + /* size should be aligned with order_per_bit */ + if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit)) + return -EINVAL; + + /* + * The requested region must not cross the low/high memory boundary. + */ + if (base < highmem_start && base + size > highmem_start) { + ret = -EINVAL; + pr_err("Region at %pa defined on low/high memory boundary (%pa)\n", + &base, &highmem_start); + goto err; + } + + /* + * If the limit is unspecified or above the memblock end, its effective + * value will be the memblock end. Set it explicitly to simplify further + * checks. 
+ */ + if (limit == 0 || limit > memblock_end) + limit = memblock_end; + + if (base + size > limit) { + ret = -EINVAL; + pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n", + &size, &base, &limit); + goto err; + } + + /* Reserve memory */ + if (memblock_is_region_reserved(base, size) || + memblock_reserve(base, size) < 0) { + ret = -EBUSY; + goto err; + } + ret = kvm_cma_init_reserved_mem(base, size, order_per_bit, + name, res_cma); + if (ret) + goto free_mem; + + pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M, + &base); + return 0; + +free_mem: + memblock_free((void *)base, size); +err: + pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M); + return ret; +} diff --git a/arch/sw_64/kvm/kvm_core3.c b/arch/sw_64/kvm/kvm_core3.c new file mode 100644 index 0000000000000000000000000000000000000000..f7e9150d40e0e4a88ac14bb08869302bc97b8e6e --- /dev/null +++ b/arch/sw_64/kvm/kvm_core3.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "trace.h" +#include "vmem.c" + +__read_mostly bool bind_vcpu_enabled; + +#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_NUMA) +static int __init bind_vcpu_init(void) +{ + if (!sw64_debugfs_dir) + return -ENODEV; + debugfs_create_bool("bind_vcpu", 0644, + sw64_debugfs_dir, &bind_vcpu_enabled); + return 0; +} + +static void bind_vcpu_exit(void) +{ + bind_vcpu_enabled = false; +} +#else +static int __init bind_vcpu_init(void) +{ + return 0; +} + +static void bind_vcpu_exit(void) { } + +#endif + +static unsigned long longtime_offset; + +#ifdef CONFIG_KVM_MEMHOTPLUG +static unsigned long get_vpcr(struct kvm_vcpu *vcpu, u64 vpn) +{ + unsigned long base; + + base = virt_to_phys(vcpu->kvm->arch.seg_pgd); + return base | ((vpn & VPN_MASK) << 44); +} +#else +static unsigned long get_vpcr(struct kvm_vcpu *vcpu, u64 vpn) +{ + unsigned long base, size; + + base = vcpu->kvm->arch.host_phys_addr; + size = vcpu->kvm->arch.size; + return (base >> 23) | ((size >> 23) << 16) | ((vpn & VPN_MASK) << 44); +} +#endif + +void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.vcb.vpcr == 0) { + vcpu->arch.vcb.vpcr = get_vpcr(vcpu, 0); +#ifndef CONFIG_KVM_MEMHOTPLUG + if (unlikely(bind_vcpu_enabled)) { + int nid; + unsigned long end; + + end = vcpu->kvm->arch.host_phys_addr + vcpu->kvm->arch.size; + nid = pfn_to_nid(PHYS_PFN(vcpu->kvm->arch.host_phys_addr)); + if (pfn_to_nid(PHYS_PFN(end)) == nid) + set_cpus_allowed_ptr(vcpu->arch.tsk, cpumask_of_node(nid)); + } +#endif + vcpu->arch.vcb.upcr = 0x7; + } +} + +void kvm_flush_tlb_all(void) +{ + tbia(); +} + +void kvm_sw64_update_vpn(struct kvm_vcpu *vcpu, unsigned long vpn) +{ + vcpu->arch.vcb.vpcr = ((vcpu->arch.vcb.vpcr) & (~(VPN_MASK << 44))) | (vpn << 44); + vcpu->arch.vcb.dtb_vpcr = ((vcpu->arch.vcb.dtb_vpcr) & (~(VPN_MASK << VPN_SHIFT))) | (vpn << VPN_SHIFT); +} + +int kvm_sw64_init_vm(struct kvm *kvm) +{ +#ifdef CONFIG_KVM_MEMHOTPLUG + unsigned long *seg_pgd; + + if (kvm->arch.seg_pgd != NULL) { + kvm_err("kvm_arch already initialized?\n"); + return -EINVAL; + } + + seg_pgd = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO); + if (!seg_pgd) + return -ENOMEM; + + kvm->arch.seg_pgd = seg_pgd; + #endif + return 0; +} + +void kvm_sw64_destroy_vm(struct kvm *kvm) +{ + #ifdef CONFIG_KVM_MEMHOTPLUG + void *seg_pgd = NULL; + + if 
(kvm->arch.seg_pgd) { + seg_pgd = READ_ONCE(kvm->arch.seg_pgd); + kvm->arch.seg_pgd = NULL; + } + + if (seg_pgd) + free_pages_exact(seg_pgd, PAGE_SIZE); + #endif + kvm_destroy_vcpus(kvm); +} + +#ifdef CONFIG_KVM_MEMHOTPLUG +static void setup_segment_table(struct kvm *kvm, + struct kvm_memory_slot *memslot, unsigned long addr, size_t size) +{ + unsigned long *seg_pgd = kvm->arch.seg_pgd; + unsigned long num_of_entry; + unsigned long base_hpa = addr; + unsigned long i; + + num_of_entry = round_up(size, 1 << 30) >> 30; + + for (i = 0; i < num_of_entry; i++) { + *seg_pgd = base_hpa + (i << 30); + seg_pgd++; + } +} +#endif + +int kvm_arch_prepare_memory_region(struct kvm *kvm, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + unsigned long addr; + struct file *vm_file; + struct vm_area_struct *vma; + struct vmem_info *info; + struct kvm_userspace_memory_region new_mem; + struct kvm_userspace_memory_region *mem = &new_mem; + unsigned long ret; + size_t size; + + mem->flags = new->flags; + mem->guest_phys_addr = ((new->base_gfn) << PAGE_SHIFT); + mem->memory_size = ((new->npages) << PAGE_SHIFT); + mem->userspace_addr = new->userspace_addr; + + if (change == KVM_MR_FLAGS_ONLY || change == KVM_MR_DELETE) + return 0; + + if (test_bit(IO_MARK_BIT, (unsigned long *)(&(mem->guest_phys_addr)))) + return 0; + + if (test_bit(IO_MARK_BIT + 1, (unsigned long *)(&(mem->guest_phys_addr)))) + return 0; + +#ifndef CONFIG_KVM_MEMHOTPLUG + if (mem->guest_phys_addr) { + pr_info("%s, No KVM MEMHOTPLUG support!\n", __func__); + return 0; + } +#endif + if (!sw64_kvm_pool) + return -ENOMEM; + + pr_info("%s: %#llx %#llx, user addr: %#llx\n", __func__, + mem->guest_phys_addr, mem->memory_size, mem->userspace_addr); + + vma = find_vma(current->mm, mem->userspace_addr); + if (!vma) + return -ENOMEM; + vm_file = vma->vm_file; + + if (!vm_file) { + info = kzalloc(sizeof(struct vmem_info), GFP_KERNEL); + + size = round_up(mem->memory_size, 8<<20); + addr = gen_pool_alloc(sw64_kvm_pool, size); + if (!addr) + return -ENOMEM; + vm_munmap(mem->userspace_addr, mem->memory_size); + ret = vm_mmap(vm_file, mem->userspace_addr, mem->memory_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_FIXED, 0); + if ((long)ret < 0) + return ret; + + vma = find_vma(current->mm, mem->userspace_addr); + if (!vma) + return -ENOMEM; + + info->start = addr; + info->size = size; + vma->vm_private_data = (void *) info; + + vma->vm_ops = &vmem_vm_ops; + vma->vm_ops->open(vma); + + ret = vmem_vm_insert_page(vma); + if ((int)ret < 0) + return ret; + } else { + info = vm_file->private_data; + addr = info->start; + } + + pr_info("guest phys addr = %#lx, size = %#lx\n", + addr, vma->vm_end - vma->vm_start); + + kvm->arch.host_phys_addr = (u64)addr; + kvm->arch.size = round_up(mem->memory_size, 8<<20); + + memset(__va(addr), 0, 0x2000000); + + return 0; +} + +/* + * kvm_mark_migration write the mark on every vcpucbs of the kvm, which tells + * the system to do migration while the mark is on, and flush all vcpu's tlbs + * at the beginning of the migration. 
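
For reference, the non-hotplug get_vpcr() earlier in this file packs the guest's physical base, size and VPN into a single VPCR value in 8 MiB (1 << 23) granules. A standalone sketch of that packing; the 16-bit field widths and the 0xff VPN mask used for unpacking are assumptions for illustration only::

  #include <stdio.h>

  #define GRANULE_SHIFT	23		/* 8 MiB units, from base >> 23 */
  #define VPN_SHIFT	44
  #define VPN_MASK	0xffUL		/* assumed width, for illustration */

  static unsigned long pack_vpcr(unsigned long base, unsigned long size,
  			       unsigned long vpn)
  {
  	/* same shape as get_vpcr() in the patch above */
  	return (base >> GRANULE_SHIFT) |
  	       ((size >> GRANULE_SHIFT) << 16) |
  	       ((vpn & VPN_MASK) << VPN_SHIFT);
  }

  int main(void)
  {
  	/* 8 MiB-aligned guest RAM: 2 GiB at 1 GiB, VPN 5 */
  	unsigned long vpcr = pack_vpcr(1UL << 30, 2UL << 30, 5);

  	printf("vpcr = %#lx\n", vpcr);
  	printf("base granule %#lx, size granule %#lx, vpn %lu\n",
  	       vpcr & 0xffff, (vpcr >> 16) & 0xffff,
  	       (vpcr >> VPN_SHIFT) & VPN_MASK);
  	return 0;
  }
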
+ */
+void kvm_mark_migration(struct kvm *kvm, int mark)
+{
+	struct kvm_vcpu *vcpu;
+	unsigned long cpu;
+
+	kvm_for_each_vcpu(cpu, vcpu, kvm)
+		vcpu->arch.vcb.migration_mark = mark << 2;
+
+	kvm_flush_remote_tlbs(kvm);
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *old,
+				const struct kvm_memory_slot *new,
+				enum kvm_mr_change change)
+{
+	/*
+	 * At this point the memslot has been committed and there is an
+	 * allocated dirty_bitmap[], dirty pages will be tracked while the
+	 * memory slot is write protected.
+	 */
+
+	/* If dirty logging starts for the first time, flush all vcpu tlbs. */
+	if ((change == KVM_MR_FLAGS_ONLY) && (new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+		kvm_mark_migration(kvm, 1);
+}
+
+int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+	unsigned long addr = vcpu->kvm->arch.host_phys_addr;
+
+	hrtimer_cancel(&vcpu->arch.hrt);
+	vcpu->arch.vcb.soft_cid = vcpu->vcpu_id;
+	vcpu->arch.vcb.vcpu_irq_disabled = 1;
+	vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */
+	vcpu->arch.power_off = 0;
+	memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending));
+
+	if (vcpu->vcpu_id == 0)
+		memset(__va(addr), 0, 0x2000000);
+
+	return 0;
+}
+
+long kvm_sw64_get_vcb(struct file *filp, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+
+	if (vcpu->arch.vcb.migration_mark) {
+		unsigned long result = sw64_io_read(0, LONG_TIME) +
+			vcpu->arch.vcb.guest_longtime_offset;
+		vcpu->arch.vcb.guest_longtime = result;
+		vcpu->arch.vcb.guest_irqs_pending = vcpu->arch.irqs_pending[0];
+	}
+
+	if (copy_to_user((void __user *)arg, &(vcpu->arch.vcb), sizeof(struct vcpucb)))
+		return -EFAULT;
+
+	return 0;
+}
+
+long kvm_sw64_set_vcb(struct file *filp, unsigned long arg)
+{
+	unsigned long result;
+	struct kvm_vcpu *vcpu = filp->private_data;
+	struct vcpucb *kvm_vcb;
+
+	kvm_vcb = memdup_user((void __user *)arg, sizeof(*kvm_vcb));
+	if (IS_ERR(kvm_vcb))
+		return PTR_ERR(kvm_vcb);
+	memcpy(&(vcpu->arch.vcb), kvm_vcb, sizeof(struct vcpucb));
+	kfree(kvm_vcb);
+
+	if (vcpu->arch.vcb.migration_mark) {
+		/* the destination VM needs an updated vpcr */
+		vcpu->arch.vcb.vpcr = get_vpcr(vcpu, 0);
+		/* synchronize the longtime of source and destination */
+		if (vcpu->arch.vcb.soft_cid == 0) {
+			result = sw64_io_read(0, LONG_TIME);
+			vcpu->arch.vcb.guest_longtime_offset = vcpu->arch.vcb.guest_longtime - result;
+			longtime_offset = vcpu->arch.vcb.guest_longtime_offset;
+		} else
+			vcpu->arch.vcb.guest_longtime_offset = longtime_offset;
+
+		set_timer(vcpu, 200000000);
+		vcpu->arch.vcb.migration_mark = 0;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_KVM_MEMHOTPLUG
+void vcpu_mem_hotplug(struct kvm_vcpu *vcpu, unsigned long start_addr)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memory_slot *slot;
+	unsigned long start_pfn = start_addr >> PAGE_SHIFT;
+
+	kvm_for_each_memslot(slot, kvm_memslots(kvm)) {
+		if (start_pfn == slot->base_gfn) {
+			unsigned long *seg_pgd;
+			unsigned long num_of_entry = slot->npages >> 17;
+			unsigned long base_hpa = slot->arch.host_phys_addr;
+			unsigned long i;
+
+			seg_pgd = kvm->arch.seg_pgd + (start_pfn >> 17);
+			for (i = 0; i < num_of_entry; i++) {
+				*seg_pgd = base_hpa + (i << 30);
+				seg_pgd++;
+			}
+		}
+	}
+}
+#endif
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+		struct kvm_memory_slot *slot, gfn_t gfn_offset,
+		unsigned long mask)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+				struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void update_aptp(unsigned long pgd)
+{
+}
+
+static int __init kvm_core3_init(void)
+{
+	int i, ret;
+
+	bind_vcpu_init();
+
+	ret = vmem_init();
+	if (unlikely(ret))
+		goto out;
+
+	for (i = 0; i < NR_CPUS; i++)
+		last_vpn(i) = VPN_FIRST_VERSION;
+
+	ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	if (likely(!ret))
+		return 0;
+
+	vmem_exit();
+out:
+	bind_vcpu_exit();
+	return ret;
+}
+
+static void __exit kvm_core3_exit(void)
+{
+	kvm_exit();
+	vmem_exit();
+	bind_vcpu_exit();
+}
+
+module_init(kvm_core3_init);
+module_exit(kvm_core3_exit);
diff --git a/arch/sw_64/kvm/kvm_core4.c b/arch/sw_64/kvm/kvm_core4.c
new file mode 100644
index 0000000000000000000000000000000000000000..08d28a365a3b0d13e64c8e703371b5c52456dcba
--- /dev/null
+++ b/arch/sw_64/kvm/kvm_core4.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 - os kernel
+ * Author: fire3 yangzh
+ *         linhn
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include "trace.h"
+
+static unsigned long shtclock_offset;
+
+void update_aptp(unsigned long pgd)
+{
+	imemb();
+	write_csr_imb(pgd, CSR_APTP);
+}
+
+void kvm_sw64_update_vpn(struct kvm_vcpu *vcpu, unsigned long vpn)
+{
+	vcpu->arch.vcb.vpcr = vpn << 44;
+	vcpu->arch.vcb.dtb_vpcr = vpn;
+}
+
+void kvm_flush_tlb_all(void)
+{
+	tbivpn(-1, 0, 0);
+}
+
+int kvm_sw64_init_vm(struct kvm *kvm)
+{
+	return kvm_alloc_addtional_stage_pgd(kvm);
+}
+
+void kvm_sw64_destroy_vm(struct kvm *kvm)
+{
+	kvm_destroy_vcpus(kvm);
+}
+
+int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.has_run_once)
+		apt_unmap_vm(vcpu->kvm);
+
+	hrtimer_cancel(&vcpu->arch.hrt);
+	vcpu->arch.vcb.soft_cid = vcpu->vcpu_id;
+	vcpu->arch.vcb.vcpu_irq_disabled = 1;
+	vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */
+	vcpu->arch.power_off = 0;
+	memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending));
+
+	return 0;
+}
+
+long kvm_sw64_get_vcb(struct file *filp, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+
+	if (vcpu->arch.migration_mark)
+		vcpu->arch.shtclock = read_csr(CSR_SHTCLOCK) +
+			vcpu->arch.vcb.shtclock_offset;
+	if (copy_to_user((void __user *)arg, &(vcpu->arch.vcb), sizeof(struct vcpucb)))
+		return -EFAULT;
+
+	return 0;
+}
+
+long kvm_sw64_set_vcb(struct file *filp, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	struct vcpucb *kvm_vcb;
+
+	kvm_vcb = memdup_user((void __user *)arg, sizeof(*kvm_vcb));
+	if (IS_ERR(kvm_vcb))
+		return PTR_ERR(kvm_vcb);
+	memcpy(&(vcpu->arch.vcb), kvm_vcb, sizeof(struct vcpucb));
+	kfree(kvm_vcb);
+
+	if (vcpu->arch.migration_mark) {
+		/* synchronize the shtclock of source and destination */
+		if (vcpu->arch.vcb.soft_cid == 0)
+			shtclock_offset = vcpu->arch.shtclock - read_csr(CSR_SHTCLOCK);
+		vcpu->arch.vcb.shtclock_offset = shtclock_offset;
+		set_timer(vcpu, 200000000);
+		vcpu->arch.migration_mark = 0;
+	}
+	return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				const struct kvm_userspace_memory_region *mem,
+				enum kvm_mr_change change)
+{
+	return 0;
+}
+
+void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu)
+{
+}
+
+static int __init kvm_core4_init(void)
+{
+	int i, ret;
+
+	for (i = 0; i < NR_CPUS; i++)
+		last_vpn(i) = VPN_FIRST_VERSION;
+
+	ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	return ret;
+}
+
+static void __exit kvm_core4_exit(void)
+{
+	kvm_exit();
+}
+
+module_init(kvm_core4_init);
+module_exit(kvm_core4_exit);
diff --git a/arch/sw_64/kvm/kvm_timer.c b/arch/sw_64/kvm/kvm_timer.c
new file mode 100644
index 0000000000000000000000000000000000000000..895be63cd8d132b316b02388764744d863c36131
--- /dev/null
+++ b/arch/sw_64/kvm/kvm_timer.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 - os kernel
+ * Author: fire3 yangzh
+ */
+#include
+#include
+#include
+#include
+
+/*
+ * The Guest Clock.
+ *
+ * There are two sources of virtual interrupts. We saw one in lguest_user.c:
+ * the Launcher sending interrupts for virtual devices. The other is the Guest
+ * timer interrupt.
+ *
+ * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long
+ * until the next timer interrupt (in ticks). We use the high-resolution timer
+ * infrastructure to set a callback at that time.
+ *
+ * 0 means "turn off the clock".
+ */
+
+void set_timer(struct kvm_vcpu *vcpu, unsigned long delta)
+{
+	ktime_t expires;
+
+	if (unlikely(delta == 0)) {
+		/* Clock event device is shutting down. */
+		hrtimer_cancel(&vcpu->arch.hrt);
+		return;
+	}
+
+	/* Convert clock event device ticks to nanoseconds */
+	delta = delta * NSEC_PER_SEC;
+	do_div(delta, vcpu->arch.vtimer_freq);
+
+	/*
+	 * We use wallclock time here, so the Guest might not be running for
+	 * all the time between now and the timer interrupt it asked for. This
+	 * is almost always the right thing to do.
+	 */
+	expires = ktime_add_ns(ktime_get_real(), delta);
+	vcpu->arch.timer_next_event = expires;
+	hrtimer_start(&vcpu->arch.hrt, expires, HRTIMER_MODE_ABS);
+}
+
+/* And this is the routine when we want to set an interrupt for the Guest. */
+void set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+	/*
+	 * Next time the Guest runs, the core code will see if it can deliver
+	 * this interrupt.
+	 */
+	set_bit(irq, (vcpu->arch.irqs_pending));
+
+	/*
+	 * Make sure it sees it; it might be asleep (eg. halted), or running
+	 * the Guest right now, in which case kick_process() will knock it out.
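
set_timer() above converts guest ticks to nanoseconds as delta * NSEC_PER_SEC / vtimer_freq before arming the hrtimer. The same arithmetic in isolation; the 25 MHz virtual timer frequency is an assumed value for illustration::

  #include <stdio.h>

  #define NSEC_PER_SEC	1000000000UL

  int main(void)
  {
  	unsigned long vtimer_freq = 25000000UL;	/* assumed 25 MHz */
  	unsigned long ticks = 50000;		/* guest-requested delta */

  	/* same order as the kernel code: multiply first, then divide */
  	unsigned long ns = ticks * NSEC_PER_SEC / vtimer_freq;

  	printf("%lu ticks at %lu Hz = %lu ns (%.1f ms)\n",
  	       ticks, vtimer_freq, ns, ns / 1e6);
  	return 0;
  }

Note that multiplying first preserves precision but can overflow 64 bits for very large deltas; the kernel code shares that property.
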
+	 */
+	kvm_vcpu_kick(vcpu);
+}
+
+enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
+{
+	struct kvm_vcpu *vcpu;
+	ktime_t now, delta;
+
+	vcpu = container_of(timer, struct kvm_vcpu, arch.hrt);
+
+	now = ktime_get_real();
+
+	if (now < vcpu->arch.timer_next_event) {
+		delta = vcpu->arch.timer_next_event - now;
+		hrtimer_forward_now(timer, delta);
+		return HRTIMER_RESTART;
+	}
+
+	set_interrupt(vcpu, SW64_KVM_IRQ_TIMER);
+	return HRTIMER_NORESTART;
+}
diff --git a/arch/sw_64/kvm/mmio.c b/arch/sw_64/kvm/mmio.c
new file mode 100644
index 0000000000000000000000000000000000000000..21ad89722f9ae12f2246f88625966a718058c250
--- /dev/null
+++ b/arch/sw_64/kvm/mmio.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 - os kernel
+ * Author: fire3 yangzh
+ *         linhn
+ */
+#include
+#include
+#include
+#include
+
+static unsigned long mmio_read_buf(char *buf, unsigned int len)
+{
+	unsigned long data = 0;
+	union {
+		u16 hword;
+		u32 word;
+		u64 dword;
+	} tmp;
+
+	switch (len) {
+	case 1:
+		/* avoid sign extension of a plain char */
+		data = (u8)buf[0];
+		break;
+	case 2:
+		memcpy(&tmp.hword, buf, len);
+		data = tmp.hword;
+		break;
+	case 4:
+		memcpy(&tmp.word, buf, len);
+		data = tmp.word;
+		break;
+	case 8:
+		memcpy(&tmp.dword, buf, len);
+		data = tmp.dword;
+		break;
+	}
+
+	return data;
+}
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	unsigned long data;
+	unsigned int len;
+
+	if (!run->mmio.is_write) {
+		len = run->mmio.len;
+		if (len > sizeof(unsigned long))
+			return -EINVAL;
+
+		data = mmio_read_buf(run->mmio.data, len);
+		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
+	}
+
+	vcpu->stat.mmio_exits++;
+	vcpu->arch.regs.pc += 4;
+
+	return 0;
+}
+
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		struct hcall_args *hargs)
+{
+	int ret;
+
+#ifdef CONFIG_SUBARCH_C3B
+	run->mmio.phys_addr = hargs->arg1 & 0xfffffffffffffUL;
+	sw64_decode(vcpu, hargs->arg2, run);
+#elif defined(CONFIG_SUBARCH_C4)
+	run->mmio.phys_addr = read_csr(CSR_DVA) & 0xfffffffffffffUL;
+	sw64_decode(vcpu, 0, run);
+#endif
+	if (run->mmio.is_write)
+		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
+				run->mmio.len, run->mmio.data);
+	else
+		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
+				run->mmio.len, run->mmio.data);
+
+	if (!ret) {
+		/* We handled the access successfully in the kernel. */
+		kvm_handle_mmio_return(vcpu, run);
+		return 1;
+	}
+
+	run->exit_reason = KVM_EXIT_MMIO;
+	return 0;
+}
diff --git a/arch/sw_64/kvm/mmu.c b/arch/sw_64/kvm/mmu.c
new file mode 100644
index 0000000000000000000000000000000000000000..b0b492a4fbff22dd58d64872d6b6abb15365aede
--- /dev/null
+++ b/arch/sw_64/kvm/mmu.c
@@ -0,0 +1,1561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 - os kernel
+ * Author: lff
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
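
mmio_read_buf() in the mmio.c hunk above widens a 1/2/4/8-byte MMIO payload into a register-sized value in host byte order. A userspace equivalent of that widening, including the unsigned cast for the single-byte case::

  #include <stdio.h>
  #include <string.h>

  /* widen a 1/2/4/8-byte buffer into a register value, host byte order */
  static unsigned long read_buf(const char *buf, unsigned int len)
  {
  	unsigned long data = 0;
  	unsigned short hword;
  	unsigned int word;

  	switch (len) {
  	case 1:
  		data = (unsigned char)buf[0];	/* no sign extension */
  		break;
  	case 2:
  		memcpy(&hword, buf, 2);
  		data = hword;
  		break;
  	case 4:
  		memcpy(&word, buf, 4);
  		data = word;
  		break;
  	case 8:
  		memcpy(&data, buf, 8);
  		break;
  	}
  	return data;
  }

  int main(void)
  {
  	char buf[8] = { 0x78, 0x56, 0x34, 0x12 };

  	/* prints 0x12345678 on a little-endian host */
  	printf("%#lx\n", read_buf(buf, 4));
  	return 0;
  }
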
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#define KVM_APT_FLAG_LOGGING_ACTIVE (1UL << 1) + +static bool memslot_is_logging(struct kvm_memory_slot *memslot) +{ + return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); +} + +/* + * Return values of kvm_handle_mmio_page_fault and mmu.page_fault: + * RET_AF_RETRY: let CPU fault again on the address. + * RET_AF_EMULATE: mmio page fault, emulate the instruction directly. + * + * For kvm_handle_mmio_page_fault only: + * RET_AF_INVALID: the spte is invalid, let the real page fault path update it. + */ +enum { + RET_AF_RETRY = 0, + RET_AF_EMULATE = 1, + RET_AF_INVALID = 2, +}; + +/** + * apt_dissolve_pmd() - clear and flush huge PMD entry + * @kvm: pointer to kvm structure. + * @addr: IPA + * @pmd: pmd pointer for IPA + * + * Function clears a PMD entry, flushes TLBs. + */ +static void apt_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) +{ + int i; + + if (!pmd_trans_huge(*pmd)) + return; + + if (pmd_trans_cont(*pmd)) { + for (i = 0; i < CONT_PMDS; i++, pmd++) + pmd_clear(pmd); + } else + pmd_clear(pmd); + + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pmd)); +} + +/** + * apt_dissolve_pud() - clear and flush huge PUD entry + * @kvm: pointer to kvm structure. + * @addr: IPA + * @pud: pud pointer for IPA + * + * Function clears a PUD entry, flushes TLBs. + */ +static void apt_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) +{ + if (!pud_huge(*pudp)) + return; + + pud_clear(pudp); + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pudp)); +} + +static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, + int min, int max) +{ + void *page; + + BUG_ON(max > KVM_NR_MEM_OBJS); + if (cache->nobjs >= min) + return 0; + while (cache->nobjs < max) { + page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + return -ENOMEM; + cache->objects[cache->nobjs++] = page; + } + return 0; +} + +static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) +{ + while (mc->nobjs) + free_page((unsigned long)mc->objects[--mc->nobjs]); +} + +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) +{ + mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); +} + +static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) +{ + void *p; + + BUG_ON(!mc || !mc->nobjs); + p = mc->objects[--mc->nobjs]; + return p; +} + +static void unmap_apt_ptes(struct kvm *kvm, pmd_t *pmd, + phys_addr_t addr, phys_addr_t end) +{ + pte_t *pte, *start_pte; + struct page *ptr_page; + + start_pte = pte = pte_offset_kernel(pmd, addr); + do { + if (!pte_none(*pte)) { + /* Do we need WRITE_ONCE(pte, 0)? 
*/ + set_pte(pte, __pte(0)); + put_page(virt_to_page(pte)); + } + } while (pte++, addr += PAGE_SIZE, addr != end); + + ptr_page = virt_to_page(start_pte); + if (page_count(ptr_page) == 1) { + pte_t *pte_table = pte_offset_kernel(pmd, 0); + + pmd_clear(pmd); + free_page((unsigned long)pte_table); + put_page(virt_to_page(pmd)); + } +} + +static void unmap_apt_pmds(struct kvm *kvm, pud_t *pud, + phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t next; + pmd_t *pmd, *start_pmd; + struct page *ptr_page; + int i; + + start_pmd = pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (!pmd_none(*pmd)) { + if (pmd_trans_huge(*pmd)) { + if (pmd_trans_cont(*pmd)) { + for (i = 0; i < CONT_PMDS; i++, pmd++) + pmd_clear(pmd); + } else + pmd_clear(pmd); + /* Do we need flush tlb???? edited by lff */ + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pmd)); + } else { + unmap_apt_ptes(kvm, pmd, addr, next); + } + } + } while (pmd++, addr = next, addr != end); + + ptr_page = virt_to_page(start_pmd); + if (page_count(ptr_page) == 1) { + pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0UL); + + pud_clear(pud); + free_page((unsigned long)pmd_table); + put_page(virt_to_page(pud)); + } +} + +static void unmap_apt_puds(struct kvm *kvm, p4d_t *p4d, + phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t next; + pud_t *pud, *start_pud; + struct page *ptr_page; + + start_pud = pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (!pud_none(*pud)) { + if (pud_huge(*pud)) { + pud_clear(pud); + /* Do we need flush tlb???? edited by lff */ + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pud)); + } else { + unmap_apt_pmds(kvm, pud, addr, next); + } + } + } while (pud++, addr = next, addr != end); + + ptr_page = virt_to_page(start_pud); + if (page_count(ptr_page) == 1) { + pud_t *pud_table __maybe_unused = pud_offset(p4d, 0UL); + + p4d_clear(p4d); + kvm_flush_remote_tlbs(kvm); + free_page((unsigned long)pud_table); + put_page(virt_to_page(p4d)); + } +} + +/** + * unmap_apt_range -- Clear addtional page table entries to unmap a range + * @kvm: The VM pointer + * @start: The intermediate physical base address of the range to unmap + * @size: The size of the area to unmap + * + * Clear a range of apt mappings, lowering the various ref-counts. Must + * be called while holding mmu_lock (unless for freeing the apt pgd before + * destroying the VM), otherwise another faulting VCPU may come in and mess + * with things behind our backs. + */ +static void unmap_apt_range(struct kvm *kvm, phys_addr_t start, u64 size) +{ + pgd_t *pgd; + p4d_t *p4d; + phys_addr_t addr = start, end = start + size; + phys_addr_t next; + + assert_spin_locked(&kvm->mmu_lock); + WARN_ON(size & ~PAGE_MASK); + + pgd = kvm->arch.pgd + pgd_index(addr); + p4d = p4d_offset(pgd, addr); + do { + /* + * Make sure the page table is still active, as another thread + * could have possibly freed the page table, while we released + * the lock. + */ + if (!READ_ONCE(kvm->arch.pgd)) + break; + next = p4d_addr_end(addr, end); + if (!p4d_none(*p4d)) + unmap_apt_puds(kvm, p4d, addr, next); + /* + * If the range is too large, release the kvm->mmu_lock + * to prevent starvation and lockup detector warnings. 
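
All the walkers above step with next = pmd_addr_end(addr, end) (and its pud/p4d relatives), which clamps each iteration to the next table boundary. The boundary arithmetic in isolation, assuming 8 KiB pages and 1024 entries per table, giving an 8 MiB PMD span (illustrative values, not from this patch)::

  #include <stdio.h>

  #define PMD_SHIFT	23			/* assumed 8 MiB span per PMD */
  #define PMD_SIZE	(1UL << PMD_SHIFT)
  #define PMD_MASK	(~(PMD_SIZE - 1))

  /* step to the next PMD boundary, but never past 'end' */
  static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
  {
  	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

  	return boundary < end ? boundary : end;
  }

  int main(void)
  {
  	unsigned long addr = 0x3f00000UL;	/* start inside an entry */
  	unsigned long end  = 0x8000000UL;

  	while (addr != end) {
  		unsigned long next = pmd_addr_end(addr, end);

  		/* first chunk is partial, the rest are full 8 MiB spans */
  		printf("chunk %#lx..%#lx\n", addr, next);
  		addr = next;
  	}
  	return 0;
  }
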
+ */ + if (next != end) + cond_resched_lock(&kvm->mmu_lock); + } while (pgd++, addr = next, addr != end); +} + +static void apt_unmap_memslot(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + hva_t hva = memslot->userspace_addr; + phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; + phys_addr_t size = PAGE_SIZE * memslot->npages; + hva_t reg_end = hva + size; + + /* + * A memory region could potentially cover multiple VMAs, and any holes + * between them, so iterate over all of them to find out if we should + * unmap any of them. + * + * +--------------------------------------------+ + * +---------------+----------------+ +----------------+ + * | : VMA 1 | VMA 2 | | VMA 3 : | + * +---------------+----------------+ +----------------+ + * | memory region | + * +--------------------------------------------+ + */ + do { + struct vm_area_struct *vma = find_vma(current->mm, hva); + hva_t vm_start, vm_end; + + if (!vma || vma->vm_start >= reg_end) + break; + + /* + * Take the intersection of this VMA with the memory region + */ + vm_start = max(hva, vma->vm_start); + vm_end = min(reg_end, vma->vm_end); + + if (!(vma->vm_flags & VM_PFNMAP)) { + gpa_t gpa = addr + (vm_start - memslot->userspace_addr); + + unmap_apt_range(kvm, gpa, vm_end - vm_start); + } + hva = vm_end; + } while (hva < reg_end); +} + +/** + * apt_unmap_vm - Unmap Additional Stage RAM mappings + * @kvm: The struct kvm pointer + * + * Go through the memregions and unmap any reguler RAM + * backing memory already mapped to the VM. + */ +void apt_unmap_vm(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int idx; + + idx = srcu_read_lock(&kvm->srcu); + down_read(¤t->mm->mmap_lock); + spin_lock(&kvm->mmu_lock); + + slots = kvm_memslots(kvm); + kvm_for_each_memslot(memslot, slots) + apt_unmap_memslot(kvm, memslot); + spin_unlock(&kvm->mmu_lock); + up_read(¤t->mm->mmap_lock); + srcu_read_unlock(&kvm->srcu, idx); +} + +static pud_t *apt_get_pud(pgd_t *pgd, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr) +{ + p4d_t *p4d; + pud_t *pud; + + pgd += pgd_index(addr); + if (pgd_none(*pgd)) { + /* Not used on SW64 yet */ + VM_BUG_ON(pgd); + return NULL; + } + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) { + if (!cache) + return NULL; + pud = mmu_memory_cache_alloc(cache); + p4d_populate(NULL, p4d, pud); + get_page(virt_to_page(p4d)); + } + return pud_offset(p4d, addr); +} + +static pmd_t *apt_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, unsigned long sz) +{ + pud_t *pud; + pmd_t *pmd; + + pud = apt_get_pud(kvm->arch.pgd, cache, addr); + if (!pud || pud_huge(*pud)) + return NULL; + + if (pud_none(*pud)) { + if (!cache) + return NULL; + pmd = mmu_memory_cache_alloc(cache); + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + } + if (sz == CONT_PMD_SIZE) + addr &= CONT_PMD_MASK; + + return pmd_offset(pud, addr); +} + +static bool kvm_is_write_fault(unsigned long access_type) +{ + if (access_type == AF_WRITE_ACCESS_TYPE) + return true; + + return false; +} + +static bool kvm_is_exec_fault(unsigned long access_type) +{ + if (access_type == AF_EXEC_ACCESS_TYPE) + return true; + + return false; +} +/** + * apt_wp_ptes - write protect PMD range + * @pmd: pointer to pmd entry + * @addr: range start address + * @end: range end address + */ +static void apt_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) +{ + pte_t *pte; + + pte = pte_offset_kernel(pmd, addr); + do { + if (!pte_none(*pte)) { + if (!kvm_aptpte_readonly(pte)) + 
kvm_set_aptpte_readonly(pte); + } + } while (pte++, addr += PAGE_SIZE, addr != end); +} + +/** + * apt_wp_pmds - write protect PUD range + * @pud: pointer to pud entry + * @addr: range start address + * @end: range end address + */ +static void apt_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) +{ + pmd_t *pmd; + phys_addr_t next; + + pmd = pmd_offset(pud, addr); + + do { + next = pmd_addr_end(addr, end); + if (!pmd_none(*pmd)) { + if (pmd_trans_huge(*pmd)) { + if (!kvm_aptpmd_readonly(pmd)) + kvm_set_aptpmd_readonly(pmd); + } else { + apt_wp_ptes(pmd, addr, next); + } + } + } while (pmd++, addr = next, addr != end); +} + +/** + * apt_wp_puds - write protect PGD range + * @pgd: pointer to pgd entry + * @addr: range start address + * @end: range end address + * + * Process PUD entries, for a huge PUD we cause a panic. + */ +static void apt_wp_puds(p4d_t *p4d, phys_addr_t addr, phys_addr_t end) +{ + pud_t *pud; + phys_addr_t next; + + pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (!pud_none(*pud)) { + if (pud_huge(*pud)) { + if (!kvm_aptpud_readonly(pud)) + kvm_set_aptpud_readonly(pud); + } else { + /* TODO:PUD not supported, revisit later if supported */ +// BUG_ON(pud_trans_huge(*pud)); + apt_wp_pmds(pud, addr, next); + } + } + } while (pud++, addr = next, addr != end); +} + +/** + * apt_wp_range() - write protect apt memory region range + * @kvm: The KVM pointer + * @addr: Start address of range + * @end: End address of range + */ +static void apt_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) +{ + pgd_t *pgd; + p4d_t *p4d; + phys_addr_t next; + + pgd = kvm->arch.pgd + pgd_index(addr); + p4d = p4d_offset(pgd, addr); + + do { + /* + * Release kvm_mmu_lock periodically if the memory region is + * large. Otherwise, we may see kernel panics with + * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, + * CONFIG_LOCKDEP. Additionally, holding the lock too long + * will also starve other vCPUs. We have to also make sure + * that the page tables are not freed while we released + * the lock. + */ + cond_resched_lock(&kvm->mmu_lock); + if (!READ_ONCE(kvm->arch.pgd)) + break; + next = p4d_addr_end(addr, end); + if (p4d_present(*p4d)) + apt_wp_puds(p4d, addr, next); + } while (p4d++, addr = next, addr != end); +} + +/** + * kvm_mmu_wp_memory_region() - write protect apt entries for memory slot + * @kvm: The KVM pointer + * @slot: The memory slot to write protect + * + * Called to start logging dirty pages after memory region + * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns + * all present PMD and PTEs are write protected in the memory region. + * Afterwards read of dirty page log can be called. + * + * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired, + * serializing operations for VM memory regions. 
+ */
+void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
+	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+	apt_wp_range(kvm, start, end);
+	spin_unlock(&kvm->mmu_lock);
+	/* other vCPUs must be told to flush their TLBs, via the request mechanism */
+	kvm_flush_remote_tlbs(kvm);
+}
+
+void kvm_mark_migration(struct kvm *kvm, int mark)
+{
+	struct kvm_vcpu *vcpu;
+	unsigned long cpu;
+
+	kvm_for_each_vcpu(cpu, vcpu, kvm)
+		vcpu->arch.migration_mark = mark;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *old,
+				const struct kvm_memory_slot *new,
+				enum kvm_mr_change change)
+{
+	/*
+	 * At this point the memslot has been committed and there is an
+	 * allocated dirty_bitmap[], dirty pages will be tracked while the
+	 * memory slot is write protected.
+	 */
+	if (change == KVM_MR_FLAGS_ONLY && (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
+	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+		kvm_mark_migration(kvm, 1);
+		kvm_mmu_wp_memory_region(kvm, new->id);
+	}
+	/* If dirty logging has been stopped, do nothing for now. */
+	if ((change != KVM_MR_DELETE)
+	    && (old->flags & KVM_MEM_LOG_DIRTY_PAGES)
+	    && (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))) {
+		kvm_mark_migration(kvm, 0);
+		return;
+	}
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+				struct kvm_memory_slot *slot)
+{
+	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+//	flush_apt_tlbs(kvm);
+	unmap_apt_range(kvm, gpa, size);
+	spin_unlock(&kvm->mmu_lock);
+}
+
+/**
+ * kvm_alloc_addtional_stage_pgd - allocate level-1 table for additional
+ * stage translation.
+ * @kvm: The KVM struct pointer for the VM.
+ *
+ * Allocates only the additional stage HW PGD level table(s) (can support full
+ * 48-bit input addresses). Clears the allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_addtional_stage_pgd(struct kvm *kvm)
+{
+	pgd_t *pgd;
+
+	if (kvm->arch.pgd != NULL) {
+		kvm_err("kvm_arch already initialized?\n");
+		return -EINVAL;
+	}
+
+	/* Allocate the HW PGD, making sure that each page gets its own refcount */
+	pgd = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
+	if (!pgd)
+		return -ENOMEM;
+
+	kvm->arch.pgd = pgd;
+	return 0;
+}
+
+/**
+ * kvm_free_apt_pgd - free all apt tables
+ * @kvm: The KVM struct pointer for the VM.
+ *
+ * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
+ * underlying level-2 and level-3 tables before freeing the actual level-1 table
+ * and setting the struct pointer to NULL.
+ */ +void kvm_free_apt_pgd(struct kvm *kvm) +{ + void *pgd = NULL; + + spin_lock(&kvm->mmu_lock); + if (kvm->arch.pgd) { + unmap_apt_range(kvm, 0, KVM_PHYS_SIZE); + pgd = READ_ONCE(kvm->arch.pgd); + kvm->arch.pgd = NULL; + } + spin_unlock(&kvm->mmu_lock); + + /* Free the HW pgd, one page at a time */ + if (pgd) + free_pages_exact(pgd, PAGE_SIZE); +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ + kvm_free_apt_pgd(kvm); +} + +static void kvm_send_hwpoison_signal(unsigned long address, + struct vm_area_struct *vma) +{ + kernel_siginfo_t info; + + clear_siginfo(&info); + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_MCEERR_AR; + info.si_addr = (void __user *)address; + + if (is_vm_hugetlb_page(vma)) + info.si_addr_lsb = huge_page_shift(hstate_vma(vma)); + else + info.si_addr_lsb = PAGE_SHIFT; + + send_sig_info(SIGBUS, &info, current); +} + +static bool fault_supports_apt_huge_mapping(struct kvm_memory_slot *memslot, + unsigned long hva, + unsigned long map_size) +{ + gpa_t gpa_start; + hva_t uaddr_start, uaddr_end; + size_t size; + + /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */ + if (map_size == PAGE_SIZE) + return true; + + size = memslot->npages * PAGE_SIZE; + + gpa_start = memslot->base_gfn << PAGE_SHIFT; + + uaddr_start = memslot->userspace_addr; + uaddr_end = uaddr_start + size; + + /* + * Pages belonging to memslots that don't have the same alignment + * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2 + * PMD/PUD entries, because we'll end up mapping the wrong pages. + * + * Consider a layout like the following: + * + * memslot->userspace_addr: + * +-----+--------------------+--------------------+---+ + * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| + * +-----+--------------------+--------------------+---+ + * + * memslot->base_gfn << PAGE_SHIFT: + * +---+--------------------+--------------------+-----+ + * |abc|def Stage-2 block | Stage-2 block |tvxyz| + * +---+--------------------+--------------------+-----+ + * + * If we create those stage-2 blocks, we'll end up with this incorrect + * mapping: + * d -> f + * e -> g + * f -> h + */ + if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) + return false; + + /* + * Next, let's make sure we're not trying to map anything not covered + * by the memslot. This means we have to prohibit block size mappings + * for the beginning and end of a non-block aligned and non-block sized + * memory slot (illustrated by the head and tail parts of the + * userspace view above containing pages 'abcde' and 'xyz', + * respectively). + * + * Note that it doesn't matter if we do the check using the + * userspace_addr or the base_gfn, as both are equally aligned (per + * the check above) and equally sized. + */ + return (hva & ~(map_size - 1)) >= uaddr_start && + (hva & ~(map_size - 1)) + map_size <= uaddr_end; +} + +/* + * apt_get_leaf_entry - walk the stage2 VM page tables and return + * true if a valid and present leaf-entry is found. A pointer to the + * leaf-entry is returned in the appropriate level variable - pudpp, + * pmdpp, ptepp. 
+ */ +static bool apt_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, + pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp) +{ + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + + *pudpp = NULL; + *pmdpp = NULL; + *ptepp = NULL; + + pudp = apt_get_pud(kvm->arch.pgd, NULL, addr); + if (!pudp || pud_none(*pudp) || !pud_present(*pudp)) + return false; + + if (pud_huge(*pudp)) { + *pudpp = pudp; + return true; + } + + pmdp = pmd_offset(pudp, addr); + if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp)) + return false; + + if (pmd_trans_huge(*pmdp)) { + *pmdpp = pmdp; + return true; + } + + ptep = pte_offset_kernel(pmdp, addr); + if (!ptep || pte_none(*ptep) || !pte_present(*ptep)) + return false; + + *ptepp = ptep; + return true; +} + +static bool apt_is_exec(struct kvm *kvm, phys_addr_t addr) +{ + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + bool found; + + found = apt_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep); + if (!found) + return false; + + if (pudp) + return kvm_pud_exec(pudp); + else if (pmdp) + return kvm_pmd_exec(pmdp); + else + return kvm_pte_exec(ptep); +} + +static int apt_set_pte_fast(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pte_t *new_pte, + unsigned long flags) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte, old_pte; + bool logging_active = flags & KVM_APT_FLAG_LOGGING_ACTIVE; + int inv_level = ((read_csr(CSR_AS_INFO)) >> AF_INV_LEVEL_SHIFT) & AF_INV_LEVEL_MASK; + unsigned long inv_hpa = read_csr(CSR_AS_INFO) & AF_ENTRY_ADDR_MASK; + + VM_BUG_ON(logging_active && !cache); + + if (inv_level == 1) { + pud = (pud_t *)(inv_hpa | PAGE_OFFSET); + goto find_pud; + } else if (inv_level == 2) { + pmd = (pmd_t *)(inv_hpa | PAGE_OFFSET); + goto find_pmd; + } else if (inv_level == 3) { + pte = (pte_t *)(inv_hpa | PAGE_OFFSET); + goto find_pte; + } + + /* Create addtional page table mapping - Levels 0 and 1 */ + pud = apt_get_pud(kvm->arch.pgd, cache, addr); + if (!pud) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PUD, then continue + * on to allocate page. + */ + if (logging_active) + apt_dissolve_pud(kvm, addr, pud); + +find_pud: + if (pud_none(*pud)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pmd = mmu_memory_cache_alloc(cache); + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + } + + pmd = pmd_offset(pud, addr); + if (!pmd) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PMD, then continue on to + * allocate page. + */ + if (logging_active) + apt_dissolve_pmd(kvm, addr, pmd); + +find_pmd: + /* Create stage-2 page mappings - Level 2 */ + if (pmd_none(*pmd)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pte = mmu_memory_cache_alloc(cache); + pmd_populate_kernel(NULL, pmd, pte); + get_page(virt_to_page(pmd)); + } + + pte = pte_offset_kernel(pmd, addr); + +find_pte: + /* Create 2nd stage page table mapping - Level 3 */ + old_pte = *pte; + + /* new pte should be readonly? */ +// *new_pte = pte_wrprotect(*new_pte); + + if (pte_present(old_pte)) { + /* Skip page table update if there is no change */ + if (pte_val(old_pte) == pte_val(*new_pte)) + return 0; + + /* Do we need WRITE_ONCE(pte, 0)? */ + set_pte(pte, __pte(0)); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pte)); + } + + /* Do we need WRITE_ONCE(pte, new_pte)? 
*/ + set_pte(pte, *new_pte); + return 0; +} + +static int apt_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pte_t *new_pte, + unsigned long flags) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte, old_pte; + bool logging_active = flags & KVM_APT_FLAG_LOGGING_ACTIVE; + + VM_BUG_ON(logging_active && !cache); + + /* Create addtional page table mapping - Levels 0 and 1 */ + pud = apt_get_pud(kvm->arch.pgd, cache, addr); + if (!pud) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PUD, then continue + * on to allocate page. + */ + if (logging_active) + apt_dissolve_pud(kvm, addr, pud); + + if (pud_none(*pud)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pmd = mmu_memory_cache_alloc(cache); + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + } + + pmd = pmd_offset(pud, addr); + if (!pmd) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PMD, then continue on to + * allocate page. + */ + if (logging_active) + apt_dissolve_pmd(kvm, addr, pmd); + + /* Create stage-2 page mappings - Level 2 */ + if (pmd_none(*pmd)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pte = mmu_memory_cache_alloc(cache); + pmd_populate_kernel(NULL, pmd, pte); + get_page(virt_to_page(pmd)); + } + + pte = pte_offset_kernel(pmd, addr); + + /* Create 2nd stage page table mapping - Level 3 */ + old_pte = *pte; + + /* new pte should be readonly? */ +// *new_pte = pte_wrprotect(*new_pte); + + if (pte_present(old_pte)) { + /* Skip page table update if there is no change */ + if (pte_val(old_pte) == pte_val(*new_pte)) + return 0; + + /* Do we need WRITE_ONCE(pte, 0)? */ + set_pte(pte, __pte(0)); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pte)); + } + + /* Do we need WRITE_ONCE(pte, new_pte)? */ + set_pte(pte, *new_pte); + return 0; +} + + + +static int apt_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache + *cache, phys_addr_t addr, const pmd_t *new_pmd, unsigned long sz) +{ + pmd_t *pmd, old_pmd, *ori_pmd; + int i; +retry: + pmd = apt_get_pmd(kvm, cache, addr, sz); + VM_BUG_ON(!pmd); + ori_pmd = pmd; + old_pmd = *pmd; + if (pmd_present(old_pmd)) { + /* + * If we already have PTE level mapping for this block, + * we must unmap it to avoid inconsistent TLB state and + * leaking the table page. We could end up in this situation + * if the memory slot was marked for dirty logging and was + * reverted, leaving PTE level mappings for the pages accessed + * during the period. So, unmap the PTE level mapping for this + * block and retry, as we could have released the upper level + * table in the process. + * + * Normal THP split/merge follows mmu_notifier callbacks and do + * get handled accordingly. + */ + if (!pmd_trans_huge(old_pmd)) { + unmap_apt_range(kvm, addr & PMD_MASK, PMD_SIZE); + goto retry; + } + /* + * Multiple vcpus faulting on the same PMD entry, can + * lead to them sequentially updating the PMD with the + * same value. Following the break-before-make + * (pmd_clear() followed by tlb_flush()) process can + * hinder forward progress due to refaults generated + * on missing translations. + * + * Skip updating the page table if the entry is + * unchanged. + */ + if (pmd_val(old_pmd) == pmd_val(*new_pmd)) + return 0; + + /* + * Mapping in huge pages should only happen through a + * fault. 
If a page is merged into a transparent huge + * page, the individual subpages of that huge page + * should be unmapped through MMU notifiers before we + * get here. + * + * Merging of CompoundPages is not supported; they + * should become splitting first, unmapped, merged, + * and mapped back in on-demand. + */ + VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); + + if (sz == CONT_PMD_SIZE) { + for (i = 0; i < CONT_PMDS; i++, pmd++) + pmd_clear(pmd); + } else + pmd_clear(pmd); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pmd)); + } + + /* Do we need WRITE_ONCE(pmd, new_pmd)? */ + if (sz == CONT_PMD_SIZE) { + for (i = 0; i < CONT_PMDS; i++, ori_pmd++) + set_pmd(ori_pmd, *new_pmd); + } else + set_pmd(pmd, *new_pmd); + return 0; +} + +static int apt_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pud_t *new_pudp) +{ + pud_t *pudp, old_pud; + +retry: + pudp = apt_get_pud(kvm->arch.pgd, cache, addr); + VM_BUG_ON(!pudp); + + old_pud = *pudp; + + /* + * A large number of vcpus faulting on the same stage 2 entry, + * can lead to a refault due to the stage2_pud_clear()/tlb_flush(). + * Skip updating the page tables if there is no change. + */ + if (pud_val(old_pud) == pud_val(*new_pudp)) + return 0; + + if (pud_present(old_pud)) { + /* + * If we already have table level mapping for this block, unmap + * the range for this block and retry. + */ + if (!pud_huge(old_pud)) { + unmap_apt_range(kvm, addr & PUD_MASK, PUD_SIZE); + goto retry; + } + +// WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp)); + pud_clear(pudp); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pudp)); + } + + set_pud(pudp, *new_pudp); + return 0; +} + +static unsigned long +transparent_hugepage_adjust(struct kvm_memory_slot *memslot, + unsigned long hva, kvm_pfn_t *pfnp, + phys_addr_t *gpap) +{ + kvm_pfn_t pfn = *pfnp; + struct page *page = pfn_to_page(pfn); + + /* + * Make sure the adjustment is done only for THP pages. Also make + * sure that the HVA and IPA are sufficiently aligned and that the + * block map is contained within the memslot. + */ + if (!PageHuge(page) && PageTransCompoundMap(page) && + fault_supports_apt_huge_mapping(memslot, hva, PMD_SIZE)) { + /* + * The address we faulted on is backed by a transparent huge + * page. However, because we map the compound huge page and + * not the individual tail page, we need to transfer the + * refcount to the head page. We have to be careful that the + * THP doesn't start to split while we are adjusting the + * refcounts. + * + * We are sure this doesn't happen, because mmu_notifier_retry + * was successful and we are holding the mmu_lock, so if this + * THP is trying to split, it will be blocked in the mmu + * notifier before touching any of the pages, specifically + * before being able to call __split_huge_page_refcount(). + * + * We can therefore safely transfer the refcount from PG_tail + * to PG_head and switch the pfn from a tail page to the head + * page accordingly. 
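
The adjustment that follows rounds both the faulting IPA and the pfn down to the start of the huge mapping. Numerically, assuming 8 KiB pages and 1024 PTEs per PMD (an 8 MiB block; illustrative values only)::

  #include <stdio.h>

  #define PAGE_SHIFT	13				/* assumed 8 KiB pages */
  #define PTRS_PER_PMD	1024UL				/* assumed entries per table */
  #define PMD_SIZE	(PTRS_PER_PMD << PAGE_SHIFT)	/* 8 MiB */
  #define PMD_MASK	(~(PMD_SIZE - 1))

  int main(void)
  {
  	unsigned long gpa = 0x12345678UL;	/* faulting guest address */
  	unsigned long pfn = gpa >> PAGE_SHIFT;	/* identity mapping, for demo */

  	/* round both down to the head of the 8 MiB block, as the code does */
  	printf("gpa %#lx -> block %#lx\n", gpa, gpa & PMD_MASK);
  	printf("pfn %#lx -> head %#lx\n", pfn, pfn & ~(PTRS_PER_PMD - 1));
  	return 0;
  }
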
+ */ + *gpap &= PMD_MASK; + kvm_release_pfn_clean(pfn); + pfn &= ~(PTRS_PER_PMD - 1); + kvm_get_pfn(pfn); + *pfnp = pfn; + return PMD_SIZE; + } + + return PAGE_SIZE; +} + +static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_gpa, + struct kvm_memory_slot *memslot, unsigned long hva, + unsigned long fault_status) +{ + int ret; + bool write_fault, exec_fault, writable, force_pte = false; + unsigned long mmu_seq; + gfn_t gfn = fault_gpa >> PAGE_SHIFT; + struct kvm *kvm = vcpu->kvm; + struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; + struct vm_area_struct *vma; + kvm_pfn_t pfn; + pgprot_t mem_type = PAGE_READONLY; + bool logging_active = memslot_is_logging(memslot); + unsigned long vma_pagesize, flags = 0; + unsigned long as_info, access_type; + unsigned int vma_shift; + + as_info = read_csr(CSR_AS_INFO); + access_type = (as_info >> AF_ACCESS_TYPE_SHIFT) & AF_ACCESS_TYPE_MASK; + write_fault = kvm_is_write_fault(access_type); + exec_fault = kvm_is_exec_fault(access_type); + VM_BUG_ON(write_fault && exec_fault); + + if (fault_status == AF_STATUS_FOR) { + kvm_err("Unexpected APT read permission error\n"); + return -EFAULT; + } + + /* Let's check if we will get back a huge page backed by hugetlbfs */ + down_read(¤t->mm->mmap_lock); + vma = find_vma_intersection(current->mm, hva, hva + 1); + if (unlikely(!vma)) { + kvm_err("Failed to find VMA for hva 0x%lx\n", hva); + up_read(¤t->mm->mmap_lock); + return -EFAULT; + } + + if (is_vm_hugetlb_page(vma)) + vma_shift = huge_page_shift(hstate_vma(vma)); + else + vma_shift = PAGE_SHIFT; + + vma_pagesize = 1ULL << vma_shift; + if (logging_active || (vma->vm_flags & VM_PFNMAP) || + !fault_supports_apt_huge_mapping(memslot, hva, vma_pagesize)) { + force_pte = true; + vma_pagesize = PAGE_SIZE; + } + + if (vma_pagesize == PMD_SIZE || vma_pagesize == CONT_PMD_SIZE || vma_pagesize == PUD_SIZE) + gfn = (fault_gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; + up_read(¤t->mm->mmap_lock); + /* We need minimum second+third level pages */ + ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES, + KVM_NR_MEM_OBJS); + if (ret) + return ret; + + mmu_seq = vcpu->kvm->mmu_notifier_seq; + /* + * Ensure the read of mmu_notifier_seq happens before we call + * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk + * the page we just got a reference to gets unmapped before we have a + * chance to grab the mmu_lock, which ensure that if the page gets + * unmapped afterwards, the call to kvm_unmap_hva will take it away + * from us again properly. This smp_rmb() interacts with the smp_wmb() + * in kvm_mmu_notifier_invalidate_. + */ + smp_rmb(); + + pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); + if (pfn == KVM_PFN_ERR_HWPOISON) { + kvm_send_hwpoison_signal(hva, vma); + return 0; + } + if (is_error_noslot_pfn(pfn)) + return -EFAULT; + + if (logging_active) { + /* + * Faults on pages in a memslot with logging enabled + * should not be mapped with huge pages (it introduces churn + * and performance degradation), so force a pte mapping. + */ + flags |= KVM_APT_FLAG_LOGGING_ACTIVE; + + /* + * Only actually map the page as writable if this was a write + * fault. + */ + if (!write_fault) + writable = false; + } + + spin_lock(&kvm->mmu_lock); + if (mmu_notifier_retry(kvm, mmu_seq)) + goto out_unlock; + + /* + * If we are not forced to use page mapping, check if we are + * backed by a THP and thus use block mapping if possible. 
+ */ + if (vma_pagesize == PAGE_SIZE && !force_pte) { + vma_pagesize = transparent_hugepage_adjust(memslot, hva, + &pfn, &fault_gpa); + } + + if (vma_pagesize == PUD_SIZE) { + pud_t new_pud = pfn_pud(pfn, mem_type); + + new_pud = pud_mkhuge(new_pud); + + if (writable) { + new_pud = kvm_pud_mkwrite(new_pud); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pud = kvm_pud_mkexec(new_pud); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pud = kvm_pud_mkexec(new_pud); + } + + ret = apt_set_pud_huge(kvm, memcache, fault_gpa, &new_pud); + } else if (vma_pagesize == CONT_PMD_SIZE) { + pmd_t new_pmd = pfn_pmd(pfn, mem_type); + + new_pmd = pmd_mkhuge(new_pmd); + new_pmd = pmd_mkcont(new_pmd); + + if (writable) { + new_pmd = kvm_pmd_mkwrite(new_pmd); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pmd = kvm_pmd_mkexec(new_pmd); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pmd = kvm_pmd_mkexec(new_pmd); + } + + ret = apt_set_pmd_huge(kvm, memcache, fault_gpa, &new_pmd, vma_pagesize); + } else if (vma_pagesize == PMD_SIZE) { + pmd_t new_pmd = pfn_pmd(pfn, mem_type); + + new_pmd = pmd_mkhuge(new_pmd); + + if (writable) { + new_pmd = kvm_pmd_mkwrite(new_pmd); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pmd = kvm_pmd_mkexec(new_pmd); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pmd = kvm_pmd_mkexec(new_pmd); + } + + ret = apt_set_pmd_huge(kvm, memcache, fault_gpa, &new_pmd, vma_pagesize); + } else { + pte_t new_pte = pfn_pte(pfn, mem_type); + + if (writable) { + new_pte = kvm_pte_mkwrite(new_pte); + kvm_set_pfn_dirty(pfn); + mark_page_dirty(kvm, gfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pte = kvm_pte_mkexec(new_pte); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pte = kvm_pte_mkexec(new_pte); + } + + ret = apt_set_pte_fast(kvm, memcache, fault_gpa, &new_pte, flags); + if (!ret) + goto out_unlock; + } + +out_unlock: + spin_unlock(&kvm->mmu_lock); + kvm_set_pfn_accessed(pfn); + kvm_release_pfn_clean(pfn); + return ret; +} + +/** + * kvm_handle_guest_abort - handles all 2nd stage aborts + * @vcpu: the VCPU pointer + * @run: the kvm_run structure + * + * Any abort that gets to the host is almost guaranteed to be caused by a + * missing second stage translation table entry, which can mean that either the + * guest simply needs more memory and we must allocate an appropriate page or it + * can mean that the guest tried to access I/O memory, which is emulated by user + * space. The distinction is based on the IPA causing the fault and whether this + * memory region has been registered as standard RAM by user space. 
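
The CONFIG_SUBARCH_C4 handler below unpacks the access type, invalidation level and fault status from the AS_INFO CSR with shift-and-mask pairs. A toy decoder of the same shape; the field positions and widths here are placeholders for illustration, not the real CSR layout::

  #include <stdio.h>

  /* placeholder layout, purely for illustration */
  #define AF_ACCESS_TYPE_SHIFT	52
  #define AF_ACCESS_TYPE_MASK	0x3UL
  #define AF_INV_LEVEL_SHIFT	48
  #define AF_INV_LEVEL_MASK	0x3UL
  #define AF_FAULT_STATUS_SHIFT	56
  #define AF_FAULT_STATUS_MASK	0xfUL

  int main(void)
  {
  	unsigned long as_info = (2UL << AF_ACCESS_TYPE_SHIFT) |	/* write */
  				(3UL << AF_INV_LEVEL_SHIFT) |	/* pte level */
  				(1UL << AF_FAULT_STATUS_SHIFT);

  	printf("access=%lu inv_level=%lu status=%lu\n",
  	       (as_info >> AF_ACCESS_TYPE_SHIFT) & AF_ACCESS_TYPE_MASK,
  	       (as_info >> AF_INV_LEVEL_SHIFT) & AF_INV_LEVEL_MASK,
  	       (as_info >> AF_FAULT_STATUS_SHIFT) & AF_FAULT_STATUS_MASK);
  	return 0;
  }
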
+ */ +#ifdef CONFIG_SUBARCH_C4 +int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + unsigned long as_info; /* the value of CSR: AS_INFO */ + unsigned int access_type, inv_level; + unsigned int fault_status; + unsigned long fault_entry_addr; + phys_addr_t fault_gpa; + struct kvm_memory_slot *memslot; + unsigned long hva; + bool write_fault, writable; + gfn_t gfn; + + int ret, idx; + + as_info = read_csr(CSR_AS_INFO); + access_type = (as_info >> AF_ACCESS_TYPE_SHIFT) & AF_ACCESS_TYPE_MASK; + inv_level = (as_info >> AF_INV_LEVEL_SHIFT) & AF_INV_LEVEL_MASK; + fault_status = (as_info >> AF_FAULT_STATUS_SHIFT) & AF_FAULT_STATUS_MASK; + fault_entry_addr = (as_info & AF_ENTRY_ADDR_MASK) >> 3; + + fault_gpa = read_csr(CSR_EXC_GPA); + idx = srcu_read_lock(&vcpu->kvm->srcu); + + gfn = fault_gpa >> PAGE_SHIFT; + memslot = gfn_to_memslot(vcpu->kvm, gfn); + hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); + + write_fault = kvm_is_write_fault(access_type); + + /* The memory slot for IO doesn't register in memory region + * with kvm, if hva == KVM_HVA_ERR_BAD, the gpa used for MMIO + * needs emulation. + */ + + if (hva == KVM_HVA_ERR_BAD) { + ret = io_mem_abort(vcpu, run, NULL); + goto out_unlock; + } + /* Userspace should not be able to register out-of-bounds IPAs */ + VM_BUG_ON(fault_gpa >= KVM_PHYS_SIZE); + + ret = user_mem_abort(vcpu, fault_gpa, memslot, hva, fault_status); + if (ret == 0) + ret = 1; +out_unlock: + srcu_read_unlock(&vcpu->kvm->srcu, idx); + return ret; +} +#endif +static int handle_hva_to_gpa(struct kvm *kvm, unsigned long start, unsigned long end, + int (*handler)(struct kvm *kvm, gpa_t gpa, u64 size, void *data), + void *data) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int ret = 0; + + slots = kvm_memslots(kvm); + + /* we only care about the pages that the guest sees */ + kvm_for_each_memslot(memslot, slots) { + unsigned long hva_start, hva_end; + gfn_t gpa; + + hva_start = max(start, memslot->userspace_addr); + hva_end = min(end, memslot->userspace_addr + + (memslot->npages << PAGE_SHIFT)); + if (hva_start >= hva_end) + continue; + + gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT; + ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data); + } + + return ret; +} + +static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +{ + unmap_apt_range(kvm, gpa, size); + return 0; +} + +int kvm_unmap_hva_range(struct kvm *kvm, + unsigned long start, unsigned long end, bool blockable) +{ + if (!kvm->arch.pgd) + return 0; + + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); + return 1; +} + +static int apt_ptep_test_and_clear_young(pte_t *pte) +{ + if (pte_young(*pte)) { + *pte = pte_mkold(*pte); + return 1; + } + return 0; +} + +static int apt_pmdp_test_and_clear_young(pmd_t *pmd) +{ + return apt_ptep_test_and_clear_young((pte_t *)pmd); +} + +static int apt_pudp_test_and_clear_young(pud_t *pud) +{ + return apt_ptep_test_and_clear_young((pte_t *)pud); +} + +static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); + if (!apt_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte)) + return 0; + + if (pud) + return apt_pudp_test_and_clear_young(pud); + else if (pmd) + return apt_pmdp_test_and_clear_young(pmd); + else + return apt_ptep_test_and_clear_young(pte); +} + +static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +{ + 
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
+	if (!apt_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
+		return 0;
+
+	if (pud)
+		return pte_young(*(pte_t *)pud);
+	else if (pmd)
+		return pte_young(*(pte_t *)pmd);
+	else
+		return pte_young(*pte);
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	if (!kvm->arch.pgd)
+		return 0;
+
+	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	if (!kvm->arch.pgd)
+		return 0;
+	/* handle_hva_to_gpa() skips empty ranges, so cover one full page */
+	return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
+				 kvm_test_age_hva_handler, NULL);
+}
+
+static int kvm_set_apte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+{
+	pte_t *pte = (pte_t *)data;
+
+	WARN_ON(size != PAGE_SIZE);
+
+	apt_set_pte(kvm, NULL, gpa, pte, 0);
+	return 0;
+}
+
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	unsigned long end = hva + PAGE_SIZE;
+	pte_t apt_pte;
+
+	if (!kvm->arch.pgd)
+		return 0;
+
+	apt_pte = pte_wrprotect(pte);
+	handle_hva_to_gpa(kvm, hva, end, &kvm_set_apte_handler, &apt_pte);
+	return 0;
+}
+
+/**
+ * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
+ * @kvm:	The KVM pointer
+ * @slot:	The memory slot associated with mask
+ * @gfn_offset:	The gfn offset in memory slot
+ * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
+ *		slot to be write protected
+ *
+ * Walks the bits set in @mask and write-protects the associated PTEs.
+ * The caller must hold kvm_mmu_lock.
+ */
+static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+		struct kvm_memory_slot *slot,
+		gfn_t gfn_offset, unsigned long mask)
+{
+	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
+	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
+
+	apt_wp_range(kvm, start, end);
+}
+
+/*
+ * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
+ * dirty pages.
+ *
+ * It calls kvm_mmu_write_protect_pt_masked() to write-protect the selected
+ * pages so that dirty logging can track them.
+ */
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+		struct kvm_memory_slot *slot,
+		gfn_t gfn_offset, unsigned long mask)
+{
+	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+}
diff --git a/arch/sw_64/kvm/perf.c b/arch/sw_64/kvm/perf.c
new file mode 100644
index 0000000000000000000000000000000000000000..730dd1feeccf83e4de96ea93608b7a7752ef5ebc
--- /dev/null
+++ b/arch/sw_64/kvm/perf.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Performance events support for KVM.
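+ *
+ * kvm_arch_vcpu_in_kernel() reports whether the guest was running in
+ * kernel mode (PS bit 3) and kvm_arch_vcpu_get_ip() returns the guest
+ * PC, letting the core perf code attribute PMI samples to the guest.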
+ */ + +#include +#include + +#include + + +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return (vcpu->arch.regs.ps & 8) != 0; +} + +unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.regs.pc; +} + + + +static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) +{ + return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu; +} diff --git a/arch/sw_64/kvm/sw64.c b/arch/sw_64/kvm/sw64.c new file mode 100644 index 0000000000000000000000000000000000000000..f6bfb2452938c0e573c222e23b7ea04a2a4e2ab6 --- /dev/null +++ b/arch/sw_64/kvm/sw64.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include "trace.h" +#include "irq.h" + +bool set_msi_flag; + + + +static unsigned long get_new_vpn_context(struct kvm_vcpu *vcpu, long cpu) +{ + unsigned long vpn = last_vpn(cpu); + unsigned long next = vpn + 1; + + if ((vpn & VPN_MASK) >= VPN_MASK) { + kvm_flush_tlb_all(); + next = (vpn & ~VPN_MASK) + VPN_FIRST_VERSION + 1; /* bypass 0 */ + } + last_vpn(cpu) = next; + return next; +} + +int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) +{ + set_bit(number, (vcpu->arch.irqs_pending)); + kvm_vcpu_kick(vcpu); + return 0; +} + +int kvm_arch_check_processor_compat(void *opaque) +{ + return 0; +} + +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + unsigned int vcid; + unsigned int vcpu_idx; + struct kvm_vcpu *vcpu = NULL; + int irq = e->msi.data & 0xff; + + vcid = (e->msi.address_lo & VT_MSIX_ADDR_DEST_ID_MASK) >> VT_MSIX_ADDR_DEST_ID_SHIFT; + vcpu_idx = vcid & 0x1f; + vcpu = kvm_get_vcpu(kvm, vcpu_idx); + + if (!vcpu) + return -EINVAL; + + return vcpu_interrupt_line(vcpu, irq, true); +} + +void sw64_kvm_switch_vpn(struct kvm_vcpu *vcpu) +{ + unsigned long vpn; + unsigned long vpnc; + long cpu = smp_processor_id(); + + vpn = last_vpn(cpu); + vpnc = vcpu->arch.vpnc[cpu]; + + if ((vpnc ^ vpn) & ~VPN_MASK) { + /* vpnc and cpu vpn not in the same version, get new vpnc and vpn */ + vpnc = get_new_vpn_context(vcpu, cpu); + vcpu->arch.vpnc[cpu] = vpnc; + } + + vpn = vpnc & VPN_MASK; + + /* Always update vpn */ + /* Just setup vcb, hardware CSR will be changed later in HMcode */ + kvm_sw64_update_vpn(vcpu, vpn); + + /* + * If vcpu migrate to a new physical cpu, the new physical cpu may keep + * old tlb entries for this vcpu's vpn, upn in the old tlb entries and + * current vcpu's upn may not in the same version. + * For now, we don't know the vcpu's upn version and the current version. + * If we keep track of the vcpu's upn version, the TLB-flush could be less. + * To be safe and correct, flush all tlb entries of current vpn for now. 
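+ * The flush happens below only when the vcpu has actually moved to a
+ * different physical core (pcpu_id != cpu): tbivpn(0, 0, vpn) then
+ * invalidates the local TLB entries tagged with this vpn.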
+ */ + + if (vcpu->arch.pcpu_id != cpu) { + tbivpn(0, 0, vpn); + vcpu->arch.pcpu_id = cpu; + vcpu->cpu = cpu; + } +} + +void check_vcpu_requests(struct kvm_vcpu *vcpu) +{ + unsigned long vpn; + long cpu = smp_processor_id(); + + if (kvm_request_pending(vcpu)) { + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { + vpn = vcpu->arch.vpnc[cpu] & VPN_MASK; + tbivpn(0, 0, vpn); + } + } +} + + +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + return ((!bitmap_empty(vcpu->arch.irqs_pending, SWVM_IRQS) || !vcpu->arch.halted) + && !vcpu->arch.power_off); +} + +int kvm_arch_hardware_enable(void) +{ + return 0; +} + +void kvm_arch_hardware_unsetup(void) +{ +} + +bool kvm_arch_has_vcpu_debugfs(void) +{ + return false; +} + +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +{ + return 0; +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; +} + +int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) +{ + int r = 0; + + switch (ext) { + case KVM_CAP_IRQCHIP: + case KVM_CAP_IOEVENTFD: + case KVM_CAP_SYNC_MMU: + r = 1; + break; + case KVM_CAP_NR_VCPUS: + case KVM_CAP_MAX_VCPUS: + r = KVM_MAX_VCPUS; + break; + default: + r = 0; + } + + return r; +} + +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + return test_bit(SW64_KVM_IRQ_TIMER, vcpu->arch.irqs_pending); +} + +int kvm_arch_hardware_setup(void *opaque) +{ + return 0; +} + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) +{ + if (type) + return -EINVAL; + + return kvm_sw64_init_vm(kvm); +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + return kvm_sw64_destroy_vm(kvm); +} + +long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + return -EINVAL; +} + +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) +{ + return 0; +} + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) +{ + kvm_mmu_free_memory_caches(vcpu); + hrtimer_cancel(&vcpu->arch.hrt); +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + kvm_arch_vcpu_free(vcpu); +} + +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) +{ + /* Set up the timer for Guest */ + pr_info("vcpu: [%d], regs addr = %#lx, vcpucb = %#lx\n", vcpu->vcpu_id, + (unsigned long)&vcpu->arch.regs, (unsigned long)&vcpu->arch.vcb); + vcpu->arch.vtimer_freq = cpuid(GET_CPU_FREQ, 0) * 1000UL * 1000UL; + hrtimer_init(&vcpu->arch.hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS); + vcpu->arch.hrt.function = clockdev_fn; + vcpu->arch.tsk = current; + + vcpu->arch.vcb.soft_cid = vcpu->vcpu_id; + vcpu->arch.vcb.vcpu_irq_disabled = 1; + vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */ + + return 0; +} + +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + return 0; +} + +int kvm_set_routing_entry(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, + const struct kvm_irq_routing_entry *ue) +{ + int r = -EINVAL; + + switch (ue->type) { + case KVM_IRQ_ROUTING_MSI: + e->set = kvm_set_msi; + e->msi.address_lo = ue->u.msi.address_lo; + e->msi.address_hi = ue->u.msi.address_hi; + e->msi.data = ue->u.msi.data; + e->msi.flags = ue->flags; + e->msi.devid = ue->u.msi.devid; + set_msi_flag = true; + break; + default: + goto out; + } + r = 0; +out: + return r; +} + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + return -EINVAL; /* not implemented yet */ +} + +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 
+{ + return 0; +} + + +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + vcpu->cpu = cpu; +} + +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +{ + /* + * The arch-generic KVM code expects the cpu field of a vcpu to be -1 + * if the vcpu is no longer assigned to a cpu. This is used for the + * optimized make_all_cpus_request path. + */ + vcpu->cpu = -1; +} + +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + memcpy(&(vcpu->arch.regs), regs, sizeof(struct kvm_regs)); + return 0; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + memcpy(regs, &(vcpu->arch.regs), sizeof(struct kvm_regs)); + return 0; +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + return 0; +} + + +/* + * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on + * proper exit to userspace. + */ +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + struct vcpucb *vcb = &(vcpu->arch.vcb); + struct hcall_args hargs; + int irq, ret; + bool more; + sigset_t sigsaved; + + /* Set guest vcb */ + /* vpn will update later when vcpu is running */ + vcpu_set_numa_affinity(vcpu); +#ifdef CONFIG_PERF_EVENTS + vcpu_load(vcpu); +#endif + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + + if (run->exit_reason == KVM_EXIT_MMIO) + kvm_handle_mmio_return(vcpu, run); + + run->exit_reason = KVM_EXIT_UNKNOWN; + ret = 1; + while (ret > 0) { + /* Check conditions before entering the guest */ + cond_resched(); + + preempt_disable(); + local_irq_disable(); + + if (signal_pending(current)) { + ret = -EINTR; + run->exit_reason = KVM_EXIT_INTR; + } + + if (ret <= 0) { + local_irq_enable(); + preempt_enable(); + continue; + } + + memset(&hargs, 0, sizeof(hargs)); + + clear_vcpu_irq(vcpu); + + if (vcpu->arch.restart == 1) { + /* handle reset vCPU */ + vcpu->arch.regs.pc = GUEST_RESET_PC; + vcpu->arch.restart = 0; + } + + irq = interrupt_pending(vcpu, &more); + if (irq < SWVM_IRQS) + try_deliver_interrupt(vcpu, irq, more); + + vcpu->arch.halted = 0; + + sw64_kvm_switch_vpn(vcpu); + check_vcpu_requests(vcpu); + guest_enter_irqoff(); + + /* update aptp before the guest runs */ + update_aptp((unsigned long)vcpu->kvm->arch.pgd); + + /* Enter the guest */ + trace_kvm_sw64_entry(vcpu->vcpu_id, vcpu->arch.regs.pc); + vcpu->mode = IN_GUEST_MODE; + + ret = __sw64_vcpu_run(__pa(vcb), &(vcpu->arch.regs), &hargs); + + /* Back from guest */ + vcpu->mode = OUTSIDE_GUEST_MODE; + + local_irq_enable(); + guest_exit_irqoff(); + + trace_kvm_sw64_exit(ret, vcpu->arch.regs.pc); + + preempt_enable(); + + /* ret = 0 indicate interrupt in guest mode, ret > 0 indicate hcall */ + ret = handle_exit(vcpu, run, ret, &hargs); + } + + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &sigsaved, NULL); + +#ifdef CONFIG_PERF_EVENTS + vcpu_put(vcpu); +#endif + + return ret; +} + +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + int r; + + switch (ioctl) { + case KVM_SW64_VCPU_INIT: + r = kvm_sw64_vcpu_reset(vcpu); + break; + case KVM_SW64_GET_VCB: + r = kvm_sw64_get_vcb(filp, arg); + break; + case KVM_SW64_SET_VCB: + r = 
kvm_sw64_set_vcb(filp, arg);
+		break;
+	default:
+		r = -EINVAL;
+	}
+
+	return r;
+}
+
+int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+	struct kvm *kvm __maybe_unused = filp->private_data;
+	long r;
+
+	switch (ioctl) {
+	case KVM_CREATE_IRQCHIP: {
+		struct kvm_irq_routing_entry routing;
+
+		memset(&routing, 0, sizeof(routing));
+		r = kvm_set_irq_routing(kvm, &routing, 0, 0);
+		break;
+	}
+	default:
+		r = -ENOIOCTLCMD;
+	}
+	return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+	return 0;
+}
+
+void kvm_arch_exit(void)
+{
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return -ENOIOCTLCMD;
+}
+
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOIOCTLCMD;
+}
+
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+					struct kvm_memory_slot *memslot)
+{
+	/* Let implementation handle TLB/GVA invalidation */
+	kvm_arch_flush_shadow_memslot(kvm, memslot);
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+	int r;
+
+	switch (ext) {
+	case KVM_CAP_IOEVENTFD:
+		r = 1;
+		break;
+	case KVM_CAP_NR_VCPUS:
+	case KVM_CAP_MAX_VCPUS:
+		r = KVM_MAX_VCPUS;
+		break;
+	default:
+		r = 0;
+	}
+
+	return r;
+}
+
+void vcpu_send_ipi(struct kvm_vcpu *vcpu, int target_vcpuid, int type)
+{
+	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(vcpu->kvm, target_vcpuid);
+
+	/* check the target vcpu before dereferencing it */
+	if (target_vcpu == NULL)
+		return;
+
+	if (type == II_RESET)
+		target_vcpu->arch.restart = 1;
+
+	vcpu_interrupt_line(target_vcpu, 1, 1);
+}
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+			  bool line_status)
+{
+	u32 irq = irq_level->irq;
+	unsigned int irq_num;
+	struct kvm_vcpu *vcpu = NULL;
+	bool level = irq_level->level;
+
+	irq_num = irq;
+	/* target core for Intx is core0 */
+	vcpu = kvm_get_vcpu(kvm, 0);
+	if (!vcpu)
+		return -EINVAL;
+
+	return vcpu_interrupt_line(vcpu, irq_num, level);
+}
+
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+	KVM_GENERIC_VM_STATS()
+};
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+	.name_size = KVM_STATS_NAME_SIZE,
+	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+	.id_offset = sizeof(struct kvm_stats_header),
+	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+		       sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+	KVM_GENERIC_VCPU_STATS(),
+};
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+	.name_size = KVM_STATS_NAME_SIZE,
+	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+	.id_offset = sizeof(struct kvm_stats_header),
+	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+		       sizeof(kvm_vcpu_stats_desc),
+};
+
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
+{
+	return irqchip_in_kernel(kvm);
+}
diff --git a/arch/sw_64/kvm/trace.h b/arch/sw_64/kvm/trace.h
new file mode 100644
index 0000000000000000000000000000000000000000..2611df3d3fa57658881319c3384979aad2ea302c
--- /dev/null
+++ b/arch/sw_64/kvm/trace.h
@@ -0,0
+1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(_SW64_KVM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _SW64_KVM_TRACE_H + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +/* + * Tracepoint for guest mode entry. + */ +TRACE_EVENT(kvm_sw64_entry, + TP_PROTO(unsigned int vcpu_id, unsigned int vcpu_pc), + TP_ARGS(vcpu_id, vcpu_pc), + + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(unsigned int, vcpu_pc) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + __entry->vcpu_pc = vcpu_pc; + ), + + TP_printk("VCPU %u: PC: 0x%08x", __entry->vcpu_id, __entry->vcpu_pc) +); + +/* + * Tracepoint for guest mode exit. + */ + +TRACE_EVENT(kvm_sw64_exit, + TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc), + TP_ARGS(exit_reason, vcpu_pc), + + TP_STRUCT__entry( + __field(unsigned int, exit_reason) + __field(unsigned long, vcpu_pc) + ), + + TP_fast_assign( + __entry->exit_reason = exit_reason; + __entry->vcpu_pc = vcpu_pc; + ), + + TP_printk("exit_reason: 0x%04x (%11s), PC: 0x%08lx", + __entry->exit_reason, + __print_symbolic(__entry->exit_reason, kvm_sw64_exception_type), + __entry->vcpu_pc) +); + +#endif /* _SW64_KVM_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include diff --git a/arch/sw_64/kvm/vmem.c b/arch/sw_64/kvm/vmem.c new file mode 100644 index 0000000000000000000000000000000000000000..688449b65fa5a4df26bcb97594683129971fb27c --- /dev/null +++ b/arch/sw_64/kvm/vmem.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include + +static bool addr_in_pool(struct gen_pool *pool, + unsigned long start, size_t size) +{ + bool found = false; + unsigned long end = start + size - 1; + struct gen_pool_chunk *chunk; + + rcu_read_lock(); + list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { + if (start >= chunk->start_addr && start <= chunk->end_addr) { + if (end <= chunk->end_addr) { + found = true; + break; + } + } + } + rcu_read_unlock(); + return found; +} + +static int vmem_vm_insert_page(struct vm_area_struct *vma) +{ + unsigned long addr, uaddr; + struct page *vmem_page; + struct vmem_info *info; + size_t size; + int ret; + + info = vma->vm_private_data; + addr = info->start; + size = info->size; + uaddr = vma->vm_start; + + vm_flags_init(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP); + vmem_page = pfn_to_page(addr >> PAGE_SHIFT); + do { + ret = vm_insert_page(vma, uaddr, vmem_page); + if (ret < 0) { + pr_info("vm_insert_page failed: %d\n", ret); + return ret; + } + vmem_page++; + uaddr += PAGE_SIZE; + size -= PAGE_SIZE; + } while (size > 0); + + return 0; +} + +static void vmem_vm_open(struct vm_area_struct *vma) +{ + struct vmem_info *info = vma->vm_private_data; + + atomic_inc(&info->refcnt); +} + +static void vmem_vm_close(struct vm_area_struct *vma) +{ + unsigned long addr; + size_t size; + struct vmem_info *info; + + info = vma->vm_private_data; + addr = info->start; + size = round_up(info->size, 8 << 20); + + if (atomic_dec_and_test(&info->refcnt)) { + if (sw64_kvm_pool && addr_in_pool(sw64_kvm_pool, addr, size)) { + pr_info("gen pool free addr: %#lx, size: %#lx\n", + addr, size); + gen_pool_free(sw64_kvm_pool, addr, size); + } + kfree(info); + } +} + +const struct vm_operations_struct vmem_vm_ops = { + .open = vmem_vm_open, + .close = vmem_vm_close, +}; +EXPORT_SYMBOL_GPL(vmem_vm_ops); + +static int vmem_open(struct 
inode *inode, struct file *flip)
+{
+	flip->private_data = NULL;
+	return 0;
+}
+
+static loff_t vmem_llseek(struct file *filp, loff_t offset, int whence)
+{
+	/* report a fixed 256GB size regardless of offset/whence */
+	loff_t newpos = 256UL << 30;
+	return newpos;
+}
+
+static int vmem_release(struct inode *inode, struct file *flip)
+{
+	return 0;
+}
+
+static int vmem_mmap(struct file *flip, struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	struct vmem_info *info;
+	size_t size = vma->vm_end - vma->vm_start;
+	int ret;
+
+	if (!(vma->vm_flags & VM_SHARED)) {
+		pr_err("%s: mapping must be shared\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!sw64_kvm_pool)
+		return -ENOMEM;
+
+	if (flip->private_data == NULL) {
+		addr = gen_pool_alloc(sw64_kvm_pool, round_up(size, 8 << 20));
+		if (!addr)
+			return -ENOMEM;
+
+		info = kzalloc(sizeof(struct vmem_info), GFP_KERNEL);
+		if (!info) {
+			gen_pool_free(sw64_kvm_pool, addr, round_up(size, 8 << 20));
+			return -ENOMEM;
+		}
+		pr_info("guest phys addr=%#lx, size=%#lx\n", addr, size);
+		info->start = addr;
+		info->size = size;
+		flip->private_data = (void *)info;
+	} else {
+		info = flip->private_data;
+		addr = info->start;
+	}
+
+	vma->vm_private_data = (void *)info;
+	vma->vm_ops = &vmem_vm_ops;
+	vma->vm_ops->open(vma);
+
+	/* TODO: handle a request larger than vm_mem_size */
+	pr_info("sw64_vmem: vm_start=%#lx, size=%#lx\n", vma->vm_start, size);
+
+	ret = vmem_vm_insert_page(vma);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static const struct file_operations vmem_fops = {
+	.owner = THIS_MODULE,
+	.open = vmem_open,
+	.llseek = vmem_llseek,
+	.release = vmem_release,
+	.mmap = vmem_mmap,
+};
+
+static struct miscdevice vmem_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "sw64_vmem",
+	.fops = &vmem_fops,
+};
+
+int __init vmem_init(void)
+{
+	int err;
+
+	err = misc_register(&vmem_dev);
+	if (err != 0) {
+		pr_err("Could not register sw64_vmem device\n");
+		return err;
+	}
+	return 0;
+}
+
+void vmem_exit(void)
+{
+	misc_deregister(&vmem_dev);
+}
diff --git a/arch/sw_64/lib/Kconfig b/arch/sw_64/lib/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..e22751a457ceb1053fddd5411f90cf14d02af20a
--- /dev/null
+++ b/arch/sw_64/lib/Kconfig
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Library optimization options"
+
+config DEEP_CLEAR_PAGE
+	bool "Clear Page with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the clear page
+	  routine. Say N if you want to use the generic version.
+
+config DEEP_CLEAR_USER
+	bool "Clear User with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the clear user
+	  routine. Say N if you want to use the generic version.
+
+config DEEP_COPY_PAGE
+	bool "Copy Page with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the copy page
+	  routine. Say N if you want to use the generic version.
+
+config DEEP_COPY_USER
+	bool "Copy User with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the copy user
+	  routine. Say N if you want to use the generic version.
+
+config DEEP_MEMCPY
+	bool "Memory Copy with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the memory copy
+	  routine. Say N if you want to use the generic version.
+
+config DEEP_MEMSET
+	bool "Memory Set with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the memory set
+	  routine. Say N if you want to use the generic version.
+ +endmenu diff --git a/arch/sw_64/lib/Makefile b/arch/sw_64/lib/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..e6455bb5113911fa7c2fb599449aad5ef1391fea --- /dev/null +++ b/arch/sw_64/lib/Makefile @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for sw-specific library files.. +# + +asflags-y := $(KBUILD_CFLAGS) +ccflags-y := -Werror + +lib-y = __divlu.o __remlu.o __divwu.o __remwu.o \ + udelay.o \ + memmove.o \ + checksum.o \ + csum_partial_copy.o \ + fpreg.o \ + strcpy.o \ + strncpy.o \ + fls.o \ + csum_ipv6_magic.o + +lib-clear_page-y := clear_page.o +lib-clear_page-$(CONFIG_DEEP_CLEAR_PAGE) := deep-clear_page.o + +lib-clear_user-y := clear_user.o +lib-clear_user-$(CONFIG_DEEP_CLEAR_USER) := deep-clear_user.o + +lib-copy_page-y := copy_page.o +lib-copy_page-$(CONFIG_DEEP_COPY_PAGE) := deep-copy_page.o + +lib-copy_user-y := copy_user.o +lib-copy_user-$(CONFIG_DEEP_COPY_USER) := deep-copy_user.o + +lib-memcpy-y := memcpy.o +lib-memcpy-$(CONFIG_DEEP_MEMCPY) := deep-memcpy.o + +lib-memset-y := memset.o +lib-memset-$(CONFIG_DEEP_MEMSET) := deep-memset.o + +lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o + +lib-y += $(lib-clear_page-y) $(lib-clear_user-y) $(lib-copy_page-y) $(lib-copy_user-y) $(lib-memcpy-y) $(lib-memset-y) + +obj-y = iomap.o +obj-y += iomap_copy.o + +# The division routines are built from single source, with different defines. +AFLAGS___divlu.o = -DDIV +AFLAGS___remlu.o = -DREM +AFLAGS___divwu.o = -DDIV -DINTSIZE +AFLAGS___remwu.o = -DREM -DINTSIZE + +$(addprefix $(obj)/,__divlu.o __remlu.o __divwu.o __remwu.o): \ + $(src)/divide.S FORCE + $(call if_changed_rule,as_o_S) diff --git a/arch/sw_64/lib/checksum.c b/arch/sw_64/lib/checksum.c new file mode 100644 index 0000000000000000000000000000000000000000..d1314caa15bf44591ec1f6121017a77da16c6e2e --- /dev/null +++ b/arch/sw_64/lib/checksum.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains network checksum routines that are better done + * in an architecture-specific manner due to speed.. + * Comments in other versions indicate that the algorithms are from RFC1071 + */ +#include +#include +#include +#include + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented. + */ +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + return (__force __sum16)~from64to16( + (__force u64)saddr + (__force u64)daddr + + (__force u64)sum + ((len + proto) << 8)); +} +EXPORT_SYMBOL(csum_tcpudp_magic); + +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + unsigned long result; + + result = (__force u64)saddr + (__force u64)daddr + + (__force u64)sum + ((len + proto) << 8); + + /* + * Fold down to 32-bits so we don't lose in the typedef-less + * network stack. + * + * 64 to 33 + */ + result = (result & 0xffffffff) + (result >> 32); + /* 33 to 32 */ + result = (result & 0xffffffff) + (result >> 32); + return (__force __wsum)result; +} +EXPORT_SYMBOL(csum_tcpudp_nofold); + +/* + * Do a 64-bit checksum on an arbitrary memory area.. 
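+ *
+ * The sum is one's complement style: after each 64-bit addition,
+ * "checksum += (checksum < word)" feeds the carry-out back into bit 0
+ * (end-around carry), and from64to16() folds the 64-bit accumulator
+ * down to a 16-bit result.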
+ */ +static inline unsigned long do_csum(const unsigned char *buff, int len) +{ + const unsigned long *dst = (unsigned long *)buff; + unsigned long doff = 7 & (unsigned long) dst; + unsigned long checksum = 0; + unsigned long word, patch; + unsigned long partial_dest, second_dest; + + len -= 8; + + if (!doff) { + while (len > 0) { + word = *dst; + checksum += word; + checksum += (checksum < word); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + + if (len != 8) + maskll(word, len, word); + + checksum += word; + checksum += (checksum < word); + } else { + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); + } + + return from64to16(checksum); +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +__sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + return (__force __sum16)~do_csum(iph, ihl*4); +} +EXPORT_SYMBOL(ip_fast_csum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum csum_partial(const void *buff, int len, __wsum sum) +{ + unsigned long result = do_csum(buff, len); + + /* add in old sum, and carry.. */ + result += (__force u32)sum; + /* 32+c bits -> 32 bits */ + result = (result & 0xffffffff) + (result >> 32); + return (__force __wsum)result; +} +EXPORT_SYMBOL(csum_partial); + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +__sum16 ip_compute_csum(const void *buff, int len) +{ + return (__force __sum16)~from64to16(do_csum(buff, len)); +} +EXPORT_SYMBOL(ip_compute_csum); diff --git a/arch/sw_64/lib/clear_page.S b/arch/sw_64/lib/clear_page.S new file mode 100644 index 0000000000000000000000000000000000000000..e1cc7cddfd2f70e403afdb8bb8a9ca9e69e36d18 --- /dev/null +++ b/arch/sw_64/lib/clear_page.S @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Zero an entire page. 
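+ *
+ * 64 iterations of sixteen 8-byte non-cacheable stores (stl_nc) zero
+ * the whole 8KB page; the final memb orders the _nc stores before
+ * returning.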
+ */ +#include + .text + .align 4 + .global clear_page + .ent clear_page +clear_page: + .prologue 0 + + ldi $0, 64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + + stl_nc $31, 0x0($16) + stl_nc $31, 0x8($16) + stl_nc $31, 0x10($16) + stl_nc $31, 0x18($16) + + stl_nc $31, 0x20($16) + stl_nc $31, 0x28($16) + stl_nc $31, 0x30($16) + stl_nc $31, 0x38($16) + + stl_nc $31, 0x40($16) + stl_nc $31, 0x48($16) + stl_nc $31, 0x50($16) + stl_nc $31, 0x58($16) + + stl_nc $31, 0x60($16) + stl_nc $31, 0x68($16) + subl $0, 1, $0 + + stl_nc $31, 0x70($16) + stl_nc $31, 0x78($16) + addl $16, 128, $16 + bne $0, 1b + + memb + ret + + .end clear_page + EXPORT_SYMBOL(clear_page) diff --git a/arch/sw_64/lib/clear_user.S b/arch/sw_64/lib/clear_user.S new file mode 100644 index 0000000000000000000000000000000000000000..5ac77fc8ca0d70adc59b232126c614b64e688a31 --- /dev/null +++ b/arch/sw_64/lib/clear_user.S @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Richard Henderson + * + * Zero user space, handling exceptions as we go. + * + * We have to make sure that $0 is always up-to-date and contains the + * right "bytes left to zero" value (and that it is updated only _after_ + * a successful copy). There is also some rather minor exception setup + * stuff. + * + */ +#include +/* Allow an exception for an insn; exit if we get one. */ +#define EX(x,y...) \ + 99: x,##y; \ + .section __ex_table,"a"; \ + .long 99b - .; \ + ldi $31, $exception-99b($31); \ + .previous + + .set noat + .set noreorder + .align 4 + + .globl __clear_user + .ent __clear_user + .frame $30, 0, $26 + .prologue 0 +__clear_user: + and $17, $17, $0 + and $16, 7, $4 + beq $0, $zerolength + addl $0, $4, $1 + and $1, 7, $2 + srl $1, 3, $1 + beq $4, $loop + + subl $4, 8, $4 + addl $0, $4, $0 + beq $1, $oneword + +$head: + EX(stb $31, 0($16)) + addl $16, 1, $16 + addl $4, 1, $4 + bne $4, $head + subl $1, 1, $1 + br $loop + unop + +$oneword: + EX(stb $31, 0($16)) + addl $16, 1, $16 + addl $4, 1, $4 + bne $4, $oneword + clr $0 + +$zerolength: +$exception: + ret $31, ($26), 1 + +$loop: + and $1, 3, $4 + beq $4, 1f + +0: EX(stl $31, 0($16)) + subl $0, 8, $0 + subl $4, 1, $4 + addl $16, 8, $16 + bne $4, 0b + unop + +1: bic $1, 3, $1 + beq $1, $tail + +2: EX(stl $31, 0($16)) + subl $0, 8, $0 + EX(stl $31, 8($16)) + subl $0, 8, $0 + EX(stl $31, 16($16)) + subl $0, 8, $0 + EX(stl $31, 24($16)) + subl $0, 8, $0 + subl $1, 4, $1 + addl $16, 32, $16 + bne $1, 2b + +$tail: + bne $2, 1f + ret $31, ($26), 1 + +1: + EX(stb $31, 0($16)) + addl $16, 1, $16 + subl $2, 1, $2 + bne $2, 1b + clr $0 + ret $31, ($26), 1 + + .end __clear_user + EXPORT_SYMBOL(__clear_user) diff --git a/arch/sw_64/lib/copy_page.S b/arch/sw_64/lib/copy_page.S new file mode 100644 index 0000000000000000000000000000000000000000..898472c36c80bcd6975b7e300665b665b3ffd6b7 --- /dev/null +++ b/arch/sw_64/lib/copy_page.S @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw/lib/copy_page.S + * + * Copy an entire page. 
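+ *
+ * 64 iterations of 128 bytes each: integer loads from the source are
+ * paired with non-cacheable stores (stl_nc) to the destination, and
+ * the ldwe into $f31 apparently serves as a software prefetch of the
+ * source stream.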
+ */ +#include + + .text + .align 4 + .global copy_page + .ent copy_page +copy_page: + .prologue 0 + + ldi $18, 64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + ldl $0, 0($17) + ldl $1, 8($17) + ldl $2, 16($17) + ldl $3, 24($17) + + stl_nc $0, 0($16) + stl_nc $1, 8($16) + stl_nc $2, 16($16) + stl_nc $3, 24($16) + + ldl $4, 32($17) + ldl $5, 40($17) + ldl $6, 48($17) + ldl $7, 56($17) + + stl_nc $4, 32($16) + stl_nc $5, 40($16) + stl_nc $6, 48($16) + stl_nc $7, 56($16) + + ldl $0, 64($17) + ldl $1, 72($17) + ldl $2, 80($17) + ldl $3, 88($17) + + stl_nc $0, 64($16) + stl_nc $1, 72($16) + stl_nc $2, 80($16) + stl_nc $3, 88($16) + + ldl $4, 96($17) + ldl $5, 104($17) + ldl $6, 112($17) + ldl $7, 120($17) + + stl_nc $4, 96($16) + stl_nc $5, 104($16) + stl_nc $6, 112($16) + stl_nc $7, 120($16) + + ldwe $f31, 3 * 0x80($17) + subl $18, 1, $18 + addl $17, 128, $17 + + addl $16, 128, $16 + bne $18, 1b + + memb + ret + + .end copy_page + EXPORT_SYMBOL(copy_page) diff --git a/arch/sw_64/lib/copy_user.S b/arch/sw_64/lib/copy_user.S new file mode 100644 index 0000000000000000000000000000000000000000..2c3dd0b5656ced04b9ccfd7eeaea593d56c45ed7 --- /dev/null +++ b/arch/sw_64/lib/copy_user.S @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copy to/from user space, handling exceptions as we go.. This + * isn't exactly pretty. + * + * This is essentially the same as "memcpy()", but with a few twists. + * Notably, we have to make sure that $0 is always up-to-date and + * contains the right "bytes left to copy" value (and that it is updated + * only _after_ a successful copy). There is also some rather minor + * exception setup stuff.. + */ +#include +/* Allow an exception for an insn; exit if we get one. */ +#define EXI(x,y...) \ + 99: x,##y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $exitin-99b($31); \ + .previous + +#define EXO(x,y...) 
\ + 99: x, ##y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $exitout-99b($31); \ + .previous + + .set noat + .align 4 + .globl __copy_user + .ent __copy_user +__copy_user: + .prologue 0 + and $18, $18, $0 + and $16, 7, $3 + beq $0, $35 + beq $3, $36 + subl $3, 8, $3 + .align 4 +$37: + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + addl $3, 1, $3 + subl $0, 1, $0 + addl $16, 1, $16 + addl $17, 1, $17 + beq $0, $41 + bne $3, $37 +$36: + and $17, 7, $1 + bic $0, 7, $4 + beq $1, $43 + beq $4, $48 + EXI(ldl_u $3, 0($17)) + .align 4 +$50: + EXI(ldl_u $2, 8($17)) + subl $4, 8, $4 + extll $3, $17, $3 + exthl $2, $17, $1 + bis $3, $1, $1 + EXO(stl $1,0($16)) + addl $17, 8, $17 + subl $0, 8, $0 + addl $16, 8, $16 + bis $2, $2, $3 + bne $4, $50 +$48: + beq $0, $41 + .align 4 +$57: + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + subl $0, 1, $0 + addl $16, 1, $16 + addl $17, 1, $17 + bne $0, $57 + br $31, $41 + .align 4 +$43: + beq $4, $65 + .align 4 +$66: + EXI(ldl $1, 0($17)) + subl $4, 8, $4 + EXO(stl $1,0($16)) + addl $17, 8, $17 + subl $0, 8, $0 + addl $16, 8, $16 + bne $4, $66 +$65: + beq $0, $41 + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + addl $17, 1, $17 + addl $16, 1, $16 + subl $0, 1, $0 + br $31, $65 +$41: +$35: +$exitin: +$exitout: + ret $31, ($26), 1 + + .end __copy_user + EXPORT_SYMBOL(__copy_user) diff --git a/arch/sw_64/lib/csum_ipv6_magic.S b/arch/sw_64/lib/csum_ipv6_magic.S new file mode 100644 index 0000000000000000000000000000000000000000..755e1c13cb25ee04ecab64651080ef6d4e905781 --- /dev/null +++ b/arch/sw_64/lib/csum_ipv6_magic.S @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Richard Henderson + * + * unsigned short csum_ipv6_magic(struct in6_addr *saddr, + * struct in6_addr *daddr, __u32 len, + * unsigned short proto, unsigned int csum); + * + * Misalignment handling (which costs 16 instructions / 8 cycles) + * added by Ivan Kokshaysky + */ +#include + .globl csum_ipv6_magic + .align 4 + .ent csum_ipv6_magic + .frame $30, 0, $26, 0 +csum_ipv6_magic: + .prologue 0 + + ldl_u $0, 0($16) + zapnot $20, 15, $20 + exthl $18, 1, $4 + ldl_u $21, 7($16) + + extlb $18, 1, $5 + ldl_u $1, 8($16) + extlb $18, 2, $6 + ldl_u $22, 15($16) + + extlb $18, 3, $18 + ldl_u $2, 0($17) + sra $4, 32, $4 + ldl_u $23, 7($17) + + extll $0, $16, $0 + ldl_u $3, 8($17) + exthl $21, $16, $21 + ldl_u $24, 15($17) + + sll $5, 16, $5 + or $0, $21, $0 + extll $1, $16, $1 + addl $20, $0, $20 + + exthl $22, $16, $22 + cmpult $20, $0, $0 + sll $6, 8, $6 + or $1, $22, $1 + + extll $2, $17, $2 + or $4, $18, $18 + exthl $23, $17, $23 + or $5, $6, $5 + + extll $3, $17, $3 + or $2, $23, $2 + exthl $24, $17, $24 + or $18, $5, $18 + + exthh $19, 7, $7 + or $3, $24, $3 + extlb $19, 1, $19 + addl $20, $1, $20 + + or $19, $7, $19 + cmpult $20, $1, $1 + sll $19, 48, $19 + + sra $19, 32, $19 + addl $20, $2, $20 + cmpult $20, $2, $2 + addl $20, $3, $20 + + cmpult $20, $3, $3 + addl $20, $18, $20 + cmpult $20, $18, $18 + addl $20, $19, $20 + + cmpult $20, $19, $19 + addl $0, $1, $0 + addl $2, $3, $2 + addl $18, $19, $18 + + addl $0, $2, $0 + addl $20, $18, $20 + addl $0, $20, $0 + unop + + extlh $0, 2, $2 + zapnot $0, 3, $3 + extlh $0, 4, $1 + addl $2, $3, $3 + + extlh $0, 6, $0 + addl $3, $1, $3 + addl $0, $3, $0 + unop + + extlh $0, 2, $1 + zapnot $0, 3, $0 + addl $0, $1, $0 + unop + + extlh $0, 2, $1 + zapnot $0, 3, $0 + addl $0, $1, $0 + not $0, $0 + + zapnot $0, 3, $0 + ret + + .end csum_ipv6_magic + EXPORT_SYMBOL(csum_ipv6_magic) diff --git 
a/arch/sw_64/lib/csum_partial_copy.c b/arch/sw_64/lib/csum_partial_copy.c new file mode 100644 index 0000000000000000000000000000000000000000..1a8c18757e095f289d4bed109d37cd4b6c0f2dbb --- /dev/null +++ b/arch/sw_64/lib/csum_partial_copy.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * csum_partial_copy - do IP checksumming and copy + * + * (C) Copyright 1996 Linus Torvalds + * + * Don't look at this too closely - you'll go mad. The things + * we do for performance.. + */ + +#include +#include +#include +#include + + +#define ldl_u(x, y) \ + __asm__ __volatile__("ldl_u %0, %1":"=r" (x):"m" (*(const unsigned long *)(y))) + +#define stl_u(x, y) \ + __asm__ __volatile__("stl_u %1, %0":"=m" (*(unsigned long *)(y)):"r" (x)) + +static inline void stll_u(unsigned long data, unsigned long *dst) +{ + int i = 0; + unsigned long doff = (unsigned long)dst & 7; + + for (; doff < 8; i++, doff++) + *((char *)dst + i) = *((char *)&data + i); +} + +static inline void sthl_u(unsigned long data, unsigned long *dst) +{ + int i = 0; + unsigned long doff = (unsigned long)dst & 7; + + for (; i < doff; i++) + *((char *)dst + 8 - doff + i) = *((char *)&data + 8 - doff + i); +} + +#define __get_word(insn, x, ptr) \ +({ \ + long __guu_err; \ + __asm__ __volatile__( \ + "1: "#insn" %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0,2b-1b(%1)\n" \ + ".previous" \ + : "=r"(x), "=r"(__guu_err) \ + : "m"(__m(ptr)), "1"(0)); \ + __guu_err; \ +}) + +static inline unsigned long +csum_partial_cfu_dest_aligned(const unsigned long __user *src, + unsigned long *dst, long len) +{ + unsigned long word; + unsigned long checksum = ~0U; + int err = 0; + + err = __copy_from_user(dst, src, len+8); + + while (len > 0) { + word = *dst; + checksum += word; + checksum += (checksum < word); + dst++; + len -= 8; + } + len += 8; + word = *dst; + + if (len != 8) + maskll(word, len, word); + checksum += word; + checksum += (checksum < word); + + return checksum; +} + +static inline unsigned long +csum_partial_cfu_dest_unaligned(const unsigned long __user *src, + unsigned long *dst, unsigned long doff, long len) +{ + unsigned long word, patch; + unsigned long partial_dest, second_dest; + unsigned long checksum = ~0U; + int err = 0; + + err = __copy_from_user(dst, src, len+8); + + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); + + return checksum; +} + +static __wsum __csum_and_copy(const void __user *src, void *dst, int len) +{ + unsigned long checksum; + unsigned long doff = 7 & (unsigned long) dst; + + if (!doff) { + checksum = csum_partial_cfu_dest_aligned( + (const unsigned long __user *) src, + (unsigned long *) dst, len-8); + } else { + checksum = csum_partial_cfu_dest_unaligned( + (const unsigned long __user *) src, + (unsigned long *) dst, doff, len-8); + } + return (__force __wsum)from64to16(checksum); +} + +__wsum +csum_and_copy_from_user(const void __user *src, void *dst, int len) +{ + if (!access_ok(src, len)) + return 0; + return __csum_and_copy(src, dst, len); +} 
+EXPORT_SYMBOL(csum_and_copy_from_user); + +__wsum +csum_partial_copy_nocheck(const void *src, void *dst, int len) +{ + return __csum_and_copy((__force const void __user *)src, + dst, len); +} +EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/sw_64/lib/deep-clear_page.S b/arch/sw_64/lib/deep-clear_page.S new file mode 100644 index 0000000000000000000000000000000000000000..52a3db33fc1736c4497bd3c4e670f86e2f32aa0d --- /dev/null +++ b/arch/sw_64/lib/deep-clear_page.S @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Zero an entire page. + */ +#include + .text + .align 4 + .global clear_page + .ent clear_page +clear_page: + .prologue 0 + + ldi $0,64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + +/* + stl_nc $31,0x0($16) + stl_nc $31,0x8($16) + stl_nc $31,0x10($16) + stl_nc $31,0x18($16) + + stl_nc $31,0x20($16) + stl_nc $31,0x28($16) + stl_nc $31,0x30($16) + stl_nc $31,0x38($16) + + stl_nc $31,0x40($16) + stl_nc $31,0x48($16) + stl_nc $31,0x50($16) + stl_nc $31,0x58($16) + + stl_nc $31,0x60($16) + stl_nc $31,0x68($16) + stl_nc $31,0x70($16) + stl_nc $31,0x78($16) +*/ + + vstd_nc $f31, 0x0($16) + vstd_nc $f31, 0x20($16) + subl $0, 1, $0 + vstd_nc $f31, 0x40($16) + + vstd_nc $f31, 0x60($16) + addl $16, 128, $16 + bne $0, 1b + + memb + ret + + .end clear_page + EXPORT_SYMBOL(clear_page) diff --git a/arch/sw_64/lib/deep-clear_user.S b/arch/sw_64/lib/deep-clear_user.S new file mode 100644 index 0000000000000000000000000000000000000000..c81418ed99a26b7bfc197863bb0ecc911b43a62c --- /dev/null +++ b/arch/sw_64/lib/deep-clear_user.S @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Mao Minkai + * + * Zero user space, handling exceptions as we go. + * + * We have to make sure that $0 is always up-to-date and contains the + * right "bytes left to zero" value (and that it is updated only _after_ + * a successful copy). There is also some rather minor exception setup + * stuff. + * + */ +#include +/* Allow an exception for an insn; exit if we get one. */ +#define FIXUP_LDST(x,y...) \ + 99: x,##y; \ + .section __ex_table,"a"; \ + .long 99b - .; \ + ldi $31, $out-99b($31); \ + .previous + +/* + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $18: bytes left to copy + * + */ + .globl __clear_user + .ent __clear_user +__clear_user: + .prologue 0 + bis $31, $31, $7 + mov $17, $18 + bis $31, $31, $17 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-set_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-set_template_c4.S" +#endif +$out: + bis $31, $18, $0 + beq $7, $return + +$restore_simd: + RESTORE_SIMD_REGS + +$return: + ret + .end __clear_user + EXPORT_SYMBOL(__clear_user) diff --git a/arch/sw_64/lib/deep-copy_page.S b/arch/sw_64/lib/deep-copy_page.S new file mode 100644 index 0000000000000000000000000000000000000000..a9b9d97f318af18a02bc7307d8d7d1b0cc1f0088 --- /dev/null +++ b/arch/sw_64/lib/deep-copy_page.S @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw/lib/copy_page.S + * + * Copy an entire page. 
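+ *
+ * SIMD variant: the page is copied in 32-byte vldd/vstd_nc chunks;
+ * $f16 is saved to and restored from a 32-byte-aligned stack slot
+ * around the loop (plus CSR_WR_FREGS on C4 parts).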
+ */ +#include +#include + + .text + .align 4 + .global copy_page + .ent copy_page +copy_page: + .prologue 0 + + ldi $18, 64 + subl $sp, 0x60, $sp + ldi $4, 0x40($sp) + stl $4, 0($sp) + bic $4, 0x1f, $4 + vstd $f16, 0($4) +#ifdef CONFIG_SUBARCH_C4 + csrr $5, CSR_WR_FREGS +#endif + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + vldd $f16, 0($17) + vstd_nc $f16, 0($16) + + vldd $f16, 32($17) + vstd_nc $f16, 32($16) + + vldd $f16, 64($17) + vstd_nc $f16, 64($16) + + vldd $f16, 96($17) + vstd_nc $f16, 96($16) + + ldwe $f31, 5*0x80($17) + subl $18, 1, $18 + addl $17, 128, $17 + + addl $16, 128, $16 + bne $18, 1b + + memb + ldl $4, 0($sp) + ldi $4, 0x40($sp) + bic $4, 0x1f, $4 + vldd $f16, 0($4) +#ifdef CONFIG_SUBARCH_C4 + csrw $5, CSR_WR_FREGS +#endif + addl $sp, 0x60, $sp + ret + + .end copy_page + EXPORT_SYMBOL(copy_page) diff --git a/arch/sw_64/lib/deep-copy_template.S b/arch/sw_64/lib/deep-copy_template.S new file mode 100644 index 0000000000000000000000000000000000000000..7705eb3f36d4edab9e09c49bafbe3129086b6252 --- /dev/null +++ b/arch/sw_64/lib/deep-copy_template.S @@ -0,0 +1,301 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memcpy and copy_user with SIMD + * + * $4: 8-byte misalignment of src when dest is 8-byte aligned + * $5: 32-byte misalignment of src when dest is 32-byte aligned + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $16: latest dest, clobbered + * $17: latest src, clobbered + * $18: bytes left to copy + * + */ + +#define NC_STORE_THRESHOLD 2048 + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x60($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + ldi $sp, 0x60($sp); \ + bis $31, $31, $7 + +#define SAVE_SIMD_U_REGS \ + ldi $sp, -0xc0($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + vstd $f4, 0x40($23); \ + vstd $f5, 0x60($23); \ + vstd $f3, 0x80($23); \ + ldi $7, 2 + +#define RESTORE_SIMD_U_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + vldd $f4, 0x40($23); \ + vldd $f5, 0x60($23); \ + vldd $f3, 0x80($23); \ + ldi $sp, 0xc0($sp); \ + bis $31, $31, $7 + + ble $18, $out + and $16, 7, $1 + beq $1, $dest_aligned_8 + +$byte_loop_head: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + ble $18, $out + and $16, 7, $1 + bne $1, $byte_loop_head + +$dest_aligned_8: + and $17, 7, $4 + cmplt $18, 16, $1 + bne $1, $quad_loop_end + and $16, 31, $1 + beq $1, $dest_aligned_32 + cmplt $18, 64, $1 + bne $1, $simd_end + bne $4, $quad_u_loop_head + +$quad_loop_head: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + addl $16, 8, $16 + addl $17, 8, $17 + subl $18, 8, $18 + and $16, 31, $1 + beq $1, $dest_aligned_32 + br $31, $quad_loop_head + +$dest_aligned_32: + cmplt $18, 64, $1 + bne $1, $simd_end + and $17, 31, $5 + bne $5, $prep_simd_u_loop + +$prep_simd_loop: + SAVE_SIMD_REGS + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_loop + + .align 4 +$simd_loop_nc: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd_nc $f1, 0($16) ) + FIXUP_LDST( vstd_nc $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop_nc + memb # required for _nc 
store instructions + br $31, $simd_loop_end + + .align 4 +$simd_loop: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $17, 32, $17 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + +$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + bne $4, $prep_quad_u_loop_tail + + .align 4 +$quad_loop_tail: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( ldl $3, 8($17) ) + FIXUP_LDST( stl $2, 0($16) ) + FIXUP_LDST( stl $3, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + bne $4, $move_one_quad_u + +$move_one_quad: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + + .align 3 +$byte_loop_tail: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out + +/* misaligned src and dst */ +$quad_u_loop_head: + FIXUP_LDST( ldl_u $2, 0($17) ) + FIXUP_LDST( ldl_u $3, 7($17) ) + extll $2, $4, $2 + exthl $3, $4, $3 + bis $2, $3, $2 + FIXUP_LDST( stl $2, 0($16) ) + addl $16, 8, $16 + addl $17, 8, $17 + subl $18, 8, $18 + and $16, 31, $1 + beq $1, $dest_aligned_32 + br $31, $quad_u_loop_head + +$prep_simd_u_loop: + SAVE_SIMD_U_REGS + andnot $17, 31, $3 + ldi $2, 256($31) + sll $5, 3, $1 + subl $2, $1, $2 + sll $1, 29, $1 + sll $2, 29, $2 + ifmovd $1, $f1 + ifmovd $2, $f2 + FIXUP_LDST( vldd $f4, 0($3) ) + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_u_loop + + .align 4 +$simd_u_loop_nc: + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f3, $f4, $f31, $f3 + FIXUP_LDST( vstd_nc $f3, 0($16) ) + FIXUP_LDST( vldd $f4, 64($3) ) + srlow $f5, $f1, $f5 + sllow $f4, $f2, $f3 + vlogfc $f5, $f3, $f31, $f5 + FIXUP_LDST( vstd_nc $f5, 32($16) ) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_u_loop_nc + memb # required for _nc store instructions + br $31, $simd_u_loop_end + + .align 4 +$simd_u_loop: + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f4, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 0($16) ) + FIXUP_LDST( vldd $f4, 64($3) ) + srlow $f5, $f1, $f5 + sllow $f4, $f2, $f3 + vlogfc $f5, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 32($16) ) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_u_loop + +$simd_u_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd_u + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f4, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 0($16) ) + subl $18, 32, $18 + addl $3, 32, $3 + addl $16, 32, $16 + +$no_more_simd_u: + RESTORE_SIMD_U_REGS + bis $3, $5, $17 + br $31, $simd_end + +$prep_quad_u_loop_tail: + FIXUP_LDST( ldl_u $2, 0($17) ) + .align 4 +$quad_u_loop_tail: + FIXUP_LDST( ldl_u $3, 8($17) ) + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + FIXUP_LDST( stl $22, 0($16) ) + FIXUP_LDST( ldl_u $2, 16($17) ) + extll $3, $4, $24 + exthl $2, $4, $25 + bis $24, $25, $24 + FIXUP_LDST( stl 
$24, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_u_loop_tail + br $31, $quad_loop_end + +$move_one_quad_u: + FIXUP_LDST( ldl_u $2, 0($17) ) + FIXUP_LDST( ldl_u $3, 8($17) ) + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + FIXUP_LDST( stl $22, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + br $31, $byte_loop_tail diff --git a/arch/sw_64/lib/deep-copy_template_c4.S b/arch/sw_64/lib/deep-copy_template_c4.S new file mode 100644 index 0000000000000000000000000000000000000000..e0740874dfa32722b903793a9eaa797ba5007f1d --- /dev/null +++ b/arch/sw_64/lib/deep-copy_template_c4.S @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memcpy and copy_user with SIMD + * + * $7: SIMD status + * 0: not in simd loop + * 1: in simd and simd_u loop + * $16: latest dest, clobbered + * $17: latest src, clobbered + * $18: bytes left to copy + * + */ + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x60($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + ldi $sp, 0x60($sp); \ + bis $31, $31, $7 + + + ble $18, $out + + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + cmplt $18, 16, $1 + bne $1, $quad_loop_end + cmplt $18, 32, $1 + bne $1, $simd_end + +$prep_simd_loop: + SAVE_SIMD_REGS + cmplt $18, 64, $1 + bne $1, $simd_loop_end + + .align 4 +$simd_loop: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $17, 32, $17 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + +$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + + .align 4 +$quad_loop_tail: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( ldl $3, 8($17) ) + FIXUP_LDST( stl $2, 0($16) ) + FIXUP_LDST( stl $3, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + +$move_one_quad: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + + .align 3 +$byte_loop_tail: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out diff --git a/arch/sw_64/lib/deep-copy_user.S b/arch/sw_64/lib/deep-copy_user.S new file mode 100644 index 0000000000000000000000000000000000000000..b79f8f3f0f4ac1e85dd5d2f130e88f90c1e062c0 --- /dev/null +++ b/arch/sw_64/lib/deep-copy_user.S @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +/* Allow an exception for an insn; exit if we get one. 
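+ * Each FIXUP_LDST() records its insn in __ex_table with a fixup that
+ * branches to $out; $18 always holds the bytes still to copy, so $0
+ * reports the uncopied count after a fault.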
*/ +#define FIXUP_LDST(x, y) \ + 99: x, y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $out-99b($31); \ + .previous + +/* + * $7: SIMD status for C3B + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $7: SIMD status for C4 + * 0: not in simd loop + * 1: in simd and simd_u loop + * $18: bytes left to copy + * + */ + .globl __copy_user + .ent __copy_user +__copy_user: + .prologue 0 + .set noreorder + bis $31, $31, $7 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-copy_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-copy_template_c4.S" +#endif +$out: + bis $31, $18, $0 + beq $7, $return + subl $7, 1, $7 + beq $7, $restore_simd + +#if defined(CONFIG_SUBARCH_C3B) +$restore_simd_u: + RESTORE_SIMD_U_REGS + br $31, $return +#endif + +$restore_simd: + RESTORE_SIMD_REGS + +$return: + ret + .end __copy_user + EXPORT_SYMBOL(__copy_user) diff --git a/arch/sw_64/lib/deep-memcpy.S b/arch/sw_64/lib/deep-memcpy.S new file mode 100644 index 0000000000000000000000000000000000000000..78a6bd85cf016a49948c973ef3a8652a869bb8db --- /dev/null +++ b/arch/sw_64/lib/deep-memcpy.S @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +#define FIXUP_LDST(x, y) \ + x, y + + .globl memcpy + .ent memcpy +memcpy: + .frame $30, 0, $26, 0 + .prologue 0 + mov $16, $0 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-copy_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-copy_template_c4.S" +#endif +$out: + ret + .end memcpy + EXPORT_SYMBOL(memcpy) +__memcpy = memcpy +.globl __memcpy diff --git a/arch/sw_64/lib/deep-memset.S b/arch/sw_64/lib/deep-memset.S new file mode 100644 index 0000000000000000000000000000000000000000..c6b5355beec64d3d5557ae7b26c80cbb7c8b114a --- /dev/null +++ b/arch/sw_64/lib/deep-memset.S @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Optimized memset() for SW64 with SIMD instructions + * + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * Fill SIZE bytes pointed to by SRC with CHAR. 
+ *
+ * Input:
+ *	$16: SRC, clobbered
+ *	$17: CHAR, clobbered
+ *	$18: SIZE, clobbered
+ *
+ * Output:
+ *	$0: SRC
+ *
+ * Temporaries:
+ *	$1: unaligned parts of addr (0 means aligned addr), tmp data
+ *	$2: tmp data
+ *	$3: tmp data
+ *	$4: tmp data
+ *	$5: compare result
+ *	$f10: 32 bytes data (manually saved)
+ *
+ */
+
+#include
+#include
+
+#define FIXUP_LDST(x, y) \
+	x, y
+
+	.set noat
+	.set noreorder
+	.text
+	.align 4
+	.globl memset
+	.globl __memset
+	.globl ___memset
+	.globl __memsetw
+	.globl __constant_c_memset
+	.ent ___memset
+___memset:
+	.frame $30, 0, $26, 0
+	.prologue 0
+
+#ifdef CONFIG_SUBARCH_C4
+	csrr $6, CSR_WR_FREGS
+#endif
+/* expand 1 byte data to 8 bytes */
+	and $17, 0xff, $17
+	sll $17, 8, $4
+	bis $17, $4, $17
+	sll $17, 16, $4
+	bis $17, $4, $17
+	sll $17, 32, $4
+	bis $17, $4, $17
+
+__constant_c_memset:
+	bis $31, $31, $7
+	bis $31, $16, $0
+#if defined(CONFIG_SUBARCH_C3B)
+#include "deep-set_template.S"
+#elif defined(CONFIG_SUBARCH_C4)
+#include "deep-set_template_c4.S"
+#endif
+$out:
+#ifdef CONFIG_SUBARCH_C4
+	csrw $6, CSR_WR_FREGS
+#endif
+	ret
+
+	.end ___memset
+	EXPORT_SYMBOL(___memset)
+
+	.align 5
+	.ent __memsetw
+__memsetw:
+	.prologue 0
+
+	inslh $17, 0, $1
+	inslh $17, 2, $2
+	inslh $17, 4, $3
+	bis $1, $2, $1
+	inslh $17, 6, $4
+	bis $1, $3, $1
+	bis $1, $4, $17
+	br $31, __constant_c_memset
+
+	.end __memsetw
+	EXPORT_SYMBOL(__memsetw)
+
+memset = ___memset
+EXPORT_SYMBOL(memset)
+__memset = ___memset
+EXPORT_SYMBOL(__memset)
diff --git a/arch/sw_64/lib/deep-set_template.S b/arch/sw_64/lib/deep-set_template.S
new file mode 100644
index 0000000000000000000000000000000000000000..f9073d638468dbb77d991ddbbc276f2f57c865ff
--- /dev/null
+++ b/arch/sw_64/lib/deep-set_template.S
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * template for memset and clear_user with SIMD
+ *
+ * $7: SIMD status
+ *	0: not in simd loop
+ *	1: in simd loop
+ *	2: in simd_u loop
+ * $16: latest dest, clobbered
+ * $17: 8-byte data to set
+ * $18: bytes left to set
+ *
+ */
+
+#define NC_STORE_THRESHOLD	2048
+
+#define SAVE_SIMD_REGS \
+	ldi $sp, -0x40($sp); \
+	addl $sp, 0x1f, $23; \
+	bic $23, 0x1f, $23; \
+	vstd $f1, 0($23); \
+	ldi $7, 1
+
+#define RESTORE_SIMD_REGS \
+	vldd $f1, 0($23); \
+	ldi $sp, 0x40($sp); \
+	bis $31, $31, $7
+
+	ble $18, $out
+	and $16, 7, $1
+	beq $1, $dest_aligned_8
+
+	.align 3
+$byte_loop_head:
+	FIXUP_LDST( stb $17, 0($16) )
+	subl $18, 1, $18
+	addl $16, 1, $16
+	ble $18, $out
+	and $16, 7, $1
+	bne $1, $byte_loop_head
+
+$dest_aligned_8:
+	cmplt $18, 16, $1
+	bne $1, $quad_loop_end
+	and $16, 31, $1
+	beq $1, $dest_aligned_32
+	cmplt $18, 64, $1
+	bne $1, $simd_end
+
+	.align 3
+$quad_loop_head:
+	FIXUP_LDST( stl $17, 0($16) )
+	addl $16, 8, $16
+	subl $18, 8, $18
+	and $16, 31, $1
+	beq $1, $dest_aligned_32
+	br $31, $quad_loop_head
+
+$dest_aligned_32:
+	cmplt $18, 64, $1
+	bne $1, $simd_end
+
+$prep_simd_loop:
+	SAVE_SIMD_REGS
+	ifmovd $17, $f1
+	vcpyf $f1, $f1
+	ldi $1, NC_STORE_THRESHOLD($31)
+	cmple $18, $1, $1
+	bne $1, $simd_loop
+
+	.align 3
+$simd_loop_nc:
+	FIXUP_LDST( vstd_nc $f1, 0($16) )
+	FIXUP_LDST( vstd_nc $f1, 32($16) )
+	subl $18, 64, $18
+	addl $16, 64, $16
+	cmplt $18, 64, $1
+	beq $1, $simd_loop_nc
+	memb			# required for _nc store instructions
+	br $31, $simd_loop_end
+
+	.align 3
+$simd_loop:
+	FIXUP_LDST( vstd $f1, 0($16) )
+	FIXUP_LDST( vstd $f1, 32($16) )
+	subl $18, 64, $18
+	addl $16, 64, $16
+	cmplt $18, 64, $1
+	beq $1, $simd_loop
+
+$simd_loop_end:
+	cmplt $18, 32, $1
+	bne $1, $no_more_simd
+	FIXUP_LDST( vstd $f1, 0($16) )
+	subl $18, 32, $18
+	addl $16, 32, $16
+
+$no_more_simd:
+	RESTORE_SIMD_REGS
+
+$simd_end:
+	ble $18, $out
+	cmplt $18, 16, $1
+	bne $1, $quad_loop_end
+
+	.align 3
+$quad_loop_tail:
+	FIXUP_LDST( stl $17, 0($16) )
+	FIXUP_LDST( stl $17, 8($16) )
+	subl $18, 16, $18
+	addl $16, 16, $16
+	cmplt $18, 16, $1
+	beq $1, $quad_loop_tail
+
+$quad_loop_end:
+	ble $18, $out
+	cmplt $18, 8, $1
+	bne $1, $byte_loop_tail
+
+$move_one_quad:
+	FIXUP_LDST( stl $17, 0($16) )
+	subl $18, 8, $18
+	addl $16, 8, $16
+	ble $18, $out
+
+	.align 3
+$byte_loop_tail:
+	FIXUP_LDST( stb $17, 0($16) )
+	subl $18, 1, $18
+	addl $16, 1, $16
+	bgt $18, $byte_loop_tail
+	br $31, $out
diff --git a/arch/sw_64/lib/deep-set_template_c4.S b/arch/sw_64/lib/deep-set_template_c4.S
new file mode 100644
index 0000000000000000000000000000000000000000..2b1bcab8fec96d9495f85477ca4ab87abfbf8308
--- /dev/null
+++ b/arch/sw_64/lib/deep-set_template_c4.S
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * template for memset and clear_user with SIMD
+ *
+ * $7: SIMD status
+ *	0: not in simd loop
+ *	1: in simd loop
+ * $16: latest dest, clobbered
+ * $17: 8-byte data to set
+ * $18: bytes left to set
+ *
+ */
+
+#define SAVE_SIMD_REGS \
+	ldi $sp, -0x40($sp); \
+	addl $sp, 0x1f, $23; \
+	bic $23, 0x1f, $23; \
+	vstd $f1, 0($23); \
+	ldi $7, 1
+
+#define RESTORE_SIMD_REGS \
+	vldd $f1, 0($23); \
+	ldi $sp, 0x40($sp); \
+	bis $31, $31, $7
+
+	ble $18, $out
+
+	cmplt $18, 8, $1
+	bne $1, $byte_loop_tail
+	cmplt $18, 16, $1
+	bne $1, $quad_loop_end
+	cmplt $18, 32, $1
+	bne $1, $simd_end
+
+$prep_simd_loop:
+	SAVE_SIMD_REGS
+	ifmovd $17, $f1
+	vcpyf $f1, $f1
+	cmplt $18, 64, $1
+	bne $1, $simd_loop_end
+
+	.align 3
+$simd_loop:
+	FIXUP_LDST( vstd $f1, 0($16) )
+	FIXUP_LDST( vstd $f1, 32($16) )
+	subl $18, 64, $18
+	addl $16, 64, $16
+	cmplt $18, 64, $1
+	beq $1, $simd_loop
+
+$simd_loop_end:
+	cmplt $18, 32, $1
+	bne $1, $no_more_simd
+	FIXUP_LDST( vstd $f1, 0($16) )
+	subl $18, 32, $18
+	addl $16, 32, $16
+
+$no_more_simd:
+	RESTORE_SIMD_REGS
+
+$simd_end:
+	ble $18, $out
+	cmplt $18, 16, $1
+	bne $1, $quad_loop_end
+
+	.align 3
+$quad_loop_tail:
+	FIXUP_LDST( stl $17, 0($16) )
+	FIXUP_LDST( stl $17, 8($16) )
+	subl $18, 16, $18
+	addl $16, 16, $16
+	cmplt $18, 16, $1
+	beq $1, $quad_loop_tail
+
+$quad_loop_end:
+	ble $18, $out
+	cmplt $18, 8, $1
+	bne $1, $byte_loop_tail
+
+$move_one_quad:
+	FIXUP_LDST( stl $17, 0($16) )
+	subl $18, 8, $18
+	addl $16, 8, $16
+	ble $18, $out
+
+	.align 3
+$byte_loop_tail:
+	FIXUP_LDST( stb $17, 0($16) )
+	subl $18, 1, $18
+	addl $16, 1, $16
+	bgt $18, $byte_loop_tail
+	br $31, $out
diff --git a/arch/sw_64/lib/divide.S b/arch/sw_64/lib/divide.S
new file mode 100644
index 0000000000000000000000000000000000000000..ceef343a6084f79774bfb122e51c34533a56839f
--- /dev/null
+++ b/arch/sw_64/lib/divide.S
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) 1995 Linus Torvalds
+ *
+ * The sw64 chip doesn't provide hardware division, so we have to do it
+ * by hand. The compiler expects the functions
+ *
+ *	__divlu: 64-bit unsigned long divide
+ *	__remlu: 64-bit unsigned long remainder
+ *	__divls/__remls: signed 64-bit
+ *	__divwu/__remwu: unsigned 32-bit
+ *	__divws/__remws: signed 32-bit
+ *
+ * These are not normal C functions: instead of the normal
+ * calling sequence, these expect their arguments in registers
+ * $24 and $25, and return the result in $27.
Register $28 may + * be clobbered (assembly temporary), anything else must be saved. + * + * In short: painful. + * + * This is a rather simple bit-at-a-time algorithm: it's very good + * at dividing random 64-bit numbers, but the more usual case where + * the divisor is small is handled better by the DEC algorithm + * using lookup tables. This uses much less memory, though, and is + * nicer on the cache.. Besides, I don't know the copyright status + * of the DEC code. + */ + +/* + * My temporaries: + * $0 - current bit + * $1 - shifted divisor + * $2 - modulus/quotient + * + * $23 - return address + * $24 - dividend + * $25 - divisor + * + * $27 - quotient/modulus + * $28 - compare status + */ +#include + +#define halt .long 0 + +/* + * Select function type and registers + */ +#define mask $0 +#define divisor $1 +#define compare $28 +#define tmp1 $3 +#define tmp2 $4 + +#ifdef DIV +#define DIV_ONLY(x,y...) x, ##y +#define MOD_ONLY(x,y...) +#define func(x) __div##x +#define modulus $2 +#define quotient $27 +#define GETSIGN(x) xor $24, $25, x +#define STACK 48 +#else +#define DIV_ONLY(x,y...) +#define MOD_ONLY(x,y...) x, ##y +#define func(x) __rem##x +#define modulus $27 +#define quotient $2 +#define GETSIGN(x) bis $24, $24, x +#define STACK 32 +#endif + +/* + * For 32-bit operations, we need to extend to 64-bit + */ +#ifdef INTSIZE +#define ufunction func(wu) +#define sfunction func(w) +#define LONGIFY(x) zapnot x, 15, x +#define SLONGIFY(x) addw x, 0, x +#else +#define ufunction func(lu) +#define sfunction func(l) +#define LONGIFY(x) +#define SLONGIFY(x) +#endif + +.set noat +.align 3 +.globl ufunction +.ent ufunction +ufunction: + subl $30, STACK, $30 + .frame $30, STACK, $23 + .prologue 0 + +7: stl $1, 0($30) + bis $25, $25, divisor + stl $2, 8($30) + bis $24, $24, modulus + stl $0, 16($30) + bis $31, $31, quotient + LONGIFY(divisor) + stl tmp1, 24($30) + LONGIFY(modulus) + bis $31, 1, mask + DIV_ONLY(stl tmp2, 32($30)) + beq divisor, 9f # div by zero + +#ifdef INTSIZE + /* + * shift divisor left, using 3-bit shifts for + * 32-bit divides as we can't overflow. Three-bit + * shifts will result in looping three times less + * here, but can result in two loops more later. + * Thus using a large shift isn't worth it (and + * s8add pairs better than a sll..) + */ +1: cmpult divisor, modulus, compare + s8addl divisor, $31, divisor + s8addl mask, $31, mask + bne compare, 1b +#else +1: cmpult divisor, modulus, compare + blt divisor, 2f + addl divisor, divisor, divisor + addl mask, mask, mask + bne compare, 1b +#endif + + /* ok, start to go right again.. */ +2: DIV_ONLY(addl quotient, mask, tmp2) + srl mask, 1, mask + cmpule divisor, modulus, compare + subl modulus, divisor, tmp1 + DIV_ONLY(selne compare, tmp2, quotient, quotient) + srl divisor, 1, divisor + selne compare, tmp1, modulus, modulus + bne mask, 2b + +9: ldl $1, 0($30) + ldl $2, 8($30) + ldl $0, 16($30) + ldl tmp1, 24($30) + DIV_ONLY(ldl tmp2, 32($30)) + addl $30, STACK, $30 + ret $31, ($23), 1 + .end ufunction + EXPORT_SYMBOL(ufunction) +/* + * Uhh.. Ugly signed division. I'd rather not have it at all, but + * it's needed in some circumstances. There are different ways to + * handle this, really. This does: + * -a / b = a / -b = -(a / b) + * -a % b = -(a % b) + * a % -b = a % b + * which is probably not the best solution, but at least should + * have the property that (x/y)*y + (x%y) = x. 
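+ *
+ * A concrete check of those identities, with x = -7 and y = 2:
+ * -7 / 2 = -(7 / 2) = -3 and -7 % 2 = -(7 % 2) = -1, and indeed
+ * (-3) * 2 + (-1) = -7.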
+ */ +.align 3 +.globl sfunction +.ent sfunction +sfunction: + subl $30, STACK, $30 + .frame $30, STACK, $23 + .prologue 0 + bis $24, $25, $28 + SLONGIFY($28) + bge $28, 7b + stl $24, 0($30) + subl $31, $24, $28 + stl $25, 8($30) + sellt $24, $28, $24, $24 # abs($24) + stl $23, 16($30) + subl $31, $25, $28 + stl tmp1, 24($30) + sellt $25, $28, $25, $25 # abs($25) + bsr $23, ufunction + ldl $24, 0($30) + ldl $25, 8($30) + GETSIGN($28) + subl $31, $27, tmp1 + SLONGIFY($28) + ldl $23, 16($30) + sellt $28, tmp1, $27, $27 + ldl tmp1, 24($30) + addl $30, STACK, $30 + ret $31, ($23), 1 + .end sfunction + EXPORT_SYMBOL(sfunction) diff --git a/arch/sw_64/lib/fls.c b/arch/sw_64/lib/fls.c new file mode 100644 index 0000000000000000000000000000000000000000..aa4231f7e472dc1fdca58ea2d63631b64cc2fc4f --- /dev/null +++ b/arch/sw_64/lib/fls.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +/* This is fls(x)-1, except zero is held to zero. This allows most + * efficient input into extbl, plus it allows easy handling of fls(0)=0. + */ + +const unsigned char __flsm1_tab[256] = { + 0, + 0, + 1, 1, + 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, +}; +EXPORT_SYMBOL(__flsm1_tab); diff --git a/arch/sw_64/lib/fpreg.c b/arch/sw_64/lib/fpreg.c new file mode 100644 index 0000000000000000000000000000000000000000..1788703109086d803c4ac0f13277e4eba02a7095 --- /dev/null +++ b/arch/sw_64/lib/fpreg.c @@ -0,0 +1,992 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) Copyright 1998 Linus Torvalds + */ + +#include +#include + +#define STT(reg, val) \ + asm volatile("fimovd $f"#reg", %0" : "=r"(val)) +#define STS(reg, val) \ + asm volatile("fimovs $f"#reg", %0" : "=r"(val)) +#define LDT(reg, val) \ + asm volatile("ifmovd %0, $f"#reg : : "r"(val)) +#define LDS(reg, val) \ + asm volatile("ifmovs %0, $f"#reg : : "r"(val)) +#define VLDD(reg, val) \ + asm volatile("vldd $f"#reg", %0" : : "m"(val) : "memory") +#define VSTD(reg, val) \ + asm volatile("vstd $f"#reg", %0" : "=m"(val) : : "memory") +#define VLDS(reg, val) \ + asm volatile("vlds $f"#reg", %0" : : "m"(val) : "memory") +#define LDWE(reg, val) \ + asm volatile("ldwe $f"#reg", %0" : : "m"(val) : "memory") +#define VSTS(reg, val) \ + asm volatile("vsts $f"#reg", %0" : "=m"(val) : : "memory") +#define STDH(reg, val) \ + asm volatile("vstd $f"#reg", %0" : "=m"(val) : : "memory") + +void +sw64_write_simd_fp_reg_s(unsigned long reg, unsigned long f0, unsigned long f1) +{ + + unsigned long tmpa[4] __aligned(16); + + tmpa[0] = f0; + tmpa[1] = f1; + + switch (reg) { + case 0: + VLDS(0, *tmpa); + break; + case 1: + VLDS(1, *tmpa); + break; + case 2: + VLDS(2, *tmpa); + break; + case 3: + VLDS(3, *tmpa); + break; + case 4: + VLDS(4, *tmpa); + break; + case 5: + VLDS(5, *tmpa); + break; + case 6: + VLDS(6, *tmpa); + break; + case 7: 
+ VLDS(7, *tmpa); + break; + case 8: + VLDS(8, *tmpa); + break; + case 9: + VLDS(9, *tmpa); + break; + case 10: + VLDS(10, *tmpa); + break; + case 11: + VLDS(11, *tmpa); + break; + case 12: + VLDS(12, *tmpa); + break; + case 13: + VLDS(13, *tmpa); + break; + case 14: + VLDS(14, *tmpa); + break; + case 15: + VLDS(15, *tmpa); + break; + case 16: + VLDS(16, *tmpa); + break; + case 17: + VLDS(17, *tmpa); + break; + case 18: + VLDS(18, *tmpa); + break; + case 19: + VLDS(19, *tmpa); + break; + case 20: + VLDS(20, *tmpa); + break; + case 21: + VLDS(21, *tmpa); + break; + case 22: + VLDS(22, *tmpa); + break; + case 23: + VLDS(23, *tmpa); + break; + case 24: + VLDS(24, *tmpa); + break; + case 25: + VLDS(25, *tmpa); + break; + case 26: + VLDS(26, *tmpa); + break; + case 27: + VLDS(27, *tmpa); + break; + case 28: + VLDS(28, *tmpa); + break; + case 29: + VLDS(29, *tmpa); + break; + case 30: + VLDS(30, *tmpa); + break; + case 31: + break; + } + +} + + +void sw64_write_simd_fp_reg_d(unsigned long reg, unsigned long f0, + unsigned long f1, unsigned long f2, unsigned long f3) +{ + unsigned long tmpa[4] __aligned(32); + + tmpa[0] = f0; + tmpa[1] = f1; + tmpa[2] = f2; + tmpa[3] = f3; + + switch (reg) { + case 0: + VLDD(0, *tmpa); + break; + case 1: + VLDD(1, *tmpa); + break; + case 2: + VLDD(2, *tmpa); + break; + case 3: + VLDD(3, *tmpa); + break; + case 4: + VLDD(4, *tmpa); + break; + case 5: + VLDD(5, *tmpa); + break; + case 6: + VLDD(6, *tmpa); + break; + case 7: + VLDD(7, *tmpa); + break; + case 8: + VLDD(8, *tmpa); + break; + case 9: + VLDD(9, *tmpa); + break; + case 10: + VLDD(10, *tmpa); + break; + case 11: + VLDD(11, *tmpa); + break; + case 12: + VLDD(12, *tmpa); + break; + case 13: + VLDD(13, *tmpa); + break; + case 14: + VLDD(14, *tmpa); + break; + case 15: + VLDD(15, *tmpa); + break; + case 16: + VLDD(16, *tmpa); + break; + case 17: + VLDD(17, *tmpa); + break; + case 18: + VLDD(18, *tmpa); + break; + case 19: + VLDD(19, *tmpa); + break; + case 20: + VLDD(20, *tmpa); + break; + case 21: + VLDD(21, *tmpa); + break; + case 22: + VLDD(22, *tmpa); + break; + case 23: + VLDD(23, *tmpa); + break; + case 24: + VLDD(24, *tmpa); + break; + case 25: + VLDD(25, *tmpa); + break; + case 26: + VLDD(26, *tmpa); + break; + case 27: + VLDD(27, *tmpa); + break; + case 28: + VLDD(28, *tmpa); + break; + case 29: + VLDD(29, *tmpa); + break; + case 30: + VLDD(30, *tmpa); + break; + case 31: + break; + } + + +} + + +void sw64_write_simd_fp_reg_ldwe(unsigned long reg, int a) +{ + switch (reg) { + case 0: + LDWE(0, a); + break; + case 1: + LDWE(1, a); + break; + case 2: + LDWE(2, a); + break; + case 3: + LDWE(3, a); + break; + case 4: + LDWE(4, a); + break; + case 5: + LDWE(5, a); + break; + case 6: + LDWE(6, a); + break; + case 7: + LDWE(7, a); + break; + case 8: + LDWE(8, a); + break; + case 9: + LDWE(9, a); + break; + case 10: + LDWE(10, a); + break; + case 11: + LDWE(11, a); + break; + case 12: + LDWE(12, a); + break; + case 13: + LDWE(13, a); + break; + case 14: + LDWE(14, a); + break; + case 15: + LDWE(15, a); + break; + case 16: + LDWE(16, a); + break; + case 17: + LDWE(17, a); + break; + case 18: + LDWE(18, a); + break; + case 19: + LDWE(19, a); + break; + case 20: + LDWE(20, a); + break; + case 21: + LDWE(21, a); + break; + case 22: + LDWE(22, a); + break; + case 23: + LDWE(23, a); + break; + case 24: + LDWE(24, a); + break; + case 25: + LDWE(25, a); + break; + case 26: + LDWE(26, a); + break; + case 27: + LDWE(27, a); + break; + case 28: + LDWE(28, a); + break; + case 29: + LDWE(29, a); + break; + case 30: + 
LDWE(30, a); + break; + case 31: + break; + } +} + + +void sw64_read_simd_fp_m_s(unsigned long reg, unsigned long *fp_value) +{ + volatile unsigned long tmpa[2] __aligned(16); + + switch (reg) { + case 0: + VSTS(0, *tmpa); + break; + case 1: + VSTS(1, *tmpa); + break; + case 2: + VSTS(2, *tmpa); + break; + case 3: + VSTS(3, *tmpa); + break; + case 4: + VSTS(4, *tmpa); + break; + case 5: + VSTS(5, *tmpa); + break; + case 6: + VSTS(6, *tmpa); + break; + case 7: + VSTS(7, *tmpa); + break; + case 8: + VSTS(8, *tmpa); + break; + case 9: + VSTS(9, *tmpa); + break; + case 10: + VSTS(10, *tmpa); + break; + case 11: + VSTS(11, *tmpa); + break; + case 12: + VSTS(12, *tmpa); + break; + case 13: + VSTS(13, *tmpa); + break; + case 14: + VSTS(14, *tmpa); + break; + case 15: + VSTS(15, *tmpa); + break; + case 16: + VSTS(16, *tmpa); + break; + case 17: + VSTS(17, *tmpa); + break; + case 18: + VSTS(18, *tmpa); + break; + case 19: + VSTS(19, *tmpa); + break; + case 20: + VSTS(20, *tmpa); + break; + case 21: + VSTS(21, *tmpa); + break; + case 22: + VSTS(22, *tmpa); + break; + case 23: + VSTS(23, *tmpa); + break; + case 24: + VSTS(24, *tmpa); + break; + case 25: + VSTS(25, *tmpa); + break; + case 26: + VSTS(26, *tmpa); + break; + case 27: + VSTS(27, *tmpa); + break; + case 28: + VSTS(28, *tmpa); + break; + case 29: + VSTS(29, *tmpa); + break; + case 30: + VSTS(30, *tmpa); + break; + case 31: + VSTS(31, *tmpa); + break; + } + + *fp_value = tmpa[0]; + *(fp_value+1) = tmpa[1]; +} + +void sw64_read_simd_fp_m_d(unsigned long reg, unsigned long *fp_value) +{ + volatile unsigned long tmpa[4] __aligned(32); + + switch (reg) { + case 0: + VSTD(0, *tmpa); + break; + case 1: + VSTD(1, *tmpa); + break; + case 2: + VSTD(2, *tmpa); + break; + case 3: + VSTD(3, *tmpa); + break; + case 4: + VSTD(4, *tmpa); + break; + case 5: + VSTD(5, *tmpa); + break; + case 6: + VSTD(6, *tmpa); + break; + case 7: + VSTD(7, *tmpa); + break; + case 8: + VSTD(8, *tmpa); + break; + case 9: + VSTD(9, *tmpa); + break; + case 10: + VSTD(10, *tmpa); + break; + case 11: + VSTD(11, *tmpa); + break; + case 12: + VSTD(12, *tmpa); + break; + case 13: + VSTD(13, *tmpa); + break; + case 14: + VSTD(14, *tmpa); + break; + case 15: + VSTD(15, *tmpa); + break; + case 16: + VSTD(16, *tmpa); + break; + case 17: + VSTD(17, *tmpa); + break; + case 18: + VSTD(18, *tmpa); + break; + case 19: + VSTD(19, *tmpa); + break; + case 20: + VSTD(20, *tmpa); + break; + case 21: + VSTD(21, *tmpa); + break; + case 22: + VSTD(22, *tmpa); + break; + case 23: + VSTD(23, *tmpa); + break; + case 24: + VSTD(24, *tmpa); + break; + case 25: + VSTD(25, *tmpa); + break; + case 26: + VSTD(26, *tmpa); + break; + case 27: + VSTD(27, *tmpa); + break; + case 28: + VSTD(28, *tmpa); + break; + case 29: + VSTD(29, *tmpa); + break; + case 30: + VSTD(30, *tmpa); + break; + case 31: + VSTD(31, *tmpa); + break; + } + + *fp_value = tmpa[0]; + *(fp_value+1) = tmpa[1]; + *(fp_value+2) = tmpa[2]; + *(fp_value+3) = tmpa[3]; +} + +unsigned long sw64_read_fp_reg(unsigned long reg) +{ + unsigned long val; + + switch (reg) { + case 0: + STT(0, val); + break; + case 1: + STT(1, val); + break; + case 2: + STT(2, val); + break; + case 3: + STT(3, val); + break; + case 4: + STT(4, val); + break; + case 5: + STT(5, val); + break; + case 6: + STT(6, val); + break; + case 7: + STT(7, val); + break; + case 8: + STT(8, val); + break; + case 9: + STT(9, val); + break; + case 10: + STT(10, val); + break; + case 11: + STT(11, val); + break; + case 12: + STT(12, val); + break; + case 13: + STT(13, val); + break; + case 
14: + STT(14, val); + break; + case 15: + STT(15, val); + break; + case 16: + STT(16, val); + break; + case 17: + STT(17, val); + break; + case 18: + STT(18, val); + break; + case 19: + STT(19, val); + break; + case 20: + STT(20, val); + break; + case 21: + STT(21, val); + break; + case 22: + STT(22, val); + break; + case 23: + STT(23, val); + break; + case 24: + STT(24, val); + break; + case 25: + STT(25, val); + break; + case 26: + STT(26, val); + break; + case 27: + STT(27, val); + break; + case 28: + STT(28, val); + break; + case 29: + STT(29, val); + break; + case 30: + STT(30, val); + break; + case 31: + STT(31, val); + break; + default: + return 0; + } + + return val; +} +EXPORT_SYMBOL(sw64_read_fp_reg); + +void sw64_write_fp_reg(unsigned long reg, unsigned long val) +{ + switch (reg) { + case 0: + LDT(0, val); + break; + case 1: + LDT(1, val); + break; + case 2: + LDT(2, val); + break; + case 3: + LDT(3, val); + break; + case 4: + LDT(4, val); + break; + case 5: + LDT(5, val); + break; + case 6: + LDT(6, val); + break; + case 7: + LDT(7, val); + break; + case 8: + LDT(8, val); + break; + case 9: + LDT(9, val); + break; + case 10: + LDT(10, val); + break; + case 11: + LDT(11, val); + break; + case 12: + LDT(12, val); + break; + case 13: + LDT(13, val); + break; + case 14: + LDT(14, val); + break; + case 15: + LDT(15, val); + break; + case 16: + LDT(16, val); + break; + case 17: + LDT(17, val); + break; + case 18: + LDT(18, val); + break; + case 19: + LDT(19, val); + break; + case 20: + LDT(20, val); + break; + case 21: + LDT(21, val); + break; + case 22: + LDT(22, val); + break; + case 23: + LDT(23, val); + break; + case 24: + LDT(24, val); + break; + case 25: + LDT(25, val); + break; + case 26: + LDT(26, val); + break; + case 27: + LDT(27, val); + break; + case 28: + LDT(28, val); + break; + case 29: + LDT(29, val); + break; + case 30: + LDT(30, val); + break; + case 31: + LDT(31, val); + break; + } +} +EXPORT_SYMBOL(sw64_write_fp_reg); + +unsigned long sw64_read_fp_reg_s(unsigned long reg) +{ + unsigned long val; + + switch (reg) { + case 0: + STS(0, val); + break; + case 1: + STS(1, val); + break; + case 2: + STS(2, val); + break; + case 3: + STS(3, val); + break; + case 4: + STS(4, val); + break; + case 5: + STS(5, val); + break; + case 6: + STS(6, val); + break; + case 7: + STS(7, val); + break; + case 8: + STS(8, val); + break; + case 9: + STS(9, val); + break; + case 10: + STS(10, val); + break; + case 11: + STS(11, val); + break; + case 12: + STS(12, val); + break; + case 13: + STS(13, val); + break; + case 14: + STS(14, val); + break; + case 15: + STS(15, val); + break; + case 16: + STS(16, val); + break; + case 17: + STS(17, val); + break; + case 18: + STS(18, val); + break; + case 19: + STS(19, val); + break; + case 20: + STS(20, val); + break; + case 21: + STS(21, val); + break; + case 22: + STS(22, val); + break; + case 23: + STS(23, val); + break; + case 24: + STS(24, val); + break; + case 25: + STS(25, val); + break; + case 26: + STS(26, val); + break; + case 27: + STS(27, val); + break; + case 28: + STS(28, val); + break; + case 29: + STS(29, val); + break; + case 30: + STS(30, val); + break; + case 31: + STS(31, val); + break; + default: + return 0; + } + + return val; +} +EXPORT_SYMBOL(sw64_read_fp_reg_s); + +void sw64_write_fp_reg_s(unsigned long reg, unsigned long val) +{ + switch (reg) { + case 0: + LDS(0, val); + break; + case 1: + LDS(1, val); + break; + case 2: + LDS(2, val); + break; + case 3: + LDS(3, val); + break; + case 4: + LDS(4, val); + break; + case 
5: + LDS(5, val); + break; + case 6: + LDS(6, val); + break; + case 7: + LDS(7, val); + break; + case 8: + LDS(8, val); + break; + case 9: + LDS(9, val); + break; + case 10: + LDS(10, val); + break; + case 11: + LDS(11, val); + break; + case 12: + LDS(12, val); + break; + case 13: + LDS(13, val); + break; + case 14: + LDS(14, val); + break; + case 15: + LDS(15, val); + break; + case 16: + LDS(16, val); + break; + case 17: + LDS(17, val); + break; + case 18: + LDS(18, val); + break; + case 19: + LDS(19, val); + break; + case 20: + LDS(20, val); + break; + case 21: + LDS(21, val); + break; + case 22: + LDS(22, val); + break; + case 23: + LDS(23, val); + break; + case 24: + LDS(24, val); + break; + case 25: + LDS(25, val); + break; + case 26: + LDS(26, val); + break; + case 27: + LDS(27, val); + break; + case 28: + LDS(28, val); + break; + case 29: + LDS(29, val); + break; + case 30: + LDS(30, val); + break; + case 31: + LDS(31, val); + break; + } +} +EXPORT_SYMBOL(sw64_write_fp_reg_s); diff --git a/arch/sw_64/lib/iomap.c b/arch/sw_64/lib/iomap.c new file mode 100644 index 0000000000000000000000000000000000000000..d9c66a89131e4028ced9454d27c94dc5aae388f9 --- /dev/null +++ b/arch/sw_64/lib/iomap.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sw_64 IO and memory functions. + */ + +#include + +#include +#include + +/* + * Here comes the sw64 implementation of the IOMAP interfaces. + */ +unsigned int ioread8(const void __iomem *addr) +{ + return readb(addr); +} +EXPORT_SYMBOL(ioread8); + +unsigned int ioread16(const void __iomem *addr) +{ + return readw(addr); +} +EXPORT_SYMBOL(ioread16); + +unsigned int ioread32(const void __iomem *addr) +{ + return readl(addr); +} +EXPORT_SYMBOL(ioread32); + +void iowrite8(u8 b, void __iomem *addr) +{ + writeb(b, addr); +} +EXPORT_SYMBOL(iowrite8); + +void iowrite16(u16 b, void __iomem *addr) +{ + writew(b, addr); +} +EXPORT_SYMBOL(iowrite16); + +void iowrite32(u32 b, void __iomem *addr) +{ + writel(b, addr); +} +EXPORT_SYMBOL(iowrite32); + +u8 inb(unsigned long port) +{ + return ioread8(ioport_map(port, 1)); +} +EXPORT_SYMBOL(inb); + +u16 inw(unsigned long port) +{ + return ioread16(ioport_map(port, 2)); +} +EXPORT_SYMBOL(inw); + +u32 inl(unsigned long port) +{ + return ioread32(ioport_map(port, 4)); +} +EXPORT_SYMBOL(inl); + +void outb(u8 b, unsigned long port) +{ + iowrite8(b, ioport_map(port, 1)); +} +EXPORT_SYMBOL(outb); + +void outw(u16 b, unsigned long port) +{ + iowrite16(b, ioport_map(port, 2)); +} +EXPORT_SYMBOL(outw); + +void outl(u32 b, unsigned long port) +{ + iowrite32(b, ioport_map(port, 4)); +} +EXPORT_SYMBOL(outl); + + +/* + * Read COUNT 8-bit bytes from port PORT into memory starting at SRC. + */ +void ioread8_rep(const void __iomem *port, void *dst, unsigned long count) +{ + while ((unsigned long)dst & 0x3) { + if (!count) + return; + count--; + *(unsigned char *)dst = ioread8(port); + dst += 1; + } + + while (count >= 4) { + unsigned int w; + + count -= 4; + w = ioread8(port); + w |= ioread8(port) << 8; + w |= ioread8(port) << 16; + w |= ioread8(port) << 24; + *(unsigned int *)dst = w; + dst += 4; + } + + while (count) { + --count; + *(unsigned char *)dst = ioread8(port); + dst += 1; + } +} +EXPORT_SYMBOL(ioread8_rep); + +void insb(unsigned long port, void *dst, unsigned long count) +{ + ioread8_rep(ioport_map(port, 1), dst, count); +} +EXPORT_SYMBOL(insb); + +/* + * Read COUNT 16-bit words from port PORT into memory starting at + * SRC. SRC must be at least short aligned. 
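+ * Pairs of halfword reads are packed into a single aligned 32-bit
+ * store on the destination side (see the loop below).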
This is used by the + * IDE driver to read disk sectors. Performance is important, but + * the interfaces seems to be slow: just using the inlined version + * of the inw() breaks things. + */ +void ioread16_rep(const void __iomem *port, void *dst, unsigned long count) +{ + if (unlikely((unsigned long)dst & 0x3)) { + if (!count) + return; + BUG_ON((unsigned long)dst & 0x1); + count--; + *(unsigned short *)dst = ioread16(port); + dst += 2; + } + + while (count >= 2) { + unsigned int w; + + count -= 2; + w = ioread16(port); + w |= ioread16(port) << 16; + *(unsigned int *)dst = w; + dst += 4; + } + + if (count) + *(unsigned short *)dst = ioread16(port); +} +EXPORT_SYMBOL(ioread16_rep); + +void insw(unsigned long port, void *dst, unsigned long count) +{ + ioread16_rep(ioport_map(port, 2), dst, count); +} +EXPORT_SYMBOL(insw); + + +/* + * Read COUNT 32-bit words from port PORT into memory starting at + * SRC. Now works with any alignment in SRC. Performance is important, + * but the interfaces seems to be slow: just using the inlined version + * of the inl() breaks things. + */ +void ioread32_rep(const void __iomem *port, void *dst, unsigned long count) +{ + if (unlikely((unsigned long)dst & 0x3)) { + while (count--) { + struct S { int x __packed; }; + ((struct S *)dst)->x = ioread32(port); + dst += 4; + } + } else { + /* Buffer 32-bit aligned. */ + while (count--) { + *(unsigned int *)dst = ioread32(port); + dst += 4; + } + } +} +EXPORT_SYMBOL(ioread32_rep); + +void insl(unsigned long port, void *dst, unsigned long count) +{ + ioread32_rep(ioport_map(port, 4), dst, count); +} +EXPORT_SYMBOL(insl); + + +/* + * Like insb but in the opposite direction. + * Don't worry as much about doing aligned memory transfers: + * doing byte reads the "slow" way isn't nearly as slow as + * doing byte writes the slow way (no r-m-w cycle). + */ +void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count) +{ + const unsigned char *src = xsrc; + + while (count--) + iowrite8(*src++, port); +} +EXPORT_SYMBOL(iowrite8_rep); + +void outsb(unsigned long port, const void *src, unsigned long count) +{ + iowrite8_rep(ioport_map(port, 1), src, count); +} +EXPORT_SYMBOL(outsb); + + +/* + * Like insw but in the opposite direction. This is used by the IDE + * driver to write disk sectors. Performance is important, but the + * interfaces seems to be slow: just using the inlined version of the + * outw() breaks things. + */ +void iowrite16_rep(void __iomem *port, const void *src, unsigned long count) +{ + if (unlikely((unsigned long)src & 0x3)) { + if (!count) + return; + BUG_ON((unsigned long)src & 0x1); + iowrite16(*(unsigned short *)src, port); + src += 2; + --count; + } + + while (count >= 2) { + unsigned int w; + + count -= 2; + w = *(unsigned int *)src; + src += 4; + iowrite16(w >> 0, port); + iowrite16(w >> 16, port); + } + + if (count) + iowrite16(*(unsigned short *)src, port); +} +EXPORT_SYMBOL(iowrite16_rep); + +void outsw(unsigned long port, const void *src, unsigned long count) +{ + iowrite16_rep(ioport_map(port, 2), src, count); +} +EXPORT_SYMBOL(outsw); + + +/* + * Like insl but in the opposite direction. This is used by the IDE + * driver to write disk sectors. Works with any alignment in SRC. + * Performance is important, but the interfaces seems to be slow: + * just using the inlined version of the outl() breaks things. 
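+ * An unaligned SRC is read through a __packed struct member, so the
+ * compiler emits accesses that are safe at any alignment.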
+ */ +void iowrite32_rep(void __iomem *port, const void *src, unsigned long count) +{ + if (unlikely((unsigned long)src & 0x3)) { + while (count--) { + struct S { int x __packed; }; + iowrite32(((struct S *)src)->x, port); + src += 4; + } + } else { + /* Buffer 32-bit aligned. */ + while (count--) { + iowrite32(*(unsigned int *)src, port); + src += 4; + } + } +} +EXPORT_SYMBOL(iowrite32_rep); + +void outsl(unsigned long port, const void *src, unsigned long count) +{ + iowrite32_rep(ioport_map(port, 4), src, count); +} +EXPORT_SYMBOL(outsl); + + +/* + * Copy data from IO memory space to "real" memory space. + * This needs to be optimized. + */ +void memcpy_fromio(void *to, const volatile void __iomem *from, long count) +{ + /* + * Optimize co-aligned transfers. Everything else gets handled + * a byte at a time. + */ + + if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { + count -= 8; + do { + *(u64 *)to = __raw_readq(from); + count -= 8; + to += 8; + from += 8; + } while (count >= 0); + count += 8; + } + + if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { + count -= 4; + do { + *(u32 *)to = __raw_readl(from); + count -= 4; + to += 4; + from += 4; + } while (count >= 0); + count += 4; + } + + if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { + count -= 2; + do { + *(u16 *)to = __raw_readw(from); + count -= 2; + to += 2; + from += 2; + } while (count >= 0); + count += 2; + } + + while (count > 0) { + *(u8 *) to = __raw_readb(from); + count--; + to++; + from++; + } + mb(); +} +EXPORT_SYMBOL(memcpy_fromio); + + +/* + * Copy data from "real" memory space to IO memory space. + * This needs to be optimized. + */ +void memcpy_toio(volatile void __iomem *to, const void *from, long count) +{ + /* + * Optimize co-aligned transfers. Everything else gets handled + * a byte at a time. + * FIXME -- align FROM. + */ + + if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { + count -= 8; + do { + __raw_writeq(*(const u64 *)from, to); + count -= 8; + to += 8; + from += 8; + } while (count >= 0); + count += 8; + } + + if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { + count -= 4; + do { + __raw_writel(*(const u32 *)from, to); + count -= 4; + to += 4; + from += 4; + } while (count >= 0); + count += 4; + } + + if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { + count -= 2; + do { + __raw_writew(*(const u16 *)from, to); + count -= 2; + to += 2; + from += 2; + } while (count >= 0); + count += 2; + } + + while (count > 0) { + __raw_writeb(*(const u8 *) from, to); + count--; + to++; + from++; + } + mb(); +} +EXPORT_SYMBOL(memcpy_toio); + + +/* + * "memset" on IO memory space. 
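+ *
+ * The strategy below: peel an odd byte, halfword and word head until
+ * TO is 8-byte aligned, fill with quadword writes, then mirror the
+ * same steps for the tail.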
+ */ +void _memset_c_io(volatile void __iomem *to, unsigned long c, long count) +{ + /* Handle any initial odd byte */ + if (count > 0 && ((u64)to & 1)) { + __raw_writeb(c, to); + to++; + count--; + } + + /* Handle any initial odd halfword */ + if (count >= 2 && ((u64)to & 2)) { + __raw_writew(c, to); + to += 2; + count -= 2; + } + + /* Handle any initial odd word */ + if (count >= 4 && ((u64)to & 4)) { + __raw_writel(c, to); + to += 4; + count -= 4; + } + + /* + * Handle all full-sized quadwords: we're aligned + * (or have a small count) + */ + count -= 8; + if (count >= 0) { + do { + __raw_writeq(c, to); + to += 8; + count -= 8; + } while (count >= 0); + } + count += 8; + + /* The tail is word-aligned if we still have count >= 4 */ + if (count >= 4) { + __raw_writel(c, to); + to += 4; + count -= 4; + } + + /* The tail is half-word aligned if we have count >= 2 */ + if (count >= 2) { + __raw_writew(c, to); + to += 2; + count -= 2; + } + + /* And finally, one last byte.. */ + if (count) + __raw_writeb(c, to); + mb(); +} +EXPORT_SYMBOL(_memset_c_io); + +void __iomem *ioport_map(unsigned long port, unsigned int size) +{ + unsigned long io_offset; + + if (port < 0x100000) { + io_offset = is_in_host() ? LPC_LEGACY_IO : PCI_VT_LEGACY_IO; + port = port | io_offset; + } + + return __va(port); +} +EXPORT_SYMBOL(ioport_map); + +void ioport_unmap(void __iomem *addr) +{ +} +EXPORT_SYMBOL(ioport_unmap); diff --git a/arch/sw_64/lib/iomap_copy.c b/arch/sw_64/lib/iomap_copy.c new file mode 100644 index 0000000000000000000000000000000000000000..1c75bd602d7e7fcd01591cdcbdd73b8e6d258aec --- /dev/null +++ b/arch/sw_64/lib/iomap_copy.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +/** + * __iowrite32_copy - copy data to MMIO space, in 32-bit units + * @to: destination, in MMIO space (must be 32-bit aligned) + * @from: source (must be 32-bit aligned) + * @count: number of 32-bit quantities to copy + * + * Copy data from kernel space to MMIO space, in units of 32 bits at a + * time. Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. + */ +void __iowrite32_copy(void __iomem *to, + const void *from, + size_t count) +{ + u32 __iomem *dst = to; + const u32 *src = from; + const u32 *end = src + count; + + while (src < end) { + __raw_writel(*src++, dst++); + mb(); + } + +} + +/** + * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units + * @to: destination, in MMIO space (must be 64-bit aligned) + * @from: source (must be 64-bit aligned) + * @count: number of 64-bit quantities to copy + * + * Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a + * time. Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. 
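+ *
+ * (Note: the sw_64 implementation below does issue mb() after every
+ * store, so on this architecture the accesses are in fact ordered.)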
+ */
+void __iowrite64_copy(void __iomem *to,
+		      const void *from,
+		      size_t count)
+{
+	u64 __iomem *dst = to;
+	const u64 *src = from;
+	const u64 *end = src + count;
+
+	while (src < end) {
+		__raw_writeq(*src++, dst++);
+		mb();
+	}
+}
diff --git a/arch/sw_64/lib/memcpy.S b/arch/sw_64/lib/memcpy.S
new file mode 100644
index 0000000000000000000000000000000000000000..31c422b393eeb4b8795758540b8f6a8735121d4d
--- /dev/null
+++ b/arch/sw_64/lib/memcpy.S
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Reasonably optimized memcpy() routine for the sw64
+ *
+ *	- memory accessed as aligned quadwords only
+ *	- unrolled 64-byte main loop (see $unroll_body)
+ *
+ * Temp usage notes:
+ *	$1, $2, - scratch
+ */
+#include
+	.set noreorder
+	.set noat
+
+	.align 4
+	.globl memcpy
+	.ent memcpy
+memcpy:
+	.frame $30, 0, $26, 0
+	.prologue 0
+
+	mov $16, $0
+	ble $18, $nomoredata
+	xor $16, $17, $1
+	and $1, 7, $1
+
+	bne $1, $misaligned
+	/* source and dest are same mod 8 address */
+	and $16, 7, $1
+	beq $1, $both_0mod8
+
+	/*
+	 * source and dest are same misalignment. move a byte at a time
+	 * until a 0mod8 alignment for both is reached.
+	 * At least one byte more to move
+	 */
+
+$head_align:
+	ldbu $1, 0($17)
+	subl $18, 1, $18
+	addl $17, 1, $17
+	stb $1, 0($16)
+	addl $16, 1, $16
+	and $16, 7, $1
+	ble $18, $nomoredata
+	bne $1, $head_align
+
+$both_0mod8:
+	cmple $18, 127, $1
+	bne $1, $no_unroll
+	and $16, 63, $1
+	beq $1, $do_unroll
+
+$single_head_quad:
+	ldl $1, 0($17)
+	subl $18, 8, $18
+	addl $17, 8, $17
+
+	stl $1, 0($16)
+	addl $16, 8, $16
+	and $16, 63, $1
+	bne $1, $single_head_quad
+
+$do_unroll:
+	addl $16, 64, $7
+	cmple $18, 127, $1
+	bne $1, $tail_quads
+
+$unroll_body:
+	#wh64 ($7)
+	fillde 0($7)
+
+	ldl $6, 0($17)
+
+	ldl $4, 8($17)
+	ldl $5, 16($17)
+	addl $7, 64, $7
+
+	ldl $3, 24($17)
+	addl $16, 64, $1
+
+	addl $17, 32, $17
+	stl $6, 0($16)
+
+	stl $4, 8($16)
+	stl $5, 16($16)
+	subl $18, 192, $2
+
+	stl $3, 24($16)
+	addl $16, 32, $16
+
+	ldl $6, 0($17)
+	ldl $4, 8($17)
+	#cmovlt $2, $1, $7
+	sellt $2, $1, $7, $7
+
+	ldl $5, 16($17)
+	ldl $3, 24($17)
+	addl $16, 32, $16
+	subl $18, 64, $18
+
+	addl $17, 32, $17
+	stl $6, -32($16)
+	stl $4, -24($16)
+	cmple $18, 63, $1
+
+	stl $5, -16($16)
+	stl $3, -8($16)
+	beq $1, $unroll_body
+
+$tail_quads:
+$no_unroll:
+	.align 4
+	subl $18, 8, $18
+	blt $18, $less_than_8
+
+$move_a_quad:
+	ldl $1, 0($17)
+	subl $18, 8, $18
+	addl $17, 8, $17
+
+	stl $1, 0($16)
+	addl $16, 8, $16
+	bge $18, $move_a_quad
+
+$less_than_8:
+	.align 4
+	addl $18, 8, $18
+	ble $18, $nomoredata
+
+	/* Trailing bytes */
+$tail_bytes:
+	subl $18, 1, $18
+	ldbu $1, 0($17)
+	addl $17, 1, $17
+
+	stb $1, 0($16)
+	addl $16, 1, $16
+	bgt $18, $tail_bytes
+
+	/* branching to exit takes 3 extra cycles, so replicate exit here */
+	ret $31, ($26), 1
+
+$misaligned:
+	mov $0, $4
+	and $0, 7, $1
+	beq $1, $dest_0mod8
+
+$aligndest:
+	ble $18, $nomoredata
+	ldbu $1, 0($17)
+	subl $18, 1, $18
+	addl $17, 1, $17
+
+	stb $1, 0($4)
+	addl $4, 1, $4
+	and $4, 7, $1
+	bne $1, $aligndest
+
+	/* Source has unknown alignment, but dest is known to be 0mod8 */
+$dest_0mod8:
+	subl $18, 8, $18
+	blt $18, $misalign_tail
+	ldl_u $3, 0($17)
+
+$mis_quad:
+	ldl_u $16, 8($17)
+	extll $3, $17, $3
+	exthl $16, $17, $1
+	bis $3, $1, $1
+
+	subl $18, 8, $18
+	addl $17, 8, $17
+	stl $1, 0($4)
+	mov $16, $3
+
+	addl $4, 8, $4
+	bge $18, $mis_quad
+
+$misalign_tail:
+	addl $18, 8, $18
+	ble $18, $nomoredata
+
+$misalign_byte:
+	ldbu $1, 0($17)
+	subl $18, 1, $18
+	addl $17, 1, $17
+
+ stb $1, 0($4) + addl $4, 1, $4 + bgt $18, $misalign_byte + + +$nomoredata: + ret $31, ($26), 1 + + .end memcpy + EXPORT_SYMBOL(memcpy) +/* For backwards module compatibility. */ +__memcpy = memcpy +.globl __memcpy diff --git a/arch/sw_64/lib/memmove.S b/arch/sw_64/lib/memmove.S new file mode 100644 index 0000000000000000000000000000000000000000..3e34fcd5b217fb2891cabfa99d3cef9139c79a1c --- /dev/null +++ b/arch/sw_64/lib/memmove.S @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Barely optimized memmove routine for sw64. + * This is hand-massaged output from the original memcpy.c. We defer to + * memcpy whenever possible; the backwards copy loops are not unrolled. + */ +#include + .set noat + .set noreorder + .text + + .align 4 + .globl memmove + .ent memmove +memmove: + ldgp $29, 0($27) + unop + .prologue 1 + + addl $16, $18, $4 + addl $17, $18, $5 + cmpule $4, $17, $1 # dest + n <= src + cmpule $5, $16, $2 # dest >= src + n + + bis $1, $2, $1 + mov $16, $0 + xor $16, $17, $2 + bne $1, memcpy # samegp + + and $2, 7, $2 # Test for src/dest co-alignment. + and $16, 7, $1 + cmpule $16, $17, $3 + bne $3, $memmove_up # dest < src + + and $4, 7, $1 + bne $2, $misaligned_dn + unop + beq $1, $skip_aligned_byte_loop_head_dn + +$aligned_byte_loop_head_dn: + ldi $4, -1($4) + ldi $5, -1($5) + unop + ble $18, $egress + + ldbu $1, 0($5) + ldi $18, -1($18) + stb $1, 0($4) + + and $4, 7, $6 + bne $6, $aligned_byte_loop_head_dn + +$skip_aligned_byte_loop_head_dn: + ldi $18, -8($18) + blt $18, $skip_aligned_word_loop_dn + +$aligned_word_loop_dn: + ldl $1, -8($5) + ldi $5, -8($5) + ldi $18, -8($18) + + stl $1, -8($4) + ldi $4, -8($4) + bge $18, $aligned_word_loop_dn + +$skip_aligned_word_loop_dn: + ldi $18, 8($18) + bgt $18, $byte_loop_tail_dn + unop + ret $31, ($26), 1 + + .align 4 +$misaligned_dn: + fnop + unop + beq $18, $egress + +$byte_loop_tail_dn: + ldbu $1, -1($5) + ldi $5, -1($5) + ldi $4, -1($4) + + ldi $18, -1($18) + stb $1, 0($4) + + bgt $18, $byte_loop_tail_dn + br $egress + +$memmove_up: + mov $16, $4 + mov $17, $5 + bne $2, $misaligned_up + beq $1, $skip_aligned_byte_loop_head_up + +$aligned_byte_loop_head_up: + unop + ble $18, $egress + ldbu $1, 0($5) + + ldi $18, -1($18) + + ldi $5, 1($5) + stb $1, 0($4) + ldi $4, 1($4) + + and $4, 7, $6 + bne $6, $aligned_byte_loop_head_up + +$skip_aligned_byte_loop_head_up: + ldi $18, -8($18) + blt $18, $skip_aligned_word_loop_up + +$aligned_word_loop_up: + ldl $1, 0($5) + ldi $5, 8($5) + ldi $18, -8($18) + + stl $1, 0($4) + ldi $4, 8($4) + bge $18, $aligned_word_loop_up + +$skip_aligned_word_loop_up: + ldi $18, 8($18) + bgt $18, $byte_loop_tail_up + unop + ret $31, ($26), 1 + + .align 4 +$misaligned_up: + fnop + unop + beq $18, $egress + +$byte_loop_tail_up: + ldbu $1, 0($5) + ldi $18, -1($18) + + stb $1, 0($4) + + ldi $5, 1($5) + ldi $4, 1($4) + bgt $18, $byte_loop_tail_up + +$egress: + ret $31, ($26), 1 + + .end memmove + EXPORT_SYMBOL(memmove) diff --git a/arch/sw_64/lib/memset.S b/arch/sw_64/lib/memset.S new file mode 100644 index 0000000000000000000000000000000000000000..dbc4d775c7ea83d140ea7121ddefd46ca047a604 --- /dev/null +++ b/arch/sw_64/lib/memset.S @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is an efficient (and small) implementation of the C library "memset()" + * function for the sw. 
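+ *
+ * Entry points: ___memset expands a byte to the 8-byte pattern,
+ * __memsetw expands a halfword, and both join __constant_c_memset,
+ * which expects the full 8-byte pattern already in $17.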
+ * + * (C) Copyright 1996 Linus Torvalds + * + * This routine is "moral-ware": you are free to use it any way you wish, and + * the only obligation I put on you is a moral one: if you make any improvements + * to the routine, please send me your improvements for me to use similarly. + * + * The scheduling comments are according to the documentation (and done by + * hand, so they might well be incorrect, please do tell me about it..) + */ + +#include + + .set noat + .set noreorder +.text + .globl memset + .globl __memset + .globl ___memset + .globl __memsetw + .globl __constant_c_memset + + .ent ___memset +.align 5 +___memset: + .frame $30, 0, $26, 0 + .prologue 0 + + and $17, 255, $1 + inslb $17, 1, $17 + bis $17, $1, $17 + sll $17, 16, $1 + + bis $17, $1, $17 + sll $17, 32, $1 + bis $17, $1, $17 + ldl_u $31, 0($30) + +.align 5 +__constant_c_memset: + addl $18, $16, $6 + bis $16, $16, $0 + xor $16, $6, $1 + ble $18, end + + bic $1, 7, $1 + beq $1, within_one_quad + and $16, 7, $3 + beq $3, aligned + + bis $16, $16, $5 + subl $3, 8, $3 + addl $18, $3, $18 + subl $16, $3, $16 + + eqv $3, $31, $3 + addl $3, 1, $3 +unaligned_start_loop: + stb $17, 0($5) + subl $3, 1, $3 + addl $5, 1, $5 + bgt $3, unaligned_start_loop + + +.align 4 +aligned: + sra $18, 3, $3 + and $18, 7, $18 + bis $16, $16, $5 + beq $3, no_quad + +/*added by JJ*/ + ldi $3, -8($3) + blt $3, nounrol + +.align 3 +wloop: + fillde 256($5) + stl $17, 0($5) + stl $17, 8($5) + stl $17, 16($5) + stl $17, 24($5) + subl $3, 8, $3 + stl $17, 32($5) + stl $17, 40($5) + stl $17, 48($5) + stl $17, 56($5) + addl $5, 0x40, $5 + bge $3, wloop + +nounrol: + addl $3, 8, $3 + beq $3, no_quad +/*end JJ*/ + +.align 3 +loop: + stl $17, 0($5) + subl $3, 1, $3 + addl $5, 8, $5 + bne $3, loop + +no_quad: + bis $31, $31, $31 + beq $18, end + and $6, 7, $6 +no_quad_loop: + stb $17, 0($5) + subl $6, 1, $6 + addl $5, 1, $5 + bgt $6, no_quad_loop + ret $31, ($26), 1 + +.align 3 +within_one_quad: + bis $18, $18, $1 + bis $16, $16, $5 +within_one_quad_loop: + stb $17, 0($5) + subl $1, 1, $1 + addl $5, 1, $5 + bgt $1, within_one_quad_loop + +end: + ret $31, ($26), 1 + .end ___memset + EXPORT_SYMBOL(___memset) + + .align 5 + .ent __memsetw +__memsetw: + .prologue 0 + + inslh $17, 0, $1 + inslh $17, 2, $2 + inslh $17, 4, $3 + or $1, $2, $1 + inslh $17, 6, $4 + or $1, $3, $1 + or $1, $4, $17 + br __constant_c_memset + + .end __memsetw + EXPORT_SYMBOL(__memsetw) + +memset = ___memset +EXPORT_SYMBOL(memset) +__memset = ___memset +EXPORT_SYMBOL(__memset) diff --git a/arch/sw_64/lib/strcpy.S b/arch/sw_64/lib/strcpy.S new file mode 100644 index 0000000000000000000000000000000000000000..61b6141f88e2393e28d0388ae726210a2d9cb2a1 --- /dev/null +++ b/arch/sw_64/lib/strcpy.S @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Optimized strcpy() for SW64 + + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * Copy a null-terminated string from SRC to DST. 
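+ *
+ * The aligned loops scan for the terminating null eight bytes at a
+ * time: cmpgeb against $31 yields a per-byte mask that becomes
+ * non-zero as soon as any byte of the quadword is 0x00.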
+ *
+ * Input:
+ *	$16: DST, clobbered
+ *	$17: SRC, clobbered
+ *
+ * Output:
+ *	$0: DST
+ *
+ * Temporaries:
+ *	$1: unaligned parts of addr (0 means aligned addr)
+ *	$4: current data to copy (could have 1 byte or 8 bytes)
+ *	$5: parts of current data, compare result
+ *	$6: number of bytes left to copy
+ *
+ * Tag naming:
+ *	co: SRC and DST are co-aligned
+ *	mis: SRC and DST are not co-aligned
+ *	a: SRC or DST has aligned address
+ *	una: SRC or DST has unaligned address
+ *
+ */
+
+#include
+
+	.text
+	.align 4
+	.globl strcpy
+	.ent strcpy
+strcpy:
+	.frame $30, 0, $26
+	.prologue 0
+
+	bis $31, $16, $0	# set return value
+
+	xor $16, $17, $1
+	and $1, 7, $1
+	bne $1, $mis_aligned
+
+/* src and dst are co-aligned */
+	and $16, 7, $1
+	bne $1, $co_una_head
+
+/* do the copy in loop, for (co)-aligned src and dst with (a)ligned addr */
+$co_a_loop:
+	ldl $4, 0($17)
+	cmpgeb $31, $4, $5
+	bne $5, $tail_loop	# we find null
+	stl $4, 0($16)
+	addl $17, 8, $17
+	addl $16, 8, $16
+	br $31, $co_a_loop
+
+/* src and dst are co-aligned but have unaligned address */
+$co_una_head:
+	ldl_u $4, 0($17)
+	extll $4, $16, $4
+	cmpgeb $31, $4, $5
+	bne $5, $tail_loop	# we find null
+	ldi $6, 8($31)
+	subl $6, $1, $6
+	addl $17, $6, $17	# prepare addr of middle part
+
+/* copy the unaligned part in loop */
+$co_una_head_loop:
+	stb $4, 0($16)
+	addl $16, 1, $16
+	subl $6, 1, $6
+	beq $6, $co_a_loop
+	addl $4, 1, $4
+	br $31, $co_una_head_loop
+
+/* src and dst are not co-aligned */
+$mis_aligned:
+	and $16, 7, $1
+	beq $1, $mis_a_dst
+	ldi $6, 8($31)
+	subl $6, $1, $6
+
+/* copy the first few bytes to make dst aligned */
+$mis_una_head_loop:
+	bis $31, $31, $6
+	ldbu $4, 0($17)
+	stb $4, 0($16)
+	beq $4, $out	# we have reached null, return
+	addl $17, 1, $17
+	addl $16, 1, $16
+	subl $6, 1, $6
+	beq $6, $mis_a_dst
+	br $31, $mis_una_head_loop
+
+/* dst has aligned addr */
+$mis_a_dst:
+	and $17, 7, $1
+
+$mis_a_dst_loop:
+	ldl_u $4, 0($17)
+	ldl_u $5, 7($17)
+	extll $4, $1, $4
+	exthl $5, $1, $5
+	bis $4, $5, $4
+	cmpgeb $31, $4, $5
+	bne $5, $tail_loop	# we find null
+	stl $4, 0($16)
+	addl $17, 8, $17
+	addl $16, 8, $16
+	br $31, $mis_a_dst_loop
+
+/* we have found null in the last few bytes, copy one byte each time */
+$tail_loop:
+	ldbu $4, 0($17)
+	stb $4, 0($16)
+	beq $4, $out	# we have reached null, return
+	addl $17, 1, $17
+	addl $16, 1, $16
+	br $31, $tail_loop
+
+/* copy is done, return */
+$out:
+	ret
+
+	.end strcpy
+	EXPORT_SYMBOL(strcpy)
diff --git a/arch/sw_64/lib/strncpy.S b/arch/sw_64/lib/strncpy.S
new file mode 100644
index 0000000000000000000000000000000000000000..f50c70599bb4685518ed68379845d9b298af105d
--- /dev/null
+++ b/arch/sw_64/lib/strncpy.S
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Optimized strncpy() for SW64
+ *
+ * Copyright (C) Mao Minkai
+ * Author: Mao Minkai
+ *
+ * Copy a string from SRC to DST. At most SIZE bytes are copied.
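+ * If SRC is shorter than SIZE, the remainder of DST is padded with
+ * null bytes (see $null_padding below), as C strncpy requires.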
+ * + * Input: + * $16: DST, clobbered + * $17: SRC, clobbered + * $18: SIZE, clobbered + * + * Output: + * $0: DST + * + * Temporaries: + * $1: unaligned parts of addr (0 means aligned addr) + * $4: current data to copy (could have 1 byte or 8 bytes) + * $5: parts of current data, compare result + * $6: number of bytes left to copy in head + * + * Tag naming: + * co: SRC and DST are co-aligned + * mis: SRC and DST are not co-aligned + * a: SRC or DST has aligned address + * una: SRC or DST has unaligned address + * + */ + +#include + + .text + .align 4 + .globl strncpy + .ent strncpy +strncpy: + .frame $30, 0, $26 + .prologue 0 + + bis $31, $16, $0 # set return value + beq $18, $out # return if size is 0 + cmplt $18, 8, $5 # size less than 8, do 1-byte copy + bne $5, $tail_loop + + xor $16, $17, $1 + and $1, 7, $1 + bne $1, $mis_aligned + +/* src and dst are co-aligned */ + and $16, 7, $1 + bne $1, $co_una_head + +/* do the copy in loop, for (co)-aligned src and dst with (a)ligned addr */ +$co_a_loop: + ldl $4, 0($17) + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + subl $18, 8, $5 + blt $5, $tail_loop # we have fewer than 8 bytes to copy + stl $4, 0($16) + subl $18, 8, $18 + beq $18, $out + addl $17, 8, $17 + addl $16, 8, $16 + br $31, $co_a_loop + +/* src and dst are co-aligned but have unaligned address */ +$co_una_head: + ldl_u $4, 0($17) + extll $4, $16, $4 + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + ldi $6, 8($31) + subl $6, $1, $6 + addl $17, $6, $17 # prepare addr of middle part + subl $18, $6, $18 # sub bytes going to be copy + +/* copy the unaligned part in loop */ +$co_una_head_loop: + stb $4, 0($16) + addl $16, 1, $16 + subl $6, 1, $6 + beq $6, $co_a_loop + addl $4, 1, $4 + br $31, $co_una_head_loop + +/* src and dst are not co-aligned */ +$mis_aligned: + and $16, 7, $1 + beq $1, $mis_a_dst + +$mis_una_head: + ldi $6, 8($31) + subl $6, $1, $6 + +/* copy the first few bytes to make dst aligned */ +$mis_una_head_loop: + ldbu $4, 0($17) + stb $4, 0($16) + subl $18, 1, $18 + beq $18, $out + beq $4, $null_padding # we have reached null + addl $17, 1, $17 + addl $16, 1, $16 + subl $6, 1, $6 + beq $6, $mis_a_dst + br $31, $mis_una_head_loop + +/* dst has aligned addr */ +$mis_a_dst: + and $17, 7, $1 + +$mis_a_dst_loop: + ldl_u $4, 0($17) + ldl_u $5, 7($17) + extll $4, $1, $4 + exthl $5, $1, $5 + bis $4, $5, $4 + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + subl $18, 8, $5 + blt $5, $tail_loop # we have fewer than 8 bytes to copy + stl $4, 0($16) + subl $18, 8, $18 + beq $5, $out + addl $17, 8, $17 + addl $16, 8, $16 + br $31, $mis_a_dst_loop + +/* we have find null in the last few bytes, copy one byte each time */ +$tail_loop: + ldbu $4, 0($17) + stb $4, 0($16) + subl $18, 1, $18 + beq $18, $out + beq $4, $null_padding # we have reached null + addl $17, 1, $17 + addl $16, 1, $16 + br $31, $tail_loop + +$null_padding: + addl $16, 1, $16 + subl $18, 1, $18 + stb $31, 0($16) + beq $18, $out + br $31, $null_padding + +/* copy is done, return */ +$out: + ret + + .end strncpy + EXPORT_SYMBOL(strncpy) diff --git a/arch/sw_64/lib/uaccess_flushcache.c b/arch/sw_64/lib/uaccess_flushcache.c new file mode 100644 index 0000000000000000000000000000000000000000..353d5ac152481743a47776666ae6132626da5236 --- /dev/null +++ b/arch/sw_64/lib/uaccess_flushcache.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +void memcpy_flushcache(void *dst, const void *src, size_t cnt) +{ + memcpy(dst, src, cnt); + 
flush_cache_all(); +} +EXPORT_SYMBOL_GPL(memcpy_flushcache); + +void memcpy_page_flushcache(char *to, struct page *page, size_t offset, + size_t len) +{ + memcpy_flushcache(to, page_address(page) + offset, len); +} + +unsigned long __copy_user_flushcache(void *to, const void __user *from, + unsigned long n) +{ + unsigned long rc = __copy_from_user(to, from, n); + + flush_cache_all(); + return rc; +} + +#ifdef CONFIG_ARCH_HAS_PMEM_API +void arch_wb_cache_pmem(void *addr, size_t size) +{ + flush_cache_all(); +} +EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); + +void arch_invalidate_pmem(void *addr, size_t size) +{ + flush_cache_all(); +} +EXPORT_SYMBOL_GPL(arch_invalidate_pmem); +#endif diff --git a/arch/sw_64/lib/udelay.c b/arch/sw_64/lib/udelay.c new file mode 100644 index 0000000000000000000000000000000000000000..59ca8a97d748895a49e4fbaf83a85eee99f0d459 --- /dev/null +++ b/arch/sw_64/lib/udelay.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1993, 2000 Linus Torvalds + * + * Delay routines, using a pre-computed "loops_per_jiffy" value. + */ + +#include + +/* + * Use only for very small delays (< 1 msec). + * + * The active part of our cycle counter is only 32-bits wide, and + * we're treating the difference between two marks as signed. On + * a 1GHz box, that's about 2 seconds. + */ +void __delay(unsigned long loops) +{ + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(__delay); + +void udelay(unsigned long usecs) +{ + unsigned long loops = usecs * get_cpu_freq() / 1000000; + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(udelay); + +void ndelay(unsigned long nsecs) +{ + unsigned long loops = nsecs * get_cpu_freq() / 1000000000; + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(ndelay); diff --git a/arch/sw_64/math-emu/Makefile b/arch/sw_64/math-emu/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..72e750d138e6ce9281b24b1e2c84e8907798d421 --- /dev/null +++ b/arch/sw_64/math-emu/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the FPU instruction emulation. 
+# + +ccflags-y := -w + +obj-$(CONFIG_MATHEMU) += math-emu.o + +math-emu-objs := math.o qrnnd.o diff --git a/arch/sw_64/math-emu/math.c b/arch/sw_64/math-emu/math.c new file mode 100644 index 0000000000000000000000000000000000000000..b578752f0730481eef0d08e28c4cff53350de228 --- /dev/null +++ b/arch/sw_64/math-emu/math.c @@ -0,0 +1,2255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Modify History + * + * who when what + * --- ---- ---- + * stone 2004-09-02 Add SIMD floating emulation code + * fire3 2008-12-27 Add SIMD floating emulation code for SW64 + */ + +#include + +#include + +#include "sfp-util.h" + +#include +#include +#include + +/* + * This is for sw64 + */ + +#define IEEE_E_STATUS_MASK IEEE_STATUS_MASK +#define IEEE_E_STATUS_TO_EXCSUM_SHIFT 0 +#define SW64_FP_DENOMAL 1 /* A denormal data */ +#define SW64_FP_NORMAL 0 /* A denormal data */ +#define SW64_FP_NAN 2 + +#define SW64_FP_NAN_S(X, val) \ +do { \ + union _FP_UNION_S *_flo = \ + (union _FP_UNION_S *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 255: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_NAN; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + + +#define SW64_FP_NAN_D(X, val) \ +do { \ + union _FP_UNION_D *_flo = \ + (union _FP_UNION_D *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 2047: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_NAN; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + + + +#define SW64_FP_NORMAL_S(X, val) \ +do { \ + union _FP_UNION_S *_flo = \ + (union _FP_UNION_S *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 0: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_DENOMAL; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + +#define SW64_FP_NORMAL_D(X, val) \ +do { \ + union _FP_UNION_D *_flo = \ + (union _FP_UNION_D *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 0: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_DENOMAL; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + +/* Operation Code for SW64 */ +#define OP_SIMD_1 0x1A +#define OP_SIMD_2 0x1B +#define OP_SIMD_MUL_ADD 0x1B +#define OP_SIMD_NORMAL 0x1A +#define OP_MUL_ADD 0x19 + +#define FNC_FMAS 0x0 +#define FNC_FMAD 0x1 +#define FNC_FMSS 0x2 +#define FNC_FMSD 0x3 +#define FNC_FNMAS 0x4 +#define FNC_FNMAD 0x5 +#define FNC_FNMSS 0x6 +#define FNC_FNMSD 0x7 + +#define FNC_VADDS 0x80 +#define FNC_VADDD 0x81 +#define FNC_VSUBS 0x82 +#define FNC_VSUBD 0x83 +#define FNC_VMULS 0x84 +#define FNC_VMULD 0x85 +#define FNC_VDIVS 0x86 +#define FNC_VDIVD 0x87 +#define FNC_VSQRTS 0x88 +#define FNC_VSQRTD 0x89 + +#define FNC_VFCMPEQ 0x8c +#define FNC_VFCMPLE 0x8d +#define FNC_VFCMPLT 0x8e +#define FNC_VFCMPUN 0x8f + +#define FNC_VCPYS 0x90 +#define FNC_VCPYSE 0x91 +#define FNC_VCPYSN 0x92 + +#define FNC_VMAS 0x0 +#define FNC_VMAD 0x1 +#define FNC_VMSS 0x2 +#define FNC_VMSD 0x3 +#define FNC_VNMAS 0x4 +#define FNC_VNMAD 0x5 +#define FNC_VNMSS 0x6 +#define FNC_VNMSD 0x7 + +long simd_fp_emul_s(unsigned long pc); +long 
simd_fp_emul_d(unsigned long pc); +long mul_add_fp_emul(unsigned long pc); +long simd_cmp_emul_d(unsigned long pc); + +long simd_mul_add_fp_emul_d(unsigned long pc); +long simd_mul_add_fp_emul_s(unsigned long pc); + +void read_fp_reg_s(unsigned long reg, unsigned long *p0, + unsigned long *p1, unsigned long *p2, unsigned long *p3); +void read_fp_reg_d(unsigned long reg, unsigned long *val_p0, + unsigned long *p1, unsigned long *p2, unsigned long *p3); +void write_fp_reg_s(unsigned long reg, unsigned long val_p0, + unsigned long p1, unsigned long p2, unsigned long p3); +void write_fp_reg_d(unsigned long reg, unsigned long val_p0, + unsigned long p1, unsigned long p2, unsigned long p3); +#define LOW_64_WORKING 1 +#define HIGH_64_WORKING 2 + +/* + * End for sw64 + */ + +#define OPC_HMC 0x00 +#define OPC_INTA 0x10 +#define OPC_INTL 0x11 +#define OPC_INTS 0x12 +#define OPC_INTM 0x13 +#define OPC_FLTC 0x14 +#define OPC_FLTV 0x15 +#define OPC_FLTI 0x16 +#define OPC_FLTL 0x17 +#define OPC_MISC 0x18 +#define OPC_JSR 0x1a + +#define FOP_SRC_S 0 +#define FOP_SRC_T 2 +#define FOP_SRC_Q 3 + +#define FOP_FNC_ADDx 0 +#define FOP_FNC_CVTQL 0 +#define FOP_FNC_SUBx 1 +#define FOP_FNC_MULx 2 +#define FOP_FNC_DIVx 3 +#define FOP_FNC_CMPxUN 4 +#define FOP_FNC_CMPxEQ 5 +#define FOP_FNC_CMPxLT 6 +#define FOP_FNC_CMPxLE 7 +#define FOP_FNC_SQRTx 11 +#define FOP_FNC_CVTxS 12 +#define FOP_FNC_CVTxT 14 +#define FOP_FNC_CVTxQ 15 + +/* this is for sw64 added by fire3*/ +#define FOP_FNC_ADDS 0 +#define FOP_FNC_ADDD 1 +#define FOP_FNC_SUBS 2 +#define FOP_FNC_SUBD 3 +#define FOP_FNC_MULS 4 +#define FOP_FNC_MULD 5 +#define FOP_FNC_DIVS 6 +#define FOP_FNC_DIVD 7 +#define FOP_FNC_SQRTS 8 +#define FOP_FNC_SQRTD 9 + +#define FOP_FNC_CMPEQ 0x10 +#define FOP_FNC_CMPLE 0x11 +#define FOP_FNC_CMPLT 0x12 +#define FOP_FNC_CMPUN 0x13 + +#define FOP_FNC_CVTSD 0x20 +#define FOP_FNC_CVTDS 0x21 +#define FOP_FNC_CVTLS 0x2D +#define FOP_FNC_CVTLD 0x2F +#define FOP_FNC_CVTDL 0x27 +#define FOP_FNC_CVTDL_G 0x22 +#define FOP_FNC_CVTDL_P 0x23 +#define FOP_FNC_CVTDL_Z 0x24 +#define FOP_FNC_CVTDL_N 0x25 + +#define FOP_FNC_CVTWL 0x28 +#define FOP_FNC_CVTLW 0x29 + +/* fire3 added end */ + + +#define MISC_TRAPB 0x0000 +#define MISC_EXCB 0x0400 + +extern unsigned long sw64_read_fp_reg(unsigned long reg); +extern void sw64_write_fp_reg(unsigned long reg, unsigned long val); +extern unsigned long sw64_read_fp_reg_s(unsigned long reg); +extern void sw64_write_fp_reg_s(unsigned long reg, unsigned long val); + + +#ifdef MODULE + +MODULE_DESCRIPTION("FP Software completion module"); + +extern long (*sw64_fp_emul_imprecise)(struct pt_regs *regs, unsigned long write_mask); +extern long (*sw64_fp_emul)(unsigned long pc); + +static long (*save_emul_imprecise)(struct pt_regs *regs, unsigned long write_mask); +static long (*save_emul)(unsigned long pc); + +long do_sw_fp_emul_imprecise(struct pt_regs *regs, unsigned long write_mask); +long do_sw_fp_emul(unsigned long pc); + +int init_module(void) +{ + save_emul_imprecise = sw64_fp_emul_imprecise; + save_emul = sw64_fp_emul; + sw64_fp_emul_imprecise = do_sw_fp_emul_imprecise; + sw64_fp_emul = do_sw_fp_emul; + return 0; +} + +void cleanup_module(void) +{ + sw64_fp_emul_imprecise = save_emul_imprecise; + sw64_fp_emul = save_emul; +} + +#undef sw64_fp_emul_imprecise +#define sw64_fp_emul_imprecise do_sw_fp_emul_imprecise +#undef sw64_fp_emul +#define sw64_fp_emul do_sw_fp_emul + +#endif /* MODULE */ + + +/* + * Emulate the floating point instruction at address PC. 
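+ * The fields of the 32-bit instruction word, as decoded below, are:
+ *
+ *	[31:26] opcode	[25:21] fa	[20:16] fb
+ *	[12:5]	func	[4:0]	fc (destination)
+ *
+ * (For the SIMD multiply-add forms, func is re-read from bits
+ * [15:10] instead.)
+ *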
Returns -1 if the
+ * instruction to be emulated is illegal (such as with the opDEC trap), else
+ * the SI_CODE for a SIGFPE signal, else 0 if everything's ok.
+ *
+ * Notice that the kernel does not and cannot use FP regs. This is good
+ * because it means that instead of saving/restoring all fp regs, we simply
+ * stick the result of the operation into the appropriate register.
+ */
+long sw64_fp_emul(unsigned long pc)
+{
+	FP_DECL_EX;
+	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+
+	unsigned long fa, fb, fc, func, mode, mode_bk, src;
+	unsigned long res, va, vb, vc, swcr, fpcr;
+	__u32 insn;
+	long si_code = 0;
+	unsigned long opcode;
+
+	get_user(insn, (__u32 *)pc);
+	opcode = (insn >> 26) & 0x3f;
+	fc = (insn >> 0) & 0x1f;	/* destination register */
+	fb = (insn >> 16) & 0x1f;
+	fa = (insn >> 21) & 0x1f;
+	func = (insn >> 5) & 0xff;
+	fpcr = rdfpcr();
+	mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3;
+	pr_debug("======= Entering Floating math emulation =====\n");
+	pr_debug("Floating math emulation insn = %#x, opcode = %#lx, func = %#lx\n", insn, opcode, func);
+	pr_debug("SW64 hardware fpcr = %#lx\n", fpcr);
+	swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr);
+	pr_debug("SW64 software swcr = %#lx\n", swcr);
+	pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode);
+
+	if (opcode == OP_SIMD_NORMAL) {		/* float simd math */
+		if (func == FNC_VADDS || func == FNC_VSUBS || func == FNC_VSQRTS
+			|| func == FNC_VMULS || func == FNC_VDIVS)
+			si_code = simd_fp_emul_s(pc);
+		if (func == FNC_VADDD || func == FNC_VSUBD || func == FNC_VSQRTD
+			|| func == FNC_VMULD || func == FNC_VDIVD)
+			si_code = simd_fp_emul_d(pc);
+		if (func == FNC_VFCMPUN || func == FNC_VFCMPLT || func == FNC_VFCMPLE
+			|| func == FNC_VFCMPEQ)
+			si_code = simd_cmp_emul_d(pc);
+		return si_code;
+	}
+	if (opcode == OP_SIMD_MUL_ADD) {	/* simd mul and add */
+		func = (insn >> 10) & 0x3f;
+		if (func == FNC_VMAS || func == FNC_VMSS || func == FNC_VNMAS
+			|| func == FNC_VNMSS) {
+			si_code = simd_mul_add_fp_emul_s(pc);
+			return si_code;
+		}
+
+		if (func == FNC_VMAD || func == FNC_VMSD || func == FNC_VNMAD
+			|| func == FNC_VNMSD) {
+			si_code = simd_mul_add_fp_emul_d(pc);
+			return si_code;
+		}
+		func = (insn >> 5) & 0xff;
+	}
+
+	if (opcode == OP_MUL_ADD) {
+		si_code = mul_add_fp_emul(pc);
+		return si_code;
+	}
+	switch (func) {
+	case FOP_FNC_SUBS:
+		va = sw64_read_fp_reg_s(fa);
+		vb = sw64_read_fp_reg_s(fb);
+		FP_UNPACK_SP(SA, &va);
+		FP_UNPACK_SP(SB, &vb);
+		FP_SUB_S(SR, SA, SB);
+		goto pack_s;
+
+	case FOP_FNC_SUBD:
+		va = sw64_read_fp_reg(fa);
+		vb = sw64_read_fp_reg(fb);
+		FP_UNPACK_DP(DA, &va);
+		FP_UNPACK_DP(DB, &vb);
+		FP_SUB_D(DR, DA, DB);
+		goto pack_d;
+
+	case FOP_FNC_ADDS:
+		va = sw64_read_fp_reg_s(fa);
+		vb = sw64_read_fp_reg_s(fb);
+		FP_UNPACK_SP(SA, &va);
+		FP_UNPACK_SP(SB, &vb);
+		FP_ADD_S(SR, SA, SB);
+		goto pack_s;
+
+	case FOP_FNC_ADDD:
+		va = sw64_read_fp_reg(fa);
+		vb = sw64_read_fp_reg(fb);
+		FP_UNPACK_DP(DA, &va);
+		FP_UNPACK_DP(DB, &vb);
+		FP_ADD_D(DR, DA, DB);
+		goto pack_d;
+
+	case FOP_FNC_MULS:
+		va = sw64_read_fp_reg_s(fa);
+		vb = sw64_read_fp_reg_s(fb);
+		FP_UNPACK_SP(SA, &va);
+		FP_UNPACK_SP(SB, &vb);
+		FP_MUL_S(SR, SA, SB);
+		goto pack_s;
+
+	case FOP_FNC_MULD:
+		va = sw64_read_fp_reg(fa);
+		vb = sw64_read_fp_reg(fb);
+		FP_UNPACK_DP(DA, &va);
+		FP_UNPACK_DP(DB, &vb);
+		FP_MUL_D(DR, DA, DB);
+		goto pack_d;
+
+	case FOP_FNC_DIVS:
+		pr_debug("FOP_FNC_DIVS\n");
+		va = sw64_read_fp_reg_s(fa);
+		vb = sw64_read_fp_reg_s(fb);
+		
FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_DIV_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_DIVD: + pr_debug("FOP_FNC_DIVD\n"); + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_DIV_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_SQRTS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_SQRT_S(SR, SB); + goto pack_s; + case FOP_FNC_SQRTD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_SQRT_D(DR, DB); + goto pack_d; + } + + + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + if ((func & ~0xf) == FOP_FNC_CMPEQ) { + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + + FP_UNPACK_RAW_DP(DA, &va); + FP_UNPACK_RAW_DP(DB, &vb); + if (!DA_e && !_FP_FRAC_ZEROP_1(DA)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DA, _FP_ZEROFRAC_1); + } + if (!DB_e && !_FP_FRAC_ZEROP_1(DB)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DB, _FP_ZEROFRAC_1); + } + FP_CMP_D(res, DA, DB, 3); + vc = 0x4000000000000000; + /* CMPTEQ, CMPTUN don't trap on QNaN, while CMPTLT and CMPTLE do */ + if (res == 3 && (((func == FOP_FNC_CMPLT) || (func == FOP_FNC_CMPLE)) + || FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) { + pr_debug("CMPLT CMPLE:func:%d, trap on QNaN.", func); + FP_SET_EXCEPTION(FP_EX_INVALID); + } + switch (func) { + case FOP_FNC_CMPUN: + if (res != 3) + vc = 0; + break; + case FOP_FNC_CMPEQ: + if (res) + vc = 0; + break; + case FOP_FNC_CMPLT: + if (res != -1) + vc = 0; + break; + case FOP_FNC_CMPLE: + if ((long)res > 0) + vc = 0; + break; + } + goto done_d; + } + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + + if (func == FOP_FNC_CVTSD) { + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SB, &vb); + DR_c = DB_c; + DR_s = DB_s; + DR_e = DB_e + (1024 - 128); + DR_f = SB_f << (52 - 23); + goto pack_d; + } + + if (func == FOP_FNC_CVTDS) { + FP_CONV(S, D, 1, 1, SR, DB); + goto pack_s; + } + + if (func == FOP_FNC_CVTDL || func == FOP_FNC_CVTDL_G || func == FOP_FNC_CVTDL_P + || func == FOP_FNC_CVTDL_Z || func == FOP_FNC_CVTDL_N) { + mode_bk = mode; + if (func == FOP_FNC_CVTDL_Z) + mode = 0x0UL; + else if (func == FOP_FNC_CVTDL_N) + mode = 0x1UL; + else if (func == FOP_FNC_CVTDL_G) + mode = 0x2UL; + else if (func == FOP_FNC_CVTDL_P) + mode = 0x3UL; + + if (DB_c == FP_CLS_NAN && (_FP_FRAC_HIGH_RAW_D(DB) & _FP_QNANBIT_D)) { + /* AAHB Table B-2 says QNaN should not trigger INV */ + vc = 0; + } else + FP_TO_INT_ROUND_D(vc, DB, 64, 2); + mode = mode_bk; + goto done_d; + } + + vb = sw64_read_fp_reg(fb); + + switch (func) { + case FOP_FNC_CVTLW: + /* + * Notice: We can get here only due to an integer + * overflow. Such overflows are reported as invalid + * ops. We return the result the hw would have + * computed. 
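+	 * The expression below scatters the longword into the
+	 * in-register layout: bits 31:30 of the source end up in
+	 * bits 63:62 of the result and bits 29:0 in bits 58:29.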
+ */ + vc = ((vb & 0xc0000000) << 32 | /* sign and msb */ + (vb & 0x3fffffff) << 29); /* rest of the int */ + FP_SET_EXCEPTION(FP_EX_INVALID); + goto done_d; + + case FOP_FNC_CVTLS: + FP_FROM_INT_S(SR, ((long)vb), 64, long); + goto pack_s; + + case FOP_FNC_CVTLD: + FP_FROM_INT_D(DR, ((long)vb), 64, long); + goto pack_d; + } + goto bad_insn; + + +pack_s: + FP_PACK_SP(&vc, SR); + + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vc = 0; + pr_debug("SW64 Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 Emulation S-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); + sw64_write_fp_reg_s(fc, vc); + goto done; + +pack_d: + FP_PACK_DP(&vc, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vc = 0; + pr_debug("SW64 Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +done_d: + sw64_write_fp_reg(fc, vc); + goto done; + + /* + * Take the appropriate action for each possible + * floating-point result: + * + * - Set the appropriate bits in the FPCR + * - If the specified exception is enabled in the FPCR, + * return. The caller (entArith) will dispatch + * the appropriate signal to the translated program. + * + * In addition, properly track the exception state in software + * as described in the SW64 Architecture Handbook section 4.7.7.3. + */ +done: + if (_fex) { + /* Record exceptions in software control word. */ + swcr |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr |= ieee_swcr_to_fpcr(swcr); + pr_debug("SW64 before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + + /* Do we generate a signal? */ + _fex = _fex & swcr & IEEE_TRAP_ENABLE_MASK; + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + + return si_code; + } + + /* + * We used to write the destination register here, but DEC FORTRAN + * requires that the result *always* be written... so we do the write + * immediately after the operations above. + */ + + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long sw64_fp_emul_imprecise(struct pt_regs *regs, unsigned long write_mask) +{ + unsigned long trigger_pc = regs->pc - 4; + unsigned long insn, opcode, rc, si_code = 0; + + + /* + * Turn off the bits corresponding to registers that are the + * target of instructions that set bits in the exception + * summary register. We have some slack doing this because a + * register that is the target of a trapping instruction can + * be written at most once in the trap shadow. + * + * Branches, jumps, TRAPBs, EXCBs and calls to HMcode all + * bound the trap shadow, so we need not look any further than + * up to the first occurrence of such an instruction. + */ + while (write_mask) { + get_user(insn, (__u32 *)(trigger_pc)); + opcode = insn >> 26; + rc = insn & 0x1f; + + switch (opcode) { + case OPC_HMC: + case OPC_JSR: + case 0x30 ... 
0x3f:	/* branches */
+			goto egress;
+
+		case OPC_MISC:
+			switch (insn & 0xffff) {
+			case MISC_TRAPB:
+			case MISC_EXCB:
+				goto egress;
+
+			default:
+				break;
+			}
+			break;
+
+		case OPC_INTA:
+		case OPC_INTL:
+		case OPC_INTS:
+		case OPC_INTM:
+			write_mask &= ~(1UL << rc);
+			break;
+
+		case OPC_FLTC:
+		case OPC_FLTV:
+		case OPC_FLTI:
+		case OPC_FLTL:
+			write_mask &= ~(1UL << (rc + 32));
+			break;
+		}
+		if (!write_mask) {
+			/* Re-execute insns in the trap-shadow. */
+			regs->pc = trigger_pc + 4;
+			si_code = sw64_fp_emul(trigger_pc);
+			goto egress;
+		}
+		trigger_pc -= 4;
+	}
+
+egress:
+	return si_code;
+}
+
+#define WORKING_PART_0	0
+#define WORKING_PART_1	1
+#define WORKING_PART_2	2
+#define WORKING_PART_3	3
+
+
+/*
+ * This is for sw64
+ */
+
+long simd_cmp_emul_d(unsigned long pc)
+{
+	FP_DECL_EX;
+	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); FP_DECL_D(DC);
+	unsigned long fa, fb, fc, func, mode, src;
+	unsigned long res, va, vb, vc, swcr, fpcr;
+	__u32 insn;
+	long si_code = 0;
+
+	unsigned long va_p0, va_p1, va_p2, va_p3;
+	unsigned long vb_p0, vb_p1, vb_p2, vb_p3;
+	unsigned long vc_p0, vc_p1, vc_p2, vc_p3;
+	unsigned long fex_p0, fex_p1, fex_p2, fex_p3;
+
+	int working_part;
+
+	get_user(insn, (__u32 *)pc);
+	fc = (insn >> 0) & 0x1f;	/* destination register */
+	fb = (insn >> 16) & 0x1f;
+	fa = (insn >> 21) & 0x1f;
+	func = (insn >> 5) & 0xff;
+	fpcr = rdfpcr();
+	mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3;
+
+	pr_debug("======== Entering SIMD floating-CMP math emulation =======\n");
+	pr_debug("hardware fpcr = %#lx\n", fpcr);
+	swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr);
+	pr_debug("software swcr = %#lx\n", swcr);
+	pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode);
+	read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3);
+	read_fp_reg_d(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3);
+	read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3);
+	pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3);
+	pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3);
+	pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3);
+	working_part = WORKING_PART_0;
+simd_working:
+	_fex = 0;
+	switch (working_part) {
+	case WORKING_PART_0:
+		pr_debug("WORKING_PART_0\n");
+		va = va_p0;
+		vb = vb_p0;
+		vc = vc_p0;
+		break;
+	case WORKING_PART_1:
+		pr_debug("WORKING_PART_1\n");
+		va = va_p1;
+		vb = vb_p1;
+		vc = vc_p1;
+		break;
+	case WORKING_PART_2:
+		pr_debug("WORKING_PART_2\n");
+		va = va_p2;
+		vb = vb_p2;
+		vc = vc_p2;
+		break;
+	case WORKING_PART_3:
+		pr_debug("WORKING_PART_3\n");
+		va = va_p3;
+		vb = vb_p3;
+		vc = vc_p3;
+		break;
+	}
+	pr_debug("Before unpack va:%#lx, vb:%#lx\n", va, vb);
+	FP_UNPACK_RAW_DP(DA, &va);
+	FP_UNPACK_RAW_DP(DB, &vb);
+	pr_debug("DA_e:%d, _FP_FRAC_ZEROP_1(DA):%d\n", DA_e, _FP_FRAC_ZEROP_1(DA));
+	pr_debug("DB_e:%d, _FP_FRAC_ZEROP_1(DB):%d\n", DB_e, _FP_FRAC_ZEROP_1(DB));
+	pr_debug("DA iszero:%d, DB iszero:%d\n", ((!DA_e && _FP_FRAC_ZEROP_1(DA)) ? 
1 : 0), + ((!DB_e && _FP_FRAC_ZEROP_1(DB)))); + if (!DA_e && !_FP_FRAC_ZEROP_1(DA)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DA, _FP_ZEROFRAC_1); + } + if (!DB_e && !_FP_FRAC_ZEROP_1(DB)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DB, _FP_ZEROFRAC_1); + } + FP_CMP_D(res, DA, DB, 3); + vc = 0x4000000000000000; + /* CMPTEQ, CMPTUN don't trap on QNaN, while CMPTLT and CMPTLE do */ + if (res == 3 && (((func == FOP_FNC_CMPLT) || (func == FOP_FNC_CMPLE)) + || FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) { + pr_debug("CMPLT CMPLE:func:%d, trap on QNaN.", func); + FP_SET_EXCEPTION(FP_EX_INVALID); + } + pr_debug("res:%d\n", res); + switch (func) { + case FNC_VFCMPUN: + if (res != 3) + vc = 0; + break; + case FNC_VFCMPEQ: + if (res) + vc = 0; + break; + case FNC_VFCMPLT: + if (res != -1) + vc = 0; + break; + case FNC_VFCMPLE: + if ((long)res > 0) + vc = 0; + break; + } +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + pr_debug("Before write fp: vc_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_d(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? 
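+	 * An exception raises SIGFPE only when its trap-enable bit is
+	 * set in the software control word; the if-chain below runs the
+	 * checks in order, so the last matching enable picks si_code.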
*/ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + + +long simd_fp_emul_d(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); FP_DECL_D(DC); + unsigned long fa, fb, fc, func, mode, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + get_user(insn, (__u32 *)pc); + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("======== Entering SIMD D-floating math emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_d(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == 
SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + } + + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + + switch (func) { + case FNC_VSUBD: + pr_debug("FNC_VSUBD\n"); + FP_SUB_D(DR, DA, DB); + goto pack_d; + case FNC_VMULD: + pr_debug("FNC_VMULD\n"); + FP_MUL_D(DR, DA, DB); + goto pack_d; + case FNC_VADDD: + pr_debug("FNC_VADDD\n"); + FP_ADD_D(DR, DA, DB); + goto pack_d; + case FNC_VDIVD: + pr_debug("FNC_VDIVD\n"); + FP_DIV_D(DR, DA, DB); + goto pack_d; + case FNC_VSQRTD: + pr_debug("FNC_VSQRTD\n"); + FP_SQRT_D(DR, DB); + goto pack_d; + } +pack_d: + FP_PACK_DP(&vc, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) { + pr_debug("pack_d, vc=0 !!!!\n"); + vc = 0; + } + + pr_debug("SW64 SIMD Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
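+		 * Each lane that trapped rebuilds its own FPCR image from
+		 * its per-lane copy of the software control word; the four
+		 * images are OR-merged below into the one value passed to
+		 * wrfpcr().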
*/ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + pr_debug("Before write fp: vp_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_d(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? */ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long simd_fp_emul_s(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + + unsigned long fa, fb, fc, func, mode, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + get_user(insn, (__u32 *)pc); + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("======== Entering SIMD S-floating math emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + read_fp_reg_s(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_s(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_s(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART0: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + 
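+			/*
+			 * No lane-1 exception status is recorded: re-emulate
+			 * in software only when an operand is denormal,
+			 * otherwise the hardware result for this lane stands.
+			 */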
SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART1: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART2: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART3: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + + } + + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + + switch (func) { + case FNC_VSUBS: + pr_debug("FNC_VSUBS\n"); + FP_SUB_S(SR, SA, SB); + goto pack_s; + case FNC_VMULS: + pr_debug("FNC_VMULS\n"); + FP_MUL_S(SR, SA, SB); + goto pack_s; + case FNC_VADDS: + pr_debug("FNC_VADDS\n"); + FP_ADD_S(SR, SA, SB); + goto pack_s; + case FNC_VDIVS: + pr_debug("FNC_VDIVS\n"); + FP_DIV_S(SR, SA, SB); + goto pack_s; + case FNC_VSQRTS: + pr_debug("FNC_VSQRTS\n"); + FP_SQRT_S(SR, SB); + goto pack_s; + } +pack_s: + FP_PACK_SP(&vc, SR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) { + pr_debug("pack_s, vc=0 !!!!\n"); + vc = 0; + } + + pr_debug("SW64 SIMD Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation S-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + pr_debug("fex_p0: fpcr_p0:%#lx\n", fpcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
*/ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + pr_debug("fex_p1: fpcr_p1:%#lx\n", fpcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + pr_debug("fex_p2: fpcr_p2:%#lx\n", fpcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + pr_debug("fex_p3: fpcr_p3:%#lx\n", fpcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + + pr_debug("Before write fp: vc_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_s(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? */ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + } + pr_debug("SIMD finished.. 
si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; + +} + +static inline unsigned long negative_value(unsigned long va) +{ + return (va ^ 0x8000000000000000UL); +} + +static inline unsigned long s_negative_value(unsigned long va) +{ + return (va ^ 0x80000000UL); +} + +/* + * sw64 mul-add floating emulation + */ +long mul_add_fp_emul(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(S_TMP); FP_DECL_S(SR); + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(D_TMP); FP_DECL_D(DR); + FP_DECL_S(S_ZERO); + FP_DECL_D(D_ZERO); + FP_DECL_S(S_TMP2); + FP_DECL_D(D_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("===== Entering SW64 MUL-ADD Emulation =====\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + + if (func == FNC_FMAS || func == FNC_FMSS || func == FNC_FNMAS || func == FNC_FNMSS) { + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + vc = sw64_read_fp_reg_s(fc); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_UNPACK_SP(SC, &vc); + FP_UNPACK_SP(S_ZERO, &vzero); + } + if (func == FNC_FMAD || func == FNC_FMSD || func == FNC_FNMAD || func == FNC_FNMSD) { + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + vc = sw64_read_fp_reg(fc); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_UNPACK_DP(DC, &vc); + FP_UNPACK_DP(D_ZERO, &vzero); + } + pr_debug("va = %#lx, vb = %#lx, vc = %#lx\n", va, vb, vc); + switch (func) { + case FNC_FMAS: + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FMSS: + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMAS: /* (-va*vb) + vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMSS: /* (-va*vb) - vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FMAD: + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FMSD: + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMAD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMSD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + default: + goto bad_insn; + + } +pack_s: + FP_PACK_SP(&vd, SR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + sw64_write_fp_reg_s(fd, vd); + goto done; + +pack_d: + FP_PACK_DP(&vd, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + sw64_write_fp_reg(fd, vd); + +done: + pr_debug("vd = %#lx\n", vd); + if (_fex) { + /* Record exceptions in software control word. */ + swcr |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
*/ + fpcr &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr |= ieee_swcr_to_fpcr(swcr); + wrfpcr(fpcr); /** wrfpcr will destroy vector register! */ + if (func == FNC_FMAS || func == FNC_FMSS || func == FNC_FNMAS || func == FNC_FNMSS) + sw64_write_fp_reg_s(fd, vd); + if (func == FNC_FMAD || func == FNC_FMSD || func == FNC_FNMAD || func == FNC_FNMSD) + sw64_write_fp_reg(fd, vd); + + /* Do we generate a signal? */ + _fex = _fex & swcr & IEEE_TRAP_ENABLE_MASK; + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + + return si_code; + } + + /* + * We used to write the destination register here, but DEC FORTRAN + * requires that the result *always* be written... so we do the write + * immediately after the operations above. + */ + + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + + +long simd_mul_add_fp_emul_s(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(S_TMP); FP_DECL_S(SR); + FP_DECL_S(S_ZERO); + FP_DECL_S(S_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long vd_p0, vd_p1, vd_p2, vd_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + pr_debug("======== Entering SIMD S-floating mul-add emulation =======\n"); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("hardware fpcr = %#lx\n", fpcr); + read_fp_reg_s(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_s(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_s(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + read_fp_reg_s(fd, &vd_p0, &vd_p1, &vd_p2, &vd_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + pr_debug("vd_p0:%#lx, vd_p1:%#lx, vd_p2:%#lx, vd_p3:%#lx\n", vd_p0, vd_p1, vd_p2, vd_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + pr_debug("FPCR_STATUS_MASK0 : %#lx, fpcr :%#lx\n", FPCR_STATUS_MASK0, fpcr); + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + SW64_FP_NORMAL_S(SC, &vc); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto 
next_working_s;
+		}
+		break;
+	case WORKING_PART_1:
+		pr_debug("WORKING_PART_1\n");
+		va = va_p1;
+		vb = vb_p1;
+		vc = vc_p1;
+		pr_debug("FPCR_STATUS_MASK1 : %#lx, fpcr :%#lx\n", FPCR_STATUS_MASK1, fpcr);
+		if ((fpcr & FPCR_STATUS_MASK1) == 0) {
+			SW64_FP_NORMAL_S(SA, &va);
+			SW64_FP_NORMAL_S(SB, &vb);
+			SW64_FP_NORMAL_S(SC, &vc);
+			if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL))
+				goto next_working_s;
+			else
+				pr_debug("PART1: SA_c = %d, SB_c = %d\n", SA_c, SB_c);
+		} else {
+			SW64_FP_NAN_S(SA, &va);
+			SW64_FP_NAN_S(SB, &vb);
+			if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN))
+				goto next_working_s;
+		}
+		break;
+	case WORKING_PART_2:
+		pr_debug("WORKING_PART_2\n");
+		va = va_p2;
+		vb = vb_p2;
+		vc = vc_p2;
+		if ((fpcr & FPCR_STATUS_MASK2) == 0) {
+			SW64_FP_NORMAL_S(SA, &va);
+			SW64_FP_NORMAL_S(SB, &vb);
+			SW64_FP_NORMAL_S(SC, &vc);
+			if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL))
+				goto next_working_s;
+			else
+				pr_debug("PART2: SA_c = %d, SB_c = %d\n", SA_c, SB_c);
+		} else {
+			SW64_FP_NAN_S(SA, &va);
+			SW64_FP_NAN_S(SB, &vb);
+			if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN))
+				goto next_working_s;
+		}
+		break;
+	case WORKING_PART_3:
+		pr_debug("WORKING_PART_3\n");
+		va = va_p3;
+		vb = vb_p3;
+		vc = vc_p3;
+		if ((fpcr & FPCR_STATUS_MASK3) == 0) {
+			SW64_FP_NORMAL_S(SA, &va);
+			SW64_FP_NORMAL_S(SB, &vb);
+			SW64_FP_NORMAL_S(SC, &vc);
+			if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL))
+				goto next_working_s;
+			else
+				pr_debug("PART3: SA_c = %d, SB_c = %d\n", SA_c, SB_c);
+		} else {
+			SW64_FP_NAN_S(SA, &va);
+			SW64_FP_NAN_S(SB, &vb);
+			if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN))
+				goto next_working_s;
+		}
+		break;
+	}
+
+	FP_UNPACK_SP(SA, &va);
+	FP_UNPACK_SP(SB, &vb);
+	FP_UNPACK_SP(SC, &vc);
+	FP_UNPACK_SP(S_ZERO, &vzero);
+	switch (func) {
+	case FNC_FMAS:
+		FP_MUL_S(S_TMP, SA, SB);
+		FP_ADD_S(SR, S_TMP, SC);
+		goto pack_s;
+	case FNC_FMSS:
+		FP_MUL_S(S_TMP, SA, SB);
+		FP_SUB_S(SR, S_TMP, SC);
+		goto pack_s;
+	case FNC_FNMAS:	/* (-va*vb) + vc */
+		va = s_negative_value(va);
+		FP_UNPACK_SP(SA, &va);
+		FP_MUL_S(S_TMP, SA, SB);
+		FP_ADD_S(SR, S_TMP, SC);
+		goto pack_s;
+	case FNC_FNMSS:	/* (-va*vb) - vc */
+		va = s_negative_value(va);
+		FP_UNPACK_SP(SA, &va);
+		FP_MUL_S(S_TMP, SA, SB);
+		FP_SUB_S(SR, S_TMP, SC);
+		goto pack_s;
+	default:
+		goto bad_insn;
+	}
+
+pack_s:
+	FP_PACK_SP(&vd, SR);
+	if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ))
+		vd = 0;
+	pr_debug("SW64 SIMD Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc);
+	pr_debug("SW64 SIMD Emulation S-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr);
+next_working_s:
+	switch (working_part) {
+	case WORKING_PART_0:
+		working_part = WORKING_PART_1;
+		vd_p0 = vd;
+		fex_p0 = _fex;
+		goto simd_working;
+	case WORKING_PART_1:
+		working_part = WORKING_PART_2;
+		vd_p1 = vd;
+		fex_p1 = _fex;
+		goto simd_working;
+	case WORKING_PART_2:
+		working_part = WORKING_PART_3;
+		vd_p2 = vd;
+		fex_p2 = _fex;
+		goto simd_working;
+	case WORKING_PART_3:
+		vd_p3 = vd;
+		fex_p3 = _fex;
+		goto done;
+	}
+done:
+	if (fex_p0 || fex_p1 || fex_p2 || fex_p3) {
+		unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3;
+		unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3;
+
+		fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0;
+		swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr;
+		/* manage fpcr_p0 */
+		if (fex_p0) {
+			swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT);
+			current_thread_info()->ieee_state 
+ |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + pr_debug("Before write fp: vp_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_s(fd, vd_p0, vd_p1, vd_p2, vd_p3); /* write to fd */ + + /* Do we generate a signal? */ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + + } + pr_debug("SIMD finished.. 
si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long simd_mul_add_fp_emul_d(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(D_TMP); FP_DECL_D(DR); + FP_DECL_D(D_ZERO); + FP_DECL_D(D_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long vd_p0, vd_p1, vd_p2, vd_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + pr_debug("======== Entering SIMD D-floating mul-add emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_d(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + read_fp_reg_d(fd, &vd_p0, &vd_p1, &vd_p2, &vd_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + pr_debug("vd_p0:%#lx, vd_p1:%#lx, vd_p2:%#lx, vd_p3:%#lx\n", vd_p0, vd_p1, vd_p2, vd_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + vd = vd_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + vd = vd_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + vd = vd_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", 
DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + vd = vd_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + } + + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_UNPACK_DP(DC, &vc); + FP_UNPACK_DP(D_ZERO, &vzero); + + switch (func) { + case FNC_FMAD: + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FMSD: + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMAD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMSD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + + goto pack_d; + default: + goto bad_insn; + } + +pack_d: + FP_PACK_DP(&vd, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + pr_debug("SW64 SIMD Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vd_p0 = vd; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vd_p1 = vd; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vd_p2 = vd; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vd_p3 = vd; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
*/ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + + pr_debug("Before write fp: vp_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_d(fd, vd_p0, vd_p1, vd_p2, vd_p3); /* write to fd */ + + /* Do we generate a signal? */ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +void read_fp_reg_s(unsigned long reg, unsigned long *val_p0, + unsigned long *val_p1, unsigned long *val_p2, unsigned long *val_p3) +{ + unsigned long fp[2]; + + sw64_read_simd_fp_m_s(reg, fp); + *val_p0 = fp[0] & 0xffffffffUL; + *val_p1 = (fp[0] >> 32) & 0xffffffffUL; + *val_p2 = fp[1] & 0xffffffffUL; + *val_p3 = (fp[1] >> 32) & 0xffffffffUL; +} + +void read_fp_reg_d(unsigned long reg, unsigned long *val_p0, + unsigned long *val_p1, unsigned long *val_p2, unsigned long *val_p3) +{ + unsigned long fp[4]; + + sw64_read_simd_fp_m_d(reg, fp); + *val_p0 = fp[0]; + *val_p1 = fp[1]; + *val_p2 = fp[2]; + *val_p3 = fp[3]; +} + +void write_fp_reg_s(unsigned long reg, unsigned long val_p0, + unsigned long val_p1, unsigned long val_p2, unsigned long val_p3) +{ + unsigned long fp[2]; + + fp[0] = ((val_p1 & 0xffffffffUL) << 32) | (val_p0 & 0xffffffffUL); + fp[1] = ((val_p3 & 0xffffffffUL) << 32) | (val_p2 & 0xffffffffUL); + sw64_write_simd_fp_reg_s(reg, fp[0], fp[1]); +} + +void write_fp_reg_d(unsigned long reg, unsigned long val_p0, + unsigned long val_p1, unsigned long val_p2, unsigned long val_p3) +{ + sw64_write_simd_fp_reg_d(reg, val_p0, val_p1, val_p2, val_p3); +} diff --git a/arch/sw_64/math-emu/qrnnd.S b/arch/sw_64/math-emu/qrnnd.S new file mode 100644 index 0000000000000000000000000000000000000000..1e732f2e68c086d33bc2be097d5ede9bd041ff4f --- /dev/null +++ b/arch/sw_64/math-emu/qrnnd.S @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + # __udiv_qrnnd + # Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc. + + # This file is part of GCC. 
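+
+	# Calling convention, as used by the udiv_qrnnd() macro in
+	# sfp-util.h: divide the 128-bit value held in $17 (high) and
+	# $18 (low) by $19, return the quotient in $0 and store the
+	# remainder through the pointer in $16. Each loop below performs
+	# 64 conditional-subtract division steps, unrolled four per
+	# iteration.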
+ + .set noreorder + .set noat + + .text + + .globl __udiv_qrnnd + .ent __udiv_qrnnd +__udiv_qrnnd: + .frame $30, 0, $26, 0 + .prologue 0 + + # ldiq $2,16 + ldi $2, 16($31) + blt $19, $largedivisor + +$loop1: cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + subl $2, 1, $2 + bgt $2, $loop1 + stl $17, 0($16) + bis $31, $18, $0 + ret $31, ($26), 1 + +$largedivisor: + and $18, 1, $4 + + srl $18, 1, $18 + sll $17, 63, $3 + or $3, $18, $18 + srl $17, 1, $17 + + and $19, 1, $6 + srl $19, 1, $5 + addl $5, $6, $5 + +$loop2: cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + subl $2, 1, $2 + bgt $2, $loop2 + + addl $17, $17, $17 + addl $4, $17, $17 + bne $6, $Odd + stl $17, 0($16) + bis $31, $18, $0 + ret $31, ($26), 1 + +$Odd: + # q' in $18. 
r' in $17 + addl $17, $18, $17 + + cmpult $17, $18, $3 # $3 := carry from addl + subl $17, $19, $at + addl $18, $3, $18 + selne $3, $at, $17, $17 + + cmpult $17, $19, $3 + addl $18, 1, $at + seleq $3, $at, $18, $18 + subl $17, $19, $at + seleq $3, $at, $17, $17 + + stl $17, 0($16) + bis $31, $18, $0 + ret $31, ($26), 1 + + .end __udiv_qrnnd diff --git a/arch/sw_64/math-emu/sfp-util.h b/arch/sw_64/math-emu/sfp-util.h new file mode 100644 index 0000000000000000000000000000000000000000..0769c0223e0d7e43e0b73fbd45882333e95f765f --- /dev/null +++ b/arch/sw_64/math-emu/sfp-util.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_MATH_EMU_SFP_UTIL_H +#define _SW64_MATH_EMU_SFP_UTIL_H + +#include +#include +#include +#include +#include + +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + ((sl) = (al) + (bl), (sh) = (ah) + (bh) + ((sl) < (al))) + +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + ((sl) = (al) - (bl), (sh) = (ah) - (bh) - ((al) < (bl))) + +#define umul_ppmm(wh, wl, u, v) \ + __asm__ ("mull %2, %3, %1; umulh %2, %3, %0" \ + : "=r" ((UDItype)(wh)), \ + "=&r" ((UDItype)(wl)) \ + : "r" ((UDItype)(u)), \ + "r" ((UDItype)(v))) + +#define udiv_qrnnd(q, r, n1, n0, d) \ +do { unsigned long __r; \ + (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ + (r) = __r; \ +} while (0) +extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, + unsigned long, unsigned long); + +#define UDIV_NEEDS_NORMALIZATION 1 + +#define abort() goto bad_insn + +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN -1 +#endif +#define __BYTE_ORDER __LITTLE_ENDIAN + +#endif /* _SW64_MATH_EMU_SFP_UTIL_H */ diff --git a/arch/sw_64/mm/Makefile b/arch/sw_64/mm/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..8b9d6e4d2ebfb31bda1ee41cb00c163cd70ce577 --- /dev/null +++ b/arch/sw_64/mm/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the linux sw_64-specific parts of the memory manager. 
+# + +#ccflags-y := -Werror + +obj-y := init.o fault.o physaddr.o mmap.o extable.o + +obj-$(CONFIG_NUMA) += numa.o +ifeq ($(CONFIG_SUBARCH_C4),y) +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage_c4.o +else +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o +endif +obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += thp.o diff --git a/arch/sw_64/mm/extable.c b/arch/sw_64/mm/extable.c new file mode 100644 index 0000000000000000000000000000000000000000..d2678e12a1b1cb85ab257d6260fa33f0a7c36179 --- /dev/null +++ b/arch/sw_64/mm/extable.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +int fixup_exception(struct pt_regs *regs, unsigned long pc) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(pc); + if (fixup) { + unsigned int valreg = fixup->fixup.bits.valreg; + unsigned int errreg = fixup->fixup.bits.errreg; + + if (valreg != 31) + regs->regs[valreg] = 0; + if (errreg != 31) + regs->regs[errreg] = -EFAULT; + pc += fixup->fixup.bits.nextinsn; + regs->pc = pc; + + return 1; + } + return 0; +} diff --git a/arch/sw_64/mm/fault.c b/arch/sw_64/mm/fault.c new file mode 100644 index 0000000000000000000000000000000000000000..e76560a7edca3fb796482aa91daae69146f6852f --- /dev/null +++ b/arch/sw_64/mm/fault.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1995 Linus Torvalds + */ + +#include +#include +#include + +#include + +__read_mostly bool segv_debug_enabled; + +#ifdef CONFIG_KPROBES +static inline int notify_page_fault(struct pt_regs *regs, unsigned long mmcsr) +{ + int ret = 0; + /* kprobe_running() needs smp_processor_id() */ + if (!user_mode(regs)) { + preempt_disable(); + if (kprobe_running() && kprobe_fault_handler(regs, mmcsr)) + ret = 1; + preempt_enable(); + } + return ret; +} +#else +static inline int notify_page_fault(struct pt_regs *regs, unsigned long mmcsr) +{ + return 0; +} +#endif + +extern void die(char *, struct pt_regs *, long); +extern void show_regs(struct pt_regs *regs); + +void show_all_vma(void) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + MA_STATE(mas, 0, 0, 0); + + if (!mm) + return; + + mas.tree = &mm->mm_mt; + + for (int i = 0; (vma = mas_find(&mas, ULONG_MAX)) != NULL; i++) { + unsigned long start = vma->vm_start; + unsigned long end = vma->vm_end; + struct file *file = vma->vm_file; + + if (file) + pr_info("vma[%d]: [%#lx, %#lx], len = %#lx, flags = %#lx, file = %s, name = %s\n", + i, start, end, (end - start), vma->vm_flags, + file->f_path.dentry->d_name.name, current->comm); + else + pr_info("vma[%d]: [%#lx, %#lx], len = %#lx, flags = %#lx, name = %s\n", + i, start, end, (end - start), vma->vm_flags, current->comm); + } +} + +/* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to handle_mm_fault(). + * + * mmcsr: + * 0 = translation not valid + * 1 = access violation + * 2 = fault-on-read + * 3 = fault-on-execute + * 4 = fault-on-write + * + * cause: + * -1 = instruction fetch + * 0 = load + * 1 = store + * + * Registers $9 through $15 are saved in a block just prior to `regs' and + * are saved and restored around the call to allow exception code to + * modify them. 
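 + * + * mmcsr values at or above MMCSR__DA_MATCH are match exceptions; they + * are handed to do_match() before the normal fault handling below runs.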
*/ + +unsigned long show_va_to_pa(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgd = NULL; + p4d_t *p4d = NULL; + pud_t *pud = NULL; + pmd_t *pmd = NULL; + pte_t *pte = NULL; + unsigned long ret = 0UL; + + pgd = pgd_offset(mm, addr); + if (pgd_none(*pgd)) { + ret = 0; + pr_debug("addr = %#lx, pgd = %#lx\n", addr, pgd_val(*pgd)); + goto out; + } + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) { + ret = 0; + pr_debug("addr = %#lx, pgd = %#lx, p4d = %#lx\n", + addr, pgd_val(*pgd), p4d_val(*p4d)); + goto out; + } + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) { + ret = 0; + pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx\n", + addr, pgd_val(*pgd), pud_val(*pud)); + goto out; + } + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + ret = 0; + pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx, pmd = %#lx\n", + addr, pgd_val(*pgd), pud_val(*pud), pmd_val(*pmd)); + goto out; + } + pte = pte_offset_map(pmd, addr); + if (pte_present(*pte)) { + ret = (unsigned long)pfn_to_virt(pte_pfn(*pte)); + pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx, pmd = %#lx, pte = %#lx, ret = %#lx\n", + addr, *(unsigned long *)pgd, *(unsigned long *)pud, + *(unsigned long *)pmd, *(unsigned long *)pte, ret); + } +out: + return ret; +} + +extern int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs); + +asmlinkage void notrace +do_page_fault(unsigned long address, unsigned long mmcsr, + long cause, struct pt_regs *regs) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + int si_code = SEGV_MAPERR; + vm_fault_t fault; + unsigned int flags = FAULT_FLAG_DEFAULT; + + if (notify_page_fault(regs, mmcsr)) + return; + + if (unlikely(mmcsr >= MMCSR__DA_MATCH)) { + if (do_match(address, mmcsr, cause, regs) == 1) + return; + } + + if (unlikely(mmcsr == MMCSR__ACV1)) { + if (!user_mode(regs)) + goto no_context; + /* The mmap lock has not been taken yet. */ + goto bad_area_nosemaphore; + } + + /* + * If we're in an interrupt context, or have no user context, + * we must not take the fault. + */ + if (!mm || faulthandler_disabled()) + goto no_context; + + if (user_mode(regs)) + flags |= FAULT_FLAG_USER; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + +retry: + vma = lock_mm_and_find_vma(mm, address, regs); + if (!vma) + goto bad_area_nosemaphore; + + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it. + */ + si_code = SEGV_ACCERR; + if (cause < 0) { + if (!(vma->vm_flags & VM_EXEC)) + goto bad_area; + } else if (!cause) { + /* Allow reads even for write-only mappings */ + if (!(vma->vm_flags & (VM_READ | VM_WRITE))) + goto bad_area; + } else { + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + flags |= FAULT_FLAG_WRITE; + } + + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault.
+ */ + fault = handle_mm_fault(vma, address, flags, regs); + + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + goto no_context; + return; + } + + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); + } + + if (fault & VM_FAULT_MAJOR) { + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, + regs, address); + current->maj_flt++; + } else { + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, + regs, address); + current->min_flt++; + } + + if (fault & VM_FAULT_RETRY) { + flags |= FAULT_FLAG_TRIED; + + /* No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } + + mmap_read_unlock(mm); + + return; + + /* + * Something tried to access memory that isn't in our memory map. + * Fix it, but check if it's kernel or user first. + */ + bad_area: + mmap_read_unlock(mm); + + bad_area_nosemaphore: + if (user_mode(regs)) + goto do_sigsegv; + + no_context: + /* Are we prepared to handle this fault as an exception? */ + if (fixup_exception(regs, regs->pc)) + return; + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + pr_alert("Unable to handle kernel paging request at virtual address %016lx\n", + address); + die("Oops", regs, cause); + make_task_dead(SIGKILL); + + /* + * We ran out of memory, or some other thing happened to us that + * made us unable to handle the page fault gracefully. + */ + out_of_memory: + mmap_read_unlock(mm); + if (!user_mode(regs)) + goto no_context; + pagefault_out_of_memory(); + return; + + do_sigbus: + mmap_read_unlock(mm); + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address); + if (!user_mode(regs)) + goto no_context; + return; + + do_sigsegv: + force_sig_fault(SIGSEGV, si_code, (void __user *) address); + + if (unlikely(segv_debug_enabled)) { + pr_info("fault: want to send_segv: pid %d, cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + current->pid, cause, mmcsr, address, regs->pc); + show_regs(regs); + show_all_vma(); + } +} diff --git a/arch/sw_64/mm/hugetlbpage.c b/arch/sw_64/mm/hugetlbpage.c new file mode 100644 index 0000000000000000000000000000000000000000..fae1fa8bf7df50342b2608d33286619b57fe2d1c --- /dev/null +++ b/arch/sw_64/mm/hugetlbpage.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 Huge TLB Page Support for Kernel. + */ + +#include +#include +#include +#include + +#include +#include + +/* + * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal + * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. + * Otherwise, returns 0. 
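 + * + * A present huge entry has both _PAGE_VALID and _PAGE_LEAF set, while a + * non-present (migration or hwpoisoned) entry has _PAGE_VALID clear, so + * every non-empty entry except a plain valid table pointer is treated as + * huge.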
+ */ +int pmd_huge(pmd_t pmd) +{ + return !pmd_none(pmd) && + (pmd_val(pmd) & (_PAGE_VALID | _PAGE_LEAF)) != _PAGE_VALID; +} + +int pud_huge(pud_t pud) +{ + return 0; +} + +pte_t *sw64_256m_hugepte_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr) +{ + int i; + struct page *page; + pmd_t *pmd; + pte_t *pte = NULL; + + pmd = pmd_alloc(mm, pud, addr); + if (pmd == NULL) + return NULL; + + pte = pte_alloc_map(mm, pmd, addr); + if (pte == NULL) + return NULL; + + page = virt_to_page(pte); + pmd_val(*pmd) = pmd_val(*pmd) | _PAGE_LEAF | _PAGE_CONT; + for (i = 1; i < 32; i++) + pmd_val(*(pmd+i)) = pmd_val(*pmd); + return pte; +} + +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); + if (pud) { + if (sz == PMD_SIZE) { + if (want_pmd_share(vma, addr) && pud_none(*pud)) + pte = huge_pmd_share(mm, vma, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); + } else if (sz == (PMD_SIZE << 5)) { + pte = sw64_256m_hugepte_alloc(mm, pud, addr); + } else { + pr_warn("Unsupported page size %lx\n", sz); + return NULL; + } + } + BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); + + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd = NULL; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + if (pgd_present(*pgd)) { + p4d = p4d_offset(pgd, addr); + if (p4d_present(*p4d)) { + pud = pud_offset(p4d, addr); + if (pud_present(*pud)) { + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) + return NULL; + if (pmd_val(*pmd) & _PAGE_CONT) + pte = pte_offset_map(pmd, addr); + else + pte = (pte_t *) pmd; + } + } + } + return pte; +} + +static inline int sw64_huge_pmd_bad(pmd_t pmd) +{ + return !(((pmd_val(pmd) & ~_PFN_MASK) == _PAGE_TABLE) || + ((pmd_val(pmd) & _PAGE_CONT) == _PAGE_CONT)); +} + +static inline int sw64_huge_pmd_none_or_clear_bad(pmd_t *pmd) +{ + if (pmd_none(*pmd)) + return 1; + if (unlikely(sw64_huge_pmd_bad(*pmd))) { + pmd_clear_bad(pmd); + return 1; + } + return 0; +} + +static void sw64_huge_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, + unsigned long addr) +{ + if ((((unsigned long)pmd & 0xffUL) == 0) && + ((pmd_val(*pmd) & _PAGE_CONT) == _PAGE_CONT)) { + pgtable_t token = pmd_pgtable(*pmd); + + pmd_clear(pmd); + pte_free_tlb(tlb, token, addr); + mm_dec_nr_ptes(tlb->mm); + } else { + pmd_clear(pmd); + } +} + +static inline void sw64_huge_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pmd_t *pmd; + unsigned long next; + unsigned long start; + + start = addr; + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (sw64_huge_pmd_none_or_clear_bad(pmd)) + continue; + sw64_huge_free_pte_range(tlb, pmd, addr); + } while (pmd++, addr = next, addr != end); + + start &= PUD_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PUD_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + + pmd = pmd_offset(pud, start); + pud_clear(pud); + pmd_free_tlb(tlb, pmd, start); + mm_dec_nr_pmds(tlb->mm); +} + +static inline void sw64_huge_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pud_t *pud; + unsigned long 
next; + unsigned long start; + + start = addr; + pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) + continue; + sw64_huge_free_pmd_range(tlb, pud, addr, next, floor, ceiling); + } while (pud++, addr = next, addr != end); + + start &= PGDIR_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PGDIR_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + + pud = pud_offset(p4d, start); + p4d_clear(p4d); + pud_free_tlb(tlb, pud, start); + mm_dec_nr_puds(tlb->mm); +} + +#ifdef CONFIG_HUGETLB_PAGE +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_legacy_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + return vm_unmapped_area(&info); +} + +static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + unsigned long addr0, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + unsigned long addr; + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = PAGE_SIZE; + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
+ */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + +unsigned long +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (len > TASK_SIZE) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (prepare_hugepage_range(file, addr, len)) + return -EINVAL; + return addr; + } + + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); +} + +#if (defined(CONFIG_FORCE_MAX_ZONEORDER) && (CONFIG_FORCE_MAX_ZONEORDER >= 16)) +static __init int sw64_256m_hugetlb_init(void) +{ + if (!size_to_hstate(1UL << (PMD_SHIFT + 5))) + hugetlb_add_hstate(PMD_SHIFT + 5 - PAGE_SHIFT); + return 0; +} +arch_initcall(sw64_256m_hugetlb_init); +#endif +#endif /* CONFIG_HUGETLB_PAGE */ + +bool __init arch_hugetlb_valid_size(unsigned long size) +{ + switch (size) { + case PMD_SIZE: + case (PMD_SIZE<<5): + return true; + } + + return false; +} diff --git a/arch/sw_64/mm/hugetlbpage_c4.c b/arch/sw_64/mm/hugetlbpage_c4.c new file mode 100644 index 0000000000000000000000000000000000000000..913389cd257704f67f0bdaa9be97754d15a9b8a9 --- /dev/null +++ b/arch/sw_64/mm/hugetlbpage_c4.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW_64 Huge TLB Page Support for Kernel. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal + * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. + * Otherwise, returns 0. 
+ */ +int pmd_huge(pmd_t pmd) +{ + return !pmd_none(pmd) && + (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_LEAF)) != _PAGE_PRESENT; +} + +int pud_huge(pud_t pud) +{ + return !pud_none(pud) && + (pud_val(pud) & (_PAGE_PRESENT|_PAGE_LEAF)) != _PAGE_PRESENT; +} +EXPORT_SYMBOL(pud_huge); + +/* + * Select all bits except the pfn + */ +static inline pgprot_t pte_pgprot(pte_t pte) +{ + unsigned long pfn = pte_pfn(pte); + + return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); +} + +static inline int num_contig_ptes(unsigned long size, size_t *pgsize) +{ + int contig_ptes = 0; + + *pgsize = size; + + switch (size) { + case PUD_SIZE: + case PMD_SIZE: + contig_ptes = 1; + break; + case CONT_PMD_SIZE: + *pgsize = PMD_SIZE; + contig_ptes = CONT_PMDS; + break; + default: + break; + } + + return contig_ptes; +} + +static pte_t get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned long pgsize, unsigned long ncontig) +{ + pte_t orig_pte = huge_ptep_get(ptep); + unsigned long i; + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { + pte_t pte = ptep_get_and_clear(mm, addr, ptep); + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} + +static pte_t get_clear_contig_flush(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long pgsize, + unsigned long ncontig) +{ + pte_t orig_pte = get_and_clear(mm, addr, ptep, pgsize, ncontig); + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); + unsigned long i, saddr = addr; + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) + pte_clear(mm, addr, ptep); + + flush_tlb_range(&vma, saddr, addr); + return orig_pte; +} + +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); + if (!pud) + return NULL; + + if (sz == PUD_SIZE) { + pte = (pte_t *)pud; + } else if (sz == PMD_SIZE) { + if (want_pmd_share(vma, addr) && pud_none(*pud)) + pte = huge_pmd_share(mm, vma, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); + } else if (sz == (PMD_SIZE * CONT_PMDS)) { + pte = (pte_t *)pmd_alloc(mm, pud, addr); + WARN_ON(addr & (sz - 1)); + } + + WARN_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd = NULL; + + pgd = pgd_offset(mm, addr); + if (!pgd_present(*pgd)) + return NULL; + + p4d = p4d_offset(pgd, addr); + if (!p4d_present(*p4d)) + return NULL; + + pud = pud_offset(p4d, addr); + + if (sz != PUD_SIZE && pud_none(*pud)) + return NULL; + /* hugepage or swap? 
*/ + if (pud_huge(*pud) || !pud_present(*pud)) + return (pte_t *)pud; + /* table; check the next level */ + + if (sz == CONT_PMD_SIZE) + addr &= CONT_PMD_MASK; + + pmd = pmd_offset(pud, addr); + if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && + pmd_none(*pmd)) + return NULL; + if (pmd_huge(*pmd) || !pmd_present(*pmd)) + return (pte_t *)pmd; + + return NULL; +} + +pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) +{ + size_t pagesize = 1UL << shift; + + if (pagesize == CONT_PMD_SIZE) { + entry = pmd_pte(pmd_mkcont(pte_pmd(entry))); + } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) { + pr_warn("%s: unrecognized huge page size 0x%lx\n", + __func__, pagesize); + } + return entry; +} + +void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) +{ + int i, ncontig; + size_t pgsize; + + ncontig = num_contig_ptes(sz, &pgsize); + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) + pte_clear(mm, addr, ptep); +} + +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ + size_t pgsize; + int i; + int ncontig; + unsigned long pfn; + pgprot_t hugeprot; + + /* + * Code needs to be expanded to handle huge swap and migration + * entries. Needed for HUGETLB and MEMORY_FAILURE. + */ + WARN_ON(!pte_present(pte)); + + if (!pte_cont(pte)) { + set_pte_at(mm, addr, ptep, pte); + return; + } + + ncontig = num_contig_ptes(sz, &pgsize); + pfn = pte_pfn(pte); + hugeprot = pte_pgprot(pte); + + get_and_clear(mm, addr, ptep, pgsize, ncontig); + + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); +} + +void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ + int i, ncontig; + size_t pgsize; + + ncontig = num_contig_ptes(sz, &pgsize); + + for (i = 0; i < ncontig; i++, ptep++) + set_pte(ptep, pte); +} + +void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long pfn; + pgprot_t hugeprot; + int ncontig, i; + size_t pgsize; + pte_t pte; + + if (!pte_cont(READ_ONCE(*ptep))) { + ptep_set_wrprotect(mm, addr, ptep); + return; + } + + /* Contiguous mapping: CONT_PMDS entries of PMD_SIZE each. */ + ncontig = num_contig_ptes(CONT_PMD_SIZE, &pgsize); + + pte = get_and_clear(mm, addr, ptep, pgsize, ncontig); + pte = pte_wrprotect(pte); + + hugeprot = pte_pgprot(pte); + pfn = pte_pfn(pte); + + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); +} + +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + int ncontig; + size_t pgsize; + pte_t orig_pte = huge_ptep_get(ptep); + + if (!pte_cont(orig_pte)) + return ptep_get_and_clear(mm, addr, ptep); + + ncontig = num_contig_ptes(CONT_PMD_SIZE, &pgsize); + + return get_and_clear(mm, addr, ptep, pgsize, ncontig); +} + +pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + struct mm_struct *mm = vma->vm_mm; + size_t pgsize; + int ncontig; + + if (!pte_cont(READ_ONCE(*ptep))) + return ptep_clear_flush(vma, addr, ptep); + + ncontig = num_contig_ptes(CONT_PMD_SIZE, &pgsize); + return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig); +} + +static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig) +{ + int i; + + if (pte_write(pte) != pte_write(huge_ptep_get(ptep))) + return 1; + + for (i = 0; i < ncontig; i++) { + pte_t orig_pte = huge_ptep_get(ptep + i); + + if (pte_dirty(pte) != pte_dirty(orig_pte)) + return 1; + + if (pte_young(pte) != pte_young(orig_pte)) + return 1; + } + + return 0; +} +
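+/* + * Update the access flags of a contiguous-PMD huge mapping: when they + * really changed, clear all CONT_PMDS entries, fold the accumulated + * dirty/young bits back in, then rewrite every entry with the new + * permissions. + */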
+int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + int ncontig, i; + size_t pgsize; + unsigned long pfn = pte_pfn(pte); + pgprot_t hugeprot; + pte_t orig_pte; + + if (!pte_cont(pte)) + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + + ncontig = num_contig_ptes(CONT_PMD_SIZE, &pgsize); + + if (!__cont_access_flags_changed(ptep, pte, ncontig)) + return 0; + + orig_pte = get_and_clear(vma->vm_mm, addr, ptep, pgsize, ncontig); + flush_tlb_fix_spurious_fault(vma, addr, ptep); + + /* Make sure we don't lose the dirty or young state */ + if (pte_dirty(orig_pte)) + pte = pte_mkdirty(pte); + + if (pte_young(orig_pte)) + pte = pte_mkyoung(pte); + + hugeprot = pte_pgprot(pte); + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot)); + + return 1; +} + +#ifdef CONFIG_HUGETLB_PAGE +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_legacy_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + return vm_unmapped_area(&info); +} + +static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + unsigned long addr0, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + unsigned long addr; + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = PAGE_SIZE; + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations.
+ */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + + unsigned long +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (len > TASK_SIZE) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (prepare_hugepage_range(file, addr, len)) + return -EINVAL; + return addr; + } + + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); +} +#endif /* CONFIG_HUGETLB_PAGE */ + +static __init int setup_hugepagesz(char *opt) +{ + unsigned long ps = memparse(opt, &opt); + + switch (ps) { + case PUD_SIZE: + case PMD_SIZE * CONT_PMDS: + case PMD_SIZE: + hugetlb_add_hstate(ilog2(ps) - PAGE_SHIFT); + return 1; + } + + pr_err("hugepagesz: Unsupported page size %lu M\n", + ps >> 20); + return 0; +} +__setup("hugepagesz=", setup_hugepagesz); diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c new file mode 100644 index 0000000000000000000000000000000000000000..1f402809128f21d133e92dc436026f58cc6a6e5a --- /dev/null +++ b/arch/sw_64/mm/init.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1995 Linus Torvalds + */ + +/* 2.3.x zone allocator, 1999 Andrea Arcangeli */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct mem_desc_t mem_desc; +#ifndef CONFIG_NUMA +struct numa_node_desc_t numa_nodes_desc[1]; +#endif /* CONFIG_NUMA */ + +/* + * empty_zero_page is a special page that is used for + * zero-initialized data and COW. 
+ */ +unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; +EXPORT_SYMBOL(empty_zero_page); +pg_data_t *node_data[MAX_NUMNODES] __read_mostly; +EXPORT_SYMBOL(node_data); + +pgd_t swapper_pg_dir[1024] __aligned(PAGE_SIZE); +static pud_t vmalloc_pud[1024] __aligned(PAGE_SIZE); + +static phys_addr_t mem_start; +static phys_addr_t mem_size_limit; + +#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE +unsigned long memory_block_size_bytes(void) +{ + if (is_in_guest()) + return MIN_MEMORY_BLOCK_SIZE_VM_MEMHP; + else + return MIN_MEMORY_BLOCK_SIZE; +} +#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ + +/* Parse "mem=<size>[@<start>]" to limit the usable memory range. */ +static int __init setup_mem_size(char *p) +{ + char *oldp; + unsigned long start, size; + + start = 0; + oldp = p; + size = memparse(p, &p); + if (p == oldp) + return -EINVAL; + + if (*p == '@') + start = memparse(p + 1, &p); + + mem_start = start; + mem_size_limit = size; + return 0; +} +early_param("mem", setup_mem_size); + +#if defined(CONFIG_SUBARCH_C3B) +pgd_t * +pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret, *init; + + ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + init = pgd_offset(&init_mm, 0UL); + if (ret) + pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]); + + return ret; +} +#elif defined(CONFIG_SUBARCH_C4) +pgd_t * +pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret; + + ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + + return ret; +} +#endif + +/* Set up initial PCB, VPTB, and other such niceties. */ + +static inline void +switch_to_system_map(void) +{ + memset(swapper_pg_dir, 0, PAGE_SIZE); + update_ptbr_sys(virt_to_phys(swapper_pg_dir)); + tbiv(); +} + +void __init callback_init(void) +{ + pgd_t *pgd; + p4d_t *p4d; + + switch_to_system_map(); + + /* Install the statically allocated PUD for the vmalloc region. */ + pgd = pgd_offset_k(VMALLOC_START); + p4d = p4d_offset(pgd, VMALLOC_START); + p4d_populate(&init_mm, p4d, (pud_t *)vmalloc_pud); +} + +void __init zone_sizes_init(void) +{ + unsigned long max_zone_pfns[MAX_NR_ZONES]; + + memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); + +#ifdef CONFIG_ZONE_DMA32 + max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn); +#endif + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; + + free_area_init(max_zone_pfns); +} + +/* + * paging_init() sets up the memory map. + */ +void __init paging_init(void) +{ +} + +void __init mem_detect(void) +{ + int i; + + mem_desc.phys_base = 0; + for (i = 0; i < MAX_NUMSOCKETS; i++) { + if (socket_desc[i].is_online) + mem_desc.phys_size += socket_desc[i].socket_mem; + } + + if (mem_start >= NODE0_START) { + mem_desc.base = mem_start; + } else { + mem_desc.base = NODE0_START; + mem_size_limit -= NODE0_START - mem_start; + } + + if (mem_size_limit && mem_size_limit < mem_desc.phys_size - NODE0_START) + mem_desc.size = mem_size_limit; + else + mem_desc.size = mem_desc.phys_size - NODE0_START; +} + +void __init sw64_memblock_init(void) +{ + memblock_add(mem_desc.base, mem_desc.size); + + memblock_remove(1ULL << MAX_PHYSMEM_BITS, PHYS_ADDR_MAX); + + max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); + + memblock_allow_resize(); + memblock_initialized = true; + process_memmap(); + + /* Make sure kernel text is in memory range. */ + memblock_add(__pa_symbol(_text), _end - _text); + memblock_reserve(__pa_symbol(_text), _end - _text); + + /* Make sure initrd is in memory range.
*/ + if (sunway_boot_params->initrd_start) { + phys_addr_t base = __boot_pa(sunway_boot_params->initrd_start); + phys_addr_t size = sunway_boot_params->initrd_size; + + memblock_add(base, size); + memblock_reserve(base, size); + } + + /* end of DRAM range may have been changed */ + max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); +} + +#ifndef CONFIG_NUMA +void __init sw64_numa_init(void) +{ + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); + u64 nd_pa; + void *nd; + int tnid; + + memblock_set_node(mem_desc.base, mem_desc.size, &memblock.memory, 0); + nd_pa = memblock_phys_alloc(nd_size, SMP_CACHE_BYTES); + nd = __va(nd_pa); + + /* report and initialize */ + pr_info("NODE_DATA [mem %#018llx-%#018llx]\n", + nd_pa, nd_pa + nd_size - 1); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != 0) + pr_info("NODE_DATA(%d) on node %d\n", 0, tnid); + + node_data[0] = nd; + memset(NODE_DATA(0), 0, sizeof(pg_data_t)); + NODE_DATA(0)->node_id = 0; + NODE_DATA(0)->node_start_pfn = mem_desc.base >> PAGE_SHIFT; + NODE_DATA(0)->node_spanned_pages = mem_desc.size >> PAGE_SHIFT; + node_set_online(0); +} +#endif /* CONFIG_NUMA */ + +void __init +mem_init(void) +{ + set_max_mapnr(max_low_pfn); + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); +#ifdef CONFIG_SWIOTLB + swiotlb_init(true, SWIOTLB_VERBOSE); +#endif + memblock_free_all(); +} + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap) +{ + return vmemmap_populate_basepages(start, end, node, altmap); +} + +void vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap) +{ +} +#endif + +#ifdef CONFIG_HAVE_MEMBLOCK +#ifndef MIN_MEMBLOCK_ADDR +#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET) +#endif +#ifndef MAX_MEMBLOCK_ADDR +#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0) +#endif +void __init early_init_dt_add_memory_arch(u64 base, u64 size) +{ + const u64 phys_offset = MIN_MEMBLOCK_ADDR; + + if (acpi_disabled) { + if (!PAGE_ALIGNED(base)) { + if (size < PAGE_SIZE - (base & ~PAGE_MASK)) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + size -= PAGE_SIZE - (base & ~PAGE_MASK); + base = PAGE_ALIGN(base); + } + size &= PAGE_MASK; + + if (base > MAX_MEMBLOCK_ADDR) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (base + size - 1 > MAX_MEMBLOCK_ADDR) { + pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", + ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size); + size = MAX_MEMBLOCK_ADDR - base + 1; + } + + if (base + size < phys_offset) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (base < phys_offset) { + pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", + base, phys_offset); + size -= phys_offset - base; + base = phys_offset; + } + memblock_add(base, size); + } else + return; +} +#endif + +#ifdef CONFIG_MEMORY_HOTPLUG +int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + int ret; + + ret = __add_pages(nid, start_pfn, nr_pages, params); + if (ret) + pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n", + __func__, ret); + + return ret; +} + +void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + + __remove_pages(start_pfn, 
nr_pages, altmap); +} +#endif + +static const pgprot_t protection_map[16] = { + [VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_READ] = _PAGE_P(_PAGE_FOE | _PAGE_FOW), + [VM_WRITE] = _PAGE_P(_PAGE_FOE), + [VM_WRITE | VM_READ] = _PAGE_P(_PAGE_FOE), + [VM_EXEC] = _PAGE_P(_PAGE_FOW | _PAGE_FOR), + [VM_EXEC | VM_READ] = _PAGE_P(_PAGE_FOW), + [VM_EXEC | VM_WRITE] = _PAGE_P(0), + [VM_EXEC | VM_WRITE | VM_READ] = _PAGE_P(0), + [VM_SHARED] = _PAGE_S(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_SHARED | VM_READ] = _PAGE_S(_PAGE_FOE | _PAGE_FOW), + [VM_SHARED | VM_WRITE] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_WRITE | VM_READ] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_EXEC] = _PAGE_S(_PAGE_FOW | _PAGE_FOR), + [VM_SHARED | VM_EXEC | VM_READ] = _PAGE_S(_PAGE_FOW), + [VM_SHARED | VM_EXEC | VM_WRITE] = _PAGE_S(0), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = _PAGE_S(0) +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/sw_64/mm/mmap.c b/arch/sw_64/mm/mmap.c new file mode 100644 index 0000000000000000000000000000000000000000..a7a189fc36d675c44bdf4f9192867de1409dc480 --- /dev/null +++ b/arch/sw_64/mm/mmap.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include + +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct vm_unmapped_area_info info; + unsigned long limit; + + /* Support 32 bit heap. */ + if (current->personality & ADDR_LIMIT_32BIT) + limit = 0x80000000; + else + limit = TASK_SIZE; + + if (len > limit) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (addr + len > TASK_SIZE) + return -EINVAL; + + return addr; + } + + if (addr) { + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + + info.flags = 0; + info.length = len; + info.low_limit = mm->mmap_base; + info.high_limit = limit; + info.align_mask = 0; + info.align_offset = pgoff << PAGE_SHIFT; + + return vm_unmapped_area(&info); +} + +unsigned long arch_mmap_rnd(void) +{ + unsigned long rnd; + + /* 8MB for 32bit, 256MB for 64bit */ + if (current->personality & ADDR_LIMIT_32BIT) + rnd = get_random_long() & 0x7ffffful; + else + rnd = get_random_long() & 0xffffffful; + + return rnd << PAGE_SHIFT; +} + +/* + * This function, called very early during the creation of a new process VM + * image, sets up which VM layout function to use: + */ +void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) +{ + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) + random_factor = arch_mmap_rnd(); + + /* + * Fall back to the standard layout if the personality bit is set, or + * if the expected stack growth is unlimited: + */ + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; +} + +SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, off) +{ + unsigned long ret = -EINVAL; + + if ((off + PAGE_ALIGN(len)) < off) + goto out; + if (off & ~PAGE_MASK) + goto out; + ret = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); + out: + return ret; +} diff --git a/arch/sw_64/mm/numa.c b/arch/sw_64/mm/numa.c new file mode 100644 index 0000000000000000000000000000000000000000..fcf1f97a7840fae1232654735e23c4e29432b56c --- 
/dev/null +++ b/arch/sw_64/mm/numa.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DISCONTIGMEM NUMA sw64 support. + */ + +#include +#include +#include +#include + +#include + +int cpu_to_node_map[NR_CPUS]; +cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +EXPORT_SYMBOL(node_to_cpumask_map); + +struct numa_node_desc_t numa_nodes_desc[MAX_NUMNODES]; +nodemask_t numa_nodes_parsed __initdata; + +static int numa_distance_cnt; +static u8 *numa_distance; +int numa_off; + +static __init int numa_setup(char *opt) +{ + if (!opt) + return -EINVAL; + if (!strncmp(opt, "off", 3)) + numa_off = 1; + return 0; +} +early_param("numa", numa_setup); + +/* + * Allocate node_to_cpumask_map based on number of available nodes + * Requires node_possible_map to be valid. + * + * Note: cpumask_of_node() is not valid until after this is done. + * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) + */ +static void __init setup_node_to_cpumask_map(void) +{ + int node; + + /* setup nr_node_ids if not done yet */ + if (nr_node_ids == MAX_NUMNODES) + setup_nr_node_ids(); + + /* allocate and clear the mapping */ + for (node = 0; node < nr_node_ids; node++) { + alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); + cpumask_clear(node_to_cpumask_map[node]); + } + + /* cpumask_of_node() will now work */ + pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); +} + +/** + * numa_add_memblk - Set node id to memblk + * @nid: NUMA node ID of the new memblk + * @start: Start address of the new memblk + * @end: End address of the new memblk + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int __init numa_add_memblk(int nid, u64 start, u64 end) +{ + int ret; + + ret = memblock_set_node(start, (end - start), &memblock.memory, nid); + if (ret < 0) { + pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n", + start, (end - 1), nid); + return ret; + } + + node_set(nid, numa_nodes_parsed); + return ret; +} + +/** + * Initialize NODE_DATA for a node on the local memory + */ +static void __init setup_node_data(int nid, unsigned long start_pfn, unsigned long end_pfn) +{ + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); + u64 nd_pa; + void *nd; + int tnid; + + if (start_pfn >= end_pfn) + pr_info("Initmem setup node %d []\n", nid); + + nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); + nd = __va(nd_pa); + + /* report and initialize */ + pr_info("NODE_DATA [mem %#018llx-%#018llx]\n", + nd_pa, nd_pa + nd_size - 1); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != nid) + pr_info("NODE_DATA(%d) on node %d\n", nid, tnid); + + node_data[nid] = nd; + memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); + NODE_DATA(nid)->node_id = nid; + NODE_DATA(nid)->node_start_pfn = start_pfn; + NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; +} + +/** + * numa_free_distance + * + * Free current distance table. + */ +void __init numa_free_distance(void) +{ + size_t size; + + if (!numa_distance) + return; + + size = numa_distance_cnt * numa_distance_cnt * + sizeof(numa_distance[0]); + + memblock_free(numa_distance, size); + numa_distance_cnt = 0; + numa_distance = NULL; +} + +/** + * + * Create a new NUMA distance table. 
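 + * The table is a flat nr_node_ids * nr_node_ids byte matrix, initialized + * to LOCAL_DISTANCE on the diagonal and REMOTE_DISTANCE everywhere else.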
+ * + */ +static int __init numa_alloc_distance(void) +{ + size_t size; + phys_addr_t phys; + int i, j; + + size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]); + phys = memblock_phys_alloc(size, PAGE_SIZE); + if (WARN_ON(!phys)) + return -ENOMEM; + + numa_distance = __va(phys); + numa_distance_cnt = nr_node_ids; + + /* fill with the default distances */ + for (i = 0; i < numa_distance_cnt; i++) + for (j = 0; j < numa_distance_cnt; j++) { + numa_distance[i * numa_distance_cnt + j] = i == j ? + LOCAL_DISTANCE : REMOTE_DISTANCE; + } + + pr_info("Initialized distance table, cnt=%d\n", numa_distance_cnt); + + return 0; +} + +/** + * numa_set_distance - Set inter node NUMA distance from node to node. + * @from: the 'from' node to set distance + * @to: the 'to' node to set distance + * @distance: NUMA distance + * + * Set the distance from node @from to @to to @distance. + * If distance table doesn't exist, a warning is printed. + * + * If @from or @to is higher than the highest known node or lower than zero + * or @distance doesn't make sense, the call is ignored. + * + */ +void __init numa_set_distance(int from, int to, int distance) +{ + if (!numa_distance) { + pr_warn_once("Warning: distance table not allocated yet\n"); + return; + } + + if (from >= numa_distance_cnt || to >= numa_distance_cnt || + from < 0 || to < 0) { + pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + if ((u8)distance != distance || + (from == to && distance != LOCAL_DISTANCE)) { + pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + numa_distance[from * numa_distance_cnt + to] = distance; +} + +/** + * Return NUMA distance @from to @to + */ +int __node_distance(int from, int to) +{ + if (from >= numa_distance_cnt || to >= numa_distance_cnt) + return from == to ? 
LOCAL_DISTANCE : REMOTE_DISTANCE; + return numa_distance[from * numa_distance_cnt + to]; +} +EXPORT_SYMBOL(__node_distance); + +static int __init numa_register_nodes(void) +{ + int nid; + struct memblock_region *mblk; + + /* Check that valid nid is set to memblks */ + for_each_mem_region(mblk) { + pr_info("memblk node %d [mem %#018llx-%#018llx]\n", + mblk->nid, mblk->base, + mblk->base + mblk->size - 1); + if (mblk->nid == NUMA_NO_NODE || mblk->nid >= MAX_NUMNODES) { + pr_warn("Warning: invalid memblk node %d [mem %#018llx-%#018llx]\n", + mblk->nid, mblk->base, + mblk->base + mblk->size - 1); + return -EINVAL; + } + } + + /* Finally register nodes */ + for_each_node_mask(nid, numa_nodes_parsed) { + unsigned long start_pfn, end_pfn; + + get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); + setup_node_data(nid, start_pfn, end_pfn); + node_set_online(nid); + } + + /* Setup online nodes to actual nodes */ + node_possible_map = numa_nodes_parsed; + + return 0; +} + +static int __init numa_init(int (*init_func)(void)) +{ + int ret; + + nodes_clear(numa_nodes_parsed); + nodes_clear(node_possible_map); + nodes_clear(node_online_map); + numa_free_distance(); + + ret = numa_alloc_distance(); + if (ret < 0) + return ret; + + ret = init_func(); + if (ret < 0) + return ret; + + if (nodes_empty(numa_nodes_parsed)) { + pr_info("No NUMA configuration found\n"); + return -EINVAL; + } + + ret = numa_register_nodes(); + if (ret < 0) + return ret; + + setup_node_to_cpumask_map(); + + return 0; +} + +static void __init get_numa_info_socket(void) +{ + int i; + + phys_addr_t base = 0; + + for (i = 0; i < MAX_NUMSOCKETS; i++) { + if (socket_desc[i].is_online) { + numa_nodes_desc[i].base = base; + numa_nodes_desc[i].size = socket_desc[i].socket_mem; + base += numa_nodes_desc[i].size; + } + } +} + +static int __init manual_numa_init(void) +{ + int ret, nid; + struct memblock_region *mblk; + phys_addr_t node_base, node_size, node_end; + + if (numa_off) { + pr_info("NUMA disabled\n"); /* Forced off on command line. 
*/ + pr_info("Faking one node at [mem %#018llx-%#018llx]\n", + memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1); + for_each_mem_region(mblk) { + ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size); + if (!ret) + continue; + + pr_err("NUMA init failed\n"); + return ret; + } + } else { + get_numa_info_socket(); + + for (nid = 0; nid < MAX_NUMNODES; nid++) { + node_base = numa_nodes_desc[nid].base; + node_size = numa_nodes_desc[nid].size; + node_end = node_base + node_size; + ret = 0; + + if (!node_end) + continue; + + for_each_mem_region(mblk) { + if (mblk->base >= node_base && mblk->base < node_end) { + if (mblk->base + mblk->size < node_end) + ret = numa_add_memblk(nid, mblk->base, mblk->base + mblk->size); + else + ret = numa_add_memblk(nid, mblk->base, node_end); + } + } + + if (!node_size) { + memblock_add_node(node_base, node_size, nid, MEMBLOCK_NONE); + node_set(nid, numa_nodes_parsed); + pr_info("Setup empty node %d from %#llx\n", nid, node_base); + } + + if (!ret) + continue; + + pr_err("NUMA init failed for node %d, [mem %#018llx-%#018llx]", + nid, node_base, node_end - 1); + } + } + + return 0; +} + +void __init sw64_numa_init(void) +{ + if (!numa_off) { + if (!acpi_disabled && !numa_init(acpi_numa_init)) + return; + if (acpi_disabled && !numa_init(of_numa_init)) + return; + } + + numa_init(manual_numa_init); +} + +void cpu_set_node(void) +{ + int i; + + if (numa_off) { + for (i = 0; i < nr_cpu_ids; i++) + cpu_to_node_map[i] = 0; + } else { + int rr, default_node, cid; + + rr = first_node(node_online_map); + for (i = 0; i < nr_cpu_ids; i++) { + cid = cpu_to_rcid(i); + default_node = rcid_to_domain_id(cid); + if (node_online(default_node)) { + cpu_to_node_map[i] = default_node; + } else { + cpu_to_node_map[i] = rr; + rr = next_node(rr, node_online_map); + if (rr == MAX_NUMNODES) + rr = first_node(node_online_map); + } + } + } + /* + * Setup numa_node for cpu 0 before per_cpu area for booting. + * Actual setup of numa_node will be done in native_smp_prepare_cpus(). + */ + set_cpu_numa_node(0, cpu_to_node_map[0]); +} + +void numa_store_cpu_info(unsigned int cpu) +{ + set_cpu_numa_node(cpu, cpu_to_node_map[cpu]); +} + +void __init early_map_cpu_to_node(unsigned int cpu, int nid) +{ + /* fallback to node 0 */ + if (nid < 0 || nid >= MAX_NUMNODES || numa_off) + nid = 0; + + cpu_to_node_map[cpu] = nid; + + /* + * We should set the numa node of cpu0 as soon as possible, because it + * has already been set up online before. cpu_to_node(0) will soon be + * called. + */ + if (!cpu) + set_cpu_numa_node(cpu, nid); +} + +#ifdef CONFIG_DEBUG_PER_CPU_MAPS +/* + * Returns a pointer to the bitmask of CPUs on Node 'node'. 
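 + * Falls back to cpu_all_mask, cpu_none_mask or cpu_online_mask when the + * node is NUMA_NO_NODE, out of range, or its mask is not yet allocated.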
+ */ +const struct cpumask *cpumask_of_node(int node) +{ + + if (node == NUMA_NO_NODE) { + pr_warn("%s: NUMA_NO_NODE\n", __func__); + return cpu_all_mask; + } + + if (WARN_ON(node < 0 || node >= nr_node_ids)) { + pr_warn("%s: invalid node %d\n", __func__, node); + return cpu_none_mask; + } + + if (WARN_ON(node_to_cpumask_map[node] == NULL)) { + pr_warn("%s: uninitialized node %d\n", __func__, node); + return cpu_online_mask; + } + + return node_to_cpumask_map[node]; +} +EXPORT_SYMBOL(cpumask_of_node); +#endif + +static void numa_update_cpu(unsigned int cpu, bool remove) +{ + int nid = cpu_to_node(cpu); + + if (nid == NUMA_NO_NODE) + return; + + if (remove) + cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]); + else + cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); +} + +void numa_add_cpu(unsigned int cpu) +{ + numa_update_cpu(cpu, false); +} + +void numa_remove_cpu(unsigned int cpu) +{ + numa_update_cpu(cpu, true); +} + +void numa_clear_node(unsigned int cpu) +{ + numa_remove_cpu(cpu); + set_cpu_numa_node(cpu, NUMA_NO_NODE); +} diff --git a/arch/sw_64/mm/physaddr.c b/arch/sw_64/mm/physaddr.c new file mode 100644 index 0000000000000000000000000000000000000000..3c6ecb8ee86ab65cf0575977aab841e4161d05f9 --- /dev/null +++ b/arch/sw_64/mm/physaddr.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +unsigned long __phys_addr(unsigned long addr) +{ + VIRTUAL_BUG_ON(addr < PAGE_OFFSET); + addr &= ~PAGE_OFFSET; + VIRTUAL_BUG_ON(!phys_addr_valid(addr)); + return addr; +} +EXPORT_SYMBOL(__phys_addr); + +bool __virt_addr_valid(unsigned long addr) +{ + if (addr < PAGE_OFFSET) + return false; + addr &= ~PAGE_OFFSET; + return pfn_valid(addr >> PAGE_SHIFT); +} +EXPORT_SYMBOL(__virt_addr_valid); + +#ifdef CONFIG_SUBARCH_C3B +#define LEGACY_BOOT_VA 0xffffffff80000000 +unsigned long __boot_phys_addr(unsigned long addr) +{ + if (addr >= LEGACY_BOOT_VA) { + addr &= ~LEGACY_BOOT_VA; + VIRTUAL_BUG_ON(addr >= KERNEL_IMAGE_SIZE); + } else { + VIRTUAL_BUG_ON(addr < PAGE_OFFSET); + addr &= ~PAGE_OFFSET; + VIRTUAL_BUG_ON(!phys_addr_valid(addr)); + } + return addr; +} +#endif diff --git a/arch/sw_64/mm/thp.c b/arch/sw_64/mm/thp.c new file mode 100644 index 0000000000000000000000000000000000000000..833bb59f79d0e9f01fb9813eec8fc5cb24df9da3 --- /dev/null +++ b/arch/sw_64/mm/thp.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + int changed = !pmd_same(*pmdp, entry); + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + if (changed && dirty) { + *pmdp = entry; + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } + + return changed; +} +int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ + int ret = 0; + + if (pmd_young(*pmdp)) + ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, + (unsigned long *)pmdp); + return ret; +} + +int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + int young; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + young = pmdp_test_and_clear_young(vma, address, pmdp); + if (young) + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + + return young; +} +void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + int set; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + set = !test_and_set_bit(_PAGE_BIT_SPLITTING, (unsigned long *)pmdp); + if (set) { + /* need tlb flush only to serialize 
against gup-fast */ + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } +} diff --git a/arch/sw_64/net/Makefile b/arch/sw_64/net/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d4663b4bf509894e62c3b02c69726ee5717c2dd4 --- /dev/null +++ b/arch/sw_64/net/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Arch-specific network modules +# +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o diff --git a/arch/sw_64/net/bpf_jit.h b/arch/sw_64/net/bpf_jit.h new file mode 100644 index 0000000000000000000000000000000000000000..929036d8ea6b10daec13166c1e87f63165d99f1a --- /dev/null +++ b/arch/sw_64/net/bpf_jit.h @@ -0,0 +1,368 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BPF JIT compiler for SW64 + * + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _SW64_NET_BPF_JIT_H +#define _SW64_NET_BPF_JIT_H + +/* SW64 instruction field shift */ +#define SW64_BPF_OPCODE_OFFSET 26 +#define SW64_BPF_RA_OFFSET 21 +#define SW64_BPF_RB_OFFSET 16 +#define SW64_BPF_SIMPLE_ALU_IMM_OFFSET 13 +#define SW64_BPF_SIMPLE_ALU_FUNC_OFFSET 5 +#define SW64_BPF_SIMPLE_ALU_RC_OFFSET 0 +#define SW64_BPF_LS_FUNC_OFFSET 12 + +/* SW64 instruction opcodes */ +#define SW64_BPF_OPCODE_CALL 0x01 +#define SW64_BPF_OPCODE_RET 0x02 +#define SW64_BPF_OPCODE_JMP 0x03 +#define SW64_BPF_OPCODE_BR 0x04 +#define SW64_BPF_OPCODE_BSR 0x05 +#define SW64_BPF_OPCODE_MISC 0x06 +#define SW64_BPF_OPCODE_LOCK 0x08 +#define SW64_BPF_OPCODE_ALU_REG 0x10 +#define SW64_BPF_OPCODE_ALU_IMM 0x12 +#define SW64_BPF_OPCODE_LDBU 0x20 +#define SW64_BPF_OPCODE_LDHU 0x21 +#define SW64_BPF_OPCODE_LDW 0x22 +#define SW64_BPF_OPCODE_LDL 0x23 +#define SW64_BPF_OPCODE_STB 0x28 +#define SW64_BPF_OPCODE_STH 0x29 +#define SW64_BPF_OPCODE_STW 0x2A +#define SW64_BPF_OPCODE_STL 0x2B +#define SW64_BPF_OPCODE_BEQ 0x30 +#define SW64_BPF_OPCODE_BNE 0x31 +#define SW64_BPF_OPCODE_BLT 0x32 +#define SW64_BPF_OPCODE_BLE 0x33 +#define SW64_BPF_OPCODE_BGT 0x34 +#define SW64_BPF_OPCODE_BGE 0x35 +#define SW64_BPF_OPCODE_BLBC 0x36 +#define SW64_BPF_OPCODE_BLBS 0x37 +#define SW64_BPF_OPCODE_LDI 0x3E +#define SW64_BPF_OPCODE_LDIH 0x3F + +/* SW64 MISC instructions function codes */ +#define SW64_BPF_FUNC_MISC_RD_F 0x1000 +#define SW64_BPF_FUNC_MISC_WR_F 0x1020 + +/* SW64 LOCK instructions function codes */ +#define SW64_BPF_FUNC_LOCK_LLDW 0x0 +#define SW64_BPF_FUNC_LOCK_LLDL 0x1 +#define SW64_BPF_FUNC_LOCK_LSTW 0x8 +#define SW64_BPF_FUNC_LOCK_LSTL 0x9 + +/* SW64 ALU instructions function codes */ +#define SW64_BPF_FUNC_ALU_ADDW 0x00 +#define SW64_BPF_FUNC_ALU_SUBW 0x01 +#define SW64_BPF_FUNC_ALU_ADDL 0x08 +#define SW64_BPF_FUNC_ALU_SUBL 0x09 +#define SW64_BPF_FUNC_ALU_MULW 0x10 +#define SW64_BPF_FUNC_ALU_MULL 0x18 +#define SW64_BPF_FUNC_ALU_CMPEQ 0x28 +#define SW64_BPF_FUNC_ALU_CMPLT 0x29 +#define SW64_BPF_FUNC_ALU_CMPLE 0x2A +#define SW64_BPF_FUNC_ALU_CMPULT 0x2B +#define SW64_BPF_FUNC_ALU_CMPULE 0x2C +#define SW64_BPF_FUNC_ALU_AND 0x38 +#define SW64_BPF_FUNC_ALU_BIC 0x39 
+#define SW64_BPF_FUNC_ALU_BIS 0x3A +#define SW64_BPF_FUNC_ALU_ORNOT 0x3B +#define SW64_BPF_FUNC_ALU_XOR 0x3C +#define SW64_BPF_FUNC_ALU_EQV 0x3D +#define SW64_BPF_FUNC_ALU_SLL 0x48 +#define SW64_BPF_FUNC_ALU_SRL 0x49 +#define SW64_BPF_FUNC_ALU_SRA 0x4A +#define SW64_BPF_FUNC_ALU_ZAP 0x68 +#define SW64_BPF_FUNC_ALU_ZAPNOT 0x69 +#define SW64_BPF_FUNC_ALU_SEXTB 0x6A +#define SW64_BPF_FUNC_ALU_SEXTH 0x6B + +/* special instruction used in jit_fill_hole() */ +#define SW64_BPF_ILLEGAL_INSN (0x1ff00000) /* pri_ret/b $31 */ + +enum sw64_bpf_registers { + SW64_BPF_REG_V0 = 0, /* keep return value */ + SW64_BPF_REG_T0 = 1, + SW64_BPF_REG_T1 = 2, + SW64_BPF_REG_T2 = 3, + SW64_BPF_REG_T3 = 4, + SW64_BPF_REG_T4 = 5, + SW64_BPF_REG_T5 = 6, + SW64_BPF_REG_T6 = 7, + SW64_BPF_REG_T7 = 8, + SW64_BPF_REG_S0 = 9, /* callee saved */ + SW64_BPF_REG_S1 = 10, /* callee saved */ + SW64_BPF_REG_S2 = 11, /* callee saved */ + SW64_BPF_REG_S3 = 12, /* callee saved */ + SW64_BPF_REG_S4 = 13, /* callee saved */ + SW64_BPF_REG_S5 = 14, /* callee saved */ + SW64_BPF_REG_S6 = 15, /* callee saved */ + SW64_BPF_REG_FP = 15, /* frame pointer if necessary */ + SW64_BPF_REG_A0 = 16, /* argument 0 */ + SW64_BPF_REG_A1 = 17, /* argument 1 */ + SW64_BPF_REG_A2 = 18, /* argument 2 */ + SW64_BPF_REG_A3 = 19, /* argument 3 */ + SW64_BPF_REG_A4 = 20, /* argument 4 */ + SW64_BPF_REG_A5 = 21, /* argument 5 */ + SW64_BPF_REG_T8 = 22, + SW64_BPF_REG_T9 = 23, + SW64_BPF_REG_T10 = 24, + SW64_BPF_REG_T11 = 25, + SW64_BPF_REG_RA = 26, /* callee saved, keep return address */ + SW64_BPF_REG_T12 = 27, + SW64_BPF_REG_PV = 27, + SW64_BPF_REG_AT = 28, /* reserved by assembler */ + SW64_BPF_REG_GP = 29, /* global pointer */ + SW64_BPF_REG_SP = 30, /* callee saved, stack pointer */ + SW64_BPF_REG_ZR = 31 /* read 0 */ +}; + +/* SW64 load and store instructions */ +#define SW64_BPF_LDBU(dst, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDBU, dst, rb, offset16) +#define SW64_BPF_LDHU(dst, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDHU, dst, rb, offset16) +#define SW64_BPF_LDW(dst, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDW, dst, rb, offset16) +#define SW64_BPF_LDL(dst, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDL, dst, rb, offset16) +#define SW64_BPF_STB(src, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STB, src, rb, offset16) +#define SW64_BPF_STH(src, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STH, src, rb, offset16) +#define SW64_BPF_STW(src, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STW, src, rb, offset16) +#define SW64_BPF_STL(src, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STL, src, rb, offset16) +#define SW64_BPF_LDI(dst, rb, imm16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDI, dst, rb, imm16) +#define SW64_BPF_LDIH(dst, rb, imm16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDIH, dst, rb, imm16) + +/* SW64 lock instructions */ +#define SW64_BPF_LLDW(ra, rb, offset16) \ + sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \ + ra, rb, offset16, SW64_BPF_FUNC_LOCK_LLDW) +#define SW64_BPF_LLDL(ra, rb, offset16) \ + sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \ + ra, rb, offset16, SW64_BPF_FUNC_LOCK_LLDL) +#define SW64_BPF_LSTW(ra, rb, offset16) \ + sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \ + ra, rb, offset16, SW64_BPF_FUNC_LOCK_LSTW) +#define SW64_BPF_LSTL(ra, rb, offset16) \ + sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \ + ra, rb, offset16, SW64_BPF_FUNC_LOCK_LSTL) +#define SW64_BPF_RD_F(ra) \ +
sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_MISC, \ + ra, SW64_BPF_REG_ZR, SW64_BPF_FUNC_MISC_RD_F) +#define SW64_BPF_WR_F(ra) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_MISC, \ + ra, SW64_BPF_REG_ZR, SW64_BPF_FUNC_MISC_WR_F) + +/* SW64 ALU instructions REG format */ +#define SW64_BPF_ADDW_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ADDW) +#define SW64_BPF_ADDL_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ADDL) +#define SW64_BPF_SUBW_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_SUBW) +#define SW64_BPF_SUBL_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_SUBL) +#define SW64_BPF_MULW_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_MULW) +#define SW64_BPF_MULL_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_MULL) +#define SW64_BPF_ZAP_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ZAP) +#define SW64_BPF_ZAPNOT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ZAPNOT) +#define SW64_BPF_SEXTB_REG(rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + SW64_BPF_REG_ZR, rb, dst, SW64_BPF_FUNC_ALU_SEXTB) +#define SW64_BPF_SEXTH_REG(rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + SW64_BPF_REG_ZR, rb, dst, SW64_BPF_FUNC_ALU_SEXTH) + +/* SW64 ALU instructions IMM format */ +#define SW64_BPF_ADDW_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ADDW) +#define SW64_BPF_ADDL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ADDL) +#define SW64_BPF_SUBW_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_SUBW) +#define SW64_BPF_SUBL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_SUBL) +#define SW64_BPF_MULW_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_MULW) +#define SW64_BPF_MULL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_MULL) +#define SW64_BPF_ZAP_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ZAP) +#define SW64_BPF_ZAPNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ZAPNOT) +#define SW64_BPF_SEXTB_IMM(imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + SW64_BPF_REG_ZR, imm8, dst, SW64_BPF_FUNC_ALU_SEXTB) +#define SW64_BPF_SEXTH_IMM(imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + SW64_BPF_REG_ZR, imm8, dst, SW64_BPF_FUNC_ALU_SEXTH) + +/* SW64 bit shift instructions REG format */ +#define SW64_BPF_SLL_REG(src, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SLL) +#define SW64_BPF_SRL_REG(src, rb, dst) \ + 
sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SRL) +#define SW64_BPF_SRA_REG(src, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SRA) + +/* SW64 bit shift instructions IMM format */ +#define SW64_BPF_SLL_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SLL) +#define SW64_BPF_SRL_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SRL) +#define SW64_BPF_SRA_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SRA) + +/* SW64 control instructions */ +#define SW64_BPF_CALL(ra, rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_CALL, ra, rb, 0) +#define SW64_BPF_RET(rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_RET, SW64_BPF_REG_ZR, rb, 0) +#define SW64_BPF_JMP(ra, rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_JMP, ra, rb, 0) +#define SW64_BPF_BR(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BR, ra, offset) +#define SW64_BPF_BSR(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BSR, ra, offset) +#define SW64_BPF_BEQ(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BEQ, ra, offset) +#define SW64_BPF_BNE(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BNE, ra, offset) +#define SW64_BPF_BLT(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLT, ra, offset) +#define SW64_BPF_BLE(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLE, ra, offset) +#define SW64_BPF_BGT(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BGT, ra, offset) +#define SW64_BPF_BGE(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BGE, ra, offset) +#define SW64_BPF_BLBC(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLBC, ra, offset) +#define SW64_BPF_BLBS(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLBS, ra, offset) + +/* SW64 bit logic instructions REG format */ +#define SW64_BPF_AND_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_AND) +#define SW64_BPF_ANDNOT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_BIC) +#define SW64_BPF_BIS_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_BIS) +#define SW64_BPF_ORNOT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ORNOT) +#define SW64_BPF_XOR_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_XOR) +#define SW64_BPF_EQV_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_EQV) + +/* SW64 bit logic instructions IMM format */ +#define SW64_BPF_AND_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_AND) +#define SW64_BPF_ANDNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_BIC) +#define SW64_BPF_BIS_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_BIS) +#define SW64_BPF_ORNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ORNOT) 
+#define SW64_BPF_XOR_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_XOR) +#define SW64_BPF_EQV_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_EQV) + +/* SW64 compare instructions REG format */ +#define SW64_BPF_CMPEQ_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPEQ) +#define SW64_BPF_CMPLT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPLT) +#define SW64_BPF_CMPLE_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPLE) +#define SW64_BPF_CMPULT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPULT) +#define SW64_BPF_CMPULE_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPULE) + +/* SW64 compare instructions imm format */ +#define SW64_BPF_CMPEQ_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPEQ) +#define SW64_BPF_CMPLT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPLT) +#define SW64_BPF_CMPLE_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPLE) +#define SW64_BPF_CMPULT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPULT) +#define SW64_BPF_CMPULE_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPULE) + +#endif /* _SW64_NET_BPF_JIT_H */ diff --git a/arch/sw_64/net/bpf_jit_comp.c b/arch/sw_64/net/bpf_jit_comp.c new file mode 100644 index 0000000000000000000000000000000000000000..31202dd0f9cf8dd8fd51d0d30c94ea422d74c8b7 --- /dev/null +++ b/arch/sw_64/net/bpf_jit_comp.c @@ -0,0 +1,1455 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BPF JIT compiler for SW64 + * + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * This file is taken from arch/arm64/net/bpf_jit_comp.c + * Copyright (C) 2014-2016 Zi Shen Lim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include +#include +#include + +#include + +#include "bpf_jit.h" + +#define TCALL_CNT (MAX_BPF_JIT_REG + 0) + +static const int bpf2sw64[] = { + /* return value from in-kernel function, and exit value from eBPF */ + [BPF_REG_0] = SW64_BPF_REG_V0, + /* arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = SW64_BPF_REG_A0, + [BPF_REG_2] = SW64_BPF_REG_A1, + [BPF_REG_3] = SW64_BPF_REG_A2, + [BPF_REG_4] = SW64_BPF_REG_A3, + [BPF_REG_5] = SW64_BPF_REG_A4, + /* callee saved registers that in-kernel function will preserve */ + [BPF_REG_6] = SW64_BPF_REG_S0, + [BPF_REG_7] = SW64_BPF_REG_S1, + [BPF_REG_8] = SW64_BPF_REG_S2, + [BPF_REG_9] = SW64_BPF_REG_S3, + /* read-only frame pointer to access stack */ + [BPF_REG_FP] = SW64_BPF_REG_FP, + /* tail_call_cnt */ + [TCALL_CNT] = SW64_BPF_REG_S4, + /* temporary register for blinding constants */ + [BPF_REG_AX] = SW64_BPF_REG_T11, +}; + +struct jit_ctx { + const struct bpf_prog *prog; + int idx; // JITed instruction index + int current_tmp_reg; + int epilogue_offset; + int *insn_offset; // [bpf_insn_idx] = jited_insn_idx + int exentry_idx; + u32 *image; // JITed instruction + u32 stack_size; +}; + +struct sw64_jit_data { + struct bpf_binary_header *header; + u8 *image; // bpf instruction + struct jit_ctx ctx; +}; + +static inline u32 sw64_bpf_gen_format_br(int opcode, enum sw64_bpf_registers ra, u32 disp) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + return opcode | ra | (disp & 0x1fffff); +} + +static inline u32 sw64_bpf_gen_format_ls(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, u16 disp) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + return opcode | ra | rb | (disp & 0xffff); +} + +static inline u32 sw64_bpf_gen_format_ls_func(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, u16 disp, int function) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + function = function << SW64_BPF_LS_FUNC_OFFSET; + return opcode | ra | rb | function | (disp & 0xfff); +} + +static inline u32 sw64_bpf_gen_format_simple_alu_reg(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, enum sw64_bpf_registers rc, int function) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + rc = rc << SW64_BPF_SIMPLE_ALU_RC_OFFSET; + function = function << SW64_BPF_SIMPLE_ALU_FUNC_OFFSET; + return opcode | ra | rb | function | rc; +} + +static inline u32 sw64_bpf_gen_format_simple_alu_imm(int opcode, enum sw64_bpf_registers ra, + u32 imm, enum sw64_bpf_registers rc, int function) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + imm = (imm & 0xff) << SW64_BPF_SIMPLE_ALU_IMM_OFFSET; + rc = rc << SW64_BPF_SIMPLE_ALU_RC_OFFSET; + function = function << SW64_BPF_SIMPLE_ALU_FUNC_OFFSET; + return opcode | ra | imm | function | rc; +} + +static inline void emit(const u32 insn, struct jit_ctx *ctx) +{ + if (ctx->image != NULL) + ctx->image[ctx->idx] = insn; + + ctx->idx++; +} + +static inline int get_tmp_reg(struct jit_ctx *ctx) +{ + ctx->current_tmp_reg++; + /* Do not use 22-25. Should be more than enough. 
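Temporaries come from $1-$7 (t0-t6 in the enum in bpf_jit.h); $22-$25 (t8-t11) are left alone since, per the bpf2sw64 map above, t11 backs BPF_REG_AX for constant blinding.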
*/ + if (unlikely(ctx->current_tmp_reg == 8)) { + pr_err("eBPF JIT %s[%d]: not enough temporary registers!\n", + current->comm, current->pid); + return -1; + } + return ctx->current_tmp_reg; +} + +static inline void put_tmp_reg(struct jit_ctx *ctx) +{ + ctx->current_tmp_reg--; + if (ctx->current_tmp_reg == 21) + ctx->current_tmp_reg = 7; +} + +static void emit_sw64_ldu32(const int dst, const u32 imm, struct jit_ctx *ctx) +{ + u16 imm_tmp; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm >= U32_MAX - S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + put_tmp_reg(ctx); + return; + } + + imm_tmp = (imm >> 30) & 3; + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm_tmp), ctx); + if (imm_tmp) + emit(SW64_BPF_SLL_IMM(dst, 30, dst), ctx); + + imm_tmp = (imm >> 15) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = imm & 0x7fff; + if (imm_tmp) + emit(SW64_BPF_LDI(dst, dst, imm_tmp), ctx); + + put_tmp_reg(ctx); +} + +static void emit_sw64_lds32(const int dst, const s32 imm, struct jit_ctx *ctx) +{ + s16 hi = imm >> 16; + s16 lo = imm & 0xffff; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + emit(SW64_BPF_LDIH(dst, SW64_BPF_REG_ZR, hi), ctx); + if (lo & 0x8000) { // sign bit is 1 + lo = lo & 0x7fff; + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + if (lo) + emit(SW64_BPF_LDI(dst, dst, lo), ctx); + } else { // sign bit is 0 + if (lo) + emit(SW64_BPF_LDI(dst, dst, lo), ctx); + } + + put_tmp_reg(ctx); +} + +static void emit_sw64_ldu64(const int dst, const u64 imm, struct jit_ctx *ctx) +{ + u16 imm_tmp; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm <= U32_MAX) { + put_tmp_reg(ctx); + return emit_sw64_ldu32(dst, (u32)imm, ctx); + } + + if (imm >= (U64_MAX - S16_MAX) || imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + imm_tmp = (imm >> 60) & 0xf; + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm_tmp), ctx); + if (imm_tmp) + emit(SW64_BPF_SLL_IMM(dst, 60, dst), ctx); + + imm_tmp = (imm >> 45) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 45, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = (imm >> 30) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 30, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = (imm >> 15) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = imm & 
0x7fff; + if (imm_tmp) + emit(SW64_BPF_LDI(dst, dst, imm_tmp), ctx); + + put_tmp_reg(ctx); +} + +/* Do not change!!! See arch/sw_64/lib/divide.S for more detail */ +#define REG(x) "$"str(x) +#define str(x) #x +#define DIV_RET_ADDR 23 +#define DIVIDEND 24 +#define DIVISOR 25 +#define RESULT 27 + +#include +static void emit_sw64_divmod(const int dst, const int src, struct jit_ctx *ctx, u8 code) +{ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, dst, DIVIDEND), ctx); + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, DIVISOR), ctx); + switch (BPF_CLASS(code)) { + case BPF_ALU: + switch (BPF_OP(code)) { + case BPF_DIV: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__divwu, ctx); + break; + case BPF_MOD: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__remwu, ctx); + break; + } + emit(SW64_BPF_CALL(DIV_RET_ADDR, SW64_BPF_REG_PV), ctx); + emit(SW64_BPF_ZAP_IMM(RESULT, 0xf0, dst), ctx); + break; + case BPF_ALU64: + switch (BPF_OP(code)) { + case BPF_DIV: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__divlu, ctx); + break; + case BPF_MOD: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__remlu, ctx); + break; + } + emit(SW64_BPF_CALL(DIV_RET_ADDR, SW64_BPF_REG_PV), ctx); + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, RESULT, dst), ctx); + break; + } +} + +#undef REG +#undef str +#undef DIVIDEND +#undef DIVISOR +#undef RESULT + +/* STX XADD: lock *(u32 *)(dst + off) += src */ +static void emit_sw64_xadd32(const int src, int dst, s16 off, struct jit_ctx *ctx) +{ + int atomic_start; + int atomic_end; + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + u8 tmp3 = get_tmp_reg(ctx); + + if (off < -0x800 || off > 0x7ff) { + emit(SW64_BPF_LDI(tmp1, dst, off), ctx); + dst = tmp1; + off = 0; + } + + atomic_start = ctx->idx; + emit(SW64_BPF_LLDW(tmp2, dst, off), ctx); + emit(SW64_BPF_LDI(tmp3, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_WR_F(tmp3), ctx); + emit(SW64_BPF_ADDW_REG(tmp2, src, tmp2), ctx); + if (ctx->idx & 1) + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + emit(SW64_BPF_LSTW(tmp2, dst, off), ctx); + emit(SW64_BPF_RD_F(tmp3), ctx); + atomic_end = ctx->idx; + emit(SW64_BPF_BEQ(tmp3, atomic_start - atomic_end - 1), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +/* STX XADD: lock *(u64 *)(dst + off) += src */ +static void emit_sw64_xadd64(const int src, int dst, s16 off, struct jit_ctx *ctx) +{ + int atomic_start; + int atomic_end; + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + u8 tmp3 = get_tmp_reg(ctx); + + if (off < -0x800 || off > 0x7ff) { + emit(SW64_BPF_LDI(tmp1, dst, off), ctx); + dst = tmp1; + off = 0; + } + + atomic_start = ctx->idx; + emit(SW64_BPF_LLDL(tmp2, dst, off), ctx); + emit(SW64_BPF_LDI(tmp3, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_WR_F(tmp3), ctx); + emit(SW64_BPF_ADDL_REG(tmp2, src, tmp2), ctx); + if (ctx->idx & 1) + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + emit(SW64_BPF_LSTL(tmp2, dst, off), ctx); + emit(SW64_BPF_RD_F(tmp3), ctx); + atomic_end = ctx->idx; + emit(SW64_BPF_BEQ(tmp3, atomic_start - atomic_end - 1), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe16(const int dst, struct jit_ctx *ctx) +{ + u8 tmp = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x2, tmp), ctx); + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x1, dst), ctx); + emit(SW64_BPF_SRL_IMM(tmp, 8, tmp), ctx); + emit(SW64_BPF_SLL_IMM(dst, 8, dst), ctx); + emit(SW64_BPF_BIS_REG(dst, tmp, dst), ctx); + + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe32(const int dst, struct 
jit_ctx *ctx) +{ + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x8, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 24, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x4, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x2, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x1, dst), ctx); + emit(SW64_BPF_SLL_IMM(dst, 24, dst), ctx); + emit(SW64_BPF_BIS_REG(dst, tmp2, dst), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe64(const int dst, struct jit_ctx *ctx) +{ + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x80, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 56, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x40, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 40, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x20, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 24, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x10, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x08, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x04, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 24, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x02, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 40, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x01, dst), ctx); + emit(SW64_BPF_SLL_IMM(dst, 56, dst), ctx); + emit(SW64_BPF_BIS_REG(dst, tmp2, dst), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void jit_fill_hole(void *area, unsigned int size) +{ + unsigned long c = SW64_BPF_ILLEGAL_INSN; + + c |= c << 32; + __constant_c_memset(area, c, size); +} + +static int offset_to_epilogue(const struct jit_ctx *ctx); +static int bpf2sw64_offset(int bpf_idx, s32 off, const struct jit_ctx *ctx) +{ + int from = ctx->insn_offset[bpf_idx + 1]; + int to = ctx->insn_offset[bpf_idx + 1 + off]; + + if (ctx->image == NULL) + return 0; + + return to - from; +} + +static int offset_to_epilogue(const struct jit_ctx *ctx) +{ + if (ctx->image == NULL) + return 0; + + return ctx->epilogue_offset - ctx->idx; +} + +/* For tail call, jump to set up function call stack */ +#define PROLOGUE_OFFSET 11 + +static void build_prologue(struct jit_ctx *ctx, bool was_classic) +{ + const u8 r6 = bpf2sw64[BPF_REG_6]; + const u8 r7 = bpf2sw64[BPF_REG_7]; + const u8 r8 = bpf2sw64[BPF_REG_8]; + const u8 r9 = bpf2sw64[BPF_REG_9]; + const u8 fp = bpf2sw64[BPF_REG_FP]; + const u8 tcc = bpf2sw64[TCALL_CNT]; + + /* Save callee-saved registers */ + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, -64), ctx); + emit(SW64_BPF_STL(SW64_BPF_REG_RA, SW64_BPF_REG_SP, 0), ctx); + emit(SW64_BPF_STL(fp, SW64_BPF_REG_SP, 8), ctx); + emit(SW64_BPF_STL(r6, SW64_BPF_REG_SP, 16), ctx); + emit(SW64_BPF_STL(r7, SW64_BPF_REG_SP, 24), ctx); + emit(SW64_BPF_STL(r8, SW64_BPF_REG_SP, 32), ctx); + emit(SW64_BPF_STL(r9, SW64_BPF_REG_SP, 40), ctx); + emit(SW64_BPF_STL(tcc, SW64_BPF_REG_SP, 48), ctx); + emit(SW64_BPF_STL(SW64_BPF_REG_GP, SW64_BPF_REG_SP, 56), ctx); + + /* Set up BPF prog stack base register 
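(the 64-byte area above holds ra at sp+0, fp at sp+8, r6-r9 at sp+16..40, the tail-call counter at sp+48 and gp at sp+56; fp snapshots sp here, then the LDI below carves out stack_depth rounded up to a multiple of 16)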
*/ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_SP, fp), ctx); + if (!was_classic) + /* Initialize tail_call_cnt */ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, tcc), ctx); + + /* Set up function call stack */ + ctx->stack_size = (ctx->prog->aux->stack_depth + 15) & (~15); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, -ctx->stack_size), ctx); +} + +static void build_epilogue(struct jit_ctx *ctx) +{ + const u8 r6 = bpf2sw64[BPF_REG_6]; + const u8 r7 = bpf2sw64[BPF_REG_7]; + const u8 r8 = bpf2sw64[BPF_REG_8]; + const u8 r9 = bpf2sw64[BPF_REG_9]; + const u8 fp = bpf2sw64[BPF_REG_FP]; + const u8 tcc = bpf2sw64[TCALL_CNT]; + + /* Destroy function call stack */ + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, ctx->stack_size), ctx); + + /* Restore callee-saved registers */ + emit(SW64_BPF_LDL(SW64_BPF_REG_RA, SW64_BPF_REG_SP, 0), ctx); + emit(SW64_BPF_LDL(fp, SW64_BPF_REG_SP, 8), ctx); + emit(SW64_BPF_LDL(r6, SW64_BPF_REG_SP, 16), ctx); + emit(SW64_BPF_LDL(r7, SW64_BPF_REG_SP, 24), ctx); + emit(SW64_BPF_LDL(r8, SW64_BPF_REG_SP, 32), ctx); + emit(SW64_BPF_LDL(r9, SW64_BPF_REG_SP, 40), ctx); + emit(SW64_BPF_LDL(tcc, SW64_BPF_REG_SP, 48), ctx); + emit(SW64_BPF_LDL(SW64_BPF_REG_GP, SW64_BPF_REG_SP, 56), ctx); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, 64), ctx); + + /* Return */ + emit(SW64_BPF_RET(SW64_BPF_REG_RA), ctx); +} + +static int emit_bpf_tail_call(struct jit_ctx *ctx) +{ + /* bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) */ + const u8 r2 = bpf2sw64[BPF_REG_2]; /* struct bpf_array *array */ + const u8 r3 = bpf2sw64[BPF_REG_3]; /* u32 index */ + + const u8 tmp = get_tmp_reg(ctx); + const u8 prg = get_tmp_reg(ctx); + const u8 tcc = bpf2sw64[TCALL_CNT]; + u64 offset; + static int out_idx; +#define out_offset (ctx->image ? 
(out_idx - ctx->idx - 1) : 0) + + /* if (index >= array->map.max_entries) + * goto out; + */ + offset = offsetof(struct bpf_array, map.max_entries); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(r2, tmp, tmp), ctx); /* tmp = r2 + tmp = &map.max_entries */ + emit(SW64_BPF_LDW(tmp, tmp, 0), ctx); /* tmp = *tmp = map.max_entries */ + emit(SW64_BPF_ZAP_IMM(tmp, 0xf0, tmp), ctx); /* map.max_entries is u32 */ + emit(SW64_BPF_ZAP_IMM(r3, 0xf0, r3), ctx); /* index is u32 */ + emit(SW64_BPF_CMPULE_REG(tmp, r3, tmp), ctx); + emit(SW64_BPF_BNE(tmp, out_offset), ctx); + + /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * goto out; + * tail_call_cnt++; + */ + emit_sw64_ldu64(tmp, MAX_TAIL_CALL_CNT, ctx); + emit(SW64_BPF_CMPULT_REG(tmp, tcc, tmp), ctx); + emit(SW64_BPF_BNE(tmp, out_offset), ctx); + emit(SW64_BPF_ADDL_IMM(tcc, 1, tcc), ctx); + + /* prog = array->ptrs[index]; + * if (prog == NULL) + * goto out; + */ + offset = offsetof(struct bpf_array, ptrs); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(r2, tmp, tmp), ctx); /* tmp = r2 + tmp = &ptrs[0] */ + emit(SW64_BPF_SLL_IMM(r3, 3, prg), ctx); /* prg = r3 * 8, each entry is a pointer */ + emit(SW64_BPF_ADDL_REG(tmp, prg, prg), ctx); /* prg = tmp + prg = &ptrs[index] */ + emit(SW64_BPF_LDL(prg, prg, 0), ctx); /* prg = *prg = ptrs[index] = prog */ + emit(SW64_BPF_BEQ(prg, out_offset), ctx); + + /* goto *(prog->bpf_func + prologue_offset); */ + offset = offsetof(struct bpf_prog, bpf_func); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(prg, tmp, tmp), ctx); /* tmp = prg + tmp = &bpf_func */ + emit(SW64_BPF_LDL(tmp, tmp, 0), ctx); /* tmp = *tmp = bpf_func */ + emit(SW64_BPF_BEQ(tmp, out_offset), ctx); + emit(SW64_BPF_LDI(tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, ctx->stack_size), ctx); + emit(SW64_BPF_JMP(SW64_BPF_REG_ZR, tmp), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + + /* out */ + if (ctx->image == NULL) + out_idx = ctx->idx; + if (ctx->image != NULL && out_idx <= 0) + return -1; +#undef out_offset + return 0; +} + +/* For accesses to BTF pointers, add an entry to the exception table */ +static int add_exception_handler(const struct bpf_insn *insn, + struct jit_ctx *ctx, + int dst_reg) +{ + off_t offset; + unsigned long pc; + struct exception_table_entry *ex; + + if (!ctx->image) + /* First pass */ + return 0; + + if (!ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM) + return 0; + + if (WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries)) + return -EINVAL; + + ex = &ctx->prog->aux->extable[ctx->exentry_idx]; + pc = (unsigned long)&ctx->image[ctx->idx - 1]; + + offset = (long)&ex->insn - pc; + ex->insn = offset; + + ex->fixup.bits.nextinsn = sizeof(u32); + ex->fixup.bits.valreg = dst_reg; + ex->fixup.bits.errreg = SW64_BPF_REG_ZR; + + ctx->exentry_idx++; + return 0; +} + +/* JITs an eBPF instruction. + * Returns: + * 0 - successfully JITed an 8-byte eBPF instruction. + * >0 - successfully JITed a 16-byte eBPF instruction. + * <0 - failed to JIT. 
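+ * Note: two scratch registers (tmp1/tmp2) are reserved on entry and released
+ * before return; e.g. BPF_ALU64|BPF_ADD|BPF_X needs neither and becomes a
+ * single ADDL_REG, while most BPF_K cases first materialize imm into tmp1.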
+ */ +static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) +{ + const u8 code = insn->code; + u8 dst = bpf2sw64[insn->dst_reg]; + u8 src = bpf2sw64[insn->src_reg]; + const u8 tmp1 __maybe_unused = get_tmp_reg(ctx); + const u8 tmp2 __maybe_unused = get_tmp_reg(ctx); + const s16 off = insn->off; + const s32 imm = insn->imm; + const int bpf_idx = insn - ctx->prog->insnsi; + s32 jmp_offset; + u64 func; + struct bpf_insn insn1; + u64 imm64; + int ret; + + switch (code) { + case BPF_ALU | BPF_MOV | BPF_X: + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MOV | BPF_X: + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, dst), ctx); + break; + case BPF_ALU | BPF_ADD | BPF_X: + emit(SW64_BPF_ADDW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ADD | BPF_X: + emit(SW64_BPF_ADDL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_SUB | BPF_X: + emit(SW64_BPF_SUBW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_SUB | BPF_X: + emit(SW64_BPF_SUBL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_MUL | BPF_X: + emit(SW64_BPF_MULW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MUL | BPF_X: + emit(SW64_BPF_MULL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_DIV | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU64 | BPF_DIV | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU | BPF_MOD | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU64 | BPF_MOD | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU | BPF_LSH | BPF_X: + emit(SW64_BPF_SLL_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_LSH | BPF_X: + emit(SW64_BPF_SLL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_RSH | BPF_X: + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + fallthrough; + case BPF_ALU64 | BPF_RSH | BPF_X: + emit(SW64_BPF_SRL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_X: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + emit(SW64_BPF_SRA_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit(SW64_BPF_SRA_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_AND | BPF_X: + emit(SW64_BPF_AND_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_AND | BPF_X: + emit(SW64_BPF_AND_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_OR | BPF_X: + emit(SW64_BPF_BIS_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_OR | BPF_X: + emit(SW64_BPF_BIS_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_XOR | BPF_X: + emit(SW64_BPF_XOR_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_XOR | BPF_X: + emit(SW64_BPF_XOR_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_NEG: + emit(SW64_BPF_SUBW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_NEG: + emit(SW64_BPF_SUBL_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + break; + case BPF_ALU | BPF_END | BPF_TO_LE: + switch (imm) { + case 16: + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x3, dst), ctx); + break; + case 32: + emit(SW64_BPF_ZAPNOT_IMM(dst, 
0xf, dst), ctx); + break; + case 64: + break; + default: + pr_err("eBPF JIT %s[%d]: BPF_TO_LE unknown size\n", + current->comm, current->pid); + return -EINVAL; + } + break; + case BPF_ALU | BPF_END | BPF_TO_BE: + switch (imm) { + case 16: + emit_sw64_htobe16(dst, ctx); + break; + case 32: + emit_sw64_htobe32(dst, ctx); + break; + case 64: + emit_sw64_htobe64(dst, ctx); + break; + default: + pr_err("eBPF JIT %s[%d]: BPF_TO_BE unknown size\n", + current->comm, current->pid); + return -EINVAL; + } + break; + + case BPF_ALU | BPF_MOV | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + else + emit_sw64_ldu32(dst, imm, ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MOV | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + else + emit_sw64_lds32(dst, imm, ctx); + break; + case BPF_ALU | BPF_ADD | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, dst, imm), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_ADDW_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ADD | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, dst, imm), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_ADDL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_SUB | BPF_K: + if (imm >= -S16_MAX && imm <= -S16_MIN) { + emit(SW64_BPF_LDI(dst, dst, -imm), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SUBL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_SUB | BPF_K: + if (imm >= -S16_MAX && imm <= -S16_MIN) { + emit(SW64_BPF_LDI(dst, dst, -imm), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SUBL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_MUL | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_MULL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_MULL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MUL | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_MULL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_MULL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_DIV | BPF_K: + emit_sw64_ldu32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU64 | BPF_DIV | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU | BPF_MOD | BPF_K: + emit_sw64_ldu32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU64 | BPF_MOD | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU | BPF_LSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SLL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SLL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_LSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SLL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SLL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_RSH | BPF_K: + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRL_IMM(dst, imm, dst), ctx); + 
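/* upper 32 bits are already cleared by the ZAP above, so a plain SRL yields the 32-bit result */ +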
} else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SRL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU64 | BPF_RSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SRL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_ARSH | BPF_K: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRA_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SRA_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ARSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRA_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SRA_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_AND | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_AND_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_AND_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_AND | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_AND_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_AND_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_OR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_BIS_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_BIS_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_OR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_BIS_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_BIS_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_XOR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_XOR_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_XOR_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_XOR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_XOR_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_XOR_REG(dst, tmp1, dst), ctx); + } + break; + + case BPF_JMP | BPF_JA: + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BR(SW64_BPF_REG_ZR, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, src, tmp1), ctx); + src = tmp1; + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, tmp2), ctx); + dst = tmp2; + fallthrough; + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | 
BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: + switch (BPF_OP(code)) { + case BPF_JEQ: + emit(SW64_BPF_CMPEQ_REG(dst, src, tmp1), ctx); + break; + case BPF_JGT: + emit(SW64_BPF_CMPULT_REG(src, dst, tmp1), ctx); + break; + case BPF_JLT: + emit(SW64_BPF_CMPULT_REG(dst, src, tmp1), ctx); + break; + case BPF_JGE: + emit(SW64_BPF_CMPULE_REG(src, dst, tmp1), ctx); + break; + case BPF_JLE: + emit(SW64_BPF_CMPULE_REG(dst, src, tmp1), ctx); + break; + case BPF_JNE: + emit(SW64_BPF_CMPEQ_REG(dst, src, tmp1), ctx); + emit(SW64_BPF_XOR_IMM(tmp1, 1, tmp1), ctx); + break; + case BPF_JSGT: + emit(SW64_BPF_CMPLT_REG(src, dst, tmp1), ctx); + break; + case BPF_JSLT: + emit(SW64_BPF_CMPLT_REG(dst, src, tmp1), ctx); + break; + case BPF_JSGE: + emit(SW64_BPF_CMPLE_REG(src, dst, tmp1), ctx); + break; + case BPF_JSLE: + emit(SW64_BPF_CMPLE_REG(dst, src, tmp1), ctx); + break; + case BPF_JSET: + emit(SW64_BPF_AND_REG(dst, src, tmp1), ctx); + break; + } + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BNE(tmp1, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, tmp2), ctx); + dst = tmp2; + fallthrough; + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP | BPF_JSET | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + switch (BPF_OP(code)) { + case BPF_JEQ: + emit(SW64_BPF_CMPEQ_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JGT: + emit(SW64_BPF_CMPULT_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JLT: + emit(SW64_BPF_CMPULT_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JGE: + emit(SW64_BPF_CMPULE_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JLE: + emit(SW64_BPF_CMPULE_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JNE: + emit(SW64_BPF_CMPEQ_REG(dst, tmp1, tmp2), ctx); + emit(SW64_BPF_XOR_IMM(tmp2, 1, tmp2), ctx); + break; + case BPF_JSGT: + emit(SW64_BPF_CMPLT_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JSLT: + emit(SW64_BPF_CMPLT_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JSGE: + emit(SW64_BPF_CMPLE_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JSLE: + emit(SW64_BPF_CMPLE_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JSET: + emit(SW64_BPF_AND_REG(dst, tmp1, tmp2), ctx); + break; + } + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BNE(tmp2, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP | BPF_CALL: + func = (u64)__bpf_call_base + imm; + if ((func & ~(KERNEL_IMAGE_SIZE - 1)) != __START_KERNEL_map) + /* 
calling bpf program, switch to vmalloc addr */ + func = (func & U32_MAX) | VMALLOC_START; + emit_sw64_ldu64(SW64_BPF_REG_PV, func, ctx); + emit(SW64_BPF_CALL(SW64_BPF_REG_RA, SW64_BPF_REG_PV), ctx); + break; + + case BPF_JMP | BPF_TAIL_CALL: + if (emit_bpf_tail_call(ctx)) + return -EFAULT; + break; + + case BPF_JMP | BPF_EXIT: + // if this is the last bpf instruction, skip to epilogue + if (bpf_idx == ctx->prog->len - 1) + break; + jmp_offset = offset_to_epilogue(ctx) - 1; + // epilogue is always at the end, must jump forward + if (jmp_offset >= -1 && jmp_offset <= 0xfffff) { + if (ctx->image && !jmp_offset) + // if this is the last jited instruction, generate nop + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + else + emit(SW64_BPF_BR(SW64_BPF_REG_ZR, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_EXIT out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_LD | BPF_IMM | BPF_DW: + insn1 = insn[1]; + imm64 = ((u64)insn1.imm << 32) | (u32)imm; + emit_sw64_ldu64(dst, imm64, ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); + return 1; + + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEM | BPF_B: + switch (BPF_SIZE(code)) { + case BPF_W: + emit(SW64_BPF_LDW(dst, src, off), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_H: + emit(SW64_BPF_LDHU(dst, src, off), ctx); + break; + case BPF_B: + emit(SW64_BPF_LDBU(dst, src, off), ctx); + break; + case BPF_DW: + emit(SW64_BPF_LDL(dst, src, off), ctx); + break; + } + + ret = add_exception_handler(insn, ctx, dst); + if (ret) + return ret; + break; + + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_DW: + /* Load imm to a register then store it */ + emit_sw64_lds32(tmp1, imm, ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + emit(SW64_BPF_STW(tmp1, dst, off), ctx); + break; + case BPF_H: + emit(SW64_BPF_STH(tmp1, dst, off), ctx); + break; + case BPF_B: + emit(SW64_BPF_STB(tmp1, dst, off), ctx); + break; + case BPF_DW: + emit(SW64_BPF_STL(tmp1, dst, off), ctx); + break; + } + break; + + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_W: + emit(SW64_BPF_STW(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_H: + emit(SW64_BPF_STH(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_B: + emit(SW64_BPF_STB(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_DW: + emit(SW64_BPF_STL(src, dst, off), ctx); + break; + + /* STX XADD: lock *(u32 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_W: + emit_sw64_xadd32(src, dst, off, ctx); + break; + /* STX XADD: lock *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + emit_sw64_xadd64(src, dst, off, ctx); + break; + + default: + pr_err("eBPF JIT %s[%d]: unknown opcode 0x%02x\n", + current->comm, current->pid, code); + return -EINVAL; + } + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + return 0; +} + +static int build_body(struct jit_ctx *ctx) +{ + const struct bpf_prog *prog = ctx->prog; + int i; + + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + int ret; + + if (ctx->image == NULL) + 
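/* sizing pass: record where this BPF insn starts in the JITed image */ +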
ctx->insn_offset[i] = ctx->idx; + ret = build_insn(insn, ctx); + if (ret < 0) + return ret; + while (ret > 0) { + i++; + if (ctx->image == NULL) + ctx->insn_offset[i] = ctx->insn_offset[i - 1]; + ret--; + } + } + + return 0; +} + +static int validate_code(struct jit_ctx *ctx) +{ + int i; + + for (i = 0; i < ctx->idx; i++) { + if (ctx->image[i] == SW64_BPF_ILLEGAL_INSN) + return -1; + } + + if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries)) + return -1; + + return 0; +} + +static inline void bpf_flush_icache(void *start, void *end) +{ + flush_icache_range((unsigned long)start, (unsigned long)end); +} + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) +{ + struct bpf_prog *tmp, *orig_prog = prog; + struct bpf_binary_header *header; + struct sw64_jit_data *jit_data; + bool was_classic = bpf_prog_was_classic(prog); + bool tmp_blinded = false; + bool extra_pass = false; + struct jit_ctx ctx; + int image_size, prog_size, extable_size; + u8 *image_ptr; + + if (!prog->jit_requested) + return orig_prog; + + tmp = bpf_jit_blind_constants(prog); + /* If blinding was requested and we failed during blinding, + * we must fall back to the interpreter. + */ + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } + + jit_data = prog->aux->jit_data; + if (!jit_data) { + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); + if (!jit_data) { + prog = orig_prog; + goto out; + } + prog->aux->jit_data = jit_data; + } + if (jit_data->ctx.insn_offset) { + ctx = jit_data->ctx; + image_ptr = jit_data->image; + header = jit_data->header; + extra_pass = true; + prog_size = sizeof(u32) * ctx.idx; + goto skip_init_ctx; + } + memset(&ctx, 0, sizeof(ctx)); + ctx.prog = prog; + + ctx.insn_offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL); + if (ctx.insn_offset == NULL) { + prog = orig_prog; + goto out_off; + } + + /* 1. Initial fake pass to compute ctx->idx and fill in ctx->insn_offset. */ + build_prologue(&ctx, was_classic); + + if (build_body(&ctx)) { + prog = orig_prog; + goto out_off; + } + + ctx.insn_offset[prog->len] = ctx.epilogue_offset = ctx.idx; + build_epilogue(&ctx); + + extable_size = prog->aux->num_exentries * + sizeof(struct exception_table_entry); + + /* Now we know the actual image size. */ + /* And we need extra 8 bytes for lock instructions alignment */ + prog_size = sizeof(u32) * ctx.idx + 8; + image_size = prog_size + extable_size; + header = bpf_jit_binary_alloc(image_size, &image_ptr, + sizeof(u32), jit_fill_hole); + if (header == NULL) { + prog = orig_prog; + goto out_off; + } + + /* 2. Now, the actual pass. */ + + /* lock instructions need 8-byte alignment */ + ctx.image = (u32 *)(((unsigned long)image_ptr + 7) & (~7)); + if (extable_size) + prog->aux->extable = (void *)image_ptr + prog_size; +skip_init_ctx: + ctx.idx = 0; + ctx.exentry_idx = 0; + + build_prologue(&ctx, was_classic); + + if (build_body(&ctx)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_off; + } + + build_epilogue(&ctx); + + /* 3. Extra pass to validate JITed code. */ + if (validate_code(&ctx)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_off; + } + + /* And we're done. 
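Three passes in total: the sizing pass with image == NULL, the emission pass above, and validate_code() to make sure no slot still holds the illegal-insn filler.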
*/ + if (bpf_jit_enable > 1) + bpf_jit_dump(prog->len, prog_size, 2, ctx.image); + + bpf_flush_icache(header, ctx.image + ctx.idx); + + if (!prog->is_func || extra_pass) { + bpf_jit_binary_lock_ro(header); + } else { + jit_data->ctx = ctx; + jit_data->image = image_ptr; + jit_data->header = header; + } + prog->bpf_func = (void *)ctx.image; + prog->jited = 1; + prog->jited_len = prog_size; + if (ctx.current_tmp_reg) { + pr_err("eBPF JIT %s[%d]: unreleased temporary registers %d\n", + current->comm, current->pid, ctx.current_tmp_reg); + } + + if (!prog->is_func || extra_pass) { +out_off: + kfree(ctx.insn_offset); + kfree(jit_data); + prog->aux->jit_data = NULL; + } +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? + tmp : orig_prog); + return prog; +} diff --git a/arch/sw_64/pci/Makefile b/arch/sw_64/pci/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..327efb163b12f7f517c94f4591550fb50688f441 --- /dev/null +++ b/arch/sw_64/pci/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the linux kernel. +# + +obj-y += pci.o pci-legacy.o pci-sysfs.o +obj-$(CONFIG_ACPI) += acpi.o +obj-$(CONFIG_PCI_MSI) += msi.o diff --git a/arch/sw_64/pci/acpi.c b/arch/sw_64/pci/acpi.c new file mode 100644 index 0000000000000000000000000000000000000000..1353994320b3cfa15cbded2383382e79b0051e3c --- /dev/null +++ b/arch/sw_64/pci/acpi.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +struct pci_root_info { + struct acpi_pci_root_info info; + struct pci_config_window *cfg; +}; + +static void pci_acpi_release_root_info(struct acpi_pci_root_info *ci) +{ + struct pci_root_info *pci_ri; + + pci_ri = container_of(ci, struct pci_root_info, info); + pci_ecam_free(pci_ri->cfg); + kfree(ci->ops); + kfree(pci_ri); +} + +int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) +{ + struct pci_config_window *cfg = bus->sysdata; + struct acpi_device *adev = to_acpi_device(cfg->parent); + struct acpi_pci_root *root = acpi_driver_data(adev); + + return root->segment; +} + +/** + * Look up the MCFG table entry corresponding to the current + * PCI host controller, and set up config space mapping. + */ +static struct pci_config_window * +pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) +{ + struct device *dev = &root->device->dev; + struct pci_config_window *cfg = NULL; + const struct pci_ecam_ops *ecam_ops = NULL; + struct resource *bus_res = &root->secondary; + struct resource cfg_res; + struct acpi_device *adev = NULL; + int ret = 0, bus_shift = 0; + u16 seg = root->segment; + + ret = pci_mcfg_lookup(root, &cfg_res, &ecam_ops); + if (ret < 0) { + dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res); + return NULL; + } + + /** + * Apply the bus shift quirk here, since we cannot + * know the ECAM address in the MCFG table when filling mcfg_quirks + */ + bus_shift = ecam_ops->bus_shift; + cfg_res.start = root->mcfg_addr + (bus_res->start << bus_shift); + cfg_res.end = cfg_res.start + ((resource_size(bus_res)) << bus_shift) - 1; + cfg_res.flags = IORESOURCE_MEM; + + /** + * The ECAM area is considered the mem resource of the current + * PCI host controller, so we'd better record this resource + * in the ACPI namespace (_CRS). 
+ */ + adev = acpi_resource_consumer(&cfg_res); + if (adev) + dev_info(dev, "ECAM area %pR reserved by %s\n", &cfg_res, + dev_name(&adev->dev)); + else + dev_info(dev, "Note: ECAM area %pR not reserved in ACPI namespace\n", + &cfg_res); + + cfg = pci_ecam_create(dev, &cfg_res, bus_res, ecam_ops); + if (IS_ERR(cfg)) { + dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, + PTR_ERR(cfg)); + return NULL; + } + + return cfg; +} + +static int pci_acpi_prepare_root_resources(struct acpi_pci_root_info *ci) +{ + int status = 0; + acpi_status rc; + unsigned long long mem_space_base = 0; + struct resource_entry *entry = NULL, *tmp = NULL; + struct acpi_device *device = ci->bridge; + + /** + * Get host bridge resources via the _CRS method; the return value + * is the number of resources parsed. + */ + status = acpi_pci_probe_root_resources(ci); + if (status > 0) { + /** + * To distinguish between mem and pre_mem, the firmware only passes the + * lower 32 bits of mem via ACPI and uses the vendor-specific "MEMH" + * method to record the upper 32 bits of mem. + * + * Get the upper 32 bits here. + */ + rc = acpi_evaluate_integer(ci->bridge->handle, + "MEMH", NULL, &mem_space_base); + if (rc != AE_OK) { + dev_err(&device->dev, "unable to retrieve MEMH\n"); + return -EEXIST; + } + + resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { + if (entry->res->flags & IORESOURCE_MEM) { + if (!(entry->res->end & 0xFFFFFFFF00000000ULL)) { + /* Patch the mem resource with upper 32 bits */ + entry->res->start |= (mem_space_base << 32); + entry->res->end |= (mem_space_base << 32); + } else { + /** + * Add PREFETCH and MEM_64 flags for pre_mem, + * so that we can distinguish between mem and + * pre_mem. + */ + entry->res->flags |= IORESOURCE_PREFETCH; + entry->res->flags |= IORESOURCE_MEM_64; + } + } + + dev_dbg(&device->dev, + "host bridge resource: 0x%llx-0x%llx flags [0x%lx]\n", + entry->res->start, entry->res->end, entry->res->flags); + } + return status; + } + + /** + * If the resources were not parsed successfully, destroy + * those which have been parsed. + */ + resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { + dev_info(&device->dev, + "host bridge resource(ignored): 0x%llx-0x%llx flags [0x%lx]\n", + entry->res->start, entry->res->end, entry->res->flags); + resource_list_destroy_entry(entry); + } + + return 0; +} + +/** + * This function is called from ACPI code and used to + * set up the PCI host controller. 
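+ * If a matching root bus already exists (pci_find_bus), only its config
+ * window is refreshed; otherwise a new root bus is created, scanned and
+ * its resources assigned.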
+ */ +struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +{ + struct pci_bus *bus = NULL, *child = NULL; + struct pci_root_info *pci_ri = NULL; + struct acpi_pci_root_ops *root_ops = NULL; + int domain = root->segment; + int busnum = root->secondary.start; + + pci_ri = kzalloc(sizeof(*pci_ri), GFP_KERNEL); + if (!pci_ri) + goto out_of_mem_0; + + root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); + if (!root_ops) + goto out_of_mem_1; + + pci_ri->cfg = pci_acpi_setup_ecam_mapping(root); + if (!pci_ri->cfg) + goto setup_ecam_err; + + root_ops->release_info = pci_acpi_release_root_info; + root_ops->prepare_resources = pci_acpi_prepare_root_resources; + root_ops->pci_ops = (struct pci_ops *)&pci_ri->cfg->ops->pci_ops; + + bus = pci_find_bus(domain, busnum); + if (bus) { + memcpy(bus->sysdata, pci_ri->cfg, sizeof(struct pci_config_window)); + kfree(pci_ri->cfg); + kfree(pci_ri); + kfree(root_ops); + } else { + bus = acpi_pci_root_create(root, root_ops, &pci_ri->info, pci_ri->cfg); + + /** + * No need to do kfree here, because acpi_pci_root_create will free + * mem alloced when it cannot create pci_bus. + */ + if (!bus) + return NULL; + + /* Some quirks for pci controller of Sunway after scanning Root Complex */ + sw64_pci_root_bridge_scan_finish_up(pci_find_host_bridge(bus)); + + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } + + return bus; + +setup_ecam_err: + kfree(root_ops); +out_of_mem_1: + kfree(pci_ri); +out_of_mem_0: + pr_warn("RC [%04x:%02x:] failed (out of memory or setup ecam error)!\n", + domain, busnum); + + return NULL; +} + +int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) +{ + if (!acpi_disabled) { + struct pci_config_window *cfg = bridge->sysdata; + struct acpi_device *adev = to_acpi_device(cfg->parent); + struct pci_controller *hose = cfg->priv; + struct device *bus_dev = &bridge->bus->dev; + + ACPI_COMPANION_SET(&bridge->dev, adev); + set_dev_node(bus_dev, hose->node); + + /* Some quirks for pci controller of Sunway before scanning Root Complex */ + sw64_pci_root_bridge_prepare(bridge); + } + + return 0; +} + +void pcibios_add_bus(struct pci_bus *bus) +{ + acpi_pci_add_bus(bus); +} + +void pcibios_remove_bus(struct pci_bus *bus) +{ + acpi_pci_remove_bus(bus); +} diff --git a/arch/sw_64/pci/msi.c b/arch/sw_64/pci/msi.c new file mode 100644 index 0000000000000000000000000000000000000000..fc2c122c37efa0c9d84dfcbdd8bc615305f032d9 --- /dev/null +++ b/arch/sw_64/pci/msi.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +int msi_compose_msg(unsigned int irq, struct msi_msg *msg) +{ + msg->address_hi = (unsigned int)(MSIX_MSG_ADDR >> 32); + msg->address_lo = (unsigned int)(MSIX_MSG_ADDR & 0xffffffff); + msg->data = irq; + return irq; +} + +void sw64_irq_noop(struct irq_data *d) +{ +} + +void arch_teardown_msi_irq(unsigned int irq) +{ +} diff --git a/arch/sw_64/pci/pci-legacy.c b/arch/sw_64/pci/pci-legacy.c new file mode 100644 index 0000000000000000000000000000000000000000..2a44463db0a42e99f1bc86c84a24762d06e36afc --- /dev/null +++ b/arch/sw_64/pci/pci-legacy.c @@ -0,0 +1,508 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +#include +#include + +unsigned long rc_linkup; + +/* + * The PCI controller list. 
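+ * hose_head points at the first controller and hose_tail at the last
+ * next pointer, so alloc_pci_controller() can append in O(1).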
+ */ + +struct pci_controller *hose_head, **hose_tail = &hose_head; +static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus); + +static int __init +pcibios_init(void) +{ + if (acpi_disabled) + sw64_init_pci(); + return 0; +} +subsys_initcall(pcibios_init); + +void __init pcibios_claim_one_bus(struct pci_bus *b) +{ + struct pci_dev *dev; + struct pci_bus *child_bus; + + list_for_each_entry(dev, &b->devices, bus_list) { + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *r = &dev->resource[i]; + + if (r->parent || !r->start || !r->flags) + continue; + if (r->flags & IORESOURCE_PCI_FIXED) { + if (pci_claim_resource(dev, i) == 0) + continue; + + pci_claim_bridge_resource(dev, i); + } + } + } + + list_for_each_entry(child_bus, &b->children, node) + pcibios_claim_one_bus(child_bus); +} + +static void __init +pcibios_claim_console_setup(void) +{ + struct pci_bus *b; + + list_for_each_entry(b, &pci_root_buses, node) + pcibios_claim_one_bus(b); +} + +int __weak chip_pcie_configure(struct pci_controller *hose) +{ + return 0; +} + +unsigned char last_bus = PCI0_BUS; +void __init common_init_pci(void) +{ + struct pci_controller *hose; + struct pci_host_bridge *bridge; + struct pci_bus *bus; + unsigned int init_busnr; + int need_domain_info = 0; + int ret; + unsigned long offset; + + /* Scan all of the recorded PCI controllers. */ + hose = hose_head; + for (hose = hose_head; hose; hose = hose->next) { + bridge = pci_alloc_host_bridge(0); + if (!bridge) + continue; + hose->busn_space->start = last_bus; + init_busnr = (0xff << 16) + ((last_bus + 1) << 8) + (last_bus); + write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); + offset = hose->mem_space->start - PCI_32BIT_MEMIO; + if (is_in_host()) + hose->first_busno = last_bus + 1; + else + hose->first_busno = last_bus; + pci_add_resource_offset(&bridge->windows, hose->mem_space, offset); + pci_add_resource_offset(&bridge->windows, hose->io_space, hose->io_space->start); + pci_add_resource_offset(&bridge->windows, hose->pre_mem_space, 0); + pci_add_resource_offset(&bridge->windows, hose->busn_space, 0); + bridge->dev.parent = NULL; + bridge->sysdata = hose; + bridge->busnr = hose->busn_space->start; + bridge->ops = &sw64_pci_ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = sw64_map_irq; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + continue; + } + + bus = hose->bus = bridge->bus; + hose->need_domain_info = need_domain_info; + + if (is_in_host()) + last_bus = chip_pcie_configure(hose); + else + while (pci_find_bus(pci_domain_nr(bus), last_bus)) + last_bus++; + + hose->last_busno = hose->busn_space->end = last_bus; + init_busnr = read_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS); + init_busnr &= ~(0xff << 16); + init_busnr |= last_bus << 16; + write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); + pci_bus_update_busn_res_end(bus, last_bus); + last_bus++; + } + + pcibios_claim_console_setup(); + + if (is_in_host()) { + list_for_each_entry(bus, &pci_root_buses, node) + pcibios_reserve_legacy_regions(bus); + } + + pr_info("SW arch assign unassigned resources.\n"); + + pci_assign_unassigned_resources(); + + for (hose = hose_head; hose; hose = hose->next) { + bus = hose->bus; + if (bus) + pci_bus_add_devices(bus); + } +} + +struct pci_controller * __init +alloc_pci_controller(void) +{ + struct pci_controller *hose; + + hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); + + *hose_tail = hose; + hose_tail = &hose->next; + + 
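+	/*
+	 * Note: memblock_alloc() may return NULL this early in boot;
+	 * callers such as sw64_init_host() are expected to check the
+	 * returned pointer before using the hose.
+	 */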
return hose; +} + +struct resource * __init +alloc_resource(void) +{ + struct resource *res; + + res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); + + return res; +} + +static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + resource_size_t offset; + struct resource *res; + + pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus)); + + /* Check for IO */ + if (!(hose->io_space->flags & IORESOURCE_IO)) + goto no_io; + offset = (unsigned long)hose->io_space->start; + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + BUG_ON(res == NULL); + res->name = "Legacy IO"; + res->flags = IORESOURCE_IO; + res->start = offset; + res->end = (offset + 0xfff) & 0xfffffffffffffffful; + pr_debug("Candidate legacy IO: %pR\n", res); + if (request_resource(hose->io_space, res)) { + pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n", + pci_domain_nr(bus), bus->number, res); + kfree(res); + } + +no_io: + return; +} + +/* PCIe RC operations */ +int sw6_pcie_read_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data; + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = hose->rc_config_space_base; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc read addr:%px bus %d, devfn %#x, where %#x size=%d\t", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, size); + + if ((uintptr_t)where & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (unlikely(devfn > 0)) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + data = readl(cfg_iobase + ((where & ~3) << 5)); + + switch (size) { + case 1: + *val = (data >> (8 * (where & 0x3))) & 0xff; + break; + case 2: + *val = (data >> (8 * (where & 0x2))) & 0xffff; + break; + default: + *val = data; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("*val %#x\n ", *val); + + return PCIBIOS_SUCCESSFUL; +} + +int sw6_pcie_write_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data; + u32 shift = 8 * (where & 3); + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = (void *)hose->rc_config_space_base; + + if ((uintptr_t)where & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + switch (size) { + case 1: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xff << shift); + data |= (val & 0xff) << shift; + break; + case 2: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xffff << shift); + data |= (val & 0xffff) << shift; + break; + default: + data = val; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc write addr:%px bus %d, devfn %#x, where %#x *val %#x size %d\n", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, val, size); + + writel(data, cfg_iobase + ((where & ~3) << 5)); + + return PCIBIOS_SUCCESSFUL; +} + +int sw6_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int ret = PCIBIOS_DEVICE_NOT_FOUND; + + if (is_guest_or_emul()) + return pci_generic_config_read(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) { + ret = sw6_pcie_read_rc_cfg(bus, devfn, where, size, val); + } else { + if (test_bit(hose->node * 8 + hose->index, &rc_linkup)) + ret = pci_generic_config_read(bus, devfn, where, size, val); + else + return 
ret; + } + return ret; +} + +int sw6_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_guest_or_emul()) + return pci_generic_config_write(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) + return sw6_pcie_write_rc_cfg(bus, devfn, where, size, val); + else + return pci_generic_config_write(bus, devfn, where, size, val); +} + +/* + *sw6_pcie_valid_device - Check if a valid device is present on bus + *@bus: PCI Bus structure + *@devfn: device/function + * + *Return: 'true' on success and 'false' if invalid device is found + */ +static bool sw6_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_in_host()) { + /* Only one device down on each root complex */ + if (bus->number == hose->self_busno && devfn > 0) + return false; + } + + return true; +} + +/* + *sw6_pcie_map_bus - Get configuration base + *@bus: PCI Bus structure + *@devfn: Device/function + *@where: Offset from base + * + *Return: Base address of the configuration space needed to be + *accessed. + */ +static void __iomem *sw6_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase; + unsigned long relbus; + + if (!sw6_pcie_valid_device(bus, devfn)) + return NULL; + + relbus = (bus->number << 24) | (devfn << 16) | where; + + cfg_iobase = hose->ep_config_space_base + relbus; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("addr:%px bus %d, devfn %d, where %d\n", + cfg_iobase, bus->number, devfn, where); + return cfg_iobase; +} + +struct pci_ops sw64_pci_ops = { + .map_bus = sw6_pcie_map_bus, + .read = sw6_pcie_config_read, + .write = sw6_pcie_config_write, +}; + +int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return sw64_chip_init->pci_init.map_irq(dev, slot, pin); +} + +static void __init +sw64_init_host(unsigned long node, unsigned long index) +{ + struct pci_controller *hose; + int ret = 0; + + hose = alloc_pci_controller(); + if (!hose) { + pr_warn("alloc NODE %ld RC %ld hose failed\n", node, index); + return; + } + hose->iommu_enable = false; + hose->io_space = alloc_resource(); + hose->mem_space = alloc_resource(); + hose->pre_mem_space = alloc_resource(); + hose->busn_space = alloc_resource(); + hose->index = index; + hose->node = node; + + sw64_chip_init->pci_init.hose_init(hose); + + if (sw64_chip_init->pci_init.set_rc_piu) + sw64_chip_init->pci_init.set_rc_piu(node, index); + + ret = sw64_chip_init->pci_init.check_pci_linkup(node, index); + if (ret == 0) { + /* Root Complex downstream port is link up */ + set_bit(node * 8 + index, &rc_linkup); //8-bit per node + } +} + +void __weak set_devint_wken(int node) {} +void __weak set_adr_int(int node) {} + +void __init sw64_init_arch(void) +{ + if (IS_ENABLED(CONFIG_PCI)) { + unsigned long node, cpu_num; + unsigned long rc_enable; + char id[8], msg[64]; + int i; + + cpu_num = sw64_chip->get_cpu_num(); + + for (node = 0; node < cpu_num; node++) { + if (is_in_host()) { + set_devint_wken(node); + set_adr_int(node); + } + } + + if (!acpi_disabled) + return; + + pr_info("SW arch PCI initialize!\n"); + for (node = 0; node < cpu_num; node++) { + rc_enable = sw64_chip_init->pci_init.get_rc_enable(node); + if (rc_enable == 0) { + pr_notice("PCIe is disabled on node %ld\n", node); + 
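+				/*
+				 * rc_enable is a per-node bitmap: bit i set
+				 * means RC i exists and is brought up by
+				 * sw64_init_host() below; 0 means there is
+				 * nothing to probe on this node.
+				 */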
continue; + } + for (i = 0; i < MAX_NR_RCS; i++) { + if ((rc_enable >> i) & 0x1) + sw64_init_host(node, i); + } + if ((rc_linkup >> node * 8) & 0xff) { + memset(msg, 0, 64); + sprintf(msg, "Node %ld: RC [ ", node); + for (i = 0; i < MAX_NR_RCS; i++) { + if ((rc_linkup >> (i + node * 8)) & 1) { + memset(id, 0, 8); + sprintf(id, "%d ", i); + strcat(msg, id); + } + } + strcat(msg, "] link up"); + pr_info("%s\n", msg); + } else { + pr_info("Node %ld: no RC link up\n", node); + } + } + } +} + +void __weak set_pcieport_service_irq(int node, int index) {} + +static void __init sw64_init_intx(struct pci_controller *hose) +{ + unsigned long int_conf, node, val_node; + unsigned long index, irq; + int rcid; + + node = hose->node; + index = hose->index; + + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 2, val_node); + WARN_ON(irq < 0); + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq); + irq_set_status_flags(irq, IRQ_LEVEL); + hose->int_irq = irq; + irq_set_chip_and_handler(irq + 1, &dummy_irq_chip, handle_level_irq); + hose->service_irq = irq + 1; + rcid = cpu_to_rcid(0); + + pr_info_once("INTx are directed to node %d core %d.\n", + ((rcid >> 6) & 0x3), (rcid & 0x1f)); + int_conf = 1UL << 62 | rcid; /* rebase all intx on the first logical cpu */ + if (sw64_chip_init->pci_init.set_intx) + sw64_chip_init->pci_init.set_intx(node, index, int_conf); + + set_pcieport_service_irq(node, index); +} + +void __init sw64_init_irq(void) +{ + struct pci_controller *hose; + + /* Scan all of the recorded PCI controllers. */ + hose = hose_head; + for (hose = hose_head; hose; hose = hose->next) + sw64_init_intx(hose); +} + +void __init +sw64_init_pci(void) +{ + pci_add_flags(PCI_REASSIGN_ALL_BUS); + common_init_pci(); + pci_clear_flags(PCI_REASSIGN_ALL_BUS); +} diff --git a/arch/sw_64/pci/pci-sysfs.c b/arch/sw_64/pci/pci-sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..5b52a534fa808ebf1c8b1548ef1acaa1a905b96f --- /dev/null +++ b/arch/sw_64/pci/pci-sysfs.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Sw_64 PCI resource files. + * + * Loosely based on generic HAVE_PCI_MMAP implementation in + * drivers/pci/pci-sysfs.c + */ + +#include + +static int hose_mmap_page_range(struct pci_controller *hose, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_type, int sparse) +{ + unsigned long base; + + if (mmap_type == pci_mmap_mem) + base = sparse ? hose->sparse_mem_base : hose->dense_mem_base; + else + base = sparse ? hose->sparse_io_base : hose->dense_io_base; + + vma->vm_pgoff |= base >> PAGE_SHIFT; + + return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +static int __pci_mmap_fits(struct pci_dev *pdev, int num, + struct vm_area_struct *vma, int sparse) +{ + unsigned long nr, start, size; + int shift = sparse ? 5 : 0; + + nr = vma_pages(vma); + start = vma->vm_pgoff; + size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1; + + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", + current->comm, sparse ? 
" sparse" : "", start, start + nr, + pci_name(pdev), num, size); + return 0; +} + +/** + * pci_mmap_resource - map a PCI resource into user memory space + * @kobj: kobject for mapping + * @attr: struct bin_attribute for the file being mapped + * @vma: struct vm_area_struct passed into the mmap + * @sparse: address space type + * + * Use the bus mapping routines to map a PCI resource into userspace. + */ +static int pci_mmap_resource(struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma, int sparse) +{ + struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); + struct resource *res = attr->private; + enum pci_mmap_state mmap_type; + struct pci_bus_region bar; + int i; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) + if (res == &pdev->resource[i]) + break; + if (i >= PCI_ROM_RESOURCE) + return -ENODEV; + + if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) + return -EINVAL; + + if (!__pci_mmap_fits(pdev, i, vma, sparse)) + return -EINVAL; + + pcibios_resource_to_bus(pdev->bus, &bar, res); + vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); + mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; + + return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse); +} + +static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + return pci_mmap_resource(kobj, attr, vma, 1); +} + +static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + return pci_mmap_resource(kobj, attr, vma, 0); +} + +/** + * pci_remove_resource_files - cleanup resource files + * @dev: dev to cleanup + * + * If we created resource files for @dev, remove them from sysfs and + * free their resources. + */ +void pci_remove_resource_files(struct pci_dev *pdev) +{ + int i; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + struct bin_attribute *res_attr; + + res_attr = pdev->res_attr[i]; + if (res_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); + kfree(res_attr); + } + + res_attr = pdev->res_attr_wc[i]; + if (res_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); + kfree(res_attr); + } + } +} + +static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num) +{ + struct pci_bus_region bar; + struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus); + long dense_offset; + unsigned long sparse_size; + + pcibios_resource_to_bus(pdev->bus, &bar, &pdev->resource[num]); + + /* + * All core logic chips have 4G sparse address space, except + * CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM + * definitions in asm/core_xxx.h files). This corresponds + * to 128M or 512M of the bus space. + */ + dense_offset = (long)(hose->dense_mem_base - hose->sparse_mem_base); + sparse_size = dense_offset >= 0x400000000UL ? 0x20000000 : 0x8000000; + + return bar.end < sparse_size; +} + +static int pci_create_one_attr(struct pci_dev *pdev, int num, char *name, + char *suffix, struct bin_attribute *res_attr, + unsigned long sparse) +{ + size_t size = pci_resource_len(pdev, num); + + sprintf(name, "resource%d%s", num, suffix); + res_attr->mmap = sparse ? pci_mmap_resource_sparse : + pci_mmap_resource_dense; + res_attr->attr.name = name; + res_attr->attr.mode = 0600; + res_attr->size = sparse ? 
size << 5 : size; + res_attr->private = &pdev->resource[num]; + return sysfs_create_bin_file(&pdev->dev.kobj, res_attr); +} + +static int pci_create_attr(struct pci_dev *pdev, int num) +{ + /* allocate attribute structure, piggyback attribute name */ + int retval, nlen1, nlen2 = 0, res_count = 1; + unsigned long sparse_base, dense_base; + struct bin_attribute *attr; + struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus); + char *suffix, *attr_name; + + suffix = ""; + nlen1 = 10; + + if (pdev->resource[num].flags & IORESOURCE_MEM) { + sparse_base = hose->sparse_mem_base; + dense_base = hose->dense_mem_base; + if (sparse_base && !sparse_mem_mmap_fits(pdev, num)) { + sparse_base = 0; + suffix = "_dense"; + nlen1 = 16; /* resourceN_dense */ + } + } else { + sparse_base = hose->sparse_io_base; + dense_base = hose->dense_io_base; + } + + if (sparse_base) { + suffix = "_sparse"; + nlen1 = 17; + if (dense_base) { + nlen2 = 16; /* resourceN_dense */ + res_count = 2; + } + } + + attr = kzalloc(sizeof(*attr) * res_count + nlen1 + nlen2, GFP_ATOMIC); + if (!attr) + return -ENOMEM; + + attr_name = (char *)(attr + res_count); + pdev->res_attr[num] = attr; + retval = pci_create_one_attr(pdev, num, attr_name, suffix, attr, + sparse_base); + if (retval || res_count == 1) + return retval; + + /* Create dense file */ + attr_name += nlen1; + attr++; + pdev->res_attr_wc[num] = attr; + return pci_create_one_attr(pdev, num, attr_name, "_dense", attr, 0); +} + +/** + * pci_create_resource_files - create resource files in sysfs for @dev + * @dev: dev in question + * + * Walk the resources in @dev creating files for each resource available. + */ +int pci_create_resource_files(struct pci_dev *pdev) +{ + int i; + int retval; + + /* Expose the PCI resources from this device as files */ + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + + /* skip empty resources */ + if (!pci_resource_len(pdev, i)) + continue; + + retval = pci_create_attr(pdev, i); + if (retval) { + pci_remove_resource_files(pdev); + return retval; + } + } + return 0; +} + +/* Legacy I/O bus mapping stuff. */ + +static int __legacy_mmap_fits(struct pci_controller *hose, + struct vm_area_struct *vma, + unsigned long res_size, int sparse) +{ + unsigned long nr, start, size; + + nr = vma_pages(vma); + start = vma->vm_pgoff; + size = ((res_size - 1) >> PAGE_SHIFT) + 1; + + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on hose %ld (size 0x%08lx)\n", + current->comm, sparse ? " sparse" : "", start, start + nr, + hose->index, size); + return 0; +} + +static inline int has_sparse(struct pci_controller *hose, + enum pci_mmap_state mmap_type) +{ + unsigned long base; + + base = (mmap_type == pci_mmap_mem) ? hose->sparse_mem_base : + hose->sparse_io_base; + + return base != 0; +} + +int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, + enum pci_mmap_state mmap_type) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int sparse = has_sparse(hose, mmap_type); + unsigned long res_size; + + res_size = (mmap_type == pci_mmap_mem) ? bus->legacy_mem->size : + bus->legacy_io->size; + if (!__legacy_mmap_fits(hose, vma, res_size, sparse)) + return -EINVAL; + + return hose_mmap_page_range(hose, vma, mmap_type, sparse); +} + +/** + * pci_adjust_legacy_attr - adjustment of legacy file attributes + * @b: bus to create files under + * @mmap_type: I/O port or memory + * + * Adjust file name and size for sparse mappings. 
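+ *
+ * In sparse space each byte of bus space occupies a 32-byte stride,
+ * hence the "size <<= 5" below: e.g. a 64KB legacy I/O range is exposed
+ * as a 2MB "legacy_io_sparse" file.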
+ */ +void pci_adjust_legacy_attr(struct pci_bus *bus, enum pci_mmap_state mmap_type) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (!has_sparse(hose, mmap_type)) + return; + + if (mmap_type == pci_mmap_mem) { + bus->legacy_mem->attr.name = "legacy_mem_sparse"; + bus->legacy_mem->size <<= 5; + } else { + bus->legacy_io->attr.name = "legacy_io_sparse"; + bus->legacy_io->size <<= 5; + } +} + +/* Legacy I/O bus read/write functions */ +int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + port += hose->io_space->start; + + switch (size) { + case 1: + *((u8 *)val) = inb(port); + return 1; + case 2: + if (port & 1) + return -EINVAL; + *((u16 *)val) = inw(port); + return 2; + case 4: + if (port & 3) + return -EINVAL; + *((u32 *)val) = inl(port); + return 4; + } + return -EINVAL; +} + +int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + port += hose->io_space->start; + + switch (size) { + case 1: + outb(port, val); + return 1; + case 2: + if (port & 1) + return -EINVAL; + outw(port, val); + return 2; + case 4: + if (port & 3) + return -EINVAL; + outl(port, val); + return 4; + } + return -EINVAL; +} diff --git a/arch/sw_64/pci/pci.c b/arch/sw_64/pci/pci.c new file mode 100644 index 0000000000000000000000000000000000000000..3db9816e19f1c8facca61e627b2ee465fe404082 --- /dev/null +++ b/arch/sw_64/pci/pci.c @@ -0,0 +1,436 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +/* + * raw_pci_read/write - Platform-specific PCI config space access. + */ +int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val) +{ + struct pci_bus *bus_tmp = pci_find_bus(domain, bus); + + if (bus_tmp) + return bus_tmp->ops->read(bus_tmp, devfn, reg, len, val); + + return -EINVAL; +} + +int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 val) +{ + struct pci_bus *bus_tmp = pci_find_bus(domain, bus); + + if (bus_tmp) + return bus_tmp->ops->write(bus_tmp, devfn, reg, len, val); + + return -EINVAL; +} + +resource_size_t pcibios_default_alignment(void) +{ + if (is_in_guest()) + return PAGE_SIZE; + else + return 0; +} + +/** + * Just declaring that the power-of-ten prefixes are actually the + * power-of-two ones doesn't make it true :) + */ +#define KB 1024 +#define MB (1024*KB) +#define GB (1024*MB) + +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) +{ + struct pci_dev *dev = data; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + unsigned long alignto; + resource_size_t start = res->start; + + if (res->flags & IORESOURCE_IO) { + /* Make sure we start at our min on all hoses */ + if (start - hose->io_space->start < PCIBIOS_MIN_IO) + start = PCIBIOS_MIN_IO + hose->io_space->start; + /* + * Put everything into 0x00-0xff region modulo 0x400 + */ + if (start & 0x300) + start = (start + 0x3ff) & ~0x3ff; + } else if (res->flags & IORESOURCE_MEM) { + /* Make sure we start at our min on all hoses */ + if (start - hose->mem_space->start < PCIBIOS_MIN_MEM) + start = PCIBIOS_MIN_MEM + hose->mem_space->start; + /* + * The following holds at least for the Low Cost + * SW64 implementation of the PCI interface: + * + * In sparse memory address space, the first + * octant (16MB) of every 128MB 
segment is + * aliased to the very first 16 MB of the + * address space (i.e., it aliases the ISA + * memory address space). Thus, we try to + * avoid allocating PCI devices in that range. + * Can be allocated in 2nd-7th octant only. + * Devices that need more than 112MB of + * address space must be accessed through + * dense memory space only! + */ + + /* Align to multiple of size of minimum base. */ + alignto = max_t(resource_size_t, 0x1000UL, align); + start = ALIGN(start, alignto); + if (hose->sparse_mem_base && size <= 7 * 16*MB) { + if (((start / (16*MB)) & 0x7) == 0) { + start &= ~(128*MB - 1); + start += 16*MB; + start = ALIGN(start, alignto); + } + if (start/(128*MB) != (start + size - 1)/(128*MB)) { + start &= ~(128*MB - 1); + start += (128 + 16)*MB; + start = ALIGN(start, alignto); + } + } + } + + return start; +} + +#undef KB +#undef MB +#undef GB + +char *pcibios_setup(char *str) +{ + return str; +} + +void pcibios_fixup_bus(struct pci_bus *bus) +{ + /* Propagate hose info into the subordinate devices. */ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + struct pci_dev *dev = bus->self; + + if (!dev || bus->number == hose->first_busno) { + bus->resource[0] = hose->io_space; + bus->resource[1] = hose->mem_space; + bus->resource[2] = hose->pre_mem_space; + } +} + +/** + * Provide information on locations of various I/O regions in physical + * memory. Do this on a per-card basis so that we choose the right hose. + */ +asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) +{ + struct pci_controller *hose; + + hose = bus_num_to_pci_controller(bus); + if (hose == NULL) + return -ENODEV; + + switch (which & ~IOBASE_FROM_HOSE) { + case IOBASE_HOSE: + return hose->index; + case IOBASE_SPARSE_MEM: + return hose->sparse_mem_base; + case IOBASE_DENSE_MEM: + return hose->dense_mem_base; + case IOBASE_SPARSE_IO: + return hose->sparse_io_base; + case IOBASE_DENSE_IO: + return hose->dense_io_base; + case IOBASE_ROOT_BUS: + return hose->bus->number; + } + + return -EOPNOTSUPP; +} + +void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ +} +EXPORT_SYMBOL(pci_iounmap); + +void __init reserve_mem_for_pci(void) +{ + int ret; + unsigned long base = PCI_32BIT_MEMIO; + + ret = add_memmap_region(base, PCI_32BIT_MEMIO_SIZE, memmap_pci); + if (ret) { + pr_err("reserved pages for pcie memory space failed\n"); + return; + } + + pr_info("reserved pages for pcie memory space %lx:%lx\n", base >> PAGE_SHIFT, + (base + PCI_32BIT_MEMIO_SIZE) >> PAGE_SHIFT); +} + +const struct dma_map_ops *dma_ops; +EXPORT_SYMBOL(dma_ops); + +/* Quirks */ +static void quirk_isa_bridge(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_ISA << 8; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge); + +/* + * Early fix up the Root Complex settings + */ +static void fixup_root_complex(struct pci_dev *dev) +{ + int i; + struct pci_bus *bus = dev->bus; + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + hose->self_busno = hose->busn_space->start; + + if (likely(bus->number == hose->self_busno)) { + if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) { + /* Check Root Complex port again */ + dev->is_hotplug_bridge = 0; + dev->current_state = PCI_D0; + } + + dev->class &= 0xff; + dev->class |= PCI_CLASS_BRIDGE_PCI << 8; + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + dev->resource[i].start = 0; + dev->resource[i].end = 0; + dev->resource[i].flags = IORESOURCE_PCI_FIXED; + } + } + atomic_inc(&dev->enable_cnt); + + dev->no_msi = 1; 
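+
+	/*
+	 * The RC port is left marked as enabled and as not MSI-capable;
+	 * presumably its own interrupts are delivered through the INTx
+	 * path set up in sw64_init_intx() instead.
+	 */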
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JN, PCI_DEVICE_ID_SW64_ROOT_BRIDGE, fixup_root_complex);
+
+static int setup_bus_dma_cb(struct pci_dev *pdev, void *data)
+{
+	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
+	return 0;
+}
+
+static void fix_bus_dma_limit(struct pci_dev *dev)
+{
+	pci_walk_bus(dev->subordinate, setup_bus_dma_cb, NULL);
+	pr_info("Set zx200 bus_dma_limit to 32-bit\n");
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ZHAOXIN, 0x071f, fix_bus_dma_limit);
+
+#ifdef CONFIG_DCA
+static void enable_sw_dca(struct pci_dev *dev)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus);
+	unsigned long node, rc_index, dca_ctl, dca_conf;
+	int i;
+
+	if (dev->class >> 8 != PCI_CLASS_NETWORK_ETHERNET)
+		return;
+
+	node = hose->node;
+	rc_index = hose->index;
+
+	for (i = 0; i < 256; i++) {
+		dca_conf = read_piu_ior1(node, rc_index, DEVICEID0 + (i << 7));
+		if (dca_conf >> 63)
+			continue;
+		else {
+			dca_conf = (1UL << 63) | (dev->bus->number << 8) | dev->devfn;
+			pr_info("dca device index %d, dca_conf = %#lx\n", i, dca_conf);
+			write_piu_ior1(node, rc_index, DEVICEID0 + (i << 7), dca_conf);
+			break;
+		}
+	}
+
+	dca_ctl = read_piu_ior1(node, rc_index, DCACONTROL);
+	if (dca_ctl & 0x1) {
+		dca_ctl = 0x2;
+		write_piu_ior1(node, rc_index, DCACONTROL, dca_ctl);
+		pr_info("Node %ld RC %ld enable DCA 1.0\n", node, rc_index);
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, enable_sw_dca);
+#endif
+
+/**
+ * There are some special aspects to the Root Complex of Sunway:
+ * 1. The Root Complex config space base address is different
+ *    from the EP config space base address.
+ * 2. In the case of multiple Root Complexes, each Root Complex
+ *    has its own config space base address.
+ *
+ * This means that even if multiple Root Complexes share the
+ * same segment group number, their bus numbers can still
+ * overlap.
+ *
+ * But due to a Xorg-related issue, the bus numbers of multiple
+ * Root Complexes must not overlap. So, after scanning a Root
+ * Complex, use "last_bus" to record the bus number following
+ * the highest bus number used so far, and use it as the start
+ * bus number of the next Root Complex to be scanned.
+ *
+ * An open question: with too many RCs, might 256 bus numbers
+ * be insufficient?
+ */
+static unsigned char last_bus;
+
+void sw64_pci_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+	struct pci_controller *hose = NULL;
+	struct resource_entry *entry = NULL;
+	struct pci_bus *bus = bridge->bus;
+	unsigned long flags = 0;
+	unsigned int init_busnr = 0;
+
+	hose = pci_bus_to_pci_controller(bus);
+
+	resource_list_for_each_entry(entry, &bridge->windows) {
+		flags = entry->res->flags;
+		if (flags & IORESOURCE_IO) {
+			entry->offset = entry->res->start;
+			hose->io_space = entry->res;
+		} else if (flags & IORESOURCE_BUS) {
+			entry->res->start = last_bus;
+			hose->busn_space = entry->res;
+		} else if (flags & IORESOURCE_MEM) {
+			if (!(flags & IORESOURCE_PREFETCH)) {
+				entry->offset = entry->res->start - PCI_32BIT_MEMIO;
+				hose->mem_space = entry->res;
+			} else
+				hose->pre_mem_space = entry->res;
+		}
+	}
+
+	/**
+	 * The Root Complex is scanned and bus numbers are updated in
+	 * the kernel, not in firmware; firmware just passes 0x0-0xff
+	 * via _CRS.
+	 *
+	 * So the bus number of the PCI host bridge needs to be updated here.
+	 */
+	bridge->busnr = last_bus;
+	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus), last_bus);
+
+	/**
+	 * At this point, the pci_bus has been created using the old
+	 * bridge->busnr, so bus->number needs to be updated here.
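+ *
+ * Example: if RC0 in this segment was scanned as buses 0x00-0x0f,
+ * last_bus is now 0x10, so this bridge's root bus becomes 0x10 rather
+ * than a second, colliding bus 0x00.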
+ */
+	bus->number = last_bus;
+
+	bridge->swizzle_irq = pci_common_swizzle;
+	bridge->map_irq = sw64_pci_map_irq;
+
+	init_busnr = (0xff << 16) + ((last_bus + 1) << 8) + (last_bus);
+	write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr);
+
+	hose->first_busno = last_bus + (is_in_host() ? 1 : 0);
+
+	pci_add_flags(PCI_REASSIGN_ALL_BUS);
+}
+
+static void sw64_pci_root_bridge_reserve_legacy_io(struct pci_host_bridge *bridge)
+{
+	struct pci_bus *bus = bridge->bus;
+	struct resource_entry *entry = NULL;
+	struct resource *res = NULL;
+
+	resource_list_for_each_entry(entry, &bridge->windows) {
+		if (!(entry->res->flags & IORESOURCE_IO))
+			continue;
+
+		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+		if (res == NULL) {
+			pr_err("alloc resource for legacy io out of mem\n");
+			return;
+		}
+
+		res->name = "legacy io";
+		res->flags = IORESOURCE_IO;
+		res->start = entry->res->start;
+		res->end = (res->start + 0xFFF) & 0xFFFFFFFFFFFFFFFFUL;
+
+		pr_info("reserving legacy io %pR for domain %04x\n",
+			res, pci_domain_nr(bus));
+		if (request_resource(entry->res, res)) {
+			pr_err("pci %04x:%02x reserve legacy io %pR failed\n",
+				pci_domain_nr(bus), bus->number, res);
+			kfree(res);
+		}
+	}
+}
+
+void sw64_pci_root_bridge_scan_finish_up(struct pci_host_bridge *bridge)
+{
+	struct pci_controller *hose = NULL;
+	struct pci_bus *bus = NULL;
+	unsigned int init_busnr = 0;
+
+	bus = bridge->bus;
+
+	hose = pci_bus_to_pci_controller(bus);
+	hose->bus = bus;
+
+	if (is_in_host())
+		last_bus = chip_pcie_configure(hose);
+	else {
+		while (pci_find_bus(pci_domain_nr(bus), last_bus))
+			last_bus++;
+	}
+
+	hose->last_busno = last_bus;
+	hose->busn_space->end = last_bus;
+
+	init_busnr = read_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS);
+	init_busnr &= ~(0xff << 16);
+	init_busnr |= last_bus << 16;
+	write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr);
+
+	pci_bus_update_busn_res_end(bus, last_bus);
+	last_bus++;
+
+	pr_info("bus number updated to %u\n", last_bus);
+
+	if (is_in_host())
+		sw64_pci_root_bridge_reserve_legacy_io(bridge);
+
+	/**
+	 * The Root Complex of SW64 does not support ASPM, which prevents
+	 * the control field (_OSC) from being updated.
+	 *
+	 * Related logic can be found in "negotiate_os_control".
+	 */
+	bridge->native_aer = 1;
+	bridge->native_pme = 1;
+
+	/**
+	 * Since buggy firmware may configure invalid bridge bus numbers,
+	 * the kernel re-assigns all PCI bus numbers when scanning the
+	 * Root Complex.
+	 *
+	 * However, users may trigger a PCI bus rescan from userspace with
+	 * the command below:
+	 *
+	 * > echo 1 > /sys/bus/pci/rescan
+	 *
+	 * Unexpected errors may occur on the endpoint devices due to the
+	 * re-assigned bus numbers of upstream bridges.
+	 *
+	 * To work around this problem, the flag PCI_REASSIGN_ALL_BUS is
+	 * set before scanning the Root Complex and cleared once the scan
+	 * completes.
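+ *
+ * (pci_add_flags() was called in sw64_pci_root_bridge_prepare() above,
+ * so the set/clear pair brackets exactly one Root Complex scan; a later
+ * user-triggered rescan then keeps the bus numbers already assigned.)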
+ */ + pci_clear_flags(PCI_REASSIGN_ALL_BUS); +} diff --git a/arch/sw_64/platform/Makefile b/arch/sw_64/platform/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4c0edceb4a2c1f4f7c8a5ee16617e80161b771a1 --- /dev/null +++ b/arch/sw_64/platform/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PLATFORM_XUELANG) += cpufreq_xuelang.o diff --git a/arch/sw_64/platform/cpufreq_xuelang.c b/arch/sw_64/platform/cpufreq_xuelang.c new file mode 100644 index 0000000000000000000000000000000000000000..1259e58dc874ffa691d189dfd46d4e120cdb2cef --- /dev/null +++ b/arch/sw_64/platform/cpufreq_xuelang.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include +#include +#include + +/* Minimum CLK support */ +enum { + DC_0, DC_1, DC_2, DC_3, DC_4, DC_5, DC_6, DC_7, DC_8, + DC_9, DC_10, DC_11, DC_12, DC_13, DC_14, DC_15, DC_RESV +}; + +struct cpufreq_frequency_table freq_table[] = { + {0, 200, CPUFREQ_ENTRY_INVALID}, + {0, DC_1, CPUFREQ_ENTRY_INVALID}, + {0, DC_2, 0}, + {0, DC_3, 0}, + {0, DC_4, 0}, + {0, DC_5, 0}, + {0, DC_6, 0}, + {0, DC_7, 0}, + {0, DC_8, 0}, + {0, DC_9, 0}, + {0, DC_10, 0}, + {0, DC_11, 0}, + {0, DC_12, 0}, + {0, DC_13, 0}, + {0, DC_14, 0}, + {0, DC_15, 0}, + {-1, DC_RESV, CPUFREQ_TABLE_END}, +}; + + +static struct platform_device sw64_cpufreq_device = { + .name = "sw64_cpufreq", + .id = -1, +}; + +static int __init sw64_cpufreq_init(void) +{ + int i; + unsigned char external_clk; + unsigned long max_rate, freq_off; + + max_rate = get_cpu_freq() / 1000; + + external_clk = *((unsigned char *)__va(MB_EXTCLK)); + + if (external_clk == 240) + freq_off = 60000; + else + freq_off = 50000; + + /* clock table init */ + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (i == 1) + freq_table[i].driver_data = freq_off * 24; + if (i == 2) + freq_table[i].frequency = freq_off * 36; + if (i > 2) + freq_table[i].frequency = freq_off * 38 + ((i - 3) * freq_off); + + if (freq_table[i].frequency == max_rate) + freq_table[i + 1].frequency = CPUFREQ_TABLE_END; + } + + return platform_device_register(&sw64_cpufreq_device); +} +arch_initcall(sw64_cpufreq_init); + +char curruent_policy[CPUFREQ_NAME_LEN]; + +static struct clk cpu_clk = { + .name = "cpu_clk", + .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES, + .rate = 2400000000, +}; + +struct clk *sw64_clk_get(struct device *dev, const char *id) +{ + return &cpu_clk; +} +EXPORT_SYMBOL(sw64_clk_get); + +unsigned int __sw64_cpufreq_get(struct cpufreq_policy *policy) +{ + int i; + u64 val; + struct cpufreq_frequency_table *ft = policy->freq_table; + + val = sw64_io_read(0, CLK_CTL) >> CORE_PLL2_CFG_SHIFT; + + for (i = 0; ft[i].frequency != CPUFREQ_TABLE_END; i++) { + if (val == i) + return ft[i].frequency; + } + return 0; +} +EXPORT_SYMBOL(__sw64_cpufreq_get); + +void sw64_set_rate(unsigned int index) +{ + unsigned int i, val; + int cpu_num; + + cpu_num = sw64_chip->get_cpu_num(); + + for (i = 0; i < cpu_num; i++) { + sw64_io_write(i, CLK_CTL, CORE_CLK2_R | CORE_CLK2_V | CLK_PRT); + val = sw64_io_read(i, CLK_CTL); + + sw64_io_write(i, CLK_CTL, val | index << CORE_PLL2_CFG_SHIFT); + + udelay(1); + + sw64_io_write(i, CLK_CTL, CORE_CLK2_V | CLK_PRT + | index << CORE_PLL2_CFG_SHIFT); + val = sw64_io_read(i, CLK_CTL); + + /* LV1 select PLL1/PLL2 */ + sw64_io_write(i, CLU_LV1_SEL, CLK_LV1_SEL_MUXA | CLK_LV1_SEL_PRT); + + /* Set CLK_CTL PLL0 */ + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V); + + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V + | index 
<< CORE_PLL0_CFG_SHIFT); + + udelay(1); + + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + + /* LV1 select PLL0/PLL1 */ + sw64_io_write(i, CLU_LV1_SEL, CLK_LV1_SEL_MUXB | CLK_LV1_SEL_PRT); + } +} +EXPORT_SYMBOL_GPL(sw64_set_rate); diff --git a/arch/sw_64/tools/.gitignore b/arch/sw_64/tools/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f73e86272b7616f0c2ce1d704e1966da94aed182 --- /dev/null +++ b/arch/sw_64/tools/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +relocs diff --git a/arch/sw_64/tools/Makefile b/arch/sw_64/tools/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..66f55b035e223cc3f9073c6fbd252385293e4475 --- /dev/null +++ b/arch/sw_64/tools/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 + +hostprogs += relocs +relocs-objs += relocs.o +relocs-objs += relocs_main.o +PHONY += relocs +relocs: $(obj)/relocs + @: diff --git a/arch/sw_64/tools/relocs.c b/arch/sw_64/tools/relocs.c new file mode 100644 index 0000000000000000000000000000000000000000..ec0ed422a8369172d2db92550dfed98619419961 --- /dev/null +++ b/arch/sw_64/tools/relocs.c @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "relocs.h" + +#define ELF_BITS 64 + +#define ELF_MACHINE EM_SW64 +#define ELF_MACHINE_NAME "SW64" +#define SHT_REL_TYPE SHT_RELA +#define Elf_Rel Elf64_Rela + +#define ELF_CLASS ELFCLASS64 +#define ELF_R_SYM(val) ELF64_R_SYM(val) +#define ELF_R_TYPE(val) ELF64_R_TYPE(val) +#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o) +#define ELF_ST_BIND(o) ELF64_ST_BIND(o) +#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o) + +#define ElfW(type) _ElfW(ELF_BITS, type) +#define _ElfW(bits, type) __ElfW(bits, type) +#define __ElfW(bits, type) Elf##bits##_##type + +#define Elf_Addr ElfW(Addr) +#define Elf_Ehdr ElfW(Ehdr) +#define Elf_Phdr ElfW(Phdr) +#define Elf_Shdr ElfW(Shdr) +#define Elf_Sym ElfW(Sym) + +static Elf_Ehdr ehdr; + +struct relocs { + uint32_t *offset; + unsigned long count; + unsigned long size; +}; + +static struct relocs relocs; + +struct section { + Elf_Shdr shdr; + struct section *link; + Elf_Sym *symtab; + Elf_Rel *reltab; + char *strtab; + long shdr_offset; +}; +static struct section *secs; + +static const char * const regex_sym_kernel = { +/* Symbols matching these regex's should never be relocated */ + "^(__crc_)", +}; + +static regex_t sym_regex_c; + +static int regex_skip_reloc(const char *sym_name) +{ + return !regexec(&sym_regex_c, sym_name, 0, NULL, 0); +} + +static void regex_init(void) +{ + char errbuf[128]; + int err; + + err = regcomp(&sym_regex_c, regex_sym_kernel, + REG_EXTENDED|REG_NOSUB); + + if (err) { + regerror(err, &sym_regex_c, errbuf, sizeof(errbuf)); + die("%s", errbuf); + } +} + +static const char *rel_type(unsigned int type) +{ + static const char * const type_name[] = { +#define REL_TYPE(X)[X] = #X + REL_TYPE(R_SW64_NONE), + REL_TYPE(R_SW64_REFQUAD), + REL_TYPE(R_SW64_LITERAL), + REL_TYPE(R_SW64_LITUSE), + REL_TYPE(R_SW64_GPDISP), + REL_TYPE(R_SW64_BRADDR), + REL_TYPE(R_SW64_HINT), + REL_TYPE(R_SW64_SREL32), + REL_TYPE(R_SW64_GPRELHIGH), + REL_TYPE(R_SW64_GPRELLOW), +#undef REL_TYPE + }; + const char *name = "unknown type rel type name"; + + if (type < ARRAY_SIZE(type_name) && type_name[type]) + name = type_name[type]; + return name; +} + +static const char *sec_name(unsigned int shndx) +{ + const char *sec_strtab; + const char *name; + + sec_strtab = secs[ehdr.e_shstrndx].strtab; + if (shndx < ehdr.e_shnum) + name = sec_strtab + 
secs[shndx].shdr.sh_name; + else if (shndx == SHN_ABS) + name = "ABSOLUTE"; + else if (shndx == SHN_COMMON) + name = "COMMON"; + else + name = ""; + return name; +} + +static struct section *sec_lookup(const char *secname) +{ + int i; + + for (i = 0; i < ehdr.e_shnum; i++) + if (strcmp(secname, sec_name(i)) == 0) + return &secs[i]; + + return NULL; +} + +static const char *sym_name(const char *sym_strtab, Elf_Sym *sym) +{ + const char *name; + + if (sym->st_name) + name = sym_strtab + sym->st_name; + else + name = sec_name(sym->st_shndx); + return name; +} + +#define le16_to_cpu(val) (val) +#define le32_to_cpu(val) (val) +#define le64_to_cpu(val) (val) + +#define cpu_to_le16(val) (val) +#define cpu_to_le32(val) (val) +#define cpu_to_le64(val) (val) + +static uint16_t elf16_to_cpu(uint16_t val) +{ + return le16_to_cpu(val); +} + +static uint32_t elf32_to_cpu(uint32_t val) +{ + return le32_to_cpu(val); +} + +static uint32_t cpu_to_elf32(uint32_t val) +{ + return cpu_to_le32(val); +} + +#define elf_half_to_cpu(x) elf16_to_cpu(x) +#define elf_word_to_cpu(x) elf32_to_cpu(x) + +#if ELF_BITS == 64 +static uint64_t elf64_to_cpu(uint64_t val) +{ + return le64_to_cpu(val); +} +#define elf_addr_to_cpu(x) elf64_to_cpu(x) +#define elf_off_to_cpu(x) elf64_to_cpu(x) +#define elf_xword_to_cpu(x) elf64_to_cpu(x) +#else +#define elf_addr_to_cpu(x) elf32_to_cpu(x) +#define elf_off_to_cpu(x) elf32_to_cpu(x) +#define elf_xword_to_cpu(x) elf32_to_cpu(x) +#endif + +static void read_ehdr(FILE *fp) +{ + if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) + die("Cannot read ELF header: %s\n", strerror(errno)); + + if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) + die("No ELF magic\n"); + + if (ehdr.e_ident[EI_CLASS] != ELF_CLASS) + die("Not a %d bit executable\n", ELF_BITS); + + if ((ehdr.e_ident[EI_DATA] != ELFDATA2LSB) && + (ehdr.e_ident[EI_DATA] != ELFDATA2MSB)) + die("Unknown ELF Endianness\n"); + + if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) + die("Unknown ELF version\n"); + + /* Convert the fields to native endian */ + ehdr.e_type = elf_half_to_cpu(ehdr.e_type); + ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine); + ehdr.e_version = elf_word_to_cpu(ehdr.e_version); + ehdr.e_entry = elf_addr_to_cpu(ehdr.e_entry); + ehdr.e_phoff = elf_off_to_cpu(ehdr.e_phoff); + ehdr.e_shoff = elf_off_to_cpu(ehdr.e_shoff); + ehdr.e_flags = elf_word_to_cpu(ehdr.e_flags); + ehdr.e_ehsize = elf_half_to_cpu(ehdr.e_ehsize); + ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize); + ehdr.e_phnum = elf_half_to_cpu(ehdr.e_phnum); + ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize); + ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum); + ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx); + + if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) + die("Unsupported ELF header type\n"); + + if (ehdr.e_machine != ELF_MACHINE) + die("Not for %s\n", ELF_MACHINE_NAME); + + if (ehdr.e_version != EV_CURRENT) + die("Unknown ELF version\n"); + + if (ehdr.e_ehsize != sizeof(Elf_Ehdr)) + die("Bad Elf header size\n"); + + if (ehdr.e_phentsize != sizeof(Elf_Phdr)) + die("Bad program header entry\n"); + + if (ehdr.e_shentsize != sizeof(Elf_Shdr)) + die("Bad section header entry\n"); + + if (ehdr.e_shstrndx >= ehdr.e_shnum) + die("String table index out of bounds\n"); +} + +static void read_shdrs(FILE *fp) +{ + int i; + Elf_Shdr shdr; + + secs = calloc(ehdr.e_shnum, sizeof(struct section)); + if (!secs) + die("Unable to allocate %d section headers\n", ehdr.e_shnum); + + if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", ehdr.e_shoff, 
strerror(errno)); + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + sec->shdr_offset = ftell(fp); + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot read ELF section headers %d/%d: %s\n", + i, ehdr.e_shnum, strerror(errno)); + sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name); + sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type); + sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags); + sec->shdr.sh_addr = elf_addr_to_cpu(shdr.sh_addr); + sec->shdr.sh_offset = elf_off_to_cpu(shdr.sh_offset); + sec->shdr.sh_size = elf_xword_to_cpu(shdr.sh_size); + sec->shdr.sh_link = elf_word_to_cpu(shdr.sh_link); + sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info); + sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign); + sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize); + if (sec->shdr.sh_link < ehdr.e_shnum) + sec->link = &secs[sec->shdr.sh_link]; + } +} + +static void read_strtabs(FILE *fp) +{ + int i; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_STRTAB) + continue; + + sec->strtab = malloc(sec->shdr.sh_size); + if (!sec->strtab) + die("malloc of %d bytes for strtab failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->strtab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read symbol table: %s\n", strerror(errno)); + } +} + +static void read_symtabs(FILE *fp) +{ + int i, j; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_SYMTAB) + continue; + + sec->symtab = malloc(sec->shdr.sh_size); + if (!sec->symtab) + die("malloc of %d bytes for symtab failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->symtab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read symbol table: %s\n", strerror(errno)); + + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) { + Elf_Sym *sym = &sec->symtab[j]; + + sym->st_name = elf_word_to_cpu(sym->st_name); + sym->st_value = elf_addr_to_cpu(sym->st_value); + sym->st_size = elf_xword_to_cpu(sym->st_size); + sym->st_shndx = elf_half_to_cpu(sym->st_shndx); + } + } +} + +static void read_relocs(FILE *fp) +{ + static unsigned long base; + int i, j; + + if (!base) { + struct section *sec = sec_lookup(".text"); + + if (!sec) + die("Could not find .text section\n"); + + base = sec->shdr.sh_addr; + } + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + + sec->reltab = malloc(sec->shdr.sh_size); + if (!sec->reltab) + die("malloc of %d bytes for relocs failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->reltab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read symbol table: %s\n", strerror(errno)); + + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel = &sec->reltab[j]; + + rel->r_offset = elf_addr_to_cpu(rel->r_offset); + /* Set offset into kernel image */ + rel->r_offset -= base; + /* Convert SW64 RELA format - only the symbol + * index needs converting to native endianness + */ + rel->r_info = elf_xword_to_cpu(rel->r_info); +#if (SHT_REL_TYPE == SHT_RELA) + rel->r_addend = 
elf_xword_to_cpu(rel->r_addend); +#endif + } + } +} + +static void remove_relocs(FILE *fp) +{ + int i; + Elf_Shdr shdr; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + + if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr_offset, strerror(errno)); + + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot read ELF section headers %d/%d: %s\n", + i, ehdr.e_shnum, strerror(errno)); + + /* Set relocation section size to 0, effectively removing it. + * This is necessary due to lack of support for relocations + * in objcopy when creating 32bit elf from 64bit elf. + */ + shdr.sh_size = 0; + + if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr_offset, strerror(errno)); + + if (fwrite(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot write ELF section headers %d/%d: %s\n", + i, ehdr.e_shnum, strerror(errno)); + } +} + +static void add_reloc(struct relocs *r, uint32_t offset, unsigned int type) +{ + /* Relocation representation in binary table: + * |76543210|76543210|76543210|76543210| + * | Type | offset from _text >> 2 | + */ + offset >>= 2; + if (offset > 0x00FFFFFF) + die("Kernel image exceeds maximum size for relocation!\n"); + + offset = (offset & 0x00FFFFFF) | ((type & 0xFF) << 24); + + if (r->count == r->size) { + unsigned long newsize = r->size + 50000; + void *mem = realloc(r->offset, newsize * sizeof(r->offset[0])); + + if (!mem) + die("realloc failed\n"); + + r->offset = mem; + r->size = newsize; + } + r->offset[r->count++] = offset; +} + +static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel, + Elf_Sym *sym, const char *symname)) +{ + int i; + + /* Walk through the relocations */ + for (i = 0; i < ehdr.e_shnum; i++) { + char *sym_strtab; + Elf_Sym *sh_symtab; + struct section *sec_applies, *sec_symtab; + int j; + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + sec_symtab = sec->link; + sec_applies = &secs[sec->shdr.sh_info]; + if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) + continue; + + sh_symtab = sec_symtab->symtab; + sym_strtab = sec_symtab->link->strtab; + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel = &sec->reltab[j]; + Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)]; + const char *symname = sym_name(sym_strtab, sym); + + process(sec, rel, sym, symname); + } + } +} + +static int do_reloc(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, + const char *symname) +{ + unsigned int r_type = ELF_R_TYPE(rel->r_info); + unsigned int bind = ELF_ST_BIND(sym->st_info); + + if ((bind == STB_WEAK) && (sym->st_value == 0)) { + /* Don't relocate weak symbols without a target */ + return 0; + } + + if (regex_skip_reloc(symname)) + return 0; + + switch (r_type) { + case R_SW64_NONE: + case R_SW64_LITERAL: /* relocated by GOT */ + case R_SW64_LITUSE: + case R_SW64_GPDISP: + case R_SW64_BRADDR: + case R_SW64_HINT: + case R_SW64_SREL32: + case R_SW64_GPRELHIGH: + case R_SW64_GPRELLOW: + case R_SW64_LITERAL_GOT: + /* + * NONE can be ignored and PC relative relocations don't + * need to be adjusted. 
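+	 *
+	 * Only absolute 64-bit references (R_SW64_REFQUAD, handled below)
+	 * need to be recorded so they can be patched once the kernel's
+	 * final load address is known at boot time.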
+ */ + break; + + case R_SW64_REFQUAD: + add_reloc(&relocs, rel->r_offset, r_type); + break; + + default: + die("Unsupported relocation type: %s (%d)\n", + rel_type(r_type), r_type); + break; + } + + return 0; +} + +static int write_reloc_as_bin(uint32_t v, FILE *f) +{ + unsigned char buf[4]; + + v = cpu_to_elf32(v); + + memcpy(buf, &v, sizeof(uint32_t)); + return fwrite(buf, 1, 4, f); +} + +static int write_reloc_as_text(uint32_t v, FILE *f) +{ + int res; + + res = fprintf(f, "\t.long 0x%08"PRIx32"\n", v); + if (res < 0) + return res; + else + return sizeof(uint32_t); +} + +static void emit_relocs(int as_text, int as_bin, FILE *outf) +{ + int i; + int (*write_reloc)(uint32_t, FILE *) = write_reloc_as_bin; + int size = 0; + int size_reserved; + struct section *sec_reloc; + + sec_reloc = sec_lookup(".data.reloc"); + if (!sec_reloc) + die("Could not find relocation section\n"); + + size_reserved = sec_reloc->shdr.sh_size; + /* Collect up the relocations */ + walk_relocs(do_reloc); + + /* Print the relocations */ + if (as_text) { + /* Print the relocations in a form suitable that + * gas will like. + */ + printf(".section \".data.reloc\",\"a\"\n"); + printf(".balign 8\n"); + /* Output text to stdout */ + write_reloc = write_reloc_as_text; + outf = stdout; + } else if (as_bin) { + /* Output raw binary to stdout */ + outf = stdout; + } else { + /* + * Seek to offset of the relocation section. + * Each relocation is then written into the + * vmlinux kernel image. + */ + if (fseek(outf, sec_reloc->shdr.sh_offset, SEEK_SET) < 0) { + die("Seek to %d failed: %s\n", + sec_reloc->shdr.sh_offset, strerror(errno)); + } + } + + for (i = 0; i < relocs.count; i++) + size += write_reloc(relocs.offset[i], outf); + + /* Print a stop, but only if we've actually written some relocs */ + if (size) + size += write_reloc(0, outf); + + if (size > size_reserved) + /* + * Die, but suggest a value for CONFIG_RELOCATION_TABLE_SIZE + * which will fix this problem and allow a bit of headroom + * if more kernel features are enabled + */ + die("Relocations overflow available space!\n" + "Please adjust CONFIG_RELOCATION_TABLE_SIZE " + "to at least 0x%08x\n", (size + 0x1000) & ~0xFFF); +} + +/* + * As an aid to debugging problems with different linkers + * print summary information about the relocs. + * Since different linkers tend to emit the sections in + * different orders we use the section names in the output. 
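+ *
+ * Each output line shows the reloc section, offset, reloc type, symbol
+ * and the symbol's section, matching the header printed by
+ * print_reloc_info().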
+ */ +static int do_reloc_info(struct section *sec, Elf_Rel *rel, ElfW(Sym) * sym, + const char *symname) +{ + printf("%16s 0x%x %16s %40s %16s\n", + sec_name(sec->shdr.sh_info), + (unsigned int)rel->r_offset, + rel_type(ELF_R_TYPE(rel->r_info)), + symname, + sec_name(sym->st_shndx)); + return 0; +} + +static void print_reloc_info(void) +{ + printf("%16s %10s %16s %40s %16s\n", + "reloc section", + "offset", + "reloc type", + "symbol", + "symbol section"); + walk_relocs(do_reloc_info); +} + +void process(FILE *fp, int as_text, int as_bin, + int show_reloc_info, int keep_relocs) +{ + regex_init(); + read_ehdr(fp); + read_shdrs(fp); + read_strtabs(fp); + read_symtabs(fp); + read_relocs(fp); + if (show_reloc_info) { + print_reloc_info(); + return; + } + emit_relocs(as_text, as_bin, fp); + if (!keep_relocs) + remove_relocs(fp); +} diff --git a/arch/sw_64/tools/relocs.h b/arch/sw_64/tools/relocs.h new file mode 100644 index 0000000000000000000000000000000000000000..17c7e31113a0e5f93ac2b596d54e31bc9de7fe58 --- /dev/null +++ b/arch/sw_64/tools/relocs.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_TOOLS_RELOCS_H +#define _SW64_TOOLS_RELOCS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define USE_BSD +#include +#include + +#define EM_SW64 0x9916 +/* + * SW64 ELF relocation types + */ +#define R_SW64_NONE 0 /* No reloc */ +#define R_SW64_REFLONG 1 /* Direct 32 bit */ +#define R_SW64_REFQUAD 2 /* Direct 64 bit */ +#define R_SW64_GPREL32 3 /* GP relative 32 bit */ +#define R_SW64_LITERAL 4 /* GP relative 16 bit w/optimization */ +#define R_SW64_LITUSE 5 /* Optimization hint for LITERAL */ +#define R_SW64_GPDISP 6 /* Add displacement to GP */ +#define R_SW64_BRADDR 7 /* PC+4 relative 23 bit shifted */ +#define R_SW64_HINT 8 /* PC+4 relative 16 bit shifted */ +#define R_SW64_SREL16 9 /* PC relative 16 bit */ +#define R_SW64_SREL32 10 /* PC relative 32 bit */ +#define R_SW64_SREL64 11 /* PC relative 64 bit */ +#define R_SW64_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ +#define R_SW64_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ +#define R_SW64_GPREL16 19 /* GP relative 16 bit */ +#define R_SW64_COPY 24 /* Copy symbol at runtime */ +#define R_SW64_GLOB_DAT 25 /* Create GOT entry */ +#define R_SW64_JMP_SLOT 26 /* Create PLT entry */ +#define R_SW64_RELATIVE 27 /* Adjust by program base */ +#define R_SW64_BRSGP 28 +#define R_SW64_TLSGD 29 +#define R_SW64_TLS_LDM 30 +#define R_SW64_DTPMOD64 31 +#define R_SW64_GOTDTPREL 32 +#define R_SW64_DTPREL64 33 +#define R_SW64_DTPRELHI 34 +#define R_SW64_DTPRELLO 35 +#define R_SW64_DTPREL16 36 +#define R_SW64_GOTTPREL 37 +#define R_SW64_TPREL64 38 +#define R_SW64_TPRELHI 39 +#define R_SW64_TPRELLO 40 +#define R_SW64_TPREL16 41 +#define R_SW64_LITERAL_GOT 43 /* GP relative */ + +void die(char *fmt, ...); + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +enum symtype { + S_ABS, + S_REL, + S_SEG, + S_LIN, + S_NSYMTYPES +}; + +void process(FILE *fp, int as_text, int as_bin, + int show_reloc_info, int keep_relocs); +#endif /* _SW64_TOOLS_RELOCS_H */ diff --git a/arch/sw_64/tools/relocs_main.c b/arch/sw_64/tools/relocs_main.c new file mode 100644 index 0000000000000000000000000000000000000000..30a830a070dbe98b4b0b4770b9fc3ca1d3406941 --- /dev/null +++ b/arch/sw_64/tools/relocs_main.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "relocs.h" + +void die(char *fmt, ...) 
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vfprintf(stderr, fmt, ap);
+	va_end(ap);
+	exit(1);
+}
+
+static void usage(void)
+{
+	die("relocs [--reloc-info|--text|--bin|--keep] vmlinux\n");
+}
+
+int main(int argc, char **argv)
+{
+	int show_reloc_info, as_text, as_bin, keep_relocs;
+	const char *fname;
+	FILE *fp;
+	int i;
+	unsigned char e_ident[EI_NIDENT];
+
+	show_reloc_info = 0;
+	as_text = 0;
+	as_bin = 0;
+	keep_relocs = 0;
+	fname = NULL;
+	for (i = 1; i < argc; i++) {
+		char *arg = argv[i];
+
+		if (*arg == '-') {
+			if (strcmp(arg, "--reloc-info") == 0) {
+				show_reloc_info = 1;
+				continue;
+			}
+			if (strcmp(arg, "--text") == 0) {
+				as_text = 1;
+				continue;
+			}
+			if (strcmp(arg, "--bin") == 0) {
+				as_bin = 1;
+				continue;
+			}
+			if (strcmp(arg, "--keep") == 0) {
+				keep_relocs = 1;
+				continue;
+			}
+		} else if (!fname) {
+			fname = arg;
+			continue;
+		}
+		usage();
+	}
+	if (!fname)
+		usage();
+
+	fp = fopen(fname, "r+");
+	if (!fp)
+		die("Cannot open %s: %s\n", fname, strerror(errno));
+
+	if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT)
+		die("Cannot read %s: %s\n", fname, strerror(errno));
+
+	rewind(fp);
+	if (e_ident[EI_CLASS] == ELFCLASS64)
+		process(fp, as_text, as_bin, show_reloc_info, keep_relocs);
+	else
+		die("Unsupported ELF class on SW64: %s\n", fname);
+	//process_32(fp, as_text, as_bin, show_reloc_info, keep_relocs);
+	fclose(fp);
+	return 0;
+}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 05c82fd5d0f60b5712bbf9083e231ae65157a5de..6018b6900a684f4d65cff7aad668ef325d6af593 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -100,6 +100,7 @@ config X86
 	select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
 	select ARCH_HAS_SYSCALL_WRAPPER
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARCH_USE_MEMREMAP_PROT
 	select ARCH_HAS_DEBUG_WX
 	select ARCH_HAS_ZONE_DMA_SET if EXPERT
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -479,8 +480,11 @@ config GOLDFISH
 config X86_CPU_RESCTRL
 	bool "x86 CPU resource control support"
 	depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
+	depends on MISC_FILESYSTEMS
 	select KERNFS
-	select PROC_CPU_RESCTRL		if PROC_FS
+	select ARCH_HAS_CPU_RESCTRL
+	select RESCTRL_FS
+	select RESCTRL_FS_PSEUDO_LOCK
 	help
 	  Enable x86 CPU resource control support.
@@ -896,6 +900,7 @@ config INTEL_TDX_GUEST
 endif # HYPERVISOR_GUEST
 
 source "arch/x86/Kconfig.cpu"
+source "arch/x86/Kconfig.fpu"
 
 config HPET_TIMER
 	def_bool X86_64
@@ -1312,18 +1317,43 @@ config X86_REBOOTFIXUPS
 
 config MICROCODE
 	def_bool y
-	depends on CPU_SUP_AMD || CPU_SUP_INTEL
+	depends on CPU_SUP_AMD || CPU_SUP_INTEL || CPU_SUP_HYGON
+
+config MICROCODE_INITRD32
+	def_bool y
+	depends on MICROCODE && X86_32 && BLK_DEV_INITRD
 
 config MICROCODE_LATE_LOADING
 	bool "Late microcode loading (DANGEROUS)"
 	default n
-	depends on MICROCODE
+	depends on MICROCODE && SMP
 	help
 	  Loading microcode late, when the system is up and executing
 	  instructions is a tricky business and should be avoided if
 	  possible. Just the sequence of synchronizing all cores and SMT
 	  threads is one fragile dance which does not guarantee that cores
 	  might not softlock after the loading. Therefore,
-	  use this at your own risk. Late loading taints the kernel too.
+	  use this at your own risk. Late loading taints the kernel unless the
+	  microcode header indicates that it is safe for late loading via the
+	  minimal revision check. This minimal revision check can be enforced on
+	  the kernel command line with "microcode.minrev=Y".
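For context, the late-load path described in the help text above is driven entirely from userspace through the long-standing microcode sysfs interface. A minimal sketch, assuming the updated blob has already been staged under /lib/firmware by your distribution's tooling::

	# trigger a late load of the staged microcode
	echo 1 > /sys/devices/system/cpu/microcode/reload

	# enforce the minimal revision check at boot by adding
	# microcode.minrev=Y to the kernel command line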
+
+config MICROCODE_LATE_FORCE_MINREV
+	bool "Enforce late microcode loading minimal revision check"
+	default n
+	depends on MICROCODE_LATE_LOADING
+	help
+	  To prevent users from late loading microcode that modifies features
+	  which are already in use, newer microcode patches carry a minimum
+	  revision field in the microcode header, which tells the kernel the
+	  minimum revision that must be active in the CPU before the new
+	  microcode can safely be loaded late into the running system. If
+	  disabled, the check is not enforced, but the kernel is tainted when
+	  the minimal revision check fails.
+
+	  This minimal revision check can also be controlled via the
+	  "microcode.minrev" parameter on the kernel command line.
+
+	  If unsure, say Y.
 
 config X86_MSR
 	tristate "/dev/cpu/*/msr - Model-specific register support"
@@ -1537,6 +1567,26 @@ config NUMA
 
 	  Otherwise, you should say N.
 
+config NUMA_AWARE_SPINLOCKS
+	bool "Numa-aware spinlocks"
+	depends on NUMA
+	depends on QUEUED_SPINLOCKS
+	depends on 64BIT
+	# For now, we depend on PARAVIRT_SPINLOCKS to make the patching work.
+	# This is awkward, but hopefully would be resolved once static_call()
+	# is available.
+	depends on PARAVIRT_SPINLOCKS
+	default y
+	help
+	  Introduce NUMA (Non Uniform Memory Access) awareness into
+	  the slow path of spinlocks.
+
+	  In this variant of qspinlock, the kernel will try to keep the lock
+	  on the same node, thus reducing the number of remote cache misses,
+	  while trading some of the short term fairness for better performance.
+
+	  Say N if you want absolute first-come, first-served fairness.
+
 config AMD_NUMA
 	def_bool y
 	prompt "Old style AMD Opteron NUMA detection"
@@ -2016,6 +2066,31 @@ config EFI_RUNTIME_MAP
 
 	  See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map.
 
+config HYGON_CSV
+	bool "Hygon secure virtualization CSV support"
+	default y
+	depends on CPU_SUP_HYGON && AMD_MEM_ENCRYPT
+	select MMU
+	select CMA
+	help
+	  Hygon CSV integrates a secure processor, memory encryption and
+	  memory isolation to provide the ability to protect a guest's
+	  private data. It has evolved from CSV and CSV2 to CSV3.
+
+	  For CSV, the guest's memory is encrypted.
+
+	  For CSV2, not only the guest's memory, but also the guest's vCPU
+	  registers are encrypted; neither other guests nor the host can
+	  tamper with the vCPU registers.
+
+	  For CSV3, the guest's context, such as vCPU registers, control
+	  block and nested page table, is accessed only by the guest itself
+	  and the secure processor. Neither other guests nor the host can
+	  tamper with the guest's context.
+
+	  Say Y here to enable support for the full capabilities of Hygon
+	  secure virtualization on Hygon processors.
+
 source "kernel/Kconfig.hz"
 
 config ARCH_SUPPORTS_KEXEC
diff --git a/arch/x86/Kconfig.fpu b/arch/x86/Kconfig.fpu
new file mode 100644
index 0000000000000000000000000000000000000000..04a235105186415d8ca2f28cec3a8c7953929116
--- /dev/null
+++ b/arch/x86/Kconfig.fpu
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig USING_FPU_IN_KERNEL_NONATOMIC
+	bool "Hygon large memory copy support"
+	help
+	  This option enables support for optimized large memory copy
+	  operations on Hygon processors in kernel space, using SSE2 or AVX2
+	  non-temporal (NT) copy instructions. NT instructions are streaming
+	  store instructions that bypass the on-chip cache and send data
+	  directly to a write-combining buffer.
+
+	  When this option is enabled, you can choose the specific instruction
+	  set to use for large memory copy: SSE2 or AVX2.
+	  Using these instruction sets can improve data throughput and reduce
+	  the number of cache misses during memory copy operations.
+
+if USING_FPU_IN_KERNEL_NONATOMIC
+
+choice
+	prompt "X86_HYGON_LMC"
+	depends on X86_64 && CPU_SUP_HYGON
+	default X86_HYGON_LMC_SSE2_ON
+	help
+	  Select the type of non-temporal (NT) copy instructions to use for
+	  large memory copy operations between kernel and user mode. You can
+	  choose between SSE2 or AVX2 instructions based on the processor
+	  capabilities and the size of the memory being copied.
+
+	  To use this feature, you also need to configure the minimum data
+	  copy size via /sys/c86_features/hygon_c86/nt_cpy_mini_len; values
+	  of 4096 and above are recommended.
+
+config X86_HYGON_LMC_SSE2_ON
+	bool "Using sse2 nt copy for large memory copy"
+	help
+	  When this feature is enabled, the kernel will use the
+	  copy_user_sse2_opt_string function for large memory copy operations.
+
+	  SSE2 (Streaming SIMD Extensions 2) instructions support non-temporal
+	  (NT) stores that bypass the CPU cache and write data directly to
+	  memory. This can improve performance for large memory copies by
+	  reducing cache pollution and taking advantage of the write-combining
+	  buffer.
+
+	  However, using SSE2 NT copy may require saving and restoring MMX and
+	  SSE2 register contexts during thread switching if an interruption
+	  occurs.
+
+config X86_HYGON_LMC_AVX2_ON
+	bool "Using avx2 nt copy for large memory copy"
+	help
+	  When this feature is enabled, the kernel will use the
+	  copy_user_avx2_pf64_nt_string function for large memory copy
+	  operations.
+
+	  AVX2 (Advanced Vector Extensions 2) instructions provide enhanced
+	  vector processing capabilities and support for non-temporal (NT)
+	  stores, which can significantly improve memory copy performance for
+	  large blocks of data. By bypassing the cache and writing data
+	  directly to memory, AVX2 NT copy can achieve higher throughput than
+	  SSE2 NT copy.
+
+	  Similar to SSE2, using AVX2 NT copy may require saving and restoring
+	  AVX2 register contexts if an interruption occurs during large memory
+	  copying, to ensure the process continues smoothly after resuming.
+
+endchoice
+endif
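The copy-length threshold mentioned in the help text above is tunable at runtime through the sysfs file the help text names. A short sketch, assuming the hygon_c86 interface is present on the running kernel::

	# require at least 4 KiB per copy before the NT path is used
	echo 4096 > /sys/c86_features/hygon_c86/nt_cpy_mini_len
	cat /sys/c86_features/hygon_c86/nt_cpy_mini_len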
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 658e9ec065c47615e453f04b1da7fcfc155def60..872bb46f4640367bcae9144fbc477876aa9d50eb 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -108,6 +108,7 @@ ifdef CONFIG_X86_64
 	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
 	vmlinux-objs-y += $(obj)/pgtable_64.o
 	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
+	vmlinux-objs-$(CONFIG_HYGON_CSV) += $(obj)/csv.o
 endif
 
 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c
new file mode 100644
index 0000000000000000000000000000000000000000..18e0bde5bca220ef62f8bdbcb6ee95f5ddc3eef2
--- /dev/null
+++ b/arch/x86/boot/compressed/csv.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hygon CSV Support
+ *
+ * Copyright (C) Hygon Info Technologies Ltd.
+ */
+
+#include "misc.h"
+
+#undef __init
+#undef __initdata
+#undef __pa
+#define __init
+#define __initdata
+#define __pa(x)	((unsigned long)(x))
+
+#include <asm/pgtable_types.h>
+#include <asm/msr-index.h>
+
+/* Include code for early secure calls */
+#include "../../kernel/csv-shared.c"
+
+static unsigned int csv3_enabled __section(".data");
+static unsigned int csv3_secure_call_init;
+
+void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr)
+{
+	if (!csv3_enabled)
+		return;
+
+	if ((set | clr) & _PAGE_ENC) {
+		if (set & _PAGE_ENC)
+			csv3_early_secure_call_ident_map(__pa(address), 1,
+							 CSV3_SECURE_CMD_ENC);
+
+		if (clr & _PAGE_ENC)
+			csv3_early_secure_call_ident_map(__pa(address), 1,
+							 CSV3_SECURE_CMD_DEC);
+	}
+}
+
+/*
+ * Invoke this before jumping to the real kernel, in case the secure call
+ * pages are not mapped in the identity page table.
+ *
+ * If no #VC has happened, there is no identity mapping in the page table
+ * for the secure call pages, and page faults are not supported at the
+ * early stage when the real kernel starts running. As a result, a CSV3
+ * guest would shut down as soon as it accessed the secure call pages at
+ * that point.
+ */
+void csv_init_secure_call_pages(void *boot_params)
+{
+	if (!csv3_enabled || csv3_secure_call_init)
+		return;
+
+	/*
+	 * boot_params may not be sanitized yet, but it is OK to access its
+	 * e820_table field.
+	 */
+	csv3_scan_secure_call_pages(boot_params);
+	csv3_early_secure_call_ident_map(0, 0, CSV3_SECURE_CMD_RESET);
+	csv3_secure_call_init = 1;
+}
+
+void csv_set_status(void)
+{
+	unsigned int eax;
+	unsigned int ebx;
+	unsigned int ecx;
+	unsigned int edx;
+
+	eax = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+
+	/* HygonGenuine */
+	if (ebx == CPUID_VENDOR_HygonGenuine_ebx &&
+	    ecx == CPUID_VENDOR_HygonGenuine_ecx &&
+	    edx == CPUID_VENDOR_HygonGenuine_edx &&
+	    sme_me_mask) {
+		unsigned long low, high;
+
+		asm volatile("rdmsr\n" : "=a" (low), "=d" (high) :
+			     "c" (MSR_AMD64_SEV));
+
+		if (low & MSR_CSV3_ENABLED)
+			csv3_enabled = 1;
+	}
+}
diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b8a33551895c2c87281ac993a7deb92e5ead653
--- /dev/null
+++ b/arch/x86/boot/compressed/csv.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Hygon CSV header for early boot related functions.
+ *
+ * Copyright (C) Hygon Info Technologies Ltd.
+ *
+ * Author: Liyang Han
+ */
+
+#ifndef BOOT_COMPRESSED_CSV_H
+#define BOOT_COMPRESSED_CSV_H
+
+#ifdef CONFIG_HYGON_CSV
+
+void csv_set_status(void);
+void csv_init_secure_call_pages(void *boot_params);
+
+void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr);
+
+#else
+
+static inline void csv_set_status(void) { }
+static inline void csv_init_secure_call_pages(void *boot_params) { }
+
+static inline void csv_update_page_attr(unsigned long address,
+					pteval_t set, pteval_t clr) { }
+
+#endif
+
+#endif /* BOOT_COMPRESSED_CSV_H */
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 1dcb794c5479ed7a0108275661c2b134d43b11c1..e02a88b880f13fc86051dc6e72e43bccccdfaf3d 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -397,6 +397,16 @@ SYM_CODE_START(startup_64)
 	movq	%r15, %rdi
 	call	sev_enable
 #endif
+#ifdef CONFIG_HYGON_CSV
+	/*
+	 * Check the CSV activation status. CSV and CSV2 guests are indicated
+	 * by MSR_AMD64_SEV_ENABLED_BIT and MSR_AMD64_SEV_ES_ENABLED_BIT in
+	 * MSR 0xc0010131, respectively. A CSV3 guest is indicated by
+	 * MSR_CSV3_ENABLED in the same MSR.
+	 */
+	call	csv_set_status
+#endif
 
 	/* Preserve only the CR4 bits that must be preserved, and clear the rest */
 	movq	%cr4, %rax
@@ -468,6 +478,16 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	movq	%r15, %rdi
 	call	initialize_identity_maps
 
+#ifdef CONFIG_HYGON_CSV
+	/*
+	 * If running as a CSV3 guest, the secure call pages must be mapped in
+	 * the identity page table before jumping to the decompressed kernel.
+	 * It is safe to scan the secure call pages here.
+	 */
+	movq	%r15, %rdi
+	call	csv_init_secure_call_pages
+#endif
+
 	/*
 	 * Do the extraction, and jump to the new kernel..
 	 */
diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
index aead80ec70a0bf0ecc9f9f4b5e1a7121b22ae215..a7b4148a943f656eb61f97b409b369eb23d11c76 100644
--- a/arch/x86/boot/compressed/ident_map_64.c
+++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -298,6 +298,9 @@ static int set_clr_page_flags(struct x86_mapping_info *info,
 	if ((set | clr) & _PAGE_ENC) {
 		clflush_page(address);
 
+		/* On CSV3, notify the secure processor to manage page attribute changes */
+		csv_update_page_attr(address, set, clr);
+
 		/*
 		 * If the encryption attribute is being cleared, change the page state
 		 * to shared in the RMP table.
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index aae1a2db4251037ef88a2e2277acc1d95d92f479..674433c522ed51f801363b0ff36becfaf7268de0 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -37,6 +37,7 @@
 #include <asm/desc_defs.h>
 
 #include "tdx.h"
+#include "csv.h"
 
 #define BOOT_CTYPE_H
 #include <linux/ctype.h>
diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index 9bbfd01cfa2f13fde9124ca521fa76c33f7661de..2d2d807e3b00c162b57ab1a830ad962f824901ad 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -231,6 +231,26 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64
 
 	  If unsure, say N.
 
+config CRYPTO_SM4_ZHAOXIN_GMI
+	tristate "Ciphers: SM4 with modes: ECB, CBC, CTR, CFB, OFB (Zhaoxin GMI)"
+	depends on X86 && CRYPTO
+	default m
+	select CRYPTO_SKCIPHER
+	select CRYPTO_SIMD
+	select CRYPTO_ALGAPI
+	select CRYPTO_SM4
+	help
+	  SM4 cipher algorithms (Zhaoxin GMI instructions).
+
+	  SM4 (GB/T 32907-2016) is a cryptographic standard issued by the
+	  Organization of State Commercial Administration of China (OSCCA)
+	  as an authorized cryptographic algorithm for use within China.
+
+	  This is an SM4 optimized implementation using the Zhaoxin GMI
+	  instruction set for the block cipher.
+
+	  If unsure, say N.
+
 config CRYPTO_TWOFISH_586
 	tristate "Ciphers: Twofish (32-bit)"
 	depends on (X86 || UML_X86) && !64BIT
@@ -477,6 +497,20 @@ config CRYPTO_SM3_AVX_X86_64
 
 	  If unsure, say N.
 
+config CRYPTO_SM3_ZHAOXIN_GMI
+	tristate "Hash functions: SM3 (Zhaoxin GMI)"
+	depends on X86 && CRYPTO
+	default m
+	select CRYPTO_HASH
+	select CRYPTO_SM3
+	help
+	  SM3 secure hash algorithm (Zhaoxin GMI instructions).
+
+	  SM3 is the secure hash function defined by OSCCA GM/T 0004-2012.
+	  It is part of the Chinese Commercial Cryptography suite.
+
+	  If unsure, say N.
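Once the GMI options above are enabled (both default to m), registration can be checked from userspace. A hedged sketch: the module names are assumed from the object names added to arch/x86/crypto/Makefile later in this patch, and the driver names from the cra_driver_name fields in the new glue code::

	modprobe sm3-zhaoxin-gmi
	modprobe sm4-zhaoxin-gmi
	grep -B 2 -A 3 'zhaoxin-gmi' /proc/crypto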
+
 config CRYPTO_GHASH_CLMUL_NI_INTEL
 	tristate "Hash functions: GHASH (CLMUL-NI)"
 	depends on X86 && 64BIT
@@ -519,4 +553,15 @@ config CRYPTO_CRCT10DIF_PCLMUL
 	  Architecture: x86_64 using:
 	  - PCLMULQDQ (carry-less multiplication)
 
+config CRYPTO_SM2_ZHAOXIN_GMI
+	tristate "SM2 Cipher algorithm (Zhaoxin GMI Instruction)"
+	depends on X86 && (CPU_SUP_CENTAUR || CPU_SUP_ZHAOXIN)
+	select CRYPTO_AKCIPHER
+	select CRYPTO_MANAGER
+	help
+	  SM2 (ShangMi 2) public key algorithm, accelerated by the Zhaoxin
+	  GMI instruction set.
+
+	  Published by the State Encryption Management Bureau, China, as
+	  specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012.
+
 endmenu
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 9aa46093c91b619da0d1641cefa73afa7d97d2e6..e5480c50a8d9bf7b5f79943d3f93f8fc321cfc8e 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -109,6 +109,10 @@ aria-aesni-avx2-x86_64-y := aria-aesni-avx2-asm_64.o aria_aesni_avx2_glue.o
 obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o
 aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o
 
+obj-$(CONFIG_CRYPTO_SM2_ZHAOXIN_GMI) += sm2-zhaoxin-gmi.o
+obj-$(CONFIG_CRYPTO_SM3_ZHAOXIN_GMI) += sm3-zhaoxin-gmi.o
+obj-$(CONFIG_CRYPTO_SM4_ZHAOXIN_GMI) += sm4-zhaoxin-gmi.o
+
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $< > $@
 $(obj)/%.S: $(src)/%.pl FORCE
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index feccb5254c7e5ec59179d21072f15cfacf6a1fb1..91d318b08fb70b14abc6c77026fc527ff5bd5f8d 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -224,6 +224,11 @@ static int __init crc32c_intel_mod_init(void)
 {
 	if (!x86_match_cpu(crc32c_cpu_id))
 		return -ENODEV;
+
+	/* Do not register if the CRC32C instruction is flagged as low performance */
+	if (boot_cpu_has(X86_FEATURE_CRC32C_LOW_PERF))
+		return -ENODEV;
+
 #ifdef CONFIG_X86_64
 	if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
 		alg.update = crc32c_pcl_intel_update;
diff --git a/arch/x86/crypto/sm2-zhaoxin-gmi.c b/arch/x86/crypto/sm2-zhaoxin-gmi.c
new file mode 100644
index 0000000000000000000000000000000000000000..a0430c6611fcfac4b921bda40af3d24afc05724c
--- /dev/null
+++ b/arch/x86/crypto/sm2-zhaoxin-gmi.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM2 asymmetric public-key algorithm
+ * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
+ * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
+ *
+ * Copyright (c) 2023 Shanghai Zhaoxin Semiconductor LTD.
+ * Authors: YunShen
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
+
+#define SCRATCH_SIZE (4 * 2048)
+
+#define SM2_CWORD_VERIFY 0x8
+#define SM2_VERIFY_PASS 1
+
+struct sm2_cipher_data {
+	u8 pub_key[65]; /* public key */
+};
+
+/* Load supported features of the CPU to see if SM2 is available. */
+static int zhaoxin_gmi_available(void)
+{
+	if (!boot_cpu_has(X86_FEATURE_SM2_EN)) {
+		pr_err("cannot enable hardware SM2: Zhaoxin GMI SM2 is not enabled\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/* Wrapper around the Zhaoxin GMI SM2 verify instruction */
+static inline size_t zhaoxin_gmi_sm2_verify(unsigned char *key, unsigned char *hash,
+					    unsigned char *sig, unsigned char *scratch)
+{
+	size_t result;
+
+	asm volatile(
+		".byte 0xf2, 0x0f, 0xa6, 0xc0"
+		: "=c"(result)
+		: "a"(hash), "b"(key), "d"(SM2_CWORD_VERIFY), "S"(scratch), "D"(sig)
+		: "memory");
+
+	return result;
+}
+
+/* Verify a signature against a hash using the Zhaoxin GMI instruction */
+static int _zhaoxin_sm2_verify(struct sm2_cipher_data *ec, unsigned char *hash, unsigned char *sig)
+{
+	unsigned char *scratch = kzalloc(SCRATCH_SIZE, GFP_KERNEL);
+	int ret = -EKEYREJECTED;
+	size_t result;
+
+	if (!scratch)
+		return -ENOMEM;
+
+	result = zhaoxin_gmi_sm2_verify(ec->pub_key, hash, sig, scratch);
+	if (result == SM2_VERIFY_PASS)
+		ret = 0;
+
+	kfree(scratch);
+
+	return ret;
+}
+
+static int zhaoxin_sm2_verify(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm);
+	unsigned char *buffer;
+	int ret, buf_len;
+
+	buf_len = req->src_len + req->dst_len;
+	buffer = kmalloc(buf_len, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	sg_pcopy_to_buffer(req->src, sg_nents_for_len(req->src, buf_len), buffer, buf_len, 0);
+	ret = _zhaoxin_sm2_verify(ec, buffer + req->src_len, buffer);
+
+	kfree(buffer);
+
+	return ret;
+}
+
+static int zhaoxin_sm2_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+				   unsigned int keylen)
+{
+	struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm);
+
+	/* The GMI engine expects an uncompressed 65-byte EC public key. */
+	if (keylen > sizeof(ec->pub_key))
+		return -EINVAL;
+
+	memcpy(ec->pub_key, key, keylen);
+
+	return 0;
+}
+
+static unsigned int zhaoxin_sm2_max_size(struct crypto_akcipher *tfm)
+{
+	/* There is no hard limit; report one page as the maximum size. */
+	return PAGE_SIZE;
+}
+
+static int zhaoxin_sm2_init_tfm(struct crypto_akcipher *tfm)
+{
+	return zhaoxin_gmi_available();
+}
+
+static void zhaoxin_sm2_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm);
+
+	memset(ec, 0, sizeof(*ec));
+}
+
+static struct akcipher_alg zhaoxin_sm2 = {
+	.verify = zhaoxin_sm2_verify,
+	.set_pub_key = zhaoxin_sm2_set_pub_key,
+	.max_size = zhaoxin_sm2_max_size,
+	.init = zhaoxin_sm2_init_tfm,
+	.exit = zhaoxin_sm2_exit_tfm,
+	.base = {
+		.cra_name = "sm2",
+		.cra_driver_name = "zhaoxin-gmi-sm2",
+		.cra_priority = 150,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct sm2_cipher_data),
+	},
+};
+
+static const struct x86_cpu_id zhaoxin_sm2_cpu_ids[] = {
+	X86_MATCH_FEATURE(X86_FEATURE_SM2, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sm2_cpu_ids);
+
+static int __init zhaoxin_sm2_init(void)
+{
+	if (!x86_match_cpu(zhaoxin_sm2_cpu_ids))
+		return -ENODEV;
+
+	return crypto_register_akcipher(&zhaoxin_sm2);
+}
+
+static void __exit zhaoxin_sm2_exit(void)
+{
+	crypto_unregister_akcipher(&zhaoxin_sm2);
+}
+
+module_init(zhaoxin_sm2_init);
+module_exit(zhaoxin_sm2_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("YunShen");
+MODULE_DESCRIPTION("SM2 Zhaoxin GMI Algorithm");
+MODULE_ALIAS_CRYPTO("zhaoxin-gmi-sm2");
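The akcipher above registers under the generic name "sm2" with priority 150, so the crypto API should prefer it over a lower-priority software implementation once the module is loaded. A hedged sketch of checking the selection from userspace (the module name is assumed from the Makefile object)::

	modprobe sm2-zhaoxin-gmi
	grep -B 1 -A 4 'zhaoxin-gmi-sm2' /proc/crypto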
diff --git a/arch/x86/crypto/sm3-zhaoxin-gmi.c b/arch/x86/crypto/sm3-zhaoxin-gmi.c
new file mode 100644
index 0000000000000000000000000000000000000000..c39b7de97ce118a9e8534649e32e1f232f20be63
--- /dev/null
+++ b/arch/x86/crypto/sm3-zhaoxin-gmi.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sm3-zhaoxin-gmi.c - wrapper code for Zhaoxin GMI.
+ *
+ * Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sm3.h>
+#include <crypto/sm3_base.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
+#include <asm/processor.h>
+
+/*
+ * Load supported features of the CPU to see if SM3 is available.
+ */
+static int gmi_available(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+	u32 eax, ebx, ecx, edx;
+
+	if (((c->x86 == 6) && (c->x86_model >= 0x0f)) ||
+	    ((c->x86 == 6) && (c->x86_model == 0x09)) ||
+	    (c->x86 > 6)) {
+		if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) {
+			/* CPUID writes all four GPRs, so ask for all of them */
+			eax = 0xC0000001;
+			__asm__ __volatile__ ("cpuid"
+				: "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx));
+
+			if ((edx & 0x0030) != 0x0030)
+				return -ENODEV;
+
+			pr_notice("GMI SM3 detected by CPUID\n");
+			return 0;
+		}
+		pr_notice("GMI SM3 is available\n");
+		return 0;
+	}
+	return -ENODEV;
+}
+
+static void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt)
+{
+	unsigned long in, out, cnt, blksz, ctrl;
+
+	if (!blockcnt)
+		return;
+
+	in = (unsigned long)inp;
+	out = (unsigned long)(sst->state);
+	cnt = (unsigned long)blockcnt;
+	blksz = 0x20;
+	ctrl = -1;
+
+	__asm__ __volatile__(
+		".byte 0xf3,0x0f,0xa6,0xe8\n"
+		: "+S"(in)
+		: "D"(out), "c"(cnt), "b"(blksz), "a"(ctrl)
+		: "memory"
+	);
+}
+
+static inline int zx_sm3_init(struct shash_desc *desc)
+{
+	struct sm3_state *sctx;
+
+	if (!desc)
+		return -EINVAL;
+
+	sctx = shash_desc_ctx(desc);
+
+	/*
+	 * The standard SM3 IV with each 32-bit word byte-swapped, as the
+	 * GMI hardware consumes the state in big-endian word order.
+	 */
+	sctx->state[0] = 0x6f168073UL;
+	sctx->state[1] = 0xb9b21449UL;
+	sctx->state[2] = 0xd7422417UL;
+	sctx->state[3] = 0x00068adaUL;
+	sctx->state[4] = 0xbc306fa9UL;
+	sctx->state[5] = 0xaa383116UL;
+	sctx->state[6] = 0x4dee8de3UL;
+	sctx->state[7] = 0x4e0efbb0UL;
+
+	sctx->count = 0;
+
+	return 0;
+}
+
+static inline int zx_sm3_base_finish(struct shash_desc *desc, u8 *out)
+{
+	struct sm3_state *sctx = shash_desc_ctx(desc);
+	__be32 *digest = (__be32 *)out;
+
+	memcpy(digest, sctx->state, SM3_DIGEST_SIZE);
+
+	*sctx = (struct sm3_state){};
+	return 0;
+}
+
+static int zx_sm3_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+{
+	return sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+}
+
+static int zx_sm3_final(struct shash_desc *desc, u8 *out)
+{
+	sm3_base_do_finalize(desc, sm3_generic_block_fn);
+
+	return zx_sm3_base_finish(desc, out);
+}
+
+static int zx_sm3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash)
+{
+	sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+
+	return zx_sm3_final(desc, hash);
+}
+
+static struct shash_alg zx_sm3_alg = {
+	.digestsize	= SM3_DIGEST_SIZE,
+	.init		= zx_sm3_init,
+	.update		= zx_sm3_update,
+	.final		= zx_sm3_final,
+	.finup		= zx_sm3_finup,
+	.descsize	= sizeof(struct sm3_state),
+	.base = {
+		.cra_name	 = "sm3",
+		.cra_driver_name = "sm3-zhaoxin-gmi",
+		.cra_priority	 = 300,
+		.cra_blocksize	 = SM3_BLOCK_SIZE,
+		.cra_module	 = THIS_MODULE,
+	}
+};
+
+static int __init zx_sm3_generic_mod_init(void)
+{
+	if (gmi_available() == 0)
+		return crypto_register_shash(&zx_sm3_alg);
+
+	pr_warn("GMI is unavailable on this platform\n");
+	return -ENODEV;
+}
+
+static void __exit zx_sm3_generic_mod_fini(void)
+{
+	crypto_unregister_shash(&zx_sm3_alg);
+}
+
+module_init(zx_sm3_generic_mod_init);
+module_exit(zx_sm3_generic_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
+MODULE_ALIAS_CRYPTO("sm3-zhaoxin");
+MODULE_ALIAS_CRYPTO("sm3-zhaoxin-gmi");
diff --git a/arch/x86/crypto/sm4-zhaoxin-gmi.c b/arch/x86/crypto/sm4-zhaoxin-gmi.c
new file mode 100644
index 0000000000000000000000000000000000000000..288bae2936074e70c3292883511ec919967b17d2
--- /dev/null
+++ b/arch/x86/crypto/sm4-zhaoxin-gmi.c
@@ -0,0 +1,808 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sm4-zhaoxin-gmi.c - wrapper code for Zhaoxin GMI.
+ *
+ * Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/simd.h>
+#include <crypto/sm4.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+
+#define SM4_ECB (1<<6)
+#define SM4_CBC (1<<7)
+#define SM4_CFB (1<<8)
+#define SM4_OFB (1<<9)
+#define SM4_CTR (1<<10)
+
+#define ZX_GMI_ALIGNMENT 16
+
+#define GETU16(p) ((u16)(p)[0]<<8 | (u16)(p)[1])
+
+/* Control word. */
+struct sm4_cipher_data {
+	u8 iv[SM4_BLOCK_SIZE];	/* Initialization vector */
+	union {
+		u32 pad;
+		struct {
+			u32 encdec:1;
+			u32 func:5;
+			u32 mode:5;
+			u32 digest:1;
+		} b;
+	} cword;		/* Control word */
+	struct sm4_ctx keys;	/* Encryption key */
+};
+
+static u8 *rep_xcrypt(const u8 *input, u8 *output, void *key, u8 *iv,
+		      struct sm4_cipher_data *sm4_data, u64 count)
+{
+	unsigned long rax = sm4_data->cword.pad;
+
+	/* Set the flag for encryption or decryption */
+	if (sm4_data->cword.b.encdec == 1)
+		rax &= ~0x01;
+	else
+		rax |= 0x01;
+
+	__asm__ __volatile__(
+		".byte 0xf3, 0x0f, 0xa7, 0xf0\n"
+		:
+		: "S"(input), "D"(output), "a"(rax), "b"(key), "c"((unsigned long)count), "d"(iv)
+		: "memory");
+
+	return iv;
+}
+
+static u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv,
+			  struct sm4_cipher_data *sm4_data, u64 count)
+{
+	u8 oiv[SM4_BLOCK_SIZE] = {0};
+	u16 cnt_tmp;
+	u32 i;
+	u8 *in_tmp = (u8 *)input, *out_tmp = output;
+
+	/* Back up the original IV if it is not NULL. */
+	if (iv)
+		memcpy(oiv, iv, SM4_BLOCK_SIZE);
+
+	/* Get the current counter. */
+	cnt_tmp = GETU16(&iv[14]);
+
+	/* Get the available counter space before overflow. */
+	cnt_tmp = 0x10000 - cnt_tmp;
+
+	/* Check whether there is enough counter space for the required blocks. */
+	if (cnt_tmp < count) {
+		/* Process the first part of the data blocks. */
+		rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, cnt_tmp);
+		/* Only increase the counter by SW when overflow occurs. */
+		memcpy(iv, oiv, SM4_BLOCK_SIZE);
+
+		for (i = 0; i < cnt_tmp; i++)
+			crypto_inc(iv, SM4_BLOCK_SIZE);
+
+		out_tmp = output + cnt_tmp * SM4_BLOCK_SIZE;
+		in_tmp = (u8 *)(input + cnt_tmp * SM4_BLOCK_SIZE);
+
+		/* Get the number of data blocks that have not been encrypted. */
+		cnt_tmp = count - cnt_tmp;
+		/* Process the remaining part of the data blocks. */
+		rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, cnt_tmp);
+	} else {
+		/* The counter space is big enough; the counter will not overflow. */
+		rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, count);
+	}
+
+	/* Restore the IV if it is not NULL. */
+	if (iv)
+		memcpy(iv, oiv, SM4_BLOCK_SIZE);
+
+	return iv;
+}
+
+/* Encrypt exactly one block in ECB mode, e.g. to produce a keystream block. */
+static u8 *rep_xcrypt_ecb_ONE(const u8 *input, u8 *output, void *key, u8 *iv,
+			      struct sm4_cipher_data *sm4_data, u64 count)
+{
+	struct sm4_cipher_data cw;
+
+	cw.cword.pad = 0;
+	cw.cword.b.encdec = 1;
+	cw.cword.pad |= 0x20 | SM4_ECB;
+
+	return rep_xcrypt(input, output, key, iv, &cw, 1);
+}
+
+/**
+ * gmi_sm4_set_key - Set the SM4 key.
+ * @tfm: The %crypto_skcipher that is used in the context. + * @in_key: The input key. + * @key_len:The size of the key. + */ +static int gmi_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) +{ + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (key_len != SM4_KEY_SIZE) { + pr_warn("The key_len must be 16 bytes. please check\n"); + return -EINVAL; + } + + memcpy(ctx->rkey_enc, in_key, key_len); + memcpy(ctx->rkey_dec, in_key, key_len); + + return 0; +} + +static int sm4_cipher_common(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + rep_xcrypt(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, walk.iv, cw, + blocks); + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + +static int ecb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20 | SM4_ECB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int ecb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20 | SM4_ECB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int cbc_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20 | SM4_CBC; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int cbc_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20 | SM4_CBC; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * sm4_cipher_ctr is used for ZX-E and newer + */ +static int sm4_cipher_ctr(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks, nbytes; + int err; + u8 *dst, *src; + u8 keystream[SM4_BLOCK_SIZE]; + u32 i; + + err = skcipher_walk_virt(&walk, req, true); + + while ((nbytes = walk.nbytes) > 0) { + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) { + blocks = nbytes/SM4_BLOCK_SIZE; + rep_xcrypt_ctr(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, + walk.iv, cw, blocks); + + for (i = 0; i < blocks; i++) + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + + dst += blocks * SM4_BLOCK_SIZE; + src += blocks * SM4_BLOCK_SIZE; + nbytes -= blocks * SM4_BLOCK_SIZE; + } + + if (walk.nbytes == walk.total && nbytes > 0) { + rep_xcrypt_ecb_ONE(walk.iv, keystream, ctx->rkey_enc, walk.iv, cw, 1); + crypto_xor_cpy(dst, keystream, src, nbytes); + dst += nbytes; + src += nbytes; + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +/* + * ctr_encrypt is used for ZX-E and newer + */ +static int ctr_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20 | SM4_CTR; + + err = sm4_cipher_ctr(req, &cw); + + return err; +} + +/* + * ctr_decrypt is used for ZX-E and newer + */ +static int ctr_decrypt(struct skcipher_request *req) +{ + int err; + struct 
sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20 | SM4_CTR; + + err = sm4_cipher_ctr(req, &cw); + + return err; +} + +/* + * sm4_ctr_zxc is used for ZXC+ + */ +static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + u8 *dst, *src; + u8 en_iv[SM4_BLOCK_SIZE] = {0}; + + err = skcipher_walk_virt(&walk, req, true); + + while ((nbytes = walk.nbytes) > 0) { + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) { + rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1); + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + + crypto_xor_cpy(dst, en_iv, src, SM4_BLOCK_SIZE); + + dst += SM4_BLOCK_SIZE; + src += SM4_BLOCK_SIZE; + nbytes -= SM4_BLOCK_SIZE; + } + + // tail + if (walk.nbytes == walk.total && nbytes > 0) { + rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1); + crypto_xor_cpy(dst, en_iv, src, nbytes); + + dst += nbytes; + src += nbytes; + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +/* + * ctr_encrypt_zxc is used for ZX-C+ + */ +static int ctr_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20 | SM4_CTR; + + err = sm4_ctr_zxc(req, &cw); + + return err; +} + +/* + * ctr_decrypt_zxc is used for ZX-C+ + */ +static int ctr_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20 | SM4_CTR; + + err = sm4_ctr_zxc(req, &cw); + + return err; +} + +/* + * ofb_encrypt is used for ZX-E and newer + */ +static int ofb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20 | SM4_OFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * ofb_decrypt is used for ZX-E and newer + */ +static int ofb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20 | SM4_OFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * sm4_ofb_zxc is used for ZX-C+ + */ +static int sm4_ofb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + + u32 n; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + while (blocks--) { + rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1); + + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^ + *(size_t *)(walk.src.virt.addr + n); + + walk.src.virt.addr += SM4_BLOCK_SIZE; + walk.dst.virt.addr += SM4_BLOCK_SIZE; + + } + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + +/* + * ofb_encrypt_zxc is used for ZX-C+ + */ +static int ofb_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20 | SM4_OFB; + + err = sm4_ofb_zxc(req, &cw); + + return err; +} + +/* + * ofb_decrypt_zxc is used for ZX-C+ + */ +static int ofb_decrypt_zxc(struct 
skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20 | SM4_OFB; + + err = sm4_ofb_zxc(req, &cw); + + return err; +} + + +/* + * cfb_encrypt is used for ZX-E and newer. + */ +static int cfb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20 | SM4_CFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * cfb_decrypt is used for ZX-E and newer. + */ + +static int cfb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20 | SM4_CFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * sm4_cfb_zxc is used for ZX-C+ + */ +static int sm4_cfb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u32 n; + size_t t; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + while (blocks--) { + rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1); + if (cw->cword.b.encdec) + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^= + *(size_t *)(walk.src.virt.addr + n); + + else + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) { + t = *(size_t *)(walk.src.virt.addr + n); + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^ t; + *(size_t *)(walk.iv + n) = t; + } + + walk.src.virt.addr += SM4_BLOCK_SIZE; + walk.dst.virt.addr += SM4_BLOCK_SIZE; + } + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + +/* + * cfb_encrypt_zxc is used for ZX-C+ + */ +static int cfb_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20 | SM4_CFB; + + err = sm4_cfb_zxc(req, &cw); + + return err; +} + +/* + * cfb_decrypt_zxc is used for ZX-C+ + */ +static int cfb_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20 | SM4_CFB; + + err = sm4_cfb_zxc(req, &cw); + + return err; +} + +static struct skcipher_alg sm4_algs[] = { + { + .base = { + .cra_name = "__ecb(sm4)", + .cra_driver_name = "__ecb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ecb_encrypt, + .decrypt = ecb_decrypt, + }, + + { + .base = { + .cra_name = "__cbc(sm4)", + .cra_driver_name = "__cbc-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cbc_encrypt, + .decrypt = cbc_decrypt, + }, + + { + .base = { + .cra_name = "__ctr(sm4)", + .cra_driver_name = "__ctr-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + 
.cra_blocksize = 1, //SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ctr_encrypt, + .decrypt = ctr_decrypt, + }, + + { + .base = { + .cra_name = "__ofb(sm4)", + .cra_driver_name = "__ofb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ofb_encrypt, + .decrypt = ofb_decrypt, + }, + + { + .base = { + .cra_name = "__cfb(sm4)", + .cra_driver_name = "__cfb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cfb_encrypt, + .decrypt = cfb_decrypt, + } +}; + +static struct simd_skcipher_alg *sm4_simd_algs[ARRAY_SIZE(sm4_algs)]; + +static int gmi_zxc_check(void) +{ + int f_zxc = 0; + + struct cpuinfo_x86 *c = &cpu_data(0); + + if ((c->x86 > 6)) + f_zxc = 0; + else if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || + ((c->x86 == 6) && (c->x86_model == 0x09))) + f_zxc = 1; + + return f_zxc; +} + +/* + * Load supported features of the CPU to see if the SM4 is available. + */ +static int gmi_ccs_available(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + u32 eax, edx; + + if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || + ((c->x86 == 6) && (c->x86_model == 0x09)) || + (c->x86 > 6)) { + if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) { + eax = 0xC0000001; + __asm__ __volatile__ ("cpuid" : "=d"(edx) : "a"(eax) : ); + + if ((edx & 0x0030) != 0x0030) + return -ENODEV; + + pr_debug("GMI SM4 is detected by CPUID\n"); + return 0; + } + pr_debug("GMI SM4 is available\n"); + return 0; + + } + return -ENODEV; +} + +static void gmi_sm4_exit(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sm4_simd_algs) && sm4_simd_algs[i]; i++) + simd_skcipher_free(sm4_simd_algs[i]); + + crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); +} +static int __init gmi_sm4_init(void) +{ + struct simd_skcipher_alg *simd; + const char *basename; + const char *algname; + const char *drvname; + int err; + int i; + + if (gmi_ccs_available() != 0) + return -ENODEV; + + if (gmi_zxc_check()) { + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + if (!strcmp(sm4_algs[i].base.cra_name, "__ctr(sm4)")) { + sm4_algs[i].encrypt = ctr_encrypt_zxc; + sm4_algs[i].decrypt = ctr_decrypt_zxc; + } else if (!strcmp(sm4_algs[i].base.cra_name, "__cfb(sm4)")) { + sm4_algs[i].encrypt = cfb_encrypt_zxc; + sm4_algs[i].decrypt = cfb_decrypt_zxc; + } else if (!strcmp(sm4_algs[i].base.cra_name, "__ofb(sm4)")) { + sm4_algs[i].encrypt = ofb_encrypt_zxc; + sm4_algs[i].decrypt = ofb_decrypt_zxc; + } + } + } + + err = crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + algname = sm4_algs[i].base.cra_name + 2; + drvname = sm4_algs[i].base.cra_driver_name + 2; + basename = 
sm4_algs[i].base.cra_driver_name; + simd = simd_skcipher_create_compat(sm4_algs + i, algname, + drvname, basename); + err = PTR_ERR(simd); + if (IS_ERR(simd)) + goto unregister_simds; + + sm4_simd_algs[i] = simd; + } + + return 0; + +unregister_simds: + gmi_sm4_exit(); + return err; +} + +late_initcall(gmi_sm4_init); +module_exit(gmi_sm4_exit); + +MODULE_DESCRIPTION("SM4-ECB/CBC/CTR/CFB/OFB using Zhaoxin GMI"); +MODULE_AUTHOR("GRX"); +MODULE_LICENSE("GPL"); diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index aa8fc2cf1bde778ed170d44954837a1e9f32def5..4eacac39a5018b6f38457f618de925936fb2870f 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -273,8 +273,23 @@ static const u64 amd_zen2_perfmon_event_map[PERF_COUNT_HW_MAX] = [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9, }; +static const u64 amd_zen4_perfmon_event_map[PERF_COUNT_HW_MAX] = +{ + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60, + [PERF_COUNT_HW_CACHE_MISSES] = 0x0964, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9, + [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x100000120, +}; + static u64 amd_pmu_event_map(int hw_event) { + if (cpu_feature_enabled(X86_FEATURE_ZEN4) || boot_cpu_data.x86 >= 0x1a) + return amd_zen4_perfmon_event_map[hw_event]; + if (cpu_feature_enabled(X86_FEATURE_ZEN2) || boot_cpu_data.x86 >= 0x19) return amd_zen2_perfmon_event_map[hw_event]; @@ -954,7 +969,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) continue; if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 83f15fe411b3f4834b20ea588146dfe913a850d0..5bc616a638f0b79040c51736a826424333700fdb 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -196,10 +196,21 @@ static void amd_uncore_del(struct perf_event *event, int flags) */ static u64 l3_thread_slice_mask(u64 config) { - if (boot_cpu_data.x86 <= 0x18) + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 <= 0x18) return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + if (boot_cpu_data.x86_model >= 0x6 && boot_cpu_data.x86_model <= 0xf) + return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) | + ((config & HYGON_L3_THREAD_MASK) ? : HYGON_L3_THREAD_MASK); + else + return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | + ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + } + /* * If the user doesn't specify a threadmask, they're not trying to * count core 0, so we enable all cores & threads. 
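The slice/thread mask plumbing above is user-visible through the PMU's format directory, so it is easiest to sanity-check from userspace. A hedged sketch (the event/umask values are placeholders rather than real Hygon event codes; the L3 uncore PMU is exposed as amd_l3 on these parts)::

	# list the format fields registered for the L3 PMU
	ls /sys/bus/event_source/devices/amd_l3/format/

	# count an L3 event restricted to two threads via the threadmask field
	perf stat -e amd_l3/event=0x01,umask=0x80,threadmask=0x3/ -a -- sleep 1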
@@ -224,8 +235,20 @@ static int amd_uncore_event_init(struct perf_event *event) if (event->attr.type != event->pmu->type) return -ENOENT; - if (pmu_version >= 2 && is_nb_event(event)) + if (pmu_version >= 2 && is_nb_event(event)) { event_mask = AMD64_PERFMON_V2_RAW_EVENT_MASK_NB; + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18 && + is_nb_event(event)) { + event_mask = HYGON_F18H_RAW_EVENT_MASK_NB; + if (boot_cpu_data.x86_model == 0x4 || + boot_cpu_data.x86_model == 0x5) + event_mask = HYGON_F18H_M4H_RAW_EVENT_MASK_NB; + if (boot_cpu_data.x86_model == 0x6 || + boot_cpu_data.x86_model == 0x7 || + boot_cpu_data.x86_model == 0x10) + event_mask = HYGON_F18H_M6H_RAW_EVENT_MASK_NB; + } /* * NB and Last level cache counters (MSRs) are shared across all cores @@ -268,6 +291,14 @@ amd_f17h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) attr->mode : 0; } +static umode_t +hygon_f18h_m6h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + return boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x6 && boot_cpu_data.x86_model <= 0xf ? + attr->mode : 0; +} + static umode_t amd_f19h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) { @@ -315,8 +346,11 @@ static struct device_attribute format_attr_##_var = \ DEFINE_UNCORE_FORMAT_ATTR(event12, event, "config:0-7,32-35"); DEFINE_UNCORE_FORMAT_ATTR(event14, event, "config:0-7,32-35,59-60"); /* F17h+ DF */ DEFINE_UNCORE_FORMAT_ATTR(event14v2, event, "config:0-7,32-37"); /* PerfMonV2 DF */ +DEFINE_UNCORE_FORMAT_ATTR(event14f18h, event, "config:0-7,32-35,61-62"); /* F18h DF */ DEFINE_UNCORE_FORMAT_ATTR(event8, event, "config:0-7"); /* F17h+ L3 */ DEFINE_UNCORE_FORMAT_ATTR(umask8, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(umask10f18h, umask, "config:8-17"); /* F18h M4h DF */ +DEFINE_UNCORE_FORMAT_ATTR(umask12f18h, umask, "config:8-19"); /* F18h M6h DF */ DEFINE_UNCORE_FORMAT_ATTR(umask12, umask, "config:8-15,24-27"); /* PerfMonV2 DF */ DEFINE_UNCORE_FORMAT_ATTR(coreid, coreid, "config:42-44"); /* F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(slicemask, slicemask, "config:48-51"); /* F17h L3 */ @@ -325,6 +359,8 @@ DEFINE_UNCORE_FORMAT_ATTR(threadmask2, threadmask, "config:56-57"); /* F19h L DEFINE_UNCORE_FORMAT_ATTR(enallslices, enallslices, "config:46"); /* F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(enallcores, enallcores, "config:47"); /* F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(slicemask4, slicemask, "config:28-31"); /* F18h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(threadmask32, threadmask, "config:32-63"); /* F18h L3 */ /* Common DF and NB attributes */ static struct attribute *amd_uncore_df_format_attr[] = { @@ -347,6 +383,12 @@ static struct attribute *amd_f17h_uncore_l3_format_attr[] = { NULL, }; +/* F18h M06h unique L3 attributes */ +static struct attribute *hygon_f18h_m6h_uncore_l3_format_attr[] = { + &format_attr_slicemask4.attr, /* slicemask */ + NULL, +}; + /* F19h unique L3 attributes */ static struct attribute *amd_f19h_uncore_l3_format_attr[] = { &format_attr_coreid.attr, /* coreid */ @@ -372,6 +414,12 @@ static struct attribute_group amd_f17h_uncore_l3_format_group = { .is_visible = amd_f17h_uncore_is_visible, }; +static struct attribute_group hygon_f18h_m6h_uncore_l3_format_group = { + .name = "format", + .attrs = hygon_f18h_m6h_uncore_l3_format_attr, + .is_visible = hygon_f18h_m6h_uncore_is_visible, +}; + static struct attribute_group amd_f19h_uncore_l3_format_group = { 
.name = "format", .attrs = amd_f19h_uncore_l3_format_attr, @@ -396,6 +444,11 @@ static const struct attribute_group *amd_uncore_l3_attr_update[] = { NULL, }; +static const struct attribute_group *hygon_uncore_l3_attr_update[] = { + &hygon_f18h_m6h_uncore_l3_format_group, + NULL, +}; + static struct pmu amd_nb_pmu = { .task_ctx_nr = perf_invalid_context, .attr_groups = amd_uncore_df_attr_groups, @@ -679,8 +732,19 @@ static int __init amd_uncore_init(void) if (pmu_version >= 2) { *df_attr++ = &format_attr_event14v2.attr; *df_attr++ = &format_attr_umask12.attr; - } else if (boot_cpu_data.x86 >= 0x17) { + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0x17) { *df_attr = &format_attr_event14.attr; + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + *df_attr++ = &format_attr_event14f18h.attr; + if (boot_cpu_data.x86_model == 0x4 || + boot_cpu_data.x86_model == 0x5) + *df_attr++ = &format_attr_umask10f18h.attr; + else if (boot_cpu_data.x86_model == 0x6 || + boot_cpu_data.x86_model == 0x7 || + boot_cpu_data.x86_model == 0x10) + *df_attr++ = &format_attr_umask12f18h.attr; } amd_uncore_nb = alloc_percpu(struct amd_uncore *); @@ -709,10 +773,21 @@ static int __init amd_uncore_init(void) *l3_attr++ = &format_attr_event8.attr; *l3_attr++ = &format_attr_umask8.attr; *l3_attr++ = &format_attr_threadmask2.attr; - } else if (boot_cpu_data.x86 >= 0x17) { + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0x17) { *l3_attr++ = &format_attr_event8.attr; *l3_attr++ = &format_attr_umask8.attr; *l3_attr++ = &format_attr_threadmask8.attr; + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + *l3_attr++ = &format_attr_event8.attr; + *l3_attr++ = &format_attr_umask8.attr; + if (boot_cpu_data.x86_model >= 0x6 && boot_cpu_data.x86_model <= 0xf) { + *l3_attr++ = &format_attr_threadmask32.attr; + amd_llc_pmu.attr_update = hygon_uncore_l3_attr_update; + } else { + *l3_attr++ = &format_attr_threadmask8.attr; + } } amd_uncore_llc = alloc_percpu(struct amd_uncore *); diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 150a365b4fbc89d69fd5dd094212d8cbd4e4fa18..a620ef3e790f9b8b1c252949f5c66ddf15ae02d0 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -603,7 +603,7 @@ int x86_pmu_hw_config(struct perf_event *event) } } - if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) + if (branch_sample_call_stack(event)) event->attach_state |= PERF_ATTACH_TASK_DATA; /* @@ -1705,7 +1705,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs) perf_sample_data_init(&data, 0, event->hw.last_period); if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); @@ -1890,9 +1890,9 @@ ssize_t events_hybrid_sysfs_show(struct device *dev, str = pmu_attr->event_str; for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { - if (!(x86_pmu.hybrid_pmu[i].cpu_type & pmu_attr->pmu_type)) + if (!(x86_pmu.hybrid_pmu[i].pmu_type & pmu_attr->pmu_type)) continue; - if (x86_pmu.hybrid_pmu[i].cpu_type & pmu->cpu_type) { + if (x86_pmu.hybrid_pmu[i].pmu_type & pmu->pmu_type) { next_str = strchr(str, ';'); if (next_str) return snprintf(page, next_str - str + 1, "%s", str); @@ -2172,7 +2172,7 @@ static int __init init_hw_perf_events(void) hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE; err = 
perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name, - (hybrid_pmu->cpu_type == hybrid_big) ? PERF_TYPE_RAW : -1); + (hybrid_pmu->pmu_type == hybrid_big) ? PERF_TYPE_RAW : -1); if (err) break; } diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 37c8badd270155d8f5cc2e0e81673c4a6f7a2f19..e2c1c51d8a01f8ade172b666aa8aa7d8a9b071e3 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -299,7 +299,7 @@ static struct extra_reg intel_icl_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; -static struct extra_reg intel_spr_extra_regs[] __read_mostly = { +static struct extra_reg intel_glc_extra_regs[] __read_mostly = { INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), @@ -309,7 +309,7 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; -static struct event_constraint intel_spr_event_constraints[] = { +static struct event_constraint intel_glc_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -349,7 +349,7 @@ static struct event_constraint intel_spr_event_constraints[] = { EVENT_CONSTRAINT_END }; -static struct extra_reg intel_gnr_extra_regs[] __read_mostly = { +static struct extra_reg intel_rwc_extra_regs[] __read_mostly = { INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), @@ -473,7 +473,7 @@ static u64 intel_pmu_event_map(int hw_event) return intel_perfmon_event_map[hw_event]; } -static __initconst const u64 spr_hw_cache_event_ids +static __initconst const u64 glc_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = @@ -552,7 +552,7 @@ static __initconst const u64 spr_hw_cache_event_ids }, }; -static __initconst const u64 spr_hw_cache_extra_regs +static __initconst const u64 glc_hw_cache_extra_regs [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = @@ -2518,9 +2518,14 @@ static void intel_pmu_assign_event(struct perf_event *event, int idx) perf_report_aux_output_id(event, idx); } +static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event) +{ + return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK; +} + static void intel_pmu_del_event(struct perf_event *event) { - if (needs_branch_stack(event)) + if (intel_pmu_needs_branch_stack(event)) intel_pmu_lbr_del(event); if (event->attr.precise_ip) intel_pmu_pebs_del(event); @@ -2556,16 +2561,6 @@ static int icl_set_topdown_event_period(struct perf_event *event) return 0; } -static int adl_set_topdown_event_period(struct perf_event *event) -{ - struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - - if (pmu->cpu_type != hybrid_big) - return 0; - - return icl_set_topdown_event_period(event); -} - DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period); static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) @@ -2708,16 +2703,6 @@ static u64 icl_update_topdown_event(struct perf_event *event) x86_pmu.num_topdown_events - 1); } -static u64 adl_update_topdown_event(struct perf_event *event) -{ - struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - - if 
(pmu->cpu_type != hybrid_big) - return 0; - - return icl_update_topdown_event(event); -} - DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update); static void intel_pmu_read_topdown_event(struct perf_event *event) @@ -2798,6 +2783,7 @@ static void intel_pmu_enable_fixed(struct perf_event *event) static void intel_pmu_enable_event(struct perf_event *event) { + u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; @@ -2806,8 +2792,10 @@ static void intel_pmu_enable_event(struct perf_event *event) switch (idx) { case 0 ... INTEL_PMC_IDX_FIXED - 1: + if (branch_sample_counters(event)) + enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR; intel_set_masks(event, idx); - __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); + __x86_pmu_enable_event(hwc, enable_mask); break; case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: @@ -2831,7 +2819,7 @@ static void intel_pmu_add_event(struct perf_event *event) { if (event->attr.precise_ip) intel_pmu_pebs_add(event); - if (needs_branch_stack(event)) + if (intel_pmu_needs_branch_stack(event)) intel_pmu_lbr_add(event); } @@ -3058,7 +3046,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) perf_sample_data_init(&data, 0, event->hw.last_period); if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + intel_pmu_lbr_save_brstack(&data, cpuc, event); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); @@ -3623,6 +3611,13 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, if (cpuc->excl_cntrs) return intel_get_excl_constraints(cpuc, event, idx, c2); + /* Not all counters support the branch counter feature. */ + if (branch_sample_counters(event)) { + c2 = dyn_constraint(cpuc, c2, idx); + c2->idxmsk64 &= x86_pmu.lbr_counters; + c2->weight = hweight64(c2->idxmsk64); + } + return c2; } @@ -3869,7 +3864,7 @@ static inline bool require_mem_loads_aux_event(struct perf_event *event) return false; if (is_hybrid()) - return hybrid_pmu(event->pmu)->cpu_type == hybrid_big; + return hybrid_pmu(event->pmu)->pmu_type == hybrid_big; return true; } @@ -3908,7 +3903,62 @@ static int intel_pmu_hw_config(struct perf_event *event) x86_pmu.pebs_aliases(event); } - if (needs_branch_stack(event)) { + if (needs_branch_stack(event) && is_sampling_event(event)) + event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; + + if (branch_sample_counters(event)) { + struct perf_event *leader, *sibling; + int num = 0; + + if (!(x86_pmu.flags & PMU_FL_BR_CNTR) || + (event->attr.config & ~INTEL_ARCH_EVENT_MASK)) + return -EINVAL; + + /* + * The branch counter logging is not supported in the call stack + * mode yet, since we cannot simply flush the LBR during e.g., + * multiplexing. Also, there is no obvious usage with the call + * stack mode. Simply forbid it for now. + * + * If any events in the group enable the branch counter logging + * feature, the group is treated as a branch counter logging + * group, which requires the extra space to store the counters.
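+ * + * A usage sketch (illustrative only): user space opts in by adding + * PERF_SAMPLE_BRANCH_COUNTERS on top of a normal branch stack sample + * for each group member, e.g. + * + * attr.sample_type |= PERF_SAMPLE_BRANCH_STACK; + * attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY | + * PERF_SAMPLE_BRANCH_COUNTERS; + * + * The logged counts are appended to each branch stack sample after the + * reorder step, see intel_pmu_lbr_counters_reorder().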
+ */ + leader = event->group_leader; + if (branch_sample_call_stack(leader)) + return -EINVAL; + if (branch_sample_counters(leader)) + num++; + leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS; + + for_each_sibling_event(sibling, leader) { + if (branch_sample_call_stack(sibling)) + return -EINVAL; + if (branch_sample_counters(sibling)) + num++; + } + + if (num > fls(x86_pmu.lbr_counters)) + return -EINVAL; + /* + * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't + * require any branch stack setup. + * Clear the bit to avoid unnecessary branch stack setup. + */ + if (0 == (event->attr.branch_sample_type & + ~(PERF_SAMPLE_BRANCH_PLM_ALL | + PERF_SAMPLE_BRANCH_COUNTERS))) + event->hw.flags &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK; + + /* + * Force the leader to be a LBR event. So LBRs can be reset + * with the leader event. See intel_pmu_lbr_del() for details. + */ + if (!intel_pmu_needs_branch_stack(leader)) + return -EINVAL; + } + + if (intel_pmu_needs_branch_stack(event)) { ret = intel_pmu_setup_lbr_filter(event); if (ret) return ret; @@ -4278,7 +4328,7 @@ icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, } static struct event_constraint * -spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx, +glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) { struct event_constraint *c; @@ -4366,9 +4416,9 @@ adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, { struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - if (pmu->cpu_type == hybrid_big) - return spr_get_event_constraints(cpuc, idx, event); - else if (pmu->cpu_type == hybrid_small) + if (pmu->pmu_type == hybrid_big) + return glc_get_event_constraints(cpuc, idx, event); + else if (pmu->pmu_type == hybrid_small) return tnt_get_event_constraints(cpuc, idx, event); WARN_ON(1); @@ -4391,8 +4441,13 @@ cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, */ if (event->attr.precise_ip == 3) { /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */ - if (constraint_match(&fixed0_constraint, event->hw.config)) - return &fixed0_counter0_1_constraint; + if (constraint_match(&fixed0_constraint, event->hw.config)) { + /* The fixed counter 0 doesn't support LBR event logging. */ + if (branch_sample_counters(event)) + return &counter0_1_constraint; + else + return &fixed0_counter0_1_constraint; + } switch (c->idxmsk64 & 0x3ull) { case 0x1: @@ -4414,7 +4469,7 @@ rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, { struct event_constraint *c; - c = spr_get_event_constraints(cpuc, idx, event); + c = glc_get_event_constraints(cpuc, idx, event); /* The Retire Latency is not supported by the fixed counter 0. 
*/ if (event->attr.precise_ip && @@ -4438,9 +4493,9 @@ mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, { struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - if (pmu->cpu_type == hybrid_big) + if (pmu->pmu_type == hybrid_big) return rwc_get_event_constraints(cpuc, idx, event); - if (pmu->cpu_type == hybrid_small) + if (pmu->pmu_type == hybrid_small) return cmt_get_event_constraints(cpuc, idx, event); WARN_ON(1); @@ -4451,18 +4506,18 @@ static int adl_hw_config(struct perf_event *event) { struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - if (pmu->cpu_type == hybrid_big) + if (pmu->pmu_type == hybrid_big) return hsw_hw_config(event); - else if (pmu->cpu_type == hybrid_small) + else if (pmu->pmu_type == hybrid_small) return intel_pmu_hw_config(event); WARN_ON(1); return -EOPNOTSUPP; } -static u8 adl_get_hybrid_cpu_type(void) +static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void) { - return hybrid_big; + return HYBRID_INTEL_CORE; } static inline bool erratum_hsw11(struct perf_event *event) @@ -4513,7 +4568,7 @@ static void nhm_limit_period(struct perf_event *event, s64 *left) *left = max(*left, 32LL); } -static void spr_limit_period(struct perf_event *event, s64 *left) +static void glc_limit_period(struct perf_event *event, s64 *left) { if (event->attr.precise_ip == 3) *left = max(*left, 128LL); @@ -4589,7 +4644,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) goto err; } - if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) { + if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) { size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); @@ -4641,6 +4696,23 @@ static void intel_pmu_check_num_counters(int *num_counters, int *num_counters_fixed, u64 *intel_ctrl, u64 fixed_mask); +static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, + int num_counters, + int num_counters_fixed, + u64 intel_ctrl); + +static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs); + +static inline bool intel_pmu_broken_perf_cap(void) +{ + /* The Perf Metric (Bit 15) is always cleared */ + if ((boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE) || + (boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE_L)) + return true; + + return false; +} + static void update_pmu_cap(struct x86_hybrid_pmu *pmu) { unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF); @@ -4651,27 +4723,83 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu) &eax, &ebx, &ecx, &edx); pmu->num_counters = fls(eax); pmu->num_counters_fixed = fls(ebx); - intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed, - &pmu->intel_ctrl, ebx); + } + + + if (!intel_pmu_broken_perf_cap()) { + /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */ + rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities); } } -static bool init_hybrid_pmu(int cpu) +static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) +{ + intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed, + &pmu->intel_ctrl, (1ULL << pmu->num_counters_fixed) - 1); + pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); + pmu->unconstrained = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, + 0, pmu->num_counters, 0, 0); + + if (pmu->intel_cap.perf_metrics) + pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; + else + pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); + + if 
(pmu->intel_cap.pebs_output_pt_available) + pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; + else + pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT; + + intel_pmu_check_event_constraints(pmu->event_constraints, + pmu->num_counters, + pmu->num_counters_fixed, + pmu->intel_ctrl); + + intel_pmu_check_extra_regs(pmu->extra_regs); +} + +static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void) { - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); u8 cpu_type = get_this_hybrid_cpu_type(); - struct x86_hybrid_pmu *pmu = NULL; int i; - if (!cpu_type && x86_pmu.get_hybrid_cpu_type) - cpu_type = x86_pmu.get_hybrid_cpu_type(); + /* + * This is running on a CPU model that is known to have hybrid + * configurations. But the CPU told us it is not hybrid, shame + * on it. There should be a fixup function provided for these + * troublesome CPUs (->get_hybrid_cpu_type). + */ + if (cpu_type == HYBRID_INTEL_NONE) { + if (x86_pmu.get_hybrid_cpu_type) + cpu_type = x86_pmu.get_hybrid_cpu_type(); + else + return NULL; + } + /* + * This essentially just maps between the 'hybrid_cpu_type' + * and 'hybrid_pmu_type' enums: + */ for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { - if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) { - pmu = &x86_pmu.hybrid_pmu[i]; - break; - } + enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type; + + if (cpu_type == HYBRID_INTEL_CORE && + pmu_type == hybrid_big) + return &x86_pmu.hybrid_pmu[i]; + if (cpu_type == HYBRID_INTEL_ATOM && + pmu_type == hybrid_small) + return &x86_pmu.hybrid_pmu[i]; } + + return NULL; +} + +static bool init_hybrid_pmu(int cpu) +{ + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu(); + if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) { cpuc->pmu = NULL; return false; @@ -4684,6 +4812,8 @@ static bool init_hybrid_pmu(int cpu) if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) update_pmu_cap(pmu); + intel_pmu_check_hybrid_pmus(pmu); + if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed)) return false; @@ -5360,14 +5490,14 @@ static struct attribute *icl_tsx_events_attrs[] = { EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2"); EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82"); -static struct attribute *spr_events_attrs[] = { +static struct attribute *glc_events_attrs[] = { EVENT_PTR(mem_ld_hsw), EVENT_PTR(mem_st_spr), EVENT_PTR(mem_ld_aux), NULL, }; -static struct attribute *spr_td_events_attrs[] = { +static struct attribute *glc_td_events_attrs[] = { EVENT_PTR(slots), EVENT_PTR(td_retiring), EVENT_PTR(td_bad_spec), @@ -5380,7 +5510,7 @@ static struct attribute *spr_td_events_attrs[] = { NULL, }; -static struct attribute *spr_tsx_events_attrs[] = { +static struct attribute *glc_tsx_events_attrs[] = { EVENT_PTR(tx_start), EVENT_PTR(tx_abort), EVENT_PTR(tx_commit), @@ -5486,11 +5616,41 @@ static ssize_t branches_show(struct device *cdev, static DEVICE_ATTR_RO(branches); +static ssize_t branch_counter_nr_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters)); +} + +static DEVICE_ATTR_RO(branch_counter_nr); + +static ssize_t branch_counter_width_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS); +} + +static DEVICE_ATTR_RO(branch_counter_width); + static struct attribute *lbr_attrs[] = { &dev_attr_branches.attr, + &dev_attr_branch_counter_nr.attr, + 
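/* + * A usage sketch, assuming these attrs stay in the PMU's "caps" sysfs + * group alongside dev_attr_branches: + * + * $ cat /sys/bus/event_source/devices/cpu/caps/branch_counter_nr + * $ cat /sys/bus/event_source/devices/cpu/caps/branch_counter_width + * + * lbr_is_visible() below hides the two new files unless PMU_FL_BR_CNTR + * is set. + */ +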
&dev_attr_branch_counter_width.attr, NULL }; +static umode_t +lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + /* branches */ + if (i == 0) + return x86_pmu.lbr_nr ? attr->mode : 0; + + return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0; +} + static char pmu_name_str[30]; static ssize_t pmu_name_show(struct device *cdev, @@ -5517,6 +5677,15 @@ static struct attribute *intel_pmu_attrs[] = { NULL, }; +static umode_t +default_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + if (attr == &dev_attr_allow_tsx_force_abort.attr) + return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0; + + return attr->mode; +} + static umode_t tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) { @@ -5538,27 +5707,12 @@ mem_is_visible(struct kobject *kobj, struct attribute *attr, int i) return pebs_is_visible(kobj, attr, i); } -static umode_t -lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) -{ - return x86_pmu.lbr_nr ? attr->mode : 0; -} - static umode_t exra_is_visible(struct kobject *kobj, struct attribute *attr, int i) { return x86_pmu.version >= 2 ? attr->mode : 0; } -static umode_t -default_is_visible(struct kobject *kobj, struct attribute *attr, int i) -{ - if (attr == &dev_attr_allow_tsx_force_abort.attr) - return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0; - - return attr->mode; -} - static umode_t td_is_visible(struct kobject *kobj, struct attribute *attr, int i) { @@ -5736,7 +5890,7 @@ static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr) struct perf_pmu_events_hybrid_attr *pmu_attr = container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr); - return pmu->cpu_type & pmu_attr->pmu_type; + return pmu->pmu_type & pmu_attr->pmu_type; } static umode_t hybrid_events_is_visible(struct kobject *kobj, @@ -5773,7 +5927,7 @@ static umode_t hybrid_format_is_visible(struct kobject *kobj, container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr); int cpu = hybrid_find_supported_cpu(pmu); - return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0; + return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0; } static umode_t hybrid_td_is_visible(struct kobject *kobj, @@ -5788,7 +5942,7 @@ static umode_t hybrid_td_is_visible(struct kobject *kobj, /* Only the big core supports perf metrics */ - if (pmu->cpu_type == hybrid_big) + if (pmu->pmu_type == hybrid_big) return pmu->intel_cap.perf_metrics ? 
attr->mode : 0; return attr->mode; @@ -5935,40 +6089,95 @@ static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs) } } -static void intel_pmu_check_hybrid_pmus(u64 fixed_mask) +static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = { + { hybrid_small, "cpu_atom" }, + { hybrid_big, "cpu_core" }, +}; + +static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus) { + unsigned long pmus_mask = pmus; struct x86_hybrid_pmu *pmu; - int i; + int idx = 0, bit; - for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { - pmu = &x86_pmu.hybrid_pmu[i]; + x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask); + x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus, + sizeof(struct x86_hybrid_pmu), + GFP_KERNEL); + if (!x86_pmu.hybrid_pmu) + return -ENOMEM; - intel_pmu_check_num_counters(&pmu->num_counters, - &pmu->num_counters_fixed, - &pmu->intel_ctrl, - fixed_mask); + static_branch_enable(&perf_is_hybrid); + x86_pmu.filter = intel_pmu_filter; - if (pmu->intel_cap.perf_metrics) { - pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; - pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS; - } - - if (pmu->intel_cap.pebs_output_pt_available) - pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; + for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) { + pmu = &x86_pmu.hybrid_pmu[idx++]; + pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id; + pmu->name = intel_hybrid_pmu_type_map[bit].name; - intel_pmu_check_event_constraints(pmu->event_constraints, - pmu->num_counters, - pmu->num_counters_fixed, - pmu->intel_ctrl); + pmu->num_counters = x86_pmu.num_counters; + pmu->num_counters_fixed = x86_pmu.num_counters_fixed; + pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); + pmu->unconstrained = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, + 0, pmu->num_counters, 0, 0); - intel_pmu_check_extra_regs(pmu->extra_regs); + pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; + if (pmu->pmu_type & hybrid_small) { + pmu->intel_cap.perf_metrics = 0; + pmu->intel_cap.pebs_output_pt_available = 1; + pmu->mid_ack = true; + } else if (pmu->pmu_type & hybrid_big) { + pmu->intel_cap.perf_metrics = 1; + pmu->intel_cap.pebs_output_pt_available = 0; + pmu->late_ack = true; + } } + + return 0; } -static __always_inline bool is_mtl(u8 x86_model) -{ - return (x86_model == INTEL_FAM6_METEORLAKE) || - (x86_model == INTEL_FAM6_METEORLAKE_L); +static __always_inline void intel_pmu_init_glc(struct pmu *pmu) +{ + x86_pmu.late_ack = true; + x86_pmu.limit_period = glc_limit_period; + x86_pmu.pebs_aliases = NULL; + x86_pmu.pebs_prec_dist = true; + x86_pmu.pebs_block = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_NO_HT_SHARING; + x86_pmu.flags |= PMU_FL_INSTR_LATENCY; + x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); + x86_pmu.lbr_pt_coexist = true; + x86_pmu.num_topdown_events = 8; + static_call_update(intel_pmu_update_topdown_event, + &icl_update_topdown_event); + static_call_update(intel_pmu_set_topdown_event_period, + &icl_set_topdown_event_period); + + memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + hybrid(pmu, event_constraints) = intel_glc_event_constraints; + hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints; +} + +static __always_inline void intel_pmu_init_grt(struct pmu *pmu) +{ + 
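/* + * Shared PerfMon setup for the Gracemont-class Atom cores: used by the + * standalone ATOM_GRACEMONT and ATOM_CRESTMONT* entries below as well as + * for the small-core half of the Alder Lake and Meteor Lake hybrids. + * With a NULL pmu, the hybrid_var()/hybrid() accessors operate on the + * global x86_pmu state rather than on a per-PMU copy. + */ +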
x86_pmu.mid_ack = true; + x86_pmu.limit_period = glc_limit_period; + x86_pmu.pebs_aliases = NULL; + x86_pmu.pebs_prec_dist = true; + x86_pmu.pebs_block = true; + x86_pmu.lbr_pt_coexist = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_INSTR_LATENCY; + + memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; + hybrid(pmu, event_constraints) = intel_slm_event_constraints; + hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints; + hybrid(pmu, extra_regs) = intel_grt_extra_regs; } __init int intel_pmu_init(void) @@ -6249,28 +6458,10 @@ __init int intel_pmu_init(void) break; case INTEL_FAM6_ATOM_GRACEMONT: - x86_pmu.mid_ack = true; - memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, - sizeof(hw_cache_extra_regs)); - hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; - - x86_pmu.event_constraints = intel_slm_event_constraints; - x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints; - x86_pmu.extra_regs = intel_grt_extra_regs; - - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.pebs_block = true; - x86_pmu.lbr_pt_coexist = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; - + intel_pmu_init_grt(NULL); intel_pmu_pebs_data_source_grt(); x86_pmu.pebs_latency_data = adl_latency_data_small; x86_pmu.get_event_constraints = tnt_get_event_constraints; - x86_pmu.limit_period = spr_limit_period; td_attr = tnt_events_attrs; mem_attr = grt_mem_attrs; extra_attr = nhm_format_attr; @@ -6280,28 +6471,11 @@ __init int intel_pmu_init(void) case INTEL_FAM6_ATOM_CRESTMONT: case INTEL_FAM6_ATOM_CRESTMONT_X: - x86_pmu.mid_ack = true; - memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, - sizeof(hw_cache_extra_regs)); - hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; - - x86_pmu.event_constraints = intel_slm_event_constraints; - x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints; + intel_pmu_init_grt(NULL); x86_pmu.extra_regs = intel_cmt_extra_regs; - - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.lbr_pt_coexist = true; - x86_pmu.pebs_block = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; - intel_pmu_pebs_data_source_cmt(); x86_pmu.pebs_latency_data = mtl_latency_data_small; x86_pmu.get_event_constraints = cmt_get_event_constraints; - x86_pmu.limit_period = spr_limit_period; td_attr = cmt_events_attrs; mem_attr = grt_mem_attrs; extra_attr = cmt_format_attr; @@ -6619,46 +6793,29 @@ __init int intel_pmu_init(void) case INTEL_FAM6_SAPPHIRERAPIDS_X: case INTEL_FAM6_EMERALDRAPIDS_X: x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; - x86_pmu.extra_regs = intel_spr_extra_regs; - fallthrough; + x86_pmu.extra_regs = intel_glc_extra_regs; + pr_cont("Sapphire Rapids events, "); + name = "sapphire_rapids"; + goto glc_common; + case INTEL_FAM6_GRANITERAPIDS_X: case INTEL_FAM6_GRANITERAPIDS_D: - pmem = true; - x86_pmu.late_ack = true; - memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); - - x86_pmu.event_constraints = 
intel_spr_event_constraints; - x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints; - if (!x86_pmu.extra_regs) - x86_pmu.extra_regs = intel_gnr_extra_regs; - x86_pmu.limit_period = spr_limit_period; - x86_pmu.pebs_ept = 1; - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.pebs_block = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; + x86_pmu.extra_regs = intel_rwc_extra_regs; + pr_cont("Granite Rapids events, "); + name = "granite_rapids"; + glc_common: + intel_pmu_init_glc(NULL); + x86_pmu.pebs_ept = 1; x86_pmu.hw_config = hsw_hw_config; - x86_pmu.get_event_constraints = spr_get_event_constraints; + x86_pmu.get_event_constraints = glc_get_event_constraints; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; extra_skl_attr = skl_format_attr; - mem_attr = spr_events_attrs; - td_attr = spr_td_events_attrs; - tsx_attr = spr_tsx_events_attrs; - x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); - x86_pmu.lbr_pt_coexist = true; - intel_pmu_pebs_data_source_skl(pmem); - x86_pmu.num_topdown_events = 8; - static_call_update(intel_pmu_update_topdown_event, - &icl_update_topdown_event); - static_call_update(intel_pmu_set_topdown_event_period, - &icl_set_topdown_event_period); - pr_cont("Sapphire Rapids events, "); - name = "sapphire_rapids"; + mem_attr = glc_events_attrs; + td_attr = glc_td_events_attrs; + tsx_attr = glc_tsx_events_attrs; + intel_pmu_pebs_data_source_skl(true); break; case INTEL_FAM6_ALDERLAKE: @@ -6666,47 +6823,17 @@ __init int intel_pmu_init(void) case INTEL_FAM6_RAPTORLAKE: case INTEL_FAM6_RAPTORLAKE_P: case INTEL_FAM6_RAPTORLAKE_S: - case INTEL_FAM6_METEORLAKE: - case INTEL_FAM6_METEORLAKE_L: /* * Alder Lake has 2 types of CPU, core and atom. * * Initialize the common PerfMon capabilities here. */ - x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS, - sizeof(struct x86_hybrid_pmu), - GFP_KERNEL); - if (!x86_pmu.hybrid_pmu) - return -ENOMEM; - static_branch_enable(&perf_is_hybrid); - x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS; + intel_pmu_init_hybrid(hybrid_big_small); - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.pebs_block = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; - x86_pmu.lbr_pt_coexist = true; x86_pmu.pebs_latency_data = adl_latency_data_small; - x86_pmu.num_topdown_events = 8; - static_call_update(intel_pmu_update_topdown_event, - &adl_update_topdown_event); - static_call_update(intel_pmu_set_topdown_event_period, - &adl_set_topdown_event_period); - - x86_pmu.filter = intel_pmu_filter; x86_pmu.get_event_constraints = adl_get_event_constraints; x86_pmu.hw_config = adl_hw_config; - x86_pmu.limit_period = spr_limit_period; x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type; - /* - * The rtm_abort_event is used to check whether to enable GPRs - * for the RTM abort event. Atom doesn't have the RTM abort - * event. There is no harmful to set it in the common - * x86_pmu.rtm_abort_event. 
- */ - x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); td_attr = adl_hybrid_events_attrs; mem_attr = adl_hybrid_mem_attrs; @@ -6716,9 +6843,7 @@ __init int intel_pmu_init(void) /* Initialize big core specific PerfMon capabilities.*/ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; - pmu->name = "cpu_core"; - pmu->cpu_type = hybrid_big; - pmu->late_ack = true; + intel_pmu_init_glc(&pmu->pmu); if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) { pmu->num_counters = x86_pmu.num_counters + 2; pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1; @@ -6743,54 +6868,45 @@ __init int intel_pmu_init(void) pmu->unconstrained = (struct event_constraint) __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, 0, pmu->num_counters, 0, 0); - pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; - pmu->intel_cap.perf_metrics = 1; - pmu->intel_cap.pebs_output_pt_available = 0; + pmu->extra_regs = intel_glc_extra_regs; + + /* Initialize Atom core specific PerfMon capabilities.*/ + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; + intel_pmu_init_grt(&pmu->pmu); + + x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; + intel_pmu_pebs_data_source_adl(); + pr_cont("Alderlake Hybrid events, "); + name = "alderlake_hybrid"; + break; + + case INTEL_FAM6_METEORLAKE: + case INTEL_FAM6_METEORLAKE_L: + intel_pmu_init_hybrid(hybrid_big_small); + + x86_pmu.pebs_latency_data = mtl_latency_data_small; + x86_pmu.get_event_constraints = mtl_get_event_constraints; + x86_pmu.hw_config = adl_hw_config; + + td_attr = adl_hybrid_events_attrs; + mem_attr = mtl_hybrid_mem_attrs; + tsx_attr = adl_hybrid_tsx_attrs; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? + mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; - memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); - memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); - pmu->event_constraints = intel_spr_event_constraints; - pmu->pebs_constraints = intel_spr_pebs_event_constraints; - pmu->extra_regs = intel_spr_extra_regs; + /* Initialize big core specific PerfMon capabilities.*/ + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; + intel_pmu_init_glc(&pmu->pmu); + pmu->extra_regs = intel_rwc_extra_regs; /* Initialize Atom core specific PerfMon capabilities.*/ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; - pmu->name = "cpu_atom"; - pmu->cpu_type = hybrid_small; - pmu->mid_ack = true; - pmu->num_counters = x86_pmu.num_counters; - pmu->num_counters_fixed = x86_pmu.num_counters_fixed; - pmu->max_pebs_events = x86_pmu.max_pebs_events; - pmu->unconstrained = (struct event_constraint) - __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, - 0, pmu->num_counters, 0, 0); - pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; - pmu->intel_cap.perf_metrics = 0; - pmu->intel_cap.pebs_output_pt_available = 1; - - memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); - memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); - pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; - pmu->event_constraints = intel_slm_event_constraints; - pmu->pebs_constraints = intel_grt_pebs_event_constraints; - pmu->extra_regs = intel_grt_extra_regs; - if (is_mtl(boot_cpu_data.x86_model)) { - x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_gnr_extra_regs; - x86_pmu.pebs_latency_data = mtl_latency_data_small; - extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
- mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; - mem_attr = mtl_hybrid_mem_attrs; - intel_pmu_pebs_data_source_mtl(); - x86_pmu.get_event_constraints = mtl_get_event_constraints; - pmu->extra_regs = intel_cmt_extra_regs; - pr_cont("Meteorlake Hybrid events, "); - name = "meteorlake_hybrid"; - } else { - x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; - intel_pmu_pebs_data_source_adl(); - pr_cont("Alderlake Hybrid events, "); - name = "alderlake_hybrid"; - } + intel_pmu_init_grt(&pmu->pmu); + pmu->extra_regs = intel_cmt_extra_regs; + + intel_pmu_pebs_data_source_mtl(); + pr_cont("Meteorlake Hybrid events, "); + name = "meteorlake_hybrid"; break; default: @@ -6902,9 +7018,6 @@ __init int intel_pmu_init(void) if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; - if (is_hybrid()) - intel_pmu_check_hybrid_pmus((u64)fixed_mask); - if (x86_pmu.intel_cap.pebs_timing_info) x86_pmu.flags |= PMU_FL_RETIRE_LATENCY; diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index cc6609cbfc8dad39c48c05c18e81b8e831c0e735..d7ce7eec0cd2f7508bafc9ebb4c5580a2c77ecfa 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -41,7 +41,7 @@ * MSR_CORE_C1_RES: CORE C1 Residency Counter * perf code: 0x00 * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL - * MTL + * MTL,SRF,GRR * Scope: Core (each processor core has a MSR) * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter * perf code: 0x01 @@ -52,7 +52,8 @@ * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, - * TGL,TNT,RKL,ADL,RPL,SPR,MTL + * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF, + * GRR * Scope: Core * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter * perf code: 0x03 @@ -75,7 +76,7 @@ * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, - * TGL,TNT,RKL,ADL,RPL,SPR,MTL + * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF * Scope: Package (physical package) * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. * perf code: 0x03 @@ -96,6 +97,10 @@ * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL, * TNT,RKL,ADL,RPL,MTL * Scope: Package (physical package) + * MSR_MODULE_C6_RES_MS: Module C6 Residency Counter. + * perf code: 0x00 + * Available model: SRF,GRR + * Scope: A cluster of cores sharing an L2 cache + * */ @@ -129,6 +134,7 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev, struct cstate_model { unsigned long core_events; unsigned long pkg_events; + unsigned long module_events; unsigned long quirks; }; @@ -188,20 +194,20 @@ static struct attribute *attrs_empty[] = { * "events" group (with empty attrs) before updating * it with detected events.
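* The same skeleton groups now serve the core, pkg and the new module * (cluster) PMUs, which differ only in their attr_update lists. Once the * module PMU is registered, its counter can be read, as a usage sketch, * with: perf stat -e cstate_module/c6-residency/ -a sleep 1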
*/ -static struct attribute_group core_events_attr_group = { +static struct attribute_group cstate_events_attr_group = { .name = "events", .attrs = attrs_empty, }; -DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63"); -static struct attribute *core_format_attrs[] = { - &format_attr_core_event.attr, +DEFINE_CSTATE_FORMAT_ATTR(cstate_event, event, "config:0-63"); +static struct attribute *cstate_format_attrs[] = { + &format_attr_cstate_event.attr, NULL, }; -static struct attribute_group core_format_attr_group = { +static struct attribute_group cstate_format_attr_group = { .name = "format", - .attrs = core_format_attrs, + .attrs = cstate_format_attrs, }; static cpumask_t cstate_core_cpu_mask; @@ -216,9 +222,9 @@ static struct attribute_group cpumask_attr_group = { .attrs = cstate_cpumask_attrs, }; -static const struct attribute_group *core_attr_groups[] = { - &core_events_attr_group, - &core_format_attr_group, +static const struct attribute_group *cstate_attr_groups[] = { + &cstate_events_attr_group, + &cstate_format_attr_group, &cpumask_attr_group, NULL, }; @@ -267,30 +273,30 @@ static struct perf_msr pkg_msr[] = { [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr }, }; -static struct attribute_group pkg_events_attr_group = { - .name = "events", - .attrs = attrs_empty, -}; +static cpumask_t cstate_pkg_cpu_mask; -DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63"); -static struct attribute *pkg_format_attrs[] = { - &format_attr_pkg_event.attr, - NULL, -}; -static struct attribute_group pkg_format_attr_group = { - .name = "format", - .attrs = pkg_format_attrs, +/* cstate_module PMU */ +static struct pmu cstate_module_pmu; +static bool has_cstate_module; + +enum perf_cstate_module_events { + PERF_CSTATE_MODULE_C6_RES = 0, + + PERF_CSTATE_MODULE_EVENT_MAX, }; -static cpumask_t cstate_pkg_cpu_mask; +PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_module_c6, "event=0x00"); -static const struct attribute_group *pkg_attr_groups[] = { - &pkg_events_attr_group, - &pkg_format_attr_group, - &cpumask_attr_group, - NULL, +static unsigned long module_msr_mask; + +PMU_EVENT_GROUP(events, cstate_module_c6); + +static struct perf_msr module_msr[] = { + [PERF_CSTATE_MODULE_C6_RES] = { MSR_MODULE_C6_RES_MS, &group_cstate_module_c6, test_msr }, }; +static cpumask_t cstate_module_cpu_mask; + static ssize_t cstate_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) @@ -301,6 +307,8 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev, return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask); else if (pmu == &cstate_pkg_pmu) return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask); + else if (pmu == &cstate_module_pmu) + return cpumap_print_to_pagebuf(true, buf, &cstate_module_cpu_mask); else return 0; } @@ -338,6 +346,15 @@ static int cstate_pmu_event_init(struct perf_event *event) event->hw.event_base = pkg_msr[cfg].msr; cpu = cpumask_any_and(&cstate_pkg_cpu_mask, topology_die_cpumask(event->cpu)); + } else if (event->pmu == &cstate_module_pmu) { + if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX) + return -EINVAL; + cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_MODULE_EVENT_MAX); + if (!(module_msr_mask & (1 << cfg))) + return -EINVAL; + event->hw.event_base = module_msr[cfg].msr; + cpu = cpumask_any_and(&cstate_module_cpu_mask, + topology_cluster_cpumask(event->cpu)); } else { return -ENOENT; } @@ -425,6 +442,17 @@ static int cstate_cpu_exit(unsigned int cpu) perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, 
target); } } + + if (has_cstate_module && + cpumask_test_and_clear_cpu(cpu, &cstate_module_cpu_mask)) { + + target = cpumask_any_but(topology_cluster_cpumask(cpu), cpu); + /* Migrate events if there is a valid target */ + if (target < nr_cpu_ids) { + cpumask_set_cpu(target, &cstate_module_cpu_mask); + perf_pmu_migrate_context(&cstate_module_pmu, cpu, target); + } + } return 0; } @@ -451,6 +479,15 @@ static int cstate_cpu_init(unsigned int cpu) if (has_cstate_pkg && target >= nr_cpu_ids) cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask); + /* + * If this is the first online thread of that cluster, set it + * in the cluster cpu mask as the designated reader. + */ + target = cpumask_any_and(&cstate_module_cpu_mask, + topology_cluster_cpumask(cpu)); + if (has_cstate_module && target >= nr_cpu_ids) + cpumask_set_cpu(cpu, &cstate_module_cpu_mask); + return 0; } @@ -473,8 +510,13 @@ static const struct attribute_group *pkg_attr_update[] = { NULL, }; +static const struct attribute_group *module_attr_update[] = { + &group_cstate_module_c6, + NULL +}; + static struct pmu cstate_core_pmu = { - .attr_groups = core_attr_groups, + .attr_groups = cstate_attr_groups, .attr_update = core_attr_update, .name = "cstate_core", .task_ctx_nr = perf_invalid_context, @@ -489,7 +531,7 @@ static struct pmu cstate_core_pmu = { }; static struct pmu cstate_pkg_pmu = { - .attr_groups = pkg_attr_groups, + .attr_groups = cstate_attr_groups, .attr_update = pkg_attr_update, .name = "cstate_pkg", .task_ctx_nr = perf_invalid_context, @@ -503,6 +545,21 @@ static struct pmu cstate_pkg_pmu = { .module = THIS_MODULE, }; +static struct pmu cstate_module_pmu = { + .attr_groups = cstate_attr_groups, + .attr_update = module_attr_update, + .name = "cstate_module", + .task_ctx_nr = perf_invalid_context, + .event_init = cstate_pmu_event_init, + .add = cstate_pmu_event_add, + .del = cstate_pmu_event_del, + .start = cstate_pmu_event_start, + .stop = cstate_pmu_event_stop, + .read = cstate_pmu_event_update, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, + .module = THIS_MODULE, +}; + static const struct cstate_model nhm_cstates __initconst = { .core_events = BIT(PERF_CSTATE_CORE_C3_RES) | BIT(PERF_CSTATE_CORE_C6_RES), @@ -615,6 +672,22 @@ static const struct cstate_model glm_cstates __initconst = { BIT(PERF_CSTATE_PKG_C10_RES), }; +static const struct cstate_model grr_cstates __initconst = { + .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | + BIT(PERF_CSTATE_CORE_C6_RES), + + .module_events = BIT(PERF_CSTATE_MODULE_C6_RES), +}; + +static const struct cstate_model srf_cstates __initconst = { + .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | + BIT(PERF_CSTATE_CORE_C6_RES), + + .pkg_events = BIT(PERF_CSTATE_PKG_C6_RES), + + .module_events = BIT(PERF_CSTATE_MODULE_C6_RES), +}; + static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_cstates), @@ -667,6 +740,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &glm_cstates), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &glm_cstates), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_cstates), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &srf_cstates), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &grr_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates), @@ -708,10 +783,14 @@ static int __init cstate_probe(const struct cstate_model *cm) pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX, true, 
(void *) &cm->pkg_events); + module_msr_mask = perf_msr_probe(module_msr, PERF_CSTATE_MODULE_EVENT_MAX, + true, (void *) &cm->module_events); + has_cstate_core = !!core_msr_mask; has_cstate_pkg = !!pkg_msr_mask; + has_cstate_module = !!module_msr_mask; - return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV; + return (has_cstate_core || has_cstate_pkg || has_cstate_module) ? 0 : -ENODEV; } static inline void cstate_cleanup(void) @@ -724,6 +803,9 @@ static inline void cstate_cleanup(void) if (has_cstate_pkg) perf_pmu_unregister(&cstate_pkg_pmu); + + if (has_cstate_module) + perf_pmu_unregister(&cstate_module_pmu); } static int __init cstate_init(void) @@ -760,6 +842,16 @@ static int __init cstate_init(void) return err; } } + + if (has_cstate_module) { + err = perf_pmu_register(&cstate_module_pmu, cstate_module_pmu.name, -1); + if (err) { + has_cstate_module = false; + pr_info("Failed to register cstate cluster pmu\n"); + cstate_cleanup(); + return err; + } + } return 0; } diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index d9a51b638931c6e51932049cf5e04460d0ec3542..c165c8202ad0439be5702ad4a39227bbaaf14fe0 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -261,7 +261,7 @@ static u64 __adl_latency_data_small(struct perf_event *event, u64 status, { u64 val; - WARN_ON_ONCE(hybrid_pmu(event->pmu)->cpu_type == hybrid_big); + WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); dse &= PERF_PEBS_DATA_SOURCE_MASK; val = hybrid_var(event->pmu, pebs_data_source)[dse]; @@ -1058,7 +1058,7 @@ struct event_constraint intel_icl_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; -struct event_constraint intel_spr_pebs_event_constraints[] = { +struct event_constraint intel_glc_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), @@ -1755,7 +1755,7 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event, setup_pebs_time(event, data, pebs->tsc); if (has_branch_stack(event)) - perf_sample_save_brstack(data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); } static void adaptive_pebs_save_regs(struct pt_regs *regs, @@ -1916,7 +1916,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, if (has_branch_stack(event)) { intel_pmu_store_pebs_lbrs(lbr); - perf_sample_save_brstack(data, event, &cpuc->lbr_stack); + intel_pmu_lbr_save_brstack(data, cpuc, event); } } diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index c3b0d15a9841b5edec20e10240850facae803772..78cd5084104e9c205a6e949f6ee1ce6b93060fb0 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -676,6 +676,25 @@ void intel_pmu_lbr_del(struct perf_event *event) WARN_ON_ONCE(cpuc->lbr_users < 0); WARN_ON_ONCE(cpuc->lbr_pebs_users < 0); perf_sched_cb_dec(event->pmu); + + /* + * The logged occurrences information is only valid for the + * current LBR group. If another LBR group is scheduled in + * later, the information from the stale LBRs will be wrongly + * interpreted. Reset the LBRs here. + * + * Only clear once for a branch counter group with the leader + * event. Because + * - Cannot simply reset the LBRs with the !cpuc->lbr_users. + * Because it's possible that the last LBR user is not in a + * branch counter group, e.g., a branch_counters group + + * several normal LBR events. 
+ * - The LBR reset can be done with any one of the events in a + * branch counter group, since they are always scheduled together. + * It's easy to force the leader event to be an LBR event. + */ + if (is_branch_counters_group(event) && event == event->group_leader) + intel_pmu_lbr_reset(); } static inline bool vlbr_exclude_host(void) @@ -866,6 +885,8 @@ static __always_inline u16 get_lbr_cycles(u64 info) return cycles; } +static_assert((64 - PERF_BRANCH_ENTRY_INFO_BITS_MAX) > LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS); + static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc, struct lbr_entry *entries) { @@ -898,11 +919,67 @@ static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc, e->abort = !!(info & LBR_INFO_ABORT); e->cycles = get_lbr_cycles(info); e->type = get_lbr_br_type(info); + + /* + * Leverage the reserved field of cpuc->lbr_entries[i] to + * temporarily store the branch counters information. + * The later code will decide what content can be disclosed + * to the perf tool. Please see intel_pmu_lbr_counters_reorder(). + */ + e->reserved = (info >> LBR_INFO_BR_CNTR_OFFSET) & LBR_INFO_BR_CNTR_FULL_MASK; } cpuc->lbr_stack.nr = i; } +/* + * The enabled order may be different from the counter order. + * Update the lbr_counters with the enabled order. + */ +static void intel_pmu_lbr_counters_reorder(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + int i, j, pos = 0, order[X86_PMC_IDX_MAX]; + struct perf_event *leader, *sibling; + u64 src, dst, cnt; + + leader = event->group_leader; + if (branch_sample_counters(leader)) + order[pos++] = leader->hw.idx; + + for_each_sibling_event(sibling, leader) { + if (!branch_sample_counters(sibling)) + continue; + order[pos++] = sibling->hw.idx; + } + + WARN_ON_ONCE(!pos); + + for (i = 0; i < cpuc->lbr_stack.nr; i++) { + src = cpuc->lbr_entries[i].reserved; + dst = 0; + for (j = 0; j < pos; j++) { + cnt = (src >> (order[j] * LBR_INFO_BR_CNTR_BITS)) & LBR_INFO_BR_CNTR_MASK; + dst |= cnt << j * LBR_INFO_BR_CNTR_BITS; + } + cpuc->lbr_counters[i] = dst; + cpuc->lbr_entries[i].reserved = 0; + } +} + +void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, + struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + if (is_branch_counters_group(event)) { + intel_pmu_lbr_counters_reorder(cpuc, event); + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, cpuc->lbr_counters); + return; + } + + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); +} + static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc) { intel_pmu_store_lbr(cpuc, NULL); @@ -1173,8 +1250,10 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) for (i = 0; i < cpuc->lbr_stack.nr; ) { if (!cpuc->lbr_entries[i].from) { j = i; - while (++j < cpuc->lbr_stack.nr) + while (++j < cpuc->lbr_stack.nr) { cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j]; + cpuc->lbr_counters[j-1] = cpuc->lbr_counters[j]; + } cpuc->lbr_stack.nr--; if (!cpuc->lbr_entries[i].from) continue; @@ -1525,8 +1604,12 @@ void __init intel_pmu_arch_lbr_init(void) x86_pmu.lbr_mispred = ecx.split.lbr_mispred; x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr; x86_pmu.lbr_br_type = ecx.split.lbr_br_type; + x86_pmu.lbr_counters = ecx.split.lbr_counters; x86_pmu.lbr_nr = lbr_nr; + if (!!x86_pmu.lbr_counters) + x86_pmu.flags |= PMU_FL_BR_CNTR; + if (x86_pmu.lbr_mispred) static_branch_enable(&x86_lbr_mispred); if (x86_pmu.lbr_timed_lbr) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index
69043e02e8a7d828ec2f096ba940648c5c3f39ea..4d856b51307f3b183743764e868a18d27a071f9e 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -263,6 +263,9 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box, return; } + if (intel_generic_uncore_assign_hw_event(event, box)) + return; + hwc->config_base = uncore_event_ctl(box, hwc->idx); hwc->event_base = uncore_perf_ctr(box, hwc->idx); } @@ -843,7 +846,9 @@ static void uncore_pmu_disable(struct pmu *pmu) static ssize_t uncore_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) { - return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask); + struct intel_uncore_pmu *pmu = container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu); + + return cpumap_print_to_pagebuf(true, buf, &pmu->cpu_mask); } static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL); @@ -860,7 +865,10 @@ static const struct attribute_group uncore_pmu_attr_group = { static inline int uncore_get_box_id(struct intel_uncore_type *type, struct intel_uncore_pmu *pmu) { - return type->box_ids ? type->box_ids[pmu->pmu_idx] : pmu->pmu_idx; + if (type->boxes) + return intel_uncore_find_discovery_unit_id(type->boxes, -1, pmu->pmu_idx); + + return pmu->pmu_idx; } void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu) @@ -961,6 +969,9 @@ static void uncore_type_exit(struct intel_uncore_type *type) if (type->cleanup_mapping) type->cleanup_mapping(type); + if (type->cleanup_extra_boxes) + type->cleanup_extra_boxes(type); + if (pmu) { for (i = 0; i < type->num_boxes; i++, pmu++) { uncore_pmu_unregister(pmu); @@ -969,10 +980,7 @@ static void uncore_type_exit(struct intel_uncore_type *type) kfree(type->pmus); type->pmus = NULL; } - if (type->box_ids) { - kfree(type->box_ids); - type->box_ids = NULL; - } + kfree(type->events_group); type->events_group = NULL; } @@ -1076,22 +1084,19 @@ static struct intel_uncore_pmu * uncore_pci_find_dev_pmu_from_types(struct pci_dev *pdev) { struct intel_uncore_type **types = uncore_pci_uncores; + struct intel_uncore_discovery_unit *unit; struct intel_uncore_type *type; - u64 box_ctl; - int i, die; + struct rb_node *node; for (; *types; types++) { type = *types; - for (die = 0; die < __uncore_max_dies; die++) { - for (i = 0; i < type->num_boxes; i++) { - if (!type->box_ctls[die]) - continue; - box_ctl = type->box_ctls[die] + type->pci_offsets[i]; - if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(box_ctl) && - pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(box_ctl) && - pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl)) - return &type->pmus[i]; - } + + for (node = rb_first(type->boxes); node; node = rb_next(node)) { + unit = rb_entry(node, struct intel_uncore_discovery_unit, node); + if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(unit->addr) && + pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(unit->addr) && + pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr)) + return &type->pmus[unit->pmu_idx]; } } @@ -1367,28 +1372,25 @@ static struct notifier_block uncore_pci_notifier = { static void uncore_pci_pmus_register(void) { struct intel_uncore_type **types = uncore_pci_uncores; + struct intel_uncore_discovery_unit *unit; struct intel_uncore_type *type; struct intel_uncore_pmu *pmu; + struct rb_node *node; struct pci_dev *pdev; - u64 box_ctl; - int i, die; for (; *types; types++) { type = *types; - for (die = 0; die < __uncore_max_dies; die++) { - for (i = 0; i < type->num_boxes; i++) { - if (!type->box_ctls[die]) - continue; - box_ctl 
= type->box_ctls[die] + type->pci_offsets[i]; - pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl), - UNCORE_DISCOVERY_PCI_BUS(box_ctl), - UNCORE_DISCOVERY_PCI_DEVFN(box_ctl)); - if (!pdev) - continue; - pmu = &type->pmus[i]; - - uncore_pci_pmu_register(pdev, type, pmu, die); - } + + for (node = rb_first(type->boxes); node; node = rb_next(node)) { + unit = rb_entry(node, struct intel_uncore_discovery_unit, node); + pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr), + UNCORE_DISCOVERY_PCI_BUS(unit->addr), + UNCORE_DISCOVERY_PCI_DEVFN(unit->addr)); + + if (!pdev) + continue; + pmu = &type->pmus[unit->pmu_idx]; + uncore_pci_pmu_register(pdev, type, pmu, unit->die); } } @@ -1453,6 +1455,18 @@ static void uncore_pci_exit(void) } } +static bool uncore_die_has_box(struct intel_uncore_type *type, + int die, unsigned int pmu_idx) +{ + if (!type->boxes) + return true; + + if (intel_uncore_find_discovery_unit_id(type->boxes, die, pmu_idx) < 0) + return false; + + return true; +} + static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, int new_cpu) { @@ -1468,18 +1482,25 @@ static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, if (old_cpu < 0) { WARN_ON_ONCE(box->cpu != -1); - box->cpu = new_cpu; + if (uncore_die_has_box(type, die, pmu->pmu_idx)) { + box->cpu = new_cpu; + cpumask_set_cpu(new_cpu, &pmu->cpu_mask); + } continue; } - WARN_ON_ONCE(box->cpu != old_cpu); + WARN_ON_ONCE(box->cpu != -1 && box->cpu != old_cpu); box->cpu = -1; + cpumask_clear_cpu(old_cpu, &pmu->cpu_mask); if (new_cpu < 0) continue; + if (!uncore_die_has_box(type, die, pmu->pmu_idx)) + continue; uncore_pmu_cancel_hrtimer(box); perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu); box->cpu = new_cpu; + cpumask_set_cpu(new_cpu, &pmu->cpu_mask); } } @@ -1502,7 +1523,7 @@ static void uncore_box_unref(struct intel_uncore_type **types, int id) pmu = type->pmus; for (i = 0; i < type->num_boxes; i++, pmu++) { box = pmu->boxes[id]; - if (box && atomic_dec_return(&box->refcnt) == 0) + if (box && box->cpu >= 0 && atomic_dec_return(&box->refcnt) == 0) uncore_box_exit(box); } } @@ -1592,7 +1613,7 @@ static int uncore_box_ref(struct intel_uncore_type **types, pmu = type->pmus; for (i = 0; i < type->num_boxes; i++, pmu++) { box = pmu->boxes[id]; - if (box && atomic_inc_return(&box->refcnt) == 1) + if (box && box->cpu >= 0 && atomic_inc_return(&box->refcnt) == 1) uncore_box_init(box); } } @@ -1814,6 +1835,14 @@ static const struct intel_uncore_init_fun spr_uncore_init __initconst = { .uncore_units_ignore = spr_uncore_units_ignore, }; +static const struct intel_uncore_init_fun gnr_uncore_init __initconst = { + .cpu_init = gnr_uncore_cpu_init, + .pci_init = gnr_uncore_pci_init, + .mmio_init = gnr_uncore_mmio_init, + .use_discovery = true, + .uncore_units_ignore = gnr_uncore_units_ignore, +}; + static const struct intel_uncore_init_fun generic_uncore_init __initconst = { .cpu_init = intel_uncore_generic_uncore_cpu_init, .pci_init = intel_uncore_generic_uncore_pci_init, @@ -1865,8 +1894,12 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &mtl_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &gnr_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &gnr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init), 
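/* * Sierra Forest (ATOM_CRESTMONT_X) and Grand Ridge (ATOM_CRESTMONT) reuse * the discovery-based Granite Rapids uncore init, see the gnr entries below. */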
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &gnr_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &gnr_uncore_init), {}, }; MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match); diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index c30fb5bb1222b39ed54e4eabc28e1fd826b03455..027ef292c60223119071fd5ebd1a253a96058fd0 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -62,7 +62,6 @@ struct intel_uncore_type { unsigned fixed_ctr; unsigned fixed_ctl; unsigned box_ctl; - u64 *box_ctls; /* Unit ctrl addr of the first box of each die */ union { unsigned msr_offset; unsigned mmio_offset; @@ -72,11 +71,10 @@ struct intel_uncore_type { unsigned single_fixed:1; unsigned pair_ctr_ctl:1; union { - unsigned *msr_offsets; - unsigned *pci_offsets; - unsigned *mmio_offsets; + u64 *msr_offsets; + u64 *pci_offsets; + u64 *mmio_offsets; }; - unsigned *box_ids; struct event_constraint unconstrainted; struct event_constraint *constraints; struct intel_uncore_pmu *pmus; @@ -86,6 +84,7 @@ struct intel_uncore_type { const struct attribute_group *attr_groups[4]; const struct attribute_group **attr_update; struct pmu *pmu; /* for custom pmu ops */ + struct rb_root *boxes; /* * Uncore PMU would store relevant platform topology configuration here * to identify which platform component each PMON block of that type is @@ -98,6 +97,10 @@ struct intel_uncore_type { int (*get_topology)(struct intel_uncore_type *type); void (*set_mapping)(struct intel_uncore_type *type); void (*cleanup_mapping)(struct intel_uncore_type *type); + /* + * Optional callbacks for extra uncore units cleanup + */ + void (*cleanup_extra_boxes)(struct intel_uncore_type *type); }; #define pmu_group attr_groups[0] @@ -125,6 +128,7 @@ struct intel_uncore_pmu { int func_id; bool registered; atomic_t activeboxes; + cpumask_t cpu_mask; struct intel_uncore_type *type; struct intel_uncore_box **boxes; }; @@ -593,6 +597,7 @@ extern struct list_head pci2phy_map_head; extern struct pci_extra_dev *uncore_extra_pci_dev; extern struct event_constraint uncore_constraint_empty; extern int spr_uncore_units_ignore[]; +extern int gnr_uncore_units_ignore[]; /* uncore_snb.c */ int snb_uncore_pci_init(void); @@ -634,6 +639,9 @@ void icx_uncore_mmio_init(void); int spr_uncore_pci_init(void); void spr_uncore_cpu_init(void); void spr_uncore_mmio_init(void); +int gnr_uncore_pci_init(void); +void gnr_uncore_cpu_init(void); +void gnr_uncore_mmio_init(void); /* uncore_nhmex.c */ void nhmex_uncore_cpu_init(void); diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index cb488e41807c76cbf7c200c88be9ceff97ff04d2..571e44b496910d865b1e89d0d81cb1f4be8bef2c 100644 --- a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -89,9 +89,7 @@ add_uncore_discovery_type(struct uncore_unit_discovery *unit) if (!type) return NULL; - type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL); - if (!type->box_ctrl_die) - goto free_type; + type->units = RB_ROOT; type->access_type = unit->access_type; num_discovered_types[type->access_type]++; @@ -100,12 +98,6 @@ add_uncore_discovery_type(struct uncore_unit_discovery *unit) rb_add(&type->node, &discovery_tables, __type_less); return type; - -free_type: - kfree(type); - - return NULL; - } static struct intel_uncore_discovery_type * @@ -120,13 +112,118 @@ get_uncore_discovery_type(struct uncore_unit_discovery *unit) return 
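/* * The discovery units are kept in a per-type RB tree ordered by * (pmu_idx, die): rb_find_first() lands on the first unit of a given * pmu_idx and an rb_next() walk scans that pmu_idx's dies, see * intel_uncore_find_discovery_unit() below. */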
add_uncore_discovery_type(unit); } +static inline int pmu_idx_cmp(const void *key, const struct rb_node *b) +{ + struct intel_uncore_discovery_unit *unit; + const unsigned int *id = key; + + unit = rb_entry(b, struct intel_uncore_discovery_unit, node); + + if (unit->pmu_idx > *id) + return -1; + else if (unit->pmu_idx < *id) + return 1; + + return 0; +} + +static struct intel_uncore_discovery_unit * +intel_uncore_find_discovery_unit(struct rb_root *units, int die, + unsigned int pmu_idx) +{ + struct intel_uncore_discovery_unit *unit; + struct rb_node *pos; + + if (!units) + return NULL; + + pos = rb_find_first(&pmu_idx, units, pmu_idx_cmp); + if (!pos) + return NULL; + unit = rb_entry(pos, struct intel_uncore_discovery_unit, node); + + if (die < 0) + return unit; + + for (; pos; pos = rb_next(pos)) { + unit = rb_entry(pos, struct intel_uncore_discovery_unit, node); + + if (unit->pmu_idx != pmu_idx) + break; + + if (unit->die == die) + return unit; + } + + return NULL; +} + +int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die, + unsigned int pmu_idx) +{ + struct intel_uncore_discovery_unit *unit; + + unit = intel_uncore_find_discovery_unit(units, die, pmu_idx); + if (unit) + return unit->id; + + return -1; +} + +static inline bool unit_less(struct rb_node *a, const struct rb_node *b) +{ + struct intel_uncore_discovery_unit *a_node, *b_node; + + a_node = rb_entry(a, struct intel_uncore_discovery_unit, node); + b_node = rb_entry(b, struct intel_uncore_discovery_unit, node); + + if (a_node->pmu_idx < b_node->pmu_idx) + return true; + if (a_node->pmu_idx > b_node->pmu_idx) + return false; + + if (a_node->die < b_node->die) + return true; + if (a_node->die > b_node->die) + return false; + + return 0; +} + +static inline struct intel_uncore_discovery_unit * +uncore_find_unit(struct rb_root *root, unsigned int id) +{ + struct intel_uncore_discovery_unit *unit; + struct rb_node *node; + + for (node = rb_first(root); node; node = rb_next(node)) { + unit = rb_entry(node, struct intel_uncore_discovery_unit, node); + if (unit->id == id) + return unit; + } + + return NULL; +} + +void uncore_find_add_unit(struct intel_uncore_discovery_unit *node, + struct rb_root *root, u16 *num_units) +{ + struct intel_uncore_discovery_unit *unit = uncore_find_unit(root, node->id); + + if (unit) + node->pmu_idx = unit->pmu_idx; + else if (num_units) + node->pmu_idx = (*num_units)++; + + rb_add(&node->node, root, unit_less); +} + static void uncore_insert_box_info(struct uncore_unit_discovery *unit, - int die, bool parsed) + int die) { + struct intel_uncore_discovery_unit *node; struct intel_uncore_discovery_type *type; - unsigned int *box_offset, *ids; - int i; if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) { pr_info("Invalid address is detected for uncore type %d box %d, " @@ -135,71 +232,29 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, return; } - if (parsed) { - type = search_uncore_discovery_type(unit->box_type); - if (!type) { - pr_info("A spurious uncore type %d is detected, " - "Disable the uncore type.\n", - unit->box_type); - return; - } - /* Store the first box of each die */ - if (!type->box_ctrl_die[die]) - type->box_ctrl_die[die] = unit->ctl; + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) return; - } - type = get_uncore_discovery_type(unit); - if (!type) - return; + node->die = die; + node->id = unit->box_id; + node->addr = unit->ctl; - box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL); - if (!box_offset) + type = 
get_uncore_discovery_type(unit); + if (!type) { + kfree(node); return; + } - ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL); - if (!ids) - goto free_box_offset; + uncore_find_add_unit(node, &type->units, &type->num_units); /* Store generic information for the first box */ - if (!type->num_boxes) { - type->box_ctrl = unit->ctl; - type->box_ctrl_die[die] = unit->ctl; + if (type->num_units == 1) { type->num_counters = unit->num_regs; type->counter_width = unit->bit_width; type->ctl_offset = unit->ctl_offset; type->ctr_offset = unit->ctr_offset; - *ids = unit->box_id; - goto end; - } - - for (i = 0; i < type->num_boxes; i++) { - ids[i] = type->ids[i]; - box_offset[i] = type->box_offset[i]; - - if (unit->box_id == ids[i]) { - pr_info("Duplicate uncore type %d box ID %d is detected, " - "Drop the duplicate uncore unit.\n", - unit->box_type, unit->box_id); - goto free_ids; - } } - ids[i] = unit->box_id; - box_offset[i] = unit->ctl - type->box_ctrl; - kfree(type->ids); - kfree(type->box_offset); -end: - type->ids = ids; - type->box_offset = box_offset; - type->num_boxes++; - return; - -free_ids: - kfree(ids); - -free_box_offset: - kfree(box_offset); - } static bool @@ -278,7 +333,7 @@ static int parse_discovery_table(struct pci_dev *dev, int die, if (uncore_ignore_unit(&unit, ignore)) continue; - uncore_insert_box_info(&unit, die, *parsed); + uncore_insert_box_info(&unit, die); } *parsed = true; @@ -338,9 +393,16 @@ bool intel_uncore_has_discovery_tables(int *ignore) void intel_uncore_clear_discovery_tables(void) { struct intel_uncore_discovery_type *type, *next; + struct intel_uncore_discovery_unit *pos; + struct rb_node *node; rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) { - kfree(type->box_ctrl_die); + while (!RB_EMPTY_ROOT(&type->units)) { + node = rb_first(&type->units); + pos = rb_entry(node, struct intel_uncore_discovery_unit, node); + rb_erase(node, &type->units); + kfree(pos); + } kfree(type); } } @@ -365,19 +427,31 @@ static const struct attribute_group generic_uncore_format_group = { .attrs = generic_uncore_formats_attr, }; +static u64 intel_generic_uncore_box_ctl(struct intel_uncore_box *box) +{ + struct intel_uncore_discovery_unit *unit; + + unit = intel_uncore_find_discovery_unit(box->pmu->type->boxes, + -1, box->pmu->pmu_idx); + if (WARN_ON_ONCE(!unit)) + return 0; + + return unit->addr; +} + void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box) { - wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT); + wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT); } void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box) { - wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ); + wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ); } void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box) { - wrmsrl(uncore_msr_box_ctl(box), 0); + wrmsrl(intel_generic_uncore_box_ctl(box), 0); } static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box, @@ -405,10 +479,47 @@ static struct intel_uncore_ops generic_uncore_msr_ops = { .read_counter = uncore_msr_read_counter, }; +bool intel_generic_uncore_assign_hw_event(struct perf_event *event, + struct intel_uncore_box *box) +{ + struct hw_perf_event *hwc = &event->hw; + u64 box_ctl; + + if (!box->pmu->type->boxes) + return false; + + if (box->io_addr) { + hwc->config_base = uncore_pci_event_ctl(box, hwc->idx); + hwc->event_base = uncore_pci_perf_ctr(box, hwc->idx); + return true; + } + + box_ctl = 
intel_generic_uncore_box_ctl(box); + if (!box_ctl) + return false; + + if (box->pci_dev) { + box_ctl = UNCORE_DISCOVERY_PCI_BOX_CTRL(box_ctl); + hwc->config_base = box_ctl + uncore_pci_event_ctl(box, hwc->idx); + hwc->event_base = box_ctl + uncore_pci_perf_ctr(box, hwc->idx); + return true; + } + + hwc->config_base = box_ctl + box->pmu->type->event_ctl + hwc->idx; + hwc->event_base = box_ctl + box->pmu->type->perf_ctr + hwc->idx; + + return true; +} + +static inline int intel_pci_uncore_box_ctl(struct intel_uncore_box *box) +{ + return UNCORE_DISCOVERY_PCI_BOX_CTRL(intel_generic_uncore_box_ctl(box)); +} + void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box) { struct pci_dev *pdev = box->pci_dev; - int box_ctl = uncore_pci_box_ctl(box); + int box_ctl = intel_pci_uncore_box_ctl(box); __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT); @@ -417,7 +528,7 @@ void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box) void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box) { struct pci_dev *pdev = box->pci_dev; - int box_ctl = uncore_pci_box_ctl(box); + int box_ctl = intel_pci_uncore_box_ctl(box); pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ); } @@ -425,7 +536,7 @@ void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box) void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box) { struct pci_dev *pdev = box->pci_dev; - int box_ctl = uncore_pci_box_ctl(box); + int box_ctl = intel_pci_uncore_box_ctl(box); pci_write_config_dword(pdev, box_ctl, 0); } @@ -472,34 +583,30 @@ static struct intel_uncore_ops generic_uncore_pci_ops = { #define UNCORE_GENERIC_MMIO_SIZE 0x4000 -static u64 generic_uncore_mmio_box_ctl(struct intel_uncore_box *box) -{ - struct intel_uncore_type *type = box->pmu->type; - - if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets) - return 0; - - return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx]; -} - void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box) { - u64 box_ctl = generic_uncore_mmio_box_ctl(box); + static struct intel_uncore_discovery_unit *unit; struct intel_uncore_type *type = box->pmu->type; resource_size_t addr; - if (!box_ctl) { + unit = intel_uncore_find_discovery_unit(type->boxes, box->dieid, box->pmu->pmu_idx); + if (!unit) { + pr_warn("Uncore type %d id %d: Cannot find box control address.\n", + type->type_id, box->pmu->pmu_idx); + return; + } + + if (!unit->addr) { pr_warn("Uncore type %d box %d: Invalid box control address.\n", - type->type_id, type->box_ids[box->pmu->pmu_idx]); + type->type_id, unit->id); return; } - addr = box_ctl; + addr = unit->addr; box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE); if (!box->io_addr) { pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n", - type->type_id, type->box_ids[box->pmu->pmu_idx], - (unsigned long long)addr); + type->type_id, unit->id, (unsigned long long)addr); return; } @@ -559,34 +666,22 @@ static bool uncore_update_uncore_type(enum uncore_access_type type_id, struct intel_uncore_discovery_type *type) { uncore->type_id = type->type; - uncore->num_boxes = type->num_boxes; uncore->num_counters = type->num_counters; uncore->perf_ctr_bits = type->counter_width; - uncore->box_ids = type->ids; + uncore->perf_ctr = (unsigned int)type->ctr_offset; + uncore->event_ctl = (unsigned int)type->ctl_offset; + uncore->boxes = &type->units; + uncore->num_boxes = type->num_units; switch (type_id) { case 
UNCORE_ACCESS_MSR: uncore->ops = &generic_uncore_msr_ops; - uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset; - uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset; - uncore->box_ctl = (unsigned int)type->box_ctrl; - uncore->msr_offsets = type->box_offset; break; case UNCORE_ACCESS_PCI: uncore->ops = &generic_uncore_pci_ops; - uncore->perf_ctr = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctr_offset; - uncore->event_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctl_offset; - uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl); - uncore->box_ctls = type->box_ctrl_die; - uncore->pci_offsets = type->box_offset; break; case UNCORE_ACCESS_MMIO: uncore->ops = &generic_uncore_mmio_ops; - uncore->perf_ctr = (unsigned int)type->ctr_offset; - uncore->event_ctl = (unsigned int)type->ctl_offset; - uncore->box_ctl = (unsigned int)type->box_ctrl; - uncore->box_ctls = type->box_ctrl_die; - uncore->mmio_offsets = type->box_offset; uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE; break; default: diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h index 6ee80ad3423e4e71d68d6da9fd7214cf3a0f33e0..0e94aa7db8e7d7d34c9670637c246d6ecdf5d8e1 100644 --- a/arch/x86/events/intel/uncore_discovery.h +++ b/arch/x86/events/intel/uncore_discovery.h @@ -113,19 +113,24 @@ struct uncore_unit_discovery { }; }; +struct intel_uncore_discovery_unit { + struct rb_node node; + unsigned int pmu_idx; /* The idx of the corresponding PMU */ + unsigned int id; /* Unit ID */ + unsigned int die; /* Die ID */ + u64 addr; /* Unit Control Address */ +}; + struct intel_uncore_discovery_type { struct rb_node node; enum uncore_access_type access_type; - u64 box_ctrl; /* Unit ctrl addr of the first box */ - u64 *box_ctrl_die; /* Unit ctrl addr of the first box of each die */ + struct rb_root units; /* Unit ctrl addr for all units */ u16 type; /* Type ID of the uncore block */ u8 num_counters; u8 counter_width; u8 ctl_offset; /* Counter Control 0 offset */ u8 ctr_offset; /* Counter 0 offset */ - u16 num_boxes; /* number of boxes for the uncore block */ - unsigned int *ids; /* Box IDs */ - unsigned int *box_offset; /* Box offset */ + u16 num_units; /* number of units */ }; bool intel_uncore_has_discovery_tables(int *ignore); @@ -156,3 +161,10 @@ u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box, struct intel_uncore_type ** intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra); + +int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die, + unsigned int pmu_idx); +bool intel_generic_uncore_assign_hw_event(struct perf_event *event, + struct intel_uncore_box *box); +void uncore_find_add_unit(struct intel_uncore_discovery_unit *node, + struct rb_root *root, u16 *num_units); diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c index 173e2674be6ef24293c5113b43d738af3d3f1981..56eea2c66cfb8cc7d9cc395e58c24a7319afc944 100644 --- a/arch/x86/events/intel/uncore_nhmex.c +++ b/arch/x86/events/intel/uncore_nhmex.c @@ -306,7 +306,7 @@ static const struct attribute_group nhmex_uncore_cbox_format_group = { }; /* msr offset for each instance of cbox */ -static unsigned nhmex_cbox_msr_offsets[] = { +static u64 nhmex_cbox_msr_offsets[] = { 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, }; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 
a8f11e60b98794b3461eb7420d3ea4c38307292e..dcfabf6788073c3b3033c17295adbd76ab1bafb7 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -5280,7 +5280,7 @@ void snr_uncore_mmio_init(void) /* ICX uncore support */ -static unsigned icx_cha_msr_offsets[] = { +static u64 icx_cha_msr_offsets[] = { 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310, 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e, 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a, @@ -5328,7 +5328,7 @@ static struct intel_uncore_type icx_uncore_chabox = { .format_group = &snr_uncore_chabox_format_group, }; -static unsigned icx_msr_offsets[] = { +static u64 icx_msr_offsets[] = { 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, }; @@ -5931,10 +5931,11 @@ static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *ev struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN); struct intel_uncore_type *type = box->pmu->type; + int id = intel_uncore_find_discovery_unit_id(type->boxes, -1, box->pmu->pmu_idx); if (tie_en) { reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 + - HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx]; + HSWEP_CBO_MSR_OFFSET * id; reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID; reg1->idx = 0; } @@ -6087,13 +6088,16 @@ static struct uncore_event_desc spr_uncore_imc_events[] = { { /* end: all zeroes */ }, }; +#define SPR_UNCORE_MMIO_COMMON_FORMAT() \ + SPR_UNCORE_COMMON_FORMAT(), \ + .ops = &spr_uncore_mmio_ops + static struct intel_uncore_type spr_uncore_imc = { - SPR_UNCORE_COMMON_FORMAT(), + SPR_UNCORE_MMIO_COMMON_FORMAT(), .name = "imc", .fixed_ctr_bits = 48, .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, - .ops = &spr_uncore_mmio_ops, .event_descs = spr_uncore_imc_events, }; @@ -6157,7 +6161,55 @@ static struct intel_uncore_type spr_uncore_mdf = { .name = "mdf", }; -#define UNCORE_SPR_NUM_UNCORE_TYPES 12 +static void spr_uncore_mmio_offs8_init_box(struct intel_uncore_box *box) +{ + __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); + intel_generic_uncore_mmio_init_box(box); +} + +static struct intel_uncore_ops spr_uncore_mmio_offs8_ops = { + .init_box = spr_uncore_mmio_offs8_init_box, + .exit_box = uncore_mmio_exit_box, + .disable_box = intel_generic_uncore_mmio_disable_box, + .enable_box = intel_generic_uncore_mmio_enable_box, + .disable_event = intel_generic_uncore_mmio_disable_event, + .enable_event = spr_uncore_mmio_enable_event, + .read_counter = uncore_mmio_read_counter, +}; + +#define SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT() \ + SPR_UNCORE_COMMON_FORMAT(), \ + .ops = &spr_uncore_mmio_offs8_ops + +static struct event_constraint spr_uncore_cxlcm_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x02, 0x0f), + UNCORE_EVENT_CONSTRAINT(0x05, 0x0f), + UNCORE_EVENT_CONSTRAINT(0x40, 0xf0), + UNCORE_EVENT_CONSTRAINT(0x41, 0xf0), + UNCORE_EVENT_CONSTRAINT(0x42, 0xf0), + UNCORE_EVENT_CONSTRAINT(0x43, 0xf0), + UNCORE_EVENT_CONSTRAINT(0x4b, 0xf0), + UNCORE_EVENT_CONSTRAINT(0x52, 0xf0), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type spr_uncore_cxlcm = { + SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(), + .name = "cxlcm", + .constraints = spr_uncore_cxlcm_constraints, +}; + +static struct intel_uncore_type spr_uncore_cxldp = { + SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(), + .name = "cxldp", +}; + +static struct intel_uncore_type spr_uncore_hbm = { + SPR_UNCORE_COMMON_FORMAT(), + .name = "hbm", +}; + +#define 
UNCORE_SPR_NUM_UNCORE_TYPES 15 #define UNCORE_SPR_CHA 0 #define UNCORE_SPR_IIO 1 #define UNCORE_SPR_IMC 6 @@ -6181,6 +6233,9 @@ static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = { NULL, NULL, &spr_uncore_mdf, + &spr_uncore_cxlcm, + &spr_uncore_cxldp, + &spr_uncore_hbm, }; /* @@ -6189,10 +6244,28 @@ static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = { */ #define SPR_UNCORE_UPI_NUM_BOXES 4 -static unsigned int spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = { +static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = { 0, 0x8000, 0x10000, 0x18000 }; +static void spr_extra_boxes_cleanup(struct intel_uncore_type *type) +{ + struct intel_uncore_discovery_unit *pos; + struct rb_node *node; + + if (!type->boxes) + return; + + while (!RB_EMPTY_ROOT(type->boxes)) { + node = rb_first(type->boxes); + pos = rb_entry(node, struct intel_uncore_discovery_unit, node); + rb_erase(node, type->boxes); + kfree(pos); + } + kfree(type->boxes); + type->boxes = NULL; +} + static struct intel_uncore_type spr_uncore_upi = { .event_mask = SNBEP_PMON_RAW_EVENT_MASK, .event_mask_ext = SPR_RAW_EVENT_MASK_EXT, @@ -6207,10 +6280,11 @@ static struct intel_uncore_type spr_uncore_upi = { .num_counters = 4, .num_boxes = SPR_UNCORE_UPI_NUM_BOXES, .perf_ctr_bits = 48, - .perf_ctr = ICX_UPI_PCI_PMON_CTR0, - .event_ctl = ICX_UPI_PCI_PMON_CTL0, + .perf_ctr = ICX_UPI_PCI_PMON_CTR0 - ICX_UPI_PCI_PMON_BOX_CTL, + .event_ctl = ICX_UPI_PCI_PMON_CTL0 - ICX_UPI_PCI_PMON_BOX_CTL, .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL, .pci_offsets = spr_upi_pci_offsets, + .cleanup_extra_boxes = spr_extra_boxes_cleanup, }; static struct intel_uncore_type spr_uncore_m3upi = { @@ -6220,11 +6294,12 @@ static struct intel_uncore_type spr_uncore_m3upi = { .num_counters = 4, .num_boxes = SPR_UNCORE_UPI_NUM_BOXES, .perf_ctr_bits = 48, - .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0, - .event_ctl = ICX_M3UPI_PCI_PMON_CTL0, + .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0 - ICX_M3UPI_PCI_PMON_BOX_CTL, + .event_ctl = ICX_M3UPI_PCI_PMON_CTL0 - ICX_M3UPI_PCI_PMON_BOX_CTL, .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL, .pci_offsets = spr_upi_pci_offsets, .constraints = icx_uncore_m3upi_constraints, + .cleanup_extra_boxes = spr_extra_boxes_cleanup, }; enum perf_uncore_spr_iio_freerunning_type_id { @@ -6420,7 +6495,8 @@ static void uncore_type_customized_copy(struct intel_uncore_type *to_type, static struct intel_uncore_type ** uncore_get_uncores(enum uncore_access_type type_id, int num_extra, - struct intel_uncore_type **extra) + struct intel_uncore_type **extra, int max_num_types, + struct intel_uncore_type **uncores) { struct intel_uncore_type **types, **start_types; int i; @@ -6429,9 +6505,9 @@ uncore_get_uncores(enum uncore_access_type type_id, int num_extra, /* Only copy the customized features */ for (; *types; types++) { - if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES) + if ((*types)->type_id >= max_num_types) continue; - uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]); + uncore_type_customized_copy(*types, uncores[(*types)->type_id]); } for (i = 0; i < num_extra; i++, types++) @@ -6454,18 +6530,21 @@ uncore_find_type_by_id(struct intel_uncore_type **types, int type_id) static int uncore_type_max_boxes(struct intel_uncore_type **types, int type_id) { + struct intel_uncore_discovery_unit *unit; struct intel_uncore_type *type; - int i, max = 0; + struct rb_node *node; + int max = 0; type = uncore_find_type_by_id(types, type_id); if (!type) return 0; - for (i = 0; i < type->num_boxes; i++) { - if (type->box_ids[i] > 
max) - max = type->box_ids[i]; - } + for (node = rb_first(type->boxes); node; node = rb_next(node)) { + unit = rb_entry(node, struct intel_uncore_discovery_unit, node); + if (unit->id > max) + max = unit->id; + } return max + 1; } @@ -6478,7 +6557,9 @@ void spr_uncore_cpu_init(void) uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, UNCORE_SPR_MSR_EXTRA_UNCORES, - spr_msr_uncores); + spr_msr_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA); if (type) { @@ -6505,10 +6586,11 @@ void spr_uncore_cpu_init(void) static void spr_update_device_location(int type_id) { + struct intel_uncore_discovery_unit *unit; struct intel_uncore_type *type; struct pci_dev *dev = NULL; + struct rb_root *root; u32 device, devfn; - u64 *ctls; int die; if (type_id == UNCORE_SPR_UPI) { @@ -6522,27 +6604,35 @@ static void spr_update_device_location(int type_id) } else return; - ctls = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL); - if (!ctls) { + root = kzalloc(sizeof(struct rb_root), GFP_KERNEL); + if (!root) { type->num_boxes = 0; return; } + *root = RB_ROOT; while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) { - if (devfn != dev->devfn) - continue; die = uncore_device_to_die(dev); if (die < 0) continue; - ctls[die] = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET | - dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET | - devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET | - type->box_ctl; + unit = kzalloc(sizeof(*unit), GFP_KERNEL); + if (!unit) + continue; + unit->die = die; + unit->id = PCI_SLOT(dev->devfn) - PCI_SLOT(devfn); + unit->addr = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET | + dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET | + devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET | + type->box_ctl; + + unit->pmu_idx = unit->id; + + uncore_find_add_unit(unit, root, NULL); } - type->box_ctls = ctls; + type->boxes = root; } int spr_uncore_pci_init(void) @@ -6560,7 +6650,9 @@ int spr_uncore_pci_init(void) spr_update_device_location(UNCORE_SPR_M3UPI); uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, UNCORE_SPR_PCI_EXTRA_UNCORES, - spr_pci_uncores); + spr_pci_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); return 0; } @@ -6568,15 +6660,116 @@ void spr_uncore_mmio_init(void) { int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true); - if (ret) - uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL); - else { + if (ret) { + uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); + } else { uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, UNCORE_SPR_MMIO_EXTRA_UNCORES, - spr_mmio_uncores); + spr_mmio_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2; } } /* end of SPR uncore support */ + +/* GNR uncore support */ + +#define UNCORE_GNR_NUM_UNCORE_TYPES 23 +#define UNCORE_GNR_TYPE_15 15 +#define UNCORE_GNR_B2UPI 18 +#define UNCORE_GNR_TYPE_21 21 +#define UNCORE_GNR_TYPE_22 22 + +int gnr_uncore_units_ignore[] = { + UNCORE_SPR_UPI, + UNCORE_GNR_TYPE_15, + UNCORE_GNR_B2UPI, + UNCORE_GNR_TYPE_21, + UNCORE_GNR_TYPE_22, + UNCORE_IGNORE_END +}; + +static struct intel_uncore_type gnr_uncore_ubox = { + .name = "ubox", + .attr_update = uncore_alias_groups, +}; + +static struct intel_uncore_type gnr_uncore_b2cmi = { + SPR_UNCORE_PCI_COMMON_FORMAT(), + .name = 
"b2cmi", +}; + +static struct intel_uncore_type gnr_uncore_b2cxl = { + SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(), + .name = "b2cxl", +}; + +static struct intel_uncore_type gnr_uncore_mdf_sbo = { + .name = "mdf_sbo", + .attr_update = uncore_alias_groups, +}; + +static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = { + &spr_uncore_chabox, + &spr_uncore_iio, + &spr_uncore_irp, + NULL, + &spr_uncore_pcu, + &gnr_uncore_ubox, + &spr_uncore_imc, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + &gnr_uncore_b2cmi, + &gnr_uncore_b2cxl, + NULL, + NULL, + &gnr_uncore_mdf_sbo, + NULL, + NULL, +}; + +static struct freerunning_counters gnr_iio_freerunning[] = { + [SPR_IIO_MSR_IOCLK] = { 0x290e, 0x01, 0x10, 1, 48 }, + [SPR_IIO_MSR_BW_IN] = { 0x360e, 0x10, 0x80, 8, 48 }, + [SPR_IIO_MSR_BW_OUT] = { 0x2e0e, 0x10, 0x80, 8, 48 }, +}; + +void gnr_uncore_cpu_init(void) +{ + uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, + UNCORE_SPR_MSR_EXTRA_UNCORES, + spr_msr_uncores, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); + spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO); + spr_uncore_iio_free_running.freerunning = gnr_iio_freerunning; +} + +int gnr_uncore_pci_init(void) +{ + uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); + return 0; +} + +void gnr_uncore_mmio_init(void) +{ + uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); +} + +/* end of GNR uncore support */ diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index c8ba2be7585d4490af31bad6f31891f224900f2f..fb56518356ecfbacdb8d79b426bd1c82e4012082 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -110,6 +110,11 @@ static inline bool is_topdown_event(struct perf_event *event) return is_metric_event(event) || is_slots_event(event); } +static inline bool is_branch_counters_group(struct perf_event *event) +{ + return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS; +} + struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ @@ -283,6 +288,7 @@ struct cpu_hw_events { int lbr_pebs_users; struct perf_branch_stack lbr_stack; struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; + u64 lbr_counters[MAX_LBR_ENTRIES]; /* branch stack extra */ union { struct er_account *lbr_sel; struct er_account *lbr_ctl; @@ -652,10 +658,29 @@ enum { #define PERF_PEBS_DATA_SOURCE_MAX 0x10 #define PERF_PEBS_DATA_SOURCE_MASK (PERF_PEBS_DATA_SOURCE_MAX - 1) +enum hybrid_cpu_type { + HYBRID_INTEL_NONE, + HYBRID_INTEL_ATOM = 0x20, + HYBRID_INTEL_CORE = 0x40, +}; + +enum hybrid_pmu_type { + not_hybrid, + hybrid_small = BIT(0), + hybrid_big = BIT(1), + + hybrid_big_small = hybrid_big | hybrid_small, /* only used for matching */ +}; + +#define X86_HYBRID_PMU_ATOM_IDX 0 +#define X86_HYBRID_PMU_CORE_IDX 1 + +#define X86_HYBRID_NUM_PMUS 2 + struct x86_hybrid_pmu { struct pmu pmu; const char *name; - u8 cpu_type; + enum hybrid_pmu_type pmu_type; cpumask_t supported_cpus; union perf_capabilities intel_cap; u64 intel_ctrl; @@ -721,18 +746,6 @@ extern struct static_key_false perf_is_hybrid; __Fp; \ }) -enum hybrid_pmu_type { - hybrid_big = 0x40, - hybrid_small = 0x20, - - hybrid_big_small = hybrid_big | hybrid_small, -}; - -#define X86_HYBRID_PMU_ATOM_IDX 0 -#define X86_HYBRID_PMU_CORE_IDX 1 - -#define X86_HYBRID_NUM_PMUS 2 - /* * struct x86_pmu - generic x86 pmu */ @@ -881,6 
+894,7 @@ struct x86_pmu { unsigned int lbr_mispred:1; unsigned int lbr_timed_lbr:1; unsigned int lbr_br_type:1; + unsigned int lbr_counters:4; void (*lbr_reset)(void); void (*lbr_read)(struct cpu_hw_events *cpuc); @@ -940,7 +954,7 @@ struct x86_pmu { */ int num_hybrid_pmus; struct x86_hybrid_pmu *hybrid_pmu; - u8 (*get_hybrid_cpu_type) (void); + enum hybrid_cpu_type (*get_hybrid_cpu_type) (void); }; struct x86_perf_task_context_opt { @@ -1005,6 +1019,7 @@ do { \ #define PMU_FL_INSTR_LATENCY 0x80 /* Support Instruction Latency in PEBS Memory Info Record */ #define PMU_FL_MEM_LOADS_AUX 0x100 /* Require an auxiliary event for the complete memory info */ #define PMU_FL_RETIRE_LATENCY 0x200 /* Support Retire Latency in PEBS */ +#define PMU_FL_BR_CNTR 0x400 /* Support branch counter logging */ #define EVENT_VAR(_id) event_attr_##_id #define EVENT_PTR(_id) &event_attr_##_id.attr.attr @@ -1521,7 +1536,7 @@ extern struct event_constraint intel_skl_pebs_event_constraints[]; extern struct event_constraint intel_icl_pebs_event_constraints[]; -extern struct event_constraint intel_spr_pebs_event_constraints[]; +extern struct event_constraint intel_glc_pebs_event_constraints[]; struct event_constraint *intel_pebs_constraints(struct perf_event *event); @@ -1545,6 +1560,10 @@ void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); void intel_ds_init(void); +void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, + struct cpu_hw_events *cpuc, + struct perf_event *event); + void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc, struct perf_event_pmu_context *next_epc); diff --git a/arch/x86/events/perf_event_flags.h b/arch/x86/events/perf_event_flags.h index 1dc19b9b4426abde8e1b005a7561e8f97b5a3995..6c977c19f2cd7b0a1c1947611a2b28f72376202e 100644 --- a/arch/x86/events/perf_event_flags.h +++ b/arch/x86/events/perf_event_flags.h @@ -20,3 +20,5 @@ PERF_ARCH(TOPDOWN, 0x04000) /* Count Topdown slots/metrics events */ PERF_ARCH(PEBS_STLAT, 0x08000) /* st+stlat data address sampling */ PERF_ARCH(AMD_BRS, 0x10000) /* AMD Branch Sampling */ PERF_ARCH(PEBS_LAT_HYBRID, 0x20000) /* ld and st lat for hybrid */ +PERF_ARCH(NEEDS_BRANCH_STACK, 0x40000) /* require branch stack setup */ +PERF_ARCH(BRANCH_COUNTERS, 0x80000) /* logs the counters in the extra space of each branch */ diff --git a/arch/x86/events/zhaoxin/Makefile b/arch/x86/events/zhaoxin/Makefile index 642c1174d662650192e31c213ffb48e29a19951c..767d6212bac1636b648ba0c0cc32006d32097f99 100644 --- a/arch/x86/events/zhaoxin/Makefile +++ b/arch/x86/events/zhaoxin/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += core.o +obj-y += uncore.o diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index 3e9acdaeed1ec8ed3b68d0d35ccc7300fb001dd2..e493b176b336ca90936ee0b88e547b3e7ba1f8eb 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -19,15 +19,15 @@ #include "../perf_event.h" /* - * Zhaoxin PerfMon, used on zxc and later. + * Zhaoxin PerfMon, used on Lujiazui and later. 
*/ static u64 zx_pmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = { [PERF_COUNT_HW_CPU_CYCLES] = 0x0082, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515, - [PERF_COUNT_HW_CACHE_MISSES] = 0x051a, [PERF_COUNT_HW_BUS_CYCLES] = 0x0083, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x0029, }; static struct event_constraint zxc_event_constraints[] __read_mostly = { @@ -36,7 +36,7 @@ static struct event_constraint zxc_event_constraints[] __read_mostly = { EVENT_CONSTRAINT_END }; -static struct event_constraint zxd_event_constraints[] __read_mostly = { +static struct event_constraint wudaokou_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* retired instructions */ FIXED_EVENT_CONSTRAINT(0x0082, 1), /* unhalted core clock cycles */ @@ -44,7 +44,7 @@ static struct event_constraint zxd_event_constraints[] __read_mostly = { EVENT_CONSTRAINT_END }; -static __initconst const u64 zxd_hw_cache_event_ids +static __initconst const u64 wudaokou_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -148,7 +148,7 @@ static __initconst const u64 zxd_hw_cache_event_ids }, }; -static __initconst const u64 zxe_hw_cache_event_ids +static __initconst const u64 lujiazui_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -259,7 +259,10 @@ static void zhaoxin_pmu_disable_all(void) static void zhaoxin_pmu_enable_all(int added) { - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, + x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); } static inline u64 zhaoxin_pmu_get_status(void) @@ -286,13 +289,31 @@ static inline void zxc_pmu_ack_status(u64 ack) zhaoxin_pmu_disable_all(); } -static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc) +static inline void zhaoxin_set_masks(struct perf_event *event, int idx) { - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; - u64 ctrl_val, mask; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - mask = 0xfULL << (idx * 4); + if (event->attr.exclude_host) + __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); + if (event->attr.exclude_guest) + __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); +} + +static inline void zhaoxin_clear_masks(struct perf_event *event, int idx) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); + __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); +} +static void zhaoxin_pmu_disable_fixed(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 ctrl_val, mask; + int idx = hwc->idx; + + mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4); rdmsrl(hwc->config_base, ctrl_val); ctrl_val &= ~mask; wrmsrl(hwc->config_base, ctrl_val); @@ -301,19 +322,23 @@ static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc) static void zhaoxin_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + zhaoxin_clear_masks(event, idx); if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - zhaoxin_pmu_disable_fixed(hwc); + zhaoxin_pmu_disable_fixed(event); return; } x86_pmu_disable_event(event); } -static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc) +static void zhaoxin_pmu_enable_fixed(struct perf_event *event) { - int idx = 
hwc->idx - INTEL_PMC_IDX_FIXED; - u64 ctrl_val, bits, mask; + struct hw_perf_event *hwc = &event->hw; + u64 ctrl_val, mask, bits = 0; + int idx = hwc->idx; /* * Enable IRQ generation (0x8), @@ -326,6 +351,7 @@ static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc) if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) bits |= 0x1; + idx -= INTEL_PMC_IDX_FIXED; bits <<= (idx * 4); mask = 0xfULL << (idx * 4); @@ -338,9 +364,12 @@ static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc) static void zhaoxin_pmu_enable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + zhaoxin_set_masks(event, idx); if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - zhaoxin_pmu_enable_fixed(hwc); + zhaoxin_pmu_enable_fixed(event); return; } @@ -456,6 +485,19 @@ static ssize_t zhaoxin_event_sysfs_show(char *page, u64 config) return x86_event_sysfs_show(page, config, event); } +static struct perf_guest_switch_msr *zhaoxin_guest_get_msrs(int *nr, void *data) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; + + arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; + arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; + arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; + *nr = 1; + + return arr; +} + static const struct x86_pmu zhaoxin_pmu __initconst = { .name = "zhaoxin", .handle_irq = zhaoxin_pmu_handle_irq, @@ -471,13 +513,15 @@ static const struct x86_pmu zhaoxin_pmu __initconst = { .max_events = ARRAY_SIZE(zx_pmon_event_map), .apic = 1, /* - * For zxd/zxe, read/write operation for PMCx MSR is 48 bits. + * For wudaokou/lujiazui, read/write operation for PMCx MSR is 48 bits. */ .max_period = (1ULL << 47) - 1, .get_event_constraints = zhaoxin_get_event_constraints, .format_attrs = zx_arch_formats_attr, .events_sysfs_show = zhaoxin_event_sysfs_show, + + .guest_get_msrs = zhaoxin_guest_get_msrs, }; static const struct { int id; char *name; } zx_arch_events_map[] __initconst = { @@ -559,6 +603,8 @@ __init int zhaoxin_pmu_init(void) zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0; zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; zx_pmon_event_map[PERF_COUNT_HW_BUS_CYCLES] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0; pr_cont("ZXC events, "); break; @@ -574,26 +620,50 @@ __init int zhaoxin_pmu_init(void) switch (boot_cpu_data.x86_model) { case 0x1b: - memcpy(hw_cache_event_ids, zxd_hw_cache_event_ids, + memcpy(hw_cache_event_ids, wudaokou_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - x86_pmu.event_constraints = zxd_event_constraints; + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0700; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0709; - pr_cont("ZXD events, "); + pr_cont("Wudaokou events, "); break; case 0x3b: - memcpy(hw_cache_event_ids, zxe_hw_cache_event_ids, + memcpy(hw_cache_event_ids, lujiazui_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; + + pr_cont("Lujiazui events, "); + break; + case 0x5b: + case 0x6b: + zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event = 
0x02, .umask = 0x01, .inv = 0x01, + .cmask = 0x01); + + memcpy(hw_cache_event_ids, lujiazui_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - x86_pmu.event_constraints = zxd_event_constraints; + x86_pmu.event_constraints = wudaokou_event_constraints; - zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028; - zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0029; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x051a; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; + + if (boot_cpu_data.x86_model == 0x5b) + pr_cont("Yongfeng events, "); + + if (boot_cpu_data.x86_model == 0x6b) + pr_cont("Shijidadao events, "); - pr_cont("ZXE events, "); break; default: return -ENODEV; @@ -616,4 +686,3 @@ __init int zhaoxin_pmu_init(void) return 0; } - diff --git a/arch/x86/events/zhaoxin/uncore.c b/arch/x86/events/zhaoxin/uncore.c new file mode 100644 index 0000000000000000000000000000000000000000..30a51324f41a0f5a7c5381bc47ccac84d7e15e78 --- /dev/null +++ b/arch/x86/events/zhaoxin/uncore.c @@ -0,0 +1,2818 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include "uncore.h" + +static struct zhaoxin_uncore_type *empty_uncore[] = { NULL, }; +static struct zhaoxin_uncore_type **uncore_msr_uncores = empty_uncore; +static struct zhaoxin_uncore_type **uncore_pci_uncores = empty_uncore; +static struct zhaoxin_uncore_type **uncore_mmio_uncores = empty_uncore; + +static bool pcidrv_registered; +static struct pci_driver *uncore_pci_driver; + +/* mask of cpus that collect uncore events */ +static cpumask_t uncore_cpu_mask; +static cpumask_t uncore_cpu_subnode_mask; +static cpumask_t uncore_cpu_cluster_mask; + +/* constraint for the fixed counter */ +static struct event_constraint uncore_constraint_fixed = + EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL); + +static int max_packages, max_subnodes, max_clusters; +static int clusters_per_subnode; +static int subnodes_per_die; +static int dies_per_socket; + +#define KH40000_MAX_SUBNODE_NUMBER 8 +static int kh40000_pcibus_limit[KH40000_MAX_SUBNODE_NUMBER]; + +/* get CPU topology register */ +#define BJ_GLOBAL_STATUS_MSR 0x1610 +#define BJ_HDW_CONFIG_MSR 0X1628 + +/* KX5000/KX6000 event control */ +#define KX5000_UNC_CTL_EV_SEL_MASK 0x000000ff +#define KX5000_UNC_CTL_UMASK_MASK 0x0000ff00 +#define KX5000_UNC_CTL_EDGE_DET (1 << 18) +#define KX5000_UNC_CTL_EN (1 << 22) +#define KX5000_UNC_CTL_INVERT (1 << 23) +#define KX5000_UNC_CTL_CMASK_MASK 0x7000000 +#define KX5000_UNC_FIXED_CTR_CTL_EN (1 << 0) + +#define KX5000_UNC_RAW_EVENT_MASK (KX5000_UNC_CTL_EV_SEL_MASK | \ + KX5000_UNC_CTL_UMASK_MASK | \ + KX5000_UNC_CTL_EDGE_DET | \ + KX5000_UNC_CTL_INVERT | \ + KX5000_UNC_CTL_CMASK_MASK) + +/* KX5000/KX6000 uncore global register */ +#define KX5000_UNC_PERF_GLOBAL_CTL 0x391 +#define KX5000_UNC_FIXED_CTR 0x394 +#define KX5000_UNC_FIXED_CTR_CTRL 0x395 + +/* KX5000/KX6000 uncore global control */ +#define KX5000_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 4) - 1) +#define KX5000_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) + +/* KX5000/KX6000 uncore register */ +#define KX5000_UNC_PERFEVTSEL0 0x3c0 +#define KX5000_UNC_UNCORE_PMC0 0x3b0 + +/* KH40000 event control */ +#define KH40000_PMON_CTL_EV_SEL_MASK 0x000000ff +#define KH40000_PMON_CTL_UMASK_MASK 0x0000ff00 +#define KH40000_PMON_CTL_RST (1 << 17) +#define KH40000_PMON_CTL_EDGE_DET (1 << 18) +#define KH40000_PMON_CTL_EN (1 << 22) +#define KH40000_PMON_CTL_INVERT (1 << 23) +#define KH40000_PMON_CTL_THRESH_MASK 0xff000000 +#define KH40000_PMON_RAW_EVENT_MASK (KH40000_PMON_CTL_EV_SEL_MASK | \ + 
KH40000_PMON_CTL_UMASK_MASK | \ + KH40000_PMON_CTL_EDGE_DET | \ + KH40000_PMON_CTL_INVERT | \ + KH40000_PMON_CTL_THRESH_MASK) + +/* KH40000 LLC register*/ +#define KH40000_LLC_MSR_PMON_CTL0 0x1660 +#define KH40000_LLC_MSR_PMON_CTR0 0x165c +#define KH40000_LLC_MSR_PMON_BLK_CTL 0x1665 + +/* KH40000 HIF register*/ +#define KH40000_HIF_MSR_PMON_CTL0 0x1656 +#define KH40000_HIF_MSR_PMON_CTR0 0x1651 +#define KH40000_HIF_MSR_PMON_FIXED_CTL 0x1655 +#define KH40000_HIF_MSR_PMON_FIXED_CTR 0x1650 +#define KH40000_HIF_MSR_PMON_BLK_CTL 0x165b + +/* KH40000 ZZI(ZPI+ZOI+INI) register*/ +#define KH40000_ZZI_MSR_PMON_CTL0 0x166A +#define KH40000_ZZI_MSR_PMON_CTR0 0x1666 +#define KH40000_ZZI_MSR_PMON_BLK_CTL 0x166f + +/* KH40000 MC register*/ +#define KH40000_MC0_CHy_PMON_FIXED_CTL 0xf40 +#define KH40000_MC0_CHy_PMON_FIXED_CTR 0xf20 +#define KH40000_MC0_CHy_PMON_CTR0 0xf00 +#define KH40000_MC0_CHy_PMON_CTL0 0xf28 +#define KH40000_MC0_CHy_PMON_BLK_CTL 0xf44 + +#define KH40000_MC1_CHy_PMON_FIXED_CTL 0xf90 +#define KH40000_MC1_CHy_PMON_FIXED_CTR 0xf70 +#define KH40000_MC1_CHy_PMON_CTR0 0xf50 +#define KH40000_MC1_CHy_PMON_CTL0 0xf78 +#define KH40000_MC1_CHy_PMON_BLK_CTL 0xf94 + +/* KH40000 PCI register*/ +#define KH40000_PCI_PMON_CTR0 0xf00 +#define KH40000_PCI_PMON_CTL0 0xf28 +#define KH40000_PCI_PMON_BLK_CTL 0xf44 + +/* KH40000 ZPI_DLL register*/ +#define KH40000_ZPI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZPI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZPI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZPI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZPI_DLL_PMON_BLK_CTL 0xf44 + +/* KH40000 ZDI_DLL register*/ +#define KH40000_ZDI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZDI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZDI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZDI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZDI_DLL_PMON_BLK_CTL 0xf44 + +/* KH40000 PXPTRF register*/ +#define KH40000_PXPTRF_PMON_CTR0 0xf00 +#define KH40000_PXPTRF_PMON_CTL0 0xf28 +#define KH40000_PXPTRF_PMON_BLK_CTL 0xf44 + +/* KH40000 Box level control */ +#define KH40000_PMON_BOX_CTL_RST_CTRL (1 << 0) +#define KH40000_PMON_BOX_CTL_RST_CTRS (1 << 1) +#define KH40000_PMON_BOX_CTL_FRZ (1 << 8) +#define KH40000_PMON_PCI_BOX_PMON_EN (1 << 31) + +#define KH40000_PMON_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ + KH40000_PMON_BOX_CTL_RST_CTRS) + +#define KH40000_PMON_PCI_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ + KH40000_PMON_BOX_CTL_RST_CTRS | \ + KH40000_PMON_PCI_BOX_PMON_EN) + + +/* KX7000 event control */ +#define KX7000_PMON_CTL_EV_SEL_MASK 0x000000ff +#define KX7000_PMON_CTL_UMASK_MASK 0x0000ff00 +#define KX7000_PMON_CTL_RST (1 << 17) +#define KX7000_PMON_CTL_EDGE_DET (1 << 18) +#define KX7000_PMON_CTL_LOGIC_OP0 (1 << 19) +#define KX7000_PMON_CTL_LOGIC_OP1 (1 << 21) +#define KX7000_PMON_CTL_EN (1 << 22) +#define KX7000_PMON_CTL_INVERT (1 << 23) +#define KX7000_PMON_CTL_THRESH_MASK 0xff000000 +#define KX7000_PMON_RAW_EVENT_MASK (KX7000_PMON_CTL_EV_SEL_MASK | \ + KX7000_PMON_CTL_UMASK_MASK | \ + KX7000_PMON_CTL_EDGE_DET | \ + KX7000_PMON_CTL_LOGIC_OP0 | \ + KX7000_PMON_CTL_LOGIC_OP1 | \ + KX7000_PMON_CTL_INVERT | \ + KX7000_PMON_CTL_THRESH_MASK) + +/* KX7000 LLC register*/ +#define KX7000_LLC_MSR_PMON_CTL0 0x1979 +#define KX7000_LLC_MSR_PMON_CTR0 0x1975 +#define KX7000_LLC_MSR_PMON_BLK_CTL 0x197e + +/* KX7000 MESH register*/ +#define KX7000_MESH_MSR_PMON_CTL0 0x1983 +#define KX7000_MESH_MSR_PMON_CTR0 0x197f +#define KX7000_MESH_MSR_PMON_BLK_CTL 0x1987 + +/* KX7000 HOMESTOP register*/ +#define KX7000_HOMESTOP_MSR_PMON_CTL0 0x196a +#define 
KX7000_HOMESTOP_MSR_PMON_CTR0 0x1966 +#define KX7000_HOMESTOP_MSR_PMON_BLK_CTL 0x196e +#define KX7000_HOMESTOP_MSR_PMON_FIXED_CTR 0x1970 +#define KX7000_HOMESTOP_MSR_PMON_FIXED_CTL 0x1971 + +/* KX7000 CCDie ZDI_PL register*/ +#define KX7000_CCD_ZDI_PL_MSR_PMON_CTL0 0x1960 +#define KX7000_CCD_ZDI_PL_MSR_PMON_CTR0 0x195c +#define KX7000_CCD_ZDI_PL_MSR_PMON_BLK_CTL 0x1964 + +/* KX7000 cIODie ZDI_PL register*/ +#define KX7000_IOD_ZDI_PL_MSR_PMON_CTL0 0x1894 +#define KX7000_IOD_ZDI_PL_MSR_PMON_CTR0 0x1890 +#define KX7000_IOD_ZDI_PL_MSR_PMON_BLK_CTL 0x1898 +#define KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR 0x189A +#define KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL 0x189B + +/* KX7000 MC register*/ +#define KX7000_MC_A0_CHy_PMON_FIXED_CTL 0xe30 +#define KX7000_MC_A0_CHy_PMON_FIXED_CTR 0xe08 +#define KX7000_MC_A0_CHy_PMON_CTR0 0xe00 +#define KX7000_MC_A0_CHy_PMON_CTL0 0xe20 +#define KX7000_MC_A0_CHy_PMON_BLK_CTL 0xe34 + +#define KX7000_MC_A1_CHy_PMON_FIXED_CTL 0xe70 +#define KX7000_MC_A1_CHy_PMON_FIXED_CTR 0xe48 +#define KX7000_MC_A1_CHy_PMON_CTR0 0xe40 +#define KX7000_MC_A1_CHy_PMON_CTL0 0xe60 +#define KX7000_MC_A1_CHy_PMON_BLK_CTL 0xe74 + +#define KX7000_MC_B0_CHy_PMON_FIXED_CTL 0xeb0 +#define KX7000_MC_B0_CHy_PMON_FIXED_CTR 0xe88 +#define KX7000_MC_B0_CHy_PMON_CTR0 0xe80 +#define KX7000_MC_B0_CHy_PMON_CTL0 0xea0 +#define KX7000_MC_B0_CHy_PMON_BLK_CTL 0xeb4 + +#define KX7000_MC_B1_CHy_PMON_FIXED_CTL 0xef0 +#define KX7000_MC_B1_CHy_PMON_FIXED_CTR 0xec8 +#define KX7000_MC_B1_CHy_PMON_CTR0 0xec0 +#define KX7000_MC_B1_CHy_PMON_CTL0 0xee0 +#define KX7000_MC_B1_CHy_PMON_BLK_CTL 0xef4 + +#define KX7000_ZDI_DL_MMIO_PMON_CTR0 0xf00 +#define KX7000_ZDI_DL_MMIO_PMON_CTL0 0xf28 +#define KX7000_ZDI_DL_MMIO_PMON_BLK_CTL 0xf44 +#define KX7000_IOD_ZDI_DL_MMIO_BASE_OFFSET 0x168 +#define KX7000_CCD_ZDI_DL_MMIO_BASE_OFFSET 0x170 +#define KX7000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define KX7000_ZDI_DL_MMIO_MEM0_MASK 0xfffff000 +#define KX7000_ZDI_DL_MMIO_SIZE 0x1000 + +DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +DEFINE_UNCORE_FORMAT_ATTR(logic_op0, logic_op0, "config:19"); +DEFINE_UNCORE_FORMAT_ATTR(logic_op1, logic_op1, "config:21"); +DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); +DEFINE_UNCORE_FORMAT_ATTR(cmask3, cmask, "config:24-26"); +DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); + +static void get_hdw_config_msr(void *config) +{ + u64 *data = (u64 *)config; + + rdmsrl(BJ_HDW_CONFIG_MSR, *data); +} + +static void get_global_status_msr(void *status) +{ + u64 *data = (u64 *)status; + + rdmsrl(BJ_GLOBAL_STATUS_MSR, *data); +} + +/* topology number: get max packages/subnodes/clusters number */ +static void get_topology_number(void) +{ + int clusters; + int subnodes; + int dies; + int packages; + u64 data; + + rdmsrl(BJ_GLOBAL_STATUS_MSR, data); + + /* check packages number */ + packages = data & 0x1; + if (packages) + max_packages = 2; + else + max_packages = 1; + + /* only Yongfeng needs die/subnode/cluster info */ + if (boot_cpu_data.x86_model != ZHAOXIN_FAM7_KH40000) + return; + + /* check dies_per_socket */ + dies = (data >> 12) & 0x1; + if (dies) + dies_per_socket = 2; + else + dies_per_socket = 1; + + /* check subnodes_per_die */ + subnodes = (data >> 32) & 0x3; + if (subnodes == 0x3) + subnodes_per_die = 2; + else + subnodes_per_die = 1; + + /* check clusters_per_subnode */ + clusters = (data >> 6) & 0x3; + if (clusters == 0x3) +
clusters_per_subnode = 2; + else + clusters_per_subnode = 1; + + max_subnodes = max_packages * dies_per_socket * subnodes_per_die; + max_clusters = clusters_per_subnode * max_subnodes; +} + +static int get_pcibus_limit(void) +{ + struct pci_dev *dev; + u32 val; + int i = 0; + + dev = pci_get_device(0x1D17, 0x31B1, NULL); + if (dev == NULL) + return -ENODEV; + + pci_read_config_dword(dev, 0x94, &val); + kh40000_pcibus_limit[i++] = (val & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 8 & 0x1f) << 3 | 0x7; + if (dies_per_socket == 2) { + kh40000_pcibus_limit[i++] = (val >> 16 & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 24 & 0x1f) << 3 | 0x7; + } + + if (max_packages == 2) { + pci_read_config_dword(dev, 0x9c, &val); + kh40000_pcibus_limit[i++] = (val & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 8 & 0x1f) << 3 | 0x7; + if (dies_per_socket == 2) { + kh40000_pcibus_limit[i++] = (val >> 16 & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 24 & 0x1f) << 3 | 0x7; + } + } + + return 0; +} + +static int uncore_pcibus_to_subnodeid(struct pci_bus *bus) +{ + int i; + + for (i = 0; i < KH40000_MAX_SUBNODE_NUMBER; i++) { + if (bus->number < kh40000_pcibus_limit[i]) + break; + } + + return i; +} + +DEFINE_PER_CPU(int, zx_package_id); +DEFINE_PER_CPU(int, zx_subnode_id); +DEFINE_PER_CPU(int, zx_cluster_id); + +static void get_topology_info(void) +{ + int cpu; + int cluster_id; + int socket_id; + int die_id; + int subnode_id; + + int die_info; + int subnode_info; + int cluster_info; + + u64 config; + + for_each_present_cpu(cpu) { + smp_call_function_single(cpu, get_global_status_msr, &config, 1); + socket_id = (int)((config >> 3) & 0x1); + per_cpu(zx_package_id, cpu) = socket_id; + + /* only kh40000 needs cluster and subnode info */ + if (boot_cpu_data.x86_model != ZHAOXIN_FAM7_KH40000) + continue; + + smp_call_function_single(cpu, get_hdw_config_msr, &config, 1); + + die_info = (int)((config >> 21) & 0x3); + die_id = socket_id * dies_per_socket + die_info; + + subnode_info = (int)((config >> 20) & 0x1); + subnode_id = die_id * subnodes_per_die + subnode_info; + per_cpu(zx_subnode_id, cpu) = subnode_id; + + cluster_info = (int)((config >> 18) & 0x3); + cluster_id = subnode_id * clusters_per_subnode + cluster_info; + per_cpu(zx_cluster_id, cpu) = cluster_id; + } +} + +static int zx_topology_cluster_id(int cpu) +{ + return per_cpu(zx_cluster_id, cpu); +} + +static int zx_topology_subnode_id(int cpu) +{ + return per_cpu(zx_subnode_id, cpu); +} + +static int zx_topology_package_id(int cpu) +{ + return per_cpu(zx_package_id, cpu); +} + +DEFINE_PER_CPU(cpumask_t, zx_cluster_core_bits); +DEFINE_PER_CPU(cpumask_t, zx_subnode_core_bits); + +static void zx_gen_core_map(void) +{ + int cpu, i; + int cluster_id, subnode_id; + + for_each_present_cpu(cpu) { + cluster_id = zx_topology_cluster_id(cpu); + + for_each_present_cpu(i) { + if (zx_topology_cluster_id(i) == cluster_id) + cpumask_set_cpu(i, &per_cpu(zx_cluster_core_bits, cpu)); + } + } + + for_each_present_cpu(cpu) { + subnode_id = zx_topology_subnode_id(cpu); + + for_each_present_cpu(i) { + if (zx_topology_subnode_id(i) == subnode_id) + cpumask_set_cpu(i, &per_cpu(zx_subnode_core_bits, cpu)); + } + } +} + +static struct cpumask *topology_cluster_core_cpumask(int cpu) +{ + return &per_cpu(zx_cluster_core_bits, cpu); +} + +static struct cpumask *topology_subnode_core_cpumask(int cpu) +{ + return &per_cpu(zx_subnode_core_bits, cpu); +} + +static void uncore_free_pcibus_map(void) +{ + +} + +static int 
kh40000_pci2node_map_init(void) +{ + return 0; +} + +ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct uncore_event_desc *event = + container_of(attr, struct uncore_event_desc, attr); + return sprintf(buf, "%s", event->config); +} + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu) +{ + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(pmu->type->name, "llc")) + return pmu->boxes[zx_topology_cluster_id(cpu)]; + else + return pmu->boxes[zx_topology_subnode_id(cpu)]; + } else { + return pmu->boxes[zx_topology_package_id(cpu)]; + } +} + +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + u64 count; + + WARN_ON_ONCE(box->cpu != smp_processor_id()); + rdmsrl(event->hw.event_base, count); + return count; +} + +static void uncore_assign_hw_event(struct zhaoxin_uncore_box *box, struct perf_event *event, + int idx) +{ + struct hw_perf_event *hwc = &event->hw; + + hwc->idx = idx; + hwc->last_tag = ++box->tags[idx]; + + if (uncore_pmc_fixed(hwc->idx)) { + hwc->event_base = uncore_fixed_ctr(box); + hwc->config_base = uncore_fixed_ctl(box); + return; + } + + hwc->config_base = uncore_event_ctl(box, hwc->idx); + hwc->event_base = uncore_perf_ctr(box, hwc->idx); +} + +void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + u64 prev_count, new_count, delta; + int shift; + + if (uncore_pmc_fixed(event->hw.idx)) + shift = 64 - uncore_fixed_ctr_bits(box); + else + shift = 64 - uncore_perf_ctr_bits(box); + + /* the hrtimer might modify the previous event value */ +again: + prev_count = local64_read(&event->hw.prev_count); + new_count = uncore_read_counter(box, event); + if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) + goto again; + + delta = (new_count << shift) - (prev_count << shift); + delta >>= shift; + + local64_add(delta, &event->count); +} + +/*KX5000/KX6000 uncore ops start*/ +static void kx5000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + wrmsrl(event->hw.config_base, 0); +} + +static void kx5000_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) +{ + wrmsrl(KX5000_UNC_PERF_GLOBAL_CTL, 0); +} + +static void kx5000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) +{ + wrmsrl(KX5000_UNC_PERF_GLOBAL_CTL, + KX5000_UNC_GLOBAL_CTL_EN_PC_ALL | KX5000_UNC_GLOBAL_CTL_EN_FC); +} + +static void kx5000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->idx < UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, hwc->config | KX5000_UNC_CTL_EN); + else + wrmsrl(hwc->config_base, KX5000_UNC_FIXED_CTR_CTL_EN); +} + +static struct attribute *kx5000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_cmask3.attr, + NULL, +}; + +static struct attribute_group kx5000_uncore_format_group = { + .name = "format", + .attrs = kx5000_uncore_formats_attr, +}; + +static struct uncore_event_desc kx5000_uncore_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kx5000_uncore_msr_ops = { + .disable_box = kx5000_uncore_msr_disable_box, + .enable_box = kx5000_uncore_msr_enable_box, + .disable_event = kx5000_uncore_msr_disable_event, + .enable_event = kx5000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct 
zhaoxin_uncore_type kx5000_uncore_box = { + .name = "", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX5000_UNC_PERFEVTSEL0, + .perf_ctr = KX5000_UNC_UNCORE_PMC0, + .fixed_ctr = KX5000_UNC_FIXED_CTR, + .fixed_ctl = KX5000_UNC_FIXED_CTR_CTRL, + .event_mask = KX5000_UNC_RAW_EVENT_MASK, + .event_descs = kx5000_uncore_events, + .ops = &kx5000_uncore_msr_ops, + .format_group = &kx5000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kx5000_msr_uncores[] = { + &kx5000_uncore_box, + NULL, +}; +/*KX5000/KX6000 uncore ops end*/ + +/*KH40000 msr ops start*/ +static void kh40000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, hwc->config); +} + +static void kh40000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, hwc->config | KH40000_PMON_CTL_EN); +} + +static void kh40000_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) +{ + u64 config; + unsigned int msr; + + msr = uncore_msr_box_ctl(box); + if (msr) { + rdmsrl(msr, config); + config |= KH40000_PMON_BOX_CTL_FRZ; + wrmsrl(msr, config); + } +} + +static void kh40000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) +{ + u64 config; + unsigned int msr; + + msr = uncore_msr_box_ctl(box); + if (msr) { + rdmsrl(msr, config); + config &= ~KH40000_PMON_BOX_CTL_FRZ; + wrmsrl(msr, config); + } +} + +static void kh40000_uncore_msr_init_box(struct zhaoxin_uncore_box *box) +{ + unsigned int msr = uncore_msr_box_ctl(box); + + if (msr) { + wrmsrl(msr, KH40000_PMON_BOX_CTL_INT); + wrmsrl(msr, 0); + } +} + +static struct attribute *kh40000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute_group kh40000_uncore_format_group = { + .name = "format", + .attrs = kh40000_uncore_formats_attr, +}; + +static struct uncore_event_desc kh40000_uncore_llc_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_hif_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zzi_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kh40000_uncore_msr_ops = { + .init_box = kh40000_uncore_msr_init_box, + .disable_box = kh40000_uncore_msr_disable_box, + .enable_box = kh40000_uncore_msr_enable_box, + .disable_event = kh40000_uncore_msr_disable_event, + .enable_event = kh40000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_llc_box = { + .name = "llc", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_LLC_MSR_PMON_CTL0, + .perf_ctr = KH40000_LLC_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_LLC_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_llc_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_hif_box = { + .name = "hif", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KH40000_HIF_MSR_PMON_CTL0, + .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, + .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, + .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, + 
.event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_hif_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zzi_box = { + .name = "zzi", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_ZZI_MSR_PMON_CTL0, + .perf_ctr = KH40000_ZZI_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZZI_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_zzi_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kh40000_msr_uncores[] = { + &kh40000_uncore_llc_box, + &kh40000_uncore_hif_box, + &kh40000_uncore_zzi_box, + NULL, +}; +/*KH40000 msr ops end*/ + +/*KH40000 pci ops start*/ +static void kh40000_uncore_pci_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config); +} + +static void kh40000_uncore_pci_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config | KH40000_PMON_CTL_EN); +} + +static void kh40000_uncore_pci_disable_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config |= KH40000_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static void kh40000_uncore_pci_enable_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config &= ~KH40000_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static u64 kh40000_uncore_pci_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count + 1); + pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count); + + return count; +} + +static void kh40000_uncore_pci_init_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + + pci_write_config_dword(pdev, box_ctl, KH40000_PMON_PCI_BOX_CTL_INT); +} + +static struct uncore_event_desc kh40000_uncore_imc_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_pci_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zpi_dll_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zdi_dll_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_pxptrf_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kh40000_uncore_pci_ops = { + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kh40000_uncore_pci_read_counter +}; + 
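/*
 * Illustrative sketch, not part of the patch: the
 * kh40000_uncore_pci_read_counter() callback above fills a 64-bit count
 * from two 32-bit config-space reads. On little-endian x86,
 * "(u32 *)&count + 1" points at bits 63:32 of count, so the dword read
 * from event_base supplies the high half of the counter and the dword at
 * event_base + 4 supplies the low half. The helper name below is
 * hypothetical and only restates that assembly explicitly:
 */
static inline u64 kh40000_assemble_count(u32 dword_at_base, u32 dword_at_base_4)
{
	/* bits 63:32 come from event_base, bits 31:0 from event_base + 4 */
	return ((u64)dword_at_base << 32) | dword_at_base_4;
}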
+static struct zhaoxin_uncore_type kh40000_uncore_mc0 = {
+	.name = "mc0",
+	.num_counters = 4,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.fixed_ctr_bits = 48,
+	.fixed_ctr = KH40000_MC0_CHy_PMON_FIXED_CTR,
+	.fixed_ctl = KH40000_MC0_CHy_PMON_FIXED_CTL,
+	.event_descs = kh40000_uncore_imc_events,
+	.perf_ctr = KH40000_MC0_CHy_PMON_CTR0,
+	.event_ctl = KH40000_MC0_CHy_PMON_CTL0,
+	.event_mask = KH40000_PMON_RAW_EVENT_MASK,
+	.box_ctl = KH40000_MC0_CHy_PMON_BLK_CTL,
+	.ops = &kh40000_uncore_pci_ops,
+	.format_group = &kh40000_uncore_format_group
+};
+
+static struct zhaoxin_uncore_type kh40000_uncore_mc1 = {
+	.name = "mc1",
+	.num_counters = 4,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.fixed_ctr_bits = 48,
+	.fixed_ctr = KH40000_MC1_CHy_PMON_FIXED_CTR,
+	.fixed_ctl = KH40000_MC1_CHy_PMON_FIXED_CTL,
+	.event_descs = kh40000_uncore_imc_events,
+	.perf_ctr = KH40000_MC1_CHy_PMON_CTR0,
+	.event_ctl = KH40000_MC1_CHy_PMON_CTL0,
+	.event_mask = KH40000_PMON_RAW_EVENT_MASK,
+	.box_ctl = KH40000_MC1_CHy_PMON_BLK_CTL,
+	.ops = &kh40000_uncore_pci_ops,
+	.format_group = &kh40000_uncore_format_group
+};
+
+static struct zhaoxin_uncore_type kh40000_uncore_pci = {
+	.name = "pci",
+	.num_counters = 4,
+	.num_boxes = 10,
+	.perf_ctr_bits = 48,
+	.event_descs = kh40000_uncore_pci_events,
+	.perf_ctr = KH40000_PCI_PMON_CTR0,
+	.event_ctl = KH40000_PCI_PMON_CTL0,
+	.event_mask = KH40000_PMON_RAW_EVENT_MASK,
+	.box_ctl = KH40000_PCI_PMON_BLK_CTL,
+	.ops = &kh40000_uncore_pci_ops,
+	.format_group = &kh40000_uncore_format_group
+};
+
+static struct zhaoxin_uncore_type kh40000_uncore_zpi_dll = {
+	.name = "zpi_dll",
+	.num_counters = 4,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.event_descs = kh40000_uncore_zpi_dll_events,
+	.perf_ctr = KH40000_ZPI_DLL_PMON_CTR0,
+	.event_ctl = KH40000_ZPI_DLL_PMON_CTL0,
+	.event_mask = KH40000_PMON_RAW_EVENT_MASK,
+	.box_ctl = KH40000_ZPI_DLL_PMON_BLK_CTL,
+	.ops = &kh40000_uncore_pci_ops,
+	.format_group = &kh40000_uncore_format_group
+};
+
+static struct zhaoxin_uncore_type kh40000_uncore_zdi_dll = {
+	.name = "zdi_dll",
+	.num_counters = 4,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.event_descs = kh40000_uncore_zdi_dll_events,
+	.perf_ctr = KH40000_ZDI_DLL_PMON_CTR0,
+	.event_ctl = KH40000_ZDI_DLL_PMON_CTL0,
+	.event_mask = KH40000_PMON_RAW_EVENT_MASK,
+	.box_ctl = KH40000_ZDI_DLL_PMON_BLK_CTL,
+	.ops = &kh40000_uncore_pci_ops,
+	.format_group = &kh40000_uncore_format_group
+};
+
+static struct zhaoxin_uncore_type kh40000_uncore_pxptrf = {
+	.name = "pxptrf",
+	.num_counters = 4,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.event_descs = kh40000_uncore_pxptrf_events,
+	.perf_ctr = KH40000_PXPTRF_PMON_CTR0,
+	.event_ctl = KH40000_PXPTRF_PMON_CTL0,
+	.event_mask = KH40000_PMON_RAW_EVENT_MASK,
+	.box_ctl = KH40000_PXPTRF_PMON_BLK_CTL,
+	.ops = &kh40000_uncore_pci_ops,
+	.format_group = &kh40000_uncore_format_group
+};
+
+enum {
+	KH40000_PCI_UNCORE_MC0,
+	KH40000_PCI_UNCORE_MC1,
+	KH40000_PCI_UNCORE_PCI,
+	KH40000_PCI_UNCORE_ZPI_DLL,
+	KH40000_PCI_UNCORE_ZDI_DLL,
+	KH40000_PCI_UNCORE_PXPTRF,
+};
+
+static struct zhaoxin_uncore_type *kh40000_pci_uncores[] = {
+	[KH40000_PCI_UNCORE_MC0] = &kh40000_uncore_mc0,
+	[KH40000_PCI_UNCORE_MC1] = &kh40000_uncore_mc1,
+	[KH40000_PCI_UNCORE_PCI] = &kh40000_uncore_pci,
+	[KH40000_PCI_UNCORE_ZPI_DLL] = &kh40000_uncore_zpi_dll,
+	[KH40000_PCI_UNCORE_ZDI_DLL] = &kh40000_uncore_zdi_dll,
+	[KH40000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf,
+	NULL,
+};
+
+static const struct pci_device_id kh40000_uncore_pci_ids[] = {
+	{ /* MC Channel 0/1 */
+ PCI_DEVICE(0x1D17, 0x31b2), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_MC0, 0), + }, + + { /* ZPI_DLL */ + PCI_DEVICE(0x1D17, 0x91c1), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZPI_DLL, 0), + }, + + { /* ZDI_DLL */ + PCI_DEVICE(0x1D17, 0x3b03), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZDI_DLL, 0), + }, + + { /* PXPTRF */ + PCI_DEVICE(0x1D17, 0x31B4), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PXPTRF, 0), + }, + + { /* end: all zeroes */ } +}; + +static struct pci_driver kh40000_uncore_pci_driver = { + .name = "kh40000_uncore", + .id_table = kh40000_uncore_pci_ids, +}; +/*KH40000 pci ops end*/ + +/*KX7000 msr ops start*/ +static unsigned int kx7000_uncore_msr_offsets[] = { + 0x0, 0x13, 0x27, 0x3b, 0x4f, 0x63, 0x77, 0x8b +}; + +static struct attribute *kx7000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_logic_op0.attr, + &format_attr_logic_op1.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute_group kx7000_uncore_format_group = { + .name = "format", + .attrs = kx7000_uncore_formats_attr, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mesh_box = { + .name = "mesh", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX7000_MESH_MSR_PMON_CTL0, + .perf_ctr = KX7000_MESH_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MESH_MSR_PMON_BLK_CTL, + .msr_offsets = kx7000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_llc_box = { + .name = "llc", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX7000_LLC_MSR_PMON_CTL0, + .perf_ctr = KX7000_LLC_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_LLC_MSR_PMON_BLK_CTL, + .msr_offsets = kx7000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_hif_box = { + .name = "hif", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KH40000_HIF_MSR_PMON_CTL0, + .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, + .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, + .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_homestop = { + .name = "homestop", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_HOMESTOP_MSR_PMON_CTL0, + .perf_ctr = KX7000_HOMESTOP_MSR_PMON_CTR0, + .fixed_ctr = KX7000_HOMESTOP_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX7000_HOMESTOP_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_HOMESTOP_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_ccd_zdi_pl = { + .name = "ccd_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_CCD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX7000_CCD_ZDI_PL_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_CCD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = 
&kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_iod_zdi_pl = { + .name = "iod_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX7000_IOD_ZDI_PL_MSR_PMON_CTR0, + .fixed_ctr = KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + + +static struct zhaoxin_uncore_type *kx7000_msr_uncores[] = { + &kx7000_uncore_llc_box, + &kx7000_uncore_mesh_box, + &kx7000_uncore_hif_box, + &kx7000_uncore_homestop, + &kx7000_uncore_ccd_zdi_pl, + &kx7000_uncore_iod_zdi_pl, + NULL, +}; +/*KX7000 msr ops end*/ + +/*KX7000 pci ops start*/ +static unsigned int kx7000_mc_ctr_lh_offsets[] = { + 0xc, 0xe, 0x10, 0x12, 0x14 +}; + +static u64 kx7000_uncore_pci_mc_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_word(pdev, hwc->event_base, (u16 *)&count + 3); + pci_read_config_dword(pdev, hwc->event_base + kx7000_mc_ctr_lh_offsets[hwc->idx], + (u32 *)&count); + + return count; +} + +static struct zhaoxin_uncore_ops kx7000_uncore_pci_mc_ops = { + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kx7000_uncore_pci_mc_read_counter +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mc_a0 = { + .name = "mc_a0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_A0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_A0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_A0_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_A0_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_A0_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mc_a1 = { + .name = "mc_a1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_A1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_A1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_A1_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_A1_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_A1_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mc_b0 = { + .name = "mc_b0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_B0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_B0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_B0_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_B0_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_B0_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mc_b1 = { + .name = "mc_b1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_B1_CHy_PMON_FIXED_CTR, + .fixed_ctl = 
KX7000_MC_B1_CHy_PMON_FIXED_CTL,
+	.perf_ctr = KX7000_MC_B1_CHy_PMON_CTR0,
+	.event_ctl = KX7000_MC_B1_CHy_PMON_CTL0,
+	.event_mask = KX7000_PMON_RAW_EVENT_MASK,
+	.box_ctl = KX7000_MC_B1_CHy_PMON_BLK_CTL,
+	.ops = &kx7000_uncore_pci_mc_ops,
+	.format_group = &kx7000_uncore_format_group,
+};
+
+static struct zhaoxin_uncore_type kx7000_uncore_pci = {
+	.name = "pci",
+	.num_counters = 4,
+	.num_boxes = 17,
+	.perf_ctr_bits = 48,
+	.perf_ctr = KH40000_PCI_PMON_CTR0,
+	.event_ctl = KH40000_PCI_PMON_CTL0,
+	.event_mask = KX7000_PMON_RAW_EVENT_MASK,
+	.box_ctl = KH40000_PCI_PMON_BLK_CTL,
+	.ops = &kh40000_uncore_pci_ops,
+	.format_group = &kx7000_uncore_format_group,
+};
+
+static struct zhaoxin_uncore_type kx7000_uncore_pxptrf = {
+	.name = "pxptrf",
+	.num_counters = 4,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.event_descs = kh40000_uncore_pxptrf_events,
+	.perf_ctr = KH40000_PXPTRF_PMON_CTR0,
+	.event_ctl = KH40000_PXPTRF_PMON_CTL0,
+	.event_mask = KX7000_PMON_RAW_EVENT_MASK,
+	.box_ctl = KH40000_PXPTRF_PMON_BLK_CTL,
+	.ops = &kh40000_uncore_pci_ops,
+	.format_group = &kx7000_uncore_format_group,
+};
+
+enum {
+	KX7000_PCI_UNCORE_MC_A0,
+	KX7000_PCI_UNCORE_MC_A1,
+	KX7000_PCI_UNCORE_MC_B0,
+	KX7000_PCI_UNCORE_MC_B1,
+	KX7000_PCI_UNCORE_PCI,
+	KX7000_PCI_UNCORE_PXPTRF,
+};
+
+static struct zhaoxin_uncore_type *kx7000_pci_uncores[] = {
+	[KX7000_PCI_UNCORE_MC_A0] = &kx7000_uncore_mc_a0,
+	[KX7000_PCI_UNCORE_MC_A1] = &kx7000_uncore_mc_a1,
+	[KX7000_PCI_UNCORE_MC_B0] = &kx7000_uncore_mc_b0,
+	[KX7000_PCI_UNCORE_MC_B1] = &kx7000_uncore_mc_b1,
+	[KX7000_PCI_UNCORE_PCI] = &kx7000_uncore_pci,
+	[KX7000_PCI_UNCORE_PXPTRF] = &kx7000_uncore_pxptrf,
+	NULL,
+};
+
+static const struct pci_device_id kx7000_uncore_pci_ids[] = {
+	{ /* MC Channel A0/A1/B0/B1 */
+		PCI_DEVICE(0x1D17, 0x31B2),
+		.driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_MC_A0, 0),
+	},
+
+	{ /* PXPTRF */
+		PCI_DEVICE(0x1D17, 0x31B4),
+		.driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PXPTRF, 0),
+	},
+
+	{ /* end: all zeroes */ }
+};
+
+static struct pci_driver kx7000_uncore_pci_driver = {
+	.name = "kx7000_uncore",
+	.id_table = kx7000_uncore_pci_ids,
+};
+/*KX7000 pci ops end*/
+
+/*KX7000 mmio ops start*/
+static void kx7000_uncore_mmio_init_box(struct zhaoxin_uncore_box *box)
+{
+	struct pci_dev *pdev = NULL;
+	unsigned int box_ctl = uncore_mmio_box_ctl(box);
+	resource_size_t addr;
+	u32 pci_dword;
+	int mmio_base_offset;
+
+	pdev = pci_get_device(0x1d17, 0x31b1, pdev);
+	if (!pdev)
+		return;
+
+	if (!strcmp(box->pmu->name, "iod_zdi_dl"))
+		mmio_base_offset = KX7000_IOD_ZDI_DL_MMIO_BASE_OFFSET;
+	else
+		mmio_base_offset = KX7000_CCD_ZDI_DL_MMIO_BASE_OFFSET;
+
+	pci_read_config_dword(pdev, mmio_base_offset, &pci_dword);
+	addr = (u64)(pci_dword & KX7000_ZDI_DL_MMIO_BASE_MASK) << 32;
+
+	pci_read_config_dword(pdev, mmio_base_offset + 4, &pci_dword);
+	addr |= pci_dword & KX7000_ZDI_DL_MMIO_MEM0_MASK;
+
+	box->io_addr = ioremap(addr, KX7000_ZDI_DL_MMIO_SIZE);
+	if (!box->io_addr)
+		return;
+
+	writel(KH40000_PMON_PCI_BOX_CTL_INT, box->io_addr + box_ctl);
+}
+
+static void kx7000_uncore_mmio_disable_box(struct zhaoxin_uncore_box *box)
+{
+	u32 config;
+	unsigned int box_ctl = uncore_mmio_box_ctl(box);
+
+	if (!box->io_addr)
+		return;
+
+	config = readl(box->io_addr + box_ctl);
+	config |= KH40000_PMON_BOX_CTL_FRZ;
+	writel(config, box->io_addr + box_ctl);
+}
+
+static void kx7000_uncore_mmio_enable_box(struct zhaoxin_uncore_box *box)
+{
+	u32 config;
+	unsigned int box_ctl = uncore_mmio_box_ctl(box);
+
+	if (!box->io_addr)
+		return;
+
+	config = readl(box->io_addr + box_ctl);
+	config &= ~KH40000_PMON_BOX_CTL_FRZ;
+	writel(config, box->io_addr + box_ctl);
+}
+
+static void kx7000_uncore_mmio_enable_event(struct zhaoxin_uncore_box *box,
+		struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!box->io_addr)
+		return;
+
+	writel(hwc->config | KH40000_PMON_CTL_EN, box->io_addr + hwc->config_base);
+}
+
+static void kx7000_uncore_mmio_disable_event(struct zhaoxin_uncore_box *box,
+		struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!box->io_addr)
+		return;
+
+	writel(hwc->config, box->io_addr + hwc->config_base);
+}
+
+static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box)
+{
+	if (box->io_addr)
+		iounmap(box->io_addr);
+}
+
+static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event)
+{
+	u64 count = 0;
+	u64 count_low = 0;
+	u64 count_high = 0;
+
+	if (!box->io_addr)
+		return 0;
+
+	count_high = readl(box->io_addr + event->hw.event_base) & 0xffff;
+	count_low = readl(box->io_addr + event->hw.event_base + 4);
+	count = (count_high << 32) + count_low;
+
+	return count;
+}
+
+static struct zhaoxin_uncore_ops kx7000_uncore_mmio_ops = {
+	.init_box = kx7000_uncore_mmio_init_box,
+	.exit_box = uncore_mmio_exit_box,
+	.disable_box = kx7000_uncore_mmio_disable_box,
+	.enable_box = kx7000_uncore_mmio_enable_box,
+	.disable_event = kx7000_uncore_mmio_disable_event,
+	.enable_event = kx7000_uncore_mmio_enable_event,
+	.read_counter = uncore_mmio_read_counter,
+};
+
+static struct zhaoxin_uncore_type kx7000_uncore_iod_zdi_dl = {
+	.name = "iod_zdi_dl",
+	.num_counters = 4,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.fixed_ctr_bits = 48,
+	.perf_ctr = KX7000_ZDI_DL_MMIO_PMON_CTR0,
+	.event_ctl = KX7000_ZDI_DL_MMIO_PMON_CTL0,
+	.event_mask = KX7000_PMON_RAW_EVENT_MASK,
+	.box_ctl = KX7000_ZDI_DL_MMIO_PMON_BLK_CTL,
+	.ops = &kx7000_uncore_mmio_ops,
+	.format_group = &kx7000_uncore_format_group,
+};
+
+static struct zhaoxin_uncore_type kx7000_uncore_ccd_zdi_dl = {
+	.name = "ccd_zdi_dl",
+	.num_counters = 4,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.fixed_ctr_bits = 48,
+	.perf_ctr = KX7000_ZDI_DL_MMIO_PMON_CTR0,
+	.event_ctl = KX7000_ZDI_DL_MMIO_PMON_CTL0,
+	.event_mask = KX7000_PMON_RAW_EVENT_MASK,
+	.box_ctl = KX7000_ZDI_DL_MMIO_PMON_BLK_CTL,
+	.ops = &kx7000_uncore_mmio_ops,
+	.format_group = &kx7000_uncore_format_group,
+};
+
+static struct zhaoxin_uncore_type *kx7000_mmio_uncores[] = {
+	&kx7000_uncore_iod_zdi_dl,
+	&kx7000_uncore_ccd_zdi_dl,
+	NULL,
+};
+
+/*KX7000 mmio ops end*/
+
+static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
+{
+	struct zhaoxin_uncore_box *box;
+	struct perf_event *event;
+	unsigned long flags;
+	int bit;
+
+	box = container_of(hrtimer, struct zhaoxin_uncore_box, hrtimer);
+	if (!box->n_active || box->cpu != smp_processor_id())
+		return HRTIMER_NORESTART;
+	/*
+	 * disable local interrupts to prevent uncore_pmu_event_start/stop
+	 * from interrupting the update process
+	 */
+	local_irq_save(flags);
+
+	/*
+	 * handle boxes with an active event list as opposed to active
+	 * counters
+	 */
+	list_for_each_entry(event, &box->active_list, active_entry) {
+		uncore_perf_event_update(box, event);
+	}
+
+	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
+		uncore_perf_event_update(box, box->events[bit]);
+
+	local_irq_restore(flags);
+
+	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
+	return HRTIMER_RESTART;
+}
+
+static void uncore_pmu_start_hrtimer(struct
zhaoxin_uncore_box *box) +{ + hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), + HRTIMER_MODE_REL_PINNED); +} + +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_cancel(&box->hrtimer); +} + +static void uncore_pmu_init_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + box->hrtimer.function = uncore_pmu_hrtimer; +} + +static struct zhaoxin_uncore_box *uncore_alloc_box(struct zhaoxin_uncore_type *type, int node) +{ + int i, size, numshared = type->num_shared_regs; + struct zhaoxin_uncore_box *box; + + size = sizeof(*box) + numshared * sizeof(struct zhaoxin_uncore_extra_reg); + + box = kzalloc_node(size, GFP_KERNEL, node); + if (!box) + return NULL; + + for (i = 0; i < numshared; i++) + raw_spin_lock_init(&box->shared_regs[i].lock); + + uncore_pmu_init_hrtimer(box); + box->cpu = -1; + box->package_id = -1; + box->cluster_id = -1; + box->subnode_id = -1; + + /* set default hrtimer timeout */ + box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; + + INIT_LIST_HEAD(&box->active_list); + + return box; +} + +static bool is_box_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + return &box->pmu->pmu == event->pmu; +} + +static int uncore_collect_events(struct zhaoxin_uncore_box *box, struct perf_event *leader, + bool dogrp) +{ + struct perf_event *event; + int n, max_count; + + max_count = box->pmu->type->num_counters; + if (box->pmu->type->fixed_ctl) + max_count++; + + if (box->n_events >= max_count) + return -EINVAL; + + n = box->n_events; + + if (is_box_event(box, leader)) { + box->event_list[n] = leader; + n++; + } + + if (!dogrp) + return n; + + for_each_sibling_event(event, leader) { + if (!is_box_event(box, event) || + event->state <= PERF_EVENT_STATE_OFF) + continue; + + if (n >= max_count) + return -EINVAL; + + box->event_list[n] = event; + n++; + } + return n; +} + +static struct event_constraint *uncore_get_event_constraint(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct zhaoxin_uncore_type *type = box->pmu->type; + struct event_constraint *c; + + if (type->ops->get_constraint) { + c = type->ops->get_constraint(box, event); + if (c) + return c; + } + + if (event->attr.config == UNCORE_FIXED_EVENT) + return &uncore_constraint_fixed; + + if (type->constraints) { + for_each_event_constraint(c, type->constraints) { + if ((event->hw.config & c->cmask) == c->code) + return c; + } + } + + return &type->unconstrainted; +} + +static void uncore_put_event_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + if (box->pmu->type->ops->put_constraint) + box->pmu->type->ops->put_constraint(box, event); +} + +static int uncore_assign_events(struct zhaoxin_uncore_box *box, int assign[], int n) +{ + unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; + struct event_constraint *c; + int i, wmin, wmax, ret = 0; + struct hw_perf_event *hwc; + + bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX); + + for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { + c = uncore_get_event_constraint(box, box->event_list[i]); + box->event_constraint[i] = c; + wmin = min(wmin, c->weight); + wmax = max(wmax, c->weight); + } + + /* fastpath, try to reuse previous register */ + for (i = 0; i < n; i++) { + hwc = &box->event_list[i]->hw; + c = box->event_constraint[i]; + + /* never assigned */ + if (hwc->idx == -1) + break; + + /* constraint still honored */ + if (!test_bit(hwc->idx, c->idxmsk)) + break; + + /* not already used */ + if 
(test_bit(hwc->idx, used_mask))
+			break;
+
+		__set_bit(hwc->idx, used_mask);
+		if (assign)
+			assign[i] = hwc->idx;
+	}
+	/* slow path */
+	if (i != n)
+		ret = perf_assign_events(box->event_constraint, n,
+					 wmin, wmax, n, assign);
+
+	if (!assign || ret) {
+		for (i = 0; i < n; i++)
+			uncore_put_event_constraint(box, box->event_list[i]);
+	}
+	return ret ? -EINVAL : 0;
+}
+
+static void uncore_pmu_event_start(struct perf_event *event, int flags)
+{
+	struct zhaoxin_uncore_box *box = uncore_event_to_box(event);
+	int idx = event->hw.idx;
+
+	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
+		return;
+
+	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+		return;
+
+	event->hw.state = 0;
+	box->events[idx] = event;
+	box->n_active++;
+	__set_bit(idx, box->active_mask);
+
+	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
+	uncore_enable_event(box, event);
+
+	if (box->n_active == 1)
+		uncore_pmu_start_hrtimer(box);
+}
+
+static void uncore_pmu_event_stop(struct perf_event *event, int flags)
+{
+	struct zhaoxin_uncore_box *box = uncore_event_to_box(event);
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
+		uncore_disable_event(box, event);
+		box->n_active--;
+		box->events[hwc->idx] = NULL;
+		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+		hwc->state |= PERF_HES_STOPPED;
+
+		if (box->n_active == 0)
+			uncore_pmu_cancel_hrtimer(box);
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		/*
+		 * Drain the remaining delta count out of an event
+		 * that we are disabling:
+		 */
+		uncore_perf_event_update(box, event);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+}
+
+static int uncore_pmu_event_add(struct perf_event *event, int flags)
+{
+	struct zhaoxin_uncore_box *box = uncore_event_to_box(event);
+	struct hw_perf_event *hwc = &event->hw;
+	int assign[UNCORE_PMC_IDX_MAX];
+	int i, n, ret;
+
+	if (!box)
+		return -ENODEV;
+
+	ret = n = uncore_collect_events(box, event, false);
+	if (ret < 0)
+		return ret;
+
+	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_ARCH;
+
+	ret = uncore_assign_events(box, assign, n);
+	if (ret)
+		return ret;
+
+	/* save events moving to new counters */
+	for (i = 0; i < box->n_events; i++) {
+		event = box->event_list[i];
+		hwc = &event->hw;
+
+		if (hwc->idx == assign[i] &&
+		    hwc->last_tag == box->tags[assign[i]])
+			continue;
+		/*
+		 * Ensure we don't accidentally enable a stopped
+		 * counter simply because we rescheduled.
+ */ + if (hwc->state & PERF_HES_STOPPED) + hwc->state |= PERF_HES_ARCH; + + uncore_pmu_event_stop(event, PERF_EF_UPDATE); + } + + /* reprogram moved events into new counters */ + for (i = 0; i < n; i++) { + event = box->event_list[i]; + hwc = &event->hw; + + if (hwc->idx != assign[i] || + hwc->last_tag != box->tags[assign[i]]) + uncore_assign_hw_event(box, event, assign[i]); + else if (i < box->n_events) + continue; + + if (hwc->state & PERF_HES_ARCH) + continue; + + uncore_pmu_event_start(event, 0); + } + box->n_events = n; + + return 0; +} + +static void uncore_pmu_event_del(struct perf_event *event, int flags) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + int i; + + uncore_pmu_event_stop(event, PERF_EF_UPDATE); + + for (i = 0; i < box->n_events; i++) { + if (event == box->event_list[i]) { + uncore_put_event_constraint(box, event); + + for (++i; i < box->n_events; i++) + box->event_list[i - 1] = box->event_list[i]; + + --box->n_events; + break; + } + } + + event->hw.idx = -1; + event->hw.last_tag = ~0ULL; +} + +static void uncore_pmu_event_read(struct perf_event *event) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + + uncore_perf_event_update(box, event); +} + +static int uncore_validate_group(struct zhaoxin_uncore_pmu *pmu, struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + struct zhaoxin_uncore_box *fake_box; + int ret = -EINVAL, n; + + fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE); + if (!fake_box) + return -ENOMEM; + + fake_box->pmu = pmu; + /* + * the event is not yet connected with its + * siblings therefore we must first collect + * existing siblings, then add the new event + * before we can simulate the scheduling + */ + n = uncore_collect_events(fake_box, leader, true); + if (n < 0) + goto out; + + fake_box->n_events = n; + n = uncore_collect_events(fake_box, event, false); + if (n < 0) + goto out; + + fake_box->n_events = n; + + ret = uncore_assign_events(fake_box, NULL, n); +out: + kfree(fake_box); + return ret; +} + +static int uncore_pmu_event_init(struct perf_event *event) +{ + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + struct hw_perf_event *hwc = &event->hw; + int ret; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + pmu = uncore_event_to_pmu(event); + /* no device found for this pmu */ + if (pmu->func_id < 0) + return -ENOENT; + + /* Sampling not supported yet */ + if (hwc->sample_period) + return -EINVAL; + + /* + * Place all uncore events for a particular physical package + * onto a single cpu + */ + if (event->cpu < 0) + return -EINVAL; + box = uncore_pmu_to_box(pmu, event->cpu); + if (!box || box->cpu < 0) + return -EINVAL; + event->cpu = box->cpu; + event->pmu_private = box; + + //event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; + + event->hw.idx = -1; + event->hw.last_tag = ~0ULL; + event->hw.extra_reg.idx = EXTRA_REG_NONE; + event->hw.branch_reg.idx = EXTRA_REG_NONE; + + if (event->attr.config == UNCORE_FIXED_EVENT) { + /* no fixed counter */ + if (!pmu->type->fixed_ctl) + return -EINVAL; + /* + * if there is only one fixed counter, only the first pmu + * can access the fixed counter + */ + if (pmu->type->single_fixed && pmu->pmu_idx > 0) + return -EINVAL; + + /* fixed counters have event field hardcoded to zero */ + hwc->config = 0ULL; + } else { + hwc->config = event->attr.config & + (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32)); + if (pmu->type->ops->hw_config) { + ret = pmu->type->ops->hw_config(box, event); + if 
(ret) + return ret; + } + } + + if (event->group_leader != event) + ret = uncore_validate_group(pmu, event); + else + ret = 0; + + return ret; +} + +static void uncore_pmu_enable(struct pmu *pmu) +{ + struct zhaoxin_uncore_pmu *uncore_pmu; + struct zhaoxin_uncore_box *box; + + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->enable_box) + uncore_pmu->type->ops->enable_box(box); +} + +static void uncore_pmu_disable(struct pmu *pmu) +{ + struct zhaoxin_uncore_pmu *uncore_pmu; + struct zhaoxin_uncore_box *box; + + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->disable_box) + uncore_pmu->type->ops->disable_box(box); +} + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + cpumask_t *active_mask; + struct pmu *pmu; + struct zhaoxin_uncore_pmu *uncore_pmu; + + pmu = dev_get_drvdata(dev); + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(uncore_pmu->type->name, "llc")) + active_mask = &uncore_cpu_cluster_mask; + else + active_mask = &uncore_cpu_subnode_mask; + } else { + active_mask = &uncore_cpu_mask; + } + + return cpumap_print_to_pagebuf(true, buf, active_mask); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *uncore_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group uncore_pmu_attr_group = { + .attrs = uncore_pmu_attrs, +}; + +static int uncore_pmu_register(struct zhaoxin_uncore_pmu *pmu) +{ + int ret; + + if (!pmu->type->pmu) { + pmu->pmu = (struct pmu) { + .attr_groups = pmu->type->attr_groups, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = uncore_pmu_enable, + .pmu_disable = uncore_pmu_disable, + .event_init = uncore_pmu_event_init, + .add = uncore_pmu_event_add, + .del = uncore_pmu_event_del, + .start = uncore_pmu_event_start, + .stop = uncore_pmu_event_stop, + .read = uncore_pmu_event_read, + .module = THIS_MODULE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + } else { + pmu->pmu = *pmu->type->pmu; + pmu->pmu.attr_groups = pmu->type->attr_groups; + } + + if (pmu->type->num_boxes == 1) { + if (strlen(pmu->type->name) > 0) + sprintf(pmu->name, "uncore_%s", pmu->type->name); + else + sprintf(pmu->name, "uncore"); + } else { + sprintf(pmu->name, "uncore_%s_%d", pmu->type->name, + pmu->pmu_idx); + } + + ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); + if (!ret) + pmu->registered = true; + + return ret; +} + +static void uncore_pmu_unregister(struct zhaoxin_uncore_pmu *pmu) +{ + if (!pmu->registered) + return; + perf_pmu_unregister(&pmu->pmu); + pmu->registered = false; +} + +static void uncore_free_boxes(struct zhaoxin_uncore_pmu *pmu) +{ + int i, max; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(pmu->type->name, "llc")) + max = max_clusters; + else + max = max_subnodes; + } else { + max = max_packages; + } + + for (i = 0; i < max; i++) + kfree(pmu->boxes[i]); + kfree(pmu->boxes); +} + +static void uncore_type_exit(struct zhaoxin_uncore_type *type) +{ + struct zhaoxin_uncore_pmu *pmu = type->pmus; + int i; + + if (pmu) { + for (i = 0; i < type->num_boxes; i++, pmu++) { + uncore_pmu_unregister(pmu); + uncore_free_boxes(pmu); + } + kfree(type->pmus); + 
type->pmus = NULL; + } + kfree(type->events_group); + type->events_group = NULL; +} + +static void uncore_types_exit(struct zhaoxin_uncore_type **types) +{ + for (; *types; types++) + uncore_type_exit(*types); +} + +static int __init uncore_type_init(struct zhaoxin_uncore_type *type, bool setid) +{ + struct zhaoxin_uncore_pmu *pmus; + size_t size; + int i, j; + + pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL); + if (!pmus) + return -ENOMEM; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(type->name, "llc")) + size = max_clusters * sizeof(struct zhaoxin_uncore_box *); + else + size = max_subnodes * sizeof(struct zhaoxin_uncore_box *); + } else { + size = max_packages * sizeof(struct zhaoxin_uncore_box *); + } + + for (i = 0; i < type->num_boxes; i++) { + pmus[i].func_id = setid ? i : -1; + pmus[i].pmu_idx = i; + pmus[i].type = type; + pmus[i].boxes = kzalloc(size, GFP_KERNEL); + if (!pmus[i].boxes) + goto err; + } + + type->pmus = pmus; + type->unconstrainted = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, + 0, type->num_counters, 0, 0); + + if (type->event_descs) { + struct { + struct attribute_group group; + struct attribute *attrs[]; + } *attr_group; + for (i = 0; type->event_descs[i].attr.attr.name; i++) + ; + + attr_group = kzalloc(struct_size(attr_group, attrs, i + 1), GFP_KERNEL); + if (!attr_group) + goto err; + + attr_group->group.name = "events"; + attr_group->group.attrs = attr_group->attrs; + + for (j = 0; j < i; j++) + attr_group->attrs[j] = &type->event_descs[j].attr.attr; + + type->events_group = &attr_group->group; + } + + type->pmu_group = &uncore_pmu_attr_group; + + return 0; + +err: + for (i = 0; i < type->num_boxes; i++) + kfree(pmus[i].boxes); + kfree(pmus); + + return -ENOMEM; +} + +static int __init uncore_types_init(struct zhaoxin_uncore_type **types, bool setid) +{ + int ret; + + for (; *types; types++) { + ret = uncore_type_init(*types, setid); + if (ret) + return ret; + } + return 0; +} + +/* + * add a pci uncore device + */ +static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + struct zhaoxin_uncore_box **boxes; + char mc_dev[10]; + int loop = 1; + int i, j = 0; + int subnode_id = 0; + int ret = 0; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) + subnode_id = uncore_pcibus_to_subnodeid(pdev->bus); + + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + strscpy(mc_dev, "mc0", sizeof("mc0")); + if (!strcmp(type->name, mc_dev)) + loop = 2; + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX7000) { + strscpy(mc_dev, "mc_a0", sizeof("mc_a0")); + if (!strcmp(type->name, mc_dev)) + loop = 4; + } + + boxes = kcalloc(loop, sizeof(struct zhaoxin_uncore_box *), GFP_KERNEL); + if (!boxes) + return -ENOMEM; + + for (i = 0; i < loop; i++) { + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data) + j]; + + if (!type) + continue; + /* + * for performance monitoring unit with multiple boxes, + * each box has a different function id. 
+ */ + pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)]; + + if (WARN_ON_ONCE(pmu->boxes[subnode_id] != NULL)) + return -EINVAL; + + box = uncore_alloc_box(type, NUMA_NO_NODE); + if (!box) + return -ENOMEM; + + if (pmu->func_id < 0) + pmu->func_id = pdev->devfn; + else + WARN_ON_ONCE(pmu->func_id != pdev->devfn); + + atomic_inc(&box->refcnt); + box->subnode_id = subnode_id; + box->pci_dev = pdev; + box->pmu = pmu; + uncore_box_init(box); + boxes[i] = box; + + pci_set_drvdata(pdev, boxes); + pmu->boxes[subnode_id] = box; + if (atomic_inc_return(&pmu->activeboxes) > 1) { + if (!strcmp(type->name, mc_dev)) + goto next_loop; + else + return 0; + } + /* First active box registers the pmu */ + ret = uncore_pmu_register(pmu); + if (ret) { + pci_set_drvdata(pdev, NULL); + pmu->boxes[subnode_id] = NULL; + uncore_box_exit(box); + kfree(box); + } +next_loop: + j++; + } + + return ret; +} + +static void uncore_pci_remove(struct pci_dev *pdev) +{ + struct zhaoxin_uncore_box **boxes; + struct zhaoxin_uncore_box *box; + struct zhaoxin_uncore_pmu *pmu; + int subnode_id = 0; + int i = 0; + int loop = 1; + + boxes = pci_get_drvdata(pdev); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(boxes[0]->pmu->type->name, "mc0")) + loop = 2; + else + loop = 1; + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX7000) { + if (!strcmp(boxes[0]->pmu->type->name, "mc_a0")) + loop = 4; + else + loop = 1; + } + + for (i = 0; i < loop; i++) { + box = boxes[i]; + pmu = box->pmu; + if (WARN_ON_ONCE(subnode_id != box->subnode_id)) + return; + + pci_set_drvdata(pdev, NULL); + pmu->boxes[subnode_id] = NULL; + if (atomic_dec_return(&pmu->activeboxes) == 0) + uncore_pmu_unregister(pmu); + + uncore_box_exit(box); + kfree(box); + } + + kfree(boxes); +} + +static int __init uncore_pci_init(void) +{ + int ret; + + ret = uncore_types_init(uncore_pci_uncores, false); + if (ret) + goto errtype; + + uncore_pci_driver->probe = uncore_pci_probe; + uncore_pci_driver->remove = uncore_pci_remove; + + ret = pci_register_driver(uncore_pci_driver); + if (ret) + goto errtype; + + pcidrv_registered = true; + return 0; + +errtype: + uncore_types_exit(uncore_pci_uncores); + uncore_free_pcibus_map(); + uncore_pci_uncores = empty_uncore; + return ret; +} + +static void uncore_pci_exit(void) +{ + if (pcidrv_registered) { + pcidrv_registered = false; + pci_unregister_driver(uncore_pci_driver); + uncore_types_exit(uncore_pci_uncores); + uncore_free_pcibus_map(); + } +} + +static void uncore_change_type_ctx(struct zhaoxin_uncore_type *type, int old_cpu, int new_cpu) +{ + struct zhaoxin_uncore_pmu *pmu = type->pmus; + struct zhaoxin_uncore_box *box; + int i, package_id, cluster_id = 0, subnode_id = 0; + + package_id = zx_topology_package_id(old_cpu < 0 ? new_cpu : old_cpu); + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + cluster_id = zx_topology_cluster_id(old_cpu < 0 ? new_cpu : old_cpu); + subnode_id = zx_topology_subnode_id(old_cpu < 0 ? 
new_cpu : old_cpu); + } + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(type->name, "llc")) { + box = pmu->boxes[cluster_id]; + if (!box) + continue; + } else { + box = pmu->boxes[subnode_id]; + if (!box) + continue; + } + } else { + box = pmu->boxes[package_id]; + if (!box) + continue; + } + + if (old_cpu < 0) { + + WARN_ON_ONCE(box->cpu != -1); + box->cpu = new_cpu; + continue; + } + WARN_ON_ONCE(box->cpu != old_cpu); + box->cpu = -1; + if (new_cpu < 0) + continue; + + uncore_pmu_cancel_hrtimer(box); + perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu); + box->cpu = new_cpu; + } +} + +static void uncore_change_context(struct zhaoxin_uncore_type **uncores, int old_cpu, int new_cpu) +{ + for (; *uncores; uncores++) + uncore_change_type_ctx(*uncores, old_cpu, new_cpu); +} + +static void uncore_box_unref(struct zhaoxin_uncore_type **types, int id) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + int i; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[id]; + if (box && atomic_dec_return(&box->refcnt) == 0) + uncore_box_exit(box); + } + } +} + +struct zhaoxin_uncore_type *uncore_msr_cluster_uncores[] = { + &kh40000_uncore_llc_box, + NULL, +}; + +struct zhaoxin_uncore_type *uncore_msr_subnode_uncores[] = { + &kh40000_uncore_hif_box, + &kh40000_uncore_zzi_box, + NULL, +}; + +struct zhaoxin_uncore_type *uncore_pci_subnode_uncores[] = { + &kh40000_uncore_mc0, + &kh40000_uncore_mc1, + &kh40000_uncore_pci, + &kh40000_uncore_zpi_dll, + &kh40000_uncore_zdi_dll, + &kh40000_uncore_pxptrf, + NULL, +}; + +static void kx5000_event_cpu_offline(int cpu) +{ + int package, target; + + /* Check if exiting cpu is used for collecting uncore events */ + + if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) + goto unref_cpu_mask; + + /* Find a new cpu to collect uncore events */ + target = cpumask_any_but(topology_core_cpumask(cpu), cpu); + + /* Migrate uncore events to the new target */ + if (target < nr_cpu_ids) + cpumask_set_cpu(target, &uncore_cpu_mask); + else + target = -1; + + uncore_change_context(uncore_msr_uncores, cpu, target); + uncore_change_context(uncore_mmio_uncores, cpu, target); + uncore_change_context(uncore_pci_uncores, cpu, target); + +unref_cpu_mask: + /*clear the references*/ + package = zx_topology_package_id(cpu); + uncore_box_unref(uncore_msr_uncores, package); + uncore_box_unref(uncore_mmio_uncores, package); +} + +static void kh40000_event_cpu_offline(int cpu) +{ + int cluster_target, subnode_target; + int cluster_id, subnode_id; + + cluster_id = zx_topology_cluster_id(cpu); + subnode_id = zx_topology_subnode_id(cpu); + + /* Check if exiting cpu is used for collecting uncore events */ + + if (cpumask_test_and_clear_cpu(cpu, &uncore_cpu_cluster_mask)) { + cluster_target = cpumask_any_but(topology_cluster_core_cpumask(cpu), cpu); + if (cluster_target < nr_cpu_ids) + cpumask_set_cpu(cluster_target, &uncore_cpu_cluster_mask); + else + cluster_target = -1; + uncore_change_context(uncore_msr_cluster_uncores, cpu, cluster_target); + } else { + uncore_box_unref(uncore_msr_cluster_uncores, cluster_id); + } + + if (cpumask_test_and_clear_cpu(cpu, &uncore_cpu_subnode_mask)) { + subnode_target = cpumask_any_but(topology_subnode_core_cpumask(cpu), cpu); + if (subnode_target < nr_cpu_ids) + cpumask_set_cpu(subnode_target, &uncore_cpu_subnode_mask); + else + subnode_target = -1; 
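+		/* Migrate subnode-scope uncore events to the new target */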
+ uncore_change_context(uncore_msr_subnode_uncores, cpu, subnode_target); + uncore_change_context(uncore_pci_subnode_uncores, cpu, subnode_target); + } else { + uncore_box_unref(uncore_msr_subnode_uncores, subnode_id); + } +} + +static int uncore_event_cpu_offline(unsigned int cpu) +{ + unsigned int x86_model; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + kh40000_event_cpu_offline(cpu); + else + kx5000_event_cpu_offline(cpu); + + return 0; +} + +static int kx5000_allocate_boxes(struct zhaoxin_uncore_type **types, unsigned int id, + unsigned int cpu) +{ + struct zhaoxin_uncore_box *box, *tmp; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[id]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + box->package_id = id; + list_add(&box->active_list, &allocated); + } + } + + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[id] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + return -ENOMEM; +} + +static int kh40000_allocate_boxes(struct zhaoxin_uncore_type **types, unsigned int id, + unsigned int cpu) +{ + struct zhaoxin_uncore_box *box, *tmp; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[id]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + if (!strcmp(type->name, "llc")) + box->cluster_id = id; + else + box->subnode_id = id; + list_add(&box->active_list, &allocated); + } + } + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[id] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + return -ENOMEM; +} + +static int uncore_box_ref(struct zhaoxin_uncore_type **types, int id, unsigned int cpu) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + int i, ret = 0; + + int x86_model; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + ret = kh40000_allocate_boxes(types, id, cpu); + else + ret = kx5000_allocate_boxes(types, id, cpu); + + if (ret) + return ret; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[id]; + if (box && atomic_inc_return(&box->refcnt) == 1) + uncore_box_init(box); + } + } + return 0; +} + +static int kx5000_event_cpu_online(unsigned int cpu) +{ + int package, target, msr_ret, mmio_ret; + + package = zx_topology_package_id(cpu); + msr_ret = uncore_box_ref(uncore_msr_uncores, package, cpu); + mmio_ret = uncore_box_ref(uncore_mmio_uncores, package, cpu); + if (msr_ret && mmio_ret) + return -ENOMEM; + + /* + * Check if there is an online cpu in the package + * which collects uncore events 
already. + */ + target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu)); + if (target < nr_cpu_ids) + return 0; + + cpumask_set_cpu(cpu, &uncore_cpu_mask); + + if (!msr_ret) + uncore_change_context(uncore_msr_uncores, -1, cpu); + if (!mmio_ret) + uncore_change_context(uncore_mmio_uncores, -1, cpu); + uncore_change_context(uncore_pci_uncores, -1, cpu); + + return 0; +} + +static int kh40000_event_cpu_online(unsigned int cpu) +{ + int cluster_target, subnode_target; + int cluster_id, subnode_id; + int cluster_ret, subnode_ret; + + cluster_id = zx_topology_cluster_id(cpu); + subnode_id = zx_topology_subnode_id(cpu); + + cluster_ret = uncore_box_ref(uncore_msr_cluster_uncores, cluster_id, cpu); + subnode_ret = uncore_box_ref(uncore_msr_subnode_uncores, subnode_id, cpu); + + if (cluster_ret && subnode_ret) + return -ENOMEM; + + /* + * Check if there is an online cpu in the cluster or subnode + * which collects uncore events already. + */ + + cluster_target = + cpumask_any_and(&uncore_cpu_cluster_mask, topology_cluster_core_cpumask(cpu)); + subnode_target = + cpumask_any_and(&uncore_cpu_subnode_mask, topology_subnode_core_cpumask(cpu)); + + if (cluster_target < nr_cpu_ids && subnode_target < nr_cpu_ids) + return 0; + + if (!cluster_ret && cluster_target >= nr_cpu_ids) { + cpumask_set_cpu(cpu, &uncore_cpu_cluster_mask); + uncore_change_context(uncore_msr_cluster_uncores, -1, cpu); + } + + if (!subnode_ret && subnode_target >= nr_cpu_ids) { + cpumask_set_cpu(cpu, &uncore_cpu_subnode_mask); + uncore_change_context(uncore_msr_subnode_uncores, -1, cpu); + uncore_change_context(uncore_pci_subnode_uncores, -1, cpu); + } + + return 0; +} + +static int uncore_event_cpu_online(unsigned int cpu) +{ + int x86_model; + int kx5000_ret = 0, kh40000_ret = 0; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + kh40000_ret = kh40000_event_cpu_online(cpu); + else + kx5000_ret = kx5000_event_cpu_online(cpu); + + if (kx5000_ret || kh40000_ret) + return -ENOMEM; + + return 0; +} + +static int __init type_pmu_register(struct zhaoxin_uncore_type *type) +{ + int i, ret; + + for (i = 0; i < type->num_boxes; i++) { + ret = uncore_pmu_register(&type->pmus[i]); + if (ret) + return ret; + } + return 0; +} + +static int __init uncore_msr_pmus_register(void) +{ + struct zhaoxin_uncore_type **types = uncore_msr_uncores; + int ret; + + for (; *types; types++) { + ret = type_pmu_register(*types); + if (ret) + return ret; + } + return 0; +} + +static int __init uncore_cpu_init(void) +{ + int ret; + + ret = uncore_types_init(uncore_msr_uncores, true); + if (ret) + goto err; + + ret = uncore_msr_pmus_register(); + if (ret) + goto err; + return 0; +err: + uncore_types_exit(uncore_msr_uncores); + uncore_msr_uncores = empty_uncore; + return ret; +} + +static int __init uncore_mmio_init(void) +{ + struct zhaoxin_uncore_type **types = uncore_mmio_uncores; + int ret; + + ret = uncore_types_init(types, true); + if (ret) + goto err; + + for (; *types; types++) { + ret = type_pmu_register(*types); + if (ret) + goto err; + } + return 0; +err: + uncore_types_exit(uncore_mmio_uncores); + uncore_mmio_uncores = empty_uncore; + return ret; +} + +struct zhaoxin_uncore_init_fun { + void (*cpu_init)(void); + int (*pci_init)(void); + void (*mmio_init)(void); +}; + +void kx5000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kx5000_msr_uncores; +} + +static const struct zhaoxin_uncore_init_fun kx5000_uncore_init __initconst = { + .cpu_init = kx5000_uncore_cpu_init, +}; + +void 
kh40000_uncore_cpu_init(void)
+{
+	uncore_msr_uncores = kh40000_msr_uncores;
+}
+
+int kh40000_uncore_pci_init(void)
+{
+	int ret = kh40000_pci2node_map_init();	/* pci_bus to package mapping; currently a no-op */
+
+	if (ret)
+		return ret;
+	uncore_pci_uncores = kh40000_pci_uncores;
+	uncore_pci_driver = &kh40000_uncore_pci_driver;
+	return 0;
+}
+
+static const struct zhaoxin_uncore_init_fun kh40000_uncore_init __initconst = {
+	.cpu_init = kh40000_uncore_cpu_init,
+	.pci_init = kh40000_uncore_pci_init,
+};
+
+void kx7000_uncore_cpu_init(void)
+{
+	u64 val;
+	int cpu;
+
+	uncore_msr_uncores = kx7000_msr_uncores;
+
+	/* clear bit 16 of MSR 0x1877 so that HIF can work normally */
+	for_each_present_cpu(cpu) {
+		rdmsrl_on_cpu(cpu, 0x1877, &val);
+		val &= ~(1ULL << 16);
+		wrmsrl_on_cpu(cpu, 0x1877, val);
+	}
+}
+
+int kx7000_uncore_pci_init(void)
+{
+	uncore_pci_uncores = kx7000_pci_uncores;
+	uncore_pci_driver = &kx7000_uncore_pci_driver;
+
+	return 0;
+}
+
+void kx7000_uncore_mmio_init(void)
+{
+	uncore_mmio_uncores = kx7000_mmio_uncores;
+}
+
+static const struct zhaoxin_uncore_init_fun kx7000_uncore_init __initconst = {
+	.cpu_init = kx7000_uncore_cpu_init,
+	.pci_init = kx7000_uncore_pci_init,
+	.mmio_init = kx7000_uncore_mmio_init,
+};
+
+static const struct x86_cpu_id zhaoxin_uncore_match[] __initconst = {
+	X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init),
+	X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init),
+	X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init),
+	X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX7000, &kx7000_uncore_init),
+	X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init),
+	X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init),
+	X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init),
+	X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX7000, &kx7000_uncore_init),
+	{},
+};
+MODULE_DEVICE_TABLE(x86cpu, zhaoxin_uncore_match);
+
+static int __init zhaoxin_uncore_init(void)
+{
+	const struct x86_cpu_id *id = NULL;
+	struct zhaoxin_uncore_init_fun *uncore_init;
+	int pret = 0, cret = 0, mret = 0, ret;
+
+	id = x86_match_cpu(zhaoxin_uncore_match);
+	if (!id)
+		return -ENODEV;
+
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		return -ENODEV;
+
+	pr_info("Zhaoxin uncore PMU driver initializing\n");
+
+	get_topology_number();
+	get_topology_info();
+
+	if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) {
+		zx_gen_core_map();
+		get_pcibus_limit();
+	}
+
+	uncore_init = (struct zhaoxin_uncore_init_fun *)id->driver_data;
+
+	if (uncore_init->pci_init) {
+		pret = uncore_init->pci_init();
+		if (!pret)
+			pret = uncore_pci_init();
+	}
+
+	if (uncore_init->cpu_init) {
+		uncore_init->cpu_init();
+		cret = uncore_cpu_init();
+	}
+
+	if (uncore_init->mmio_init) {
+		uncore_init->mmio_init();
+		mret = uncore_mmio_init();
+	}
+
+	if (cret && pret && mret)
+		return -ENODEV;
+
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+				"perf/x86/zhaoxin/uncore:online",
+				uncore_event_cpu_online,
+				uncore_event_cpu_offline);
+	if (ret)
+		goto err;
+	pr_info("Zhaoxin uncore PMU driver initialized\n");
+
+	return 0;
+
+err:
+	uncore_types_exit(uncore_msr_uncores);
+	uncore_types_exit(uncore_mmio_uncores);
+	uncore_pci_exit();
+	pr_err("Zhaoxin uncore PMU driver init failed\n");
+
+	return ret;
+}
+module_init(zhaoxin_uncore_init);
+
+static void __exit zhaoxin_uncore_exit(void)
+{
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
+	uncore_types_exit(uncore_msr_uncores);
+	uncore_types_exit(uncore_mmio_uncores);
+	uncore_pci_exit();
+}
+module_exit(zhaoxin_uncore_exit);
diff --git a/arch/x86/events/zhaoxin/uncore.h b/arch/x86/events/zhaoxin/uncore.h
new file mode 100644
index 0000000000000000000000000000000000000000..43ea0636417527f2b524dbf4a771de12e42fdf4a
--- /dev/null
+++ b/arch/x86/events/zhaoxin/uncore.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <asm/apicdef.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include <linux/perf_event.h>
+#include "../perf_event.h"
+
+#define ZHAOXIN_FAM7_KX5000 0x1b
+#define ZHAOXIN_FAM7_KX6000 0x3b
+#define ZHAOXIN_FAM7_KH40000 0x5b
+#define ZHAOXIN_FAM7_KX7000 0x6b
+
+#define UNCORE_PMU_NAME_LEN 32
+#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
+
+#define UNCORE_FIXED_EVENT 0xff
+#define UNCORE_PMC_IDX_MAX_GENERIC 4
+#define UNCORE_PMC_IDX_MAX_FIXED 1
+#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
+
+#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1)
+
+#define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx)
+#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
+#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
+
+struct zhaoxin_uncore_ops;
+struct zhaoxin_uncore_pmu;
+struct zhaoxin_uncore_box;
+struct uncore_event_desc;
+
+struct zhaoxin_uncore_type {
+	const char *name;
+	int num_counters;
+	int num_boxes;
+	int perf_ctr_bits;
+	int fixed_ctr_bits;
+	unsigned int perf_ctr;
+	unsigned int event_ctl;
+	unsigned int event_mask;
+	unsigned int event_mask_ext;
+	unsigned int fixed_ctr;
+	unsigned int fixed_ctl;
+	unsigned int box_ctl;
+	union {
+		unsigned int msr_offset;
+		unsigned int mmio_offset;
+	};
+	unsigned int num_shared_regs:8;
+	unsigned int single_fixed:1;
+	unsigned int pair_ctr_ctl:1;
+	unsigned int *msr_offsets;
+	struct event_constraint unconstrainted;
+	struct event_constraint *constraints;
+	struct zhaoxin_uncore_pmu *pmus;
+	struct zhaoxin_uncore_ops *ops;
+	struct uncore_event_desc *event_descs;
+	const struct attribute_group *attr_groups[4];
+	struct pmu *pmu; /* for custom pmu ops */
+};
+
+#define pmu_group attr_groups[0]
+#define format_group attr_groups[1]
+#define events_group attr_groups[2]
+
+struct zhaoxin_uncore_ops {
+	void (*init_box)(struct zhaoxin_uncore_box *box);
+	void (*exit_box)(struct zhaoxin_uncore_box *box);
+	void (*disable_box)(struct zhaoxin_uncore_box *box);
+	void (*enable_box)(struct zhaoxin_uncore_box *box);
+	void (*disable_event)(struct zhaoxin_uncore_box *box, struct perf_event *event);
+	void (*enable_event)(struct zhaoxin_uncore_box *box, struct perf_event *event);
+	u64 (*read_counter)(struct zhaoxin_uncore_box *box, struct perf_event *event);
+	int (*hw_config)(struct zhaoxin_uncore_box *box, struct perf_event *event);
+	struct event_constraint *(*get_constraint)(struct zhaoxin_uncore_box *box,
+			struct perf_event *event);
+	void (*put_constraint)(struct zhaoxin_uncore_box *box, struct perf_event *event);
+};
+
+struct zhaoxin_uncore_pmu {
+	struct pmu pmu;
+	char name[UNCORE_PMU_NAME_LEN];
+	int pmu_idx;
+	int func_id;
+	bool registered;
+	atomic_t activeboxes;
+	struct zhaoxin_uncore_type *type;
+	struct zhaoxin_uncore_box **boxes;
+};
+
+struct zhaoxin_uncore_extra_reg {
+	raw_spinlock_t lock;
+	u64 config, config1, config2;
+	atomic_t ref;
+};
+
+struct zhaoxin_uncore_box {
+	int pci_phys_id;
+	int package_id; /* Package ID */
+	int cluster_id;
+	int subnode_id;
+	int n_active; /* number of active events */
+	int n_events;
+	int cpu; /* cpu to collect events */
+	unsigned long flags;
+	atomic_t refcnt;
+	struct
perf_event *events[UNCORE_PMC_IDX_MAX]; + struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; + struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX]; + unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; + u64 tags[UNCORE_PMC_IDX_MAX]; + struct pci_dev *pci_dev; + struct zhaoxin_uncore_pmu *pmu; + u64 hrtimer_duration; /* hrtimer timeout for this box */ + struct hrtimer hrtimer; + struct list_head list; + struct list_head active_list; + void __iomem *io_addr; + struct zhaoxin_uncore_extra_reg shared_regs[]; +}; + +#define UNCORE_BOX_FLAG_INITIATED 0 + +struct uncore_event_desc { + struct device_attribute attr; + const char *config; +}; + +struct hw_info { + u64 config_info; + u64 active_state; +}; + +ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf); + +#define ZHAOXIN_UNCORE_EVENT_DESC(_name, _config) \ +{ \ + .attr = __ATTR(_name, 0444, zx_uncore_event_show, NULL), \ + .config = _config, \ +} + +#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t __uncore_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct device_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __uncore_##_var##_show, NULL) + +static inline bool uncore_pmc_fixed(int idx) +{ + return idx == UNCORE_PMC_IDX_FIXED; +} + +static inline unsigned int uncore_mmio_box_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->box_ctl + + box->pmu->type->mmio_offset * box->pmu->pmu_idx; +} + +static inline unsigned int uncore_pci_box_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->box_ctl; +} + +static inline unsigned int uncore_pci_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctl; +} + +static inline unsigned int uncore_pci_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr; +} + +static inline unsigned int uncore_pci_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + return idx * 4 + box->pmu->type->event_ctl; +} + +static inline unsigned int uncore_pci_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + if (!strncmp(box->pmu->type->name, "mc_", 3)) + return idx * 2 + box->pmu->type->perf_ctr; + else + return idx * 8 + box->pmu->type->perf_ctr; +} + +static inline unsigned int uncore_msr_box_offset(struct zhaoxin_uncore_box *box) +{ + struct zhaoxin_uncore_pmu *pmu = box->pmu; + + return pmu->type->msr_offsets ? + pmu->type->msr_offsets[pmu->pmu_idx] : + pmu->type->msr_offset * pmu->pmu_idx; +} + +static inline unsigned int uncore_msr_box_ctl(struct zhaoxin_uncore_box *box) +{ + if (!box->pmu->type->box_ctl) + return 0; + return box->pmu->type->box_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + if (!box->pmu->type->fixed_ctl) + return 0; + return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + return box->pmu->type->event_ctl + + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + return box->pmu->type->perf_ctr + + (box->pmu->type->pair_ctr_ctl ? 
2 * idx : idx) + + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + if (box->pci_dev) + return uncore_pci_fixed_ctl(box); + else + return uncore_msr_fixed_ctl(box); +} + +static inline unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + if (box->pci_dev) + return uncore_pci_fixed_ctr(box); + else + return uncore_msr_fixed_ctr(box); +} + +static inline unsigned int uncore_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + if (box->pci_dev || box->io_addr) + return uncore_pci_event_ctl(box, idx); + else + return uncore_msr_event_ctl(box, idx); +} + +static inline unsigned int uncore_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + if (box->pci_dev || box->io_addr) + return uncore_pci_perf_ctr(box, idx); + else + return uncore_msr_perf_ctr(box, idx); +} + +static inline int uncore_perf_ctr_bits(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->perf_ctr_bits; +} + +static inline int uncore_fixed_ctr_bits(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr_bits; +} + +static inline int uncore_num_counters(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->num_counters; +} + +static inline void uncore_disable_box(struct zhaoxin_uncore_box *box) +{ + if (box->pmu->type->ops->disable_box) + box->pmu->type->ops->disable_box(box); +} + +static inline void uncore_enable_box(struct zhaoxin_uncore_box *box) +{ + if (box->pmu->type->ops->enable_box) + box->pmu->type->ops->enable_box(box); +} + +static inline void uncore_disable_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + box->pmu->type->ops->disable_event(box, event); +} + +static inline void uncore_enable_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + box->pmu->type->ops->enable_event(box, event); +} + +static inline u64 uncore_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + return box->pmu->type->ops->read_counter(box, event); +} + +static inline void uncore_box_init(struct zhaoxin_uncore_box *box) +{ + if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->init_box) + box->pmu->type->ops->init_box(box); + } +} + +static inline void uncore_box_exit(struct zhaoxin_uncore_box *box) +{ + if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->exit_box) + box->pmu->type->ops->exit_box(box); + } +} + +static inline bool uncore_box_is_fake(struct zhaoxin_uncore_box *box) +{ + return (box->package_id < 0); +} + +static inline struct zhaoxin_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) +{ + return container_of(event->pmu, struct zhaoxin_uncore_pmu, pmu); +} + +static inline struct zhaoxin_uncore_box *uncore_event_to_box(struct perf_event *event) +{ + return event->pmu_private; +} + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu); +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event); +static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box); +static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event); +static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_event_start(struct perf_event *event, int flags); +static void uncore_pmu_event_stop(struct perf_event *event, int flags); +static int uncore_pmu_event_add(struct perf_event *event, int 
flags); +static void uncore_pmu_event_del(struct perf_event *event, int flags); +static void uncore_pmu_event_read(struct perf_event *event); +static void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event); +struct event_constraint *uncore_get_constraint(struct zhaoxin_uncore_box *box, + struct perf_event *event); +void uncore_put_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event); +u64 uncore_shared_reg_config(struct zhaoxin_uncore_box *box, int idx); diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 529c36a98d9ea07306be4958437a2853a717c793..9114ff001770122c6ae1e3215403f0439e8ca832 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -102,7 +102,8 @@ static inline bool arch_has_acpi_pdc(void) { struct cpuinfo_x86 *c = &cpu_data(0); return (c->x86_vendor == X86_VENDOR_INTEL || - c->x86_vendor == X86_VENDOR_CENTAUR); + c->x86_vendor == X86_VENDOR_CENTAUR || + c->x86_vendor == X86_VENDOR_ZHAOXIN); } static inline void arch_acpi_set_proc_cap_bits(u32 *cap) diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index c8cdc69aae098b2d2bb0b8690119e737cdeb0474..497ad86ef2256b27d2d05ff0d5a525f56b3aa11d 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -82,6 +82,10 @@ u16 amd_nb_num(void); bool amd_nb_has_feature(unsigned int feature); struct amd_northbridge *node_to_amd_nb(int node); +bool hygon_f18h_m4h(void); +u16 hygon_nb_num(void); +int get_df_id(struct pci_dev *misc, u8 *id); + static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev) { struct pci_dev *misc; @@ -122,6 +126,10 @@ static inline struct amd_northbridge *node_to_amd_nb(int node) } #define amd_gart_present(x) false +#define hygon_f18h_m4h() false +#define hygon_nb_num(x) 0 +#define get_df_id(x, y) (-ENODEV) + #endif diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 33aa0c31c21cf13573df29559dfa6377a3f35f82..a2258c894244a8369bf9e0fe9b9cfb2dc9bfb9e5 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -277,7 +277,8 @@ struct apic { u32 disable_esr : 1, dest_mode_logical : 1, - x2apic_set_max_apicid : 1; + x2apic_set_max_apicid : 1, + nmi_to_offline_cpu : 1; u32 (*calc_dest_apicid)(unsigned int cpu); @@ -543,6 +544,8 @@ extern bool default_check_apicid_used(physid_mask_t *map, int apicid); extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap); extern int default_cpu_present_to_apicid(int mps_cpu); +void apic_send_nmi_to_offline_cpu(unsigned int cpu); + #else /* CONFIG_X86_LOCAL_APIC */ static inline unsigned int read_apic_id(void) { return 0; } diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h index 58dacd90daefc4308d98407b7ac0bf02b717126d..7cd7525579051d06a8bd670d6be24c103a0fe764 100644 --- a/arch/x86/include/asm/cfi.h +++ b/arch/x86/include/asm/cfi.h @@ -7,16 +7,140 @@ * * Copyright (C) 2022 Google LLC */ +#include <linux/bug.h> +#include <asm/ibt.h> -#include <linux/bug.h> +/* + * An overview of the various calling conventions... + * + * Traditional: + * + * foo: + * ... code here ... + * ret + * + * direct caller: + * call foo + * + * indirect caller: + * lea foo(%rip), %r11 + * ... + * call *%r11 + * + * + * IBT: + * + * foo: + * endbr64 + * ... code here ... + * ret + * + * direct caller: + * call foo / call foo+4 + * + * indirect caller: + * lea foo(%rip), %r11 + * ... 
+ * call *%r11 + * + * + * kCFI: + * + * __cfi_foo: + * movl $0x12345678, %eax + * # 11 nops when CONFIG_CALL_PADDING + * foo: + * endbr64 # when IBT + * ... code here ... + * ret + * + * direct call: + * call foo # / call foo+4 when IBT + * + * indirect call: + * lea foo(%rip), %r11 + * ... + * movl $(-0x12345678), %r10d + * addl -4(%r11), %r10d # -15 when CONFIG_CALL_PADDING + * jz 1f + * ud2 + * 1:call *%r11 + * + * + * FineIBT (builds as kCFI + CALL_PADDING + IBT + RETPOLINE and runtime patches into): + * + * __cfi_foo: + * endbr64 + * subl 0x12345678, %r10d + * jz foo + * ud2 + * nop + * foo: + * osp nop3 # was endbr64 + * ... code here ... + * ret + * + * direct caller: + * call foo / call foo+4 + * + * indirect caller: + * lea foo(%rip), %r11 + * ... + * movl $0x12345678, %r10d + * subl $16, %r11 + * nop4 + * call *%r11 + * + */ +enum cfi_mode { + CFI_DEFAULT, /* FineIBT if hardware has IBT, otherwise kCFI */ + CFI_OFF, /* Traditional / IBT depending on .config */ + CFI_KCFI, /* Optionally CALL_PADDING, IBT, RETPOLINE */ + CFI_FINEIBT, /* see arch/x86/kernel/alternative.c */ +}; + +extern enum cfi_mode cfi_mode; + +struct pt_regs; #ifdef CONFIG_CFI_CLANG enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); +#define __bpfcall +extern u32 cfi_bpf_hash; +extern u32 cfi_bpf_subprog_hash; + +static inline int cfi_get_offset(void) +{ + switch (cfi_mode) { + case CFI_FINEIBT: + return 16; + case CFI_KCFI: + if (IS_ENABLED(CONFIG_CALL_PADDING)) + return 16; + return 5; + default: + return 0; + } +} +#define cfi_get_offset cfi_get_offset + +extern u32 cfi_get_func_hash(void *func); + #else static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) { return BUG_TRAP_TYPE_NONE; } +#define cfi_bpf_hash 0U +#define cfi_bpf_subprog_hash 0U +static inline u32 cfi_get_func_hash(void *func) +{ + return 0; +} #endif /* CONFIG_CFI_CLANG */ +#if HAS_KERNEL_IBT == 1 +#define CFI_NOSEAL(x) asm(IBT_NOSEAL(__stringify(x))) +#endif + #endif /* _ASM_X86_CFI_H */ diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 25050d953eee02d71cb50f585f2f7dcc7482c011..fecc4fe1d68aff799c7b91b363d69e961cefbab1 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -71,26 +71,12 @@ static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {} extern __noendbr void cet_disable(void); -struct ucode_cpu_info; +struct cpu_signature; -int intel_cpu_collect_info(struct ucode_cpu_info *uci); - -static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1, - unsigned int s2, unsigned int p2) -{ - if (s1 != s2) - return false; - - /* Processor flags are either both 0 ... */ - if (!p1 && !p2) - return true; - - /* ... or they intersect. 
*/ - return p1 & p2; -} +void intel_collect_cpu_info(struct cpu_signature *sig); extern u64 x86_read_arch_cap_msr(void); -int intel_find_matching_signature(void *mc, unsigned int csig, int cpf); +bool intel_find_matching_signature(void *mc, struct cpu_signature *sig); int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type); extern struct cpumask cpus_stop_mask; diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 3508f3fc928d4d83137bb84ead8d80ec2e073a16..c5b1d083bc6b391d1533fd1d82e5d1ad2e01b18f 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -34,6 +34,8 @@ enum cpuid_leafs CPUID_8000_001F_EAX, CPUID_8000_0021_EAX, CPUID_LNX_5, + CPUID_8C86_0000_EDX, + CPUID_C000_0006_EAX, NR_CPUID_WORDS, }; @@ -94,8 +96,10 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 22, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 23, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 22)) + BUILD_BUG_ON_ZERO(NCAPINTS != 24)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -120,8 +124,10 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 22, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 23, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 22)) + BUILD_BUG_ON_ZERO(NCAPINTS != 24)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 55d18eef6775a6f1a3d5ef1ebdf015d9ba39330f..0f174149a845c5c8ae7008d94901245e7d53b0c5 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 22 /* N 32-bit words worth of info */ +#define NCAPINTS 24 /* N 32-bit words worth of info */ #define NBUGINTS 2 /* N 32-bit bug flags */ /* @@ -81,10 +81,8 @@ #define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ #define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ #define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ - -/* CPU types for specific tunings: */ #define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ -/* FREE, was #define X86_FEATURE_K7 ( 3*32+ 5) "" Athlon */ +#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* "" CPU based on Zen5 microarchitecture */ #define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ #define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ @@ -146,8 +144,12 @@ #define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ +#define X86_FEATURE_SM2 (5*32 + 0) /* SM2 ZhaoXin GMI present */ +#define X86_FEATURE_SM2_EN (5*32 + 1) /* SM2 ZhaoXin GMI enabled */ #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ +#define X86_FEATURE_CCS (5*32 + 4) /* "sm3/4" SM3/4 present */ +#define X86_FEATURE_CCS_EN (5*32 + 5) /* "sm3/4" SM3/4 enabled */ #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ #define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ @@ -156,6 +158,23 @@ #define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ #define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ #define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ +#define X86_FEATURE_ZX_FMA (5*32+15) /* FMA supported */ +#define X86_FEATURE_PARALLAX (5*32+16) /* Adaptive P-state control present */ +#define X86_FEATURE_PARALLAX_EN (5*32+17) /* Adaptive P-state control enabled */ +#define X86_FEATURE_OVERSTRESS (5*32+18) /* Overstress Feature for auto overclock present */ +#define X86_FEATURE_OVERSTRESS_EN (5*32+19) /* Overstress Feature for auto overclock enabled */ +#define X86_FEATURE_TM3 (5*32+20) /* Thermal Monitor 3 present */ +#define X86_FEATURE_TM3_EN (5*32+21) /* Thermal Monitor 3 enabled */ +#define X86_FEATURE_RNG2 (5*32+22) /* 2nd generation of RNG present */ +#define X86_FEATURE_RNG2_EN (5*32+23) /* 2nd generation of RNG enabled */ +#define X86_FEATURE_SEM (5*32+24) /* SME feature present */ +#define X86_FEATURE_PHE2 (5*32+25) /* SHA384 and SHA 512 present */ +#define X86_FEATURE_PHE2_EN (5*32+26) /* SHA384 and SHA 512 enabled */ +#define X86_FEATURE_XMODX (5*32+27) /* "rsa" XMODEXP and MONTMUL2 are present */ +#define X86_FEATURE_XMODX_EN (5*32+28) /* "rsa_en" XMODEXP and MONTMUL2 are enabled */ +#define X86_FEATURE_VEX (5*32+29) /* VEX instructions are present */ +#define X86_FEATURE_VEX_EN (5*32+30) /* VEX instructions are enabled */ +#define X86_FEATURE_STK (5*32+31) /* STK are present */ /* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ @@ -329,6 +348,7 @@ 
#define X86_FEATURE_AMX_FP16 (12*32+21) /* "" AMX fp16 Support */ #define X86_FEATURE_AVX_IFMA (12*32+23) /* "" Support for VPMADD52[H,L]UQ */ #define X86_FEATURE_LAM (12*32+26) /* Linear Address Masking */ +#define X86_FEATURE_CRC32C_LOW_PERF (12*32+27) /* "" Low performance */ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ @@ -361,6 +381,7 @@ #define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ #define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ #define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ +#define X86_FEATURE_HWP_HIGHEST_PERF_CHANGE (14*32+15) /* "" HWP Highest perf change */ #define X86_FEATURE_HFI (14*32+19) /* Hardware Feedback Interface */ /* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */ @@ -445,6 +466,8 @@ #define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */ #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ #define X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */ +/* HYGON 3rd CSV */ +#define X86_FEATURE_CSV3 (19*32 + 30) /* HYGON 3rd CSV */ /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */ #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */ @@ -457,6 +480,13 @@ #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ #define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */ +/* HYGON-defined CPU features, CPUID level 0x8c860000:0 (EDX), word 22 */ +#define X86_FEATURE_SM3 (22*32 + 1) /* SM3 instructions */ +#define X86_FEATURE_SM4 (22*32 + 2) /* SM4 instructions */ + +/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000006, word 23 */ +#define X86_FEATURE_ZXPAUSE (23*32 + 0) /* ZHAOXIN ZXPAUSE */ + /* * Extended auxiliary flags: Linux defined - for features scattered in various * CPUID levels like 0x80000022, etc and Linux defined features. diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h new file mode 100644 index 0000000000000000000000000000000000000000..18ddf881a6fca0871720e0b3365b39660ea9449e --- /dev/null +++ b/arch/x86/include/asm/csv.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon China Secure Virtualization (CSV) + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ * + * Author: Jiang Xin + */ + +#ifndef __ASM_X86_CSV_H__ +#define __ASM_X86_CSV_H__ + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_HYGON_CSV + +struct csv_mem { + uint64_t start; + uint64_t size; +}; + +#define CSV_MR_ALIGN_BITS (28) + +extern struct csv_mem *csv_smr; +extern unsigned int csv_smr_num; + +void __init early_csv_reserve_mem(void); + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align); +void csv_release_to_contiguous(phys_addr_t pa, size_t size); + +uint32_t csv_get_smr_entry_shift(void); + +#else /* !CONFIG_HYGON_CSV */ + +#define csv_smr NULL +#define csv_smr_num 0U + +static inline void __init early_csv_reserve_mem(void) { } + +static inline phys_addr_t +csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) { return 0; } +static inline void csv_release_to_contiguous(phys_addr_t pa, size_t size) { } + +static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } + +#endif /* CONFIG_HYGON_CSV */ + +#define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 +#define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 +#define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e + +#define MSR_CSV3_ENABLED_BIT 30 +#define MSR_CSV3_ENABLED BIT_ULL(MSR_CSV3_ENABLED_BIT) + +#ifdef CONFIG_HYGON_CSV + +bool csv3_active(void); + +void __init csv_early_reset_memory(struct boot_params *bp); +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages); +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); + +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); + +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc); +int csv3_issue_request_report(phys_addr_t paddr, size_t size); + +#else /* !CONFIG_HYGON_CSV */ + +static inline bool csv3_active(void) { return false; } + +static inline void __init csv_early_reset_memory(struct boot_params *bp) { } +static inline void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) { } +static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } + +static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, + bool enc) { } + +static inline void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) { } +static inline int csv3_issue_request_report(phys_addr_t paddr, size_t size) { return -EIO; } + +#endif /* CONFIG_HYGON_CSV */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_X86_CSV_H__ */ diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h index 630891d2581989e4e2058a18c5790545f412d73b..4dbb3fea67fb510c051de76b86b08d322caf3521 100644 --- a/arch/x86/include/asm/delay.h +++ b/arch/x86/include/asm/delay.h @@ -7,6 +7,7 @@ void __init use_tsc_delay(void); void __init use_tpause_delay(void); +void __init use_zxpause_delay(void); void use_mwaitx_delay(void); #endif /* _ASM_X86_DELAY_H */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 88fcf08458d9cd25a31a7349d28d9fcc655ac447..c1e800b636f458e816f24a3399afe7fd3a236a05 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -144,6 +144,8 @@ #define DISABLED_MASK19 0 #define DISABLED_MASK20 0 #define DISABLED_MASK21 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22) +#define DISABLED_MASK22 0 +#define DISABLED_MASK23 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 24) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index 
a2be3aefff9f8974131d08d04830b1e2c8582210..c7c3074f383b7491fdc9b4468d7dd4bed445551f 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -49,6 +49,43 @@ static inline void kernel_fpu_begin(void) #endif } +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +extern int kernel_fpu_begin_nonatomic_mask(unsigned int kfpu_mask); +extern void kernel_fpu_end_nonatomic(void); + +/* Code that is unaware of kernel_fpu_begin_nonatomic_mask() can use this */ +static inline int kernel_fpu_begin_nonatomic(void) +{ +#ifdef CONFIG_X86_64 + /* + * Any 64-bit code that uses 387 instructions must explicitly request + * KFPU_387. + */ + return kernel_fpu_begin_nonatomic_mask(KFPU_MXCSR); +#else + /* + * 32-bit kernel code may use 387 operations as well as SSE2, etc, + * as long as it checks that the CPU has the required capability. + */ + return kernel_fpu_begin_nonatomic_mask(KFPU_387 | KFPU_MXCSR); +#endif +} + +/* + * Warn if kernel_fpu_end() is reached after kernel_fpu_begin_nonatomic() + * but before kernel_fpu_end_nonatomic(), i.e. while the task still owns + * the FPU in a non-atomic kernel context. + */ +static inline void check_using_kernel_fpu(void) +{ + WARN_ON_ONCE(test_thread_flag(TIF_USING_FPU_NONATOMIC)); +} + +#else +static inline void check_using_kernel_fpu(void) { } + +#endif + /* * Use fpregs_lock() while editing CPU's FPU registers or fpu->fpstate. * A context switch will (and softirq might) save CPU's FPU registers to diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sched.h index ca6e5e5f16b2eca0e02222956951c628b556ec50..0c02a89b9f881c221c1901b763460547ace03fc9 100644 --- a/arch/x86/include/asm/fpu/sched.h +++ b/arch/x86/include/asm/fpu/sched.h @@ -66,4 +66,62 @@ static inline void switch_fpu_finish(void) set_thread_flag(TIF_NEED_FPU_LOAD); } +/* + * Kernel FPU state switching for scheduling. + * + * This is a two-stage process: + * + * - switch_kernel_fpu_prepare() saves the old kernel FPU state. + * This is done within the context of the old process. + * + * - switch_kernel_fpu_finish() restores the new kernel FPU state. + * + * The kernel FPU context is only stored/restored for a user task in kernel + * mode and PF_KTHREAD is used to distinguish between kernel and user threads. + */ +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +extern void save_fpregs_to_fpkernelstate(struct fpu *kfpu); +extern unsigned long get_fpu_registers_pos(struct fpu *fpu, unsigned int off); +static inline void switch_kernel_fpu_prepare(struct task_struct *prev, int cpu) +{ + struct fpu *old_fpu = &prev->thread.fpu; + + if (!test_thread_flag(TIF_USING_FPU_NONATOMIC)) + return; + + if (static_cpu_has(X86_FEATURE_FPU) && !(prev->flags & PF_KTHREAD)) + save_fpregs_to_fpkernelstate(old_fpu); +} + +/* Internal helper for switch_kernel_fpu_finish() and signal frame setup */ +static inline void fpregs_restore_kernelregs(struct fpu *kfpu) +{ + kernel_fpu_states_restore(NULL, (void *)get_fpu_registers_pos(kfpu, MAX_FPU_CTX_SIZE), + MAX_FPU_CTX_SIZE); +} + +/* Load the complete FPU state immediately. 
*/ +static inline void switch_kernel_fpu_finish(struct task_struct *next) +{ + struct fpu *new_fpu = &next->thread.fpu; + + if (next->flags & PF_KTHREAD) + return; + + if (cpu_feature_enabled(X86_FEATURE_FPU) && + test_ti_thread_flag((struct thread_info *)next, + TIF_USING_FPU_NONATOMIC)) + fpregs_restore_kernelregs(new_fpu); +} +#else +static inline void switch_kernel_fpu_prepare(struct task_struct *prev, int cpu) +{ +} +static inline void switch_kernel_fpu_finish(struct task_struct *next) +{ +} + +#endif + #endif /* _ASM_X86_FPU_SCHED_H */ diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h index ff5c7134a37aa11a6b9cfc08994cd58af76dbfbc..ba344b416ac5bb5d48c499b9d3e62ac47d84554c 100644 --- a/arch/x86/include/asm/kfence.h +++ b/arch/x86/include/asm/kfence.h @@ -19,11 +19,12 @@ #include /* Force 4K pages for __kfence_pool. */ -static inline bool arch_kfence_init_pool(void) +static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa) { + char *__kfence_pool = kpa->addr; unsigned long addr; - for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr); + for (addr = (unsigned long)__kfence_pool; is_kfence_address_area((void *)addr, kpa); addr += PAGE_SIZE) { unsigned int level; @@ -68,6 +69,51 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) return true; } +/* + * This function is used to recover TLB to 1G kernel mapping. + * The caller MUST make sure there're no other active kfence + * pools in this 1G area. + */ +static inline bool arch_kfence_free_pool(unsigned long addr) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud, new_pud, old_pud; + + addr = ALIGN_DOWN(addr, PUD_SIZE); + + pgd = pgd_offset_k(addr); + if (pgd_none(*pgd)) + return false; + + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) + return false; + + if (p4d_large(*p4d) || !p4d_present(*p4d)) + return false; + + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) + return false; + + if (pud_large(*pud) || !pud_present(*pud)) + return false; + + new_pud = pfn_pud((unsigned long)__phys_to_pfn(__pa(addr)), + __pgprot(__PAGE_KERNEL_LARGE)); + + old_pud = xchg(pud, new_pud); + + flush_tlb_kernel_range(addr, addr + PUD_SIZE); + if (!pud_free_pmd_page(&old_pud, addr)) { + pr_warn("free old TLB error at 0x%p-0x%p\n", + (void *)addr, (void *)(addr + PUD_SIZE)); + } + + return true; +} + #endif /* !MODULE */ #endif /* _ASM_X86_KFENCE_H */ diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index a2e9317aad4955c1110e9f19d5b94ffd837b3865..a5e4a019b4ccfdf15ff5d18f5900078216155a14 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -80,6 +80,9 @@ struct arch_specific_insn { void (*emulate_op)(struct kprobe *p, struct pt_regs *regs); /* Number of bytes of text poked */ int tp_len; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct arch_optimized_insn { diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 9b419f0de713cc720ad348598ac3c5a615426c58..7c702a0faa32f8efbf42655b4ed4c18672337b44 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -135,6 +135,13 @@ KVM_X86_OP(msr_filter_changed) KVM_X86_OP(complete_emulated_msr) KVM_X86_OP(vcpu_deliver_sipi_vector) KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); +KVM_X86_OP_OPTIONAL(get_untagged_addr) +KVM_X86_OP_OPTIONAL(vm_attestation) +KVM_X86_OP_OPTIONAL(arch_hypercall) +KVM_X86_OP_OPTIONAL(control_pre_system_reset) +KVM_X86_OP_OPTIONAL(control_post_system_reset) 
+KVM_X86_OP_OPTIONAL(get_hygon_coco_extension) +KVM_X86_OP_OPTIONAL(enable_hygon_coco_extension) #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 257bf2e71d06056438fc0a8eb53d709753f81b79..afd6dee38a893491f2c8b1e8f20a4e46e55f45b5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -125,7 +125,8 @@ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \ | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \ - | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP)) + | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \ + | X86_CR4_LAM_SUP)) #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) @@ -1462,6 +1463,8 @@ struct kvm_arch { */ #define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1) struct kvm_mmu_memory_cache split_desc_cache; + + CK_KABI_RESERVE(1) }; struct kvm_vm_stat { @@ -1751,6 +1754,19 @@ struct kvm_x86_ops { * Returns vCPU specific APICv inhibit reasons */ unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu); + + gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags); + + /* + * Interfaces for HYGON CSV guest + */ + int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); + int (*control_pre_system_reset)(struct kvm *kvm); + int (*control_post_system_reset)(struct kvm *kvm); + int (*get_hygon_coco_extension)(struct kvm *kvm); + int (*enable_hygon_coco_extension)(struct kvm *kvm, u32 arg); + + int (*arch_hypercall)(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3); }; struct kvm_x86_nested_ops { diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 180b1cbfcc4e11e90f25f4dd378ba8d0e14e8f5e..a02d2215a79f12ef40e88a658475960879d064a9 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -289,6 +289,12 @@ struct cper_sec_mem_err; extern void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err); +extern void zx_apei_mce_report_mem_error(struct cper_sec_mem_err *mem_err); +struct cper_sec_pcie; +extern void zx_apei_mce_report_pcie_error(int corrected, struct cper_sec_pcie *pcie_err); +struct cper_sec_proc_generic; +extern void zx_apei_mce_report_zdi_error(struct cper_sec_proc_generic *zdi_err); + /* * Enumerate new IP types and HWID values in AMD processors which support * Scalable MCA. 
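The get_untagged_addr() hook and the new X86_CR4_LAM_SUP bit added to kvm_host.h above exist to strip linear-address-masking (LAM) metadata bits from a guest virtual address before KVM consumes it. A minimal sketch of the underlying operation, assuming the architectural LAM48/LAM57 tag positions (the hunk itself does not define them and this is not code from the patch):

    /*
     * Sketch only, not from this patch: untag an address by
     * sign-extending from bit lam_bit (47 for LAM48, 56 for LAM57,
     * assumed values), which replicates the canonical bit over any
     * metadata bits the guest stored above it.
     */
    static inline unsigned long untag_addr_sketch(unsigned long addr,
                                                  unsigned int lam_bit)
    {
            /* The arithmetic right shift copies the sign bit back down. */
            return (unsigned long)((long)(addr << (63 - lam_bit)) >>
                                   (63 - lam_bit));
    }
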
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 76081a34fc231b143d96cb1e8af8350623bd84c6..9816db501ea462c2108b1414ce3a7f67da6f968e 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -114,6 +114,12 @@ void add_encrypt_protection_map(void); extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[]; +#ifdef CONFIG_HYGON_CSV +extern void print_hygon_cc_feature_info(void); +#else /* !CONFIG_HYGON_CSV */ +static inline void print_hygon_cc_feature_info(void) { } +#endif /* CONFIG_HYGON_CSV */ + #endif /* __ASSEMBLY__ */ #endif /* __X86_MEM_ENCRYPT_H__ */ diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index bbbe9d744977d0276d3c503aa29f9d51faba8fd2..695e569159c1d1fcbf469f8fe8ce1a0f366df996 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -23,6 +23,8 @@ static inline void load_ucode_ap(void) { } static inline void microcode_bsp_resume(void) { } #endif +extern unsigned long initrd_start_early; + #ifdef CONFIG_CPU_SUP_INTEL /* Intel specific microcode defines. Public for IFS */ struct microcode_header_intel { @@ -36,7 +38,8 @@ struct microcode_header_intel { unsigned int datasize; unsigned int totalsize; unsigned int metasize; - unsigned int reserved[2]; + unsigned int min_req_ver; + unsigned int reserved; }; struct microcode_intel { @@ -68,11 +71,19 @@ static inline u32 intel_get_microcode_revision(void) return rev; } +#endif /* !CONFIG_CPU_SUP_INTEL */ -void show_ucode_info_early(void); +bool microcode_nmi_handler(void); +void microcode_offline_nmi_handler(void); -#else /* CONFIG_CPU_SUP_INTEL */ -static inline void show_ucode_info_early(void) { } -#endif /* !CONFIG_CPU_SUP_INTEL */ +#ifdef CONFIG_MICROCODE_LATE_LOADING +DECLARE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); +static __always_inline bool microcode_nmi_handler_enabled(void) +{ + return static_branch_unlikely(&microcode_nmi_handler_enable); +} +#else +static __always_inline bool microcode_nmi_handler_enabled(void) { return false; } +#endif #endif /* _ASM_X86_MICROCODE_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 24b7bd255e98301552811d2e4e309882905daf2b..d449f6dad52979151d1d7e79240ecfcaa6689979 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -75,12 +75,23 @@ #define MSR_IA32_UMWAIT_CONTROL 0xe1 #define MSR_IA32_UMWAIT_CONTROL_C02_DISABLE BIT(0) #define MSR_IA32_UMWAIT_CONTROL_RESERVED BIT(1) + +#define MSR_ZX_PAUSE_CONTROL 0x187f +#define MSR_ZX_PAUSE_CONTROL_C02_DISABLE BIT(0) +#define MSR_ZX_PAUSE_CONTROL_RESERVED BIT(1) + /* * The time field is bit[31:2], but representing a 32bit value with * bit[1:0] zero. */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U) +/* + * The time field is bit[31:2], but representing a 32bit value with + * bit[1:0] zero. 
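+ * + * By analogy with MSR_IA32_UMWAIT_CONTROL above (an inference, not spelled + * out in this header): bits [31:2] bound the maximum time zxpause may + * wait, and MSR_ZX_PAUSE_CONTROL_C02_DISABLE in bit 0 downgrades C0.2 + * requests to C0.1.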
+ */ +#define MSR_ZX_PAUSE_CONTROL_TIME_MASK (~0x03U) + /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 @@ -237,6 +248,8 @@ #define MSR_INTEGRITY_CAPS_ARRAY_BIST BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT) #define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4 #define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT) +#define MSR_INTEGRITY_CAPS_SBAF_BIT 8 +#define MSR_INTEGRITY_CAPS_SBAF BIT(MSR_INTEGRITY_CAPS_SBAF_BIT) #define MSR_INTEGRITY_CAPS_SAF_GEN_MASK GENMASK_ULL(10, 9) #define MSR_LBR_NHM_FROM 0x00000680 @@ -252,6 +265,11 @@ #define LBR_INFO_CYCLES 0xffff #define LBR_INFO_BR_TYPE_OFFSET 56 #define LBR_INFO_BR_TYPE (0xfull << LBR_INFO_BR_TYPE_OFFSET) +#define LBR_INFO_BR_CNTR_OFFSET 32 +#define LBR_INFO_BR_CNTR_NUM 4 +#define LBR_INFO_BR_CNTR_BITS 2 +#define LBR_INFO_BR_CNTR_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_BITS - 1, 0) +#define LBR_INFO_BR_CNTR_FULL_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS - 1, 0) #define MSR_ARCH_LBR_CTL 0x000014ce #define ARCH_LBR_CTL_LBREN BIT(0) @@ -769,6 +787,13 @@ #define MSR_VIA_RNG 0x0000110b #define MSR_VIA_BCR2 0x00001147 +/* + * Zhaoxin extend VMCS capabilities: + * bit 0: exec-cntl3 VMCS field. + */ +#define MSR_ZX_EXT_VMCS_CAPS 0x1675 +#define MSR_ZX_VMCS_EXEC_CTL3 BIT(0) + /* Transmeta defined MSRs */ #define MSR_TMTA_LONGRUN_CTRL 0x80868010 #define MSR_TMTA_LONGRUN_FLAGS 0x80868011 diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index bae83810505bf59a666d2358b114fdcc33cc0432..3aa7f98683e3db0fd955720cff0ad0baad5e7135 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -26,6 +26,8 @@ #define TPAUSE_C01_STATE 1 #define TPAUSE_C02_STATE 0 +#define ZXPAUSE_C01_STATE 1 + static __always_inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { @@ -148,4 +150,17 @@ static inline void __tpause(u32 ecx, u32 edx, u32 eax) #endif } +/* + * Caller can specify whether to enter C0.1 (low latency, less + * power saving) or C0.2 state (saves more power, but longer wakeup + * latency). This may be overridden by the ZX_PAUSE_CONTROL MSR + * which can force requests for C0.2 to be downgraded to C0.1. 
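+ * + * Operand usage mirrors Intel TPAUSE (an inference from the __tpause() + * helper above, not a documented guarantee): ecx selects the requested + * state, e.g. ZXPAUSE_C01_STATE, and edx:eax hold the TSC deadline at + * which the core resumes execution.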
+ */ +static inline void __zxpause(u32 ecx, u32 edx, u32 eax) +{ + /* "zxpause %ecx, %edx, %eax;" */ + asm volatile(".byte 0xf2, 0x0f, 0xa6, 0xd0\t\n" + : + : "c"(ecx), "d"(edx), "a"(eax)); +} #endif /* _ASM_X86_MWAIT_H */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 85a9fd5a3ec33176ff3e2dd96aae915136f47864..103ae26479de2a22699c1ec477e895ac56e5947b 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -31,6 +31,7 @@ #define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22) #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL +#define ARCH_PERFMON_EVENTSEL_BR_CNTR (1ULL << 35) #define INTEL_FIXED_BITS_MASK 0xFULL #define INTEL_FIXED_BITS_STRIDE 4 @@ -57,9 +58,20 @@ #define AMD64_EVENTSEL_EVENT \ (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) +#define HYGON_F18H_EVENTSEL_EVENT \ + (AMD64_EVENTSEL_EVENT | \ + GENMASK_ULL(62, 61)) #define INTEL_ARCH_EVENT_MASK \ (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT) +#define HYGON_L3_SLICE_SHIFT 28 +#define HYGON_L3_SLICE_MASK \ + (0xFULL << HYGON_L3_SLICE_SHIFT) + +#define HYGON_L3_THREAD_SHIFT 32 +#define HYGON_L3_THREAD_MASK \ + (0xFFFFFFFFULL << HYGON_L3_THREAD_SHIFT) + #define AMD64_L3_SLICE_SHIFT 48 #define AMD64_L3_SLICE_MASK \ (0xFULL << AMD64_L3_SLICE_SHIFT) @@ -100,6 +112,19 @@ (AMD64_EVENTSEL_EVENT | \ ARCH_PERFMON_EVENTSEL_UMASK) +#define HYGON_F18H_M4H_EVENTSEL_UMASK_NB 0x0003FF00ULL +#define HYGON_F18H_M6H_EVENTSEL_UMASK_NB 0x000FFF00ULL + +#define HYGON_F18H_RAW_EVENT_MASK_NB \ + (HYGON_F18H_EVENTSEL_EVENT | \ + ARCH_PERFMON_EVENTSEL_UMASK) +#define HYGON_F18H_M4H_RAW_EVENT_MASK_NB \ + (HYGON_F18H_EVENTSEL_EVENT | \ + HYGON_F18H_M4H_EVENTSEL_UMASK_NB) +#define HYGON_F18H_M6H_RAW_EVENT_MASK_NB \ + (HYGON_F18H_EVENTSEL_EVENT | \ + HYGON_F18H_M6H_EVENTSEL_UMASK_NB) + #define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB \ (AMD64_EVENTSEL_EVENT | \ GENMASK_ULL(37, 36)) @@ -216,6 +241,9 @@ union cpuid28_ecx { unsigned int lbr_timed_lbr:1; /* Branch Type Field Supported */ unsigned int lbr_br_type:1; + unsigned int reserved:13; + /* Branch counters (Event Logging) Supported */ + unsigned int lbr_counters:4; } split; unsigned int full; }; diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index d03fe4fb41f43c24b5ca8d5fc16348581acbe313..2f291fe56c1d5a0d43b78d83c1186b5513883f35 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -939,13 +939,13 @@ static inline int pte_same(pte_t a, pte_t b) return a.pte == b.pte; } -static inline pte_t pte_next_pfn(pte_t pte) +static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) { if (__pte_needs_invert(pte_val(pte))) - return __pte(pte_val(pte) - (1UL << PFN_PTE_SHIFT)); - return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT)); + return __pte(pte_val(pte) - (nr << PFN_PTE_SHIFT)); + return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT)); } -#define pte_next_pfn pte_next_pfn +#define pte_advance_pfn pte_advance_pfn static inline int pte_present(pte_t a) { @@ -1038,8 +1038,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) static inline int pmd_bad(pmd_t pmd) { - return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) != - (_KERNPG_TABLE & ~_PAGE_ACCESSED); + return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED | _PAGE_RW)) != + (_KERNPG_TABLE & ~(_PAGE_ACCESSED | _PAGE_RW)); } static inline unsigned long pages_to_mb(unsigned long npg) diff --git a/arch/x86/include/asm/processor-hygon.h 
b/arch/x86/include/asm/processor-hygon.h new file mode 100644 index 0000000000000000000000000000000000000000..a19bda3ed00561cec551775e20c2bedaabe64fb1 --- /dev/null +++ b/arch/x86/include/asm/processor-hygon.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Helpers to support Hygon CPU specific code paths. + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef _ASM_X86_PROCESSOR_HYGON_H +#define _ASM_X86_PROCESSOR_HYGON_H + +#include <asm/processor.h> + +/* + * helper to determine HYGON CPU + */ +static inline bool is_x86_vendor_hygon(void) +{ + return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON; +} + +#endif /* _ASM_X86_PROCESSOR_HYGON_H */ diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index e897046c5d2c63dd6484d99bef1a6e922955fcf7..b0ebee446563ac5f17de84bc1208ee60ec8cbf3a 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -27,6 +27,10 @@ static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lo return val; } +#ifdef CONFIG_NUMA_AWARE_SPINLOCKS +extern void cna_configure_spin_lock_slowpath(void); +#endif + #ifdef CONFIG_PARAVIRT_SPINLOCKS extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); extern void __pv_init_lock_hash(void); diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index e9187ddd3d1fdc61fff087b0ea3b8b9b0ff33ac3..6a3de575bec6a06ef41e3ac281cffbbd9248f1a5 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -100,6 +100,8 @@ #define REQUIRED_MASK19 0 #define REQUIRED_MASK20 0 #define REQUIRED_MASK21 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22) +#define REQUIRED_MASK22 0 +#define REQUIRED_MASK23 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 24) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 255a78d9d90672afb053875184d89b05bab52a0b..f159bddbec5101a2280fadae38e1d7b301ba48b0 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -4,8 +4,19 @@ #ifdef CONFIG_X86_CPU_RESCTRL -#include #include +#include +#include +#include + +/* + * This value can never be a valid CLOSID, and is used when mapping a + * (closid, rmid) pair to an index and back. On x86 only the RMID is + * needed. The index is a software defined value. 
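+ * + * For example, resctrl_arch_rmid_idx_encode() below returns the RMID + * itself as the index, and resctrl_arch_rmid_idx_decode() hands back + * X86_RESCTRL_EMPTY_CLOSID because no CLOSID can be recovered from it.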
+ */ +#define X86_RESCTRL_EMPTY_CLOSID ((u32)~0) + +void resctrl_arch_reset_resources(void); /** * struct resctrl_pqr_state - State cache for the PQR MSR @@ -31,10 +42,68 @@ struct resctrl_pqr_state { DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state); +extern bool rdt_alloc_capable; +extern bool rdt_mon_capable; +extern unsigned int rdt_mon_features; + DECLARE_STATIC_KEY_FALSE(rdt_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); +static inline bool resctrl_arch_alloc_capable(void) +{ + return rdt_alloc_capable; +} + +static inline void resctrl_arch_enable_alloc(void) +{ + static_branch_enable_cpuslocked(&rdt_alloc_enable_key); + static_branch_inc_cpuslocked(&rdt_enable_key); +} + +static inline void resctrl_arch_disable_alloc(void) +{ + static_branch_disable_cpuslocked(&rdt_alloc_enable_key); + static_branch_dec_cpuslocked(&rdt_enable_key); +} + +static inline bool resctrl_arch_mon_capable(void) +{ + return rdt_mon_capable; +} + +static inline void resctrl_arch_enable_mon(void) +{ + static_branch_enable_cpuslocked(&rdt_mon_enable_key); + static_branch_inc_cpuslocked(&rdt_enable_key); +} + +static inline void resctrl_arch_disable_mon(void) +{ + static_branch_disable_cpuslocked(&rdt_mon_enable_key); + static_branch_dec_cpuslocked(&rdt_enable_key); +} + +static inline bool resctrl_arch_is_llc_occupancy_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID)); +} + +static inline bool resctrl_arch_is_mbm_total_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID)); +} + +static inline bool resctrl_arch_is_mbm_local_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID)); +} + +static inline bool resctrl_arch_is_mbm_bps_enabled(void) +{ + return false; +} + /* * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR * @@ -52,8 +121,8 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); static inline void __resctrl_sched_in(struct task_struct *tsk) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); - u32 closid = state->default_closid; - u32 rmid = state->default_rmid; + u32 closid = READ_ONCE(state->default_closid); + u32 rmid = READ_ONCE(state->default_rmid); u32 tmp; /* @@ -88,17 +157,78 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) return val * scale; } -static inline void resctrl_sched_in(struct task_struct *tsk) +static inline void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, + u32 rmid) +{ + WRITE_ONCE(per_cpu(pqr_state.default_closid, cpu), closid); + WRITE_ONCE(per_cpu(pqr_state.default_rmid, cpu), rmid); +} + +static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk, + u32 closid, u32 rmid) +{ + WRITE_ONCE(tsk->closid, closid); + WRITE_ONCE(tsk->rmid, rmid); +} + +static inline bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid) +{ + return READ_ONCE(tsk->closid) == closid; +} + +static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored, + u32 rmid) +{ + return READ_ONCE(tsk->rmid) == rmid; +} + +static inline void resctrl_arch_sched_in(struct task_struct *tsk) { if (static_branch_likely(&rdt_enable_key)) __resctrl_sched_in(tsk); } +static inline u32 resctrl_arch_system_num_rmid_idx(void) +{ + /* RMID are independent numbers for x86. 
num_rmid_idx == num_rmid */ + return boot_cpu_data.x86_cache_max_rmid + 1; +} + +static inline void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid) +{ + *rmid = idx; + *closid = X86_RESCTRL_EMPTY_CLOSID; +} + +static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid) +{ + return rmid; +} + +/* x86 can always read an rmid, nothing needs allocating */ +struct rdt_resource; +static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid) +{ + might_sleep(); + return NULL; +}; + +static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, + void *ctx) { }; + +u64 resctrl_arch_get_prefetch_disable_bits(void); +int resctrl_arch_pseudo_lock_fn(void *_plr); +int resctrl_arch_measure_cycles_lat_fn(void *_plr); +int resctrl_arch_measure_l2_residency(void *_plr); +int resctrl_arch_measure_l3_residency(void *_plr); void resctrl_cpu_detect(struct cpuinfo_x86 *c); +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l); +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable); + #else -static inline void resctrl_sched_in(struct task_struct *tsk) {} +static inline void resctrl_arch_sched_in(struct task_struct *tsk) {} static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {} #endif /* CONFIG_X86_CPU_RESCTRL */ diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index f3495623ac997222fe62a9b571c656b6fcbc7fbb..5c83729c8e71ff5a287095211829cf197365a728 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -31,8 +31,6 @@ #include #include -extern u64 relocated_ramdisk; - /* Interrupt control for vSMPowered x86_64 systems */ #ifdef CONFIG_X86_64 void vsmp_init(void); @@ -126,6 +124,7 @@ void clear_bss(void); #ifdef __i386__ asmlinkage void __init __noreturn i386_start_kernel(void); +void __init mk_early_pgtbl_32(void); #else asmlinkage void __init __noreturn x86_64_start_kernel(char *real_mode); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index d63b02940747fad97cabfdb399b488d577d36529..1c62f52a6ae80d39026d545f88bfc42e2982fed2 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -60,6 +60,8 @@ struct thread_info { #ifdef CONFIG_SMP u32 cpu; /* current CPU */ #endif + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define INIT_THREAD_INFO(tsk) \ @@ -100,6 +102,7 @@ struct thread_info { #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */ +#define TIF_USING_FPU_NONATOMIC 30 /* using fpu in kernel non-atomic context */ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 3a7755c1a44102c9a50f043d7fecf9afc9b5969c..3db67f44063b0c9521f3b299f7cfe2ba726a9228 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -497,6 +497,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len); unsigned long __must_check copy_mc_to_user(void __user *to, const void *from, unsigned len); +#define copy_mc_to_user copy_mc_to_user #endif /* diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index f2c02e4469ccc3db6265b2e36a6bd336f5e822fc..e42507ec4b1d6d72ab51f649d558b68431719766 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -11,6 +11,12 @@ #include 
#include #include +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +#include <asm/fpu/api.h> +#endif + +extern struct static_key_false hygon_lmc_key; #ifdef CONFIG_ADDRESS_MASKING /* @@ -97,6 +103,74 @@ static inline bool __access_ok(const void __user *ptr, unsigned long size) * Copy To/From Userspace */ +#ifdef CONFIG_X86_HYGON_LMC_SSE2_ON +void fpu_save_xmm0_3(void *to, const void *from, unsigned long len); +void fpu_restore_xmm0_3(void *to, const void *from, unsigned long len); + +#define kernel_fpu_states_save fpu_save_xmm0_3 +#define kernel_fpu_states_restore fpu_restore_xmm0_3 + +__must_check unsigned long copy_user_sse2_opt_string(void *to, const void *from, + unsigned long len); + +#define MAX_FPU_CTX_SIZE 64 +#define KERNEL_FPU_NONATOMIC_SIZE (2 * (MAX_FPU_CTX_SIZE)) + +#define copy_user_large_memory_generic_string copy_user_sse2_opt_string + +#endif + +#ifdef CONFIG_X86_HYGON_LMC_AVX2_ON +void fpu_save_ymm0_7(void *to, const void *from, unsigned long len); +void fpu_restore_ymm0_7(void *to, const void *from, unsigned long len); + +#define kernel_fpu_states_save fpu_save_ymm0_7 +#define kernel_fpu_states_restore fpu_restore_ymm0_7 + +__must_check unsigned long +copy_user_avx2_pf64_nt_string(void *to, const void *from, unsigned long len); + +#define MAX_FPU_CTX_SIZE 256 +#define KERNEL_FPU_NONATOMIC_SIZE (2 * (MAX_FPU_CTX_SIZE)) + +#define copy_user_large_memory_generic_string copy_user_avx2_pf64_nt_string +#endif + +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +unsigned int get_nt_block_copy_mini_len(void); +static inline bool Hygon_LMC_check(unsigned long len) +{ + unsigned int nt_blk_cpy_mini_len = get_nt_block_copy_mini_len(); + + return nt_blk_cpy_mini_len && nt_blk_cpy_mini_len <= len && + system_state == SYSTEM_RUNNING && + !kernel_fpu_begin_nonatomic(); +} +static inline unsigned long +copy_large_memory_generic_string(void *to, const void *from, unsigned long len) +{ + unsigned long ret; + + ret = copy_user_large_memory_generic_string(to, from, len); + kernel_fpu_end_nonatomic(); + return ret; +} +#else +static inline bool Hygon_LMC_check(unsigned long len) +{ + return false; +} +static inline unsigned long +copy_large_memory_generic_string(void *to, const void *from, unsigned long len) +{ + return 0; +} +#endif + /* Handles exceptions in both to and from, but doesn't do access_ok */ __must_check unsigned long rep_movs_alternative(void *to, const void *from, unsigned len); @@ -104,6 +178,16 @@ rep_movs_alternative(void *to, const void *from, unsigned len); static __always_inline __must_check unsigned long copy_user_generic(void *to, const void *from, unsigned long len) { + /* Check if Hygon large memory copy support is enabled. */ + if (static_branch_unlikely(&hygon_lmc_key)) { + if (Hygon_LMC_check(len)) { + unsigned long ret; + + ret = copy_large_memory_generic_string(to, from, len); + return ret; + } + } + stac(); /* * If CPU has FSRM feature, use 'rep movs'. diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 0e73616b82f3469f47877e0cc56ca6b79f034173..3a4f60f19de3efe789e0424bb9b4485c45d7d979 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -84,6 +84,11 @@ */ #define TERTIARY_EXEC_IPI_VIRT VMCS_CONTROL_BIT(IPI_VIRT) +/* + * Definitions of Zhaoxin Tertiary Processor-Based VM-Execution Controls. 
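+ * + * ZX_TERTIARY_EXEC_GUEST_ZXPAUSE lets a guest execute zxpause without a + * VM exit; the ZXPAUSE_VMEXIT_TSC field added below then appears to bound + * how long the guest may wait before exiting (an inference from the field + * names, not stated in this patch).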
+ */ +#define ZX_TERTIARY_EXEC_GUEST_ZXPAUSE VMCS_CONTROL_BIT(GUEST_ZXPAUSE) + #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING) #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING) #define PIN_BASED_VIRTUAL_NMIS VMCS_CONTROL_BIT(VIRTUAL_NMIS) @@ -235,6 +240,7 @@ enum vmcs_field { TERTIARY_VM_EXEC_CONTROL_HIGH = 0x00002035, PID_POINTER_TABLE = 0x00002042, PID_POINTER_TABLE_HIGH = 0x00002043, + ZXPAUSE_VMEXIT_TSC = 0x00002200, GUEST_PHYSICAL_ADDRESS = 0x00002400, GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, VMCS_LINK_POINTER = 0x00002800, @@ -284,6 +290,7 @@ enum vmcs_field { PLE_GAP = 0x00004020, PLE_WINDOW = 0x00004022, NOTIFY_WINDOW = 0x00004024, + ZX_TERTIARY_VM_EXEC_CONTROL = 0x00004200, VM_INSTRUCTION_ERROR = 0x00004400, VM_EXIT_REASON = 0x00004402, VM_EXIT_INTR_INFO = 0x00004404, diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h index c6a7eed039145be3964db90a6cac559e45d87040..ba209bdf57d9dc7c59b4cc8cb87432d0b674c708 100644 --- a/arch/x86/include/asm/vmxfeatures.h +++ b/arch/x86/include/asm/vmxfeatures.h @@ -5,7 +5,7 @@ /* * Defines VMX CPU feature bits */ -#define NVMXINTS 5 /* N 32-bit words worth of info */ +#define NVMXINTS 6 /* N 32-bit words worth of info */ /* * Note: If the comment begins with a quoted string, that string is used @@ -89,4 +89,8 @@ /* Tertiary Processor-Based VM-Execution Controls, word 3 */ #define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* Enable IPI virtualization */ + +/* Zhaoxin Tertiary Processor-Based VM-Execution Controls, word 4 */ +#define VMX_FEATURE_GUEST_ZXPAUSE (4*32 + 0) /* zxpause instruction in guest mode */ + #endif /* _ASM_X86_VMXFEATURES_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 3269a0e23d3ab86752380175352106085bde640a..2b433325ca8fc31517455af10e40a31c12dfb220 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -16,6 +16,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg CFLAGS_REMOVE_head64.o = -pg +CFLAGS_REMOVE_head32.o = -pg CFLAGS_REMOVE_sev.o = -pg CFLAGS_REMOVE_rethook.o = -pg endif @@ -158,4 +159,7 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_MMCONF_FAM10H) += mmconf-fam10h_64.o obj-y += vsmp_64.o + obj-$(CONFIG_PCI) += zhaoxin_kh40000.o endif + +obj-$(CONFIG_HYGON_CSV) += csv.o diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c index 0916f00a992e1b3384840c99ebe0ddf78383b20b..e3782035d7c33f4088db5b450de08954889212ef 100644 --- a/arch/x86/kernel/acpi/apei.c +++ b/arch/x86/kernel/acpi/apei.c @@ -40,10 +40,36 @@ int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data) void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) { #ifdef CONFIG_X86_MCE - apei_mce_report_mem_error(sev, mem_err); + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + zx_apei_mce_report_mem_error(mem_err); + else + apei_mce_report_mem_error(sev, mem_err); #endif } +void arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err) +{ +#ifdef CONFIG_X86_MCE + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + zx_apei_mce_report_pcie_error(sev, pcie_err); +#endif +} + +bool arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err) +{ +#ifdef CONFIG_X86_MCE + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC))) { + 
zx_apei_mce_report_zdi_error(zdi_err); + return true; + } +#endif + return false; +} + int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id) { return apei_smca_report_x86_error(ctx_info, lapic_id); diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 401808b47af3edd6bf06ab9f83b846c5a939ae2d..90f22148acc798aee67fcd5a0f3c9713ed55186f 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -221,7 +221,9 @@ static int __init ffh_cstate_init(void) if (c->x86_vendor != X86_VENDOR_INTEL && c->x86_vendor != X86_VENDOR_AMD && - c->x86_vendor != X86_VENDOR_HYGON) + c->x86_vendor != X86_VENDOR_HYGON && + c->x86_vendor != X86_VENDOR_CENTAUR && + c->x86_vendor != X86_VENDOR_ZHAOXIN) return -1; cpu_cstate_entry = alloc_percpu(struct cstate_entry); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index aae7456ece070002c5f73bd68866fc2ef083a35e..f3cb4309a8b36c41af04b66b17b7eb8cb4cd1d40 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -30,6 +30,7 @@ #include #include #include +#include <asm/cfi.h> int __read_mostly alternatives_patched; @@ -842,15 +843,82 @@ void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { } #endif /* CONFIG_X86_KERNEL_IBT */ #ifdef CONFIG_FINEIBT +#define __CFI_DEFAULT CFI_DEFAULT +#elif defined(CONFIG_CFI_CLANG) +#define __CFI_DEFAULT CFI_KCFI +#else +#define __CFI_DEFAULT CFI_OFF +#endif -enum cfi_mode { - CFI_DEFAULT, - CFI_OFF, - CFI_KCFI, - CFI_FINEIBT, -}; +enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT; + +#ifdef CONFIG_CFI_CLANG +struct bpf_insn; + +/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */ +extern unsigned int __bpf_prog_runX(const void *ctx, + const struct bpf_insn *insn); + +/* + * Force a reference to the external symbol so the compiler generates + * __kcfi_typeid. 
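+ * The emitted __kcfi_typeid___bpf_prog_runX symbol carries the KCFI type + * hash of the bpf_func_t prototype; the asm block below snapshots it into + * cfi_bpf_hash at build time.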
+ */ +__ADDRESSABLE(__bpf_prog_runX); + +/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */ +asm ( +" .pushsection .data..ro_after_init,\"aw\",@progbits \n" +" .type cfi_bpf_hash,@object \n" +" .globl cfi_bpf_hash \n" +" .p2align 2, 0x0 \n" +"cfi_bpf_hash: \n" +" .long __kcfi_typeid___bpf_prog_runX \n" +" .size cfi_bpf_hash, 4 \n" +" .popsection \n" +); + +/* Must match bpf_callback_t */ +extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64); + +__ADDRESSABLE(__bpf_callback_fn); + +/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */ +asm ( +" .pushsection .data..ro_after_init,\"aw\",@progbits \n" +" .type cfi_bpf_subprog_hash,@object \n" +" .globl cfi_bpf_subprog_hash \n" +" .p2align 2, 0x0 \n" +"cfi_bpf_subprog_hash: \n" +" .long __kcfi_typeid___bpf_callback_fn \n" +" .size cfi_bpf_subprog_hash, 4 \n" +" .popsection \n" +); + +u32 cfi_get_func_hash(void *func) +{ + u32 hash; + + func -= cfi_get_offset(); + switch (cfi_mode) { + case CFI_FINEIBT: + func += 7; + break; + case CFI_KCFI: + func += 1; + break; + default: + return 0; + } + + if (get_kernel_nofault(hash, func)) + return 0; + + return hash; +} +#endif + +#ifdef CONFIG_FINEIBT -static enum cfi_mode cfi_mode __ro_after_init = CFI_DEFAULT; static bool cfi_rand __ro_after_init = true; static u32 cfi_seed __ro_after_init; @@ -1159,8 +1227,11 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, goto err; if (cfi_rand) { - if (builtin) + if (builtin) { cfi_seed = get_random_u32(); + cfi_bpf_hash = cfi_rehash(cfi_bpf_hash); + cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash); + } ret = cfi_rand_preamble(start_cfi, end_cfi); if (ret) @@ -1612,6 +1683,10 @@ void __init alternative_instructions(void) */ paravirt_set_cap(); +#if defined(CONFIG_NUMA_AWARE_SPINLOCKS) + cna_configure_spin_lock_slowpath(); +#endif + /* * First patch paravirt functions, such that we overwrite the indirect * call with the direct call. diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 6dabb53f58a4456f5f9e6161cd9c6ab563d9faea..3d6d25b64bb39fa4532456afc37ccef89e9c512d 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -45,10 +45,19 @@ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4 #define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4 +#define PCI_DEVICE_ID_HYGON_18H_M05H_ROOT 0x14a0 +#define PCI_DEVICE_ID_HYGON_18H_M10H_ROOT 0x14c0 +#define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1 0x14b1 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4 0x14b4 +#define PCI_DEVICE_ID_HYGON_18H_M10H_DF_F4 0x14d4 +#define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5 0x14b5 + /* Protect the PCI config register pairs used for SMN. 
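
hygon_f18h_m4h(), introduced below, gates the new northbridge handling on a simple family/model window (family 0x18, models 0x4-0xf). A self-contained rendering of the same predicate, with made-up inputs::

  #include <stdbool.h>
  #include <stdio.h>

  /* Model-range test mirroring hygon_f18h_m4h() below:
   * family 0x18, models 0x4..0xf. */
  static bool f18h_m4h(unsigned int family, unsigned int model)
  {
          return family == 0x18 && model >= 0x4 && model <= 0xf;
  }

  int main(void)
  {
          printf("%d %d %d\n",
                 f18h_m4h(0x18, 0x4),   /* 1: lower bound */
                 f18h_m4h(0x18, 0x10),  /* 0: above range */
                 f18h_m4h(0x17, 0x5));  /* 0: wrong family */
          return 0;
  }
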
*/ static DEFINE_MUTEX(smn_mutex); static u32 *flush_words; +static u16 nb_num; static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, @@ -127,16 +136,25 @@ static const struct pci_device_id amd_nb_link_ids[] = { static const struct pci_device_id hygon_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_ROOT) }, {} }; static const struct pci_device_id hygon_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3) }, {} }; static const struct pci_device_id hygon_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F4) }, {} }; @@ -230,6 +248,228 @@ int amd_smn_write(u16 node, u32 address, u32 value) } EXPORT_SYMBOL_GPL(amd_smn_write); +bool hygon_f18h_m4h(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if (boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(hygon_f18h_m4h); + +u16 hygon_nb_num(void) +{ + return nb_num; +} +EXPORT_SYMBOL_GPL(hygon_nb_num); + +static int get_df_register(struct pci_dev *misc, u8 func, int offset, u32 *value) +{ + struct pci_dev *df_func = NULL; + u32 device; + int err; + + if (func == 1) { + switch (boot_cpu_data.x86_model) { + case 0x4: + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x5: + if (misc->device == PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + else + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x6: + case 0x7: + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + break; + default: + return -ENODEV; + } + } else if (func == 5) { + switch (boot_cpu_data.x86_model) { + case 0x6: + case 0x7: + device = PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5; + break; + default: + return -ENODEV; + } + } else { + return -ENODEV; + } + + while ((df_func = pci_get_device(misc->vendor, device, df_func))) + if (pci_domain_nr(df_func->bus) == pci_domain_nr(misc->bus) && + df_func->bus->number == misc->bus->number && + PCI_SLOT(df_func->devfn) == PCI_SLOT(misc->devfn)) + break; + + if (!df_func) { + pr_warn("Error getting DF F%d device.\n", func); + return -ENODEV; + } + + err = pci_read_config_dword(df_func, offset, value); + if (err) + pr_warn("Error reading DF F%d register.\n", func); + + return err; +} + +int get_df_id(struct pci_dev *misc, u8 *id) +{ + u32 value; + int ret; + + if (boot_cpu_data.x86_model == 0x6) { + /* F5x180[19:16]: DF ID */ + ret = get_df_register(misc, 5, 0x180, &value); + *id = (value >> 16) & 0xf; + } else { + /* F1x200[23:20]: DF ID */ + ret = get_df_register(misc, 1, 0x200, &value); + *id = (value >> 20) & 0xf; + } + + return ret; +} +EXPORT_SYMBOL_GPL(get_df_id); + +static u8 get_socket_num(struct pci_dev *misc) +{ + u32 value; + int ret; + + /* F1x200[7:0]: Which socket 
is present. */ + ret = get_df_register(misc, 1, 0x200, &value); + + return ret ? 0 : hweight8(value & 0xff); +} + +static int northbridge_init_f18h_m4h(const struct pci_device_id *root_ids, + const struct pci_device_id *misc_ids, + const struct pci_device_id *link_ids) +{ + struct pci_dev *root, *misc, *link; + struct pci_dev *root_first = NULL; + struct amd_northbridge *nb; + u16 roots_per_socket = 0; + u16 miscs_per_socket = 0; + u16 socket_num = 0; + u16 root_count = 0; + u16 misc_count = 0; + int err = -ENODEV; + u8 i, j, m, n; + u8 id; + + pr_info("Hygon Fam%xh Model%xh NB driver.\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + + misc = next_northbridge(NULL, misc_ids); + if (misc != NULL) { + socket_num = get_socket_num(misc); + pr_info("Socket number: %d\n", socket_num); + if (!socket_num) { + err = -ENODEV; + goto ret; + } + } else { + err = -ENODEV; + goto ret; + } + + misc = NULL; + while ((misc = next_northbridge(misc, misc_ids)) != NULL) + misc_count++; + + root = NULL; + while ((root = next_northbridge(root, root_ids)) != NULL) + root_count++; + + if (!root_count || !misc_count) { + err = -ENODEV; + goto ret; + } + + /* + * There should be _exactly_ N roots for each DF/SMN + * interface, and M DF/SMN interfaces in one socket. + */ + roots_per_socket = root_count / socket_num; + miscs_per_socket = misc_count / socket_num; + + if (!roots_per_socket || !miscs_per_socket) { + err = -ENODEV; + goto ret; + } + + nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL); + if (!nb) { + err = -ENOMEM; + goto ret; + } + + amd_northbridges.nb = nb; + amd_northbridges.num = misc_count; + + link = misc = root = NULL; + j = m = n = 0; + for (i = 0; i < amd_northbridges.num; i++) { + misc = next_northbridge(misc, misc_ids); + link = next_northbridge(link, link_ids); + + /* Only save the first PCI root device for each socket. */ + if (!(i % miscs_per_socket)) { + root_first = next_northbridge(root, root_ids); + root = root_first; + j = 1; + } + + if (get_df_id(misc, &id)) { + err = -ENODEV; + goto err; + } + pr_info("DF ID: %d\n", id); + + if (id < 4) { + /* Add the devices with id<4 from the tail. */ + node_to_amd_nb(misc_count - m - 1)->misc = misc; + node_to_amd_nb(misc_count - m - 1)->link = link; + node_to_amd_nb(misc_count - m - 1)->root = root_first; + m++; + } else { + node_to_amd_nb(n)->misc = misc; + node_to_amd_nb(n)->link = link; + node_to_amd_nb(n)->root = root_first; + n++; + } + + /* Skip the redundant PCI root devices per socket. 
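
The get_df_id()/get_socket_num() helpers above reduce to bit-field extraction from Data Fabric registers: F1x200[23:20] carries the DF ID (F5x180[19:16] on model 6), and F1x200[7:0] is a socket-present mask whose population count yields the socket count. In isolation, with an invented register value::

  #include <stdio.h>

  /* Generic field extraction, as in get_df_id()/get_socket_num() */
  static unsigned int field(unsigned int reg, int shift, unsigned int mask)
  {
          return (reg >> shift) & mask;
  }

  static int popcount8(unsigned int v)  /* stand-in for hweight8() */
  {
          int n = 0;

          for (v &= 0xff; v; v >>= 1)
                  n += v & 1;
          return n;
  }

  int main(void)
  {
          unsigned int f1x200 = 0x00300003;  /* hypothetical value */

          /* F1x200[23:20]: DF ID; F1x200[7:0]: socket-present mask */
          printf("df id   = %u\n", field(f1x200, 20, 0xf));
          printf("sockets = %d\n", popcount8(f1x200 & 0xff));
          return 0;
  }
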
*/ + while (j < roots_per_socket) { + root = next_northbridge(root, root_ids); + j++; + } + } + nb_num = n; + + return 0; + +err: + kfree(nb); + amd_northbridges.nb = NULL; + +ret: + pr_err("Hygon Fam%xh Model%xh northbridge init failed(%d)!\n", + boot_cpu_data.x86, boot_cpu_data.x86_model, err); + return err; +} static int amd_cache_northbridges(void) { @@ -250,6 +490,11 @@ static int amd_cache_northbridges(void) root_ids = hygon_root_ids; misc_ids = hygon_nb_misc_ids; link_ids = hygon_nb_link_ids; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return northbridge_init_f18h_m4h(root_ids, + misc_ids, link_ids); } misc = NULL; diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 032a84e2c3ccc7e345ed0c05193965549989510f..cd16228611ce8fcd356e281c1003391963674049 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -103,6 +103,7 @@ static struct apic apic_flat __ro_after_init = { .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_mem_read, .write = native_apic_mem_write, @@ -175,6 +176,7 @@ static struct apic apic_physflat __ro_after_init = { .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_mem_read, .write = native_apic_mem_write, diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index a44ba7209ef3a854e36444af9ad53c7314772fbb..edad86f32e38cb5fb36656494f4d2472d8ab7030 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -97,6 +97,14 @@ void native_send_call_func_ipi(const struct cpumask *mask) __apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR); } +void apic_send_nmi_to_offline_cpu(unsigned int cpu) +{ + if (WARN_ON_ONCE(!apic->nmi_to_offline_cpu)) + return; + if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask))) + return; + apic->send_IPI(cpu, NMI_VECTOR); +} #endif /* CONFIG_SMP */ static inline int __prepare_ICR2(unsigned int mask) diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index affbff65e49713d80e88f4097e83b031494c369d..a8306089c91bca12277b904e1b79ad38f1b85122 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -251,6 +251,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = { .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_msr_read, .write = native_apic_msr_write, diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 788cdb4ee394dae662e4618644e8e240b3666429..c8ac1b12b8ac6c2f8e68cecbc61237e30195babc 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -166,6 +166,7 @@ static struct apic apic_x2apic_phys __ro_after_init = { .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_msr_read, .write = native_apic_msr_write, diff --git a/arch/x86/kernel/cfi.c b/arch/x86/kernel/cfi.c index 8674a5c0c031d3dd3c0a2708ff39369efb52ff7e..e6bf78fac1462209be7f7115dc3f588bdd3a9c12 100644 --- a/arch/x86/kernel/cfi.c +++ b/arch/x86/kernel/cfi.c @@ -4,10 +4,10 @@ * * 
Copyright (C) 2022 Google LLC */ -#include +#include +#include #include #include -#include /* * Returns the target address and the expected type when regs->ip points diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 4350f6bfc0641ea7a6877cd9a224055a456753bd..dec6b0d9e711ba3e0cfd690411d3bff45edcd67f 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -25,6 +25,7 @@ obj-y += bugs.o obj-y += aperfmperf.o obj-y += cpuid-deps.o obj-y += umwait.o +obj-$(CONFIG_CPU_SUP_ZHAOXIN) += zxpause.o obj-$(CONFIG_PROC_FS) += proc.o obj-y += capflags.o powerflags.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 9413fb767c6a71601c8a2964677b648300b9aaf3..8118839daa08a3771a061c4348e9025edb2bfa52 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -34,73 +34,6 @@ */ static u32 nodes_per_socket = 1; -/* - * AMD errata checking - * - * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or - * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that - * have an OSVW id assigned, which it takes as first argument. Both take a - * variable number of family-specific model-stepping ranges created by - * AMD_MODEL_RANGE(). - * - * Example: - * - * const int amd_erratum_319[] = - * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), - * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), - * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); - */ - -#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } -#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 } -#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ - ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) -#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) -#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) -#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) - -static const int amd_erratum_400[] = - AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), - AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); - -static const int amd_erratum_383[] = - AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); - -static const int amd_erratum_1485[] = - AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf), - AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf)); - -static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) -{ - int osvw_id = *erratum++; - u32 range; - u32 ms; - - if (osvw_id >= 0 && osvw_id < 65536 && - cpu_has(cpu, X86_FEATURE_OSVW)) { - u64 osvw_len; - - rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); - if (osvw_id < osvw_len) { - u64 osvw_bits; - - rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), - osvw_bits); - return osvw_bits & (1ULL << (osvw_id & 0x3f)); - } - } - - /* OSVW unavailable or ID unknown, match family-model-stepping range */ - ms = (cpu->x86_model << 4) | cpu->x86_stepping; - while ((range = *erratum++)) - if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && - (ms >= AMD_MODEL_RANGE_START(range)) && - (ms <= AMD_MODEL_RANGE_END(range))) - return true; - - return false; -} - static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) { u32 gprs[8] = { 0 }; @@ -609,7 +542,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) /* Figure out Zen generations: */ switch (c->x86) { - case 0x17: { + case 0x17: switch (c->x86_model) { case 0x00 ... 0x2f: case 0x50 ... 
0x5f: @@ -625,8 +558,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) goto warn; } break; - } - case 0x19: { + + case 0x19: switch (c->x86_model) { case 0x00 ... 0x0f: case 0x20 ... 0x5f: @@ -640,7 +573,19 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) goto warn; } break; - } + + case 0x1a: + switch (c->x86_model) { + case 0x00 ... 0x2f: + case 0x40 ... 0x4f: + case 0x70 ... 0x7f: + setup_force_cpu_cap(X86_FEATURE_ZEN5); + break; + default: + goto warn; + } + break; + default: break; } @@ -772,15 +717,6 @@ static void early_init_amd(struct cpuinfo_x86 *c) if (c->x86 == 0x16 && c->x86_model <= 0xf) msr_set_bit(MSR_AMD64_LS_CFG, 15); - /* - * Check whether the machine is affected by erratum 400. This is - * used to select the proper idle routine and to enable the check - * whether the machine is affected in arch_post_acpi_init(), which - * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check. - */ - if (cpu_has_amd_erratum(c, amd_erratum_400)) - set_cpu_bug(c, X86_BUG_AMD_E400); - early_detect_mem_encrypt(c); /* Re-enable TopologyExtensions if switched off by BIOS */ @@ -847,6 +783,16 @@ static void init_amd_k8(struct cpuinfo_x86 *c) msr_set_bit(MSR_K7_HWCR, 6); #endif set_cpu_bug(c, X86_BUG_SWAPGS_FENCE); + + /* + * Check models and steppings affected by erratum 400. This is + * used to select the proper idle routine and to enable the + * check whether the machine is affected in arch_post_acpi_subsys_init() + * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check. + */ + if (c->x86_model > 0x41 || + (c->x86_model == 0x41 && c->x86_stepping >= 0x2)) + setup_force_cpu_bug(X86_BUG_AMD_E400); } static void init_amd_gh(struct cpuinfo_x86 *c) @@ -880,8 +826,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c) */ msr_clear_bit(MSR_AMD64_BU_CFG2, 24); - if (cpu_has_amd_erratum(c, amd_erratum_383)) - set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); + set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); + + /* + * Check models and steppings affected by erratum 400. This is + * used to select the proper idle routine and to enable the + * check whether the machine is affected in arch_post_acpi_subsys_init() + * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check. + */ + if (c->x86_model > 0x2 || + (c->x86_model == 0x2 && c->x86_stepping >= 0x1)) + setup_force_cpu_bug(X86_BUG_AMD_E400); } static void init_amd_ln(struct cpuinfo_x86 *c) @@ -997,10 +952,8 @@ void init_spectral_chicken(struct cpuinfo_x86 *c) * * This suppresses speculation from the middle of a basic block, i.e. it * suppresses non-branch predictions. - * - * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H */ - if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) { + if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) { value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT; wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value); @@ -1009,7 +962,7 @@ void init_spectral_chicken(struct cpuinfo_x86 *c) #endif } -static void init_amd_zn(struct cpuinfo_x86 *c) +static void init_amd_zen_common(void) { setup_force_cpu_cap(X86_FEATURE_ZEN); #ifdef CONFIG_NUMA @@ -1027,14 +980,6 @@ static void init_amd_zen1(struct cpuinfo_x86 *c) /* Erratum 1076: CPB feature bit not being set in CPUID. */ if (!cpu_has(c, X86_FEATURE_CPB)) set_cpu_cap(c, X86_FEATURE_CPB); - - /* - * Zen3 (Fam19 model < 0x10) parts are not susceptible to - * Branch Type Confusion, but predate the allocation of the - * BTC_NO bit. 
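
With the cpu_has_amd_erratum() table walk removed, the K8 erratum 400 test above becomes an open-coded model/stepping comparison. The predicate on its own, exercised with made-up values::

  #include <stdbool.h>
  #include <stdio.h>

  /* Open-coded equivalent of the K8 erratum 400 range check above:
   * affected from model/stepping 0x41/0x2 onwards. */
  static bool k8_has_erratum_400(unsigned int model, unsigned int stepping)
  {
          return model > 0x41 || (model == 0x41 && stepping >= 0x2);
  }

  int main(void)
  {
          printf("%d %d %d\n",
                 k8_has_erratum_400(0x40, 0xf),   /* 0: below range */
                 k8_has_erratum_400(0x41, 0x2),   /* 1: lower bound */
                 k8_has_erratum_400(0x62, 0x0));  /* 1: above */
          return 0;
  }
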
- */ - if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO)) - set_cpu_cap(c, X86_FEATURE_BTC_NO); } pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); @@ -1077,19 +1022,36 @@ static void zen2_zenbleed_check(struct cpuinfo_x86 *c) } else { msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); } + } static void init_amd_zen2(struct cpuinfo_x86 *c) { + init_spectral_chicken(c); fix_erratum_1386(c); zen2_zenbleed_check(c); } static void init_amd_zen3(struct cpuinfo_x86 *c) { + if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { + /* + * Zen3 (Fam19 model < 0x10) parts are not susceptible to + * Branch Type Confusion, but predate the allocation of the + * BTC_NO bit. + */ + if (!cpu_has(c, X86_FEATURE_BTC_NO)) + set_cpu_cap(c, X86_FEATURE_BTC_NO); + } } static void init_amd_zen4(struct cpuinfo_x86 *c) +{ + if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) + msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT); +} + +static void init_amd_zen5(struct cpuinfo_x86 *c) { } @@ -1126,11 +1088,15 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x12: init_amd_ln(c); break; case 0x15: init_amd_bd(c); break; case 0x16: init_amd_jg(c); break; - case 0x17: init_spectral_chicken(c); - fallthrough; - case 0x19: init_amd_zn(c); break; } + /* + * Save up on some future enablement work and do common Zen + * settings. + */ + if (c->x86 >= 0x17) + init_amd_zen_common(); + if (boot_cpu_has(X86_FEATURE_ZEN1)) init_amd_zen1(c); else if (boot_cpu_has(X86_FEATURE_ZEN2)) @@ -1139,6 +1105,8 @@ static void init_amd(struct cpuinfo_x86 *c) init_amd_zen3(c); else if (boot_cpu_has(X86_FEATURE_ZEN4)) init_amd_zen4(c); + else if (boot_cpu_has(X86_FEATURE_ZEN5)) + init_amd_zen5(c); /* * Enable workaround for FXSAVE leak on CPUs @@ -1207,10 +1175,6 @@ static void init_amd(struct cpuinfo_x86 *c) cpu_has(c, X86_FEATURE_AUTOIBRS)) WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0); - if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && - cpu_has_amd_erratum(c, amd_erratum_1485)) - msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT); - /* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */ clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE); } diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 8f86eacf69f7c965a92996ddf669c02b2d62b751..66c3ba27750732dc96cd88c2b0a2b1cd777121b0 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -708,11 +708,31 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu) if (!cpuid_edx(0x80000006)) return; - /* - * LLC is at the core complex level. - * Core complex ID is ApicId[3] for these processors. - */ - per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + if (c->x86_model < 0x5 || + (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) { + /* + * LLC is at the core complex level. + * Core complex ID is ApicId[3] for these processors. + */ + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + } else { + /* + * LLC ID is calculated from the number of threads + * sharing the cache. 
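
For the newer Hygon models handled below, the LLC ID is the APIC ID shifted right by ceil(log2(threads sharing the LLC)), where the sharing count is CPUID 0x8000001d EAX[25:14] plus one. A standalone sketch (the CPUID value is invented)::

  #include <stdio.h>

  /* ceil(log2(n)): userspace stand-in for the kernel's get_count_order() */
  static int count_order(unsigned int n)
  {
          int bits = 0;

          while ((1u << bits) < n)
                  bits++;
          return bits;
  }

  int main(void)
  {
          /* Hypothetical CPUID 0x8000001d EAX for the LLC leaf:
           * EAX[25:14] + 1 = threads sharing the cache. */
          unsigned int eax = 0x0003c000;
          unsigned int sharing = ((eax >> 14) & 0xfff) + 1;   /* 16 */
          unsigned int apicid = 0x1b;

          printf("llc_id for apicid 0x%x = %u\n",
                 apicid, apicid >> count_order(sharing));     /* 1 */
          return 0;
  }
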
+		 */
+		u32 eax, ebx, ecx, edx, num_sharing_cache = 0;
+		u32 llc_index = find_num_cache_leaves(c) - 1;
+
+		cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx);
+		if (eax)
+			num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
+
+		if (num_sharing_cache) {
+			int bits = get_count_order(num_sharing_cache);
+
+			per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
+		}
+	}
 }
 
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 345f7d905db677291f7f8eb9b33b692263afe447..b15bcf21ac7b32704e21ffb79fba0b2a2dfe7f43 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -109,6 +109,19 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 	}
+
+	/*
+	 * These CPUs declare support for the SSE4.2 instruction set, but
+	 * their CRC32C instruction implementation has low performance.
+	 */
+	if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b))
+		set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF);
+
+	if (cpuid_eax(0xC0000000) >= 0xC0000006)
+		c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006);
+
+	if (detect_extended_topology_early(c) < 0)
+		detect_ht_early(c);
 }
 
 static void init_centaur(struct cpuinfo_x86 *c)
@@ -127,11 +140,14 @@ static void init_centaur(struct cpuinfo_x86 *c)
 	clear_cpu_cap(c, 0*32+31);
 #endif
 	early_init_centaur(c);
+	detect_extended_topology(c);
 	init_intel_cacheinfo(c);
-	detect_num_cpu_cores(c);
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		detect_num_cpu_cores(c);
 #ifdef CONFIG_X86_32
 	detect_ht(c);
 #endif
+	}
 
 	if (c->cpuid_level > 9) {
 		unsigned int eax = cpuid_eax(10);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8bc90a501e7b801ffbc06e5dbc87233eae7ff184..8bb0d2bd8f2c17f4ad04404aa4e2883f03108747 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -86,6 +86,9 @@ EXPORT_SYMBOL_GPL(get_llc_id);
 /* L2 cache ID of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id) = BAD_APICID;
 
+DEFINE_STATIC_KEY_FALSE(hygon_lmc_key);
+EXPORT_SYMBOL_GPL(hygon_lmc_key);
+
 static struct ppin_info {
 	int feature;
 	int msr_ppin_ctl;
@@ -2224,8 +2227,6 @@ static inline void setup_getcpu(int cpu)
 }
 
 #ifdef CONFIG_X86_64
-static inline void ucode_cpu_init(int cpu) { }
-
 static inline void tss_setup_ist(struct tss_struct *tss)
 {
 	/* Set up the per-CPU TSS IST stacks */
@@ -2236,16 +2237,8 @@ static inline void tss_setup_ist(struct tss_struct *tss)
 	/* Only mapped when SEV-ES is active */
 	tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
 }
-
 #else /* CONFIG_X86_64 */
-
-static inline void ucode_cpu_init(int cpu)
-{
-	show_ucode_info_early();
-}
-
 static inline void tss_setup_ist(struct tss_struct *tss) { }
-
 #endif /* !CONFIG_X86_64 */
 
 static inline void tss_setup_io_bitmap(struct tss_struct *tss)
@@ -2301,8 +2294,6 @@ void cpu_init(void)
 	struct task_struct *cur = current;
 	int cpu = raw_smp_processor_id();
 
-	ucode_cpu_init(cpu);
-
 #ifdef CONFIG_NUMA
 	if (this_cpu_read(numa_node) == 0 &&
 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -2410,6 +2401,17 @@ void arch_smt_update(void)
 	apic_smt_update();
 }
 
+#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \
+	defined(CONFIG_X86_HYGON_LMC_AVX2_ON)
+static inline void update_lmc_branch_cond(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		static_branch_enable(&hygon_lmc_key);
+}
+#else
+static inline void update_lmc_branch_cond(void) { }
+#endif
+
 void __init arch_cpu_finalize_init(void)
 {
 	identify_boot_cpu();
@@
-2428,6 +2430,7 @@ void __init arch_cpu_finalize_init(void) cpu_select_mitigations(); arch_smt_update(); + update_lmc_branch_cond(); if (IS_ENABLED(CONFIG_X86_32)) { /* diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index 6fb6d8a57cecafe2d0438222b2b9cb8dc73a2c05..7d8733874218d0fd6e0853a216659446ad96803d 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -82,6 +82,7 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_XFD, X86_FEATURE_XGETBV1 }, { X86_FEATURE_AMX_TILE, X86_FEATURE_XFD }, { X86_FEATURE_SHSTK, X86_FEATURE_XSAVES }, + { X86_FEATURE_CRC32C_LOW_PERF, X86_FEATURE_XMM4_2 }, {} }; diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index 03851240c3e36d4ed5e9ad250eee76410830d6e9..3e0fbf510f1c6db7aee1e4fcacf6c3269174d415 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -17,6 +17,7 @@ enum vmx_feature_leafs { SECONDARY_CTLS, TERTIARY_CTLS_LOW, TERTIARY_CTLS_HIGH, + ZX_TERTIARY_CTLS, NR_VMX_FEATURE_WORDS, }; @@ -97,6 +98,15 @@ static void init_vmx_capabilities(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_EPT_AD); if (c->vmx_capability[MISC_FEATURES] & VMX_F(VPID)) set_cpu_cap(c, X86_FEATURE_VPID); + /* + * Initialize Zhaoxin Tertiary Exec Control feature flags. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &supported, &ign); + if (supported & MSR_ZX_VMCS_EXEC_CTL3) + c->vmx_capability[ZX_TERTIARY_CTLS] |= VMX_F(GUEST_ZXPAUSE); + } } #endif /* CONFIG_X86_VMX_FEATURE_NAMES */ diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 6e738759779e815e0a479bb9801efc7ea33ba793..b3b26ded3bc965ca385b376baa885520017678f9 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -15,6 +15,10 @@ #include #include #include +#include +#include +#include +#include #include "cpu.h" @@ -80,12 +84,14 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) c->x86_max_cores /= smp_num_siblings; /* - * In case leaf B is available, use it to derive + * From model 0x4, leaf B is available, so use it to derive * topology information. */ err = detect_extended_topology(c); - if (!err) + if (!err) { c->x86_coreid_bits = get_count_order(c->x86_max_cores); + __max_die_per_package = nodes_per_socket; + } /* * Socket ID is ApicId[6] for the processors with model <= 0x3 @@ -240,6 +246,68 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) x86_amd_ls_cfg_ssbd_mask = 1ULL << 10; } } + resctrl_cpu_detect(c); +} + +static void init_hygon_cap(struct cpuinfo_x86 *c) +{ + /* Test for Extended Feature Flags presence */ + if (cpuid_eax(0x8C860000) >= 0x8C860000) { + /* + * Store Extended Feature Flags of the CPU capability + * bit array + */ + c->x86_capability[CPUID_8C86_0000_EDX] = cpuid_edx(0x8C860000); + } +} + +static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) +{ + u64 msr; + u32 eax; + + eax = cpuid_eax(0x8000001f); + + /* Check whether SME or CSV is supported */ + if (!(eax & (BIT(0) | BIT(1)))) + return; + + /* If BIOS has not enabled SME then don't advertise the SME feature. */ + rdmsrl(MSR_AMD64_SYSCFG, msr); + if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) + goto clear_all; + + /* + * Always adjust physical address bits. Even though this will be a + * value above 32-bits this is still done for CONFIG_X86_32 so that + * accurate values are reported. 
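
The physical-address adjustment that follows subtracts the encryption-reserved address bits reported in CPUID 0x8000001f EBX[11:6]. The arithmetic in isolation, with a hypothetical EBX value::

  #include <stdio.h>

  /* Physical-address-bit reduction applied when memory encryption
   * is enabled: EBX[11:6] of CPUID 0x8000001f encodes how many
   * address bits the encryption mask consumes. */
  int main(void)
  {
          unsigned int ebx = 0x000001c5;  /* invented: bits [11:6] = 7 */
          int phys_bits = 48;

          phys_bits -= (ebx >> 6) & 0x3f;
          printf("effective phys bits: %d\n", phys_bits);  /* 41 */
          return 0;
  }
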
+ */ + c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f; + + /* Don't advertise SME and CSV features under CONFIG_X86_32. */ + if (IS_ENABLED(CONFIG_X86_32)) + goto clear_all; + + /* Clear the SME feature flag if the kernel is not using it. */ + if (!sme_me_mask) + setup_clear_cpu_cap(X86_FEATURE_SME); + + /* + * If BIOS has not enabled CSV then don't advertise the CSV and CSV2 + * feature. + */ + rdmsrl(MSR_K7_HWCR, msr); + if (!(msr & MSR_K7_HWCR_SMMLOCK)) + goto clear_csv; + + return; + +clear_all: + setup_clear_cpu_cap(X86_FEATURE_SME); +clear_csv: + setup_clear_cpu_cap(X86_FEATURE_SEV); + setup_clear_cpu_cap(X86_FEATURE_SEV_ES); + setup_clear_cpu_cap(X86_FEATURE_CSV3); } static void early_init_hygon(struct cpuinfo_x86 *c) @@ -290,6 +358,8 @@ static void early_init_hygon(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_VMMCALL); hygon_get_topology_early(c); + + early_detect_mem_encrypt(c); } static void init_hygon(struct cpuinfo_x86 *c) @@ -351,6 +421,7 @@ static void init_hygon(struct cpuinfo_x86 *c) /* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */ clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE); + init_hygon_cap(c); } static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c) @@ -396,3 +467,89 @@ static const struct cpu_dev hygon_cpu_dev = { }; cpu_dev_register(hygon_cpu_dev); + +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +struct hygon_c86_info { + unsigned int nt_cpy_mini_len; +}; + +static struct hygon_c86_info hygon_c86_data = { .nt_cpy_mini_len = 0 }; + +void set_c86_features_para_invalid(void) +{ + memset((void *)&hygon_c86_data, 0, sizeof(struct hygon_c86_info)); +} + +unsigned int get_nt_block_copy_mini_len(void) +{ + unsigned int mini_len = hygon_c86_data.nt_cpy_mini_len; + + return mini_len; +} +EXPORT_SYMBOL_GPL(get_nt_block_copy_mini_len); + +static ssize_t show_nt_cpy_mini_len(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return snprintf(buf, 40, "%d\n", hygon_c86_data.nt_cpy_mini_len); +} + +static ssize_t store_nt_cpy_mini_len(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long val; + ssize_t ret; + + ret = kstrtoul(buf, 0, &val); + if (ret) + return ret; + + hygon_c86_data.nt_cpy_mini_len = val; + + return count; +} + +static struct kobj_attribute nt_cpy_mini_len_attribute = __ATTR( + nt_cpy_mini_len, 0600, show_nt_cpy_mini_len, store_nt_cpy_mini_len); + +static struct attribute *c86_default_attrs[] = { + &nt_cpy_mini_len_attribute.attr, NULL +}; + +const struct attribute_group hygon_c86_attr_group = { + .attrs = c86_default_attrs, + .name = "hygon_c86", +}; + +static struct kobject *c86_features_kobj; +static int __init kobject_hygon_c86_init(void) +{ + int ret; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + goto err_out; + + c86_features_kobj = kobject_create_and_add("c86_features", NULL); + + if (c86_features_kobj) { + ret = sysfs_create_group(c86_features_kobj, + &hygon_c86_attr_group); + if (ret) + goto err_out; + } + + return 0; +err_out: + set_c86_features_para_invalid(); + if (c86_features_kobj) { + sysfs_remove_group(c86_features_kobj, &hygon_c86_attr_group); + kobject_del(c86_features_kobj); + } + + return -1; +} +subsys_initcall(kobject_hygon_c86_init); + +#endif diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 4752a9f17ef6154a8a9557e36182d159a60c24f3..2d7637a4a157c63e613200db0362ad268d144c76 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -391,19 
+391,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_PGE);
 	}
 
-	if (c->cpuid_level >= 0x00000001) {
-		u32 eax, ebx, ecx, edx;
-
-		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
-		/*
-		 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
-		 * apicids which are reserved per package. Store the resulting
-		 * shift value for the package management code.
-		 */
-		if (edx & (1U << 28))
-			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
-	}
-
 	check_memory_type_self_snoop_errata(c);
 
 	/*
diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c
index 8ed341714686a842be633b9356cbf88e87b933bc..c77cffffc6961a80fe5fefa74fc41082c59c9512 100644
--- a/arch/x86/kernel/cpu/mce/apei.c
+++ b/arch/x86/kernel/cpu/mce/apei.c
@@ -63,6 +63,173 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
 }
 EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);
 
+void zx_apei_mce_report_mem_error(struct cper_sec_mem_err *mem_err)
+{
+	struct mce m;
+	int apei_error = 0;
+
+	if (boot_cpu_data.x86 != 7 || boot_cpu_data.x86_model != 91)
+		return;
+
+	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
+		return;
+
+	mce_setup(&m);
+	m.misc = 0;
+	m.misc = mem_err->module;
+	m.addr = mem_err->physical_addr;
+	if (mem_err->card == 0)
+		m.bank = 9;
+	else
+		m.bank = 10;
+
+	switch (mem_err->error_type) {
+	case 2:
+		m.status = 0x9c20004000010080;
+		break;
+	case 3:
+		m.status = 0xbe40000000020090;
+		apei_error = apei_write_mce(&m);
+		break;
+	case 8:
+		if (mem_err->requestor_id == 2) {
+			m.status = 0x98200040000400b0;
+		} else if (mem_err->requestor_id == 3) {
+			m.status = 0xba400000000600a0;
+			apei_error = apei_write_mce(&m);
+		} else if (mem_err->requestor_id == 4) {
+			m.status = 0x98200100000300b0;
+		} else if (mem_err->requestor_id == 5) {
+			m.status = 0xba000000000500b0;
+			apei_error = apei_write_mce(&m);
+		} else {
+			pr_info("Undefined Parity error\n");
+		}
+		break;
+	case 10:
+		if (mem_err->requestor_id == 6) {
+			m.status = 0xba400000000700a0;
+			apei_error = apei_write_mce(&m);
+		} else if (mem_err->requestor_id == 7) {
+			m.status = 0xba000000000800b0;
+			apei_error = apei_write_mce(&m);
+		} else {
+			pr_info("Undefined dvad error\n");
+		}
+		break;
+	case 13:
+		m.status = 0x9c200040000100c0;
+		break;
+	case 14:
+		m.status = 0xbd000000000200c0;
+		apei_error = apei_write_mce(&m);
+		break;
+	}
+	mce_log(&m);
+}
+EXPORT_SYMBOL_GPL(zx_apei_mce_report_mem_error);
+
+void zx_apei_mce_report_pcie_error(int severity, struct cper_sec_pcie *pcie_err)
+{
+	struct mce m;
+	int apei_error = 0;
+
+	if (boot_cpu_data.x86 != 7 || boot_cpu_data.x86_model != 91)
+		return;
+
+	mce_setup(&m);
+	m.addr = 0;
+	m.misc = 0;
+	m.misc |= (u64)pcie_err->device_id.segment << 32;
+	m.misc |= pcie_err->device_id.bus << 24;
+	m.misc |= pcie_err->device_id.device << 19;
+	m.misc |= pcie_err->device_id.function << 16;
+	m.bank = 6;
+
+	switch (severity) {
+	case 1:
+		m.status = 0x9820004000020e0b;
+		break;
+	case 2:
+		m.status = 0xba20000000010e0b;
+		break;
+	case 3:
+		m.status = 0xbd20000000000e0b;
+		apei_error = apei_write_mce(&m);
+		break;
+	default:
+		pr_info("Undefined PCIe error\n");
+		break;
+	}
+	mce_log(&m);
+}
+EXPORT_SYMBOL_GPL(zx_apei_mce_report_pcie_error);
+
+void zx_apei_mce_report_zdi_error(struct cper_sec_proc_generic *zdi_err)
+{
+	struct mce m;
+	int apei_error = 0;
+
+	if (boot_cpu_data.x86 != 7 || boot_cpu_data.x86_model != 91)
+		return;
+
+	mce_setup(&m);
+	m.misc = 0;
+	m.misc |= (zdi_err->requestor_id & 0xff) << 19;
+	m.misc |= ((zdi_err->requestor_id &
0xff00) >> 8) >> 24; + m.bank = 5; + switch (zdi_err->responder_id) { + case 2: + m.status = 0xba00000000040e0f; + apei_error = apei_write_mce(&m); + break; + case 3: + m.status = 0xba00000000030e0f; + apei_error = apei_write_mce(&m); + break; + case 4: + m.status = 0xba00000000020e0f; + apei_error = apei_write_mce(&m); + break; + case 5: + m.status = 0xba00000000010e0f; + apei_error = apei_write_mce(&m); + break; + case 6: + m.status = 0x9820004000090e0f; + break; + case 7: + m.status = 0x9820004000080e0f; + break; + case 8: + m.status = 0x9820004000070e0f; + break; + case 9: + m.status = 0x9820004000060e0f; + break; + case 10: + m.status = 0x9820004000050e0f; + break; + case 11: + case 12: + case 13: + case 14: + case 15: + m.status = 0x98200040000b0e0f; + break; + case 16: + case 17: + case 18: + m.status = 0x98200040000c0e0f; + break; + default: + pr_info("Undefined ZDI Error\n"); + break; + } + mce_log(&m); +} +EXPORT_SYMBOL_GPL(zx_apei_mce_report_zdi_error); + int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id) { const u64 *i_mce = ((const u64 *) (ctx_info + 1)); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index e103c227acd3ae4f43b128030461d8accc4ae38c..7730f006715a5be487811224025f11d8cfc1ca9d 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -482,7 +482,8 @@ int mce_usable_address(struct mce *m) /* Checks after this one are Intel/Zhaoxin-specific: */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && - boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 1; if (!(m->status & MCI_STATUS_MISCV)) @@ -506,6 +507,7 @@ bool mce_is_memory_error(struct mce *m) return amd_mce_is_memory_error(m); case X86_VENDOR_INTEL: + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: /* * Intel SDM Volume 3B - 15.9.2 Compound Error Codes @@ -1231,7 +1233,8 @@ static noinstr bool mce_check_crashing_cpu(void) mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS); - if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { if (mcgstatus & MCG_STATUS_LMCES) return false; } @@ -1505,7 +1508,8 @@ noinstr void do_machine_check(struct pt_regs *regs) * on Intel, Zhaoxin only. */ if (m.cpuvendor == X86_VENDOR_INTEL || - m.cpuvendor == X86_VENDOR_ZHAOXIN) + m.cpuvendor == X86_VENDOR_CENTAUR || + m.cpuvendor == X86_VENDOR_ZHAOXIN) lmce = m.mcgstatus & MCG_STATUS_LMCES; /* @@ -1932,7 +1936,8 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) mce_flags.skx_repmov_quirk = 1; } - if (c->x86_vendor == X86_VENDOR_ZHAOXIN) { + if (c->x86_vendor == X86_VENDOR_CENTAUR || + c->x86_vendor == X86_VENDOR_ZHAOXIN) { /* * All newer Zhaoxin CPUs support MCE broadcasting. Enable * synchronization with a one second timeout. @@ -1941,6 +1946,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) if (cfg->monarch_timeout < 0) cfg->monarch_timeout = USEC_PER_SEC; } + mca_cfg.bios_cmci_threshold = 1; } if (cfg->monarch_timeout < 0) @@ -1985,21 +1991,6 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) } } -static void mce_centaur_feature_init(struct cpuinfo_x86 *c) -{ - struct mca_config *cfg = &mca_cfg; - - /* - * All newer Centaur CPUs support MCE broadcasting. Enable - * synchronization with a one second timeout. 
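
The zx_apei_* reporters above pack the failing device's PCI address into mce.misc at fixed bit positions: segment in bits 63:32, bus in 31:24, device in 23:19 and function in 18:16. A small self-contained encoder::

  #include <stdint.h>
  #include <stdio.h>

  /* mce.misc layout used by zx_apei_mce_report_pcie_error() above */
  static uint64_t pack_pci_misc(uint16_t seg, uint8_t bus,
                                uint8_t dev, uint8_t fn)
  {
          uint64_t misc = 0;

          misc |= (uint64_t)seg << 32;
          misc |= (uint64_t)bus << 24;
          misc |= (uint64_t)(dev & 0x1f) << 19;
          misc |= (uint64_t)(fn & 0x7) << 16;
          return misc;
  }

  int main(void)
  {
          /* 0000:6b:00.1, for example */
          printf("misc = 0x%016llx\n",
                 (unsigned long long)pack_pci_misc(0, 0x6b, 0, 1));
          return 0;
  }
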
- */ - if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || - c->x86 > 6) { - if (cfg->monarch_timeout < 0) - cfg->monarch_timeout = USEC_PER_SEC; - } -} - static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); @@ -2047,9 +2038,6 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) break; case X86_VENDOR_CENTAUR: - mce_centaur_feature_init(c); - break; - case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_init(c); break; @@ -2066,6 +2054,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) mce_intel_feature_clear(c); break; + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_clear(c); break; @@ -2133,10 +2122,16 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs) static __always_inline void exc_machine_check_user(struct pt_regs *regs) { + irqentry_state_t irq_state; + irqentry_enter_from_user_mode(regs); + irq_state = irqentry_nmi_enter(regs); + do_machine_check(regs); + irqentry_nmi_exit(regs, irq_state); + irqentry_exit_to_user_mode(regs); } @@ -2349,9 +2344,10 @@ static void vendor_disable_error_reporting(void) * controller (iMC), etc. */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || - boot_cpu_data.x86_vendor == X86_VENDOR_AMD || - boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || + boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) return; mce_disable_error_reporting(); diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index f5323551c1a9a9aab0a87de057db19f6e1819b94..e013dd5162fcbd86e48276be6931a54d27095743 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -93,7 +93,8 @@ static int cmci_supported(int *banks) * makes sure none of the backdoors are entered otherwise. */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && - boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 0; if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index bbd1dc38ea0316c9ce22c042b4d1e235a3760571..2ba4f7dd445a7586d8abc48bdc61b89359ed5216 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -37,6 +37,16 @@ #include "internal.h" +struct ucode_patch { + struct list_head plist; + void *data; + unsigned int size; + u32 patch_id; + u16 equiv_cpu; +}; + +static LIST_HEAD(microcode_cache); + #define UCODE_MAGIC 0x00414d44 #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 #define UCODE_UCODE_TYPE 0x00000001 @@ -121,24 +131,20 @@ static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig) /* * Check whether there is a valid microcode container file at the beginning - * of @buf of size @buf_size. Set @early to use this function in the early path. + * of @buf of size @buf_size. 
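
verify_container(), shown next with the `early` parameter dropped, is a length check followed by a magic-number compare at the start of the buffer. A userspace equivalent (a little-endian host and the driver's CONTAINER_HDR_SZ of 12 are assumed)::

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define UCODE_MAGIC      0x00414d44
  #define CONTAINER_HDR_SZ 12

  /* Sketch of verify_container(): reject short buffers, then
   * compare the leading 32-bit magic. */
  static bool verify_container(const uint8_t *buf, size_t buf_size)
  {
          uint32_t magic;

          if (buf_size <= CONTAINER_HDR_SZ)
                  return false;

          memcpy(&magic, buf, sizeof(magic));
          return magic == UCODE_MAGIC;
  }

  int main(void)
  {
          uint8_t good[16] = { 0x44, 0x4d, 0x41, 0x00 };  /* magic, LE */

          printf("%d %d\n",
                 verify_container(good, sizeof(good)),  /* 1 */
                 verify_container(good, 4));            /* 0: too short */
          return 0;
  }
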
*/ -static bool verify_container(const u8 *buf, size_t buf_size, bool early) +static bool verify_container(const u8 *buf, size_t buf_size) { u32 cont_magic; if (buf_size <= CONTAINER_HDR_SZ) { - if (!early) - pr_debug("Truncated microcode container header.\n"); - + pr_debug("Truncated microcode container header.\n"); return false; } cont_magic = *(const u32 *)buf; if (cont_magic != UCODE_MAGIC) { - if (!early) - pr_debug("Invalid magic value (0x%08x).\n", cont_magic); - + pr_debug("Invalid magic value (0x%08x).\n", cont_magic); return false; } @@ -147,23 +153,20 @@ static bool verify_container(const u8 *buf, size_t buf_size, bool early) /* * Check whether there is a valid, non-truncated CPU equivalence table at the - * beginning of @buf of size @buf_size. Set @early to use this function in the - * early path. + * beginning of @buf of size @buf_size. */ -static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) +static bool verify_equivalence_table(const u8 *buf, size_t buf_size) { const u32 *hdr = (const u32 *)buf; u32 cont_type, equiv_tbl_len; - if (!verify_container(buf, buf_size, early)) + if (!verify_container(buf, buf_size)) return false; cont_type = hdr[1]; if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) { - if (!early) - pr_debug("Wrong microcode container equivalence table type: %u.\n", - cont_type); - + pr_debug("Wrong microcode container equivalence table type: %u.\n", + cont_type); return false; } @@ -172,9 +175,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) equiv_tbl_len = hdr[2]; if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) || buf_size < equiv_tbl_len) { - if (!early) - pr_debug("Truncated equivalence table.\n"); - + pr_debug("Truncated equivalence table.\n"); return false; } @@ -183,22 +184,19 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) /* * Check whether there is a valid, non-truncated microcode patch section at the - * beginning of @buf of size @buf_size. Set @early to use this function in the - * early path. + * beginning of @buf of size @buf_size. * * On success, @sh_psize returns the patch size according to the section header, * to the caller. 
*/ static bool -__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early) +__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize) { u32 p_type, p_size; const u32 *hdr; if (buf_size < SECTION_HDR_SIZE) { - if (!early) - pr_debug("Truncated patch section.\n"); - + pr_debug("Truncated patch section.\n"); return false; } @@ -207,17 +205,13 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early p_size = hdr[1]; if (p_type != UCODE_UCODE_TYPE) { - if (!early) - pr_debug("Invalid type field (0x%x) in container file section header.\n", - p_type); - + pr_debug("Invalid type field (0x%x) in container file section header.\n", + p_type); return false; } if (p_size < sizeof(struct microcode_header_amd)) { - if (!early) - pr_debug("Patch of size %u too short.\n", p_size); - + pr_debug("Patch of size %u too short.\n", p_size); return false; } @@ -269,7 +263,7 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size * 0: success */ static int -verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early) +verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size) { struct microcode_header_amd *mc_hdr; unsigned int ret; @@ -277,7 +271,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea u16 proc_id; u8 patch_fam; - if (!__verify_patch_section(buf, buf_size, &sh_psize, early)) + if (!__verify_patch_section(buf, buf_size, &sh_psize)) return -1; /* @@ -292,16 +286,13 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea * size sh_psize, as the section claims. */ if (buf_size < sh_psize) { - if (!early) - pr_debug("Patch of size %u truncated.\n", sh_psize); - + pr_debug("Patch of size %u truncated.\n", sh_psize); return -1; } ret = __verify_patch_size(family, sh_psize, buf_size); if (!ret) { - if (!early) - pr_debug("Per-family patch size mismatch.\n"); + pr_debug("Per-family patch size mismatch.\n"); return -1; } @@ -309,8 +300,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE); if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { - if (!early) - pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); + pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); return -1; } @@ -337,7 +327,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) u16 eq_id; u8 *buf; - if (!verify_equivalence_table(ucode, size, true)) + if (!verify_equivalence_table(ucode, size)) return 0; buf = ucode; @@ -364,7 +354,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) u32 patch_size; int ret; - ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true); + ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size); if (ret < 0) { /* * Patch verification failed, skip to the next container, if @@ -456,14 +446,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) { struct cont_desc desc = { 0 }; struct microcode_amd *mc; - u32 rev, dummy, *new_rev; bool ret = false; - -#ifdef CONFIG_X86_32 - new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); -#else - new_rev = &ucode_new_rev; -#endif + u32 rev, dummy; desc.cpuid_1_eax = cpuid_1_eax; @@ -484,8 +468,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) return ret; if (!__apply_microcode_amd(mc)) { - *new_rev = 
mc->hdr.patch_id; - ret = true; + ucode_new_rev = mc->hdr.patch_id; + ret = true; } return ret; @@ -493,15 +477,18 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) { - char fw_name[36] = "amd-ucode/microcode_amd.bin"; + char fw_name[40] = "amd-ucode/microcode_amd.bin"; struct firmware fw; if (IS_ENABLED(CONFIG_X86_32)) return false; - if (family >= 0x15) + if (x86_cpuid_vendor() == X86_VENDOR_AMD && family >= 0x15) snprintf(fw_name, sizeof(fw_name), - "amd-ucode/microcode_amd_fam%.2xh.bin", family); + "amd-ucode/microcode_amd_fam%02hhxh.bin", family); + else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + snprintf(fw_name, sizeof(fw_name), + "hygon-ucode/microcode_hygon_fam%02hhxh.bin", family); if (firmware_request_builtin(&fw, fw_name)) { cp->size = fw.size; @@ -512,36 +499,23 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) return false; } -static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) +static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) { - struct ucode_cpu_info *uci; struct cpio_data cp; - const char *path; - bool use_pa; - - if (IS_ENABLED(CONFIG_X86_32)) { - uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info); - path = (const char *)__pa_nodebug(ucode_path); - use_pa = true; - } else { - uci = ucode_cpu_info; - path = ucode_path; - use_pa = false; - } if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) - cp = find_microcode_in_initrd(path, use_pa); - - /* Needed in load_microcode_amd() */ - uci->cpu_sig.sig = cpuid_1_eax; + cp = find_microcode_in_initrd(ucode_path); *ret = cp; } -static void apply_ucode_from_containers(unsigned int cpuid_1_eax) +void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax) { struct cpio_data cp = { }; + /* Needed in load_microcode_amd() */ + ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax; + find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) return; @@ -549,20 +523,21 @@ static void apply_ucode_from_containers(unsigned int cpuid_1_eax) early_apply_microcode(cpuid_1_eax, cp.data, cp.size); } -void load_ucode_amd_early(unsigned int cpuid_1_eax) -{ - return apply_ucode_from_containers(cpuid_1_eax); -} - static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); -int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) +static int __init save_microcode_in_initrd(void) { + unsigned int cpuid_1_eax = native_cpuid_eax(1); + struct cpuinfo_x86 *c = &boot_cpu_data; struct cont_desc desc = { 0 }; enum ucode_state ret; struct cpio_data cp; - cp = find_microcode_in_initrd(ucode_path, false); + if (dis_ucode_ldr || ((c->x86_vendor != X86_VENDOR_AMD || + c->x86 < 0x10) && (c->x86_vendor != X86_VENDOR_HYGON))) + return 0; + + find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) return -EINVAL; @@ -578,6 +553,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) return 0; } +early_initcall(save_microcode_in_initrd); /* * a small, trivial cache of per-family ucode patches @@ -631,7 +607,6 @@ static struct ucode_patch *find_patch(unsigned int cpu) struct ucode_cpu_info *uci = ucode_cpu_info + cpu; u16 equiv_id; - equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig); if (!equiv_id) return NULL; @@ -733,12 +708,20 @@ static enum ucode_state apply_microcode_amd(int cpu) return ret; } +void load_ucode_amd_ap(unsigned int cpuid_1_eax) +{ + unsigned int cpu = 
smp_processor_id(); + + ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax; + apply_microcode_amd(cpu); +} + static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) { u32 equiv_tbl_len; const u32 *hdr; - if (!verify_equivalence_table(buf, buf_size, false)) + if (!verify_equivalence_table(buf, buf_size)) return 0; hdr = (const u32 *)buf; @@ -784,7 +767,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, u16 proc_id; int ret; - ret = verify_patch(family, fw, leftover, patch_size, false); + ret = verify_patch(family, fw, leftover, patch_size); if (ret) return ret; @@ -904,13 +887,20 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz */ static enum ucode_state request_microcode_amd(int cpu, struct device *device) { - char fw_name[36] = "amd-ucode/microcode_amd.bin"; + char fw_name[40] = "amd-ucode/microcode_amd.bin"; struct cpuinfo_x86 *c = &cpu_data(cpu); enum ucode_state ret = UCODE_NFOUND; const struct firmware *fw; - if (c->x86 >= 0x15) - snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); + if (force_minrev) + return UCODE_NFOUND; + + if (x86_cpuid_vendor() == X86_VENDOR_AMD && c->x86 >= 0x15) + snprintf(fw_name, sizeof(fw_name), + "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); + else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + snprintf(fw_name, sizeof(fw_name), + "hygon-ucode/microcode_hygon_fam%.2xh.bin", c->x86); if (request_firmware_direct(&fw, (const char *)fw_name, device)) { pr_debug("failed to load file %s\n", fw_name); @@ -918,7 +908,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device) } ret = UCODE_ERROR; - if (!verify_container(fw->data, fw->size, false)) + if (!verify_container(fw->data, fw->size)) goto fw_release; ret = load_microcode_amd(c->x86, fw->data, fw->size); @@ -938,10 +928,11 @@ static void microcode_fini_cpu_amd(int cpu) } static struct microcode_ops microcode_amd_ops = { - .request_microcode_fw = request_microcode_amd, - .collect_cpu_info = collect_cpu_info_amd, - .apply_microcode = apply_microcode_amd, - .microcode_fini_cpu = microcode_fini_cpu_amd, + .request_microcode_fw = request_microcode_amd, + .collect_cpu_info = collect_cpu_info_amd, + .apply_microcode = apply_microcode_amd, + .microcode_fini_cpu = microcode_fini_cpu_amd, + .nmi_safe = true, }; struct microcode_ops * __init init_amd_microcode(void) @@ -960,6 +951,22 @@ struct microcode_ops * __init init_amd_microcode(void) return µcode_amd_ops; } +#ifdef CONFIG_CPU_SUP_HYGON +const struct microcode_ops * __init init_hygon_microcode(void) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + + if (c->x86_vendor != X86_VENDOR_HYGON) + return NULL; + + if (ucode_new_rev) + pr_info_once("microcode updated early to new patch_level=0x%08x\n", + ucode_new_rev); + + return µcode_amd_ops; +} +#endif + void __exit exit_amd_microcode(void) { cleanup(); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index a4ebd5e0ae82874390d05e5336a402db676770e1..7196ad323c4b28659f6e3042cf32360e4cc07430 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -31,6 +32,7 @@ #include #include +#include #include #include #include @@ -41,12 +43,15 @@ #define DRIVER_VERSION "2.2" +#ifdef CONFIG_CPU_SUP_HYGON +static const struct microcode_ops *microcode_ops; +#else static struct microcode_ops *microcode_ops; -static bool dis_ucode_ldr = true; - -bool 
initrd_gone; +#endif +bool dis_ucode_ldr = true; -LIST_HEAD(microcode_cache); +bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV); +module_param(force_minrev, bool, S_IRUSR | S_IWUSR); /* * Synchronization. @@ -90,10 +95,7 @@ static bool amd_check_current_patch_level(void) native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy); - if (IS_ENABLED(CONFIG_X86_32)) - levels = (u32 *)__pa_nodebug(&final_levels); - else - levels = final_levels; + levels = final_levels; for (i = 0; levels[i]; i++) { if (lvl == levels[i]) @@ -105,17 +107,8 @@ static bool amd_check_current_patch_level(void) static bool __init check_loader_disabled_bsp(void) { static const char *__dis_opt_str = "dis_ucode_ldr"; - -#ifdef CONFIG_X86_32 - const char *cmdline = (const char *)__pa_nodebug(boot_command_line); - const char *option = (const char *)__pa_nodebug(__dis_opt_str); - bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr); - -#else /* CONFIG_X86_64 */ const char *cmdline = boot_command_line; const char *option = __dis_opt_str; - bool *res = &dis_ucode_ldr; -#endif /* * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not @@ -123,17 +116,18 @@ static bool __init check_loader_disabled_bsp(void) * that's good enough as they don't land on the BSP path anyway. */ if (native_cpuid_ecx(1) & BIT(31)) - return *res; + return true; - if (x86_cpuid_vendor() == X86_VENDOR_AMD) { + if (x86_cpuid_vendor() == X86_VENDOR_AMD || + x86_cpuid_vendor() == X86_VENDOR_HYGON) { if (amd_check_current_patch_level()) - return *res; + return true; } if (cmdline_find_option_bool(cmdline, option) <= 0) - *res = false; + dis_ucode_ldr = false; - return *res; + return dis_ucode_ldr; } void __init load_ucode_bsp(void) @@ -158,6 +152,10 @@ void __init load_ucode_bsp(void) intel = false; break; + case X86_VENDOR_HYGON: + intel = false; + break; + default: return; } @@ -168,23 +166,14 @@ void __init load_ucode_bsp(void) if (intel) load_ucode_intel_bsp(); else - load_ucode_amd_early(cpuid_1_eax); -} - -static bool check_loader_disabled_ap(void) -{ -#ifdef CONFIG_X86_32 - return *((bool *)__pa_nodebug(&dis_ucode_ldr)); -#else - return dis_ucode_ldr; -#endif + load_ucode_amd_bsp(cpuid_1_eax); } void load_ucode_ap(void) { unsigned int cpuid_1_eax; - if (check_loader_disabled_ap()) + if (dis_ucode_ldr) return; cpuid_1_eax = native_cpuid_eax(1); @@ -196,103 +185,47 @@ void load_ucode_ap(void) break; case X86_VENDOR_AMD: if (x86_family(cpuid_1_eax) >= 0x10) - load_ucode_amd_early(cpuid_1_eax); - break; - default: - break; - } -} - -static int __init save_microcode_in_initrd(void) -{ - struct cpuinfo_x86 *c = &boot_cpu_data; - int ret = -EINVAL; - - if (dis_ucode_ldr) { - ret = 0; - goto out; - } - - switch (c->x86_vendor) { - case X86_VENDOR_INTEL: - if (c->x86 >= 6) - ret = save_microcode_in_initrd_intel(); + load_ucode_amd_ap(cpuid_1_eax); break; - case X86_VENDOR_AMD: - if (c->x86 >= 0x10) - ret = save_microcode_in_initrd_amd(cpuid_eax(1)); + case X86_VENDOR_HYGON: + load_ucode_amd_ap(cpuid_1_eax); break; default: break; } - -out: - initrd_gone = true; - - return ret; } -struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) +struct cpio_data __init find_microcode_in_initrd(const char *path) { #ifdef CONFIG_BLK_DEV_INITRD unsigned long start = 0; size_t size; #ifdef CONFIG_X86_32 - struct boot_params *params; - - if (use_pa) - params = (struct boot_params *)__pa_nodebug(&boot_params); - else - params = &boot_params; - - size = params->hdr.ramdisk_size; - - /* - * Set start only if we have an initrd image. 
We cannot use initrd_start - * because it is not set that early yet. - */ + size = boot_params.hdr.ramdisk_size; + /* Early load on BSP has a temporary mapping. */ if (size) - start = params->hdr.ramdisk_image; + start = initrd_start_early; -# else /* CONFIG_X86_64 */ +#else /* CONFIG_X86_64 */ size = (unsigned long)boot_params.ext_ramdisk_size << 32; size |= boot_params.hdr.ramdisk_size; if (size) { start = (unsigned long)boot_params.ext_ramdisk_image << 32; start |= boot_params.hdr.ramdisk_image; - start += PAGE_OFFSET; } -# endif +#endif /* * Fixup the start address: after reserve_initrd() runs, initrd_start * has the virtual address of the beginning of the initrd. It also * possibly relocates the ramdisk. In either case, initrd_start contains * the updated address so use that instead. - * - * initrd_gone is for the hotplug case where we've thrown out initrd - * already. */ - if (!use_pa) { - if (initrd_gone) - return (struct cpio_data){ NULL, 0, "" }; - if (initrd_start) - start = initrd_start; - } else { - /* - * The picture with physical addresses is a bit different: we - * need to get the *physical* address to which the ramdisk was - * relocated, i.e., relocated_ramdisk (not initrd_start) and - * since we're running from physical addresses, we need to access - * relocated_ramdisk through its *physical* address too. - */ - u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk); - if (*rr) - start = *rr; - } + if (initrd_start) + start = initrd_start; return find_cpio_data(path, (void *)start, size, NULL); #else /* !CONFIG_BLK_DEV_INITRD */ @@ -316,6 +249,9 @@ static void reload_early_microcode(unsigned int cpu) if (family >= 0x10) reload_ucode_amd(cpu); break; + case X86_VENDOR_HYGON: + reload_ucode_amd(cpu); + break; default: break; } @@ -336,117 +272,298 @@ static struct platform_device *microcode_pdev; * requirement can be relaxed in the future. Right now, this is conservative * and good. */ -#define SPINUNIT 100 /* 100 nsec */ +enum sibling_ctrl { + /* Spinwait with timeout */ + SCTRL_WAIT, + /* Invoke the microcode_apply() callback */ + SCTRL_APPLY, + /* Proceed without invoking the microcode_apply() callback */ + SCTRL_DONE, +}; + +struct microcode_ctrl { + enum sibling_ctrl ctrl; + enum ucode_state result; + unsigned int ctrl_cpu; + bool nmi_enabled; +}; -static int check_online_cpus(void) +DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); +static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); +static atomic_t late_cpus_in, offline_in_nmi; +static unsigned int loops_per_usec; +static cpumask_t cpu_offline_mask; + +static noinstr bool wait_for_cpus(atomic_t *cnt) { - unsigned int cpu; + unsigned int timeout, loops; - /* - * Make sure all CPUs are online. It's fine for SMT to be disabled if - * all the primary threads are still online. 
- */ - for_each_present_cpu(cpu) { - if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) { - pr_err("Not all CPUs online, aborting microcode update.\n"); - return -EINVAL; + WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0); + + for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { + if (!raw_atomic_read(cnt)) + return true; + + for (loops = 0; loops < loops_per_usec; loops++) + cpu_relax(); + + /* If invoked directly, tickle the NMI watchdog */ + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) { + instrumentation_begin(); + touch_nmi_watchdog(); + instrumentation_end(); } } - - return 0; + /* Prevent the late comers from making progress and let them time out */ + raw_atomic_inc(cnt); + return false; } -static atomic_t late_cpus_in; -static atomic_t late_cpus_out; - -static int __wait_for_cpus(atomic_t *t, long long timeout) +static noinstr bool wait_for_ctrl(void) { - int all_cpus = num_online_cpus(); - - atomic_inc(t); + unsigned int timeout, loops; - while (atomic_read(t) < all_cpus) { - if (timeout < SPINUNIT) { - pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n", - all_cpus - atomic_read(t)); - return 1; - } + for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { + if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) + return true; - ndelay(SPINUNIT); - timeout -= SPINUNIT; + for (loops = 0; loops < loops_per_usec; loops++) + cpu_relax(); - touch_nmi_watchdog(); + /* If invoked directly, tickle the NMI watchdog */ + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) { + instrumentation_begin(); + touch_nmi_watchdog(); + instrumentation_end(); + } } - return 0; + return false; } /* - * Returns: - * < 0 - on error - * 0 - success (no update done or microcode was updated) + * Protected against instrumentation up to the point where the primary + * thread completed the update. See microcode_nmi_handler() for details. */ -static int __reload_late(void *info) +static noinstr bool load_secondary_wait(unsigned int ctrl_cpu) { - int cpu = smp_processor_id(); - enum ucode_state err; - int ret = 0; + /* Initial rendezvous to ensure that all CPUs have arrived */ + if (!wait_for_cpus(&late_cpus_in)) { + raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + return false; + } /* - * Wait for all CPUs to arrive. A load will not be attempted unless all - * CPUs show up. - * */ - if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) - return -1; + * Wait for primary threads to complete. If one of them hangs due + * to the update, there is no way out. This is non-recoverable + * because the CPU might hold locks or resources and confuse the + * scheduler, watchdogs etc. There is no way to safely evacuate the + * machine. + */ + if (wait_for_ctrl()) + return true; + + instrumentation_begin(); + panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu); + instrumentation_end(); +} +/* + * Protected against instrumentation up to the point where the primary + * thread completed the update. See microcode_nmi_handler() for details. + */ +static noinstr void load_secondary(unsigned int cpu) +{ + unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu); + enum ucode_state ret; + + if (!load_secondary_wait(ctrl_cpu)) { + instrumentation_begin(); + pr_err_once("load: %d CPUs timed out\n", + atomic_read(&late_cpus_in) - 1); + instrumentation_end(); + return; + } + + /* Primary thread completed. 
Allow to invoke instrumentable code */ + instrumentation_begin(); /* - * On an SMT system, it suffices to load the microcode on one sibling of - * the core because the microcode engine is shared between the threads. - * Synchronization still needs to take place so that no concurrent - * loading attempts happen on multiple threads of an SMT core. See - * below. + * If the primary succeeded then invoke the apply() callback, + * otherwise copy the state from the primary thread. */ - if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu) - err = microcode_ops->apply_microcode(cpu); + if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY) + ret = microcode_ops->apply_microcode(cpu); else - goto wait_for_siblings; + ret = per_cpu(ucode_ctrl.result, ctrl_cpu); - if (err >= UCODE_NFOUND) { - if (err == UCODE_ERROR) { - pr_warn("Error reloading microcode on CPU %d\n", cpu); - ret = -1; - } + this_cpu_write(ucode_ctrl.result, ret); + this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); + instrumentation_end(); +} + +static void __load_primary(unsigned int cpu) +{ + struct cpumask *secondaries = topology_sibling_cpumask(cpu); + enum sibling_ctrl ctrl; + enum ucode_state ret; + unsigned int sibling; + + /* Initial rendezvous to ensure that all CPUs have arrived */ + if (!wait_for_cpus(&late_cpus_in)) { + this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1); + return; } -wait_for_siblings: - if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC)) - panic("Timeout during microcode update!\n"); + ret = microcode_ops->apply_microcode(cpu); + this_cpu_write(ucode_ctrl.result, ret); + this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); /* - * At least one thread has completed update on each core. - * For others, simply call the update to make sure the - * per-cpu cpuinfo can be updated with right microcode - * revision. + * If the update was successful, let the siblings run the apply() + * callback. If not, tell them it's done. This also covers the + * case where the CPU has uniform loading at package or system + * scope implemented but does not advertise it. 
*/ - if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu) - err = microcode_ops->apply_microcode(cpu); + if (ret == UCODE_UPDATED || ret == UCODE_OK) + ctrl = SCTRL_APPLY; + else + ctrl = SCTRL_DONE; + + for_each_cpu(sibling, secondaries) { + if (sibling != cpu) + per_cpu(ucode_ctrl.ctrl, sibling) = ctrl; + } +} + +static bool kick_offline_cpus(unsigned int nr_offl) +{ + unsigned int cpu, timeout; + + for_each_cpu(cpu, &cpu_offline_mask) { + /* Enable the rendezvous handler and send NMI */ + per_cpu(ucode_ctrl.nmi_enabled, cpu) = true; + apic_send_nmi_to_offline_cpu(cpu); + } - return ret; + /* Wait for them to arrive */ + for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) { + if (atomic_read(&offline_in_nmi) == nr_offl) + return true; + udelay(1); + } + /* Let the others time out */ + return false; +} + +static void release_offline_cpus(void) +{ + unsigned int cpu; + + for_each_cpu(cpu, &cpu_offline_mask) + per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE; +} + +static void load_primary(unsigned int cpu) +{ + unsigned int nr_offl = cpumask_weight(&cpu_offline_mask); + bool proceed = true; + + /* Kick soft-offlined SMT siblings if required */ + if (!cpu && nr_offl) + proceed = kick_offline_cpus(nr_offl); + + /* If the soft-offlined CPUs did not respond, abort */ + if (proceed) + __load_primary(cpu); + + /* Unconditionally release soft-offlined SMT siblings if required */ + if (!cpu && nr_offl) + release_offline_cpus(); } /* - * Reload microcode late on all CPUs. Wait for a sec until they - * all gather together. + * Minimal stub rendezvous handler for soft-offlined CPUs which participate + * in the NMI rendezvous to protect against a concurrent NMI on affected + * CPUs. */ -static int microcode_reload_late(void) +void noinstr microcode_offline_nmi_handler(void) +{ + if (!raw_cpu_read(ucode_ctrl.nmi_enabled)) + return; + raw_cpu_write(ucode_ctrl.nmi_enabled, false); + raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE); + raw_atomic_inc(&offline_in_nmi); + wait_for_ctrl(); +} + +static noinstr bool microcode_update_handler(void) { - int old = boot_cpu_data.microcode, ret; + unsigned int cpu = raw_smp_processor_id(); + + if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) { + instrumentation_begin(); + load_primary(cpu); + instrumentation_end(); + } else { + load_secondary(cpu); + } + + instrumentation_begin(); + touch_nmi_watchdog(); + instrumentation_end(); + + return true; +} + +/* + * Protection against instrumentation is required for CPUs which are not + * safe against an NMI which is delivered to the secondary SMT sibling + * while the primary thread updates the microcode. Instrumentation can end + * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI + * which is the opposite of what the NMI rendezvous is trying to achieve. + * + * The primary thread is safe versus instrumentation as the actual + * microcode update handles this correctly. It's only the sibling code + * path which must be NMI safe until the primary thread completed the + * update. 
+ */ +bool noinstr microcode_nmi_handler(void) +{ + if (!raw_cpu_read(ucode_ctrl.nmi_enabled)) + return false; + + raw_cpu_write(ucode_ctrl.nmi_enabled, false); + return microcode_update_handler(); +} + +static int load_cpus_stopped(void *unused) +{ + if (microcode_ops->use_nmi) { + /* Enable the NMI handler and raise NMI */ + this_cpu_write(ucode_ctrl.nmi_enabled, true); + apic->send_IPI(smp_processor_id(), NMI_VECTOR); + } else { + /* Just invoke the handler directly */ + microcode_update_handler(); + } + return 0; +} + +static int load_late_stop_cpus(bool is_safe) +{ + unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0; + unsigned int nr_offl, offline = 0; + int old_rev = boot_cpu_data.microcode; struct cpuinfo_x86 prev_info; - pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n"); - pr_err("You should switch to early loading, if possible.\n"); + if (!is_safe) { + pr_err("Late microcode loading without minimal revision check.\n"); + pr_err("You should switch to early loading, if possible.\n"); + } - atomic_set(&late_cpus_in, 0); - atomic_set(&late_cpus_out, 0); + atomic_set(&late_cpus_in, num_online_cpus()); + atomic_set(&offline_in_nmi, 0); + loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000); /* * Take a snapshot before the microcode update in order to compare and @@ -454,52 +571,162 @@ static int microcode_reload_late(void) */ store_cpu_caps(&prev_info); - ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); - if (!ret) { - pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n", - old, boot_cpu_data.microcode); - microcode_check(&prev_info); - } else { - pr_info("Reload failed, current microcode revision: 0x%x\n", - boot_cpu_data.microcode); + if (microcode_ops->use_nmi) + static_branch_enable_cpuslocked(&microcode_nmi_handler_enable); + + stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask); + + if (microcode_ops->use_nmi) + static_branch_disable_cpuslocked(&microcode_nmi_handler_enable); + + /* Analyze the results */ + for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { + switch (per_cpu(ucode_ctrl.result, cpu)) { + case UCODE_UPDATED: updated++; break; + case UCODE_TIMEOUT: timedout++; break; + case UCODE_OK: siblings++; break; + case UCODE_OFFLINE: offline++; break; + default: failed++; break; + } + } + + if (microcode_ops->finalize_late_load) + microcode_ops->finalize_late_load(!updated); + + if (!updated) { + /* Nothing changed. */ + if (!failed && !timedout) + return 0; + + nr_offl = cpumask_weight(&cpu_offline_mask); + if (offline < nr_offl) { + pr_warn("%u offline siblings did not respond.\n", + nr_offl - atomic_read(&offline_in_nmi)); + return -EIO; + } + pr_err("update failed: %u CPUs failed %u CPUs timed out\n", + failed, timedout); + return -EIO; + } + + if (!is_safe || failed || timedout) + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + + pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings); + if (failed || timedout) { + pr_err("load incomplete. %u CPUs timed out or failed\n", + num_online_cpus() - (updated + siblings)); + } + pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode); + microcode_check(&prev_info); + + return updated + siblings == num_online_cpus() ? 0 : -EIO; +} + +/* + * This function does two things: + * + * 1) Ensure that all required CPUs which are present and have been booted + * once are online. + * + * To pass this check, all primary threads must be online. 
+ * + * If the microcode load is not safe against NMI then all SMT threads + * must be online as well because they still react to NMIs when they are + * soft-offlined and parked in one of the play_dead() variants. So if a + * NMI hits while the primary thread updates the microcode the resulting + * behaviour is undefined. The default play_dead() implementation on + * modern CPUs uses MWAIT, which is also not guaranteed to be safe + * against a microcode update which affects MWAIT. + * + * As soft-offlined CPUs still react on NMIs, the SMT sibling + * restriction can be lifted when the vendor driver signals to use NMI + * for rendezvous and the APIC provides a mechanism to send an NMI to a + * soft-offlined CPU. The soft-offlined CPUs are then able to + * participate in the rendezvous in a trivial stub handler. + * + * 2) Initialize the per CPU control structure and create a cpumask + * which contains "offline"; secondary threads, so they can be handled + * correctly by a control CPU. + */ +static bool setup_cpus(void) +{ + struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, }; + bool allow_smt_offline; + unsigned int cpu; + + allow_smt_offline = microcode_ops->nmi_safe || + (microcode_ops->use_nmi && apic->nmi_to_offline_cpu); + + cpumask_clear(&cpu_offline_mask); + + for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { + /* + * Offline CPUs sit in one of the play_dead() functions + * with interrupts disabled, but they still react on NMIs + * and execute arbitrary code. Also MWAIT being updated + * while the offline CPU sits there is not necessarily safe + * on all CPU variants. + * + * Mark them in the offline_cpus mask which will be handled + * by CPU0 later in the update process. + * + * Ensure that the primary thread is online so that it is + * guaranteed that all cores are updated. + */ + if (!cpu_online(cpu)) { + if (topology_is_primary_thread(cpu) || !allow_smt_offline) { + pr_err("CPU %u not online, loading aborted\n", cpu); + return false; + } + cpumask_set_cpu(cpu, &cpu_offline_mask); + per_cpu(ucode_ctrl, cpu) = ctrl; + continue; + } + + /* + * Initialize the per CPU state. This is core scope for now, + * but prepared to take package or system scope into account. + */ + ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu)); + per_cpu(ucode_ctrl, cpu) = ctrl; } + return true; +} - return ret; +static int load_late_locked(void) +{ + if (!setup_cpus()) + return -EBUSY; + + switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) { + case UCODE_NEW: + return load_late_stop_cpus(false); + case UCODE_NEW_SAFE: + return load_late_stop_cpus(true); + case UCODE_NFOUND: + return -ENOENT; + default: + return -EBADFD; + } } static ssize_t reload_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { - enum ucode_state tmp_ret = UCODE_OK; - int bsp = boot_cpu_data.cpu_index; unsigned long val; - ssize_t ret = 0; + ssize_t ret; ret = kstrtoul(buf, 0, &val); if (ret || val != 1) return -EINVAL; cpus_read_lock(); - - ret = check_online_cpus(); - if (ret) - goto put; - - tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev); - if (tmp_ret != UCODE_NEW) - goto put; - - ret = microcode_reload_late(); -put: + ret = load_late_locked(); cpus_read_unlock(); - if (ret == 0) - ret = size; - - add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); - - return ret; + return ret ? : size; } static DEVICE_ATTR_WO(reload);
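For reference, the user-visible trigger is unchanged: writing 1 to the reload attribute starts the late load, and with the rework above a failure is now propagated back to the writer (e.g. -EBUSY when a required CPU is offline, -ENOENT when no microcode blob is found) instead of being reported only through the kernel log. A minimal usage sketch, with the path taken from the existing microcode ABI::

	# echo 1 > /sys/devices/system/cpu/microcode/reload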
@@ -541,17 +768,6 @@ static void microcode_fini_cpu(int cpu) microcode_ops->microcode_fini_cpu(cpu); } -static enum ucode_state microcode_init_cpu(int cpu) -{ - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - - memset(uci, 0, sizeof(*uci)); - - microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); - - return microcode_ops->apply_microcode(cpu); -} - /** * microcode_bsp_resume - Update boot CPU microcode during resume. */ @@ -570,19 +786,18 @@ static struct syscore_ops mc_syscore_ops = { .resume = microcode_bsp_resume, }; -static int mc_cpu_starting(unsigned int cpu) -{ - enum ucode_state err = microcode_ops->apply_microcode(cpu); - - pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err); - - return err == UCODE_ERROR; -} - static int mc_cpu_online(unsigned int cpu) { + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct device *dev = get_cpu_device(cpu); + memset(uci, 0, sizeof(*uci)); + + microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); + cpu_data(cpu).microcode = uci->cpu_sig.rev; + if (!cpu) + boot_cpu_data.microcode = uci->cpu_sig.rev; + if (sysfs_create_group(&dev->kobj, &mc_attr_group)) pr_err("Failed to create group for CPU%d\n", cpu); return 0; @@ -590,33 +805,13 @@ static int mc_cpu_down_prep(unsigned int cpu) { - struct device *dev; - - dev = get_cpu_device(cpu); + struct device *dev = get_cpu_device(cpu); microcode_fini_cpu(cpu); - - /* Suspend is in progress, only remove the interface */ sysfs_remove_group(&dev->kobj, &mc_attr_group); - pr_debug("%s: CPU%d\n", __func__, cpu); - return 0; } -static void setup_online_cpu(struct work_struct *work) -{ - int cpu = smp_processor_id(); - enum ucode_state err; - - err = microcode_init_cpu(cpu); - if (err == UCODE_ERROR) { - pr_err("Error applying microcode on CPU%d\n", cpu); - return; - } - - mc_cpu_online(cpu); -} - static struct attribute *cpu_root_microcode_attrs[] = { #ifdef CONFIG_MICROCODE_LATE_LOADING &dev_attr_reload.attr, @@ -642,6 +837,8 @@ static int __init microcode_init(void) microcode_ops = init_intel_microcode(); else if (c->x86_vendor == X86_VENDOR_AMD) microcode_ops = init_amd_microcode(); + else if (c->x86_vendor == X86_VENDOR_HYGON) + microcode_ops = init_hygon_microcode(); else pr_err("no support for this CPU vendor\n"); @@ -662,14 +859,9 @@ } } - /* Do per-CPU setup */ - schedule_on_each_cpu(setup_online_cpu); - register_syscore_ops(&mc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting", - mc_cpu_starting, NULL); - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", - mc_cpu_online, mc_cpu_down_prep); + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", + mc_cpu_online, mc_cpu_down_prep); pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); @@ -680,5 +872,4 @@ static int __init microcode_init(void) return error; } -fs_initcall(save_microcode_in_initrd); late_initcall(microcode_init); diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 94dd6af9c963a88982f852bc679ac8d8d4651ae5..6024feb98d29dbba1ea45c35660fdf1b577582cf 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -32,11 +31,14 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; +#define UCODE_BSP_LOADED ((struct microcode_intel *)0x1UL) + /* Current 
microcode patch used in early patching on the APs. */ -static struct microcode_intel *intel_ucode_patch; +static struct microcode_intel *ucode_patch_va __read_mostly; +static struct microcode_intel *ucode_patch_late __read_mostly; /* last level cache size per core */ -static int llc_size_per_core; +static unsigned int llc_size_per_core __ro_after_init; /* microcode format is extended from prescott processors */ struct extended_signature { @@ -66,60 +68,52 @@ static inline unsigned int exttable_size(struct extended_sigtable *et) return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE; } -int intel_cpu_collect_info(struct ucode_cpu_info *uci) +void intel_collect_cpu_info(struct cpu_signature *sig) { - unsigned int val[2]; - unsigned int family, model; - struct cpu_signature csig = { 0 }; - unsigned int eax, ebx, ecx, edx; - - memset(uci, 0, sizeof(*uci)); - - eax = 0x00000001; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - csig.sig = eax; + sig->sig = cpuid_eax(1); + sig->pf = 0; + sig->rev = intel_get_microcode_revision(); - family = x86_family(eax); - model = x86_model(eax); + if (x86_model(sig->sig) >= 5 || x86_family(sig->sig) > 6) { + unsigned int val[2]; - if (model >= 5 || family > 6) { /* get processor flags from MSR 0x17 */ native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); - csig.pf = 1 << ((val[1] >> 18) & 7); + sig->pf = 1 << ((val[1] >> 18) & 7); } +} +EXPORT_SYMBOL_GPL(intel_collect_cpu_info); - csig.rev = intel_get_microcode_revision(); - - uci->cpu_sig = csig; +static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2, + unsigned int pf2) +{ + if (s1->sig != sig2) + return false; - return 0; + /* Processor flags are either both 0 or they intersect. */ + return ((!s1->pf && !pf2) || (s1->pf & pf2)); } -EXPORT_SYMBOL_GPL(intel_cpu_collect_info); -/* - * Returns 1 if update has been found, 0 otherwise. - */ -int intel_find_matching_signature(void *mc, unsigned int csig, int cpf) +bool intel_find_matching_signature(void *mc, struct cpu_signature *sig) { struct microcode_header_intel *mc_hdr = mc; - struct extended_sigtable *ext_hdr; struct extended_signature *ext_sig; + struct extended_sigtable *ext_hdr; int i; - if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf)) - return 1; + if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf)) + return true; /* Look for ext. headers: */ if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE) - return 0; + return false; ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE; ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE; for (i = 0; i < ext_hdr->count; i++) { - if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf)) - return 1; + if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf)) + return true; ext_sig++; } return 0; @@ -240,264 +234,91 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type) } EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); -/* - * Returns 1 if update has been found, 0 otherwise. 
- */ -static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev) +static void update_ucode_pointer(struct microcode_intel *mc) { - struct microcode_header_intel *mc_hdr = mc; - - if (mc_hdr->rev <= new_rev) - return 0; - - return intel_find_matching_signature(mc, csig, cpf); -} - -static struct ucode_patch *memdup_patch(void *data, unsigned int size) -{ - struct ucode_patch *p; - - p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL); - if (!p) - return NULL; - - p->data = kmemdup(data, size, GFP_KERNEL); - if (!p->data) { - kfree(p); - return NULL; - } - - return p; -} - -static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size) -{ - struct microcode_header_intel *mc_hdr, *mc_saved_hdr; - struct ucode_patch *iter, *tmp, *p = NULL; - bool prev_found = false; - unsigned int sig, pf; - - mc_hdr = (struct microcode_header_intel *)data; - - list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) { - mc_saved_hdr = (struct microcode_header_intel *)iter->data; - sig = mc_saved_hdr->sig; - pf = mc_saved_hdr->pf; - - if (intel_find_matching_signature(data, sig, pf)) { - prev_found = true; - - if (mc_hdr->rev <= mc_saved_hdr->rev) - continue; - - p = memdup_patch(data, size); - if (!p) - pr_err("Error allocating buffer %p\n", data); - else { - list_replace(&iter->plist, &p->plist); - kfree(iter->data); - kfree(iter); - } - } - } + kvfree(ucode_patch_va); /* - * There weren't any previous patches found in the list cache; save the - * newly found. + * Save the virtual address for early loading and for eventual free + * on late loading. */ - if (!prev_found) { - p = memdup_patch(data, size); - if (!p) - pr_err("Error allocating buffer for %p\n", data); - else - list_add_tail(&p->plist, &microcode_cache); - } - - if (!p) - return; + ucode_patch_va = mc; +} - if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf)) - return; +static void save_microcode_patch(struct microcode_intel *patch) +{ + unsigned int size = get_totalsize(&patch->hdr); + struct microcode_intel *mc; - /* - * Save for early loading. On 32-bit, that needs to be a physical - * address as the APs are running from physical addresses, before - * paging has been enabled. - */ - if (IS_ENABLED(CONFIG_X86_32)) - intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data); + mc = kvmemdup(patch, size, GFP_KERNEL); + if (mc) + update_ucode_pointer(mc); else - intel_ucode_patch = p->data; + pr_err("Unable to allocate microcode memory size: %u\n", size); } -/* - * Get microcode matching with BSP's model. Only CPUs with the same model as - * BSP can stay in the platform. 
- */ -static struct microcode_intel * -scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) +/* Scan blob for microcode matching the boot CPUs family, model, stepping */ +static __init struct microcode_intel *scan_microcode(void *data, size_t size, + struct ucode_cpu_info *uci, + bool save) { struct microcode_header_intel *mc_header; struct microcode_intel *patch = NULL; + u32 cur_rev = uci->cpu_sig.rev; unsigned int mc_size; - while (size) { - if (size < sizeof(struct microcode_header_intel)) - break; - + for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) { mc_header = (struct microcode_header_intel *)data; mc_size = get_totalsize(mc_header); - if (!mc_size || - mc_size > size || + if (!mc_size || mc_size > size || intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0) break; - size -= mc_size; - - if (!intel_find_matching_signature(data, uci->cpu_sig.sig, - uci->cpu_sig.pf)) { - data += mc_size; + if (!intel_find_matching_signature(data, &uci->cpu_sig)) continue; - } + /* + * For saving the early microcode, find the matching revision which + * was loaded on the BSP. + * + * On the BSP during early boot, find a newer revision than + * actually loaded in the CPU. + */ if (save) { - save_microcode_patch(uci, data, mc_size); - goto next; - } - - - if (!patch) { - if (!has_newer_microcode(data, - uci->cpu_sig.sig, - uci->cpu_sig.pf, - uci->cpu_sig.rev)) - goto next; - - } else { - struct microcode_header_intel *phdr = &patch->hdr; - - if (!has_newer_microcode(data, - phdr->sig, - phdr->pf, - phdr->rev)) - goto next; + if (cur_rev != mc_header->rev) + continue; + } else if (cur_rev >= mc_header->rev) { + continue; } - /* We have a newer patch, save it. */ patch = data; - -next: - data += mc_size; - } - - if (size) - return NULL; - - return patch; -} - -static bool load_builtin_intel_microcode(struct cpio_data *cp) -{ - unsigned int eax = 1, ebx, ecx = 0, edx; - struct firmware fw; - char name[30]; - - if (IS_ENABLED(CONFIG_X86_32)) - return false; - - native_cpuid(&eax, &ebx, &ecx, &edx); - - sprintf(name, "intel-ucode/%02x-%02x-%02x", - x86_family(eax), x86_model(eax), x86_stepping(eax)); - - if (firmware_request_builtin(&fw, name)) { - cp->size = fw.size; - cp->data = (void *)fw.data; - return true; - } - - return false; -} - -static void print_ucode_info(int old_rev, int new_rev, unsigned int date) -{ - pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", - old_rev, - new_rev, - date & 0xffff, - date >> 24, - (date >> 16) & 0xff); -} - -#ifdef CONFIG_X86_32 - -static int delay_ucode_info; -static int current_mc_date; -static int early_old_rev; - -/* - * Print early updated ucode info after printk works. This is delayed info dump. - */ -void show_ucode_info_early(void) -{ - struct ucode_cpu_info uci; - - if (delay_ucode_info) { - intel_cpu_collect_info(&uci); - print_ucode_info(early_old_rev, uci.cpu_sig.rev, current_mc_date); - delay_ucode_info = 0; + cur_rev = mc_header->rev; } -} - -/* - * At this point, we can not call printk() yet. Delay printing microcode info in - * show_ucode_info_early() until printk() works. 
- */ -static void print_ucode(int old_rev, int new_rev, int date) -{ - int *delay_ucode_info_p; - int *current_mc_date_p; - int *early_old_rev_p; - - delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info); - current_mc_date_p = (int *)__pa_nodebug(&current_mc_date); - early_old_rev_p = (int *)__pa_nodebug(&early_old_rev); - - *delay_ucode_info_p = 1; - *current_mc_date_p = date; - *early_old_rev_p = old_rev; -} -#else -static inline void print_ucode(int old_rev, int new_rev, int date) -{ - print_ucode_info(old_rev, new_rev, date); + return size ? NULL : patch; } -#endif -static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) +static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci, + struct microcode_intel *mc, + u32 *cur_rev) { - struct microcode_intel *mc; - u32 rev, old_rev; + u32 rev; - mc = uci->mc; if (!mc) - return 0; + return UCODE_NFOUND; /* * Save us the MSR write below - which is a particular expensive * operation - when the other hyperthread has updated the microcode * already. */ - rev = intel_get_microcode_revision(); - if (rev >= mc->hdr.rev) { - uci->cpu_sig.rev = rev; + *cur_rev = intel_get_microcode_revision(); + if (*cur_rev >= mc->hdr.rev) { + uci->cpu_sig.rev = *cur_rev; return UCODE_OK; } - old_rev = rev; - /* * Writeback and invalidate caches before updating microcode to avoid * internal issues depending on what the microcode is updating. @@ -509,247 +330,182 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) rev = intel_get_microcode_revision(); if (rev != mc->hdr.rev) - return -1; + return UCODE_ERROR; uci->cpu_sig.rev = rev; + return UCODE_UPDATED; +} - if (early) - print_ucode(old_rev, uci->cpu_sig.rev, mc->hdr.date); - else - print_ucode_info(old_rev, uci->cpu_sig.rev, mc->hdr.date); +static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) +{ + struct microcode_intel *mc = uci->mc; + enum ucode_state ret; + u32 cur_rev, date; - return 0; + ret = __apply_microcode(uci, mc, &cur_rev); + if (ret == UCODE_UPDATED) { + date = mc->hdr.date; + pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", + cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); + } + return ret; } -int __init save_microcode_in_initrd_intel(void) +static __init bool load_builtin_intel_microcode(struct cpio_data *cp) { - struct ucode_cpu_info uci; - struct cpio_data cp; - - /* - * initrd is going away, clear patch ptr. We will scan the microcode one - * last time before jettisoning and save a patch, if found. Then we will - * update that pointer too, with a stable patch address to use when - * resuming the cores. - */ - intel_ucode_patch = NULL; + unsigned int eax = 1, ebx, ecx = 0, edx; + struct firmware fw; + char name[30]; - if (!load_builtin_intel_microcode(&cp)) - cp = find_microcode_in_initrd(ucode_path, false); + if (IS_ENABLED(CONFIG_X86_32)) + return false; - if (!(cp.data && cp.size)) - return 0; + native_cpuid(&eax, &ebx, &ecx, &edx); - intel_cpu_collect_info(&uci); + sprintf(name, "intel-ucode/%02x-%02x-%02x", + x86_family(eax), x86_model(eax), x86_stepping(eax)); - scan_microcode(cp.data, cp.size, &uci, true); - return 0; + if (firmware_request_builtin(&fw, name)) { + cp->size = fw.size; + cp->data = (void *)fw.data; + return true; + } + return false; } -/* - * @res_patch, output: a pointer to the patch we found. 
- */ -static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci) +static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save) { - static const char *path; struct cpio_data cp; - bool use_pa; - - if (IS_ENABLED(CONFIG_X86_32)) { - path = (const char *)__pa_nodebug(ucode_path); - use_pa = true; - } else { - path = ucode_path; - use_pa = false; - } - /* try built-in microcode first */ if (!load_builtin_intel_microcode(&cp)) - cp = find_microcode_in_initrd(path, use_pa); + cp = find_microcode_in_initrd(ucode_path); if (!(cp.data && cp.size)) return NULL; - intel_cpu_collect_info(uci); + intel_collect_cpu_info(&uci->cpu_sig); - return scan_microcode(cp.data, cp.size, uci, false); + return scan_microcode(cp.data, cp.size, uci, save); } -void __init load_ucode_intel_bsp(void) +/* + * Invoked from an early init call to save the microcode blob which was + * selected during early boot when mm was not usable. The microcode must be + * saved because initrd is going away. It's an early init call so the APs + * just can use the pointer and do not have to scan initrd/builtin firmware + * again. + */ +static int __init save_builtin_microcode(void) { - struct microcode_intel *patch; struct ucode_cpu_info uci; - patch = __load_ucode_intel(&uci); - if (!patch) - return; + if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED) + return 0; - uci.mc = patch; + if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return 0; - apply_microcode_early(&uci, true); + uci.mc = get_microcode_blob(&uci, true); + if (uci.mc) + save_microcode_patch(uci.mc); + return 0; } +early_initcall(save_builtin_microcode); -void load_ucode_intel_ap(void) +/* Load microcode on BSP from initrd or builtin blobs */ +void __init load_ucode_intel_bsp(void) { - struct microcode_intel *patch, **iup; struct ucode_cpu_info uci; - if (IS_ENABLED(CONFIG_X86_32)) - iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch); - else - iup = &intel_ucode_patch; - - if (!*iup) { - patch = __load_ucode_intel(&uci); - if (!patch) - return; - - *iup = patch; - } - - uci.mc = *iup; - - apply_microcode_early(&uci, true); + uci.mc = get_microcode_blob(&uci, false); + if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) + ucode_patch_va = UCODE_BSP_LOADED; } -static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) +void load_ucode_intel_ap(void) { - struct microcode_header_intel *phdr; - struct ucode_patch *iter, *tmp; - - list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) { - - phdr = (struct microcode_header_intel *)iter->data; - - if (phdr->rev <= uci->cpu_sig.rev) - continue; - - if (!intel_find_matching_signature(phdr, - uci->cpu_sig.sig, - uci->cpu_sig.pf)) - continue; + struct ucode_cpu_info uci; - return iter->data; - } - return NULL; + uci.mc = ucode_patch_va; + if (uci.mc) + apply_microcode_early(&uci); } +/* Reload microcode on resume */ void reload_ucode_intel(void) { - struct microcode_intel *p; - struct ucode_cpu_info uci; - - intel_cpu_collect_info(&uci); - - p = find_patch(&uci); - if (!p) - return; - - uci.mc = p; + struct ucode_cpu_info uci = { .mc = ucode_patch_va, }; - apply_microcode_early(&uci, false); + if (uci.mc) + apply_microcode_early(&uci); } static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) { - struct cpuinfo_x86 *c = &cpu_data(cpu_num); - unsigned int val[2]; - - memset(csig, 0, sizeof(*csig)); - - csig->sig = cpuid_eax(0x00000001); - - if ((c->x86_model >= 5) || (c->x86 > 6)) { - /* get processor flags from 
MSR 0x17 */ - rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); - csig->pf = 1 << ((val[1] >> 18) & 7); - } - - csig->rev = c->microcode; - + intel_collect_cpu_info(csig); return 0; } -static enum ucode_state apply_microcode_intel(int cpu) +static enum ucode_state apply_microcode_late(int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - struct cpuinfo_x86 *c = &cpu_data(cpu); - bool bsp = c->cpu_index == boot_cpu_data.cpu_index; - struct microcode_intel *mc; + struct microcode_intel *mc = ucode_patch_late; enum ucode_state ret; - static int prev_rev; - u32 rev; + u32 cur_rev; - /* We should bind the task to the CPU */ - if (WARN_ON(raw_smp_processor_id() != cpu)) + if (WARN_ON_ONCE(smp_processor_id() != cpu)) return UCODE_ERROR; - /* Look for a newer patch in our cache: */ - mc = find_patch(uci); - if (!mc) { - mc = uci->mc; - if (!mc) - return UCODE_NFOUND; - } + ret = __apply_microcode(uci, mc, &cur_rev); + if (ret != UCODE_UPDATED && ret != UCODE_OK) + return ret; - /* - * Save us the MSR write below - which is a particular expensive - * operation - when the other hyperthread has updated the microcode - * already. - */ - rev = intel_get_microcode_revision(); - if (rev >= mc->hdr.rev) { - ret = UCODE_OK; - goto out; + if (!cpu && uci->cpu_sig.rev != cur_rev) { + pr_info("Updated to revision 0x%x, date = %04x-%02x-%02x\n", + uci->cpu_sig.rev, mc->hdr.date & 0xffff, mc->hdr.date >> 24, + (mc->hdr.date >> 16) & 0xff); } - /* - * Writeback and invalidate caches before updating microcode to avoid - * internal issues depending on what the microcode is updating. - */ - native_wbinvd(); + cpu_data(cpu).microcode = uci->cpu_sig.rev; + if (!cpu) + boot_cpu_data.microcode = uci->cpu_sig.rev; - /* write microcode via MSR 0x79 */ - wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); + return ret; +} - rev = intel_get_microcode_revision(); +static bool ucode_validate_minrev(struct microcode_header_intel *mc_header) +{ + int cur_rev = boot_cpu_data.microcode; - if (rev != mc->hdr.rev) { - pr_err("CPU%d update to revision 0x%x failed\n", - cpu, mc->hdr.rev); - return UCODE_ERROR; + /* + * When late-loading, ensure the header declares a minimum revision + * required to perform a late-load. The previously reserved field + * is 0 in older microcode blobs. + */ + if (!mc_header->min_req_ver) { + pr_info("Unsafe microcode update: Microcode header does not specify a required min version\n"); + return false; } - if (bsp && rev != prev_rev) { - pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n", - rev, - mc->hdr.date & 0xffff, - mc->hdr.date >> 24, - (mc->hdr.date >> 16) & 0xff); - prev_rev = rev; + /* + * Check whether the current revision is either greater or equal + * to the minimum revision specified in the header. + */ + if (cur_rev < mc_header->min_req_ver) { + pr_info("Unsafe microcode update: Current revision 0x%x too old\n", cur_rev); + pr_info("Current should be at 0x%x or higher. Use early loading instead\n", mc_header->min_req_ver); + return false; + } + return true; }
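As a concrete reading of the minimum-revision check above (all revision numbers here are made up for illustration)::

	/* Hypothetical blob header: revision 0x52, requires at least 0x50. */
	struct microcode_header_intel hdr = { .rev = 0x52, .min_req_ver = 0x50 };

	/*
	 * A CPU currently at revision 0x4e fails the check (0x4e < 0x50):
	 * the blob is flagged unsafe and, when force_minrev is set, it is
	 * skipped. A CPU already at 0x50 or newer passes, and
	 * parse_microcode_blobs() below reports the blob as UCODE_NEW_SAFE.
	 */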
-static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) +static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - unsigned int curr_mc_size = 0, new_mc_size = 0; - enum ucode_state ret = UCODE_OK; - int new_rev = uci->cpu_sig.rev; + bool is_safe, new_is_safe = false; + int cur_rev = uci->cpu_sig.rev; + unsigned int curr_mc_size = 0; u8 *new_mc = NULL, *mc = NULL; - unsigned int csig, cpf; while (iov_iter_count(iter)) { struct microcode_header_intel mc_header; @@ -758,68 +514,66 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) { pr_err("error! Truncated or inaccessible header in microcode data file\n"); - break; + goto fail; } mc_size = get_totalsize(&mc_header); if (mc_size < sizeof(mc_header)) { pr_err("error! Bad data in microcode data file (totalsize too small)\n"); - break; + goto fail; } data_size = mc_size - sizeof(mc_header); if (data_size > iov_iter_count(iter)) { pr_err("error! Bad data in microcode data file (truncated file?)\n"); - break; + goto fail; } /* For performance reasons, reuse mc area when possible */ if (!mc || mc_size > curr_mc_size) { - vfree(mc); - mc = vmalloc(mc_size); + kvfree(mc); + mc = kvmalloc(mc_size, GFP_KERNEL); if (!mc) - break; + goto fail; curr_mc_size = mc_size; } memcpy(mc, &mc_header, sizeof(mc_header)); data = mc + sizeof(mc_header); if (!copy_from_iter_full(data, data_size, iter) || - intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) { - break; - } + intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) + goto fail; - csig = uci->cpu_sig.sig; - cpf = uci->cpu_sig.pf; - if (has_newer_microcode(mc, csig, cpf, new_rev)) { - vfree(new_mc); - new_rev = mc_header.rev; - new_mc = mc; - new_mc_size = mc_size; - mc = NULL; /* trigger new vmalloc */ - ret = UCODE_NEW; - } - } + if (cur_rev >= mc_header.rev) + continue; - vfree(mc); + if (!intel_find_matching_signature(mc, &uci->cpu_sig)) + continue; - if (iov_iter_count(iter)) { - vfree(new_mc); - return UCODE_ERROR; + is_safe = ucode_validate_minrev(&mc_header); + if (force_minrev && !is_safe) + continue; + + kvfree(new_mc); + cur_rev = mc_header.rev; + new_mc = mc; + new_is_safe = is_safe; + mc = NULL; } + if (iov_iter_count(iter)) + goto fail; + + kvfree(mc); if (!new_mc) return UCODE_NFOUND; - vfree(uci->mc); - uci->mc = (struct microcode_intel *)new_mc; - - /* Save for CPU hotplug */ - save_microcode_patch(uci, new_mc, new_mc_size); + ucode_patch_late = (struct microcode_intel *)new_mc; + return new_is_safe ? 
UCODE_NEW_SAFE : UCODE_NEW; - pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", - cpu, new_rev, uci->cpu_sig.rev); - - return ret; +fail: + kvfree(mc); + kvfree(new_mc); + return UCODE_ERROR; } static bool is_blacklisted(unsigned int cpu) @@ -868,26 +622,36 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) kvec.iov_base = (void *)firmware->data; kvec.iov_len = firmware->size; iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size); - ret = generic_load_microcode(cpu, &iter); + ret = parse_microcode_blobs(cpu, &iter); release_firmware(firmware); return ret; } +static void finalize_late_load(int result) +{ + if (!result) + update_ucode_pointer(ucode_patch_late); + else + kvfree(ucode_patch_late); + ucode_patch_late = NULL; +} + static struct microcode_ops microcode_intel_ops = { - .request_microcode_fw = request_microcode_fw, - .collect_cpu_info = collect_cpu_info, - .apply_microcode = apply_microcode_intel, + .request_microcode_fw = request_microcode_fw, + .collect_cpu_info = collect_cpu_info, + .apply_microcode = apply_microcode_late, + .finalize_late_load = finalize_late_load, + .use_nmi = IS_ENABLED(CONFIG_X86_64), }; -static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) +static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c) { u64 llc_size = c->x86_cache_size * 1024ULL; do_div(llc_size, c->x86_max_cores); - - return (int)llc_size; + llc_size_per_core = (unsigned int)llc_size; } struct microcode_ops * __init init_intel_microcode(void) @@ -900,7 +664,7 @@ struct microcode_ops * __init init_intel_microcode(void) return NULL; } - llc_size_per_core = calc_llc_size_per_core(c); + calc_llc_size_per_core(c); return &microcode_intel_ops; } diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index bf883aa712330a2d38cbd21f7d43bd2f8263fb76..980ef806b377b9281d132e4f539ca2a08f8ee902 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -8,43 +8,37 @@ #include #include -struct ucode_patch { - struct list_head plist; - void *data; /* Intel uses only this one */ - unsigned int size; - u32 patch_id; - u16 equiv_cpu; -}; - -extern struct list_head microcode_cache; - struct device; enum ucode_state { UCODE_OK = 0, UCODE_NEW, + UCODE_NEW_SAFE, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR, + UCODE_TIMEOUT, + UCODE_OFFLINE, }; struct microcode_ops { enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev); - void (*microcode_fini_cpu)(int cpu); /* - * The generic 'microcode_core' part guarantees that - * the callbacks below run on a target cpu when they - * are being called. + * The generic 'microcode_core' part guarantees that the callbacks + * below run on a target CPU when they are being called. * See also the "Synchronization" section in microcode_core.c. 
*/ - enum ucode_state (*apply_microcode)(int cpu); - int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); + enum ucode_state (*apply_microcode)(int cpu); + int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); + void (*finalize_late_load)(int result); + unsigned int nmi_safe : 1, + use_nmi : 1; }; extern struct ucode_cpu_info ucode_cpu_info[]; -struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa); +struct cpio_data find_microcode_in_initrd(const char *path); #define MAX_UCODE_COUNT 128 @@ -55,6 +49,9 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa); #define CPUID_AMD1 QCHAR('A', 'u', 't', 'h') #define CPUID_AMD2 QCHAR('e', 'n', 't', 'i') #define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D') +#define CPUID_HYGON1 QCHAR('H', 'y', 'g', 'o') +#define CPUID_HYGON2 QCHAR('n', 'G', 'e', 'n') +#define CPUID_HYGON3 QCHAR('u', 'i', 'n', 'e') #define CPUID_IS(a, b, c, ebx, ecx, edx) \ (!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c)))) @@ -81,6 +78,9 @@ static inline int x86_cpuid_vendor(void) if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx)) return X86_VENDOR_AMD; + if (CPUID_IS(CPUID_HYGON1, CPUID_HYGON2, CPUID_HYGON3, ebx, ecx, edx)) + return X86_VENDOR_HYGON; + return X86_VENDOR_UNKNOWN; } @@ -94,12 +94,12 @@ static inline unsigned int x86_cpuid_family(void) return x86_family(eax); } -extern bool initrd_gone; +extern bool dis_ucode_ldr; +extern bool force_minrev; #ifdef CONFIG_CPU_SUP_AMD void load_ucode_amd_bsp(unsigned int family); void load_ucode_amd_ap(unsigned int family); -void load_ucode_amd_early(unsigned int cpuid_1_eax); int save_microcode_in_initrd_amd(unsigned int family); void reload_ucode_amd(unsigned int cpu); struct microcode_ops *init_amd_microcode(void); @@ -107,23 +107,26 @@ void exit_amd_microcode(void); #else /* CONFIG_CPU_SUP_AMD */ static inline void load_ucode_amd_bsp(unsigned int family) { } static inline void load_ucode_amd_ap(unsigned int family) { } -static inline void load_ucode_amd_early(unsigned int family) { } static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } static inline void reload_ucode_amd(unsigned int cpu) { } static inline struct microcode_ops *init_amd_microcode(void) { return NULL; } static inline void exit_amd_microcode(void) { } #endif /* !CONFIG_CPU_SUP_AMD */ +#ifdef CONFIG_CPU_SUP_HYGON +const struct microcode_ops *init_hygon_microcode(void); +#else /* CONFIG_CPU_SUP_HYGON */ +static const inline struct microcode_ops *init_hygon_microcode(void) { return NULL; } +#endif /* !CONFIG_CPU_SUP_HYGON */ + #ifdef CONFIG_CPU_SUP_INTEL void load_ucode_intel_bsp(void); void load_ucode_intel_ap(void); -int save_microcode_in_initrd_intel(void); void reload_ucode_intel(void); struct microcode_ops *init_intel_microcode(void); #else /* CONFIG_CPU_SUP_INTEL */ static inline void load_ucode_intel_bsp(void) { } static inline void load_ucode_intel_ap(void) { } -static inline int save_microcode_in_initrd_intel(void) { return -EINVAL; } static inline void reload_ucode_intel(void) { } static inline struct microcode_ops *init_intel_microcode(void) { return NULL; } #endif /* !CONFIG_CPU_SUP_INTEL */ diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 31c0e68f6227292cc5b217d68646ff2e26f3889a..2941134c47da66ebc7fdd0eef96a89d6dea43140 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -17,14 +17,22 @@ extern const char * const x86_vmx_flags[NVMXINTS*32]; * Get CPU information for use by the procfs. 
*/ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, - unsigned int cpu) + unsigned int cpu, unsigned int index, + bool rich_container, unsigned int total) { #ifdef CONFIG_SMP - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", - cpumask_weight(topology_core_cpumask(cpu))); - seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); - seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + if (rich_container) { + seq_puts(m, "physical id\t: 0\n"); + seq_printf(m, "siblings\t: %d\n", total); + seq_printf(m, "core id\t\t: %d\n", index); + seq_printf(m, "cpu cores\t: %d\n", total); + } else { + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); + seq_printf(m, "siblings\t: %d\n", + cpumask_weight(topology_core_cpumask(cpu))); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + } seq_printf(m, "apicid\t\t: %d\n", c->apicid); seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); #endif @@ -63,16 +71,20 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) static int show_cpuinfo(struct seq_file *m, void *v) { struct cpuinfo_x86 *c = v; - unsigned int cpu; + unsigned int cpu, index, total; int i; + bool rich_container = false; + + index = cpu = c->cpu_index; + if (check_rich_container(cpu, &index, &rich_container, &total)) + return 0; - cpu = c->cpu_index; seq_printf(m, "processor\t: %u\n" "vendor_id\t: %s\n" "cpu family\t: %d\n" "model\t\t: %u\n" "model name\t: %s\n", - cpu, + index, c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", c->x86, c->x86_model, @@ -95,13 +107,19 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (c->x86_cache_size) seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); - show_cpuinfo_core(m, c, cpu); + show_cpuinfo_core(m, c, cpu, index, rich_container, total); show_cpuinfo_misc(m, c); seq_puts(m, "flags\t\t:"); for (i = 0; i < 32*NCAPINTS; i++) - if (cpu_has(c, i) && x86_cap_flags[i] != NULL) - seq_printf(m, " %s", x86_cap_flags[i]); + if (cpu_has(c, i) && x86_cap_flags[i] != NULL) { + if (c->x86_vendor == X86_VENDOR_HYGON) + seq_printf(m, " %s", i == X86_FEATURE_SEV ? "csv" : + (i == X86_FEATURE_SEV_ES ? "csv2" : + x86_cap_flags[i])); + else + seq_printf(m, " %s", x86_cap_flags[i]); + } #ifdef CONFIG_X86_VMX_FEATURE_NAMES if (cpu_has(c, X86_FEATURE_VMX) && c->vmx_capability[0]) { diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile index 4a06c37b9cf11bef1b00b546e3c2344691e6e18a..0c13b0befd8a9b76fe4753b8ee230113e5f3bf54 100644 --- a/arch/x86/kernel/cpu/resctrl/Makefile +++ b/arch/x86/kernel/cpu/resctrl/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o -obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o +obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o +obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o +obj-$(CONFIG_RESCTRL_FS_PSEUDO_LOCK) += pseudo_lock.o CFLAGS_pseudo_lock.o = -I$(src) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 10830995eadab63656c1e90b9ae9f6f5a7f34d87..51389ebc0f19174deb947d8370d133e23e3e1006 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -16,6 +16,7 @@ #define pr_fmt(fmt) "resctrl: " fmt +#include #include #include #include @@ -25,8 +26,15 @@ #include #include "internal.h" -/* Mutex to protect rdtgroup access. 
*/ -DEFINE_MUTEX(rdtgroup_mutex); +/* + * rdt_domain structures are kfree()d when their last CPU goes offline, + * and allocated when the first CPU in a new domain comes online. + * The rdt_resource's domain list is updated when this happens. Readers of + * the domain list must either take cpus_read_lock(), or rely on an RCU + * read-side critical section, to avoid observing concurrent modification. + * All writers take this mutex: + */ +static DEFINE_MUTEX(domain_list_lock); /* * The cached resctrl_pqr_state is strictly per CPU and can never be @@ -36,12 +44,6 @@ DEFINE_MUTEX(rdtgroup_mutex); */ DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state); -/* - * Used to store the max resource name width and max resource data width - * to display the schemata in a tabular format - */ -int max_name_width, max_data_width; - /* * Global boolean for rdt_alloc which is true if any * resource allocation is enabled. @@ -67,7 +69,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "L3", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_L3), - .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", .fflags = RFTYPE_RES_CACHE, }, @@ -81,7 +82,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "L2", .cache_level = 2, .domains = domain_init(RDT_RESOURCE_L2), - .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", .fflags = RFTYPE_RES_CACHE, }, @@ -95,7 +95,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "MB", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_MBA), - .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, @@ -107,13 +106,20 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "SMBA", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_SMBA), - .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, }, }; +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) +{ + if (l >= RDT_NUM_RESOURCES) + return NULL; + + return &rdt_resources_all[l].r_resctrl; +} + /* * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs * as they do not have CPUID enumeration support for Cache allocation. @@ -136,15 +142,15 @@ static inline void cache_alloc_hsw_probe(void) { struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3]; struct rdt_resource *r = &hw_res->r_resctrl; - u32 l, h, max_cbm = BIT_MASK(20) - 1; + u64 max_cbm = BIT_ULL_MASK(20) - 1, l3_cbm_0; - if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0)) + if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm)) return; - rdmsr(MSR_IA32_L3_CBM_BASE, l, h); + rdmsrl(MSR_IA32_L3_CBM_BASE, l3_cbm_0); /* If all the bits were set in MSR, return success */ - if (l != max_cbm) + if (l3_cbm_0 != max_cbm) return; hw_res->num_closid = 4; @@ -152,26 +158,12 @@ static inline void cache_alloc_hsw_probe(void) r->cache.cbm_len = 20; r->cache.shareable_bits = 0xc0000; r->cache.min_cbm_bits = 2; + r->cache.arch_has_sparse_bitmasks = false; r->alloc_capable = true; rdt_alloc_capable = true; } -bool is_mba_sc(struct rdt_resource *r) -{ - if (!r) - return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc; - - /* - * The software controller support is only applicable to MBA resource. - * Make sure to check for resource type. - */ - if (r->rid != RDT_RESOURCE_MBA) - return false; - - return r->membw.mba_sc; -} - /* * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values * exposed to user interface and the h/w understandable delay values. 
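The reader contract spelled out in the domain_list_lock comment above pairs with the list_add_tail_rcu()/list_del_rcu() + synchronize_rcu() calls in the hotplug hunks that follow. A minimal sketch of an RCU read-side walk, assuming the struct rdt_domain members used in this file (the helper name itself is hypothetical)::

	static bool rdt_cpu_in_any_domain(struct rdt_resource *r, int cpu)
	{
		struct rdt_domain *d;
		bool found = false;

		/* Readers hold cpus_read_lock() or an RCU read-side section. */
		rcu_read_lock();
		list_for_each_entry_rcu(d, &r->domains, list) {
			if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();

		return found;
	}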
@@ -220,7 +212,6 @@ static __init bool __get_mem_config_intel(struct rdt_resource *r) r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD; else r->membw.throttle_mode = THREAD_THROTTLE_MAX; - thread_throttle_mode_init(); r->alloc_capable = true; @@ -265,15 +256,18 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); union cpuid_0x10_1_eax eax; + union cpuid_0x10_x_ecx ecx; union cpuid_0x10_x_edx edx; - u32 ebx, ecx; + u32 ebx; - cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full); + cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx.full, &edx.full); hw_res->num_closid = edx.split.cos_max + 1; r->cache.cbm_len = eax.split.cbm_len + 1; r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1; r->cache.shareable_bits = ebx & r->default_ctrl; r->data_width = (r->cache.cbm_len + 3) / 4; + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + r->cache.arch_has_sparse_bitmasks = ecx.split.noncont; r->alloc_capable = true; } @@ -297,6 +291,11 @@ static void rdt_get_cdp_l2_config(void) rdt_get_cdp_config(RDT_RESOURCE_L2); } +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l) +{ + return rdt_resources_all[l].cdp_enabled; +} + static void mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) { @@ -346,19 +345,6 @@ cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]); } -struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r) -{ - struct rdt_domain *d; - - list_for_each_entry(d, &r->domains, list) { - /* Find the domain that contains this CPU */ - if (cpumask_test_cpu(cpu, &d->cpu_mask)) - return d; - } - - return NULL; -} - u32 resctrl_arch_get_num_closid(struct rdt_resource *r) { return resctrl_to_arch_res(r)->num_closid; @@ -372,7 +358,7 @@ void rdt_ctrl_update(void *arg) int cpu = smp_processor_id(); struct rdt_domain *d; - d = get_domain_from_cpu(cpu, r); + d = resctrl_get_domain_from_cpu(cpu, r); if (d) { hw_res->msr_update(d, m, r); return; @@ -389,8 +375,8 @@ void rdt_ctrl_update(void *arg) * caller, return the first domain whose id is bigger than the input id. * The domain list is sorted by id in ascending order. 
*/ -struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, - struct list_head **pos) +static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, + struct list_head **pos) { struct rdt_domain *d; struct list_head *l; @@ -414,6 +400,11 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, return NULL; } +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id) +{ + return rdt_find_domain(r, id, NULL); +} + static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); @@ -466,13 +457,13 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom) { size_t tsize; - if (is_mbm_total_enabled()) { + if (resctrl_arch_is_mbm_total_enabled()) { tsize = sizeof(*hw_dom->arch_mbm_total); hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL); if (!hw_dom->arch_mbm_total) return -ENOMEM; } - if (is_mbm_local_enabled()) { + if (resctrl_arch_is_mbm_local_enabled()) { tsize = sizeof(*hw_dom->arch_mbm_local); hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL); if (!hw_dom->arch_mbm_local) { @@ -506,6 +497,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) struct rdt_domain *d; int err; + lockdep_assert_held(&domain_list_lock); + d = rdt_find_domain(r, id, &add_pos); if (IS_ERR(d)) { pr_warn("Couldn't find cache id for CPU %d\n", cpu); @@ -539,11 +532,12 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) return; } - list_add_tail(&d->list, add_pos); + list_add_tail_rcu(&d->list, add_pos); err = resctrl_online_domain(r, d); if (err) { - list_del(&d->list); + list_del_rcu(&d->list); + synchronize_rcu(); domain_free(hw_dom); } } @@ -554,6 +548,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) struct rdt_hw_domain *hw_dom; struct rdt_domain *d; + lockdep_assert_held(&domain_list_lock); + d = rdt_find_domain(r, id, NULL); if (IS_ERR_OR_NULL(d)) { pr_warn("Couldn't find cache id for CPU %d\n", cpu); @@ -564,7 +560,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) cpumask_clear_cpu(cpu, &d->cpu_mask); if (cpumask_empty(&d->cpu_mask)) { resctrl_offline_domain(r, d); - list_del(&d->list); + list_del_rcu(&d->list); + synchronize_rcu(); /* * rdt_domain "d" is going to be freed below, so clear @@ -576,91 +573,51 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) return; } - - if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) { - if (is_mbm_enabled() && cpu == d->mbm_work_cpu) { - cancel_delayed_work(&d->mbm_over); - mbm_setup_overflow_handler(d, 0); - } - if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && - has_busy_rmid(r, d)) { - cancel_delayed_work(&d->cqm_limbo); - cqm_setup_limbo_handler(d, 0); - } - } } static void clear_closid_rmid(int cpu) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); - state->default_closid = 0; - state->default_rmid = 0; - state->cur_closid = 0; - state->cur_rmid = 0; - wrmsr(MSR_IA32_PQR_ASSOC, 0, 0); + state->default_closid = RESCTRL_RESERVED_CLOSID; + state->default_rmid = RESCTRL_RESERVED_RMID; + state->cur_closid = RESCTRL_RESERVED_CLOSID; + state->cur_rmid = RESCTRL_RESERVED_RMID; + wrmsr(MSR_IA32_PQR_ASSOC, RESCTRL_RESERVED_RMID, + RESCTRL_RESERVED_CLOSID); } -static int resctrl_online_cpu(unsigned int cpu) +static int resctrl_arch_online_cpu(unsigned int cpu) { struct rdt_resource *r; - mutex_lock(&rdtgroup_mutex); + mutex_lock(&domain_list_lock); for_each_capable_rdt_resource(r) domain_add_cpu(cpu, r); - /* The cpu is 
set in default rdtgroup after online. */ - cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); + mutex_unlock(&domain_list_lock); + clear_closid_rmid(cpu); - mutex_unlock(&rdtgroup_mutex); + resctrl_online_cpu(cpu); return 0; } -static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) +static int resctrl_arch_offline_cpu(unsigned int cpu) { - struct rdtgroup *cr; - - list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { - if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) { - break; - } - } -} - -static int resctrl_offline_cpu(unsigned int cpu) -{ - struct rdtgroup *rdtgrp; struct rdt_resource *r; - mutex_lock(&rdtgroup_mutex); + resctrl_offline_cpu(cpu); + + mutex_lock(&domain_list_lock); for_each_capable_rdt_resource(r) domain_remove_cpu(cpu, r); - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { - clear_childcpus(rdtgrp, cpu); - break; - } - } + mutex_unlock(&domain_list_lock); + clear_closid_rmid(cpu); - mutex_unlock(&rdtgroup_mutex); return 0; } -/* - * Choose a width for the resource name and resource data based on the - * resource that has widest name and cbm. - */ -static __init void rdt_init_padding(void) -{ - struct rdt_resource *r; - - for_each_alloc_capable_rdt_resource(r) { - if (r->data_width > max_data_width) - max_data_width = r->data_width; - } -} - enum { RDT_FLAG_CMT, RDT_FLAG_MBM_TOTAL, @@ -686,7 +643,7 @@ struct rdt_options { bool force_off, force_on; }; -static struct rdt_options rdt_options[] __initdata = { +static struct rdt_options rdt_options[] __ro_after_init = { RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC), RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL), RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL), @@ -726,7 +683,7 @@ static int __init set_rdt_options(char *str) } __setup("rdt", set_rdt_options); -bool __init rdt_cpu_has(int flag) +bool rdt_cpu_has(int flag) { bool ret = boot_cpu_has(flag); struct rdt_options *o; @@ -746,6 +703,21 @@ bool __init rdt_cpu_has(int flag) return ret; } +bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt) +{ + if (!rdt_cpu_has(X86_FEATURE_BMEC)) + return false; + + switch (evt) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL); + case QOS_L3_MBM_LOCAL_EVENT_ID: + return rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL); + default: + return false; + } +} + static __init bool get_mem_config(void) { struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA]; @@ -755,7 +727,8 @@ static __init bool get_mem_config(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) return __get_mem_config_intel(&hw_res->r_resctrl); - else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) return __rdt_get_mem_config_amd(&hw_res->r_resctrl); return false; @@ -870,7 +843,6 @@ static __init void rdt_init_res_defs_intel(void) if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { - r->cache.arch_has_sparse_bitmaps = false; r->cache.arch_has_per_cpu_cfg = false; r->cache.min_cbm_bits = 1; } else if (r->rid == RDT_RESOURCE_MBA) { @@ -890,7 +862,7 @@ static __init void rdt_init_res_defs_amd(void) if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { - r->cache.arch_has_sparse_bitmaps = true; + r->cache.arch_has_sparse_bitmasks = true; r->cache.arch_has_per_cpu_cfg = true; r->cache.min_cbm_bits = 0; } else if (r->rid == RDT_RESOURCE_MBA) { @@ -907,7 +879,8 
@@ static __init void rdt_init_res_defs(void) { if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) rdt_init_res_defs_intel(); - else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) rdt_init_res_defs_amd(); } @@ -938,12 +911,14 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c) c->x86_cache_occ_scale = ebx; c->x86_cache_mbm_width_offset = eax & 0xff; - if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset) + if ((c->x86_vendor == X86_VENDOR_AMD || + c->x86_vendor == X86_VENDOR_HYGON) && + !c->x86_cache_mbm_width_offset) c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD; } } -static int __init resctrl_late_init(void) +static int __init resctrl_arch_late_init(void) { struct rdt_resource *r; int state, ret; @@ -959,15 +934,14 @@ static int __init resctrl_late_init(void) if (!get_rdt_resources()) return -ENODEV; - rdt_init_padding(); - state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/resctrl/cat:online:", - resctrl_online_cpu, resctrl_offline_cpu); + resctrl_arch_online_cpu, + resctrl_arch_offline_cpu); if (state < 0) return state; - ret = rdtgroup_init(); + ret = resctrl_init(); if (ret) { cpuhp_remove_state(state); return ret; @@ -983,12 +957,13 @@ static int __init resctrl_late_init(void) return 0; } -late_initcall(resctrl_late_init); +late_initcall(resctrl_arch_late_init); -static void __exit resctrl_exit(void) +static void __exit resctrl_arch_exit(void) { cpuhp_remove_state(rdt_online); - rdtgroup_exit(); + + resctrl_exit(); } -__exitcall(resctrl_exit); +__exitcall(resctrl_arch_exit); diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index a701e7921ea5c748ba3c118d3d39b5f3a5ced52b..c5c3eaea27b65be9f729863b9db4f5ecf933c940 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -19,256 +19,9 @@ #include #include #include -#include "internal.h" - -/* - * Check whether MBA bandwidth percentage value is correct. The value is - * checked against the minimum and max bandwidth values specified by the - * hardware. The allocated bandwidth percentage is rounded to the next - * control step available on the hardware. - */ -static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r) -{ - int ret; - u32 bw; - - /* - * Only linear delay values is supported for current Intel SKUs. - */ - if (!r->membw.delay_linear && r->membw.arch_needs_linear) { - rdt_last_cmd_puts("No support for non-linear MB domains\n"); - return false; - } - - ret = kstrtou32(buf, 10, &bw); - if (ret) { - rdt_last_cmd_printf("Invalid MB value %s\n", buf); - return false; - } - - /* Nothing else to do if software controller is enabled. 
*/ - if (is_mba_sc(r)) { - *data = bw; - return true; - } - - if (bw < r->membw.min_bw || bw > r->default_ctrl) { - rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n", - bw, r->membw.min_bw, r->default_ctrl); - return false; - } - - *data = roundup(bw, (unsigned long)r->membw.bw_gran); - return true; -} - -int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) -{ - struct resctrl_staged_config *cfg; - u32 closid = data->rdtgrp->closid; - struct rdt_resource *r = s->res; - u32 bw_val; - - cfg = &d->staged_config[s->conf_type]; - if (cfg->have_new_ctrl) { - rdt_last_cmd_printf("Duplicate domain %d\n", d->id); - return -EINVAL; - } - - if (!bw_validate(data->buf, &bw_val, r)) - return -EINVAL; - - if (is_mba_sc(r)) { - d->mbps_val[closid] = bw_val; - return 0; - } - - cfg->new_ctrl = bw_val; - cfg->have_new_ctrl = true; - - return 0; -} - -/* - * Check whether a cache bit mask is valid. - * For Intel the SDM says: - * Please note that all (and only) contiguous '1' combinations - * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.). - * Additionally Haswell requires at least two bits set. - * AMD allows non-contiguous bitmasks. - */ -static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) -{ - unsigned long first_bit, zero_bit, val; - unsigned int cbm_len = r->cache.cbm_len; - int ret; - - ret = kstrtoul(buf, 16, &val); - if (ret) { - rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); - return false; - } - - if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) { - rdt_last_cmd_puts("Mask out of range\n"); - return false; - } - - first_bit = find_first_bit(&val, cbm_len); - zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); - - /* Are non-contiguous bitmaps allowed? */ - if (!r->cache.arch_has_sparse_bitmaps && - (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) { - rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val); - return false; - } - - if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("Need at least %d bits in the mask\n", - r->cache.min_cbm_bits); - return false; - } - - *data = val; - return true; -} - -/* - * Read one cache bit mask (hex). Check that it is valid for the current - * resource type. - */ -int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) -{ - struct rdtgroup *rdtgrp = data->rdtgrp; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - u32 cbm_val; - - cfg = &d->staged_config[s->conf_type]; - if (cfg->have_new_ctrl) { - rdt_last_cmd_printf("Duplicate domain %d\n", d->id); - return -EINVAL; - } - - /* - * Cannot set up more than one pseudo-locked region in a cache - * hierarchy. - */ - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && - rdtgroup_pseudo_locked_in_hierarchy(d)) { - rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n"); - return -EINVAL; - } - - if (!cbm_validate(data->buf, &cbm_val, r)) - return -EINVAL; - - if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || - rdtgrp->mode == RDT_MODE_SHAREABLE) && - rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) { - rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n"); - return -EINVAL; - } - - /* - * The CBM may not overlap with the CBM of another closid if - * either is exclusive. 
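Backing up to the contiguity test in cbm_validate() above, before the overlap checks continue: the three bit searches reject any mask with a hole unless the CPU advertises sparse bitmask support (renamed to arch_has_sparse_bitmasks by this series). A distilled sketch with a hypothetical helper name::

    static bool cbm_is_valid(unsigned long val, unsigned int cbm_len,
                             bool sparse_ok, unsigned int min_bits)
    {
            unsigned long first_bit, zero_bit;

            first_bit = find_first_bit(&val, cbm_len);
            zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

            /* Any set bit after the first zero means a hole in the mask. */
            if (!sparse_ok && find_next_bit(&val, cbm_len, zero_bit) < cbm_len)
                    return false;

            return (zero_bit - first_bit) >= min_bits;
    }

With cbm_len = 12 and sparse masks unsupported, 0x0ff0 is accepted while 0xf0f is rejected, because bit 8 is set after the zero at bit 4.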
- */ - if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) { - rdt_last_cmd_puts("Overlaps with exclusive group\n"); - return -EINVAL; - } - - if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) { - if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - rdt_last_cmd_puts("Overlaps with other group\n"); - return -EINVAL; - } - } - - cfg->new_ctrl = cbm_val; - cfg->have_new_ctrl = true; - - return 0; -} - -/* - * For each domain in this resource we expect to find a series of: - * id=mask - * separated by ";". The "id" is in decimal, and must match one of - * the "id"s for this resource. - */ -static int parse_line(char *line, struct resctrl_schema *s, - struct rdtgroup *rdtgrp) -{ - enum resctrl_conf_type t = s->conf_type; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - struct rdt_parse_data data; - char *dom = NULL, *id; - struct rdt_domain *d; - unsigned long dom_id; +#include - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && - (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) { - rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); - return -EINVAL; - } - -next: - if (!line || line[0] == '\0') - return 0; - dom = strsep(&line, ";"); - id = strsep(&dom, "="); - if (!dom || kstrtoul(id, 10, &dom_id)) { - rdt_last_cmd_puts("Missing '=' or non-numeric domain\n"); - return -EINVAL; - } - dom = strim(dom); - list_for_each_entry(d, &r->domains, list) { - if (d->id == dom_id) { - data.buf = dom; - data.rdtgrp = rdtgrp; - if (r->parse_ctrlval(&data, s, d)) - return -EINVAL; - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - cfg = &d->staged_config[t]; - /* - * In pseudo-locking setup mode and just - * parsed a valid CBM that should be - * pseudo-locked. Only one locked region per - * resource group and domain so just do - * the required initialization for single - * region and return. 
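The "id=mask" grammar described in the parse_line() comment above is plain strsep() tokenising. A freestanding sketch of the same parse (parse_pairs() is hypothetical; the real parse_line() hands each pair to r->parse_ctrlval() instead of printing it)::

    static int parse_pairs(char *line)
    {
            char *dom, *id_str;
            unsigned long id, mask;

            while (line && line[0] != '\0') {
                    dom = strsep(&line, ";");
                    id_str = strsep(&dom, "=");
                    /* cache masks are hex; MB values would parse as decimal */
                    if (!dom || kstrtoul(id_str, 10, &id) ||
                        kstrtoul(strim(dom), 16, &mask))
                            return -EINVAL;
                    pr_debug("domain %lu -> %#lx\n", id, mask);
            }
            return 0;
    }

For the schemata line ``L3:0=ffff;1=3f``, the parser above would be handed ``0=ffff;1=3f`` after rdtgroup_parse_resource() strips the resource name.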
- */ - rdtgrp->plr->s = s; - rdtgrp->plr->d = d; - rdtgrp->plr->cbm = cfg->new_ctrl; - d->plr = rdtgrp->plr; - return 0; - } - goto next; - } - } - return -EINVAL; -} - -static u32 get_config_index(u32 closid, enum resctrl_conf_type type) -{ - switch (type) { - default: - case CDP_NONE: - return closid; - case CDP_CODE: - return closid * 2 + 1; - case CDP_DATA: - return closid * 2; - } -} +#include "internal.h" static bool apply_config(struct rdt_hw_domain *hw_dom, struct resctrl_staged_config *cfg, u32 idx, @@ -291,7 +44,7 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - u32 idx = get_config_index(closid, t); + u32 idx = resctrl_get_config_index(closid, t); struct msr_param msr_param; if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) @@ -317,6 +70,9 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) struct rdt_domain *d; u32 idx; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) return -ENOMEM; @@ -328,7 +84,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) if (!cfg->have_new_ctrl) continue; - idx = get_config_index(closid, t); + idx = resctrl_get_config_index(closid, t); if (!apply_config(hw_dom, cfg, idx, cpu_mask)) continue; @@ -355,232 +111,11 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) return 0; } -static int rdtgroup_parse_resource(char *resname, char *tok, - struct rdtgroup *rdtgrp) -{ - struct resctrl_schema *s; - - list_for_each_entry(s, &resctrl_schema_all, list) { - if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid) - return parse_line(tok, s, rdtgrp); - } - rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname); - return -EINVAL; -} - -ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct resctrl_schema *s; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - char *tok, *resname; - int ret = 0; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - buf[nbytes - 1] = '\0'; - - cpus_read_lock(); - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - cpus_read_unlock(); - return -ENOENT; - } - rdt_last_cmd_clear(); - - /* - * No changes to pseudo-locked region allowed. It has to be removed - * and re-created instead. - */ - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - ret = -EINVAL; - rdt_last_cmd_puts("Resource group is pseudo-locked\n"); - goto out; - } - - rdt_staged_configs_clear(); - - while ((tok = strsep(&buf, "\n")) != NULL) { - resname = strim(strsep(&tok, ":")); - if (!tok) { - rdt_last_cmd_puts("Missing ':'\n"); - ret = -EINVAL; - goto out; - } - if (tok[0] == '\0') { - rdt_last_cmd_printf("Missing '%s' value\n", resname); - ret = -EINVAL; - goto out; - } - ret = rdtgroup_parse_resource(resname, tok, rdtgrp); - if (ret) - goto out; - } - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - - /* - * Writes to mba_sc resources update the software controller, - * not the control MSR. 
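The index helper deleted above (re-homed as resctrl_get_config_index()) is worth spelling out, since it encodes how CDP halves the CLOSID space: with CDP enabled each CLOSID owns an even/odd pair of hardware control slots, e.g. closid 3 uses slot 6 for data and slot 7 for code::

    static u32 config_index(u32 closid, enum resctrl_conf_type type)
    {
            switch (type) {
            default:
            case CDP_NONE:
                    return closid;          /* one control per closid */
            case CDP_CODE:
                    return closid * 2 + 1;  /* odd slots hold code masks */
            case CDP_DATA:
                    return closid * 2;      /* even slots hold data masks */
            }
    }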
- */ - if (is_mba_sc(r)) - continue; - - ret = resctrl_arch_update_domains(r, rdtgrp->closid); - if (ret) - goto out; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - /* - * If pseudo-locking fails we keep the resource group in - * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service - * active and updated for just the domain the pseudo-locked - * region was requested for. - */ - ret = rdtgroup_pseudo_lock_create(rdtgrp); - } - -out: - rdt_staged_configs_clear(); - rdtgroup_kn_unlock(of->kn); - cpus_read_unlock(); - return ret ?: nbytes; -} - u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - u32 idx = get_config_index(closid, type); + u32 idx = resctrl_get_config_index(closid, type); return hw_dom->ctrl_val[idx]; } - -static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid) -{ - struct rdt_resource *r = schema->res; - struct rdt_domain *dom; - bool sep = false; - u32 ctrl_val; - - seq_printf(s, "%*s:", max_name_width, schema->name); - list_for_each_entry(dom, &r->domains, list) { - if (sep) - seq_puts(s, ";"); - - if (is_mba_sc(r)) - ctrl_val = dom->mbps_val[closid]; - else - ctrl_val = resctrl_arch_get_config(r, dom, closid, - schema->conf_type); - - seq_printf(s, r->format_str, dom->id, max_data_width, - ctrl_val); - sep = true; - } - seq_puts(s, "\n"); -} - -int rdtgroup_schemata_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct resctrl_schema *schema; - struct rdtgroup *rdtgrp; - int ret = 0; - u32 closid; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - list_for_each_entry(schema, &resctrl_schema_all, list) { - seq_printf(s, "%s:uninitialized\n", schema->name); - } - } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - if (!rdtgrp->plr->d) { - rdt_last_cmd_clear(); - rdt_last_cmd_puts("Cache domain offline\n"); - ret = -ENODEV; - } else { - seq_printf(s, "%s:%d=%x\n", - rdtgrp->plr->s->res->name, - rdtgrp->plr->d->id, - rdtgrp->plr->cbm); - } - } else { - closid = rdtgrp->closid; - list_for_each_entry(schema, &resctrl_schema_all, list) { - if (closid < schema->num_closid) - show_doms(s, schema, closid); - } - } - } else { - ret = -ENOENT; - } - rdtgroup_kn_unlock(of->kn); - return ret; -} - -void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, - struct rdt_domain *d, struct rdtgroup *rdtgrp, - int evtid, int first) -{ - /* - * setup the parameters to send to the IPI to read the data. 
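That IPI comment is the whole mechanism: the monitoring counters are domain-scoped MSRs, so the read must execute on a CPU inside the domain. A sketch of that shape (read_event_cb() is a hypothetical stand-in for mon_event_count())::

    static void read_event_cb(void *info)
    {
            struct rmid_read *rr = info;

            /* Runs on a CPU of the domain; domain-local MSRs are reachable. */
            rr->val = 0;
            /* ... program QM_EVTSEL and read QM_CTR for rr->evtid here ... */
    }

    static void domain_event_read(struct rmid_read *rr, struct rdt_domain *d)
    {
            /* Any CPU of the domain will do; wait=1 makes this synchronous. */
            smp_call_function_any(&d->cpu_mask, read_event_cb, rr, 1);
    }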
- */ - rr->rgrp = rdtgrp; - rr->evtid = evtid; - rr->r = r; - rr->d = d; - rr->val = 0; - rr->first = first; - - smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); -} - -int rdtgroup_mondata_show(struct seq_file *m, void *arg) -{ - struct kernfs_open_file *of = m->private; - u32 resid, evtid, domid; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - union mon_data_bits md; - struct rdt_domain *d; - struct rmid_read rr; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - ret = -ENOENT; - goto out; - } - - md.priv = of->kn->priv; - resid = md.u.rid; - domid = md.u.domid; - evtid = md.u.evtid; - - r = &rdt_resources_all[resid].r_resctrl; - d = rdt_find_domain(r, domid, NULL); - if (IS_ERR_OR_NULL(d)) { - ret = -ENOENT; - goto out; - } - - mon_event_read(&rr, r, d, rdtgrp, evtid, false); - - if (rr.err == -EIO) - seq_puts(m, "Error\n"); - else if (rr.err == -EINVAL) - seq_puts(m, "Unavailable\n"); - else - seq_printf(m, "%llu\n", rr.val); - -out: - rdtgroup_kn_unlock(of->kn); - return ret; -} diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 566386abb877f9673d66830559ef9cd32c499a85..bf35389926677a29355795f6511e038a7da066f4 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -7,17 +7,14 @@ #include #include #include +#include + +#include #define L3_QOS_CDP_ENABLE 0x01ULL #define L2_QOS_CDP_ENABLE 0x01ULL -#define CQM_LIMBOCHECK_INTERVAL 1000 - -#define MBM_CNTR_WIDTH_BASE 24 -#define MBM_OVERFLOW_INTERVAL 1000 -#define MAX_MBA_BW 100u -#define MBA_IS_LINEAR 0x4 #define MBM_CNTR_WIDTH_OFFSET_AMD 20 #define RMID_VAL_ERROR BIT_ULL(63) @@ -29,278 +26,6 @@ */ #define MBM_CNTR_WIDTH_OFFSET_MAX (62 - MBM_CNTR_WIDTH_BASE) -/* Reads to Local DRAM Memory */ -#define READS_TO_LOCAL_MEM BIT(0) - -/* Reads to Remote DRAM Memory */ -#define READS_TO_REMOTE_MEM BIT(1) - -/* Non-Temporal Writes to Local Memory */ -#define NON_TEMP_WRITE_TO_LOCAL_MEM BIT(2) - -/* Non-Temporal Writes to Remote Memory */ -#define NON_TEMP_WRITE_TO_REMOTE_MEM BIT(3) - -/* Reads to Local Memory the system identifies as "Slow Memory" */ -#define READS_TO_LOCAL_S_MEM BIT(4) - -/* Reads to Remote Memory the system identifies as "Slow Memory" */ -#define READS_TO_REMOTE_S_MEM BIT(5) - -/* Dirty Victims to All Types of Memory */ -#define DIRTY_VICTIMS_TO_ALL_MEM BIT(6) - -/* Max event bits supported */ -#define MAX_EVT_CONFIG_BITS GENMASK(6, 0) - -struct rdt_fs_context { - struct kernfs_fs_context kfc; - bool enable_cdpl2; - bool enable_cdpl3; - bool enable_mba_mbps; -}; - -static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) -{ - struct kernfs_fs_context *kfc = fc->fs_private; - - return container_of(kfc, struct rdt_fs_context, kfc); -} - -DECLARE_STATIC_KEY_FALSE(rdt_enable_key); -DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); - -/** - * struct mon_evt - Entry in the event list of a resource - * @evtid: event id - * @name: name of the event - * @configurable: true if the event is configurable - * @list: entry in &rdt_resource->evt_list - */ -struct mon_evt { - enum resctrl_event_id evtid; - char *name; - bool configurable; - struct list_head list; -}; - -/** - * union mon_data_bits - Monitoring details for each event file - * @priv: Used to store monitoring event data in @u - * as kernfs private data - * @rid: Resource id associated with the event file - * @evtid: Event id associated with the event file - * @domid: The domain to which the event file belongs - * @u: Name of the 
bit fields struct - */ -union mon_data_bits { - void *priv; - struct { - unsigned int rid : 10; - enum resctrl_event_id evtid : 8; - unsigned int domid : 14; - } u; -}; - -struct rmid_read { - struct rdtgroup *rgrp; - struct rdt_resource *r; - struct rdt_domain *d; - enum resctrl_event_id evtid; - bool first; - int err; - u64 val; -}; - -extern bool rdt_alloc_capable; -extern bool rdt_mon_capable; -extern unsigned int rdt_mon_features; -extern struct list_head resctrl_schema_all; - -enum rdt_group_type { - RDTCTRL_GROUP = 0, - RDTMON_GROUP, - RDT_NUM_GROUP, -}; - -/** - * enum rdtgrp_mode - Mode of a RDT resource group - * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations - * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed - * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking - * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations - * allowed AND the allocations are Cache Pseudo-Locked - * @RDT_NUM_MODES: Total number of modes - * - * The mode of a resource group enables control over the allowed overlap - * between allocations associated with different resource groups (classes - * of service). User is able to modify the mode of a resource group by - * writing to the "mode" resctrl file associated with the resource group. - * - * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by - * writing the appropriate text to the "mode" file. A resource group enters - * "pseudo-locked" mode after the schemata is written while the resource - * group is in "pseudo-locksetup" mode. - */ -enum rdtgrp_mode { - RDT_MODE_SHAREABLE = 0, - RDT_MODE_EXCLUSIVE, - RDT_MODE_PSEUDO_LOCKSETUP, - RDT_MODE_PSEUDO_LOCKED, - - /* Must be last */ - RDT_NUM_MODES, -}; - -/** - * struct mongroup - store mon group's data in resctrl fs. - * @mon_data_kn: kernfs node for the mon_data directory - * @parent: parent rdtgrp - * @crdtgrp_list: child rdtgroup node list - * @rmid: rmid for this rdtgroup - */ -struct mongroup { - struct kernfs_node *mon_data_kn; - struct rdtgroup *parent; - struct list_head crdtgrp_list; - u32 rmid; -}; - -/** - * struct pseudo_lock_region - pseudo-lock region information - * @s: Resctrl schema for the resource to which this - * pseudo-locked region belongs - * @d: RDT domain to which this pseudo-locked region - * belongs - * @cbm: bitmask of the pseudo-locked region - * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread - * completion - * @thread_done: variable used by waitqueue to test if pseudo-locking - * thread completed - * @cpu: core associated with the cache on which the setup code - * will be run - * @line_size: size of the cache lines - * @size: size of pseudo-locked region in bytes - * @kmem: the kernel memory associated with pseudo-locked region - * @minor: minor number of character device associated with this - * region - * @debugfs_dir: pointer to this region's directory in the debugfs - * filesystem - * @pm_reqs: Power management QoS requests related to this region - */ -struct pseudo_lock_region { - struct resctrl_schema *s; - struct rdt_domain *d; - u32 cbm; - wait_queue_head_t lock_thread_wq; - int thread_done; - int cpu; - unsigned int line_size; - unsigned int size; - void *kmem; - unsigned int minor; - struct dentry *debugfs_dir; - struct list_head pm_reqs; -}; - -/** - * struct rdtgroup - store rdtgroup's data in resctrl file system. 
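Backing up to union mon_data_bits above: it is a small trick. The three identifiers fit in 32 bits, so they are stored directly in the kernfs ``priv`` pointer with no allocation. A sketch of the round trip, with ``kn``/``of`` as in rdtgroup_mondata_show()::

    union mon_data_bits md;

    md.priv = NULL;                         /* start from a clean pointer */
    md.u.rid = RDT_RESOURCE_L3;             /* which resource */
    md.u.evtid = QOS_L3_OCCUP_EVENT_ID;     /* which event file */
    md.u.domid = 2;                         /* which domain */
    kn->priv = md.priv;                     /* stash the packed fields */

    /* Later, in the show() path: */
    md.priv = of->kn->priv;
    /* md.u.rid, md.u.domid and md.u.evtid identify the counter to read. */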
- * @kn: kernfs node - * @rdtgroup_list: linked list for all rdtgroups - * @closid: closid for this rdtgroup - * @cpu_mask: CPUs assigned to this rdtgroup - * @flags: status bits - * @waitcount: how many cpus expect to find this - * group when they acquire rdtgroup_mutex - * @type: indicates type of this rdtgroup - either - * monitor only or ctrl_mon group - * @mon: mongroup related data - * @mode: mode of resource group - * @plr: pseudo-locked region - */ -struct rdtgroup { - struct kernfs_node *kn; - struct list_head rdtgroup_list; - u32 closid; - struct cpumask cpu_mask; - int flags; - atomic_t waitcount; - enum rdt_group_type type; - struct mongroup mon; - enum rdtgrp_mode mode; - struct pseudo_lock_region *plr; -}; - -/* rdtgroup.flags */ -#define RDT_DELETED 1 - -/* rftype.flags */ -#define RFTYPE_FLAGS_CPUS_LIST 1 - -/* - * Define the file type flags for base and info directories. - */ -#define RFTYPE_INFO BIT(0) -#define RFTYPE_BASE BIT(1) -#define RF_CTRLSHIFT 4 -#define RF_MONSHIFT 5 -#define RF_TOPSHIFT 6 -#define RFTYPE_CTRL BIT(RF_CTRLSHIFT) -#define RFTYPE_MON BIT(RF_MONSHIFT) -#define RFTYPE_TOP BIT(RF_TOPSHIFT) -#define RFTYPE_RES_CACHE BIT(8) -#define RFTYPE_RES_MB BIT(9) -#define RF_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) -#define RF_MON_INFO (RFTYPE_INFO | RFTYPE_MON) -#define RF_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) -#define RF_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) - -/* List of all resource groups */ -extern struct list_head rdt_all_groups; - -extern int max_name_width, max_data_width; - -int __init rdtgroup_init(void); -void __exit rdtgroup_exit(void); - -/** - * struct rftype - describe each file in the resctrl file system - * @name: File name - * @mode: Access mode - * @kf_ops: File operations - * @flags: File specific RFTYPE_FLAGS_* flags - * @fflags: File specific RF_* or RFTYPE_* flags - * @seq_show: Show content of the file - * @write: Write to the file - */ -struct rftype { - char *name; - umode_t mode; - const struct kernfs_ops *kf_ops; - unsigned long flags; - unsigned long fflags; - - int (*seq_show)(struct kernfs_open_file *of, - struct seq_file *sf, void *v); - /* - * write() is the generic write callback which maps directly to - * kernfs write operation and overrides all other operations. - * Maximum write size is determined by ->max_write_len. - */ - ssize_t (*write)(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off); -}; - -/** - * struct mbm_state - status for each MBM counter in each domain - * @prev_bw_bytes: Previous bytes value read for bandwidth calculation - * @prev_bw: The most recent bandwidth in MBps - */ -struct mbm_state { - u64 prev_bw_bytes; - u32 prev_bw; -}; - /** * struct arch_mbm_state - values used to compute resctrl_arch_rmid_read()s * return value. 
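The prev_msr values that arch_mbm_state carries exist to widen the hardware counters, whose width varies by platform. The widening used by mbm_overflow_count() (unchanged by this series, shown here for reference) tolerates exactly one wrap between reads::

    static u64 overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
    {
            u64 shift = 64 - width, chunks;

            /* Shift both reads to the top so the subtraction wraps at 'width'. */
            chunks = (cur_msr << shift) - (prev_msr << shift);
            return chunks >> shift;
    }

With width = 24, prev_msr = 0xfffffe and cur_msr = 0x2 correctly yield 4 chunks rather than a huge negative delta.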
@@ -347,37 +72,6 @@ struct msr_param { u32 high; }; -static inline bool is_llc_occupancy_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID)); -} - -static inline bool is_mbm_total_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID)); -} - -static inline bool is_mbm_local_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID)); -} - -static inline bool is_mbm_enabled(void) -{ - return (is_mbm_total_enabled() || is_mbm_local_enabled()); -} - -static inline bool is_mbm_event(int e) -{ - return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && - e <= QOS_L3_MBM_LOCAL_EVENT_ID); -} - -struct rdt_parse_data { - struct rdtgroup *rdtgrp; - char *buf; -}; - /** * struct rdt_hw_resource - arch private attributes of a resctrl resource * @r_resctrl: Attributes of the resource used directly by resctrl. @@ -390,8 +84,6 @@ struct rdt_parse_data { * @msr_update: Function pointer to update QOS MSRs * @mon_scale: cqm counter * mon_scale = occupancy in bytes * @mbm_width: Monitor width, to detect and correct for overflow. - * @mbm_cfg_mask: Bandwidth sources that can be tracked when Bandwidth - * Monitoring Event Configuration (BMEC) is supported. * @cdp_enabled: CDP state of this resource * * Members of this structure are either private to the architecture @@ -406,7 +98,6 @@ struct rdt_hw_resource { struct rdt_resource *r); unsigned int mon_scale; unsigned int mbm_width; - unsigned int mbm_cfg_mask; bool cdp_enabled; }; @@ -415,28 +106,7 @@ static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r return container_of(r, struct rdt_hw_resource, r_resctrl); } -int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d); -int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d); - -extern struct mutex rdtgroup_mutex; - extern struct rdt_hw_resource rdt_resources_all[]; -extern struct rdtgroup rdtgroup_default; -DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); - -extern struct dentry *debugfs_resctrl; - -enum resctrl_res_level { - RDT_RESOURCE_L3, - RDT_RESOURCE_L2, - RDT_RESOURCE_MBA, - RDT_RESOURCE_SMBA, - - /* Must be the last */ - RDT_NUM_RESOURCES, -}; static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res) { @@ -446,13 +116,6 @@ static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res) return &hw_res->r_resctrl; } -static inline bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l) -{ - return rdt_resources_all[l].cdp_enabled; -} - -int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable); - /* * To return the common struct rdt_resource, which is contained in struct * rdt_hw_resource, walk the resctrl member of struct rdt_hw_resource. 
@@ -490,6 +153,15 @@ union cpuid_0x10_3_eax { unsigned int full; }; +/* CPUID.(EAX=10H, ECX=ResID).ECX */ +union cpuid_0x10_x_ecx { + struct { + unsigned int reserved:3; + unsigned int noncont:1; + } split; + unsigned int full; +}; + /* CPUID.(EAX=10H, ECX=ResID).EDX */ union cpuid_0x10_x_edx { struct { @@ -498,61 +170,10 @@ union cpuid_0x10_x_edx { unsigned int full; }; -void rdt_last_cmd_clear(void); -void rdt_last_cmd_puts(const char *s); -__printf(1, 2) -void rdt_last_cmd_printf(const char *fmt, ...); - void rdt_ctrl_update(void *arg); -struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn); -void rdtgroup_kn_unlock(struct kernfs_node *kn); -int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name); -int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, - umode_t mask); -struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, - struct list_head **pos); -ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off); -int rdtgroup_schemata_show(struct kernfs_open_file *of, - struct seq_file *s, void *v); -bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, - unsigned long cbm, int closid, bool exclusive); -unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, - unsigned long cbm); -enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); -int rdtgroup_tasks_assigned(struct rdtgroup *r); -int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); -int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); -bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); -bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); -int rdt_pseudo_lock_init(void); -void rdt_pseudo_lock_release(void); -int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); -void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); -struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); -int closids_supported(void); -void closid_free(int closid); -int alloc_rmid(void); -void free_rmid(u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); -bool __init rdt_cpu_has(int flag); -void mon_event_count(void *info); -int rdtgroup_mondata_show(struct seq_file *m, void *arg); -void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, - struct rdt_domain *d, struct rdtgroup *rdtgrp, - int evtid, int first); -void mbm_setup_overflow_handler(struct rdt_domain *dom, - unsigned long delay_ms); -void mbm_handle_overflow(struct work_struct *work); +bool rdt_cpu_has(int flag); void __init intel_rdt_mbm_apply_quirk(void); -bool is_mba_sc(struct rdt_resource *r); -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); -void cqm_handle_limbo(struct work_struct *work); -bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); -void __check_limbo(struct rdt_domain *d, bool force_free); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); -void __init thread_throttle_mode_init(void); -void __init mbm_config_rftype_init(const char *config); -void rdt_staged_configs_clear(void); #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 3a6c069614eb84304dd1fec5506ef97cf358790e..e0cc1b4992798dbbb5f89b46e287d32d22fe7233 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -15,6 +15,7 @@ * Software Developer Manual June 2016, volume 3, section 17.17. 
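For reference while reading the monitor.c hunks below, the MSR handshake behind __rmid_read() as the SDM section cited above defines it: select the (RMID, event) pair in IA32_QM_EVTSEL, then read IA32_QM_CTR and check the status bits. A condensed sketch::

    static int rmid_read_once(u32 rmid, enum resctrl_event_id eventid, u64 *val)
    {
            u64 msr_val;

            wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid); /* event in EAX, RMID in EDX */
            rdmsrl(MSR_IA32_QM_CTR, msr_val);

            if (msr_val & RMID_VAL_ERROR)     /* bad RMID/event combination */
                    return -EIO;
            if (msr_val & RMID_VAL_UNAVAIL)   /* no data yet, retry later */
                    return -EINVAL;

            *val = msr_val;
            return 0;
    }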
*/ +#include #include #include #include @@ -24,33 +25,6 @@ #include "internal.h" -struct rmid_entry { - u32 rmid; - int busy; - struct list_head list; -}; - -/* - * @rmid_free_lru - A least recently used list of free RMIDs - * These RMIDs are guaranteed to have an occupancy less than the - * threshold occupancy - */ -static LIST_HEAD(rmid_free_lru); - -/* - * @rmid_limbo_count - count of currently unused but (potentially) - * dirty RMIDs. - * This counts RMIDs that no one is currently using but that - * may have a occupancy value > resctrl_rmid_realloc_threshold. User can - * change the threshold occupancy value. - */ -static unsigned int rmid_limbo_count; - -/* - * @rmid_entry - The entry in the limbo and free lists. - */ -static struct rmid_entry *rmid_ptrs; - /* * Global boolean for rdt_monitor which is true if any * resource monitoring is enabled. @@ -62,17 +36,6 @@ bool rdt_mon_capable; */ unsigned int rdt_mon_features; -/* - * This is the threshold cache occupancy in bytes at which we will consider an - * RMID available for re-allocation. - */ -unsigned int resctrl_rmid_realloc_threshold; - -/* - * This is the maximum value for the reallocation threshold, in bytes. - */ -unsigned int resctrl_rmid_realloc_limit; - #define CF(cf) ((unsigned long)(1048576 * (cf) + 0.5)) /* @@ -136,16 +99,6 @@ static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val) return val; } -static inline struct rmid_entry *__rmid_entry(u32 rmid) -{ - struct rmid_entry *entry; - - entry = &rmid_ptrs[rmid]; - WARN_ON(entry->rmid != rmid); - - return entry; -} - static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val) { u64 msr_val; @@ -181,6 +134,8 @@ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom, return &hw_dom->arch_mbm_total[rmid]; case QOS_L3_MBM_LOCAL_EVENT_ID: return &hw_dom->arch_mbm_local[rmid]; + default: + break; } /* Never expect to get here */ @@ -190,7 +145,8 @@ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom, } void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid) + u32 unused, u32 rmid, + enum resctrl_event_id eventid) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); struct arch_mbm_state *am; @@ -212,11 +168,11 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - if (is_mbm_total_enabled()) + if (resctrl_arch_is_mbm_total_enabled()) memset(hw_dom->arch_mbm_total, 0, sizeof(*hw_dom->arch_mbm_total) * r->num_rmid); - if (is_mbm_local_enabled()) + if (resctrl_arch_is_mbm_local_enabled()) memset(hw_dom->arch_mbm_local, 0, sizeof(*hw_dom->arch_mbm_local) * r->num_rmid); } @@ -230,7 +186,8 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width) } int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid, u64 *val) + u32 unused, u32 rmid, enum resctrl_event_id eventid, + u64 *val, void *ignored) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); @@ -238,6 +195,8 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u64 msr_val, chunks; int ret; + resctrl_arch_rmid_read_context_check(); + if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) return -EINVAL; @@ -260,505 +219,11 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, return 0; } -/* - * Check the RMIDs that 
are marked as busy for this domain. If the - * reported LLC occupancy is below the threshold clear the busy bit and - * decrement the count. If the busy count gets to zero on an RMID, we - * free the RMID - */ -void __check_limbo(struct rdt_domain *d, bool force_free) -{ - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - struct rmid_entry *entry; - u32 crmid = 1, nrmid; - bool rmid_dirty; - u64 val = 0; - - /* - * Skip RMID 0 and start from RMID 1 and check all the RMIDs that - * are marked as busy for occupancy < threshold. If the occupancy - * is less than the threshold decrement the busy counter of the - * RMID and move it to the free list when the counter reaches 0. - */ - for (;;) { - nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid); - if (nrmid >= r->num_rmid) - break; - - entry = __rmid_entry(nrmid); - - if (resctrl_arch_rmid_read(r, d, entry->rmid, - QOS_L3_OCCUP_EVENT_ID, &val)) { - rmid_dirty = true; - } else { - rmid_dirty = (val >= resctrl_rmid_realloc_threshold); - } - - if (force_free || !rmid_dirty) { - clear_bit(entry->rmid, d->rmid_busy_llc); - if (!--entry->busy) { - rmid_limbo_count--; - list_add_tail(&entry->list, &rmid_free_lru); - } - } - crmid = nrmid + 1; - } -} - -bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d) -{ - return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid; -} - -/* - * As of now the RMIDs allocation is global. - * However we keep track of which packages the RMIDs - * are used to optimize the limbo list management. - */ -int alloc_rmid(void) -{ - struct rmid_entry *entry; - - lockdep_assert_held(&rdtgroup_mutex); - - if (list_empty(&rmid_free_lru)) - return rmid_limbo_count ? -EBUSY : -ENOSPC; - - entry = list_first_entry(&rmid_free_lru, - struct rmid_entry, list); - list_del(&entry->list); - - return entry->rmid; -} - -static void add_rmid_to_limbo(struct rmid_entry *entry) -{ - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - struct rdt_domain *d; - int cpu, err; - u64 val = 0; - - entry->busy = 0; - cpu = get_cpu(); - list_for_each_entry(d, &r->domains, list) { - if (cpumask_test_cpu(cpu, &d->cpu_mask)) { - err = resctrl_arch_rmid_read(r, d, entry->rmid, - QOS_L3_OCCUP_EVENT_ID, - &val); - if (err || val <= resctrl_rmid_realloc_threshold) - continue; - } - - /* - * For the first limbo RMID in the domain, - * setup up the limbo worker. 
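An aside on the limbo bookkeeping spanning __check_limbo() above and add_rmid_to_limbo() below: a per-domain busy bitmap pairs with a per-RMID busy count, and an RMID becomes reusable only once every domain has seen it go quiet. One scan of a single domain reduces to the following (read_occupancy() is a hypothetical wrapper around resctrl_arch_rmid_read())::

    static void check_limbo_once(struct rdt_resource *r, struct rdt_domain *d)
    {
            struct rmid_entry *entry;
            unsigned int rmid;
            u64 val;

            for_each_set_bit(rmid, d->rmid_busy_llc, r->num_rmid) {
                    if (read_occupancy(r, d, rmid, &val) ||
                        val >= resctrl_rmid_realloc_threshold)
                            continue;       /* still dirty, check again later */

                    clear_bit(rmid, d->rmid_busy_llc);
                    entry = &rmid_ptrs[rmid];
                    if (!--entry->busy) {   /* quiet on every domain now */
                            rmid_limbo_count--;
                            list_add_tail(&entry->list, &rmid_free_lru);
                    }
            }
    }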
- */ - if (!has_busy_rmid(r, d)) - cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL); - set_bit(entry->rmid, d->rmid_busy_llc); - entry->busy++; - } - put_cpu(); - - if (entry->busy) - rmid_limbo_count++; - else - list_add_tail(&entry->list, &rmid_free_lru); -} - -void free_rmid(u32 rmid) -{ - struct rmid_entry *entry; - - if (!rmid) - return; - - lockdep_assert_held(&rdtgroup_mutex); - - entry = __rmid_entry(rmid); - - if (is_llc_occupancy_enabled()) - add_rmid_to_limbo(entry); - else - list_add_tail(&entry->list, &rmid_free_lru); -} - -static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 rmid, - enum resctrl_event_id evtid) -{ - switch (evtid) { - case QOS_L3_MBM_TOTAL_EVENT_ID: - return &d->mbm_total[rmid]; - case QOS_L3_MBM_LOCAL_EVENT_ID: - return &d->mbm_local[rmid]; - default: - return NULL; - } -} - -static int __mon_event_count(u32 rmid, struct rmid_read *rr) -{ - struct mbm_state *m; - u64 tval = 0; - - if (rr->first) { - resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid); - m = get_mbm_state(rr->d, rmid, rr->evtid); - if (m) - memset(m, 0, sizeof(struct mbm_state)); - return 0; - } - - rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid, &tval); - if (rr->err) - return rr->err; - - rr->val += tval; - - return 0; -} - -/* - * mbm_bw_count() - Update bw count from values previously read by - * __mon_event_count(). - * @rmid: The rmid used to identify the cached mbm_state. - * @rr: The struct rmid_read populated by __mon_event_count(). - * - * Supporting function to calculate the memory bandwidth - * and delta bandwidth in MBps. The chunks value previously read by - * __mon_event_count() is compared with the chunks value from the previous - * invocation. This must be called once per second to maintain values in MBps. - */ -static void mbm_bw_count(u32 rmid, struct rmid_read *rr) -{ - struct mbm_state *m = &rr->d->mbm_local[rmid]; - u64 cur_bw, bytes, cur_bytes; - - cur_bytes = rr->val; - bytes = cur_bytes - m->prev_bw_bytes; - m->prev_bw_bytes = cur_bytes; - - cur_bw = bytes / SZ_1M; - - m->prev_bw = cur_bw; -} - -/* - * This is called via IPI to read the CQM/MBM counters - * on a domain. - */ -void mon_event_count(void *info) -{ - struct rdtgroup *rdtgrp, *entry; - struct rmid_read *rr = info; - struct list_head *head; - int ret; - - rdtgrp = rr->rgrp; - - ret = __mon_event_count(rdtgrp->mon.rmid, rr); - - /* - * For Ctrl groups read data from child monitor groups and - * add them together. Count events which are read successfully. - * Discard the rmid_read's reporting errors. - */ - head = &rdtgrp->mon.crdtgrp_list; - - if (rdtgrp->type == RDTCTRL_GROUP) { - list_for_each_entry(entry, head, mon.crdtgrp_list) { - if (__mon_event_count(entry->mon.rmid, rr) == 0) - ret = 0; - } - } - - /* - * __mon_event_count() calls for newly created monitor groups may - * report -EINVAL/Unavailable if the monitor hasn't seen any traffic. - * Discard error if any of the monitor event reads succeeded. - */ - if (ret == 0) - rr->err = 0; -} - -/* - * Feedback loop for MBA software controller (mba_sc) - * - * mba_sc is a feedback loop where we periodically read MBM counters and - * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so - * that: - * - * current bandwidth(cur_bw) < user specified bandwidth(user_bw) - * - * This uses the MBM counters to measure the bandwidth and MBA throttle - * MSRs to control the bandwidth for a particular rdtgrp. It builds on the - * fact that resctrl rdtgroups have both monitoring and control. 
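Before the feedback-loop description continues below, note where its input comes from: because mbm_bw_count() above runs on the fixed 1s overflow tick, the MBps arithmetic is just a byte delta. Distilled::

    /* Called once per second: the byte delta is the bandwidth in bytes/s. */
    static u32 update_bw(struct mbm_state *m, u64 cur_bytes)
    {
            u64 bytes = cur_bytes - m->prev_bw_bytes;

            m->prev_bw_bytes = cur_bytes;
            m->prev_bw = div_u64(bytes, SZ_1M);     /* bytes/s -> MBps */

            return m->prev_bw;
    }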
- * - * The frequency of the checks is 1s and we just tag along the MBM overflow - * timer. Having 1s interval makes the calculation of bandwidth simpler. - * - * Although MBA's goal is to restrict the bandwidth to a maximum, there may - * be a need to increase the bandwidth to avoid unnecessarily restricting - * the L2 <-> L3 traffic. - * - * Since MBA controls the L2 external bandwidth where as MBM measures the - * L3 external bandwidth the following sequence could lead to such a - * situation. - * - * Consider an rdtgroup which had high L3 <-> memory traffic in initial - * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but - * after some time rdtgroup has mostly L2 <-> L3 traffic. - * - * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its - * throttle MSRs already have low percentage values. To avoid - * unnecessarily restricting such rdtgroups, we also increase the bandwidth. - */ -static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) -{ - u32 closid, rmid, cur_msr_val, new_msr_val; - struct mbm_state *pmbm_data, *cmbm_data; - struct rdt_resource *r_mba; - struct rdt_domain *dom_mba; - struct list_head *head; - struct rdtgroup *entry; - u32 cur_bw, user_bw; - - if (!is_mbm_local_enabled()) - return; - - r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; - - closid = rgrp->closid; - rmid = rgrp->mon.rmid; - pmbm_data = &dom_mbm->mbm_local[rmid]; - - dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba); - if (!dom_mba) { - pr_warn_once("Failure to get domain for MBA update\n"); - return; - } - - cur_bw = pmbm_data->prev_bw; - user_bw = dom_mba->mbps_val[closid]; - - /* MBA resource doesn't support CDP */ - cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE); - - /* - * For Ctrl groups read data from child monitor groups. - */ - head = &rgrp->mon.crdtgrp_list; - list_for_each_entry(entry, head, mon.crdtgrp_list) { - cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; - cur_bw += cmbm_data->prev_bw; - } - - /* - * Scale up/down the bandwidth linearly for the ctrl group. The - * bandwidth step is the bandwidth granularity specified by the - * hardware. - * Always increase throttling if current bandwidth is above the - * target set by user. - * But avoid thrashing up and down on every poll by checking - * whether a decrease in throttling is likely to push the group - * back over target. E.g. if currently throttling to 30% of bandwidth - * on a system with 10% granularity steps, check whether moving to - * 40% would go past the limit by multiplying current bandwidth by - * "(30 + 10) / 30". - */ - if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) { - new_msr_val = cur_msr_val - r_mba->membw.bw_gran; - } else if (cur_msr_val < MAX_MBA_BW && - (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) { - new_msr_val = cur_msr_val + r_mba->membw.bw_gran; - } else { - return; - } - - resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val); -} - -static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid) -{ - struct rmid_read rr; - - rr.first = false; - rr.r = r; - rr.d = d; - - /* - * This is protected from concurrent reads from user - * as both the user and we hold the global mutex. 
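The decision at the heart of update_mba_bw() above can be distilled into one helper (mba_sc_step() is illustrative, not code from the patch; the deleted code uses membw.min_bw in the numerator while the comment's 30% to 40% example treats the step as 10)::

    static u32 mba_sc_step(u32 cur_msr_val, u32 cur_bw, u32 user_bw,
                           u32 gran, u32 min_bw)
    {
            if (cur_msr_val > min_bw && user_bw < cur_bw)
                    return cur_msr_val - gran;      /* over target: throttle harder */

            if (cur_msr_val < 100 /* MAX_MBA_BW */ &&
                user_bw > cur_bw * (cur_msr_val + min_bw) / cur_msr_val)
                    return cur_msr_val + gran;      /* headroom: relax one step */

            return cur_msr_val;                     /* hold, to avoid thrashing */
    }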
- */ - if (is_mbm_total_enabled()) { - rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; - rr.val = 0; - __mon_event_count(rmid, &rr); - } - if (is_mbm_local_enabled()) { - rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; - rr.val = 0; - __mon_event_count(rmid, &rr); - - /* - * Call the MBA software controller only for the - * control groups and when user has enabled - * the software controller explicitly. - */ - if (is_mba_sc(NULL)) - mbm_bw_count(rmid, &rr); - } -} - -/* - * Handler to scan the limbo list and move the RMIDs - * to free list whose occupancy < threshold_occupancy. - */ -void cqm_handle_limbo(struct work_struct *work) -{ - unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); - int cpu = smp_processor_id(); - struct rdt_resource *r; - struct rdt_domain *d; - - mutex_lock(&rdtgroup_mutex); - - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - d = container_of(work, struct rdt_domain, cqm_limbo.work); - - __check_limbo(d, false); - - if (has_busy_rmid(r, d)) - schedule_delayed_work_on(cpu, &d->cqm_limbo, delay); - - mutex_unlock(&rdtgroup_mutex); -} - -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms) -{ - unsigned long delay = msecs_to_jiffies(delay_ms); - int cpu; - - cpu = cpumask_any(&dom->cpu_mask); - dom->cqm_work_cpu = cpu; - - schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); -} - -void mbm_handle_overflow(struct work_struct *work) -{ - unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL); - struct rdtgroup *prgrp, *crgrp; - int cpu = smp_processor_id(); - struct list_head *head; - struct rdt_resource *r; - struct rdt_domain *d; - - mutex_lock(&rdtgroup_mutex); - - if (!static_branch_likely(&rdt_mon_enable_key)) - goto out_unlock; - - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - d = container_of(work, struct rdt_domain, mbm_over.work); - - list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - mbm_update(r, d, prgrp->mon.rmid); - - head = &prgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) - mbm_update(r, d, crgrp->mon.rmid); - - if (is_mba_sc(NULL)) - update_mba_bw(prgrp, d); - } - - schedule_delayed_work_on(cpu, &d->mbm_over, delay); - -out_unlock: - mutex_unlock(&rdtgroup_mutex); -} - -void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) -{ - unsigned long delay = msecs_to_jiffies(delay_ms); - int cpu; - - if (!static_branch_likely(&rdt_mon_enable_key)) - return; - cpu = cpumask_any(&dom->cpu_mask); - dom->mbm_work_cpu = cpu; - schedule_delayed_work_on(cpu, &dom->mbm_over, delay); -} - -static int dom_data_init(struct rdt_resource *r) -{ - struct rmid_entry *entry = NULL; - int i, nr_rmids; - - nr_rmids = r->num_rmid; - rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL); - if (!rmid_ptrs) - return -ENOMEM; - - for (i = 0; i < nr_rmids; i++) { - entry = &rmid_ptrs[i]; - INIT_LIST_HEAD(&entry->list); - - entry->rmid = i; - list_add_tail(&entry->list, &rmid_free_lru); - } - - /* - * RMID 0 is special and is always allocated. It's used for all - * tasks that are not monitored. - */ - entry = __rmid_entry(0); - list_del(&entry->list); - - return 0; -} - -static struct mon_evt llc_occupancy_event = { - .name = "llc_occupancy", - .evtid = QOS_L3_OCCUP_EVENT_ID, -}; - -static struct mon_evt mbm_total_event = { - .name = "mbm_total_bytes", - .evtid = QOS_L3_MBM_TOTAL_EVENT_ID, -}; - -static struct mon_evt mbm_local_event = { - .name = "mbm_local_bytes", - .evtid = QOS_L3_MBM_LOCAL_EVENT_ID, -}; - -/* - * Initialize the event list for the resource. 
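One more aside before the event-list comment continues: dom_data_init() above is the other half of the RMID lifecycle. Every RMID starts on the free list except RMID 0, which stays permanently allocated for unmonitored tasks. Condensed::

    static int seed_rmids(u32 nr_rmids)
    {
            int i;

            rmid_ptrs = kcalloc(nr_rmids, sizeof(*rmid_ptrs), GFP_KERNEL);
            if (!rmid_ptrs)
                    return -ENOMEM;

            for (i = 0; i < nr_rmids; i++) {
                    rmid_ptrs[i].rmid = i;
                    list_add_tail(&rmid_ptrs[i].list, &rmid_free_lru);
            }

            list_del(&rmid_ptrs[0].list);   /* RMID 0 is always allocated */
            return 0;
    }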
- * - * Note that MBM events are also part of RDT_RESOURCE_L3 resource - * because as per the SDM the total and local memory bandwidth - * are enumerated as part of L3 monitoring. - */ -static void l3_mon_evt_init(struct rdt_resource *r) -{ - INIT_LIST_HEAD(&r->evt_list); - - if (is_llc_occupancy_enabled()) - list_add_tail(&llc_occupancy_event.list, &r->evt_list); - if (is_mbm_total_enabled()) - list_add_tail(&mbm_total_event.list, &r->evt_list); - if (is_mbm_local_enabled()) - list_add_tail(&mbm_local_event.list, &r->evt_list); -} - int __init rdt_get_mon_l3_config(struct rdt_resource *r) { unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset; struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); unsigned int threshold; - int ret; resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024; hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale; @@ -786,29 +251,14 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) */ resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold); - ret = dom_data_init(r); - if (ret) - return ret; - if (rdt_cpu_has(X86_FEATURE_BMEC)) { u32 eax, ebx, ecx, edx; /* Detect list of bandwidth sources that can be tracked */ cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx); - hw_res->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS; - - if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) { - mbm_total_event.configurable = true; - mbm_config_rftype_init("mbm_total_bytes_config"); - } - if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) { - mbm_local_event.configurable = true; - mbm_config_rftype_init("mbm_local_bytes_config"); - } + r->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS; } - l3_mon_evt_init(r); - r->mon_capable = true; return 0; diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 8f559eeae08ed5845c291cd4142d748a3e2cca5c..ba1596afee107f65f784ee004a86d0faabf68eb4 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -39,30 +39,9 @@ */ static u64 prefetch_disable_bits; -/* - * Major number assigned to and shared by all devices exposing - * pseudo-locked regions. - */ -static unsigned int pseudo_lock_major; -static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0); - -static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode) -{ - const struct rdtgroup *rdtgrp; - - rdtgrp = dev_get_drvdata(dev); - if (mode) - *mode = 0600; - return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name); -} - -static const struct class pseudo_lock_class = { - .name = "pseudo_lock", - .devnode = pseudo_lock_devnode, -}; - /** - * get_prefetch_disable_bits - prefetch disable bits of supported platforms + * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported + * platforms * @void: It takes no parameters. * * Capture the list of platforms that have been validated to support @@ -76,14 +55,16 @@ static const struct class pseudo_lock_class = { * in the SDM. * * When adding a platform here also add support for its cache events to - * measure_cycles_perf_fn() + * resctrl_arch_measure_l*_residency() * * Return: * If platform is supported, the bits to disable hardware prefetchers, 0 * if platform is not supported. 
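A worked example for the limbo threshold initialised in rdt_get_mon_l3_config() above, with illustrative numbers: a 24576K L3 shared by 256 RMIDs starts at 24576K/256 = 96K, and resctrl_arch_round_mon_val() then snaps that down to a whole number of mon_scale-sized chunks so comparisons against counter reads are exact. A sketch of that rounding::

    /* Round the byte threshold down to a whole number of counter chunks. */
    static unsigned int round_mon_val(unsigned int val, unsigned int scale)
    {
            return (val / scale) * scale;
    }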
*/ -static u64 get_prefetch_disable_bits(void) +u64 resctrl_arch_get_prefetch_disable_bits(void) { + prefetch_disable_bits = 0; + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || boot_cpu_data.x86 != 6) return 0; @@ -99,7 +80,8 @@ static u64 get_prefetch_disable_bits(void) * 3 DCU IP Prefetcher Disable (R/W) * 63:4 Reserved */ - return 0xF; + prefetch_disable_bits = 0xF; + break; case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_ATOM_GOLDMONT_PLUS: /* @@ -110,308 +92,16 @@ static u64 get_prefetch_disable_bits(void) * 2 DCU Hardware Prefetcher Disable (R/W) * 63:3 Reserved */ - return 0x5; - } - - return 0; -} - -/** - * pseudo_lock_minor_get - Obtain available minor number - * @minor: Pointer to where new minor number will be stored - * - * A bitmask is used to track available minor numbers. Here the next free - * minor number is marked as unavailable and returned. - * - * Return: 0 on success, <0 on failure. - */ -static int pseudo_lock_minor_get(unsigned int *minor) -{ - unsigned long first_bit; - - first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS); - - if (first_bit == MINORBITS) - return -ENOSPC; - - __clear_bit(first_bit, &pseudo_lock_minor_avail); - *minor = first_bit; - - return 0; -} - -/** - * pseudo_lock_minor_release - Return minor number to available - * @minor: The minor number made available - */ -static void pseudo_lock_minor_release(unsigned int minor) -{ - __set_bit(minor, &pseudo_lock_minor_avail); -} - -/** - * region_find_by_minor - Locate a pseudo-lock region by inode minor number - * @minor: The minor number of the device representing pseudo-locked region - * - * When the character device is accessed we need to determine which - * pseudo-locked region it belongs to. This is done by matching the minor - * number of the device to the pseudo-locked region it belongs. - * - * Minor numbers are assigned at the time a pseudo-locked region is associated - * with a cache instance. - * - * Return: On success return pointer to resource group owning the pseudo-locked - * region, NULL on failure. - */ -static struct rdtgroup *region_find_by_minor(unsigned int minor) -{ - struct rdtgroup *rdtgrp, *rdtgrp_match = NULL; - - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (rdtgrp->plr && rdtgrp->plr->minor == minor) { - rdtgrp_match = rdtgrp; - break; - } - } - return rdtgrp_match; -} - -/** - * struct pseudo_lock_pm_req - A power management QoS request list entry - * @list: Entry within the @pm_reqs list for a pseudo-locked region - * @req: PM QoS request - */ -struct pseudo_lock_pm_req { - struct list_head list; - struct dev_pm_qos_request req; -}; - -static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) -{ - struct pseudo_lock_pm_req *pm_req, *next; - - list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { - dev_pm_qos_remove_request(&pm_req->req); - list_del(&pm_req->list); - kfree(pm_req); - } -} - -/** - * pseudo_lock_cstates_constrain - Restrict cores from entering C6 - * @plr: Pseudo-locked region - * - * To prevent the cache from being affected by power management entering - * C6 has to be avoided. This is accomplished by requesting a latency - * requirement lower than lowest C6 exit latency of all supported - * platforms as found in the cpuidle state tables in the intel_idle driver. - * At this time it is possible to do so with a single latency requirement - * for all supported platforms. 
- * - * Since Goldmont is supported, which is affected by X86_BUG_MONITOR, - * the ACPI latencies need to be considered while keeping in mind that C2 - * may be set to map to deeper sleep states. In this case the latency - * requirement needs to prevent entering C2 also. - * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) -{ - struct pseudo_lock_pm_req *pm_req; - int cpu; - int ret; - - for_each_cpu(cpu, &plr->d->cpu_mask) { - pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); - if (!pm_req) { - rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n"); - ret = -ENOMEM; - goto out_err; - } - ret = dev_pm_qos_add_request(get_cpu_device(cpu), - &pm_req->req, - DEV_PM_QOS_RESUME_LATENCY, - 30); - if (ret < 0) { - rdt_last_cmd_printf("Failed to add latency req CPU%d\n", - cpu); - kfree(pm_req); - ret = -1; - goto out_err; - } - list_add(&pm_req->list, &plr->pm_reqs); - } - - return 0; - -out_err: - pseudo_lock_cstates_relax(plr); - return ret; -} - -/** - * pseudo_lock_region_clear - Reset pseudo-lock region data - * @plr: pseudo-lock region - * - * All content of the pseudo-locked region is reset - any memory allocated - * freed. - * - * Return: void - */ -static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) -{ - plr->size = 0; - plr->line_size = 0; - kfree(plr->kmem); - plr->kmem = NULL; - plr->s = NULL; - if (plr->d) - plr->d->plr = NULL; - plr->d = NULL; - plr->cbm = 0; - plr->debugfs_dir = NULL; -} - -/** - * pseudo_lock_region_init - Initialize pseudo-lock region information - * @plr: pseudo-lock region - * - * Called after user provided a schemata to be pseudo-locked. From the - * schemata the &struct pseudo_lock_region is on entry already initialized - * with the resource, domain, and capacity bitmask. Here the information - * required for pseudo-locking is deduced from this data and &struct - * pseudo_lock_region initialized further. This information includes: - * - size in bytes of the region to be pseudo-locked - * - cache line size to know the stride with which data needs to be accessed - * to be pseudo-locked - * - a cpu associated with the cache instance on which the pseudo-locking - * flow can be executed - * - * Return: 0 on success, <0 on failure. Descriptive error will be written - * to last_cmd_status buffer. - */ -static int pseudo_lock_region_init(struct pseudo_lock_region *plr) -{ - struct cpu_cacheinfo *ci; - int ret; - int i; - - /* Pick the first cpu we find that is associated with the cache. */ - plr->cpu = cpumask_first(&plr->d->cpu_mask); - - if (!cpu_online(plr->cpu)) { - rdt_last_cmd_printf("CPU %u associated with cache not online\n", - plr->cpu); - ret = -ENODEV; - goto out_region; - } - - ci = get_cpu_cacheinfo(plr->cpu); - - plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); - - for (i = 0; i < ci->num_leaves; i++) { - if (ci->info_list[i].level == plr->s->res->cache_level) { - plr->line_size = ci->info_list[i].coherency_line_size; - return 0; - } - } - - ret = -1; - rdt_last_cmd_puts("Unable to determine cache line size\n"); -out_region: - pseudo_lock_region_clear(plr); - return ret; -} - -/** - * pseudo_lock_init - Initialize a pseudo-lock region - * @rdtgrp: resource group to which new pseudo-locked region will belong - * - * A pseudo-locked region is associated with a resource group. When this - * association is created the pseudo-locked region is initialized. 
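pseudo_lock_region_init() above derives the access stride from cpu_cacheinfo; the same coherency_line_size is exported to userspace through sysfs. A small sketch of reading it, where cpu0/index3 (commonly the L3 instance) is an assumption about the target machine::

  #include <stdio.h>

  int main(void)
  {
      const char *path =
          "/sys/devices/system/cpu/cpu0/cache/index3/coherency_line_size";
      unsigned int line_size;
      FILE *f = fopen(path, "r");

      if (!f || fscanf(f, "%u", &line_size) != 1) {
          perror(path);
          return 1;
      }
      fclose(f);
      printf("line size: %u bytes\n", line_size);
      return 0;
  }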
The - * details of the pseudo-locked region are not known at this time so only - * allocation is done and association established. - * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_init(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr; - - plr = kzalloc(sizeof(*plr), GFP_KERNEL); - if (!plr) - return -ENOMEM; - - init_waitqueue_head(&plr->lock_thread_wq); - INIT_LIST_HEAD(&plr->pm_reqs); - rdtgrp->plr = plr; - return 0; -} - -/** - * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked - * @plr: pseudo-lock region - * - * Initialize the details required to set up the pseudo-locked region and - * allocate the contiguous memory that will be pseudo-locked to the cache. - * - * Return: 0 on success, <0 on failure. Descriptive error will be written - * to last_cmd_status buffer. - */ -static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) -{ - int ret; - - ret = pseudo_lock_region_init(plr); - if (ret < 0) - return ret; - - /* - * We do not yet support contiguous regions larger than - * KMALLOC_MAX_SIZE. - */ - if (plr->size > KMALLOC_MAX_SIZE) { - rdt_last_cmd_puts("Requested region exceeds maximum size\n"); - ret = -E2BIG; - goto out_region; - } - - plr->kmem = kzalloc(plr->size, GFP_KERNEL); - if (!plr->kmem) { - rdt_last_cmd_puts("Unable to allocate memory\n"); - ret = -ENOMEM; - goto out_region; + prefetch_disable_bits = 0x5; + break; } - ret = 0; - goto out; -out_region: - pseudo_lock_region_clear(plr); -out: - return ret; -} - -/** - * pseudo_lock_free - Free a pseudo-locked region - * @rdtgrp: resource group to which pseudo-locked region belonged - * - * The pseudo-locked region's resources have already been released, or not - * yet created at this point. Now it can be freed and disassociated from the - * resource group. - * - * Return: void - */ -static void pseudo_lock_free(struct rdtgroup *rdtgrp) -{ - pseudo_lock_region_clear(rdtgrp->plr); - kfree(rdtgrp->plr); - rdtgrp->plr = NULL; + return prefetch_disable_bits; } /** - * pseudo_lock_fn - Load kernel memory into cache - * @_rdtgrp: resource group to which pseudo-lock region belongs + * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache + * @_plr: the pseudo-lock region descriptor * * This is the core pseudo-locking flow. * @@ -428,10 +118,9 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp) * * Return: 0. Waiter on waitqueue will be woken on completion. */ -static int pseudo_lock_fn(void *_rdtgrp) +int resctrl_arch_pseudo_lock_fn(void *_plr) { - struct rdtgroup *rdtgrp = _rdtgrp; - struct pseudo_lock_region *plr = rdtgrp->plr; + struct pseudo_lock_region *plr = _plr; u32 rmid_p, closid_p; unsigned long i; u64 saved_msr; @@ -491,7 +180,8 @@ static int pseudo_lock_fn(void *_rdtgrp) * pseudo-locked followed by reading of kernel memory to load it * into the cache. */ - __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, rdtgrp->closid); + __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid); + /* * Cache was flushed earlier. Now access kernel memory to read it * into cache region associated with just activated plr->closid. @@ -539,339 +229,8 @@ static int pseudo_lock_fn(void *_rdtgrp) } /** - * rdtgroup_monitor_in_progress - Test if monitoring in progress - * @rdtgrp: resource group being queried - * - * Return: 1 if monitor groups have been created for this resource - * group, 0 otherwise. 
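For reference, the per-model masks returned by resctrl_arch_get_prefetch_disable_bits() as completed above reduce to a small lookup on the model number. An illustrative standalone version; the model numbers on the case labels (0x4f Broadwell X, 0x5c Goldmont, 0x7a Goldmont Plus) are assumptions about which INTEL_FAM6_* entries the elided cases cover::

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative only: MSR_MISC_FEATURE_CONTROL (0x1a4) disable mask
   * per supported model, mirroring the switch in the hunk above. */
  static uint64_t prefetch_disable_mask(unsigned int model)
  {
      switch (model) {
      case 0x4f:            /* assumed: Broadwell X */
          return 0xF;       /* L2 HW, L2 adjacent line, DCU, DCU IP */
      case 0x5c:            /* assumed: Goldmont */
      case 0x7a:            /* assumed: Goldmont Plus */
          return 0x5;       /* L2 HW and DCU HW prefetchers only */
      default:
          return 0;         /* unknown model: pseudo-locking unsupported */
      }
  }

  int main(void)
  {
      printf("0x%llx\n", (unsigned long long)prefetch_disable_mask(0x4f));
      return 0;
  }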
- */ -static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp) -{ - return !list_empty(&rdtgrp->mon.crdtgrp_list); -} - -/** - * rdtgroup_locksetup_user_restrict - Restrict user access to group - * @rdtgrp: resource group needing access restricted - * - * A resource group used for cache pseudo-locking cannot have cpus or tasks - * assigned to it. This is communicated to the user by restricting access - * to all the files that can be used to make such changes. - * - * Permissions restored with rdtgroup_locksetup_user_restore() - * - * Return: 0 on success, <0 on failure. If a failure occurs during the - * restriction of access an attempt will be made to restore permissions but - * the state of the mode of these files will be uncertain when a failure - * occurs. - */ -static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp) -{ - int ret; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); - if (ret) - return ret; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); - if (ret) - goto err_tasks; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); - if (ret) - goto err_cpus; - - if (rdt_mon_capable) { - ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups"); - if (ret) - goto err_cpus_list; - } - - ret = 0; - goto out; - -err_cpus_list: - rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); -err_cpus: - rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); -err_tasks: - rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); -out: - return ret; -} - -/** - * rdtgroup_locksetup_user_restore - Restore user access to group - * @rdtgrp: resource group needing access restored - * - * Restore all file access previously removed using - * rdtgroup_locksetup_user_restrict() - * - * Return: 0 on success, <0 on failure. If a failure occurs during the - * restoration of access an attempt will be made to restrict permissions - * again but the state of the mode of these files will be uncertain when - * a failure occurs. - */ -static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp) -{ - int ret; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); - if (ret) - return ret; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); - if (ret) - goto err_tasks; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); - if (ret) - goto err_cpus; - - if (rdt_mon_capable) { - ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777); - if (ret) - goto err_cpus_list; - } - - ret = 0; - goto out; - -err_cpus_list: - rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); -err_cpus: - rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); -err_tasks: - rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); -out: - return ret; -} - -/** - * rdtgroup_locksetup_enter - Resource group enters locksetup mode - * @rdtgrp: resource group requested to enter locksetup mode - * - * A resource group enters locksetup mode to reflect that it would be used - * to represent a pseudo-locked region and is in the process of being set - * up to do so. A resource group used for a pseudo-locked region would - * lose the closid associated with it so we cannot allow it to have any - * tasks or cpus assigned nor permit tasks or cpus to be assigned in the - * future. Monitoring of a pseudo-locked region is not allowed either. - * - * The above and more restrictions on a pseudo-locked region are checked - * for and enforced before the resource group enters the locksetup mode. - * - * Returns: 0 if the resource group successfully entered locksetup mode, <0 - * on failure. 
On failure the last_cmd_status buffer is updated with text to - * communicate details of failure to the user. - */ -int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) -{ - int ret; - - /* - * The default resource group can neither be removed nor lose the - * default closid associated with it. - */ - if (rdtgrp == &rdtgroup_default) { - rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); - return -EINVAL; - } - - /* - * Cache Pseudo-locking not supported when CDP is enabled. - * - * Some things to consider if you would like to enable this - * support (using L3 CDP as example): - * - When CDP is enabled two separate resources are exposed, - * L3DATA and L3CODE, but they are actually on the same cache. - * The implication for pseudo-locking is that if a - * pseudo-locked region is created on a domain of one - * resource (eg. L3CODE), then a pseudo-locked region cannot - * be created on that same domain of the other resource - * (eg. L3DATA). This is because the creation of a - * pseudo-locked region involves a call to wbinvd that will - * affect all cache allocations on a particular domain. - * - Considering the previous, it may be possible to only - * expose one of the CDP resources to pseudo-locking and - * hide the other. For example, we could consider exposing - * only L3DATA, and since the L3 cache is unified it is - * still possible to place instructions there and execute them. - * - If only one region is exposed to pseudo-locking we should - * still keep in mind that availability of a portion of cache - * for pseudo-locking should take into account both resources. - * Similarly, if a pseudo-locked region is created in one - * resource, the portion of cache used by it should be made - * unavailable to all future allocations from both resources. - */ - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) || - resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) { - rdt_last_cmd_puts("CDP enabled\n"); - return -EINVAL; - } - - /* - * Not knowing the bits to disable prefetching implies that this - * platform does not support Cache Pseudo-Locking. - */ - prefetch_disable_bits = get_prefetch_disable_bits(); - if (prefetch_disable_bits == 0) { - rdt_last_cmd_puts("Pseudo-locking not supported\n"); - return -EINVAL; - } - - if (rdtgroup_monitor_in_progress(rdtgrp)) { - rdt_last_cmd_puts("Monitoring in progress\n"); - return -EINVAL; - } - - if (rdtgroup_tasks_assigned(rdtgrp)) { - rdt_last_cmd_puts("Tasks assigned to resource group\n"); - return -EINVAL; - } - - if (!cpumask_empty(&rdtgrp->cpu_mask)) { - rdt_last_cmd_puts("CPUs assigned to resource group\n"); - return -EINVAL; - } - - if (rdtgroup_locksetup_user_restrict(rdtgrp)) { - rdt_last_cmd_puts("Unable to modify resctrl permissions\n"); - return -EIO; - } - - ret = pseudo_lock_init(rdtgrp); - if (ret) { - rdt_last_cmd_puts("Unable to init pseudo-lock region\n"); - goto out_release; - } - - /* - * If this system is capable of monitoring, a rmid would have been - * allocated when the control group was created. This is not needed - * anymore when this group would be used for pseudo-locking. This - * is safe to call on platforms not capable of monitoring. - */ - free_rmid(rdtgrp->mon.rmid); - - ret = 0; - goto out; - -out_release: - rdtgroup_locksetup_user_restore(rdtgrp); -out: - return ret; -} - -/** - * rdtgroup_locksetup_exit - resource group exits locksetup mode - * @rdtgrp: resource group - * - * When a resource group exits locksetup mode the earlier restrictions are - * lifted.
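The checks in rdtgroup_locksetup_enter() above are what a user trips over when driving this flow through /sys/fs/resctrl. A minimal C sketch of that sequence; the group name and the L2 schemata string are made-up examples and depend on the platform::

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/stat.h>

  #define GRP "/sys/fs/resctrl/newlock"   /* hypothetical group */

  static int write_str(const char *path, const char *s)
  {
      int fd = open(path, O_WRONLY);

      if (fd < 0) {
          perror(path);
          return -1;
      }
      if (write(fd, s, strlen(s)) < 0) {
          perror(path);
          close(fd);
          return -1;
      }
      return close(fd);
  }

  int main(void)
  {
      if (mkdir(GRP, 0755))
          perror(GRP);
      /* entering locksetup mode runs the checks shown above */
      if (write_str(GRP "/mode", "pseudo-locksetup"))
          return 1;
      /* writing a schemata triggers the actual locking flow */
      return write_str(GRP "/schemata", "L2:0=0x3\n") ? 1 : 0;
  }

If the locking succeeds, reading the group's "mode" file back reports "pseudo-locked".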
- * - * Return: 0 on success, <0 on failure - */ -int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) -{ - int ret; - - if (rdt_mon_capable) { - ret = alloc_rmid(); - if (ret < 0) { - rdt_last_cmd_puts("Out of RMIDs\n"); - return ret; - } - rdtgrp->mon.rmid = ret; - } - - ret = rdtgroup_locksetup_user_restore(rdtgrp); - if (ret) { - free_rmid(rdtgrp->mon.rmid); - return ret; - } - - pseudo_lock_free(rdtgrp); - return 0; -} - -/** - * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked - * @d: RDT domain - * @cbm: CBM to test - * - * @d represents a cache instance and @cbm a capacity bitmask that is - * considered for it. Determine if @cbm overlaps with any existing - * pseudo-locked region on @d. - * - * @cbm is unsigned long, even if only 32 bits are used, to make the - * bitmap functions work correctly. - * - * Return: true if @cbm overlaps with pseudo-locked region on @d, false - * otherwise. - */ -bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) -{ - unsigned int cbm_len; - unsigned long cbm_b; - - if (d->plr) { - cbm_len = d->plr->s->res->cache.cbm_len; - cbm_b = d->plr->cbm; - if (bitmap_intersects(&cbm, &cbm_b, cbm_len)) - return true; - } - return false; -} - -/** - * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy - * @d: RDT domain under test - * - * The setup of a pseudo-locked region affects all cache instances within - * the hierarchy of the region. It is thus essential to know if any - * pseudo-locked regions exist within a cache hierarchy to prevent any - * attempts to create new pseudo-locked regions in the same hierarchy. - * - * Return: true if a pseudo-locked region exists in the hierarchy of @d or - * if it is not possible to test due to memory allocation issue, - * false otherwise. - */ -bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) -{ - cpumask_var_t cpu_with_psl; - struct rdt_resource *r; - struct rdt_domain *d_i; - bool ret = false; - - if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL)) - return true; - - /* - * First determine which cpus have pseudo-locked regions - * associated with them. - */ - for_each_alloc_capable_rdt_resource(r) { - list_for_each_entry(d_i, &r->domains, list) { - if (d_i->plr) - cpumask_or(cpu_with_psl, cpu_with_psl, - &d_i->cpu_mask); - } - } - - /* - * Next test if new pseudo-locked region would intersect with - * existing region. - */ - if (cpumask_intersects(&d->cpu_mask, cpu_with_psl)) - ret = true; - - free_cpumask_var(cpu_with_psl); - return ret; -} - -/** - * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory + * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read + * pseudo-locked memory * @_plr: pseudo-lock region to measure * * There is no deterministic way to test if a memory region is cached. One @@ -884,7 +243,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) * * Return: 0. Waiter on waitqueue will be woken on completion. 
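A rough userspace cousin of this latency measurement, using __rdtsc() from x86intrin.h; unlike the kernel thread it cannot disable prefetchers or interrupts, so treat the numbers as indicative only. Buffer size and the 64-byte stride are assumptions::

  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <x86intrin.h>

  int main(void)
  {
      const size_t size = 256 * 1024, stride = 64; /* assumed line size */
      volatile unsigned char *buf = malloc(size);
      uint64_t start, cycles;
      size_t i;

      if (!buf)
          return 1;
      for (i = 0; i < size; i++)    /* populate the pages first */
          buf[i] = (unsigned char)i;

      start = __rdtsc();
      for (i = 0; i < size; i += stride)
          (void)buf[i];             /* one read per cache line */
      cycles = __rdtsc() - start;

      printf("%.1f cycles per line\n", (double)cycles / (size / stride));
      free((void *)buf);
      return 0;
  }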
*/ -static int measure_cycles_lat_fn(void *_plr) +int resctrl_arch_measure_cycles_lat_fn(void *_plr) { struct pseudo_lock_region *plr = _plr; u32 saved_low, saved_high; @@ -1068,7 +427,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, return 0; } -static int measure_l2_residency(void *_plr) +int resctrl_arch_measure_l2_residency(void *_plr) { struct pseudo_lock_region *plr = _plr; struct residency_counts counts = {0}; @@ -1106,7 +465,7 @@ static int measure_l2_residency(void *_plr) return 0; } -static int measure_l3_residency(void *_plr) +int resctrl_arch_measure_l3_residency(void *_plr) { struct pseudo_lock_region *plr = _plr; struct residency_counts counts = {0}; @@ -1161,441 +520,3 @@ static int measure_l3_residency(void *_plr) wake_up_interruptible(&plr->lock_thread_wq); return 0; } - -/** - * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region - * @rdtgrp: Resource group to which the pseudo-locked region belongs. - * @sel: Selector of which measurement to perform on a pseudo-locked region. - * - * The measurement of latency to access a pseudo-locked region should be - * done from a cpu that is associated with that pseudo-locked region. - * Determine which cpu is associated with this region and start a thread on - * that cpu to perform the measurement, wait for that thread to complete. - * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - struct task_struct *thread; - unsigned int cpu; - int ret = -1; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - if (rdtgrp->flags & RDT_DELETED) { - ret = -ENODEV; - goto out; - } - - if (!plr->d) { - ret = -ENODEV; - goto out; - } - - plr->thread_done = 0; - cpu = cpumask_first(&plr->d->cpu_mask); - if (!cpu_online(cpu)) { - ret = -ENODEV; - goto out; - } - - plr->cpu = cpu; - - if (sel == 1) - thread = kthread_create_on_node(measure_cycles_lat_fn, plr, - cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else if (sel == 2) - thread = kthread_create_on_node(measure_l2_residency, plr, - cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else if (sel == 3) - thread = kthread_create_on_node(measure_l3_residency, plr, - cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else - goto out; - - if (IS_ERR(thread)) { - ret = PTR_ERR(thread); - goto out; - } - kthread_bind(thread, cpu); - wake_up_process(thread); - - ret = wait_event_interruptible(plr->lock_thread_wq, - plr->thread_done == 1); - if (ret < 0) - goto out; - - ret = 0; - -out: - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - return ret; -} - -static ssize_t pseudo_lock_measure_trigger(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct rdtgroup *rdtgrp = file->private_data; - size_t buf_size; - char buf[32]; - int ret; - int sel; - - buf_size = min(count, (sizeof(buf) - 1)); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; - ret = kstrtoint(buf, 10, &sel); - if (ret == 0) { - if (sel != 1 && sel != 2 && sel != 3) - return -EINVAL; - ret = debugfs_file_get(file->f_path.dentry); - if (ret) - return ret; - ret = pseudo_lock_measure_cycles(rdtgrp, sel); - if (ret == 0) - ret = count; - debugfs_file_put(file->f_path.dentry); - } - - return ret; -} - -static const struct file_operations pseudo_measure_fops = { - .write = pseudo_lock_measure_trigger, - .open = simple_open, - .llseek = default_llseek, -}; - -/** - * 
rdtgroup_pseudo_lock_create - Create a pseudo-locked region - * @rdtgrp: resource group to which pseudo-lock region belongs - * - * Called when a resource group in the pseudo-locksetup mode receives a - * valid schemata that should be pseudo-locked. Since the resource group is - * in pseudo-locksetup mode the &struct pseudo_lock_region has already been - * allocated and initialized with the essential information. If a failure - * occurs the resource group remains in the pseudo-locksetup mode with the - * &struct pseudo_lock_region associated with it, but cleared from all - * information and ready for the user to re-attempt pseudo-locking by - * writing the schemata again. - * - * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0 - * on failure. Descriptive error will be written to last_cmd_status buffer. - */ -int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - struct task_struct *thread; - unsigned int new_minor; - struct device *dev; - int ret; - - ret = pseudo_lock_region_alloc(plr); - if (ret < 0) - return ret; - - ret = pseudo_lock_cstates_constrain(plr); - if (ret < 0) { - ret = -EINVAL; - goto out_region; - } - - plr->thread_done = 0; - - thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp, - cpu_to_node(plr->cpu), - "pseudo_lock/%u", plr->cpu); - if (IS_ERR(thread)) { - ret = PTR_ERR(thread); - rdt_last_cmd_printf("Locking thread returned error %d\n", ret); - goto out_cstates; - } - - kthread_bind(thread, plr->cpu); - wake_up_process(thread); - - ret = wait_event_interruptible(plr->lock_thread_wq, - plr->thread_done == 1); - if (ret < 0) { - /* - * If the thread does not get on the CPU for whatever - * reason and the process which sets up the region is - * interrupted then this will leave the thread in runnable - * state and once it gets on the CPU it will dereference - * the cleared, but not freed, plr struct resulting in an - * empty pseudo-locking loop. - */ - rdt_last_cmd_puts("Locking thread interrupted\n"); - goto out_cstates; - } - - ret = pseudo_lock_minor_get(&new_minor); - if (ret < 0) { - rdt_last_cmd_puts("Unable to obtain a new minor number\n"); - goto out_cstates; - } - - /* - * Unlock access but do not release the reference. The - * pseudo-locked region will still be here on return. - * - * The mutex has to be released temporarily to avoid a potential - * deadlock with the mm->mmap_lock which is obtained in the - * device_create() and debugfs_create_dir() callpath below as well as - * before the mmap() callback is called. 
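The kthread_create_on_node()/kthread_bind()/wait_event_interruptible() pattern used above maps naturally onto pthreads. A self-contained analogue, with CPU 0 standing in for plr->cpu; note the kernel variant is stricter, since kthread_bind() happens before the first wakeup while this sketch can only set affinity after pthread_create()::

  #define _GNU_SOURCE
  #include <pthread.h>
  #include <sched.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
  static int done;

  static void *worker(void *arg)
  {
      (void)arg;
      /* ... the cache-population work would run here ... */
      pthread_mutex_lock(&lock);
      done = 1;                     /* analogue of plr->thread_done */
      pthread_cond_signal(&done_cv);
      pthread_mutex_unlock(&lock);
      return NULL;
  }

  int main(void)
  {
      pthread_t t;
      cpu_set_t set;

      CPU_ZERO(&set);
      CPU_SET(0, &set);             /* CPU 0 stands in for plr->cpu */

      pthread_create(&t, NULL, worker, NULL);
      pthread_setaffinity_np(t, sizeof(set), &set);

      pthread_mutex_lock(&lock);
      while (!done)                 /* analogue of the wait_event */
          pthread_cond_wait(&done_cv, &lock);
      pthread_mutex_unlock(&lock);
      return pthread_join(t, NULL);
  }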
- */ - mutex_unlock(&rdtgroup_mutex); - - if (!IS_ERR_OR_NULL(debugfs_resctrl)) { - plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, - debugfs_resctrl); - if (!IS_ERR_OR_NULL(plr->debugfs_dir)) - debugfs_create_file("pseudo_lock_measure", 0200, - plr->debugfs_dir, rdtgrp, - &pseudo_measure_fops); - } - - dev = device_create(&pseudo_lock_class, NULL, - MKDEV(pseudo_lock_major, new_minor), - rdtgrp, "%s", rdtgrp->kn->name); - - mutex_lock(&rdtgroup_mutex); - - if (IS_ERR(dev)) { - ret = PTR_ERR(dev); - rdt_last_cmd_printf("Failed to create character device: %d\n", - ret); - goto out_debugfs; - } - - /* We released the mutex - check if group was removed while we did so */ - if (rdtgrp->flags & RDT_DELETED) { - ret = -ENODEV; - goto out_device; - } - - plr->minor = new_minor; - - rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED; - closid_free(rdtgrp->closid); - rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444); - rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444); - - ret = 0; - goto out; - -out_device: - device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor)); -out_debugfs: - debugfs_remove_recursive(plr->debugfs_dir); - pseudo_lock_minor_release(new_minor); -out_cstates: - pseudo_lock_cstates_relax(plr); -out_region: - pseudo_lock_region_clear(plr); -out: - return ret; -} - -/** - * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region - * @rdtgrp: resource group to which the pseudo-locked region belongs - * - * The removal of a pseudo-locked region can be initiated when the resource - * group is removed from user space via a "rmdir" from userspace or the - * unmount of the resctrl filesystem. On removal the resource group does - * not go back to pseudo-locksetup mode before it is removed, instead it is - * removed directly. There is thus asymmetry with the creation where the - * &struct pseudo_lock_region is removed here while it was not created in - * rdtgroup_pseudo_lock_create(). - * - * Return: void - */ -void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - /* - * Default group cannot be a pseudo-locked region so we can - * free closid here. 
- */ - closid_free(rdtgrp->closid); - goto free; - } - - pseudo_lock_cstates_relax(plr); - debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); - device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); - pseudo_lock_minor_release(plr->minor); - -free: - pseudo_lock_free(rdtgrp); -} - -static int pseudo_lock_dev_open(struct inode *inode, struct file *filp) -{ - struct rdtgroup *rdtgrp; - - mutex_lock(&rdtgroup_mutex); - - rdtgrp = region_find_by_minor(iminor(inode)); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - filp->private_data = rdtgrp; - atomic_inc(&rdtgrp->waitcount); - /* Perform a non-seekable open - llseek is not supported */ - filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); - - mutex_unlock(&rdtgroup_mutex); - - return 0; -} - -static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) -{ - struct rdtgroup *rdtgrp; - - mutex_lock(&rdtgroup_mutex); - rdtgrp = filp->private_data; - WARN_ON(!rdtgrp); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - filp->private_data = NULL; - atomic_dec(&rdtgrp->waitcount); - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static int pseudo_lock_dev_mremap(struct vm_area_struct *area) -{ - /* Not supported */ - return -EINVAL; -} - -static const struct vm_operations_struct pseudo_mmap_ops = { - .mremap = pseudo_lock_dev_mremap, -}; - -static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) -{ - unsigned long vsize = vma->vm_end - vma->vm_start; - unsigned long off = vma->vm_pgoff << PAGE_SHIFT; - struct pseudo_lock_region *plr; - struct rdtgroup *rdtgrp; - unsigned long physical; - unsigned long psize; - - mutex_lock(&rdtgroup_mutex); - - rdtgrp = filp->private_data; - WARN_ON(!rdtgrp); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - plr = rdtgrp->plr; - - if (!plr->d) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - /* - * Task is required to run with affinity to the cpus associated - * with the pseudo-locked region. If this is not the case the task - * may be scheduled elsewhere and invalidate entries in the - * pseudo-locked region. - */ - if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { - mutex_unlock(&rdtgroup_mutex); - return -EINVAL; - } - - physical = __pa(plr->kmem) >> PAGE_SHIFT; - psize = plr->size - off; - - if (off > plr->size) { - mutex_unlock(&rdtgroup_mutex); - return -ENOSPC; - } - - /* - * Ensure changes are carried directly to the memory being mapped, - * do not allow copy-on-write mapping. 
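From the consumer side, the mmap() handler whose checks continue below expects the caller to pin itself to the region's CPUs and to map the device MAP_SHARED. A sketch under those assumptions; the device name follows the pseudo_lock/%s devnode shown earlier, and the size and CPU number are examples::

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <sched.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
      const size_t size = 128 * 1024;  /* must not exceed the region */
      cpu_set_t set;
      void *mem;
      int fd;

      CPU_ZERO(&set);
      CPU_SET(0, &set);   /* a CPU of the pseudo-locked cache domain */
      if (sched_setaffinity(0, sizeof(set), &set))
          perror("sched_setaffinity");

      fd = open("/dev/pseudo_lock/newlock", O_RDWR);
      if (fd < 0) {
          perror("open");
          return 1;
      }
      /* MAP_SHARED is required: private (COW) maps are rejected */
      mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      if (mem == MAP_FAILED) {
          perror("mmap");
          return 1;
      }
      /* ... use mem ... */
      munmap(mem, size);
      return close(fd);
  }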
- */ - if (!(vma->vm_flags & VM_SHARED)) { - mutex_unlock(&rdtgroup_mutex); - return -EINVAL; - } - - if (vsize > psize) { - mutex_unlock(&rdtgroup_mutex); - return -ENOSPC; - } - - memset(plr->kmem + off, 0, vsize); - - if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff, - vsize, vma->vm_page_prot)) { - mutex_unlock(&rdtgroup_mutex); - return -EAGAIN; - } - vma->vm_ops = &pseudo_mmap_ops; - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static const struct file_operations pseudo_lock_dev_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .read = NULL, - .write = NULL, - .open = pseudo_lock_dev_open, - .release = pseudo_lock_dev_release, - .mmap = pseudo_lock_dev_mmap, -}; - -int rdt_pseudo_lock_init(void) -{ - int ret; - - ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops); - if (ret < 0) - return ret; - - pseudo_lock_major = ret; - - ret = class_register(&pseudo_lock_class); - if (ret) { - unregister_chrdev(pseudo_lock_major, "pseudo_lock"); - return ret; - } - - return 0; -} - -void rdt_pseudo_lock_release(void) -{ - class_unregister(&pseudo_lock_class); - unregister_chrdev(pseudo_lock_major, "pseudo_lock"); - pseudo_lock_major = 0; -} diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index d82d5de183b10704a1f5251c8a70747e4144faf2..fe3952514addf7994fdfb66d6ee46bec6c2fc03e 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -12,22 +12,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include - -#include #include #include "internal.h" @@ -35,3842 +21,240 @@ DEFINE_STATIC_KEY_FALSE(rdt_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key); -static struct kernfs_root *rdt_root; -struct rdtgroup rdtgroup_default; -LIST_HEAD(rdt_all_groups); - -/* list of entries for the schemata file */ -LIST_HEAD(resctrl_schema_all); - -/* Kernel fs node for "info" directory under root */ -static struct kernfs_node *kn_info; - -/* Kernel fs node for "mon_groups" directory under root */ -static struct kernfs_node *kn_mongrp; - -/* Kernel fs node for "mon_data" directory under root */ -static struct kernfs_node *kn_mondata; - -static struct seq_buf last_cmd_status; -static char last_cmd_status_buf[512]; - -struct dentry *debugfs_resctrl; - -void rdt_last_cmd_clear(void) -{ - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_clear(&last_cmd_status); -} -void rdt_last_cmd_puts(const char *s) +/* + * This is safe against resctrl_arch_sched_in() called from __switch_to() + * because __switch_to() is executed with interrupts disabled. A local call + * from update_closid_rmid() is protected against __switch_to() because + * preemption is disabled. + */ +void resctrl_arch_sync_cpu_defaults(void *info) { - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_puts(&last_cmd_status, s); -} + struct resctrl_cpu_sync *r = info; -void rdt_last_cmd_printf(const char *fmt, ...) -{ - va_list ap; + if (r) { + this_cpu_write(pqr_state.default_closid, r->closid); + this_cpu_write(pqr_state.default_rmid, r->rmid); + } - va_start(ap, fmt); - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_vprintf(&last_cmd_status, fmt, ap); - va_end(ap); + /* + * We cannot unconditionally write the MSR because the current + * executing task might have its own closid selected. Just reuse + * the context switch code. 
+ */ + resctrl_arch_sched_in(current); } -void rdt_staged_configs_clear(void) -{ - struct rdt_resource *r; - struct rdt_domain *dom; - - lockdep_assert_held(&rdtgroup_mutex); - - for_each_alloc_capable_rdt_resource(r) { - list_for_each_entry(dom, &r->domains, list) - memset(dom->staged_config, 0, sizeof(dom->staged_config)); - } -} +#define INVALID_CONFIG_INDEX UINT_MAX -/* - * Trivial allocator for CLOSIDs. Since h/w only supports a small number, - * we can keep a bitmap of free CLOSIDs in a single integer. +/** + * mon_event_config_index_get - get the hardware index for the + * configurable event + * @evtid: event id. * - * Using a global CLOSID across all resources has some advantages and - * some drawbacks: - * + We can simply set "current->closid" to assign a task to a resource - * group. - * + Context switch code can avoid extra memory references deciding which - * CLOSID to load into the PQR_ASSOC MSR - * - We give up some options in configuring resource groups across multi-socket - * systems. - * - Our choices on how to configure each resource become progressively more - * limited as the number of resources grows. + * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID + * 1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID + * INVALID_CONFIG_INDEX for invalid evtid */ -static int closid_free_map; -static int closid_free_map_len; - -int closids_supported(void) +static inline unsigned int mon_event_config_index_get(u32 evtid) { - return closid_free_map_len; + switch (evtid) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return 0; + case QOS_L3_MBM_LOCAL_EVENT_ID: + return 1; + default: + /* Should never reach here */ + return INVALID_CONFIG_INDEX; + } } -static void closid_init(void) +void resctrl_arch_mon_event_config_read(void *info) { - struct resctrl_schema *s; - u32 rdt_min_closid = 32; - - /* Compute rdt_min_closid across all resources */ - list_for_each_entry(s, &resctrl_schema_all, list) - rdt_min_closid = min(rdt_min_closid, s->num_closid); + struct resctrl_mon_config_info *mon_info = info; + unsigned int index; + u64 msrval; - closid_free_map = BIT_MASK(rdt_min_closid) - 1; + index = mon_event_config_index_get(mon_info->evtid); + if (index == INVALID_CONFIG_INDEX) { + pr_warn_once("Invalid event id %d\n", mon_info->evtid); + return; + } + rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval); - /* CLOSID 0 is always reserved for the default group */ - closid_free_map &= ~1; - closid_free_map_len = rdt_min_closid; + /* Report only the valid event configuration bits */ + mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS; } -static int closid_alloc(void) +void resctrl_arch_mon_event_config_write(void *info) { - u32 closid = ffs(closid_free_map); - - if (closid == 0) - return -ENOSPC; - closid--; - closid_free_map &= ~(1 << closid); - - return closid; -} + struct resctrl_mon_config_info *mon_info = info; + unsigned int index; -void closid_free(int closid) -{ - closid_free_map |= 1 << closid; -} + index = mon_event_config_index_get(mon_info->evtid); + if (index == INVALID_CONFIG_INDEX) { + pr_warn_once("Invalid event id %d\n", mon_info->evtid); + mon_info->err = -EINVAL; + return; + } + wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); -/** - * closid_allocated - test if provided closid is in use - * @closid: closid to be tested - * - * Return: true if @closid is currently associated with a resource group, - * false if @closid is free - */ -static bool closid_allocated(unsigned int closid) -{ - return (closid_free_map & (1 << closid)) == 0; + mon_info->err = 0; } -/** - * 
rdtgroup_mode_by_closid - Return mode of resource group with closid - * @closid: closid if the resource group - * - * Each resource group is associated with a @closid. Here the mode - * of a resource group can be queried by searching for it using its closid. - * - * Return: mode as &enum rdtgrp_mode of resource group with closid @closid - */ -enum rdtgrp_mode rdtgroup_mode_by_closid(int closid) +static void l3_qos_cfg_update(void *arg) { - struct rdtgroup *rdtgrp; - - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (rdtgrp->closid == closid) - return rdtgrp->mode; - } + bool *enable = arg; - return RDT_NUM_MODES; + wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); } -static const char * const rdt_mode_str[] = { - [RDT_MODE_SHAREABLE] = "shareable", - [RDT_MODE_EXCLUSIVE] = "exclusive", - [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup", - [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked", -}; - -/** - * rdtgroup_mode_str - Return the string representation of mode - * @mode: the resource group mode as &enum rdtgroup_mode - * - * Return: string representation of valid mode, "unknown" otherwise - */ -static const char *rdtgroup_mode_str(enum rdtgrp_mode mode) +static void l2_qos_cfg_update(void *arg) { - if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES) - return "unknown"; + bool *enable = arg; - return rdt_mode_str[mode]; + wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); } -/* set uid and gid of rdtgroup dirs and files to that of the creator */ -static int rdtgroup_kn_set_ugid(struct kernfs_node *kn) +static int set_cache_qos_cfg(int level, bool enable) { - struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, - .ia_uid = current_fsuid(), - .ia_gid = current_fsgid(), }; - - if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && - gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) - return 0; + void (*update)(void *arg); + struct rdt_resource *r_l; + cpumask_var_t cpu_mask; + struct rdt_domain *d; + int cpu; - return kernfs_setattr(kn, &iattr); -} + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); -static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft) -{ - struct kernfs_node *kn; - int ret; + if (level == RDT_RESOURCE_L3) + update = l3_qos_cfg_update; + else if (level == RDT_RESOURCE_L2) + update = l2_qos_cfg_update; + else + return -EINVAL; - kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, - 0, rft->kf_ops, rft, NULL, NULL); - if (IS_ERR(kn)) - return PTR_ERR(kn); + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - kernfs_remove(kn); - return ret; + r_l = &rdt_resources_all[level].r_resctrl; + list_for_each_entry(d, &r_l->domains, list) { + if (r_l->cache.arch_has_per_cpu_cfg) + /* Pick all the CPUs in the domain instance */ + for_each_cpu(cpu, &d->cpu_mask) + cpumask_set_cpu(cpu, cpu_mask); + else + /* Pick one CPU from each domain instance to update MSR */ + cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); } - return 0; -} + /* Update QOS_CFG MSR on all the CPUs in cpu_mask */ + on_each_cpu_mask(cpu_mask, update, &enable, 1); -static int rdtgroup_seqfile_show(struct seq_file *m, void *arg) -{ - struct kernfs_open_file *of = m->private; - struct rftype *rft = of->kn->priv; + free_cpumask_var(cpu_mask); - if (rft->seq_show) - return rft->seq_show(of, m, arg); return 0; } -static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf, - size_t nbytes, loff_t off) +/* 
Restore the qos cfg state when a domain comes online */ +void rdt_domain_reconfigure_cdp(struct rdt_resource *r) { - struct rftype *rft = of->kn->priv; - - if (rft->write) - return rft->write(of, buf, nbytes, off); - - return -EINVAL; -} - -static const struct kernfs_ops rdtgroup_kf_single_ops = { - .atomic_write_len = PAGE_SIZE, - .write = rdtgroup_file_write, - .seq_show = rdtgroup_seqfile_show, -}; + struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); -static const struct kernfs_ops kf_mondata_ops = { - .atomic_write_len = PAGE_SIZE, - .seq_show = rdtgroup_mondata_show, -}; + if (!r->cdp_capable) + return; -static bool is_cpu_list(struct kernfs_open_file *of) -{ - struct rftype *rft = of->kn->priv; + if (r->rid == RDT_RESOURCE_L2) + l2_qos_cfg_update(&hw_res->cdp_enabled); - return rft->flags & RFTYPE_FLAGS_CPUS_LIST; + if (r->rid == RDT_RESOURCE_L3) + l3_qos_cfg_update(&hw_res->cdp_enabled); } -static int rdtgroup_cpus_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) +static int cdp_enable(int level) { - struct rdtgroup *rdtgrp; - struct cpumask *mask; - int ret = 0; + struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl; + int ret; - rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!r_l->alloc_capable) + return -EINVAL; - if (rdtgrp) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - if (!rdtgrp->plr->d) { - rdt_last_cmd_clear(); - rdt_last_cmd_puts("Cache domain offline\n"); - ret = -ENODEV; - } else { - mask = &rdtgrp->plr->d->cpu_mask; - seq_printf(s, is_cpu_list(of) ? - "%*pbl\n" : "%*pb\n", - cpumask_pr_args(mask)); - } - } else { - seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", - cpumask_pr_args(&rdtgrp->cpu_mask)); - } - } else { - ret = -ENOENT; - } - rdtgroup_kn_unlock(of->kn); + ret = set_cache_qos_cfg(level, true); + if (!ret) + rdt_resources_all[level].cdp_enabled = true; return ret; } -/* - * This is safe against resctrl_sched_in() called from __switch_to() - * because __switch_to() is executed with interrupts disabled. A local call - * from update_closid_rmid() is protected against __switch_to() because - * preemption is disabled. - */ -static void update_cpu_closid_rmid(void *info) +static void cdp_disable(int level) { - struct rdtgroup *r = info; + struct rdt_hw_resource *r_hw = &rdt_resources_all[level]; - if (r) { - this_cpu_write(pqr_state.default_closid, r->closid); - this_cpu_write(pqr_state.default_rmid, r->mon.rmid); + if (r_hw->cdp_enabled) { + set_cache_qos_cfg(level, false); + r_hw->cdp_enabled = false; } - - /* - * We cannot unconditionally write the MSR because the current - * executing task might have its own closid selected. Just reuse - * the context switch code. - */ - resctrl_sched_in(current); -} - -/* - * Update the PGR_ASSOC MSR on all cpus in @cpu_mask, - * - * Per task closids/rmids must have been set up before calling this function. 
- */ -static void -update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) -{ - on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1); } -static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, - cpumask_var_t tmpmask) +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) { - struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; - struct list_head *head; + struct rdt_hw_resource *hw_res = &rdt_resources_all[l]; - /* Check whether cpus belong to parent ctrl group */ - cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); + if (!hw_res->r_resctrl.cdp_capable) return -EINVAL; - } - - /* Check whether cpus are dropped from this group */ - cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); - if (!cpumask_empty(tmpmask)) { - /* Give any dropped cpus to parent rdtgroup */ - cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); - update_closid_rmid(tmpmask, prgrp); - } - /* - * If we added cpus, remove them from previous group that owned them - * and update per-cpu rmid - */ - cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - head = &prgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) { - if (crgrp == rdtgrp) - continue; - cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, - tmpmask); - } - update_closid_rmid(tmpmask, rdtgrp); - } + if (enable) + return cdp_enable(l); - /* Done pushing/pulling - update this group with new mask */ - cpumask_copy(&rdtgrp->cpu_mask, newmask); + cdp_disable(l); return 0; } -static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) -{ - struct rdtgroup *crgrp; - - cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); - /* update the child mon group masks as well*/ - list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) - cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); -} - -static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, - cpumask_var_t tmpmask, cpumask_var_t tmpmask1) +static int reset_all_ctrls(struct rdt_resource *r) { - struct rdtgroup *r, *crgrp; - struct list_head *head; - - /* Check whether cpus are dropped from this group */ - cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); - if (!cpumask_empty(tmpmask)) { - /* Can't drop from default group */ - if (rdtgrp == &rdtgroup_default) { - rdt_last_cmd_puts("Can't drop CPUs from default group\n"); - return -EINVAL; - } + struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); + struct rdt_hw_domain *hw_dom; + struct msr_param msr_param; + cpumask_var_t cpu_mask; + struct rdt_domain *d; + int i; - /* Give any dropped cpus to rdtgroup_default */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, tmpmask); - update_closid_rmid(tmpmask, &rdtgroup_default); - } + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); - /* - * If we added cpus, remove them from previous group and - * the prev group's child groups that owned them - * and update per-cpu closid/rmid. 
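The cpumask_andnot() calls that follow implement the push/pull described in this comment. Reduced to single 64-bit words rather than real cpumasks, the arithmetic is just::

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
      uint64_t old_mask = 0x0f;     /* CPUs 0-3 currently in the group */
      uint64_t new_mask = 0x3c;     /* user wrote CPUs 2-5 */

      uint64_t dropped = old_mask & ~new_mask; /* 0x03: give back */
      uint64_t added   = new_mask & ~old_mask; /* 0x30: pull in */

      printf("dropped 0x%llx, added 0x%llx\n",
             (unsigned long long)dropped, (unsigned long long)added);
      return 0;
  }

Dropped CPUs go back to the parent (or default) group, added ones are stolen from whichever group owned them, and each affected CPU then gets its per-cpu closid/rmid rewritten.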
- */ - cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { - if (r == rdtgrp) - continue; - cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); - if (!cpumask_empty(tmpmask1)) - cpumask_rdtgrp_clear(r, tmpmask1); - } - update_closid_rmid(tmpmask, rdtgrp); - } + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; - /* Done pushing/pulling - update this group with new mask */ - cpumask_copy(&rdtgrp->cpu_mask, newmask); + msr_param.res = r; + msr_param.low = 0; + msr_param.high = hw_res->num_closid; /* - * Clear child mon group masks since there is a new parent mask - * now and update the rmid for the cpus the child lost. + * Disable resource control for this resource by setting all + * CBMs in all domains to the maximum mask value. Pick one CPU + * from each domain to update the MSRs below. */ - head = &rdtgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) { - cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); - update_closid_rmid(tmpmask, rdtgrp); - cpumask_clear(&crgrp->cpu_mask); - } - - return 0; -} - -static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - cpumask_var_t tmpmask, newmask, tmpmask1; - struct rdtgroup *rdtgrp; - int ret; - - if (!buf) - return -EINVAL; - - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) - return -ENOMEM; - if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { - free_cpumask_var(tmpmask); - return -ENOMEM; - } - if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { - free_cpumask_var(tmpmask); - free_cpumask_var(newmask); - return -ENOMEM; - } - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - ret = -ENOENT; - goto unlock; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = -EINVAL; - rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto unlock; - } - - if (is_cpu_list(of)) - ret = cpulist_parse(buf, newmask); - else - ret = cpumask_parse(buf, newmask); - - if (ret) { - rdt_last_cmd_puts("Bad CPU list/mask\n"); - goto unlock; - } + list_for_each_entry(d, &r->domains, list) { + hw_dom = resctrl_to_arch_dom(d); + cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - /* check that user didn't specify any offline cpus */ - cpumask_andnot(tmpmask, newmask, cpu_online_mask); - if (!cpumask_empty(tmpmask)) { - ret = -EINVAL; - rdt_last_cmd_puts("Can only assign online CPUs\n"); - goto unlock; + for (i = 0; i < hw_res->num_closid; i++) + hw_dom->ctrl_val[i] = r->default_ctrl; } - if (rdtgrp->type == RDTCTRL_GROUP) - ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); - else if (rdtgrp->type == RDTMON_GROUP) - ret = cpus_mon_write(rdtgrp, newmask, tmpmask); - else - ret = -EINVAL; - -unlock: - rdtgroup_kn_unlock(of->kn); - free_cpumask_var(tmpmask); - free_cpumask_var(newmask); - free_cpumask_var(tmpmask1); + /* Update CBM on all the CPUs in cpu_mask */ + on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1); - return ret ?: nbytes; -} + free_cpumask_var(cpu_mask); -/** - * rdtgroup_remove - the helper to remove resource group safely - * @rdtgrp: resource group to remove - * - * On resource group creation via a mkdir, an extra kernfs_node reference is - * taken to ensure that the rdtgroup structure remains accessible for the - * rdtgroup_kn_unlock() calls where it is removed. - * - * Drop the extra reference here, then free the rdtgroup structure. 
- * - * Return: void - */ -static void rdtgroup_remove(struct rdtgroup *rdtgrp) -{ - kernfs_put(rdtgrp->kn); - kfree(rdtgrp); + return 0; } -static void _update_task_closid_rmid(void *task) +void resctrl_arch_reset_resources(void) { - /* - * If the task is still current on this CPU, update PQR_ASSOC MSR. - * Otherwise, the MSR is updated when the task is scheduled in. - */ - if (task == current) - resctrl_sched_in(task); -} + struct rdt_resource *r; -static void update_task_closid_rmid(struct task_struct *t) -{ - if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) - smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); - else - _update_task_closid_rmid(t); -} - -static int __rdtgroup_move_task(struct task_struct *tsk, - struct rdtgroup *rdtgrp) -{ - /* If the task is already in rdtgrp, no need to move the task. */ - if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid && - tsk->rmid == rdtgrp->mon.rmid) || - (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid && - tsk->closid == rdtgrp->mon.parent->closid)) - return 0; - - /* - * Set the task's closid/rmid before the PQR_ASSOC MSR can be - * updated by them. - * - * For ctrl_mon groups, move both closid and rmid. - * For monitor groups, can move the tasks only from - * their parent CTRL group. - */ - - if (rdtgrp->type == RDTCTRL_GROUP) { - WRITE_ONCE(tsk->closid, rdtgrp->closid); - WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); - } else if (rdtgrp->type == RDTMON_GROUP) { - if (rdtgrp->mon.parent->closid == tsk->closid) { - WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); - } else { - rdt_last_cmd_puts("Can't move task to different control group\n"); - return -EINVAL; - } - } - - /* - * Ensure the task's closid and rmid are written before determining if - * the task is current that will decide if it will be interrupted. - * This pairs with the full barrier between the rq->curr update and - * resctrl_sched_in() during context switch. - */ - smp_mb(); - - /* - * By now, the task's closid and rmid are set. If the task is current - * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource - * group go into effect. If the task is not current, the MSR will be - * updated when the task is scheduled in. - */ - update_task_closid_rmid(tsk); - - return 0; -} - -static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) -{ - return (rdt_alloc_capable && - (r->type == RDTCTRL_GROUP) && (t->closid == r->closid)); -} - -static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) -{ - return (rdt_mon_capable && - (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid)); -} - -/** - * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group - * @r: Resource group - * - * Return: 1 if tasks have been assigned to @r, 0 otherwise - */ -int rdtgroup_tasks_assigned(struct rdtgroup *r) -{ - struct task_struct *p, *t; - int ret = 0; - - lockdep_assert_held(&rdtgroup_mutex); - - rcu_read_lock(); - for_each_process_thread(p, t) { - if (is_closid_match(t, r) || is_rmid_match(t, r)) { - ret = 1; - break; - } - } - rcu_read_unlock(); - - return ret; -} - -static int rdtgroup_task_write_permission(struct task_struct *task, - struct kernfs_open_file *of) -{ - const struct cred *tcred = get_task_cred(task); - const struct cred *cred = current_cred(); - int ret = 0; - - /* - * Even if we're attaching all tasks in the thread group, we only - * need to check permissions on one of them. 
- */ - if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && - !uid_eq(cred->euid, tcred->uid) && - !uid_eq(cred->euid, tcred->suid)) { - rdt_last_cmd_printf("No permission to move task %d\n", task->pid); - ret = -EPERM; - } - - put_cred(tcred); - return ret; -} - -static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, - struct kernfs_open_file *of) -{ - struct task_struct *tsk; - int ret; - - rcu_read_lock(); - if (pid) { - tsk = find_task_by_vpid(pid); - if (!tsk) { - rcu_read_unlock(); - rdt_last_cmd_printf("No task %d\n", pid); - return -ESRCH; - } - } else { - tsk = current; - } - - get_task_struct(tsk); - rcu_read_unlock(); - - ret = rdtgroup_task_write_permission(tsk, of); - if (!ret) - ret = __rdtgroup_move_task(tsk, rdtgrp); - - put_task_struct(tsk); - return ret; -} - -static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct rdtgroup *rdtgrp; - int ret = 0; - pid_t pid; - - if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) - return -EINVAL; - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - rdt_last_cmd_clear(); - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = -EINVAL; - rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto unlock; - } - - ret = rdtgroup_move_task(pid, rdtgrp, of); - -unlock: - rdtgroup_kn_unlock(of->kn); - - return ret ?: nbytes; -} - -static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) -{ - struct task_struct *p, *t; - pid_t pid; - - rcu_read_lock(); - for_each_process_thread(p, t) { - if (is_closid_match(t, r) || is_rmid_match(t, r)) { - pid = task_pid_vnr(t); - if (pid) - seq_printf(s, "%d\n", pid); - } - } - rcu_read_unlock(); -} - -static int rdtgroup_tasks_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct rdtgroup *rdtgrp; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) - show_rdt_tasks(rdtgrp, s); - else - ret = -ENOENT; - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -#ifdef CONFIG_PROC_CPU_RESCTRL - -/* - * A task can only be part of one resctrl control group and of one monitor - * group which is associated to that control group. - * - * 1) res: - * mon: - * - * resctrl is not available. - * - * 2) res:/ - * mon: - * - * Task is part of the root resctrl control group, and it is not associated - * to any monitor group. - * - * 3) res:/ - * mon:mon0 - * - * Task is part of the root resctrl control group and monitor group mon0. - * - * 4) res:group0 - * mon: - * - * Task is part of resctrl control group group0, and it is not associated - * to any monitor group. - * - * 5) res:group0 - * mon:mon1 - * - * Task is part of resctrl control group group0 and monitor group mon1. - */ -int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, - struct pid *pid, struct task_struct *tsk) -{ - struct rdtgroup *rdtg; - int ret = 0; - - mutex_lock(&rdtgroup_mutex); - - /* Return empty if resctrl has not been mounted. */ - if (!static_branch_unlikely(&rdt_enable_key)) { - seq_puts(s, "res:\nmon:\n"); - goto unlock; - } - - list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) { - struct rdtgroup *crg; - - /* - * Task information is only relevant for shareable - * and exclusive groups. - */ - if (rdtg->mode != RDT_MODE_SHAREABLE && - rdtg->mode != RDT_MODE_EXCLUSIVE) - continue; - - if (rdtg->closid != tsk->closid) - continue; - - seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? 
"/" : "", - rdtg->kn->name); - seq_puts(s, "mon:"); - list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, - mon.crdtgrp_list) { - if (tsk->rmid != crg->mon.rmid) - continue; - seq_printf(s, "%s", crg->kn->name); - break; - } - seq_putc(s, '\n'); - goto unlock; - } - /* - * The above search should succeed. Otherwise return - * with an error. - */ - ret = -ENOENT; -unlock: - mutex_unlock(&rdtgroup_mutex); - - return ret; -} -#endif - -static int rdt_last_cmd_status_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - int len; - - mutex_lock(&rdtgroup_mutex); - len = seq_buf_used(&last_cmd_status); - if (len) - seq_printf(seq, "%.*s", len, last_cmd_status_buf); - else - seq_puts(seq, "ok\n"); - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static int rdt_num_closids_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - - seq_printf(seq, "%u\n", s->num_closid); - return 0; -} - -static int rdt_default_ctrl_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%x\n", r->default_ctrl); - return 0; -} - -static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->cache.min_cbm_bits); - return 0; -} - -static int rdt_shareable_bits_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%x\n", r->cache.shareable_bits); - return 0; -} - -/** - * rdt_bit_usage_show - Display current usage of resources - * - * A domain is a shared resource that can now be allocated differently. Here - * we display the current regions of the domain as an annotated bitmask. - * For each domain of this resource its allocation bitmask - * is annotated as below to indicate the current usage of the corresponding bit: - * 0 - currently unused - * X - currently available for sharing and used by software and hardware - * H - currently used by hardware only but available for software use - * S - currently used and shareable by software only - * E - currently used exclusively by one resource group - * P - currently pseudo-locked by one resource group - */ -static int rdt_bit_usage_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - /* - * Use unsigned long even though only 32 bits are used to ensure - * test_bit() is used safely. 
- */ - unsigned long sw_shareable = 0, hw_shareable = 0; - unsigned long exclusive = 0, pseudo_locked = 0; - struct rdt_resource *r = s->res; - struct rdt_domain *dom; - int i, hwb, swb, excl, psl; - enum rdtgrp_mode mode; - bool sep = false; - u32 ctrl_val; - - mutex_lock(&rdtgroup_mutex); - hw_shareable = r->cache.shareable_bits; - list_for_each_entry(dom, &r->domains, list) { - if (sep) - seq_putc(seq, ';'); - sw_shareable = 0; - exclusive = 0; - seq_printf(seq, "%d=", dom->id); - for (i = 0; i < closids_supported(); i++) { - if (!closid_allocated(i)) - continue; - ctrl_val = resctrl_arch_get_config(r, dom, i, - s->conf_type); - mode = rdtgroup_mode_by_closid(i); - switch (mode) { - case RDT_MODE_SHAREABLE: - sw_shareable |= ctrl_val; - break; - case RDT_MODE_EXCLUSIVE: - exclusive |= ctrl_val; - break; - case RDT_MODE_PSEUDO_LOCKSETUP: - /* - * RDT_MODE_PSEUDO_LOCKSETUP is possible - * here but not included since the CBM - * associated with this CLOSID in this mode - * is not initialized and no task or cpu can be - * assigned this CLOSID. - */ - break; - case RDT_MODE_PSEUDO_LOCKED: - case RDT_NUM_MODES: - WARN(1, - "invalid mode for closid %d\n", i); - break; - } - } - for (i = r->cache.cbm_len - 1; i >= 0; i--) { - pseudo_locked = dom->plr ? dom->plr->cbm : 0; - hwb = test_bit(i, &hw_shareable); - swb = test_bit(i, &sw_shareable); - excl = test_bit(i, &exclusive); - psl = test_bit(i, &pseudo_locked); - if (hwb && swb) - seq_putc(seq, 'X'); - else if (hwb && !swb) - seq_putc(seq, 'H'); - else if (!hwb && swb) - seq_putc(seq, 'S'); - else if (excl) - seq_putc(seq, 'E'); - else if (psl) - seq_putc(seq, 'P'); - else /* Unused bits remain */ - seq_putc(seq, '0'); - } - sep = true; - } - seq_putc(seq, '\n'); - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static int rdt_min_bw_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->membw.min_bw); - return 0; -} - -static int rdt_num_rmids_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - - seq_printf(seq, "%d\n", r->num_rmid); - - return 0; -} - -static int rdt_mon_features_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - struct mon_evt *mevt; - - list_for_each_entry(mevt, &r->evt_list, list) { - seq_printf(seq, "%s\n", mevt->name); - if (mevt->configurable) - seq_printf(seq, "%s_config\n", mevt->name); - } - - return 0; -} - -static int rdt_bw_gran_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->membw.bw_gran); - return 0; -} - -static int rdt_delay_linear_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->membw.delay_linear); - return 0; -} - -static int max_threshold_occ_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold); - - return 0; -} - -static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD) - seq_puts(seq, 
"per-thread\n"); - else - seq_puts(seq, "max\n"); - - return 0; -} - -static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - unsigned int bytes; - int ret; - - ret = kstrtouint(buf, 0, &bytes); - if (ret) - return ret; - - if (bytes > resctrl_rmid_realloc_limit) - return -EINVAL; - - resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes); - - return nbytes; -} - -/* - * rdtgroup_mode_show - Display mode of this resource group - */ -static int rdtgroup_mode_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct rdtgroup *rdtgrp; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); - - rdtgroup_kn_unlock(of->kn); - return 0; -} - -static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type) -{ - switch (my_type) { - case CDP_CODE: - return CDP_DATA; - case CDP_DATA: - return CDP_CODE; - default: - case CDP_NONE: - return CDP_NONE; - } -} - -/** - * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other - * @r: Resource to which domain instance @d belongs. - * @d: The domain instance for which @closid is being tested. - * @cbm: Capacity bitmask being tested. - * @closid: Intended closid for @cbm. - * @exclusive: Only check if overlaps with exclusive resource groups - * - * Checks if provided @cbm intended to be used for @closid on domain - * @d overlaps with any other closids or other hardware usage associated - * with this domain. If @exclusive is true then only overlaps with - * resource groups in exclusive mode will be considered. If @exclusive - * is false then overlaps with any resource group or hardware entities - * will be considered. - * - * @cbm is unsigned long, even if only 32 bits are used, to make the - * bitmap functions work correctly. - * - * Return: false if CBM does not overlap, true if it does. - */ -static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, - unsigned long cbm, int closid, - enum resctrl_conf_type type, bool exclusive) -{ - enum rdtgrp_mode mode; - unsigned long ctrl_b; - int i; - - /* Check for any overlap with regions used by hardware directly */ - if (!exclusive) { - ctrl_b = r->cache.shareable_bits; - if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) - return true; - } - - /* Check for overlap with other resource groups */ - for (i = 0; i < closids_supported(); i++) { - ctrl_b = resctrl_arch_get_config(r, d, i, type); - mode = rdtgroup_mode_by_closid(i); - if (closid_allocated(i) && i != closid && - mode != RDT_MODE_PSEUDO_LOCKSETUP) { - if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { - if (exclusive) { - if (mode == RDT_MODE_EXCLUSIVE) - return true; - continue; - } - return true; - } - } - } - - return false; -} - -/** - * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware - * @s: Schema for the resource to which domain instance @d belongs. - * @d: The domain instance for which @closid is being tested. - * @cbm: Capacity bitmask being tested. - * @closid: Intended closid for @cbm. - * @exclusive: Only check if overlaps with exclusive resource groups - * - * Resources that can be allocated using a CBM can use the CBM to control - * the overlap of these allocations. rdtgroup_cmb_overlaps() is the test - * for overlap. 
Overlap test is not limited to the specific resource for - * which the CBM is intended though - when dealing with CDP resources that - * share the underlying hardware the overlap check should be performed on - * the CDP resource sharing the hardware also. - * - * Refer to description of __rdtgroup_cbm_overlaps() for the details of the - * overlap test. - * - * Return: true if CBM overlap detected, false if there is no overlap - */ -bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, - unsigned long cbm, int closid, bool exclusive) -{ - enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); - struct rdt_resource *r = s->res; - - if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type, - exclusive)) - return true; - - if (!resctrl_arch_get_cdp_enabled(r->rid)) - return false; - return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive); -} - -/** - * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive - * - * An exclusive resource group implies that there should be no sharing of - * its allocated resources. At the time this group is considered to be - * exclusive this test can determine if its current schemata supports this - * setting by testing for overlap with all other resource groups. - * - * Return: true if resource group can be exclusive, false if there is overlap - * with allocations of other resource groups and thus this resource group - * cannot be exclusive. - */ -static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) -{ - int closid = rdtgrp->closid; - struct resctrl_schema *s; - struct rdt_resource *r; - bool has_cache = false; - struct rdt_domain *d; - u32 ctrl; - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) - continue; - has_cache = true; - list_for_each_entry(d, &r->domains, list) { - ctrl = resctrl_arch_get_config(r, d, closid, - s->conf_type); - if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) { - rdt_last_cmd_puts("Schemata overlaps\n"); - return false; - } - } - } - - if (!has_cache) { - rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); - return false; - } - - return true; -} - -/** - * rdtgroup_mode_write - Modify the resource group's mode - * - */ -static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct rdtgroup *rdtgrp; - enum rdtgrp_mode mode; - int ret = 0; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - buf[nbytes - 1] = '\0'; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - rdt_last_cmd_clear(); - - mode = rdtgrp->mode; - - if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || - (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || - (!strcmp(buf, "pseudo-locksetup") && - mode == RDT_MODE_PSEUDO_LOCKSETUP) || - (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) - goto out; - - if (mode == RDT_MODE_PSEUDO_LOCKED) { - rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); - ret = -EINVAL; - goto out; - } - - if (!strcmp(buf, "shareable")) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = rdtgroup_locksetup_exit(rdtgrp); - if (ret) - goto out; - } - rdtgrp->mode = RDT_MODE_SHAREABLE; - } else if (!strcmp(buf, "exclusive")) { - if (!rdtgroup_mode_test_exclusive(rdtgrp)) { - ret = -EINVAL; - goto out; - } - if (rdtgrp->mode == 
RDT_MODE_PSEUDO_LOCKSETUP) { - ret = rdtgroup_locksetup_exit(rdtgrp); - if (ret) - goto out; - } - rdtgrp->mode = RDT_MODE_EXCLUSIVE; - } else if (!strcmp(buf, "pseudo-locksetup")) { - ret = rdtgroup_locksetup_enter(rdtgrp); - if (ret) - goto out; - rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; - } else { - rdt_last_cmd_puts("Unknown or unsupported mode\n"); - ret = -EINVAL; - } - -out: - rdtgroup_kn_unlock(of->kn); - return ret ?: nbytes; -} - -/** - * rdtgroup_cbm_to_size - Translate CBM to size in bytes - * @r: RDT resource to which @d belongs. - * @d: RDT domain instance. - * @cbm: bitmask for which the size should be computed. - * - * The bitmask provided associated with the RDT domain instance @d will be - * translated into how many bytes it represents. The size in bytes is - * computed by first dividing the total cache size by the CBM length to - * determine how many bytes each bit in the bitmask represents. The result - * is multiplied with the number of bits set in the bitmask. - * - * @cbm is unsigned long, even if only 32 bits are used to make the - * bitmap functions work correctly. - */ -unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, - struct rdt_domain *d, unsigned long cbm) -{ - struct cpu_cacheinfo *ci; - unsigned int size = 0; - int num_b, i; - - num_b = bitmap_weight(&cbm, r->cache.cbm_len); - ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); - for (i = 0; i < ci->num_leaves; i++) { - if (ci->info_list[i].level == r->cache_level) { - size = ci->info_list[i].size / r->cache.cbm_len * num_b; - break; - } - } - - return size; -} - -/** - * rdtgroup_size_show - Display size in bytes of allocated regions - * - * The "size" file mirrors the layout of the "schemata" file, printing the - * size in bytes of each region instead of the capacity bitmask. 
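rdtgroup_cbm_to_size() above first derives bytes-per-CBM-bit (total cache size divided by cbm_len) and then multiplies by the weight of the mask. A stand-alone, worked re-expression of that arithmetic; the 32 MiB cache and 16-bit CBM are illustrative numbers only::

    #include <stdio.h>

    /* Mirrors rdtgroup_cbm_to_size(): bytes per CBM bit times bits set. */
    static unsigned int cbm_to_size(unsigned int cache_size,
                                    unsigned int cbm_len, unsigned long cbm)
    {
            return cache_size / cbm_len * __builtin_popcountl(cbm);
    }

    int main(void)
    {
            /* 32 MiB cache, 16-bit CBM, 8 bits set -> 2 MiB/bit * 8 = 16 MiB */
            printf("%u\n", cbm_to_size(32u << 20, 16, 0x00ff));
            return 0;
    }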
- * - */ -static int rdtgroup_size_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct resctrl_schema *schema; - enum resctrl_conf_type type; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - struct rdt_domain *d; - unsigned int size; - int ret = 0; - u32 closid; - bool sep; - u32 ctrl; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - if (!rdtgrp->plr->d) { - rdt_last_cmd_clear(); - rdt_last_cmd_puts("Cache domain offline\n"); - ret = -ENODEV; - } else { - seq_printf(s, "%*s:", max_name_width, - rdtgrp->plr->s->name); - size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res, - rdtgrp->plr->d, - rdtgrp->plr->cbm); - seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); - } - goto out; - } - - closid = rdtgrp->closid; - - list_for_each_entry(schema, &resctrl_schema_all, list) { - r = schema->res; - type = schema->conf_type; - sep = false; - seq_printf(s, "%*s:", max_name_width, schema->name); - list_for_each_entry(d, &r->domains, list) { - if (sep) - seq_putc(s, ';'); - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - size = 0; - } else { - if (is_mba_sc(r)) - ctrl = d->mbps_val[closid]; - else - ctrl = resctrl_arch_get_config(r, d, - closid, - type); - if (r->rid == RDT_RESOURCE_MBA || - r->rid == RDT_RESOURCE_SMBA) - size = ctrl; - else - size = rdtgroup_cbm_to_size(r, d, ctrl); - } - seq_printf(s, "%d=%u", d->id, size); - sep = true; - } - seq_putc(s, '\n'); - } - -out: - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -struct mon_config_info { - u32 evtid; - u32 mon_config; -}; - -#define INVALID_CONFIG_INDEX UINT_MAX - -/** - * mon_event_config_index_get - get the hardware index for the - * configurable event - * @evtid: event id. 
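Reading the size file produced by rdtgroup_size_show() is then a plain text read from user space; a short sketch, with the group path assumed::

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            /* Group path is illustrative. */
            FILE *f = fopen("/sys/fs/resctrl/group0/size", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);  /* e.g. "L3:0=16777216;1=16777216" */
            fclose(f);
            return 0;
    }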
- * - * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID - * 1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID - * INVALID_CONFIG_INDEX for invalid evtid - */ -static inline unsigned int mon_event_config_index_get(u32 evtid) -{ - switch (evtid) { - case QOS_L3_MBM_TOTAL_EVENT_ID: - return 0; - case QOS_L3_MBM_LOCAL_EVENT_ID: - return 1; - default: - /* Should never reach here */ - return INVALID_CONFIG_INDEX; - } -} - -static void mon_event_config_read(void *info) -{ - struct mon_config_info *mon_info = info; - unsigned int index; - u64 msrval; - - index = mon_event_config_index_get(mon_info->evtid); - if (index == INVALID_CONFIG_INDEX) { - pr_warn_once("Invalid event id %d\n", mon_info->evtid); - return; - } - rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval); - - /* Report only the valid event configuration bits */ - mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS; -} - -static void mondata_config_read(struct rdt_domain *d, struct mon_config_info *mon_info) -{ - smp_call_function_any(&d->cpu_mask, mon_event_config_read, mon_info, 1); -} - -static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) -{ - struct mon_config_info mon_info = {0}; - struct rdt_domain *dom; - bool sep = false; - - mutex_lock(&rdtgroup_mutex); - - list_for_each_entry(dom, &r->domains, list) { - if (sep) - seq_puts(s, ";"); - - memset(&mon_info, 0, sizeof(struct mon_config_info)); - mon_info.evtid = evtid; - mondata_config_read(dom, &mon_info); - - seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config); - sep = true; - } - seq_puts(s, "\n"); - - mutex_unlock(&rdtgroup_mutex); - - return 0; -} - -static int mbm_total_bytes_config_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - - mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID); - - return 0; -} - -static int mbm_local_bytes_config_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - - mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID); - - return 0; -} - -static void mon_event_config_write(void *info) -{ - struct mon_config_info *mon_info = info; - unsigned int index; - - index = mon_event_config_index_get(mon_info->evtid); - if (index == INVALID_CONFIG_INDEX) { - pr_warn_once("Invalid event id %d\n", mon_info->evtid); - return; - } - wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); -} - -static int mbm_config_write_domain(struct rdt_resource *r, - struct rdt_domain *d, u32 evtid, u32 val) -{ - struct mon_config_info mon_info = {0}; - int ret = 0; - - /* - * Read the current config value first. If both are the same then - * no need to write it again. - */ - mon_info.evtid = evtid; - mondata_config_read(d, &mon_info); - if (mon_info.mon_config == val) - goto out; - - mon_info.mon_config = val; - - /* - * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the - * domain. The MSRs offset from MSR MSR_IA32_EVT_CFG_BASE - * are scoped at the domain level. Writing any of these MSRs - * on one CPU is observed by all the CPUs in the domain. - */ - smp_call_function_any(&d->cpu_mask, mon_event_config_write, - &mon_info, 1); - - /* - * When an Event Configuration is changed, the bandwidth counters - * for all RMIDs and Events will be cleared by the hardware. The - * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for - * every RMID on the next read to any event for every RMID. - * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62) - * cleared while it is tracked by the hardware. 
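mbm_config_show() above prints each domain as <id>=0x<value>, ';'-separated, which is what lands in the mbm_total_bytes_config and mbm_local_bytes_config files. A small parser for that format; the path assumes the L3 resource's "L3_MON" info directory created later in this file::

    #include <stdio.h>

    /* Parse "0=0x7f;1=0x7f\n" as printed by mbm_config_show(). */
    int main(void)
    {
            const char *path =
                    "/sys/fs/resctrl/info/L3_MON/mbm_total_bytes_config";
            unsigned int dom, cfg;
            FILE *f = fopen(path, "r");

            if (!f)
                    return 1;
            while (fscanf(f, "%u=%x", &dom, &cfg) == 2) {
                    printf("domain %u: 0x%02x\n", dom, cfg);
                    if (fgetc(f) != ';')  /* ';' separates domains */
                            break;
            }
            fclose(f);
            return 0;
    }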
Clear the - * mbm_local and mbm_total counts for all the RMIDs. - */ - resctrl_arch_reset_rmid_all(r, d); - -out: - return ret; -} - -static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) -{ - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); - char *dom_str = NULL, *id_str; - unsigned long dom_id, val; - struct rdt_domain *d; - int ret = 0; - -next: - if (!tok || tok[0] == '\0') - return 0; - - /* Start processing the strings for each domain */ - dom_str = strim(strsep(&tok, ";")); - id_str = strsep(&dom_str, "="); - - if (!id_str || kstrtoul(id_str, 10, &dom_id)) { - rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n"); - return -EINVAL; - } - - if (!dom_str || kstrtoul(dom_str, 16, &val)) { - rdt_last_cmd_puts("Non-numeric event configuration value\n"); - return -EINVAL; - } - - /* Value from user cannot be more than the supported set of events */ - if ((val & hw_res->mbm_cfg_mask) != val) { - rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n", - hw_res->mbm_cfg_mask); - return -EINVAL; - } - - list_for_each_entry(d, &r->domains, list) { - if (d->id == dom_id) { - ret = mbm_config_write_domain(r, d, evtid, val); - if (ret) - return -EINVAL; - goto next; - } - } - - return -EINVAL; -} - -static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, - loff_t off) -{ - struct rdt_resource *r = of->kn->parent->priv; - int ret; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - buf[nbytes - 1] = '\0'; - - ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID); - - mutex_unlock(&rdtgroup_mutex); - - return ret ?: nbytes; -} - -static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, - loff_t off) -{ - struct rdt_resource *r = of->kn->parent->priv; - int ret; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - buf[nbytes - 1] = '\0'; - - ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID); - - mutex_unlock(&rdtgroup_mutex); - - return ret ?: nbytes; -} - -/* rdtgroup information files for one cache resource. 
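mon_config_write() above accepts the same <domain id>=<value> pairs on write, ';'-separated with a mandatory trailing newline, and rejects values outside the supported mask with -EINVAL. A sketch of reprogramming two domains; the path and the 0x33 value are illustrative::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Reconfigure the total-bandwidth event for domains 0 and 1. */
            int fd = open("/sys/fs/resctrl/info/L3_MON/mbm_total_bytes_config",
                          O_WRONLY);

            if (fd < 0)
                    return 1;
            if (write(fd, "0=0x33;1=0x33\n", 14) < 0)
                    perror("write");  /* -EINVAL on a bad mask or domain */
            return close(fd);
    }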
*/ -static struct rftype res_common_files[] = { - { - .name = "last_cmd_status", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_last_cmd_status_show, - .fflags = RF_TOP_INFO, - }, - { - .name = "num_closids", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_num_closids_show, - .fflags = RF_CTRL_INFO, - }, - { - .name = "mon_features", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_mon_features_show, - .fflags = RF_MON_INFO, - }, - { - .name = "num_rmids", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_num_rmids_show, - .fflags = RF_MON_INFO, - }, - { - .name = "cbm_mask", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_default_ctrl_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "min_cbm_bits", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_min_cbm_bits_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "shareable_bits", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_shareable_bits_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "bit_usage", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_bit_usage_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "min_bandwidth", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_min_bw_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, - }, - { - .name = "bandwidth_gran", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_bw_gran_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, - }, - { - .name = "delay_linear", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_delay_linear_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, - }, - /* - * Platform specific which (if any) capabilities are provided by - * thread_throttle_mode. Defer "fflags" initialization to platform - * discovery. 
- */ - { - .name = "thread_throttle_mode", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_thread_throttle_mode_show, - }, - { - .name = "max_threshold_occupancy", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = max_threshold_occ_write, - .seq_show = max_threshold_occ_show, - .fflags = RF_MON_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "mbm_total_bytes_config", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = mbm_total_bytes_config_show, - .write = mbm_total_bytes_config_write, - }, - { - .name = "mbm_local_bytes_config", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = mbm_local_bytes_config_show, - .write = mbm_local_bytes_config_write, - }, - { - .name = "cpus", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_cpus_write, - .seq_show = rdtgroup_cpus_show, - .fflags = RFTYPE_BASE, - }, - { - .name = "cpus_list", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_cpus_write, - .seq_show = rdtgroup_cpus_show, - .flags = RFTYPE_FLAGS_CPUS_LIST, - .fflags = RFTYPE_BASE, - }, - { - .name = "tasks", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_tasks_write, - .seq_show = rdtgroup_tasks_show, - .fflags = RFTYPE_BASE, - }, - { - .name = "schemata", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_schemata_write, - .seq_show = rdtgroup_schemata_show, - .fflags = RF_CTRL_BASE, - }, - { - .name = "mode", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_mode_write, - .seq_show = rdtgroup_mode_show, - .fflags = RF_CTRL_BASE, - }, - { - .name = "size", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdtgroup_size_show, - .fflags = RF_CTRL_BASE, - }, - -}; - -static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) -{ - struct rftype *rfts, *rft; - int ret, len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - lockdep_assert_held(&rdtgroup_mutex); - - for (rft = rfts; rft < rfts + len; rft++) { - if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { - ret = rdtgroup_add_file(kn, rft); - if (ret) - goto error; - } - } - - return 0; -error: - pr_warn("Failed to add %s, err=%d\n", rft->name, ret); - while (--rft >= rfts) { - if ((fflags & rft->fflags) == rft->fflags) - kernfs_remove_by_name(kn, rft->name); - } - return ret; -} - -static struct rftype *rdtgroup_get_rftype_by_name(const char *name) -{ - struct rftype *rfts, *rft; - int len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - for (rft = rfts; rft < rfts + len; rft++) { - if (!strcmp(rft->name, name)) - return rft; - } - - return NULL; -} - -void __init thread_throttle_mode_init(void) -{ - struct rftype *rft; - - rft = rdtgroup_get_rftype_by_name("thread_throttle_mode"); - if (!rft) - return; - - rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB; -} - -void __init mbm_config_rftype_init(const char *config) -{ - struct rftype *rft; - - rft = rdtgroup_get_rftype_by_name(config); - if (rft) - rft->fflags = RF_MON_INFO | RFTYPE_RES_CACHE; -} - -/** - * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file - * @r: The resource group with which the file is associated. - * @name: Name of the file - * - * The permissions of named resctrl file, directory, or link are modified - * to not allow read, write, or execute by any user. 
- * - * WARNING: This function is intended to communicate to the user that the - * resctrl file has been locked down - that it is not relevant to the - * particular state the system finds itself in. It should not be relied - * on to protect from user access because after the file's permissions - * are restricted the user can still change the permissions using chmod - * from the command line. - * - * Return: 0 on success, <0 on failure. - */ -int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) -{ - struct iattr iattr = {.ia_valid = ATTR_MODE,}; - struct kernfs_node *kn; - int ret = 0; - - kn = kernfs_find_and_get_ns(r->kn, name, NULL); - if (!kn) - return -ENOENT; - - switch (kernfs_type(kn)) { - case KERNFS_DIR: - iattr.ia_mode = S_IFDIR; - break; - case KERNFS_FILE: - iattr.ia_mode = S_IFREG; - break; - case KERNFS_LINK: - iattr.ia_mode = S_IFLNK; - break; - } - - ret = kernfs_setattr(kn, &iattr); - kernfs_put(kn); - return ret; -} - -/** - * rdtgroup_kn_mode_restore - Restore user access to named resctrl file - * @r: The resource group with which the file is associated. - * @name: Name of the file - * @mask: Mask of permissions that should be restored - * - * Restore the permissions of the named file. If @name is a directory the - * permissions of its parent will be used. - * - * Return: 0 on success, <0 on failure. - */ -int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, - umode_t mask) -{ - struct iattr iattr = {.ia_valid = ATTR_MODE,}; - struct kernfs_node *kn, *parent; - struct rftype *rfts, *rft; - int ret, len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - for (rft = rfts; rft < rfts + len; rft++) { - if (!strcmp(rft->name, name)) - iattr.ia_mode = rft->mode & mask; - } - - kn = kernfs_find_and_get_ns(r->kn, name, NULL); - if (!kn) - return -ENOENT; - - switch (kernfs_type(kn)) { - case KERNFS_DIR: - parent = kernfs_get_parent(kn); - if (parent) { - iattr.ia_mode |= parent->mode; - kernfs_put(parent); - } - iattr.ia_mode |= S_IFDIR; - break; - case KERNFS_FILE: - iattr.ia_mode |= S_IFREG; - break; - case KERNFS_LINK: - iattr.ia_mode |= S_IFLNK; - break; - } - - ret = kernfs_setattr(kn, &iattr); - kernfs_put(kn); - return ret; -} - -static int rdtgroup_mkdir_info_resdir(void *priv, char *name, - unsigned long fflags) -{ - struct kernfs_node *kn_subdir; - int ret; - - kn_subdir = kernfs_create_dir(kn_info, name, - kn_info->mode, priv); - if (IS_ERR(kn_subdir)) - return PTR_ERR(kn_subdir); - - ret = rdtgroup_kn_set_ugid(kn_subdir); - if (ret) - return ret; - - ret = rdtgroup_add_files(kn_subdir, fflags); - if (!ret) - kernfs_activate(kn_subdir); - - return ret; -} - -static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) -{ - struct resctrl_schema *s; - struct rdt_resource *r; - unsigned long fflags; - char name[32]; - int ret; - - /* create the directory */ - kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); - if (IS_ERR(kn_info)) - return PTR_ERR(kn_info); - - ret = rdtgroup_add_files(kn_info, RF_TOP_INFO); - if (ret) - goto out_destroy; - - /* loop over enabled controls, these are all alloc_capable */ - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - fflags = r->fflags | RF_CTRL_INFO; - ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); - if (ret) - goto out_destroy; - } - - for_each_mon_capable_rdt_resource(r) { - fflags = r->fflags | RF_MON_INFO; - sprintf(name, "%s_MON", r->name); - ret = rdtgroup_mkdir_info_resdir(r, name, fflags); - if (ret) - goto out_destroy; - } 
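rdtgroup_kn_mode_restrict() above leaves only the file-type bit in ia_mode, so a locked-down node reads back as mode 0000, and per the WARNING this is advisory rather than enforced. A quick user-space probe of that state; the path is illustrative::

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat st;

            /* A file in a pseudo-locked group, for example. */
            if (stat("/sys/fs/resctrl/group0/schemata", &st))
                    return 1;
            printf("restricted: %s\n",
                   (st.st_mode & 0777) == 0 ? "yes" : "no");
            return 0;
    }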
- - ret = rdtgroup_kn_set_ugid(kn_info); - if (ret) - goto out_destroy; - - kernfs_activate(kn_info); - - return 0; - -out_destroy: - kernfs_remove(kn_info); - return ret; -} - -static int -mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, - char *name, struct kernfs_node **dest_kn) -{ - struct kernfs_node *kn; - int ret; - - /* create the directory */ - kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); - if (IS_ERR(kn)) - return PTR_ERR(kn); - - if (dest_kn) - *dest_kn = kn; - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) - goto out_destroy; - - kernfs_activate(kn); - - return 0; - -out_destroy: - kernfs_remove(kn); - return ret; -} - -static void l3_qos_cfg_update(void *arg) -{ - bool *enable = arg; - - wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); -} - -static void l2_qos_cfg_update(void *arg) -{ - bool *enable = arg; - - wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); -} - -static inline bool is_mba_linear(void) -{ - return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear; -} - -static int set_cache_qos_cfg(int level, bool enable) -{ - void (*update)(void *arg); - struct rdt_resource *r_l; - cpumask_var_t cpu_mask; - struct rdt_domain *d; - int cpu; - - if (level == RDT_RESOURCE_L3) - update = l3_qos_cfg_update; - else if (level == RDT_RESOURCE_L2) - update = l2_qos_cfg_update; - else - return -EINVAL; - - if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) - return -ENOMEM; - - r_l = &rdt_resources_all[level].r_resctrl; - list_for_each_entry(d, &r_l->domains, list) { - if (r_l->cache.arch_has_per_cpu_cfg) - /* Pick all the CPUs in the domain instance */ - for_each_cpu(cpu, &d->cpu_mask) - cpumask_set_cpu(cpu, cpu_mask); - else - /* Pick one CPU from each domain instance to update MSR */ - cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - } - - /* Update QOS_CFG MSR on all the CPUs in cpu_mask */ - on_each_cpu_mask(cpu_mask, update, &enable, 1); - - free_cpumask_var(cpu_mask); - - return 0; -} - -/* Restore the qos cfg state when a domain comes online */ -void rdt_domain_reconfigure_cdp(struct rdt_resource *r) -{ - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); - - if (!r->cdp_capable) - return; - - if (r->rid == RDT_RESOURCE_L2) - l2_qos_cfg_update(&hw_res->cdp_enabled); - - if (r->rid == RDT_RESOURCE_L3) - l3_qos_cfg_update(&hw_res->cdp_enabled); -} - -static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d) -{ - u32 num_closid = resctrl_arch_get_num_closid(r); - int cpu = cpumask_any(&d->cpu_mask); - int i; - - d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), - GFP_KERNEL, cpu_to_node(cpu)); - if (!d->mbps_val) - return -ENOMEM; - - for (i = 0; i < num_closid; i++) - d->mbps_val[i] = MBA_MAX_MBPS; - - return 0; -} - -static void mba_sc_domain_destroy(struct rdt_resource *r, - struct rdt_domain *d) -{ - kfree(d->mbps_val); - d->mbps_val = NULL; -} - -/* - * MBA software controller is supported only if - * MBM is supported and MBA is in linear scale. - */ -static bool supports_mba_mbps(void) -{ - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; - - return (is_mbm_local_enabled() && - r->alloc_capable && is_mba_linear()); -} - -/* - * Enable or disable the MBA software controller - * which helps user specify bandwidth in MBps. 
- */ -static int set_mba_sc(bool mba_sc) -{ - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; - u32 num_closid = resctrl_arch_get_num_closid(r); - struct rdt_domain *d; - int i; - - if (!supports_mba_mbps() || mba_sc == is_mba_sc(r)) - return -EINVAL; - - r->membw.mba_sc = mba_sc; - - list_for_each_entry(d, &r->domains, list) { - for (i = 0; i < num_closid; i++) - d->mbps_val[i] = MBA_MAX_MBPS; - } - - return 0; -} - -static int cdp_enable(int level) -{ - struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl; - int ret; - - if (!r_l->alloc_capable) - return -EINVAL; - - ret = set_cache_qos_cfg(level, true); - if (!ret) - rdt_resources_all[level].cdp_enabled = true; - - return ret; -} - -static void cdp_disable(int level) -{ - struct rdt_hw_resource *r_hw = &rdt_resources_all[level]; - - if (r_hw->cdp_enabled) { - set_cache_qos_cfg(level, false); - r_hw->cdp_enabled = false; - } -} - -int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) -{ - struct rdt_hw_resource *hw_res = &rdt_resources_all[l]; - - if (!hw_res->r_resctrl.cdp_capable) - return -EINVAL; - - if (enable) - return cdp_enable(l); - - cdp_disable(l); - - return 0; -} - -static void cdp_disable_all(void) -{ - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); -} - -/* - * We don't allow rdtgroup directories to be created anywhere - * except the root directory. Thus when looking for the rdtgroup - * structure for a kernfs node we are either looking at a directory, - * in which case the rdtgroup structure is pointed at by the "priv" - * field, otherwise we have a file, and need only look to the parent - * to find the rdtgroup. - */ -static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) -{ - if (kernfs_type(kn) == KERNFS_DIR) { - /* - * All the resource directories use "kn->priv" - * to point to the "struct rdtgroup" for the - * resource. "info" and its subdirectories don't - * have rdtgroup structures, so return NULL here. - */ - if (kn == kn_info || kn->parent == kn_info) - return NULL; - else - return kn->priv; - } else { - return kn->parent->priv; - } -} - -static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn) -{ - atomic_inc(&rdtgrp->waitcount); - kernfs_break_active_protection(kn); -} - -static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn) -{ - if (atomic_dec_and_test(&rdtgrp->waitcount) && - (rdtgrp->flags & RDT_DELETED)) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) - rdtgroup_pseudo_lock_remove(rdtgrp); - kernfs_unbreak_active_protection(kn); - rdtgroup_remove(rdtgrp); - } else { - kernfs_unbreak_active_protection(kn); - } -} - -struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) -{ - struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); - - if (!rdtgrp) - return NULL; - - rdtgroup_kn_get(rdtgrp, kn); - - mutex_lock(&rdtgroup_mutex); - - /* Was this group deleted while we waited? 
*/ - if (rdtgrp->flags & RDT_DELETED) - return NULL; - - return rdtgrp; -} - -void rdtgroup_kn_unlock(struct kernfs_node *kn) -{ - struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); - - if (!rdtgrp) - return; - - mutex_unlock(&rdtgroup_mutex); - rdtgroup_kn_put(rdtgrp, kn); -} - -static int mkdir_mondata_all(struct kernfs_node *parent_kn, - struct rdtgroup *prgrp, - struct kernfs_node **mon_data_kn); - -static int rdt_enable_ctx(struct rdt_fs_context *ctx) -{ - int ret = 0; - - if (ctx->enable_cdpl2) - ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); - - if (!ret && ctx->enable_cdpl3) - ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); - - if (!ret && ctx->enable_mba_mbps) - ret = set_mba_sc(true); - - return ret; -} - -static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type) -{ - struct resctrl_schema *s; - const char *suffix = ""; - int ret, cl; - - s = kzalloc(sizeof(*s), GFP_KERNEL); - if (!s) - return -ENOMEM; - - s->res = r; - s->num_closid = resctrl_arch_get_num_closid(r); - if (resctrl_arch_get_cdp_enabled(r->rid)) - s->num_closid /= 2; - - s->conf_type = type; - switch (type) { - case CDP_CODE: - suffix = "CODE"; - break; - case CDP_DATA: - suffix = "DATA"; - break; - case CDP_NONE: - suffix = ""; - break; - } - - ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); - if (ret >= sizeof(s->name)) { - kfree(s); - return -EINVAL; - } - - cl = strlen(s->name); - - /* - * If CDP is supported by this resource, but not enabled, - * include the suffix. This ensures the tabular format of the - * schemata file does not change between mounts of the filesystem. - */ - if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) - cl += 4; - - if (cl > max_name_width) - max_name_width = cl; - - INIT_LIST_HEAD(&s->list); - list_add(&s->list, &resctrl_schema_all); - - return 0; -} - -static int schemata_list_create(void) -{ - struct rdt_resource *r; - int ret = 0; - - for_each_alloc_capable_rdt_resource(r) { - if (resctrl_arch_get_cdp_enabled(r->rid)) { - ret = schemata_list_add(r, CDP_CODE); - if (ret) - break; - - ret = schemata_list_add(r, CDP_DATA); - } else { - ret = schemata_list_add(r, CDP_NONE); - } - - if (ret) - break; - } - - return ret; -} - -static void schemata_list_destroy(void) -{ - struct resctrl_schema *s, *tmp; - - list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { - list_del(&s->list); - kfree(s); - } -} - -static int rdt_get_tree(struct fs_context *fc) -{ - struct rdt_fs_context *ctx = rdt_fc2context(fc); - struct rdt_domain *dom; - struct rdt_resource *r; - int ret; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - /* - * resctrl file system can only be mounted once. 
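rdt_enable_ctx() above applies whichever mount options were parsed (the cdp, cdpl2 and mba_MBps flags defined just below), and rdt_get_tree() refuses a second mount with -EBUSY. Mounting with options from user space; the /sys/fs/resctrl mount point is conventional rather than mandated by this code::

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* -EBUSY on a second mount; -EINVAL for unsupported options. */
            if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0,
                      "cdp,mba_MBps"))
                    perror("mount resctrl");
            return 0;
    }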
- */ - if (static_branch_unlikely(&rdt_enable_key)) { - ret = -EBUSY; - goto out; - } - - ret = rdt_enable_ctx(ctx); - if (ret < 0) - goto out_cdp; - - ret = schemata_list_create(); - if (ret) { - schemata_list_destroy(); - goto out_mba; - } - - closid_init(); - - ret = rdtgroup_create_info_dir(rdtgroup_default.kn); - if (ret < 0) - goto out_schemata_free; - - if (rdt_mon_capable) { - ret = mongroup_create_dir(rdtgroup_default.kn, - &rdtgroup_default, "mon_groups", - &kn_mongrp); - if (ret < 0) - goto out_info; - - ret = mkdir_mondata_all(rdtgroup_default.kn, - &rdtgroup_default, &kn_mondata); - if (ret < 0) - goto out_mongrp; - rdtgroup_default.mon.mon_data_kn = kn_mondata; - } - - ret = rdt_pseudo_lock_init(); - if (ret) - goto out_mondata; - - ret = kernfs_get_tree(fc); - if (ret < 0) - goto out_psl; - - if (rdt_alloc_capable) - static_branch_enable_cpuslocked(&rdt_alloc_enable_key); - if (rdt_mon_capable) - static_branch_enable_cpuslocked(&rdt_mon_enable_key); - - if (rdt_alloc_capable || rdt_mon_capable) - static_branch_enable_cpuslocked(&rdt_enable_key); - - if (is_mbm_enabled()) { - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - list_for_each_entry(dom, &r->domains, list) - mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL); - } - - goto out; - -out_psl: - rdt_pseudo_lock_release(); -out_mondata: - if (rdt_mon_capable) - kernfs_remove(kn_mondata); -out_mongrp: - if (rdt_mon_capable) - kernfs_remove(kn_mongrp); -out_info: - kernfs_remove(kn_info); -out_schemata_free: - schemata_list_destroy(); -out_mba: - if (ctx->enable_mba_mbps) - set_mba_sc(false); -out_cdp: - cdp_disable_all(); -out: - rdt_last_cmd_clear(); - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - return ret; -} - -enum rdt_param { - Opt_cdp, - Opt_cdpl2, - Opt_mba_mbps, - nr__rdt_params -}; - -static const struct fs_parameter_spec rdt_fs_parameters[] = { - fsparam_flag("cdp", Opt_cdp), - fsparam_flag("cdpl2", Opt_cdpl2), - fsparam_flag("mba_MBps", Opt_mba_mbps), - {} -}; - -static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) -{ - struct rdt_fs_context *ctx = rdt_fc2context(fc); - struct fs_parse_result result; - int opt; - - opt = fs_parse(fc, rdt_fs_parameters, param, &result); - if (opt < 0) - return opt; - - switch (opt) { - case Opt_cdp: - ctx->enable_cdpl3 = true; - return 0; - case Opt_cdpl2: - ctx->enable_cdpl2 = true; - return 0; - case Opt_mba_mbps: - if (!supports_mba_mbps()) - return -EINVAL; - ctx->enable_mba_mbps = true; - return 0; - } - - return -EINVAL; -} - -static void rdt_fs_context_free(struct fs_context *fc) -{ - struct rdt_fs_context *ctx = rdt_fc2context(fc); - - kernfs_free_fs_context(fc); - kfree(ctx); -} - -static const struct fs_context_operations rdt_fs_context_ops = { - .free = rdt_fs_context_free, - .parse_param = rdt_parse_param, - .get_tree = rdt_get_tree, -}; - -static int rdt_init_fs_context(struct fs_context *fc) -{ - struct rdt_fs_context *ctx; - - ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); - if (!ctx) - return -ENOMEM; - - ctx->kfc.root = rdt_root; - ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; - fc->fs_private = &ctx->kfc; - fc->ops = &rdt_fs_context_ops; - put_user_ns(fc->user_ns); - fc->user_ns = get_user_ns(&init_user_ns); - fc->global = true; - return 0; -} - -static int reset_all_ctrls(struct rdt_resource *r) -{ - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); - struct rdt_hw_domain *hw_dom; - struct msr_param msr_param; - cpumask_var_t cpu_mask; - struct rdt_domain *d; - int i; - - if (!zalloc_cpumask_var(&cpu_mask, 
GFP_KERNEL)) - return -ENOMEM; - - msr_param.res = r; - msr_param.low = 0; - msr_param.high = hw_res->num_closid; - - /* - * Disable resource control for this resource by setting all - * CBMs in all domains to the maximum mask value. Pick one CPU - * from each domain to update the MSRs below. - */ - list_for_each_entry(d, &r->domains, list) { - hw_dom = resctrl_to_arch_dom(d); - cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - - for (i = 0; i < hw_res->num_closid; i++) - hw_dom->ctrl_val[i] = r->default_ctrl; - } - - /* Update CBM on all the CPUs in cpu_mask */ - on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1); - - free_cpumask_var(cpu_mask); - - return 0; -} - -/* - * Move tasks from one to the other group. If @from is NULL, then all tasks - * in the systems are moved unconditionally (used for teardown). - * - * If @mask is not NULL the cpus on which moved tasks are running are set - * in that mask so the update smp function call is restricted to affected - * cpus. - */ -static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, - struct cpumask *mask) -{ - struct task_struct *p, *t; - - read_lock(&tasklist_lock); - for_each_process_thread(p, t) { - if (!from || is_closid_match(t, from) || - is_rmid_match(t, from)) { - WRITE_ONCE(t->closid, to->closid); - WRITE_ONCE(t->rmid, to->mon.rmid); - - /* - * Order the closid/rmid stores above before the loads - * in task_curr(). This pairs with the full barrier - * between the rq->curr update and resctrl_sched_in() - * during context switch. - */ - smp_mb(); - - /* - * If the task is on a CPU, set the CPU in the mask. - * The detection is inaccurate as tasks might move or - * schedule before the smp function call takes place. - * In such a case the function call is pointless, but - * there is no other side effect. - */ - if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t)) - cpumask_set_cpu(task_cpu(t), mask); - } - } - read_unlock(&tasklist_lock); -} - -static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) -{ - struct rdtgroup *sentry, *stmp; - struct list_head *head; - - head = &rdtgrp->mon.crdtgrp_list; - list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { - free_rmid(sentry->mon.rmid); - list_del(&sentry->mon.crdtgrp_list); - - if (atomic_read(&sentry->waitcount) != 0) - sentry->flags = RDT_DELETED; - else - rdtgroup_remove(sentry); - } -} - -/* - * Forcibly remove all of subdirectories under root. - */ -static void rmdir_all_sub(void) -{ - struct rdtgroup *rdtgrp, *tmp; - - /* Move all tasks to the default resource group */ - rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); - - list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { - /* Free any child rmids */ - free_all_child_rdtgrp(rdtgrp); - - /* Remove each rdtgroup other than root */ - if (rdtgrp == &rdtgroup_default) - continue; - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) - rdtgroup_pseudo_lock_remove(rdtgrp); - - /* - * Give any CPUs back to the default group. We cannot copy - * cpu_online_mask because a CPU might have executed the - * offline callback already, but is still marked online. 
- */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); - - free_rmid(rdtgrp->mon.rmid); - - kernfs_remove(rdtgrp->kn); - list_del(&rdtgrp->rdtgroup_list); - - if (atomic_read(&rdtgrp->waitcount) != 0) - rdtgrp->flags = RDT_DELETED; - else - rdtgroup_remove(rdtgrp); - } - /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ - update_closid_rmid(cpu_online_mask, &rdtgroup_default); - - kernfs_remove(kn_info); - kernfs_remove(kn_mongrp); - kernfs_remove(kn_mondata); -} - -static void rdt_kill_sb(struct super_block *sb) -{ - struct rdt_resource *r; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - set_mba_sc(false); - - /*Put everything back to default values. */ - for_each_alloc_capable_rdt_resource(r) - reset_all_ctrls(r); - cdp_disable_all(); - rmdir_all_sub(); - rdt_pseudo_lock_release(); - rdtgroup_default.mode = RDT_MODE_SHAREABLE; - schemata_list_destroy(); - static_branch_disable_cpuslocked(&rdt_alloc_enable_key); - static_branch_disable_cpuslocked(&rdt_mon_enable_key); - static_branch_disable_cpuslocked(&rdt_enable_key); - kernfs_kill_sb(sb); - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); -} - -static struct file_system_type rdt_fs_type = { - .name = "resctrl", - .init_fs_context = rdt_init_fs_context, - .parameters = rdt_fs_parameters, - .kill_sb = rdt_kill_sb, -}; - -static int mon_addfile(struct kernfs_node *parent_kn, const char *name, - void *priv) -{ - struct kernfs_node *kn; - int ret = 0; - - kn = __kernfs_create_file(parent_kn, name, 0444, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, - &kf_mondata_ops, priv, NULL, NULL); - if (IS_ERR(kn)) - return PTR_ERR(kn); - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - kernfs_remove(kn); - return ret; - } - - return ret; -} - -/* - * Remove all subdirectories of mon_data of ctrl_mon groups - * and monitor groups with given domain id. - */ -static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, - unsigned int dom_id) -{ - struct rdtgroup *prgrp, *crgrp; - char name[32]; - - list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - sprintf(name, "mon_%s_%02d", r->name, dom_id); - kernfs_remove_by_name(prgrp->mon.mon_data_kn, name); - - list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) - kernfs_remove_by_name(crgrp->mon.mon_data_kn, name); - } -} - -static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, - struct rdt_domain *d, - struct rdt_resource *r, struct rdtgroup *prgrp) -{ - union mon_data_bits priv; - struct kernfs_node *kn; - struct mon_evt *mevt; - struct rmid_read rr; - char name[32]; - int ret; - - sprintf(name, "mon_%s_%02d", r->name, d->id); - /* create the directory */ - kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); - if (IS_ERR(kn)) - return PTR_ERR(kn); - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) - goto out_destroy; - - if (WARN_ON(list_empty(&r->evt_list))) { - ret = -EPERM; - goto out_destroy; - } - - priv.u.rid = r->rid; - priv.u.domid = d->id; - list_for_each_entry(mevt, &r->evt_list, list) { - priv.u.evtid = mevt->evtid; - ret = mon_addfile(kn, mevt->name, priv.priv); - if (ret) - goto out_destroy; - - if (is_mbm_event(mevt->evtid)) - mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); - } - kernfs_activate(kn); - return 0; - -out_destroy: - kernfs_remove(kn); - return ret; -} - -/* - * Add all subdirectories of mon_data for "ctrl_mon" groups - * and "monitor" groups with given domain id. 
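mkdir_mondata_subdir() above creates one read-only file per event inside each mon_<resource>_<id> directory, and reading such a file returns a plain counter. A sketch; the path follows the mon_data layout shown in the comment just below, where llc_occupancy is the event named::

    #include <stdio.h>

    int main(void)
    {
            unsigned long long val;
            FILE *f = fopen(
                    "/sys/fs/resctrl/mon_data/mon_L3_00/llc_occupancy", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%llu", &val) == 1)
                    printf("llc_occupancy: %llu\n", val);
            fclose(f);
            return 0;
    }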
- */ -static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, - struct rdt_domain *d) -{ - struct kernfs_node *parent_kn; - struct rdtgroup *prgrp, *crgrp; - struct list_head *head; - - list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - parent_kn = prgrp->mon.mon_data_kn; - mkdir_mondata_subdir(parent_kn, d, r, prgrp); - - head = &prgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) { - parent_kn = crgrp->mon.mon_data_kn; - mkdir_mondata_subdir(parent_kn, d, r, crgrp); - } - } -} - -static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, - struct rdt_resource *r, - struct rdtgroup *prgrp) -{ - struct rdt_domain *dom; - int ret; - - list_for_each_entry(dom, &r->domains, list) { - ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); - if (ret) - return ret; - } - - return 0; -} - -/* - * This creates a directory mon_data which contains the monitored data. - * - * mon_data has one directory for each domain which are named - * in the format mon__. For ex: A mon_data - * with L3 domain looks as below: - * ./mon_data: - * mon_L3_00 - * mon_L3_01 - * mon_L3_02 - * ... - * - * Each domain directory has one file per event: - * ./mon_L3_00/: - * llc_occupancy - * - */ -static int mkdir_mondata_all(struct kernfs_node *parent_kn, - struct rdtgroup *prgrp, - struct kernfs_node **dest_kn) -{ - struct rdt_resource *r; - struct kernfs_node *kn; - int ret; - - /* - * Create the mon_data directory first. - */ - ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn); - if (ret) - return ret; - - if (dest_kn) - *dest_kn = kn; - - /* - * Create the subdirectories for each domain. Note that all events - * in a domain like L3 are grouped into a resource whose domain is L3 - */ - for_each_mon_capable_rdt_resource(r) { - ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); - if (ret) - goto out_destroy; - } - - return 0; - -out_destroy: - kernfs_remove(kn); - return ret; -} - -/** - * cbm_ensure_valid - Enforce validity on provided CBM - * @_val: Candidate CBM - * @r: RDT resource to which the CBM belongs - * - * The provided CBM represents all cache portions available for use. This - * may be represented by a bitmap that does not consist of contiguous ones - * and thus be an invalid CBM. - * Here the provided CBM is forced to be a valid CBM by only considering - * the first set of contiguous bits as valid and clearing all bits. - * The intention here is to provide a valid default CBM with which a new - * resource group is initialized. The user can follow this with a - * modification to the CBM if the default does not satisfy the - * requirements. - */ -static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) -{ - unsigned int cbm_len = r->cache.cbm_len; - unsigned long first_bit, zero_bit; - unsigned long val = _val; - - if (!val) - return 0; - - first_bit = find_first_bit(&val, cbm_len); - zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); - - /* Clear any remaining bits to ensure contiguous region */ - bitmap_clear(&val, zero_bit, cbm_len - zero_bit); - return (u32)val; -} - -/* - * Initialize cache resources per RDT domain - * - * Set the RDT domain up to start off with all usable allocations. That is, - * all shareable and unused bits. All-zero CBM is invalid. 
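cbm_ensure_valid() above trims a candidate mask down to its first contiguous run of set bits. The same trimming re-expressed as stand-alone user-space C, with plain bit scans in place of the kernel bitmap helpers::

    #include <stdio.h>

    /* Keep only the first contiguous run of set bits, as cbm_ensure_valid(). */
    static unsigned int make_contiguous(unsigned int val, unsigned int cbm_len)
    {
            unsigned int first, zero;

            if (!val)
                    return 0;
            first = __builtin_ctz(val);             /* first set bit */
            for (zero = first; zero < cbm_len; zero++)
                    if (!(val & (1u << zero)))      /* next zero bit */
                            break;
            return val & ((zero < 32 ? (1u << zero) : 0) - 1);
    }

    int main(void)
    {
            printf("0x%x\n", make_contiguous(0xf0f, 16));  /* prints 0xf */
            return 0;
    }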
- */ -static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s, - u32 closid) -{ - enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); - enum resctrl_conf_type t = s->conf_type; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - u32 used_b = 0, unused_b = 0; - unsigned long tmp_cbm; - enum rdtgrp_mode mode; - u32 peer_ctl, ctrl_val; - int i; - - cfg = &d->staged_config[t]; - cfg->have_new_ctrl = false; - cfg->new_ctrl = r->cache.shareable_bits; - used_b = r->cache.shareable_bits; - for (i = 0; i < closids_supported(); i++) { - if (closid_allocated(i) && i != closid) { - mode = rdtgroup_mode_by_closid(i); - if (mode == RDT_MODE_PSEUDO_LOCKSETUP) - /* - * ctrl values for locksetup aren't relevant - * until the schemata is written, and the mode - * becomes RDT_MODE_PSEUDO_LOCKED. - */ - continue; - /* - * If CDP is active include peer domain's - * usage to ensure there is no overlap - * with an exclusive group. - */ - if (resctrl_arch_get_cdp_enabled(r->rid)) - peer_ctl = resctrl_arch_get_config(r, d, i, - peer_type); - else - peer_ctl = 0; - ctrl_val = resctrl_arch_get_config(r, d, i, - s->conf_type); - used_b |= ctrl_val | peer_ctl; - if (mode == RDT_MODE_SHAREABLE) - cfg->new_ctrl |= ctrl_val | peer_ctl; - } - } - if (d->plr && d->plr->cbm > 0) - used_b |= d->plr->cbm; - unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); - unused_b &= BIT_MASK(r->cache.cbm_len) - 1; - cfg->new_ctrl |= unused_b; - /* - * Force the initial CBM to be valid, user can - * modify the CBM based on system availability. - */ - cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); - /* - * Assign the u32 CBM to an unsigned long to ensure that - * bitmap_weight() does not access out-of-bound memory. - */ - tmp_cbm = cfg->new_ctrl; - if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id); - return -ENOSPC; - } - cfg->have_new_ctrl = true; - - return 0; -} - -/* - * Initialize cache resources with default values. - * - * A new RDT group is being created on an allocation capable (CAT) - * supporting system. Set this group up to start off with all usable - * allocations. - * - * If there are no more shareable bits available on any domain then - * the entire allocation will fail. - */ -static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) -{ - struct rdt_domain *d; - int ret; - - list_for_each_entry(d, &s->res->domains, list) { - ret = __init_one_rdt_domain(d, s, closid); - if (ret < 0) - return ret; - } - - return 0; -} - -/* Initialize MBA resource with default values. */ -static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid) -{ - struct resctrl_staged_config *cfg; - struct rdt_domain *d; - - list_for_each_entry(d, &r->domains, list) { - if (is_mba_sc(r)) { - d->mbps_val[closid] = MBA_MAX_MBPS; - continue; - } - - cfg = &d->staged_config[CDP_NONE]; - cfg->new_ctrl = r->default_ctrl; - cfg->have_new_ctrl = true; - } -} - -/* Initialize the RDT group's allocations. 
*/ -static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) -{ - struct resctrl_schema *s; - struct rdt_resource *r; - int ret = 0; - - rdt_staged_configs_clear(); - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - if (r->rid == RDT_RESOURCE_MBA || - r->rid == RDT_RESOURCE_SMBA) { - rdtgroup_init_mba(r, rdtgrp->closid); - if (is_mba_sc(r)) - continue; - } else { - ret = rdtgroup_init_cat(s, rdtgrp->closid); - if (ret < 0) - goto out; - } - - ret = resctrl_arch_update_domains(r, rdtgrp->closid); - if (ret < 0) { - rdt_last_cmd_puts("Failed to initialize allocations\n"); - goto out; - } - - } - - rdtgrp->mode = RDT_MODE_SHAREABLE; - -out: - rdt_staged_configs_clear(); - return ret; -} - -static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, - const char *name, umode_t mode, - enum rdt_group_type rtype, struct rdtgroup **r) -{ - struct rdtgroup *prdtgrp, *rdtgrp; - struct kernfs_node *kn; - uint files = 0; - int ret; - - prdtgrp = rdtgroup_kn_lock_live(parent_kn); - if (!prdtgrp) { - ret = -ENODEV; - goto out_unlock; - } - - if (rtype == RDTMON_GROUP && - (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { - ret = -EINVAL; - rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto out_unlock; - } - - /* allocate the rdtgroup. */ - rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); - if (!rdtgrp) { - ret = -ENOSPC; - rdt_last_cmd_puts("Kernel out of memory\n"); - goto out_unlock; - } - *r = rdtgrp; - rdtgrp->mon.parent = prdtgrp; - rdtgrp->type = rtype; - INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); - - /* kernfs creates the directory for rdtgrp */ - kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); - if (IS_ERR(kn)) { - ret = PTR_ERR(kn); - rdt_last_cmd_puts("kernfs create error\n"); - goto out_free_rgrp; - } - rdtgrp->kn = kn; - - /* - * kernfs_remove() will drop the reference count on "kn" which - * will free it. But we still need it to stick around for the - * rdtgroup_kn_unlock(kn) call. Take one extra reference here, - * which will be dropped by kernfs_put() in rdtgroup_remove(). - */ - kernfs_get(kn); - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - rdt_last_cmd_puts("kernfs perm error\n"); - goto out_destroy; - } - - files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype); - ret = rdtgroup_add_files(kn, files); - if (ret) { - rdt_last_cmd_puts("kernfs fill error\n"); - goto out_destroy; - } - - if (rdt_mon_capable) { - ret = alloc_rmid(); - if (ret < 0) { - rdt_last_cmd_puts("Out of RMIDs\n"); - goto out_destroy; - } - rdtgrp->mon.rmid = ret; - - ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn); - if (ret) { - rdt_last_cmd_puts("kernfs subdir error\n"); - goto out_idfree; - } - } - kernfs_activate(kn); - - /* - * The caller unlocks the parent_kn upon success. - */ - return 0; - -out_idfree: - free_rmid(rdtgrp->mon.rmid); -out_destroy: - kernfs_put(rdtgrp->kn); - kernfs_remove(rdtgrp->kn); -out_free_rgrp: - kfree(rdtgrp); -out_unlock: - rdtgroup_kn_unlock(parent_kn); - return ret; -} - -static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) -{ - kernfs_remove(rgrp->kn); - free_rmid(rgrp->mon.rmid); - rdtgroup_remove(rgrp); -} - -/* - * Create a monitor group under "mon_groups" directory of a control - * and monitor group(ctrl_mon). This is a resource group - * to monitor a subset of tasks and cpus in its parent ctrl_mon group. 
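mkdir_rdt_prepare() above is the common path behind both group flavours created just below: a mkdir at the resctrl root makes a control group, while a mkdir under an existing group's mon_groups directory makes a monitor group. From user space both are ordinary directory creations; the names are illustrative::

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    int main(void)
    {
            /* Control group at the root ... */
            if (mkdir("/sys/fs/resctrl/group0", 0755))
                    perror("mkdir ctrl_mon");
            /* ... then a monitor group beneath its mon_groups directory. */
            if (mkdir("/sys/fs/resctrl/group0/mon_groups/mon0", 0755))
                    perror("mkdir mon");
            return 0;
    }

Names containing a newline are rejected with -EINVAL, and a mkdir anywhere else in the tree fails with -EPERM, per rdtgroup_mkdir() below.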
- */ -static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, - const char *name, umode_t mode) -{ - struct rdtgroup *rdtgrp, *prgrp; - int ret; - - ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); - if (ret) - return ret; - - prgrp = rdtgrp->mon.parent; - rdtgrp->closid = prgrp->closid; - - /* - * Add the rdtgrp to the list of rdtgrps the parent - * ctrl_mon group has to track. - */ - list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); - - rdtgroup_kn_unlock(parent_kn); - return ret; -} - -/* - * These are rdtgroups created under the root directory. Can be used - * to allocate and monitor resources. - */ -static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, - const char *name, umode_t mode) -{ - struct rdtgroup *rdtgrp; - struct kernfs_node *kn; - u32 closid; - int ret; - - ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); - if (ret) - return ret; - - kn = rdtgrp->kn; - ret = closid_alloc(); - if (ret < 0) { - rdt_last_cmd_puts("Out of CLOSIDs\n"); - goto out_common_fail; - } - closid = ret; - ret = 0; - - rdtgrp->closid = closid; - ret = rdtgroup_init_alloc(rdtgrp); - if (ret < 0) - goto out_id_free; - - list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); - - if (rdt_mon_capable) { - /* - * Create an empty mon_groups directory to hold the subset - * of tasks and cpus to monitor. - */ - ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); - if (ret) { - rdt_last_cmd_puts("kernfs subdir error\n"); - goto out_del_list; - } - } - - goto out_unlock; - -out_del_list: - list_del(&rdtgrp->rdtgroup_list); -out_id_free: - closid_free(closid); -out_common_fail: - mkdir_rdt_prepare_clean(rdtgrp); -out_unlock: - rdtgroup_kn_unlock(parent_kn); - return ret; -} - -/* - * We allow creating mon groups only with in a directory called "mon_groups" - * which is present in every ctrl_mon group. Check if this is a valid - * "mon_groups" directory. - * - * 1. The directory should be named "mon_groups". - * 2. The mon group itself should "not" be named "mon_groups". - * This makes sure "mon_groups" directory always has a ctrl_mon group - * as parent. - */ -static bool is_mon_groups(struct kernfs_node *kn, const char *name) -{ - return (!strcmp(kn->name, "mon_groups") && - strcmp(name, "mon_groups")); -} - -static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, - umode_t mode) -{ - /* Do not accept '\n' to avoid unparsable situation. */ - if (strchr(name, '\n')) - return -EINVAL; - - /* - * If the parent directory is the root directory and RDT - * allocation is supported, add a control and monitoring - * subdirectory - */ - if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn) - return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); - - /* - * If RDT monitoring is supported and the parent directory is a valid - * "mon_groups" directory, add a monitoring subdirectory. - */ - if (rdt_mon_capable && is_mon_groups(parent_kn, name)) - return rdtgroup_mkdir_mon(parent_kn, name, mode); - - return -EPERM; -} - -static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) -{ - struct rdtgroup *prdtgrp = rdtgrp->mon.parent; - int cpu; - - /* Give any tasks back to the parent group */ - rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); - - /* Update per cpu rmid of the moved CPUs first */ - for_each_cpu(cpu, &rdtgrp->cpu_mask) - per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid; - /* - * Update the MSR on moved CPUs and CPUs which have moved - * task running on them. 
- */ - cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); - update_closid_rmid(tmpmask, NULL); - - rdtgrp->flags = RDT_DELETED; - free_rmid(rdtgrp->mon.rmid); - - /* - * Remove the rdtgrp from the parent ctrl_mon group's list - */ - WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); - list_del(&rdtgrp->mon.crdtgrp_list); - - kernfs_remove(rdtgrp->kn); - - return 0; -} - -static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) -{ - rdtgrp->flags = RDT_DELETED; - list_del(&rdtgrp->rdtgroup_list); - - kernfs_remove(rdtgrp->kn); - return 0; -} - -static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) -{ - int cpu; - - /* Give any tasks back to the default group */ - rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); - - /* Give any CPUs back to the default group */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); - - /* Update per cpu closid and rmid of the moved CPUs first */ - for_each_cpu(cpu, &rdtgrp->cpu_mask) { - per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid; - per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid; - } - - /* - * Update the MSR on moved CPUs and CPUs which have moved - * task running on them. - */ - cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); - update_closid_rmid(tmpmask, NULL); - - closid_free(rdtgrp->closid); - free_rmid(rdtgrp->mon.rmid); - - rdtgroup_ctrl_remove(rdtgrp); - - /* - * Free all the child monitor group rmids. - */ - free_all_child_rdtgrp(rdtgrp); - - return 0; -} - -static int rdtgroup_rmdir(struct kernfs_node *kn) -{ - struct kernfs_node *parent_kn = kn->parent; - struct rdtgroup *rdtgrp; - cpumask_var_t tmpmask; - int ret = 0; - - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) - return -ENOMEM; - - rdtgrp = rdtgroup_kn_lock_live(kn); - if (!rdtgrp) { - ret = -EPERM; - goto out; - } - - /* - * If the rdtgroup is a ctrl_mon group and parent directory - * is the root directory, remove the ctrl_mon group. - * - * If the rdtgroup is a mon group and parent directory - * is a valid "mon_groups" directory, remove the mon group. - */ - if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && - rdtgrp != &rdtgroup_default) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - ret = rdtgroup_ctrl_remove(rdtgrp); - } else { - ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask); - } - } else if (rdtgrp->type == RDTMON_GROUP && - is_mon_groups(parent_kn, kn->name)) { - ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask); - } else { - ret = -EPERM; - } - -out: - rdtgroup_kn_unlock(kn); - free_cpumask_var(tmpmask); - return ret; -} - -/** - * mongrp_reparent() - replace parent CTRL_MON group of a MON group - * @rdtgrp: the MON group whose parent should be replaced - * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp - * @cpus: cpumask provided by the caller for use during this call - * - * Replaces the parent CTRL_MON group for a MON group, resulting in all member - * tasks' CLOSID immediately changing to that of the new parent group. - * Monitoring data for the group is unaffected by this operation. - */ -static void mongrp_reparent(struct rdtgroup *rdtgrp, - struct rdtgroup *new_prdtgrp, - cpumask_var_t cpus) -{ - struct rdtgroup *prdtgrp = rdtgrp->mon.parent; - - WARN_ON(rdtgrp->type != RDTMON_GROUP); - WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); - - /* Nothing to do when simply renaming a MON group. 
*/ - if (prdtgrp == new_prdtgrp) - return; - - WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); - list_move_tail(&rdtgrp->mon.crdtgrp_list, - &new_prdtgrp->mon.crdtgrp_list); - - rdtgrp->mon.parent = new_prdtgrp; - rdtgrp->closid = new_prdtgrp->closid; - - /* Propagate updated closid to all tasks in this group. */ - rdt_move_group_tasks(rdtgrp, rdtgrp, cpus); - - update_closid_rmid(cpus, NULL); -} - -static int rdtgroup_rename(struct kernfs_node *kn, - struct kernfs_node *new_parent, const char *new_name) -{ - struct rdtgroup *new_prdtgrp; - struct rdtgroup *rdtgrp; - cpumask_var_t tmpmask; - int ret; - - rdtgrp = kernfs_to_rdtgroup(kn); - new_prdtgrp = kernfs_to_rdtgroup(new_parent); - if (!rdtgrp || !new_prdtgrp) - return -ENOENT; - - /* Release both kernfs active_refs before obtaining rdtgroup mutex. */ - rdtgroup_kn_get(rdtgrp, kn); - rdtgroup_kn_get(new_prdtgrp, new_parent); - - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - /* - * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if - * either kernfs_node is a file. - */ - if (kernfs_type(kn) != KERNFS_DIR || - kernfs_type(new_parent) != KERNFS_DIR) { - rdt_last_cmd_puts("Source and destination must be directories"); - ret = -EPERM; - goto out; - } - - if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) { - ret = -ENOENT; - goto out; - } - - if (rdtgrp->type != RDTMON_GROUP || !kn->parent || - !is_mon_groups(kn->parent, kn->name)) { - rdt_last_cmd_puts("Source must be a MON group\n"); - ret = -EPERM; - goto out; - } - - if (!is_mon_groups(new_parent, new_name)) { - rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n"); - ret = -EPERM; - goto out; - } - - /* - * If the MON group is monitoring CPUs, the CPUs must be assigned to the - * current parent CTRL_MON group and therefore cannot be assigned to - * the new parent, making the move illegal. - */ - if (!cpumask_empty(&rdtgrp->cpu_mask) && - rdtgrp->mon.parent != new_prdtgrp) { - rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n"); - ret = -EPERM; - goto out; - } - - /* - * Allocate the cpumask for use in mongrp_reparent() to avoid the - * possibility of failing to allocate it after kernfs_rename() has - * succeeded. - */ - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) { - ret = -ENOMEM; - goto out; - } - - /* - * Perform all input validation and allocations needed to ensure - * mongrp_reparent() will succeed before calling kernfs_rename(), - * otherwise it would be necessary to revert this call if - * mongrp_reparent() failed. 
- */ - ret = kernfs_rename(kn, new_parent, new_name); - if (!ret) - mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask); - - free_cpumask_var(tmpmask); - -out: - mutex_unlock(&rdtgroup_mutex); - rdtgroup_kn_put(rdtgrp, kn); - rdtgroup_kn_put(new_prdtgrp, new_parent); - return ret; -} - -static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) -{ - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) - seq_puts(seq, ",cdp"); - - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) - seq_puts(seq, ",cdpl2"); - - if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl)) - seq_puts(seq, ",mba_MBps"); - - return 0; -} - -static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { - .mkdir = rdtgroup_mkdir, - .rmdir = rdtgroup_rmdir, - .rename = rdtgroup_rename, - .show_options = rdtgroup_show_options, -}; - -static int __init rdtgroup_setup_root(void) -{ - int ret; - - rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, - KERNFS_ROOT_CREATE_DEACTIVATED | - KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, - &rdtgroup_default); - if (IS_ERR(rdt_root)) - return PTR_ERR(rdt_root); - - mutex_lock(&rdtgroup_mutex); - - rdtgroup_default.closid = 0; - rdtgroup_default.mon.rmid = 0; - rdtgroup_default.type = RDTCTRL_GROUP; - INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); - - list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); - - ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RF_CTRL_BASE); - if (ret) { - kernfs_destroy_root(rdt_root); - goto out; - } - - rdtgroup_default.kn = kernfs_root_to_node(rdt_root); - kernfs_activate(rdtgroup_default.kn); - -out: - mutex_unlock(&rdtgroup_mutex); - - return ret; -} - -static void domain_destroy_mon_state(struct rdt_domain *d) -{ - bitmap_free(d->rmid_busy_llc); - kfree(d->mbm_total); - kfree(d->mbm_local); -} - -void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) -{ - lockdep_assert_held(&rdtgroup_mutex); - - if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) - mba_sc_domain_destroy(r, d); - - if (!r->mon_capable) - return; - - /* - * If resctrl is mounted, remove all the - * per domain monitor data directories. - */ - if (static_branch_unlikely(&rdt_mon_enable_key)) - rmdir_mondata_subdir_allrdtgrp(r, d->id); - - if (is_mbm_enabled()) - cancel_delayed_work(&d->mbm_over); - if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) { - /* - * When a package is going down, forcefully - * decrement rmid->ebusy. There is no way to know - * that the L3 was flushed and hence may lead to - * incorrect counts in rare scenarios, but leaving - * the RMID as busy creates RMID leaks if the - * package never comes back. 
- */ - __check_limbo(d, true); - cancel_delayed_work(&d->cqm_limbo); - } - - domain_destroy_mon_state(d); -} - -static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) -{ - size_t tsize; - - if (is_llc_occupancy_enabled()) { - d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL); - if (!d->rmid_busy_llc) - return -ENOMEM; - } - if (is_mbm_total_enabled()) { - tsize = sizeof(*d->mbm_total); - d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL); - if (!d->mbm_total) { - bitmap_free(d->rmid_busy_llc); - return -ENOMEM; - } - } - if (is_mbm_local_enabled()) { - tsize = sizeof(*d->mbm_local); - d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL); - if (!d->mbm_local) { - bitmap_free(d->rmid_busy_llc); - kfree(d->mbm_total); - return -ENOMEM; - } - } - - return 0; -} - -int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) -{ - int err; - - lockdep_assert_held(&rdtgroup_mutex); - - if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) - /* RDT_RESOURCE_MBA is never mon_capable */ - return mba_sc_domain_allocate(r, d); - - if (!r->mon_capable) - return 0; - - err = domain_setup_mon_state(r, d); - if (err) - return err; - - if (is_mbm_enabled()) { - INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); - mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL); - } - - if (is_llc_occupancy_enabled()) - INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); - - /* If resctrl is mounted, add per domain monitor data directories. */ - if (static_branch_unlikely(&rdt_mon_enable_key)) - mkdir_mondata_subdir_allrdtgrp(r, d); - - return 0; -} - -/* - * rdtgroup_init - rdtgroup initialization - * - * Setup resctrl file system including set up root, create mount point, - * register rdtgroup filesystem, and initialize files under root directory. - * - * Return: 0 on success or -errno - */ -int __init rdtgroup_init(void) -{ - int ret = 0; - - seq_buf_init(&last_cmd_status, last_cmd_status_buf, - sizeof(last_cmd_status_buf)); - - ret = rdtgroup_setup_root(); - if (ret) - return ret; - - ret = sysfs_create_mount_point(fs_kobj, "resctrl"); - if (ret) - goto cleanup_root; - - ret = register_filesystem(&rdt_fs_type); - if (ret) - goto cleanup_mountpoint; - - /* - * Adding the resctrl debugfs directory here may not be ideal since - * it would let the resctrl debugfs directory appear on the debugfs - * filesystem before the resctrl filesystem is mounted. - * It may also be ok since that would enable debugging of RDT before - * resctrl is mounted. - * The reason why the debugfs directory is created here and not in - * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and - * during the debugfs directory creation also &sb->s_type->i_mutex_key - * (the lockdep class of inode->i_rwsem). Other filesystem - * interactions (eg. SyS_getdents) have the lock ordering: - * &sb->s_type->i_mutex_key --> &mm->mmap_lock - * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex - * is taken, thus creating dependency: - * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause - * issues considering the other two lock dependencies. - * By creating the debugfs directory here we avoid a dependency - * that may cause deadlock (even though file operations cannot - * occur until the filesystem is mounted, but I do not know how to - * tell lockdep that). 
- */ - debugfs_resctrl = debugfs_create_dir("resctrl", NULL); - - return 0; - -cleanup_mountpoint: - sysfs_remove_mount_point(fs_kobj, "resctrl"); -cleanup_root: - kernfs_destroy_root(rdt_root); - - return ret; -} - -void __exit rdtgroup_exit(void) -{ - debugfs_remove_recursive(debugfs_resctrl); - unregister_filesystem(&rdt_fs_type); - sysfs_remove_mount_point(fs_kobj, "resctrl"); - kernfs_destroy_root(rdt_root); + for_each_capable_rdt_resource(r) + reset_all_ctrls(r); } diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index 05fa4ef634902293e3286705134168b40d812932..80b3791240e49dcc8b1a37aa5e66b4bcfe5aeb6f 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -66,29 +66,31 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } - if (c->cpuid_level >= 0x00000001) { - u32 eax, ebx, ecx, edx; - - cpuid(0x00000001, &eax, &ebx, &ecx, &edx); - /* - * If HTT (EDX[28]) is set EBX[16:23] contain the number of - * apicids which are reserved per package. Store the resulting - * shift value for the package management code. - */ - if (edx & (1U << 28)) - c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); - } - + /* + * These CPUs declare support for the SSE4.2 instruction set but + * have a low-performance CRC32C instruction implementation. + */ + if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) + set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + + if (cpuid_eax(0xC0000000) >= 0xC0000006) + c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006); + + if (detect_extended_topology_early(c) < 0) + detect_ht_early(c); } static void init_zhaoxin(struct cpuinfo_x86 *c) { early_init_zhaoxin(c); + detect_extended_topology(c); init_intel_cacheinfo(c); - detect_num_cpu_cores(c); + if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { + detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif + } if (c->cpuid_level > 9) { unsigned int eax = cpuid_eax(10); diff --git a/arch/x86/kernel/cpu/zxpause.c b/arch/x86/kernel/cpu/zxpause.c new file mode 100644 index 0000000000000000000000000000000000000000..7f55f5d9e8c0cbb70c30c260705040901eee2a53 --- /dev/null +++ b/arch/x86/kernel/cpu/zxpause.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +#define ZXPAUSE_C02_ENABLE 0 + +#define ZXPAUSE_CTRL_VAL(max_time, c02_disable) \ (((max_time) & MSR_ZX_PAUSE_CONTROL_TIME_MASK) | \ ((c02_disable) & MSR_ZX_PAUSE_CONTROL_C02_DISABLE)) + +/* + * Cache ZX_PAUSE_CONTROL MSR. This is a systemwide control. By default, + * zxpause max time is 100000 in TSC-quanta and C0.2 is enabled. + */ +static u32 zxpause_control_cached = ZXPAUSE_CTRL_VAL(100000, ZXPAUSE_C02_ENABLE); + +/* + * Cache the original ZX_PAUSE_CONTROL MSR value which is configured by + * hardware or BIOS before kernel boot. + */ +static u32 orig_zxpause_control_cached __ro_after_init; + +/* + * Serialize access to zxpause_control_cached and ZX_PAUSE_CONTROL MSR in + * the sysfs write functions. + */ +static DEFINE_MUTEX(zxpause_lock); + +static void zxpause_update_control_msr(void *unused) +{ + lockdep_assert_irqs_disabled(); + wrmsr(MSR_ZX_PAUSE_CONTROL, READ_ONCE(zxpause_control_cached), 0); +} + +/* + * The CPU hotplug callback sets the control MSR to the global control + * value. + * + * Disable interrupts so the read of zxpause_control_cached and the WRMSR + * are protected against a concurrent sysfs write.
Otherwise the sysfs + * write could update the cached value after it had been read on this CPU + * and issue the IPI before the old value had been written. The IPI would + * interrupt, write the new value and after return from IPI the previous + * value would be written by this CPU. + * + * With interrupts disabled the upcoming CPU either sees the new control + * value or the IPI is updating this CPU to the new control value after + * interrupts have been reenabled. + */ +static int zxpause_cpu_online(unsigned int cpu) +{ + local_irq_disable(); + zxpause_update_control_msr(NULL); + local_irq_enable(); + return 0; +} + +/* + * The CPU hotplug callback sets the control MSR to the original control + * value. + */ +static int zxpause_cpu_offline(unsigned int cpu) +{ + /* + * This code is protected by the CPU hotplug already and + * orig_zxpause_control_cached is never changed after it caches + * the original control MSR value in zxpause_init(). So there + * is no race condition here. + */ + wrmsr(MSR_ZX_PAUSE_CONTROL, orig_zxpause_control_cached, 0); + + return 0; +} + +/* + * On resume, restore ZX_PAUSE_CONTROL MSR on the boot processor which + * is the only active CPU at this time. The MSR is set up on the APs via the + * CPU hotplug callback. + * + * This function is invoked on resume from suspend and hibernation. On + * resume from suspend the restore should not be required, but we neither + * trust the firmware nor does it matter if the same value is written + * again. + */ +static void zxpause_syscore_resume(void) +{ + zxpause_update_control_msr(NULL); +} + +static struct syscore_ops zxpause_syscore_ops = { + .resume = zxpause_syscore_resume, +}; + +/* sysfs interface */ + +/* + * When bit 0 in ZX_PAUSE_CONTROL MSR is 1, C0.2 is disabled. + * Otherwise, C0.2 is enabled.
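+ * + * For example (illustrative values only): a cached control value of + * 0x186a1 decodes as max_time 0x186a0 in TSC-quanta with C0.2 disabled, + * since bit 0 is set and the remaining bits hold the time limit.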
+ */ +static inline bool zxpause_ctrl_c02_enabled(u32 ctrl) +{ + return !(ctrl & MSR_ZX_PAUSE_CONTROL_C02_DISABLE); +} + +static inline u32 zxpause_ctrl_max_time(u32 ctrl) +{ + return ctrl & MSR_ZX_PAUSE_CONTROL_TIME_MASK; +} + +static inline void zxpause_update_control(u32 maxtime, bool c02_enable) +{ + u32 ctrl = maxtime & MSR_ZX_PAUSE_CONTROL_TIME_MASK; + + if (!c02_enable) + ctrl |= MSR_ZX_PAUSE_CONTROL_C02_DISABLE; + + WRITE_ONCE(zxpause_control_cached, ctrl); + /* Propagate to all CPUs */ + on_each_cpu(zxpause_update_control_msr, NULL, 1); +} + +static ssize_t +enable_c02_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + u32 ctrl = READ_ONCE(zxpause_control_cached); + + return sprintf(buf, "%d\n", zxpause_ctrl_c02_enabled(ctrl)); +} + +static ssize_t enable_c02_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + bool c02_enable; + u32 ctrl; + int ret; + + ret = kstrtobool(buf, &c02_enable); + if (ret) + return ret; + + mutex_lock(&zxpause_lock); + + ctrl = READ_ONCE(zxpause_control_cached); + if (c02_enable != zxpause_ctrl_c02_enabled(ctrl)) + zxpause_update_control(ctrl, c02_enable); + + mutex_unlock(&zxpause_lock); + + return count; +} +static DEVICE_ATTR_RW(enable_c02); + +static ssize_t +max_time_show(struct device *kobj, struct device_attribute *attr, char *buf) +{ + u32 ctrl = READ_ONCE(zxpause_control_cached); + + return sprintf(buf, "%u\n", zxpause_ctrl_max_time(ctrl)); +} + +static ssize_t max_time_store(struct device *kobj, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 max_time, ctrl; + int ret; + + ret = kstrtou32(buf, 0, &max_time); + if (ret) + return ret; + + /* bits[1:0] must be zero */ + if (max_time & ~MSR_ZX_PAUSE_CONTROL_TIME_MASK) + return -EINVAL; + + mutex_lock(&zxpause_lock); + + ctrl = READ_ONCE(zxpause_control_cached); + if (max_time != zxpause_ctrl_max_time(ctrl)) + zxpause_update_control(max_time, zxpause_ctrl_c02_enabled(ctrl)); + + mutex_unlock(&zxpause_lock); + + return count; +} +static DEVICE_ATTR_RW(max_time); + +static struct attribute *zxpause_attrs[] = { + &dev_attr_enable_c02.attr, + &dev_attr_max_time.attr, + NULL +}; + +static struct attribute_group zxpause_attr_group = { + .attrs = zxpause_attrs, + .name = "zxpause_control", +}; + +static int __init zxpause_init(void) +{ + struct device *dev; + int ret; + + if (!boot_cpu_has(X86_FEATURE_ZXPAUSE)) + return -ENODEV; + + /* + * Cache the original control MSR value before the control MSR is + * changed. This is the only place where orig_zxpause_control_cached + * is modified. + */ + rdmsrl(MSR_ZX_PAUSE_CONTROL, orig_zxpause_control_cached); + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "zxpause:online", + zxpause_cpu_online, zxpause_cpu_offline); + if (ret < 0) { + /* + * On failure, the control MSR on all CPUs has the + * original control value. + */ + return ret; + } + + register_syscore_ops(&zxpause_syscore_ops); + + /* + * Add zxpause control interface. Ignore failure, so at least the + * default values are set up in case the machine manages to boot. 
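+ * + * Assuming the group is created under the CPU subsystem root, as the + * code below attempts, a hypothetical session would look like: + * + * # cat /sys/devices/system/cpu/zxpause_control/max_time + * 100000 + * # echo 0 > /sys/devices/system/cpu/zxpause_control/enable_c02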
+ */ + dev = bus_get_dev_root(&cpu_subsys); + return sysfs_create_group(&dev->kobj, &zxpause_attr_group); +} +device_initcall(zxpause_init); diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c new file mode 100644 index 0000000000000000000000000000000000000000..9383f5d0a476586c8f77447cc55566281c8b9c2d --- /dev/null +++ b/arch/x86/kernel/csv-shared.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon CSV support + * + * This file is shared between decompression boot code and the running + * Linux kernel. + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#include + +/* + ****************************** CSV3 secure call ******************************* + * + * A CSV3 guest is based on the Hygon secure isolated virtualization feature. A + * secure processor which resides in the Hygon SOC manages the guest's private + * memory. The secure processor allocates or frees private memory for the CSV3 + * guest and manages the CSV3 guest's nested page table. + * + * As the secure processor is considered a PCI device in the host, the CSV3 + * guest cannot communicate with it directly. However, the CSV3 guest must + * request the secure processor to change its physical memory between private + * memory and shared memory. The CSV3 secure call command is a method used to + * communicate with the secure processor such that the host cannot tamper with + * the data in the CSV3 guest. The host can only perform an external command to + * notify the secure processor to handle the pending guest's command. + * + * CSV3 secure call pages: + * Secure call pages are two dedicated pages that are reserved by the BIOS. We + * define the secure call pages as page A and page B. During the guest launch + * stage, the secure processor will parse the address of the secure call pages. + * The secure processor maps the two pages to the same private memory page in + * the NPT. The secure processor always sets one page as present and the other + * page as non-present in the NPT. + + * CSV3 secure call main work flow: + * If we write the guest's commands to one page and then read them from the + * other page, a nested page fault happens and the guest exits to the host. + * The host then performs an external command with the gpa (page A or page B) + * to the secure processor. The secure processor checks that the gpa in the + * NPF belongs to the secure call pages, reads and handles the guest's + * command, then switches the present bit between the two pages. + * + * guest page A guest page B + * | | + * ____|______________|____ + * | | + * | nested page table | + * |______________________| + * \ / + * \ / + * \ / + * \ / + * \ / + * secure memory page + * + * CSV3_SECURE_CMD_ENC: + * The CSV3 guest declares a specified memory range as secure. By default, all + * of the CSV3 guest's memory is mapped as secure. + * The secure processor allocates a block of secure memory and maps the memory + * in the CSV3 guest's NPT to the specified guest physical memory range in the + * CSV3 secure call. + * + * CSV3_SECURE_CMD_DEC: + * The CSV3 guest declares a specified memory range as shared. + * The secure processor saves the guest physical memory range in its own RAM + * and frees the range in the CSV3 guest's NPT. When the CSV3 guest accesses + * the memory, a new nested page fault happens. + * + * CSV3_SECURE_CMD_RESET: + * The CSV3 guest switches all of the shared memory to secure. + * The secure processor resets all the shared memory in the CSV3 guest's NPT + * and clears the saved shared memory range. Then the secure processor + * allocates secure memory to map in the CSV3 guest's NPT.
+ * + * CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE: + * The CSV3 guest wants to change the secure call pages. + * The secure processor re-initializes the secure call context. + * + * CSV3_SECURE_CMD_REQ_REPORT: + * The CSV3 guest wants to request an attestation report. + * The secure processor will update the request message buffer and response + * buffer to indicate the result of this request. + */ +enum csv3_secure_command_type { + /* Secure call request values must be below CSV3_SECURE_CMD_ACK */ + CSV3_SECURE_CMD_ENC = 0x1, + CSV3_SECURE_CMD_DEC, + CSV3_SECURE_CMD_RESET, + CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE, + CSV3_SECURE_CMD_REQ_REPORT = 0x7, + + /* CSV3_SECURE_CMD_ACK indicates the secure call request can be handled */ + CSV3_SECURE_CMD_ACK = 0x6b, + + /* + * The following values are the error codes of the secure call + * when the firmware can't handle the specific secure call command + * as expected. + */ + CSV3_SECURE_CMD_ERROR_INTERNAL = 0x6c, + CSV3_SECURE_CMD_ERROR_INVALID_COMMAND = 0x6d, + CSV3_SECURE_CMD_ERROR_INVALID_PARAM = 0x6e, + CSV3_SECURE_CMD_ERROR_INVALID_ADDRESS = 0x6f, + CSV3_SECURE_CMD_ERROR_INVALID_LENGTH = 0x70, +}; + +/* + * Secure call page fields. + * The secure call page size is always 4KB. We define the CSV3 secure call page + * structure as below. + * guid: Must be in the first 128 bytes of the page. Its value should be + * (0xceba2fa59a5d926ful, 0xa556555d276b21abul) always. + * cmd_type: Command to be issued to the secure processor. + * nums: Number of entries in the command. + * base_address: Start address of the memory range. + * size: Size of the memory range. + */ +#define SECURE_CALL_ENTRY_MAX (254) + +/* size of secure call cmd is 4KB. */ +struct csv3_secure_call_cmd { + union { + u8 guid[16]; + u64 guid_64[2]; + }; + u32 cmd_type; + u32 nums; + u64 unused; + struct { + u64 base_address; + u64 size; + } entry[SECURE_CALL_ENTRY_MAX]; }; + +/* csv3 secure call guid, do not change the value. */ +#define CSV3_SECURE_CALL_GUID_LOW 0xceba2fa59a5d926ful +#define CSV3_SECURE_CALL_GUID_HIGH 0xa556555d276b21abul + +static u64 csv3_boot_sc_page_a __initdata = -1ul; +static u64 csv3_boot_sc_page_b __initdata = -1ul; +static u32 early_page_idx __initdata; + +/** + * csv3_scan_secure_call_pages - try to find the secure call pages. + * @boot_params: boot parameters where e820_table resides. + * + * The secure call pages are reserved by the BIOS. We scan all the reserved + * pages to check the CSV3 secure call guid bytes. + */ +void __init csv3_scan_secure_call_pages(struct boot_params *boot_params) +{ + struct boot_e820_entry *entry; + struct csv3_secure_call_cmd *sc_page; + u64 offset; + u64 addr; + u8 i; + u8 table_num; + int count = 0; + + if (!boot_params) + return; + + if (csv3_boot_sc_page_a != -1ul && csv3_boot_sc_page_b != -1ul) + return; + + table_num = min_t(u8, boot_params->e820_entries, + E820_MAX_ENTRIES_ZEROPAGE); + entry = &boot_params->e820_table[0]; + for (i = 0; i < table_num; i++) { + if (entry[i].type != E820_TYPE_RESERVED) + continue; + + addr = entry[i].addr & PAGE_MASK; + for (offset = 0; offset < entry[i].size; offset += PAGE_SIZE) { + sc_page = (void *)(addr + offset); + if (sc_page->guid_64[0] == CSV3_SECURE_CALL_GUID_LOW && + sc_page->guid_64[1] == CSV3_SECURE_CALL_GUID_HIGH) { + if (count == 0) + csv3_boot_sc_page_a = addr + offset; + else if (count == 1) + csv3_boot_sc_page_b = addr + offset; + count++; + } + if (count >= 2) + return; + } + } } + +/** + * csv3_early_secure_call_ident_map - issue early secure call command at the + * stage where the identity page table is created.
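+ * + * A minimal, hypothetical use while the identity map is live, sharing + * one page at physical address 0x400000 with the host: + * + * csv3_early_secure_call_ident_map(0x400000, 1, CSV3_SECURE_CMD_DEC); + *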
+ * @base_address: Start address of the specified memory range. + * @num_pages: Number of pages in the specified memory range. + * @cmd_type: Secure call cmd type. + */ +void __init csv3_early_secure_call_ident_map(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + /* Identity mapping is in effect at this stage. */ + page_rd = (void *)(early_page_idx ? csv3_boot_sc_page_a : csv3_boot_sc_page_b); + page_wr = (void *)(early_page_idx ? csv3_boot_sc_page_b : csv3_boot_sc_page_a); + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Writing the command to page_wr must be done before retrieving + * the cmd ack from page_rd; this is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + early_page_idx ^= 1; +} diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c new file mode 100644 index 0000000000000000000000000000000000000000..84a76ae3c0625d5b160774231de903f39f51bf3f --- /dev/null +++ b/arch/x86/kernel/csv.c @@ -0,0 +1,360 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#include +#include +#include +#include +#include + +#include "../mm/mm_internal.h" +#include "csv-shared.c" + +struct secure_call_pages { + struct csv3_secure_call_cmd page_a; + struct csv3_secure_call_cmd page_b; +}; + +static u32 csv3_percpu_secure_call_init __initdata; +static u32 early_secure_call_page_idx __initdata; + +static DEFINE_PER_CPU(struct secure_call_pages*, secure_call_data); +static DEFINE_PER_CPU(int, secure_call_page_idx); + +typedef void (*csv3_secure_call_func)(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type); + +void __init csv_early_reset_memory(struct boot_params *bp) +{ + if (!csv3_active()) + return; + + csv3_scan_secure_call_pages(bp); + csv3_early_secure_call_ident_map(0, 0, CSV3_SECURE_CMD_RESET); +} + +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call_ident_map(__pa(vaddr), pages, + CSV3_SECURE_CMD_DEC); +} + +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call_ident_map(__pa(vaddr), pages, + CSV3_SECURE_CMD_ENC); +} + +static void __init csv3_alloc_secure_call_data(int cpu) +{ + struct secure_call_pages *data; + + data = memblock_alloc(sizeof(*data), PAGE_SIZE); + if (!data) + panic("Can't allocate CSV3 secure call data"); + + per_cpu(secure_call_data, cpu) = data; +} + +static void __init csv3_secure_call_update_table(void) +{ + int cpu; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (!csv3_active()) + return; + + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, PAGE_SIZE); + + while (1) { + page_wr->cmd_type = CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE; + page_wr->nums = 0; + + /* initialize per-cpu secure call pages */ + for_each_possible_cpu(cpu) { + if (cpu >= SECURE_CALL_ENTRY_MAX) + panic("csv does not support cpus > %d\n", + SECURE_CALL_ENTRY_MAX); + csv3_alloc_secure_call_data(cpu); +
data = per_cpu(secure_call_data, cpu); + per_cpu(secure_call_page_idx, cpu) = 0; + page_wr->entry[cpu].base_address = __pa(data); + page_wr->entry[cpu].size = PAGE_SIZE * 2; + page_wr->nums++; + } + + /* + * Writing the command to page_wr must be done before retrieving + * the cmd ack from page_rd; this is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); +} + +/** + * __csv3_early_secure_call - issue secure call command at the stage where the + * new kernel page table is created and the early + * identity page table is deprecated. + * @base_address: Start address of the specified memory range. + * @num_pages: Number of pages in the specified memory range. + * @cmd_type: Secure call cmd type. + */ +static void __init __csv3_early_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + if (!csv3_percpu_secure_call_init) { + csv3_secure_call_update_table(); + csv3_percpu_secure_call_init = 1; + } + + if (early_secure_call_page_idx == 0) { + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } else { + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Writing the command to page_wr must be done before retrieving + * the cmd ack from page_rd; this is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); + + early_secure_call_page_idx ^= 1; +} + +static void csv3_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + u32 cmd_ack; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + int page_idx; + int cpu; + + preempt_disable(); + + cpu = smp_processor_id(); + data = per_cpu(secure_call_data, cpu); + page_idx = per_cpu(secure_call_page_idx, cpu); + + if (page_idx == 0) { + page_rd = &data->page_a; + page_wr = &data->page_b; + } else { + page_rd = &data->page_b; + page_wr = &data->page_a; + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Writing the command to page_wr must be done before retrieving + * the cmd ack from page_rd; this is ensured by the smp_mb below. + */ + smp_mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + per_cpu(secure_call_page_idx, cpu) ^= 1; + preempt_enable(); +} + +static void __csv3_memory_enc_dec(csv3_secure_call_func secure_call, u64 vaddr, + u64 pages, bool enc) +{ + u64 vaddr_end, vaddr_next; + u64 psize, pmask; + u64 last_paddr, paddr; + u64 last_psize = 0; + pte_t *kpte; + int level; + enum csv3_secure_command_type cmd_type; + + cmd_type = enc ?
CSV3_SECURE_CMD_ENC : CSV3_SECURE_CMD_DEC; + vaddr_next = vaddr; + vaddr_end = vaddr + (pages << PAGE_SHIFT); + for (; vaddr < vaddr_end; vaddr = vaddr_next) { + kpte = lookup_address(vaddr, &level); + if (!kpte || pte_none(*kpte)) { + panic("invalid pte, vaddr 0x%llx\n", vaddr); + goto out; + } + + psize = page_level_size(level); + pmask = page_level_mask(level); + + vaddr_next = (vaddr & pmask) + psize; + paddr = ((pte_pfn(*kpte) << PAGE_SHIFT) & pmask) + + (vaddr & ~pmask); + psize -= (vaddr & ~pmask); + + if (vaddr_end - vaddr < psize) + psize = vaddr_end - vaddr; + if (last_psize == 0 || (last_paddr + last_psize) == paddr) { + last_paddr = (last_psize == 0 ? paddr : last_paddr); + last_psize += psize; + } else { + secure_call(last_paddr, last_psize >> PAGE_SHIFT, + cmd_type); + last_paddr = paddr; + last_psize = psize; + } + } + + if (last_psize) + secure_call(last_paddr, last_psize >> PAGE_SHIFT, cmd_type); + +out: + return; +} + +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) +{ + u64 npages; + + if (!csv3_active()) + return; + + npages = (size + (vaddr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT; + __csv3_memory_enc_dec(__csv3_early_secure_call, vaddr & PAGE_MASK, + npages, enc); +} + +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) +{ + if (!csv3_active()) + return; + + __csv3_memory_enc_dec(csv3_secure_call, vaddr & PAGE_MASK, pages, enc); +} + +static void print_secure_call_error(enum csv3_secure_command_type code) +{ + switch (code) { + case CSV3_SECURE_CMD_ACK: + pr_debug("secure call: handled\n"); + break; + case CSV3_SECURE_CMD_ERROR_INTERNAL: + pr_err("secure call: internal error\n"); + break; + case CSV3_SECURE_CMD_ERROR_INVALID_COMMAND: + pr_err("secure call: unsupported cmd\n"); + break; + case CSV3_SECURE_CMD_ERROR_INVALID_PARAM: + pr_err("secure call: invalid param\n"); + break; + case CSV3_SECURE_CMD_ERROR_INVALID_ADDRESS: + pr_err("secure call: invalid address\n"); + break; + case CSV3_SECURE_CMD_ERROR_INVALID_LENGTH: + pr_err("secure call: invalid length\n"); + break; + default: + pr_err("secure call: shouldn't reach here\n"); + break; + } +} + +int csv3_issue_request_report(phys_addr_t paddr, size_t size) +{ + struct secure_call_pages *sc_page_info; + struct csv3_secure_call_cmd *sc_wr, *sc_rd; + unsigned long flags; + int sc_page_idx; + enum csv3_secure_command_type sc_return_code; + + /* + * The secure call pages need to be accessed with IRQs disabled + * because they are per-CPU data. + */ + local_irq_save(flags); + + sc_page_info = this_cpu_read(secure_call_data); + sc_page_idx = this_cpu_read(secure_call_page_idx); + + sc_wr = sc_page_idx ? &sc_page_info->page_a : &sc_page_info->page_b; + sc_rd = sc_page_idx ? &sc_page_info->page_b : &sc_page_info->page_a; + + sc_wr->cmd_type = CSV3_SECURE_CMD_REQ_REPORT; + sc_wr->nums = 1; + sc_wr->unused = 0; + sc_wr->entry[0].base_address = (u64)paddr; + sc_wr->entry[0].size = size; + + /* + * Writing the command to sc_wr must be done before retrieving the + * status code from sc_rd; this is ensured by the smp_mb below. + */ + smp_mb(); + + sc_return_code = sc_rd->cmd_type; + + this_cpu_write(secure_call_page_idx, sc_page_idx ^ 1); + + /* Leave per-CPU data access */ + local_irq_restore(flags); + + /* Print the return code of the secure call */ + print_secure_call_error(sc_return_code); + + return sc_return_code == CSV3_SECURE_CMD_ACK ?
0 : -EIO; +} +EXPORT_SYMBOL_GPL(csv3_issue_request_report); diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index a6c1867fc7aa3ea0eb9b24fc1992c90119d2febb..b5f5e0916894406261efd7312efb607ec7ab29cb 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,7 @@ #include #include #include +#include static void __init fix_hypertransport_config(int num, int slot, int func) { @@ -685,6 +687,20 @@ static void __init apple_airport_reset(int bus, int slot, int func) early_iounmap(mmio, BCM4331_MMIO_SIZE); } +bool is_zhaoxin_kh40000; + +static void quirk_zhaoxin_dma_patch(int num, int slot, int func) +{ + u8 revision; + + revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); + if (revision == 0x10) { + is_zhaoxin_kh40000 = true; + dma_ops = &kh40000_dma_direct_ops; + pr_info("zhaoxin direct dma patch enabled\n"); + } +} + #define QFLAG_APPLY_ONCE 0x1 #define QFLAG_APPLIED 0x2 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) @@ -728,6 +744,10 @@ static struct chipset early_qrk[] __initdata = { PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, { PCI_VENDOR_ID_BROADCOM, 0x4331, PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset}, + { PCI_VENDOR_ID_ZHAOXIN, 0x1001, PCI_CLASS_BRIDGE_HOST, + PCI_BASE_CLASS_BRIDGE, QFLAG_APPLY_ONCE, quirk_zhaoxin_dma_patch }, + { PCI_VENDOR_ID_ZHAOXIN, 0x345B, PCI_CLASS_BRIDGE_HOST, + PCI_BASE_CLASS_BRIDGE, QFLAG_APPLY_ONCE, quirk_zhaoxin_dma_patch }, {} }; diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 4b414b0ab0692a3244bbd4e95f7e4c25d9002a90..510570b569e948c0a97f055263fd2cd264f06a46 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -33,6 +33,8 @@ DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic); DEFINE_PER_CPU(u64, xfd_state); #endif +extern struct static_key_false hygon_lmc_key; + /* The FPU state configuration data for kernel and user space */ struct fpu_state_config fpu_kernel_cfg __ro_after_init; struct fpu_state_config fpu_user_cfg __ro_after_init; @@ -421,6 +423,8 @@ EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate); void kernel_fpu_begin_mask(unsigned int kfpu_mask) { preempt_disable(); + if (static_branch_unlikely(&hygon_lmc_key)) + check_using_kernel_fpu(); WARN_ON_FPU(!irq_fpu_usable()); WARN_ON_FPU(this_cpu_read(in_kernel_fpu)); @@ -445,6 +449,9 @@ EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask); void kernel_fpu_end(void) { + if (static_branch_unlikely(&hygon_lmc_key)) + check_using_kernel_fpu(); + WARN_ON_FPU(!this_cpu_read(in_kernel_fpu)); this_cpu_write(in_kernel_fpu, false); @@ -452,6 +459,107 @@ void kernel_fpu_end(void) } EXPORT_SYMBOL_GPL(kernel_fpu_end); +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) + +extern unsigned int fpu_kernel_nonatomic_xstate_size; +unsigned int get_fpustate_free_space(struct fpu *fpu) +{ + if ((fpu_kernel_cfg.default_size + fpu_kernel_nonatomic_xstate_size) > + sizeof(fpu->fpstate->regs)) + return 0; + return fpu_kernel_nonatomic_xstate_size; +} + +unsigned long get_fpu_registers_pos(struct fpu *fpu, unsigned int off) +{ + unsigned long addr = 0; + + if (fpu && (fpu_kernel_nonatomic_xstate_size > off)) { + addr = (unsigned long)&fpu->__fpstate.regs.__padding[0]; + addr += fpu_kernel_cfg.default_size + off; + } + return addr; +} + +/* + * We can call kernel_fpu_begin_nonatomic in non-atomic task context. 
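+ * + * A sketch of the intended call pattern, assuming a + * kernel_fpu_begin_nonatomic() wrapper that passes the default mask + * (the wrapper itself is not defined in this hunk): + * + * if (!kernel_fpu_begin_nonatomic()) { + * (use FPU/SIMD registers here) + * kernel_fpu_end_nonatomic(); + * } + * + * A non-zero return means the FPU could not be claimed and the caller + * must fall back to a scalar code path.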
+ */ +int kernel_fpu_begin_nonatomic_mask(unsigned int kfpu_mask) +{ + preempt_disable(); + /* Nested calls are not supported */ + if (test_thread_flag(TIF_USING_FPU_NONATOMIC)) + goto err; + + if (KERNEL_FPU_NONATOMIC_SIZE > + get_fpustate_free_space(&current->thread.fpu)) + goto err; + + /* + * This means we call kernel_fpu_begin_nonatomic after kernel_fpu_begin, + * but before kernel_fpu_end. + */ + if (this_cpu_read(in_kernel_fpu)) + goto err; + + if (in_interrupt()) + goto err; + + if (current->flags & PF_KTHREAD) + goto err; + + if (!test_thread_flag(TIF_NEED_FPU_LOAD)) { + set_thread_flag(TIF_NEED_FPU_LOAD); + save_fpregs_to_fpstate(&current->thread.fpu); + } + /* Set thread flag: TIF_USING_FPU_NONATOMIC */ + set_thread_flag(TIF_USING_FPU_NONATOMIC); + + __cpu_invalidate_fpregs_state(); + + /* Put sane initial values into the control registers. */ + if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM)) + ldmxcsr(MXCSR_DEFAULT); + + if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU)) + asm volatile ("fninit"); + + preempt_enable(); + + return 0; + +err: + preempt_enable(); + + return -1; +} +EXPORT_SYMBOL_GPL(kernel_fpu_begin_nonatomic_mask); + +void kernel_fpu_end_nonatomic(void) +{ + preempt_disable(); + /* + * This means we call kernel_fpu_end_nonatomic after kernel_fpu_begin, + * but before kernel_fpu_end. + */ + WARN_ON_FPU(this_cpu_read(in_kernel_fpu)); + + WARN_ON_FPU(!test_thread_flag(TIF_USING_FPU_NONATOMIC)); + + clear_thread_flag(TIF_USING_FPU_NONATOMIC); + preempt_enable(); +} +EXPORT_SYMBOL_GPL(kernel_fpu_end_nonatomic); + +void save_fpregs_to_fpkernelstate(struct fpu *kfpu) +{ + kernel_fpu_states_save((void *)get_fpu_registers_pos(kfpu, + MAX_FPU_CTX_SIZE), + NULL, MAX_FPU_CTX_SIZE); +} +#endif + /* * Sync the FPU register state to current's memory register state when the * current task owns the FPU. The hardware register state is preserved. diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 998a08f17e3317abde0d01961ebfe5d571681e62..e55abf20aa189ecd254e90b8d54cd2e92b828e34 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -133,6 +133,12 @@ static void __init fpu__init_system_generic(void) fpu__init_system_mxcsr(); } +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +unsigned int fpu_kernel_nonatomic_xstate_size; +EXPORT_SYMBOL_GPL(fpu_kernel_nonatomic_xstate_size); +#endif + /* * Enforce that 'MEMBER' is the last field of 'TYPE'. * @@ -161,6 +167,11 @@ static void __init fpu__init_task_struct_size(void) * size.
*/ task_size += fpu_kernel_cfg.default_size; +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + task_size += fpu_kernel_nonatomic_xstate_size; +#endif /* * We dynamically size 'struct fpu', so we require that diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 255ff8f6c52705739733a19d6dce5d43dd434aed..c1ebfe4b4a8db733b5600c06e60e932dd8fb9709 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -677,6 +677,11 @@ static unsigned int __init get_xsave_size_user(void) static int __init init_xstate_size(void) { +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) + extern unsigned int fpu_kernel_nonatomic_xstate_size; +#endif + /* Recompute the context size for enabled features: */ unsigned int user_size, kernel_size, kernel_default_size; bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED); @@ -710,6 +715,10 @@ static int __init init_xstate_size(void) fpu_user_cfg.default_size = xstate_calculate_size(fpu_user_cfg.default_features, false); +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) + fpu_kernel_nonatomic_xstate_size = KERNEL_FPU_NONATOMIC_SIZE; +#endif return 0; } diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 246a609f889b20986fe9b66d2449a81db125aec1..de001b2146abf3bef00b4632ada3e7a483421411 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -29,11 +30,33 @@ static void __init i386_default_early_setup(void) x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; } +#ifdef CONFIG_MICROCODE_INITRD32 +unsigned long __initdata initrd_start_early; +static pte_t __initdata *initrd_pl2p_start, *initrd_pl2p_end; + +static void zap_early_initrd_mapping(void) +{ + pte_t *pl2p = initrd_pl2p_start; + + for (; pl2p < initrd_pl2p_end; pl2p++) { + *pl2p = (pte_t){ .pte = 0 }; + + if (!IS_ENABLED(CONFIG_X86_PAE)) + *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = (pte_t) {.pte = 0}; + } +} +#else +static inline void zap_early_initrd_mapping(void) { } +#endif + asmlinkage __visible void __init __noreturn i386_start_kernel(void) { /* Make sure IDT is set up before any exception happens */ idt_setup_early_handler(); + load_ucode_bsp(); + zap_early_initrd_mapping(); + cr4_init_shadow(); sanitize_boot_params(&boot_params); @@ -69,52 +92,83 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void) * to the first kernel PMD. Note the upper half of each PMD or PTE are * always zero at this stage. 
*/ -void __init mk_early_pgtbl_32(void); -void __init mk_early_pgtbl_32(void) -{ -#ifdef __pa -#undef __pa -#endif -#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) - pte_t pte, *ptep; - int i; - unsigned long *ptr; - /* Enough space to fit pagetables for the low memory linear map */ - const unsigned long limit = __pa(_end) + - (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); #ifdef CONFIG_X86_PAE - pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd); -#define SET_PL2(pl2, val) { (pl2).pmd = (val); } +typedef pmd_t pl2_t; +#define pl2_base initial_pg_pmd +#define SET_PL2(val) { .pmd = (val), } #else - pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table); -#define SET_PL2(pl2, val) { (pl2).pgd = (val); } +typedef pgd_t pl2_t; +#define pl2_base initial_page_table +#define SET_PL2(val) { .pgd = (val), } #endif - ptep = (pte_t *)__pa(__brk_base); - pte.pte = PTE_IDENT_ATTR; - +static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_t **pl2p, + const unsigned long limit) +{ while ((pte.pte & PTE_PFN_MASK) < limit) { + pl2_t pl2 = SET_PL2((unsigned long)*ptep | PDE_IDENT_ATTR); + int i; + + **pl2p = pl2; + if (!IS_ENABLED(CONFIG_X86_PAE)) { + /* Kernel PDE entry */ + *(*pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; + } - SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR); - *pl2p = pl2; -#ifndef CONFIG_X86_PAE - /* Kernel PDE entry */ - *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; -#endif for (i = 0; i < PTRS_PER_PTE; i++) { - *ptep = pte; + **ptep = pte; pte.pte += PAGE_SIZE; - ptep++; + (*ptep)++; } - - pl2p++; + (*pl2p)++; } + return pte; +} + +void __init __no_stack_protector mk_early_pgtbl_32(void) +{ + /* Enough space to fit pagetables for the low memory linear map */ + unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); + pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base); + struct boot_params __maybe_unused *params; + pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base); + unsigned long *ptr; + + pte.pte = PTE_IDENT_ATTR; + pte = init_map(pte, &ptep, &pl2p, limit); - ptr = (unsigned long *)__pa(&max_pfn_mapped); + ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped); /* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */ *ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT; - ptr = (unsigned long *)__pa(&_brk_end); + ptr = (unsigned long *)__pa_nodebug(&_brk_end); *ptr = (unsigned long)ptep + PAGE_OFFSET; -} +#ifdef CONFIG_MICROCODE_INITRD32 + /* Running on a hypervisor? 
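CPUID.01H:ECX[31] is the + * hypervisor-present bit, so the check below skips the early initrd + * mapping when running as a guest.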
*/ + if (native_cpuid_ecx(1) & BIT(31)) + return; + + params = (struct boot_params *)__pa_nodebug(&boot_params); + if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image) + return; + + /* Save the virtual start address */ + ptr = (unsigned long *)__pa_nodebug(&initrd_start_early); + *ptr = (pte.pte & PTE_PFN_MASK) + PAGE_OFFSET; + *ptr += ((unsigned long)params->hdr.ramdisk_image) & ~PAGE_MASK; + + /* Save pl2p for cleanup */ + ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_start); + *ptr = (unsigned long)pl2p + PAGE_OFFSET; + + limit = (unsigned long)params->hdr.ramdisk_image; + pte.pte = PTE_IDENT_ATTR | PFN_ALIGN(limit); + limit = (unsigned long)params->hdr.ramdisk_image + params->hdr.ramdisk_size; + + init_map(pte, &ptep, &pl2p, limit); + + ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_end); + *ptr = (unsigned long)pl2p + PAGE_OFFSET; +#endif +} diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index c58213bce294e9f4cb8eaa65704203817a35e76f..369342de9c4e593fb7d38bff21e5ff1f34938957 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -42,6 +42,7 @@ #include #include #include +#include /* * Manage page tables very early on. @@ -160,6 +161,14 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv i = pmd_index(vaddr); pmd[i] -= sme_get_me_mask(); } + + /* On CSV3, move the shared pages out of the isolated memory region. */ + if (csv3_active()) { + vaddr = (unsigned long)__start_bss_decrypted; + csv_early_reset_memory(bp); + csv_early_update_memory_dec((unsigned long)vaddr, + (vaddr_end - vaddr) >> PAGE_SHIFT); + } } /* @@ -318,6 +327,54 @@ unsigned long __head __startup_64(unsigned long physaddr, return sme_postprocess_startup(bp, pmd); } +#ifdef CONFIG_AMD_MEM_ENCRYPT +extern bool bsp_flush_bss_decrypted_section_done; + +void __ref early_clflush_bss_decrypted_section(void) +{ + /* Only the BSP may flush these caches, and only at the early boot stage */ + if (bsp_flush_bss_decrypted_section_done) + return; + + if (read_cr3_pa() != __pa_nodebug(early_top_pgt)) + return; + + if (sme_get_me_mask()) { + unsigned long vaddr, vaddr_end; + char *cl, *start, *end; + + /* + * The memory region of the .bss..decrypted section may have + * been mapped encrypted at an earlier stage. If the + * corresponding stale cache lines from that stage were not + * flushed before we access the memory region, Linux will + * crash later because the stale cache lines will pollute the + * memory. So we need to flush the caches via the encrypted + * mapping before we access the .bss..decrypted section. + * + * The function __startup_64() has already filled in the + * encrypted mapping for the .bss..decrypted section; use that + * mapping here. + */ + vaddr = (unsigned long)__start_bss_decrypted - + __START_KERNEL_map + phys_base; + vaddr_end = (unsigned long)__end_bss_decrypted - + __START_KERNEL_map + phys_base; + + /* Hardcode cl-size to 64 at this stage.
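The masks below align + * start down and end up to the assumed 64-byte cache line size, so + * the clflush loop covers the whole [vaddr, vaddr_end) range.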
*/ + start = (char *)(vaddr & ~63); + end = (char *)((vaddr_end + 63) & ~63); + + asm volatile("mfence" : : : "memory"); + for (cl = start; cl != end; cl += 64) + clflush(cl); + asm volatile("mfence" : : : "memory"); + } + + bsp_flush_bss_decrypted_section_done = true; +} +#endif + /* Wipe all early page tables except for the kernel symbol map */ static void __init reset_early_page_tables(void) { diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index c9318993f9594562f8518d60d7182d9e1f483f1e..63f6ff4b28eb17bdea42085476ed6dff47f1ed2b 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -118,11 +118,6 @@ SYM_CODE_START(startup_32) movl %eax, pa(olpc_ofw_pgd) #endif -#ifdef CONFIG_MICROCODE - /* Early load ucode on BSP. */ - call load_ucode_bsp -#endif - /* Create early pagetables. */ call mk_early_pgtbl_32 @@ -157,11 +152,6 @@ SYM_FUNC_START(startup_32_smp) movl %eax,%ss leal -__PAGE_OFFSET(%ecx),%esp -#ifdef CONFIG_MICROCODE - /* Early load ucode on AP. */ - call load_ucode_ap -#endif - .Ldefault_entry: movl $(CR0_STATE & ~X86_CR0_PG),%eax movl %eax,%cr0 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index e6eaee8509ceed09581726d4ec18f836b4f300ae..9c2d7e2b5edbb2be66a625777fe7c30b7ec66c7f 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -375,6 +375,14 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL) shrq $32, %rdx wrmsr +#ifdef CONFIG_AMD_MEM_ENCRYPT + /* + * Ensure that stale cache lines for .bss..decrypted memory, left over + * from the earlier boot stage, are flushed. + */ + call early_clflush_bss_decrypted_section +#endif + /* Setup and Load IDT */ call early_setup_idt @@ -511,6 +519,8 @@ SYM_CODE_END(vc_boot_ghcb) SYM_DATA(initial_code, .quad x86_64_start_kernel) #ifdef CONFIG_AMD_MEM_ENCRYPT SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb) +SYM_DATA(bsp_flush_bss_decrypted_section_done, .byte 0x0) + .balign 8 #endif SYM_DATA(trampoline_lock, .quad 0); diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 046bc9d57e996617a0bdfa8af4d86cf3f84a4074..2626fa052b454bdb1644d0eb3f5ace9334973230 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -804,6 +804,12 @@ static u64 read_hpet(struct clocksource *cs) if (in_nmi()) return (u64)hpet_readl(HPET_COUNTER); + /* + * Read the HPET directly if a panic is in progress. + */ + if (unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID)) + return (u64)hpet_readl(HPET_COUNTER); + /* * Read the current state of the lock and HPET value atomically.
*/ diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c index ee4fe8cdb857676f3fb69cd14a17aed3c735f8f1..b49ac8ecbbd691d62c0bbda9f81a1e398916e83b 100644 --- a/arch/x86/kernel/itmt.c +++ b/arch/x86/kernel/itmt.c @@ -122,6 +122,7 @@ int sched_set_itmt_support(void) return 0; } +EXPORT_SYMBOL_GPL(sched_set_itmt_support); /** * sched_clear_itmt_support() - Revoke platform's support of ITMT @@ -181,3 +182,4 @@ void sched_set_itmt_core_prio(int prio, int cpu) { per_cpu(sched_core_priority, cpu) = prio; } +EXPORT_SYMBOL_GPL(sched_set_itmt_core_prio); diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 87aee638e1a5d894a02ae02b14a01d99f280ba38..6da2cfa23c29398e9b797b2e3abcd531e2efbb22 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -343,6 +344,9 @@ static noinstr void default_do_nmi(struct pt_regs *regs) instrumentation_begin(); + if (microcode_nmi_handler_enabled() && microcode_nmi_handler()) + goto out; + handled = nmi_handle(NMI_LOCAL, regs); __this_cpu_add(nmi_stats.normal, handled); if (handled) { @@ -498,8 +502,11 @@ DEFINE_IDTENTRY_RAW(exc_nmi) if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) raw_atomic_long_inc(&nsp->idt_calls); - if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) + if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) { + if (microcode_nmi_handler_enabled()) + microcode_offline_nmi_handler(); return; + } if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { this_cpu_write(nmi_state, NMI_LATCHED); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 708c87b88cc150ee64145de90de938e0482afa3f..619560eb9f94c049fdcbb1a2e4dcb48938fff674 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -212,7 +212,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_fpu_finish(); /* Load the Intel cache allocation PQR MSR. */ - resctrl_sched_in(next_p); + resctrl_arch_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index dd19a4db741afd51dc7ddf7d2643836b64d92ec9..fbadfa84a02d6ac1b5d816c30f61873f17329cbc 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -63,6 +63,8 @@ #include "process.h" +extern struct static_key_false hygon_lmc_key; + /* Prints also some state that isn't saved in the pt_regs */ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode, const char *log_lvl) @@ -571,6 +573,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) if (!test_thread_flag(TIF_NEED_FPU_LOAD)) switch_fpu_prepare(prev_fpu, cpu); + if (static_branch_unlikely(&hygon_lmc_key)) + switch_kernel_fpu_prepare(prev_p, cpu); + /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). * @@ -625,6 +630,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_fpu_finish(); + if (static_branch_unlikely(&hygon_lmc_key)) + switch_kernel_fpu_finish(next_p); + /* Reload sp0. */ update_task_stack(next_p); @@ -659,7 +667,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) } /* Load the Intel cache allocation PQR MSR. 
*/ - resctrl_sched_in(next_p); + resctrl_arch_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index eb129277dcdd64a3b588c9914567cfc57325610b..93dc119c8e2e89aa17ac94926a0c010ffd4f67f3 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -55,6 +55,7 @@ #include #include #include +#include /* * max_low_pfn_mapped: highest directly mapped pfn < 4 GB @@ -226,8 +227,6 @@ static void __init reserve_brk(void) _brk_start = 0; } -u64 relocated_ramdisk; - #ifdef CONFIG_BLK_DEV_INITRD static u64 __init get_ramdisk_image(void) @@ -253,6 +252,34 @@ static u64 __init get_ramdisk_size(void) return ramdisk_size; } +#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) + +static void __init copy_early_initrd(void *dest, phys_addr_t src, + unsigned long size) +{ + unsigned long slop, clen; + char *p; + + while (size) { + slop = offset_in_page(src); + clen = size; + if (clen > MAX_MAP_CHUNK - slop) + clen = MAX_MAP_CHUNK - slop; + /* + * The _ENC flag should be preserved so that, when SME is enabled, + * the initrd can be mapped as encrypted, since it was encrypted + * earlier. The flag has no impact on other platforms, e.g. when + * TDX or SEV is enabled. + */ + p = early_memremap_prot(src & PAGE_MASK, clen + slop, + pgprot_val(FIXMAP_PAGE_NORMAL)); + memcpy(dest, p + slop, clen); + early_memunmap(p, clen + slop); + dest += clen; + src += clen; + size -= clen; + } +} + static void __init relocate_initrd(void) { /* Assume only end is not page aligned */ @@ -261,7 +288,7 @@ static void __init relocate_initrd(void) u64 area_size = PAGE_ALIGN(ramdisk_size); /* We need to move the initrd down into directly mapped mem */ - relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0, + u64 relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0, PFN_PHYS(max_pfn_mapped)); if (!relocated_ramdisk) panic("Cannot find place for new RAMDISK of size %lld\n", @@ -272,7 +299,7 @@ static void __init relocate_initrd(void) printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n", relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); - copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size); + copy_early_initrd((void *)initrd_start, ramdisk_image, ramdisk_size); printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to" " [mem %#010llx-%#010llx]\n", @@ -1219,6 +1246,10 @@ void __init setup_arch(char **cmdline_p) early_acpi_boot_init(); initmem_init(); + + /* Try to reserve contiguous memory to support CSV3 */ + early_csv_reserve_mem(); + dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); if (boot_cpu_has(X86_FEATURE_GBPAGES)) diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c index 9905dc0e0b0960e50124a45a959f9ea26356f31a..614335589d527f3e42b28b76e1babd5de6e54c92 100644 --- a/arch/x86/kernel/sev.c +++ b/arch/x86/kernel/sev.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -1852,6 +1853,15 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code) struct ghcb *ghcb; bool ret = true; + /* + * Make sure the code between __sev_get_ghcb() and __sev_put_ghcb() + * runs in atomic context. If the #VC came from kernel mode, the code + * here is already in atomic context. If it came from user mode, it is + * necessary to switch to atomic context manually.
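+ * Bumping the preempt count by HARDIRQ_OFFSET marks this section as hardirq context, so preemption is disabled and the per-CPU GHCB cannot be taken away from under the handler.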
+ */ + if (is_x86_vendor_hygon() && !in_nmi()) + __preempt_count_add(HARDIRQ_OFFSET); + ghcb = __sev_get_ghcb(&state); vc_ghcb_invalidate(ghcb); @@ -1862,6 +1872,9 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co __sev_put_ghcb(&state); + if (is_x86_vendor_hygon() && !in_nmi()) + __preempt_count_sub(HARDIRQ_OFFSET); + /* Done - now check the result */ switch (result) { case ES_OK: diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ce77dac9a0202ae1c1f1a95fccc5419762b1e8c6..d28920971d80c9536478a4d39973664b266de1f4 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -259,12 +259,9 @@ static void notrace start_secondary(void *unused) cpu_init_exception_handling(); /* - * 32-bit systems load the microcode from the ASM startup code for - * historical reasons. - * - * On 64-bit systems load it before reaching the AP alive - * synchronization point below so it is not part of the full per - * CPU serialized bringup part when "parallel" bringup is enabled. + * Load the microcode before reaching the AP alive synchronization + * point below so it is not part of the full per CPU serialized + * bringup part when "parallel" bringup is enabled. * * That's even safe when hyperthreading is enabled in the CPU as * the core code starts the primary threads first and leaves the @@ -277,8 +274,7 @@ static void notrace start_secondary(void *unused) * CPUID, MSRs etc. must be strictly serialized to maintain * software state correctness. */ - if (IS_ENABLED(CONFIG_X86_64)) - load_ucode_ap(); + load_ucode_ap(); /* * Synchronization point with the hotplug core. Sets this CPUs @@ -748,6 +744,7 @@ const struct cpumask *cpu_clustergroup_mask(int cpu) { return cpu_l2c_shared_mask(cpu); } +EXPORT_SYMBOL_GPL(cpu_clustergroup_mask); static void impress_friends(void) { diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index 52e1f3f0b361cedeac7194c08c50390fe05eea10..06289c254a0ede8fb7cbba9952f45ec5638e4e08 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -84,6 +84,8 @@ static __init void x86_late_time_init(void) if (static_cpu_has(X86_FEATURE_WAITPKG)) use_tpause_delay(); + else if (static_cpu_has(X86_FEATURE_ZXPAUSE)) + use_zxpause_delay(); } /* diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 4334033658edfbbe71ec48bda9f8fbcda2749c33..3998d1f681bcc2f9f779850ee98abee5632f0c4f 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -230,6 +230,11 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu) if (bootval != ref->adjusted) { cur->adjusted = ref->adjusted; wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted); + } else if (cur->adjusted != bootval) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + cur->adjusted = bootval; + } } /* * We have the TSCs forced to be in sync on this package. Skip sync diff --git a/arch/x86/kernel/zhaoxin_kh40000.c b/arch/x86/kernel/zhaoxin_kh40000.c new file mode 100644 index 0000000000000000000000000000000000000000..e8dd3bd43e72f39c722d378bdcff44c7062832d4 --- /dev/null +++ b/arch/x86/kernel/zhaoxin_kh40000.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "../../../kernel/dma/direct.h" + +/*** + * usage: + * set "zhaoxin_patch_bitmask=" in cmdline + * value description: + * bit 0: enable(1) node check or not(0). 
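+ * e.g. booting with "zhaoxin_patch_bitmask=0" clears bit 0 and disables the node check (illustrative value; the string is parsed with kstrtoul()).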
default 1 + */ +enum { + ZHAOXIN_P2CW_NODE_CHECK = BIT(0), + ZHAOXIN_PATCH_CODE_MAX = ZHAOXIN_P2CW_NODE_CHECK, +}; + +#define ZHAOXIN_PATCH_CODE_DEFAULT ZHAOXIN_P2CW_NODE_CHECK + +unsigned long zhaoxin_patch_code = ZHAOXIN_PATCH_CODE_DEFAULT; + +static int __init zhaoxin_patch_code_setup(char *str) +{ + int err = kstrtoul(str, 0, &zhaoxin_patch_code); + + if (err || (zhaoxin_patch_code > ZHAOXIN_PATCH_CODE_MAX)) { + pr_err("cmdline 'zhaoxin_patch_bitmask=%s' is invalid\n", + str); + return err; + } + + if (ZHAOXIN_P2CW_NODE_CHECK & zhaoxin_patch_code) + pr_info("zhaoxin dma patch node check is enabled\n"); + + return 0; +} +__setup("zhaoxin_patch_bitmask=", zhaoxin_patch_code_setup); + +static struct pci_dev *kh40000_get_pci_dev(struct device *dev) +{ + if (dev_is_pci(dev)) + return to_pci_dev(dev); + + if (dev->parent) + return kh40000_get_pci_dev(dev->parent); + + return NULL; +} + +static void kh40000_sync_single_dma_for_cpu(struct device *dev, dma_addr_t paddr, + enum dma_data_direction dir, bool is_iommu) +{ + u8 vid; + struct pci_dev *pci; + u64 dma_mask = *dev->dma_mask; + + /* check direction */ + if ((dir != DMA_FROM_DEVICE) && (dir != DMA_BIDIRECTIONAL)) + return; + + /* check dma capability */ + if (dma_mask <= DMA_BIT_MASK(32)) + return; + + /* check device type */ + pci = kh40000_get_pci_dev(dev); + if (pci == NULL) + return; + + /* get real physical address */ + if (is_iommu) { + struct iommu_domain *domain = iommu_get_dma_domain(dev); + + paddr = iommu_iova_to_phys(domain, paddr); + if (!paddr) + return; + } + + /* skip the flush when the memory is local to the device's node */ + if ((zhaoxin_patch_code & ZHAOXIN_P2CW_NODE_CHECK) + && pfn_to_nid(PFN_DOWN(paddr)) == dev_to_node(dev)) + return; + + /* flush data by one pci read cycle */ + pci_read_config_byte(pci, PCI_VENDOR_ID, &vid); +} + +/* zhaoxin kh-40000 direct dma ops */ +static void *kh40000_dma_direct_alloc(struct device *dev, size_t size, + dma_addr_t *addr, gfp_t gfp, unsigned long attrs) +{ + if (dev->coherent_dma_mask > DMA_BIT_MASK(32)) + gfp |= __GFP_THISNODE; + + return dma_direct_alloc(dev, size, addr, gfp, attrs); +} + +static void kh40000_dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); + dma_direct_unmap_page(dev, addr, size, dir, attrs); +} + +static void kh40000_dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0); + + dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir); +} + +static void kh40000_dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); + dma_direct_sync_single_for_cpu(dev, addr, size, dir); +} + +static void kh40000_dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0); + + dma_direct_unmap_sg(dev, sgl, nents, dir, attrs); +} + +static void kh40000_dma_direct_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); +} + +const struct dma_map_ops kh40000_dma_direct_ops
= { + .flags = DMA_F_PCI_P2PDMA_SUPPORTED, + .alloc = kh40000_dma_direct_alloc, + .sync_sg_for_cpu = kh40000_dma_direct_sync_sg_for_cpu, + .unmap_page = kh40000_dma_direct_unmap_page, + .sync_single_for_cpu = kh40000_dma_direct_sync_single_for_cpu, + .unmap_sg = kh40000_dma_direct_unmap_sg, + .unmap_resource = kh40000_dma_direct_unmap_resource, + .dma_supported = dma_direct_supported, + .free = dma_direct_free, + .alloc_pages = dma_direct_alloc_pages, + .free_pages = dma_direct_free_pages, + .sync_single_for_device = dma_direct_sync_single_for_device, + .sync_sg_for_device = dma_direct_sync_sg_for_device, + .get_required_mask = dma_direct_get_required_mask, + .max_mapping_size = dma_direct_max_mapping_size, + .mmap = dma_direct_mmap, + .get_sgtable = dma_direct_get_sgtable, + .map_page = dma_direct_map_page, + .map_sg = dma_direct_map_sg, + .map_resource = dma_direct_map_resource, +}; + +/* zhaoxin kh-40000 iommu dma ops */ +static const struct dma_map_ops *iommu_dma_ops; + +static void *kh40000_iommu_dma_alloc(struct device *dev, size_t size, + dma_addr_t *addr, gfp_t gfp, unsigned long attrs) +{ + gfp |= __GFP_THISNODE; + + return iommu_dma_ops->alloc(dev, size, addr, gfp, attrs); +} + +static void kh40000_iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle, unsigned long attrs) +{ + iommu_dma_ops->free(dev, size, cpu_addr, handle, attrs); +} + +static struct page *kh40000_dma_common_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) +{ + return iommu_dma_ops->alloc_pages(dev, size, dma_handle, dir, gfp); +} + +static void kh40000_dma_common_free_pages(struct device *dev, size_t size, struct page *page, + dma_addr_t dma_handle, enum dma_data_direction dir) +{ + iommu_dma_ops->free_pages(dev, size, page, dma_handle, dir); +} + +static struct sg_table *kh40000_iommu_dma_alloc_noncontiguous(struct device *dev, + size_t size, enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs) +{ + return iommu_dma_ops->alloc_noncontiguous(dev, size, dir, gfp, attrs); +} + +static void kh40000_iommu_dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ + return iommu_dma_ops->free_noncontiguous(dev, size, sgt, dir); +} + +static int kh40000_iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + return iommu_dma_ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); +} + +static void kh40000_iommu_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs); +} + +static int kh40000_iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + return iommu_dma_ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); +} + +static dma_addr_t kh40000_iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + return iommu_dma_ops->map_page(dev, page, offset, size, dir, attrs); +} + +static int kh40000_iommu_dma_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + return iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs); +} + +static void 
kh40000_iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nelems, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1); + iommu_dma_ops->unmap_sg(dev, sgl, nelems, dir, attrs); +} + +static void kh40000_iommu_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->sync_single_for_cpu(dev, addr, size, dir); +} + +static void kh40000_iommu_dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + iommu_dma_ops->sync_single_for_device(dev, addr, size, dir); +} + +static void kh40000_iommu_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nelems, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1); + iommu_dma_ops->sync_sg_for_cpu(dev, sgl, nelems, dir); +} + +static void kh40000_iommu_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + iommu_dma_ops->sync_sg_for_device(dev, sgl, nelems, dir); +} + +static dma_addr_t kh40000_iommu_dma_map_resource(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + return iommu_dma_ops->map_resource(dev, phys, size, dir, attrs); +} + +static void kh40000_iommu_dma_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->unmap_resource(dev, addr, size, dir, attrs); +} + +static unsigned long kh40000_iommu_dma_get_merge_boundary(struct device *dev) +{ + return iommu_dma_ops->get_merge_boundary(dev); +} + +static size_t kh40000_iommu_dma_opt_mapping_size(void) +{ + return iommu_dma_ops->opt_mapping_size(); +} + +const struct dma_map_ops kh40000_dma_iommu_ops = { + .flags = DMA_F_PCI_P2PDMA_SUPPORTED, + .alloc = kh40000_iommu_dma_alloc, + .free = kh40000_iommu_dma_free, + .unmap_page = kh40000_iommu_dma_unmap_page, + .alloc_pages = kh40000_dma_common_alloc_pages, + .free_pages = kh40000_dma_common_free_pages, + .alloc_noncontiguous = kh40000_iommu_dma_alloc_noncontiguous, + .free_noncontiguous = kh40000_iommu_dma_free_noncontiguous, + .mmap = kh40000_iommu_dma_mmap, + .get_sgtable = kh40000_iommu_dma_get_sgtable, + .map_page = kh40000_iommu_dma_map_page, + .map_sg = kh40000_iommu_dma_map_sg, + .unmap_sg = kh40000_iommu_dma_unmap_sg, + .sync_single_for_cpu = kh40000_iommu_dma_sync_single_for_cpu, + .sync_single_for_device = kh40000_iommu_dma_sync_single_for_device, + .sync_sg_for_cpu = kh40000_iommu_dma_sync_sg_for_cpu, + .sync_sg_for_device = kh40000_iommu_dma_sync_sg_for_device, + .map_resource = kh40000_iommu_dma_map_resource, + .unmap_resource = kh40000_iommu_dma_unmap_resource, + .get_merge_boundary = kh40000_iommu_dma_get_merge_boundary, + .opt_mapping_size = kh40000_iommu_dma_opt_mapping_size, +}; + +void kh40000_set_iommu_dma_ops(struct device *dev) +{ + if (dev->dma_ops) { + iommu_dma_ops = dev->dma_ops; + set_dma_ops(dev, &kh40000_dma_iommu_ops); + pr_info_once("zhaoxin iommu dma patch enabled\n"); + } +} diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 
ed90f148140dfe093bed15a033b6e322a3cb5f2a..3e23d32e655a799012f0b52959b40d9028ae872e 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -154,4 +154,14 @@ config KVM_PROVE_MMU config KVM_EXTERNAL_WRITE_TRACKING bool +config KVM_SUPPORTS_CSV_REUSE_ASID + def_bool y + bool "Reuse the same ASID for different HYGON CSV guests" + depends on KVM_AMD_SEV && CPU_SUP_HYGON && HYGON_CSV + depends on !CGROUP_MISC + help + Provide support for reusing the same ASID for different HYGON + CSV guests. This allows the user to create more CSV guests on + HYGON CPUs with a limited number of ASIDs. + endif # VIRTUALIZATION diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index a99ffc3f3a3fdbbf89186bf4a47a2d80edcf8860..39bbce9b1685a376216bd4fe02f4d881e544524f 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -41,6 +41,8 @@ obj-$(CONFIG_KVM) += kvm.o obj-$(CONFIG_KVM_INTEL) += kvm-intel.o obj-$(CONFIG_KVM_AMD) += kvm-amd.o +kvm-amd-$(CONFIG_HYGON_CSV) += svm/csv.o + AFLAGS_svm/vmenter.o := -iquote $(obj) $(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 7e6763c2bc01e9c90b3384307a01113fdcc4051e..4c8e631bef59ca53d724865d1bff22c887aff1ad 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -691,7 +691,7 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_mask(CPUID_7_1_EAX, F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) | F(FZRM) | F(FSRS) | F(FSRC) | - F(AMX_FP16) | F(AVX_IFMA) + F(AMX_FP16) | F(AVX_IFMA) | F(LAM) ); kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX, @@ -812,6 +812,9 @@ void kvm_set_cpu_caps(void) F(PMM) | F(PMM_EN) ); + /* Zhaoxin 0xC0000006 leaf */ + kvm_cpu_cap_mask(CPUID_C000_0006_EAX, 0 /* bit0: zxpause */ | 0 /* bit1 HMAC */); + /* * Hide RDTSCP and RDPID if either feature is reported as supported but * probing MSR_TSC_AUX failed. This is purely a sanity check and @@ -1233,8 +1236,22 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->eax = entry->ebx = entry->ecx = 0; break; case 0x80000008: { + /* + * GuestPhysAddrSize (EAX[23:16]) is intended for software + * use. + * + * KVM's ABI is to report the effective MAXPHYADDR for the + * guest in PhysAddrSize (phys_as), and the maximum + * *addressable* GPA in GuestPhysAddrSize (g_phys_as). + * + * GuestPhysAddrSize is valid if and only if TDP is enabled, + * in which case the max GPA that can be addressed by KVM may + * be less than the max GPA that can be legally generated by + * the guest, e.g. if MAXPHYADDR>48 but the CPU doesn't + * support 5-level TDP. + */ unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U); - unsigned int phys_as; + unsigned int phys_as, g_phys_as; /* * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as @@ -1243,15 +1260,23 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) * paging, too. * * If TDP is enabled, use the raw bare metal MAXPHYADDR as - * reductions to the HPAs do not affect GPAs. + * reductions to the HPAs do not affect GPAs. The max + * addressable GPA is the same as the max effective GPA, except + * that it's capped at 48 bits if 5-level TDP isn't supported + * (hardware processes bits 51:48 only when walking the fifth + * level page table).
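+ * For example (hypothetical numbers): on a CPU with MAXPHYADDR == 52 that lacks 5-level TDP, phys_as stays 52 while g_phys_as is capped at 48 by the code below.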
*/ if (!tdp_enabled) { phys_as = boot_cpu_data.x86_phys_bits; + g_phys_as = 0; } else { phys_as = entry->eax & 0xff; + g_phys_as = phys_as; + if (kvm_mmu_get_max_tdp_level() < 5) + g_phys_as = min(g_phys_as, 48); } - entry->eax = phys_as | (virt_as << 8); + entry->eax = phys_as | (virt_as << 8) | (g_phys_as << 16); entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8)); entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0008_EBX); @@ -1325,17 +1350,22 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) } /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: - /*Just support up to 0xC0000004 now*/ - entry->eax = min(entry->eax, 0xC0000004); + /* Extended to 0xC0000006 */ + entry->eax = min(entry->eax, 0xC0000006); break; case 0xC0000001: cpuid_entry_override(entry, CPUID_C000_0001_EDX); break; + case 0xC0000006: + cpuid_entry_override(entry, CPUID_C000_0006_EAX); + break; + case 3: /* Processor serial number */ case 5: /* MONITOR/MWAIT */ case 0xC0000002: case 0xC0000003: case 0xC0000004: + case 0xC0000005: default: entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 110dae2b0e49d21a6691606c26d2736d077c7b6f..af4c0456bfb752d6e1f37df3e964a323810d4865 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -48,11 +48,6 @@ static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) return !(gpa & vcpu->arch.reserved_gpa_bits); } -static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) -{ - return !kvm_vcpu_is_legal_gpa(vcpu, gpa); -} - static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, gpa_t alignment) { @@ -289,4 +284,12 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, vcpu->arch.governed_features.enabled); } +static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) +{ + if (guest_can_use(vcpu, X86_FEATURE_LAM)) + cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57); + + return kvm_vcpu_is_legal_gpa(vcpu, cr3); +} + #endif diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 2673cd5c46cb486b9e78a20c589462552f0b4747..e223043ef5b26f23be5b2f0606641f66c5cd18aa 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -687,8 +687,8 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size) static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned *max_size, unsigned size, - bool write, bool fetch, - enum x86emul_mode mode, ulong *linear) + enum x86emul_mode mode, ulong *linear, + unsigned int flags) { struct desc_struct desc; bool usable; @@ -701,7 +701,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, *max_size = 0; switch (mode) { case X86EMUL_MODE_PROT64: - *linear = la; + *linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags); va_bits = ctxt_virt_addr_bits(ctxt); if (!__is_canonical_address(la, va_bits)) goto bad; @@ -717,11 +717,11 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, if (!usable) goto bad; /* code segment in protected mode or read-only data segment */ - if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) - || !(desc.type & 2)) && write) + if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) && + (flags & X86EMUL_F_WRITE)) goto bad; /* unreadable code segment */ - if (!fetch && (desc.type & 8) && !(desc.type & 2)) + if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && 
!(desc.type & 2)) goto bad; lim = desc_limit_scaled(&desc); if (!(desc.type & 8) && (desc.type & 4)) { @@ -757,8 +757,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt, ulong *linear) { unsigned max_size; - return __linearize(ctxt, addr, &max_size, size, write, false, - ctxt->mode, linear); + return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear, + write ? X86EMUL_F_WRITE : 0); } static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst) @@ -771,7 +771,8 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst) if (ctxt->op_bytes != sizeof(unsigned long)) addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); - rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear); + rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear, + X86EMUL_F_FETCH); if (rc == X86EMUL_CONTINUE) ctxt->_eip = addr.ea; return rc; @@ -907,8 +908,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) * boundary check itself. Instead, we use max_size to check * against op_size. */ - rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode, - &linear); + rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear, + X86EMUL_F_FETCH); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; @@ -3439,8 +3440,10 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt) { int rc; ulong linear; + unsigned int max_size; - rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); + rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode, + &linear, X86EMUL_F_INVLPG); if (rc == X86EMUL_CONTINUE) ctxt->ops->invlpg(ctxt, linear); /* Disable writeback. */ diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index 423a73395c102ca908453016e416dbdfb7fc2b5b..ad463b1ed4e4a87c29aa9d5af3842fbd4e039e41 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -16,6 +16,7 @@ KVM_GOVERNED_X86_FEATURE(PAUSEFILTER) KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD) KVM_GOVERNED_X86_FEATURE(VGIF) KVM_GOVERNED_X86_FEATURE(VNMI) +KVM_GOVERNED_X86_FEATURE(LAM) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index be7aeb9b8ea3b152b269870e5a737642a492e192..e6d149825169dda3ace396ca979923c4a2d108e8 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -88,6 +88,12 @@ struct x86_instruction_info { #define X86EMUL_IO_NEEDED 5 /* IO is needed to complete emulation */ #define X86EMUL_INTERCEPTED 6 /* Intercepted by nested VMCB/VMCS */ +/* x86-specific emulation flags */ +#define X86EMUL_F_WRITE BIT(0) +#define X86EMUL_F_FETCH BIT(1) +#define X86EMUL_F_IMPLICIT BIT(2) +#define X86EMUL_F_INVLPG BIT(3) + struct x86_emulate_ops { void (*vm_bugged)(struct x86_emulate_ctxt *ctxt); /* @@ -224,6 +230,9 @@ struct x86_emulate_ops { int (*leave_smm)(struct x86_emulate_ctxt *ctxt); void (*triple_fault)(struct x86_emulate_ctxt *ctxt); int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr); + + gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr, + unsigned int flags); }; /* Type, address-of, and value of an instruction's operand. 
*/ diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 253fb2093d5dadcbe7994a004c26e2ad54a054a7..ade33a54306d2bbb431c84d913519c57b624e660 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -100,6 +100,8 @@ static inline u8 kvm_get_shadow_phys_bits(void) return boot_cpu_data.x86_phys_bits; } +u8 kvm_mmu_get_max_tdp_level(void); + void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask); void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask); void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only); @@ -146,6 +148,14 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu) return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu)); } +static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu) +{ + if (!guest_can_use(vcpu, X86_FEATURE_LAM)) + return 0; + + return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57); +} + static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu) { u64 root_hpa = vcpu->arch.mmu->root.hpa; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ff85526a9d481957412c8eeec47813b2eda25d01..31c5562f51b2ab47500bd79af20ccf6ee6feaa6f 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3774,7 +3774,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) hpa_t root; root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu); - root_gfn = root_pgd >> PAGE_SHIFT; + root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT; if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) { mmu->root.hpa = kvm_mmu_get_dummy_root(); @@ -5181,6 +5181,11 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu) return max_tdp_level; } +u8 kvm_mmu_get_max_tdp_level(void) +{ + return tdp_root_level ? tdp_root_level : max_tdp_level; +} + static union kvm_mmu_page_role kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role) diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index decc1f1536694f31529f3d8c2c7cf67d1185a8c0..68f8564d85a99b6577a41a03a4dc33284bea1073 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -13,6 +13,7 @@ #endif /* Page table builder macros common to shadow (host) PTEs and guest PTEs. */ +#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12) #define __PT_LEVEL_SHIFT(level, bits_per_level) \ (PAGE_SHIFT + ((level) - 1) * (bits_per_level)) #define __PT_INDEX(address, level, bits_per_level) \ diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index c6b2c52aceaca822120bc51b5dfb029cd4393dff..ab0ed5d66e0c28d7c470e489575dc5b0e947cd08 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -62,7 +62,7 @@ #endif /* Common logic, but per-type values. These also need to be undefined. 
*/ -#define PT_BASE_ADDR_MASK ((pt_element_t)(((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))) +#define PT_BASE_ADDR_MASK ((pt_element_t)__PT_BASE_ADDR_MASK) #define PT_LVL_ADDR_MASK(lvl) __PT_LVL_ADDR_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS) #define PT_LVL_OFFSET_MASK(lvl) __PT_LVL_OFFSET_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS) #define PT_INDEX(addr, lvl) __PT_INDEX(addr, lvl, PT_LEVEL_BITS) diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 2f4e155080badc5efdbcc93fbc909c5bbcf70094..cd2d6abe4762a7d6acfabdbc1ad2b50ff30fb12c 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -90,6 +90,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, + [CPUID_C000_0006_EAX] = {0xc0000006, 0, CPUID_EAX}, }; /* diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c new file mode 100644 index 0000000000000000000000000000000000000000..147a43e382544fbbfc6cd04c948dc1ce669619c6 --- /dev/null +++ b/arch/x86/kvm/svm/csv.c @@ -0,0 +1,3081 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CSV driver for KVM + * + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "kvm_cache_regs.h" +#include "svm.h" +#include "csv.h" +#include "x86.h" + +#undef pr_fmt +#define pr_fmt(fmt) "CSV: " fmt + +/* Function and variable pointers for hooks */ +struct hygon_kvm_hooks_table hygon_kvm_hooks; + +/* enable/disable CSV3 support */ +static bool csv3_enabled = true; + +static struct kvm_x86_ops csv_x86_ops; +static const char csv_vm_mnonce[] = "VM_ATTESTATION"; +static DEFINE_MUTEX(csv_cmd_batch_mutex); + +static int __csv_issue_ringbuf_cmds(int fd, int *psp_ret) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = csv_issue_ringbuf_cmds_external_user(f.file, psp_ret); + + fdput(f); + return ret; +} + +static int csv_issue_ringbuf_cmds(struct kvm *kvm, int *psp_ret) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + + return __csv_issue_ringbuf_cmds(sev->fd, psp_ret); +} + +int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_attestation_report *data = NULL; + struct page **pages; + unsigned long guest_uaddr, n; + int ret = 0, offset, error; + + if (!sev_guest(kvm) || !hygon_kvm_hooks.sev_hooks_installed) + return -ENOTTY; + + /* + * The physical address of the guest must be valid and page aligned, and + * the length of the guest memory region must be page size aligned. + */ + if (!gpa || (gpa & ~PAGE_MASK) || (len & ~PAGE_MASK)) { + pr_err("invalid guest address or length\n"); + return -EFAULT; + } + + guest_uaddr = gfn_to_hva(kvm, gpa_to_gfn(gpa)); + pages = hygon_kvm_hooks.sev_pin_memory(kvm, guest_uaddr, len, &n, 1); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + /* + * The attestation report must be copied into a contiguous memory region, + * so let's verify that the userspace memory pages are contiguous before we + * issue the command.
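+ * (The get_num_contig_pages() hook is expected to return the length of the leading physically contiguous run of the pinned pages, so any result other than n means the buffer is fragmented.)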
+ */ + if (hygon_kvm_hooks.get_num_contig_pages(0, pages, n) != n) { + ret = -EINVAL; + goto e_unpin_memory; + } + + ret = -ENOMEM; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_unpin_memory; + + /* csv_vm_mnonce indicates attestation request from guest */ + if (sizeof(csv_vm_mnonce) >= sizeof(data->mnonce)) { + ret = -EINVAL; + goto e_free; + } + + memcpy(data->mnonce, csv_vm_mnonce, sizeof(csv_vm_mnonce)); + + offset = guest_uaddr & (PAGE_SIZE - 1); + data->address = __sme_page_pa(pages[0]) + offset; + data->len = len; + + data->handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, + data, &error); + + if (ret) + pr_err("vm attestation ret %#x, error %#x\n", ret, error); + +e_free: + kfree(data); +e_unpin_memory: + hygon_kvm_hooks.sev_unpin_memory(kvm, pages, n); + return ret; +} + +/*--1024--1023--1024--1023--*/ +#define TRANS_MEMPOOL_1ST_BLOCK_OFFSET 0 +#define TRANS_MEMPOOL_2ND_BLOCK_OFFSET (1024 << PAGE_SHIFT) +#define TRANS_MEMPOOL_3RD_BLOCK_OFFSET (2047 << PAGE_SHIFT) +#define TRANS_MEMPOOL_4TH_BLOCK_OFFSET (3071 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCKS_MAX_OFFSET (4094 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCK_NUM 4 +#define TRANS_MEMPOOL_BLOCK_SIZE (1024 * PAGE_SIZE) + +static size_t g_mempool_offset; +void *g_trans_mempool[TRANS_MEMPOOL_BLOCK_NUM] = { 0, }; + +static void csv_reset_mempool_offset(void) +{ + g_mempool_offset = 0; +} + +static void csv_free_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + csv_reset_mempool_offset(); +} + +static int csv_alloc_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + WARN_ONCE(g_trans_mempool[i], + "g_trans_mempool[%d] was tainted\n", i); + + g_trans_mempool[i] = kzalloc(TRANS_MEMPOOL_BLOCK_SIZE, GFP_KERNEL); + if (!g_trans_mempool[i]) + goto free_trans_mempool; + } + + csv_reset_mempool_offset(); + return 0; + +free_trans_mempool: + csv_free_trans_mempool(); + pr_warn("Failed to allocate mem pool, CSV(2) live migration will be very slow\n"); + + return -ENOMEM; +} + +static void __maybe_unused *get_trans_data_from_mempool(size_t size) +{ + void *trans = NULL; + char *trans_data = NULL; + int i; + size_t offset; + + if (g_mempool_offset < TRANS_MEMPOOL_2ND_BLOCK_OFFSET) { + i = 0; + offset = g_mempool_offset - TRANS_MEMPOOL_1ST_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_3RD_BLOCK_OFFSET) { + i = 1; + offset = g_mempool_offset - TRANS_MEMPOOL_2ND_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_4TH_BLOCK_OFFSET) { + i = 2; + offset = g_mempool_offset - TRANS_MEMPOOL_3RD_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_BLOCKS_MAX_OFFSET) { + i = 3; + offset = g_mempool_offset - TRANS_MEMPOOL_4TH_BLOCK_OFFSET; + } else { + pr_err("mempool is full (offset: %zu)\n", g_mempool_offset); + return NULL; + } + + trans_data = (char *)g_trans_mempool[i]; + if (!trans_data) + return NULL; + + trans = &trans_data[offset]; + g_mempool_offset += size; + + return trans; +} + +static int +csv_send_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data *data; + struct kvm_sev_send_update_data params; + struct csv_ringbuf_info_item *item; + void *hdr, *trans_data; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; +
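+ /* Stage a single SEND_UPDATE_DATA command: validate the request, pin the guest page, allocate the header/trans buffers, and queue the command; the queued commands are only issued to the PSP later, from csv_command_batch(). */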
+ if (copy_from_user(&params, (void __user *)data_ptr, + sizeof(struct kvm_sev_send_update_data))) + return -EFAULT; + + /* + * userspace shouldn't query either header or trans length in ringbuf + * mode. + */ + if (!params.trans_len || !params.hdr_len) + return -EINVAL; + + if (!params.trans_uaddr || !params.guest_uaddr || + !params.guest_len || !params.hdr_uaddr) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + /* Pin guest memory */ + guest_page = hygon_kvm_hooks.sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page); + + /* Allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kzalloc(params.hdr_len, GFP_KERNEL); + if (!hdr) + goto e_unpin; + + trans_data = get_trans_data_from_mempool(params.trans_len); + if (!trans_data) + goto e_free_hdr; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans_data); + data->trans_len = params.trans_len; + + /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= *hygon_kvm_hooks.sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_SEND_UPDATE_DATA, data, 0); + if (ret) + goto e_free; + + /* + * Create item to save page info and pointer, which will be freed + * in csv_command_batch() because it will be used after PSP + * return for copy_to_user. + */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_free; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->hdr_uaddr = params.hdr_uaddr; + item->hdr_len = params.hdr_len; + item->trans_vaddr = (uintptr_t)trans_data; + item->trans_uaddr = params.trans_uaddr; + item->trans_len = params.trans_len; + item->data_vaddr = (uintptr_t)data; + + ringbuf_infos->item[ringbuf_infos->num++] = item; + + /* copied to the ring buffer successfully; the data is freed after the commands complete */ + return 0; + +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); +e_unpin: + hygon_kvm_hooks.sev_unpin_memory(kvm, guest_page, n); + return ret; +} + +static int +csv_send_update_data_copy_to_user(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i, ret = 0; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)item->trans_uaddr, + (void *)item->trans_vaddr, item->trans_len)) { + ret = -EFAULT; + break; + } + + /* Copy packet header to userspace.
*/ + if (copy_to_user((void __user *)item->hdr_uaddr, + (void *)item->hdr_vaddr, item->hdr_len)) { + ret = -EFAULT; + break; + } + } + + return ret; +} + +static int +csv_receive_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_data params; + struct sev_data_receive_update_data *data; + struct csv_ringbuf_info_item *item; + void *hdr = NULL, *trans = NULL; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -EINVAL; + + if (copy_from_user(&params, (void __user *)data_ptr, + sizeof(struct kvm_sev_receive_update_data))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_uaddr || !params.guest_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + ret = -ENOMEM; + trans = get_trans_data_from_mempool(params.trans_len); + if (!trans) + goto e_free_hdr; + + if (copy_from_user(trans, (void __user *)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans); + data->trans_len = params.trans_len; + + /* Pin guest memory */ + guest_page = hygon_kvm_hooks.sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 1); + if (IS_ERR(guest_page)) { + ret = PTR_ERR(guest_page); + goto e_free; + } + + /* + * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP + * encrypts the written data with the guest's key, and the cache may + * contain dirty, unencrypted data. + */ + hygon_kvm_hooks.sev_clflush_pages(guest_page, n); + + /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= *hygon_kvm_hooks.sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_RECEIVE_UPDATE_DATA, data, 0); + + if (ret) + goto e_unpin; + + /* + * Create item to save page info and pointer, which will be freed + * in csv_command_batch() because it will be used after PSP + * return for copy_to_user.
+ */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_unpin; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->trans_vaddr = (uintptr_t)trans; + item->data_vaddr = (uintptr_t)data; + + ringbuf_infos->item[ringbuf_infos->num++] = item; + + /* copied to the ring buffer successfully; the data is freed after the commands complete */ + return 0; + +e_unpin: + hygon_kvm_hooks.sev_unpin_memory(kvm, guest_page, n); +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); + + return ret; +} + +static int csv_ringbuf_infos_free(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + if (item) { + if (item->data_vaddr) + kfree((void *)item->data_vaddr); + + if (item->hdr_vaddr) + kfree((void *)item->hdr_vaddr); + + if (item->pages) + hygon_kvm_hooks.sev_unpin_memory(kvm, item->pages, + item->n); + + kfree(item); + + ringbuf_infos->item[i] = NULL; + } + } + + return 0; +} + +typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos); +typedef int (*csv_ringbuf_output_fn)(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos); + +static int get_cmd_helpers(__u32 cmd, + csv_ringbuf_input_fn *to_ringbuf_fn, + csv_ringbuf_output_fn *to_user_fn) +{ + int ret = 0; + + /* select the helpers that copy commands to the ring buffer */ + switch (cmd) { + case KVM_SEV_SEND_UPDATE_DATA: + *to_ringbuf_fn = csv_send_update_data_to_ringbuf; + *to_user_fn = csv_send_update_data_copy_to_user; + break; + case KVM_SEV_RECEIVE_UPDATE_DATA: + *to_ringbuf_fn = csv_receive_update_data_to_ringbuf; + *to_user_fn = NULL; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + int ret; + struct kvm_csv_command_batch params; + uintptr_t node_addr; + struct csv_ringbuf_infos *ringbuf_infos; + csv_ringbuf_input_fn csv_cmd_to_ringbuf_fn = NULL; + csv_ringbuf_output_fn csv_copy_to_user_fn = NULL; + int prio = CSV_COMMAND_PRIORITY_HIGH; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_command_batch))) + return -EFAULT; + + /* return directly if node list is NULL */ + if (!params.csv_batch_list_uaddr) + return 0; + + /* ring buffer init */ + if (csv_ring_buffer_queue_init()) + return -EINVAL; + + if (get_cmd_helpers(params.command_id, + &csv_cmd_to_ringbuf_fn, &csv_copy_to_user_fn)) { + ret = -EINVAL; + goto err_free_ring_buffer; + } + + ringbuf_infos = kzalloc(sizeof(*ringbuf_infos), GFP_KERNEL); + if (!ringbuf_infos) { + ret = -ENOMEM; + goto err_free_ring_buffer; + } + + node_addr = (uintptr_t)params.csv_batch_list_uaddr; + while (node_addr) { + struct kvm_csv_batch_list_node node; + + if (copy_from_user(&node, (void __user *)node_addr, + sizeof(struct kvm_csv_batch_list_node))) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + if (ringbuf_infos->num > SVM_RING_BUFFER_MAX) { + pr_err("%s: ring num is too large:%d, cmd:0x%x\n", + __func__, ringbuf_infos->num, params.command_id); + + ret = -EINVAL; + goto err_free_ring_buffer_infos_items; + } + + if (csv_cmd_to_ringbuf_fn(kvm, prio, + (uintptr_t)node.cmd_data_addr, + ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + /* 1st half set to HIGH queue, 2nd half set to LOW queue */ + if (ringbuf_infos->num == SVM_RING_BUFFER_MAX / 2) + prio =
CSV_COMMAND_PRIORITY_LOW; + + node_addr = node.next_cmd_addr; + } + + /* ring buffer process */ + ret = csv_issue_ringbuf_cmds(kvm, &argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + ret = csv_check_stat_queue_status(&argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + if (csv_copy_to_user_fn && csv_copy_to_user_fn(kvm, ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + +err_free_ring_buffer_infos_items: + csv_ringbuf_infos_free(kvm, ringbuf_infos); + kfree(ringbuf_infos); + csv_reset_mempool_offset(); + +err_free_ring_buffer: + csv_ring_buffer_queue_free(); + + return ret; +} + +/* Userspace wants to query either header or trans length. */ +static int +__csv_send_update_vmsa_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv_send_update_vmsa *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + int ret; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + return -ENOMEM; + + vmsa->handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, + vmsa, &argp->error); + + params->hdr_len = vmsa->hdr_len; + params->trans_len = vmsa->trans_len; + + if (copy_to_user((void __user *)argp->data, params, + sizeof(struct kvm_csv_send_update_vmsa))) + ret = -EFAULT; + + kfree(vmsa); + return ret; +} + +static int csv_send_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + struct kvm_csv_send_update_vmsa params; + struct kvm_vcpu *vcpu; + void *hdr, *trans_data; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_send_update_vmsa))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return __csv_send_update_vmsa_query_lengths(kvm, argp, &params); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + /* allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) + return ret; + + trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT); + if (!trans_data) + goto e_free_hdr; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + goto e_free_trans_data; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans_data); + vmsa->trans_len = params.trans_len; + + /* The SEND_UPDATE_VMSA command requires C-bit to be always set. */ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | + *hygon_kvm_hooks.sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, + vmsa, &argp->error); + + if (ret) + goto e_free; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free; + } + + /* Copy packet header to userspace.
*/ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) + ret = -EFAULT; + +e_free: + kfree(vmsa); +e_free_trans_data: + kfree(trans_data); +e_free_hdr: + kfree(hdr); + + return ret; +} + +static int csv_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_receive_update_vmsa params; + struct sev_data_receive_update_vmsa *vmsa; + struct kvm_vcpu *vcpu; + void *hdr = NULL, *trans = NULL; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_receive_update_vmsa))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto e_free_hdr; + } + + ret = -ENOMEM; + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + goto e_free_trans; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans); + vmsa->trans_len = params.trans_len; + + /* + * Flush before RECEIVE_UPDATE_VMSA, the PSP encrypts the + * written VMSA memory content with the guest's key, and + * the cache may contain dirty, unencrypted data. + */ + clflush_cache_range(to_svm(vcpu)->sev_es.vmsa, PAGE_SIZE); + + /* The RECEIVE_UPDATE_VMSA command requires C-bit to be always set. */ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | + *hygon_kvm_hooks.sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_VMSA, + vmsa, &argp->error); + + if (!ret) { + vcpu->arch.guest_state_protected = true; + + /* + * CSV2 guest mandates LBR Virtualization to be _always_ ON. + * Enable it only after setting guest_state_protected because + * KVM_SET_MSRS allows dynamic toggling of LBRV (for performance + * reasons) on write access to MSR_IA32_DEBUGCTLMSR when + * guest_state_protected is not set.
+ */ + svm_enable_lbrv(vcpu); + } + + kfree(vmsa); +e_free_trans: + kfree(trans); +e_free_hdr: + kfree(hdr); + + return ret; +} + +struct encrypt_data_block { + struct { + u64 npages: 12; + u64 pfn: 52; + } entry[512]; +}; + +union csv3_page_attr { + struct { + u64 reserved: 1; + u64 rw: 1; + u64 reserved1: 49; + u64 mmio: 1; + u64 reserved2: 12; + }; + u64 val; +}; + +struct guest_paddr_block { + struct { + u64 share: 1; + u64 reserved: 11; + u64 gfn: 52; + } entry[512]; +}; + +struct trans_paddr_block { + u64 trans_paddr[512]; +}; + +struct vmcb_paddr_block { + u64 vmcb_paddr[512]; +}; + +enum csv3_pg_level { + CSV3_PG_LEVEL_NONE, + CSV3_PG_LEVEL_4K, + CSV3_PG_LEVEL_2M, + CSV3_PG_LEVEL_NUM +}; + +struct shared_page_block { + struct list_head list; + struct page **pages; + u64 count; +}; + +struct kvm_csv_info { + struct kvm_sev_info *sev; + + bool csv3_active; /* CSV3 enabled guest */ + + /* List of shared pages */ + u64 total_shared_page_count; + struct list_head shared_pages_list; + void *cached_shared_page_block; + struct mutex shared_page_block_lock; + + struct list_head smr_list; /* List of guest secure memory regions */ + unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ + + /* The following 5 fields record the extension status for the current VM */ + bool fw_ext_valid; /* if @fw_ext field is valid */ + u32 fw_ext; /* extensions supported by current platform */ + bool kvm_ext_valid; /* if @kvm_ext field is valid */ + u32 kvm_ext; /* extensions supported by KVM */ + u32 inuse_ext; /* extensions in use by the current VM */ +}; + +struct kvm_svm_csv { + struct kvm_svm kvm_svm; + struct kvm_csv_info csv_info; +}; + +struct secure_memory_region { + struct list_head list; + u64 npages; + u64 hpa; +}; + +static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) +{ + return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); +} + +static int to_csv3_pg_level(int level) +{ + int ret; + + switch (level) { + case PG_LEVEL_4K: + ret = CSV3_PG_LEVEL_4K; + break; + case PG_LEVEL_2M: + ret = CSV3_PG_LEVEL_2M; + break; + default: + ret = CSV3_PG_LEVEL_NONE; + } + + return ret; +} + +static bool csv3_guest(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + + return sev_es_guest(kvm) && csv->csv3_active; +} + +static inline void csv3_init_update_npt(struct csv3_data_update_npt *update_npt, + gpa_t gpa, u32 error, u32 handle) +{ + memset(update_npt, 0x00, sizeof(*update_npt)); + + update_npt->gpa = gpa & PAGE_MASK; + update_npt->error_code = error; + update_npt->handle = handle; +} + +static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_csv3_init_data params; + + if (unlikely(csv->csv3_active)) + return -EINVAL; + + if (unlikely(!sev->es_active)) + return -EINVAL; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + csv->csv3_active = true; + csv->sev = sev; + csv->nodemask = (unsigned long)params.nodemask; + + INIT_LIST_HEAD(&csv->shared_pages_list); + INIT_LIST_HEAD(&csv->smr_list); + mutex_init(&csv->shared_page_block_lock); + + return 0; +} + +static bool csv3_is_mmio_pfn(kvm_pfn_t pfn) +{ + return !e820__mapped_raw_any(pfn_to_hpa(pfn), + pfn_to_hpa(pfn + 1) - 1, + E820_TYPE_RAM); +} + +static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + struct
kvm_memory_slot *memslot; + struct secure_memory_region *smr; + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_set_guest_private_memory *set_guest_private_memory; + struct csv3_data_memory_region *regions; + nodemask_t nodemask; + nodemask_t *nodemask_ptr; + + LIST_HEAD(tmp_list); + struct list_head *pos, *q; + u32 i = 0, count = 0, remainder; + int ret = 0; + u64 size = 0, nr_smr = 0, nr_pages = 0; + u32 smr_entry_shift; + int bkt; + + unsigned int flags = FOLL_HWPOISON; + int npages; + struct page *page; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + /* The smr_list should be initialized only once */ + if (!list_empty(&csv->smr_list)) + return -EFAULT; + + nodes_clear(nodemask); + for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG) + if (i < MAX_NUMNODES) + node_set(i, nodemask); + + nodemask_ptr = csv->nodemask ? &nodemask : &node_online_map; + + set_guest_private_memory = kzalloc(sizeof(*set_guest_private_memory), + GFP_KERNEL_ACCOUNT); + if (!set_guest_private_memory) + return -ENOMEM; + + regions = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!regions) { + kfree(set_guest_private_memory); + return -ENOMEM; + } + + /* Get guest secure memory size */ + kvm_for_each_memslot(memslot, bkt, slots) { + npages = get_user_pages_unlocked(memslot->userspace_addr, 1, + &page, flags); + if (npages != 1) + continue; + + nr_pages += memslot->npages; + + put_page(page); + } + + /* + * NPT secure memory size + * + * PTEs_entries = nr_pages + * PDEs_entries = nr_pages / 512 + * PDPEs_entries = nr_pages / (512 * 512) + * PML4Es_entries = nr_pages / (512 * 512 * 512) + * + * Totals_entries = nr_pages + nr_pages / 512 + nr_pages / (512 * 512) + + * nr_pages / (512 * 512 * 512) <= nr_pages + nr_pages / 256 + * + * Total_NPT_size = (Totals_entries / 512) * PAGE_SIZE = ((nr_pages + + * nr_pages / 256) / 512) * PAGE_SIZE = nr_pages * 8 + nr_pages / 32 + * <= nr_pages * 9 + * + */ + smr_entry_shift = csv_get_smr_entry_shift(); + size = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift) + + ALIGN(nr_pages * 9, 1UL << smr_entry_shift); + nr_smr = size >> smr_entry_shift; + remainder = nr_smr; + for (i = 0; i < nr_smr; i++) { + smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT); + if (!smr) { + ret = -ENOMEM; + goto e_free_smr; + } + + smr->hpa = csv_alloc_from_contiguous((1UL << smr_entry_shift), + nodemask_ptr, + get_order(1 << smr_entry_shift)); + if (!smr->hpa) { + kfree(smr); + ret = -ENOMEM; + goto e_free_smr; + } + + smr->npages = ((1UL << smr_entry_shift) >> PAGE_SHIFT); + list_add_tail(&smr->list, &tmp_list); + + regions[count].size = (1UL << smr_entry_shift); + regions[count].base_address = smr->hpa; + count++; + + if (count >= (PAGE_SIZE / sizeof(regions[0])) || (remainder == count)) { + set_guest_private_memory->nregions = count; + set_guest_private_memory->handle = sev->handle; + set_guest_private_memory->regions_paddr = __sme_pa(regions); + + /* set secure memory region for launch encrypt data */ + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, + CSV3_CMD_SET_GUEST_PRIVATE_MEMORY, + set_guest_private_memory, &argp->error); + if (ret) + goto e_free_smr; + + memset(regions, 0, PAGE_SIZE); + remainder -= count; + count = 0; + } + } + + list_splice(&tmp_list, &csv->smr_list); + + goto done; + +e_free_smr: + if (!list_empty(&tmp_list)) { + list_for_each_safe(pos, q, &tmp_list) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + csv_release_to_contiguous(smr->hpa, + smr->npages << PAGE_SHIFT); + 
list_del(&smr->list); + kfree(smr); + } + } + } +done: + kfree(set_guest_private_memory); + kfree(regions); + return ret; +} + +/** + * csv3_launch_encrypt_data_alt_1 - The legacy handler to encrypt CSV3 + * guest's memory before VMRUN. + */ +static int csv3_launch_encrypt_data_alt_1(struct kvm *kvm, + struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_csv3_launch_encrypt_data params; + struct csv3_data_launch_encrypt_data *encrypt_data = NULL; + struct encrypt_data_block *blocks = NULL; + u8 *data = NULL; + u32 offset; + u32 num_entries, num_entries_in_block; + u32 num_blocks, num_blocks_max; + u32 i, n; + unsigned long pfn, pfn_sme_mask; + int ret = 0; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(params))) { + ret = -EFAULT; + goto exit; + } + + if ((params.len & ~PAGE_MASK) || !params.len || !params.uaddr) { + ret = -EINVAL; + goto exit; + } + + /* + * If userspace requests to invoke CSV3_CMD_SET_GUEST_PRIVATE_MEMORY + * explicitly, we should not call csv3_set_guest_private_memory() + * here. + */ + if (!(csv->inuse_ext & KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM)) { + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm, argp); + if (ret) + goto exit; + } + + num_entries = params.len / PAGE_SIZE; + num_entries_in_block = ARRAY_SIZE(blocks->entry); + num_blocks = (num_entries + num_entries_in_block - 1) / num_entries_in_block; + num_blocks_max = ARRAY_SIZE(encrypt_data->data_blocks); + + if (num_blocks >= num_blocks_max) { + ret = -EINVAL; + goto exit; + } + + data = vzalloc(params.len); + if (!data) { + ret = -ENOMEM; + goto exit; + } + if (copy_from_user(data, (void __user *)params.uaddr, params.len)) { + ret = -EFAULT; + goto data_free; + } + + blocks = vzalloc(num_blocks * sizeof(*blocks)); + if (!blocks) { + ret = -ENOMEM; + goto data_free; + } + + for (offset = 0, i = 0, n = 0; offset < params.len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + data); + pfn_sme_mask = __sme_set(pfn << PAGE_SHIFT) >> PAGE_SHIFT; + if (offset && ((blocks[n].entry[i].pfn + 1) == pfn_sme_mask)) + blocks[n].entry[i].npages += 1; + else { + if (offset) { + i = (i + 1) % num_entries_in_block; + n = (i == 0) ? (n + 1) : n; + } + blocks[n].entry[i].pfn = pfn_sme_mask; + blocks[n].entry[i].npages = 1; + } + } + + encrypt_data = kzalloc(sizeof(*encrypt_data), GFP_KERNEL); + if (!encrypt_data) { + ret = -ENOMEM; + goto block_free; + } + + encrypt_data->handle = csv->sev->handle; + encrypt_data->length = params.len; + encrypt_data->gpa = params.gpa; + for (i = 0; i <= n; i++) { + encrypt_data->data_blocks[i] = + __sme_set(vmalloc_to_pfn((void *)blocks + i * sizeof(*blocks)) << PAGE_SHIFT); + } + + clflush_cache_range(data, params.len); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_DATA, + encrypt_data, &argp->error); + + kfree(encrypt_data); +block_free: + vfree(blocks); +data_free: + vfree(data); +exit: + return ret; +} + +#define MAX_ENTRIES_PER_BLOCK \ + (sizeof(((struct encrypt_data_block *)0)->entry) / \ + sizeof(((struct encrypt_data_block *)0)->entry[0])) +#define MAX_BLOCKS_PER_CSV3_LUP_DATA \ + (sizeof(((struct csv3_data_launch_encrypt_data *)0)->data_blocks) / \ + sizeof(((struct csv3_data_launch_encrypt_data *)0)->data_blocks[0])) +#define MAX_ENTRIES_PER_CSV3_LUP_DATA \ + (MAX_BLOCKS_PER_CSV3_LUP_DATA * MAX_ENTRIES_PER_BLOCK) + +/** + * __csv3_launch_encrypt_data - The helper for handler + * csv3_launch_encrypt_data_alt_2. 
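+ * + * @kvm: the VM whose memory is being encrypted + * @argp: the command buffer, used to return the firmware error code + * @params: the LAUNCH_ENCRYPT_DATA parameters copied from userspace + * @src_buf: vmalloc'd staging buffer holding a copy of the source data + * @start_pgoff: page offset into @src_buf of the first page to process + * @end_pgoff: page offset into @src_buf one past the last page to process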
+ */ +static int __csv3_launch_encrypt_data(struct kvm *kvm, + struct kvm_sev_cmd *argp, + struct kvm_csv3_launch_encrypt_data *params, + void *src_buf, + unsigned int start_pgoff, + unsigned int end_pgoff) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_launch_encrypt_data *data = NULL; + struct encrypt_data_block *block = NULL; + struct page **pages = NULL; + unsigned long len, remain_len; + unsigned long pfn, pfn_sme_mask, last_pfn; + unsigned int pgoff = start_pgoff; + int i, j; + int ret = -ENOMEM; + + /* Alloc command buffer for CSV3_CMD_LAUNCH_ENCRYPT_DATA command */ + data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); + if (!data) + return -ENOMEM; + + /* Alloc pages for data_blocks[] in the command buffer */ + len = ARRAY_SIZE(data->data_blocks) * sizeof(struct page *); + pages = kzalloc(len, GFP_KERNEL_ACCOUNT); + if (!pages) + goto e_free_data; + + for (i = 0; i < ARRAY_SIZE(data->data_blocks); i++) { + pages[i] = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!pages[i]) + goto e_free_pages; + } + + i = 0; + while (i < ARRAY_SIZE(data->data_blocks) && pgoff < end_pgoff) { + block = (struct encrypt_data_block *)page_to_virt(pages[i]); + + j = 0; + last_pfn = 0; + while (j < ARRAY_SIZE(block->entry) && pgoff < end_pgoff) { + pfn = vmalloc_to_pfn(src_buf + (pgoff << PAGE_SHIFT)); + pfn_sme_mask = __sme_set(pfn << PAGE_SHIFT) >> PAGE_SHIFT; + + /* + * One entry can record a number of contiguous physical + * pages. If the current page is not adjacent to the + * previous physical page, we should record the page in + * the next entry. If the entries of the current block + * are used up, we should try the next block. + */ + if (last_pfn && (last_pfn + 1 == pfn)) { + block->entry[j].npages++; + } else if (j < (ARRAY_SIZE(block->entry) - 1)) { + /* @last_pfn == 0 means fill in entry[0] */ + if (likely(last_pfn != 0)) + j++; + block->entry[j].pfn = pfn_sme_mask; + block->entry[j].npages = 1; + } else { + break; + } + + /* + * We recorded one page, so increase the page offset. + * We also record the pfn of the current page so that we + * can record the contiguous physical pages into one + * entry. + */ + last_pfn = pfn; + pgoff++; + } + + i++; + } + + if (pgoff < end_pgoff) { + pr_err("CSV3: Failed to fill in LAUNCH_ENCRYPT_DATA command!\n"); + goto e_free_pages; + } + + len = (end_pgoff - start_pgoff) << PAGE_SHIFT; + clflush_cache_range(src_buf + (start_pgoff << PAGE_SHIFT), len); + + /* Fill in command buffer */ + data->handle = csv->sev->handle; + + if (start_pgoff == 0) { + data->gpa = params->gpa; + len -= params->gpa & ~PAGE_MASK; + } else { + data->gpa = (params->gpa & PAGE_MASK) + (start_pgoff << PAGE_SHIFT); + } + remain_len = params->len - (data->gpa - params->gpa); + + data->length = (len <= remain_len) ? len : remain_len; + + for (j = 0; j < i; j++) + data->data_blocks[j] = __sme_set(page_to_phys(pages[j])); + + /* Issue command */ + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_DATA, + data, &argp->error); + +e_free_pages: + for (i = 0; i < ARRAY_SIZE(data->data_blocks); i++) { + if (pages[i]) + __free_page(pages[i]); + } + kfree(pages); +e_free_data: + kfree(data); + + return ret; +} + +/** + * csv3_launch_encrypt_data_alt_2 - The handler to encrypt CSV3 + * guest's memory before VMRUN. This handler supports issuing the API + * command multiple times; neither the GPA nor the length of the memory + * region is required to be 4K-aligned. 
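+ * + * The source buffer is processed in chunks of at most + * MAX_ENTRIES_PER_CSV3_LUP_DATA pages; __csv3_launch_encrypt_data() is + * invoked once per chunk, see the loop below.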
+ */ +static int csv3_launch_encrypt_data_alt_2(struct kvm *kvm, + struct kvm_sev_cmd *argp) +{ + struct kvm_csv3_launch_encrypt_data params; + void *buffer = NULL; + unsigned long len; + unsigned int total_pages, start_pgoff, next_pgoff; + int ret = 0; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(params))) { + return -EFAULT; + } + + /* Both the GPA and the length must be at least 16-byte aligned */ + if (!params.len || + !params.uaddr || + !IS_ALIGNED(params.len, 16) || + !IS_ALIGNED(params.gpa, 16)) { + return -EINVAL; + } + + /* + * Alloc buffer to save source data. When we copy source data from + * userspace to the buffer, the data in the first page of the buffer + * should keep the same offset as params.gpa. + */ + len = PAGE_ALIGN((params.gpa & ~PAGE_MASK) + params.len); + total_pages = len >> PAGE_SHIFT; + next_pgoff = 0; + + buffer = vzalloc(len); + if (!buffer) + return -ENOMEM; + + if (copy_from_user(buffer + (params.gpa & ~PAGE_MASK), + (void __user *)params.uaddr, params.len)) { + ret = -EFAULT; + goto e_free_buffer; + } + + /* + * If the source data is too large, we should issue the command more + * than once. The LAUNCH_ENCRYPT_DATA API updates not only the + * measurement of the data, but also the measurement of the metadata + * corresponding to the data. The guest owner is obligated to verify + * the launch measurement, so the guest owner must be aware of the + * launch measurement of each LAUNCH_ENCRYPT_DATA API command. If we + * process more than MAX_ENTRIES_PER_CSV3_LUP_DATA pages in each API + * command, the guest owner would not be able to calculate the correct + * measurement and would fail to verify the launch measurement. For + * this reason, we limit the maximum number of pages processed by each + * API command to MAX_ENTRIES_PER_CSV3_LUP_DATA. + */ + while (next_pgoff < total_pages) { + start_pgoff = next_pgoff; + next_pgoff += MAX_ENTRIES_PER_CSV3_LUP_DATA; + + if (next_pgoff > total_pages) + next_pgoff = total_pages; + + ret = __csv3_launch_encrypt_data(kvm, argp, &params, + buffer, start_pgoff, next_pgoff); + if (ret) + goto e_free_buffer; + } + +e_free_buffer: + vfree(buffer); + return ret; +} + +static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (!(csv->inuse_ext & KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA)) + return csv3_launch_encrypt_data_alt_1(kvm, argp); + + return csv3_launch_encrypt_data_alt_2(kvm, argp); +} + +static int csv3_sync_vmsa(struct vcpu_svm *svm) +{ + struct sev_es_save_area *save = svm->sev_es.vmsa; + + /* Check some debug related fields before encrypting the VMSA */ + if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) + return -EINVAL; + + /* + * CSV3 will use a VMSA that is pointed to by the VMCB, not + * the traditional VMSA that is part of the VMCB. Copy the + * traditional VMSA as it has been built so far (in prep + * for LAUNCH_ENCRYPT_VMCB) to be the initial CSV3 state. + */ + memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); + + /* Sync registers per spec. 
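+ * KVM caches the guest GPRs in vcpu->arch.regs rather than in the save + * area, so copy them into the VMSA before it is encrypted and measured.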
*/ + save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; + save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX]; + save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; + save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; + save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP]; + save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP]; + save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI]; + save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI]; +#ifdef CONFIG_X86_64 + save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8]; + save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9]; + save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10]; + save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11]; + save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12]; + save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13]; + save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14]; + save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15]; +#endif + save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; + + /* Sync some non-GPR registers before encrypting */ + save->xcr0 = svm->vcpu.arch.xcr0; + save->pkru = svm->vcpu.arch.pkru; + save->xss = svm->vcpu.arch.ia32_xss; + save->dr6 = svm->vcpu.arch.dr6; + + pr_debug("Virtual Machine Save Area (VMSA):\n"); + print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false); + + return 0; +} + +static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_launch_encrypt_vmcb *encrypt_vmcb = NULL; + struct kvm_vcpu *vcpu; + int ret = 0; + unsigned long i = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + encrypt_vmcb = kzalloc(sizeof(*encrypt_vmcb), GFP_KERNEL); + if (!encrypt_vmcb) { + ret = -ENOMEM; + goto exit; + } + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = csv3_sync_vmsa(svm); + if (ret) + goto e_free; + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + clflush_cache_range(svm->vmcb, PAGE_SIZE); + encrypt_vmcb->handle = csv->sev->handle; + encrypt_vmcb->vcpu_id = i; + encrypt_vmcb->vmsa_addr = __sme_pa(svm->sev_es.vmsa); + encrypt_vmcb->vmsa_len = PAGE_SIZE; + encrypt_vmcb->shadow_vmcb_addr = __sme_pa(svm->vmcb); + encrypt_vmcb->shadow_vmcb_len = PAGE_SIZE; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, + CSV3_CMD_LAUNCH_ENCRYPT_VMCB, + encrypt_vmcb, &argp->error); + if (ret) + goto e_free; + + svm->current_vmcb->pa = encrypt_vmcb->secure_vmcb_addr; + svm->vcpu.arch.guest_state_protected = true; + + /* + * CSV3 guest mandates LBR Virtualization to be _always_ ON. + * Enable it only after setting guest_state_protected because + * KVM_SET_MSRS allows dynamic toggling of LBRV (for performance + * reason) on write access to MSR_IA32_DEBUGCTLMSR when + * guest_state_protected is not set. + */ + svm_enable_lbrv(vcpu); + } + +e_free: + kfree(encrypt_vmcb); +exit: + return ret; +} + +/* Userspace wants to query either header or trans length. 
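+ * Issuing the command with zeroed lengths makes the firmware fill in the + * required hdr_len and trans_len, which are then copied back to userspace.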
*/ +static int +csv3_send_encrypt_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_data *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +#define CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE 0x00000000 +#define CSV3_SEND_ENCRYPT_DATA_SET_READONLY 0x00000001 +static int csv3_send_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + struct kvm_csv3_send_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + u32 offset; + int ret = 0; + int i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_data_query_lengths(kvm, argp, &params); + + if (!params.trans_uaddr || !params.guest_addr_data || + !params.guest_addr_len || !params.hdr_uaddr) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + if ((params.trans_len & PAGE_MASK) == 0 || + (params.trans_len & ~PAGE_MASK) != 0) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + + data.flag = CSV3_SEND_ENCRYPT_DATA_SET_READONLY; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); + if (ret) + goto e_free_trans_data; + + kvm_flush_remote_tlbs(kvm); + + data.flag = CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, 
CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); + if (ret) + goto e_free_trans_data; + + ret = -EFAULT; + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) + goto e_free_trans_data; + + /* copy guest address block to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.guest_addr_data, + guest_block, params.guest_addr_len)) + goto e_free_trans_data; + + /* copy packet header to userspace. */ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) + goto e_free_trans_data; + + ret = 0; +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +/* Userspace wants to query either header or trans length. */ +static int +csv3_send_encrypt_context_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_context *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, + &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +static int csv3_send_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + struct kvm_csv3_send_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + unsigned long pfn; + unsigned long i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_context_query_lengths(kvm, argp, &params); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.handle = sev->handle; + + /* flush hdr, trans data, trans block, secure VMSAs */ + wbinvd_on_all_cpus(); + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, + &data, &argp->error); + + if (ret) + goto e_free_trans_data; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + 
goto e_free_trans_data; + } + + /* copy packet header to userspace. */ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +static int csv3_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_receive_encrypt_data data; + struct kvm_csv3_receive_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + int i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (unlikely(list_empty(&csv->smr_list))) { + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm, argp); + if (ret) + goto exit; + } + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_addr_data || !params.guest_addr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_DATA, + &data, &argp->error); + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +static int csv3_receive_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = 
&to_kvm_svm(kvm)->sev_info; + struct csv3_data_receive_encrypt_context data; + struct kvm_csv3_receive_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct vmcb_paddr_block *shadow_vmcb_block; + struct vmcb_paddr_block *secure_vmcb_block; + unsigned long pfn; + u32 offset; + int ret = 0; + struct kvm_vcpu *vcpu; + unsigned long i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + if (!params.trans_uaddr || !params.trans_len || + !params.hdr_uaddr || !params.hdr_len) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + secure_vmcb_block = kzalloc(sizeof(*secure_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!secure_vmcb_block) { + ret = -ENOMEM; + goto e_free_trans_data; + } + + shadow_vmcb_block = kzalloc(sizeof(*shadow_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!shadow_vmcb_block) { + ret = -ENOMEM; + goto e_free_secure_vmcb_block; + } + + memset(&data, 0, sizeof(data)); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(shadow_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto e_free_shadow_vmcb_block; + } + shadow_vmcb_block->vmcb_paddr[i] = __sme_pa(svm->vmcb); + data.vmcb_block_len += sizeof(shadow_vmcb_block->vmcb_paddr[0]); + } + + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.shadow_vmcb_block = __psp_pa(shadow_vmcb_block); + data.secure_vmcb_block = __psp_pa(secure_vmcb_block); + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(shadow_vmcb_block, PAGE_SIZE); + clflush_cache_range(secure_vmcb_block, PAGE_SIZE); + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT, + &data, &argp->error); + if (ret) + goto e_free_shadow_vmcb_block; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(secure_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto e_free_shadow_vmcb_block; + } + + svm->current_vmcb->pa = secure_vmcb_block->vmcb_paddr[i]; + svm->vcpu.arch.guest_state_protected = true; + + /* + * CSV3 guest mandates LBR Virtualization to be _always_ ON. 
+ * Enable it only after setting guest_state_protected because + * KVM_SET_MSRS allows dynamic toggling of LBRV (for performance + * reason) on write access to MSR_IA32_DEBUGCTLMSR when + * guest_state_protected is not set. + */ + svm_enable_lbrv(vcpu); + } + +e_free_shadow_vmcb_block: + kfree(shadow_vmcb_block); +e_free_secure_vmcb_block: + kfree(secure_vmcb_block); +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, + unsigned long npages) +{ + gfn_t gfn; + gfn_t gfn_end; + + gfn = gpa >> PAGE_SHIFT; + gfn_end = gfn + npages; +#ifdef KVM_HAVE_MMU_RWLOCK + write_lock(&vcpu->kvm->mmu_lock); +#else + spin_lock(&vcpu->kvm->mmu_lock); +#endif + for (; gfn < gfn_end; gfn++) + kvm_vcpu_mark_page_dirty(vcpu, gfn); +#ifdef KVM_HAVE_MMU_RWLOCK + write_unlock(&vcpu->kvm->mmu_lock); +#else + spin_unlock(&vcpu->kvm->mmu_lock); +#endif +} + +static int csv3_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code) +{ + int r = 0; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + union csv3_page_attr page_attr = {.mmio = 1}; + union csv3_page_attr page_attr_mask = {.mmio = 1}; + struct csv3_data_update_npt *update_npt; + int psp_ret; + + if (!hygon_kvm_hooks.sev_hooks_installed) + return -EFAULT; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + update_npt->page_attr = page_attr.val; + update_npt->page_attr_mask = page_attr_mask.val; + update_npt->level = CSV3_PG_LEVEL_4K; + + r = hygon_kvm_hooks.sev_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, + update_npt, &psp_ret); + + if (psp_ret != SEV_RET_SUCCESS) + r = -EFAULT; + + kfree(update_npt); +exit: + return r; +} + +static int __csv3_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, + u32 error_code, struct kvm_memory_slot *slot, + int *psp_ret_ptr, kvm_pfn_t pfn, u32 level) +{ + int r = 0; + struct csv3_data_update_npt *update_npt; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + int psp_ret = 0; + + if (!hygon_kvm_hooks.sev_hooks_installed) + return -EFAULT; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + + update_npt->spa = pfn << PAGE_SHIFT; + update_npt->level = level; + + if (!csv3_is_mmio_pfn(pfn)) + update_npt->spa |= sme_me_mask; + + r = hygon_kvm_hooks.sev_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, + update_npt, &psp_ret); + + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + kvm_flush_remote_tlbs(vcpu->kvm); + + csv3_mark_page_dirty(vcpu, update_npt->gpa, update_npt->npages); + + if (psp_ret_ptr) + *psp_ret_ptr = psp_ret; + + kfree(update_npt); +exit: + return r; +} + +static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t *pfn) +{ + struct page **pages, *page; + u64 hva; + int npinned; + kvm_pfn_t tmp_pfn; + struct kvm *kvm = vcpu->kvm; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct shared_page_block *shared_page_block = NULL; + u64 npages = PAGE_SIZE / sizeof(struct page *); + bool write = !(slot->flags & KVM_MEM_READONLY); + + tmp_pfn = __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, write, + NULL, NULL); + if (unlikely(is_error_pfn(tmp_pfn))) + return -ENOMEM; + + if (csv3_is_mmio_pfn(tmp_pfn)) { + *pfn = tmp_pfn; 
+ return 0; + } + + if (!page_maybe_dma_pinned(pfn_to_page(tmp_pfn))) { + kvm_release_pfn_clean(tmp_pfn); + if (csv->total_shared_page_count % npages == 0) { + shared_page_block = kzalloc(sizeof(*shared_page_block), + GFP_KERNEL_ACCOUNT); + if (!shared_page_block) + return -ENOMEM; + + pages = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!pages) { + kfree(shared_page_block); + return -ENOMEM; + } + + shared_page_block->pages = pages; + list_add_tail(&shared_page_block->list, + &csv->shared_pages_list); + csv->cached_shared_page_block = shared_page_block; + } else { + shared_page_block = csv->cached_shared_page_block; + pages = shared_page_block->pages; + } + + hva = __gfn_to_hva_memslot(slot, gfn); + npinned = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM, + &page); + if (npinned != 1) { + if (shared_page_block->count == 0) { + list_del(&shared_page_block->list); + kfree(pages); + kfree(shared_page_block); + } + return -ENOMEM; + } + + pages[csv->total_shared_page_count % npages] = page; + shared_page_block->count++; + csv->total_shared_page_count++; + *pfn = page_to_pfn(page); + } else { + kvm_release_pfn_clean(tmp_pfn); + *pfn = tmp_pfn; + } + + return 0; +} + +static int __pfn_mapping_level(struct kvm *kvm, gfn_t gfn, + const struct kvm_memory_slot *slot) +{ + int level = PG_LEVEL_4K; + unsigned long hva; + unsigned long flags; + pgd_t pgd; + p4d_t p4d; + pud_t pud; + pmd_t pmd; + + /* + * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() + * is not solely for performance, it's also necessary to avoid the + * "writable" check in __gfn_to_hva_many(), which will always fail on + * read-only memslots due to gfn_to_hva() assuming writes. Earlier + * page fault steps have already verified the guest isn't writing a + * read-only memslot. + */ + hva = __gfn_to_hva_memslot(slot, gfn); + + /* + * Disable IRQs to prevent concurrent tear down of host page tables, + * e.g. if the primary MMU promotes a P*D to a huge page and then frees + * the original page table. + */ + local_irq_save(flags); + + /* + * Read each entry once. As above, a non-leaf entry can be promoted to + * a huge page _during_ this walk. Re-reading the entry could send the + * walk into the weeds, e.g. p*d_large() returns false (sees the old + * value) and then p*d_offset() walks into the target huge page instead + * of the old page table (sees the new value). + */ + pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); + if (pgd_none(pgd)) + goto out; + + p4d = READ_ONCE(*p4d_offset(&pgd, hva)); + if (p4d_none(p4d) || !p4d_present(p4d)) + goto out; + + pud = READ_ONCE(*pud_offset(&p4d, hva)); + if (pud_none(pud) || !pud_present(pud)) + goto out; + + if (pud_large(pud)) { + level = PG_LEVEL_1G; + goto out; + } + + pmd = READ_ONCE(*pmd_offset(&pud, hva)); + if (pmd_none(pmd) || !pmd_present(pmd)) + goto out; + + if (pmd_large(pmd)) + level = PG_LEVEL_2M; + +out: + local_irq_restore(flags); + return level; +} + +static int csv3_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn, + struct kvm_memory_slot *slot) +{ + int level; + int page_num; + gfn_t gfn_base; + + if (csv3_is_mmio_pfn(pfn)) { + level = PG_LEVEL_4K; + goto end; + } + + if (!PageCompound(pfn_to_page(pfn))) { + level = PG_LEVEL_4K; + goto end; + } + + level = PG_LEVEL_2M; + page_num = KVM_PAGES_PER_HPAGE(level); + gfn_base = gfn & ~(page_num - 1); + + /* + * 2M aligned guest address in memslot. 
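+ * The whole 2M range must lie within the memslot, otherwise fall back + * to a 4K mapping.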
+ */ + if ((gfn_base < slot->base_gfn) || + (gfn_base + page_num > slot->base_gfn + slot->npages)) { + level = PG_LEVEL_4K; + goto end; + } + + /* + * hva in memslot is 2M aligned. + */ + if (__gfn_to_hva_memslot(slot, gfn_base) & ~PMD_MASK) { + level = PG_LEVEL_4K; + goto end; + } + + level = __pfn_mapping_level(vcpu->kvm, gfn, slot); + + /* + * Firmware supports 2M/4K level. + */ + level = level > PG_LEVEL_2M ? PG_LEVEL_2M : level; + +end: + return to_csv3_pg_level(level); +} + +static int csv3_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, + gfn_t gfn, u32 error_code) +{ + int ret = 0; + int psp_ret = 0; + int level; + kvm_pfn_t pfn; + struct kvm_csv_info *csv = &to_kvm_svm_csv(vcpu->kvm)->csv_info; + + if (error_code & PFERR_PRESENT_MASK) + level = CSV3_PG_LEVEL_4K; + else { + mutex_lock(&csv->shared_page_block_lock); + ret = csv3_pin_shared_memory(vcpu, slot, gfn, &pfn); + mutex_unlock(&csv->shared_page_block_lock); + if (ret) + goto exit; + + level = csv3_mapping_level(vcpu, gfn, pfn, slot); + } + + ret = __csv3_page_fault(vcpu, gfn << PAGE_SHIFT, error_code, slot, + &psp_ret, pfn, level); + + if (psp_ret != SEV_RET_SUCCESS) + ret = -EFAULT; +exit: + return ret; +} + +static void csv_vm_destroy(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct list_head *head = &csv->shared_pages_list; + struct list_head *pos, *q; + struct shared_page_block *shared_page_block; + struct kvm_vcpu *vcpu; + unsigned long i = 0; + + struct list_head *smr_head = &csv->smr_list; + struct secure_memory_region *smr; + + if (csv3_guest(kvm)) { + mutex_lock(&csv->shared_page_block_lock); + if (!list_empty(head)) { + list_for_each_safe(pos, q, head) { + shared_page_block = list_entry(pos, + struct shared_page_block, list); + unpin_user_pages(shared_page_block->pages, + shared_page_block->count); + kfree(shared_page_block->pages); + csv->total_shared_page_count -= + shared_page_block->count; + list_del(&shared_page_block->list); + kfree(shared_page_block); + } + } + mutex_unlock(&csv->shared_page_block_lock); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + svm->current_vmcb->pa = __sme_pa(svm->vmcb); + } + } + + if (likely(csv_x86_ops.vm_destroy)) + csv_x86_ops.vm_destroy(kvm); + + if (!csv3_guest(kvm)) + return; + + /* free secure memory region */ + if (!list_empty(smr_head)) { + list_for_each_safe(pos, q, smr_head) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + csv_release_to_contiguous(smr->hpa, smr->npages << PAGE_SHIFT); + list_del(&smr->list); + kfree(smr); + } + } + } +} + +static int csv3_handle_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, + u32 error_code) +{ + gfn_t gfn = gpa_to_gfn(gpa); + struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn); + int ret; + int r = -EIO; + + if (kvm_is_visible_memslot(slot)) + ret = csv3_page_fault(vcpu, slot, gfn, error_code); + else + ret = csv3_mmio_page_fault(vcpu, gpa, error_code); + + if (!ret) + r = 1; + + return r; +} + +static int csv_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) +{ + struct vcpu_svm *svm = to_svm(vcpu); + u32 exit_code = svm->vmcb->control.exit_code; + int ret = -EIO; + + /* + * NPF for csv3 is dedicated. 
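+ * The firmware manages the nested page tables of a CSV3 guest, so #NPF + * exits are resolved via CSV3_CMD_UPDATE_NPT in the CSV3-specific handler + * instead of the generic SVM path.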
+ */ + if (csv3_guest(vcpu->kvm) && exit_code == SVM_EXIT_NPF) { + gpa_t gpa = __sme_clr(svm->vmcb->control.exit_info_2); + u64 error_code = svm->vmcb->control.exit_info_1; + + ret = csv3_handle_page_fault(vcpu, gpa, error_code); + } else { + if (likely(csv_x86_ops.handle_exit)) + ret = csv_x86_ops.handle_exit(vcpu, exit_fastpath); + } + + return ret; +} + +static void csv_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!csv3_guest(kvm)) { + if (likely(csv_x86_ops.guest_memory_reclaimed)) + csv_x86_ops.guest_memory_reclaimed(kvm); + } +} + +static int csv_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct sev_data_launch_secret data; + struct kvm_sev_launch_secret params; + struct page **pages; + void *blob, *hdr; + unsigned long n, i; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + memset(&data, 0, sizeof(data)); + + if (!csv3_guest(kvm) || + !(csv->inuse_ext & KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET)) { + pages = hygon_kvm_hooks.sev_pin_memory(kvm, params.guest_uaddr, + params.guest_len, &n, 1); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + /* + * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts + * pages in place; the cache may contain the data that was + * written unencrypted. + */ + hygon_kvm_hooks.sev_clflush_pages(pages, n); + + /* + * The secret must be copied into a contiguous memory region, + * so let's verify that the userspace memory pages are + * contiguous before we issue the command. + */ + if (hygon_kvm_hooks.get_num_contig_pages(0, pages, n) != n) { + ret = -EINVAL; + goto e_unpin_memory; + } + + offset = params.guest_uaddr & (PAGE_SIZE - 1); + data.guest_address = __sme_page_pa(pages[0]) + offset; + } else { + /* It's gpa for CSV3 guest */ + data.guest_address = params.guest_uaddr; + } + data.guest_len = params.guest_len; + + blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); + if (IS_ERR(blob)) { + ret = PTR_ERR(blob); + goto e_unpin_memory; + } + + data.trans_address = __psp_pa(blob); + data.trans_len = params.trans_len; + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) { + ret = PTR_ERR(hdr); + goto e_free_blob; + } + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + + data.handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, + &data, &argp->error); + + kfree(hdr); + +e_free_blob: + kfree(blob); +e_unpin_memory: + if (!csv3_guest(kvm) || + !(csv->inuse_ext & KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET)) { + /* content of memory is updated, mark pages dirty */ + for (i = 0; i < n; i++) { + set_page_dirty_lock(pages[i]); + mark_page_accessed(pages[i]); + } + hygon_kvm_hooks.sev_unpin_memory(kvm, pages, n); + } + return ret; +} + +static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) +{ + struct kvm_sev_cmd sev_cmd; + int r; + + if (!hygon_kvm_hooks.sev_hooks_installed || + !(*hygon_kvm_hooks.sev_enabled)) + return -ENOTTY; + + if (!argp) + return 0; + + if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd))) + return -EFAULT; + + mutex_lock(&kvm->lock); + + switch (sev_cmd.id) { + case KVM_CSV_COMMAND_BATCH: + mutex_lock(&csv_cmd_batch_mutex); + r = csv_command_batch(kvm, &sev_cmd); + mutex_unlock(&csv_cmd_batch_mutex); + break; + case KVM_SEV_LAUNCH_SECRET: + r = csv_launch_secret(kvm, &sev_cmd); + 
break; + case KVM_SEV_SEND_UPDATE_VMSA: + /* + * Hygon implements a specific interface, although + * KVM_SEV_SEND_UPDATE_VMSA is a command shared by CSV and + * SEV. The struct sev_data_send_update_vmsa is also shared + * by CSV and SEV, so we'll use this structure in the code. + */ + r = csv_send_update_vmsa(kvm, &sev_cmd); + break; + case KVM_SEV_RECEIVE_UPDATE_VMSA: + /* + * Hygon implements a specific interface, although + * KVM_SEV_RECEIVE_UPDATE_VMSA is a command shared by CSV and + * SEV. The struct sev_data_receive_update_vmsa is also shared + * by CSV and SEV, so we'll use this structure in the code. + */ + r = csv_receive_update_vmsa(kvm, &sev_cmd); + break; + case KVM_CSV3_INIT: + if (!csv3_enabled) { + r = -ENOTTY; + goto out; + } + r = csv3_guest_init(kvm, &sev_cmd); + break; + case KVM_CSV3_LAUNCH_ENCRYPT_DATA: + r = csv3_launch_encrypt_data(kvm, &sev_cmd); + break; + case KVM_CSV3_LAUNCH_ENCRYPT_VMCB: + r = csv3_launch_encrypt_vmcb(kvm, &sev_cmd); + break; + case KVM_CSV3_SEND_ENCRYPT_DATA: + r = csv3_send_encrypt_data(kvm, &sev_cmd); + break; + case KVM_CSV3_SEND_ENCRYPT_CONTEXT: + r = csv3_send_encrypt_context(kvm, &sev_cmd); + break; + case KVM_CSV3_RECEIVE_ENCRYPT_DATA: + r = csv3_receive_encrypt_data(kvm, &sev_cmd); + break; + case KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT: + r = csv3_receive_encrypt_context(kvm, &sev_cmd); + break; + case KVM_CSV3_SET_GUEST_PRIVATE_MEMORY: + r = csv3_set_guest_private_memory(kvm, &sev_cmd); + break; + default: + /* + * If the command is compatible between CSV and SEV, the + * native implementation of the driver is invoked. + * Release the mutex before calling the native ioctl function + * because it will acquire the mutex. + */ + mutex_unlock(&kvm->lock); + if (likely(csv_x86_ops.mem_enc_ioctl)) + return csv_x86_ops.mem_enc_ioctl(kvm, argp); + } + + if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) + r = -EFAULT; + +out: + mutex_unlock(&kvm->lock); + return r; +} + +/* The caller must flush the stale caches of svm->sev_es.vmsa */ +void csv2_sync_reset_vmsa(struct vcpu_svm *svm) +{ + if (svm->sev_es.reset_vmsa) + memcpy(svm->sev_es.reset_vmsa, svm->sev_es.vmsa, PAGE_SIZE); +} + +void csv2_free_reset_vmsa(struct vcpu_svm *svm) +{ + if (svm->sev_es.reset_vmsa) { + __free_page(virt_to_page(svm->sev_es.reset_vmsa)); + svm->sev_es.reset_vmsa = NULL; + } +} + +int csv2_setup_reset_vmsa(struct vcpu_svm *svm) +{ + struct page *reset_vmsa_page = NULL; + + reset_vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!reset_vmsa_page) + return -ENOMEM; + + svm->sev_es.reset_vmsa = page_address(reset_vmsa_page); + return 0; +} + +static int csv2_map_ghcb_gpa(struct vcpu_svm *svm, u64 ghcb_gpa) +{ + if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { + /* Unable to map GHCB from guest */ + vcpu_unimpl(&svm->vcpu, "Missing GHCB [%#llx] from guest\n", + ghcb_gpa); + + svm->sev_es.receiver_ghcb_map_fail = true; + return -EINVAL; + } + + svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; + svm->sev_es.receiver_ghcb_map_fail = false; + + pr_info("Mapping GHCB [%#llx] from guest at recipient\n", ghcb_gpa); + + return 0; +} + +static bool is_ghcb_msr_protocol(u64 ghcb_val) +{ + return !!(ghcb_val & GHCB_MSR_INFO_MASK); +} + +/* + * csv_get_msr returns MSR data to userspace. + * + * Return 0 if the MSR was handled successfully. 
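+ * A non-zero return indicates the MSR is not handled here; the caller is + * then expected to fall back to the default MSR handling.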
+ */ +int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + switch (msr_info->index) { + case MSR_AMD64_SEV_ES_GHCB: + /* Only support userspace get from vmcb.control.ghcb_gpa */ + if (!msr_info->host_initiated) + return 1; + + /* Fill the data with 0 if it's not a Hygon CSV2 guest */ + if (!sev_es_guest(svm->vcpu.kvm)) { + msr_info->data = 0; + return 0; + } + + msr_info->data = svm->vmcb->control.ghcb_gpa; + + /* Only set status bits when using GHCB page protocol */ + if (msr_info->data && + !is_ghcb_msr_protocol(msr_info->data)) { + if (svm->sev_es.ghcb) + msr_info->data |= GHCB_MSR_MAPPED_MASK; + + if (svm->sev_es.received_first_sipi) + msr_info->data |= + GHCB_MSR_RECEIVED_FIRST_SIPI_MASK; + } + break; + default: + return 1; + } + return 0; +} + +/* + * csv_set_msr sets MSR data from userspace. + * + * Return 0 if the MSR was handled successfully. + */ +int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_svm *svm = to_svm(vcpu); + u32 ecx = msr_info->index; + u64 data = msr_info->data; + + switch (ecx) { + case MSR_AMD64_SEV_ES_GHCB: + /* Only support userspace set to vmcb.control.ghcb_gpa */ + if (!msr_info->host_initiated) + return 1; + + /* Ignore write to this MSR if it's not a Hygon CSV2 guest. */ + if (!sev_es_guest(svm->vcpu.kvm)) + return 0; + + /* + * A value of 0 means uninitialized userspace MSR data; + * userspace needs to get the initial MSR data afterwards. + */ + if (!data) + return 0; + + /* Extract status info when using GHCB page protocol */ + if (!is_ghcb_msr_protocol(data)) { + if (!svm->sev_es.ghcb && (data & GHCB_MSR_MAPPED_MASK)) { + /* + * This happens on the recipient side of + * migration; return an error if the ghcb + * page cannot be mapped. + */ + if (csv2_map_ghcb_gpa(to_svm(vcpu), + data & ~GHCB_MSR_KVM_STATUS_MASK)) + return 1; + } + + if (data & GHCB_MSR_RECEIVED_FIRST_SIPI_MASK) + svm->sev_es.received_first_sipi = true; + + data &= ~GHCB_MSR_KVM_STATUS_MASK; + } + + svm->vmcb->control.ghcb_gpa = data; + break; + default: + return 1; + } + return 0; +} + +bool csv_has_emulated_ghcb_msr(struct kvm *kvm) +{ + /* This should be determined after KVM_CREATE_VM. 
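+ * Before the VM is created (kvm == NULL), conservatively report that the + * GHCB MSR may need emulation.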
*/ + if (kvm && !sev_es_guest(kvm)) + return false; + + return true; +} + +static int csv_control_pre_system_reset(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_es_guest(kvm)) + return 0; + + kvm_for_each_vcpu(i, vcpu, kvm) { + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + vcpu->arch.guest_state_protected = false; + + mutex_unlock(&vcpu->mutex); + } + + return 0; +} + +static int csv_control_post_system_reset(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_guest(kvm)) + return 0; + + /* Flush both host and guest caches before next boot flow */ + wbinvd_on_all_cpus(); + + if (!sev_es_guest(kvm)) + return 0; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + memcpy(svm->sev_es.vmsa, svm->sev_es.reset_vmsa, PAGE_SIZE); + + /* Flush encrypted vmsa to memory */ + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + + svm->vcpu.arch.guest_state_protected = true; + svm->sev_es.received_first_sipi = false; + + mutex_unlock(&vcpu->mutex); + } + + return 0; +} + +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + +struct csv_asid_userid *csv_asid_userid_array; + +static int csv_alloc_asid_userid_array(unsigned int nr_asids) +{ + int ret = 0; + + csv_asid_userid_array = kcalloc(nr_asids, sizeof(struct csv_asid_userid), + GFP_KERNEL_ACCOUNT); + if (!csv_asid_userid_array) + ret = -ENOMEM; + + if (ret) + pr_warn("Failed to allocate array, reuse ASID is unavailable\n"); + + return ret; +} + +static void csv_free_asid_userid_array(void) +{ + kfree(csv_asid_userid_array); + csv_asid_userid_array = NULL; +} + +#else /* !CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ + +static int csv_alloc_asid_userid_array(unsigned int nr_asids) +{ + pr_warn("reuse ASID is unavailable\n"); + return -EFAULT; +} + +static void csv_free_asid_userid_array(void) +{ +} + +#endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ + +/** + * When userspace recognizes these extensions, it is suggested that userspace + * enable these extensions through KVM_ENABLE_CAP, so that both userspace + * and KVM can utilize them. + */ +static int csv_get_hygon_coco_extension(struct kvm *kvm) +{ + struct kvm_csv_info *csv; + size_t len = sizeof(uint32_t); + int ret = 0; + + if (!kvm) + return 0; + + csv = &to_kvm_svm_csv(kvm)->csv_info; + + if (csv->fw_ext_valid == false) { + ret = csv_get_extension_info(&csv->fw_ext, &len); + + if (ret == -ENODEV) { + pr_err("Unable to interact with CSV firmware!\n"); + return 0; + } else if (ret == -EINVAL) { + pr_err("Need %zu bytes to record fw extension!\n", len); + return 0; + } + + csv->fw_ext_valid = true; + } + + /* The kvm_ext field of kvm_csv_info is filled in only if the fw_ext + * field of kvm_csv_info is valid. + */ + if (csv->kvm_ext_valid == false) { + if (csv3_guest(kvm)) { + csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM; + if (csv->fw_ext & CSV_EXT_CSV3_MULT_LUP_DATA) + csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA; + if (csv->fw_ext & CSV_EXT_CSV3_INJ_SECRET) + csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET; + } + csv->kvm_ext_valid = true; + } + + /* Return extension info only if both fw_ext and kvm_ext fields of + * kvm_csv_info are valid. + */ + pr_debug("%s: fw_ext=%#x kvm_ext=%#x\n", + __func__, csv->fw_ext, csv->kvm_ext); + return (int)csv->kvm_ext; +} + +/** + * Returning 0 means KVM accepted the negotiation from userspace. 
Neither + * userspace nor KVM should utilize the extensions if the negotiation failed. + */ +static int csv_enable_hygon_coco_extension(struct kvm *kvm, u32 arg) +{ + struct kvm_csv_info *csv; + + if (!kvm) + return -EINVAL; + + csv = &to_kvm_svm_csv(kvm)->csv_info; + + /* Negotiation is accepted only if both the fw_ext and kvm_ext fields + * of kvm_csv_info are valid and the virtual machine is a CSV3 guest. + */ + if (csv->fw_ext_valid && csv->kvm_ext_valid && csv3_guest(kvm)) { + csv->inuse_ext = csv->kvm_ext & arg; + pr_debug("%s: inuse_ext=%#x\n", __func__, csv->inuse_ext); + return csv->inuse_ext; + } + + /* Userspace should not utilize the extensions */ + return -EINVAL; +} + +void __init csv_hardware_setup(unsigned int max_csv_asid) +{ + unsigned int nr_asids = max_csv_asid + 1; + + /* + * Allocate a memory pool to speed up live migration of + * the CSV/CSV2 guests. If the allocation fails, no + * acceleration is performed during live migration. + */ + csv_alloc_trans_mempool(); + /* + * Allocate a buffer to support reuse ASID; reuse ASID + * will not work if the allocation fails. + */ + csv_alloc_asid_userid_array(nr_asids); + + /* CSV3 depends on X86_FEATURE_CSV3 */ + if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) + csv3_enabled = true; + else + csv3_enabled = false; + + pr_info("CSV3 %s (ASIDs 1 - %u)\n", + csv3_enabled ? "enabled" : "disabled", max_csv_asid); +} + +void csv_hardware_unsetup(void) +{ + /* Free the memory that was allocated in csv_hardware_setup(). */ + csv_free_trans_mempool(); + csv_free_asid_userid_array(); +} + +void csv_exit(void) +{ +} + +void __init csv_init(struct kvm_x86_ops *ops) +{ + /* + * Hygon CSV is indicated by X86_FEATURE_SEV; return directly if CSV + * is unsupported. + */ + if (!boot_cpu_has(X86_FEATURE_SEV)) + return; + + memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); + + ops->vm_size = sizeof(struct kvm_svm_csv); + ops->mem_enc_ioctl = csv_mem_enc_ioctl; + ops->vm_attestation = csv_vm_attestation; + ops->control_pre_system_reset = csv_control_pre_system_reset; + ops->control_post_system_reset = csv_control_post_system_reset; + ops->get_hygon_coco_extension = csv_get_hygon_coco_extension; + ops->enable_hygon_coco_extension = csv_enable_hygon_coco_extension; + + if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) { + ops->vm_destroy = csv_vm_destroy; + ops->handle_exit = csv_handle_exit; + ops->guest_memory_reclaimed = csv_guest_memory_reclaimed; + } +} diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h new file mode 100644 index 0000000000000000000000000000000000000000..9b0563062a941aad1e2e35b400706604f7a77e97 --- /dev/null +++ b/arch/x86/kvm/svm/csv.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * CSV driver for KVM + * + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ */ + +#ifndef __SVM_CSV_H +#define __SVM_CSV_H + +#include + +/* same as the ring buffer max num */ +#define SVM_RING_BUFFER_MAX 4094 + +struct csv_ringbuf_info_item { + struct page **pages; + uintptr_t hdr_vaddr; + uintptr_t trans_vaddr; + uintptr_t data_vaddr; + uintptr_t trans_uaddr; + uintptr_t hdr_uaddr; + unsigned long trans_len; + unsigned long hdr_len; + unsigned long n; +}; + +struct csv_ringbuf_infos { + struct csv_ringbuf_info_item *item[SVM_RING_BUFFER_MAX]; + int num; +}; + +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + +#define ASID_USERID_LENGTH 20 + +struct csv_asid_userid { + int refcnt; /* reference count of the ASID */ + u32 userid_len; + char userid[ASID_USERID_LENGTH]; +}; +extern struct csv_asid_userid *csv_asid_userid_array; + +#endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ + +#ifdef CONFIG_HYGON_CSV + +/* + * Hooks table: a table of function and variable pointers filled in + * at module init. + */ +extern struct hygon_kvm_hooks_table { + bool sev_hooks_installed; + bool *sev_enabled; + unsigned long *sev_me_mask; + int (*sev_issue_cmd)(struct kvm *kvm, int id, void *data, int *error); + unsigned long (*get_num_contig_pages)(unsigned long idx, + struct page **inpages, + unsigned long npages); + struct page **(*sev_pin_memory)(struct kvm *kvm, unsigned long uaddr, + unsigned long ulen, unsigned long *n, + int write); + void (*sev_unpin_memory)(struct kvm *kvm, struct page **pages, + unsigned long npages); + void (*sev_clflush_pages)(struct page *pages[], unsigned long npages); +} hygon_kvm_hooks; + +void __init csv_init(struct kvm_x86_ops *ops); +void csv_exit(void); + +void __init csv_hardware_setup(unsigned int max_csv_asid); +void csv_hardware_unsetup(void); + +int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); +int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); +bool csv_has_emulated_ghcb_msr(struct kvm *kvm); +void csv2_sync_reset_vmsa(struct vcpu_svm *svm); +void csv2_free_reset_vmsa(struct vcpu_svm *svm); +int csv2_setup_reset_vmsa(struct vcpu_svm *svm); + +static inline bool csv2_state_unstable(struct vcpu_svm *svm) +{ + return svm->sev_es.receiver_ghcb_map_fail; +} + +#else /* !CONFIG_HYGON_CSV */ + +static inline void __init csv_init(struct kvm_x86_ops *ops) { } +static inline void csv_exit(void) { } + +static inline void __init csv_hardware_setup(unsigned int max_csv_asid) { } +static inline void csv_hardware_unsetup(void) { } + +static inline
+int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; } +static inline +int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; } +static inline bool csv_has_emulated_ghcb_msr(struct kvm *kvm) { return false; } +static inline bool csv2_state_unstable(struct vcpu_svm *svm) { return false; } +static inline void csv2_sync_reset_vmsa(struct vcpu_svm *svm) { } +static inline void csv2_free_reset_vmsa(struct vcpu_svm *svm) { } +static inline int csv2_setup_reset_vmsa(struct vcpu_svm *svm) { return 0; } + +#endif /* CONFIG_HYGON_CSV */ + +#include + +/* + * CSV2 live migration support: + * If MSR_AMD64_SEV_ES_GHCB in migration didn't apply the GHCB MSR protocol, + * reuse bits [52-63] to indicate vcpu status. The following statuses are + * currently included: + * * ghcb_map: indicates whether the GHCB page was mapped. The mapped GHCB + * page may be filled with GPRs before VMRUN, so we must + * remap the GHCB page on the recipient's side. + * * received_first_sipi: indicates the AP's INIT-SIPI-SIPI stage. 
Reusing
+ * these bits for received_first_sipi is acceptable because the
+ * runtime stage of the guest's Linux only uses the GHCB page
+ * protocol.
+ * It's unlikely that a migration encounters other stages of the
+ * guest's Linux. If one is encountered, AP bringup may fail,
+ * which will not impact the user payload.
+ * Other bits keep their original meaning. (See GHCB Spec 2.3.1 for details)
+ */
+#define GHCB_MSR_KVM_STATUS_POS 52
+#define GHCB_MSR_KVM_STATUS_BITS 12
+#define GHCB_MSR_KVM_STATUS_MASK \
+ ((BIT_ULL(GHCB_MSR_KVM_STATUS_BITS) - 1) \
+ << GHCB_MSR_KVM_STATUS_POS)
+#define GHCB_MSR_MAPPED_POS 63
+#define GHCB_MSR_MAPPED_BITS 1
+#define GHCB_MSR_MAPPED_MASK \
+ ((BIT_ULL(GHCB_MSR_MAPPED_BITS) - 1) \
+ << GHCB_MSR_MAPPED_POS)
+#define GHCB_MSR_RECEIVED_FIRST_SIPI_POS 62
+#define GHCB_MSR_RECEIVED_FIRST_SIPI_BITS 1
+#define GHCB_MSR_RECEIVED_FIRST_SIPI_MASK \
+ ((BIT_ULL(GHCB_MSR_RECEIVED_FIRST_SIPI_BITS) - 1) \
+ << GHCB_MSR_RECEIVED_FIRST_SIPI_POS)
+
+#endif /* __SVM_CSV_H */ diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index acf22bd99efcd8b1b3228e8cb3b4c2cbfecf0cba..634edce60d56fbfbec2a61eea9db751203711404 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -300,7 +300,7 @@ static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu, if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) { if (CC(!(save->cr4 & X86_CR4_PAE)) || CC(!(save->cr0 & X86_CR0_PE)) ||
- CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
+ CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
return false; } @@ -509,7 +509,7 @@ static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu) static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_npt, bool reload_pdptrs) {
- if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
+ if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3)))
return -EINVAL; if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) && diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 99e72b8a96ac0b224b1b86affd930b3bbdf436a9..36ea5d0c0a65baa1dc87e2f8ae0376a62d12ddaa 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "mmu.h" #include "x86.h" @@ -32,6 +33,8 @@ #include "cpuid.h" #include "trace.h" +#include "csv.h" + #ifndef CONFIG_KVM_AMD_SEV /* * When this config is not defined, SEV feature is not supported and APIs in @@ -142,7 +145,11 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev) misc_cg_uncharge(type, sev->misc_cg, 1); }
+#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
+static int sev_asid_new(struct kvm_sev_info *sev, const char *userid, u32 userid_len)
+#else
static int sev_asid_new(struct kvm_sev_info *sev)
+#endif
{ /* * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. @@ -156,6 +163,13 @@ static int sev_asid_new(struct kvm_sev_info *sev) bool retry = true; int ret;
+ /*
+ * No matter what the min_sev_asid is, all ASIDs in the range
+ * [1, max_sev_asid] can be used for CSV2 guests on Hygon CPUs.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ max_asid = max_sev_asid;
+
if (min_asid > max_asid) return -ENOTTY; @@ -170,6 +184,34 @@ static int sev_asid_new(struct kvm_sev_info *sev) mutex_lock(&sev_bitmap_lock);
+#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
+ /* For Hygon CPUs, check whether the userid already exists */
+ if (is_x86_vendor_hygon() && userid && userid_len &&
+ !WARN_ON_ONCE(!csv_asid_userid_array)) {
+ int i = !min_sev_asid ?
1 : min_sev_asid;
+
+ for (; i <= max_sev_asid; i++) {
+ /* skip ASIDs without a corresponding userid */
+ if (!csv_asid_userid_array[i].userid_len)
+ continue;
+
+ /* skip if the length of the userid is different */
+ if (csv_asid_userid_array[i].userid_len != userid_len)
+ continue;
+
+ if (!memcmp(csv_asid_userid_array[i].userid,
+ userid, userid_len)) {
+ pr_debug("Found reusable asid %d\n", i);
+ /* Increase the reference count if the userid exists */
+ csv_asid_userid_array[i].refcnt++;
+
+ mutex_unlock(&sev_bitmap_lock);
+ return i;
+ }
+ }
+ }
+#endif
+
again: asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid); if (asid > max_asid) { @@ -184,6 +226,16 @@ static int sev_asid_new(struct kvm_sev_info *sev) __set_bit(asid, sev_asid_bitmap);
+#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
+ /* For Hygon CPUs, initialize the new userid */
+ if (is_x86_vendor_hygon() && userid && userid_len &&
+ !WARN_ON_ONCE(!csv_asid_userid_array)) {
+ memcpy(csv_asid_userid_array[asid].userid, userid, userid_len);
+ csv_asid_userid_array[asid].userid_len = userid_len;
+ csv_asid_userid_array[asid].refcnt = 1;
+ }
+#endif
+
mutex_unlock(&sev_bitmap_lock); return asid; @@ -208,7 +260,25 @@ static void sev_asid_free(struct kvm_sev_info *sev) mutex_lock(&sev_bitmap_lock);
+#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
+ /* For Hygon CPUs, decrease the reference count if the userid exists */
+ if (!is_x86_vendor_hygon() || !csv_asid_userid_array ||
+ !csv_asid_userid_array[sev->asid].userid_len) {
+ __set_bit(sev->asid, sev_reclaim_asid_bitmap);
+ } else {
+ /* If we reach here, the reference count should be larger than 0. */
+ WARN_ON(csv_asid_userid_array[sev->asid].refcnt <= 0);
+
+ if (--csv_asid_userid_array[sev->asid].refcnt == 0) {
+ __set_bit(sev->asid, sev_reclaim_asid_bitmap);
+
+ memset(&csv_asid_userid_array[sev->asid], 0,
+ sizeof(struct csv_asid_userid));
+ }
+ }
+#else
__set_bit(sev->asid, sev_reclaim_asid_bitmap);
+#endif
for_each_possible_cpu(cpu) { sd = per_cpu_ptr(&svm_data, cpu); @@ -264,7 +334,46 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) sev->active = true; sev->es_active = argp->id == KVM_SEV_ES_INIT;
+
+#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
+ /* Try to reuse an ASID iff the userid array is available for HYGON CSV guests */
+ if (is_x86_vendor_hygon() && csv_asid_userid_array) {
+ struct kvm_csv_init params;
+ void *csv_blob = NULL;
+
+ memset(&params, 0, sizeof(params));
+
+ if (argp->data &&
+ copy_from_user(&params,
+ (void __user *)(uintptr_t)argp->data, sizeof(params)))
+ return -EFAULT;
+
+ if (params.userid_addr) {
+ if (params.len >= ASID_USERID_LENGTH) {
+ pr_err("Invalid userid length %d, must be less than %d\n",
+ params.len, ASID_USERID_LENGTH);
+ return -EINVAL;
+ }
+
+ csv_blob = psp_copy_user_blob(params.userid_addr, params.len);
+ if (IS_ERR(csv_blob)) {
+ pr_err("Copy userid failed, %llx (%u)\n",
+ params.userid_addr, params.len);
+ return PTR_ERR(csv_blob);
+ }
+ }
+
+ asid = sev_asid_new(sev, (const char *)csv_blob, params.len);
+
+ /* The buffer @csv_blob is no longer used; free it.
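+ * kfree() accepts a NULL pointer, which covers the case where
+ * userspace supplied no userid and @csv_blob was never allocated.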
*/
+ kfree(csv_blob);
+ } else {
+ asid = sev_asid_new(sev, NULL, 0);
+ }
+#else
asid = sev_asid_new(sev);
+#endif
+
if (asid < 0) goto e_no_asid; sev->asid = asid; @@ -407,6 +516,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, unsigned long locked, lock_limit; struct page **pages; unsigned long first, last;
+ unsigned int flags = 0;
int ret; lockdep_assert_held(&kvm->lock); @@ -439,8 +549,10 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, if (!pages) return ERR_PTR(-ENOMEM);
+ flags = write ? FOLL_WRITE : 0;
+
/* Pin the user virtual address. */
- npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+ npinned = pin_user_pages_fast(uaddr, npages, flags | FOLL_LONGTERM, pages);
if (npinned != npages) { pr_err("SEV: Failure locking %lu pages.\n", npages); ret = -ENOMEM; @@ -672,6 +784,18 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set. */ svm_enable_lbrv(vcpu);
+
+ /*
+ * Back up the encrypted VMSA to support rebooting the CSV2 guest.
+ * The clflush_cache_range() is necessary to invalidate the prefetched
+ * memory area pointed to by svm->sev_es.vmsa so that we can read the
+ * fresh memory updated by the PSP.
+ */
+ if (is_x86_vendor_hygon()) {
+ clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
+ csv2_sync_reset_vmsa(svm);
+ }
+
return 0; } @@ -2198,6 +2322,22 @@ void __init sev_set_cpu_caps(void) kvm_cpu_cap_clear(X86_FEATURE_SEV_ES); }
+#ifdef CONFIG_HYGON_CSV
+/* Code to set all of the function and variable pointers */
+static void sev_install_hooks(void)
+{
+ hygon_kvm_hooks.sev_enabled = &sev_enabled;
+ hygon_kvm_hooks.sev_me_mask = &sev_me_mask;
+ hygon_kvm_hooks.sev_issue_cmd = sev_issue_cmd;
+ hygon_kvm_hooks.get_num_contig_pages = get_num_contig_pages;
+ hygon_kvm_hooks.sev_pin_memory = sev_pin_memory;
+ hygon_kvm_hooks.sev_unpin_memory = sev_unpin_memory;
+ hygon_kvm_hooks.sev_clflush_pages = sev_clflush_pages;
+
+ hygon_kvm_hooks.sev_hooks_installed = true;
+}
+#endif
+
void __init sev_hardware_setup(void) { #ifdef CONFIG_KVM_AMD_SEV @@ -2278,31 +2418,57 @@ void __init sev_hardware_setup(void) goto out; }
- /* Has the system been allocated ASIDs for SEV-ES? */
- if (min_sev_asid == 1)
- goto out;
+ if (is_x86_vendor_hygon()) {
+ /*
+ * The ASIDs from 1 to max_sev_asid are available for Hygon
+ * CSV2 guests.
+ */
+ sev_es_asid_count = max_sev_asid;
+ } else {
+ /* Has the system been allocated ASIDs for SEV-ES? */
+ if (min_sev_asid == 1)
+ goto out;
- sev_es_asid_count = min_sev_asid - 1;
+ sev_es_asid_count = min_sev_asid - 1;
+ }
WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count)); sev_es_supported = true; out: if (boot_cpu_has(X86_FEATURE_SEV))
- pr_info("SEV %s (ASIDs %u - %u)\n",
+ pr_info("%s %s (ASIDs %u - %u)\n",
+ is_x86_vendor_hygon() ? "CSV" : "SEV",
sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" : "unusable" : "disabled", min_sev_asid, max_sev_asid); if (boot_cpu_has(X86_FEATURE_SEV_ES))
- pr_info("SEV-ES %s (ASIDs %u - %u)\n",
+ pr_info("%s %s (ASIDs %u - %u)\n",
+ is_x86_vendor_hygon() ? "CSV2" : "SEV-ES",
sev_es_supported ? "enabled" : "disabled",
- min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
+ is_x86_vendor_hygon() ? 1 : (min_sev_asid > 1 ? 1 : 0),
+ is_x86_vendor_hygon() ?
max_sev_asid : min_sev_asid - 1); sev_enabled = sev_supported; sev_es_enabled = sev_es_supported; if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) || !cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP)) sev_es_debug_swap_enabled = false; + +#ifdef CONFIG_HYGON_CSV + /* Setup resources which are necessary for HYGON CSV */ + if (is_x86_vendor_hygon()) { + /* + * Install sev related function and variable pointers hooks + * no matter @sev_enabled is false. + */ + sev_install_hooks(); + + if (sev_enabled) + csv_hardware_setup(max_sev_asid); + } +#endif + #endif } @@ -2311,6 +2477,9 @@ void sev_hardware_unsetup(void) if (!sev_enabled) return; + if (is_x86_vendor_hygon()) + csv_hardware_unsetup(); + /* No need to take sev_bitmap_lock, all VMs have been destroyed. */ sev_flush_asids(1, max_sev_asid); @@ -2395,8 +2564,14 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu) __free_page(virt_to_page(svm->sev_es.vmsa)); + if (svm->sev_es.ghcb) + kvm_vcpu_unmap(vcpu, &svm->sev_es.ghcb_map, false); + if (svm->sev_es.ghcb_sa_free) kvfree(svm->sev_es.ghcb_sa); + + if (is_x86_vendor_hygon()) + csv2_free_reset_vmsa(svm); } static void dump_ghcb(struct vcpu_svm *svm) @@ -2662,6 +2837,13 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) /* Assign the asid allocated with this SEV guest */ svm->asid = asid; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* If ASID is shared with other guests, then flush TLB before VMRUN */ + if (is_x86_vendor_hygon() && csv_asid_userid_array && + csv_asid_userid_array[asid].userid_len) + svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; +#endif + /* * Flush guest TLB: * diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 413f1f2aadd1a3132e41ecd0bb89861c966eb9b9..1b1d332216a48e8d2e666d397d2954267b347f92 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -41,6 +42,7 @@ #include #include #include +#include #include @@ -48,6 +50,7 @@ #include "svm.h" #include "svm_ops.h" +#include "csv.h" #include "kvm_onhyperv.h" #include "svm_onhyperv.h" @@ -547,7 +550,10 @@ static bool __kvm_is_svm_supported(void) } if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { - pr_info("KVM is unsupported when running as an SEV guest\n"); + if (is_x86_vendor_hygon()) + pr_info("KVM is unsupported when running as an CSV guest\n"); + else + pr_info("KVM is unsupported when running as an SEV guest\n"); return false; } @@ -1458,6 +1464,11 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) if (!vmsa_page) goto error_free_vmcb_page; + if (is_x86_vendor_hygon()) { + if (csv2_setup_reset_vmsa(svm)) + goto error_free_vmsa_page; + } + /* * SEV-ES guests maintain an encrypted version of their FPU * state which is restored and saved on VMRUN and VMEXIT. 
@@ -1493,6 +1504,9 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) error_free_vmsa_page: if (vmsa_page) __free_page(vmsa_page);
+
+ if (is_x86_vendor_hygon())
+ csv2_free_reset_vmsa(svm);
error_free_vmcb_page: __free_page(vmcb01_page); out: @@ -2956,6 +2970,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_DE_CFG: msr_info->data = svm->msr_decfg; break;
+ case MSR_AMD64_SEV_ES_GHCB:
+ /* HYGON CSV2 supports exporting this MSR to userspace */
+ if (is_x86_vendor_hygon())
+ return csv_get_msr(vcpu, msr_info);
+ else
+ return 1;
default: return kvm_get_msr_common(vcpu, msr_info); } @@ -3197,6 +3217,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->msr_decfg = data; break; }
+ case MSR_AMD64_SEV_ES_GHCB:
+ /* HYGON CSV2 supports updating this MSR from userspace */
+ if (is_x86_vendor_hygon())
+ return csv_set_msr(vcpu, msr);
+ else
+ return 1;
default: return kvm_set_msr_common(vcpu, msr); } @@ -4163,6 +4189,19 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) trace_kvm_entry(vcpu);
+
+ /*
+ * On the recipient side of a CSV2 guest, fake the exit code as
+ * SVM_EXIT_ERR and return directly if mapping the necessary GHCB
+ * page failed. When the exit code is handled afterwards, KVM can
+ * exit to userspace and stop the guest.
+ */
+ if (is_x86_vendor_hygon() && sev_es_guest(vcpu->kvm)) {
+ if (csv2_state_unstable(svm)) {
+ svm->vmcb->control.exit_code = SVM_EXIT_ERR;
+ return EXIT_FASTPATH_NONE;
+ }
+ }
+
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; @@ -4337,6 +4376,12 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) if (kvm && sev_es_guest(kvm)) return false; break;
+ case MSR_AMD64_SEV_ES_GHCB:
+ /* HYGON CSV2 supports emulating this MSR */
+ if (is_x86_vendor_hygon())
+ return csv_has_emulated_ghcb_msr(kvm);
+ else
+ return false;
default: break; } @@ -4938,6 +4983,36 @@ static int svm_vm_init(struct kvm *kvm) return 0; }
+static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3)
+{
+ int ret = 0;
+ struct kvm_vpsp vpsp = {
+ .kvm = kvm,
+ .write_guest = kvm_write_guest,
+ .read_guest = kvm_read_guest,
+ .gfn_to_pfn = gfn_to_pfn,
+ };
+
+ if (sev_guest(kvm)) {
+ vpsp.vm_handle = to_kvm_svm(kvm)->sev_info.handle;
+ vpsp.is_csv_guest = 1;
+ }
+
+ switch (nr) {
+ case KVM_HC_PSP_COPY_FORWARD_OP:
+ ret = kvm_pv_psp_copy_forward_op(&vpsp, a0, a1, a2);
+ break;
+
+ case KVM_HC_PSP_FORWARD_OP:
+ ret = kvm_pv_psp_forward_op(&vpsp, a0, a1, a2);
+ break;
+ default:
+ ret = -KVM_ENOSYS;
+ break;
+ }
+ return ret;
+}
+
static struct kvm_x86_ops svm_x86_ops __initdata = { .name = KBUILD_MODNAME, @@ -5069,6 +5144,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector, .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
+
+ .arch_hypercall = kvm_hygon_arch_hypercall,
}; /* @@ -5366,6 +5443,10 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = { static void __svm_exit(void) {
+ /* Unregister the CSV specific interface for Hygon CPUs */
+ if (is_x86_vendor_hygon())
+ csv_exit();
+
kvm_x86_vendor_exit(); cpu_emergency_unregister_virt_callback(svm_emergency_disable); @@ -5380,9 +5461,21 @@ static int __init svm_init(void) if (!kvm_is_svm_supported()) return -EOPNOTSUPP;
+ /* Register the CSV specific interface for Hygon CPUs */
+ if (is_x86_vendor_hygon())
+ csv_init(&svm_x86_ops);
+
r =
kvm_x86_vendor_init(&svm_init_ops); - if (r) + if (r) { + /* + * Unregister CSV specific interface for Hygon CPUs + * if error occurs. + */ + if (is_x86_vendor_hygon()) + csv_exit(); + return r; + } cpu_emergency_register_virt_callback(svm_emergency_disable); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 37ada9808d9b577a27cb6b8e89dcca941b778cd4..aaf945935ff7ca9e61384c158022f80aef550473 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -203,6 +203,13 @@ struct vcpu_sev_es_state { u32 ghcb_sa_len; bool ghcb_sa_sync; bool ghcb_sa_free; + +#ifdef CONFIG_HYGON_CSV + /* migrated ghcb mapping state for HYGON CSV2 */ + bool receiver_ghcb_map_fail; + /* CSV2 reboot vmsa */ + struct vmcb_save_area *reset_vmsa; +#endif }; struct vcpu_svm { diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 41a4533f9989748c9226255e6aa7c043ced40ecc..631e65a212285073924d90df28ea1ee981efe747 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -60,6 +60,7 @@ struct vmcs_config { u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; + u32 zx_cpu_based_3rd_exec_ctrl; u64 cpu_based_3rd_exec_ctrl; u32 vmexit_ctrl; u32 vmentry_ctrl; @@ -255,6 +256,12 @@ static inline bool cpu_has_vmx_xsaves(void) SECONDARY_EXEC_ENABLE_XSAVES; } +static inline bool cpu_has_vmx_zxpause(void) +{ + return vmcs_config.zx_cpu_based_3rd_exec_ctrl & + ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; +} + static inline bool cpu_has_vmx_waitpkg(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d3e346a574f11b6ccf5d055152fb067155461f70..4872ac288f51431bf7f75e6ca7140f2ae287f88f 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -1086,7 +1086,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, bool reload_pdptrs, enum vm_entry_failure_code *entry_failure_code) { - if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) { + if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3))) { *entry_failure_code = ENTRY_FAIL_DEFAULT; return -EINVAL; } @@ -2732,7 +2732,7 @@ static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) } /* Reserved bits should not be set */ - if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) + if (CC(!kvm_vcpu_is_legal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) return false; /* AD, if set, should be supported */ @@ -2927,7 +2927,7 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || - CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3))) + CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3))) return -EINVAL; if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || @@ -5027,6 +5027,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, else *ret = off; + *ret = vmx_get_untagged_addr(vcpu, *ret, 0); /* Long mode: #GP(0)/#SS(0) if the memory address is in a * non-canonical form. This is the only check on the memory * destination for long mode! @@ -5850,6 +5851,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) vpid02 = nested_get_vpid02(vcpu); switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: + /* + * LAM doesn't apply to addresses that are inputs to TLB + * invalidation. 
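+	 * The guest linear address operand must therefore still be
+	 * canonical; a tagged GLA fails the is_noncanonical_address()
+	 * check below and the instruction is failed back to the
+	 * nested guest.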
+ */ if (!operand.vpid || is_noncanonical_address(operand.gla, vcpu)) return nested_vmx_fail(vcpu, diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 3e822e58249753c5ff267b5ce4f69e2ede9a00c9..6fef01e0536e5079c34e6efc6b64716119c10c4e 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -37,6 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, if (!IS_ALIGNED(*gva, alignment)) { fault = true; } else if (likely(is_64_bit_mode(vcpu))) { + *gva = vmx_get_untagged_addr(vcpu, *gva, 0); fault = is_noncanonical_address(*gva, vcpu); } else { *gva &= 0xffffffff; diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h index 7c1996b433e262fa0c67d06a7a71baa6304db476..4eabed8e5813af72bac6e9d9377747cdf3a219fa 100644 --- a/arch/x86/kvm/vmx/vmcs.h +++ b/arch/x86/kvm/vmx/vmcs.h @@ -50,7 +50,9 @@ struct vmcs_controls_shadow { u32 pin; u32 exec; u32 secondary_exec; + u32 zx_tertiary_exec; u64 tertiary_exec; + u64 zx_vmexit_tsc; }; /* diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 479ef26626f2fe49722f890ba372b2a86935f950..a21bff07374c913165c8752691a2d5ed2f7019ba 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -218,6 +218,8 @@ int __read_mostly pt_mode = PT_MODE_SYSTEM; module_param(pt_mode, int, S_IRUGO); #endif +static u32 zx_ext_vmcs_cap; + static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); static DEFINE_MUTEX(vmx_l1d_flush_mutex); @@ -2019,7 +2021,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_UMWAIT_CONTROL: if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) return 1; - + msr_info->data = vmx->msr_ia32_umwait_control; + break; + case MSR_ZX_PAUSE_CONTROL: + if (!msr_info->host_initiated && !vmx_guest_zxpause_enabled(vmx)) + return 1; msr_info->data = vmx->msr_ia32_umwait_control; break; case MSR_IA32_SPEC_CTRL: @@ -2279,7 +2285,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* The reserved bit 1 and non-32 bit [63:32] should be zero */ if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) return 1; + vmx->msr_ia32_umwait_control = data; + break; + case MSR_ZX_PAUSE_CONTROL: + if (!msr_info->host_initiated && !vmx_guest_zxpause_enabled(vmx)) + return 1; + /* The reserved bit 1 and non-32 bit [63:32] should be zero */ + if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) + return 1; vmx->msr_ia32_umwait_control = data; break; case MSR_IA32_SPEC_CTRL: @@ -2737,6 +2751,10 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf, vmcs_conf->vmentry_ctrl = _vmentry_control; vmcs_conf->misc = misc_msr; + /* Setup Zhaoxin exec-cntl3 VMCS field. 
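+	 * The control is only usable when the extended-VMCS capability
+	 * word cached in zx_ext_vmcs_cap advertises it.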
*/
+ if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3)
+ vmcs_conf->zx_cpu_based_3rd_exec_ctrl |= ZX_TERTIARY_EXEC_GUEST_ZXPAUSE;
+
#if IS_ENABLED(CONFIG_HYPERV) if (enlightened_vmcs) evmcs_sanitize_exec_ctrls(vmcs_conf); @@ -3413,7 +3431,8 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, update_guest_cr3 = false; vmx_ept_load_pdptrs(vcpu); } else {
- guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu);
+ guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) |
+ kvm_get_active_cr3_lam_bits(vcpu);
} if (update_guest_cr3) @@ -4529,6 +4548,28 @@ static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx) return exec_control; }
+static u32 vmx_zx_tertiary_exec_control(struct vcpu_vmx *vmx)
+{
+ struct kvm_vcpu *vcpu = &vmx->vcpu;
+ u32 exec_control = vmcs_config.zx_cpu_based_3rd_exec_ctrl;
+
+ /*
+ * Log an error if QEMU wants to enable guest_zxpause while
+ * VMX does not support it.
+ */
+ if (guest_cpuid_has(vcpu, X86_FEATURE_ZXPAUSE)) {
+ if (!cpu_has_vmx_zxpause())
+ pr_err("VMX does not support guest_zxpause!\n");
+ else
+ exec_control |= ZX_TERTIARY_EXEC_GUEST_ZXPAUSE;
+ } else
+ exec_control &= ~ZX_TERTIARY_EXEC_GUEST_ZXPAUSE;
+
+ /* enable other features here */
+
+ return exec_control;
+}
+
/* * Adjust a single secondary execution control bit to intercept/allow an * instruction in the guest. This is usually done based on whether or not a @@ -4735,6 +4776,11 @@ static void init_vmcs(struct vcpu_vmx *vmx) if (cpu_has_secondary_exec_ctrls()) secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
+ if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) {
+ zx_tertiary_exec_controls_set(vmx, vmx_zx_tertiary_exec_control(vmx));
+ zx_vmexit_tsc_controls_set(vmx, 0);
+ }
+
if (cpu_has_tertiary_exec_ctrls()) tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx)); @@ -5800,7 +5846,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) * would also use advanced VM-exit information for EPT violations to * reconstruct the page fault error code. */
- if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
+ if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
return kvm_emulate_instruction(vcpu, 0); return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); @@ -6269,6 +6315,13 @@ void dump_vmcs(struct kvm_vcpu *vcpu) else tertiary_exec_control = 0;
+ pr_err("*** Zhaoxin Specific Fields ***\n");
+ if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) {
+ pr_err("Zhaoxin TertiaryExec Cntl = 0x%016x\n",
+ vmcs_read32(ZX_TERTIARY_VM_EXEC_CONTROL));
+ pr_err("ZXPAUSE Saved TSC = 0x%016llx\n", vmcs_read64(ZXPAUSE_VMEXIT_TSC));
+ }
+
pr_err("VMCS %p, last attempted VM-entry on CPU %d\n", vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu); pr_err("*** Guest State ***\n"); @@ -7025,6 +7078,7 @@ static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index) return nested; case MSR_AMD64_VIRT_SPEC_CTRL: case MSR_AMD64_TSC_RATIO:
+ case MSR_AMD64_SEV_ES_GHCB: /* This is AMD only.
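+	 * (Hygon CSV2 emulates it via SVM's csv_has_emulated_ghcb_msr();
+	 * on VMX it is never emulated.)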
*/ return false; default: @@ -7698,6 +7752,9 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP)); cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57)); + entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1); + cr4_fixed1_update(X86_CR4_LAM_SUP, eax, feature_bit(LAM)); + #undef cr4_fixed1_update } @@ -7784,6 +7841,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES); kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM); vmx_setup_uret_msrs(vmx); @@ -7791,6 +7849,11 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vmcs_set_secondary_exec_control(vmx, vmx_secondary_exec_control(vmx)); + if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) { + zx_tertiary_exec_controls_set(vmx, vmx_zx_tertiary_exec_control(vmx)); + zx_vmexit_tsc_controls_set(vmx, 0); + } + if (guest_can_use(vcpu, X86_FEATURE_VMX)) vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX | @@ -7941,6 +8004,9 @@ static __init void vmx_set_cpu_caps(void) if (cpu_has_vmx_waitpkg()) kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); + + if (cpu_has_vmx_zxpause()) + kvm_cpu_cap_check_and_set(X86_FEATURE_ZXPAUSE); } static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) @@ -8250,6 +8316,50 @@ static void vmx_vm_destroy(struct kvm *kvm) free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm)); } +/* + * Note, the SDM states that the linear address is masked *after* the modified + * canonicality check, whereas KVM masks (untags) the address and then performs + * a "normal" canonicality check. Functionally, the two methods are identical, + * and when the masking occurs relative to the canonicality check isn't visible + * to software, i.e. KVM's behavior doesn't violate the SDM. + */ +gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags) +{ + int lam_bit; + unsigned long cr3_bits; + + if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG)) + return gva; + + if (!is_64_bit_mode(vcpu)) + return gva; + + /* + * Bit 63 determines if the address should be treated as user address + * or a supervisor address. + */ + if (!(gva & BIT_ULL(63))) { + cr3_bits = kvm_get_active_cr3_lam_bits(vcpu); + if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48))) + return gva; + + /* LAM_U48 is ignored if LAM_U57 is set. */ + lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47; + } else { + if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP)) + return gva; + + lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47; + } + + /* + * Untag the address by sign-extending the lam_bit, but NOT to bit 63. + * Bit 63 is retained from the raw virtual address so that untagging + * doesn't change a user access to a supervisor access, and vice versa. + */ + return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63)); +} + static struct kvm_x86_ops vmx_x86_ops __initdata = { .name = KBUILD_MODNAME, @@ -8390,6 +8500,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .complete_emulated_msr = kvm_complete_insn_gp, .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector, + + .get_untagged_addr = vmx_get_untagged_addr, }; static unsigned int vmx_handle_intel_pt_intr(void) @@ -8464,6 +8576,12 @@ static __init int hardware_setup(void) unsigned long host_bndcfgs; struct desc_ptr dt; int r; + u32 ign; + + /* Caches Zhaoxin extend VMCS capabilities. 
*/ + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) + rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &zx_ext_vmcs_cap, &ign); store_idt(&dt); host_idt_base = dt.address; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 6be1627d888e5a3fbcefe5a8fb2a9034294aaa30..d5b654770d4b34af31cfbc6842bfb76988e81935 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -421,6 +421,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type); u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu); u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu); +gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags); + static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool value) { @@ -579,6 +581,17 @@ static inline u8 vmx_get_rvi(void) #define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL \ (TERTIARY_EXEC_IPI_VIRT) +#define KVM_REQUIRED_VMX_ZX_TERTIARY_VM_EXEC_CONTROL 0 +#define KVM_OPTIONAL_VMX_ZX_TERTIARY_VM_EXEC_CONTROL \ + (ZX_TERTIARY_EXEC_GUEST_ZXPAUSE) + +/* + * We shouldn't rw zxpause_vmexit_tsc vmcs field in this + * way, try to use another way in the future. + */ +#define KVM_REQUIRED_VMX_ZXPAUSE_VMEXIT_TSC 0 +#define KVM_OPTIONAL_VMX_ZXPAUSE_VMEXIT_TSC 1 + #define BUILD_CONTROLS_SHADOW(lname, uname, bits) \ static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \ { \ @@ -611,6 +624,8 @@ BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64) +BUILD_CONTROLS_SHADOW(zx_tertiary_exec, ZX_TERTIARY_VM_EXEC_CONTROL, 32) +BUILD_CONTROLS_SHADOW(zx_vmexit_tsc, ZXPAUSE_VMEXIT_TSC, 64) /* * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the @@ -713,6 +728,12 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; } +static inline bool vmx_guest_zxpause_enabled(struct vcpu_vmx *vmx) +{ + return zx_tertiary_exec_controls_get(vmx) & + ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; +} + static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu) { if (!enable_ept) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index dcd0c12c308e59f8f5cfd308eaf83a18233eb133..a1c763bb697f7d1a1b02768f4ff1ac3beba4491c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -86,6 +86,8 @@ #include #include +#include + #define CREATE_TRACE_POINTS #include "trace.h" @@ -1284,7 +1286,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) * stuff CR3, e.g. for RSM emulation, and there is no guarantee that * the current vCPU mode is accurate. 
*/
- if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
+ if (!kvm_vcpu_is_legal_cr3(vcpu, cr3))
return 1; if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3)) @@ -1462,8 +1464,8 @@ static const u32 msrs_to_save_base[] = { MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, MSR_IA32_UMWAIT_CONTROL,
-
MSR_IA32_XFD, MSR_IA32_XFD_ERR,
+ MSR_ZX_PAUSE_CONTROL,
}; static const u32 msrs_to_save_pmu[] = { @@ -1564,6 +1566,8 @@ static const u32 emulated_msrs_all[] = { MSR_K7_HWCR, MSR_KVM_POLL_CONTROL,
+
+ MSR_AMD64_SEV_ES_GHCB,
}; static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; @@ -1704,22 +1708,17 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) struct kvm_msr_entry msr; int r;
+ /* Unconditionally clear the output for simplicity */
+ msr.data = 0;
msr.index = index; r = kvm_get_msr_feature(&msr);
- if (r == KVM_MSR_RET_INVALID) {
- /* Unconditionally clear the output for simplicity */
- *data = 0;
- if (kvm_msr_ignored_check(index, 0, false))
- r = 0;
- }
-
- if (r)
- return r;
+ if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
+ r = 0;
*data = msr.data;
- return 0;
+ return r;
} static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) @@ -4636,6 +4635,29 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_X86_NOTIFY_VMEXIT: r = kvm_caps.has_notify_vmexit; break;
+ case KVM_CAP_SEV_ES_GHCB:
+ r = 0;
+
+ /* Both CSV2 and SEV-ES guests support MSR_AMD64_SEV_ES_GHCB,
+ * but only CSV2 guests support exporting it to userspace for
+ * emulation.
+ */
+ if (is_x86_vendor_hygon())
+ r = static_call(kvm_x86_has_emulated_msr)(kvm,
+ MSR_AMD64_SEV_ES_GHCB);
+ break;
+ case KVM_CAP_HYGON_COCO_EXT:
+ r = 0;
+
+ /*
+ * Before running a Hygon confidential guest, userspace should
+ * query the advanced extensions of the Hygon CSV technology.
+ * If userspace recognizes the extensions, it is suggested
+ * that it enable them.
+ */
+ if (is_x86_vendor_hygon() && kvm_x86_ops.get_hygon_coco_extension)
+ r = static_call(kvm_x86_get_hygon_coco_extension)(kvm);
+ break;
default: break; } @@ -6513,6 +6535,17 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, } mutex_unlock(&kvm->lock); break;
+ case KVM_CAP_HYGON_COCO_EXT:
+ r = -EINVAL;
+
+ /*
+ * Userspace negotiates with KVM to enable extensions of the
+ * Hygon CSV technology.
+ */ + if (is_x86_vendor_hygon() && kvm_x86_ops.enable_hygon_coco_extension) + r = static_call(kvm_x86_enable_hygon_coco_extension)(kvm, + (u32)cap->args[0]); + break; default: r = -EINVAL; break; @@ -7107,6 +7140,18 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); break; } + case KVM_CONTROL_PRE_SYSTEM_RESET: + if (kvm_x86_ops.control_pre_system_reset) + r = static_call(kvm_x86_control_pre_system_reset)(kvm); + else + r = -ENOTTY; + break; + case KVM_CONTROL_POST_SYSTEM_RESET: + if (kvm_x86_ops.control_post_system_reset) + r = static_call(kvm_x86_control_post_system_reset)(kvm); + else + r = -ENOTTY; + break; default: r = -ENOTTY; } @@ -7151,6 +7196,10 @@ static void kvm_probe_msr_to_save(u32 msr_index) if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) return; break; + case MSR_ZX_PAUSE_CONTROL: + if (!kvm_cpu_cap_has(X86_FEATURE_ZXPAUSE)) + return; + break; case MSR_IA32_RTIT_CTL: case MSR_IA32_RTIT_STATUS: if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) @@ -8337,6 +8386,15 @@ static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt) kvm_vm_bugged(kvm); } +static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt, + gva_t addr, unsigned int flags) +{ + if (!kvm_x86_ops.get_untagged_addr) + return addr; + + return static_call(kvm_x86_get_untagged_addr)(emul_to_vcpu(ctxt), addr, flags); +} + static const struct x86_emulate_ops emulate_ops = { .vm_bugged = emulator_vm_bugged, .read_gpr = emulator_read_gpr, @@ -8381,6 +8439,7 @@ static const struct x86_emulate_ops emulate_ops = { .leave_smm = emulator_leave_smm, .triple_fault = emulator_triple_fault, .set_xcr = emulator_set_xcr, + .get_untagged_addr = emulator_get_untagged_addr, }; static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) @@ -9860,7 +9919,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) a3 &= 0xFFFFFFFF; } - if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 && + !(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION + || nr == KVM_HC_PSP_OP_OBSOLETE + || nr == KVM_HC_PSP_COPY_FORWARD_OP + || nr == KVM_HC_PSP_FORWARD_OP))) { ret = -KVM_EPERM; goto out; } @@ -9923,6 +9986,18 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) vcpu->arch.complete_userspace_io = complete_hypercall_exit; return 0; } + case KVM_HC_VM_ATTESTATION: + ret = -KVM_ENOSYS; + if (is_x86_vendor_hygon() && kvm_x86_ops.vm_attestation) + ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1); + break; + case KVM_HC_PSP_OP_OBSOLETE: + case KVM_HC_PSP_COPY_FORWARD_OP: + case KVM_HC_PSP_FORWARD_OP: + ret = -KVM_ENOSYS; + if (kvm_x86_ops.arch_hypercall) + ret = static_call(kvm_x86_arch_hypercall)(vcpu->kvm, nr, a0, a1, a2, a3); + break; default: ret = -KVM_ENOSYS; break; @@ -11496,7 +11571,7 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) */ if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) return false; - if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) + if (!kvm_vcpu_is_legal_cr3(vcpu, sregs->cr3)) return false; } else { /* @@ -11526,21 +11601,23 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, if (kvm_set_apic_base(vcpu, &apic_base_msr)) return -EINVAL; - if (vcpu->arch.guest_state_protected) + if (vcpu->arch.guest_state_protected && !is_x86_vendor_hygon()) return 0; - dt.size = sregs->idt.limit; - dt.address = sregs->idt.base; - static_call(kvm_x86_set_idt)(vcpu, &dt); - dt.size = sregs->gdt.limit; - dt.address = sregs->gdt.base; - 
static_call(kvm_x86_set_gdt)(vcpu, &dt); + if (!vcpu->arch.guest_state_protected) { + dt.size = sregs->idt.limit; + dt.address = sregs->idt.base; + static_call(kvm_x86_set_idt)(vcpu, &dt); + dt.size = sregs->gdt.limit; + dt.address = sregs->gdt.base; + static_call(kvm_x86_set_gdt)(vcpu, &dt); - vcpu->arch.cr2 = sregs->cr2; - *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; - vcpu->arch.cr3 = sregs->cr3; - kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); - static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); + vcpu->arch.cr2 = sregs->cr2; + *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; + vcpu->arch.cr3 = sregs->cr3; + kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); + static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); + } kvm_set_cr8(vcpu, sregs->cr8); @@ -11554,6 +11631,9 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); + if (vcpu->arch.guest_state_protected) + return 0; + if (update_pdptrs) { idx = srcu_read_lock(&vcpu->kvm->srcu); if (is_pae_paging(vcpu)) { @@ -13414,6 +13494,10 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) switch (type) { case INVPCID_TYPE_INDIV_ADDR: + /* + * LAM doesn't apply to addresses that are inputs to TLB + * invalidation. + */ if ((!pcid_enabled && (operand.pcid != 0)) || is_noncanonical_address(operand.gla, vcpu)) { kvm_inject_gp(vcpu, 0); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 1e7be1f6ab299d78a76e76385db159dee679220b..53e883721e7167dfe2875412987a5254b3b0c83c 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -529,6 +529,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type); __reserved_bits |= X86_CR4_VMXE; \ if (!__cpu_has(__c, X86_FEATURE_PCID)) \ __reserved_bits |= X86_CR4_PCIDE; \ + if (!__cpu_has(__c, X86_FEATURE_LAM)) \ + __reserved_bits |= X86_CR4_LAM_SUP; \ __reserved_bits; \ }) diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index f0dae4fb6d071627842fae7321e0eaea10cf558f..b6a951dd34391dc84faa7116cad0cdf5411ab3fc 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -60,5 +60,7 @@ endif lib-y += clear_page_64.o copy_page_64.o lib-y += memmove_64.o memset_64.o lib-y += copy_user_64.o copy_user_uncached_64.o + lib-$(CONFIG_X86_HYGON_LMC_SSE2_ON) += copy_user_sse2.o + lib-$(CONFIG_X86_HYGON_LMC_AVX2_ON) += copy_user_avx2.o lib-y += cmpxchg16b_emu.o endif diff --git a/arch/x86/lib/copy_user_avx2.S b/arch/x86/lib/copy_user_avx2.S new file mode 100644 index 0000000000000000000000000000000000000000..a2a785aaccb217ff1b3deacd8e0cd89ebb1348df --- /dev/null +++ b/arch/x86/lib/copy_user_avx2.S @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2011 Siarhei Siamashka + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PREFETCH_DISTANCE 64 + +#define PREFETCH(addr) prefetchnta addr + +.macro ALIGN_DESTINATION_32 + /* check for bad alignment of destination, there is 32Bytes, for we will use vmovntdq */ + /* if <32Bytes, jb .Lcopy_user_string */ + cmpq $32, %rdx + jb .Lcopy_user_string + + /* + * Adjust unaligned destination addresses, for 32-bit aligned ones, + * only the lower 32 bits need to be checked. + */ + movl %edi, %ecx + andl $31, %ecx + jz .Lcopy_user_string /* already aligned */ + + subl $32, %ecx + negl %ecx + subl %ecx, %edx + +300: + movb (%rsi), %al +301: + movb %al, (%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 300b + jmp .Lcopy_user_string + +303: + addl %ecx,%edx/* ecx is zerorest also */ + jmp Lavx2_copy_user_handle_tail + + _ASM_EXTABLE_UA(300b, 303b) + _ASM_EXTABLE_UA(301b, 303b) + +.Lcopy_user_string: +.endm + +/* + * large block copy, use avx2 nt & prefetchnta + */ +SYM_FUNC_START(copy_user_avx2_pf64_nt_string) + ASM_STAC + ALIGN_DESTINATION_32 + + /* if len < 256 jmp to Lless_than_256_bytes_cpy */ + cmpq $256, %rdx + jb Lless_than_256_bytes_cpy + + /* + * Check if src is aligned, for 32-bit aligned ones, + * only the lower 32 bits need to be checked. 
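+ * (Alignment modulo 32 depends only on the low five address bits,
+ * so the 32-bit test of %esi is as good as testing %rsi.)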
+ */ + movl %esi, %ecx /* check if src is aligned */ + andl $31, %ecx + jnz large_block_nt_unaligned_cpy + +large_block_nt_aligned_cpy: + PREFETCH(PREFETCH_DISTANCE(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 64)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 128)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 192)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 256)(%rsi)) + +32: + vmovdqa 0(%rsi), %ymm0 +33: + vmovdqa 32(%rsi), %ymm1 +34: + vmovdqa 64(%rsi), %ymm2 +35: + vmovdqa 96(%rsi), %ymm3 +36: + vmovdqa 128(%rsi), %ymm4 +37: + vmovdqa 160(%rsi), %ymm5 +38: + vmovdqa 192(%rsi), %ymm6 +39: + vmovdqa 224(%rsi), %ymm7 + +40: + vmovntdq %ymm0, 0(%rdi) +41: + vmovntdq %ymm1, 32(%rdi) +42: + vmovntdq %ymm2, 64(%rdi) +43: + vmovntdq %ymm3, 96(%rdi) +44: + vmovntdq %ymm4, 128(%rdi) +45: + vmovntdq %ymm5, 160(%rdi) +46: + vmovntdq %ymm6, 192(%rdi) +47: + vmovntdq %ymm7, 224(%rdi) + + add $256, %rsi + add $256, %rdi + subq $256, %rdx + cmpq $256, %rdx + jg large_block_nt_aligned_cpy + + vzeroupper + sfence + jmp Lless_than_256_bytes_cpy + +large_block_nt_unaligned_cpy: + PREFETCH(PREFETCH_DISTANCE(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 64)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 128)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 192)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 256)(%rsi)) + +48: + vmovdqu 0(%rsi), %ymm0 +49: + vmovdqu 32(%rsi), %ymm1 +50: + vmovdqu 64(%rsi), %ymm2 +51: + vmovdqu 96(%rsi), %ymm3 +52: + vmovdqu 128(%rsi), %ymm4 +53: + vmovdqu 160(%rsi), %ymm5 +54: + vmovdqu 192(%rsi), %ymm6 +55: + vmovdqu 224(%rsi), %ymm7 + +56: + vmovntdq %ymm0, 0(%rdi) +57: + vmovntdq %ymm1, 32(%rdi) +58: + vmovntdq %ymm2, 64(%rdi) +59: + vmovntdq %ymm3, 96(%rdi) +60: + vmovntdq %ymm4, 128(%rdi) +61: + vmovntdq %ymm5, 160(%rdi) +62: + vmovntdq %ymm6, 192(%rdi) +63: + vmovntdq %ymm7, 224(%rdi) + + add $256, %rsi + add $256, %rdi + subq $256, %rdx + cmpq $256, %rdx + jg large_block_nt_unaligned_cpy + + vzeroupper + sfence + jmp Lless_than_256_bytes_cpy + +88: + vzeroupper + jmp Lavx2_copy_user_handle_tail + + _ASM_EXTABLE_UA(32b, 88b) + _ASM_EXTABLE_UA(33b, 88b) + _ASM_EXTABLE_UA(34b, 88b) + _ASM_EXTABLE_UA(35b, 88b) + _ASM_EXTABLE_UA(36b, 88b) + _ASM_EXTABLE_UA(37b, 88b) + _ASM_EXTABLE_UA(38b, 88b) + _ASM_EXTABLE_UA(39b, 88b) + + _ASM_EXTABLE_UA(40b, 88b) + _ASM_EXTABLE_UA(41b, 88b) + _ASM_EXTABLE_UA(42b, 88b) + _ASM_EXTABLE_UA(43b, 88b) + _ASM_EXTABLE_UA(44b, 88b) + _ASM_EXTABLE_UA(45b, 88b) + _ASM_EXTABLE_UA(46b, 88b) + _ASM_EXTABLE_UA(47b, 88b) + _ASM_EXTABLE_UA(48b, 88b) + _ASM_EXTABLE_UA(49b, 88b) + + _ASM_EXTABLE_UA(50b, 88b) + _ASM_EXTABLE_UA(51b, 88b) + _ASM_EXTABLE_UA(52b, 88b) + _ASM_EXTABLE_UA(53b, 88b) + _ASM_EXTABLE_UA(54b, 88b) + _ASM_EXTABLE_UA(55b, 88b) + _ASM_EXTABLE_UA(56b, 88b) + _ASM_EXTABLE_UA(57b, 88b) + _ASM_EXTABLE_UA(58b, 88b) + _ASM_EXTABLE_UA(59b, 88b) + + _ASM_EXTABLE_UA(60b, 88b) + _ASM_EXTABLE_UA(61b, 88b) + _ASM_EXTABLE_UA(62b, 88b) + _ASM_EXTABLE_UA(63b, 88b) +SYM_FUNC_END(copy_user_avx2_pf64_nt_string) +EXPORT_SYMBOL(copy_user_avx2_pf64_nt_string) + +/* + * If len < 256 bytes, then we use rep mov directly. + * + * Input: + * rdi destination + * rsi source + * edx len + * + * Output: + * eax uncopied bytes or 0 if successful. + */ +SYM_CODE_START_LOCAL(Lless_than_256_bytes_cpy) + movl %edx, %ecx +90: + rep movsb + + xorl %eax,%eax + ASM_CLAC + RET + +99: + mov %ecx,%eax + + ASM_CLAC + RET + + _ASM_EXTABLE_UA(90b, 99b) +SYM_CODE_END(Lless_than_256_bytes_cpy) + +/* + * Try to copy last bytes and clear the rest if needed. 
+ * Since protection fault in copy_from/to_user is not a normal situation, + * it is not necessary to optimize tail handling. + * Don't try to copy the tail if machine check happened + * + * Input: + * rdi destination + * rsi source + * rdx count + * + * Output: + * eax uncopied bytes or 0 if successful. + */ + +SYM_CODE_START_LOCAL(Lavx2_copy_user_handle_tail) + movq %rdx,%rcx + cmp $X86_TRAP_MC,%eax /* check if X86_TRAP_MC */ + je 3f + +1: rep movsb +2: mov %rcx,%rax + + ASM_CLAC + RET + +3: xorl %eax,%eax + ASM_CLAC + RET + + _ASM_EXTABLE_UA(1b, 2b) +SYM_CODE_END(Lavx2_copy_user_handle_tail) + +/* + * Called when task schedule. we call fpu_save_%ymm0_7 to save old + * task's fpu states and we call fpu_restore_%ymm0_7 to restore new + * task's fpu states. + */ +SYM_FUNC_START(fpu_restore_ymm0_7) + vmovdqu 0(%rsi), %ymm0 + vmovdqu 32(%rsi), %ymm1 + vmovdqu 64(%rsi), %ymm2 + vmovdqu 96(%rsi), %ymm3 + vmovdqu 128(%rsi), %ymm4 + vmovdqu 160(%rsi), %ymm5 + vmovdqu 192(%rsi), %ymm6 + vmovdqu 224(%rsi), %ymm7 + + xorl %eax,%eax + RET//ret +SYM_FUNC_END(fpu_restore_ymm0_7) +EXPORT_SYMBOL(fpu_restore_ymm0_7) + +SYM_FUNC_START(fpu_save_ymm0_7) + vmovdqu %ymm0, 0(%rdi) + vmovdqu %ymm1, 32(%rdi) + vmovdqu %ymm2, 64(%rdi) + vmovdqu %ymm3, 96(%rdi) + vmovdqu %ymm4, 128(%rdi) + vmovdqu %ymm5, 160(%rdi) + vmovdqu %ymm6, 192(%rdi) + vmovdqu %ymm7, 224(%rdi) + + xorl %eax,%eax + RET +SYM_FUNC_END(fpu_save_ymm0_7) +EXPORT_SYMBOL(fpu_save_ymm0_7) diff --git a/arch/x86/lib/copy_user_sse2.S b/arch/x86/lib/copy_user_sse2.S new file mode 100644 index 0000000000000000000000000000000000000000..5422ff03ce2e6ce40ab88d24f6d6f7d92dfc5ea2 --- /dev/null +++ b/arch/x86/lib/copy_user_sse2.S @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2011 Siarhei Siamashka + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PREFETCH_DISTANCE 256 + +.macro ALIGN_DESTINATION_16 + /* check for bad alignment of destination, there is 16Bytes, for we will use movdqa */ + /* if len<16Bytes, jb .Lcopy_user_string */ + cmpq $16,%rdx + jb .Lcopy_user_string + + /* + * Adjust unaligned destination addresses, for 16-bit aligned ones, + * only the lower 32 bits need to be checked. 
+ */ + movl %edi,%ecx + andl $15,%ecx + jz .Lcopy_user_string /* already aligned */ + + subl $16,%ecx + negl %ecx + subl %ecx,%edx + +200: + movb (%rsi),%al +201: + movb %al,(%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 200b + jmp .Lcopy_user_string + +203: + addl %ecx,%edx/* ecx is zerorest also */ + jmp Lsse2_copy_user_handle_tail + + _ASM_EXTABLE_UA(200b, 203b) + _ASM_EXTABLE_UA(201b, 203b) + +.Lcopy_user_string: +.endm +/*****************************************************************************/ +SYM_FUNC_START(copy_user_sse2_opt_string) + ASM_STAC + ALIGN_DESTINATION_16 + + cmpq $64,%rdx + jb 70f /* less then 64 bytes, avoid the costly 'rep' */ + + /* + * Check if src is aligned, for 16-bit aligned ones, + * only the lower 32 bits need to be checked. + */ + movl %esi,%ecx + andl $15,%ecx + jnz 20f + +10: + prefetchnta PREFETCH_DISTANCE(%rsi) +11: + prefetchnta (PREFETCH_DISTANCE + 32)(%rsi) +12: + movdqa (%rsi),%xmm0 +13: + movdqa 16(%rsi),%xmm1 +14: + movdqa 32(%rsi),%xmm2 +15: + movdqa 48(%rsi),%xmm3 +16: + movntdq %xmm0,0(%rdi) +17: + movntdq %xmm1,16(%rdi) +18: + movntdq %xmm2,32(%rdi) +19: + movntdq %xmm3,48(%rdi) + add $64,%rsi + add $64,%rdi + subq $64,%rdx + cmpq $64,%rdx + jg 10b + sfence + jmp 70f + +20: + prefetchnta PREFETCH_DISTANCE(%rsi) +21: + prefetchnta (PREFETCH_DISTANCE + 32)(%rsi) +22: + movdqu (%rsi),%xmm0 +23: + movdqu 16(%rsi),%xmm1 +24: + movdqu 32(%rsi),%xmm2 +25: + movdqu 48(%rsi),%xmm3 +26: + movntdq %xmm0,0(%rdi) +27: + movntdq %xmm1,16(%rdi) +28: + movntdq %xmm2,32(%rdi) +29: + movntdq %xmm3,48(%rdi) + add $64,%rsi + add $64,%rdi + subq $64,%rdx + cmpq $64,%rdx + jg 20b + sfence + +70: + movl %edx,%ecx +80: + rep + movsb + + xorl %eax,%eax + ASM_CLAC + RET//ret + +99: + movl %ecx,%edx /* ecx is zerorest also */ +100: + sfence + jmp Lsse2_copy_user_handle_tail + + _ASM_EXTABLE_UA(10b, 100b) + _ASM_EXTABLE_UA(11b, 100b) + _ASM_EXTABLE_UA(12b, 100b) + _ASM_EXTABLE_UA(13b, 100b) + _ASM_EXTABLE_UA(14b, 100b) + _ASM_EXTABLE_UA(15b, 100b) + _ASM_EXTABLE_UA(16b, 100b) + _ASM_EXTABLE_UA(17b, 100b) + _ASM_EXTABLE_UA(18b, 100b) + _ASM_EXTABLE_UA(19b, 100b) + + _ASM_EXTABLE_UA(20b, 100b) + _ASM_EXTABLE_UA(21b, 100b) + _ASM_EXTABLE_UA(22b, 100b) + _ASM_EXTABLE_UA(23b, 100b) + _ASM_EXTABLE_UA(24b, 100b) + _ASM_EXTABLE_UA(25b, 100b) + _ASM_EXTABLE_UA(26b, 100b) + _ASM_EXTABLE_UA(27b, 100b) + _ASM_EXTABLE_UA(28b, 100b) + _ASM_EXTABLE_UA(29b, 100b) + + _ASM_EXTABLE_UA(80b, 99b) +SYM_FUNC_END(copy_user_sse2_opt_string) +EXPORT_SYMBOL(copy_user_sse2_opt_string) + +SYM_FUNC_START(fpu_restore_xmm0_3) + ASM_STAC + movdqu (%rsi),%xmm0 + movdqu 16(%rsi),%xmm1 + movdqu 32(%rsi),%xmm2 + movdqu 48(%rsi),%xmm3 + + xorl %eax,%eax + ASM_CLAC + RET//ret +SYM_FUNC_END(fpu_restore_xmm0_3) +EXPORT_SYMBOL(fpu_restore_xmm0_3) + +SYM_FUNC_START(fpu_save_xmm0_3) + ASM_STAC + + movdqu %xmm0,(%rdi) + movdqu %xmm1,16(%rdi) + movdqu %xmm2,32(%rdi) + movdqu %xmm3,48(%rdi) + + xorl %eax,%eax + ASM_CLAC + RET//ret +SYM_FUNC_END(fpu_save_xmm0_3) +EXPORT_SYMBOL(fpu_save_xmm0_3) + +/* + * Try to copy last bytes and clear the rest if needed. + * Since protection fault in copy_from/to_user is not a normal situation, + * it is not necessary to optimize tail handling. + * Don't try to copy the tail if machine check happened + * + * Input: + * rdi destination + * rsi source + * rdx count + * + * Output: + * eax uncopied bytes or 0 if successful. + */ +SYM_CODE_START_LOCAL(Lsse2_copy_user_handle_tail) + movq %rdx,%rcx + /* + * The trap number and error code are both 32 bits. 
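+ * The compare below therefore only needs to look at %eax, which is
+ * expected to hold the trap number on entry to this fixup path.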
+ */ + cmp $X86_TRAP_MC,%eax /* check if X86_TRAP_MC */ + je 3f +1: rep movsb +2: mov %rcx,%rax + ASM_CLAC + RET + + /* + * Return zero to pretend that this copy succeeded. This + * is counter-intuitive, but needed to prevent the code + * in lib/iov_iter.c from retrying and running back into + * the poison cache line again. The machine check handler + * will ensure that a SIGBUS is sent to the task. + */ +3: xorl %eax,%eax + ASM_CLAC + RET + + _ASM_EXTABLE_UA(1b, 2b) +SYM_CODE_END(Lsse2_copy_user_handle_tail) + +/*****************************************************************************/ diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 0e65d00e2339ff95e022d03ee9012346d2ead8b3..3946badbd78fd7058a871b3d6779766a8f0c606d 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -117,6 +117,27 @@ static void delay_halt_tpause(u64 start, u64 cycles) __tpause(TPAUSE_C02_STATE, edx, eax); } +/* + * On ZHAOXIN the ZXPAUSE instruction waits until any of: + * 1) the delta of TSC counter exceeds the value provided in EDX:EAX + * 2) global timeout in ZX_PAUSE_CONTROL is exceeded + * 3) an external interrupt occurs + */ +static void delay_halt_zxpause(u64 unused, u64 cycles) +{ + u64 until = cycles; + u32 eax, edx; + + eax = lower_32_bits(until); + edx = upper_32_bits(until); + + /* + * Hard code the deeper (C0.1) sleep state because exit latency is + * small compared to the "microseconds" that usleep() will delay. + */ + __zxpause(ZXPAUSE_C01_STATE, edx, eax); +} + /* * On some AMD platforms, MWAITX has a configurable 32-bit timer, that * counts with TSC frequency. The input value is the number of TSC cycles @@ -183,6 +204,12 @@ void __init use_tpause_delay(void) delay_fn = delay_halt; } +void __init use_zxpause_delay(void) +{ + delay_halt_fn = delay_halt_zxpause; + delay_fn = delay_halt; +} + void use_mwaitx_delay(void) { delay_halt_fn = delay_halt_mwaitx; diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index c80febc44cd2feed47bf3c419accb98b12c1e38b..699cd989f6af363c85df101e718f2430e8697c06 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -67,3 +67,5 @@ obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o + +obj-$(CONFIG_HYGON_CSV) += mem_encrypt_hygon.o diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6529b3e2cff3cc27eb025ac7e5dffedaada28f9c..7add6b93a7b4f2ac704b5ffbaf8f4a82e1adf541 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -828,14 +828,17 @@ bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, static void __bad_area(struct pt_regs *regs, unsigned long error_code, - unsigned long address, u32 pkey, int si_code) + unsigned long address, struct mm_struct *mm, + struct vm_area_struct *vma, u32 pkey, int si_code) { - struct mm_struct *mm = current->mm; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ - mmap_read_unlock(mm); + if (mm) + mmap_read_unlock(mm); + else + vma_end_read(vma); __bad_area_nosemaphore(regs, error_code, address, pkey, si_code); } @@ -859,7 +862,8 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code, static noinline void bad_area_access_error(struct pt_regs *regs, unsigned long error_code, - unsigned long address, struct vm_area_struct *vma) + unsigned long address, struct mm_struct *mm, + struct vm_area_struct *vma) { /* * This OSPKE check is not strictly necessary at runtime. 
@@ -889,9 +893,9 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code, */ u32 pkey = vma_pkey(vma); - __bad_area(regs, error_code, address, pkey, SEGV_PKUERR); + __bad_area(regs, error_code, address, mm, vma, pkey, SEGV_PKUERR); } else { - __bad_area(regs, error_code, address, 0, SEGV_ACCERR); + __bad_area(regs, error_code, address, mm, vma, 0, SEGV_ACCERR); } } @@ -1318,8 +1322,9 @@ void do_user_addr_fault(struct pt_regs *regs, goto lock_mmap; if (unlikely(access_error(error_code, vma))) { - vma_end_read(vma); - goto lock_mmap; + bad_area_access_error(regs, error_code, address, NULL, vma); + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + return; } fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) @@ -1353,7 +1358,7 @@ void do_user_addr_fault(struct pt_regs *regs, * we can handle it.. */ if (unlikely(access_error(error_code, vma))) { - bad_area_access_error(regs, error_code, address, vma); + bad_area_access_error(regs, error_code, address, mm, vma); return; } diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 9f27e14e185f339f58010d859c7941b6c8345f3e..050f77087d8f5c1de542fb0b2f4fe9bf0dcc7622 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -13,6 +13,8 @@ #include #include +#include + /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */ bool force_dma_unencrypted(struct device *dev) { @@ -48,6 +50,11 @@ static void print_mem_encrypt_feature_info(void) return; } + if (is_x86_vendor_hygon()) { + print_hygon_cc_feature_info(); + return; + } + pr_cont(" AMD"); /* Secure Memory Encryption */ diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index 1873a65b5655786e9b05fcfdfc06fd9d4a17e6ee..f7d88ad030b9b6a6b3e1be331eda06cc2bd3d916 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "mm_internal.h" @@ -344,6 +345,14 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc); + /* + * On CSV3, the shared and private page attr changes should be managed + * by secure processor. Private pages live in isolated memory region, + * while shared pages live out of isolated memory region. + */ + if (csv3_active()) + csv_memory_enc_dec(vaddr, npages, enc); + return true; } @@ -377,6 +386,9 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) */ clflush_cache_range(__va(pa), size); + if (csv3_active()) + goto skip_in_place_enc_dec; + /* Encrypt/decrypt the contents in-place */ if (enc) { sme_early_encrypt(pa, size); @@ -390,6 +402,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1); } +skip_in_place_enc_dec: /* Change the page encryption mask. */ new_pte = pfn_pte(pfn, new_prot); set_pte_atomic(kpte, new_pte); @@ -469,6 +482,15 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr, early_set_mem_enc_dec_hypercall(start, size, enc); out: __flush_tlb_all(); + + /* + * On CSV3, the shared and private page attr changes should be managed + * by secure processor. Private pages live in isolated memory region, + * while shared pages live out of isolated memory region. 
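+ * The csv_early_memory_enc_dec() call below hands the conversion
+ * over to the secure processor instead of touching the contents in
+ * place (cf. the skip_in_place_enc_dec path above).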
+ */ + if (csv3_active()) + csv_early_memory_enc_dec(vaddr_end - size, size, enc); + return ret; } diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..52ec3fa041feb72217567e02a8b2551b62833dca --- /dev/null +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -0,0 +1,439 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Memory Encryption Support + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define DISABLE_BRANCH_PROFILING + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +u32 vendor_ebx __section(".data") = 0; +u32 vendor_ecx __section(".data") = 0; +u32 vendor_edx __section(".data") = 0; + +void print_hygon_cc_feature_info(void) +{ + /* Secure Memory Encryption */ + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { + /* + * HYGON SME is mutually exclusive with any of the + * HYGON CSV features below. + */ + pr_info(" HYGON SME"); + return; + } + + /* Secure Encrypted Virtualization */ + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + pr_info(" HYGON CSV"); + + /* Encrypted Register State */ + if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) + pr_info(" HYGON CSV2"); + + if (csv3_active()) + pr_info(" HYGON CSV3"); +} + +/* + * Check whether the host supports CSV3 on Hygon platforms. + * When called in a guest, it always returns false. + */ +static bool __init __maybe_unused csv3_check_cpu_support(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned long me_mask; + u64 msr; + bool csv3_enabled; + + if (!is_x86_vendor_hygon()) + return false; + + if (sev_status) + return false; + + /* Check for the SME/CSV support leaf */ + eax = 0x80000000; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax < 0x8000001f) + return false; + +#define HYGON_SME_BIT BIT(0) +#define HYGON_CSV3_BIT BIT(30) + /* + * Check for the CSV feature: + * CPUID Fn8000_001F[EAX] + * - Bit 0 - SME support + * - Bit 1 - CSV support + * - Bit 3 - CSV2 support + * - Bit 30 - CSV3 support + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (!(eax & HYGON_SME_BIT)) + return false; + + csv3_enabled = !!(eax & HYGON_CSV3_BIT); + + me_mask = 1UL << (ebx & 0x3f); + + /* No SME if Hypervisor bit is set */ + eax = 1; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (ecx & BIT(31)) + return false; + + /* For SME, check the SYSCFG MSR */ + msr = __rdmsr(MSR_AMD64_SYSCFG); + if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) + return false; + + return !!me_mask && csv3_enabled; +} + +/* csv3_active() indicates whether the guest is protected by CSV3 */ +bool noinstr csv3_active(void) +{ + if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) { + u32 eax = 0; + + native_cpuid(&eax, &vendor_ebx, &vendor_ecx, &vendor_edx); + } + + /* HygonGenuine */ + if (vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && + vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && + vendor_edx == CPUID_VENDOR_HygonGenuine_edx) + return !!(sev_status & MSR_CSV3_ENABLED); + else + return false; +} +EXPORT_SYMBOL_GPL(csv3_active); + +/******************************************************************************/ +/**************************** CSV3 CMA interfaces *****************************/
+/******************************************************************************/ + +/* 0 percent of total memory by default*/ +static unsigned char csv_mem_percentage; +static unsigned long csv_mem_size; + +static int __init cmdline_parse_csv_mem_size(char *str) +{ + unsigned long size; + char *endp; + + if (str) { + size = memparse(str, &endp); + csv_mem_size = size; + if (!csv_mem_size) + csv_mem_percentage = 0; + } + + return 0; +} +early_param("csv_mem_size", cmdline_parse_csv_mem_size); + +static int __init cmdline_parse_csv_mem_percentage(char *str) +{ + unsigned char percentage; + int ret; + + if (!str) + return 0; + + ret = kstrtou8(str, 10, &percentage); + if (!ret) { + csv_mem_percentage = min_t(unsigned char, percentage, 80); + if (csv_mem_percentage != percentage) + pr_warn("csv_mem_percentage is limited to 80.\n"); + } else { + /* Disable CSV CMA. */ + csv_mem_percentage = 0; + pr_err("csv_mem_percentage is invalid. (0 - 80) is expected.\n"); + } + + return ret; +} +early_param("csv_mem_percentage", cmdline_parse_csv_mem_percentage); + +#define NUM_SMR_ENTRIES (8 * 1024) +#define CSV_CMA_SHIFT PUD_SHIFT +#define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT) +#define MIN_SMR_ENTRY_SHIFT 23 +#define CSV_SMR_INFO_SIZE (nr_node_ids * sizeof(struct csv_mem)) + +struct csv_mem *csv_smr; +EXPORT_SYMBOL_GPL(csv_smr); + +unsigned int csv_smr_num; +EXPORT_SYMBOL_GPL(csv_smr_num); + +struct csv_cma { + int fast; + struct cma *cma; +}; + +struct cma_array { + unsigned long count; + struct csv_cma csv_cma[]; +}; + +static unsigned int smr_entry_shift; +static struct cma_array *csv_contiguous_pernuma_area[MAX_NUMNODES]; + +static void csv_set_smr_entry_shift(unsigned int shift) +{ + smr_entry_shift = max_t(unsigned int, shift, MIN_SMR_ENTRY_SHIFT); + pr_info("CSV-CMA: SMR entry size is 0x%x\n", 1 << smr_entry_shift); +} + +unsigned int csv_get_smr_entry_shift(void) +{ + return smr_entry_shift; +} +EXPORT_SYMBOL_GPL(csv_get_smr_entry_shift); + +static unsigned long __init present_pages_in_node(int nid) +{ + unsigned long range_start_pfn, range_end_pfn; + unsigned long nr_present = 0; + int i; + + for_each_mem_pfn_range(i, nid, &range_start_pfn, &range_end_pfn, NULL) + nr_present += range_end_pfn - range_start_pfn; + + return nr_present; +} + +static phys_addr_t __init csv_early_percent_memory_on_node(int nid) +{ + return (present_pages_in_node(nid) * csv_mem_percentage / 100) << PAGE_SHIFT; +} + +static void __init csv_cma_reserve_mem(void) +{ + int node, i; + unsigned long size; + int idx = 0; + int count; + int cma_array_size; + unsigned long max_spanned_size = 0; + + csv_smr = memblock_alloc_node(CSV_SMR_INFO_SIZE, SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!csv_smr) { + pr_err("CSV-CMA: Fail to allocate csv_smr\n"); + return; + } + + for_each_node_state(node, N_ONLINE) { + int ret; + char name[CMA_MAX_NAME]; + struct cma_array *array; + unsigned long spanned_size; + unsigned long start = 0, end = 0; + struct csv_cma *csv_cma; + + size = csv_early_percent_memory_on_node(node); + count = DIV_ROUND_UP(size, 1 << CSV_CMA_SHIFT); + if (!count) + continue; + + cma_array_size = count * sizeof(*csv_cma) + sizeof(*array); + array = memblock_alloc_node(cma_array_size, SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!array) { + pr_err("CSV-CMA: Fail to allocate cma_array\n"); + continue; + } + + array->count = 0; + csv_contiguous_pernuma_area[node] = array; + + for (i = 0; i < count; i++) { + csv_cma = &array->csv_cma[i]; + csv_cma->fast = 1; + snprintf(name, sizeof(name), "csv-n%dc%d", node, i); + ret = 
cma_declare_contiguous_nid(0, CSV_CMA_SIZE, 0, + 1 << CSV_MR_ALIGN_BITS, PMD_SHIFT - PAGE_SHIFT, + false, name, &(csv_cma->cma), node); + if (ret) { + pr_warn("CSV-CMA: Fail to reserve memory size 0x%x node %d\n", + 1 << CSV_CMA_SHIFT, node); + break; + } + cma_enable_concurrency(csv_cma->cma); + + if (start > cma_get_base(csv_cma->cma) || !start) + start = cma_get_base(csv_cma->cma); + + if (end < cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma)) + end = cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma); + } + + if (!i) + continue; + + array->count = i; + spanned_size = end - start; + if (spanned_size > max_spanned_size) + max_spanned_size = spanned_size; + + csv_smr[idx].start = start; + csv_smr[idx].size = end - start; + idx++; + + pr_info("CSV-CMA: Node %d - reserve size 0x%016lx, (expected size 0x%016lx)\n", + node, (unsigned long)i * CSV_CMA_SIZE, size); + } + + csv_smr_num = idx; + WARN_ON((max_spanned_size / NUM_SMR_ENTRIES) < 1); + if (likely((max_spanned_size / NUM_SMR_ENTRIES) >= 1)) + csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1); +} + +#define CSV_CMA_AREAS 2458 + +void __init early_csv_reserve_mem(void) +{ + unsigned long total_pages; + + /* Only reserve memory on a host that has the CSV3 feature enabled */ + if (!csv3_check_cpu_support()) + return; + + if (cma_alloc_areas(CSV_CMA_AREAS)) + return; + + total_pages = PHYS_PFN(memblock_phys_mem_size()); + if (csv_mem_size) { + if (csv_mem_size < (total_pages << PAGE_SHIFT)) { + csv_mem_percentage = csv_mem_size * 100 / (total_pages << PAGE_SHIFT); + if (csv_mem_percentage > 80) + csv_mem_percentage = 80; /* Maximum percentage */ + } else + csv_mem_percentage = 80; /* Maximum percentage */ + } + + if (!csv_mem_percentage) { + pr_warn("CSV-CMA: Don't reserve any memory\n"); + return; + } + + csv_cma_reserve_mem(); +} + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) +{ + int nid; + int nr_nodes; + struct page *page = NULL; + phys_addr_t phys_addr; + int count; + struct csv_cma *csv_cma; + int fast = 1; + + if (!nodes_allowed || size > CSV_CMA_SIZE) { + pr_err("CSV-CMA: Invalid params, size = 0x%lx, nodes_allowed = %p\n", + size, nodes_allowed); + return 0; + } + + align = min_t(unsigned int, align, get_order(CSV_CMA_SIZE)); +retry: + nr_nodes = nodes_weight(*nodes_allowed); + + /* Traverse from current node */ + nid = numa_node_id(); + if (!node_isset(nid, *nodes_allowed)) + nid = next_node_in(nid, *nodes_allowed); + + for (; nr_nodes > 0; nid = next_node_in(nid, *nodes_allowed), nr_nodes--) { + struct cma_array *array = csv_contiguous_pernuma_area[nid]; + + if (!array) + continue; + + count = array->count; + while (count) { + csv_cma = &array->csv_cma[count - 1]; + + /* + * The value check of csv_cma->fast is lockless, but + * that's ok as it doesn't affect functional correctness + * whatever the value of csv_cma->fast.
+ */ + if (fast && !csv_cma->fast) { + count--; + continue; + } + page = cma_alloc(csv_cma->cma, PAGE_ALIGN(size) >> PAGE_SHIFT, + align, true); + if (page) { + page->private = (unsigned long)csv_cma; + if (!csv_cma->fast) + csv_cma->fast = 1; + goto success; + } else + csv_cma->fast = 0; + + count--; + } + } + + if (fast) { + fast = 0; + goto retry; + } else { + pr_err("CSV-CMA: Fail to alloc secure memory(size = 0x%lx)\n", size); + return 0; + } + +success: + phys_addr = page_to_phys(page); + clflush_cache_range(__va(phys_addr), size); + + return phys_addr; +} +EXPORT_SYMBOL_GPL(csv_alloc_from_contiguous); + +void csv_release_to_contiguous(phys_addr_t pa, size_t size) +{ + struct csv_cma *csv_cma; + struct page *page = pfn_to_page(pa >> PAGE_SHIFT); + + WARN_ON(!page); + if (likely(page)) { + csv_cma = (struct csv_cma *)page->private; + WARN_ON(!csv_cma); + if (likely(csv_cma)) { + page->private = 0; + csv_cma->fast = 1; + cma_release(csv_cma->cma, page, PAGE_ALIGN(size) >> PAGE_SHIFT); + } + } +} +EXPORT_SYMBOL_GPL(csv_release_to_contiguous); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index a50c99e9b5c01f155a06a1b51cc3cc06dbfe970e..7d631623a9506e613a2b10029577b66b491adc09 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -16,6 +16,7 @@ #include #include #include +#include static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { @@ -48,9 +49,11 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) #ifdef CONFIG_X86_KERNEL_IBT -#define EMIT_ENDBR() EMIT(gen_endbr(), 4) +#define EMIT_ENDBR() EMIT(gen_endbr(), 4) +#define EMIT_ENDBR_POISON() EMIT(gen_endbr_poison(), 4) #else #define EMIT_ENDBR() +#define EMIT_ENDBR_POISON() #endif static bool is_imm8(int value) @@ -335,6 +338,69 @@ static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) *pprog = prog; } +/* + * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT + * in arch/x86/kernel/alternative.c + */ + +static void emit_fineibt(u8 **pprog, u32 hash) +{ + u8 *prog = *pprog; + + EMIT_ENDBR(); + EMIT3_off32(0x41, 0x81, 0xea, hash); /* subl $hash, %r10d */ + EMIT2(0x74, 0x07); /* jz.d8 +7 */ + EMIT2(0x0f, 0x0b); /* ud2 */ + EMIT1(0x90); /* nop */ + EMIT_ENDBR_POISON(); + + *pprog = prog; +} + +static void emit_kcfi(u8 **pprog, u32 hash) +{ + u8 *prog = *pprog; + + EMIT1_off32(0xb8, hash); /* movl $hash, %eax */ +#ifdef CONFIG_CALL_PADDING + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); +#endif + EMIT_ENDBR(); + + *pprog = prog; +} + +static void emit_cfi(u8 **pprog, u32 hash) +{ + u8 *prog = *pprog; + + switch (cfi_mode) { + case CFI_FINEIBT: + emit_fineibt(&prog, hash); + break; + + case CFI_KCFI: + emit_kcfi(&prog, hash); + break; + + default: + EMIT_ENDBR(); + break; + } + + *pprog = prog; +} + /* * Emit x86-64 prologue code for BPF program. * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes @@ -345,10 +411,10 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, { u8 *prog = *pprog; + emit_cfi(&prog, is_subprog ? 
cfi_bpf_subprog_hash : cfi_bpf_hash); /* BPF trampoline can be made to work without these nops, * but let's waste 5 bytes for now and optimize later */ - EMIT_ENDBR(); memcpy(prog, x86_nops[5], X86_PATCH_SIZE); prog += X86_PATCH_SIZE; if (!ebpf_from_cbpf) { @@ -2410,10 +2476,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i u8 *prog; bool save_ret; + /* + * F_INDIRECT is only compatible with F_RET_FENTRY_RET, it is + * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG + * because @func_addr. + */ + WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) && + (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET))); + /* extra registers for struct arguments */ - for (i = 0; i < m->nr_args; i++) + for (i = 0; i < m->nr_args; i++) { if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) nr_regs += (m->arg_size[i] + 7) / 8 - 1; + } /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6 * are passed through regs, the remains are through stack. @@ -2496,20 +2571,27 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i prog = image; - EMIT_ENDBR(); - /* - * This is the direct-call trampoline, as such it needs accounting - * for the __fentry__ call. - */ - x86_call_depth_emit_accounting(&prog, NULL); + if (flags & BPF_TRAMP_F_INDIRECT) { + /* + * Indirect call for bpf_struct_ops + */ + emit_cfi(&prog, cfi_get_func_hash(func_addr)); + } else { + /* + * Direct-call fentry stub, as such it needs accounting for the + * __fentry__ call. + */ + x86_call_depth_emit_accounting(&prog, NULL); + } EMIT1(0x55); /* push rbp */ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ - if (!is_imm8(stack_size)) + if (!is_imm8(stack_size)) { /* sub rsp, stack_size */ EMIT3_off32(0x48, 0x81, 0xEC, stack_size); - else + } else { /* sub rsp, stack_size */ EMIT4(0x48, 0x83, 0xEC, stack_size); + } if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) EMIT1(0x50); /* push rax */ /* mov QWORD PTR [rbp - rbx_off], rbx */ @@ -2542,10 +2624,11 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i } } - if (fentry->nr_links) + if (fentry->nr_links) { if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET)) return -EINVAL; + } if (fmod_ret->nr_links) { branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *), @@ -2564,11 +2647,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i restore_regs(m, &prog, regs_off); save_args(m, &prog, arg_stack_off, true); - if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { /* Before calling the original function, restore the * tail_call_cnt from stack to rax. */ RESTORE_TAIL_CALL_CNT(stack_size); + } if (flags & BPF_TRAMP_F_ORIG_STACK) { emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8); @@ -2597,16 +2681,18 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i /* Update the branches saved in invoke_bpf_mod_ret with the * aligned address of do_fexit. 
*/ - for (i = 0; i < fmod_ret->nr_links; i++) + for (i = 0; i < fmod_ret->nr_links; i++) { emit_cond_near_jump(&branches[i], prog, branches[i], X86_JNE); + } } - if (fexit->nr_links) + if (fexit->nr_links) { if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) { ret = -EINVAL; goto cleanup; } + } if (flags & BPF_TRAMP_F_RESTORE_REGS) restore_regs(m, &prog, regs_off); @@ -2623,11 +2709,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ret = -EINVAL; goto cleanup; } - } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { /* Before running the original function, restore the * tail_call_cnt from stack to rax. */ RESTORE_TAIL_CALL_CNT(stack_size); + } /* restore return value of orig_call or fentry prog back into RAX */ if (save_ret) @@ -2635,9 +2722,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off); EMIT1(0xC9); /* leave */ - if (flags & BPF_TRAMP_F_SKIP_FRAME) + if (flags & BPF_TRAMP_F_SKIP_FRAME) { /* skip our return address and return to parent */ EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ + } emit_return(&prog, prog); /* Make sure the trampoline generation logic doesn't overflow */ if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { @@ -2908,9 +2996,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) jit_data->header = header; jit_data->rw_header = rw_header; } - prog->bpf_func = (void *)image; + /* + * ctx.prog_offset is used when CFI preambles put code *before* + * the function. See emit_cfi(). For FineIBT specifically this code + * can also be executed and bpf_prog_kallsyms_add() will + * generate an additional symbol to cover this, hence also + * decrement proglen. 
+ */ + prog->bpf_func = (void *)image + cfi_get_offset(); prog->jited = 1; - prog->jited_len = proglen; + prog->jited_len = proglen - cfi_get_offset(); } else { prog = orig_prog; } @@ -2965,6 +3060,7 @@ void bpf_jit_free(struct bpf_prog *prog) kvfree(jit_data->addrs); kfree(jit_data); } + prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset(); hdr = bpf_jit_binary_pack_hdr(prog); bpf_jit_binary_pack_free(hdr, NULL); WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 9a7e5e57ee9ad05bfa8c83541cb2f2410f4c8c28..1647a7cc3fbf6760e4187858d3a20f6ccf9773a3 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -410,9 +410,9 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, typedef pte_t *pte_addr_t; -void update_mmu_tlb(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep); -#define __HAVE_ARCH_UPDATE_MMU_TLB +void update_mmu_tlb_range(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, unsigned int nr); +#define update_mmu_tlb_range update_mmu_tlb_range #endif /* !defined (__ASSEMBLY__) */ diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index 4f974b74883caeb7960d8f825f0759ca3f40d16e..f69feee19d590761bda015de862e5940c472403c 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -163,10 +163,10 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) } } -void update_mmu_tlb(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep) +void update_mmu_tlb_range(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, unsigned int nr) { - local_flush_tlb_page(vma, address); + local_flush_tlb_range(vma, address, address + PAGE_SIZE * nr); } #ifdef CONFIG_DEBUG_TLB_SANITY diff --git a/block/bio.c b/block/bio.c index 62419aa09d731922e5d62cbc2649b1882da8bfaf..6784bbe44d16c9d7bec61e109f210b88fece59ec 100644 --- a/block/bio.c +++ b/block/bio.c @@ -263,6 +263,12 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, bio->bi_issue.value = 0; if (bdev) bio_associate_blkg(bio); +#ifdef CONFIG_BLK_DEV_THROTTLING + bio->start_time_ns = 0; + bio->io_start_time_ns = 0; + bio->bi_tg_end_io = NULL; + bio->bi_tg_private = NULL; +#endif #ifdef CONFIG_BLK_CGROUP_IOCOST bio->bi_iocost_cost = 0; #endif @@ -1376,12 +1382,15 @@ int submit_bio_wait(struct bio *bio) /* Prevent hang_check timer from firing at us during very long I/O */ hang_check = sysctl_hung_task_timeout_secs; + + task_set_wait_res(TASK_WAIT_BIO, bio); if (hang_check) while (!wait_for_completion_io_timeout(&done, hang_check * (HZ/2))) ; else wait_for_completion_io(&done); + task_clear_wait_res(); return blk_status_to_errno(bio->bi_status); } @@ -1602,6 +1611,10 @@ void bio_endio(struct bio *bio) blk_throtl_bio_endio(bio); /* release cgroup info */ bio_uninit(bio); +#ifdef CONFIG_BLK_DEV_THROTTLING + if (bio->bi_tg_end_io) + bio->bi_tg_end_io(bio); +#endif if (bio->bi_end_io) bio->bi_end_io(bio); } diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 5b0bdc268ade9f6975f7ed5e02e550d6d3110f43..315e7eb6925a157e191c741a0fe6383e8f34fd4a 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -49,6 +49,10 @@ struct blkg_iostat_set { int lqueued; /* queued in llist */ struct blkg_iostat cur; struct blkg_iostat last; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; /* association between a blk cgroup and a request queue */ @@ -88,6 +92,8 @@ struct blkcg_gq { int last_use; 
struct rcu_head rcu_head; + + CK_KABI_RESERVE(1) }; struct blkcg { @@ -114,6 +120,11 @@ struct blkcg { #ifdef CONFIG_CGROUP_WRITEBACK struct list_head cgwb_list; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) @@ -183,6 +194,9 @@ struct blkcg_policy { blkcg_pol_free_pd_fn *pd_free_fn; blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; blkcg_pol_stat_pd_fn *pd_stat_fn; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; extern struct blkcg blkcg_root; diff --git a/block/blk-core.c b/block/blk-core.c index 4f25d2c4bc7055b9e78a2c8ebe4e09898fd17221..ad2b3ab4a2460f7f79a85cbc2948bc8ee4dbdd3e 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -404,6 +404,7 @@ struct request_queue *blk_alloc_queue(int node_id) return NULL; q->last_merge = NULL; + q->rq_hang_threshold = BLK_REQ_HANG_THRESHOLD; q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL); if (q->id < 0) @@ -734,6 +735,9 @@ void submit_bio_noacct(struct bio *bio) struct block_device *bdev = bio->bi_bdev; struct request_queue *q = bdev_get_queue(bdev); blk_status_t status = BLK_STS_IOERR; + DEFINE_WAIT(wait); + wait_queue_head_t *wait_head = NULL; + bool throtl; might_sleep(); @@ -807,7 +811,13 @@ void submit_bio_noacct(struct bio *bio) break; } - if (blk_throtl_bio(bio)) + throtl = blk_throtl_bio(bio, &wait_head, &wait); + if (wait_head) { + io_schedule(); + finish_wait(wait_head, &wait); + } + + if (throtl) return; submit_bio_noacct_nocheck(bio); return; diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 129732a8d0dd9b4f96eaeeec05686a5216142fd4..2c2c82007a541d6c91893122d1d85444c5c32a2b 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -3510,6 +3510,36 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input, return ret; } +static u64 ioc_stat_prfill(struct seq_file *sf, struct blkg_policy_data *pd, + int off) +{ + struct blkcg_gq *blkg = pd->blkg; + const char *dname = blkg_dev_name(blkg); + struct ioc_gq *iocg = blkg_to_iocg(blkg); + struct ioc *ioc = iocg->ioc; + + if (!dname) + return 0; + + seq_printf(sf, "%s is_active=%d active=%u inuse=%u " + "hweight_active=%u hweight_inuse=%u vrate=%llu\n", + dname, !list_empty(&iocg->active_list), + iocg->active, iocg->inuse, + iocg->hweight_active, iocg->hweight_inuse, + (unsigned long long)atomic64_read(&ioc->vtime_rate)); + + return 0; +} + +static int ioc_cost_print_stat(struct seq_file *sf, void *v) +{ + struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); + + blkcg_print_blkgs(sf, blkcg, ioc_stat_prfill, + &blkcg_policy_iocost, seq_cft(sf)->private, false); + return 0; +} + static struct cftype ioc_files[] = { { .name = "weight", @@ -3532,8 +3562,36 @@ static struct cftype ioc_files[] = { {} }; +static struct cftype ioc_legacy_files[] = { + { + .name = "cost.weight", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = ioc_weight_show, + .write = ioc_weight_write, + }, + { + .name = "cost.qos", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = ioc_qos_show, + .write = ioc_qos_write, + }, + { + .name = "cost.model", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = ioc_cost_model_show, + .write = ioc_cost_model_write, + }, + { + .name = "cost.stat", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = ioc_cost_print_stat, + }, + {} +}; + static struct blkcg_policy blkcg_policy_iocost = { .dfl_cftypes = ioc_files, + .legacy_cftypes = ioc_legacy_files, .cpd_alloc_fn = ioc_cpd_alloc, .cpd_free_fn = ioc_cpd_free, .pd_alloc_fn = ioc_pd_alloc, diff --git 
a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index c3b5930106b288bec70850a90f88f2fdc1ed5a38..f42314c863773827ff62f70a9ff5728ac5d9bd62 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -155,12 +155,47 @@ static ssize_t queue_state_write(void *data, const char __user *buf, return count; } +static void blk_mq_debugfs_rq_hang_show(struct seq_file *m, struct request *rq); + +static bool blk_mq_check_rq_hang(struct request *rq, void *priv) +{ + struct seq_file *m = priv; + u64 now = ktime_get_ns(); + u64 duration; + + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if (duration < rq->q->rq_hang_threshold) + return true; + + /* See comments in blk_mq_check_expired() */ + if (!req_ref_inc_not_zero(rq)) + return true; + + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if (duration >= rq->q->rq_hang_threshold) + blk_mq_debugfs_rq_hang_show(m, rq); + + blk_mq_put_rq_ref(rq); + + return true; + +} + +static int queue_rq_hang_show(void *data, struct seq_file *m) +{ + struct request_queue *q = data; + + blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, m); + return 0; +} + static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = { { "poll_stat", 0400, queue_poll_stat_show }, { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops }, { "pm_only", 0600, queue_pm_only_show, NULL }, { "state", 0600, queue_state_show, queue_state_write }, { "zone_wlock", 0400, queue_zone_wlock_show, NULL }, + { "rq_hang", 0400, queue_rq_hang_show, NULL }, { }, }; @@ -310,6 +345,52 @@ int blk_mq_debugfs_rq_show(struct seq_file *m, void *v) } EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show); +static void blk_mq_debugfs_rq_hang_show(struct seq_file *m, struct request *rq) +{ + const struct blk_mq_ops *const mq_ops = rq->q->mq_ops; + const unsigned int op = req_op(rq); + const char *op_str = blk_op_str(op); + struct bio *bio; + struct bio_vec *bvec; + struct bvec_iter_all iter_all; + + seq_printf(m, "%px {.op=", rq); + if (strcmp(op_str, "UNKNOWN") == 0) + seq_printf(m, "%u", op); + else + seq_printf(m, "%s", op_str); + seq_puts(m, ", .cmd_flags="); + blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name, + ARRAY_SIZE(cmd_flag_name)); + seq_puts(m, ", .rq_flags="); + blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name, + ARRAY_SIZE(rqf_name)); + seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq))); + seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag, + rq->internal_tag); + seq_printf(m, ", .start_time_ns=%llu", rq->start_time_ns); + seq_printf(m, ", .io_start_time_ns=%llu", rq->io_start_time_ns); + seq_printf(m, ", .current_time=%llu", ktime_get_ns()); + + __rq_for_each_bio(bio, rq) { + seq_printf(m, ", .bio = %px", bio); + seq_printf(m, ", .sector = %llu, .len=%u", + bio->bi_iter.bi_sector, bio->bi_iter.bi_size); + seq_puts(m, ", .bio_pages = { "); + bio_for_each_segment_all(bvec, bio, iter_all) { + struct page *page = bvec->bv_page; + + if (!page) + continue; + seq_printf(m, "%px ", page); + } + seq_puts(m, "}"); + } + if (mq_ops->show_rq) + mq_ops->show_rq(m, rq); + seq_puts(m, "}\n"); +} + static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos) __acquires(&hctx->lock) { diff --git a/block/blk-mq.c b/block/blk-mq.c index 5da948b07058b41f5e15b0d0e3990f20e0461673..93324d9fb176876c1a67475a2869a63db7ee71f1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -89,6 +89,11 @@ struct mq_inflight { unsigned int inflight[2]; }; +struct mq_hang { + struct block_device *part; + unsigned int hang[2]; +}; + static bool 
blk_mq_check_inflight(struct request *rq, void *priv) { struct mq_inflight *mi = priv; @@ -121,6 +126,29 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, inflight[1] = mi.inflight[1]; } +static bool blk_mq_check_hang(struct request *rq, void *priv) +{ + struct mq_hang *mh = priv; + u64 now = ktime_get_ns(), duration; + + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if ((duration >= rq->q->rq_hang_threshold) && + (!mh->part->bd_partno || rq->part == mh->part)) + mh->hang[rq_data_dir(rq)]++; + + return true; +} + +void blk_mq_hang_rw(struct request_queue *q, struct block_device *part, + unsigned int hang[2]) +{ + struct mq_hang mh = { .part = part }; + + blk_mq_queue_tag_busy_iter(q, blk_mq_check_hang, &mh); + hang[0] = mh.hang[0]; + hang[1] = mh.hang[1]; +} + void blk_freeze_queue_start(struct request_queue *q) { mutex_lock(&q->mq_freeze_lock); @@ -997,6 +1025,10 @@ static inline void blk_account_io_done(struct request *req, u64 now) part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns); part_stat_local_dec(req->part, in_flight[op_is_write(req_op(req))]); + if (req->rq_flags & RQF_STATS) { + part_stat_add(req->part, d2c_nsecs[sgrp], + now - req->io_start_time_ns); + } part_stat_unlock(); } } diff --git a/block/blk-mq.h b/block/blk-mq.h index cf9f21772ddc8e445fd974846e9e1d6c9453e4f7..cc3dc4abb2e8367c1cd3a7520551a51b5ed832d4 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -158,6 +158,10 @@ struct blk_mq_alloc_data { /* input & output parameter */ struct blk_mq_ctx *ctx; struct blk_mq_hw_ctx *hctx; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, @@ -253,6 +257,8 @@ unsigned int blk_mq_in_flight(struct request_queue *q, struct block_device *part); void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, unsigned int inflight[2]); +void blk_mq_hang_rw(struct request_queue *q, struct block_device *part, + unsigned int hang[2]); static inline void blk_mq_put_dispatch_budget(struct request_queue *q, int budget_token) diff --git a/block/blk-settings.c b/block/blk-settings.c index 7019b8e204d965161eff7e232d11abc56e8dab54..52fa777d29988f17a96a6729bd909cc179300d9d 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -25,6 +25,13 @@ void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) } EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); +void blk_queue_rq_hang_threshold(struct request_queue *q, + unsigned int hang_threshold) +{ + q->rq_hang_threshold = hang_threshold; +} +EXPORT_SYMBOL_GPL(blk_queue_rq_hang_threshold); + /** * blk_set_default_limits - reset limits to default values * @lim: the queue_limits structure to reset diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 63e4812623361ddde759809c4e04b715aa871e43..f852ce8b40a446d3975763475fc9bc42e47ce388 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -438,6 +438,26 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, return count; } +static ssize_t queue_hang_threshold_show(struct request_queue *q, char *page) +{ + return sprintf(page, "%u\n", q->rq_hang_threshold); +} + +static ssize_t queue_hang_threshold_store(struct request_queue *q, const char *page, + size_t count) +{ + unsigned int hang_threshold; + int err; + + err = kstrtou32(page, 10, &hang_threshold); + if (err || hang_threshold == 0) + return -EINVAL; + + blk_queue_rq_hang_threshold(q, hang_threshold); + + return count; +} + static ssize_t queue_wc_show(struct 
request_queue *q, char *page) { if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) @@ -527,6 +547,7 @@ QUEUE_RO_ENTRY(queue_dax, "dax"); QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout"); QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask"); QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment"); +QUEUE_RW_ENTRY(queue_hang_threshold, "hang_threshold"); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time"); @@ -656,6 +677,7 @@ static struct attribute *queue_attrs[] = { #endif &queue_virt_boundary_mask_entry.attr, &queue_dma_alignment_entry.attr, + &queue_hang_threshold_entry.attr, NULL, }; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 16f5766620a41043645756c51d441f4488af9edf..8921f61d257adb26a76472d6438eaa589fdee04a 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -331,6 +331,10 @@ static void throtl_service_queue_init(struct throtl_service_queue *sq) { INIT_LIST_HEAD(&sq->queued[READ]); INIT_LIST_HEAD(&sq->queued[WRITE]); + sq->nr_queued_bytes[READ] = 0; + sq->nr_queued_bytes[WRITE] = 0; + init_waitqueue_head(&sq->wait[READ]); + init_waitqueue_head(&sq->wait[WRITE]); sq->pending_tree = RB_ROOT_CACHED; timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0); } @@ -345,11 +349,14 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, if (!tg) return NULL; - if (blkg_rwstat_init(&tg->stat_bytes, gfp)) - goto err_free_tg; - - if (blkg_rwstat_init(&tg->stat_ios, gfp)) - goto err_exit_stat_bytes; + if (blkg_rwstat_init(&tg->stat_bytes, gfp) || + blkg_rwstat_init(&tg->stat_ios, gfp) || + blkg_rwstat_init(&tg->service_time, gfp) || + blkg_rwstat_init(&tg->wait_time, gfp) || + blkg_rwstat_init(&tg->completed, gfp) || + blkg_rwstat_init(&tg->total_bytes_queued, gfp) || + blkg_rwstat_init(&tg->total_io_queued, gfp)) + goto err; throtl_service_queue_init(&tg->service_queue); @@ -376,9 +383,14 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, return &tg->pd; -err_exit_stat_bytes: +err: blkg_rwstat_exit(&tg->stat_bytes); -err_free_tg: + blkg_rwstat_exit(&tg->stat_ios); + blkg_rwstat_exit(&tg->service_time); + blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->completed); + blkg_rwstat_exit(&tg->total_bytes_queued); + blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); return NULL; } @@ -405,7 +417,9 @@ static void throtl_pd_init(struct blkg_policy_data *pd) * regardless of the position of the group in the hierarchy. 
*/ sq->parent_sq = &td->service_queue; - if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) + + /* Enable hierarchical throttling even on the traditional hierarchy */ + if (blkg->parent) sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue; tg->td = td; } @@ -474,6 +488,8 @@ static void throtl_upgrade_state(struct throtl_data *td); static void throtl_pd_offline(struct blkg_policy_data *pd) { struct throtl_grp *tg = pd_to_tg(pd); + struct blkcg_gq *blkg = pd_to_blkg(pd); + struct blkcg_gq *parent = blkg->parent; tg->bps[READ][LIMIT_LOW] = 0; tg->bps[WRITE][LIMIT_LOW] = 0; @@ -484,6 +500,18 @@ static void throtl_pd_offline(struct blkg_policy_data *pd) if (!tg->td->limit_valid[tg->td->limit_index]) throtl_upgrade_state(tg->td); + if (parent) { + blkg_rwstat_add_aux(&blkg_to_tg(parent)->service_time, + &tg->service_time); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->wait_time, + &tg->wait_time); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->completed, + &tg->completed); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_bytes_queued, + &tg->total_bytes_queued); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_io_queued, + &tg->total_io_queued); + } } static void throtl_pd_free(struct blkg_policy_data *pd) @@ -493,9 +521,25 @@ static void throtl_pd_free(struct blkg_policy_data *pd) del_timer_sync(&tg->service_queue.pending_timer); blkg_rwstat_exit(&tg->stat_bytes); blkg_rwstat_exit(&tg->stat_ios); + blkg_rwstat_exit(&tg->service_time); + blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->completed); + blkg_rwstat_exit(&tg->total_bytes_queued); + blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); } +static void throtl_pd_reset(struct blkg_policy_data *pd) +{ + struct throtl_grp *tg = pd_to_tg(pd); + + blkg_rwstat_reset(&tg->service_time); + blkg_rwstat_reset(&tg->wait_time); + blkg_rwstat_reset(&tg->completed); + blkg_rwstat_reset(&tg->total_bytes_queued); + blkg_rwstat_reset(&tg->total_io_queued); +} + static struct throtl_grp * throtl_rb_first(struct throtl_service_queue *parent_sq) { @@ -958,6 +1002,65 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, return false; } +static void throtl_stats_update_completion(struct throtl_grp *tg, + uint64_t start_time, + uint64_t io_start_time, + int op) +{ + unsigned long flags; + uint64_t now = sched_clock(); + + local_irq_save(flags); + if (time_after64(now, io_start_time)) + blkg_rwstat_add(&tg->service_time, op, now - io_start_time); + if (time_after64(io_start_time, start_time)) + blkg_rwstat_add(&tg->wait_time, op, io_start_time - start_time); + blkg_rwstat_add(&tg->completed, op, 1); + local_irq_restore(flags); +} + +static void throtl_bio_end_io(struct bio *bio) +{ + struct throtl_grp *tg; + + rcu_read_lock(); + /* see comments in throtl_bio_stats_start() */ + if (!bio_ext_flagged(bio, BIO_THROTL_STATED)) + goto out; + + tg = (struct throtl_grp *)bio->bi_tg_private; + if (!tg) + goto out; + + throtl_stats_update_completion(tg, bio_start_time_ns(bio), + bio_io_start_time_ns(bio), + bio_op(bio)); + blkg_put(tg_to_blkg(tg)); + bio_clear_ext_flag(bio, BIO_THROTL_STATED); +out: + rcu_read_unlock(); +} + +static inline void throtl_bio_stats_start(struct bio *bio, struct throtl_grp *tg) +{ + int op = bio_op(bio); + + /* + * end_io may be called twice for stacked drivers such as dm-thin: + * they save the original end_io, call their own overriding end_io, + * and then the saved end_io. Use the bio flag BIO_THROTL_STATED + * to make sure the statistics are accounted only once.
+ */ + if ((op == REQ_OP_READ || op == REQ_OP_WRITE) && + !bio_ext_flagged(bio, BIO_THROTL_STATED)) { + blkg_get(tg_to_blkg(tg)); + bio_set_ext_flag(bio, BIO_THROTL_STATED); + bio->bi_tg_end_io = throtl_bio_end_io; + bio->bi_tg_private = tg; + bio_set_start_time_ns(bio); + } +} + static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) { bool rw = bio_data_dir(bio); @@ -1003,6 +1106,10 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); sq->nr_queued[rw]++; + sq->nr_queued_bytes[rw] += throtl_bio_data_size(bio); + blkg_rwstat_add(&tg->total_bytes_queued, bio_op(bio), + throtl_bio_data_size(bio)); + blkg_rwstat_add(&tg->total_io_queued, bio_op(bio), 1); throtl_enqueue_tg(tg); } @@ -1058,6 +1165,15 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) */ bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); sq->nr_queued[rw]--; + sq->nr_queued_bytes[rw] -= throtl_bio_data_size(bio); + WARN_ON_ONCE(sq->nr_queued_bytes[rw] < 0); + + if (wq_has_sleeper(&sq->wait[rw])) { + if (sq->nr_queued_bytes[rw] > 0) + wake_up(&sq->wait[rw]); + else + wake_up_all(&sq->wait[rw]); + } throtl_charge_bio(tg, bio); @@ -1486,6 +1602,31 @@ static struct cftype throtl_legacy_files[] = { .private = offsetof(struct throtl_grp, stat_ios), .seq_show = tg_print_rwstat_recursive, }, + { + .name = "throttle.io_service_time", + .private = offsetof(struct throtl_grp, service_time), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.io_wait_time", + .private = offsetof(struct throtl_grp, wait_time), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.io_completed", + .private = offsetof(struct throtl_grp, completed), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.total_bytes_queued", + .private = offsetof(struct throtl_grp, total_bytes_queued), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.total_io_queued", + .private = offsetof(struct throtl_grp, total_io_queued), + .seq_show = tg_print_rwstat, + }, { } /* terminate */ }; @@ -1552,6 +1693,56 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd, return 0; } +static u64 tg_prfill_extstat(struct seq_file *sf, struct blkg_policy_data *pd, + int off) +{ + struct throtl_grp *tg = pd_to_tg(pd); + const char *dname = blkg_dev_name(pd->blkg); + char bufs[10][21] = { "0", "0", "0", "0", "0", "0", "0", "0", "0", "0" }; + struct blkg_rwstat_sample tmp = { }; + + if (!dname) + return 0; + + /* read/write IOs wait time */ + blkg_rwstat_read(&tg->wait_time, &tmp); + snprintf(bufs[0], sizeof(bufs[0]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[1], sizeof(bufs[1]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write IOs service time */ + blkg_rwstat_read(&tg->service_time, &tmp); + snprintf(bufs[2], sizeof(bufs[2]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[3], sizeof(bufs[3]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write completed IOs */ + blkg_rwstat_read(&tg->completed, &tmp); + snprintf(bufs[4], sizeof(bufs[4]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[5], sizeof(bufs[5]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write queued bytes */ + blkg_rwstat_read(&tg->total_bytes_queued, &tmp); + snprintf(bufs[6], sizeof(bufs[6]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[7], sizeof(bufs[7]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write queued IOs */ + blkg_rwstat_read(&tg->total_io_queued, &tmp); + snprintf(bufs[8], sizeof(bufs[8]), "%llu", + 
tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[9], sizeof(bufs[9]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + + seq_printf(sf, "%s rwait=%s wwait=%s rserv=%s wserv=%s rcomp=%s wcomp=%s " + "rbytesq=%s wbytesq=%s riosq=%s wiosq=%s\n", + dname, bufs[0], bufs[1], bufs[2], bufs[3], bufs[4], + bufs[5], bufs[6], bufs[7], bufs[8], bufs[9]); + + return 0; +} + static int tg_print_limit(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit, @@ -1559,6 +1750,13 @@ static int tg_print_limit(struct seq_file *sf, void *v) return 0; } +static int tg_print_extstat(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_extstat, + &blkcg_policy_throtl, 0, false); + return 0; +} + static ssize_t tg_set_limit(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { @@ -1695,6 +1893,10 @@ static struct cftype throtl_files[] = { .write = tg_set_limit, .private = LIMIT_MAX, }, + { + .name = "extstat", + .seq_show = tg_print_extstat, + }, { } /* terminate */ }; @@ -1714,6 +1916,7 @@ struct blkcg_policy blkcg_policy_throtl = { .pd_online_fn = throtl_pd_online, .pd_offline_fn = throtl_pd_offline, .pd_free_fn = throtl_pd_free, + .pd_reset_stats_fn = throtl_pd_reset, }; void blk_throtl_cancel_bios(struct gendisk *disk) @@ -2173,7 +2376,8 @@ static void throtl_upgrade_state(struct throtl_data *td) } #endif -bool __blk_throtl_bio(struct bio *bio) +bool __blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct blkcg_gq *blkg = bio->bi_blkg; @@ -2186,6 +2390,8 @@ bool __blk_throtl_bio(struct bio *bio) rcu_read_lock(); + throtl_bio_stats_start(bio, tg); + spin_lock_irq(&q->queue_lock); throtl_update_latency_buckets(td); @@ -2255,6 +2461,18 @@ bool __blk_throtl_bio(struct bio *bio) tg->last_low_overflow_time[rw] = jiffies; td->nr_queued[rw]++; + + if (rw == WRITE) { + u64 bps_limit = tg_bps_limit(tg, rw); + + if (bps_limit != U64_MAX && + (wq_has_sleeper(&sq->wait[rw]) || + sq->nr_queued_bytes[rw] > div_u64(bps_limit, 2))) { + *waitq = &sq->wait[rw]; + prepare_to_wait_exclusive(*waitq, wait, TASK_UNINTERRUPTIBLE); + } + } + throtl_add_bio_tg(bio, qn, tg); throttled = true; @@ -2275,6 +2493,8 @@ bool __blk_throtl_bio(struct bio *bio) bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY; #endif spin_unlock_irq(&q->queue_lock); + if (!throttled) + bio_set_io_start_time_ns(bio); rcu_read_unlock(); return throttled; diff --git a/block/blk-throttle.h b/block/blk-throttle.h index bffbc9cfc8ab6bac79ad4d3cff04622ce1f852d8..4b5ce538ca5b177ba31ab2ae2497fc21598aaf0b 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -41,6 +41,8 @@ struct throtl_service_queue { */ struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ unsigned int nr_queued[2]; /* number of queued bios */ + long nr_queued_bytes[2]; /* number of queued bytes */ + wait_queue_head_t wait[2]; /* * RB tree of active children throtl_grp's, which are sorted by @@ -150,6 +152,16 @@ struct throtl_grp { struct blkg_rwstat stat_bytes; struct blkg_rwstat stat_ios; + /* total time spent on lower layer: scheduler, device and others */ + struct blkg_rwstat service_time; + /* total time spent on block throttle */ + struct blkg_rwstat wait_time; + /* total IOs completed */ + struct blkg_rwstat completed; + /* total bytes throttled */ + struct blkg_rwstat total_bytes_queued; + /* total IOs throttled */ + struct blkg_rwstat total_io_queued; }; extern struct blkcg_policy 
blkcg_policy_throtl; @@ -171,13 +183,18 @@ static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) static inline int blk_throtl_init(struct gendisk *disk) { return 0; } static inline void blk_throtl_exit(struct gendisk *disk) { } static inline void blk_throtl_register(struct gendisk *disk) { } -static inline bool blk_throtl_bio(struct bio *bio) { return false; } +static inline bool blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) +{ + return false; +} static inline void blk_throtl_cancel_bios(struct gendisk *disk) { } #else /* CONFIG_BLK_DEV_THROTTLING */ int blk_throtl_init(struct gendisk *disk); void blk_throtl_exit(struct gendisk *disk); void blk_throtl_register(struct gendisk *disk); -bool __blk_throtl_bio(struct bio *bio); +bool __blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait); void blk_throtl_cancel_bios(struct gendisk *disk); static inline bool blk_should_throtl(struct bio *bio) @@ -204,13 +221,14 @@ static inline bool blk_should_throtl(struct bio *bio) return false; } -static inline bool blk_throtl_bio(struct bio *bio) +static inline bool blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) { if (!blk_should_throtl(bio)) return false; - return __blk_throtl_bio(bio); + return __blk_throtl_bio(bio, waitq, wait); } #endif /* CONFIG_BLK_DEV_THROTTLING */ diff --git a/block/blk.h b/block/blk.h index 67915b04b3c179329d34af45a48f2d4a6264c35c..475bbb40bb83f78e76d2e59145d716779c7b88f5 100644 --- a/block/blk.h +++ b/block/blk.h @@ -285,6 +285,8 @@ ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf); ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, char *buf); +ssize_t part_hang_show(struct device *dev, struct device_attribute *attr, + char *buf); ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf); ssize_t part_fail_store(struct device *dev, struct device_attribute *attr, diff --git a/block/elevator.h b/block/elevator.h index 7ca3d7b6ed8289fca0f1261eb9a5001018fc04cc..d07757ab6e349db8a95d0db783124b41e54a5148 100644 --- a/block/elevator.h +++ b/block/elevator.h @@ -48,6 +48,11 @@ struct elevator_mq_ops { struct request *(*next_request)(struct request_queue *, struct request *); void (*init_icq)(struct io_cq *); void (*exit_icq)(struct io_cq *); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define ELV_NAME_MAX (16) @@ -84,6 +89,11 @@ struct elevator_type /* managed by elevator core */ char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */ struct list_head list; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline bool elevator_tryget(struct elevator_type *e) diff --git a/block/genhd.c b/block/genhd.c index 203c880c3e1cd270e6ba27e1aac9875d3f00f82d..40f3a35e58830a8b3c617727ab47229603cc3fbc 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -109,6 +109,7 @@ static void part_stat_read_all(struct block_device *part, for (group = 0; group < NR_STAT_GROUPS; group++) { stat->nsecs[group] += ptr->nsecs[group]; + stat->d2c_nsecs[group] += ptr->d2c_nsecs[group]; stat->sectors[group] += ptr->sectors[group]; stat->ios[group] += ptr->ios[group]; stat->merges[group] += ptr->merges[group]; @@ -964,7 +965,8 @@ ssize_t part_stat_show(struct device *dev, "%8lu %8lu %8llu %8u " "%8u %8u %8u " "%8lu %8lu %8llu %8u " - "%8lu %8u" + "%8lu %8u " + "%8u %8u %8u" "\n", stat.ios[STAT_READ], 
stat.merges[STAT_READ], @@ -986,7 +988,10 @@ ssize_t part_stat_show(struct device *dev, (unsigned long long)stat.sectors[STAT_DISCARD], (unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC), stat.ios[STAT_FLUSH], - (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC)); + (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_READ], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_WRITE], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_DISCARD], NSEC_PER_MSEC)); } ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, @@ -1004,6 +1009,23 @@ ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]); } +ssize_t part_hang_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct block_device *bdev = dev_to_bdev(dev); + struct request_queue *q = bdev_get_queue(bdev); + unsigned int hang[2] = {0, 0}; + + /* + * For now, we only support blk-mq devices, since we haven't found a + * generic method to track requests in single-queue devices. + */ + if (queue_is_mq(q)) + blk_mq_hang_rw(q, bdev, hang); + + return sprintf(buf, "%8u %8u\n", hang[0], hang[1]); +} + static ssize_t disk_capability_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1054,6 +1076,7 @@ static DEVICE_ATTR(discard_alignment, 0444, disk_discard_alignment_show, NULL); static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL); static DEVICE_ATTR(stat, 0444, part_stat_show, NULL); static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL); +static DEVICE_ATTR(hang, 0444, part_hang_show, NULL); static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store); static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL); static DEVICE_ATTR(partscan, 0444, partscan_show, NULL); @@ -1098,6 +1121,7 @@ static struct attribute *disk_attrs[] = { &dev_attr_capability.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, + &dev_attr_hang.attr, &dev_attr_badblocks.attr, &dev_attr_events.attr, &dev_attr_events_async.attr, @@ -1265,7 +1289,8 @@ static int diskstats_show(struct seq_file *seqf, void *v) "%lu %lu %lu %u " "%u %u %u " "%lu %lu %lu %u " - "%lu %u" + "%lu %u " + "%u %u %u" "\n", MAJOR(hd->bd_dev), MINOR(hd->bd_dev), hd, stat.ios[STAT_READ], @@ -1292,6 +1317,12 @@ static int diskstats_show(struct seq_file *seqf, void *v) NSEC_PER_MSEC), stat.ios[STAT_FLUSH], (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], + NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_READ], + NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_WRITE], + NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_DISCARD], NSEC_PER_MSEC) ); } diff --git a/block/partitions/core.c b/block/partitions/core.c index fc0ab5d8ab705bab8651249b073ab081904b967f..549ce89a657b25c5d21577ace675c353ca9a4d3a 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -208,6 +208,7 @@ static DEVICE_ATTR(alignment_offset, 0444, part_alignment_offset_show, NULL); static DEVICE_ATTR(discard_alignment, 0444, part_discard_alignment_show, NULL); static DEVICE_ATTR(stat, 0444, part_stat_show, NULL); static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL); +static DEVICE_ATTR(hang, 0444, part_hang_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = __ATTR(make-it-fail, 0644, part_fail_show, part_fail_store); @@ -222,6 +223,7 @@ static struct attribute *part_attrs[] = {
&dev_attr_discard_alignment.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, + &dev_attr_hang.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, #endif diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 216878c8bc3d62f8abd6e708acffffae7d09e5df..b6d924e0ff59fa951ee9815c0a2388495a454cb4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4819,6 +4819,16 @@ static const struct alg_test_desc alg_test_descs[] = { .decomp = __VECS(deflate_decomp_tv_template) } } + }, { + .alg = "deflate-iaa", + .test = alg_test_comp, + .fips_allowed = 1, + .suite = { + .comp = { + .comp = __VECS(deflate_comp_tv_template), + .decomp = __VECS(deflate_decomp_tv_template) + } + } }, { .alg = "dh", .test = alg_test_kpp, diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 80f945cbec8a7cf12502400575a02a455594ed10..791f4b234e02052fb48f7f4919d42f1357ba09db 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -40,7 +40,8 @@ struct apd_private_data { const struct apd_device_desc *dev_desc; }; -#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || defined(CONFIG_ARM64) +#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || \ +defined(CONFIG_ARM64) || defined(CONFIG_SW64) #define APD_ADDR(desc) ((unsigned long)&desc) static int acpi_apd_setup(struct apd_private_data *pdata) @@ -178,6 +179,18 @@ static const struct apd_device_desc hip08_spi_desc = { }; #endif /* CONFIG_ARM64 */ +#ifdef CONFIG_SW64 +static const struct apd_device_desc sunway_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 25000000, +}; + +static const struct apd_device_desc sunway_spi_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 25000000, +}; +#endif + #endif /* @@ -246,6 +259,10 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "HISI02A3", APD_ADDR(hip08_lite_i2c_desc) }, { "HISI0173", APD_ADDR(hip08_spi_desc) }, { "NXP0001", APD_ADDR(nxp_i2c_desc) }, +#endif +#ifdef CONFIG_SW64 + { "HISI02A1", APD_ADDR(sunway_i2c_desc) }, + { "HISI0173", APD_ADDR(sunway_spi_desc) }, #endif { } }; diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index c7c26872f4cec11a3be409204b25e54bd727b482..ad8d5d5e97ccc3931f26f815cf8f7b2325c435d9 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -773,6 +773,17 @@ void __weak arch_apei_report_mem_error(int sev, } EXPORT_SYMBOL_GPL(arch_apei_report_mem_error); +void __weak arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err) +{ +} +EXPORT_SYMBOL_GPL(arch_apei_report_pcie_error); + +bool __weak arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err) +{ + return false; +} +EXPORT_SYMBOL_GPL(arch_apei_report_zdi_error); + int apei_osc_setup(void) { static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c"; diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index 67c2c3b959e1538a645d43bda3449378c0051ed8..448370641d1d5bb1348622b0912ce9b81006df43 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -131,3 +131,7 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus) int apei_osc_setup(void); #endif + +#ifdef CONFIG_YITIAN_CPER_RAWDATA +bool yitian_estatus_check_header(const struct acpi_hest_generic_status *estatus); +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index ab2a82cb1b0b48ab21682bdb87c052707f19d282..6f8ee3041ee91fc9bb4f176136e1a29137d0bc82 100644 --- a/drivers/acpi/apei/ghes.c +++ 
b/drivers/acpi/apei/ghes.c @@ -483,9 +483,62 @@ static void ghes_kick_task_work(struct callback_head *head) gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len); } +/* + * Tasks that can handle task_work: + * + * - All user tasks: run the task work before returning to userspace. + */ +static bool should_add_task_work(struct task_struct *task) +{ + if (task->mm) + return true; + + return false; +} + +/** + * struct mce_task_work - for synchronous RAS event + * + * @twork: callback_head for task work + * @pfn: page frame number of corrupted page + * @flags: fine tune action taken + * + * Structure to pass task work to be handled before + * ret_to_user via task_work_add(). + */ +struct mce_task_work { + struct callback_head twork; + u64 pfn; + int flags; +}; + +static void memory_failure_cb(struct callback_head *twork) +{ + int rc; + struct mce_task_work *twcb = + container_of(twork, struct mce_task_work, twork); + + rc = memory_failure(twcb->pfn, twcb->flags); + kfree(twcb); + + if (!rc) + return; + /* + * -EHWPOISON from memory_failure() means that it already sent SIGBUS + * to the current process with the proper error info, so no need to + * send SIGBUS here again. + */ + if (rc == -EHWPOISON) + return; + + pr_err("Memory error not recovered"); + force_sig(SIGBUS); +} + static bool ghes_do_memory_failure(u64 physical_addr, int flags) { unsigned long pfn; + struct mce_task_work *twcb; if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE)) return false; @@ -498,7 +551,20 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags) return false; } + if (flags == MF_ACTION_REQUIRED && should_add_task_work(current)) { + twcb = kmalloc(sizeof(*twcb), GFP_ATOMIC); + if (!twcb) + return false; + + twcb->pfn = pfn; + twcb->flags = flags; + init_task_work(&twcb->twork, memory_failure_cb); + task_work_add(current, &twcb->twork, TWA_RESUME); + return false; + } + memory_failure_queue(pfn, flags); + return true; } @@ -673,6 +739,33 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata, schedule_work(&entry->work); } +#ifdef CONFIG_YITIAN_CPER_RAWDATA +/* + * Check if the event is a synchronous exception based on Yitian DDR raw data. + * NOTE: only works for Yitian 710 for now. + */ +static bool is_sync_event(const struct acpi_hest_generic_status *estatus) +{ + struct yitian_raw_data_header *header; + struct yitian_ddr_raw_data *data; + + if (!yitian_estatus_check_header(estatus)) + return false; + + header = (struct yitian_raw_data_header *)((void *)estatus + + estatus->raw_data_offset); + if (header->type != ERR_TYPE_DDR) + return false; + + data = (struct yitian_ddr_raw_data *)(header + 1); + /* 1 for synchronous exception */ + if (data->ex_type == 1) + return true; + + return false; +} +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ + static bool ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { @@ -685,6 +778,10 @@ static bool ghes_do_proc(struct ghes *ghes, bool sync = is_hest_sync_notify(ghes); sev = ghes_severity(estatus->error_severity); +#ifdef CONFIG_YITIAN_CPER_RAWDATA + if (estatus->raw_data_length) + sync = is_sync_event(estatus); +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ apei_estatus_for_each_section(estatus, gdata) { sec_type = (guid_t *)gdata->section_type; sec_sev = ghes_severity(gdata->error_severity); @@ -703,6 +800,9 @@ static bool ghes_do_proc(struct ghes *ghes, queued = ghes_handle_memory_failure(gdata, sev, sync); } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { + struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata); + +
arch_apei_report_pcie_error(sec_sev, pcie_err); ghes_handle_aer(gdata); } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { @@ -710,10 +810,13 @@ static bool ghes_do_proc(struct ghes *ghes, } else { void *err = acpi_hest_get_payload(gdata); - ghes_defer_non_standard_event(gdata, sev); - log_non_standard_event(sec_type, fru_id, fru_text, - sec_sev, err, - gdata->error_data_length); + if (!arch_apei_report_zdi_error(sec_type, + (struct cper_sec_proc_generic *)err)) { + ghes_defer_non_standard_event(gdata, sev); + log_non_standard_event(sec_type, fru_id, fru_text, + sec_sev, err, + gdata->error_data_length); + } } } @@ -1091,6 +1194,8 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, u32 len, node_len; u64 buf_paddr; int sev, rc; + struct acpi_hest_generic_data *gdata; + guid_t *sec_type; if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG)) return -EOPNOTSUPP; @@ -1126,6 +1231,23 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, sev = ghes_severity(estatus->error_severity); if (sev >= GHES_SEV_PANIC) { + apei_estatus_for_each_section(estatus, gdata) { + sec_type = (guid_t *)gdata->section_type; + if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { + struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); + + arch_apei_report_mem_error(sev, mem_err); + } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { + struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata); + + arch_apei_report_pcie_error(sev, pcie_err); + } else if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { + struct cper_sec_proc_generic *zdi_err = + acpi_hest_get_payload(gdata); + + arch_apei_report_zdi_error(sec_type, zdi_err); + } + } ghes_print_queued_estatus(); __ghes_panic(ghes, estatus, buf_paddr, fixmap_idx); } diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig index b3ed6212244c1e5405008355b7d0878252564251..f2fd79f22e7d836b07401eb055725b2632ba624c 100644 --- a/drivers/acpi/arm64/Kconfig +++ b/drivers/acpi/arm64/Kconfig @@ -21,3 +21,6 @@ config ACPI_AGDI config ACPI_APMT bool + +config ACPI_MPAM + bool diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 143debc1ba4a9d9dae6147c3c1dfc4408d05c5e9..a55d16c01c50827564233d1cedfbe812253ddd6f 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -4,4 +4,5 @@ obj-$(CONFIG_ACPI_IORT) += iort.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o obj-$(CONFIG_ACPI_APMT) += apmt.o obj-$(CONFIG_ARM_AMBA) += amba.o +obj-$(CONFIG_ACPI_MPAM) += mpam.o obj-y += dma.o init.o diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c new file mode 100644 index 0000000000000000000000000000000000000000..153ef041abf0ca1e037b91dde21b86a62ba903ba --- /dev/null +++ b/drivers/acpi/arm64/mpam.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2022 Arm Ltd. 
+ +/* Parse the MPAM ACPI table feeding the discovered nodes into the driver */ + +#define pr_fmt(fmt) "ACPI MPAM: " fmt + +#include +#include +#include +#include +#include + +#include + +#include + +/* Flags for acpi_table_mpam_msc.*_interrupt_flags */ +#define ACPI_MPAM_MSC_IRQ_MODE_EDGE 1 +#define ACPI_MPAM_MSC_IRQ_TYPE_MASK (3<<1) +#define ACPI_MPAM_MSC_IRQ_TYPE_WIRED 0 +#define ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER (1<<3) +#define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID (1<<4) + +int ddrc_freq; + +/* Use OEM info in MPAM ACPI table to distinguish different machine types */ +struct acpi_mpam_machine_oem_info { + enum mpam_machine_type type; + char signature[ACPI_NAMESEG_SIZE + 1]; + u8 revision; + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static struct acpi_mpam_machine_oem_info acpi_mpam_machines[MPAM_NUM_MACHINE_TYPES] = { + [MPAM_YITIAN710] = { + .signature = "YMPM", + .revision = 0, + .oem_id = "PTG ", + .oem_table_id = "PTG01 ", + .oem_revision = 0, + }, +}; + +static bool frob_irq(struct platform_device *pdev, int intid, u32 flags, + int *irq, u32 processor_container_uid) +{ + int sense; + + if (!intid) + return false; + + /* 0 in this field indicates a wired interrupt */ + if (flags & ACPI_MPAM_MSC_IRQ_TYPE_MASK) + return false; + + if (flags & ACPI_MPAM_MSC_IRQ_MODE_EDGE) + sense = ACPI_EDGE_SENSITIVE; + else + sense = ACPI_LEVEL_SENSITIVE; + + /* + * If the GSI is in the GIC's PPI range, try and create a partitioned + * percpu interrupt. + */ + if (16 <= intid && intid < 32 && processor_container_uid != ~0) { + pr_err_once("Partitioned interrupts not supported\n"); + return false; + } else { + *irq = acpi_register_gsi(&pdev->dev, intid, sense, + ACPI_ACTIVE_HIGH); + } + if (*irq <= 0) { + pr_err_once("Failed to register interrupt 0x%x with ACPI\n", + intid); + return false; + } + + return true; +} + +static void acpi_mpam_parse_irqs(struct platform_device *pdev, + struct acpi_mpam_msc_node *tbl_msc, + struct resource *res, int *res_idx) +{ + u32 flags, aff = ~0; + int irq; + + flags = tbl_msc->overflow_interrupt_flags; + if (flags & ACPI_MPAM_MSC_IRQ_AFFINITY_VALID && + flags & ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER) + aff = tbl_msc->overflow_interrupt_affinity; + if (frob_irq(pdev, tbl_msc->overflow_interrupt, flags, &irq, aff)) { + res[*res_idx].start = irq; + res[*res_idx].end = irq; + res[*res_idx].flags = IORESOURCE_IRQ; + res[*res_idx].name = "overflow"; + + (*res_idx)++; + } + + flags = tbl_msc->error_interrupt_flags; + if (flags & ACPI_MPAM_MSC_IRQ_AFFINITY_VALID && + flags & ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER) + aff = tbl_msc->error_interrupt_affinity; + else + aff = ~0; + if (frob_irq(pdev, tbl_msc->error_interrupt, flags, &irq, aff)) { + res[*res_idx].start = irq; + res[*res_idx].end = irq; + res[*res_idx].flags = IORESOURCE_IRQ; + res[*res_idx].name = "error"; + + (*res_idx)++; + } +} + +static int acpi_mpam_parse_resource(struct mpam_msc *msc, + struct acpi_mpam_resource_node *res) +{ + u32 cache_id; + int level; + + switch (res->locator_type) { + case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE: + cache_id = res->locator.cache_locator.cache_reference; + if (mpam_current_machine == MPAM_YITIAN710) { + /* + * YITIAN710's BIOS doesn't support find level from + * cache id. Since it only supports L3 cache, use a + * fixed value, 3. 
+ */ + level = 3; + } else { + level = find_acpi_cache_level_from_id(cache_id); + if (level < 0) { + pr_err_once("Bad level for cache with id %u\n", cache_id); + return level; + } + } + return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE, + level, cache_id); + case ACPI_MPAM_LOCATION_TYPE_MEMORY: + if (mpam_current_machine == MPAM_YITIAN710) + ddrc_freq = res->locator.memory_locator.reserved; + return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY, + 255, res->locator.memory_locator.proximity_domain); + default: + /* These get discovered later and treated as unknown */ + return 0; + } +} + +int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc) +{ + int i, err; + struct acpi_mpam_resource_node *resources; + + resources = (struct acpi_mpam_resource_node *)(tbl_msc + 1); + for (i = 0; i < tbl_msc->num_resouce_nodes; i++) { + err = acpi_mpam_parse_resource(msc, &resources[i]); + if (err) + return err; + } + + return 0; +} + +static bool __init parse_msc_pm_link(struct acpi_mpam_msc_node *tbl_msc, + struct platform_device *pdev, + u32 *acpi_id) +{ + bool acpi_id_valid = false; + struct acpi_device *buddy; + char hid[16], uid[16]; + int err; + + memset(&hid, 0, sizeof(hid)); + memcpy(hid, &tbl_msc->hardware_id_linked_device, + sizeof(tbl_msc->hardware_id_linked_device)); + + if (!strcmp(hid, ACPI_PROCESSOR_CONTAINER_HID)) { + *acpi_id = tbl_msc->instance_id_linked_device; + acpi_id_valid = true; + } + + err = snprintf(uid, sizeof(uid), "%u", + tbl_msc->instance_id_linked_device); + if (err < 0 || err >= sizeof(uid)) + return acpi_id_valid; + + buddy = acpi_dev_get_first_match_dev(hid, uid, -1); + if (buddy) { + device_link_add(&pdev->dev, &buddy->dev, DL_FLAG_STATELESS); + } + + return acpi_id_valid; +} + +static int decode_interface_type(struct acpi_mpam_msc_node *tbl_msc, + enum mpam_msc_iface *iface) +{ + switch (tbl_msc->interface_type){ + case 0: + *iface = MPAM_IFACE_MMIO; + return 0; + case 1: + *iface = MPAM_IFACE_PCC; + return 0; + default: + return -EINVAL; + } +} + +static int __init _parse_table(struct acpi_table_header *table) +{ + char *table_end, *table_offset = (char *)(table + 1); + struct property_entry props[4]; /* needs a sentinel */ + struct acpi_mpam_msc_node *tbl_msc; + int next_res, next_prop, err = 0; + struct acpi_device *companion; + struct platform_device *pdev; + enum mpam_msc_iface iface; + struct resource res[3]; + char uid[16]; + u32 acpi_id; + int msc_num = 0; + + table_end = (char *)table + table->length; + + while (table_offset < table_end) { + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + table_offset += tbl_msc->length; + + /* + * If any of the reserved fields are set, make no attempt to + * parse the msc structure. This will prevent the driver from + * probing all the MSC, meaning it can't discover the system + * wide supported partid and pmg ranges. This avoids whatever + * this MSC is truncating the partids and creating a screaming + * error interrupt. + */ + if (tbl_msc->reserved || tbl_msc->reserved1 || tbl_msc->reserved2) + continue; + + if (decode_interface_type(tbl_msc, &iface)) + continue; + + next_res = 0; + next_prop = 0; + memset(res, 0, sizeof(res)); + memset(props, 0, sizeof(props)); + + /* + * Use an extra msc_num instead of msc->identifier, since MSC + * nodes with different types in MPAM ACPI table may have the + * same id value. 
+ */ + pdev = platform_device_alloc("mpam_msc", msc_num++); + if (!pdev) { + err = -ENOMEM; + break; + } + + if (tbl_msc->length < sizeof(*tbl_msc)) { + err = -EINVAL; + break; + } + + /* Some power management is described in the namespace: */ + err = snprintf(uid, sizeof(uid), "%u", tbl_msc->identifier); + if (err > 0 && err < sizeof(uid)) { + companion = acpi_dev_get_first_match_dev("ARMHAA5C", uid, -1); + if (companion) + ACPI_COMPANION_SET(&pdev->dev, companion); + } + + if (iface == MPAM_IFACE_MMIO) { + res[next_res].name = "MPAM:MSC"; + res[next_res].start = tbl_msc->base_address; + res[next_res].end = tbl_msc->base_address + tbl_msc->mmio_size - 1; + res[next_res].flags = IORESOURCE_MEM; + next_res++; + } else if (iface == MPAM_IFACE_PCC) { + props[next_prop++] = PROPERTY_ENTRY_U32("pcc-channel", + tbl_msc->base_address); + } + + acpi_mpam_parse_irqs(pdev, tbl_msc, res, &next_res); + err = platform_device_add_resources(pdev, res, next_res); + if (err) + break; + + props[next_prop++] = PROPERTY_ENTRY_U32("arm,not-ready-us", + tbl_msc->max_nrdy_usec); + + /* + * The MSC's CPU affinity is described via its linked power + * management device, but only if it points at a Processor or + * Processor Container. + */ + if (parse_msc_pm_link(tbl_msc, pdev, &acpi_id)) { + props[next_prop++] = PROPERTY_ENTRY_U32("cpu_affinity", + acpi_id); + } + + err = device_create_managed_software_node(&pdev->dev, props, + NULL); + if (err) + break; + + /* Come back later if you want the RIS too */ + err = platform_device_add_data(pdev, tbl_msc, tbl_msc->length); + if (err) + break; + + platform_device_add(pdev); + } + + if (err) + platform_device_put(pdev); + + return err; +} + +static struct acpi_table_header *get_table(void) +{ + struct acpi_table_header *table; + enum mpam_machine_type mtype; + acpi_status status; + + if (acpi_disabled || !mpam_cpus_have_feature()) + return NULL; + + mtype = acpi_mpam_get_machine_type(); + + if (mtype != MPAM_DEFAULT_MACHINE) + status = acpi_get_table(acpi_mpam_machines[mtype].signature, 0, &table); + else + status = acpi_get_table(ACPI_SIG_MPAM, 0, &table); + if (ACPI_FAILURE(status)) + return NULL; + + if (mtype == MPAM_DEFAULT_MACHINE && table->revision != 1) { + acpi_put_table(table); + return NULL; + } + + /* + * Kunpeng's firmware implements an older draft of the MPAM ACPI spec, + * so this driver is not suitable for the Kunpeng platform. + * Skip it. 
+ */ + if (!strncmp(table->oem_id, "HISI", 4)) { + acpi_put_table(table); + return NULL; + } + + return table; +} + + + +static int __init acpi_mpam_parse(void) +{ + struct acpi_table_header *mpam; + int err; + + mpam = get_table(); + if (!mpam) + return 0; + + err = _parse_table(mpam); + acpi_put_table(mpam); + + return err; +} + +static int _count_msc(struct acpi_table_header *table) +{ + char *table_end, *table_offset = (char *)(table + 1); + struct acpi_mpam_msc_node *tbl_msc; + int ret = 0; + + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + table_end = (char *)table + table->length; + + while (table_offset < table_end) { + if (tbl_msc->length < sizeof(*tbl_msc)) + return -EINVAL; + + ret++; + + table_offset += tbl_msc->length; + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + } + + return ret; +} + + +int acpi_mpam_count_msc(void) +{ + struct acpi_table_header *mpam; + int ret; + + mpam = get_table(); + if (!mpam) + return 0; + + ret = _count_msc(mpam); + acpi_put_table(mpam); + + return ret; +} + +enum mpam_machine_type acpi_mpam_get_machine_type(void) +{ + struct acpi_table_header *table; + enum mpam_machine_type ret; + acpi_status status; + int i; + + ret = MPAM_DEFAULT_MACHINE; + + for (i = MPAM_DEFAULT_MACHINE + 1; i < MPAM_NUM_MACHINE_TYPES; i++) { + status = acpi_get_table(acpi_mpam_machines[i].signature, 0, &table); + if (ACPI_FAILURE(status)) + continue; + + if (!memcmp(acpi_mpam_machines[i].oem_id, table->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(acpi_mpam_machines[i].oem_table_id, table->oem_table_id, + ACPI_OEM_TABLE_ID_SIZE) && + acpi_mpam_machines[i].oem_revision == table->oem_revision) { + ret = i; + } + + acpi_put_table(table); + } + + return ret; +} + +/* + * Call after ACPI devices have been created, which happens behind acpi_scan_init() + * called from subsys_initcall(). PCC requires the mailbox driver, which is + * initialised from postcore_initcall(). + */ +subsys_initcall_sync(acpi_mpam_parse); diff --git a/drivers/acpi/numa/Kconfig b/drivers/acpi/numa/Kconfig index 39b1f34c21dfd7a86ddc360857897a94dc0d2046..67d1f40bfa9f4350e9c902bbc4fba983b969275a 100644 --- a/drivers/acpi/numa/Kconfig +++ b/drivers/acpi/numa/Kconfig @@ -2,7 +2,7 @@ config ACPI_NUMA bool "NUMA support" depends on NUMA - depends on (X86 || IA64 || ARM64 || LOONGARCH) + depends on (X86 || IA64 || ARM64 || LOONGARCH || SW64) default y if IA64 || ARM64 config ACPI_HMAT diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index a44c0761fd1c06ac72875f19fd1877ddd63d3f24..8ed90017a56d871dfeb291b80063f6a76d6e89b4 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -211,7 +211,7 @@ __weak int __init numa_fill_memblks(u64 start, u64 end) return NUMA_NO_MEMBLK; } -#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) +#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) /* * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for * I/O localities since SRAT does not list them. 
I/O localities are diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 860014b89b8ebe80fcfecb6107b56423f15770c0..98bbb01981c5a172d578c31c80131d474a883fd5 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -181,7 +181,46 @@ static struct mcfg_fixup mcfg_quirks[] = { LOONGSON_ECAM_MCFG("LOONGSON", 0), LOONGSON_ECAM_MCFG("\0", 1), LOONGSON_ECAM_MCFG("LOONGSON", 1), + LOONGSON_ECAM_MCFG("\0", 2), + LOONGSON_ECAM_MCFG("LOONGSON", 2), + LOONGSON_ECAM_MCFG("\0", 3), + LOONGSON_ECAM_MCFG("LOONGSON", 3), + LOONGSON_ECAM_MCFG("\0", 4), + LOONGSON_ECAM_MCFG("LOONGSON", 4), + LOONGSON_ECAM_MCFG("\0", 5), + LOONGSON_ECAM_MCFG("LOONGSON", 5), + LOONGSON_ECAM_MCFG("\0", 6), + LOONGSON_ECAM_MCFG("LOONGSON", 6), + LOONGSON_ECAM_MCFG("\0", 7), + LOONGSON_ECAM_MCFG("LOONGSON", 7), + #endif /* LOONGARCH */ + +#ifdef CONFIG_SW64 +#define _SW64_ECAM_QUIRK(rev, seg) \ + { "SUNWAY", "SUNWAY. ", rev, seg, MCFG_BUS_ANY, &sw64_pci_ecam_ops } +#define SW64_ECAM_QUIRK(rev, node) _SW64_ECAM_QUIRK(rev, node * 8 + 0),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 1),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 2),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 3),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 4),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 5),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 6),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 7) + + /* + * According to the address space of sw64, up to 8 nodes are supported, + * with a maximum of 8 PCIe controllers per node. + */ + SW64_ECAM_QUIRK(1, 0x00), + SW64_ECAM_QUIRK(1, 0x01), + SW64_ECAM_QUIRK(1, 0x02), + SW64_ECAM_QUIRK(1, 0x03), + SW64_ECAM_QUIRK(1, 0x04), + SW64_ECAM_QUIRK(1, 0x05), + SW64_ECAM_QUIRK(1, 0x06), + SW64_ECAM_QUIRK(1, 0x07), +#endif /* SW64 */ }; static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index a35dd0e41c27043bc0cb6f8783c1bc0280cb1155..56b53d45dadde28a8f4e336d1bb7adb95a741f04 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -21,6 +21,8 @@ #include #include +typedef int (*acpi_pptt_cpu_callback_t)(struct acpi_pptt_processor *, void *); + static struct acpi_subtable_header *fetch_pptt_subtable(struct acpi_table_header *table_hdr, u32 pptt_ref) { @@ -181,9 +183,10 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr, * levels and split cache levels (data/instruction). * @table_hdr: Pointer to the head of the PPTT table * @cpu_node: processor node we wish to count caches for - * @levels: Number of levels if success. + * @levels: Number of levels if success. (*levels) should be initialized by + * the caller with the value to be used as the starting level. * @split_levels: Number of split cache levels (data/instruction) if - * success. Can by NULL. + * success. Can be NULL. * * Given a processor node containing a processing unit, walk into it and count * how many levels exist solely for it, and then walk up each level until we hit @@ -293,6 +296,177 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he return NULL; } +/* + * acpi_pptt_find_cache_backwards() - Given a PPTT cache find a processor node + * that points to it. This lets us find a cacheinfo node by fw_token, but + * is totally broken as many processor nodes may point at the same PPTT + * cache indicating different instances of the cache. (e.g. all the L1 + * caches are the same shape, but they aren't the same cache). + * This only works if you cooked your PPTT table to look like this. 
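+ * (i.e. the table must give each cache instance its own PPTT cache + * structure, rather than sharing one structure between identical caches.)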
+ */ +struct acpi_pptt_processor * +acpi_pptt_find_cache_backwards(struct acpi_table_header *table_hdr, + struct acpi_pptt_cache *cache) +{ + struct acpi_pptt_processor *cpu_node; + struct acpi_subtable_header *entry; + struct acpi_subtable_header *res; + unsigned long table_end; + u32 proc_sz; + int i; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor *); + + /* find the processor node(s) whose private resources point at this cache */ + while ((unsigned long)entry + proc_sz < table_end) { + if (entry->length == 0) { + pr_warn("Invalid zero length subtable\n"); + break; + } + + cpu_node = (struct acpi_pptt_processor *)entry; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + + if (cpu_node->header.type != ACPI_PPTT_TYPE_PROCESSOR) + continue; + + for (i = 0; i < cpu_node->number_of_priv_resources; i++) { + res = acpi_get_pptt_resource(table_hdr, cpu_node, i); + if (&cache->header == res) + return cpu_node; + } + } + + return NULL; +} + +/* parent_node points into the table, but the table isn't provided. */ +static void acpi_pptt_get_child_cpus(struct acpi_pptt_processor *parent_node, + cpumask_t *cpus) +{ + struct acpi_pptt_processor *cpu_node; + struct acpi_table_header *table_hdr; + acpi_status status; + u32 acpi_id; + int cpu; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table_hdr); + if (ACPI_FAILURE(status)) + return; + + for_each_possible_cpu(cpu) { + acpi_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table_hdr, acpi_id); + + while (cpu_node) { + if (cpu_node == parent_node) { + cpumask_set_cpu(cpu, cpus); + break; + } + cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); + } + } + + acpi_put_table(table_hdr); +} + +/** + * acpi_pptt_for_each_container() - Iterate over all processor containers + * + * Not all 'Processor' entries in the PPTT are either a CPU or a Processor + * Container; they may exist purely to describe a Private resource. CPUs + * have to be leaves, so a Processor Container is a non-leaf that has the + * 'ACPI Processor ID valid' flag set. + * + * Return: 0 for a complete walk, or the first non-zero value from the callback + * that stopped the walk. 
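+ * + * For example, the containers can be counted with a trivial callback (an + * illustrative sketch, not part of this patch): + * + *	static int count_one(struct acpi_pptt_processor *cn, void *arg) + *	{ + *		(*(int *)arg)++; + *		return 0; + *	} + * + *	int n = 0; + *	acpi_pptt_for_each_container(count_one, &n);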
+ */ +int acpi_pptt_for_each_container(acpi_pptt_cpu_callback_t callback, void *arg) +{ + struct acpi_pptt_processor *cpu_node; + struct acpi_table_header *table_hdr; + struct acpi_subtable_header *entry; + bool leaf_flag, has_leaf_flag = false; + unsigned long table_end; + acpi_status status; + u32 proc_sz; + int ret = 0; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table_hdr); + if (ACPI_FAILURE(status)) + return 0; + + if (table_hdr->revision > 1) + has_leaf_flag = true; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor); + while ((unsigned long)entry + proc_sz < table_end) { + cpu_node = (struct acpi_pptt_processor *)entry; + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && + cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID) { + leaf_flag = cpu_node->flags & ACPI_PPTT_ACPI_LEAF_NODE; + if ((has_leaf_flag && !leaf_flag) || + (!has_leaf_flag && !acpi_pptt_leaf_node(table_hdr, cpu_node))) { + ret = callback(cpu_node, arg); + if (ret) + break; + } + } + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + } + + acpi_put_table(table_hdr); + + return ret; +} + +struct __cpus_from_container_arg { + u32 acpi_cpu_id; + cpumask_t *cpus; +}; + +static int __cpus_from_container(struct acpi_pptt_processor *container, void *arg) +{ + struct __cpus_from_container_arg *params = arg; + + if (container->acpi_processor_id == params->acpi_cpu_id) + acpi_pptt_get_child_cpus(container, params->cpus); + + return 0; +} + +/** + * acpi_pptt_get_cpus_from_container() - Populate a cpumask with all CPUs in a + * processor container + * @acpi_cpu_id: The UID of the processor container to look for + * @cpus: Where to build the cpumask + * + * Find the specified Processor Container, and fill cpus with all the cpus + * below it. + * + * Return: 0 for a complete walk, or an error if the mask is incomplete. + */ +int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus) +{ + struct __cpus_from_container_arg params; + + params.acpi_cpu_id = acpi_cpu_id; + params.cpus = cpus; + + cpumask_clear(cpus); + return acpi_pptt_for_each_container(&__cpus_from_container, &params); +} + static u8 acpi_cache_type(enum cache_type type) { switch (type) { @@ -812,3 +986,215 @@ int find_acpi_cpu_topology_hetero_id(unsigned int cpu) return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE, ACPI_PPTT_ACPI_IDENTICAL); } + + +/** + * find_acpi_cache_level_from_id() - Get the level of the specified cache + * @cache_id: The id field of the unified cache + * + * Determine the level relative to any CPU for the unified cache identified by + * cache_id. This allows the property to be found even if the CPUs are offline. + * + * The returned level can be used to group unified caches that are peers. + * + * The PPTT table must be rev 3 or later. + * + * If one CPU's L2 is shared with another CPU as an L3, this function will + * return an unpredictable value. + * + * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found. + * Otherwise returns a value which represents the level of the specified cache. 
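+ * + * (The cache 'id' field only exists from PPTT revision 3 onwards, which is + * why older tables are rejected by the revision check below.)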
+ */ +int find_acpi_cache_level_from_id(u32 cache_id) +{ + u32 acpi_cpu_id; + acpi_status status; + int level, cpu, num_levels; + struct acpi_pptt_cache *cache; + struct acpi_table_header *table; + struct acpi_pptt_cache_v1 *cache_v1; + struct acpi_pptt_processor *cpu_node; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + acpi_pptt_warn_missing(); + return -ENOENT; + } + + if (table->revision < 3) { + acpi_put_table(table); + return -ENOENT; + } + + /* + * If we found the cache first, we'd still need to walk from each CPU + * to find the level... + */ + for_each_possible_cpu(cpu) { + num_levels = 0; + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + break; + acpi_count_levels(table, cpu_node, &num_levels, NULL); + + /* Start at 1 for L1 */ + for (level = 1; level <= num_levels; level++) { + cache = acpi_find_cache_node(table, acpi_cpu_id, + ACPI_PPTT_CACHE_TYPE_UNIFIED, + level, &cpu_node); + if (!cache) + continue; + + cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1, + cache, + sizeof(struct acpi_pptt_cache)); + + if (cache->flags & ACPI_PPTT_CACHE_ID_VALID && + cache_v1->cache_id == cache_id) { + acpi_put_table(table); + return level; + } + } + } + + acpi_put_table(table); + return -ENOENT; +} + +/** + * acpi_pptt_get_cpumask_from_cache_id() - Get the cpus associated with the + * specified cache + * @cache_id: The id field of the unified cache + * @cpus: Where to build the cpumask + * + * Determine which CPUs are below this cache in the PPTT. This allows the + * property to be found even if the CPUs are offline. + * + * The PPTT table must be rev 3 or later. + * + * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found. + * Otherwise returns 0 and sets the cpus in the provided cpumask. + */ +int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus) +{ + u32 acpi_cpu_id; + acpi_status status; + int level, cpu, num_levels; + struct acpi_pptt_cache *cache; + struct acpi_table_header *table; + struct acpi_pptt_cache_v1 *cache_v1; + struct acpi_pptt_processor *cpu_node; + + cpumask_clear(cpus); + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + acpi_pptt_warn_missing(); + return -ENOENT; + } + + if (table->revision < 3) { + acpi_put_table(table); + return -ENOENT; + } + + /* + * If we found the cache first, we'd still need to walk from each cpu. + */ + for_each_possible_cpu(cpu) { + num_levels = 0; + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + break; + acpi_count_levels(table, cpu_node, &num_levels, NULL); + + /* Start at 1 for L1 */ + for (level = 1; level <= num_levels; level++) { + cache = acpi_find_cache_node(table, acpi_cpu_id, + ACPI_PPTT_CACHE_TYPE_UNIFIED, + level, &cpu_node); + if (!cache) + continue; + + cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1, + cache, + sizeof(struct acpi_pptt_cache)); + + if (cache->flags & ACPI_PPTT_CACHE_ID_VALID && + cache_v1->cache_id == cache_id) { + cpumask_set_cpu(cpu, cpus); + } + } + } + + acpi_put_table(table); + return 0; +} + +/** + * acpi_pptt_get_cpumask_from_cache_id_and_level() - Get the cpus associated with the + * cache specified by id and level + * @cache_id: The id field of the unified cache + * @cache_level: The level of the unified cache + * @cpus: Where to build the cpumask + * + * Determine which CPUs are below this cache in the PPTT. 
This allows the property + * to be found even if the CPUs are offline. + * + * The PPTT table must be rev 2 or later, since the cache id itself is not + * read from the table here (see the FIXME below). + * + * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found. + * Otherwise returns 0 and sets the cpus in the provided cpumask. + */ +int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, u32 cache_level, + cpumask_t *cpus) +{ + u32 acpi_cpu_id; + acpi_status status; + int cpu; + struct acpi_table_header *table; + struct acpi_pptt_cache *cache_node; + struct acpi_pptt_processor *cpu_node; + + cpumask_clear(cpus); + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + acpi_pptt_warn_missing(); + return -ENOENT; + } + + /* + * FIXME: Since this function does not actually use the cache id in the + * PPTT table, we downgrade the revision requirement. + */ + if (table->revision < 2) { + acpi_put_table(table); + return -ENOENT; + } + + for_each_possible_cpu(cpu) { + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + continue; + + cache_node = acpi_find_cache_node(table, acpi_cpu_id, + ACPI_PPTT_CACHE_TYPE_UNIFIED, + cache_level, &cpu_node); + + if (!cache_node) + continue; + + cpu_node = acpi_pptt_find_cache_backwards(table, cache_node); + if (cpu_node && cpu_node->acpi_processor_id == cache_id) + cpumask_set_cpu(cpu, cpus); + } + + acpi_put_table(table); + return 0; +} diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 8ab0a82b4da41d2aad9f975db596e6d0a9375e20..94cb47d740c9ce3fd3bccff8a1134a1370b4d472 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -566,7 +566,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = { ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT, ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI, - ACPI_SIG_NBFT }; + ACPI_SIG_NBFT, ACPI_SIG_MPAM }; #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header) diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 42b51c9812a0ebab52d2dfd4cb94e563c8312f17..423b4194f06bdfa3e1d24dd3f7c959b3977f26e6 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -115,6 +115,16 @@ config SATA_AHCI If unsure, say N. +config AHCI_ZHAOXIN_SGPIO + tristate "Zhaoxin AHCI SGPIO support" + depends on SATA_AHCI + default y + help + This option enables support for Zhaoxin AHCI SGPIO. + It adds support for SGPIO mode and SGPIO GP mode. + + If unsure, say N. + config SATA_MOBILE_LPM_POLICY int "Default SATA Link Power Management policy for low power chipsets" range 0 4 @@ -553,6 +563,15 @@ config SATA_VITESSE If unsure, say N. +config SATA_ZHAOXIN + tristate "ZhaoXin SATA support" + depends on PCI + select SATA_HOST + help + This option enables support for ZhaoXin Serial ATA. + + If unsure, say N. 
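+# As a rough sketch (not part of this patch), a config fragment enabling +# both new Zhaoxin options could look like: +# +#   CONFIG_SATA_AHCI=y +#   CONFIG_AHCI_ZHAOXIN_SGPIO=y +#   CONFIG_SATA_ZHAOXIN=m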
+ comment "PATA SFF controllers with BMDMA" config PATA_ALI diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 20e6645ab737183d92fec21117286c5e354c272f..ee2cb6367b661b9d0f398ca9732572a108ad069e 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_AHCI_ST) += ahci_st.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_TEGRA) += ahci_tegra.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_XGENE) += ahci_xgene.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_QORIQ) += ahci_qoriq.o libahci.o libahci_platform.o +obj-$(CONFIG_AHCI_ZHAOXIN_SGPIO) += ahci_zhaoxin_sgpio.o # SFF w/ custom DMA obj-$(CONFIG_PDC_ADMA) += pdc_adma.o @@ -45,6 +46,7 @@ obj-$(CONFIG_SATA_SIL) += sata_sil.o obj-$(CONFIG_SATA_SIS) += sata_sis.o obj-$(CONFIG_SATA_SVW) += sata_svw.o obj-$(CONFIG_SATA_ULI) += sata_uli.o +obj-$(CONFIG_SATA_ZHAOXIN) += sata_zhaoxin.o obj-$(CONFIG_SATA_VIA) += sata_via.o obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o diff --git a/drivers/ata/ahci_zhaoxin_sgpio.c b/drivers/ata/ahci_zhaoxin_sgpio.c new file mode 100644 index 0000000000000000000000000000000000000000..ad0715bc389e6770c903674a0f6a891261aadb9c --- /dev/null +++ b/drivers/ata/ahci_zhaoxin_sgpio.c @@ -0,0 +1,706 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ahci_zhaoxin_sgpio.c - Driver for Zhaoxin sgpio + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ahci.h" +#include "libata.h" +#include "ahci_zhaoxin_sgpio.h" + +static LIST_HEAD(sgpio_zhaoxin_list); + +static unsigned int zhaoxin_em_type __read_mostly = AHCI_EM_MSG_LED_MODE; /*LED protocol*/ +module_param(zhaoxin_em_type, int, 0644); +MODULE_PARM_DESC(zhaoxin_em_type, + "AHCI Enclosure Management Message type control (1 = led on, 2 = sgpio on,3 = sgpio gp on)"); + +int ahci_wait_em_reset(struct sgpio_zhaoxin *sgpio_zhaoxin, u32 retry) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + u32 em_ctl; + + if (!sgpio_zhaoxin || retry == 0) { + pr_err("In ahci wait em reset, invalid param\n"); + return -EINVAL; + } + + while (retry--) { /*EM_CTL needs reset at least 64ms*/ + em_ctl = readl(mmio + HOST_EM_CTL); + if (em_ctl & EM_CTL_RST) + usleep_range(10000, 20000); /*EM_CTL still in reset, usleep 10ms*/ + else + break; + + if (!retry) + pr_err("Wait for EM_CTL reset, time out\n"); + } + + return 0; +} + +void ahci_zhaoxin_set_em_sgpio(struct sgpio_zhaoxin *sgpio_zhaoxin) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + + u32 read; + + sgpio_zhaoxin->sgpio_reg.cfg_0.enable = 1; + + sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_a = 0x7; + sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_b = 0x3; + sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_c = 0x0; + sgpio_zhaoxin->sgpio_reg.cfg_1.stretch_act_on = 0; + sgpio_zhaoxin->sgpio_reg.cfg_1.stretch_act_off = 0; + sgpio_zhaoxin->sgpio_reg.cfg_1.max_act_on = 2; + sgpio_zhaoxin->sgpio_reg.cfg_1.force_act_off = 1; + + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sload = 0xf; + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.count = 0x0; + + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 = 0; + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 = 0; + sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = 0; + + sgpio_zhaoxin->sgpio_reg.receive_reg.sgpio_rx = 0x07070707; + sgpio_zhaoxin->sgpio_reg.gp_receive_reg.sgpio_rx_gp = 0; + + /*Setup SGPIO type*/ + read = readl(mmio + sgpio_zhaoxin->em_loc); + read = read | SGPIO_MESSAGE_HEAD; /*LED register MSG_HEAD, select SGPIO*/ + writel(read, mmio + 
sgpio_zhaoxin->em_loc); + + /* Set up GP mode */ + writel(sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sgpio_tx_gp_cfg, em_mmio + 0x38); + + /* Initialize SGPIO CFG1 */ + writel(sgpio_zhaoxin->sgpio_reg.cfg_1.sgpio_cfg_1, em_mmio + 0x4); + + /* Initialize SGPIO CFG0 */ + read = readl(em_mmio); + read |= sgpio_zhaoxin->sgpio_reg.cfg_0.sgpio_cfg_0; + writel(read, em_mmio); +} + +void ahci_zhaoxin_set_em_sgpio_gpmode(struct sgpio_zhaoxin *sgpio_zhaoxin) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + u32 read; + + sgpio_zhaoxin->sgpio_reg.cfg_0.enable = 1; + + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sload = 0xf; + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.count = 0xff; + + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 = 0; + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 = 0; + sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = 0; + + sgpio_zhaoxin->sgpio_reg.receive_reg.sgpio_rx = 0; + sgpio_zhaoxin->sgpio_reg.gp_receive_reg.sgpio_rx_gp = 0xff0f0000; + + /* Set up SGPIO type */ + read = readl(mmio + sgpio_zhaoxin->em_loc); + read |= SGPIO_MESSAGE_HEAD; + writel(read, mmio + sgpio_zhaoxin->em_loc); + + /* Set up GP mode */ + writel(sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sgpio_tx_gp_cfg, em_mmio + 0x38); + + /* Enable SGPIO */ + writel(sgpio_zhaoxin->sgpio_reg.cfg_0.sgpio_cfg_0, em_mmio); +} + +static ssize_t ahci_em_type_sys_show(struct sgpio_zhaoxin *sgpio_zhaoxin, char *buf) +{ + return sprintf(buf, "0x%x\n", zhaoxin_em_type); +} + +static ssize_t ahci_em_type_sys_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, + size_t count) +{ + unsigned int code = 0; + int rc = 0; + + if (kstrtouint(buf, 0, &code)) + return -EINVAL; + + if (code == AHCI_EM_MSG_LED_MODE) { + zhaoxin_em_type = code; + } else if (code == AHCI_EM_MSG_SGPIO_MODE) { + rc = ahci_wait_em_reset(sgpio_zhaoxin, 7); /* wait at least 64ms */ + if (rc < 0) { + pr_err("ahci wait em reset failed!\n"); + return rc; + } + zhaoxin_em_type = code; + ahci_zhaoxin_set_em_sgpio(sgpio_zhaoxin); + } else if (code == AHCI_EM_MSG_SGPIO_GP_MODE) { + rc = ahci_wait_em_reset(sgpio_zhaoxin, 7); /* wait at least 64ms */ + if (rc < 0) { + pr_err("ahci wait em reset failed!\n"); + return rc; + } + zhaoxin_em_type = code; + ahci_zhaoxin_set_em_sgpio_gpmode(sgpio_zhaoxin); + } else + pr_err("Incorrect value: 1 = LED on, 2 = SGPIO normal on, 3 = SGPIO GP on\n"); + + return count; +} + +static ssize_t ahci_transmit_sgpio_message(unsigned long port_num, + struct sgpio_zhaoxin *sgpio_zhaoxin, u16 state, + ssize_t size) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + unsigned long flags; + + if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO)) + return -EINVAL; + + spin_lock_irqsave(&sgpio_zhaoxin->wr_lock, flags); + + switch (port_num) { + case 0: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x22); + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 &= 0x0000ffff; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_locate = (state & 0x38) >> 3; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_error = state & 0x7; + break; + case 1: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x20); + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 &= 0xffff0000; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_locate = (state & 0x38) >> 3; + 
sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_error = state & 0x7; + break; + case 2: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x26); + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 &= 0x0000ffff; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_locate = (state & 0x38) >> 3; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_error = state & 0x7; + break; + case 3: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x24); + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 &= 0xffff0000; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_locate = (state & 0x38) >> 3; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_error = state & 0x7; + break; + default: + pr_err("Unsupported port number in this controller\n"); + break; + } + + spin_unlock_irqrestore(&sgpio_zhaoxin->wr_lock, flags); + + return size; +} + +static ssize_t ahci_transmit_sgpio_indicator(unsigned long port_num, + struct sgpio_zhaoxin *sgpio_zhaoxin, + u8 indicator_code, enum SGPIO_INDICATOR type, + ssize_t size) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + u16 state; + + if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO)) + return -EINVAL; + + if (get_ahci_em_messages() && (zhaoxin_em_type != AHCI_EM_MSG_SGPIO_MODE)) { + pr_err("Current setting not SGPIO normal mode, quit\n"); + return -EINVAL; + } + + switch (port_num) { + case 0: + state = readw(em_mmio + 0x22); + break; + case 1: + state = readw(em_mmio + 0x20); + break; + case 2: + state = readw(em_mmio + 0x26); + break; + case 3: + state = readw(em_mmio + 0x24); + break; + default: + return -EINVAL; + } + + if (type == SGPIO_ACTIVITY) { + state &= 0xfc3f; + state |= (indicator_code&0xf) << 6; + } else if (type == SGPIO_LOCATE) { + state &= 0xffc7; + state |= (indicator_code&0x7) << 3; + } else if (type == SGPIO_ERROR) { + state &= 0xfff8; + state |= indicator_code & 0x7; + } else { + return -EINVAL; + } + + return ahci_transmit_sgpio_message(port_num, sgpio_zhaoxin, state, size); +} + +static ssize_t ahci_transmit_sgpio_indicator_gp(unsigned long port_num, + struct sgpio_zhaoxin *sgpio_zhaoxin, + u8 indicator_code, enum SGPIO_INDICATOR type, + ssize_t size) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + union SGPIO_TX_GP state; + unsigned long flags; + + if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO)) + return -EINVAL; + + if (get_ahci_em_messages() && (zhaoxin_em_type != AHCI_EM_MSG_SGPIO_GP_MODE)) { + pr_err("Current setting not SGPIO_GP mode, quit\n"); + return -EINVAL; + } + + spin_lock_irqsave(&sgpio_zhaoxin->wr_lock, flags); + + state.sgpio_tx_gp = readl(em_mmio + 0x3c); + switch (port_num) { + case 0: + if (type == SGPIO_ACTIVITY) + state.D00 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D01 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D02 = indicator_code & 0x1; + break; + case 1: + if (type == SGPIO_ACTIVITY) + state.D10 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D11 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D12 = indicator_code & 0x1; + break; + case 2: + if (type == SGPIO_ACTIVITY) + state.D20 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D21 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D22 = indicator_code & 0x1; + break; + 
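+	/* + * Drive 3 mirrors the drives above: in GP mode each indicator is a + * single GPIO-style bit per drive (Dx0 = activity, Dx1 = locate, + * Dx2 = error), so only bit 0 of indicator_code is consumed. + */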
case 3: + if (type == SGPIO_ACTIVITY) + state.D30 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D31 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D32 = indicator_code & 0x1; + break; + default: + spin_unlock_irqrestore(&sgpio_zhaoxin->wr_lock, flags); + return -EINVAL; + } + + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writel(state.sgpio_tx_gp, em_mmio + 0x3c); + sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = state.sgpio_tx_gp; + + spin_unlock_irqrestore(&sgpio_zhaoxin->wr_lock, flags); + return size; +} + +static ssize_t sgpio_activity_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, + size_t count) +{ + unsigned long val = 0; + unsigned long port_num = 0; + unsigned long code = 0; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + port_num = val & 0xf; + code = val >> 4; + + if (sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO) { + switch (code) { + case 0x0: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_DISABLE, SGPIO_ACTIVITY, 1); + break; + case 0x1: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_ENABLE, SGPIO_ACTIVITY, 1); + break; + case 0x2: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GA_FON, SGPIO_ACTIVITY, 1); + break; + case 0x3: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GA_FOFF, SGPIO_ACTIVITY, 1); + break; + case 0x4: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_BRIEF_EN_EOF, SGPIO_ACTIVITY, 1); + break; + case 0x5: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_BRIEF_EN_SOF, SGPIO_ACTIVITY, 1); + break; + case 0x6: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GB_FON, SGPIO_ACTIVITY, 1); + break; + case 0x7: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GB_FOFF, SGPIO_ACTIVITY, 1); + break; + case 0x8: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GC_FON, SGPIO_ACTIVITY, 1); + break; + case 0x9: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GC_FOFF, SGPIO_ACTIVITY, 1); + break; + case 0x10: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_OFF, SGPIO_ACTIVITY, 1); + break; + case 0x11: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_ON, SGPIO_ACTIVITY, 1); + break; + default: + pr_err("Unsupported command for activity indicator, cmd:0x%lx\n", val); + break; + } + + return count; + } + + return -EINVAL; +} + +static ssize_t sgpio_locate_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, + size_t count) +{ + unsigned long val = 0; + unsigned long port_num = 0; + unsigned long code = 0; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + port_num = val & 0xf; + code = val >> 4; + + if (sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO) { + switch (code) { + case 0x0: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_DISABLE, SGPIO_LOCATE, 1); + break; + case 0x1: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_ENABLE, SGPIO_LOCATE, 1); + break; + case 0x2: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FON, SGPIO_LOCATE, 1); + break; + case 0x3: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FOFF, SGPIO_LOCATE, 1); + break; + case 0x4: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FON, SGPIO_LOCATE, 1); + break; + case 0x5: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FOFF, SGPIO_LOCATE, 1); + break; + case 0x6: + 
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FON, SGPIO_LOCATE, 1); + break; + case 0x7: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FOFF, SGPIO_LOCATE, 1); + break; + case 0x10: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_OFF, SGPIO_LOCATE, 1); + break; + case 0x11: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, GP_ON, + SGPIO_LOCATE, 1); + break; + default: + pr_err("Unsupported command for locate indicator, cmd:0x%lx\n", val); + break; + } + + return count; + } + return -EINVAL; +} + +static ssize_t sgpio_error_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, size_t count) +{ + unsigned long val = 0; + unsigned long port_num = 0; + unsigned long code = 0; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + port_num = val & 0xf; + code = val >> 4; + + if (sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO) { + switch (code) { + case 0x0: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_DISABLE, SGPIO_ERROR, 1); + break; + case 0x1: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_ENABLE, SGPIO_ERROR, 1); + break; + case 0x2: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FON, SGPIO_ERROR, 1); + break; + case 0x3: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FOFF, SGPIO_ERROR, 1); + break; + case 0x4: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FON, SGPIO_ERROR, 1); + break; + case 0x5: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FOFF, SGPIO_ERROR, 1); + break; + case 0x6: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FON, SGPIO_ERROR, 1); + break; + case 0x7: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FOFF, SGPIO_ERROR, 1); + break; + case 0x10: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_OFF, SGPIO_ERROR, 1); + break; + case 0x11: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_ON, SGPIO_ERROR, 1); + break; + default: + pr_err("Unsupported command for error indicator, cmd:0x%lx\n", val); + break; + } + + return count; + } + + return -EINVAL; +} + +static struct sgpio_zhaoxin_sysfs_attr dev_attr_ahci_em_type_sys = + __ATTR(ahci_em_type_sys, 0644, ahci_em_type_sys_show, + ahci_em_type_sys_store); +static struct sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_activity = + __ATTR(sgpio_activity, 0200, NULL, sgpio_activity_store); +static struct sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_locate = + __ATTR(sgpio_locate, 0200, NULL, sgpio_locate_store); +static struct sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_error = + __ATTR(sgpio_error, 0200, NULL, sgpio_error_store); + +struct attribute *sgpio_attrs[] = { + &dev_attr_ahci_em_type_sys.attr, + &dev_attr_sgpio_activity.attr, + &dev_attr_sgpio_locate.attr, + &dev_attr_sgpio_error.attr, + NULL +}; + +static const struct attribute_group sgpio_attrs_group = { + .attrs = sgpio_attrs +}; +const struct attribute_group *sgpio_groups[] = { + &sgpio_attrs_group, + NULL +}; + +static ssize_t sgpio_zhaoxin_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct sgpio_zhaoxin_sysfs_attr *sgpio_zhaoxin_sysfs_attr = to_sgpio_attr(attr); + struct sgpio_zhaoxin *sgpio_zhaoxin = to_sgpio_obj(kobj); + + if (!sgpio_zhaoxin_sysfs_attr->show) + return -EIO; + + return sgpio_zhaoxin_sysfs_attr->show(sgpio_zhaoxin, buf); +} + +static ssize_t sgpio_zhaoxin_attr_store(struct kobject *kobj, struct 
attribute *attr, + const char *buf, size_t len) +{ + struct sgpio_zhaoxin_sysfs_attr *sgpio_zhaoxin_sysfs_attr = to_sgpio_attr(attr); + struct sgpio_zhaoxin *sgpio_zhaoxin = to_sgpio_obj(kobj); + + if (!sgpio_zhaoxin_sysfs_attr->store) + return -EIO; + + return sgpio_zhaoxin_sysfs_attr->store(sgpio_zhaoxin, buf, len); +} + +const struct sysfs_ops sgpio_zhaoxin_sysfs_ops = { + .show = sgpio_zhaoxin_attr_show, + .store = sgpio_zhaoxin_attr_store, +}; + +const struct kobj_type sgpio_zhaoxin_ktype = { + .sysfs_ops = &sgpio_zhaoxin_sysfs_ops, + .default_groups = sgpio_groups, +}; + +void set_em_messages(struct sgpio_zhaoxin *sgpio_zhaoxin) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + u32 em_loc = readl(mmio + HOST_EM_LOC); + u32 em_ctl = readl(mmio + HOST_EM_CTL); + u8 messages; + + if (!get_ahci_em_messages()) + return; + + messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16; + + if (messages) { + /* store em_loc */ + sgpio_zhaoxin->em_loc = ((em_loc >> 16) * 4); + sgpio_zhaoxin->em_buf_sz = ((em_loc & 0xff) * 4); + sgpio_zhaoxin->em_msg_type = messages; + } +} + +int add_sgpio_zhaoxin(void) +{ + struct pci_dev *pdev_cur = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x9083, NULL); + struct pci_dev *pdev_next = pdev_cur; + struct sgpio_zhaoxin *sgpio_zhaoxin; + int ret = 0; + + if (!get_ahci_em_messages()) + return 0; + + while (pdev_next) { + pdev_next = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x9083, pdev_cur); + + WARN_ON(MAX_TEST_RESULT_LEN <= 0); + + sgpio_zhaoxin = (struct sgpio_zhaoxin *)get_zeroed_page(GFP_KERNEL); + if (!sgpio_zhaoxin) + return -ENOMEM; + + list_add(&sgpio_zhaoxin->list, &sgpio_zhaoxin_list); + ret = kobject_init_and_add(&sgpio_zhaoxin->kobj, &sgpio_zhaoxin_ktype, + &(&pdev_cur->dev)->kobj, "zx_sgpio"); + if (ret) { + kobject_put(&sgpio_zhaoxin->kobj); + return -1; + } + + kobject_uevent(&sgpio_zhaoxin->kobj, KOBJ_ADD); + spin_lock_init(&sgpio_zhaoxin->wr_lock); + sgpio_zhaoxin->kobj_valid = 1; + sgpio_zhaoxin->mmio = pcim_iomap_table(pdev_cur)[5]; + set_em_messages(sgpio_zhaoxin); + ret = ahci_wait_em_reset(sgpio_zhaoxin, 7); /*wait at least 64ms*/ + if (ret < 0) { + pr_err("ahci wait em reset failed!\n"); + return ret; + } + + sgpio_zhaoxin->kobj_valid = 1; + + if (zhaoxin_em_type == AHCI_EM_MSG_SGPIO_GP_MODE) + ahci_zhaoxin_set_em_sgpio_gpmode(sgpio_zhaoxin); + else if (zhaoxin_em_type == AHCI_EM_MSG_SGPIO_MODE) + ahci_zhaoxin_set_em_sgpio(sgpio_zhaoxin); + + pdev_cur = pdev_next; + } + + return 0; +} + + +void remove_sgpio_zhaoxin(void) +{ + struct sgpio_zhaoxin *cur = NULL, *next = NULL; + + if (!get_ahci_em_messages()) + return; + + list_for_each_entry_safe(cur, next, &sgpio_zhaoxin_list, list) { + list_del(&cur->list); + if (cur->kobj_valid) + kobject_put(&cur->kobj); + + free_page((unsigned long)cur); + if (!next) + break; + } +} + +static int __init zhaoxin_sgpio_init(void) +{ + return add_sgpio_zhaoxin(); +} + +static void __exit zhaoxin_sgpio_exit(void) +{ + remove_sgpio_zhaoxin(); +} + +late_initcall(zhaoxin_sgpio_init); +module_exit(zhaoxin_sgpio_exit); + +MODULE_DESCRIPTION("Zhaoxin SGPIO driver"); +MODULE_AUTHOR("XanderChen"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ata/ahci_zhaoxin_sgpio.h b/drivers/ata/ahci_zhaoxin_sgpio.h new file mode 100644 index 0000000000000000000000000000000000000000..b9fd7c6656022df09514ba5411741b6086a105a9 --- /dev/null +++ b/drivers/ata/ahci_zhaoxin_sgpio.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ACHI_ZHAOXIN_SGPIO_H +#define _ACHI_ZHAOXIN_SGPIO_H + +#define SGPIO_OFFSET 0x580 + +#define 
SGPIO_MESSAGE_HEAD 0x3000000 + +#define ACTIVITY_DISABLE 0x0 +#define ACTIVITY_ENABLE 0x1 +#define ACTIVITY_GA_FON 0x2 +#define ACTIVITY_GA_FOFF 0x3 +#define ACTIVITY_BRIEF_EN_EOF 0x4 +#define ACTIVITY_BRIEF_EN_SOF 0x5 +#define ACTIVITY_GB_FON 0x6 +#define ACTIVITY_GB_FOFF 0x7 +#define ACTIVITY_GC_FON 0x8 +#define ACTIVITY_GC_FOFF 0x9 +#define LOCATE_ERROR_DISABLE 0x0 +#define LOCATE_ERROR_ENABLE 0x1 +#define LOCATE_ERROR_GA_FON 0x2 +#define LOCATE_ERROR_GA_FOFF 0x3 +#define LOCATE_ERROR_GB_FON 0x4 +#define LOCATE_ERROR_GB_FOFF 0x5 +#define LOCATE_ERROR_GC_FON 0x6 +#define LOCATE_ERROR_GC_FOFF 0x7 + +#define GP_OFF 0x10 +#define GP_ON 0x11 + +#define to_sgpio_attr(x) container_of(x, struct sgpio_zhaoxin_sysfs_attr, attr) +#define to_sgpio_obj(x) container_of(x, struct sgpio_zhaoxin, kobj) +#define MAX_TEST_RESULT_LEN (PAGE_SIZE - sizeof(struct sgpio_zhaoxin) - 8) + +//SGPIO module parameter: 0-off, 1-LED, 2-SGPIO, 3-SGPIO_GP +enum ahci_em_msg_modes { + AHCI_EM_MSG_OFF = 0, + AHCI_EM_MSG_LED_MODE, + AHCI_EM_MSG_SGPIO_MODE, + AHCI_EM_MSG_SGPIO_GP_MODE, + AHCI_EM_MSG_NULL, +}; + +enum SGPIO_INDICATOR { + SGPIO_ACTIVITY, + SGPIO_LOCATE, + SGPIO_ERROR +}; + +enum SGPIO_CFG1 { + STRETCH_ACTIVITY_OFF, + STRETCH_ACTIVITY_ON, + FORCE_ACTIVITY_OFF, + MAXIMUM_ACTIVITY_ON, + BLINK_GENERATIOR_RATE_B, + BLINK_GENERATIOR_RATE_A, + BLINK_GENERATIOR_RATE_C +}; + +union SGPIO_CFG_0 { + struct { + u32 reserved0 :8; + u32 version :4; + u32 reserved1 :4; + u32 gp_register_count :4; + u32 cfg_register_count :3; + u32 enable :1; + u32 supported_drive_count :8; + }; + u32 sgpio_cfg_0; +}; + +union SGPIO_CFG_1 { + struct { + u32 reserved0 :4; + u32 blink_gen_c :4; + u32 blink_gen_a :4; + u32 blink_gen_b :4; + u32 max_act_on :4; + u32 force_act_off :4; + u32 stretch_act_on :4; + u32 stretch_act_off :4; + }; + u32 sgpio_cfg_1; +}; + +union SGPIO_RX { + struct { + u32 drive_3_input :3; + u32 reserved3 :5; + u32 drive_2_input :3; + u32 reserved2 :5; + u32 drive_1_input :3; + u32 reserved1 :5; + u32 drive_0_input :3; + u32 reserved0 :5; + }; + u32 sgpio_rx; +}; + +union SGPIO_RX_GP_CFG { + struct { + u32 reserved0 :16; + u32 count :8; + u32 reserved1 :8; + }; + u32 sgpio_rx_gp_cfg; +}; +union SGPIO_RX_GP { + struct { + u32 reserved0 :16; + u32 D22 :1; + u32 D30 :1; + u32 D31 :1; + u32 D32 :1; + u32 reserved1:4; + u32 D00 :1; + u32 D01 :1; + u32 D02 :1; + u32 D10 :1; + u32 D11 :1; + u32 D12 :1; + u32 D20 :1; + u32 D21 :1; + }; + u32 sgpio_rx_gp; +}; + +union SGPIO_TX_0 { + struct { + u32 drive_1_error :3; + u32 drive_1_locate :3; + u32 drive_1_active :4; + u32 reserved1 :6; + u32 drive_0_error :3; + u32 drive_0_locate :3; + u32 drive_0_active :4; + u32 reserved0 :6; + }; + u32 sgpio_tx_0; +}; + +union SGPIO_TX_1 { + struct { + u32 drive_3_error :3; + u32 drive_3_locate :3; + u32 drive_3_active :4; + u32 reserved3 :6; + u32 drive_2_error :3; + u32 drive_2_locate :3; + u32 drive_2_active :4; + u32 reserved2 :6; + }; + u32 sgpio_tx_1; +}; + +union SGPIO_TX_GP_CFG { + struct { + u32 reserved0 :16; + u32 count :8; + u32 sload :4; + u32 reserved1 :4; + }; + u32 sgpio_tx_gp_cfg; +}; + +union SGPIO_TX_GP { + struct { + u32 reserved0 :16; + u32 D22 :1; + u32 D30 :1; + u32 D31 :1; + u32 D32 :1; + u32 reserved1:4; + u32 D00 :1; + u32 D01 :1; + u32 D02 :1; + u32 D10 :1; + u32 D11 :1; + u32 D12 :1; + u32 D20 :1; + u32 D21 :1; + }; + u32 sgpio_tx_gp; +}; + +struct AHCI_SGPIO_REG { + union SGPIO_CFG_0 cfg_0; + union SGPIO_CFG_1 cfg_1; + union SGPIO_RX receive_reg; + union SGPIO_RX_GP_CFG gp_receive_cfg; + union SGPIO_RX_GP 
gp_receive_reg; + union SGPIO_TX_0 transmit_0; + union SGPIO_TX_1 transmit_1; + union SGPIO_TX_GP_CFG gp_transmit_cfg; + union SGPIO_TX_GP gp_transmit_reg; +}; + +struct sgpio_zhaoxin { + struct kobject kobj; + struct list_head list; + unsigned int kobj_valid; + unsigned int index; + u32 em_loc; /* enclosure management location */ + u32 em_buf_sz; /* EM buffer size in byte */ + u32 em_msg_type; /* EM message type */ + void __iomem *mmio; + spinlock_t wr_lock; /* protects sgpio register */ + struct AHCI_SGPIO_REG sgpio_reg; /* saved sgpio register */ +}; + +struct sgpio_zhaoxin_sysfs_attr { + struct attribute attr; + ssize_t (*show)(struct sgpio_zhaoxin *sgpio_zhaoxin, char *buf); + ssize_t (*store)(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, size_t count); +}; + +int get_ahci_em_messages(void); + +#endif /* _ACHI_ZHAOXIN_SGPIO_H */ diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index f1263364fa97fa5f37f878f5a16c376290fb9e18..6524c5a02648cdb694d19e9aa8410ec9b6d0beb6 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -207,6 +207,12 @@ static int devslp_idle_timeout __read_mostly = 1000; module_param(devslp_idle_timeout, int, 0644); MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout"); +int get_ahci_em_messages(void) +{ + return ahci_em_messages; +} +EXPORT_SYMBOL_GPL(get_ahci_em_messages); + static void ahci_enable_ahci(void __iomem *mmio) { int i; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 9cc02252218497292ea7e12e422e72cec859e414..dca0e73300daad3ce5bf860e9f3a8025ec1e6849 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -3383,6 +3383,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, struct ata_device **r_failed_dev) { struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; + struct device *device = ap ? ap->host->dev : NULL; + struct pci_dev *pdev = (!device || !dev_is_pci(device)) ? 
NULL : to_pci_dev(device); struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; @@ -3391,6 +3393,11 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, unsigned int err_mask; int rc; + /* If the controller supports none of the LPM states (all three ATA_HOST_NO_* flags are set), disable LPM on this link */ + if ((pdev && pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) && + !(~ap->host->flags & (ATA_HOST_NO_PART | ATA_HOST_NO_SSC | ATA_HOST_NO_DEVSLP))) + link->flags |= ATA_LFLAG_NO_LPM; + /* if the link or host doesn't do LPM, noop */ if (!IS_ENABLED(CONFIG_SATA_HOST) || (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) diff --git a/drivers/ata/sata_zhaoxin.c b/drivers/ata/sata_zhaoxin.c new file mode 100644 index 0000000000000000000000000000000000000000..53c3e2ab60956b85ad7cb42d2db4ea3bfb5ef497 --- /dev/null +++ b/drivers/ata/sata_zhaoxin.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sata_zhaoxin.c - ZhaoXin Serial ATA controllers + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "sata_zx" +#define DRV_VERSION "2.6.1" + +#define PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL 9002 +#define PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL 9003 + +enum board_ids_enum { + zx100s, +}; + +enum { + SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ + SATA_INT_GATE = 0x41, /* SATA interrupt gating */ + SATA_NATIVE_MODE = 0x42, /* Native mode enable */ + PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA / cable detect */ + PATA_PIO_TIMING = 0xAB, /* PATA timing register */ + + PORT0 = (1 << 1), + PORT1 = (1 << 0), + ALL_PORTS = PORT0 | PORT1, + + NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), + + SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ +}; + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val); +static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val); +static int zx_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); + +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); + +static const struct pci_device_id zx_pci_tbl[] = { + { PCI_VDEVICE(ZHAOXIN, PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL), zx100s }, + { PCI_VDEVICE(ZHAOXIN, PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL), zx100s }, + + { } /* terminate list */ +}; + +static struct pci_driver zx_pci_driver = { + .name = DRV_NAME, + .id_table = zx_pci_tbl, + .probe = zx_init_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif + .remove = ata_pci_remove_one, +}; + +static struct scsi_host_template zx_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations zx_base_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_tf_load = zx_tf_load, +}; + +static struct ata_port_operations zx_ops = { + .inherits = &zx_base_ops, + .hardreset = zx_hardreset, + .scr_read = zx_scr_read, + .scr_write = zx_scr_write, +}; + +static struct ata_port_info zx100s_port_info = { + .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &zx_ops, +}; + + +static int zx_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + int rc; + + rc = sata_std_hardreset(link, class, deadline); + if (!rc || rc == -EAGAIN) { + struct ata_port *ap = link->ap; + int pmp = link->pmp; + 
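/* After a COMRESET that did not outright fail, wait for the selected device to clear BSY; tmprc below only influences the log output. */ +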
int tmprc; + + if (pmp) { + ap->ops->sff_dev_select(ap, pmp); + tmprc = ata_sff_wait_ready(&ap->link, deadline); + } else { + tmprc = ata_sff_wait_ready(link, deadline); + } + if (tmprc) + ata_link_err(link, "COMRESET wait for device ready failed (errno=%d)\n", + tmprc); + else + ata_link_err(link, "wait for device ready succeeded\n"); + + ata_link_err(link, "COMRESET success (errno=%d) ap=%d link %d\n", + rc, link->ap->port_no, link->pmp); + } else { + ata_link_err(link, "COMRESET failed (errno=%d) ap=%d link %d\n", + rc, link->ap->port_no, link->pmp); + } + return rc; +} + +static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val) +{ + static const u8 ipm_tbl[] = { 1, 2, 6, 0 }; + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + u8 raw; + + switch (scr) { + case SCR_STATUS: + pci_read_config_byte(pdev, 0xA0 + slot, &raw); + + /* read the DET field, bits 0 and 1 of the config byte */ + v |= raw & 0x03; + + /* read the SPD field, bits 4 and 5 of the config byte */ + v |= raw & 0x30; + + /* read the IPM field, bits 2 and 3 of the config byte */ + v |= ((ipm_tbl[(raw >> 2) & 0x3])<<8); + break; + + case SCR_ERROR: + /* both supported devices use 0xA8 as the SCR base; 0x42 selects the slot */ + WARN_ON(pdev->device != PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL && + pdev->device != PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL); + pci_write_config_byte(pdev, 0x42, slot); + pci_read_config_dword(pdev, 0xA8, &v); + break; + + case SCR_CONTROL: + pci_read_config_byte(pdev, 0xA4 + slot, &raw); + + /* read the DET field, bit0 and bit1 */ + v |= ((raw & 0x02) << 1) | (raw & 0x01); + + /* read the IPM field, bit2 and bit3 */ + v |= ((raw >> 2) & 0x03) << 8; + + break; + + default: + return -EINVAL; + } + + *val = v; + return 0; +} + +static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val) +{ + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + + WARN_ON(pdev == NULL); + + switch (scr) { + case SCR_ERROR: + /* both supported devices use 0xA8 as the SCR base; 0x42 selects the slot */ + WARN_ON(pdev->device != PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL && + pdev->device != PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL); + pci_write_config_byte(pdev, 0x42, slot); + pci_write_config_dword(pdev, 0xA8, val); + return 0; + + case SCR_CONTROL: + /* set the DET field */ + v |= ((val & 0x4) >> 1) | (val & 0x1); + + /* set the IPM field */ + v |= ((val >> 8) & 0x3) << 2; + + pci_write_config_byte(pdev, 0xA4 + slot, v); + + return 0; + + default: + return -EINVAL; + } +} + + +/** + * zx_tf_load - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller. + * + * This works around an internal bug of ZX chipsets: changing the + * IEN bit in the ctl register clobbers the device register.
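+ * Whenever the caller changes ctl, the taskfile below is reloaded with + * ATA_TFLAG_DEVICE forced, so the device register is rewritten after + * the ctl update.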
+ */ +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +{ + struct ata_taskfile ttf; + + if (tf->ctl != ap->last_ctl) { + ttf = *tf; + ttf.flags |= ATA_TFLAG_DEVICE; + tf = &ttf; + } + ata_sff_tf_load(ap, tf); +} + +static const unsigned int zx_bar_sizes[] = { + 8, 4, 8, 4, 16, 256 +}; + +static const unsigned int zx100s_bar_sizes0[] = { + 8, 4, 8, 4, 16, 0 +}; + +static const unsigned int zx100s_bar_sizes1[] = { + 8, 4, 0, 0, 16, 0 +}; + +static int zx_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) +{ + const struct ata_port_info *ppi0[] = { + &zx100s_port_info, NULL + }; + const struct ata_port_info *ppi1[] = { + &zx100s_port_info, &ata_dummy_port_info + }; + struct ata_host *host; + int i, rc; + + if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL) + rc = ata_pci_bmdma_prepare_host(pdev, ppi0, &host); + else if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) + rc = ata_pci_bmdma_prepare_host(pdev, ppi1, &host); + else + rc = -EINVAL; + + if (rc) + return rc; + + *r_host = host; + + /* 9002 hosts four sata ports as M/S of the two channels */ + /* 9003 hosts two sata ports as M/S of the one channel */ + for (i = 0; i < host->n_ports; i++) + ata_slave_link_init(host->ports[i]); + + return 0; +} + +static void zx_configure(struct pci_dev *pdev, int board_id) +{ + u8 tmp8; + + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8); + dev_info(&pdev->dev, "routed to hard irq line %d\n", + (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f); + + /* make sure SATA channels are enabled */ + pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n", + (int)tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); + } + + /* make sure interrupts for each channel sent to us */ + pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n", + (int) tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); + } + + /* make sure native mode is enabled */ + pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8); + if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { + dev_dbg(&pdev->dev, + "enabling SATA channel native mode (0x%x)\n", + (int) tmp8); + tmp8 |= NATIVE_MODE_ALL; + pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); + } +} + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned int i; + int rc; + struct ata_host *host = NULL; + int board_id = (int) ent->driver_data; + const unsigned int *bar_sizes; + int legacy_mode = 0; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL || + pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) { + if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { + u8 tmp8, mask; + + /* TODO: What if one channel is in native mode ... 
*/ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); + mask = (1 << 2) | (1 << 0); + if ((tmp8 & mask) != mask) + legacy_mode = 1; + } + if (legacy_mode) + return -EINVAL; + } + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + if (board_id == zx100s && pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL) + bar_sizes = &zx100s_bar_sizes0[0]; + else if (board_id == zx100s && pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) + bar_sizes = &zx100s_bar_sizes1[0]; + else + bar_sizes = &zx_bar_sizes[0]; + + for (i = 0; i < ARRAY_SIZE(zx_bar_sizes); i++) { + if ((pci_resource_start(pdev, i) == 0) || + (pci_resource_len(pdev, i) < bar_sizes[i])) { + if (bar_sizes[i] == 0) + continue; + + dev_err(&pdev->dev, + "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n", + i, + (unsigned long long)pci_resource_start(pdev, i), + (unsigned long long)pci_resource_len(pdev, i)); + + return -ENODEV; + } + } + + switch (board_id) { + case zx100s: + rc = zx_prepare_host(pdev, &host); + break; + default: + rc = -EINVAL; + } + if (rc) + return rc; + + zx_configure(pdev, board_id); + + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &zx_sht); +} + +module_pci_driver(zx_pci_driver); + +MODULE_AUTHOR("Yanchen:YanchenSun@zhaoxin.com"); +MODULE_DESCRIPTION("SCSI low-level driver for ZX SATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, zx_pci_tbl); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c index 5b59d133b6af4f514caa241b07d898957ac86489..9cb049bd375bc7932022d7450d0cd666ea37bf8d 100644 --- a/drivers/base/arch_numa.c +++ b/drivers/base/arch_numa.c @@ -141,6 +141,22 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid) } #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +static size_t pcpu_mod_reserved = PERCPU_MODULE_RESERVE; + +#ifdef CONFIG_ARM64 +static __init int set_reserve_pcpu(char *str) +{ + if (!str) + return -EINVAL; + + pcpu_mod_reserved = (size_t)memparse(str, NULL); + pr_notice("Reserve module percpu memory to %zuKB\n", + pcpu_mod_reserved >> 10); + return 0; +} +early_param("pcpu_mod_reserve", set_reserve_pcpu); +#endif + unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); @@ -165,7 +181,7 @@ void __init setup_per_cpu_areas(void) * Always reserve area for module percpu variables. That's * what the legacy allocator did. 
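+ * On arm64 the reserve size can now be overridden with the pcpu_mod_reserve= early parameter handled by set_reserve_pcpu() above.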
*/ - rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, + rc = pcpu_embed_first_chunk(pcpu_mod_reserved, PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, pcpu_cpu_distance, early_cpu_to_node); @@ -178,7 +194,7 @@ void __init setup_per_cpu_areas(void) #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK if (rc < 0) - rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, early_cpu_to_node); + rc = pcpu_page_first_chunk(pcpu_mod_reserved, early_cpu_to_node); #endif if (rc < 0) panic("Failed to initialize percpu areas (err=%d).", rc); diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index b5715b8ded89744126389f483b0a6d3da0161ef9..b864f8fbdaa154caed8267416eabb497d4150798 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -183,6 +183,38 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf, return of_property_read_bool(np, "cache-unified"); } +unsigned long cache_of_get_id(struct device_node *np) +{ + struct device_node *cpu; + unsigned long min_id = ~0UL; + + for_each_of_cpu_node(cpu) { + struct device_node *cache_node = cpu; + u64 id = of_get_cpu_hwid(cache_node, 0); + + while ((cache_node = of_find_next_cache_node(cache_node))) { + if ((cache_node == np) && (id < min_id)) { + min_id = id; + of_node_put(cache_node); + break; + } + of_node_put(cache_node); + } + } + + return min_id; +} + +static void cache_of_set_id(struct cacheinfo *this_leaf, struct device_node *np) +{ + unsigned long id = cache_of_get_id(np); + + if (id != ~0UL) { + this_leaf->id = id; + this_leaf->attributes |= CACHE_ID; + } +} + static void cache_of_set_props(struct cacheinfo *this_leaf, struct device_node *np) { @@ -198,6 +230,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf, cache_get_line_size(this_leaf, np); cache_nr_sets(this_leaf, np); cache_associativity(this_leaf); + cache_of_set_id(this_leaf, np); } static int cache_setup_of_node(unsigned int cpu) @@ -622,13 +655,19 @@ static ssize_t file_name##_show(struct device *dev, \ return sysfs_emit(buf, "%u\n", this_leaf->object); \ } -show_one(id, id); show_one(level, level); show_one(coherency_line_size, coherency_line_size); show_one(number_of_sets, number_of_sets); show_one(physical_line_partition, physical_line_partition); show_one(ways_of_associativity, ways_of_associativity); +static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", this_leaf->id); +} + static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index ef427ee787a99bce2a66e7cb93d1d13046374fcf..d186a8b17a4c221ac3b790fdfe6546e128652f86 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include "base.h" @@ -216,8 +218,28 @@ static ssize_t show_cpus_attr(struct device *dev, char *buf) { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); + struct cpumask cpuset_allowed; + struct task_struct __maybe_unused *scenario; + bool rich_container; - return cpumap_print_to_pagebuf(true, buf, ca->map); + rcu_read_lock(); + rich_container = in_rich_container(current); + rcu_read_unlock(); + + if (rich_container && !strcmp(attr->attr.name, "online")) { + read_lock(&tasklist_lock); + scenario = rich_container_get_scenario(); + get_task_struct(scenario); + read_unlock(&tasklist_lock); + + rich_container_get_cpus(scenario, &cpuset_allowed); + + put_task_struct(scenario); + } + else + 
cpumask_copy(&cpuset_allowed, ca->map); + + return cpumap_print_to_pagebuf(true, buf, &cpuset_allowed); } #define _CPU_ATTR(name, map) \ diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index a1474fb67db9b2f09812b293127730efc6655243..525574c312d3aeb7d063341487699ba91e81738d 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "power.h" /* @@ -108,7 +109,19 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr, static ssize_t control_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n) { + struct pci_dev *pdev = (!dev || !dev_is_pci(dev)) ? NULL : to_pci_dev(dev); + device_lock(dev); + + /* Zhaoxin SATA controllers (device 0x9083, revision <= 0x20) can fail when resuming from runtime PM, so refuse to change the control policy */ + if (pdev && + pdev->vendor == PCI_VENDOR_ID_ZHAOXIN && + pdev->device == 0x9083 && + pdev->revision <= 0x20) { + device_unlock(dev); + return -EPERM; + } + if (sysfs_streq(buf, ctrl_auto)) pm_runtime_allow(dev); else if (sysfs_streq(buf, ctrl_on)) diff --git a/drivers/block/brd.c b/drivers/block/brd.c index d816d1512531e4a59cf2686f067c1624b45962ad..d69b96d01316a2fec3d8323143c32baf30704baa 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -44,6 +44,9 @@ struct brd_device { */ struct xarray brd_pages; u64 brd_nr_pages; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* @@ -240,6 +243,25 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page, return err; } +static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size) +{ + sector_t aligned_sector = round_up(sector, PAGE_SECTORS); + struct page *page; + + if ((aligned_sector - sector) * SECTOR_SIZE > size) + return; + + size -= (aligned_sector - sector) * SECTOR_SIZE; + xa_lock(&brd->brd_pages); + while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) { + page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT); + if (page) { + __free_page(page); + brd->brd_nr_pages--; + } + aligned_sector += PAGE_SECTORS; + size -= PAGE_SIZE; + } + xa_unlock(&brd->brd_pages); +} + static void brd_submit_bio(struct bio *bio) { struct brd_device *brd = bio->bi_bdev->bd_disk->private_data; @@ -247,6 +269,12 @@ static void brd_submit_bio(struct bio *bio) struct bio_vec bvec; struct bvec_iter iter; + if (unlikely(op_is_discard(bio->bi_opf))) { + brd_do_discard(brd, sector, bio->bi_iter.bi_size); + bio_endio(bio); + return; + } + bio_for_each_segment(bvec, bio, iter) { unsigned int len = bvec.bv_len; int err; @@ -373,7 +401,7 @@ static int brd_alloc(int i) disk->private_data = brd; strscpy(disk->disk_name, buf, DISK_NAME_LEN); set_capacity(disk, rd_size * 2); - + /* * This is so fdisk will align partitions on 4k, because of * direct_access API needing 4k alignment, returning a PFN * is harmless) */ blk_queue_physical_block_size(disk->queue, PAGE_SIZE); + blk_queue_max_discard_sectors(disk->queue, UINT_MAX); + disk->queue->limits.discard_granularity = PAGE_SIZE; + blk_queue_max_discard_segments(disk->queue, 1); /* Tell the block layer that this is not a rotational device */ blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 625af75833fc37047bbb93228dbf7131404a64a5..8a5e272bdc7851ba28bd68010c03c7fe7efc8824 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -391,6 +391,24 @@ config UV_MMTIMER The uv_mmtimer device allows direct userspace access to the UV system timer.
+config LOONGSON_SE + tristate "LOONGSON SECURITY MODULE Interface" + depends on LOONGARCH + default m + help + If you have a Loongson security module (SE), say Y here and it + will be accessible from within Linux. To compile this driver + as a module, choose M here; the module will be called loongson_se. + +config LOONGSON_SE_SDF + tristate "LOONGSON SECURITY MODULE SDF Interface" + depends on LOONGARCH && LOONGSON_SE + default m + help + If you want to use the Loongson security module (SE) for SDF + services, say Y here and it will be accessible from within Linux. + To compile this driver as a module, choose M here; the module + will be called lsse_sdf_cdev. + source "drivers/char/tpm/Kconfig" config TELCLOCK diff --git a/drivers/char/Makefile b/drivers/char/Makefile index c5f532e412f1a4b93100ad51e5662563d7f3ab25..109af71c54169d206a4d4665b697535f8351a091 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -32,6 +32,8 @@ obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o obj-$(CONFIG_TELCLOCK) += tlclk.o +obj-$(CONFIG_LOONGSON_SE) += loongson_se.o +obj-$(CONFIG_LOONGSON_SE_SDF) += lsse_sdf_cdev.o obj-$(CONFIG_MWAVE) += mwave/ obj-y += agp/ diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 8de74dcfa18cf53bf9ad623264bd3d3d83669fe4..7c486989dd04d58abb0055528c375fc7d5299727 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -152,6 +152,19 @@ config HW_RANDOM_VIA If unsure, say Y. +config HW_RANDOM_ZHAOXIN + tristate "Zhaoxin HW Random Number Generator support" + depends on X86 + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number + Generator hardware found on Zhaoxin based motherboards. + + To compile this driver as a module, choose M here: the + module will be called zhaoxin-rng. + + If unsure, say Y. + config HW_RANDOM_IXP4XX tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support" depends on ARCH_IXP4XX || COMPILE_TEST diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 32549a1186dc57a26250d117a70820b328af62ac..ef5b3ae0794dd87089709a5b9548fc427ebfd253 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o n2-rng-y := n2-drv.o n2-asm.o obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o +obj-$(CONFIG_HW_RANDOM_ZHAOXIN) += zhaoxin-rng.o obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index a9a0a3b09c8bdddda0b029b56147a435ed3a07bf..4288e1114fc96e2a02fae47484ac127f4b0ae683 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -35,7 +35,7 @@ #include #include - +static struct x86_cpu_id via_rng_cpu_id[]; enum { @@ -135,7 +135,7 @@ static int via_rng_init(struct hwrng *rng) * is always enabled if CPUID rng_en is set.
There is no * RNG configuration like it used to be the case in this * register */ - if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)){ + if ((c->x86 == 6) && (c->x86_model >= 0x0f)) { if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { pr_err(PFX "can't enable hardware RNG " "if XSTORE is not enabled\n"); @@ -196,7 +196,7 @@ static int __init via_rng_mod_init(void) { int err; - if (!boot_cpu_has(X86_FEATURE_XSTORE)) + if (!x86_match_cpu(via_rng_cpu_id)) return -ENODEV; pr_info("VIA RNG detected\n"); @@ -217,8 +217,8 @@ static void __exit via_rng_mod_exit(void) } module_exit(via_rng_mod_exit); -static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = { - X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL), +static struct x86_cpu_id via_rng_cpu_id[] = { + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_XSTORE, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id); diff --git a/drivers/char/hw_random/zhaoxin-rng.c b/drivers/char/hw_random/zhaoxin-rng.c new file mode 100644 index 0000000000000000000000000000000000000000..f0bfda78fea14547a6b9b9711ad6a4ea1bd087e5 --- /dev/null +++ b/drivers/char/hw_random/zhaoxin-rng.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RNG driver for Zhaoxin RNGs + * + * Copyright 2023 (c) Zhaoxin Semiconductor Co., Ltd + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "2.0.0" + +enum { + ZHAOXIN_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */ + ZHAOXIN_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */ + ZHAOXIN_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */ + ZHAOXIN_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */ + ZHAOXIN_RNG_MAX_SIZE = (128 * 1024), +}; + +static int zhaoxin_rng_init(struct hwrng *rng) +{ + if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { + pr_err("can't enable hardware RNG if XSTORE is not enabled\n"); + return -ENODEV; + } + + return 0; +} + +static inline int rep_xstore(size_t size, size_t factor, void *result) +{ + asm(".byte 0xf3, 0x0f, 0xa7, 0xc0" + : "=m"(*(size_t *)result), "+c"(size), "+d"(factor), "+D"(result)); + + return 0; +} + +static int zhaoxin_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + if (max > ZHAOXIN_RNG_MAX_SIZE) + max = ZHAOXIN_RNG_MAX_SIZE; + + rep_xstore(max, ZHAOXIN_RNG_CHUNK_1, data); + + return max; +} + +static struct hwrng zhaoxin_rng = { + .name = "zhaoxin", + .init = zhaoxin_rng_init, + .read = zhaoxin_rng_read, +}; + +static struct x86_cpu_id zhaoxin_rng_cpu_ids[] = { + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 6, X86_FEATURE_XSTORE, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_XSTORE, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_XSTORE, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_rng_cpu_ids); + +static int __init zhaoxin_rng_mod_init(void) +{ + int err; + + if (!x86_match_cpu(zhaoxin_rng_cpu_ids)) { + pr_err("CPU does not support XSTORE\n"); + return -ENODEV; + } + + pr_info("Zhaoxin RNG detected\n"); + + err = hwrng_register(&zhaoxin_rng); + if (err) + pr_err("RNG registering failed (%d)\n", err); + + return err; +} +module_init(zhaoxin_rng_mod_init); + +static void __exit zhaoxin_rng_mod_exit(void) +{ + hwrng_unregister(&zhaoxin_rng); +} +module_exit(zhaoxin_rng_mod_exit); + +MODULE_DESCRIPTION("H/W RNG driver for Zhaoxin CPUs"); +MODULE_AUTHOR("YunShen@zhaoxin.com"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index 
cb6138b8ded90548dea6bee34fc10fd805e618e8..bc9c6506fd59e6225eb4e76d9da022c78493bbe4 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile @@ -13,6 +13,10 @@ ifdef CONFIG_PARISC ipmi_si-y += ipmi_si_parisc.o endif +ifdef CONFIG_LOONGARCH +ipmi_si-y += ipmi_si_ls2k500.o +endif + obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o obj-$(CONFIG_IPMI_SI) += ipmi_si.o diff --git a/drivers/char/ipmi/btlock.h b/drivers/char/ipmi/btlock.h new file mode 100644 index 0000000000000000000000000000000000000000..cf585e42d42d4ba5bda34ca52c476aaff40af0bb --- /dev/null +++ b/drivers/char/ipmi/btlock.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BTLOCK_H__ +#define __BTLOCK_H__ + +#include +#include + +union btlock { + char b[2]; + unsigned int u; +}; + +/* + * Wait 'delay' microseconds when taking the lock fails. + * Taking the lock fails when the other side already holds it, or when + * both sides try to take it at the same time. + * The compiler must access the b[] bytes with byte loads and stores. + */ +static inline int btlock_lock(volatile union btlock *p, int n, unsigned char delay) +{ + union btlock t, t1; + unsigned long flags; + unsigned long c0 = get_cycles(), c1; + + if (n > 1) + return -1; + delay |= 0x80; + t1.u = 0; + t1.b[n] = delay; + + while (1) { + local_irq_save(flags); + p->b[n] = delay; + t.u = p->u; + if (t.u == t1.u) { + wmb(); /* flush write out immediately */ + local_irq_restore(flags); + return 0; + } + p->b[n] = 0; + t.u = p->u; + wmb(); /* flush write out immediately */ + local_irq_restore(flags); + c1 = get_cycles(); + if (c1 - c0 > *mscycles * 1000) + return -1; + ndelay(((t.b[1 - n] & 0x7f) + (c1 & 1)) * 100); + } + return 0; +} + +static inline int btlock_trylock(volatile union btlock *p, int n, unsigned char delay) +{ + union btlock t, t1; + unsigned long flags; + + if (n > 1) + return -1; + delay |= 0x80; + t1.u = 0; + t1.b[n] = delay; + + local_irq_save(flags); + p->b[n] = delay; + t.u = p->u; + if (t.u == t1.u) { + wmb(); /* flush write out immediately */ + local_irq_restore(flags); + return 0; + } + p->b[n] = 0; + t.u = p->u; + wmb(); /* flush write out immediately */ + local_irq_restore(flags); + ndelay(((t.b[1 - n] & 0x7f) + (get_cycles() & 1)) * 100); + return -1; +} + +static inline int btlock_unlock(volatile union btlock *p, int n) +{ + p->b[n] = 0; + wmb(); /* flush write out immediately */ + return p->u; +} + +static inline int btlock_islocked(volatile union btlock *p, int n) +{ + union btlock t; + + t.u = p->u; + return t.b[n] && !t.b[1 - n]; +} +#endif diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h index a7ead2a4c753b4a9cca7d919c0ebf0dcf638b777..aa2f81472ce5320ad78fb89902125c7c98ce7455 100644 --- a/drivers/char/ipmi/ipmi_si.h +++ b/drivers/char/ipmi/ipmi_si.h @@ -51,6 +51,9 @@ struct si_sm_io { unsigned int regshift; enum ipmi_addr_space addr_space; unsigned long addr_data; +#ifdef CONFIG_LOONGARCH + void *addr_source_data; +#endif enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. 
*/ union ipmi_smi_info_union addr_info; @@ -101,6 +104,14 @@ static inline void ipmi_si_parisc_init(void) { } static inline void ipmi_si_parisc_shutdown(void) { } #endif +#ifdef CONFIG_LOONGARCH +int ipmi_si_ls2k500_init(void); +void ipmi_si_ls2k500_shutdown(void); +#else +static inline void ipmi_si_ls2k500_init(void) { } +static inline void ipmi_si_ls2k500_shutdown(void) { } +#endif + int ipmi_si_port_setup(struct si_sm_io *io); int ipmi_si_mem_setup(struct si_sm_io *io); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5cd031f3fc9700f10b1d5bcef0564ece1928df69..373ee71811e35fdd64f8791c6763fdd88f3574ad 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2104,6 +2104,8 @@ static int __init init_ipmi_si(void) ipmi_si_platform_init(); + ipmi_si_ls2k500_init(); + ipmi_si_pci_init(); ipmi_si_parisc_init(); @@ -2289,6 +2291,8 @@ static void cleanup_ipmi_si(void) ipmi_si_parisc_shutdown(); + ipmi_si_ls2k500_shutdown(); + ipmi_si_platform_shutdown(); mutex_lock(&smi_infos_lock); diff --git a/drivers/char/ipmi/ipmi_si_ls2k500.c b/drivers/char/ipmi/ipmi_si_ls2k500.c new file mode 100644 index 0000000000000000000000000000000000000000..7e259d85729f5375c7e9def353f5730b40b32ae2 --- /dev/null +++ b/drivers/char/ipmi/ipmi_si_ls2k500.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ipmi_si_ls2k500.c + * + * Handling for the simulated KCS IPMI interface on Loongson-2K500 boards. + */ + +#define pr_fmt(fmt) "ipmi_si_ls2k500: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipmi_si.h" +static unsigned long *mscycles; +static unsigned long *event_jiffies; +#include "kcs_bmc_ls2k500.h" +static int resetbootwait = 60; +module_param(resetbootwait, int, 0664); + +#define KCS_STATUS_CMD_DAT BIT(3) + +static int pcie_busy(void) +{ + if (time_before(jiffies, *event_jiffies + resetbootwait*HZ)) + return -1; + return 0; +} + +static unsigned char intf_sim_inb(const struct si_sm_io *io, + unsigned int offset) +{ + IPMIKCS *ik = io->addr_source_data; + uint32_t ret; + + if (pcie_busy()) + return 0; + if (btlock_lock(&ik->lock, 0, 1) < 0) + return 0; + switch (offset & 1) { + case 0: + ret = ik->data_out_reg; + IPMI_KCS_SET_OBF(ik->status_reg, 0); + break; + case 1: + ret = ik->status_reg; + break; + } + btlock_unlock(&ik->lock, 0); + return ret; +} + +static void intf_sim_outb(const struct si_sm_io *io, unsigned int offset, + unsigned char val) +{ + IPMIKCS *ik = io->addr_source_data; + + if (pcie_busy()) + return; + if (btlock_lock(&ik->lock, 0, 1) < 0) + return; + if (IPMI_KCS_GET_IBF(ik->status_reg)) + goto out; + + switch (offset & 1) { + case 0: + ik->data_in_reg = val; + ik->status_reg &= ~KCS_STATUS_CMD_DAT; + break; + + case 1: + ik->cmd_reg = val; + ik->status_reg |= KCS_STATUS_CMD_DAT; + break; + } + IPMI_KCS_SET_IBF(ik->status_reg, 1); + ik->write_req++; +out: + btlock_unlock(&ik->lock, 0); +} + +static void ipmi_ls2k500_cleanup(struct si_sm_io *io) +{ +} + +int ipmi_si_sim_setup(struct si_sm_io *io) +{ + io->inputb = intf_sim_inb; + io->outputb = intf_sim_outb; + io->io_cleanup = ipmi_ls2k500_cleanup; + return 0; +} + +#define platform_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#define platform_resource_end(dev, bar) ((dev)->resource[(bar)].end) +static int of_ipmi_ls2k500_probe(struct platform_device *pdev) +{ + int rv; + struct si_sm_io io; + void **kcs_data; + + memset(&io, 0, sizeof(io)); + io.addr_source = SI_PLATFORM; + dev_info(&pdev->dev, "probing via ls2k500 
platform"); + io.si_type = SI_KCS; + + io.addr_space = IPMI_MEM_ADDR_SPACE; + io.io_setup = ipmi_si_sim_setup; + io.addr_data = pdev->resource[0].start; + io.addr_source_data = ioremap(pdev->resource[0].start, + pdev->resource[0].end - + pdev->resource[0].start + 1); + kcs_data = dev_get_platdata(&pdev->dev); + event_jiffies = kcs_data[0]; + mscycles = kcs_data[1]; + io.dev = &pdev->dev; + io.regspacing = 4; + io.regsize = DEFAULT_REGSIZE; + io.regshift = 0; + io.irq = 0; + if (io.irq) + io.irq_setup = ipmi_std_irq_setup; + + dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", + &pdev->resource[0], io.regsize, io.regspacing, io.irq); + + rv = ipmi_si_add_smi(&io); + if (rv) + ipmi_si_remove_by_dev(&pdev->dev); + + return rv; +} + +static int ipmi_ls2k500_remove(struct platform_device *pdev) +{ + ipmi_si_remove_by_dev(&pdev->dev); + + return 0; +} + +#define LS2K500_SI_DEVICE_NAME "ipmi_ls2k500_si" +struct platform_driver ipmi_ls2k500_platform_driver = { + .driver = { + .name = LS2K500_SI_DEVICE_NAME, + }, + .probe = of_ipmi_ls2k500_probe, + .remove = ipmi_ls2k500_remove, +}; + +static bool platform_registered; +int ipmi_si_ls2k500_init(void) +{ + int rv; + + rv = platform_driver_register(&ipmi_ls2k500_platform_driver); + if (rv) + pr_err("Unable to register driver: %d\n", rv); + else + platform_registered = true; + return rv; +} + +void ipmi_si_ls2k500_shutdown(void) +{ + if (platform_registered) + platform_driver_unregister(&ipmi_ls2k500_platform_driver); +} diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c index 86b92e93a70d52731635a43792eb61110582e2a2..dc6cf7d89fea4c8a696922a5d6808c5c5e78e422 100644 --- a/drivers/char/ipmi/ipmi_si_mem_io.c +++ b/drivers/char/ipmi/ipmi_si_mem_io.c @@ -3,9 +3,77 @@ #include #include "ipmi_si.h" +#ifdef CONFIG_ARCH_PHYTIUM +#include + +#define CTL_RST_FUNC_ID 0xC2000011 + +static bool apply_phytium2500_workaround; + +struct ipmi_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; +}; + +#ifdef CONFIG_ACPI +static struct ipmi_workaround_oem_info wa_info[] = { + { + .oem_id = "KPSVVJ", + } +}; +#endif + +static void ipmi_check_phytium_workaround(void) +{ +#ifdef CONFIG_ACPI + struct acpi_table_header tbl; + int i; + + if (ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_DSDT, 0, &tbl))) + return; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (strncmp(wa_info[i].oem_id, tbl.oem_id, ACPI_OEM_ID_SIZE)) + continue; + + apply_phytium2500_workaround = true; + break; + } +#endif +} + +static void ctl_smc(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3) +{ + struct arm_smccc_res res; + + arm_smccc_smc(arg0, arg1, arg2, arg3, 0, 0, 0, 0, &res); + if (res.a0 != 0) + pr_err("Error: Firmware call SMC reset Failed: %d, addr: 0x%lx\n", + (int)res.a0, arg2); +} + +static void ctl_timeout_reset(void) +{ + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x28100208, 0x1); + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x2810020C, 0x1); +} + +static inline void ipmi_phytium_workaround(void) +{ + if (apply_phytium2500_workaround) + ctl_timeout_reset(); +} + +#else +static inline void ipmi_check_phytium_workaround(void) {} +static inline void ipmi_phytium_workaround(void) {} +#endif + static unsigned char intf_mem_inb(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return readb((io->addr)+(offset * io->regspacing)); } @@ -18,6 +86,8 @@ static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset, static unsigned char intf_mem_inw(const struct si_sm_io *io, unsigned int offset) { + 
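/* Fire the Phytium 2500 SMC reset workaround before this MMIO read; it is a no-op unless the DSDT OEM ID matched in ipmi_check_phytium_workaround(). */ +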
ipmi_phytium_workaround(); + return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -31,6 +101,8 @@ static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset, static unsigned char intf_mem_inl(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -44,6 +116,8 @@ static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset, #ifdef readq static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -81,6 +155,8 @@ int ipmi_si_mem_setup(struct si_sm_io *io) if (!addr) return -ENODEV; + ipmi_check_phytium_workaround(); + /* * Figure out the actual readb/readw/readl/etc routine to use based * upon the register size. diff --git a/drivers/char/ipmi/kcs_bmc_ls2k500.h b/drivers/char/ipmi/kcs_bmc_ls2k500.h new file mode 100644 index 0000000000000000000000000000000000000000..86e08a08d41a60005d0f126036a3309f1e5ab334 --- /dev/null +++ b/drivers/char/ipmi/kcs_bmc_ls2k500.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KCS_BMC_LS2K500__ +#define __KCS_BMC_LS2K500__ 1 +#include +#include "btlock.h" +#define IPMI_KCS_OBF_BIT 0 +#define IPMI_KCS_IBF_BIT 1 +#define IPMI_KCS_SMS_ATN_BIT 2 +#define IPMI_KCS_CD_BIT 3 + +#define IPMI_KCS_OBF_MASK (1 << IPMI_KCS_OBF_BIT) +#define IPMI_KCS_GET_OBF(d) (((d) >> IPMI_KCS_OBF_BIT) & 0x1) +#define IPMI_KCS_SET_OBF(d, v) ((d) = (((d) & ~IPMI_KCS_OBF_MASK) | \ + (((v) & 1) << IPMI_KCS_OBF_BIT))) +#define IPMI_KCS_IBF_MASK (1 << IPMI_KCS_IBF_BIT) +#define IPMI_KCS_GET_IBF(d) (((d) >> IPMI_KCS_IBF_BIT) & 0x1) +#define IPMI_KCS_SET_IBF(d, v) ((d) = (((d) & ~IPMI_KCS_IBF_MASK) | \ + (((v) & 1) << IPMI_KCS_IBF_BIT))) +#define IPMI_KCS_SMS_ATN_MASK (1 << IPMI_KCS_SMS_ATN_BIT) +#define IPMI_KCS_GET_SMS_ATN(d) (((d) >> IPMI_KCS_SMS_ATN_BIT) & 0x1) +#define IPMI_KCS_SET_SMS_ATN(d, v) ((d) = (((d) & ~IPMI_KCS_SMS_ATN_MASK) | \ + ((v) & 1) << IPMI_KCS_SMS_ATN_BIT)) +#define IPMI_KCS_CD_MASK (1 << IPMI_KCS_CD_BIT) +#define IPMI_KCS_GET_CD(d) (((d) >> IPMI_KCS_CD_BIT) & 0x1) +#define IPMI_KCS_SET_CD(d, v) ((d) = (((d) & ~IPMI_KCS_CD_MASK) | \ + (((v) & 1) << IPMI_KCS_CD_BIT))) + +#define IPMI_KCS_IDLE_STATE 0 +#define IPMI_KCS_READ_STATE 1 +#define IPMI_KCS_WRITE_STATE 2 +#define IPMI_KCS_ERROR_STATE 3 + +#define IPMI_KCS_GET_STATE(d) (((d) >> 6) & 0x3) +#define IPMI_KCS_SET_STATE(d, v) ((d) = ((d) & ~0xc0) | (((v) & 0x3) << 6)) + +#define IPMI_KCS_ABORT_STATUS_CMD 0x60 +#define IPMI_KCS_WRITE_START_CMD 0x61 +#define IPMI_KCS_WRITE_END_CMD 0x62 +#define IPMI_KCS_READ_CMD 0x68 +#define IPMI_KCS_STATUS_NO_ERR 0x00 +#define IPMI_KCS_STATUS_ABORTED_ERR 0x01 +#define IPMI_KCS_STATUS_BAD_CC_ERR 0x02 +#define IPMI_KCS_STATUS_LENGTH_ERR 0x06 +#define KCS_STATUS_CMD_DAT BIT(3) + +typedef struct IPMIKCS { + union btlock lock; + uint8_t status_reg; + uint8_t data_out_reg; + + int16_t data_in_reg; + int16_t cmd_reg; + int16_t reserved2; + + uint32_t write_req; + uint32_t write_ack; + + uint32_t reserved3; + uint32_t reserved4; +} IPMIKCS; + +struct loongson_kcs_bmc { + struct list_head next; + IPMIKCS *kcs; + struct kcs_bmc *bmc; +}; +#endif diff --git a/drivers/char/loongson_se.c b/drivers/char/loongson_se.c new file mode 100644 index 0000000000000000000000000000000000000000..3eeb348fc7117531e05d67bd31ce41dcbbd8371b --- /dev/null +++ b/drivers/char/loongson_se.c @@ -0,0 +1,599 @@ 
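+/* + * loongson_se.c - Loongson Security Engine (SE) shared-memory mailbox driver. + */ +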
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int se_mem_size = 0x800000; +module_param(se_mem_size, int, 0444); +MODULE_PARM_DESC(se_mem_size, "LOONGSON SE shared memory size"); + +static int se_mem_page = PAGE_SIZE; +module_param(se_mem_page, int, 0444); +MODULE_PARM_DESC(se_mem_page, "LOONGSON SE shared memory page size"); + +static struct loongson_se se_dev; + +static int lsse_open(struct inode *inode, struct file *filp) +{ + return 0; +} + +static ssize_t lsse_write(struct file *filp, const char __user *buf, + size_t cnt, loff_t *offt) +{ + return 0; +} + +static const struct file_operations lsse_fops = { + .owner = THIS_MODULE, + .open = lsse_open, + .write = lsse_write, +}; + +static struct miscdevice lsse_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "loongson-se", + .fops = &lsse_fops, +}; + +static inline u32 se_readl(u64 addr) +{ + return readl(se_dev.base + addr); +} + +static inline void se_writel(u32 val, u64 addr) +{ + writel(val, se_dev.base + addr); +} + +static inline bool se_ch_status(struct loongson_se *se, u32 int_bit) +{ + return !!(se->ch_status & int_bit) == 1; +} + +static void se_enable_int(struct loongson_se *se, u32 int_bit) +{ + unsigned long flag; + u32 tmp; + + if (!int_bit) + return; + + spin_lock_irqsave(&se->dev_lock, flag); + + tmp = se_readl(SE_S2LINT_EN); + tmp |= int_bit; + se_writel(tmp, SE_S2LINT_EN); + + spin_unlock_irqrestore(&se->dev_lock, flag); +} + +static void se_disable_int(struct loongson_se *se, u32 int_bit) +{ + unsigned long flag; + u32 tmp; + + if (!int_bit) + return; + + spin_lock_irqsave(&se->dev_lock, flag); + + tmp = se_readl(SE_S2LINT_EN); + tmp &= ~(int_bit); + se_writel(tmp, SE_S2LINT_EN); + + spin_unlock_irqrestore(&se->dev_lock, flag); +} + +static int se_send_requeset(struct loongson_se *se, + struct se_mailbox_data *req) +{ + unsigned long flag; + u32 status; + int err = 0; + int i; + + if (!se || !req) + return -EINVAL; + + if (se_readl(SE_L2SINT_STAT) || + !(se_readl(SE_L2SINT_EN) & req->int_bit)) + return -EBUSY; + + spin_lock_irqsave(&se->cmd_lock, flag); + + for (i = 0; i < ARRAY_SIZE(req->u.mailbox); i++) + se_writel(req->u.mailbox[i], SE_MAILBOX_S + i * 4); + + se_writel(req->int_bit, SE_L2SINT_SET); + + err = readl_relaxed_poll_timeout_atomic(se->base + SE_L2SINT_STAT, status, + !(status & req->int_bit), 10, 10000); + + spin_unlock_irqrestore(&se->cmd_lock, flag); + + return err; +} + +static int se_get_response(struct loongson_se *se, + struct se_mailbox_data *res) +{ + unsigned long flag; + int i; + + if (!se || !res) + return -EINVAL; + + if ((se_readl(SE_S2LINT_STAT) & res->int_bit) == 0) + return -EBUSY; + + spin_lock_irqsave(&se->cmd_lock, flag); + + for (i = 0; i < ARRAY_SIZE(res->u.mailbox); i++) + res->u.mailbox[i] = se_readl(SE_MAILBOX_L + i * 4); + + se_writel(res->int_bit, SE_S2LINT_CL); + + spin_unlock_irqrestore(&se->cmd_lock, flag); + + return 0; +} + +static int loongson_se_get_res(struct loongson_se *se, u32 int_bit, u32 cmd, + struct se_mailbox_data *res) +{ + int err = 0; + + res->int_bit = int_bit; + + if (se_get_response(se, res)) { + dev_err(se->dev, "Int 0x%x get response fail.\n", int_bit); + return -EFAULT; + } + + /* Check response */ + if (res->u.res.cmd == cmd) + err = 0; + else { + dev_err(se->dev, "Response cmd is 0x%x, not expect cmd 0x%x.\n", + res->u.res.cmd, cmd); + err = -EFAULT; + } + + return err; +} + +static int se_send_genl_cmd(struct loongson_se *se, struct 
se_mailbox_data *req, + struct se_mailbox_data *res, int retry) +{ + int err = 0, cnt = 0; + +try_again: + if (cnt++ >= retry) { + err = -ETIMEDOUT; + goto out; + } + + dev_dbg(se->dev, "%d time send cmd 0x%x\n", cnt, req->u.gcmd.cmd); + + err = se_send_requeset(se, req); + if (err) + goto try_again; + + if (!wait_for_completion_timeout(&se->cmd_completion, + msecs_to_jiffies(0x1000))) { + se_enable_int(se, req->int_bit); + goto try_again; + } + + err = loongson_se_get_res(se, req->int_bit, req->u.gcmd.cmd, res); + if (err || res->u.res.cmd_ret) { + se_enable_int(se, req->int_bit); + goto try_again; + } + +out: + se_enable_int(se, req->int_bit); + + return err; +} + +static int loongson_se_set_msg(struct lsse_ch *ch) +{ + struct loongson_se *se = ch->se; + struct se_mailbox_data req = {0}; + struct se_mailbox_data res = {0}; + int err; + + req.int_bit = SE_INT_SETUP; + req.u.gcmd.cmd = SE_CMD_SETMSG; + /* MSG off */ + req.u.gcmd.info[0] = ch->id; + req.u.gcmd.info[1] = ch->smsg - se->mem_base; + req.u.gcmd.info[2] = ch->msg_size; + + dev_dbg(se->dev, "Set Channel %d msg off 0x%x, msg size %d\n", ch->id, + req.u.gcmd.info[1], req.u.gcmd.info[2]); + + err = se_send_genl_cmd(se, &req, &res, 5); + if (res.u.res.cmd_ret) + return res.u.res.cmd_ret; + + return err; +} + +static irqreturn_t loongson_se_irq(int irq, void *dev_id) +{ + struct loongson_se *se = (struct loongson_se *)dev_id; + struct lsse_ch *ch; + u32 int_status; + + int_status = se_readl(SE_S2LINT_STAT); + + dev_dbg(se->dev, "%s int status is 0x%x\n", __func__, int_status); + + se_disable_int(se, int_status); + + if (int_status & SE_INT_SETUP) { + complete(&se->cmd_completion); + int_status &= ~SE_INT_SETUP; + } + + while (int_status) { + int id = __ffs(int_status); + + ch = &se->chs[id]; + if (ch->complete) + ch->complete(ch); + int_status &= ~BIT(id); + se_writel(BIT(id), SE_S2LINT_CL); + } + + return IRQ_HANDLED; +} + +static int se_init_hw(struct loongson_se *se) +{ + struct se_mailbox_data req = {0}; + struct se_mailbox_data res = {0}; + struct device *dev = se->dev; + int err, retry = 5; + u64 size; + + size = se_mem_size; + + if (size & (size - 1)) { + size = roundup_pow_of_two(size); + se_mem_size = size; + } + + se_enable_int(se, SE_INT_SETUP); + + /* Start engine */ + memset(&req, 0, sizeof(struct se_mailbox_data)); + memset(&res, 0, sizeof(struct se_mailbox_data)); + req.int_bit = SE_INT_SETUP; + req.u.gcmd.cmd = SE_CMD_START; + err = se_send_genl_cmd(se, &req, &res, retry); + if (err) + return err; + + /* Get Version */ + memset(&req, 0, sizeof(struct se_mailbox_data)); + memset(&res, 0, sizeof(struct se_mailbox_data)); + req.int_bit = SE_INT_SETUP; + req.u.gcmd.cmd = SE_CMD_GETVER; + err = se_send_genl_cmd(se, &req, &res, retry); + if (err) + return err; + + se->version = res.u.res.info[0]; + + /* Setup data buffer */ + se->mem_base = dmam_alloc_coherent(dev, size, + &se->mem_addr, GFP_KERNEL); + if (!se->mem_base) + return -ENOMEM; + + memset(se->mem_base, 0, size); + + memset(&req, 0, sizeof(struct se_mailbox_data)); + memset(&res, 0, sizeof(struct se_mailbox_data)); + req.int_bit = SE_INT_SETUP; + req.u.gcmd.cmd = SE_CMD_SETBUF; + /* MMAP */ + req.u.gcmd.info[0] = (se->mem_addr & 0xffffffff) | 0x80; + req.u.gcmd.info[1] = se->mem_addr >> 32; + /* MASK */ + req.u.gcmd.info[2] = ~(size - 1); + req.u.gcmd.info[3] = 0xffffffff; + + pr_debug("Set win mmap 0x%llx, mask 0x%llx\n", + ((u64)req.u.gcmd.info[1] << 32) | req.u.gcmd.info[0], + ((u64)req.u.gcmd.info[3] << 32) | req.u.gcmd.info[2]); + + err = se_send_genl_cmd(se, 
&req, &res, retry); + if (err) + return err; + + se->mem_map_size = size / se_mem_page; + se->mem_map = bitmap_zalloc(se->mem_map_size, GFP_KERNEL); + if (!se->mem_map) + return -ENOMEM; + + dev_info(se->dev, "SE module setup done, shared memory size is 0x%x bytes, " + "memory page size is 0x%x bytes\n", + se_mem_size, se_mem_page); + + return err; +} + +static void loongson_se_disable_hw(struct loongson_se *se) +{ + struct se_mailbox_data req = {0}; + struct se_mailbox_data res = {0}; + int retry = 5; + + /* Stop engine */ + req.int_bit = SE_INT_SETUP; + req.u.gcmd.cmd = SE_CMD_STOP; + se_send_genl_cmd(se, &req, &res, retry); + + se_disable_int(se, SE_INT_ALL); + kfree(se->mem_map); +} + +int se_send_ch_requeset(struct lsse_ch *ch) +{ + struct loongson_se *se; + u32 status, int_bit; + int err = 0; + + if (!ch) + return -EINVAL; + + se = ch->se; + int_bit = ch->int_bit; + + if ((se_readl(SE_L2SINT_STAT) & int_bit) || + !(se_readl(SE_L2SINT_EN) & int_bit)) + return -EBUSY; + + se_enable_int(se, int_bit); + se_writel(int_bit, SE_L2SINT_SET); + + err = readl_relaxed_poll_timeout_atomic(se->base + SE_L2SINT_STAT, status, + !(status & int_bit), 10, 10000); + + return err; +} +EXPORT_SYMBOL_GPL(se_send_ch_requeset); + +struct lsse_ch *se_init_ch(int id, int data_size, int msg_size, void *priv, + void (*complete)(struct lsse_ch *se_ch)) +{ + struct loongson_se *se = &se_dev; + struct lsse_ch *ch; + unsigned long flag; + int data_first, data_nr; + int msg_first, msg_nr; + + if (!se) { + pr_err("SE has not been initialized\n"); + return NULL; + } + + if (id == 0 || id > SE_CH_MAX) { + dev_err(se->dev, "Channel number %d is invalid\n", id); + return NULL; + } + + if (se_ch_status(se, BIT(id))) { + dev_err(se->dev, "Channel number %d has been initialized\n", id); + return NULL; + } + + spin_lock_irqsave(&se->dev_lock, flag); + + ch = &se_dev.chs[id]; + ch->se = se; + ch->id = id; + ch->int_bit = BIT(id); + se->ch_status |= BIT(id); + + data_nr = round_up(data_size, se_mem_page) / se_mem_page; + data_first = bitmap_find_next_zero_area(se->mem_map, se->mem_map_size, + 0, data_nr, 0); + if (data_first >= se->mem_map_size) { + dev_err(se->dev, "Insufficient memory space\n"); + spin_unlock_irqrestore(&se->dev_lock, flag); + return NULL; + } + + bitmap_set(se->mem_map, data_first, data_nr); + ch->data_buffer = se->mem_base + data_first * se_mem_page; + ch->data_addr = se->mem_addr + data_first * se_mem_page; + ch->data_size = data_size; + + msg_nr = round_up(msg_size, se_mem_page) / se_mem_page; + msg_first = bitmap_find_next_zero_area(se->mem_map, se->mem_map_size, + 0, msg_nr, 0); + if (msg_first >= se->mem_map_size) { + dev_err(se->dev, "Insufficient memory space\n"); + bitmap_clear(se->mem_map, data_first, data_nr); + spin_unlock_irqrestore(&se->dev_lock, flag); + return NULL; + } + + bitmap_set(se->mem_map, msg_first, msg_nr); + ch->smsg = se->mem_base + msg_first * se_mem_page; + ch->rmsg = ch->smsg + msg_size / 2; + ch->msg_size = msg_size; + + ch->complete = complete; + ch->priv = priv; + + spin_lock_init(&ch->ch_lock); + + spin_unlock_irqrestore(&se->dev_lock, flag); + + if (loongson_se_set_msg(ch)) { + dev_err(se->dev, "Channel %d setup message address failed\n", id); + return NULL; + } + + se_enable_int(se, ch->int_bit); + + return ch; +} +EXPORT_SYMBOL_GPL(se_init_ch); + +void se_deinit_ch(struct lsse_ch *ch) +{ + struct loongson_se *se = &se_dev; + unsigned long flag; + int first, nr; + int id = ch->id; + + if (!se) { + pr_err("SE has not been initialized\n"); + return; + } + + if (id == 0 || 
id > SE_CH_MAX) { + dev_err(se->dev, "Channel number %d is invalid\n", id); + return; + } + + if (!se_ch_status(se, BIT(id))) { + dev_err(se->dev, "Channel number %d has not been initialized\n", id); + return; + } + + spin_lock_irqsave(&se->dev_lock, flag); + + se->ch_status &= ~BIT(ch->id); + + first = (ch->data_buffer - se->mem_base) / se_mem_page; + nr = round_up(ch->data_size, se_mem_page) / se_mem_page; + bitmap_clear(se->mem_map, first, nr); + + first = (ch->smsg - se->mem_base) / se_mem_page; + nr = round_up(ch->msg_size, se_mem_page) / se_mem_page; + bitmap_clear(se->mem_map, first, nr); + + spin_unlock_irqrestore(&se->dev_lock, flag); + + se_disable_int(se, ch->int_bit); +} +EXPORT_SYMBOL_GPL(se_deinit_ch); + +static struct platform_device lsse_sdf_pdev = { + .name = "loongson-sdf", + .id = -1, +}; + +static const struct of_device_id loongson_se_of_match[] = { + { .compatible = "loongson,ls3c6000se", }, + {} +}; +MODULE_DEVICE_TABLE(of, loongson_se_of_match); + +static int loongson_se_probe(struct platform_device *pdev) +{ + struct loongson_se *se = &se_dev; + struct resource *res; + struct device *dev = &pdev->dev; + int nr_irq, err, i; + int irq[8]; + + nr_irq = platform_irq_count(pdev); + if (nr_irq < 0 || nr_irq > ARRAY_SIZE(irq)) + return -ENODEV; + + for (i = 0; i < nr_irq; i++) { + irq[i] = platform_get_irq(pdev, i); + if (irq[i] < 0) + return -ENODEV; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + se->base = devm_ioremap_resource(dev, res); + if (IS_ERR(se->base)) + return PTR_ERR(se->base); + + se->dev = &pdev->dev; + platform_set_drvdata(pdev, se); + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + init_completion(&se->cmd_completion); + spin_lock_init(&se->cmd_lock); + spin_lock_init(&se->dev_lock); + + for (i = 0; i < nr_irq; i++) { + err = devm_request_irq(dev, irq[i], loongson_se_irq, 0, + "loongson-se", se); + if (err) + goto out; + } + + err = se_init_hw(se); + if (err) + goto disable_hw; + + err = misc_register(&lsse_miscdev); + if (err) + goto disable_hw; + + err = platform_device_register(&lsse_sdf_pdev); + if (err) + pr_err("register sdf device failed\n"); + + return 0; + +disable_hw: + loongson_se_disable_hw(se); +out: + for (i--; i >= 0; i--) + devm_free_irq(dev, irq[i], se); + return err; +} + +static int loongson_se_remove(struct platform_device *pdev) +{ + struct loongson_se *se = platform_get_drvdata(pdev); + + misc_deregister(&lsse_miscdev); + loongson_se_disable_hw(se); + platform_device_unregister(&lsse_sdf_pdev); + + return 0; +} + +static struct platform_driver loongson_se_driver = { + .probe = loongson_se_probe, + .remove = loongson_se_remove, + .driver = { + .name = "loongson-se", + .of_match_table = loongson_se_of_match, + }, +}; + +module_platform_driver(loongson_se_driver); + +MODULE_AUTHOR("Yinggang Gu"); +MODULE_DESCRIPTION("Loongson SE driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/lsse_sdf_cdev.c b/drivers/char/lsse_sdf_cdev.c new file mode 100644 index 0000000000000000000000000000000000000000..a4df080036862e39c1a8e7f0e125a4f93246aa48 --- /dev/null +++ b/drivers/char/lsse_sdf_cdev.c @@ -0,0 +1,379 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SE_SDF_BUFSIZE (PAGE_SIZE * 2) +#define SDF_OPENSESSION 0x204 +#define SDF_CLOSESESSION 0x205 + +struct lsse_sdf_dev { + struct lsse_ch *se_ch; + struct mutex data_lock; + bool processing_cmd; + + /* Synchronous CMD */ + wait_queue_head_t wq; +}; + +struct 
se_sdf_msg { + u32 cmd; + u32 data_off; + u32 data_len; + u32 info[5]; +}; + +struct sdf_command_header { + int command; + union { + int param_cnt; + int ret; + } u; + int param_len[14]; +}; + +struct sdf_kernel_command { + struct sdf_command_header header; + void *handle; +}; + +#define KERNEL_COMMAND_SIZE (sizeof(struct sdf_kernel_command)) + +struct sdf_handle { + struct list_head handle_list; + void *handle; +}; + +struct sdf_file_pvt_data { + struct lsse_sdf_dev *se; + struct list_head handle_list; + struct sdf_kernel_command skc; + struct sdf_handle *ph; +}; + +static struct lsse_sdf_dev *se_sdf_dev; + +static void lsse_sdf_complete(struct lsse_ch *ch) +{ + struct lsse_sdf_dev *se = (struct lsse_sdf_dev *)ch->priv; + + se->processing_cmd = false; + wake_up(&se->wq); +} + +static int se_send_sdf_cmd(struct lsse_sdf_dev *se, int len, int retry) +{ + struct se_sdf_msg *smsg = (struct se_sdf_msg *)se->se_ch->smsg; + unsigned long flag; + int err; + + spin_lock_irqsave(&se->se_ch->ch_lock, flag); + + smsg->cmd = SE_CMD_SDF; + /* One time one cmd */ + smsg->data_off = se->se_ch->data_buffer - se->se_ch->se->mem_base; + smsg->data_len = len; + +try_again: + if (!retry--) + goto out; + + pr_debug("Send sdf cmd, last retry %d times\n", retry); + + err = se_send_ch_requeset(se->se_ch); + if (err) { + udelay(5); + goto try_again; + } + +out: + spin_unlock_irqrestore(&se->se_ch->ch_lock, flag); + + return err; +} + +static int lsse_sdf_recv(struct sdf_file_pvt_data *pvt, char *buf, + size_t size, int user, int *se_ret) +{ + int len, time, ret = 0; + struct se_sdf_msg *rmsg; + struct sdf_kernel_command *skc; + struct sdf_handle *ph; + struct lsse_sdf_dev *se = pvt->se; + + if (!se->se_ch->rmsg) { + pr_err("se device is not ready\n"); + return -EBUSY; + } + + time = wait_event_timeout(se->wq, !se->processing_cmd, HZ*30); + if (!time) + return -ETIME; + + rmsg = (struct se_sdf_msg *)se->se_ch->rmsg; + if (rmsg->cmd != SE_CMD_SDF) { + pr_err("se get wrong response\n"); + return -EIO; + } + len = rmsg->data_len; + + if ((!user && len > KERNEL_COMMAND_SIZE) || len > SE_SDF_BUFSIZE + || (size && len > size)) + return -E2BIG; + + if (user) { + ret = copy_to_user((char __user *)buf, + se->se_ch->data_buffer + rmsg->data_off, len); + if (!se_ret) + return ret; + + skc = (struct sdf_kernel_command *) + (se->se_ch->data_buffer + rmsg->data_off); + *se_ret = skc->header.u.ret; + if (skc->header.command == SDF_OPENSESSION && !*se_ret) { + ph = kmalloc(sizeof(*ph), GFP_KERNEL); + if (!ph) + return -ENOMEM; + ph->handle = skc->handle; + list_add(&ph->handle_list, &pvt->handle_list); + } + } else { + memcpy(buf, se->se_ch->data_buffer + rmsg->data_off, len); + } + return ret; +} + +static struct sdf_handle *find_sdf_handle(void *handle, + struct sdf_file_pvt_data *pvt) +{ + struct sdf_handle *ph; + + list_for_each_entry(ph, &pvt->handle_list, handle_list) { + if (ph->handle == handle) + return ph; + } + + return NULL; +} + +static int lsse_sdf_send(struct sdf_file_pvt_data *pvt, const char *buf, + size_t count, int user) +{ + int ret, se_ret; + struct sdf_handle *ph = NULL; + struct sdf_kernel_command *skc; + struct lsse_sdf_dev *se = pvt->se; + + if (!se->se_ch->smsg) { + pr_err("se device is not ready\n"); + return 0; + } + + if (count > se->se_ch->data_size) { + pr_err("Invalid size in send: count=%zd, size=%d\n", + count, se->se_ch->data_size); + return -EIO; + } + + if (user) { + ret = mutex_lock_interruptible(&se->data_lock); + if (ret) + goto out; + } else + mutex_lock(&se->data_lock); + + if (user) { + 
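/* Stage the caller's request into the channel's shared data buffer; for an SDF_CLOSESESSION command, look up the session handle so it can be dropped from this file's open-handle list on success. */ +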
ret = copy_from_user(se->se_ch->data_buffer, buf, count); + if (ret) { + ret = -EFAULT; + goto out_unlock; + } + skc = (struct sdf_kernel_command *)se->se_ch->data_buffer; + if (skc->header.command == SDF_CLOSESESSION) + ph = find_sdf_handle(skc->handle, pvt); + } else { + memcpy(se->se_ch->data_buffer, buf, count); + } + + se->processing_cmd = true; + ret = se_send_sdf_cmd(se, count, 5); + if (ret) { + pr_err("se_send_sdf_cmd failed\n"); + goto out_unlock; + } + + ret = lsse_sdf_recv(pvt, (char *)buf, 0, user, &se_ret); + if (ret) { + pr_err("recv failed ret: %x\n", ret); + goto out_unlock; + } + if (ph && !se_ret) { + list_del(&ph->handle_list); + kfree(ph); + } +out_unlock: + mutex_unlock(&se->data_lock); +out: + return ret; +} + +static ssize_t lsse_sdf_write(struct file *filp, const char __user *buf, + size_t cnt, loff_t *offt) +{ + struct sdf_file_pvt_data *pvt = filp->private_data; + + if (cnt > SE_SDF_BUFSIZE) + return -E2BIG; + + if (lsse_sdf_send(pvt, buf, cnt, 1)) + return -EFAULT; + + return cnt; +} + +static ssize_t lsse_sdf_read(struct file *filp, char __user *buf, + size_t size, loff_t *off) +{ + return lsse_sdf_recv(filp->private_data, buf, size, 1, NULL); +} + +static int close_one_handle(struct sdf_file_pvt_data *pvt, struct sdf_handle *ph) +{ + struct sdf_kernel_command *skc = &pvt->skc; + + skc->header.command = SDF_CLOSESESSION; + skc->header.u.param_cnt = 1; + skc->header.param_len[0] = 8; + skc->handle = ph->handle; + /* close one session */ + lsse_sdf_send(pvt, (char *)&pvt->skc, KERNEL_COMMAND_SIZE, 0); + if (skc->header.u.ret) { + pr_err("Auto Close Session failed, session handle: %llx, ret: %d\n", + (u64)ph->handle, skc->header.u.ret); + return skc->header.u.ret; + } + kfree(ph); + + return 0; +} + +static int close_all_handle(struct sdf_file_pvt_data *pvt) +{ + int ret = 0; + struct sdf_handle *ph, *tmp; + + list_for_each_entry_safe(ph, tmp, &pvt->handle_list, handle_list) { + list_del(&ph->handle_list); + ret = close_one_handle(pvt, ph); + if (ret) + return ret; + } + + return 0; +} + +static int lsse_sdf_release(struct inode *inode, struct file *filp) +{ + int ret; + struct sdf_file_pvt_data *pvt = filp->private_data; + + ret = close_all_handle(pvt); + filp->private_data = NULL; + kfree(pvt); + + if (ret) + ret = -EFAULT; + return ret; +} + +static int lsse_sdf_open(struct inode *inode, struct file *filp) +{ + struct sdf_file_pvt_data *pvt = kmalloc(sizeof(*pvt), GFP_KERNEL); + + if (!pvt) + return -ENOMEM; + + INIT_LIST_HEAD(&pvt->handle_list); + pvt->se = se_sdf_dev; + filp->private_data = pvt; + + return 0; +} + +static const struct file_operations lsse_sdf_fops = { + .owner = THIS_MODULE, + .open = lsse_sdf_open, + .write = lsse_sdf_write, + .read = lsse_sdf_read, + .release = lsse_sdf_release, +}; + +static struct miscdevice lsse_sdf_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "lsse_sdf", + .fops = &lsse_sdf_fops, +}; + +static int lsse_sdf_probe(struct platform_device *pdev) +{ + int msg_size; + int ret; + + se_sdf_dev = kzalloc(sizeof(*se_sdf_dev), GFP_KERNEL); + if (!se_sdf_dev) + return -ENOMEM; + + mutex_init(&se_sdf_dev->data_lock); + init_waitqueue_head(&se_sdf_dev->wq); + se_sdf_dev->processing_cmd = false; + + msg_size = 2 * sizeof(struct se_sdf_msg); + se_sdf_dev->se_ch = se_init_ch(SE_CH_SDF, SE_SDF_BUFSIZE, msg_size, + se_sdf_dev, lsse_sdf_complete); + if (!se_sdf_dev->se_ch) { + ret = -ENODEV; + goto out; + } + + ret = misc_register(&lsse_sdf_miscdev); + if (ret < 0) { + pr_err("register sdf dev failed!\n"); + goto out; + } + + return 0; + +out: + kfree(se_sdf_dev); + + return 
ret; +} + +static int lsse_sdf_remove(struct platform_device *pdev) +{ + misc_deregister(&lsse_sdf_miscdev); + se_deinit_ch(se_sdf_dev->se_ch); + kfree(se_sdf_dev); + + return 0; +} + +static struct platform_driver loongson_sdf_driver = { + .probe = lsse_sdf_probe, + .remove = lsse_sdf_remove, + .driver = { + .name = "loongson-sdf", + }, +}; +module_platform_driver(loongson_sdf_driver); + +MODULE_ALIAS("platform:loongson-sdf"); +MODULE_AUTHOR("Yinggang Gu"); +MODULE_DESCRIPTION("Loongson SE sdf driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 927088b2c3d3f2c60bd5c3fa9735ba90d2e0bdd3..301284e07603bb353c3c04d1128293f325b5d479 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -210,5 +210,29 @@ config TCG_FTPM_TEE help This driver proxies for firmware TPM running in TEE. +config TCG_HYGON + tristate "Hygon TPM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TPM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tpm_hygon. + +config TCM_HYGON + tristate "Hygon TCM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TCM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tcm_hygon. + source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 0222b1ddb3105e6726fa93e2ea3500d404964cbc..8f868c9b9ce78f53ec54620bec947f099fd13624 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -42,3 +42,5 @@ obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o obj-$(CONFIG_TCG_CRB) += tpm_crb.o obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o +obj-$(CONFIG_TCG_HYGON) += tpm_hygon.o +obj-$(CONFIG_TCM_HYGON) += tcm_hygon.o diff --git a/drivers/char/tpm/tcm_hygon.c b/drivers/char/tpm/tcm_hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..63f5e61d9b3e604130c09984635b9849a4246b9f --- /dev/null +++ b/drivers/char/tpm/tcm_hygon.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TCM2.0 device driver. + * + * Copyright (C) 2023 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +#define TCM2PSP_CMD(id) (0x100 | (id)) +#define MAX_TCM_BUF_LEN 4096 + +struct tcm_hygon_priv { + u8 priv_buf[MAX_TCM_BUF_LEN]; +}; + +struct tcm_header_t { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __packed; + +static int tcm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret = 0; + struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + struct tcm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32)); + u32 len = be32_to_cpu(header->length); + + if (len > count) { + ret = -E2BIG; + goto out; + } + + if (len > 0) + memmove(buf, (u8 *)header, len); + + ret = len; + +out: + return ret; +} + +static int tcm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret, error; + struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + u32 buf_size = sizeof(priv->priv_buf); + u32 cmd_size = (u32)count; + u8 *p = priv->priv_buf; + + if (buf_size - sizeof(u32) - sizeof(u32) < count) { + ret = -E2BIG; + goto out; + } + + *(u32 *)p = cpu_to_be32(buf_size); + p += sizeof(buf_size); + *(u32 *)p = cpu_to_be32(cmd_size); + p += sizeof(cmd_size); + memmove(p, buf, count); + + ret = psp_do_cmd(TCM2PSP_CMD(0), priv->priv_buf, &error); + if (ret) { + pr_err("%s: psp do cmd error, %d\n", __func__, error); + ret = -EIO; + } + +out: + return ret; +} + +static const struct tpm_class_ops tcm_c_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tcm_c_recv, + .send = tcm_c_send, +}; + +static void tcm_bios_log_teardown(struct tpm_chip *chip) +{ + int i; + struct inode *inode; + + /* securityfs_remove currently doesn't take care of handling sync + * between removal and opening of pseudo files. To handle this, a + * workaround is added by making i_private = NULL here during removal + * and to check it during open(), both within inode_lock()/unlock(). + * This design ensures that open() either safely gets kref or fails. 
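+	 * A concurrent open() that wins the race therefore observes a NULL
+	 * i_private under the inode lock and fails cleanly instead of
+	 * touching a chip that is about to be freed.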
+ */ + for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) { + if (chip->bios_dir[i]) { + inode = d_inode(chip->bios_dir[i]); + inode_lock(inode); + inode->i_private = NULL; + inode_unlock(inode); + securityfs_remove(chip->bios_dir[i]); + } + } +} + +static void tcm_chip_unregister(struct tpm_chip *chip) +{ + if (IS_ENABLED(CONFIG_HW_RANDOM_TPM)) + hwrng_unregister(&chip->hwrng); + tcm_bios_log_teardown(chip); + cdev_del(&chip->cdevs); + put_device(&chip->devs); + cdev_device_del(&chip->cdev, &chip->dev); +} + +static int hygon_tcm2_acpi_add(struct acpi_device *device) +{ + int ret; + struct tpm_chip *chip; + struct tcm_hygon_priv *priv; + struct device *dev = &device->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err; + } + + chip = tpmm_chip_alloc(dev, &tcm_c_ops); + if (IS_ERR(chip)) { + pr_err("tcmm_chip_alloc fail\n"); + ret = PTR_ERR(chip); + goto err; + } + + ret = dev_set_name(&chip->dev, "tcm%d", chip->dev_num); + if (ret) { + pr_err("tcm device set name fail\n"); + goto err; + } + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_TPM2; + chip->flags |= TPM_CHIP_FLAG_IRQ; + + ret = tpm_chip_register(chip); + if (ret) { + pr_err("tcm chip_register fail\n"); + goto err; + } + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + device_del(&chip->devs); + ret = dev_set_name(&chip->devs, "tcmrm%d", chip->dev_num); + if (ret) { + pr_err("tcmrm device set name fail\n"); + goto err_dev; + } + ret = device_add(&chip->devs); + if (ret) { + pr_err("devs add fail\n"); + goto err_dev; + } + } + + pr_info("Hygon TCM2 detected\n"); + + return 0; + +err_dev: + tcm_chip_unregister(chip); + +err: + return ret; +} + +static void hygon_tcm2_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + + pr_info("Hygon TCM2 removed\n"); +} + +static SIMPLE_DEV_PM_OPS(tcm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); + +static const struct acpi_device_id hygon_tcm2_device_ids[] = { + {"HYGT0201", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tcm2_device_ids); + +static struct acpi_driver hygon_tcm2_acpi_driver = { + .name = "tcm_hygon", + .ids = hygon_tcm2_device_ids, + .ops = { + .add = hygon_tcm2_acpi_add, + .remove = hygon_tcm2_acpi_remove, + }, + .drv = { + .pm = &tcm_hygon_pm, + }, +}; + +static int __init hygon_tcm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tcm2_acpi_driver); +} + +static void __exit hygon_tcm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tcm2_acpi_driver); +} + +/* + * hygon_tcm2_init must be done after ccp module init, but before + * ima module init. That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(hygon_tcm2_init); +module_exit(hygon_tcm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TCM2 device driver for Hygon PSP"); diff --git a/drivers/char/tpm/tpm_hygon.c b/drivers/char/tpm/tpm_hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..8e509df90290e972ee57c7f13de5ac5a28788cba --- /dev/null +++ b/drivers/char/tpm/tpm_hygon.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TPM2.0 device driver. + * + * Copyright (C) 2020 Hygon Info Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +#define TPM2PSP_CMD(id) (0x100 | (id)) +#define MAX_TPM_BUF_LEN 4096 +#define MAX_CMD_BUF_LEN (MAX_TPM_BUF_LEN + sizeof(u32) + sizeof(u32)) + +struct tpm_hygon_priv { + u8 priv_buf[MAX_CMD_BUF_LEN]; +}; + +/* + * tpm header struct name is different in different kernel versions. + * so redefine it for driver porting. + */ +struct tpm_header_t { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __packed; + +static int tpm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret = 0; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + struct tpm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32)); + u32 len = be32_to_cpu(header->length); + + if (len > count) { + ret = -E2BIG; + goto out; + } + + if (len > 0) + memmove(buf, (u8 *)header, len); + + ret = len; + +out: + return ret; +} + +static int tpm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret, error; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + u32 buf_size = cpu_to_be32(sizeof(priv->priv_buf)); + u32 cmd_size = cpu_to_be32((u32)count); + u8 *p = priv->priv_buf; + + *(u32 *)p = buf_size; + p += sizeof(buf_size); + *(u32 *)p = cmd_size; + p += sizeof(cmd_size); + memmove(p, buf, count); + + ret = psp_do_cmd(TPM2PSP_CMD(0), priv->priv_buf, &error); + if (ret) { + pr_err("%s: sev do cmd error, %d\n", __func__, error); + ret = -EIO; + } + + return ret; +} + +static const struct tpm_class_ops tpm_c_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tpm_c_recv, + .send = tpm_c_send, +}; + +static int hygon_tpm2_acpi_add(struct acpi_device *device) +{ + int ret; + struct tpm_chip *chip; + struct tpm_hygon_priv *priv; + struct device *dev = &device->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err; + } + + chip = tpmm_chip_alloc(dev, &tpm_c_ops); + if (IS_ERR(chip)) { + pr_err("tpmm_chip_alloc fail\n"); + ret = PTR_ERR(chip); + goto err; + } + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_TPM2; + chip->flags |= TPM_CHIP_FLAG_IRQ; + + ret = tpm_chip_register(chip); + if (ret) { + pr_err("tpm_chip_register fail\n"); + goto err; + } + + pr_info("Hygon TPM2 detected\n"); + + return 0; + +err: + return ret; +} + +static void hygon_tpm2_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + + pr_info("Hygon TPM2 removed\n"); +} + +static SIMPLE_DEV_PM_OPS(tpm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); + +static const struct acpi_device_id hygon_tpm2_device_ids[] = { + {"HYGT0101", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tpm2_device_ids); + +static struct acpi_driver hygon_tpm2_acpi_driver = { + .name = "tpm_hygon", + .ids = hygon_tpm2_device_ids, + .ops = { + .add = hygon_tpm2_acpi_add, + .remove = hygon_tpm2_acpi_remove, + }, + .drv = { + .pm = &tpm_hygon_pm, + }, +}; + +static int __init hygon_tpm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tpm2_acpi_driver); +} + +static void __exit hygon_tpm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tpm2_acpi_driver); +} + +/* + * hygon_tpm2_init must be done after ccp 
module init, but before + * ima module init. That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(hygon_tpm2_init); +module_exit(hygon_tpm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TPM2 device driver for Hygon PSP"); diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 8208a3d895634bb6ea7871c3578529d076aa746f..a944c3122b7b76d3ce30ac7c4a84b5dce2588ac5 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -733,4 +733,7 @@ config GOLDFISH_TIMER help Support for the timer/counter of goldfish-rtc +config SW64_TIMER + bool + endmenu diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 368c3461dab8146a6f89a111677c413b974e4608..b9ef4c79915e88b4de638548f43dd48d83624346 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -89,3 +89,4 @@ obj-$(CONFIG_MSC313E_TIMER) += timer-msc313e.o obj-$(CONFIG_GOLDFISH_TIMER) += timer-goldfish.o obj-$(CONFIG_GXP_TIMER) += timer-gxp.o obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o +obj-$(CONFIG_SW64_TIMER) += timer-sw64.o diff --git a/drivers/clocksource/timer-sw64.c b/drivers/clocksource/timer-sw64.c new file mode 100644 index 0000000000000000000000000000000000000000..a124b6d8fed94d785f94483610ac143a8de13920 --- /dev/null +++ b/drivers/clocksource/timer-sw64.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define SHTCLK_RATE_KHZ 25000 +#define SHTCLK_RATE (SHTCLK_RATE_KHZ * 1000) + +#if defined(CONFIG_SUBARCH_C4) +static u64 read_longtime(struct clocksource *cs) +{ + return read_csr(CSR_SHTCLOCK); +} + +static struct clocksource clocksource_longtime = { + .name = "longtime", + .rating = 100, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 0, + .mult = 0, + .read = read_longtime, +}; + +static u64 notrace read_sched_clock(void) +{ + return read_csr(CSR_SHTCLOCK); +} + +void __init sw64_setup_clocksource(void) +{ + clocksource_register_khz(&clocksource_longtime, SHTCLK_RATE_KHZ); + sched_clock_register(read_sched_clock, BITS_PER_LONG, SHTCLK_RATE); +} + +void __init setup_sched_clock(void) { } +#elif defined(CONFIG_SUBARCH_C3B) +#ifdef CONFIG_SMP +static u64 read_longtime(struct clocksource *cs) +{ + unsigned long node; + + node = __this_cpu_read(hard_node_id); + return __io_read_longtime(node); +} + +static int longtime_enable(struct clocksource *cs) +{ + switch (cpu_desc.model) { + case CPU_SW3231: + sw64_io_write(0, GPIO_SWPORTA_DR, 0); + sw64_io_write(0, GPIO_SWPORTA_DDR, 0xff); + break; + case CPU_SW831: + __io_write_longtime_start_en(0, 0x1); + break; + default: + break; + } + + return 0; +} + +static struct clocksource clocksource_longtime = { + .name = "longtime", + .rating = 100, + .enable = longtime_enable, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 0, + .mult = 0, + .read = read_longtime, +}; + +static u64 read_vtime(struct clocksource *cs) +{ + unsigned long vtime_addr; + + vtime_addr = IO_BASE | LONG_TIME; + return rdio64(vtime_addr); +} + +static int vtime_enable(struct clocksource *cs) +{ + return 0; +} + +static struct clocksource clocksource_vtime = { + .name = "vtime", + .rating = 100, + .enable = vtime_enable, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + 
.mask = CLOCKSOURCE_MASK(64),
+	.shift = 0,
+	.mult = 0,
+	.read = read_vtime,
+};
+#else /* !SMP */
+static u64 read_tc(struct clocksource *cs)
+{
+	return rdtc();
+}
+
+static struct clocksource clocksource_tc = {
+	.name = "tc",
+	.rating = 300,
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+	.mask = CLOCKSOURCE_MASK(64),
+	.shift = 22,
+	.mult = 0, /* To be filled in */
+	.read = read_tc,
+};
+#endif /* SMP */
+
+#define DEFAULT_MCLK 25 /* MHz */
+
+void __init sw64_setup_clocksource(void)
+{
+	unsigned int mclk = *((unsigned char *)__va(MB_MCLK));
+
+	if (!mclk)
+		mclk = DEFAULT_MCLK;
+
+#ifdef CONFIG_SMP
+	if (is_in_host())
+		clocksource_register_khz(&clocksource_longtime, mclk * 1000);
+	else
+		clocksource_register_khz(&clocksource_vtime, DEFAULT_MCLK * 1000);
+#else
+	clocksource_register_hz(&clocksource_tc, get_cpu_freq());
+	pr_info("Setup clocksource TC, mult = %u\n", clocksource_tc.mult);
+#endif
+}
+
+DECLARE_PER_CPU(u64, tc_offset);
+static u64 sc_start, sc_shift, sc_multi;
+DEFINE_STATIC_KEY_FALSE(use_tc_as_sched_clock);
+
+static int __init sched_clock_setup(char *opt)
+{
+	if (!opt)
+		return -EINVAL;
+
+	if (!strncmp(opt, "on", 2)) {
+		static_branch_enable(&use_tc_as_sched_clock);
+		pr_info("Using TC instead of jiffies as source of sched_clock()\n");
+	}
+
+	return 0;
+}
+early_param("tc_sched_clock", sched_clock_setup);
+
+static void __init calibrate_sched_clock(void)
+{
+	sc_start = rdtc();
+}
+
+void __init setup_sched_clock(void)
+{
+	unsigned long step;
+
+	sc_shift = 7;
+	step = 1UL << sc_shift;
+	sc_multi = step * NSEC_PER_SEC / get_cpu_freq();
+	calibrate_sched_clock();
+
+	pr_info("sched_clock: sc_multi=%llu, sc_shift=%llu\n", sc_multi, sc_shift);
+}
+
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+static u64 notrace read_sched_clock(void)
+{
+	return (rdtc() - sc_start) >> sc_shift;
+}
+
+void __init sw64_sched_clock_init(void)
+{
+	/* register the TC-based reader defined above */
+	sched_clock_register(read_sched_clock, BITS_PER_LONG, get_cpu_freq() >> sc_shift);
+}
+#else /* !CONFIG_GENERIC_SCHED_CLOCK */
+/*
+ * scheduler clock - returns current time in nanoseconds.
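+ *
+ * When booted with tc_sched_clock=on, the value is derived from the
+ * time counter: ((rdtc() - sc_start + tc_offset) >> sc_shift) scaled
+ * to nanoseconds by sc_multi. Otherwise it falls back to jiffies
+ * resolution (e.g. 10 ms per tick when HZ=100).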
+ */
+unsigned long long notrace sched_clock(void)
+{
+	if (static_branch_likely(&use_tc_as_sched_clock))
+		return ((rdtc() - sc_start + __this_cpu_read(tc_offset)) >> sc_shift) * sc_multi;
+	else
+		return (jiffies - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t sched_clock_status_read(struct file *file, char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	char buf[2];
+
+	if (static_key_enabled(&use_tc_as_sched_clock))
+		buf[0] = 'Y';
+	else
+		buf[0] = 'N';
+	buf[1] = '\n';
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t sched_clock_status_write(struct file *file, const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	int r;
+	bool bv;
+	bool val = static_key_enabled(&use_tc_as_sched_clock);
+
+	r = kstrtobool_from_user(user_buf, count, &bv);
+	/* propagate parse errors instead of silently accepting the write */
+	if (r)
+		return r;
+
+	if (val != bv) {
+		if (bv) {
+			static_branch_enable(&use_tc_as_sched_clock);
+			pr_info("source of sched_clock() switched from jiffies to TC\n");
+		} else {
+			static_branch_disable(&use_tc_as_sched_clock);
+			pr_info("source of sched_clock() switched from TC to jiffies\n");
+		}
+	} else {
+		if (val)
+			pr_info("source of sched_clock() unchanged (using TC)\n");
+		else
+			pr_info("source of sched_clock() unchanged (using jiffies)\n");
+	}
+
+	return count;
+}
+
+static const struct file_operations sched_clock_status_fops = {
+	.read = sched_clock_status_read,
+	.write = sched_clock_status_write,
+	.open = nonseekable_open,
+	.llseek = no_llseek,
+};
+
+static int __init sched_clock_debug_init(void)
+{
+	struct dentry *sched_clock_status;
+
+	if (!sw64_debugfs_dir)
+		return -ENODEV;
+
+	sched_clock_status = debugfs_create_file("tc_sched_clock",
+			0644, sw64_debugfs_dir, NULL,
+			&sched_clock_status_fops);
+
+	/* debugfs_create_file() returns an ERR_PTR on failure, not NULL */
+	if (IS_ERR(sched_clock_status))
+		return PTR_ERR(sched_clock_status);
+
+	return 0;
+}
+late_initcall(sched_clock_debug_init);
+#endif /* CONFIG_DEBUG_FS */
+#endif /* CONFIG_GENERIC_SCHED_CLOCK */
+
+#endif
+
+static int timer_next_event(unsigned long delta,
+			    struct clock_event_device *evt);
+static int timer_set_shutdown(struct clock_event_device *evt);
+static int timer_set_oneshot(struct clock_event_device *evt);
+
+/*
+ * The local timer can be used for any function which is CPU local.
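+ * Each CPU owns one clock_event_device (timer_events below); the
+ * device runs in one-shot mode only, so every expiry must be followed
+ * by a timer_next_event() reprogramming before the next event fires.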
+ */
+static struct clock_event_device timer_clockevent = {
+	.name = "timer",
+	.features = CLOCK_EVT_FEAT_ONESHOT,
+	.shift = 20,
+	.mult = 0,
+	.set_state_shutdown = timer_set_shutdown,
+	.set_state_oneshot = timer_set_oneshot,
+	.set_next_event = timer_next_event,
+	.rating = 300,
+	.irq = -1,
+};
+
+static int vtimer_next_event(unsigned long delta,
+			     struct clock_event_device *evt)
+{
+	hcall(HCALL_SET_CLOCKEVENT, delta, 0, 0);
+	return 0;
+}
+
+static int vtimer_shutdown(struct clock_event_device *evt)
+{
+	hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0);
+	return 0;
+}
+
+static int vtimer_set_oneshot(struct clock_event_device *evt)
+{
+	return 0;
+}
+
+static struct clock_event_device vtimer_clockevent = {
+	.name = "vtimer",
+	.features = CLOCK_EVT_FEAT_ONESHOT,
+	.shift = 20,
+	.mult = 0,
+	.set_state_shutdown = vtimer_shutdown,
+	.set_state_oneshot = vtimer_set_oneshot,
+	.set_next_event = vtimer_next_event,
+	.rating = 300,
+	.irq = -1,
+};
+
+static DEFINE_PER_CPU(struct clock_event_device, timer_events);
+
+/*
+ * Program the next event, relative to now
+ */
+static int timer_next_event(unsigned long delta,
+			    struct clock_event_device *evt)
+{
+	wrtimer(delta);
+	return 0;
+}
+
+static int timer_set_shutdown(struct clock_event_device *evt)
+{
+	wrtimer(0);
+	return 0;
+}
+
+static int timer_set_oneshot(struct clock_event_device *evt)
+{
+	/*
+	 * The SW64 timer supports CLOCK_EVT_MODE_ONESHOT only, and enters
+	 * it automatically, unlike PIT and HPET, which select ONESHOT or
+	 * PERIODIC by setting PIT_MOD or HPET_Tn_CFG. So there is nothing
+	 * to do here.
+	 */
+	return 0;
+}
+
+void sw64_update_clockevents(unsigned long cpu, u32 freq)
+{
+	struct clock_event_device *swevt = &per_cpu(timer_events, cpu);
+
+	if (cpu == smp_processor_id()) {
+		clockevents_update_freq(swevt, freq);
+	} else {
+		clockevents_calc_mult_shift(swevt, freq, 4);
+		swevt->min_delta_ns = clockevent_delta2ns(swevt->min_delta_ticks, swevt);
+		swevt->max_delta_ns = clockevent_delta2ns(swevt->max_delta_ticks, swevt);
+	}
+}
+
+/*
+ * Setup the local timer for this CPU. Copy the initialized values
+ * of the boot CPU and register the clock event in the framework.
+ */
+void sw64_setup_timer(void)
+{
+	unsigned long min_delta;
+	int cpu = smp_processor_id();
+	struct clock_event_device *swevt = &per_cpu(timer_events, cpu);
+
+	/* min_delta in ticks, corresponding to 100ns */
+	min_delta = get_cpu_freq() / 1000 / 1000 / 10;
+
+	if (is_in_guest()) {
+		memcpy(swevt, &vtimer_clockevent, sizeof(*swevt));
+		/*
+		 * This value is very important: if it is too small, the
+		 * timer may expire before the interrupt enable register
+		 * (IER) has been set up.
+		 */
+		min_delta *= 4;
+	} else {
+		memcpy(swevt, &timer_clockevent, sizeof(*swevt));
+	}
+	swevt->cpumask = cpumask_of(cpu);
+	swevt->set_state_shutdown(swevt);
+	clockevents_config_and_register(swevt, get_cpu_freq(), min_delta, ULONG_MAX);
+}
+
+void sw64_timer_interrupt(void)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&timer_events);
+
+	irq_enter();
+	if (!evt->event_handler) {
+		pr_warn("Spurious local timer interrupt on cpu %d\n",
+			smp_processor_id());
+		timer_set_shutdown(evt);
+		/* balance the irq_enter() above even on the spurious path */
+		goto out;
+	}
+
+	inc_irq_stat(timer_irqs_event);
+
+	evt->event_handler(evt);
+
+out:
+	irq_exit();
+}
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index f429b9b37b76c7e45e0c0cd9eb38980666ff597d..d1fdea27eb0d734b41e5ce6c5832ae3380af735a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -273,6 +273,17 @@ config LOONGSON2_CPUFREQ
 	  If in doubt, say N.
endif +if LOONGARCH +config LOONGSON3_ACPI_CPUFREQ + bool "Loongson3 ACPI cpufreq driver" + depends on ACPI_PROCESSOR + help + This driver adds a CPUFreq driver which utilizes the ACPI + Processor Performance States. + This driver supports Loongson 3A5000 compatible CPUs. + If in doubt, say N. +endif + if SPARC64 config SPARC_US3_CPUFREQ tristate "UltraSPARC-III CPU Frequency driver" @@ -303,6 +314,29 @@ config SH_CPU_FREQ If unsure, say N. endif +if SW64 +config SW64_CPUFREQ + bool "SW64 CPU Frequency interface" + depends on UNCORE_XUELANG + default y + help + This adds the CPUFreq driver for SW64 processor which supports + software configurable cpu frequency. + + For details, take a look at . + + If unsure, say N. + +config SW64_CPUFREQ_DEBUGFS + bool "SW64 CPU Frequency debugfs interface" + depends on SW64_CPUFREQ && DEBUG_FS + default y + help + Turns on the DebugFS interface for CPU Frequency. + + If you don't know what to do here, say N. +endif + config QORIQ_CPUFREQ tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" depends on OF && COMMON_CLK diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ef8510774913113b19f3ee9ee738bc228154d188..f9c1c9012ce7b20ca229044e1eb7b902ef20f5b7 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -104,6 +104,9 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o +obj-$(CONFIG_LOONGSON3_ACPI_CPUFREQ) += loongson3-acpi-cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o +obj-$(CONFIG_SW64_CPUFREQ) += sw64_cpufreq.o +obj-$(CONFIG_SW64_CPUFREQ_DEBUGFS) += sw64_cpufreq_debugfs.o diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 4ac3a35dcd983ccefc89277d69b2ca8ef169924d..d34a8ca6187d1b82589d16a85d95451c69eb8128 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -628,28 +628,35 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) #endif #ifdef CONFIG_ACPI_CPPC_LIB -static u64 get_max_boost_ratio(unsigned int cpu) +static bool cppc_highest_perf_diff; +static struct cpumask core_prior_mask; + +static void cppc_get_highest_nominal_perf(int cpu, u64 *highest_perf, u64 *nominal_perf) { struct cppc_perf_caps perf_caps; - u64 highest_perf, nominal_perf; int ret; - if (acpi_pstate_strict) - return 0; - ret = cppc_get_perf_caps(cpu, &perf_caps); if (ret) { - pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", - cpu, ret); - return 0; + pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret); + return; } - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) - highest_perf = amd_get_highest_perf(); + *highest_perf = amd_get_highest_perf(); else - highest_perf = perf_caps.highest_perf; + *highest_perf = perf_caps.highest_perf; - nominal_perf = perf_caps.nominal_perf; + *nominal_perf = perf_caps.nominal_perf; +} + +static u64 get_max_boost_ratio(unsigned int cpu) +{ + u64 highest_perf, nominal_perf; + + if (acpi_pstate_strict) + return 0; + + cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf); if (!highest_perf || !nominal_perf) { pr_debug("CPU%d: highest or nominal performance missing\n", cpu); @@ -663,8 +670,51 @@ static u64 get_max_boost_ratio(unsigned int cpu) return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf); } + +/* The 
work item is needed to avoid CPU hotplug locking issues */ +static void cpufreq_sched_itmt_work_fn(struct work_struct *work) +{ + sched_set_itmt_support(); +} + +static DECLARE_WORK(sched_itmt_work, cpufreq_sched_itmt_work_fn); + +static void cpufreq_set_itmt_prio(int cpu) +{ + u64 highest_perf, nominal_perf; + static u64 max_highest_perf = 0, min_highest_perf = U64_MAX; + + cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf); + + sched_set_itmt_core_prio(highest_perf, cpu); + cpumask_set_cpu(cpu, &core_prior_mask); + + if (max_highest_perf <= min_highest_perf) { + if (highest_perf > max_highest_perf) + max_highest_perf = highest_perf; + + if (highest_perf < min_highest_perf) + min_highest_perf = highest_perf; + + if (max_highest_perf > min_highest_perf) { + /* + * This code can be run during CPU online under the + * CPU hotplug locks, so sched_set_itmt_support() + * cannot be called from here. Queue up a work item + * to invoke it. + */ + cppc_highest_perf_diff = true; + } + } + + if (cppc_highest_perf_diff && cpumask_equal(&core_prior_mask, cpu_online_mask)) { + pr_debug("queue a work to set itmt enabled\n"); + schedule_work(&sched_itmt_work); + } +} #else static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; } +static inline void cpufreq_set_itmt_prio(int cpu) { } #endif static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) @@ -677,7 +727,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) unsigned int valid_states = 0; unsigned int result = 0; u64 max_boost_ratio; - unsigned int i; + unsigned int i, j; #ifdef CONFIG_SMP static int blacklisted; #endif @@ -741,6 +791,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) pr_info_once("overriding BIOS provided _PSD data\n"); } #endif + if (c->x86_vendor == X86_VENDOR_CENTAUR || c->x86_vendor == X86_VENDOR_ZHAOXIN) { + for_each_cpu(j, policy->cpus) { + cpufreq_set_itmt_prio(j); + } + } /* capability check */ if (perf->state_count <= 1) { diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 8a4fdf212ce0dea5898d91244fbd8f0510852d1a..08689264b54a982fce91b70e7777b1f5cbbf0fe1 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -172,7 +173,6 @@ struct vid_data { * based on the MSR_IA32_MISC_ENABLE value and whether or * not the maximum reported turbo P-state is different from * the maximum reported non-turbo one. - * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq. * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo * P-state capacity. 
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo @@ -181,7 +181,6 @@ struct vid_data { struct global_params { bool no_turbo; bool turbo_disabled; - bool turbo_disabled_mf; int max_perf_pct; int min_perf_pct; }; @@ -201,8 +200,6 @@ struct global_params { * @prev_aperf: Last APERF value read from APERF MSR * @prev_mperf: Last MPERF value read from MPERF MSR * @prev_tsc: Last timestamp counter (TSC) value - * @prev_cummulative_iowait: IO Wait time difference from last and - * current sample * @sample: Storage for storing last Sample data * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios @@ -214,7 +211,7 @@ struct global_params { * @epp_policy: Last saved policy used to set EPP/EPB * @epp_default: Power on default HWP energy performance * preference/bias - * @epp_cached Cached HWP energy-performance preference value + * @epp_cached: Cached HWP energy-performance preference value * @hwp_req_cached: Cached value of the last HWP Request MSR * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR * @last_io_update: Last time when IO wake flag was set @@ -241,7 +238,6 @@ struct cpudata { u64 prev_aperf; u64 prev_mperf; u64 prev_tsc; - u64 prev_cummulative_iowait; struct sample sample; int32_t min_perf_ratio; int32_t max_perf_ratio; @@ -294,11 +290,11 @@ struct pstate_funcs { static struct pstate_funcs pstate_funcs __read_mostly; -static int hwp_active __read_mostly; -static int hwp_mode_bdw __read_mostly; -static bool per_cpu_limits __read_mostly; +static bool hwp_active __ro_after_init; +static int hwp_mode_bdw __ro_after_init; +static bool per_cpu_limits __ro_after_init; +static bool hwp_forced __ro_after_init; static bool hwp_boost __read_mostly; -static bool hwp_forced __read_mostly; static struct cpufreq_driver *intel_pstate_driver __read_mostly; @@ -592,16 +588,13 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq); } -static inline void update_turbo_state(void) +static bool turbo_is_disabled(void) { u64 misc_en; - struct cpudata *cpu; - cpu = all_cpu_data[0]; rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); - global.turbo_disabled = - (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || - cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); + + return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); } static int min_perf_pct_min(void) @@ -1156,12 +1149,16 @@ static void intel_pstate_update_policies(void) static void __intel_pstate_update_max_freq(struct cpudata *cpudata, struct cpufreq_policy *policy) { - policy->cpuinfo.max_freq = global.turbo_disabled_mf ? + if (hwp_active) + intel_pstate_get_hwp_cap(cpudata); + + policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; + refresh_frequency_limits(policy); } -static void intel_pstate_update_max_freq(unsigned int cpu) +static void intel_pstate_update_limits(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); @@ -1173,25 +1170,12 @@ static void intel_pstate_update_max_freq(unsigned int cpu) cpufreq_cpu_release(policy); } -static void intel_pstate_update_limits(unsigned int cpu) +static void intel_pstate_update_limits_for_all(void) { - mutex_lock(&intel_pstate_driver_lock); - - update_turbo_state(); - /* - * If turbo has been turned on or off globally, policy limits for - * all CPUs need to be updated to reflect that. 
- */ - if (global.turbo_disabled_mf != global.turbo_disabled) { - global.turbo_disabled_mf = global.turbo_disabled; - arch_set_max_freq_ratio(global.turbo_disabled); - for_each_possible_cpu(cpu) - intel_pstate_update_max_freq(cpu); - } else { - cpufreq_update_policy(cpu); - } + int cpu; - mutex_unlock(&intel_pstate_driver_lock); + for_each_possible_cpu(cpu) + intel_pstate_update_limits(cpu); } /************************** sysfs begin ************************/ @@ -1289,11 +1273,7 @@ static ssize_t show_no_turbo(struct kobject *kobj, return -EAGAIN; } - update_turbo_state(); - if (global.turbo_disabled) - ret = sprintf(buf, "%u\n", global.turbo_disabled); - else - ret = sprintf(buf, "%u\n", global.no_turbo); + ret = sprintf(buf, "%u\n", global.no_turbo); mutex_unlock(&intel_pstate_driver_lock); @@ -1304,32 +1284,39 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; - int ret; + bool no_turbo; - ret = sscanf(buf, "%u", &input); - if (ret != 1) + if (sscanf(buf, "%u", &input) != 1) return -EINVAL; mutex_lock(&intel_pstate_driver_lock); if (!intel_pstate_driver) { - mutex_unlock(&intel_pstate_driver_lock); - return -EAGAIN; + count = -EAGAIN; + goto unlock_driver; } - mutex_lock(&intel_pstate_limits_lock); + no_turbo = !!clamp_t(int, input, 0, 1); - update_turbo_state(); - if (global.turbo_disabled) { - pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); - mutex_unlock(&intel_pstate_limits_lock); - mutex_unlock(&intel_pstate_driver_lock); - return -EPERM; + WRITE_ONCE(global.turbo_disabled, turbo_is_disabled()); + if (global.turbo_disabled && !no_turbo) { + pr_notice("Turbo disabled by BIOS or unavailable on processor\n"); + count = -EPERM; + if (global.no_turbo) + goto unlock_driver; + else + no_turbo = 1; } - global.no_turbo = clamp_t(int, input, 0, 1); + if (no_turbo == global.no_turbo) { + goto unlock_driver; + } - if (global.no_turbo) { + WRITE_ONCE(global.no_turbo, no_turbo); + + mutex_lock(&intel_pstate_limits_lock); + + if (no_turbo) { struct cpudata *cpu = all_cpu_data[0]; int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; @@ -1340,9 +1327,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, mutex_unlock(&intel_pstate_limits_lock); - intel_pstate_update_policies(); - arch_set_max_freq_ratio(global.no_turbo); + intel_pstate_update_limits_for_all(); + arch_set_max_freq_ratio(no_turbo); +unlock_driver: mutex_unlock(&intel_pstate_driver_lock); return count; @@ -1623,7 +1611,6 @@ static void intel_pstate_notify_work(struct work_struct *work) struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu); if (policy) { - intel_pstate_get_hwp_cap(cpudata); __intel_pstate_update_max_freq(cpudata, policy); cpufreq_cpu_release(policy); @@ -1635,18 +1622,24 @@ static void intel_pstate_notify_work(struct work_struct *work) static DEFINE_RAW_SPINLOCK(hwp_notify_lock); static cpumask_t hwp_intr_enable_mask; +#define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0) +#define HWP_HIGHEST_PERF_CHANGE_STATUS BIT(3) + void notify_hwp_interrupt(void) { unsigned int this_cpu = smp_processor_id(); - struct cpudata *cpudata; + u64 value, status_mask; unsigned long flags; - u64 value; - if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) + if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY)) return; + status_mask = HWP_GUARANTEED_PERF_CHANGE_STATUS; + if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) + status_mask |= 
HWP_HIGHEST_PERF_CHANGE_STATUS; + rdmsrl_safe(MSR_HWP_STATUS, &value); - if (!(value & 0x01)) + if (!(value & status_mask)) return; raw_spin_lock_irqsave(&hwp_notify_lock, flags); @@ -1654,24 +1647,8 @@ void notify_hwp_interrupt(void) if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask)) goto ack_intr; - /* - * Currently we never free all_cpu_data. And we can't reach here - * without this allocated. But for safety for future changes, added - * check. - */ - if (unlikely(!READ_ONCE(all_cpu_data))) - goto ack_intr; - - /* - * The free is done during cleanup, when cpufreq registry is failed. - * We wouldn't be here if it fails on init or switch status. But for - * future changes, added check. - */ - cpudata = READ_ONCE(all_cpu_data[this_cpu]); - if (unlikely(!cpudata)) - goto ack_intr; - - schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10)); + schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work, + msecs_to_jiffies(10)); raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); @@ -1684,33 +1661,41 @@ void notify_hwp_interrupt(void) static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) { - unsigned long flags; + bool cancel_work; - if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) + if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY)) return; /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); - raw_spin_lock_irqsave(&hwp_notify_lock, flags); - if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask)) - cancel_delayed_work(&cpudata->hwp_notify_work); - raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); + raw_spin_lock_irq(&hwp_notify_lock); + cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask); + raw_spin_unlock_irq(&hwp_notify_lock); + + if (cancel_work) + cancel_delayed_work_sync(&cpudata->hwp_notify_work); } +#define HWP_GUARANTEED_PERF_CHANGE_REQ BIT(0) +#define HWP_HIGHEST_PERF_CHANGE_REQ BIT(2) + static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) { - /* Enable HWP notification interrupt for guaranteed performance change */ + /* Enable HWP notification interrupt for performance change */ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { - unsigned long flags; + u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ; - raw_spin_lock_irqsave(&hwp_notify_lock, flags); + raw_spin_lock_irq(&hwp_notify_lock); INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); - raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); + raw_spin_unlock_irq(&hwp_notify_lock); + + if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) + interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ; /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01); + wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask); wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); } } @@ -1719,13 +1704,6 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata) { cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); - /* - * If this CPU gen doesn't call for change in balance_perf - * EPP return. 
- */ - if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) - return; - /* * If the EPP is set by firmware, which means that firmware enabled HWP * - Is equal or less than 0x80 (default balance_perf EPP) @@ -1738,6 +1716,13 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata) return; } + /* + * If this CPU gen doesn't call for change in balance_perf + * EPP return. + */ + if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) + return; + /* * Use hard coded value per gen to update the balance_perf * and default EPP. @@ -1793,7 +1778,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate) u32 vid; val = (u64)pstate << 8; - if (global.no_turbo && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; vid_fp = cpudata->vid.min + mul_fp( @@ -1958,7 +1943,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) u64 val; val = (u64)pstate << 8; - if (global.no_turbo && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; return val; @@ -2031,14 +2016,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu) intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); } -static void intel_pstate_max_within_limits(struct cpudata *cpu) -{ - int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); - - update_turbo_state(); - intel_pstate_set_pstate(cpu, pstate); -} - static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu); @@ -2264,7 +2241,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu) sample->busy_scaled = busy_frac * 100; - target = global.no_turbo || global.turbo_disabled ? + target = READ_ONCE(global.no_turbo) ? cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; target += target >> 2; target = mul_fp(target, busy_frac); @@ -2308,8 +2285,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu) struct sample *sample; int target_pstate; - update_turbo_state(); - target_pstate = get_target_pstate(cpu); target_pstate = intel_pstate_prepare_request(cpu, target_pstate); trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); @@ -2434,10 +2409,12 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { X86_MATCH(ICELAKE_X, core_funcs), X86_MATCH(TIGERLAKE, core_funcs), X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), + X86_MATCH(EMERALDRAPIDS_X, core_funcs), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); +#ifdef CONFIG_ACPI static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { X86_MATCH(BROADWELL_D, core_funcs), X86_MATCH(BROADWELL_X, core_funcs), @@ -2446,6 +2423,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), {} }; +#endif static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { X86_MATCH(KABYLAKE, core_funcs), @@ -2527,7 +2505,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu) static int intel_pstate_get_max_freq(struct cpudata *cpu) { - return global.turbo_disabled || global.no_turbo ? + return READ_ONCE(global.no_turbo) ? 
cpu->pstate.max_freq : cpu->pstate.turbo_freq; } @@ -2612,12 +2590,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) intel_pstate_update_perf_limits(cpu, policy->min, policy->max); if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { + int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); + /* * NOHZ_FULL CPUs need this as the governor callback may not * be invoked on them. */ intel_pstate_clear_update_util_hook(policy->cpu); - intel_pstate_max_within_limits(cpu); + intel_pstate_set_pstate(cpu, pstate); } else { intel_pstate_set_update_util_hook(policy->cpu); } @@ -2660,10 +2640,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, { int max_freq; - update_turbo_state(); if (hwp_active) { intel_pstate_get_hwp_cap(cpu); - max_freq = global.no_turbo || global.turbo_disabled ? + max_freq = READ_ONCE(global.no_turbo) ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; } else { max_freq = intel_pstate_get_max_freq(cpu); @@ -2757,9 +2736,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_freq; - update_turbo_state(); - global.turbo_disabled_mf = global.turbo_disabled; - policy->cpuinfo.max_freq = global.turbo_disabled ? + policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; policy->min = policy->cpuinfo.min_freq; @@ -2924,8 +2901,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int target_pstate; - update_turbo_state(); - freqs.old = policy->cur; freqs.new = target_freq; @@ -2947,8 +2922,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, struct cpudata *cpu = all_cpu_data[policy->cpu]; int target_pstate; - update_turbo_state(); - target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq); target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); @@ -2966,9 +2939,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum, int old_pstate = cpu->pstate.current_pstate; int cap_pstate, min_pstate, max_pstate, target_pstate; - update_turbo_state(); - cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : - HWP_HIGHEST_PERF(hwp_cap); + cap_pstate = READ_ONCE(global.no_turbo) ? + HWP_GUARANTEED_PERF(hwp_cap) : + HWP_HIGHEST_PERF(hwp_cap); /* Optimization: Avoid unnecessary divisions. 
*/ @@ -3136,10 +3109,8 @@ static void intel_pstate_driver_cleanup(void) if (intel_pstate_driver == &intel_pstate) intel_pstate_clear_update_util_hook(cpu); - raw_spin_lock(&hwp_notify_lock); kfree(all_cpu_data[cpu]); WRITE_ONCE(all_cpu_data[cpu], NULL); - raw_spin_unlock(&hwp_notify_lock); } } cpus_read_unlock(); @@ -3156,6 +3127,10 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) memset(&global, 0, sizeof(global)); global.max_perf_pct = 100; + global.turbo_disabled = turbo_is_disabled(); + global.no_turbo = global.turbo_disabled; + + arch_set_max_freq_ratio(global.turbo_disabled); intel_pstate_driver = driver; ret = cpufreq_register_driver(intel_pstate_driver); @@ -3406,14 +3381,30 @@ static bool intel_pstate_hwp_is_enabled(void) return !!(value & 0x1); } -static const struct x86_cpu_id intel_epp_balance_perf[] = { +#define POWERSAVE_MASK GENMASK(7, 0) +#define BALANCE_POWER_MASK GENMASK(15, 8) +#define BALANCE_PERFORMANCE_MASK GENMASK(23, 16) +#define PERFORMANCE_MASK GENMASK(31, 24) + +#define HWP_SET_EPP_VALUES(powersave, balance_power, balance_perf, performance) \ + (FIELD_PREP_CONST(POWERSAVE_MASK, powersave) |\ + FIELD_PREP_CONST(BALANCE_POWER_MASK, balance_power) |\ + FIELD_PREP_CONST(BALANCE_PERFORMANCE_MASK, balance_perf) |\ + FIELD_PREP_CONST(PERFORMANCE_MASK, performance)) + +#define HWP_SET_DEF_BALANCE_PERF_EPP(balance_perf) \ + (HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, HWP_EPP_BALANCE_POWERSAVE,\ + balance_perf, HWP_EPP_PERFORMANCE)) + +static const struct x86_cpu_id intel_epp_default[] = { /* * Set EPP value as 102, this is the max suggested EPP * which can result in one core turbo frequency for * AlderLake Mobile CPUs. */ - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102), - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 32), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)), + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), + X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), {} }; @@ -3445,7 +3436,7 @@ static int __init intel_pstate_init(void) * deal with it. 
*/ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { - WRITE_ONCE(hwp_active, 1); + hwp_active = true; hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; intel_cpufreq.attr = hwp_cpufreq_attrs; @@ -3506,10 +3497,23 @@ static int __init intel_pstate_init(void) intel_pstate_sysfs_expose_params(); if (hwp_active) { - const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf); + const struct x86_cpu_id *id = x86_match_cpu(intel_epp_default); - if (id) - epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data; + if (id) { + epp_values[EPP_INDEX_POWERSAVE] = + FIELD_GET(POWERSAVE_MASK, id->driver_data); + epp_values[EPP_INDEX_BALANCE_POWERSAVE] = + FIELD_GET(BALANCE_POWER_MASK, id->driver_data); + epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = + FIELD_GET(BALANCE_PERFORMANCE_MASK, id->driver_data); + epp_values[EPP_INDEX_PERFORMANCE] = + FIELD_GET(PERFORMANCE_MASK, id->driver_data); + pr_debug("Updated EPPs powersave:%x balanced power:%x balanced perf:%x performance:%x\n", + epp_values[EPP_INDEX_POWERSAVE], + epp_values[EPP_INDEX_BALANCE_POWERSAVE], + epp_values[EPP_INDEX_BALANCE_PERFORMANCE], + epp_values[EPP_INDEX_PERFORMANCE]); + } } mutex_lock(&intel_pstate_driver_lock); diff --git a/drivers/cpufreq/loongson3-acpi-cpufreq.c b/drivers/cpufreq/loongson3-acpi-cpufreq.c new file mode 100644 index 0000000000000000000000000000000000000000..67e48763e3f1d899b4a28279d802c22a0e1e6e4a --- /dev/null +++ b/drivers/cpufreq/loongson3-acpi-cpufreq.c @@ -0,0 +1,1529 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * loongson3-acpi-cpufreq.c - Loongson ACPI Processor P-States Driver + * + * Copyright (C) 2020 lvjianmin + * Yijun + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "cpufreq_governor.h" + +#include +#define CPU_ID_FIELD 0xf + +#define COMPLETE_STATUS 0x80000000 +#define VOLTAGE_COMMAND 0x21 + +#define DVFS_INFO 0x22 +#define DVFS_INFO_BOOST_LEVEL 0x23 +#define DVFS_INFO_MIN_FREQ 0xf +#define DVFS_INFO_MAX_FREQ 0xf0 +#define DVFS_INFO_BOOST_CORE_FREQ 0xff00 +#define DVFS_INFO_NORMAL_CORE_UPPER_LIMIT 0xf0000 +#define DVFS_INFO_BOOST_CORES 0xf00000 + +#define BOOST_MODE 0x80000 +#define NORMAL_MODE 0x40000 + +MODULE_DESCRIPTION("Loongson 3A5000 ACPI Processor P-States Driver"); + +MODULE_LICENSE("GPL"); + +#define CPUFREQ_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC) +#define LOONGSON_CONTROL_MASK (0xFF) +#define FACTOR (0xeac0c6e8) +#define BOOST_THRESHOLD (900) +#define MAX_CORES_PER_PACKAGE 64 +#define CPU_ID_FIELD 0xf +#define VOLTAGE_COMMAND 0x21 +#define MAX_READY_TIMEOUT 300000000 +#define RESERVED_FREQ 3 + +#define LOONGSON_BOOST_FREQ_MASK (0x7 << 8) +#define FREQ_STEP (25) + +static struct mutex boost_mutex[MAX_PACKAGES]; +static bool cpufreq_has_boost_freq; +static int max_boost_cores; +static int boost_gears; +static int boost_freqs[NR_CPUS + 1]; +struct package_data; +struct core_data; +static struct acpi_processor_performance __percpu *acpi_perf_data; +static struct cpufreq_driver loongson3_cpufreq_driver; +static struct freq_attr *loongson3_cpufreq_attr[]; +DECLARE_PER_CPU(struct clock_event_device, stable_clockevent_device); +static inline struct core_data *get_core_data(int cpu); + +static int min_freq_level; +static int max_freq_level; +static int max_upper_index; +static int max_boost_freq; + +/* threshold of 
MSA activity above which a core is considered MSA-busy */
+static int msa_count_threshold = 200;
+/* threshold of LASX activity above which a core is considered LASX-busy */
+static int lasx_count_threshold = 200;
+/* upper load threshold for the other cores when one core boosts with MSA/LASX enabled */
+static int load_threshold = 60;
+
+DEFINE_PER_CPU(unsigned long, msa_count);
+EXPORT_PER_CPU_SYMBOL(msa_count);
+
+#if defined(CONFIG_CPU_HAS_LASX)
+DEFINE_PER_CPU(unsigned long, lasx_count);
+EXPORT_PER_CPU_SYMBOL(lasx_count);
+#endif
+
+struct ce_update_data {
+	struct clock_event_device *cd;
+	unsigned int new_freq;
+};
+
+static struct kthread_worker cpufreq_worker;
+static struct task_struct *cpufreq_thread;
+/**
+ * struct core_data - Store core related information
+ * @in_boost: the core is boosting to boost_freq
+ * @cpu: logical cpu of the core
+ * @update_util: the update_util_data of @cpu, passed to the callback
+ *	invoked by cpufreq_update_util()
+ * @package: the package_data structure the core belongs to
+ * @work_in_progress: @work is busy
+ * @irq_work: to enqueue callback handling on irq workqueue
+ * @work: to enqueue work from irq workqueue on system workqueue
+ * @perf: frequency table related information from the ACPI table
+ * @normal_max_freq: max normal freq of the cpu
+ * @boost_freq: max boost freq of the cpu
+ * @clock_scale: clock scale to calculate cpu_data[cpu].udelay_val in boost mode
+ * @package_id: package id of the core
+ * @shift: clock shift to calculate cpu_data[cpu].udelay_val in boost mode
+ * @update_util_set: whether the callback has been set for cpufreq_update_util()
+ * @load: current load of the core
+ * @last_freq_update_time: time of the last frequency update
+ * @freq_update_delay_ns: minimum interval between frequency updates, which is
+ *	the transition_latency configured in the ACPI table
+ *
+ * The following members are used to calculate the load of the core:
+ * @prev_update_time: time of the previous load update
+ * @prev_cpu_idle: cumulative idle time at the previous update
+ * @prev_load: load computed at the previous update
+ * @sampling_rate: minimum interval between load updates
+ */
+struct core_data {
+	bool in_boost;
+	int cpu;
+	struct update_util_data update_util;
+	struct package_data *package;
+	bool work_in_progress;
+	struct irq_work irq_work;
+	struct kthread_work work;
+	struct acpi_processor_performance *perf;
+	unsigned int normal_max_freq;
+	unsigned int *boost_freq;
+	unsigned int *clock_scale;
+	unsigned int package_id;
+	unsigned int *shift;
+	bool update_util_set;
+	unsigned long long load;
+
+	u64 last_freq_update_time;
+	s64 freq_update_delay_ns;
+	u64 prev_update_time;
+	u64 prev_cpu_idle;
+	u32 prev_load;
+	u32 sampling_rate;
+};
+
+struct package_data {
+	int boost_cores;
+	int max_boost_cores;
+	int nr_cores;
+	char in_boost;
+	int nr_full_load_cores;
+	struct core_data core[MAX_CORES_PER_PACKAGE];
+} all_package_data[MAX_PACKAGES];
+
+static bool boost_supported(void)
+{
+	return loongson3_cpufreq_driver.set_boost;
+}
+
+/*
+ * Check whether target_freq is a boost frequency.
+ *
+ * target_freq must be a frequency from the frequency table
+ * when this function is called.
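+ *
+ * The boost level lives in bits 8-10 of the ACPI _PSS control value
+ * (LOONGSON_BOOST_FREQ_MASK); a level of 0 means the entry is a
+ * normal, non-boost frequency.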
+ */
+static int boost_level(struct acpi_processor_performance *perf, unsigned int target_freq)
+{
+	int i;
+
+	for (i = 0; i < perf->state_count; i++) {
+		if (target_freq == (perf->states[i].core_frequency * 1000))
+			return (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) >> 8;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+static int loongson3_cpu_freq_notifier(struct notifier_block *nb,
+					unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freqs;
+	struct clock_event_device __maybe_unused *cd;
+	struct core_data *core;
+	unsigned int __maybe_unused new_freq;
+	unsigned long cpu;
+	struct ce_update_data __maybe_unused ce_data;
+	int cur_boost_level;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		freqs = (struct cpufreq_freqs *)data;
+		cpu = freqs->policy->cpu;
+		core = get_core_data(cpu);
+		cur_boost_level = boost_level(core->perf, freqs->new);
+		if (cur_boost_level != 0) {
+			lpj_fine = (unsigned int) (((int64_t)core->clock_scale[cur_boost_level] *
+				cpufreq_scale(loops_per_jiffy, boost_freqs[cur_boost_level] * 1000,
+					freqs->new)) / core->shift[cur_boost_level]);
+		} else {
+			lpj_fine =
+				cpufreq_scale(loops_per_jiffy, core->normal_max_freq * 1000, freqs->new);
+		}
+	}
+
+	return 0;
+}
+#else
+static int loongson3_cpu_freq_notifier(struct notifier_block *nb,
+					unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freqs;
+	struct clock_event_device __maybe_unused *cd;
+	struct core_data *core;
+	unsigned int __maybe_unused new_freq;
+	unsigned long cpu;
+	int cur_boost_level;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		freqs = (struct cpufreq_freqs *)data;
+		cpu = freqs->cpu;
+		core = get_core_data(cpu);
+		/* use the post-change frequency; target_freq is not in scope here */
+		cur_boost_level = boost_level(core->perf, freqs->new);
+
+		if (cur_boost_level != 0) {
+			lpj_fine = (unsigned int) (((int64_t)core->clock_scale[cur_boost_level] *
+				loops_per_jiffy) / core->shift[cur_boost_level]);
+		} else {
+			lpj_fine = loops_per_jiffy;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+static struct notifier_block loongson3_cpufreq_notifier_block = {
+	.notifier_call = loongson3_cpu_freq_notifier
+};
+
+static int cpufreq_perf_find_level(struct acpi_processor_performance *perf,
+				   unsigned int target_freq,
+				   unsigned int boost_level)
+{
+	int i;
+
+	for (i = 0; i < perf->state_count; i++) {
+		if (boost_level) {
+			if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) {
+				if (target_freq == (perf->states[i].core_frequency * 1000))
+					return perf->states[i].control & LOONGSON_CONTROL_MASK;
+			}
+		} else {
+			if (!(perf->states[i].control & LOONGSON_BOOST_FREQ_MASK))
+				if (target_freq == (perf->states[i].core_frequency * 1000))
+					return perf->states[i].control;
+		}
+	}
+	return 0;
+}
+
+static int cpufreq_perf_find_freq(struct acpi_processor_performance *perf,
+				  unsigned int target_index,
+				  unsigned int boost_level)
+{
+	int i;
+
+	for (i = 0; i < perf->state_count; i++) {
+		if (boost_level) {
+			if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK)
+				if (target_index == (perf->states[i].control & LOONGSON_CONTROL_MASK))
+					return perf->states[i].core_frequency;
+		} else {
+			if (!(perf->states[i].control & LOONGSON_BOOST_FREQ_MASK))
+				if (target_index == perf->states[i].control)
+					return perf->states[i].core_frequency;
+		}
+	}
+
+	return 0;
+}
+
+static inline struct core_data *get_core_data(int cpu)
+{
+	int package_id = cpu_data[cpu].package;
+	struct package_data *package = &all_package_data[package_id];
+	int core_id = cpu_logical_map(cpu) % package->nr_cores;
+
+	return &package->core[core_id];
+}
+
+static bool package_boost(struct package_data *package)
+{
+	int i;
+	int
cur_full_load = 0; + +#if defined(CONFIG_CPU_HAS_LASX) + int lasx_enable_count = 0; + unsigned long lasx_num; + bool clear_lasx = false; +#endif + + int msa_enable_count = 0; + unsigned long msa_num; + bool clear_msa = false; + + for (i = 0; i < package->nr_cores; i++) { + +#if defined(CONFIG_CPU_HAS_LASX) + lasx_num = per_cpu(lasx_count, package->core[i].cpu); + + if (lasx_num) + lasx_enable_count++; + + if (lasx_num >= lasx_count_threshold) + clear_lasx = true; + + pr_debug("%s: lasx enabled, i %d, cpu %d, lasx_num %lu\n", + __func__, i, package->core[i].cpu, lasx_num); +#endif + msa_num = per_cpu(msa_count, package->core[i].cpu); + + if (msa_num) + msa_enable_count++; + + if (msa_num >= msa_count_threshold) + clear_msa = true; + + pr_debug("%s: msa enabled, i %d, cpu %d, msa_num %lu\n", + __func__, i, package->core[i].cpu, msa_num); + + if (package->core[i].prev_load >= load_threshold) + cur_full_load++; + } + +#if defined(CONFIG_CPU_HAS_LASX) + if (clear_lasx) { + for (i = 0; i < package->nr_cores; i++) + per_cpu(lasx_count, package->core[i].cpu) = 0; + } +#endif + + if (clear_msa) { + for (i = 0; i < package->nr_cores; i++) + per_cpu(msa_count, package->core[i].cpu) = 0; + } + +#if defined(CONFIG_CPU_HAS_LASX) + if (lasx_enable_count > 1 + || (lasx_enable_count && package->nr_full_load_cores > 1) + || (lasx_enable_count && cur_full_load > 1)) { + return false; + } +#endif + + if (msa_enable_count > 1 + || (msa_enable_count && package->nr_full_load_cores > 1) + || (msa_enable_count && cur_full_load > 1)) { + return false; + } + + if (package->nr_full_load_cores && + package->nr_full_load_cores <= package->max_boost_cores) + return true; + + return false; +} + +/* + * check if the cpu can be boosted. + * + * call the function after load of cpu updated. 
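+ * (i.e. after load_update() has refreshed core->load).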
+ */ +static bool cpu_can_boost(int cpu) +{ + struct core_data *core = get_core_data(cpu); + struct package_data *package = core->package; + + if (package->boost_cores >= package->max_boost_cores) + return false; + if (core->load > BOOST_THRESHOLD) + return true; + + return false; +} + +static void do_set_freq_level(int cpu, int freq_level) +{ + uint32_t message; + uint32_t val; + + message = (0 << 31) | (VOLTAGE_COMMAND << 24) + | ((uint32_t)freq_level << 4) + | (cpu & CPU_ID_FIELD); + iocsr_write32(message, 0x51c); + val = iocsr_read32(0x420); + + val |= 1 << 10; + iocsr_write32(val, 0x420); +} + +static int wait_for_ready_timeout(int64_t timeout) +{ + int ret; + struct timespec64 prev_ts; + struct timespec64 curr_ts; + ktime_t delay = ktime_set(0, 100); + + ktime_get_ts64(&prev_ts); + ktime_get_ts64(&curr_ts); + + ret = -EPERM; + while (((curr_ts.tv_sec - prev_ts.tv_sec) * 1000000000 + (curr_ts.tv_nsec - prev_ts.tv_nsec)) < timeout) { + ktime_get_ts64(&curr_ts); + + if (iocsr_read32(0x51c) & COMPLETE_STATUS) { + ret = 0; + break; + } + + __set_current_state(TASK_UNINTERRUPTIBLE); + schedule_hrtimeout(&delay, HRTIMER_MODE_REL); + } + return ret; +} + +/* Find closest freq to target in a table in ascending order */ +static int cpufreq_table_find_freq_ac(struct cpufreq_policy *policy, + unsigned int target_freq, + int boost_level) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + unsigned int best_freq = 0; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (pos->driver_data != boost_level) + continue; + if (freq > policy->max || freq < policy->min) + continue; + if (freq == target_freq) + return freq; + + if (freq < target_freq) { + best = idx; + best_freq = freq; + continue; + } + + /* No freq found below target_freq, return freq above target_freq */ + if (best == -1) + return freq; + + /* Choose the closest freq */ + if (target_freq - table[best].frequency > freq - target_freq) + return freq; + + return best_freq; + } + + return best_freq; +} + +/* Find closest freq to target in a table in descending order */ +static int cpufreq_table_find_freq_dc(struct cpufreq_policy *policy, + unsigned int target_freq, + int boost_level) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + unsigned int best_freq = 0; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (pos->driver_data != boost_level) + continue; + if (freq > policy->max || freq < policy->min) + continue; + + if (freq == target_freq) + return freq; + + if (freq > target_freq) { + best = idx; + best_freq = freq; + continue; + } + + /* No freq found above target_freq, return freq below target_freq */ + if (best == -1) + return freq; + + /* Choose the closest freq */ + if (table[best].frequency - target_freq > target_freq - freq) + return freq; + return best_freq; + } + + return best_freq; +} + +/* Works only on sorted freq-tables */ +static int cpufreq_table_find_freq(struct cpufreq_policy *policy, + unsigned int target_freq, + int boost_level) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_freq_ac(policy, target_freq, boost_level); + else + return cpufreq_table_find_freq_dc(policy, target_freq, boost_level); +} + +static void transition_end(struct 
cpufreq_policy *policy, + struct cpufreq_freqs *freqs, bool failed) +{ + if (unlikely(!policy->transition_ongoing)) + return; + cpufreq_freq_transition_end(policy, freqs, failed); +} +static void transition_begin(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs) +{ + if (unlikely(policy->transition_ongoing)) + cpufreq_freq_transition_end(policy, freqs, true); + + cpufreq_freq_transition_begin(policy, freqs); +} + +static void update_core_boost_info(struct core_data *core, bool boost_set) +{ + core->in_boost = boost_set; + if (boost_set) + core->package->boost_cores++; + else + core->package->boost_cores--; +} + +static unsigned int cores_freq_trans_notify(struct package_data *package, + bool before_trans, + bool trans_failed, + int find_level, + int find_freq, + unsigned int skip_cpumask) +{ + int i; + struct cpufreq_policy *policy; + struct cpufreq_freqs freqs; + unsigned int cores_level = 0; + unsigned int core_level; + + for (i = 0; i < package->nr_cores; i++) { + struct core_data *core = &package->core[i]; + + policy = cpufreq_cpu_get_raw(core->cpu); + if (((1 << i) & skip_cpumask) || !policy) + continue; + freqs.old = policy->cur; + freqs.flags = 0; + + /* find level from normal levels */ + core_level = cpufreq_perf_find_level(core->perf, policy->cur, find_level); + if (!core_level) { + pr_debug("cpu%d policy->cur=%d find_level=%d freq=%d skip_cpumask=%x \n", + policy->cpu, policy->cur, + find_level, find_freq, skip_cpumask); + } + freqs.new = cpufreq_perf_find_freq(core->perf, core_level, find_freq) * 1000; + if (!freqs.new) + pr_debug("%s: find freq error\n", __func__); + + pr_debug("%s: cpu %d, old freq %d, new freq %d, find_level %d, find_freq %d\n", + __func__, policy->cpu, freqs.old, freqs.new, find_level, find_freq); + cores_level |= (core_level << (i << 2)); + + if (before_trans) + transition_begin(policy, &freqs); + else + transition_end(policy, &freqs, trans_failed); + } + return cores_level; +} +static int loongson3_set_freq(struct core_data *core, unsigned long freq, int boost_level) +{ + int ret = 0; + int freq_level; + int phy_cpu; + int target_freq; + struct cpufreq_freqs freqs; + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(core->cpu); + + if (!policy) + return -EINVAL; + + ret = wait_for_ready_timeout(MAX_READY_TIMEOUT); + if (ret) + return ret; + + phy_cpu = cpu_logical_map(core->cpu); + target_freq = cpufreq_table_find_freq(policy, freq, boost_level); + if (!target_freq) + return -1; + if (target_freq == policy->cur) + return -1; + + freqs.flags = 0; + freqs.old = policy->cur; + freqs.new = target_freq; + freq_level = cpufreq_perf_find_level(core->perf, target_freq, boost_level); + if (!freq_level) { + pr_debug("%s: cpu%d freq=%lu targetfreq=%d boost_level=%d find level error\n", + __func__, core->cpu, freq, target_freq, boost_level); + } + + transition_begin(policy, &freqs); + do_set_freq_level(phy_cpu, freq_level); + ret = wait_for_ready_timeout(MAX_READY_TIMEOUT); + transition_end(policy, &freqs, !!ret); + + return ret; +} + +int loongson3_set_mode(int mode, int freq_level) +{ + uint32_t val; + int ret = 0; + uint32_t message; + + ret = wait_for_ready_timeout(MAX_READY_TIMEOUT); + if (ret) + return ret; + + message = mode | (VOLTAGE_COMMAND << 24) | freq_level; + iocsr_write32(message, 0x51c); + val = iocsr_read32(0x420); + val |= 1 << 10; + iocsr_write32(val, 0x420); + return wait_for_ready_timeout(MAX_READY_TIMEOUT); +} + +enum freq_adjust_action { + FAA_NORMAL, + FAA_N2B, + FAA_B2N, + FAA_BOOST, +}; + +static int faa_normal(struct 
cpufreq_policy *policy, int load)
+{
+	int ret;
+	unsigned int freq_next, min_f, max_f;
+	struct core_data *core = get_core_data(policy->cpu);
+
+	if (!core)
+		return -1;
+
+	min_f = policy->min;
+	max_f = policy->max;
+	freq_next = min_f + load * (max_f - min_f) / 100;
+	ret = loongson3_set_freq(core, freq_next, 0);
+	return ret;
+}
+
+static void handle_boost_cores(struct core_data *core, struct package_data *package,
+		unsigned long target_freq, bool skip_update_and_notify, bool update_core, bool inc_boost)
+{
+	int boost_level;
+	int find_level;
+	int find_freq;
+	int ret;
+	int inc_core = inc_boost ? 1 : -1;
+
+	if (boost_gears == 1) {
+		find_level = 0;
+		boost_level = boost_gears;
+	} else {
+		find_level = package->boost_cores;
+		if (update_core)
+			boost_level = package->boost_cores + inc_core;
+		else
+			boost_level = package->boost_cores;
+	}
+	find_freq = boost_level;
+	ret = loongson3_set_freq(core, target_freq, boost_level);
+	if (ret)
+		return;
+
+	if (skip_update_and_notify) {
+		if (update_core)
+			update_core_boost_info(core, inc_boost);
+		return;
+	}
+
+	if (boost_gears != 1) {
+		cores_freq_trans_notify(package, true, false,
+					find_level, find_freq, 1 << core->cpu);
+		cores_freq_trans_notify(package, false, false,
+					find_level, find_freq, 1 << core->cpu);
+	}
+	if (update_core)
+		update_core_boost_info(core, inc_boost);
+}
+
+static void faa_boost(struct cpufreq_policy *policy, int load)
+{
+	unsigned int min_f, max_f;
+	struct core_data *core = get_core_data(policy->cpu);
+	struct package_data *package = core->package;
+	unsigned long target_freq;
+
+	/* boost cores from n to n + 1 */
+	if (core->load > BOOST_THRESHOLD) {
+		if (package->boost_cores < package->max_boost_cores
+		    && !core->in_boost) {
+			if (boost_gears == 1) {
+				target_freq = policy->max;
+			} else {
+				target_freq = cpufreq_table_find_freq(policy, policy->max,
+						package->boost_cores + 1);
+				if (!target_freq) {
+					pr_debug("%s: find freq error, boost_level %d, cur freq %u\n",
+						__func__, package->boost_cores, policy->max);
+				}
+			}
+			handle_boost_cores(core, package, target_freq, false, true, true);
+		}
+	} else {
+		/* 1. core not in boost: raise the level without changing the PLL
+		 * 2.
core in boost, boost cores from n to n - 1 + */ + min_f = policy->min; + max_f = policy->max; + target_freq = min_f + load * (max_f - min_f) / 100; + handle_boost_cores(core, package, target_freq, !core->in_boost, core->in_boost, false); + } + + +} + +static void get_boost_cores(struct package_data *package, int *boost_cores, int *boost_count) +{ + struct core_data *core; + struct cpufreq_policy *policy; + int i; + + /* count boost cores */ + for (i = 0; i < package->nr_cores; i++) { + core = &package->core[i]; + policy = cpufreq_cpu_get_raw(core->cpu); + if (!policy) + continue; + + if (cpu_can_boost(core->cpu)) { + if (boost_cores) + *boost_cores |= (1 << i); + + (*boost_count)++; + } + } +} + +static void faa_n2b(struct package_data *package, struct core_data *core) +{ + int boost_cores = 0; + int boost_count = 0; + int freq_level; + int i; + + get_boost_cores(package, &boost_cores, &boost_count); + + if (boost_gears == 1) + boost_count = 1; + + freq_level = cores_freq_trans_notify(package, true, false, + 0, boost_count, 0); + if (!loongson3_set_mode(BOOST_MODE, freq_level)) { + cores_freq_trans_notify(package, false, false, + 0, boost_count, 0); + package->in_boost = true; + for (i = 0; i < package->nr_cores; i++) { + if (boost_cores & (1 << i)) + update_core_boost_info(&package->core[i], true); + } + } else + cores_freq_trans_notify(package, false, true, + 0, boost_count, 0); +} + +static void faa_b2n(struct package_data *package) +{ + int i; + int boost_count = package->boost_cores; + + if (boost_gears == 1) + boost_count = 1; + + cores_freq_trans_notify(package, true, false, + boost_count, 0, 0); + if (!loongson3_set_mode(NORMAL_MODE, 0)) { + cores_freq_trans_notify(package, false, false, + boost_count, 0, 0); + for (i = 0; i < package->nr_cores; i++) { + if (package->core[i].in_boost) + update_core_boost_info(&package->core[i], false); + } + package->in_boost = false; + } else + cores_freq_trans_notify(package, false, true, + boost_count, 0, 0); +} + + +unsigned int load_update(struct core_data *core) +{ + int i; + u64 update_time, cur_idle_time; + unsigned int idle_time, time_elapsed; + unsigned int load = 0; + struct package_data *package = core->package; + + cur_idle_time = get_cpu_idle_time(core->cpu, &update_time, true); + + time_elapsed = update_time - core->prev_update_time; + core->prev_update_time = update_time; + + idle_time = cur_idle_time - core->prev_cpu_idle; + core->prev_cpu_idle = cur_idle_time; + + if (unlikely(!time_elapsed)) { + /* + * That can only happen when this function is called + * twice in a row with a very short interval between the + * calls, so the previous load value can be used then. + */ + load = core->prev_load; + } else if (unlikely((int)idle_time > 2 * core->sampling_rate && + core->prev_load)) { + + load = core->prev_load; + core->prev_load = 0; + } else { + if (time_elapsed >= idle_time) + load = 100 * (time_elapsed - idle_time) / time_elapsed; + else + load = (int)idle_time < 0 ? 
100 : 0;
+		core->prev_load = load;
+	}
+
+	package->nr_full_load_cores = 0;
+	for (i = 0; i < package->nr_cores; i++) {
+		if (package->core[i].load > BOOST_THRESHOLD)
+			package->nr_full_load_cores++;
+	}
+
+	return load;
+}
+
+static bool cpufreq_should_update_freq(struct core_data *core, u64 time)
+{
+	s64 delta_ns;
+
+	delta_ns = time - core->last_freq_update_time;
+	return delta_ns >= core->freq_update_delay_ns;
+}
+
+static void cpufreq_update(struct cpufreq_policy *policy)
+{
+	int action;
+	struct core_data *core;
+	struct package_data *package;
+	unsigned long load;
+	bool should_be_boost = false;
+
+	core = get_core_data(policy->cpu);
+	package = core->package;
+
+	mutex_lock(&boost_mutex[core->package_id]);
+
+	if (!core->update_util_set) {
+		mutex_unlock(&boost_mutex[core->package_id]);
+		return;
+	}
+
+	load = load_update(core);
+	core->load = (u64)load + ((core->load * FACTOR) >> 32);
+
+	if (cpufreq_boost_enabled()) {
+		should_be_boost = package_boost(package);
+	} else {
+		if (package->in_boost)
+			should_be_boost = false;
+	}
+
+	action = (package->in_boost << 1) | should_be_boost;
+	switch (action) {
+	case FAA_NORMAL:
+		faa_normal(policy, load);
+		break;
+	case FAA_B2N:
+		faa_b2n(package);
+		break;
+	case FAA_N2B:
+		faa_n2b(package, core);
+		break;
+	case FAA_BOOST:
+		faa_boost(policy, load);
+		break;
+	}
+	mutex_unlock(&boost_mutex[core->package_id]);
+}
+
+static void set_max_within_limits(struct cpufreq_policy *policy)
+{
+	struct core_data *core = get_core_data(policy->cpu);
+
+	/*
+	 * policy->max <= core->normal_max_freq * 1000 indicates that
+	 * boost is disabled, so the max freq is within the normal range.
+	 *
+	 * Skip the performance policy when boost is enabled.
+	 */
+	if (policy->max <= (core->normal_max_freq * 1000)) {
+		mutex_lock(&boost_mutex[core->package_id]);
+		if (!loongson3_set_freq(core, policy->max, 0))
+			pr_debug("Set cpu %d to performance mode under normal range.\n",
+				 policy->cpu);
+		mutex_unlock(&boost_mutex[core->package_id]);
+	}
+}
+
+static void clear_update_util_hook(unsigned int cpu)
+{
+	struct core_data *core = get_core_data(cpu);
+
+	if (!core->update_util_set)
+		return;
+
+	cpufreq_remove_update_util_hook(cpu);
+	core->update_util_set = false;
+	synchronize_rcu();
+}
+
+static void update_util_handler(struct update_util_data *data, u64 time,
+				unsigned int flags)
+{
+	struct core_data *core = container_of(data, struct core_data, update_util);
+
+	if (!cpufreq_should_update_freq(core, time))
+		return;
+
+	if (!core->work_in_progress) {
+		core->last_freq_update_time = time;
+		core->work_in_progress = true;
+		irq_work_queue(&core->irq_work);
+	}
+}
+
+static void set_update_util_hook(unsigned int cpu)
+{
+	struct core_data *core = get_core_data(cpu);
+
+	if (core->update_util_set)
+		return;
+
+	cpufreq_add_update_util_hook(cpu, &core->update_util,
+				     update_util_handler);
+	core->update_util_set = true;
+}
+
+static int loongson3_cpufreq_set_policy(struct cpufreq_policy *policy)
+{
+	if (!policy->cpuinfo.max_freq)
+		return -ENODEV;
+
+	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+		clear_update_util_hook(policy->cpu);
+		set_max_within_limits(policy);
+	} else {
+		set_update_util_hook(policy->cpu);
+	}
+
+	return 0;
+}
+
+static int loongson3_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
+{
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
+
+	return 0;
+}
+
+static void set_boost_freq(bool has)
+{
+	cpufreq_has_boost_freq = has;
+}
+
+static bool has_boost_freq(void)
+{
+	return cpufreq_has_boost_freq;
+}
+
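The ratio produced by compute_scale() below is what the frequency notifier
uses to rescale cpu_data[cpu].udelay_val for a boost gear: it returns
floor(dividend / divisor * 10^8) while multiplying *shift by the same 10^8,
so the caller can later divide the scale factor back out. A minimal
userspace sketch of the same long-division technique (the main() driver and
the 2200/2000 MHz gear values are illustrative assumptions, not taken from
the driver)::

	#include <stdio.h>

	/* Same long-division loop as the driver's compute_scale():
	 * returns floor(dividend / divisor * 10^8) and multiplies
	 * *shift by 10^8 so the caller can divide the scaling out again.
	 */
	static int compute_scale(int *shift, int dividend, int divisor)
	{
		int result = dividend / divisor;
		int remainder = (dividend % divisor) * 10;
		int i;

		for (i = 0; i < 8; i++) {
			result = result * 10 + remainder / divisor;
			remainder = (remainder % divisor) * 10;
			*shift *= 10;
		}
		return result;
	}

	int main(void)
	{
		int shift = 1;
		/* e.g. a 2200 MHz boost gear over a 2000 MHz normal max */
		int scale = compute_scale(&shift, 2200, 2000);

		/* prints: scale=110000000 shift=100000000 ratio=1.10000000 */
		printf("scale=%d shift=%d ratio=%d.%08d\n",
		       scale, shift, scale / shift, scale % shift);
		return 0;
	}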
+/*
+ * Compute (dividend / divisor) as a fixed-point ratio scaled by 10^8;
+ * *shift is multiplied by the same factor so callers can divide it out.
+ */
+static int compute_scale(int *shift, int dividend, int divisor)
+{
+	int i;
+	int result = 0;
+	int remainder = 0;
+	int scale_resolution = 8;
+
+	result = dividend / divisor;
+	remainder = (dividend % divisor) * 10;
+
+	for (i = 0; i < scale_resolution; i++) {
+		result = result * 10 + remainder / divisor;
+		remainder = (remainder % divisor) * 10;
+		*shift *= 10;
+	}
+
+	return result;
+}
+
+static void cpufreq_work_handler(struct kthread_work *work)
+{
+	struct core_data *core;
+	struct cpufreq_policy *policy;
+
+	core = container_of(work, struct core_data, work);
+	policy = cpufreq_cpu_get_raw(core->cpu);
+
+	if (policy) {
+		cpufreq_update(policy);
+		core->work_in_progress = false;
+	}
+}
+
+static void cpufreq_irq_work(struct irq_work *irq_work)
+{
+	struct core_data *core = container_of(irq_work, struct core_data, irq_work);
+
+	kthread_queue_work(&cpufreq_worker, &core->work);
+}
+
+static void cpufreq_kthread_stop(void)
+{
+	kthread_flush_worker(&cpufreq_worker);
+	kthread_stop(cpufreq_thread);
+}
+
+static int cpufreq_kthread_create(void)
+{
+	struct sched_attr attr = {
+		.size		= sizeof(struct sched_attr),
+		.sched_policy	= SCHED_DEADLINE,
+		.sched_flags	= 0x10000000,	/* SCHED_FLAG_SUGOV */
+		.sched_nice	= 0,
+		.sched_priority	= 0,
+		.sched_runtime	= 1000000,
+		.sched_deadline	= 10000000,
+		.sched_period	= 10000000,
+	};
+	int ret;
+
+	kthread_init_worker(&cpufreq_worker);
+	cpufreq_thread = kthread_create(kthread_worker_fn, &cpufreq_worker, "lsfrq:%d", 0);
+	if (IS_ERR(cpufreq_thread))
+		return PTR_ERR(cpufreq_thread);
+
+	ret = sched_setattr_nocheck(cpufreq_thread, &attr);
+	if (ret) {
+		kthread_stop(cpufreq_thread);
+		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+		return ret;
+	}
+
+	wake_up_process(cpufreq_thread);
+
+	return 0;
+}
+
+static int init_acpi(struct acpi_processor_performance *perf)
+{
+	int result = 0;
+	int i;
+
+	perf->shared_type = 0;
+	perf->state_count = (max_freq_level - min_freq_level + 1) * (boost_gears + 1);
+
+	perf->states =
+		kmalloc_array(perf->state_count,
+			      sizeof(struct acpi_processor_px),
+			      GFP_KERNEL);
+	if (!perf->states) {
+		result = -ENOMEM;
+		return result;
+	}
+
+	for (i = 0; i < perf->state_count; i++) {
+		perf->states[i].power = 0x3A98;
+		perf->states[i].transition_latency = 10000;
+		perf->states[i].bus_master_latency = 10000;
+		perf->states[i].status = (RESERVED_FREQ + i / (boost_gears + 1));
+		perf->states[i].control = (RESERVED_FREQ + i / (boost_gears + 1));
+
+		switch (i % (boost_gears + 1)) {
+		case 0:
+			perf->states[i].core_frequency = (cpu_clock_freq / 1000000) * (8 - i / (boost_gears + 1)) / 8;
+			break;
+		case 1:
+		case 2:
+		case 3:
+		case 4:
+			perf->states[i].core_frequency =
+				boost_freqs[i % (boost_gears + 1)] * (8 - i / (boost_gears + 1)) / 8;
+			perf->states[i].control |= ((i % (boost_gears + 1)) << 8);
+			break;
+		default:
+			pr_info("%s: unexpected gear index %d in freq table\n", __func__, i);
+		}
+	}
+
+	return result;
+}
+
+static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	unsigned int i;
+	struct acpi_processor_performance *perf;
+	struct cpufreq_frequency_table *freq_table;
+	struct core_data *core;
+	int package_id;
+	unsigned int cpu = policy->cpu;
+	int result = 0;
+
+	perf = per_cpu_ptr(acpi_perf_data, cpu);
+	package_id = cpu_data[cpu].package;
+	core = get_core_data(cpu);
+	all_package_data[package_id].nr_cores = loongson_sysconf.cores_per_package;
+	all_package_data[package_id].max_boost_cores = max_boost_cores;
+	core->normal_max_freq = 0;
+	all_package_data[package_id].nr_full_load_cores = 0;
+	core->cpu = cpu;
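+	/*
+	 * The per-gear tables allocated below hold boost_gears + 1 entries:
+	 * entry 0 describes the normal gear, entries 1..boost_gears the
+	 * boost gears.
+	 */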
+	core->work_in_progress = false;
+	core->last_freq_update_time = 0;
+	core->perf = perf;
+	core->package_id = package_id;
+	core->package = &all_package_data[package_id];
+
+	core->boost_freq = kmalloc_array(boost_gears + 1, sizeof(*core->boost_freq), GFP_KERNEL);
+	core->clock_scale = kmalloc_array(boost_gears + 1, sizeof(*core->clock_scale), GFP_KERNEL);
+	core->shift = kmalloc_array(boost_gears + 1, sizeof(*core->shift), GFP_KERNEL);
+	if (!core->boost_freq || !core->clock_scale || !core->shift) {
+		kfree(core->boost_freq);
+		kfree(core->clock_scale);
+		kfree(core->shift);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < boost_gears + 1; i++) {
+		core->boost_freq[i] = boost_freqs[i];
+		core->shift[i] = 1;
+	}
+
+	if (!acpi_disabled) {
+		result = acpi_processor_register_performance(perf, cpu);
+	} else {
+		result = init_acpi(perf);
+		policy->shared_type = perf->shared_type;
+	}
+
+	if (result) {
+		pr_info("CPU%d acpi_processor_register_performance failed.\n", cpu);
+		return result;
+	}
+
+	for (i = 0; i < MAX_PACKAGES; i++)
+		mutex_init(&boost_mutex[i]);
+
+	/* capability check */
+	if (perf->state_count <= 1) {
+		pr_debug("No P-States\n");
+		result = -ENODEV;
+		goto err_unreg;
+	}
+
+	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
+			     GFP_KERNEL);
+	if (!freq_table) {
+		result = -ENOMEM;
+		goto err_unreg;
+	}
+
+	/* detect transition latency */
+	policy->cpuinfo.transition_latency = 0;
+	for (i = 0; i < perf->state_count; i++) {
+		if ((perf->states[i].transition_latency * 1000) >
+		    policy->cpuinfo.transition_latency)
+			policy->cpuinfo.transition_latency =
+				perf->states[i].transition_latency * 1000;
+		if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) {
+			set_boost_freq(true);
+		} else {
+			if (perf->states[i].core_frequency > core->normal_max_freq)
+				core->normal_max_freq = perf->states[i].core_frequency;
+		}
+	}
+
+	core->freq_update_delay_ns = policy->cpuinfo.transition_latency;
+
+	for (i = 0; i < boost_gears + 1; i++) {
+		core->clock_scale[i] = compute_scale(&core->shift[i], boost_freqs[i], core->normal_max_freq);
+		pr_debug("%s: boost_freqs[%d] %d, normal_max_freq %d, scale %d, shift %d\n",
+			 __func__, i, boost_freqs[i], core->normal_max_freq,
+			 core->clock_scale[i], core->shift[i]);
+	}
+
+	/* table init */
+	for (i = 0; i < perf->state_count; i++) {
+		freq_table[i].driver_data = (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) >> 8;
+		if (freq_table[i].driver_data)
+			freq_table[i].flags |= CPUFREQ_BOOST_FREQ;
+		freq_table[i].frequency =
+			perf->states[i].core_frequency * 1000;
+	}
+	freq_table[i].frequency = CPUFREQ_TABLE_END;
+	policy->freq_table = freq_table;
+	perf->state = 0;
+
+	/* add boost-attr if supported */
+	if (has_boost_freq() && boost_supported())
+		loongson3_cpufreq_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+
+	policy->cur = core->normal_max_freq * 1000;
+
+	pr_info("CPU%u - ACPI performance management activated.\n", cpu);
+	for (i = 0; i < perf->state_count; i++)
+		pr_debug("     %cP%d: %d MHz, %d mW, %d uS, level %d\n",
+			 (i == perf->state ? '*' : ' '), i,
+			 (u32) perf->states[i].core_frequency,
+			 (u32) perf->states[i].power,
+			 (u32) perf->states[i].transition_latency,
+			 (u32) perf->states[i].control);
+
+	/*
+	 * The first call to ->target() should result in us actually
+	 * writing something to the appropriate registers.
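+	 * (policy->cur is therefore seeded with the normal max frequency
+	 * above.)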
+ */ + policy->fast_switch_possible = false; + + init_irq_work(&core->irq_work, cpufreq_irq_work); + kthread_init_work(&core->work, cpufreq_work_handler); + core->sampling_rate = max_t(unsigned int, + CPUFREQ_SAMPLING_INTERVAL, + cpufreq_policy_transition_delay_us(policy)); + return result; + +err_unreg: + if (!acpi_disabled) + acpi_processor_unregister_performance(cpu); + + return result; +} + +static int loongson3_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + struct core_data *core = get_core_data(policy->cpu); + + clear_update_util_hook(policy->cpu); + irq_work_sync(&core->irq_work); + kthread_cancel_work_sync(&core->work); + core->work_in_progress = false; + policy->fast_switch_possible = false; + if (!acpi_disabled) + acpi_processor_unregister_performance(policy->cpu); + kfree(policy->freq_table); + kfree(core->boost_freq); + kfree(core->clock_scale); + kfree(core->shift); + return 0; +} + +static struct freq_attr *loongson3_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, /* Extra space for boost-attr if supported */ + NULL, +}; + +static struct cpufreq_driver loongson3_cpufreq_driver = { + .verify = loongson3_cpufreq_verify_policy, + .setpolicy = loongson3_cpufreq_set_policy, + .init = loongson3_cpufreq_cpu_init, + .exit = loongson3_cpufreq_cpu_exit, + .name = "acpi-cpufreq", + .attr = loongson3_cpufreq_attr, +}; + +static void free_acpi_perf_data(void) +{ + unsigned int i; + + /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ + for_each_possible_cpu(i) + free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) + ->shared_cpu_map); + free_percpu(acpi_perf_data); +} + +static int __init loongson3_cpufreq_early_init(void) +{ + unsigned int i; + + acpi_perf_data = alloc_percpu(struct acpi_processor_performance); + if (!acpi_perf_data) + return -ENOMEM; + for_each_possible_cpu(i) { + if (!zalloc_cpumask_var_node( + &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, + GFP_KERNEL, cpu_to_node(i))) { + free_acpi_perf_data(); + return -ENOMEM; + } + } + return 0; +} + +static bool support_boost(void) +{ + int message; + int val; + int i; + + if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) + return false; + message = DVFS_INFO << 24; + iocsr_write32(message, 0x51c); + val = iocsr_read32(0x420); + + val |= 1 << 10; + iocsr_write32(val, 0x420); + if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) { + pr_info("%s: not support boost\n", __func__); + return false; + } + + val = iocsr_read32(0x51c); + + min_freq_level = val & DVFS_INFO_MIN_FREQ; + max_freq_level = (val & DVFS_INFO_MAX_FREQ) >> 4; + + if ((val & DVFS_INFO_BOOST_CORE_FREQ) && ((val & DVFS_INFO_BOOST_CORES) >> 20)) { + max_boost_cores = (val & DVFS_INFO_BOOST_CORES) >> 20; + max_boost_freq = ((val & DVFS_INFO_BOOST_CORE_FREQ) >> 8) * 25; + max_upper_index = (val & DVFS_INFO_NORMAL_CORE_UPPER_LIMIT) >> 16; + } else { + boost_gears = 0; + return false; + } + + /* Read boost levels */ + if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) + return false; + + /* for version 1, single boost freq boost */ + message = DVFS_INFO_BOOST_LEVEL << 24; + iocsr_write32(message, 0x51c); + val = iocsr_read32(0x420); + + val |= 1 << 10; + iocsr_write32(val, 0x420); + + if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) { + pr_info("%s: single boost mode\n", __func__); + boost_gears = 1; + boost_freqs[0] = calc_const_freq() / 1000000; + for (i = 1; i < boost_gears + 1; i++) + boost_freqs[i] = max_boost_freq; + + /* set 0x51c complete */ + iocsr_write32(COMPLETE_STATUS, 0x51c); + } else { + pr_info("%s: multi boost mode\n", __func__); + 
		/*
+		 * One gear per boostable core; gears 2 and up step down from
+		 * max_boost_freq by the 4-bit deltas read back from the 0x51c
+		 * register, in FREQ_STEP units.
+		 */
+		boost_gears = max_boost_cores;
+		val = iocsr_read32(0x51c);
+
+		boost_freqs[0] = calc_const_freq() / 1000000;
+		boost_freqs[1] = max_boost_freq;
+
+		if (boost_gears > 1) {
+			for (i = 2; i < boost_gears + 1; i++)
+				boost_freqs[i] = max_boost_freq - (((val >> ((i-2) * 4)) & 0xf) * FREQ_STEP);
+		}
+	}
+
+	pr_info("%s: min_freq_level %d, max_freq_level %d, max_boost_cores %d, boost_gears %d\n",
+			__func__, min_freq_level, max_freq_level, max_boost_cores, boost_gears);
+
+	return true;
+}
+
+static int cpufreq_table_cpuinfo(struct cpufreq_policy *policy,
+				 struct cpufreq_frequency_table *table,
+				 bool boost)
+{
+	struct cpufreq_frequency_table *pos;
+	unsigned int min_freq = ~0;
+	unsigned int max_freq = 0;
+	unsigned int freq;
+
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;
+
+		/* skip boost entries unless boost is enabled */
+		if (!boost && pos->driver_data)
+			continue;
+		if (freq < min_freq)
+			min_freq = freq;
+		if (freq > max_freq)
+			max_freq = freq;
+	}
+
+	policy->min = policy->cpuinfo.min_freq = min_freq;
+	policy->max = policy->cpuinfo.max_freq = max_freq;
+	if (policy->min == ~0)
+		return -EINVAL;
+	else
+		return 0;
+}
+
+static int set_boost(struct cpufreq_policy *policy, int state)
+{
+	if (!has_boost_freq())
+		return -EINVAL;
+
+	if (!policy)
+		return -EINVAL;
+
+	if (!state) {
+		if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
+			cpufreq_update(policy);
+	}
+	if (!policy->freq_table)
+		return -EINVAL;
+
+	cpufreq_table_cpuinfo(policy, policy->freq_table, state);
+	down_write(&policy->rwsem);
+	up_write(&policy->rwsem);
+
+	if (!state)
+		set_max_within_limits(policy);
+
+	return 0;
+}
+
+static void __init loongson3_cpufreq_boost_init(void)
+{
+	if (!support_boost()) {
+		pr_info("Boost capabilities not present in the processor\n");
+		return;
+	}
+
+	loongson3_cpufreq_driver.set_boost = set_boost;
+}
+
+static int cpufreq_supported_detect(void)
+{
+	return wait_for_ready_timeout(MAX_READY_TIMEOUT);
+}
+
+static int __init loongson3_cpufreq_init(void)
+{
+	int ret;
+
+	if (!cpu_has_csr || !cpu_has_scalefreq)
+		return -ENODEV;
+
+	/* don't keep reloading if a cpufreq driver already exists */
+	if (cpufreq_get_current_driver())
+		return -EEXIST;
+
+	if (cpufreq_supported_detect()) {
+		pr_info("%s failed!\n", __func__);
+		return -ENODEV;
+	}
+
+	ret = loongson3_cpufreq_early_init();
+	if (ret)
+		return ret;
+	loongson3_cpufreq_boost_init();
+
+	/* the worker must exist before any policy can queue work to it */
+	ret = cpufreq_kthread_create();
+	if (ret) {
+		free_acpi_perf_data();
+		return ret;
+	}
+
+	cpufreq_register_notifier(&loongson3_cpufreq_notifier_block,
+				CPUFREQ_TRANSITION_NOTIFIER);
+	ret = cpufreq_register_driver(&loongson3_cpufreq_driver);
+	if (ret) {
+		cpufreq_unregister_notifier(&loongson3_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+		cpufreq_kthread_stop();
+		free_acpi_perf_data();
+	}
+
+	return ret;
+}
+
+static void __exit loongson3_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&loongson3_cpufreq_driver);
+	cpufreq_unregister_notifier(&loongson3_cpufreq_notifier_block,
+				CPUFREQ_TRANSITION_NOTIFIER);
+	cpufreq_kthread_stop();
+	free_acpi_perf_data();
+}
+
+late_initcall(loongson3_cpufreq_init);
+module_exit(loongson3_cpufreq_exit);
+
+static const struct acpi_device_id processor_device_ids[] = {
+	{ACPI_PROCESSOR_OBJECT_HID, },
+	{ACPI_PROCESSOR_DEVICE_HID, },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
+MODULE_ALIAS("acpi");
diff --git a/drivers/cpufreq/sw64_cpufreq.c b/drivers/cpufreq/sw64_cpufreq.c
new file mode 100644
index 0000000000000000000000000000000000000000..f4bf5f3cc550d9ce0788707c2f8cb7eef11d77b2
--- /dev/null
+++ b/drivers/cpufreq/sw64_cpufreq.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/cpufreq/sw64_cpufreq.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+/*
+ * Cpufreq driver for the sw64 processors
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include
+#include +#include +#include /* set_cpus_allowed() */ +#include +#include +#include + +#include +#include +#include + +static uint nowait; + +static struct clk *cpuclk; + + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data); + +static struct notifier_block sw64_cpufreq_notifier_block = { + .notifier_call = sw64_cpu_freq_notifier +}; + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freqs = (struct cpufreq_freqs *)data; + unsigned long cpu = freqs->policy->cpu; + + if (val == CPUFREQ_POSTCHANGE) + sw64_update_clockevents(cpu, freqs->new * 1000); + + return 0; +} + +static unsigned int sw64_cpufreq_get(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + + if (!policy || IS_ERR(policy->clk)) { + pr_err("%s: No %s associated to cpu: %d\n", + __func__, policy ? "clk" : "policy", cpu); + return 0; + } + + return __sw64_cpufreq_get(policy); +} + +/* + * Here we notify other drivers of the proposed change and the final change. + */ +static int sw64_cpufreq_target(struct cpufreq_policy *policy, + unsigned int index) +{ + unsigned int cpu = policy->cpu; + + if (!cpu_online(cpu)) + return -ENODEV; + + /* setting the cpu frequency */ + sw64_set_rate(index); + update_cpu_freq(freq_table[index].frequency); + + return 0; +} + +static int sw64_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + cpuclk = sw64_clk_get(NULL, "cpu_clk"); + if (IS_ERR(cpuclk)) { + pr_err("couldn't get CPU clk\n"); + return PTR_ERR(cpuclk); + } + + policy->clk = cpuclk; + + cpufreq_generic_init(policy, freq_table, 0); + + return 0; +} + +static int sw64_cpufreq_verify(struct cpufreq_policy_data *policy) +{ + return cpufreq_frequency_table_verify(policy, freq_table); +} + +static int sw64_cpufreq_exit(struct cpufreq_policy *policy) +{ + return 0; +} + +static struct freq_attr *sw64_table_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, NULL, +}; + +static struct cpufreq_driver sw64_cpufreq_driver = { + .name = "sw64", + .init = sw64_cpufreq_cpu_init, + .verify = sw64_cpufreq_verify, + .target_index = sw64_cpufreq_target, + .get = sw64_cpufreq_get, + .exit = sw64_cpufreq_exit, + .attr = sw64_table_attr, +}; + +static const struct platform_device_id platform_device_ids[] = { + { + .name = "sw64_cpufreq", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "sw64_cpufreq", + }, + .id_table = platform_device_ids, +}; + + +static int __init cpufreq_init(void) +{ + int ret; + + if (is_in_guest()) { + pr_warn("Now sw_64 CPUFreq does not support virtual machines\n"); + return -ENODEV; + } + + /* Register platform stuff */ + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("SW-64 CPU frequency driver\n"); + + cpufreq_register_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + return cpufreq_register_driver(&sw64_cpufreq_driver); +} + +static void __exit cpufreq_exit(void) +{ + cpufreq_unregister_driver(&sw64_cpufreq_driver); + cpufreq_unregister_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + platform_driver_unregister(&platform_driver); +} + +module_init(cpufreq_init); +module_exit(cpufreq_exit); + +module_param(nowait, uint, 0644); +MODULE_PARM_DESC(nowait, "Disable SW-64 specific wait"); + +MODULE_DESCRIPTION("cpufreq driver for sw64"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/cpufreq/sw64_cpufreq_debugfs.c b/drivers/cpufreq/sw64_cpufreq_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..bb4ae26bc22b4a561039433f4e291c9a87d78a32 --- /dev/null +++ b/drivers/cpufreq/sw64_cpufreq_debugfs.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include +#include +#include + +static int cpufreq_show(struct seq_file *m, void *v) +{ + int i; + u64 val; + int freq; + + val = sw64_io_read(0, CLK_CTL); + val = val >> CORE_PLL2_CFG_SHIFT; + + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (freq_table[i].frequency != CPUFREQ_ENTRY_INVALID) + freq = freq_table[i].frequency; + else + freq = freq_table[i].driver_data; + + if (val == i) + seq_printf(m, "[%d] ", freq); + else + seq_printf(m, "%d ", freq); + } + seq_puts(m, "\n"); + + return 0; +} + +static int cpufreq_open(struct inode *inode, struct file *file) +{ + return single_open(file, cpufreq_show, NULL); +} + +static ssize_t cpufreq_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + char buf[5]; + size_t size; + int cf, i, err, index, freq; + + size = min(sizeof(buf) - 1, len); + if (copy_from_user(buf, user_buf, size)) + return -EFAULT; + buf[size] = '\0'; + + err = kstrtoint(buf, 10, &cf); + if (err) + return err; + + index = -1; + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (freq_table[i].frequency != CPUFREQ_ENTRY_INVALID) + freq = freq_table[i].frequency; + else + freq = freq_table[i].driver_data; + + if (cf == freq) { + index = i; + break; + } + } + + if (index < 0) + return -EINVAL; + + sw64_set_rate(index); + update_cpu_freq(freq); + return len; +} + +static const struct file_operations set_cpufreq_fops = { + .open = cpufreq_open, + .read = seq_read, + .write = cpufreq_set, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init cpufreq_debugfs_init(void) +{ + struct dentry *cpufreq_entry; + + if (!sw64_debugfs_dir) + return -ENODEV; + + cpufreq_entry = debugfs_create_file("cpufreq", 0600, + sw64_debugfs_dir, NULL, + &set_cpufreq_fops); + if (!cpufreq_entry) + return -ENOMEM; + + return 0; +} +late_initcall(cpufreq_debugfs_init); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index c761952f0dc6df92e1ee37ad5707bb7539c2cef3..b84a921d293f0b9f97379ba6d35514570b7ffa29 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -52,6 +52,45 @@ config CRYPTO_DEV_PADLOCK_SHA If unsure say M. The compiled module will be called padlock-sha. +config CRYPTO_DEV_ZHAOXIN + tristate "Support for Zhaoxin ACE" + depends on X86 && !UML + help + Some Zhaoxin processors come with an integrated crypto engine + (so called Zhaoxin ACE, Advanced Cryptography Engine) + that provides instructions for very fast cryptographic + operations with supported algorithms. + + The instructions are used only when the CPU supports them. + Otherwise software encryption is used. + +config CRYPTO_DEV_ZHAOXIN_AES + tristate "Zhaoxin ACE driver for AES algorithm" + depends on CRYPTO_DEV_ZHAOXIN + select CRYPTO_BLKCIPHER + select CRYPTO_AES + help + Use Zhaoxin ACE for AES algorithm. + + Available in Zhaoxin CPUs. + + If unsure say M. The compiled module will be + called zhaoxin-aes. + +config CRYPTO_DEV_ZHAOXIN_SHA + tristate "Zhaoxin ACE driver for SHA1 and SHA256 algorithms" + depends on CRYPTO_DEV_ZHAOXIN + select CRYPTO_HASH + select CRYPTO_SHA1 + select CRYPTO_SHA256 + help + Use Zhaoxin ACE for SHA1/SHA256 algorithms. + + Available in Zhaoxin processors. 
+
+	  If unsure say M. The compiled module will be
+	  called zhaoxin-sha.
+
 config CRYPTO_DEV_GEODE
 	tristate "Support for the Geode LX AES engine"
 	depends on X86_32 && PCI
@@ -796,5 +835,6 @@ config CRYPTO_DEV_SA2UL
 source "drivers/crypto/aspeed/Kconfig"
 source "drivers/crypto/starfive/Kconfig"
+source "drivers/crypto/montage/Kconfig"
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index d859d6a5f3a45439c6e14bb19d6240e121c9ac62..5247d2bf09ce63d3d1ec8f293c4c3893b074db1e 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -31,6 +31,8 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
+obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_AES) += zhaoxin-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_SHA) += zhaoxin-sha.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
 obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
@@ -51,3 +53,4 @@ obj-y += hisilicon/
 obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
 obj-y += intel/
 obj-y += starfive/
+obj-y += montage/
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 32268e239bf15e49c9749e3fd5e9640ea09056a0..7115bf3028d48ac974f61756128074dfc31e306c 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -46,6 +46,53 @@ config CRYPTO_DEV_SP_PSP
 	  along with software-based Trusted Execution Environment (TEE) to
 	  enable third-party trusted applications.
+config HYGON_GM
+	bool "Hygon GM (sm2/sm3/sm4) Interface"
+	default y
+	depends on CRYPTO_DEV_CCP_CRYPTO && X86_64
+	select CRYPTO_SM3_GENERIC
+	help
+	  Support for the Hygon GM (SM2/SM3/SM4) algorithms in the CCP driver.
+
+config HYGON_PSP2CPU_CMD
+	bool "Hygon PSP2CPU Command Interface"
+	default y
+	depends on CRYPTO_DEV_SP_PSP
+	help
+	  Support for the Hygon PSP-to-CPU command interface.
+
+config TDM_DEV_HYGON
+	bool "Hygon TDM Interface"
+	default y
+	depends on CRYPTO_DEV_CCP_DD
+	depends on HYGON_PSP2CPU_CMD
+	help
+	  Support for the Hygon TDM driver.
+
+config CRYPTO_DEV_HCT
+	tristate "HCT CCP device"
+	default m
+	depends on X86_64
+	select VFIO_MDEV
+	help
+	  Provides the Hygon crypto technology CCP device driver. It supports
+	  virtualized CCP devices based on mediated devices, multi-process use
+	  and virtual machines, and the host no-IOMMU mode memory encryption
+	  function; hct.ko can be compiled even when the mdev module is
+	  disabled.
+	  If you choose 'M' here, the module will be called hct.
+
+config TDM_KERNEL_GUARD
+	tristate "Hygon TDM kernel guard"
+	default y
+	depends on TDM_DEV_HYGON
+	depends on CRYPTO_DEV_CCP_DD
+	depends on CRYPTO_SM3
+	help
+	  Key parts of the kernel are protected by TDM technology: the SCT and
+	  IDT are protected by default, and further targets can be added later
+	  as required.
+ config CRYPTO_DEV_CCP_DEBUGFS bool "Enable CCP Internals in DebugFS" default n diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index aa0ba2d17e1e2202bf6115e0774bf63a802115cb..74fe6611bac40f6f0f2f7e933cc07018dccf3c42 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -5,14 +5,23 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-ops.o \ ccp-dev-v3.o \ ccp-dev-v5.o \ - ccp-dmaengine.o + ccp-dmaengine.o \ + hygon/ccp-dev-v5.o ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o -ccp-$(CONFIG_PCI) += sp-pci.o +ccp-$(CONFIG_PCI) += sp-pci.o \ + hygon/sp-pci.o ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ sev-dev.o \ tee-dev.o \ platform-access.o \ - dbc.o + dbc.o \ + hygon/psp-dev.o \ + hygon/csv-dev.o \ + hygon/ring-buffer.o \ + hygon/vpsp.o + +ccp-$(CONFIG_TDM_DEV_HYGON) += hygon/tdm-dev.o +obj-$(CONFIG_CRYPTO_DEV_HCT) += hygon/hct.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ @@ -23,3 +32,12 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o +obj-$(CONFIG_TDM_KERNEL_GUARD) += hygon/tdm-kernel-guard.o + +$(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h +$(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h + +ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ + ccp-crypto-sm3-hygon.o \ + ccp-crypto-sm4-hygon.o \ + ccp_sm2_sign.asn1.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index ecd58b38c46eed8fa79e9028f7e2be097f85463b..3d22fbabc815a5d452bcf782617ec58e1db81866 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -39,6 +39,10 @@ static unsigned int rsa_disable; module_param(rsa_disable, uint, 0444); MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value"); +static unsigned int sm_disable; +module_param(sm_disable, uint, 0444); +MODULE_PARM_DESC(sm_disable, "Disable use of SM2/SM3/SM4 - any non-zero value"); + /* List heads for the supported algorithms */ static LIST_HEAD(hash_algs); static LIST_HEAD(skcipher_algs); @@ -322,6 +326,25 @@ static int ccp_register_algs(void) { int ret; +#ifdef CONFIG_HYGON_GM + if (!sm_disable && boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + ret = ccp_register_sm2_hygon_algs(&akcipher_algs); + if (ret) + return ret; + + ret = ccp_register_sm3_hygon_algs(&hash_algs); + if (ret) + return ret; + + ret = ccp_register_sm4_hygon_algs(&skcipher_algs); + if (ret) + return ret; + + /* Return on hygon platform */ + return 0; + } +#endif + if (!aes_disable) { ret = ccp_register_aes_algs(&skcipher_algs); if (ret) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..25c9a49f7d2236059517a5b62f18375dc6a53f8b --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -0,0 +1,1155 @@ +/* + * Hygon Cryptographic Coprocessor (CCP) SM2 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" +#include "ccp_sm2_sign.asn1.h" + +static const u8 sm2_ecc_p[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +}; + +static const u8 sm2_ecc_a[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, +}; + +static const u8 sm2_ecc_b[CCP_SM2_OPERAND_LEN] = { + 0x28, 0xE9, 0xFA, 0x9E, 0x9D, 0x9F, 0x5E, 0x34, + 0x4D, 0x5A, 0x9E, 0x4B, 0xCF, 0x65, 0x09, 0xA7, + 0xF3, 0x97, 0x89, 0xF5, 0x15, 0xAB, 0x8F, 0x92, + 0xDD, 0xBC, 0xBD, 0x41, 0x4D, 0x94, 0x0E, 0x93, +}; + +static const u8 sm2_ecc_n_sub_1[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x72, 0x03, 0xDF, 0x6B, 0x21, 0xC6, 0x05, 0x2B, + 0x53, 0xBB, 0xF4, 0x09, 0x39, 0xD5, 0x41, 0x22, +}; + +static const u8 sm2_ecc_gx[CCP_SM2_OPERAND_LEN] = { + 0x32, 0xC4, 0xAE, 0x2C, 0x1F, 0x19, 0x81, 0x19, + 0x5F, 0x99, 0x04, 0x46, 0x6A, 0x39, 0xC9, 0x94, + 0x8F, 0xE3, 0x0B, 0xBF, 0xF2, 0x66, 0x0B, 0xE1, + 0x71, 0x5A, 0x45, 0x89, 0x33, 0x4C, 0x74, 0xC7, +}; + +static const u8 sm2_ecc_gy[CCP_SM2_OPERAND_LEN] = { + 0xBC, 0x37, 0x36, 0xA2, 0xF4, 0xF6, 0x77, 0x9C, + 0x59, 0xBD, 0xCE, 0xE3, 0x6B, 0x69, 0x21, 0x53, + 0xD0, 0xA9, 0x87, 0x7C, 0xC6, 0x2A, 0x47, 0x40, + 0x02, 0xDF, 0x32, 0xE5, 0x21, 0x39, 0xF0, 0xA0, +}; + +struct ccp_sm2_verify_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* compressed message */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* input data r */ + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* input data s */ + u8 operand_px[CCP_SM2_OPERAND_LEN]; /* x of public key */ + u8 operand_py[CCP_SM2_OPERAND_LEN]; /* y of public key */ +}; + +struct ccp_sm2_lp_src { + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ + u8 operand_px[CCP_SM2_OPERAND_LEN]; /* x of public key */ + u8 operand_py[CCP_SM2_OPERAND_LEN]; /* y of public key */ +}; + +struct ccp_sm2_kg_src { + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ +}; + +struct ccp_sm2_sign_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* compressed message */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* private key */ + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ +}; + +struct ccp_sm2_mmul_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* mulplicand */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* mulplicator */ +}; + +struct ccp_sm2_dst { + union { + u8 result[CCP_SM2_OPERAND_LEN]; + u32 status; + } u; + u8 result_r[CCP_SM2_OPERAND_LEN]; + u8 result_s[CCP_SM2_OPERAND_LEN]; + u8 result_t[CCP_SM2_OPERAND_LEN]; +}; + +struct sm2_signature_ctx { + const u8 *sig_r; + const u8 *sig_s; + size_t r_len; + size_t s_len; +}; + +int ccp_sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct sm2_signature_ctx *sig = context; + + if (!value || !vlen) + return -EINVAL; + + sig->sig_r = value; + sig->r_len = vlen; + + if (!sig->sig_r) + return -ENOMEM; + + return 0; +} + +int ccp_sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct sm2_signature_ctx *sig = context; + + if (!value || !vlen) + return -EINVAL; + + sig->sig_s = value; + sig->s_len = vlen; + + if (!sig->sig_s) 
+		return -ENOMEM;
+
+	return 0;
+}
+
+static bool ccp_sm2_is_zero(const u64 *data, u32 count)
+{
+	u32 i;
+
+	for (i = 0; i < count; i++) {
+		if (data[i])
+			return false;
+	}
+
+	return true;
+}
+
+/* Return:
+ *  1: a > b
+ * -1: a < b
+ *  0: a = b
+ */
+static int ccp_sm2_fp_cmp(const u64 *a, const u64 *b, u32 count)
+{
+	u64 a_cpu, b_cpu;
+	u32 i;
+
+	for (i = 0; i < count; i++) {
+		a_cpu = be64_to_cpu(a[i]);
+		b_cpu = be64_to_cpu(b[i]);
+		if (a_cpu > b_cpu)
+			return 1;
+		else if (a_cpu < b_cpu)
+			return -1;
+	}
+
+	return 0;
+}
+
+/* a = a + b */
+static void ccp_sm2_fp_add(u64 *a, const u64 *b, u32 count)
+{
+	u64 a_cpu, b_cpu, c_cpu, d_cpu;
+	u32 carry = 0;
+	s32 i;
+
+	for (i = count - 1; i >= 0; i--) {
+		a_cpu = be64_to_cpu(a[i]);
+		b_cpu = be64_to_cpu(b[i]);
+		c_cpu = a_cpu + b_cpu;
+		d_cpu = c_cpu + carry;
+		a[i] = cpu_to_be64(d_cpu);
+
+		if (c_cpu < a_cpu)
+			carry = 1;
+		else if (carry && !d_cpu)
+			carry = 1;
+		else
+			carry = 0;
+	}
+}
+
+/* a = -a */
+static void ccp_sm2_fp_neg(u64 *a, u32 count)
+{
+	u64 a_cpu, c_cpu;
+	s32 i;
+
+	for (i = 0; i < count; i++)
+		a[i] = ~a[i];
+
+	/* propagate the +1 carry; stop as soon as it no longer wraps */
+	for (i = count - 1; i >= 0; i--) {
+		a_cpu = be64_to_cpu(a[i]);
+		c_cpu = a_cpu + 1;
+		a[i] = cpu_to_be64(c_cpu);
+
+		if (a_cpu < c_cpu)
+			break;
+	}
+}
+
+/* a = a - b (b is negated in place) */
+static void ccp_sm2_fp_sub(u64 *a, u64 *b, u32 count)
+{
+	ccp_sm2_fp_neg(b, count);
+	ccp_sm2_fp_add(a, b, count);
+}
+
+/* a and tmp must be 64B, b and c must be 32B
+ * a = b * c
+ */
+static void ccp_sm2_fp_mmul32(u8 *a, const u32 *b, const u32 *c, u8 *tmp)
+{
+	u64 b_cpu, c_cpu, m_cpu;
+	u32 rem_cpu;
+	u32 *base, *m_cur;
+	int i, j, iter;
+
+	memset(a, 0, CCP_SM2_MMUL_LEN);
+
+	iter = 7;
+	base = (u32 *)(tmp + CCP_SM2_MMUL_LEN - sizeof(u32));
+	for (i = iter; i >= 0; i--) {
+		b_cpu = be32_to_cpu(b[i]);
+		memset(tmp, 0, CCP_SM2_MMUL_LEN);
+
+		rem_cpu = 0;
+		m_cur = base;
+		for (j = iter; j >= 0; j--) {
+			c_cpu = be32_to_cpu(c[j]);
+
+			m_cpu = b_cpu * c_cpu + rem_cpu;
+			rem_cpu = (u32)(m_cpu >> 32);
+			*m_cur = cpu_to_be32((u32)(m_cpu));
+			m_cur--;
+		}
+		*m_cur = cpu_to_be32(rem_cpu);
+		ccp_sm2_fp_add((u64 *)a, (u64 *)tmp,
+				CCP_SM2_MMUL_LEN / sizeof(u64));
+
+		base--;
+	}
+}
+
+/* mmul, dst and tmp must be 64B; the remainder ends up in mmul[32..63]
+ * high:low mod p
+ * = high*2^256+low mod p
+ * = high*(p+h)+low mod p
+ * = high*h+low mod p
+ * = high*(2^224+2^96-2^64+1)+low mod p
+ * iterating 8 times
+ */
+static void ccp_sm2_fast_mod_p(u8 *mmul, u8 *dst, u8 *tmp)
+{
+	u8 *mmul_high, *mmul_low;
+	u32 count;
+	int i, iter, ret;
+
+	mmul_high = mmul;
+	mmul_low = mmul + CCP_SM2_OPERAND_LEN;
+	count = CCP_SM2_MMUL_LEN / sizeof(u64);
+
+	iter = 8;
+	for (i = 0; i < iter; i++) {
+		/* dst = high * 2^224 */
+		memset(dst, 0, CCP_SM2_MMUL_LEN);
+		memcpy(dst + 4, mmul_high, CCP_SM2_OPERAND_LEN);
+
+		/* dst += high * 2^96 */
+		memset(tmp, 0, CCP_SM2_MMUL_LEN);
+		memcpy(tmp + 20, mmul_high, CCP_SM2_OPERAND_LEN);
+		ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count);
+
+		/* dst -= high * 2^64 */
+		memset(tmp, 0, CCP_SM2_MMUL_LEN);
+		memcpy(tmp + 24, mmul_high, CCP_SM2_OPERAND_LEN);
+		ccp_sm2_fp_sub((u64 *)dst, (u64 *)tmp, count);
+
+		/* dst += high * 1 */
+		memset(tmp, 0, CCP_SM2_MMUL_LEN);
+		memcpy(tmp + 32, mmul_high, CCP_SM2_OPERAND_LEN);
+		ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count);
+
+		/* dst += low */
+		memset(tmp, 0, CCP_SM2_MMUL_LEN);
+		memcpy(tmp + 32, mmul_low, CCP_SM2_OPERAND_LEN);
+		ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count);
+
+		/* copy dst to mmul */
+		memcpy(mmul, dst, CCP_SM2_MMUL_LEN);
+	}
+
+	do {
+		memset(tmp, 0, CCP_SM2_MMUL_LEN);
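+		/*
+		 * Final conditional subtraction: p is right-aligned into the
+		 * 64-byte buffer, then subtracted while mmul >= p.
+		 */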
+ memcpy(tmp + 32, sm2_ecc_p, CCP_SM2_OPERAND_LEN); + ret = ccp_sm2_fp_cmp( + (u64 *)mmul, (u64 *)tmp, + CCP_SM2_MMUL_LEN / sizeof(u64)); + if (ret < 0) + break; + + ccp_sm2_fp_sub((u64 *)mmul, (u64 *)tmp, count); + } while (1); +} + +static int ccp_sm2_is_privkey_valid(const u8 *priv_key) +{ + u64 last, last_cpu; + bool zero; + int ret; + + /* private key is satisfied with(1, n-1) */ + zero = ccp_sm2_is_zero((const u64 *)priv_key, + CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64) - 1); + if (zero) { + last = *(const u64 *) + (priv_key + CCP_SM2_PRIVATE_KEY_LEN - sizeof(u64)); + last_cpu = be64_to_cpu(last); + if (last_cpu <= 1) + return -EINVAL; + } + + ret = ccp_sm2_fp_cmp((const u64 *)priv_key, + (const u64 *)sm2_ecc_n_sub_1, + CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + return 0; +} + +static int ccp_sm2_setprivkey(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; + int ret; + + if (!key || keylen != CCP_SM2_PRIVATE_KEY_LEN) + return -EINVAL; + + ret = ccp_sm2_is_privkey_valid(key); + if (ret < 0) + return ret; + + memcpy(sm2->pri_key, key, CCP_SM2_PRIVATE_KEY_LEN); + sm2->pri_key_len = CCP_SM2_PRIVATE_KEY_LEN; + + return 0; +} + +static int ccp_sm2_post_cmd(struct ccp_sm2_req_ctx *rctx, + u32 src_size, enum ccp_sm2_mode mode, u32 rand) +{ + struct akcipher_request *req = rctx->req; + struct ccp_sm2_engine *sm2 = NULL; + int ret; + + sg_init_one(&rctx->src_sg, rctx->src, src_size); + memset(rctx->dst, 0, CCP_SM2_DST_SIZE); + sg_init_one(&rctx->dst_sg, rctx->dst, CCP_SM2_DST_SIZE); + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM2; + + sm2 = &rctx->cmd.u.sm2; + sm2->mode = mode; + sm2->rand = rand; /* whether read operand_k from trng */ + sm2->src = &rctx->src_sg; + sm2->src_len = src_size; + sm2->dst = &rctx->dst_sg; + sm2->dst_len = CCP_SM2_DST_SIZE; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_sm2_pubkey_strict_valid(const u8 *px, const u8 *py) +{ + u64 buf[CCP_SM2_OPERAND_LEN / sizeof(u64)]; + int ret1, ret2; + + /* private key is 1, corresponding public key is invalid */ + ret1 = memcmp(px, sm2_ecc_gx, CCP_SM2_OPERAND_LEN); + ret2 = memcmp(py, sm2_ecc_gy, CCP_SM2_OPERAND_LEN); + if (!ret1 && !ret2) + return -EINVAL; + + /* private key is n - 1, corresponding public key is invalid */ + memcpy(buf, py, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add(buf, (const u64 *)sm2_ecc_gy, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + ret2 = memcmp(buf, sm2_ecc_p, CCP_SM2_OPERAND_LEN); + if (!ret1 && !ret2) + return -EINVAL; + + return 0; +} + +static int ccp_sm2_is_pubkey_valid(struct ccp_sm2_req_ctx *rctx, bool strict) +{ + const u8 *px, *py; + u8 *tmp; + bool zero; + int ret; + + px = rctx->src + CCP_SM2_LP_SRC_SIZE; + py = px + CCP_SM2_OPERAND_LEN; + + zero = ccp_sm2_is_zero((u64 *)px, CCP_SM2_PUBLIC_KEY_LEN / sizeof(u64)); + if (zero) + return -EINVAL; + + /* x < p */ + ret = ccp_sm2_fp_cmp((u64 *)px, (const u64 *)sm2_ecc_p, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + /* y < p */ + ret = ccp_sm2_fp_cmp((u64 *)py, (const u64 *)sm2_ecc_p, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + if (strict) { + ret = ccp_sm2_pubkey_strict_valid(px, py); + if (ret < 0) + return ret; + } + + /* check whether y^2 = x^3 + ax + b */ + tmp = rctx->dst + CCP_SM2_MMUL_LEN; + /* y * y */ + 
ccp_sm2_fp_mmul32(rctx->dst, (u32 *)py, (u32 *)py, tmp); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* x * x + a */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)px, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_a, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src, rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* (x * x + a) * x + b */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)rctx->src, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_b, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + + ret = memcmp(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + if (ret) + return -EINVAL; + + /* Because the cofactor of the ECC group is 1, + * the checking that [n]P=O is not required. + */ + + return 0; +} + +static int ccp_sm2_setpubkey(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; + struct ccp_sm2_req_ctx *rctx = NULL; + const unsigned char *cflag = (const unsigned char *)key; + int ret; + + if (!key || keylen < CCP_SM2_PUBLIC_KEY_LEN) + return -EINVAL; + + /* When the length of sm2 public key is 65, + * content of key should be 04 || X || Y, from GM/T0009-2012. + */ + if (keylen > CCP_SM2_PUBLIC_KEY_LEN) { + if (*cflag != 0x04) + return -EINVAL; + key = key + 1; + } + + /* check whether public key is valid */ + rctx = kmalloc(sizeof(*rctx), GFP_KERNEL); + if (!rctx) + return -ENOMEM; + + memcpy(rctx->src + CCP_SM2_LP_SRC_SIZE, key, CCP_SM2_PUBLIC_KEY_LEN); + ret = ccp_sm2_is_pubkey_valid(rctx, true); + kfree(rctx); + if (ret < 0) + return ret; + + /* public key is valid */ + memcpy(sm2->pub_key, key, CCP_SM2_PUBLIC_KEY_LEN); + sm2->pub_key_len = CCP_SM2_PUBLIC_KEY_LEN; + + return 0; +} + +static unsigned int ccp_sm2_maxsize(struct crypto_akcipher *tfm) +{ + return CCP_SM2_DST_SIZE; +} + +static int ccp_sm2_compute_c3(struct crypto_shash *shash, + struct scatterlist *sg, u32 mlen, + u8 *c3, const u8 *x2, const u8 *y2) +{ + unsigned int len, remain; + int ret; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + ret = crypto_shash_init(sdesc); + if (ret < 0) + return ret; + + /* update X2 */ + ret = crypto_shash_update(sdesc, x2, CCP_SM2_OPERAND_LEN); + if (ret < 0) + return ret; + + /* update M */ + remain = mlen; + while (sg) { + len = sg->length; + if (len > remain) + len = remain; + ret = crypto_shash_update(sdesc, (u8 *)sg_virt(sg), len); + if (ret < 0) + return ret; + + remain -= len; + if (!remain) + break; + + sg = sg_next(sg); + } + + /* ccp_sm2_encrypt should have checked length */ + if (unlikely(!sg)) + return -EINVAL; + + /* update Y2 */ + ret = crypto_shash_finup(sdesc, y2, CCP_SM2_OPERAND_LEN, c3); + + return ret; +} + +static bool ccp_sm2_msg_xor_t(u8 *msg, const u8 *t, u32 len) +{ + u64 *msg_cur, *msg_last, *t_cur; + u32 zero_cnt = 0; + u32 rem; + int i; + + msg_cur = (u64 *)msg; + t_cur = (u64 *)t; + msg_last = msg_cur + (len / sizeof(u64)); + while (msg_cur != msg_last) { + if (likely(*t_cur)) + *msg_cur = *msg_cur ^ *t_cur; + else + zero_cnt += sizeof(u64); + + 
msg_cur++; + t_cur++; + } + + msg = (u8 *)msg_cur; + t = (const u8 *)t_cur; + rem = len % sizeof(u64); + for (i = 0; i < rem; i++) { + if (likely(t[i])) + msg[i] = msg[i] ^ t[i]; + else + zero_cnt++; + } + + return zero_cnt == len; +} + +static int ccp_sm2_kdf_xor(struct crypto_shash *shash, + struct scatterlist *src, u32 src_offset, u32 src_len, + struct scatterlist *dst, u32 dst_offset, + u8 *x2_y2_ct, bool *all_zero, struct ccp_sm2_req_ctx *rctx) +{ + u32 *be_ct = NULL; + u32 ct, len, remain; + bool zero; + int ret = 0; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + *all_zero = true; + ct = 1; + be_ct = (u32 *)(x2_y2_ct + CCP_SM2_PUBLIC_KEY_LEN); + remain = src_len; + while (remain) { + len = SM3_DIGEST_SIZE; + if (len > remain) + len = remain; + *be_ct = cpu_to_be32(ct); + ret = crypto_shash_digest(sdesc, x2_y2_ct, + CCP_SM2_PUBLIC_KEY_LEN + sizeof(*be_ct), rctx->src); + if (ret < 0) + break; + + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, src, + src_offset, len, 0); + zero = ccp_sm2_msg_xor_t(rctx->src + SM3_DIGEST_SIZE, + rctx->src, len); + if (zero == false) + *all_zero = false; + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, dst, + dst_offset, len, 1); + + remain -= len; + src_offset += len; + dst_offset += len; + ct++; + } + + return ret; +} + +static void ccp_sm2_enc_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + /* C2 = M ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, 0, req->src_len, + req->dst, CCP_SM2_ENCRYPT_EXT_LEN, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (unlikely(all_zero)) { + ret = -EAGAIN; + goto e_hash; + } + + /* C3 */ + ret = ccp_sm2_compute_c3(shash, req->src, req->src_len, rctx->src, + dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* save C3 */ + scatterwalk_map_and_copy(rctx->src, req->dst, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 1); + +e_hash: + crypto_free_shash(shash); + +e_complete: + req->base.complete(req->base.data, ret); +} + +static void ccp_sm2_enc_lp(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int ret; + + /* save C1 */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_PUBLIC_KEY_LEN, 1); + /* operand_k used by kg is placed in dst->result_t */ + memcpy(src->operand_k, dst->result_t, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + rctx->phase = CCP_SM2_ENC_PH_LP; + + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + if (ret != -EBUSY && ret != -EINPROGRESS) + req->base.complete(req->base.data, ret); +} + +static int ccp_sm2_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct 
ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + int nents; + int ret; + + if (!ctx->u.sm2.pub_key_len) + return -ENOKEY; + + if (!req->src_len || + req->dst_len < CCP_SM2_ENCRYPT_EXT_LEN + req->src_len) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if (nents < 0) + return -EINVAL; + + rctx->req = req; + rctx->phase = CCP_SM2_ENC_PH_KG; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_KG_SRC_SIZE, CCP_SM2_MODE_KG, 1); + + return ret; +} + +static void ccp_sm2_dec_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + /* M' = C2 ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, CCP_SM2_ENCRYPT_EXT_LEN, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, req->dst, 0, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (all_zero) { + ret = -EBADMSG; + goto e_hash; + } + + /* u */ + ret = ccp_sm2_compute_c3(shash, req->dst, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, + rctx->src, dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* load and compare C3 */ + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, req->src, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 0); + ret = memcmp(rctx->src, rctx->src + SM3_DIGEST_SIZE, SM3_DIGEST_SIZE); + if (ret) + ret = -EBADMSG; + +e_hash: + crypto_free_shash(shash); + +e_complete: + /* clear private key, plain, and dC1 */ + memset(rctx->src, 0, CCP_SM2_OPERAND_LEN * 2); + memset(dst, 0, CCP_SM2_DST_SIZE); + req->base.complete(req->base.data, ret); +} + +static int ccp_sm2_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len <= (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE)) + return -EINVAL; + + if (req->dst_len < req->src_len - CCP_SM2_ENCRYPT_EXT_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if (nents < 0) + return -EINVAL; + + /* load C1 */ + scatterwalk_map_and_copy(rctx->src + CCP_SM2_LP_SRC_SIZE, + req->src, 0, CCP_SM2_PUBLIC_KEY_LEN, 0); + ret = ccp_sm2_is_pubkey_valid(rctx, false); + if (ret < 0) + return -EBADMSG; + + /* do kP */ + memcpy(src->operand_k, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + memcpy(src->operand_px, rctx->src + CCP_SM2_LP_SRC_SIZE, + CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, rctx->src + CCP_SM2_LP_SRC_SIZE + + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + rctx->req = req; + rctx->phase = CCP_SM2_DEC_PH_LP; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + + return ret; +} + +static int ccp_sm2_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, 
CCP_SM2_OPERAND_LEN); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN, 0); + memcpy(src->operand_d, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_SIGN_PH_SIGN; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_SIGN_SRC_SIZE, + CCP_SM2_MODE_SIGN, 1); + + return ret; +} + +static int ccp_sm2_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_verify_src *src = (struct ccp_sm2_verify_src *)rctx->src; + int siglen; + int nents; + int ret; + struct sm2_signature_ctx sig; + unsigned char *buffer; + + if (!ctx->u.sm2.pub_key_len) + return -ENOKEY; + + if (req->src_len == CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with non-encoded signature from user space */ + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN * 3, 0); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + + return ret; + } else if (req->src_len < CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with usage like sm2 test of testmgr */ + siglen = req->src_len; + if (req->dst_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + } else { + /* deal with der encoding signature from user space */ + siglen = req->src_len - CCP_SM2_OPERAND_LEN; + } + + buffer = kmalloc(siglen + CCP_SM2_OPERAND_LEN, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + sg_pcopy_to_buffer(req->src, + sg_nents_for_len(req->src, siglen + CCP_SM2_OPERAND_LEN), + buffer, siglen + CCP_SM2_OPERAND_LEN, 0); + + sig.sig_r = NULL; + sig.sig_s = NULL; + ret = asn1_ber_decoder(&ccp_sm2_sign_decoder, &sig, + buffer, siglen); + + if (ret) + goto error; + + memcpy(src->operand_e, buffer + siglen, CCP_SM2_OPERAND_LEN); + + if (sig.r_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_d, sig.sig_r + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_d, sig.sig_r, CCP_SM2_OPERAND_LEN); + + if (sig.s_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_k, sig.sig_s + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_k, sig.sig_s, CCP_SM2_OPERAND_LEN); + + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + +error: + kfree(buffer); + return ret; +} + +static int ccp_sm2_verify_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (dst->u.status) + return -EBADMSG; + + return 0; +} + +static int ccp_sm2_sign_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + struct akcipher_request *req = rctx->req; + + if (unlikely(dst->u.status)) + return -EAGAIN; + + /* save signature */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_OPERAND_LEN * 2, 1); + /* clear private key */ + memset(src->operand_d, 0, 
CCP_SM2_PRIVATE_KEY_LEN);
+
+ return 0;
+}
+
+static int ccp_sm2_enc_kg_handle(struct ccp_sm2_req_ctx *rctx)
+{
+ struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst;
+
+ /* the random operand_k was not within [1, n-1], try again */
+ if (unlikely(dst->u.status))
+ return -EAGAIN;
+
+ INIT_WORK(&rctx->work, ccp_sm2_enc_lp);
+ schedule_work(&rctx->work);
+
+ return -EINPROGRESS;
+}
+
+static int ccp_sm2_enc_lp_handle(struct ccp_sm2_req_ctx *rctx)
+{
+ struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst;
+
+ if (unlikely(dst->u.status))
+ return -EIO;
+
+ INIT_WORK(&rctx->work, ccp_sm2_enc_compute);
+ schedule_work(&rctx->work);
+
+ return -EINPROGRESS;
+}
+
+static int ccp_sm2_dec_lp_handle(struct ccp_sm2_req_ctx *rctx)
+{
+ struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst;
+
+ if (unlikely(dst->u.status))
+ return -EIO;
+
+ INIT_WORK(&rctx->work, ccp_sm2_dec_compute);
+ schedule_work(&rctx->work);
+
+ return -EINPROGRESS;
+}
+
+static int ccp_sm2_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct akcipher_request *req =
+ container_of(async_req, struct akcipher_request, base);
+ struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req);
+
+ if (ret)
+ return ret;
+
+ switch (rctx->phase) {
+ case CCP_SM2_SIGN_PH_SIGN:
+ ret = ccp_sm2_sign_handle(rctx);
+ break;
+ case CCP_SM2_VERIFY_PH_VERIFY:
+ ret = ccp_sm2_verify_handle(rctx);
+ break;
+ case CCP_SM2_ENC_PH_KG:
+ ret = ccp_sm2_enc_kg_handle(rctx);
+ break;
+ case CCP_SM2_ENC_PH_LP:
+ ret = ccp_sm2_enc_lp_handle(rctx);
+ break;
+ case CCP_SM2_DEC_PH_LP:
+ ret = ccp_sm2_dec_lp_handle(rctx);
+ break;
+ }
+
+ return ret;
+}
+
+static int ccp_sm2_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ akcipher_set_reqsize(tfm, sizeof(struct ccp_sm2_req_ctx));
+ ctx->complete = ccp_sm2_complete;
+
+ return 0;
+}
+
+static void ccp_sm2_exit_tfm(struct crypto_akcipher *tfm)
+{
+}
+
+static struct akcipher_alg ccp_sm2_defaults = {
+ .sign = ccp_sm2_sign,
+ .verify = ccp_sm2_verify,
+ .encrypt = ccp_sm2_encrypt,
+ .decrypt = ccp_sm2_decrypt,
+ .set_pub_key = ccp_sm2_setpubkey,
+ .set_priv_key = ccp_sm2_setprivkey,
+ .max_size = ccp_sm2_maxsize,
+ .init = ccp_sm2_init_tfm,
+ .exit = ccp_sm2_exit_tfm,
+ .base = {
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_ctxsize = sizeof(struct ccp_ctx),
+ .cra_priority = CCP_CRA_PRIORITY,
+ .cra_module = THIS_MODULE,
+ },
+};
+
+struct ccp_sm2_def {
+ unsigned int version;
+ const char *name;
+ const char *driver_name;
+ struct akcipher_alg *alg_defaults;
+};
+
+static struct ccp_sm2_def sm2_algs[] = {
+ {
+ .version = CCP_VERSION(5, 0),
+ .name = "sm2",
+ .driver_name = "sm2-ccp",
+ .alg_defaults = &ccp_sm2_defaults,
+ }
+};
+
+static int ccp_register_sm2_hygon_alg(struct list_head *head,
+ const struct ccp_sm2_def *def)
+{
+ struct ccp_crypto_akcipher_alg *ccp_alg;
+ struct akcipher_alg *alg;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ alg = &ccp_alg->alg;
+ *alg = *def->alg_defaults;
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->driver_name);
+
+ ret = crypto_register_akcipher(alg);
+ if (ret) {
+ pr_err("%s akcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
+
+int ccp_register_sm2_hygon_algs(struct 
list_head *head)
+{
+ int i, ret;
+ unsigned int ccpversion = ccp_version();
+
+ for (i = 0; i < ARRAY_SIZE(sm2_algs); i++) {
+ if (sm2_algs[i].version > ccpversion)
+ continue;
+ ret = ccp_register_sm2_hygon_alg(head, &sm2_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c
new file mode 100644
index 0000000000000000000000000000000000000000..46ddbc1f14a8ec48df3b4e447d99a9c4788272db
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c
@@ -0,0 +1,488 @@
+/*
+ * Hygon Cryptographic Coprocessor (CCP) SM3 crypto API support
+ *
+ * Copyright (C) 2022 Hygon Info Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "ccp-crypto.h"
+
+static int ccp_sm3_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct ahash_request *req = ahash_request_cast(async_req);
+ struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req);
+
+ if (ret)
+ goto e_free;
+
+ rctx->msg_bits += (rctx->hash_cnt << 3);
+ if (rctx->hash_rem) {
+ /* save remaining data to buffer */
+ unsigned int offset = rctx->nbytes - rctx->hash_rem;
+
+ scatterwalk_map_and_copy(rctx->buf, rctx->src,
+ offset, rctx->hash_rem, 0);
+ rctx->buf_count = rctx->hash_rem;
+ } else {
+ rctx->buf_count = 0;
+ }
+
+ if (rctx->final) {
+ if (req->result)
+ memcpy(req->result, rctx->ctx, SM3_DIGEST_SIZE);
+
+ memset(rctx->ctx, 0, SM3_DIGEST_SIZE);
+ }
+
+e_free:
+ sg_free_table(&rctx->data_sg);
+
+ return ret;
+}
+
+static int ccp_do_sm3_update(struct ahash_request *req, unsigned int nbytes,
+ unsigned int final)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req);
+ struct scatterlist *sg = req->src;
+ struct ccp_sm3_engine *sm3 = NULL;
+ unsigned int sg_count;
+ gfp_t gfp;
+ u64 len, msg_bits = 0;
+ int nents;
+ int ret;
+
+ /* the length of src must be checked here; otherwise a NULL
+ * pointer dereference may occur in ccp_sm3_complete
+ */
+ if (nbytes) {
+ nents = sg_nents_for_len(req->src, nbytes);
+ if (nents < 0)
+ return -EINVAL;
+ }
+
+ len = (u64)rctx->buf_count + (u64)nbytes;
+ if (len <= SM3_BLOCK_SIZE) {
+ scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
+ 0, nbytes, 0);
+ rctx->buf_count += nbytes;
+ if (!final)
+ return 0;
+
+ sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+ sg = &rctx->buf_sg;
+ } else {
+ gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
+ GFP_KERNEL : GFP_ATOMIC; + + if (rctx->buf_count) { + /* build the scatterlist table: (buffer and input data) */ + sg_count = sg_nents(req->src) + 1; + ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp); + if (ret) + return ret; + + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + sg = ccp_crypto_sg_table_add( + &rctx->data_sg, &rctx->buf_sg); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg = ccp_crypto_sg_table_add(&rctx->data_sg, + req->src); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg_mark_end(sg); + + sg = rctx->data_sg.sgl; + } else { + sg = req->src; + } + } + + rctx->final = final; + if (final) { + rctx->hash_rem = 0; + rctx->hash_cnt = len; + msg_bits = rctx->msg_bits + (len << 3); + } else { + rctx->hash_rem = len & (SM3_BLOCK_SIZE - 1); + rctx->hash_cnt = len - rctx->hash_rem; + rctx->src = req->src; + rctx->nbytes = nbytes; + } + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM3; + + sm3 = &rctx->cmd.u.sm3; + sm3->type = CCP_SM3_TYPE_256; + sm3->ctx = &rctx->ctx_sg; + sm3->ctx_len = SM3_DIGEST_SIZE; + sm3->src = sg; + sm3->src_len = rctx->hash_cnt; + sm3->first = rctx->msg_bits ? 0 : 1; + sm3->final = final; + sm3->msg_bits = msg_bits; + if (final && ctx->u.sm3.key_len) { + sm3->opad = &ctx->u.sm3.opad_sg; + sm3->opad_len = SM3_BLOCK_SIZE; + } + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; + +e_free: + sg_free_table(&rctx->data_sg); + + return ret; +} + +static int ccp_sm3_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + + if ((crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) && + (!ctx->u.sm3.key_len)) + return -ENOKEY; + + memset(rctx, 0, sizeof(*rctx)); + if (ctx->u.sm3.key_len) { + /* buffer the HMAC key for first update */ + memcpy(rctx->buf, ctx->u.sm3.ipad, SM3_BLOCK_SIZE); + rctx->buf_count = SM3_BLOCK_SIZE; + } + + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + + return 0; +} + +static int ccp_sm3_update(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 0); +} + +static int ccp_sm3_final(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, 0, 1); +} + +static int ccp_sm3_finup(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 1); +} + +static int ccp_sm3_digest(struct ahash_request *req) +{ + int ret; + + ret = ccp_sm3_init(req); + if (unlikely(ret)) + return ret; + + return ccp_sm3_finup(req); +} + +static int ccp_sm3_export(struct ahash_request *req, void *out) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!out) + return -EINVAL; + + /* don't let anything leak to 'out' */ + memset(&state, 0, sizeof(state)); + + state.msg_bits = rctx->msg_bits; + memcpy(state.ctx, rctx->ctx, SM3_DIGEST_SIZE); + state.buf_count = rctx->buf_count; + memcpy(state.buf, rctx->buf, SM3_BLOCK_SIZE); + + /* 'out' may not be aligned so memcpy from local variable */ + memcpy(out, &state, sizeof(state)); + memset(&state, 0, sizeof(state)); + + return 0; +} + +static int ccp_sm3_import(struct ahash_request *req, const void *in) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!in) + return -EINVAL; + + /* 'in' may not be aligned so memcpy to local variable */ + memcpy(&state, in, sizeof(state)); + + memset(rctx, 0, sizeof(*rctx)); + rctx->msg_bits = 
state.msg_bits; + memcpy(rctx->ctx, state.ctx, SM3_DIGEST_SIZE); + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + rctx->buf_count = state.buf_count; + memcpy(rctx->buf, state.buf, SM3_BLOCK_SIZE); + + memset(&state, 0, sizeof(state)); + + return 0; +} + +static int ccp_sm3_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct crypto_shash *shash = ctx->u.sm3.hmac_tfm; + + SHASH_DESC_ON_STACK(sdesc, shash); + + int i, ret; + + /* set to zero until complete */ + ctx->u.sm3.key_len = 0; + if (!key) + return -EINVAL; + + if (!key_len) { + crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + /* clear key area to provide zero padding for keys smaller + * than the block size + */ + memset(ctx->u.sm3.key, 0, SM3_BLOCK_SIZE); + + if (key_len > SM3_BLOCK_SIZE) { + /* must hash the input key */ + sdesc->tfm = shash; + ret = crypto_shash_digest(sdesc, key, key_len, + ctx->u.sm3.key); + if (ret) { + crypto_ahash_set_flags( + tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + key_len = SM3_DIGEST_SIZE; + } else { + memcpy(ctx->u.sm3.key, key, key_len); + } + + for (i = 0; i < SM3_BLOCK_SIZE; i++) { + ctx->u.sm3.ipad[i] = ctx->u.sm3.key[i] ^ HMAC_IPAD_VALUE; + ctx->u.sm3.opad[i] = ctx->u.sm3.key[i] ^ HMAC_OPAD_VALUE; + } + + sg_init_one(&ctx->u.sm3.opad_sg, ctx->u.sm3.opad, SM3_BLOCK_SIZE); + + ctx->u.sm3.key_len = key_len; + + return 0; +} + +static int ccp_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + + ctx->complete = ccp_sm3_complete; + crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sm3_req_ctx)); + + return 0; +} + +static void ccp_sm3_cra_exit(struct crypto_tfm *tfm) +{ +} + +static int ccp_hmac_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); + struct crypto_shash *hmac_tfm; + + hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); + if (IS_ERR(hmac_tfm)) { + pr_warn("could not load driver %s need for HMAC support\n", + alg->child_alg); + return PTR_ERR(hmac_tfm); + } + + ctx->u.sm3.hmac_tfm = hmac_tfm; + + return ccp_sm3_cra_init(tfm); +} + +static void ccp_hmac_sm3_cra_exit(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->u.sm3.hmac_tfm) + crypto_free_shash(ctx->u.sm3.hmac_tfm); + + ccp_sm3_cra_exit(tfm); +} + +struct ccp_sm3_def { + unsigned int version; + const char *name; + const char *drv_name; + enum ccp_sm3_type type; + u32 digest_size; + u32 block_size; +}; + +static struct ccp_sm3_def sm3_algs[] = { + { + .version = CCP_VERSION(5, 0), + .name = "sm3", + .drv_name = "sm3-ccp", + .type = CCP_SM3_TYPE_256, + .digest_size = SM3_DIGEST_SIZE, + .block_size = SM3_BLOCK_SIZE, + }, +}; + +static int ccp_register_hmac_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def, + const struct ccp_crypto_ahash_alg *base_alg) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + /* copy the base algorithm and only change what's necessary */ + *ccp_alg = *base_alg; + INIT_LIST_HEAD(&ccp_alg->entry); + + strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); + + alg = &ccp_alg->alg; + alg->setkey = ccp_sm3_setkey; + + base = &alg->halg.base; + snprintf(base->cra_name, 
CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s", + def->drv_name); + base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK; + base->cra_init = ccp_hmac_sm3_cra_init; + base->cra_exit = ccp_hmac_sm3_cra_exit; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return ret; +} + +static int ccp_register_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct hash_alg_common *halg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->type = def->type; + + alg = &ccp_alg->alg; + alg->init = ccp_sm3_init; + alg->update = ccp_sm3_update; + alg->final = ccp_sm3_final; + alg->finup = ccp_sm3_finup; + alg->digest = ccp_sm3_digest; + alg->export = ccp_sm3_export; + alg->import = ccp_sm3_import; + + halg = &alg->halg; + halg->digestsize = def->digest_size; + halg->statesize = sizeof(struct ccp_sm3_exp_ctx); + + base = &halg->base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + base->cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK; + base->cra_blocksize = def->block_size; + base->cra_ctxsize = sizeof(struct ccp_ctx); + base->cra_priority = CCP_CRA_PRIORITY; + base->cra_init = ccp_sm3_cra_init; + base->cra_exit = ccp_sm3_cra_exit; + base->cra_module = THIS_MODULE; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + ret = ccp_register_hmac_sm3_hygon_alg(head, def, ccp_alg); + + return ret; +} + +int ccp_register_sm3_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm3_algs); i++) { + if (sm3_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm3_hygon_alg(head, &sm3_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..2328a9f87218419522ae52b8189bbb1b572c3532 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c @@ -0,0 +1,306 @@ +/* + * Hygon Cryptographic Coprocessor (CCP) SM4 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" + +enum ccp_sm4_alg_mode { + CCP_SM4_ALG_MODE_ECB = CCP_SM4_MODE_ECB, + CCP_SM4_ALG_MODE_CBC = CCP_SM4_MODE_CBC, + CCP_SM4_ALG_MODE_OFB = CCP_SM4_MODE_OFB, + CCP_SM4_ALG_MODE_CFB = CCP_SM4_MODE_CFB, + CCP_SM4_ALG_MODE_CTR = CCP_SM4_MODE_CTR, + CCP_SM4_ALG_MODE_ECB_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_ECB, + CCP_SM4_ALG_MODE_CBC_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_CBC, + CCP_SM4_ALG_MODE__LAST, +}; + +static int ccp_sm4_complete(struct crypto_async_request *async_req, int ret) +{ + struct skcipher_request *req = skcipher_request_cast(async_req); + struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req); + + if (ret) + return ret; + + if ((ctx->u.sm4.mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) { + memcpy(req->iv, rctx->iv, SM4_BLOCK_SIZE); + memset(rctx->iv, 0, SM4_BLOCK_SIZE); + } + + return 0; +} + +static int ccp_sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + + /* key_len is checked by crypto_ablkcipher_type, + * but key isn't checked + */ + if (!key) + return -EINVAL; + + memcpy(ctx->u.sm4.key, key, SM4_KEY_SIZE); + sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, SM4_KEY_SIZE); + + ctx->u.sm4.key_len = SM4_KEY_SIZE; + + return 0; +} + +static int ccp_sm4_crypt(struct skcipher_request *req, bool encrypt) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req); + struct scatterlist *iv_sg = NULL; + struct ccp_cmd *cmd = NULL; + enum ccp_sm4_alg_mode mode; + enum ccp_sm4_action action; + int ret; + + if (!ctx->u.sm4.key_len) + return -ENOKEY; + + mode = ctx->u.sm4.mode; + if ((mode != CCP_SM4_ALG_MODE_CTR) && + (mode != CCP_SM4_ALG_MODE_OFB) && + (mode != CCP_SM4_ALG_MODE_CFB) && + (req->cryptlen & (SM4_BLOCK_SIZE - 1))) + return -EINVAL; + + if ((mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) { + if (!req->iv) + return -EINVAL; + + memcpy(rctx->iv, req->iv, SM4_BLOCK_SIZE); + iv_sg = &rctx->iv_sg; + sg_init_one(iv_sg, rctx->iv, SM4_BLOCK_SIZE); + } + + cmd = &rctx->cmd; + memset(cmd, 0, sizeof(*cmd)); + INIT_LIST_HEAD(&cmd->entry); + action = encrypt ? CCP_SM4_ACTION_ENCRYPT : CCP_SM4_ACTION_DECRYPT; + if (mode == CCP_SM4_ALG_MODE_CTR) { + cmd->engine = CCP_ENGINE_SM4_CTR; + cmd->u.sm4_ctr.action = action; + cmd->u.sm4_ctr.size = 63; + cmd->u.sm4_ctr.step = 1; + + cmd->u.sm4_ctr.key = &ctx->u.sm4.key_sg; + cmd->u.sm4_ctr.key_len = SM4_KEY_SIZE; + cmd->u.sm4_ctr.iv = iv_sg; + cmd->u.sm4_ctr.iv_len = SM4_BLOCK_SIZE; + + cmd->u.sm4_ctr.src = req->src; + cmd->u.sm4_ctr.dst = req->dst; + cmd->u.sm4_ctr.src_len = req->cryptlen; + + } else { + cmd->engine = CCP_ENGINE_SM4; + cmd->u.sm4.mode = mode & CCP_SM4_MODE_MASK; + cmd->u.sm4.action = action; + if (mode & CCP_SM4_MODE_HS_SEL) + cmd->u.sm4.select = 1; + + cmd->u.sm4.key = &ctx->u.sm4.key_sg; + cmd->u.sm4.key_len = SM4_KEY_SIZE; + cmd->u.sm4.iv = iv_sg; + cmd->u.sm4.iv_len = iv_sg ? 
SM4_BLOCK_SIZE : 0; + + cmd->u.sm4.src = req->src; + cmd->u.sm4.dst = req->dst; + cmd->u.sm4.src_len = req->cryptlen; + } + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_sm4_encrypt(struct skcipher_request *req) +{ + return ccp_sm4_crypt(req, true); +} + +static int ccp_sm4_decrypt(struct skcipher_request *req) +{ + return ccp_sm4_crypt(req, false); +} + +static int ccp_sm4_init_tfm(struct crypto_skcipher *tfm) +{ + struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + + ctx->complete = ccp_sm4_complete; + ctx->u.sm4.mode = alg->mode; + + crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_sm4_req_ctx)); + + return 0; +} + +static const struct skcipher_alg ccp_sm4_defaults = { + .setkey = ccp_sm4_setkey, + .encrypt = ccp_sm4_encrypt, + .decrypt = ccp_sm4_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .init = ccp_sm4_init_tfm, + + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = SM4_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct ccp_ctx), + .base.cra_priority = CCP_CRA_PRIORITY, + .base.cra_module = THIS_MODULE, +}; + +struct ccp_sm4_def { + enum ccp_sm4_alg_mode mode; + unsigned int version; + const char *name; + const char *driver_name; + unsigned int blocksize; + unsigned int ivsize; + const struct skcipher_alg *alg_defaults; +}; + +static struct ccp_sm4_def sm4_algs[] = { + { + .mode = CCP_SM4_ALG_MODE_ECB, + .version = CCP_VERSION(5, 0), + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_ECB_HS, + .version = CCP_VERSION(5, 0), + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CBC, + .version = CCP_VERSION(5, 0), + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CBC_HS, + .version = CCP_VERSION(5, 0), + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_OFB, + .version = CCP_VERSION(5, 0), + .name = "ofb(sm4)", + .driver_name = "ofb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CFB, + .version = CCP_VERSION(5, 0), + .name = "cfb(sm4)", + .driver_name = "cfb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CTR, + .version = CCP_VERSION(5, 0), + .name = "ctr(sm4)", + .driver_name = "ctr-sm4-ccp", + .blocksize = 1, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, +}; + +static int ccp_register_sm4_hygon_alg(struct list_head *head, + const struct ccp_sm4_def *def) +{ + struct ccp_crypto_skcipher_alg *ccp_alg; + struct skcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->mode = def->mode; + + /* copy the defaults and override as necessary */ + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", 
def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + alg->base.cra_blocksize = def->blocksize; + alg->ivsize = def->ivsize; + + ret = crypto_register_skcipher(alg); + if (ret) { + pr_err("%s skcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_sm4_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + if (sm4_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm4_hygon_alg(head, &sm4_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index e42450d071680e2ef0432ce704bf4a48f1c35962..58b2950f91001a6b12cfa5c5bae8add6663e7c67 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -258,6 +258,105 @@ struct ccp_rsa_req_ctx { #define CCP_RSA_MAXMOD (4 * 1024 / 8) #define CCP5_RSA_MAXMOD (16 * 1024 / 8) +/***** SM2 related defines *****/ +#define CCP_SM2_OPERAND_LEN 32 +#define CCP_SM2_PRIVATE_KEY_LEN CCP_SM2_OPERAND_LEN +#define CCP_SM2_PUBLIC_KEY_LEN (CCP_SM2_OPERAND_LEN * 2) +#define CCP_SM2_ENCRYPT_EXT_LEN (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE) +#define CCP_SM2_MMUL_LEN (CCP_SM2_OPERAND_LEN * 2) + +struct ccp_sm2_ctx { + u32 pri_key_len; + u32 pub_key_len; + u8 pri_key[CCP_SM2_PRIVATE_KEY_LEN]; + u8 pub_key[CCP_SM2_PUBLIC_KEY_LEN]; +}; + +enum ccp_sm2_op_phase { + CCP_SM2_SIGN_PH_SIGN, + CCP_SM2_VERIFY_PH_VERIFY, + CCP_SM2_ENC_PH_KG, + CCP_SM2_ENC_PH_LP, + CCP_SM2_DEC_PH_LP +}; + +struct ccp_sm2_req_ctx { + enum ccp_sm2_op_phase phase; + struct akcipher_request *req; + + u8 src[CCP_SM2_VERIFY_SRC_SIZE]; + u8 dst[CCP_SM2_DST_SIZE]; + + struct scatterlist src_sg; + struct scatterlist dst_sg; + + struct work_struct work; + + struct ccp_cmd cmd; +}; + +/***** SM3 related defines *****/ +struct ccp_sm3_ctx { + u32 key_len; + u8 key[SM3_BLOCK_SIZE]; + + u8 ipad[SM3_BLOCK_SIZE]; + + u8 opad[SM3_BLOCK_SIZE]; + struct scatterlist opad_sg; + + struct crypto_shash *hmac_tfm; +}; + +struct ccp_sm3_req_ctx { + u64 msg_bits; + + unsigned int first; + unsigned int final; + + struct scatterlist *src; + u32 nbytes; + + u64 hash_cnt; + u32 hash_rem; + + struct sg_table data_sg; + struct scatterlist *src_sg; + + struct scatterlist ctx_sg; + u8 ctx[SM3_DIGEST_SIZE]; + + struct scatterlist buf_sg; + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + +struct ccp_sm3_exp_ctx { + u64 msg_bits; + + u8 ctx[SM3_DIGEST_SIZE]; + + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; +}; + +/***** SM4 related defines *****/ +struct ccp_sm4_ctx { + struct scatterlist key_sg; + u8 key[SM4_KEY_SIZE]; + u32 key_len; + u32 mode; +}; + +struct ccp_sm4_req_ctx { + struct scatterlist iv_sg; + u8 iv[SM4_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -267,6 +366,9 @@ struct ccp_ctx { struct ccp_rsa_ctx rsa; struct ccp_sha_ctx sha; struct ccp_des3_ctx des3; + struct ccp_sm2_ctx sm2; + struct ccp_sm3_ctx sm3; + struct ccp_sm4_ctx sm4; } u; }; @@ -282,5 +384,8 @@ int ccp_register_aes_aeads(struct list_head *head); int ccp_register_sha_algs(struct list_head *head); int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); +int ccp_register_sm2_hygon_algs(struct list_head *head); 
+int ccp_register_sm3_hygon_algs(struct list_head *head); +int ccp_register_sm4_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 83350e2d9821e5fd8d76dc4eb26bddf4aa9fc812..46518c80f8caf3f4f82a23db4cef108c5d246c91 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -99,7 +99,9 @@ #define CMD5_Q_MEM_LOCATION 0x4 #define CMD5_Q_SIZE 0x1F #define CMD5_Q_SHIFT 3 + #define COMMANDS_PER_QUEUE 16 + #define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ CMD5_Q_SIZE) #define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1) @@ -334,6 +336,10 @@ struct ccp_cmd_queue { unsigned long total_rsa_ops; unsigned long total_pt_ops; unsigned long total_ecc_ops; + unsigned long total_sm2_ops; + unsigned long total_sm3_ops; + unsigned long total_sm4_ops; + unsigned long total_sm4_ctr_ops; } ____cacheline_aligned; struct ccp_device { @@ -355,6 +361,9 @@ struct ccp_device { bool use_tasklet; struct tasklet_struct irq_tasklet; + /* This flag mark if the ccp support both sm2 and ecc function */ + uint32_t support_sm2_ecc; + /* I/O area used for device communication. The register mapping * starts at an offset into the mapped bar. * The CMD_REQx registers and the Delete_Cmd_Queue_Job register @@ -528,6 +537,28 @@ struct ccp_ecc_op { enum ccp_ecc_function function; }; +struct ccp_sm2_op { + u32 rand; + enum ccp_sm2_mode mode; +}; + +struct ccp_sm3_op { + enum ccp_sm3_type type; + u64 msg_bits; +}; + +struct ccp_sm4_op { + enum ccp_sm4_action action; + enum ccp_sm4_mode mode; + u32 select; +}; + +struct ccp_sm4_ctr_op { + u32 size; + enum ccp_sm4_action action; + u32 step; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -551,6 +582,10 @@ struct ccp_op { struct ccp_rsa_op rsa; struct ccp_passthru_op passthru; struct ccp_ecc_op ecc; + struct ccp_sm2_op sm2; + struct ccp_sm3_op sm3; + struct ccp_sm4_op sm4; + struct ccp_sm4_ctr_op sm4_ctr; } u; }; @@ -599,6 +634,7 @@ struct dword3 { union dword4 { u32 dst_lo; /* NON-SHA */ u32 sha_len_lo; /* SHA */ + __le32 sm3_len_lo; /* SM3 */ }; union dword5 { @@ -609,6 +645,7 @@ union dword5 { unsigned int fixed:1; } fields; u32 sha_len_hi; + __le32 sm3_len_hi; }; struct dword7 { @@ -657,6 +694,11 @@ struct ccp_actions { int (*rsa)(struct ccp_op *); int (*passthru)(struct ccp_op *); int (*ecc)(struct ccp_op *); + int (*sm2)(struct ccp_op *op); + int (*sm3)(struct ccp_op *op); + int (*sm4)(struct ccp_op *op); + int (*sm4_ctr)(struct ccp_op *op); + int (*run_cmd)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); @@ -669,5 +711,7 @@ extern const struct ccp_vdata ccpv3_platform; extern const struct ccp_vdata ccpv3; extern const struct ccp_vdata ccpv5a; extern const struct ccp_vdata ccpv5b; +extern const struct ccp_vdata ccpv5a_hygon; +extern const struct ccp_vdata ccpv5b_hygon; #endif diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index cb8e99936abb7248759e543f2d9f0bcdcc267d07..794ad6d6eb5b8474d1dc69c8c06904e6517d1212 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2463,6 +2463,520 @@ ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) } } +static int ccp_run_sm2_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm2_engine *sm2 = &cmd->u.sm2; + struct ccp_data src, dst; + struct ccp_op op; + int ret; + + if (!sm2->src || !sm2->dst) + return -EINVAL; + 
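+ /*
+ * The SM2 engine consumes a single contiguous source buffer and
+ * writes a single contiguous result: if a mapped scatterlist does
+ * not form one DMA segment, the data is bounced through a
+ * contiguous DMA work area below, which is scrubbed afterwards
+ * because it may hold key material.
+ */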
+ memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.init = 1; + op.eom = 1; + op.u.sm2.rand = sm2->rand & 0x1; + op.u.sm2.mode = sm2->mode; + + memset(&src, 0, sizeof(src)); + ret = ccp_init_sg_workarea(&src.sg_wa, cmd_q->ccp->dev, + sm2->src, sm2->src_len, DMA_TO_DEVICE); + if (ret) + return ret; + + /* if src isn't contiguous, should copy to a contiguous buffer */ + if (src.sg_wa.dma_count == 1) { + op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); + } else { + ccp_sg_free(&src.sg_wa); + ret = ccp_init_dm_workarea(&src.dm_wa, cmd_q, sm2->src_len, + DMA_TO_DEVICE); + if (ret) + goto e_src; + + ccp_set_dm_area(&src.dm_wa, 0, sm2->src, 0, sm2->src_len); + op.src.u.dma.address = src.dm_wa.dma.address; + } + + op.src.type = CCP_MEMTYPE_SYSTEM; + op.src.u.dma.offset = 0; + op.src.u.dma.length = sm2->src_len; + op.src.u.dma.dir = DMA_TO_DEVICE; + + memset(&dst, 0, sizeof(dst)); + ret = ccp_init_sg_workarea(&dst.sg_wa, cmd_q->ccp->dev, + sm2->dst, sm2->dst_len, DMA_FROM_DEVICE); + if (ret) + goto e_src; + + /* if dst isn't contiguous, should copy to a contiguous buffer */ + if (dst.sg_wa.dma_count == 1) { + op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); + } else { + ccp_sg_free(&dst.sg_wa); + ret = ccp_init_dm_workarea(&dst.dm_wa, cmd_q, sm2->dst_len, + DMA_FROM_DEVICE); + if (ret) + goto e_dst; + + op.dst.u.dma.address = dst.dm_wa.dma.address; + } + + op.dst.type = CCP_MEMTYPE_SYSTEM; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = sm2->dst_len; + op.dst.u.dma.dir = DMA_FROM_DEVICE; + + ret = cmd_q->ccp->vdata->perform->sm2(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + if (dst.dm_wa.address) { + ccp_get_dm_area(&dst.dm_wa, 0, sm2->dst, 0, sm2->dst_len); + memset(dst.dm_wa.address, 0, sm2->dst_len); + } + +e_dst: + ccp_free_data(&dst, cmd_q); + +e_src: + if (src.dm_wa.address) + memset(src.dm_wa.address, 0, sm2->src_len); + + ccp_free_data(&src, cmd_q); + + return ret; +} + +static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm3_engine *sm3 = &cmd->u.sm3; + struct ccp_dm_workarea ctx; + struct ccp_data src; + struct ccp_op op; + int ret; + + u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { + 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, + 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, + 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, + 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B, + }; + + if ((sm3->ctx == NULL) || (sm3->ctx_len != SM3_DIGEST_SIZE)) + return -EINVAL; + + if (sg_nents_for_len(sm3->ctx, SM3_DIGEST_SIZE) < 0) + return -EINVAL; + + if (sm3->final && sm3->first) { + if (!sm3->src_len) { + scatterwalk_map_and_copy( + (void *)sm3_zero_message_hash, + sm3->ctx, 0, SM3_DIGEST_SIZE, 1); + return 0; + } + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.init = sm3->first & 0x1; + op.u.sm3.type = sm3->type; + op.u.sm3.msg_bits = sm3->msg_bits; + + memset(&ctx, 0, sizeof(ctx)); + ret = ccp_init_dm_workarea(&ctx, cmd_q, SM3_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + if (ret) + return ret; + + if (!sm3->first) { + /* load iv */ + ccp_set_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + } + + ret = ccp_init_data(&src, cmd_q, sm3->src, sm3->src_len, + SM3_BLOCK_SIZE, DMA_TO_DEVICE); + if (ret) + goto e_ctx; + + 
/* send data to the CCP SM3 engine */ + if (sm3->src_len) { + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, NULL, &op, SM3_BLOCK_SIZE, + false); + if (!src.sg_wa.bytes_left && sm3->final) + op.eom = 1; + + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + + ccp_process_data(&src, NULL, &op); + } + } else { + /* do sm3 padding */ + src.dm_wa.address[0] = 0x80; + *(__be64 *)&src.dm_wa.address[56] = cpu_to_be64(sm3->msg_bits); + + op.soc = 0; + op.ioc = 1; + op.eom = 0; + op.src.u.dma.address = src.dm_wa.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = SM3_BLOCK_SIZE; + + ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + + ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + if (sm3->final && sm3->opad) { + /* HMAC operation, recursively perform final SM3 */ + struct ccp_cmd hmac_cmd; + struct scatterlist sg; + u8 *hmac_buf = NULL; + + hmac_buf = kmalloc( + SM3_BLOCK_SIZE + SM3_DIGEST_SIZE, GFP_KERNEL); + if (!hmac_buf) { + ret = -ENOMEM; + goto e_data; + } + scatterwalk_map_and_copy(hmac_buf, sm3->opad, + 0, SM3_BLOCK_SIZE, 0); + memcpy(hmac_buf + SM3_BLOCK_SIZE, ctx.address, + SM3_DIGEST_SIZE); + sg_init_one(&sg, hmac_buf, SM3_BLOCK_SIZE + SM3_DIGEST_SIZE); + + memset(&hmac_cmd, 0, sizeof(hmac_cmd)); + hmac_cmd.engine = CCP_ENGINE_SM3; + hmac_cmd.u.sm3.type = sm3->type; + hmac_cmd.u.sm3.ctx = sm3->ctx; + hmac_cmd.u.sm3.ctx_len = sm3->ctx_len; + hmac_cmd.u.sm3.src = &sg; + hmac_cmd.u.sm3.src_len = SM3_BLOCK_SIZE + SM3_DIGEST_SIZE; + hmac_cmd.u.sm3.opad = NULL; + hmac_cmd.u.sm3.opad_len = 0; + hmac_cmd.u.sm3.first = 1; + hmac_cmd.u.sm3.final = 1; + hmac_cmd.u.sm3.msg_bits = + (SM3_BLOCK_SIZE + SM3_DIGEST_SIZE) << 3; + + ret = ccp_run_sm3_cmd(cmd_q, &hmac_cmd); + if (ret) + cmd->engine_error = hmac_cmd.engine_error; + + kfree(hmac_buf); + } else { + ccp_get_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + } + +e_data: + ccp_free_data(&src, cmd_q); + +e_ctx: + ccp_dm_free(&ctx); + + return ret; +} + +static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_engine *sm4 = &cmd->u.sm4; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4->src == NULL || sm4->dst == NULL) + return -EINVAL; + + if (sm4->key == NULL || sm4->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4->mode != CCP_SM4_MODE_ECB) { + if (sm4->iv == NULL || sm4->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4.action = sm4->action; + op.u.sm4.mode = sm4->mode; + op.u.sm4.select = sm4->select; + + /* Prepare the input and output data workareas. 
For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4->src) == sg_virt(sm4->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4->src, sm4->src_len, + SM4_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4->dst, sm4->src_len, + SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + if (sm4->mode != CCP_SM4_MODE_ECB) + ccp_set_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4 engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm4(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + + ccp_process_data(&src, &dst, &op); + } + + if (sm4->mode != CCP_SM4_MODE_ECB) { + /* retrieve the SM4 iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + } + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + +static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_ctr_engine *sm4_ctr = &cmd->u.sm4_ctr; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4_ctr->src == NULL || sm4_ctr->dst == NULL) + return -EINVAL; + + if (sm4_ctr->key == NULL || sm4_ctr->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4_ctr->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4_ctr->iv == NULL || sm4_ctr->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4_ctr->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4_ctr.size = sm4_ctr->size; + op.u.sm4_ctr.action = sm4_ctr->action; + op.u.sm4_ctr.step = sm4_ctr->step; + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4_ctr->src) == sg_virt(sm4_ctr->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4_ctr->src, sm4_ctr->src_len, + SM4_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4_ctr->dst, + sm4_ctr->src_len, SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + ccp_set_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4_ctr->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4_CTR engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, false); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm4_ctr(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + + ccp_process_data(&src, &dst, &op); + } + + /* retrieve the SM4_CTR iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2507,6 +3021,18 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_ECC: ret = ccp_run_ecc_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM2: + ret = ccp_run_sm2_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM3: + ret = ccp_run_sm3_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM4: + ret = ccp_run_sm4_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM4_CTR: + ret = ccp_run_sm4_ctr_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/drivers/crypto/ccp/ccp_sm2_sign.asn1 b/drivers/crypto/ccp/ccp_sm2_sign.asn1 new file mode 100644 index 0000000000000000000000000000000000000000..7e83e6799cb47574430dbecb06415ea179767adc --- /dev/null +++ b/drivers/crypto/ccp/ccp_sm2_sign.asn1 @@ -0,0 +1,4 @@ +Sm2Signature ::= SEQUENCE { + sig_r INTEGER ({ ccp_sm2_get_signature_r }), + sig_s INTEGER ({ ccp_sm2_get_signature_s }) +} diff --git a/drivers/crypto/ccp/hygon/ccp-dev-v5.c b/drivers/crypto/ccp/hygon/ccp-dev-v5.c new file mode 100644 index 0000000000000000000000000000000000000000..35e9fc5135d027268b8cee581706e6d9fb0d2e6c --- /dev/null +++ b/drivers/crypto/ccp/hygon/ccp-dev-v5.c @@ -0,0 +1,1236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Secure Processor interface driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Depei Yang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include "../ccp-dev.h" + +/* Allocate the requested number of contiguous LSB slots + * from the LSB bitmap. 
Look in the private range for this + * queue first; failing that, check the public area. + * If no space is available, wait around. + * Return: first slot number + */ +static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) +{ + struct ccp_device *ccp; + int start; + + /* First look at the map for the queue */ + if (cmd_q->lsb >= 0) { + start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap, + LSB_SIZE, + 0, count, 0); + if (start < LSB_SIZE) { + bitmap_set(cmd_q->lsbmap, start, count); + return start + cmd_q->lsb * LSB_SIZE; + } + } + + /* No joy; try to get an entry from the shared blocks */ + ccp = cmd_q->ccp; + for (;;) { + mutex_lock(&ccp->sb_mutex); + + start = (u32)bitmap_find_next_zero_area(ccp->lsbmap, + MAX_LSB_CNT * LSB_SIZE, + 0, + count, 0); + if (start <= MAX_LSB_CNT * LSB_SIZE) { + bitmap_set(ccp->lsbmap, start, count); + + mutex_unlock(&ccp->sb_mutex); + return start; + } + + ccp->sb_avail = 0; + + mutex_unlock(&ccp->sb_mutex); + + /* Wait for KSB entries to become available */ + if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) + return 0; + } +} + + +/* Free a number of LSB slots from the bitmap, starting at + * the indicated starting slot number. + */ +static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, + unsigned int count) +{ + if (!start) + return; + + if (cmd_q->lsb == start) { + /* An entry from the private LSB */ + bitmap_clear(cmd_q->lsbmap, start, count); + } else { + /* From the shared LSBs */ + struct ccp_device *ccp = cmd_q->ccp; + + mutex_lock(&ccp->sb_mutex); + bitmap_clear(ccp->lsbmap, start, count); + ccp->sb_avail = 1; + mutex_unlock(&ccp->sb_mutex); + wake_up_interruptible_all(&ccp->sb_queue); + } +} + +/* Hygon CCP version 5: Union to define the function field (cmd_reg1/dword0) */ +union ccp_function { + struct { + u16 byteswap:2; + u16 bitwise:3; + u16 reflect:2; + u16 rsvd:8; + } pt; + struct { + u16 rand:1; + u16 rsvd:10; + u16 mode:3; + u16 ecc_mode:1; + } sm2_ecc; + struct { + u16 rand:1; + u16 rsvd:11; + u16 mode:3; + } sm2; + struct { + u16 rsvd:10; + u16 type:4; + u16 rsvd2:1; + } sm3; + struct { + u16 rsvd:7; + u16 encrypt:1; + u16 mode:4; + u16 select:1; + u16 rsvd2:2; + } sm4; + struct { + u16 size:7; + u16 encrypt:1; + u16 step:7; + } sm4_ctr; + u16 raw; +}; + +#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap) +#define CCP_PT_BITWISE(p) ((p)->pt.bitwise) + +#define CCP_SM2_RAND(p) ((p)->sm2.rand) +#define CCP_SM2_MODE(p) ((p)->sm2.mode) + +/* For ccp support both sm2 and ecc */ +#define CCP_SM2_ECC_RAND(p) ((p)->sm2_ecc.rand) +#define CCP_SM2_ECC_MODE(p) ((p)->sm2_ecc.mode) +#define CCP_SM2_ECC_ECC_MODE(p) ((p)->sm2_ecc.ecc_mode) + +#define CCP_SM3_TYPE(p) ((p)->sm3.type) +#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) +#define CCP_SM4_MODE(p) ((p)->sm4.mode) +#define CCP_SM4_SELECT(p) ((p)->sm4.select) +#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) +#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) +#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) + +/* Word 0 */ +#define CCP5_CMD_DW0(p) ((p)->dw0) +#define CCP5_CMD_SOC(p) (CCP5_CMD_DW0(p).soc) +#define CCP5_CMD_IOC(p) (CCP5_CMD_DW0(p).ioc) +#define CCP5_CMD_INIT(p) (CCP5_CMD_DW0(p).init) +#define CCP5_CMD_EOM(p) (CCP5_CMD_DW0(p).eom) +#define CCP5_CMD_FUNCTION(p) (CCP5_CMD_DW0(p).function) +#define CCP5_CMD_ENGINE(p) (CCP5_CMD_DW0(p).engine) +#define CCP5_CMD_PROT(p) (CCP5_CMD_DW0(p).prot) + +/* Word 1 */ +#define CCP5_CMD_DW1(p) ((p)->length) +#define CCP5_CMD_LEN(p) (CCP5_CMD_DW1(p)) + +/* Word 2 */ +#define CCP5_CMD_DW2(p) ((p)->src_lo) 
+#define CCP5_CMD_SRC_LO(p) (CCP5_CMD_DW2(p)) + +/* Word 3 */ +#define CCP5_CMD_DW3(p) ((p)->dw3) +#define CCP5_CMD_SRC_MEM(p) ((p)->dw3.src_mem) +#define CCP5_CMD_SRC_HI(p) ((p)->dw3.src_hi) +#define CCP5_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id) +#define CCP5_CMD_FIX_SRC(p) ((p)->dw3.fixed) + +/* Words 4/5 */ +#define CCP5_CMD_DW4(p) ((p)->dw4) +#define CCP5_CMD_DST_LO(p) (CCP5_CMD_DW4(p).dst_lo) +#define CCP5_CMD_DW5(p) ((p)->dw5.fields.dst_hi) +#define CCP5_CMD_DST_HI(p) (CCP5_CMD_DW5(p)) +#define CCP5_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem) +#define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) +#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) +#define CCP5_CMD_SM3_HI(p) ((p)->dw5.sm3_len_hi) + +/* Words 6/7 */ +#define CCP5_CMD_DW6(p) ((p)->key_lo) +#define CCP5_CMD_KEY_LO(p) (CCP5_CMD_DW6(p)) +#define CCP5_CMD_DW7(p) ((p)->dw7) +#define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) +#define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) + +#define CCP5_COMMANDS_PER_QUEUE 8192 +#define CCP5_QUEUE_SIZE_VAL ((ffs(CCP5_COMMANDS_PER_QUEUE) - 2) & \ + CMD5_Q_SIZE) +#define CCP5_Q_PTR_MASK (2 << (CCP5_QUEUE_SIZE_VAL + 5) - 1) +#define CCP5_Q_SIZE(n) (CCP5_COMMANDS_PER_QUEUE * (n)) + +/* indicates whether the Hygon CCP has an ECC engine */ +#define RI_ECC_PRESENT 0x0400 + +/* + * Hygon CCPs from the 4th generation on support both SM2 and ECC, but + * the input layout differs from previous versions: previously a single + * src buffer carried hash + key, whereas now hash and key must be + * passed separately. To stay compatible with the previous driver, we + * parse hash and key out of a src buffer laid out as before. + */ +#define SM2_ECC_OPERAND_LEN 32 +#define SM2_ECC_KG_SRC_SIZE 32 +#define SM2_ECC_LP_SRC_SIZE 32 +#define SM2_ECC_SIGN_SRC_SIZE 64 +#define SM2_ECC_VERIFY_SRC_SIZE 96 + +static inline int ccp5_get_keyinfo(struct ccp_op *op, dma_addr_t *kaddr, u32 *slen) +{ + struct ccp_dma_info *sinfo = &op->src.u.dma; + dma_addr_t saddr = sinfo->address + sinfo->offset; + int ret = 0; + + switch (op->u.sm2.mode) { + case CCP_SM2_MODE_SIGN: + *kaddr = saddr + SM2_ECC_OPERAND_LEN; + *slen = SM2_ECC_SIGN_SRC_SIZE; + break; + case CCP_SM2_MODE_VERIFY: + *kaddr = saddr + SM2_ECC_VERIFY_SRC_SIZE; + *slen = SM2_ECC_VERIFY_SRC_SIZE; + break; + case CCP_SM2_MODE_KG: + *kaddr = 0; /* unused for KG */ + *slen = SM2_ECC_KG_SRC_SIZE; + break; + case CCP_SM2_MODE_LP: + *kaddr = saddr + SM2_ECC_OPERAND_LEN; + *slen = SM2_ECC_LP_SRC_SIZE; + break; + default: + pr_err("Invalid sm2 operation, mode = %d\n", op->u.sm2.mode); + ret = -EINVAL; + break; + } + + return ret; +} + +static inline u32 low_address(unsigned long addr) +{ + return (u64)addr & 0x0ffffffff; +} + +static inline u32 high_address(unsigned long addr) +{ + return ((u64)addr >> 32) & 0x00000ffff; +} + +static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) +{ + unsigned int head_idx, n; + u32 head_lo, queue_start; + + queue_start = low_address(cmd_q->qdma_tail); + head_lo = ioread32(cmd_q->reg_head_lo); + head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); + + n = head_idx + CCP5_COMMANDS_PER_QUEUE - cmd_q->qidx - 1; + + return n % CCP5_COMMANDS_PER_QUEUE; /* Always one unused spot */ +} + +static int ccp5_do_multi_cmds(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + __le32 *mP; + u32 *dP; + int i; + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + + mutex_lock(&cmd_q->q_mutex); + + mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx]; + dP = (u32 *)desc; + for (i = 0; i < 8;
i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % CCP5_COMMANDS_PER_QUEUE; + + mutex_unlock(&cmd_q->q_mutex); + + return 0; +} + +static int ccp5_do_run_cmd(struct ccp_op *op) +{ + struct ccp_cmd_queue *cmd_q = op->cmd_q; + u32 tail; + int ret = 0; + + mutex_lock(&cmd_q->q_mutex); + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (op->ioc) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; +} + +static int ccp5_do_cmd(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + __le32 *mP; + u32 *dP; + u32 tail; + int i; + int ret = 0; + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + mutex_lock(&cmd_q->q_mutex); + + mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx]; + dP = (u32 *)desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % CCP5_COMMANDS_PER_QUEUE; + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (CCP5_CMD_IOC(desc)) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, + cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; +} + +static int ccp5_perform_sm2(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + dma_addr_t kaddr; + unsigned int slen = saddr->length; + int ret = 0; + + op->cmd_q->total_sm2_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 1; + CCP5_CMD_EOM(&desc) = 1; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + + /* + * When the CCP supports both SM2 and ECC, the rand and mode fields + * differ from previous versions, and whether to run on the ECC or + * the SM2 engine must also be indicated. + */ + if (op->cmd_q->ccp->support_sm2_ecc) { + ret = ccp5_get_keyinfo(op, &kaddr, &slen); + if (ret) + return ret; + + CCP_SM2_ECC_RAND(&function) = op->u.sm2.rand; + CCP_SM2_ECC_MODE(&function) = op->u.sm2.mode; + CCP_SM2_ECC_ECC_MODE(&function) = 0; /* 0: SM2 1: ECC */ + } else { + CCP_SM2_RAND(&function) = op->u.sm2.rand; + CCP_SM2_MODE(&function) =
op->u.sm2.mode; + } + + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data must match with mode */ + CCP5_CMD_LEN(&desc) = slen; + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(daddr); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + if (op->cmd_q->ccp->support_sm2_ecc && + op->u.sm2.mode != CCP_SM2_MODE_KG) { + CCP5_CMD_KEY_LO(&desc) = low_address(kaddr); + CCP5_CMD_KEY_HI(&desc) = high_address(kaddr); + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_sm3(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + op->cmd_q->total_sm3_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM3_TYPE(&function) = op->u.sm3.type; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + if (op->eom) { + CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); + CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); + } + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; + CCP_SM4_MODE(&function) = op->u.sm4.mode; + CCP_SM4_SELECT(&function) = op->u.sm4.select; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4_ctr(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ctr_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; + CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; + CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; + 
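/* + * For example (hypothetical values, not taken from a real workload): + * with size == 0x7f, action == 1 (encrypt) and step == 1, the three + * assignments above pack function.raw as + * (1 << 8) | (1 << 7) | 0x7f == 0x1ff, per the sm4_ctr bitfield + * layout in union ccp_function. + */ +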
CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_passthru(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + + op->cmd_q->total_pt_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 0; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap; + CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data is always 256 bytes */ + if (op->src.type == CCP_MEMTYPE_SYSTEM) + CCP5_CMD_LEN(&desc) = saddr->length; + else + CCP5_CMD_LEN(&desc) = daddr->length; + + if (op->src.type == CCP_MEMTYPE_SYSTEM) { + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) + CCP5_CMD_LSB_ID(&desc) = op->sb_key; + } else { + u32 key_addr = op->src.u.sb * CCP_SB_BYTES; + + CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_SRC_HI(&desc) = 0; + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB; + } + + if (op->dst.type == CCP_MEMTYPE_SYSTEM) { + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + } else { + u32 key_addr = op->dst.u.sb * CCP_SB_BYTES; + + CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_DST_HI(&desc) = 0; + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB; + } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_aes(struct ccp_op *op) +{ + pr_err("AES function not implemented\n"); + return -EPERM; +} + +static int ccp5_perform_xts_aes(struct ccp_op *op) +{ + pr_err("XTS-AES function not implemented\n"); + return -EPERM; +} + +static int ccp5_perform_sha(struct ccp_op *op) +{ + pr_err("SHA function not implemented\n"); + return -EPERM; +} + +static int ccp5_perform_des3(struct ccp_op *op) +{ + pr_err("DES3 function not implemented\n"); + return -EPERM; +} + +static int ccp5_perform_rsa(struct ccp_op *op) +{ + pr_err("RSA function not implemented\n"); + return -EPERM; +} + +static int ccp5_perform_ecc(struct ccp_op *op) +{ + pr_err("ECC function not implemented\n"); + return -EPERM; +} + +static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) +{ + int q_mask = 1 << cmd_q->id; + int queues = 0; + int j; + + /* Build a bit mask to know which LSBs this queue has access to. + * Don't bother with segment 0 as it has special privileges.
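+ * For example, assuming each region contributes an LSB_REGION_WIDTH-bit + * field with one bit per queue (the layout this loop walks): a queue + * with id 2 has q_mask == 0x4, and the loop below tests that bit in the + * field of each region 1..MAX_LSB_CNT-1 in turn (illustrative values).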
+ */ + status >>= LSB_REGION_WIDTH; + for (j = 1; j < MAX_LSB_CNT; j++) { + if (status & q_mask) + bitmap_set(cmd_q->lsbmask, j, 1); + status >>= LSB_REGION_WIDTH; + } + queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); + dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n", + cmd_q->id, queues); + + return queues ? 0 : -EINVAL; +} + +static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, + int lsb_cnt, int n_lsbs, + unsigned long *lsb_pub) +{ + DECLARE_BITMAP(qlsb, MAX_LSB_CNT); + int bitno; + int qlsb_wgt; + int i; + + /* For each queue: + * If the count of potential LSBs available to a queue matches the + * ordinal given to us in lsb_cnt: + * Copy the mask of possible LSBs for this queue into "qlsb"; + * For each bit in qlsb, see if the corresponding bit in the + * aggregation mask is set; if so, we have a match. + * If we have a match, clear the bit in the aggregation to + * mark it as no longer available. + * If there is no match, clear the bit in qlsb and keep looking. + */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; + + qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); + + if (qlsb_wgt == lsb_cnt) { + bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT); + + bitno = find_first_bit(qlsb, MAX_LSB_CNT); + while (bitno < MAX_LSB_CNT) { + if (test_bit(bitno, lsb_pub)) { + /* We found an available LSB + * that this queue can access + */ + cmd_q->lsb = bitno; + bitmap_clear(lsb_pub, bitno, 1); + dev_dbg(ccp->dev, + "Queue %d gets LSB %d\n", + i, bitno); + break; + } + bitmap_clear(qlsb, bitno, 1); + bitno = find_first_bit(qlsb, MAX_LSB_CNT); + } + if (bitno >= MAX_LSB_CNT) + return -EINVAL; + n_lsbs--; + } + } + return n_lsbs; +} + +/* For each queue, from the most- to least-constrained: + * find an LSB that can be assigned to the queue. If there are N queues that + * can only use M LSBs, where N > M, fail; otherwise, every queue will get a + * dedicated LSB. Remaining LSB regions become a shared resource. + * If we have fewer LSBs than queues, all LSB regions become shared resources. + */ +static int ccp_assign_lsbs(struct ccp_device *ccp) +{ + DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT); + DECLARE_BITMAP(qlsb, MAX_LSB_CNT); + int n_lsbs = 0; + int bitno; + int i, lsb_cnt; + int rc = 0; + + bitmap_zero(lsb_pub, MAX_LSB_CNT); + + /* Create an aggregate bitmap to get a total count of available LSBs */ + for (i = 0; i < ccp->cmd_q_count; i++) + bitmap_or(lsb_pub, + lsb_pub, ccp->cmd_q[i].lsbmask, + MAX_LSB_CNT); + + n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT); + + if (n_lsbs >= ccp->cmd_q_count) { + /* We have enough LSBS to give every queue a private LSB. + * Brute force search to start with the queues that are more + * constrained in LSB choice. When an LSB is privately + * assigned, it is removed from the public mask. + * This is an ugly N squared algorithm with some optimization. + */ + for (lsb_cnt = 1; + n_lsbs && (lsb_cnt <= MAX_LSB_CNT); + lsb_cnt++) { + rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, + lsb_pub); + if (rc < 0) + return -EINVAL; + n_lsbs = rc; + } + } + + rc = 0; + /* What's left of the LSBs, according to the public mask, now become + * shared. Any zero bits in the lsb_pub mask represent an LSB region + * that can't be used as a shared resource, so mark the LSB slots for + * them as "in use". 
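+ * For example (hypothetical masks): if lsb_pub ends up as 0b0110, LSB + * regions 1 and 2 remain shared, while the slot ranges of regions 0, 3 + * and above are marked busy in ccp->lsbmap so that ccp_lsb_alloc() + * never hands them out.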
+ */ + bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT); + + bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); + while (bitno < MAX_LSB_CNT) { + bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE); + bitmap_set(qlsb, bitno, 1); + bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); + } + + return rc; +} + +static void ccp5_disable_queue_interrupts(struct ccp_device *ccp) +{ + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) + iowrite32(0x0, ccp->cmd_q[i].reg_int_enable); +} + +static void ccp5_enable_queue_interrupts(struct ccp_device *ccp) +{ + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) + iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable); +} + +static void ccp5_irq_bh(unsigned long data) +{ + struct ccp_device *ccp = (struct ccp_device *)data; + u32 status; + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) { + struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; + + status = ioread32(cmd_q->reg_interrupt_status); + + if (status & SUPPORTED_INTERRUPTS) { + cmd_q->int_status = status; + cmd_q->q_status = ioread32(cmd_q->reg_status); + cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); + + /* On error, only save the first error value */ + if ((status & INT_ERROR) && !cmd_q->cmd_error) + cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); + + /* Acknowledge the interrupt and wake the kthread */ + iowrite32(status, cmd_q->reg_interrupt_status); + cmd_q->int_rcvd = 1; + wake_up_interruptible(&cmd_q->int_queue); + } + } + ccp5_enable_queue_interrupts(ccp); +} + +static irqreturn_t ccp5_irq_handler(int irq, void *data) +{ + struct ccp_device *ccp = (struct ccp_device *)data; + + ccp5_disable_queue_interrupts(ccp); + ccp->total_interrupts++; + if (ccp->use_tasklet) + tasklet_schedule(&ccp->irq_tasklet); + else + ccp5_irq_bh((unsigned long)ccp); + return IRQ_HANDLED; +} + +static int ccp5_init(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct ccp_cmd_queue *cmd_q; + struct dma_pool *dma_pool; + char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; + unsigned int qmr, i; + u64 status; + u32 status_lo, status_hi; + int ret; + + /* Find available queues */ + qmr = ioread32(ccp->io_regs + Q_MASK_REG); + /* + * Check for access to the registers. If this read returns + * 0xffffffff, it's likely that the system is running a broken + * BIOS which disallows access to the device. Stop here and fail + * the initialization (but not the load, as the PSP could get + * properly initialized). + */ + if (qmr == 0xffffffff) { + dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n"); + return 1; + } + + /* check whether the CCP supports both SM2 and ECC.
*/ + ccp->support_sm2_ecc = !!(ioread32(ccp->io_regs + CMD5_PSP_CCP_VERSION) + & RI_ECC_PRESENT); + + for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { + if (!(qmr & (1 << i))) + continue; + + /* Allocate a dma pool for this queue */ + snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d", + ccp->name, i); + dma_pool = dma_pool_create(dma_pool_name, dev, + CCP_DMAPOOL_MAX_SIZE, + CCP_DMAPOOL_ALIGN, 0); + if (!dma_pool) { + dev_err(dev, "unable to allocate dma pool\n"); + ret = -ENOMEM; + goto e_pool; + } + + cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; + ccp->cmd_q_count++; + + cmd_q->ccp = ccp; + cmd_q->id = i; + cmd_q->dma_pool = dma_pool; + mutex_init(&cmd_q->q_mutex); + + /* Page alignment satisfies our needs for N <= 128 */ + BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); + cmd_q->qsize = CCP5_Q_SIZE(Q_DESC_SIZE); + cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, + &cmd_q->qbase_dma, + GFP_KERNEL); + if (!cmd_q->qbase) { + dev_err(dev, "unable to allocate command queue\n"); + ret = -ENOMEM; + goto e_pool; + } + + cmd_q->qidx = 0; + /* Preset some register values and masks that are queue + * number dependent + */ + cmd_q->reg_control = ccp->io_regs + + CMD5_Q_STATUS_INCR * (i + 1); + cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE; + cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE; + cmd_q->reg_int_enable = cmd_q->reg_control + + CMD5_Q_INT_ENABLE_BASE; + cmd_q->reg_interrupt_status = cmd_q->reg_control + + CMD5_Q_INTERRUPT_STATUS_BASE; + cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE; + cmd_q->reg_int_status = cmd_q->reg_control + + CMD5_Q_INT_STATUS_BASE; + cmd_q->reg_dma_status = cmd_q->reg_control + + CMD5_Q_DMA_STATUS_BASE; + cmd_q->reg_dma_read_status = cmd_q->reg_control + + CMD5_Q_DMA_READ_STATUS_BASE; + cmd_q->reg_dma_write_status = cmd_q->reg_control + + CMD5_Q_DMA_WRITE_STATUS_BASE; + + init_waitqueue_head(&cmd_q->int_queue); + + dev_dbg(dev, "queue #%u available\n", i); + } + + if (ccp->cmd_q_count == 0) { + dev_notice(dev, "no command queues available\n"); + ret = 1; + goto e_pool; + } + + /* Turn off the queues and disable interrupts until ready */ + ccp5_disable_queue_interrupts(ccp); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + cmd_q->qcontrol = 0; /* Start with nothing */ + iowrite32(cmd_q->qcontrol, cmd_q->reg_control); + + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + + /* Clear the interrupt status */ + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); + } + + dev_dbg(dev, "Requesting an IRQ...\n"); + /* Request an irq */ + ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp); + if (ret) { + dev_err(dev, "unable to allocate an IRQ\n"); + goto e_pool; + } + /* Initialize the ISR tasklet */ + if (ccp->use_tasklet) + tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh, + (unsigned long)ccp); + + dev_dbg(dev, "Loading LSB map...\n"); + /* Copy the private LSB mask to the public registers */ + status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); + status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); + iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET); + iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET); + status = ((u64)status_hi<<30) | (u64)status_lo; + + dev_dbg(dev, "Configuring virtual queues...\n"); + /* Configure size of each virtual queue accessible to host */ + for (i = 0; i < ccp->cmd_q_count; i++) { + u32 dma_addr_lo; + u32 dma_addr_hi; + + cmd_q = &ccp->cmd_q[i]; + + 
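/* + * For example: with CCP5_COMMANDS_PER_QUEUE == 8192, + * ffs(8192) - 2 == 12, so the write below programs a ring of + * 2^(12 + 1) == 8192 descriptors (as implied by the + * CCP5_QUEUE_SIZE_VAL arithmetic above). + */ +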
cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); + cmd_q->qcontrol |= CCP5_QUEUE_SIZE_VAL << CMD5_Q_SHIFT; + + cmd_q->qdma_tail = cmd_q->qbase_dma; + dma_addr_lo = low_address(cmd_q->qdma_tail); + iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo); + iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo); + + dma_addr_hi = high_address(cmd_q->qdma_tail); + cmd_q->qcontrol |= (dma_addr_hi << 16); + iowrite32(cmd_q->qcontrol, cmd_q->reg_control); + + /* Find the LSB regions accessible to the queue */ + ccp_find_lsb_regions(cmd_q, status); + cmd_q->lsb = -1; /* Unassigned value */ + } + + dev_dbg(dev, "Assigning LSBs...\n"); + ret = ccp_assign_lsbs(ccp); + if (ret) { + dev_err(dev, "Unable to assign LSBs (%d)\n", ret); + goto e_irq; + } + + /* Optimization: pre-allocate LSB slots for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2); + ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2); + } + + dev_dbg(dev, "Starting threads...\n"); + /* Create a kthread for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct task_struct *kthread; + + cmd_q = &ccp->cmd_q[i]; + + kthread = kthread_run(ccp_cmd_queue_thread, cmd_q, + "%s-q%u", ccp->name, cmd_q->id); + if (IS_ERR(kthread)) { + dev_err(dev, "error creating queue thread (%ld)\n", + PTR_ERR(kthread)); + ret = PTR_ERR(kthread); + goto e_kthread; + } + + cmd_q->kthread = kthread; + } + + dev_dbg(dev, "Enabling interrupts...\n"); + ccp5_enable_queue_interrupts(ccp); + + dev_dbg(dev, "Registering device...\n"); + /* Put this on the unit list to make it available */ + ccp_add_device(ccp); + + ret = ccp_register_rng(ccp); + if (ret) + goto e_kthread; + + /* Register the DMA engine support */ + ret = ccp_dmaengine_register(ccp); + if (ret) + goto e_hwrng; + +#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS + /* Set up debugfs entries */ + ccp5_debugfs_setup(ccp); +#endif + + return 0; + +e_hwrng: + ccp_unregister_rng(ccp); + +e_kthread: + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + +e_irq: + sp_free_ccp_irq(ccp->sp, ccp); + +e_pool: + for (i = 0; i < ccp->cmd_q_count; i++) + dma_pool_destroy(ccp->cmd_q[i].dma_pool); + + return ret; +} + +static void ccp5_destroy(struct ccp_device *ccp) +{ + struct ccp_cmd_queue *cmd_q; + struct ccp_cmd *cmd; + unsigned int i; + + /* Unregister the DMA engine */ + ccp_dmaengine_unregister(ccp); + + /* Unregister the RNG */ + ccp_unregister_rng(ccp); + + /* Remove this device from the list of available units first */ + ccp_del_device(ccp); + +#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS + /* We're in the process of tearing down the entire driver; + * when all the devices are gone clean up debugfs + */ + if (ccp_present()) + ccp5_debugfs_destroy(); +#endif + + /* Disable and clear interrupts */ + ccp5_disable_queue_interrupts(ccp); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + /* Turn off the run bit */ + iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control); + + /* Clear the interrupt status */ + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + } + + /* Stop the queue kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + + sp_free_ccp_irq(ccp->sp, ccp); + + /* Flush the cmd and backlog queue */ + while (!list_empty(&ccp->cmd)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->cmd, struct 
ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } + while (!list_empty(&ccp->backlog)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } +} + +static void ccp5_config(struct ccp_device *ccp) +{ + /* Public side */ + iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); +} + +static void ccp5other_config(struct ccp_device *ccp) +{ + int i; + u32 rnd; + + /* We own all of the queues on the NTB CCP */ + + iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); + iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); + + /* According to the spec for the SM4 high-secure module, 64 bytes + * of random data are needed, so the mask register must be written + * 16 times (or a multiple of 16) during initialization. + * + * The AES algorithm needs 48 bytes, so 12 writes (or a multiple + * of 12) would be needed. + */ + for (i = 0; i < 16; i++) { + rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); + iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); + } + + iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET); + iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET); + iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET); + + iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); + iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); + + iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET); + + ccp5_config(ccp); +} + +/* The Hygon version 5 CCP adds the SM2/SM3/SM4 engines, but is + * otherwise essentially the same as the v5 CCP + */ +static const struct ccp_actions ccp5_actions = { + .aes = ccp5_perform_aes, + .xts_aes = ccp5_perform_xts_aes, + .sha = ccp5_perform_sha, + .des3 = ccp5_perform_des3, + .rsa = ccp5_perform_rsa, + .passthru = ccp5_perform_passthru, + .ecc = ccp5_perform_ecc, + .sm2 = ccp5_perform_sm2, + .sm3 = ccp5_perform_sm3, + .sm4 = ccp5_perform_sm4, + .sm4_ctr = ccp5_perform_sm4_ctr, + .run_cmd = ccp5_do_run_cmd, + .sballoc = ccp_lsb_alloc, + .sbfree = ccp_lsb_free, + .init = ccp5_init, + .destroy = ccp5_destroy, + .get_free_slots = ccp5_get_free_slots, +}; + +const struct ccp_vdata ccpv5a_hygon = { + .version = CCP_VERSION(5, 1), + .setup = ccp5_config, + .perform = &ccp5_actions, + .offset = 0x0, + .rsamax = CCP5_RSA_MAX_WIDTH, +}; + +const struct ccp_vdata ccpv5b_hygon = { + .version = CCP_VERSION(5, 1), + .dma_chan_attr = DMA_PRIVATE, + .setup = ccp5other_config, + .perform = &ccp5_actions, + .offset = 0x0, + .rsamax = CCP5_RSA_MAX_WIDTH, +}; diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c new file mode 100644 index 0000000000000000000000000000000000000000..62e8835e9b0ea83d6cc9ecd4f081e6687c7247a6 --- /dev/null +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -0,0 +1,1357 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON CSV interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include + +#include + +#include "csv-dev.h" +#include "psp-dev.h" +#include "ring-buffer.h" + +/* + * Hygon CSV build info: + * Hygon CSV build info is 32 bits in length, rather than 8 bits as + * in AMD SEV.
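+ * For example, csv_update_api_version() below folds the upper 23 bits + * of status->flags and the 8-bit status->build field into one value: + * hygon_csv_build = (status->flags >> 9) | ((u32)status->build << 23).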
+ */ +u32 hygon_csv_build; + +int csv_comm_mode = CSV_COMM_MAILBOX_ON; + +/* definitions of variables used by the virtual PSP */ +enum VPSP_RB_CHECK_STATUS { + RB_NOT_CHECK = 0, + RB_CHECKING, + RB_CHECKED, + RB_CHECK_MAX +}; +#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) +#define VPSP_CMD_STATUS_RUNNING 0xffff +static DEFINE_MUTEX(vpsp_rb_mutex); +struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; +static uint8_t vpsp_rb_supported; +static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); + +/* + * csv_update_api_version is used to update the API version of the HYGON + * CSV firmware at the driver side. + * Currently, we only need to update @hygon_csv_build. + */ +void csv_update_api_version(struct sev_user_data_status *status) +{ + if (status) { + hygon_csv_build = (status->flags >> 9) | + ((u32)status->build << 23); + } +} + +int csv_cmd_buffer_len(int cmd) +{ + switch (cmd) { + case CSV_CMD_HGSC_CERT_IMPORT: return sizeof(struct csv_data_hgsc_cert_import); + case CSV_CMD_RING_BUFFER: return sizeof(struct csv_data_ring_buffer); + case CSV3_CMD_LAUNCH_ENCRYPT_DATA: return sizeof(struct csv3_data_launch_encrypt_data); + case CSV3_CMD_LAUNCH_ENCRYPT_VMCB: return sizeof(struct csv3_data_launch_encrypt_vmcb); + case CSV3_CMD_UPDATE_NPT: return sizeof(struct csv3_data_update_npt); + case CSV3_CMD_SET_SMR: return sizeof(struct csv3_data_set_smr); + case CSV3_CMD_SET_SMCR: return sizeof(struct csv3_data_set_smcr); + case CSV3_CMD_SET_GUEST_PRIVATE_MEMORY: + return sizeof(struct csv3_data_set_guest_private_memory); + case CSV3_CMD_DBG_READ_VMSA: return sizeof(struct csv3_data_dbg_read_vmsa); + case CSV3_CMD_DBG_READ_MEM: return sizeof(struct csv3_data_dbg_read_mem); + case CSV3_CMD_SEND_ENCRYPT_DATA: return sizeof(struct csv3_data_send_encrypt_data); + case CSV3_CMD_SEND_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_send_encrypt_context); + case CSV3_CMD_RECEIVE_ENCRYPT_DATA: + return sizeof(struct csv3_data_receive_encrypt_data); + case CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_receive_encrypt_context); + default: return 0; + } +} + +static int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) +{ + struct csv_user_data_hgsc_cert_import input; + struct csv_data_hgsc_cert_import *data; + void *hgscsk_blob, *hgsc_blob; + int ret; + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* copy HGSCSK certificate blobs from userspace */ + hgscsk_blob = psp_copy_user_blob(input.hgscsk_cert_address, input.hgscsk_cert_len); + if (IS_ERR(hgscsk_blob)) { + ret = PTR_ERR(hgscsk_blob); + goto e_free; + } + + data->hgscsk_cert_address = __psp_pa(hgscsk_blob); + data->hgscsk_cert_len = input.hgscsk_cert_len; + + /* copy HGSC certificate blobs from userspace */ + hgsc_blob = psp_copy_user_blob(input.hgsc_cert_address, input.hgsc_cert_len); + if (IS_ERR(hgsc_blob)) { + ret = PTR_ERR(hgsc_blob); + goto e_free_hgscsk; + } + + data->hgsc_cert_address = __psp_pa(hgsc_blob); + data->hgsc_cert_len = input.hgsc_cert_len; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_HGSC_CERT_IMPORT, + data, &argp->error); + + kfree(hgsc_blob); +e_free_hgscsk: + kfree(hgscsk_blob); +e_free: + kfree(data); + return ret; +} + +static int csv_get_api_version(void) +{ + struct sev_device *sev; + struct sev_user_data_status *status; + int error = 0, ret; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (!psp_master ||
!psp_master->sev_data) + return -ENODEV; + + sev = psp_master->sev_data; + + status = kzalloc(sizeof(*status), GFP_KERNEL); + if (!status) + return -ENOMEM; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + status, &error); + if (ret) { + dev_err(sev->dev, + "CSV: failed to get status. Error: %#x\n", error); + goto e_free_status; + } + + sev->api_major = status->api_major; + sev->api_minor = status->api_minor; + sev->build = status->build; + sev->state = status->state; + + csv_update_api_version(status); + + ret = 0; +e_free_status: + kfree(status); + return ret; +} + +static int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) +{ + struct sev_data_download_firmware *data = NULL; + struct csv_user_data_download_firmware input; + int ret, order; + struct page *p; + u64 data_size; + + /* Only support DOWNLOAD_FIRMWARE if build greater or equal 1667 */ + if (!csv_version_greater_or_equal(1667)) { + pr_err("DOWNLOAD_FIRMWARE not supported\n"); + return -EIO; + } + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + if (!input.address) { + argp->error = SEV_RET_INVALID_ADDRESS; + return -EINVAL; + } + + if (!input.length || input.length > CSV_FW_MAX_SIZE) { + argp->error = SEV_RET_INVALID_LEN; + return -EINVAL; + } + + /* + * CSV FW expects the physical address given to it to be 32 + * byte aligned. Memory allocated has structure placed at the + * beginning followed by the firmware being passed to the CSV + * FW. Allocate enough memory for data structure + alignment + * padding + CSV FW. + */ + data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); + + order = get_order(input.length + data_size); + p = alloc_pages(GFP_KERNEL, order); + if (!p) + return -ENOMEM; + + /* + * Copy firmware data to a kernel allocated contiguous + * memory region. + */ + data = page_address(p); + if (copy_from_user((void *)(page_address(p) + data_size), + (void *)input.address, input.length)) { + ret = -EFAULT; + goto err_free_page; + } + + data->address = __psp_pa(page_address(p) + data_size); + data->len = input.length; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_DOWNLOAD_FIRMWARE, + data, &argp->error); + if (ret) { + pr_err("Failed to update CSV firmware: %#x\n", argp->error); + goto err_free_page; + } else { + pr_info("CSV firmware update successful\n"); + } + + /* + * Synchronize API version status. The return value of csv_get_api_version + * will inform the user of any error encountered when attempting to + * communicate with the Hygon PSP after the DOWNLOAD_FIRMWARE API completes + * successfully. 
+ */ + ret = csv_get_api_version(); + +err_free_page: + __free_pages(p, order); + + return ret; +} + +static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct sev_issue_cmd input; + int ret = -EFAULT; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (!psp_master || !psp_master->sev_data) + return -ENODEV; + + if (ioctl != SEV_ISSUE_CMD) + return -EINVAL; + + if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) + return -EFAULT; + + if (input.cmd > CSV_MAX) + return -EINVAL; + + if (mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + + switch (input.cmd) { + case CSV_HGSC_CERT_IMPORT: + ret = csv_ioctl_do_hgsc_import(&input); + break; + case CSV_PLATFORM_INIT: + ret = hygon_psp_hooks.__sev_platform_init_locked(&input.error); + break; + case CSV_PLATFORM_SHUTDOWN: + ret = hygon_psp_hooks.__sev_platform_shutdown_locked(&input.error); + break; + case CSV_DOWNLOAD_FIRMWARE: + ret = csv_ioctl_do_download_firmware(&input); + break; + default: + /* + * If the command is compatible between CSV and SEV, the + * native implementation of the driver is invoked. + * Release the mutex before calling the native ioctl function + * because it acquires the mutex itself. + */ + if (mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + return hygon_psp_hooks.sev_ioctl(file, ioctl, arg); + } + + if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) + ret = -EFAULT; + + if (mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return ret; +} + +const struct file_operations csv_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = csv_ioctl, +}; + +/* + * __csv_ring_buffer_enter_locked issues the command to switch to RING + * BUFFER mode; the caller must hold the mutex lock.
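+ * + * A locked call sequence might look like this sketch (using the + * helpers in this file; error handling elided): + * + *	mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + *	rc = __csv_ring_buffer_enter_locked(&error); + *	if (!rc) + *		rc = __csv_do_ringbuf_cmds_locked(&error); + *	mutex_unlock(hygon_psp_hooks.sev_cmd_mutex);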
+ */ +static int __csv_ring_buffer_enter_locked(int *error) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + int ret = 0; + + if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + sev = psp->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +static int csv_wait_cmd_ioc_ring_buffer(struct sev_device *sev, + unsigned int *reg, + unsigned int timeout) +{ + int ret; + + ret = wait_event_timeout(sev->int_queue, + sev->int_rcvd, timeout * HZ); + if (!ret) + return -ETIMEDOUT; + + *reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + return 0; +} + +static int csv_get_cmd_status(struct sev_device *sev, int prio, int index) +{ + struct csv_queue *queue = &sev->ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)queue->data; + + return statval[index].status; +} + +static int __csv_do_ringbuf_cmds_locked(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int rb_tail; + unsigned int rb_ctl; + int last_cmd_index; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr.tail + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW].cmd_ptr.tail; + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + + /* The PSP responds to x86 only when all queues are empty or an error happens */ + rb_ctl = PSP_RBCTL_X86_WRITES | + PSP_RBCTL_RBMODE_ACT | + PSP_RBCTL_CLR_INTSTAT; + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in the ring buffer to complete */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, &reg, + (*hygon_psp_hooks.psp_timeout) * 10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + dev_err(sev->dev, "csv ringbuffer mode command timed out, disabling PSP\n"); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + /* a cmd error happened */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + if (psp_ret) { + last_cmd_index = (reg & PSP_RBHEAD_QHI_HEAD_MASK) + >> PSP_RBHEAD_QHI_HEAD_SHIFT; + *psp_ret = csv_get_cmd_status(sev, CSV_COMMAND_PRIORITY_HIGH, + last_cmd_index); + if (*psp_ret == 0) { + last_cmd_index = reg &
PSP_RBHEAD_QLO_HEAD_MASK; + *psp_ret = csv_get_cmd_status(sev, + CSV_COMMAND_PRIORITY_LOW, last_cmd_index); + } + } + + return ret; +} + +/* + * csv_do_ringbuf_cmds will enter RING BUFFER mode and handle the commands + * queued in the RING BUFFER queues; the caller is obliged to manage the + * RING BUFFER queues, including allocating, enqueuing and freeing them. + */ +static int csv_do_ringbuf_cmds(int *psp_ret) +{ + struct sev_user_data_status data; + int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + + rc = __csv_ring_buffer_enter_locked(psp_ret); + if (rc) + goto cmd_unlock; + + rc = __csv_do_ringbuf_cmds_locked(psp_ret); + + /* exit ringbuf mode by sending a CMD in mailbox mode */ + hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +cmd_unlock: + if (mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} + +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) +{ + if (!filep || filep->f_op != &csv_fops) + return -EBADF; + + return csv_do_ringbuf_cmds(psp_ret); +} +EXPORT_SYMBOL_GPL(csv_issue_ringbuf_cmds_external_user); + +void csv_restore_mailbox_mode_postprocess(void) +{ + csv_comm_mode = CSV_COMM_MAILBOX_ON; + csv_ring_buffer_queue_free(); +} + +/* + * __csv_ring_buffer_queue_init will allocate memory for the command queue + * and the status queue. If an error occurs, this function returns + * directly; the caller must free the memory allocated for the queues. + * + * csv_ring_buffer_queue_free() can be used to handle an error return + * from this function and to clean up the ring buffer queues when exiting + * from RING BUFFER mode.
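+ * + * A minimal error-handling sketch for a caller: + * + *	ret = __csv_ring_buffer_queue_init(ring_buffer); + *	if (ret) + *		csv_ring_buffer_queue_free();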
+ * + * Return -ENOMEM if allocating memory for the queues fails, otherwise 0 + */ +static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer) +{ + void *cmd_ptr_buffer = NULL; + void *stat_val_buffer = NULL; + + /* If we reach here, the command and status queues must be NULL */ + WARN_ON(ring_buffer->cmd_ptr.data || + ring_buffer->stat_val.data); + + cmd_ptr_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!cmd_ptr_buffer) + return -ENOMEM; + + /* the command queue will point to @cmd_ptr_buffer */ + csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); + + stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!stat_val_buffer) + return -ENOMEM; + + /* the status queue will point to @stat_val_buffer */ + csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); + return 0; +} + +int csv_ring_buffer_queue_init(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + int i, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&sev->ring_buffer[i]); + if (ret) + goto e_free; + } + + return 0; + +e_free: + csv_ring_buffer_queue_free(); + return ret; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_init); + +int csv_ring_buffer_queue_free(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_ringbuffer_queue *ring_buffer; + int i; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = 0; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ring_buffer = &sev->ring_buffer[i]; + + /* + * If the command queue is not NULL, it must point to memory + * that was allocated in __csv_ring_buffer_queue_init(). + */ + if (ring_buffer->cmd_ptr.data) { + kfree((void *)ring_buffer->cmd_ptr.data); + csv_queue_cleanup(&ring_buffer->cmd_ptr); + } + + /* + * If the status queue is not NULL, it must point to memory + * that was allocated in __csv_ring_buffer_queue_init().
+ */ + if (ring_buffer->stat_val.data) { + kfree((void *)ring_buffer->stat_val.data); + csv_queue_cleanup(&ring_buffer->stat_val); + } + } + return 0; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); + +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_cmdptr_entry cmdptr = { }; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + if (csv_enqueue_cmd(&sev->ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(csv_fill_cmd_queue); + +int csv_check_stat_queue_status(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int len; + int prio; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (prio = CSV_COMMAND_PRIORITY_HIGH; + prio < CSV_COMMAND_PRIORITY_NUM; prio++) { + do { + struct csv_statval_entry statval; + + len = csv_dequeue_stat(&sev->ring_buffer[prio].stat_val, + &statval, 1); + if (len) { + if (statval.status != 0) { + *psp_ret = statval.status; + return -EFAULT; + } + } + } while (len); + } + + return 0; +} +EXPORT_SYMBOL_GPL(csv_check_stat_queue_status); + +int csv_get_extension_info(void *buf, size_t *size) +{ + /* If @hygon_csv_build is 0, either the CSV firmware or the PSP + * device doesn't exist. + */ + if (hygon_csv_build == 0) + return -ENODEV; + + /* The caller must provide a valid @buf, which must be >= 4 bytes + * in size. + */ + if (!buf || !size || *size < sizeof(uint32_t)) { + if (size) + *size = sizeof(uint32_t); + + return -EINVAL; + } + + /* Since firmware build id 2200, the following are supported: + * a. issuing the LAUNCH_ENCRYPT_DATA command more than once for a + * CSV3 guest. + * b. injecting a secret into a CSV3 guest.
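+ * + * A caller might query the extension mask roughly as follows (sketch; + * handle_secret_injection() is a hypothetical caller function): + * + *	uint32_t ext = 0; + *	size_t size = sizeof(ext); + * + *	if (!csv_get_extension_info(&ext, &size) && + *	    (ext & CSV_EXT_CSV3_INJ_SECRET)) + *		handle_secret_injection();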
+ */ + if (csv_version_greater_or_equal(2200)) { + *(uint32_t *)buf |= CSV_EXT_CSV3_MULT_LUP_DATA; + *(uint32_t *)buf |= CSV_EXT_CSV3_INJ_SECRET; + } + + return 0; +} +EXPORT_SYMBOL_GPL(csv_get_extension_info); + +#ifdef CONFIG_HYGON_CSV + +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) +{ + int ret = 0; + unsigned int i = 0; + struct csv3_data_set_smr *cmd_set_smr; + struct csv3_data_set_smcr *cmd_set_smcr; + struct csv3_data_memory_region *smr_regions; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) { + ret = -ENODEV; + goto l_end; + } + + if (!csv_smr || !csv_smr_num) { + ret = -EINVAL; + goto l_end; + } + + cmd_set_smr = kzalloc(sizeof(*cmd_set_smr), GFP_KERNEL); + if (!cmd_set_smr) { + ret = -ENOMEM; + goto l_end; + } + + smr_regions = kcalloc(csv_smr_num, sizeof(*smr_regions), GFP_KERNEL); + if (!smr_regions) { + ret = -ENOMEM; + goto e_free_cmd_set_smr; + } + + for (i = 0; i < csv_smr_num; i++) { + smr_regions[i].base_address = csv_smr[i].start; + smr_regions[i].size = csv_smr[i].size; + } + cmd_set_smr->smr_entry_size = 1 << csv_get_smr_entry_shift(); + cmd_set_smr->regions_paddr = __psp_pa(smr_regions); + cmd_set_smr->nregions = csv_smr_num; + ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMR, cmd_set_smr, error); + if (ret) { + pr_err("Fail to set SMR, ret %#x, error %#x\n", ret, *error); + goto e_free_smr_area; + } + + cmd_set_smcr = kzalloc(sizeof(*cmd_set_smcr), GFP_KERNEL); + if (!cmd_set_smcr) { + ret = -ENOMEM; + goto e_free_smr_area; + } + + cmd_set_smcr->base_address = csv_alloc_from_contiguous(1UL << CSV_MR_ALIGN_BITS, + &node_online_map, + get_order(1 << CSV_MR_ALIGN_BITS)); + if (!cmd_set_smcr->base_address) { + pr_err("Fail to alloc SMCR memory\n"); + ret = -ENOMEM; + goto e_free_cmd_set_smcr; + } + + cmd_set_smcr->size = 1UL << CSV_MR_ALIGN_BITS; + ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMCR, cmd_set_smcr, error); + if (ret) { + if (*error == SEV_RET_INVALID_COMMAND) + ret = 0; + else + pr_err("set smcr ret %#x, error %#x\n", ret, *error); + + csv_release_to_contiguous(cmd_set_smcr->base_address, + 1UL << CSV_MR_ALIGN_BITS); + } + +e_free_cmd_set_smcr: + kfree((void *)cmd_set_smcr); +e_free_smr_area: + kfree((void *)smr_regions); +e_free_cmd_set_smr: + kfree((void *)cmd_set_smr); + +l_end: + if (ret) + dev_warn(sev->dev, + "CSV3: fail to set secure memory region, CSV3 support unavailable\n"); + return ret; +} + +#else /* !CONFIG_HYGON_CSV */ + +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) +{ + dev_warn(sev->dev, + "CSV3: needs CONFIG_HYGON_CSV, CSV3 support unavailable\n"); + return -EFAULT; +} + +#endif /* CONFIG_HYGON_CSV */ + +static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; +} + +static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; +} + +static void vpsp_set_cmd_status(int prio, int index, int status) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + statval[index].status = status; +} + +static int vpsp_get_cmd_status(int prio, int index) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + return statval[index].status; +} + +static unsigned int vpsp_queue_cmd_size(int prio) +{ + return 
csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); +} + +static int vpsp_dequeue_cmd(int prio, int index, + struct csv_cmdptr_entry *cmd_ptr) +{ + mutex_lock(&vpsp_rb_mutex); + + /* The status update must be before the head update */ + vpsp_set_cmd_status(prio, index, 0); + csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); + + mutex_unlock(&vpsp_rb_mutex); + + return 0; +} + +/* + * Populate the command from the virtual machine to the queue to + * support execution in ringbuffer mode + */ +static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t flags) +{ + struct csv_cmdptr_entry cmdptr = { }; + int index = -1; + + cmdptr.cmd_buf_ptr = phy_addr; + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + mutex_lock(&vpsp_rb_mutex); + index = get_queue_tail(&vpsp_ring_buffer[prio]); + + /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ + if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { + index = -1; + goto out; + } + + /* The status must be written first, and then the cmd can be enqueued */ + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } + +out: + mutex_unlock(&vpsp_rb_mutex); + return index; +} + +static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, + uint32_t new_head) +{ + uint32_t orig_head = get_queue_head(ring_buffer); + uint32_t comple_num = 0; + + if (new_head >= orig_head) + comple_num = new_head - orig_head; + else + comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) + + 1; + + ring_buffer->cmd_ptr.head += comple_num; +} + +static int vpsp_ring_buffer_queue_init(void) +{ + int i; + int ret; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&vpsp_ring_buffer[i]); + if (ret) + return ret; + } + + return 0; +} + +static int vpsp_psp_mutex_trylock(void) +{ + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) + return psp_mutex_trylock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + return mutex_trylock(hygon_psp_hooks.sev_cmd_mutex); +} + +static int vpsp_psp_mutex_unlock(void) +{ + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return 0; +} + +static int __vpsp_ring_buffer_enter_locked(int *error) +{ + int ret; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + struct sev_device *sev = psp_master->sev_data; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = 
__psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct psp_device *psp = psp_master; + unsigned int reg, ret = 0; + unsigned int rb_tail, rb_head; + unsigned int rb_ctl; + struct sev_device *sev; + + if (!psp || !hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb head */ + rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); + rb_head |= (get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBHEAD_QHI_HEAD_SHIFT); + rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); + rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + /* PSP response to x86 only when all queue is empty or error happends */ + rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in ring buffer completed */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, ®, (*hygon_psp_hooks.psp_timeout) * 10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(psp->dev, "csv command in ringbuffer mode timed out, disabling PSP\n"); + *hygon_psp_hooks.psp_dead = true; + return ret; + } + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + /* update head */ + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH], + (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW], + reg & PSP_RBHEAD_QLO_HEAD_MASK); + + if (psp_ret) + *psp_ret = vpsp_get_cmd_status(prio, index); + + return ret; +} + +static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct sev_user_data_status data; + int rc; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + rc = __vpsp_ring_buffer_enter_locked(psp_ret); + if (rc) + goto end; + + rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, index); + + /* exit ringbuf mode by send CMD in mailbox mode */ + hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +end: + return rc; +} + +/** + * struct user_data_status - PLATFORM_STATUS command parameters + * + * @major: major API version + * @minor: minor API version + * @state: platform state + * @owner: self-owned or externally owned + * @chip_secure: ES or MP chip + * @fw_enc: is this FW is encrypted + * @fw_sign: is this FW is signed + * @config_es: platform config flags for csv-es + * @build: Firmware 
Build ID for this API version + * @guest_count: number of active guests + */ +struct user_data_status { + uint8_t api_major; /* Out */ + uint8_t api_minor; /* Out */ + uint8_t state; /* Out */ + uint8_t owner : 1, /* Out */ + chip_secure : 1, /* Out */ + fw_enc : 1, /* Out */ + fw_sign : 1, /* Out */ + reserved1 : 4; /* reserved */ + uint32_t config_es : 1, /* Out */ + build : 31; /* Out */ + uint32_t guest_count; /* Out */ +} __packed; + +/* + * Check whether the firmware supports ringbuffer mode and parse + * commands from the virtual machine + */ +static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, + struct vpsp_cmd *vcmd) +{ + int ret, error; + int rb_supported; + int rb_check_old = RB_NOT_CHECK; + struct user_data_status *status = NULL; + + if (atomic_try_cmpxchg(&vpsp_rb_check_status, &rb_check_old, + RB_CHECKING)) { + /* get buildid to check if the firmware supports ringbuffer mode */ + status = kzalloc(sizeof(*status), GFP_KERNEL); + if (!status) { + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + ret = sev_platform_status((struct sev_user_data_status *)status, + &error); + if (ret) { + pr_warn("failed to get status[%#x], use default command mode.\n", error); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + + /* check if the firmware supports the ringbuffer mode */ + if (VPSP_RB_IS_SUPPORTED(status->build)) { + if (vpsp_ring_buffer_queue_init()) { + pr_warn("vpsp_ring_buffer_queue_init failed, use default command mode\n"); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + WRITE_ONCE(vpsp_rb_supported, 1); + } + + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + } + +end: + rb_supported = READ_ONCE(vpsp_rb_supported); + /* parse prio from vcmd */ + if (rb_supported && vcmd->is_high_rb) + *prio = CSV_COMMAND_PRIORITY_HIGH; + else + *prio = CSV_COMMAND_PRIORITY_LOW; + /* clear rb level bit in vcmd */ + vcmd->is_high_rb = 0; + + kfree(status); + return rb_supported; +} + +static int __vpsp_do_cmd_locked(int cmd, phys_addr_t phy_addr, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* Get the physical address of the command buffer */ + phys_lsb = phy_addr ? lower_32_bits(phy_addr) : 0; + phys_msb = phy_addr ?
upper_32_bits(phy_addr) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, &reg, *hygon_psp_hooks.psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + return ret; +} + +int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) { + return -EBUSY; + } + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + + rc = __vpsp_do_cmd_locked(cmd, phy_addr, psp_ret); + + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} + +/* + * Try to obtain the result again by the command index, this + * interface is used in ringbuffer mode + */ +int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr, + struct vpsp_ret *psp_ret) +{ + int ret = 0; + struct csv_cmdptr_entry cmd = {0}; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + /* Get the result directly if the command has been executed */ + if (index >= 0 && vpsp_get_cmd_status(prio, index) != + VPSP_CMD_STATUS_RUNNING) { + psp_ret->pret = vpsp_get_cmd_status(prio, index); + psp_ret->status = VPSP_FINISH; + return 0; + } + + if (vpsp_psp_mutex_trylock()) { + /* Use mailbox mode to execute a command if there is only one command */ + if (vpsp_queue_cmd_size(prio) == 1) { + /* dequeue command from queue */ + vpsp_dequeue_cmd(prio, index, &cmd); + + ret = __vpsp_do_cmd_locked(cmd.cmd_id, phy_addr, (int *)psp_ret); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + } else { + ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, + index); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", + __func__, ret); + goto end; + } + } + } else { + /* Change the command to the running state if getting the mutex fails */ + psp_ret->index = index; + psp_ret->status = VPSP_RUNNING; + return 0; + } +end: + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_get_result); + +/* + * Send the virtual psp command to the PSP device and try to get the + * execution result. This interface and the vpsp_try_get_result + * interface are executed asynchronously. If the execution succeeds, + * the result is returned to the VM.
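(A typical caller invokes vpsp_try_do_cmd() once and then polls + * vpsp_try_get_result() while the returned status is VPSP_RUNNING.)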
If the execution fails, the + * vpsp_try_get_result interface will be used to obtain the result + * again later. + */ +int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) +{ + int ret = 0; + int rb_supported; + int index = -1; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + /* ringbuffer mode check and parse command prio */ + rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, + (struct vpsp_cmd *)&cmd); + if (rb_supported) { + /* fill command in ringbuffer's queue and get index */ + index = vpsp_fill_cmd_queue(prio, cmd, phy_addr, 0); + if (unlikely(index < 0)) { + /* do mailbox command if queuing failed */ + ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + goto end; + } + + /* try to get result from the ringbuffer command */ + ret = vpsp_try_get_result(prio, index, phy_addr, psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); + goto end; + } + } else { + /* mailbox mode */ + ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + } + +end: + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_do_cmd); diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h new file mode 100644 index 0000000000000000000000000000000000000000..664685338e2cbe4521f56b69cde009b130fc0319 --- /dev/null +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON CSV driver interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef __CCP_HYGON_CSV_DEV_H__ +#define __CCP_HYGON_CSV_DEV_H__ + +#include +#include + +#include "../sev-dev.h" + +#define CSV_FW_FILE "hygon/csv.fw" + +#define PSP_RBCTL_X86_WRITES BIT(31) +#define PSP_RBCTL_RBMODE_ACT BIT(30) +#define PSP_RBCTL_CLR_INTSTAT BIT(29) +#define PSP_RBTAIL_QHI_TAIL_SHIFT 16 +#define PSP_RBTAIL_QHI_TAIL_MASK 0x7FF0000 +#define PSP_RBTAIL_QLO_TAIL_MASK 0x7FF + +#define PSP_RBHEAD_QHI_HEAD_SHIFT 16 +#define PSP_RBHEAD_QHI_HEAD_MASK 0x7FF0000 +#define PSP_RBHEAD_QLO_HEAD_MASK 0x7FF + +#define PSP_RBHEAD_QPAUSE_INT_STAT BIT(30) + +extern u32 hygon_csv_build; +extern int csv_comm_mode; +extern const struct file_operations csv_fops; + +void csv_update_api_version(struct sev_user_data_status *status); +int csv_cmd_buffer_len(int cmd); +void csv_restore_mailbox_mode_postprocess(void); +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error); + +static inline bool csv_version_greater_or_equal(u32 build) +{ + return hygon_csv_build >= build; +} + +static inline bool csv_in_ring_buffer_mode(void) +{ + return csv_comm_mode == CSV_COMM_RINGBUFFER_ON; +} + +#endif /* __CCP_HYGON_CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/hct.c b/drivers/crypto/ccp/hygon/hct.c new file mode 100644 index 0000000000000000000000000000000000000000..719b3287d15156013d4f8fc737a54872133b24fb --- /dev/null +++ b/drivers/crypto/ccp/hygon/hct.c @@ -0,0 +1,2335 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2022 HYGON Corporation. All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_VFIO_MDEV) +#include +#endif + +/** + * VERSION_STRING modification instructions: + * 0.1 -- support hct/mdev mode. + * 0.2 -- support qemu virtualization. + * 0.3 -- support host-noiommu mode memory encryption function, + * and performance optimization in virtual machines (enable caching). + * 0.4 -- support compiling hct.ko when mdev module is disabled. + * 0.5 -- change the maximum number of supported ccps from 16 to 48. + */ + +#undef pr_fmt +#define pr_fmt(fmt) "hct: " fmt + +#define VERSION_STRING "0.5" +#define DRIVER_AUTHOR "HYGON Corporation" +#define VERSION_SIZE 16 + +#define MCCP_CLASS_NAME "hct" +#define MCCP_NAME "hct" +#define MCCP_STRING_LEN 16 + +#define MCCP_CONFIG_SPACE_SIZE 0xff + +#define MCCP_VFIO_PCI_OFFSET_SHIFT 40 +#define MCCP_VFIO_PCI_OFFSET_TO_INDEX(off) \ + (off >> MCCP_VFIO_PCI_OFFSET_SHIFT) +#define MCCP_VFIO_PCI_INDEX_TO_OFFSET(index) \ + ((u64)(index) << MCCP_VFIO_PCI_OFFSET_SHIFT) +#define MCCP_VFIO_PCI_OFFSET_MASK \ + (((u64)(1) << MCCP_VFIO_PCI_OFFSET_SHIFT) - 1) +#define vdev_to_mdev_state(vdev) \ + container_of((vdev), struct mdev_state, vdev) + +#define MCCP_SHARE_IOC_TYPE 'C' +#define MCCP_SHARE_OP 0x01 +#define MCCP_SHARE_OP_DMA_MAP 0x01 +#define MCCP_SHARE_OP_DMA_UNMAP_ALL 0x02 +#define MCCP_SHARE_OP_GET_ID 0x03 +#define MCCP_SHARE_OP_GET_PASID 0x04 +#define MCCP_SHARE_OP_DMA_UNMAP 0x05 +#define MCCP_SHARE_OP_GET_VERSION 0x06 + +#define MCCP_NOIOMMU_IOC_TYPE MCCP_SHARE_IOC_TYPE +#define MCCP_NOIOMMU_OP MCCP_SHARE_OP +#define MCCP_NOIOMMU_SET_MEMORY_WB 0x01 +#define MCCP_NOIOMMU_GET_SME_ACTIVE 0x02 + +#define MCCP_SHARE_IOMMU_MAGIC 0x3d6a9c5728633b9e + +#define PCI_RESOURCE_BAR2 2 +#define MCCP_DEV_ID_SIZE 8 + +/* fixed iova range for ccp dma.
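Buffers mapped entirely inside this 1 GiB window are treated as a + * single region shared across instances; see is_dma_share() and + * hct_add_dma_share_unsafe() below.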
*/ +#define MCCP_DMA_IOVA_OFFSET 0 +#define MCCP_DMA_IOVA_SIZE (1ul << 30) + +#define MCCP_INSTANCE_MAX 1024 +#define MCCP_INSTANCE_OFFSET 8 +#define MCCP_INSTANCE_MASK (~((1u << MCCP_INSTANCE_OFFSET) - 1)) +#define MCCP_PASID_SIZE (1 << 8) +#define MCCP_IOVA_MAX_SLOT 1024 +#define MCCP_DEV_MAX 48 +#define MCCP_DEV_QUEUE_MAX 8 +#define MCCP_DEV_QUEUE 5 +#define MCCP_QUEUES_MAX (MCCP_DEV_MAX * MCCP_DEV_QUEUE_MAX) +#define MCCP_QUEUE_NEED_INIT 0x01 +#define MCCP_SHARED_SIZE (MCCP_DEV_MAX * PAGE_SIZE) + +#define MCCP_MSIX_ENTRY_SIZE 2 +#define MCCP_NTB_VECTOR_NUM 1 +#define MCCP_PSP_VECTOR_NUM 2 +#define MCCP_GET_QUEUE_FLAG (0x55) +#define MCCP_PUT_QUEUE_FLAG (0xAA) +#define IRQ_EVENT_SIGNAL (1UL) +#define IRQ_EVENT_SIGFAL (0xFF) + +#define Q_MASK_REG 0x0000 +#define MCMD_Q_STATUS_INCR 0x1000 +#define MCMD_Q_TAIL_LO_BASE 0x0004 +#define MCMD_Q_HEAD_LO_BASE 0x0008 +#define MCMD_Q_INT_ENABLE_BASE 0x000C +#define MCMD_Q_INTERRUPT_STATUS_BASE 0x0010 +#define MCMD_Q_STATUS_BASE 0x0100 +#define MCMD_Q_INT_STATUS_BASE 0x0104 + +#define INT_COMPLETION 0x1 +#define INT_ERROR 0x2 +#define INT_QUEUE_STOPPED 0x4 +#define INT_EMPTY_QUEUE 0x8 +#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR) +#define MCMD_Q_ERROR(__qs) ((__qs) & 0x0000003f) + +#define PHY_ADDR_MASK 0x7FFFFFFFFFFF + +struct hct_shared_cfg { + unsigned int iova_slot[MCCP_IOVA_MAX_SLOT]; + unsigned int ccp_queue_state[MCCP_QUEUES_MAX]; + unsigned int ccps_ref[MCCP_DEV_MAX]; + unsigned int ccps_ref_lock; + int rsvd1[15]; + u64 qidx[MCCP_QUEUES_MAX]; + unsigned int ccp_state[MCCP_DEV_MAX]; +} __aligned(PAGE_SIZE); + +struct hct_dev_ctrl { + unsigned char op; + unsigned char rsvd[3]; + union { + unsigned char version[VERSION_SIZE]; + unsigned int id; + unsigned long sme_mask; + struct { + unsigned long vaddr; + unsigned long iova; + unsigned long size; + }; + struct { + unsigned long vt_addr; + unsigned int nr_pages; + }; + }; +}; + +struct hct_dma { + struct list_head next; + unsigned long vaddr; + unsigned long iova; + size_t size; + struct page **pages; + unsigned long npages; + unsigned int pfnmap_flag; +}; + +/* record the register address related to interrupt */ +struct hct_cmd_queue { + void __iomem *reg_control; + void __iomem *reg_tail_lo; + void __iomem *reg_head_lo; + void __iomem *reg_int_enable; + void __iomem *reg_interrupt_status; + void __iomem *reg_status; + void __iomem *reg_int_status; + struct mutex q_lock; + DECLARE_KFIFO_PTR(ectx_fifo, struct eventfd_ctx *); +} ____cacheline_aligned; + +struct hct_dev_ctx { + struct hct_cmd_queue cmd_q[MCCP_DEV_QUEUE_MAX]; + struct tasklet_struct irq_tasklet; + char devname[MCCP_STRING_LEN]; + void __iomem *io_regs; /* for BAR2 memory address */ + u32 q_count; + int irq; +} ____cacheline_aligned; + +struct hct_iommu { + unsigned long magic; + struct mutex lock; + struct pci_dev *pdev; + struct hct_dev_ctx dev_ctx; + unsigned long id; + unsigned long ref; +}; + +#if IS_ENABLED(CONFIG_VFIO_MDEV) +static struct hct_data { + struct hct_iommu iommu[MCCP_DEV_MAX]; + struct mutex lock; + unsigned long bitmap; + struct iommu_domain *domain; + int prot; + dma_addr_t dma_share_iova; + size_t dma_share_size; + unsigned long dma_share_ref; + unsigned long mdev_ref; + unsigned long ids[BITS_TO_LONGS(MCCP_INSTANCE_MAX)]; +} hct_data; + +static struct hct_share_cfg { + long ref; + struct mutex lock; + struct page *pages[MCCP_DEV_MAX]; + u64 pagecount; + void *vaddr; + u64 size; +} hct_share; + +static struct hct_dev { + dev_t vd_devt; + struct class *vd_class; + struct cdev vd_cdev; + struct 
device dev; + struct mdev_parent mdev_parent; +} hct_dev; + +struct mdev_region_info { + u64 start; + u64 phys_start; + u32 size; + u64 vfio_offset; +}; + +struct mdev_state { + struct vfio_device vdev; + struct mutex ops_lock; + struct mdev_device *mdev; + struct hct_iommu *iommu; + struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS]; + struct list_head next; + struct vfio_device_info dev_info; + unsigned long ref; + struct eventfd_ctx *trigger[MCCP_DEV_QUEUE_MAX]; + u8 efd_start; + u8 efd_count; +}; + +struct mdev_type hct_mdev_type = { + .sysfs_name = "1", + .pretty_name = "hct mdev type" +}; +struct mdev_type *hct_mdev_types[] = { + &hct_mdev_type +}; + +static void hct_cmd_queue_enable_interrupt(struct hct_dev_ctx *dev_ctx) +{ + unsigned int i; + + for (i = 0; i < dev_ctx->q_count; i++) + iowrite32(SUPPORTED_INTERRUPTS, dev_ctx->cmd_q[i].reg_int_enable); +} + +static void hct_cmd_queue_disable_interrupt(struct hct_dev_ctx *dev_ctx) +{ + unsigned int i; + + for (i = 0; i < dev_ctx->q_count; i++) + iowrite32(0x00, dev_ctx->cmd_q[i].reg_int_enable); +} + +static void hct_cmd_queue_intr_task(unsigned long data) +{ + struct hct_dev_ctx *dev_ctx = (struct hct_dev_ctx *)data; + u32 i, err, status; + + hct_cmd_queue_disable_interrupt(dev_ctx); + + for (i = 0; i < dev_ctx->q_count; i++) { + struct hct_cmd_queue *cmd_q = &dev_ctx->cmd_q[i]; + struct eventfd_ctx *trigger; + + status = ioread32(cmd_q->reg_interrupt_status); + if (status) { + if (status & INT_ERROR) { + /* log the queue error code for debug */ + err = ioread32(cmd_q->reg_status); + pr_err("Irq fail, errcode = %d.\n", MCMD_Q_ERROR(err)); + while (kfifo_get(&cmd_q->ectx_fifo, &trigger)) + eventfd_signal(trigger, IRQ_EVENT_SIGFAL); + } else { + while (kfifo_get(&cmd_q->ectx_fifo, &trigger)) + eventfd_signal(trigger, IRQ_EVENT_SIGNAL); + } + + iowrite32(status, cmd_q->reg_interrupt_status); + } + } + + hct_cmd_queue_enable_interrupt(dev_ctx); +} + +static irqreturn_t hct_cmd_queue_intr_handler(int irq, void *arg) +{ + struct hct_dev_ctx *dev_ctx = (struct hct_dev_ctx *)arg; + + tasklet_schedule(&dev_ctx->irq_tasklet); + return IRQ_HANDLED; +} + +static int hct_dev_cmd_queue_init(struct pci_dev *pdev, struct hct_dev_ctx *dev_ctx, int idx) +{ + struct hct_cmd_queue *cmd_q; + unsigned long addr, len; + unsigned int retval, qmr; + int i, ret; + + if (!pdev || !dev_ctx) + return -EINVAL; + + memset(dev_ctx, 0, sizeof(*dev_ctx)); + + ret = pci_enable_device(pdev); + if (ret) + return -EINVAL; + + addr = pci_resource_start(pdev, PCI_RESOURCE_BAR2); + len = pci_resource_len(pdev, PCI_RESOURCE_BAR2); + dev_ctx->io_regs = ioremap(addr, len); + if (!dev_ctx->io_regs) + return -ENOMEM; + + pci_set_master(pdev); + retval = pci_alloc_irq_vectors(pdev, 1, MCCP_MSIX_ENTRY_SIZE, PCI_IRQ_MSIX); + if (retval != MCCP_NTB_VECTOR_NUM && retval != MCCP_PSP_VECTOR_NUM) + return -ENOMEM; + + snprintf(dev_ctx->devname, MCCP_STRING_LEN, "hct-ccp-%d", idx); + dev_ctx->irq = pci_irq_vector(pdev, retval - 1); + /* For request_irq(), the fourth parameter (dev_name) must be a + * global or static variable.
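Here dev_ctx->devname satisfies this: it is embedded in the + * statically allocated hct_data.iommu[] array, so it outlives the IRQ.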
+ */ + ret = request_irq(dev_ctx->irq, hct_cmd_queue_intr_handler, 0, dev_ctx->devname, dev_ctx); + if (ret) { + pci_free_irq_vectors(pdev); + dev_ctx->irq = 0; + return ret; + } + + tasklet_init(&dev_ctx->irq_tasklet, hct_cmd_queue_intr_task, (unsigned long)dev_ctx); + + qmr = ioread32(dev_ctx->io_regs + Q_MASK_REG); + if (qmr == 0) { + iowrite32(0x1f, dev_ctx->io_regs + Q_MASK_REG); + qmr = ioread32(dev_ctx->io_regs + Q_MASK_REG); + } + for (i = 0; i < MCCP_DEV_QUEUE_MAX; i++) { + if (!(qmr & (1 << i))) + continue; + + cmd_q = &dev_ctx->cmd_q[dev_ctx->q_count++]; + + mutex_init(&cmd_q->q_lock); + ret = kfifo_alloc(&cmd_q->ectx_fifo, MCCP_INSTANCE_MAX, GFP_KERNEL); + if (ret) + return -ENOMEM; + + cmd_q->reg_control = dev_ctx->io_regs + MCMD_Q_STATUS_INCR * (i + 1); + cmd_q->reg_tail_lo = cmd_q->reg_control + MCMD_Q_TAIL_LO_BASE; + cmd_q->reg_head_lo = cmd_q->reg_control + MCMD_Q_HEAD_LO_BASE; + cmd_q->reg_int_enable = cmd_q->reg_control + MCMD_Q_INT_ENABLE_BASE; + cmd_q->reg_interrupt_status = cmd_q->reg_control + MCMD_Q_INTERRUPT_STATUS_BASE; + cmd_q->reg_status = cmd_q->reg_control + MCMD_Q_STATUS_BASE; + cmd_q->reg_int_status = cmd_q->reg_control + MCMD_Q_INT_STATUS_BASE; + } + + return (dev_ctx->q_count >= 0) ? 0 : -1; +} + +static int hct_iommu_alloc(struct pci_dev *pdev) +{ + unsigned long i; + int ret = -EINVAL; + + mutex_lock(&hct_data.lock); + + i = find_first_zero_bit(&hct_data.bitmap, MCCP_DEV_MAX); + if (i != MCCP_DEV_MAX) + bitmap_set(&hct_data.bitmap, i, 1); + + if (device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY)) + hct_data.prot |= IOMMU_CACHE; + + mutex_unlock(&hct_data.lock); + + if (i == MCCP_DEV_MAX) + return -EINVAL; + + ret = iommu_attach_device(hct_data.domain, &pdev->dev); + if (ret) { + mutex_lock(&hct_data.lock); + bitmap_clear(&hct_data.bitmap, i, 1); + mutex_unlock(&hct_data.lock); + } else { + mutex_lock(&hct_data.iommu[i].lock); + hct_data.iommu[i].pdev = pdev; + hct_data.iommu[i].id = i; + hct_data.iommu[i].ref = 0; + hct_data.iommu[i].magic = MCCP_SHARE_IOMMU_MAGIC; + pci_set_drvdata(pdev, &hct_data.iommu[i]); + + ret = hct_dev_cmd_queue_init(pdev, &hct_data.iommu[i].dev_ctx, i); + mutex_unlock(&hct_data.iommu[i].lock); + } + + return ret; +} + +static void hct_iommu_free(struct hct_iommu *iommu) +{ + struct iommu_domain *domain; + + if (!iommu || iommu->magic != MCCP_SHARE_IOMMU_MAGIC) + return; + + domain = iommu_get_domain_for_dev(&iommu->pdev->dev); + + mutex_lock(&iommu->lock); + if (iommu->pdev && domain == hct_data.domain) + iommu_detach_device(domain, &iommu->pdev->dev); + iommu->pdev = NULL; + iommu->magic = 0; + mutex_unlock(&iommu->lock); + + mutex_lock(&hct_data.lock); + if (iommu->id < MCCP_DEV_MAX) + bitmap_clear(&hct_data.bitmap, iommu->id, 1); + mutex_unlock(&hct_data.lock); +} + +static int handle_pci_cfg_read(struct mdev_state *mdev_state, int offset, + __le32 *val, int count) +{ + u32 tmp_val = 0; + int ret = -EINVAL; + struct pci_dev *pdev = mdev_state->iommu->pdev; + + if (!mdev_state->mdev || !pdev) { + pr_err("hct: invalid dev or pdev\n"); + return ret; + } + + if (count == 1) { + u8 tmp; + + ret = pci_user_read_config_byte(pdev, offset, &tmp); + tmp_val = tmp; + } else if (count == 2) { + u16 tmp; + + ret = pci_user_read_config_word(pdev, offset, &tmp); + tmp_val = tmp; + } else if (count == 4) + ret = pci_user_read_config_dword(pdev, offset, &tmp_val); + + *val = cpu_to_le32(tmp_val); + + return ret; +} + +static int handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset, + u8 *buf, u32 count) +{ + u32 tmp_val = 
le32_to_cpu(*(u32 *)buf); + int ret = -EINVAL; + struct pci_dev *pdev = mdev_state->iommu->pdev; + + + if (!mdev_state->mdev || !pdev) { + pr_err("hct: invalid dev or pdev\n"); + return ret; + } + + if (count == 1) + ret = pci_user_write_config_byte(pdev, offset, tmp_val); + else if (count == 2) + ret = pci_user_write_config_word(pdev, offset, tmp_val); + else if (count == 4) + ret = pci_user_write_config_dword(pdev, offset, tmp_val); + + return ret; +} + +static ssize_t hct_access(struct mdev_device *mdev, u8 *buf, size_t count, + loff_t pos, bool is_write) +{ + struct mdev_state *mdev_state; + unsigned int index; + loff_t offset; + int ret = 0; + + if (!mdev || !buf) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) { + pr_err("%s mdev_state not found\n", __func__); + return -EINVAL; + } + + mutex_lock(&mdev_state->ops_lock); + + index = MCCP_VFIO_PCI_OFFSET_TO_INDEX(pos); + offset = pos & MCCP_VFIO_PCI_OFFSET_MASK; + switch (index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + if (is_write) + ret = handle_pci_cfg_write(mdev_state, offset, buf, count); + else + ret = handle_pci_cfg_read(mdev_state, offset, (__le32 *)buf, count); + break; + default: + ret = -1; + } + + if (!ret) + ret = count; + + mutex_unlock(&mdev_state->ops_lock); + + return ret; +} + +static int hct_mdev_state_init(struct mdev_state *mdev_state) +{ + unsigned long *bitmap = &hct_data.bitmap; + struct hct_iommu *iommu = hct_data.iommu; + unsigned long ref = -1ul; + int i, n = -1; + int ret = 0; + + if (!mdev_state) + return -EINVAL; + + mutex_init(&mdev_state->ops_lock); + + mutex_lock(&hct_data.lock); + if (hct_data.mdev_ref > 0) { + mutex_unlock(&hct_data.lock); + return -EBUSY; + } + + for (i = 0; i < MCCP_DEV_MAX; i++) { + if (test_bit(i, bitmap)) { + if (ref > iommu[i].ref) { + n = i; + ref = iommu[i].ref; + } + } + } + + if (n >= 0 && n < MCCP_DEV_MAX) { + mdev_state->iommu = &iommu[n]; + mdev_state->ref = iommu[n].ref++; + } else + ret = -EINVAL; + mutex_unlock(&hct_data.lock); + + return ret; +} + +static int hct_init_dev(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + struct mdev_device *mdev = to_mdev_device(vdev->dev); + int ret = 0; + + if (!mdev || !mdev_state) + return -EINVAL; + + ret = hct_mdev_state_init(mdev_state); + if (ret) + return ret; + + mdev_state->mdev = mdev; + return 0; +} + +static void hct_release_dev(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + struct mdev_device *mdev = to_mdev_device(vdev->dev); + + mutex_lock(&hct_data.lock); + if (hct_data.mdev_ref > 0) + pr_warn("The mdev device is in use.\n"); + else { + mdev_state->iommu->ref--; + dev_set_drvdata(&mdev->dev, NULL); + } + mutex_unlock(&hct_data.lock); +} + +static ssize_t hct_read(struct vfio_device *vdev, char __user *buf, + size_t count, loff_t *ppos) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + unsigned int done = 0; + int ret; + u32 val; + size_t filled; + + while (count) { + if (count >= 4 && !(*ppos % 4)) { + ret = hct_access(mdev, (u8 *)&val, sizeof(u32), *ppos, false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(u32))) + goto read_err; + + filled = 4; + } else if (count >= 2 && !(*ppos % 2)) { + ret = hct_access(mdev, (u8 *)&val, sizeof(u16), *ppos, false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(u16))) + goto read_err; + + filled = 2; + } else { + ret = hct_access(mdev, (u8 *)&val, sizeof(u8), *ppos, false); + if (ret <= 0) + goto 
read_err; + + if (copy_to_user(buf, &val, sizeof(u8))) + goto read_err; + + filled = 1; + } + + count -= filled; + done += filled; + *ppos += filled; + buf += filled; + } + + return done; + +read_err: + return -EFAULT; +} + +static ssize_t hct_write(struct vfio_device *vdev, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + unsigned int done = 0; + int ret; + u64 val; + u8 idx; + + while (count) { + size_t filled; + + if (count == MCCP_DEV_ID_SIZE && *ppos == MCCP_GET_QUEUE_FLAG) { + struct mdev_state *mdev_state; + struct hct_dev_ctx *dev_ctx; + struct hct_cmd_queue *cmd_q; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + goto write_err; + + if (copy_from_user(&val, buf, sizeof(u64)) || + val >= MCCP_DEV_QUEUE_MAX || + val < mdev_state->efd_start) + goto write_err; + + idx = val - mdev_state->efd_start; + dev_ctx = &mdev_state->iommu->dev_ctx; + cmd_q = &dev_ctx->cmd_q[idx]; + + mutex_lock(&cmd_q->q_lock); + if (kfifo_avail(&cmd_q->ectx_fifo)) + kfifo_put(&cmd_q->ectx_fifo, mdev_state->trigger[idx]); + mutex_unlock(&cmd_q->q_lock); + + filled = MCCP_DEV_ID_SIZE; + } else if (count >= 4 && !(*ppos % 4)) { + if (copy_from_user(&val, buf, sizeof(u32))) + goto write_err; + + ret = hct_access(mdev, (u8 *)&val, sizeof(u32), *ppos, true); + if (ret <= 0) + goto write_err; + + filled = 4; + } else if (count >= 2 && !(*ppos % 2)) { + if (copy_from_user(&val, buf, sizeof(u16))) + goto write_err; + + ret = hct_access(mdev, (u8 *)&val, sizeof(u16), *ppos, true); + if (ret <= 0) + goto write_err; + + filled = 2; + } else { + if (copy_from_user(&val, buf, sizeof(u8))) + goto write_err; + + ret = hct_access(mdev, (u8 *)&val, sizeof(u8), *ppos, true); + if (ret <= 0) + goto write_err; + + filled = 1; + } + count -= filled; + done += filled; + *ppos += filled; + buf += filled; + } + + return done; +write_err: + return -EFAULT; +} + +static int hct_get_region_info(struct mdev_device *mdev, + struct vfio_region_info *region_info, + u16 *cap_type_id, void **cap_type) +{ + struct mdev_state *mdev_state = NULL; + struct pci_dev *pdev = NULL; + unsigned int size = 0; + u32 bar_index; + + if (!mdev) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + return -ENODEV; + + bar_index = region_info->index; + if (bar_index >= VFIO_PCI_NUM_REGIONS) + return -EINVAL; + + pdev = mdev_state->iommu->pdev; + mutex_lock(&mdev_state->ops_lock); + + switch (bar_index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + size = pdev->cfg_size; + break; + case VFIO_PCI_BAR0_REGION_INDEX ... 
VFIO_PCI_BAR5_REGION_INDEX: + size = pci_resource_len(pdev, bar_index); + break; + default: + size = 0; + break; + } + + mdev_state->region_info[bar_index].size = size; + mdev_state->region_info[bar_index].vfio_offset = + MCCP_VFIO_PCI_INDEX_TO_OFFSET(bar_index); + + region_info->size = size; + region_info->offset = MCCP_VFIO_PCI_INDEX_TO_OFFSET(bar_index); + region_info->flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + if (size >= PAGE_SIZE) + region_info->flags |= VFIO_REGION_INFO_FLAG_MMAP; + + mutex_unlock(&mdev_state->ops_lock); + return 0; +} + +static int hct_get_irq_info(struct mdev_device *mdev, + struct vfio_irq_info *irq_info) +{ + switch (irq_info->index) { + case VFIO_PCI_INTX_IRQ_INDEX: + case VFIO_PCI_MSI_IRQ_INDEX: + case VFIO_PCI_MSIX_IRQ_INDEX: + case VFIO_PCI_REQ_IRQ_INDEX: + break; + + default: + return -EINVAL; + } + + irq_info->flags = VFIO_IRQ_INFO_EVENTFD; + irq_info->count = 1; + + if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX) + irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE | + VFIO_IRQ_INFO_AUTOMASKED); + else + irq_info->flags |= VFIO_IRQ_INFO_NORESIZE; + + return 0; +} + +static int hct_get_device_info(struct mdev_device *mdev, + struct vfio_device_info *dev_info) +{ + dev_info->flags = VFIO_DEVICE_FLAGS_PCI; + dev_info->num_regions = VFIO_PCI_NUM_REGIONS; + dev_info->num_irqs = VFIO_PCI_NUM_IRQS; + + return 0; +} + +/* each ccp vq corresponding to one eventfd */ +static int hct_set_irq_efds(struct mdev_device *mdev, + struct vfio_irq_set *hdr, + void *data) +{ + struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); + int *fd = (int *)data; + int i; + + if (!mdev_state || !data) + return -EINVAL; + + if (hdr->index != VFIO_PCI_MSIX_IRQ_INDEX) + return -EINVAL; + + if ((hdr->flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) != VFIO_IRQ_SET_ACTION_TRIGGER) + return -EINVAL; + + if (hdr->start + hdr->count > MCCP_DEV_QUEUE_MAX) + return -EINVAL; + + mdev_state->efd_start = hdr->start; + for (i = 0; i < hdr->count; i++) { + struct eventfd_ctx *trigger; + + trigger = eventfd_ctx_fdget(fd[i]); + if (IS_ERR(trigger)) + return -1; + + mdev_state->trigger[mdev_state->efd_count++] = trigger; + } + + return 0; +} + +static int hct_reset(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state = NULL; + + if (!mdev) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + return -EINVAL; + + return 0; +} + +static long hct_ioctl(struct vfio_device *vdev, unsigned int cmd, + unsigned long arg) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + struct mdev_state *mdev_state = NULL; + unsigned long minsz; + int ret = 0; + + if (!mdev) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + return -ENODEV; + + if (!mdev_state->iommu || !mdev_state->iommu->pdev) + return -EIO; + + switch (cmd) { + case VFIO_DEVICE_GET_INFO: + { + struct vfio_device_info info; + + minsz = offsetofend(struct vfio_device_info, num_irqs); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + ret = hct_get_device_info(mdev, &info); + if (ret) + return ret; + + memcpy(&mdev_state->dev_info, &info, sizeof(info)); + + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + + return 0; + } + case VFIO_DEVICE_GET_REGION_INFO: + { + struct vfio_region_info info; + u16 cap_type_id = 0; + void *cap_type = NULL; + + minsz = offsetofend(struct vfio_region_info, offset); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return 
-EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + ret = hct_get_region_info(mdev, &info, &cap_type_id, + &cap_type); + if (ret) + return ret; + + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + + return 0; + } + + case VFIO_DEVICE_GET_IRQ_INFO: + { + struct vfio_irq_info info; + + minsz = offsetofend(struct vfio_irq_info, count); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if ((info.argsz < minsz) || + (info.index >= mdev_state->dev_info.num_irqs)) + return -EINVAL; + + ret = hct_get_irq_info(mdev, &info); + if (ret) + return ret; + + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + + return 0; + } + case VFIO_DEVICE_SET_IRQS: + { + struct vfio_irq_set hdr; + u8 *data = NULL; + size_t data_size = 0; + + minsz = offsetofend(struct vfio_irq_set, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + ret = vfio_set_irqs_validate_and_prepare(&hdr, mdev_state->dev_info.num_irqs, + mdev_state->dev_info.num_irqs, &data_size); + if (ret) + return ret; + + if (data_size) { + data = memdup_user((void __user *)(arg + minsz), data_size); + if (IS_ERR(data)) + return PTR_ERR(data); + } + + mutex_lock(&mdev_state->ops_lock); + ret = hct_set_irq_efds(mdev, &hdr, data); + mutex_unlock(&mdev_state->ops_lock); + kfree(data); + + return ret; + } + case VFIO_DEVICE_RESET: + return hct_reset(mdev); + } + return -ENOTTY; +} + +static int hct_open(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + + if (!mdev_state) + return -ENODEV; + + if (!mdev_state->iommu || !mdev_state->iommu->pdev) + return -EIO; + + mutex_lock(&hct_data.lock); + hct_data.mdev_ref++; + mutex_unlock(&hct_data.lock); + + return 0; +} + +static void hct_close(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + int i; + + if (!mdev_state || !mdev_state->iommu) + return; + + for (i = 0; i < mdev_state->efd_count; i++) + eventfd_ctx_put(mdev_state->trigger[i]); + mdev_state->efd_count = 0; + + mutex_lock(&hct_data.lock); + hct_data.mdev_ref--; + mutex_unlock(&hct_data.lock); +} + +static ssize_t address_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mdev_state *mdev_state = dev_get_drvdata(dev); + struct pci_dev *pdev = NULL; + ssize_t size; + + if (!mdev_state || !mdev_state->iommu) + goto exit; + + mutex_lock(&mdev_state->iommu->lock); + if (!mdev_state->iommu->pdev || + mdev_state->iommu->magic != MCCP_SHARE_IOMMU_MAGIC) { + mutex_unlock(&mdev_state->iommu->lock); + goto exit; + } + pdev = mdev_state->iommu->pdev; + size = sprintf(buf, "%04x:%02x:%02x.%x", + pci_domain_nr(pdev->bus), + pdev->bus->number, + 0x00ff & (pdev->devfn >> 8), + 0x00ff & pdev->devfn); + mutex_unlock(&mdev_state->iommu->lock); + return size; + +exit: + return sprintf(buf, "\n"); +} + +static ssize_t id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mdev_state *mdev_state = dev_get_drvdata(dev); + ssize_t size; + + if (!mdev_state || !mdev_state->iommu) + goto exit; + + mutex_lock(&mdev_state->iommu->lock); + if (!mdev_state->iommu->pdev || + mdev_state->iommu->magic != MCCP_SHARE_IOMMU_MAGIC) { + mutex_unlock(&mdev_state->iommu->lock); + goto exit; + } + + size = sprintf(buf, "%lu", mdev_state->iommu->id); + mutex_unlock(&mdev_state->iommu->lock); + return size; + +exit: + return sprintf(buf, "\n"); +} + +static ssize_t idx_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + 
struct mdev_state *mdev_state = dev_get_drvdata(dev); + ssize_t size; + + if (!mdev_state || !mdev_state->iommu) + goto exit; + + mutex_lock(&mdev_state->iommu->lock); + if (!mdev_state->iommu->pdev || + mdev_state->iommu->magic != MCCP_SHARE_IOMMU_MAGIC) { + mutex_unlock(&mdev_state->iommu->lock); + goto exit; + } + + size = sprintf(buf, "%lu", mdev_state->ref); + mutex_unlock(&mdev_state->iommu->lock); + return size; + +exit: + return sprintf(buf, "\n"); +} + +static DEVICE_ATTR_RO(address); +static DEVICE_ATTR_RO(id); +static DEVICE_ATTR_RO(idx); + +static struct attribute *mdev_dev_attrs[] = { + &dev_attr_address.attr, + &dev_attr_id.attr, + &dev_attr_idx.attr, + NULL, +}; + +static const struct attribute_group mdev_dev_group = { + .name = "vendor", + .attrs = mdev_dev_attrs, +}; + +static const struct attribute_group *hct_mdev_groups[] = { + &mdev_dev_group, + NULL, +}; + +static void hct_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void hct_mmap_close(struct vm_area_struct *vma) +{ +} + +static vm_fault_t hct_mmap_fault(struct vm_fault *vmf) +{ + vm_fault_t ret = VM_FAULT_NOPAGE; + struct vm_area_struct *vma = vmf->vma; + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, pgprot_decrypted(vma->vm_page_prot))) + ret = VM_FAULT_SIGBUS; + return ret; +} + +static const struct vm_operations_struct hct_mmap_ops = { + .open = hct_mmap_open, + .close = hct_mmap_close, + .fault = hct_mmap_fault, +}; + +static int hct_mmap(struct vfio_device *vdev, struct vm_area_struct *vma) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + struct mdev_state *mds = dev_get_drvdata(&mdev->dev); + struct pci_dev *pdev = mds->iommu->pdev; + unsigned int index; + + index = vma->vm_pgoff >> (40 - PAGE_SHIFT); + vma->vm_private_data = mdev; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_pgoff = pci_resource_start(pdev, index) >> PAGE_SHIFT; + vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); + vma->vm_ops = &hct_mmap_ops; + return 0; +} + +static const struct vfio_device_ops hct_mdev_ops = { + .init = hct_init_dev, + .release = hct_release_dev, + .open_device = hct_open, + .close_device = hct_close, + .read = hct_read, + .write = hct_write, + .ioctl = hct_ioctl, + .mmap = hct_mmap, + .bind_iommufd = vfio_iommufd_emulated_bind, + .unbind_iommufd = vfio_iommufd_emulated_unbind, + .attach_ioas = vfio_iommufd_emulated_attach_ioas, + .detach_ioas = vfio_iommufd_emulated_detach_ioas, +}; + +static int hct_mdev_probe(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state = NULL; + int ret; + + if (!mdev) + return -EINVAL; + + mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev, + &hct_mdev_ops); + if (IS_ERR(mdev_state)) + return PTR_ERR(mdev_state); + + ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev); + if (ret) { + vfio_put_device(&mdev_state->vdev); + return ret; + } + + dev_set_drvdata(&mdev->dev, mdev_state); + return 0; +} + +static void hct_mdev_remove(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); + + vfio_unregister_group_dev(&mdev_state->vdev); + vfio_put_device(&mdev_state->vdev); +} + +static unsigned int hct_mdev_get_available(struct mdev_type *mtype) +{ + return MCCP_INSTANCE_MAX; +} + +static ssize_t hct_mdev_show_description(struct mdev_type *mtype, char *buf) +{ + return sprintf(buf, "This is HYGON CCP device!"); +} + +struct mdev_driver hct_mdev_driver = { + .device_api = 
VFIO_DEVICE_API_PCI_STRING, + .driver = { + .name = "hct_mdev", + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, + .dev_groups = hct_mdev_groups, + }, + .probe = hct_mdev_probe, + .remove = hct_mdev_remove, + .get_available = hct_mdev_get_available, + .show_description = hct_mdev_show_description, +}; + +struct hct_private { + struct list_head head; + struct mutex lock; + unsigned int id; +}; + +static int hct_share_open(struct inode *inode, struct file *file) +{ + int ret = 0; + struct hct_private *private; + unsigned int id; + + private = kzalloc(sizeof(*private), GFP_KERNEL); + if (!private) + return -ENOMEM; + + mutex_lock(&hct_data.lock); + id = (unsigned int)find_first_zero_bit(hct_data.ids, MCCP_INSTANCE_MAX); + if (id < MCCP_INSTANCE_MAX) + bitmap_set(hct_data.ids, id, 1); + mutex_unlock(&hct_data.lock); + + if (id >= MCCP_INSTANCE_MAX) { + kfree(private); + return -EBUSY; + } + + mutex_lock(&hct_share.lock); + hct_share.ref++; + hct_share.pagecount = MCCP_DEV_MAX; + mutex_unlock(&hct_share.lock); + + file->private_data = private; + /* + * At user space, each process is assigned a different number + * which cannot be 0, as the identifier for the process. + * The number is assigned by id, so the value of id needs to + * start from 1, and cannot be 0. + */ + private->id = (++id) << MCCP_INSTANCE_OFFSET; + INIT_LIST_HEAD(&private->head); + mutex_init(&private->lock); + + return ret; +} + +static bool is_invalid_reserved_pfn(unsigned long pfn) +{ + if (pfn_valid(pfn)) + return PageReserved(pfn_to_page(pfn)); + + return true; +} + +static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ + int ret; + + ret = follow_pfn(vma, vaddr, pfn); + if (ret) { + bool unlocked = false; + + ret = fixup_user_fault(mm, vaddr, + FAULT_FLAG_REMOTE | + (write_fault ? FAULT_FLAG_WRITE : 0), + &unlocked); + + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pfn(vma, vaddr, pfn); + } + + return ret; +} + +static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, + int prot, unsigned long *pfn) +{ + struct page *page[1]; + struct vm_area_struct *vma; + unsigned int flags = 0; + int ret; + + if (prot & IOMMU_WRITE) + flags |= FOLL_WRITE; + + mmap_read_lock(mm); + ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM, + page, NULL); + if (ret == 1) { + *pfn = page_to_pfn(page[0]); + ret = 0; + goto done; + } + + vaddr = untagged_addr(vaddr); + +retry: + vma = find_vma_intersection(mm, vaddr, vaddr + 1); + + if (vma && vma->vm_flags & VM_PFNMAP) { + ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret && !is_invalid_reserved_pfn(*pfn)) + ret = -EFAULT; + } +done: + mmap_read_unlock(mm); + + return ret; +} + +struct page **hct_pin_memory(struct hct_private *private, unsigned long uaddr, + unsigned long ulen, unsigned long *n) +{ + unsigned long npages, size; + int npinned; + struct page **pages; + unsigned long first, last; + + if (ulen == 0 || uaddr + ulen < uaddr) + return NULL; + + first = (uaddr & PAGE_MASK) >> PAGE_SHIFT; + last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT; + npages = (last - first + 1); + + if (WARN_ON_ONCE(npages > INT_MAX)) + return NULL; + + size = npages * sizeof(struct page *); + if (size > PAGE_SIZE) + pages = vmalloc(size); + else + pages = kmalloc(size, GFP_KERNEL); + + if (!pages) + return NULL; + + /* Pin the user virtual address. 
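FOLL_WRITE is requested so that the + * device may also write into the pinned pages.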
*/ + npinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages); + if (npinned != npages) + goto err; + + *n = npages; + return pages; + +err: + if (npinned > 0) + unpin_user_pages(pages, npinned); + kvfree(pages); + return NULL; +} + +static void hct_unpin_memory(struct hct_private *private, struct page **pages, + unsigned long npages) +{ + unpin_user_pages(pages, npages); + kvfree(pages); +} + +static inline int is_dma_share(dma_addr_t dma_iova, size_t dma_size) +{ + if (dma_iova >= MCCP_DMA_IOVA_OFFSET && + dma_iova + dma_size <= MCCP_DMA_IOVA_OFFSET + MCCP_DMA_IOVA_SIZE) + return 1; + else + return 0; +} + +static int hct_add_dma_share_unsafe(dma_addr_t dma_iova, size_t dma_size) +{ + int ret = 0; + + if (!is_dma_share(dma_iova, dma_size)) + return 0; + + if (!hct_data.dma_share_size) { + hct_data.dma_share_iova = dma_iova; + hct_data.dma_share_size = dma_size; + } + + if (dma_iova != hct_data.dma_share_iova || + dma_size != hct_data.dma_share_size) + ret = -EINVAL; + else + hct_data.dma_share_ref++; + + return ret; +} + +static int hct_unmap_dma_share_unsafe(dma_addr_t dma_iova, size_t dma_size) +{ + if (!is_dma_share(dma_iova, dma_size)) + return -EINVAL; + + if (hct_data.dma_share_size) { + if (hct_data.dma_share_iova == dma_iova && + hct_data.dma_share_size == dma_size) + hct_data.dma_share_ref--; + + if (hct_data.dma_share_ref == 0) { + iommu_unmap(hct_data.domain, hct_data.dma_share_iova, + hct_data.dma_share_size); + hct_data.dma_share_size = 0; + } + } + + return 0; +} + +static int hct_iommu_iova_check_unsafe(dma_addr_t dma_iova, size_t dma_size, + phys_addr_t phys_addr, + struct iommu_domain *domain) +{ + dma_addr_t iova; + int ret = 0; + size_t mapped = 0; + + iova = dma_iova; + while (iova < dma_iova + dma_size) { + phys_addr_t phys; + + phys = iommu_iova_to_phys(domain, iova); + if (phys) { + if ((phys_addr & PHY_ADDR_MASK) != (phys & PHY_ADDR_MASK)) { + pr_err("iova=0x%llx phys_addr=0x%llx phys=0x%llx, check fail.\n", + iova, phys_addr, phys); + ret = -1; + break; + } + mapped += PAGE_SIZE; + } + iova += PAGE_SIZE; + phys_addr += PAGE_SIZE; + } + + if (ret == 0 && mapped == dma_size) + ret = 1; + + return ret; +} + +static unsigned long get_num_contig_pages(unsigned long idx, + struct page **inpages, unsigned long npages) +{ + unsigned long paddr, next_paddr; + unsigned long i = idx + 1, pages = 1; + + /* find the number of contiguous pages starting from idx */ + paddr = page_to_phys(inpages[idx]); + while (i < npages) { + next_paddr = page_to_phys(inpages[i++]); + if ((paddr + PAGE_SIZE) == next_paddr) { + pages++; + paddr = next_paddr; + continue; + } + break; + } + + return pages; +} + +static struct hct_dma *hct_find_dma(struct hct_private *private, + dma_addr_t start, size_t size) +{ + struct hct_dma *dma, *tmp; + + list_for_each_entry_safe(dma, tmp, &private->head, next) { + if (dma->iova <= start && + dma->iova + dma->size >= start + size) + return dma; + } + + return NULL; +} + +/* + * Turns out AMD IOMMU has a page table bug where it won't map large pages + * to a region that previously mapped smaller pages. This should be fixed + * soon, so this is just a temporary workaround to break mappings down into + * PAGE_SIZE. Better to map smaller pages than nothing. 
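Hence map_try_harder() below retries the same range as individual + * PAGE_SIZE mappings and unwinds them on failure.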
+ */ +static int map_try_harder(struct iommu_domain *domain, dma_addr_t iova, + unsigned long pfn, long npage, int prot) +{ + long i; + int ret = 0; + + for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { + ret = iommu_map(domain, iova, + (phys_addr_t)pfn << PAGE_SHIFT, + PAGE_SIZE, prot, GFP_KERNEL); + if (ret) + break; + } + + for (; i < npage && i > 0; i--, iova -= PAGE_SIZE) + iommu_unmap(domain, iova, PAGE_SIZE); + + return ret; +} + +/* + * only handles I/O memory, i.e. vmas with VM_PFNMAP set in vm_flags + */ +static int hct_iommu_pfnmap(struct hct_private *private, struct hct_dma *dma) +{ + unsigned long pfn; + unsigned long vaddr; + dma_addr_t iova; + size_t mapped_size = 0; + size_t size; + int ret = 0; + + if (!private || !dma) + return -EINVAL; + + dma->pfnmap_flag = 1; + vaddr = dma->vaddr; + iova = dma->iova; + size = dma->size; + + mutex_lock(&hct_data.lock); + while (size) { + ret = vaddr_get_pfn(current->mm, vaddr, hct_data.prot, &pfn); + if (ret) + goto map_fail; + + ret = iommu_map(hct_data.domain, iova, + (phys_addr_t)pfn << PAGE_SHIFT, + 1 << PAGE_SHIFT, hct_data.prot, + GFP_KERNEL); + if (ret) + goto map_fail; + + size -= 1 << PAGE_SHIFT; + vaddr += 1 << PAGE_SHIFT; + iova += 1 << PAGE_SHIFT; + mapped_size += 1 << PAGE_SHIFT; + } + mutex_unlock(&hct_data.lock); + + list_add(&dma->next, &private->head); + return 0; + +map_fail: + mutex_unlock(&hct_data.lock); + iommu_unmap(hct_data.domain, dma->iova, mapped_size); + return ret; +} + +static int hct_iommu_map(struct hct_private *private, unsigned long vaddr, + dma_addr_t dma_iova, size_t dma_size) +{ + struct hct_dma *dma; + struct page **pages; + unsigned long n, i, npages; + dma_addr_t iova, iova_end, iova_next; + int ret = 0; + size_t mapped_size = 0; + size_t iova_size = dma_size; + + if (!dma_size || (vaddr | dma_iova | dma_size) & (PAGE_SIZE - 1)) + return -EINVAL; + + if (hct_find_dma(private, dma_iova, dma_size)) + return 0; + + dma = kzalloc(sizeof(*dma), GFP_KERNEL); + if (!dma) + return -ENOMEM; + + pages = hct_pin_memory(private, vaddr, dma_size, &n); + if (!pages) { + /* Pinning failed, so assume the vma has VM_PFNMAP set. */ + dma->vaddr = vaddr; + dma->iova = dma_iova; + dma->size = dma_size; + ret = hct_iommu_pfnmap(private, dma); + if (ret) + kfree(dma); + return ret; + } + + dma->vaddr = vaddr; + dma->iova = dma_iova; + dma->pages = pages; + dma->size = dma_size; + dma->npages = n; + + iova = dma_iova; + iova_end = dma_iova + dma_size; + iova_size = dma_size; + + mutex_lock(&hct_data.lock); + for (i = 0; iova < iova_end && i < n; iova = iova_next, i += npages) { + size_t len; + phys_addr_t phys; + + npages = get_num_contig_pages(i, pages, n); + + /* When npages is 524288, npages * PAGE_SIZE is 0x80000000 + * (bit 31 set). + * If npages were greater than 524288 and len were an int, + * len would become a negative value.
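Using size_t for len together with min_t(size_t, ...) below avoids + * that signed overflow.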
+ */ + len = min_t(size_t, (npages * PAGE_SIZE), iova_size); + phys = page_to_phys(pages[i]); + + iova_size -= len; + iova_next = iova + len; + + ret = hct_iommu_iova_check_unsafe(iova, len, phys, hct_data.domain); + if (ret < 0) { + ret = -EBUSY; + goto map_fail; + } else if (ret > 0) { + ret = 0; + continue; + } + + ret = iommu_map(hct_data.domain, iova, phys, len, hct_data.prot, GFP_KERNEL); + if (ret) { + if (ret == -EBUSY) + ret = map_try_harder(hct_data.domain, iova, + phys >> PAGE_SHIFT, + len >> PAGE_SHIFT, + hct_data.prot); + if (ret) + goto map_fail; + } + mapped_size += len; + cond_resched(); + } + + ret = hct_add_dma_share_unsafe(dma_iova, dma_size); + if (ret) + goto map_fail; + + mutex_unlock(&hct_data.lock); + list_add(&dma->next, &private->head); + return 0; +map_fail: + if (mapped_size) + iommu_unmap(hct_data.domain, dma_iova, mapped_size); + mutex_unlock(&hct_data.lock); + hct_unpin_memory(private, pages, n); + kfree(dma); + return ret; +} + +static void hct_iommu_unmap(struct hct_private *private, + dma_addr_t iova, size_t size) +{ + struct iommu_domain *domain = hct_data.domain; + struct hct_dma *dma; + + if (!size || (iova | size) & (PAGE_SIZE - 1)) + return; + + dma = hct_find_dma(private, iova, size); + if (!dma) + return; + + mutex_lock(&hct_data.lock); + iommu_unmap(domain, dma->iova, dma->size); + if (dma->pfnmap_flag == 0) + hct_unpin_memory(private, dma->pages, dma->npages); + list_del(&dma->next); + kfree(dma); + mutex_unlock(&hct_data.lock); +} + +static void hct_iommu_unmap_all(struct hct_private *private) +{ + struct iommu_domain *domain = hct_data.domain; + struct hct_dma *dma, *tmp; + + mutex_lock(&hct_data.lock); + list_for_each_entry_safe(dma, tmp, &private->head, next) { + if (hct_unmap_dma_share_unsafe(dma->iova, dma->size)) + iommu_unmap(domain, dma->iova, dma->size); + if (dma->pfnmap_flag == 0) + hct_unpin_memory(private, dma->pages, dma->npages); + cond_resched(); + list_del(&dma->next); + kfree(dma); + } + mutex_unlock(&hct_data.lock); +} + +static struct page *hct_get_page(pgoff_t page_idx) +{ + u64 *node; + + mutex_lock(&hct_share.lock); + if (!hct_share.pages[page_idx]) { + hct_share.pages[page_idx] = + alloc_pages(GFP_HIGHUSER | __GFP_ZERO, 0); + if (!hct_share.pages[page_idx]) { + mutex_unlock(&hct_share.lock); + return NULL; + } + } + get_page(hct_share.pages[page_idx]); + + node = page_to_virt(hct_share.pages[page_idx]) + PAGE_SIZE - 8; + *node = hct_data.iommu[page_idx].pdev->dev.numa_node; + mutex_unlock(&hct_share.lock); + + return hct_share.pages[page_idx]; +} + +static void hct_put_pages(void) +{ + int i; + + for (i = 0; i < hct_share.pagecount; i++) { + if (!hct_share.pages[i]) + continue; + + put_page(hct_share.pages[i]); + hct_share.pages[i] = NULL; + } +} + +/* Clear status information when exiting abnormally. */ +static void hct_clear_shared_lock_memory(unsigned int gid) +{ + int *base; + int *queue_lck; + int dev_idx; + int queue_idx; + + for (dev_idx = 0; dev_idx < MCCP_DEV_MAX && + hct_share.pages[dev_idx]; dev_idx++) { + base = (int *)page_to_virt(hct_share.pages[dev_idx]); + for (queue_idx = 0; queue_idx < MCCP_DEV_QUEUE; queue_idx++) { + queue_lck = base + queue_idx; + if (*queue_lck == gid) + *queue_lck = 0; /* vq userid will be changed. 
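Resetting it to 0 marks the queue as unowned.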
*/ + } + } +} + +static long hct_share_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + struct hct_dev_ctrl dev_ctrl; + unsigned int cmd_id; + unsigned int len; + unsigned int pasid; + int ret = 0; + struct hct_private *private = file->private_data; + + if (_IOC_TYPE(ioctl) != MCCP_SHARE_IOC_TYPE) + return -EINVAL; + + cmd_id = _IOC_NR(ioctl); + len = _IOC_SIZE(ioctl); + + if (cmd_id != MCCP_SHARE_OP) + return -EINVAL; + + if (len != sizeof(dev_ctrl)) + return -EINVAL; + + if (copy_from_user(&dev_ctrl, (void __user *)arg, sizeof(dev_ctrl))) + return -EINVAL; + + mutex_lock(&private->lock); + switch (dev_ctrl.op) { + case MCCP_SHARE_OP_DMA_MAP: + ret = hct_iommu_map(private, dev_ctrl.vaddr, dev_ctrl.iova, dev_ctrl.size); + break; + case MCCP_SHARE_OP_DMA_UNMAP: + hct_iommu_unmap(private, dev_ctrl.iova, dev_ctrl.size); + ret = 0; + break; + case MCCP_SHARE_OP_DMA_UNMAP_ALL: + hct_iommu_unmap_all(private); + ret = 0; + break; + case MCCP_SHARE_OP_GET_ID: + dev_ctrl.id = private->id; + if (copy_to_user((void __user *)arg, &dev_ctrl, sizeof(dev_ctrl))) + ret = -EINVAL; + else + ret = 0; + break; + case MCCP_SHARE_OP_GET_PASID: + /* Different virtual machines are distinguished by pasid. */ + pasid = private->id >> MCCP_INSTANCE_OFFSET; + if (pasid >= MCCP_PASID_SIZE) { + ret = -EINVAL; + break; + } + + dev_ctrl.id = pasid; + if (copy_to_user((void __user *)arg, &dev_ctrl, sizeof(dev_ctrl))) + ret = -EINVAL; + break; + case MCCP_SHARE_OP_GET_VERSION: + memcpy(dev_ctrl.version, VERSION_STRING, sizeof(VERSION_STRING)); + if (copy_to_user((void __user *)arg, &dev_ctrl, sizeof(dev_ctrl))) + ret = -EINVAL; + break; + default: + ret = -EINVAL; + break; + } + mutex_unlock(&private->lock); + + return ret; +} + +static int hct_share_close(struct inode *inode, struct file *file) +{ + struct hct_private *private = file->private_data; + unsigned int id = private->id >> MCCP_INSTANCE_OFFSET; + + mutex_lock(&hct_share.lock); + /* For the vm scenario, the hct_share.vaddr value is NULL.
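Guests map the shared pages through the page-fault path in + * hct_share_mmap() instead, so this block is skipped for them.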
*/ + if (hct_share.vaddr) { + struct hct_shared_cfg *cfg = hct_share.vaddr; + int i; + + if (private->id == cfg->ccps_ref_lock) + cfg->ccps_ref_lock = 0; + + for (i = 0; i < MCCP_DEV_MAX; i++) + if (private->id == (MCCP_INSTANCE_MASK & cfg->ccp_state[i])) + cfg->ccp_state[i] = 0; + + for (i = 0; i < MCCP_QUEUES_MAX; i++) + if (private->id == cfg->ccp_queue_state[i]) + cfg->ccp_queue_state[i] = MCCP_QUEUE_NEED_INIT; + + for (i = 0; i < MCCP_IOVA_MAX_SLOT; i++) + if (private->id == cfg->iova_slot[i]) + cfg->iova_slot[i] = 0; + } + + hct_clear_shared_lock_memory(private->id); + + hct_share.ref--; + if (!hct_share.ref) { + hct_put_pages(); + if (hct_share.vaddr) + memset(hct_share.vaddr, 0x00, hct_share.size); + } + mutex_unlock(&hct_share.lock); + + mutex_lock(&hct_data.lock); + if (--id < MCCP_INSTANCE_MAX) + bitmap_clear(hct_data.ids, id, 1); + mutex_unlock(&hct_data.lock); + + mutex_lock(&private->lock); + hct_iommu_unmap_all(private); + mutex_unlock(&private->lock); + + kfree(private); + return 0; +} + +static vm_fault_t hct_cdev_vma_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + pgoff_t page_idx = (vmf->address - vma->vm_start) >> PAGE_SHIFT; + + if (page_idx >= hct_share.pagecount) + return VM_FAULT_SIGBUS; + + vmf->page = hct_get_page(page_idx); + if (!vmf->page) + return VM_FAULT_SIGBUS; + + return 0; +} + +static const struct vm_operations_struct hct_cdev_vm_ops = { + .fault = hct_cdev_vma_fault, +}; + +static int hct_share_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned long len; + int ret = 0; + + mutex_lock(&hct_share.lock); + len = vma->vm_end - vma->vm_start; + if (len == MCCP_SHARED_SIZE) { + /* The required size for vm is 64KB, + * and will follow the pagefault process. + */ + vma->vm_ops = &hct_cdev_vm_ops; + goto exit; + } + + if (unlikely(!hct_share.vaddr)) { + hct_share.size = (vma->vm_end - vma->vm_start); + hct_share.vaddr = kzalloc(hct_share.size, GFP_KERNEL); + } + + if (!hct_share.vaddr) { + ret = -ENOMEM; + goto exit; + } + + if (hct_share.size != (vma->vm_end - vma->vm_start)) { + ret = -EINVAL; + pr_err("invalid hct share size\n"); + goto exit; + } + + ret = remap_pfn_range(vma, vma->vm_start, + virt_to_phys(hct_share.vaddr) >> PAGE_SHIFT, + hct_share.size, + vma->vm_page_prot); +exit: + mutex_unlock(&hct_share.lock); + return ret; +} + +static const struct file_operations hct_share_fops = { + .owner = THIS_MODULE, + .open = hct_share_open, + .release = hct_share_close, + .mmap = hct_share_mmap, + .unlocked_ioctl = hct_share_ioctl, +}; + +static struct miscdevice hct_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hct_share", + .fops = &hct_share_fops, + .mode = 0666, +}; + +static int hct_share_init(void) +{ + int i; + int ret; + + memset(&hct_data, 0x00, sizeof(hct_data)); + mutex_init(&hct_data.lock); + + for (i = 0; i < MCCP_DEV_MAX; i++) + mutex_init(&hct_data.iommu[i].lock); + + ret = misc_register(&hct_misc); + if (!ret) { + hct_data.domain = iommu_domain_alloc(&pci_bus_type); + if (!hct_data.domain) { + misc_deregister(&hct_misc); + if (!pci_bus_type.iommu_ops) { + pr_err("iommu is disabled\n"); + return -ENODEV; + } + return -ENOMEM; + } + hct_data.prot = IOMMU_READ | IOMMU_WRITE; + } + + return ret; +} + +static void hct_share_exit(void) +{ + int i; + struct hct_iommu *iommu; + struct iommu_domain *domain; + struct pci_dev *pdev; + + mutex_lock(&hct_data.lock); + for (i = 0; i < MCCP_DEV_MAX; i++) { + iommu = &hct_data.iommu[i]; + pdev = iommu->pdev; + if (pdev) { + domain = 
iommu_get_domain_for_dev(&pdev->dev); + if (domain == hct_data.domain) + iommu_detach_device(domain, &pdev->dev); + } + } + mutex_unlock(&hct_data.lock); + + if (hct_data.domain) + iommu_domain_free(hct_data.domain); + + misc_deregister(&hct_misc); + kfree(hct_share.vaddr); +} + +static int hct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return hct_iommu_alloc(pdev); +} + +static void hct_pci_remove(struct pci_dev *pdev) +{ + struct hct_iommu *iommu; + struct hct_dev_ctx *dev_ctx; + int i; + + iommu = pci_get_drvdata(pdev); + if (!iommu) { + pci_set_drvdata(pdev, NULL); + return; + } + + dev_ctx = &iommu->dev_ctx; + for (i = 0; i < dev_ctx->q_count; i++) + kfifo_free(&dev_ctx->cmd_q[i].ectx_fifo); + + if (dev_ctx->io_regs) + iounmap(dev_ctx->io_regs); + if (dev_ctx->irq) { + tasklet_kill(&dev_ctx->irq_tasklet); + free_irq(dev_ctx->irq, dev_ctx); + dev_ctx->irq = 0; + pci_free_irq_vectors(pdev); + pci_clear_master(pdev); + pci_disable_device(pdev); + } + hct_iommu_free(iommu); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver hct_pci_driver = { + .name = "hct", + .id_table = NULL, + .probe = hct_pci_probe, + .remove = hct_pci_remove, +}; + +static const struct file_operations hct_vd_fops = { + .owner = THIS_MODULE, +}; + +static void hct_device_release(struct device *dev) +{ + dev_dbg(dev, "hct: released\n"); +} +#endif /* IS_ENABLED(CONFIG_VFIO_MDEV) */ + +/* Clear the PAT, PCD and PWT flags of the page table entry + * so the page is mapped with write-back (WB) cache attributes. + */ +void hct_noiommu_set_memory_wb(unsigned long address) +{ + pgd_t *pgd = current->mm->pgd + pgd_index(address); + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + pte_t old_pte; + pte_t new_pte; + pgprot_t new_prot; + unsigned long pfn; + + if (pgd_none(*pgd)) { + pr_err("pgd val shouldn't be none\n"); + return; + } + + p4d = p4d_offset(pgd, address); + if (p4d_none(*p4d)) { + pr_err("p4d val shouldn't be none\n"); + return; + } + + pud = pud_offset(p4d, address); + if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud)) { + pr_err("pud val is invalid.\n"); + return; + } + + pmd = pmd_offset(pud, address); + if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) { + pr_err("pmd val is invalid.\n"); + return; + } + + pte = pte_offset_kernel(pmd, address); + if (pte_none(*pte)) { + pr_err("pte val shouldn't be none\n"); + return; + } + + old_pte = *pte; + pfn = pte_pfn(old_pte); + new_prot = pte_pgprot(old_pte); + pgprot_val(new_prot) &= ~(_PAGE_PAT | _PAGE_PCD | _PAGE_PWT); + new_pte = pfn_pte(pfn, new_prot); + set_pte_atomic(pte, new_pte); +} + +static DEFINE_MUTEX(hct_noiommu_lock); +static long hct_noiommu_ioctl(struct file *file, + unsigned int ioctl, unsigned long arg) +{ + struct hct_dev_ctrl ctrl; + unsigned int cmd_id; + unsigned int len; + int ret = 0; + + if (_IOC_TYPE(ioctl) != MCCP_NOIOMMU_IOC_TYPE) + return -EINVAL; + + cmd_id = _IOC_NR(ioctl); + len = _IOC_SIZE(ioctl); + + if (cmd_id != MCCP_SHARE_OP) + return -EINVAL; + + if (len != sizeof(ctrl)) + return -EINVAL; + + if (copy_from_user(&ctrl, (void __user *)arg, sizeof(ctrl))) + return -EINVAL; + + mutex_lock(&hct_noiommu_lock); + switch (ctrl.op) { + case MCCP_NOIOMMU_SET_MEMORY_WB: + while (ctrl.nr_pages && ctrl.nr_pages--) { + hct_noiommu_set_memory_wb(ctrl.vt_addr); + ctrl.vt_addr += PAGE_SIZE; + } + break; + case MCCP_NOIOMMU_GET_SME_ACTIVE: + ctrl.sme_mask = sme_me_mask; + if (copy_to_user((void __user *)arg, &ctrl, sizeof(ctrl))) + ret = -EINVAL; + break; + default: + ret = -EINVAL; + break; + } + 
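/* + * Hypothetical userspace sketch (not part of this patch): the request + * code must encode MCCP_NOIOMMU_IOC_TYPE, MCCP_SHARE_OP and + * sizeof(struct hct_dev_ctrl) to pass the checks above, e.g.: + * + * struct hct_dev_ctrl ctrl = { .op = MCCP_NOIOMMU_GET_SME_ACTIVE }; + * ioctl(fd, _IOWR(MCCP_NOIOMMU_IOC_TYPE, MCCP_SHARE_OP, + * struct hct_dev_ctrl), &ctrl); + * + * with fd an open descriptor for /dev/hct_noiommu. + */ + 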
mutex_unlock(&hct_noiommu_lock); + + return ret; +} + +const struct file_operations hct_noiommu_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = hct_noiommu_ioctl, +}; + +struct miscdevice hct_noiommu_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hct_noiommu", + .fops = &hct_noiommu_fops, +}; + +#define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 +#define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 +#define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e + +static int __init hct_dev_init(void) +{ + int __maybe_unused ret = 0; + u32 vendor_ebx = 0; + u32 vendor_ecx = 0; + u32 vendor_edx = 0; + u32 vendor_eax = 0; + + cpuid(0, &vendor_eax, &vendor_ebx, &vendor_ecx, &vendor_edx); + + /* HygonGenuine */ + if (!(vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && + vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && + vendor_edx == CPUID_VENDOR_HygonGenuine_edx)) { + pr_err("Not Hygon hardware\n"); + return -ENODEV; + } + +#if IS_ENABLED(CONFIG_VFIO_MDEV) + if (!iommu_present(&pci_bus_type)) + return misc_register(&hct_noiommu_misc); + + ret = mdev_register_driver(&hct_mdev_driver); + if (ret) + return ret; + + memset(&hct_dev, 0, sizeof(hct_dev)); + + ret = alloc_chrdev_region(&hct_dev.vd_devt, 0, MINORMASK + 1, + MCCP_NAME); + + if (ret < 0) { + pr_err("Error: failed to register hct_dev, err:%d\n", ret); + goto failed0; + } + + cdev_init(&hct_dev.vd_cdev, &hct_vd_fops); + cdev_add(&hct_dev.vd_cdev, hct_dev.vd_devt, MINORMASK + 1); + + hct_dev.vd_class = class_create(MCCP_CLASS_NAME); + if (IS_ERR(hct_dev.vd_class)) { + pr_err("Error: failed to register hct_dev class\n"); + ret = PTR_ERR(hct_dev.vd_class); + goto failed1; + } + + hct_dev.dev.class = hct_dev.vd_class; + hct_dev.dev.release = hct_device_release; + dev_set_name(&hct_dev.dev, "%s", MCCP_NAME); + hct_dev.dev.devt = hct_dev.vd_devt; + + ret = device_register(&hct_dev.dev); + if (ret) + goto failed2; + + ret = mdev_register_parent(&hct_dev.mdev_parent, &hct_dev.dev, + &hct_mdev_driver, hct_mdev_types, + ARRAY_SIZE(hct_mdev_types)); + if (ret) + goto failed3; + + ret = hct_share_init(); + if (ret) + goto failed4; + + memset(&hct_share, 0x00, sizeof(hct_share)); + mutex_init(&hct_share.lock); + + ret = pci_register_driver(&hct_pci_driver); + if (ret) + goto failed5; + + goto all_done; + +failed5: + hct_share_exit(); + +failed4: + mdev_unregister_parent(&hct_dev.mdev_parent); + +failed3: + device_unregister(&hct_dev.dev); + +failed2: + class_destroy(hct_dev.vd_class); + +failed1: + cdev_del(&hct_dev.vd_cdev); + unregister_chrdev_region(hct_dev.vd_devt, MINORMASK + 1); + +failed0: + mdev_unregister_driver(&hct_mdev_driver); + +all_done: + return ret; +#else + pr_info("mdev support is disabled.\n"); + return misc_register(&hct_noiommu_misc); +#endif +} + +static void __exit hct_dev_exit(void) +{ +#if IS_ENABLED(CONFIG_VFIO_MDEV) + if (!iommu_present(&pci_bus_type)) { + misc_deregister(&hct_noiommu_misc); + return; + } + + hct_share_exit(); + hct_dev.dev.bus = NULL; + mdev_unregister_parent(&hct_dev.mdev_parent); + + device_unregister(&hct_dev.dev); + cdev_del(&hct_dev.vd_cdev); + unregister_chrdev_region(hct_dev.vd_devt, MINORMASK + 1); + class_destroy(hct_dev.vd_class); + mdev_unregister_driver(&hct_mdev_driver); + hct_dev.vd_class = NULL; + + pci_unregister_driver(&hct_pci_driver); +#else + misc_deregister(&hct_noiommu_misc); +#endif +} + +module_init(hct_dev_init) +module_exit(hct_dev_exit) + +MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION_STRING); +MODULE_AUTHOR(DRIVER_AUTHOR); diff --git a/drivers/crypto/ccp/hygon/psp-dev.c 
b/drivers/crypto/ccp/hygon/psp-dev.c new file mode 100644 index 0000000000000000000000000000000000000000..26d4e87836ac2640bb014f319e12423e6132762e --- /dev/null +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -0,0 +1,737 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "psp-dev.h" + +/* Function and variable pointers for hooks */ +struct hygon_psp_hooks_table hygon_psp_hooks; + +static struct psp_misc_dev *psp_misc; +#define HYGON_PSP_IOC_TYPE 'H' +enum HYGON_PSP_OPCODE { + HYGON_PSP_MUTEX_ENABLE = 1, + HYGON_PSP_MUTEX_DISABLE, + HYGON_VPSP_CTRL_OPT, + HYGON_PSP_OP_PIN_USER_PAGE, + HYGON_PSP_OP_UNPIN_USER_PAGE, + HYGON_PSP_OPCODE_MAX_NR, +}; + +enum VPSP_DEV_CTRL_OPCODE { + VPSP_OP_VID_ADD, + VPSP_OP_VID_DEL, + VPSP_OP_SET_DEFAULT_VID_PERMISSION, + VPSP_OP_GET_DEFAULT_VID_PERMISSION, + VPSP_OP_SET_GPA, +}; + +struct vpsp_dev_ctrl { + unsigned char op; + /** + * To be compatible with old user mode, + * struct vpsp_dev_ctrl must be kept at 132 bytes. + */ + unsigned char resv[3]; + union { + unsigned int vid; + // Set or check the permissions for the default VID + unsigned int def_vid_perm; + struct { + u64 gpa_start; + u64 gpa_end; + } gpa; + unsigned char reserved[128]; + } __packed data; +}; + +uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) +{ + return xchg(dst, val); +} + +int psp_mutex_init(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + mutex->locked = 0; + return 0; +} + +int psp_mutex_trylock(struct psp_mutex *mutex) +{ + if (atomic64_exchange(&mutex->locked, 1)) + return 0; + else + return 1; +} + +int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) +{ + int ret = 0; + unsigned long je; + + je = jiffies + msecs_to_jiffies(ms); + do { + if (psp_mutex_trylock(mutex)) { + ret = 1; + break; + } + } while ((ms == 0) || time_before(jiffies, je)); + + return ret; +} + +int psp_mutex_unlock(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + + atomic64_exchange(&mutex->locked, 0); + return 0; +} + +static int mmap_psp(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long page; + + page = virt_to_phys((void *)psp_misc->data_pg_aligned) >> PAGE_SHIFT; + + if (remap_pfn_range(vma, vma->vm_start, page, (vma->vm_end - vma->vm_start), + vma->vm_page_prot)) { + printk(KERN_INFO "remap failed\n"); + return -EAGAIN; + } + vm_flags_mod(vma, VM_DONTDUMP | VM_DONTEXPAND, 0); + printk(KERN_INFO "remap_pfn_range page:[%lu] ok.\n", page); + return 0; +} + +static ssize_t read_psp(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_INFO "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_to_user(buf, (char *)psp_misc->data_pg_aligned + *ppos, count); + if (remaining) + return -EFAULT; + + *ppos += count; + + return count; +} + +static ssize_t write_psp(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining, written; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_INFO "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return 
-EFAULT; + } + + remaining = copy_from_user((char *)psp_misc->data_pg_aligned + *ppos, buf, count); + written = count - remaining; + if (!written) + return -EFAULT; + + *ppos += written; + + return written; +} + +DEFINE_RWLOCK(vpsp_rwlock); + +/* VPSP_VID_MAX_ENTRIES determines the maximum number of VMs that can set a vid. + * Lookup cost depends only on g_vpsp_vid_num, the number of entries in use, + * so VPSP_VID_MAX_ENTRIES can safely be set larger. + */ +#define VPSP_VID_MAX_ENTRIES 2048 +#define VPSP_VID_NUM_MAX 64 + +static struct vpsp_context g_vpsp_context_array[VPSP_VID_MAX_ENTRIES]; +static uint32_t g_vpsp_vid_num; +static int compare_vid_entries(const void *a, const void *b) +{ + return ((struct vpsp_context *)a)->pid - ((struct vpsp_context *)b)->pid; +} +static void swap_vid_entries(void *a, void *b, int size) +{ + struct vpsp_context entry; + + memcpy(&entry, a, size); + memcpy(a, b, size); + memcpy(b, &entry, size); +} + +/** + * When 'allow_default_vid' is set to 1, + * QEMU is allowed to use 'vid 0' by default + * in the absence of a valid 'vid' setting. + */ +uint32_t allow_default_vid = 1; +void vpsp_set_default_vid_permission(uint32_t is_allow) +{ + allow_default_vid = is_allow; +} + +int vpsp_get_default_vid_permission(void) +{ + return allow_default_vid; +} +EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); + +/** + * Get the vpsp context associated with a pid. + */ +int vpsp_get_context(struct vpsp_context **ctx, pid_t pid) +{ + struct vpsp_context new_entry = {.pid = pid}; + struct vpsp_context *existing_entry = NULL; + + read_lock(&vpsp_rwlock); + existing_entry = bsearch(&new_entry, g_vpsp_context_array, g_vpsp_vid_num, + sizeof(struct vpsp_context), compare_vid_entries); + read_unlock(&vpsp_rwlock); + + if (!existing_entry) + return -ENOENT; + + if (ctx) + *ctx = existing_entry; + + return 0; +} +EXPORT_SYMBOL_GPL(vpsp_get_context); + +/** + * Upon QEMU startup, this section checks whether + * the '-device psp,vid' parameter is specified. + * If set, it utilizes the 'vpsp_add_vid' function + * to insert the 'vid' and 'pid' values into the 'g_vpsp_context_array'. + * The insertion is done in ascending order of 'pid'. + */ +static int vpsp_add_vid(uint32_t vid) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_context new_entry = {.vid = vid, .pid = cur_pid}; + + if (vpsp_get_context(NULL, cur_pid) == 0) + return -EEXIST; + if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) + return -ENOMEM; + if (vid >= VPSP_VID_NUM_MAX) + return -EINVAL; + + write_lock(&vpsp_rwlock); + memcpy(&g_vpsp_context_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_context)); + sort(g_vpsp_context_array, g_vpsp_vid_num, sizeof(struct vpsp_context), + compare_vid_entries, swap_vid_entries); + pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); + write_unlock(&vpsp_rwlock); + return 0; +} + +/** + * When the virtual machine is shut down, + * 'vpsp_del_vid' is used to remove + * the 'vid' associated with the current 'pid'. 
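+ * Entries after the removed one are shifted down with memmove(), + * so the array stays sorted by 'pid' without a re-sort.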
+ */ +static int vpsp_del_vid(void) +{ + pid_t cur_pid = task_pid_nr(current); + int i, ret = -ENOENT; + + write_lock(&vpsp_rwlock); + for (i = 0; i < g_vpsp_vid_num; ++i) { + if (g_vpsp_context_array[i].pid == cur_pid) { + --g_vpsp_vid_num; + pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", + g_vpsp_context_array[i].vid, cur_pid, g_vpsp_vid_num); + memmove(&g_vpsp_context_array[i], &g_vpsp_context_array[i + 1], + sizeof(struct vpsp_context) * (g_vpsp_vid_num - i)); + ret = 0; + goto end; + } + } + +end: + write_unlock(&vpsp_rwlock); + return ret; +} + +static int vpsp_set_gpa_range(u64 gpa_start, u64 gpa_end) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_context *ctx = NULL; + + vpsp_get_context(&ctx, cur_pid); + if (!ctx) { + pr_err("PSP: %s get vpsp_context failed from pid %d\n", __func__, cur_pid); + return -ENOENT; + } + + ctx->gpa_start = gpa_start; + ctx->gpa_end = gpa_end; + pr_info("PSP: set gpa range (start 0x%llx, end 0x%llx), by pid %d\n", + gpa_start, gpa_end, cur_pid); + return 0; +} + +/** + * Try to pin a page + * + * @vaddr: the userspace virtual address, must be aligned to PAGE_SIZE + */ +static int psp_pin_user_page(u64 vaddr) +{ + struct page *page; + long npinned = 0; + int ref_count = 0; + + // vaddr must be aligned to PAGE_SIZE + if (vaddr & (PAGE_SIZE - 1)) { + pr_err("vaddr %llx not aligned to 0x%lx\n", vaddr, PAGE_SIZE); + return -EFAULT; + } + + npinned = pin_user_pages_fast(vaddr, 1, FOLL_WRITE, &page); + if (npinned != 1) { + pr_err("PSP: pin_user_pages_fast fail\n"); + return -ENOMEM; + } + + ref_count = page_ref_count(page); + pr_debug("pin user page with address %llx, page ref_count %d\n", vaddr, ref_count); + return 0; +} + +/** + * Try to unpin a page + * + * @vaddr: the userspace virtual address, must be aligned to PAGE_SIZE + */ +static int psp_unpin_user_page(u64 vaddr) +{ + struct page *page; + long npinned = 0; + int ref_count = 0; + + // vaddr must be aligned to PAGE_SIZE + if (vaddr & (PAGE_SIZE - 1)) { + pr_err("vaddr %llx not aligned to 0x%lx\n", vaddr, PAGE_SIZE); + return -EFAULT; + } + + // get_user_pages_fast() increments the page reference count by 1 + npinned = get_user_pages_fast(vaddr, 1, FOLL_WRITE, &page); + if (npinned != 1) { + pr_err("PSP: get_user_pages_fast fail\n"); + return -ENOMEM; + } + + // drop two references: the one just taken plus the one left by + // psp_pin_user_page() + put_page(page); + put_page(page); + + ref_count = page_ref_count(page); + pr_debug("unpin user page with address %llx, page ref_count %d\n", vaddr, ref_count); + return 0; +} + +static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) +{ + int ret = 0; + unsigned char op = ctrl->op; + + switch (op) { + case VPSP_OP_VID_ADD: + ret = vpsp_add_vid(ctrl->data.vid); + break; + + case VPSP_OP_VID_DEL: + ret = vpsp_del_vid(); + break; + + case VPSP_OP_SET_DEFAULT_VID_PERMISSION: + vpsp_set_default_vid_permission(ctrl->data.def_vid_perm); + break; + + case VPSP_OP_GET_DEFAULT_VID_PERMISSION: + ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); + break; + + case VPSP_OP_SET_GPA: + ret = vpsp_set_gpa_range(ctrl->data.gpa.gpa_start, ctrl->data.gpa.gpa_end); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + +static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) +{ + unsigned int opcode = 0; + struct vpsp_dev_ctrl vpsp_ctrl_op; + int ret = -EFAULT; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { + printk(KERN_INFO "%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); + return -EINVAL; 
+ } + opcode = _IOC_NR(ioctl); + switch (opcode) { + case HYGON_PSP_MUTEX_ENABLE: + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + // Take the sev lock to make sure no one is using it now. + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + hygon_psp_hooks.psp_mutex_enabled = 1; + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + // Wait 10ms in case someone is just about to take the psp lock. + mdelay(10); + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + ret = 0; + break; + + case HYGON_PSP_MUTEX_DISABLE: + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + // Take the psp lock to make sure no one is using it now. + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + hygon_psp_hooks.psp_mutex_enabled = 0; + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + // Wait 10ms in case someone is just about to take the sev lock. + mdelay(10); + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + ret = 0; + break; + + case HYGON_VPSP_CTRL_OPT: + if (copy_from_user(&vpsp_ctrl_op, (void __user *)arg, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; + ret = do_vpsp_op_ioctl(&vpsp_ctrl_op); + if (!ret && copy_to_user((void __user *)arg, &vpsp_ctrl_op, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; + break; + + case HYGON_PSP_OP_PIN_USER_PAGE: + ret = psp_pin_user_page((u64)arg); + break; + + case HYGON_PSP_OP_UNPIN_USER_PAGE: + ret = psp_unpin_user_page((u64)arg); + break; + + default: + printk(KERN_INFO "%s: invalid ioctl number: %d\n", __func__, opcode); + return -EINVAL; + } + return ret; +} + +static const struct file_operations psp_fops = { + .owner = THIS_MODULE, + .mmap = mmap_psp, + .read = read_psp, + .write = write_psp, + .unlocked_ioctl = ioctl_psp, +}; + +int hygon_psp_additional_setup(struct sp_device *sp) +{ + struct device *dev = sp->dev; + int ret = 0; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (!psp_misc) { + struct miscdevice *misc; + + psp_misc = devm_kzalloc(dev, sizeof(*psp_misc), GFP_KERNEL); + if (!psp_misc) + return -ENOMEM; + psp_misc->data_pg_aligned = (struct psp_dev_data *)get_zeroed_page(GFP_KERNEL); + if (!psp_misc->data_pg_aligned) { + dev_err(dev, "alloc psp data page failed\n"); + devm_kfree(dev, psp_misc); + psp_misc = NULL; + return -ENOMEM; + } + SetPageReserved(virt_to_page(psp_misc->data_pg_aligned)); + psp_mutex_init(&psp_misc->data_pg_aligned->mb_mutex); + + *(uint32_t *)((void *)psp_misc->data_pg_aligned + 8) = 0xdeadbeef; + misc = &psp_misc->misc; + misc->minor = MISC_DYNAMIC_MINOR; + misc->name = "hygon_psp_config"; + misc->fops = &psp_fops; + + ret = misc_register(misc); + if (ret) { + ClearPageReserved(virt_to_page(psp_misc->data_pg_aligned)); + free_page((unsigned long)psp_misc->data_pg_aligned); + devm_kfree(dev, psp_misc); + psp_misc = NULL; + return ret; + } + kref_init(&psp_misc->refcount); + hygon_psp_hooks.psp_misc = psp_misc; + } else { + kref_get(&psp_misc->refcount); + } + + return ret; +} + +void hygon_psp_exit(struct kref *ref) +{ + struct psp_misc_dev *misc_dev = container_of(ref, struct psp_misc_dev, refcount); + + misc_deregister(&misc_dev->misc); + ClearPageReserved(virt_to_page(misc_dev->data_pg_aligned)); + free_page((unsigned long)misc_dev->data_pg_aligned); + psp_misc = NULL; + hygon_psp_hooks.psp_misc = NULL; +} + +int fixup_hygon_psp_caps(struct psp_device *psp) +{ + /* the hygon psp is unavailable if bit0 is cleared in feature reg */ + if (!(psp->capability & PSP_CAPABILITY_SEV)) + return -ENODEV; + + psp->capability &= ~(PSP_CAPABILITY_TEE | + PSP_CAPABILITY_PSP_SECURITY_REPORTING); + return 0; +} + +static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + 
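/* psp_master is the master PSP device maintained by the common + * PSP driver code; commands below are issued through its SEV mailbox. + */ + 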
struct sev_device *sev; + unsigned int phys_lsb, phys_msb; + unsigned int reg; + int ret = 0; + + if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* Get the physical address of the command buffer */ + phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; + phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, &reg, *hygon_psp_hooks.psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + return ret; +} + +int psp_do_cmd(int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + rc = __psp_do_cmd_locked(cmd, data, psp_ret); + if (mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} +EXPORT_SYMBOL_GPL(psp_do_cmd); + +#ifdef CONFIG_HYGON_PSP2CPU_CMD + +static DEFINE_SPINLOCK(p2c_notifier_lock); +static p2c_notifier_t p2c_notifiers[P2C_NOTIFIERS_MAX] = {NULL}; + +int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) +{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + + if (cmd_id < P2C_NOTIFIERS_MAX && !p2c_notifiers[cmd_id]) { + p2c_notifiers[cmd_id] = notifier; + ret = 0; + } + + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_register_cmd_notifier); + +int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) +{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + + if (cmd_id < P2C_NOTIFIERS_MAX && p2c_notifiers[cmd_id] == notifier) { + p2c_notifiers[cmd_id] = NULL; + ret = 0; + } + + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_unregister_cmd_notifier); + +#define PSP2CPU_MAX_LOOP 100 + +static irqreturn_t psp_irq_handler_hygon(int irq, void *data) +{ + struct psp_device *psp = data; + struct sev_device *sev = psp->sev_irq_data; + unsigned int status; + int reg; + unsigned long flags; + int count = 0; + 
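/* Decoded fields of an incoming PSP-to-CPU (P2C) request, if any: */ + 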
uint32_t p2c_cmd; + uint32_t p2c_lo_data; + uint32_t p2c_hi_data; + uint64_t p2c_data; + + /* Read the interrupt status: */ + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + + while (status && (count++ < PSP2CPU_MAX_LOOP)) { + /* Clear the interrupt status by writing the same value we read. */ + iowrite32(status, psp->io_regs + psp->vdata->intsts_reg); + + /* Check if it is command completion: */ + if (status & SEV_CMD_COMPLETE) { + /* Check if it is SEV command completion: */ + reg = ioread32(psp->io_regs + psp->vdata->sev->cmdresp_reg); + if (reg & PSP_CMDRESP_RESP) { + sev->int_rcvd = 1; + wake_up(&sev->int_queue); + } + } + + if (status & PSP_X86_CMD) { + /* Check if a P2C command request is pending: */ + reg = ioread32(psp->io_regs + psp->vdata->p2c_cmdresp_reg); + if (!(reg & PSP_CMDRESP_RESP)) { + p2c_lo_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_lo_reg); + p2c_hi_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_hi_reg); + p2c_data = (((uint64_t)(p2c_hi_data) << 32) + + ((uint64_t)(p2c_lo_data))); + p2c_cmd = (uint32_t)(reg & SEV_CMDRESP_IOC); + if (p2c_cmd < P2C_NOTIFIERS_MAX) { + spin_lock_irqsave(&p2c_notifier_lock, flags); + if (p2c_notifiers[p2c_cmd]) + p2c_notifiers[p2c_cmd](p2c_cmd, p2c_data); + + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + } + + reg |= PSP_CMDRESP_RESP; + iowrite32(reg, psp->io_regs + psp->vdata->p2c_cmdresp_reg); + } + } + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + } + + return IRQ_HANDLED; +} + +int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data) +{ + return sp_request_psp_irq(sp, psp_irq_handler_hygon, name, data); +} + +#else /* !CONFIG_HYGON_PSP2CPU_CMD */ + +int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data) +{ + return sp_request_psp_irq(sp, handler, name, data); +} + +#endif /* CONFIG_HYGON_PSP2CPU_CMD */ diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h new file mode 100644 index 0000000000000000000000000000000000000000..1d180e46b1a3ac8c2a8b0f4ac6574f542fbf16f4 --- /dev/null +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) driver interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef __CCP_HYGON_PSP_DEV_H__ +#define __CCP_HYGON_PSP_DEV_H__ + +#include +#include + +#include "sp-dev.h" + +#include "../psp-dev.h" +#include "../sev-dev.h" + +#ifdef CONFIG_HYGON_PSP2CPU_CMD +#define PSP_X86_CMD BIT(2) +#define P2C_NOTIFIERS_MAX 16 +#endif + +/* + * Hooks table: a table of function and variable pointers filled in + * when the PSP driver initializes. 
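+ * Consumers must check sev_dev_hooks_installed before dereferencing + * any of these pointers; see psp_do_cmd() and ioctl_psp() in + * psp-dev.c.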
+ */ +extern struct hygon_psp_hooks_table { + bool sev_dev_hooks_installed; + struct mutex *sev_cmd_mutex; + struct psp_misc_dev *psp_misc; + bool psp_mutex_enabled; + bool *psp_dead; + int *psp_timeout; + int *psp_cmd_timeout; + int (*sev_cmd_buffer_len)(int cmd); + int (*__sev_do_cmd_locked)(int cmd, void *data, int *psp_ret); + int (*__sev_platform_init_locked)(int *error); + int (*__sev_platform_shutdown_locked)(int *error); + int (*sev_wait_cmd_ioc)(struct sev_device *sev, + unsigned int *reg, unsigned int timeout); + int (*sev_do_cmd)(int cmd, void *data, int *psp_ret); + long (*sev_ioctl)(struct file *file, unsigned int ioctl, unsigned long arg); +} hygon_psp_hooks; + +#define PSP_MUTEX_TIMEOUT 600000 +struct psp_mutex { + uint64_t locked; +}; + +struct psp_dev_data { + struct psp_mutex mb_mutex; +}; + +struct psp_misc_dev { + struct kref refcount; + struct psp_dev_data *data_pg_aligned; + struct miscdevice misc; +}; + +int hygon_psp_additional_setup(struct sp_device *sp); +void hygon_psp_exit(struct kref *ref); +int psp_mutex_trylock(struct psp_mutex *mutex); +int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); +int psp_mutex_unlock(struct psp_mutex *mutex); +int fixup_hygon_psp_caps(struct psp_device *psp); +int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data); + +#endif /* __CCP_HYGON_PSP_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/ring-buffer.c b/drivers/crypto/ccp/hygon/ring-buffer.c new file mode 100644 index 0000000000000000000000000000000000000000..0c9ea0217b2ea1a39e2b02030af0e293e8f13790 --- /dev/null +++ b/drivers/crypto/ccp/hygon/ring-buffer.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. + * + * Author: Baoshun Fang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include + +#include "ring-buffer.h" + +static void enqueue_data(struct csv_queue *queue, + const void *src, + unsigned int len, unsigned int off) +{ + unsigned int size = queue->mask + 1; + unsigned int esize = queue->esize; + unsigned int l; + void *data; + + off &= queue->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + data = (void *)queue->data_align; + memcpy(data + off, src, l); + memcpy(data, src + l, len - l); + + /* + * Make sure that the data in the ring buffer is up to date before + * incrementing the queue->tail index counter. + */ + smp_wmb(); +} + +static void dequeue_data(struct csv_queue *queue, + void *dst, unsigned int len, unsigned int off) +{ + unsigned int size = queue->mask + 1; + unsigned int esize = queue->esize; + unsigned int l; + + off &= queue->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(dst, (void *)(queue->data + off), l); + memcpy((void *)((uintptr_t)dst + l), (void *)queue->data, len - l); + + /* + * Make sure that the data is copied before incrementing the + * queue->head index counter. + */ + smp_wmb(); +} + +static unsigned int queue_avail_size(struct csv_queue *queue) +{ + /* + * Thanks to unsigned arithmetic this works correctly even when + * tail has wrapped around below head. One element is reserved to + * distinguish a full ring from an empty one. 
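+ * + * For example, with mask = 7 (an 8-slot ring), head = 0xfffffffe and + * tail = 0x00000001, tail - head wraps to 3 slots in use, so this + * returns 7 - 3 = 4.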
+ */ + return queue->mask - (queue->tail - queue->head); +} + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize) +{ + size /= esize; + + queue->head = 0; + queue->tail = 0; + queue->esize = esize; + queue->data = (u64)buffer; + queue->mask = size - 1; + queue->data_align = ALIGN(queue->data, CSV_RING_BUFFER_ALIGN); + + return 0; +} + +void csv_queue_cleanup(struct csv_queue *queue) +{ + memset((void *)queue, 0, sizeof(struct csv_queue)); +} + +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len) +{ + unsigned int size; + + size = queue_avail_size(queue); + if (len > size) + len = size; + + enqueue_data(queue, buf, len, queue->tail); + queue->tail += len; + return len; +} + +unsigned int csv_dequeue_stat(struct csv_queue *queue, + void *buf, unsigned int len) +{ + unsigned int size; + + size = queue->tail - queue->head; + if (len > size) + len = size; + + dequeue_data(queue, buf, len, queue->head); + queue->head += len; + return len; +} + +unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, + void *buf, unsigned int len) +{ + unsigned int size; + + size = ring_buf->tail - ring_buf->head; + if (len > size) + len = size; + + dequeue_data(ring_buf, buf, len, ring_buf->head); + ring_buf->head += len; + return len; +} + +unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf) +{ + unsigned int free_size; + + free_size = queue_avail_size(ring_buf); + return ring_buf->mask - free_size; +} diff --git a/drivers/crypto/ccp/hygon/ring-buffer.h b/drivers/crypto/ccp/hygon/ring-buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..bf97aa6df36a2e66b4e306dc1bf253b73a07ce70 --- /dev/null +++ b/drivers/crypto/ccp/hygon/ring-buffer.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) interface driver + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. + * + * Author: Baoshun Fang + */ + +#ifndef __CCP_HYGON_RINGBUF_H__ +#define __CCP_HYGON_RINGBUF_H__ + +#include + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize); +void csv_queue_cleanup(struct csv_queue *queue); +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len); +unsigned int csv_dequeue_stat(struct csv_queue *queue, + void *buf, unsigned int len); +unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, + void *buf, unsigned int len); + +unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf); + +#endif /* __CCP_HYGON_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/hygon/sp-dev.h b/drivers/crypto/ccp/hygon/sp-dev.h new file mode 100644 index 0000000000000000000000000000000000000000..e1996fc3b7c6f503df4c0dff7802fd350a1a9640 --- /dev/null +++ b/drivers/crypto/ccp/hygon/sp-dev.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Secure Processor interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. 
+ * + * Author: Liyang Han + */ + +#ifndef __CCP_HYGON_SP_DEV_H__ +#define __CCP_HYGON_SP_DEV_H__ + +#include +#include + +#include "../ccp-dev.h" +#include "../sp-dev.h" + +#ifdef CONFIG_X86_64 +static inline bool is_vendor_hygon(void) +{ + return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON; +} +#else +static inline bool is_vendor_hygon(void) { return false; } +#endif + +extern const struct sp_dev_vdata hygon_dev_vdata[]; + +#endif /* __CCP_HYGON_SP_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/sp-pci.c b/drivers/crypto/ccp/hygon/sp-pci.c new file mode 100644 index 0000000000000000000000000000000000000000..691127a0007b3af50f0e08ca3fbdbf1152b95435 --- /dev/null +++ b/drivers/crypto/ccp/hygon/sp-pci.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Secure Processor interface driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "sp-dev.h" + +#ifdef CONFIG_CRYPTO_DEV_SP_PSP +static const struct sev_vdata csvv1 = { + .cmdresp_reg = 0x10580, /* C2PMSG_32 */ + .cmdbuff_addr_lo_reg = 0x105e0, /* C2PMSG_56 */ + .cmdbuff_addr_hi_reg = 0x105e4, /* C2PMSG_57 */ +}; + +static const struct psp_vdata pspv1 = { + .sev = &csvv1, + .feature_reg = 0x105fc, /* C2PMSG_63 */ + .inten_reg = 0x10610, /* P2CMSG_INTEN */ + .intsts_reg = 0x10614, /* P2CMSG_INTSTS */ +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif +}; + +static const struct psp_vdata pspv2 = { + .sev = &csvv1, + .feature_reg = 0x105fc, + .inten_reg = 0x10670, + .intsts_reg = 0x10674, +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif +}; + +#endif + +const struct sp_dev_vdata hygon_dev_vdata[] = { + { /* 0 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a_hygon, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &pspv1, +#endif + }, + { /* 1 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5b_hygon, +#endif + }, + { /* 2 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a_hygon, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &pspv2, +#endif + }, +}; diff --git a/drivers/crypto/ccp/hygon/tdm-dev.c b/drivers/crypto/ccp/hygon/tdm-dev.c new file mode 100644 index 0000000000000000000000000000000000000000..71ab3f6caaaba0eba54288e3ef482b93bd2b61aa --- /dev/null +++ b/drivers/crypto/ccp/hygon/tdm-dev.c @@ -0,0 +1,1594 @@ +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tdm-dev.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "tdm: " fmt + +#define TDM_CMD_ID_MAX 16 +#define TDM2PSP_CMD(id) (0x110 | (id)) +#define TDM_P2C_CMD_ID 1 +#define TDM_C2P_CMD_SIZE (3*PAGE_SIZE) +#define TDM_KFIFO_SIZE 1024 + +#define TDM_IOC_TYPE 'D' +#define TDM_CMD_LEN_LIMIT (1U << 12) + +struct context_message { + uint32_t flag; + uint32_t pid; + uint8_t comm[16]; + uint8_t module_name[64]; +}; + +struct tdm_task_head { + struct list_head head; + rwlock_t lock; +}; + +struct tdm_task_ctx { + uint32_t task_id; + uint32_t cmd_ctx_flag; + measure_exception_handler_t handler; + struct list_head list; +}; + +static struct tdm_task_head dyn_head; +static unsigned int p2c_cmd_id = TDM_P2C_CMD_ID; +static struct task_struct *kthread; +static DECLARE_KFIFO(kfifo_error_task, unsigned char, TDM_KFIFO_SIZE); +static spinlock_t kfifo_lock; +static int tdm_support; +static int tdm_init_flag; +static int tdm_destroy_flag; + +static int list_check_exist(uint32_t task_id) +{ + int found = 0; + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) { + found = 1; + break; + } + } + read_unlock(lock); + + return found; +} + +static int list_enqueue(void *entry) +{ + int ret = 0; + struct list_head *head, *entry_list = NULL; + rwlock_t *lock = NULL; + + if (!entry) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + entry_list = &(((struct tdm_task_ctx *)entry)->list); + + write_lock(lock); + list_add_tail(entry_list, head); + write_unlock(lock); + +end: + return ret; +} + +static __maybe_unused int list_print(void) +{ + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + pr_info("id: %d ", task_node->task_id); + } + read_unlock(lock); + pr_info("\n"); + + return 0; +} + +static int measure_exception_handling_thread(void *data) +{ + int ret = 0; + int copied = 0; + uint32_t error_task_id = 0xffffffff; + struct measure_status task_measure_status; + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + pr_info("Thread started for measurement exception handler dispatching...\n"); + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + + while (!kfifo_is_empty(&kfifo_error_task)) { + copied = kfifo_out_spinlocked(&kfifo_error_task, + (unsigned char *)&error_task_id, sizeof(uint32_t), &kfifo_lock); + if (copied != sizeof(uint32_t)) { + ret = -DYN_ERR_API; + pr_err("kfifo_out exception, return\n"); + goto end; + } + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == error_task_id) + break; + } + read_unlock(lock); + + if (!task_node) { + ret = -DYN_NULL_POINTER; + pr_err("task_node is null, return\n"); + goto end; + } + + if (task_node->task_id == error_task_id) { + if (task_node->handler) { + 
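/* + * A measurement exception for this task was queued by + * tdm_interrupt_handler(); re-query the PSP to confirm the + * error before invoking the registered callback. + */ + 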
pr_info("-----Measurement exception handler dispatching " + "thread------\n"); + pr_info("Measurement exception received for task %d\n", + error_task_id); + pr_info("Step1: Query PSP for task %d status to confirm " + "the error.\n", error_task_id); + pr_info("Step2: Error confirmed, CALL measurement " + "exception handler.\n"); + ret = psp_query_measure_status(error_task_id, + &task_measure_status); + if (ret) { + pr_err("task_id %d status query failed\n", + error_task_id); + goto end; + } + + if (task_measure_status.error == MER_ERR) { + /*error--1 normal--0 */ + pr_info("Error detected for task %d, " + "action TODO!\n", error_task_id); + pr_info("----Measurement exception handler----\n"); + task_node->handler(error_task_id); + pr_info("Exit measurement exception handler.\n"); + } else { + pr_info("No error detected for task %d, please " + "check it again!\n", error_task_id); + } + } else { + pr_err("task %d's callback function is not registered, " + "please check it\n", error_task_id); + } + } + } + } +end: + return ret; +} + +static int tdm_interrupt_handler(uint32_t id, uint64_t data) +{ + if (kthread) { + kfifo_in_spinlocked(&kfifo_error_task, (unsigned char *)&data, sizeof(uint32_t), + &kfifo_lock); + wake_up_process(kthread); + } + + return 0; +} + +static int tdm_do_cmd(unsigned int cmd_id, void *cmd_data, int *error) +{ + if (cmd_id >= TDM_CMD_ID_MAX) { + pr_err("%s cmd_id %u beyond limit\n", __func__, cmd_id); + return -DYN_BEYOND_MAX; + } + + return psp_do_cmd(TDM2PSP_CMD(cmd_id), cmd_data, error); +} + +static int calc_task_context_hash(struct context_message context_msg, uint8_t *hash) +{ + int ret = 0; + struct crypto_shash *shash = NULL; + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + shash = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(shash)) { + pr_err("can't alloc hash\n"); + return -DYN_ERR_API; + } + + { + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + ret = crypto_shash_init(sdesc); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_init failed\n"); + goto end; + } + + if (context_msg.flag & CONTEXT_CHECK_PID) { + ret = crypto_shash_update(sdesc, (uint8_t *)&context_msg.pid, + sizeof(context_msg.pid)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_COMM) { + ret = crypto_shash_update(sdesc, context_msg.comm, + strlen(context_msg.comm)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_MODNAME) { + ret = crypto_shash_update(sdesc, context_msg.module_name, + strlen(context_msg.module_name)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + ret = crypto_shash_final(sdesc, hash); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_final failed\n"); + goto free_shash; + } + } + +free_shash: + crypto_free_shash(shash); +end: + return ret; +} + +static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) +{ + int ret = 0; + struct context_message ctx_msg = {0}; + unsigned long return_address = 0; +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + struct module *p_module = NULL; +#elif IS_ENABLED(CONFIG_KALLSYMS) + char symbol_buf[128] = {0}; + int symbol_len = 0; + char *symbol_begin = NULL; + char *symbol_end = NULL; +#endif + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + ctx_msg.flag = flag; + ctx_msg.pid = 
current->pid; + memcpy(ctx_msg.comm, current->comm, sizeof(current->comm)); + + return_address = CALLER_ADDR1; + if (return_address) { +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + p_module = __module_address(return_address); + // caller is a module + if (p_module) + memcpy(ctx_msg.module_name, p_module->name, sizeof(p_module->name)); + // caller is built-in + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#elif IS_ENABLED(CONFIG_KALLSYMS) + symbol_len = sprint_symbol((char *)symbol_buf, return_address); + if (!symbol_len) { + ret = -DYN_ERR_API; + pr_err("sprint_symbol failed\n"); + goto end; + } + symbol_begin = strchr((char *)symbol_buf, '['); + if (!symbol_begin) { + ret = -DYN_NULL_POINTER; + pr_err("module name does not exist\n"); + goto end; + } + symbol_end = strchr((char *)symbol_buf, ']'); + if (!symbol_end) { + ret = -DYN_NULL_POINTER; + pr_err("module name does not exist\n"); + goto end; + } + symbol_begin++; + if (symbol_end - symbol_begin) + memcpy(ctx_msg.module_name, symbol_begin, symbol_end - symbol_begin); + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#endif + } else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); + + ret = calc_task_context_hash(ctx_msg, hash); + if (ret) { + pr_err("calc_task_context_hash failed\n"); + goto end; + } + +end: + return ret; +} + +static int tdm_verify_phy_addr_valid(struct addr_range_info *range) +{ + int ret = 0; +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + int i; + uint64_t phy_addr_start, phy_addr_end; + + for (i = 0; i < range->count; i++) { + phy_addr_start = __sme_clr(range->addr[i].addr_start); + phy_addr_end = __sme_clr(range->addr[i].addr_start + range->addr[i].length); + + if ((PHYS_PFN(phy_addr_start) >= max_pfn) || (PHYS_PFN(phy_addr_end) >= max_pfn)) { + pr_err("phy_addr or length beyond max_pfn\n"); + ret = -DYN_ERR_MEM; + break; + } + } +#else + pr_warn("TDM: Can't get max_pfn, skip physical address check\n"); +#endif + + return ret; +} + +/* Convert the virtual address to a physical address, then check whether it + * is physically contiguous memory. + */ +static int ptable_virt_to_phy(uint64_t vaddr, struct addr_info *p_addr_info, uint64_t *left_convert) +{ + int ret = 0; + unsigned int level = 0; + pte_t *pte; + uint64_t local_page_mask = 0; + uint64_t local_page_size = 0; + uint64_t now_base = vaddr; + uint64_t last_phy_addr = 0; + uint64_t last_phy_len = 0; + uint64_t now_phy_addr = 0; + + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + local_page_size = page_level_size(level); + local_page_mask = page_level_mask(level); + + switch (level) { + case PG_LEVEL_4K: + p_addr_info->addr_start = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_2M: + p_addr_info->addr_start = (uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + p_addr_info->addr_start = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + if ((p_addr_info->addr_start & ~local_page_mask) == 0) { + /*|--------------page_size-------------------|*/ + /*|-------*left_convert-------|*/ + if (*left_convert < local_page_size) { + p_addr_info->length = *left_convert; + 
*left_convert = 0; + } + /*|--------------page_size-------------------|-----*/ + /*|---------------------*left_convert-----------------------|*/ + else { + p_addr_info->length = local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + } + } else { + /*|--------------page_size-------------------|------*/ + /* |-------*left_convert---------|*/ + if ((p_addr_info->addr_start + *left_convert) < + ((p_addr_info->addr_start & local_page_mask) + local_page_size)) { + p_addr_info->length = *left_convert; + *left_convert = 0; + } + /*|--------------page_size-------------------|........*/ + /* |-----------------*left_convert-----------------|*/ + else { + p_addr_info->length = (p_addr_info->addr_start & local_page_mask) + + local_page_size - p_addr_info->addr_start; + now_base += p_addr_info->length; + *left_convert -= p_addr_info->length; + } + } + + last_phy_len = p_addr_info->length; + last_phy_addr = p_addr_info->addr_start; + + while (*left_convert) { + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + switch (level) { + case PG_LEVEL_4K: + now_phy_addr = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_2M: + now_phy_addr = (uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + now_phy_addr = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + /*not continuous memory*/ + if ((last_phy_addr + last_phy_len) != now_phy_addr) + break; + + if (*left_convert < local_page_size) { + p_addr_info->length += *left_convert; + *left_convert = 0; + } else { + p_addr_info->length += local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + last_phy_addr = now_phy_addr; + last_phy_len = local_page_size; + } + } + +end: + return ret; +} + +int psp_check_tdm_support(void) +{ + int ret = 0; + struct tdm_version version; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (tdm_support) + goto end; + + ret = psp_get_fw_info(&version); + if (ret) { + tdm_support = 0; + goto end; + } + + tdm_support = 1; + } + +end: + return tdm_support; +} +EXPORT_SYMBOL_GPL(psp_check_tdm_support); + +int psp_get_fw_info(struct tdm_version *version) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_fw_cmd *fw_cmd = NULL; + struct tdm_fw_resp *fw_resp = NULL; + + if (!version) { + ret = -DYN_NULL_POINTER; + pr_err("version is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + fw_cmd = (struct tdm_fw_cmd *)tdm_cmdresp_data; + fw_cmd->cmd_type = TDM_FW_VERSION; + + ret = tdm_do_cmd(0, (void *)fw_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + + if (error) { + ret = -error; + pr_warn("get_fw_info exception: 0x%x\n", error); + goto free_cmdresp; + } + + fw_resp = (struct tdm_fw_resp *)tdm_cmdresp_data; + memcpy(version, &fw_resp->version, sizeof(struct tdm_version)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_get_fw_info); + +int 
psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, + uint32_t flag, struct authcode_2b *code) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_create_cmd *create_cmd = NULL; + struct tdm_create_resp *create_resp = NULL; + uint32_t addr_range_info_len = 0; + struct addr_range_info *paddr_range_info = NULL; + uint32_t info_index = 0; + uint64_t now_base_vaddr = 0; + uint64_t tf_left_size = 0; + uint32_t count = 0; + + if (!range) { + ret = -DYN_NULL_POINTER; + pr_err("range is null pointer\n"); + goto end; + } + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (range->count > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("range->count %d is beyond RANGE_CNT_MAX %d\n", range->count, RANGE_CNT_MAX); + goto end; + } + if (range->count == 0) { + ret = -DYN_ERR_SIZE_SMALL; + pr_err("range->count is zero!\n"); + goto end; + } + + /*create task by vaddr*/ + if (flag & TASK_CREATE_VADDR) { + paddr_range_info = kzalloc(sizeof(struct addr_range_info) + + RANGE_CNT_MAX * sizeof(struct addr_info), GFP_KERNEL); + if (!paddr_range_info) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for paddr_range_info failed\n"); + goto end; + } + + now_base_vaddr = range->addr[0].addr_start; + tf_left_size = range->addr[0].length; + while (tf_left_size && (count++ < RANGE_CNT_MAX + 1)) { + ret = ptable_virt_to_phy(now_base_vaddr, + &paddr_range_info->addr[info_index], &tf_left_size); + if (ret) { + pr_err("address convert failed!\n"); + goto free_paddr_range_info; + } + + now_base_vaddr = now_base_vaddr + + paddr_range_info->addr[info_index++].length; + if (info_index > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("info_index: %d is beyond %d\n", info_index, RANGE_CNT_MAX); + goto free_paddr_range_info; + } + } + + paddr_range_info->count = info_index; + addr_range_info_len = paddr_range_info->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } else { + /*check if physics address valid*/ + ret = tdm_verify_phy_addr_valid(range); + if (ret) { + pr_err("range address is abnormal!\n"); + goto end; + } + addr_range_info_len = range->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto free_paddr_range_info; + } + + create_cmd = (struct tdm_create_cmd *)tdm_cmdresp_data; + create_cmd->cmd_type = TDM_TASK_CREATE; + create_cmd->cmd_ctx_flag = flag; + + memcpy(&create_cmd->m_data, data, sizeof(struct measure_data)); + create_cmd->authcode_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : code->len; + + ret = tdm_get_cmd_context_hash(flag, create_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + if (flag & TASK_CREATE_VADDR) + memcpy(&create_cmd->range_info, paddr_range_info, addr_range_info_len); + else + memcpy(&create_cmd->range_info, range, addr_range_info_len); + + ret = tdm_do_cmd(0, (void *)create_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("create_measure_task exception error: 0x%x\n", error); + goto free_cmdresp; + } + + create_resp = (struct tdm_create_resp *)tdm_cmdresp_data; + code->len = create_resp->authcode_len; + code->len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : code->len; + memcpy(&code->val[0], &create_resp->authcode_val[0], code->len); + + head = &dyn_head.head; + task_node = kzalloc(sizeof(struct tdm_task_ctx), GFP_KERNEL); + if (!task_node) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", sizeof(struct tdm_task_ctx)); + goto free_cmdresp; + } + + task_node->task_id = create_resp->task_id; + task_node->handler = NULL; + task_node->cmd_ctx_flag = flag; + + ret = list_enqueue(task_node); + if (ret) { + pr_err("task %d enqueue failed!!!\n", task_node->task_id); + goto free_task_node; + } + + kfree(tdm_cmdresp_data); + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); + + return task_node->task_id; + +free_task_node: + kfree(task_node); +free_cmdresp: + kfree(tdm_cmdresp_data); +free_paddr_range_info: + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_create_measure_task); + +int psp_query_measure_status(uint32_t task_id, struct measure_status *status) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_query_cmd *query_cmd = NULL; + struct tdm_query_resp *query_resp = NULL; + + if (!status) { + ret = -DYN_NULL_POINTER; + pr_err("status is null pointer\n"); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + query_cmd = (struct tdm_query_cmd *)tdm_cmdresp_data; + query_cmd->cmd_type = TDM_TASK_QUERY; + query_cmd->task_id = task_id; + + ret = tdm_do_cmd(0, query_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + query_resp = (struct tdm_query_resp *)tdm_cmdresp_data; + memcpy(status, &query_resp->m_status, sizeof(struct measure_status)); +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_query_measure_status); + +int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code, + measure_exception_handler_t handler) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_register_cmd *register_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if 
(code->len > AUTHCODE_MAX) {
+		ret = -DYN_BEYOND_MAX;
+		pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX);
+		goto end;
+	}
+
+	if (!list_check_exist(task_id)) {
+		pr_err("task %d isn't created\n", task_id);
+		return -DYN_NOT_EXIST;
+	}
+	/* check if task_id is registered already */
+	head = &dyn_head.head;
+	lock = &dyn_head.lock;
+
+	read_lock(lock);
+	list_for_each_entry_safe(task_node, tmp_node, head, list) {
+		if (task_node->task_id == task_id) {
+			if (handler && task_node->handler) {
+				pr_err("task %d is registered already\n", task_id);
+				read_unlock(lock);
+				return -DYN_EEXIST;
+			}
+			break;
+			/* task_node will be used for next context */
+		}
+	}
+	read_unlock(lock);
+
+	tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL);
+	if (!tdm_cmdresp_data) {
+		ret = -DYN_ERR_MEM;
+		pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE);
+		goto end;
+	}
+
+	register_cmd = (struct tdm_register_cmd *)tdm_cmdresp_data;
+	temp_cmd = &register_cmd->cmd;
+	temp_cmd->cmd_type = TDM_TASK_VERIFY_AUTH;
+	temp_cmd->task_id = task_id;
+	temp_cmd->code_len = code->len;
+	temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len;
+	memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len);
+
+	ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash);
+	if (ret) {
+		pr_err("tdm_get_cmd_context_hash failed\n");
+		goto free_cmdresp;
+	}
+
+	ret = tdm_do_cmd(0, register_cmd, &error);
+	if (ret && ret != -EIO) {
+		pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error);
+		goto free_cmdresp;
+	}
+	if (error) {
+		ret = -error;
+		pr_err("%s exception error: 0x%x\n", __func__, error);
+		goto free_cmdresp;
+	}
+
+	write_lock(lock);
+	task_node->handler = handler;
+	write_unlock(lock);
+
+free_cmdresp:
+	kfree(tdm_cmdresp_data);
+end:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(psp_register_measure_exception_handler);
+
+int psp_destroy_measure_task(uint32_t task_id, struct authcode_2b *code)
+{
+	int ret = 0;
+	int error;
+	struct list_head *head = NULL;
+	struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL;
+	unsigned char *tdm_cmdresp_data = NULL;
+	struct tdm_destroy_cmd *destroy_cmd = NULL;
+	struct tdm_common_cmd *temp_cmd = NULL;
+	rwlock_t *lock = NULL;
+
+	if (!code) {
+		ret = -DYN_NULL_POINTER;
+		pr_err("code is null pointer\n");
+		goto end;
+	}
+	if (code->len > AUTHCODE_MAX) {
+		ret = -DYN_BEYOND_MAX;
+		pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX);
+		goto end;
+	}
+
+	if (!list_check_exist(task_id)) {
+		pr_err("task %d isn't created\n", task_id);
+		return -DYN_NOT_EXIST;
+	}
+
+	head = &dyn_head.head;
+	lock = &dyn_head.lock;
+
+	read_lock(lock);
+	list_for_each_entry_safe(task_node, tmp_node, head, list) {
+		if (task_node->task_id == task_id)
+			break;
+	}
+	read_unlock(lock);
+
+	if (task_node->cmd_ctx_flag & TASK_ATTR_NO_UPDATE) {
+		pr_warn("Task %d is not allowed to destroy!\n", task_node->task_id);
+		ret = -DYN_NO_ALLOW_UPDATE;
+		goto end;
+	}
+
+	tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL);
+	if (!tdm_cmdresp_data) {
+		ret = -DYN_ERR_MEM;
+		pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE);
+		goto end;
+	}
+
+	destroy_cmd = (struct tdm_destroy_cmd *)tdm_cmdresp_data;
+	temp_cmd = &destroy_cmd->cmd;
+	temp_cmd->cmd_type = TDM_TASK_DESTROY;
+	temp_cmd->task_id = task_id;
+	temp_cmd->code_len = code->len;
+	temp_cmd->code_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, destroy_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + if (task_node->handler) { + write_lock(lock); + task_node->handler = NULL; + write_unlock(lock); + } + + write_lock(lock); + list_del(&task_node->list); + write_unlock(lock); + + kfree(task_node); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_destroy_measure_task); + +int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code, + struct measure_update_data *data) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_update_cmd *update_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + if (task_node->cmd_ctx_flag & TASK_ATTR_NO_UPDATE) { + pr_warn("Task %d is not allowed to update!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + update_cmd = (struct tdm_update_cmd *)tdm_cmdresp_data; + temp_cmd = &update_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_UPDATE; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + memcpy(&update_cmd->update_data, data, sizeof(struct measure_update_data)); + + ret = tdm_do_cmd(0, tdm_cmdresp_data, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_update_measure_task); + +int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_startstop_cmd *startstop_cmd = NULL; + struct tdm_startstop_resp *startstop_resp = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + startstop_cmd = (struct tdm_startstop_cmd *)tdm_cmdresp_data; + temp_cmd = &startstop_cmd->cmd; + temp_cmd->cmd_type = start ? TDM_TASK_START : TDM_TASK_STOP; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + if ((temp_cmd->cmd_type == TDM_TASK_STOP) && (task_node->cmd_ctx_flag & + TASK_ATTR_NO_UPDATE)) { + pr_warn("Task %d is not allowed to stop!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto free_cmdresp; + } + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, startstop_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + startstop_resp = (struct tdm_startstop_resp *)tdm_cmdresp_data; + + kfree(tdm_cmdresp_data); + + return startstop_resp->m_status.status; + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_startstop_measure_task); + +int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_export_cert_cmd *cert_cmd = NULL; + struct tdm_export_cert_resp *cert_resp = NULL; + + if (!cert) { + ret = -DYN_NULL_POINTER; + pr_err("cert is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + cert_cmd = (struct tdm_export_cert_cmd *)tdm_cmdresp_data; + cert_cmd->cmd_type = TDM_EXPORT_CERT; + cert_cmd->key_usage_id = key_usage_id; + + ret = tdm_do_cmd(0, (void *)cert_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + cert_resp = (struct tdm_export_cert_resp *)tdm_cmdresp_data; + memcpy(cert, &cert_resp->cert, sizeof(struct tdm_cert)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_export_cert); + +int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, + struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id, + uint8_t *report_buffer, uint32_t *length) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_get_report_cmd *report_cmd = NULL; + struct tdm_report *report_resp = NULL; + uint32_t needed_length = 0; + + if (!user_supplied_data) { + ret = -DYN_NULL_POINTER; + pr_err("user_supplied_data is null pointer\n"); + goto end; + } + if (!report_buffer) { + ret = -DYN_NULL_POINTER; + pr_err("report_buffer is null pointer\n"); + goto end; + } + if (!length) { + ret = -DYN_NULL_POINTER; + pr_err("length is null pointer\n"); + goto end; + } + if ((report_type != TDM_REPORT_SUMMARY) && (report_type != TDM_REPORT_DETAIL)) { + ret = -DYN_ERR_REPORT_TYPE; + pr_err("invalid report_type\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + report_cmd = (struct tdm_get_report_cmd *)tdm_cmdresp_data; + + report_cmd->cmd_type = TDM_GET_REPORT; + report_cmd->task_id = task_id; + if (task_id == TDM_TASK_ALL) { + if (!selection) { + ret = -DYN_NULL_POINTER; + 
pr_err("selection is null pointer\n");
+			goto free_cmdresp;
+		}
+		report_cmd->selection_len = selection->len;
+		report_cmd->selection_len = (report_cmd->selection_len > TDM_MAX_TASK_BITMAP) ?
+			TDM_MAX_TASK_BITMAP : report_cmd->selection_len;
+		memcpy(&report_cmd->selection_bitmap[0], &selection->bitmap[0],
+			report_cmd->selection_len);
+	}
+
+	report_cmd->user_data_len = (user_supplied_data->len > TDM_MAX_NONCE_SIZE) ?
+		TDM_MAX_NONCE_SIZE : user_supplied_data->len;
+	memcpy(&report_cmd->user_data_val[0], &user_supplied_data->val[0],
+		report_cmd->user_data_len);
+	report_cmd->report_type = report_type;
+	report_cmd->key_usage_id = key_usage_id;
+
+	ret = tdm_do_cmd(0, (void *)report_cmd, &error);
+	if (ret && ret != -EIO) {
+		pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error);
+		goto free_cmdresp;
+	}
+	if (error) {
+		ret = -error;
+		pr_err("%s exception error: 0x%x\n", __func__, error);
+		goto free_cmdresp;
+	}
+
+	report_resp = (struct tdm_report *)tdm_cmdresp_data;
+	if (report_type == TDM_REPORT_SUMMARY)
+		needed_length = sizeof(struct tdm_report) + sizeof(struct tdm_report_sig);
+	else
+		needed_length = sizeof(struct tdm_report)
+			+ report_resp->task_nums * sizeof(struct tdm_detail_task_status)
+			+ sizeof(struct tdm_report_sig);
+
+	if (needed_length > *length) {
+		pr_warn("needed_length %d is beyond length %d\n", needed_length, *length);
+		*length = needed_length;
+		ret = -DYN_ERR_SIZE_SMALL;
+	} else {
+		memcpy(report_buffer, report_resp, needed_length);
+	}
+
+free_cmdresp:
+	kfree(tdm_cmdresp_data);
+end:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tdm_get_report);
+
+int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest,
+		struct tdm_pcr_value_2b *pcr_values)
+{
+	int ret = 0;
+	int error;
+	unsigned char *tdm_cmdresp_data = NULL;
+	struct tdm_get_vpcr_cmd *vpcr_cmd = NULL;
+	struct tdm_get_vpcr_resp *vpcr_resp = NULL;
+
+	if (!digest) {
+		ret = -DYN_NULL_POINTER;
+		pr_err("digest is null pointer\n");
+		goto end;
+	}
+	if (!pcr_values) {
+		ret = -DYN_NULL_POINTER;
+		pr_err("pcr_values is null pointer\n");
+		goto end;
+	}
+
+	tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL);
+	if (!tdm_cmdresp_data) {
+		ret = -DYN_ERR_MEM;
+		pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE);
+		goto end;
+	}
+
+	vpcr_cmd = (struct tdm_get_vpcr_cmd *)tdm_cmdresp_data;
+
+	vpcr_cmd->cmd_type = TDM_VPCR_AUDIT;
+	memcpy(&vpcr_cmd->pcr, &pcr, sizeof(struct pcr_select));
+
+	ret = tdm_do_cmd(0, (void *)vpcr_cmd, &error);
+	if (ret && ret != -EIO) {
+		pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error);
+		goto free_cmdresp;
+	}
+	if (error) {
+		ret = -error;
+		pr_err("%s exception error: 0x%x\n", __func__, error);
+		goto free_cmdresp;
+	}
+
+	vpcr_resp = (struct tdm_get_vpcr_resp *)tdm_cmdresp_data;
+	memcpy(digest, &vpcr_resp->digest, sizeof(struct tpm2b_digest));
+	pcr_values->task_nums = vpcr_resp->pcr_values.task_nums;
+	memcpy(&pcr_values->task_data[0], &vpcr_resp->pcr_values.task_data[0],
+		pcr_values->task_nums * sizeof(struct tdm_task_data));
+
+free_cmdresp:
+	kfree(tdm_cmdresp_data);
+end:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tdm_get_vpcr_audit);
+
+static long tdm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+{
+	int ret = 0;
+	void __user *argp = (void __user *)arg;
+	unsigned int tdm_cmd = 0;
+	unsigned char *temp_cmd_data = NULL;
+	struct task_selection_2b *selection = NULL;
+	struct data_2b *data = NULL;
+	uint32_t data_to_user_len = 0;
+	uint16_t selection_len = 0;
+	uint16_t user_data_len = 0;
+	struct tdm_get_report_cmd *report_cmd = NULL;
+	struct tdm_user_report_cmd *user_report_cmd = NULL;
+	uint32_t needed_length = 0;
+	struct tdm_get_vpcr_cmd *vpcr_cmd = NULL;
+	struct tdm_get_vpcr_resp *vpcr_resp = NULL;
+	uint32_t pcr_num = 0;
+
+	if (_IOC_TYPE(ioctl) != TDM_IOC_TYPE) {
+		ret = -EINVAL;
+		pr_err("ioctl 0x%08x is invalid\n", ioctl);
+		goto end;
+	}
+
+	temp_cmd_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL);
+	if (!temp_cmd_data) {
+		ret = -ENOMEM;
+		pr_err("kzalloc for size 0x%lx failed\n", TDM_C2P_CMD_SIZE);
+		goto end;
+	}
+
+	tdm_cmd = _IOC_NR(ioctl);
+
+	switch (tdm_cmd) {
+	case USER_EXPORT_CERT:
+		ret = tdm_export_cert(TDM_AK_USAGE_ID, (struct tdm_cert *)temp_cmd_data);
+		if (ret) {
+			pr_err("Execute tdm export cert command failed!\n");
+			goto free_mem;
+		}
+		data_to_user_len = sizeof(struct tdm_cert);
+		break;
+
+	case USER_GET_REPORT:
+		if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_user_report_cmd))) {
+			pr_err("%s copy from user failed\n", __func__);
+			ret = -EFAULT;
+			goto free_mem;
+		}
+
+		user_report_cmd = (struct tdm_user_report_cmd *)temp_cmd_data;
+		needed_length = user_report_cmd->needed_length;
+		report_cmd = &user_report_cmd->report_cmd;
+		selection_len = report_cmd->selection_len > TDM_MAX_TASK_BITMAP ?
+			TDM_MAX_TASK_BITMAP : report_cmd->selection_len;
+
+		selection = kzalloc(sizeof(struct task_selection_2b)
+				+ selection_len * sizeof(uint8_t), GFP_KERNEL);
+		if (!selection) {
+			ret = -ENOMEM;
+			pr_err("kzalloc failed\n");
+			goto free_mem;
+		}
+
+		selection->len = selection_len;
+		memcpy(&selection->bitmap[0], &report_cmd->selection_bitmap[0], selection->len);
+
+		user_data_len = report_cmd->user_data_len > TDM_MAX_NONCE_SIZE ?
+			TDM_MAX_NONCE_SIZE : report_cmd->user_data_len;
+		data = kzalloc(sizeof(struct data_2b)
+				+ user_data_len * sizeof(uint8_t), GFP_KERNEL);
+		if (!data) {
+			ret = -ENOMEM;
+			pr_err("kzalloc failed\n");
+			goto free_mem;
+		}
+
+		data->len = user_data_len;
+		memcpy(&data->val[0], &report_cmd->user_data_val[0], data->len);
+
+		ret = tdm_get_report(report_cmd->task_id, selection, data, report_cmd->report_type,
+				report_cmd->key_usage_id, temp_cmd_data, &needed_length);
+		if (ret) {
+			pr_err("Execute tdm report command failed!\n");
+			goto free_mem;
+		}
+
+		data_to_user_len = needed_length;
+		break;
+
+	case USER_VPCR_AUDIT:
+		if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_get_vpcr_cmd))) {
+			pr_err("%s copy from user failed\n", __func__);
+			ret = -EFAULT;
+			goto free_mem;
+		}
+
+		vpcr_cmd = (struct tdm_get_vpcr_cmd *)temp_cmd_data;
+		vpcr_resp = (struct tdm_get_vpcr_resp *)temp_cmd_data;
+		pcr_num = vpcr_cmd->pcr.pcr;
+
+		ret = tdm_get_vpcr_audit(vpcr_cmd->pcr, &vpcr_resp->digest, &vpcr_resp->pcr_values);
+		if (ret) {
+			pr_err("Execute tdm vpcr audit command failed!\n");
+			goto free_mem;
+		}
+
+		vpcr_resp->pcr = pcr_num;
+		data_to_user_len = sizeof(struct tdm_get_vpcr_resp)
+			+ vpcr_resp->pcr_values.task_nums * sizeof(struct tdm_task_data);
+		break;
+
+	case USER_SHOW_DEVICE:
+		ret = psp_get_fw_info(&((struct tdm_show_device *)temp_cmd_data)->version);
+		if (ret) {
+			pr_err("firmware version get failed!\n");
+			goto free_mem;
+		}
+
+		data_to_user_len = sizeof(struct tdm_show_device);
+		break;
+
+	default:
+		pr_err("invalid tdm_cmd: %d from user\n", tdm_cmd);
+		ret = -EINVAL;
+		goto free_mem;
+	}
+
+	if (copy_to_user(argp, temp_cmd_data, data_to_user_len)) {
+		pr_err("%s copy to user failed\n", __func__);
+		ret = -EFAULT;
+		goto free_mem;
+	}
+
+free_mem:
+	/* kfree(NULL) is a no-op, so buffers left unallocated are safe here */
+	kfree(temp_cmd_data);
+	kfree(selection);
+	kfree(data);
+end:
+	
return ret; +} + +static const struct file_operations tdm_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = tdm_ioctl, +}; + +static struct miscdevice misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "tdm", + .fops = &tdm_fops, +}; + +int tdm_dev_init(void) +{ + int ret = 0; + + if (tdm_init_flag) + return 0; + + INIT_KFIFO(kfifo_error_task); + INIT_LIST_HEAD(&dyn_head.head); + rwlock_init(&dyn_head.lock); + spin_lock_init(&kfifo_lock); + + ret = psp_register_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + if (ret) { + pr_err("notifier function registration failed\n"); + return ret; + } + + kthread = kthread_create(measure_exception_handling_thread, NULL, + "measure_exception_handling_thread"); + if (IS_ERR(kthread)) { + pr_err("kthread_create fail\n"); + ret = PTR_ERR(kthread); + goto unreg; + } + + wake_up_process(kthread); + + ret = misc_register(&misc); + if (ret) { + pr_err("misc_register for tdm failed\n"); + goto stop_kthread; + } + + tdm_init_flag = 1; + pr_info("TDM driver loaded successfully!\n"); + + return ret; + +stop_kthread: + if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } +unreg: + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + return ret; +} + +int tdm_dev_destroy(void) +{ + if (tdm_destroy_flag) + goto end; + + if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } + + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + misc_deregister(&misc); + tdm_destroy_flag = 1; +end: + return 0; +} + diff --git a/drivers/crypto/ccp/hygon/tdm-dev.h b/drivers/crypto/ccp/hygon/tdm-dev.h new file mode 100644 index 0000000000000000000000000000000000000000..afc4761a7e81778bb8e0bb9c4984504a540766bc --- /dev/null +++ b/drivers/crypto/ccp/hygon/tdm-dev.h @@ -0,0 +1,504 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* Change log: + * Version: 0.7 (fw version 1.4) + * 1.Adjust the TDM driver to accommodate multiple versions of the kernel. + * Version: 0.6 (fw version 1.4) + * 1.remove psp_get_fw_info from hygon_tdm_init, add tdm show device support to ioctl for hag. + * Version: 0.5 (fw version 1.4) + * 1.add support for hanging machine when task exception with special attribute. + * Version: 0.4 (fw version 1.3) + * 1.add vpcr support. + * 2.add task create by vaddr. + * Version: 0.3 (fw version 1.2) + * 1.add remote authentication support. 
+ */
+#ifndef __TDM_DEV_H__
+#define __TDM_DEV_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MIN_VPCR 10
+#define MAX_VPCR 16
+
+/*Macro definition for measurement*/
+#define TDM_MAX_TASK_BITMAP 16
+#define TDM_MAX_NONCE_SIZE 32
+
+#define RANGE_CNT_MAX 0x80
+#define MEASURE_TASK_MAX 100
+#define AUTHCODE_MAX 16
+#define AUTH_TRY_DELAY 1
+
+#define HASH_ALGO_SM3 0
+#define HASH_ALGO_SHA1 1
+#define HASH_ALGO_SHA256 2
+#define HASH_ALGO_SHA384 3
+#define HASH_ALGO_SHA512 4
+
+#define SM3_256_DIGEST_SIZE 32
+#define SHA1_DIGEST_SIZE 20
+#define SHA256_DIGEST_SIZE 32
+#define SHA384_DIGEST_SIZE 48
+#define SHA512_DIGEST_SIZE 64
+
+#define CONTEXT_CHECK_PID 0x1
+#define CONTEXT_CHECK_COMM 0x2
+#define CONTEXT_CHECK_MODNAME 0x4
+#define TASK_ATTR_NO_UPDATE 0x10000
+#define TASK_SUPPORT_VPCR 0x20000
+#define TASK_CREATE_VADDR 0x40000
+#define TASK_EXCEPTION_CRASH 0x80000
+
+#define MEASURE_UPDATE_ALGO 0x1
+#define MEASURE_UPDATE_EXPECTED_MEASUREMENT 0x2
+
+/*Macro definition for tdm certificate*/
+#define TDM_MAX_CHIP_ID_LEN 40
+#define TDM_CURVE_SM2_ID 0x3
+#define TDM_PUBKEY_LEN 32
+#define TDM_MAX_USER_ID_LEN 126
+#define TDM_SIG_LEN 32
+#define TDM_HEADER_AND_PUBKEY_LEN 284
+
+/*Macro definition for tdm report*/
+#define TDM_TASK_ALL 0xffffffff
+#define TDM_REPORT_SUMMARY 0
+#define TDM_REPORT_DETAIL 1
+
+/* CPU to psp command declaration */
+enum C2P_CMD_TYPE {
+	TDM_TASK_CREATE = 0x0,
+	TDM_TASK_VERIFY_AUTH,
+	TDM_TASK_QUERY,
+	TDM_TASK_DESTROY,
+	TDM_TASK_UPDATE,
+	TDM_TASK_STOP,
+	TDM_TASK_START,
+	TDM_FW_VERSION,
+	TDM_EXPORT_CERT,
+	TDM_GET_REPORT,
+	TDM_VPCR_AUDIT,
+	TDM_MAX_CMD
+};
+
+/* User interaction command declaration */
+enum USER_CMD_TYPE {
+	USER_EXPORT_CERT = 0x80,
+	USER_GET_REPORT,
+	USER_VPCR_AUDIT,
+	USER_SHOW_DEVICE,
+	USER_MAX_CMD
+};
+
+/*Public usage id definition for tdm certificate*/
+enum _tdm_key_usage_id {
+	TDM_INVALID_USAGE_ID = 0x1000,
+	TDM_CEK_USAGE_ID = 0x1004,
+	TDM_AK_USAGE_ID = 0x2001,
+	TDM_MAX_USAGE_ID
+};
+
+/*Public status and type declaration*/
+enum TDM_TASK_STATUS {
+	DYN_INIT = 0x0,
+	DYN_TO_RUN,
+	DYN_RUN,
+	DYN_TO_STOP,
+	DYN_STOP
+};
+
+enum TDM_MEASURE_STATUS {
+	MER_NORMAL = 0x0,
+	MER_ERR
+};
+
+enum DYN_ERROR_TYPE {
+	DYN_NORMAL = 0x0,
+	DYN_NOT_EXIST,
+	DYN_AUTH_FAIL,
+	DYN_STATUS_NOT_SUIT,
+	DYN_BEYOND_MAX,
+	DYN_DA_PERIOD,
+	DYN_NULL_POINTER,
+	DYN_ERR_API,
+	DYN_EEXIST,
+	DYN_ERR_MEM,
+	DYN_ERR_AUTH_LEN,
+	DYN_ERR_KEY_ID,
+	DYN_NO_ALLOW_UPDATE,
+	DYN_ERR_HASH_ALGO,
+	DYN_ERR_REPORT_TYPE,
+	DYN_ERR_SIZE_SMALL,
+	DYN_ERR_ADDR_MAPPING,
+	DYN_ERR_PCR_NUM,
+	DYN_ERR_ORIG_TPM_PCR,
+	DYN_MAX_ERR_TYPE
+};
+
+/*Data structure declaration for measurement*/
+struct addr_info {
+	uint64_t addr_start;
+	uint64_t length;
+} __packed;
+
+struct addr_range_info {
+	uint32_t count;
+	struct addr_info addr[];
+} __packed;
+
+struct measure_data {
+	uint32_t hash_algo;
+	uint8_t expected_measurement[32];
+	uint32_t period_ms;
+	uint32_t pcr;
+} __packed;
+
+struct authcode_2b {
+	uint16_t len;
+	uint8_t val[];
+} __packed;
+
+struct measure_status {
+	uint8_t status;
+	uint8_t error;
+	uint64_t count;
+} __packed;
+
+struct measure_update_data {
+	uint32_t update_flag;
+	uint32_t algo;
+	uint8_t expected_measurement[32];
+} __packed;
+
+struct da_status {
+	uint64_t err_time;
+	uint16_t interval_time;
+	uint16_t err_cnt;
+} __packed;
+
+struct tdm_version {
+	uint8_t api_major;
+	uint8_t api_minor;
+	uint32_t buildId;
+	uint32_t task_max;
+	uint32_t range_max_per_task;
+} __packed;
+
+struct task_selection_2b {
+	uint16_t len;
+	uint8_t bitmap[];
+};
+
+struct 
data_2b {
+	uint16_t len;
+	uint8_t val[];
+};
+
+/*Data structure declaration for vpcr*/
+struct pcr_select {
+	uint16_t hash;
+	uint32_t pcr;
+} __packed;
+
+union tpmu_ha {
+	uint8_t sha1[SHA1_DIGEST_SIZE];
+	uint8_t sha256[SHA256_DIGEST_SIZE];
+	uint8_t sha384[SHA384_DIGEST_SIZE];
+	uint8_t sha512[SHA512_DIGEST_SIZE];
+	uint8_t sm3_256[SM3_256_DIGEST_SIZE];
+};
+
+struct tpm2b_digest {
+	uint16_t size;
+	uint8_t buffer[sizeof(union tpmu_ha)];
+} __packed;
+
+struct tdm_task_data {
+	uint32_t task_id;
+	uint8_t hash[32];
+} __packed;
+
+struct tdm_pcr_value_2b {
+	uint32_t task_nums;
+	struct tdm_task_data task_data[];
+} __packed;
+
+/*Data structure declaration for tdm certificate*/
+struct _tdm_ecc_pubkey {
+	uint32_t curve_id;
+	uint8_t pubkey_qx[TDM_PUBKEY_LEN];
+	uint8_t pubkey_qy[TDM_PUBKEY_LEN];
+	uint16_t user_id_len;
+	uint8_t user_id[TDM_MAX_USER_ID_LEN];
+} __packed;
+
+struct _tdm_ecc_signature {
+	uint8_t sig_r[TDM_SIG_LEN];
+	uint8_t sig_s[TDM_SIG_LEN];
+} __packed;
+
+/*
+ ************************ Hygon TDM Certificate - ECC256 ***************************
+ *|00h |31:0   |VERSION          |Certificate version. 0...                        |
+ *|04h |7:0    |-                |Reserved. Set to zero                            |
+ *|06h |7:0    |CHIP_ID_LEN      |                                                 |
+ *|08h |319:0  |CHIP_ID          |Unique ID of every chip.                         |
+ *|30h |31:0   |KEY_USAGE_ID     |Usage id of the key.                             |
+ *|34h |63:0   |-                |Reserved. Set to zero.                           |
+ *|3Ch |31:0   |CURVE_ID         |ECC curve id                                     |
+ *|40h |255:0  |Qx               |Public key Qx                                    |
+ *|60h |255:0  |Qy               |Public key Qy                                    |
+ *|80h |7:0    |USER_ID_LEN      |GM user id len                                   |
+ *|82h |1007:0 |USER_ID          |GM user id                                       |
+ *|100h|223:0  |-                |Reserved. Set to zero.                           |
+ *|11Ch|31:0   |SIG1_KEY_USAGE_ID|Key type for sig1.                               |
+ *|120h|255:0  |SIG1_R           |Signature R of key1.                             |
+ *|140h|255:0  |SIG1_S           |Signature S of key1.                             |
+ *|160h|223:0  |-                |Reserved. Set to zero                            |
+ *|17Ch|31:0   |SIG2_KEY_USAGE_ID|Key type for sig2.                               |
+ *|180h|255:0  |SIG2_R           |Signature R of key2.                             |
+ *|1A0h|255:0  |SIG2_S           |Signature S of key2.                             |
+ *************************************************************************************
+ */
+struct tdm_cert {
+	uint32_t version;
+	uint8_t reserved_0[2];
+	uint16_t chip_id_len;
+	uint8_t chip_id[TDM_MAX_CHIP_ID_LEN];
+	uint32_t key_usage_id;
+	uint8_t reserved_1[8];
+	struct _tdm_ecc_pubkey ecc_pubkey;
+	uint8_t reserved_2[28];
+	uint32_t sig1_key_usage_id;
+	struct _tdm_ecc_signature ecc_sig1;
+	uint8_t reserved_3[28];
+	uint32_t sig2_key_usage_id;
+	struct _tdm_ecc_signature ecc_sig2;
+} __packed;
+
+/*Data structure declaration for tdm measurement report*/
+/*
+ ******************** Hygon TDM Report for Single Task - ECC256 **********************
+ *|+(00h) |31:0  |TASK_ID                |Measured task ID                          |
+ *|+(04h) |31:0  |PERIOD_MS              |Measured period time for the related task |
+ *|+(08h) |63:0  |MEASURED_COUNT         |Measured count for the related task       |
+ *|+(10h) |31:0  |LAST_MEASURE_ELAPSED_MS|Measured time for the last measurement.   |
+ *|+(14h) |95:0  |-                      |Reserved. Set to zero                     |
+ *|+(20h) |255:0 |MEASURED_HASH          |Measured hash for the related task.       |
+ *************************************************************************************
+ */
+struct tdm_detail_task_status {
+	uint32_t task_id;
+	uint32_t period_ms;
+	uint64_t measured_count;
+	uint32_t last_measure_elapsed_ms;
+	uint8_t reserved[12];
+	uint8_t measured_hash[32];
+} __packed;
+
+/*
+ ************************ Hygon TDM Report - ECC256 ***************************
+ *|00h |31:0  |VERSION            |Report version. 0...                      |
+ *|04h |31:0  |FW_VERSION         |Firmware version, BUILD_ID                |
+ *|08h |7:0   |REPORT_TYPE        |Summary report:0, Detailed report:1       |
+ *|09h |39:0  |-                  |Reserved. Set to zero.                    |
+ *|0Eh |15:0  |TASK_NUMS          |ALL task numbers.                         |
+ *|10h |127:0 |TASK_BITMAP        |ALL task bitmap.                          |
+ *|20h |127:0 |TASK_ERROR_BITMAP  |Bitmap for error tasks                    |
+ *|30h |127:0 |TASK_RUNNING_BITMAP|Bitmap for running tasks                  |
+ *|40h |239:0 |-                  |Reserved. Set to zero.                    |
+ *|5Eh |15:0  |USER_DATA_LEN      |User supplied data length.                |
+ *|60h |255:0 |USER_DATA          |User supplied data.                       |
+ *|80h |255:0 |AGGREGATE_HASH     |Aggregate hash for tasks                  |
+ *************************************************************************************
+ */
+struct tdm_report {
+	uint32_t version;
+	uint32_t fw_version;
+	uint8_t report_type;
+	uint8_t reserved_0[5];
+	uint16_t task_nums;
+	uint8_t task_bitmap[TDM_MAX_TASK_BITMAP];
+	uint8_t task_error_bitmap[TDM_MAX_TASK_BITMAP];
+	uint8_t task_running_bitmap[TDM_MAX_TASK_BITMAP];
+	uint8_t reserved_1[30];
+	uint16_t user_supplied_data_len;
+	uint8_t user_supplied_data[TDM_MAX_NONCE_SIZE];
+	uint8_t aggregate_hash[32];
+	struct tdm_detail_task_status detailed_task_status[];
+} __packed;
+
+/*
+ ************************ Hygon TDM Report Signature - ECC256 *************************
+ *|A0h |223:0 |-               |Reserved. Set to zero                               |
+ *|BCh |31:0  |SIG_KEY_USAGE_ID|Key type for sig.                                   |
+ *|C0h |255:0 |SIG_R           |Signature R of key.                                 |
+ *|E0h |255:0 |SIG_S           |Signature S of key.                                 |
+ *************************************************************************************
+ */
+struct tdm_report_sig {
+	uint8_t reserved[28];
+	uint32_t sig_key_usage_id;
+	uint8_t sig_r[TDM_SIG_LEN];
+	uint8_t sig_s[TDM_SIG_LEN];
+} __packed;
+
+/*Data structure declaration for tdm command/response interface*/
+/*
+ * The following commands use this structure:
+ * psp_register_measure_exception_handler
+ * psp_destroy_measure_task
+ * psp_update_measure_task
+ * psp_startstop_measure_task
+ */
+struct tdm_common_cmd {
+	uint32_t cmd_type;
+	uint32_t task_id;
+	uint16_t code_len;
+	uint8_t code_val[AUTHCODE_MAX];
+	uint8_t context_hash[32];
+} __packed;
+
+/*TASK_CREATE*/
+struct tdm_create_cmd {
+	uint32_t cmd_type;
+	uint32_t cmd_ctx_flag;
+	struct measure_data m_data;
+	uint16_t authcode_len;
+	uint8_t context_hash[32];
+	struct addr_range_info range_info;
+} __packed;
+
+struct tdm_create_resp {
+	uint32_t task_id;
+	uint16_t authcode_len;
+	uint8_t authcode_val[AUTHCODE_MAX];
+} __packed;
+
+/*TASK_VERIFY_AUTH*/
+struct tdm_register_cmd {
+	struct tdm_common_cmd cmd;
+} __packed;
+
+/*TASK_QUERY*/
+struct tdm_query_cmd {
+	uint32_t cmd_type;
+	uint32_t task_id;
+} __packed;
+
+struct tdm_query_resp {
+	struct measure_status m_status;
+} __packed;
+
+/*TASK_DESTROY*/
+struct tdm_destroy_cmd {
+	struct tdm_common_cmd cmd;
+} __packed;
+
+/*TASK_UPDATE*/
+struct tdm_update_cmd {
+	struct tdm_common_cmd cmd;
+	struct measure_update_data update_data;
+} __packed;
+
+/*TASK_STOP,TASK_START*/
+struct tdm_startstop_cmd {
+	struct tdm_common_cmd cmd;
+} __packed;
+
+struct tdm_startstop_resp {
+	struct measure_status m_status;
+} __packed;
+
+/*TDM_FW_VERSION*/
+struct tdm_fw_cmd {
+	uint32_t cmd_type;
+} __packed;
+
+struct tdm_fw_resp {
+	struct tdm_version version;
+} __packed;
+
+/*TDM_EXPORT_CERT*/
+struct tdm_export_cert_cmd {
+	uint32_t cmd_type;
+	uint32_t key_usage_id;
+} __packed;
+
+struct tdm_export_cert_resp {
+	struct tdm_cert cert;
+} __packed;
+
+/*TDM_GET_REPORT*/
+struct tdm_get_report_cmd {
+	uint32_t cmd_type;
+	uint32_t task_id;
+	uint16_t selection_len;
+	uint8_t selection_bitmap[TDM_MAX_TASK_BITMAP];
+	uint16_t user_data_len;
+	uint8_t user_data_val[TDM_MAX_NONCE_SIZE];
+	uint8_t report_type;
+	uint32_t key_usage_id;
+} __packed;
+
+/* Response:
+ * struct tdm_report measure_report;
+ * struct tdm_report_sig measure_report_sig;
+ */
+
+struct tdm_user_report_cmd {
+	struct tdm_get_report_cmd report_cmd;
+	uint32_t needed_length;
+} __packed;
+
+/*TDM_VPCR_AUDIT*/
+struct tdm_get_vpcr_cmd {
+	uint32_t cmd_type;
+	struct pcr_select pcr;
+} __packed;
+
+struct tdm_get_vpcr_resp {
+	uint32_t pcr;
+	struct tpm2b_digest digest;
+	struct tdm_pcr_value_2b pcr_values;
+} __packed;
+
+struct tdm_show_device {
+	struct tdm_version version;
+} __packed;
+
+/*Public api definition for tdm*/
+typedef int (*measure_exception_handler_t)(uint32_t task_id);
+
+int psp_check_tdm_support(void);
+int psp_get_fw_info(struct tdm_version *version);
+int psp_create_measure_task(struct addr_range_info *range, struct measure_data *data,
+		uint32_t flag, struct authcode_2b *code);
+int psp_query_measure_status(uint32_t task_id, struct measure_status *status);
+int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code,
+		measure_exception_handler_t handler);
+int psp_destroy_measure_task(uint32_t task_id, struct authcode_2b *code);
+int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code,
+		struct measure_update_data *data);
+int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start);
+int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert);
+int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection,
+		struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id,
+		uint8_t *report_buffer, uint32_t *length);
+int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest,
+		struct tdm_pcr_value_2b *pcr_values);
+
+int tdm_dev_init(void);
+int tdm_dev_destroy(void);
+#endif /* __TDM_DEV_H__ */
diff --git a/drivers/crypto/ccp/hygon/tdm-kernel-guard.c b/drivers/crypto/ccp/hygon/tdm-kernel-guard.c
new file mode 100644
index 0000000000000000000000000000000000000000..c3afe888ea04d77ed2eb53d67364423915e9b300
--- /dev/null
+++ b/drivers/crypto/ccp/hygon/tdm-kernel-guard.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The Hygon TDM KERNEL GUARD module driver
+ *
+ * Copyright (C) 2022 Hygon Info Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/kprobes.h>
+#include <linux/kallsyms.h>
+#include <crypto/hash.h>
+#include <asm/desc.h>
+#include "tdm-dev.h"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+static int eh_obj = -1;
+module_param(eh_obj, int, 0644);
+MODULE_PARM_DESC(eh_obj, "security enhance object for TDM");
+
+/* Objects are protected by TDM now
+ * SCT: 0
+ * IDT: 1
+ */
+enum ENHANCE_OBJS {
+	SCT = 0,
+	IDT,
+	MAX_OBJ
+};
+
+static char *obj_names[MAX_OBJ] = {
+	"SCT",
+	"IDT",
+};
+
+struct tdm_security_enhance {
+	uint64_t vaddr;
+	uint32_t size;
+	struct addr_range_info *mem_range;
+	struct authcode_2b *authcode;
+	struct measure_data mdata;
+	uint32_t context;
+	uint32_t task_id;
+	char *obj_name;
+} __packed;
+
+static struct tdm_security_enhance eh_objs[MAX_OBJ];
+
+static int tdm_regi_callback_handler(uint32_t task_id)
+{
+	int i = 0;
+	int ret = 0;
+
+	for (i = 0; i < MAX_OBJ; i++) {
+		if (task_id == eh_objs[i].task_id) {
+			pr_warn("Obj: %s, Task:%d, corruption detected!\n", eh_objs[i].obj_name,
+				task_id);
+			pr_warn("Please check if it's intended, or your machine may be in danger!\n");
+			break;
+		}
+	}
+	return ret;
+}
+
+static int calc_expected_hash(uint8_t *base_addr, uint32_t size, uint8_t *hash)
+{
+	int ret = 0;
+	struct crypto_shash *shash = NULL;
+
+	shash = crypto_alloc_shash("sm3", 0, 0);
+	if (IS_ERR(shash)) {
+		ret = PTR_ERR(shash);
+		return ret;
+	}
+
+	{
+		SHASH_DESC_ON_STACK(sdesc, shash);
+
+		sdesc->tfm = shash;
+		ret = crypto_shash_init(sdesc);
+		if (ret) {
+			pr_err("crypto_shash_init failed\n");
+			ret = -1;
+			goto out;
+		}
+
+		ret = crypto_shash_update(sdesc, base_addr, size);
+		if (ret) {
+			pr_err("crypto_shash_update failed\n");
+			ret = -1;
+			goto out;
+		}
+
+		ret = crypto_shash_final(sdesc, hash);
+		if (ret) {
+			pr_err("crypto_shash_final failed\n");
+			ret = -1;
+			goto out;
+		}
+	}
+
+out:
+	crypto_free_shash(shash);
+	return ret;
+}
+
+static int tdm_task_create_and_run(struct tdm_security_enhance *data)
+{
+	int ret = 0;
+	int task_status = 0;
+	int task_id;
+
+	/*
+	 * psp_create_measure_task() returns a negative error code on failure;
+	 * keep the result in a signed local before storing it in the unsigned
+	 * data->task_id, otherwise the error check below can never trigger.
+	 */
+	task_id = psp_create_measure_task(data->mem_range, &data->mdata, data->context,
+					  data->authcode);
+	if (task_id < 0) {
+		ret = task_id;
+		pr_err("create measurement task failed with 0x%x!\n", task_id);
+		goto end;
+	}
+	data->task_id = task_id;
+
+	ret = psp_register_measure_exception_handler(data->task_id, data->authcode,
+						     tdm_regi_callback_handler);
+	if (ret < 0) {
+		pr_err("task_id %d callback function register failed with 0x%x\n", data->task_id,
+		       ret);
+		goto release_task;
+	}
+
+	task_status = psp_startstop_measure_task(data->task_id, data->authcode, true);
+	if (task_status < 0) {
+		ret = task_status;
+		pr_err("task_id %d start failed with 0x%x\n", data->task_id, ret);
+		goto release_task;
+	}
+
+	return ret;
+
+release_task:
+	psp_destroy_measure_task(data->task_id, data->authcode);
+end:
+	return ret;
+}
+
+int tdm_service_run(struct tdm_security_enhance *data)
+{
+	int ret = 0;
+	struct addr_range_info *addr_range = NULL;
+
+	// Allocate memory for addr_range
+	addr_range = kzalloc(sizeof(struct addr_range_info) + sizeof(struct addr_info), GFP_KERNEL);
+	if (!addr_range) {
+		ret = -DYN_ERR_MEM;
+		pr_err("addr_range kzalloc memory failed\n");
+		goto end;
+	}
+
+	// Fill in addr_range
+	addr_range->count = 1;
+	addr_range->addr[0].addr_start = data->vaddr;
+	addr_range->addr[0].length = data->size;
+	data->mem_range = addr_range;
+
+	// Context configuration
+	data->context |= TASK_CREATE_VADDR;
+
+	// Allocate memory for authcode
+	data->authcode = kzalloc(sizeof(struct authcode_2b) + AUTHCODE_MAX, 
GFP_KERNEL); + if (!data->authcode) { + ret = -DYN_ERR_MEM; + pr_err("authcode_2b kzalloc memory failed\n"); + goto free_addr_range_info; + } + + data->authcode->len = AUTHCODE_MAX; + + // Measurement data configuration + data->mdata.hash_algo = HASH_ALGO_SM3; + data->mdata.period_ms = 0; + ret = calc_expected_hash((uint8_t *)data->vaddr, data->size, + data->mdata.expected_measurement); + if (ret) { + pr_err("calculate expected hash failed!\n"); + goto free_authcode; + } + + // Create and start tdm task + ret = tdm_task_create_and_run(data); + if (ret) { + pr_err("tdm_task_create_and_run failed!\n"); + goto free_authcode; + } + + return ret; + +free_authcode: + kfree(data->authcode); + data->authcode = NULL; +free_addr_range_info: + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +int tdm_service_exit(struct tdm_security_enhance *data) +{ + int ret = 0; + int task_status = 0; + + task_status = psp_startstop_measure_task(data->task_id, data->authcode, false); + if (task_status < 0) { + ret = task_status; + pr_err("task_id %d stop failed with 0x%x\n", data->task_id, ret); + goto end; + } + + // Waiting for the task to end + msleep(40); + + psp_destroy_measure_task(data->task_id, data->authcode); + + kfree(data->authcode); + data->authcode = NULL; + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) +static int p_tmp_kprobe_handler(struct kprobe *p_ri, struct pt_regs *p_regs) +{ + return 0; +} + +unsigned long kprobe_symbol_address_byname(const char *name) +{ + int p_ret; + struct kprobe p_kprobe; + unsigned long addr = 0; + + memset(&p_kprobe, 0, sizeof(p_kprobe)); + + p_kprobe.pre_handler = p_tmp_kprobe_handler; + p_kprobe.symbol_name = name; + + p_ret = register_kprobe(&p_kprobe); + if (p_ret < 0) { + pr_err("register_kprobe error [%d] :(\n", p_ret); + return 0; + } + + addr = (unsigned long)p_kprobe.addr; + unregister_kprobe(&p_kprobe); + + return addr; +} +#endif + +static int __init kernel_security_enhance_init(void) +{ + int i = 0; + int ret = 0; + unsigned long *sct_addr; + struct desc_ptr idtr; +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) + unsigned long (*f_kallsyms_lookup_name)(const char *); + + f_kallsyms_lookup_name = (unsigned long (*)(const char *))kprobe_symbol_address_byname( + "kallsyms_lookup_name"); + if (!f_kallsyms_lookup_name) { + ret = -DYN_ERR_API; + pr_err("kprobe_symbol_address_byname failed!"); + goto end; + } + + sct_addr = (unsigned long *)f_kallsyms_lookup_name("sys_call_table"); +#else + + sct_addr = (unsigned long *)kallsyms_lookup_name("sys_call_table"); +#endif + if (!sct_addr) { + ret = -DYN_ERR_API; + pr_err("kallsyms_lookup_name for sys_call_table failed!"); + goto end; + } + + asm("sidt %0":"=m"(idtr)); + + if (!psp_check_tdm_support()) + return 0; + + for (i = 0; i < MAX_OBJ; i++) { + memset(&eh_objs[i], 0, sizeof(eh_objs[i])); + eh_objs[i].context = CONTEXT_CHECK_MODNAME; + eh_objs[i].obj_name = obj_names[i]; + } + + if ((eh_obj == -1) || (eh_obj & (1 << SCT))) { + eh_objs[SCT].vaddr = (uint64_t)sct_addr; + eh_objs[SCT].size = NR_syscalls * sizeof(char *); + } + if ((eh_obj == -1) || (eh_obj & (1 << IDT))) { + eh_objs[IDT].vaddr = idtr.address; + eh_objs[IDT].size = idtr.size; + } + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_run(&eh_objs[i]); + } + + pr_info("Hygon TDM guard load successfully!\n"); + +end: + return ret; +} + +static void __exit kernel_security_enhance_exit(void) +{ + int i = 0; + + if (!psp_check_tdm_support()) + 
return; + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_exit(&eh_objs[i]); + } + pr_info("Hygon TDM guard unload successfully!\n"); +} + +MODULE_AUTHOR("niuyongwen@hygon.cn"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1"); +MODULE_DESCRIPTION("Kernel security enhancement module by TDM"); + +/* + * kernel_security_enhance_init must be done after ccp module init. + * That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(kernel_security_enhance_init); +module_exit(kernel_security_enhance_exit); diff --git a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c new file mode 100644 index 0000000000000000000000000000000000000000..df62dab035b89add08e41e535f201540c0a08c3d --- /dev/null +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -0,0 +1,549 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PSP virtualization + * + * Copyright (c) 2023, HYGON CORPORATION. All rights reserved. + * Author: Ge Yang + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "vpsp: " fmt +#define VTKM_VM_BIND 0x904 + +/* + * The file mainly implements the base execution logic of virtual PSP in kernel mode, + * which mainly includes: + * (1) Preprocess the guest data in the host kernel + * (2) The command that has been converted will interact with the channel of the + * psp through the driver and try to obtain the execution result + * (3) The executed command data is recovered, and then returned to the VM + * + * The primary implementation logic of virtual PSP in kernel mode + * call trace: + * guest command(vmmcall, KVM_HC_PSP_COPY_FORWARD_OP) + * | + * kvm_pv_psp_copy_op----> | -> kvm_pv_psp_cmd_pre_op + * | + * | -> vpsp_try_do_cmd/vpsp_try_get_result + * | |<=> psp device driver + * | + * | + * |-> kvm_pv_psp_cmd_post_op + * + * guest command(vmmcall, KVM_HC_PSP_FORWARD_OP) + * | + * kvm_pv_psp_forward_op-> |-> vpsp_try_do_cmd/vpsp_try_get_result + * |<=> psp device driver + */ + +struct psp_cmdresp_head { + uint32_t buf_size; + uint32_t cmdresp_size; + uint32_t cmdresp_code; +} __packed; + +/* save command data for restoring later */ +struct vpsp_hbuf_wrapper { + void *data; + uint32_t data_size; +}; + +/* Virtual PSP host memory information maintenance, used in ringbuffer mode */ +struct vpsp_hbuf_wrapper +g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0}; + +static int check_gpa_range(struct vpsp_context *vpsp_ctx, gpa_t addr, uint32_t size) +{ + if (!vpsp_ctx || !addr) + return -EFAULT; + + if (addr >= vpsp_ctx->gpa_start && (addr + size) <= vpsp_ctx->gpa_end) + return 0; + return -EFAULT; +} + +static int check_psp_mem_range(struct vpsp_context *vpsp_ctx, + void *data, uint32_t size) +{ + if ((((uintptr_t)data + size - 1) & ~PSP_2MB_MASK) != + ((uintptr_t)data & ~PSP_2MB_MASK)) { + pr_err("data %llx, size %d crossing 2MB\n", (u64)data, size); + return -EFAULT; + } + + if (vpsp_ctx) + return check_gpa_range(vpsp_ctx, (gpa_t)data, size); + + return 0; +} + +/** + * Copy the guest data to the host kernel buffer + * and record the host buffer address in 'hbuf'. + * This 'hbuf' is used to restore context information + * during asynchronous processing. 
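+ *
+ * The guest buffer starts with a struct psp_cmdresp_head, so the copy is
+ * done in two reads: first the head, to learn buf_size, then the whole
+ * buffer of that size. Sketch of the buffer layout at data_gpa:
+ *
+ *   | buf_size | cmdresp_size | cmdresp_code | command payload ... |
+ *   '-------- struct psp_cmdresp_head -------'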
+ */ +static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, + struct vpsp_hbuf_wrapper *hbuf) +{ + int ret = 0; + void *data = NULL; + struct psp_cmdresp_head psp_head; + uint32_t data_size; + + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head, + sizeof(struct psp_cmdresp_head)))) + return -EFAULT; + + data_size = psp_head.buf_size; + if (check_psp_mem_range(NULL, (void *)data_gpa, data_size)) + return -EFAULT; + + data = kzalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto end; + } + + hbuf->data = data; + hbuf->data_size = data_size; + +end: + return ret; +} + +static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, + struct vpsp_hbuf_wrapper *hbuf) +{ + int ret = 0; + + /* restore cmdresp's buffer from context */ + if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data, + hbuf->data_size))) { + pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", + __func__); + ret = -EFAULT; + goto end; + } +end: + kfree(hbuf->data); + memset(hbuf, 0, sizeof(*hbuf)); + return ret; +} + +static int cmd_type_is_tkm(int cmd) +{ + if (cmd >= TKM_CMD_ID_MIN && cmd <= TKM_CMD_ID_MAX) + return 1; + return 0; +} + +static int cmd_type_is_allowed(int cmd) +{ + if (cmd >= TKM_PSP_CMDID_OFFSET && cmd <= TKM_CMD_ID_MAX) + return 1; + return 0; +} + +struct psp_cmdresp_vtkm_vm_bind { + struct psp_cmdresp_head head; + uint16_t vid; + uint32_t vm_handle; + uint8_t reserved[46]; +} __packed; + +static int kvm_bind_vtkm(uint32_t vm_handle, uint32_t cmd_id, uint32_t vid, uint32_t *pret) +{ + int ret = 0; + struct psp_cmdresp_vtkm_vm_bind *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->head.buf_size = sizeof(*data); + data->head.cmdresp_size = sizeof(*data); + data->head.cmdresp_code = VTKM_VM_BIND; + data->vid = vid; + data->vm_handle = vm_handle; + + ret = psp_do_cmd(cmd_id, data, pret); + if (ret == -EIO) + ret = 0; + + kfree(data); + return ret; +} + +static unsigned long vpsp_get_me_mask(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned long me_mask; + +#define AMD_SME_BIT BIT(0) +#define AMD_SEV_BIT BIT(1) + /* + * Check for the SME/SEV feature: + * CPUID Fn8000_001F[EAX] + * - Bit 0 - Secure Memory Encryption support + * - Bit 1 - Secure Encrypted Virtualization support + * CPUID Fn8000_001F[EBX] + * - Bits 5:0 - Pagetable bit position used to indicate encryption + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + /* Check whether SEV or SME is supported */ + if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT))) + return 0; + + me_mask = 1UL << (ebx & 0x3f); + return me_mask; +} + +static phys_addr_t gpa_to_hpa(struct kvm_vpsp *vpsp, unsigned long data_gpa) +{ + phys_addr_t hpa = 0; + unsigned long pfn = vpsp->gfn_to_pfn(vpsp->kvm, data_gpa >> PAGE_SHIFT); + unsigned long me_mask = sme_get_me_mask(); + struct page *page; + + if (me_mask == 0 && vpsp->is_csv_guest) + me_mask = vpsp_get_me_mask(); + + if (!is_error_pfn(pfn)) + hpa = ((pfn << PAGE_SHIFT) + offset_in_page(data_gpa)) | me_mask; + else { + pr_err("[%s] pfn: %lx is invalid, gpa %lx", + __func__, pfn, data_gpa); + return 0; + } + + /* + * Using gfn_to_pfn causes the refcount to increment + * atomically by one, which needs to be released. 
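+ * For a compound page the reference is held on the head page, which is
+ * why compound_head() is resolved before put_page() below.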
+ */
+	page = pfn_to_page(pfn);
+	if (PageCompound(page))
+		page = compound_head(page);
+
+	put_page(page);
+
+	pr_debug("gpa %lx, hpa %llx\n", data_gpa, hpa);
+	return hpa;
+}
+
+static int check_cmd_forward_op_permission(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx,
+		uint64_t data, uint32_t cmd)
+{
+	int ret;
+	struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
+	struct psp_cmdresp_head psp_head;
+
+	if (!cmd_type_is_allowed(vcmd->cmd_id)) {
+		pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id);
+		return -EINVAL;
+	}
+
+	if (vpsp->is_csv_guest) {
+		/*
+		 * If the gpa address range exists,
+		 * it means there must be a legal vid
+		 */
+		if (!vpsp_ctx || !vpsp_ctx->gpa_start || !vpsp_ctx->gpa_end) {
+			pr_err("[%s]: No set gpa range or vid in csv guest\n", __func__);
+			return -EPERM;
+		}
+
+		ret = check_psp_mem_range(vpsp_ctx, (void *)data, 0);
+		if (ret)
+			return -EFAULT;
+	} else {
+		if (!vpsp_ctx && cmd_type_is_tkm(vcmd->cmd_id)
+			&& !vpsp_get_default_vid_permission()) {
+			pr_err("[%s]: not allowed tkm command without vid\n", __func__);
+			return -EPERM;
+		}
+
+		// the 'data' is gpa address
+		if (unlikely(vpsp->read_guest(vpsp->kvm, data, &psp_head,
+					sizeof(struct psp_cmdresp_head))))
+			return -EFAULT;
+
+		ret = check_psp_mem_range(vpsp_ctx, (void *)data, psp_head.buf_size);
+		if (ret)
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int
+check_cmd_copy_forward_op_permission(struct kvm_vpsp *vpsp,
+				     struct vpsp_context *vpsp_ctx,
+				     uint64_t data, uint32_t cmd)
+{
+	int ret = 0;
+	struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
+
+	if (!cmd_type_is_allowed(vcmd->cmd_id)) {
+		pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id);
+		return -EINVAL;
+	}
+
+	if (vpsp->is_csv_guest) {
+		pr_err("[%s]: unsupported run on csv guest\n", __func__);
+		ret = -EPERM;
+	} else {
+		if (!vpsp_ctx && cmd_type_is_tkm(vcmd->cmd_id)
+			&& !vpsp_get_default_vid_permission()) {
+			pr_err("[%s]: not allowed tkm command without vid\n", __func__);
+			ret = -EPERM;
+		}
+	}
+	return ret;
+}
+
+static int vpsp_try_bind_vtkm(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx,
+			      uint32_t cmd, uint32_t *psp_ret)
+{
+	int ret;
+	struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
+
+	if (vpsp_ctx && !vpsp_ctx->vm_is_bound && vpsp->is_csv_guest) {
+		ret = kvm_bind_vtkm(vpsp->vm_handle, vcmd->cmd_id,
+				    vpsp_ctx->vid, psp_ret);
+		if (ret || *psp_ret) {
+			pr_err("[%s] kvm bind vtkm failed with ret: %d, pspret: %d\n",
+			       __func__, ret, *psp_ret);
+			return ret;
+		}
+		vpsp_ctx->vm_is_bound = 1;
+	}
+	return 0;
+}
+
+/**
+ * @brief Directly convert the gpa address into hpa and forward it to PSP.
+ * It is another form of kvm_pv_psp_copy_forward_op(), mainly used for CSV VMs.
+ *
+ * @param vpsp points to kvm related data
+ * @param cmd psp cmd id, bit 31 indicates queue priority
+ * @param data_gpa guest physical address of input data
+ * @param psp_ret indicates asynchronous context information
+ *
+ * Since CSV guest memory cannot be read or written directly, the
+ * asynchronous context information is shared through psp_ret and the
+ * return value instead.
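+ *
+ * A sketch of the expected calling pattern (the hypercall plumbing that
+ * produces vpsp, cmd and data_gpa is assumed, not shown here):
+ *
+ *   ret = kvm_pv_psp_forward_op(vpsp, cmd, data_gpa, psp_ret);
+ *   reinterpret ret as a struct vpsp_ret; while its status is
+ *   VPSP_RUNNING, pass the value back in as psp_ret and call again;
+ *   VPSP_FINISH means the device has written the result.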
+ */ +int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, + gpa_t data_gpa, uint32_t psp_ret) +{ + int ret; + uint64_t data_hpa; + uint32_t index = 0, vid = 0; + struct vpsp_ret psp_async = {0}; + struct vpsp_context *vpsp_ctx = NULL; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + phys_addr_t hpa; + + vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid); + + ret = check_cmd_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd); + if (unlikely(ret)) { + pr_err("directly operation not allowed\n"); + goto end; + } + + ret = vpsp_try_bind_vtkm(vpsp, vpsp_ctx, cmd, (uint32_t *)&psp_async); + if (unlikely(ret || *(uint32_t *)&psp_async)) { + pr_err("try to bind vtkm failed (ret %x, psp_async %x)\n", + ret, *(uint32_t *)&psp_async); + goto end; + } + + if (vpsp_ctx) + vid = vpsp_ctx->vid; + + *((uint32_t *)&psp_async) = psp_ret; + + hpa = gpa_to_hpa(vpsp, data_gpa); + if (unlikely(!hpa)) { + ret = -EFAULT; + goto end; + } + + data_hpa = PUT_PSP_VID(hpa, vid); + + switch (psp_async.status) { + case VPSP_INIT: + /* try to send command to the device for execution*/ + ret = vpsp_try_do_cmd(cmd, data_hpa, &psp_async); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_cmd failed\n", __func__); + goto end; + } + break; + + case VPSP_RUNNING: + prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + index = psp_async.index; + /* try to get the execution result from ringbuffer*/ + ret = vpsp_try_get_result(prio, index, data_hpa, &psp_async); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + goto end; + } + break; + + default: + pr_err("[%s]: invalid command status\n", __func__); + break; + } + +end: + /** + * In order to indicate both system errors and PSP errors, + * the psp_async.pret field needs to be reused. + */ + psp_async.format = VPSP_RET_PSP_FORMAT; + if (ret) { + psp_async.format = VPSP_RET_SYS_FORMAT; + if (ret > 0) + ret = -ret; + psp_async.pret = (uint16_t)ret; + } + return *((int *)&psp_async); +} +EXPORT_SYMBOL_GPL(kvm_pv_psp_forward_op); + +/** + * @brief copy data in gpa to host memory and send it to psp for processing. 
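+ * This is the path for guests whose memory the host can access directly;
+ * CSV guests are rejected here and take kvm_pv_psp_forward_op() instead.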
+ * + * @param vpsp points to kvm related data + * @param cmd psp cmd id, bit 31 indicates queue priority + * @param data_gpa guest physical address of input data + * @param psp_ret_gpa guest physical address of psp_ret + */ +int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa) +{ + int ret = 0; + struct vpsp_ret psp_ret = {0}; + struct vpsp_hbuf_wrapper hbuf = {0}; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + struct vpsp_context *vpsp_ctx = NULL; + phys_addr_t data_paddr = 0; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + uint32_t index = 0; + uint32_t vid = 0; + + vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid); + + ret = check_cmd_copy_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd); + if (unlikely(ret)) { + pr_err("copy operation not allowed\n"); + return -EPERM; + } + + if (vpsp_ctx) + vid = vpsp_ctx->vid; + + if (unlikely(vpsp->read_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, + sizeof(psp_ret)))) + return -EFAULT; + + switch (psp_ret.status) { + case VPSP_INIT: + /* copy data from guest */ + ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, &hbuf); + if (unlikely(ret)) { + psp_ret.status = VPSP_FINISH; + pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + data_paddr = PUT_PSP_VID(__psp_pa(hbuf.data), vid); + /* try to send command to the device for execution*/ + ret = vpsp_try_do_cmd(cmd, data_paddr, (struct vpsp_ret *)&psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_do_cmd failed\n", __func__); + ret = -EFAULT; + goto end; + } + + if (psp_ret.status == VPSP_RUNNING) { + prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + g_hbuf_wrap[prio][psp_ret.index] = hbuf; + break; + + } else if (psp_ret.status == VPSP_FINISH) { + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &hbuf); + if (unlikely(ret)) { + pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + break; + + case VPSP_RUNNING: + prio = vcmd->is_high_rb ? 
CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + index = psp_ret.index; + data_paddr = PUT_PSP_VID(__psp_pa(g_hbuf_wrap[prio][index].data), vid); + /* try to get the execution result from ringbuffer*/ + ret = vpsp_try_get_result(prio, index, data_paddr, + (struct vpsp_ret *)&psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + ret = -EFAULT; + goto end; + } + + if (psp_ret.status == VPSP_RUNNING) { + ret = 0; + goto end; + } else if (psp_ret.status == VPSP_FINISH) { + /* copy data to guest */ + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, + &g_hbuf_wrap[prio][index]); + if (unlikely(ret)) { + pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", + __func__); + ret = -EFAULT; + } + goto end; + } + ret = -EFAULT; + break; + + default: + pr_err("[%s]: invalid command status\n", __func__); + ret = -EFAULT; + break; + } +end: + /* return psp_ret to guest */ + vpsp->write_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); + return ret; +} +EXPORT_SYMBOL_GPL(kvm_pv_psp_copy_forward_op); diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index d42d7bc623523dad25f4665d18405b499be2bee9..ccb605bf861895115f461e888e295123cc9c921a 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -17,6 +17,11 @@ #include "platform-access.h" #include "dbc.h" +#include "hygon/psp-dev.h" +#ifdef CONFIG_TDM_DEV_HYGON +#include "hygon/tdm-dev.h" +#endif + struct psp_device *psp_master; static struct psp_device *psp_alloc_struct(struct sp_device *sp) @@ -73,6 +78,17 @@ static unsigned int psp_get_capability(struct psp_device *psp) } psp->capability = val; + /* + * Fix capability of Hygon psp, the meaning of Hygon psp feature + * register is not exactly the same as AMD. + * Return -ENODEV directly if hygon psp not configured with CSV + * capability. + */ + if (is_vendor_hygon()) { + if (fixup_hygon_psp_caps(psp)) + return -ENODEV; + } + /* Detect if TSME and SME are both enabled */ if (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING && psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET) && @@ -140,6 +156,14 @@ static int psp_init(struct psp_device *psp) if (psp->vdata->platform_access) psp_init_platform_access(psp); +#ifdef CONFIG_TDM_DEV_HYGON + if (is_vendor_hygon()) { + ret = tdm_dev_init(); + if (ret) + return ret; + } +#endif + return 0; } @@ -174,7 +198,11 @@ int psp_dev_init(struct sp_device *sp) iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg); /* Request an irq */ - ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + if (is_vendor_hygon()) { + ret = sp_request_hygon_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + } else { + ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + } if (ret) { dev_err(dev, "psp: unable to allocate an IRQ\n"); goto e_err; @@ -188,6 +216,18 @@ int psp_dev_init(struct sp_device *sp) if (ret) goto e_irq; + /** + * hygon_psp_additional_setup() needs to wait for + * sev_dev_install_hooks() to complete before it can be called. 
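+	 * Keep this call after the sub-device initialization above, so the
+	 * hooks it relies on are already installed by the time it runs.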
+ */ + if (is_vendor_hygon()) { + ret = hygon_psp_additional_setup(sp); + if (ret) { + dev_err(dev, "psp: unable to do additional setup\n"); + goto e_irq; + } + } + /* Enable interrupt */ iowrite32(-1, psp->io_regs + psp->vdata->inten_reg); @@ -220,6 +260,11 @@ void psp_dev_destroy(struct sp_device *sp) if (!psp) return; +#ifdef CONFIG_TDM_DEV_HYGON + if (is_vendor_hygon()) + tdm_dev_destroy(); +#endif + sev_dev_destroy(psp); tee_dev_destroy(psp); @@ -230,6 +275,9 @@ void psp_dev_destroy(struct sp_device *sp) sp_free_psp_irq(sp, psp); + if (is_vendor_hygon() && hygon_psp_hooks.psp_misc) + kref_put(&hygon_psp_hooks.psp_misc->refcount, hygon_psp_exit); + if (sp->clear_psp_master_device) sp->clear_psp_master_device(sp); } diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 07e6f782b622523504f9b73f210632aafc1c58a3..947dc26e139cb6103f2aa0e95b6c25bf29240508 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -33,6 +33,9 @@ #include "psp-dev.h" #include "sev-dev.h" +#include "hygon/psp-dev.h" +#include "hygon/csv-dev.h" + #define DEVICE_NAME "sev" #define SEV_FW_FILE "amd/sev.fw" #define SEV_FW_NAME_SIZE 64 @@ -104,7 +107,8 @@ static void sev_irq_handler(int irq, void *data, unsigned int status) /* Check if it is SEV command completion: */ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); - if (FIELD_GET(PSP_CMDRESP_RESP, reg)) { + if (FIELD_GET(PSP_CMDRESP_RESP, reg) || + (is_vendor_hygon() && csv_in_ring_buffer_mode())) { sev->int_rcvd = 1; wake_up(&sev->int_queue); } @@ -127,6 +131,18 @@ static int sev_wait_cmd_ioc(struct sev_device *sev, static int sev_cmd_buffer_len(int cmd) { + /* + * The Hygon CSV command may conflict with AMD SEV command, so it's + * preferred to check whether it's a CSV-specific command for Hygon + * psp. 
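+ * csv_cmd_buffer_len() returns 0 for commands it does not recognize, + * in which case we fall through to the common SEV lookup below. 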
+ */ + if (is_vendor_hygon()) { + int r = csv_cmd_buffer_len(cmd); + + if (r) + return r; + } + switch (cmd) { case SEV_CMD_INIT: return sizeof(struct sev_data_init); case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex); @@ -392,10 +408,20 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - mutex_lock(&sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __sev_do_cmd_locked(cmd, data, psp_ret); - mutex_unlock(&sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -500,8 +526,12 @@ static int __sev_platform_init_locked(int *error) dev_dbg(sev->dev, "SEV firmware initialized\n"); - dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, - sev->api_minor, sev->build); + if (is_vendor_hygon()) + dev_info(sev->dev, "CSV API:%d.%d build:%d\n", sev->api_major, + sev->api_minor, hygon_csv_build); + else + dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, + sev->api_minor, sev->build); return 0; } @@ -509,10 +539,20 @@ static int __sev_platform_init_locked(int *error) int sev_platform_init(int *error) { int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - mutex_lock(&sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __sev_platform_init_locked(error); - mutex_unlock(&sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -536,6 +576,10 @@ static int __sev_platform_shutdown_locked(int *error) if (ret) return ret; + /* RING BUFFER mode exits if a SHUTDOWN command is executed */ + if (is_vendor_hygon() && csv_in_ring_buffer_mode()) + csv_restore_mailbox_mode_postprocess(); + sev->state = SEV_STATE_UNINIT; dev_dbg(sev->dev, "SEV firmware shutdown\n"); @@ -545,10 +589,20 @@ static int __sev_platform_shutdown_locked(int *error) static int sev_platform_shutdown(int *error) { int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - mutex_lock(&sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __sev_platform_shutdown_locked(NULL); - mutex_unlock(&sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -723,6 +777,13 @@ static int sev_get_api_version(void) sev->build = status.build; sev->state = status.state; + /* + * The api version fields of HYGON CSV firmware are not consistent + * with AMD SEV firmware. 
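+ * Let csv_update_api_version() reinterpret the fields just parsed + * from the status buffer according to the CSV firmware layout. 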
+ */ + if (is_vendor_hygon()) + csv_update_api_version(&status); + return 0; } @@ -732,6 +793,14 @@ static int sev_get_firmware(struct device *dev, char fw_name_specific[SEV_FW_NAME_SIZE]; char fw_name_subset[SEV_FW_NAME_SIZE]; + if (is_vendor_hygon()) { + /* Check for CSV FW using the generic name: csv.fw */ + if (firmware_request_nowarn(firmware, CSV_FW_FILE, dev) >= 0) + return 0; + else + return -ENOENT; + } + snprintf(fw_name_specific, sizeof(fw_name_specific), "amd/amd_sev_fam%.2xh_model%.2xh.sbin", boot_cpu_data.x86, boot_cpu_data.x86_model); @@ -770,13 +839,15 @@ static int sev_update_firmware(struct device *dev) struct page *p; u64 data_size; - if (!sev_version_greater_or_equal(0, 15)) { + if (!sev_version_greater_or_equal(0, 15) && + !(is_vendor_hygon() && csv_version_greater_or_equal(1667))) { dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); return -1; } if (sev_get_firmware(dev, &firmware) == -ENOENT) { - dev_dbg(dev, "No SEV firmware file present\n"); + dev_dbg(dev, "No %s firmware file present\n", + is_vendor_hygon() ? "CSV" : "SEV"); return -1; } @@ -816,9 +887,11 @@ static int sev_update_firmware(struct device *dev) ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); if (ret) - dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); + dev_dbg(dev, "Failed to update %s firmware: %#x\n", + is_vendor_hygon() ? "CSV" : "SEV", error); else - dev_info(dev, "SEV firmware update successful\n"); + dev_info(dev, "%s firmware update successful\n", + is_vendor_hygon() ? "CSV" : "SEV"); __free_pages(p, order); @@ -1076,6 +1149,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) struct sev_issue_cmd input; int ret = -EFAULT; bool writable = file->f_mode & FMODE_WRITE; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); if (!psp_master || !psp_master->sev_data) return -ENODEV; @@ -1089,7 +1163,13 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (input.cmd > SEV_MAX) return -EINVAL; - mutex_lock(&sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } switch (input.cmd) { @@ -1129,7 +1209,10 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: - mutex_unlock(&sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return ret; } @@ -1198,7 +1281,11 @@ static int sev_misc_init(struct sev_device *sev) misc = &misc_dev->misc; misc->minor = MISC_DYNAMIC_MINOR; misc->name = DEVICE_NAME; - misc->fops = &sev_fops; + + if (is_vendor_hygon()) + misc->fops = &csv_fops; + else + misc->fops = &sev_fops; ret = misc_register(misc); if (ret) @@ -1216,12 +1303,38 @@ static int sev_misc_init(struct sev_device *sev) return 0; } +/* Code to set all of the function and variable pointers */ +static void sev_dev_install_hooks(void) +{ + hygon_psp_hooks.sev_cmd_mutex = &sev_cmd_mutex; + hygon_psp_hooks.psp_dead = &psp_dead; + hygon_psp_hooks.psp_timeout = &psp_timeout; + hygon_psp_hooks.psp_cmd_timeout = &psp_cmd_timeout; + hygon_psp_hooks.sev_cmd_buffer_len = sev_cmd_buffer_len; + hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; + hygon_psp_hooks.__sev_platform_init_locked = __sev_platform_init_locked; + 
hygon_psp_hooks.__sev_platform_shutdown_locked = __sev_platform_shutdown_locked; + hygon_psp_hooks.sev_do_cmd = sev_do_cmd; + hygon_psp_hooks.sev_wait_cmd_ioc = sev_wait_cmd_ioc; + hygon_psp_hooks.sev_ioctl = sev_ioctl; + + hygon_psp_hooks.sev_dev_hooks_installed = true; +} + int sev_dev_init(struct psp_device *psp) { struct device *dev = psp->dev; struct sev_device *sev; int ret = -ENOMEM; + /* + * Install the sev-dev related function and variable pointer hooks, + * but only for the Hygon vendor. Install them here so they are in + * place even if the initialization below fails. + */ + if (is_vendor_hygon()) + sev_dev_install_hooks(); + if (!boot_cpu_has(X86_FEATURE_SEV)) { dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n"); return 0; @@ -1311,7 +1424,8 @@ void sev_dev_destroy(struct psp_device *psp) int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, void *data, int *error) { - if (!filep || filep->f_op != &sev_fops) + if (!filep || filep->f_op != (is_vendor_hygon() + ? &csv_fops : &sev_fops)) return -EBADF; return sev_do_cmd(cmd, data, error); @@ -1358,6 +1472,10 @@ void sev_pci_init(void) if (!psp_init_on_probe) return; + /* Set SMR for HYGON CSV3 */ + if (is_vendor_hygon() && boot_cpu_has(X86_FEATURE_CSV3)) + csv_platform_cmd_set_secure_memory_region(sev, &error); + /* Initialize the platform */ rc = sev_platform_init(&error); if (rc) diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h index 778c95155e745becb09ada5f21d83044e6a120e1..a137ae6959735545eac9e757c4dfd3614350da07 100644 --- a/drivers/crypto/ccp/sev-dev.h +++ b/drivers/crypto/ccp/sev-dev.h @@ -25,6 +25,8 @@ #include #include +#include "hygon/ring-buffer.h" + #define SEV_CMDRESP_CMD GENMASK(26, 16) #define SEV_CMD_COMPLETE BIT(1) #define SEV_CMDRESP_IOC BIT(0) @@ -52,6 +54,9 @@ struct sev_device { u8 build; void *cmd_buf; + + /* Management for the Hygon RING BUFFER mode */ + struct csv_ringbuffer_queue ring_buffer[CSV_COMMAND_PRIORITY_NUM]; }; int sev_dev_init(struct psp_device *psp); diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 2329ad524b4945b29bac80e1b0843c4de6a72a54..d04d9743b68000cb549d44c33892948209edf2a8 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -76,6 +76,11 @@ struct psp_vdata { const unsigned int intsts_reg; const unsigned int bootloader_info_reg; const unsigned int platform_features; +#ifdef CONFIG_HYGON_PSP2CPU_CMD + const unsigned int p2c_cmdresp_reg; + const unsigned int p2c_cmdbuff_addr_lo_reg; + const unsigned int p2c_cmdbuff_addr_hi_reg; +#endif }; /* Structure to hold SP device data */ diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index b6ab56abeb682f89f913558e957ac364d57fbeec..d093ff25091050edf8757642df8923d0c5f43a83 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -25,6 +25,8 @@ #include "ccp-dev.h" #include "psp-dev.h" +#include "hygon/sp-dev.h" + /* used for version string AA.BB.CC.DD */ #define AA GENMASK(31, 24) #define BB GENMASK(23, 16) @@ -576,6 +578,11 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] }, { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, + { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&hygon_dev_vdata[0] }, + { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&hygon_dev_vdata[1] }, + { PCI_VDEVICE(HYGON, 0x1486), (kernel_ulong_t)&hygon_dev_vdata[2] }, + { PCI_VDEVICE(HYGON, 0x14b8), 
(kernel_ulong_t)&hygon_dev_vdata[1] }, + { PCI_VDEVICE(HYGON, 0x14a6), (kernel_ulong_t)&hygon_dev_vdata[2] }, /* Last entry must be zero */ { 0, } }; diff --git a/drivers/crypto/intel/Kconfig b/drivers/crypto/intel/Kconfig index 3d90c87d409454593cccd36f60c89f3442abf2c2..f38cd62a3f67a9f3a7726bbb6b9a39947044252a 100644 --- a/drivers/crypto/intel/Kconfig +++ b/drivers/crypto/intel/Kconfig @@ -3,3 +3,4 @@ source "drivers/crypto/intel/keembay/Kconfig" source "drivers/crypto/intel/ixp4xx/Kconfig" source "drivers/crypto/intel/qat/Kconfig" +source "drivers/crypto/intel/iaa/Kconfig" diff --git a/drivers/crypto/intel/Makefile b/drivers/crypto/intel/Makefile index b3d0352ae188da6016f21efddec5ee6ca0f7c368..2f56f6d34cf072622f7e4f0bde06c44abd5a9e6a 100644 --- a/drivers/crypto/intel/Makefile +++ b/drivers/crypto/intel/Makefile @@ -3,3 +3,4 @@ obj-y += keembay/ obj-y += ixp4xx/ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ +obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) += iaa/ diff --git a/drivers/crypto/intel/iaa/Kconfig b/drivers/crypto/intel/iaa/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..d53f4b1d494f98eed11b457643fe4fd2272b6da6 --- /dev/null +++ b/drivers/crypto/intel/iaa/Kconfig @@ -0,0 +1,19 @@ +config CRYPTO_DEV_IAA_CRYPTO + tristate "Support for Intel(R) IAA Compression Accelerator" + depends on CRYPTO_DEFLATE + depends on INTEL_IDXD + default n + help + This driver supports acceleration for compression and + decompression with the Intel Analytics Accelerator (IAA) + hardware using the cryptographic API. If you choose 'M' + here, the module will be called iaa_crypto. + +config CRYPTO_DEV_IAA_CRYPTO_STATS + bool "Enable Intel(R) IAA Compression Accelerator Statistics" + depends on CRYPTO_DEV_IAA_CRYPTO + default n + help + Enable statistics for the IAA compression accelerator. + These include per-device and per-workqueue statistics in + addition to global driver statistics. diff --git a/drivers/crypto/intel/iaa/Makefile b/drivers/crypto/intel/iaa/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..b64b208d2344085740f788c62697c1abfd72dc6a --- /dev/null +++ b/drivers/crypto/intel/iaa/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for IAA crypto device drivers +# + +ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE=IDXD + +obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) := iaa_crypto.o + +iaa_crypto-y := iaa_crypto_main.o iaa_crypto_comp_fixed.o + +iaa_crypto-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS) += iaa_crypto_stats.o diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h new file mode 100644 index 0000000000000000000000000000000000000000..56985e39526373fb7132c77e605d4b8dc91eb4a9 --- /dev/null +++ b/drivers/crypto/intel/iaa/iaa_crypto.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Intel Corporation. All rights rsvd. 
*/ + +#ifndef __IAA_CRYPTO_H__ +#define __IAA_CRYPTO_H__ + +#include +#include +#include + +#define IDXD_SUBDRIVER_NAME "crypto" + +#define IAA_DECOMP_ENABLE BIT(0) +#define IAA_DECOMP_FLUSH_OUTPUT BIT(1) +#define IAA_DECOMP_CHECK_FOR_EOB BIT(2) +#define IAA_DECOMP_STOP_ON_EOB BIT(3) +#define IAA_DECOMP_SUPPRESS_OUTPUT BIT(9) + +#define IAA_COMP_FLUSH_OUTPUT BIT(1) +#define IAA_COMP_APPEND_EOB BIT(2) + +#define IAA_COMPLETION_TIMEOUT 1000000 + +#define IAA_ANALYTICS_ERROR 0x0a +#define IAA_ERROR_DECOMP_BUF_OVERFLOW 0x0b +#define IAA_ERROR_COMP_BUF_OVERFLOW 0x19 +#define IAA_ERROR_WATCHDOG_EXPIRED 0x24 + +#define IAA_COMP_MODES_MAX 2 + +#define FIXED_HDR 0x2 +#define FIXED_HDR_SIZE 3 + +#define IAA_COMP_FLAGS (IAA_COMP_FLUSH_OUTPUT | \ + IAA_COMP_APPEND_EOB) + +#define IAA_DECOMP_FLAGS (IAA_DECOMP_ENABLE | \ + IAA_DECOMP_FLUSH_OUTPUT | \ + IAA_DECOMP_CHECK_FOR_EOB | \ + IAA_DECOMP_STOP_ON_EOB) + +/* Representation of IAA workqueue */ +struct iaa_wq { + struct list_head list; + + struct idxd_wq *wq; + int ref; + bool remove; + + struct iaa_device *iaa_device; + + atomic64_t comp_calls; + atomic64_t comp_bytes; + atomic64_t decomp_calls; + atomic64_t decomp_bytes; +}; + +struct iaa_device_compression_mode { + const char *name; + + struct aecs_comp_table_record *aecs_comp_table; + + dma_addr_t aecs_comp_table_dma_addr; +}; + +/* Representation of IAA device with wqs, populated by probe */ +struct iaa_device { + struct list_head list; + struct idxd_device *idxd; + + struct iaa_device_compression_mode *compression_modes[IAA_COMP_MODES_MAX]; + + int n_wq; + struct list_head wqs; + + atomic64_t comp_calls; + atomic64_t comp_bytes; + atomic64_t decomp_calls; + atomic64_t decomp_bytes; +}; + +struct wq_table_entry { + struct idxd_wq **wqs; + int max_wqs; + int n_wqs; + int cur_wq; +}; + +#define IAA_AECS_ALIGN 32 + +/* + * Analytics Engine Configuration and State (AECS) contains parameters and + * internal state of the analytics engine. + */ +struct aecs_comp_table_record { + u32 crc; + u32 xor_checksum; + u32 reserved0[5]; + u32 num_output_accum_bits; + u8 output_accum[256]; + u32 ll_sym[286]; + u32 reserved1; + u32 reserved2; + u32 d_sym[30]; + u32 reserved_padding[2]; +} __packed; + +int iaa_aecs_init_fixed(void); +void iaa_aecs_cleanup_fixed(void); + +typedef int (*iaa_dev_comp_init_fn_t) (struct iaa_device_compression_mode *mode); +typedef int (*iaa_dev_comp_free_fn_t) (struct iaa_device_compression_mode *mode); + +struct iaa_compression_mode { + const char *name; + u32 *ll_table; + int ll_table_size; + u32 *d_table; + int d_table_size; + iaa_dev_comp_init_fn_t init; + iaa_dev_comp_free_fn_t free; +}; + +int add_iaa_compression_mode(const char *name, + const u32 *ll_table, + int ll_table_size, + const u32 *d_table, + int d_table_size, + iaa_dev_comp_init_fn_t init, + iaa_dev_comp_free_fn_t free); + +void remove_iaa_compression_mode(const char *name); + +enum iaa_mode { + IAA_MODE_FIXED, +}; + +struct iaa_compression_ctx { + enum iaa_mode mode; + bool verify_compress; + bool async_mode; + bool use_irq; +}; + +extern struct list_head iaa_devices; +extern struct mutex iaa_devices_lock; + +#endif diff --git a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c new file mode 100644 index 0000000000000000000000000000000000000000..19d9a333ac49c90eccbe8714dd6ed90c46d68f15 --- /dev/null +++ b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Intel Corporation. All rights rsvd. 
*/ + +#include "idxd.h" +#include "iaa_crypto.h" + +/* + * Fixed Huffman tables the IAA hardware requires to implement RFC-1951. + */ +static const u32 fixed_ll_sym[286] = { + 0x40030, 0x40031, 0x40032, 0x40033, 0x40034, 0x40035, 0x40036, 0x40037, + 0x40038, 0x40039, 0x4003A, 0x4003B, 0x4003C, 0x4003D, 0x4003E, 0x4003F, + 0x40040, 0x40041, 0x40042, 0x40043, 0x40044, 0x40045, 0x40046, 0x40047, + 0x40048, 0x40049, 0x4004A, 0x4004B, 0x4004C, 0x4004D, 0x4004E, 0x4004F, + 0x40050, 0x40051, 0x40052, 0x40053, 0x40054, 0x40055, 0x40056, 0x40057, + 0x40058, 0x40059, 0x4005A, 0x4005B, 0x4005C, 0x4005D, 0x4005E, 0x4005F, + 0x40060, 0x40061, 0x40062, 0x40063, 0x40064, 0x40065, 0x40066, 0x40067, + 0x40068, 0x40069, 0x4006A, 0x4006B, 0x4006C, 0x4006D, 0x4006E, 0x4006F, + 0x40070, 0x40071, 0x40072, 0x40073, 0x40074, 0x40075, 0x40076, 0x40077, + 0x40078, 0x40079, 0x4007A, 0x4007B, 0x4007C, 0x4007D, 0x4007E, 0x4007F, + 0x40080, 0x40081, 0x40082, 0x40083, 0x40084, 0x40085, 0x40086, 0x40087, + 0x40088, 0x40089, 0x4008A, 0x4008B, 0x4008C, 0x4008D, 0x4008E, 0x4008F, + 0x40090, 0x40091, 0x40092, 0x40093, 0x40094, 0x40095, 0x40096, 0x40097, + 0x40098, 0x40099, 0x4009A, 0x4009B, 0x4009C, 0x4009D, 0x4009E, 0x4009F, + 0x400A0, 0x400A1, 0x400A2, 0x400A3, 0x400A4, 0x400A5, 0x400A6, 0x400A7, + 0x400A8, 0x400A9, 0x400AA, 0x400AB, 0x400AC, 0x400AD, 0x400AE, 0x400AF, + 0x400B0, 0x400B1, 0x400B2, 0x400B3, 0x400B4, 0x400B5, 0x400B6, 0x400B7, + 0x400B8, 0x400B9, 0x400BA, 0x400BB, 0x400BC, 0x400BD, 0x400BE, 0x400BF, + 0x48190, 0x48191, 0x48192, 0x48193, 0x48194, 0x48195, 0x48196, 0x48197, + 0x48198, 0x48199, 0x4819A, 0x4819B, 0x4819C, 0x4819D, 0x4819E, 0x4819F, + 0x481A0, 0x481A1, 0x481A2, 0x481A3, 0x481A4, 0x481A5, 0x481A6, 0x481A7, + 0x481A8, 0x481A9, 0x481AA, 0x481AB, 0x481AC, 0x481AD, 0x481AE, 0x481AF, + 0x481B0, 0x481B1, 0x481B2, 0x481B3, 0x481B4, 0x481B5, 0x481B6, 0x481B7, + 0x481B8, 0x481B9, 0x481BA, 0x481BB, 0x481BC, 0x481BD, 0x481BE, 0x481BF, + 0x481C0, 0x481C1, 0x481C2, 0x481C3, 0x481C4, 0x481C5, 0x481C6, 0x481C7, + 0x481C8, 0x481C9, 0x481CA, 0x481CB, 0x481CC, 0x481CD, 0x481CE, 0x481CF, + 0x481D0, 0x481D1, 0x481D2, 0x481D3, 0x481D4, 0x481D5, 0x481D6, 0x481D7, + 0x481D8, 0x481D9, 0x481DA, 0x481DB, 0x481DC, 0x481DD, 0x481DE, 0x481DF, + 0x481E0, 0x481E1, 0x481E2, 0x481E3, 0x481E4, 0x481E5, 0x481E6, 0x481E7, + 0x481E8, 0x481E9, 0x481EA, 0x481EB, 0x481EC, 0x481ED, 0x481EE, 0x481EF, + 0x481F0, 0x481F1, 0x481F2, 0x481F3, 0x481F4, 0x481F5, 0x481F6, 0x481F7, + 0x481F8, 0x481F9, 0x481FA, 0x481FB, 0x481FC, 0x481FD, 0x481FE, 0x481FF, + 0x38000, 0x38001, 0x38002, 0x38003, 0x38004, 0x38005, 0x38006, 0x38007, + 0x38008, 0x38009, 0x3800A, 0x3800B, 0x3800C, 0x3800D, 0x3800E, 0x3800F, + 0x38010, 0x38011, 0x38012, 0x38013, 0x38014, 0x38015, 0x38016, 0x38017, + 0x400C0, 0x400C1, 0x400C2, 0x400C3, 0x400C4, 0x400C5 +}; + +static const u32 fixed_d_sym[30] = { + 0x28000, 0x28001, 0x28002, 0x28003, 0x28004, 0x28005, 0x28006, 0x28007, + 0x28008, 0x28009, 0x2800A, 0x2800B, 0x2800C, 0x2800D, 0x2800E, 0x2800F, + 0x28010, 0x28011, 0x28012, 0x28013, 0x28014, 0x28015, 0x28016, 0x28017, + 0x28018, 0x28019, 0x2801A, 0x2801B, 0x2801C, 0x2801D +}; + +static int init_fixed_mode(struct iaa_device_compression_mode *mode) +{ + struct aecs_comp_table_record *comp_table = mode->aecs_comp_table; + u32 bfinal = 1; + u32 offset; + + /* Configure aecs table using fixed Huffman table */ + comp_table->crc = 0; + comp_table->xor_checksum = 0; + offset = comp_table->num_output_accum_bits / 8; + comp_table->output_accum[offset] = FIXED_HDR | bfinal; + 
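/* RFC 1951 block header, stored LSB first: BFINAL=1 then BTYPE=01 + * (fixed Huffman codes); FIXED_HDR_SIZE accounts for those 3 bits. */ + 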
comp_table->num_output_accum_bits = FIXED_HDR_SIZE; + + return 0; +} + +int iaa_aecs_init_fixed(void) +{ + int ret; + + ret = add_iaa_compression_mode("fixed", + fixed_ll_sym, + sizeof(fixed_ll_sym), + fixed_d_sym, + sizeof(fixed_d_sym), + init_fixed_mode, NULL); + if (!ret) + pr_debug("IAA fixed compression mode initialized\n"); + + return ret; +} + +void iaa_aecs_cleanup_fixed(void) +{ + remove_iaa_compression_mode("fixed"); +} diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c new file mode 100644 index 0000000000000000000000000000000000000000..814fb2c31626b45325f1515b7a7ad7f8c56a64e6 --- /dev/null +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -0,0 +1,2096 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "idxd.h" +#include "iaa_crypto.h" +#include "iaa_crypto_stats.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif + +#define pr_fmt(fmt) "idxd: " IDXD_SUBDRIVER_NAME ": " fmt + +#define IAA_ALG_PRIORITY 300 + +/* number of iaa instances probed */ +static unsigned int nr_iaa; +static unsigned int nr_cpus; +static unsigned int nr_nodes; +static unsigned int nr_cpus_per_node; + +/* Number of physical cpus sharing each iaa instance */ +static unsigned int cpus_per_iaa; + +static struct crypto_comp *deflate_generic_tfm; + +/* Per-cpu lookup table for balanced wqs */ +static struct wq_table_entry __percpu *wq_table; + +static struct idxd_wq *wq_table_next_wq(int cpu) +{ + struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); + + if (++entry->cur_wq >= entry->n_wqs) + entry->cur_wq = 0; + + if (!entry->wqs[entry->cur_wq]) + return NULL; + + pr_debug("%s: returning wq at idx %d (iaa wq %d.%d) from cpu %d\n", __func__, + entry->cur_wq, entry->wqs[entry->cur_wq]->idxd->id, + entry->wqs[entry->cur_wq]->id, cpu); + + return entry->wqs[entry->cur_wq]; +} + +static void wq_table_add(int cpu, struct idxd_wq *wq) +{ + struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); + + if (WARN_ON(entry->n_wqs == entry->max_wqs)) + return; + + entry->wqs[entry->n_wqs++] = wq; + + pr_debug("%s: added iaa wq %d.%d to idx %d of cpu %d\n", __func__, + entry->wqs[entry->n_wqs - 1]->idxd->id, + entry->wqs[entry->n_wqs - 1]->id, entry->n_wqs - 1, cpu); +} + +static void wq_table_free_entry(int cpu) +{ + struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); + + kfree(entry->wqs); + memset(entry, 0, sizeof(*entry)); +} + +static void wq_table_clear_entry(int cpu) +{ + struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); + + entry->n_wqs = 0; + entry->cur_wq = 0; + memset(entry->wqs, 0, entry->max_wqs * sizeof(struct idxd_wq *)); +} + +LIST_HEAD(iaa_devices); +DEFINE_MUTEX(iaa_devices_lock); + +/* If enabled, IAA hw crypto algos are registered, unavailable otherwise */ +static bool iaa_crypto_enabled; +static bool iaa_crypto_registered; + +/* Verify results of IAA compress or not */ +static bool iaa_verify_compress = true; + +static ssize_t verify_compress_show(struct device_driver *driver, char *buf) +{ + return sprintf(buf, "%d\n", iaa_verify_compress); +} + +static ssize_t verify_compress_store(struct device_driver *driver, + const char *buf, size_t count) +{ + int ret = -EBUSY; + + mutex_lock(&iaa_devices_lock); + + if (iaa_crypto_enabled) + goto out; + + ret = kstrtobool(buf, &iaa_verify_compress); + if (ret) + goto out; + + ret = count; +out: + mutex_unlock(&iaa_devices_lock); + + return 
ret; +} +static DRIVER_ATTR_RW(verify_compress); + +/* + * The iaa crypto driver supports three 'sync' methods determining how + * compressions and decompressions are performed: + * + * - sync: the compression or decompression completes before + * returning. This is the mode used by the async crypto + * interface when the sync mode is set to 'sync' and by + * the sync crypto interface regardless of setting. + * + * - async: the compression or decompression is submitted and returns + * immediately. Completion interrupts are not used so + * the caller is responsible for polling the descriptor + * for completion. This mode is applicable to only the + * async crypto interface and is ignored for anything + * else. + * + * - async_irq: the compression or decompression is submitted and + * returns immediately. Completion interrupts are + * enabled so the caller can wait for the completion and + * yield to other threads. When the compression or + * decompression completes, the completion is signaled + * and the caller awakened. This mode is applicable to + * only the async crypto interface and is ignored for + * anything else. + * + * These modes can be set using the iaa_crypto sync_mode driver + * attribute. + */ + +/* Use async mode */ +static bool async_mode; +/* Use interrupts */ +static bool use_irq; + +/** + * set_iaa_sync_mode - Set IAA sync mode + * @name: The name of the sync mode + * + * Make the IAA sync mode named @name the current sync mode used by + * compression/decompression. + */ + +static int set_iaa_sync_mode(const char *name) +{ + int ret = 0; + + if (sysfs_streq(name, "sync")) { + async_mode = false; + use_irq = false; + } else if (sysfs_streq(name, "async")) { + async_mode = true; + use_irq = false; + } else if (sysfs_streq(name, "async_irq")) { + async_mode = true; + use_irq = true; + } else { + ret = -EINVAL; + } + + return ret; +} + +static ssize_t sync_mode_show(struct device_driver *driver, char *buf) +{ + int ret = 0; + + if (!async_mode && !use_irq) + ret = sprintf(buf, "%s\n", "sync"); + else if (async_mode && !use_irq) + ret = sprintf(buf, "%s\n", "async"); + else if (async_mode && use_irq) + ret = sprintf(buf, "%s\n", "async_irq"); + + return ret; +} + +static ssize_t sync_mode_store(struct device_driver *driver, + const char *buf, size_t count) +{ + int ret = -EBUSY; + + mutex_lock(&iaa_devices_lock); + + if (iaa_crypto_enabled) + goto out; + + ret = set_iaa_sync_mode(buf); + if (ret == 0) + ret = count; +out: + mutex_unlock(&iaa_devices_lock); + + return ret; +} +static DRIVER_ATTR_RW(sync_mode); + +static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX]; + +static int find_empty_iaa_compression_mode(void) +{ + int i = -EINVAL; + + for (i = 0; i < IAA_COMP_MODES_MAX; i++) { + if (iaa_compression_modes[i]) + continue; + break; + } + + return i; +} + +static struct iaa_compression_mode *find_iaa_compression_mode(const char *name, int *idx) +{ + struct iaa_compression_mode *mode; + int i; + + for (i = 0; i < IAA_COMP_MODES_MAX; i++) { + mode = iaa_compression_modes[i]; + if (!mode) + continue; + + if (!strcmp(mode->name, name)) { + *idx = i; + return iaa_compression_modes[i]; + } + } + + return NULL; +} + +static void free_iaa_compression_mode(struct iaa_compression_mode *mode) +{ + kfree(mode->name); + kfree(mode->ll_table); + kfree(mode->d_table); + + kfree(mode); +} + +/* + * IAA Compression modes are defined by an ll_table and a d_table. 
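+ * The ll_table holds the DEFLATE literal/length code table and the + * d_table the distance code table. 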
+ * These tables are typically generated and captured using statistics + * collected from running actual compress/decompress workloads. + * + * A module or other kernel code can add and remove compression modes + * with a given name using the exported @add_iaa_compression_mode() + * and @remove_iaa_compression_mode() functions. + * + * When a new compression mode is added, the tables are saved in a + * global compression mode list. When IAA devices are added, a + * per-IAA device dma mapping is created for each IAA device, for each + * compression mode. These are the tables used to do the actual + * compression/decompression and are unmapped if/when the devices are + * removed. Currently, compression modes must be added before any + * device is added, and removed after all devices have been removed. + */ + +/** + * remove_iaa_compression_mode - Remove an IAA compression mode + * @name: The name the compression mode will be known as + * + * Remove the IAA compression mode named @name. + */ +void remove_iaa_compression_mode(const char *name) +{ + struct iaa_compression_mode *mode; + int idx; + + mutex_lock(&iaa_devices_lock); + + if (!list_empty(&iaa_devices)) + goto out; + + mode = find_iaa_compression_mode(name, &idx); + if (mode) { + free_iaa_compression_mode(mode); + iaa_compression_modes[idx] = NULL; + } +out: + mutex_unlock(&iaa_devices_lock); +} +EXPORT_SYMBOL_GPL(remove_iaa_compression_mode); + +/** + * add_iaa_compression_mode - Add an IAA compression mode + * @name: The name the compression mode will be known as + * @ll_table: The ll table + * @ll_table_size: The ll table size in bytes + * @d_table: The d table + * @d_table_size: The d table size in bytes + * @init: Optional callback function to init the compression mode data + * @free: Optional callback function to free the compression mode data + * + * Add a new IAA compression mode named @name. + * + * Returns 0 if successful, errcode otherwise. 
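+ * + * Purely illustrative (hypothetical caller; my_ll/my_d would hold + * tables gathered from workload statistics):: + * + *	static u32 my_ll[286], my_d[30]; + * + *	ret = add_iaa_compression_mode("custom", my_ll, sizeof(my_ll), + *				       my_d, sizeof(my_d), NULL, NULL); 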
+ */ +int add_iaa_compression_mode(const char *name, + const u32 *ll_table, + int ll_table_size, + const u32 *d_table, + int d_table_size, + iaa_dev_comp_init_fn_t init, + iaa_dev_comp_free_fn_t free) +{ + struct iaa_compression_mode *mode; + int idx, ret = -ENOMEM; + + mutex_lock(&iaa_devices_lock); + + if (!list_empty(&iaa_devices)) { + ret = -EBUSY; + goto out; + } + + mode = kzalloc(sizeof(*mode), GFP_KERNEL); + if (!mode) + goto out; + + mode->name = kstrdup(name, GFP_KERNEL); + if (!mode->name) + goto free; + + if (ll_table) { + mode->ll_table = kzalloc(ll_table_size, GFP_KERNEL); + if (!mode->ll_table) + goto free; + memcpy(mode->ll_table, ll_table, ll_table_size); + mode->ll_table_size = ll_table_size; + } + + if (d_table) { + mode->d_table = kzalloc(d_table_size, GFP_KERNEL); + if (!mode->d_table) + goto free; + memcpy(mode->d_table, d_table, d_table_size); + mode->d_table_size = d_table_size; + } + + mode->init = init; + mode->free = free; + + idx = find_empty_iaa_compression_mode(); + if (idx < 0) + goto free; + + pr_debug("IAA compression mode %s added at idx %d\n", + mode->name, idx); + + iaa_compression_modes[idx] = mode; + + ret = 0; +out: + mutex_unlock(&iaa_devices_lock); + + return ret; +free: + free_iaa_compression_mode(mode); + goto out; +} +EXPORT_SYMBOL_GPL(add_iaa_compression_mode); + +static struct iaa_device_compression_mode * +get_iaa_device_compression_mode(struct iaa_device *iaa_device, int idx) +{ + return iaa_device->compression_modes[idx]; +} + +static void free_device_compression_mode(struct iaa_device *iaa_device, + struct iaa_device_compression_mode *device_mode) +{ + size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN; + struct device *dev = &iaa_device->idxd->pdev->dev; + + kfree(device_mode->name); + + if (device_mode->aecs_comp_table) + dma_free_coherent(dev, size, device_mode->aecs_comp_table, + device_mode->aecs_comp_table_dma_addr); + kfree(device_mode); +} + +#define IDXD_OP_FLAG_AECS_RW_TGLS 0x400000 +#define IAX_AECS_DEFAULT_FLAG (IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC) +#define IAX_AECS_COMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS) +#define IAX_AECS_DECOMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS) +#define IAX_AECS_GEN_FLAG (IAX_AECS_DEFAULT_FLAG | \ + IDXD_OP_FLAG_WR_SRC2_AECS_COMP | \ + IDXD_OP_FLAG_AECS_RW_TGLS) + +static int check_completion(struct device *dev, + struct iax_completion_record *comp, + bool compress, + bool only_once); + +static int init_device_compression_mode(struct iaa_device *iaa_device, + struct iaa_compression_mode *mode, + int idx, struct idxd_wq *wq) +{ + size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN; + struct device *dev = &iaa_device->idxd->pdev->dev; + struct iaa_device_compression_mode *device_mode; + int ret = -ENOMEM; + + device_mode = kzalloc(sizeof(*device_mode), GFP_KERNEL); + if (!device_mode) + return -ENOMEM; + + device_mode->name = kstrdup(mode->name, GFP_KERNEL); + if (!device_mode->name) + goto free; + + device_mode->aecs_comp_table = dma_alloc_coherent(dev, size, + &device_mode->aecs_comp_table_dma_addr, GFP_KERNEL); + if (!device_mode->aecs_comp_table) + goto free; + + /* Add Huffman table to aecs */ + memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table)); + memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size); + memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size); + + if (mode->init) { + ret = mode->init(device_mode); + if (ret) 
+ goto free; + } + + /* mode index should match iaa_compression_modes idx */ + iaa_device->compression_modes[idx] = device_mode; + + pr_debug("IAA %s compression mode initialized for iaa device %d\n", + mode->name, iaa_device->idxd->id); + + ret = 0; +out: + return ret; +free: + pr_debug("IAA %s compression mode initialization failed for iaa device %d\n", + mode->name, iaa_device->idxd->id); + + free_device_compression_mode(iaa_device, device_mode); + goto out; +} + +static int init_device_compression_modes(struct iaa_device *iaa_device, + struct idxd_wq *wq) +{ + struct iaa_compression_mode *mode; + int i, ret = 0; + + for (i = 0; i < IAA_COMP_MODES_MAX; i++) { + mode = iaa_compression_modes[i]; + if (!mode) + continue; + + ret = init_device_compression_mode(iaa_device, mode, i, wq); + if (ret) + break; + } + + return ret; +} + +static void remove_device_compression_modes(struct iaa_device *iaa_device) +{ + struct iaa_device_compression_mode *device_mode; + int i; + + for (i = 0; i < IAA_COMP_MODES_MAX; i++) { + device_mode = iaa_device->compression_modes[i]; + if (!device_mode) + continue; + + free_device_compression_mode(iaa_device, device_mode); + iaa_device->compression_modes[i] = NULL; + if (iaa_compression_modes[i]->free) + iaa_compression_modes[i]->free(device_mode); + } +} + +static struct iaa_device *iaa_device_alloc(void) +{ + struct iaa_device *iaa_device; + + iaa_device = kzalloc(sizeof(*iaa_device), GFP_KERNEL); + if (!iaa_device) + return NULL; + + INIT_LIST_HEAD(&iaa_device->wqs); + + return iaa_device; +} + +static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) +{ + struct iaa_wq *iaa_wq; + + list_for_each_entry(iaa_wq, &iaa_device->wqs, list) { + if (iaa_wq->wq == wq) + return true; + } + + return false; +} + +static struct iaa_device *add_iaa_device(struct idxd_device *idxd) +{ + struct iaa_device *iaa_device; + + iaa_device = iaa_device_alloc(); + if (!iaa_device) + return NULL; + + iaa_device->idxd = idxd; + + list_add_tail(&iaa_device->list, &iaa_devices); + + nr_iaa++; + + return iaa_device; +} + +static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq) +{ + int ret = 0; + + ret = init_device_compression_modes(iaa_device, iaa_wq->wq); + if (ret) + return ret; + + return ret; +} + +static void del_iaa_device(struct iaa_device *iaa_device) +{ + list_del(&iaa_device->list); + + nr_iaa--; +} + +static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq, + struct iaa_wq **new_wq) +{ + struct idxd_device *idxd = iaa_device->idxd; + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + struct iaa_wq *iaa_wq; + + iaa_wq = kzalloc(sizeof(*iaa_wq), GFP_KERNEL); + if (!iaa_wq) + return -ENOMEM; + + iaa_wq->wq = wq; + iaa_wq->iaa_device = iaa_device; + idxd_wq_set_private(wq, iaa_wq); + + list_add_tail(&iaa_wq->list, &iaa_device->wqs); + + iaa_device->n_wq++; + + if (new_wq) + *new_wq = iaa_wq; + + dev_dbg(dev, "added wq %d to iaa device %d, n_wq %d\n", + wq->id, iaa_device->idxd->id, iaa_device->n_wq); + + return 0; +} + +static void del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) +{ + struct idxd_device *idxd = iaa_device->idxd; + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + struct iaa_wq *iaa_wq; + + list_for_each_entry(iaa_wq, &iaa_device->wqs, list) { + if (iaa_wq->wq == wq) { + list_del(&iaa_wq->list); + iaa_device->n_wq--; + + dev_dbg(dev, "removed wq %d from iaa_device %d, n_wq %d, nr_iaa %d\n", + wq->id, iaa_device->idxd->id, + iaa_device->n_wq, nr_iaa); + 
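+ /* Dropping the last wq also removes the device from iaa_devices. */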
+ if (iaa_device->n_wq == 0) + del_iaa_device(iaa_device); + break; + } + } +} + +static void clear_wq_table(void) +{ + int cpu; + + for (cpu = 0; cpu < nr_cpus; cpu++) + wq_table_clear_entry(cpu); + + pr_debug("cleared wq table\n"); +} + +static void free_iaa_device(struct iaa_device *iaa_device) +{ + if (!iaa_device) + return; + + remove_device_compression_modes(iaa_device); + kfree(iaa_device); +} + +static void __free_iaa_wq(struct iaa_wq *iaa_wq) +{ + struct iaa_device *iaa_device; + + if (!iaa_wq) + return; + + iaa_device = iaa_wq->iaa_device; + if (iaa_device->n_wq == 0) + free_iaa_device(iaa_wq->iaa_device); +} + +static void free_iaa_wq(struct iaa_wq *iaa_wq) +{ + struct idxd_wq *wq; + + __free_iaa_wq(iaa_wq); + + wq = iaa_wq->wq; + + kfree(iaa_wq); + idxd_wq_set_private(wq, NULL); +} + +static int iaa_wq_get(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct iaa_wq *iaa_wq; + int ret = 0; + + spin_lock(&idxd->dev_lock); + iaa_wq = idxd_wq_get_private(wq); + if (iaa_wq && !iaa_wq->remove) { + iaa_wq->ref++; + idxd_wq_get(wq); + } else { + ret = -ENODEV; + } + spin_unlock(&idxd->dev_lock); + + return ret; +} + +static int iaa_wq_put(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct iaa_wq *iaa_wq; + bool free = false; + int ret = 0; + + spin_lock(&idxd->dev_lock); + iaa_wq = idxd_wq_get_private(wq); + if (iaa_wq) { + iaa_wq->ref--; + if (iaa_wq->ref == 0 && iaa_wq->remove) { + idxd_wq_set_private(wq, NULL); + free = true; + } + idxd_wq_put(wq); + } else { + ret = -ENODEV; + } + spin_unlock(&idxd->dev_lock); + if (free) { + __free_iaa_wq(iaa_wq); + kfree(iaa_wq); + } + + return ret; +} + +static void free_wq_table(void) +{ + int cpu; + + for (cpu = 0; cpu < nr_cpus; cpu++) + wq_table_free_entry(cpu); + + free_percpu(wq_table); + + pr_debug("freed wq table\n"); +} + +static int alloc_wq_table(int max_wqs) +{ + struct wq_table_entry *entry; + int cpu; + + wq_table = alloc_percpu(struct wq_table_entry); + if (!wq_table) + return -ENOMEM; + + for (cpu = 0; cpu < nr_cpus; cpu++) { + entry = per_cpu_ptr(wq_table, cpu); + entry->wqs = kcalloc(max_wqs, sizeof(struct wq *), GFP_KERNEL); + if (!entry->wqs) { + free_wq_table(); + return -ENOMEM; + } + + entry->max_wqs = max_wqs; + } + + pr_debug("initialized wq table\n"); + + return 0; +} + +static int save_iaa_wq(struct idxd_wq *wq) +{ + struct iaa_device *iaa_device, *found = NULL; + struct idxd_device *idxd; + struct pci_dev *pdev; + struct device *dev; + int ret = 0; + + list_for_each_entry(iaa_device, &iaa_devices, list) { + if (iaa_device->idxd == wq->idxd) { + idxd = iaa_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + /* + * Check to see that we don't already have this wq. + * Shouldn't happen but we don't control probing. 
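+ * If we do see it again, just log it and keep the existing entry. 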
+ */ + if (iaa_has_wq(iaa_device, wq)) { + dev_dbg(dev, "same wq probed multiple times for iaa_device %p\n", + iaa_device); + goto out; + } + + found = iaa_device; + + ret = add_iaa_wq(iaa_device, wq, NULL); + if (ret) + goto out; + + break; + } + } + + if (!found) { + struct iaa_device *new_device; + struct iaa_wq *new_wq; + + new_device = add_iaa_device(wq->idxd); + if (!new_device) { + ret = -ENOMEM; + goto out; + } + + ret = add_iaa_wq(new_device, wq, &new_wq); + if (ret) { + del_iaa_device(new_device); + free_iaa_device(new_device); + goto out; + } + + ret = init_iaa_device(new_device, new_wq); + if (ret) { + del_iaa_wq(new_device, new_wq->wq); + del_iaa_device(new_device); + free_iaa_wq(new_wq); + goto out; + } + } + + if (WARN_ON(nr_iaa == 0)) + return -EINVAL; + + cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa; + if (!cpus_per_iaa) + cpus_per_iaa = 1; +out: + return 0; +} + +static void remove_iaa_wq(struct idxd_wq *wq) +{ + struct iaa_device *iaa_device; + + list_for_each_entry(iaa_device, &iaa_devices, list) { + if (iaa_has_wq(iaa_device, wq)) { + del_iaa_wq(iaa_device, wq); + break; + } + } + + if (nr_iaa) { + cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa; + if (!cpus_per_iaa) + cpus_per_iaa = 1; + } else + cpus_per_iaa = 1; +} + +static int wq_table_add_wqs(int iaa, int cpu) +{ + struct iaa_device *iaa_device, *found_device = NULL; + int ret = 0, cur_iaa = 0, n_wqs_added = 0; + struct idxd_device *idxd; + struct iaa_wq *iaa_wq; + struct pci_dev *pdev; + struct device *dev; + + list_for_each_entry(iaa_device, &iaa_devices, list) { + idxd = iaa_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + + if (cur_iaa != iaa) { + cur_iaa++; + continue; + } + + found_device = iaa_device; + dev_dbg(dev, "getting wq from iaa_device %d, cur_iaa %d\n", + found_device->idxd->id, cur_iaa); + break; + } + + if (!found_device) { + found_device = list_first_entry_or_null(&iaa_devices, + struct iaa_device, list); + if (!found_device) { + pr_debug("couldn't find any iaa devices with wqs!\n"); + ret = -EINVAL; + goto out; + } + cur_iaa = 0; + + idxd = found_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + dev_dbg(dev, "getting wq from only iaa_device %d, cur_iaa %d\n", + found_device->idxd->id, cur_iaa); + } + + list_for_each_entry(iaa_wq, &found_device->wqs, list) { + wq_table_add(cpu, iaa_wq->wq); + pr_debug("rebalance: added wq for cpu=%d: iaa wq %d.%d\n", + cpu, iaa_wq->wq->idxd->id, iaa_wq->wq->id); + n_wqs_added++; + } + + if (!n_wqs_added) { + pr_debug("couldn't find any iaa wqs!\n"); + ret = -EINVAL; + goto out; + } +out: + return ret; +} + +/* + * Rebalance the wq table so that given a cpu, it's easy to find the + * closest IAA instance. The idea is to try to choose the most + * appropriate IAA instance for a caller and spread available + * workqueues around to clients. 
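+ * + * Illustrative numbers: with 2 nodes of 64 CPUs each and 8 IAA + * instances, cpus_per_iaa = (2 * 64) / 8 = 16, so each instance + * serves a block of 16 consecutive CPUs within its node. 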
+ */ +static void rebalance_wq_table(void) +{ + const struct cpumask *node_cpus; + int node, cpu, iaa = -1; + + if (nr_iaa == 0) + return; + + pr_debug("rebalance: nr_nodes=%d, nr_cpus %d, nr_iaa %d, cpus_per_iaa %d\n", + nr_nodes, nr_cpus, nr_iaa, cpus_per_iaa); + + clear_wq_table(); + + if (nr_iaa == 1) { + for (cpu = 0; cpu < nr_cpus; cpu++) { + if (WARN_ON(wq_table_add_wqs(0, cpu))) { + pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu); + return; + } + } + + return; + } + + for_each_node_with_cpus(node) { + node_cpus = cpumask_of_node(node); + + for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) { + int node_cpu = cpumask_nth(cpu, node_cpus); + + if (WARN_ON(node_cpu >= nr_cpu_ids)) { + pr_debug("node_cpu %d doesn't exist!\n", node_cpu); + return; + } + + if ((cpu % cpus_per_iaa) == 0) + iaa++; + + if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) { + pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu); + return; + } + } + } +} + +static inline int check_completion(struct device *dev, + struct iax_completion_record *comp, + bool compress, + bool only_once) +{ + char *op_str = compress ? "compress" : "decompress"; + int ret = 0; + + while (!comp->status) { + if (only_once) + return -EAGAIN; + cpu_relax(); + } + + if (comp->status != IAX_COMP_SUCCESS) { + if (comp->status == IAA_ERROR_WATCHDOG_EXPIRED) { + ret = -ETIMEDOUT; + dev_dbg(dev, "%s timed out, size=0x%x\n", + op_str, comp->output_size); + update_completion_timeout_errs(); + goto out; + } + + if (comp->status == IAA_ANALYTICS_ERROR && + comp->error_code == IAA_ERROR_COMP_BUF_OVERFLOW && compress) { + ret = -E2BIG; + dev_dbg(dev, "compressed > uncompressed size," + " not compressing, size=0x%x\n", + comp->output_size); + update_completion_comp_buf_overflow_errs(); + goto out; + } + + if (comp->status == IAA_ERROR_DECOMP_BUF_OVERFLOW) { + ret = -EOVERFLOW; + goto out; + } + + ret = -EINVAL; + dev_dbg(dev, "iaa %s status=0x%x, error=0x%x, size=0x%x\n", + op_str, comp->status, comp->error_code, comp->output_size); + print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, comp, 64, 0); + update_completion_einval_errs(); + + goto out; + } +out: + return ret; +} + +static int deflate_generic_decompress(struct acomp_req *req) +{ + void *src, *dst; + int ret; + + src = kmap_local_page(sg_page(req->src)) + req->src->offset; + dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset; + + ret = crypto_comp_decompress(deflate_generic_tfm, + src, req->slen, dst, &req->dlen); + + kunmap_local(src); + kunmap_local(dst); + + update_total_sw_decomp_calls(); + + return ret; +} + +static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq, + struct acomp_req *req, + dma_addr_t *src_addr, dma_addr_t *dst_addr); + +static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, + struct idxd_wq *wq, + dma_addr_t src_addr, unsigned int slen, + dma_addr_t dst_addr, unsigned int *dlen, + u32 compression_crc); + +static void iaa_desc_complete(struct idxd_desc *idxd_desc, + enum idxd_complete_type comp_type, + bool free_desc, void *__ctx, + u32 *status) +{ + struct iaa_device_compression_mode *active_compression_mode; + struct iaa_compression_ctx *compression_ctx; + struct crypto_ctx *ctx = __ctx; + struct iaa_device *iaa_device; + struct idxd_device *idxd; + struct iaa_wq *iaa_wq; + struct pci_dev *pdev; + struct device *dev; + int ret, err = 0; + + compression_ctx = crypto_tfm_ctx(ctx->tfm); + + iaa_wq = idxd_wq_get_private(idxd_desc->wq); + iaa_device = iaa_wq->iaa_device; + idxd = 
iaa_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + + active_compression_mode = get_iaa_device_compression_mode(iaa_device, + compression_ctx->mode); + dev_dbg(dev, "%s: compression mode %s," + " ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__, + active_compression_mode->name, + ctx->src_addr, ctx->dst_addr); + + ret = check_completion(dev, idxd_desc->iax_completion, + ctx->compress, false); + if (ret) { + dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret); + if (!ctx->compress && + idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) { + pr_warn("%s: falling back to deflate-generic decompress, " + "analytics error code %x\n", __func__, + idxd_desc->iax_completion->error_code); + ret = deflate_generic_decompress(ctx->req); + if (ret) { + dev_dbg(dev, "%s: deflate-generic failed ret=%d\n", + __func__, ret); + err = -EIO; + goto err; + } + } else { + err = -EIO; + goto err; + } + } else { + ctx->req->dlen = idxd_desc->iax_completion->output_size; + } + + /* Update stats */ + if (ctx->compress) { + update_total_comp_bytes_out(ctx->req->dlen); + update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen); + } else { + update_total_decomp_bytes_in(ctx->req->slen); + update_wq_decomp_bytes(iaa_wq->wq, ctx->req->slen); + } + + if (ctx->compress && compression_ctx->verify_compress) { + dma_addr_t src_addr, dst_addr; + u32 compression_crc; + + compression_crc = idxd_desc->iax_completion->crc; + + ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr); + if (ret) { + dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret); + err = -EIO; + goto out; + } + + ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr, + ctx->req->slen, dst_addr, &ctx->req->dlen, + compression_crc); + if (ret) { + dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret); + err = -EIO; + } + + dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE); + dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE); + + goto out; + } +err: + dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE); + dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE); +out: + if (ret != 0) + dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret); + + if (ctx->req->base.complete) + acomp_request_complete(ctx->req, err); + + if (free_desc) + idxd_free_desc(idxd_desc->wq, idxd_desc); + iaa_wq_put(idxd_desc->wq); +} + +static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, + struct idxd_wq *wq, + dma_addr_t src_addr, unsigned int slen, + dma_addr_t dst_addr, unsigned int *dlen, + u32 *compression_crc, + bool disable_async) +{ + struct iaa_device_compression_mode *active_compression_mode; + struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + struct iaa_device *iaa_device; + struct idxd_desc *idxd_desc; + struct iax_hw_desc *desc; + struct idxd_device *idxd; + struct iaa_wq *iaa_wq; + struct pci_dev *pdev; + struct device *dev; + int ret = 0; + + iaa_wq = idxd_wq_get_private(wq); + iaa_device = iaa_wq->iaa_device; + idxd = iaa_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + + active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); + + idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(idxd_desc)) { + dev_dbg(dev, "idxd descriptor allocation failed\n"); + dev_dbg(dev, "iaa compress failed: ret=%ld\n", PTR_ERR(idxd_desc)); + return PTR_ERR(idxd_desc); + } + desc = idxd_desc->iax_hw; + + desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR 
| + IDXD_OP_FLAG_RD_SRC2_AECS | IDXD_OP_FLAG_CC; + desc->opcode = IAX_OPCODE_COMPRESS; + desc->compr_flags = IAA_COMP_FLAGS; + desc->priv = 0; + + desc->src1_addr = (u64)src_addr; + desc->src1_size = slen; + desc->dst_addr = (u64)dst_addr; + desc->max_dst_size = *dlen; + desc->src2_addr = active_compression_mode->aecs_comp_table_dma_addr; + desc->src2_size = sizeof(struct aecs_comp_table_record); + desc->completion_addr = idxd_desc->compl_dma; + + if (ctx->use_irq && !disable_async) { + desc->flags |= IDXD_OP_FLAG_RCI; + + idxd_desc->crypto.req = req; + idxd_desc->crypto.tfm = tfm; + idxd_desc->crypto.src_addr = src_addr; + idxd_desc->crypto.dst_addr = dst_addr; + idxd_desc->crypto.compress = true; + + dev_dbg(dev, "%s use_async_irq: compression mode %s," + " src_addr %llx, dst_addr %llx\n", __func__, + active_compression_mode->name, + src_addr, dst_addr); + } else if (ctx->async_mode && !disable_async) + req->base.data = idxd_desc; + + dev_dbg(dev, "%s: compression mode %s," + " desc->src1_addr %llx, desc->src1_size %d," + " desc->dst_addr %llx, desc->max_dst_size %d," + " desc->src2_addr %llx, desc->src2_size %d\n", __func__, + active_compression_mode->name, + desc->src1_addr, desc->src1_size, desc->dst_addr, + desc->max_dst_size, desc->src2_addr, desc->src2_size); + + ret = idxd_submit_desc(wq, idxd_desc); + if (ret) { + dev_dbg(dev, "submit_desc failed ret=%d\n", ret); + goto err; + } + + /* Update stats */ + update_total_comp_calls(); + update_wq_comp_calls(wq); + + if (ctx->async_mode && !disable_async) { + ret = -EINPROGRESS; + dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); + goto out; + } + + ret = check_completion(dev, idxd_desc->iax_completion, true, false); + if (ret) { + dev_dbg(dev, "check_completion failed ret=%d\n", ret); + goto err; + } + + *dlen = idxd_desc->iax_completion->output_size; + + /* Update stats */ + update_total_comp_bytes_out(*dlen); + update_wq_comp_bytes(wq, *dlen); + + *compression_crc = idxd_desc->iax_completion->crc; + + if (!ctx->async_mode || disable_async) + idxd_free_desc(wq, idxd_desc); +out: + return ret; +err: + idxd_free_desc(wq, idxd_desc); + dev_dbg(dev, "iaa compress failed: ret=%d\n", ret); + + goto out; +} + +static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq, + struct acomp_req *req, + dma_addr_t *src_addr, dma_addr_t *dst_addr) +{ + int ret = 0; + int nr_sgs; + + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); + + nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "verify: couldn't map src sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto out; + } + *src_addr = sg_dma_address(req->src); + dev_dbg(dev, "verify: dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," + " req->slen %d, sg_dma_len(sg) %d\n", *src_addr, nr_sgs, + req->src, req->slen, sg_dma_len(req->src)); + + nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "verify: couldn't map dst sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); + goto out; + } + *dst_addr = sg_dma_address(req->dst); + dev_dbg(dev, "verify: dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," + " req->dlen %d, sg_dma_len(sg) %d\n", *dst_addr, nr_sgs, + 
req->dst, req->dlen, sg_dma_len(req->dst)); +out: + return ret; +} + +static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, + struct idxd_wq *wq, + dma_addr_t src_addr, unsigned int slen, + dma_addr_t dst_addr, unsigned int *dlen, + u32 compression_crc) +{ + struct iaa_device_compression_mode *active_compression_mode; + struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + struct iaa_device *iaa_device; + struct idxd_desc *idxd_desc; + struct iax_hw_desc *desc; + struct idxd_device *idxd; + struct iaa_wq *iaa_wq; + struct pci_dev *pdev; + struct device *dev; + int ret = 0; + + iaa_wq = idxd_wq_get_private(wq); + iaa_device = iaa_wq->iaa_device; + idxd = iaa_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + + active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); + + idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(idxd_desc)) { + dev_dbg(dev, "idxd descriptor allocation failed\n"); + dev_dbg(dev, "iaa compress failed: ret=%ld\n", + PTR_ERR(idxd_desc)); + return PTR_ERR(idxd_desc); + } + desc = idxd_desc->iax_hw; + + /* Verify (optional) - decompress and check crc, suppress dest write */ + + desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC; + desc->opcode = IAX_OPCODE_DECOMPRESS; + desc->decompr_flags = IAA_DECOMP_FLAGS | IAA_DECOMP_SUPPRESS_OUTPUT; + desc->priv = 0; + + desc->src1_addr = (u64)dst_addr; + desc->src1_size = *dlen; + desc->dst_addr = (u64)src_addr; + desc->max_dst_size = slen; + desc->completion_addr = idxd_desc->compl_dma; + + dev_dbg(dev, "(verify) compression mode %s," + " desc->src1_addr %llx, desc->src1_size %d," + " desc->dst_addr %llx, desc->max_dst_size %d," + " desc->src2_addr %llx, desc->src2_size %d\n", + active_compression_mode->name, + desc->src1_addr, desc->src1_size, desc->dst_addr, + desc->max_dst_size, desc->src2_addr, desc->src2_size); + + ret = idxd_submit_desc(wq, idxd_desc); + if (ret) { + dev_dbg(dev, "submit_desc (verify) failed ret=%d\n", ret); + goto err; + } + + ret = check_completion(dev, idxd_desc->iax_completion, false, false); + if (ret) { + dev_dbg(dev, "(verify) check_completion failed ret=%d\n", ret); + goto err; + } + + if (compression_crc != idxd_desc->iax_completion->crc) { + ret = -EINVAL; + dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:" + " comp=0x%x, decomp=0x%x\n", compression_crc, + idxd_desc->iax_completion->crc); + print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, + 8, 1, idxd_desc->iax_completion, 64, 0); + goto err; + } + + idxd_free_desc(wq, idxd_desc); +out: + return ret; +err: + idxd_free_desc(wq, idxd_desc); + dev_dbg(dev, "iaa compress failed: ret=%d\n", ret); + + goto out; +} + +static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, + struct idxd_wq *wq, + dma_addr_t src_addr, unsigned int slen, + dma_addr_t dst_addr, unsigned int *dlen, + bool disable_async) +{ + struct iaa_device_compression_mode *active_compression_mode; + struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + struct iaa_device *iaa_device; + struct idxd_desc *idxd_desc; + struct iax_hw_desc *desc; + struct idxd_device *idxd; + struct iaa_wq *iaa_wq; + struct pci_dev *pdev; + struct device *dev; + int ret = 0; + + iaa_wq = idxd_wq_get_private(wq); + iaa_device = iaa_wq->iaa_device; + idxd = iaa_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + + active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); + + idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(idxd_desc)) { + dev_dbg(dev, 
"idxd descriptor allocation failed\n"); + dev_dbg(dev, "iaa decompress failed: ret=%ld\n", + PTR_ERR(idxd_desc)); + return PTR_ERR(idxd_desc); + } + desc = idxd_desc->iax_hw; + + desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC; + desc->opcode = IAX_OPCODE_DECOMPRESS; + desc->max_dst_size = PAGE_SIZE; + desc->decompr_flags = IAA_DECOMP_FLAGS; + desc->priv = 0; + + desc->src1_addr = (u64)src_addr; + desc->dst_addr = (u64)dst_addr; + desc->max_dst_size = *dlen; + desc->src1_size = slen; + desc->completion_addr = idxd_desc->compl_dma; + + if (ctx->use_irq && !disable_async) { + desc->flags |= IDXD_OP_FLAG_RCI; + + idxd_desc->crypto.req = req; + idxd_desc->crypto.tfm = tfm; + idxd_desc->crypto.src_addr = src_addr; + idxd_desc->crypto.dst_addr = dst_addr; + idxd_desc->crypto.compress = false; + + dev_dbg(dev, "%s: use_async_irq compression mode %s," + " src_addr %llx, dst_addr %llx\n", __func__, + active_compression_mode->name, + src_addr, dst_addr); + } else if (ctx->async_mode && !disable_async) + req->base.data = idxd_desc; + + dev_dbg(dev, "%s: decompression mode %s," + " desc->src1_addr %llx, desc->src1_size %d," + " desc->dst_addr %llx, desc->max_dst_size %d," + " desc->src2_addr %llx, desc->src2_size %d\n", __func__, + active_compression_mode->name, + desc->src1_addr, desc->src1_size, desc->dst_addr, + desc->max_dst_size, desc->src2_addr, desc->src2_size); + + ret = idxd_submit_desc(wq, idxd_desc); + if (ret) { + dev_dbg(dev, "submit_desc failed ret=%d\n", ret); + goto err; + } + + /* Update stats */ + update_total_decomp_calls(); + update_wq_decomp_calls(wq); + + if (ctx->async_mode && !disable_async) { + ret = -EINPROGRESS; + dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); + goto out; + } + + ret = check_completion(dev, idxd_desc->iax_completion, false, false); + if (ret) { + dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret); + if (idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) { + pr_warn("%s: falling back to deflate-generic decompress, " + "analytics error code %x\n", __func__, + idxd_desc->iax_completion->error_code); + ret = deflate_generic_decompress(req); + if (ret) { + dev_dbg(dev, "%s: deflate-generic failed ret=%d\n", + __func__, ret); + goto err; + } + } else { + goto err; + } + } else { + req->dlen = idxd_desc->iax_completion->output_size; + } + + *dlen = req->dlen; + + if (!ctx->async_mode || disable_async) + idxd_free_desc(wq, idxd_desc); + + /* Update stats */ + update_total_decomp_bytes_in(slen); + update_wq_decomp_bytes(wq, slen); +out: + return ret; +err: + idxd_free_desc(wq, idxd_desc); + dev_dbg(dev, "iaa decompress failed: ret=%d\n", ret); + + goto out; +} + +static int iaa_comp_acompress(struct acomp_req *req) +{ + struct iaa_compression_ctx *compression_ctx; + struct crypto_tfm *tfm = req->base.tfm; + dma_addr_t src_addr, dst_addr; + bool disable_async = false; + int nr_sgs, cpu, ret = 0; + struct iaa_wq *iaa_wq; + u32 compression_crc; + struct idxd_wq *wq; + struct device *dev; + int order = -1; + + compression_ctx = crypto_tfm_ctx(tfm); + + if (!iaa_crypto_enabled) { + pr_debug("iaa_crypto disabled, not compressing\n"); + return -ENODEV; + } + + if (!req->src || !req->slen) { + pr_debug("invalid src, not compressing\n"); + return -EINVAL; + } + + cpu = get_cpu(); + wq = wq_table_next_wq(cpu); + put_cpu(); + if (!wq) { + pr_debug("no wq configured for cpu=%d\n", cpu); + return -ENODEV; + } + + ret = iaa_wq_get(wq); + if (ret) { + pr_debug("no wq available for cpu=%d\n", cpu); + return -ENODEV; + } + + 
iaa_wq = idxd_wq_get_private(wq); + + if (!req->dst) { + gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; + + /* incompressible data will always be < 2 * slen */ + req->dlen = 2 * req->slen; + order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); + req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); + if (!req->dst) { + ret = -ENOMEM; + order = -1; + goto out; + } + disable_async = true; + } + + dev = &wq->idxd->pdev->dev; + + nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map src sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto out; + } + src_addr = sg_dma_address(req->src); + dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," + " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, + req->src, req->slen, sg_dma_len(req->src)); + + nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map dst sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto err_map_dst; + } + dst_addr = sg_dma_address(req->dst); + dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," + " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, + req->dst, req->dlen, sg_dma_len(req->dst)); + + ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, + &req->dlen, &compression_crc, disable_async); + if (ret == -EINPROGRESS) + return ret; + + if (!ret && compression_ctx->verify_compress) { + ret = iaa_remap_for_verify(dev, iaa_wq, req, &src_addr, &dst_addr); + if (ret) { + dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret); + goto out; + } + + ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen, + dst_addr, &req->dlen, compression_crc); + if (ret) + dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret); + + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE); + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); + + goto out; + } + + if (ret) + dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret); + + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); +err_map_dst: + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); +out: + iaa_wq_put(wq); + + if (order >= 0) + sgl_free_order(req->dst, order); + + return ret; +} + +static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) +{ + gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
+ GFP_KERNEL : GFP_ATOMIC; + struct crypto_tfm *tfm = req->base.tfm; + dma_addr_t src_addr, dst_addr; + int nr_sgs, cpu, ret = 0; + struct iaa_wq *iaa_wq; + struct device *dev; + struct idxd_wq *wq; + int order = -1; + + cpu = get_cpu(); + wq = wq_table_next_wq(cpu); + put_cpu(); + if (!wq) { + pr_debug("no wq configured for cpu=%d\n", cpu); + return -ENODEV; + } + + ret = iaa_wq_get(wq); + if (ret) { + pr_debug("no wq available for cpu=%d\n", cpu); + return -ENODEV; + } + + iaa_wq = idxd_wq_get_private(wq); + + dev = &wq->idxd->pdev->dev; + + nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map src sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto out; + } + src_addr = sg_dma_address(req->src); + dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," + " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, + req->src, req->slen, sg_dma_len(req->src)); + + req->dlen = 4 * req->slen; /* start with ~avg comp ratio */ +alloc_dest: + order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); + req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); + if (!req->dst) { + ret = -ENOMEM; + order = -1; + goto out; + } + + nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map dst sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto err_map_dst; + } + + dst_addr = sg_dma_address(req->dst); + dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," + " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, + req->dst, req->dlen, sg_dma_len(req->dst)); + ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, + dst_addr, &req->dlen, true); + if (ret == -EOVERFLOW) { + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); + /* free the undersized dst buffer before retrying with a larger one */ + sgl_free_order(req->dst, order); + order = -1; + req->dlen *= 2; + if (req->dlen > CRYPTO_ACOMP_DST_MAX) + goto err_map_dst; + goto alloc_dest; + } + + if (ret != 0) + dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret); + + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); +err_map_dst: + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); +out: + iaa_wq_put(wq); + + if (order >= 0) + sgl_free_order(req->dst, order); + + return ret; +} + +static int iaa_comp_adecompress(struct acomp_req *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + dma_addr_t src_addr, dst_addr; + int nr_sgs, cpu, ret = 0; + struct iaa_wq *iaa_wq; + struct device *dev; + struct idxd_wq *wq; + + if (!iaa_crypto_enabled) { + pr_debug("iaa_crypto disabled, not decompressing\n"); + return -ENODEV; + } + + if (!req->src || !req->slen) { + pr_debug("invalid src, not decompressing\n"); + return -EINVAL; + } + + if (!req->dst) + return iaa_comp_adecompress_alloc_dest(req); + + cpu = get_cpu(); + wq = wq_table_next_wq(cpu); + put_cpu(); + if (!wq) { + pr_debug("no wq configured for cpu=%d\n", cpu); + return -ENODEV; + } + + ret = iaa_wq_get(wq); + if (ret) { + pr_debug("no wq available for cpu=%d\n", cpu); + return -ENODEV; + } + + iaa_wq = idxd_wq_get_private(wq); + + dev = &wq->idxd->pdev->dev; + + nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map src sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto out; + } + src_addr = 
sg_dma_address(req->src); + dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," + " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, + req->src, req->slen, sg_dma_len(req->src)); + + nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map dst sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto err_map_dst; + } + dst_addr = sg_dma_address(req->dst); + dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," + " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, + req->dst, req->dlen, sg_dma_len(req->dst)); + + ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, + dst_addr, &req->dlen, false); + if (ret == -EINPROGRESS) + return ret; + + if (ret != 0) + dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret); + + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); +err_map_dst: + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); +out: + iaa_wq_put(wq); + + return ret; +} + +static void compression_ctx_init(struct iaa_compression_ctx *ctx) +{ + ctx->verify_compress = iaa_verify_compress; + ctx->async_mode = async_mode; + ctx->use_irq = use_irq; +} + +static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm) +{ + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); + struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + + compression_ctx_init(ctx); + + ctx->mode = IAA_MODE_FIXED; + + return 0; +} + +static void dst_free(struct scatterlist *sgl) +{ + /* + * Called for req->dst = NULL cases but we free elsewhere + * using sgl_free_order(). + */ +} + +static struct acomp_alg iaa_acomp_fixed_deflate = { + .init = iaa_comp_init_fixed, + .compress = iaa_comp_acompress, + .decompress = iaa_comp_adecompress, + .dst_free = dst_free, + .base = { + .cra_name = "deflate", + .cra_driver_name = "deflate-iaa", + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_ctxsize = sizeof(struct iaa_compression_ctx), + .cra_module = THIS_MODULE, + .cra_priority = IAA_ALG_PRIORITY, + } +}; + +static int iaa_register_compression_device(void) +{ + int ret; + + ret = crypto_register_acomp(&iaa_acomp_fixed_deflate); + if (ret) { + pr_err("deflate algorithm acomp fixed registration failed (%d)\n", ret); + goto out; + } + + iaa_crypto_registered = true; +out: + return ret; +} + +static int iaa_unregister_compression_device(void) +{ + if (iaa_crypto_registered) + crypto_unregister_acomp(&iaa_acomp_fixed_deflate); + + return 0; +} + +static int iaa_crypto_probe(struct idxd_dev *idxd_dev) +{ + struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); + struct idxd_device *idxd = wq->idxd; + struct idxd_driver_data *data = idxd->data; + struct device *dev = &idxd_dev->conf_dev; + bool first_wq = false; + int ret = 0; + + if (idxd->state != IDXD_DEV_ENABLED) + return -ENXIO; + + if (data->type != IDXD_TYPE_IAX) + return -ENODEV; + + mutex_lock(&wq->wq_lock); + + if (idxd_wq_get_private(wq)) { + mutex_unlock(&wq->wq_lock); + return -EBUSY; + } + + if (!idxd_wq_driver_name_match(wq, dev)) { + dev_dbg(dev, "wq %d.%d driver_name match failed: wq driver_name %s, dev driver name %s\n", + idxd->id, wq->id, wq->driver_name, dev->driver->name); + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; + ret = -ENODEV; + goto err; + } + + wq->type = IDXD_WQT_KERNEL; + + ret = idxd_drv_enable_wq(wq); + if (ret < 0) { + dev_dbg(dev, "enable wq %d.%d failed: %d\n", + idxd->id, wq->id, ret); + ret = -ENXIO; + goto err; + } + + 
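/*
 * A minimal sketch of the per-CPU wq table that alloc_wq_table() and
 * rebalance_wq_table() maintain, and that wq_table_next_wq() consults on
 * the hot compress/decompress paths above. This is an assumed shape for
 * illustration only; the real definitions live elsewhere in this driver:
 *
 *	struct wq_table_entry {
 *		struct idxd_wq **wqs;
 *		int max_wqs;
 *		int n_wqs;
 *		int cur_wq;
 *	};
 *
 *	static struct idxd_wq *wq_table_next_wq(int cpu)
 *	{
 *		struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
 *
 *		if (!entry->n_wqs)
 *			return NULL;
 *		if (++entry->cur_wq >= entry->n_wqs)
 *			entry->cur_wq = 0;
 *		return entry->wqs[entry->cur_wq];
 *	}
 *
 * i.e. a simple per-CPU round robin over the saved kernel wqs, rebuilt by
 * rebalance_wq_table() whenever a wq is added or removed.
 */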
mutex_lock(&iaa_devices_lock); + + if (list_empty(&iaa_devices)) { + ret = alloc_wq_table(wq->idxd->max_wqs); + if (ret) + goto err_alloc; + first_wq = true; + } + + ret = save_iaa_wq(wq); + if (ret) + goto err_save; + + rebalance_wq_table(); + + if (first_wq) { + iaa_crypto_enabled = true; + ret = iaa_register_compression_device(); + if (ret != 0) { + iaa_crypto_enabled = false; + dev_dbg(dev, "IAA compression device registration failed\n"); + goto err_register; + } + try_module_get(THIS_MODULE); + + pr_info("iaa_crypto now ENABLED\n"); + } + + mutex_unlock(&iaa_devices_lock); +out: + mutex_unlock(&wq->wq_lock); + + return ret; + +err_register: + remove_iaa_wq(wq); + free_iaa_wq(idxd_wq_get_private(wq)); +err_save: + if (first_wq) + free_wq_table(); +err_alloc: + mutex_unlock(&iaa_devices_lock); + idxd_drv_disable_wq(wq); +err: + wq->type = IDXD_WQT_NONE; + + goto out; +} + +static void iaa_crypto_remove(struct idxd_dev *idxd_dev) +{ + struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); + struct idxd_device *idxd = wq->idxd; + struct iaa_wq *iaa_wq; + bool free = false; + + idxd_wq_quiesce(wq); + + mutex_lock(&wq->wq_lock); + mutex_lock(&iaa_devices_lock); + + remove_iaa_wq(wq); + + spin_lock(&idxd->dev_lock); + iaa_wq = idxd_wq_get_private(wq); + if (!iaa_wq) { + spin_unlock(&idxd->dev_lock); + pr_err("%s: no iaa_wq available to remove\n", __func__); + goto out; + } + + if (iaa_wq->ref) { + iaa_wq->remove = true; + } else { + wq = iaa_wq->wq; + idxd_wq_set_private(wq, NULL); + free = true; + } + spin_unlock(&idxd->dev_lock); + if (free) { + __free_iaa_wq(iaa_wq); + kfree(iaa_wq); + } + + idxd_drv_disable_wq(wq); + rebalance_wq_table(); + + if (nr_iaa == 0) { + iaa_crypto_enabled = false; + free_wq_table(); + module_put(THIS_MODULE); + + pr_info("iaa_crypto now DISABLED\n"); + } +out: + mutex_unlock(&iaa_devices_lock); + mutex_unlock(&wq->wq_lock); +} + +static enum idxd_dev_type dev_types[] = { + IDXD_DEV_WQ, + IDXD_DEV_NONE, +}; + +static struct idxd_device_driver iaa_crypto_driver = { + .probe = iaa_crypto_probe, + .remove = iaa_crypto_remove, + .name = IDXD_SUBDRIVER_NAME, + .type = dev_types, + .desc_complete = iaa_desc_complete, +}; + +static int __init iaa_crypto_init_module(void) +{ + int ret = 0; + int node; + + nr_cpus = num_possible_cpus(); + for_each_node_with_cpus(node) + nr_nodes++; + if (!nr_nodes) { + pr_err("IAA couldn't find any nodes with cpus\n"); + return -ENODEV; + } + nr_cpus_per_node = nr_cpus / nr_nodes; + + if (crypto_has_comp("deflate-generic", 0, 0)) + deflate_generic_tfm = crypto_alloc_comp("deflate-generic", 0, 0); + + if (IS_ERR_OR_NULL(deflate_generic_tfm)) { + pr_err("IAA could not alloc %s tfm: errcode = %ld\n", + "deflate-generic", PTR_ERR(deflate_generic_tfm)); + return -ENOMEM; + } + + ret = iaa_aecs_init_fixed(); + if (ret < 0) { + pr_debug("IAA fixed compression mode init failed\n"); + goto err_aecs_init; + } + + ret = idxd_driver_register(&iaa_crypto_driver); + if (ret) { + pr_debug("IAA wq sub-driver registration failed\n"); + goto err_driver_reg; + } + + ret = driver_create_file(&iaa_crypto_driver.drv, + &driver_attr_verify_compress); + if (ret) { + pr_debug("IAA verify_compress attr creation failed\n"); + goto err_verify_attr_create; + } + + ret = driver_create_file(&iaa_crypto_driver.drv, + &driver_attr_sync_mode); + if (ret) { + pr_debug("IAA sync mode attr creation failed\n"); + goto err_sync_attr_create; + } + + if (iaa_crypto_debugfs_init()) + pr_warn("debugfs init failed, stats not available\n"); + + pr_debug("initialized\n"); +out: + return 
ret; + +err_sync_attr_create: + driver_remove_file(&iaa_crypto_driver.drv, + &driver_attr_verify_compress); +err_verify_attr_create: + idxd_driver_unregister(&iaa_crypto_driver); +err_driver_reg: + iaa_aecs_cleanup_fixed(); +err_aecs_init: + crypto_free_comp(deflate_generic_tfm); + + goto out; +} + +static void __exit iaa_crypto_cleanup_module(void) +{ + if (iaa_unregister_compression_device()) + pr_debug("IAA compression device unregister failed\n"); + + iaa_crypto_debugfs_cleanup(); + driver_remove_file(&iaa_crypto_driver.drv, + &driver_attr_sync_mode); + driver_remove_file(&iaa_crypto_driver.drv, + &driver_attr_verify_compress); + idxd_driver_unregister(&iaa_crypto_driver); + iaa_aecs_cleanup_fixed(); + crypto_free_comp(deflate_generic_tfm); + + pr_debug("cleaned up\n"); +} + +MODULE_IMPORT_NS(IDXD); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_IDXD_DEVICE(0); +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("IAA Compression Accelerator Crypto Driver"); + +module_init(iaa_crypto_init_module); +module_exit(iaa_crypto_cleanup_module); diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..f5cc3d29ca19e81808465f38fc6f4748413bba08 --- /dev/null +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../dma/idxd/idxd.h" +#include +#include +#include "iaa_crypto.h" +#include "iaa_crypto_stats.h" + +static atomic64_t total_comp_calls; +static atomic64_t total_decomp_calls; +static atomic64_t total_sw_decomp_calls; +static atomic64_t total_comp_bytes_out; +static atomic64_t total_decomp_bytes_in; +static atomic64_t total_completion_einval_errors; +static atomic64_t total_completion_timeout_errors; +static atomic64_t total_completion_comp_buf_overflow_errors; + +static struct dentry *iaa_crypto_debugfs_root; + +void update_total_comp_calls(void) +{ + atomic64_inc(&total_comp_calls); +} + +void update_total_comp_bytes_out(int n) +{ + atomic64_add(n, &total_comp_bytes_out); +} + +void update_total_decomp_calls(void) +{ + atomic64_inc(&total_decomp_calls); +} + +void update_total_sw_decomp_calls(void) +{ + atomic64_inc(&total_sw_decomp_calls); +} + +void update_total_decomp_bytes_in(int n) +{ + atomic64_add(n, &total_decomp_bytes_in); +} + +void update_completion_einval_errs(void) +{ + atomic64_inc(&total_completion_einval_errors); +} + +void update_completion_timeout_errs(void) +{ + atomic64_inc(&total_completion_timeout_errors); +} + +void update_completion_comp_buf_overflow_errs(void) +{ + atomic64_inc(&total_completion_comp_buf_overflow_errors); +} + +void update_wq_comp_calls(struct idxd_wq *idxd_wq) +{ + struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); + + atomic64_inc(&wq->comp_calls); + atomic64_inc(&wq->iaa_device->comp_calls); +} + +void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) +{ + struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); + + atomic64_add(n, &wq->comp_bytes); + atomic64_add(n, &wq->iaa_device->comp_bytes); +} + +void update_wq_decomp_calls(struct idxd_wq *idxd_wq) +{ + struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); + + atomic64_inc(&wq->decomp_calls); + atomic64_inc(&wq->iaa_device->decomp_calls); +} + +void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) +{ + struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); + + 
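	/* bump both the per-wq counter and its parent device's rollup */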
atomic64_add(n, &wq->decomp_bytes); + atomic64_add(n, &wq->iaa_device->decomp_bytes); +} + +static void reset_iaa_crypto_stats(void) +{ + atomic64_set(&total_comp_calls, 0); + atomic64_set(&total_decomp_calls, 0); + atomic64_set(&total_sw_decomp_calls, 0); + atomic64_set(&total_comp_bytes_out, 0); + atomic64_set(&total_decomp_bytes_in, 0); + atomic64_set(&total_completion_einval_errors, 0); + atomic64_set(&total_completion_timeout_errors, 0); + atomic64_set(&total_completion_comp_buf_overflow_errors, 0); +} + +static void reset_wq_stats(struct iaa_wq *wq) +{ + atomic64_set(&wq->comp_calls, 0); + atomic64_set(&wq->comp_bytes, 0); + atomic64_set(&wq->decomp_calls, 0); + atomic64_set(&wq->decomp_bytes, 0); +} + +static void reset_device_stats(struct iaa_device *iaa_device) +{ + struct iaa_wq *iaa_wq; + + atomic64_set(&iaa_device->comp_calls, 0); + atomic64_set(&iaa_device->comp_bytes, 0); + atomic64_set(&iaa_device->decomp_calls, 0); + atomic64_set(&iaa_device->decomp_bytes, 0); + + list_for_each_entry(iaa_wq, &iaa_device->wqs, list) + reset_wq_stats(iaa_wq); +} + +static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq) +{ + seq_printf(m, " name: %s\n", iaa_wq->wq->name); + seq_printf(m, " comp_calls: %llu\n", + atomic64_read(&iaa_wq->comp_calls)); + seq_printf(m, " comp_bytes: %llu\n", + atomic64_read(&iaa_wq->comp_bytes)); + seq_printf(m, " decomp_calls: %llu\n", + atomic64_read(&iaa_wq->decomp_calls)); + seq_printf(m, " decomp_bytes: %llu\n\n", + atomic64_read(&iaa_wq->decomp_bytes)); +} + +static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device) +{ + struct iaa_wq *iaa_wq; + + seq_puts(m, "iaa device:\n"); + seq_printf(m, " id: %d\n", iaa_device->idxd->id); + seq_printf(m, " n_wqs: %d\n", iaa_device->n_wq); + seq_printf(m, " comp_calls: %llu\n", + atomic64_read(&iaa_device->comp_calls)); + seq_printf(m, " comp_bytes: %llu\n", + atomic64_read(&iaa_device->comp_bytes)); + seq_printf(m, " decomp_calls: %llu\n", + atomic64_read(&iaa_device->decomp_calls)); + seq_printf(m, " decomp_bytes: %llu\n", + atomic64_read(&iaa_device->decomp_bytes)); + seq_puts(m, " wqs:\n"); + + list_for_each_entry(iaa_wq, &iaa_device->wqs, list) + wq_show(m, iaa_wq); +} + +static int global_stats_show(struct seq_file *m, void *v) +{ + seq_puts(m, "global stats:\n"); + seq_printf(m, " total_comp_calls: %llu\n", + atomic64_read(&total_comp_calls)); + seq_printf(m, " total_decomp_calls: %llu\n", + atomic64_read(&total_decomp_calls)); + seq_printf(m, " total_sw_decomp_calls: %llu\n", + atomic64_read(&total_sw_decomp_calls)); + seq_printf(m, " total_comp_bytes_out: %llu\n", + atomic64_read(&total_comp_bytes_out)); + seq_printf(m, " total_decomp_bytes_in: %llu\n", + atomic64_read(&total_decomp_bytes_in)); + seq_printf(m, " total_completion_einval_errors: %llu\n", + atomic64_read(&total_completion_einval_errors)); + seq_printf(m, " total_completion_timeout_errors: %llu\n", + atomic64_read(&total_completion_timeout_errors)); + seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n", + atomic64_read(&total_completion_comp_buf_overflow_errors)); + + return 0; +} + +static int wq_stats_show(struct seq_file *m, void *v) +{ + struct iaa_device *iaa_device; + + mutex_lock(&iaa_devices_lock); + + list_for_each_entry(iaa_device, &iaa_devices, list) + device_stats_show(m, iaa_device); + + mutex_unlock(&iaa_devices_lock); + + return 0; +} + +static int iaa_crypto_stats_reset(void *data, u64 value) +{ + struct iaa_device *iaa_device; + + reset_iaa_crypto_stats(); + + 
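	/* globals are cleared above; per-device and per-wq counters are walked under the devices lock below */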
mutex_lock(&iaa_devices_lock); + + list_for_each_entry(iaa_device, &iaa_devices, list) + reset_device_stats(iaa_device); + + mutex_unlock(&iaa_devices_lock); + + return 0; +} + +static int wq_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, wq_stats_show, file); +} + +static const struct file_operations wq_stats_fops = { + .open = wq_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int global_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, global_stats_show, file); +} + +static const struct file_operations global_stats_fops = { + .open = global_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n"); + +int __init iaa_crypto_debugfs_init(void) +{ + if (!debugfs_initialized()) + return -ENODEV; + + iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL); + + debugfs_create_file("global_stats", 0644, iaa_crypto_debugfs_root, NULL, + &global_stats_fops); + debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL, + &wq_stats_fops); + debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL, + &wq_stats_reset_fops); + + return 0; +} + +void __exit iaa_crypto_debugfs_cleanup(void) +{ + debugfs_remove_recursive(iaa_crypto_debugfs_root); +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..3787a5f507eb2aecd8565a19de39615404973080 --- /dev/null +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Intel Corporation. All rights rsvd. 
*/ + +#ifndef __CRYPTO_DEV_IAA_CRYPTO_STATS_H__ +#define __CRYPTO_DEV_IAA_CRYPTO_STATS_H__ + +#if defined(CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS) +int iaa_crypto_debugfs_init(void); +void iaa_crypto_debugfs_cleanup(void); + +void update_total_comp_calls(void); +void update_total_comp_bytes_out(int n); +void update_total_decomp_calls(void); +void update_total_sw_decomp_calls(void); +void update_total_decomp_bytes_in(int n); +void update_completion_einval_errs(void); +void update_completion_timeout_errs(void); +void update_completion_comp_buf_overflow_errs(void); + +void update_wq_comp_calls(struct idxd_wq *idxd_wq); +void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n); +void update_wq_decomp_calls(struct idxd_wq *idxd_wq); +void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n); + +#else +static inline int iaa_crypto_debugfs_init(void) { return 0; } +static inline void iaa_crypto_debugfs_cleanup(void) {} + +static inline void update_total_comp_calls(void) {} +static inline void update_total_comp_bytes_out(int n) {} +static inline void update_total_decomp_calls(void) {} +static inline void update_total_sw_decomp_calls(void) {} +static inline void update_total_decomp_bytes_in(int n) {} +static inline void update_completion_einval_errs(void) {} +static inline void update_completion_timeout_errs(void) {} +static inline void update_completion_comp_buf_overflow_errs(void) {} + +static inline void update_wq_comp_calls(struct idxd_wq *idxd_wq) {} +static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {} +static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {} +static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {} + +#endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS + +#endif diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig index 1220cc86f9100af58dca9d639080c3de4b9f84e8..02fb8abe4e6ed3eae100f9be715ea276452a0653 100644 --- a/drivers/crypto/intel/qat/Kconfig +++ b/drivers/crypto/intel/qat/Kconfig @@ -59,6 +59,17 @@ config CRYPTO_DEV_QAT_4XXX To compile this as a module, choose M here: the module will be called qat_4xxx. +config CRYPTO_DEV_QAT_420XX + tristate "Support for Intel(R) QAT_420XX" + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) + select CRYPTO_DEV_QAT + help + Support for Intel(R) QuickAssist Technology QAT_420xx + for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_420xx. + config CRYPTO_DEV_QAT_DH895xCCVF tristate "Support for Intel(R) DH895xCC Virtual Function" depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) @@ -95,3 +106,17 @@ config CRYPTO_DEV_QAT_C62XVF To compile this as a module, choose M here: the module will be called qat_c62xvf. + +config CRYPTO_DEV_QAT_ERROR_INJECTION + bool "Support for Intel(R) QAT Devices Heartbeat Error Injection" + depends on CRYPTO_DEV_QAT + depends on DEBUG_FS + help + Enables a mechanism that allows the injection of a heartbeat error + on Intel(R) QuickAssist devices for testing purposes. + + This is intended for developer use only. + If unsure, say N. 
+ + This functionality is available via a debugfs entry of the + Intel(R) QuickAssist device. diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile index 258c8a626ce04989925e5be69fe1a7c5c2327577..235b69f4f3f72a29b7e3a6b914c08cdcac4d4dd2 100644 --- a/drivers/crypto/intel/qat/Makefile +++ b/drivers/crypto/intel/qat/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/ +obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/ diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a90fbe00b3c88fd47548f0a0a9a57e95bd79a09e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +ccflags-y := -I $(srctree)/$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o +qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c new file mode 100644 index 0000000000000000000000000000000000000000..78f0ea49254dbbd814d5b9614c1ab24fa1b45f8a --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_420xx_hw_data.h" +#include "icp_qat_hw.h" + +#define ADF_AE_GROUP_0 GENMASK(3, 0) +#define ADF_AE_GROUP_1 GENMASK(7, 4) +#define ADF_AE_GROUP_2 GENMASK(11, 8) +#define ADF_AE_GROUP_3 GENMASK(15, 12) +#define ADF_AE_GROUP_4 BIT(16) + +#define ENA_THD_MASK_ASYM GENMASK(1, 0) +#define ENA_THD_MASK_SYM GENMASK(3, 0) +#define ENA_THD_MASK_DC GENMASK(1, 0) + +static const char * const adf_420xx_fw_objs[] = { + [ADF_FW_SYM_OBJ] = ADF_420XX_SYM_OBJ, + [ADF_FW_ASYM_OBJ] = ADF_420XX_ASYM_OBJ, + [ADF_FW_DC_OBJ] = ADF_420XX_DC_OBJ, + [ADF_FW_ADMIN_OBJ] = ADF_420XX_ADMIN_OBJ, +}; + +static const struct adf_fw_config adf_fw_cy_config[] = { + {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_dc_config[] = { + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_sym_config[] = { + {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_asym_config[] = { + {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_asym_dc_config[] = { + {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ}, + 
{ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_sym_dc_config[] = { + {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_dcc_config[] = { + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + + +static struct adf_hw_device_class adf_420xx_class = { + .name = ADF_420XX_DEVICE_NAME, + .type = DEV_420XX, + .instances = 0, +}; + +static u32 get_ae_mask(struct adf_hw_device_data *self) +{ + u32 me_disable = self->fuses; + + return ~me_disable & ADF_420XX_ACCELENGINES_MASK; +} + +static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) +{ + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return ARRAY_SIZE(adf_fw_cy_config); + case SVC_DC: + return ARRAY_SIZE(adf_fw_dc_config); + case SVC_DCC: + return ARRAY_SIZE(adf_fw_dcc_config); + case SVC_SYM: + return ARRAY_SIZE(adf_fw_sym_config); + case SVC_ASYM: + return ARRAY_SIZE(adf_fw_asym_config); + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return ARRAY_SIZE(adf_fw_asym_dc_config); + case SVC_SYM_DC: + case SVC_DC_SYM: + return ARRAY_SIZE(adf_fw_sym_dc_config); + default: + return 0; + } +} + +static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) +{ + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return adf_fw_cy_config; + case SVC_DC: + return adf_fw_dc_config; + case SVC_DCC: + return adf_fw_dcc_config; + case SVC_SYM: + return adf_fw_sym_config; + case SVC_ASYM: + return adf_fw_asym_config; + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return adf_fw_asym_dc_config; + case SVC_SYM_DC: + case SVC_DC_SYM: + return adf_fw_sym_dc_config; + default: + return NULL; + } +} + +static void update_ae_mask(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + const struct adf_fw_config *fw_config; + u32 config_ae_mask = 0; + u32 ae_mask, num_objs; + int i; + + ae_mask = get_ae_mask(hw_data); + + /* Modify the AE mask based on the firmware configuration loaded */ + fw_config = get_fw_config(accel_dev); + num_objs = uof_get_num_objs(accel_dev); + + config_ae_mask |= ADF_420XX_ADMIN_AE_MASK; + for (i = 0; i < num_objs; i++) + config_ae_mask |= fw_config[i].ae_mask; + + hw_data->ae_mask = ae_mask & config_ae_mask; +} + +static u32 get_accel_cap(struct adf_accel_dev *accel_dev) +{ + u32 capabilities_sym, capabilities_asym, capabilities_dc; + struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; + u32 capabilities_dcc; + u32 fusectl1; + + /* As a side effect, update ae_mask based on configuration */ + update_ae_mask(accel_dev); + + /* Read accelerator capabilities mask */ + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1); + + capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | + ICP_ACCEL_CAPABILITIES_CIPHER | + ICP_ACCEL_CAPABILITIES_AUTHENTICATION | + ICP_ACCEL_CAPABILITIES_SHA3 | + ICP_ACCEL_CAPABILITIES_SHA3_EXT | + ICP_ACCEL_CAPABILITIES_HKDF | + ICP_ACCEL_CAPABILITIES_CHACHA_POLY | + ICP_ACCEL_CAPABILITIES_AESGCM_SPC | + ICP_ACCEL_CAPABILITIES_SM3 | + ICP_ACCEL_CAPABILITIES_SM4 | + ICP_ACCEL_CAPABILITIES_AES_V2 | + ICP_ACCEL_CAPABILITIES_ZUC | + ICP_ACCEL_CAPABILITIES_ZUC_256 | + ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT | + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN; + + /* A set bit in fusectl1 means the feature is OFF in this SKU 
*/ + if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE) + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + + capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | + ICP_ACCEL_CAPABILITIES_SM2 | + ICP_ACCEL_CAPABILITIES_ECEDMONT; + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) { + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; + } + + capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | + ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) { + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + } + + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return capabilities_sym | capabilities_asym; + case SVC_DC: + return capabilities_dc; + case SVC_DCC: + /* + * Sym capabilities are available for chaining operations, + * but sym crypto instances cannot be supported + */ + capabilities_dcc = capabilities_dc | capabilities_sym; + capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + return capabilities_dcc; + case SVC_SYM: + return capabilities_sym; + case SVC_ASYM: + return capabilities_asym; + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return capabilities_asym | capabilities_dc; + case SVC_SYM_DC: + case SVC_DC_SYM: + return capabilities_sym | capabilities_dc; + default: + return 0; + } +} + +static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) +{ + if (adf_gen4_init_thd2arb_map(accel_dev)) + dev_warn(&GET_DEV(accel_dev), + "Failed to generate thread to arbiter mapping"); + + return GET_HW_DATA(accel_dev)->thd_to_arb_map; +} + +static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) +{ + rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; + rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET; + rl_data->r2l_offset = 
ADF_GEN4_RL_R2L_OFFSET; + rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET; + rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET; + + rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV; + rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL; + rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION; + rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM; + rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM; + rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC; + rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC; + rl_data->scale_ref = ADF_420XX_RL_SLICE_REF; +} + +static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask) +{ + switch (ae_mask) { + case ADF_AE_GROUP_0: + return RP_GROUP_0; + case ADF_AE_GROUP_1: + case ADF_AE_GROUP_3: + return RP_GROUP_1; + case ADF_AE_GROUP_2: + if (get_fw_config(accel_dev) == adf_fw_cy_config) + return RP_GROUP_0; + else + return RP_GROUP_1; + default: + dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized"); + return -EINVAL; + } +} + +static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; + } +} + +static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, + const char * const fw_objs[], int num_objs) +{ + const struct adf_fw_config *fw_config; + int id; + + fw_config = get_fw_config(accel_dev); + if (fw_config) + id = fw_config[obj_num].obj; + else + id = -EINVAL; + + if (id < 0 || id > num_objs) + return NULL; + + return fw_objs[id]; +} + +static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + int num_fw_objs = ARRAY_SIZE(adf_420xx_fw_objs); + + return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs); +} + +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return -EINVAL; + + return fw_config[obj_num].obj; +} + +static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return 0; + + return fw_config[obj_num].ae_mask; +} + +static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) +{ + dev_err_mask->cppagentcmdpar_mask = ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK; + dev_err_mask->parerr_ath_cph_mask = ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK; + dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK; + dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK; + dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK; +} + +void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) +{ + hw_data->dev_class = &adf_420xx_class; + hw_data->instance_id = adf_420xx_class.instances++; + hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS; + hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF; + hw_data->num_rings_per_bank = 
ADF_GEN4_NUM_RINGS_PER_BANK; + hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS; + hw_data->num_engines = ADF_420XX_MAX_ACCELENGINES; + hw_data->num_logical_accel = 1; + hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK; + hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = adf_gen4_enable_error_correction; + hw_data->get_accel_mask = adf_gen4_get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = adf_gen4_get_num_accels; + hw_data->get_num_aes = adf_gen4_get_num_aes; + hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id; + hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id; + hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id; + hw_data->get_arb_info = adf_gen4_get_arb_info; + hw_data->get_admin_info = adf_gen4_get_admin_info; + hw_data->get_accel_cap = get_accel_cap; + hw_data->get_sku = adf_gen4_get_sku; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->send_admin_init = adf_send_admin_init; + hw_data->init_arb = adf_init_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; + hw_data->enable_ints = adf_gen4_enable_ints; + hw_data->init_device = adf_gen4_init_device; + hw_data->reset_device = adf_reset_flr; + hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK; + hw_data->num_rps = ADF_GEN4_MAX_RPS; + hw_data->fw_name = ADF_420XX_FW; + hw_data->fw_mmp_name = ADF_420XX_MMP; + hw_data->uof_get_name = uof_get_name_420xx; + hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; + hw_data->uof_get_ae_mask = uof_get_ae_mask; + hw_data->get_rp_group = get_rp_group; + hw_data->get_ena_thd_mask = get_ena_thd_mask; + hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; + hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; + hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; + hw_data->disable_iov = adf_disable_sriov; + hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; + hw_data->enable_pm = adf_gen4_enable_pm; + hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; + hw_data->dev_config = adf_gen4_dev_config; + hw_data->start_timer = adf_gen4_timer_start; + hw_data->stop_timer = adf_gen4_timer_stop; + hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; + hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->clock_frequency = ADF_420XX_AE_FREQ; + + adf_gen4_set_err_mask(&hw_data->dev_err_mask); + adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); + adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); + adf_gen4_init_dc_ops(&hw_data->dc_ops); + adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_gen4_init_tl_data(&hw_data->tl_data); + adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops); + adf_init_rl_data(&hw_data->rl_data); +} + +void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h new file mode 100644 index 0000000000000000000000000000000000000000..99abbfc1482063c4f3aacc942e8605510538160d --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_420XX_HW_DATA_H_ +#define ADF_420XX_HW_DATA_H_ + +#include + +#define 
ADF_420XX_MAX_ACCELENGINES 17 + +#define ADF_420XX_ACCELENGINES_MASK 0x1FFFF +#define ADF_420XX_ADMIN_AE_MASK 0x10000 + +#define ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK (0xFF) +#define ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK (0xFF00FF) +#define ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK (0x10001) +#define ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK (0xF0007) +#define ADF_420XX_PARITYERRORMASK_PKE_MASK (0xFFF) +#define ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK (0x3FF03FF) + +/* + * SSMFEATREN bit mask + * BIT(4) - enables parity detection on CPP + * BIT(12) - enables the logging of push/pull data errors + * in pperr register + * BIT(16) - BIT(27) - enable parity detection on SPPs + */ +#define ADF_420XX_SSMFEATREN_MASK \ + (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27)) + +/* Firmware Binaries */ +#define ADF_420XX_FW "qat_420xx.bin" +#define ADF_420XX_MMP "qat_420xx_mmp.bin" +#define ADF_420XX_SYM_OBJ "qat_420xx_sym.bin" +#define ADF_420XX_DC_OBJ "qat_420xx_dc.bin" +#define ADF_420XX_ASYM_OBJ "qat_420xx_asym.bin" +#define ADF_420XX_ADMIN_OBJ "qat_420xx_admin.bin" + +/* RL constants */ +#define ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV 100 +#define ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL 102 +#define ADF_420XX_RL_DCPR_CORRECTION 1 +#define ADF_420XX_RL_SCANS_PER_SEC 954 +#define ADF_420XX_RL_MAX_TP_ASYM 173750UL +#define ADF_420XX_RL_MAX_TP_SYM 95000UL +#define ADF_420XX_RL_MAX_TP_DC 40000UL +#define ADF_420XX_RL_SLICE_REF 1000UL + +/* Clocks frequency */ +#define ADF_420XX_AE_FREQ (1000 * HZ_PER_MHZ) + +void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id); +void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data); + +#endif diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c new file mode 100644 index 0000000000000000000000000000000000000000..2a3598409eeb5132056a535f0863b627224cc2c4 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "adf_420xx_hw_data.h" + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + if (accel_dev->hw_device) { + adf_clean_hw_data_420xx(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_dbgfs_exit(accel_dev); + adf_cfg_dev_remove(accel_dev); + adf_devmgr_rm_dev(accel_dev, NULL); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + unsigned int i, bar_nr; + unsigned long bar_mask; + struct adf_bar *bar; + int ret; + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + /* + * If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. 
+ */ + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); + return -EINVAL; + } + + accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL); + if (!accel_dev) + return -ENOMEM; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* + * Add accel device to accel table + * This should be called before adf_cleanup_accel is called + */ + if (adf_devmgr_add_dev(accel_dev, NULL)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + return -EFAULT; + } + + accel_dev->owner = THIS_MODULE; + /* Allocate and initialise device hardware meta-data structure */ + hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + + accel_dev->hw_device = hw_data; + adf_init_hw_data_420xx(accel_dev->hw_device, ent->device); + + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + /* If the device has no acceleration engines then ignore it */ + if (!hw_data->accel_mask || !hw_data->ae_mask || + (~hw_data->ae_mask & 0x01)) { + dev_err(&pdev->dev, "No acceleration units found.\n"); + ret = -EFAULT; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* Enable PCI device */ + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "Can't enable PCI device.\n"); + goto out_err; + } + + /* Set DMA identifier */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "No usable DMA configuration.\n"); + goto out_err; + } + + ret = adf_gen4_cfg_dev_init(accel_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize configuration.\n"); + goto out_err; + } + + /* Get accelerator capabilities mask */ + hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); + if (!hw_data->accel_capabilities_mask) { + dev_err(&pdev->dev, "Failed to get capabilities mask.\n"); + ret = -EINVAL; + goto out_err; + } + + /* Find and map all the device's BARS */ + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; + + ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "Failed to map pci regions.\n"); + goto out_err; + } + + i = 0; + for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) { + bar = &accel_pci_dev->pci_bars[i++]; + bar->virt_addr = pcim_iomap_table(pdev)[bar_nr]; + } + + pci_set_master(pdev); + + if (pci_save_state(pdev)) { + dev_err(&pdev->dev, "Failed to save pci state.\n"); + ret = -ENOMEM; + goto out_err; + } + + accel_dev->ras_errors.enabled = true; + adf_dbgfs_init(accel_dev); + + ret = adf_dev_up(accel_dev, true); + if (ret) + goto out_err_dev_stop; + + ret = adf_sysfs_init(accel_dev); + if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + adf_dev_down(accel_dev, false); +out_err: + adf_cleanup_accel(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + adf_dev_down(accel_dev, false); + adf_cleanup_accel(accel_dev); +} + +static struct 
pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_420XX_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; + +module_pci_driver(adf_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Intel"); +MODULE_FIRMWARE(ADF_420XX_FW); +MODULE_FIRMWARE(ADF_420XX_MMP); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); +MODULE_SOFTDEP("pre: crypto-intel_qat"); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 403f0737144507016102ea371f706a53643be50b..bbd92c017c28edbe18a63e9359a1f2f5c7a3bb1d 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -2,15 +2,22 @@ /* Copyright(c) 2020 - 2021 Intel Corporation */ #include #include +#include #include #include #include #include +#include +#include #include +#include #include #include #include +#include "adf_gen4_ras.h" #include +#include +#include #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -18,12 +25,10 @@ #define ADF_AE_GROUP_1 GENMASK(7, 4) #define ADF_AE_GROUP_2 BIT(8) -enum adf_fw_objs { - ADF_FW_SYM_OBJ, - ADF_FW_ASYM_OBJ, - ADF_FW_DC_OBJ, - ADF_FW_ADMIN_OBJ, -}; +#define ENA_THD_MASK_ASYM GENMASK(1, 0) +#define ENA_THD_MASK_ASYM_401XX GENMASK(5, 0) +#define ENA_THD_MASK_SYM GENMASK(6, 0) +#define ENA_THD_MASK_DC GENMASK(1, 0) static const char * const adf_4xxx_fw_objs[] = { [ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ, @@ -39,11 +44,6 @@ static const char * const adf_402xx_fw_objs[] = { [ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ, }; -struct adf_fw_config { - u32 ae_mask; - enum adf_fw_objs obj; -}; - static const struct adf_fw_config adf_fw_cy_config[] = { {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, @@ -93,36 +93,12 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config)) static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config)); static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config)); -/* Worker thread to service arbiter mappings */ -static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { - 0x5555555, 0x5555555, 0x5555555, 0x5555555, - 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, - 0x0 -}; - -static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = { - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x0 -}; - -static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = { - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0 -}; - static struct adf_hw_device_class adf_4xxx_class = { .name = ADF_4XXX_DEVICE_NAME, .type = DEV_4XXX, .instances = 0, }; -static u32 get_accel_mask(struct adf_hw_device_data *self) -{ - return ADF_4XXX_ACCELERATORS_MASK; -} - static u32 get_ae_mask(struct adf_hw_device_data *self) { u32 me_disable = self->fuses; @@ -130,55 +106,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) return ~me_disable & ADF_4XXX_ACCELENGINES_MASK; } -static u32 get_num_accels(struct adf_hw_device_data *self) -{ - return ADF_4XXX_MAX_ACCELERATORS; -} - -static u32 get_num_aes(struct adf_hw_device_data *self) -{ - if (!self || !self->ae_mask) - return 0; - - return hweight32(self->ae_mask); -} - -static u32 get_misc_bar_id(struct adf_hw_device_data *self) -{ - return ADF_4XXX_PMISC_BAR; -} - 
-static u32 get_etr_bar_id(struct adf_hw_device_data *self) -{ - return ADF_4XXX_ETR_BAR; -} - -static u32 get_sram_bar_id(struct adf_hw_device_data *self) -{ - return ADF_4XXX_SRAM_BAR; -} - -/* - * The vector routing table is used to select the MSI-X entry to use for each - * interrupt source. - * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts. - * The final entry corresponds to VF2PF or error interrupts. - * This vector table could be used to configure one MSI-X entry to be shared - * between multiple interrupt sources. - * - * The default routing is set to have a one to one correspondence between the - * interrupt source and the MSI-X entry used. - */ -static void set_msix_default_rttable(struct adf_accel_dev *accel_dev) -{ - void __iomem *csr; - int i; - - csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++) - ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i); -} - static u32 get_accel_cap(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; @@ -187,7 +114,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) u32 fusectl1; /* Read accelerator capabilities mask */ - pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1); capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | @@ -202,27 +129,27 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_AES_V2; /* A set bit in fusectl1 means the feature is OFF in this SKU */ - if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4; } @@ -232,7 +159,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_SM2 | ICP_ACCEL_CAPABILITIES_ECEDMONT; - if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) { capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; @@ -243,7 +170,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; - if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) { capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; capabilities_dc &= 
~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; @@ -279,98 +206,34 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) } } -static enum dev_sku_info get_sku(struct adf_hw_device_data *self) -{ - return DEV_SKU_1; -} - static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { - switch (adf_get_service_enabled(accel_dev)) { - case SVC_DC: - return thrd_to_arb_map_dc; - case SVC_DCC: - return thrd_to_arb_map_dcc; - default: - return default_thrd_to_arb_map; - } -} - -static void get_arb_info(struct arb_info *arb_info) -{ - arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG; - arb_info->arb_offset = ADF_4XXX_ARB_OFFSET; - arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET; -} - -static void get_admin_info(struct admin_info *admin_csrs_info) -{ - admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET; - admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET; - admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET; -} - -static u32 get_heartbeat_clock(struct adf_hw_device_data *self) -{ - /* - * 4XXX uses KPT counter for HB - */ - return ADF_4XXX_KPT_COUNTER_FREQ; -} - -static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) -{ - struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; - void __iomem *csr = misc_bar->virt_addr; - - /* Enable all in errsou3 except VFLR notification on host */ - ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY); -} - -static void adf_enable_ints(struct adf_accel_dev *accel_dev) -{ - void __iomem *addr; - - addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - - /* Enable bundle interrupts */ - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0); - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0); + if (adf_gen4_init_thd2arb_map(accel_dev)) + dev_warn(&GET_DEV(accel_dev), + "Failed to generate thread to arbiter mapping"); - /* Enable misc interrupts */ - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0); + return GET_HW_DATA(accel_dev)->thd_to_arb_map; } -static int adf_init_device(struct adf_accel_dev *accel_dev) +static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) { - void __iomem *addr; - u32 status; - u32 csr; - int ret; - - addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - - /* Temporarily mask PM interrupt */ - csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2); - csr |= ADF_GEN4_PM_SOU; - ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr); - - /* Set DRV_ACTIVE bit to power up the device */ - ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE); - - /* Poll status register to make sure the device is powered up */ - ret = read_poll_timeout(ADF_CSR_RD, status, - status & ADF_GEN4_PM_INIT_STATE, - ADF_GEN4_PM_POLL_DELAY_US, - ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr, - ADF_GEN4_PM_STATUS); - if (ret) - dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); - - return ret; + rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; + rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET; + rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET; + rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET; + rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET; + + rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV; + rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL; + rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION; + rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM; + rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM; + rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC; + rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC; 
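+ /* Scaling reference used by the common rate-limiting code */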
+ rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF; } -static u32 uof_get_num_objs(void) +static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) { return ARRAY_SIZE(adf_fw_cy_config); } @@ -400,65 +263,63 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev } } -enum adf_rp_groups { - RP_GROUP_0 = 0, - RP_GROUP_1, - RP_GROUP_COUNT -}; +static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask) +{ + switch (ae_mask) { + case ADF_AE_GROUP_0: + return RP_GROUP_0; + case ADF_AE_GROUP_1: + return RP_GROUP_1; + default: + dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized"); + return -EINVAL; + } +} -static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { - enum adf_cfg_service_type rps[RP_GROUP_COUNT]; const struct adf_fw_config *fw_config; - u16 ring_to_svc_map; - int i, j; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; fw_config = get_fw_config(accel_dev); if (!fw_config) - return 0; - - /* If dcc, all rings handle compression requests */ - if (adf_get_service_enabled(accel_dev) == SVC_DCC) { - for (i = 0; i < RP_GROUP_COUNT; i++) - rps[i] = COMP; - goto set_mask; + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; } +} - for (i = 0; i < RP_GROUP_COUNT; i++) { - switch (fw_config[i].ae_mask) { - case ADF_AE_GROUP_0: - j = RP_GROUP_0; - break; - case ADF_AE_GROUP_1: - j = RP_GROUP_1; - break; - default: - return 0; - } - - switch (fw_config[i].obj) { - case ADF_FW_SYM_OBJ: - rps[j] = SYM; - break; - case ADF_FW_ASYM_OBJ: - rps[j] = ASYM; - break; - case ADF_FW_DC_OBJ: - rps[j] = COMP; - break; - default: - rps[j] = 0; - break; - } - } +static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; -set_mask: - ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | - rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; - return ring_to_svc_map; + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM_401XX; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; + } } static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, @@ -493,6 +354,20 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs); } +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return -EINVAL; + + return fw_config[obj_num].obj; +} + static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { const struct adf_fw_config *fw_config; @@ -504,73 +379,102 @@ static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) return 
fw_config[obj_num].ae_mask; } +static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) +{ + dev_err_mask->cppagentcmdpar_mask = ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK; + dev_err_mask->parerr_ath_cph_mask = ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK; + dev_err_mask->parerr_cpr_xlt_mask = ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK; + dev_err_mask->parerr_dcpr_ucs_mask = ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK; + dev_err_mask->parerr_pke_mask = ADF_4XXX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->ssmfeatren_mask = ADF_4XXX_SSMFEATREN_MASK; +} + void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) { hw_data->dev_class = &adf_4xxx_class; hw_data->instance_id = adf_4xxx_class.instances++; - hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS; - hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF; - hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK; - hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS; + hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS; + hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF; + hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK; + hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS; hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES; hw_data->num_logical_accel = 1; - hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET; - hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK; + hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK; hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; - hw_data->enable_error_correction = adf_enable_error_correction; - hw_data->get_accel_mask = get_accel_mask; + hw_data->enable_error_correction = adf_gen4_enable_error_correction; + hw_data->get_accel_mask = adf_gen4_get_accel_mask; hw_data->get_ae_mask = get_ae_mask; - hw_data->get_num_accels = get_num_accels; - hw_data->get_num_aes = get_num_aes; - hw_data->get_sram_bar_id = get_sram_bar_id; - hw_data->get_etr_bar_id = get_etr_bar_id; - hw_data->get_misc_bar_id = get_misc_bar_id; - hw_data->get_arb_info = get_arb_info; - hw_data->get_admin_info = get_admin_info; + hw_data->get_num_accels = adf_gen4_get_num_accels; + hw_data->get_num_aes = adf_gen4_get_num_aes; + hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id; + hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id; + hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id; + hw_data->get_arb_info = adf_gen4_get_arb_info; + hw_data->get_admin_info = adf_gen4_get_admin_info; hw_data->get_accel_cap = get_accel_cap; - hw_data->get_sku = get_sku; + hw_data->get_sku = adf_gen4_get_sku; hw_data->init_admin_comms = adf_init_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->send_admin_init = adf_send_admin_init; hw_data->init_arb = adf_init_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; - hw_data->enable_ints = adf_enable_ints; - hw_data->init_device = adf_init_device; + hw_data->enable_ints = adf_gen4_enable_ints; + hw_data->init_device = adf_gen4_init_device; hw_data->reset_device = adf_reset_flr; hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; + hw_data->num_rps = ADF_GEN4_MAX_RPS; switch (dev_id) { case ADF_402XX_PCI_DEVICE_ID: hw_data->fw_name = ADF_402XX_FW; hw_data->fw_mmp_name = ADF_402XX_MMP; hw_data->uof_get_name = uof_get_name_402xx; + hw_data->get_ena_thd_mask = get_ena_thd_mask; + break; + case ADF_401XX_PCI_DEVICE_ID: + hw_data->fw_name = ADF_4XXX_FW; + hw_data->fw_mmp_name = ADF_4XXX_MMP; + hw_data->uof_get_name = 
uof_get_name_4xxx; + hw_data->get_ena_thd_mask = get_ena_thd_mask_401xx; break; - default: hw_data->fw_name = ADF_4XXX_FW; hw_data->fw_mmp_name = ADF_4XXX_MMP; hw_data->uof_get_name = uof_get_name_4xxx; + hw_data->get_ena_thd_mask = get_ena_thd_mask; + break; } hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; hw_data->uof_get_ae_mask = uof_get_ae_mask; - hw_data->set_msix_rttable = set_msix_default_rttable; + hw_data->get_rp_group = get_rp_group; + hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; - hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; + hw_data->bank_state_save = adf_gen4_bank_state_save; + hw_data->bank_state_restore = adf_gen4_bank_state_restore; hw_data->enable_pm = adf_gen4_enable_pm; hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; hw_data->dev_config = adf_gen4_dev_config; hw_data->start_timer = adf_gen4_timer_start; hw_data->stop_timer = adf_gen4_timer_stop; - hw_data->get_hb_clock = get_heartbeat_clock; + hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->clock_frequency = ADF_4XXX_AE_FREQ; + adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen4_init_dc_ops(&hw_data->dc_ops); + adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_gen4_init_tl_data(&hw_data->tl_data); + adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops); + adf_init_rl_data(&hw_data->rl_data); } void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h index bb3d95a8fb2129db35b07964825f90a562c6e0d1..76388363ea8776e4646772f6f9569509fcfac311 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -6,49 +6,27 @@ #include #include -/* PCIe configuration space */ -#define ADF_4XXX_SRAM_BAR 0 -#define ADF_4XXX_PMISC_BAR 1 -#define ADF_4XXX_ETR_BAR 2 -#define ADF_4XXX_RX_RINGS_OFFSET 1 -#define ADF_4XXX_TX_RINGS_MASK 0x1 -#define ADF_4XXX_MAX_ACCELERATORS 1 #define ADF_4XXX_MAX_ACCELENGINES 9 -#define ADF_4XXX_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) -/* Physical function fuses */ -#define ADF_4XXX_FUSECTL0_OFFSET (0x2C8) -#define ADF_4XXX_FUSECTL1_OFFSET (0x2CC) -#define ADF_4XXX_FUSECTL2_OFFSET (0x2D0) -#define ADF_4XXX_FUSECTL3_OFFSET (0x2D4) -#define ADF_4XXX_FUSECTL4_OFFSET (0x2D8) -#define ADF_4XXX_FUSECTL5_OFFSET (0x2DC) - -#define ADF_4XXX_ACCELERATORS_MASK (0x1) #define ADF_4XXX_ACCELENGINES_MASK (0x1FF) #define ADF_4XXX_ADMIN_AE_MASK (0x100) -#define ADF_4XXX_ETR_MAX_BANKS 64 - -/* MSIX interrupt */ -#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET (0x41A040) -#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET (0x41A044) -#define ADF_4XXX_SMIAPF_MASK_OFFSET (0x41A084) -#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04)) - -/* Bank and ring configuration */ -#define ADF_4XXX_NUM_RINGS_PER_BANK 2 -#define ADF_4XXX_NUM_BANKS_PER_VF 4 - -/* Arbiter configuration */ -#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) -#define ADF_4XXX_ARB_OFFSET (0x0) -#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET (0x400) - -/* Admin Interface Reg Offset */ -#define ADF_4XXX_ADMINMSGUR_OFFSET (0x500574) -#define 
ADF_4XXX_ADMINMSGLR_OFFSET (0x500578) -#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970) +#define ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1F +#define ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK 0xF000F +#define ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK 0x10001 +#define ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK 0x30007 +#define ADF_4XXX_PARITYERRORMASK_PKE_MASK 0x3F + +/* + * SSMFEATREN bit mask + * BIT(4) - enables parity detection on CPP + * BIT(12) - enables the logging of push/pull data errors + * in pperr register + * BIT(16) - BIT(23) - enable parity detection on SPPs + */ +#define ADF_4XXX_SSMFEATREN_MASK \ + (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | \ + BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23)) /* Firmware Binaries */ #define ADF_4XXX_FW "qat_4xxx.bin" @@ -65,22 +43,20 @@ #define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin" #define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin" -/* Clocks frequency */ -#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) +/* RL constants */ +#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV 100 +#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL 102 +#define ADF_4XXX_RL_DCPR_CORRECTION 1 +#define ADF_4XXX_RL_SCANS_PER_SEC 954 +#define ADF_4XXX_RL_MAX_TP_ASYM 173750UL +#define ADF_4XXX_RL_MAX_TP_SYM 95000UL +#define ADF_4XXX_RL_MAX_TP_DC 45000UL +#define ADF_4XXX_RL_SLICE_REF 1000UL -/* qat_4xxx fuse bits are different from old GENs, redefine them */ -enum icp_qat_4xxx_slice_mask { - ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0), - ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1), - ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2), - ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3), - ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4), - ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5), - ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7), -}; +/* Clocks frequency */ +#define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ) void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id); void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data); -int adf_gen4_dev_config(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index f6f9e20f74b543cc9fd318a887c434ec01c9cee6..d26564cebdec4a1cc88bcef4ff40296efcaf134e 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -8,13 +8,10 @@ #include #include #include -#include +#include +#include #include "adf_4xxx_hw_data.h" -#include "adf_cfg_services.h" -#include "qat_compression.h" -#include "qat_crypto.h" -#include "adf_transport_access_macros.h" static const struct pci_device_id adf_pci_tbl[] = { { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), }, @@ -35,270 +32,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) adf_devmgr_rm_dev(accel_dev, NULL); } -static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev) -{ - const char *config; - int ret; - - config = accel_dev->accel_id % 2 ? 
ADF_CFG_DC : ADF_CFG_CY; - - ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); - if (ret) - return ret; - - /* Default configuration is crypto only for even devices - * and compression for odd devices - */ - ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, config, - ADF_STR); - if (ret) - return ret; - - adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS); - - return 0; -} - -static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) -{ - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - int banks = GET_MAX_BANKS(accel_dev); - int cpus = num_online_cpus(); - unsigned long bank, val; - int instances; - int ret; - int i; - - if (adf_hw_dev_has_crypto(accel_dev)) - instances = min(cpus, banks / 2); - else - instances = 0; - - for (i = 0; i < instances; i++) { - val = i; - bank = i * 2; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &bank, ADF_DEC); - if (ret) - goto err; - - bank += 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &bank, ADF_DEC); - if (ret) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, - i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); - val = 128; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); - ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, &val, ADF_DEC); - if (ret) - goto err; - } - - val = i; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - goto err; - - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n"); - return ret; -} - -static int adf_comp_dev_config(struct adf_accel_dev *accel_dev) -{ - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - int banks = GET_MAX_BANKS(accel_dev); - int cpus = num_online_cpus(); - unsigned long val; - int instances; - int ret; - int i; - - if (adf_hw_dev_has_compression(accel_dev)) - instances = min(cpus, banks); - else - instances = 0; - - for (i = 0; i < instances; i++) { - val = i; - snprintf(key, sizeof(key), ADF_DC "%d" 
ADF_RING_DC_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); - ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, &val, ADF_DEC); - if (ret) - goto err; - } - - val = i; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); - if (ret) - goto err; - - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n"); - return ret; -} - -static int adf_no_dev_config(struct adf_accel_dev *accel_dev) -{ - unsigned long val; - int ret; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - return ret; - - return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); -} - -int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) -{ - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - int ret; - - ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); - if (ret) - goto err; - - ret = adf_cfg_section_add(accel_dev, "Accelerator0"); - if (ret) - goto err; - - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - if (ret) - goto err; - - ret = sysfs_match_string(adf_cfg_services, services); - if (ret < 0) - goto err; - - switch (ret) { - case SVC_CY: - case SVC_CY2: - ret = adf_crypto_dev_config(accel_dev); - break; - case SVC_DC: - case SVC_DCC: - ret = adf_comp_dev_config(accel_dev); - break; - default: - ret = adf_no_dev_config(accel_dev); - break; - } - - if (ret) - goto err; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); - - return ret; - -err: - dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n"); - return ret; -} - static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct adf_accel_dev *accel_dev; @@ -348,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); @@ -381,7 +114,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err; } - ret = adf_cfg_dev_init(accel_dev); + ret = adf_gen4_cfg_dev_init(accel_dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize configuration.\n"); goto out_err; @@ -396,7 +129,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* Find and map all the device's BARS */ - 
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); if (ret) { @@ -418,6 +151,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err; } + accel_dev->ras_errors.enabled = true; adf_dbgfs_init(accel_dev); ret = adf_dev_up(accel_dev, true); @@ -469,3 +203,4 @@ MODULE_FIRMWARE(ADF_402XX_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); MODULE_SOFTDEP("pre: crypto-intel_qat"); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 9c00c441b602d2d2a5b22e73be20dac1eca552fc..201f9412c5823034a2b1a8b8cca47579c1856f41 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include #include +#include #include #include #include "adf_c3xxx_hw_data.h" diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index 468c9102093fce93303fe2cfeb3ed27df10a8598..956a4c85609a9504b8e73f23eed3c3f7add81b7a 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_C3XXX_FW); MODULE_FIRMWARE(ADF_C3XXX_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c index 84d9486e04de6bc9438d7384af1e67d07af34232..a512ca4efd3f9caef8a4cb581c28a54ad9946331 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c index d5a0ecca9d0bba4929863448d479d52258d00a8d..a8de9cd09c05a2608ce2c1d918ba3e9edbbcfb12 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c @@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 355a781693eb3fbd7af8ef01ae432168a0459d64..6b5b0cf9c7c7464cbde9178dab336dd4dfd8e7d3 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include #include +#include #include #include #include "adf_c62x_hw_data.h" diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index 0186921be93689d041b0e0b7a88ef4437ae49907..ad0ca4384998524db6a4b1a89f3a3c94fb8b522b 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c 
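The MODULE_IMPORT_NS(CRYPTO_QAT) lines added to each QAT module in this patch pair with the DEFAULT_SYMBOL_NAMESPACE define added to the qat_common Makefile below: symbols exported by intel_qat now live in the CRYPTO_QAT symbol namespace, and any module that uses them must import that namespace explicitly, otherwise modpost warns at build time and the module is rejected at load time. A sketch of the two sides, using only the mechanics visible in this patch::

	# exporter (qat_common/Makefile): put every EXPORT_SYMBOL*() in the namespace
	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CRYPTO_QAT

	/* consumer (each qat_* driver module), next to the other MODULE_*() macros */
	MODULE_IMPORT_NS(CRYPTO_QAT);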
@@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_C62X_FW); MODULE_FIRMWARE(ADF_C62X_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c index 751d7aa57fc7f04cd95c7ac42792f0286b76c4c5..4aaaaf921734689ed5c86c0fd8f4bba670205012 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c index c9ae6c0d0dca2ec39b872de9d0e8e443c8605b3a..53b8ddb63364197278c945e257b473a81ff4913f 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c @@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 8dbf146de3fa596c2e4536216161dd842e41d64a..6f9266edc9f17e3358d4a0a1b11bdf36d7cf8d5f 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o +ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CRYPTO_QAT intel_qat-objs := adf_cfg.o \ adf_isr.o \ adf_ctl_drv.o \ @@ -12,31 +13,49 @@ intel_qat-objs := adf_cfg.o \ adf_admin.o \ adf_hw_arbiter.o \ adf_sysfs.o \ + adf_sysfs_ras_counters.o \ + adf_gen2_hw_csr_data.o \ adf_gen2_hw_data.o \ adf_gen2_config.o \ + adf_gen4_config.o \ + adf_gen4_hw_csr_data.o \ adf_gen4_hw_data.o \ + adf_gen4_vf_mig.o \ adf_gen4_pm.o \ adf_gen2_dc.o \ adf_gen4_dc.o \ + adf_gen4_ras.o \ adf_gen4_timer.o \ adf_clock.o \ + adf_mstate_mgr.o \ qat_crypto.o \ qat_compression.o \ qat_comp_algs.o \ qat_algs.o \ qat_asym_algs.o \ qat_algs_send.o \ + adf_rl.o \ + adf_rl_admin.o \ + adf_sysfs_rl.o \ qat_uclo.o \ qat_hal.o \ qat_bl.o intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ adf_fw_counters.o \ + adf_cnv_dbgfs.o \ + adf_gen4_pm_debugfs.o \ + adf_gen4_tl.o \ adf_heartbeat.o \ adf_heartbeat_dbgfs.o \ + adf_pm_dbgfs.o \ + adf_telemetry.o \ + adf_tl_debugfs.o \ adf_dbgfs.o intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \ - adf_gen2_pfvf.o adf_gen4_pfvf.o + adf_gen2_pfvf.o adf_gen4_pfvf.o qat_mig_dev.o + +intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 79d5a1535eda34df0d5be747ff5eef8244e030d7..7830ecb1a1f1585bbd21b7b42004e9d91287c1fc 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -6,9 +6,15 @@ #include #include #include +#include #include +#include +#include #include "adf_cfg_common.h" +#include "adf_rl.h" +#include "adf_telemetry.h" #include "adf_pfvf_msg.h" +#include "icp_qat_hw.h" #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" @@ -17,12 +23,15 @@ #define ADF_C3XXX_DEVICE_NAME "c3xxx" #define ADF_C3XXXVF_DEVICE_NAME 
"c3xxxvf" #define ADF_4XXX_DEVICE_NAME "4xxx" +#define ADF_420XX_DEVICE_NAME "420xx" #define ADF_4XXX_PCI_DEVICE_ID 0x4940 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941 #define ADF_401XX_PCI_DEVICE_ID 0x4942 #define ADF_401XXIOV_PCI_DEVICE_ID 0x4943 #define ADF_402XX_PCI_DEVICE_ID 0x4944 #define ADF_402XXIOV_PCI_DEVICE_ID 0x4945 +#define ADF_420XX_PCI_DEVICE_ID 0x4946 +#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947 #define ADF_DEVICE_FUSECTL_OFFSET 0x40 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_DEVICE_FUSECTL_MASK 0x80000000 @@ -81,6 +90,19 @@ enum dev_sku_info { DEV_SKU_UNKNOWN, }; +enum ras_errors { + ADF_RAS_CORR, + ADF_RAS_UNCORR, + ADF_RAS_FATAL, + ADF_RAS_ERRORS, +}; + +struct adf_error_counters { + atomic_t counter[ADF_RAS_ERRORS]; + bool sysfs_added; + bool enabled; +}; + static inline const char *get_sku_info(enum dev_sku_info info) { switch (info) { @@ -119,6 +141,40 @@ struct admin_info { u32 mailbox_offset; }; +struct ring_config { + u64 base; + u32 config; + u32 head; + u32 tail; + u32 reserved0; +}; + +struct bank_state { + u32 ringstat0; + u32 ringstat1; + u32 ringuostat; + u32 ringestat; + u32 ringnestat; + u32 ringnfstat; + u32 ringfstat; + u32 ringcstat0; + u32 ringcstat1; + u32 ringcstat2; + u32 ringcstat3; + u32 iaintflagen; + u32 iaintflagreg; + u32 iaintflagsrcsel0; + u32 iaintflagsrcsel1; + u32 iaintcolen; + u32 iaintcolctl; + u32 iaintflagandcolen; + u32 ringexpstat; + u32 ringexpintenable; + u32 ringsrvarben; + u32 reserved0; + struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK]; +}; + struct adf_hw_csr_ops { u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size); u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank, @@ -129,22 +185,49 @@ struct adf_hw_csr_ops { u32 ring); void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank, u32 ring, u32 value); + u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank); u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_nf_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank, + u32 value); + u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank, + u32 ring); void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank, u32 ring, u32 value); + dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank, + u32 ring); void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank, u32 ring, dma_addr_t addr); + u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank, + u32 value); + u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr, + u32 bank, u32 value); + u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 
(*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr, + u32 bank); void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*get_int_col_ctl_enable_mask)(void); }; struct adf_cfg_device_data; @@ -152,6 +235,13 @@ struct adf_accel_dev; struct adf_etr_data; struct adf_etr_ring_data; +struct adf_ras_ops { + void (*enable_ras_errors)(struct adf_accel_dev *accel_dev); + void (*disable_ras_errors)(struct adf_accel_dev *accel_dev); + bool (*handle_interrupt)(struct adf_accel_dev *accel_dev, + bool *reset_required); +}; + struct adf_pfvf_ops { int (*enable_comms)(struct adf_accel_dev *accel_dev); u32 (*get_pf2vf_offset)(u32 i); @@ -169,6 +259,30 @@ struct adf_dc_ops { void (*build_deflate_ctx)(void *ctx); }; +struct qat_migdev_ops { + int (*init)(struct qat_mig_dev *mdev); + void (*cleanup)(struct qat_mig_dev *mdev); + void (*reset)(struct qat_mig_dev *mdev); + int (*open)(struct qat_mig_dev *mdev); + void (*close)(struct qat_mig_dev *mdev); + int (*suspend)(struct qat_mig_dev *mdev); + int (*resume)(struct qat_mig_dev *mdev); + int (*save_state)(struct qat_mig_dev *mdev); + int (*save_setup)(struct qat_mig_dev *mdev); + int (*load_state)(struct qat_mig_dev *mdev); + int (*load_setup)(struct qat_mig_dev *mdev, int size); +}; + +struct adf_dev_err_mask { + u32 cppagentcmdpar_mask; + u32 parerr_ath_cph_mask; + u32 parerr_cpr_xlt_mask; + u32 parerr_dcpr_ucs_mask; + u32 parerr_pke_mask; + u32 parerr_wat_wcp_mask; + u32 ssmfeatren_mask; +}; + struct adf_hw_device_data { struct adf_hw_device_class *dev_class; u32 (*get_accel_mask)(struct adf_hw_device_data *self); @@ -206,21 +320,34 @@ struct adf_hw_device_data { void (*enable_ints)(struct adf_accel_dev *accel_dev); void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev); int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr); + int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state); + int (*bank_state_restore)(struct adf_accel_dev *accel_dev, + u32 bank_number, struct bank_state *state); void (*reset_device)(struct adf_accel_dev *accel_dev); void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); - u32 (*uof_get_num_objs)(void); + u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev); + int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num); u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); + int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask); + u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*dev_config)(struct adf_accel_dev *accel_dev); struct adf_pfvf_ops pfvf_ops; struct adf_hw_csr_ops csr_ops; struct adf_dc_ops dc_ops; + struct adf_ras_ops ras_ops; + struct adf_dev_err_mask dev_err_mask; + struct adf_rl_hw_data rl_data; + struct adf_tl_hw_data tl_data; + struct qat_migdev_ops vfmig_ops; const char *fw_name; const char *fw_mmp_name; u32 fuses; u32 straps; u32 accel_capabilities_mask; u32 extended_dc_capabilities; + u16 fw_capabilities; u32 clock_frequency; u32 instance_id; u16 accel_mask; @@ -228,6 +355,7 @@ struct adf_hw_device_data { u32 admin_ae_mask; u16 tx_rings_mask; u16 
ring_to_svc_map; + u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER]; u8 tx_rx_gap; u8 num_banks; u16 num_banks_per_vf; @@ -236,6 +364,7 @@ struct adf_hw_device_data { u8 num_logical_accel; u8 num_engines; u32 num_hb_ctrs; + u8 num_rps; }; /* CSR write macro */ @@ -263,10 +392,13 @@ struct adf_hw_device_data { #define GET_SRV_TYPE(accel_dev, idx) \ (((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \ & ADF_SRV_TYPE_MASK) +#define GET_ERR_MASK(accel_dev) (&GET_HW_DATA(accel_dev)->dev_err_mask) #define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines) #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops) #define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops) +#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops) +#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev struct adf_admin_comms; @@ -280,10 +412,17 @@ struct adf_fw_loader_data { struct adf_accel_vf_info { struct adf_accel_dev *accel_dev; struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */ + struct mutex pfvf_mig_lock; /* protects PFVF state for migration */ struct ratelimit_state vf2pf_ratelimit; u32 vf_nr; bool init; + bool restarting; u8 vf_compat_ver; + /* + * Private area used for device migration. + * Memory allocation and free is managed by migration driver. + */ + void *mig_priv; }; struct adf_dc_data { @@ -292,24 +431,46 @@ struct adf_dc_data { dma_addr_t ovf_buff_p; }; +struct adf_pm { + struct dentry *debugfs_pm_status; + bool present; + int idle_irq_counters; + int throttle_irq_counters; + int fw_irq_counters; + int host_ack_counter; + int host_nack_counter; + ssize_t (*print_pm_status)(struct adf_accel_dev *accel_dev, + char __user *buf, size_t count, loff_t *pos); +}; + +struct adf_sysfs { + int ring_num; + struct rw_semaphore lock; /* protects access to the fields in this struct */ +}; + struct adf_accel_dev { struct adf_etr_data *transport; struct adf_hw_device_data *hw_device; struct adf_cfg_device_data *cfg; struct adf_fw_loader_data *fw_loader; struct adf_admin_comms *admin; + struct adf_telemetry *telemetry; struct adf_dc_data *dc_data; + struct adf_pm power_management; struct list_head crypto_list; struct list_head compression_list; unsigned long status; atomic_t ref_count; struct dentry *debugfs_dir; struct dentry *fw_cntr_dbgfile; + struct dentry *cnv_dbgfile; struct list_head list; struct module *owner; struct adf_accel_pci accel_pci_dev; struct adf_timer *timer; struct adf_heartbeat *heartbeat; + struct adf_rl *rate_limiting; + struct adf_sysfs sysfs; union { struct { /* protects VF2PF interrupts access */ @@ -327,8 +488,10 @@ struct adf_accel_dev { u8 pf_compat_ver; } vf; }; + struct adf_error_counters ras_errors; struct mutex state_lock; /* protect state of the device */ bool is_vf; + bool autoreset_on_error; u32 accel_id; }; #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c index 6be064dc64c8e8daa769baf9c28d0d453eb4d749..4b5d0350fc2ef1bd92d307f0b43a98ae2a45a8ce 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c @@ -19,7 +19,7 @@ static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr, int i; loader = loader_data->fw_loader; - num_objs = hw_device->uof_get_num_objs(); + num_objs = 
hw_device->uof_get_num_objs(accel_dev); for (i = 0; i < num_objs; i++) { obj_name = hw_device->uof_get_name(accel_dev, i); diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 194d64d4b99a1b4ac350df8fee3c19064e835cee..acad526eb741683b350665333d3a047f5d252613 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -7,6 +7,7 @@ #include #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_heartbeat.h" @@ -309,6 +310,73 @@ static bool is_dcc_enabled(struct adf_accel_dev *accel_dev) return !strcmp(services, "dcc"); } +static int adf_get_fw_capabilities(struct adf_accel_dev *accel_dev, u16 *caps) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + if (!ae_mask) + return 0; + + req.cmd_id = ICP_QAT_FW_CAPABILITIES_GET; + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + *caps = resp.fw_capabilities; + + return 0; +} + +int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_slice_cnt *slices) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + req.cmd_id = ICP_QAT_FW_RL_INIT; + + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + memcpy(slices, &resp.slices, sizeof(*slices)); + + return 0; +} + +int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_req *req) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + + /* + * req struct filled in rl implementation. Used commands + * ICP_QAT_FW_RL_ADD for a new SLA + * ICP_QAT_FW_RL_UPDATE for update SLA + */ + return adf_send_admin(accel_dev, req, &resp, ae_mask); +} + +int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + + req.cmd_id = ICP_QAT_FW_RL_REMOVE; + req.node_id = node_id; + req.node_type = node_type; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + /** * adf_send_admin_init() - Function sends init message to FW * @accel_dev: Pointer to acceleration device. 
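The rate-limiting admin wrappers added above share one pattern: fill an icp_qat_fw_init_admin_req with a command id, send it to the admin AEs with adf_send_admin(), then unpack the response. A hedged usage sketch for SLA removal, where the sla structure and its node_id/type fields are hypothetical stand-ins for the bookkeeping kept by the rl code::

	int ret;

	/* node_id and node_type are hypothetical values tracked per SLA */
	ret = adf_send_admin_rl_delete(accel_dev, sla->node_id, sla->type);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Failed to remove SLA node\n");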
@@ -319,6 +387,7 @@ static bool is_dcc_enabled(struct adf_accel_dev *accel_dev) */ int adf_send_admin_init(struct adf_accel_dev *accel_dev) { + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); u32 dc_capabilities = 0; int ret; @@ -339,6 +408,8 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev) } accel_dev->hw_device->extended_dc_capabilities = dc_capabilities; + adf_get_fw_capabilities(accel_dev, &hw_data->fw_capabilities); + return adf_init_ae(accel_dev); } EXPORT_SYMBOL_GPL(adf_send_admin_init); @@ -379,6 +450,91 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay) return adf_send_admin(accel_dev, &req, &resp, ae_mask); } +int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, + size_t buff_size) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct icp_qat_fw_init_admin_req req = { }; + struct icp_qat_fw_init_admin_resp resp; + u32 ae_mask = hw_data->admin_ae_mask; + int ret; + + /* Query pm info via init/admin cmd */ + if (!accel_dev->admin) { + dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n"); + return -EFAULT; + } + + req.cmd_id = ICP_QAT_FW_PM_INFO; + req.init_cfg_sz = buff_size; + req.init_cfg_ptr = p_state_addr; + + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + dev_err(&GET_DEV(accel_dev), + "Failed to query power-management info\n"); + + return ret; +} + +int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, + u16 *latest_err) +{ + struct icp_qat_fw_init_admin_req req = { }; + struct icp_qat_fw_init_admin_resp resp; + int ret; + + req.cmd_id = ICP_QAT_FW_CNV_STATS_GET; + + ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp); + if (ret) + return ret; + if (resp.status) + return -EPROTONOSUPPORT; + + *err_cnt = resp.error_count; + *latest_err = resp.latest_error; + + return ret; +} + +int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev, + dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes, + struct icp_qat_fw_init_admin_slice_cnt *slice_count) +{ + u32 ae_mask = GET_HW_DATA(accel_dev)->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + req.cmd_id = ICP_QAT_FW_TL_START; + req.init_cfg_ptr = tl_dma_addr; + req.init_cfg_sz = layout_sz; + + if (rp_indexes) + memcpy(&req.rp_indexes, rp_indexes, sizeof(req.rp_indexes)); + + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + memcpy(slice_count, &resp.slices, sizeof(*slice_count)); + + return 0; +} + +int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + u32 ae_mask = hw_data->admin_ae_mask; + + req.cmd_id = ICP_QAT_FW_TL_STOP; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h new file mode 100644 index 0000000000000000000000000000000000000000..647c8e196752104f009bceeda30e5ae71b79b96e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_ADMIN +#define ADF_ADMIN + +#include "icp_qat_fw_init_admin.h" + +struct adf_accel_dev; + +int adf_init_admin_comms(struct adf_accel_dev 
*accel_dev); +void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); +int adf_send_admin_init(struct adf_accel_dev *accel_dev); +int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); +int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); +int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); +int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); +int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_slice_cnt *slices); +int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_req *req); +int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type); +int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); +int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); +int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err); +int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev, + dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes, + struct icp_qat_fw_init_admin_slice_cnt *slice_count); +int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index af495a6f039f6b6c748e12723145ccd650502f56..04260f61d04294b24d9bf77b68789955de414f01 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -7,8 +7,15 @@ #include #include "adf_accel_devices.h" #include "adf_common_drv.h" +#include "adf_pfvf_pf_msg.h" + +struct adf_fatal_error_data { + struct adf_accel_dev *accel_dev; + struct work_struct work; +}; static struct workqueue_struct *device_reset_wq; +static struct workqueue_struct *device_sriov_wq; static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, pci_channel_state_t state) @@ -26,6 +33,19 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_DISCONNECT; } + set_bit(ADF_STATUS_RESTARTING, &accel_dev->status); + if (accel_dev->hw_device->exit_arb) { + dev_dbg(&pdev->dev, "Disabling arbitration\n"); + accel_dev->hw_device->exit_arb(accel_dev); + } + adf_error_notifier(accel_dev); + adf_pf2vf_notify_fatal_error(accel_dev); + adf_dev_restarting_notify(accel_dev); + adf_pf2vf_notify_restarting(accel_dev); + adf_pf2vf_wait_for_restarting_complete(accel_dev); + pci_clear_master(pdev); + adf_dev_down(accel_dev, false); + return PCI_ERS_RESULT_NEED_RESET; } @@ -37,6 +57,13 @@ struct adf_reset_dev_data { struct work_struct reset_work; }; +/* sriov dev data */ +struct adf_sriov_dev_data { + struct adf_accel_dev *accel_dev; + struct completion compl; + struct work_struct sriov_work; +}; + void adf_reset_sbr(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); @@ -82,11 +109,22 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev) } } +static void adf_device_sriov_worker(struct work_struct *work) +{ + struct adf_sriov_dev_data *sriov_data = + container_of(work, struct adf_sriov_dev_data, sriov_work); + + adf_reenable_sriov(sriov_data->accel_dev); + complete(&sriov_data->compl); +} + static void adf_device_reset_worker(struct work_struct *work) { struct adf_reset_dev_data *reset_data = container_of(work, struct adf_reset_dev_data, reset_work); struct adf_accel_dev *accel_dev = reset_data->accel_dev; + unsigned long wait_jiffies = 
msecs_to_jiffies(10000); + struct adf_sriov_dev_data sriov_data; adf_dev_restarting_notify(accel_dev); if (adf_dev_restart(accel_dev)) { @@ -97,6 +135,14 @@ static void adf_device_reset_worker(struct work_struct *work) WARN(1, "QAT: device restart failed. Device is unusable\n"); return; } + + sriov_data.accel_dev = accel_dev; + init_completion(&sriov_data.compl); + INIT_WORK(&sriov_data.sriov_work, adf_device_sriov_worker); + queue_work(device_sriov_wq, &sriov_data.sriov_work); + if (wait_for_completion_timeout(&sriov_data.compl, wait_jiffies)) + adf_pf2vf_notify_restarted(accel_dev); + adf_dev_restarted_notify(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); @@ -148,14 +194,25 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev) { struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + int res = 0; if (!accel_dev) { pr_err("QAT: Can't find acceleration device\n"); return PCI_ERS_RESULT_DISCONNECT; } - if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC)) + + if (!pdev->is_busmaster) + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + res = adf_dev_up(accel_dev, false); + if (res && res != -EALREADY) return PCI_ERS_RESULT_DISCONNECT; + adf_reenable_sriov(accel_dev); + adf_pf2vf_notify_restarted(accel_dev); + adf_dev_restarted_notify(accel_dev); + clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); return PCI_ERS_RESULT_RECOVERED; } @@ -172,11 +229,62 @@ const struct pci_error_handlers adf_err_handler = { }; EXPORT_SYMBOL_GPL(adf_err_handler); +int adf_dev_autoreset(struct adf_accel_dev *accel_dev) +{ + if (accel_dev->autoreset_on_error) + return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC); + + return 0; +} + +static void adf_notify_fatal_error_worker(struct work_struct *work) +{ + struct adf_fatal_error_data *wq_data = + container_of(work, struct adf_fatal_error_data, work); + struct adf_accel_dev *accel_dev = wq_data->accel_dev; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + + adf_error_notifier(accel_dev); + + if (!accel_dev->is_vf) { + /* Disable arbitration to stop processing of new requests */ + if (accel_dev->autoreset_on_error && hw_device->exit_arb) + hw_device->exit_arb(accel_dev); + if (accel_dev->pf.vf_info) + adf_pf2vf_notify_fatal_error(accel_dev); + adf_dev_autoreset(accel_dev); + } + + kfree(wq_data); +} + +int adf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ + struct adf_fatal_error_data *wq_data; + + wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC); + if (!wq_data) + return -ENOMEM; + + wq_data->accel_dev = accel_dev; + INIT_WORK(&wq_data->work, adf_notify_fatal_error_worker); + adf_misc_wq_queue_work(&wq_data->work); + + return 0; +} + int adf_init_aer(void) { device_reset_wq = alloc_workqueue("qat_device_reset_wq", WQ_MEM_RECLAIM, 0); - return !device_reset_wq ? 
-EFAULT : 0; + if (!device_reset_wq) + return -EFAULT; + + device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0); + if (!device_sriov_wq) + return -EFAULT; + + return 0; } void adf_exit_aer(void) @@ -184,4 +292,8 @@ void adf_exit_aer(void) if (device_reset_wq) destroy_workqueue(device_reset_wq); device_reset_wq = NULL; + + if (device_sriov_wq) + destroy_workqueue(device_sriov_wq); + device_sriov_wq = NULL; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h index 6e5de1dab97b4f3402fee40d62dbfc1ba9f1443f..89df3888d7eac7791c6b2ce47f9e1c8387e370cb 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h @@ -47,6 +47,7 @@ enum adf_device_type { DEV_C3XXX, DEV_C3XXXVF, DEV_4XXX, + DEV_420XX, }; struct adf_dev_status_info { diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h index 322b76903a737d4e0fce0371a355f6a1f17fb0b0..e015ad6cace2b22afae87fdeda773fa260dcd6ba 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h @@ -49,5 +49,6 @@ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY #define ADF_ACCEL_STR "Accelerator%d" #define ADF_HEARTBEAT_TIMER "HeartbeatTimer" +#define ADF_SRIOV_ENABLED "SriovEnabled" #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c index eae44969dc84fa8a71826fcaf1fc647d7c3fd7da..cf89f57de2a7021494faefc006ca4c4eda038e2b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_clock.c +++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c @@ -10,6 +10,7 @@ #include #include #include +#include "adf_admin.h" #include "adf_accel_devices.h" #include "adf_clock.h" #include "adf_common_drv.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c new file mode 100644 index 0000000000000000000000000000000000000000..627953a72d4784c82b53c9ddc7d8e64002a9f524 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_admin.h" +#include "adf_common_drv.h" +#include "adf_cnv_dbgfs.h" +#include "qat_compression.h" + +#define CNV_DEBUGFS_FILENAME "cnv_errors" +#define CNV_MIN_PADDING 16 + +#define CNV_ERR_INFO_MASK GENMASK(11, 0) +#define CNV_ERR_TYPE_MASK GENMASK(15, 12) +#define CNV_SLICE_ERR_SIGN_BIT_INDEX 7 +#define CNV_DELTA_ERR_SIGN_BIT_INDEX 11 + +enum cnv_error_type { + CNV_ERR_TYPE_NONE, + CNV_ERR_TYPE_CHECKSUM, + CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH, + CNV_ERR_TYPE_DECOMPRESSION, + CNV_ERR_TYPE_TRANSLATION, + CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH, + CNV_ERR_TYPE_UNKNOWN, + CNV_ERR_TYPES_COUNT +}; + +#define CNV_ERROR_TYPE_GET(latest_err) \ + min_t(u16, u16_get_bits(latest_err, CNV_ERR_TYPE_MASK), CNV_ERR_TYPE_UNKNOWN) + +#define CNV_GET_DELTA_ERR_INFO(latest_error) \ + sign_extend32(latest_error, CNV_DELTA_ERR_SIGN_BIT_INDEX) + +#define CNV_GET_SLICE_ERR_INFO(latest_error) \ + sign_extend32(latest_error, CNV_SLICE_ERR_SIGN_BIT_INDEX) + +#define CNV_GET_DEFAULT_ERR_INFO(latest_error) \ + u16_get_bits(latest_error, CNV_ERR_INFO_MASK) + +enum cnv_fields { + CNV_ERR_COUNT, + CNV_LATEST_ERR, + CNV_FIELDS_COUNT +}; + +static const char * const cnv_field_names[CNV_FIELDS_COUNT] = { + 
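+ /* Column headers printed by the cnv_errors debugfs table */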
[CNV_ERR_COUNT] = "Total Errors", + [CNV_LATEST_ERR] = "Last Error", +}; + +static const char * const cnv_error_names[CNV_ERR_TYPES_COUNT] = { + [CNV_ERR_TYPE_NONE] = "No Error", + [CNV_ERR_TYPE_CHECKSUM] = "Checksum Error", + [CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH] = "Length Error-P", + [CNV_ERR_TYPE_DECOMPRESSION] = "Decomp Error", + [CNV_ERR_TYPE_TRANSLATION] = "Xlat Error", + [CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH] = "Length Error-C", + [CNV_ERR_TYPE_UNKNOWN] = "Unknown Error", +}; + +struct ae_cnv_errors { + u16 ae; + u16 err_cnt; + u16 latest_err; + bool is_comp_ae; +}; + +struct cnv_err_stats { + u16 ae_count; + struct ae_cnv_errors ae_cnv_errors[]; +}; + +static s16 get_err_info(u8 error_type, u16 latest) +{ + switch (error_type) { + case CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH: + case CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH: + return CNV_GET_DELTA_ERR_INFO(latest); + case CNV_ERR_TYPE_DECOMPRESSION: + case CNV_ERR_TYPE_TRANSLATION: + return CNV_GET_SLICE_ERR_INFO(latest); + default: + return CNV_GET_DEFAULT_ERR_INFO(latest); + } +} + +static void *qat_cnv_errors_seq_start(struct seq_file *sfile, loff_t *pos) +{ + struct cnv_err_stats *err_stats = sfile->private; + + if (*pos == 0) + return SEQ_START_TOKEN; + + if (*pos > err_stats->ae_count) + return NULL; + + return &err_stats->ae_cnv_errors[*pos - 1]; +} + +static void *qat_cnv_errors_seq_next(struct seq_file *sfile, void *v, + loff_t *pos) +{ + struct cnv_err_stats *err_stats = sfile->private; + + (*pos)++; + + if (*pos > err_stats->ae_count) + return NULL; + + return &err_stats->ae_cnv_errors[*pos - 1]; +} + +static void qat_cnv_errors_seq_stop(struct seq_file *sfile, void *v) +{ +} + +static int qat_cnv_errors_seq_show(struct seq_file *sfile, void *v) +{ + struct ae_cnv_errors *ae_errors; + unsigned int i; + s16 err_info; + u8 err_type; + + if (v == SEQ_START_TOKEN) { + seq_puts(sfile, "AE "); + for (i = 0; i < CNV_FIELDS_COUNT; ++i) + seq_printf(sfile, " %*s", CNV_MIN_PADDING, + cnv_field_names[i]); + } else { + ae_errors = v; + + if (!ae_errors->is_comp_ae) + return 0; + + err_type = CNV_ERROR_TYPE_GET(ae_errors->latest_err); + err_info = get_err_info(err_type, ae_errors->latest_err); + + seq_printf(sfile, "%d:", ae_errors->ae); + seq_printf(sfile, " %*d", CNV_MIN_PADDING, ae_errors->err_cnt); + seq_printf(sfile, "%*s [%d]", CNV_MIN_PADDING, + cnv_error_names[err_type], err_info); + } + seq_putc(sfile, '\n'); + + return 0; +} + +static const struct seq_operations qat_cnv_errors_sops = { + .start = qat_cnv_errors_seq_start, + .next = qat_cnv_errors_seq_next, + .stop = qat_cnv_errors_seq_stop, + .show = qat_cnv_errors_seq_show, +}; + +/** + * cnv_err_stats_alloc() - Get CNV stats for the provided device. + * @accel_dev: Pointer to a QAT acceleration device + * + * Allocates and populates table of CNV errors statistics for each non-admin AE + * available through the supplied acceleration device. The caller becomes the + * owner of such memory and is responsible for the deallocation through a call + * to kfree(). + * + * Returns: a pointer to a dynamically allocated struct cnv_err_stats on success + * or a negative value on error. 
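+ *
+ * A minimal illustrative use, mirroring what qat_cnv_errors_file_open()
+ * below does with the result:
+ *
+ *	err_stats = cnv_err_stats_alloc(accel_dev);
+ *	if (IS_ERR(err_stats))
+ *		return PTR_ERR(err_stats);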
+ */ +static struct cnv_err_stats *cnv_err_stats_alloc(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct cnv_err_stats *err_stats; + unsigned long ae_count; + unsigned long ae_mask; + size_t err_stats_size; + unsigned long ae; + unsigned int i; + u16 latest_err; + u16 err_cnt; + int ret; + + if (!adf_dev_started(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "QAT Device not started\n"); + return ERR_PTR(-EBUSY); + } + + /* Ignore the admin AEs */ + ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask; + ae_count = hweight_long(ae_mask); + if (unlikely(!ae_count)) + return ERR_PTR(-EINVAL); + + err_stats_size = struct_size(err_stats, ae_cnv_errors, ae_count); + err_stats = kmalloc(err_stats_size, GFP_KERNEL); + if (!err_stats) + return ERR_PTR(-ENOMEM); + + err_stats->ae_count = ae_count; + + i = 0; + for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) { + ret = adf_get_cnv_stats(accel_dev, ae, &err_cnt, &latest_err); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), + "Failed to get CNV stats for ae %ld, [%d].\n", + ae, ret); + err_stats->ae_cnv_errors[i++].is_comp_ae = false; + continue; + } + err_stats->ae_cnv_errors[i].is_comp_ae = true; + err_stats->ae_cnv_errors[i].latest_err = latest_err; + err_stats->ae_cnv_errors[i].err_cnt = err_cnt; + err_stats->ae_cnv_errors[i].ae = ae; + i++; + } + + return err_stats; +} + +static int qat_cnv_errors_file_open(struct inode *inode, struct file *file) +{ + struct adf_accel_dev *accel_dev = inode->i_private; + struct seq_file *cnv_errors_seq_file; + struct cnv_err_stats *cnv_err_stats; + int ret; + + cnv_err_stats = cnv_err_stats_alloc(accel_dev); + if (IS_ERR(cnv_err_stats)) + return PTR_ERR(cnv_err_stats); + + ret = seq_open(file, &qat_cnv_errors_sops); + if (unlikely(ret)) { + kfree(cnv_err_stats); + return ret; + } + + cnv_errors_seq_file = file->private_data; + cnv_errors_seq_file->private = cnv_err_stats; + return ret; +} + +static int qat_cnv_errors_file_release(struct inode *inode, struct file *file) +{ + struct seq_file *cnv_errors_seq_file = file->private_data; + + kfree(cnv_errors_seq_file->private); + cnv_errors_seq_file->private = NULL; + + return seq_release(inode, file); +} + +static const struct file_operations qat_cnv_fops = { + .owner = THIS_MODULE, + .open = qat_cnv_errors_file_open, + .read = seq_read, + .llseek = seq_lseek, + .release = qat_cnv_errors_file_release, +}; + +static ssize_t no_comp_file_read(struct file *f, char __user *buf, size_t count, + loff_t *pos) +{ + char *file_msg = "No engine configured for comp\n"; + + return simple_read_from_buffer(buf, count, pos, file_msg, + strlen(file_msg)); +} + +static const struct file_operations qat_cnv_no_comp_fops = { + .owner = THIS_MODULE, + .read = no_comp_file_read, +}; + +void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + const struct file_operations *fops; + void *data; + + if (adf_hw_dev_has_compression(accel_dev)) { + fops = &qat_cnv_fops; + data = accel_dev; + } else { + fops = &qat_cnv_no_comp_fops; + data = NULL; + } + + accel_dev->cnv_dbgfile = debugfs_create_file(CNV_DEBUGFS_FILENAME, 0400, + accel_dev->debugfs_dir, + data, fops); +} + +void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + debugfs_remove(accel_dev->cnv_dbgfile); + accel_dev->cnv_dbgfile = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h new file mode 100644 index 
0000000000000000000000000000000000000000..b02b0961c43308a1e73b4220c0f0bcc7da0dd1ec --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_CNV_DBG_H +#define ADF_CNV_DBG_H + +struct adf_accel_dev; + +void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev); +void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 79ff7982378d9fef9d48ca1d37f3d9bc85de7bd7..3bec9e20bad0a3e3583e8cc818a7f94a4bad3aa7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -40,6 +40,7 @@ enum adf_event { ADF_EVENT_SHUTDOWN, ADF_EVENT_RESTARTING, ADF_EVENT_RESTARTED, + ADF_EVENT_FATAL_ERROR, }; struct service_hndl { @@ -60,6 +61,8 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev); void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); void adf_clean_vf_map(bool); +int adf_notify_fatal_error(struct adf_accel_dev *accel_dev); +void adf_error_notifier(struct adf_accel_dev *accel_dev); int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, @@ -84,20 +87,14 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev); extern const struct pci_error_handlers adf_err_handler; void adf_reset_sbr(struct adf_accel_dev *accel_dev); void adf_reset_flr(struct adf_accel_dev *accel_dev); +int adf_dev_autoreset(struct adf_accel_dev *accel_dev); void adf_dev_restore(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); -int adf_init_admin_comms(struct adf_accel_dev *accel_dev); -void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); -int adf_send_admin_init(struct adf_accel_dev *accel_dev); -int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); -int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); -int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); -int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); -int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); +int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr); int adf_dev_get(struct adf_accel_dev *accel_dev); void adf_dev_put(struct adf_accel_dev *accel_dev); @@ -196,6 +193,7 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work, #if defined(CONFIG_PCI_IOV) int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); +void adf_reenable_sriov(struct adf_accel_dev *accel_dev); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask); void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev); bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev); @@ -216,6 +214,10 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) { } +static inline void adf_reenable_sriov(struct adf_accel_dev *accel_dev) +{ +} + static inline int adf_init_pf_wq(void) { return 0; @@ -246,4 +248,24 @@ static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev) return pmisc->virt_addr; } +static inline 
void __iomem *adf_get_etr_base(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *etr; + + etr = &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)]; + + return etr->virt_addr; +} + +static inline void __iomem *adf_get_aram_base(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *param; + + param = &GET_BARS(accel_dev)[hw_data->get_sram_bar_id(hw_data)]; + + return param->virt_addr; +} + #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c index 056fc59b5ae61d2981f413a1ce732862c0589746..4c11ad1ebcf0f8f65822df0ff290aaeabaedafb7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c @@ -5,9 +5,12 @@ #include "adf_accel_devices.h" #include "adf_cfg.h" #include "adf_common_drv.h" +#include "adf_cnv_dbgfs.h" #include "adf_dbgfs.h" #include "adf_fw_counters.h" #include "adf_heartbeat_dbgfs.h" +#include "adf_pm_dbgfs.h" +#include "adf_tl_debugfs.h" /** * adf_dbgfs_init() - add persistent debugfs entries @@ -54,6 +57,9 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) if (!accel_dev->is_vf) { adf_fw_counters_dbgfs_add(accel_dev); adf_heartbeat_dbgfs_add(accel_dev); + adf_pm_dbgfs_add(accel_dev); + adf_cnv_dbgfs_add(accel_dev); + adf_tl_dbgfs_add(accel_dev); } } @@ -64,6 +70,9 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) { if (!accel_dev->is_vf) { + adf_tl_dbgfs_rm(accel_dev); + adf_cnv_dbgfs_rm(accel_dev); + adf_pm_dbgfs_rm(accel_dev); adf_heartbeat_dbgfs_rm(accel_dev); adf_fw_counters_dbgfs_rm(accel_dev); } diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c index 86ee36feefad34692fa5e3d1c3a935c510a6364b..f07b748795f7b79af65f2979eb6bfd2c4c39546b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c @@ -60,10 +60,10 @@ static int adf_get_vf_real_id(u32 fake) /** * adf_clean_vf_map() - Cleans VF id mapings - * - * Function cleans internal ids for virtual functions. * @vf: flag indicating whether mappings is cleaned * for vfs only or for vfs and pfs + * + * Function cleans internal ids for virtual functions. 
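+ * A sketch of a typical full-cleanup call, assuming the false value selects
+ * the broader "vfs and pfs" variant described for @vf above:
+ *
+ *	adf_clean_vf_map(false);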
 */
void adf_clean_vf_map(bool vf)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
new file mode 100644
index 0000000000000000000000000000000000000000..4f86696800c97f55de8579b9c3cabf7a4d943b65
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_FW_CONFIG_H_
+#define ADF_FW_CONFIG_H_
+
+enum adf_fw_objs {
+	ADF_FW_SYM_OBJ,
+	ADF_FW_ASYM_OBJ,
+	ADF_FW_DC_OBJ,
+	ADF_FW_ADMIN_OBJ,
+};
+
+struct adf_fw_config {
+	u32 ae_mask;
+	enum adf_fw_objs obj;
+};
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
index cb6e09ef5c9ff92241f426586d6e6ea0ac900bd5..98fb7ccfed9fc30ab3dbbef17838eacaaf78cce3 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
@@ -9,6 +9,7 @@
 #include
 
 #include "adf_accel_devices.h"
+#include "adf_admin.h"
 #include "adf_common_drv.h"
 #include "adf_fw_counters.h"
@@ -34,7 +35,7 @@ struct adf_ae_counters {
 
 struct adf_fw_counters {
 	u16 ae_count;
-	struct adf_ae_counters ae_counters[];
+	struct adf_ae_counters ae_counters[] __counted_by(ae_count);
 };
 
 static void adf_fw_counters_parse_ae_values(struct adf_ae_counters *ae_counters, u32 ae,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c
new file mode 100644
index 0000000000000000000000000000000000000000..650c9edd8a6650524a24d165050058f1625260c9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/types.h>
+#include "adf_gen2_hw_csr_data.h"
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+	return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+	return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+				u32 value)
+{
+	WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+	return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+				u32 value)
+{
+	WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+	return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
+				  u32 ring, u32 value)
+{
+	WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+				dma_addr_t addr)
+{
+	WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+	WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+	WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
+				 u32 value)
+{
+	WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32
bank, + u32 value) +{ + WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); +} + +static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); +} + +static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); +} + +void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) +{ + csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; + csr_ops->read_csr_ring_head = read_csr_ring_head; + csr_ops->write_csr_ring_head = write_csr_ring_head; + csr_ops->read_csr_ring_tail = read_csr_ring_tail; + csr_ops->write_csr_ring_tail = write_csr_ring_tail; + csr_ops->read_csr_e_stat = read_csr_e_stat; + csr_ops->write_csr_ring_config = write_csr_ring_config; + csr_ops->write_csr_ring_base = write_csr_ring_base; + csr_ops->write_csr_int_flag = write_csr_int_flag; + csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; + csr_ops->write_csr_int_col_en = write_csr_int_col_en; + csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; + csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; + csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; +} +EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h new file mode 100644 index 0000000000000000000000000000000000000000..55058b0f9e52b1e57136f32ad7a4d09ce862b7ef --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN2_HW_CSR_DATA_H_ +#define ADF_GEN2_HW_CSR_DATA_H_ + +#include +#include "adf_accel_devices.h" + +#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL +#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL +#define ADF_RING_CSR_RING_CONFIG 0x000 +#define ADF_RING_CSR_RING_LBASE 0x040 +#define ADF_RING_CSR_RING_UBASE 0x080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_INT_FLAG 0x170 +#define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_SRCSEL_2 0x178 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_BUNDLE_SIZE 0x1000 +#define ADF_ARB_REG_SLOT 0x1000 +#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C + +#define BUILD_RING_BASE_ADDR(addr, size) \ + (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2)) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_E_STAT) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ +do { \ + u32 l_base = 0, u_base = 0; \ + l_base = (u32)((value) & 0xFFFFFFFF); \ + u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \ + ADF_CSR_WR(csr_base_addr, 
(ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \ +} while (0) + +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_FLAG, value) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ +do { \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ +} while (0) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_COL_EN, value) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, value) + +#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ + (ADF_ARB_REG_SLOT * (index)), value) + +void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c index d1884547b5a142cab337a51dede4eab2a65b11b9..1f64bf49b221c2b2f43a6fcd542adaefae20ff57 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c @@ -111,103 +111,6 @@ void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_gen2_enable_ints); -static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) -{ - return BUILD_RING_BASE_ADDR(addr, size); -} - -static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); -} - -static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); -} - -static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) -{ - return READ_CSR_E_STAT(csr_base_addr, bank); -} - -static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, - u32 ring, u32 value) -{ - WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); -} - -static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, - dma_addr_t addr) -{ - WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); -} - -static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value) -{ - 
WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); -} - -static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) -{ - WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); -} - -static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); -} - -static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); -} - -static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); -} - -static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); -} - -void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) -{ - csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; - csr_ops->read_csr_ring_head = read_csr_ring_head; - csr_ops->write_csr_ring_head = write_csr_ring_head; - csr_ops->read_csr_ring_tail = read_csr_ring_tail; - csr_ops->write_csr_ring_tail = write_csr_ring_tail; - csr_ops->read_csr_e_stat = read_csr_e_stat; - csr_ops->write_csr_ring_config = write_csr_ring_config; - csr_ops->write_csr_ring_base = write_csr_ring_base; - csr_ops->write_csr_int_flag = write_csr_int_flag; - csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; - csr_ops->write_csr_int_col_en = write_csr_int_col_en; - csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; - csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; - csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; -} -EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops); - u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h index 6bd341061de414118ee11ced3e5470dbb751d9b2..708e9186127bbdf3db4c8e602f8b6af993a3d927 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h @@ -6,78 +6,9 @@ #include "adf_accel_devices.h" #include "adf_cfg_common.h" -/* Transport access */ -#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL -#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL -#define ADF_RING_CSR_RING_CONFIG 0x000 -#define ADF_RING_CSR_RING_LBASE 0x040 -#define ADF_RING_CSR_RING_UBASE 0x080 -#define ADF_RING_CSR_RING_HEAD 0x0C0 -#define ADF_RING_CSR_RING_TAIL 0x100 -#define ADF_RING_CSR_E_STAT 0x14C -#define ADF_RING_CSR_INT_FLAG 0x170 -#define ADF_RING_CSR_INT_SRCSEL 0x174 -#define ADF_RING_CSR_INT_SRCSEL_2 0x178 -#define ADF_RING_CSR_INT_COL_EN 0x17C -#define ADF_RING_CSR_INT_COL_CTL 0x180 -#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 -#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 -#define ADF_RING_BUNDLE_SIZE 0x1000 #define ADF_GEN2_RX_RINGS_OFFSET 8 #define ADF_GEN2_TX_RINGS_MASK 0xFF -#define BUILD_RING_BASE_ADDR(addr, size) \ - (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) -#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2)) -#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2)) -#define READ_CSR_E_STAT(csr_base_addr, bank) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_E_STAT) -#define 
WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) -#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ -do { \ - u32 l_base = 0, u_base = 0; \ - l_base = (u32)((value) & 0xFFFFFFFF); \ - u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \ -} while (0) - -#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) -#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) -#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_FLAG, value) -#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ -do { \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ -} while (0) -#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_COL_EN, value) -#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_COL_CTL, \ - ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) -#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_FLAG_AND_COL, value) - /* AE to function map */ #define AE2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190) #define AE2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310) @@ -106,12 +37,6 @@ do { \ #define ADF_ARB_OFFSET 0x30000 #define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 #define ADF_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) -#define ADF_ARB_REG_SLOT 0x1000 -#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C - -#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ - (ADF_ARB_REG_SLOT * (index)), value) /* Power gating */ #define ADF_POWERGATE_DC BIT(23) @@ -158,7 +83,6 @@ u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self); void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev); void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable, int num_a_regs, int num_b_regs); -void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info); void adf_gen2_get_arb_info(struct arb_info *arb_info); void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c new file mode 100644 index 0000000000000000000000000000000000000000..fe1f3d727dc5a54ee7853c18b889eaa91fb88b2c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c @@ -0,0 +1,287 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include "adf_accel_devices.h" +#include "adf_cfg.h" +#include "adf_cfg_services.h" 
+#include "adf_cfg_strings.h" +#include "adf_common_drv.h" +#include "adf_gen4_config.h" +#include "adf_heartbeat.h" +#include "adf_transport_access_macros.h" +#include "qat_compression.h" +#include "qat_crypto.h" + +static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int banks = GET_MAX_BANKS(accel_dev); + int cpus = num_online_cpus(); + unsigned long bank, val; + int instances; + int ret; + int i; + + if (adf_hw_dev_has_crypto(accel_dev)) + instances = min(cpus, banks / 2); + else + instances = 0; + + for (i = 0; i < instances; i++) { + val = i; + bank = i * 2; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &bank, ADF_DEC); + if (ret) + goto err; + + bank += 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &bank, ADF_DEC); + if (ret) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, + i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); + val = 128; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, &val, ADF_DEC); + if (ret) + goto err; + } + + val = i; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + goto err; + + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n"); + return ret; +} + +static int adf_comp_dev_config(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int banks = GET_MAX_BANKS(accel_dev); + int cpus = num_online_cpus(); + unsigned long val; + int instances; + int ret; + int i; + + if (adf_hw_dev_has_compression(accel_dev)) + instances = min(cpus, banks); + else + instances = 0; + + for (i = 0; i < instances; i++) { + val = i; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i); + ret = 
adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, &val, ADF_DEC); + if (ret) + goto err; + } + + val = i; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); + if (ret) + goto err; + + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n"); + return ret; +} + +static int adf_no_dev_config(struct adf_accel_dev *accel_dev) +{ + unsigned long val; + int ret; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + return ret; + + return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); +} + +/** + * adf_gen4_dev_config() - create dev config required to create instances + * + * @accel_dev: Pointer to acceleration device. + * + * Function creates device configuration required to create instances + * + * Return: 0 on success, error code otherwise. + */ +int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + int ret; + + ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); + if (ret) + goto err; + + ret = adf_cfg_section_add(accel_dev, "Accelerator0"); + if (ret) + goto err; + + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services); + if (ret) + goto err; + + ret = sysfs_match_string(adf_cfg_services, services); + if (ret < 0) + goto err; + + switch (ret) { + case SVC_CY: + case SVC_CY2: + ret = adf_crypto_dev_config(accel_dev); + break; + case SVC_DC: + case SVC_DCC: + ret = adf_comp_dev_config(accel_dev); + break; + default: + ret = adf_no_dev_config(accel_dev); + break; + } + + if (ret) + goto err; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + + return ret; + +err: + dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n"); + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_dev_config); + +int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev) +{ + const char *config; + int ret; + + config = accel_dev->accel_id % 2 ? 
ADF_CFG_DC : ADF_CFG_CY;
+
+	ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+	if (ret)
+		return ret;
+
+	/* Default configuration is crypto only for even devices
+	 * and compression for odd devices
+	 */
+	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+					  ADF_SERVICES_ENABLED, config,
+					  ADF_STR);
+	if (ret)
+		return ret;
+
+	adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_cfg_dev_init);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb87655f69a8396de3a9f2ddf816a53192a5d186
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_GEN4_CONFIG_H_
+#define ADF_GEN4_CONFIG_H_
+
+#include "adf_accel_devices.h"
+
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
+int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c
new file mode 100644
index 0000000000000000000000000000000000000000..6609c248aaba5da8683ad4900163d5642d03c272
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/types.h>
+#include "adf_gen4_hw_csr_data.h"
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+	return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+	return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+				u32 value)
+{
+	WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+	return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+				u32 value)
+{
+	WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_stat(void __iomem *csr_base_addr, u32 bank)
+{
+	return READ_CSR_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_uo_stat(void __iomem *csr_base_addr, u32 bank)
+{
+	return READ_CSR_UO_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+	return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_ne_stat(void __iomem *csr_base_addr, u32 bank)
+{
+	return READ_CSR_NE_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_nf_stat(void __iomem *csr_base_addr, u32 bank)
+{
+	return READ_CSR_NF_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_f_stat(void __iomem *csr_base_addr, u32 bank)
+{
+	return READ_CSR_F_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_c_stat(void __iomem *csr_base_addr, u32 bank)
+{
+	return READ_CSR_C_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_exp_stat(void __iomem *csr_base_addr, u32 bank)
+{
+	return READ_CSR_EXP_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank)
+{
+	return READ_CSR_EXP_INT_EN(csr_base_addr, bank);
+}
+
+static void write_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank,
+				 u32 value)
+{
+	WRITE_CSR_EXP_INT_EN(csr_base_addr, bank,
value); +} + +static u32 read_csr_ring_config(void __iomem *csr_base_addr, u32 bank, + u32 ring) +{ + return READ_CSR_RING_CONFIG(csr_base_addr, bank, ring); +} + +static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); +} + +static dma_addr_t read_csr_ring_base(void __iomem *csr_base_addr, u32 bank, + u32 ring) +{ + return READ_CSR_RING_BASE(csr_base_addr, bank, ring); +} + +static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, + dma_addr_t addr) +{ + WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); +} + +static u32 read_csr_int_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_EN(csr_base_addr, bank); +} + +static void write_csr_int_en(void __iomem *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_EN(csr_base_addr, bank, value); +} + +static u32 read_csr_int_flag(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_FLAG(csr_base_addr, bank); +} + +static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); +} + +static u32 read_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +{ + WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void write_csr_int_srcsel_w_val(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value); +} + +static u32 read_csr_int_col_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_COL_EN(csr_base_addr, bank); +} + +static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); +} + +static u32 read_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_COL_CTL(csr_base_addr, bank); +} + +static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); +} + +static u32 read_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank); +} + +static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); +} + +static u32 read_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank); +} + +static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); +} + +static u32 get_int_col_ctl_enable_mask(void) +{ + return ADF_RING_CSR_INT_COL_CTL_ENABLE; +} + +void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) +{ + csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; + csr_ops->read_csr_ring_head = read_csr_ring_head; + csr_ops->write_csr_ring_head = write_csr_ring_head; + csr_ops->read_csr_ring_tail = read_csr_ring_tail; + csr_ops->write_csr_ring_tail = write_csr_ring_tail; + csr_ops->read_csr_stat = read_csr_stat; + csr_ops->read_csr_uo_stat = read_csr_uo_stat; + csr_ops->read_csr_e_stat = read_csr_e_stat; + csr_ops->read_csr_ne_stat = read_csr_ne_stat; + csr_ops->read_csr_nf_stat = read_csr_nf_stat; + csr_ops->read_csr_f_stat = read_csr_f_stat; + csr_ops->read_csr_c_stat = read_csr_c_stat; + csr_ops->read_csr_exp_stat = read_csr_exp_stat; + 
csr_ops->read_csr_exp_int_en = read_csr_exp_int_en; + csr_ops->write_csr_exp_int_en = write_csr_exp_int_en; + csr_ops->read_csr_ring_config = read_csr_ring_config; + csr_ops->write_csr_ring_config = write_csr_ring_config; + csr_ops->read_csr_ring_base = read_csr_ring_base; + csr_ops->write_csr_ring_base = write_csr_ring_base; + csr_ops->read_csr_int_en = read_csr_int_en; + csr_ops->write_csr_int_en = write_csr_int_en; + csr_ops->read_csr_int_flag = read_csr_int_flag; + csr_ops->write_csr_int_flag = write_csr_int_flag; + csr_ops->read_csr_int_srcsel = read_csr_int_srcsel; + csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; + csr_ops->write_csr_int_srcsel_w_val = write_csr_int_srcsel_w_val; + csr_ops->read_csr_int_col_en = read_csr_int_col_en; + csr_ops->write_csr_int_col_en = write_csr_int_col_en; + csr_ops->read_csr_int_col_ctl = read_csr_int_col_ctl; + csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; + csr_ops->read_csr_int_flag_and_col = read_csr_int_flag_and_col; + csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; + csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en; + csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; + csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h new file mode 100644 index 0000000000000000000000000000000000000000..6f33e7c87c2c90e81a2bc5d1dd6549b1ce5ff27d --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN4_HW_CSR_DATA_H_ +#define ADF_GEN4_HW_CSR_DATA_H_ + +#include +#include "adf_accel_devices.h" + +#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL +#define ADF_RING_CSR_RING_CONFIG 0x1000 +#define ADF_RING_CSR_RING_LBASE 0x1040 +#define ADF_RING_CSR_RING_UBASE 0x1080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_STAT 0x140 +#define ADF_RING_CSR_UO_STAT 0x148 +#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_NE_STAT 0x150 +#define ADF_RING_CSR_NF_STAT 0x154 +#define ADF_RING_CSR_F_STAT 0x158 +#define ADF_RING_CSR_C_STAT 0x15C +#define ADF_RING_CSR_INT_FLAG_EN 0x16C +#define ADF_RING_CSR_INT_FLAG 0x170 +#define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_EXP_STAT 0x188 +#define ADF_RING_CSR_EXP_INT_EN 0x18C +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_CSR_ADDR_OFFSET 0x100000 +#define ADF_RING_BUNDLE_SIZE 0x2000 +#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C + +#define BUILD_RING_BASE_ADDR(addr, size) \ + ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2)) +#define READ_CSR_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_STAT) +#define READ_CSR_UO_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) 
 + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_UO_STAT)
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+	ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
+#define READ_CSR_NE_STAT(csr_base_addr, bank) \
+	ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NE_STAT)
+#define READ_CSR_NF_STAT(csr_base_addr, bank) \
+	ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NF_STAT)
+#define READ_CSR_F_STAT(csr_base_addr, bank) \
+	ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_F_STAT)
+#define READ_CSR_C_STAT(csr_base_addr, bank) \
+	ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_C_STAT)
+#define READ_CSR_EXP_STAT(csr_base_addr, bank) \
+	ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_STAT)
+#define READ_CSR_EXP_INT_EN(csr_base_addr, bank) \
+	ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_INT_EN)
+#define WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + \
+		   ADF_RING_CSR_EXP_INT_EN, value)
+#define READ_CSR_RING_CONFIG(csr_base_addr, bank, ring) \
+	ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + \
+		   ADF_RING_CSR_RING_CONFIG + ((ring) << 2))
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + \
+		   ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+	void __iomem *_csr_base_addr = csr_base_addr; \
+	u32 _bank = bank; \
+	u32 _ring = ring; \
+	dma_addr_t _value = value; \
+	u32 l_base = 0, u_base = 0; \
+	l_base = lower_32_bits(_value); \
+	u_base = upper_32_bits(_value); \
+	ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (_bank) + \
+		   ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \
+	ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (_bank) + \
+		   ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \
+} while (0)
+
+static inline u64 read_base(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+	u32 l_base, u_base;
+
+	/*
+	 * Use special IO wrapper for ring base as LBASE and UBASE are
+	 * not physically contiguous
+	 */
+	l_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) +
+			    ADF_RING_CSR_RING_LBASE + (ring << 2));
+	u_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) +
+			    ADF_RING_CSR_RING_UBASE + (ring << 2));
+
+	return (u64)u_base << 32 | (u64)l_base;
+}
+
+#define READ_CSR_RING_BASE(csr_base_addr, bank, ring) \
+	read_base((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, (bank), (ring))
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + \
+		   ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+		   ADF_RING_BUNDLE_SIZE * (bank) + \
+		   ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define READ_CSR_INT_EN(csr_base_addr, bank) \
+	
ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG_EN) +#define WRITE_CSR_INT_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_EN, (value)) +#define READ_CSR_INT_FLAG(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG, (value)) +#define READ_CSR_INT_SRCSEL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_SRCSEL) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK) +#define WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_SRCSEL, (value)) +#define READ_CSR_INT_COL_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_EN) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_COL_EN, (value)) +#define READ_CSR_INT_COL_CTL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_CTL) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) +#define READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_AND_COL) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, (value)) + +#define READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_SRV_ARB_EN) +#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_SRV_ARB_EN, (value)) + +void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 3148a62938fdd2de9ac30fc08be8147844fa29f4..41a0979e68c1774dc7a5ce4aafe915398fc05de9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -1,106 +1,138 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2020 Intel Corporation */ #include +#include #include "adf_accel_devices.h" +#include "adf_cfg_services.h" #include "adf_common_drv.h" +#include "adf_fw_config.h" #include "adf_gen4_hw_data.h" +#include "adf_gen4_pm.h" -static u64 build_csr_ring_base_addr(dma_addr_t addr, 
u32 size) +u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self) { - return BUILD_RING_BASE_ADDR(addr, size); + return ADF_GEN4_ACCELERATORS_MASK; } +EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask); -static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) +u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self) { - return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); + return ADF_GEN4_MAX_ACCELERATORS; } +EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels); -static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) +u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self) { - WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); -} + if (!self || !self->ae_mask) + return 0; -static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); + return hweight32(self->ae_mask); } +EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes); -static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) +u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self) { - WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); + return ADF_GEN4_PMISC_BAR; } +EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id); -static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) +u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self) { - return READ_CSR_E_STAT(csr_base_addr, bank); + return ADF_GEN4_ETR_BAR; } +EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id); -static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) +u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self) { - WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); + return ADF_GEN4_SRAM_BAR; } +EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id); -static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, - dma_addr_t addr) +enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self) { - WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); + return DEV_SKU_1; } +EXPORT_SYMBOL_GPL(adf_gen4_get_sku); -static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, - u32 value) +void adf_gen4_get_arb_info(struct arb_info *arb_info) { - WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); + arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG; + arb_info->arb_offset = ADF_GEN4_ARB_OFFSET; + arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET; } +EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info); -static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info) { - WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); + admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET; + admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET; + admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET; } +EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info); -static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value) +u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self) { - WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); + /* + * GEN4 uses KPT counter for HB + */ + return ADF_GEN4_KPT_COUNTER_FREQ; } +EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock); -static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, - u32 value) +void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev) { - WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); -} + struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR]; + void __iomem *csr = 
misc_bar->virt_addr; -static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); + /* Enable all in errsou3 except VFLR notification on host */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY); } +EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction); -static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, - u32 value) +void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev) { - WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); + void __iomem *addr; + + addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + + /* Enable bundle interrupts */ + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0); + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0); + + /* Enable misc interrupts */ + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0); } +EXPORT_SYMBOL_GPL(adf_gen4_enable_ints); -void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) +int adf_gen4_init_device(struct adf_accel_dev *accel_dev) { - csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; - csr_ops->read_csr_ring_head = read_csr_ring_head; - csr_ops->write_csr_ring_head = write_csr_ring_head; - csr_ops->read_csr_ring_tail = read_csr_ring_tail; - csr_ops->write_csr_ring_tail = write_csr_ring_tail; - csr_ops->read_csr_e_stat = read_csr_e_stat; - csr_ops->write_csr_ring_config = write_csr_ring_config; - csr_ops->write_csr_ring_base = write_csr_ring_base; - csr_ops->write_csr_int_flag = write_csr_int_flag; - csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; - csr_ops->write_csr_int_col_en = write_csr_int_col_en; - csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; - csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; - csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; + void __iomem *addr; + u32 status; + u32 csr; + int ret; + + addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + + /* Temporarily mask PM interrupt */ + csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2); + csr |= ADF_GEN4_PM_SOU; + ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr); + + /* Set DRV_ACTIVE bit to power up the device */ + ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE); + + /* Poll status register to make sure the device is powered up */ + ret = read_poll_timeout(ADF_CSR_RD, status, + status & ADF_GEN4_PM_INIT_STATE, + ADF_GEN4_PM_POLL_DELAY_US, + ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr, + ADF_GEN4_PM_STATUS); + if (ret) + dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); + + return ret; } -EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); +EXPORT_SYMBOL_GPL(adf_gen4_init_device); static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper, u32 *lower) @@ -135,6 +167,28 @@ void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer); +/* + * The vector routing table is used to select the MSI-X entry to use for each + * interrupt source. + * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts. + * The final entry corresponds to VF2PF or error interrupts. + * This vector table could be used to configure one MSI-X entry to be shared + * between multiple interrupt sources. + * + * The default routing is set to have a one to one correspondence between the + * interrupt source and the MSI-X entry used. 
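+ *
+ * As an illustrative sketch only (not part of this change), collapsing
+ * every source onto MSI-X entry 0 would amount to programming the same
+ * entry index into each slot of the table:
+ *
+ *	for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++)
+ *		ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), 0);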
+ */ +void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr; + int i; + + csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++) + ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i); +} +EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable); + int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev) { return 0; @@ -171,8 +225,7 @@ static int reset_ring_pair(void __iomem *csr, u32 bank_number) int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; - u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data); - void __iomem *csr; + void __iomem *csr = adf_get_etr_base(accel_dev); int ret; if (bank_number >= hw_data->num_banks) @@ -181,7 +234,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) dev_dbg(&GET_DEV(accel_dev), "ring pair reset for bank:%d\n", bank_number); - csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr; ret = reset_ring_pair(csr, bank_number); if (ret) dev_err(&GET_DEV(accel_dev), @@ -192,3 +244,428 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) return ret; } EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset); + +static const u32 thrd_to_arb_map_dcc[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0 +}; + +static const u16 rp_group_to_arb_mask[] = { + [RP_GROUP_0] = 0x5, + [RP_GROUP_1] = 0xA, +}; + +static bool is_single_service(int service_id) +{ + switch (service_id) { + case SVC_DC: + case SVC_SYM: + case SVC_ASYM: + return true; + case SVC_CY: + case SVC_CY2: + case SVC_DCC: + case SVC_ASYM_DC: + case SVC_DC_ASYM: + case SVC_SYM_DC: + case SVC_DC_SYM: + default: + return false; + } +} + +int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u32 *thd2arb_map = hw_data->thd_to_arb_map; + unsigned int ae_cnt, worker_obj_cnt, i, j; + unsigned long ae_mask, thds_mask; + int srv_id, rp_group; + u32 thd2arb_map_base; + u16 arb_mask; + + if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask || + !hw_data->get_num_aes || !hw_data->uof_get_num_objs || + !hw_data->uof_get_ae_mask) + return -EFAULT; + + srv_id = adf_get_service_enabled(accel_dev); + if (srv_id < 0) + return srv_id; + + ae_cnt = hw_data->get_num_aes(hw_data); + worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) - + ADF_GEN4_ADMIN_ACCELENGINES; + + if (srv_id == SVC_DCC) { + if (ae_cnt > ICP_QAT_HW_AE_DELIMITER) + return -EINVAL; + + memcpy(thd2arb_map, thrd_to_arb_map_dcc, + array_size(sizeof(*thd2arb_map), ae_cnt)); + return 0; + } + + for (i = 0; i < worker_obj_cnt; i++) { + ae_mask = hw_data->uof_get_ae_mask(accel_dev, i); + rp_group = hw_data->get_rp_group(accel_dev, ae_mask); + thds_mask = hw_data->get_ena_thd_mask(accel_dev, i); + thd2arb_map_base = 0; + + if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0) + return -EINVAL; + + if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR) + return -EINVAL; + + if (is_single_service(srv_id)) + arb_mask = rp_group_to_arb_mask[RP_GROUP_0] | + rp_group_to_arb_mask[RP_GROUP_1]; + else + arb_mask = rp_group_to_arb_mask[rp_group]; + + for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE) + thd2arb_map_base |= arb_mask << (j * 4); + + for_each_set_bit(j, &ae_mask, ae_cnt) + thd2arb_map[j] = 
thd2arb_map_base; + } + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map); + +u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { }; + unsigned int ae_mask, start_id, worker_obj_cnt, i; + u16 ring_to_svc_map; + int rp_group; + + if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask || + !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs) + return 0; + + /* If dcc, all rings handle compression requests */ + if (adf_get_service_enabled(accel_dev) == SVC_DCC) { + for (i = 0; i < RP_GROUP_COUNT; i++) + rps[i] = COMP; + goto set_mask; + } + + worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) - + ADF_GEN4_ADMIN_ACCELENGINES; + start_id = worker_obj_cnt - RP_GROUP_COUNT; + + for (i = start_id; i < worker_obj_cnt; i++) { + ae_mask = hw_data->uof_get_ae_mask(accel_dev, i); + rp_group = hw_data->get_rp_group(accel_dev, ae_mask); + if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0) + return 0; + + switch (hw_data->uof_get_obj_type(accel_dev, i)) { + case ADF_FW_SYM_OBJ: + rps[rp_group] = SYM; + break; + case ADF_FW_ASYM_OBJ: + rps[rp_group] = ASYM; + break; + case ADF_FW_DC_OBJ: + rps[rp_group] = COMP; + break; + default: + rps[rp_group] = 0; + break; + } + } + +set_mask: + ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | + rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; + + return ring_to_svc_map; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map); + +/* + * adf_gen4_bank_quiesce_coal_timer() - quiesce bank coalesced interrupt timer + * @accel_dev: Pointer to the device structure + * @bank_idx: Offset to the bank within this device + * @timeout_ms: Timeout in milliseconds for the operation + * + * This function tries to quiesce the coalesced interrupt timer of a bank if + * it has been enabled and triggered. 
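+ *
+ * The maximum wait is derived from the timer value programmed in the
+ * INT_COL_CTL CSR (with the enable bit cleared): that value is scaled
+ * by 256, doubled for margin and converted to microseconds using the
+ * device clock frequency, then capped at @timeout_ms. As a worked
+ * example, assuming a programmed value of 500 and a 500 MHz clock, the
+ * poll waits at most 2 * 500 * 256 / 500000000 s = 512 us.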
+ * + * Returns 0 on success, error code otherwise + * + */ +int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev, + u32 bank_idx, int timeout_ms) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); + void __iomem *csr_misc = adf_get_pmisc_base(accel_dev); + void __iomem *csr_etr = adf_get_etr_base(accel_dev); + u32 int_col_ctl, int_col_mask, int_col_en; + u32 e_stat, intsrc; + u64 wait_us; + int ret; + + if (timeout_ms < 0) + return -EINVAL; + + int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx); + int_col_mask = csr_ops->get_int_col_ctl_enable_mask(); + if (!(int_col_ctl & int_col_mask)) + return 0; + + int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx); + int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX); + + e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx); + if (!(~e_stat & int_col_en)) + return 0; + + wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC; + do_div(wait_us, hw_data->clock_frequency); + wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC); + dev_dbg(&GET_DEV(accel_dev), + "wait for bank %d - coalesced timer expires in %llu us (max=%u ms estat=0x%x intcolen=0x%x)\n", + bank_idx, wait_us, timeout_ms, e_stat, int_col_en); + + ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc, + ADF_COALESCED_POLL_DELAY_US, wait_us, true, + csr_misc, ADF_WQM_CSR_RPINTSOU(bank_idx)); + if (ret) + dev_warn(&GET_DEV(accel_dev), + "coalesced timer for bank %d expired (%llu us)\n", + bank_idx, wait_us); + + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_bank_quiesce_coal_timer); + +static int drain_bank(void __iomem *csr, u32 bank_number, int timeout_us) +{ + u32 status; + + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number), + ADF_WQM_CSR_RPRESETCTL_DRAIN); + + return read_poll_timeout(ADF_CSR_RD, status, + status & ADF_WQM_CSR_RPRESETSTS_STATUS, + ADF_RPRESET_POLL_DELAY_US, timeout_us, true, + csr, ADF_WQM_CSR_RPRESETSTS(bank_number)); +} + +void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev, + u32 bank_number) +{ + void __iomem *csr = adf_get_etr_base(accel_dev); + + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), + ADF_WQM_CSR_RPRESETSTS_STATUS); +} + +int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev, + u32 bank_number, int timeout_us) +{ + void __iomem *csr = adf_get_etr_base(accel_dev); + int ret; + + dev_dbg(&GET_DEV(accel_dev), "Drain bank %d\n", bank_number); + + ret = drain_bank(csr, bank_number, timeout_us); + if (ret) + dev_err(&GET_DEV(accel_dev), "Bank drain failed (timeout)\n"); + else + dev_dbg(&GET_DEV(accel_dev), "Bank drain successful\n"); + + return ret; +} + +static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base, + u32 bank, struct bank_state *state, u32 num_rings) +{ + u32 i; + + state->ringstat0 = ops->read_csr_stat(base, bank); + state->ringuostat = ops->read_csr_uo_stat(base, bank); + state->ringestat = ops->read_csr_e_stat(base, bank); + state->ringnestat = ops->read_csr_ne_stat(base, bank); + state->ringnfstat = ops->read_csr_nf_stat(base, bank); + state->ringfstat = ops->read_csr_f_stat(base, bank); + state->ringcstat0 = ops->read_csr_c_stat(base, bank); + state->iaintflagen = ops->read_csr_int_en(base, bank); + state->iaintflagreg = ops->read_csr_int_flag(base, bank); + state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank); + state->iaintcolen = ops->read_csr_int_col_en(base, bank); + state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank); + state->iaintflagandcolen = 
ops->read_csr_int_flag_and_col(base, bank); + state->ringexpstat = ops->read_csr_exp_stat(base, bank); + state->ringexpintenable = ops->read_csr_exp_int_en(base, bank); + state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank); + + for (i = 0; i < num_rings; i++) { + state->rings[i].head = ops->read_csr_ring_head(base, bank, i); + state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i); + state->rings[i].config = ops->read_csr_ring_config(base, bank, i); + state->rings[i].base = ops->read_csr_ring_base(base, bank, i); + } +} + +#define CHECK_STAT(op, expect_val, name, args...) \ +({ \ + u32 __expect_val = (expect_val); \ + u32 actual_val = op(args); \ + (__expect_val == actual_val) ? 0 : \ + (pr_err("QAT: Fail to restore %s register. Expected 0x%x, actual 0x%x\n", \ + name, __expect_val, actual_val), -EINVAL); \ +}) + +static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base, + u32 bank, struct bank_state *state, u32 num_rings, + int tx_rx_gap) +{ + u32 val, tmp_val, i; + int ret; + + for (i = 0; i < num_rings; i++) + ops->write_csr_ring_base(base, bank, i, state->rings[i].base); + + for (i = 0; i < num_rings; i++) + ops->write_csr_ring_config(base, bank, i, state->rings[i].config); + + for (i = 0; i < num_rings / 2; i++) { + int tx = i * (tx_rx_gap + 1); + int rx = tx + tx_rx_gap; + + ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head); + ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail); + + /* + * The TX ring head needs to be updated again to make sure that + * the HW will not consider the ring as full when it is empty + * and the correct state flags are set to match the recovered state. + */ + if (state->ringestat & BIT(tx)) { + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK; + ops->write_csr_int_srcsel_w_val(base, bank, val); + ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head); + } + + ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail); + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH; + ops->write_csr_int_srcsel_w_val(base, bank, val); + + ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head); + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH; + ops->write_csr_int_srcsel_w_val(base, bank, val); + + /* + * The RX ring tail needs to be updated again to make sure that + * the HW will not consider the ring as empty when it is full + * and the correct state flags are set to match the recovered state. + */ + if (state->ringfstat & BIT(rx)) + ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail); + } + + ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen); + ops->write_csr_int_en(base, bank, state->iaintflagen); + ops->write_csr_int_col_en(base, bank, state->iaintcolen); + ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0); + ops->write_csr_exp_int_en(base, bank, state->ringexpintenable); + ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl); + ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben); + + /* Check that all ring statuses match the saved state. 
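+	 * Each status CSR is read back via CHECK_STAT() and compared with
+	 * the snapshot taken at save time; the first mismatch logs the
+	 * expected and actual values and fails the restore with -EINVAL.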
+	 */
+ ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ tmp_val = ops->read_csr_exp_stat(base, bank);
+ val = state->ringexpstat;
+ if (tmp_val && !val) {
+ pr_err("QAT: Bank was restored with exception: 0x%x\n", tmp_val);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
+
+ bank_state_save(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);
+
+int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
+
+ ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "Unable to restore state of bank %d\n", bank_number);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 02d7a019ebf8aa1c530708192687fcc73b8c8dc2..8b10926cedbac2d507ce3fd3f0f16dcc8972bd45 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -1,99 +1,58 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
 /* Copyright(c) 2020 Intel Corporation */
-#ifndef ADF_GEN4_HW_CSR_DATA_H_
-#define ADF_GEN4_HW_CSR_DATA_H_
+#ifndef ADF_GEN4_HW_DATA_H_
+#define ADF_GEN4_HW_DATA_H_
+
+#include <linux/units.h>
 
 #include "adf_accel_devices.h"
 #include "adf_cfg_common.h"
 
-/* Transport access */
-#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
-#define ADF_RING_CSR_RING_CONFIG 0x1000
-#define ADF_RING_CSR_RING_LBASE 0x1040
-#define ADF_RING_CSR_RING_UBASE 0x1080
-#define ADF_RING_CSR_RING_HEAD 0x0C0
-#define ADF_RING_CSR_RING_TAIL 0x100
-#define ADF_RING_CSR_E_STAT 0x14C
-#define ADF_RING_CSR_INT_FLAG 0x170
-#define ADF_RING_CSR_INT_SRCSEL 0x174
-#define ADF_RING_CSR_INT_COL_CTL 0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
-#define ADF_RING_CSR_INT_COL_EN 0x17C
-#define ADF_RING_CSR_ADDR_OFFSET 0x100000
-#define ADF_RING_BUNDLE_SIZE 0x2000
-
-#define
BUILD_RING_BASE_ADDR(addr, size) \ - ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6) -#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2)) -#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2)) -#define READ_CSR_E_STAT(csr_base_addr, bank) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT) -#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) -#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ -do { \ - void __iomem *_csr_base_addr = csr_base_addr; \ - u32 _bank = bank; \ - u32 _ring = ring; \ - dma_addr_t _value = value; \ - u32 l_base = 0, u_base = 0; \ - l_base = lower_32_bits(_value); \ - u_base = upper_32_bits(_value); \ - ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (_bank) + \ - ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \ - ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (_bank) + \ - ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \ -} while (0) - -#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) -#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) -#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_FLAG, (value)) -#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK) -#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_COL_EN, (value)) -#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_COL_CTL, \ - ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) -#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_FLAG_AND_COL, (value)) +/* PCIe configuration space */ +#define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) +#define ADF_GEN4_SRAM_BAR 0 +#define ADF_GEN4_PMISC_BAR 1 +#define ADF_GEN4_ETR_BAR 2 + +/* Clocks frequency */ +#define ADF_GEN4_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) + +/* Physical function fuses */ +#define ADF_GEN4_FUSECTL0_OFFSET 0x2C8 +#define ADF_GEN4_FUSECTL1_OFFSET 0x2CC +#define ADF_GEN4_FUSECTL2_OFFSET 0x2D0 +#define ADF_GEN4_FUSECTL3_OFFSET 0x2D4 +#define ADF_GEN4_FUSECTL4_OFFSET 0x2D8 +#define ADF_GEN4_FUSECTL5_OFFSET 0x2DC + +/* Accelerators */ +#define ADF_GEN4_ACCELERATORS_MASK 0x1 +#define ADF_GEN4_MAX_ACCELERATORS 1 +#define 
ADF_GEN4_ADMIN_ACCELENGINES 1 + +/* MSIX interrupt */ +#define ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET 0x41A040 +#define ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET 0x41A044 +#define ADF_GEN4_SMIAPF_MASK_OFFSET 0x41A084 +#define ADF_GEN4_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04)) + +/* Bank and ring configuration */ +#define ADF_GEN4_MAX_RPS 64 +#define ADF_GEN4_NUM_RINGS_PER_BANK 2 +#define ADF_GEN4_NUM_BANKS_PER_VF 4 +#define ADF_GEN4_ETR_MAX_BANKS 64 +#define ADF_GEN4_RX_RINGS_OFFSET 1 +#define ADF_GEN4_TX_RINGS_MASK 0x1 /* Arbiter configuration */ -#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C +#define ADF_GEN4_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) +#define ADF_GEN4_ARB_OFFSET 0x0 +#define ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET 0x400 -#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_SRV_ARB_EN, (value)) +/* Admin Interface Reg Offset */ +#define ADF_GEN4_ADMINMSGUR_OFFSET 0x500574 +#define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578 +#define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970 /* Default ring mapping */ #define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \ @@ -118,10 +77,20 @@ do { \ #define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC) #define ADF_RPRESET_POLL_DELAY_US 20 #define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0) +#define ADF_WQM_CSR_RPRESETCTL_DRAIN BIT(2) #define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3)) #define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0) #define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4) +/* Ring interrupt */ +#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2) +#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0) +#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4 +#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC) +#define ADF_COALESCED_POLL_DELAY_US 1000 +#define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12)) +#define ADF_WQM_CSR_RP_IDX_RX 1 + /* Error source registers */ #define ADF_GEN4_ERRSOU0 (0x41A200) #define ADF_GEN4_ERRSOU1 (0x41A204) @@ -139,7 +108,76 @@ do { \ /* Number of heartbeat counter pairs */ #define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE +/* Rate Limiting */ +#define ADF_GEN4_RL_R2L_OFFSET 0x508000 +#define ADF_GEN4_RL_L2C_OFFSET 0x509000 +#define ADF_GEN4_RL_C2S_OFFSET 0x508818 +#define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800 +#define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804 + +/* Arbiter threads mask with error value */ +#define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0) + +/* PF2VM communication channel */ +#define ADF_GEN4_PF2VM_OFFSET(i) (0x40B010 + (i) * 0x20) +#define ADF_GEN4_VM2PF_OFFSET(i) (0x40B014 + (i) * 0x20) +#define ADF_GEN4_VINTMSKPF2VM_OFFSET(i) (0x40B00C + (i) * 0x20) +#define ADF_GEN4_VINTSOUPF2VM_OFFSET(i) (0x40B008 + (i) * 0x20) +#define ADF_GEN4_VINTMSK_OFFSET(i) (0x40B004 + (i) * 0x20) +#define ADF_GEN4_VINTSOU_OFFSET(i) (0x40B000 + (i) * 0x20) + +struct adf_gen4_vfmig { + struct adf_mstate_mgr *mstate_mgr; + bool bank_stopped[ADF_GEN4_NUM_BANKS_PER_VF]; +}; + void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); -void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); + +enum icp_qat_gen4_slice_mask { + ICP_ACCEL_GEN4_MASK_CIPHER_SLICE = BIT(0), + ICP_ACCEL_GEN4_MASK_AUTH_SLICE = BIT(1), + ICP_ACCEL_GEN4_MASK_PKE_SLICE = BIT(2), + ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE = BIT(3), + ICP_ACCEL_GEN4_MASK_UCS_SLICE = BIT(4), + ICP_ACCEL_GEN4_MASK_EIA3_SLICE = BIT(5), + ICP_ACCEL_GEN4_MASK_SMX_SLICE = BIT(7), + ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE = 
BIT(8), + ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE = BIT(9), +}; + +enum adf_gen4_rp_groups { + RP_GROUP_0, + RP_GROUP_1, + RP_GROUP_COUNT +}; + +void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev); +void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev); +u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self); +void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info); +void adf_gen4_get_arb_info(struct arb_info *arb_info); +u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self); +u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self); +u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self); +u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self); +u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self); +enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self); +u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self); +int adf_gen4_init_device(struct adf_accel_dev *accel_dev); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); +void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); +void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); +int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev); +u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev); +int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev, + u32 bank_idx, int timeout_ms); +int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev, + u32 bank_number, int timeout_us); +void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev, + u32 bank_number); +int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state); +int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, + u32 bank_number, struct bank_state *state); + #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c index 8e8efe93f3ee5c99ea6590e4fd9037090d55d8ae..21474d402d09dee26eda833d1d7db2bf853c1e47 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c @@ -6,12 +6,10 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_gen4_pfvf.h" +#include "adf_gen4_hw_data.h" #include "adf_pfvf_pf_proto.h" #include "adf_pfvf_utils.h" -#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20)) -#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20)) - /* VF2PF interrupt source registers */ #define ADF_4XXX_VM2PF_SOU 0x41A180 #define ADF_4XXX_VM2PF_MSK 0x41A1C0 @@ -29,12 +27,12 @@ static const struct pfvf_csr_format csr_gen4_fmt = { static u32 adf_gen4_pf_get_pf2vf_offset(u32 i) { - return ADF_4XXX_PF2VM_OFFSET(i); + return ADF_GEN4_PF2VM_OFFSET(i); } static u32 adf_gen4_pf_get_vf2pf_offset(u32 i) { - return ADF_4XXX_VM2PF_OFFSET(i); + return ADF_GEN4_VM2PF_OFFSET(i); } static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c index 34c6cd8e27c0b58d16db092e0a1cbc875c0dd1e6..5dafd9a270dbd87f261a6d6ea228326dfcb6e78b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c @@ -2,7 +2,10 @@ /* Copyright(c) 2022 Intel Corporation */ #include #include +#include + #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_gen4_pm.h" #include "adf_cfg_strings.h" @@ 
-10,11 +13,6 @@ #include "adf_gen4_hw_data.h" #include "adf_cfg.h" -enum qat_pm_host_msg { - PM_NO_CHANGE = 0, - PM_SET_MIN, -}; - struct adf_gen4_pm_data { struct work_struct pm_irq_work; struct adf_accel_dev *accel_dev; @@ -25,6 +23,7 @@ static int send_host_msg(struct adf_accel_dev *accel_dev) { char pm_idle_support_cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {}; void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; bool pm_idle_support; u32 msg; int ret; @@ -39,6 +38,11 @@ static int send_host_msg(struct adf_accel_dev *accel_dev) if (ret) pm_idle_support = true; + if (pm_idle_support) + pm->host_ack_counter++; + else + pm->host_nack_counter++; + /* Send HOST_MSG */ msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, pm_idle_support ? PM_SET_MIN : PM_NO_CHANGE); @@ -59,17 +63,27 @@ static void pm_bh_handler(struct work_struct *work) container_of(work, struct adf_gen4_pm_data, pm_irq_work); struct adf_accel_dev *accel_dev = pm_data->accel_dev; void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; u32 pm_int_sts = pm_data->pm_int_sts; u32 val; /* PM Idle interrupt */ if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) { + pm->idle_irq_counters++; /* Issue host message to FW */ if (send_host_msg(accel_dev)) dev_warn_ratelimited(&GET_DEV(accel_dev), "Failed to send host msg to FW\n"); } + /* PM throttle interrupt */ + if (pm_int_sts & ADF_GEN4_PM_THR_STS) + pm->throttle_irq_counters++; + + /* PM fw interrupt */ + if (pm_int_sts & ADF_GEN4_PM_FW_INT_STS) + pm->fw_irq_counters++; + /* Clear interrupt status */ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts); @@ -129,6 +143,9 @@ int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev) if (ret) return ret; + /* Initialize PM internal data */ + adf_gen4_init_dev_pm_data(accel_dev); + /* Enable default PM interrupts: IDLE, THROTTLE */ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT); val |= ADF_GEN4_PM_INT_EN_DEFAULT; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h index c2768762cca3b683513936bdd1be1d40586d4bdc..a49352b79a7adff1b14eea0880f463d2010df7ff 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h @@ -3,7 +3,14 @@ #ifndef ADF_GEN4_PM_H #define ADF_GEN4_PM_H -#include "adf_accel_devices.h" +#include + +struct adf_accel_dev; + +enum qat_pm_host_msg { + PM_NO_CHANGE = 0, + PM_SET_MIN, +}; /* Power management registers */ #define ADF_GEN4_PM_HOST_MSG (0x50A01C) @@ -39,7 +46,48 @@ #define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7) #define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1) +/* PM CSRs fields masks */ +#define ADF_GEN4_PM_DOMAIN_POWER_GATED_MASK GENMASK(15, 0) +#define ADF_GEN4_PM_SSM_PM_ENABLE_MASK GENMASK(15, 0) +#define ADF_GEN4_PM_IDLE_FILTER_MASK GENMASK(5, 3) +#define ADF_GEN4_PM_IDLE_ENABLE_MASK BIT(2) +#define ADF_GEN4_PM_ENABLE_PM_MASK BIT(21) +#define ADF_GEN4_PM_ENABLE_PM_IDLE_MASK BIT(22) +#define ADF_GEN4_PM_ENABLE_DEEP_PM_IDLE_MASK BIT(23) +#define ADF_GEN4_PM_CURRENT_WP_MASK GENMASK(19, 11) +#define ADF_GEN4_PM_CPM_PM_STATE_MASK GENMASK(22, 20) +#define ADF_GEN4_PM_PENDING_WP_MASK GENMASK(31, 23) +#define ADF_GEN4_PM_THR_VALUE_MASK GENMASK(6, 4) +#define ADF_GEN4_PM_MIN_PWR_ACK_MASK BIT(7) +#define ADF_GEN4_PM_MIN_PWR_ACK_PENDING_MASK BIT(17) +#define ADF_GEN4_PM_CPR_ACTIVE_COUNT_MASK BIT(0) +#define ADF_GEN4_PM_CPR_MANAGED_COUNT_MASK BIT(0) +#define ADF_GEN4_PM_XLT_ACTIVE_COUNT_MASK BIT(1) +#define 
ADF_GEN4_PM_XLT_MANAGED_COUNT_MASK BIT(1) +#define ADF_GEN4_PM_DCPR_ACTIVE_COUNT_MASK GENMASK(3, 2) +#define ADF_GEN4_PM_DCPR_MANAGED_COUNT_MASK GENMASK(3, 2) +#define ADF_GEN4_PM_PKE_ACTIVE_COUNT_MASK GENMASK(8, 4) +#define ADF_GEN4_PM_PKE_MANAGED_COUNT_MASK GENMASK(8, 4) +#define ADF_GEN4_PM_WAT_ACTIVE_COUNT_MASK GENMASK(13, 9) +#define ADF_GEN4_PM_WAT_MANAGED_COUNT_MASK GENMASK(13, 9) +#define ADF_GEN4_PM_WCP_ACTIVE_COUNT_MASK GENMASK(18, 14) +#define ADF_GEN4_PM_WCP_MANAGED_COUNT_MASK GENMASK(18, 14) +#define ADF_GEN4_PM_UCS_ACTIVE_COUNT_MASK GENMASK(20, 19) +#define ADF_GEN4_PM_UCS_MANAGED_COUNT_MASK GENMASK(20, 19) +#define ADF_GEN4_PM_CPH_ACTIVE_COUNT_MASK GENMASK(24, 21) +#define ADF_GEN4_PM_CPH_MANAGED_COUNT_MASK GENMASK(24, 21) +#define ADF_GEN4_PM_ATH_ACTIVE_COUNT_MASK GENMASK(28, 25) +#define ADF_GEN4_PM_ATH_MANAGED_COUNT_MASK GENMASK(28, 25) + int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev); bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev); +#ifdef CONFIG_DEBUG_FS +void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev); +#else +static inline void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev) +{ +} +#endif /* CONFIG_DEBUG_FS */ + #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..ee0b5079de3ec95756c3b23af51ab4a75db62ea7 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_admin.h" +#include "adf_common_drv.h" +#include "adf_gen4_pm.h" +#include "icp_qat_fw_init_admin.h" + +/* + * This is needed because a variable is used to index the mask at + * pm_scnprint_table(), making it not compile time constant, so the compile + * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled. 
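+ *
+ * It mirrors FIELD_GET() semantics while accepting a runtime mask; for
+ * example, with _mask = GENMASK(22, 20) and _reg = 0x300000 it returns
+ * (0x300000 & 0x700000) >> 20 = 0x3.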
+ */ +#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1)) + +#define PM_INFO_MEMBER_OFF(member) \ + (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32)) + +#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \ +{ \ + .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \ + .key = __stringify(_field_), \ + .field_mask = _mask_, \ +} + +#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \ + PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0)) + +#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \ + PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN4_PM_##_field_##_MASK) + +#define PM_INFO_MAX_KEY_LEN 21 + +struct pm_status_row { + int reg_offset; + u32 field_mask; + const char *key; +}; + +static struct pm_status_row pm_fuse_rows[] = { + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM), + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE), + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE), +}; + +static struct pm_status_row pm_info_rows[] = { + PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE), + PM_INFO_REGSET_ENTRY(pm.status, PENDING_WP), + PM_INFO_REGSET_ENTRY(pm.status, CURRENT_WP), + PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_ENABLE), + PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_FILTER), + PM_INFO_REGSET_ENTRY(pm.main, MIN_PWR_ACK), + PM_INFO_REGSET_ENTRY(pm.thread, MIN_PWR_ACK_PENDING), + PM_INFO_REGSET_ENTRY(pm.main, THR_VALUE), +}; + +static struct pm_status_row pm_ssm_rows[] = { + PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE), + PM_INFO_REGSET_ENTRY32(ssm.active_constraint, ACTIVE_CONSTRAINT), + PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWER_GATED), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, ATH_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, CPH_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, PKE_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, CPR_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, DCPR_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, UCS_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, XLT_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, WAT_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, WCP_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, ATH_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, CPH_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, PKE_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, CPR_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, DCPR_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, UCS_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, XLT_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WAT_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WCP_MANAGED_COUNT), +}; + +static struct pm_status_row pm_log_rows[] = { + PM_INFO_REGSET_ENTRY32(event_counters.host_msg, HOST_MSG_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.sys_pm, SYS_PM_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.local_ssm, SSM_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.timer, TIMER_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.unknown, UNKNOWN_EVENT_COUNT), +}; + +static struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = { + PM_INFO_REGSET_ENTRY32(event_log[0], EVENT0), + PM_INFO_REGSET_ENTRY32(event_log[1], EVENT1), + PM_INFO_REGSET_ENTRY32(event_log[2], EVENT2), + PM_INFO_REGSET_ENTRY32(event_log[3], EVENT3), + PM_INFO_REGSET_ENTRY32(event_log[4], EVENT4), + PM_INFO_REGSET_ENTRY32(event_log[5], EVENT5), + 
PM_INFO_REGSET_ENTRY32(event_log[6], EVENT6), + PM_INFO_REGSET_ENTRY32(event_log[7], EVENT7), +}; + +static struct pm_status_row pm_csrs_rows[] = { + PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT), + PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS), + PM_INFO_REGSET_ENTRY32(pm.main, CPM_PM_MASTER_FW), + PM_INFO_REGSET_ENTRY32(pm.pwrreq, CPM_PM_PWRREQ), +}; + +static int pm_scnprint_table(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, int table_len, + bool lowercase) +{ + char key[PM_INFO_MAX_KEY_LEN]; + int wr = 0; + int i; + + for (i = 0; i < table_len; i++) { + if (lowercase) + string_lower(key, table[i].key); + else + string_upper(key, table[i].key); + + wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key, + field_get(table[i].field_mask, + pm_info_regs[table[i].reg_offset])); + } + + return wr; +} + +static int pm_scnprint_table_upper_keys(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, + int table_len) +{ + return pm_scnprint_table(buff, table, pm_info_regs, buff_size, + table_len, false); +} + +static int pm_scnprint_table_lower_keys(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, + int table_len) +{ + return pm_scnprint_table(buff, table, pm_info_regs, buff_size, + table_len, true); +} + +static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE); + +static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev, + char __user *buf, size_t count, + loff_t *pos) +{ + void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; + struct icp_qat_fw_init_admin_pm_info *pm_info; + dma_addr_t p_state_addr; + u32 *pm_info_regs; + char *pm_kv; + int len = 0; + u32 val; + int ret; + + pm_info = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pm_info) + return -ENOMEM; + + pm_kv = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pm_kv) { + ret = -ENOMEM; + goto out_free; + } + + p_state_addr = dma_map_single(&GET_DEV(accel_dev), pm_info, PAGE_SIZE, + DMA_FROM_DEVICE); + ret = dma_mapping_error(&GET_DEV(accel_dev), p_state_addr); + if (ret) + goto out_free; + + /* Query PM info from QAT FW */ + ret = adf_get_pm_info(accel_dev, p_state_addr, PAGE_SIZE); + dma_unmap_single(&GET_DEV(accel_dev), p_state_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + if (ret) + goto out_free; + + pm_info_regs = (u32 *)pm_info; + + /* Fusectl related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- PM Fuse info ---------\n"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_fuse_rows)); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "max_pwrreq: %#x\n", + pm_info->max_pwrreq); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "min_pwrreq: %#x\n", + pm_info->min_pwrreq); + + /* PM related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "------------ PM Info ------------\n"); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "power_level: %s\n", + pm_info->pwr_state == PM_SET_MIN ? 
"min" : "max"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_info_rows)); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: STATIC\n"); + + /* SSM related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- SSM_PM Info ----------\n"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_ssm_rows)); + + /* Log related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "------------- PM Log -------------\n"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_log_rows)); + + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_event_rows)); + + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "idle_irq_count: %#x\n", + pm->idle_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "fw_irq_count: %#x\n", + pm->fw_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "throttle_irq_count: %#x\n", pm->throttle_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_ack_count: %#x\n", + pm->host_ack_counter); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_nack_count: %#x\n", + pm->host_nack_counter); + + /* CSRs content */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- HW PM CSRs -----------\n"); + len += pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_csrs_rows)); + + val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "CPM_PM_HOST_MSG: %#x\n", val); + val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "CPM_PM_INTERRUPT: %#x\n", val); + ret = simple_read_from_buffer(buf, count, pos, pm_kv, len); + +out_free: + kfree(pm_info); + kfree(pm_kv); + return ret; +} + +void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev) +{ + accel_dev->power_management.print_pm_status = adf_gen4_print_pm_status; + accel_dev->power_management.present = true; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c new file mode 100644 index 0000000000000000000000000000000000000000..2dd3772bf58a6ce673587bdae15c0f751e0329d6 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -0,0 +1,1564 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include "adf_common_drv.h" +#include "adf_gen4_hw_data.h" +#include "adf_gen4_ras.h" +#include "adf_sysfs_ras_counters.h" + +#define BITS_PER_REG(_n_) (sizeof(_n_) * BITS_PER_BYTE) + +static void enable_errsou_reporting(void __iomem *csr) +{ + /* Enable correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, 0); + + /* Enable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, 0); + + /* + * Enable uncorrectable error reporting in ERRSOU2 + * but disable PM interrupt and CFC attention interrupt by default + */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, + ADF_GEN4_ERRSOU2_PM_INT_BIT | + ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK); + + /* + * Enable uncorrectable error reporting in ERRSOU3 + * but disable RLT error interrupt and VFLR notify interrupt by default + */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, + ADF_GEN4_ERRSOU3_RLTERROR_BIT | + ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT); +} + +static void disable_errsou_reporting(void __iomem *csr) +{ + u32 
val = 0; + + /* Disable correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, ADF_GEN4_ERRSOU0_BIT); + + /* Disable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, ADF_GEN4_ERRSOU1_BITMASK); + + /* Disable uncorrectable error reporting in ERRSOU2 */ + val = ADF_CSR_RD(csr, ADF_GEN4_ERRMSK2); + val |= ADF_GEN4_ERRSOU2_DIS_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, val); + + /* Disable uncorrectable error reporting in ERRSOU3 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_ERRSOU3_BITMASK); +} + +static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask; + + /* Enable Acceleration Engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, ae_mask); + + /* Enable Acceleration Engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, ae_mask); +} + +static void disable_ae_error_reporting(void __iomem *csr) +{ + /* Disable Acceleration Engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, 0); + + /* Disable Acceleration Engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, 0); +} + +static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Enable HI CPP Agents Command Parity Error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, + err_mask->cppagentcmdpar_mask); + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL, + ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK); +} + +static void disable_cpp_error_reporting(void __iomem *csr) +{ + /* Disable HI CPP Agents Command Parity Error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, 0); + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL, + ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK); +} + +static void enable_ti_ri_error_reporting(void __iomem *csr) +{ + u32 reg; + + /* Enable RI Memory error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, + ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK | + ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK); + + /* Enable IOSF Primary Command Parity error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, ADF_GEN4_RIMISCSTS_BIT); + + /* Enable TI Internal Memory Parity Error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, 0); + + /* Enable error handling in RI, TI CPP interface control registers */ + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, ADF_GEN4_RICPPINTCTL_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, ADF_GEN4_TICPPINTCTL_BITMASK); + + /* + * Enable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL); + reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK; + reg |= ADF_GEN4_TIMISCCTL_BIT; + ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg); +} + +static void disable_ti_ri_error_reporting(void __iomem *csr) +{ + u32 reg; + + /* Disable RI Memory error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, 0); + + /* Disable IOSF Primary Command Parity error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, 0); + + /* Disable TI Internal Memory Parity Error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, + 
ADF_GEN4_TI_CI_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, + ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, + ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, + ADF_GEN4_TI_CD_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, + ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK); + + /* Disable error handling in RI, TI CPP interface control registers */ + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, 0); + + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, 0); + + /* + * Disable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL); + reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg); +} + +static void enable_rf_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Enable RF parity error in Shared RAM */ + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, 0); +} + +static void disable_rf_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Disable RF Parity Error reporting in Shared RAM */ + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, + ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, + err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, + err_mask->parerr_pke_mask); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, + err_mask->parerr_wat_wcp_mask); +} + +static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 val = 0; + + /* Enable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, 0); + + /* Enable shared memory error detection & correction */ + val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN); + val |= err_mask->ssmfeatren_mask; + ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val); + + /* Enable SER detection in SER_err_ssmsh register */ + ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, + ADF_GEN4_SER_EN_SSMSH_BITMASK); + + /* Enable SSM soft parity error */ + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, 0); + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, 0); + + /* Enable slice hang interrupt reporting */ + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, 0); +} + +static void disable_ssm_error_reporting(struct 
adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 val = 0; + + /* Disable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, + ADF_GEN4_INTMASKSSM_BITMASK); + + /* Disable shared memory error detection & correction */ + val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN); + val &= ADF_GEN4_SSMFEATREN_DIS_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val); + + /* Disable SER detection in SER_err_ssmsh register */ + ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, 0); + + /* Disable SSM soft parity error */ + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, + err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, + err_mask->parerr_pke_mask); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, + err_mask->parerr_wat_wcp_mask); + + /* Disable slice hang interrupt reporting */ + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, + err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, + err_mask->parerr_pke_mask); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, + err_mask->parerr_wat_wcp_mask); +} + +static void enable_aram_error_reporting(void __iomem *csr) +{ + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, + ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, + ADF_GEN4_REG_ARAMCERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, + ADF_GEN4_REG_ARAMUERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, + ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK); +} + +static void disable_aram_error_reporting(void __iomem *csr) +{ + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, 0); +} + +static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) +{ + void __iomem *aram_csr = adf_get_aram_base(accel_dev); + void __iomem *csr = adf_get_pmisc_base(accel_dev); + + enable_errsou_reporting(csr); + enable_ae_error_reporting(accel_dev, csr); + enable_cpp_error_reporting(accel_dev, csr); + enable_ti_ri_error_reporting(csr); + enable_rf_error_reporting(accel_dev, csr); + enable_ssm_error_reporting(accel_dev, csr); + enable_aram_error_reporting(aram_csr); +} + +static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) +{ + void __iomem *aram_csr = adf_get_aram_base(accel_dev); + void __iomem *csr = adf_get_pmisc_base(accel_dev); + + disable_errsou_reporting(csr); + disable_ae_error_reporting(csr); + disable_cpp_error_reporting(csr); + disable_ti_ri_error_reporting(csr); + disable_rf_error_reporting(accel_dev, csr); + disable_ssm_error_reporting(accel_dev, csr); + disable_aram_error_reporting(aram_csr); +} + +static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 aecorrerr = ADF_CSR_RD(csr, ADF_GEN4_HIAECORERRLOG_CPP0); + + aecorrerr &= GET_HW_DATA(accel_dev)->ae_mask; + + dev_warn(&GET_DEV(accel_dev), + "Correctable error detected in AE: 0x%x\n", + aecorrerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + + /* Clear interrupt from ERRSOU0 */ + 
ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOG_CPP0, aecorrerr); +} + +static bool adf_handle_cpp_aeunc(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 aeuncorerr; + + if (!(errsou & ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT)) + return false; + + aeuncorerr = ADF_CSR_RD(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0); + aeuncorerr &= GET_HW_DATA(accel_dev)->ae_mask; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error detected in AE: 0x%x\n", + aeuncorerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0, aeuncorerr); + + return false; +} + +static bool adf_handle_cppcmdparerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 cmdparerr; + + if (!(errsou & ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT)) + return false; + + cmdparerr = ADF_CSR_RD(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG); + cmdparerr &= err_mask->cppagentcmdpar_mask; + + dev_err(&GET_DEV(accel_dev), + "HI CPP agent command parity error: 0x%x\n", + cmdparerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG, cmdparerr); + + return true; +} + +static bool adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 rimem_parerr_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT)) + return false; + + rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN4_RIMEM_PARERR_STS); + rimem_parerr_sts &= ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK | + ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK; + + if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "RI Memory Parity uncorrectable error: 0x%x\n", + rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "RI Memory Parity fatal error: 0x%x\n", + rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + reset_required = true; + } + + ADF_CSR_WR(csr, ADF_GEN4_RIMEM_PARERR_STS, rimem_parerr_sts); + + return reset_required; +} + +static bool adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_ci_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CI_PAR_STS); + ti_ci_par_sts &= ADF_GEN4_TI_CI_PAR_STS_BITMASK; + + if (ti_ci_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI Memory Parity Error: 0x%x\n", ti_ci_par_sts); + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_STS, ti_ci_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + return false; +} + +static bool adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_pullfub_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS); + ti_pullfub_par_sts &= ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK; + + if (ti_pullfub_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI Pull Parity Error: 0x%x\n", ti_pullfub_par_sts); + + ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS, + ti_pullfub_par_sts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + return false; +} + +static bool adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + 
u32 ti_pushfub_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS); + ti_pushfub_par_sts &= ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK; + + if (ti_pushfub_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI Push Parity Error: 0x%x\n", ti_pushfub_par_sts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS, + ti_pushfub_par_sts); + } + + return false; +} + +static bool adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_cd_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CD_PAR_STS); + ti_cd_par_sts &= ADF_GEN4_TI_CD_PAR_STS_BITMASK; + + if (ti_cd_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI CD Parity Error: 0x%x\n", ti_cd_par_sts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_STS, ti_cd_par_sts); + } + + return false; +} + +static bool adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_trnsb_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_TRNSB_PAR_STS); + ti_trnsb_par_sts &= ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK; + + if (ti_trnsb_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI TRNSB Parity Error: 0x%x\n", ti_trnsb_par_sts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_STS, ti_trnsb_par_sts); + } + + return false; +} + +static bool adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 rimiscsts; + + if (!(errsou & ADF_GEN4_ERRSOU1_RIMISCSTS_BIT)) + return false; + + rimiscsts = ADF_CSR_RD(csr, ADF_GEN4_RIMISCSTS); + rimiscsts &= ADF_GEN4_RIMISCSTS_BIT; + + dev_err(&GET_DEV(accel_dev), + "Command Parity error detected on IOSFP: 0x%x\n", + rimiscsts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_RIMISCSTS, rimiscsts); + + return true; +} + +static void adf_gen4_process_errsou1(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou, + bool *reset_required) +{ + *reset_required |= adf_handle_cpp_aeunc(accel_dev, csr, errsou); + *reset_required |= adf_handle_cppcmdparerr(accel_dev, csr, errsou); + *reset_required |= adf_handle_ri_mem_par_err(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_ci_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_pullfub_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_pushfub_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_cd_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_trnsb_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_iosfp_cmd_parerr(accel_dev, csr, errsou); +} + +static bool adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_UERRSSMSH); + reg &= ADF_GEN4_UERRSSMSH_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error on ssm shared memory: 0x%x\n", + reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_UERRSSMSH, reg); + + return false; +} + +static bool adf_handle_cerrssmsh(struct
adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_CERRSSMSH); + reg &= ADF_GEN4_CERRSSMSH_ERROR_BIT; + + dev_warn(&GET_DEV(accel_dev), + "Correctable error on ssm shared memory: 0x%x\n", + reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + + ADF_CSR_WR(csr, ADF_GEN4_CERRSSMSH, reg); + + return false; +} + +static bool adf_handle_pperr_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_PPERR_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_PPERR); + reg &= ADF_GEN4_PPERR_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error CPP transaction on memory target: 0x%x\n", + reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_PPERR, reg); + + return false; +} + +static void adf_poll_slicehang_csr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 slice_hang_offset, + char *slice_name) +{ + u32 slice_hang_reg = ADF_CSR_RD(csr, slice_hang_offset); + + if (!slice_hang_reg) + return; + + dev_err(&GET_DEV(accel_dev), + "Slice %s hang error encountered\n", slice_name); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); +} + +static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT)) + return false; + + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_ATH_CPH, "ath_cph"); + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_CPR_XLT, "cpr_xlt"); + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS, "dcpr_ucs"); + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_PKE, "pke"); + + if (err_mask->parerr_wat_wcp_mask) + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_WAT_WCP, + "wat_wcp"); + + return false; +} + +static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + bool reset_required = false; + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error ATH_CPH: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error CPR_XLT: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error DCPR_UCS: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal
error PKE: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE, reg); + + reset_required = true; + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error WAT_WCP: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP, reg); + + reset_required = true; + } + } + + return reset_required; +} + +static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err ATH_CPH: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err CPR_XLT: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err DCPR_UCS: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err PKE: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE, reg); + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err WAT_WCP: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP, reg); + } + } + + return false; +} + +static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + bool reset_required = false; + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error ATH_CPH: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error CPR_XLT: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error DCPR_UCS: 
0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error PKE: 0x%x\n", + reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE, reg); + + reset_required = true; + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error WAT_WCP: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP, reg); + + reset_required = true; + } + } + + return reset_required; +} + +static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err ATH_CPH: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err CPR_XLT: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err DCPR_UCS: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err PKE: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE, reg); + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err WAT_WCP: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP, + reg); + } + } + + return false; +} + +static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + bool reset_required; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT)) + return false; + + reset_required = adf_handle_spp_pullcmd_err(accel_dev, csr); + reset_required |= adf_handle_spp_pulldata_err(accel_dev, csr); + reset_required |= adf_handle_spp_pushcmd_err(accel_dev, csr); + reset_required |= adf_handle_spp_pushdata_err(accel_dev, csr); + + return reset_required; +} + +static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg, bits_num = BITS_PER_REG(reg); + bool reset_required = false; + unsigned long errs_bits; + u32 bit_iterator; + + if (!(iastatssm & 
ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); + reg &= ADF_GEN4_SSMCPPERR_FATAL_BITMASK | ADF_GEN4_SSMCPPERR_UNCERR_BITMASK; + if (reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Fatal SSM CPP parity error: 0x%x\n", reg); + + errs_bits = reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + reset_required = true; + } + + if (reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "non-Fatal SSM CPP parity error: 0x%x\n", reg); + errs_bits = reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK; + + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + } + + ADF_CSR_WR(csr, ADF_GEN4_SSMCPPERR, reg); + + return reset_required; +} + +static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); + reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, + reg); + } + } + + dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported"); + + return false; +} + +static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg, bits_num = BITS_PER_REG(reg); + bool reset_required = false; + unsigned long errs_bits; + u32 bit_iterator; + + if (!(iastatssm & (ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT))) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); + reg &= ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK | + ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK | + ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK; + if (reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Fatal SER_SSMSH_ERR: 0x%x\n", reg); + + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { 
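+ /* one fatal RAS counter increment per error bit set in the status */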
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + + reset_required = true; + } + + if (reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "non-fatal SER_SSMSH_ERR: 0x%x\n", reg); + + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + } + + if (reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK) { + dev_warn(&GET_DEV(accel_dev), + "Correctable SER_SSMSH_ERR: 0x%x\n", reg); + + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + } + + ADF_CSR_WR(csr, ADF_GEN4_SER_ERR_SSMSH, reg); + + return reset_required; +} + +static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN4_IAINTSTATSSM); + bool reset_required; + + iastatssm &= ADF_GEN4_IAINTSTATSSM_BITMASK; + if (!iastatssm) + return false; + + reset_required = adf_handle_uerrssmsh(accel_dev, csr, iastatssm); + reset_required |= adf_handle_cerrssmsh(accel_dev, csr, iastatssm); + reset_required |= adf_handle_pperr_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm); + reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm); + + ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm); + + return reset_required; +} + +static bool adf_handle_exprpssmcmpr(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMCPR); + + reg &= ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK; + if (!reg) + return false; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM CMP: 0x%x", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMCPR, reg); + + return false; +} + +static bool adf_handle_exprpssmxlt(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMXLT); + + reg &= ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK | + ADF_GEN4_EXPRPSSMXLT_CERR_BIT; + if (!reg) + return false; + + if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM XLT: 0x%x", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN4_EXPRPSSMXLT_CERR_BIT) { + dev_warn(&GET_DEV(accel_dev), + "Correctable error exception in SSM XLT: 0x%x", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMXLT, reg); + + return false; +} + +static bool adf_handle_exprpssmdcpr(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg; + int i; + + for (i = 0; i < ADF_GEN4_DCPR_SLICES_NUM; i++) { + reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMDCPR(i)); + reg &= ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK | + ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK; + if (!reg) + continue; + + if (reg & ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM DCMP: 0x%x", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK) { + dev_warn(&GET_DEV(accel_dev), + "Correctable error 
exception in SSM DCMP: 0x%x", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMDCPR(i), reg); + } + + return false; +} + +static bool adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + bool reset_required; + + if (!(errsou & ADF_GEN4_ERRSOU2_SSM_ERR_BIT)) + return false; + + reset_required = adf_handle_iaintstatssm(accel_dev, csr); + reset_required |= adf_handle_exprpssmcmpr(accel_dev, csr); + reset_required |= adf_handle_exprpssmxlt(accel_dev, csr); + reset_required |= adf_handle_exprpssmdcpr(accel_dev, csr); + + return reset_required; +} + +static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 reg; + + if (!(errsou & ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_CPP_CFC_ERR_STATUS); + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: data parity: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: command parity: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + reset_required = true; + } + + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: multiple errors: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + reset_required = true; + } + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_STATUS_CLR, + ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK); + + return reset_required; +} + +static void adf_gen4_process_errsou2(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou, + bool *reset_required) +{ + *reset_required |= adf_handle_ssm(accel_dev, csr, errsou); + *reset_required |= adf_handle_cpp_cfc_err(accel_dev, csr, errsou); +} + +static bool adf_handle_timiscsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 timiscsts; + + if (!(errsou & ADF_GEN4_ERRSOU3_TIMISCSTS_BIT)) + return false; + + timiscsts = ADF_CSR_RD(csr, ADF_GEN4_TIMISCSTS); + + dev_err(&GET_DEV(accel_dev), + "Fatal error in Transmit Interface: 0x%x\n", timiscsts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + return true; +} + +static bool adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ricppintsts; + + if (!(errsou & ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK)) + return false; + + ricppintsts = ADF_CSR_RD(csr, ADF_GEN4_RICPPINTSTS); + ricppintsts &= ADF_GEN4_RICPPINTSTS_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "RI CPP Uncorrectable Error: 0x%x\n", ricppintsts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTSTS, ricppintsts); + + return false; +} + +static bool adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ticppintsts; + + if (!(errsou & ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK)) + return false; + + ticppintsts = ADF_CSR_RD(csr, ADF_GEN4_TICPPINTSTS); + ticppintsts &= ADF_GEN4_TICPPINTSTS_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "TI CPP Uncorrectable Error: 0x%x\n", ticppintsts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTSTS, ticppintsts); + + return false; +} + +static bool adf_handle_aramcerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 
aram_cerr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT)) + return false; + + aram_cerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMCERR); + aram_cerr &= ADF_GEN4_REG_ARAMCERR_BIT; + + dev_warn(&GET_DEV(accel_dev), + "ARAM correctable error: 0x%x\n", aram_cerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + + aram_cerr |= ADF_GEN4_REG_ARAMCERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, aram_cerr); + + return false; +} + +static bool adf_handle_aramuerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 aramuerr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT)) + return false; + + aramuerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMUERR); + aramuerr &= ADF_GEN4_REG_ARAMUERR_ERROR_BIT | + ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT; + + if (!aramuerr) + return false; + + if (aramuerr & ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT) { + dev_err(&GET_DEV(accel_dev), + "ARAM multiple uncorrectable errors: 0x%x\n", aramuerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + reset_required = true; + } else { + dev_err(&GET_DEV(accel_dev), + "ARAM uncorrectable error: 0x%x\n", aramuerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + aramuerr |= ADF_GEN4_REG_ARAMUERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, aramuerr); + + return reset_required; +} + +static bool adf_handle_reg_cppmemtgterr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 cppmemtgterr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT)) + return false; + + cppmemtgterr = ADF_CSR_RD(csr, ADF_GEN4_REG_CPPMEMTGTERR); + cppmemtgterr &= ADF_GEN4_REG_CPPMEMTGTERR_BITMASK | + ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT; + if (!cppmemtgterr) + return false; + + if (cppmemtgterr & ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT) { + dev_err(&GET_DEV(accel_dev), + "Misc memory target multiple uncorrectable errors: 0x%x\n", + cppmemtgterr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + reset_required = true; + } else { + dev_err(&GET_DEV(accel_dev), + "Misc memory target uncorrectable error: 0x%x\n", cppmemtgterr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + cppmemtgterr |= ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, cppmemtgterr); + + return reset_required; +} + +static bool adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 i; + u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks; + + if (!(errsou & ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT)) + return false; + + for (i = 0; i < max_rp_num; i++) { + u32 atufaultstatus = ADF_CSR_RD(csr, ADF_GEN4_ATUFAULTSTATUS(i)); + + atufaultstatus &= ADF_GEN4_ATUFAULTSTATUS_BIT; + + if (atufaultstatus) { + dev_err(&GET_DEV(accel_dev), + "Ring Pair (%u) ATU detected fault: 0x%x\n", i, + atufaultstatus); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_ATUFAULTSTATUS(i), atufaultstatus); + } + } + + return false; +} + +static void adf_gen4_process_errsou3(struct adf_accel_dev *accel_dev, + void __iomem *csr, void __iomem *aram_csr, + u32 errsou, bool *reset_required) +{ + *reset_required |= adf_handle_timiscsts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ricppintsts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ticppintsts(accel_dev, csr, errsou);
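+ /* the ARAM and CPP memory target error registers are accessed through the ARAM CSR base rather than the PMISC base used above */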
+ *reset_required |= adf_handle_aramcerr(accel_dev, aram_csr, errsou); + *reset_required |= adf_handle_aramuerr(accel_dev, aram_csr, errsou); + *reset_required |= adf_handle_reg_cppmemtgterr(accel_dev, aram_csr, errsou); + *reset_required |= adf_handle_atufaultstatus(accel_dev, csr, errsou); +} + +static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, + bool *reset_required) +{ + void __iomem *aram_csr = adf_get_aram_base(accel_dev); + void __iomem *csr = adf_get_pmisc_base(accel_dev); + u32 errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU0); + bool handled = false; + + *reset_required = false; + + if (errsou & ADF_GEN4_ERRSOU0_BIT) { + adf_gen4_process_errsou0(accel_dev, csr); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU1); + if (errsou & ADF_GEN4_ERRSOU1_BITMASK) { + adf_gen4_process_errsou1(accel_dev, csr, errsou, reset_required); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU2); + if (errsou & ADF_GEN4_ERRSOU2_BITMASK) { + adf_gen4_process_errsou2(accel_dev, csr, errsou, reset_required); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU3); + if (errsou & ADF_GEN4_ERRSOU3_BITMASK) { + adf_gen4_process_errsou3(accel_dev, csr, aram_csr, errsou, reset_required); + handled = true; + } + + return handled; +} + +void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops) +{ + ras_ops->enable_ras_errors = adf_gen4_enable_ras; + ras_ops->disable_ras_errors = adf_gen4_disable_ras; + ras_ops->handle_interrupt = adf_gen4_handle_interrupt; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_ras_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h new file mode 100644 index 0000000000000000000000000000000000000000..53352083cd12acfeec2ba9e415789f5ffef6972f --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -0,0 +1,825 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_GEN4_RAS_H_ +#define ADF_GEN4_RAS_H_ + +#include <linux/bits.h> + +struct adf_ras_ops; + +/* ERRSOU0 Correctable error mask */ +#define ADF_GEN4_ERRSOU0_BIT BIT(0) + +/* HI AE Correctable error log */ +#define ADF_GEN4_HIAECORERRLOG_CPP0 0x41A308 + +/* HI AE Correctable error log enable */ +#define ADF_GEN4_HIAECORERRLOGENABLE_CPP0 0x41A318 +#define ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT BIT(0) +#define ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT BIT(1) +#define ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT BIT(2) +#define ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT BIT(3) +#define ADF_GEN4_ERRSOU1_RIMISCSTS_BIT BIT(4) + +#define ADF_GEN4_ERRSOU1_BITMASK ( \ + (ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT) | \ + (ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT) | \ + (ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT) | \ + (ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT) | \ + (ADF_GEN4_ERRSOU1_RIMISCSTS_BIT)) + +/* HI AE Uncorrectable error log */ +#define ADF_GEN4_HIAEUNCERRLOG_CPP0 0x41A300 + +/* HI AE Uncorrectable error log enable */ +#define ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0 0x41A320 + +/* HI CPP Agent Command parity error log */ +#define ADF_GEN4_HICPPAGENTCMDPARERRLOG 0x41A310 + +/* HI CPP Agent Command parity error logging enable */ +#define ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE 0x41A314 + +/* RI Memory parity error status register */ +#define ADF_GEN4_RIMEM_PARERR_STS 0x41B128 + +/* RI Memory parity error reporting enable */ +#define ADF_GEN4_RI_MEM_PAR_ERR_EN0 0x41B12C + +/* + * RI Memory parity error mask + * BIT(0) - BIT(3) - ri_iosf_pdata_rxq[0:3] parity error + * BIT(4) - ri_tlq_phdr parity error + * BIT(5) - ri_tlq_pdata parity error + * BIT(6) - ri_tlq_nphdr parity error + *
BIT(7) - ri_tlq_npdata parity error + * BIT(8) - BIT(9) - ri_tlq_cplhdr[0:1] parity error + * BIT(10) - BIT(17) - ri_tlq_cpldata[0:7] parity error + * BIT(18) - set this bit to 1 to enable logging status to ri_mem_par_err_sts0 + * BIT(19) - ri_cds_cmd_fifo parity error + * BIT(20) - ri_obc_ricpl_fifo parity error + * BIT(21) - ri_obc_tiricpl_fifo parity error + * BIT(22) - ri_obc_cppcpl_fifo parity error + * BIT(23) - ri_obc_pendcpl_fifo parity error + * BIT(24) - ri_cpp_cmd_fifo parity error + * BIT(25) - ri_cds_ticmd_fifo parity error + * BIT(26) - riti_cmd_fifo parity error + * BIT(27) - ri_int_msixtbl parity error + * BIT(28) - ri_int_imstbl parity error + * BIT(30) - ri_kpt_fuses parity error + */ +#define ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(5) | \ + BIT(7) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \ + BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | \ + BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | \ + BIT(26) | BIT(27) | BIT(28) | BIT(30)) + +#define ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK \ + (BIT(4) | BIT(6) | BIT(8) | BIT(9)) + +/* TI CI parity status */ +#define ADF_GEN4_TI_CI_PAR_STS 0x50060C + +/* TI CI parity reporting mask */ +#define ADF_GEN4_TI_CI_PAR_ERR_MASK 0x500608 + +/* + * TI CI parity status mask + * BIT(0) - CdCmdQ_sts parity error status + * BIT(1) - CdDataQ_sts parity error status + * BIT(3) - CPP_SkidQ_sts parity error status + * BIT(7) - CPP_SkidQ_sc_sts parity error status + */ +#define ADF_GEN4_TI_CI_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(3) | BIT(7)) + +/* TI PULLFUB parity status */ +#define ADF_GEN4_TI_PULL0FUB_PAR_STS 0x500618 + +/* TI PULLFUB parity error reporting mask */ +#define ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK 0x500614 + +/* + * TI PULLFUB parity status mask + * BIT(0) - TrnPullReqQ_sts parity status + * BIT(1) - TrnSharedDataQ_sts parity status + * BIT(2) - TrnPullReqDataQ_sts parity status + * BIT(4) - CPP_CiPullReqQ_sts parity status + * BIT(5) - CPP_TrnPullReqQ_sts parity status + * BIT(6) - CPP_PullidQ_sts parity status + * BIT(7) - CPP_WaitDataQ_sts parity status + * BIT(8) - CPP_CdDataQ_sts parity status + * BIT(9) - CPP_TrnDataQP0_sts parity status + * BIT(10) - BIT(11) - CPP_TrnDataQRF[00:01]_sts parity status + * BIT(12) - CPP_TrnDataQP1_sts parity status + * BIT(13) - BIT(14) - CPP_TrnDataQRF[10:11]_sts parity status + */ +#define ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14)) + +/* TI PUSHFUB parity status */ +#define ADF_GEN4_TI_PUSHFUB_PAR_STS 0x500630 + +/* TI PUSHFUB parity error reporting mask */ +#define ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK 0x50062C + +/* + * TI PUSHFUB parity status mask + * BIT(0) - SbPushReqQ_sts parity status + * BIT(1) - BIT(2) - SbPushDataQ[0:1]_sts parity status + * BIT(4) - CPP_CdPushReqQ_sts parity status + * BIT(5) - BIT(6) - CPP_CdPushDataQ[0:1]_sts parity status + * BIT(7) - CPP_SbPushReqQ_sts parity status + * BIT(8) - CPP_SbPushDataQP_sts parity status + * BIT(9) - BIT(10) - CPP_SbPushDataQRF[0:1]_sts parity status + */ +#define ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | \ + BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10)) + +/* TI CD parity status */ +#define ADF_GEN4_TI_CD_PAR_STS 0x50063C + +/* TI CD parity error mask */ +#define ADF_GEN4_TI_CD_PAR_ERR_MASK 0x500638 + +/* + * TI CD parity status mask + * BIT(0) - BIT(15) - CtxMdRam[0:15]_sts parity status + *
BIT(16) - Leaf2ClusterRam_sts parity status + * BIT(17) - BIT(18) - Ring2LeafRam[0:1]_sts parity status + * BIT(19) - VirtualQ_sts parity status + * BIT(20) - DtRdQ_sts parity status + * BIT(21) - DtWrQ_sts parity status + * BIT(22) - RiCmdQ_sts parity status + * BIT(23) - BypassQ_sts parity status + * BIT(24) - DtRdQ_sc_sts parity status + * BIT(25) - DtWrQ_sc_sts parity status + */ +#define ADF_GEN4_TI_CD_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \ + BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \ + BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25)) + +/* TI TRNSB parity status */ +#define ADF_GEN4_TI_TRNSB_PAR_STS 0x500648 + +/* TI TRNSB Parity error reporting mask */ +#define ADF_GEN4_TI_TRNSB_PAR_ERR_MASK 0x500644 + +/* + * TI TRNSB parity status mask + * BIT(0) - TrnPHdrQP_sts parity status + * BIT(1) - TrnPHdrQRF_sts parity status + * BIT(2) - TrnPDataQP_sts parity status + * BIT(3) - BIT(6) - TrnPDataQRF[0:3]_sts parity status + * BIT(7) - TrnNpHdrQP_sts parity status + * BIT(8) - BIT(9) - TrnNpHdrQRF[0:1]_sts parity status + * BIT(10) - TrnCplHdrQ_sts parity status + * BIT(11) - TrnPutObsReqQ_sts parity status + * BIT(12) - TrnPushReqQ_sts parity status + * BIT(13) - SbSplitIdRam_sts parity status + * BIT(14) - SbReqCountQ_sts parity status + * BIT(15) - SbCplTrkRam_sts parity status + * BIT(16) - SbGetObsReqQ_sts parity status + * BIT(17) - SbEpochIdQ_sts parity status + * BIT(18) - SbAtCplHdrQ_sts parity status + * BIT(19) - SbAtCplDataQ_sts parity status + * BIT(20) - SbReqCountRam_sts parity status + * BIT(21) - SbAtCplHdrQ_sc_sts parity status + */ +#define ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \ + BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | \ + BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | \ + BIT(19) | BIT(20) | BIT(21)) + +/* Status register to log misc error on RI */ +#define ADF_GEN4_RIMISCSTS 0x41B1B8 + +/* Status control register to log misc RI error */ +#define ADF_GEN4_RIMISCCTL 0x41B1BC + +/* + * ERRSOU2 bit mask + * BIT(0) - SSM Interrupt Mask + * BIT(1) - CFC on CPP. 
ORed of CFC Push error and Pull error + * BIT(2) - BIT(4) - CPP attention interrupts, deprecated on gen4 devices + * BIT(18) - PM interrupt + */ +#define ADF_GEN4_ERRSOU2_SSM_ERR_BIT BIT(0) +#define ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT BIT(1) +#define ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK \ + (BIT(2) | BIT(3) | BIT(4)) + +#define ADF_GEN4_ERRSOU2_PM_INT_BIT BIT(18) + +#define ADF_GEN4_ERRSOU2_BITMASK \ + (ADF_GEN4_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT) + +#define ADF_GEN4_ERRSOU2_DIS_BITMASK \ + (ADF_GEN4_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK) + +#define ADF_GEN4_IAINTSTATSSM 0x28 + +/* IAINTSTATSSM error bit mask definitions */ +#define ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT BIT(0) +#define ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT BIT(1) +#define ADF_GEN4_IAINTSTATSSM_PPERR_BIT BIT(2) +#define ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT BIT(3) +#define ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT BIT(4) +#define ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT BIT(5) +#define ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT BIT(6) +#define ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT BIT(7) +#define ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT BIT(8) + +#define ADF_GEN4_IAINTSTATSSM_BITMASK \ + (ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT | \ + ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT | \ + ADF_GEN4_IAINTSTATSSM_PPERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT | \ + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT) + +#define ADF_GEN4_UERRSSMSH 0x18 + +/* + * UERRSSMSH error bit mask definitions + * + * BIT(0) - Indicates one uncorrectable error + * BIT(15) - Indicates multiple uncorrectable errors + * in device shared memory + */ +#define ADF_GEN4_UERRSSMSH_BITMASK (BIT(0) | BIT(15)) + +#define ADF_GEN4_UERRSSMSHAD 0x1C + +#define ADF_GEN4_CERRSSMSH 0x10 + +/* + * CERRSSMSH error bit + * BIT(0) - Indicates one correctable error + */ +#define ADF_GEN4_CERRSSMSH_ERROR_BIT BIT(0) + +#define ADF_GEN4_CERRSSMSHAD 0x14 + +/* SSM error handling features enable register */ +#define ADF_GEN4_SSMFEATREN 0x198 + +/* + * Disable SSM error detection and reporting features + * enabled by device driver on RAS initialization + * + * the following bits should be cleared: + * BIT(4) - Disable parity for CPP parity + * BIT(12) - Disable logging push/pull data error in pperr register. + * BIT(16) - BIT(23) - Disable parity for SPPs + * BIT(24) - BIT(27) - Disable parity for SPPs, if it's supported on the device.
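+ * + * Note: ADF_GEN4_SSMFEATREN_DIS_BITMASK below is the value written to SSMFEATREN; it keeps the remaining feature bits set while leaving the bits listed above cleared.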
+ */ +#define ADF_GEN4_SSMFEATREN_DIS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(5) | BIT(6) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(13) | BIT(14) | BIT(15)) + +#define ADF_GEN4_INTMASKSSM 0x0 + +/* + * Error reporting mask in INTMASKSSM + * BIT(0) - Shared memory uncorrectable interrupt mask + * BIT(1) - Shared memory correctable interrupt mask + * BIT(2) - PPERR interrupt mask + * BIT(3) - CPP parity error Interrupt mask + * BIT(4) - SSM interrupt generated by SER correctable error mask + * BIT(5) - SSM interrupt generated by SER uncorrectable error + * - not stop and scream - mask + */ +#define ADF_GEN4_INTMASKSSM_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5)) + +/* CPP push or pull error */ +#define ADF_GEN4_PPERR 0x8 + +#define ADF_GEN4_PPERR_BITMASK (BIT(0) | BIT(1)) + +#define ADF_GEN4_PPERRID 0xC + +/* Slice hang handling related registers */ +#define ADF_GEN4_SLICEHANGSTATUS_ATH_CPH 0x84 +#define ADF_GEN4_SLICEHANGSTATUS_CPR_XLT 0x88 +#define ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS 0x90 +#define ADF_GEN4_SLICEHANGSTATUS_WAT_WCP 0x8C +#define ADF_GEN4_SLICEHANGSTATUS_PKE 0x94 + +#define ADF_GEN4_SHINTMASKSSM_ATH_CPH 0xF0 +#define ADF_GEN4_SHINTMASKSSM_CPR_XLT 0xF4 +#define ADF_GEN4_SHINTMASKSSM_DCPR_UCS 0xFC +#define ADF_GEN4_SHINTMASKSSM_WAT_WCP 0xF8 +#define ADF_GEN4_SHINTMASKSSM_PKE 0x100 + +/* SPP pull cmd parity err_*slice* CSR */ +#define ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH 0x1A4 +#define ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT 0x1A8 +#define ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS 0x1B0 +#define ADF_GEN4_SPPPULLCMDPARERR_PKE 0x1B4 +#define ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP 0x1AC + +/* SPP pull data parity err_*slice* CSR */ +#define ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH 0x1BC +#define ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT 0x1C0 +#define ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS 0x1C8 +#define ADF_GEN4_SPPPULLDATAPARERR_PKE 0x1CC +#define ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP 0x1C4 + +/* SPP push cmd parity err_*slice* CSR */ +#define ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH 0x1D4 +#define ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT 0x1D8 +#define ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS 0x1E0 +#define ADF_GEN4_SPPPUSHCMDPARERR_PKE 0x1E4 +#define ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP 0x1DC + +/* SPP push data parity err_*slice* CSR */ +#define ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH 0x1EC +#define ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT 0x1F0 +#define ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS 0x1F8 +#define ADF_GEN4_SPPPUSHDATAPARERR_PKE 0x1FC +#define ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP 0x1F4 + +/* Accelerator SPP parity error mask registers */ +#define ADF_GEN4_SPPPARERRMSK_ATH_CPH 0x204 +#define ADF_GEN4_SPPPARERRMSK_CPR_XLT 0x208 +#define ADF_GEN4_SPPPARERRMSK_DCPR_UCS 0x210 +#define ADF_GEN4_SPPPARERRMSK_PKE 0x214 +#define ADF_GEN4_SPPPARERRMSK_WAT_WCP 0x20C + +#define ADF_GEN4_SSMCPPERR 0x224 + +/* + * Uncorrectable error mask in SSMCPPERR + * BIT(0) - indicates CPP command parity error + * BIT(1) - indicates CPP Main Push PPID parity error + * BIT(2) - indicates CPP Main ePPID parity error + * BIT(3) - indicates CPP Main push data parity error + * BIT(4) - indicates CPP Main Pull PPID parity error + * BIT(5) - indicates CPP target pull data parity error + */ +#define ADF_GEN4_SSMCPPERR_FATAL_BITMASK \ + (BIT(0) | BIT(1) | BIT(4)) + +#define ADF_GEN4_SSMCPPERR_UNCERR_BITMASK \ + (BIT(2) | BIT(3) | BIT(5)) + +#define ADF_GEN4_SSMSOFTERRORPARITY_SRC 0x9C +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC 0xB8 + +#define ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH 0xA0 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH 
0xBC + +#define ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT 0xA4 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT 0xC0 + +#define ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS 0xAC +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS 0xC8 + +#define ADF_GEN4_SSMSOFTERRORPARITY_PKE 0xB0 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE 0xCC + +#define ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP 0xA8 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP 0xC4 + +/* RF parity error detected in SharedRAM */ +#define ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT BIT(0) + +#define ADF_GEN4_SER_ERR_SSMSH 0x44C + +/* + * Fatal error mask in SER_ERR_SSMSH + * BIT(0) - Indicates an uncorrectable error has occurred in the + * accelerator controller command RFs + * BIT(2) - Parity error occurred in the bank SPP fifos + * BIT(3) - Indicates Parity error occurred in following fifos in + * the design + * BIT(4) - Parity error occurred in flops in the design + * BIT(5) - Uncorrectable error has occurred in the + * target push and pull data register flop + * BIT(7) - Indicates Parity error occurred in the Resource Manager + * pending lock request fifos + * BIT(8) - Indicates Parity error occurred in the Resource Manager + * MECTX command queues logic + * BIT(9) - Indicates Parity error occurred in the Resource Manager + * MECTX sigdone fifo flops + * BIT(10) - Indicates an uncorrectable error has occurred in the + * Resource Manager MECTX command RFs + * BIT(14) - Parity error occurred in Buffer Manager sigdone FIFO + */ +#define ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK \ + (BIT(0) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(14)) + +/* + * Uncorrectable error mask in SER_ERR_SSMSH + * BIT(12) - Parity error occurred in Buffer Manager pool 0 + * BIT(13) - Parity error occurred in Buffer Manager pool 1 + */ +#define ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK \ + (BIT(12) | BIT(13)) + +/* + * Correctable error mask in SER_ERR_SSMSH + * BIT(1) - Indicates a correctable Error has occurred + * in the slice controller command RFs + * BIT(6) - Indicates a correctable Error has occurred in + * the target push and pull data RFs + * BIT(11) - Indicates a correctable Error has occurred in + * the Resource Manager MECTX command RFs + */ +#define ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK \ + (BIT(1) | BIT(6) | BIT(11)) + +/* SSM shared memory SER error reporting mask */ +#define ADF_GEN4_SER_EN_SSMSH 0x450 + +/* + * SSM SER error reporting mask in SER_en_err_ssmsh + * BIT(0) - Enables uncorrectable Error detection in: + * 1) slice controller command RFs. + * 2) target push/pull data registers + * BIT(1) - Enables correctable Error detection in: + * 1) slice controller command RFs + * 2) target push/pull data registers + * BIT(2) - Enables Parity error detection in + * 1) bank SPP fifos + * 2) gen4_pull_id_queue + * 3) gen4_push_id_queue + * 4) AE_pull_sigdn_fifo + * 5) DT_push_sigdn_fifo + * 6) slx_push_sigdn_fifo + * 7) secure_push_cmd_fifo + * 8) secure_pull_cmd_fifo + * 9) Head register in FIFO wrapper + * 10) current_cmd in individual push queue + * 11) current_cmd in individual pull queue + * 12) push_command_rxp arbitrated in ssm_push_cmd_queues + * 13) pull_command_rxp arbitrated in ssm_pull_cmd_queues + * BIT(3) - Enables uncorrectable Error detection in + * the resource manager mectx cmd RFs.
+ * BIT(4) - Enables correctable error detection in the Resource Manager + * mectx command RFs + * BIT(5) - Enables Parity error detection in + * 1) resource manager lock request fifo + * 2) mectx cmdqueues logic + * 3) mectx sigdone fifo + * BIT(6) - Enables Parity error detection in Buffer Manager pools + * and sigdone fifo + */ +#define ADF_GEN4_SER_EN_SSMSH_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6)) + +#define ADF_GEN4_CPP_CFC_ERR_STATUS 0x640C04 + +/* + * BIT(1) - Indicates multiple CPP CFC errors + * BIT(7) - Indicates CPP CFC command parity error type + * BIT(8) - Indicates CPP CFC data parity error type + */ +#define ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT BIT(1) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT BIT(7) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT BIT(8) + +/* + * BIT(0) - Enables CFC to detect and log push/pull data error + * BIT(1) - Enables CFC to generate interrupt to PCIEP for CPP error + * BIT(4) - When 1 Parity detection is disabled + * BIT(5) - When 1 Parity detection is disabled on CPP command bus + * BIT(6) - When 1 Parity detection is disabled on CPP push/pull bus + * BIT(9) - When 1 RF parity error detection is disabled + */ +#define ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK (BIT(0) | BIT(1)) + +#define ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK \ + (BIT(4) | BIT(5) | BIT(6) | BIT(9) | BIT(10)) + +#define ADF_GEN4_CPP_CFC_ERR_CTRL 0x640C00 + +/* + * BIT(0) - Clears bit(0) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when an error is reported on CPP + * BIT(1) - Clears bit(1) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when multiple errors are reported on CPP + * BIT(2) - Clears bit(2) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when attention interrupt is reported + */ +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK (BIT(0) | BIT(1) | BIT(2)) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CLR 0x640C08 + +#define ADF_GEN4_CPP_CFC_ERR_PPID_LO 0x640C0C +#define ADF_GEN4_CPP_CFC_ERR_PPID_HI 0x640C10 + +/* Exception reporting in QAT SSM CMP */ +#define ADF_GEN4_EXPRPSSMCPR 0x2000 + +/* + * Uncorrectable error mask in EXPRPSSMCPR + * BIT(2) - Hard fatal error + * BIT(16) - Parity error detected in CPR Push FIFO + * BIT(17) - Parity error detected in CPR Pull FIFO + * BIT(18) - Parity error detected in CPR Hash Table + * BIT(19) - Parity error detected in CPR History Buffer Copy 0 + * BIT(20) - Parity error detected in CPR History Buffer Copy 1 + * BIT(21) - Parity error detected in CPR History Buffer Copy 2 + * BIT(22) - Parity error detected in CPR History Buffer Copy 3 + * BIT(23) - Parity error detected in CPR History Buffer Copy 4 + * BIT(24) - Parity error detected in CPR History Buffer Copy 5 + * BIT(25) - Parity error detected in CPR History Buffer Copy 6 + * BIT(26) - Parity error detected in CPR History Buffer Copy 7 + */ +#define ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK \ + (BIT(2) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26)) + +/* Exception reporting in QAT SSM XLT */ +#define ADF_GEN4_EXPRPSSMXLT 0xA000 + +/* + * Uncorrectable error mask in EXPRPSSMXLT + * BIT(2) - If set, an Uncorrectable Error event occurred + * BIT(16) - Parity error detected in XLT Push FIFO + * BIT(17) - Parity error detected in XLT Pull FIFO + * BIT(18) - Parity error detected in XLT HCTB0 + * BIT(19) - Parity error detected in XLT HCTB1 + * BIT(20) - Parity error detected in XLT HCTB2 + * BIT(21) - Parity error detected in XLT HCTB3 + * BIT(22) - Parity error detected in XLT CBCL + * BIT(23) - Parity error detected in XLT LITPTR
+ */ +#define ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK \ + (BIT(2) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \ + BIT(22) | BIT(23)) + +/* + * Correctable error mask in EXPRPSSMXLT + * BIT(3) - Correctable error event occurred. + */ +#define ADF_GEN4_EXPRPSSMXLT_CERR_BIT BIT(3) + +/* Exception reporting in QAT SSM DCMP */ +#define ADF_GEN4_EXPRPSSMDCPR(_n_) (0x12000 + (_n_) * 0x80) + +/* + * Uncorrectable error mask in EXPRPSSMDCPR + * BIT(2) - Even hard fatal error + * BIT(4) - Odd hard fatal error + * BIT(6) - decode soft error + * BIT(16) - Parity error detected in CPR Push FIFO + * BIT(17) - Parity error detected in CPR Pull FIFO + * BIT(18) - Parity error detected in the Input Buffer + * BIT(19) - symbuf0parerr + * Parity error detected in CPR Push FIFO + * BIT(20) - symbuf1parerr + * Parity error detected in CPR Push FIFO + */ +#define ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK \ + (BIT(2) | BIT(4) | BIT(6) | BIT(16) | BIT(17) | \ + BIT(18) | BIT(19) | BIT(20)) + +/* + * Correctable error mask in EXPRPSSMDCPR + * BIT(3) - Even ecc correctable error + * BIT(5) - Odd ecc correctable error + */ +#define ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK (BIT(3) | BIT(5)) + +#define ADF_GEN4_DCPR_SLICES_NUM 3 + +/* + * ERRSOU3 bit masks + * BIT(0) - indicates error Response Order Overflow and/or BME error + * BIT(1) - indicates RI push/pull error + * BIT(2) - indicates TI push/pull error + * BIT(3) - indicates ARAM correctable error + * BIT(4) - indicates ARAM uncorrectable error + * BIT(5) - indicates TI pull parity error + * BIT(6) - indicates RI push parity error + * BIT(7) - indicates VFLR interrupt + * BIT(8) - indicates ring pair interrupts for ATU detected fault + * BIT(9) - indicates error when accessing RLT block + */ +#define ADF_GEN4_ERRSOU3_TIMISCSTS_BIT BIT(0) +#define ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK (BIT(1) | BIT(6)) +#define ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK (BIT(2) | BIT(5)) +#define ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT BIT(3) +#define ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT BIT(4) +#define ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT BIT(7) +#define ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT BIT(8) +#define ADF_GEN4_ERRSOU3_RLTERROR_BIT BIT(9) + +#define ADF_GEN4_ERRSOU3_BITMASK ( \ + (ADF_GEN4_ERRSOU3_TIMISCSTS_BIT) | \ + (ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK) | \ + (ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK) | \ + (ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT) | \ + (ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT) | \ + (ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT) | \ + (ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT) | \ + (ADF_GEN4_ERRSOU3_RLTERROR_BIT)) + +/* TI Misc status register */ +#define ADF_GEN4_TIMISCSTS 0x50054C + +/* TI Misc error reporting mask */ +#define ADF_GEN4_TIMISCCTL 0x500548 + +/* + * TI Misc error reporting control mask + * BIT(0) - Enables error detection and logging in TIMISCSTS register + * BIT(1) - Effective only when SRIOV is enabled; this bit is 0 by default + * BIT(2) - Enables the D-F-x counter within the dispatch arbiter + * to start based on the command triggered from + * BIT(30) - Disables VFLR functionality + * Setting this bit reverts to CPM1.x functionality + * The values of bits 1, 2 and 30 should be preserved and are not meant + * to be changed within RAS.
+ */ +#define ADF_GEN4_TIMISCCTL_BIT BIT(0) +#define ADF_GEN4_TIMSCCTL_RELAY_BITMASK (BIT(1) | BIT(2) | BIT(30)) + +/* RI CPP interface status register */ +#define ADF_GEN4_RICPPINTSTS 0x41A330 + +/* + * Uncorrectable error mask in RICPPINTSTS register + * BIT(0) - RI asserted the CPP error signal during a push + * BIT(1) - RI detected the CPP error signal asserted during a pull + * BIT(2) - RI detected a push data parity error + * BIT(3) - RI detected a push valid parity error + */ +#define ADF_GEN4_RICPPINTSTS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + +/* RI CPP interface status register control */ +#define ADF_GEN4_RICPPINTCTL 0x41A32C + +/* + * Control bit mask for RICPPINTCTL register + * BIT(0) - value of 1 enables error detection and reporting + * on the RI CPP Push interface + * BIT(1) - value of 1 enables error detection and reporting + * on the RI CPP Pull interface + * BIT(2) - value of 1 enables error detection and reporting + * on the RI Parity + * BIT(3) - value of 1 enables checking parity on CPP + * BIT(4) - value of 1 enables the stop feature of the stop and scream + * for all RI CPP Command RFs + */ +#define ADF_GEN4_RICPPINTCTL_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4)) + +/* Push ID of the command which triggered the transaction error on RI */ +#define ADF_GEN4_RIERRPUSHID 0x41A334 + +/* Pull ID of the command which triggered the transaction error on RI */ +#define ADF_GEN4_RIERRPULLID 0x41A338 + +/* TI CPP interface status register */ +#define ADF_GEN4_TICPPINTSTS 0x50053C + +/* + * Uncorrectable error mask in TICPPINTSTS register + * BIT(0) - value of 1 indicates that the TI asserted + * the CPP error signal during a push + * BIT(1) - value of 1 indicates that the TI detected + * the CPP error signal asserted during a pull + * BIT(2) - value of 1 indicates that the TI detected + * a pull data parity error + */ +#define ADF_GEN4_TICPPINTSTS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2)) + +/* TI CPP interface status register control */ +#define ADF_GEN4_TICPPINTCTL 0x500538 + +/* + * Control bit mask for TICPPINTCTL register + * BIT(0) - value of 1 enables error detection and reporting on + * the TI CPP Push interface + * BIT(1) - value of 1 enables error detection and reporting on + * the TI CPP Pull interface + * BIT(2) - value of 1 enables parity error detection and logging on + * the TI CPP Pull interface + * BIT(3) - value of 1 enables CPP CMD and Pull Data parity checking + * BIT(4) - value of 1 enables TI stop part of stop and scream mode on + * CPP/RF Parity error + */ +#define ADF_GEN4_TICPPINTCTL_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4)) + +/* Push ID of the command which triggered the transaction error on TI */ +#define ADF_GEN4_TIERRPUSHID 0x500540 + +/* Pull ID of the command which triggered the transaction error on TI */ +#define ADF_GEN4_TIERRPULLID 0x500544 + +/* Correctable error in ARAM agent register */ +#define ADF_GEN4_REG_ARAMCERR 0x1700 + +#define ADF_GEN4_REG_ARAMCERR_BIT BIT(0) + +/* + * Correctable error enablement in ARAM bit mask + * BIT(3) - enable ARAM RAM to fix and log correctable error + * BIT(26) - enables ARAM agent to generate interrupt for correctable error + */ +#define ADF_GEN4_REG_ARAMCERR_EN_BITMASK (BIT(3) | BIT(26)) + +/* Correctable error address in ARAM agent register */ +#define ADF_GEN4_REG_ARAMCERRAD 0x1708 + +/* Uncorrectable error in ARAM agent register */ +#define ADF_GEN4_REG_ARAMUERR 0x1704 + +/* + * ARAM error bit mask + * BIT(0) - indicates error logged in ARAMCERR or ARAMUCERR + *
+ +/* Correctable error in ARAM agent register */ +#define ADF_GEN4_REG_ARAMCERR 0x1700 + +#define ADF_GEN4_REG_ARAMCERR_BIT BIT(0) + +/* + * Correctable error enablement in ARAM bit mask + * BIT(3) - enable ARAM RAM to fix and log correctable error + * BIT(26) - enables ARAM agent to generate interrupt for correctable error + */ +#define ADF_GEN4_REG_ARAMCERR_EN_BITMASK (BIT(3) | BIT(26)) + +/* Correctable error address in ARAM agent register */ +#define ADF_GEN4_REG_ARAMCERRAD 0x1708 + +/* Uncorrectable error in ARAM agent register */ +#define ADF_GEN4_REG_ARAMUERR 0x1704 + +/* + * ARAM error bit mask + * BIT(0) - indicates error logged in ARAMCERR or ARAMUERR + * BIT(18) - indicates uncorrectable multiple errors in ARAM agent + */ +#define ADF_GEN4_REG_ARAMUERR_ERROR_BIT BIT(0) +#define ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT BIT(18) + +/* + * Uncorrectable error enablement in ARAM bit mask + * BIT(3) - enable ARAM RAM to fix and log uncorrectable error + * BIT(19) - enables ARAM agent to generate interrupt for uncorrectable error + */ +#define ADF_GEN4_REG_ARAMUERR_EN_BITMASK (BIT(3) | BIT(19)) + +/* Uncorrectable error address in ARAM agent register */ +#define ADF_GEN4_REG_ARAMUERRAD 0x170C + +/* Uncorrectable error transaction push/pull ID registers */ +#define ADF_GEN4_REG_ERRPPID_LO 0x1714 +#define ADF_GEN4_REG_ERRPPID_HI 0x1718 + +/* ARAM ECC block error enablement */ +#define ADF_GEN4_REG_ARAMCERRUERR_EN 0x1808 + +/* + * ARAM ECC block error control bit masks + * BIT(0) - enable ARAM CD ECC block error detecting + * BIT(1) - enable ARAM pull request ECC error detecting + * BIT(2) - enable ARAM command dispatch ECC error detecting + * BIT(3) - enable ARAM read datapath push ECC error detecting + * BIT(4) - enable ARAM read datapath pull ECC error detecting + * BIT(5) - enable ARAM RMW ECC error detecting + * BIT(6) - enable ARAM write datapath RMW ECC error detecting + * BIT(7) - enable ARAM write datapath ECC error detecting + */ +#define ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | \ + BIT(5) | BIT(6) | BIT(7)) + +/* ARAM misc memory target error registers */ +#define ADF_GEN4_REG_CPPMEMTGTERR 0x1710 + +/* + * ARAM misc memory target error bit masks + * BIT(0) - indicates an error in ARAM target memory + * BIT(1) - indicates multiple errors in ARAM target memory + * BIT(4) - indicates pull error in ARAM target memory + * BIT(5) - indicates parity pull error in ARAM target memory + * BIT(6) - indicates push error in ARAM target memory + */ +#define ADF_GEN4_REG_CPPMEMTGTERR_BITMASK \ + (BIT(0) | BIT(4) | BIT(5) | BIT(6)) + +#define ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT BIT(1) + +/* + * ARAM misc memory target error enablement mask + * BIT(2) - enables CPP memory to detect and log push/pull data error + * BIT(7) - enables push/pull error to generate interrupts to RI + * BIT(8) - enables ARAM to check parity on pull data and CPP command buses + * BIT(9) - enables ARAM to autopush to AE when push/parity error is detected + * on lookaside DT + */ +#define ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK \ + (BIT(2) | BIT(7) | BIT(8) | BIT(9)) + +/* ATU fault status register */ +#define ADF_GEN4_ATUFAULTSTATUS(i) (0x506000 + ((i) * 0x4)) + +#define ADF_GEN4_ATUFAULTSTATUS_BIT BIT(0) + +/* Command Parity error detected on IOSFP Command to QAT */ +#define ADF_GEN4_RIMISCSTS_BIT BIT(0) + +void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops); + +#endif /* ADF_GEN4_RAS_H_ */
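ERRSOU3 multiplexes several independent error sources behind a single status read; each set bit points at a more specific status register defined above. A hedged sketch of that decode (the actual handler in adf_gen4_ras.c also clears and reports each source; this helper is illustrative only):

    /* Sketch: map a pending ERRSOU3 value to the register to consult next. */
    static void errsou3_decode(u32 errsou3)
    {
            if (errsou3 & ADF_GEN4_ERRSOU3_TIMISCSTS_BIT)
                    pr_debug("TI misc error, consult ADF_GEN4_TIMISCSTS\n");

            if (errsou3 & ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK)
                    pr_debug("RI CPP error, consult ADF_GEN4_RICPPINTSTS\n");

            if (errsou3 & ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK)
                    pr_debug("TI CPP error, consult ADF_GEN4_TICPPINTSTS\n");

            if (errsou3 & (ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT |
                           ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT))
                    pr_debug("ARAM error, consult ADF_GEN4_REG_ARAM[CU]ERR\n");
    }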
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c index 646c57922fcda5c1db50d4c0cb416936bd2ec7dd..35ccb91d6ec1b9060d368bc71a93e68bed77217c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c @@ -9,6 +9,7 @@ #include #include +#include "adf_admin.h" #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_gen4_timer.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c new file mode 100644 index 0000000000000000000000000000000000000000..c7ad8cf07863b16e05ab727f6f43b05feb69c106 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. */ +#include <linux/export.h> +#include <linux/kernel.h> + +#include "adf_gen4_tl.h" +#include "adf_telemetry.h" +#include "adf_tl_debugfs.h" + +#define ADF_GEN4_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen4) + +#define ADF_GEN4_TL_RP_REG_OFF(reg) ADF_TL_RP_REG_OFF(reg, gen4) + +#define ADF_GEN4_TL_SL_UTIL_COUNTER(_name) \ + ADF_TL_COUNTER("util_" #_name, \ + ADF_TL_SIMPLE_COUNT, \ + ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_util, gen4)) + +#define ADF_GEN4_TL_SL_EXEC_COUNTER(_name) \ + ADF_TL_COUNTER("exec_" #_name, \ + ADF_TL_SIMPLE_COUNT, \ + ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_exec_cnt, gen4)) + +/* Device level counters. */ +static const struct adf_tl_dbg_counter dev_counters[] = { + /* PCIe partial transactions. */ + ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_pci_trans_cnt)), + /* Max read latency[ns]. */ + ADF_TL_COUNTER(MAX_RD_LAT_NAME, ADF_TL_COUNTER_NS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_max)), + /* Read latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(RD_LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_cmpl_cnt)), + /* Max get to put latency[ns]. */ + ADF_TL_COUNTER(MAX_LAT_NAME, ADF_TL_COUNTER_NS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_max)), + /* Get to put latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_ae_put_cnt)), + /* PCIe write bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_in)), + /* PCIe read bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_out)), + /* Page request latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(PAGE_REQ_LAT_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_cnt)), + /* Page translation latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(AT_TRANS_LAT_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_cnt)), + /* Maximum uTLB used. */ + ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_max_tlb_used)), +}; + +/* Slice utilization counters. */ +static const struct adf_tl_dbg_counter sl_util_counters[ADF_TL_SL_CNT_COUNT] = { + /* Compression slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(cpr), + /* Translator slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(xlt), + /* Decompression slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(dcpr), + /* PKE utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(pke), + /* Wireless Authentication slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(wat), + /* Wireless Cipher slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(wcp), + /* UCS slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(ucs), + /* Cipher slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(cph), + /* Authentication slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(ath), +}; + +/* Slice execution counters. */ +static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = { + /* Compression slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(cpr), + /* Translator slice execution count.
*/ + ADF_GEN4_TL_SL_EXEC_COUNTER(xlt), + /* Decompression slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(dcpr), + /* PKE execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(pke), + /* Wireless Authentication slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(wat), + /* Wireless Cipher slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(wcp), + /* UCS slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(ucs), + /* Cipher slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(cph), + /* Authentication slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(ath), +}; + +/* Ring pair counters. */ +static const struct adf_tl_dbg_counter rp_counters[] = { + /* PCIe partial transactions. */ + ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_pci_trans_cnt)), + /* Get to put latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_gp_lat_acc), + ADF_GEN4_TL_RP_REG_OFF(reg_tl_ae_put_cnt)), + /* PCIe write bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_in)), + /* PCIe read bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_out)), + /* Message descriptor DevTLB hit rate. */ + ADF_TL_COUNTER(AT_GLOB_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_hit)), + /* Message descriptor DevTLB miss rate. */ + ADF_TL_COUNTER(AT_GLOB_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_miss)), + /* Payload DevTLB hit rate. */ + ADF_TL_COUNTER(AT_PAYLD_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_hit)), + /* Payload DevTLB miss rate. */ + ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)), +}; + +void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) +{ + tl_data->layout_sz = ADF_GEN4_TL_LAYOUT_SZ; + tl_data->slice_reg_sz = ADF_GEN4_TL_SLICE_REG_SZ; + tl_data->rp_reg_sz = ADF_GEN4_TL_RP_REG_SZ; + tl_data->num_hbuff = ADF_GEN4_TL_NUM_HIST_BUFFS; + tl_data->max_rp = ADF_GEN4_TL_MAX_RP_NUM; + tl_data->msg_cnt_off = ADF_GEN4_TL_MSG_CNT_OFF; + tl_data->cpp_ns_per_cycle = ADF_GEN4_CPP_NS_PER_CYCLE; + tl_data->bw_units_to_bytes = ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES; + + tl_data->dev_counters = dev_counters; + tl_data->num_dev_counters = ARRAY_SIZE(dev_counters); + tl_data->sl_util_counters = sl_util_counters; + tl_data->sl_exec_counters = sl_exec_counters; + tl_data->rp_counters = rp_counters; + tl_data->num_rp_counters = ARRAY_SIZE(rp_counters); + tl_data->max_sl_cnt = ADF_GEN4_TL_MAX_SLICES_PER_TYPE; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data);
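The ADF_TL_COUNTER_NS_AVG entries above pair an accumulator with a sample counter: the reported average is accumulator divided by samples, scaled from CPP cycles to nanoseconds. A sketch of that arithmetic, assuming the snapshot layout declared in adf_gen4_tl.h below and that the accumulators count CPP cycles (the in-tree rounding in adf_tl_debugfs.c may differ):

    #include <linux/math64.h>

    static u64 tl_avg_gp_lat_ns(const struct adf_gen4_tl_device_data_regs *regs)
    {
            if (!regs->reg_tl_ae_put_cnt)
                    return 0;

            /* accumulated get-put latency / number of puts, cycles -> ns */
            return div_u64(regs->reg_tl_gp_lat_acc, regs->reg_tl_ae_put_cnt) *
                   ADF_GEN4_CPP_NS_PER_CYCLE;
    }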
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h new file mode 100644 index 0000000000000000000000000000000000000000..32df4163beb9f098664b5be8fcd3b0440a5db53c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. */ +#ifndef ADF_GEN4_TL_H +#define ADF_GEN4_TL_H + +#include <linux/stddef.h> +#include <linux/types.h> + +struct adf_tl_hw_data; + +/* Computation constants. */ +#define ADF_GEN4_CPP_NS_PER_CYCLE 2 +#define ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES 64 + +/* Maximum aggregation time. Value in milliseconds. */ +#define ADF_GEN4_TL_MAX_AGGR_TIME_MS 4000 +/* Num of buffers to store historic values. */ +#define ADF_GEN4_TL_NUM_HIST_BUFFS \ + (ADF_GEN4_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS) + +/* Max number of HW resources of one type. */ +#define ADF_GEN4_TL_MAX_SLICES_PER_TYPE 24 + +/* Max number of simultaneously monitored ring pairs. */ +#define ADF_GEN4_TL_MAX_RP_NUM 4 + +/** + * struct adf_gen4_tl_slice_data_regs - HW slice data as populated by FW. + * @reg_tm_slice_exec_cnt: Slice execution count. + * @reg_tm_slice_util: Slice utilization. + */ +struct adf_gen4_tl_slice_data_regs { + __u32 reg_tm_slice_exec_cnt; + __u32 reg_tm_slice_util; +}; + +#define ADF_GEN4_TL_SLICE_REG_SZ sizeof(struct adf_gen4_tl_slice_data_regs) + +/** + * struct adf_gen4_tl_device_data_regs - This structure stores device telemetry + * counter values as they are periodically populated by the device. + * @reg_tl_rd_lat_acc: read latency accumulator + * @reg_tl_gp_lat_acc: get-put latency accumulator + * @reg_tl_at_page_req_lat_acc: AT/DevTLB page request latency accumulator + * @reg_tl_at_trans_lat_acc: DevTLB transaction latency accumulator + * @reg_tl_re_acc: accumulated ring empty time + * @reg_tl_pci_trans_cnt: PCIe partial transactions + * @reg_tl_rd_lat_max: maximum logged read latency + * @reg_tl_rd_cmpl_cnt: read requests completed count + * @reg_tl_gp_lat_max: maximum logged get to put latency + * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings + * @reg_tl_bw_in: PCIe write bandwidth + * @reg_tl_bw_out: PCIe read bandwidth + * @reg_tl_at_page_req_cnt: DevTLB page requests count + * @reg_tl_at_trans_lat_cnt: DevTLB transaction latency samples count + * @reg_tl_at_max_tlb_used: maximum uTLB used + * @reg_tl_re_cnt: ring empty time samples count + * @reserved: reserved + * @ath_slices: array of Authentication slices utilization registers + * @cph_slices: array of Cipher slices utilization registers + * @cpr_slices: array of Compression slices utilization registers + * @xlt_slices: array of Translator slices utilization registers + * @dcpr_slices: array of Decompression slices utilization registers + * @pke_slices: array of PKE slices utilization registers + * @ucs_slices: array of UCS slices utilization registers + * @wat_slices: array of Wireless Authentication slices utilization registers + * @wcp_slices: array of Wireless Cipher slices utilization registers + */ +struct adf_gen4_tl_device_data_regs { + __u64 reg_tl_rd_lat_acc; + __u64 reg_tl_gp_lat_acc; + __u64 reg_tl_at_page_req_lat_acc; + __u64 reg_tl_at_trans_lat_acc; + __u64 reg_tl_re_acc; + __u32 reg_tl_pci_trans_cnt; + __u32 reg_tl_rd_lat_max; + __u32 reg_tl_rd_cmpl_cnt; + __u32 reg_tl_gp_lat_max; + __u32 reg_tl_ae_put_cnt; + __u32 reg_tl_bw_in; + __u32 reg_tl_bw_out; + __u32 reg_tl_at_page_req_cnt; + __u32 reg_tl_at_trans_lat_cnt; + __u32 reg_tl_at_max_tlb_used; + __u32 reg_tl_re_cnt; + __u32 reserved; + struct adf_gen4_tl_slice_data_regs ath_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs cph_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs cpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs xlt_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs dcpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs pke_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs ucs_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs wat_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs
wcp_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; +}; + +/** + * struct adf_gen4_tl_ring_pair_data_regs - This structure stores Ring Pair + * telemetry counter values as they are periodically populated by the device. + * @reg_tl_gp_lat_acc: get-put latency accumulator + * @reserved: reserved + * @reg_tl_pci_trans_cnt: PCIe partial transactions + * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings + * @reg_tl_bw_in: PCIe write bandwidth + * @reg_tl_bw_out: PCIe read bandwidth + * @reg_tl_at_glob_devtlb_hit: Message descriptor DevTLB hit rate + * @reg_tl_at_glob_devtlb_miss: Message descriptor DevTLB miss rate + * @reg_tl_at_payld_devtlb_hit: Payload DevTLB hit rate + * @reg_tl_at_payld_devtlb_miss: Payload DevTLB miss rate + * @reg_tl_re_cnt: ring empty time samples count + * @reserved1: reserved + */ +struct adf_gen4_tl_ring_pair_data_regs { + __u64 reg_tl_gp_lat_acc; + __u64 reserved; + __u32 reg_tl_pci_trans_cnt; + __u32 reg_tl_ae_put_cnt; + __u32 reg_tl_bw_in; + __u32 reg_tl_bw_out; + __u32 reg_tl_at_glob_devtlb_hit; + __u32 reg_tl_at_glob_devtlb_miss; + __u32 reg_tl_at_payld_devtlb_hit; + __u32 reg_tl_at_payld_devtlb_miss; + __u32 reg_tl_re_cnt; + __u32 reserved1; +}; + +#define ADF_GEN4_TL_RP_REG_SZ sizeof(struct adf_gen4_tl_ring_pair_data_regs) + +/** + * struct adf_gen4_tl_layout - This structure represents the entire telemetry + * counters data: Device + 4 Ring Pairs, as periodically populated by the + * device. + * @tl_device_data_regs: structure of device telemetry registers + * @tl_ring_pairs_data_regs: array of ring pairs telemetry registers + * @reg_tl_msg_cnt: telemetry messages counter + * @reserved: reserved + */ +struct adf_gen4_tl_layout { + struct adf_gen4_tl_device_data_regs tl_device_data_regs; + struct adf_gen4_tl_ring_pair_data_regs + tl_ring_pairs_data_regs[ADF_GEN4_TL_MAX_RP_NUM]; + __u32 reg_tl_msg_cnt; + __u32 reserved; +}; + +#define ADF_GEN4_TL_LAYOUT_SZ sizeof(struct adf_gen4_tl_layout) +#define ADF_GEN4_TL_MSG_CNT_OFF offsetof(struct adf_gen4_tl_layout, reg_tl_msg_cnt) + +#ifdef CONFIG_DEBUG_FS +void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data); +#else +static inline void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) +{ +} +#endif /* CONFIG_DEBUG_FS */ +#endif /* ADF_GEN4_TL_H */
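ADF_GEN4_TL_MSG_CNT_OFF is only meaningful because the device block, the four ring-pair blocks and the message counter pack back-to-back without padding. A compile-time check of that invariant could look as follows; this is a sketch, not part of the patch:

    #include <linux/build_bug.h>

    /* reg_tl_msg_cnt must directly follow the device and RP register blocks */
    static_assert(ADF_GEN4_TL_MSG_CNT_OFF ==
                  sizeof(struct adf_gen4_tl_device_data_regs) +
                  ADF_GEN4_TL_MAX_RP_NUM * ADF_GEN4_TL_RP_REG_SZ);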
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c new file mode 100644 index 0000000000000000000000000000000000000000..a62eb5e8dbe6a03f8bab55eaf662f21000ad38a1 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c @@ -0,0 +1,1010 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include <linux/delay.h> +#include <linux/dev_printk.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_gen4_hw_data.h" +#include "adf_gen4_pfvf.h" +#include "adf_pfvf_utils.h" +#include "adf_mstate_mgr.h" +#include "adf_gen4_vf_mig.h" + +#define ADF_GEN4_VF_MSTATE_SIZE 4096 +#define ADF_GEN4_PFVF_RSP_TIMEOUT_US 5000 + +static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev); +static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len); + +static int adf_gen4_vfmig_init_device(struct qat_mig_dev *mdev) +{ + u8 *state; + + state = kmalloc(ADF_GEN4_VF_MSTATE_SIZE, GFP_KERNEL); + if (!state) + return -ENOMEM; + + mdev->state = state; + mdev->state_size = ADF_GEN4_VF_MSTATE_SIZE; + mdev->setup_size = 0; + mdev->remote_setup_size = 0; + + return 0; +} + +static void adf_gen4_vfmig_cleanup_device(struct qat_mig_dev *mdev) +{ + kfree(mdev->state); + mdev->state = NULL; +} + +static void adf_gen4_vfmig_reset_device(struct qat_mig_dev *mdev) +{ + mdev->setup_size = 0; + mdev->remote_setup_size = 0; +} + +static int adf_gen4_vfmig_open_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + + vf_info = &accel_dev->pf.vf_info[mdev->vf_id]; + + vfmig = kzalloc(sizeof(*vfmig), GFP_KERNEL); + if (!vfmig) + return -ENOMEM; + + vfmig->mstate_mgr = adf_mstate_mgr_new(mdev->state, mdev->state_size); + if (!vfmig->mstate_mgr) { + kfree(vfmig); + return -ENOMEM; + } + vf_info->mig_priv = vfmig; + mdev->setup_size = 0; + mdev->remote_setup_size = 0; + + return 0; +} + +static void adf_gen4_vfmig_close_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + + vf_info = &accel_dev->pf.vf_info[mdev->vf_id]; + if (vf_info->mig_priv) { + vfmig = vf_info->mig_priv; + adf_mstate_mgr_destroy(vfmig->mstate_mgr); + kfree(vfmig); + vf_info->mig_priv = NULL; + } +} + +static int adf_gen4_vfmig_suspend_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vf_mig; + u32 vf_nr = mdev->vf_id; + int ret, i; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vf_mig = vf_info->mig_priv; + + /* Stop all inflight jobs */ + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf; + + ret = adf_gen4_bank_drain_start(accel_dev, pf_bank_nr, + ADF_RPRESET_POLL_TIMEOUT_US); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to drain bank %d for vf_nr %d\n", i, + vf_nr); + return ret; + } + vf_mig->bank_stopped[i] = true; + + adf_gen4_bank_quiesce_coal_timer(accel_dev, pf_bank_nr, + ADF_COALESCED_POLL_TIMEOUT_US); + } + + return 0; +} + +static int adf_gen4_vfmig_resume_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vf_mig; + u32 vf_nr = mdev->vf_id; + int i; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vf_mig = vf_info->mig_priv; + + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf; + + if (vf_mig->bank_stopped[i]) { + adf_gen4_bank_drain_finish(accel_dev, pf_bank_nr); + vf_mig->bank_stopped[i] = false; + } + } + + return 0; +} + +struct adf_vf_bank_info { + struct adf_accel_dev *accel_dev; + u32 vf_nr; + u32 bank_nr; +}; + +struct mig_user_sla { + enum adf_base_services srv; + u64 rp_mask; + u32 cir; + u32 pir; +}; + +static int adf_mstate_sla_check(struct adf_mstate_mgr *sub_mgr, u8 *src_buf, + u32 src_size, void *opaque) +{ + struct adf_mstate_vreginfo _sinfo = { src_buf, src_size }; + struct adf_mstate_vreginfo *sinfo = &_sinfo, *dinfo = opaque; + u32 src_sla_cnt = sinfo->size / sizeof(struct mig_user_sla); + u32 dst_sla_cnt = dinfo->size / sizeof(struct mig_user_sla); + struct mig_user_sla *src_slas = sinfo->addr; + struct mig_user_sla *dst_slas = dinfo->addr; + int i, j; + + for (i = 0; i < src_sla_cnt; i++) { + for (j = 0; j < dst_sla_cnt; j++) { + if (src_slas[i].srv != dst_slas[j].srv || + src_slas[i].rp_mask !=
dst_slas[j].rp_mask) + continue; + + if (src_slas[i].cir > dst_slas[j].cir || + src_slas[i].pir > dst_slas[j].pir) { + pr_err("QAT: DST VF rate limiting mismatch.\n"); + return -EINVAL; + } + break; + } + + if (j == dst_sla_cnt) { + pr_err("QAT: SRC VF rate limiting mismatch - SRC srv %d and rp_mask 0x%llx.\n", + src_slas[i].srv, src_slas[i].rp_mask); + return -EINVAL; + } + } + + return 0; +} + +static inline int adf_mstate_check_cap_size(u32 src_sz, u32 dst_sz, u32 max_sz) +{ + if (src_sz > max_sz || dst_sz > max_sz) + return -EINVAL; + else + return 0; +} + +static int adf_mstate_compatver_check(struct adf_mstate_mgr *sub_mgr, + u8 *src_buf, u32 src_sz, void *opaque) +{ + struct adf_mstate_vreginfo *info = opaque; + u8 compat = 0; + u8 *pcompat; + + if (src_sz != info->size) { + pr_debug("QAT: State mismatch (compat version size), current %u, expected %u\n", + src_sz, info->size); + return -EINVAL; + } + + memcpy(info->addr, src_buf, info->size); + pcompat = info->addr; + if (*pcompat == 0) { + pr_warn("QAT: Unable to determine the version of VF\n"); + return 0; + } + + compat = adf_vf_compat_checker(*pcompat); + if (compat == ADF_PF2VF_VF_INCOMPATIBLE) { + pr_debug("QAT: SRC VF driver (ver=%u) is incompatible with DST PF driver (ver=%u)\n", + *pcompat, ADF_PFVF_COMPAT_THIS_VERSION); + return -EINVAL; + } + + if (compat == ADF_PF2VF_VF_COMPAT_UNKNOWN) + pr_debug("QAT: SRC VF driver (ver=%u) is newer than DST PF driver (ver=%u)\n", + *pcompat, ADF_PFVF_COMPAT_THIS_VERSION); + + return 0; +} + +/* + * adf_mstate_capmask_compare() - compare QAT device capability mask + * @sinfo: Pointer to source capability info + * @dinfo: Pointer to target capability info + * + * This function compares the capability mask between source VF and target VF + * + * Returns: 0 if target capability mask is identical to source capability mask, + * 1 if target mask can represent all the capabilities represented by source mask, + * -1 if target mask can't represent all the capabilities represented by source + * mask. 
+ */ +static int adf_mstate_capmask_compare(struct adf_mstate_vreginfo *sinfo, + struct adf_mstate_vreginfo *dinfo) +{ + u64 src = 0, dst = 0; + + if (adf_mstate_check_cap_size(sinfo->size, dinfo->size, sizeof(u64))) { + pr_debug("QAT: Unexpected capability size %u %u %zu\n", + sinfo->size, dinfo->size, sizeof(u64)); + return -1; + } + + memcpy(&src, sinfo->addr, sinfo->size); + memcpy(&dst, dinfo->addr, dinfo->size); + + pr_debug("QAT: Check cap compatibility of cap %llu %llu\n", src, dst); + + if (src == dst) + return 0; + + if ((src | dst) == dst) + return 1; + + return -1; +} + +static int adf_mstate_capmask_superset(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo sinfo = { buf, size }; + + if (adf_mstate_capmask_compare(&sinfo, opa) >= 0) + return 0; + + return -EINVAL; +} + +static int adf_mstate_capmask_equal(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo sinfo = { buf, size }; + + if (adf_mstate_capmask_compare(&sinfo, opa) == 0) + return 0; + + return -EINVAL; +} + +static int adf_mstate_set_vreg(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo *info = opa; + + if (size != info->size) { + pr_debug("QAT: Unexpected cap size %u %u\n", size, info->size); + return -EINVAL; + } + memcpy(info->addr, buf, info->size); + + return 0; +} + +static u32 adf_gen4_vfmig_get_slas(struct adf_accel_dev *accel_dev, u32 vf_nr, + struct mig_user_sla *pmig_slas) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla **sla_type_arr = NULL; + u64 rp_mask, rp_index; + u32 max_num_sla; + u32 sla_cnt = 0; + int i, j; + + if (!accel_dev->rate_limiting) + return 0; + + rp_index = vf_nr * hw_data->num_banks_per_vf; + max_num_sla = adf_rl_get_sla_arr_of_type(rl_data, RL_LEAF, &sla_type_arr); + + for (i = 0; i < max_num_sla; i++) { + if (!sla_type_arr[i]) + continue; + + rp_mask = 0; + for (j = 0; j < sla_type_arr[i]->ring_pairs_cnt; j++) + rp_mask |= BIT(sla_type_arr[i]->ring_pairs_ids[j]); + + if (rp_mask & GENMASK_ULL(rp_index + 3, rp_index)) { + pmig_slas->rp_mask = rp_mask; + pmig_slas->cir = sla_type_arr[i]->cir; + pmig_slas->pir = sla_type_arr[i]->pir; + pmig_slas->srv = sla_type_arr[i]->srv; + pmig_slas++; + sla_cnt++; + } + } + + return sla_cnt; +} + +static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr, + u8 *state, u32 size, void *opa) +{ + struct adf_vf_bank_info *vf_bank_info = opa; + struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 pf_bank_nr; + int ret; + + pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf; + ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr, + (struct bank_state *)state); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load regs for vf%d bank%d\n", + vf_bank_info->vf_nr, vf_bank_info->bank_nr); + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_load_etr_bank(struct adf_accel_dev *accel_dev, + u32 vf_nr, u32 bank_nr, + struct adf_mstate_mgr *mstate_mgr) +{ + struct adf_vf_bank_info vf_bank_info = {accel_dev, vf_nr, bank_nr}; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + char bank_ids[ADF_MSTATE_ID_LEN]; + + snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr); + subsec = adf_mstate_sect_lookup(mstate_mgr, bank_ids, NULL, NULL); + if (!subsec) 
{ + dev_err(&GET_DEV(accel_dev), + "Failed to lookup sec %s for vf%d bank%d\n", + ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS, + adf_gen4_vfmig_load_etr_regs, + &vf_bank_info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to add sec %s for vf%d bank%d\n", + ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + return 0; +} + +static int adf_gen4_vfmig_load_etr(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec; + int ret, i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, + NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_ETRB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + ret = adf_gen4_vfmig_load_etr_bank(accel_dev, vf_nr, i, + &sub_sects_mgr); + if (ret) + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_load_misc(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + void __iomem *csr = adf_get_pmisc_base(accel_dev); + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + u64 ofs; + } misc_states[] = { + {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, + NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_MISCB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < ARRAY_SIZE(misc_states); i++) { + struct adf_mstate_vreginfo info; + u32 regv; + + info.addr = ®v; + info.size = sizeof(regv); + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, + misc_states[i].id, + adf_mstate_set_vreg, + &info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to load sec %s\n", misc_states[i].id); + return -EINVAL; + } + ADF_CSR_WR(csr, misc_states[i].ofs, regv); + } + + return 0; +} + +static int adf_gen4_vfmig_load_generic(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct mig_user_sla dst_slas[RL_RP_CNT_PER_LEAF_MAX] = { }; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + u32 dst_sla_cnt; + struct { + char *id; + int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa); + struct adf_mstate_vreginfo info; + } gen_states[] = { + {ADF_MSTATE_IOV_INIT_IDS, adf_mstate_set_vreg, + {&vf_info->init, sizeof(vf_info->init)}}, + {ADF_MSTATE_COMPAT_VER_IDS, adf_mstate_compatver_check, + 
{&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}}, + {ADF_MSTATE_SLA_IDS, adf_mstate_sla_check, {dst_slas, 0}}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_GEN_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < ARRAY_SIZE(gen_states); i++) { + if (gen_states[i].info.addr == dst_slas) { + dst_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, dst_slas); + gen_states[i].info.size = dst_sla_cnt * sizeof(struct mig_user_sla); + } + + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, + gen_states[i].id, + gen_states[i].action, + &gen_states[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + gen_states[i].id); + return -EINVAL; + } + } + + return 0; +} + +static int adf_gen4_vfmig_load_config(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa); + struct adf_mstate_vreginfo info; + } setups[] = { + {ADF_MSTATE_GEN_CAP_IDS, adf_mstate_capmask_superset, + {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}}, + {ADF_MSTATE_GEN_SVCMAP_IDS, adf_mstate_capmask_equal, + {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}}, + {ADF_MSTATE_GEN_EXTDC_IDS, adf_mstate_capmask_superset, + {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_CONFIG_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < ARRAY_SIZE(setups); i++) { + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, setups[i].id, + setups[i].action, &setups[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + setups[i].id); + return -EINVAL; + } + } + + return 0; +} + +static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state, + u32 size, void *opa) +{ + struct adf_vf_bank_info *vf_bank_info = opa; + struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 pf_bank_nr; + int ret; + + pf_bank_nr = vf_bank_info->bank_nr; + pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf; + + ret = hw_data->bank_state_save(accel_dev, pf_bank_nr, + (struct bank_state *)state); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save regs for vf%d bank%d\n", + vf_bank_info->vf_nr, vf_bank_info->bank_nr); + return ret; + } + + return sizeof(struct bank_state); +} + +static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev, + u32 vf_nr, u32 bank_nr, + struct adf_mstate_mgr *mstate_mgr) +{ + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_vf_bank_info vf_bank_info; + struct adf_mstate_mgr sub_sects_mgr; + char bank_ids[ADF_MSTATE_ID_LEN]; + + snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr); + + subsec = 
adf_mstate_sect_add(mstate_mgr, bank_ids, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to add sec %s for vf%d bank%d\n", + ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + vf_bank_info.accel_dev = accel_dev; + vf_bank_info.vf_nr = vf_nr; + vf_bank_info.bank_nr = bank_nr; + l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS, + adf_gen4_vfmig_save_etr_regs, + &vf_bank_info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to add sec %s for vf%d bank%d\n", + ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr); + return -EINVAL; + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_etr(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec; + int ret, i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_ETRB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + ret = adf_gen4_vfmig_save_etr_bank(accel_dev, vf_nr, i, + &sub_sects_mgr); + if (ret) + return ret; + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_misc(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + void __iomem *csr = adf_get_pmisc_base(accel_dev); + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + u64 offset; + } misc_states[] = { + {ADF_MSTATE_VINTSRC_IDS, ADF_GEN4_VINTSOU_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTSRC_PF2VM_IDS, ADF_GEN4_VINTSOUPF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)}, + }; + ktime_t time_exp; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_MISCB_IDS); + return -EINVAL; + } + + time_exp = ktime_add_us(ktime_get(), ADF_GEN4_PFVF_RSP_TIMEOUT_US); + while (!mutex_trylock(&vf_info->pfvf_mig_lock)) { + if (ktime_after(ktime_get(), time_exp)) { + dev_err(&GET_DEV(accel_dev), "Failed to get pfvf mig lock\n"); + return -ETIMEDOUT; + } + usleep_range(500, 1000); + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(misc_states); i++) { + struct adf_mstate_vreginfo info; + u32 regv; + + info.addr = ®v; + info.size = sizeof(regv); + regv = ADF_CSR_RD(csr, misc_states[i].offset); + + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, + misc_states[i].id, + &info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + misc_states[i].id); + mutex_unlock(&vf_info->pfvf_mig_lock); + return -EINVAL; + } + } + + 
mutex_unlock(&vf_info->pfvf_mig_lock); + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_generic(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct mig_user_sla src_slas[RL_RP_CNT_PER_LEAF_MAX] = { }; + u32 src_sla_cnt; + struct { + char *id; + struct adf_mstate_vreginfo info; + } gen_states[] = { + {ADF_MSTATE_IOV_INIT_IDS, + {&vf_info->init, sizeof(vf_info->init)}}, + {ADF_MSTATE_COMPAT_VER_IDS, + {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}}, + {ADF_MSTATE_SLA_IDS, {src_slas, 0}}, + }; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_GEN_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(gen_states); i++) { + if (gen_states[i].info.addr == src_slas) { + src_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, src_slas); + gen_states[i].info.size = src_sla_cnt * sizeof(struct mig_user_sla); + } + + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, + gen_states[i].id, + &gen_states[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + gen_states[i].id); + return -EINVAL; + } + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_config(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct { + char *id; + struct adf_mstate_vreginfo info; + } setups[] = { + {ADF_MSTATE_GEN_CAP_IDS, + {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}}, + {ADF_MSTATE_GEN_SVCMAP_IDS, + {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}}, + {ADF_MSTATE_GEN_EXTDC_IDS, + {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}}, + }; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_CONFIG_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(setups); i++) { + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, setups[i].id, + &setups[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + setups[i].id); + return -EINVAL; + } + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + ret = adf_gen4_vfmig_save_setup(mdev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save setup for vf_nr %d\n", vf_nr); + return ret; + } + + 
adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state + mdev->setup_size, + mdev->state_size - mdev->setup_size); + if (!adf_mstate_preamble_add(vfmig->mstate_mgr)) + return -EINVAL; + + ret = adf_gen4_vfmig_save_generic(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save generic state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_save_misc(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save misc bar state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_save_etr(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save etr bar state for vf_nr %d\n", vf_nr); + return ret; + } + + adf_mstate_preamble_update(vfmig->mstate_mgr); + + return 0; +} + +static int adf_gen4_vfmig_load_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + ret = adf_gen4_vfmig_load_setup(mdev, mdev->state_size); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to load setup for vf_nr %d\n", + vf_nr); + return ret; + } + + ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, + mdev->state + mdev->remote_setup_size, + mdev->state_size - mdev->remote_setup_size, + NULL, NULL); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Invalid state for vf_nr %d\n", + vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_load_generic(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load general state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_load_misc(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load misc bar state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_load_etr(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load etr bar state for vf_nr %d\n", vf_nr); + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + if (mdev->setup_size) + return 0; + + adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size); + if (!adf_mstate_preamble_add(vfmig->mstate_mgr)) + return -EINVAL; + + ret = adf_gen4_vfmig_save_config(accel_dev, mdev->vf_id); + if (ret) + return ret; + + adf_mstate_preamble_update(vfmig->mstate_mgr); + mdev->setup_size = adf_mstate_state_size(vfmig->mstate_mgr); + + return 0; +} + +static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + u32 setup_size; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + if (mdev->remote_setup_size) + return 0; + + if (len < sizeof(struct adf_mstate_preh)) + return -EAGAIN; + + adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size); + setup_size = adf_mstate_state_size_from_remote(vfmig->mstate_mgr); + if (setup_size > mdev->state_size) + return -EINVAL; + + if (len < setup_size) + return -EAGAIN; + + ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, mdev->state, + setup_size, NULL, 
NULL); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Invalid setup for vf_nr %d\n", + vf_nr); + return ret; + } + + mdev->remote_setup_size = setup_size; + + ret = adf_gen4_vfmig_load_config(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load config for vf_nr %d\n", vf_nr); + return ret; + } + + return 0; +} + +void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops) +{ + vfmig_ops->init = adf_gen4_vfmig_init_device; + vfmig_ops->cleanup = adf_gen4_vfmig_cleanup_device; + vfmig_ops->reset = adf_gen4_vfmig_reset_device; + vfmig_ops->open = adf_gen4_vfmig_open_device; + vfmig_ops->close = adf_gen4_vfmig_close_device; + vfmig_ops->suspend = adf_gen4_vfmig_suspend_device; + vfmig_ops->resume = adf_gen4_vfmig_resume_device; + vfmig_ops->save_state = adf_gen4_vfmig_save_state; + vfmig_ops->load_state = adf_gen4_vfmig_load_state; + vfmig_ops->load_setup = adf_gen4_vfmig_load_setup; + vfmig_ops->save_setup = adf_gen4_vfmig_save_setup; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_vf_mig_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h new file mode 100644 index 0000000000000000000000000000000000000000..72216d078ee1f62153d103bcc3562177f10b62f2 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN4_VF_MIG_H_ +#define ADF_GEN4_VF_MIG_H_ + +#include "adf_accel_devices.h" + +void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index beef9a5f6c75c0868d9b4be0f69c572a199e068a..b19aa1ef8eeed9f55a89426cddebe165c4717cb5 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -12,6 +12,7 @@ #include #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "adf_clock.h" @@ -22,12 +23,6 @@ #define ADF_HB_EMPTY_SIG 0xA5A5A5A5 -/* Heartbeat counter pair */ -struct hb_cnt_pair { - __u16 resp_heartbeat_cnt; - __u16 req_heartbeat_cnt; -}; - static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev) { u64 curr_time = adf_clock_get_current_time(); @@ -210,6 +205,19 @@ static int adf_hb_get_status(struct adf_accel_dev *accel_dev) return ret; } +static void adf_heartbeat_reset(struct adf_accel_dev *accel_dev) +{ + u64 curr_time = adf_clock_get_current_time(); + u64 time_since_reset = curr_time - accel_dev->heartbeat->last_hb_reset_time; + + if (time_since_reset < ADF_CFG_HB_RESET_MS) + return; + + accel_dev->heartbeat->last_hb_reset_time = curr_time; + if (adf_notify_fatal_error(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to notify fatal error\n"); +} + void adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status) { @@ -234,6 +242,7 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev, "Heartbeat ERROR: QAT is not responding.\n"); *hb_status = HB_DEV_UNRESPONSIVE; hb->hb_failed_counter++; + adf_heartbeat_reset(accel_dev); return; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h index b22e3cb29798ec57200d82ea110a087105c53273..16fdfb48b196acd33f020f9cd580a5d026b5a2f7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h +++ 
b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h @@ -13,17 +13,26 @@ struct dentry; #define ADF_CFG_HB_TIMER_DEFAULT_MS 500 #define ADF_CFG_HB_COUNT_THRESHOLD 3 +#define ADF_CFG_HB_RESET_MS 5000 + enum adf_device_heartbeat_status { HB_DEV_UNRESPONSIVE = 0, HB_DEV_ALIVE, HB_DEV_UNSUPPORTED, }; +/* Heartbeat counter pair */ +struct hb_cnt_pair { + __u16 resp_heartbeat_cnt; + __u16 req_heartbeat_cnt; +}; + struct adf_heartbeat { unsigned int hb_sent_counter; unsigned int hb_failed_counter; unsigned int hb_timer; u64 last_hb_check_time; + u64 last_hb_reset_time; bool ctrs_cnt_checked; struct hb_dma_addr { dma_addr_t phy_addr; @@ -35,6 +44,9 @@ struct adf_heartbeat { struct dentry *cfg; struct dentry *sent; struct dentry *failed; +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + struct dentry *inject_error; +#endif } dbgfs; }; @@ -51,6 +63,15 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status); void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev); +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION +int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev); +#else +static inline int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev) +{ + return -EPERM; +} +#endif + #else static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c index 803cbfd838f0a1333e639d642653b034f81b0034..cccdff24b48d61baf2c70165c0f0d6be254b18ad 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c @@ -8,6 +8,7 @@ #include #include #include +#include "adf_admin.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_heartbeat.h" @@ -154,6 +155,44 @@ static const struct file_operations adf_hb_cfg_fops = { .write = adf_hb_cfg_write, }; +static ssize_t adf_hb_error_inject_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct adf_accel_dev *accel_dev = file->private_data; + char buf[3]; + int ret; + + /* last byte left as string termination */ + if (*ppos != 0 || count != 2) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + buf[count] = '\0'; + + if (buf[0] != '1') + return -EINVAL; + + ret = adf_heartbeat_inject_error(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Heartbeat error injection failed with status %d\n", + ret); + return ret; + } + + dev_info(&GET_DEV(accel_dev), "Heartbeat error injection enabled\n"); + + return count; +} + +static const struct file_operations adf_hb_error_inject_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = adf_hb_error_inject_write, +}; + void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev) { struct adf_heartbeat *hb = accel_dev->heartbeat; @@ -170,6 +209,17 @@ void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev) &hb->hb_failed_counter, &adf_hb_stats_fops); hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir, accel_dev, &adf_hb_cfg_fops); + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION)) { + struct dentry *inject_error __maybe_unused; + + inject_error = debugfs_create_file("inject_error", 0200, + hb->dbgfs.base_dir, accel_dev, + &adf_hb_error_inject_fops); +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + hb->dbgfs.inject_error = inject_error; +#endif + } } EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add); @@ -188,6 +238,10 @@ void 
adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev) hb->dbgfs.failed = NULL; debugfs_remove(hb->dbgfs.cfg); hb->dbgfs.cfg = NULL; +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + debugfs_remove(hb->dbgfs.inject_error); + hb->dbgfs.inject_error = NULL; +#endif debugfs_remove(hb->dbgfs.base_dir); hb->dbgfs.base_dir = NULL; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c new file mode 100644 index 0000000000000000000000000000000000000000..a3b474bdef6c832f8ae10f03e64d7c0f9ffdcd02 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include <linux/random.h> + +#include "adf_admin.h" +#include "adf_common_drv.h" +#include "adf_heartbeat.h" + +#define MAX_HB_TICKS 0xFFFFFFFF + +static int adf_hb_set_timer_to_max(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + + accel_dev->heartbeat->hb_timer = 0; + + if (hw_data->stop_timer) + hw_data->stop_timer(accel_dev); + + return adf_send_admin_hb_timer(accel_dev, MAX_HB_TICKS); +} + +static void adf_set_hb_counters_fail(struct adf_accel_dev *accel_dev, u32 ae, + u32 thr) +{ + struct hb_cnt_pair *stats = accel_dev->heartbeat->dma.virt_addr; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + const size_t max_aes = hw_device->get_num_aes(hw_device); + const size_t hb_ctrs = hw_device->num_hb_ctrs; + size_t thr_id = ae * hb_ctrs + thr; + u16 num_rsp = stats[thr_id].resp_heartbeat_cnt; + + /* + * Inject live.req != live.rsp and live.rsp == last.rsp + * to trigger the heartbeat error detection + */ + stats[thr_id].req_heartbeat_cnt++; + stats += (max_aes * hb_ctrs); + stats[thr_id].resp_heartbeat_cnt = num_rsp; +} + +int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + const size_t max_aes = hw_device->get_num_aes(hw_device); + const size_t hb_ctrs = hw_device->num_hb_ctrs; + u32 rand, rand_ae, rand_thr; + unsigned long ae_mask; + int ret; + + ae_mask = hw_device->ae_mask; + + do { + /* Ensure we have a valid ae */ + get_random_bytes(&rand, sizeof(rand)); + rand_ae = rand % max_aes; + } while (!test_bit(rand_ae, &ae_mask)); + + get_random_bytes(&rand, sizeof(rand)); + rand_thr = rand % hb_ctrs; + + /* Increase the heartbeat timer to prevent FW updating HB counters */ + ret = adf_hb_set_timer_to_max(accel_dev); + if (ret) + return ret; + + /* Configure worker threads to stop processing any packet */ + ret = adf_disable_arb_thd(accel_dev, rand_ae, rand_thr); + if (ret) + return ret; + + /* Change HB counters memory to simulate a hang */ + adf_set_hb_counters_fail(accel_dev, rand_ae, rand_thr); + + return 0; +}
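The index arithmetic in adf_set_hb_counters_fail() encodes the heartbeat DMA layout: one hb_cnt_pair per (AE, thread) holding the live counters, followed by an equally sized block holding the last-read snapshot. Hypothetical helpers making that addressing explicit (illustrative only, not driver API):

    static struct hb_cnt_pair *hb_live_cnt(struct hb_cnt_pair *base,
                                           size_t hb_ctrs, u32 ae, u32 thr)
    {
            return &base[ae * hb_ctrs + thr];
    }

    static struct hb_cnt_pair *hb_last_cnt(struct hb_cnt_pair *base,
                                           size_t max_aes, size_t hb_ctrs,
                                           u32 ae, u32 thr)
    {
            /* the snapshot block starts after max_aes * hb_ctrs live pairs */
            return &base[max_aes * hb_ctrs + ae * hb_ctrs + thr];
    }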
diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c index dd9a31c20bc9c982d88d20949be29e8c4cf340e0..f93d9cca70cee4cb67b4c8ea4f705911c0f2052c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c @@ -99,3 +99,28 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev) csr_ops->write_csr_ring_srv_arb_en(csr, i, 0); } EXPORT_SYMBOL_GPL(adf_exit_arb); + +int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr) +{ + void __iomem *csr = accel_dev->transport->banks[0].csr_addr; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + const u32 *thd_2_arb_cfg; + struct arb_info info; + u32 ae_thr_map; + + if (ADF_AE_STRAND0_THREAD == thr || ADF_AE_STRAND1_THREAD == thr) + thr = ADF_AE_ADMIN_THREAD; + + hw_data->get_arb_info(&info); + thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev); + if (!thd_2_arb_cfg) + return -EFAULT; + + /* Disable scheduling for this particular AE and thread */ + ae_thr_map = *(thd_2_arb_cfg + ae); + ae_thr_map &= ~(GENMASK(3, 0) << (thr * BIT(2))); + + WRITE_CSR_ARB_WT2SAM(csr, info.arb_offset, info.wt2sam_offset, ae, + ae_thr_map); + return 0; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 0f9e2d59ce385730c2e342fbf05a0275177058e1..74f0818c07034873871056269fc0db040cc284be 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -9,6 +9,9 @@ #include "adf_common_drv.h" #include "adf_dbgfs.h" #include "adf_heartbeat.h" +#include "adf_rl.h" +#include "adf_sysfs_ras_counters.h" +#include "adf_telemetry.h" static LIST_HEAD(service_table); static DEFINE_MUTEX(service_lock); @@ -61,7 +64,6 @@ int adf_service_unregister(struct service_hndl *service) static int adf_dev_init(struct adf_accel_dev *accel_dev) { struct service_hndl *service; - struct list_head *list_itr; struct adf_hw_device_data *hw_data = accel_dev->hw_device; int ret; @@ -120,6 +122,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) } set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); + if (hw_data->ras_ops.enable_ras_errors) + hw_data->ras_ops.enable_ras_errors(accel_dev); + hw_data->enable_ints(accel_dev); hw_data->enable_error_correction(accel_dev); @@ -134,14 +139,20 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) } adf_heartbeat_init(accel_dev); + ret = adf_rl_init(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; + + ret = adf_tl_init(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; /* * Subservice initialisation is divided into two stages: init and start. * This is to facilitate any ordering dependencies between services * prior to starting any of the accelerators.
*/ - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { dev_err(&GET_DEV(accel_dev), "Failed to initialise service %s\n", @@ -168,7 +179,6 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; int ret; set_bit(ADF_STATUS_STARTING, &accel_dev->status); @@ -211,9 +221,15 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) } adf_heartbeat_start(accel_dev); + ret = adf_rl_start(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + ret = adf_tl_start(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; + + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_START)) { dev_err(&GET_DEV(accel_dev), "Failed to start service %s\n", @@ -246,6 +262,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); adf_dbgfs_add(accel_dev); + adf_sysfs_start_ras(accel_dev); return 0; } @@ -264,7 +281,6 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; bool wait = false; int ret; @@ -272,7 +288,10 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) return; + adf_tl_stop(accel_dev); + adf_rl_stop(accel_dev); adf_dbgfs_rm(accel_dev); + adf_sysfs_stop_ras(accel_dev); clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); @@ -289,8 +308,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) qat_comp_algs_unregister(); clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (!test_bit(accel_dev->accel_id, service->start_status)) continue; ret = service->event_hld(accel_dev, ADF_EVENT_STOP); @@ -327,7 +345,6 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; if (!hw_data) { dev_err(&GET_DEV(accel_dev), @@ -349,8 +366,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) &accel_dev->status); } - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (!test_bit(accel_dev->accel_id, service->init_status)) continue; if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) @@ -361,8 +377,15 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) clear_bit(accel_dev->accel_id, service->init_status); } + adf_rl_exit(accel_dev); + + if (hw_data->ras_ops.disable_ras_errors) + hw_data->ras_ops.disable_ras_errors(accel_dev); + adf_heartbeat_shutdown(accel_dev); + adf_tl_shutdown(accel_dev); + hw_data->disable_iov(accel_dev); if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { @@ -387,10 +410,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) { struct 
service_hndl *service; - struct list_head *list_itr; - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) dev_err(&GET_DEV(accel_dev), "Failed to restart service %s.\n", @@ -402,10 +423,8 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; - struct list_head *list_itr; - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) dev_err(&GET_DEV(accel_dev), "Failed to restart service %s.\n", @@ -414,6 +433,18 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) return 0; } +void adf_error_notifier(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + + list_for_each_entry(service, &service_table, list) { + if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR)) + dev_err(&GET_DEV(accel_dev), + "Failed to send error event to %s.\n", + service->name); + } +} + static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev) { char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index 2aba194a7c292244b1e34503748851f53c3e16da..cae1aee5479aff04e7c123674b9f5e0cb33f67d9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -132,6 +132,26 @@ static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev) return false; } +static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev) +{ + struct adf_ras_ops *ras_ops = &accel_dev->hw_device->ras_ops; + bool reset_required; + + if (ras_ops->handle_interrupt && + ras_ops->handle_interrupt(accel_dev, &reset_required)) { + if (reset_required) { + dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n"); + if (adf_notify_fatal_error(accel_dev)) + dev_err(&GET_DEV(accel_dev), + "Failed to notify fatal error\n"); + } + + return true; + } + + return false; +} + static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) { struct adf_accel_dev *accel_dev = dev_ptr; @@ -145,6 +165,9 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) if (adf_handle_pm_int(accel_dev)) return IRQ_HANDLED; + if (adf_handle_ras_int(accel_dev)) + return IRQ_HANDLED; + dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", accel_dev->accel_id); @@ -254,7 +277,7 @@ static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev) if (!accel_dev->pf.vf_info) msix_num_entries += hw_data->num_banks; - irqs = kzalloc_node(msix_num_entries * sizeof(*irqs), + irqs = kcalloc_node(msix_num_entries, sizeof(*irqs), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!irqs) return -ENOMEM; @@ -357,8 +380,6 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); /** * adf_init_misc_wq() - Init misc workqueue * - * Function init workqueue 'qat_misc_wq' for general purpose. - * * Return: 0 on success, error code otherwise. 
*/ int __init adf_init_misc_wq(void) diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c new file mode 100644 index 0000000000000000000000000000000000000000..41cc763a74aa25ad44da62d912e472d44d1f0507 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ + +#include <linux/slab.h> +#include <linux/types.h> +#include "adf_mstate_mgr.h" + +#define ADF_MSTATE_MAGIC 0xADF5CAEA +#define ADF_MSTATE_VERSION 0x1 + +struct adf_mstate_sect_h { + u8 id[ADF_MSTATE_ID_LEN]; + u32 size; + u32 sub_sects; + u8 state[]; +}; + +u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr) +{ + return mgr->state - mgr->buf; +} + +static inline u32 adf_mstate_avail_room(struct adf_mstate_mgr *mgr) +{ + return mgr->buf + mgr->size - mgr->state; +} + +void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size) +{ + mgr->buf = buf; + mgr->state = buf; + mgr->size = size; + mgr->n_sects = 0; +} + +struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size) +{ + struct adf_mstate_mgr *mgr; + + mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + if (!mgr) + return NULL; + + adf_mstate_mgr_init(mgr, buf, size); + + return mgr; +} + +void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr) +{ + kfree(mgr); +} + +void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr, + struct adf_mstate_mgr *p_mgr) +{ + adf_mstate_mgr_init(mgr, p_mgr->state, + p_mgr->size - adf_mstate_state_size(p_mgr)); +} + +void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *p_sect) +{ + adf_mstate_mgr_init(mgr, p_sect->state, p_sect->size); + mgr->n_sects = p_sect->sub_sects; +} + +static void adf_mstate_preamble_init(struct adf_mstate_preh *preamble) +{ + preamble->magic = ADF_MSTATE_MAGIC; + preamble->version = ADF_MSTATE_VERSION; + preamble->preh_len = sizeof(*preamble); + preamble->size = 0; + preamble->n_sects = 0; +} + +/* default preamble checker */ +static int adf_mstate_preamble_def_checker(struct adf_mstate_preh *preamble, + void *opaque) +{ + struct adf_mstate_mgr *mgr = opaque; + + if (preamble->magic != ADF_MSTATE_MAGIC || + preamble->version > ADF_MSTATE_VERSION || + preamble->preh_len > mgr->size) { + pr_debug("QAT: LM - Invalid state (magic=%#x, version=%#x, hlen=%u), state_size=%u\n", + preamble->magic, preamble->version, preamble->preh_len, + mgr->size); + return -EINVAL; + } + + return 0; +} + +struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *pre = (struct adf_mstate_preh *)mgr->buf; + + if (adf_mstate_avail_room(mgr) < sizeof(*pre)) { + pr_err("QAT: LM - Not enough space for preamble\n"); + return NULL; + } + + adf_mstate_preamble_init(pre); + mgr->state += pre->preh_len; + + return pre; +} + +int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *preamble = (struct adf_mstate_preh *)mgr->buf; + + preamble->size = adf_mstate_state_size(mgr) - preamble->preh_len; + preamble->n_sects = mgr->n_sects; + + return 0; +} + +static void adf_mstate_dump_sect(struct adf_mstate_sect_h *sect, + const char *prefix) +{ + pr_debug("QAT: LM - %s QAT state section %s\n", prefix, sect->id); + print_hex_dump_debug("h-", DUMP_PREFIX_OFFSET, 16, 2, sect, + sizeof(*sect), true); + print_hex_dump_debug("s-", DUMP_PREFIX_OFFSET, 16, 2, sect->state, + sect->size, true); +}
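The blob this manager builds is a preamble followed by a flat run of sections; each section is a header immediately followed by its payload, with nested sub-sections stored inside the parent's payload and counted in sub_sects. A compacted userspace sketch of walking the top level of such a blob (trimmed mirrors of the structures above; no alignment or endianness handling)::

    #include <stdint.h>
    #include <stdio.h>

    /* Trimmed mirrors of adf_mstate_preh / adf_mstate_sect_h */
    struct preh { uint32_t magic, version; uint16_t preh_len, n_sects; uint32_t size; };
    struct sect_h { char id[8]; uint32_t size, sub_sects; };

    static void walk_top_level(const uint8_t *buf)
    {
            const struct preh *p = (const struct preh *)buf;
            const uint8_t *cur = buf + p->preh_len;

            for (uint16_t i = 0; i < p->n_sects; i++) {
                    const struct sect_h *s = (const struct sect_h *)cur;

                    printf("section %.8s: %u bytes, %u sub-sections\n",
                           s->id, (unsigned)s->size, (unsigned)s->sub_sects);
                    cur += sizeof(*s) + s->size; /* next header follows the payload */
            }
    }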
+static inline void __adf_mstate_sect_update(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *sect, + u32 size, + u32 n_subsects) +{ + sect->size += size; + sect->sub_sects += n_subsects; + mgr->n_sects++; + mgr->state += sect->size; + + adf_mstate_dump_sect(sect, "Add"); +} + +void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr, + struct adf_mstate_mgr *curr_mgr, + struct adf_mstate_sect_h *sect) +{ + __adf_mstate_sect_update(p_mgr, sect, adf_mstate_state_size(curr_mgr), + curr_mgr->n_sects); +} + +static struct adf_mstate_sect_h *adf_mstate_sect_add_header(struct adf_mstate_mgr *mgr, + const char *id) +{ + struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)(mgr->state); + + if (adf_mstate_avail_room(mgr) < sizeof(*sect)) { + pr_debug("QAT: LM - Not enough space for header of QAT state sect %s\n", id); + return NULL; + } + + strscpy(sect->id, id, sizeof(sect->id)); + sect->size = 0; + sect->sub_sects = 0; + mgr->state += sizeof(*sect); + + return sect; +} + +struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr, + const char *id, + struct adf_mstate_vreginfo *info) +{ + struct adf_mstate_sect_h *sect; + + sect = adf_mstate_sect_add_header(mgr, id); + if (!sect) + return NULL; + + if (adf_mstate_avail_room(mgr) < info->size) { + pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n", + id, info->size); + return NULL; + } + + memcpy(sect->state, info->addr, info->size); + __adf_mstate_sect_update(mgr, sect, info->size, 0); + + return sect; +} + +struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_populate populate, + void *opaque) +{ + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *sect; + int avail_room, size; + + sect = adf_mstate_sect_add_header(mgr, id); + if (!sect) + return NULL; + + if (!populate) + return sect; + + avail_room = adf_mstate_avail_room(mgr); + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mgr); + + size = (*populate)(&sub_sects_mgr, sect->state, avail_room, opaque); + if (size < 0) + return NULL; + + size += adf_mstate_state_size(&sub_sects_mgr); + if (avail_room < size) { + pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n", + id, size); + return NULL; + } + __adf_mstate_sect_update(mgr, sect, size, sub_sects_mgr.n_sects); + + return sect; +} + +static int adf_mstate_sect_validate(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_sect_h *start = (struct adf_mstate_sect_h *)mgr->state; + struct adf_mstate_sect_h *sect = start; + u64 end; + int i; + + end = (uintptr_t)mgr->buf + mgr->size; + for (i = 0; i < mgr->n_sects; i++) { + uintptr_t s_start = (uintptr_t)sect->state; + uintptr_t s_end = s_start + sect->size; + + if (s_end < s_start || s_end > end) { + pr_debug("QAT: LM - Corrupted state section (index=%u, size=%u) in state_mgr (size=%u, secs=%u)\n", + i, sect->size, mgr->size, mgr->n_sects); + return -EINVAL; + } + sect = (struct adf_mstate_sect_h *)s_end; + } + + pr_debug("QAT: LM - Scanned section (last child=%s, size=%lu) in state_mgr (size=%u, secs=%u)\n", + start->id, sizeof(struct adf_mstate_sect_h) * (ulong)(sect - start), + mgr->size, mgr->n_sects); + + return 0; +} + +u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *preh = (struct adf_mstate_preh *)mgr->buf; + + return preh->preh_len + preh->size; +} + +int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr, u8 *buf, u32 size, + adf_mstate_preamble_checker pre_checker, + void *opaque) +{ + struct adf_mstate_preh *pre; + int ret; + + adf_mstate_mgr_init(mgr, buf, size); +
pre = (struct adf_mstate_preh *)(mgr->buf); + + pr_debug("QAT: LM - Dump state preambles\n"); + print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 2, pre, pre->preh_len, 0); + + if (pre_checker) + ret = (*pre_checker)(pre, opaque); + else + ret = adf_mstate_preamble_def_checker(pre, mgr); + if (ret) + return ret; + + mgr->state = mgr->buf + pre->preh_len; + mgr->n_sects = pre->n_sects; + + return adf_mstate_sect_validate(mgr); +} + +struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_action action, + void *opaque) +{ + struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)mgr->state; + struct adf_mstate_mgr sub_sects_mgr; + int i, ret; + + for (i = 0; i < mgr->n_sects; i++) { + if (!strncmp(sect->id, id, sizeof(sect->id))) + goto found; + + sect = (struct adf_mstate_sect_h *)(sect->state + sect->size); + } + + return NULL; + +found: + adf_mstate_dump_sect(sect, "Found"); + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, sect); + if (sect->sub_sects && adf_mstate_sect_validate(&sub_sects_mgr)) + return NULL; + + if (!action) + return sect; + + ret = (*action)(&sub_sects_mgr, sect->state, sect->size, opaque); + if (ret) + return NULL; + + return sect; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h new file mode 100644 index 0000000000000000000000000000000000000000..81d263a596c56f571f6923c98d1a3dc03489e69e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ + +#ifndef ADF_MSTATE_MGR_H +#define ADF_MSTATE_MGR_H + +#define ADF_MSTATE_ID_LEN 8 + +#define ADF_MSTATE_ETRB_IDS "ETRBAR" +#define ADF_MSTATE_MISCB_IDS "MISCBAR" +#define ADF_MSTATE_EXTB_IDS "EXTBAR" +#define ADF_MSTATE_GEN_IDS "GENER" +#define ADF_MSTATE_CONFIG_IDS "CONFIG" +#define ADF_MSTATE_SECTION_NUM 5 + +#define ADF_MSTATE_BANK_IDX_IDS "bnk" + +#define ADF_MSTATE_ETR_REGS_IDS "mregs" +#define ADF_MSTATE_VINTSRC_IDS "visrc" +#define ADF_MSTATE_VINTMSK_IDS "vimsk" +#define ADF_MSTATE_SLA_IDS "sla" +#define ADF_MSTATE_IOV_INIT_IDS "iovinit" +#define ADF_MSTATE_COMPAT_VER_IDS "compver" +#define ADF_MSTATE_GEN_CAP_IDS "gencap" +#define ADF_MSTATE_GEN_SVCMAP_IDS "svcmap" +#define ADF_MSTATE_GEN_EXTDC_IDS "extdc" +#define ADF_MSTATE_VINTSRC_PF2VM_IDS "vispv" +#define ADF_MSTATE_VINTMSK_PF2VM_IDS "vimpv" +#define ADF_MSTATE_VM2PF_IDS "vm2pf" +#define ADF_MSTATE_PF2VM_IDS "pf2vm" + +struct adf_mstate_mgr { + u8 *buf; + u8 *state; + u32 size; + u32 n_sects; +}; + +struct adf_mstate_preh { + u32 magic; + u32 version; + u16 preh_len; + u16 n_sects; + u32 size; +}; + +struct adf_mstate_vreginfo { + void *addr; + u32 size; +}; + +struct adf_mstate_sect_h; + +typedef int (*adf_mstate_preamble_checker)(struct adf_mstate_preh *preamble, void *opa); +typedef int (*adf_mstate_populate)(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa); +typedef int (*adf_mstate_action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, + void *opa); + +struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size); +void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr); +void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size); +void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr, + struct adf_mstate_mgr *p_mgr); +void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *p_sect); +int adf_mstate_mgr_init_from_remote(struct 
adf_mstate_mgr *mgr, + u8 *buf, u32 size, + adf_mstate_preamble_checker checker, + void *opaque); +struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr); +int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr); +u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr); +u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr); +void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr, + struct adf_mstate_mgr *curr_mgr, + struct adf_mstate_sect_h *sect); +struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr, + const char *id, + struct adf_mstate_vreginfo *info); +struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_populate populate, + void *opaque); +struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_action action, + void *opaque); +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h index 204a42438992645960e99b234b77983cc3e54864..d1b3ef9cadacc02574ccf9c56515cc1c2cabee36 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h @@ -99,6 +99,8 @@ enum pf2vf_msgtype { ADF_PF2VF_MSGTYPE_RESTARTING = 0x01, ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02, ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03, + ADF_PF2VF_MSGTYPE_FATAL_ERROR = 0x04, + ADF_PF2VF_MSGTYPE_RESTARTED = 0x05, /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */ ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10, }; @@ -112,6 +114,7 @@ enum vf2pf_msgtype { ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07, ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08, ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09, + ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE = 0x0a, /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. 
*/ ADF_VF2PF_MSGTYPE_RP_RESET = 0x10, }; @@ -124,8 +127,10 @@ enum pfvf_compatibility_version { ADF_PFVF_COMPAT_FAST_ACK = 0x03, /* Ring to service mapping support for non-standard mappings */ ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04, + /* Fallback compat */ + ADF_PFVF_COMPAT_FALLBACK = 0x05, /* Reference to the latest version */ - ADF_PFVF_COMPAT_THIS_VERSION = 0x04, + ADF_PFVF_COMPAT_THIS_VERSION = 0x05, }; /* PF->VF Version Response */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c index 14c069f0d71a5b81ce246366f83e492924d663cd..0e31f4b41844e0a8d53de4000c4d574afab989f4 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c @@ -1,21 +1,83 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2015 - 2021 Intel Corporation */ +#include <linux/delay.h> #include <linux/pci.h> #include "adf_accel_devices.h" #include "adf_pfvf_msg.h" #include "adf_pfvf_pf_msg.h" #include "adf_pfvf_pf_proto.h" +#define ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY 100 +#define ADF_VF_SHUTDOWN_RETRY 100 + void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) { struct adf_accel_vf_info *vf; struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING }; int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n"); for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { - if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg)) + vf->restarting = false; + if (!vf->init) + continue; + if (adf_send_pf2vf_msg(accel_dev, i, msg)) dev_err(&GET_DEV(accel_dev), "Failed to send restarting msg to VF%d\n", i); + else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK) + vf->restarting = true; + } +} + +void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev) +{ + int num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + int i, retries = ADF_VF_SHUTDOWN_RETRY; + struct adf_accel_vf_info *vf; + bool vf_running; + + dev_dbg(&GET_DEV(accel_dev), "pf2vf wait for restarting complete\n"); + do { + vf_running = false; + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) + if (vf->restarting) + vf_running = true; + if (!vf_running) + break; + msleep(ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY); + } while (--retries); + + if (vf_running) + dev_warn(&GET_DEV(accel_dev), "Some VFs are still running\n"); +} + +void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTED }; + int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + struct adf_accel_vf_info *vf; + + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarted\n"); + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { + if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK && + adf_send_pf2vf_msg(accel_dev, i, msg)) + dev_err(&GET_DEV(accel_dev), + "Failed to send restarted msg to VF%d\n", i); + } +} + +void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_FATAL_ERROR }; + int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + struct adf_accel_vf_info *vf; + + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify fatal error\n"); + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { + if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK && + adf_send_pf2vf_msg(accel_dev, i, msg)) + dev_err(&GET_DEV(accel_dev), + "Failed to send fatal error msg to VF%d\n", i); } }
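Taken together, the new messages give the PF a fallback-aware reset handshake: notify RESTARTING, wait for each compatible VF's RESTARTING_COMPLETE, recover the device, then notify RESTARTED. A sketch of the intended ordering, assuming a hypothetical recovery helper (qat_pf_recover() is a placeholder, not an API introduced by this patch)::

    /* Illustrative caller only; error handling elided. */
    static void qat_pf_reset_with_fallback(struct adf_accel_dev *accel_dev)
    {
            /* VFs with compat >= ADF_PFVF_COMPAT_FALLBACK are marked restarting */
            adf_pf2vf_notify_restarting(accel_dev);
            /* poll vf->restarting; bounded by 100 retries x 100 ms = 10 s */
            adf_pf2vf_wait_for_restarting_complete(accel_dev);
            /* placeholder for the actual PF reset/recovery path */
            qat_pf_recover(accel_dev);
            /* allow compatible VFs to re-initialise */
            adf_pf2vf_notify_restarted(accel_dev);
    }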
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h index e8982d1ac8962b3c4ebf3ab0d34f7d3eb65d8df3..f203d88c919c2f06bbbe1d82cf9f5966f0d5706f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h @@ -5,7 +5,28 @@ #include "adf_accel_devices.h" +#if defined(CONFIG_PCI_IOV) void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); +void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev); +void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev); +void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev); +#else +static inline void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ +} +#endif typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev, u8 *buffer, u8 compat); diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c index 388e58bcbcaf2683228ae30e8aef99f35f3a513b..b9b5e744a3f16356d1f8865f0eafba3dd07e7ab9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c @@ -242,13 +242,7 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n", vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION); - if (vf_compat_ver == 0) - compat = ADF_PF2VF_VF_INCOMPATIBLE; - else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION) - compat = ADF_PF2VF_VF_COMPATIBLE; - else - compat = ADF_PF2VF_VF_COMPAT_UNKNOWN; - + compat = adf_vf_compat_checker(vf_compat_ver); vf_info->vf_compat_ver = vf_compat_ver; resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP; @@ -291,6 +285,14 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, vf_info->init = false; } break; + case ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE: + { + dev_dbg(&GET_DEV(accel_dev), + "Restarting Complete received from VF%d\n", vf_nr); + vf_info->restarting = false; + vf_info->init = false; + } + break; case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ: case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ: case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ: diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h index 2be048e2287b7b7f80cff346c38f926e044d5d21..1a044297d8733829ac48d867a9fec1e67bdb61e8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h @@ -28,4 +28,15 @@ u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg, const struct pfvf_csr_format *fmt); +static inline u8 adf_vf_compat_checker(u8 vf_compat_ver) +{ + if (vf_compat_ver == 0) + return ADF_PF2VF_VF_INCOMPATIBLE; + + if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION) + return ADF_PF2VF_VF_COMPATIBLE; + + return ADF_PF2VF_VF_COMPAT_UNKNOWN; +} + #endif /* ADF_PFVF_UTILS_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c index 
1015155b637493fb81c9720b72b1324873020457..dc284a089c88954c100bf1a64348c456d6162353 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c @@ -308,6 +308,12 @@ static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev, adf_pf2vf_handle_pf_restarting(accel_dev); return false; + case ADF_PF2VF_MSGTYPE_RESTARTED: + dev_dbg(&GET_DEV(accel_dev), "Restarted message received from PF\n"); + return true; + case ADF_PF2VF_MSGTYPE_FATAL_ERROR: + dev_err(&GET_DEV(accel_dev), "Fatal error received from PF\n"); + return true; case ADF_PF2VF_MSGTYPE_VERSION_RESP: case ADF_PF2VF_MSGTYPE_BLKMSG_RESP: case ADF_PF2VF_MSGTYPE_RP_RESET_RESP: diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c new file mode 100644 index 0000000000000000000000000000000000000000..f0a13c19019673b48ef7a1dd6e9d159eb785b933 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include <linux/debugfs.h> +#include <linux/fs.h> +#include <linux/types.h> + +#include "adf_accel_devices.h" +#include "adf_pm_dbgfs.h" + +static ssize_t pm_status_read(struct file *f, char __user *buf, size_t count, + loff_t *pos) +{ + struct adf_accel_dev *accel_dev = file_inode(f)->i_private; + struct adf_pm pm = accel_dev->power_management; + + if (pm.print_pm_status) + return pm.print_pm_status(accel_dev, buf, count, pos); + + return count; +} + +static const struct file_operations pm_status_fops = { + .owner = THIS_MODULE, + .read = pm_status_read, +}; + +void adf_pm_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + struct adf_pm *pm = &accel_dev->power_management; + + if (!pm->present || !pm->print_pm_status) + return; + + pm->debugfs_pm_status = debugfs_create_file("pm_status", 0400, + accel_dev->debugfs_dir, + accel_dev, &pm_status_fops); +} + +void adf_pm_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + struct adf_pm *pm = &accel_dev->power_management; + + if (!pm->present) + return; + + debugfs_remove(pm->debugfs_pm_status); + pm->debugfs_pm_status = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h new file mode 100644 index 0000000000000000000000000000000000000000..83632e5aa097c06c1c1c2a11b0b615a878d649ab --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_PM_DBGFS_H_ +#define ADF_PM_DBGFS_H_ + +struct adf_accel_dev; + +void adf_pm_dbgfs_rm(struct adf_accel_dev *accel_dev); +void adf_pm_dbgfs_add(struct adf_accel_dev *accel_dev); + +#endif /* ADF_PM_DBGFS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c new file mode 100644 index 0000000000000000000000000000000000000000..346ef8bee99d9f061d7b67ee752e6c88099a4d4e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -0,0 +1,1186 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#define dev_fmt(fmt) "RateLimiting: " fmt + +#include <asm/div64.h> +#include <asm/errno.h> + +#include <linux/dev_printk.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/units.h> + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_rl_admin.h" +#include "adf_rl.h" +#include "adf_sysfs_rl.h" + +#define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET 0U +#define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET 0U +#define RL_TOKEN_PCIE_SIZE 64 +#define
RL_TOKEN_ASYM_SIZE 1024 +#define RL_CSR_SIZE 4U +#define RL_CAPABILITY_MASK GENMASK(6, 4) +#define RL_CAPABILITY_VALUE 0x70 +#define RL_VALIDATE_NON_ZERO(input) ((input) == 0) +#define ROOT_MASK GENMASK(1, 0) +#define CLUSTER_MASK GENMASK(3, 0) +#define LEAF_MASK GENMASK(5, 0) + +static int validate_user_input(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, + bool is_update) +{ + const unsigned long rp_mask = sla_in->rp_mask; + size_t rp_mask_size; + int i, cnt; + + if (sla_in->pir < sla_in->cir) { + dev_notice(&GET_DEV(accel_dev), + "PIR must be >= CIR, setting PIR to CIR\n"); + sla_in->pir = sla_in->cir; + } + + if (!is_update) { + cnt = 0; + rp_mask_size = sizeof(sla_in->rp_mask) * BITS_PER_BYTE; + for_each_set_bit(i, &rp_mask, rp_mask_size) { + if (++cnt > RL_RP_CNT_PER_LEAF_MAX) { + dev_notice(&GET_DEV(accel_dev), + "Too many ring pairs selected for this SLA\n"); + return -EINVAL; + } + } + + if (sla_in->srv >= ADF_SVC_NONE) { + dev_notice(&GET_DEV(accel_dev), + "Wrong service type\n"); + return -EINVAL; + } + + if (sla_in->type > RL_LEAF) { + dev_notice(&GET_DEV(accel_dev), + "Wrong node type\n"); + return -EINVAL; + } + + if (sla_in->parent_id < RL_PARENT_DEFAULT_ID || + sla_in->parent_id >= RL_NODES_CNT_MAX) { + dev_notice(&GET_DEV(accel_dev), + "Wrong parent ID\n"); + return -EINVAL; + } + } + + return 0; +} + +static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id) +{ + struct rl_sla *sla; + + if (sla_id <= RL_SLA_EMPTY_ID || sla_id >= RL_NODES_CNT_MAX) { + dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n"); + return -EINVAL; + } + + sla = accel_dev->rate_limiting->sla[sla_id]; + + if (!sla) { + dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n"); + return -EINVAL; + } + + if (sla->type != RL_LEAF) { + dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n"); + return -EINVAL; + } + + return 0; +} + +/** + * find_parent() - Find the parent for a new SLA + * @rl_data: pointer to ratelimiting data + * @sla_in: pointer to user input data for a new SLA + * + * Function returns a pointer to the parent SLA. If the parent ID is provided + * as input in the user data, then such ID is validated and the parent SLA + * is returned. + * Otherwise, it returns the default parent SLA (root or cluster) for + * the new object. + * + * Return: + * * Pointer to the parent SLA object + * * NULL - when parent cannot be found + */ +static struct rl_sla *find_parent(struct adf_rl *rl_data, + struct adf_rl_sla_input_data *sla_in) +{ + int input_parent_id = sla_in->parent_id; + struct rl_sla *root = NULL; + struct rl_sla *parent_sla; + int i; + + if (sla_in->type == RL_ROOT) + return NULL; + + if (input_parent_id > RL_PARENT_DEFAULT_ID) { + parent_sla = rl_data->sla[input_parent_id]; + /* + * SLA can be a parent if it has the same service as the child + * and its type is higher in the hierarchy, + * for example the parent type of a LEAF must be a CLUSTER. + */ + if (parent_sla && parent_sla->srv == sla_in->srv && + parent_sla->type == sla_in->type - 1) + return parent_sla; + + return NULL; + } + + /* If input_parent_id is not valid, get root for this service type. */ + for (i = 0; i < RL_ROOT_MAX; i++) { + if (rl_data->root[i] && rl_data->root[i]->srv == sla_in->srv) { + root = rl_data->root[i]; + break; + } + } + + if (!root) + return NULL; + + /* + * If the type of this SLA is cluster, then return the root. + * Otherwise, find the default (i.e. first) cluster for this service. 
+ */ + if (sla_in->type == RL_CLUSTER) + return root; + + for (i = 0; i < RL_CLUSTER_MAX; i++) { + if (rl_data->cluster[i] && rl_data->cluster[i]->parent == root) + return rl_data->cluster[i]; + } + + return NULL; +} + +static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv) +{ + switch (rl_srv) { + case ADF_SVC_ASYM: + return ASYM; + case ADF_SVC_SYM: + return SYM; + case ADF_SVC_DC: + return COMP; + default: + return UNUSED; + } +} + +/** + * adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array + * @rl_data: pointer to ratelimiting data + * @type: SLA type + * @sla_arr: pointer to variable where requested pointer will be stored + * + * Return: Max number of elements allowed for the returned array + */ +u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, + struct rl_sla ***sla_arr) +{ + switch (type) { + case RL_LEAF: + *sla_arr = rl_data->leaf; + return RL_LEAF_MAX; + case RL_CLUSTER: + *sla_arr = rl_data->cluster; + return RL_CLUSTER_MAX; + case RL_ROOT: + *sla_arr = rl_data->root; + return RL_ROOT_MAX; + default: + *sla_arr = NULL; + return 0; + } +} + +static bool is_service_enabled(struct adf_accel_dev *accel_dev, + enum adf_base_services rl_srv) +{ + enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv); + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u8 rps_per_bundle = hw_data->num_banks_per_vf; + int i; + + for (i = 0; i < rps_per_bundle; i++) { + if (GET_SRV_TYPE(accel_dev, i) == arb_srv) + return true; + } + + return false; +} + +/** + * prepare_rp_ids() - Creates an array of ring pair IDs from bitmask + * @accel_dev: pointer to acceleration device structure + * @sla: SLA object data where result will be written + * @rp_mask: bitmask of ring pair IDs + * + * Function tries to convert provided bitmap to an array of IDs. It checks if + * RPs aren't in use, are assigned to SLA service or if a number of provided + * IDs is not too big. If successful, writes the result into the field + * sla->ring_pairs_cnt. 
+ * + * Return: + * * 0 - ok + * * -EINVAL - ring pairs array cannot be created from provided mask + */ +static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla, + const unsigned long rp_mask) +{ + enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv); + u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf; + bool *rp_in_use = accel_dev->rate_limiting->rp_in_use; + size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids); + u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks; + u16 cnt = 0; + u16 rp_id; + + for_each_set_bit(rp_id, &rp_mask, rp_id_max) { + if (cnt >= rp_cnt_max) { + dev_notice(&GET_DEV(accel_dev), + "Assigned more ring pairs than supported"); + return -EINVAL; + } + + if (rp_in_use[rp_id]) { + dev_notice(&GET_DEV(accel_dev), + "RP %u already assigned to other SLA", rp_id); + return -EINVAL; + } + + if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) { + dev_notice(&GET_DEV(accel_dev), + "RP %u does not support SLA service", rp_id); + return -EINVAL; + } + + sla->ring_pairs_ids[cnt++] = rp_id; + } + + sla->ring_pairs_cnt = cnt; + + return 0; +} + +static void mark_rps_usage(struct rl_sla *sla, bool *rp_in_use, bool used) +{ + u16 rp_id; + int i; + + for (i = 0; i < sla->ring_pairs_cnt; i++) { + rp_id = sla->ring_pairs_ids[i]; + rp_in_use[rp_id] = used; + } +} + +static void assign_rps_to_leaf(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.r2l_offset; + u32 node_id = clear ? 0U : (sla->node_id & LEAF_MASK); + u32 offset; + int i; + + for (i = 0; i < sla->ring_pairs_cnt; i++) { + offset = base_offset + (RL_CSR_SIZE * sla->ring_pairs_ids[i]); + ADF_CSR_WR(pmisc_addr, offset, node_id); + } +} + +static void assign_leaf_to_cluster(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.l2c_offset; + u32 node_id = sla->node_id & LEAF_MASK; + u32 parent_id = clear ? 0U : (sla->parent->node_id & CLUSTER_MASK); + u32 offset; + + offset = base_offset + (RL_CSR_SIZE * node_id); + ADF_CSR_WR(pmisc_addr, offset, parent_id); +} + +static void assign_cluster_to_root(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.c2s_offset; + u32 node_id = sla->node_id & CLUSTER_MASK; + u32 parent_id = clear ? 
0U : (sla->parent->node_id & ROOT_MASK); + u32 offset; + + offset = base_offset + (RL_CSR_SIZE * node_id); + ADF_CSR_WR(pmisc_addr, offset, parent_id); +} + +static void assign_node_to_parent(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear_assignment) +{ + switch (sla->type) { + case RL_LEAF: + assign_rps_to_leaf(accel_dev, sla, clear_assignment); + assign_leaf_to_cluster(accel_dev, sla, clear_assignment); + break; + case RL_CLUSTER: + assign_cluster_to_root(accel_dev, sla, clear_assignment); + break; + default: + break; + } +} + +/** + * can_parent_afford_sla() - Verifies if the parent allows the creation of an SLA + * @sla_in: pointer to user input data for a new SLA + * @sla_parent: pointer to parent SLA object + * @sla_cir: current child CIR value (only for update) + * @is_update: true if the request is an update + * + * The algorithm verifies if the parent has enough remaining budget to take the + * assignment of a child with the provided parameters. In the update case, the + * current CIR value must first be returned to the budget. + * The PIR value cannot exceed the PIR assigned to the parent. + * + * Return: + * * true - SLA can be created + * * false - SLA cannot be created + */ +static bool can_parent_afford_sla(struct adf_rl_sla_input_data *sla_in, + struct rl_sla *sla_parent, u32 sla_cir, + bool is_update) +{ + u32 rem_cir = sla_parent->rem_cir; + + if (is_update) + rem_cir += sla_cir; + + if (sla_in->cir > rem_cir || sla_in->pir > sla_parent->pir) + return false; + + return true; +} + +/** + * can_node_afford_update() - Verifies if the SLA can be updated with the input data + * @sla_in: pointer to user input data for a new SLA + * @sla: pointer to SLA object selected for update + * + * The algorithm verifies if the new CIR value is big enough to satisfy the + * currently assigned child SLAs and if the PIR can be updated + * + * Return: + * * true - SLA can be updated + * * false - SLA cannot be updated + */ +static bool can_node_afford_update(struct adf_rl_sla_input_data *sla_in, + struct rl_sla *sla) +{ + u32 cir_in_use = sla->cir - sla->rem_cir; + + /* the new CIR cannot be smaller than the currently consumed value */ + if (cir_in_use > sla_in->cir) + return false; + + /* PIR of root/cluster cannot be reduced in node with assigned children */ + if (sla_in->pir < sla->pir && sla->type != RL_LEAF && cir_in_use > 0) + return false; + + return true; +} + +static bool is_enough_budget(struct adf_rl *rl_data, struct rl_sla *sla, + struct adf_rl_sla_input_data *sla_in, + bool is_update) +{ + u32 max_val = rl_data->device_data->scale_ref; + struct rl_sla *parent = sla->parent; + bool ret = true; + + if (sla_in->cir > max_val || sla_in->pir > max_val) + ret = false; + + switch (sla->type) { + case RL_LEAF: + ret &= can_parent_afford_sla(sla_in, parent, sla->cir, + is_update); + break; + case RL_CLUSTER: + ret &= can_parent_afford_sla(sla_in, parent, sla->cir, + is_update); + + if (is_update) + ret &= can_node_afford_update(sla_in, sla); + + break; + case RL_ROOT: + if (is_update) + ret &= can_node_afford_update(sla_in, sla); + + break; + default: + ret = false; + break; + } + + return ret; +} + +static void update_budget(struct rl_sla *sla, u32 old_cir, bool is_update) +{ + switch (sla->type) { + case RL_LEAF: + if (is_update) + sla->parent->rem_cir += old_cir; + + sla->parent->rem_cir -= sla->cir; + sla->rem_cir = 0; + break; + case RL_CLUSTER: + if (is_update) { + sla->parent->rem_cir += old_cir; + sla->rem_cir = sla->cir - (old_cir - sla->rem_cir); + } else { + sla->rem_cir = sla->cir; + } + + sla->parent->rem_cir -= sla->cir; + break; + case RL_ROOT:
+ if (is_update) + sla->rem_cir = sla->cir - (old_cir - sla->rem_cir); + else + sla->rem_cir = sla->cir; + break; + default: + break; + } +} + +/** + * get_next_free_sla_id() - finds the next free ID in the SLA array + * @rl_data: Pointer to ratelimiting data structure + * + * Return: + * * 0 to RL_NODES_CNT_MAX - 1 - a valid ID + * * -ENOSPC - all SLA slots are in use + */ +static int get_next_free_sla_id(struct adf_rl *rl_data) +{ + int i = 0; + + while (i < RL_NODES_CNT_MAX && rl_data->sla[i++]) + ; + + if (i == RL_NODES_CNT_MAX) + return -ENOSPC; + + return i - 1; +} + +/** + * get_next_free_node_id() - finds the next free ID in the array of that node type + * @rl_data: Pointer to ratelimiting data structure + * @sla: Pointer to SLA object for which the ID is searched + * + * Return: + * * 0 to RL_[NODE_TYPE]_MAX - 1 - a valid ID + * * -ENOSPC - all slots of that type are in use + */ +static int get_next_free_node_id(struct adf_rl *rl_data, struct rl_sla *sla) +{ + struct adf_hw_device_data *hw_device = GET_HW_DATA(rl_data->accel_dev); + int max_id, i, step, rp_per_leaf; + struct rl_sla **sla_list; + + rp_per_leaf = hw_device->num_banks / hw_device->num_banks_per_vf; + + /* + * Static nodes mapping: + * root0 - cluster[0,4,8,12] - leaf[0-15] + * root1 - cluster[1,5,9,13] - leaf[16-31] + * root2 - cluster[2,6,10,14] - leaf[32-47] + */ + switch (sla->type) { + case RL_LEAF: + i = sla->srv * rp_per_leaf; + step = 1; + max_id = i + rp_per_leaf; + sla_list = rl_data->leaf; + break; + case RL_CLUSTER: + i = sla->srv; + step = 4; + max_id = RL_CLUSTER_MAX; + sla_list = rl_data->cluster; + break; + case RL_ROOT: + return sla->srv; + default: + return -EINVAL; + } + + while (i < max_id && sla_list[i]) + i += step; + + if (i >= max_id) + return -ENOSPC; + + return i; +} + +u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type) +{ + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u64 avail_slice_cycles, allocated_tokens; + + if (!sla_val) + return 0; + + avail_slice_cycles = hw_data->clock_frequency; + + switch (svc_type) { + case ADF_SVC_ASYM: + avail_slice_cycles *= device_data->slices.pke_cnt; + break; + case ADF_SVC_SYM: + avail_slice_cycles *= device_data->slices.cph_cnt; + break; + case ADF_SVC_DC: + avail_slice_cycles *= device_data->slices.dcpr_cnt; + break; + default: + break; + } + + do_div(avail_slice_cycles, device_data->scan_interval); + allocated_tokens = avail_slice_cycles * sla_val; + do_div(allocated_tokens, device_data->scale_ref); + + return allocated_tokens; +} + +u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type) +{ + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u64 allocated_ae_cycles, avail_ae_cycles; + + if (!sla_val) + return 0; + + avail_ae_cycles = hw_data->clock_frequency; + avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1; + do_div(avail_ae_cycles, device_data->scan_interval); + + sla_val *= device_data->max_tp[svc_type]; + sla_val /= device_data->scale_ref; + + allocated_ae_cycles = (sla_val * avail_ae_cycles); + do_div(allocated_ae_cycles, device_data->max_tp[svc_type]); + + return allocated_ae_cycles; +}
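Both calculators share the same shape: compute the per-scan-interval capacity, then take sla_val permille of it, with scale_ref acting as the permille reference. A standalone worked example of the slice-token arithmetic with assumed device numbers (the real values come from device data, not from this patch)::

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Assumed example values, for illustration only */
            uint64_t clock_frequency = 500000000ULL; /* 500 MHz */
            uint64_t slice_cnt = 2;                  /* e.g. dcpr_cnt */
            uint64_t scan_interval = 1000;           /* device-defined divisor */
            uint64_t scale_ref = 1000;               /* sla_val is permille */
            uint64_t sla_val = 500;                  /* 50% of capability */

            /* Mirrors the steps of adf_rl_calculate_slice_tokens() */
            uint64_t avail = clock_frequency * slice_cnt / scan_interval;
            uint64_t tokens = avail * sla_val / scale_ref;

            printf("%llu tokens per scan interval\n",
                   (unsigned long long)tokens); /* 500000 with these inputs */
            return 0;
    }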
+u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type, bool is_bw_out) +{ + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; + u64 sla_to_bytes, allocated_bw, sla_scaled; + + if (!sla_val) + return 0; + + sla_to_bytes = sla_val; + sla_to_bytes *= device_data->max_tp[svc_type]; + do_div(sla_to_bytes, device_data->scale_ref); + + sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE : + BYTES_PER_MBIT; + if (svc_type == ADF_SVC_DC && is_bw_out) + sla_to_bytes *= device_data->slices.dcpr_cnt - + device_data->dcpr_correction; + + sla_scaled = sla_to_bytes * device_data->pcie_scale_mul; + do_div(sla_scaled, device_data->pcie_scale_div); + allocated_bw = sla_scaled; + do_div(allocated_bw, RL_TOKEN_PCIE_SIZE); + do_div(allocated_bw, device_data->scan_interval); + + return allocated_bw; +} + +/** + * add_new_sla_entry() - creates a new SLA object and fills it with user data + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data for a new SLA + * @sla_out: Pointer to variable that will contain the address of a new + * SLA object if the operation succeeds + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - invalid user input + * * -ENOSPC - all available SLAs are in use + */ +static int add_new_sla_entry(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, + struct rl_sla **sla_out) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla *sla; + int ret = 0; + + sla = kzalloc(sizeof(*sla), GFP_KERNEL); + if (!sla) { + ret = -ENOMEM; + goto ret_err; + } + *sla_out = sla; + + if (!is_service_enabled(accel_dev, sla_in->srv)) { + dev_notice(&GET_DEV(accel_dev), + "Provided service is not enabled\n"); + ret = -EINVAL; + goto ret_err; + } + + sla->srv = sla_in->srv; + sla->type = sla_in->type; + ret = get_next_free_node_id(rl_data, sla); + if (ret < 0) { + dev_notice(&GET_DEV(accel_dev), + "Exceeded number of available nodes for that service\n"); + goto ret_err; + } + sla->node_id = ret; + + ret = get_next_free_sla_id(rl_data); + if (ret < 0) { + dev_notice(&GET_DEV(accel_dev), + "Allocated maximum SLAs number\n"); + goto ret_err; + } + sla->sla_id = ret; + + sla->parent = find_parent(rl_data, sla_in); + if (!sla->parent && sla->type != RL_ROOT) { + if (sla_in->parent_id != RL_PARENT_DEFAULT_ID) + dev_notice(&GET_DEV(accel_dev), + "Provided parent ID does not exist or cannot be parent for this SLA."); + else + dev_notice(&GET_DEV(accel_dev), + "Unable to find parent node for this service.
Is service enabled?"); + ret = -EINVAL; + goto ret_err; + } + + if (sla->type == RL_LEAF) { + ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask); + if (!sla->ring_pairs_cnt || ret) { + dev_notice(&GET_DEV(accel_dev), + "Unable to find ring pairs to assign to the leaf"); + if (!ret) + ret = -EINVAL; + + goto ret_err; + } + } + + return 0; + +ret_err: + kfree(sla); + *sla_out = NULL; + + return ret; +} + +static int initialize_default_nodes(struct adf_accel_dev *accel_dev) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct adf_rl_hw_data *device_data = rl_data->device_data; + struct adf_rl_sla_input_data sla_in = { }; + int ret = 0; + int i; + + /* Init root for each enabled service */ + sla_in.type = RL_ROOT; + sla_in.parent_id = RL_PARENT_DEFAULT_ID; + + for (i = 0; i < ADF_SVC_NONE; i++) { + if (!is_service_enabled(accel_dev, i)) + continue; + + sla_in.cir = device_data->scale_ref; + sla_in.pir = sla_in.cir; + sla_in.srv = i; + + ret = adf_rl_add_sla(accel_dev, &sla_in); + if (ret) + return ret; + } + + /* Init default cluster for each root */ + sla_in.type = RL_CLUSTER; + for (i = 0; i < ADF_SVC_NONE; i++) { + if (!rl_data->root[i]) + continue; + + sla_in.cir = rl_data->root[i]->cir; + sla_in.pir = sla_in.cir; + sla_in.srv = rl_data->root[i]->srv; + + ret = adf_rl_add_sla(accel_dev, &sla_in); + if (ret) + return ret; + } + + return 0; +} + +static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla) +{ + bool *rp_in_use = rl_data->rp_in_use; + struct rl_sla **sla_type_arr = NULL; + int i, sla_id, node_id; + u32 old_cir; + + sla_id = sla->sla_id; + node_id = sla->node_id; + old_cir = sla->cir; + sla->cir = 0; + sla->pir = 0; + + for (i = 0; i < sla->ring_pairs_cnt; i++) + rp_in_use[sla->ring_pairs_ids[i]] = false; + + update_budget(sla, old_cir, true); + adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + assign_node_to_parent(rl_data->accel_dev, sla, true); + adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type); + mark_rps_usage(sla, rl_data->rp_in_use, false); + + kfree(sla); + rl_data->sla[sla_id] = NULL; + sla_type_arr[node_id] = NULL; +} + +static void free_all_sla(struct adf_accel_dev *accel_dev) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + int sla_id; + + mutex_lock(&rl_data->rl_lock); + + for (sla_id = 0; sla_id < RL_NODES_CNT_MAX; sla_id++) { + if (!rl_data->sla[sla_id]) + continue; + + kfree(rl_data->sla[sla_id]); + rl_data->sla[sla_id] = NULL; + } + + mutex_unlock(&rl_data->rl_lock); +} + +/** + * add_update_sla() - handles the creation and the update of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data for a new/updated SLA + * @is_update: flag to indicate if this is an update or an add operation + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - user input data cannot be used to create SLA + * * -ENOSPC - all available SLAs are in use + */ +static int add_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, bool is_update) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla **sla_type_arr = NULL; + struct rl_sla *sla = NULL; + u32 old_cir = 0; + int ret; + + if (!sla_in) { + dev_warn(&GET_DEV(accel_dev), + "SLA input data pointer is missing\n"); + return -EFAULT; + } + + mutex_lock(&rl_data->rl_lock); + + /* Input validation */ + ret = validate_user_input(accel_dev, sla_in, is_update); + if (ret) + goto ret_err; + + if (is_update) { + ret = validate_sla_id(accel_dev, 
sla_in->sla_id); + if (ret) + goto ret_err; + + sla = rl_data->sla[sla_in->sla_id]; + old_cir = sla->cir; + } else { + ret = add_new_sla_entry(accel_dev, sla_in, &sla); + if (ret) + goto ret_err; + } + + if (!is_enough_budget(rl_data, sla, sla_in, is_update)) { + dev_notice(&GET_DEV(accel_dev), + "Input value exceeds the remaining budget%s\n", + is_update ? " or more budget is already in use" : ""); + ret = -EINVAL; + goto ret_err; + } + sla->cir = sla_in->cir; + sla->pir = sla_in->pir; + + /* Apply SLA */ + assign_node_to_parent(accel_dev, sla, false); + ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "Failed to apply an SLA\n"); + goto ret_err; + } + update_budget(sla, old_cir, is_update); + + if (!is_update) { + mark_rps_usage(sla, rl_data->rp_in_use, true); + adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + sla_type_arr[sla->node_id] = sla; + rl_data->sla[sla->sla_id] = sla; + } + + sla_in->sla_id = sla->sla_id; + goto ret_ok; + +ret_err: + if (!is_update) { + sla_in->sla_id = -1; + kfree(sla); + } +ret_ok: + mutex_unlock(&rl_data->rl_lock); + return ret; +} + +/** + * adf_rl_add_sla() - handles the creation of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data required to add an SLA + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - invalid user input + * * -ENOSPC - all available SLAs are in use + */ +int adf_rl_add_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + return add_update_sla(accel_dev, sla_in, false); +} + +/** + * adf_rl_update_sla() - handles the update of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data required to update an SLA + * + * Return: + * * 0 - ok + * * -EINVAL - user input data cannot be used to update SLA + */ +int adf_rl_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + return add_update_sla(accel_dev, sla_in, true); +} + +/** + * adf_rl_get_sla() - returns an existing SLA data + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user data where SLA info will be stored + * + * The sla_id for which data are requested should be set in the sla_in structure + * + * Return: + * * 0 - ok + * * -EINVAL - provided sla_id does not exist + */ +int adf_rl_get_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + struct rl_sla *sla; + int ret, i; + + ret = validate_sla_id(accel_dev, sla_in->sla_id); + if (ret) + return ret; + + sla = accel_dev->rate_limiting->sla[sla_in->sla_id]; + sla_in->type = sla->type; + sla_in->srv = sla->srv; + sla_in->cir = sla->cir; + sla_in->pir = sla->pir; + sla_in->rp_mask = 0U; + if (sla->parent) + sla_in->parent_id = sla->parent->sla_id; + else + sla_in->parent_id = RL_PARENT_DEFAULT_ID; + + for (i = 0; i < sla->ring_pairs_cnt; i++) + sla_in->rp_mask |= BIT(sla->ring_pairs_ids[i]); + + return 0; +} + +/** + * adf_rl_get_capability_remaining() - returns the remaining SLA value (CIR) for + * selected service or provided sla_id + * @accel_dev: pointer to acceleration device structure + * @srv: service ID for which capability is requested + * @sla_id: ID of the cluster or root to which we want to assign a new SLA + * + * Check if the provided SLA id is valid. If it is and the service matches + * the requested service and the type is cluster or root, return the remaining + * capability.
+ * If the provided ID does not match the service or type, return the remaining + * capacity of the default cluster for that service. + * + * Return: + * * Positive value - correct remaining value + * * -EINVAL - algorithm cannot find a remaining value for provided data + */ +int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev, + enum adf_base_services srv, int sla_id) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla *sla = NULL; + int i; + + if (srv >= ADF_SVC_NONE) + return -EINVAL; + + if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) { + sla = rl_data->sla[sla_id]; + + if (sla->srv == srv && sla->type <= RL_CLUSTER) + goto ret_ok; + } + + for (i = 0; i < RL_CLUSTER_MAX; i++) { + if (!rl_data->cluster[i]) + continue; + + if (rl_data->cluster[i]->srv == srv) { + sla = rl_data->cluster[i]; + goto ret_ok; + } + } + + return -EINVAL; +ret_ok: + return sla->rem_cir; +} + +/** + * adf_rl_remove_sla() - removes the SLA with the provided sla_id + * @accel_dev: pointer to acceleration device structure + * @sla_id: ID of the SLA to be removed + * + * Return: + * * 0 - ok + * * -EINVAL - wrong sla_id or the SLA still has assigned children + */ +int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla *sla; + int ret = 0; + + mutex_lock(&rl_data->rl_lock); + ret = validate_sla_id(accel_dev, sla_id); + if (ret) + goto err_ret; + + sla = rl_data->sla[sla_id]; + + if (sla->type < RL_LEAF && sla->rem_cir != sla->cir) { + dev_notice(&GET_DEV(accel_dev), + "To remove a parent SLA, all its children must be removed first"); + ret = -EINVAL; + goto err_ret; + } + + clear_sla(rl_data, sla); + +err_ret: + mutex_unlock(&rl_data->rl_lock); + return ret; +} + +/** + * adf_rl_remove_sla_all() - removes all SLAs from the device + * @accel_dev: pointer to acceleration device structure + * @incl_default: set to true if the default SLAs should also be removed + */ +void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + int end_type = incl_default ?
RL_ROOT : RL_LEAF; + struct rl_sla **sla_type_arr = NULL; + u32 max_id; + int i, j; + + mutex_lock(&rl_data->rl_lock); + + /* Unregister and remove all SLAs */ + for (j = RL_LEAF; j >= end_type; j--) { + max_id = adf_rl_get_sla_arr_of_type(rl_data, j, &sla_type_arr); + + for (i = 0; i < max_id; i++) { + if (!sla_type_arr[i]) + continue; + + clear_sla(rl_data, sla_type_arr[i]); + } + } + + mutex_unlock(&rl_data->rl_lock); +} + +int adf_rl_init(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_rl_hw_data *rl_hw_data = &hw_data->rl_data; + struct adf_rl *rl; + int ret = 0; + + /* Validate device parameters */ + if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) || + RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) || + RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) || + RL_VALIDATE_NON_ZERO(rl_hw_data->scale_ref)) { + ret = -EOPNOTSUPP; + goto err_ret; + } + + rl = kzalloc(sizeof(*rl), GFP_KERNEL); + if (!rl) { + ret = -ENOMEM; + goto err_ret; + } + + mutex_init(&rl->rl_lock); + rl->device_data = &accel_dev->hw_device->rl_data; + rl->accel_dev = accel_dev; + accel_dev->rate_limiting = rl; + +err_ret: + return ret; +} + +int adf_rl_start(struct adf_accel_dev *accel_dev) +{ + struct adf_rl_hw_data *rl_hw_data = &GET_HW_DATA(accel_dev)->rl_data; + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities; + int ret; + + if (!accel_dev->rate_limiting) { + ret = -EOPNOTSUPP; + goto ret_err; + } + + if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) { + dev_info(&GET_DEV(accel_dev), "feature not supported by FW\n"); + ret = -EOPNOTSUPP; + goto ret_free; + } + + ADF_CSR_WR(pmisc_addr, rl_hw_data->pciin_tb_offset, + RL_TOKEN_GRANULARITY_PCIEIN_BUCKET); + ADF_CSR_WR(pmisc_addr, rl_hw_data->pciout_tb_offset, + RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET); + + ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices); + if (ret) { + dev_err(&GET_DEV(accel_dev), "initialization failed\n"); + goto ret_free; + } + + ret = initialize_default_nodes(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "failed to initialize default SLAs\n"); + goto ret_sla_rm; + } + + ret = adf_sysfs_rl_add(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), "failed to add sysfs interface\n"); + goto ret_sysfs_rm; + } + + return 0; + +ret_sysfs_rm: + adf_sysfs_rl_rm(accel_dev); +ret_sla_rm: + adf_rl_remove_sla_all(accel_dev, true); +ret_free: + kfree(accel_dev->rate_limiting); + accel_dev->rate_limiting = NULL; +ret_err: + return ret; +} + +void adf_rl_stop(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->rate_limiting) + return; + + adf_sysfs_rl_rm(accel_dev); + free_all_sla(accel_dev); +} + +void adf_rl_exit(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->rate_limiting) + return; + + kfree(accel_dev->rate_limiting); + accel_dev->rate_limiting = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h new file mode 100644 index 0000000000000000000000000000000000000000..bfe750ea0e83905f9cb7be9d98b44e70101b433e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RL_H_ +#define ADF_RL_H_ + +#include +#include + +struct 
adf_accel_dev; + +#define RL_ROOT_MAX 4 +#define RL_CLUSTER_MAX 16 +#define RL_LEAF_MAX 64 +#define RL_NODES_CNT_MAX (RL_ROOT_MAX + RL_CLUSTER_MAX + RL_LEAF_MAX) +#define RL_RP_CNT_PER_LEAF_MAX 4U +#define RL_RP_CNT_MAX 64 +#define RL_SLA_EMPTY_ID -1 +#define RL_PARENT_DEFAULT_ID -1 + +enum rl_node_type { + RL_ROOT, + RL_CLUSTER, + RL_LEAF, +}; + +enum adf_base_services { + ADF_SVC_ASYM = 0, + ADF_SVC_SYM, + ADF_SVC_DC, + ADF_SVC_NONE, +}; + +/** + * struct adf_rl_sla_input_data - ratelimiting user input data structure + * @rp_mask: 64-bit bitmask of ring pair IDs which will be assigned to the SLA. + * E.g. 0x5 -> RP0 and RP2 assigned; 0xA005 -> RP0,2,13,15 assigned. + * @sla_id: ID of the current SLA for the update, rm and get operations. For the + * add operation, this field will be updated with the ID of the newly + * added SLA + * @parent_id: ID of the SLA to which the current one should be assigned. + * Set to -1 to refer to the default parent. + * @cir: Committed information rate. Rate guaranteed to be achieved. Input value + * is expressed in permille scale, i.e. 1000 refers to the maximum + * device throughput for a selected service. + * @pir: Peak information rate. Maximum rate that the SLA can achieve. + * Input value is expressed in permille scale, i.e. 1000 refers to + * the maximum device throughput for a selected service. + * @type: SLA type: root, cluster, leaf + * @srv: Service associated with the SLA: asym, sym, dc. + * + * This structure is used to perform operations on an SLA. + * Depending on the operation, some of the parameters are ignored. + * The following list reports which parameters should be set for each operation. + * - add: all except sla_id + * - update: cir, pir, sla_id + * - rm: sla_id + * - rm_all: - + * - get: sla_id + * - get_capability_rem: srv, sla_id + */ +struct adf_rl_sla_input_data { + u64 rp_mask; + int sla_id; + int parent_id; + unsigned int cir; + unsigned int pir; + enum rl_node_type type; + enum adf_base_services srv; +}; + +struct rl_slice_cnt { + u8 dcpr_cnt; + u8 pke_cnt; + u8 cph_cnt; +}; + +struct adf_rl_interface_data { + struct adf_rl_sla_input_data input; + enum adf_base_services cap_rem_srv; + struct rw_semaphore lock; + bool sysfs_added; +}; + +struct adf_rl_hw_data { + u32 scale_ref; + u32 scan_interval; + u32 r2l_offset; + u32 l2c_offset; + u32 c2s_offset; + u32 pciin_tb_offset; + u32 pciout_tb_offset; + u32 pcie_scale_mul; + u32 pcie_scale_div; + u32 dcpr_correction; + u32 max_tp[RL_ROOT_MAX]; + struct rl_slice_cnt slices; +}; + +/** + * struct adf_rl - ratelimiting data structure + * @accel_dev: pointer to acceleration device data + * @device_data: pointer to rate limiting data specific to a device type (or revision) + * @sla: array of pointers to SLA objects + * @root: array of pointers to root type SLAs, element number reflects node_id + * @cluster: array of pointers to cluster type SLAs, element number reflects node_id + * @leaf: array of pointers to leaf type SLAs, element number reflects node_id + * @rp_in_use: array of flags for ring pair IDs already used in one of the SLAs + * @rl_lock: mutex protecting the data in this structure + * @user_input: structure used for holding the data received from the user + */ +struct adf_rl { + struct adf_accel_dev *accel_dev; + struct adf_rl_hw_data *device_data; + /* mapping sla_id to SLA objects */ + struct rl_sla *sla[RL_NODES_CNT_MAX]; + struct rl_sla *root[RL_ROOT_MAX]; + struct rl_sla *cluster[RL_CLUSTER_MAX]; + struct rl_sla *leaf[RL_LEAF_MAX]; + bool rp_in_use[RL_RP_CNT_MAX]; + /* Mutex 
protecting writes to the SLA lists */ + struct mutex rl_lock; + struct adf_rl_interface_data user_input; +}; + +/** + * struct rl_sla - SLA object data structure + * @parent: pointer to the parent SLA (root/cluster) + * @type: SLA type + * @srv: service associated with this SLA + * @sla_id: ID of the SLA, used as element number in SLA array and as identifier + * shared with the user + * @node_id: ID of the node; each SLA type has a separate ID list + * @cir: committed information rate + * @pir: peak information rate (PIR >= CIR) + * @rem_cir: if this SLA is a parent, the remaining capability left for its + * child SLAs + * @ring_pairs_ids: array with the numeric ring pair IDs assigned to this SLA + * @ring_pairs_cnt: number of assigned ring pairs listed in the array above + */ +struct rl_sla { + struct rl_sla *parent; + enum rl_node_type type; + enum adf_base_services srv; + u32 sla_id; + u32 node_id; + u32 cir; + u32 pir; + u32 rem_cir; + u16 ring_pairs_ids[RL_RP_CNT_PER_LEAF_MAX]; + u16 ring_pairs_cnt; +}; + +u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, + struct rl_sla ***sla_arr); +int adf_rl_add_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_get_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev, + enum adf_base_services srv, int sla_id); +int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id); +void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default); + +int adf_rl_init(struct adf_accel_dev *accel_dev); +int adf_rl_start(struct adf_accel_dev *accel_dev); +void adf_rl_stop(struct adf_accel_dev *accel_dev); +void adf_rl_exit(struct adf_accel_dev *accel_dev); + +u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type, bool is_bw_out); +u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type); +u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type); + +#endif /* ADF_RL_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c new file mode 100644 index 0000000000000000000000000000000000000000..698a14f4ce66a831a59f46b1b420b35de0a7e8c6 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include + +#include "adf_admin.h" +#include "adf_accel_devices.h" +#include "adf_rl_admin.h" + +static void +prep_admin_req_msg(struct rl_sla *sla, dma_addr_t dma_addr, + struct icp_qat_fw_init_admin_sla_config_params *fw_params, + struct icp_qat_fw_init_admin_req *req, bool is_update) +{ + req->cmd_id = is_update ? 
ICP_QAT_FW_RL_UPDATE : ICP_QAT_FW_RL_ADD; + req->init_cfg_ptr = dma_addr; + req->init_cfg_sz = sizeof(*fw_params); + req->node_id = sla->node_id; + req->node_type = sla->type; + req->rp_count = sla->ring_pairs_cnt; + req->svc_type = sla->srv; +} + +static void +prep_admin_req_params(struct adf_accel_dev *accel_dev, struct rl_sla *sla, + struct icp_qat_fw_init_admin_sla_config_params *fw_params) +{ + fw_params->pcie_in_cir = + adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, false); + fw_params->pcie_in_pir = + adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, false); + fw_params->pcie_out_cir = + adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, true); + fw_params->pcie_out_pir = + adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, true); + + fw_params->slice_util_cir = + adf_rl_calculate_slice_tokens(accel_dev, sla->cir, sla->srv); + fw_params->slice_util_pir = + adf_rl_calculate_slice_tokens(accel_dev, sla->pir, sla->srv); + + fw_params->ae_util_cir = + adf_rl_calculate_ae_cycles(accel_dev, sla->cir, sla->srv); + fw_params->ae_util_pir = + adf_rl_calculate_ae_cycles(accel_dev, sla->pir, sla->srv); + + memcpy(fw_params->rp_ids, sla->ring_pairs_ids, + sizeof(sla->ring_pairs_ids)); +} + +int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev, + struct rl_slice_cnt *slices_int) +{ + struct icp_qat_fw_init_admin_slice_cnt slices_resp = { }; + int ret; + + ret = adf_send_admin_rl_init(accel_dev, &slices_resp); + if (ret) + return ret; + + slices_int->dcpr_cnt = slices_resp.dcpr_cnt; + slices_int->pke_cnt = slices_resp.pke_cnt; + /* For symmetric crypto, slice tokens are relative to the UCS slice */ + slices_int->cph_cnt = slices_resp.ucs_cnt; + + return 0; +} + +int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool is_update) +{ + struct icp_qat_fw_init_admin_sla_config_params *fw_params; + struct icp_qat_fw_init_admin_req req = { }; + dma_addr_t dma_addr; + int ret; + + fw_params = dma_alloc_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), + &dma_addr, GFP_KERNEL); + if (!fw_params) + return -ENOMEM; + + prep_admin_req_params(accel_dev, sla, fw_params); + prep_admin_req_msg(sla, dma_addr, fw_params, &req, is_update); + ret = adf_send_admin_rl_add_update(accel_dev, &req); + + dma_free_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), fw_params, + dma_addr); + + return ret; +} + +int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type) +{ + return adf_send_admin_rl_delete(accel_dev, node_id, node_type); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h new file mode 100644 index 0000000000000000000000000000000000000000..dd5419b7e896b2a4eca5b315ce352a07149b70f2 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RL_ADMIN_H_ +#define ADF_RL_ADMIN_H_ + +#include + +#include "adf_rl.h" + +int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev, + struct rl_slice_cnt *slices_int); +int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool is_update); +int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type); + +#endif /* ADF_RL_ADMIN_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index 
f44025bb6f995d9bdf58bf9d6290fd5566c10192..8d645e7e04aa5534206875a7da8fe9e7f9f3b079 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -26,10 +26,12 @@ static void adf_iov_send_resp(struct work_struct *work) u32 vf_nr = vf_info->vf_nr; bool ret; + mutex_lock(&vf_info->pfvf_mig_lock); ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr); if (ret) /* re-enable interrupt on PF from this VF */ adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr); + mutex_unlock(&vf_info->pfvf_mig_lock); kfree(pf2vf_resp); } @@ -60,9 +62,9 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) /* This ptr will be populated when VFs will be created */ vf_info->accel_dev = accel_dev; vf_info->vf_nr = i; - vf_info->vf_compat_ver = 0; mutex_init(&vf_info->pf2vf_lock); + mutex_init(&vf_info->pfvf_mig_lock); ratelimit_state_init(&vf_info->vf2pf_ratelimit, ADF_VF2PF_RATELIMIT_INTERVAL, ADF_VF2PF_RATELIMIT_BURST); @@ -84,6 +86,32 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) return pci_enable_sriov(pdev, totalvfs); } +void adf_reenable_sriov(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + unsigned long val = 0; + + if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SRIOV_ENABLED, cfg)) + return; + + if (!accel_dev->pf.vf_info) + return; + + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC)) + return; + + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC)) + return; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + dev_dbg(&pdev->dev, "Re-enabling SRIOV\n"); + adf_enable_sriov(accel_dev); +} + /** * adf_disable_sriov() - Disable SRIOV for the device * @accel_dev: Pointer to accel device. 
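For context, VFs are created and destroyed through the standard PCI sysfs interface; with the changes in this patch, adf_sriov_configure() records the ADF_SRIOV_ENABLED key (see the hunk below) so that adf_reenable_sriov() above can restore the VFs later, e.g. after a device restart. An illustrative session (the VF count shown is an example; <BDF> stands for the PF's PCI address)::

    # cat /sys/bus/pci/devices/<BDF>/sriov_totalvfs
    16
    # echo 16 > /sys/bus/pci/devices/<BDF>/sriov_numvfs   # ends up in adf_sriov_configure()
    # echo 0 > /sys/bus/pci/devices/<BDF>/sriov_numvfs    # tears the VFs down again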
@@ -103,6 +131,7 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) return; adf_pf2vf_notify_restarting(accel_dev); + adf_pf2vf_wait_for_restarting_complete(accel_dev); pci_disable_sriov(accel_to_pci_dev(accel_dev)); /* Disable VF to PF interrupts */ @@ -112,11 +141,15 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) if (hw_data->configure_iov_threads) hw_data->configure_iov_threads(accel_dev, false); - for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) + for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) { mutex_destroy(&vf->pf2vf_lock); + mutex_destroy(&vf->pfvf_mig_lock); + } - kfree(accel_dev->pf.vf_info); - accel_dev->pf.vf_info = NULL; + if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { + kfree(accel_dev->pf.vf_info); + accel_dev->pf.vf_info = NULL; + } } EXPORT_SYMBOL_GPL(adf_disable_sriov); @@ -194,6 +227,10 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs) if (ret) return ret; + val = 1; + adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED, + &val, ADF_DEC); + return numvfs; } EXPORT_SYMBOL_GPL(adf_sriov_configure); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 8f04b0d3c5ac890a987d854bb8af8caf1eedc8b4..4e7f70d4049d354bd1776611dbae079fe61f3511 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -8,6 +8,8 @@ #include "adf_cfg_services.h" #include "adf_common_drv.h" +#define UNSET_RING_NUM -1 + static const char * const state_operations[] = { [DEV_DOWN] = "down", [DEV_UP] = "up", @@ -61,8 +63,8 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, } ret = adf_dev_down(accel_dev, true); - if (ret < 0) - return -EINVAL; + if (ret) + return ret; break; case DEV_UP: @@ -202,13 +204,130 @@ static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute } static DEVICE_ATTR_RW(pm_idle_enabled); +static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + char *auto_reset; + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + auto_reset = accel_dev->autoreset_on_error ? 
"on" : "off"; + + return sysfs_emit(buf, "%s\n", auto_reset); +} + +static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + bool enabled = false; + int ret; + + ret = kstrtobool(buf, &enabled); + if (ret) + return ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + accel_dev->autoreset_on_error = enabled; + + return count; +} +static DEVICE_ATTR_RW(auto_reset); + static DEVICE_ATTR_RW(state); static DEVICE_ATTR_RW(cfg_services); +static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_hw_device_data *hw_data; + struct adf_accel_dev *accel_dev; + enum adf_cfg_service_type svc; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + hw_data = GET_HW_DATA(accel_dev); + + if (accel_dev->sysfs.ring_num == UNSET_RING_NUM) + return -EINVAL; + + down_read(&accel_dev->sysfs.lock); + svc = GET_SRV_TYPE(accel_dev, accel_dev->sysfs.ring_num % + hw_data->num_banks_per_vf); + up_read(&accel_dev->sysfs.lock); + + switch (svc) { + case COMP: + return sysfs_emit(buf, "%s\n", ADF_CFG_DC); + case SYM: + return sysfs_emit(buf, "%s\n", ADF_CFG_SYM); + case ASYM: + return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM); + default: + break; + } + return -EINVAL; +} + +static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + int num_rings, ret; + unsigned int ring; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + ret = kstrtouint(buf, 10, &ring); + if (ret) + return ret; + + num_rings = GET_MAX_BANKS(accel_dev); + if (ring >= num_rings) { + dev_err(&GET_DEV(accel_dev), + "Device does not support more than %u ring pairs\n", + num_rings); + return -EINVAL; + } + + down_write(&accel_dev->sysfs.lock); + accel_dev->sysfs.ring_num = ring; + up_write(&accel_dev->sysfs.lock); + + return count; +} +static DEVICE_ATTR_RW(rp2srv); + +static ssize_t num_rps_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + return sysfs_emit(buf, "%u\n", GET_MAX_BANKS(accel_dev)); +} +static DEVICE_ATTR_RO(num_rps); + static struct attribute *qat_attrs[] = { &dev_attr_state.attr, &dev_attr_cfg_services.attr, &dev_attr_pm_idle_enabled.attr, + &dev_attr_rp2srv.attr, + &dev_attr_num_rps.attr, + &dev_attr_auto_reset.attr, NULL, }; @@ -227,6 +346,8 @@ int adf_sysfs_init(struct adf_accel_dev *accel_dev) "Failed to create qat attribute group: %d\n", ret); } + accel_dev->sysfs.ring_num = UNSET_RING_NUM; + return ret; } EXPORT_SYMBOL_GPL(adf_sysfs_init); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c new file mode 100644 index 0000000000000000000000000000000000000000..e97c67c87b3cf17d124a4e97099622e170e30253 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include + +#include "adf_common_drv.h" +#include "adf_sysfs_ras_counters.h" + +static ssize_t errors_correctable_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev 
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c new file mode 100644 index 0000000000000000000000000000000000000000..e97c67c87b3cf17d124a4e97099622e170e30253 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include + +#include "adf_common_drv.h" +#include "adf_sysfs_ras_counters.h" + +static ssize_t errors_correctable_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR); + return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t errors_nonfatal_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_UNCORR); + return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t errors_fatal_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_FATAL); + return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t reset_error_counters_store(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + + if (buf[0] != '1' || count != 2) + return -EINVAL; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); + + return count; +} + +static DEVICE_ATTR_RO(errors_correctable); +static DEVICE_ATTR_RO(errors_nonfatal); +static DEVICE_ATTR_RO(errors_fatal); +static DEVICE_ATTR_WO(reset_error_counters); + +static struct attribute *qat_ras_attrs[] = { + &dev_attr_errors_correctable.attr, + &dev_attr_errors_nonfatal.attr, + &dev_attr_errors_fatal.attr, + &dev_attr_reset_error_counters.attr, + NULL, +}; + +static struct attribute_group qat_ras_group = { + .attrs = qat_ras_attrs, + .name = "qat_ras", +}; + +void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->ras_errors.enabled) + return; + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); + + if (device_add_group(&GET_DEV(accel_dev), &qat_ras_group)) + dev_err(&GET_DEV(accel_dev), + "Failed to create qat_ras attribute group.\n"); + + accel_dev->ras_errors.sysfs_added = true; +} + +void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->ras_errors.enabled) + return; + + if (accel_dev->ras_errors.sysfs_added) { + device_remove_group(&GET_DEV(accel_dev), &qat_ras_group); + accel_dev->ras_errors.sysfs_added = false; + } + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); } diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h new file mode 100644 index 0000000000000000000000000000000000000000..99e9d9cf57f848d9f4f4915e451aaf7e1c3a1fd4 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RAS_H +#define ADF_RAS_H + +#include +#include + +struct adf_accel_dev; + +void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev); +void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev); + +#define ADF_RAS_ERR_CTR_READ(ras_errors, ERR) \ + atomic_read(&(ras_errors).counter[ERR]) + +#define ADF_RAS_ERR_CTR_CLEAR(ras_errors) \ + do { \ + for (int err = 0; err < ADF_RAS_ERRORS; ++err) \ + atomic_set(&(ras_errors).counter[err], 0); \ + } while (0) + +#define ADF_RAS_ERR_CTR_INC(ras_errors, ERR) \ + atomic_inc(&(ras_errors).counter[ERR]) + +#endif /* ADF_RAS_H */
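A quick way to exercise the qat_ras group added above (illustrative session; the counter values and the <BDF> device path are examples, and reset_error_counters only accepts "1")::

    # cd /sys/bus/pci/devices/<BDF>/qat_ras
    # cat errors_correctable errors_nonfatal errors_fatal
    3
    0
    0
    # echo 1 > reset_error_counters
    # cat errors_correctable
    0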
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c new file mode 100644 index 0000000000000000000000000000000000000000..bedb514d4e30424d23aeb986417b9b838283ef2f --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#define dev_fmt(fmt) "RateLimiting: " fmt + +#include +#include +#include +#include + +#include "adf_common_drv.h" +#include "adf_rl.h" +#include "adf_sysfs_rl.h" + +#define GET_RL_STRUCT(accel_dev) ((accel_dev)->rate_limiting->user_input) + +enum rl_ops { + ADD, + UPDATE, + RM, + RM_ALL, + GET, +}; + +enum rl_params { + RP_MASK, + ID, + CIR, + PIR, + SRV, + CAP_REM_SRV, +}; + +static const char *const rl_services[] = { + [ADF_SVC_ASYM] = "asym", + [ADF_SVC_SYM] = "sym", + [ADF_SVC_DC] = "dc", +}; + +static const char *const rl_operations[] = { + [ADD] = "add", + [UPDATE] = "update", + [RM] = "rm", + [RM_ALL] = "rm_all", + [GET] = "get", +}; + +static int set_param_u(struct device *dev, enum rl_params param, u64 set) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_write(&data->lock); + switch (param) { + case RP_MASK: + data->input.rp_mask = set; + break; + case CIR: + data->input.cir = set; + break; + case PIR: + data->input.pir = set; + break; + case SRV: + data->input.srv = set; + break; + case CAP_REM_SRV: + data->cap_rem_srv = set; + break; + default: + ret = -EINVAL; + break; + } + up_write(&data->lock); + + return ret; +} + +static int set_param_s(struct device *dev, enum rl_params param, int set) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev || param != ID) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_write(&data->lock); + data->input.sla_id = set; + up_write(&data->lock); + + return 0; +} + +static int get_param_u(struct device *dev, enum rl_params param, u64 *get) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + switch (param) { + case RP_MASK: + *get = data->input.rp_mask; + break; + case CIR: + *get = data->input.cir; + break; + case PIR: + *get = data->input.pir; + break; + case SRV: + *get = data->input.srv; + break; + default: + ret = -EINVAL; + } + up_read(&data->lock); + + return ret; +} + +static int get_param_s(struct device *dev, enum rl_params param) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + if (param == ID) + ret = data->input.sla_id; + up_read(&data->lock); + + return ret; +} + +static ssize_t rp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, RP_MASK, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%#llx\n", get); +} + +static ssize_t rp_store(struct device *dev, struct device_attribute 
*attr, + const char *buf, size_t count) +{ + int err; + u64 val; + + err = kstrtou64(buf, 16, &val); + if (err) + return err; + + err = set_param_u(dev, RP_MASK, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(rp); + +static ssize_t id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", get_param_s(dev, ID)); +} + +static ssize_t id_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err; + int val; + + err = kstrtoint(buf, 10, &val); + if (err) + return err; + + err = set_param_s(dev, ID, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(id); + +static ssize_t cir_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, CIR, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", get); +} + +static ssize_t cir_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int err; + + err = kstrtouint(buf, 10, &val); + if (err) + return err; + + err = set_param_u(dev, CIR, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(cir); + +static ssize_t pir_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, PIR, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", get); +} + +static ssize_t pir_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int err; + + err = kstrtouint(buf, 10, &val); + if (err) + return err; + + err = set_param_u(dev, PIR, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(pir); + +static ssize_t srv_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, SRV, &get); + if (ret) + return ret; + + if (get == ADF_SVC_NONE) + return -EINVAL; + + return sysfs_emit(buf, "%s\n", rl_services[get]); +} + +static ssize_t srv_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int ret; + + ret = sysfs_match_string(rl_services, buf); + if (ret < 0) + return ret; + + val = ret; + ret = set_param_u(dev, SRV, val); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_RW(srv); + +static ssize_t cap_rem_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret, rem_cap; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + rem_cap = adf_rl_get_capability_remaining(accel_dev, data->cap_rem_srv, + RL_SLA_EMPTY_ID); + up_read(&data->lock); + if (rem_cap < 0) + return rem_cap; + + ret = sysfs_emit(buf, "%u\n", rem_cap); + + return ret; +} + +static ssize_t cap_rem_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int ret; + + ret = sysfs_match_string(rl_services, buf); + if (ret < 0) + return ret; + + val = ret; + ret = set_param_u(dev, CAP_REM_SRV, val); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_RW(cap_rem); + +static ssize_t sla_op_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev 
*accel_dev; + int ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + ret = sysfs_match_string(rl_operations, buf); + if (ret < 0) + return ret; + + down_write(&data->lock); + switch (ret) { + case ADD: + data->input.parent_id = RL_PARENT_DEFAULT_ID; + data->input.type = RL_LEAF; + data->input.sla_id = 0; + ret = adf_rl_add_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + case UPDATE: + ret = adf_rl_update_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + case RM: + ret = adf_rl_remove_sla(accel_dev, data->input.sla_id); + if (ret) + goto err_free_lock; + break; + case RM_ALL: + adf_rl_remove_sla_all(accel_dev, false); + break; + case GET: + ret = adf_rl_get_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + default: + ret = -EINVAL; + goto err_free_lock; + } + up_write(&data->lock); + + return count; + +err_free_lock: + up_write(&data->lock); + + return ret; +} +static DEVICE_ATTR_WO(sla_op); + +static struct attribute *qat_rl_attrs[] = { + &dev_attr_rp.attr, + &dev_attr_id.attr, + &dev_attr_cir.attr, + &dev_attr_pir.attr, + &dev_attr_srv.attr, + &dev_attr_cap_rem.attr, + &dev_attr_sla_op.attr, + NULL, +}; + +static struct attribute_group qat_rl_group = { + .attrs = qat_rl_attrs, + .name = "qat_rl", +}; + +int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev) +{ + struct adf_rl_interface_data *data; + int ret; + + data = &GET_RL_STRUCT(accel_dev); + + ret = device_add_group(&GET_DEV(accel_dev), &qat_rl_group); + if (ret) + dev_err(&GET_DEV(accel_dev), + "Failed to create qat_rl attribute group\n"); + + data->cap_rem_srv = ADF_SVC_NONE; + data->input.srv = ADF_SVC_NONE; + data->sysfs_added = true; + + return ret; +} + +void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev) +{ + struct adf_rl_interface_data *data; + + data = &GET_RL_STRUCT(accel_dev); + if (!data->sysfs_added) + return; + + device_remove_group(&GET_DEV(accel_dev), &qat_rl_group); + data->sysfs_added = false; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h new file mode 100644 index 0000000000000000000000000000000000000000..22d36aa8a757fcf85ed3e5a02061618ffaddca41 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_SYSFS_RL_H_ +#define ADF_SYSFS_RL_H_ + +struct adf_accel_dev; + +int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev); +void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev); + +#endif /* ADF_SYSFS_RL_H_ */
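Putting the qat_rl group together, an end-to-end SLA creation could look like the sketch below. Values and the <BDF> path are illustrative: cir/pir are in permille of the device capability for the chosen service, rp takes a hexadecimal ring pair mask, and sla_op_store() hard-codes the new SLA as a leaf under the default parent::

    # cd /sys/bus/pci/devices/<BDF>/qat_rl
    # echo sym > srv       # service for the new SLA
    # echo 0x5 > rp        # ring pair mask: RP0 and RP2
    # echo 500 > cir       # guaranteed rate: 50.0% of device capability
    # echo 1000 > pir      # peak rate: 100%
    # echo add > sla_op
    # cat id               # sla_id assigned to the new SLA
    4
    # echo sym > cap_rem   # query remaining capability for the sym service
    # cat cap_rem
    500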
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c new file mode 100644 index 0000000000000000000000000000000000000000..74fb0c2ed2412af543c2803d1b6ade84ffc95486 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. */ +#define dev_fmt(fmt) "Telemetry: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adf_admin.h" +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_telemetry.h" + +#define TL_IS_ZERO(input) ((input) == 0) + +static bool is_tl_supported(struct adf_accel_dev *accel_dev) +{ + u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities; + + return fw_caps & TL_CAPABILITY_BIT; +} + +static int validate_tl_data(struct adf_tl_hw_data *tl_data) +{ + if (!tl_data->dev_counters || + TL_IS_ZERO(tl_data->num_dev_counters) || + !tl_data->sl_util_counters || + !tl_data->sl_exec_counters || + !tl_data->rp_counters || + TL_IS_ZERO(tl_data->num_rp_counters)) + return -EOPNOTSUPP; + + return 0; +} + +static int validate_tl_slice_counters(struct icp_qat_fw_init_admin_slice_cnt *slice_count, + u8 max_slices_per_type) +{ + u8 *sl_counter = (u8 *)slice_count; + int i; + + for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) { + if (sl_counter[i] > max_slices_per_type) + return -EINVAL; + } + + return 0; +} + +static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct device *dev = &GET_DEV(accel_dev); + size_t regs_sz = tl_data->layout_sz; + struct adf_telemetry *telemetry; + int node = dev_to_node(dev); + void *tl_data_regs; + unsigned int i; + + telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node); + if (!telemetry) + return -ENOMEM; + + telemetry->rp_num_indexes = kmalloc_array(tl_data->max_rp, + sizeof(*telemetry->rp_num_indexes), + GFP_KERNEL); + if (!telemetry->rp_num_indexes) + goto err_free_tl; + + telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff, + sizeof(*telemetry->regs_hist_buff), + GFP_KERNEL); + if (!telemetry->regs_hist_buff) + goto err_free_rp_indexes; + + telemetry->regs_data = dma_alloc_coherent(dev, regs_sz, + &telemetry->regs_data_p, + GFP_KERNEL); + if (!telemetry->regs_data) + goto err_free_regs_hist_buff; + + for (i = 0; i < tl_data->num_hbuff; i++) { + tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node); + if (!tl_data_regs) + goto err_free_dma; + + telemetry->regs_hist_buff[i] = tl_data_regs; + } + + accel_dev->telemetry = telemetry; + + return 0; + +err_free_dma: + dma_free_coherent(dev, regs_sz, telemetry->regs_data, + telemetry->regs_data_p); + + while (i--) + kfree(telemetry->regs_hist_buff[i]); + +err_free_regs_hist_buff: + kfree(telemetry->regs_hist_buff); +err_free_rp_indexes: + kfree(telemetry->rp_num_indexes); +err_free_tl: + kfree(telemetry); + + return -ENOMEM; +} + +static void adf_tl_free_mem(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + size_t regs_sz = tl_data->layout_sz; + unsigned int i; + + for (i = 0; i < tl_data->num_hbuff; i++) + kfree(telemetry->regs_hist_buff[i]); + + dma_free_coherent(dev, regs_sz, telemetry->regs_data, + telemetry->regs_data_p); + + kfree(telemetry->regs_hist_buff); + kfree(telemetry->rp_num_indexes); + kfree(telemetry); + accel_dev->telemetry = NULL; +} + +static unsigned long get_next_timeout(void) +{ + return msecs_to_jiffies(ADF_TL_TIMER_INT_MS); +} + +static void snapshot_regs(struct adf_telemetry *telemetry, size_t size) +{ + void *dst = telemetry->regs_hist_buff[telemetry->hb_num]; + void *src = telemetry->regs_data; + + memcpy(dst, src, size); +} + +static void tl_work_handler(struct work_struct 
*work) +{ + struct delayed_work *delayed_work; + struct adf_telemetry *telemetry; + struct adf_tl_hw_data *tl_data; + u32 msg_cnt, old_msg_cnt; + size_t layout_sz; + u32 *regs_data; + size_t id; + + delayed_work = to_delayed_work(work); + telemetry = container_of(delayed_work, struct adf_telemetry, work_ctx); + tl_data = &GET_TL_DATA(telemetry->accel_dev); + regs_data = telemetry->regs_data; + + id = tl_data->msg_cnt_off / sizeof(*regs_data); + layout_sz = tl_data->layout_sz; + + if (!atomic_read(&telemetry->state)) { + cancel_delayed_work_sync(&telemetry->work_ctx); + return; + } + + msg_cnt = regs_data[id]; + old_msg_cnt = msg_cnt; + if (msg_cnt == telemetry->msg_cnt) + goto out; + + mutex_lock(&telemetry->regs_hist_lock); + + snapshot_regs(telemetry, layout_sz); + + /* Check if data changed while updating it */ + msg_cnt = regs_data[id]; + if (old_msg_cnt != msg_cnt) + snapshot_regs(telemetry, layout_sz); + + telemetry->msg_cnt = msg_cnt; + telemetry->hb_num++; + telemetry->hb_num %= telemetry->hbuffs; + + mutex_unlock(&telemetry->regs_hist_lock); + +out: + adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout()); +} + +int adf_tl_halt(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + int ret; + + cancel_delayed_work_sync(&telemetry->work_ctx); + atomic_set(&telemetry->state, 0); + + ret = adf_send_admin_tl_stop(accel_dev); + if (ret) + dev_err(dev, "failed to stop telemetry\n"); + + return ret; +} + +int adf_tl_run(struct adf_accel_dev *accel_dev, int state) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + size_t layout_sz = tl_data->layout_sz; + int ret; + + ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p, + layout_sz, telemetry->rp_num_indexes, + &telemetry->slice_cnt); + if (ret) { + dev_err(dev, "failed to start telemetry\n"); + return ret; + } + + ret = validate_tl_slice_counters(&telemetry->slice_cnt, tl_data->max_sl_cnt); + if (ret) { + dev_err(dev, "invalid value returned by FW\n"); + adf_send_admin_tl_stop(accel_dev); + return ret; + } + + telemetry->hbuffs = state; + atomic_set(&telemetry->state, state); + + adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout()); + + return 0; +} + +int adf_tl_init(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + u8 max_rp = GET_TL_DATA(accel_dev).max_rp; + struct device *dev = &GET_DEV(accel_dev); + struct adf_telemetry *telemetry; + unsigned int i; + int ret; + + ret = validate_tl_data(tl_data); + if (ret) + return ret; + + ret = adf_tl_alloc_mem(accel_dev); + if (ret) { + dev_err(dev, "failed to initialize: %d\n", ret); + return ret; + } + + telemetry = accel_dev->telemetry; + telemetry->accel_dev = accel_dev; + + mutex_init(&telemetry->wr_lock); + mutex_init(&telemetry->regs_hist_lock); + INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler); + + for (i = 0; i < max_rp; i++) + telemetry->rp_num_indexes[i] = ADF_TL_RP_REGS_DISABLED; + + return 0; +} + +int adf_tl_start(struct adf_accel_dev *accel_dev) +{ + struct device *dev = &GET_DEV(accel_dev); + + if (!accel_dev->telemetry) + return -EOPNOTSUPP; + + if (!is_tl_supported(accel_dev)) { + dev_info(dev, "feature not supported by FW\n"); + adf_tl_free_mem(accel_dev); + return -EOPNOTSUPP; + } + + return 0; +} + +void adf_tl_stop(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->telemetry) + return; + + if (atomic_read(&accel_dev->telemetry->state)) + adf_tl_halt(accel_dev); +} + +void adf_tl_shutdown(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->telemetry) + return; + + adf_tl_free_mem(accel_dev); +}
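adf_tl_run() and adf_tl_halt() above are reached through the telemetry debugfs control file created later in this patch. As an illustrative sketch (the device debugfs directory name is abbreviated), writing a value between 1 and num_hbuff enables collection with that many history buffers, and writing 0 stops it::

    # cd /sys/kernel/debug/<qat device>/telemetry
    # echo 2 > control     # enable: ends up in adf_tl_run(accel_dev, 2)
    # cat control
    2
    # echo 0 > control     # disable: ends up in adf_tl_halt()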
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h new file mode 100644 index 0000000000000000000000000000000000000000..e54a406cc1b4aec9313fca253b1685f967880813 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. */ +#ifndef ADF_TELEMETRY_H +#define ADF_TELEMETRY_H + +#include +#include +#include +#include + +#include "icp_qat_fw_init_admin.h" + +struct adf_accel_dev; +struct adf_tl_dbg_counter; +struct dentry; + +#define ADF_TL_SL_CNT_COUNT \ + (sizeof(struct icp_qat_fw_init_admin_slice_cnt) / sizeof(__u8)) + +#define TL_CAPABILITY_BIT BIT(1) +/* Interval within which the device writes data to the DMA region. Value in milliseconds. */ +#define ADF_TL_DATA_WR_INTERVAL_MS 1000 +/* Interval within which the timer interrupt should be handled. Value in milliseconds. */ +#define ADF_TL_TIMER_INT_MS (ADF_TL_DATA_WR_INTERVAL_MS / 2) + +#define ADF_TL_RP_REGS_DISABLED (0xff) + +struct adf_tl_hw_data { + size_t layout_sz; + size_t slice_reg_sz; + size_t rp_reg_sz; + size_t msg_cnt_off; + const struct adf_tl_dbg_counter *dev_counters; + const struct adf_tl_dbg_counter *sl_util_counters; + const struct adf_tl_dbg_counter *sl_exec_counters; + const struct adf_tl_dbg_counter *rp_counters; + u8 num_hbuff; + u8 cpp_ns_per_cycle; + u8 bw_units_to_bytes; + u8 num_dev_counters; + u8 num_rp_counters; + u8 max_rp; + u8 max_sl_cnt; +}; + +struct adf_telemetry { + struct adf_accel_dev *accel_dev; + atomic_t state; + u32 hbuffs; + int hb_num; + u32 msg_cnt; + dma_addr_t regs_data_p; /* bus address for DMA mapping */ + void *regs_data; /* virtual address for DMA mapping */ + /** + * @regs_hist_buff: array of pointers to copies of the last @hbuffs + * values of @regs_data + */ + void **regs_hist_buff; + struct dentry *dbg_dir; + u8 *rp_num_indexes; + /** + * @regs_hist_lock: protects against races between writes and reads + * of the copies referenced by @regs_hist_buff + */ + struct mutex regs_hist_lock; + /** + * @wr_lock: protects against concurrent writes to the debugfs telemetry + * files + */ + struct mutex wr_lock; + struct delayed_work work_ctx; + struct icp_qat_fw_init_admin_slice_cnt slice_cnt; +}; + +#ifdef CONFIG_DEBUG_FS +int adf_tl_init(struct adf_accel_dev *accel_dev); +int adf_tl_start(struct adf_accel_dev *accel_dev); +void adf_tl_stop(struct adf_accel_dev *accel_dev); +void adf_tl_shutdown(struct adf_accel_dev *accel_dev); +int adf_tl_run(struct adf_accel_dev *accel_dev, int state); +int adf_tl_halt(struct adf_accel_dev *accel_dev); +#else +static inline int adf_tl_init(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static inline int adf_tl_start(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static inline void adf_tl_stop(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_tl_shutdown(struct adf_accel_dev *accel_dev) +{ +} +#endif /* CONFIG_DEBUG_FS */ +#endif /* ADF_TELEMETRY_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..c8241f5a0a26ee996525a0038a5d74b3e15ec094 --- /dev/null +++ 
b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c @@ -0,0 +1,710 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. */ +#define dev_fmt(fmt) "Telemetry debugfs: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_cfg_strings.h" +#include "adf_telemetry.h" +#include "adf_tl_debugfs.h" + +#define TL_VALUE_MIN_PADDING 20 +#define TL_KEY_MIN_PADDING 23 +#define TL_RP_SRV_UNKNOWN "Unknown" + +static int tl_collect_values_u32(struct adf_telemetry *telemetry, + size_t counter_offset, u64 *arr) +{ + unsigned int samples, hb_idx, i; + u32 *regs_hist_buff; + u32 counter_val; + + samples = min(telemetry->msg_cnt, telemetry->hbuffs); + hb_idx = telemetry->hb_num + telemetry->hbuffs - samples; + + mutex_lock(&telemetry->regs_hist_lock); + + for (i = 0; i < samples; i++) { + regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs]; + counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)]; + arr[i] = counter_val; + hb_idx++; + } + + mutex_unlock(&telemetry->regs_hist_lock); + + return samples; +} + +static int tl_collect_values_u64(struct adf_telemetry *telemetry, + size_t counter_offset, u64 *arr) +{ + unsigned int samples, hb_idx, i; + u64 *regs_hist_buff; + u64 counter_val; + + samples = min(telemetry->msg_cnt, telemetry->hbuffs); + hb_idx = telemetry->hb_num + telemetry->hbuffs - samples; + + mutex_lock(&telemetry->regs_hist_lock); + + for (i = 0; i < samples; i++) { + regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs]; + counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)]; + arr[i] = counter_val; + hb_idx++; + } + + mutex_unlock(&telemetry->regs_hist_lock); + + return samples; +} + +/** + * avg_array() - Return average of values within an array. + * @array: Array of values. + * @len: Number of elements. + * + * This algorithm computes average of an array without running into overflow. + * + * Return: average of values. + */ +#define avg_array(array, len) ( \ +{ \ + typeof(&(array)[0]) _array = (array); \ + __unqual_scalar_typeof(_array[0]) _x = 0; \ + __unqual_scalar_typeof(_array[0]) _y = 0; \ + __unqual_scalar_typeof(_array[0]) _a, _b; \ + typeof(len) _len = (len); \ + size_t _i; \ + \ + for (_i = 0; _i < _len; _i++) { \ + _a = _array[_i]; \ + _b = do_div(_a, _len); \ + _x += _a; \ + if (_y >= _len - _b) { \ + _x++; \ + _y -= _len - _b; \ + } else { \ + _y += _b; \ + } \ + } \ + do_div(_y, _len); \ + (_x + _y); \ +}) + +/* Calculation function for simple counter. */ +static int tl_calc_count(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u64 *hist_vals; + int sample_cnt; + int ret = 0; + + hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals), + GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_vals; + + vals->curr = hist_vals[sample_cnt - 1]; + vals->min = min_array(hist_vals, sample_cnt); + vals->max = max_array(hist_vals, sample_cnt); + vals->avg = avg_array(hist_vals, sample_cnt); + +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +/* Convert CPP bus cycles to ns. 
*/ +static int tl_cycles_to_ns(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle; + int ret; + + ret = tl_calc_count(telemetry, ctr, vals); + if (ret) + return ret; + + vals->curr *= cpp_ns_per_cycle; + vals->min *= cpp_ns_per_cycle; + vals->max *= cpp_ns_per_cycle; + vals->avg *= cpp_ns_per_cycle; + + return 0; +} + +/* + * Compute latency cumulative average with division of accumulated value + * by sample count. Returned value is in ns. + */ +static int tl_lat_acc_avg(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle; + u8 num_hbuff = tl_data->num_hbuff; + int sample_cnt, i; + u64 *hist_vals; + u64 *hist_cnt; + int ret = 0; + + hist_vals = kmalloc_array(num_hbuff, sizeof(*hist_vals), GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + hist_cnt = kmalloc_array(num_hbuff, sizeof(*hist_cnt), GFP_KERNEL); + if (!hist_cnt) { + ret = -ENOMEM; + goto out_free_hist_vals; + } + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u64(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_cnt; + + tl_collect_values_u32(telemetry, ctr->offset2, hist_cnt); + + for (i = 0; i < sample_cnt; i++) { + /* Avoid division by 0 if count is 0. */ + if (hist_cnt[i]) + hist_vals[i] = div_u64(hist_vals[i] * cpp_ns_per_cycle, + hist_cnt[i]); + else + hist_vals[i] = 0; + } + + vals->curr = hist_vals[sample_cnt - 1]; + vals->min = min_array(hist_vals, sample_cnt); + vals->max = max_array(hist_vals, sample_cnt); + vals->avg = avg_array(hist_vals, sample_cnt); + +out_free_hist_cnt: + kfree(hist_cnt); +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +/* Convert HW raw bandwidth units to Mbps. 
*/ +static int tl_bw_hw_units_to_mbps(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u16 bw_hw_2_bits = tl_data->bw_units_to_bytes * BITS_PER_BYTE; + u64 *hist_vals; + int sample_cnt; + int ret = 0; + + hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals), + GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_vals; + + vals->curr = div_u64(hist_vals[sample_cnt - 1] * bw_hw_2_bits, MEGA); + vals->min = div_u64(min_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + vals->max = div_u64(max_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + vals->avg = div_u64(avg_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +static void tl_seq_printf_counter(struct adf_telemetry *telemetry, + struct seq_file *s, const char *name, + struct adf_tl_dbg_aggr_values *vals) +{ + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, name); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->curr); + if (atomic_read(&telemetry->state) > 1) { + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->min); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->max); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->avg); + } + seq_puts(s, "\n"); +} + +static int tl_calc_and_print_counter(struct adf_telemetry *telemetry, + struct seq_file *s, + const struct adf_tl_dbg_counter *ctr, + const char *name) +{ + const char *counter_name = name ? name : ctr->name; + enum adf_tl_counter_type type = ctr->type; + struct adf_tl_dbg_aggr_values vals; + int ret; + + switch (type) { + case ADF_TL_SIMPLE_COUNT: + ret = tl_calc_count(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_NS: + ret = tl_cycles_to_ns(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_NS_AVG: + ret = tl_lat_acc_avg(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_MBPS: + ret = tl_bw_hw_units_to_mbps(telemetry, ctr, &vals); + break; + default: + return -EINVAL; + } + + if (ret) + return ret; + + tl_seq_printf_counter(telemetry, s, counter_name, &vals); + + return 0; +} + +static int tl_print_sl_counter(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct seq_file *s, u8 cnt_id) +{ + size_t sl_regs_sz = GET_TL_DATA(telemetry->accel_dev).slice_reg_sz; + struct adf_tl_dbg_counter slice_ctr; + size_t offset_inc = cnt_id * sl_regs_sz; + char cnt_name[MAX_COUNT_NAME_SIZE]; + + snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", ctr->name, cnt_id); + slice_ctr = *ctr; + slice_ctr.offset1 += offset_inc; + + return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name); +} + +static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev, + struct seq_file *s, u8 cnt_type, u8 cnt_id) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + const struct adf_tl_dbg_counter *sl_tl_util_counters; + const struct adf_tl_dbg_counter *sl_tl_exec_counters; + const struct adf_tl_dbg_counter *ctr; + int ret; + + sl_tl_util_counters = tl_data->sl_util_counters; + sl_tl_exec_counters = tl_data->sl_exec_counters; + + ctr = &sl_tl_util_counters[cnt_type]; + + ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "invalid slice utilization counter type\n"); + 
return ret; + } + + ctr = &sl_tl_exec_counters[cnt_type]; + + ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "invalid slice execution counter type\n"); + return ret; + } + + return 0; +} + +static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt) +{ + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG); + seq_printf(s, "%*u\n", TL_VALUE_MIN_PADDING, msg_cnt); +} + +static int tl_print_dev_data(struct adf_accel_dev *accel_dev, + struct seq_file *s) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + const struct adf_tl_dbg_counter *dev_tl_counters; + u8 num_dev_counters = tl_data->num_dev_counters; + u8 *sl_cnt = (u8 *)&telemetry->slice_cnt; + const struct adf_tl_dbg_counter *ctr; + unsigned int i; + int ret; + u8 j; + + if (!atomic_read(&telemetry->state)) { + dev_info(&GET_DEV(accel_dev), "not enabled\n"); + return -EPERM; + } + + dev_tl_counters = tl_data->dev_counters; + + tl_print_msg_cnt(s, telemetry->msg_cnt); + + /* Print device level telemetry. */ + for (i = 0; i < num_dev_counters; i++) { + ctr = &dev_tl_counters[i]; + ret = tl_calc_and_print_counter(telemetry, s, ctr, NULL); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "invalid counter type\n"); + return ret; + } + } + + /* Print per slice telemetry. */ + for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) { + for (j = 0; j < sl_cnt[i]; j++) { + ret = tl_calc_and_print_sl_counters(accel_dev, s, i, j); + if (ret) + return ret; + } + } + + return 0; +} + +static int tl_dev_data_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + + if (!accel_dev) + return -EINVAL; + + return tl_print_dev_data(accel_dev, s); +} +DEFINE_SHOW_ATTRIBUTE(tl_dev_data); + +static int tl_control_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + + if (!accel_dev) + return -EINVAL; + + seq_printf(s, "%d\n", atomic_read(&accel_dev->telemetry->state)); + + return 0; +} + +static ssize_t tl_control_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *seq_f = file->private_data; + struct adf_accel_dev *accel_dev; + struct adf_telemetry *telemetry; + struct adf_tl_hw_data *tl_data; + struct device *dev; + u32 input; + int ret; + + accel_dev = seq_f->private; + if (!accel_dev) + return -EINVAL; + + tl_data = &GET_TL_DATA(accel_dev); + telemetry = accel_dev->telemetry; + dev = &GET_DEV(accel_dev); + + mutex_lock(&telemetry->wr_lock); + + ret = kstrtou32_from_user(userbuf, count, 10, &input); + if (ret) + goto unlock_and_exit; + + if (input > tl_data->num_hbuff) { + dev_info(dev, "invalid control input\n"); + ret = -EINVAL; + goto unlock_and_exit; + } + + /* If input is 0, just stop telemetry. */ + if (!input) { + ret = adf_tl_halt(accel_dev); + if (!ret) + ret = count; + + goto unlock_and_exit; + } + + /* If TL is already enabled, stop it. 
*/ + if (atomic_read(&telemetry->state)) { + dev_info(dev, "already enabled, restarting.\n"); + ret = adf_tl_halt(accel_dev); + if (ret) + goto unlock_and_exit; + } + + ret = adf_tl_run(accel_dev, input); + if (ret) + goto unlock_and_exit; + + ret = count; + +unlock_and_exit: + mutex_unlock(&telemetry->wr_lock); + return ret; +} +DEFINE_SHOW_STORE_ATTRIBUTE(tl_control); + +static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num) +{ + char alpha; + u8 index; + int ret; + + ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha); + if (ret != 1) + return -EINVAL; + + index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha); + *rp_id = index; + + return 0; +} + +static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev, + unsigned int new_rp_num, + unsigned int rp_regs_index) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + unsigned int i; + u8 curr_state; + int ret; + + if (new_rp_num >= hw_data->num_rps) { + dev_info(dev, "invalid Ring Pair number selected\n"); + return -EINVAL; + } + + for (i = 0; i < hw_data->tl_data.max_rp; i++) { + if (telemetry->rp_num_indexes[i] == new_rp_num) { + dev_info(dev, "RP nr: %d is already selected in slot rp_%c_data\n", + new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(i)); + return 0; + } + } + + dev_dbg(dev, "selecting RP nr %u into slot rp_%c_data\n", + new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index)); + + curr_state = atomic_read(&telemetry->state); + + if (curr_state) { + ret = adf_tl_halt(accel_dev); + if (ret) + return ret; + + telemetry->rp_num_indexes[rp_regs_index] = new_rp_num; + + ret = adf_tl_run(accel_dev, curr_state); + if (ret) + return ret; + } else { + telemetry->rp_num_indexes[rp_regs_index] = new_rp_num; + } + + return 0; +} + +static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s, + u8 rp_idx) +{ + u32 banks_per_vf = GET_HW_DATA(accel_dev)->num_banks_per_vf; + enum adf_cfg_service_type svc; + + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_SERVICE_TYPE); + + svc = GET_SRV_TYPE(accel_dev, rp_idx % banks_per_vf); + switch (svc) { + case COMP: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DC); + break; + case SYM: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_SYM); + break; + case ASYM: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM); + break; + default: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN); + break; + } +} + +static int tl_print_rp_data(struct adf_accel_dev *accel_dev, struct seq_file *s, + u8 rp_regs_index) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + const struct adf_tl_dbg_counter *rp_tl_counters; + u8 num_rp_counters = tl_data->num_rp_counters; + size_t rp_regs_sz = tl_data->rp_reg_sz; + struct adf_tl_dbg_counter ctr; + unsigned int i; + u8 rp_idx; + int ret; + + if (!atomic_read(&telemetry->state)) { + dev_info(&GET_DEV(accel_dev), "not enabled\n"); + return -EPERM; + } + + rp_tl_counters = tl_data->rp_counters; + rp_idx = telemetry->rp_num_indexes[rp_regs_index]; + + if (rp_idx == ADF_TL_RP_REGS_DISABLED) { + dev_info(&GET_DEV(accel_dev), "no RP number selected in rp_%c_data\n", + ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index)); + return -EPERM; + } + + tl_print_msg_cnt(s, telemetry->msg_cnt); + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_NUM_INDEX); + seq_printf(s, "%*d\n", TL_VALUE_MIN_PADDING, rp_idx); + 
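+	/*
+	 * sample_cnt and rp_num are emitted above; tl_print_rp_srv() and the
+	 * counter loop below fill in the rest. With illustrative values, the
+	 * rendered rp_<x>_data file begins roughly as:
+	 *
+	 *	sample_cnt                5
+	 *	rp_num                    7
+	 *	service_type            sym
+	 *	pci_trans_cnt          1204
+	 */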
tl_print_rp_srv(accel_dev, s, rp_idx); + + for (i = 0; i < num_rp_counters; i++) { + ctr = rp_tl_counters[i]; + ctr.offset1 += rp_regs_sz * rp_regs_index; + ctr.offset2 += rp_regs_sz * rp_regs_index; + ret = tl_calc_and_print_counter(telemetry, s, &ctr, NULL); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), + "invalid RP counter type\n"); + return ret; + } + } + + return 0; +} + +static int tl_rp_data_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + u8 rp_regs_index; + u8 max_rp; + int ret; + + if (!accel_dev) + return -EINVAL; + + max_rp = GET_TL_DATA(accel_dev).max_rp; + ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n"); + return ret; + } + + return tl_print_rp_data(accel_dev, s, rp_regs_index); +} + +static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *seq_f = file->private_data; + struct adf_accel_dev *accel_dev; + struct adf_telemetry *telemetry; + unsigned int new_rp_num; + u8 rp_regs_index; + u8 max_rp; + int ret; + + accel_dev = seq_f->private; + if (!accel_dev) + return -EINVAL; + + telemetry = accel_dev->telemetry; + max_rp = GET_TL_DATA(accel_dev).max_rp; + + mutex_lock(&telemetry->wr_lock); + + ret = get_rp_index_from_file(file, &rp_regs_index, max_rp); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n"); + goto unlock_and_exit; + } + + ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num); + if (ret) + goto unlock_and_exit; + + ret = adf_tl_dbg_change_rp_index(accel_dev, new_rp_num, rp_regs_index); + if (ret) + goto unlock_and_exit; + + ret = count; + +unlock_and_exit: + mutex_unlock(&telemetry->wr_lock); + return ret; +} +DEFINE_SHOW_STORE_ATTRIBUTE(tl_rp_data); + +void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct dentry *parent = accel_dev->debugfs_dir; + u8 max_rp = GET_TL_DATA(accel_dev).max_rp; + char name[ADF_TL_RP_REGS_FNAME_SIZE]; + struct dentry *dir; + unsigned int i; + + if (!telemetry) + return; + + dir = debugfs_create_dir("telemetry", parent); + accel_dev->telemetry->dbg_dir = dir; + debugfs_create_file("device_data", 0444, dir, accel_dev, &tl_dev_data_fops); + debugfs_create_file("control", 0644, dir, accel_dev, &tl_control_fops); + + for (i = 0; i < max_rp; i++) { + snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME, + ADF_TL_DBG_RP_ALPHA_INDEX(i)); + debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops); + } +} + +void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct dentry *dbg_dir; + + if (!telemetry) + return; + + dbg_dir = telemetry->dbg_dir; + + debugfs_remove_recursive(dbg_dir); + + if (atomic_read(&telemetry->state)) + adf_tl_halt(accel_dev); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..11cc9eae19b37af1b5b484456af49f646617fc72 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. 
*/ +#ifndef ADF_TL_DEBUGFS_H +#define ADF_TL_DEBUGFS_H + +#include + +struct adf_accel_dev; + +#define MAX_COUNT_NAME_SIZE 32 +#define SNAPSHOT_CNT_MSG "sample_cnt" +#define RP_NUM_INDEX "rp_num" +#define PCI_TRANS_CNT_NAME "pci_trans_cnt" +#define MAX_RD_LAT_NAME "max_rd_lat" +#define RD_LAT_ACC_NAME "rd_lat_acc_avg" +#define MAX_LAT_NAME "max_gp_lat" +#define LAT_ACC_NAME "gp_lat_acc_avg" +#define BW_IN_NAME "bw_in" +#define BW_OUT_NAME "bw_out" +#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg" +#define AT_TRANS_LAT_NAME "at_trans_lat_avg" +#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used" +#define AT_GLOB_DTLB_HIT_NAME "at_glob_devtlb_hit" +#define AT_GLOB_DTLB_MISS_NAME "at_glob_devtlb_miss" +#define AT_PAYLD_DTLB_HIT_NAME "tl_at_payld_devtlb_hit" +#define AT_PAYLD_DTLB_MISS_NAME "tl_at_payld_devtlb_miss" +#define RP_SERVICE_TYPE "service_type" + +#define ADF_TL_DBG_RP_ALPHA_INDEX(index) ((index) + 'A') +#define ADF_TL_DBG_RP_INDEX_ALPHA(alpha) ((alpha) - 'A') + +#define ADF_TL_RP_REGS_FNAME "rp_%c_data" +#define ADF_TL_RP_REGS_FNAME_SIZE 16 + +#define ADF_TL_DATA_REG_OFF(reg, qat_gen) \ + offsetof(struct adf_##qat_gen##_tl_layout, reg) + +#define ADF_TL_DEV_REG_OFF(reg, qat_gen) \ + (ADF_TL_DATA_REG_OFF(tl_device_data_regs, qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_device_data_regs, reg)) + +#define ADF_TL_SLICE_REG_OFF(slice, reg, qat_gen) \ + (ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg)) + +#define ADF_TL_RP_REG_OFF(reg, qat_gen) \ + (ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg)) + +/** + * enum adf_tl_counter_type - telemetry counter types + * @ADF_TL_COUNTER_UNSUPPORTED: unsupported counter + * @ADF_TL_SIMPLE_COUNT: simple counter + * @ADF_TL_COUNTER_NS: latency counter, value in ns + * @ADF_TL_COUNTER_NS_AVG: accumulated average latency counter, value in ns + * @ADF_TL_COUNTER_MBPS: bandwidth, value in MBps + */ +enum adf_tl_counter_type { + ADF_TL_COUNTER_UNSUPPORTED, + ADF_TL_SIMPLE_COUNT, + ADF_TL_COUNTER_NS, + ADF_TL_COUNTER_NS_AVG, + ADF_TL_COUNTER_MBPS, +}; + +/** + * struct adf_tl_dbg_counter - telemetry counter definition + * @name: name of the counter as printed in the report + * @adf_tl_counter_type: type of the counter + * @offset1: offset of 1st register + * @offset2: offset of 2nd optional register + */ +struct adf_tl_dbg_counter { + const char *name; + enum adf_tl_counter_type type; + size_t offset1; + size_t offset2; +}; + +#define ADF_TL_COUNTER(_name, _type, _offset) \ +{ .name = _name, \ + .type = _type, \ + .offset1 = _offset \ +} + +#define ADF_TL_COUNTER_LATENCY(_name, _type, _offset1, _offset2) \ +{ .name = _name, \ + .type = _type, \ + .offset1 = _offset1, \ + .offset2 = _offset2 \ +} + +/* Telemetry counter aggregated values. */ +struct adf_tl_dbg_aggr_values { + u64 curr; + u64 min; + u64 max; + u64 avg; +}; + +/** + * adf_tl_dbgfs_add() - Add telemetry's debug fs entries. + * @accel_dev: Pointer to acceleration device. + * + * Creates telemetry's debug fs folder and attributes in QAT debug fs root. + */ +void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev); + +/** + * adf_tl_dbgfs_rm() - Remove telemetry's debug fs entries. + * @accel_dev: Pointer to acceleration device. + * + * Removes telemetry's debug fs folder and attributes from QAT debug fs root. 
+ */ +void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev); + +#endif /* ADF_TL_DEBUGFS_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c index 630d0483c4e0a1a4134f9f73f0def8091e9e4a55..1efdf46490f147d463fcc5ef9ee14127fb1028ff 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_transport.c +++ b/drivers/crypto/intel/qat/qat_common/adf_transport.c @@ -474,7 +474,6 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev, int adf_init_etr_data(struct adf_accel_dev *accel_dev) { struct adf_etr_data *etr_data; - struct adf_hw_device_data *hw_data = accel_dev->hw_device; void __iomem *csr_addr; u32 size; u32 num_banks = 0; @@ -495,8 +494,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) } accel_dev->transport = etr_data; - i = hw_data->get_etr_bar_id(hw_data); - csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr; + csr_addr = adf_get_etr_base(accel_dev); /* accel_dev->debugfs_dir should always be non-NULL here */ etr_data->debug = debugfs_create_dir("transport", diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c index b05c3957a16019a63d3c6568c9573cef22d838b5..cdbb2d687b1b0dfc65226c7e058bcb24b2d430ec 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c @@ -293,8 +293,6 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq); /** * adf_init_vf_wq() - Init workqueue for VF * - * Function init workqueue 'adf_vf_stop_wq' for VF. - * * Return: 0 on success, error code otherwise. */ int __init adf_init_vf_wq(void) diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index 019a6443834e0b11ca395e565f7ae410303ff5fc..63cf18e2a4e57d4d9e24b53754895825f7de83d2 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -5,6 +5,8 @@ #include "icp_qat_fw.h" +#define RL_MAX_RP_IDS 16 + enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_INIT_AE = 0, ICP_QAT_FW_TRNG_ENABLE = 1, @@ -16,10 +18,19 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_HEARTBEAT_SYNC = 7, ICP_QAT_FW_HEARTBEAT_GET = 8, ICP_QAT_FW_COMP_CAPABILITY_GET = 9, + ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10, ICP_QAT_FW_DC_CHAIN_INIT = 11, ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, + ICP_QAT_FW_RL_INIT = 15, ICP_QAT_FW_TIMER_GET = 19, + ICP_QAT_FW_CNV_STATS_GET = 20, ICP_QAT_FW_PM_STATE_CONFIG = 128, + ICP_QAT_FW_PM_INFO = 129, + ICP_QAT_FW_RL_ADD = 134, + ICP_QAT_FW_RL_UPDATE = 135, + ICP_QAT_FW_RL_REMOVE = 136, + ICP_QAT_FW_TL_START = 137, + ICP_QAT_FW_TL_STOP = 138, }; enum icp_qat_fw_init_admin_resp_status { @@ -27,6 +38,37 @@ enum icp_qat_fw_init_admin_resp_status { ICP_QAT_FW_INIT_RESP_STATUS_FAIL }; +struct icp_qat_fw_init_admin_tl_rp_indexes { + __u8 rp_num_index_0; + __u8 rp_num_index_1; + __u8 rp_num_index_2; + __u8 rp_num_index_3; +}; + +struct icp_qat_fw_init_admin_slice_cnt { + __u8 cpr_cnt; + __u8 xlt_cnt; + __u8 dcpr_cnt; + __u8 pke_cnt; + __u8 wat_cnt; + __u8 wcp_cnt; + __u8 ucs_cnt; + __u8 cph_cnt; + __u8 ath_cnt; +}; + +struct icp_qat_fw_init_admin_sla_config_params { + __u32 pcie_in_cir; + __u32 pcie_in_pir; + __u32 pcie_out_cir; + __u32 pcie_out_pir; + __u32 slice_util_cir; + __u32 slice_util_pir; + __u32 ae_util_cir; + __u32 ae_util_pir; + __u16 rp_ids[RL_MAX_RP_IDS]; +}; + struct icp_qat_fw_init_admin_req { __u16 init_cfg_sz; __u8 resrvd1; @@ -46,7 +88,15 @@ struct 
icp_qat_fw_init_admin_req { struct { __u32 heartbeat_ticks; }; + struct { + __u16 node_id; + __u8 node_type; + __u8 svc_type; + __u8 resrvd5[3]; + __u8 rp_count; + }; __u32 idle_filter; + struct icp_qat_fw_init_admin_tl_rp_indexes rp_indexes; }; __u32 resrvd4; @@ -64,6 +114,10 @@ struct icp_qat_fw_init_admin_resp { __u16 version_major_num; }; __u32 extended_features; + struct { + __u16 error_count; + __u16 latest_error; + }; }; __u64 opaque_data; union { @@ -103,9 +157,46 @@ struct icp_qat_fw_init_admin_resp { __u32 unsuccessful_count; __u64 resrvd8; }; + struct icp_qat_fw_init_admin_slice_cnt slices; + __u16 fw_capabilities; }; } __packed; #define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC +#define ICP_QAT_FW_CAPABILITIES_GET ICP_QAT_FW_CRYPTO_CAPABILITY_GET + +#define ICP_QAT_NUMBER_OF_PM_EVENTS 8 + +struct icp_qat_fw_init_admin_pm_info { + __u16 max_pwrreq; + __u16 min_pwrreq; + __u16 resvrd1; + __u8 pwr_state; + __u8 resvrd2; + __u32 fusectl0; + struct_group(event_counters, + __u32 sys_pm; + __u32 host_msg; + __u32 unknown; + __u32 local_ssm; + __u32 timer; + ); + __u32 event_log[ICP_QAT_NUMBER_OF_PM_EVENTS]; + struct_group(pm, + __u32 fw_init; + __u32 pwrreq; + __u32 status; + __u32 main; + __u32 thread; + ); + struct_group(ssm, + __u32 pm_enable; + __u32 pm_active_status; + __u32 pm_managed_status; + __u32 pm_domain_status; + __u32 active_constraint; + ); + __u32 resvrd3[6]; +}; #endif diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h index 0c8883e2ccc6dc1979ac32811b2c72de38a27a3e..b8f1c4ffb8b5a7fc21a29c5a413bad192ca62d44 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h @@ -3,6 +3,8 @@ #ifndef _ICP_QAT_HW_H_ #define _ICP_QAT_HW_H_ +#include + enum icp_qat_hw_ae_id { ICP_QAT_HW_AE_0 = 0, ICP_QAT_HW_AE_1 = 1, @@ -16,7 +18,12 @@ enum icp_qat_hw_ae_id { ICP_QAT_HW_AE_9 = 9, ICP_QAT_HW_AE_10 = 10, ICP_QAT_HW_AE_11 = 11, - ICP_QAT_HW_AE_DELIMITER = 12 + ICP_QAT_HW_AE_12 = 12, + ICP_QAT_HW_AE_13 = 13, + ICP_QAT_HW_AE_14 = 14, + ICP_QAT_HW_AE_15 = 15, + ICP_QAT_HW_AE_16 = 16, + ICP_QAT_HW_AE_DELIMITER = 17 }; enum icp_qat_hw_qat_id { @@ -93,7 +100,7 @@ enum icp_qat_capabilities_mask { /* Bits 10-11 are currently reserved */ ICP_ACCEL_CAPABILITIES_HKDF = BIT(12), ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13), - /* Bit 14 is currently reserved */ + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN = BIT(14), ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15), ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16), ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17), @@ -105,7 +112,10 @@ enum icp_qat_capabilities_mask { ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23), ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24), ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25), - ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26) + ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26), + /* Bits 27-28 are currently reserved */ + ICP_ACCEL_CAPABILITIES_ZUC_256 = BIT(29), + ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT = BIT(30), }; #define QAT_AUTH_MODE_BITPOS 4 diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h index 69482abdb8b936c5c8ad194d9263f0bc136b2871..e28241bdd0f4efe045bfa8f326af71f68f383239 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h @@ -7,7 +7,7 @@ #define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 #define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 #define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000 -#define 
ICP_QAT_UCLO_MAX_AE 12 +#define ICP_QAT_UCLO_MAX_AE 17 #define ICP_QAT_UCLO_MAX_CTX 8 #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) #define ICP_QAT_UCLO_MAX_USTORE 0x4000 diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c index 4128200a903293ba66e5fe9c337407fddf93ebbe..85c682e248fb918a11642ef5b858e5e00eb4cce7 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c @@ -110,6 +110,8 @@ struct qat_dh_ctx { unsigned int p_size; bool g2; struct qat_crypto_instance *inst; + struct crypto_kpp *ftfm; + bool fallback; } __packed __aligned(64); struct qat_asym_request { @@ -381,6 +383,36 @@ static int qat_dh_compute_value(struct kpp_request *req) return ret; } +static int qat_dh_generate_public_key(struct kpp_request *req) +{ + struct kpp_request *nreq = kpp_request_ctx(req); + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + kpp_request_set_tfm(nreq, ctx->ftfm); + return crypto_kpp_generate_public_key(nreq); + } + + return qat_dh_compute_value(req); +} + +static int qat_dh_compute_shared_secret(struct kpp_request *req) +{ + struct kpp_request *nreq = kpp_request_ctx(req); + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + kpp_request_set_tfm(nreq, ctx->ftfm); + return crypto_kpp_compute_shared_secret(nreq); + } + + return qat_dh_compute_value(req); +} + static int qat_dh_check_params_length(unsigned int p_len) { switch (p_len) { @@ -398,9 +430,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); - if (qat_dh_check_params_length(params->p_size << 3)) - return -EINVAL; - ctx->p_size = params->p_size; ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); if (!ctx->p) @@ -454,6 +483,13 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf, if (crypto_dh_decode_key(buf, len, ¶ms) < 0) return -EINVAL; + if (qat_dh_check_params_length(params.p_size << 3)) { + ctx->fallback = true; + return crypto_kpp_set_secret(ctx->ftfm, buf, len); + } + + ctx->fallback = false; + /* Free old secret if any */ qat_dh_clear_ctx(dev, ctx); @@ -481,6 +517,9 @@ static unsigned int qat_dh_max_size(struct crypto_kpp *tfm) { struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) + return crypto_kpp_maxsize(ctx->ftfm); + return ctx->p_size; } @@ -489,11 +528,22 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm) struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); struct qat_crypto_instance *inst = qat_crypto_get_instance_node(numa_node_id()); + const char *alg = kpp_alg_name(tfm); + unsigned int reqsize; if (!inst) return -EINVAL; - kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64); + ctx->ftfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->ftfm)) + return PTR_ERR(ctx->ftfm); + + crypto_kpp_set_flags(ctx->ftfm, crypto_kpp_get_flags(tfm)); + + reqsize = max(sizeof(struct qat_asym_request) + 64, + sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->ftfm)); + + kpp_set_reqsize(tfm, reqsize); ctx->p_size = 0; ctx->g2 = false; @@ -506,6 +556,9 @@ static void qat_dh_exit_tfm(struct crypto_kpp *tfm) struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); struct device *dev = 
&GET_DEV(ctx->inst->accel_dev); + if (ctx->ftfm) + crypto_free_kpp(ctx->ftfm); + qat_dh_clear_ctx(dev, ctx); qat_crypto_put_instance(ctx->inst); } @@ -1265,8 +1318,8 @@ static struct akcipher_alg rsa = { static struct kpp_alg dh = { .set_secret = qat_dh_set_secret, - .generate_public_key = qat_dh_compute_value, - .compute_shared_secret = qat_dh_compute_value, + .generate_public_key = qat_dh_generate_public_key, + .compute_shared_secret = qat_dh_compute_shared_secret, .max_size = qat_dh_max_size, .init = qat_dh_init_tfm, .exit = qat_dh_exit_tfm, @@ -1276,6 +1329,7 @@ static struct kpp_alg dh = { .cra_priority = 1000, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct qat_dh_ctx), + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c index 76baed0a76c0ee9386e9c14b60315026be6b532a..338acf29c487b6784abd04379df282d17309614c 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.c +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c @@ -81,7 +81,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, if (unlikely(!bufl)) return -ENOMEM; } else { - bufl = &buf->sgl_src.sgl_hdr; + bufl = container_of(&buf->sgl_src.sgl_hdr, + struct qat_alg_buf_list, hdr); memset(bufl, 0, sizeof(struct qat_alg_buf_list)); buf->sgl_src_valid = true; } @@ -139,7 +140,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, if (unlikely(!buflout)) goto err_in; } else { - buflout = &buf->sgl_dst.sgl_hdr; + buflout = container_of(&buf->sgl_dst.sgl_hdr, + struct qat_alg_buf_list, hdr); memset(buflout, 0, sizeof(struct qat_alg_buf_list)); buf->sgl_dst_valid = true; } diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h index d87e4f35ac395c768dd4f57ecf25a85f862c9352..85bc32a9ec0eb32e32ef85f282fcefb53c9f734f 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.h +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h @@ -15,14 +15,17 @@ struct qat_alg_buf { } __packed; struct qat_alg_buf_list { - u64 resrvd; - u32 num_bufs; - u32 num_mapped_bufs; + /* New members must be added within the __struct_group() macro below. 
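+	 *
+	 * __struct_group() emits these members twice: inline here and as a
+	 * standalone struct qat_alg_buf_list_hdr (tag 'hdr') with an
+	 * identical layout. That shared layout is what lets qat_bl.c turn
+	 * the fixed-size list back into the flexible-array form:
+	 *
+	 *	bufl = container_of(&buf->sgl_src.sgl_hdr,
+	 *			    struct qat_alg_buf_list, hdr);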
*/ + __struct_group(qat_alg_buf_list_hdr, hdr, __packed, + u64 resrvd; + u32 num_bufs; + u32 num_mapped_bufs; + ); struct qat_alg_buf buffers[]; } __packed; struct qat_alg_fixed_buf_list { - struct qat_alg_buf_list sgl_hdr; + struct qat_alg_buf_list_hdr sgl_hdr; struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC]; } __packed __aligned(64); diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index b533984906ece67a5a6a27a5e8b5f331403861ff..2ba4aa22e09279bbbd5cdb213fb3991b0cca297a 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -13,15 +13,6 @@ #include "qat_compression.h" #include "qat_algs_send.h" -#define QAT_RFC_1950_HDR_SIZE 2 -#define QAT_RFC_1950_FOOTER_SIZE 4 -#define QAT_RFC_1950_CM_DEFLATE 8 -#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7 -#define QAT_RFC_1950_CM_MASK 0x0f -#define QAT_RFC_1950_CM_OFFSET 4 -#define QAT_RFC_1950_DICT_MASK 0x20 -#define QAT_RFC_1950_COMP_HDR 0x785e - static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; @@ -109,69 +100,6 @@ static void qat_comp_resubmit(struct work_struct *work) acomp_request_complete(areq, ret); } -static int parse_zlib_header(u16 zlib_h) -{ - int ret = -EINVAL; - __be16 header; - u8 *header_p; - u8 cmf, flg; - - header = cpu_to_be16(zlib_h); - header_p = (u8 *)&header; - - flg = header_p[0]; - cmf = header_p[1]; - - if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K) - return ret; - - if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE) - return ret; - - if (flg & QAT_RFC_1950_DICT_MASK) - return ret; - - return 0; -} - -static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req, - void *resp) -{ - struct acomp_req *areq = qat_req->acompress_req; - enum direction dir = qat_req->dir; - __be32 qat_produced_adler; - - qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp)); - - if (dir == COMPRESSION) { - __be16 zlib_header; - - zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR); - scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1); - areq->dlen += QAT_RFC_1950_HDR_SIZE; - - scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen, - QAT_RFC_1950_FOOTER_SIZE, 1); - areq->dlen += QAT_RFC_1950_FOOTER_SIZE; - } else { - __be32 decomp_adler; - int footer_offset; - int consumed; - - consumed = qat_comp_get_consumed_ctr(resp); - footer_offset = consumed + QAT_RFC_1950_HDR_SIZE; - if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen) - return -EBADMSG; - - scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset, - QAT_RFC_1950_FOOTER_SIZE, 0); - - if (qat_produced_adler != decomp_adler) - return -EBADMSG; - } - return 0; -} - static void qat_comp_generic_callback(struct qat_compression_req *qat_req, void *resp) { @@ -293,18 +221,6 @@ static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm) memset(ctx, 0, sizeof(*ctx)); } -static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm) -{ - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); - struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); - int ret; - - ret = qat_comp_alg_init_tfm(acomp_tfm); - ctx->qat_comp_callback = &qat_comp_rfc1950_callback; - - return ret; -} - static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir, unsigned int shdr, unsigned int sftr, unsigned int dhdr, unsigned int dftr) @@ -400,43 +316,6 @@ static int qat_comp_alg_decompress(struct acomp_req *req) return 
qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0); } -static int qat_comp_alg_rfc1950_compress(struct acomp_req *req) -{ - if (!req->dst && req->dlen != 0) - return -EINVAL; - - if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE) - return -EINVAL; - - return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, - QAT_RFC_1950_HDR_SIZE, - QAT_RFC_1950_FOOTER_SIZE); -} - -static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req) -{ - struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); - struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); - struct adf_accel_dev *accel_dev = ctx->inst->accel_dev; - u16 zlib_header; - int ret; - - if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE) - return -EBADMSG; - - scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0); - - ret = parse_zlib_header(zlib_header); - if (ret) { - dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n"); - return ret; - } - - return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE, - QAT_RFC_1950_FOOTER_SIZE, 0, 0); -} - static struct acomp_alg qat_acomp[] = { { .base = { .cra_name = "deflate", @@ -452,22 +331,7 @@ static struct acomp_alg qat_acomp[] = { { .decompress = qat_comp_alg_decompress, .dst_free = sgl_free, .reqsize = sizeof(struct qat_compression_req), -}, { - .base = { - .cra_name = "zlib-deflate", - .cra_driver_name = "qat_zlib_deflate", - .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_ctxsize = sizeof(struct qat_compression_ctx), - .cra_module = THIS_MODULE, - }, - .init = qat_comp_alg_rfc1950_init_tfm, - .exit = qat_comp_alg_exit_tfm, - .compress = qat_comp_alg_rfc1950_compress, - .decompress = qat_comp_alg_rfc1950_decompress, - .dst_free = sgl_free, - .reqsize = sizeof(struct qat_compression_req), -} }; +}}; int qat_comp_algs_register(void) { diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c index 40c8e74d1cf9ed0d1dfaf98d0375a34c002ee980..101c6ea4167389a27814f4ac6cd7924570d9aaf6 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.c @@ -105,8 +105,8 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) } /** - * qat_crypto_vf_dev_config() - * create dev config required to create crypto inst. + * qat_crypto_vf_dev_config() - create dev config required to create + * crypto inst. * * @accel_dev: Pointer to acceleration device. 
* diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c index cbb946a800761d600b30e47a4bf1dcfd5839aafe..317cafa9d11f9eb19940a302f0668fba5e9acad3 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_hal.c +++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c @@ -697,12 +697,16 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle, case ADF_4XXX_PCI_DEVICE_ID: case ADF_401XX_PCI_DEVICE_ID: case ADF_402XX_PCI_DEVICE_ID: + case ADF_420XX_PCI_DEVICE_ID: handle->chip_info->mmp_sram_size = 0; handle->chip_info->nn = false; handle->chip_info->lm2lm3 = true; handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X; handle->chip_info->icp_rst_csr = ICP_RESET_CPP0; - handle->chip_info->icp_rst_mask = 0x100015; + if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID) + handle->chip_info->icp_rst_mask = 0x100155; + else + handle->chip_info->icp_rst_mask = 0x100015; handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0; handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX; handle->chip_info->wakeup_event_val = 0x80000000; diff --git a/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c new file mode 100644 index 0000000000000000000000000000000000000000..892c2283a50e5ce1841258f34ba511de668b49a3 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_common_drv.h" + +struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id) +{ + struct adf_accel_dev *accel_dev; + struct qat_migdev_ops *ops; + struct qat_mig_dev *mdev; + + accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + if (!accel_dev) + return ERR_PTR(-ENODEV); + + ops = GET_VFMIG_OPS(accel_dev); + if (!ops || !ops->init || !ops->cleanup || !ops->reset || !ops->open || + !ops->close || !ops->suspend || !ops->resume || !ops->save_state || + !ops->load_state || !ops->save_setup || !ops->load_setup) + return ERR_PTR(-EINVAL); + + mdev = kmalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return ERR_PTR(-ENOMEM); + + mdev->vf_id = vf_id; + mdev->parent_accel_dev = accel_dev; + + return mdev; +} +EXPORT_SYMBOL_GPL(qat_vfmig_create); + +int qat_vfmig_init(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->init(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_init); + +void qat_vfmig_cleanup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->cleanup(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_cleanup); + +void qat_vfmig_reset(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->reset(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_reset); + +int qat_vfmig_open(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->open(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_open); + +void qat_vfmig_close(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + GET_VFMIG_OPS(accel_dev)->close(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_close); + +int qat_vfmig_suspend(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->suspend(mdev); +} 
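+
+/*
+ * Sketch of the order in which a consumer (for example a VFIO variant
+ * driver) is expected to call this API. This ordering is an assumption
+ * inferred from the ops names, not a contract stated in this file:
+ *
+ *	mdev = qat_vfmig_create(pdev, vf_id);
+ *	qat_vfmig_init(mdev);
+ *	qat_vfmig_open(mdev);
+ *	qat_vfmig_suspend(mdev);	(quiesce the VF)
+ *	qat_vfmig_save_state(mdev);	(then transfer the saved state)
+ *	...
+ *	qat_vfmig_close(mdev);
+ *	qat_vfmig_cleanup(mdev);
+ *	qat_vfmig_destroy(mdev);
+ */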
+EXPORT_SYMBOL_GPL(qat_vfmig_suspend); + +int qat_vfmig_resume(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->resume(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_resume); + +int qat_vfmig_save_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->save_state(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_save_state); + +int qat_vfmig_save_setup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->save_setup(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_save_setup); + +int qat_vfmig_load_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->load_state(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_load_state); + +int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->load_setup(mdev, size); +} +EXPORT_SYMBOL_GPL(qat_vfmig_load_setup); + +void qat_vfmig_destroy(struct qat_mig_dev *mdev) +{ + kfree(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_destroy); diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 4bd150d1441a02aecb9b4d81e97327fd3af8f538..ad2c64af7427ee7c68ee2c259ea876fad4336778 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -200,7 +200,7 @@ static int qat_uclo_parse_num(char *str, unsigned int *num) unsigned long ae = 0; int i; - strncpy(buf, str, 15); + strscpy(buf, str, sizeof(buf)); for (i = 0; i < 16; i++) { if (!isdigit(buf[i])) { buf[i] = '\0'; @@ -733,6 +733,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) case ADF_4XXX_PCI_DEVICE_ID: case ADF_401XX_PCI_DEVICE_ID: case ADF_402XX_PCI_DEVICE_ID: + case ADF_420XX_PCI_DEVICE_ID: return ICP_QAT_AC_4XXX_A_DEV_TYPE; default: pr_err("QAT: unsupported device 0x%x\n", diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 0e40897cc983a8dbb05a4403aec7b0253d159d99..c0661ff5e929278e946a381232f592e538fa371f 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -1,9 +1,11 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include +#include #include #include #include "adf_dh895xcc_hw_data.h" diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c index 1e748e8ce12d5df17d92d7f921c1ffb16f8598cf..40b456b8035b5a242efd103dbf04359f49495e3d 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c @@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_DH895XCC_FW); MODULE_FIRMWARE(ADF_DH895XCC_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index 70e56cc16ecebb761f50961b64d47cbef0b8241e..f4ee4c2e00da82535e2fdd4aca14ac5f85f74a06 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ 
b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
index fefb85ceaeb9a2b261a06a1b8a1702e019c26d4a..d59cb1ba2ad5994b8f3b5b1c48ea21014953ec2b 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
@@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Intel");
 MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
 MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_QAT);
diff --git a/drivers/crypto/montage/Kconfig b/drivers/crypto/montage/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..e8e4b287a7927f3d29bc92bc7b7f449f88c3603e
--- /dev/null
+++ b/drivers/crypto/montage/Kconfig
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+source "drivers/crypto/montage/tsse/Kconfig"
diff --git a/drivers/crypto/montage/Makefile b/drivers/crypto/montage/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..a50415fe10c702c7ee41b58e44c6539a80fc9668
--- /dev/null
+++ b/drivers/crypto/montage/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_DEV_TSSE) += tsse/
diff --git a/drivers/crypto/montage/tsse/Kconfig b/drivers/crypto/montage/tsse/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..5854f8e4525cf2fbd2dd61c81f16d1fb30d51a4b
--- /dev/null
+++ b/drivers/crypto/montage/tsse/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CRYPTO_DEV_TSSE
+	tristate "Support for Montage(R) TSSE"
+	depends on X86 && PCI
+	select FW_LOADER
+	help
+	  Support for Montage(R) TSSE for accelerating crypto workloads.
+
+	  To compile this as a module, choose M here.
\ No newline at end of file
diff --git a/drivers/crypto/montage/tsse/Makefile b/drivers/crypto/montage/tsse/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..d67ffde3a5b046c114fb6953bb042265539fb07d
--- /dev/null
+++ b/drivers/crypto/montage/tsse/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# This file is part of tsse driver for Linux
+#
+# Copyright © 2023 Montage Technology. All rights reserved.
+
+obj-$(CONFIG_CRYPTO_DEV_TSSE) += tsse.o
+
+tsse-objs := tsse_dev_mgr.o \
+	tsse_ipc.o \
+	tsse_fw_service.o \
+	tsse_service.o \
+	tsse_irq.o \
+	tsse_dev_drv.o \
+	tsse_vuart.o
diff --git a/drivers/crypto/montage/tsse/tsse_dev.h b/drivers/crypto/montage/tsse/tsse_dev.h
new file mode 100644
index 0000000000000000000000000000000000000000..c16d2ae7c414088aa585c698b776f2e9bb4acf03
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_dev.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file is part of tsse driver for Linux
+ *
+ * Copyright © 2023 Montage Technology. All rights reserved.
+ */ + +#ifndef __TSSE_DEV_H__ +#define __TSSE_DEV_H__ +#include +#include +#include +#include +#include +#include +#include +#include "tsse_ipc.h" + +#define TSSE_PCI_MAX_BARS 4 +#define TSSE_FW_VERSION_LEN 32 + +struct tsse_bar { + void __iomem *virt_addr; + resource_size_t addr; + resource_size_t size; +}; +struct tsse_dev_pci { + struct pci_dev *pci_dev; + struct tsse_bar bars[TSSE_PCI_MAX_BARS]; + u8 revid; +}; +enum tsse_dev_status_bit { + TSSE_DEV_STATUS_STARTING = 0, + TSSE_DEV_STATUS_STARTED = 1 + +}; +struct tsse_qpairs_bank { + struct tsse_dev *tsse_dev; + void __iomem *reg_base; + + u32 num_qparis; + u32 irq_vec; +}; +struct tsse_dev { + struct module *owner; + struct dentry *debugfs_dir; + unsigned long status; + struct list_head list; + struct tsse_dev_pci tsse_pci_dev; + struct tsse_qpairs_bank qpairs_bank; + atomic_t ref_count; + bool is_vf; + int id; + u32 num_irqs; + u32 num_vfs; + struct uart_port *port; + struct tsse_ipc *ipc; + void *adi; + void *mbx_hw; + const struct firmware *fw; + char fw_version[TSSE_FW_VERSION_LEN]; + bool fw_version_exist; +}; +#define TSSEDEV_TO_DEV(tssedev) (&((tssedev)->tsse_pci_dev.pci_dev->dev)) +#define TSSE_DEV_BARS(tssedev) ((tssedev)->tsse_pci_dev.bars) + +#include "tsse_log.h" + +struct list_head *tsse_devmgr_get_head(void); + +int tsse_dev_get(struct tsse_dev *tsse_dev); +void tsse_dev_put(struct tsse_dev *tsse_dev); +int tsse_devmgr_add_dev(struct tsse_dev *tsse_dev); +void tsse_devmgr_rm_dev(struct tsse_dev *tdev); +int tsse_prepare_restart_dev(struct tsse_dev *tdev); +int tsse_start_dev(struct tsse_dev *tdev); + +static inline struct tsse_dev *pci_to_tsse_dev(struct pci_dev *pci_dev) +{ + return (struct tsse_dev *)pci_get_drvdata(pci_dev); +} + +static inline int tsse_get_cur_node(void) +{ + int cpu, node; + + cpu = get_cpu(); + node = topology_physical_package_id(cpu); + put_cpu(); + + return node; +} + +static inline int tsse_dev_started(struct tsse_dev *tdev) +{ + return test_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); +} +static inline int tsse_dev_in_use(struct tsse_dev *tdev) +{ + return atomic_read(&tdev->ref_count) != 0; +} +#endif diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.c b/drivers/crypto/montage/tsse/tsse_dev_drv.c new file mode 100644 index 0000000000000000000000000000000000000000..86c619d64f5ee3e6a76ab05ef607c424c40bd396 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_drv.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "tsse_dev_drv.h" +#include "tsse_vuart.h" +#include "tsse_ipc.h" +#include "tsse_fw_service.h" + +#define CLUSTER_SLOT_CONFIG_OFFSET 0x5780000 +#define QPAIR_SETTING_OFFSET 0x50000 +#define BAR_START 2 +#define BAR_END 4 + +static DEFINE_IDA(tsse_ida); + +static inline void tsse_qpair_enable_pf(struct tsse_dev *tdev, bool enable) +{ + writel(enable ? 
1 : 0, + TSSE_DEV_BARS(tdev)[2].virt_addr + + CLUSTER_SLOT_CONFIG_OFFSET + QPAIR_SETTING_OFFSET); +} +static int tsse_sriov_disable(struct tsse_dev *tdev) +{ + pci_disable_sriov(tdev->tsse_pci_dev.pci_dev); + tsse_qpair_enable_pf(tdev, true); + + return 0; +} + +static int tsse_sriov_configure(struct pci_dev *pdev, int num_vfs_param) +{ + int totalvfs = pci_sriov_get_totalvfs(pdev); + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + int ret = 0; + + if ((!tdev) || (num_vfs_param < 0) || (totalvfs <= 0)) { + dev_err(&pdev->dev, + "%s %d: failed to config sriov, tdev=%p totalvfs=%d num_vfs_param=%d\n", + __func__, __LINE__, tdev, totalvfs, num_vfs_param); + return -EBADE; + } + + if (num_vfs_param > totalvfs) + num_vfs_param = totalvfs; + + dev_info(&pdev->dev, "%s %d: has total %d vfs, and enable %d vfs\n", + __func__, __LINE__, totalvfs, num_vfs_param); + + if ((num_vfs_param > TSSE_PF_MAX_IRQ_NUM) || + (num_vfs_param > TSSE_PF_MAX_QPAIR_NUM)) { + tsse_dev_err( + tdev, + "vfs number is greater than pf's \"max_irq_num=%d or max_qpairs_num=%d\"\n", + TSSE_PF_MAX_IRQ_NUM, TSSE_PF_MAX_QPAIR_NUM); + return -EBADE; + } + + if (!tsse_dev_started(tdev)) { + dev_err(&pdev->dev, "%s %d: device is not started\n", __func__, + __LINE__); + return -EBADE; + } + + if (tsse_dev_in_use(tdev)) { + dev_err(&pdev->dev, "%s %d: device is busy\n", __func__, + __LINE__); + return -EBUSY; + } + + tsse_sriov_disable(tdev); + + tsse_prepare_restart_dev(tdev); + + tdev->num_vfs = num_vfs_param; + + if (tdev->num_vfs > 0) { + tdev->num_irqs = TSSE_SRIOV_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_SRIOV_PF_MAX_QPAIR_NUM; + } else { + tdev->num_irqs = TSSE_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_PF_MAX_QPAIR_NUM; + } + + tsse_dev_info( + tdev, + "num_irqs:%u num_qparis:%u qpairs' start irq vector index:%u qpairs' reg base:0x%lx\n", + tdev->num_irqs, tdev->qpairs_bank.num_qparis, + tdev->qpairs_bank.irq_vec, (ulong)tdev->qpairs_bank.reg_base); + + ret = tsse_start_dev(tdev); + if (ret) { + dev_err(&pdev->dev, "%s %d: failed to start the device\n", + __func__, __LINE__); + return ret; + } + + if (num_vfs_param > 0) { + tsse_qpair_enable_pf(tdev, false); + pci_enable_sriov(pdev, num_vfs_param); + } + + return num_vfs_param; +} + +/** + * tsse_image_load_store() - This function will be called when user + * writes string to /sys/bus/pci/devices/.../tsse_image_load. + * Driver will always loads /lib/firmware/tsse_firmware.bin. + * @dev: device + * @attr: device attribute + * @buf: string that user writes + * @count: string length that user writes + * Return: the number of bytes used from the buffer, here it is just the count argument. 
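+ *
+ * Example (a sketch; the PCI address is illustrative):
+ *
+ *	echo 1 > /sys/bus/pci/devices/0000:03:00.0/tsse_image_load
+ *
+ * Any non-empty write triggers the reload; the written value itself is
+ * ignored.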
+ */ +static ssize_t tsse_image_load_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = NULL; + struct tsse_dev *tdev = NULL; + + pdev = container_of(dev, struct pci_dev, dev); + if (pdev) + tdev = pci_to_tsse_dev(pdev); + if (buf && count && tdev) { + tsse_dev_info(tdev, "receive command to load firmware %s\n", TSSE_FIRMWARE); + if (!tsse_fw_load(pdev, TSSE_FIRMWARE, &tdev->fw)) { + if (!get_firmware_version(tdev->fw, tdev->fw_version)) + tdev->fw_version_exist = true; + if (tsse_fw_manual_load_ipc(pdev)) + dev_err(&pdev->dev, "%s %d: firmware update failed\n", + __func__, __LINE__); + } + } + return count; +} + +DEVICE_ATTR_WO(tsse_image_load); + +static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int status = 0; + int bar; + u32 tmp_val; + struct tsse_dev *tdev; + + if (!pdev->is_physfn) { + dev_err(&pdev->dev, "%s %d: this is not Physical fn\n", + __func__, __LINE__); + return -EPERM; + } + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + dev_err(&pdev->dev, + "%s %d: invalid numa configuration for tsse\n", + __func__, __LINE__); + return -EINVAL; + } + + tdev = kzalloc_node(sizeof(*tdev), GFP_KERNEL, dev_to_node(&pdev->dev)); + + if (!tdev) + return -ENOMEM; + + status = pcim_enable_device(pdev); + + if (status) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + goto out_err; + } + + pci_set_master(pdev); + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(48))) { + if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, + "failed to set tsse dma address width\n"); + status = -EFAULT; + goto out_err; + } else { + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + } + + } else { + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(48)); + } + + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + + status = pcim_iomap_regions(pdev, BIT(0) | BIT(2), TSSE_DEV_NAME); + if (status) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto out_err; + } + + for (bar = BAR_START; bar < BAR_END;) { + TSSE_DEV_BARS(tdev)[bar].addr = pci_resource_start(pdev, bar); + TSSE_DEV_BARS(tdev)[bar].size = pci_resource_len(pdev, bar); + TSSE_DEV_BARS(tdev) + [bar].virt_addr = pcim_iomap_table(pdev)[bar]; + + dev_info(&pdev->dev, + "bar[%d]: addr=0x%llx, size=0x%llx, virt_addr=0x%lx\n", + bar, TSSE_DEV_BARS(tdev)[bar].addr, + TSSE_DEV_BARS(tdev)[bar].size, + (ulong)TSSE_DEV_BARS(tdev)[bar].virt_addr); + + bar += 2; + } + + tdev->owner = THIS_MODULE; + tdev->is_vf = false; + tdev->tsse_pci_dev.pci_dev = pdev; + tdev->id = ida_alloc(&tsse_ida, GFP_KERNEL); + if (tdev->id < 0) { + dev_err(&pdev->dev, "Unable to get id\n"); + status = tdev->id; + goto out_err; + } + + pci_set_drvdata(pdev, tdev); + + tdev->num_irqs = TSSE_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_PF_MAX_QPAIR_NUM; + tdev->qpairs_bank.irq_vec = TSSE_PF_QPAIR_START_IRQ_VECTOR; + tdev->qpairs_bank.reg_base = + TSSE_DEV_BARS(tdev)[2].virt_addr + TSSE_PF_QPAIR_REG_BASE; + + tsse_qpair_enable_pf(tdev, true); + + tsse_dev_info( + tdev, + "num_irqs:%u num_qparis:%u qpairs' start irq vector index:%u qpairs' reg base:0x%lx\n", + tdev->num_irqs, tdev->qpairs_bank.num_qparis, + tdev->qpairs_bank.irq_vec, (ulong)tdev->qpairs_bank.reg_base); + + if (tsse_devmgr_add_dev(tdev)) { + dev_err(&pdev->dev, + "%s %d: tsse_devmgr failed to add new device\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_ida_free; + } + + if (vuart_init_port(pdev)) { + dev_err(&pdev->dev, + "%s %d: vuart_init_port failed to 
init vuart.\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_port_init; + } + + tdev->fw_version_exist = false; + /* Its result not break driver init process */ + if (!tsse_fw_load(pdev, TSSE_FIRMWARE, &tdev->fw)) { + if (!get_firmware_version(tdev->fw, tdev->fw_version)) + tdev->fw_version_exist = true; + } + + if (tsse_ipc_init(pdev)) { + dev_err(&pdev->dev, + "%s %d: tsse_ipc_init failed to tsse_ipc.\n", __func__, + __LINE__); + status = -EFAULT; + goto out_err_ipc; + } + + if (sysfs_create_file(&pdev->dev.kobj, &dev_attr_tsse_image_load.attr)) { + dev_err(&pdev->dev, + "%s %d: sysfs_create_file failed for tsse image load.\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_image_load; + } + + tsse_dev_info(tdev, "successful\n"); + + pci_read_config_dword(pdev, 0x720, &tmp_val); + tsse_dev_dbg(tdev, "the value of FILTER_MASK_2_REG is 0x%x\n", tmp_val); + + return 0; +out_err_image_load: + tsse_ipc_deinit(tdev); +out_err_ipc: + vuart_uninit_port(pdev); +out_err_port_init: + tsse_devmgr_rm_dev(tdev); +out_err_ida_free: + ida_free(&tsse_ida, tdev->id); +out_err: + kfree(tdev); + return status; +} + +static void device_remove(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + + pr_info("%s %d: pci_dev 0x%lx tsse_dev 0x%lx\n", __func__, __LINE__, + (ulong)pdev, (ulong)tdev); + + tsse_sriov_disable(tdev); + if (tdev->fw) { + release_firmware(tdev->fw); + tdev->fw = NULL; + } + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_tsse_image_load.attr); + tsse_ipc_deinit(tdev); + vuart_uninit_port(pdev); + tsse_devmgr_rm_dev(tdev); + ida_free(&tsse_ida, tdev->id); + kfree(tdev); + dev_info(&pdev->dev, "%s %d: successful\n", __func__, __LINE__); +} + +static const struct pci_device_id pci_ids[] = { + { + PCI_DEVICE(0x1b00, 0xc011), + }, + { + PCI_DEVICE(0x1b00, 0xd011), + }, + { 0 } +}; + +static struct pci_driver pci_driver = { + .name = TSSE_DEV_NAME, + .id_table = pci_ids, + .probe = device_probe, + .remove = device_remove, + .sriov_configure = tsse_sriov_configure, +}; + +MODULE_DEVICE_TABLE(pci, pci_ids); + +static int __init tsse_init(void) +{ + int status; + + status = vuart_register(); + if (status) { + pr_err("vuart_register failed[%d].\n", status); + return status; + } + + status = pci_register_driver(&pci_driver); + if (status) { + vuart_unregister(); + return status; + } + + pr_info(KBUILD_MODNAME ": loaded.\n"); + + return 0; +} + +static void __exit tsse_exit(void) +{ + pci_unregister_driver(&pci_driver); + vuart_unregister(); + + pr_info(KBUILD_MODNAME ": unloaded.\n"); +} + +module_init(tsse_init); +module_exit(tsse_exit); + +MODULE_AUTHOR("montage-tech.com"); +MODULE_DESCRIPTION("TSSE device driver"); +MODULE_VERSION("1.0.0"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(TSSE_FIRMWARE); diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.h b/drivers/crypto/montage/tsse/tsse_dev_drv.h new file mode 100644 index 0000000000000000000000000000000000000000..6a05572a3849457d698f221f947056a7956f6ce0 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_drv.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
+ */ + +#ifndef __TSSE_DEV_DRV_H__ +#define __TSSE_DEV_DRV_H__ +#define TSSE_DEV_NAME "tsse" + +// TODO: need to support full qpairs +#define TSSE_PF_MAX_QPAIR_NUM 16 + +#define TSSE_PF_MAX_IRQ_NUM 96 +#define TSSE_PF_QPAIR_START_IRQ_VECTOR 32 + +#define TSSE_SRIOV_PF_MAX_QPAIR_NUM 0 +#define TSSE_SRIOV_PF_MAX_IRQ_NUM 16 + +#define TSSE_PF_QPAIR_REG_BASE 0x5700000 + +#include "tsse_dev.h" + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_dev_mgr.c b/drivers/crypto/montage/tsse/tsse_dev_mgr.c new file mode 100644 index 0000000000000000000000000000000000000000..39553eb96832380480a138593941c3c0f8bf26fa --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_mgr.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "tsse_dev.h" +#include "tsse_irq.h" +static DEFINE_MUTEX(tsse_dev_table_lock); +static LIST_HEAD(tsse_dev_table); + +static DEFINE_MUTEX(algs_lock); + +static inline void tsse_list_del(struct list_head *entry) +{ + WRITE_ONCE(entry->next->prev, entry->prev); + WRITE_ONCE(entry->prev->next, entry->next); +} +static inline void tsse_list_add(struct list_head *new, struct list_head *prev, + struct list_head *next) +{ + WRITE_ONCE(new->next, next); + WRITE_ONCE(new->prev, prev); + mb(); /* Make sure new node updates first */ + WRITE_ONCE(next->prev, new); + WRITE_ONCE(prev->next, new); +} + +static int tsse_dev_pf_get(struct tsse_dev *vf_tsse_dev) +{ + int ret = 0; + struct tsse_dev *pf_tsse_dev = NULL; + struct pci_dev *pf_pci_dev = NULL; + + pf_pci_dev = vf_tsse_dev->tsse_pci_dev.pci_dev->physfn; + + if (!pf_pci_dev) + return 0; + + pf_tsse_dev = pci_to_tsse_dev(pf_pci_dev); + if (pf_tsse_dev) { + if (atomic_add_return(1, &pf_tsse_dev->ref_count) == 1) { + if (!try_module_get(pf_tsse_dev->owner)) + ret = -EFAULT; + } + } + return ret; +} + +static void tsse_dev_pf_put(struct tsse_dev *vf_tsse_dev) +{ + struct tsse_dev *pf_tsse_dev = NULL; + struct pci_dev *pf_pci_dev = NULL; + + pf_pci_dev = vf_tsse_dev->tsse_pci_dev.pci_dev->physfn; + + if (!pf_pci_dev) + return; + + pf_tsse_dev = pci_to_tsse_dev(pf_pci_dev); + if (pf_tsse_dev) { + if (atomic_sub_return(1, &pf_tsse_dev->ref_count) == 0) + module_put(pf_tsse_dev->owner); + } +} + +int tsse_dev_get(struct tsse_dev *tdev) +{ + int ref_count = atomic_add_return(1, &tdev->ref_count); + + if (!tsse_dev_started(tdev)) { + atomic_sub(1, &tdev->ref_count); + return -EAGAIN; + } + + if (ref_count == 1) { + if (!try_module_get(tdev->owner)) + return -EFAULT; + if (tdev->is_vf) + return tsse_dev_pf_get(tdev); + } + return 0; +} +void tsse_dev_put(struct tsse_dev *tdev) +{ + if (atomic_sub_return(1, &tdev->ref_count) == 0) { + module_put(tdev->owner); + if (tdev->is_vf) + tsse_dev_pf_put(tdev); + } +} + +static int tsse_stop_dev(struct tsse_dev *tdev, bool busy_exit) +{ + int times, max_retry = 150; + + clear_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + + for (times = 0; times < max_retry; times++) { + if (!tsse_dev_in_use(tdev)) + break; + msleep(100); + } + + if (times >= max_retry) { + tsse_dev_err(tdev, "Failed to stop busy device\n"); + if (busy_exit) + return -EBUSY; + } + if (tdev->qpairs_bank.num_qparis != 0) { + mutex_lock(&tsse_dev_table_lock); + tsse_list_del(&tdev->list); + mutex_unlock(&tsse_dev_table_lock); + tsse_dev_info(tdev, "removed from active dev table 
list\n"); + } + + tsse_dev_info(tdev, "device stopped\n"); + + return 0; +} + +int tsse_start_dev(struct tsse_dev *tdev) +{ + struct tsse_dev *tmp_dev; + struct list_head *prev_node = &tsse_dev_table; + int ret = 0; + + if (tdev->qpairs_bank.num_qparis == 0) { + set_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + tsse_dev_info(tdev, "device started\n"); + return 0; + } + + set_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + + mutex_lock(&tsse_dev_table_lock); + + list_for_each_entry(tmp_dev, &tsse_dev_table, list) { + if (tmp_dev == tdev) { + ret = -EEXIST; + tsse_dev_err(tdev, + "The device cannot be added repeatedly\n"); + goto clear_status; + } + } + + set_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + tsse_list_add(&tdev->list, prev_node, prev_node->next); + + tsse_dev_info(tdev, "device started\n"); + mutex_unlock(&tsse_dev_table_lock); + + return 0; +clear_status: + mutex_unlock(&tsse_dev_table_lock); + clear_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + return ret; +} + +int tsse_prepare_restart_dev(struct tsse_dev *tdev) +{ + return tsse_stop_dev(tdev, false); +} + +void tsse_devmgr_rm_dev(struct tsse_dev *tdev) +{ + tsse_stop_dev(tdev, false); + tsse_dev_free_irq_vectors(tdev); + msleep(300); +} + +int tsse_devmgr_add_dev(struct tsse_dev *tdev) +{ + int ret; + + ret = tsse_dev_alloc_irq_vectors(tdev); + if (ret == 0) { + atomic_set(&tdev->ref_count, 0); + tdev->status = 0; + ret = tsse_start_dev(tdev); + + if (ret != 0) + tsse_dev_free_irq_vectors(tdev); + } + return ret; +} + +struct list_head *tsse_devmgr_get_head(void) +{ + return &tsse_dev_table; +} diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.c b/drivers/crypto/montage/tsse/tsse_fw_service.c new file mode 100644 index 0000000000000000000000000000000000000000..486352bc8f84af626f36325e008030174e0a8862 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_fw_service.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "tsse_dev.h"
+#include "tsse_service.h"
+
+#define SEARCH_PATTERN "MT_CFG_BUILD_VERSION_DETAIL"
+#define SPACE_CH ' '
+
+static int fw_send_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg)
+{
+	u8 *h2d;
+	u32 int_reg;
+
+	mutex_lock(&tsseipc->list_lock);
+
+	int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	if ((int_reg & IPC_REGISTER_INT_SET) != 0) {
+		mutex_unlock(&tsseipc->list_lock);
+		return -EFAULT;
+	}
+	if (msg->header.i_len < sizeof(struct ipc_header) +
+		sizeof(struct msg_info) + sizeof(struct fw_load)) {
+		dev_err(tsseipc->dev, "msg format error\n");
+		mutex_unlock(&tsseipc->list_lock);
+		return -EFAULT;
+	}
+	h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET);
+	memcpy_toio(h2d, msg, sizeof(struct ipc_header));
+	memcpy_toio(h2d + sizeof(struct ipc_header), (u8 *)msg->i_data,
+		    msg->header.i_len - sizeof(struct ipc_header));
+	writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+
+	dev_info(tsseipc->dev, "notify device to get firmware\n");
+	mutex_unlock(&tsseipc->list_lock);
+	return 0;
+}
+
+/**
+ * get_firmware_version() - Get version information from firmware
+ * @fw: firmware pointer
+ * @fw_version_out: firmware version string output
+ * Return: 0 on success, error code otherwise
+ */
+int get_firmware_version(const struct firmware *fw, char *fw_version_out)
+{
+	const char *pattern = SEARCH_PATTERN;
+	const uint8_t *fw_buffer = fw->data;
+	uint32_t pattern_i = 0, buffer_i = 0;
+	uint32_t pattern_len = strlen(pattern); // not counting the trailing NUL
+	uint32_t version_start = 0;
+	uint32_t version_len = 0;
+
+	while (buffer_i < fw->size) {
+		if (pattern[pattern_i] == (char) fw_buffer[buffer_i]) {
+			buffer_i++;
+			pattern_i++;
+		}
+		if (pattern_i == pattern_len) {
+			break; // pattern found
+		} else if ((buffer_i < fw->size) &&
+			(pattern[pattern_i] != (char) fw_buffer[buffer_i])) {
+			// mismatch after a partial match
+			if (pattern_i != 0) {
+				// the pattern has no repeated prefix, so after a
+				// mismatch the next comparison restarts from the
+				// beginning of the pattern
+				pattern_i = 0;
+			} else {
+				buffer_i++;
+			}
+		}
+	}
+	if (pattern_i == pattern_len) {
+		buffer_i++;
+		version_start = buffer_i;
+		while (buffer_i < fw->size) {
+			if (fw_buffer[buffer_i] == SPACE_CH) {
+				version_len = buffer_i - version_start;
+				if (version_len >= TSSE_FW_VERSION_LEN - 1)
+					version_len = TSSE_FW_VERSION_LEN - 2;
+				strscpy(fw_version_out, fw_buffer + version_start, version_len + 1);
+				return 0;
+			}
+			buffer_i++;
+		}
+	}
+	return -EINVAL;
+}
+
+/**
+ * fw_service() - Firmware service to handle IPC messages from the main CPU.
+ * It writes the initial or manually loaded firmware to the PCIe BAR and
+ * sends a message back.
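+ *
+ * Message layout, as implied by the size checks in fw_send_msg() and
+ * get_msginf() (illustrative):
+ *
+ *	+--------------------+ offset 0
+ *	| struct ipc_header  |
+ *	+--------------------+ sizeof(struct ipc_header)
+ *	| struct msg_info    |
+ *	+--------------------+ ... + sizeof(struct msg_info)
+ *	| struct fw_load     |
+ *	+--------------------+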
+ * @tsseipc_t: pointer to a structure used for IPC + * @msg_t: pointer to IPC message + */ +void fw_service(void *tsseipc_t, void *msg_t) +{ + void __iomem *fw; + uint32_t size; + uint32_t task_offset; + struct fw_load *fw_task; + struct tsse_dev *tdev; + struct tsse_ipc *tsseipc = (struct tsse_ipc *)tsseipc_t; + struct ipc_msg *msg = (struct ipc_msg *)msg_t; + + task_offset = sizeof(struct msg_info); + fw_task = (struct fw_load *)((uint8_t *)msg->i_data + task_offset); + tdev = pci_to_tsse_dev(tsseipc->pdev); + + if (!tdev || !tdev->fw) { + fw_task->result = 1; + fw_task->size = 0; + dev_info(tsseipc->dev, "firmware loading failed\n"); + if (fw_send_msg(tsseipc, msg)) + dev_err(tsseipc->dev, "notify device failed\n"); + return; + } + + fw_task->result = 0; + fw_task->size = tdev->fw->size; + size = tdev->fw->size; + fw = tsseipc->virt_addr + fw_task->offset + FW_BASE; + + memcpy_toio((u8 *)fw, tdev->fw->data, size); + dev_info(tsseipc->dev, "firmware loading done\n"); + if (fw_send_msg(tsseipc, msg)) + dev_err(tsseipc->dev, "notify device failed\n"); + + if (tdev->fw_version_exist) + dev_info(tsseipc->dev, "firmware version: %s\n", tdev->fw_version); + + if (tdev->fw) { + release_firmware(tdev->fw); + tdev->fw = NULL; + memset(tdev->fw_version, 0, TSSE_FW_VERSION_LEN); + tdev->fw_version_exist = false; + } +} + +/** + * tsse_fw_load() - Load firmware from /lib/firmware + * @pdev: pci device + * @name: firmware file name + * @fw: pointer to firmware pointer + * Return: 0 on success, error code otherwise + */ +int tsse_fw_load(struct pci_dev *pdev, const char *name, const struct firmware **fw) +{ + int result; + + result = request_firmware(fw, name, &pdev->dev); + if (result) + dev_err(&pdev->dev, "%s failed for %s\n", __func__, name); + return result; +} diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.h b/drivers/crypto/montage/tsse/tsse_fw_service.h new file mode 100644 index 0000000000000000000000000000000000000000..706ea6d297696cf1e49e51bc1150f63a141af1e0 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_fw_service.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_FW_SERVICE_H__ +#define __TSSE_FW_SERVICE_H__ + +#include + +#define FW_BASE 0x7000000 +#define TSSE_FIRMWARE "tsse_firmware.bin" + +void fw_service(void *tsseipc_t, void *msg_t); +int tsse_fw_load(struct pci_dev *pdev, const char *name, const struct firmware **fw); +int get_firmware_version(const struct firmware *fw, char *fw_version_out); +#endif diff --git a/drivers/crypto/montage/tsse/tsse_ipc.c b/drivers/crypto/montage/tsse/tsse_ipc.c new file mode 100644 index 0000000000000000000000000000000000000000..b75ca97db6b67afa3c3159427a454c865e01a7f7 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_ipc.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include + +#include "tsse_ipc.h" +#include "tsse_dev.h" +#include "tsse_service.h" + +/** + * get_msginf() - Create ipc_msg and read message from BAR. + * Return the pointer to ipc_msg, the caller is responsible for free it. 
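+ * The message is allocated with GFP_ATOMIC because this function runs
+ * from the IPC tasklet (softirq context); callers must kfree() it.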
+ * @d2h: device2host memory pointer
+ * Return: new ipc_msg pointer, which points to the message read from the device
+ */
+static struct ipc_msg *get_msginf(void __iomem *d2h)
+{
+	uint32_t u_len = 0;
+	struct ipc_msg *msg = NULL;
+
+	uint8_t *device_msg_data = NULL;
+	struct ipc_header *ipc_info = (struct ipc_header *)d2h;
+
+	// The memory layout in d2h must contain at least:
+	// ipc_header, msg_info and fw_load (message body)
+	if (ipc_info->i_len < sizeof(struct ipc_header) +
+		sizeof(struct msg_info) + sizeof(struct fw_load)) {
+		pr_err("%s(): msg format error\n", __func__);
+		return NULL;
+	}
+	u_len = ipc_info->i_len - sizeof(struct ipc_header);
+	msg = (struct ipc_msg *)(kzalloc(sizeof(struct ipc_msg) + u_len,
+					 GFP_ATOMIC));
+	if (!msg) {
+		pr_err("%s(): ipc_msg kzalloc failed\n", __func__);
+		return NULL;
+	}
+
+	msg->header.inst_id = ipc_info->inst_id;
+	msg->header.tgid = ipc_info->tgid;
+	msg->header.i_len = ipc_info->i_len;
+
+	device_msg_data = (uint8_t *)(d2h + sizeof(struct ipc_header));
+	memcpy_fromio((uint8_t *)msg->i_data, device_msg_data, u_len);
+
+	return msg;
+}
+
+static irqreturn_t tsse_ipc_d2h_irqhandler(int irq, void *dev_id)
+{
+	struct tsse_ipc *tsseipc = (struct tsse_ipc *)dev_id;
+
+	writel(0x0, tsseipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET);
+	tasklet_hi_schedule(&tsseipc->ipc_handle);
+	dev_dbg(tsseipc->dev, "irq%d\n", irq);
+	return IRQ_HANDLED;
+}
+
+bool check_send_enbit(struct tsse_ipc *tsseipc)
+{
+	u32 int_reg;
+
+	int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	return (int_reg & IPC_REGISTER_INT_SET) == 0;
+}
+
+void notify_device(struct tsse_ipc *tsseipc)
+{
+	writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+}
+
+/**
+ * ipc_hw_init() - Enable the main2host interrupt and clear the interrupt
+ * set values in both host2main and main2host.
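+ * Must run before ipc_init_msg(): a stale set bit left in
+ * HOST2MAIN_INTR_SET_OFFSET would make the send checks in fw_send_msg()
+ * and ipc_init_msg() fail with -EFAULT.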
+ * @hw_ipc: pointer to a structure used for IPC
+ */
+static void ipc_hw_init(struct tsse_ipc *hw_ipc)
+{
+	writel(0x1, hw_ipc->virt_addr + MAIN2HOST_INTR_ENABLE_OFFSET);
+	writel(0x0, hw_ipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	writel(0x0, hw_ipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET);
+}
+
+static int ipc_init_msg(struct tsse_ipc *tsseipc)
+{
+	u8 *h2d;
+	u32 int_reg;
+	u32 cmd_len;
+	u32 i_len;
+	struct ipc_msg *msg;
+	struct msg_info *info_msg;
+
+	cmd_len = sizeof(uint32_t);
+	i_len = sizeof(struct ipc_header) + sizeof(struct msg_info) + cmd_len;
+	msg = (struct ipc_msg *)(kzalloc(i_len, GFP_ATOMIC));
+
+	if (!msg) {
+		pr_err("%s(): msg kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	msg->header.i_len = i_len;
+	info_msg = (struct msg_info *)msg->i_data;
+	info_msg->msg_class = IPC_MESSAGE_BASIC;
+	*(uint32_t *)((uint8_t *)msg->i_data + sizeof(struct msg_info)) = IPC_BASIC_CMD_HOST_INIT;
+
+	mutex_lock(&tsseipc->list_lock);
+	int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	if ((int_reg & IPC_REGISTER_INT_SET) != 0) {
+		mutex_unlock(&tsseipc->list_lock);
+		kfree(msg);
+		return -EFAULT;
+	}
+	h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET);
+
+	memcpy_toio(h2d, msg, sizeof(struct ipc_header));
+	memcpy_toio(h2d + sizeof(struct ipc_header), (u8 *)msg->i_data,
+		    sizeof(struct msg_info) + sizeof(uint32_t));
+
+	writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	mutex_unlock(&tsseipc->list_lock);
+	kfree(msg);
+
+	return 0;
+}
+
+static void tsse_ipc_bh_handler(unsigned long data)
+{
+	struct tsse_ipc *tsseipc = (struct tsse_ipc *)data;
+
+	void __iomem *d2h_payload = tsseipc->virt_addr + MAIN2HOST_IPC_OFFSET;
+	struct ipc_msg *msg = get_msginf(d2h_payload);
+
+	if (!msg) {
+		dev_err(tsseipc->dev, "get_msginf returned NULL\n");
+		return;
+	}
+	if (service_rout(tsseipc, msg))
+		dev_err(tsseipc->dev, "illegal message class\n");
+	kfree(msg);
+}
+
+int tsse_ipc_init(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_ipc *ipc;
+	int rc;
+
+	ipc = devm_kzalloc(&pdev->dev, sizeof(*ipc), GFP_KERNEL);
+	if (ipc == NULL)
+		return -ENOMEM;
+	tdev->ipc = ipc;
+	ipc->pdev = pdev;
+	ipc->dev = &pdev->dev;
+	ipc->virt_addr = TSSE_DEV_BARS(tdev)[2].virt_addr;
+
+	mutex_init(&ipc->list_lock);
+	tasklet_init(&(ipc->ipc_handle), tsse_ipc_bh_handler,
+		     (ulong)(ipc));
+
+	rc = request_threaded_irq(pci_irq_vector(pdev, 0), NULL,
+				  tsse_ipc_d2h_irqhandler, IRQF_SHARED,
+				  "pf-ipc", ipc);
+	if (rc) {
+		dev_err(&pdev->dev, "request_threaded_irq failed\n");
+		return rc;
+	}
+	ipc_hw_init(ipc);
+	rc = ipc_init_msg(ipc);
+	if (rc) {
+		dev_err(&pdev->dev, "ipc_init_msg failed\n");
+		tsse_ipc_deinit(tdev);
+	}
+	return rc;
+}
+
+void tsse_ipc_deinit(void *tdev_t)
+{
+	struct tsse_ipc *tsseipc;
+	struct tsse_dev *tdev;
+
+	tdev = tdev_t;
+	tsseipc = tdev->ipc;
+	if (tsseipc) {
+		free_irq(pci_irq_vector(tsseipc->pdev, 0), tdev->ipc);
+		tdev->ipc = NULL;
+	}
+}
+
+int tsse_fw_manual_load_ipc(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_ipc *ipc = tdev->ipc;
+	int rc = -EFAULT;
+
+	if (ipc) {
+		ipc_hw_init(ipc);
+		rc = ipc_init_msg(ipc);
+		if (rc)
+			dev_err(&pdev->dev, "ipc_init_msg failed\n");
+	}
+	return rc;
+}
diff --git a/drivers/crypto/montage/tsse/tsse_ipc.h b/drivers/crypto/montage/tsse/tsse_ipc.h
new file mode 100644
index 0000000000000000000000000000000000000000..82f8df71c98371de379f57b3cba1b936309fbec7
--- /dev/null
+++ 
b/drivers/crypto/montage/tsse/tsse_ipc.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TM_HOST_IPC_H__ +#define __TM_HOST_IPC_H__ + +#include +#include +#include + +#define TSSE_PASID_SVA + +#define HOST2MAIN_INTR_SET_OFFSET 0x2000 +#define HOST2MAIN_INTR_ENABLE_OFFSET 0x2004 +#define HOST2MAIN_ACK_INTR_CLR_OFFSET 0x2008 +#define HOST2MAIN_ACK_INTR_ENABLE_OFFSET 0x200c +#define HOST2MAIN_VLD_INTR_STATUS_OFFSET 0x2010 +#define HOST2MAIN_ACK_INTR_STATUS_OFFSET 0x2014 +#define MSIX_MASK_EN_REG_OFFSET 0x2020 +#define INTR_MASK_BIT_OFFSET 0x2024 +#define INTR_PENDING_BIT_OFFSET 0x2028 +#define HOST2MAIN_IPC_OFFSET 0x2400 + +#define MAIN2HOST_INTR_SET_OFFSET 0x3000 +#define MAIN2HOST_INTR_ENABLE_OFFSET 0x3004 +#define MAIN2HOST_ACK_INTR_CLR_OFFSET 0x3008 +#define MAIN2HOST_ACK_INTR_ENABLE_OFFSET 0x300c +#define MAIN2HOST_VEN_MSI_FUNC_NUM_OFFSET 0x3010 +#define MAIN2HOST_VEN_MSI_VFUNC_ACTIVE_OFFSET 0x3014 +#define MAIN2HOST_IPC_OFFSET 0x3400 + +#define IPC_REGISTER_INT_SET BIT(0) +#define IPC_REGISTER_INT_MASK BIT(1) + +enum IPC_BASIC_CMD { + IPC_BASIC_CMD_HOST_INIT = 0x1, + IPC_BASIC_CMD_PING = 0x2 +}; + +enum IPC_BOOT_CMD { + IPC_BOOT_CMD_GET_FIRMWARE = 0x1 +}; + +enum IPC_MESSAGE_CLASS { + IPC_MESSAGE_BASIC = 1, + IPC_MESSAGE_BOOT, + IPC_MESSAGE_CLASS_NUM, +}; + +struct ipc_header { + uint32_t inst_id; + pid_t tgid; + uint32_t i_len; + uint32_t pasid : 20; + uint32_t reserved_1 : 4; + uint32_t pasid_en : 8; + + uint32_t reserved[2]; +}; + +struct ipc_msg { + struct ipc_header header; + uint32_t i_data[]; +}; + +struct fw_load { + uint32_t command; + uint32_t result; + uint8_t name[32]; + uint32_t offset; + uint32_t size; +}; + +struct msg_info { + uint32_t host_id; + uint32_t msg_class; + uint32_t flags; + uint32_t reserved[3]; +}; + +struct ipc_layout { + struct ipc_header header; + struct msg_info info; +}; + +struct tsse_ipc { + struct device *dev; + struct pci_dev *pdev; + void __iomem *virt_addr; + struct mutex list_lock; + struct tasklet_struct ipc_handle; +}; + +int tsse_ipc_init(struct pci_dev *pdev); +void tsse_ipc_deinit(void *tdev); +int tsse_fw_manual_load_ipc(struct pci_dev *pdev); +bool check_send_enbit(struct tsse_ipc *tsseipc); +void notify_device(struct tsse_ipc *tsseipc); +#endif diff --git a/drivers/crypto/montage/tsse/tsse_irq.c b/drivers/crypto/montage/tsse/tsse_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..8cb94fea3da4e2c82a755ae61968fddbb9f6aa70 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_irq.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
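+ *
+ * MSI-X vector allocation helpers. tsse_dev_alloc_irq_vectors() requests
+ * exactly tdev->num_irqs vectors (min == max), so allocation either
+ * succeeds in full or fails outright with no fallback.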
+ */ + +#include +#include +#include "tsse_dev.h" +#include "tsse_irq.h" + +#undef TSSE_IRQ_DBG + +int tsse_dev_alloc_irq_vectors(struct tsse_dev *tdev) +{ + int request_num = tdev->num_irqs; + int irq_num = pci_alloc_irq_vectors(tdev->tsse_pci_dev.pci_dev, + request_num, request_num, + PCI_IRQ_MSIX); + + if (irq_num < 0) { + dev_err(TSSEDEV_TO_DEV(tdev), + "%s %d :failed to alloc MSIX interrupt vectors\n", + __func__, __LINE__); + return irq_num; + } + + return 0; +} diff --git a/drivers/crypto/montage/tsse/tsse_irq.h b/drivers/crypto/montage/tsse/tsse_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..09bed4e6d58a6f9ffe4af746122b79c2a8af95ad --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_irq.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_IRQ_H__ +#define __TSSE_IRQ_H__ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "tsse_dev.h" + +static inline void tsse_dev_free_irq_vectors(struct tsse_dev *tdev) +{ + pci_free_irq_vectors(tdev->tsse_pci_dev.pci_dev); +} + +int tsse_dev_alloc_irq_vectors(struct tsse_dev *tdev); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_log.h b/drivers/crypto/montage/tsse/tsse_log.h new file mode 100644 index 0000000000000000000000000000000000000000..153cbe16374e71456efc3075c3dae6dcd9bd60ac --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_log.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_LOG_H__ +#define __TSSE_LOG_H__ + +#define tsse_dev_err(tssedev, fmt, ...) \ + dev_err(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_warn(tssedev, fmt, ...) \ + dev_warn(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_info(tssedev, fmt, ...) \ + dev_info(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_dbg(tssedev, fmt, ...) \ + dev_dbg(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_service.c b/drivers/crypto/montage/tsse/tsse_service.c new file mode 100644 index 0000000000000000000000000000000000000000..e4be85535b7765f6dc3db73642b7d519950bd715 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_service.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
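+ *
+ * Message dispatch: service_rout() routes each incoming IPC message by
+ * its msg_class; currently only IPC_MESSAGE_BOOT (firmware load) is
+ * handled.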
+ */ +#include +#include "tsse_service.h" + +int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg) +{ + struct msg_info *info; + uint32_t msg_class; + int ret = 0; + + info = (struct msg_info *)msg->i_data; + msg_class = info->msg_class; + switch (msg_class) { + case IPC_MESSAGE_BOOT: + fw_service(tsseipc, msg); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} diff --git a/drivers/crypto/montage/tsse/tsse_service.h b/drivers/crypto/montage/tsse/tsse_service.h new file mode 100644 index 0000000000000000000000000000000000000000..d5fd87ee7dce430146e0560973c8a405a0ef0947 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_service.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_SERVICE_H__ +#define __TSSE_SERVICE_H__ + +#include "tsse_ipc.h" +#include "tsse_fw_service.h" + +int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_vuart.c b/drivers/crypto/montage/tsse/tsse_vuart.c new file mode 100644 index 0000000000000000000000000000000000000000..f49d4ffc9f3c7da2d24fa471b19613109f512191 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_vuart.c @@ -0,0 +1,596 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tsse_dev.h" +#include "tsse_vuart_regs.h" +#include "tsse_vuart.h" + +#ifdef DEBUG +#define VUART_PRINT(fmt, ...) pr_info(fmt, ##__VA_ARGS__) +#else +#define VUART_PRINT(fmt, ...) 
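+/* In non-DEBUG builds VUART_PRINT() expands to nothing, so the trace
+ * calls sprinkled through this file compile away entirely.
+ */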
+#endif + +#define TSSE_VUART_BAUD (38400) +#define TSSE_VUART_MAX_RX_COUNT (256) +#define BOTH_EMPTY (VUART_FSR_TXFIFOE | VUART_FSR_RXFIFO) +struct tsse_vuart { + struct uart_port port; + unsigned int tx_threshold; + unsigned int rx_threshold; + unsigned int tx_loadsz; + unsigned char shutdown; + unsigned char confige_done; +}; + +#define SERIAL_LSR_NAME "tsse_vuart" + +static struct uart_driver g_vuart_reg = { + .owner = THIS_MODULE, + .driver_name = SERIAL_LSR_NAME, + .dev_name = "ttyTSSE", + .nr = TSSE_VUART_MAX_DEV, +}; + +static unsigned int g_trigger_level[4] = { 0, 31, 63, 111 }; +static unsigned long g_line[TSSE_VUART_BITMAP_SIZE]; + +static unsigned int vuart_serial_in(struct uart_port *port, int offset) +{ + unsigned int ret = le32_to_cpu(readl(port->membase + offset)); +#ifdef DEBUG + pr_debug("%s offset 0x%x, v 0x%x\n", __func__, offset, ret); +#endif + return ret; +} + +static void vuart_serial_out(struct uart_port *port, int offset, int value) +{ +#ifdef DEBUG + pr_debug("%s offset 0x%x, v 0x%x\n", __func__, offset, value); +#endif + value = cpu_to_le32(value); + writel(value, port->membase + offset); +} + +static void vuart_wait_for_xmitr(struct uart_port *port) +{ + unsigned int status, tmout = 10000; + + for (;;) { + status = vuart_serial_in(port, VUART_FSR); + if (FIELD_GET(VUART_FSR_TXFIFOE, status)) + break; + if (--tmout == 0) { + pr_err("%s:timeout(10ms), TX is not empty.\n", + __func__); + break; + } + udelay(1); + touch_nmi_watchdog(); + } +} + +static unsigned int vuart_tx_empty(struct uart_port *port) +{ + unsigned long flags; + unsigned int lsr; + + spin_lock_irqsave(&port->lock, flags); + lsr = vuart_serial_in(port, VUART_FSR); + spin_unlock_irqrestore(&port->lock, flags); + + return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; +} + +static void vuart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static unsigned int vuart_get_mctrl(struct uart_port *port) +{ + return 0; +} + +static void vuart_stop_tx(struct uart_port *port) +{ + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + ier = vuart_serial_in(port, VUART_IER); + ier &= ~VUART_IER_HETXEI; + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + int count; + + if (port->x_char) { + pr_err("x_char %d\n", port->x_char); + return; + } + + if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { + vuart_stop_tx(port); + return; + } + + count = vuart->tx_loadsz; + do { + vuart_serial_out(port, VUART_TX, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static void vuart_start_tx(struct uart_port *port) +{ + unsigned int ier, fsr; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + if (uart_tx_stopped(port)) { + vuart_stop_tx(port); + return; + } + + fsr = vuart_serial_in(port, VUART_FSR); + VUART_PRINT("==>Existing Data number in TX FIFO %ld\n", + FIELD_GET(VUART_FSR_TFIFODN, fsr)); + VUART_PRINT("==>Existing Data number in RX FIFO %ld\n", + FIELD_GET(VUART_FSR_RFIFODN, fsr)); + if (fsr & VUART_FSR_TXFIFOE) + vuart_tx_chars(port); + ier = vuart_serial_in(port, VUART_IER); + ier |= VUART_IER_HETXEI | VUART_IER_HETXUI; + 
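+	/* Re-arm the TX-empty (HETXEI) and TX-underflow (HETXUI) interrupts;
+	 * the remaining bytes are then drained from the interrupt path,
+	 * which calls vuart_tx_chars() again. */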
vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_throttle(struct uart_port *port) +{ +} + +static void vuart_unthrottle(struct uart_port *port) +{ +} + +static void vuart_stop_rx(struct uart_port *port) +{ + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + ier = vuart_serial_in(port, VUART_IER); + ier &= ~(VUART_IER_HERXTOI | VUART_IER_HETXDRI | VUART_IER_HERXOI); + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_enable_ms(struct uart_port *port) +{ +} + +static void vuart_break_ctl(struct uart_port *port, int ctl) +{ +} + +static irqreturn_t vuart_interrupt(int irq, void *port) +{ + int handled = 0; + struct uart_port *p = (struct uart_port *)port; + + if (p->handle_irq(p)) + handled = 1; + + return IRQ_RETVAL(handled); +} + +static void vuart_check_config_done(struct uart_port *port) +{ + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (vuart_serial_in(port, VUART_CFG) == 1) + vuart->confige_done = 1; +} + +static int vuart_startup(struct uart_port *port) +{ + unsigned int ret, hcr, ier, fcr = 0; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (port->flags & UPF_SHARE_IRQ) + port->irqflags |= IRQF_SHARED; + ret = request_irq(port->irq, vuart_interrupt, port->irqflags, + "tsse_uart", port); + if (ret) + return ret; + + hcr = vuart_serial_in(port, VUART_HCR); + vuart->rx_threshold = FIELD_GET(VUART_HCR_RFIFOT, hcr); + vuart->tx_threshold = FIELD_GET(VUART_HCR_TFIFOT, hcr); + fcr |= FIELD_PREP(VUART_FCR_RFIFOT, vuart->rx_threshold); + fcr |= FIELD_PREP(VUART_FCR_TFIFOT, vuart->tx_threshold); + fcr |= FIELD_PREP(VUART_FCR_TFIFORST, 1); + fcr |= FIELD_PREP(VUART_FCR_RFIFORST, 1); + vuart_serial_out(port, VUART_FCR, fcr); + + vuart->rx_threshold = g_trigger_level[vuart->rx_threshold]; + vuart->tx_threshold = g_trigger_level[vuart->tx_threshold]; + + vuart_check_config_done(port); + ier = vuart_serial_in(port, VUART_IER); + ier |= VUART_IER_CCFGDI | VUART_IER_HETXDRI | VUART_IER_HERXTOI; + vuart_serial_out(port, VUART_IER, ier); + + vuart_serial_out(port, VUART_SCR, FIELD_PREP(VUART_SCR_SCR, 1)); + + vuart->shutdown = 0; + + return 0; +} + +static void vuart_shutdown(struct uart_port *port) +{ + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + vuart->shutdown = 1; + vuart_stop_rx(port); + vuart_stop_tx(port); + free_irq(port->irq, port); + vuart_serial_out(port, VUART_SCR, 0); +} + +static void vuart_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud; + unsigned long flags; + + if ((termios->c_cflag & CSIZE) != CS8) + pr_err("Warning:termios is not CS8.\n"); + + baud = uart_get_baud_rate(port, termios, old, 0, TSSE_VUART_BAUD); + + spin_lock_irqsave(&port->lock, flags); + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = + VUART_FSR_TXFIFOE | VUART_FSR_TXOE | VUART_FSR_RXDR; + if (termios->c_iflag & INPCK) + port->read_status_mask |= VUART_FSR_RXUE; + + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= VUART_FSR_RXUE; + if (termios->c_iflag & (IGNBRK | IGNPAR)) + port->ignore_status_mask |= VUART_FSR_TXFIFOE; + + if ((termios->c_cflag & CREAD) == 0) { + port->ignore_status_mask |= VUART_FSR_RXDR; + pr_err("Warning:termios is not set CREAD.\n"); + } + + spin_unlock_irqrestore(&port->lock, flags); + + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); +} + +static void 
vuart_set_ldisc(struct uart_port *port, struct ktermios *ktermios)
+{
+}
+
+static void vuart_pm(struct uart_port *port, unsigned int state,
+		     unsigned int oldstate)
+{
+}
+
+static void vuart_release_port(struct uart_port *port)
+{
+}
+
+static int vuart_request_port(struct uart_port *port)
+{
+	return 0;
+}
+
+static void vuart_config_port(struct uart_port *port, int flags)
+{
+	if (flags & UART_CONFIG_TYPE)
+		port->type = PORT_16550A;
+}
+
+static int vuart_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+	if (port->type != PORT_16550A)
+		return -EINVAL;
+	return 0;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static void vuart_poll_put_char(struct uart_port *port, unsigned char c)
+{
+	unsigned int ier_save;
+
+	ier_save = vuart_serial_in(port, VUART_IER);
+	vuart_wait_for_xmitr(port);
+	vuart_serial_out(port, VUART_TX, c);
+
+	vuart_wait_for_xmitr(port);
+	vuart_serial_out(port, VUART_IER, ier_save);
+}
+
+static int vuart_poll_get_char(struct uart_port *port)
+{
+	int status;
+
+	status = vuart_serial_in(port, VUART_FSR);
+	if (!FIELD_GET(VUART_FSR_RXDR, status))
+		return NO_POLL_CHAR;
+
+	return vuart_serial_in(port, VUART_RX);
+}
+
+#endif
+
+static const char *vuart_type(struct uart_port *port)
+{
+	return "tsse_vuart";
+}
+
+static const struct uart_ops vuart_ops = {
+	.tx_empty = vuart_tx_empty,
+	.set_mctrl = vuart_set_mctrl,
+	.get_mctrl = vuart_get_mctrl,
+	.stop_tx = vuart_stop_tx,
+	.start_tx = vuart_start_tx,
+	.throttle = vuart_throttle,
+	.unthrottle = vuart_unthrottle,
+	.stop_rx = vuart_stop_rx,
+	.enable_ms = vuart_enable_ms,
+	.break_ctl = vuart_break_ctl,
+	.startup = vuart_startup,
+	.shutdown = vuart_shutdown,
+	.set_termios = vuart_set_termios,
+	.set_ldisc = vuart_set_ldisc,
+	.pm = vuart_pm,
+	.type = vuart_type,
+	.release_port = vuart_release_port,
+	.request_port = vuart_request_port,
+	.config_port = vuart_config_port,
+	.verify_port = vuart_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = vuart_poll_get_char,
+	.poll_put_char = vuart_poll_put_char,
+#endif
+};
+
+static unsigned int vuart_rx_chars(struct uart_port *port, unsigned int lsr)
+{
+	int max_count = TSSE_VUART_MAX_RX_COUNT;
+	unsigned char ch;
+	struct tty_port *tport = &port->state->port;
+
+	do {
+		if (lsr & VUART_FSR_RXDR)
+			ch = vuart_serial_in(port, VUART_RX);
+		else
+			ch = 0;
+		port->icount.rx++;
+		if (lsr & VUART_FSR_RXUE) {
+			port->icount.overrun++;
+			pr_err("incoming byte underflow, record and clear the interrupt.\n");
+			vuart_serial_out(port, VUART_IIR, VUART_IIR_RXUE);
+		}
+
+		if (!uart_prepare_sysrq_char(port, ch)) {
+			if (tty_insert_flip_char(tport, ch, TTY_NORMAL) == 0)
+				++port->icount.buf_overrun;
+		}
+
+		if (--max_count == 0)
+			break;
+		lsr = vuart_serial_in(port, VUART_FSR);
+	} while (lsr & VUART_FSR_RXDR);
+
+	tty_flip_buffer_push(&port->state->port);
+	return lsr;
+}
+
+static int vuart_deal_irq(struct uart_port *port, unsigned int iir)
+{
+	unsigned int status;
+	unsigned int ier;
+	struct tsse_vuart *vuart = (struct tsse_vuart *)port;
+
+	if (iir & VUART_IIR_CPUCD)
+		vuart->confige_done = 1;
+
+	status = vuart_serial_in(port, VUART_FSR);
+	if (port->read_status_mask & VUART_FSR_RXDR)
+		vuart_rx_chars(port, status);
+	else
+		pr_err("read_status_mask does not set VUART_FSR_RXDR, ignoring RX.\n");
+
+	ier = vuart_serial_in(port, VUART_IER);
+	if (!(status & VUART_FSR_TXOE) && (status & VUART_FSR_TXFIFOE) &&
+	    (ier & VUART_IER_HETXEI))
+		vuart_tx_chars(port);
+
+	return 1;
+}
+
+#ifdef DEBUG
+static void vuart_debug_iir(unsigned int iir)
+{
+	VUART_PRINT("%s called iir %u.\n",
__func__, iir);
+	if (iir & VUART_IIR_TXEI)
+		pr_err("TX FIFO empty interrupt.\n");
+
+	if (iir & VUART_IIR_RXTOI)
+		pr_err("Host RX FIFO character timeout interrupt.\n");
+
+	if (iir & VUART_IIR_RXDAI)
+		pr_err("Host RX FIFO data available interrupt.\n");
+
+	if (iir & VUART_IIR_RXUE)
+		pr_err("HOST RX FIFO Underflow error.\n");
+
+	if (iir & VUART_IIR_TXOE)
+		pr_err("HOST TX FIFO Overrun error.\n");
+
+	if (iir & VUART_IIR_CPUCD)
+		pr_err("CPU has finished configuration for virtual UART.\n");
+
+	if (iir & VUART_IIR_TXFI)
+		pr_err("Host TX FIFO full interrupt.\n");
+}
+#endif
+
+static int vuart_handle_irq(struct uart_port *port)
+{
+	unsigned int iir;
+	unsigned long flags;
+	int ret;
+
+	iir = vuart_serial_in(port, VUART_IIR);
+	vuart_serial_out(port, VUART_IIR, iir);
+#ifdef DEBUG
+	vuart_debug_iir(iir);
+#endif
+	spin_lock_irqsave(&port->lock, flags);
+	ret = vuart_deal_irq(port, iir);
+
+	uart_unlock_and_check_sysrq_irqrestore(port, flags);
+
+	return ret;
+}
+
+static int vuart_get_line(void)
+{
+	int bit = 0;
+
+	bit = find_first_zero_bit(&g_line[0], TSSE_VUART_MAX_DEV);
+	if (bit >= TSSE_VUART_MAX_DEV)
+		return -ENOSPC;
+	set_bit(bit, &g_line[0]);
+	return bit;
+}
+
+static void vuart_free_line(int line)
+{
+	clear_bit(line, &g_line[0]);
+}
+
+int vuart_init_port(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_vuart *vuart = NULL;
+	struct uart_port *p = NULL;
+	int ret = 0;
+	int line = vuart_get_line();
+
+	if (line < 0) {
+		dev_err(&pdev->dev, "too many devices, max is 64.\n");
+		return line;
+	}
+
+	vuart = kzalloc_node(sizeof(struct tsse_vuart), GFP_KERNEL,
+			     dev_to_node(&pdev->dev));
+	if (!vuart) {
+		ret = -ENOMEM;
+		goto zalloc_fail;
+	}
+	vuart->shutdown = 1;
+	p = &(vuart->port);
+	p->mapbase = 0;
+	p->mapsize = 0;
+	p->membase = TSSE_DEV_BARS(tdev)[2].virt_addr + RLS_VUART_OFFSET;
+	p->irq = pci_irq_vector(pdev, RLS_VUART_IRQ_NUM);
+	p->handle_irq = vuart_handle_irq;
+	spin_lock_init(&p->lock);
+	p->line = line;
+	p->type = PORT_16550A;
+	p->uartclk = TSSE_VUART_BAUD * 16;
+	p->iotype = UPIO_MEM;
+	p->ops = &vuart_ops;
+	p->fifosize = 128;
+	vuart->tx_loadsz = 128;
+	p->flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_FIXED_PORT |
+		   UPF_SHARE_IRQ;
+	p->dev = &pdev->dev;
+	p->private_data = tdev;
+
+	tdev->port = (struct uart_port *)vuart;
+	ret = uart_add_one_port(&g_vuart_reg, p);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "add port failed [%d].\n", ret);
+		goto add_port_fail;
+	}
+	return 0;
+add_port_fail:
+	kfree(vuart);
+zalloc_fail:
+	vuart_free_line(line);
+
+	return ret;
+}
+
+void vuart_uninit_port(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_vuart *vuart = (struct tsse_vuart *)(tdev->port);
+
+	if (tdev->port) {
+		if (!vuart->shutdown)
+			free_irq(tdev->port->irq, tdev->port);
+		vuart_free_line(tdev->port->line);
+		uart_remove_one_port(&g_vuart_reg, tdev->port);
+		kfree(vuart);
+	}
+}
+
+int vuart_register(void)
+{
+	return uart_register_driver(&g_vuart_reg);
+}
+
+void vuart_unregister(void)
+{
+	uart_unregister_driver(&g_vuart_reg);
+}
diff --git a/drivers/crypto/montage/tsse/tsse_vuart.h b/drivers/crypto/montage/tsse/tsse_vuart.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ed43368751af9218a4ce279adde3332e2c10c0c
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_vuart.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file is part of tsse driver for Linux
+ *
+ * Copyright © 2023 Montage Technology.
All rights reserved. + */ + +#ifndef __TSSE_VUART_H__ +#define __TSSE_VUART_H__ + +#include + +#define RLS_VUART_OFFSET (0x680000) +#define RLS_VUART_IRQ_NUM (10) +#define TSSE_VUART_MAX_DEV (64) +#define TSSE_VUART_BITMAP_SIZE (ALIGN(TSSE_VUART_MAX_DEV, 64) / 64) + +int vuart_register(void); +void vuart_unregister(void); +int vuart_init_port(struct pci_dev *pdev); +void vuart_uninit_port(struct pci_dev *pdev); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_vuart_regs.h b/drivers/crypto/montage/tsse/tsse_vuart_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..26fa62f5014a573dd4c78e23ebebc46568b0aedd --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_vuart_regs.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_VUART_REGS_H__ +#define __TSSE_VUART_REGS_H__ + +#include +#include + +#define VUART_ID 0x0 +#define VUART_ID_MASK GENMASK(31, 0) + +#define VUART_HCR 0x10 +#define VUART_HCR_RFIFOT GENMASK(3, 2) +#define VUART_HCR_TFIFOT GENMASK(5, 4) + +#define INTRID_NONE BIT(0) +#define INTRID_CPU_LSR (BIT(2) | BIT(1)) +#define INTRID_TRIGGER_LEVEL BIT(2) +#define INTRID_RX_TIMEOUT (BIT(2) | BIT(3)) +#define INTRID_TX_EMPTY BIT(1) + +#define VUART_IIR 0x28 +#define VUART_IIR_TXEI GENMASK(0, 0) +#define VUART_IIR_RXTOI GENMASK(1, 1) +#define VUART_IIR_RXDAI GENMASK(2, 2) +#define VUART_IIR_CPUCD GENMASK(3, 3) +#define VUART_IIR_TXFI GENMASK(4, 4) +#define VUART_IIR_RXUE GENMASK(5, 5) +#define VUART_IIR_TXOE GENMASK(6, 6) + +#define VUART_FCR 0x30 +#define VUART_FCR_TFIFORST GENMASK(0, 0) +#define VUART_FCR_RFIFORST GENMASK(1, 1) +#define VUART_FCR_RFIFOT GENMASK(3, 2) +#define VUART_FCR_TFIFOT GENMASK(5, 4) + +#define VUART_FSR 0x34 +#define VUART_FSR_TXDR GENMASK(0, 0) +#define VUART_FSR_RXDR GENMASK(1, 1) +#define VUART_FSR_RXFIFO GENMASK(2, 2) +#define VUART_FSR_TXFIFOE GENMASK(3, 3) +#define VUART_FSR_RXFIFOF GENMASK(4, 4) +#define VUART_FSR_TXFIFOF GENMASK(5, 5) +#define VUART_FSR_TFIFODN GENMASK(13, 6) +#define VUART_FSR_RFIFODN GENMASK(21, 14) +#define VUART_FSR_TXOE GENMASK(23, 23) +#define VUART_FSR_RXUE GENMASK(24, 24) + +#define VUART_SCR 0x3c +#define VUART_SCR_SCR GENMASK(7, 0) + +#define VUART_TX 0x40 +#define VUART_RX 0x40 + +#define VUART_IER 0x48 +#define VUART_IER_HETXEI GENMASK(0, 0) +#define VUART_IER_HERXTOI GENMASK(1, 1) +#define VUART_IER_HETXDRI GENMASK(2, 2) +#define VUART_IER_CCFGDI GENMASK(3, 3) +#define VUART_IER_HETXFI GENMASK(4, 4) +#define VUART_IER_HETXUI GENMASK(5, 5) +#define VUART_IER_HERXOI GENMASK(6, 6) + +#define VUART_CFG 0x4c +#define VUART_CFG_CCFGD GENMASK(0, 0) + +#endif diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 1be549a07a21976dc20ef45ca1ad8c3c40030ee4..f0c3127941ae2f5cea71a4041aafc3e617af2df9 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -475,7 +475,7 @@ static struct skcipher_alg cbc_aes_alg = { }; static const struct x86_cpu_id padlock_cpu_id[] = { - X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL), + { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_XCRYPT }, {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id); diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 6865c7f1fc1a2343611ed56fd561461bf5d7ec23..04858dc8b59794beabd98dbdfe59a2b1e1305a1a 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -491,7 +491,7 @@ static struct shash_alg 
sha256_alg_nano = { }; static const struct x86_cpu_id padlock_sha_ids[] = { - X86_MATCH_FEATURE(X86_FEATURE_PHE, NULL), + { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_PHE }, {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids); diff --git a/drivers/crypto/zhaoxin-aes.c b/drivers/crypto/zhaoxin-aes.c new file mode 100644 index 0000000000000000000000000000000000000000..e1d029fa9d1ab84df36ef5e6ec9b49d9cb2308be --- /dev/null +++ b/drivers/crypto/zhaoxin-aes.c @@ -0,0 +1,523 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Support for ACE hardware crypto engine. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "1.0.0" + +/* + * Number of data blocks actually fetched for each xcrypt insn. + * Processors with prefetch errata will fetch extra blocks. + */ +static unsigned int ecb_fetch_blocks = 2; +#define MAX_ECB_FETCH_BLOCKS (8) +#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE) + +static unsigned int cbc_fetch_blocks = 1; +#define MAX_CBC_FETCH_BLOCKS (4) +#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE) + +/* Control word. */ +struct cword { + unsigned int __packed + rounds:4, + algo:3, + keygen:1, + interm:1, + encdec:1, + ksize:2; +} __aligned(PADLOCK_ALIGNMENT); + +/* + * Whenever making any changes to the following structure *make sure* you keep E, d_data and cword + * aligned on 16 Bytes boundaries and the Hardware can access 16 * 16 bytes of E and d_data (only + * the first 15 * 16 bytes matter but the HW reads more). + */ +struct aes_ctx { + u32 E[AES_MAX_KEYLENGTH_U32] __aligned(PADLOCK_ALIGNMENT); + u32 d_data[AES_MAX_KEYLENGTH_U32] __aligned(PADLOCK_ALIGNMENT); + struct { + struct cword encrypt; + struct cword decrypt; + } cword; + u32 *D; +}; + +static DEFINE_PER_CPU(struct cword *, zx_paes_last_cword); + +/* Tells whether the ACE is capable to generate the extended key for a given key_len. */ +static inline int aes_hw_extkey_available(uint8_t key_len) +{ + /* + * TODO: We should check the actual CPU model/stepping as it's possible that the + * capability will be added in the next CPU revisions. + */ + if (key_len == 16) + return 1; + return 0; +} + +static inline struct aes_ctx *aes_ctx_common(void *ctx) +{ + unsigned long addr = (unsigned long)ctx; + unsigned long align = PADLOCK_ALIGNMENT; + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + return (struct aes_ctx *)ALIGN(addr, align); +} + +static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) +{ + return aes_ctx_common(crypto_tfm_ctx(tfm)); +} + +static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm) +{ + return aes_ctx_common(crypto_skcipher_ctx(tfm)); +} + +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + struct crypto_aes_ctx gen_aes; + int cpu; + + if (key_len % 8) + return -EINVAL; + + /* + * If the hardware is capable of generating the extended key itself we must supply the + * plain key for both encryption and decryption. + */ + ctx->D = ctx->E; + + ctx->E[0] = le32_to_cpu(key[0]); + ctx->E[1] = le32_to_cpu(key[1]); + ctx->E[2] = le32_to_cpu(key[2]); + ctx->E[3] = le32_to_cpu(key[3]); + + /* Prepare control words. 
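+	 * rounds and ksize below are derived from the key length: key_len
+	 * 16/24/32 (AES-128/192/256) yields rounds 10/12/14 and ksize 0/1/2.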
*/ + memset(&ctx->cword, 0, sizeof(ctx->cword)); + + ctx->cword.decrypt.encdec = 1; + ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; + ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; + ctx->cword.encrypt.ksize = (key_len - 16) / 8; + ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize; + + /* Don't generate extended keys if the hardware can do it. */ + if (aes_hw_extkey_available(key_len)) + goto ok; + + ctx->D = ctx->d_data; + ctx->cword.encrypt.keygen = 1; + ctx->cword.decrypt.keygen = 1; + + if (aes_expandkey(&gen_aes, in_key, key_len)) + return -EINVAL; + + memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); + memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); + +ok: + for_each_online_cpu(cpu) + if (&ctx->cword.encrypt == per_cpu(zx_paes_last_cword, cpu) || + &ctx->cword.decrypt == per_cpu(zx_paes_last_cword, cpu)) + per_cpu(zx_paes_last_cword, cpu) = NULL; + + return 0; +} + +static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len); +} + +/* ====== Encryption/decryption routines ====== */ + +/* These are the real call to PadLock. */ +static inline void padlock_reset_key(struct cword *cword) +{ + int cpu = raw_smp_processor_id(); + + if (cword != per_cpu(zx_paes_last_cword, cpu)) +#ifndef CONFIG_X86_64 + asm volatile ("pushfl; popfl"); +#else + asm volatile ("pushfq; popfq"); +#endif +} + +static inline void padlock_store_cword(struct cword *cword) +{ + per_cpu(zx_paes_last_cword, raw_smp_processor_id()) = cword; +} + +/* + * While the padlock instructions don't use FP/SSE registers, they generate a spurious DNA fault + * when CR0.TS is '1'. Fortunately, the kernel doesn't use CR0.TS. + */ +static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + rep_xcrypt_ecb(tmp, out, key, cword, count); +} + +static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + return rep_xcrypt_cbc(tmp, out, key, iv, cword, count); +} + +static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, struct cword *cword, int count) +{ + /* + * Padlock in ECB mode fetches at least ecb_fetch_bytes of data. + * We could avoid some copying here but it's probably not worth it. 
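+	 * With the default ecb_fetch_blocks of 2, ecb_fetch_bytes is 32, so
+	 * the copy path only triggers when the input starts within 32 bytes
+	 * of the end of a page.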
+ */ + if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) { + ecb_crypt_copy(in, out, key, cword, count); + return; + } + + rep_xcrypt_ecb(in, out, key, cword, count); +} + +static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, + int count) +{ + /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ + if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE)) + return cbc_crypt_copy(in, out, key, iv, cword, count); + + return rep_xcrypt_cbc(in, out, key, iv, cword, count); +} + +static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, void *control_word, + u32 count) +{ + u32 initial = count & (ecb_fetch_blocks - 1); + + if (count < ecb_fetch_blocks) { + ecb_crypt(input, output, key, control_word, count); + return; + } + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, + void *control_word, u32 count) +{ + u32 initial = count & (cbc_fetch_blocks - 1); + + if (count < cbc_fetch_blocks) + return cbc_crypt(input, output, key, iv, control_word, count); + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = padlock_aes_encrypt, + .cia_decrypt = padlock_aes_decrypt, + } + } +}; + +static int ecb_aes_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->E, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static int ecb_aes_decrypt(struct skcipher_request 
*req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.decrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct skcipher_alg ecb_aes_alg = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-padlock", + .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct aes_ctx), + .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = ecb_aes_encrypt, + .decrypt = ecb_aes_decrypt, +}; + +static int cbc_aes_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, + walk.dst.virt.addr, ctx->E, + walk.iv, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + memcpy(walk.iv, iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.decrypt); + + return err; +} + +static int cbc_aes_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, walk.iv, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct skcipher_alg cbc_aes_alg = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-padlock", + .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct aes_ctx), + .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = cbc_aes_encrypt, + .decrypt = cbc_aes_decrypt, +}; + +static const struct x86_cpu_id zhaoxin_cpu_id[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_cpu_id); + +static int __init padlock_init(void) +{ + int ret; + + if (!x86_match_cpu(zhaoxin_cpu_id)) + return -ENODEV; + + if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) { + pr_notice("ACE detected, but not enabled. 
Hmm, strange...\n"); + return -ENODEV; + } + + ret = crypto_register_alg(&aes_alg); + if (!!ret) + goto aes_err; + + ret = crypto_register_skcipher(&ecb_aes_alg); + if (!!ret) + goto ecb_aes_err; + + ret = crypto_register_skcipher(&cbc_aes_alg); + if (!!ret) + goto cbc_aes_err; + + pr_notice("Using ACE for AES algorithm.\n"); + +out: + return ret; + +cbc_aes_err: + crypto_unregister_skcipher(&ecb_aes_alg); +ecb_aes_err: + crypto_unregister_alg(&aes_alg); +aes_err: + pr_err("ACE AES initialization failed.\n"); + goto out; +} + +static void __exit padlock_fini(void) +{ + crypto_unregister_skcipher(&cbc_aes_alg); + crypto_unregister_skcipher(&ecb_aes_alg); + crypto_unregister_alg(&aes_alg); +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("ACE AES algorithm support"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); +MODULE_VERSION(DRIVER_VERSION); + +MODULE_ALIAS_CRYPTO("aes"); diff --git a/drivers/crypto/zhaoxin-sha.c b/drivers/crypto/zhaoxin-sha.c new file mode 100644 index 0000000000000000000000000000000000000000..840805f36838e5a2bf6791684e67f35d8698da90 --- /dev/null +++ b/drivers/crypto/zhaoxin-sha.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Support for ACE hardware crypto engine. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "1.0.0" + +static inline void padlock_output_block(uint32_t *src, uint32_t *dst, size_t count) +{ + while (count--) + *dst++ = swab32(*src++); +} + +/* + * Add two shash_alg instance for hardware-implemented multiple-parts hash + * supported by Zhaoxin Processor. + */ +static int padlock_sha1_init_zhaoxin(struct shash_desc *desc) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + *sctx = (struct sha1_state){ + .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, + }; + + return 0; +} + +static int padlock_sha1_update_zhaoxin(struct shash_desc *desc, const u8 *data, unsigned int len) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + unsigned int partial, done; + const u8 *src; + + /* The PHE require the out buffer must 128 bytes and 16-bytes aligned */ + u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __aligned(STACK_ALIGN); + u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + partial = sctx->count & 0x3f; + sctx->count += len; + done = 0; + src = data; + memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE); + + if ((partial + len) >= SHA1_BLOCK_SIZE) { + /* Append the bytes in state's buffer to a block to handle */ + if (partial) { + done = -partial; + memcpy(sctx->buffer + partial, data, done + SHA1_BLOCK_SIZE); + src = sctx->buffer; + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" + : "+S"(src), "+D"(dst) + : "a"((long)-1), "c"(1UL)); + done += SHA1_BLOCK_SIZE; + src = data + done; + } + + /* Process the left bytes from the input data */ + if (len - done >= SHA1_BLOCK_SIZE) { + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" + : "+S"(src), "+D"(dst) + : "a"((long)-1), "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE))); + done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE); + src = data + done; + } + partial = 0; + } + memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE); + memcpy(sctx->buffer + partial, src, len - done); + + return 0; +} + +static int padlock_sha1_final_zhaoxin(struct shash_desc *desc, u8 *out) +{ + struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc); + unsigned int partial, padlen; + __be64 bits; + static const u8 padding[64] = { 0x80, 
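+		/* a single 0x80 byte followed by zeros: the FIPS 180 message
+		 * padding prefix */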
}; + + bits = cpu_to_be64(state->count << 3); + + /* Pad out to 56 mod 64 */ + partial = state->count & 0x3f; + padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial); + padlock_sha1_update_zhaoxin(desc, padding, padlen); + + /* Append length field bytes */ + padlock_sha1_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits)); + + /* Swap to output */ + padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5); + + return 0; +} + +static int padlock_sha256_init_zhaoxin(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + *sctx = (struct sha256_state) { + .state = { + SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 + }, + }; + + return 0; +} + +static int padlock_sha256_update_zhaoxin(struct shash_desc *desc, const u8 *data, unsigned int len) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + unsigned int partial, done; + const u8 *src; + + /* The PHE require the out buffer must 128 bytes and 16-bytes aligned */ + u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __aligned(STACK_ALIGN); + u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + partial = sctx->count & 0x3f; + sctx->count += len; + done = 0; + src = data; + memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE); + + if ((partial + len) >= SHA256_BLOCK_SIZE) { + + /* Append the bytes in state's buffer to a block to handle */ + if (partial) { + done = -partial; + memcpy(sctx->buf + partial, data, done + SHA256_BLOCK_SIZE); + src = sctx->buf; + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" + : "+S"(src), "+D"(dst) + : "a"((long)-1), "c"(1UL)); + done += SHA256_BLOCK_SIZE; + src = data + done; + } + + /* Process the left bytes from input data */ + if (len - done >= SHA256_BLOCK_SIZE) { + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" + : "+S"(src), "+D"(dst) + : "a"((long)-1), "c"((unsigned long)((len - done) / 64))); + done += ((len - done) - (len - done) % 64); + src = data + done; + } + partial = 0; + } + memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE); + memcpy(sctx->buf + partial, src, len - done); + + return 0; +} + +static int padlock_sha256_final_zhaoxin(struct shash_desc *desc, u8 *out) +{ + struct sha256_state *state = (struct sha256_state *)shash_desc_ctx(desc); + unsigned int partial, padlen; + __be64 bits; + static const u8 padding[64] = { 0x80, }; + + bits = cpu_to_be64(state->count << 3); + + /* Pad out to 56 mod 64 */ + partial = state->count & 0x3f; + padlen = (partial < 56) ? 
(56 - partial) : ((64+56) - partial); + padlock_sha256_update_zhaoxin(desc, padding, padlen); + + /* Append length field bytes */ + padlock_sha256_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits)); + + /* Swap to output */ + padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8); + + return 0; +} + +static int padlock_sha_export_zhaoxin(struct shash_desc *desc, void *out) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, statesize); + return 0; +} + +static int padlock_sha_import_zhaoxin(struct shash_desc *desc, const void *in) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, statesize); + return 0; +} + +static struct shash_alg sha1_alg_zhaoxin = { + .digestsize = SHA1_DIGEST_SIZE, + .init = padlock_sha1_init_zhaoxin, + .update = padlock_sha1_update_zhaoxin, + .final = padlock_sha1_final_zhaoxin, + .export = padlock_sha_export_zhaoxin, + .import = padlock_sha_import_zhaoxin, + .descsize = sizeof(struct sha1_state), + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-padlock-zhaoxin", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg sha256_alg_zhaoxin = { + .digestsize = SHA256_DIGEST_SIZE, + .init = padlock_sha256_init_zhaoxin, + .update = padlock_sha256_update_zhaoxin, + .final = padlock_sha256_final_zhaoxin, + .export = padlock_sha_export_zhaoxin, + .import = padlock_sha_import_zhaoxin, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-padlock-zhaoxin", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static const struct x86_cpu_id zhaoxin_sha_ids[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sha_ids); + +static int __init padlock_init(void) +{ + int rc = -ENODEV; + struct shash_alg *sha1; + struct shash_alg *sha256; + + if (!x86_match_cpu(zhaoxin_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN)) + return -ENODEV; + + sha1 = &sha1_alg_zhaoxin; + sha256 = &sha256_alg_zhaoxin; + + rc = crypto_register_shash(sha1); + if (rc) + goto out; + + rc = crypto_register_shash(sha256); + if (rc) + goto out_unreg1; + + pr_notice("Using ACE for SHA1/SHA256 algorithms.\n"); + + return 0; + +out_unreg1: + crypto_unregister_shash(sha1); + +out: + pr_err("ACE SHA1/SHA256 initialization failed.\n"); + return rc; +} + +static void __exit padlock_fini(void) +{ + crypto_unregister_shash(&sha1_alg_zhaoxin); + crypto_unregister_shash(&sha256_alg_zhaoxin); +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("ACE SHA1/SHA256 algorithms support."); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); +MODULE_VERSION(DRIVER_VERSION); + +MODULE_ALIAS_CRYPTO("sha1-all"); +MODULE_ALIAS_CRYPTO("sha256-all"); +MODULE_ALIAS_CRYPTO("sha1-padlock"); +MODULE_ALIAS_CRYPTO("sha256-padlock"); diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 0da9232ea1754b862455fe6d2d1c0e57398cfde1..6f2bc78f6184c84d09b52d1520c86cfbf3bc956e 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -71,6 +71,20 @@ void dax_remove_host(struct gendisk *disk) } 
EXPORT_SYMBOL_GPL(dax_remove_host); +int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, + pgoff_t *pgoff) +{ + sector_t start_sect = bdev ? get_start_sect(bdev) : 0; + phys_addr_t phys_off = (start_sect + sector) * 512; + + if (pgoff) + *pgoff = PHYS_PFN(phys_off); + if (phys_off % PAGE_SIZE || size % PAGE_SIZE) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL(bdev_dax_pgoff); + /** * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax * @bdev: block device to find a dax_device for diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile index c5e679070e4633ba48a21f3be68dccca45a61383..2b4a0d406e1e713556e9b166f0a09098e9374ea1 100644 --- a/drivers/dma/idxd/Makefile +++ b/drivers/dma/idxd/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o idxd_bus-y := bus.o obj-$(CONFIG_INTEL_IDXD) += idxd.o -idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o +idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o defaults.o idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c index 6f84621053c6784aa895488fa58c7c117f9dbc9b..b83b27e04f2a410e6ce14db8d398df17c84a1c02 100644 --- a/drivers/dma/idxd/bus.c +++ b/drivers/dma/idxd/bus.c @@ -67,11 +67,17 @@ static void idxd_config_bus_remove(struct device *dev) idxd_drv->remove(idxd_dev); } -struct bus_type dsa_bus_type = { +static int idxd_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + return add_uevent_var(env, "MODALIAS=" IDXD_DEVICES_MODALIAS_FMT, 0); +} + +const struct bus_type dsa_bus_type = { .name = "dsa", .match = idxd_config_bus_match, .probe = idxd_config_bus_probe, .remove = idxd_config_bus_remove, + .uevent = idxd_bus_uevent, }; EXPORT_SYMBOL_GPL(dsa_bus_type); diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index c18633ad8455fa0ac4ba0b070ec7c7a2a9c17ed3..57f1bf2ab20be040b477b0062f9e21727baf8526 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -152,7 +152,7 @@ static void idxd_file_dev_release(struct device *dev) mutex_unlock(&wq->wq_lock); } -static struct device_type idxd_cdev_file_type = { +static const struct device_type idxd_cdev_file_type = { .name = "idxd_file", .release = idxd_file_dev_release, .groups = cdev_file_attribute_groups, @@ -165,11 +165,11 @@ static void idxd_cdev_dev_release(struct device *dev) struct idxd_wq *wq = idxd_cdev->wq; cdev_ctx = &ictx[wq->idxd->data->type]; - ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor); + ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor); kfree(idxd_cdev); } -static struct device_type idxd_cdev_device_type = { +static const struct device_type idxd_cdev_device_type = { .name = "idxd_cdev", .release = idxd_cdev_dev_release, }; @@ -539,7 +539,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq) cdev = &idxd_cdev->cdev; dev = cdev_dev(idxd_cdev); cdev_ctx = &ictx[wq->idxd->data->type]; - minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL); + minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL); if (minor < 0) { kfree(idxd_cdev); return minor; @@ -584,6 +584,7 @@ void idxd_wq_del_cdev(struct idxd_wq *wq) static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) { + struct device *dev = &idxd_dev->conf_dev; struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); struct idxd_device *idxd = wq->idxd; int rc; @@ -591,6 +592,14 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) if (idxd->state != IDXD_DEV_ENABLED) return -ENXIO; + mutex_lock(&wq->wq_lock); + + 
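/*
 * Aside, illustrative rather than part of the patch: the driver_name
 * check that follows is a bounded string compare. As defined later in
 * idxd.h, idxd_wq_driver_name_match() boils down to
 *
 *	strncmp(wq->driver_name, dev->driver->name,
 *		strlen(dev->driver->name)) == 0
 *
 * so a wq whose driver_name was configured as "dmaengine" binds only
 * to the idxd dmaengine driver, while the IAA defaults in this series
 * set "crypto" to reserve the wq for a crypto consumer. The attribute
 * can be set from sysfs before binding, e.g. (paths assumed):
 *
 *	echo dmaengine > /sys/bus/dsa/devices/wq0.0/driver_name
 */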
if (!idxd_wq_driver_name_match(wq, dev)) { + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; + rc = -ENODEV; + goto wq_err; + } + /* * User type WQ is enabled only when SVA is enabled for two reasons: * - If no IOMMU or IOMMU Passthrough without SVA, userspace @@ -606,11 +615,10 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) dev_dbg(&idxd->pdev->dev, "User type WQ cannot be enabled without SVA.\n"); - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; + goto wq_err; } - mutex_lock(&wq->wq_lock); - wq->wq = create_workqueue(dev_name(wq_confdev(wq))); if (!wq->wq) { rc = -ENOMEM; @@ -618,7 +626,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) } wq->type = IDXD_WQT_USER; - rc = drv_enable_wq(wq); + rc = idxd_drv_enable_wq(wq); if (rc < 0) goto err; @@ -633,7 +641,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) return 0; err_cdev: - drv_disable_wq(wq); + idxd_drv_disable_wq(wq); err: destroy_workqueue(wq->wq); wq->type = IDXD_WQT_NONE; @@ -648,7 +656,7 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); idxd_wq_del_cdev(wq); - drv_disable_wq(wq); + idxd_drv_disable_wq(wq); wq->type = IDXD_WQT_NONE; destroy_workqueue(wq->wq); wq->wq = NULL; diff --git a/drivers/dma/idxd/defaults.c b/drivers/dma/idxd/defaults.c new file mode 100644 index 0000000000000000000000000000000000000000..c607ae8dd12c614f2b358e033b127dbb9758de81 --- /dev/null +++ b/drivers/dma/idxd/defaults.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2023 Intel Corporation. All rights rsvd. */ +#include +#include "idxd.h" + +int idxd_load_iaa_device_defaults(struct idxd_device *idxd) +{ + struct idxd_engine *engine; + struct idxd_group *group; + struct idxd_wq *wq; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return 0; + + wq = idxd->wqs[0]; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + /* set mode to "dedicated" */ + set_bit(WQ_FLAG_DEDICATED, &wq->flags); + wq->threshold = 0; + + /* only setting up 1 wq, so give it all the wq space */ + wq->size = idxd->max_wq_size; + + /* set priority to 10 */ + wq->priority = 10; + + /* set type to "kernel" */ + wq->type = IDXD_WQT_KERNEL; + + /* set wq group to 0 */ + group = idxd->groups[0]; + wq->group = group; + group->num_wqs++; + + /* set name to "iaa_crypto" */ + memset(wq->name, 0, WQ_NAME_SIZE + 1); + strscpy(wq->name, "iaa_crypto", WQ_NAME_SIZE + 1); + + /* set driver_name to "crypto" */ + memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1); + strscpy(wq->driver_name, "crypto", DRIVER_NAME_SIZE + 1); + + engine = idxd->engines[0]; + + /* set engine group to 0 */ + engine->group = idxd->groups[0]; + engine->group->num_engines++; + + return 0; +} diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 542d340552dd71da662f2665738da14a9c9fadff..c41ef195eeb9f218935520301f3582b9b6787c7d 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -161,6 +161,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq) free_hw_descs(wq); return rc; } +EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, IDXD); void idxd_wq_free_resources(struct idxd_wq *wq) { @@ -174,6 +175,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq) dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); sbitmap_queue_free(&wq->sbq); } +EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, IDXD); int idxd_wq_enable(struct idxd_wq *wq) { @@ -405,6 +407,7 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq) reinit_completion(&wq->wq_resurrect); return 0; } 
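/*
 * Aside, illustrative: the wq helpers exported above and below go into
 * the "IDXD" symbol namespace, so an external consumer (for example a
 * crypto driver built on IAA) must import that namespace before it can
 * link against them:
 *
 *	MODULE_IMPORT_NS(IDXD);
 *
 * after which the usual kernel-wq lifecycle applies, roughly:
 *
 *	rc = idxd_wq_alloc_resources(wq);
 *	...
 *	rc = idxd_submit_desc(wq, desc);
 *	...
 *	idxd_wq_free_resources(wq);
 */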
+EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, IDXD); void __idxd_wq_quiesce(struct idxd_wq *wq) { @@ -414,6 +417,7 @@ void __idxd_wq_quiesce(struct idxd_wq *wq) complete_all(&wq->wq_resurrect); wait_for_completion(&wq->wq_dead); } +EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, IDXD); void idxd_wq_quiesce(struct idxd_wq *wq) { @@ -421,6 +425,7 @@ void idxd_wq_quiesce(struct idxd_wq *wq) __idxd_wq_quiesce(wq); mutex_unlock(&wq->wq_lock); } +EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, IDXD); /* Device control bits */ static inline bool idxd_is_enabled(struct idxd_device *idxd) @@ -1273,7 +1278,7 @@ static void idxd_flush_pending_descs(struct idxd_irq_entry *ie) tx = &desc->txd; tx->callback = NULL; tx->callback_result = NULL; - idxd_dma_complete_txd(desc, ctype, true); + idxd_dma_complete_txd(desc, ctype, true, NULL, NULL); } } @@ -1357,7 +1362,7 @@ int idxd_wq_request_irq(struct idxd_wq *wq) return rc; } -int drv_enable_wq(struct idxd_wq *wq) +int idxd_drv_enable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; @@ -1489,8 +1494,9 @@ int drv_enable_wq(struct idxd_wq *wq) err: return rc; } +EXPORT_SYMBOL_NS_GPL(idxd_drv_enable_wq, IDXD); -void drv_disable_wq(struct idxd_wq *wq) +void idxd_drv_disable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; @@ -1510,6 +1516,7 @@ void drv_disable_wq(struct idxd_wq *wq) wq->type = IDXD_WQT_NONE; wq->client_count = 0; } +EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, IDXD); int idxd_device_drv_probe(struct idxd_dev *idxd_dev) { diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index 07623fb0f52fc2bb160224ecf5d0dc0e625e2b00..cd835eabd31b08c1ccfa41d378743e1845232bdf 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -22,7 +22,7 @@ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c) void idxd_dma_complete_txd(struct idxd_desc *desc, enum idxd_complete_type comp_type, - bool free_desc) + bool free_desc, void *ctx, u32 *status) { struct idxd_device *idxd = desc->wq->idxd; struct dma_async_tx_descriptor *tx; @@ -306,9 +306,15 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) return -ENXIO; mutex_lock(&wq->wq_lock); + if (!idxd_wq_driver_name_match(wq, dev)) { + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; + rc = -ENODEV; + goto err; + } + wq->type = IDXD_WQT_KERNEL; - rc = drv_enable_wq(wq); + rc = idxd_drv_enable_wq(wq); if (rc < 0) { dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc); rc = -ENXIO; @@ -327,7 +333,7 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) return 0; err_dma: - drv_disable_wq(wq); + idxd_drv_disable_wq(wq); err: wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); @@ -341,7 +347,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); __idxd_wq_quiesce(wq); idxd_unregister_dma_channel(wq); - drv_disable_wq(wq); + idxd_drv_disable_wq(wq); mutex_unlock(&wq->wq_lock); } @@ -353,6 +359,7 @@ static enum idxd_dev_type dev_types[] = { struct idxd_device_driver idxd_dmaengine_drv = { .probe = idxd_dmaengine_drv_probe, .remove = idxd_dmaengine_drv_remove, + .desc_complete = idxd_dma_complete_txd, .name = "dmaengine", .type = dev_types, }; diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index bea10c5cdb76bb59cc47da6e65be76f03e644a79..868b724a3b75bb3648f34801cf5c5eedc1489cf4 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "registers.h" @@ -57,11 
+58,23 @@ enum idxd_type { #define IDXD_ENQCMDS_RETRIES 32 #define IDXD_ENQCMDS_MAX_RETRIES 64 +enum idxd_complete_type { + IDXD_COMPLETE_NORMAL = 0, + IDXD_COMPLETE_ABORT, + IDXD_COMPLETE_DEV_FAIL, +}; + +struct idxd_desc; + struct idxd_device_driver { const char *name; enum idxd_dev_type *type; int (*probe)(struct idxd_dev *idxd_dev); void (*remove)(struct idxd_dev *idxd_dev); + void (*desc_complete)(struct idxd_desc *desc, + enum idxd_complete_type comp_type, + bool free_desc, + void *ctx, u32 *status); struct device_driver drv; }; @@ -159,6 +172,8 @@ struct idxd_cdev { int minor; }; +#define DRIVER_NAME_SIZE 128 + #define IDXD_ALLOCATED_BATCH_SIZE 128U #define WQ_NAME_SIZE 1024 #define WQ_TYPE_SIZE 10 @@ -172,12 +187,6 @@ enum idxd_op_type { IDXD_OP_NONBLOCK = 1, }; -enum idxd_complete_type { - IDXD_COMPLETE_NORMAL = 0, - IDXD_COMPLETE_ABORT, - IDXD_COMPLETE_DEV_FAIL, -}; - struct idxd_dma_chan { struct dma_chan chan; struct idxd_wq *wq; @@ -227,6 +236,8 @@ struct idxd_wq { /* Lock to protect upasid_xa access. */ struct mutex uc_lock; struct xarray upasid_xa; + + char driver_name[DRIVER_NAME_SIZE + 1]; }; struct idxd_engine { @@ -266,16 +277,19 @@ struct idxd_dma_dev { struct dma_device dma; }; +typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd); + struct idxd_driver_data { const char *name_prefix; enum idxd_type type; - struct device_type *dev_type; + const struct device_type *dev_type; int compl_size; int align; int evl_cr_off; int cr_status_off; int cr_result_off; bool user_submission_safe; + load_device_defaults_fn_t load_device_defaults; }; struct idxd_evl { @@ -376,6 +390,14 @@ static inline unsigned int evl_size(struct idxd_device *idxd) return idxd->evl->size * evl_ent_size(idxd); } +struct crypto_ctx { + struct acomp_req *req; + struct crypto_tfm *tfm; + dma_addr_t src_addr; + dma_addr_t dst_addr; + bool compress; +}; + /* IDXD software descriptor */ struct idxd_desc { union { @@ -388,7 +410,10 @@ struct idxd_desc { struct iax_completion_record *iax_completion; }; dma_addr_t compl_dma; - struct dma_async_tx_descriptor txd; + union { + struct dma_async_tx_descriptor txd; + struct crypto_ctx crypto; + }; struct llist_node llnode; struct list_head list; int id; @@ -415,6 +440,15 @@ enum idxd_completion_status { #define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev) #define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev) +static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq) +{ + struct device *dev = wq_confdev(wq); + struct idxd_device_driver *idxd_drv = + container_of(dev->driver, struct idxd_device_driver, drv); + + return idxd_drv; +} + static inline struct idxd_device *confdev_to_idxd(struct device *dev) { struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); @@ -484,15 +518,15 @@ static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable) iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); } -extern struct bus_type dsa_bus_type; +extern const struct bus_type dsa_bus_type; extern bool support_enqcmd; extern struct ida idxd_ida; -extern struct device_type dsa_device_type; -extern struct device_type iax_device_type; -extern struct device_type idxd_wq_device_type; -extern struct device_type idxd_engine_device_type; -extern struct device_type idxd_group_device_type; +extern const struct device_type dsa_device_type; +extern const struct device_type iax_device_type; +extern const struct device_type idxd_wq_device_type; +extern const struct device_type 
idxd_engine_device_type; +extern const struct device_type idxd_group_device_type; static inline bool is_dsa_dev(struct idxd_dev *idxd_dev) { @@ -616,6 +650,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq) return wq->client_count; }; +static inline void idxd_wq_set_private(struct idxd_wq *wq, void *private) +{ + dev_set_drvdata(wq_confdev(wq), private); +} + +static inline void *idxd_wq_get_private(struct idxd_wq *wq) +{ + return dev_get_drvdata(wq_confdev(wq)); +} + /* * Intel IAA does not support batch processing. * The max batch size of device, max batch size of wq and @@ -648,6 +692,14 @@ static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wq wqcfg->max_batch_shift = max_batch_shift; } +static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev) +{ + return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0); +} + +#define MODULE_ALIAS_IDXD_DEVICE(type) MODULE_ALIAS("idxd:t" __stringify(type) "*") +#define IDXD_DEVICES_MODALIAS_FMT "idxd:t%d" + int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv, struct module *module, const char *mod_name); #define idxd_driver_register(driver) \ @@ -658,6 +710,24 @@ void idxd_driver_unregister(struct idxd_device_driver *idxd_drv); #define module_idxd_driver(__idxd_driver) \ module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister) +void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc); +void idxd_dma_complete_txd(struct idxd_desc *desc, + enum idxd_complete_type comp_type, + bool free_desc, void *ctx, u32 *status); + +static inline void idxd_desc_complete(struct idxd_desc *desc, + enum idxd_complete_type comp_type, + bool free_desc) +{ + struct idxd_device_driver *drv; + u32 status; + + drv = wq_to_idxd_drv(desc->wq); + if (drv->desc_complete) + drv->desc_complete(desc, comp_type, free_desc, + &desc->txd, &status); +} + int idxd_register_bus_type(void); void idxd_unregister_bus_type(void); int idxd_register_devices(struct idxd_device *idxd); @@ -665,6 +735,7 @@ void idxd_unregister_devices(struct idxd_device *idxd); void idxd_wqs_quiesce(struct idxd_device *idxd); bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc); void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count); +int idxd_load_iaa_device_defaults(struct idxd_device *idxd); /* device interrupt control */ irqreturn_t idxd_misc_thread(int vec, void *data); @@ -675,8 +746,8 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd); /* device control */ int idxd_device_drv_probe(struct idxd_dev *idxd_dev); void idxd_device_drv_remove(struct idxd_dev *idxd_dev); -int drv_enable_wq(struct idxd_wq *wq); -void drv_disable_wq(struct idxd_wq *wq); +int idxd_drv_enable_wq(struct idxd_wq *wq); +void idxd_drv_disable_wq(struct idxd_wq *wq); int idxd_device_init_reset(struct idxd_device *idxd); int idxd_device_enable(struct idxd_device *idxd); int idxd_device_disable(struct idxd_device *idxd); @@ -711,14 +782,11 @@ int idxd_wq_request_irq(struct idxd_wq *wq); /* submission */ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype); -void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc); int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc); /* dmaengine */ int idxd_register_dma_device(struct idxd_device *idxd); void idxd_unregister_dma_device(struct idxd_device *idxd); -void idxd_dma_complete_txd(struct idxd_desc *desc, - enum 
idxd_complete_type comp_type, bool free_desc); /* cdev */ int idxd_cdev_register(void); diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 786afb256b6e0df6e4c682e11bffd288f02ee91d..a7295943fa223353187aa2a295d4fc7fe1b7c4ce 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -61,6 +61,7 @@ static struct idxd_driver_data idxd_driver_data[] = { .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */ .cr_status_off = offsetof(struct iax_completion_record, status), .cr_result_off = offsetof(struct iax_completion_record, error_code), + .load_device_defaults = idxd_load_iaa_device_defaults, }, }; @@ -756,6 +757,12 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err; } + if (data->load_device_defaults) { + rc = data->load_device_defaults(idxd); + if (rc) + dev_warn(dev, "IDXD loading device defaults failed\n"); + } + rc = idxd_register_devices(idxd); if (rc) { dev_err(dev, "IDXD sysfs setup failed\n"); diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index 7efc85b5bad9e91d38cd914cadd0051a04423e16..fc049c9c9892e66302929afd9b0f99b881feca92 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -123,7 +123,7 @@ static void idxd_abort_invalid_int_handle_descs(struct idxd_irq_entry *ie) list_for_each_entry_safe(d, t, &flist, list) { list_del(&d->list); - idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true); + idxd_desc_complete(d, IDXD_COMPLETE_ABORT, true); } } @@ -433,8 +433,8 @@ irqreturn_t idxd_misc_thread(int vec, void *data) val |= IDXD_INTC_ERR; for (i = 0; i < 4; i++) - dev_warn(dev, "err[%d]: %#16.16llx\n", - i, idxd->sw_err.bits[i]); + dev_warn_ratelimited(dev, "err[%d]: %#16.16llx\n", + i, idxd->sw_err.bits[i]); err = true; } @@ -533,7 +533,7 @@ static void idxd_int_handle_resubmit_work(struct work_struct *work) */ if (rc != -EAGAIN) { desc->completion->status = IDXD_COMP_DESC_ABORT; - idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, false); + idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, false); } idxd_free_desc(wq, desc); } @@ -574,11 +574,11 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry) * and 0xff, which DSA_COMP_STATUS_MASK can mask out. 
*/ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { - idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true); + idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true); continue; } - idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true); + idxd_desc_complete(desc, IDXD_COMPLETE_NORMAL, true); } else { spin_lock(&irq_entry->list_lock); list_add_tail(&desc->list, @@ -619,11 +619,11 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry) list_del(&desc->list); if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { - idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true); + idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true); continue; } - idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true); + idxd_desc_complete(desc, IDXD_COMPLETE_NORMAL, true); } } diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index cfbcd1adb1d1c2eebbf8101911673d438ba58c02..e16dbf9ab324c5ac363b2102eff802cca75f2bd4 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -437,12 +437,14 @@ union wqcfg { /* * This macro calculates the offset into the GRPCFG register * idxd - struct idxd * - * n - wq id - * ofs - the index of the 32b dword for the config register + * n - group id + * ofs - the index of the 64b qword for the config register * - * The WQCFG register block is divided into groups per each wq. The n index - * allows us to move to the register group that's for that particular wq. - * Each register is 32bits. The ofs gives us the number of register to access. + * The GRPCFG register block is divided into three sub-registers, which + * are GRPWQCFG, GRPENGCFG and GRPFLGCFG. The n index allows us to move + * to the register block that contains the three sub-registers. + * Each register block is 64 bits, and the ofs gives us the offset + * within the GRPWQCFG register to access.
*/ #define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\ (n) * GRPCFG_SIZE + sizeof(u64) * (ofs)) diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c index 3f922518e3a525f22b49c56ae655a670c63aa10e..817a564413b08eb9c4fcd1d1d2676f67ce7dbf10 100644 --- a/drivers/dma/idxd/submit.c +++ b/drivers/dma/idxd/submit.c @@ -61,6 +61,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype) return __get_desc(wq, idx, cpu); } +EXPORT_SYMBOL_NS_GPL(idxd_alloc_desc, IDXD); void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) { @@ -69,6 +70,7 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) desc->cpu = -1; sbitmap_queue_clear(&wq->sbq, desc->id, cpu); } +EXPORT_SYMBOL_NS_GPL(idxd_free_desc, IDXD); static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, struct idxd_desc *desc) @@ -125,7 +127,8 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, spin_unlock(&ie->list_lock); if (found) - idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false); + idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false, + NULL, NULL); /* * completing the descriptor will return desc to allocator and @@ -135,7 +138,8 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, */ list_for_each_entry_safe(d, t, &flist, list) { list_del_init(&d->list); - idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true); + idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true, + NULL, NULL); } } @@ -215,3 +219,4 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) percpu_ref_put(&wq->wq_active); return 0; } +EXPORT_SYMBOL_NS_GPL(idxd_submit_desc, IDXD); diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 1fd5a93045f79efeff0e1a07b8c770aa4d3ec3ac..f706eae0e76b1faa1cbdcb2a6e67c30b3e6d888d 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -91,7 +91,7 @@ static void idxd_conf_engine_release(struct device *dev) kfree(engine); } -struct device_type idxd_engine_device_type = { +const struct device_type idxd_engine_device_type = { .name = "engine", .release = idxd_conf_engine_release, .groups = idxd_engine_attribute_groups, @@ -577,7 +577,7 @@ static void idxd_conf_group_release(struct device *dev) kfree(group); } -struct device_type idxd_group_device_type = { +const struct device_type idxd_group_device_type = { .name = "group", .release = idxd_conf_group_release, .groups = idxd_group_attribute_groups, @@ -1282,6 +1282,39 @@ static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *a static struct device_attribute dev_attr_wq_op_config = __ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store); +static ssize_t wq_driver_name_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = confdev_to_wq(dev); + + return sysfs_emit(buf, "%s\n", wq->driver_name); +} + +static ssize_t wq_driver_name_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_wq *wq = confdev_to_wq(dev); + char *input, *pos; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (strlen(buf) > DRIVER_NAME_SIZE || strlen(buf) == 0) + return -EINVAL; + + input = kstrndup(buf, count, GFP_KERNEL); + if (!input) + return -ENOMEM; + + pos = strim(input); + memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1); + sprintf(wq->driver_name, "%s", pos); + kfree(input); + return count; +} + +static struct device_attribute dev_attr_wq_driver_name = + 
__ATTR(driver_name, 0644, wq_driver_name_show, wq_driver_name_store); + static struct attribute *idxd_wq_attributes[] = { &dev_attr_wq_clients.attr, &dev_attr_wq_state.attr, @@ -1301,6 +1334,7 @@ static struct attribute *idxd_wq_attributes[] = { &dev_attr_wq_occupancy.attr, &dev_attr_wq_enqcmds_retries.attr, &dev_attr_wq_op_config.attr, + &dev_attr_wq_driver_name.attr, NULL, }; @@ -1358,7 +1392,7 @@ static void idxd_conf_wq_release(struct device *dev) kfree(wq); } -struct device_type idxd_wq_device_type = { +const struct device_type idxd_wq_device_type = { .name = "wq", .release = idxd_conf_wq_release, .groups = idxd_wq_attribute_groups, @@ -1787,13 +1821,13 @@ static void idxd_conf_device_release(struct device *dev) kfree(idxd); } -struct device_type dsa_device_type = { +const struct device_type dsa_device_type = { .name = "dsa", .release = idxd_conf_device_release, .groups = idxd_attribute_groups, }; -struct device_type iax_device_type = { +const struct device_type iax_device_type = { .name = "iax", .release = idxd_conf_device_release, .groups = idxd_attribute_groups, diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index bb4ff8c86733bf358b1ecb53142ea8c7b2efb176..1e6cc5cdf52a03d8397a9e06debcd35169f8661b 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -419,6 +419,7 @@ static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, { struct xgene_dma_desc_hw *desc1, *desc2; size_t len = *nbytes; + __le64 *ext8; int i; desc1 = &desc_sw->desc1; @@ -440,9 +441,12 @@ static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, /* Set 1st to 5th source addresses */ for (i = 0; i < src_cnt; i++) { len = *nbytes; - xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 : - xgene_dma_lookup_ext8(desc2, i - 1), - &len, &src[i]); + if (i == 0) + ext8 = &desc1->m1; + else + ext8 = xgene_dma_lookup_ext8(desc2, i - 1); + + xgene_dma_set_src_buffer(ext8, &len, &src[i]); desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8))); } diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 9cd86390a16747e311a9349b341b3e513205ebf7..d0ab59e80fb09aa969f375371c318ddba87ca87b 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -96,6 +96,17 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, return pcibios_err_to_errno(err); } +static u32 get_umc_base_f18h_m4h(u16 node, u8 channel) +{ + struct pci_dev *f3 = node_to_amd_nb(node)->misc; + u8 df_id; + + get_df_id(f3, &df_id); + df_id -= 4; + + return get_umc_base(channel) + (0x80000000 + (0x10000000 * df_id)); +} + /* * Select DCT to which PCI cfg accesses are routed */ @@ -1137,8 +1148,11 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr ctx.nid = nid; ctx.inst_id = umc; - /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */ - if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp)) + /* Read DramOffset, check if base 1 is used. 
*/ + if (hygon_f18h_m4h() && + df_indirect_read_instance(nid, 0, 0x214, umc, &ctx.tmp)) + goto out_err; + else if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp)) goto out_err; /* Remove HiAddrOffset from normalized address, if enabled: */ @@ -1162,6 +1176,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr goto out_err; } + intlv_num_sockets = 0; + if (hygon_f18h_m4h()) + intlv_num_sockets = (ctx.tmp >> 2) & 0x3; lgcy_mmio_hole_en = ctx.tmp & BIT(1); intlv_num_chan = (ctx.tmp >> 4) & 0xF; intlv_addr_sel = (ctx.tmp >> 8) & 0x7; @@ -1178,7 +1195,8 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp)) goto out_err; - intlv_num_sockets = (ctx.tmp >> 8) & 0x1; + if (!hygon_f18h_m4h()) + intlv_num_sockets = (ctx.tmp >> 8) & 0x1; intlv_num_dies = (ctx.tmp >> 10) & 0x3; dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0); @@ -1196,6 +1214,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr hash_enabled = true; break; default: + if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x4 && + intlv_num_chan == 2) + break; pr_err("%s: Invalid number of interleaved channels %d.\n", __func__, intlv_num_chan); goto out_err; @@ -1214,8 +1235,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* Add a bit if sockets are interleaved. */ num_intlv_bits += intlv_num_sockets; - /* Assert num_intlv_bits <= 4 */ - if (num_intlv_bits > 4) { + /* Assert num_intlv_bits in the correct range. */ + if ((hygon_f18h_m4h() && num_intlv_bits > 7) || + (!hygon_f18h_m4h() && num_intlv_bits > 4)) { pr_err("%s: Invalid interleave bits %d.\n", __func__, num_intlv_bits); goto out_err; @@ -1234,7 +1256,10 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp)) goto out_err; - cs_fabric_id = (ctx.tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) + cs_fabric_id = (ctx.tmp >> 8) & 0x7FF; + else + cs_fabric_id = (ctx.tmp >> 8) & 0xFF; die_id_bit = 0; /* If interleaved over more than 1 channel: */ @@ -1254,8 +1279,13 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* If interleaved over more than 1 die. */ if (intlv_num_dies) { sock_id_bit = die_id_bit + intlv_num_dies; - die_id_shift = (ctx.tmp >> 24) & 0xF; - die_id_mask = (ctx.tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) { + die_id_shift = (ctx.tmp >> 12) & 0xF; + die_id_mask = ctx.tmp & 0x7FF; + } else { + die_id_shift = (ctx.tmp >> 24) & 0xF; + die_id_mask = (ctx.tmp >> 8) & 0xFF; + } cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit; } @@ -1263,7 +1293,10 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* If interleaved over more than 1 socket. 
*/ if (intlv_num_sockets) { socket_id_shift = (ctx.tmp >> 28) & 0xF; - socket_id_mask = (ctx.tmp >> 16) & 0xFF; + if (hygon_f18h_m4h()) + socket_id_mask = (ctx.tmp >> 16) & 0x7FF; + else + socket_id_mask = (ctx.tmp >> 16) & 0xFF; cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit; } @@ -1610,7 +1643,10 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt) u32 i, tmp, umc_base; for_each_umc(i) { - umc_base = get_umc_base(i); + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); umc = &pvt->umc[i]; edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); @@ -1719,11 +1755,17 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) u32 mask_reg, mask_reg_sec; u32 *base, *base_sec; u32 *mask, *mask_sec; + u32 umc_base; int cs, umc; for_each_umc(umc) { - umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR; - umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC; + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, umc); + else + umc_base = get_umc_base(umc); + + umc_base_reg = umc_base + UMCCH_BASE_ADDR; + umc_base_reg_sec = umc_base + UMCCH_BASE_ADDR_SEC; for_each_chip_select(cs, umc, pvt) { base = &pvt->csels[umc].csbases[cs]; @@ -1741,8 +1783,8 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) umc, cs, *base_sec, base_reg_sec); } - umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK; - umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC); + umc_mask_reg = umc_base + UMCCH_ADDR_MASK; + umc_mask_reg_sec = umc_base + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC); for_each_chip_select_mask(cs, umc, pvt) { mask = &pvt->csels[umc].csmasks[cs]; @@ -1825,7 +1867,8 @@ static void umc_determine_memory_type(struct amd64_pvt *pvt) * Check if the system supports the "DDR Type" field in UMC Config * and has DDR5 DIMMs in use. 
*/ - if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { + if ((pvt->flags.zn_regs_v2 || hygon_f18h_m4h()) && + ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { if (umc->dimm_cfg & BIT(5)) umc->dram_type = MEM_LRDDR5; else if (umc->dimm_cfg & BIT(4)) @@ -3059,7 +3102,11 @@ static inline void decode_bus_error(int node_id, struct mce *m) */ static void umc_get_err_info(struct mce *m, struct err_info *err) { - err->channel = (m->ipid & GENMASK(31, 0)) >> 20; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + err->channel = (m->ipid & GENMASK(23, 0)) >> 20; + else + err->channel = (m->ipid & GENMASK(31, 0)) >> 20; err->csrow = m->synd & 0x7; } @@ -3070,6 +3117,7 @@ static void decode_umc_error(int node_id, struct mce *m) struct amd64_pvt *pvt; struct err_info err; u64 sys_addr; + u8 umc; node_id = fixup_node_id(node_id, m); @@ -3100,7 +3148,12 @@ static void decode_umc_error(int node_id, struct mce *m) pvt->ops->get_err_info(m, &err); - if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { + if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x6) + umc = err.channel << 1; + else + umc = err.channel; + + if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, umc, &sys_addr)) { err.err_code = ERR_NORM_ADDR; goto log_error; } @@ -3174,8 +3227,11 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt) /* Read registers from each UMC */ for_each_umc(i) { + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); - umc_base = get_umc_base(i); umc = &pvt->umc[i]; amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg); @@ -4094,6 +4150,24 @@ static int per_family_init(struct amd64_pvt *pvt) break; case 0x18: + if (pvt->model == 0x4) { + pvt->ctl_name = "F18h_M04h"; + pvt->max_mcs = 3; + break; + } else if (pvt->model == 0x5) { + pvt->ctl_name = "F18h_M05h"; + pvt->max_mcs = 1; + break; + } else if (pvt->model == 0x6) { + pvt->ctl_name = "F18h_M06h"; + break; + } else if (pvt->model == 0x7) { + pvt->ctl_name = "F18h_M07h"; + break; + } else if (pvt->model == 0x10) { + pvt->ctl_name = "F18h_M10h"; + break; + } pvt->ctl_name = "F18h"; break; @@ -4357,6 +4431,7 @@ static int __init amd64_edac_init(void) { const char *owner; int err = -ENODEV; + u16 instance_num; int i; if (ghes_get_devices()) @@ -4374,8 +4449,13 @@ static int __init amd64_edac_init(void) opstate_init(); + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + err = -ENOMEM; - ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL); + ecc_stngs = kcalloc(instance_num, sizeof(ecc_stngs[0]), GFP_KERNEL); if (!ecc_stngs) goto err_free; @@ -4383,7 +4463,7 @@ static int __init amd64_edac_init(void) if (!msrs) goto err_free; - for (i = 0; i < amd_nb_num(); i++) { + for (i = 0; i < instance_num; i++) { err = probe_one_instance(i); if (err) { /* unwind properly */ @@ -4428,6 +4508,7 @@ static int __init amd64_edac_init(void) static void __exit amd64_edac_exit(void) { + u16 instance_num; int i; if (pci_ctl) @@ -4439,7 +4520,12 @@ static void __exit amd64_edac_exit(void) else amd_unregister_ecc_decoder(decode_bus_error); - for (i = 0; i < amd_nb_num(); i++) + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + + for (i = 0; i < instance_num; i++) remove_one_instance(i); kfree(ecc_stngs); diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 
9215c06783df5f19dd64cb07b2f275f9ff8ee118..06e29d2b51d1ed32071d0371369bb1e9de2a3ba1 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -1187,8 +1187,13 @@ static void decode_smca_error(struct mce *m) pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]); if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) && - xec == 0 && decode_dram_ecc) - decode_dram_ecc(topology_die_id(m->extcpu), m); + xec == 0 && decode_dram_ecc) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + decode_dram_ecc(topology_logical_die_id(m->extcpu), m); + else + decode_dram_ecc(topology_die_id(m->extcpu), m); + } } static inline void amd_decode_err_code(u16 ec) diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index f0e9f250669e2fa12a7798ef18bb4e30e0d2e93a..cc4716c037a67eefd4d8317b90889a3c1eb6e27e 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -155,7 +155,7 @@ config RASPBERRYPI_FIRMWARE config FW_CFG_SYSFS tristate "QEMU fw_cfg device support in sysfs" - depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86) + depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86 || SW64) depends on HAS_IOPORT_MAP default n help diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 3e8051fe829657294cbb29c49c2b3848f98a990f..fe638e40aebb6c785fa654ea2cfff3967bdee6ad 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -43,8 +43,6 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id, /* entry point from firmware to arch asm code */ static unsigned long sdei_entry_point; -static int sdei_hp_state; - struct sdei_event { /* These three are protected by the sdei_list_lock */ struct list_head list; @@ -188,6 +186,28 @@ int sdei_api_event_context(u32 query, u64 *result) } NOKPROBE_SYMBOL(sdei_api_event_context); +int sdei_api_event_interrupt_bind(int hwirq) +{ + u64 event_number; + + invoke_sdei_fn(SDEI_1_0_FN_SDEI_INTERRUPT_BIND, hwirq, 0, 0, 0, 0, + &event_number); + + return (int)event_number; +} + +int sdei_api_clear_eoi(int hwirq) +{ + return invoke_sdei_fn(SDEI_1_0_FN_SDEI_CLEAR_EOI, hwirq, 0, 0, 0, 0, + NULL); +} + +int sdei_api_set_secure_timer_period(int sec) +{ + return invoke_sdei_fn(SDEI_1_0_FN_SET_SECURE_TIMER_PERIOD, sec, 0, 0, 0, + 0, NULL); +} + static int sdei_api_event_get_info(u32 event, u32 info, u64 *result) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0, @@ -379,7 +399,7 @@ static int sdei_platform_reset(void) return err; } -static int sdei_api_event_enable(u32 event_num) +int sdei_api_event_enable(u32 event_num) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0, 0, NULL); @@ -426,7 +446,7 @@ int sdei_event_enable(u32 event_num) return err; } -static int sdei_api_event_disable(u32 event_num) +int sdei_api_event_disable(u32 event_num) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0, 0, 0, NULL); @@ -763,7 +783,7 @@ static int sdei_device_freeze(struct device *dev) int err; /* unregister private events */ - cpuhp_remove_state(sdei_hp_state); + cpuhp_remove_state(CPUHP_AP_ARM_SDEI_ONLINE); err = sdei_unregister_shared(); if (err) @@ -784,15 +804,12 @@ static int sdei_device_thaw(struct device *dev) return err; } - err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI", + err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_ONLINE, "SDEI", &sdei_cpuhp_up, &sdei_cpuhp_down); - if (err < 0) { + if (err) pr_warn("Failed to re-register CPU hotplug notifier...\n"); - return 
err; - } - - sdei_hp_state = err; - - return 0; + return err; } static int sdei_device_restore(struct device *dev) @@ -824,7 +841,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action, * We are going to reset the interface, after this there is no point * doing work when we take CPUs offline. */ - cpuhp_remove_state(sdei_hp_state); + cpuhp_remove_state(CPUHP_AP_ARM_SDEI_ONLINE); sdei_platform_reset(); @@ -1004,15 +1021,13 @@ static int sdei_probe(struct platform_device *pdev) goto remove_cpupm; } - err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI", + err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_ONLINE, "SDEI", &sdei_cpuhp_up, &sdei_cpuhp_down); - if (err < 0) { + if (err) { pr_warn("Failed to register CPU hotplug notifier...\n"); goto remove_reboot; } - sdei_hp_state = err; - return 0; remove_reboot: diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 231f1c70d1db548566b56a63bc52dcbe324a4a3d..b76f9df4885a464040d381159fc5d0746c6e9ba1 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -224,7 +224,7 @@ config EFI_DISABLE_PCI_DMA config EFI_EARLYCON def_bool y - depends on SERIAL_EARLYCON && !ARM && !IA64 + depends on SERIAL_EARLYCON && !ARM && !IA64 && !SW64 select FONT_SUPPORT select ARCH_USE_MEMREMAP_PROT @@ -301,3 +301,12 @@ config UEFI_CPER_X86 bool depends on UEFI_CPER && X86 default y + +config YITIAN_CPER_RAWDATA + bool "Print Yitian custom raw data about platform error info" + depends on EFI && ACPI && ARM64 + help + Allow printing of Yitian custom raw data about platform error info, + including CMN, GIC, SMMU, DDR, etc. It gathers more useful error + information from hardware, which helps to debug and test RAS + features. diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index e489fefd23dae0bc21baaf48be6c1499ca5d63c7..7c1b924e8ea3fc24a0460b0011d5da7cb174e760 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -35,8 +35,11 @@ obj-$(CONFIG_SYSFB) += sysfb_efi.o arm-obj-$(CONFIG_EFI) := efi-init.o arm-runtime.o obj-$(CONFIG_ARM) += $(arm-obj-y) obj-$(CONFIG_ARM64) += $(arm-obj-y) +sw64-obj-$(CONFIG_EFI) := sunway-init.o sunway-runtime.o +obj-$(CONFIG_SW64) += $(sw64-obj-y) riscv-obj-$(CONFIG_EFI) := efi-init.o riscv-runtime.o obj-$(CONFIG_RISCV) += $(riscv-obj-y) +#obj-$(CONFIG_LOONGARCH) += efi-init.o obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o obj-$(CONFIG_EFI_EARLYCON) += earlycon.o obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 35c37f667781c7071c714aef274e68dbddca026b..993467ea49dffb84bcd38cc88c5071349736cfe3 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -141,6 +141,59 @@ static const char * const proc_flag_strs[] = { "corrected", }; +static const char *const zdi_zpi_err_type_strs[] = { + "No Error", + "Training Error Status (PHY)", + "Data Link Protocol Error Status (DLL)", + "Surprise Down Error Status", + "Flow Control Protocol Error Status (TL)", + "Receiver Overflow Status (TL)", + "Receiver Error Status (PHY)", + "Bad TLP Status (DLL)", + "Bad Data Link Layer Packet (DLLP) Status (DLL)", + "REPLAY_NUM Rollover Status (DLL)", + "Replay Timer Timeout Status (DLL)", + "X16 Link Width Unreliable Status", + "ZPI X8 Link Width Unreliable Status", + "ZPI X4 Link Width Unreliable Status", + "ZPI X2 Link Width Unreliable Status", + "ZPI Gen3 Link Speed Unreliable Status", + "ZPI Gen2 Link Speed Unreliable Status", + "ZDI Gen3 Link Speed Unreliable
Status", + "ZDI Gen4 Link Speed Unreliable Status", +}; + +const char *cper_zdi_zpi_err_type_str(unsigned int etype) +{ + return etype < ARRAY_SIZE(zdi_zpi_err_type_strs) ? + zdi_zpi_err_type_strs[etype] : "unknown error"; +} +EXPORT_SYMBOL_GPL(cper_zdi_zpi_err_type_str); + +static void cper_print_proc_generic_zdi_zpi(const char *pfx, + const struct cper_sec_proc_generic *zdi_zpi) +{ +#if IS_ENABLED(CONFIG_X86) + u8 etype = zdi_zpi->responder_id; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) { + if ((zdi_zpi->requestor_id & 0xff) == 7) { + pr_info("%s general processor error(zpi error)\n", pfx); + } else if ((zdi_zpi->requestor_id & 0xff) == 6) { + pr_info("%s general processor error(zdi error)\n", pfx); + } else { + pr_info("%s general processor error(unknown error)\n", pfx); + return; + } + pr_info("%s bus number %llx device number %llx function number 0\n", pfx, + ((zdi_zpi->requestor_id)>>8) & 0xff, zdi_zpi->requestor_id & 0xff); + pr_info("%s apic id %lld error_type: %s\n", pfx, zdi_zpi->proc_id, + cper_zdi_zpi_err_type_str(etype)); + } +#endif +} + static void cper_print_proc_generic(const char *pfx, const struct cper_sec_proc_generic *proc) { @@ -184,6 +237,8 @@ static void cper_print_proc_generic(const char *pfx, pfx, proc->responder_id); if (proc->validation_bits & CPER_PROC_VALID_IP) printk("%s""IP: 0x%016llx\n", pfx, proc->ip); + + cper_print_proc_generic_zdi_zpi(pfx, proc); } static const char * const mem_err_type_strs[] = { @@ -623,6 +678,142 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata pr_err(FW_WARN "error section length is too small\n"); } +#ifdef CONFIG_YITIAN_CPER_RAWDATA +static char *yitian_raw_err_type_str(u64 type) +{ + switch (type) { + case ERR_TYPE_GENERIC: return "GENERIC"; + case ERR_TYPE_CORE: return "CORE"; + case ERR_TYPE_GIC: return "GIC"; + case ERR_TYPE_CMN: return "CMN"; + case ERR_TYPE_SMMU: return "SMMU"; + case ERR_TYPE_DDR: return "DDR"; + case ERR_TYPE_PCI: return "PCI"; + default: return "Reserved"; + } +} + +void yitian_platform_raw_data_print(const char *pfx, + struct yitian_raw_data_header *header) +{ + struct yitian_ras_common_reg *common_reg; + int sub_record_no = 0; + + yitian_estatus_for_each_raw_reg_common(header, common_reg, sub_record_no) { + pr_info("%s sub_type: 0x%x\n", pfx, + header->sub_type[sub_record_no]); + pr_info("%s fr: 0x%llx, ctrl: 0x%llx, status: 0x%llx, addr: 0x%llx\n", + pfx, common_reg->fr, common_reg->ctrl, + common_reg->status, common_reg->addr); + pr_info("%s misc0: 0x%llx, misc1: 0x%llx, misc2: 0x%llx, misc3: 0x%llx\n", + pfx, common_reg->misc0, common_reg->misc1, + common_reg->misc2, common_reg->misc3); + } +} + +static void yitian_ddr_raw_data_print(const char *pfx, + struct yitian_raw_data_header *header) +{ + struct yitian_ddr_raw_data *data; + + data = (struct yitian_ddr_raw_data *)(header + 1); + + switch (data->ex_type) { + case 0x1: + pr_info("%s Synchronous Exception taken in EL%d\n", pfx, data->el_nr); + break; + case 0x2: + pr_info("%s Interrupt: %d\n", pfx, data->intr); + break; + case 0x3: + pr_info("%s SError\n", pfx); + break; + default: + pr_info("%s Unknown interrupt type\n", pfx); + } + + /* System regs is valid only when it's a synchronous exception */ + if (data->ex_type == 1) { + struct yitian_ddr_sys_reg *sys_regs = &data->sys_regs; + + pr_info("%s ESR: 0x%llx, ELR: 0x%llx, FAR: 0x%llx, SCR: 0x%llx, SCTLR: 0x%llx, LR: 0x%llx\n", + pfx, sys_regs->esr, sys_regs->elr, sys_regs->far, + sys_regs->scr, 
sys_regs->sctlr, sys_regs->lr); + } + + /* ECC data is valid only when it's an ECC error */ + if (data->err_type == 1) { + struct yitian_ddr_ecc_data *ecc_data = &data->ecc_data; + + pr_info("%s ECCERRCNT: 0x%x, ECCSTAT: 0x%x, ADVECCSTAT: 0x%x, ECCSYMBOL: 0x%x, ECCERRCNTSTAT: 0x%x, ECCERRCNT0: 0x%x, ECCERRCNT1: 0x%x, ECCCADDR0: 0x%x, ECCCADDR1: 0x%x, ECCCDATA0: 0x%x, ECCCDATA1: 0x%x, ECCUADDR0: 0x%x, ECCUADDR1: 0x%x, ECCUDATA0: 0x%x, ECCUDATA1: 0x%x\n", + pfx, ecc_data->eccerrcnt, ecc_data->eccstat, + ecc_data->adveccstat, ecc_data->eccsymbol, + ecc_data->eccerrcntstat, ecc_data->eccerrcnt0, + ecc_data->eccerrcnt1, ecc_data->ecccaddr0, + ecc_data->ecccaddr1, ecc_data->ecccdata0, + ecc_data->ecccdata1, ecc_data->eccuaddr0, + ecc_data->eccuaddr1, ecc_data->eccudata0, + ecc_data->eccudata1); + } +} + +bool yitian_estatus_check_header(const struct acpi_hest_generic_status *estatus) +{ + struct yitian_raw_data_header *header; + + if (estatus->raw_data_length < sizeof(*header)) + return false; + + header = (struct yitian_raw_data_header *)((void *)estatus + + estatus->raw_data_offset); + +#define YITIAN_SIGNATURE_16(A, B) ((A) | (B << 8)) +#define YITIAN_SIGNATURE_32(A, B, C, D) \ + (YITIAN_SIGNATURE_16(A, B) | (YITIAN_SIGNATURE_16(C, D) << 16)) + + if (header->signature != YITIAN_SIGNATURE_32('r', 'a', 'w', 'd')) + return false; + + /* + * ONLY processor, CMN, GIC, and SMMU have raw error data, which follows + * any Generic Error Data Entries. The raw error data format is vendor + * implementation defined. + */ + if (!header->common_reg_nr) + return false; + + return true; +} + +void yitian_raw_data_print(const char *pfx, + const struct acpi_hest_generic_status *estatus) +{ + struct yitian_raw_data_header *header; + + if (!yitian_estatus_check_header(estatus)) + return; + + header = (struct yitian_raw_data_header *)((void *)estatus + + estatus->raw_data_offset); + + pr_info("%s type: %s (0x%x), common_reg_nr:%d\n", pfx, + yitian_raw_err_type_str(header->type), header->type, + header->common_reg_nr); + + switch (header->type) { + case ERR_TYPE_CORE: + case ERR_TYPE_GIC: + case ERR_TYPE_CMN: + case ERR_TYPE_SMMU: + yitian_platform_raw_data_print(pfx, header); + break; + case ERR_TYPE_DDR: + yitian_ddr_raw_data_print(pfx, header); + break; + } +} +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ + void cper_estatus_print(const char *pfx, const struct acpi_hest_generic_status *estatus) { @@ -643,6 +834,11 @@ void cper_estatus_print(const char *pfx, cper_estatus_print_section(newpfx, gdata, sec_no); sec_no++; } + +#ifdef CONFIG_YITIAN_CPER_RAWDATA + if (estatus->raw_data_length) + yitian_raw_data_print(pfx, estatus); +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ } EXPORT_SYMBOL_GPL(cper_estatus_print); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 2c1095dcc2f2f82e4d5a5515372246cdf1a5bab9..f5b7f34e806970b28aedb8225510b4dac9484c9a 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -809,7 +809,7 @@ int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr) return 0; } -#ifndef CONFIG_IA64 +#if !defined(CONFIG_IA64) && !defined(CONFIG_SW64) static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor, size_t size) { diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c index d0ef93551c44f64affb5a152b277a8b0c4e827fb..3782d0a187d1f50d91a803ccd5a57094204314ca 100644 --- a/drivers/firmware/efi/libstub/loongarch.c +++ b/drivers/firmware/efi/libstub/loongarch.c @@ -74,6 +74,8 @@ efi_status_t
efi_boot_kernel(void *handle, efi_loaded_image_t *image, /* Config Direct Mapping */ csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0); csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1); + csr_write64(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2); + csr_write64(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3); real_kernel_entry = (void *)kernel_entry_address(kernel_addr, image); diff --git a/drivers/firmware/efi/sunway-init.c b/drivers/firmware/efi/sunway-init.c new file mode 100644 index 0000000000000000000000000000000000000000..870abc2f5afea3ab7c4b9674fc0c4c7a4b93b1e9 --- /dev/null +++ b/drivers/firmware/efi/sunway-init.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013 - 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#define pr_fmt(fmt) "efi: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +unsigned long entSuspend; +unsigned long bios_version; + +static int __init is_memory(efi_memory_desc_t *md) +{ + if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC)) + return 1; + return 0; +} +static efi_config_table_type_t arch_tables[] __initdata = { + {SMBIOS3_TABLE_GUID, NULL, ""}, + {SLEEP_ENTRY_GUID, &entSuspend, "SLEEP ENTRY"}, + {BIOS_VERSION_GUID, &bios_version, "BIOS VERSION"}, + {}, +}; + +static int __init uefi_init(u64 efi_system_table) +{ + efi_char16_t *c16; + efi_config_table_t *config_tables; + efi_system_table_t *systab; + size_t table_size; + char vendor[100] = "unknown"; + int i, retval; + + systab = early_memremap(efi_system_table, + sizeof(efi_system_table_t)); + if (systab == NULL) { + pr_warn("Unable to map EFI system table.\n"); + return -ENOMEM; + } + + set_bit(EFI_BOOT, &efi.flags); + if (IS_ENABLED(CONFIG_64BIT)) + set_bit(EFI_64BIT, &efi.flags); + + /* + * Verify the EFI Table + */ + if (systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { + pr_err("System table signature incorrect\n"); + retval = -EINVAL; + goto out; + } + if ((systab->hdr.revision >> 16) < 2) + pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", + systab->hdr.revision >> 16, + systab->hdr.revision & 0xffff); + + efi.runtime = systab->runtime; + efi.runtime_version = systab->hdr.revision; + + /* Show what we know for posterity */ + c16 = early_memremap(systab->fw_vendor, + sizeof(vendor) * sizeof(efi_char16_t)); + if (c16) { + for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) + vendor[i] = c16[i]; + vendor[i] = '\0'; + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); + } + + pr_info("EFI v%u.%.02u by %s\n", + systab->hdr.revision >> 16, + systab->hdr.revision & 0xffff, vendor); + + table_size = sizeof(efi_config_table_64_t) * systab->nr_tables; + config_tables = early_memremap(systab->tables, table_size); + if (config_tables == NULL) { + pr_warn("Unable to map EFI config table array.\n"); + retval = -ENOMEM; + goto out; + } + + retval = efi_config_parse_tables(config_tables, systab->nr_tables, + arch_tables); + + early_memunmap(config_tables, table_size); +out: + early_memunmap(systab, sizeof(efi_system_table_t)); + + if (!bios_version) + retval = -EINVAL; + + return retval; +} + +/* + * Return true for regions that can be used as System RAM. 
+ */ +static __init int is_usable_memory(efi_memory_desc_t *md) +{ + switch (md->type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_ACPI_RECLAIM_MEMORY: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_CONVENTIONAL_MEMORY: + case EFI_PERSISTENT_MEMORY: + /* + * According to the spec, these regions are no longer reserved + * after calling ExitBootServices(). However, we can only use + * them as System RAM if they can be mapped writeback cacheable. + */ + return (md->attribute & EFI_MEMORY_WB); + default: + break; + } + return false; +} + +static __init void reserve_regions(void) +{ + efi_memory_desc_t *md; + u64 paddr, npages, size; + + if (efi_enabled(EFI_DBG)) + pr_info("Processing EFI memory map:\n"); + + for_each_efi_memory_desc(md) { + paddr = md->phys_addr; + npages = md->num_pages; + + if (efi_enabled(EFI_DBG)) { + char buf[64]; + + pr_info(" 0x%012llx-0x%012llx %s\n", + paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, + efi_md_typeattr_format(buf, sizeof(buf), md)); + } + + memrange_efi_to_native(&paddr, &npages); + size = npages << PAGE_SHIFT; + + if (is_memory(md)) { + early_init_dt_add_memory_arch(paddr, size); + + if (!is_usable_memory(md)) + memblock_mark_nomap(paddr, size); + + /* keep ACPI reclaim memory intact for kexec etc. */ + if (md->type == EFI_ACPI_RECLAIM_MEMORY) + memblock_reserve(paddr, size); + } + } +} + +void __init efi_init(void) +{ + struct efi_memory_map_data data; + u64 efi_system_table; + + if (sunway_boot_params->efi_systab == 0) { + pr_info("System table does not exist, disabling EFI.\n"); + return; + } + + /* Grab UEFI information placed in struct boot_params by stub */ + efi_system_table = sunway_boot_params->efi_systab; + if (!efi_system_table) + return; + + data.desc_version = sunway_boot_params->efi_memdesc_version; + data.desc_size = sunway_boot_params->efi_memdesc_size; + data.size = sunway_boot_params->efi_memmap_size; + data.phys_map = sunway_boot_params->efi_memmap; + + if (efi_memmap_init_early(&data) < 0) { + /* + * If we are booting via UEFI, the UEFI memory map is the only + * description of memory we have, so there is little point in + * proceeding if we cannot access it. + */ + panic("Unable to map EFI memory map.\n"); + } + + WARN(efi.memmap.desc_version != 1, + "Unexpected EFI_MEMORY_DESCRIPTOR version %ld", + efi.memmap.desc_version); + + if (uefi_init(efi_system_table) < 0) { + efi_memmap_unmap(); + return; + } + + reserve_regions(); + + memblock_reserve(sunway_boot_params->efi_memmap & PAGE_MASK, + PAGE_ALIGN(sunway_boot_params->efi_memmap_size + + (sunway_boot_params->efi_memmap & ~PAGE_MASK))); + +} diff --git a/drivers/firmware/efi/sunway-runtime.c b/drivers/firmware/efi/sunway-runtime.c new file mode 100644 index 0000000000000000000000000000000000000000..6bd96cff7d5d8f82dbc96e7f62224b5d37b4df6c --- /dev/null +++ b/drivers/firmware/efi/sunway-runtime.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013, 2014 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation.
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * Enable the UEFI Runtime Services if all prerequisites are in place, i.e., + * non-early mapping of the UEFI system table and virtual mappings for all + * EFI_MEMORY_RUNTIME regions. + */ +static int __init sunway_enable_runtime_services(void) +{ + u64 mapsize; + + if (!efi_enabled(EFI_BOOT)) { + pr_info("EFI services will not be available.\n"); + return 0; + } + + efi_memmap_unmap(); + + mapsize = efi.memmap.desc_size * efi.memmap.nr_map; + + if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) { + pr_err("Failed to remap EFI memory map\n"); + return 0; + } + + if (efi_runtime_disabled()) { + pr_info("EFI runtime services will be disabled.\n"); + return 0; + } + + if (efi_enabled(EFI_RUNTIME_SERVICES)) { + pr_info("EFI runtime services access via paravirt.\n"); + return 0; + } + + /* Set up runtime services function pointers */ + efi_native_runtime_setup(); + set_bit(EFI_RUNTIME_SERVICES, &efi.flags); + + return 0; +} +early_initcall(sunway_enable_runtime_services); + + +static int __init sunway_dmi_init(void) +{ + /* + * On SW64, DMI depends on UEFI, and dmi_scan_machine() needs to + * be called early because dmi_id_init(), which is an arch_initcall + * itself, depends on dmi_scan_machine() having been called already. + */ + dmi_setup(); + return 0; +} +core_initcall(sunway_dmi_init); diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index a69399a6b7c0052fc5f66948e7928bbc9969ef3f..f4fea1ec3201c057ce25be6acf540d0f38d28194 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -211,7 +211,7 @@ static void fw_cfg_io_cleanup(void) /* arch-specific ctrl & data register offsets are not available in ACPI, DT */ #if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF)) -# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64)) +# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_SW64)) # define FW_CFG_CTRL_OFF 0x08 # define FW_CFG_DATA_OFF 0x00 # define FW_CFG_DMA_OFF 0x10 diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index ebd4e113dc265411ea9317821dd0e0fee7e9e7d1..509f42e6ab6a770a69c0eb96e4fe0ac024d8b4c3 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -246,6 +246,15 @@ config GPIO_DWAPB Say Y or M here to build support for the Synopsys DesignWare APB GPIO block. +config GPIO_SUNWAY + tristate "Sunway gpio driver" + depends on SW64 + select GPIO_GENERIC + select GENERIC_IRQ_CHIP + help + Say Y or M here to build support for the Sunway + GPIO block. 
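For context on the qemu_fw_cfg change above: the fallback MMIO layout it extends to SW64 places the data window at offset 0x00, the selector (control) register at 0x08 and the DMA register at 0x10. Per the fw_cfg interface, an item is selected by writing its 16-bit key big-endian to the selector, then read byte-wise from the data window. A minimal sketch, not part of the patch (the helper name and the ioremap'd ``base`` pointer are illustrative; key 0x0000 is the "QEMU" signature item)::

    #include <linux/io.h>
    #include <linux/string.h>

    #define FW_CFG_CTRL_OFF   0x08
    #define FW_CFG_DATA_OFF   0x00
    #define FW_CFG_SIGNATURE  0x0000   /* selects the "QEMU" signature item */

    static bool fw_cfg_has_qemu_sig(void __iomem *base)
    {
            char sig[4];
            int i;

            /* select the item: 16-bit big-endian key at the control offset */
            iowrite16be(FW_CFG_SIGNATURE, base + FW_CFG_CTRL_OFF);

            /* consecutive byte reads step through the selected item */
            for (i = 0; i < 4; i++)
                    sig[i] = ioread8(base + FW_CFG_DATA_OFF);

            return !memcmp(sig, "QEMU", 4);
    }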
+ config GPIO_EIC_SPRD tristate "Spreadtrum EIC support" depends on ARCH_SPRD || COMPILE_TEST diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index eb73b5d633ebad2dc397d7894901421e845ee390..e44a700ec7d3dfa6e59fb59ae54672e54c74ca18 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -195,3 +195,4 @@ obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o obj-$(CONFIG_GPIO_ZYNQMP_MODEPIN) += gpio-zynqmp-modepin.o +obj-$(CONFIG_GPIO_SUNWAY) += gpio-sunway.o diff --git a/drivers/gpio/gpio-sunway.c b/drivers/gpio/gpio-sunway.c new file mode 100644 index 0000000000000000000000000000000000000000..b9c6848317dbf48ef74aa066dcfe574e4c717fce --- /dev/null +++ b/drivers/gpio/gpio-sunway.c @@ -0,0 +1,861 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011 Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * All enquiries to support@picochip.com + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gpiolib.h" +#include "gpiolib-acpi.h" + + +#define GPIO_SWPORTA_DR (0x00UL<<7) +#define GPIO_SWPORTA_DDR (0X04UL<<7) +#define GPIO_SWPORTB_DR (0X0CUL<<7) +#define GPIO_SWPORTB_DDR (0X10UL<<7) +#define GPIO_SWPORTC_DR (0x18UL<<7) +#define GPIO_SWPORTC_DDR (0x1cUL<<7) +#define GPIO_SWPORTD_DR (0x24UL<<7) +#define GPIO_SWPORTD_DDR (0x28UL<<7) +#define GPIO_INTEN (0x30UL<<7) +#define GPIO_INTMASK (0x34UL<<7) +#define GPIO_INTTYPE_LEVEL (0x38UL<<7) +#define GPIO_INT_POLARITY (0x3cUL<<7) +#define GPIO_INTSTATUS (0x40UL<<7) +#define GPIO_PORTA_DEBOUNCE (0x48UL<<7) +#define GPIO_PORTA_EOI (0x4cUL<<7) +#define GPIO_EXT_PORTA (0x50UL<<7) +#define GPIO_EXT_PORTB (0x54UL<<7) +#define GPIO_EXT_PORTC (0x58UL<<7) +#define GPIO_EXT_PORTD (0x5cUL<<7) + +#define DWAPB_MAX_PORTS 4 +#define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */ +#define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */ +#define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */ + +#define GPIO_REG_OFFSET_V2 1 + +#define GPIO_INTMASK_V2 0x44 +#define GPIO_INTTYPE_LEVEL_V2 0x34 +#define GPIO_INT_POLARITY_V2 0x38 +#define GPIO_INTSTATUS_V2 0x3c +#define GPIO_PORTA_EOI_V2 0x40 + +struct sunway_gpio; + +#ifdef CONFIG_PM_SLEEP +/* Store GPIO context across system-wide suspend/resume transitions */ +struct sunway_context { + u32 data; + u32 dir; + u32 ext; + u32 int_en; + u32 int_mask; + u32 int_type; + u32 int_pol; + u32 int_deb; + u32 wake_en; +}; +#endif + +struct sunway_gpio_port { + struct gpio_chip gc; + bool is_registered; + struct sunway_gpio *gpio; +#ifdef CONFIG_PM_SLEEP + struct sunway_context *ctx; +#endif + unsigned int idx; +}; + +struct sunway_gpio { + struct device *dev; + void __iomem *regs; + struct sunway_gpio_port *ports; + unsigned int nr_ports; + struct irq_domain *domain; + unsigned int flags; + struct reset_control *rst; + struct clk *clk; +}; + +static inline u32 gpio_reg_v2_convert(unsigned int offset) +{ + switch (offset) { + case GPIO_INTMASK: + return GPIO_INTMASK_V2; + case GPIO_INTTYPE_LEVEL: + return GPIO_INTTYPE_LEVEL_V2; + case GPIO_INT_POLARITY: + return GPIO_INT_POLARITY_V2; + case GPIO_INTSTATUS: + return GPIO_INTSTATUS_V2; + case GPIO_PORTA_EOI: + return 
GPIO_PORTA_EOI_V2; + } + + return offset; +} + +static inline u32 gpio_reg_convert(struct sunway_gpio *gpio, unsigned int offset) +{ + if (gpio->flags & GPIO_REG_OFFSET_V2) + return gpio_reg_v2_convert(offset); + + return offset; +} + +static inline u32 sunway_read(struct sunway_gpio *gpio, unsigned int offset) +{ + struct gpio_chip *gc = &gpio->ports[0].gc; + void __iomem *reg_base = gpio->regs; + + return gc->read_reg(reg_base + gpio_reg_convert(gpio, offset)); +} + +static inline void sunway_write(struct sunway_gpio *gpio, unsigned int offset, + u32 val) +{ + struct gpio_chip *gc = &gpio->ports[0].gc; + void __iomem *reg_base = gpio->regs; + + gc->write_reg(reg_base + gpio_reg_convert(gpio, offset), val); +} + +static int sunway_gpio_to_irq(struct gpio_chip *gc, unsigned int offset) +{ + struct sunway_gpio_port *port = gpiochip_get_data(gc); + struct sunway_gpio *gpio = port->gpio; + + return irq_find_mapping(gpio->domain, offset); +} + +static struct sunway_gpio_port *sunway_offs_to_port(struct sunway_gpio *gpio, unsigned int offs) +{ + struct sunway_gpio_port *port; + int i; + + for (i = 0; i < gpio->nr_ports; i++) { + port = &gpio->ports[i]; + if (port->idx == offs / 32) + return port; + } + + return NULL; +} + +static void sunway_toggle_trigger(struct sunway_gpio *gpio, unsigned int offs) +{ + struct sunway_gpio_port *port = sunway_offs_to_port(gpio, offs); + struct gpio_chip *gc; + u32 pol; + int val; + + if (!port) + return; + gc = &port->gc; + + pol = sunway_read(gpio, GPIO_INT_POLARITY); + /* Just read the current value right out of the data register */ + val = gc->get(gc, offs % 32); + if (val) + pol &= ~BIT(offs); + else + pol |= BIT(offs); + + sunway_write(gpio, GPIO_INT_POLARITY, pol); +} + +static u32 sunway_do_irq(struct sunway_gpio *gpio) +{ + u32 irq_status = sunway_read(gpio, GPIO_INTSTATUS); + u32 ret = irq_status; + + while (irq_status) { + int hwirq = fls(irq_status) - 1; + int gpio_irq = irq_find_mapping(gpio->domain, hwirq); + + generic_handle_irq(gpio_irq); + irq_status &= ~BIT(hwirq); + + if ((irq_get_trigger_type(gpio_irq) & IRQ_TYPE_SENSE_MASK) + == IRQ_TYPE_EDGE_BOTH) + sunway_toggle_trigger(gpio, hwirq); + } + + return ret; +} + +static void sunway_irq_handler(struct irq_desc *desc) +{ + struct sunway_gpio *gpio = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + + sunway_do_irq(gpio); + + if (chip->irq_eoi) + chip->irq_eoi(irq_desc_get_irq_data(desc)); +} + +static void sunway_irq_enable(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + val = sunway_read(gpio, GPIO_INTEN); + val |= BIT(d->hwirq); + sunway_write(gpio, GPIO_INTEN, val); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); +} + +static void sunway_irq_disable(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + val = sunway_read(gpio, GPIO_INTEN); + val &= ~BIT(d->hwirq); + sunway_write(gpio, GPIO_INTEN, val); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); +} + +static int sunway_irq_reqres(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = 
&gpio->ports[0].gc; + int ret; + + ret = gpiochip_lock_as_irq(gc, irqd_to_hwirq(d)); + if (ret) { + dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n", + irqd_to_hwirq(d)); + return ret; + } + return 0; +} + +static void sunway_irq_relres(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + + gpiochip_unlock_as_irq(gc, irqd_to_hwirq(d)); +} + +static int sunway_irq_set_type(struct irq_data *d, u32 type) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + int bit = d->hwirq; + unsigned long level, polarity, flags; + + if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | + IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + return -EINVAL; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + level = sunway_read(gpio, GPIO_INTTYPE_LEVEL); + polarity = sunway_read(gpio, GPIO_INT_POLARITY); + + switch (type) { + case IRQ_TYPE_EDGE_BOTH: + level |= BIT(bit); + sunway_toggle_trigger(gpio, bit); + break; + case IRQ_TYPE_EDGE_RISING: + level |= BIT(bit); + polarity |= BIT(bit); + break; + case IRQ_TYPE_EDGE_FALLING: + level |= BIT(bit); + polarity &= ~BIT(bit); + break; + case IRQ_TYPE_LEVEL_HIGH: + level &= ~BIT(bit); + polarity |= BIT(bit); + break; + case IRQ_TYPE_LEVEL_LOW: + level &= ~BIT(bit); + polarity &= ~BIT(bit); + break; + } + + irq_setup_alt_chip(d, type); + + sunway_write(gpio, GPIO_INTTYPE_LEVEL, level); + if (type != IRQ_TYPE_EDGE_BOTH) + sunway_write(gpio, GPIO_INT_POLARITY, polarity); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sunway_irq_set_wake(struct irq_data *d, unsigned int enable) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct sunway_context *ctx = gpio->ports[0].ctx; + + if (enable) + ctx->wake_en |= BIT(d->hwirq); + else + ctx->wake_en &= ~BIT(d->hwirq); + + return 0; +} +#endif + +static int sunway_gpio_set_debounce(struct gpio_chip *gc, + unsigned int offset, unsigned int debounce) +{ + struct sunway_gpio_port *port = gpiochip_get_data(gc); + struct sunway_gpio *gpio = port->gpio; + unsigned long flags, val_deb; + unsigned long mask = BIT(offset); + + spin_lock_irqsave(&gc->bgpio_lock, flags); + + val_deb = sunway_read(gpio, GPIO_PORTA_DEBOUNCE); + if (debounce) + sunway_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb | mask); + else + sunway_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb & ~mask); + + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} + +static int sunway_gpio_set_config(struct gpio_chip *gc, unsigned int offset, + unsigned long config) +{ + u32 debounce; + + if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) + return -ENOTSUPP; + + debounce = pinconf_to_config_argument(config); + return sunway_gpio_set_debounce(gc, offset, debounce); +} + +static irqreturn_t sunway_irq_handler_mfd(int irq, void *dev_id) +{ + u32 worked; + struct sunway_gpio *gpio = dev_id; + + worked = sunway_do_irq(gpio); + + return worked ? 
IRQ_HANDLED : IRQ_NONE; +} + +static void sunway_configure_irqs(struct sunway_gpio *gpio, + struct sunway_gpio_port *port, + struct sunway_port_property *pp) +{ + struct gpio_chip *gc = &port->gc; + struct fwnode_handle *fwnode = pp->fwnode; + struct irq_chip_generic *irq_gc = NULL; + unsigned int hwirq, ngpio = gc->ngpio; + struct irq_chip_type *ct; + int err, i; + + gpio->domain = irq_domain_create_linear(fwnode, ngpio, + &irq_generic_chip_ops, gpio); + if (!gpio->domain) + return; + + err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, + "gpio-dwapb", handle_level_irq, + IRQ_NOREQUEST, 0, + IRQ_GC_INIT_NESTED_LOCK); + if (err) { + dev_info(gpio->dev, "irq_alloc_domain_generic_chips failed\n"); + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + + irq_gc = irq_get_domain_generic_chip(gpio->domain, 0); + if (!irq_gc) { + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + + irq_gc->reg_base = gpio->regs; + irq_gc->private = gpio; + + for (i = 0; i < 2; i++) { + ct = &irq_gc->chip_types[i]; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_set_type = sunway_irq_set_type; + ct->chip.irq_enable = sunway_irq_enable; + ct->chip.irq_disable = sunway_irq_disable; + ct->chip.irq_request_resources = sunway_irq_reqres; + ct->chip.irq_release_resources = sunway_irq_relres; +#ifdef CONFIG_PM_SLEEP + ct->chip.irq_set_wake = sunway_irq_set_wake; +#endif + ct->regs.ack = gpio_reg_convert(gpio, GPIO_PORTA_EOI); + ct->regs.mask = gpio_reg_convert(gpio, GPIO_INTMASK); + ct->type = IRQ_TYPE_LEVEL_MASK; + } + + irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK; + irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; + irq_gc->chip_types[1].handler = handle_edge_irq; + + if (!pp->irq_shared) { + int i; + + for (i = 0; i < pp->ngpio; i++) { + if (pp->irq[i] >= 0) + irq_set_chained_handler_and_data(pp->irq[i], + sunway_irq_handler, gpio); + } + } else { + /* + * Request a shared IRQ for the MFD case, where several + * sub-devices may be using the same IRQ pin. + */ + err = devm_request_irq(gpio->dev, pp->irq[0], + sunway_irq_handler_mfd, + IRQF_SHARED, "gpio-dwapb-mfd", gpio); + if (err) { + dev_err(gpio->dev, "error requesting IRQ\n"); + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + } + + for (hwirq = 0 ; hwirq < ngpio ; hwirq++) + irq_create_mapping(gpio->domain, hwirq); + + port->gc.to_irq = sunway_gpio_to_irq; +} + +static void sunway_irq_teardown(struct sunway_gpio *gpio) +{ + struct sunway_gpio_port *port = &gpio->ports[0]; + struct gpio_chip *gc = &port->gc; + unsigned int ngpio = gc->ngpio; + irq_hw_number_t hwirq; + + if (!gpio->domain) + return; + + for (hwirq = 0 ; hwirq < ngpio ; hwirq++) + irq_dispose_mapping(irq_find_mapping(gpio->domain, hwirq)); + + irq_domain_remove(gpio->domain); + gpio->domain = NULL; +} + +static int sunway_gpio_add_port(struct sunway_gpio *gpio, + struct sunway_port_property *pp, + unsigned int offs) +{ + struct sunway_gpio_port *port; + void __iomem *dat, *set, *dirout; + int err; + + port = &gpio->ports[offs]; + port->gpio = gpio; + port->idx = pp->idx; + +#ifdef CONFIG_PM_SLEEP + port->ctx = devm_kzalloc(gpio->dev, sizeof(*port->ctx), GFP_KERNEL); + if (!port->ctx) + return -ENOMEM; +#endif + + dat = gpio->regs + GPIO_EXT_PORTA + (pp->idx * GPIO_EXT_PORT_STRIDE); + set = gpio->regs + GPIO_SWPORTA_DR + (pp->idx * GPIO_SWPORT_DR_STRIDE); + dirout = gpio->regs + GPIO_SWPORTA_DDR + + (pp->idx * GPIO_SWPORT_DDR_STRIDE); + + /* 
This registers 32 GPIO lines per port */ + err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout, + NULL, 0); + if (err) { + dev_err(gpio->dev, "failed to init gpio chip for port%d\n", + port->idx); + return err; + } + +#ifdef CONFIG_OF_GPIO + port->gc.of_node = to_of_node(pp->fwnode); +#endif + port->gc.ngpio = pp->ngpio; + port->gc.base = pp->gpio_base; + + /* Only port A supports debounce */ + if (pp->idx == 0) + port->gc.set_config = sunway_gpio_set_config; + + if (pp->has_irq) + sunway_configure_irqs(gpio, port, pp); + + err = gpiochip_add_data(&port->gc, port); + if (err) + dev_err(gpio->dev, "failed to register gpiochip for port%d\n", + port->idx); + else + port->is_registered = true; + + /* Add GPIO-signaled ACPI event support */ + if (pp->has_irq) + acpi_gpiochip_request_interrupts(&port->gc); + + return err; +} + +static void sunway_gpio_unregister(struct sunway_gpio *gpio) +{ + unsigned int m; + + for (m = 0; m < gpio->nr_ports; ++m) + if (gpio->ports[m].is_registered) + gpiochip_remove(&gpio->ports[m].gc); +} + +static struct sunway_platform_data * +sunway_gpio_get_pdata(struct device *dev) +{ + struct fwnode_handle *fwnode; + struct sunway_platform_data *pdata; + struct sunway_port_property *pp; + int nports; + int i, j; + + nports = device_get_child_node_count(dev); + if (nports == 0) + return ERR_PTR(-ENODEV); + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->properties = devm_kcalloc(dev, nports, sizeof(*pp), GFP_KERNEL); + if (!pdata->properties) + return ERR_PTR(-ENOMEM); + + pdata->nports = nports; + + i = 0; + device_for_each_child_node(dev, fwnode) { + struct device_node *np = NULL; + + pp = &pdata->properties[i++]; + pp->fwnode = fwnode; + + if (fwnode_property_read_u32(fwnode, "reg", &pp->idx) || + pp->idx >= DWAPB_MAX_PORTS) { + dev_err(dev, + "missing/invalid port index for port%d\n", i); + fwnode_handle_put(fwnode); + return ERR_PTR(-EINVAL); + } + + if (fwnode_property_read_u32(fwnode, "snps,nr-gpios", + &pp->ngpio)) { + dev_info(dev, + "failed to get number of gpios for port%d\n", + i); + pp->ngpio = 32; + } + + pp->irq_shared = false; + pp->gpio_base = -1; + + /* + * Only port A can provide interrupts in all configurations of + * the IP.
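+ * The code below therefore parses interrupt properties for port A
+ * only; ports B, C and D are skipped and keep pp->has_irq false.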
+ */ + if (pp->idx != 0) + continue; + + if (dev->of_node && fwnode_property_read_bool(fwnode, + "interrupt-controller")) { + np = to_of_node(fwnode); + } + + for (j = 0; j < pp->ngpio; j++) { + pp->irq[j] = -ENXIO; + + if (np) + pp->irq[j] = of_irq_get(np, j); + else if (has_acpi_companion(dev)) + pp->irq[j] = platform_get_irq(to_platform_device(dev), j); + + if (pp->irq[j] >= 0) + pp->has_irq = true; + } + + if (!pp->has_irq) + dev_warn(dev, "no irq for port%d\n", pp->idx); + } + + return pdata; +} + +static const struct of_device_id sunway_of_match[] = { + { .compatible = "snps,sw-gpio", .data = (void *)0}, + { .compatible = "apm,xgene-gpio-v2", .data = (void *)GPIO_REG_OFFSET_V2}, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sunway_of_match); + +static const struct acpi_device_id sunway_acpi_match[] = { + {"HISI0181", 0}, + {"APMC0D07", 0}, + {"APMC0D81", GPIO_REG_OFFSET_V2}, + { } +}; +MODULE_DEVICE_TABLE(acpi, sunway_acpi_match); + +static int sunway_gpio_probe(struct platform_device *pdev) +{ + unsigned int i; + struct resource *res; + struct sunway_gpio *gpio; + int err; + struct device *dev = &pdev->dev; + struct sunway_platform_data *pdata = dev_get_platdata(dev); + + if (!pdata) { + pdata = sunway_gpio_get_pdata(dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } + + if (!pdata->nports) + return -ENODEV; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + gpio->dev = &pdev->dev; + gpio->nr_ports = pdata->nports; + + gpio->rst = devm_reset_control_get_optional_shared(dev, NULL); + if (IS_ERR(gpio->rst)) + return PTR_ERR(gpio->rst); + + reset_control_deassert(gpio->rst); + + gpio->ports = devm_kcalloc(&pdev->dev, gpio->nr_ports, + sizeof(*gpio->ports), GFP_KERNEL); + if (!gpio->ports) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->regs)) + return PTR_ERR(gpio->regs); + + /* Optional bus clock */ + gpio->clk = devm_clk_get(&pdev->dev, "bus"); + if (!IS_ERR(gpio->clk)) { + err = clk_prepare_enable(gpio->clk); + if (err) { + dev_info(&pdev->dev, "Cannot enable clock\n"); + return err; + } + } + + gpio->flags = 0; + if (dev->of_node) { + gpio->flags = (uintptr_t)of_device_get_match_data(dev); + } else if (has_acpi_companion(dev)) { + const struct acpi_device_id *acpi_id; + + acpi_id = acpi_match_device(sunway_acpi_match, dev); + if (acpi_id) { + if (acpi_id->driver_data) + gpio->flags = acpi_id->driver_data; + } + } + + for (i = 0; i < gpio->nr_ports; i++) { + err = sunway_gpio_add_port(gpio, &pdata->properties[i], i); + if (err) + goto out_unregister; + } + platform_set_drvdata(pdev, gpio); + + return 0; + +out_unregister: + sunway_gpio_unregister(gpio); + sunway_irq_teardown(gpio); + clk_disable_unprepare(gpio->clk); + + return err; +} + +static int sunway_gpio_remove(struct platform_device *pdev) +{ + struct sunway_gpio *gpio = platform_get_drvdata(pdev); + + sunway_gpio_unregister(gpio); + sunway_irq_teardown(gpio); + reset_control_assert(gpio->rst); + clk_disable_unprepare(gpio->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sunway_gpio_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sunway_gpio *gpio = platform_get_drvdata(pdev); + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + int i; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + for (i = 0; i < gpio->nr_ports; i++) { + unsigned int offset; + unsigned int idx = gpio->ports[i].idx; + 
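+
+		/*
+		 * Each pass saves the port's direction, output data and
+		 * external input level; port A additionally latches its
+		 * interrupt configuration and masks non-wake interrupts.
+		 */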
struct sunway_context *ctx = gpio->ports[i].ctx; + + BUG_ON(!ctx); + + offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE; + ctx->dir = sunway_read(gpio, offset); + + offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE; + ctx->data = sunway_read(gpio, offset); + + offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE; + ctx->ext = sunway_read(gpio, offset); + + /* Only port A can provide interrupts */ + if (idx == 0) { + ctx->int_mask = sunway_read(gpio, GPIO_INTMASK); + ctx->int_en = sunway_read(gpio, GPIO_INTEN); + ctx->int_pol = sunway_read(gpio, GPIO_INT_POLARITY); + ctx->int_type = sunway_read(gpio, GPIO_INTTYPE_LEVEL); + ctx->int_deb = sunway_read(gpio, GPIO_PORTA_DEBOUNCE); + + /* Mask out interrupts */ + sunway_write(gpio, GPIO_INTMASK, + 0xffffffff & ~ctx->wake_en); + } + } + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + clk_disable_unprepare(gpio->clk); + + return 0; +} + +static int sunway_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sunway_gpio *gpio = platform_get_drvdata(pdev); + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + int i; + + if (!IS_ERR(gpio->clk)) + clk_prepare_enable(gpio->clk); + + spin_lock_irqsave(&gc->bgpio_lock, flags); + for (i = 0; i < gpio->nr_ports; i++) { + unsigned int offset; + unsigned int idx = gpio->ports[i].idx; + struct sunway_context *ctx = gpio->ports[i].ctx; + + BUG_ON(!ctx); + + offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE; + sunway_write(gpio, offset, ctx->data); + + offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE; + sunway_write(gpio, offset, ctx->dir); + + offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE; + sunway_write(gpio, offset, ctx->ext); + + /* Only port A can provide interrupts */ + if (idx == 0) { + sunway_write(gpio, GPIO_INTTYPE_LEVEL, ctx->int_type); + sunway_write(gpio, GPIO_INT_POLARITY, ctx->int_pol); + sunway_write(gpio, GPIO_PORTA_DEBOUNCE, ctx->int_deb); + sunway_write(gpio, GPIO_INTEN, ctx->int_en); + sunway_write(gpio, GPIO_INTMASK, ctx->int_mask); + + /* Clear out spurious interrupts */ + sunway_write(gpio, GPIO_PORTA_EOI, 0xffffffff); + } + } + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(sunway_gpio_pm_ops, sunway_gpio_suspend, + sunway_gpio_resume); + +static struct platform_driver sunway_gpio_driver = { + .driver = { + .name = "gpio-sunway", + .pm = &sunway_gpio_pm_ops, + .of_match_table = of_match_ptr(sunway_of_match), + .acpi_match_table = ACPI_PTR(sunway_acpi_match), + }, + .probe = sunway_gpio_probe, + .remove = sunway_gpio_remove, +}; + +module_platform_driver(sunway_gpio_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jamie Iles"); +MODULE_DESCRIPTION("Sunway GPIO driver"); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index ec4abf9ff47b5f3b7efb1a89884e1567add4e36d..ffb759c0bd36459c73da81cd81757717ab0f6d5e 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -306,6 +306,8 @@ source "drivers/gpu/drm/udl/Kconfig" source "drivers/gpu/drm/ast/Kconfig" +source "drivers/gpu/drm/ast_loongson/Kconfig" + source "drivers/gpu/drm/mgag200/Kconfig" source "drivers/gpu/drm/armada/Kconfig" @@ -388,6 +390,10 @@ source "drivers/gpu/drm/solomon/Kconfig" source "drivers/gpu/drm/sprd/Kconfig" +source "drivers/gpu/drm/phytium/Kconfig" + +source "drivers/gpu/drm/inspur/Kconfig" + config DRM_HYPERV tristate "DRM Support for Hyper-V synthetic video device" depends on DRM && PCI && MMU && HYPERV @@ -436,3 +442,9 @@ config 
DRM_LIB_RANDOM config DRM_PRIVACY_SCREEN bool default n + +config HYDCU_FIXUP_HEADER + bool "Enable fixup header support for HYDCU" + help + Choose this option if you want to use PCI passthrough with HYDCU. + HYDCU cannot support PCI reset, so enable this option to disable PCI reset. diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 215e78e791250b8ad0d0a273e627b84296ecc4d9..ff0e5faff9d9a603a37d1f1c211cd7b496881c14 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -158,6 +158,7 @@ obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/ obj-$(CONFIG_DRM_GMA500) += gma500/ obj-$(CONFIG_DRM_UDL) += udl/ obj-$(CONFIG_DRM_AST) += ast/ +obj-$(CONFIG_DRM_AST_LOONGSON) += ast_loongson/ obj-$(CONFIG_DRM_ARMADA) += armada/ obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/ obj-y += renesas/ @@ -198,3 +199,6 @@ obj-$(CONFIG_DRM_HYPERV) += hyperv/ obj-y += solomon/ obj-$(CONFIG_DRM_SPRD) += sprd/ obj-$(CONFIG_DRM_LOONGSON) += loongson/ +obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hygon/hydcu-fixup-header/ +obj-$(CONFIG_DRM_PHYTIUM) += phytium/ +obj-$(CONFIG_DRM_INSPUR) += inspur/ diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index e1224ef4ad83d4d41d70cba06d0d60bbf159643a..529357d1a33335ead97a583b91d0df32fa6274c0 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -50,11 +50,31 @@ module_param_named(modeset, ast_modeset, int, 0400); DEFINE_DRM_GEM_FOPS(ast_fops); +#define DRM_AST_VRAM_TYPE_DEVICE 0x0 +#define DRM_IOCTL_AST_VRAM_TYPE_DEVICE DRM_IO(DRM_COMMAND_BASE\ + + DRM_AST_VRAM_TYPE_DEVICE) + +static int ast_ioctl_check_5c01_device(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct ast_device *ast = to_ast_device(dev); + + return ast->is_5c01_device ? 1 : 0; +} + +static const struct drm_ioctl_desc ast_ioctls[] = { + /* for test, none so far */ + DRM_IOCTL_DEF_DRV(AST_VRAM_TYPE_DEVICE, ast_ioctl_check_5c01_device, + DRM_AUTH|DRM_UNLOCKED), +}; + static const struct drm_driver ast_driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, + .ioctls = ast_ioctls, + .num_ioctls = ARRAY_SIZE(ast_ioctls), .fops = &ast_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index f7053f2972bb92de43db2380187db57ae8702596..b7534f6fbfff185e82ab93c1365d854c76fcdbd2 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -231,6 +231,9 @@ struct ast_device { } bmc; } output; + struct ttm_device *bdev; + + bool is_5c01_device; bool support_wide_screen; enum { ast_use_p2a, diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c index bc174bd933b9715de989eb609cf18c451aefc667..f48df729c8115f256b9cb228f4b251a56485ad11 100644 --- a/drivers/gpu/drm/ast/ast_mm.c +++ b/drivers/gpu/drm/ast/ast_mm.c @@ -30,6 +30,12 @@ #include #include +#include + +#include +#include +#include +#include #include "ast_drv.h" @@ -71,6 +77,25 @@ static u32 ast_get_vram_size(struct ast_device *ast) return vram_size; } +static bool ast_pci_host_is_5c01(struct pci_bus *bus) +{ + struct pci_bus *child = bus; + struct pci_dev *root = NULL; + + /* walk up until the parent is the root bus; guard the parent + * pointers so a device sitting on the root bus cannot oops */ + while (child->parent && child->parent->parent) + child = child->parent; + + root = child->self; + + if (root && (root->vendor == 0x1db7) && (root->device == 0x5c01)) + return true; + return false; +} + int ast_mm_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; @@ -87,7 +112,14 @@ int ast_mm_init(struct ast_device *ast) vram_size = ast_get_vram_size(ast); - 
ast->vram = devm_ioremap_wc(dev->dev, base, vram_size); + if (ast_pci_host_is_5c01(pdev->bus)) { + ast->is_5c01_device = true; + ast->vram = devm_ioremap(dev->dev, base, vram_size); + } else { + ast->is_5c01_device = false; + ast->vram = devm_ioremap_wc(dev->dev, base, vram_size); + } + if (!ast->vram) return -ENOMEM; diff --git a/drivers/gpu/drm/ast_loongson/Kconfig b/drivers/gpu/drm/ast_loongson/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..40af6934ac3666ee2134f2fc78eb9008550c4570 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +config DRM_AST_LOONGSON + tristate "AST server chips for Loongson Platform" + depends on DRM && PCI && MMU && LOONGARCH + select DRM_KMS_HELPER + select DRM_VRAM_HELPER + select DRM_TTM + select DRM_TTM_HELPER + help + Say yes for experimental AST GPU driver. Do not enable + this driver without having a working -modesetting, + and a version of AST that knows to fail if KMS + is bound to the driver. These GPUs are commonly found + in server chipsets. diff --git a/drivers/gpu/drm/ast_loongson/Makefile b/drivers/gpu/drm/ast_loongson/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..02d40f992f5ad84b9b18d16e9739e954c302ea07 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the drm device driver. This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. + +ast-y := ast_drv.o ast_i2c.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o ast_dp.o + +obj-$(CONFIG_DRM_AST_LOONGSON) := ast.o diff --git a/drivers/gpu/drm/ast_loongson/ast_dp.c b/drivers/gpu/drm/ast_loongson/ast_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..b7e1f51d558b3029d7f22d25edf0135fd5e76bc7 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_dp.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2021, ASPEED Technology Inc. +// Authors: KuoHsiang Chou + +#include +#include +#include +#include "ast_drv.h" + +int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata) +{ + struct ast_private *ast = to_ast_private(dev); + u8 i = 0, j = 0; + + /* + * CRD1[b5]: DP MCU FW is executing + * CRDC[b0]: DP link success + * CRDF[b0]: DP HPD + * CRE5[b0]: Host reading EDID process is done + */ + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, + ASTDP_MCU_FW_EXECUTING) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, + ASTDP_LINK_SUCCESS) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + ASTDP_HOST_EDID_READ_DONE_MASK))) { + goto err_astdp_edid_not_ready; + } + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + (u8)~ASTDP_HOST_EDID_READ_DONE_MASK, 0x00); + + for (i = 0; i < 32; i++) { + /* + * CRE4[7:0]: Read-Pointer for EDID (Unit: 4bytes); valid range: 0~64 + */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE4, + ASTDP_AND_CLEAR_MASK, (u8)i); + j = 0; + + /* + * CRD7[b0]: valid flag for EDID + * CRD6[b0]: mirror read pointer for EDID + */ + while ((ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD7, + ASTDP_EDID_VALID_FLAG_MASK) != + 0x01) || + (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD6, + ASTDP_EDID_READ_POINTER_MASK) != + i)) { + /* + * Delay are getting longer with each retry. + * 1. The Delays are often 2 loops when users request "Display Settings" + * of right-click of mouse. + * 2. 
The delays are often much longer when the system resumes from S3/S4. + */ + mdelay(j + 1); + + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, + 0xD1, + ASTDP_MCU_FW_EXECUTING) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, + 0xDC, + ASTDP_LINK_SUCCESS) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, + 0xDF, ASTDP_HPD))) { + goto err_astdp_jump_out_loop_of_edid; + } + + j++; + if (j > 200) + goto err_astdp_jump_out_loop_of_edid; + } + + *(ediddata) = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0xD8, ASTDP_EDID_READ_DATA_MASK); + *(ediddata + 1) = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0xD9, ASTDP_EDID_READ_DATA_MASK); + *(ediddata + 2) = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0xDA, ASTDP_EDID_READ_DATA_MASK); + *(ediddata + 3) = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0xDB, ASTDP_EDID_READ_DATA_MASK); + + if (i == 31) { + /* + * For a 128-byte EDID 1.3 block, + * 1. Add the value of byte 126 to byte 127. + * Byte 127 is the checksum; the sum of all 128 bytes + * should equal 0 (mod 256). + * 2. Set byte 126 to 0. + * Byte 126 gives the number of extensions to + * follow; 0 means no extensions. + */ + *(ediddata + 3) = *(ediddata + 3) + *(ediddata + 2); + *(ediddata + 2) = 0; + } + + ediddata += 4; + } + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + (u8)~ASTDP_HOST_EDID_READ_DONE_MASK, + ASTDP_HOST_EDID_READ_DONE); + + return 0; + +err_astdp_jump_out_loop_of_edid: + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + (u8)~ASTDP_HOST_EDID_READ_DONE_MASK, + ASTDP_HOST_EDID_READ_DONE); + return (~(j + 256) + 1); + +err_astdp_edid_not_ready: + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, + ASTDP_MCU_FW_EXECUTING))) + return (~0xD1 + 1); + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, + ASTDP_LINK_SUCCESS))) + return (~0xDC + 1); + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD))) + return (~0xDF + 1); + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + ASTDP_HOST_EDID_READ_DONE_MASK))) + return (~0xE5 + 1); + + return 0; +} + +/* + * Launch Aspeed DP + */ +void ast_dp_launch(struct drm_device *dev, u8 bPower) +{ + u32 i = 0, j = 0, WaitCount = 1; + u8 bDPTX = 0; + u8 bDPExecute = 1; + + struct ast_private *ast = to_ast_private(dev); + // On resume from S3, allow more time for the BMC to become ready. + if (bPower) + WaitCount = 300; + + // Poll for the transmitter type, within the wait budget chosen above. + for (j = 0; j < WaitCount; j++) { + bDPTX = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, + TX_TYPE_MASK); + + if (bDPTX) + break; + + msleep(100); + } + + // 0xE : ASTDP with DPMCU FW handling + if (bDPTX == ASTDP_DPMCU_TX) { + // Wait one second then timeout. + i = 0; + + while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, + COPROCESSOR_LAUNCH) != + COPROCESSOR_LAUNCH) { + i++; + // wait 100 ms + msleep(100); + + if (i >= 10) { + // DP would not be ready.
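+				// About one second (10 x 100 ms) without
+				// COPROCESSOR_LAUNCH: give up, so AST_TX_ASTDP
+				// is never advertised in tx_chip_types.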
+ bDPExecute = 0; + break; + } + } + + if (bDPExecute) + ast->tx_chip_types |= BIT(AST_TX_ASTDP); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + (u8)~ASTDP_HOST_EDID_READ_DONE_MASK, + ASTDP_HOST_EDID_READ_DONE); + } +} + +void ast_dp_power_on_off(struct drm_device *dev, bool on) +{ + struct ast_private *ast = to_ast_private(dev); + // Read and Turn off DP PHY sleep + u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, + AST_DP_VIDEO_ENABLE); + + // Turn on DP PHY sleep + if (!on) + bE3 |= AST_DP_PHY_SLEEP; + + // DP Power on/off + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, + (u8)~AST_DP_PHY_SLEEP, bE3); +} + +void ast_dp_set_on_off(struct drm_device *dev, bool on) +{ + struct ast_private *ast = to_ast_private(dev); + u8 video_on_off = on; + + // Video On/Off + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, + (u8)~AST_DP_VIDEO_ENABLE, on); + + // If DP plug in and link successful then check video on / off status + if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, + ASTDP_LINK_SUCCESS) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD)) { + video_on_off <<= 4; + while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, + ASTDP_MIRROR_VIDEO_ENABLE) != + video_on_off) { + // wait 1 ms + mdelay(1); + } + } +} + +void ast_dp_set_mode(struct drm_crtc *crtc, + struct ast_vbios_mode_info *vbios_mode) +{ + struct ast_private *ast = to_ast_private(crtc->dev); + + u32 ulRefreshRateIndex; + u8 ModeIdx; + + ulRefreshRateIndex = vbios_mode->enh_table->refresh_rate_index - 1; + + switch (crtc->mode.crtc_hdisplay) { + case 320: + ModeIdx = ASTDP_320x240_60; + break; + case 400: + ModeIdx = ASTDP_400x300_60; + break; + case 512: + ModeIdx = ASTDP_512x384_60; + break; + case 640: + ModeIdx = (ASTDP_640x480_60 + (u8)ulRefreshRateIndex); + break; + case 800: + ModeIdx = (ASTDP_800x600_56 + (u8)ulRefreshRateIndex); + break; + case 1024: + ModeIdx = (ASTDP_1024x768_60 + (u8)ulRefreshRateIndex); + break; + case 1152: + ModeIdx = ASTDP_1152x864_75; + break; + case 1280: + if (crtc->mode.crtc_vdisplay == 800) + ModeIdx = + (ASTDP_1280x800_60_RB - (u8)ulRefreshRateIndex); + else // 1024 + ModeIdx = (ASTDP_1280x1024_60 + (u8)ulRefreshRateIndex); + break; + case 1360: + case 1366: + ModeIdx = ASTDP_1366x768_60; + break; + case 1440: + ModeIdx = (ASTDP_1440x900_60_RB - (u8)ulRefreshRateIndex); + break; + case 1600: + if (crtc->mode.crtc_vdisplay == 900) + ModeIdx = + (ASTDP_1600x900_60_RB - (u8)ulRefreshRateIndex); + else //1200 + ModeIdx = ASTDP_1600x1200_60; + break; + case 1680: + ModeIdx = (ASTDP_1680x1050_60_RB - (u8)ulRefreshRateIndex); + break; + case 1920: + if (crtc->mode.crtc_vdisplay == 1080) + ModeIdx = ASTDP_1920x1080_60; + else //1200 + ModeIdx = ASTDP_1920x1200_60; + break; + default: + return; + } + + /* + * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp) + * CRE1[7:0]: MISC1 (default: 0x00) + * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50) + */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE0, + ASTDP_AND_CLEAR_MASK, ASTDP_MISC0_24bpp); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE1, + ASTDP_AND_CLEAR_MASK, ASTDP_MISC1); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE2, + ASTDP_AND_CLEAR_MASK, ModeIdx); +} diff --git a/drivers/gpu/drm/ast_loongson/ast_dp501.c b/drivers/gpu/drm/ast_loongson/ast_dp501.c new file mode 100644 index 0000000000000000000000000000000000000000..39474bff1aeaf53a4bb4f3644b5c68789497cba6 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_dp501.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: 
GPL-2.0 + +#include +#include +#include + +#include "ast_drv.h" + +MODULE_FIRMWARE("ast_dp501_fw.bin"); + +static void ast_release_firmware(void *data) +{ + struct ast_private *ast = data; + + release_firmware(ast->dp501_fw); + ast->dp501_fw = NULL; +} + +static int ast_load_dp501_microcode(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + int ret; + + ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev); + if (ret) + return ret; + + return devm_add_action_or_reset(dev->dev, ast_release_firmware, ast); +} + +static void send_ack(struct ast_private *ast) +{ + u8 sendack; + + sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff); + sendack |= 0x80; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack); +} + +static void send_nack(struct ast_private *ast) +{ + u8 sendack; + + sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff); + sendack &= ~0x80; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack); +} + +static bool wait_ack(struct ast_private *ast) +{ + u8 waitack; + u32 retry = 0; + + do { + waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, + 0xff); + waitack &= 0x80; + udelay(100); + } while ((!waitack) && (retry++ < 1000)); + + if (retry < 1000) + return true; + else + return false; +} + +static bool wait_nack(struct ast_private *ast) +{ + u8 waitack; + u32 retry = 0; + + do { + waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, + 0xff); + waitack &= 0x80; + udelay(100); + } while ((waitack) && (retry++ < 1000)); + + if (retry < 1000) + return true; + else + return false; +} + +static void set_cmd_trigger(struct ast_private *ast) +{ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40); +} + +static void clear_cmd_trigger(struct ast_private *ast) +{ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00); +} + +static bool ast_write_cmd(struct drm_device *dev, u8 data) +{ + struct ast_private *ast = to_ast_private(dev); + int retry = 0; + + if (wait_nack(ast)) { + send_nack(ast); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data); + send_ack(ast); + set_cmd_trigger(ast); + do { + if (wait_ack(ast)) { + clear_cmd_trigger(ast); + send_nack(ast); + return true; + } + } while (retry++ < 100); + } + clear_cmd_trigger(ast); + send_nack(ast); + return false; +} + +static bool ast_write_data(struct drm_device *dev, u8 data) +{ + struct ast_private *ast = to_ast_private(dev); + + if (wait_nack(ast)) { + send_nack(ast); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data); + send_ack(ast); + if (wait_ack(ast)) { + send_nack(ast); + return true; + } + } + send_nack(ast); + return false; +} + +void ast_set_dp501_video_output(struct drm_device *dev, u8 mode) +{ + ast_write_cmd(dev, 0x40); + ast_write_data(dev, mode); + + /* + * msleep < 20ms can sleep for up to 20ms; + * see Documentation/timers/timers-howto.rst + */ + msleep(20); +} + +static u32 get_fw_base(struct ast_private *ast) +{ + return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff; +} + +bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size) +{ + struct ast_private *ast = to_ast_private(dev); + u32 i, data; + u32 boot_address; + + if (ast->config_mode != ast_use_p2a) + return false; + + data = ast_mindwm(ast, 0x1e6e2100) & 0x01; + if (data) { + boot_address = get_fw_base(ast); + for (i = 0; i < size; i += 4) + *(u32 *)(addr + i) = ast_mindwm(ast, boot_address + i); + return true; + } + return false; +} + +static bool ast_launch_m68k(struct drm_device *dev) +{ 
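+	/*
+	 * Launching the DP501 firmware requires the P2A (PCI-to-AHB)
+	 * window: the microcode (32 KiB from dp501_fw_addr, or the loaded
+	 * firmware blob) is copied to a buffer 2 MiB below the end of the
+	 * selected DRAM window, then the coprocessor is started through
+	 * the SCU boot-address and launch registers.
+	 */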
+ struct ast_private *ast = to_ast_private(dev); + u32 i, data, len = 0; + u32 boot_address; + u8 *fw_addr = NULL; + u8 jreg; + + if (ast->config_mode != ast_use_p2a) + return false; + + data = ast_mindwm(ast, 0x1e6e2100) & 0x01; + if (!data) { + if (ast->dp501_fw_addr) { + fw_addr = ast->dp501_fw_addr; + len = 32 * 1024; + } else { + if (!ast->dp501_fw && ast_load_dp501_microcode(dev) < 0) + return false; + + fw_addr = (u8 *)ast->dp501_fw->data; + len = ast->dp501_fw->size; + } + /* Get BootAddress */ + ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8); + data = ast_mindwm(ast, 0x1e6e0004); + switch (data & 0x03) { + case 0: + boot_address = 0x44000000; + break; + default: + case 1: + boot_address = 0x48000000; + break; + case 2: + boot_address = 0x50000000; + break; + case 3: + boot_address = 0x60000000; + break; + } + boot_address -= 0x200000; /* -2MB */ + + /* copy image to buffer */ + for (i = 0; i < len; i += 4) { + data = *(u32 *)(fw_addr + i); + ast_moutdwm(ast, boot_address + i, data); + } + + /* Init SCU */ + ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8); + + /* Launch FW */ + ast_moutdwm(ast, 0x1e6e2104, 0x80000000 + boot_address); + ast_moutdwm(ast, 0x1e6e2100, 1); + + /* Update Scratch */ + data = ast_mindwm(ast, 0x1e6e2040) & + 0xfffff1ff; /* D[11:9] = 100b: UEFI handling */ + data |= 0x800; + ast_moutdwm(ast, 0x1e6e2040, data); + + jreg = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0x99, + 0xfc); /* D[1:0]: Reserved Video Buffer */ + jreg |= 0x02; + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x99, jreg); + } + return true; +} + +bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata) +{ + struct ast_private *ast = to_ast_private(dev); + u32 i, boot_address, offset, data; + u32 *pEDIDidx; + + if (ast->config_mode == ast_use_p2a) { + boot_address = get_fw_base(ast); + + /* validate FW version */ + offset = AST_DP501_GBL_VERSION; + data = ast_mindwm(ast, boot_address + offset); + if ((data & AST_DP501_FW_VERSION_MASK) != + AST_DP501_FW_VERSION_1) + return false; + + /* validate PnP Monitor */ + offset = AST_DP501_PNPMONITOR; + data = ast_mindwm(ast, boot_address + offset); + if (!(data & AST_DP501_PNP_CONNECTED)) + return false; + + /* Read EDID */ + offset = AST_DP501_EDID_DATA; + for (i = 0; i < 128; i += 4) { + data = ast_mindwm(ast, boot_address + offset + i); + pEDIDidx = (u32 *)(ediddata + i); + *pEDIDidx = data; + } + } else { + if (!ast->dp501_fw_buf) + return false; + + /* dummy read */ + offset = 0x0000; + data = readl(ast->dp501_fw_buf + offset); + + /* validate FW version */ + offset = AST_DP501_GBL_VERSION; + data = readl(ast->dp501_fw_buf + offset); + if ((data & AST_DP501_FW_VERSION_MASK) != + AST_DP501_FW_VERSION_1) + return false; + + /* validate PnP Monitor */ + offset = AST_DP501_PNPMONITOR; + data = readl(ast->dp501_fw_buf + offset); + if (!(data & AST_DP501_PNP_CONNECTED)) + return false; + + /* Read EDID */ + offset = AST_DP501_EDID_DATA; + for (i = 0; i < 128; i += 4) { + data = readl(ast->dp501_fw_buf + offset + i); + pEDIDidx = (u32 *)(ediddata + i); + *pEDIDidx = data; + } + } + + return true; +} + +static bool ast_init_dvo(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + u8 jreg; + u32 data; + + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688a8a8); + + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + if (!(jreg & 0x80)) { + /* Init SCU DVO Settings */ + data = ast_read32(ast, 0x12008); + /* delay phase */ + data &= 0xfffff8ff; + data |= 0x00000500; + 
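+		/* [10:8] = 0b101: DVO output delay phase; written back below */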
ast_write32(ast, 0x12008, data); + + if (ast->chip == AST2300) { + data = ast_read32(ast, 0x12084); + /* multi-pins for DVO single-edge */ + data |= 0xfffe0000; + ast_write32(ast, 0x12084, data); + + data = ast_read32(ast, 0x12088); + /* multi-pins for DVO single-edge */ + data |= 0x000fffff; + ast_write32(ast, 0x12088, data); + + data = ast_read32(ast, 0x12090); + /* multi-pins for DVO single-edge */ + data &= 0xffffffcf; + data |= 0x00000020; + ast_write32(ast, 0x12090, data); + } else { /* AST2400 */ + data = ast_read32(ast, 0x12088); + /* multi-pins for DVO single-edge */ + data |= 0x30000000; + ast_write32(ast, 0x12088, data); + + data = ast_read32(ast, 0x1208c); + /* multi-pins for DVO single-edge */ + data |= 0x000000cf; + ast_write32(ast, 0x1208c, data); + + data = ast_read32(ast, 0x120a4); + /* multi-pins for DVO single-edge */ + data |= 0xffff0000; + ast_write32(ast, 0x120a4, data); + + data = ast_read32(ast, 0x120a8); + /* multi-pins for DVO single-edge */ + data |= 0x0000000f; + ast_write32(ast, 0x120a8, data); + + data = ast_read32(ast, 0x12094); + /* multi-pins for DVO single-edge */ + data |= 0x00000002; + ast_write32(ast, 0x12094, data); + } + } + + /* Force to DVO */ + data = ast_read32(ast, 0x1202c); + data &= 0xfffbffff; + ast_write32(ast, 0x1202c, data); + + /* Init VGA DVO Settings */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); + return true; +} + +static void ast_init_analog(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + u32 data; + + /* + * Set DAC source to VGA mode in SCU2C via the P2A + * bridge. First configure the P2U to target the SCU + * in case it isn't at this stage. + */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + + /* Then unlock the SCU with the magic password */ + ast_write32(ast, 0x12000, 0x1688a8a8); + ast_write32(ast, 0x12000, 0x1688a8a8); + ast_write32(ast, 0x12000, 0x1688a8a8); + + /* Finally, clear bits [17:16] of SCU2c */ + data = ast_read32(ast, 0x1202c); + data &= 0xfffcffff; + ast_write32(ast, 0, data); + + /* Disable DVO */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x00); +} + +void ast_init_3rdtx(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + u8 jreg; + + if (ast->chip == AST2300 || ast->chip == AST2400) { + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, + 0xff); + switch (jreg & 0x0e) { + case 0x04: + ast_init_dvo(dev); + break; + case 0x08: + ast_launch_m68k(dev); + break; + case 0x0c: + ast_init_dvo(dev); + break; + default: + if (ast->tx_chip_types & BIT(AST_TX_SIL164)) + ast_init_dvo(dev); + else + ast_init_analog(dev); + } + } +} diff --git a/drivers/gpu/drm/ast_loongson/ast_dram_tables.h b/drivers/gpu/drm/ast_loongson/ast_dram_tables.h new file mode 100644 index 0000000000000000000000000000000000000000..114b1de15c1e714f24ab26165f7b14f8ae4576a4 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_dram_tables.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef AST_DRAM_TABLES_H +#define AST_DRAM_TABLES_H + +/* DRAM timing tables */ +struct ast_dramstruct { + u16 index; + u32 data; +}; + +static const struct ast_dramstruct ast2000_dram_table_data[] = { + { 0x0108, 0x00000000 }, { 0x0120, 0x00004a21 }, { 0xFF00, 0x00000043 }, + { 0x0000, 0xFFFFFFFF }, { 0x0004, 0x00000089 }, { 0x0008, 0x22331353 }, + { 0x000C, 0x0d07000b }, { 0x0010, 0x11113333 }, { 0x0020, 0x00110350 }, + { 0x0028, 0x1e0828f0 }, { 0x0024, 0x00000001 }, { 0x001C, 0x00000000 }, + { 0x0014, 0x00000003 }, { 0xFF00, 
0x00000043 }, { 0x0018, 0x00000131 }, + { 0x0014, 0x00000001 }, { 0xFF00, 0x00000043 }, { 0x0018, 0x00000031 }, + { 0x0014, 0x00000001 }, { 0xFF00, 0x00000043 }, { 0x0028, 0x1e0828f1 }, + { 0x0024, 0x00000003 }, { 0x002C, 0x1f0f28fb }, { 0x0030, 0xFFFFFE01 }, + { 0xFFFF, 0xFFFFFFFF } +}; + +static const struct ast_dramstruct ast1100_dram_table_data[] = { + { 0x2000, 0x1688a8a8 }, { 0x2020, 0x000041f0 }, { 0xFF00, 0x00000043 }, + { 0x0000, 0xfc600309 }, { 0x006C, 0x00909090 }, { 0x0064, 0x00050000 }, + { 0x0004, 0x00000585 }, { 0x0008, 0x0011030f }, { 0x0010, 0x22201724 }, + { 0x0018, 0x1e29011a }, { 0x0020, 0x00c82222 }, { 0x0014, 0x01001523 }, + { 0x001C, 0x1024010d }, { 0x0024, 0x00cb2522 }, { 0x0038, 0xffffff82 }, + { 0x003C, 0x00000000 }, { 0x0040, 0x00000000 }, { 0x0044, 0x00000000 }, + { 0x0048, 0x00000000 }, { 0x004C, 0x00000000 }, { 0x0050, 0x00000000 }, + { 0x0054, 0x00000000 }, { 0x0058, 0x00000000 }, { 0x005C, 0x00000000 }, + { 0x0060, 0x032aa02a }, { 0x0064, 0x002d3000 }, { 0x0068, 0x00000000 }, + { 0x0070, 0x00000000 }, { 0x0074, 0x00000000 }, { 0x0078, 0x00000000 }, + { 0x007C, 0x00000000 }, { 0x0034, 0x00000001 }, { 0xFF00, 0x00000043 }, + { 0x002C, 0x00000732 }, { 0x0030, 0x00000040 }, { 0x0028, 0x00000005 }, + { 0x0028, 0x00000007 }, { 0x0028, 0x00000003 }, { 0x0028, 0x00000001 }, + { 0x000C, 0x00005a08 }, { 0x002C, 0x00000632 }, { 0x0028, 0x00000001 }, + { 0x0030, 0x000003c0 }, { 0x0028, 0x00000003 }, { 0x0030, 0x00000040 }, + { 0x0028, 0x00000003 }, { 0x000C, 0x00005a21 }, { 0x0034, 0x00007c03 }, + { 0x0120, 0x00004c41 }, { 0xffff, 0xffffffff }, +}; + +static const struct ast_dramstruct ast2100_dram_table_data[] = { + { 0x2000, 0x1688a8a8 }, { 0x2020, 0x00004120 }, { 0xFF00, 0x00000043 }, + { 0x0000, 0xfc600309 }, { 0x006C, 0x00909090 }, { 0x0064, 0x00070000 }, + { 0x0004, 0x00000489 }, { 0x0008, 0x0011030f }, { 0x0010, 0x32302926 }, + { 0x0018, 0x274c0122 }, { 0x0020, 0x00ce2222 }, { 0x0014, 0x01001523 }, + { 0x001C, 0x1024010d }, { 0x0024, 0x00cb2522 }, { 0x0038, 0xffffff82 }, + { 0x003C, 0x00000000 }, { 0x0040, 0x00000000 }, { 0x0044, 0x00000000 }, + { 0x0048, 0x00000000 }, { 0x004C, 0x00000000 }, { 0x0050, 0x00000000 }, + { 0x0054, 0x00000000 }, { 0x0058, 0x00000000 }, { 0x005C, 0x00000000 }, + { 0x0060, 0x0f2aa02a }, { 0x0064, 0x003f3005 }, { 0x0068, 0x02020202 }, + { 0x0070, 0x00000000 }, { 0x0074, 0x00000000 }, { 0x0078, 0x00000000 }, + { 0x007C, 0x00000000 }, { 0x0034, 0x00000001 }, { 0xFF00, 0x00000043 }, + { 0x002C, 0x00000942 }, { 0x0030, 0x00000040 }, { 0x0028, 0x00000005 }, + { 0x0028, 0x00000007 }, { 0x0028, 0x00000003 }, { 0x0028, 0x00000001 }, + { 0x000C, 0x00005a08 }, { 0x002C, 0x00000842 }, { 0x0028, 0x00000001 }, + { 0x0030, 0x000003c0 }, { 0x0028, 0x00000003 }, { 0x0030, 0x00000040 }, + { 0x0028, 0x00000003 }, { 0x000C, 0x00005a21 }, { 0x0034, 0x00007c03 }, + { 0x0120, 0x00005061 }, { 0xffff, 0xffffffff }, +}; + +/* + * AST2500 DRAM settings modules + */ +#define REGTBL_NUM 17 +#define REGIDX_010 0 +#define REGIDX_014 1 +#define REGIDX_018 2 +#define REGIDX_020 3 +#define REGIDX_024 4 +#define REGIDX_02C 5 +#define REGIDX_030 6 +#define REGIDX_214 7 +#define REGIDX_2E0 8 +#define REGIDX_2E4 9 +#define REGIDX_2E8 10 +#define REGIDX_2EC 11 +#define REGIDX_2F0 12 +#define REGIDX_2F4 13 +#define REGIDX_2F8 14 +#define REGIDX_RFC 15 +#define REGIDX_PLL 16 + +static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = { + 0x64604D38, /* 0x010 */ + 0x29690599, /* 0x014 */ + 0x00000300, /* 0x018 */ + 0x00000000, /* 0x020 */ + 0x00000000, /* 0x024 */ + 
0x02181E70, /* 0x02C */ + 0x00000040, /* 0x030 */ + 0x00000024, /* 0x214 */ + 0x02001300, /* 0x2E0 */ + 0x0E0000A0, /* 0x2E4 */ + 0x000E001B, /* 0x2E8 */ + 0x35B8C105, /* 0x2EC */ + 0x08090408, /* 0x2F0 */ + 0x9B000800, /* 0x2F4 */ + 0x0E400A00, /* 0x2F8 */ + 0x9971452F, /* tRFC */ + 0x000071C1 /* PLL */ +}; + +static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = { + 0x63604E37, /* 0x010 */ + 0xE97AFA99, /* 0x014 */ + 0x00019000, /* 0x018 */ + 0x08000000, /* 0x020 */ + 0x00000400, /* 0x024 */ + 0x00000410, /* 0x02C */ + 0x00000101, /* 0x030 */ + 0x00000024, /* 0x214 */ + 0x03002900, /* 0x2E0 */ + 0x0E0000A0, /* 0x2E4 */ + 0x000E001C, /* 0x2E8 */ + 0x35B8C106, /* 0x2EC */ + 0x08080607, /* 0x2F0 */ + 0x9B000900, /* 0x2F4 */ + 0x0E400A00, /* 0x2F8 */ + 0x99714545, /* tRFC */ + 0x000071C1 /* PLL */ +}; + +#endif diff --git a/drivers/gpu/drm/ast_loongson/ast_drv.c b/drivers/gpu/drm/ast_loongson/ast_drv.c new file mode 100644 index 0000000000000000000000000000000000000000..2e069fe979392a79f4cadd048b5039b0f2af076a --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_drv.c @@ -0,0 +1,231 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ast_drv.h" + +static int ast_modeset = -1; + +MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); +module_param_named(modeset, ast_modeset, int, 0400); + +/* + * DRM driver + */ + +DEFINE_DRM_GEM_FOPS(ast_fops); + +static const struct drm_driver ast_driver = { .driver_features = DRIVER_ATOMIC | + DRIVER_GEM | + DRIVER_MODESET, + + .fops = &ast_fops, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, + + DRM_GEM_VRAM_DRIVER }; + +/* + * PCI driver + */ + +#define PCI_VENDOR_ASPEED 0x1a03 + +#define AST_VGA_DEVICE(id, info) \ + { .class = PCI_BASE_CLASS_DISPLAY << 16, \ + .class_mask = 0xff0000, \ + .vendor = PCI_VENDOR_ASPEED, \ + .device = id, \ + .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID, \ + .driver_data = (unsigned long)info } + +static const struct pci_device_id ast_pciidlist[] = { + AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL), + AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL), + { 0, 0, 0 }, +}; + +MODULE_DEVICE_TABLE(pci, ast_pciidlist); + +static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev) +{ + resource_size_t base, size; + + base = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + + return drm_aperture_remove_conflicting_framebuffers(base, size, + &ast_driver); +} + +static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct ast_private *ast; + struct drm_device *dev; + int ret; + + ret = ast_remove_conflicting_framebuffers(pdev); + if (ret) + return ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ast = ast_device_create(&ast_driver, pdev, ent->driver_data); + if (IS_ERR(ast)) + return PTR_ERR(ast); + dev = &ast->base; + + ret = drm_dev_register(dev, ent->driver_data); + if (ret) + return ret; + + drm_fbdev_generic_setup(dev, 32); + + return 0; +} + +static void ast_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_dev_unregister(dev); + drm_atomic_helper_shutdown(dev); +} + +static int ast_drm_freeze(struct drm_device *dev) +{ + int error; + + error = drm_mode_config_helper_suspend(dev); + if (error) + return error; + pci_save_state(to_pci_dev(dev->dev)); + return 0; +} + +static int ast_drm_thaw(struct drm_device *dev) +{ + ast_post_gpu(dev); + + return drm_mode_config_helper_resume(dev); +} + +static int ast_drm_resume(struct drm_device *dev) +{ + if (pci_enable_device(to_pci_dev(dev->dev))) + return -EIO; + + return ast_drm_thaw(dev); +} + +static int ast_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + int error; + + error = ast_drm_freeze(ddev); + if (error) + return error; + + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} + +static int ast_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + return ast_drm_resume(ddev); +} + +static int ast_pm_freeze(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + return ast_drm_freeze(ddev); +} + +static int ast_pm_thaw(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + return ast_drm_thaw(ddev); +} + +static int 
ast_pm_poweroff(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + return ast_drm_freeze(ddev); +} + +static const struct dev_pm_ops ast_pm_ops = { + .suspend = ast_pm_suspend, + .resume = ast_pm_resume, + .freeze = ast_pm_freeze, + .thaw = ast_pm_thaw, + .poweroff = ast_pm_poweroff, + .restore = ast_pm_resume, +}; + +static struct pci_driver ast_pci_driver = { + .name = DRIVER_NAME, + .id_table = ast_pciidlist, + .probe = ast_pci_probe, + .remove = ast_pci_remove, + .driver.pm = &ast_pm_ops, +}; + +drm_module_pci_driver_if_modeset(ast_pci_driver, ast_modeset); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/ast_loongson/ast_drv.h b/drivers/gpu/drm/ast_loongson/ast_drv.h new file mode 100644 index 0000000000000000000000000000000000000000..29a2965080ef37f658a22eb8ed36308863f6bf23 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_drv.h @@ -0,0 +1,528 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ +#ifndef __AST_DRV_H__ +#define __AST_DRV_H__ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define DRIVER_AUTHOR "Dave Airlie" + +#define DRIVER_NAME "ast" +#define DRIVER_DESC "AST" +#define DRIVER_DATE "20120228" + +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 1 +#define DRIVER_PATCHLEVEL 0 + +#define PCI_CHIP_AST2000 0x2000 +#define PCI_CHIP_AST2100 0x2010 + +enum ast_chip { + AST2000, + AST2100, + AST1100, + AST2200, + AST2150, + AST2300, + AST2400, + AST2500, + AST2600, +}; + +enum ast_tx_chip { + AST_TX_NONE, + AST_TX_SIL164, + AST_TX_DP501, + AST_TX_ASTDP, +}; + +#define AST_TX_NONE_BIT BIT(AST_TX_NONE) +#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164) +#define AST_TX_DP501_BIT BIT(AST_TX_DP501) +#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP) + +#define AST_DRAM_512Mx16 0 +#define AST_DRAM_1Gx16 1 +#define AST_DRAM_512Mx32 2 +#define AST_DRAM_1Gx32 3 +#define AST_DRAM_2Gx16 6 +#define AST_DRAM_4Gx16 7 +#define AST_DRAM_8Gx16 8 + +/* + * Hardware cursor + */ + +#define AST_MAX_HWC_WIDTH 64 +#define AST_MAX_HWC_HEIGHT 64 + +#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2) +#define AST_HWC_SIGNATURE_SIZE 32 + +/* define for signature structure */ +#define AST_HWC_SIGNATURE_CHECKSUM 0x00 +#define AST_HWC_SIGNATURE_SizeX 0x04 +#define AST_HWC_SIGNATURE_SizeY 0x08 +#define AST_HWC_SIGNATURE_X 0x0C +#define AST_HWC_SIGNATURE_Y 0x10 +#define AST_HWC_SIGNATURE_HOTSPOTX 0x14 +#define AST_HWC_SIGNATURE_HOTSPOTY 0x18 + +/* + * Planes + */ + +struct ast_plane { + struct drm_plane base; + + struct drm_gem_vram_object *gbo; + struct iosys_map map; + u64 off; +}; + +static inline struct ast_plane *to_ast_plane(struct drm_plane *plane) +{ + return container_of(plane, struct ast_plane, base); +} + +/* + * Connector with i2c channel + */ + +struct ast_i2c_chan { + struct i2c_adapter adapter; + struct drm_device *dev; + struct i2c_algo_bit_data bit; +}; + +struct ast_vga_connector { + struct drm_connector base; + struct ast_i2c_chan *i2c; +}; + +static inline struct ast_vga_connector * +to_ast_vga_connector(struct drm_connector *connector) +{ + return container_of(connector, struct ast_vga_connector, base); +} + +struct ast_sil164_connector { + struct drm_connector base; + struct ast_i2c_chan *i2c; +}; + +static inline struct ast_sil164_connector * +to_ast_sil164_connector(struct drm_connector *connector) +{ + return container_of(connector, struct ast_sil164_connector, base); +} + +/* + * Device + */ + +struct ast_private { + struct drm_device base; + + struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */ + void __iomem *regs; + void __iomem *ioregs; + void __iomem *dp501_fw_buf; + + enum ast_chip chip; + bool vga2_clone; + uint32_t dram_bus_width; + uint32_t dram_type; + uint32_t mclk; + + struct drm_plane primary_plane; + struct ast_plane cursor_plane; + struct drm_crtc crtc; + struct { + struct { + struct drm_encoder encoder; + struct ast_vga_connector vga_connector; + } vga; + struct { + struct drm_encoder encoder; + struct ast_sil164_connector sil164_connector; + } sil164; + struct { + struct drm_encoder encoder; + struct drm_connector connector; + } dp501; + struct { + struct drm_encoder encoder; + struct drm_connector connector; + } astdp; + } output; + + bool support_wide_screen; + enum { ast_use_p2a, ast_use_dt, ast_use_defaults } config_mode; + + unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */ + u8 *dp501_fw_addr; + const struct firmware 
*dp501_fw; /* dp501 fw */ +}; + +static inline struct ast_private *to_ast_private(struct drm_device *dev) +{ + return container_of(dev, struct ast_private, base); +} + +struct ast_private *ast_device_create(const struct drm_driver *drv, + struct pci_dev *pdev, + unsigned long flags); + +#define AST_IO_AR_PORT_WRITE (0x40) +#define AST_IO_MISC_PORT_WRITE (0x42) +#define AST_IO_VGA_ENABLE_PORT (0x43) +#define AST_IO_SEQ_PORT (0x44) +#define AST_IO_DAC_INDEX_READ (0x47) +#define AST_IO_DAC_INDEX_WRITE (0x48) +#define AST_IO_DAC_DATA (0x49) +#define AST_IO_GR_PORT (0x4E) +#define AST_IO_CRTC_PORT (0x54) +#define AST_IO_INPUT_STATUS1_READ (0x5A) +#define AST_IO_MISC_PORT_READ (0x4C) + +#define AST_IO_MM_OFFSET (0x380) + +#define AST_IO_VGAIR1_VREFRESH BIT(3) + +#define AST_IO_VGACRCB_HWC_ENABLED BIT(1) +#define AST_IO_VGACRCB_HWC_16BPP \ + BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ + +static inline u8 ast_read8(struct ast_private *ast, u32 reg) +{ + u8 val = 0; + + val = ioread8(ast->regs + reg); + return val; +} + +static inline u16 ast_read16(struct ast_private *ast, u32 reg) +{ + u16 val = 0; + + val = ioread16(ast->regs + reg); + return val; +} + +static inline u32 ast_read32(struct ast_private *ast, u32 reg) +{ + u32 val = 0; + + val = ioread32(ast->regs + reg); + return val; +} + +static inline u8 ast_io_read8(struct ast_private *ast, u32 reg) +{ + u8 val = 0; + + val = ioread8(ast->ioregs + reg); + return val; +} + +static inline u16 ast_io_read16(struct ast_private *ast, u32 reg) +{ + u16 val = 0; + + val = ioread16(ast->ioregs + reg); + return val; +} + +static inline u32 ast_io_read32(struct ast_private *ast, u32 reg) +{ + u32 val = 0; + + val = ioread32(ast->ioregs + reg); + return val; +} + +#define __ast_write(x) \ + static inline void ast_write##x(struct ast_private *ast, u32 reg, \ + u##x val) \ + { \ + iowrite##x(val, ast->regs + reg); \ + } + +__ast_write(8); +__ast_write(16); +__ast_write(32); + +#define __ast_io_write(x) \ + static inline void ast_io_write##x(struct ast_private *ast, u32 reg, \ + u##x val) \ + { \ + iowrite##x(val, ast->ioregs + reg); \ + } + +__ast_io_write(8); +__ast_io_write(16); +#undef __ast_io_write + +static inline void ast_set_index_reg(struct ast_private *ast, uint32_t base, + uint8_t index, uint8_t val) +{ + ast_io_write16(ast, base, ((u16)val << 8) | index); +} + +void ast_set_index_reg_mask(struct ast_private *ast, uint32_t base, + uint8_t index, uint8_t mask, uint8_t val); +uint8_t ast_get_index_reg(struct ast_private *ast, uint32_t base, + uint8_t index); +uint8_t ast_get_index_reg_mask(struct ast_private *ast, uint32_t base, + uint8_t index, uint8_t mask); + +static inline void ast_open_key(struct ast_private *ast) +{ + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8); +} + +#define AST_VIDMEM_SIZE_8M 0x00800000 +#define AST_VIDMEM_SIZE_16M 0x01000000 +#define AST_VIDMEM_SIZE_32M 0x02000000 +#define AST_VIDMEM_SIZE_64M 0x04000000 +#define AST_VIDMEM_SIZE_128M 0x08000000 + +#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M + +struct ast_vbios_stdtable { + u8 misc; + u8 seq[4]; + u8 crtc[25]; + u8 ar[20]; + u8 gr[9]; +}; + +struct ast_vbios_enhtable { + u32 ht; + u32 hde; + u32 hfp; + u32 hsync; + u32 vt; + u32 vde; + u32 vfp; + u32 vsync; + u32 dclk_index; + u32 flags; + u32 refresh_rate; + u32 refresh_rate_index; + u32 mode_id; +}; + +struct ast_vbios_dclk_info { + u8 param1; + u8 param2; + u8 param3; +}; + +struct ast_vbios_mode_info { + const struct ast_vbios_stdtable *std_table; + const struct ast_vbios_enhtable *enh_table; +}; + 
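+/* + * Mode setting pairs one std_table, chosen by color depth, with one + * enh_table, chosen by resolution and refresh rate; see + * ast_get_vbios_mode_info() in ast_mode.c. A 1024x768 XRGB8888 mode, for + * example, resolves to vbios_stdtable[TrueCModeIndex] plus an entry of + * res_1024x768[]. + */ + 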
+struct ast_crtc_state { + struct drm_crtc_state base; + + /* Last known format of primary plane */ + const struct drm_format_info *format; + + struct ast_vbios_mode_info vbios_mode_info; +}; + +#define to_ast_crtc_state(state) \ + container_of(state, struct ast_crtc_state, base) + +int ast_mode_config_init(struct ast_private *ast); + +#define AST_MM_ALIGN_SHIFT 4 +#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1) + +#define AST_DP501_FW_VERSION_MASK GENMASK(7, 4) +#define AST_DP501_FW_VERSION_1 BIT(4) +#define AST_DP501_PNP_CONNECTED BIT(1) + +#define AST_DP501_DEFAULT_DCLK 65 + +#define AST_DP501_GBL_VERSION 0xf000 +#define AST_DP501_PNPMONITOR 0xf010 +#define AST_DP501_LINKRATE 0xf014 +#define AST_DP501_EDID_DATA 0xf020 + +/* Define for Soc scratched reg */ +#define COPROCESSOR_LAUNCH BIT(5) + +/* + * Display Transmitter Type: + */ +#define TX_TYPE_MASK GENMASK(3, 1) +#define NO_TX (0 << 1) +#define ITE66121_VBIOS_TX (1 << 1) +#define SI164_VBIOS_TX (2 << 1) +#define CH7003_VBIOS_TX (3 << 1) +#define DP501_VBIOS_TX (4 << 1) +#define ANX9807_VBIOS_TX (5 << 1) +#define TX_FW_EMBEDDED_FW_TX (6 << 1) +#define ASTDP_DPMCU_TX (7 << 1) + +#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6) +//#define AST_VRAM_INIT_BY_BMC BIT(7) +//#define AST_VRAM_INIT_READY BIT(6) + +/* Define for Soc scratched reg used on ASTDP */ +#define AST_DP_PHY_SLEEP BIT(4) +#define AST_DP_VIDEO_ENABLE BIT(0) + +#define AST_DP_POWER_ON true +#define AST_DP_POWER_OFF false + +/* + * CRD1[b5]: DP MCU FW is executing + * CRDC[b0]: DP link success + * CRDF[b0]: DP HPD + * CRE5[b0]: Host reading EDID process is done + */ +#define ASTDP_MCU_FW_EXECUTING BIT(5) +#define ASTDP_LINK_SUCCESS BIT(0) +#define ASTDP_HPD BIT(0) +#define ASTDP_HOST_EDID_READ_DONE BIT(0) +#define ASTDP_HOST_EDID_READ_DONE_MASK GENMASK(0, 0) + +/* + * CRB8[b1]: Enable VSYNC off + * CRB8[b0]: Enable HSYNC off + */ +#define AST_DPMS_VSYNC_OFF BIT(1) +#define AST_DPMS_HSYNC_OFF BIT(0) + +/* + * CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE + * Precondition: A. ~AST_DP_PHY_SLEEP && + * B. DP_HPD && + * C. 
DP_LINK_SUCCESS + */ +#define ASTDP_MIRROR_VIDEO_ENABLE BIT(4) + +#define ASTDP_EDID_READ_POINTER_MASK GENMASK(7, 0) +#define ASTDP_EDID_VALID_FLAG_MASK GENMASK(0, 0) +#define ASTDP_EDID_READ_DATA_MASK GENMASK(7, 0) + +/* + * ASTDP setmode registers: + * CRE0[7:0]: MISC0 (0x00: 18-bpp or 0x20: 24-bpp) + * CRE1[7:0]: MISC1 (default: 0x00) + * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50) + */ +#define ASTDP_MISC0_24bpp BIT(5) +#define ASTDP_MISC1 0 +#define ASTDP_AND_CLEAR_MASK 0x00 + +/* + * ASTDP resolution table: + * EX: ASTDP_A_B_C: + * A: Resolution + * B: Refresh Rate + * C: Misc information, such as CVT, Reduced Blanking + */ +#define ASTDP_640x480_60 0x00 +#define ASTDP_640x480_72 0x01 +#define ASTDP_640x480_75 0x02 +#define ASTDP_640x480_85 0x03 +#define ASTDP_800x600_56 0x04 +#define ASTDP_800x600_60 0x05 +#define ASTDP_800x600_72 0x06 +#define ASTDP_800x600_75 0x07 +#define ASTDP_800x600_85 0x08 +#define ASTDP_1024x768_60 0x09 +#define ASTDP_1024x768_70 0x0A +#define ASTDP_1024x768_75 0x0B +#define ASTDP_1024x768_85 0x0C +#define ASTDP_1280x1024_60 0x0D +#define ASTDP_1280x1024_75 0x0E +#define ASTDP_1280x1024_85 0x0F +#define ASTDP_1600x1200_60 0x10 +#define ASTDP_320x240_60 0x11 +#define ASTDP_400x300_60 0x12 +#define ASTDP_512x384_60 0x13 +#define ASTDP_1920x1200_60 0x14 +#define ASTDP_1920x1080_60 0x15 +#define ASTDP_1280x800_60 0x16 +#define ASTDP_1280x800_60_RB 0x17 +#define ASTDP_1440x900_60 0x18 +#define ASTDP_1440x900_60_RB 0x19 +#define ASTDP_1680x1050_60 0x1A +#define ASTDP_1680x1050_60_RB 0x1B +#define ASTDP_1600x900_60 0x1C +#define ASTDP_1600x900_60_RB 0x1D +#define ASTDP_1366x768_60 0x1E +#define ASTDP_1152x864_75 0x1F + +int ast_mm_init(struct ast_private *ast); + +/* ast post */ +void ast_enable_vga(struct drm_device *dev); +void ast_enable_mmio(struct drm_device *dev); +bool ast_is_vga_enabled(struct drm_device *dev); +void ast_post_gpu(struct drm_device *dev); +u32 ast_mindwm(struct ast_private *ast, u32 r); +void ast_moutdwm(struct ast_private *ast, u32 r, u32 v); +void ast_patch_ahb_2500(struct ast_private *ast); +/* ast dp501 */ +void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); +bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); +bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata); +u8 ast_get_dp501_max_clk(struct drm_device *dev); +void ast_init_3rdtx(struct drm_device *dev); + +/* ast_i2c.c */ +struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev); + +/* aspeed DP */ +int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata); +void ast_dp_launch(struct drm_device *dev, u8 bPower); +void ast_dp_power_on_off(struct drm_device *dev, bool no); +void ast_dp_set_on_off(struct drm_device *dev, bool no); +void ast_dp_set_mode(struct drm_crtc *crtc, + struct ast_vbios_mode_info *vbios_mode); + +#endif diff --git a/drivers/gpu/drm/ast_loongson/ast_i2c.c b/drivers/gpu/drm/ast_loongson/ast_i2c.c new file mode 100644 index 0000000000000000000000000000000000000000..a3daabe3b6a6a890fd78ef77d7348707e7e03cc0 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_i2c.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: MIT +/* + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the 
Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + */ + +#include +#include + +#include "ast_drv.h" + +static void ast_i2c_setsda(void *i2c_priv, int data) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((data & 0x01) ? 0 : 1) << 2; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, + ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, + 0x04); + if (ujcrb7 == jtemp) + break; + } +} + +static void ast_i2c_setscl(void *i2c_priv, int clock) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((clock & 0x01) ? 0 : 1); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, + ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, + 0x01); + if (ujcrb7 == jtemp) + break; + } +} + +static int ast_i2c_getsda(void *i2c_priv) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & + 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, + 0x20) >> + 5) & + 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, + 0xb7, 0x20) >> + 5) & + 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); + + return val & 1 ? 1 : 0; +} + +static int ast_i2c_getscl(void *i2c_priv) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & + 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, + 0x10) >> + 4) & + 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, + 0xb7, 0x10) >> + 4) & + 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); + + return val & 1 ? 
1 : 0; +} + +static void ast_i2c_release(struct drm_device *dev, void *res) +{ + struct ast_i2c_chan *i2c = res; + + i2c_del_adapter(&i2c->adapter); + kfree(i2c); +} + +struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev) +{ + struct ast_i2c_chan *i2c; + int ret; + + i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL); + if (!i2c) + return NULL; + + i2c->adapter.owner = THIS_MODULE; + i2c->adapter.class = I2C_CLASS_DDC; + i2c->adapter.dev.parent = dev->dev; + i2c->dev = dev; + i2c_set_adapdata(&i2c->adapter, i2c); + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), + "AST i2c bit bus"); + i2c->adapter.algo_data = &i2c->bit; + + i2c->bit.udelay = 20; + i2c->bit.timeout = 2; + i2c->bit.data = i2c; + i2c->bit.setsda = ast_i2c_setsda; + i2c->bit.setscl = ast_i2c_setscl; + i2c->bit.getsda = ast_i2c_getsda; + i2c->bit.getscl = ast_i2c_getscl; + ret = i2c_bit_add_bus(&i2c->adapter); + if (ret) { + drm_err(dev, "Failed to register bit i2c\n"); + goto out_kfree; + } + + ret = drmm_add_action_or_reset(dev, ast_i2c_release, i2c); + if (ret) + return NULL; + return i2c; + +out_kfree: + kfree(i2c); + return NULL; +} diff --git a/drivers/gpu/drm/ast_loongson/ast_main.c b/drivers/gpu/drm/ast_loongson/ast_main.c new file mode 100644 index 0000000000000000000000000000000000000000..ab6195b61b952943ac6e7c4ab7a4994acd2b8e68 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_main.c @@ -0,0 +1,486 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "ast_drv.h" + +void ast_set_index_reg_mask(struct ast_private *ast, uint32_t base, + uint8_t index, uint8_t mask, uint8_t val) +{ + u8 tmp; + + ast_io_write8(ast, base, index); + tmp = (ast_io_read8(ast, base + 1) & mask) | val; + ast_set_index_reg(ast, base, index, tmp); +} + +uint8_t ast_get_index_reg(struct ast_private *ast, uint32_t base, uint8_t index) +{ + uint8_t ret; + + ast_io_write8(ast, base, index); + ret = ast_io_read8(ast, base + 1); + return ret; +} + +uint8_t ast_get_index_reg_mask(struct ast_private *ast, uint32_t base, + uint8_t index, uint8_t mask) +{ + uint8_t ret; + + ast_io_write8(ast, base, index); + ret = ast_io_read8(ast, base + 1) & mask; + return ret; +} + +static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) +{ + struct device_node *np = dev->dev->of_node; + struct ast_private *ast = to_ast_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); + uint32_t data, jregd0, jregd1; + + /* Defaults */ + ast->config_mode = ast_use_defaults; + *scu_rev = 0xffffffff; + + /* Check if we have device-tree properties */ + if (np && + !of_property_read_u32(np, "aspeed,scu-revision-id", scu_rev)) { + /* We do, disable P2A access */ + ast->config_mode = ast_use_dt; + drm_info(dev, "Using device-tree for configuration\n"); + return; + } + + /* Not all families have a P2A bridge */ + if (pdev->device != PCI_CHIP_AST2000) + return; + + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2A bridge + * is disabled. We force using P2A if the VGA-only mode bit + * D[7] is set. + */ + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { + /* Patch AST2500 */ + if (((pdev->revision & 0xF0) == 0x40) && + ((jregd0 & AST_VRAM_INIT_STATUS_MASK) == 0)) + ast_patch_ahb_2500(ast); + + /* Double check it's actually working */ + data = ast_read32(ast, 0xf004); + if ((data != 0xFFFFFFFF) && (data != 0x00)) { + /* P2A works, grab silicon revision */ + ast->config_mode = ast_use_p2a; + + drm_info(dev, "Using P2A bridge for configuration\n"); + + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + *scu_rev = ast_read32(ast, 0x1207c); + return; + } + } + + /* We have a P2A bridge but it's disabled */ + drm_info(dev, "P2A bridge disabled, using default configuration\n"); +} + +static int ast_detect_chip(struct drm_device *dev, bool *need_post) +{ + struct ast_private *ast = to_ast_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); + uint32_t jreg, scu_rev; + + /* + * If VGA isn't enabled, we need to enable it now or subsequent + * access to the scratch registers will fail. 
We also inform + * our caller that it needs to POST the chip + * (Assumption: VGA not enabled -> need to POST) + */ + if (!ast_is_vga_enabled(dev)) { + ast_enable_vga(dev); + drm_info(dev, + "VGA not enabled on entry, requesting chip POST\n"); + *need_post = true; + } else + *need_post = false; + + /* Enable extended register access */ + ast_open_key(ast); + ast_enable_mmio(dev); + + /* Find out whether P2A works or whether to use device-tree */ + ast_detect_config_mode(dev, &scu_rev); + + /* Identify chipset */ + if (pdev->revision >= 0x50) { + ast->chip = AST2600; + drm_info(dev, "AST 2600 detected\n"); + } else if (pdev->revision >= 0x40) { + ast->chip = AST2500; + drm_info(dev, "AST 2500 detected\n"); + } else if (pdev->revision >= 0x30) { + ast->chip = AST2400; + drm_info(dev, "AST 2400 detected\n"); + } else if (pdev->revision >= 0x20) { + ast->chip = AST2300; + drm_info(dev, "AST 2300 detected\n"); + } else if (pdev->revision >= 0x10) { + switch (scu_rev & 0x0300) { + case 0x0200: + ast->chip = AST1100; + drm_info(dev, "AST 1100 detected\n"); + break; + case 0x0100: + ast->chip = AST2200; + drm_info(dev, "AST 2200 detected\n"); + break; + case 0x0000: + ast->chip = AST2150; + drm_info(dev, "AST 2150 detected\n"); + break; + default: + ast->chip = AST2100; + drm_info(dev, "AST 2100 detected\n"); + break; + } + ast->vga2_clone = false; + } else { + ast->chip = AST2000; + drm_info(dev, "AST 2000 detected\n"); + } + + /* Check if we support wide screen */ + switch (ast->chip) { + case AST2000: + ast->support_wide_screen = false; + break; + default: + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, + 0xff); + if (!(jreg & 0x80)) + ast->support_wide_screen = true; + else if (jreg & 0x01) + ast->support_wide_screen = true; + else { + ast->support_wide_screen = false; + if (ast->chip == AST2300 && + (scu_rev & 0x300) == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && + (scu_rev & 0x300) == 0x100) /* ast1400 */ + ast->support_wide_screen = true; + if (ast->chip == AST2500 && + scu_rev == 0x100) /* ast2510 */ + ast->support_wide_screen = true; + if (ast->chip == AST2600) /* ast2600 */ + ast->support_wide_screen = true; + } + break; + } + + /* Check 3rd Tx option (digital output afaik) */ + ast->tx_chip_types |= AST_TX_NONE_BIT; + + /* + * VGACRA3 Enhanced Color Mode Register: check if DVO is already + * enabled; in that case, assume we have a SIL164 TMDS transmitter + * + * Don't make that assumption if the chip wasn't enabled and + * is at power-on reset, otherwise we'll incorrectly "detect" a + * SIL164 when there is none. 
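+ * The VGACRA3 probe below therefore runs only when no POST is pending.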
+ */ + if (!*need_post) { + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, + 0xff); + if (jreg & 0x80) + ast->tx_chip_types = AST_TX_SIL164_BIT; + } + + if ((ast->chip == AST2300) || (ast->chip == AST2400) || + (ast->chip == AST2500)) { + /* + * On AST2300, 2400 and 2500, look at the configuration set by + * the SoC in the SOC scratch register #1 bits 11:8 (interestingly + * marked as "reserved" in the spec) + */ + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, + 0xff); + switch (jreg) { + case 0x04: + ast->tx_chip_types = AST_TX_SIL164_BIT; + break; + case 0x08: + ast->dp501_fw_addr = + drmm_kzalloc(dev, 32 * 1024, GFP_KERNEL); + if (ast->dp501_fw_addr) { + /* backup firmware */ + if (ast_backup_fw(dev, ast->dp501_fw_addr, + 32 * 1024)) { + drmm_kfree(dev, ast->dp501_fw_addr); + ast->dp501_fw_addr = NULL; + } + } + fallthrough; + case 0x0c: + ast->tx_chip_types = AST_TX_DP501_BIT; + } + } else if (ast->chip == AST2600) + ast_dp_launch(&ast->base, 0); + + /* Print stuff for diagnostic purposes */ + if (ast->tx_chip_types & AST_TX_NONE_BIT) + drm_info(dev, "Using analog VGA\n"); + if (ast->tx_chip_types & AST_TX_SIL164_BIT) + drm_info(dev, "Using Sil164 TMDS transmitter\n"); + if (ast->tx_chip_types & AST_TX_DP501_BIT) + drm_info(dev, "Using DP501 DisplayPort transmitter\n"); + + return 0; +} + +static int ast_get_dram_info(struct drm_device *dev) +{ + struct device_node *np = dev->dev->of_node; + struct ast_private *ast = to_ast_private(dev); + uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; + uint32_t denum, num, div, ref_pll, dsel; + + switch (ast->config_mode) { + case ast_use_dt: + /* + * If some properties are missing, use reasonable + * defaults for AST2400 + */ + if (of_property_read_u32(np, "aspeed,mcr-configuration", + &mcr_cfg)) + mcr_cfg = 0x00000577; + if (of_property_read_u32(np, "aspeed,mcr-scu-mpll", + &mcr_scu_mpll)) + mcr_scu_mpll = 0x000050C0; + if (of_property_read_u32(np, "aspeed,mcr-scu-strap", + &mcr_scu_strap)) + mcr_scu_strap = 0; + break; + case ast_use_p2a: + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + mcr_cfg = ast_read32(ast, 0x10004); + mcr_scu_mpll = ast_read32(ast, 0x10120); + mcr_scu_strap = ast_read32(ast, 0x10170); + break; + case ast_use_defaults: + default: + ast->dram_bus_width = 16; + ast->dram_type = AST_DRAM_1Gx16; + if (ast->chip == AST2500) + ast->mclk = 800; + else + ast->mclk = 396; + return 0; + } + + if (mcr_cfg & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; + + if (ast->chip == AST2500) { + switch (mcr_cfg & 0x03) { + case 0: + ast->dram_type = AST_DRAM_1Gx16; + break; + default: + case 1: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_4Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_8Gx16; + break; + } + } else if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (mcr_cfg & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; + break; + default: + case 1: + ast->dram_type = AST_DRAM_1Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (mcr_cfg & 0x0c) { + case 0: + case 4: + ast->dram_type = AST_DRAM_512Mx16; + break; + case 8: + if (mcr_cfg & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + ast->dram_type = AST_DRAM_1Gx32; + break; + } + } + + if (mcr_scu_strap & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = mcr_scu_mpll & 0x1f; + num = (mcr_scu_mpll & 
0x3fe0) >> 5; + dsel = (mcr_scu_mpll & 0xc000) >> 14; + switch (dsel) { + case 3: + div = 0x4; + break; + case 2: + case 1: + div = 0x2; + break; + default: + div = 0x1; + break; + } + ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000)); + return 0; +} + +/* + * Run this function as part of the HW device cleanup; not + * when the DRM device gets released. + */ +static void ast_device_release(void *data) +{ + struct ast_private *ast = data; + + /* enable standard VGA decode */ + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04); +} + +struct ast_private *ast_device_create(const struct drm_driver *drv, + struct pci_dev *pdev, unsigned long flags) +{ + struct drm_device *dev; + struct ast_private *ast; + bool need_post; + int ret = 0; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_private, base); + if (IS_ERR(ast)) + return ast; + dev = &ast->base; + + pci_set_drvdata(pdev, dev); + + ret = drmm_mutex_init(dev, &ast->ioregs_lock); + if (ret) + return ERR_PTR(ret); + + ast->regs = pcim_iomap(pdev, 1, 0); + if (!ast->regs) + return ERR_PTR(-EIO); + + /* + * If we don't have IO space at all, use MMIO now and + * assume the chip has MMIO enabled by default (rev 0x20 + * and higher). + */ + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) { + drm_info(dev, "platform has no IO space, trying MMIO\n"); + ast->ioregs = ast->regs + AST_IO_MM_OFFSET; + } + + /* "map" IO regs if the above hasn't done so already */ + if (!ast->ioregs) { + ast->ioregs = pcim_iomap(pdev, 2, 0); + if (!ast->ioregs) + return ERR_PTR(-EIO); + } + + ast_detect_chip(dev, &need_post); + + ret = ast_get_dram_info(dev); + if (ret) + return ERR_PTR(ret); + + drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n", ast->mclk, + ast->dram_type, ast->dram_bus_width); + + if (need_post) + ast_post_gpu(dev); + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + /* map reserved buffer */ + ast->dp501_fw_buf = NULL; + if (dev->vram_mm->vram_size < pci_resource_len(pdev, 0)) { + ast->dp501_fw_buf = + pci_iomap_range(pdev, 0, dev->vram_mm->vram_size, 0); + if (!ast->dp501_fw_buf) + drm_info(dev, "failed to map reserved buffer!\n"); + } + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + ret = devm_add_action_or_reset(dev->dev, ast_device_release, ast); + if (ret) + return ERR_PTR(ret); + + return ast; +} diff --git a/drivers/gpu/drm/ast_loongson/ast_mm.c b/drivers/gpu/drm/ast_loongson/ast_mm.c new file mode 100644 index 0000000000000000000000000000000000000000..6e999408dda9a25acbe492329eb3c94bf40d2b28 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_mm.c @@ -0,0 +1,101 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: Dave Airlie + */ + +#include + +#include +#include +#include + +#include "ast_drv.h" + +static u32 ast_get_vram_size(struct ast_private *ast) +{ + u8 jreg; + u32 vram_size; + + ast_open_key(ast); + + vram_size = AST_VIDMEM_DEFAULT_SIZE; + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff); + switch (jreg & 3) { + case 0: + vram_size = AST_VIDMEM_SIZE_8M; + break; + case 1: + vram_size = AST_VIDMEM_SIZE_16M; + break; + case 2: + vram_size = AST_VIDMEM_SIZE_32M; + break; + case 3: + vram_size = AST_VIDMEM_SIZE_64M; + break; + } + + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff); + switch (jreg & 0x03) { + case 1: + vram_size -= 0x100000; + break; + case 2: + vram_size -= 0x200000; + break; + case 3: + vram_size -= 0x400000; + break; + } + + return vram_size; +} + +int ast_mm_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct pci_dev *pdev = to_pci_dev(dev->dev); + resource_size_t base, size; + u32 vram_size; + int ret; + + base = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + + /* Don't fail on errors, but performance might be reduced. */ + devm_arch_io_reserve_memtype_wc(dev->dev, base, size); + devm_arch_phys_wc_add(dev->dev, base, size); + + vram_size = ast_get_vram_size(ast); + + ret = drmm_vram_helper_init(dev, base, vram_size); + if (ret) { + drm_err(dev, "Error initializing VRAM MM; %d\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/gpu/drm/ast_loongson/ast_mode.c b/drivers/gpu/drm/ast_loongson/ast_mode.c new file mode 100644 index 0000000000000000000000000000000000000000..5374fc38757f93c7ce9bc2618a780800dc5237d7 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_mode.c @@ -0,0 +1,1881 @@ +/* + * Copyright 2012 Red Hat Inc. + * Parts based on xf86-video-ast + * Copyright (c) 2005 ASPEED Technology Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ast_drv.h" +#include "ast_tables.h" + +#define AST_LUT_SIZE 256 + +static inline void ast_load_palette_index(struct ast_private *ast, u8 index, + u8 red, u8 green, u8 blue) +{ + ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index); + ast_io_read8(ast, AST_IO_SEQ_PORT); + ast_io_write8(ast, AST_IO_DAC_DATA, red); + ast_io_read8(ast, AST_IO_SEQ_PORT); + ast_io_write8(ast, AST_IO_DAC_DATA, green); + ast_io_read8(ast, AST_IO_SEQ_PORT); + ast_io_write8(ast, AST_IO_DAC_DATA, blue); + ast_io_read8(ast, AST_IO_SEQ_PORT); +} + +static void ast_crtc_set_gamma_linear(struct ast_private *ast, + const struct drm_format_info *format) +{ + int i; + + switch (format->format) { + case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */ + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB8888: + for (i = 0; i < AST_LUT_SIZE; i++) + ast_load_palette_index(ast, i, i, i, i); + break; + default: + drm_warn_once(&ast->base, + "Unsupported format %p4cc for gamma correction\n", + &format->format); + break; + } +} + +static void ast_crtc_set_gamma(struct ast_private *ast, + const struct drm_format_info *format, + struct drm_color_lut *lut) +{ + int i; + + switch (format->format) { + case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */ + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB8888: + for (i = 0; i < AST_LUT_SIZE; i++) + ast_load_palette_index(ast, i, lut[i].red >> 8, + lut[i].green >> 8, + lut[i].blue >> 8); + break; + default: + drm_warn_once(&ast->base, + "Unsupported format %p4cc for gamma correction\n", + &format->format); + break; + } +} + +static bool ast_get_vbios_mode_info(const struct drm_format_info *format, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + struct ast_vbios_mode_info *vbios_mode) +{ + u32 refresh_rate_index = 0, refresh_rate; + const struct ast_vbios_enhtable *best = NULL; + u32 hborder, vborder; + bool check_sync; + + switch (format->cpp[0] * 8) { + case 8: + vbios_mode->std_table = &vbios_stdtable[VGAModeIndex]; + break; + case 16: + vbios_mode->std_table = &vbios_stdtable[HiCModeIndex]; + break; + case 24: + case 32: + vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex]; + break; + default: + return false; + } + + switch (mode->crtc_hdisplay) { + case 640: + vbios_mode->enh_table = &res_640x480[refresh_rate_index]; + break; + case 800: + vbios_mode->enh_table = &res_800x600[refresh_rate_index]; + break; + case 1024: + vbios_mode->enh_table = &res_1024x768[refresh_rate_index]; + break; + case 1152: + vbios_mode->enh_table = &res_1152x864[refresh_rate_index]; + break; + case 1280: + if (mode->crtc_vdisplay == 800) + vbios_mode->enh_table = + &res_1280x800[refresh_rate_index]; + else + vbios_mode->enh_table = + &res_1280x1024[refresh_rate_index]; + break; + case 1360: + vbios_mode->enh_table = &res_1360x768[refresh_rate_index]; + break; + case 1440: + vbios_mode->enh_table = &res_1440x900[refresh_rate_index]; + break; + case 1600: + if (mode->crtc_vdisplay == 900) + vbios_mode->enh_table = + &res_1600x900[refresh_rate_index]; + else + vbios_mode->enh_table = + &res_1600x1200[refresh_rate_index]; + break; + case 1680: + vbios_mode->enh_table = &res_1680x1050[refresh_rate_index]; + break; + case 1920: + if (mode->crtc_vdisplay == 1080) + vbios_mode->enh_table = + &res_1920x1080[refresh_rate_index]; + else 
+ vbios_mode->enh_table = + &res_1920x1200[refresh_rate_index]; + break; + default: + return false; + } + + refresh_rate = drm_mode_vrefresh(mode); + check_sync = vbios_mode->enh_table->flags & WideScreenMode; + + while (1) { + const struct ast_vbios_enhtable *loop = vbios_mode->enh_table; + + while (loop->refresh_rate != 0xff) { + if ((check_sync) && + (((mode->flags & DRM_MODE_FLAG_NVSYNC) && + (loop->flags & PVSync)) || + ((mode->flags & DRM_MODE_FLAG_PVSYNC) && + (loop->flags & NVSync)) || + ((mode->flags & DRM_MODE_FLAG_NHSYNC) && + (loop->flags & PHSync)) || + ((mode->flags & DRM_MODE_FLAG_PHSYNC) && + (loop->flags & NHSync)))) { + loop++; + continue; + } + if (loop->refresh_rate <= refresh_rate && + (!best || loop->refresh_rate > best->refresh_rate)) + best = loop; + loop++; + } + if (best || !check_sync) + break; + check_sync = 0; + } + + if (best) + vbios_mode->enh_table = best; + + hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0; + vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0; + + adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht; + adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder; + adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder; + adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder + + vbios_mode->enh_table->hfp; + adjusted_mode->crtc_hsync_end = + (vbios_mode->enh_table->hde + hborder + + vbios_mode->enh_table->hfp + vbios_mode->enh_table->hsync); + + adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt; + adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder; + adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder; + adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder + + vbios_mode->enh_table->vfp; + adjusted_mode->crtc_vsync_end = + (vbios_mode->enh_table->vde + vborder + + vbios_mode->enh_table->vfp + vbios_mode->enh_table->vsync); + + return true; +} + +static void +ast_set_vbios_color_reg(struct ast_private *ast, + const struct drm_format_info *format, + const struct ast_vbios_mode_info *vbios_mode) +{ + u32 color_index; + + switch (format->cpp[0]) { + case 1: + color_index = VGAModeIndex - 1; + break; + case 2: + color_index = HiCModeIndex; + break; + case 3: + case 4: + color_index = TrueCModeIndex; + break; + default: + return; + } + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, + (u8)((color_index & 0x0f) << 4)); + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00); + + if (vbios_mode->enh_table->flags & NewModeInfo) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, + format->cpp[0] * 8); + } +} + +static void ast_set_vbios_mode_reg(struct ast_private *ast, + const struct drm_display_mode *adjusted_mode, + const struct ast_vbios_mode_info *vbios_mode) +{ + u32 refresh_rate_index, mode_id; + + refresh_rate_index = vbios_mode->enh_table->refresh_rate_index; + mode_id = vbios_mode->enh_table->mode_id; + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, + refresh_rate_index & 0xff); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff); + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00); + + if (vbios_mode->enh_table->flags & NewModeInfo) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, + adjusted_mode->clock / 1000); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, + adjusted_mode->crtc_hdisplay); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, + adjusted_mode->crtc_hdisplay >> 8); + 
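/* scratch registers 0x96/0x97 take the vertical size, low byte first */ + 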
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, + adjusted_mode->crtc_vdisplay); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, + adjusted_mode->crtc_vdisplay >> 8); + } +} + +static void ast_set_std_reg(struct ast_private *ast, + struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + const struct ast_vbios_stdtable *stdtable; + u32 i; + u8 jreg; + + stdtable = vbios_mode->std_table; + + jreg = stdtable->misc; + ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg); + + /* Set SEQ; except Screen Disable field */ + ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03); + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, + stdtable->seq[0]); + for (i = 1; i < 4; i++) { + jreg = stdtable->seq[i]; + ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1), jreg); + } + + /* Set CRTC; except base address and offset */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00); + for (i = 0; i < 12; i++) + ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]); + for (i = 14; i < 19; i++) + ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]); + for (i = 20; i < 25; i++) + ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]); + + /* set AR */ + jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ); + for (i = 0; i < 20; i++) { + jreg = stdtable->ar[i]; + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i); + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg); + } + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14); + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00); + + jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ); + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20); + + /* Set GR */ + for (i = 0; i < 9; i++) + ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]); +} + +static void ast_set_crtc_reg(struct ast_private *ast, + struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, + jregAE = 0; + u16 temp, precache = 0; + + if ((ast->chip == AST2500 || ast->chip == AST2600) && + (vbios_mode->enh_table->flags & AST2500PreCatchCRT)) + precache = 40; + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00); + + temp = (mode->crtc_htotal >> 3) - 5; + if (temp & 0x100) + jregAC |= 0x01; /* HT D[8] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp); + + temp = (mode->crtc_hdisplay >> 3) - 1; + if (temp & 0x100) + jregAC |= 0x04; /* HDE D[8] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp); + + temp = (mode->crtc_hblank_start >> 3) - 1; + if (temp & 0x100) + jregAC |= 0x10; /* HBS D[8] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp); + + temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f; + if (temp & 0x20) + jreg05 |= 0x80; /* HBE D[5] */ + if (temp & 0x40) + jregAD |= 0x01; /* HBE D[5] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, + (temp & 0x1f)); + + temp = ((mode->crtc_hsync_start - precache) >> 3) - 1; + if (temp & 0x100) + jregAC |= 0x40; /* HRS D[5] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp); + + temp = (((mode->crtc_hsync_end - precache) >> 3) - 1) & 0x3f; + if (temp & 0x20) + jregAD |= 0x04; /* HRE D[5] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, + (u8)((temp & 0x1f) | jreg05)); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD); + + // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels); + if ((ast->chip == AST2600) && 
(mode->crtc_vdisplay == 1080)) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x02); + else + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x00); + + /* vert timings */ + temp = (mode->crtc_vtotal) - 2; + if (temp & 0x100) + jreg07 |= 0x01; + if (temp & 0x200) + jreg07 |= 0x20; + if (temp & 0x400) + jregAE |= 0x01; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp); + + temp = (mode->crtc_vsync_start) - 1; + if (temp & 0x100) + jreg07 |= 0x04; + if (temp & 0x200) + jreg07 |= 0x80; + if (temp & 0x400) + jregAE |= 0x08; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp); + + temp = (mode->crtc_vsync_end - 1) & 0x3f; + if (temp & 0x10) + jregAE |= 0x20; + if (temp & 0x20) + jregAE |= 0x40; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf); + + temp = mode->crtc_vdisplay - 1; + if (temp & 0x100) + jreg07 |= 0x02; + if (temp & 0x200) + jreg07 |= 0x40; + if (temp & 0x400) + jregAE |= 0x02; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp); + + temp = mode->crtc_vblank_start - 1; + if (temp & 0x100) + jreg07 |= 0x08; + if (temp & 0x200) + jreg09 |= 0x20; + if (temp & 0x400) + jregAE |= 0x04; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp); + + temp = mode->crtc_vblank_end - 1; + if (temp & 0x100) + jregAE |= 0x10; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, + (jregAE | 0x80)); + + if (precache) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x80); + else + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x00); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80); +} + +static void ast_set_offset_reg(struct ast_private *ast, + struct drm_framebuffer *fb) +{ + u16 offset; + + offset = fb->pitches[0] >> 3; + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f); +} + +static void ast_set_dclk_reg(struct ast_private *ast, + struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + const struct ast_vbios_dclk_info *clk_info; + + if ((ast->chip == AST2500) || (ast->chip == AST2600)) + clk_info = + &dclk_table_ast2500[vbios_mode->enh_table->dclk_index]; + else + clk_info = &dclk_table[vbios_mode->enh_table->dclk_index]; + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, + clk_info->param1); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, + clk_info->param2); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f, + (clk_info->param3 & 0xc0) | + ((clk_info->param3 & 0x3) << 4)); +} + +static void ast_set_color_reg(struct ast_private *ast, + const struct drm_format_info *format) +{ + u8 jregA0 = 0, jregA3 = 0, jregA8 = 0; + + switch (format->cpp[0] * 8) { + case 8: + jregA0 = 0x70; + jregA3 = 0x01; + jregA8 = 0x00; + break; + case 15: + case 16: + jregA0 = 0x70; + jregA3 = 0x04; + jregA8 = 0x02; + break; + case 32: + jregA0 = 0x70; + jregA3 = 0x08; + jregA8 = 0x02; + break; + } + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8); +} + +static void ast_set_crtthd_reg(struct ast_private *ast) +{ + /* Set Threshold */ + if (ast->chip == AST2600) { + 
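/* AST2600 uses larger CRT threshold values than earlier chips */ + 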
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0); + } else if (ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60); + } else if (ast->chip == AST2100 || ast->chip == AST1100 || + ast->chip == AST2200 || ast->chip == AST2150) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f); + } else { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f); + } +} + +static void ast_set_sync_reg(struct ast_private *ast, + struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + u8 jreg; + + jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ); + jreg &= ~0xC0; + if (vbios_mode->enh_table->flags & NVSync) + jreg |= 0x80; + if (vbios_mode->enh_table->flags & NHSync) + jreg |= 0x40; + ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg); +} + +static void ast_set_start_address_crt1(struct ast_private *ast, + unsigned int offset) +{ + u32 addr; + + addr = offset >> 2; + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, + (u8)((addr >> 8) & 0xff)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, + (u8)((addr >> 16) & 0xff)); +} + +static void ast_wait_for_vretrace(struct ast_private *ast) +{ + unsigned long timeout = jiffies + HZ; + u8 vgair1; + + do { + vgair1 = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ); + } while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && + time_before(jiffies, timeout)); +} + +/* + * Primary plane + */ + +static const uint32_t ast_primary_plane_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGB565, + DRM_FORMAT_C8, +}; + +static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct drm_plane_state *new_plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc_state *new_crtc_state = NULL; + struct ast_crtc_state *new_ast_crtc_state; + int ret; + + if (new_plane_state->crtc) + new_crtc_state = drm_atomic_get_new_crtc_state( + state, new_plane_state->crtc); + + ret = drm_atomic_helper_check_plane_state( + new_plane_state, new_crtc_state, DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, false, true); + if (ret) { + return ret; + } else if (!new_plane_state->visible) { + if (drm_WARN_ON( + dev, + new_plane_state->crtc)) /* cannot legally happen */ + return -EINVAL; + else + return 0; + } + + new_ast_crtc_state = to_ast_crtc_state(new_crtc_state); + + new_ast_crtc_state->format = new_plane_state->fb->format; + + return 0; +} + +static void +ast_primary_plane_helper_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct ast_private *ast = to_ast_private(dev); + struct drm_plane_state *plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_plane_state *old_plane_state = + drm_atomic_get_old_plane_state(state, plane); + struct drm_framebuffer *old_fb = old_plane_state->fb; + struct drm_gem_vram_object *gbo; + s64 gpu_addr; + + if (!old_fb || (fb->format != old_fb->format)) { + struct drm_crtc *crtc = plane_state->crtc; + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); + struct ast_crtc_state *ast_crtc_state = + 
to_ast_crtc_state(crtc_state); + struct ast_vbios_mode_info *vbios_mode_info = + &ast_crtc_state->vbios_mode_info; + + ast_set_color_reg(ast, fb->format); + ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info); + } + + gbo = drm_gem_vram_of_gem(fb->obj[0]); + gpu_addr = drm_gem_vram_offset(gbo); + if (drm_WARN_ON_ONCE(dev, gpu_addr < 0)) + return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */ + + ast_set_offset_reg(ast, fb); + ast_set_start_address_crt1(ast, (u32)gpu_addr); + + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00); +} + +static void +ast_primary_plane_helper_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct ast_private *ast = to_ast_private(plane->dev); + + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20); +} + +static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = { + DRM_GEM_VRAM_PLANE_HELPER_FUNCS, + .atomic_check = ast_primary_plane_helper_atomic_check, + .atomic_update = ast_primary_plane_helper_atomic_update, + .atomic_disable = ast_primary_plane_helper_atomic_disable, +}; + +static const struct drm_plane_funcs ast_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static int ast_primary_plane_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_plane *primary_plane = &ast->primary_plane; + int ret; + + ret = drm_universal_plane_init(dev, primary_plane, 0x01, + &ast_primary_plane_funcs, + ast_primary_plane_formats, + ARRAY_SIZE(ast_primary_plane_formats), + NULL, DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) { + drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret); + return ret; + } + drm_plane_helper_add(primary_plane, &ast_primary_plane_helper_funcs); + + return 0; +} + +/* + * Cursor plane + */ + +static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, + int height) +{ + union { + u32 ul; + u8 b[4]; + } srcdata32[2], data32; + union { + u16 us; + u8 b[2]; + } data16; + u32 csum = 0; + s32 alpha_dst_delta, last_alpha_dst_delta; + u8 __iomem *dstxor; + const u8 *srcxor; + int i, j; + u32 per_pixel_copy, two_pixel_copy; + + alpha_dst_delta = AST_MAX_HWC_WIDTH << 1; + last_alpha_dst_delta = alpha_dst_delta - (width << 1); + + srcxor = src; + dstxor = (u8 *)dst + last_alpha_dst_delta + + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta; + per_pixel_copy = width & 1; + two_pixel_copy = width >> 1; + + for (j = 0; j < height; j++) { + for (i = 0; i < two_pixel_copy; i++) { + srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0; + srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0; + data32.b[0] = srcdata32[0].b[1] | + (srcdata32[0].b[0] >> 4); + data32.b[1] = srcdata32[0].b[3] | + (srcdata32[0].b[2] >> 4); + data32.b[2] = srcdata32[1].b[1] | + (srcdata32[1].b[0] >> 4); + data32.b[3] = srcdata32[1].b[3] | + (srcdata32[1].b[2] >> 4); + + writel(data32.ul, dstxor); + csum += data32.ul; + + dstxor += 4; + srcxor += 8; + } + + for (i = 0; i < per_pixel_copy; i++) { + srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0; + data16.b[0] = srcdata32[0].b[1] | + (srcdata32[0].b[0] >> 4); + data16.b[1] = srcdata32[0].b[3] | + (srcdata32[0].b[2] >> 4); + writew(data16.us, dstxor); + csum += (u32)data16.us; + + dstxor += 2; + srcxor += 4; + } + dstxor += 
last_alpha_dst_delta; + } + + /* write checksum + signature */ + dst += AST_HWC_SIZE; + writel(csum, dst); + writel(width, dst + AST_HWC_SIGNATURE_SizeX); + writel(height, dst + AST_HWC_SIGNATURE_SizeY); + writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX); + writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY); +} + +static void ast_set_cursor_base(struct ast_private *ast, u64 address) +{ + u8 addr0 = (address >> 3) & 0xff; + u8 addr1 = (address >> 11) & 0xff; + u8 addr2 = (address >> 19) & 0xff; + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2); +} + +static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y, + u8 x_offset, u8 y_offset) +{ + u8 x0 = (x & 0x00ff); + u8 x1 = (x & 0x0f00) >> 8; + u8 y0 = (y & 0x00ff); + u8 y1 = (y & 0x0700) >> 8; + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1); +} + +static void ast_set_cursor_enabled(struct ast_private *ast, bool enabled) +{ + static const u8 mask = + (u8) ~(AST_IO_VGACRCB_HWC_16BPP | AST_IO_VGACRCB_HWC_ENABLED); + + u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP; + + if (enabled) + vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED; + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, mask, vgacrcb); +} + +static const uint32_t ast_cursor_plane_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct drm_framebuffer *new_fb = new_plane_state->fb; + struct drm_crtc_state *new_crtc_state = NULL; + int ret; + + if (new_plane_state->crtc) + new_crtc_state = drm_atomic_get_new_crtc_state( + state, new_plane_state->crtc); + + ret = drm_atomic_helper_check_plane_state( + new_plane_state, new_crtc_state, DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, true, true); + if (ret || !new_plane_state->visible) + return ret; + + if (new_fb->width > AST_MAX_HWC_WIDTH || + new_fb->height > AST_MAX_HWC_HEIGHT) + return -EINVAL; + + return 0; +} + +static void +ast_cursor_plane_helper_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct ast_plane *ast_plane = to_ast_plane(plane); + struct drm_plane_state *plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct drm_shadow_plane_state *shadow_plane_state = + to_drm_shadow_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_plane_state *old_plane_state = + drm_atomic_get_old_plane_state(state, plane); + struct drm_framebuffer *old_fb = old_plane_state->fb; + struct ast_private *ast = to_ast_private(plane->dev); + struct iosys_map dst_map = ast_plane->map; + u64 dst_off = ast_plane->off; + struct iosys_map src_map = shadow_plane_state->data[0]; + unsigned int offset_x, offset_y; + u16 x, y; + u8 x_offset, y_offset; + u8 __iomem *dst; + u8 __iomem *sig; + const u8 *src; + + src = src_map.vaddr; /* TODO: Use mapping abstraction properly */ + dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */ + sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */ + + /* + * Do data transfer to HW cursor BO. 
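
Note: the loop in ast_update_cursor_image() above converts each ARGB8888 cursor pixel to the ARGB4444 layout used by the hardware cursor, keeping only the four most significant bits of every channel (the driver batches two pixels per iteration and also accumulates a checksum over the output). A stand-alone sketch of the per-pixel step; argb8888_to_argb4444() is a hypothetical helper, not part of the driver::

    #include <stdint.h>

    /* Keep the high nibble of each 8-bit channel; A, R, G, B keep their order. */
    static uint16_t argb8888_to_argb4444(uint32_t px)
    {
            uint16_t a = (px >> 28) & 0xf;
            uint16_t r = (px >> 20) & 0xf;
            uint16_t g = (px >> 12) & 0xf;
            uint16_t b = (px >> 4) & 0xf;

            return (a << 12) | (r << 8) | (g << 4) | b;
    }
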
If a new cursor image was installed, + * point the scanout engine to dst_gbo's offset and page-flip the HWC buffers. + */ + + ast_update_cursor_image(dst, src, fb->width, fb->height); + + if (fb != old_fb) + ast_set_cursor_base(ast, dst_off); + + /* + * Update location in HWC signature and registers. + */ + + writel(plane_state->crtc_x, sig + AST_HWC_SIGNATURE_X); + writel(plane_state->crtc_y, sig + AST_HWC_SIGNATURE_Y); + + offset_x = AST_MAX_HWC_WIDTH - fb->width; + offset_y = AST_MAX_HWC_HEIGHT - fb->height; + + if (plane_state->crtc_x < 0) { + x_offset = (-plane_state->crtc_x) + offset_x; + x = 0; + } else { + x_offset = offset_x; + x = plane_state->crtc_x; + } + if (plane_state->crtc_y < 0) { + y_offset = (-plane_state->crtc_y) + offset_y; + y = 0; + } else { + y_offset = offset_y; + y = plane_state->crtc_y; + } + + ast_set_cursor_location(ast, x, y, x_offset, y_offset); + + /* Dummy write to enable HWC and make the HW pick-up the changes. */ + ast_set_cursor_enabled(ast, true); +} + +static void +ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct ast_private *ast = to_ast_private(plane->dev); + + ast_set_cursor_enabled(ast, false); +} + +static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = { + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + .atomic_check = ast_cursor_plane_helper_atomic_check, + .atomic_update = ast_cursor_plane_helper_atomic_update, + .atomic_disable = ast_cursor_plane_helper_atomic_disable, +}; + +static void ast_cursor_plane_destroy(struct drm_plane *plane) +{ + struct ast_plane *ast_plane = to_ast_plane(plane); + struct drm_gem_vram_object *gbo = ast_plane->gbo; + struct iosys_map map = ast_plane->map; + + drm_gem_vram_vunmap(gbo, &map); + drm_gem_vram_unpin(gbo); + drm_gem_vram_put(gbo); + + drm_plane_cleanup(plane); +} + +static const struct drm_plane_funcs ast_cursor_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = ast_cursor_plane_destroy, + DRM_GEM_SHADOW_PLANE_FUNCS, +}; + +static int ast_cursor_plane_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct ast_plane *ast_plane = &ast->cursor_plane; + struct drm_plane *cursor_plane = &ast_plane->base; + size_t size; + struct drm_gem_vram_object *gbo; + struct iosys_map map; + int ret; + s64 off; + + /* + * Allocate backing storage for cursors. The BOs are permanently + * pinned to the top end of the VRAM. + */ + + size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); + + gbo = drm_gem_vram_create(dev, size, 0); + if (IS_ERR(gbo)) + return PTR_ERR(gbo); + + ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM | + DRM_GEM_VRAM_PL_FLAG_TOPDOWN); + if (ret) + goto err_drm_gem_vram_put; + ret = drm_gem_vram_vmap(gbo, &map); + if (ret) + goto err_drm_gem_vram_unpin; + off = drm_gem_vram_offset(gbo); + if (off < 0) { + ret = off; + goto err_drm_gem_vram_vunmap; + } + + ast_plane->gbo = gbo; + ast_plane->map = map; + ast_plane->off = off; + + /* + * Create the cursor plane. The plane's destroy callback will release + * the backing storages' BO memory. 
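
Note: the cursor-position handling in ast_cursor_plane_helper_atomic_update() above folds negative CRTC coordinates into the HWC offset registers, because the hardware takes an unsigned screen position plus an offset into the (at most AST_MAX_HWC_WIDTH x AST_MAX_HWC_HEIGHT) cursor image. The clamping for one axis, isolated as a sketch (ast_cursor_clamp_axis() is a hypothetical name)::

    #include <stdint.h>

    /* Fold a possibly negative CRTC coordinate into position + image offset. */
    static void ast_cursor_clamp_axis(int crtc_pos, unsigned int base_off,
                                      uint16_t *pos, uint8_t *off)
    {
            if (crtc_pos < 0) {
                    *off = -crtc_pos + base_off; /* shift into the image */
                    *pos = 0;
            } else {
                    *off = base_off;
                    *pos = crtc_pos;
            }
    }
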
+ */ + + ret = drm_universal_plane_init(dev, cursor_plane, 0x01, + &ast_cursor_plane_funcs, + ast_cursor_plane_formats, + ARRAY_SIZE(ast_cursor_plane_formats), + NULL, DRM_PLANE_TYPE_CURSOR, NULL); + if (ret) { + drm_err(dev, "drm_universal_plane failed(): %d\n", ret); + goto err_drm_gem_vram_vunmap; + } + drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs); + + return 0; + +err_drm_gem_vram_vunmap: + drm_gem_vram_vunmap(gbo, &map); +err_drm_gem_vram_unpin: + drm_gem_vram_unpin(gbo); +err_drm_gem_vram_put: + drm_gem_vram_put(gbo); + return ret; +} + +/* + * CRTC + */ + +static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct ast_private *ast = to_ast_private(crtc->dev); + u8 ch = AST_DPMS_VSYNC_OFF | AST_DPMS_HSYNC_OFF; + struct ast_crtc_state *ast_state; + const struct drm_format_info *format; + struct ast_vbios_mode_info *vbios_mode_info; + + /* TODO: Maybe control display signal generation with + * Sync Enable (bit CR17.7). + */ + switch (mode) { + case DRM_MODE_DPMS_ON: + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, 0); + if (ast->tx_chip_types & AST_TX_DP501_BIT) + ast_set_dp501_video_output(crtc->dev, 1); + + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { + ast_dp_power_on_off(crtc->dev, AST_DP_POWER_ON); + ast_wait_for_vretrace(ast); + ast_dp_set_on_off(crtc->dev, 1); + } + + ast_state = to_ast_crtc_state(crtc->state); + format = ast_state->format; + + if (format) { + vbios_mode_info = &ast_state->vbios_mode_info; + + ast_set_color_reg(ast, format); + ast_set_vbios_color_reg(ast, format, vbios_mode_info); + if (crtc->state->gamma_lut) + ast_crtc_set_gamma( + ast, format, + crtc->state->gamma_lut->data); + else + ast_crtc_set_gamma_linear(ast, format); + } + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + ch = mode; + if (ast->tx_chip_types & AST_TX_DP501_BIT) + ast_set_dp501_video_output(crtc->dev, 0); + + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { + ast_dp_set_on_off(crtc->dev, 0); + ast_dp_power_on_off(crtc->dev, AST_DP_POWER_OFF); + } + + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, ch); + break; + } +} + +static enum drm_mode_status +ast_crtc_helper_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct ast_private *ast = to_ast_private(crtc->dev); + enum drm_mode_status status; + uint32_t jtemp; + + if (ast->support_wide_screen) { + if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050)) + return MODE_OK; + if ((mode->hdisplay == 1280) && (mode->vdisplay == 800)) + return MODE_OK; + if ((mode->hdisplay == 1440) && (mode->vdisplay == 900)) + return MODE_OK; + if ((mode->hdisplay == 1360) && (mode->vdisplay == 768)) + return MODE_OK; + if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) + return MODE_OK; + if ((mode->hdisplay == 1152) && (mode->vdisplay == 864)) + return MODE_OK; + + if ((ast->chip == AST2100) || (ast->chip == AST2200) || + (ast->chip == AST2300) || (ast->chip == AST2400) || + (ast->chip == AST2500) || (ast->chip == AST2600)) { + if ((mode->hdisplay == 1920) && + (mode->vdisplay == 1080)) + return MODE_OK; + + if ((mode->hdisplay == 1920) && + (mode->vdisplay == 1200)) { + jtemp = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (jtemp & 0x01) + return MODE_NOMODE; + else + return MODE_OK; + } + } + } + + status = MODE_NOMODE; + + switch (mode->hdisplay) { + case 640: + if 
(mode->vdisplay == 480) + status = MODE_OK; + break; + case 800: + if (mode->vdisplay == 600) + status = MODE_OK; + break; + case 1024: + if (mode->vdisplay == 768) + status = MODE_OK; + break; + case 1152: + if (mode->vdisplay == 864) + status = MODE_OK; + break; + case 1280: + if (mode->vdisplay == 1024) + status = MODE_OK; + break; + case 1600: + if (mode->vdisplay == 1200) + status = MODE_OK; + break; + default: + break; + } + + return status; +} + +static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); + struct drm_crtc_state *old_crtc_state = + drm_atomic_get_old_crtc_state(state, crtc); + struct ast_crtc_state *old_ast_crtc_state = + to_ast_crtc_state(old_crtc_state); + struct drm_device *dev = crtc->dev; + struct ast_crtc_state *ast_state; + const struct drm_format_info *format; + bool succ; + int ret; + + if (!crtc_state->enable) + return 0; + + ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state); + if (ret) + return ret; + + ast_state = to_ast_crtc_state(crtc_state); + + format = ast_state->format; + if (drm_WARN_ON_ONCE(dev, !format)) + return -EINVAL; /* BUG: We didn't set format in primary check(). */ + + /* + * The gamma LUT has to be reloaded after changing the primary + * plane's color format. + */ + if (old_ast_crtc_state->format != format) + crtc_state->color_mgmt_changed = true; + + if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) { + if (crtc_state->gamma_lut->length != + AST_LUT_SIZE * sizeof(struct drm_color_lut)) { + drm_err(dev, "Wrong size for gamma_lut %zu\n", + crtc_state->gamma_lut->length); + return -EINVAL; + } + } + + succ = ast_get_vbios_mode_info(format, &crtc_state->mode, + &crtc_state->adjusted_mode, + &ast_state->vbios_mode_info); + if (!succ) + return -EINVAL; + + return 0; +} + +static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); + struct drm_device *dev = crtc->dev; + struct ast_private *ast = to_ast_private(dev); + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); + struct ast_vbios_mode_info *vbios_mode_info = + &ast_crtc_state->vbios_mode_info; + + /* + * The gamma LUT has to be reloaded after changing the primary + * plane's color format. 
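
Note: ast_crtc_helper_mode_valid() above is a whitelist. The wide-screen modes are checked first (1920x1200 additionally depends on bit 0 of CRTC register 0xd1), then the switch falls back to the standard VESA modes. That final switch is equivalent to a small table scan, sketched here (ast_std_modes is a hypothetical table, not in the driver)::

    #include <stdbool.h>
    #include <stddef.h>

    static const struct {
            int w, h;
    } ast_std_modes[] = {
            {  640,  480 }, {  800,  600 }, { 1024,  768 },
            { 1152,  864 }, { 1280, 1024 }, { 1600, 1200 },
    };

    static bool ast_mode_is_std(int hdisplay, int vdisplay)
    {
            size_t i;

            for (i = 0; i < sizeof(ast_std_modes) / sizeof(ast_std_modes[0]); i++)
                    if (hdisplay == ast_std_modes[i].w &&
                        vdisplay == ast_std_modes[i].h)
                            return true;
            return false;
    }
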
+ */ + if (crtc_state->enable && crtc_state->color_mgmt_changed) { + if (crtc_state->gamma_lut) + ast_crtc_set_gamma(ast, ast_crtc_state->format, + crtc_state->gamma_lut->data); + else + ast_crtc_set_gamma_linear(ast, ast_crtc_state->format); + } + + //Set Aspeed Display-Port + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) + ast_dp_set_mode(crtc, vbios_mode_info); +} + +static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct ast_private *ast = to_ast_private(dev); + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); + struct ast_vbios_mode_info *vbios_mode_info = + &ast_crtc_state->vbios_mode_info; + struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; + + ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); + ast_set_std_reg(ast, adjusted_mode, vbios_mode_info); + ast_set_crtc_reg(ast, adjusted_mode, vbios_mode_info); + ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info); + ast_set_crtthd_reg(ast); + ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info); + + ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON); +} + +static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *old_crtc_state = + drm_atomic_get_old_crtc_state(state, crtc); + struct drm_device *dev = crtc->dev; + struct ast_private *ast = to_ast_private(dev); + + ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + + /* + * HW cursors require the underlying primary plane and CRTC to + * display a valid mode and image. This is not the case during + * full modeset operations. So we temporarily disable any active + * plane, including the HW cursor. Each plane's atomic_update() + * helper will re-enable it if necessary. + * + * We only do this during *full* modesets. It does not affect + * simple pageflips on the planes. + */ + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); + + /* + * Ensure that no scanout takes place before reprogramming mode + * and format registers. 
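
Note: both the DPMS-on path and the flush above program the palette, from the CRTC state's gamma_lut when one is attached (its length is validated against AST_LUT_SIZE entries in atomic_check), otherwise as a linear ramp via ast_crtc_set_gamma_linear(). A sketch of such an identity ramp for 8-bit channels, assuming AST_LUT_SIZE is 256 as in the driver's headers::

    #include <stdint.h>

    #define AST_LUT_SIZE 256 /* assumed; matches the gamma_lut length check */

    static void fill_linear_lut(uint8_t lut[AST_LUT_SIZE][3])
    {
            unsigned int i;

            for (i = 0; i < AST_LUT_SIZE; i++) {
                    lut[i][0] = i; /* red */
                    lut[i][1] = i; /* green */
                    lut[i][2] = i; /* blue */
            }
    }
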
+ */ + ast_wait_for_vretrace(ast); +} + +static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = { + .mode_valid = ast_crtc_helper_mode_valid, + .atomic_check = ast_crtc_helper_atomic_check, + .atomic_flush = ast_crtc_helper_atomic_flush, + .atomic_enable = ast_crtc_helper_atomic_enable, + .atomic_disable = ast_crtc_helper_atomic_disable, +}; + +static void ast_crtc_reset(struct drm_crtc *crtc) +{ + struct ast_crtc_state *ast_state = + kzalloc(sizeof(*ast_state), GFP_KERNEL); + + if (crtc->state) + crtc->funcs->atomic_destroy_state(crtc, crtc->state); + + if (ast_state) + __drm_atomic_helper_crtc_reset(crtc, &ast_state->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); +} + +static struct drm_crtc_state * +ast_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct ast_crtc_state *new_ast_state, *ast_state; + struct drm_device *dev = crtc->dev; + + if (drm_WARN_ON(dev, !crtc->state)) + return NULL; + + new_ast_state = kmalloc(sizeof(*new_ast_state), GFP_KERNEL); + if (!new_ast_state) + return NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, &new_ast_state->base); + + ast_state = to_ast_crtc_state(crtc->state); + + new_ast_state->format = ast_state->format; + memcpy(&new_ast_state->vbios_mode_info, &ast_state->vbios_mode_info, + sizeof(new_ast_state->vbios_mode_info)); + + return &new_ast_state->base; +} + +static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct ast_crtc_state *ast_state = to_ast_crtc_state(state); + + __drm_atomic_helper_crtc_destroy_state(&ast_state->base); + kfree(ast_state); +} + +static const struct drm_crtc_funcs ast_crtc_funcs = { + .reset = ast_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = ast_crtc_atomic_duplicate_state, + .atomic_destroy_state = ast_crtc_atomic_destroy_state, +}; + +static int ast_crtc_init(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + struct drm_crtc *crtc = &ast->crtc; + int ret; + + ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane, + &ast->cursor_plane.base, + &ast_crtc_funcs, NULL); + if (ret) + return ret; + + drm_mode_crtc_set_gamma_size(crtc, AST_LUT_SIZE); + drm_crtc_enable_color_mgmt(crtc, 0, false, AST_LUT_SIZE); + + drm_crtc_helper_add(crtc, &ast_crtc_helper_funcs); + + return 0; +} + +/* + * VGA Connector + */ + +static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) +{ + struct ast_vga_connector *ast_vga_connector = + to_ast_vga_connector(connector); + struct drm_device *dev = connector->dev; + struct ast_private *ast = to_ast_private(dev); + struct edid *edid; + int count; + + if (!ast_vga_connector->i2c) + goto err_drm_connector_update_edid_property; + + /* + * Protect access to I/O registers from concurrent modesetting + * by acquiring the I/O-register lock. 
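
Note: ast_crtc_atomic_duplicate_state() above follows the standard pattern for subclassed atomic state: allocate the subclass, let __drm_atomic_helper_crtc_duplicate_state() duplicate the base, then copy every driver-private field by hand. Because the allocation uses kmalloc() rather than kzalloc(), any field not copied explicitly would hold garbage. The generic shape, as a sketch (my_crtc_state and my_duplicate() are placeholder names)::

    struct my_crtc_state {
            struct drm_crtc_state base; /* must be first for the cast */
            const struct drm_format_info *format; /* driver-private field */
    };

    static struct drm_crtc_state *my_duplicate(struct drm_crtc *crtc)
    {
            struct my_crtc_state *old_state =
                    container_of(crtc->state, struct my_crtc_state, base);
            struct my_crtc_state *new_state =
                    kmalloc(sizeof(*new_state), GFP_KERNEL);

            if (!new_state)
                    return NULL;
            __drm_atomic_helper_crtc_duplicate_state(crtc, &new_state->base);
            new_state->format = old_state->format; /* copy each private field */
            return &new_state->base;
    }
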
+ */ + mutex_lock(&ast->ioregs_lock); + + edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter); + if (!edid) + goto err_mutex_unlock; + + mutex_unlock(&ast->ioregs_lock); + + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; + +err_mutex_unlock: + mutex_unlock(&ast->ioregs_lock); +err_drm_connector_update_edid_property: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + +static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = { + .get_modes = ast_vga_connector_helper_get_modes, +}; + +static const struct drm_connector_funcs ast_vga_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_vga_connector_init(struct drm_device *dev, + struct ast_vga_connector *ast_vga_connector) +{ + struct drm_connector *connector = &ast_vga_connector->base; + int ret; + + ast_vga_connector->i2c = ast_i2c_create(dev); + if (!ast_vga_connector->i2c) + drm_err(dev, "failed to add ddc bus for connector\n"); + + if (ast_vga_connector->i2c) + ret = drm_connector_init_with_ddc( + dev, connector, &ast_vga_connector_funcs, + DRM_MODE_CONNECTOR_VGA, + &ast_vga_connector->i2c->adapter); + else + ret = drm_connector_init(dev, connector, + &ast_vga_connector_funcs, + DRM_MODE_CONNECTOR_VGA); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + return 0; +} + +static int ast_vga_output_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.vga.encoder; + struct ast_vga_connector *ast_vga_connector = + &ast->output.vga.vga_connector; + struct drm_connector *connector = &ast_vga_connector->base; + int ret; + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_vga_connector_init(dev, ast_vga_connector); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} + +/* + * SIL164 Connector + */ + +static int +ast_sil164_connector_helper_get_modes(struct drm_connector *connector) +{ + struct ast_sil164_connector *ast_sil164_connector = + to_ast_sil164_connector(connector); + struct drm_device *dev = connector->dev; + struct ast_private *ast = to_ast_private(dev); + struct edid *edid; + int count; + + if (!ast_sil164_connector->i2c) + goto err_drm_connector_update_edid_property; + + /* + * Protect access to I/O registers from concurrent modesetting + * by acquiring the I/O-register lock. 
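
Note: the DDC paths above rely on drm_get_edid(), which validates the block it returns; the vendor paths further below (ast_dp501_read_edid(), ast_astdp_read_edid()) instead fill a caller-supplied EDID_LENGTH (128-byte) buffer. One invariant any consumer can check on such a raw base block is that its 128 bytes sum to zero modulo 256; a minimal sketch (edid_block_ok() is hypothetical)::

    #include <stdbool.h>
    #include <stdint.h>

    #define EDID_LENGTH 128 /* size of an EDID base block */

    static bool edid_block_ok(const uint8_t block[EDID_LENGTH])
    {
            unsigned int i;
            uint8_t sum = 0;

            for (i = 0; i < EDID_LENGTH; i++)
                    sum += block[i]; /* includes the checksum byte itself */
            return sum == 0;
    }
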
+ */ + mutex_lock(&ast->ioregs_lock); + + edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter); + if (!edid) + goto err_mutex_unlock; + + mutex_unlock(&ast->ioregs_lock); + + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; + +err_mutex_unlock: + mutex_unlock(&ast->ioregs_lock); +err_drm_connector_update_edid_property: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + +static const struct drm_connector_helper_funcs + ast_sil164_connector_helper_funcs = { + .get_modes = ast_sil164_connector_helper_get_modes, + }; + +static const struct drm_connector_funcs ast_sil164_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int +ast_sil164_connector_init(struct drm_device *dev, + struct ast_sil164_connector *ast_sil164_connector) +{ + struct drm_connector *connector = &ast_sil164_connector->base; + int ret; + + ast_sil164_connector->i2c = ast_i2c_create(dev); + if (!ast_sil164_connector->i2c) + drm_err(dev, "failed to add ddc bus for connector\n"); + + if (ast_sil164_connector->i2c) + ret = drm_connector_init_with_ddc( + dev, connector, &ast_sil164_connector_funcs, + DRM_MODE_CONNECTOR_DVII, + &ast_sil164_connector->i2c->adapter); + else + ret = drm_connector_init(dev, connector, + &ast_sil164_connector_funcs, + DRM_MODE_CONNECTOR_DVII); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + return 0; +} + +static int ast_sil164_output_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.sil164.encoder; + struct ast_sil164_connector *ast_sil164_connector = + &ast->output.sil164.sil164_connector; + struct drm_connector *connector = &ast_sil164_connector->base; + int ret; + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_sil164_connector_init(dev, ast_sil164_connector); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} + +/* + * DP501 Connector + */ + +static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector) +{ + void *edid; + bool succ; + int count; + + edid = kmalloc(EDID_LENGTH, GFP_KERNEL); + if (!edid) + goto err_drm_connector_update_edid_property; + + succ = ast_dp501_read_edid(connector->dev, edid); + if (!succ) + goto err_kfree; + + drm_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; + +err_kfree: + kfree(edid); +err_drm_connector_update_edid_property: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + +static const struct drm_connector_helper_funcs + ast_dp501_connector_helper_funcs = { + .get_modes = ast_dp501_connector_helper_get_modes, + }; + +static const struct drm_connector_funcs ast_dp501_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = 
drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_dp501_connector_init(struct drm_device *dev, + struct drm_connector *connector) +{ + int ret; + + ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + return 0; +} + +static int ast_dp501_output_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.dp501.encoder; + struct drm_connector *connector = &ast->output.dp501.connector; + int ret; + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_dp501_connector_init(dev, connector); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} + +/* + * ASPEED Display-Port Connector + */ + +static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) +{ + void *edid; + + int succ; + int count; + + edid = kmalloc(EDID_LENGTH, GFP_KERNEL); + if (!edid) + goto err_drm_connector_update_edid_property; + + succ = ast_astdp_read_edid(connector->dev, edid); + if (succ < 0) + goto err_kfree; + + drm_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; + +err_kfree: + kfree(edid); +err_drm_connector_update_edid_property: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + +static const struct drm_connector_helper_funcs + ast_astdp_connector_helper_funcs = { + .get_modes = ast_astdp_connector_helper_get_modes, + }; + +static const struct drm_connector_funcs ast_astdp_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_astdp_connector_init(struct drm_device *dev, + struct drm_connector *connector) +{ + int ret; + + ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + return 0; +} + +static int ast_astdp_output_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.astdp.encoder; + struct drm_connector *connector = &ast->output.astdp.connector; + int ret; + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_astdp_connector_init(dev, connector); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} + +/* + * Mode config + */ + +static void +ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct ast_private *ast = 
to_ast_private(state->dev); + + /* + * Concurrent operations could possibly trigger a call to + * drm_connector_helper_funcs.get_modes by trying to read the + * display modes. Protect access to I/O registers by acquiring + * the I/O-register lock. Released in atomic_flush(). + */ + mutex_lock(&ast->ioregs_lock); + drm_atomic_helper_commit_tail_rpm(state); + mutex_unlock(&ast->ioregs_lock); +} + +static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = { + .atomic_commit_tail = ast_mode_config_helper_atomic_commit_tail, +}; + +static const struct drm_mode_config_funcs ast_mode_config_funcs = { + .fb_create = drm_gem_fb_create, + .mode_valid = drm_vram_helper_mode_valid, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +int ast_mode_config_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + int ret; + + ret = drmm_mode_config_init(dev); + if (ret) + return ret; + + dev->mode_config.funcs = &ast_mode_config_funcs; + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; + + if (ast->chip == AST2100 || ast->chip == AST2200 || + ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500 || ast->chip == AST2600) { + dev->mode_config.max_width = 1920; + dev->mode_config.max_height = 2048; + } else { + dev->mode_config.max_width = 1600; + dev->mode_config.max_height = 1200; + } + + dev->mode_config.helper_private = &ast_mode_config_helper_funcs; + + ret = ast_primary_plane_init(ast); + if (ret) + return ret; + + ret = ast_cursor_plane_init(ast); + if (ret) + return ret; + + ast_crtc_init(dev); + + if (ast->tx_chip_types & AST_TX_NONE_BIT) { + ret = ast_vga_output_init(ast); + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_SIL164_BIT) { + ret = ast_sil164_output_init(ast); + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_DP501_BIT) { + ret = ast_dp501_output_init(ast); + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { + ret = ast_astdp_output_init(ast); + if (ret) + return ret; + } + + drm_mode_config_reset(dev); + + return 0; +} diff --git a/drivers/gpu/drm/ast_loongson/ast_post.c b/drivers/gpu/drm/ast_loongson/ast_post.c new file mode 100644 index 0000000000000000000000000000000000000000..a7a9c37dfeeed021fd4538fead7e88f036af41e4 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_post.c @@ -0,0 +1,2090 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_print.h>
+
+#include "ast_dram_tables.h"
+#include "ast_drv.h"
+
+static void ast_post_chip_2300(struct drm_device *dev);
+static void ast_post_chip_2500(struct drm_device *dev);
+
+void ast_enable_vga(struct drm_device *dev)
+{
+        struct ast_private *ast = to_ast_private(dev);
+
+        ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
+        ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
+}
+
+void ast_enable_mmio(struct drm_device *dev)
+{
+        struct ast_private *ast = to_ast_private(dev);
+
+        ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
+}
+
+bool ast_is_vga_enabled(struct drm_device *dev)
+{
+        struct ast_private *ast = to_ast_private(dev);
+        u8 ch;
+
+        ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
+
+        return !!(ch & 0x01);
+}
+
+static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
+
+static void ast_set_def_ext_reg(struct drm_device *dev)
+{
+        struct ast_private *ast = to_ast_private(dev);
+        struct pci_dev *pdev = to_pci_dev(dev->dev);
+        u8 i, index, reg;
+        const u8 *ext_reg_info;
+
+        /* reset scratch */
+        for (i = 0x81; i <= 0x9f; i++)
+                ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
+
+        if (ast->chip == AST2300 || ast->chip == AST2400 ||
+            ast->chip == AST2500) {
+                if (pdev->revision >= 0x20)
+                        ext_reg_info = extreginfo_ast2300;
+                else
+                        ext_reg_info = extreginfo_ast2300a0;
+        } else
+                ext_reg_info = extreginfo;
+
+        index = 0xa0;
+        while (*ext_reg_info != 0xff) {
+                ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00,
+                                       *ext_reg_info);
+                index++;
+                ext_reg_info++;
+        }
+
+        /* disable standard IO/MEM decode if secondary */
+        /* ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */
+
+        /* Set Ext. 
Default */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00); + + /* Enable RAMDAC for A1 */ + reg = 0x04; + if (ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500) + reg |= 0x20; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg); +} + +u32 ast_mindwm(struct ast_private *ast, u32 r) +{ + uint32_t data; + + ast_write32(ast, 0xf004, r & 0xffff0000); + ast_write32(ast, 0xf000, 0x1); + + do { + data = ast_read32(ast, 0xf004) & 0xffff0000; + } while (data != (r & 0xffff0000)); + return ast_read32(ast, 0x10000 + (r & 0x0000ffff)); +} + +void ast_moutdwm(struct ast_private *ast, u32 r, u32 v) +{ + uint32_t data; + + ast_write32(ast, 0xf004, r & 0xffff0000); + ast_write32(ast, 0xf000, 0x1); + do { + data = ast_read32(ast, 0xf004) & 0xffff0000; + } while (data != (r & 0xffff0000)); + ast_write32(ast, 0x10000 + (r & 0x0000ffff), v); +} + +/* + * AST2100/2150 DLL CBR Setting + */ +#define CBR_SIZE_AST2150 ((16 << 10) - 1) +#define CBR_PASSNUM_AST2150 5 +#define CBR_THRESHOLD_AST2150 10 +#define CBR_THRESHOLD2_AST2150 10 +#define TIMEOUT_AST2150 5000000 + +#define CBR_PATNUM_AST2150 8 + +static const u32 pattern_AST2150[14] = { 0xFF00FF00, 0xCC33CC33, 0xAA55AA55, + 0xFFFE0001, 0x683501FE, 0x0F1929B0, + 0x2D0B4346, 0x60767F02, 0x6FBE36A6, + 0x3A253035, 0x3019686D, 0x41C6167E, + 0x620152BF, 0x20F050E0 }; + +static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x40; + if (++timeout > TIMEOUT_AST2150) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return 0xffffffff; + } + } while (!data); + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x40; + if (++timeout > TIMEOUT_AST2150) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return 0xffffffff; + } + } while (!data); + data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7; + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return data; +} + +static int cbrtest_ast2150(struct ast_private *ast) +{ + int i; + + for (i = 0; i < 8; i++) + if (mmctestburst2_ast2150(ast, i)) + return 0; + return 1; +} + +static int cbrscan_ast2150(struct ast_private *ast, int busw) +{ + u32 patcnt, loop; + + for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]); + for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) { + if (cbrtest_ast2150(ast)) + break; + } + if (loop == CBR_PASSNUM_AST2150) + return 0; + } + return 1; +} + +static void cbrdlli_ast2150(struct ast_private *ast, int busw) +{ + u32 dll_min[4], dll_max[4], dlli, data, passcnt; + +cbr_start: + dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff; + dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0; + passcnt = 0; + + for (dlli = 0; dlli < 100; dlli++) { + ast_moutdwm(ast, 0x1e6e0068, + dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); + data = cbrscan_ast2150(ast, busw); + if (data != 0) { + if (data & 0x1) { + if (dll_min[0] > dlli) + dll_min[0] = dlli; + if (dll_max[0] < dlli) + dll_max[0] = dlli; + } + passcnt++; + } else if (passcnt >= CBR_THRESHOLD_AST2150) + goto cbr_start; + } + if (dll_max[0] == 0 || + (dll_max[0] - dll_min[0]) < CBR_THRESHOLD_AST2150) + goto cbr_start; + + dlli = dll_min[0] + (((dll_max[0] - 
dll_min[0]) * 7) >> 4);
+        ast_moutdwm(ast, 0x1e6e0068,
+                    dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
+static void ast_init_dram_reg(struct drm_device *dev)
+{
+        struct ast_private *ast = to_ast_private(dev);
+        u8 j;
+        u32 data, temp, i;
+        const struct ast_dramstruct *dram_reg_info;
+
+        j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+
+        if ((j & 0x80) == 0) { /* VGA only */
+                if (ast->chip == AST2000) {
+                        dram_reg_info = ast2000_dram_table_data;
+                        ast_write32(ast, 0xf004, 0x1e6e0000);
+                        ast_write32(ast, 0xf000, 0x1);
+                        ast_write32(ast, 0x10100, 0xa8);
+
+                        do {
+                                ;
+                        } while (ast_read32(ast, 0x10100) != 0xa8);
+                } else { /* AST2100/1100 */
+                        if (ast->chip == AST2100 || ast->chip == AST2200)
+                                dram_reg_info = ast2100_dram_table_data;
+                        else
+                                dram_reg_info = ast1100_dram_table_data;
+
+                        ast_write32(ast, 0xf004, 0x1e6e0000);
+                        ast_write32(ast, 0xf000, 0x1);
+                        ast_write32(ast, 0x12000, 0x1688A8A8);
+                        do {
+                                ;
+                        } while (ast_read32(ast, 0x12000) != 0x01);
+
+                        ast_write32(ast, 0x10000, 0xfc600309);
+                        do {
+                                ;
+                        } while (ast_read32(ast, 0x10000) != 0x01);
+                }
+
+                while (dram_reg_info->index != 0xffff) {
+                        if (dram_reg_info->index == 0xff00) { /* delay fn */
+                                for (i = 0; i < 15; i++)
+                                        udelay(dram_reg_info->data);
+                        } else if (dram_reg_info->index == 0x4 &&
+                                   ast->chip != AST2000) {
+                                data = dram_reg_info->data;
+                                if (ast->dram_type == AST_DRAM_1Gx16)
+                                        data = 0x00000d89;
+                                else if (ast->dram_type == AST_DRAM_1Gx32)
+                                        data = 0x00000c8d;
+
+                                temp = ast_read32(ast, 0x12070);
+                                temp &= 0xc;
+                                temp <<= 2;
+                                ast_write32(ast, 0x10000 + dram_reg_info->index,
+                                            data | temp);
+                        } else
+                                ast_write32(ast, 0x10000 + dram_reg_info->index,
+                                            dram_reg_info->data);
+                        dram_reg_info++;
+                }
+
+                /* AST 2100/2150 DRAM calibration */
+                data = ast_read32(ast, 0x10120);
+                if (data == 0x5061) { /* 266Mhz */
+                        data = ast_read32(ast, 0x10004);
+                        if (data & 0x40)
+                                cbrdlli_ast2150(ast, 16); /* 16 bits */
+                        else
+                                cbrdlli_ast2150(ast, 32); /* 32 bits */
+                }
+
+                switch (ast->chip) {
+                case AST2000:
+                        temp = ast_read32(ast, 0x10140);
+                        ast_write32(ast, 0x10140, temp | 0x40);
+                        break;
+                case AST1100:
+                case AST2100:
+                case AST2200:
+                case AST2150:
+                        temp = ast_read32(ast, 0x1200c);
+                        ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+                        temp = ast_read32(ast, 0x12040);
+                        ast_write32(ast, 0x12040, temp | 0x40);
+                        break;
+                default:
+                        break;
+                }
+        }
+
+        /* wait ready */
+        do {
+                j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+        } while ((j & 0x40) == 0);
+}
+
+void ast_post_gpu(struct drm_device *dev)
+{
+        struct ast_private *ast = to_ast_private(dev);
+        struct pci_dev *pdev = to_pci_dev(dev->dev);
+        u32 reg;
+
+        pci_read_config_dword(pdev, 0x04, &reg);
+        reg |= 0x3;
+        pci_write_config_dword(pdev, 0x04, reg);
+
+        ast_enable_vga(dev);
+        ast_open_key(ast);
+        ast_enable_mmio(dev);
+        ast_set_def_ext_reg(dev);
+
+        if (ast->chip == AST2600) {
+                ast_dp_launch(dev, 1);
+        } else if (ast->config_mode == ast_use_p2a) {
+                if (ast->chip == AST2500)
+                        ast_post_chip_2500(dev);
+                else if (ast->chip == AST2300 || ast->chip == AST2400)
+                        ast_post_chip_2300(dev);
+                else
+                        ast_init_dram_reg(dev);
+
+                ast_init_3rdtx(dev);
+        } else {
+                if (ast->tx_chip_types & AST_TX_SIL164_BIT)
+                        ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3,
+                                               0xcf, 0x80); /* Enable DVO */
+        }
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+        u32 dram_type;
+        u32 dram_chipid;
+        u32 dram_freq;
+        u32 vram_size;
+        u32 odt;
+        u32 wodt;
+        u32 rodt;
+        u32 dram_config;
+        u32 reg_PERIOD;
+        u32 
reg_MADJ; + u32 reg_SADJ; + u32 reg_MRS; + u32 reg_EMRS; + u32 reg_AC1; + u32 reg_AC2; + u32 reg_DQSIC; + u32 reg_DRV; + u32 reg_IOZ; + u32 reg_DQIDLY; + u32 reg_FREQ; + u32 madj_max; + u32 dll2_finetune_step; +}; + +/* + * DQSI DLL CBR Setting + */ +#define CBR_SIZE0 ((1 << 10) - 1) +#define CBR_SIZE1 ((4 << 10) - 1) +#define CBR_SIZE2 ((64 << 10) - 1) +#define CBR_PASSNUM 5 +#define CBR_PASSNUM2 5 +#define CBR_THRESHOLD 10 +#define CBR_THRESHOLD2 10 +#define TIMEOUT 5000000 +#define CBR_PATNUM 8 + +static const u32 pattern[8] = { 0xFF00FF00, 0xCC33CC33, 0xAA55AA55, 0x88778877, + 0x92CC4D6E, 0x543D3CDE, 0xF1E843C7, 0x7C61D253 }; + +static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x3000; + if (data & 0x2000) + return false; + if (++timeout > TIMEOUT) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return false; + } + } while (!data); + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return true; +} + +static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x1000; + if (++timeout > TIMEOUT) { + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return 0xffffffff; + } + } while (!data); + data = ast_mindwm(ast, 0x1e6e0078); + data = (data | (data >> 16)) & 0xffff; + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return data; +} + +static bool mmc_test_burst(struct ast_private *ast, u32 datagen) +{ + return mmc_test(ast, datagen, 0xc1); +} + +static u32 mmc_test_burst2(struct ast_private *ast, u32 datagen) +{ + return mmc_test2(ast, datagen, 0x41); +} + +static bool mmc_test_single(struct ast_private *ast, u32 datagen) +{ + return mmc_test(ast, datagen, 0xc5); +} + +static u32 mmc_test_single2(struct ast_private *ast, u32 datagen) +{ + return mmc_test2(ast, datagen, 0x05); +} + +static bool mmc_test_single_2500(struct ast_private *ast, u32 datagen) +{ + return mmc_test(ast, datagen, 0x85); +} + +static int cbr_test(struct ast_private *ast) +{ + u32 data; + int i; + + data = mmc_test_single2(ast, 0); + if ((data & 0xff) && (data & 0xff00)) + return 0; + for (i = 0; i < 8; i++) { + data = mmc_test_burst2(ast, i); + if ((data & 0xff) && (data & 0xff00)) + return 0; + } + if (!data) + return 3; + else if (data & 0xff) + return 2; + return 1; +} + +static int cbr_scan(struct ast_private *ast) +{ + u32 data, data2, patcnt, loop; + + data2 = 3; + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < CBR_PASSNUM2; loop++) { + data = cbr_test(ast); + if (data != 0) { + data2 &= data; + if (!data2) + return 0; + break; + } + } + if (loop == CBR_PASSNUM2) + return 0; + } + return data2; +} + +static u32 cbr_test2(struct ast_private *ast) +{ + u32 data; + + data = mmc_test_burst2(ast, 0); + if (data == 0xffff) + return 0; + data |= mmc_test_single2(ast, 0); + if (data == 0xffff) + return 0; + + return ~data & 0xffff; +} + +static u32 cbr_scan2(struct ast_private *ast) +{ + u32 data, data2, patcnt, loop; + + data2 = 0xffff; + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < CBR_PASSNUM2; loop++) { + data = cbr_test2(ast); + if (data != 0) { + data2 &= data; + if (!data2) + return 
0; + break; + } + } + if (loop == CBR_PASSNUM2) + return 0; + } + return data2; +} + +static bool cbr_test3(struct ast_private *ast) +{ + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_single(ast, 0)) + return false; + return true; +} + +static bool cbr_scan3(struct ast_private *ast) +{ + u32 patcnt, loop; + + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < 2; loop++) { + if (cbr_test3(ast)) + break; + } + if (loop == 2) + return false; + } + return true; +} + +static bool finetuneDQI_L(struct ast_private *ast, + struct ast2300_dram_param *param) +{ + u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, + passcnt, retry = 0; + bool status = false; +FINETUNE_START: + for (cnt = 0; cnt < 16; cnt++) { + dllmin[cnt] = 0xff; + dllmax[cnt] = 0x0; + } + passcnt = 0; + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, + 0x00001400 | (dlli << 16) | (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1); + data = cbr_scan2(ast); + if (data != 0) { + mask = 0x00010001; + for (cnt = 0; cnt < 16; cnt++) { + if (data & mask) { + if (dllmin[cnt] > dlli) + dllmin[cnt] = dlli; + if (dllmax[cnt] < dlli) + dllmax[cnt] = dlli; + } + mask <<= 1; + } + passcnt++; + } else if (passcnt >= CBR_THRESHOLD2) { + break; + } + } + gold_sadj[0] = 0x0; + passcnt = 0; + for (cnt = 0; cnt < 16; cnt++) { + if ((dllmax[cnt] > dllmin[cnt]) && + ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + gold_sadj[0] += dllmin[cnt]; + passcnt++; + } + } + if (retry++ > 10) + goto FINETUNE_DONE; + if (passcnt != 16) + goto FINETUNE_START; + status = true; +FINETUNE_DONE: + gold_sadj[0] = gold_sadj[0] >> 4; + gold_sadj[1] = gold_sadj[0]; + + data = 0; + for (cnt = 0; cnt < 8; cnt++) { + data >>= 3; + if ((dllmax[cnt] > dllmin[cnt]) && + ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + dlli = dllmin[cnt]; + if (gold_sadj[0] >= dlli) { + dlli = ((gold_sadj[0] - dlli) * 19) >> 5; + if (dlli > 3) + dlli = 3; + } else { + dlli = ((dlli - gold_sadj[0]) * 19) >> 5; + if (dlli > 4) + dlli = 4; + dlli = (8 - dlli) & 0x7; + } + data |= dlli << 21; + } + } + ast_moutdwm(ast, 0x1E6E0080, data); + + data = 0; + for (cnt = 8; cnt < 16; cnt++) { + data >>= 3; + if ((dllmax[cnt] > dllmin[cnt]) && + ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + dlli = dllmin[cnt]; + if (gold_sadj[1] >= dlli) { + dlli = ((gold_sadj[1] - dlli) * 19) >> 5; + if (dlli > 3) + dlli = 3; + else + dlli = (dlli - 1) & 0x7; + } else { + dlli = ((dlli - gold_sadj[1]) * 19) >> 5; + dlli += 1; + if (dlli > 4) + dlli = 4; + dlli = (8 - dlli) & 0x7; + } + data |= dlli << 21; + } + } + ast_moutdwm(ast, 0x1E6E0084, data); + return status; +} /* finetuneDQI_L */ + +static void finetuneDQSI(struct ast_private *ast) +{ + u32 dlli, dqsip, dqidly; + u32 reg_mcr18, reg_mcr0c, passcnt[2], diff; + u32 g_dqidly, g_dqsip, g_margin, g_side; + u16 pass[32][2][2]; + char tag[2][76]; + + /* Disable DQI CBR */ + reg_mcr0c = ast_mindwm(ast, 0x1E6E000C); + reg_mcr18 = ast_mindwm(ast, 0x1E6E0018); + reg_mcr18 &= 0x0000ffff; + ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); + + for (dlli = 0; dlli < 76; dlli++) { + tag[0][dlli] = 0x0; + tag[1][dlli] = 0x0; + } + for (dqidly = 0; dqidly < 32; dqidly++) { + pass[dqidly][0][0] = 0xff; + pass[dqidly][0][1] = 0x0; + pass[dqidly][1][0] = 0xff; + pass[dqidly][1][1] = 0x0; + } + for (dqidly = 0; dqidly < 32; dqidly++) { + passcnt[0] = passcnt[1] = 0; + for (dqsip = 0; dqsip < 2; dqsip++) { + ast_moutdwm(ast, 0x1E6E000C, 0); + 
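
Note: all of the 0x1E6E... accesses in this calibration code go through ast_mindwm()/ast_moutdwm() from earlier in the file: the base of a 64 KiB P2A (PCI-to-AHB) window is latched through MMIO registers 0xf004/0xf000, and the SoC register is then reached at 0x10000 plus the low 16 bits of its address. The read side, restated as a sketch over a hypothetical MMIO accessor pair::

    #include <stdint.h>

    struct mmio { /* hypothetical MMIO accessors for the BAR */
            uint32_t (*rd)(uint32_t reg);
            void (*wr)(uint32_t reg, uint32_t val);
    };

    static uint32_t p2a_read(const struct mmio *io, uint32_t ahb_addr)
    {
            io->wr(0xf004, ahb_addr & 0xffff0000); /* window base */
            io->wr(0xf000, 0x1);                   /* enable the window */
            while ((io->rd(0xf004) & 0xffff0000) != (ahb_addr & 0xffff0000))
                    ;                              /* wait for the base to latch */
            return io->rd(0x10000 + (ahb_addr & 0xffff));
    }
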
ast_moutdwm(ast, 0x1E6E0018, + reg_mcr18 | (dqidly << 16) | (dqsip << 23)); + ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c); + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, + 0x00001300 | (dlli << 16) | + (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0070, 0); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0); + if (cbr_scan3(ast)) { + if (dlli == 0) + break; + passcnt[dqsip]++; + tag[dqsip][dlli] = 'P'; + if (dlli < pass[dqidly][dqsip][0]) + pass[dqidly][dqsip][0] = + (u16)dlli; + if (dlli > pass[dqidly][dqsip][1]) + pass[dqidly][dqsip][1] = + (u16)dlli; + } + if (passcnt[dqsip] >= 5) + break; + if (!cbr_scan3(ast)) { + pass[dqidly][dqsip][0] = 0xff; + pass[dqidly][dqsip][1] = 0x0; + } + } + } + if (passcnt[0] == 0 && passcnt[1] == 0) + dqidly++; + } + /* Search margin */ + g_dqidly = g_dqsip = g_margin = g_side = 0; + + for (dqidly = 0; dqidly < 32; dqidly++) { + for (dqsip = 0; dqsip < 2; dqsip++) { + if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1]) + continue; + diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0]; + if ((diff + 2) < g_margin) + continue; + passcnt[0] = passcnt[1] = 0; + for (dlli = pass[dqidly][dqsip][0]; + dlli > 0 && tag[dqsip][dlli] != 0; + dlli--, passcnt[0]++) + ; + for (dlli = pass[dqidly][dqsip][1]; + dlli < 76 && tag[dqsip][dlli] != 0; + dlli++, passcnt[1]++) + ; + if (passcnt[0] > passcnt[1]) + passcnt[0] = passcnt[1]; + passcnt[1] = 0; + if (passcnt[0] > g_side) + passcnt[1] = passcnt[0] - g_side; + if (diff > (g_margin + 1) && + (passcnt[1] > 0 || passcnt[0] > 8)) { + g_margin = diff; + g_dqidly = dqidly; + g_dqsip = dqsip; + g_side = passcnt[0]; + } else if (passcnt[1] > 1 && g_side < 8) { + if (diff > g_margin) + g_margin = diff; + g_dqidly = dqidly; + g_dqsip = dqsip; + g_side = passcnt[0]; + } + } + } + reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23); + ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); +} +static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param) +{ + u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0; + bool status = false; + + finetuneDQSI(ast); + if (finetuneDQI_L(ast, param) == false) + return status; + +CBR_START2: + dllmin[0] = dllmin[1] = 0xff; + dllmax[0] = dllmax[1] = 0x0; + passcnt = 0; + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, + 0x00001300 | (dlli << 16) | (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2); + data = cbr_scan(ast); + if (data != 0) { + if (data & 0x1) { + if (dllmin[0] > dlli) + dllmin[0] = dlli; + if (dllmax[0] < dlli) + dllmax[0] = dlli; + } + if (data & 0x2) { + if (dllmin[1] > dlli) + dllmin[1] = dlli; + if (dllmax[1] < dlli) + dllmax[1] = dlli; + } + passcnt++; + } else if (passcnt >= CBR_THRESHOLD) { + break; + } + } + if (retry++ > 10) + goto CBR_DONE2; + if (dllmax[0] == 0 || (dllmax[0] - dllmin[0]) < CBR_THRESHOLD) + goto CBR_START2; + if (dllmax[1] == 0 || (dllmax[1] - dllmin[1]) < CBR_THRESHOLD) + goto CBR_START2; + status = true; +CBR_DONE2: + dlli = (dllmin[1] + dllmax[1]) >> 1; + dlli <<= 8; + dlli += (dllmin[0] + dllmax[0]) >> 1; + ast_moutdwm(ast, 0x1E6E0068, + ast_mindwm(ast, 0x1E720058) | (dlli << 16)); + return status; +} /* CBRDLL2 */ + +static void get_ddr3_info(struct ast_private *ast, + struct ast2300_dram_param *param) +{ + u32 trap, trap_AC2, trap_MRS; + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + + /* Ger trap info */ + trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; + trap_AC2 = 0x00020000 + (trap << 16); + trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19); + trap_MRS = 0x00000010 + (trap << 4); + trap_MRS |= 
((trap & 0x2) << 18); + + param->reg_MADJ = 0x00034C4C; + param->reg_SADJ = 0x00001800; + param->reg_DRV = 0x000000F0; + param->reg_PERIOD = param->dram_freq; + param->rodt = 0; + + switch (param->dram_freq) { + case 336: + ast_moutdwm(ast, 0x1E6E2020, 0x0190); + param->wodt = 0; + param->reg_AC1 = 0x22202725; + param->reg_AC2 = 0xAA007613 | trap_AC2; + param->reg_DQSIC = 0x000000BA; + param->reg_MRS = 0x04001400 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000074; + param->reg_FREQ = 0x00004DC0; + param->madj_max = 96; + param->dll2_finetune_step = 3; + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xAA007613 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xAA00761C | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xAA007636 | trap_AC2; + break; + } + break; + default: + case 396: + ast_moutdwm(ast, 0x1E6E2020, 0x03F1); + param->wodt = 1; + param->reg_AC1 = 0x33302825; + param->reg_AC2 = 0xCC009617 | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x04001600 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000034; + param->reg_DRV = 0x000000FA; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x00005040; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC009617 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC009622 | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00963F | trap_AC2; + break; + } + break; + + case 408: + ast_moutdwm(ast, 0x1E6E2020, 0x01F0); + param->wodt = 1; + param->reg_AC1 = 0x33302825; + param->reg_AC2 = 0xCC009617 | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x04001600 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000023; + param->reg_DRV = 0x000000FA; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x000050C0; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC009617 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC009622 | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00963F | trap_AC2; + break; + } + + break; + case 456: + ast_moutdwm(ast, 0x1E6E2020, 0x0230); + param->wodt = 0; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xCD44961A; + param->reg_DQSIC = 0x000000FC; + param->reg_MRS = 0x00081830; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x00000097; + param->reg_FREQ = 0x000052C0; + param->madj_max = 88; + param->dll2_finetune_step = 4; + break; + case 504: + ast_moutdwm(ast, 0x1E6E2020, 0x0270); + param->wodt = 1; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xDE44A61D; + param->reg_DQSIC = 0x00000117; + param->reg_MRS = 0x00081A30; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x070000BB; + param->reg_DQIDLY = 0x000000A0; + param->reg_FREQ = 0x000054C0; + param->madj_max = 79; + param->dll2_finetune_step = 4; + break; + case 528: + ast_moutdwm(ast, 0x1E6E2020, 0x0290); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xEF44B61E; + param->reg_DQSIC = 0x00000125; + param->reg_MRS = 0x00081A30; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000088; + 
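
Note: cbr_dll2() above (label CBR_DONE2) chooses the final DQS input delay per byte lane as the midpoint of that lane's passing window, packing lane 1 into bits [15:8] and lane 0 into bits [7:0] before merging the value into MCR68. The arithmetic in isolation (cbr_pick_dll() is a hypothetical name)::

    #include <stdint.h>

    static uint32_t cbr_pick_dll(uint32_t min0, uint32_t max0,
                                 uint32_t min1, uint32_t max1)
    {
            uint32_t dlli = (min1 + max1) >> 1; /* lane 1 midpoint */

            dlli <<= 8;                         /* lane 1 -> bits [15:8] */
            dlli += (min0 + max0) >> 1;         /* lane 0 -> bits [7:0] */
            return dlli;
    }
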
param->reg_FREQ = 0x000055C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 576: + ast_moutdwm(ast, 0x1E6E2020, 0x0140); + param->reg_MADJ = 0x00136868; + param->reg_SADJ = 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302A37; + param->reg_AC2 = 0xEF56B61E; + param->reg_DQSIC = 0x0000013F; + param->reg_MRS = 0x00101A50; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000057C0; + param->madj_max = 136; + param->dll2_finetune_step = 3; + break; + case 600: + ast_moutdwm(ast, 0x1E6E2020, 0x02E1); + param->reg_MADJ = 0x00136868; + param->reg_SADJ = 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x32302A37; + param->reg_AC2 = 0xDF56B61F; + param->reg_DQSIC = 0x0000014D; + param->reg_MRS = 0x00101A50; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000058C0; + param->madj_max = 132; + param->dll2_finetune_step = 3; + break; + case 624: + ast_moutdwm(ast, 0x1E6E2020, 0x0160); + param->reg_MADJ = 0x00136868; + param->reg_SADJ = 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x32302A37; + param->reg_AC2 = 0xEF56B621; + param->reg_DQSIC = 0x0000015A; + param->reg_MRS = 0x02101A50; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000059C0; + param->madj_max = 128; + param->dll2_finetune_step = 3; + break; + } /* switch freq */ + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->dram_config = 0x130; + break; + default: + case AST_DRAM_1Gx16: + param->dram_config = 0x131; + break; + case AST_DRAM_2Gx16: + param->dram_config = 0x132; + break; + case AST_DRAM_4Gx16: + param->dram_config = 0x133; + break; + } /* switch size */ + + switch (param->vram_size) { + default: + case AST_VIDMEM_SIZE_8M: + param->dram_config |= 0x00; + break; + case AST_VIDMEM_SIZE_16M: + param->dram_config |= 0x04; + break; + case AST_VIDMEM_SIZE_32M: + param->dram_config |= 0x08; + break; + case AST_VIDMEM_SIZE_64M: + param->dram_config |= 0x0c; + break; + } +} + +static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param) +{ + u32 data, data2, retry = 0; + +ddr3_init_start: + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0018, 0x00000100); + ast_moutdwm(ast, 0x1E6E0024, 0x00000000); + ast_moutdwm(ast, 0x1E6E0034, 0x00000000); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); + ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); + udelay(10); + + ast_moutdwm(ast, 0x1E6E0004, param->dram_config); + ast_moutdwm(ast, 0x1E6E0008, 0x90040f); + ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); + ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); + ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); + ast_moutdwm(ast, 0x1E6E0080, 0x00000000); + ast_moutdwm(ast, 0x1E6E0084, 0x00000000); + ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); + ast_moutdwm(ast, 0x1E6E0018, 0x4000A170); + ast_moutdwm(ast, 0x1E6E0018, 0x00002370); + ast_moutdwm(ast, 0x1E6E0038, 0x00000000); + ast_moutdwm(ast, 0x1E6E0040, 0xFF444444); + ast_moutdwm(ast, 0x1E6E0044, 0x22222222); + ast_moutdwm(ast, 0x1E6E0048, 0x22222222); + ast_moutdwm(ast, 0x1E6E004C, 0x00000002); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 
0x00000000); + ast_moutdwm(ast, 0x1E6E0054, 0); + ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); + ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0074, 0x00000000); + ast_moutdwm(ast, 0x1E6E0078, 0x00000000); + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + /* Wait MCLK2X lock to MCLK */ + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { + data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; + if ((data2 & 0xff) > param->madj_max) + break; + ast_moutdwm(ast, 0x1E6E0064, data2); + if (data2 & 0x00100000) + data2 = ((data2 & 0xff) >> 3) + 3; + else + data2 = ((data2 & 0xff) >> 2) + 5; + data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; + data2 += data & 0xff; + data = data | (data2 << 8); + ast_moutdwm(ast, 0x1E6E0068, data); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, + ast_mindwm(ast, 0x1E6E0064) | 0xC0000); + udelay(10); + data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; + ast_moutdwm(ast, 0x1E6E0018, data); + data = data | 0x200; + ast_moutdwm(ast, 0x1E6E0018, data); + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + } + ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff); + data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; + ast_moutdwm(ast, 0x1E6E0018, data); + + ast_moutdwm(ast, 0x1E6E0034, 0x00000001); + ast_moutdwm(ast, 0x1E6E000C, 0x00000040); + udelay(50); + /* Mode Register Setting */ + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000005); + ast_moutdwm(ast, 0x1E6E0028, 0x00000007); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + data = 0; + if (param->wodt) + data = 0x300; + if (param->rodt) + data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); + ast_moutdwm(ast, 0x1E6E0034, data | 0x3); + + /* Calibrate the DQSI delay */ + if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) + goto ddr3_init_start; + + ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); + /* ECC Memory Initialization */ +#ifdef ECC + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0070, 0x221); + do { + data = ast_mindwm(ast, 0x1E6E0070); + } while (!(data & 0x00001000)); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); +#endif +} + +static void get_ddr2_info(struct ast_private *ast, + struct ast2300_dram_param *param) +{ + u32 trap, trap_AC2, trap_MRS; + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + + /* Get trap info */ + trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; + trap_AC2 = (trap << 20) | (trap << 16); + trap_AC2 += 0x00110000; + trap_MRS = 0x00000040 | (trap << 4); + + param->reg_MADJ = 0x00034C4C; + param->reg_SADJ = 0x00001800; + param->reg_DRV = 0x000000F0; + param->reg_PERIOD = param->dram_freq; + param->rodt = 0; + + switch (param->dram_freq) { + case 264: + ast_moutdwm(ast, 0x1E6E2020, 0x0130); + param->wodt = 0; + param->reg_AC1 = 0x11101513; + param->reg_AC2 = 0x78117011; + param->reg_DQSIC = 0x00000092; + param->reg_MRS = 0x00000842; + 
param->reg_EMRS = 0x00000000; + param->reg_DRV = 0x000000F0; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x0000005A; + param->reg_FREQ = 0x00004AC0; + param->madj_max = 138; + param->dll2_finetune_step = 3; + break; + case 336: + ast_moutdwm(ast, 0x1E6E2020, 0x0190); + param->wodt = 1; + param->reg_AC1 = 0x22202613; + param->reg_AC2 = 0xAA009016 | trap_AC2; + param->reg_DQSIC = 0x000000BA; + param->reg_MRS = 0x00000A02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000074; + param->reg_FREQ = 0x00004DC0; + param->madj_max = 96; + param->dll2_finetune_step = 3; + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + param->reg_AC2 = 0xAA009012 | trap_AC2; + break; + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xAA009016 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xAA009023 | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xAA00903B | trap_AC2; + break; + } + break; + default: + case 396: + ast_moutdwm(ast, 0x1E6E2020, 0x03F1); + param->wodt = 1; + param->rodt = 0; + param->reg_AC1 = 0x33302714; + param->reg_AC2 = 0xCC00B01B | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x00000C02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x00005040; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->reg_AC2 = 0xCC00B016 | trap_AC2; + break; + default: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC00B01B | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC00B02B | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00B03F | trap_AC2; + break; + } + + break; + + case 408: + ast_moutdwm(ast, 0x1E6E2020, 0x01F0); + param->wodt = 1; + param->rodt = 0; + param->reg_AC1 = 0x33302714; + param->reg_AC2 = 0xCC00B01B | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x00000C02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x000050C0; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->reg_AC2 = 0xCC00B016 | trap_AC2; + break; + default: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC00B01B | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC00B02B | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00B03F | trap_AC2; + break; + } + + break; + case 456: + ast_moutdwm(ast, 0x1E6E2020, 0x0230); + param->wodt = 0; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xCD44B01E; + param->reg_DQSIC = 0x000000FC; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000000; + param->reg_DRV = 0x00000000; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000097; + param->reg_FREQ = 0x000052C0; + param->madj_max = 88; + param->dll2_finetune_step = 3; + break; + case 504: + ast_moutdwm(ast, 0x1E6E2020, 0x0261); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xDE44C022; + param->reg_DQSIC = 0x00000117; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x0000000A; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000A0; + param->reg_FREQ = 0x000054C0; + param->madj_max = 79; + param->dll2_finetune_step = 3; + break; + case 528: + ast_moutdwm(ast, 
0x1E6E2020, 0x0120); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xEF44D024; + param->reg_DQSIC = 0x00000125; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F9; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000A7; + param->reg_FREQ = 0x000055C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 552: + ast_moutdwm(ast, 0x1E6E2020, 0x02A1); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x43402915; + param->reg_AC2 = 0xFF44E025; + param->reg_DQSIC = 0x00000132; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x0000000A; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000AD; + param->reg_FREQ = 0x000056C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 576: + ast_moutdwm(ast, 0x1E6E2020, 0x0140); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x43402915; + param->reg_AC2 = 0xFF44E027; + param->reg_DQSIC = 0x0000013F; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000B3; + param->reg_FREQ = 0x000057C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + } + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->dram_config = 0x100; + break; + default: + case AST_DRAM_1Gx16: + param->dram_config = 0x121; + break; + case AST_DRAM_2Gx16: + param->dram_config = 0x122; + break; + case AST_DRAM_4Gx16: + param->dram_config = 0x123; + break; + } /* switch size */ + + switch (param->vram_size) { + default: + case AST_VIDMEM_SIZE_8M: + param->dram_config |= 0x00; + break; + case AST_VIDMEM_SIZE_16M: + param->dram_config |= 0x04; + break; + case AST_VIDMEM_SIZE_32M: + param->dram_config |= 0x08; + break; + case AST_VIDMEM_SIZE_64M: + param->dram_config |= 0x0c; + break; + } +} + +static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param) +{ + u32 data, data2, retry = 0; + +ddr2_init_start: + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0018, 0x00000100); + ast_moutdwm(ast, 0x1E6E0024, 0x00000000); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); + ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); + udelay(10); + + ast_moutdwm(ast, 0x1E6E0004, param->dram_config); + ast_moutdwm(ast, 0x1E6E0008, 0x90040f); + ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); + ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); + ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); + ast_moutdwm(ast, 0x1E6E0080, 0x00000000); + ast_moutdwm(ast, 0x1E6E0084, 0x00000000); + ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); + ast_moutdwm(ast, 0x1E6E0018, 0x4000A130); + ast_moutdwm(ast, 0x1E6E0018, 0x00002330); + ast_moutdwm(ast, 0x1E6E0038, 0x00000000); + ast_moutdwm(ast, 0x1E6E0040, 0xFF808000); + ast_moutdwm(ast, 0x1E6E0044, 0x88848466); + ast_moutdwm(ast, 0x1E6E0048, 0x44440008); + ast_moutdwm(ast, 0x1E6E004C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); + ast_moutdwm(ast, 0x1E6E0054, 0); + ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); + ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0074, 0x00000000); + ast_moutdwm(ast, 0x1E6E0078, 0x00000000); + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + + /* Wait MCLK2X lock to MCLK */ + do { + data = ast_mindwm(ast, 0x1E6E001C); + } 
while (!(data & 0x08000000)); + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { + data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; + if ((data2 & 0xff) > param->madj_max) + break; + ast_moutdwm(ast, 0x1E6E0064, data2); + if (data2 & 0x00100000) + data2 = ((data2 & 0xff) >> 3) + 3; + else + data2 = ((data2 & 0xff) >> 2) + 5; + data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; + data2 += data & 0xff; + data = data | (data2 << 8); + ast_moutdwm(ast, 0x1E6E0068, data); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, + ast_mindwm(ast, 0x1E6E0064) | 0xC0000); + udelay(10); + data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; + ast_moutdwm(ast, 0x1E6E0018, data); + data = data | 0x200; + ast_moutdwm(ast, 0x1E6E0018, data); + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + } + ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff); + data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; + ast_moutdwm(ast, 0x1E6E0018, data); + + ast_moutdwm(ast, 0x1E6E0034, 0x00000001); + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + udelay(50); + /* Mode Register Setting */ + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000005); + ast_moutdwm(ast, 0x1E6E0028, 0x00000007); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + + ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + + ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01); + data = 0; + if (param->wodt) + data = 0x500; + if (param->rodt) + data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); + ast_moutdwm(ast, 0x1E6E0034, data | 0x3); + ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); + + /* Calibrate the DQSI delay */ + if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) + goto ddr2_init_start; + + /* ECC Memory Initialization */ +#ifdef ECC + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0070, 0x221); + do { + data = ast_mindwm(ast, 0x1E6E0070); + } while (!(data & 0x00001000)); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); +#endif +} + +static void ast_post_chip_2300(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + struct ast2300_dram_param param; + u32 temp; + u8 reg; + + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + if ((reg & 0x80) == 0) { /* vga only */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688a8a8); + do { + ; + } while (ast_read32(ast, 0x12000) != 0x1); + + ast_write32(ast, 0x10000, 0xfc600309); + do { + ; + } while (ast_read32(ast, 0x10000) != 0x1); + + /* Slow down CPU/AHB CLK in VGA only mode */ + temp = ast_read32(ast, 0x12008); + temp |= 0x73; + ast_write32(ast, 0x12008, temp); + + param.dram_freq = 396; + param.dram_type = AST_DDR3; + temp = ast_mindwm(ast, 0x1e6e2070); + if (temp & 0x01000000) + param.dram_type = AST_DDR2; + switch (temp & 0x18000000) { + case 0: + param.dram_chipid = AST_DRAM_512Mx16; + break; + default: + case 0x08000000: + 
param.dram_chipid = AST_DRAM_1Gx16; + break; + case 0x10000000: + param.dram_chipid = AST_DRAM_2Gx16; + break; + case 0x18000000: + param.dram_chipid = AST_DRAM_4Gx16; + break; + } + switch (temp & 0x0c) { + default: + case 0x00: + param.vram_size = AST_VIDMEM_SIZE_8M; + break; + + case 0x04: + param.vram_size = AST_VIDMEM_SIZE_16M; + break; + + case 0x08: + param.vram_size = AST_VIDMEM_SIZE_32M; + break; + + case 0x0c: + param.vram_size = AST_VIDMEM_SIZE_64M; + break; + } + + if (param.dram_type == AST_DDR3) { + get_ddr3_info(ast, ¶m); + ddr3_init(ast, ¶m); + } else { + get_ddr2_info(ast, ¶m); + ddr2_init(ast, ¶m); + } + + temp = ast_mindwm(ast, 0x1e6e2040); + ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); + } + + /* wait ready */ + do { + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + } while ((reg & 0x40) == 0); +} + +static bool cbr_test_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_single_2500(ast, 0)) + return false; + return true; +} + +static bool ddr_test_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_burst(ast, 1)) + return false; + if (!mmc_test_burst(ast, 2)) + return false; + if (!mmc_test_burst(ast, 3)) + return false; + if (!mmc_test_single_2500(ast, 0)) + return false; + return true; +} + +static void ddr_init_common_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); + ast_moutdwm(ast, 0x1E6E0008, 0x2003000F); + ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF); + ast_moutdwm(ast, 0x1E6E0040, 0x88448844); + ast_moutdwm(ast, 0x1E6E0044, 0x24422288); + ast_moutdwm(ast, 0x1E6E0048, 0x22222222); + ast_moutdwm(ast, 0x1E6E004C, 0x22222222); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0208, 0x00000000); + ast_moutdwm(ast, 0x1E6E0218, 0x00000000); + ast_moutdwm(ast, 0x1E6E0220, 0x00000000); + ast_moutdwm(ast, 0x1E6E0228, 0x00000000); + ast_moutdwm(ast, 0x1E6E0230, 0x00000000); + ast_moutdwm(ast, 0x1E6E02A8, 0x00000000); + ast_moutdwm(ast, 0x1E6E02B0, 0x00000000); + ast_moutdwm(ast, 0x1E6E0240, 0x86000000); + ast_moutdwm(ast, 0x1E6E0244, 0x00008600); + ast_moutdwm(ast, 0x1E6E0248, 0x80000000); + ast_moutdwm(ast, 0x1E6E024C, 0x80808080); +} + +static void ddr_phy_init_2500(struct ast_private *ast) +{ + u32 data, pass, timecnt; + + pass = 0; + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); + while (!pass) { + for (timecnt = 0; timecnt < TIMEOUT; timecnt++) { + data = ast_mindwm(ast, 0x1E6E0060) & 0x1; + if (!data) + break; + } + if (timecnt != TIMEOUT) { + data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000; + if (!data) + pass = 1; + } + if (!pass) { + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + udelay(10); /* delay 10 us */ + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); + } + } + + ast_moutdwm(ast, 0x1E6E0060, 0x00000006); +} + +/* + * Check DRAM Size + * 1Gb : 0x80000000 ~ 0x87FFFFFF + * 2Gb : 0x80000000 ~ 0x8FFFFFFF + * 4Gb : 0x80000000 ~ 0x9FFFFFFF + * 8Gb : 0x80000000 ~ 0xBFFFFFFF + */ +static void check_dram_size_2500(struct ast_private *ast, u32 tRFC) +{ + u32 reg_04, reg_14; + + reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc; + reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00; + + ast_moutdwm(ast, 0xA0100000, 0x41424344); + ast_moutdwm(ast, 0x90100000, 0x35363738); + ast_moutdwm(ast, 0x88100000, 0x292A2B2C); + ast_moutdwm(ast, 0x80100000, 0x1D1E1F10); + + 
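+	/* + * On smaller parts the high test addresses alias back into the + * array, so each of the four writes above overwrites the previous + * pattern; reading back from the largest footprint down therefore + * reveals the populated density. + */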
/* Check 8Gbit */ + if (ast_mindwm(ast, 0xA0100000) == 0x41424344) { + reg_04 |= 0x03; + reg_14 |= (tRFC >> 24) & 0xFF; + /* Check 4Gbit */ + } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) { + reg_04 |= 0x02; + reg_14 |= (tRFC >> 16) & 0xFF; + /* Check 2Gbit */ + } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) { + reg_04 |= 0x01; + reg_14 |= (tRFC >> 8) & 0xFF; + } else { + reg_14 |= tRFC & 0xFF; + } + ast_moutdwm(ast, 0x1E6E0004, reg_04); + ast_moutdwm(ast, 0x1E6E0014, reg_14); +} + +static void enable_cache_2500(struct ast_private *ast) +{ + u32 reg_04, data; + + reg_04 = ast_mindwm(ast, 0x1E6E0004); + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000); + + do + data = ast_mindwm(ast, 0x1E6E0004); + while (!(data & 0x80000)); + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400); +} + +static void set_mpll_2500(struct ast_private *ast) +{ + u32 addr, data, param; + + /* Reset MMC */ + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); + for (addr = 0x1e6e0004; addr < 0x1e6e0090;) { + ast_moutdwm(ast, addr, 0x0); + addr += 4; + } + ast_moutdwm(ast, 0x1E6E0034, 0x00020000); + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000; + if (data) { + /* CLKIN = 25MHz */ + param = 0x930023E0; + ast_moutdwm(ast, 0x1E6E2160, 0x00011320); + } else { + /* CLKIN = 24MHz */ + param = 0x93002400; + } + ast_moutdwm(ast, 0x1E6E2020, param); + udelay(100); +} + +static void reset_mmc_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E78505C, 0x00000004); + ast_moutdwm(ast, 0x1E785044, 0x00000001); + ast_moutdwm(ast, 0x1E785048, 0x00004755); + ast_moutdwm(ast, 0x1E78504C, 0x00000013); + mdelay(100); + ast_moutdwm(ast, 0x1E785054, 0x00000077); + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); +} + +static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table) +{ + ast_moutdwm(ast, 0x1E6E0004, 0x00000303); + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); + ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ + ast_moutdwm(ast, 0x1E6E002C, + ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ + + /* DDR PHY Setting */ + ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE); + ast_moutdwm(ast, 0x1E6E0204, 0x00001001); + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); + ast_moutdwm(ast, 0x1E6E0290, 0x00100008); + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006); + + /* Controller Setting */ + ast_moutdwm(ast, 0x1E6E0034, 0x00020091); + + /* Wait DDR PHY init done */ + ddr_phy_init_2500(ast); + + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); + ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); + + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); + enable_cache_2500(ast); + ast_moutdwm(ast, 0x1E6E001C, 0x00000008); + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); +} + +static 
void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table) +{ + u32 data, data2, pass, retrycnt; + u32 ddr_vref, phy_vref; + u32 min_ddr_vref = 0, min_phy_vref = 0; + u32 max_ddr_vref = 0, max_phy_vref = 0; + + ast_moutdwm(ast, 0x1E6E0004, 0x00000313); + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); + ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ + ast_moutdwm(ast, 0x1E6E002C, + ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ + + /* DDR PHY Setting */ + ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE); + ast_moutdwm(ast, 0x1E6E0204, 0x09002000); + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); + ast_moutdwm(ast, 0x1E6E0290, 0x00100008); + ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C); + ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E); + + /* Controller Setting */ + ast_moutdwm(ast, 0x1E6E0034, 0x0001A991); + + /* Train PHY Vref first */ + pass = 0; + + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { + max_phy_vref = 0x0; + pass = 0; + ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06); + for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) { + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ast_moutdwm(ast, 0x1E6E02CC, + phy_vref | (phy_vref << 8)); + /* Fire DFI Init */ + ddr_phy_init_2500(ast); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + if (cbr_test_2500(ast)) { + pass++; + data = ast_mindwm(ast, 0x1E6E03D0); + data2 = data >> 8; + data = data & 0xff; + if (data > data2) + data = data2; + if (max_phy_vref < data) { + max_phy_vref = data; + min_phy_vref = phy_vref; + } + } else if (pass > 0) + break; + } + } + ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8)); + + /* Train DDR Vref next */ + pass = 0; + + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { + min_ddr_vref = 0xFF; + max_ddr_vref = 0x0; + pass = 0; + for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) { + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ast_moutdwm(ast, 0x1E6E02C0, + 0x00000006 | (ddr_vref << 8)); + /* Fire DFI Init */ + ddr_phy_init_2500(ast); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + if (cbr_test_2500(ast)) { + pass++; + if (min_ddr_vref > ddr_vref) + min_ddr_vref = ddr_vref; + if (max_ddr_vref < ddr_vref) + max_ddr_vref = ddr_vref; + } else if (pass != 0) + break; + } + } + + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1; + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); + + /* Wait DDR PHY init done */ + ddr_phy_init_2500(ast); + + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); + ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); + + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); + enable_cache_2500(ast); + 
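+	/* Same closing sequence as ddr3_init_2500() above */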
ast_moutdwm(ast, 0x1E6E001C, 0x00000008); + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); +} + +static bool ast_dram_init_2500(struct ast_private *ast) +{ + u32 data; + u32 max_tries = 5; + + do { + if (max_tries-- == 0) + return false; + set_mpll_2500(ast); + reset_mmc_2500(ast); + ddr_init_common_2500(ast); + + data = ast_mindwm(ast, 0x1E6E2070); + if (data & 0x01000000) + ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table); + else + ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table); + } while (!ddr_test_2500(ast)); + + ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41); + + /* Patch code */ + data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF; + ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000); + + return true; +} + +void ast_patch_ahb_2500(struct ast_private *ast) +{ + u32 data; + + /* Clear bus lock condition */ + ast_moutdwm(ast, 0x1e600000, 0xAEED1A03); + ast_moutdwm(ast, 0x1e600084, 0x00010000); + ast_moutdwm(ast, 0x1e600088, 0x00000000); + ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); + data = ast_mindwm(ast, 0x1e6e2070); + if (data & 0x08000000) { /* check fast reset */ + /* + * If "Fast reset" is enabled for the ARM-ICE debugger, + * then the WDT needs to be enabled, where + * WDT04 is WDT#1 Reload reg. + * WDT08 is WDT#1 counter restart reg to avoid system deadlock + * WDT0C is WDT#1 control reg + * [6:5]:= 01:Full chip + * [4]:= 1:1MHz clock source + * [1]:= 1:WDT will be cleared and disabled after timeout occurs + * [0]:= 1:WDT enable + */ + ast_moutdwm(ast, 0x1E785004, 0x00000010); + ast_moutdwm(ast, 0x1E785008, 0x00004755); + ast_moutdwm(ast, 0x1E78500c, 0x00000033); + udelay(1000); + } + do { + ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); + data = ast_mindwm(ast, 0x1e6e2000); + } while (data != 1); + ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */ +} + +void ast_post_chip_2500(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + u32 temp; + u8 reg; + + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) { /* vga only */ + /* Clear bus lock condition */ + ast_patch_ahb_2500(ast); + + /* Disable watchdog */ + ast_moutdwm(ast, 0x1E78502C, 0x00000000); + ast_moutdwm(ast, 0x1E78504C, 0x00000000); + + /* + * Reset the USB port to work around the USB unknown-device issue + * SCU90 is Multi-function Pin Control #5 + * [29]:= 1:Enable USB2.0 Host port#1 (the mutually shared USB2.0 Hub + * port). + * SCU94 is Multi-function Pin Control #6 + * [14:13]:= 1x:USB2.0 Host2 controller + * SCU70 is Hardware Strap reg + * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by + * [18]: 0(24)/1(48) MHz) + * SCU7C is Write clear reg to SCU70 + * [23]:= write 1 and then SCU70[23] will be cleared to 0b. 
+ */ + ast_moutdwm(ast, 0x1E6E2090, 0x20000000); + ast_moutdwm(ast, 0x1E6E2094, 0x00004000); + if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) { + ast_moutdwm(ast, 0x1E6E207C, 0x00800000); + mdelay(100); + ast_moutdwm(ast, 0x1E6E2070, 0x00800000); + } + /* Modify eSPI reset pin */ + temp = ast_mindwm(ast, 0x1E6E2070); + if (temp & 0x02000000) + ast_moutdwm(ast, 0x1E6E207C, 0x00004000); + + /* Slow down CPU/AHB CLK in VGA only mode */ + temp = ast_read32(ast, 0x12008); + temp |= 0x73; + ast_write32(ast, 0x12008, temp); + + if (!ast_dram_init_2500(ast)) + drm_err(dev, "DRAM init failed !\n"); + + temp = ast_mindwm(ast, 0x1e6e2040); + ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); + } + + /* wait ready */ + do { + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + } while ((reg & 0x40) == 0); +} diff --git a/drivers/gpu/drm/ast_loongson/ast_tables.h b/drivers/gpu/drm/ast_loongson/ast_tables.h new file mode 100644 index 0000000000000000000000000000000000000000..e92a17a5cf27f933f707256d9e772c227cf7a0ed --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_tables.h @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2005 ASPEED Technology Inc. + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that + * copyright notice and this permission notice appear in supporting + * documentation, and that the name of the authors not be used in + * advertising or publicity pertaining to distribution of the software without + * specific, written prior permission. The authors makes no representations + * about the suitability of this software for any purpose. It is provided + * "as is" without express or implied warranty. + * + * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/* Ported from xf86-video-ast driver */ + +#ifndef AST_TABLES_H +#define AST_TABLES_H + +/* Std. 
Table Index Definition */ +#define TextModeIndex 0 +#define EGAModeIndex 1 +#define VGAModeIndex 2 +#define HiCModeIndex 3 +#define TrueCModeIndex 4 + +#define Charx8Dot 0x00000001 +#define HalfDCLK 0x00000002 +#define DoubleScanMode 0x00000004 +#define LineCompareOff 0x00000008 +#define HBorder 0x00000020 +#define VBorder 0x00000010 +#define WideScreenMode 0x00000100 +#define NewModeInfo 0x00000200 +#define NHSync 0x00000400 +#define PHSync 0x00000800 +#define NVSync 0x00001000 +#define PVSync 0x00002000 +#define SyncPP (PVSync | PHSync) +#define SyncPN (PVSync | NHSync) +#define SyncNP (NVSync | PHSync) +#define SyncNN (NVSync | NHSync) +#define AST2500PreCatchCRT 0x00004000 + +/* DCLK Index */ +#define VCLK25_175 0x00 +#define VCLK28_322 0x01 +#define VCLK31_5 0x02 +#define VCLK36 0x03 +#define VCLK40 0x04 +#define VCLK49_5 0x05 +#define VCLK50 0x06 +#define VCLK56_25 0x07 +#define VCLK65 0x08 +#define VCLK75 0x09 +#define VCLK78_75 0x0A +#define VCLK94_5 0x0B +#define VCLK108 0x0C +#define VCLK135 0x0D +#define VCLK157_5 0x0E +#define VCLK162 0x0F +#define VCLK154 0x10 +#define VCLK83_5 0x11 +#define VCLK106_5 0x12 +#define VCLK146_25 0x13 +#define VCLK148_5 0x14 +#define VCLK71 0x15 +#define VCLK88_75 0x16 +#define VCLK119 0x17 +#define VCLK85_5 0x18 +#define VCLK97_75 0x19 +#define VCLK118_25 0x1A + +static const struct ast_vbios_dclk_info dclk_table[] = { + { 0x2C, 0xE7, 0x03 }, /* 00: VCLK25_175 */ + { 0x95, 0x62, 0x03 }, /* 01: VCLK28_322 */ + { 0x67, 0x63, 0x01 }, /* 02: VCLK31_5 */ + { 0x76, 0x63, 0x01 }, /* 03: VCLK36 */ + { 0xEE, 0x67, 0x01 }, /* 04: VCLK40 */ + { 0x82, 0x62, 0x01 }, /* 05: VCLK49_5 */ + { 0xC6, 0x64, 0x01 }, /* 06: VCLK50 */ + { 0x94, 0x62, 0x01 }, /* 07: VCLK56_25 */ + { 0x80, 0x64, 0x00 }, /* 08: VCLK65 */ + { 0x7B, 0x63, 0x00 }, /* 09: VCLK75 */ + { 0x67, 0x62, 0x00 }, /* 0A: VCLK78_75 */ + { 0x7C, 0x62, 0x00 }, /* 0B: VCLK94_5 */ + { 0x8E, 0x62, 0x00 }, /* 0C: VCLK108 */ + { 0x85, 0x24, 0x00 }, /* 0D: VCLK135 */ + { 0x67, 0x22, 0x00 }, /* 0E: VCLK157_5 */ + { 0x6A, 0x22, 0x00 }, /* 0F: VCLK162 */ + { 0x4d, 0x4c, 0x80 }, /* 10: VCLK154 */ + { 0x68, 0x6f, 0x80 }, /* 11: VCLK83.5 */ + { 0x28, 0x49, 0x80 }, /* 12: VCLK106.5 */ + { 0x37, 0x49, 0x80 }, /* 13: VCLK146.25 */ + { 0x1f, 0x45, 0x80 }, /* 14: VCLK148.5 */ + { 0x47, 0x6c, 0x80 }, /* 15: VCLK71 */ + { 0x25, 0x65, 0x80 }, /* 16: VCLK88.75 */ + { 0x77, 0x58, 0x80 }, /* 17: VCLK119 */ + { 0x32, 0x67, 0x80 }, /* 18: VCLK85_5 */ + { 0x6a, 0x6d, 0x80 }, /* 19: VCLK97_75 */ + { 0x3b, 0x2c, 0x81 }, /* 1A: VCLK118_25 */ +}; + +static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { + { 0x2C, 0xE7, 0x03 }, /* 00: VCLK25_175 */ + { 0x95, 0x62, 0x03 }, /* 01: VCLK28_322 */ + { 0x67, 0x63, 0x01 }, /* 02: VCLK31_5 */ + { 0x76, 0x63, 0x01 }, /* 03: VCLK36 */ + { 0xEE, 0x67, 0x01 }, /* 04: VCLK40 */ + { 0x82, 0x62, 0x01 }, /* 05: VCLK49_5 */ + { 0xC6, 0x64, 0x01 }, /* 06: VCLK50 */ + { 0x94, 0x62, 0x01 }, /* 07: VCLK56_25 */ + { 0x80, 0x64, 0x00 }, /* 08: VCLK65 */ + { 0x7B, 0x63, 0x00 }, /* 09: VCLK75 */ + { 0x67, 0x62, 0x00 }, /* 0A: VCLK78_75 */ + { 0x7C, 0x62, 0x00 }, /* 0B: VCLK94_5 */ + { 0x8E, 0x62, 0x00 }, /* 0C: VCLK108 */ + { 0x85, 0x24, 0x00 }, /* 0D: VCLK135 */ + { 0x67, 0x22, 0x00 }, /* 0E: VCLK157_5 */ + { 0x6A, 0x22, 0x00 }, /* 0F: VCLK162 */ + { 0x4d, 0x4c, 0x80 }, /* 10: VCLK154 */ + { 0x68, 0x6f, 0x80 }, /* 11: VCLK83.5 */ + { 0x28, 0x49, 0x80 }, /* 12: VCLK106.5 */ + { 0x37, 0x49, 0x80 }, /* 13: VCLK146.25 */ + { 0x1f, 0x45, 0x80 }, /* 14: VCLK148.5 */ + { 0x47, 0x6c, 0x80 }, /* 15: VCLK71 
*/ + { 0x25, 0x65, 0x80 }, /* 16: VCLK88.75 */ + { 0x58, 0x01, 0x42 }, /* 17: VCLK119 */ + { 0x32, 0x67, 0x80 }, /* 18: VCLK85_5 */ + { 0x6a, 0x6d, 0x80 }, /* 19: VCLK97_75 */ + { 0x44, 0x20, 0x43 }, /* 1A: VCLK118_25 */ +}; + +static const struct ast_vbios_stdtable vbios_stdtable[] = { + /* MD_2_3_400 */ + { 0x67, + { 0x00, 0x03, 0x00, 0x02 }, + { 0x5f, 0x4f, 0x50, 0x82, 0x55, 0x81, 0xbf, 0x1f, 0x00, + 0x4f, 0x0d, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x8e, + 0x8f, 0x28, 0x1f, 0x96, 0xb9, 0xa3, 0xff }, + { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x14, 0x07, 0x38, 0x39, + 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x0c, 0x00, 0x0f, 0x08 }, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x0e, 0x00, 0xff } }, + /* Mode12/ExtEGATable */ + { 0xe3, + { 0x01, 0x0f, 0x00, 0x06 }, + { 0x5f, 0x4f, 0x50, 0x82, 0x55, 0x81, 0x0b, 0x3e, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0x8b, + 0xdf, 0x28, 0x00, 0xe7, 0x04, 0xe3, 0xff }, + { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x14, 0x07, 0x38, 0x39, + 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x01, 0x00, 0x0f, 0x00 }, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0f, 0xff } }, + /* ExtVGATable */ + { 0x2f, + { 0x01, 0x0f, 0x00, 0x0e }, + { 0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x8c, + 0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3, 0xff }, + { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, + 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x01, 0x00, 0x00, 0x00 }, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0f, 0xff } }, + /* ExtHiCTable */ + { 0x2f, + { 0x01, 0x0f, 0x00, 0x0e }, + { 0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x8c, + 0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3, 0xff }, + { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, + 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x01, 0x00, 0x00, 0x00 }, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0f, 0xff } }, + /* ExtTrueCTable */ + { 0x2f, + { 0x01, 0x0f, 0x00, 0x0e }, + { 0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x8c, + 0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3, 0xff }, + { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, + 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x01, 0x00, 0x00, 0x00 }, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0f, 0xff } }, +}; + +static const struct ast_vbios_enhtable res_640x480[] = { + { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */ + (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E }, + { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */ + (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E }, + { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */ + (SyncNN | Charx8Dot), 75, 3, 0x2E }, + { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */ + (SyncNN | Charx8Dot), 85, 4, 0x2E }, + { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */ + (SyncNN | Charx8Dot), 0xFF, 4, 0x2E }, +}; + +static const struct ast_vbios_enhtable res_800x600[] = { + { 1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */ + (SyncPP | Charx8Dot), 56, 1, 0x30 }, + { 1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */ + (SyncPP | Charx8Dot), 60, 2, 0x30 }, + { 1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */ + (SyncPP | Charx8Dot), 72, 3, 0x30 }, + { 1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */ + (SyncPP | Charx8Dot), 75, 4, 0x30 }, + { 1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */ + (SyncPP | Charx8Dot), 84, 5, 0x30 }, + { 1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */ 
+ (SyncPP | Charx8Dot), 0xFF, 5, 0x30 }, +}; + +static const struct ast_vbios_enhtable res_1024x768[] = { + { 1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */ + (SyncNN | Charx8Dot), 60, 1, 0x31 }, + { 1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */ + (SyncNN | Charx8Dot), 70, 2, 0x31 }, + { 1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */ + (SyncPP | Charx8Dot), 75, 3, 0x31 }, + { 1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */ + (SyncPP | Charx8Dot), 84, 4, 0x31 }, + { 1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */ + (SyncPP | Charx8Dot), 0xFF, 4, 0x31 }, +}; + +static const struct ast_vbios_enhtable res_1280x1024[] = { + { 1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */ + (SyncPP | Charx8Dot), 60, 1, 0x32 }, + { 1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */ + (SyncPP | Charx8Dot), 75, 2, 0x32 }, + { 1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */ + (SyncPP | Charx8Dot), 85, 3, 0x32 }, + { 1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */ + (SyncPP | Charx8Dot), 0xFF, 3, 0x32 }, +}; + +static const struct ast_vbios_enhtable res_1600x1200[] = { + { 2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */ + (SyncPP | Charx8Dot), 60, 1, 0x33 }, + { 2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */ + (SyncPP | Charx8Dot), 0xFF, 1, 0x33 }, +}; + +static const struct ast_vbios_enhtable res_1152x864[] = { + { 1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75Hz */ + (SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3B }, + { 1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* end */ + (SyncPP | Charx8Dot | NewModeInfo), 0xFF, 1, 0x3B }, +}; + +/* 16:9 */ +static const struct ast_vbios_enhtable res_1360x768[] = { + { 1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 60, 1, 0x39 }, + { 1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* end */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 0xFF, 1, 0x39 }, +}; + +static const struct ast_vbios_enhtable res_1600x900[] = { + { 1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x3A }, + { 2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 60, 2, 0x3A }, + { 2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 0xFF, 2, 0x3A }, +}; + +static const struct ast_vbios_enhtable res_1920x1080[] = { + { 2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x38 }, + { 2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 0xFF, 1, 0x38 }, +}; + +/* 16:10 */ +static const struct ast_vbios_enhtable res_1280x800[] = { + { 1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x35 }, + { 1680, 1280, 72, 128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 60, 2, 0x35 }, + { 1680, 1280, 72, 128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | 
WideScreenMode | NewModeInfo), + 0xFF, 2, 0x35 }, + +}; + +static const struct ast_vbios_enhtable res_1440x900[] = { + { 1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x36 }, + { 1904, 1440, 80, 152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 60, 2, 0x36 }, + { 1904, 1440, 80, 152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 0xFF, 2, 0x36 }, +}; + +static const struct ast_vbios_enhtable res_1680x1050[] = { + { 1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x37 }, + { 2240, 1680, 104, 176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 60, 2, 0x37 }, + { 2240, 1680, 104, 176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 0xFF, 2, 0x37 }, +}; + +static const struct ast_vbios_enhtable res_1920x1200[] = { + { 2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x34 }, + { 2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 0xFF, 1, 0x34 }, +}; + +#endif diff --git a/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile b/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..2dc816df42398c8988a3599f76c323055ab1839a --- /dev/null +++ b/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hydcu_pci_fixup_header.o \ No newline at end of file diff --git a/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c b/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c new file mode 100644 index 0000000000000000000000000000000000000000..962f0d74f703d2530b27c3d2ae94d29bafbc4435 --- /dev/null +++ b/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON DCU fixup driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Baoshun Fang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#define PCI_VENDOR_ID_HYGON 0x1d94 + +#define DEVICE_Z100SM 0x51b7 +#define DEVICE_C878182 0x52b7 +#define DEVICE_C878186 0x53b7 +#define DEVICE_Z100 0x54b7 +#define DEVICE_Z100L 0x55b7 +#define DEVICE_C878181 0x56b7 +#define DEVICE_C878185 0x57b7 +#define DEVICE_C878188 0x58b7 +#define DEVICE_C878174 0x59b7 +#define DEVICE_KONGMING 0x61b7 +#define DEVICE_KONGMING_E 0x6210 + +#define DRIVER_VERSION "0.2" +#define DRIVER_AUTHOR "huangjun " +#define DRIVER_DESC "fix dcu header" + +static int hydcu_pci_fixup_header_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + dev_info(&pdev->dev, "add flags NO_BUS_RESET\n"); + pdev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; + pdev->pm_cap = 0; + dev_info(&pdev->dev, "will abort probe\n"); + + return -EINVAL; +} + +static void hydcu_pci_fixup_header_remove(struct pci_dev *pdev) +{ +} + +static const struct pci_device_id hydcu_pci_fixup_header_ids[] = { + { PCI_VDEVICE(HYGON, DEVICE_Z100SM), }, + { PCI_VDEVICE(HYGON, DEVICE_C878182), }, + { PCI_VDEVICE(HYGON, DEVICE_C878186), }, + { PCI_VDEVICE(HYGON, DEVICE_Z100), }, + { PCI_VDEVICE(HYGON, DEVICE_Z100L), }, + { PCI_VDEVICE(HYGON, DEVICE_C878181), }, + { PCI_VDEVICE(HYGON, DEVICE_C878185), }, + { PCI_VDEVICE(HYGON, DEVICE_C878188), }, + { PCI_VDEVICE(HYGON, DEVICE_C878174), }, + { PCI_VDEVICE(HYGON, DEVICE_KONGMING), }, + { PCI_VDEVICE(HYGON, DEVICE_KONGMING_E), }, + {}, +}; + +static struct pci_driver hydcu_pci_fixup_header_driver = { + .name = "hydcu-fixup-header", + .id_table = hydcu_pci_fixup_header_ids, + .probe = hydcu_pci_fixup_header_probe, + .remove = hydcu_pci_fixup_header_remove, +}; + +static int __init hydcu_pci_fixup_header_init(void) +{ + /* Register and scan for devices */ + return pci_register_driver(&hydcu_pci_fixup_header_driver); +} + +static void __exit hydcu_pci_fixup_header_cleanup(void) +{ + pci_unregister_driver(&hydcu_pci_fixup_header_driver); +} + +module_init(hydcu_pci_fixup_header_init); +module_exit(hydcu_pci_fixup_header_cleanup); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/gpu/drm/inspur/Kconfig b/drivers/gpu/drm/inspur/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..9ee949fc69365ab375e41f72ca03dc6297649ce1 --- /dev/null +++ b/drivers/gpu/drm/inspur/Kconfig @@ -0,0 +1,5 @@ +# License: GPL-2.0 +# +# Inspur DRM device configuration. + +source "drivers/gpu/drm/inspur/inspur-drm/Kconfig" diff --git a/drivers/gpu/drm/inspur/Makefile b/drivers/gpu/drm/inspur/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9fd0eb7a1035f0292c30b6d5a7c2fdee886aa511 --- /dev/null +++ b/drivers/gpu/drm/inspur/Makefile @@ -0,0 +1,4 @@ +# +# Makefile for Inspur DRM drivers. + +obj-$(CONFIG_DRM_INSPUR) += inspur-drm/ diff --git a/drivers/gpu/drm/inspur/inspur-drm/Kconfig b/drivers/gpu/drm/inspur/inspur-drm/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..c060825d6116829856a335cab98468fbd4624978 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/Kconfig @@ -0,0 +1,8 @@ +config DRM_INSPUR + tristate "DRM Support for Inspur BMC" + depends on DRM && PCI && MMU + select DRM_KMS_HELPER + select DRM_VRAM_HELPER + help + Choose this option if you have an Inspur SoC chipset. If M is selected, + the module will be called inspur-drm. 
diff --git a/drivers/gpu/drm/inspur/inspur-drm/Makefile b/drivers/gpu/drm/inspur/inspur-drm/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..be54bb9e51d0327b82a4ca6d63efba702f3e2207 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/Makefile @@ -0,0 +1,3 @@ +inspur-drm-y := inspur_drm_drv.o inspur_drm_de.o inspur_drm_vdac.o inspur_ttm.o + +obj-$(CONFIG_DRM_INSPUR) += inspur-drm.o diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_de.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_de.c new file mode 100644 index 0000000000000000000000000000000000000000..fae1014e5d59f880e252af88cd1d2ce71b2674b4 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_de.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_vram_helper.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_vblank.h> + +#include "inspur_drm_drv.h" +#include "inspur_drm_regs.h" + +struct inspur_dislay_pll_config { + unsigned long hdisplay; + unsigned long vdisplay; + u32 pll1_config_value; + u32 pll2_config_value; +}; + +static const struct inspur_dislay_pll_config inspur_pll_table[] = { + { 640, 480, CRT_PLL1_NS_25MHZ, CRT_PLL2_NS_25MHZ }, + { 800, 600, CRT_PLL1_NS_40MHZ, CRT_PLL2_NS_40MHZ }, + { 1024, 768, CRT_PLL1_NS_65MHZ, CRT_PLL2_NS_65MHZ }, + { 1280, 1024, CRT_PLL1_NS_108MHZ, CRT_PLL2_NS_108MHZ }, + { 1920, 1080, CRT_PLL1_NS_148MHZ, CRT_PLL2_NS_148MHZ }, +}; + +#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1))) + +static int inspur_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *atom_state) +{ + struct drm_plane_state *state = drm_atomic_get_new_plane_state(atom_state, plane); + struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = state->crtc; + struct drm_crtc_state *crtc_state; + u32 src_w = state->src_w >> 16; + u32 src_h = state->src_h >> 16; + + if (!crtc || !fb) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (src_w != state->crtc_w || src_h != state->crtc_h) { + DRM_DEBUG_ATOMIC("scaling is not supported\n"); + return -EINVAL; + } + + if (state->crtc_x < 0 || state->crtc_y < 0) { + DRM_DEBUG_ATOMIC("crtc_x/y of drm_plane state is invalid\n"); + return -EINVAL; + } + + if (!crtc_state->enable) + return 0; + + if (state->crtc_x + state->crtc_w > + crtc_state->adjusted_mode.hdisplay || + state->crtc_y + state->crtc_h > + crtc_state->adjusted_mode.vdisplay) { + DRM_DEBUG_ATOMIC("visible portion of plane is invalid\n"); + return -EINVAL; + } + + if (state->fb->pitches[0] % 16 != 0) { + DRM_DEBUG_ATOMIC("stride must be 16-byte aligned\n"); + return -EINVAL; + } + + return 0; +} + +static void inspur_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *old_state) +{ + struct drm_plane_state *state = plane->state; + u32 reg; + int ret; + s64 gpu_addr = 0; + unsigned int line_l; + struct inspur_drm_private *priv = plane->dev->dev_private; + struct drm_gem_vram_object *gbo; + + if (!state->fb) + return; + + gbo = drm_gem_vram_of_gem(state->fb->obj[0]); + + ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); + if (ret) { + DRM_ERROR("failed to pin bo: %d\n", ret); + return; + } + gpu_addr = drm_gem_vram_offset(gbo); + if (gpu_addr < 0) { + drm_gem_vram_unpin(gbo); + return; + } + + writel(gpu_addr, priv->mmio + INSPUR_CRT_FB_ADDRESS); + + reg = state->fb->width * (state->fb->format->cpp[0]); + + line_l = state->fb->pitches[0]; + writel(INSPUR_FIELD(INSPUR_CRT_FB_WIDTH_WIDTH, reg) | + 
INSPUR_FIELD(INSPUR_CRT_FB_WIDTH_OFFS, line_l), + priv->mmio + INSPUR_CRT_FB_WIDTH); + + /* SET PIXEL FORMAT */ + reg = readl(priv->mmio + INSPUR_CRT_DISP_CTL); + reg &= ~INSPUR_CRT_DISP_CTL_FORMAT_MASK; + reg |= INSPUR_FIELD(INSPUR_CRT_DISP_CTL_FORMAT, + state->fb->format->cpp[0] * 8 / 16); + writel(reg, priv->mmio + INSPUR_CRT_DISP_CTL); +} + +static const u32 channel_formats1[] = { + DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888 +}; + +static const struct drm_plane_funcs inspur_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static const struct drm_plane_helper_funcs inspur_plane_helper_funcs = { + .atomic_check = inspur_plane_atomic_check, + .atomic_update = inspur_plane_atomic_update, +}; + +static struct drm_plane *inspur_plane_init(struct inspur_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_plane *plane; + int ret = 0; + + plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL); + if (!plane) { + DRM_ERROR("failed to allocate memory for plane\n"); + return ERR_PTR(-ENOMEM); + } + ret = drm_universal_plane_init(dev, plane, 1, &inspur_plane_funcs, + channel_formats1, + ARRAY_SIZE(channel_formats1), + NULL, DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) { + DRM_ERROR("failed to init plane: %d\n", ret); + return ERR_PTR(ret); + } + + drm_plane_helper_add(plane, &inspur_plane_helper_funcs); + return plane; +} + +static void inspur_crtc_dpms(struct drm_crtc *crtc, int dpms) +{ + struct inspur_drm_private *priv = crtc->dev->dev_private; + unsigned int reg; + + reg = readl(priv->mmio + INSPUR_CRT_DISP_CTL); + reg &= ~INSPUR_CRT_DISP_CTL_DPMS_MASK; + reg |= INSPUR_FIELD(INSPUR_CRT_DISP_CTL_DPMS, dpms); + reg &= ~INSPUR_CRT_DISP_CTL_TIMING_MASK; + if (dpms == INSPUR_CRT_DPMS_ON) + reg |= INSPUR_CRT_DISP_CTL_TIMING(1); + writel(reg, priv->mmio + INSPUR_CRT_DISP_CTL); +} + +static void inspur_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) +{ + unsigned int reg; + struct inspur_drm_private *priv = crtc->dev->dev_private; + + inspur_set_power_mode(priv, INSPUR_PW_MODE_CTL_MODE_MODE0); + + /* Enable display power gate & LOCALMEM power gate */ + reg = readl(priv->mmio + INSPUR_CURRENT_GATE); + reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK; + reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK; + reg |= INSPUR_CURR_GATE_LOCALMEM(1); + reg |= INSPUR_CURR_GATE_DISPLAY(1); + inspur_set_current_gate(priv, reg); + inspur_crtc_dpms(crtc, INSPUR_CRT_DPMS_ON); +} + +static void inspur_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) +{ + unsigned int reg; + struct inspur_drm_private *priv = crtc->dev->dev_private; + + inspur_crtc_dpms(crtc, INSPUR_CRT_DPMS_OFF); + + inspur_set_power_mode(priv, INSPUR_PW_MODE_CTL_MODE_SLEEP); + + /* Disable display power gate & LOCALMEM power gate */ + reg = readl(priv->mmio + INSPUR_CURRENT_GATE); + reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK; + reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK; + reg |= INSPUR_CURR_GATE_LOCALMEM(0); + reg |= INSPUR_CURR_GATE_DISPLAY(0); + inspur_set_current_gate(priv, reg); +} + +static enum drm_mode_status +inspur_crtc_mode_valid(struct drm_crtc *crtc, + 
const struct drm_display_mode *mode) +{ + int i = 0; + int vrefresh = drm_mode_vrefresh(mode); + + if (vrefresh < 59 || vrefresh > 61) + return MODE_NOCLOCK; + + for (i = 0; i < ARRAY_SIZE(inspur_pll_table); i++) { + if (inspur_pll_table[i].hdisplay == mode->hdisplay && + inspur_pll_table[i].vdisplay == mode->vdisplay) + return MODE_OK; + } + + return MODE_BAD; +} + +static void set_vclock_inspur(struct drm_device *dev, unsigned long pll) +{ + u32 val; + struct inspur_drm_private *priv = dev->dev_private; + + val = readl(priv->mmio + CRT_PLL1_NS); + val &= ~(CRT_PLL1_NS_OUTER_BYPASS(1)); + writel(val, priv->mmio + CRT_PLL1_NS); + + val = CRT_PLL1_NS_INTER_BYPASS(1) | CRT_PLL1_NS_POWERON(1); + writel(val, priv->mmio + CRT_PLL1_NS); + + writel(pll, priv->mmio + CRT_PLL1_NS); + + usleep_range(1000, 2000); + + val = pll & ~(CRT_PLL1_NS_POWERON(1)); + writel(val, priv->mmio + CRT_PLL1_NS); + + usleep_range(1000, 2000); + + val &= ~(CRT_PLL1_NS_INTER_BYPASS(1)); + writel(val, priv->mmio + CRT_PLL1_NS); + + usleep_range(1000, 2000); + + val |= CRT_PLL1_NS_OUTER_BYPASS(1); + writel(val, priv->mmio + CRT_PLL1_NS); +} + +static void get_pll_config(unsigned long x, unsigned long y, + u32 *pll1, u32 *pll2) +{ + int i; + int count = ARRAY_SIZE(inspur_pll_table); + + for (i = 0; i < count; i++) { + if (inspur_pll_table[i].hdisplay == x && + inspur_pll_table[i].vdisplay == y) { + *pll1 = inspur_pll_table[i].pll1_config_value; + *pll2 = inspur_pll_table[i].pll2_config_value; + return; + } + } + + /* if none is found, fall back to the default values */ + *pll1 = CRT_PLL1_NS_25MHZ; + *pll2 = CRT_PLL2_NS_25MHZ; +} + +/* + * This function takes care of the extra registers and bit fields required to + * set up a mode on the board. + * Explanation about Display Control register: + * The FPGA only supports 7 predefined pixel clocks, and the clock select is + * in bits 4:0 of the new register 0x802a8. + */ +static unsigned int display_ctrl_adjust(struct drm_device *dev, + struct drm_display_mode *mode, + unsigned int ctrl) +{ + unsigned long x, y; + u32 pll1; /* bit[31:0] of PLL */ + u32 pll2; /* bit[63:32] of PLL */ + struct inspur_drm_private *priv = dev->dev_private; + + x = mode->hdisplay; + y = mode->vdisplay; + + get_pll_config(x, y, &pll1, &pll2); + writel(pll2, priv->mmio + CRT_PLL2_NS); + set_vclock_inspur(dev, pll1); + + /* + * Inspur has to set up the top-left and bottom-right + * registers as well. + * Note that the normal chip only uses those two registers for + * the auto-centering mode. + */ + writel(INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_TL_TOP, 0) | + INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_TL_LEFT, 0), + priv->mmio + INSPUR_CRT_AUTO_CENTERING_TL); + + writel(INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_BR_BOTTOM, y - 1) | + INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_BR_RIGHT, x - 1), + priv->mmio + INSPUR_CRT_AUTO_CENTERING_BR); + + /* + * Assume common fields in ctrl have been properly set before + * calling this function. + * This function only sets the extra fields in ctrl. 
+ */ + + /* Set bit 25 of display controller: Select CRT or VGA clock */ + ctrl &= ~INSPUR_CRT_DISP_CTL_CRTSELECT_MASK; + ctrl &= ~INSPUR_CRT_DISP_CTL_CLOCK_PHASE_MASK; + + ctrl |= INSPUR_CRT_DISP_CTL_CRTSELECT(INSPUR_CRTSELECT_CRT); + + /* clock_phase_polarity is 0 */ + ctrl |= INSPUR_CRT_DISP_CTL_CLOCK_PHASE(0); + + writel(ctrl, priv->mmio + INSPUR_CRT_DISP_CTL); + + return ctrl; +} + +static void inspur_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + unsigned int val; + struct drm_display_mode *mode = &crtc->state->mode; + struct drm_device *dev = crtc->dev; + struct inspur_drm_private *priv = dev->dev_private; + int width = mode->hsync_end - mode->hsync_start; + int height = mode->vsync_end - mode->vsync_start; + + //writel(format_pll_reg(), priv->mmio + INSPUR_CRT_PLL_CTRL); + writel(INSPUR_FIELD(INSPUR_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) | + INSPUR_FIELD(INSPUR_CRT_HORZ_TOTAL_DISP_END, mode->hdisplay - 1), + priv->mmio + INSPUR_CRT_HORZ_TOTAL); + + writel(INSPUR_FIELD(INSPUR_CRT_HORZ_SYNC_WIDTH, width) | + INSPUR_FIELD(INSPUR_CRT_HORZ_SYNC_START, mode->hsync_start - 1), + priv->mmio + INSPUR_CRT_HORZ_SYNC); + + writel(INSPUR_FIELD(INSPUR_CRT_VERT_TOTAL_TOTAL, mode->vtotal - 1) | + INSPUR_FIELD(INSPUR_CRT_VERT_TOTAL_DISP_END, mode->vdisplay - 1), + priv->mmio + INSPUR_CRT_VERT_TOTAL); + + writel(INSPUR_FIELD(INSPUR_CRT_VERT_SYNC_HEIGHT, height) | + INSPUR_FIELD(INSPUR_CRT_VERT_SYNC_START, mode->vsync_start - 1), + priv->mmio + INSPUR_CRT_VERT_SYNC); + + val = INSPUR_FIELD(INSPUR_CRT_DISP_CTL_VSYNC_PHASE, 0); + val |= INSPUR_FIELD(INSPUR_CRT_DISP_CTL_HSYNC_PHASE, 0); + val |= INSPUR_CRT_DISP_CTL_TIMING(1); + val |= INSPUR_CRT_DISP_CTL_PLANE(1); + + display_ctrl_adjust(dev, mode, val); +} + +static void inspur_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) +{ + unsigned int reg; + struct drm_device *dev = crtc->dev; + struct inspur_drm_private *priv = dev->dev_private; + + inspur_set_power_mode(priv, INSPUR_PW_MODE_CTL_MODE_MODE0); + + /* Enable display power gate & LOCALMEM power gate */ + reg = readl(priv->mmio + INSPUR_CURRENT_GATE); + reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK; + reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK; + reg |= INSPUR_CURR_GATE_DISPLAY(1); + reg |= INSPUR_CURR_GATE_LOCALMEM(1); + inspur_set_current_gate(priv, reg); + + /* We can add more initialization as needed. 
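+	 * The power-mode/gate sequence above is the same one used in
+	 * inspur_crtc_atomic_enable().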
*/ +} + +static void inspur_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) + +{ + unsigned long flags; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (crtc->state->event) + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); +} + +static int inspur_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct inspur_drm_private *priv = crtc->dev->dev_private; + + writel(INSPUR_RAW_INTERRUPT_EN_VBLANK(1), + priv->mmio + INSPUR_RAW_INTERRUPT_EN); + + return 0; +} + +static void inspur_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct inspur_drm_private *priv = crtc->dev->dev_private; + + writel(INSPUR_RAW_INTERRUPT_EN_VBLANK(0), + priv->mmio + INSPUR_RAW_INTERRUPT_EN); +} + +static const struct drm_crtc_funcs inspur_crtc_funcs = { + .page_flip = drm_atomic_helper_page_flip, + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = inspur_crtc_enable_vblank, + .disable_vblank = inspur_crtc_disable_vblank, + +}; + +static const struct drm_crtc_helper_funcs inspur_crtc_helper_funcs = { + .mode_set_nofb = inspur_crtc_mode_set_nofb, + .atomic_begin = inspur_crtc_atomic_begin, + .atomic_flush = inspur_crtc_atomic_flush, + .atomic_enable = inspur_crtc_atomic_enable, + .atomic_disable = inspur_crtc_atomic_disable, + .mode_valid = inspur_crtc_mode_valid, +}; + +int inspur_de_init(struct inspur_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_crtc *crtc; + struct drm_plane *plane; + int ret; + + plane = inspur_plane_init(priv); + if (IS_ERR(plane)) { + DRM_ERROR("failed to create plane: %ld\n", PTR_ERR(plane)); + return PTR_ERR(plane); + } + + crtc = devm_kzalloc(dev->dev, sizeof(*crtc), GFP_KERNEL); + if (!crtc) { + DRM_ERROR("failed to alloc memory when init crtc\n"); + return -ENOMEM; + } + + ret = drm_crtc_init_with_planes(dev, crtc, plane, + NULL, &inspur_crtc_funcs, NULL); + if (ret) { + DRM_ERROR("failed to init crtc: %d\n", ret); + return ret; + } + + ret = drm_mode_crtc_set_gamma_size(crtc, 256); + if (ret) { + DRM_ERROR("failed to set gamma size: %d\n", ret); + return ret; + } + drm_crtc_helper_add(crtc, &inspur_crtc_helper_funcs); + + return 0; +} diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.c new file mode 100644 index 0000000000000000000000000000000000000000..c522ca90b00cdfd1707029c353dcbe7b5a8b62d6 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "inspur_drm_drv.h" +#include "inspur_drm_regs.h" + +#define MEM_SIZE_RESERVE4KVM 0x200000 + +DEFINE_DRM_GEM_FOPS(inspur_fops); +irqreturn_t inspur_drm_interrupt(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + struct inspur_drm_private *priv = + (struct inspur_drm_private *)dev->dev_private; + u32 status; + + status = readl(priv->mmio + INSPUR_RAW_INTERRUPT); + + if (status & INSPUR_RAW_INTERRUPT_VBLANK(1)) { + writel(INSPUR_RAW_INTERRUPT_VBLANK(1), + priv->mmio + INSPUR_RAW_INTERRUPT); + drm_handle_vblank(dev, 0); + } + + return IRQ_HANDLED; +} + +static struct drm_driver inspur_driver = { + .driver_features = 
DRIVER_GEM | DRIVER_MODESET | + DRIVER_ATOMIC | DRIVER_HAVE_IRQ, + + .fops = &inspur_fops, + .name = "inspur", + .date = "20240201", + .desc = "inspur drm driver", + .major = 3, + .minor = 0, + .dumb_create = inspur_dumb_create, + .dumb_map_offset = drm_gem_ttm_dumb_map_offset, +}; + +static int __maybe_unused inspur_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct inspur_drm_private *priv = drm_dev->dev_private; + + drm_kms_helper_poll_disable(drm_dev); + priv->suspend_state = drm_atomic_helper_suspend(drm_dev); + if (IS_ERR(priv->suspend_state)) { + DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", + PTR_ERR(priv->suspend_state)); + drm_kms_helper_poll_enable(drm_dev); + return PTR_ERR(priv->suspend_state); + } + + return 0; +} + +static int __maybe_unused inspur_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct inspur_drm_private *priv = drm_dev->dev_private; + + drm_atomic_helper_resume(drm_dev, priv->suspend_state); + drm_kms_helper_poll_enable(drm_dev); + + return 0; +} + +static const struct dev_pm_ops inspur_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(inspur_pm_suspend, + inspur_pm_resume) +}; + +static int inspur_kms_init(struct inspur_drm_private *priv) +{ + int ret; + + drm_mode_config_init(priv->dev); + priv->mode_config_initialized = true; + + priv->dev->mode_config.min_width = 0; + priv->dev->mode_config.min_height = 0; + priv->dev->mode_config.max_width = 1920; + priv->dev->mode_config.max_height = 1200; + priv->dev->mode_config.preferred_depth = 32; + priv->dev->mode_config.prefer_shadow = 1; + priv->dev->mode_config.funcs = (void *)&inspur_mode_funcs; + + ret = inspur_de_init(priv); + if (ret) { + DRM_ERROR("failed to init de: %d\n", ret); + return ret; + } + + ret = inspur_vdac_init(priv); + if (ret) { + DRM_ERROR("failed to init vdac: %d\n", ret); + return ret; + } + + return 0; +} + +static void inspur_kms_fini(struct inspur_drm_private *priv) +{ + if (priv->mode_config_initialized) { + drm_mode_config_cleanup(priv->dev); + priv->mode_config_initialized = false; + } +} + +/* + * It can operate in one of three modes: 0, 1 or Sleep. + */ +void inspur_set_power_mode(struct inspur_drm_private *priv, + unsigned int power_mode) +{ + unsigned int control_value = 0; + void __iomem *mmio = priv->mmio; + unsigned int input = 1; + + if (power_mode > INSPUR_PW_MODE_CTL_MODE_SLEEP) + return; + + if (power_mode == INSPUR_PW_MODE_CTL_MODE_SLEEP) + input = 0; + + control_value = readl(mmio + INSPUR_POWER_MODE_CTRL); + control_value &= ~(INSPUR_PW_MODE_CTL_MODE_MASK | + INSPUR_PW_MODE_CTL_OSC_INPUT_MASK); + control_value |= INSPUR_FIELD(INSPUR_PW_MODE_CTL_MODE, power_mode); + control_value |= INSPUR_FIELD(INSPUR_PW_MODE_CTL_OSC_INPUT, input); + writel(control_value, mmio + INSPUR_POWER_MODE_CTRL); +} + +void inspur_set_current_gate(struct inspur_drm_private *priv, unsigned int gate) +{ + unsigned int gate_reg; + unsigned int mode; + void __iomem *mmio = priv->mmio; + + /* Get current power mode. 
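+	 * Each power mode has its own gate register; the new gate value is
+	 * written to the register matching the mode currently selected in
+	 * INSPUR_POWER_MODE_CTRL (MODE0_GATE doubles as the fallback for
+	 * unknown modes).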
*/
+	mode = (readl(mmio + INSPUR_POWER_MODE_CTRL) &
+		INSPUR_PW_MODE_CTL_MODE_MASK) >> INSPUR_PW_MODE_CTL_MODE_SHIFT;
+
+	switch (mode) {
+	case INSPUR_PW_MODE_CTL_MODE_MODE0:
+		gate_reg = INSPUR_MODE0_GATE;
+		break;
+
+	case INSPUR_PW_MODE_CTL_MODE_MODE1:
+		gate_reg = INSPUR_MODE1_GATE;
+		break;
+
+	default:
+		gate_reg = INSPUR_MODE0_GATE;
+		break;
+	}
+	writel(gate, mmio + gate_reg);
+}
+
+static void inspur_hw_config(struct inspur_drm_private *priv)
+{
+	unsigned int reg;
+
+	/* On hardware reset, power mode 0 is default. */
+	inspur_set_power_mode(priv, INSPUR_PW_MODE_CTL_MODE_MODE0);
+
+	/* Enable display power gate & LOCALMEM power gate */
+	reg = readl(priv->mmio + INSPUR_CURRENT_GATE);
+	reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK;
+	reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK;
+	reg |= INSPUR_CURR_GATE_DISPLAY(1);
+	reg |= INSPUR_CURR_GATE_LOCALMEM(1);
+
+	inspur_set_current_gate(priv, reg);
+
+	/*
+	 * Reset the memory controller. If the memory controller is not
+	 * reset in the chip, the system might hang when software accesses
+	 * the memory. The memory should be reset after changing the MXCLK.
+	 */
+	reg = readl(priv->mmio + INSPUR_MISC_CTRL);
+	reg &= ~INSPUR_MSCCTL_LOCALMEM_RESET_MASK;
+	reg |= INSPUR_MSCCTL_LOCALMEM_RESET(0);
+	writel(reg, priv->mmio + INSPUR_MISC_CTRL);
+
+	reg &= ~INSPUR_MSCCTL_LOCALMEM_RESET_MASK;
+	reg |= INSPUR_MSCCTL_LOCALMEM_RESET(1);
+
+	writel(reg, priv->mmio + INSPUR_MISC_CTRL);
+}
+
+static int inspur_hw_map(struct inspur_drm_private *priv)
+{
+	struct drm_device *dev = priv->dev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	resource_size_t addr, size, ioaddr, iosize;
+
+	ioaddr = pci_resource_start(pdev, 1);
+	iosize = pci_resource_len(pdev, 1);
+	priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize);
+	if (!priv->mmio) {
+		DRM_ERROR("Cannot map mmio region\n");
+		return -ENOMEM;
+	}
+
+	addr = pci_resource_start(pdev, 0);
+	size = pci_resource_len(pdev, 0);
+	priv->fb_map = devm_ioremap(dev->dev, addr, size);
+	if (!priv->fb_map) {
+		DRM_ERROR("Cannot map framebuffer\n");
+		return -ENOMEM;
+	}
+	priv->fb_base = addr;
+	priv->fb_size = size - MEM_SIZE_RESERVE4KVM;
+
+	return 0;
+}
+
+static void inspur_hw_unmap(struct inspur_drm_private *priv)
+{
+	struct drm_device *dev = priv->dev;
+
+	if (priv->mmio) {
+		devm_iounmap(dev->dev, priv->mmio);
+		priv->mmio = NULL;
+	}
+
+	if (priv->fb_map) {
+		devm_iounmap(dev->dev, priv->fb_map);
+		priv->fb_map = NULL;
+	}
+}
+
+static int inspur_hw_init(struct inspur_drm_private *priv)
+{
+	int ret;
+
+	ret = inspur_hw_map(priv);
+	if (ret)
+		return ret;
+
+	inspur_hw_config(priv);
+
+	return 0;
+}
+
+void inspur_unload(struct drm_device *dev)
+{
+	struct inspur_drm_private *priv = dev->dev_private;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+	drm_atomic_helper_shutdown(dev);
+
+	free_irq(pdev->irq, dev);
+
+	inspur_kms_fini(priv);
+	inspur_hw_unmap(priv);
+	pci_disable_msi(pdev);
+	dev->dev_private = NULL;
+}
+
+int inspur_load(struct drm_device *dev, unsigned long flags)
+{
+	struct inspur_drm_private *priv;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	int ret;
+
+	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		DRM_ERROR("no memory to allocate for inspur_drm_private\n");
+		return -ENOMEM;
+	}
+	dev->dev_private = priv;
+	priv->dev = dev;
+
+	ret = inspur_hw_init(priv);
+	if (ret)
+		goto err;
+
+	ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
+				    priv->fb_size);
+	if (ret) {
+		drm_err(dev, "Error initializing VRAM MM: %d\n", ret);
+		goto err;
+	}
+	ret = inspur_kms_init(priv);
+	if (ret)
+		goto err;
+
+	/* reset all the states of crtc/plane/encoder/connector */
+	drm_mode_config_reset(dev);
+
+	return 0;
+
+err:
+	inspur_unload(dev);
+	DRM_ERROR("failed to initialize drm driver: %d\n", ret);
+	return ret;
+}
+
+static int inspur_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	int ret = 0;
+	struct drm_device *dev;
+
+	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev,
+								&inspur_driver);
+	if (ret)
+		return ret;
+
+	dev = drm_dev_alloc(&inspur_driver, &pdev->dev);
+	if (IS_ERR(dev)) {
+		DRM_ERROR("failed to allocate drm_device\n");
+		return PTR_ERR(dev);
+	}
+
+	pci_set_drvdata(pdev, dev);
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		drm_err(dev, "failed to enable pci device: %d\n", ret);
+		goto err_free_dev;
+	}
+	ret = inspur_load(dev, ent->driver_data);
+	if (ret)
+		goto err_disable_pci;
+
+	ret = drm_dev_register(dev, ent->driver_data);
+	if (ret)
+		goto err_inspur_driver_unload;
+
+	drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+
+	return 0;
+err_inspur_driver_unload:
+	inspur_unload(dev);
+err_disable_pci:
+	pci_disable_device(pdev);
+err_free_dev:
+	drm_dev_put(dev);
+	return ret;
+}
+
+static void inspur_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+	pci_disable_device(pdev);
+}
+
+static void inspur_pci_shutdown(struct pci_dev *pdev)
+{
+	inspur_pci_remove(pdev);
+}
+
+static const struct pci_device_id inspur_pci_table[] = {
+	{ 0x1bd4, 0x0750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0, }
+};
+
+static struct pci_driver inspur_pci_driver = {
+	.name = "inspur-drm",
+	.id_table = inspur_pci_table,
+	.probe = inspur_pci_probe,
+	.remove = inspur_pci_remove,
+	.shutdown = inspur_pci_shutdown,
+	.driver.pm = &inspur_pm_ops,
+};
+
+static int __init inspur_init(void)
+{
+	return pci_register_driver(&inspur_pci_driver);
+}
+
+static void __exit inspur_exit(void)
+{
+	pci_unregister_driver(&inspur_pci_driver);
+}
+
+module_init(inspur_init);
+module_exit(inspur_exit);
+
+MODULE_DEVICE_TABLE(pci, inspur_pci_table);
+MODULE_AUTHOR("");
+MODULE_DESCRIPTION("DRM Driver for InspurBMC");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("3.0");
diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.h b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.h
new file mode 100644
index 0000000000000000000000000000000000000000..d47f1fbc4ad06a19157eaa6ae3675ee2e17efc6a
--- /dev/null
+++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef INSPUR_DRM_DRV_H
+#define INSPUR_DRM_DRV_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+
+struct drm_device;
+struct drm_gem_object;
+
+#define inspur_framebuffer drm_framebuffer
+#define BPP16_RED 0x0000f800
+#define BPP16_GREEN 0x000007e0
+#define BPP16_BLUE 0x0000001f
+#define BPP16_WHITE 0x0000ffff
+#define BPP16_GRAY 0x00008410
+#define BPP16_YELLOW 0x0000ffe0
+#define BPP16_CYAN 0x000007ff
+#define BPP16_PINK 0x0000f81f
+#define BPP16_BLACK 0x00000000
+
+struct inspur_fbdev {
+	struct drm_fb_helper helper;
+	struct inspur_framebuffer *fb;
+	int size;
+};
+
+struct inspur_cursor {
+	struct drm_gem_vram_object *gbo[2];
+	unsigned int next_index;
+};
+
+struct inspur_drm_private {
+	/* hw */
+	void __iomem *mmio;
+	void __iomem *fb_map;
+	unsigned long fb_base;
+	unsigned long fb_size;
+
+	/* drm */
+	struct drm_device *dev;
+
+	bool mode_config_initialized;
+	struct drm_atomic_state *suspend_state;
+
+	/* fbdev */
+	struct
inspur_fbdev *fbdev; + + /* hw cursor */ + struct inspur_cursor cursor; +}; + +#define to_inspur_framebuffer(x) container_of(x, struct inspur_framebuffer, fb) + +void inspur_set_power_mode(struct inspur_drm_private *priv, + unsigned int power_mode); +void inspur_set_current_gate(struct inspur_drm_private *priv, + unsigned int gate); +int inspur_load(struct drm_device *dev, unsigned long flags); +void inspur_unload(struct drm_device *dev); + +int inspur_de_init(struct inspur_drm_private *priv); +int inspur_vdac_init(struct inspur_drm_private *priv); + +int inspur_gem_create(struct drm_device *dev, u32 size, bool iskernel, + struct drm_gem_object **obj); + +int inspur_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); + +extern const struct drm_mode_config_funcs inspur_mode_funcs; + +#endif diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_regs.h b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..1b845440ba445b744ad656bf9ab6d0bd5fd69a7c --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_regs.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef INSPUR_DRM_HW_H +#define INSPUR_DRM_HW_H + +/* register definition */ +#define INSPUR_MISC_CTRL 0x4 + +#define INSPUR_MSCCTL_LOCALMEM_RESET(x) ((x) << 6) +#define INSPUR_MSCCTL_LOCALMEM_RESET_MASK 0x40 + +#define INSPUR_CURRENT_GATE 0x000040 +#define INSPUR_CURR_GATE_DISPLAY(x) ((x) << 2) +#define INSPUR_CURR_GATE_DISPLAY_MASK 0x4 + +#define INSPUR_CURR_GATE_LOCALMEM(x) ((x) << 1) +#define INSPUR_CURR_GATE_LOCALMEM_MASK 0x2 + +#define INSPUR_MODE0_GATE 0x000044 +#define INSPUR_MODE1_GATE 0x000048 +#define INSPUR_POWER_MODE_CTRL 0x00004C + +#define INSPUR_PW_MODE_CTL_OSC_INPUT(x) ((x) << 3) +#define INSPUR_PW_MODE_CTL_OSC_INPUT_MASK 0x8 + +#define INSPUR_PW_MODE_CTL_MODE(x) ((x) << 0) +#define INSPUR_PW_MODE_CTL_MODE_MASK 0x03 +#define INSPUR_PW_MODE_CTL_MODE_SHIFT 0 + +#define INSPUR_PW_MODE_CTL_MODE_MODE0 0 +#define INSPUR_PW_MODE_CTL_MODE_MODE1 1 +#define INSPUR_PW_MODE_CTL_MODE_SLEEP 2 + +//#define INSPUR_CRT_PLL_CTRL 0x000060 + +#define INSPUR_PLL_CTRL_BYPASS(x) ((x) << 18) +#define INSPUR_PLL_CTRL_BYPASS_MASK 0x40000 + +#define INSPUR_PLL_CTRL_POWER(x) ((x) << 17) +#define INSPUR_PLL_CTRL_POWER_MASK 0x20000 + +#define INSPUR_PLL_CTRL_INPUT(x) ((x) << 16) +#define INSPUR_PLL_CTRL_INPUT_MASK 0x10000 + +#define INSPUR_PLL_CTRL_POD(x) ((x) << 14) +#define INSPUR_PLL_CTRL_POD_MASK 0xC000 + +#define INSPUR_PLL_CTRL_OD(x) ((x) << 12) +#define INSPUR_PLL_CTRL_OD_MASK 0x3000 + +#define INSPUR_PLL_CTRL_N(x) ((x) << 8) +#define INSPUR_PLL_CTRL_N_MASK 0xF00 + +#define INSPUR_PLL_CTRL_M(x) ((x) << 0) +#define INSPUR_PLL_CTRL_M_MASK 0xFF + +#define INSPUR_CRT_DISP_CTL 0x80200 + +#define INSPUR_CRT_DISP_CTL_DPMS(x) ((x) << 30) +#define INSPUR_CRT_DISP_CTL_DPMS_MASK 0xc0000000 + +#define INSPUR_CRT_DPMS_ON 0 +#define INSPUR_CRT_DPMS_OFF 3 + +#define INSPUR_CRT_DISP_CTL_CRTSELECT(x) ((x) << 25) +#define INSPUR_CRT_DISP_CTL_CRTSELECT_MASK 0x2000000 + +#define INSPUR_CRTSELECT_CRT 1 + +#define INSPUR_CRT_DISP_CTL_CLOCK_PHASE(x) ((x) << 14) +#define INSPUR_CRT_DISP_CTL_CLOCK_PHASE_MASK 0x4000 + +#define INSPUR_CRT_DISP_CTL_VSYNC_PHASE(x) ((x) << 13) +#define INSPUR_CRT_DISP_CTL_VSYNC_PHASE_MASK 0x2000 + +#define INSPUR_CRT_DISP_CTL_HSYNC_PHASE(x) ((x) << 12) +#define INSPUR_CRT_DISP_CTL_HSYNC_PHASE_MASK 0x1000 + +#define INSPUR_CRT_DISP_CTL_TIMING(x) ((x) << 8) +#define INSPUR_CRT_DISP_CTL_TIMING_MASK 0x100 + 
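+/*
+ * Field macros in this file come in pairs: a shift helper FIELD(x) and a
+ * FIELD_MASK. INSPUR_FIELD(), defined at the end of this file, combines
+ * them, e.g. INSPUR_FIELD(INSPUR_CRT_DISP_CTL_TIMING, 1) expands to
+ * ((1 << 8) & 0x100).
+ */
+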
+#define INSPUR_CRT_DISP_CTL_PLANE(x) ((x) << 2) +#define INSPUR_CRT_DISP_CTL_PLANE_MASK 4 + +#define INSPUR_CRT_DISP_CTL_FORMAT(x) ((x) << 0) +#define INSPUR_CRT_DISP_CTL_FORMAT_MASK 0x03 + +#define INSPUR_CRT_FB_ADDRESS 0x080204 + +#define INSPUR_CRT_FB_WIDTH 0x080208 +#define INSPUR_CRT_FB_WIDTH_WIDTH(x) ((x) << 16) +#define INSPUR_CRT_FB_WIDTH_WIDTH_MASK 0x3FFF0000 +#define INSPUR_CRT_FB_WIDTH_OFFS(x) ((x) << 0) +#define INSPUR_CRT_FB_WIDTH_OFFS_MASK 0x3FFF + +#define INSPUR_CRT_HORZ_TOTAL 0x08020C +#define INSPUR_CRT_HORZ_TOTAL_TOTAL(x) ((x) << 16) +#define INSPUR_CRT_HORZ_TOTAL_TOTAL_MASK 0xFFF0000 + +#define INSPUR_CRT_HORZ_TOTAL_DISP_END(x) ((x) << 0) +#define INSPUR_CRT_HORZ_TOTAL_DISP_END_MASK 0xFFF + +#define INSPUR_CRT_HORZ_SYNC 0x080210 +#define INSPUR_CRT_HORZ_SYNC_WIDTH(x) ((x) << 16) +#define INSPUR_CRT_HORZ_SYNC_WIDTH_MASK 0xFF0000 + +#define INSPUR_CRT_HORZ_SYNC_START(x) ((x) << 0) +#define INSPUR_CRT_HORZ_SYNC_START_MASK 0xFFF + +#define INSPUR_CRT_VERT_TOTAL 0x080214 +#define INSPUR_CRT_VERT_TOTAL_TOTAL(x) ((x) << 16) +#define INSPUR_CRT_VERT_TOTAL_TOTAL_MASK 0x7FFF0000 + +#define INSPUR_CRT_VERT_TOTAL_DISP_END(x) ((x) << 0) +#define INSPUR_CRT_VERT_TOTAL_DISP_END_MASK 0x7FF + +#define INSPUR_CRT_VERT_SYNC 0x080218 +#define INSPUR_CRT_VERT_SYNC_HEIGHT(x) ((x) << 16) +#define INSPUR_CRT_VERT_SYNC_HEIGHT_MASK 0x3F0000 + +#define INSPUR_CRT_VERT_SYNC_START(x) ((x) << 0) +#define INSPUR_CRT_VERT_SYNC_START_MASK 0x7FF + +/* Hardware Cursor */ +#define INSPUR_HWC_ADDRESS 0x080230 +#define INSPUR_HWC_ADDRESS_ENABLE(x) ((x) << 31) +#define INSPUR_HWC_ADDRESS_ENABLE_MASK 0x80000000 +#define INSPUR_HWC_ADDRESS_ADDRESS(x) ((x) << 0) +#define INSPUR_HWC_ADDRESS_ADDRESS_MASK 0xFFFFFFF + +#define INSPUR_HWC_LOCATION 0x080234 +#define INSPUR_HWC_LOCATION_TOP(x) ((x) << 27) +#define INSPUR_HWC_LOCATION_TOP_MASK 0x8000000 +#define INSPUR_HWC_LOCATION_Y(x) ((x) << 16) +#define INSPUR_HWC_LOCATION_Y_MASK 0x7FF0000 +#define INSPUR_HWC_LOCATION_LEFT(x) ((x) << 11) +#define INSPUR_HWC_LOCATION_LEFT_MASK 0x800 +#define INSPUR_HWC_LOCATION_X(x) ((x) << 0) +#define INSPUR_HWC_LOCATION_X_MASK 0x7FF + +#define INSPUR_HWC_COLOR_12 0x080238 +#define INSPUR_HWC_COLOR_12_2_RGB(x) ((x) << 16) +#define INSPUR_HWC_COLOR_12_2_RGB_MASK 0xFFFF0000 +#define INSPUR_HWC_COLOR_12_1_RGB(x) ((x) << 0) +#define INSPUR_HWC_COLOR_12_1_RGB_MASK 0xFFFF + +#define INSPUR_HWC_COLOR_3 0x08023C +#define INSPUR_HWC_COLOR_3_RGB(x) ((x) << 0) +#define INSPUR_HWC_COLOR_3_RGB_MASK 0xFFFF + +/* Auto Centering */ +#define INSPUR_CRT_AUTO_CENTERING_TL 0x080280 +#define INSPUR_CRT_AUTO_CENTERING_TL_TOP(x) ((x) << 16) +#define INSPUR_CRT_AUTO_CENTERING_TL_TOP_MASK 0x7FF0000 + +#define INSPUR_CRT_AUTO_CENTERING_TL_LEFT(x) ((x) << 0) +#define INSPUR_CRT_AUTO_CENTERING_TL_LEFT_MASK 0x7FF + +#define INSPUR_CRT_AUTO_CENTERING_BR 0x080284 +#define INSPUR_CRT_AUTO_CENTERING_BR_BOTTOM(x) ((x) << 16) +#define INSPUR_CRT_AUTO_CENTERING_BR_BOTTOM_MASK 0x7FF0000 + +#define INSPUR_CRT_AUTO_CENTERING_BR_RIGHT(x) ((x) << 0) +#define INSPUR_CRT_AUTO_CENTERING_BR_RIGHT_MASK 0x7FF + +/* register to control panel output */ +#define INSPUR_DISPLAY_CONTROL_HISILE 0x80288 +#define INSPUR_DISPLAY_CONTROL_FPVDDEN(x) ((x) << 0) +#define INSPUR_DISPLAY_CONTROL_PANELDATE(x) ((x) << 1) +#define INSPUR_DISPLAY_CONTROL_FPEN(x) ((x) << 2) +#define INSPUR_DISPLAY_CONTROL_VBIASEN(x) ((x) << 3) + +#define INSPUR_RAW_INTERRUPT 0x80290 +#define INSPUR_RAW_INTERRUPT_VBLANK(x) ((x) << 2) +#define INSPUR_RAW_INTERRUPT_VBLANK_MASK 0x4 + +#define INSPUR_RAW_INTERRUPT_EN 
0x80298 +#define INSPUR_RAW_INTERRUPT_EN_VBLANK(x) ((x) << 2) +#define INSPUR_RAW_INTERRUPT_EN_VBLANK_MASK 0x4 + +/* register and values for PLL control */ +#define CRT_PLL1_NS 0x802a8 +#define CRT_PLL1_NS_OUTER_BYPASS(x) ((x) << 30) +#define CRT_PLL1_NS_INTER_BYPASS(x) ((x) << 29) +#define CRT_PLL1_NS_POWERON(x) ((x) << 24) + +#define CRT_PLL1_NS_25MHZ 0x00006691 //640x480 +#define CRT_PLL1_NS_40MHZ 0x00004580 //800x600 +#define CRT_PLL1_NS_65MHZ 0x00002568 //1024x768 +#define CRT_PLL1_NS_83MHZ 0x000027bb //1280x800 +#define CRT_PLL1_NS_106MHZ 0x000027ef //1440x900 +#define CRT_PLL1_NS_108MHZ 0x000027f2 //1280x1024 +#define CRT_PLL1_NS_146MHZ 0x00001575 //1680x1050 +#define CRT_PLL1_NS_148MHZ 0x0000145f //1920x1080 +#define CRT_PLL1_NS_193MHZ 0x000018f7 //1920x1200 + +#define CRT_PLL2_NS 0x802ac +#define CRT_PLL2_NS_25MHZ 0x0 +#define CRT_PLL2_NS_40MHZ 0x0 +#define CRT_PLL2_NS_65MHZ 0x0 +#define CRT_PLL2_NS_83MHZ 0x0 +#define CRT_PLL2_NS_106MHZ 0x0 +#define CRT_PLL2_NS_108MHZ 0x0 +#define CRT_PLL2_NS_146MHZ 0x0 +#define CRT_PLL2_NS_148MHZ 0x0 +#define CRT_PLL2_NS_193MHZ 0x0 + +#define INSPUR_FIELD(field, value) (field(value) & field##_MASK) +#endif diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_vdac.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_vdac.c new file mode 100644 index 0000000000000000000000000000000000000000..4b31d82b00f89b2016eedf7c9e95828eaa16ded0 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_vdac.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include "inspur_drm_drv.h" +#include "inspur_drm_regs.h" + +static int inspur_connector_get_modes(struct drm_connector *connector) +{ + int count; + + count = drm_add_modes_noedid(connector, + connector->dev->mode_config.max_width, + connector->dev->mode_config.max_height); + drm_set_preferred_mode(connector, 1024, 768); + return count; +} + +static int inspur_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs inspur_connector_helper_funcs = { + .get_modes = inspur_connector_get_modes, + .mode_valid = inspur_connector_mode_valid, +}; + +static const struct drm_connector_funcs inspur_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static void inspur_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adj_mode) +{ + u32 reg; + struct drm_device *dev = encoder->dev; + struct inspur_drm_private *priv = dev->dev_private; + + reg = readl(priv->mmio + INSPUR_DISPLAY_CONTROL_HISILE); + reg |= INSPUR_DISPLAY_CONTROL_FPVDDEN(1); + reg |= INSPUR_DISPLAY_CONTROL_PANELDATE(1); + reg |= INSPUR_DISPLAY_CONTROL_FPEN(1); + reg |= INSPUR_DISPLAY_CONTROL_VBIASEN(1); + writel(reg, priv->mmio + INSPUR_DISPLAY_CONTROL_HISILE); +} + +static const struct drm_encoder_helper_funcs inspur_encoder_helper_funcs = { + .mode_set = inspur_encoder_mode_set, +}; + +static const struct drm_encoder_funcs inspur_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +int inspur_vdac_init(struct inspur_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_encoder *encoder; + struct drm_connector *connector; + int ret; + + encoder = devm_kzalloc(dev->dev, 
				 sizeof(*encoder), GFP_KERNEL);
+	if (!encoder) {
+		DRM_ERROR("failed to alloc memory when init encoder\n");
+		return -ENOMEM;
+	}
+
+	encoder->possible_crtcs = 0x1;
+	ret = drm_encoder_init(dev, encoder, &inspur_encoder_funcs,
+			       DRM_MODE_ENCODER_DAC, NULL);
+	if (ret) {
+		DRM_ERROR("failed to init encoder: %d\n", ret);
+		return ret;
+	}
+
+	drm_encoder_helper_add(encoder, &inspur_encoder_helper_funcs);
+
+	connector = devm_kzalloc(dev->dev, sizeof(*connector), GFP_KERNEL);
+	if (!connector) {
+		DRM_ERROR("failed to alloc memory when init connector\n");
+		return -ENOMEM;
+	}
+
+	ret = drm_connector_init(dev, connector,
+				 &inspur_connector_funcs,
+				 DRM_MODE_CONNECTOR_VGA);
+	if (ret) {
+		DRM_ERROR("failed to init connector: %d\n", ret);
+		return ret;
+	}
+	drm_connector_helper_add(connector, &inspur_connector_helper_funcs);
+
+	drm_connector_register(connector);
+	drm_connector_attach_encoder(connector, encoder);
+	return 0;
+}
diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_ttm.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_ttm.c
new file mode 100644
index 0000000000000000000000000000000000000000..1c9acc77610201291b6c682f0cffac6367d0add5
--- /dev/null
+++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_ttm.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include
+
+#include "inspur_drm_drv.h"
+
+int inspur_dumb_create(struct drm_file *file, struct drm_device *dev,
+		       struct drm_mode_create_dumb *args)
+{
+	return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
+}
+
+const struct drm_mode_config_funcs inspur_mode_funcs = {
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+	.fb_create = drm_gem_fb_create,
+	.mode_valid = drm_vram_helper_mode_valid,
+};
diff --git a/drivers/gpu/drm/loongson/loongson_module.c b/drivers/gpu/drm/loongson/loongson_module.c
index d2a51bd395f6c9153b155515802bee7af025a885..37b7d97c4e70153bce54d2ce639a65ba0113fc4f 100644
--- a/drivers/gpu/drm/loongson/loongson_module.c
+++ b/drivers/gpu/drm/loongson/loongson_module.c
@@ -19,6 +19,21 @@ module_param_named(vblank, loongson_vblank, int, 0400);
 
 static int __init loongson_module_init(void)
 {
+	struct pci_dev *pdev = NULL;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) {
+		/*
+		 * Multiple video card workaround:
+		 *
+		 * The integrated video card will always be selected as the
+		 * default boot device by the vgaarb subsystem.
+		 */
+		if (pdev->vendor != PCI_VENDOR_ID_LOONGSON || pdev->device == 0x1a05) {
+			pr_info("Discrete graphics card detected, aborting\n");
+			return 0;
+		}
+	}
+
 	if (!loongson_modeset || video_firmware_drivers_only())
 		return -ENODEV;
diff --git a/drivers/gpu/drm/phytium/Kconfig b/drivers/gpu/drm/phytium/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..5f540962129a5a140ed576c617f406468f46339b
--- /dev/null
+++ b/drivers/gpu/drm/phytium/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_PHYTIUM
+	tristate "DRM Support for Phytium Graphics Card"
+	depends on DRM && ARCH_PHYTIUM
+	select DRM_KMS_HELPER
+	select DRM_DISPLAY_HELPER
+	select DRM_DISPLAY_DP_HELPER
+	select DRM_DISPLAY_HDCP_HELPER
+	help
+	  Choose this option if you have a Phytium graphics card. This driver
+	  provides kernel mode setting and buffer management to userspace.
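+
+	  To compile this driver as a module, choose M here: the module will
+	  be called phytium-dc-drm.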
diff --git a/drivers/gpu/drm/phytium/Makefile b/drivers/gpu/drm/phytium/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..1f68cdcd80dac4071c5d3f1578ea7e0d85fbccfd --- /dev/null +++ b/drivers/gpu/drm/phytium/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only + +phytium-dc-drm-y := phytium_display_drv.o \ + phytium_plane.o \ + phytium_crtc.o \ + phytium_dp.o \ + phytium_fb.o \ + phytium_gem.o \ + phytium_fbdev.o \ + phytium_debugfs.o \ + px210_dp.o \ + phytium_panel.o \ + px210_dc.o \ + phytium_pci.o \ + pe220x_dp.o \ + pe220x_dc.o \ + phytium_platform.o + +obj-$(CONFIG_DRM_PHYTIUM) += phytium-dc-drm.o +CFLAGS_REMOVE_phytium_crtc.o += -mgeneral-regs-only diff --git a/drivers/gpu/drm/phytium/pe220x_dc.c b/drivers/gpu/drm/phytium/pe220x_dc.c new file mode 100644 index 0000000000000000000000000000000000000000..8f74199f9a477422246896a885c20b4da5f3c6ec --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dc.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium Pe220x display controller DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "pe220x_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +void pe220x_dc_hw_disable(struct drm_crtc *crtc); + +static const unsigned int pe220x_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, + DRM_FORMAT_NV16, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, +}; + +static uint64_t pe220x_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static uint64_t pe220x_cursor_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int pe220x_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void pe220x_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, PE220X_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, PE220X_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, group_offset, PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void pe220x_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, 
FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + 0, PE220X_DC_CMD_REGISTER(phys_pipe)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(phys_pipe), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void pe220x_dc_hw_reset(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* disable pixel clock for bmc mode */ + if (phys_pipe == 0) + pe220x_dc_hw_disable(crtc); + + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + config &= (~(DC0_CORE_RESET | DC1_CORE_RESET | AXI_RESET | AHB_RESET)); + + if (phys_pipe == 0) { + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } else { + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } +} + +void pe220x_dc_hw_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* clear framebuffer */ + phytium_writel_reg(priv, CLEAR_VALUE_BLACK, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + /* disable cursor */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + config = ((config & (~CURSOR_FORMAT_MASK)) | CURSOR_FORMAT_DISABLED); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + mdelay(20); + + /* reset pix clock */ + pe220x_dc_hw_config_pix_clock(crtc, 0); + + if (phys_pipe == 0) { + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC0_CORE_RESET, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC0_CORE_RESET), 0, PE220X_DC_CLOCK_CONTROL); + } else { + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC1_CORE_RESET, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC1_CORE_RESET), 
0, PE220X_DC_CLOCK_CONTROL); + } + udelay(20); +} + +int pe220x_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) +{ + int ret = 0; + + if (mode_cmd->modifier[count] != DRM_FORMAT_MOD_LINEAR) { + DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[count]); + ret = -EINVAL; + } + + return ret; +} + +void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = pe220x_primary_formats_modifiers; + *formats = pe220x_primary_formats; + *format_count = ARRAY_SIZE(pe220x_primary_formats); +} + +void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = pe220x_cursor_formats_modifiers; + *formats = pe220x_cursor_formats; + *format_count = ARRAY_SIZE(pe220x_cursor_formats); +} + +void pe220x_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_Y_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[1] >> U_PREFIX_SHIFT) & U_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_U_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[2] >> V_PREFIX_SHIFT) & V_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_V_HI_ADDRESS); +} + +void pe220x_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + + config = ((iova >> CURSOR_PREFIX_SHIFT) & CURSOR_PREFIX_MASK); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PE220X_DC_CURSOR_HI_ADDRESS); +} diff --git a/drivers/gpu/drm/phytium/pe220x_dc.h b/drivers/gpu/drm/phytium/pe220x_dc.h new file mode 100644 index 0000000000000000000000000000000000000000..f88a054cf0d0e82343132e1cc0f60f45c4347097 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium Pe220x display controller DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
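+ *
+ * The PE220X_DC_*_MAX values below give the largest mode this controller
+ * accepts: 3840x2160 at a pixel clock of up to 594 MHz.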
+ */
+
+#ifndef __PE220X_DC_H__
+#define __PE220X_DC_H__
+
+#define PE220X_DC_PIX_CLOCK_MAX		(594000)
+#define PE220X_DC_HDISPLAY_MAX		3840
+#define PE220X_DC_VDISPLAY_MAX		2160
+#define PE220X_DC_ADDRESS_MASK		0x7f
+
+extern void pe220x_dc_hw_vram_init(struct phytium_display_private *priv,
+				   resource_size_t vram_addr,
+				   resource_size_t vram_size);
+extern void pe220x_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock);
+extern void pe220x_dc_hw_disable(struct drm_crtc *crtc);
+extern int pe220x_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count);
+extern void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers,
+						  const uint32_t **formats,
+						  uint32_t *format_count);
+extern void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers,
+						 const uint32_t **formats,
+						 uint32_t *format_count);
+extern void pe220x_dc_hw_update_primary_hi_addr(struct drm_plane *plane);
+extern void pe220x_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova);
+void pe220x_dc_hw_reset(struct drm_crtc *crtc);
+#endif /* __PE220X_DC_H__ */
diff --git a/drivers/gpu/drm/phytium/pe220x_dp.c b/drivers/gpu/drm/phytium/pe220x_dp.c
new file mode 100644
index 0000000000000000000000000000000000000000..54a6e8ac454b22e0cad76699a4ae59a6202f6de2
--- /dev/null
+++ b/drivers/gpu/drm/phytium/pe220x_dp.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Phytium display port DRM driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include "phytium_display_drv.h"
+#include "pe220x_reg.h"
+#include "phytium_dp.h"
+#include "pe220x_dp.h"
+
+static uint8_t pe220x_dp_source_lane_count[2] = {1, 1};
+
+/* [reg][link_rate 1.62->8.1] */
+static int vco_val[12][4] = {
+	{0x0509, 0x0509, 0x0509, 0x0509},	/* CP_PADJ */
+	{0x0f00, 0x0f00, 0x0f00, 0x0f00},	/* CP_IADJ */
+	{0x0F08, 0x0F08, 0x0F08, 0x0F08},	/* FILT_PADJ */
+	{0x0061, 0x006C, 0x006C, 0x0051},	/* INTDIV */
+	{0x3333, 0x0000, 0x0000, 0x0000},	/* FRACDIVL */
+	{0x0000, 0x0000, 0x0000, 0x0000},	/* FRACDIVH */
+	{0x0042, 0x0048, 0x0048, 0x0036},	/* HIGH_THR */
+	{0x0002, 0x0002, 0x0002, 0x0002},	/* PDIAG_CTRL */
+	{0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e},	/* VCOCAL_PLLCNT_START */
+	{0x00c7, 0x00c7, 0x00c7, 0x00c7},	/* LOCK_PEFCNT */
+	{0x00c7, 0x00c7, 0x00c7, 0x00c7},	/* LOCK_PLLCNT_START */
+	{0x0005, 0x0005, 0x0005, 0x0005},	/* LOCK_PLLCNT_THR */
+};
+
+/* [link_rate][swing][emphasis] */
+static int mgnfs_val[4][4][4] = {
+	/* 1.62Gbps */
+	{
+		{0x0026, 0x001f, 0x0012, 0x0000},
+		{0x0013, 0x0013, 0x0000, 0x0000},
+		{0x0006, 0x0000, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+	},
+	/* 2.7Gbps */
+	{
+		{0x0026, 0x001f, 0x0012, 0x0000},
+		{0x0013, 0x0013, 0x0000, 0x0000},
+		{0x0006, 0x0000, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+	},
+	/* 5.4Gbps */
+	{
+		{0x001f, 0x0013, 0x005, 0x0000},
+		{0x0018, 0x006, 0x0000, 0x0000},
+		{0x000c, 0x0000, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+	},
+	/* 8.1Gbps */
+	{
+		{0x0026, 0x0013, 0x005, 0x0000},
+		{0x0013, 0x006, 0x0000, 0x0000},
+		{0x0006, 0x0000, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+	},
+};
+
+/* [link_rate][swing][emphasis] */
+static int cpost_val[4][4][4] = {
+	/* 1.62Gbps */
+	{
+		{0x0000, 0x0014, 0x0020, 0x002a},
+		{0x0000, 0x0010, 0x001f, 0x0000},
+		{0x0000, 0x0013, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+	},
+	/* 2.7Gbps */
+	{
+		{0x0000, 0x0014, 0x0020, 0x002a},
+		{0x0000, 0x0010, 0x001f, 0x0000},
+		{0x0000, 0x0013, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+ }, + /* 5.4Gbps */ + { + {0x0005, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int pe220x_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, uint32_t link_rate) +{ + int port = phytium_dp->port%2; + int i = 0, data, tmp, tmp1, index = 0, mask = 0; + int timeout = 500, ret = 0; + + /* set pma powerdown */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A3_POWERDOWN3 << (i * A3_POWERDOWN3_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + /* lane pll disable */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + mask |= (((1<source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_EN(port), data); + + /* set pma power active */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, PE220X_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if (timeout == 0) { + DRM_ERROR("dp(%d) phy pll lock failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void pe220x_dp_hw_set_phy_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set) +{ + int port = phytium_dp->port % 3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + default: + voltage_swing = 0; + break; + } + + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + default: + pre_emphasis = 0; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), LOCK); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_TXCC_CTRL(port), TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DRV(port), TX_DRV); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_MGNFS(port), + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_CPOST(port), + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), UNLOCK); +} + +static int pe220x_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + phytium_phy_writel(phytium_dp, 
PE220X_PHY_APB_RESET(port), APB_RESET); + phytium_phy_writel(phytium_dp, PE220X_PHY_PIPE_RESET(port), RESET); + + /* config lane to dp mode */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (LANE_BIT << (i * LANE_BIT_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_MODE(port), data); + + /* pll clock enable */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_EN(port), data); + + /* config input 20 bit */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (BIT_20 << (i * BIT_20_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA_WIDTH(port), data); + + /* config lane active power state */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + /* link reset */ + phytium_phy_writel(phytium_dp, PE220X_PHY_LINK_RESET(port), LINK_RESET); + + phytium_phy_writel(phytium_dp, PE220X_PHY_SGMII_DPSEL_INIT(port), DP_SEL); + + /* config single link */ + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_CFG(port), SINGLE_LINK); + + /* pipe reset */ + phytium_phy_writel(phytium_dp, PE220X_PHY_PIPE_RESET(port), RESET_DEASSERT); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, PE220X_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if (timeout == 0) { + DRM_ERROR("reset dp(%d) phy failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void pe220x_dp_hw_poweron_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void pe220x_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void pe220x_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void pe220x_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = 
phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t pe220x_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PE220X_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int pe220x_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int config = 0; + int ret = 0; + + if (level > PE220X_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); +out: + return ret; +} + +bool pe220x_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + return false; +} + +int pe220x_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, DP_RESET, group_offset, PE220X_DP_CONTROLLER_RESET); + udelay(500); + phytium_writel_reg(priv, AUX_CLK_DIVIDER_100, group_offset, PHYTIUM_DP_AUX_CLK_DIVIDER); + phytium_writel_reg(priv, SUPPORT_EDP_1_4, group_offset, PHYTIUM_EDP_CRC_ENABLE); + + return 0; +} + +uint8_t pe220x_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return pe220x_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func pe220x_dp_funcs = { + .dp_hw_get_source_lane_count = pe220x_dp_hw_get_source_lane_count, + .dp_hw_reset = pe220x_dp_hw_reset, + .dp_hw_spread_is_enable = pe220x_dp_hw_spread_is_enable, + .dp_hw_set_backlight = pe220x_dp_hw_set_backlight, + .dp_hw_get_backlight = pe220x_dp_hw_get_backlight, + .dp_hw_disable_backlight = pe220x_dp_hw_disable_backlight, + .dp_hw_enable_backlight = pe220x_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = pe220x_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = pe220x_dp_hw_poweron_panel, + .dp_hw_init_phy = pe220x_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = pe220x_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = pe220x_dp_hw_set_phy_lane_and_rate, +}; + +void pe220x_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = &pe220x_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/pe220x_dp.h b/drivers/gpu/drm/phytium/pe220x_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..6b763d9966310115dd8fdeed8895c0b9b64e9bd7 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium display port DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
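+ *
+ * PE220X_DP_BACKLIGHT_MAX below is the largest brightness level accepted
+ * by pe220x_dp_hw_set_backlight(); larger values are rejected with -EINVAL.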
+ */ + +#ifndef __PE220X_DP_H__ +#define __PE220X_DP_H__ + +#define PE220X_DP_BACKLIGHT_MAX 100 + +void pe220x_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __PE220X_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/pe220x_reg.h b/drivers/gpu/drm/phytium/pe220x_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..88fc9c7383a58da2d059187bc7b0067a3f4ef378 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_reg.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium Pe220x display engine register + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PE220X_REG_H__ +#define __PE220X_REG_H__ + +#include "phytium_reg.h" + +/* dc register */ +#define PE220X_DC_CLOCK_CONTROL 0x0000 +#define DC1_CORE_RESET (1<<18) +#define DC0_CORE_RESET (1<<17) +#define AXI_RESET (1<<16) +#define AHB_RESET (1<<12) + +#define PE220X_DC_CMD_REGISTER(pipe) (PE220X_DC_BASE(0) + 0x00F0 + 0x4*(pipe)) +#define FLAG_REPLY (1<<31) +#define FLAG_REQUEST (1<<30) +#define CMD_PIXEL_CLOCK (0x0 << 28) +#define CMD_BACKLIGHT (0x1 << 28) +#define CMD_DC_DP_RESET (0x3 << 28) +#define BACKLIGHT_SHIFT 21 +#define BACKLIGHT_MASK 0x7f +#define BACKLIGHT_MAX 100 +#define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) +#define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) +#define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) +#define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) +#define PIXEL_CLOCK_MASK (0x1fffff) + +#define PE220X_DC_FRAMEBUFFER_Y_HI_ADDRESS 0x1404 +#define PREFIX_MASK 0xff +#define PREFIX_SHIFT 32 + +#define PE220X_DC_CURSOR_HI_ADDRESS 0x1490 +#define CURSOR_PREFIX_MASK 0xff +#define CURSOR_PREFIX_SHIFT 32 + +#define PE220X_DC_FRAMEBUFFER_U_HI_ADDRESS 0x1534 +#define U_PREFIX_MASK 0xff +#define U_PREFIX_SHIFT 32 + +#define PE220X_DC_FRAMEBUFFER_V_HI_ADDRESS 0x153c +#define V_PREFIX_MASK 0xff +#define V_PREFIX_SHIFT 32 + +/* dp register */ +#define PE220X_DP_CONTROLLER_RESET 0x0850 +#define DP_RESET 0x1 + +/* address transform register */ +#define PE220X_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define SRC_ADDR_OFFSET 22 +#define SRC_ADDR_MASK 0xffffffffff + +#define PE220X_DC_ADDRESS_TRANSFORM_SIZE 0x4 +#define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) +#define SIZE_OFFSET 22 + +#define PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR 0x8 +#define DST_ADDR_OFFSET 22 + +#define PE220X_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 +#define DC_DP_RESET_STATUS(pipe) (1 << pipe) +#define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) + +#define PE220X_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c +#define BACKLIGHT_VALUE_MASK (0x7f) +#define BACKLIGHT_VALUE_SHIFT 16 + +/* phy register start */ +#define PE220X_PHY_BASE(pipe) (0x100000*pipe) + +#define PE220X_PHY_PIPE_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40254) +#define RESET 0x0 +#define RESET_DEASSERT 0x1 + +#define PE220X_PHY_MODE(pipe) (PE220X_PHY_BASE(pipe) + 0x40034) +#define LANE_BIT (0x3) +#define LANE_BIT_SHIFT 0x2 + +#define PE220X_PHY_LINK_CFG(pipe) (PE220X_PHY_BASE(pipe) + 0x40044) +#define LANE_MASTER 0x1 +#define LANE_MASTER_SHIFT 1 + +#define PE220X_PHY_PLL_EN(pipe) (PE220X_PHY_BASE(pipe) + 0x40214) +#define PLL_EN 0x1 +#define PLL_EN_SHIFT 1 + +#define PE220X_PHY_PMA_WIDTH(pipe) (PE220X_PHY_BASE(pipe) + 0x4021c) +#define BIT_20 0x5 +#define BIT_20_SHIFT 4 + +#define PE220X_PHY_PLL_SOURCE_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0x4004C) + +#define PE220X_PHY_PMA0_POWER(pipe) (PE220X_PHY_BASE(pipe) + 0x402bc) +#define A0_ACTIVE 0x1 +#define A0_ACTIVE_SHIFT 8 +#define A3_POWERDOWN3 0x8 +#define A3_POWERDOWN3_SHIFT 8 + +#define 
PE220X_PHY_LINK_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40258) +#define LINK_RESET 0x1 +#define LINK_RESET_MASK 0x1 +#define LINTK_RESET_SHIFT 0x1 + +#define PE220X_PHY_SGMII_DPSEL_INIT(pipe) (PE220X_PHY_BASE(pipe) + 0x40260) +#define DP_SEL 0x1 + +#define PE220X_PHY_APB_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40250) +#define APB_RESET 0x1 + +/* phy origin register */ +#define PE220X_PHY_PLL_CFG(pipe) (PE220X_PHY_BASE(pipe) + 0x30038) +#define SINGLE_LINK 0x0 + +#define PE220X_PHY_PMA_CONTROL(pipe) (PE220X_PHY_BASE(pipe) + 0x3800c) +#define CONTROL_ENABLE 0x1 +#define CONTROL_ENABLE_MASK 0x1 +#define CONTROL_ENABLE_SHIFT 0x1 + +#define PE220X_PHY_PMA_CONTROL2(pipe) (PE220X_PHY_BASE(pipe) + 0x38004) +#define PLL0_LOCK_DONE (0x1 << 6) + +#define PE220X_PHY_PLL0_CLK_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0X684) +#define PLL_LINK_RATE_162000 0xf01 +#define PLL_LINK_RATE_270000 0x701 +#define PLL_LINK_RATE_540000 0x301 +#define PLL_LINK_RATE_810000 0x200 + +#define PE220X_PHY_HSCLK0_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0x18398) +#define HSCLK_LINK_0 0x0 +#define HSCLK_LINK_1 0x1 + +#define PE220X_PHY_HSCLK0_DIV(pipe) (PE220X_PHY_BASE(pipe) + 0x1839c) +#define HSCLK_LINK_RATE_162000 0x2 +#define HSCLK_LINK_RATE_270000 0x1 +#define HSCLK_LINK_RATE_540000 0x0 +#define HSCLK_LINK_RATE_810000 0x0 + +#define PE220X_PHY_PLLDRC0_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x18394) +#define PLLDRC_LINK0 0x1 +#define PLLDRC_LINK1 0x9 + +#define PE220X_PHY_PLL0_DSM_M0(pipe) (PE220X_PHY_BASE(pipe) + 0x250) +#define PLL0_DSM_M0 0x4 +#define PE220X_PHY_PLL0_VCOCAL_START(pipe) (PE220X_PHY_BASE(pipe) + 0x218) +#define PLL0_VCOCAL_START 0xc5e +#define PE220X_PHY_PLL0_VCOCAL_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x208) +#define PLL0_VCOCAL_CTRL 0x3 + +#define PE220X_PHY_PLL0_CP_PADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x690) +#define PE220X_PHY_PLL0_CP_IADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x694) +#define PE220X_PHY_PLL0_CP_FILT_PADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x698) +#define PE220X_PHY_PLL0_INTDIV(pipe) (PE220X_PHY_BASE(pipe) + 0x240) +#define PE220X_PHY_PLL0_FRACDIVL(pipe) (PE220X_PHY_BASE(pipe) + 0x244) +#define PE220X_PHY_PLL0_FRACDIVH(pipe) (PE220X_PHY_BASE(pipe) + 0x248) +#define PE220X_PHY_PLL0_HIGH_THR(pipe) (PE220X_PHY_BASE(pipe) + 0x24c) +#define PE220X_PHY_PLL0_PDIAG_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x680) +#define PE220X_PHY_PLL0_VCOCAL_PLLCNT_START(pipe) (PE220X_PHY_BASE(pipe) + 0x220) +#define PE220X_PHY_PLL0_LOCK_PEFCNT(pipe) (PE220X_PHY_BASE(pipe) + 0x270) +#define PE220X_PHY_PLL0_LOCK_PLLCNT_START(pipe) (PE220X_PHY_BASE(pipe) + 0x278) +#define PE220X_PHY_PLL0_LOCK_PLLCNT_THR(pipe) (PE220X_PHY_BASE(pipe) + 0x27c) + +#define PE220X_PHY_PLL0_TX_PSC_A0(pipe) (PE220X_PHY_BASE(pipe) + 0x18400) +#define PLL0_TX_PSC_A0 0xfb +#define PE220X_PHY_PLL0_TX_PSC_A2(pipe) (PE220X_PHY_BASE(pipe) + 0x18408) +#define PLL0_TX_PSC_A2 0x4aa +#define PE220X_PHY_PLL0_TX_PSC_A3(pipe) (PE220X_PHY_BASE(pipe) + 0x1840c) +#define PLL0_TX_PSC_A3 0x4aa +#define PE220X_PHY_PLL0_RX_PSC_A0(pipe) (PE220X_PHY_BASE(pipe) + 0x28000) +#define PLL0_RX_PSC_A0 0x0 +#define PE220X_PHY_PLL0_RX_PSC_A2(pipe) (PE220X_PHY_BASE(pipe) + 0x28008) +#define PLL0_RX_PSC_A2 0x0 +#define PE220X_PHY_PLL0_RX_PSC_A3(pipe) (PE220X_PHY_BASE(pipe) + 0x2800C) +#define PLL0_RX_PSC_A3 0x0 +#define PE220X_PHY_PLL0_RX_PSC_CAL(pipe) (PE220X_PHY_BASE(pipe) + 0x28018) +#define PLL0_RX_PSC_CAL 0x0 + +#define PE220X_PHY_PLL0_XCVR_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x183a8) +#define PLL0_XCVR_CTRL 0xf + +#define PE220X_PHY_PLL0_RX_GCSM1_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28420) 
+#define PLL0_RX_GCSM1_CTRL 0x0 +#define PE220X_PHY_PLL0_RX_GCSM2_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28440) +#define PLL0_RX_GCSM2_CTRL 0x0 +#define PE220X_PHY_PLL0_RX_PERGCSM_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28460) +#define PLL0_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define PE220X_PHY_PLL0_TX_DIAG_ACYA(pipe) (PE220X_PHY_BASE(pipe) + 0x1879c) +#define LOCK 1 +#define UNLOCK 0 + +#define PE220X_PHY_PLL0_TX_TXCC_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x18100) +#define TX_TXCC_CTRL 0x8a4 + +#define PE220X_PHY_PLL0_TX_DRV(pipe) (PE220X_PHY_BASE(pipe) + 0x18318) +#define TX_DRV 0x3 + +#define PE220X_PHY_PLL0_TX_MGNFS(pipe) (PE220X_PHY_BASE(pipe) + 0x18140) + +#define PE220X_PHY_PLL0_TX_CPOST(pipe) (PE220X_PHY_BASE(pipe) + 0x18130) + +#endif /* __PE220X_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c new file mode 100644 index 0000000000000000000000000000000000000000..628357837da6e34ca839e859eb85b75a7f705397 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_crtc.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_dp.h" +#include "px210_dc.h" +#include "pe220x_dc.h" +#include "phytium_reg.h" + +#define MAXKERNELSIZE 9 +#define SUBPIXELINDEXBITS 5 +#define SUBPIXELCOUNT (1 << SUBPIXELINDEXBITS) +#define SUBPIXELLOADCOUNT (SUBPIXELCOUNT / 2 + 1) +#define WEIGHTSTATECOUNT (((SUBPIXELLOADCOUNT * MAXKERNELSIZE + 1) & ~1) / 2) +#define KERNELTABLESIZE (SUBPIXELLOADCOUNT * MAXKERNELSIZE * sizeof(uint16_t)) +#define PHYALIGN(n, align) (((n) + ((align) - 1)) & ~((align) - 1)) +#define KERNELSTATES (PHYALIGN(KERNELTABLESIZE + 4, 8)) +#define PHYPI 3.14159265358979323846f + +#define MATH_Add(X, Y) ((float)((X) + (Y))) +#define MATH_Multiply(X, Y) ((float)((X) * (Y))) +#define MATH_Divide(X, Y) ((float)((X) / (Y))) +#define MATH_DivideFromUInteger(X, Y) ((float)(X) / (float)(Y)) +#define MATH_I2Float(X) ((float)(X)) + +struct filter_blit_array { + uint8_t kernelSize; + uint32_t scaleFactor; + uint32_t *kernelStates; +}; + +static void phytium_crtc_gamma_set(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t config = 0; + struct drm_crtc_state *state = crtc->state; + struct drm_color_lut *lut; + int i; + + if (state->gamma_lut) { + if (WARN((state->gamma_lut->length/sizeof(struct drm_color_lut) != GAMMA_INDEX_MAX), + "gamma size does not match\n")) + return; + lut = (struct drm_color_lut *)state->gamma_lut->data; + for (i = 0; i < GAMMA_INDEX_MAX; i++) { + phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); + config = ((lut[i].red >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; + config |= (((lut[i].green >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); + config |= (((lut[i].blue >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); + } + } +} +
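+/* Note: the DC gamma table holds 10 bits per colour channel, so both LUT loaders here narrow the 16-bit DRM LUT entries with a >> 6 shift before packing them into PHYTIUM_DC_GAMMA_DATA. */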
+static void phytium_crtc_gamma_init(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t config = 0; + uint16_t *red, *green, *blue; + int i; + + if (WARN((crtc->gamma_size != GAMMA_INDEX_MAX), "gamma size does not match\n")) + return; + + red = crtc->gamma_store; + green = red + crtc->gamma_size; + blue = green + crtc->gamma_size; + + for (i = 0; i < GAMMA_INDEX_MAX; i++) { + phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); + config = ((*red++ >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; + config |= (((*green++ >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); + config |= (((*blue++ >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); + } +} + +static void phytium_crtc_destroy(struct drm_crtc *crtc) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(phytium_crtc); +} + +struct drm_crtc_state * +phytium_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct phytium_crtc_state *phytium_crtc_state = NULL; + + phytium_crtc_state = kmemdup(crtc->state, sizeof(*phytium_crtc_state), + GFP_KERNEL); + if (!phytium_crtc_state) + return NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, + &phytium_crtc_state->base); + + return &phytium_crtc_state->base; +} + +void +phytium_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct phytium_crtc_state *phytium_crtc_state = + to_phytium_crtc_state(state); + + __drm_atomic_helper_crtc_destroy_state(state); + kfree(phytium_crtc_state); +} + +static int phytium_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + + phytium_writel_reg(priv, INT_ENABLE, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_INT_ENABLE); + + return 0; +} + +static void phytium_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_INT_ENABLE); +} + +static const struct drm_crtc_funcs phytium_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = phytium_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = phytium_crtc_atomic_duplicate_state, + .atomic_destroy_state = phytium_crtc_atomic_destroy_state, + .enable_vblank = phytium_enable_vblank, + .disable_vblank = phytium_disable_vblank, +}; + +static void +phytium_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_connector_state *new_conn_state; + struct drm_connector *conn; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + int config = 0, i = 0; + + for_each_new_connector_in_state(state, conn, new_conn_state, i) { + if (new_conn_state->crtc != crtc) + continue; + + switch (conn->display_info.bpc) { + case 10: + phytium_crtc->bpc = DP_RGB101010; + 
break; + case 6: + phytium_crtc->bpc = DP_RGB666; + break; + default: + phytium_crtc->bpc = DP_RGB888; + break; + } + } + + /* config pix clock */ + phytium_crtc->dc_hw_config_pix_clock(crtc, mode->clock); + + config = ((mode->crtc_hdisplay & HDISPLAY_END_MASK) << HDISPLAY_END_SHIFT) + | ((mode->crtc_htotal&HDISPLAY_TOTAL_MASK) << HDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HDISPLAY); + config = ((mode->crtc_hsync_start & HSYNC_START_MASK) << HSYNC_START_SHIFT) + | ((mode->crtc_hsync_end & HSYNC_END_MASK) << HSYNC_END_SHIFT) + | HSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : HSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HSYNC); + config = ((mode->crtc_vdisplay & VDISPLAY_END_MASK) << VDISPLAY_END_SHIFT) + | ((mode->crtc_vtotal & VDISPLAY_TOTAL_MASK) << VDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VDISPLAY); + config = ((mode->crtc_vsync_start & VSYNC_START_MASK) << VSYNC_START_SHIFT) + | ((mode->crtc_vsync_end & VSYNC_END_MASK) << VSYNC_END_SHIFT) + | VSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : VSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VSYNC); + config = PANEL_DATAENABLE_ENABLE | PANEL_DATA_ENABLE | PANEL_CLOCK_ENABLE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_PANEL_CONFIG); + config = phytium_crtc->bpc | OUTPUT_DP; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_DP_CONFIG); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->active) + config |= FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET; + else + config &= (~(FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET)); + + if (phytium_crtc->scale_enable) + config |= FRAMEBUFFER_SCALE_ENABLE; + else + config &= (~FRAMEBUFFER_SCALE_ENABLE); + + if (crtc->state->gamma_lut) + phytium_crtc_gamma_set(crtc); + else + phytium_crtc_gamma_init(crtc); + + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + drm_crtc_vblank_on(crtc); +} + +static void +phytium_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_vblank_off(crtc); + phytium_crtc->dc_hw_disable(crtc); +} + +static void phytium_crtc_update_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, + const struct drm_display_mode *native_mode) +{ + if (native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal) { + drm_mode->crtc_hdisplay = native_mode->crtc_hdisplay; + drm_mode->crtc_vdisplay = native_mode->crtc_vdisplay; + drm_mode->crtc_clock = native_mode->crtc_clock; + drm_mode->crtc_hblank_start = native_mode->crtc_hblank_start; + drm_mode->crtc_hblank_end = native_mode->crtc_hblank_end; + drm_mode->crtc_hsync_start = native_mode->crtc_hsync_start; + drm_mode->crtc_hsync_end = native_mode->crtc_hsync_end; + drm_mode->crtc_htotal = native_mode->crtc_htotal; + drm_mode->crtc_hskew = native_mode->crtc_hskew; + drm_mode->crtc_vblank_start = native_mode->crtc_vblank_start; + drm_mode->crtc_vblank_end = native_mode->crtc_vblank_end; + drm_mode->crtc_vsync_start = native_mode->crtc_vsync_start; + drm_mode->crtc_vsync_end = native_mode->crtc_vsync_end; + drm_mode->crtc_vtotal = native_mode->crtc_vtotal; + } +} + +static int +phytium_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) +{ + struct drm_crtc_state 
*crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + struct drm_plane_state *new_plane_state = NULL; + int ret = 0; + struct drm_connector *connector; + struct drm_connector_state *new_con_state; + uint32_t i; + struct phytium_dp_device *phytium_dp = NULL; + + for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (new_con_state->crtc == crtc) { + phytium_dp = connector_to_dp_device(connector); + break; + } + } + if (phytium_dp) + phytium_crtc_update_timing_for_drm_display_mode(&crtc_state->adjusted_mode, + &phytium_dp->native_mode); + + new_plane_state = drm_atomic_get_new_plane_state(crtc_state->state, + crtc->primary); + if (crtc_state->enable && new_plane_state && !new_plane_state->crtc) { + ret = -EINVAL; + goto fail; + } + + return 0; +fail: + return ret; +} + +static void +phytium_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + if (config & FRAMEBUFFER_RESET) { + phytium_writel_reg(priv, config | FRAMEBUFFER_VALID_PENDING, + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } +} + +static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + struct phytium_crtc_state *phytium_crtc_state = NULL; + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + DRM_DEBUG_KMS("crtc->state active:%d enable:%d\n", + crtc->state->active, crtc->state->enable); + phytium_crtc_state = to_phytium_crtc_state(crtc->state); + + if (crtc->state->color_mgmt_changed) + phytium_crtc_gamma_set(crtc); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + phytium_writel_reg(priv, config&(~FRAMEBUFFER_VALID_PENDING), + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->event) { + DRM_DEBUG_KMS("vblank->refcount:%d\n", + atomic_read(&dev->vblank[0].refcount)); + spin_lock_irq(&dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + else + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&dev->event_lock); + } +} + +static enum drm_mode_status +phytium_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (mode->crtc_clock > priv->info.crtc_clock_max) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > priv->info.hdisplay_max) + return MODE_BAD_HVALUE; + + if (mode->vdisplay > priv->info.vdisplay_max) + return MODE_BAD_VVALUE; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + + return MODE_OK; +} + +static const struct drm_crtc_helper_funcs phytium_crtc_helper_funcs = { + .mode_valid = phytium_crtc_mode_valid, + .atomic_check = phytium_crtc_atomic_check, + .atomic_begin = phytium_crtc_atomic_begin, + .atomic_flush = phytium_crtc_atomic_flush, + .atomic_enable = phytium_crtc_atomic_enable, + .atomic_disable = phytium_crtc_atomic_disable, +}; + +void 
phytium_crtc_resume(struct drm_device *drm_dev) +{ + struct drm_crtc *crtc; + struct phytium_crtc *phytium_crtc = NULL; + + drm_for_each_crtc(crtc, drm_dev) { + phytium_crtc = to_phytium_crtc(crtc); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(crtc); + phytium_crtc_gamma_init(crtc); + } +} + +int phytium_crtc_init(struct drm_device *dev, int phys_pipe) +{ + struct phytium_crtc *phytium_crtc; + struct phytium_crtc_state *phytium_crtc_state; + struct phytium_plane *phytium_primary_plane = NULL; + struct phytium_plane *phytium_cursor_plane = NULL; + struct phytium_display_private *priv = dev->dev_private; + int ret; + + phytium_crtc = kzalloc(sizeof(*phytium_crtc), GFP_KERNEL); + if (!phytium_crtc) { + ret = -ENOMEM; + goto failed_malloc_crtc; + } + + phytium_crtc_state = kzalloc(sizeof(*phytium_crtc_state), GFP_KERNEL); + if (!phytium_crtc_state) { + ret = -ENOMEM; + goto failed_malloc_crtc_state; + } + + phytium_crtc_state->base.crtc = &phytium_crtc->base; + phytium_crtc->base.state = &phytium_crtc_state->base; + phytium_crtc->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_crtc->dc_hw_config_pix_clock = px210_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = px210_dc_hw_disable; + phytium_crtc->dc_hw_reset = NULL; + priv->dc_reg_base[phys_pipe] = PX210_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = PX210_DCREQ_BASE(phys_pipe); + priv->address_transform_base = PX210_ADDRESS_TRANSFORM_BASE; + } else if (IS_PE220X(priv)) { + phytium_crtc->dc_hw_config_pix_clock = pe220x_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = pe220x_dc_hw_disable; + phytium_crtc->dc_hw_reset = pe220x_dc_hw_reset; + priv->dc_reg_base[phys_pipe] = PE220X_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = 0x0; + priv->address_transform_base = PE220X_ADDRESS_TRANSFORM_BASE; + } + + phytium_primary_plane = phytium_primary_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_primary_plane)) { + ret = PTR_ERR(phytium_primary_plane); + DRM_ERROR("create primary plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_primary; + } + + phytium_cursor_plane = phytium_cursor_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_cursor_plane)) { + ret = PTR_ERR(phytium_cursor_plane); + DRM_ERROR("create cursor plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_cursor; + } + + ret = drm_crtc_init_with_planes(dev, &phytium_crtc->base, + &phytium_primary_plane->base, + &phytium_cursor_plane->base, + &phytium_crtc_funcs, + "phys_pipe %d", phys_pipe); + + if (ret) { + DRM_ERROR("init crtc with plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_crtc_init; + } + drm_crtc_helper_add(&phytium_crtc->base, &phytium_crtc_helper_funcs); + drm_crtc_vblank_reset(&phytium_crtc->base); + drm_mode_crtc_set_gamma_size(&phytium_crtc->base, GAMMA_INDEX_MAX); + drm_crtc_enable_color_mgmt(&phytium_crtc->base, 0, false, GAMMA_INDEX_MAX); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(&phytium_crtc->base); + phytium_crtc_gamma_init(&phytium_crtc->base); + + return 0; + +failed_crtc_init: +failed_create_cursor: + /* drm_mode_config_cleanup() will free any crtcs/planes already initialized */ +failed_create_primary: + kfree(phytium_crtc_state); +failed_malloc_crtc_state: + kfree(phytium_crtc); +failed_malloc_crtc: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_crtc.h b/drivers/gpu/drm/phytium/phytium_crtc.h new file mode 100644 index 0000000000000000000000000000000000000000..78a841c1c68412b939ba4f1dc7cc40b5a9e6bedd --- /dev/null +++ 
b/drivers/gpu/drm/phytium/phytium_crtc.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_CRTC_H__ +#define __PHYTIUM_CRTC_H__ + +struct phytium_crtc { + struct drm_crtc base; + int phys_pipe; + unsigned int bpc; + + /* scale */ + uint32_t src_width; + uint32_t src_height; + uint32_t dst_width; + uint32_t dst_height; + uint32_t dst_x; + uint32_t dst_y; + bool scale_enable; + bool reserve[3]; + + void (*dc_hw_config_pix_clock)(struct drm_crtc *crtc, int clock); + void (*dc_hw_disable)(struct drm_crtc *crtc); + void (*dc_hw_reset)(struct drm_crtc *crtc); +}; + +struct phytium_crtc_state { + struct drm_crtc_state base; +}; + +#define to_phytium_crtc(x) container_of(x, struct phytium_crtc, base) +#define to_phytium_crtc_state(x) container_of(x, struct phytium_crtc_state, base) + +void phytium_crtc_resume(struct drm_device *drm_dev); +int phytium_crtc_init(struct drm_device *dev, int phys_pipe); +#endif /* __PHYTIUM_CRTC_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.c b/drivers/gpu/drm/phytium/phytium_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..eedad22c153653c1dde2052db8b49c3cb6388f34 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_debugfs.c @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_reg.h" + +const char *const mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT] = { + "Memory_Vram_Total", + "Memory_Vram_Alloc", + "Memory_System_Carveout_Total", + "Memory_System_Carveout_Alloc", + "Memory_System_Alloc", +}; + +static ssize_t +phytium_dp_register_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + return len; +} + +static int phytium_dp_register_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_M_VID, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_M_VID)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_N_VID, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_N_VID)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_TRANSFER_UNIT_SIZE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_DATA_COUNT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_DATA_COUNT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HTOTAL, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HRES, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HRES)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSWIDTH, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSTART, + phytium_readl_reg(priv, 
group_offset, PHYTIUM_DP_MAIN_LINK_HSTART)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VTOTAL, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VRES, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VRES)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSWIDTH, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSTART, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSTART)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC0, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC1, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_USER_SYNC_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_VIDEO_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SECONDARY_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE)); + seq_puts(m, "audio:\n"); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_INPUT_SELECT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DIRECT_CLKDIV, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_COUNT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_MAP, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DATA_WINDOW, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_CATEGORY_CODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_MAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_MAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_NAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_NAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CLOCK_MODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_SOURCE_FORMAT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_AUDIO_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE)); + + return 0; +} + +static int phytium_dp_register_open(struct inode *inode, struct file *file) +{ + return single_open(file, 
phytium_dp_register_show, inode->i_private); +} + +static const struct file_operations phytium_dp_register_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_register_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_register_write, +}; + +static ssize_t +phytium_dp_trigger_train_fail_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + if (kstrtouint(tmp, 10, &phytium_dp->trigger_train_fail) != 0) + return -EINVAL; + + return len; +} + +static int phytium_dp_trigger_train_fail_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "trigger_train_fail: %d\n", phytium_dp->trigger_train_fail); + seq_printf(m, "train_retry_count: %d\n", phytium_dp->train_retry_count); + + return 0; +} + +static int phytium_dp_trigger_train_fail_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_trigger_train_fail_show, inode->i_private); +} + +static const struct file_operations phytium_dp_trigger_train_fail_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_trigger_train_fail_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_trigger_train_fail_write, +}; + +static int phytium_edp_backlight_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "backlight: %s\n", phytium_dp->panel.backlight_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_backlight_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_backlight_show, inode->i_private); +} + +static const struct file_operations phytium_edp_backlight_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_backlight_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int phytium_edp_power_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "power: %s\n", phytium_dp->panel.power_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_power_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_power_show, inode->i_private); +} + +static const struct file_operations phytium_edp_power_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_power_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +struct dpcd_block { + /* DPCD dump start address. */ + unsigned int offset; + /* DPCD dump end address, inclusive. If unset, .size will be used. */ + unsigned int end; + /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. 
*/ + size_t size; + /* Only valid for eDP. */ + bool edp; +}; + +static const struct dpcd_block phytium_dpcd_debug[] = { + { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, + { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, + { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, + { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, + { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, + { .offset = DP_SET_POWER }, + { .offset = DP_EDP_DPCD_REV }, + { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, + { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, + { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, + { .offset = DP_DEVICE_SERVICE_IRQ_VECTOR, .size = 1 }, + { .offset = DP_TEST_REQUEST, .end = DP_TEST_PATTERN }, +}; + +static int phytium_dpcd_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + uint8_t buf[16], i; + ssize_t err; + + if (connector->status != connector_status_connected) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_dpcd_debug); i++) { + const struct dpcd_block *b = &phytium_dpcd_debug[i]; + size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); + + if (WARN_ON(size > sizeof(buf))) + continue; + + err = drm_dp_dpcd_read(&phytium_dp->aux, b->offset, buf, size); + if (err <= 0) { + DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", + size, b->offset, err); + continue; + } + + seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); + } + + return 0; +} + +static int phytium_dpcd_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dpcd_show, inode->i_private); +} + +static const struct file_operations phytium_dpcd_fops = { + .owner = THIS_MODULE, + .open = phytium_dpcd_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t +phytium_dp_state_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + return len; +} + +static int phytium_dp_state_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "port number: %d\n", phytium_dp->port); + seq_printf(m, "source_max_lane_count: %d\n", phytium_dp->source_max_lane_count); + seq_printf(m, "max_source_rates: %d\n", + phytium_dp->source_rates[phytium_dp->num_source_rates-1]); + if (connector->status == connector_status_connected) { + seq_printf(m, "sink_max_lane_count: %d\n", phytium_dp->sink_max_lane_count); + seq_printf(m, "max_sink_rates: %d\n", + phytium_dp->sink_rates[phytium_dp->num_sink_rates-1]); + seq_printf(m, "link_rate: %d\n", phytium_dp->link_rate); + seq_printf(m, "link_lane_count: %d\n", phytium_dp->link_lane_count); + seq_printf(m, "train_set[0]: %d\n", phytium_dp->train_set[0]); + seq_printf(m, "has_audio: %s\n", phytium_dp->has_audio?"yes":"no"); + } + + return 0; +} + +static int phytium_dp_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_state_show, inode->i_private); +} + +static const struct file_operations phytium_dp_state_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_state_open, + .read = seq_read, + .llseek = 
seq_lseek, + .release = single_release, + .write = phytium_dp_state_write, +}; + +static const struct phytium_debugfs_files { + const char *name; + const struct file_operations *fops; +} phytium_debugfs_connector_files[] = { + {"dp_state", &phytium_dp_state_fops}, + {"dpcd", &phytium_dpcd_fops}, + {"dp_register", &phytium_dp_register_fops}, + {"dp_trigger_train_fail", &phytium_dp_trigger_train_fail_fops}, +}; + +static const struct phytium_debugfs_files phytium_edp_debugfs_connector_files[] = { + {"edp_power", &phytium_edp_power_fops}, + {"edp_backlight", &phytium_edp_backlight_fops}, +}; + +int phytium_debugfs_connector_add(struct drm_connector *connector) +{ + struct dentry *root = connector->debugfs_entry; + struct dentry *ent; + int i; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!root) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_debugfs_connector_files[i].name, + 0644, + root, + connector, + phytium_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + if (phytium_dp->is_edp) + for (i = 0; i < ARRAY_SIZE(phytium_edp_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_edp_debugfs_connector_files[i].name, + 0644, + root, + connector, + phytium_edp_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + return 0; +} + +static int phytium_mem_state_show(struct seq_file *m, void *data) +{ + struct phytium_display_private *priv = m->private; + uint8_t i; + + for (i = 0; i < ARRAY_SIZE(mem_state); i++) + seq_printf(m, "%-34s %10lld\n", mem_state[i], priv->mem_state[i]); + + return 0; +} + +static int phytium_mem_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_mem_state_show, inode->i_private); +} + +static const struct file_operations phytium_mem_state_fops = { + .owner = THIS_MODULE, + .open = phytium_mem_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct phytium_debugfs_files phytium_debugfs_display_files[] = { + {"mem_state", &phytium_mem_state_fops}, +}; + +int phytium_debugfs_display_register(struct phytium_display_private *priv) +{ + struct drm_minor *minor = priv->dev->primary; + struct dentry *root = minor->debugfs_root; + struct dentry *ent; + + if (!root) + return -ENODEV; + + ent = debugfs_create_file(phytium_debugfs_display_files[0].name, + 0644, + root, + priv, + phytium_debugfs_display_files[0].fops); + if (!ent) + return -ENOMEM; + + return 0; +} diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.h b/drivers/gpu/drm/phytium/phytium_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..dc784bc557a7355db3f311e6b864b2e54d7185cc --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_debugfs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_DEBUGFS_H__ +#define __PHYTIUM_DEBUGFS_H__ + +int phytium_debugfs_connector_add(struct drm_connector *connector); +int phytium_debugfs_display_register(struct phytium_display_private *priv); + +#endif /* __PHYTIUM_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c new file mode 100644 index 0000000000000000000000000000000000000000..31c080573414d25671a2192fab4c45700af35460 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_plane.h" +#include "phytium_crtc.h" +#include "phytium_dp.h" +#include "phytium_gem.h" +#include "phytium_fb.h" +#include "phytium_fbdev.h" +#include "phytium_reg.h" +#include "phytium_pci.h" +#include "phytium_platform.h" +#include "phytium_debugfs.h" + +int dc_fake_mode_enable; +module_param(dc_fake_mode_enable, int, 0644); +MODULE_PARM_DESC(dc_fake_mode_enable, "Enable DC fake mode (0-disabled; 1-enabled; default-0)"); + +int dc_fast_training_check = 1; +module_param(dc_fast_training_check, int, 0644); +MODULE_PARM_DESC(dc_fast_training_check, "Check dp fast training (0-disabled; 1-enabled; default-1)"); + +int num_source_rates = 4; +module_param(num_source_rates, int, 0644); +MODULE_PARM_DESC(num_source_rates, "set the source max rates (1-1.62Gbps; 2-2.7Gbps; 3-5.4Gbps; 4-8.1Gbps; default-4)"); + +int source_max_lane_count = 4; +module_param(source_max_lane_count, int, 0644); +MODULE_PARM_DESC(source_max_lane_count, "set the source lane count (1-1lane; 2-2lane; 4-4lane; default-4)"); + +int link_dynamic_adjust; +module_param(link_dynamic_adjust, int, 0644); +MODULE_PARM_DESC(link_dynamic_adjust, "dynamically select the training parameters according to the display mode (0-disabled; 1-enabled; default-0)"); + +int phytium_wait_cmd_done(struct phytium_display_private *priv, + uint32_t register_offset, + uint32_t request_bit, + uint32_t reply_bit) +{ + int timeout = 500, config = 0, ret = 0; + + do { + mdelay(1); + timeout--; + config = phytium_readl_reg(priv, 0, register_offset); + } while ((!(config & reply_bit)) && timeout); + + phytium_writel_reg(priv, config & (~request_bit), 0, register_offset); + + if (timeout == 0) { + DRM_ERROR("wait cmd reply timeout\n"); + ret = -EBUSY; + } else { + timeout = 500; + do { + mdelay(1); + timeout--; + config = phytium_readl_reg(priv, 0, register_offset); + } while ((config & reply_bit) && timeout); + if (timeout == 0) { + DRM_ERROR("clear cmd timeout\n"); + ret = -EBUSY; + } + } + mdelay(5); + + return ret; +} + +static void phytium_irq_preinstall(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i, status; + + for_each_pipe_masked(priv, i) { + status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + } +} + +static void phytium_irq_uninstall(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i, status; + + for_each_pipe_masked(priv, i) { + status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + } +} +
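+/* All pipes share one interrupt line: the handler below walks every enabled pipe, forwards a vblank event for each pipe whose status bit fired, and then gives the DP code a chance to service hot-plug interrupts. */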
+static irqreturn_t phytium_display_irq_handler(int irq, void *data) +{ + struct drm_device *dev = data; + struct phytium_display_private *priv = dev->dev_private; + uint32_t enabled = 0; + int i = 0, virt_pipe = 0; + irqreturn_t ret = IRQ_NONE, ret1 = IRQ_NONE; + + for_each_pipe_masked(priv, i) { + enabled = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + if (enabled & INT_STATUS) { + virt_pipe = phytium_get_virt_pipe(priv, i); + if (virt_pipe < 0) + return IRQ_NONE; + drm_handle_vblank(dev, virt_pipe); + ret = IRQ_HANDLED; + if (priv->dc_hw_clear_msi_irq) + priv->dc_hw_clear_msi_irq(priv, i); + } + } + + ret1 = phytium_dp_hpd_irq_handler(priv); + if (ret == IRQ_HANDLED || ret1 == IRQ_HANDLED) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +static const struct drm_mode_config_funcs phytium_mode_funcs = { + .fb_create = phytium_fb_create, + .output_poll_changed = drm_fb_helper_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void phytium_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + drm_atomic_helper_commit_planes(dev, state, false); + drm_atomic_helper_commit_modeset_enables(dev, state); + drm_atomic_helper_commit_hw_done(state); + drm_atomic_helper_wait_for_flip_done(dev, state); + drm_atomic_helper_cleanup_planes(dev, state); +} + +static struct drm_mode_config_helper_funcs phytium_mode_config_helpers = { + .atomic_commit_tail = phytium_atomic_commit_tail, +}; + +static int phytium_modeset_init(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i = 0, ret; + + drm_mode_config_init(dev); + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 16384; + dev->mode_config.max_height = 16384; + dev->mode_config.cursor_width = 32; + dev->mode_config.cursor_height = 32; + + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; + dev->mode_config.fb_modifiers_not_supported = false; + + dev->mode_config.funcs = &phytium_mode_funcs; + dev->mode_config.helper_private = &phytium_mode_config_helpers; + + for_each_pipe_masked(priv, i) { + ret = phytium_crtc_init(dev, i); + if (ret) { + DRM_ERROR("phytium_crtc_init(pipe %d) failed\n", i); + goto failed_crtc_init; + } + } + + for_each_pipe_masked(priv, i) { + ret = phytium_dp_init(dev, i); + if (ret) { + DRM_ERROR("phytium_dp_init(pipe %d) failed\n", i); + goto failed_dp_init; + } + } + + drm_mode_config_reset(dev); + + return 0; +failed_dp_init: +failed_crtc_init: + drm_mode_config_cleanup(dev); + return ret; +} + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe) +{ + int i = 0; + int virt_pipe = 0; + + for_each_pipe_masked(priv, i) { + if (i != phys_pipe) + virt_pipe++; + else + return virt_pipe; + } + + DRM_ERROR("%s %d failed\n", __func__, phys_pipe); + return -EINVAL; +} + +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe) +{ + int i = 0; + int tmp = 0; + + for_each_pipe_masked(priv, i) { + if (tmp != virt_pipe) + tmp++; + else + return i; + } + + DRM_ERROR("%s %d failed\n", __func__, virt_pipe); + return -EINVAL; +} +
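+/* Driver load runs in a fixed order: vblank bookkeeping first, then the modeset objects, then VRAM and the shared IRQ line, and fbdev last so that it can probe connectors that already exist. */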
+static int phytium_display_load(struct drm_device *dev, unsigned long flags) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + ret = drm_vblank_init(dev, priv->info.num_pipes); + if (ret) { + DRM_ERROR("vblank init failed\n"); + goto failed_vblank_init; + } + + ret = phytium_modeset_init(dev); + if (ret) { + DRM_ERROR("phytium_modeset_init failed\n"); + goto failed_modeset_init; + } + + if (priv->support_memory_type & (MEMORY_TYPE_VRAM_WC | MEMORY_TYPE_VRAM_DEVICE)) + priv->vram_hw_init(priv); + + phytium_irq_preinstall(dev); + ret = request_irq(priv->irq, phytium_display_irq_handler, + IRQF_SHARED, dev->driver->name, dev); + if (ret) { + DRM_ERROR("install irq failed\n"); + goto failed_irq_install; + } + + ret = phytium_drm_fbdev_init(dev); + if (ret) + DRM_ERROR("failed to init fbdev\n"); + + phytium_debugfs_display_register(priv); + + return ret; + +failed_irq_install: + drm_mode_config_cleanup(dev); +failed_modeset_init: +failed_vblank_init: + return ret; +} + +static void phytium_display_unload(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + + phytium_drm_fbdev_fini(dev); + phytium_irq_uninstall(dev); + free_irq(priv->irq, dev); + drm_mode_config_cleanup(dev); +} + +/* phytium display specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_PHYTIUM_VRAM_TYPE_DEVICE 0x0 +#define DRM_IOCTL_PHYTIUM_VRAM_TYPE_DEVICE DRM_IO(DRM_COMMAND_BASE\ + + DRM_PHYTIUM_VRAM_TYPE_DEVICE) + +static int phytium_ioctl_check_vram_device(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct phytium_display_private *priv = dev->dev_private; + + return ((priv->support_memory_type == MEMORY_TYPE_VRAM_DEVICE) ? 1 : 0); +} + +static const struct drm_ioctl_desc phytium_ioctls[] = { + /* testing-only ioctls */ + DRM_IOCTL_DEF_DRV(PHYTIUM_VRAM_TYPE_DEVICE, phytium_ioctl_check_vram_device, + DRM_AUTH|DRM_UNLOCKED), +}; + +static const struct file_operations phytium_drm_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = phytium_gem_mmap, +}; + +struct drm_driver phytium_display_drm_driver = { + .driver_features = DRIVER_HAVE_IRQ | + DRIVER_MODESET | + DRIVER_ATOMIC | + DRIVER_GEM, + .load = phytium_display_load, + .unload = phytium_display_unload, + .lastclose = drm_fb_helper_lastclose, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_import_sg_table = phytium_gem_prime_import_sg_table, + .dumb_create = phytium_gem_dumb_create, + .ioctls = phytium_ioctls, + .num_ioctls = ARRAY_SIZE(phytium_ioctls), + .fops = &phytium_drm_driver_fops, + .name = DRV_NAME, + .desc = DRV_DESC, + .date = DRV_DATE, + .major = DRV_MAJOR, + .minor = DRV_MINOR, +}; + +static void phytium_display_shutdown(struct drm_device *dev) +{ + drm_atomic_helper_shutdown(dev); +} + +static int phytium_display_pm_suspend(struct drm_device *dev) +{ + struct drm_atomic_state *state; + struct phytium_display_private *priv = dev->dev_private; + int ret, ret1; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); + state = drm_atomic_helper_suspend(dev); + if (IS_ERR(state)) { + DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", PTR_ERR(state)); + ret = PTR_ERR(state); + goto suspend_failed; + } + dev->mode_config.suspend_state = state; + ret = phytium_gem_suspend(dev); + if (ret) { + DRM_ERROR("phytium_gem_suspend failed: %d\n", ret); + goto gem_suspend_failed; + } + + return 0; + +gem_suspend_failed: + ret1 = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret1) + DRM_ERROR("Failed to resume (%d)\n", ret1); + 
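+ /* drm_atomic_helper_resume() consumed the suspend state above, so drop the stale pointer before unwinding */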
dev->mode_config.suspend_state = NULL; +suspend_failed: + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return ret; +} + +static int phytium_display_pm_resume(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + if (WARN_ON(!dev->mode_config.suspend_state)) + return -EINVAL; + + ret = phytium_dp_resume(dev); + if (ret) + return -EIO; + + phytium_crtc_resume(dev); + phytium_gem_resume(dev); + + if (priv->support_memory_type & (MEMORY_TYPE_VRAM_WC | MEMORY_TYPE_VRAM_DEVICE)) + priv->vram_hw_init(priv); + + ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret) { + DRM_ERROR("Failed to resume (%d)\n", ret); + return ret; + } + + dev->mode_config.suspend_state = NULL; + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return 0; +} + +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev) +{ + INIT_LIST_HEAD(&priv->gem_list_head); + spin_lock_init(&priv->hotplug_irq_lock); + INIT_WORK(&priv->hotplug_work, phytium_dp_hpd_work_func); + memset(priv->mem_state, 0, sizeof(priv->mem_state)); + priv->dev = dev; + priv->display_shutdown = phytium_display_shutdown; + priv->display_pm_suspend = phytium_display_pm_suspend; + priv->display_pm_resume = phytium_display_pm_resume; +} + +static int __init phytium_display_init(void) +{ + int ret = 0; + + ret = platform_driver_register(&phytium_platform_driver); + if (ret) + return ret; + + ret = pci_register_driver(&phytium_pci_driver); + + return ret; +} + +static void __exit phytium_display_exit(void) +{ + pci_unregister_driver(&phytium_pci_driver); + + platform_driver_unregister(&phytium_platform_driver); +} + +module_init(phytium_display_init); +module_exit(phytium_display_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yang Xun "); +MODULE_DESCRIPTION("Phytium Display Controller"); diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h new file mode 100644 index 0000000000000000000000000000000000000000..9038bf6ebd8ce7b3f32e29d81b4a4cd5ffafb4f6 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_DISPLAY_DRV_H__ +#define __PHYTIUM_DISPLAY_DRV_H__ + +#include +#include + +#define DEBUG_LOG 0 + +#define PHYTIUM_FORMAT_MAX_PLANE 3 +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + +#define DRV_NAME "dc" +#define DRV_DESC "phytium dc" +#define DRV_DATE "20201220" +#define DRV_MAJOR 1 +#define DRV_MINOR 1 + +/* come from GPU */ +#define DRM_FORMAT_MOD_VENDOR_PHYTIUM 0x92 + +/* dc:mode0 8x8 16bpp gpu: FBCDC_8X8_V10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC fourcc_mod_code(PHYTIUM, 21) +/* dc:mode3 8x4 32bpp gpu: FBCDC_16X4_v10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC fourcc_mod_code(PHYTIUM, 22) + +#define PIPE_MASK_SHIFT 0x0 +#define PIPE_MASK_MASK 0x7 +#define EDP_MASK_SHIFT 0x3 +#define EDP_MASK_MASK 0x7 + +enum phytium_platform { + PHYTIUM_PLATFORM_UNINITIALIZED = 0, + PHYTIUM_PLATFORM_PX210, + PHYTIUM_PLATFORM_PE220X, +}; + +enum phytium_mem_state_type { + PHYTIUM_MEM_VRAM_TOTAL = 0, + PHYTIUM_MEM_VRAM_ALLOC, + PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL, + PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC, + PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC, + PHYTIUM_MEM_STATE_TYPE_COUNT, +}; + +#define MEMORY_TYPE_VRAM_WC 0x1 +#define MEMORY_TYPE_SYSTEM_CARVEOUT 0x2 +#define MEMORY_TYPE_SYSTEM_UNIFIED 0x4 +#define MEMORY_TYPE_VRAM_DEVICE 0x8 + +#define IS_PLATFORM(priv, p) ((priv)->info.platform_mask & BIT(p)) + +#define IS_PX210(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_PX210) +#define IS_PE220X(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_PE220X) + +struct phytium_device_info { + unsigned char platform_mask; + unsigned char pipe_mask; + unsigned char num_pipes; + unsigned char total_pipes; + unsigned char edp_mask; + unsigned int crtc_clock_max; + unsigned int hdisplay_max; + unsigned int vdisplay_max; + unsigned int backlight_max; + unsigned long address_mask; +}; + +struct phytium_display_private { + /* hw */ + void __iomem *regs; + void __iomem *vram_addr; + struct phytium_device_info info; + char support_memory_type; + char reserve[3]; + uint32_t dc_reg_base[3]; + uint32_t dcreq_reg_base[3]; + uint32_t dp_reg_base[3]; + uint32_t address_transform_base; + uint32_t phy_access_base[3]; + + /* drm */ + struct drm_device *dev; + int irq; + + /* fb_dev */ + struct drm_fb_helper fbdev_helper; + struct phytium_gem_object *fbdev_phytium_gem; + + int save_reg[3]; + struct list_head gem_list_head; + + struct work_struct hotplug_work; + spinlock_t hotplug_irq_lock; + + void (*vram_hw_init)(struct phytium_display_private *priv); + void (*display_shutdown)(struct drm_device *dev); + int (*display_pm_suspend)(struct drm_device *dev); + int (*display_pm_resume)(struct drm_device *dev); + void (*dc_hw_clear_msi_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); + int (*dc_hw_fb_format_check)(const struct drm_mode_fb_cmd2 *mode_cmd, int count); + + struct gen_pool *memory_pool; + resource_size_t pool_phys_addr; + resource_size_t pool_size; + void *pool_virt_addr; + uint64_t mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT]; + + /* DMA info */ + int dma_inited; + struct dma_chan *dma_chan; +}; + +static inline unsigned int +phytium_readl_reg(struct phytium_display_private *priv, uint32_t group_offset, uint32_t reg_offset) +{ + unsigned int data; + + data = readl(priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Read 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif + return data; +} + +static inline void +phytium_writel_reg(struct phytium_display_private *priv, uint32_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + + writel(data, priv->regs + group_offset + 
reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif +} + +static inline void +phytium_writeb_reg(struct phytium_display_private *priv, uint8_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + writeb(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 8'h%08x\n", group_offset + reg_offset, data); +#endif +} + +#define for_each_pipe(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) + +#define for_each_pipe_masked(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) \ + for_each_if((__dev_priv->info.pipe_mask) & BIT(__p)) + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe); +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe); +int phytium_wait_cmd_done(struct phytium_display_private *priv, + uint32_t register_offset, + uint32_t request_bit, + uint32_t reply_bit); +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev); + +extern struct drm_driver phytium_display_drm_driver; +extern int dc_fake_mode_enable; +extern int dc_fast_training_check; +extern int num_source_rates; +extern int source_max_lane_count; +extern int link_dynamic_adjust; + +#endif /* __PHYTIUM_DISPLAY_DRV_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..98a06ccbc48d0ca5e49cc1f999ff91f28e5e2628 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.c @@ -0,0 +1,2639 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_debugfs.h" +#include "px210_dp.h" +#include "pe220x_dp.h" +#include "phytium_panel.h" +#include "phytium_reg.h" + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp); +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged); +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp); +static void phytium_edp_fini_connector(struct phytium_dp_device *phytium_dp); +static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp); +static void phytium_dp_audio_codec_fini(struct phytium_dp_device *phytium_dp); + +static int phytium_rate[] = {162000, 270000, 540000, 810000}; +static int codec_id = PHYTIUM_DP_AUDIO_ID; + +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + +#if DEBUG_LOG + pr_info("phy address write: 0x%x data:0x%x\n", address, data); +#endif + phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); + phytium_writel_reg(priv, data, group_offset, PHYTIUM_PHY_WRITE_DATA); + phytium_writel_reg(priv, ACCESS_WRITE, group_offset, PHYTIUM_PHY_ACCESS_CTRL); + udelay(10); +} + +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + 
uint32_t data;
+
+	phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS);
+	phytium_writel_reg(priv, ACCESS_READ, group_offset, PHYTIUM_PHY_ACCESS_CTRL);
+	udelay(10);
+	data = phytium_readl_reg(priv, group_offset, PHYTIUM_PHY_READ_DATA);
+#if DEBUG_LOG
+	pr_info("phy address read: 0x%x data:0x%x\n", address, data);
+#endif
+
+	return data;
+}
+
+static int
+phytium_dp_hw_aux_transfer_write(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+	unsigned int i = 0, j = 0;
+	unsigned int cmd = 0;
+	unsigned int aux_status = 0, interrupt_status = 0;
+	unsigned char *data = msg->buffer;
+	int count_timeout = 0;
+	long ret = 0;
+
+	for (i = 0; i < 3; i++) {
+		/* read PHYTIUM_DP_INTERRUPT_STATUS to clear the raw interrupt status */
+		phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+		phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS);
+		for (j = 0; j < msg->size; j++)
+			phytium_writeb_reg(priv, data[j], group_offset, PHYTIUM_DP_AUX_WRITE_FIFO);
+
+		cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT);
+		if (msg->size == 0)
+			cmd |= ADDRESS_ONLY;
+		else
+			cmd |= (msg->size-1) & BYTE_COUNT_MASK;
+		phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND);
+
+		count_timeout = 0;
+		do {
+			mdelay(5);
+			interrupt_status = phytium_readl_reg(priv, group_offset,
+							     PHYTIUM_DP_INTERRUPT_RAW_STATUS);
+			aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS);
+			if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR)
+					|| (interrupt_status & REPLY_TIMEOUT)) {
+				DRM_DEBUG_KMS("aux wait exit\n");
+				break;
+			}
+			count_timeout++;
+		} while (count_timeout < 6);
+
+		phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+		if (interrupt_status & REPLY_TIMEOUT) {
+			DRM_DEBUG_KMS("aux write reply timeout\n");
+			continue;
+		} else if (aux_status & REPLY_ERROR) {
+			DRM_DEBUG_KMS("aux write reply error\n");
+			continue;
+		} else if (aux_status & REPLY_RECEIVED) {
+			DRM_DEBUG_KMS("aux write reply received successfully\n");
+			break;
+		}
+	}
+
+	if (interrupt_status & REPLY_TIMEOUT) {
+		DRM_NOTE("aux(%d) write reply timeout\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if (aux_status & REPLY_ERROR) {
+		DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) {
+		DRM_ERROR("aux(%d) write reply no response\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	}
+
+	msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE);
+	ret = msg->size;
+out:
+	return ret;
+}
+
+static int
+phytium_dp_hw_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+	unsigned int i = 0;
+	unsigned int cmd = 0;
+	unsigned int aux_status = 0, interrupt_status = 0;
+	unsigned char *data = msg->buffer;
+	int count_timeout = 0;
+	long ret = 0;
+
+	for (i = 0; i < 3; i++) {
+		phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+		phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS);
+		cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT);
+		if (msg->size == 0)
+			cmd |= ADDRESS_ONLY;
+		else
+			cmd |= ((msg->size-1) & BYTE_COUNT_MASK);
+		phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND);
+
+		count_timeout = 0;
+		do {
+			mdelay(5);
+			interrupt_status = phytium_readl_reg(priv, group_offset,
+							     PHYTIUM_DP_INTERRUPT_RAW_STATUS);
+			aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS);
+			if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR)
+					|| (interrupt_status & REPLY_TIMEOUT)) {
+				DRM_DEBUG_KMS("aux wait exit\n");
+				break;
+			}
+			count_timeout++;
+		} while (count_timeout < 6);
+
+		phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+		if (interrupt_status & REPLY_TIMEOUT) {
+			DRM_DEBUG_KMS("aux read reply timeout\n");
+			continue;
+		} else if (aux_status & REPLY_ERROR) {
+			DRM_DEBUG_KMS("aux read reply error\n");
+			continue;
+		} else if (aux_status & REPLY_RECEIVED) {
+			DRM_DEBUG_KMS("aux read reply received successfully\n");
+			break;
+		}
+	}
+
+	if (interrupt_status & REPLY_TIMEOUT) {
+		DRM_NOTE("aux(%d) read reply timeout\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if (aux_status & REPLY_ERROR) {
+		DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) {
+		DRM_ERROR("aux(%d) read reply no response\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	}
+
+	msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE);
+	ret = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA_COUNT);
+
+	if (ret > msg->size) {
+		ret = msg->size;
+	} else if (ret != msg->size) {
+		DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%zx)\n", ret, msg->size);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	for (i = 0; i < ret; i++)
+		data[i] = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA);
+
+out:
+	return ret;
+}
+
+static void phytium_get_native_mode(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_display_mode *t, *mode;
+	struct drm_connector *connector = &phytium_dp->connector;
+	struct drm_display_mode *native_mode = &phytium_dp->native_mode;
+
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+			if (mode->hdisplay != native_mode->hdisplay ||
+			    mode->vdisplay != native_mode->vdisplay) {
+				memcpy(native_mode, mode, sizeof(*mode));
+				drm_mode_set_crtcinfo(native_mode, 0);
+			}
+			break;
+		}
+	}
+
+	if (&mode->head == &connector->probed_modes)
+		native_mode->clock = 0;
+}
+
+static int phytium_connector_add_common_modes(struct phytium_dp_device *phytium_dp)
+{
+	int i = 0, ret = 0;
+	struct drm_device *dev = phytium_dp->dev;
+	struct drm_display_mode *mode = NULL, *current_mode = NULL;
+	struct drm_display_mode *native_mode = &phytium_dp->native_mode;
+	bool mode_existed = false;
+	struct mode_size {
+		char name[DRM_DISPLAY_MODE_LEN];
+		int w;
+		int h;
+	} common_mode[] = {
+		{ "640x480", 640, 480},
+		{ "800x600", 800, 600},
+		{ "1024x768", 1024, 768},
+		{ "1280x720", 1280, 720},
+		{ "1280x800", 1280, 800},
+		{"1280x1024", 1280, 1024},
+		{ "1440x900", 1440, 900},
+		{"1680x1050", 1680, 1050},
+		{"1600x1200", 1600, 1200},
+		{"1920x1080", 1920, 1080},
+		{"1920x1200", 1920, 1200}
+	};
+
+	if (native_mode->clock == 0)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(common_mode); i++) {
+		mode_existed = false;
+
+		if (common_mode[i].w > native_mode->hdisplay ||
+		    common_mode[i].h > native_mode->vdisplay ||
+		    (common_mode[i].w == native_mode->hdisplay &&
+		     common_mode[i].h == native_mode->vdisplay))
+			continue;
+
+		list_for_each_entry(current_mode, &phytium_dp->connector.probed_modes, head) {
+			if (common_mode[i].w == current_mode->hdisplay &&
+			    common_mode[i].h == current_mode->vdisplay) {
+				mode_existed = true;
+				break;
+			}
+		}
+
+		if (mode_existed)
+			continue;
+
+		mode = drm_mode_duplicate(dev, native_mode);
+		if (mode == NULL)
+			continue;
+
+		mode->hdisplay = common_mode[i].w;
+		mode->vdisplay = common_mode[i].h;
+		mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+		strscpy(mode->name, common_mode[i].name, DRM_DISPLAY_MODE_LEN);
+		drm_mode_probed_add(&phytium_dp->connector, mode);
+		ret++;
+	}
+
+	return ret;
+}
+
+static int phytium_connector_get_modes(struct drm_connector *connector)
+{
+	struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+	struct edid *edid;
+	int ret = 0;
+
+	if (phytium_dp->is_edp)
+		edid = phytium_dp->edp_edid;
+	else
+		edid = drm_get_edid(connector, &phytium_dp->aux.ddc);
+
+	if (edid && drm_edid_is_valid(edid)) {
+		drm_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		phytium_dp->has_audio = drm_detect_monitor_audio(edid);
+		phytium_get_native_mode(phytium_dp);
+		if (dc_fake_mode_enable)
+			ret += phytium_connector_add_common_modes(phytium_dp);
+	} else {
+		drm_connector_update_edid_property(connector, NULL);
+		phytium_dp->has_audio = false;
+	}
+
+	if (!phytium_dp->is_edp)
+		kfree(edid);
+
+	return ret;
+}
+
+static struct drm_encoder *phytium_dp_best_encoder(struct drm_connector *connector)
+{
+	struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+
+	return &phytium_dp->encoder;
+}
+
+static const
+struct drm_connector_helper_funcs phytium_connector_helper_funcs = {
+	.get_modes = phytium_connector_get_modes,
+	.best_encoder = phytium_dp_best_encoder,
+};
+
+static void phytium_dp_set_sink_rates(struct phytium_dp_device *phytium_dp)
+{
+	static const int dp_rates[] = {162000, 270000, 540000, 810000};
+	int i, max_rate;
+
+	max_rate = drm_dp_bw_code_to_link_rate(phytium_dp->dpcd[DP_MAX_LINK_RATE]);
+	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
+		if (dp_rates[i] > max_rate)
+			break;
+		phytium_dp->sink_rates[i] = dp_rates[i];
+	}
+	phytium_dp->num_sink_rates = i;
+}
+
+static int get_common_rates(const int *source_rates, int source_len, const int *sink_rates,
+			    int sink_len, int *common_rates)
+{
+	int i = 0, j = 0, k = 0;
+
+	while (i < source_len && j < sink_len) {
+		if (source_rates[i] == sink_rates[j]) {
+			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+				return k;
+			common_rates[k] = source_rates[i];
+			++k;
+			++i;
+			++j;
+		} else if (source_rates[i] < sink_rates[j]) {
+			++i;
+		} else {
+			++j;
+		}
+	}
+	return k;
+}
+
+static void phytium_dp_set_common_rates(struct phytium_dp_device *phytium_dp)
+{
+	WARN_ON(!phytium_dp->num_source_rates || !phytium_dp->num_sink_rates);
+
+	phytium_dp->num_common_rates = get_common_rates(phytium_dp->source_rates,
+							phytium_dp->num_source_rates,
+							phytium_dp->sink_rates,
+							phytium_dp->num_sink_rates,
+							phytium_dp->common_rates);
+
+	if (WARN_ON(phytium_dp->num_common_rates == 0)) {
+		phytium_dp->common_rates[0] = 162000;
+		phytium_dp->num_common_rates = 1;
+	}
+}
+
+static bool phytium_dp_get_dpcd(struct phytium_dp_device *phytium_dp)
+{
+	int ret;
+	unsigned char sink_count = 0;
+
+	/* get the DPCD capability; the read cannot detect data errors, so check the revision */
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, 0x00, phytium_dp->dpcd,
+			       sizeof(phytium_dp->dpcd));
+	if (ret < 0) {
+		DRM_ERROR("port %d get DPCD capability fail\n", phytium_dp->port);
+		return false;
+	}
+
+	if (phytium_dp->dpcd[DP_DPCD_REV] == 0)
{ + DRM_ERROR("DPCD data error: 0x%x\n", phytium_dp->dpcd[DP_DPCD_REV]); + return false; + } + + /* parse sink support link */ + phytium_dp_set_sink_rates(phytium_dp); + phytium_dp_set_common_rates(phytium_dp); + phytium_dp->sink_max_lane_count = drm_dp_max_lane_count(phytium_dp->dpcd); + phytium_dp->common_max_lane_count = min(phytium_dp->source_max_lane_count, + phytium_dp->sink_max_lane_count); + + /* get dpcd sink count */ + if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_SINK_COUNT, &sink_count) <= 0) { + DRM_ERROR("get DPCD sink_count fail\n"); + return false; + } + + phytium_dp->sink_count = DP_GET_SINK_COUNT(sink_count); + if (!phytium_dp->sink_count) { + DRM_ERROR("DPCD sink_count should not be zero\n"); + return false; + } + + if (!drm_dp_is_branch(phytium_dp->dpcd)) + return true; + + if (phytium_dp->dpcd[DP_DPCD_REV] == 0x10) + return true; + + /* get downstream port for branch device */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_DOWNSTREAM_PORT_0, + phytium_dp->downstream_ports, DP_MAX_DOWNSTREAM_PORTS); + if (ret < 0) { + DRM_ERROR("get DPCD DFP fail\n"); + return false; + } + + return true; +} + +static enum drm_connector_status +phytium_dp_detect_dpcd(struct phytium_dp_device *phytium_dp) +{ + if (!phytium_dp_get_dpcd(phytium_dp)) + return connector_status_disconnected; + + if (!drm_dp_is_branch(phytium_dp->dpcd)) + return connector_status_connected; + + if (phytium_dp->downstream_ports[0] & DP_DS_PORT_HPD) { + return phytium_dp->sink_count ? connector_status_connected + : connector_status_disconnected; + } + return connector_status_connected; +} + +static void phytium_get_adjust_train(struct phytium_dp_device *phytium_dp, + const uint8_t link_status[DP_LINK_STATUS_SIZE], uint8_t lane_count) +{ + unsigned char v = 0; + unsigned char p = 0; + int lane; + unsigned char voltage_max; + unsigned char preemph_max; + + /* find max value */ + for (lane = 0; lane < lane_count; lane++) { + uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); + uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + voltage_max = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; + + preemph_max = DP_TRAIN_PRE_EMPH_LEVEL_3; + if (p >= preemph_max) + p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + phytium_dp->train_set[lane] = v | p; +} + +bool phytium_dp_coding_8b10b_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +bool phytium_dp_scrambled_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +static void phytium_dp_hw_set_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, + uint8_t train_set) +{ + phytium_dp->funcs->dp_hw_set_phy_lane_setting(phytium_dp, link_rate, train_set); +} + +static void phytium_dp_hw_set_link(struct phytium_dp_device *phytium_dp, + uint8_t 
lane_count, + uint32_t link_rate) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0, retry = 3; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, lane_count, + group_offset, PHYTIUM_DP_LANE_COUNT_SET); + phytium_writel_reg(priv, + drm_dp_link_rate_to_bw_code(link_rate), + group_offset, PHYTIUM_DP_LINK_BW_SET); + + if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) + phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + else + phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + +try_again: + ret = phytium_dp->funcs->dp_hw_set_phy_lane_and_rate(phytium_dp, lane_count, link_rate); + if ((ret < 0) && retry) { + retry--; + goto try_again; + } +} + +static void phytium_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint8_t test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, val = 0, tmp = 0, i; + uint32_t group_offset = priv->dp_reg_base[port]; + + if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + && custom_pattern && (custom_pattern_size > 0)) { + val = *(int *)custom_pattern; + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0); + val = *(int *)(custom_pattern + 4); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1); + val = *(short int *)(custom_pattern + 8); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2); + } + + if (test_pattern == PHYTIUM_PHY_TP_D10_2 || test_pattern == PHYTIUM_PHY_TP_PRBS7 + || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + else + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + + tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE; + val = 0; + for (i = 0; i < lane_count; i++) + val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i)); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_LINK_QUAL_PATTERN_SET); +} + +static void phytium_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp, + uint8_t train_pattern) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, tmp = 0; + uint32_t group_offset = priv->dp_reg_base[port]; + + /* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */ + if (train_pattern == DP_TRAINING_PATTERN_4 + || train_pattern == DP_TRAINING_PATTERN_DISABLE) { + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + phytium_writel_reg(priv, SCRAMBLER_RESET, group_offset, + PHYTIUM_DP_FORCE_SCRAMBLER_RESET); + } else { + phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + } + switch (train_pattern) { + case DP_TRAINING_PATTERN_DISABLE: + tmp = TRAINING_OFF; + break; + case DP_TRAINING_PATTERN_1: + tmp = TRAINING_PATTERN_1; + break; + case DP_TRAINING_PATTERN_2: + tmp = TRAINING_PATTERN_2; + break; + case DP_TRAINING_PATTERN_3: + tmp = TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_4: + tmp = TRAINING_PATTERN_4; + break; + default: + tmp = TRAINING_OFF; + break; + } + + phytium_writel_reg(priv, tmp, group_offset, 
PHYTIUM_DP_TRAINING_PATTERN_SET); +} + +void phytium_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int config = 0, config1, data_window = 0; + const struct dp_audio_n_m *n_m = NULL; + uint32_t group_offset = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + + data_window = 90*(phytium_dp->link_rate)/100 + *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) + /phytium_dp->mode.clock/4; + + phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); + + n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + if (n_m == NULL) { + DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", + phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); + } else { + phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); + } + + config1 = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, config1, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); +} + +static void phytium_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); +} + +static void phytium_dp_hw_audio_digital_mute(struct phytium_dp_device *phytium_dp, bool enable) +{ + struct phytium_display_private *priv = phytium_dp->dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + if (enable) + phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, + group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + else + phytium_writel_reg(priv, SEC_AUDIO_ENABLE, + group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); +} + +static int +phytium_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info) +{ + struct phytium_display_private *priv = phytium_dp->dev->dev_private; + int port = phytium_dp->port; + int ret = 0, data_window = 0; + const struct dp_audio_n_m *n_m = NULL; + uint32_t fs, ws, fs_accurac; + uint32_t group_offset = priv->dp_reg_base[port]; + + DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n", + __func__, phytium_dp->port, audio_info.sample_rate, + audio_info.channels, audio_info.sample_width); + + phytium_writel_reg(priv, INPUT_SELECT_I2S, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT); + phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate, + group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV); + phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK, + group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT); + phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP); + data_window = 90*(phytium_dp->link_rate)/100 + 
*(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay)
+		      /phytium_dp->mode.clock/4;
+	phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW);
+	phytium_writel_reg(priv, 0xb5, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE);
+
+	phytium_writel_reg(priv, CLOCK_MODE_SYNC, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE);
+	phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT,
+			   group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT);
+
+	switch (audio_info.sample_rate) {
+	case 32000:
+		fs = ORIG_FREQ_32000;
+		fs_accurac = SAMPLING_FREQ_32000;
+		break;
+	case 44100:
+		fs = ORIG_FREQ_44100;
+		fs_accurac = SAMPLING_FREQ_44100;
+		break;
+	case 48000:
+		fs = ORIG_FREQ_48000;
+		fs_accurac = SAMPLING_FREQ_48000;
+		break;
+	case 96000:
+		fs = ORIG_FREQ_96000;
+		fs_accurac = SAMPLING_FREQ_96000;
+		break;
+	case 176400:
+		fs = ORIG_FREQ_176400;
+		fs_accurac = SAMPLING_FREQ_176400;
+		break;
+	case 192000:
+		fs = ORIG_FREQ_192000;
+		fs_accurac = SAMPLING_FREQ_192000;
+		break;
+	default:
+		DRM_ERROR("dp does not support sample_rate %d\n", audio_info.sample_rate);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	switch (audio_info.sample_width) {
+	case 16:
+		ws = WORD_LENGTH_16;
+		break;
+	case 18:
+		ws = WORD_LENGTH_18;
+		break;
+	case 20:
+		ws = WORD_LENGTH_20;
+		break;
+	case 24:
+		ws = WORD_LENGTH_24;
+		break;
+	default:
+		DRM_ERROR("dp does not support sample_width %d\n", audio_info.sample_width);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	phytium_writel_reg(priv, ((fs&ORIG_FREQ_MASK)<<ORIG_FREQ_SHIFT) |
+			   ((ws&WORD_LENGTH_MASK)<<WORD_LENGTH_SHIFT),
+			   group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ);
+	phytium_writel_reg(priv, (fs_accurac&SAMPLING_FREQ_MASK)<<SAMPLING_FREQ_SHIFT,
+			   group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY);
+
+	n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, audio_info.sample_rate);
+	if (n_m == NULL) {
+		DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n",
+			 phytium_dp->link_rate, audio_info.sample_rate);
+		phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD);
+		phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD);
+	} else {
+		phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD);
+		phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD);
+	}
+	phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE,
+			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+	phytium_dp->audio_info = audio_info;
+
+	return 0;
+
+out:
+	phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE,
+			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+
+	return ret;
+}
+
+void phytium_dp_hw_disable_video(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE,
+			   group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
+}
+
+bool phytium_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port, config;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
+	return config ? true : false;
+}
+
+void phytium_dp_hw_enable_video(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE,
+			   group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
+	phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET);
+}
+
+void phytium_dp_hw_config_video(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+	unsigned long link_bw, data_rate = 0;
+	struct drm_display_info *display_info = &phytium_dp->connector.display_info;
+	unsigned char tu_size = 64;
+	unsigned long data_per_tu = 0;
+	int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value;
+
+	/* calculate M/N and tu_size */
+	phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, group_offset, PHYTIUM_DP_M_VID);
+	phytium_writel_reg(priv, phytium_dp->link_rate/10, group_offset, PHYTIUM_DP_N_VID);
+	link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count;
+	data_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8;
+
+	/* multiplied by 10 for the fixed-point register fields */
+	data_per_tu = 10*tu_size * data_rate/link_bw;
+	symbols_per_tu = (data_per_tu/10)&0xff;
+	frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf;
+	phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size,
+			   group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE);
+
+	symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8;
+	udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count;
+	phytium_writel_reg(priv, udc, group_offset, PHYTIUM_DP_DATA_COUNT);
+
+	/* config main stream attributes */
+	phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal,
+			   group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL);
+	phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay,
+			   group_offset, PHYTIUM_DP_MAIN_LINK_HRES);
+	phytium_writel_reg(priv,
+			   phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start,
+			   group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH);
+	phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start,
+			   group_offset, PHYTIUM_DP_MAIN_LINK_HSTART);
+	phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal,
+			   group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL);
+	phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay,
+			   group_offset, PHYTIUM_DP_MAIN_LINK_VRES);
+	phytium_writel_reg(priv,
+			   phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start,
+			   group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH);
+	phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start,
+			   group_offset, PHYTIUM_DP_MAIN_LINK_VSTART);
+
+	value = 0;
+	if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC)
+		value = value & (~HSYNC_POLARITY_LOW);
+	else
+		value = value | HSYNC_POLARITY_LOW;
+
+	if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC)
+		value = value & (~VSYNC_POLARITY_LOW);
+	else
+		value = value | VSYNC_POLARITY_LOW;
+	phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY);
+
+	switch (display_info->bpc) {
+	case 10:
+		value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET);
+		break;
+	case 6:
+		value = (MISC0_BIT_DEPTH_6BIT << MISC0_BIT_DEPTH_OFFSET);
+		break;
+	default:
+		value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET);
+		break;
+	}
+	value |=
(MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT) + | MISC0_SYNCHRONOUS_CLOCK; + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1); + + value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value | USER_HSYNC_POLARITY_HIGH; + else + value = value & (~USER_HSYNC_POLARITY_HIGH); + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value | USER_VSYNC_POLARITY_HIGH; + else + value = value & (~USER_VSYNC_POLARITY_HIGH); + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY); +} + +void phytium_dp_hw_disable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_DISABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); +} + +void phytium_dp_hw_enable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); +} + +void phytium_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE, + group_offset, PHYTIUM_INPUT_SOURCE_ENABLE); +} + +void phytium_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE)&VIRTUAL_SOURCE_0_ENABLE_MASK, + priv->dp_reg_base[port], PHYTIUM_INPUT_SOURCE_ENABLE); +} + +bool phytium_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + int config = 0; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); + return config ? 
true : false;
+}
+
+static void phytium_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t val = 0, raw_state = 0;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	val = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_RAW_STATUS);
+
+	/* an hpd event may already have been missed, so read
+	 * PHYTIUM_DP_INTERRUPT_STATUS only to clear the raw status
+	 */
+	phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+	raw_state = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SINK_HPD_STATE);
+	if (val & HPD_EVENT)
+		phytium_dp->dp_hpd_state.hpd_event_state = true;
+
+	if (val & HPD_IRQ)
+		phytium_dp->dp_hpd_state.hpd_irq_state = true;
+
+	if (raw_state & HPD_CONNECT)
+		phytium_dp->dp_hpd_state.hpd_raw_state = true;
+	else
+		phytium_dp->dp_hpd_state.hpd_raw_state = false;
+}
+
+void phytium_dp_hw_hpd_irq_setup(struct phytium_dp_device *phytium_dp, bool enable)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	phytium_dp->dp_hpd_state.hpd_irq_enable = enable;
+	if (enable)
+		phytium_writel_reg(priv, HPD_OTHER_MASK, group_offset, PHYTIUM_DP_INTERRUPT_MASK);
+	else
+		phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK,
+				   group_offset, PHYTIUM_DP_INTERRUPT_MASK);
+}
+
+int phytium_dp_hw_init(struct phytium_dp_device *phytium_dp)
+{
+	int ret = 0;
+	uint8_t count = 0;
+
+	phytium_dp->source_rates = phytium_rate;
+	phytium_dp->num_source_rates = num_source_rates;
+	count = phytium_dp->funcs->dp_hw_get_source_lane_count(phytium_dp);
+	phytium_dp->source_max_lane_count = count;
+
+	ret = phytium_dp->funcs->dp_hw_reset(phytium_dp);
+	if (ret)
+		goto out;
+	ret = phytium_dp->funcs->dp_hw_init_phy(phytium_dp);
+	if (ret)
+		goto out;
+
+	phytium_dp->fast_train_support = false;
+	phytium_dp->hw_spread_enable = phytium_dp->funcs->dp_hw_spread_is_enable(phytium_dp);
+
+out:
+	return ret;
+}
+
+static int phytium_dp_dpcd_get_tp_link(struct phytium_dp_device *phytium_dp,
+				       uint8_t *test_lane_count,
+				       uint32_t *test_link_rate)
+{
+	uint8_t test_link_bw;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LANE_COUNT,
+				test_lane_count);
+	if (ret <= 0) {
+		DRM_DEBUG_KMS("test pattern lane count read failed(%d)\n", ret);
+		goto failed;
+	}
+
+	ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LINK_RATE,
+				&test_link_bw);
+	if (ret <= 0) {
+		DRM_DEBUG_KMS("test pattern link rate read failed(%d)\n", ret);
+		goto failed;
+	}
+	*test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int phytium_dp_dpcd_set_link(struct phytium_dp_device *phytium_dp,
+				    uint8_t lane_count, uint32_t link_rate)
+{
+	uint8_t link_config[2];
+	int ret = 0;
+
+	link_config[0] = drm_dp_link_rate_to_bw_code(link_rate);
+	link_config[1] = lane_count;
+	if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd))
+		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+	ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_LINK_BW_SET, link_config, 2);
+	if (ret < 0) {
+		DRM_NOTE("write dpcd DP_LINK_BW_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	if (phytium_dp->hw_spread_enable)
+		link_config[0] = DP_SPREAD_AMP_0_5;
+	else
+		link_config[0] = 0;
+	link_config[1] = DP_SET_ANSI_8B10B;
+	ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
+	if (ret < 0) {
+		DRM_ERROR("write DP_DOWNSPREAD_CTRL fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int phytium_dp_dpcd_set_test_pattern(struct phytium_dp_device *phytium_dp,
+					    uint8_t test_pattern)
+{
+	unsigned char value;
+	int ret;
+
+	if (phytium_dp_coding_8b10b_need_enable(test_pattern))
+		value = DP_SET_ANSI_8B10B;
+	else
+		value = 0;
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
+	if (ret < 0) {
+		DRM_ERROR("write DP_MAIN_LINK_CHANNEL_CODING_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	if (phytium_dp_scrambled_need_enable(test_pattern))
+		value = DP_TRAINING_PATTERN_DISABLE;
+	else
+		value = (DP_TRAINING_PATTERN_DISABLE | DP_LINK_SCRAMBLING_DISABLE);
+
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value);
+	if (ret < 0) {
+		DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_LINK_QUAL_LANE0_SET, test_pattern);
+	if (ret < 0) {
+		DRM_ERROR("write DP_LINK_QUAL_LANE0_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int phytium_dp_dpcd_set_train_pattern(struct phytium_dp_device *phytium_dp,
+					     uint8_t train_pattern)
+{
+	uint8_t value;
+	int ret;
+
+	/* Scrambling is disabled for TPS1/TPS2/TPS3 and enabled for TPS4 */
+	if (train_pattern == DP_TRAINING_PATTERN_4 || train_pattern == DP_TRAINING_PATTERN_DISABLE)
+		value = train_pattern;
+	else
+		value = (train_pattern | DP_LINK_SCRAMBLING_DISABLE);
+
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value);
+	if (ret < 0) {
+		DRM_NOTE("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int
+phytium_dp_dpcd_set_lane_setting(struct phytium_dp_device *phytium_dp, uint8_t *train_set)
+{
+	int ret = 0;
+
+	ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_TRAINING_LANE0_SET,
+				phytium_dp->train_set, 4);
+	if (ret < 0) {
+		DRM_ERROR("write DP_TRAINING_LANE0_SET fail: ret:%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+phytium_dp_dpcd_get_adjust_request(struct phytium_dp_device *phytium_dp, uint8_t lane_count)
+{
+	int ret = 0;
+	uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+			       link_status, DP_LINK_STATUS_SIZE);
+	if (ret < 0) {
+		DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+		goto failed;
+	}
+	phytium_get_adjust_train(phytium_dp, link_status, lane_count);
+
+	return 0;
+failed:
+	return ret;
+}
+
+void phytium_dp_dpcd_sink_dpms(struct phytium_dp_device *phytium_dp, int mode)
+{
+	int ret, i;
+
+	if (phytium_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+	if (mode != DRM_MODE_DPMS_ON) {
+		ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+	} else {
+		for (i = 0; i < 3; i++) {
+			ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+			if (ret == 1)
+				break;
+			msleep(20);
+		}
+	}
+
+	if (ret != 1)
+		DRM_DEBUG_KMS("failed to %s sink power state\n",
+			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
"enable" : "disable"); +} + +static bool phytium_dp_link_training_clock_recovery(struct phytium_dp_device *phytium_dp) +{ + int ret; + unsigned char voltage, max_vswing_tries; + int voltage_tries; + + /* clear the test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count, + PHYTIUM_PHY_TP_NONE, NULL, 0); + + /* config source and sink's link rate and lane count */ + phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); + ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->link_lane_count, + phytium_dp->link_rate); + if (ret < 0) { + DRM_NOTE("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret); + return false; + } + + /* config source's voltage swing and pre-emphasis(103-106) */ + memset(phytium_dp->train_set, 0, sizeof(phytium_dp->train_set)); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + + /* config train pattern */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return false; + } + + /* config sink's voltage swing and pre-emphasis(103-106) */ + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return false; + } + + voltage_tries = 1; + max_vswing_tries = 0; + for (;;) { + unsigned char link_status[DP_LINK_STATUS_SIZE]; + + drm_dp_link_train_clock_recovery_delay(&phytium_dp->aux, phytium_dp->dpcd); + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return false; + } + + if (drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("clock revorery ok\n"); + return true; + } + + if (voltage_tries == 5) { + DRM_DEBUG_KMS("Same voltage tried 5 times\n"); + return false; + } + + if (max_vswing_tries == 1) { + DRM_DEBUG_KMS("Max Voltage Swing reached\n"); + return false; + } + + voltage = phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return false; + } + + if ((phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) + ++voltage_tries; + else + voltage_tries = 1; + + if (phytium_dp->train_set[0] & DP_TRAIN_MAX_SWING_REACHED) + ++max_vswing_tries; + + DRM_DEBUG_KMS("try train_set:0x%x voltage_tries:%d max_vswing_tries:%d\n", + phytium_dp->train_set[0], voltage_tries, max_vswing_tries); + } +} + +static unsigned int phytium_dp_get_training_pattern(struct phytium_dp_device *phytium_dp) +{ + bool sink_tps3, sink_tps4; + + sink_tps4 = drm_dp_tps4_supported(phytium_dp->dpcd); + if (sink_tps4) + return DP_TRAINING_PATTERN_4; + else if (phytium_dp->link_rate == 810000) + DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n"); + + sink_tps3 = drm_dp_tps3_supported(phytium_dp->dpcd); + if (sink_tps3) + return DP_TRAINING_PATTERN_3; + else if 
(phytium_dp->link_rate >= 540000) + DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); + + return DP_TRAINING_PATTERN_2; +} + +static bool phytium_dp_link_training_channel_equalization(struct phytium_dp_device *phytium_dp) +{ + unsigned int training_pattern; + int tries, ret; + unsigned char link_status[DP_LINK_STATUS_SIZE]; + bool channel_eq = false; + + /* config source and sink's voltage swing and pre-emphasis(103-106), from clock recovery */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return channel_eq; + } + + /* config source and sink's train_pattern x */ + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, training_pattern); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return channel_eq; + } + + for (tries = 0; tries < 5; tries++) { + drm_dp_link_train_channel_eq_delay(&phytium_dp->aux, phytium_dp->dpcd); + + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + break; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("CR check failed, cannot continue channel equalization\n"); + break; + } + + if (drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + channel_eq = true; + DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); + break; + } + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + break; + } + } + + /* Try 5 times, else fail and try at lower BW */ + if (tries == 5) + DRM_DEBUG_KMS("Channel equalization failed 5 times\n"); + + return channel_eq; +} + +static void phytium_dp_train_retry_work_fn(struct work_struct *work) +{ + struct phytium_dp_device *phytium_dp = train_retry_to_dp_device(work); + struct drm_connector *connector; + + connector = &phytium_dp->connector; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + mutex_lock(&connector->dev->mode_config.mutex); + drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); + mutex_unlock(&connector->dev->mode_config.mutex); + drm_kms_helper_hotplug_event(connector->dev); +} + +/* return index of rate in rates array, or -1 if not found */ +static int phytium_dp_rate_index(const int *rates, int len, int rate) +{ + int i; + + for (i = 0; i < len; i++) + if (rate == rates[i]) + return i; + + return -1; +} + +int phytium_dp_get_link_train_fallback_values(struct phytium_dp_device *phytium_dp) +{ + int index, ret = 0; + + if (phytium_dp->is_edp) { + phytium_dp->train_retry_count++; + DRM_INFO("Retrying Link training for eDP(%d) with same parameters\n", + phytium_dp->port); + goto out; + } else { + index = phytium_dp_rate_index(phytium_dp->common_rates, + phytium_dp->num_common_rates, + phytium_dp->link_rate); + if (index > 0) { + phytium_dp->link_rate = phytium_dp->common_rates[index - 1]; + } else if (phytium_dp->link_lane_count > 1) { + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->link_lane_count >> 1; + } else { + phytium_dp->train_retry_count++; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_INFO("Retrying Link training for DP(%d) with maximal parameters\n", + phytium_dp->port); + ret = -1; + } + } + +out: + return ret; +} + +static int +phytium_dp_stop_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret; + + /* config source and sink's train_pattern x: DP_TRAINING_PATTERN_DISABLE */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + if (ret < 0) { + DRM_NOTE("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return ret; + } + + return 0; +} + +int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + + phytium_dp_hw_disable_output(phytium_dp); + phytium_dp_hw_disable_input_source(phytium_dp); + phytium_dp_hw_disable_video(phytium_dp); + phytium_dp_hw_enable_input_source(phytium_dp); + phytium_dp_hw_enable_output(phytium_dp); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_OFF); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_ON); + + if (!phytium_dp_link_training_clock_recovery(phytium_dp)) + goto failure_handling; + + if (!phytium_dp_link_training_channel_equalization(phytium_dp)) + goto failure_handling; + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { + DRM_NOTE("phytium_dp_stop_link_train failed: ret = 
%d\n", ret);
+		goto out;
+	}
+
+	if (phytium_dp->trigger_train_fail) {
+		phytium_dp->trigger_train_fail--;
+		goto failure_handling;
+	}
+	phytium_dp->train_retry_count = 0;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Pass at Link Rate = %d, Lane count = %d\n",
+		      phytium_dp->connector.base.id,
+		      phytium_dp->connector.name, phytium_dp->link_rate,
+		      phytium_dp->link_lane_count);
+
+	return 0;
+
+failure_handling:
+	DRM_INFO("[CONNECTOR:%d:%s] Link Training failed at Link Rate = %d, Lane count = %d\n",
+		 phytium_dp->connector.base.id,
+		 phytium_dp->connector.name,
+		 phytium_dp->link_rate, phytium_dp->link_lane_count);
+
+	ret = phytium_dp_stop_link_train(phytium_dp);
+	if (ret < 0) {
+		DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret);
+		goto out;
+	}
+
+	phytium_dp_get_link_train_fallback_values(phytium_dp);
+	if (phytium_dp->train_retry_count < 5)
+		schedule_work(&phytium_dp->train_retry_work);
+	else
+		DRM_ERROR("DP(%d) Link Training Unsuccessful, and stop Training\n",
+			  phytium_dp->port);
+
+out:
+	return -1;
+}
+
+static bool phytium_dp_needs_link_retrain(struct phytium_dp_device *phytium_dp)
+{
+	unsigned char link_status[DP_LINK_STATUS_SIZE];
+	int ret = 0;
+
+	/* get link status 0x202-0x207 */
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+			       link_status, DP_LINK_STATUS_SIZE);
+	if (ret < 0) {
+		DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+		return true;
+	}
+
+	if ((phytium_dp->link_rate == 0) || (phytium_dp->link_lane_count == 0)) {
+		DRM_DEBUG_KMS("link_rate(%d) or lane_count(%d) is invalid\n",
+			      phytium_dp->link_rate, phytium_dp->link_lane_count);
+		return true;
+	}
+
+	/* Make sure clock is still ok */
+	if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) {
+		DRM_DEBUG_KMS("Clock recovery check failed\n");
+		return true;
+	}
+
+	if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) {
+		DRM_DEBUG_KMS("Channel EQ check failed\n");
+		return true;
+	}
+
+	if (!phytium_dp_hw_output_is_enable(phytium_dp)) {
+		DRM_DEBUG_KMS("check DP output enable failed\n");
+		return true;
+	}
+	return false;
+}
+
+static bool
+phytium_dp_get_sink_irq(struct phytium_dp_device *phytium_dp, u8 *sink_irq_vector)
+{
+	return drm_dp_dpcd_readb(&phytium_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+				 sink_irq_vector) == 1;
+}
+
+static uint8_t phytium_dp_autotest_phy_pattern(struct phytium_dp_device *phytium_dp)
+{
+	union phytium_phy_tp phytium_phy_tp;
+	int ret;
+	unsigned char test_80_bit_pattern[
+			(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+			 DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+	unsigned char test_pattern;
+	unsigned int offset;
+
+	offset = DP_PHY_TEST_PATTERN;
+
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, offset,
+			       &phytium_phy_tp.raw,
+			       sizeof(phytium_phy_tp));
+	if (ret <= 0) {
+		DRM_DEBUG_KMS("Could not read DP_PHY_TEST_PATTERN\n");
+		goto failed;
+	}
+
+	test_pattern = phytium_phy_tp.bits.PATTERN;
+
+	if (test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) {
+		ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+				       test_80_bit_pattern,
+				       sizeof(test_80_bit_pattern));
+		if (ret <= 0) {
+			DRM_DEBUG_KMS("Could not read DP_TEST_80BIT_CUSTOM_PATTERN\n");
+			goto failed;
+		}
+	}
+
+	/* config source and sink's link rate and lane count */
+	ret = phytium_dp_dpcd_get_tp_link(phytium_dp, &phytium_dp->compliance.test_lane_count,
+					  &phytium_dp->compliance.test_link_rate);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_get_tp_link fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	phytium_dp_hw_set_link(phytium_dp, phytium_dp->compliance.test_lane_count,
+			       phytium_dp->compliance.test_link_rate);
+	ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->compliance.test_lane_count,
+				       phytium_dp->compliance.test_link_rate);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_link fail: ret:%d\n", ret);
+		goto failed_dpcd_set_link;
+	}
+
+	/* config source and sink's lane setting: voltage swing and pre-emphasis */
+	ret = phytium_dp_dpcd_get_adjust_request(phytium_dp,
+						 phytium_dp->compliance.test_lane_count);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_get_adjust_request fail: ret:%d\n", ret);
+		goto failed_dpcd_get_adjust_request;
+	}
+	phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->compliance.test_link_rate,
+				       phytium_dp->train_set[0]);
+	ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+		goto failed_dpcd_set_lane_setting;
+	}
+
+	/* config test pattern */
+	phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count,
+				       test_pattern, test_80_bit_pattern,
+				       sizeof(test_80_bit_pattern));
+	ret = phytium_dp_dpcd_set_test_pattern(phytium_dp, test_pattern);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_test_pattern fail: ret:%d\n", ret);
+		goto failed_dpcd_set_tp;
+	}
+
+	return DP_TEST_ACK;
+
+failed_dpcd_set_tp:
+	phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count,
+				       PHYTIUM_PHY_TP_NONE, test_80_bit_pattern,
+				       sizeof(test_80_bit_pattern));
+failed_dpcd_set_link:
+failed_dpcd_set_lane_setting:
+failed_dpcd_get_adjust_request:
+failed:
+	return DP_TEST_NAK;
+}
+
+static void phytium_dp_handle_test_request(struct phytium_dp_device *phytium_dp)
+{
+	uint8_t response = DP_TEST_NAK;
+	uint8_t request = 0;
+	int status;
+
+	status = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_REQUEST, &request);
+	if (status <= 0) {
+		DRM_DEBUG_KMS("Could not read test request from sink\n");
+		goto update_status;
+	}
+
+	switch (request) {
+	case DP_TEST_LINK_TRAINING:
+	case DP_TEST_LINK_VIDEO_PATTERN:
+	case DP_TEST_LINK_EDID_READ:
+		DRM_DEBUG_KMS("Unsupported test request '%02x'\n", request);
+		response = DP_TEST_NAK;
+		break;
+	case DP_TEST_LINK_PHY_TEST_PATTERN:
+		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
+		response = phytium_dp_autotest_phy_pattern(phytium_dp);
+		break;
+	default:
+		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
+		break;
+	}
+
+update_status:
+	status = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TEST_RESPONSE, response);
+	if (status <= 0)
+		DRM_DEBUG_KMS("Could not write test response to sink\n");
+}
+
+static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_state)
+{
+	struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+	enum drm_connector_status status = connector->status;
+	bool video_enable = false;
+	uint32_t index = 0;
+
+	if (phytium_dp->is_edp)
+		status = connector_status_connected;
+	else if (hpd_raw_state) {
+		if (!phytium_dp_needs_link_retrain(phytium_dp)) {
+			status = connector_status_connected;
+			goto out;
+		}
+	} else {
+		status = connector_status_disconnected;
+		goto out;
+	}
+
+	if (!phytium_dp->is_edp) {
+		status = phytium_dp_detect_dpcd(phytium_dp);
+		if (status == connector_status_disconnected)
+			goto out;
+
+		index = phytium_dp->num_common_rates-1;
+		phytium_dp->max_link_rate = phytium_dp->common_rates[index];
+		phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count;
+		phytium_dp->link_rate = phytium_dp->max_link_rate;
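+		/* cache the best common link settings, retrain at them, and
+		 * restore the video stream afterwards if it was already live
+		 */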
phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + } + +out: + return status; +} + +static int phytium_dp_short_pulse(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + enum drm_connector_status status = connector->status; + u8 sink_irq_vector = 0; + bool video_enable = false; + + /* handle the test pattern */ + if (phytium_dp_get_sink_irq(phytium_dp, &sink_irq_vector) && + sink_irq_vector != 0) { + drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector); + if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) + phytium_dp_handle_test_request(phytium_dp); + if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) + DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); + } + if (!phytium_dp_needs_link_retrain(phytium_dp)) { + status = connector_status_connected; + goto out; + } + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + +out: + return status; +} + +void phytium_dp_hpd_poll_handler(struct phytium_display_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug poll functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); +} + +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + struct phytium_display_private *priv = dev->dev_private; + bool handler = false; + bool hpd_raw_state_old = false; + + /* We might have missed any hotplugs that happened, so polling and handler */ + if (enable) { + spin_lock_irq(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (!phytium_dp->dp_hpd_state.hpd_irq_enable) { + hpd_raw_state_old = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state + || (hpd_raw_state_old != phytium_dp->dp_hpd_state.hpd_raw_state)) { + handler = true; + } + } + } + spin_unlock_irq(&priv->hotplug_irq_lock); + if (handler) + phytium_dp_hpd_poll_handler(priv); + } + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + 
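+		/* apply the requested HPD interrupt mask to every DP encoder */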
phytium_dp_hw_hpd_irq_setup(phytium_dp, enable); + } +} + +void phytium_dp_hpd_work_func(struct work_struct *work) +{ + struct phytium_display_private *priv = + container_of(work, struct phytium_display_private, hotplug_work); + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug work functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); + + phytium_dp_hpd_irq_setup(dev, true); +} + +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv) +{ + struct drm_encoder *encoder = NULL; + struct phytium_dp_device *phytium_dp = NULL; + struct drm_device *dev = priv->dev; + bool handler = false; + + spin_lock(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->dp_hpd_state.hpd_irq_enable) { + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state) { + handler = true; + } + } + } + spin_unlock(&priv->hotplug_irq_lock); + + if (handler) { + phytium_dp_hpd_irq_setup(dev, false); + schedule_work(&priv->hotplug_work); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + + +static void phytium_dp_fast_link_train_detect(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->fast_train_support = !!(phytium_dp->dpcd[DP_MAX_DOWNSPREAD] + & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); + DRM_DEBUG_KMS("fast link training %s\n", + phytium_dp->fast_train_support ? 
"supported" : "unsupported"); +} + +bool phytium_dp_fast_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + unsigned int training_pattern; + + /* clear the test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count, + PHYTIUM_PHY_TP_NONE, NULL, 0); + + /* config source and sink's link rate and lane count */ + phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + + /* config train pattern */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); + usleep_range(500, 600); + + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + usleep_range(500, 600); + + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + if (dc_fast_training_check) { + unsigned char link_status[DP_LINK_STATUS_SIZE]; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return false; + } + + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check clock recovery failed\n"); + return false; + } + + if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check channel equalization failed\n"); + return false; + } + } + + return true; +} + +static enum drm_connector_status +phytium_connector_detect(struct drm_connector *connector, bool force) +{ + enum drm_connector_status status = connector->status; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + bool hpd_event_state, hpd_irq_state, hpd_raw_state; + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + bool plugged = true; + + spin_lock_irq(&priv->hotplug_irq_lock); + hpd_event_state = phytium_dp->dp_hpd_state.hpd_event_state; + hpd_irq_state = phytium_dp->dp_hpd_state.hpd_irq_state; + hpd_raw_state = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp->dp_hpd_state.hpd_event_state = false; + phytium_dp->dp_hpd_state.hpd_irq_state = false; + spin_unlock_irq(&priv->hotplug_irq_lock); + + if (hpd_event_state) + status = phytium_dp_long_pulse(connector, hpd_raw_state); + + if (hpd_irq_state) + status = phytium_dp_short_pulse(connector); + + if (status == connector_status_unknown) + status = connector_status_disconnected; + + if ((!phytium_dp->is_edp) && (!hpd_raw_state)) + status = connector_status_disconnected; + + if (connector->status != status) { + if ((status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + } + + return status; +} + +static void +phytium_connector_destroy(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + drm_connector_cleanup(connector); + kfree(phytium_dp); +} + +static int +phytium_dp_connector_register(struct drm_connector *connector) +{ + int ret; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + phytium_dp_aux_init(phytium_dp); + if (phytium_dp->is_edp) { + phytium_edp_init_connector(phytium_dp); + ret = phytium_edp_backlight_device_register(phytium_dp); + if (ret) + DRM_ERROR("failed to register port(%d) backlight 
device(ret=%d)\n", + phytium_dp->port, ret); + } + + ret = phytium_debugfs_connector_add(connector); + if (ret) + DRM_ERROR("failed to register phytium connector debugfs(ret=%d)\n", ret); + + return 0; +} + +static void +phytium_dp_connector_unregister(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (phytium_dp->is_edp) { + phytium_edp_backlight_device_unregister(phytium_dp); + phytium_edp_fini_connector(phytium_dp); + } + drm_dp_aux_unregister(&phytium_dp->aux); +} + +static const struct drm_connector_funcs phytium_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = phytium_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = phytium_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .late_register = phytium_dp_connector_register, + .early_unregister = phytium_dp_connector_unregister, +}; + +static void phytium_dp_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct phytium_dp_device *dp = encoder_to_dp_device(encoder); + + drm_mode_copy(&dp->mode, adjusted); +} + +static void phytium_edp_panel_poweron(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_poweron(&phytium_dp->panel); +} + +static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_poweroff(&phytium_dp->panel); +} + +static void phytium_edp_backlight_on(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_enable_backlight(&phytium_dp->panel); +} + +static void phytium_edp_backlight_off(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_disable_backlight(&phytium_dp->panel); +} + +static void phytium_encoder_disable(struct drm_encoder *encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + + if (phytium_dp->is_edp) + phytium_edp_backlight_off(phytium_dp); + + phytium_dp_hw_disable_video(phytium_dp); + + mdelay(50); + + if (phytium_dp->is_edp) + phytium_edp_panel_poweroff(phytium_dp); +} + +void phytium_dp_adjust_link_train_parameter(struct phytium_dp_device *phytium_dp) +{ + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned long link_bw, date_rate = 0, bs_limit, bs_request; + int rate = 0; + + bs_request = phytium_dp->mode.crtc_htotal/(phytium_dp->mode.crtc_clock/1000); + date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; + + for (;;) { + bs_limit = 8192 / (phytium_dp->link_rate/1000); + link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; + rate = 10 * date_rate / link_bw; + DRM_DEBUG_KMS("adjust link rate(%d), lane count(%d)\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); + DRM_DEBUG_KMS("for crtc_clock(%d) bs_request(%ld) bs_limit(%ld) rate(%d)\n", + phytium_dp->mode.crtc_clock, bs_request, bs_limit, rate); + if ((link_dynamic_adjust && (bs_request < bs_limit) && rate < 10) || + ((!link_dynamic_adjust) && (rate < 10))) + break; + phytium_dp_get_link_train_fallback_values(phytium_dp); + } + + DRM_DEBUG_KMS("Try link training at Link Rate = %d, Lane count = %d\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); +} + +static void phytium_encoder_enable(struct drm_encoder *encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + int ret = 0; + + phytium_dp_hw_disable_video(phytium_dp); 
+ + if (phytium_dp->is_edp) { + phytium_edp_panel_poweron(phytium_dp); + if (phytium_dp->fast_train_support) + phytium_dp_fast_link_train(phytium_dp); + else + ret = phytium_dp_start_link_train(phytium_dp); + mdelay(2); + phytium_dp_fast_link_train_detect(phytium_dp); + } else { + phytium_dp_adjust_link_train_parameter(phytium_dp); + ret = phytium_dp_start_link_train(phytium_dp); + mdelay(2); + } + + phytium_dp_hw_config_video(phytium_dp); + if (ret == 0) { + phytium_dp_hw_enable_video(phytium_dp); + if (phytium_dp->has_audio) + phytium_dp_hw_enable_audio(phytium_dp); + } + + if (phytium_dp->is_edp) + phytium_edp_backlight_on(phytium_dp); + +} + +enum drm_mode_status +phytium_encoder_mode_valid(struct drm_encoder *encoder, const struct drm_display_mode *mode) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned int requested, actual; + + switch (display_info->bpc) { + case 10: + case 6: + case 8: + break; + default: + DRM_INFO("not support bpc(%d)\n", display_info->bpc); + display_info->bpc = 8; + break; + } + + if ((display_info->color_formats & DRM_COLOR_FORMAT_RGB444) == 0) { + DRM_INFO("not support color_format(%d)\n", display_info->color_formats); + display_info->color_formats = DRM_COLOR_FORMAT_RGB444; + } + + requested = mode->clock * display_info->bpc * 3 / 1000; + actual = phytium_dp->max_link_rate * phytium_dp->max_link_lane_count / 100; + actual = actual * 8 / 10; + if (requested >= actual) { + DRM_DEBUG_KMS("requested=%d, actual=%d, clock=%d\n", requested, actual, + mode->clock); + return MODE_CLOCK_HIGH; + } + + if (dc_fake_mode_enable && + (phytium_dp->native_mode.clock == mode->clock) && + (phytium_dp->native_mode.htotal == mode->htotal) && + (phytium_dp->native_mode.vtotal == mode->vtotal)) + return MODE_OK; + + if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) + return MODE_BAD_HVALUE; + + if ((mode->hdisplay == 1024) && (mode->clock > 78000)) + return MODE_BAD_HVALUE; + + if ((mode->hdisplay < 640) || (mode->vdisplay < 480)) + return MODE_BAD_HVALUE; + + return MODE_OK; +} + +static const struct drm_encoder_helper_funcs phytium_encoder_helper_funcs = { + .mode_set = phytium_dp_encoder_mode_set, + .disable = phytium_encoder_disable, + .enable = phytium_encoder_enable, + .mode_valid = phytium_encoder_mode_valid, +}; + +void phytium_dp_encoder_destroy(struct drm_encoder *encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + + phytium_dp_audio_codec_fini(phytium_dp); + drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_funcs phytium_encoder_funcs = { + .destroy = phytium_dp_encoder_destroy, +}; + +static const struct dp_audio_n_m phytium_dp_audio_n_m[] = { + { 32000, 162000, 1024, 10125 }, + { 44100, 162000, 784, 5625 }, + { 48000, 162000, 512, 3375 }, + { 64000, 162000, 2048, 10125 }, + { 88200, 162000, 1568, 5625 }, + { 96000, 162000, 1024, 3375 }, + { 128000, 162000, 4096, 10125 }, + { 176400, 162000, 3136, 5625 }, + { 192000, 162000, 2048, 3375 }, + { 32000, 270000, 1024, 16875 }, + { 44100, 270000, 784, 9375 }, + { 48000, 270000, 512, 5625 }, + { 64000, 270000, 2048, 16875 }, + { 88200, 270000, 1568, 9375 }, + { 96000, 270000, 1024, 5625 }, + { 128000, 270000, 4096, 16875 }, + { 176400, 270000, 3136, 9375 }, + { 192000, 270000, 2048, 5625 }, + { 32000, 540000, 1024, 33750 }, + { 44100, 540000, 784, 18750 }, + { 48000, 540000, 512, 11250 }, + { 64000, 540000, 2048, 33750 }, + { 88200, 540000, 1568, 18750 }, 
+ { 96000, 540000, 1024, 11250 }, + { 128000, 540000, 4096, 33750 }, + { 176400, 540000, 3136, 18750 }, + { 192000, 540000, 2048, 11250 }, + { 32000, 810000, 1024, 50625 }, + { 44100, 810000, 784, 28125 }, + { 48000, 810000, 512, 16875 }, + { 64000, 810000, 2048, 50625 }, + { 88200, 810000, 1568, 28125 }, + { 96000, 810000, 1024, 16875 }, + { 128000, 810000, 4096, 50625 }, + { 176400, 810000, 3136, 28125 }, + { 192000, 810000, 2048, 16875 }, +}; + +static int phytium_dp_audio_get_eld(struct device *dev, void *data, u8 *buf, size_t len) +{ + struct phytium_dp_device *phytium_dp = data; + + memcpy(buf, phytium_dp->connector.eld, min(sizeof(phytium_dp->connector.eld), len)); + + return 0; +} + +static int phytium_dp_audio_mute_stream(struct device *dev, void *data, bool enable, int direction) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_digital_mute(phytium_dp, enable); + + return 0; +} + +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phytium_dp_audio_n_m); i++) { + if (sample_rate == phytium_dp_audio_n_m[i].sample_rate + && link_rate == phytium_dp_audio_n_m[i].link_rate) + return &phytium_dp_audio_n_m[i]; + } + + return NULL; +} + +static int phytium_dp_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct phytium_dp_device *phytium_dp = data; + int ret = 0; + struct audio_info audio_info = { + .sample_width = params->sample_width, + .sample_rate = params->sample_rate, + .channels = params->channels, + }; + + if (daifmt->fmt != HDMI_I2S) { + DRM_ERROR("invalid audio format %d\n", daifmt->fmt); + ret = -EINVAL; + goto failed; + } + + ret = phytium_dp_hw_audio_hw_params(phytium_dp, audio_info); + +failed: + return ret; +} + +static void phytium_dp_audio_shutdown(struct device *dev, void *data) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_shutdown(phytium_dp); +} + +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged) +{ + if (phytium_dp->plugged_cb && phytium_dp->codec_dev) + phytium_dp->plugged_cb(phytium_dp->codec_dev, plugged); +} + +static int phytium_dp_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct phytium_dp_device *phytium_dp = data; + bool plugged; + + phytium_dp->plugged_cb = fn; + phytium_dp->codec_dev = codec_dev; + + if ((phytium_dp->connector.status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + return 0; +} + + +static const struct hdmi_codec_ops phytium_audio_codec_ops = { + .hw_params = phytium_dp_audio_hw_params, + .audio_shutdown = phytium_dp_audio_shutdown, + .mute_stream = phytium_dp_audio_mute_stream, + .get_eld = phytium_dp_audio_get_eld, + .hook_plugged_cb = phytium_dp_audio_hook_plugged_cb, +}; + +static int phytium_dp_audio_codec_init(struct phytium_dp_device *phytium_dp) +{ + struct device *dev = phytium_dp->dev->dev; + struct hdmi_codec_pdata codec_data = { + .i2s = 1, + .spdif = 0, + .ops = &phytium_audio_codec_ops, + .max_i2s_channels = 2, + .data = phytium_dp, + }; + + phytium_dp->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + codec_id, + &codec_data, sizeof(codec_data)); + if (!PTR_ERR_OR_ZERO(phytium_dp->audio_pdev)) + codec_id += 1; + + return PTR_ERR_OR_ZERO(phytium_dp->audio_pdev); +} + +static void 
phytium_dp_audio_codec_fini(struct phytium_dp_device *phytium_dp) +{ + + if (!PTR_ERR_OR_ZERO(phytium_dp->audio_pdev)) + platform_device_unregister(phytium_dp->audio_pdev); + phytium_dp->audio_pdev = NULL; + codec_id -= 1; +} + +static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + struct phytium_dp_device *phytium_dp = container_of(aux, struct phytium_dp_device, aux); + long ret = 0; + + DRM_DEBUG_KMS("msg->size: 0x%lx\n", msg->size); + + if (WARN_ON(msg->size > 16)) + return -E2BIG; + + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_WRITE_STATUS_UPDATE: + ret = phytium_dp_hw_aux_transfer_write(phytium_dp, msg); + DRM_DEBUG_KMS("aux write reply:0x%x ret:0x%lx\n", msg->reply, ret); + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + ret = phytium_dp_hw_aux_transfer_read(phytium_dp, msg); + DRM_DEBUG_KMS("aux read ret:0x%lx\n", ret); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp) +{ + drm_dp_aux_init(&phytium_dp->aux); + phytium_dp->aux.name = kasprintf(GFP_KERNEL, "dp-%d", phytium_dp->port); + phytium_dp->aux.transfer = phytium_dp_aux_transfer; +} + +int phytium_get_encoder_crtc_mask(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int i, mask = 0; + + for_each_pipe_masked(priv, i) { + if (i != port) + mask++; + else + break; + } + + return BIT(mask); +} + +static bool phytium_dp_is_edp(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (priv->info.edp_mask & BIT(port)) + return true; + else + return false; +} + +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp) +{ + enum drm_connector_status status; + struct drm_connector *connector = &phytium_dp->connector; + + phytium_edp_panel_poweron(phytium_dp); + + status = phytium_dp_detect_dpcd(phytium_dp); + if (status == connector_status_disconnected) { + DRM_ERROR("detect edp dpcd failed\n"); + return false; + } + + phytium_dp->edp_edid = drm_get_edid(connector, &phytium_dp->aux.ddc); + if (!phytium_dp->edp_edid) { + DRM_ERROR("get edp edid failed\n"); + return false; + } + + connector->status = status; + phytium_dp->max_link_rate = phytium_dp->common_rates[phytium_dp->num_common_rates-1]; + phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + return true; +} + +static void phytium_edp_fini_connector(struct phytium_dp_device *phytium_dp) +{ + kfree(phytium_dp->edp_edid); + + phytium_dp->edp_edid = NULL; + phytium_edp_panel_poweroff(phytium_dp); +} + +int phytium_dp_resume(struct drm_device *drm_dev) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + int ret = 0; + + drm_for_each_encoder(encoder, drm_dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->is_edp) { + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + return -EIO; + } + } + + return 
0; +} + +int phytium_dp_init(struct drm_device *dev, int port) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_dp_device *phytium_dp = NULL; + int ret, type; + + DRM_DEBUG_KMS("%s: port %d\n", __func__, port); + phytium_dp = kzalloc(sizeof(*phytium_dp), GFP_KERNEL); + if (!phytium_dp) { + ret = -ENOMEM; + goto failed_malloc_dp; + } + + phytium_dp->dev = dev; + phytium_dp->port = port; + + if (IS_PX210(priv)) { + px210_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = PX210_DP_BASE(port); + priv->phy_access_base[port] = PX210_PHY_ACCESS_BASE(port); + } else if (IS_PE220X(priv)) { + pe220x_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = PE220X_DP_BASE(port); + priv->phy_access_base[port] = PE220X_PHY_ACCESS_BASE(port); + } + + if (phytium_dp_is_edp(phytium_dp, port)) { + phytium_dp->is_edp = true; + type = DRM_MODE_CONNECTOR_eDP; + phytium_dp_panel_init_backlight_funcs(phytium_dp); + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } else { + phytium_dp->is_edp = false; + type = DRM_MODE_CONNECTOR_DisplayPort; + } + + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + goto failed_init_dp; + } + + ret = drm_encoder_init(dev, &phytium_dp->encoder, + &phytium_encoder_funcs, + DRM_MODE_ENCODER_TMDS, "DP %d", port); + if (ret) { + DRM_ERROR("failed to initialize encoder with drm\n"); + goto failed_encoder_init; + } + drm_encoder_helper_add(&phytium_dp->encoder, &phytium_encoder_helper_funcs); + phytium_dp->encoder.possible_crtcs = phytium_get_encoder_crtc_mask(phytium_dp, port); + + phytium_dp->connector.dpms = DRM_MODE_DPMS_OFF; + phytium_dp->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + ret = drm_connector_init(dev, &phytium_dp->connector, &phytium_connector_funcs, + type); + if (ret) { + DRM_ERROR("failed to initialize connector with drm\n"); + goto failed_connector_init; + } + drm_connector_helper_add(&phytium_dp->connector, &phytium_connector_helper_funcs); + drm_connector_attach_encoder(&phytium_dp->connector, &phytium_dp->encoder); + + ret = phytium_dp_audio_codec_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize audio codec\n"); + goto failed_connector_init; + } + + phytium_dp->train_retry_count = 0; + INIT_WORK(&phytium_dp->train_retry_work, phytium_dp_train_retry_work_fn); + drm_connector_register(&phytium_dp->connector); + + return 0; +failed_connector_init: +failed_encoder_init: +failed_init_dp: + kfree(phytium_dp); +failed_malloc_dp: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_dp.h b/drivers/gpu/drm/phytium/phytium_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..ada3f42a68684243bcd45577e068815f9b540aca --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_DP_H__ +#define __PHYTIUM_DP_H__ + +#include +#include +#include +#include + +struct phytium_dp_device; + +#include "phytium_panel.h" + +struct audio_info { + int sample_rate; + int channels; + int sample_width; +}; + +struct dp_audio_n_m { + int sample_rate; + int link_rate; + u16 m; + u16 n; +}; + +struct phytium_dp_compliance { + unsigned long test_type; + uint32_t test_link_rate; + u8 test_lane_count; + bool test_active; + u8 reserve[2]; +}; + +struct phytium_dp_func { + uint8_t (*dp_hw_get_source_lane_count)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_reset)(struct phytium_dp_device *phytium_dp); + bool (*dp_hw_spread_is_enable)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_set_backlight)(struct phytium_dp_device *phytium_dp, uint32_t level); + uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_disable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_enable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweroff_panel)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweron_panel)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_init_phy)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_set_phy_lane_setting)(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set); + int (*dp_hw_set_phy_lane_and_rate)(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate); +}; + +struct phytium_dp_hpd_state { + bool hpd_event_state; + bool hpd_irq_state; + bool hpd_raw_state; + bool hpd_irq_enable; +}; + +struct phytium_dp_device { + struct drm_device *dev; + struct drm_encoder encoder; + struct drm_connector connector; + int port; + struct drm_display_mode mode; + bool link_trained; + bool detect_done; + bool is_edp; + bool reserve0; + struct drm_dp_aux aux; + unsigned char dpcd[DP_RECEIVER_CAP_SIZE]; + uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + unsigned char downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + unsigned char sink_count; + + int *source_rates; + int num_source_rates; + int sink_rates[DP_MAX_SUPPORTED_RATES]; + int num_sink_rates; + int common_rates[DP_MAX_SUPPORTED_RATES]; + int num_common_rates; + + int source_max_lane_count; + int sink_max_lane_count; + int common_max_lane_count; + + int max_link_rate; + int max_link_lane_count; + int link_rate; + int link_lane_count; + struct work_struct train_retry_work; + int train_retry_count; + uint32_t trigger_train_fail; + + unsigned char train_set[4]; + struct edid *edp_edid; + bool has_audio; + bool fast_train_support; + bool hw_spread_enable; + bool reserve[1]; + struct platform_device *audio_pdev; + struct audio_info audio_info; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct phytium_dp_compliance compliance; + struct phytium_dp_func *funcs; + struct phytium_dp_hpd_state dp_hpd_state; + + struct phytium_panel panel; + struct drm_display_mode native_mode; +}; + +union phytium_phy_tp { + struct { + /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1 + * and 3 bits for DP1.2. 
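+	 * Only the PATTERN field is consumed by the driver; the remaining bits are reserved.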
+ */ + uint8_t PATTERN :3; + uint8_t RESERVED :5; + } bits; + uint8_t raw; +}; + +/* PHY test patterns + * The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248) + */ +enum phytium_dpcd_phy_tp { + PHYTIUM_PHY_TP_NONE = 0, + PHYTIUM_PHY_TP_D10_2, + PHYTIUM_PHY_TP_SYMBOL_ERROR, + PHYTIUM_PHY_TP_PRBS7, + PHYTIUM_PHY_TP_80BIT_CUSTOM, + PHYTIUM_PHY_TP_CP2520_1, + PHYTIUM_PHY_TP_CP2520_2, + PHYTIUM_PHY_TP_CP2520_3, +}; +#define PHYTIUM_DP_AUDIO_ID (('P' << 24) + ('H' << 16) + ('Y' << 8)) +#define encoder_to_dp_device(x) container_of(x, struct phytium_dp_device, encoder) +#define connector_to_dp_device(x) container_of(x, struct phytium_dp_device, connector) +#define panel_to_dp_device(x) container_of(x, struct phytium_dp_device, panel) +#define train_retry_to_dp_device(x) container_of(x, struct phytium_dp_device, train_retry_work) +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data); +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address); + +int phytium_dp_init(struct drm_device *dev, int pipe); +int phytium_dp_resume(struct drm_device *drm_dev); +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable); +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv); +void phytium_dp_hpd_work_func(struct work_struct *work); +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate); +#endif /* __PHYTIUM_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_fb.c b/drivers/gpu/drm/phytium/phytium_fb.c new file mode 100644 index 0000000000000000000000000000000000000000..879065964729e79326c2ecf4db6196ed70ea1f0b --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static int +phytium_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, + unsigned int *handle) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + + return drm_gem_handle_create(file_priv, &phytium_fb->phytium_gem_obj[0]->base, handle); +} + +static void phytium_fb_destroy(struct drm_framebuffer *fb) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + int i, num_planes; + struct drm_gem_object *obj = NULL; + const struct drm_format_info *info; + + info = drm_format_info(fb->format->format); + num_planes = info ? 
info->num_planes : 1; + + for (i = 0; i < num_planes; i++) { + obj = &phytium_fb->phytium_gem_obj[i]->base; + if (obj) + drm_gem_object_put(obj); + } + + drm_framebuffer_cleanup(fb); + kfree(phytium_fb); +} + +static struct drm_framebuffer_funcs viv_fb_funcs = { + .create_handle = phytium_fb_create_handle, + .destroy = phytium_fb_destroy, +}; + +struct phytium_framebuffer * +phytium_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, + struct phytium_gem_object **phytium_gem_obj, unsigned int num_planes) +{ + struct phytium_framebuffer *phytium_fb; + int ret = 0, i; + + phytium_fb = kzalloc(sizeof(*phytium_fb), GFP_KERNEL); + if (!phytium_fb) + return ERR_PTR(-ENOMEM); + + drm_helper_mode_fill_fb_struct(dev, &phytium_fb->base, mode_cmd); + + ret = drm_framebuffer_init(dev, &phytium_fb->base, &viv_fb_funcs); + + if (ret) { + DRM_ERROR("Failed to initialize framebuffer: %d\n", ret); + kfree(phytium_fb); + return ERR_PTR(ret); + } + + for (i = 0; i < num_planes; i++) { + phytium_fb->phytium_gem_obj[i] = phytium_gem_obj[i]; + phytium_fb->base.obj[i] = &phytium_gem_obj[i]->base; + } + return phytium_fb; +} + +struct drm_framebuffer * +phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + int ret = 0, i, num_planes; + struct drm_gem_object *obj; + unsigned int hsub, vsub, size; + struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE] = {0}; + struct phytium_framebuffer *phytium_fb; + struct phytium_display_private *priv = dev->dev_private; + const struct drm_format_info *info; + + info = drm_format_info(mode_cmd->pixel_format); + hsub = info ? info->hsub : 1; + vsub = info ? info->vsub : 1; + num_planes = info ? info->num_planes : 1; + num_planes = min(num_planes, PHYTIUM_FORMAT_MAX_PLANE); + + for (i = 0; i < num_planes; i++) { + unsigned int height = mode_cmd->height / (i ? vsub : 1); + + size = height * mode_cmd->pitches[i] + mode_cmd->offsets[i]; + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); + if (!obj) { + DRM_ERROR("Failed to lookup GEM object\n"); + ret = -ENXIO; + goto error; + } + + if (obj->size < size) { + drm_gem_object_put(obj); + ret = -EINVAL; + goto error; + } + + phytium_gem_obj[i] = to_phytium_gem_obj(obj); + + ret = priv->dc_hw_fb_format_check(mode_cmd, i); + if (ret < 0) + goto error; + } + + phytium_fb = phytium_fb_alloc(dev, mode_cmd, phytium_gem_obj, i); + if (IS_ERR(phytium_fb)) { + DRM_DEBUG_KMS("phytium_fb_alloc failed\n"); + ret = PTR_ERR(phytium_fb); + goto error; + } + + return &phytium_fb->base; +error: + for (i--; i >= 0; i--) + drm_gem_object_put(&phytium_gem_obj[i]->base); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_fb.h b/drivers/gpu/drm/phytium/phytium_fb.h new file mode 100644 index 0000000000000000000000000000000000000000..e096aa30ccb508143ae8940a483eaf64aded4da1 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */
+
+#ifndef __PHYTIUM_FB_H__
+#define __PHYTIUM_FB_H__
+
+#include
+
+struct phytium_framebuffer {
+	struct drm_framebuffer base;
+	struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE];
+};
+
+#define to_phytium_framebuffer(fb) container_of(fb, struct phytium_framebuffer, base)
+
+struct phytium_framebuffer *phytium_fb_alloc(struct drm_device *dev,
+					const struct drm_mode_fb_cmd2 *mode_cmd,
+					struct phytium_gem_object **phytium_gem_obj,
+					unsigned int num_planes);
+
+struct drm_framebuffer *phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+					const struct drm_mode_fb_cmd2 *mode_cmd);
+#endif /* __PHYTIUM_FB_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.c b/drivers/gpu/drm/phytium/phytium_fbdev.c
new file mode 100644
index 0000000000000000000000000000000000000000..e929ad281724ade0b47f826bb537b594d7a7af45
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_fbdev.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include
+#include
+#include
+#include
+#include "phytium_display_drv.h"
+#include "phytium_gem.h"
+#include "phytium_fb.h"
+
+
+#define PHYTIUM_MAX_CONNECTOR	1
+#define helper_to_drm_private(x) container_of(x, struct phytium_display_private, fbdev_helper)
+
+static void phytium_fbdev_destroy(struct fb_info *info)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct phytium_display_private *priv = helper_to_drm_private(helper);
+
+	phytium_gem_free_object(&priv->fbdev_phytium_gem->base);
+}
+
+static int phytium_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct phytium_display_private *priv = helper_to_drm_private(helper);
+
+	return phytium_gem_mmap_obj(&priv->fbdev_phytium_gem->base, vma);
+}
+
+static const struct fb_ops phytium_fbdev_ops = {
+	.owner = THIS_MODULE,
+	DRM_FB_HELPER_DEFAULT_OPS,
+	.fb_mmap = phytium_fbdev_mmap,
+	FB_DEFAULT_IOMEM_OPS,
+	.fb_destroy = phytium_fbdev_destroy,
+};
+
+static int
+phytium_drm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
+{
+	struct phytium_display_private *priv = helper_to_drm_private(helper);
+	struct drm_device *dev = helper->dev;
+	unsigned int bytes_per_pixel;
+	struct drm_mode_fb_cmd2 mode_cmd = {0};
+	struct phytium_framebuffer *phytium_fb = NULL;
+	struct fb_info *fbi = NULL;
+	struct drm_framebuffer *fb = NULL;
+	size_t size = 0;
+	int ret = 0;
+	unsigned long offset;
+
+	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel, 128);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+	size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height);
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret < 0) {
+		DRM_ERROR("failed to get mutex lock\n");
+		return ret;
+	}
+
+	priv->fbdev_phytium_gem = phytium_gem_create_object(dev, size);
+	if (IS_ERR(priv->fbdev_phytium_gem)) {
+		DRM_ERROR("failed to create gem object\n");
+		mutex_unlock(&dev->struct_mutex);
+		return PTR_ERR(priv->fbdev_phytium_gem);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	fbi = drm_fb_helper_alloc_info(helper);
+	if (IS_ERR(fbi)) {
+		DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.\n");
+		ret = PTR_ERR(fbi);
+		goto out;
+	}
+
+	phytium_fb = phytium_fb_alloc(dev, &mode_cmd, &priv->fbdev_phytium_gem, 1);
+	if (IS_ERR(phytium_fb)) {
+		DRM_DEV_ERROR(dev->dev, "Failed to alloc DRM framebuffer.\n");
+		ret = PTR_ERR(phytium_fb);
+		goto out;
+	}
+
+	helper->fb = &(phytium_fb->base);
+	fbi->par = helper;
+	fbi->fbops = &phytium_fbdev_ops;
+
+	fb = helper->fb;
+	drm_fb_helper_fill_info(fbi, helper, sizes);
+
+	offset = fbi->var.xoffset * bytes_per_pixel;
+	offset += fbi->var.yoffset * fb->pitches[0];
+	fbi->screen_base = priv->fbdev_phytium_gem->vaddr + offset;
+	fbi->screen_size = priv->fbdev_phytium_gem->base.size;
+	fbi->fix.smem_len = priv->fbdev_phytium_gem->base.size;
+	DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%pa offset=%ld size=%zu\n", fb->width, fb->height,
+		      fb->format->depth, &priv->fbdev_phytium_gem->iova, offset, size);
+	fbi->skip_vt_switch = true;
+
+	return 0;
+out:
+	phytium_gem_free_object(&priv->fbdev_phytium_gem->base);
+	return ret;
+}
+
+static const struct drm_fb_helper_funcs phytium_drm_fb_helper_funcs = {
+	.fb_probe = phytium_drm_fbdev_create,
+};
+
+int phytium_drm_fbdev_init(struct drm_device *dev)
+{
+	struct phytium_display_private *priv = dev->dev_private;
+	struct drm_fb_helper *helper;
+	int ret;
+
+	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+		return -EINVAL;
+
+	helper = &priv->fbdev_helper;
+	drm_fb_helper_prepare(dev, helper, 32, &phytium_drm_fb_helper_funcs);
+
+	ret = drm_fb_helper_init(dev, helper);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dev->dev, "Failed to initialize drm fb helper -ret %d\n", ret);
+		return ret;
+	}
+
+	ret = drm_fb_helper_initial_config(helper);
+	if (ret < 0) {
+		drm_fb_helper_fini(helper);
+		return ret;
+	}
+
+	return 0;
+}
+
+void phytium_drm_fbdev_fini(struct drm_device *dev)
+{
+	struct phytium_display_private *priv = dev->dev_private;
+	struct drm_fb_helper *helper;
+
+	helper = &priv->fbdev_helper;
+	drm_fb_helper_unregister_info(helper);
+
+	if (helper->fb)
+		drm_framebuffer_put(helper->fb);
+
+	drm_fb_helper_fini(helper);
+}
diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.h b/drivers/gpu/drm/phytium/phytium_fbdev.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe352557a4f9d14ef037dc6170215b49c5e760e3
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_fbdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#ifndef _PHYTIUM_FBDEV_H
+#define _PHYTIUM_FBDEV_H
+
+int phytium_drm_fbdev_init(struct drm_device *dev);
+void phytium_drm_fbdev_fini(struct drm_device *dev);
+
+#endif /* _PHYTIUM_FBDEV_H */
diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c
new file mode 100644
index 0000000000000000000000000000000000000000..da873a9de811ff10f311cf60f50a1235d9b35833
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_gem.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_gem.h" + +#define VRAM_POOL_ALLOC_ORDER 12 + +int phytium_memory_pool_alloc(struct phytium_display_private *priv, void **pvaddr, + phys_addr_t *phys_addr, uint64_t size) +{ + unsigned long vaddr; + + vaddr = gen_pool_alloc(priv->memory_pool, size); + if (!vaddr) + return -ENOMEM; + + *phys_addr = gen_pool_virt_to_phys(priv->memory_pool, vaddr); + + *pvaddr = (void *)vaddr; + return 0; +} + +void phytium_memory_pool_free(struct phytium_display_private *priv, void *vaddr, uint64_t size) +{ + gen_pool_free(priv->memory_pool, (unsigned long)vaddr, size); +} + +int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv) +{ + int ret = 0; + + priv->memory_pool = gen_pool_create(VRAM_POOL_ALLOC_ORDER, -1); + if (priv->memory_pool == NULL) { + DRM_ERROR("fail to create memory pool\n"); + ret = -1; + goto failed_create_pool; + } + + ret = gen_pool_add_virt(priv->memory_pool, (unsigned long)priv->pool_virt_addr, + priv->pool_phys_addr, priv->pool_size, -1); + if (ret) { + DRM_ERROR("fail to add vram pool\n"); + ret = -1; + goto failed_add_pool_virt; + } + + return 0; + +failed_add_pool_virt: + gen_pool_destroy(priv->memory_pool); + +failed_create_pool: + return ret; +} + +void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv) +{ + gen_pool_destroy(priv->memory_pool); +} + +struct sg_table * +phytium_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct sg_table *sgt; + struct drm_device *dev = obj->dev; + int ret; + struct page *page = NULL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + DRM_DEBUG_KMS("malloc sgt fail\n"); + return ERR_PTR(-ENOMEM); + } + + if ((phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_WC) || + (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_DEVICE) || + (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT)) { + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (ret) { + DRM_ERROR("failed to allocate sg\n"); + goto sgt_free; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + sg_set_page(sgt->sgl, page, PAGE_ALIGN(phytium_gem_obj->size), 0); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_get_sgtable_attrs(dev->dev, sgt, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, phytium_gem_obj->size, + DMA_ATTR_WRITE_COMBINE); + if (ret) { + DRM_ERROR("failed to allocate sgt, %d\n", ret); + goto sgt_free; + } + } + + return sgt; +sgt_free: + kfree(sgt); + return ERR_PTR(ret); +} + +struct drm_gem_object * +phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct scatterlist *s; + dma_addr_t expected; + int ret, i; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto failed_malloc; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, attach->dmabuf->size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + expected = sg_dma_address(sgt->sgl); + for_each_sg(sgt->sgl, s, sgt->nents, i) { + if (sg_dma_address(s) != expected) { + DRM_ERROR("sg_table is not contiguous"); + ret = -EINVAL; + goto failed_check_continue; + } 
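+		/* the next segment must start exactly where this one ends */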
+		expected = sg_dma_address(s) + sg_dma_len(s);
+	}
+
+	phytium_gem_obj->iova = sg_dma_address(sgt->sgl);
+	phytium_gem_obj->sgt = sgt;
+
+	return &phytium_gem_obj->base;
+failed_check_continue:
+	drm_gem_object_release(&phytium_gem_obj->base);
+failed_object_init:
+	kfree(phytium_gem_obj);
+failed_malloc:
+	return ERR_PTR(ret);
+}
+
+int phytium_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+	struct phytium_gem_object *phytium_obj = to_phytium_gem_obj(obj);
+
+	iosys_map_set_vaddr(map, phytium_obj->vaddr);
+
+	return 0;
+}
+
+void phytium_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+
+}
+
+static void phytium_dma_callback(void *callback_param)
+{
+	struct completion *comp = callback_param;
+
+	complete(comp);
+}
+
+int phytium_dma_transfer(struct drm_device *drm_dev, int dev_to_mem, void *addr,
+			 dma_addr_t iova, uint64_t size)
+{
+	struct phytium_display_private *priv = drm_dev->dev_private;
+	struct dma_chan *dma_chan = priv->dma_chan;
+	struct sg_table st;
+	struct scatterlist *sgl;
+	int ret = 0, timeout;
+	uint32_t nents, i;
+	struct dma_slave_config cfg = {0};
+	struct dma_async_tx_descriptor *desc;
+	struct completion comp;
+	enum dma_data_direction dir;
+	size_t min = 0;
+
+	nents = DIV_ROUND_UP(size, PAGE_SIZE);
+	ret = sg_alloc_table(&st, nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to allocate sg_table\n");
+		ret = -ENOMEM;
+		goto failed_sg_alloc_table;
+	}
+
+	for_each_sg(st.sgl, sgl, st.nents, i) {
+		min = min_t(size_t, size, PAGE_SIZE - offset_in_page(addr));
+		sg_set_page(sgl, vmalloc_to_page(addr), min, offset_in_page(addr));
+		addr += min;
+		size -= min;
+	}
+
+	memset(&cfg, 0, sizeof(cfg));
+	if (dev_to_mem) {
+		cfg.direction = DMA_DEV_TO_MEM;
+		cfg.src_addr = iova;
+		cfg.dst_addr = 0;
+		dir = DMA_FROM_DEVICE;
+	} else {
+		cfg.direction = DMA_MEM_TO_DEV;
+		cfg.src_addr = 0;
+		cfg.dst_addr = iova;
+		dir = DMA_TO_DEVICE;
+	}
+
+	dmaengine_slave_config(dma_chan, &cfg);
+
+	nents = dma_map_sg(dma_chan->device->dev, st.sgl, st.nents, dir);
+	if (!nents) {
+		DRM_DEV_ERROR(drm_dev->dev, "failed to dma_map_sg for dmaengine\n");
+		ret = -EINVAL;
+		goto failed_dma_map_sg;
+	}
+	st.nents = nents;
+	dma_sync_sg_for_device(dma_chan->device->dev, st.sgl, st.nents, dir);
+
+	sgl = st.sgl;
+	desc = dmaengine_prep_slave_sg(dma_chan,
+				       st.sgl,
+				       st.nents,
+				       cfg.direction,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc) {
+		DRM_DEV_ERROR(drm_dev->dev, "failed to dmaengine_prep_slave_sg\n");
+		ret = -EINVAL;
+		goto failed_prep_slave_sg;
+	}
+	init_completion(&comp);
+	desc->callback = phytium_dma_callback;
+	desc->callback_param = &comp;
+
+	dmaengine_submit(desc);
+	dma_async_issue_pending(dma_chan);
+
+	timeout = wait_for_completion_timeout(&comp, 2 * HZ);
+	if (timeout == 0) {
+		DRM_DEV_ERROR(drm_dev->dev, "wait for dma callback timeout\n");
+		ret = -EIO;
+	}
+	dma_sync_sg_for_cpu(dma_chan->device->dev, st.sgl, st.nents, dir);
+
+failed_prep_slave_sg:
+	dma_unmap_sg(dma_chan->device->dev, st.sgl, st.nents, dir);
+failed_dma_map_sg:
+	sg_free_table(&st);
+failed_sg_alloc_table:
+	return ret;
+}
+
+int phytium_gem_suspend(struct drm_device *drm_dev)
+{
+	struct phytium_display_private *priv = drm_dev->dev_private;
+	struct phytium_gem_object *phytium_gem_obj = NULL;
+	int ret = 0;
+
+	list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) {
+		if ((phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_WC) &&
+		    (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_DEVICE))
+			continue;
+
+		phytium_gem_obj->vaddr_save =
vmalloc(phytium_gem_obj->size); + if (!phytium_gem_obj->vaddr_save) + goto malloc_failed; + + if (priv->dma_inited) + ret = phytium_dma_transfer(drm_dev, 1, phytium_gem_obj->vaddr_save, + phytium_gem_obj->iova, phytium_gem_obj->size); + + if ((!priv->dma_inited) || ret) + memcpy(phytium_gem_obj->vaddr_save, phytium_gem_obj->vaddr, + phytium_gem_obj->size); + } + + return 0; +malloc_failed: + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if ((phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_WC) && + (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_DEVICE)) + continue; + + if (phytium_gem_obj->vaddr_save) { + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } + } + return -ENOMEM; +} + +void phytium_gem_resume(struct drm_device *drm_dev) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if ((phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_WC) && + (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_DEVICE)) + continue; + + memcpy(phytium_gem_obj->vaddr, phytium_gem_obj->vaddr_save, phytium_gem_obj->size); + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } +} + +void phytium_gem_free_object(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct drm_device *dev = obj->dev; + struct phytium_display_private *priv = dev->dev_private; + uint64_t size = phytium_gem_obj->size; + + DRM_DEBUG_KMS("free phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + if (phytium_gem_obj->vaddr) { + if ((phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_WC) || + (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_DEVICE)) { + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + dma_unmap_page(dev->dev, phytium_gem_obj->iova, size, DMA_TO_DEVICE); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + dma_free_attrs(dev->dev, size, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, 0); + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] -= size; + } + list_del(&phytium_gem_obj->list); + } else if (obj->import_attach) + drm_prime_gem_destroy(obj, phytium_gem_obj->sgt); + drm_gem_object_release(obj); + kfree(phytium_gem_obj); +} + +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret = 0; + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + unsigned long pfn = PHYS_PFN(phytium_gem_obj->phys_addr); + /* + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map + * the whole buffer. 
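+	 * The mapping itself is established below according to the buffer's memory type.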
+ */ + vm_flags_clear(vma, VM_PFNMAP); + vma->vm_pgoff = 0; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_WC) { + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_DEVICE) { + vma->vm_page_prot = pgprot_device(vma->vm_page_prot); + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_mmap_attrs(obj->dev->dev, vma, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, vma->vm_end - vma->vm_start, 0); + } + if (ret) + drm_gem_vm_close(vma); + + return ret; +} + +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret = 0; + + ret = drm_gem_mmap(filp, vma); + if (ret < 0) + return ret; + + return phytium_gem_mmap_obj(vma->vm_private_data, vma); +} + +static const struct vm_operations_struct phytium_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs phytium_drm_gem_object_funcs = { + .free = phytium_gem_free_object, + .get_sg_table = phytium_gem_prime_get_sg_table, + .vmap = phytium_gem_prime_vmap, + .vunmap = phytium_gem_prime_vunmap, + .vm_ops = &phytium_vm_ops, +}; + +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct phytium_display_private *priv = dev->dev_private; + struct page *page = NULL; + int ret = 0; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto error; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + if (priv->support_memory_type & (MEMORY_TYPE_VRAM_WC | MEMORY_TYPE_VRAM_DEVICE)) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate vram buffer with size %lx\n", size); + goto failed_dma_alloc; + } + phytium_gem_obj->iova = phytium_gem_obj->phys_addr; + phytium_gem_obj->memory_type = priv->support_memory_type; + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate carveout memory with size %lx\n", size); + goto failed_dma_alloc; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + phytium_gem_obj->iova = dma_map_page(dev->dev, page, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, phytium_gem_obj->iova)) { + DRM_ERROR("fail to dma map carveout memory with size %lx\n", size); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT; + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_UNIFIED) { + 
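/* no VRAM or carveout pool available, fall back to coherent system memory */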
phytium_gem_obj->vaddr = dma_alloc_attrs(dev->dev, size, &phytium_gem_obj->iova, + GFP_KERNEL, 0); + if (!phytium_gem_obj->vaddr) { + DRM_ERROR("fail to allocate unified buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = MEMORY_TYPE_SYSTEM_UNIFIED; + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] += size; + } else { + DRM_ERROR("fail to allocate buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + + phytium_gem_obj->base.funcs = &phytium_drm_gem_object_funcs; + + phytium_gem_obj->size = size; + list_add_tail(&phytium_gem_obj->list, &priv->gem_list_head); + DRM_DEBUG_KMS("phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + return phytium_gem_obj; + +failed_dma_alloc: + drm_gem_object_put(&phytium_gem_obj->base); + + return ERR_PTR(ret); +failed_object_init: + kfree(phytium_gem_obj); +error: + return ERR_PTR(ret); +} + +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + int size = 0; + struct phytium_gem_object *phytium_gem_obj = NULL; + int ret = 0; + + args->pitch = ALIGN(args->width*DIV_ROUND_UP(args->bpp, 8), 128); + args->size = args->pitch * args->height; + size = PAGE_ALIGN(args->size); + phytium_gem_obj = phytium_gem_create_object(dev, size); + if (IS_ERR(phytium_gem_obj)) + return PTR_ERR(phytium_gem_obj); + ret = drm_gem_handle_create(file, &phytium_gem_obj->base, &args->handle); + if (ret) { + DRM_ERROR("failed to drm_gem_handle_create\n"); + goto failed_gem_handle; + } + drm_gem_object_put(&phytium_gem_obj->base); + + return 0; +failed_gem_handle: + phytium_gem_free_object(&phytium_gem_obj->base); + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_gem.h b/drivers/gpu/drm/phytium/phytium_gem.h new file mode 100644 index 0000000000000000000000000000000000000000..17c438e6e63c9f37cee1948e2547956d2da07afe --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */
+
+#ifndef __PHYTIUM_GEM_H__
+#define __PHYTIUM_GEM_H__
+
+#include
+
+struct phytium_gem_object {
+	struct drm_gem_object base;
+	phys_addr_t phys_addr;
+	dma_addr_t iova;
+	void *vaddr;
+	unsigned long size;
+	struct sg_table *sgt;
+	char memory_type;
+	char reserve[3];
+	struct list_head list;
+	void *vaddr_save;
+};
+
+#define to_phytium_gem_obj(obj) container_of(obj, struct phytium_gem_object, base)
+
+int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv);
+void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv);
+int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma);
+int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+void phytium_gem_free_object(struct drm_gem_object *obj);
+struct sg_table *phytium_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *phytium_gem_prime_import_sg_table(struct drm_device *dev,
+		struct dma_buf_attachment *attach, struct sg_table *sgt);
+int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, unsigned int handle);
+struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size);
+int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+		struct drm_mode_create_dumb *args);
+int phytium_gem_suspend(struct drm_device *drm_dev);
+void phytium_gem_resume(struct drm_device *drm_dev);
+#endif /* __PHYTIUM_GEM_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_panel.c b/drivers/gpu/drm/phytium/phytium_panel.c
new file mode 100644
index 0000000000000000000000000000000000000000..1cd266e868b37a9e376885776da5223ed75a75e0
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_panel.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include
+#include
+#include
+#include "phytium_display_drv.h"
+#include "phytium_dp.h"
+#include "phytium_panel.h"
+
+static int
+phytium_dp_aux_set_backlight(struct phytium_panel *panel, unsigned int level)
+{
+	struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel);
+	unsigned char vals[2] = { 0x0 };
+
+	vals[0] = level;
+	if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
+		vals[0] = (level & 0xFF00) >> 8;
+		vals[1] = (level & 0xFF);
+	}
+
+	if (drm_dp_dpcd_write(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+			      vals, sizeof(vals)) < 0) {
+		DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static unsigned int phytium_dp_aux_get_backlight(struct phytium_panel *panel)
+{
+	unsigned char read_val[2] = { 0x0 };
+	unsigned int level = 0;
+	struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel);
+
+	if (drm_dp_dpcd_read(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+			     &read_val, sizeof(read_val)) < 0) {
+		DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+			      DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+		return 0;
+	}
+
+	level = read_val[0];
+	if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+		level = (read_val[0] << 8 | read_val[1]);
+
+	return level;
+}
+
+static void set_aux_backlight_enable(struct phytium_panel *panel, bool enable)
+{
+	u8 reg_val = 0;
+	struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel);
+
+	if (!(phytium_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP))
+		return;
+
+	if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+			      &reg_val) < 0) {
+		DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+			      DP_EDP_DISPLAY_CONTROL_REGISTER);
+		return;
+	}
+
+	if (enable)
+		reg_val |= DP_EDP_BACKLIGHT_ENABLE;
+	else
+		reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
+
+	if (drm_dp_dpcd_writeb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+			       reg_val) != 1) {
+		DRM_DEBUG_KMS("Failed to %s aux backlight\n",
+			      enable ?
"enable" : "disable"); + } +} + +static void phytium_dp_aux_enable_backlight(struct phytium_panel *panel) +{ + unsigned char dpcd_buf, new_dpcd_buf, edp_backlight_mode; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (drm_dp_dpcd_readb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_MODE_SET_REGISTER); + return; + } + + new_dpcd_buf = dpcd_buf; + edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + + switch (edp_backlight_mode) { + case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: + new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + break; + + /* Do nothing when it is already DPCD mode */ + case DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD: + default: + break; + } + + if (new_dpcd_buf != dpcd_buf) { + if (drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); + } + } + + set_aux_backlight_enable(panel, true); + phytium_dp_aux_set_backlight(panel, panel->level); +} + +static void phytium_dp_aux_disable_backlight(struct phytium_panel *panel) +{ + set_aux_backlight_enable(panel, false); +} + +static void phytium_dp_aux_setup_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + phytium_dp->panel.max = 0xFFFF; + else + phytium_dp->panel.max = 0xFF; + + phytium_dp->panel.min = 0; + phytium_dp->panel.level = phytium_dp_aux_get_backlight(panel); + phytium_dp->panel.backlight_enabled = (phytium_dp->panel.level != 0); +} + +static void phytium_dp_hw_poweron_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweron_panel(phytium_dp); +} + +static void phytium_dp_hw_poweroff_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweroff_panel(phytium_dp); +} + +static int +phytium_dp_hw_set_backlight(struct phytium_panel *panel, uint32_t level) +{ + int ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, level); + + return ret; +} + +static uint32_t phytium_dp_hw_get_backlight(struct phytium_panel *panel) +{ + uint32_t ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_get_backlight(phytium_dp); + + return ret; +} + +static void phytium_dp_hw_enable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, phytium_dp->panel.level); + phytium_dp->funcs->dp_hw_enable_backlight(phytium_dp); +} + +static void phytium_dp_hw_disable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_disable_backlight(phytium_dp); +} + +static void phytium_dp_hw_setup_backlight(struct phytium_panel *panel) +{ + struct drm_device *dev = panel->dev; + struct phytium_display_private *priv = dev->dev_private; + + panel->max = priv->info.backlight_max; + panel->min = 0; + panel->level = phytium_dp_hw_get_backlight(panel); +} + +void 
phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp)
+{
+	if (phytium_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
+	    (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) &&
+	    !(phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
+		DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
+		phytium_dp->panel.setup_backlight = phytium_dp_aux_setup_backlight;
+		phytium_dp->panel.enable_backlight = phytium_dp_aux_enable_backlight;
+		phytium_dp->panel.disable_backlight = phytium_dp_aux_disable_backlight;
+		phytium_dp->panel.set_backlight = phytium_dp_aux_set_backlight;
+		phytium_dp->panel.get_backlight = phytium_dp_aux_get_backlight;
+	} else {
+		DRM_DEBUG_KMS("SE Backlight Control Supported!\n");
+		phytium_dp->panel.setup_backlight = phytium_dp_hw_setup_backlight;
+		phytium_dp->panel.enable_backlight = phytium_dp_hw_enable_backlight;
+		phytium_dp->panel.disable_backlight = phytium_dp_hw_disable_backlight;
+		phytium_dp->panel.set_backlight = phytium_dp_hw_set_backlight;
+		phytium_dp->panel.get_backlight = phytium_dp_hw_get_backlight;
+	}
+	phytium_dp->panel.poweron = phytium_dp_hw_poweron_panel;
+	phytium_dp->panel.poweroff = phytium_dp_hw_poweroff_panel;
+	mutex_init(&phytium_dp->panel.panel_lock);
+	phytium_dp->panel.dev = phytium_dp->dev;
+
+	/* Upper limits from eDP 1.3 spec */
+	phytium_dp->panel.panel_power_up_delay = 210;	/* t1_t3 */
+	phytium_dp->panel.backlight_on_delay = 50;	/* t7 */
+	phytium_dp->panel.backlight_off_delay = 50;	/* t9 */
+	phytium_dp->panel.panel_power_down_delay = 0;	/* t10 */
+	phytium_dp->panel.panel_power_cycle_delay = 510;	/* t11 + t12 */
+}
+
+void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp)
+{
+	phytium_dp->panel.setup_backlight = NULL;
+	phytium_dp->panel.enable_backlight = NULL;
+	phytium_dp->panel.disable_backlight = NULL;
+	phytium_dp->panel.set_backlight = NULL;
+	phytium_dp->panel.get_backlight = NULL;
+	phytium_dp->panel.poweron = NULL;
+	phytium_dp->panel.poweroff = NULL;
+}
+
+void phytium_panel_enable_backlight(struct phytium_panel *panel)
+{
+	if (panel->enable_backlight) {
+		mutex_lock(&panel->panel_lock);
+		msleep(panel->backlight_on_delay);
+		panel->enable_backlight(panel);
+		panel->backlight_enabled = true;
+		mutex_unlock(&panel->panel_lock);
+	}
+}
+
+void phytium_panel_disable_backlight(struct phytium_panel *panel)
+{
+	if (panel->disable_backlight) {
+		mutex_lock(&panel->panel_lock);
+		panel->disable_backlight(panel);
+		panel->backlight_enabled = false;
+		msleep(panel->backlight_off_delay);
+		mutex_unlock(&panel->panel_lock);
+	}
+}
+
+void phytium_panel_poweron(struct phytium_panel *panel)
+{
+	if (panel->poweron) {
+		mutex_lock(&panel->panel_lock);
+		panel->poweron(panel);
+		panel->power_enabled = true;
+		msleep(panel->panel_power_up_delay);
+		mutex_unlock(&panel->panel_lock);
+	}
+}
+
+void phytium_panel_poweroff(struct phytium_panel *panel)
+{
+	if (panel->poweroff) {
+		mutex_lock(&panel->panel_lock);
+		msleep(panel->panel_power_down_delay);
+		panel->poweroff(panel);
+		panel->power_enabled = false;
+		mutex_unlock(&panel->panel_lock);
+	}
+}
+
+/* Linearly rescale source_val from [source_min, source_max] to
+ * [target_min, target_max], rounding to the nearest step.
+ */
+static uint32_t phytium_scale(uint32_t source_val,
+			      uint32_t source_min, uint32_t source_max,
+			      uint32_t target_min, uint32_t target_max)
+{
+	uint64_t target_val;
+
+	WARN_ON(source_min > source_max);
+	WARN_ON(target_min > target_max);
+
+	/* defensive */
+	source_val = clamp(source_val, source_min, source_max);
+
+	/* avoid overflows */
+	target_val = mul_u32_u32(source_val - source_min, target_max -
target_min); + target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); + target_val += target_min; + + return target_val; +} + +static inline uint32_t +phytium_scale_hw_to_user(struct phytium_panel *panel, uint32_t hw_level, uint32_t user_max) +{ + return phytium_scale(hw_level, panel->min, panel->max, + 0, user_max); +} + +static inline uint32_t +phytium_scale_user_to_hw(struct phytium_panel *panel, u32 user_level, u32 user_max) +{ + return phytium_scale(user_level, 0, user_max, + panel->min, panel->max); +} + +static int phytium_backlight_device_update_status(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret = 0; + + DRM_DEBUG_KMS("updating phytium_backlight, brightness=%d/%d\n", + bd->props.brightness, bd->props.max_brightness); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + hw_level = phytium_scale_user_to_hw(panel, bd->props.brightness, bd->props.max_brightness); + + if ((panel->set_backlight) && (panel->backlight_enabled)) { + mutex_lock(&panel->panel_lock); + ret = panel->set_backlight(panel, hw_level); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return ret; +} + +static int phytium_backlight_device_get_brightness(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + if (panel->get_backlight && panel->backlight_enabled) { + mutex_lock(&panel->panel_lock); + hw_level = panel->get_backlight(panel); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + ret = phytium_scale_hw_to_user(panel, hw_level, bd->props.max_brightness); + DRM_DEBUG_KMS("get phytium_backlight, brightness=%d/%d\n", + ret, bd->props.max_brightness); + + return ret; +} + +static const struct backlight_ops phytium_backlight_device_ops = { + .update_status = phytium_backlight_device_update_status, + .get_brightness = phytium_backlight_device_get_brightness, +}; + +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp) +{ + struct backlight_properties props; + char bl_name[16]; + + if (phytium_dp->panel.setup_backlight) { + mutex_lock(&phytium_dp->panel.panel_lock); + phytium_dp->panel.setup_backlight(&phytium_dp->panel); + mutex_unlock(&phytium_dp->panel.panel_lock); + } else { + return -EINVAL; + } + + memset(&props, 0, sizeof(props)); + props.max_brightness = PHYTIUM_MAX_BL_LEVEL; + props.type = BACKLIGHT_RAW; + props.brightness = phytium_scale_hw_to_user(&phytium_dp->panel, phytium_dp->panel.level, + props.max_brightness); + snprintf(bl_name, sizeof(bl_name), "phytium_bl%d", phytium_dp->port); + + phytium_dp->panel.bl_device = + backlight_device_register(bl_name, + phytium_dp->connector.kdev, + &phytium_dp->panel, + &phytium_backlight_device_ops, + &props); + + if (IS_ERR(phytium_dp->panel.bl_device)) { + DRM_ERROR("Failed to register backlight: %ld\n", + PTR_ERR(phytium_dp->panel.bl_device)); + phytium_dp->panel.bl_device = NULL; + return -ENODEV; + } + + DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n", + phytium_dp->connector.name); + + return 0; +} + +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->panel.bl_device) { + 
backlight_device_unregister(phytium_dp->panel.bl_device);
+		phytium_dp->panel.bl_device = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/phytium/phytium_panel.h b/drivers/gpu/drm/phytium/phytium_panel.h
new file mode 100644
index 0000000000000000000000000000000000000000..f9e2c7e65896f2c129c5d5274f64beb091a73b23
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_panel.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_PANEL_H__
+#define __PHYTIUM_PANEL_H__
+#include "phytium_dp.h"
+
+#define PHYTIUM_MAX_BL_LEVEL	0xFF
+
+struct phytium_panel {
+	struct drm_device *dev;
+	bool backlight_enabled;
+	bool power_enabled;
+	bool reserve1[2];
+	unsigned int min;
+	unsigned int level;
+	unsigned int max;
+	struct backlight_device *bl_device;
+	void (*setup_backlight)(struct phytium_panel *panel);
+	uint32_t (*get_backlight)(struct phytium_panel *panel);
+	int (*set_backlight)(struct phytium_panel *panel, uint32_t level);
+	void (*disable_backlight)(struct phytium_panel *panel);
+	void (*enable_backlight)(struct phytium_panel *panel);
+	void (*poweron)(struct phytium_panel *panel);
+	void (*poweroff)(struct phytium_panel *panel);
+	struct mutex panel_lock;
+	uint32_t panel_power_up_delay;
+	uint32_t backlight_on_delay;
+	uint32_t backlight_off_delay;
+	uint32_t panel_power_down_delay;
+	uint32_t panel_power_cycle_delay;
+};
+
+void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp);
+void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp);
+int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp);
+void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp);
+void phytium_panel_enable_backlight(struct phytium_panel *panel);
+void phytium_panel_disable_backlight(struct phytium_panel *panel);
+void phytium_panel_poweron(struct phytium_panel *panel);
+void phytium_panel_poweroff(struct phytium_panel *panel);
+
+#endif /* __PHYTIUM_PANEL_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c
new file mode 100644
index 0000000000000000000000000000000000000000..e849830540a123dd652718376a7ee6ce7878fca2
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_pci.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/delay.h>	/* assumed: headers for the PCI, DMA-engine and DRM aperture APIs used below */
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <drm/drm_aperture.h>
+#include "phytium_display_drv.h"
+#include "phytium_pci.h"
+#include "phytium_dp.h"
+#include "phytium_gem.h"
+#include "px210_dc.h"
+#include "px210_dp.h"
+#include "pe220x_dc.h"
+#include "pe220x_dp.h"
+
+int dc_msi_enable;
+module_param(dc_msi_enable, int, 0644);
+MODULE_PARM_DESC(dc_msi_enable, "Enable DC msi interrupt (0-disabled; 1-enabled; default-0)");
+
+void phytium_pci_vram_hw_init(struct phytium_display_private *priv)
+{
+	struct phytium_pci_private *pci_priv = to_pci_priv(priv);
+
+	pci_priv->dc_hw_vram_init(priv, priv->pool_phys_addr, priv->pool_size);
+}
+
+/* Walk up to the root port and check whether it is the 0x1db7:0x5c01
+ * host controller; behind it the VRAM BAR is mapped as device memory
+ * rather than write-combined.
+ */
+static bool phytium_pci_host_is_5c01(struct pci_bus *bus)
+{
+	struct pci_bus *child = bus;
+	struct pci_dev *root = NULL;
+
+	while (child->parent && child->parent->parent)
+		child = child->parent;
+
+	root = child->self;
+	if (root && (root->vendor == 0x1db7) && (root->device == 0x5c01))
+		return true;
+	return false;
+}
+
+int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private *priv)
+{
+	int ret = 0;
+
+	priv->pool_phys_addr = pci_resource_start(pdev, 2);
+	priv->pool_size = pci_resource_len(pdev, 2);
+	if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) {
+		if ((pdev->device == 0xdc3e) && phytium_pci_host_is_5c01(pdev->bus)) {
+			priv->pool_virt_addr = devm_ioremap(&pdev->dev, priv->pool_phys_addr,
+							    priv->pool_size);
+			priv->support_memory_type = MEMORY_TYPE_VRAM_DEVICE;
+		} else {
+			priv->pool_virt_addr = devm_ioremap_wc(&pdev->dev, priv->pool_phys_addr,
+							       priv->pool_size);
+			priv->support_memory_type = MEMORY_TYPE_VRAM_WC;
+		}
+		if (priv->pool_virt_addr == NULL) {
+			DRM_ERROR("pci vram ioremap fail, addr:0x%llx, size:0x%llx\n",
+				  priv->pool_phys_addr, priv->pool_size);
+			ret = -EINVAL;
+			goto failed_ioremap;
+		}
+		ret = phytium_memory_pool_init(&pdev->dev, priv);
+		if (ret)
+			goto failed_init_memory_pool;
+
+		priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = priv->pool_size;
+		priv->vram_hw_init = phytium_pci_vram_hw_init;
+	} else {
+		DRM_DEBUG_KMS("vram not supported\n");
+		priv->pool_virt_addr = NULL;
+		priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = 0;
+		priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED;
+		priv->vram_hw_init = NULL;
+	}
+
+	return 0;
+
+failed_init_memory_pool:
+	devm_iounmap(&pdev->dev, priv->pool_virt_addr);
+failed_ioremap:
+	return ret;
+}
+
+void phytium_pci_vram_fini(struct pci_dev *pdev, struct phytium_display_private *priv)
+{
+	if ((priv->support_memory_type == MEMORY_TYPE_VRAM_WC) ||
+	    (priv->support_memory_type == MEMORY_TYPE_VRAM_DEVICE)) {
+		phytium_memory_pool_fini(&pdev->dev, priv);
+		devm_iounmap(&pdev->dev, priv->pool_virt_addr);
+	}
+}
+
+static bool phytium_pci_dma_chan_filter(struct dma_chan *chan, void *param)
+{
+	struct phytium_dma_slave *s = param;
+
+	if (s->dma_dev != chan->device->dev)
+		return false;
+
+	return s->chan_id == chan->chan_id;
+}
+
+int phytium_pci_dma_init(struct phytium_display_private *priv)
+{
+	struct pci_dev *dma_dev, *gpu_dev;
+	struct drm_device *drm_dev = priv->dev;
+	dma_cap_mask_t mask;
+	struct phytium_dma_slave s;
+	int ret = 0;
+	u16 cmd;
+
+	/* check px210 gpu enable */
+	gpu_dev = pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc20, NULL);
+	if (!gpu_dev) {
+		DRM_INFO("failed to get gpu_dev\n");
+		ret = -ENODEV;
+		goto failed;
+	}
+
+	pci_read_config_word(gpu_dev, PCI_COMMAND, &cmd);
+	pci_dev_put(gpu_dev);
+	if (!(cmd & PCI_COMMAND_MASTER)) {
+		DRM_INFO("gpu_dev master is disabled\n");
+		ret = -ENODEV;
+		goto failed;
+	}
+
+	dma_dev =
pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc3c, NULL);
+	if (!dma_dev) {
+		DRM_INFO("failed to get dma_dev\n");
+		ret = -ENODEV;
+		goto failed;
+	}
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	s.dma_dev = &dma_dev->dev;
+	s.chan_id = 2;
+	priv->dma_chan = dma_request_channel(mask, phytium_pci_dma_chan_filter, &s);
+	pci_dev_put(dma_dev);
+	if (!priv->dma_chan) {
+		DRM_DEV_ERROR(drm_dev->dev, "failed to request dma chan\n");
+		ret = -EBUSY;
+		goto failed;
+	}
+	priv->dma_inited = 1;
+
+failed:
+	return ret;
+}
+
+void phytium_pci_dma_fini(struct phytium_display_private *priv)
+{
+	if (priv->dma_inited)
+		dma_release_channel(priv->dma_chan);
+	priv->dma_inited = 0;
+	priv->dma_chan = NULL;
+}
+
+static struct phytium_display_private *
+phytium_pci_private_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct phytium_display_private *priv = NULL;
+	struct phytium_pci_private *pci_priv = NULL;
+	struct phytium_device_info *phytium_info = (struct phytium_device_info *)ent->driver_data;
+	int i = 0;
+	resource_size_t io_addr, io_size;
+
+	pci_priv = devm_kzalloc(&pdev->dev, sizeof(*pci_priv), GFP_KERNEL);
+	if (!pci_priv) {
+		DRM_ERROR("no memory to allocate for phytium_pci_private\n");
+		goto failed_malloc_priv;
+	}
+
+	priv = &pci_priv->base;
+	phytium_display_private_init(priv, dev);
+
+	memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info));
+	DRM_DEBUG_KMS("priv->info.num_pipes: %d\n", priv->info.num_pipes);
+	priv->info.pipe_mask = ((pdev->subsystem_device >> PIPE_MASK_SHIFT) & PIPE_MASK_MASK);
+	priv->info.edp_mask = ((pdev->subsystem_device >> EDP_MASK_SHIFT) & EDP_MASK_MASK);
+	priv->info.num_pipes = 0;
+	for_each_pipe_masked(priv, i)
+		priv->info.num_pipes++;
+	if (priv->info.num_pipes == 0) {
+		DRM_ERROR("num_pipes is zero, so exit init\n");
+		goto failed_init_numpipe;
+	}
+
+	io_addr = pci_resource_start(pdev, 0);
+	io_size = pci_resource_len(pdev, 0);
+	priv->regs = ioremap(io_addr, io_size);
+	if (priv->regs == NULL) {
+		DRM_ERROR("pci bar0 ioremap fail, addr:0x%llx, size:0x%llx\n", io_addr, io_size);
+		goto failed_ioremap;
+	}
+
+	priv->irq = pdev->irq;
+	if (IS_PX210(priv)) {
+		pci_priv->dc_hw_vram_init = px210_dc_hw_vram_init;
+		priv->dc_hw_clear_msi_irq = px210_dc_hw_clear_msi_irq;
+		priv->dc_hw_fb_format_check = px210_dc_hw_fb_format_check;
+	} else if (IS_PE220X(priv)) {
+		pci_priv->dc_hw_vram_init = pe220x_dc_hw_vram_init;
+		priv->dc_hw_clear_msi_irq = NULL;
+		priv->dc_hw_fb_format_check = pe220x_dc_hw_fb_format_check;
+	}
+
+	return priv;
+
+failed_ioremap:
+failed_init_numpipe:
+	devm_kfree(&pdev->dev, pci_priv);
+failed_malloc_priv:
+	return NULL;
+}
+
+static void
+phytium_pci_private_fini(struct pci_dev *pdev, struct phytium_display_private *priv)
+{
+	struct phytium_pci_private *pci_priv = to_pci_priv(priv);
+
+	if (priv->regs)
+		iounmap(priv->regs);
+
+	devm_kfree(&pdev->dev, pci_priv);
+}
+
+static int phytium_remove_conflicting_framebuffers(struct pci_dev *pdev)
+{
+	resource_size_t base, size;
+
+	base = pci_resource_start(pdev, 2);
+	size = pci_resource_len(pdev, 2);
+
+	return drm_aperture_remove_conflicting_framebuffers(base, size,
+							    &phytium_display_drm_driver);
+}
+
+static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct phytium_display_private *priv = NULL;
+	struct drm_device *dev = NULL;
+	int ret = 0;
+
+	ret = phytium_remove_conflicting_framebuffers(pdev);
+	if (ret) {
+		DRM_ERROR("failed to remove
conflicting phytium framebuffers\n");
+		return ret;
+	}
+
+	dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev);
+	if (IS_ERR(dev)) {
+		DRM_ERROR("failed to allocate drm_device\n");
+		return PTR_ERR(dev);
+	}
+	pci_set_drvdata(pdev, dev);
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		DRM_ERROR("failed to enable pci device\n");
+		goto failed_enable_device;
+	}
+	pci_set_master(pdev);
+
+	if (dc_msi_enable) {
+		ret = pci_enable_msi(pdev);
+		if (ret)
+			DRM_ERROR("failed to enable pci msi\n");
+	}
+
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+
+	priv = phytium_pci_private_init(pdev, ent);
+	if (priv) {
+		dev->dev_private = priv;
+	} else {
+		ret = -ENOMEM;
+		goto failed_pci_private_init;
+	}
+
+	ret = phytium_pci_vram_init(pdev, priv);
+	if (ret) {
+		DRM_ERROR("failed to init pci vram\n");
+		goto failed_pci_vram_init;
+	}
+
+	ret = drm_dev_register(dev, 0);
+	if (ret) {
+		DRM_ERROR("failed to register drm dev\n");
+		goto failed_register_drm;
+	}
+
+	phytium_dp_hpd_irq_setup(dev, true);
+
+	return 0;
+
+failed_register_drm:
+	phytium_pci_vram_fini(pdev, priv);
+failed_pci_vram_init:
+	phytium_pci_private_fini(pdev, priv);
+failed_pci_private_init:
+	if (pdev->msi_enabled)
+		pci_disable_msi(pdev);
+	pci_disable_device(pdev);
+failed_enable_device:
+	pci_set_drvdata(pdev, NULL);
+	drm_dev_put(dev);
+
+	return ret;
+}
+
+static void phytium_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct phytium_display_private *priv = dev->dev_private;
+
+	phytium_dp_hpd_irq_setup(dev, false);
+	cancel_work_sync(&priv->hotplug_work);
+	drm_dev_unregister(dev);
+	phytium_pci_vram_fini(pdev, priv);
+	phytium_pci_private_fini(pdev, priv);
+	if (pdev->msi_enabled)
+		pci_disable_msi(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	drm_dev_put(dev);
+}
+
+static void phytium_pci_shutdown(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct phytium_display_private *priv = dev->dev_private;
+
+	priv->display_shutdown(dev);
+}
+
+static int phytium_pci_pm_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct phytium_display_private *priv = drm_dev->dev_private;
+	int ret = 0;
+
+	if (IS_PX210(priv))
+		phytium_pci_dma_init(priv);
+
+	ret = priv->display_pm_suspend(drm_dev);
+	if (ret < 0)
+		goto out;
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	udelay(200);
+
+out:
+	return ret;
+}
+
+static int phytium_pci_pm_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct phytium_display_private *priv = drm_dev->dev_private;
+	int ret = 0;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+	pci_set_master(pdev);
+
+	ret = priv->display_pm_resume(drm_dev);
+	if (IS_PX210(priv))
+		phytium_pci_dma_fini(priv);
+
+	return ret;
+}
+
+static const struct dev_pm_ops phytium_pci_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(phytium_pci_pm_suspend, phytium_pci_pm_resume)
+};
+
+static const struct phytium_device_info px210_info = {
+	.platform_mask = BIT(PHYTIUM_PLATFORM_PX210),
+	.total_pipes = 3,
+	.crtc_clock_max = PX210_DC_PIX_CLOCK_MAX,
+	.hdisplay_max = PX210_DC_HDISPLAY_MAX,
+	.vdisplay_max = PX210_DC_VDISPLAY_MAX,
+	.address_mask = PX210_DC_ADDRESS_MASK,
+	.backlight_max = PX210_DP_BACKLIGHT_MAX,
+};
+
+static const struct phytium_device_info pe220x_info = {
+	.platform_mask =
BIT(PHYTIUM_PLATFORM_PE220X),
+	.total_pipes = 2,
+	.crtc_clock_max = PE220X_DC_PIX_CLOCK_MAX,
+	.hdisplay_max = PE220X_DC_HDISPLAY_MAX,
+	.vdisplay_max = PE220X_DC_VDISPLAY_MAX,
+	.address_mask = PE220X_DC_ADDRESS_MASK,
+	.backlight_max = PE220X_DP_BACKLIGHT_MAX,
+};
+
+static const struct pci_device_id phytium_display_pci_ids[] = {
+	{ PCI_VDEVICE(PHYTIUM, 0xdc22), (kernel_ulong_t)&px210_info },
+	{ PCI_VDEVICE(PHYTIUM, 0xdc3e), (kernel_ulong_t)&pe220x_info },
+	{ /* End: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, phytium_display_pci_ids);
+
+struct pci_driver phytium_pci_driver = {
+	.name = "phytium_display_pci",
+	.id_table = phytium_display_pci_ids,
+	.probe = phytium_pci_probe,
+	.remove = phytium_pci_remove,
+	.shutdown = phytium_pci_shutdown,
+	.driver.pm = &phytium_pci_pm_ops,
+};
diff --git a/drivers/gpu/drm/phytium/phytium_pci.h b/drivers/gpu/drm/phytium/phytium_pci.h
new file mode 100644
index 0000000000000000000000000000000000000000..92b08fcb045296bc8507960ecd73366994254ec4
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_pci.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_PCI_H__
+#define __PHYTIUM_PCI_H__
+
+#include "phytium_display_drv.h"
+
+struct phytium_pci_private {
+	struct phytium_display_private base;
+	void (*dc_hw_vram_init)(struct phytium_display_private *priv, resource_size_t vram_addr,
+				resource_size_t vram_size);
+};
+
+struct phytium_dma_slave {
+	struct device *dma_dev;
+	u32 chan_id;
+};
+
+#define to_pci_priv(priv) container_of(priv, struct phytium_pci_private, base)
+
+extern struct pci_driver phytium_pci_driver;
+#endif /* __PHYTIUM_PCI_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_plane.c b/drivers/gpu/drm/phytium/phytium_plane.c
new file mode 100644
index 0000000000000000000000000000000000000000..9f35d57cd726a34af80cacaf382facc086116b95
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_plane.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/slab.h>	/* assumed: headers for the atomic-helper, fourcc and GEM-helper APIs used below */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_plane_helper.h>
+
+#include <drm/drm_gem_atomic_helper.h>
+
+#include "phytium_display_drv.h"
+#include "phytium_plane.h"
+#include "phytium_fb.h"
+#include "phytium_gem.h"
+#include "phytium_crtc.h"
+#include "px210_dc.h"
+#include "pe220x_dc.h"
+#include "phytium_reg.h"
+
+#define PHYTIUM_CURS_W_SIZE 32
+#define PHYTIUM_CURS_H_SIZE 32
+
+void phytium_plane_destroy(struct drm_plane *plane)
+{
+	struct phytium_plane *phytium_plane = to_phytium_plane(plane);
+
+	drm_plane_cleanup(plane);
+	kfree(phytium_plane);
+}
+
+/**
+ * phytium_plane_atomic_get_property - fetch plane property value
+ * @plane: plane to fetch property for
+ * @state: state containing the property value
+ * @property: property to look up
+ * @val: pointer to write property value into
+ *
+ * The DRM core does not store shadow copies of properties for
+ * atomic-capable drivers. This entrypoint is used to fetch
+ * the current value of a driver-specific plane property.
+ *
+ * Returns 0 on success, -EINVAL on unrecognized properties
+ */
+static int
+phytium_plane_atomic_get_property(struct drm_plane *plane,
+				  const struct drm_plane_state *state,
+				  struct drm_property *property,
+				  uint64_t *val)
+{
+	DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name);
+	return -EINVAL;
+}
+
+/**
+ * phytium_plane_atomic_set_property - set plane property value
+ * @plane: plane to set property for
+ * @state: state to update property value in
+ * @property: property to set
+ * @val: value to set property to
+ *
+ * Writes the specified property value for a plane into the provided atomic
+ * state object.
+ *
+ * Returns 0 on success, -EINVAL on unrecognized properties
+ */
+int
+phytium_plane_atomic_set_property(struct drm_plane *plane,
+				  struct drm_plane_state *state,
+				  struct drm_property *property,
+				  uint64_t val)
+{
+	DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name);
+	return -EINVAL;
+}
+
+struct drm_plane_state *
+phytium_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+	struct drm_plane_state *state = NULL;
+	struct phytium_plane_state *phytium_state = NULL;
+
+	phytium_state = kmemdup(plane->state, sizeof(*phytium_state), GFP_KERNEL);
+	if (!phytium_state)
+		return NULL;
+
+	state = &phytium_state->base;
+	if (state->fb)
+		drm_framebuffer_get(state->fb);
+
+	state->fence = NULL;
+	state->commit = NULL;
+
+	return state;
+}
+
+void
+phytium_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state)
+{
+	struct phytium_plane_state *phytium_state = to_phytium_plane_state(state);
+
+	__drm_atomic_helper_plane_destroy_state(state);
+	kfree(phytium_state);
+}
+
+const struct drm_plane_funcs phytium_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = phytium_plane_destroy,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_get_property = phytium_plane_atomic_get_property,
+	.atomic_set_property = phytium_plane_atomic_set_property,
+	.atomic_duplicate_state = phytium_plane_atomic_duplicate_state,
+	.atomic_destroy_state = phytium_plane_atomic_destroy_state,
+};
+
+static int
+phytium_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *atomic_state)
+{
+	struct drm_plane_state *state = drm_atomic_get_new_plane_state(atomic_state,
+									plane);
+	struct drm_device *dev = plane->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_crtc_state *crtc_state;
+	struct phytium_crtc *phytium_crtc;
+	int src_x, src_y, src_w, src_h;
+	unsigned long base_offset;
+
+	if ((!fb) || (!crtc))
+		return 0;
+
+	phytium_crtc = to_phytium_crtc(crtc);
+	crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+		src_w = state->src_w >> 16;
+		src_h = state->src_h >> 16;
+		if (phytium_crtc->scale_enable)
+			return -EINVAL;
+		if ((src_w != PHYTIUM_CURS_W_SIZE) || (src_h != PHYTIUM_CURS_H_SIZE)) {
+			DRM_INFO("Invalid cursor size(%d, %d)\n", src_w, src_h);
+			return -EINVAL;
+		}
+	} else if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+		src_x = state->src_x >> 16;
+		src_y = state->src_y >> 16;
+		src_w = state->src_w >> 16;
+		src_h = state->src_h >> 16;
+
+		base_offset = src_x * fb->format->cpp[0] + src_y * fb->pitches[0];
+		if (base_offset & (priv->info.address_mask)) {
+			DRM_ERROR("fb base address is not aligned to 0x%lx bytes\n",
+				  priv->info.address_mask);
+			return -EINVAL;
+		}
+
+		if (src_w != state->crtc_w || src_h != state->crtc_h) {
+			DRM_ERROR("scaling not supported: crtc_w(0x%x)/h(0x%x) src_w(0x%x)/h(0x%x)\n",
+				  state->crtc_w, state->crtc_h, src_w, src_h);
+			return -EINVAL;
+		}
+
+		if ((state->crtc_x < 0) || (state->crtc_y < 0)) {
+			DRM_ERROR("crtc_x(0x%x)/y(0x%x) of drm plane state is invalid\n",
+				  state->crtc_x, state->crtc_y);
+			return -EINVAL;
+		}
+
+		if ((state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
+		    || (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)) {
+			DRM_ERROR("plane out of crtc region\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void phytium_dc_get_plane_parameter(struct drm_plane *plane)
+{
+	struct phytium_plane *phytium_plane = to_phytium_plane(plane);
+	struct drm_framebuffer *fb = plane->state->fb;
+	struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb);
+	struct phytium_gem_object *phytium_gem_obj = NULL;
+	int i, num_planes = 0;
+	const struct drm_format_info *info;
+
+	info = drm_format_info(fb->format->format);
+	num_planes = info ? info->num_planes : 1;
+
+	for (i = 0; i < num_planes; i++) {
+		phytium_gem_obj = phytium_fb->phytium_gem_obj[i];
+		phytium_plane->iova[i] = phytium_gem_obj->iova + fb->offsets[i];
+		phytium_plane->size[i] = phytium_gem_obj->size - fb->offsets[i];
+
+		if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC)
+			phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE0;
+		else if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC)
+			phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE3;
+		else
+			phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR;
+
+		if (i == 0) {
+			switch (fb->format->format) {
+			case DRM_FORMAT_ARGB2101010:
+			case DRM_FORMAT_ABGR2101010:
+			case DRM_FORMAT_RGBA1010102:
+			case DRM_FORMAT_BGRA1010102:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB2101010;
+				break;
+
+			case DRM_FORMAT_ARGB8888:
+			case DRM_FORMAT_ABGR8888:
+			case DRM_FORMAT_RGBA8888:
+			case DRM_FORMAT_BGRA8888:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB8888;
+				break;
+
+			case DRM_FORMAT_XRGB8888:
+			case DRM_FORMAT_XBGR8888:
+			case DRM_FORMAT_RGBX8888:
+			case DRM_FORMAT_BGRX8888:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB8888;
+				break;
+
+			case DRM_FORMAT_ARGB4444:
+			case DRM_FORMAT_ABGR4444:
+			case DRM_FORMAT_RGBA4444:
+			case DRM_FORMAT_BGRA4444:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB4444;
+				break;
+
+			case DRM_FORMAT_XRGB4444:
+			case DRM_FORMAT_XBGR4444:
+			case DRM_FORMAT_RGBX4444:
+			case DRM_FORMAT_BGRX4444:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB4444;
+				break;
+
+			case DRM_FORMAT_ARGB1555:
+			case DRM_FORMAT_ABGR1555:
+			case DRM_FORMAT_RGBA5551:
+			case DRM_FORMAT_BGRA5551:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB1555;
+				break;
+
+			case DRM_FORMAT_XRGB1555:
+			case DRM_FORMAT_XBGR1555:
+			case DRM_FORMAT_RGBX5551:
+			case DRM_FORMAT_BGRX5551:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB1555;
+				break;
+
+			case DRM_FORMAT_RGB565:
+			case DRM_FORMAT_BGR565:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_RGB565;
+				break;
+
+			case DRM_FORMAT_YUYV:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_YUYV;
+				break;
+
+			case DRM_FORMAT_UYVY:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_UYVY;
+				break;
+			case DRM_FORMAT_NV16:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_NV16;
+				break;
+			case DRM_FORMAT_NV12:
+				phytium_plane->format = FRAMEBUFFER_FORMAT_NV12;
+				break;
+			case DRM_FORMAT_NV21:
+				/* NV21 is programmed with the NV12 hardware format */
+				phytium_plane->format = FRAMEBUFFER_FORMAT_NV12;
+				break;
+			default:
+				DRM_ERROR("unsupported pixel format (format = %d)\n",
+					  fb->format->format);
+				return;
+			}
+
+			switch (fb->format->format) {
+			case DRM_FORMAT_ARGB2101010:
+			case DRM_FORMAT_ARGB8888:
+			case DRM_FORMAT_XRGB8888:
+			case DRM_FORMAT_ARGB4444:
+			case DRM_FORMAT_XRGB4444:
+			case DRM_FORMAT_ARGB1555:
+			case DRM_FORMAT_XRGB1555:
+			case DRM_FORMAT_RGB565:
+				phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB;
+				phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE;
+				break;
+
+			case DRM_FORMAT_ABGR2101010:
+			case DRM_FORMAT_ABGR8888:
+			case DRM_FORMAT_XBGR8888:
+			case DRM_FORMAT_ABGR4444:
+			case DRM_FORMAT_XBGR4444:
+			case DRM_FORMAT_ABGR1555:
+			case DRM_FORMAT_XBGR1555:
+			case DRM_FORMAT_BGR565:
+				phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ABGR;
+				phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE;
+				break;
+
+			case DRM_FORMAT_RGBA1010102:
+			case DRM_FORMAT_RGBA8888:
+			case DRM_FORMAT_RGBX8888:
+			case DRM_FORMAT_RGBA4444:
+			case DRM_FORMAT_RGBX4444:
+			case DRM_FORMAT_RGBA5551:
+			case DRM_FORMAT_RGBX5551:
+				phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_RGBA;
+				phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE;
+				break;
+
+			case DRM_FORMAT_BGRA1010102:
+			case DRM_FORMAT_BGRA8888:
+			case DRM_FORMAT_BGRX8888:
+			case DRM_FORMAT_BGRA4444:
+			case DRM_FORMAT_BGRX4444:
+			case DRM_FORMAT_BGRA5551:
+			case DRM_FORMAT_BGRX5551:
+				phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_BGRA;
+				phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE;
+				break;
+
+			case DRM_FORMAT_YUYV:
+			case DRM_FORMAT_UYVY:
+			case DRM_FORMAT_NV16:
+			case DRM_FORMAT_NV12:
+			case DRM_FORMAT_NV21:
+				phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB;
+				phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE;
+				break;
+
+			default:
+				DRM_ERROR("unsupported pixel format (format = %d)\n",
+					  fb->format->format);
+				return;
+			}
+		}
+	}
+}
+
+static void phytium_dc_primary_plane_update(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct phytium_plane *phytium_plane = to_phytium_plane(plane);
+	struct drm_framebuffer *fb = plane->state->fb;
+	int phys_pipe = phytium_plane->phys_pipe;
+	int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h;
+	unsigned long base_offset;
+	int config;
+
+	src_x = plane->state->src_x >> 16;
+	src_y = plane->state->src_y >> 16;
+	crtc_x = plane->state->crtc_x;
+	crtc_y = plane->state->crtc_y;
+	crtc_w = plane->state->crtc_w;
+	crtc_h = plane->state->crtc_h;
+
+	if (phytium_plane->dc_hw_update_dcreq)
+		phytium_plane->dc_hw_update_dcreq(plane);
+	phytium_plane->dc_hw_update_primary_hi_addr(plane);
+
+	/* config dc */
+	/* Y */
+	base_offset = src_x * fb->format->cpp[0] + src_y * fb->pitches[0];
+	phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK,
+			   priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS);
+	phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128),
+			   priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE);
+
+	/* U */
+	phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff,
+			   priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS);
+	phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128),
+			   priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_STRIDE);
+
+	/* V */
+	phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff,
+			   priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS);
+	phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128),
+			   priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_STRIDE);
+
+	/* size */
+	phytium_writel_reg(priv, (crtc_w &
WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) << HEIGHT_SHIFT), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_SIZE); + /* config */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config &= ~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); + config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); + config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config |= (phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_SWIZZLE_MASK << FRAMEBUFFER_SWIZZLE_SHIFT); + config |= (phytium_plane->swizzle << FRAMEBUFFER_SWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); + config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); + config &= (~FRAMEBUFFER_CLEAR); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); +} + +static void phytium_dc_cursor_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int config; + unsigned long iova; + + phytium_plane->enable = 1; + phytium_plane->cursor_hot_x = fb->hot_x; + phytium_plane->cursor_hot_y = fb->hot_y; + phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; + phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; + + config = CURSOR_FORMAT_ARGB8888 | + ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | + ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + + config = ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | + ((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_LOCATION); + iova = phytium_plane->iova[0]; + phytium_writel_reg(priv, iova & 0xffffffff, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_ADDRESS); + if (phytium_plane->dc_hw_update_cursor_hi_addr) + phytium_plane->dc_hw_update_cursor_hi_addr(plane, iova); +} + +static void phytium_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_framebuffer *fb, *old_fb; + + DRM_DEBUG_KMS("update plane: type=%d\n", plane->type); + if (!plane->state->crtc || !plane->state->fb) + return; + + fb = plane->state->fb; + old_fb = old_state->fb; + + if (fb) + drm_framebuffer_get(fb); + if (old_fb) + drm_framebuffer_put(old_fb); + + phytium_dc_get_plane_parameter(plane); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + phytium_dc_primary_plane_update(plane); + else if (plane->type == DRM_PLANE_TYPE_CURSOR) + phytium_dc_cursor_plane_update(plane); +} + +static void phytium_plane_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + struct drm_framebuffer *old_fb; + + old_fb = old_state->fb; + if (old_fb) + drm_framebuffer_put(old_fb); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) 
{ + phytium_writel_reg(priv, CLEAR_VALUE_RED, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + phytium_writel_reg(priv, CURSOR_FORMAT_DISABLED, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + } +} + +const struct drm_plane_helper_funcs phytium_plane_helper_funcs = { + .prepare_fb = drm_gem_plane_helper_prepare_fb, + .atomic_check = phytium_plane_atomic_check, + .atomic_update = phytium_plane_atomic_update, + .atomic_disable = phytium_plane_atomic_disable, +}; + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_plane->dc_hw_plane_get_format = px210_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = px210_dc_hw_update_dcreq; + phytium_plane->dc_hw_update_primary_hi_addr = px210_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } else if (IS_PE220X(priv)) { + phytium_plane->dc_hw_plane_get_format = pe220x_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = pe220x_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, formats, + format_count, + format_modifiers, + DRM_PLANE_TYPE_PRIMARY, "primary %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} + +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = 
kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_plane->dc_hw_plane_get_format = px210_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } else if (IS_PE220X(priv)) { + phytium_plane->dc_hw_plane_get_format = pe220x_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = pe220x_dc_hw_update_cursor_hi_addr; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, + formats, format_count, + format_modifiers, + DRM_PLANE_TYPE_CURSOR, "cursor %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_plane.h b/drivers/gpu/drm/phytium/phytium_plane.h new file mode 100644 index 0000000000000000000000000000000000000000..5527579b03489313f9ef8223e76cfcc2a3bf3def --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_plane.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */
+
+#ifndef __PHYTIUM_PLANE_H__
+#define __PHYTIUM_PLANE_H__
+
+struct phytium_plane {
+	struct drm_plane base;
+	int phys_pipe;
+	unsigned long iova[PHYTIUM_FORMAT_MAX_PLANE];
+	unsigned long size[PHYTIUM_FORMAT_MAX_PLANE];
+	unsigned int format;
+	unsigned int tiling[PHYTIUM_FORMAT_MAX_PLANE];
+	unsigned int swizzle;
+	unsigned int uv_swizzle;
+	unsigned int rot_angle;
+
+	/* only for cursor */
+	bool enable;
+	bool reserve[3];
+	unsigned int cursor_x;
+	unsigned int cursor_y;
+	unsigned int cursor_hot_x;
+	unsigned int cursor_hot_y;
+
+	void (*dc_hw_plane_get_format)(const uint64_t **format_modifiers,
+				       const uint32_t **formats,
+				       uint32_t *format_count);
+	void (*dc_hw_update_dcreq)(struct drm_plane *plane);
+	void (*dc_hw_update_primary_hi_addr)(struct drm_plane *plane);
+	void (*dc_hw_update_cursor_hi_addr)(struct drm_plane *plane, uint64_t iova);
+};
+
+struct phytium_plane_state {
+	struct drm_plane_state base;
+};
+
+#define to_phytium_plane(x) container_of(x, struct phytium_plane, base)
+#define to_phytium_plane_state(x) container_of(x, struct phytium_plane_state, base)
+
+struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int pipe);
+struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int pipe);
+#endif /* __PHYTIUM_PLANE_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_platform.c b/drivers/gpu/drm/phytium/phytium_platform.c
new file mode 100644
index 0000000000000000000000000000000000000000..d28aadba7c30ebe41007a7c02654c52360ffcb76
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_platform.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Phytium display engine DRM driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/acpi.h>	/* assumed: headers for the ACPI/OF matching, MMIO and platform-device APIs used below */
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include "phytium_display_drv.h"
+#include "phytium_platform.h"
+#include "phytium_dp.h"
+#include "phytium_gem.h"
+#include "pe220x_dc.h"
+#include "pe220x_dp.h"
+
+int phytium_platform_carveout_mem_init(struct platform_device *pdev,
+				       struct phytium_display_private *priv)
+{
+	struct resource *res;
+	int ret = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res) {
+		priv->pool_size = resource_size(res);
+		priv->pool_phys_addr = res->start;
+	}
+
+	if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) {
+		priv->pool_virt_addr = ioremap_cache(priv->pool_phys_addr, priv->pool_size);
+		if (priv->pool_virt_addr == NULL) {
+			DRM_ERROR("failed to remap carveout mem(0x%llx)\n", priv->pool_phys_addr);
+			ret = -EINVAL;
+			goto failed_ioremap;
+		}
+		ret = phytium_memory_pool_init(&pdev->dev, priv);
+		if (ret)
+			goto failed_init_memory_pool;
+
+		priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = priv->pool_size;
+		priv->support_memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT;
+		priv->vram_hw_init = NULL;
+	} else {
+		DRM_DEBUG_KMS("carveout memory not supported\n");
+		priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = 0;
+		priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED;
+		priv->vram_hw_init = NULL;
+	}
+
+	return 0;
+
+failed_init_memory_pool:
+	iounmap(priv->pool_virt_addr);
+failed_ioremap:
+	return ret;
+}
+
+void phytium_platform_carveout_mem_fini(struct platform_device *pdev,
+					struct phytium_display_private *priv)
+{
+	if (priv->support_memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) {
+		phytium_memory_pool_fini(&pdev->dev, priv);
+		iounmap(priv->pool_virt_addr);
+	}
+}
+
+static struct phytium_display_private *
+phytium_platform_private_init(struct platform_device *pdev)
+{
+	struct drm_device *dev =
dev_get_drvdata(&pdev->dev);
+	struct device_node *node;
+	struct fwnode_handle *np;
+	struct phytium_display_private *priv = NULL;
+	struct phytium_platform_private *platform_priv = NULL;
+	struct phytium_device_info *phytium_info = NULL;
+	int i = 0, ret = 0;
+	struct resource *res;
+
+	platform_priv = devm_kzalloc(&pdev->dev, sizeof(*platform_priv), GFP_KERNEL);
+	if (!platform_priv) {
+		DRM_ERROR("no memory to allocate for phytium_platform_private\n");
+		goto exit;
+	}
+
+	priv = &platform_priv->base;
+	phytium_display_private_init(priv, dev);
+
+	if (pdev->dev.of_node) {
+		phytium_info = (struct phytium_device_info *)of_device_get_match_data(&pdev->dev);
+		if (!phytium_info) {
+			DRM_ERROR("failed to get dts id data(phytium_info)\n");
+			goto failed;
+		}
+
+		memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info));
+		node = pdev->dev.of_node;
+		ret = of_property_read_u8(node, "pipe_mask", &priv->info.pipe_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing pipe_mask property from dts\n");
+			goto failed;
+		}
+
+		ret = of_property_read_u8(node, "edp_mask", &priv->info.edp_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing edp_mask property from dts\n");
+			goto failed;
+		}
+	} else if (has_acpi_companion(&pdev->dev)) {
+		phytium_info = (struct phytium_device_info *)acpi_device_get_match_data(&pdev->dev);
+		if (!phytium_info) {
+			DRM_ERROR("failed to get acpi id data(phytium_info)\n");
+			goto failed;
+		}
+
+		memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info));
+		np = dev_fwnode(&(pdev->dev));
+		ret = fwnode_property_read_u8(np, "pipe_mask", &priv->info.pipe_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing pipe_mask property from acpi\n");
+			goto failed;
+		}
+		ret = fwnode_property_read_u8(np, "edp_mask", &priv->info.edp_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing edp_mask property from acpi\n");
+			goto failed;
+		}
+	}
+
+	priv->info.num_pipes = 0;
+	for_each_pipe_masked(priv, i)
+		priv->info.num_pipes++;
+	if (priv->info.num_pipes == 0) {
+		DRM_ERROR("num_pipes is zero, so exit init\n");
+		goto failed;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->regs)) {
+		DRM_ERROR("failed to remap dc registers\n");
+		goto failed;
+	}
+
+	priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		goto failed;
+	}
+
+	if (IS_PE220X(priv)) {
+		priv->dc_hw_clear_msi_irq = NULL;
+		priv->dc_hw_fb_format_check = pe220x_dc_hw_fb_format_check;
+	}
+
+	return priv;
+
+failed:
+	devm_kfree(&pdev->dev, platform_priv);
+exit:
+	return NULL;
+}
+
+static void phytium_platform_private_fini(struct platform_device *pdev)
+{
+	struct drm_device *dev = dev_get_drvdata(&pdev->dev);
+	struct phytium_display_private *priv = dev->dev_private;
+	struct phytium_platform_private *platform_priv = to_platform_priv(priv);
+
+	devm_kfree(&pdev->dev, platform_priv);
+}
+
+static int phytium_platform_probe(struct platform_device *pdev)
+{
+	struct phytium_display_private *priv = NULL;
+	struct drm_device *dev = NULL;
+	int ret = 0;
+
+	dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev);
+	if (IS_ERR(dev)) {
+		DRM_ERROR("failed to allocate drm_device\n");
+		return PTR_ERR(dev);
+	}
+
+	dev_set_drvdata(&pdev->dev, dev);
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+
+	priv = phytium_platform_private_init(pdev);
+	if (priv) {
+		dev->dev_private = priv;
+	} else {
+		ret = -ENOMEM;
+		goto failed_platform_private_init;
+	}
+
+	ret = phytium_platform_carveout_mem_init(pdev, priv);
+	if (ret) {
+		DRM_ERROR("failed to init system carveout memory\n");
+		goto failed_carveout_mem_init;
+	}
+
+	ret = drm_dev_register(dev, 0);
+	if (ret) {
+		DRM_ERROR("failed to register drm dev\n");
+		goto failed_register_drm;
+	}
+
+	phytium_dp_hpd_irq_setup(dev, true);
+
+	return 0;
+
+failed_register_drm:
+	phytium_platform_carveout_mem_fini(pdev, priv);
+failed_carveout_mem_init:
+	phytium_platform_private_fini(pdev);
+failed_platform_private_init:
+	dev_set_drvdata(&pdev->dev, NULL);
+	drm_dev_put(dev);
+	return ret;
+}
+
+static int phytium_platform_remove(struct platform_device *pdev)
+{
+	struct drm_device *dev = dev_get_drvdata(&pdev->dev);
+	struct phytium_display_private *priv = dev->dev_private;
+
+	phytium_dp_hpd_irq_setup(dev, false);
+	cancel_work_sync(&priv->hotplug_work);
+	drm_dev_unregister(dev);
+	phytium_platform_carveout_mem_fini(pdev, priv);
+	phytium_platform_private_fini(pdev);
+	dev_set_drvdata(&pdev->dev, NULL);
+	drm_dev_put(dev);
+
+	return 0;
+}
+
+static void phytium_platform_shutdown(struct platform_device *pdev)
+{
+	struct drm_device *dev = dev_get_drvdata(&pdev->dev);
+	struct phytium_display_private *priv = dev->dev_private;
+
+	priv->display_shutdown(dev);
+}
+
+static int phytium_platform_pm_suspend(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct phytium_display_private *priv = drm_dev->dev_private;
+
+	return priv->display_pm_suspend(drm_dev);
+}
+
+static int phytium_platform_pm_resume(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct phytium_display_private *priv = drm_dev->dev_private;
+
+	return priv->display_pm_resume(drm_dev);
+}
+
+static const struct dev_pm_ops phytium_platform_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(phytium_platform_pm_suspend, phytium_platform_pm_resume)
+};
+
+static const struct phytium_device_info pe220x_info = {
+	.platform_mask = BIT(PHYTIUM_PLATFORM_PE220X),
+	.total_pipes = 2,
+	.crtc_clock_max = PE220X_DC_PIX_CLOCK_MAX,
+	.hdisplay_max = PE220X_DC_HDISPLAY_MAX,
+	.vdisplay_max = PE220X_DC_VDISPLAY_MAX,
+	.address_mask = PE220X_DC_ADDRESS_MASK,
+	.backlight_max = PE220X_DP_BACKLIGHT_MAX,
+};
+
+static const struct of_device_id display_of_match[] = {
+	{
+		.compatible = "phytium,dc",
+		.data = &pe220x_info,
+	},
+	{ }
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id display_acpi_ids[] = {
+	{
+		.id = "PHYT0015",
+		.driver_data = (kernel_ulong_t)&pe220x_info,
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(acpi, display_acpi_ids);
+#else
+#define display_acpi_ids NULL
+#endif
+
+struct platform_driver phytium_platform_driver = {
+	.driver = {
+		.name = "phytium_display_platform",
+		.of_match_table = of_match_ptr(display_of_match),
+		.acpi_match_table = ACPI_PTR(display_acpi_ids),
+		.pm = &phytium_platform_pm_ops,
+	},
+	.probe = phytium_platform_probe,
+	.remove = phytium_platform_remove,
+	.shutdown = phytium_platform_shutdown,
+};
diff --git a/drivers/gpu/drm/phytium/phytium_platform.h b/drivers/gpu/drm/phytium/phytium_platform.h
new file mode 100644
index 0000000000000000000000000000000000000000..42f6570b476fbc252b5702f7571c3c45c836d1d0
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_platform.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_PLATFORM_H__
+#define __PHYTIUM_PLATFORM_H__
+
+struct phytium_platform_private {
+	struct phytium_display_private base;
+};
+
+#define to_platform_priv(priv) container_of(priv, struct phytium_platform_private, base)
+
+extern struct platform_driver phytium_platform_driver;
+
+#endif /* __PHYTIUM_PLATFORM_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h
new file mode 100644
index 0000000000000000000000000000000000000000..99ac9d4cb4d969831443d92db500eeda6af7fc62
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_reg.h
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_REG_H__
+#define __PHYTIUM_REG_H__
+
+/******************************register base******************************************/
+#define PX210_PIPE_BASE(pipe)			(0x8000 * (pipe))
+#define PX210_DC_BASE(pipe)			(PX210_PIPE_BASE(pipe) + 0x0000)
+#define PX210_DCREQ_BASE(pipe)			(PX210_PIPE_BASE(pipe) + 0x2000)
+#define PX210_DP_BASE(pipe)			(PX210_PIPE_BASE(pipe) + 0x3000)
+#define PX210_ADDRESS_TRANSFORM_BASE		0x4000
+#define PX210_PHY_ACCESS_BASE(pipe)		(PX210_PIPE_BASE(pipe) + 0x5000)
+
+#define PE220X_DC_BASE(pipe)			(0x1000 * (pipe))
+#define PE220X_DP_BASE(pipe)			(0x4000 + 0x1000 * (pipe))
+#define PE220X_ADDRESS_TRANSFORM_BASE		0x8000
+#define PE220X_PHY_ACCESS_BASE(pipe)		(0x6000 + 0x1000 * (pipe))
+/******************************register base end******************************************/
+
+/******************************dc register start******************************************/
+#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS	0x1400
+	#define ADDRESS_MASK			0xffffff80
+#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE		0x1408
+#define PHYTIUM_DC_PANEL_CONFIG			0x1418
+	#define PANEL_DATAENABLE_ENABLE		(1<<0)
+	#define PANEL_DATA_ENABLE		(1<<4)
+	#define PANEL_CLOCK_ENABLE		(1<<8)
+#define PHYTIUM_DC_HDISPLAY			0x1430
+	#define HDISPLAY_END_SHIFT		0
+	#define HDISPLAY_END_MASK		0x7fff
+	#define HDISPLAY_TOTAL_SHIFT		16
+	#define HDISPLAY_TOTAL_MASK		0x7fff
+#define PHYTIUM_DC_HSYNC			0x1438
+	#define HSYNC_START_SHIFT		0
+	#define HSYNC_START_MASK		0x7fff
+	#define HSYNC_END_SHIFT			15
+	#define HSYNC_END_MASK			0x7fff
+	#define HSYNC_PULSE_ENABLED		(1<<30)
+	#define HSYNC_NEGATIVE			(1<<31)
+#define PHYTIUM_DC_VDISPLAY			0x1440
+	#define VDISPLAY_END_SHIFT		0
+	#define VDISPLAY_END_MASK		0x7fff
+	#define VDISPLAY_TOTAL_SHIFT		16
+	#define VDISPLAY_TOTAL_MASK		0x7fff
+#define PHYTIUM_DC_VSYNC			0x1448
+	#define VSYNC_START_SHIFT		0
+	#define VSYNC_START_MASK		0x7fff
+	#define VSYNC_END_SHIFT			15
+	#define VSYNC_END_MASK			0x7fff
+	#define VSYNC_PULSE_ENABLED		(1<<30)
+	#define VSYNC_NEGATIVE			(1<<31)
+#define PHYTIUM_DC_DISPLAY_CURRENT_LOCATION	0x1450
+#define PHYTIUM_DC_GAMMA_INDEX			0x1458
+	#define GAMMA_INDEX_MAX			256
+#define PHYTIUM_DC_GAMMA_DATA			0x1460
+	#define GAMMA_BLUE_SHIFT		0
+	#define GAMMA_BLUE_MASK			0x3ff
+	#define GAMMA_GREEN_SHIFT		10
+	#define GAMMA_GREEN_MASK		0x3ff
+	#define GAMMA_RED_SHIFT			20
+	#define GAMMA_RED_MASK			0x3ff
+#define PHYTIUM_DC_CURSOR_CONFIG		0x1468
+	#define CURSOR_FORMAT_DISABLED		0x0
+	#define CURSOR_FORMAT_MASKMODE		0x3
+	#define CURSOR_FORMAT_ARGB8888		0x2
+	#define CURSOR_FORMAT_MASK		0x3
+	#define CURSOR_HOT_Y_SHIFT		8
+	#define CURSOR_HOT_Y_MASK		0x1f
+	#define CURSOR_HOT_X_SHIFT		16
+	#define CURSOR_HOT_X_MASK		0x1f
+#define PHYTIUM_DC_CURSOR_ADDRESS		0x146c
+#define PHYTIUM_DC_CURSOR_LOCATION		0x1470
+	#define CURSOR_X_SHIFT			0
+	#define CURSOR_X_MASK			0x7fff
+	#define
CURSOR_Y_SHIFT 16 + #define CURSOR_Y_MASK 0x7fff +#define PHYTIUM_DC_CURSOR_BACKGROUND 0x1474 +#define PHYTIUM_DC_CURSOR_FOREGROUND 0x1478 +#define PHYTIUM_DC_INT_STATUS 0x147c + #define INT_STATUS 0x1 +#define PHYTIUM_DC_INT_ENABLE 0x1480 + #define INT_ENABLE 0x1 + #define INT_DISABLE 0x0 + +#define PHYTIUM_DC_FRAMEBUFFER_CONFIG 0x1518 + #define FRAMEBUFFER_OUTPUT BIT(0) + #define FRAMEBUFFER_GAMMA_ENABLE BIT(2) + #define FRAMEBUFFER_VALID_PENDING BIT(3) + #define FRAMEBUFFER_RESET BIT(4) + #define FRAMEBUFFER_PROGRESS BIT(6) + #define FRAMEBUFFER_ROT_ANGLE_SHIFT (11) + #define FRAMEBUFFER_ROT_ANGLE_MASK (0x7) + #define FRAMEBUFFER_ROT_ANGLE_ROT0 (0) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_X (1) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_Y (2) + #define FRAMEBUFFER_TILE_MODE_SHIFT (17) + #define FRAMEBUFFER_TILE_MODE_MASK (0x1f) + #define FRAMEBUFFER_LINEAR 0 + #define FRAMEBUFFER_TILE_MODE0 4 + #define FRAMEBUFFER_TILE_MODE3 7 + #define FRAMEBUFFER_FORMAT_SHIFT 26 + #define FRAMEBUFFER_FORMAT_MASK 0x3f + #define FRAMEBUFFER_FORMAT_XRGB4444 0x0 + #define FRAMEBUFFER_FORMAT_ARGB4444 0x1 + #define FRAMEBUFFER_FORMAT_XRGB1555 0x2 + #define FRAMEBUFFER_FORMAT_ARGB1555 0x3 + #define FRAMEBUFFER_FORMAT_RGB565 0x4 + #define FRAMEBUFFER_FORMAT_XRGB8888 0x5 + #define FRAMEBUFFER_FORMAT_ARGB8888 0x6 + #define FRAMEBUFFER_FORMAT_YUYV 0x7 + #define FRAMEBUFFER_FORMAT_UYVY 0x8 + #define FRAMEBUFFER_FORMAT_NV12 0x11 + #define FRAMEBUFFER_FORMAT_NV16 0x12 + #define FRAMEBUFFER_FORMAT_ARGB2101010 0x16 + #define FRAMEBUFFER_SWIZZLE_SHIFT 23 + #define FRAMEBUFFER_SWIZZLE_MASK 0x3 + #define FRAMEBUFFER_SWIZZLE_ARGB 0 + #define FRAMEBUFFER_SWIZZLE_RGBA 1 + #define FRAMEBUFFER_SWIZZLE_ABGR 2 + #define FRAMEBUFFER_SWIZZLE_BGRA 3 + #define FRAMEBUFFER_UVSWIZZLE_SHIFT 25 + #define FRAMEBUFFER_UVSWIZZLE_DISABLE 0 + #define FRAMEBUFFER_UVSWIZZLE_ENABLE 1 + #define FRAMEBUFFER_CLEAR BIT(8) + #define FRAMEBUFFER_SCALE_ENABLE BIT(22) +#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG 0x1520 + #define FRAMEBUFFER_FILTER_TAP 3 + #define FRAMEBUFFER_HORIZONTAL_FILTER_TAP 3 + #define FRAMEBUFFER_TAP 0x33 +#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS 0x1530 +#define PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS 0x1538 +#define PHYTIUM_DC_OVERLAY_CONFIG 0x1540 + #define PX210_DC_OVERLAY_ENABLE BIT(24) + +#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE 0x1800 +#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE 0x1808 +#define PHYTIUM_DC_FRAMEBUFFER_SIZE 0x1810 + #define WIDTH_SHIFT 0 + #define WIDTH_MASK 0x7fff + #define HEIGHT_SHIFT 15 + #define HEIGHT_MASK 0x7fff + +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X 0x1828 + #define SCALE_FACTOR_X_MASK 0x7fffffff +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y 0x1830 + #define SCALE_FACTOR_Y_MASK 0x7fffffff + #define SCALE_FACTOR_Y_MAX 0x3 + #define SCALE_FACTOR_SRC_OFFSET 16 + +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX 0x1838 + #define HORI_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER 0x1a00 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX 0x1a08 + #define VERT_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER 0x1a10 +#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE 0x1a18 + #define CLEAR_VALUE_RED 0x00ff0000 + #define CLEAR_VALUE_GREEN 0x0000ff00 + #define CLEAR_VALUE_BLACK 0x00000000 +#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET 0x1a20 + #define INITIALOFFSET (0x8000 | (0X8000 << 16)) +#define PHYTIUM_DC_DP_CONFIG 0x1cd0 + #define OUTPUT_DP (1<<3) + #define DP_RGB666 (0x1) + #define DP_RGB888 (0x2) + #define DP_RGB101010 (0x3) +/******************************dc register 
end********************************************/ + +/******************************phy access register****************************************/ +#define PHYTIUM_PHY_ACCESS_ADDRESS 0x0000 +#define PHYTIUM_PHY_WRITE_DATA 0x0004 +#define PHYTIUM_PHY_READ_DATA 0x0008 +#define PHYTIUM_PHY_ACCESS_CTRL 0x000c + #define ACCESS_WRITE (1<<0) + #define ACCESS_READ (1<<1) +/******************************phy access register end*************************************/ + +/******************************dp register start******************************************/ +#define PHYTIUM_DP_LINK_BW_SET 0x0000 +#define PHYTIUM_DP_LANE_COUNT_SET 0x0004 +#define PHYTIUM_DP_ENHANCED_FRAME_EN 0x0008 + #define ENHANCED_FRAME_ENABLE 0x1 + #define ENHANCED_FRAME_DISABLE 0x0 +#define PHYTIUM_DP_TRAINING_PATTERN_SET 0x000c + #define TRAINING_OFF 0x0 + #define TRAINING_PATTERN_1 0x1 + #define TRAINING_PATTERN_2 0x2 + #define TRAINING_PATTERN_3 0x3 + #define TRAINING_PATTERN_4 0x4 +#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET 0x0010 + #define TEST_PATTERN_NONE 0x0 + #define TEST_PATTERN_D10_2 0x1 + #define TEST_PATTERN_SYMBOL_ERROR 0x2 + #define TEST_PATTERN_PRBS7 0x3 + #define TEST_PATTERN_80BIT_CUSTOM 0x4 + #define TEST_PATTERN_CP2520_1 0x5 + #define TEST_PATTERN_CP2520_2 0x6 + #define TEST_PATTERN_CP2520_3 0x7 + #define TEST_PATTERN_LANE_SHIFT 8 +#define PHYTIUM_DP_SCRAMBLING_DISABLE 0x0014 + #define SCRAMBLING_ENABLE 0x0 + #define SCRAMBLING_DISABLE 0x1 +#define PHYTIUM_DP_DOWNSPREAD_CTRL 0x0018 +#define PHYTIUM_DP_ALT_SCRAMBLER_RESET 0x001c +#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET 0x0020 +#define PHYTIUM_DP_DISPLAYPORT_VERSION 0x0024 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0 0x0030 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1 0x0034 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2 0x0038 +#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE 0x0080 + #define TRANSMITTER_OUTPUT_ENABLE BIT(0) + #define TRANSMITTER_OUTPUT_DISABLE 0 +#define PHYTIUM_DP_VIDEO_STREAM_ENABLE 0x0084 + #define SST_MST_SOURCE_0_ENABLE BIT(0) + #define SST_MST_SOURCE_0_ENABLE_MASK 0x1 + #define SST_MST_SOURCE_0_DISABLE 0 +#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE 0x0088 + #define SECONDARY_STREAM_ENABLE 0x1 + #define SECONDARY_STREAM_DISABLE 0x0 +#define PHYTIUM_DP_SEC_DATA_WINDOW 0x008C +#define PHYTIUM_DP_SOFT_RESET 0x0090 + #define LINK_SOFT_RESET (0x1 << 0) + #define VIDEO_SOFT_RESET (0x1 << 1) +#define PHYTIUM_INPUT_SOURCE_ENABLE 0x0094 + #define VIRTUAL_SOURCE_0_ENABLE BIT(0) + #define VIRTUAL_SOURCE_0_ENABLE_MASK 0x1 +#define PHYTIUM_DP_FORCE_SCRAMBLER_RESET 0x00C0 + #define SCRAMBLER_RESET BIT(0) +#define PHYTIUM_DP_SOURCE_CONTROL_STATUS 0x00C4 +#define PHYTIUM_DP_DATA_CONTROL 0x00C8 +#define PHYTIUM_DP_CORE_CAPABILITY 0x00F8 +#define PHYTIUM_DP_CORE_ID 0x00FC +#define PHYTIUM_DP_AUX_COMMAND 0x0100 + #define BYTE_COUNT_MASK 0xf + #define COMMAND_SHIFT 8 + #define COMMAND_MASK 0xf + #define ADDRESS_ONLY (1<<12) +#define PHYTIUM_DP_AUX_WRITE_FIFO 0x0104 +#define PHYTIUM_DP_AUX_ADDRESS 0x0108 +#define PHYTIUM_DP_AUX_CLK_DIVIDER 0x010C + #define AUX_CLK_DIVIDER 48 + #define AUX_CLK_DIVIDER_100 100 +#define PHYTIUM_DP_SINK_HPD_STATE 0x0128 + #define HPD_CONNECT 0x1 + #define HPD_DISCONNECT 0x0 +#define PHYTIUM_DP_INTERRUPT_RAW_STATUS 0x0130 + #define REPLY_TIMEOUT (1<<3) + #define DP_STATUS_REQUEST_IN_PROGRESS (1<<1) + #define HPD_STATE (0<<1) +#define PHYTIUM_DP_AUX_REPLY_DATA 0x0134 +#define PHYTIUM_DP_AUX_REPLY_CODE 0x0138 + #define AUX_NATIVE_ACK (0x0<<0) + #define AUX_NATIVE_NACK (0x1<<0) + #define AUX_NATIVE_DEFER (0x2<<0) + #define AUX_NATIVE_MASK (0x3 << 0) 
+ #define AUX_I2C_ACK (0x0<<2) + #define AUX_I2C_NACK (0x1<<2) + #define AUX_I2C_DEFER (0x2<<2) + #define AUX_I2C_MASK (0x3 << 2) +#define PHYTIUM_DP_INTERRUPT_STATUS 0x0140 + #define HPD_IRQ (1<<1) + #define HPD_EVENT (1<<0) +#define PHYTIUM_DP_INTERRUPT_MASK 0x0144 + #define HPD_IRQ_MASK (1<<1) + #define HPD_EVENT_MASK (1<<0) + #define HPD_OTHER_MASK 0x3c +#define PHYTIUM_DP_AUX_REPLY_DATA_COUNT 0x0148 +#define PHYTIUM_DP_AUX_STATUS 0x014C + #define REPLY_RECEIVED 0x1 + #define REPLY_IN_PROGRESS 0x2 + #define REQUEST_IN_PROGRESS 0x4 + #define REPLY_ERROR 0x8 +#define PHYTIUM_DP_AUX_TIMER 0x0158 +#define PHYTIUM_DP_MAIN_LINK_HTOTAL 0x0180 +#define PHYTIUM_DP_MAIN_LINK_VTOTAL 0x0184 +#define PHYTIUM_DP_MAIN_LINK_POLARITY 0x0188 + #define VSYNC_POLARITY_LOW BIT(1) + #define HSYNC_POLARITY_LOW BIT(0) +#define PHYTIUM_DP_MAIN_LINK_HSWIDTH 0x018C +#define PHYTIUM_DP_MAIN_LINK_VSWIDTH 0x0190 +#define PHYTIUM_DP_MAIN_LINK_HRES 0x0194 +#define PHYTIUM_DP_MAIN_LINK_VRES 0x0198 +#define PHYTIUM_DP_MAIN_LINK_HSTART 0x019C +#define PHYTIUM_DP_MAIN_LINK_VSTART 0x01A0 +#define PHYTIUM_DP_MAIN_LINK_MISC0 0x01A4 + #define MISC0_SYNCHRONOUS_CLOCK BIT(0) + #define MISC0_BIT_DEPTH_OFFSET 5 + #define MISC0_BIT_DEPTH_6BIT 0x0 + #define MISC0_BIT_DEPTH_8BIT 0x1 + #define MISC0_BIT_DEPTH_10BIT 0x2 + #define MISC0_COMPONENT_FORMAT_SHIFT 1 + #define MISC0_COMPONENT_FORMAT_RGB 0x0 +#define PHYTIUM_DP_MAIN_LINK_MISC1 0x01A8 +#define PHYTIUM_DP_M_VID 0x01AC +#define PHYTIUM_DP_TRANSFER_UNIT_SIZE 0x01B0 +#define PHYTIUM_DP_N_VID 0x01B4 +#define PHYTIUM_DP_USER_PIXEL_WIDTH 0x01B8 +#define PHYTIUM_DP_DATA_COUNT 0x01BC +#define PHYTIUM_DP_INTERLACED 0x01C0 +#define PHYTIUM_DP_USER_SYNC_POLARITY 0x01C4 + #define USER_ODDEVEN_POLARITY_HIGH BIT(3) + #define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) + #define USER_VSYNC_POLARITY_HIGH BIT(1) + #define USER_HSYNC_POLARITY_HIGH BIT(0) +#define PHYTIUM_DP_USER_CONTROL 0x01C8 +#define PHYTIUM_EDP_CRC_ENABLE 0x01D0 + #define SUPPORT_EDP_1_4 BIT(1) +#define PHYTIUM_EDP_CRC_RED 0x01D4 +#define PHYTIUM_EDP_CRC_GREEN 0x01D8 +#define PHYTIUM_EDP_CRC_BLUE 0x01DC +#define PHYTIUM_DP_SEC_AUDIO_ENABLE 0x0300 + #define SEC_AUDIO_ENABLE BIT(0) + #define CHANNEL_MUTE_ENABLE BIT(1) +#define PHYTIUM_DP_SEC_INPUT_SELECT 0x0304 + #define INPUT_SELECT_I2S 0x0 +#define PHYTIUM_DP_SEC_CHANNEL_COUNT 0x0308 + #define CHANNEL_2 0x2 + #define CHANNEL_2_LFE 0x3 + #define CHANNEL_5_1 0x6 + #define CHANNEL_7_1 0x7 + #define CHANNEL_MASK 0xf +#define PHYTIUM_DP_SEC_DIRECT_CLKDIV 0x030c + #define APB_CLOCK 48000000 +#define PHYTIUM_DP_SEC_MAUD 0x0318 +#define PHYTIUM_DP_SEC_NAUD 0x031c +#define PHYTIUM_DP_SEC_CLOCK_MODE 0x0320 + #define CLOCK_MODE_SYNC 0x1 +#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT 0x0340 + #define CS_SOURCE_FORMAT_DEFAULT 0x0 +#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE 0x0344 +#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ 0x0348 + #define ORIG_FREQ_32000 0xc + #define ORIG_FREQ_44100 0xf + #define ORIG_FREQ_48000 0xd + #define ORIG_FREQ_88200 0x7 + #define ORIG_FREQ_96000 0x5 + #define ORIG_FREQ_176400 0x3 + #define ORIG_FREQ_192000 0x1 + #define ORIG_FREQ_MASK 0xf + #define ORIG_FREQ_SHIFT 0 + #define WORD_LENGTH_16 0x4 + #define WORD_LENGTH_18 0x2 + #define WORD_LENGTH_20 0xc + #define WORD_LENGTH_24 0xd + #define WORD_LENGTH_MASK 0xf + #define WORD_LENGTH_SHIFT 4 +#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY 0x034c // not used + #define SAMPLING_FREQ_32000 0xc + #define SAMPLING_FREQ_44100 0x0 + #define SAMPLING_FREQ_48000 0x4 + #define SAMPLING_FREQ_88200 0x1 + #define SAMPLING_FREQ_96000 0x5 + 
#define SAMPLING_FREQ_176400 0x3 + #define SAMPLING_FREQ_192000 0x7 + #define SAMPLING_FREQ_MASK 0xf + #define SAMPLING_FREQ_SHIFT 4 +#define PHYTIUM_DP_SEC_CHANNEL_MAP 0x035C + #define CHANNEL_MAP_DEFAULT 0x87654321 +/******************************dp register end********************************************/ + +#endif /* __PHYTIUM_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_dc.c b/drivers/gpu/drm/phytium/px210_dc.c new file mode 100644 index 0000000000000000000000000000000000000000..ae022f9fe3fb3418cad9cacf747ae5953f83750b --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dc.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "px210_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static const unsigned int px210_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, +}; + +static uint64_t px210_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC, + DRM_FORMAT_MOD_INVALID +}; + +static uint64_t px210_cursor_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int px210_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void px210_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + + config = phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SIZE); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SIZE); + + config = phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR); + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, PX210_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, PX210_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, group_offset, PX210_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void px210_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe) +{ + phytium_writel_reg(priv, MSI_CLEAR, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_MSI_CLEAR); +} 
+ +void px210_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void px210_dc_hw_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int reset_timeout = 100; + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* reset dc */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], + PX210_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL); + do { + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_IDLE); + if (config & IS_IDLE) + break; + mdelay(1); + reset_timeout--; + } while (reset_timeout); + + /* reset pix clock */ + px210_dc_hw_config_pix_clock(crtc, 0); + + /* reset dc */ + reset_timeout = 100; + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], + PX210_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL); + do { + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_IDLE); + if (config & IS_IDLE) + break; + mdelay(1); + reset_timeout--; + } while (reset_timeout); + + /* reset dcreq */ + phytium_writel_reg(priv, DCREQ_PLAN_A, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_PLAN); + phytium_writel_reg(priv, 0, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_CONTROL); + phytium_writel_reg(priv, DCREQ_RESET, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_RESET); + msleep(20); + phytium_writel_reg(priv, (~DCREQ_RESET) & DCREQ_RESET_MASK, + priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_RESET); +} + +int px210_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) +{ + int ret = 0; + + switch (mode_cmd->modifier[count]) { + case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC: + switch (mode_cmd->pixel_format) { + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + break; + default: + DRM_ERROR("TILE_MODE0_FBCDC does not support DRM_FORMAT %d\n", + mode_cmd->pixel_format); + ret = -EINVAL; + goto error; + } + break; + case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC: + switch (mode_cmd->pixel_format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case
DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + break; + default: + DRM_ERROR("TILE_MODE3_FBCDC does not support DRM_FORMAT %d\n", + mode_cmd->pixel_format); + ret = -EINVAL; + goto error; + } + break; + case DRM_FORMAT_MOD_LINEAR: + break; + default: + DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[count]); + ret = -EINVAL; + goto error; + } + + return 0; +error: + return ret; +} + +void px210_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = px210_primary_formats_modifiers; + *formats = px210_primary_formats; + *format_count = ARRAY_SIZE(px210_primary_formats); +} + +void px210_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = px210_cursor_formats_modifiers; + *formats = px210_cursor_formats; + *format_count = ARRAY_SIZE(px210_cursor_formats); +} + +void px210_dc_hw_update_dcreq(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int config; + + if (phytium_plane->tiling[0] == FRAMEBUFFER_LINEAR) { + phytium_writel_reg(priv, DCREQ_MODE_LINEAR, + group_offset, PX210_DCREQ_PLANE0_CONFIG); + } else { + config = DCREQ_NO_LOSSY; + if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE0) + config |= DCREQ_TILE_TYPE_MODE0; + else if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE3) + config |= DCREQ_TILE_TYPE_MODE3; + else + config |= DCREQ_TILE_TYPE_MODE0; + + switch (phytium_plane->format) { + case FRAMEBUFFER_FORMAT_ARGB8888: + case FRAMEBUFFER_FORMAT_XRGB8888: + config |= DCREQ_COLOURFORMAT_BGRA8888; + break; + case FRAMEBUFFER_FORMAT_ARGB2101010: + config |= DCREQ_COLOURFORMAT_ARGB2101010; + break; + case FRAMEBUFFER_FORMAT_XRGB4444: + case FRAMEBUFFER_FORMAT_ARGB4444: + config |= DCREQ_COLOURFORMAT_ARGB4444; + break; + case FRAMEBUFFER_FORMAT_XRGB1555: + case FRAMEBUFFER_FORMAT_ARGB1555: + config |= DCREQ_COLOURFORMAT_ARGB1555; + break; + case FRAMEBUFFER_FORMAT_RGB565: + config |= DCREQ_COLOURFORMAT_RGB565; + break; + case FRAMEBUFFER_FORMAT_YUYV: + config |= DCREQ_COLOURFORMAT_YUYV; + break; + case FRAMEBUFFER_FORMAT_UYVY: + config |= DCREQ_COLOURFORMAT_UYVY; + break; + } + config |= DCREQ_ARGBSWIZZLE_ARGB; + config |= DCREQ_MODE_TILE; + phytium_writel_reg(priv, phytium_plane->iova[0] & 0xffffffff, + group_offset, PX210_DCREQ_PLANE0_ADDR_START); + phytium_writel_reg(priv, (phytium_plane->iova[0] + phytium_plane->size[0]) & + 0xffffffff, group_offset, PX210_DCREQ_PLANE0_ADDR_END); + phytium_writel_reg(priv, config, group_offset, PX210_DCREQ_PLANE0_CONFIG); + } +} + +void px210_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_PIX_DMA_PREFIX); +} diff --git
a/drivers/gpu/drm/phytium/px210_dc.h b/drivers/gpu/drm/phytium/px210_dc.h new file mode 100644 index 0000000000000000000000000000000000000000..1d8220faadc7399f9fad696db53787cff1a0968e --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PX210_DC_H__ +#define __PX210_DC_H__ + +#define PX210_DC_PIX_CLOCK_MAX (594000) +#define PX210_DC_HDISPLAY_MAX 3840 +#define PX210_DC_VDISPLAY_MAX 2160 +#define PX210_DC_ADDRESS_MASK 0x7f + +extern void px210_dc_hw_vram_init(struct phytium_display_private *priv, + resource_size_t vram_addr, + resource_size_t vram_size); +extern void px210_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe); +extern void px210_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); +extern void px210_dc_hw_disable(struct drm_crtc *crtc); +extern int px210_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +extern void px210_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void px210_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +void px210_dc_hw_update_dcreq(struct drm_plane *plane); +void px210_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +#endif /* __PX210_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_dp.c b/drivers/gpu/drm/phytium/px210_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..be3c520a3c09d4ecb4e063cc1a031382c700f5cc --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dp.c @@ -0,0 +1,920 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include "phytium_display_drv.h" +#include "px210_reg.h" +#include "phytium_dp.h" +#include "px210_dp.h" + +static uint8_t px210_dp_source_lane_count[3] = {4, 4, 1}; + +/* [reg][ling_rate 1.62->8.1] */ +static int vco_val[12][4] = { + {0x0509, 0x0509, 0x0509, 0x0509}, // CP_PADJ + {0x0f00, 0x0f00, 0x0f00, 0x0f00}, // CP_IADJ + {0x0F08, 0x0F08, 0x0F08, 0x0F08}, // FILT_PADJ + {0x0061, 0x006C, 0x006C, 0x0051}, // INTDIV + {0x3333, 0x0000, 0x0000, 0x0000}, // FRACDIVL + {0x0000, 0x0000, 0x0000, 0x0000}, // FRACDIVH + {0x0042, 0x0048, 0x0048, 0x0036}, // HIGH_THR + {0x0002, 0x0002, 0x0002, 0x0002}, // PDIAG_CTRL + {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, // VCOCAL_PLLCNT_START + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PEFCNT + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PLLCNT_START + {0x0005, 0x0005, 0x0005, 0x0005}, // LOCK_PLLCNT_THR +}; + +static int mgnfs_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0018, 0x006, 0x0000, 0x0000}, + {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0013, 0x006, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int cpost_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int px210_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate) +{ + int port = phytium_dp->port%3; + int i = 0, data, tmp, tmp1, index = 0, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + /* set pma powerdown */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (A3_POWERDOWN3 << i*A3_POWERDOWN3_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<port%3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + default: + voltage_swing = 0; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + } + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + 
case DP_TRAIN_PRE_EMPH_LEVEL_0: + default: + pre_emphasis = 0; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + if (port == 0) { + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DIAG_ACYA, UNLOCK); + + } else if (port == 1) { + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_CPOST1, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DIAG_ACYA, UNLOCK); + } else { + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DIAG_ACYA, UNLOCK); + } +} + +static int px210_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + phytium_phy_writel(phytium_dp, PX210_PHY0_APB_RESET, APB_RESET); + + phytium_phy_writel(phytium_dp, PX210_PHY0_PIPE_RESET, RESET); + + /* config lane to dp mode */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (LANE_BIT << i*LANE_BIT_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (LANE_MASTER << i*LANE_MASTER_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (BIT_20 << i*BIT_20_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + 
PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void px210_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void px210_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + uint32_t group_offset = priv->dcreq_reg_base[port]; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void px210_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t px210_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int px210_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int config = 0; + int ret = 0; + + if (level > PX210_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); + +out: + return ret; +} + +bool px210_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + + return ((config & DP_SPREAD_ENABLE(port)) ? 
true:false); +} + +int px210_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int timeout = 100, config, ret = 0; + uint32_t group_offset = priv->address_transform_base; + uint32_t group_offset_dp = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + config &= (~DC_DP_RESET_STATUS(port)); + + phytium_writel_reg(priv, config, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + phytium_writel_reg(priv, FLAG_REQUEST | CMD_DC_DP_RESET, + priv->dcreq_reg_base[port], PX210_DCREQ_CMD_REGISTER); + do { + mdelay(10); + timeout--; + config = phytium_readl_reg(priv, group_offset, + PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + if (config & DC_DP_RESET_STATUS(port)) + break; + } while (timeout); + if (timeout == 0) { + DRM_ERROR("reset dc/dp pipe(%d) failed\n", port); + ret = -1; + } + + phytium_writel_reg(priv, AUX_CLK_DIVIDER, group_offset_dp, PHYTIUM_DP_AUX_CLK_DIVIDER); + + return ret; +} + +uint8_t px210_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return px210_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func px210_dp_funcs = { + .dp_hw_get_source_lane_count = px210_dp_hw_get_source_lane_count, + .dp_hw_reset = px210_dp_hw_reset, + .dp_hw_spread_is_enable = px210_dp_hw_spread_is_enable, + .dp_hw_set_backlight = px210_dp_hw_set_backlight, + .dp_hw_get_backlight = px210_dp_hw_get_backlight, + .dp_hw_disable_backlight = px210_dp_hw_disable_backlight, + .dp_hw_enable_backlight = px210_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = px210_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = px210_dp_hw_poweron_panel, + .dp_hw_init_phy = px210_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = px210_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = px210_dp_hw_set_phy_lane_and_rate, +}; + +void px210_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = &px210_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/px210_dp.h b/drivers/gpu/drm/phytium/px210_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..f2436ace18453f5409ac034199a39299b7a4e6fe --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dp.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PX210_DP_H__ +#define __PX210_DP_H__ + +#define PX210_DP_BACKLIGHT_MAX 100 + +void px210_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __PX210_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_reg.h b/drivers/gpu/drm/phytium/px210_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..e594fbc8d96f34a0566d710ce832be14d9a79346 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_reg.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PX210_REG_H__ +#define __PX210_REG_H__ + +#include "phytium_reg.h" + +/******************************dc register start******************************************/ +#define PX210_DC_CLOCK_CONTROL 0x0000 + #define SOFT_RESET (1<<12) +#define PX210_DC_CLOCK_IDLE 0x0004 + #define IS_IDLE (1<<16) +/******************************dc register end********************************************/ + +/******************************dcreq register start**************************************/ +#define PX210_DCREQ_PLANE0_ADDR_START 0x00 +#define PX210_DCREQ_PLANE0_ADDR_END 0x04 +#define PX210_DCREQ_PLANE1_ADDR_START 0x08 +#define PX210_DCREQ_PLANE1_ADDR_END 0x0c +#define PX210_DCREQ_PLANE0_CONFIG 0x10 + #define DCREQ_NO_LOSSY (0 << 0) + #define DCREQ_LOSSY (1 << 0) + #define DCREQ_TILE_TYPE_MASK (0x3 << 1) + #define DCREQ_TILE_TYPE_MODE0 (0x1 << 1) + #define DCREQ_TILE_TYPE_MODE3 (0x2 << 1) + #define DCREQ_COLOURFORMAT_MASK (0x7f << 8) + #define DCREQ_COLOURFORMAT_RGB565 (0x5 << 8) + #define DCREQ_COLOURFORMAT_ARGB1555 (0x4 << 8) + #define DCREQ_COLOURFORMAT_ARGB4444 (0x02 << 8) + #define DCREQ_COLOURFORMAT_BGRA8888 (0x29 << 8) + #define DCREQ_COLOURFORMAT_ARGB2101010 (0xe << 8) + #define DCREQ_COLOURFORMAT_YUYV (0x59 << 8) + #define DCREQ_COLOURFORMAT_UYVY (0x5b << 8) + #define DCREQ_ARGBSWIZZLE_MASK (0xf << 4) + #define DCREQ_ARGBSWIZZLE_ARGB (0X0 << 4) + #define DCREQ_ARGBSWIZZLE_BGRA (0XC << 4) + #define DCREQ_MODE_MASK (1 << 16) + #define DCREQ_MODE_LINEAR (0 << 16) + #define DCREQ_MODE_TILE (1 << 16) +#define PX210_DCREQ_PLANE1_CONFIG(pipe) 0x14 +#define PX210_DCREQ_PLANE0_CLEAR_COLOR_L 0x18 +#define PX210_DCREQ_PLANE0_CLEAR_COLOR_H 0x1C +#define PX210_DCREQ_PLANE1_CLEAR_COLOR_L 0x20 +#define PX210_DCREQ_PLANE1_CLEAR_COLOR_H 0x24 +#define PX210_DCREQ_CMD_REGISTER 0x38 + #define FLAG_REPLY (1<<31) + #define FLAG_REQUEST (1<<30) + #define CMD_PIXEL_CLOCK (0x0 << 28) + #define CMD_BACKLIGHT (0x1 << 28) + #define CMD_DC_DP_RESET (0x3 << 28) + #define BACKLIGHT_SHIFT 21 + #define BACKLIGHT_MASK 0x7f + #define BACKLIGHT_MAX 100 + #define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) + #define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) + #define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) + #define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) + #define PIXEL_CLOCK_MASK (0x1fffff) +#define PX210_DCREQ_FBCD_CLOCK_CONFIG 0x3c +#define PX210_DCREQ_PIX_DMA_PREFIX 0x50 + #define PREFIX_MASK 0xff + #define PREFIX_SHIFT 32 +#define PX210_DCREQ_FRAME_START 0x54 +#define PX210_DCREQ_FILTER_CONFIG 0x58 +#define PX210_DCREQ_CONTROL 0x5C + #define DC_REQ_ENABLE (1<<0) +#define PX210_DCREQ_MSI_CLEAR 0x60 + #define MSI_CLEAR 0x0 +#define PX210_DCREQ_RESET 0x68 + #define DCREQ_RESET (0x3 << 0) + #define DCREQ_RESET_MASK 0x3 +#define PX210_DCREQ_PLAN 0x94 + #define DCREQ_PLAN_A 0x0 + #define DCREQ_PLAN_B 0X5 +/******************************dcreq register end**************************************/ + +/******************************address transform register start**************************/ +#define PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define PX210_GPU_ADDRESS_TRANSFORM_SIZE 0x4 +#define PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR 0x8 + +#define PX210_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x24 + #define SRC_ADDR_OFFSET 22 + #define SRC_ADDR_MASK 0xffffffffff +#define PX210_DC_ADDRESS_TRANSFORM_SIZE 0x28 + #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) + #define SIZE_OFFSET 22 +#define PX210_DC_ADDRESS_TRANSFORM_DST_ADDR 0x2c + #define DST_ADDR_OFFSET 22 +#define PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 + #define 
DC_DP_RESET_STATUS(pipe) (1 << pipe) + #define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) +#define PX210_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c + #define BACKLIGHT_VALUE_MASK (0x7f) + #define BACKLIGHT_VALUE_SHIFT 16 +/******************************address transform register end**************************/ + +/******************************phy register start******************************************/ +/* self define */ +#define PX210_PHY0_PIPE_RESET 0x40104 + #define RESET 0x0 + #define RESET_DEASSERT 0x1 +#define PX210_PHY1_PIPE_RESET 0x100100 + #define PHY1_PIPE_RESET 0x0 + #define PHY1_PIPE_RESET_DEASSERT 0x4 + +#define PX210_PHY1_EN_REFCLK 0x100070 + +#define PX210_PHY0_MODE 0x40088 + #define LANE_BIT (0x3) + #define LANE_BIT_SHIFT 0x2 +#define PX210_PHY1_SEL 0x100004 + #define PHY1_DP_LANE_BIT 0x1 + #define PHY1_DP_LANE_BIT_SHIFT 2 + +#define PX210_PHY0_LINK_CFG 0x40044 + #define LANE_MASTER 0x1 + #define LANE_MASTER_SHIFT 1 + +#define PX210_PHY0_PLL_EN 0x40010 + #define PLL_EN 0x1 + #define PLL_EN_SHIFT 1 +#define PX210_PHY0_PMA_WIDTH 0x40020 + #define BIT_20 0x5 + #define BIT_20_SHIFT 4 + +#define PX210_PHY0_PMA0_POWER 0x40014 +#define PX210_PHY0_PMA1_POWER 0x40018 + #define A0_ACTIVE 0x1 + #define A0_ACTIVE_SHIFT 8 + #define A3_POWERDOWN3 0x8 + #define A3_POWERDOWN3_SHIFT 8 + +#define PX210_PHY1_PMA_MISC 0x1000a0 + #define PHY1_PLL_EN 0x1 + #define PHY1_PLL_EN_MASK 1 + #define PHY1_PLL_EN_SHIFT 8 + #define PHY1_BIT_20 0x5 + #define PHY1_BIT_20_SHIFT 9 + #define PHY1_A0_ACTIVE 0x1 + #define PHY1_A0_ACTIVE_SHIFT 2 + #define PHY1_A0_ACTIVE_MASK 0x3f + #define PHY1_A3_POWERDOWN3 0x8 + #define PHY1_A3_POWERDOWN3_MASK 0x3f + #define PHY1_A3_POWERDOWN3_SHIFT 2 + +#define PX210_PHY0_LINK_RESET 0x40108 + #define LINK_RESET 0x1 + #define LINK_RESET_MASK 0x1 + #define LINTK_RESET_SHIFT 0x1 + +#define PX210_PHY0_APB_RESET 0x40100 + #define APB_RESET 0x1 +#define PX210_PHY1_APB_RESET 0x100104 + #define PHY1_APB_RESET 0x4 + +/* phy origin register */ +#define PX210_PHY0_PLL_CFG 0x30038 +#define PX210_PHY1_PLL_CFG 0xb0038 + #define SINGLE_LINK 0x0 + #define DOUBLE_LINK 0x2 + +#define PX210_PHY0_PMA_CONTROL 0x3800c +#define PX210_PHY1_PMA_CONTROL 0xb800c + #define CONTROL_ENABLE 0x1 + #define CONTROL_ENABLE_MASK 0x1 + #define CONTROL_ENABLE_SHIFT 0x1 + +#define PX210_PHY0_PMA_CONTROL2 0x38004 +#define PX210_PHY1_PMA_CONTROL2 0xb8004 + #define PLL0_LOCK_DONE (0x1 << 6) + #define PLL1_LOCK_DONE (0x1 << 7) + +#define PX210_PHY0_PLL0_CLK_SEL 0X684 +#define PX210_PHY0_PLL1_CLK_SEL 0x704 +#define PX210_PHY1_PLL_CLK_SEL 0X80684 + #define PLL_LINK_RATE_162000 0xf01 + #define PLL_LINK_RATE_270000 0x701 + #define PLL_LINK_RATE_540000 0x301 + #define PLL_LINK_RATE_810000 0x200 + +#define PX210_PHY0_HSCLK0_SEL 0x18398 +#define PX210_PHY0_HSCLK1_SEL 0x1a398 +#define PX210_PHY1_HSCLK_SEL 0x90398 + #define HSCLK_LINK_0 0x0 + #define HSCLK_LINK_1 0x1 + +#define PX210_PHY0_HSCLK0_DIV 0x1839c +#define PX210_PHY0_HSCLK1_DIV 0x1a39c +#define PX210_PHY1_HSCLK_DIV 0x9039c + #define HSCLK_LINK_RATE_162000 0x2 + #define HSCLK_LINK_RATE_270000 0x1 + #define HSCLK_LINK_RATE_540000 0x0 + #define HSCLK_LINK_RATE_810000 0x0 + +#define PX210_PHY0_PLLDRC0_CTRL 0x18394 +#define PX210_PHY0_PLLDRC1_CTRL 0x1a394 +#define PX210_PHY1_PLLDRC_CTRL 0x90394 + #define PLLDRC_LINK0 0x1 + #define PLLDRC_LINK1 0x9 + +#define PX210_PHY0_PLL0_DSM_M0 0x250 +#define PX210_PHY1_PLL0_DSM_M0 0x80250 + #define PLL0_DSM_M0 0x4 +#define PX210_PHY0_PLL0_VCOCAL_START 0x218 +#define PX210_PHY1_PLL0_VCOCAL_START 0x80218 + #define PLL0_VCOCAL_START 0xc5e 
+#define PX210_PHY0_PLL0_VCOCAL_CTRL 0x208 +#define PX210_PHY1_PLL0_VCOCAL_CTRL 0x80208 + #define PLL0_VCOCAL_CTRL 0x3 + +#define PX210_PHY0_PLL1_DSM_M0 0x350 + #define PLL1_DSM_M0 0x4 +#define PX210_PHY0_PLL1_VCOCAL_START 0x318 + #define PLL1_VCOCAL_START 0xc5e +#define PX210_PHY0_PLL1_VCOCAL_CTRL 0x308 + #define PLL1_VCOCAL_CTRL 0x3 + +#define PX210_PHY0_PLL0_CP_PADJ 0x690 +#define PX210_PHY0_PLL0_CP_IADJ 0x694 +#define PX210_PHY0_PLL0_CP_FILT_PADJ 0x698 +#define PX210_PHY0_PLL0_INTDIV 0x240 +#define PX210_PHY0_PLL0_FRACDIVL 0x244 +#define PX210_PHY0_PLL0_FRACDIVH 0x248 +#define PX210_PHY0_PLL0_HIGH_THR 0x24c +#define PX210_PHY0_PLL0_PDIAG_CTRL 0x680 +#define PX210_PHY0_PLL0_VCOCAL_PLLCNT_START 0x220 +#define PX210_PHY0_PLL0_LOCK_PEFCNT 0x270 +#define PX210_PHY0_PLL0_LOCK_PLLCNT_START 0x278 +#define PX210_PHY0_PLL0_LOCK_PLLCNT_THR 0x27c + +#define PX210_PHY0_PLL1_CP_PADJ 0x710 +#define PX210_PHY0_PLL1_CP_IADJ 0x714 +#define PX210_PHY0_PLL1_CP_FILT_PADJ 0x718 +#define PX210_PHY0_PLL1_INTDIV 0x340 +#define PX210_PHY0_PLL1_FRACDIVL 0x344 +#define PX210_PHY0_PLL1_FRACDIVH 0x348 +#define PX210_PHY0_PLL1_HIGH_THR 0x34c +#define PX210_PHY0_PLL1_PDIAG_CTRL 0x700 +#define PX210_PHY0_PLL1_VCOCAL_PLLCNT_START 0x320 +#define PX210_PHY0_PLL1_LOCK_PEFCNT 0x370 +#define PX210_PHY0_PLL1_LOCK_PLLCNT_START 0x378 +#define PX210_PHY0_PLL1_LOCK_PLLCNT_THR 0x37c + +#define PX210_PHY1_PLL0_CP_PADJ 0x80690 +#define PX210_PHY1_PLL0_CP_IADJ 0x80694 +#define PX210_PHY1_PLL0_CP_FILT_PADJ 0x80698 +#define PX210_PHY1_PLL0_INTDIV 0x80240 +#define PX210_PHY1_PLL0_FRACDIVL 0x80244 +#define PX210_PHY1_PLL0_FRACDIVH 0x80248 +#define PX210_PHY1_PLL0_HIGH_THR 0x8024c +#define PX210_PHY1_PLL0_PDIAG_CTRL 0x80680 +#define PX210_PHY1_PLL0_VCOCAL_PLLCNT_START 0x80220 +#define PX210_PHY1_PLL0_LOCK_PEFCNT 0x80270 +#define PX210_PHY1_PLL0_LOCK_PLLCNT_START 0x80278 +#define PX210_PHY1_PLL0_LOCK_PLLCNT_THR 0x8027c + +#define PX210_PHY0_PLL0_TX_PSC_A0 0x18400 +#define PX210_PHY1_PLL0_TX_PSC_A0 0x90400 + #define PLL0_TX_PSC_A0 0xfb +#define PX210_PHY0_PLL0_TX_PSC_A2 0x18408 +#define PX210_PHY1_PLL0_TX_PSC_A2 0x90408 + #define PLL0_TX_PSC_A2 0x4aa +#define PX210_PHY0_PLL0_TX_PSC_A3 0x1840c +#define PX210_PHY1_PLL0_TX_PSC_A3 0x9040c + #define PLL0_TX_PSC_A3 0x4aa +#define PX210_PHY0_PLL0_RX_PSC_A0 0x28000 +#define PX210_PHY1_PLL0_RX_PSC_A0 0xa0000 + #define PLL0_RX_PSC_A0 0x0 +#define PX210_PHY0_PLL0_RX_PSC_A2 0x28008 +#define PX210_PHY1_PLL0_RX_PSC_A2 0xa0008 + #define PLL0_RX_PSC_A2 0x0 +#define PX210_PHY0_PLL0_RX_PSC_A3 0x2800C +#define PX210_PHY1_PLL0_RX_PSC_A3 0xa000C + #define PLL0_RX_PSC_A3 0x0 +#define PX210_PHY0_PLL0_RX_PSC_CAL 0x28018 +#define PX210_PHY1_PLL0_RX_PSC_CAL 0xa0018 + #define PLL0_RX_PSC_CAL 0x0 + +#define PX210_PHY0_PLL1_TX_PSC_A0 0x1a400 + #define PLL1_TX_PSC_A0 0xfb +#define PX210_PHY0_PLL1_TX_PSC_A2 0x1a408 + #define PLL1_TX_PSC_A2 0x4aa +#define PX210_PHY0_PLL1_TX_PSC_A3 0x1a40c + #define PLL1_TX_PSC_A3 0x4aa +#define PX210_PHY0_PLL1_RX_PSC_A0 0x2a000 + #define PLL1_RX_PSC_A0 0x0 +#define PX210_PHY0_PLL1_RX_PSC_A2 0x2a008 + #define PLL1_RX_PSC_A2 0x0 +#define PX210_PHY0_PLL1_RX_PSC_A3 0x2a00C + #define PLL1_RX_PSC_A3 0x0 +#define PX210_PHY0_PLL1_RX_PSC_CAL 0x2a018 + #define PLL1_RX_PSC_CAL 0x0 + +#define PX210_PHY0_PLL0_XCVR_CTRL 0x183a8 +#define PX210_PHY1_PLL0_XCVR_CTRL 0x903a8 + #define PLL0_XCVR_CTRL 0xf +#define PX210_PHY0_PLL1_XCVR_CTRL 0x1a3a8 + #define PLL1_XCVR_CTRL 0xf + +#define PX210_PHY0_PLL0_RX_GCSM1_CTRL 0x28420 +#define PX210_PHY1_PLL0_RX_GCSM1_CTRL 0xa0420 + #define PLL0_RX_GCSM1_CTRL 0x0 
+#define PX210_PHY0_PLL0_RX_GCSM2_CTRL 0x28440 +#define PX210_PHY1_PLL0_RX_GCSM2_CTRL 0xa0440 + #define PLL0_RX_GCSM2_CTRL 0x0 +#define PX210_PHY0_PLL0_RX_PERGCSM_CTRL 0x28460 +#define PX210_PHY1_PLL0_RX_PERGCSM_CTRL 0xa0460 + #define PLL0_RX_PERGCSM_CTRL 0x0 + +#define PX210_PHY0_PLL1_RX_GCSM1_CTRL 0x2a420 + #define PLL1_RX_GCSM1_CTRL 0x0 +#define PX210_PHY0_PLL1_RX_GCSM2_CTRL 0x2a440 + #define PLL1_RX_GCSM2_CTRL 0x0 +#define PX210_PHY0_PLL1_RX_PERGCSM_CTRL 0x2a460 + #define PLL1_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define PX210_PHY0_PLL0_TX_DIAG_ACYA 0x1879c +#define PX210_PHY0_PLL1_TX_DIAG_ACYA 0x1a79c +#define PX210_PHY1_PLL0_TX_DIAG_ACYA 0x9079c + #define LOCK 1 + #define UNLOCK 0 + +#define PX210_PHY0_PLL0_TX_TXCC_CTRL 0x18100 +#define PX210_PHY0_PLL1_TX_TXCC_CTRL 0x1a100 +#define PX210_PHY1_PLL0_TX_TXCC_CTRL 0x90100 + #define TX_TXCC_CTRL 0x8a4 + +#define PX210_PHY0_PLL0_TX_DRV 0x18318 +#define PX210_PHY0_PLL1_TX_DRV 0x1a318 +#define PX210_PHY1_PLL0_TX_DRV 0x90318 + #define TX_DRV 0x3 + +#define PX210_PHY0_PLL0_TX_MGNFS 0x18140 +#define PX210_PHY0_PLL1_TX_MGNFS 0x1a140 +#define PX210_PHY1_PLL0_TX_MGNFS 0x90140 + +#define PX210_PHY0_PLL0_TX_CPOST 0x18130 +#define PX210_PHY0_PLL1_TX_CPOST 0x1a130 +#define PX210_PHY0_PLL1_TX_CPOST1 0x1a13c +#define PX210_PHY1_PLL0_TX_CPOST 0x90130 + +/******************************phy register end********************************************/ +#endif /* __PX210_REG_H__ */ diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 341441b241835fda62f42865b9006885d34dabae..c2d6b723aea8ac21dfb04c7fec8ede57f8970f85 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -8093,6 +8093,9 @@ int cik_irq_process(struct radeon_device *rdev) if (queue_thermal) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index a7f9fc2b52399904d1490150a935083492ee86b5..ca2cc4c6a5ba58c74070f049e43581675f8d1001 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4922,6 +4922,9 @@ int evergreen_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 98d075c540e5e54db59a87536db80b5160d06816..c7a9956a410d90569b766c83735b001f9e9a395a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -4328,6 +4328,9 @@ int r600_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 312fe76944a943e764a29218c7eef2045d37e959..5bf7e40bf35492fa7ca305530a0b58f36f458db8 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6442,6 +6442,9 @@ int si_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; 
+#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c index 7903c8638e8173f2778fce729e7fa03a9a32dc91..c2ffa01585edc632d4023d960d38a7617502aa29 100644 --- a/drivers/hid/bpf/hid_bpf_dispatch.c +++ b/drivers/hid/bpf/hid_bpf_dispatch.c @@ -172,9 +172,9 @@ hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr * The following set contains all functions we agree BPF programs * can use. */ -BTF_SET8_START(hid_bpf_kfunc_ids) +BTF_KFUNCS_START(hid_bpf_kfunc_ids) BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL) -BTF_SET8_END(hid_bpf_kfunc_ids) +BTF_KFUNCS_END(hid_bpf_kfunc_ids) static const struct btf_kfunc_id_set hid_bpf_kfunc_set = { .owner = THIS_MODULE, @@ -479,12 +479,12 @@ static const struct btf_kfunc_id_set hid_bpf_fmodret_set = { }; /* for syscall HID-BPF */ -BTF_SET8_START(hid_bpf_syscall_kfunc_ids) +BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids) BTF_ID_FLAGS(func, hid_bpf_attach_prog) BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE) BTF_ID_FLAGS(func, hid_bpf_hw_request) -BTF_SET8_END(hid_bpf_syscall_kfunc_ids) +BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids) static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = { .owner = THIS_MODULE, diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index a4c361b6619c167b751028f10ec88eb20ed5a7fa..ea41c7e24c6e1455490270fb4e0d9a95b61b0935 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -38,6 +38,16 @@ config HWMON_DEBUG_CHIP comment "Native drivers" +config SENSORS_PVT + tristate "SW64 PVT monitor" + depends on SW64 + help + If you say yes here you get support for the voltage + sensor inside your CPU. + + This driver can also be built as a module. If so, the module + will be called PVT. + config SENSORS_ABITUGURU tristate "Abit uGuru (rev 1 & 2)" depends on X86 && DMI @@ -2165,6 +2175,19 @@ config SENSORS_VIA_CPUTEMP sensor inside your CPU. Supported are all known variants of the VIA C7 and Nano. +config SENSORS_ZHAOXIN_CPUTEMP + tristate "Zhaoxin CPU temperature sensor" + depends on X86 + default m + select HWMON_VID + help + If you say yes here you get support for the temperature + sensor inside your CPU. Supported are all known variants of + the Zhaoxin processors. + + This driver can also be built as a module. If so, the module + will be called zhaoxin-cputemp. 
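[Editor's note, not part of the patch: for context on the new SENSORS_ZHAOXIN_CPUTEMP entry, drivers in this family read the core temperature from a model-specific register. A hedged sketch of the usual read path follows; rdmsr_safe_on_cpu() is the real x86 API, while the 24-bit field mask is an assumption mirroring via-cputemp, from which this driver's structure appears to derive, and may differ on Zhaoxin parts:]

/* Sketch: typical per-CPU temperature MSR read for a cputemp
 * driver of this family; returns millidegrees Celsius. */
#include <linux/types.h>
#include <asm/msr.h>

static int cputemp_read_mcelsius(unsigned int cpu, u32 msr_temp, long *val)
{
    u32 eax, edx;
    int err;

    /* safe variant: returns an error instead of faulting on a bad MSR */
    err = rdmsr_safe_on_cpu(cpu, msr_temp, &eax, &edx);
    if (err)
        return err;

    *val = (eax & 0xffffff) * 1000; /* field mask is an assumption */
    return 0;
}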
+ config SENSORS_VIA686A tristate "VIA686A" depends on PCI diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 4ac9452b54304a0d60c68e3e479a0e5f8aac6898..f7da084cfc4646091ae53de160746c8cf1cd6209 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -211,6 +211,7 @@ obj-$(CONFIG_SENSORS_TMP464) += tmp464.o obj-$(CONFIG_SENSORS_TMP513) += tmp513.o obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o +obj-$(CONFIG_SENSORS_ZHAOXIN_CPUTEMP)+= zhaoxin-cputemp.o obj-$(CONFIG_SENSORS_VIA686A) += via686a.o obj-$(CONFIG_SENSORS_VT1211) += vt1211.o obj-$(CONFIG_SENSORS_VT8231) += vt8231.o @@ -220,6 +221,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o obj-$(CONFIG_SENSORS_XGENE) += xgene-hwmon.o +obj-$(CONFIG_SENSORS_PVT) += sw64_pvt.o obj-$(CONFIG_SENSORS_OCC) += occ/ obj-$(CONFIG_SENSORS_PECI) += peci/ diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index c906731c6c2d3ec6cc0cfbf6e13e4aebbf35f7cb..6bb096687f3d67263421464544ff132377d4775a 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -84,6 +84,11 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); */ #define AMD_I3255_STR "3255" +struct hygon_private { + u32 index_2nd; + u32 offset_2nd; +}; + struct k10temp_data { struct pci_dev *pdev; void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); @@ -94,6 +99,7 @@ struct k10temp_data { bool is_zen; u32 ccd_offset; bool disp_negative; + void *priv; }; #define TCTL_BIT 0 @@ -202,6 +208,23 @@ static int k10temp_read_labels(struct device *dev, return 0; } +static void hygon_read_temp(struct k10temp_data *data, int channel, + u32 *regval) +{ + struct hygon_private *h_priv; + + h_priv = (struct hygon_private *)data->priv; + if ((channel - 2) < h_priv->index_2nd) + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(data->ccd_offset, channel - 2), + regval); + else + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(h_priv->offset_2nd, + channel - 2 - h_priv->index_2nd), + regval); +} + static int k10temp_read_temp(struct device *dev, u32 attr, int channel, long *val) { @@ -223,13 +246,15 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, *val = 0; break; case 2 ... 
13: /* Tccd{1-12} */ - ret = amd_smn_read(amd_pci_dev_to_node_id(data->pdev), - ZEN_CCD_TEMP(data->ccd_offset, channel - 2), - ®val); - - if (ret) - return ret; - + if (hygon_f18h_m4h()) + hygon_read_temp(data, channel, ®val); + else { + ret = amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(data->ccd_offset, channel - 2), + ®val); + if (ret) + return ret; + } *val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000; break; default: @@ -406,14 +431,48 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev, } } +static void k10temp_get_ccd_support_2nd(struct pci_dev *pdev, + struct k10temp_data *data, int limit) +{ + struct hygon_private *h_priv; + u32 regval; + int i; + + h_priv = (struct hygon_private *)data->priv; + for (i = h_priv->index_2nd; i < limit; i++) { + amd_smn_read(amd_pci_dev_to_node_id(pdev), + ZEN_CCD_TEMP(h_priv->offset_2nd, + i - h_priv->index_2nd), + ®val); + if (regval & ZEN_CCD_TEMP_VALID) + data->show_temp |= BIT(TCCD_BIT(i)); + } +} + static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int unreliable = has_erratum_319(pdev); struct device *dev = &pdev->dev; + struct hygon_private *h_priv; struct k10temp_data *data; struct device *hwmon_dev; + u8 df_id; int i; + if (hygon_f18h_m4h()) { + if (get_df_id(pdev, &df_id)) { + pr_err("Get DF ID failed.\n"); + return -ENODEV; + } + + /* + * The temperature should be get from the devices + * with id < 4. + */ + if (df_id >= 4) + return 0; + } + if (unreliable) { if (!force) { dev_err(dev, @@ -441,7 +500,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) (boot_cpu_data.x86_model & 0xf0) == 0x70)) { data->read_htcreg = read_htcreg_nb_f15; data->read_tempreg = read_tempreg_nb_f15; - } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { + } else if (boot_cpu_data.x86 == 0x17) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_zen; data->is_zen = true; @@ -466,6 +525,25 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) k10temp_get_ccd_support(pdev, data, 8); break; } + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; + data->read_tempreg = read_tempreg_nb_zen; + data->is_zen = true; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) { + data->ccd_offset = 0x154; + data->priv = devm_kzalloc(dev, sizeof(*h_priv), + GFP_KERNEL); + if (!data->priv) + return -ENOMEM; + h_priv = (struct hygon_private *)data->priv; + h_priv->offset_2nd = 0x2f8; + h_priv->index_2nd = 3; + k10temp_get_ccd_support(pdev, data, h_priv->index_2nd); + k10temp_get_ccd_support_2nd(pdev, data, 8); + } } else if (boot_cpu_data.x86 == 0x19) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_zen; @@ -547,6 +625,9 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); diff --git a/drivers/hwmon/sw64_pvt.c b/drivers/hwmon/sw64_pvt.c new file mode 100644 index 0000000000000000000000000000000000000000..aedc29d44077ef32f8e68a1622571b85faa76cf5 --- 
/dev/null +++ b/drivers/hwmon/sw64_pvt.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PVT device driver. + * + * Part of lm_sensors, Linux kernel modules + * for hardware monitoring on Sunway. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#define PVT_VSYS 0 +#define PVT0_CTRL 0x7c00 +#define PVT02SPBU_DATA_OUT (0x1 << 26) +#define PVT_READ 0xc000 +#define PVT_WADDR 0xc800 +#define PVT_WDATA 0xcc00 + +/* The PVT registers */ +#define PVT_SAFECTRL 0x0 +#define CLK_SEL 0x1 +#define PVT_RUN 0x2 +#define PVT_CONFIG 0x3 +#define PVT_WAIT_TIME 0x4 +#define TS_ALARM_HVALUE_L 0x5 +#define TS_ALARM_HVALUE_H 0x6 +#define TS_ALARM_LVALUE_L 0x7 +#define TS_ALARM_LVALUE_H 0x8 +#define TS_ALARM_TIMES 0x9 +#define TRIMG 0xa +#define TRIMO 0xb +#define VS_ALARM_HVALUE_L 0xc +#define VS_ALARM_HVALUE_H 0xd +#define VS_ALARM_LVALUE_L 0xe +#define VS_ALARM_LVALUE_H 0xf +#define VS_ALARM_TIMES 0x10 +#define PVT_ALARM_CLEAR 0x11 +#define PVT_ALARM_MASK 0x12 +#define PVT_DATA_OUT_L 0x13 +#define PVT_DATA_OUT_H 0x14 +#define PVT_STATE_INFO 0x15 +#define PVT_ALARM_INFO 0x16 +#define COEFFICIENT 71 +#define FIXEDVAL 45598 + +#define vol_algorithm(m, n) (((((m >> 16) & 0x3) * 0x100) +\ + ((n >> 16) & 0xff)) * COEFFICIENT + FIXEDVAL) + + +struct pvt_hwmon { + struct pvt *pvt; + void __iomem *base; +}; + +static const char * const input_names[] = { + [PVT_VSYS] = "voltage", +}; + +static inline void pvt_write_reg(struct pvt_hwmon *pvtvol, u64 a, + u64 b, unsigned int offset) +{ + writel(a | b, pvtvol->base + offset); +} + +static inline u64 pvt_read_reg(struct pvt_hwmon *pvtvol, unsigned int offset) +{ + u64 value; + + value = readl(pvtvol->base + offset); + return value; +} + +static void pvt_configure(struct pvt_hwmon *pvtvol, u64 value, u64 reg) +{ + pvt_write_reg(pvtvol, PVT_WDATA, value, PVT0_CTRL); + pvt_write_reg(pvtvol, PVT_WADDR, reg, PVT0_CTRL); +} + +static inline u64 pvt_read_vol(struct pvt_hwmon *pvtvol, u64 data, + u64 reg, unsigned int offset) +{ + unsigned int value; + + pvt_write_reg(pvtvol, data, reg, offset); + msleep(100); + value = pvt_read_reg(pvtvol, offset); + return value; +} + +static int pvt_get_vol(struct pvt_hwmon *pvtvol) +{ + unsigned long long data_h, data_l; + + pvt_configure(pvtvol, 0x1, PVT_SAFECTRL); + + /* configure PVT mode */ + pvt_configure(pvtvol, 0x3, PVT_CONFIG); + + /* PVT monitor enable */ + pvt_configure(pvtvol, 0x1, PVT_RUN); + + /* get the upper 2 bits of the PVT voltage */ + data_h = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_H, PVT0_CTRL); + if ((data_h & PVT02SPBU_DATA_OUT) == 0) { + pr_err("failed to read the upper bits of the PVT voltage\n"); + return 0; + } + + /* get the lower 8 bits of the PVT voltage */ + data_l = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_L, PVT0_CTRL); + if ((data_l & PVT02SPBU_DATA_OUT) == 0) { + pr_err("failed to read the lower bits of the PVT voltage\n"); + return 0; + } + + return vol_algorithm(data_h, data_l); +} + +static ssize_t pvt_read(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct pvt_hwmon *pvtvol = dev_get_drvdata(dev); + unsigned long long pvt_vol; + + pvt_vol = pvt_get_vol(pvtvol); + return sprintf(buf, "%llu\n", (pvt_vol / 100)); +} + +static ssize_t show_label(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + return sprintf(buf, "%s\n", + input_names[to_sensor_dev_attr(devattr)->index]); +} + +static SENSOR_DEVICE_ATTR(in0_input, 0444, pvt_read, NULL, + PVT_VSYS); +static SENSOR_DEVICE_ATTR(in0_label, 0444, show_label, NULL, + PVT_VSYS); + 
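+/* + * Illustrative usage (a sketch; the hwmonX index is assigned at + * registration time and the value scale depends on the board): userspace + * reads the supply voltage through the standard hwmon sysfs ABI, e.g. + * + * $ cat /sys/class/hwmon/hwmonX/in0_label + * voltage + * $ cat /sys/class/hwmon/hwmonX/in0_input + * + * where the reported number is vol_algorithm(data_h, data_l) / 100 as + * computed in pvt_get_vol() and pvt_read() above. + */ + 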
+static struct attribute *pvt_attrs[] = { + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in0_label.dev_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(pvt); + +static int pvt_vol_plat_probe(struct platform_device *pdev) +{ + struct resource *res; + struct pvt_hwmon *pvtvol; + struct device *hwmon_dev; + unsigned long long value; + struct device *dev = &pdev->dev; + + pvtvol = devm_kzalloc(&pdev->dev, sizeof(*pvtvol), GFP_KERNEL); + if (!pvtvol) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + goto err; + + pvtvol->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pvtvol->base)) + return PTR_ERR(pvtvol->base); + + platform_set_drvdata(pdev, pvtvol); + hwmon_dev = devm_hwmon_device_register_with_groups(dev, "pvt", + pvtvol, pvt_groups); + + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + value = pvt_get_vol(pvtvol); + if (!value) { + dev_err(&pdev->dev, "failed to read the initial PVT voltage\n"); + return -ENODEV; + } + + return 0; + +err: + dev_err(&pdev->dev, "no PVT resource\n"); + return -ENXIO; +} + +#ifdef CONFIG_OF +static const struct of_device_id pvt_vol_of_match[] = { + { .compatible = "sw64,pvt-vol", }, + {}, +}; +MODULE_DEVICE_TABLE(of, pvt_vol_of_match); +#endif + +static struct platform_driver pvt_vol_driver = { + .probe = pvt_vol_plat_probe, + .driver = { + .name = "pvt-sw64", + .of_match_table = of_match_ptr(pvt_vol_of_match), + }, +}; + +static int __init pvt_vol_init_driver(void) +{ + return platform_driver_register(&pvt_vol_driver); +} +subsys_initcall(pvt_vol_init_driver); + +static void __exit pvt_vol_exit_driver(void) +{ + platform_driver_unregister(&pvt_vol_driver); +} +module_exit(pvt_vol_exit_driver); + +MODULE_AUTHOR("Wang Yingying "); +MODULE_DESCRIPTION("Sunway PVT voltage monitor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c index e5d18dac8ee7ba91682cc9c421baa6632169443b..0a5057dbe51a63fae6b5a92e22e3ee630a129927 100644 --- a/drivers/hwmon/via-cputemp.c +++ b/drivers/hwmon/via-cputemp.c @@ -273,7 +273,6 @@ static const struct x86_cpu_id __initconst cputemp_ids[] = { X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_A, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_D, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_NANO, NULL), - X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, X86_MODEL_ANY, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); diff --git a/drivers/hwmon/zhaoxin-cputemp.c b/drivers/hwmon/zhaoxin-cputemp.c new file mode 100644 index 0000000000000000000000000000000000000000..751d2c5a868ab615db076a18f4963a8972c6d50e --- /dev/null +++ b/drivers/hwmon/zhaoxin-cputemp.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * zhaoxin-cputemp.c - Driver for Zhaoxin CPU core temperature monitoring + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRVNAME "zhaoxin_cputemp" + +enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME, SHOW_CRIT, SHOW_MAX }; + +/* Functions declaration */ + +struct zhaoxin_cputemp_data { + struct device *hwmon_dev; + const char *name; + u32 id; + u32 msr_temp; + u32 msr_crit; + u32 msr_max; +}; + +/* Sysfs stuff */ + +static ssize_t name_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + int ret; + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + + if 
(attr->index == SHOW_NAME) + ret = sprintf(buf, "%s\n", data->name); + else /* show label */ + ret = sprintf(buf, "Core %d\n", data->id); + return ret; +} + +static ssize_t temp_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000); +} + +static ssize_t crit_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_crit, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xff) * 1000); +} + +static ssize_t max_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_max, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xff) * 1000); +} + +static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, SHOW_TEMP); +static SENSOR_DEVICE_ATTR_RO(temp1_label, name, SHOW_LABEL); +static SENSOR_DEVICE_ATTR_RO(name, name, SHOW_NAME); +static SENSOR_DEVICE_ATTR_RO(temp1_crit, crit, SHOW_CRIT); +static SENSOR_DEVICE_ATTR_RO(temp1_max, max, SHOW_MAX); + +static struct attribute *zhaoxin_cputemp_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + NULL +}; + +static const struct attribute_group zhaoxin_cputemp_group = { + .attrs = zhaoxin_cputemp_attributes, +}; + +static int zhaoxin_cputemp_probe(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data; + int err; + u32 eax, edx; + struct cpuinfo_x86 *c = &cpu_data(pdev->id); + + data = devm_kzalloc(&pdev->dev, sizeof(struct zhaoxin_cputemp_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->id = pdev->id; + data->name = "zhaoxin_cputemp"; + data->msr_temp = 0x1423; + if (c->x86_model == 0x6b) { + data->msr_crit = 0x175b; + data->msr_max = 0x175a; + } else { + data->msr_crit = 0x1416; + data->msr_max = 0x1415; + } + + /* test if we can access the TEMPERATURE MSR */ + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) { + dev_err(&pdev->dev, "Unable to access TEMPERATURE MSR, giving up\n"); + return err; + } + + platform_set_drvdata(pdev, data); + + err = sysfs_create_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + if (err) + return err; + + data->hwmon_dev = hwmon_device_register_for_thermal(&pdev->dev, data->name, data); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + dev_err(&pdev->dev, "Class registration failed (%d)\n", err); + goto exit_remove; + } + + return 0; + +exit_remove: + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return err; +} + +static int zhaoxin_cputemp_remove(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data = platform_get_drvdata(pdev); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return 0; +} + +static struct platform_driver zhaoxin_cputemp_driver = { + .driver = { + .name = DRVNAME, + }, + .probe = zhaoxin_cputemp_probe, + .remove = 
zhaoxin_cputemp_remove, +}; + +struct pdev_entry { + struct list_head list; + struct platform_device *pdev; + unsigned int cpu; +}; + +static LIST_HEAD(pdev_list); +static DEFINE_MUTEX(pdev_list_mutex); + +static int zhaoxin_cputemp_online(unsigned int cpu) +{ + int err; + struct platform_device *pdev; + struct pdev_entry *pdev_entry; + + pdev = platform_device_alloc(DRVNAME, cpu); + if (!pdev) { + err = -ENOMEM; + pr_err("Device allocation failed\n"); + goto exit; + } + + pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); + if (!pdev_entry) { + err = -ENOMEM; + goto exit_device_put; + } + + err = platform_device_add(pdev); + if (err) { + pr_err("Device addition failed (%d)\n", err); + goto exit_device_free; + } + + pdev_entry->pdev = pdev; + pdev_entry->cpu = cpu; + mutex_lock(&pdev_list_mutex); + list_add_tail(&pdev_entry->list, &pdev_list); + mutex_unlock(&pdev_list_mutex); + + return 0; + +exit_device_free: + kfree(pdev_entry); +exit_device_put: + platform_device_put(pdev); +exit: + return err; +} + +static int zhaoxin_cputemp_down_prep(unsigned int cpu) +{ + struct pdev_entry *p; + + mutex_lock(&pdev_list_mutex); + list_for_each_entry(p, &pdev_list, list) { + if (p->cpu == cpu) { + platform_device_unregister(p->pdev); + list_del(&p->list); + mutex_unlock(&pdev_list_mutex); + kfree(p); + return 0; + } + } + mutex_unlock(&pdev_list_mutex); + return 0; +} + +static const struct x86_cpu_id cputemp_ids[] __initconst = { + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x3b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x3b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x5b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x5b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x6b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x6b, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); + +static enum cpuhp_state zhaoxin_temp_online; + +static int __init zhaoxin_cputemp_init(void) +{ + int err; + + if (!x86_match_cpu(cputemp_ids)) + return -ENODEV; + + err = platform_driver_register(&zhaoxin_cputemp_driver); + if (err) + goto exit; + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/zhaoxin:online", + zhaoxin_cputemp_online, zhaoxin_cputemp_down_prep); + if (err < 0) + goto exit_driver_unreg; + + zhaoxin_temp_online = err; + +#ifndef CONFIG_HOTPLUG_CPU + if (list_empty(&pdev_list)) { + err = -ENODEV; + goto exit_hp_unreg; + } +#endif + return 0; + +#ifndef CONFIG_HOTPLUG_CPU +exit_hp_unreg: + cpuhp_remove_state_nocalls(zhaoxin_temp_online); +#endif +exit_driver_unreg: + platform_driver_unregister(&zhaoxin_cputemp_driver); +exit: + return err; +} + +static void __exit zhaoxin_cputemp_exit(void) +{ + cpuhp_remove_state(zhaoxin_temp_online); + platform_driver_unregister(&zhaoxin_cputemp_driver); +} + +MODULE_DESCRIPTION("Zhaoxin CPU temperature monitor"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(HWMON_THERMAL); + +module_init(zhaoxin_cputemp_init) +module_exit(zhaoxin_cputemp_exit) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 982007a112c2a082d1d201eb9116684623f7c3a3..3161c33981e1932abce5a94cc834a1c5f9bf731e 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -336,6 +336,28 @@ config I2C_VIAPRO This driver can also be built as a module. If so, the module will be called i2c-viapro. +config I2C_ZHAOXIN + tristate "Zhaoxin I2C controller driver" + depends on PCI + select I2C_ALGOBIT + help + If you say yes to this option, support will be included for the + Zhaoxin I2C interface + + This driver can also be built as a module. 
If so, the module + will be called i2c-zhaoxin. + +config I2C_SUNWAY + tristate "Sunway I2C library" + depends on SW64 + help + If you say yes to this option, support will be included for the + Sunway SoC I2C interface on the SW64 platform. + + This driver can also be built as a module. If so, the module + will be called i2c-sunway. + + if ACPI comment "ACPI drivers" @@ -350,6 +372,16 @@ config I2C_SCMI To compile this driver as a module, choose M here: the module will be called i2c-scmi. +config I2C_ZHAOXIN_SMBUS + tristate "Zhaoxin SMBus Interface" + depends on PCI || COMPILE_TEST + help + If you say yes to this option, support will be included for the + Zhaoxin SMBus interface. + + This driver can also be built as a module. If so, the module + will be called i2c-zhaoxin-smbus. + endif # ACPI comment "Mac SMBus host controller drivers" diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 9be9fdb07f3dca5dc335553eab2a623d80050ac2..738519b0a9cbb232fb5b764122cc3ae50e9402e9 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -29,6 +29,8 @@ obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o obj-$(CONFIG_I2C_VIA) += i2c-via.o obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o +obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o +obj-$(CONFIG_I2C_SUNWAY) += i2c-sunway.o # Mac SMBus host controller drivers obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o @@ -138,6 +140,7 @@ obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o +obj-$(CONFIG_I2C_ZHAOXIN_SMBUS) += i2c-zhaoxin-smbus.o # Other I2C/SMBus bus drivers obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index ced2fb4aeda8d3dcd54d8f9313d72b5620d2ddf9..c283743916fe20ceb5d4a7aefe7a8d1eb2afdae8 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -63,6 +63,9 @@ static int dw_reg_read(void *context, unsigned int reg, unsigned int *val) { struct dw_i2c_dev *dev = context; + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = reg << 7; + *val = readl(dev->base + reg); return 0; @@ -72,6 +75,9 @@ static int dw_reg_write(void *context, unsigned int reg, unsigned int val) { struct dw_i2c_dev *dev = context; + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = reg << 7; + writel(val, dev->base + reg); return 0; @@ -149,6 +155,8 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev) return ret; reg = readl(dev->base + DW_IC_COMP_TYPE); + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = readl(dev->base + (DW_IC_COMP_TYPE << 7)); i2c_dw_release_lock(dev); if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 5eb130c1d671950e364209bce451c66c443a3c17..d4909e9b1c84f91561db695aae25eb75fb971f68 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -310,7 +310,8 @@ struct dw_i2c_dev { #define MODEL_BAIKAL_BT1 BIT(9) #define MODEL_AMD_NAVI_GPU BIT(10) #define MODEL_WANGXUN_SP BIT(11) -#define MODEL_MASK GENMASK(11, 8) +#define MODEL_SUNWAY BIT(12) +#define MODEL_MASK GENMASK(12, 8) /* * Enable UCSI interrupt by writing 0xd at register diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 
855b698e99c08004df29a5da485722974de63335..c818e9d14b9acc670f7ec77334e647e82090ddca 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -150,9 +150,14 @@ static int dw_i2c_of_configure(struct platform_device *pdev) } static const struct of_device_id dw_i2c_of_match[] = { +#ifdef CONFIG_SW64 + { .compatible = "snps,designware-i2c", .data = (void *)MODEL_SUNWAY }, +#else { .compatible = "snps,designware-i2c", }, +#endif { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT }, { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 }, + { .compatible = "sunway,suntai-i2c", .data = (void *)MODEL_SUNWAY }, {}, }; MODULE_DEVICE_TABLE(of, dw_i2c_of_match); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 809fbd014cd6833749a677bba4b6845854459d3b..cc170c114e1090ddfd1a6600fca310961711ac6f 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -1043,8 +1043,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) bool notify_imc = false; is_sb800 = true; - if ((dev->vendor == PCI_VENDOR_ID_AMD || - dev->vendor == PCI_VENDOR_ID_HYGON) && + if (dev->vendor == PCI_VENDOR_ID_AMD && dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) { u8 imc; diff --git a/drivers/i2c/busses/i2c-sunway.c b/drivers/i2c/busses/i2c-sunway.c new file mode 100644 index 0000000000000000000000000000000000000000..cc7268c6a2da81e0f302aa5bc7ac285d00214740 --- /dev/null +++ b/drivers/i2c/busses/i2c-sunway.c @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 WXIAT Platform Software + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * The drivers in this file are synchronous/blocking. In addition, + * use poll mode to read/write slave devices on the I2C bus instead + * of the interrupt mode. 
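+ * + * Typical call sequence (illustrative, based on the helpers defined + * below): enable_i2c_controller(bus), then i2c_bus_rw(bus, addr, + * I2C_BUS_READ or I2C_BUS_WRITE, len, reg, buf), then + * disable_i2c_controller(bus); cpld_write() wraps exactly this sequence + * for single-byte writes to the CPLD on bus 2. 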
+ */ + +#include +#include +#include +#include + +#include + +#define CPLD_BUSNR 2 + +#define IC_CLK_KHZ 25000 + +/* I2C register definitions */ +#define DW_IC_CON 0x0 +#define DW_IC_STATUS 0x3800 +#define DW_IC_DATA_CMD 0x0800 +#define DW_IC_TAR 0x00200 +#define DW_IC_ENABLE 0x3600 +#define DW_IC_CMD 0x0100 +#define DW_IC_STOP 0x0200 +#define DW_IC_SDA_HOLD 0x3e00 +#define DW_IC_SDA_SETUP 0x4a00 +#define DW_IC_SS_SCL_HCNT 0x0a00 +#define DW_IC_SS_SCL_LCNT 0x0c00 +#define DW_IC_FS_SCL_HCNT 0x0e00 +#define DW_IC_FS_SCL_LCNT 0x1000 +#define DW_IC_TX_TL 0x1e00 +#define DW_IC_RX_TL 0x1c00 +#define DW_IC_INTR_MASK 0x1800 + +#define MAX_RETRY 10000000 + +#define DW_IC_STATUS_ACTIVITY 0x1 +#define DW_IC_STATUS_TFNF 0x2 +#define DW_IC_STATUS_TFE 0x4 +#define DW_IC_STATUS_RFNE 0x8 +#define DW_IC_STATUS_RFF 0x10 + +#define DW_IC_CON_MASTER 0x1 +#define DW_IC_CON_SPEED_STD 0x2 +#define DW_IC_CON_SPEED_FAST 0x4 +#define DW_IC_CON_10BITADDR_MASTER 0x10 +#define DW_IC_CON_RESTART_EN 0x20 +#define DW_IC_CON_SLAVE_DISABLE 0x40 + +#define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \ + DW_IC_CON_SLAVE_DISABLE | \ + DW_IC_CON_RESTART_EN) + +#define DW_IC_INTR_RX_UNDER 0x001 +#define DW_IC_INTR_RX_OVER 0x002 +#define DW_IC_INTR_RX_FULL 0x004 +#define DW_IC_INTR_TX_OVER 0x008 +#define DW_IC_INTR_TX_EMPTY 0x010 +#define DW_IC_INTR_RD_REQ 0x020 +#define DW_IC_INTR_TX_ABRT 0x040 +#define DW_IC_INTR_RX_DONE 0x080 +#define DW_IC_INTR_ACTIVITY 0x100 +#define DW_IC_INTR_STOP_DET 0x200 +#define DW_IC_INTR_START_DET 0x400 +#define DW_IC_INTR_GEN_CALL 0x800 + +#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \ + DW_IC_INTR_TX_EMPTY | \ + DW_IC_INTR_TX_ABRT | \ + DW_IC_INTR_STOP_DET) + +enum i2c_bus_operation { + I2C_BUS_READ, + I2C_BUS_WRITE, +}; + +static void __iomem *m_i2c_base_address; + +/* + * This function gets the I2Cx controller base address. + * + * @param i2c_controller_index Bus number of the I2C controller. + * @return I2C BAR. + */ +void __iomem *get_i2c_bar_addr(uint8_t i2c_controller_index) +{ + switch (i2c_controller_index) { + case 0: + return __va(IO_BASE | IIC0_BASE); + case 1: + return __va(IO_BASE | IIC1_BASE); + case 2: + return __va(IO_BASE | IIC2_BASE); + default: + return NULL; + } +} + +static inline void write_cpu_i2c_controller(uint64_t offset, uint32_t data) +{ + writel(data, m_i2c_base_address + offset); +} + +static inline uint32_t read_cpu_i2c_controller(uint64_t offset) +{ + return readl(m_i2c_base_address + offset); +} + +static int poll_for_status_set0(uint16_t status_bit) +{ + uint64_t retry = 0; + + while (retry < MAX_RETRY) { + if (read_cpu_i2c_controller(DW_IC_STATUS) & status_bit) + break; + retry++; + } + + if (retry == MAX_RETRY) + return -ETIME; + + return 0; +} + +static uint32_t i2c_dw_scl_lcnt(uint32_t ic_clk, uint32_t t_low, + uint32_t tf, uint32_t offset) +{ + /* + * Conditional expression: + * + * IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (t_low + tf) + * + * DW I2C core starts counting the SCL CNTs for the LOW period + * of the SCL clock (t_low) as soon as it pulls the SCL line. + * In order to meet the t_low timing spec, we need to take into + * account the fall time of SCL signal (tf). Default tf value + * should be 0.3 us, for safety. 
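+ * + * Worked example (a sketch using this file's own parameters): with + * IC_CLK_KHZ = 25000 and the standard-mode call made from + * init_cpu_i2c_controller() below, + * i2c_dw_scl_lcnt(25000, 4700, 300, 0) + * = (25000 * (4700 + 300) + 500000) / 1000000 - 1 = 124. 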
+ */ + return ((ic_clk * (t_low + tf) + 500000) / 1000000) - 1 + offset; +} + +static uint32_t i2c_dw_scl_hcnt(uint32_t ic_clk, uint32_t t_symbol, + uint32_t tf, uint32_t cond, uint32_t offset) +{ + /* + * DesignWare I2C core doesn't seem to have a solid strategy to meet + * the tHD;STA timing spec. Configuring _HCNT based on the tHIGH spec + * will result in a violation of the tHD;STA spec. + */ + if (cond) + /* + * Conditional expression: + * + * IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH + * + * This is based on the DW manuals, and represents an ideal + * configuration. The resulting I2C bus speed will be faster + * than any of the others. + * + * If your hardware is free from the tHD;STA issue, try this one. + */ + return (ic_clk * t_symbol + 500000) / 1000000 - 8 + offset; + /* + * Conditional expression: + * + * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf) + * + * This is just an experimental rule; the tHD;STA period turned + * out to be proportional to (_HCNT + 3). With this setting, + * we could meet both tHIGH and tHD;STA timing specs. + * + * If unsure, take this alternative. + * + * The reason we need to take "tf" into account here is the + * same as described in i2c_dw_scl_lcnt(). + */ + return (ic_clk * (t_symbol + tf) + 500000) / 1000000 - 3 + offset; +} + +static int wait_for_cpu_i2c_bus_busy(void) +{ + uint64_t retry = 0; + uint32_t status = 0; + + do { + retry++; + status = !!(read_cpu_i2c_controller(DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY); + } while ((retry < MAX_RETRY) && status); + + if (retry == MAX_RETRY) + return -ETIME; + + return 0; +} + +static int i2c_read(uint8_t reg_offset, uint8_t *buffer, uint32_t length) +{ + int status; + uint32_t i; + + status = poll_for_status_set0(DW_IC_STATUS_TFE); + if (status) + return status; + + write_cpu_i2c_controller(DW_IC_DATA_CMD, reg_offset); + + for (i = 0; i < length; i++) { + if (i == length - 1) + write_cpu_i2c_controller(DW_IC_DATA_CMD, DW_IC_CMD | DW_IC_STOP); + else + write_cpu_i2c_controller(DW_IC_DATA_CMD, DW_IC_CMD); + + if (poll_for_status_set0(DW_IC_STATUS_RFNE) == 0) + buffer[i] = readb(m_i2c_base_address + DW_IC_DATA_CMD); + else + pr_err("Read timeout at line %d.\n", __LINE__); + } + + return 0; +} + +static int i2c_write(uint8_t reg_offset, uint8_t *buffer, uint32_t length) +{ + int status; + uint32_t i; + + /* Data transfer, poll till transmit ready bit is set */ + status = poll_for_status_set0(DW_IC_STATUS_TFE); + if (status) { + pr_err("i2c_write: timeout waiting for TX FIFO empty (line %d)\n", __LINE__); + return status; + } + + write_cpu_i2c_controller(DW_IC_DATA_CMD, reg_offset); + + for (i = 0; i < length; i++) { + if (poll_for_status_set0(DW_IC_STATUS_TFNF) == 0) { + if (i == length - 1) + write_cpu_i2c_controller(DW_IC_DATA_CMD, buffer[i] | DW_IC_STOP); + else + write_cpu_i2c_controller(DW_IC_DATA_CMD, buffer[i]); + } else { + pr_err("Write timeout at line %d.\n", __LINE__); + } + } + + mdelay(200); + status = poll_for_status_set0(DW_IC_STATUS_TFE); + if (status) { + pr_err("i2c_write: timeout waiting for TX FIFO empty (line %d)\n", __LINE__); + return status; + } + + return 0; +} + +/* Initialize the I2C controller */ +void init_cpu_i2c_controller(void) +{ + uint32_t h_cnt; + uint32_t l_cnt; + uint32_t input_ic_clk_rate = IC_CLK_KHZ; /* in kHz, i.e. 
25MHz */ + uint32_t sda_falling_time = 300; + uint32_t scl_falling_time = 300; + + /* + * The I2C protocol specification requires 300ns of hold time on the + * SDA signal (tHD;DAT) in standard and fast speed modes, and a hold + * time long enough to bridge the undefined part between logic 1 and + * logic 0 of the falling edge of SCL in high speed mode. + */ + uint32_t sda_hold_time = 432; + uint32_t sda_hold = 0; + + /* Firstly disable the controller. */ + pr_debug("Initialize CPU I2C controller\n"); + + write_cpu_i2c_controller(DW_IC_ENABLE, 0); + + sda_hold = (input_ic_clk_rate * sda_hold_time + 500000) / 1000000; + write_cpu_i2c_controller(DW_IC_SDA_HOLD, sda_hold); + + /* Set standard and fast speed deviders for high/low periods. */ + /* Standard-mode */ + h_cnt = i2c_dw_scl_hcnt(input_ic_clk_rate, 4000, sda_falling_time, 0, 0); + l_cnt = i2c_dw_scl_lcnt(input_ic_clk_rate, 4700, scl_falling_time, 0); + + write_cpu_i2c_controller(DW_IC_SS_SCL_HCNT, h_cnt); + write_cpu_i2c_controller(DW_IC_SS_SCL_LCNT, l_cnt); + + pr_debug("Standard-mode HCNT=%x, LCNT=%x\n", h_cnt, l_cnt); + + /* Fast-mode */ + h_cnt = i2c_dw_scl_hcnt(input_ic_clk_rate, 600, sda_falling_time, 0, 0); + l_cnt = i2c_dw_scl_lcnt(input_ic_clk_rate, 1300, scl_falling_time, 0); + + write_cpu_i2c_controller(DW_IC_FS_SCL_HCNT, h_cnt); + write_cpu_i2c_controller(DW_IC_FS_SCL_LCNT, l_cnt); + + pr_debug("Fast-mode HCNT=%x, LCNT=%d\n\n", h_cnt, l_cnt); + + /* Configure Tx/Rx FIFO threshold levels, since we will be working + * in polling mode set both thresholds to their minimum + */ + write_cpu_i2c_controller(DW_IC_TX_TL, 0); + write_cpu_i2c_controller(DW_IC_RX_TL, 0); + write_cpu_i2c_controller(DW_IC_INTR_MASK, DW_IC_INTR_DEFAULT_MASK); + + /* Configure the i2c master */ + write_cpu_i2c_controller(DW_IC_CON, + INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD); + +} + +/* + * This function enables I2C controllers. + * + * @param i2c_controller_index Bus Number of I2C controllers. + */ +void enable_i2c_controller(uint8_t i2c_controller_index) +{ + m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index); + init_cpu_i2c_controller(); +} + +/* + * Write/Read data from I2C device. + * + * @i2c_controller_index: i2c bus number + * @slave_address: slave address + * @operation: to read or write + * @length: number of bytes + * @reg_offset: register offset + * @buffer: in/out buffer + */ +int i2c_bus_rw(uint8_t i2c_controller_index, uint8_t slave_address, + enum i2c_bus_operation operation, uint32_t length, + uint8_t reg_offset, void *buffer) +{ + uint8_t *byte_buffer = buffer; + int status = 0; + uint32_t databuffer, temp; + + m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index); + status = wait_for_cpu_i2c_bus_busy(); + if (status) { + pr_err("%d\n", __LINE__); + return status; + } + + mdelay(1000); + + /* Set the slave address. */ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x0); /* Disable controller */ + databuffer = read_cpu_i2c_controller(DW_IC_CON); + databuffer &= ~DW_IC_CON_10BITADDR_MASTER; + write_cpu_i2c_controller(DW_IC_CON, databuffer); + + /* Fill the target addr. */ + write_cpu_i2c_controller(DW_IC_TAR, slave_address); + + temp = read_cpu_i2c_controller(DW_IC_TAR); + + /* Configure Tx/Rx FIFO threshold levels. 
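+ * (The thresholds themselves were already programmed to their minimum + * in init_cpu_i2c_controller(); at this point the controller is simply + * re-enabled and the default interrupt mask rewritten before the + * transfer.) 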
*/ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x1); /* Enable the adapter */ + write_cpu_i2c_controller(DW_IC_INTR_MASK, DW_IC_INTR_DEFAULT_MASK); + + if (operation == I2C_BUS_READ) + status = i2c_read(reg_offset, byte_buffer, length); + else if (operation == I2C_BUS_WRITE) + status = i2c_write(reg_offset, byte_buffer, length); + + /* Disable controller */ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x0); + + return status; +} + +void disable_i2c_controller(uint8_t i2c_controller_index) +{ + m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index); + + /* Disable controller */ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x0); + m_i2c_base_address = 0; +} + +void cpld_write(uint8_t slave_addr, uint8_t reg, uint8_t data) +{ + enable_i2c_controller(CPLD_BUSNR); + i2c_bus_rw(CPLD_BUSNR, slave_addr, I2C_BUS_WRITE, sizeof(uint8_t), reg, &data); + disable_i2c_controller(CPLD_BUSNR); +} diff --git a/drivers/i2c/busses/i2c-zhaoxin-smbus.c b/drivers/i2c/busses/i2c-zhaoxin-smbus.c new file mode 100644 index 0000000000000000000000000000000000000000..52c689e928afff5782fd97ab5079fdc61c85d953 --- /dev/null +++ b/drivers/i2c/busses/i2c-zhaoxin-smbus.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Zhaoxin SMBus controller driver + * + * Copyright(c) 2023 Shanghai Zhaoxin Semiconductor Corporation. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "3.1.0" + +#define ZXSMB_NAME "smbus_zhaoxin" + +/* + * registers + */ +/* SMBus MMIO address offsets */ +#define ZXSMB_STS 0x00 +#define ZXSMB_BUSY BIT(0) +#define ZXSMB_CMD_CMPLET BIT(1) +#define ZXSMB_DEV_ERR BIT(2) +#define ZXSMB_BUS_CLSI BIT(3) +#define ZXSMB_FAIL_TRANS BIT(4) +#define ZXSMB_STS_MASK GENMASK(4, 0) +#define ZXSMB_NSMBSRST BIT(5) +#define ZXSMB_CTL 0x02 +#define ZXSMB_CMPLT_EN BIT(0) +#define ZXSMB_KILL_PRG BIT(1) +#define ZXSMB_START BIT(6) +#define ZXSMB_PEC_EN BIT(7) +#define ZXSMB_CMD 0x03 +#define ZXSMB_ADD 0x04 +#define ZXSMB_DAT0 0x05 +#define ZXSMB_DAT1 0x06 +#define ZXSMB_BLKDAT 0x07 + +/* + * platform related information + */ + /* protocol cmd constants */ +#define ZXSMB_QUICK 0x00 +#define ZXSMB_BYTE 0x04 +#define ZXSMB_BYTE_DATA 0x08 +#define ZXSMB_WORD_DATA 0x0C +#define ZXSMB_PROC_CALL 0x10 +#define ZXSMB_BLOCK_DATA 0x14 +#define ZXSMB_I2C_10_BIT_ADDR 0x18 +#define ZXSMB_I2C_PROC_CALL 0x30 +#define ZXSMB_I2C_BLOCK_DATA 0x34 +#define ZXSMB_I2C_7_BIT_ADDR 0x38 +#define ZXSMB_UNIVERSAL 0x3C + +#define ZXSMB_TIMEOUT 500 + +struct zxsmb { + struct device *dev; + struct i2c_adapter adap; + struct completion complete; + u16 base; + int irq; + u8 status; + int size; + u8 pec; +}; + +static irqreturn_t zxsmb_irq_handle(int irq, void *dev_id) +{ + struct zxsmb *smb = (struct zxsmb *)dev_id; + + smb->status = inb(smb->base + ZXSMB_STS); + if ((smb->status & ZXSMB_STS_MASK) == 0) + return IRQ_NONE; + + /* clear status */ + outb(smb->status, smb->base + ZXSMB_STS); + complete(&smb->complete); + + return IRQ_HANDLED; +} + +static int zxsmb_status_check(struct zxsmb *smb) +{ + if (smb->status & ZXSMB_CMD_CMPLET) + return 0; + + if (smb->status & ZXSMB_BUS_CLSI) { + dev_err(smb->dev, "Lost arbitration\n"); + outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL); + return -EAGAIN; + } + + dev_dbg(smb->dev, "Trans failed, status = 0x%X\n", smb->status); + + return -EIO; +} + +static int zxsmb_wait_interrput_finish(struct zxsmb *smb) +{ + int time_left; + + time_left = wait_for_completion_timeout(&smb->complete, msecs_to_jiffies(ZXSMB_TIMEOUT)); + if 
(time_left == 0) { + u8 status = inb(smb->base + ZXSMB_STS); + + /* some hosts' IRQ routing does not work well */ + if (status & ZXSMB_STS_MASK) { + outb(status, smb->base + ZXSMB_STS); + outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL); + devm_free_irq(smb->dev, smb->irq, smb); + smb->irq = 0; + dev_warn(smb->dev, "falling back to polling mode\n"); + + return -EAGAIN; + } + dev_dbg(smb->dev, "interrupt timeout\n"); + return -EIO; + } + + return zxsmb_status_check(smb); +} + +static int zxsmb_wait_polling_finish(struct zxsmb *smb) +{ + int status; + int time_left = ZXSMB_TIMEOUT * 10; + + do { + usleep_range(100, 200); + status = inb(smb->base + ZXSMB_STS); + } while ((status & ZXSMB_BUSY) && (--time_left)); + + if (time_left == 0) { + dev_dbg(smb->dev, "polling timeout\n"); + return -EIO; + } + + /* clear status */ + outb(status, smb->base + ZXSMB_STS); + smb->status = status; + + return zxsmb_status_check(smb); +} + +static int zxsmb_trans_start(struct zxsmb *smb) +{ + u16 base = smb->base; + int tmp; + + /* Make sure the SMBus host is ready to start transmitting */ + tmp = inb(base + ZXSMB_STS); + if (tmp & ZXSMB_BUSY) { + outb(tmp, base + ZXSMB_STS); + usleep_range(1000, 5000); + tmp = inb(base + ZXSMB_STS); + if (tmp & ZXSMB_BUSY) { + dev_err(smb->dev, "SMBus reset failed! (0x%02x)\n", tmp); + return -EIO; + } + } + + tmp = ZXSMB_START | smb->size; + + if (smb->pec) + tmp |= ZXSMB_PEC_EN; + else + tmp &= (~ZXSMB_PEC_EN); + + if (smb->irq) + tmp |= ZXSMB_CMPLT_EN; + + reinit_completion(&smb->complete); + smb->status = 0; + outb(tmp, base + ZXSMB_CTL); + return 0; +} + +static int zxsmb_transaction(struct zxsmb *smb) +{ + int err; + + err = zxsmb_trans_start(smb); + if (err) + return err; + + if (smb->irq) + err = zxsmb_wait_interrput_finish(smb); + else + err = zxsmb_wait_polling_finish(smb); + + outb(0, smb->base + ZXSMB_CTL); + return err; +} + +static int zxsmb_smbus_xfer(struct i2c_adapter *adap, u16 addr, u16 flags, char read, u8 command, + int size, union i2c_smbus_data *data) +{ + int i; + int err; + u8 len; + struct zxsmb *smb = (struct zxsmb *)i2c_get_adapdata(adap); + u16 base = smb->base; + + switch (size) { + case I2C_SMBUS_QUICK: + size = ZXSMB_QUICK; + break; + case I2C_SMBUS_BYTE: + size = ZXSMB_BYTE; + if (!read) + outb(command, base + ZXSMB_CMD); + break; + case I2C_SMBUS_BYTE_DATA: + outb(command, base + ZXSMB_CMD); + if (!read) + outb(data->byte, base + ZXSMB_DAT0); + size = ZXSMB_BYTE_DATA; + break; + case I2C_SMBUS_PROC_CALL: + case I2C_SMBUS_WORD_DATA: + if (read && size == I2C_SMBUS_PROC_CALL) + goto exit_unsupported; + outb(command, base + ZXSMB_CMD); + if (!read) { + outb(data->word & 0xff, base + ZXSMB_DAT0); + outb((data->word & 0xff00) >> 8, base + ZXSMB_DAT1); + } + size = (size == I2C_SMBUS_PROC_CALL) ? + ZXSMB_PROC_CALL : ZXSMB_WORD_DATA; + break; + case I2C_SMBUS_I2C_BLOCK_DATA: + case I2C_SMBUS_BLOCK_DATA: + len = data->block[0]; + if (read && size == I2C_SMBUS_I2C_BLOCK_DATA) + outb(len, base + ZXSMB_DAT1); + outb(command, base + ZXSMB_CMD); + /* Reset ZXSMB_BLKDAT */ + inb(base + ZXSMB_CTL); + if (!read) { + outb(len, base + ZXSMB_DAT0); + outb(0, base + ZXSMB_DAT1); + for (i = 1; i <= len; i++) + outb(data->block[i], base + ZXSMB_BLKDAT); + } + size = (size == I2C_SMBUS_I2C_BLOCK_DATA) ? 
+ ZXSMB_I2C_BLOCK_DATA : ZXSMB_BLOCK_DATA; + break; + default: + goto exit_unsupported; + } + + outb(((addr & 0x7f) << 1) | read, base + ZXSMB_ADD); + smb->size = size; + smb->pec = flags & I2C_CLIENT_PEC; + err = zxsmb_transaction(smb); + if (err) + return err; + + if ((read == I2C_SMBUS_WRITE) || (size == ZXSMB_QUICK)) { + if (unlikely(size == ZXSMB_PROC_CALL)) + goto prepare_read; + return 0; + } + +prepare_read: + switch (size) { + case ZXSMB_BYTE: + case ZXSMB_BYTE_DATA: + data->byte = inb(base + ZXSMB_DAT0); + break; + case ZXSMB_PROC_CALL: + case ZXSMB_WORD_DATA: + data->word = inb(base + ZXSMB_DAT0) + (inb(base + ZXSMB_DAT1) << 8); + break; + case ZXSMB_I2C_BLOCK_DATA: + case ZXSMB_BLOCK_DATA: + data->block[0] = inb(base + ZXSMB_DAT0); + if (data->block[0] > I2C_SMBUS_BLOCK_MAX) + data->block[0] = I2C_SMBUS_BLOCK_MAX; + /* Reset ZXSMB_BLKDAT */ + inb(base + ZXSMB_CTL); + for (i = 1; i <= data->block[0]; i++) + data->block[i] = inb(base + ZXSMB_BLKDAT); + break; + } + + return 0; + +exit_unsupported: + dev_err(smb->dev, "unsupported access, size:%x, dir:%s\n", size, read ? "read" : "write"); + return -EOPNOTSUPP; +} + +static u32 zxsmb_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm smbus_algorithm = { + .smbus_xfer = zxsmb_smbus_xfer, + .functionality = zxsmb_func, +}; + +static int zxsmb_probe(struct platform_device *pdev) +{ + struct zxsmb *smb; + struct resource *res; + struct i2c_adapter *adap; + + smb = devm_kzalloc(&pdev->dev, sizeof(*smb), GFP_KERNEL); + if (!smb) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) + return -ENODEV; + smb->base = res->start; + if (!devm_request_region(&pdev->dev, res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "Can't get I/O resource\n"); + return -EBUSY; + } + + smb->irq = platform_get_irq(pdev, 0); + if (smb->irq < 0 || devm_request_irq(&pdev->dev, smb->irq, zxsmb_irq_handle, IRQF_SHARED, + pdev->name, smb)) { + dev_warn(&pdev->dev, "failed to request irq %d\n", smb->irq); + smb->irq = 0; + } else { + init_completion(&smb->complete); + } + + smb->dev = &pdev->dev; + platform_set_drvdata(pdev, (void *)smb); + + adap = &smb->adap; + adap->algo = &smbus_algorithm; + adap->retries = 2; + adap->owner = THIS_MODULE; + adap->dev.parent = &pdev->dev; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent), + dev_name(smb->dev)); + i2c_set_adapdata(&smb->adap, smb); + + return i2c_add_adapter(&smb->adap); +} + +static int zxsmb_remove(struct platform_device *pdev) +{ + struct zxsmb *smb = platform_get_drvdata(pdev); + + i2c_del_adapter(&(smb->adap)); + platform_set_drvdata(pdev, NULL); + devm_kfree(&pdev->dev, smb); + + return 0; +} + +static const struct acpi_device_id zxsmb_acpi_match[] = { + {"SMB3324", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, zxsmb_acpi_match); + +static struct platform_driver zxsmb_driver = { + .probe = zxsmb_probe, + .remove = zxsmb_remove, + .driver = { + .name = ZXSMB_NAME, + .acpi_match_table = ACPI_PTR(zxsmb_acpi_match), + }, +}; + +module_platform_driver(zxsmb_driver); + +MODULE_AUTHOR("hanshu@zhaoxin.com"); +MODULE_DESCRIPTION("Zhaoxin SMBus driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-zhaoxin.c b/drivers/i2c/busses/i2c-zhaoxin.c new file mode 100644 index 0000000000000000000000000000000000000000..ef6b03ec7fa382fea23a72637f68d92f4637d990 --- /dev/null +++ 
b/drivers/i2c/busses/i2c-zhaoxin.c @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright(c) 2024 Shanghai Zhaoxin Semiconductor Corporation. + * All rights reserved. + */ + +#define DRIVER_VERSION "1.6.1" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ZX_I2C_NAME "i2c_zhaoxin" + +/* REG_CR Bit fields */ +#define ZXI2C_REG_CR 0x00 +#define ZXI2C_CR_ENABLE BIT(0) +#define ZXI2C_CR_RX_END BIT(1) +#define ZXI2C_CR_TX_END BIT(2) +#define ZXI2C_CR_END_MASK GENMASK(2, 1) +#define ZXI2C_CR_CPU_RDY BIT(3) +#define ZXI2C_CR_MST_RST BIT(7) +#define ZXI2C_CR_FIFO_MODE BIT(14) + +/* REG_TCR Bit fields */ +#define ZXI2C_REG_TCR 0x02 +#define ZXI2C_TCR_HS_MODE BIT(13) +#define ZXI2C_TCR_READ BIT(14) +#define ZXI2C_TCR_FAST BIT(15) + +/* REG_CSR Bit fields */ +#define ZXI2C_REG_CSR 0x04 +#define ZXI2C_CSR_RCV_NOT_ACK BIT(0) +#define ZXI2C_CSR_READY_MASK BIT(1) + +/* REG_ISR Bit fields */ +#define ZXI2C_REG_ISR 0x06 +#define ZXI2C_ISR_NACK_ADDR BIT(0) +#define ZXI2C_ISR_BYTE_END BIT(1) +#define ZXI2C_ISR_SCL_TIMEOUT BIT(2) +#define ZXI2C_ISR_MASK_ALL GENMASK(2, 0) +#define ZXI2C_IRQ_FIFOEND BIT(3) +#define ZXI2C_IRQ_FIFONACK BIT(4) +#define ZXI2C_IRQ_MASK (ZXI2C_ISR_MASK_ALL | ZXI2C_IRQ_FIFOEND | ZXI2C_IRQ_FIFONACK) + +/* REG_IMR Bit fields */ +#define ZXI2C_REG_IMR 0x08 +#define ZXI2C_IMR_ADDRNACK BIT(0) +#define ZXI2C_IMR_BYTE BIT(1) +#define ZXI2C_IMR_SCL_TIMEOUT BIT(2) +#define ZXI2C_IMR_ENABLE_ALL GENMASK(2, 0) + +#define ZXI2C_REG_CLK 0x10 +#define ZXI2C_CLK_50M BIT(0) +#define ZXI2C_REG_REV 0x11 +#define ZXI2C_REG_HCR 0x12 +#define ZXI2C_HCR_RST_FIFO GENMASK(1, 0) +#define ZXI2C_REG_HTDR 0x13 +#define ZXI2C_REG_HRDR 0x14 +#define ZXI2C_REG_HTLR 0x15 +#define ZXI2C_REG_HRLR 0x16 +#define ZXI2C_REG_HWCNTR 0x18 +#define ZXI2C_REG_HRCNTR 0x19 + +#define ZXI2C_REG_CDR 0x0A +#define ZXI2C_REG_TR 0x0C +#define ZXI2C_REG_MCR 0x0E + +enum { + ZXI2C_BYTE_MODE, + ZXI2C_FIFO_MODE +}; + +struct zxi2c { + struct i2c_adapter adapter; + struct completion complete; + struct device *dev; + void __iomem *base; + struct clk *clk; + struct i2c_msg *msg; + int irq; + int ret; + u16 tcr; + u16 tr; + u16 mcr; + u16 csr; + u8 fstp; + u8 hrv; + bool last; + u16 xfer_len; + u16 xfered_len; + unsigned int mode; +}; + +/* parameters Constants */ +#define ZXI2C_GOLD_FSTP_100K 0xF3 +#define ZXI2C_GOLD_FSTP_400K 0x38 +#define ZXI2C_GOLD_FSTP_1M 0x13 +#define ZXI2C_GOLD_FSTP_3400K 0x37 +#define ZXI2C_HS_MASTER_CODE (0x08 << 8) +#define ZXI2C_FIFO_SIZE 32 + +#define ZXI2C_TIMEOUT 200 + +static int zxi2c_wait_bus_ready(struct zxi2c *i2c) +{ + unsigned long timeout; + void __iomem *base = i2c->base; + u16 tmp; + + timeout = jiffies + msecs_to_jiffies(200); + while (!(readw(base + ZXI2C_REG_CSR) & ZXI2C_CSR_READY_MASK)) { + if (time_after(jiffies, timeout)) { + dev_warn(i2c->dev, "timeout waiting for bus ready\n"); + return -EBUSY; + } + tmp = ioread16(base + ZXI2C_REG_CR); + iowrite16(tmp | ZXI2C_CR_END_MASK, base + ZXI2C_REG_CR); + + msleep(20); + } + + return 0; +} + +static int zxi2c_irq_xfer(struct zxi2c *i2c) +{ + u16 val; + struct i2c_msg *msg = i2c->msg; + u8 read = msg->flags & I2C_M_RD; + void __iomem *base = i2c->base; + + if (read) { + msg->buf[i2c->xfered_len] = readw(base + ZXI2C_REG_CDR) >> 8; + + val = readw(base + ZXI2C_REG_CR) | ZXI2C_CR_CPU_RDY; + if (i2c->xfered_len == msg->len - 2) + val |= ZXI2C_CR_RX_END; + writew(val, base + ZXI2C_REG_CR); + } else { + + val = readw(base + ZXI2C_REG_CSR); + if (val & 
ZXI2C_CSR_RCV_NOT_ACK) { + dev_dbg_ratelimited(i2c->dev, "write RCV NACK error\n"); + return -EIO; + } + + if (msg->len == 0) { + val = ZXI2C_CR_TX_END | ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE; + writew(val, base + ZXI2C_REG_CR); + return 0; + } + + if ((i2c->xfered_len + 1) == msg->len) { + if (i2c->last) + writeb(ZXI2C_CR_TX_END, base + ZXI2C_REG_CR); + } else { + writew(msg->buf[i2c->xfered_len + 1] & 0xFF, base + ZXI2C_REG_CDR); + writew(ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE, base + ZXI2C_REG_CR); + } + } + + i2c->xfered_len++; + + return i2c->xfered_len == msg->len; +} + +/* 'irq == true' means in interrupt context */ +int zxi2c_fifo_irq_xfer(struct zxi2c *i2c, bool irq) +{ + u16 i; + u8 tmp; + struct i2c_msg *msg = i2c->msg; + void __iomem *base = i2c->base; + bool read = !!(msg->flags & I2C_M_RD); + + if (irq) { + /* get the received data */ + if (read) + for (i = 0; i < i2c->xfer_len; i++) + msg->buf[i2c->xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR); + + i2c->xfered_len += i2c->xfer_len; + if (i2c->xfered_len == msg->len) + return 1; + } + + /* reset fifo buffer */ + tmp = ioread8(base + ZXI2C_REG_HCR); + iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR); + + /* set xfer len */ + i2c->xfer_len = min_t(u16, msg->len - i2c->xfered_len, ZXI2C_FIFO_SIZE); + if (read) { + iowrite8(i2c->xfer_len - 1, base + ZXI2C_REG_HRLR); + } else { + iowrite8(i2c->xfer_len - 1, base + ZXI2C_REG_HTLR); + /* set write data */ + for (i = 0; i < i2c->xfer_len; i++) + iowrite8(msg->buf[i2c->xfered_len + i], base + ZXI2C_REG_HTDR); + } + + /* prepare to stop transmission */ + if (i2c->hrv && msg->len == (i2c->xfered_len + i2c->xfer_len)) { + tmp = ioread8(base + ZXI2C_REG_CR); + tmp |= read ? ZXI2C_CR_RX_END : ZXI2C_CR_TX_END; + iowrite8(tmp, base + ZXI2C_REG_CR); + } + + if (irq) { + /* continue transmission */ + tmp = ioread8(base + ZXI2C_REG_CR); + iowrite8(tmp |= ZXI2C_CR_CPU_RDY, base + ZXI2C_REG_CR); + } else { + u16 tcr_val = i2c->tcr; + + /* start transmission */ + tcr_val |= read ? 
ZXI2C_TCR_READ : 0; + writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR); + } + + return 0; +} + +static irqreturn_t zxi2c_isr(int irq, void *data) +{ + struct zxi2c *i2c = data; + void __iomem *base = i2c->base; + u8 status; + + /* save the status and write-clear it */ + status = readw(base + ZXI2C_REG_ISR); + if (!status) + return IRQ_NONE; + + writew(status, base + ZXI2C_REG_ISR); + + i2c->ret = 0; + if (status & ZXI2C_ISR_NACK_ADDR) + i2c->ret = -EIO; + + if (!i2c->ret) { + if (i2c->mode == ZXI2C_BYTE_MODE) + i2c->ret = zxi2c_irq_xfer(i2c); + else + i2c->ret = zxi2c_fifo_irq_xfer(i2c, true); + } + + if (i2c->ret) + complete(&i2c->complete); + + return IRQ_HANDLED; +} + +static int zxi2c_write(struct zxi2c *i2c, struct i2c_msg *msg, int last) +{ + u16 tcr_val = i2c->tcr; + void __iomem *base = i2c->base; + + i2c->last = last; + + writew(msg->buf[0] & 0xFF, base + ZXI2C_REG_CDR); + + reinit_completion(&i2c->complete); + + tcr_val |= msg->addr & 0x7f; + + writew(tcr_val, base + ZXI2C_REG_TCR); + + if (!wait_for_completion_timeout(&i2c->complete, ZXI2C_TIMEOUT)) + return -ETIMEDOUT; + + return i2c->ret; +} + +static int zxi2c_read(struct zxi2c *i2c, struct i2c_msg *msg, bool first) +{ + u16 val, tcr_val = i2c->tcr; + void __iomem *base = i2c->base; + + val = readw(base + ZXI2C_REG_CR); + val &= ~(ZXI2C_CR_TX_END | ZXI2C_CR_RX_END); + + if (msg->len == 1) + val |= ZXI2C_CR_RX_END; + + writew(val, base + ZXI2C_REG_CR); + + reinit_completion(&i2c->complete); + + tcr_val |= ZXI2C_TCR_READ | (msg->addr & 0x7f); + + writew(tcr_val, base + ZXI2C_REG_TCR); + + if (!first) { + val = readw(base + ZXI2C_REG_CR); + val |= ZXI2C_CR_CPU_RDY; + writew(val, base + ZXI2C_REG_CR); + } + + if (!wait_for_completion_timeout(&i2c->complete, ZXI2C_TIMEOUT)) + return -ETIMEDOUT; + + return i2c->ret; +} + +int zxi2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +{ + struct i2c_msg *msg; + int i; + int ret = 0; + struct zxi2c *i2c = i2c_get_adapdata(adap); + + i2c->mode = ZXI2C_BYTE_MODE; + for (i = 0; ret >= 0 && i < num; i++) { + i2c->msg = msg = &msgs[i]; + i2c->xfered_len = 0; + if (msg->len == 0) + return -EIO; + + if (msg->flags & I2C_M_RD) + ret = zxi2c_read(i2c, msg, i == 0); + else + ret = zxi2c_write(i2c, msg, (i + 1) == num); + } + + return (ret < 0) ? 
ret : i; +} + +static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + u8 tmp; + int ret; + struct zxi2c *i2c = (struct zxi2c *)i2c_get_adapdata(adap); + void __iomem *base = i2c->base; + + ret = zxi2c_wait_bus_ready(i2c); + if (ret) + return ret; + + tmp = ioread8(base + ZXI2C_REG_CR); + tmp &= ~(ZXI2C_CR_RX_END | ZXI2C_CR_TX_END); + + if (num == 1 && msgs->len >= 2 && (i2c->hrv || msgs->len <= ZXI2C_FIFO_SIZE)) { + /* enable fifo mode */ + iowrite16(ZXI2C_CR_FIFO_MODE | tmp, base + ZXI2C_REG_CR); + /* clear irq status */ + iowrite8(ZXI2C_IRQ_MASK, base + ZXI2C_REG_ISR); + /* enable fifo irq */ + iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IRQ_FIFOEND, base + ZXI2C_REG_IMR); + + i2c->msg = msgs; + i2c->mode = ZXI2C_FIFO_MODE; + i2c->xfer_len = i2c->xfered_len = 0; + + zxi2c_fifo_irq_xfer(i2c, 0); + + if (!wait_for_completion_timeout(&i2c->complete, ZXI2C_TIMEOUT)) { + dev_dbg(i2c->dev, "fifo mode timeout\n"); + return -ETIMEDOUT; + } + + ret = i2c->ret; + } else { + /* enable byte mode */ + iowrite16(tmp, base + ZXI2C_REG_CR); + /* clear irq status */ + iowrite8(ZXI2C_IRQ_MASK, base + ZXI2C_REG_ISR); + /* enable byte irq */ + iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IMR_BYTE, base + ZXI2C_REG_IMR); + + ret = zxi2c_xfer(adap, msgs, num); + if (ret == -ETIMEDOUT) { + dev_dbg(i2c->dev, "byte mode timeout\n"); + iowrite16(tmp | ZXI2C_CR_END_MASK, base + ZXI2C_REG_CR); + } + } + /* dis interrupt */ + iowrite8(0, base + ZXI2C_REG_IMR); + + return ret; +} + +static u32 zxi2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm zxi2c_algorithm = { + .master_xfer = zxi2c_master_xfer, + .functionality = zxi2c_func, +}; + +static const struct i2c_adapter_quirks zxi2c_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN | I2C_AQ_COMB_WRITE_THEN_READ, +}; + +static const u32 zxi2c_speed_params_table[][3] = { + /* speed, ZXI2C_TCR, ZXI2C_FSTP */ + { I2C_MAX_STANDARD_MODE_FREQ, 0, ZXI2C_GOLD_FSTP_100K }, + { I2C_MAX_FAST_MODE_FREQ, ZXI2C_TCR_FAST, ZXI2C_GOLD_FSTP_400K }, + { I2C_MAX_FAST_MODE_PLUS_FREQ, ZXI2C_TCR_FAST, ZXI2C_GOLD_FSTP_1M }, + { I2C_MAX_HIGH_SPEED_MODE_FREQ, ZXI2C_TCR_HS_MODE | ZXI2C_TCR_FAST, + ZXI2C_GOLD_FSTP_3400K }, + /* never reached, keep for debug. freq src is 27M mode */ + { I2C_MAX_STANDARD_MODE_FREQ, 0, 0x83 }, + { I2C_MAX_FAST_MODE_FREQ, ZXI2C_TCR_FAST, 0x1e }, + { I2C_MAX_FAST_MODE_PLUS_FREQ, ZXI2C_TCR_FAST, 10 } +}; + +static void zxi2c_set_bus_speed(struct zxi2c *i2c) +{ + void __iomem *base = i2c->base; + + iowrite16(i2c->tr, base + ZXI2C_REG_TR); + iowrite8(ZXI2C_CLK_50M, base + ZXI2C_REG_CLK); + iowrite16(i2c->mcr, base + ZXI2C_REG_MCR); +} + +static void zxi2c_get_bus_speed(struct zxi2c *i2c) +{ + u8 i, count; + u8 fstp; + const u32 *params; + void __iomem *base = i2c->base; + + u32 acpi_speed = i2c_acpi_find_bus_speed(i2c->dev); + + count = ARRAY_SIZE(zxi2c_speed_params_table); + for (i = 0; i < count; i++) + if (acpi_speed == zxi2c_speed_params_table[i][0]) + break; + /* if not found, use 400k as default */ + i = i < count ? 
i : 1; + + params = zxi2c_speed_params_table[i]; + fstp = ioread8(base + ZXI2C_REG_TR); + if (abs(fstp - params[2]) > 0x10) { + /* + * If the BIOS-programmed value is far from the golden + * value, use the golden value and warn the user. + */ + dev_warn(i2c->dev, "speed:%d, fstp:0x%x, golden:0x%x\n", params[0], fstp, + params[2]); + i2c->tr = params[2] | 0xff00; + } else { + i2c->tr = fstp | 0xff00; + } + + i2c->tcr = params[1]; + i2c->mcr = ioread16(base + ZXI2C_REG_MCR); + /* for Hs-mode, use 0000 1000 as master code */ + if (params[0] == I2C_MAX_HIGH_SPEED_MODE_FREQ) + i2c->mcr |= ZXI2C_HS_MASTER_CODE; + + dev_info(i2c->dev, "speed mode is %s\n", i2c_freq_mode_string(params[0])); +} + +static int zxi2c_init(struct platform_device *pdev, struct zxi2c **pi2c) +{ + int err; + struct zxi2c *i2c; + struct resource *res; + + i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n"); + return -ENODEV; + } + i2c->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c->base)) + return PTR_ERR(i2c->base); + + i2c->irq = platform_get_irq(pdev, 0); + if (i2c->irq < 0) + return i2c->irq; + + err = devm_request_irq(&pdev->dev, i2c->irq, zxi2c_isr, IRQF_SHARED, pdev->name, i2c); + if (err) { + dev_err(&pdev->dev, "failed to request irq %i\n", i2c->irq); + return err; + } + + i2c->dev = &pdev->dev; + init_completion(&i2c->complete); + platform_set_drvdata(pdev, i2c); + + *pi2c = i2c; + + return 0; +} + +static int zxi2c_probe(struct platform_device *pdev) +{ + int error; + struct zxi2c *i2c; + struct i2c_adapter *adap; + + error = zxi2c_init(pdev, &i2c); + if (error) + return error; + + zxi2c_get_bus_speed(i2c); + zxi2c_set_bus_speed(i2c); + i2c->hrv = ioread8(i2c->base + ZXI2C_REG_REV); + + adap = &i2c->adapter; + adap->owner = THIS_MODULE; + adap->algo = &zxi2c_algorithm; + + adap->quirks = &zxi2c_quirks; + + adap->dev.parent = &pdev->dev; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent), + dev_name(i2c->dev)); + i2c_set_adapdata(adap, i2c); + + error = devm_i2c_add_adapter(&pdev->dev, adap); + if (error) + return error; + + dev_info(i2c->dev, "adapter /dev/i2c-%d registered. 
version %s\n", adap->nr, + DRIVER_VERSION); + + return 0; +} + +static int zxi2c_resume(struct device *dev) +{ + struct zxi2c *i2c = dev_get_drvdata(dev); + + iowrite8(ZXI2C_CR_MST_RST, i2c->base + ZXI2C_REG_CR); + zxi2c_set_bus_speed(i2c); + + return 0; +} + +static const struct dev_pm_ops zxi2c_pm = { + SET_SYSTEM_SLEEP_PM_OPS(NULL, zxi2c_resume) +}; + +static const struct acpi_device_id zxi2c_acpi_match[] = { + {"IIC1D17", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, zxi2c_acpi_match); + +static struct platform_driver zxi2c_driver = { + .probe = zxi2c_probe, + .driver = { + .name = ZX_I2C_NAME, + .acpi_match_table = zxi2c_acpi_match, + .pm = &zxi2c_pm, + }, +}; + +module_platform_driver(zxi2c_driver); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_AUTHOR("HansHu@zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin IIC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 45500d2d5b4bb56129d9a1ae2c32b6b1cc171446..f86b6ede17e26a4939927122ffa0205026b20cc6 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -120,6 +120,12 @@ static unsigned int mwait_substates __initdata; */ #define CPUIDLE_FLAG_INIT_XSTATE BIT(17) +/* + * Ignore the sub-state when matching mwait hints between the ACPI _CST and + * custom tables. + */ +#define CPUIDLE_FLAG_PARTIAL_HINT_MATCH BIT(18) + /* * MWAIT takes an 8-bit "hint" in EAX "suggesting" * the C-state (top nibble) and sub-state (bottom nibble) @@ -993,6 +999,47 @@ static struct cpuidle_state spr_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state gnr_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 4, + .target_residency = 4, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_INIT_XSTATE | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, + .exit_latency = 170, + .target_residency = 650, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6P", + .desc = "MWAIT 0x21", + .flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_INIT_XSTATE | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, + .exit_latency = 210, + .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static struct cpuidle_state atom_cstates[] __initdata = { { .name = "C1E", @@ -1237,6 +1284,45 @@ static struct cpuidle_state snr_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state srf_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 2, + .target_residency = 10, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6S", + .desc = "MWAIT 0x22", + .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, + .exit_latency = 270, + .target_residency = 700, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6SP", + .desc = "MWAIT 0x23", + 
.flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, + .exit_latency = 310, + .target_residency = 900, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static const struct idle_cpu idle_cpu_nehalem __initconst = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, @@ -1354,6 +1440,12 @@ static const struct idle_cpu idle_cpu_spr __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_gnr __initconst = { + .state_table = gnr_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_avn __initconst = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, @@ -1382,6 +1474,12 @@ static const struct idle_cpu idle_cpu_snr __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_srf __initconst = { + .state_table = srf_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx), X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem), @@ -1421,12 +1519,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &idle_cpu_gmt), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &idle_cpu_gnr), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf), {} }; @@ -1578,7 +1678,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) } } -static bool __init intel_idle_off_by_default(u32 mwait_hint) +static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint) { int cstate, limit; @@ -1595,7 +1695,15 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint) * the interesting states are ACPI_CSTATE_FFH. 
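* For example (illustrative): with CPUIDLE_FLAG_PARTIAL_HINT_MATCH set, a * custom-table hint of 0x21 (C6P) matches an ACPI _CST hint of 0x20, * because MWAIT_SUBSTATE_MASK clears the sub-state nibble on both sides * before the comparison. 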
*/ for (cstate = 1; cstate < limit; cstate++) { - if (acpi_state_table.states[cstate].address == mwait_hint) + u32 acpi_hint = acpi_state_table.states[cstate].address; + u32 table_hint = mwait_hint; + + if (flags & CPUIDLE_FLAG_PARTIAL_HINT_MATCH) { + acpi_hint &= ~MWAIT_SUBSTATE_MASK; + table_hint &= ~MWAIT_SUBSTATE_MASK; + } + + if (acpi_hint == table_hint) return false; } return true; @@ -1605,7 +1713,10 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint) static inline bool intel_idle_acpi_cst_extract(void) { return false; } static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } -static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } +static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint) +{ + return false; +} #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ /** @@ -1929,7 +2040,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) if ((disabled_states_mask & BIT(drv->state_count)) || ((icpu->use_acpi || force_use_acpi) && - intel_idle_off_by_default(mwait_hint) && + intel_idle_off_by_default(state->flags, mwait_hint) && !(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE))) state->flags |= CPUIDLE_FLAG_OFF; diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index a5827d11e9346a890c55804052c9bfa21076dde1..9d6a7cbab0ae84d0433702263e38f7ad0a28bc06 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -95,6 +95,7 @@ source "drivers/infiniband/hw/qedr/Kconfig" source "drivers/infiniband/hw/qib/Kconfig" source "drivers/infiniband/hw/usnic/Kconfig" source "drivers/infiniband/hw/vmw_pvrdma/Kconfig" +source "drivers/infiniband/hw/xsc/Kconfig" source "drivers/infiniband/sw/rdmavt/Kconfig" endif # !UML source "drivers/infiniband/sw/rxe/Kconfig" diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index 1211f4317a9f4fdab32278e2000c1b2b392e64d5..b8fc3871dd1862bd8fb8ef80b7823d2a8bb18ed3 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_INFINIBAND_HNS) += hns/ obj-$(CONFIG_INFINIBAND_QEDR) += qedr/ obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/ obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/ +obj-$(CONFIG_INFINIBAND_XSC) += xsc/ diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h index 9d316fdc6f9a55a5055f793197117424800ccb01..a155519a862f83e6e4bec2dea2dec9fbc27eb141 100644 --- a/drivers/infiniband/hw/erdma/erdma_hw.h +++ b/drivers/infiniband/hw/erdma/erdma_hw.h @@ -11,8 +11,6 @@ #include /* PCIe device related definition. */ -#define PCI_VENDOR_ID_ALIBABA 0x1ded - #define ERDMA_PCI_WIDTH 64 #define ERDMA_FUNC_BAR 0 #define ERDMA_MISX_BAR 2 diff --git a/drivers/infiniband/hw/xsc/Kconfig b/drivers/infiniband/hw/xsc/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..6c3d4b7b330e196903f74770843287e5b3338c10 --- /dev/null +++ b/drivers/infiniband/hw/xsc/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +config INFINIBAND_XSC + tristate "Yunsilicon XSC RDMA driver" + default n + depends on NETDEVICES && ETHERNET && PCI && INET + depends on YUNSILICON_XSC_PCI && YUNSILICON_XSC_ETH + help + This driver provides RDMA support for + Yunsilicon XSC devices. + + To compile this driver as a module, choose M here. The module + will be called xsc_ib. 
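The CPUIDLE_FLAG_PARTIAL_HINT_MATCH change in the intel_idle.c hunk above is easiest to follow with concrete hint values. Below is a minimal sketch, not part of the patch, of the comparison intel_idle_off_by_default() now performs; it assumes MWAIT_SUBSTATE_MASK is the low nibble (0xf), as defined in arch/x86/include/asm/mwait.h::

    /* Partial hint match: the sub-state nibble is ignored, so a custom-table
     * hint such as 0x21 (C6P) still matches an ACPI _CST entry that only
     * advertises 0x20 (C6), and the state is not forced off.
     */
    static bool hint_matches(unsigned int flags, u32 acpi_hint, u32 table_hint)
    {
        if (flags & CPUIDLE_FLAG_PARTIAL_HINT_MATCH) {
            acpi_hint &= ~MWAIT_SUBSTATE_MASK;  /* 0x20 stays 0x20 */
            table_hint &= ~MWAIT_SUBSTATE_MASK; /* 0x21 becomes 0x20 */
        }
        return acpi_hint == table_hint;
    }

With the flag clear, the comparison stays exact, preserving the previous behaviour for every other state table.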
diff --git a/drivers/infiniband/hw/xsc/Makefile b/drivers/infiniband/hw/xsc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..b4fa5748bbad46598a3538218e8d39bba030dece --- /dev/null +++ b/drivers/infiniband/hw/xsc/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +ccflags-y := -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc +ccflags-y += -Wno-implicit-fallthrough +ifeq ($(USE_INTERNAL_IB_CORE), 1) + ccflags-y += -include /usr/src/ofa_kernel/include/rdma/ib_umem.h +endif + +obj-$(CONFIG_INFINIBAND_XSC) += xsc_ib.o + +xsc_ib-y := main.o xsc_rdma_ctrl.o cq.o qp.o mem.o mr.o ah.o \ + counters.o devx.o private_dev.o ib_umem_ex.o \ + rtt.o xsc_ib_sysfs.o + +xsc_ib-$(CONFIG_XSC_PEER_SUPPORT) += peer_mem.o diff --git a/drivers/infiniband/hw/xsc/ah.c b/drivers/infiniband/hw/xsc/ah.c new file mode 100644 index 0000000000000000000000000000000000000000..39da2861897d7da4da31fe5693785504e101ee85 --- /dev/null +++ b/drivers/infiniband/hw/xsc/ah.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include "xsc_ib.h" +#include "user.h" + +static u32 xsc_calc_roce_udp_flow_label(void) +{ + u32 factor = 0; + u32 hash = 0; + u32 flow_label = 0; + + /* this function generates a 20-bit flow_label */ + factor = (IB_GRH_FLOWLABEL_MASK - IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + 1); + hash = get_random_u32() % factor; + flow_label = hash & IB_GRH_FLOWLABEL_MASK; + + return flow_label; +} + +static u16 xsc_ah_get_udp_sport(const struct xsc_ib_dev *dev, + struct rdma_ah_attr *ah_attr) +{ + enum ib_gid_type gid_type = ah_attr->grh.sgid_attr->gid_type; + u16 sport = 0; + u32 fl = 0; + + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && + (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) && + (ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK)) { + fl = ah_attr->grh.flow_label; + } else { + /* generate a 20-bit flow_label and hand it back to the user layer */ + fl = xsc_calc_roce_udp_flow_label(); + ah_attr->grh.flow_label = fl; + } + + sport = xsc_flow_label_to_udp_sport(fl); + xsc_ib_dbg(dev, "fl=0x%x, sport=0x%x\n", fl, sport); + return sport; +} + +static struct ib_ah *create_ib_ah(struct xsc_ib_dev *dev, + struct xsc_ib_ah *ah, + struct rdma_ah_attr *ah_attr) +{ + enum ib_gid_type gid_type; + + if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { + const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); + + memcpy(ah->av.rgid, &grh->dgid, 16); + ah->av.grh_gid_fl = cpu_to_be32(grh->flow_label | + (1 << 30) | + grh->sgid_index << 20); + ah->av.hop_limit = grh->hop_limit; + ah->av.tclass = grh->traffic_class; + } + + ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4); + + if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { + gid_type = ah_attr->grh.sgid_attr->gid_type; + + memcpy(ah->av.rmac, ah_attr->roce.dmac, + sizeof(ah_attr->roce.dmac)); + + ah->av.udp_sport = xsc_ah_get_udp_sport(dev, ah_attr); + ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1; + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) +#define XSC_ECN_ENABLED BIT(1) + ah->av.tclass |= XSC_ECN_ENABLED; + } else { + ah->av.rlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr)); + ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f; + ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf); + } + + return &ah->ibah; +} + +xsc_ib_create_ah_def() +{ + struct xsc_ib_ah *ah = to_mah(ibah); + struct
xsc_ib_dev *dev = to_mdev(ibah->device); + struct rdma_ah_attr *ah_attr = init_attr->ah_attr; + enum rdma_ah_attr_type ah_type = ah_attr->type; + + if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && + !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) + return RET_VALUE(-EINVAL); + + if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && udata) { + int err; + struct xsc_ib_create_ah_resp resp = {}; + u32 min_resp_len = offsetof(typeof(resp), dmac) + + sizeof(resp.dmac); + + if (udata->outlen < min_resp_len) + return RET_VALUE(-EINVAL); + + resp.response_length = min_resp_len; + memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN); + err = ib_copy_to_udata(udata, &resp, resp.response_length); + if (err) + return RET_VALUE(err); + } + + create_ib_ah(dev, ah, ah_attr); /* never fails */ + return 0; +} + +int xsc_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) +{ + return 0; +} + +xsc_ib_destroy_ah_def() +{ + return 0; +} diff --git a/drivers/infiniband/hw/xsc/counters.c b/drivers/infiniband/hw/xsc/counters.c new file mode 100644 index 0000000000000000000000000000000000000000..971ecf4ff1af0355a96892d0b19ce6152c151e60 --- /dev/null +++ b/drivers/infiniband/hw/xsc/counters.c @@ -0,0 +1,538 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/xsc_lag.h" +#include "common/xsc_cmd.h" +#include "counters.h" + +#define COUNTERS_FILE_NAME "counters" +#define COUNTERS_NAMES_FILE_NAME "counters_names" +#define COUNTERS_VALUE_FILE_NAME "counters_value" +#define COUNTERS_ATTR_GROUP_NAME "counters" +#define GLOBAL_COUNTERS_GROUP_NAME "global_counters" +#define GLOBAL_COUNTERS_FILE_NAME "counters" + +static const struct counter_desc hw_rdma_stats_pf_desc[] = { + /* by mac port */ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_tx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, np_cnp_sent) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rp_cnp_handled) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, np_ecn_marked_roce_packets) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rp_cnp_ignored) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, read_rsp_out_of_seq) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, implied_nak_seq_err) }, + /* by function */ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, out_of_sequence) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, packet_seq_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, out_of_buffer) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rnr_nak_retry_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, local_ack_timeout_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rx_read_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rx_write_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, duplicate_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_tx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_tx_payload_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_payload_bytes) }, + /* global */ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf,
rdma_loopback_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_loopback_bytes) }, +}; + +static const struct counter_desc hw_rdma_stats_vf_desc[] = { + /* by function */ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_tx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_tx_payload_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_rx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_rx_payload_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, out_of_sequence) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, packet_seq_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, out_of_buffer) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rnr_nak_retry_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, local_ack_timeout_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rx_read_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rx_write_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, duplicate_requests) }, +}; + +static const struct counter_desc hw_global_rdma_stats_desc[] = { + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, rdma_loopback_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, rdma_loopback_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, rx_icrc_encapsulated) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, req_cqe_error) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, resp_cqe_error) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, cqe_msg_code_error) }, +}; + +static int get_hw_stats_rdma(struct xsc_core_device *dev, struct xsc_hw_stats_rdma *stats_rdma) +{ + int i = 0; + int ret; + int inlen; + struct xsc_lag *lag; + struct xsc_hw_stats_mbox_in *in; + struct xsc_hw_stats_rdma_mbox_out out; + struct xsc_core_device *xdev_tmp; + + memset(stats_rdma, 0, sizeof(*stats_rdma)); + + if (!dev) + return -EINVAL; + + inlen = sizeof(struct xsc_hw_stats_mbox_in) + XSC_MAX_PORTS; + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + xsc_board_lag_lock(dev); + if (xsc_lag_is_roce(dev)) { + lag = xsc_get_lag(dev); + in->lag_member_num = lag->xsc_member_cnt; + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) + in->member_port[i++] = xdev_tmp->mac_port; + in->is_lag = 1; + } else { + in->is_lag = 0; + in->mac_port = dev->mac_port; + } + xsc_board_lag_unlock(dev); + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HW_STATS_RDMA); + memset(&out, 0, sizeof(out)); + ret = xsc_cmd_exec(dev, (void *)in, inlen, (void *)&out, sizeof(out)); + if (ret || out.hdr.status) { + kfree(in); + return -EIO; + } + + memcpy(stats_rdma, &out.hw_stats, sizeof(*stats_rdma)); + kfree(in); + return 0; +} + +static ssize_t counters_names_show(struct kobject *kobjs, + struct attribute *attr, char *buf) +{ + int i; + int desc_size; + ssize_t count = 0; + const struct counter_desc *desc; + struct xsc_counters_attribute *xsc_counters_name_attr; + + xsc_counters_name_attr = container_of(attr, + struct xsc_counters_attribute, + attr); + + if (is_support_hw_pf_stats(xsc_counters_name_attr->dev)) { + desc = &hw_rdma_stats_pf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_pf_desc); + } else { + desc = &hw_rdma_stats_vf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_vf_desc); + } + + for (i = 0; i < desc_size; ++i) + count += sprintf(&buf[count], "%s\n", desc[i].format); + + return count; +} + +static ssize_t counters_show(struct kobject *kobjs, + struct attribute *attr, char *buf) +{ + int i; + int ret; + u8 *stats; + u64
value; + int desc_size; + ssize_t count = 0; + const struct counter_desc *desc; + struct xsc_hw_stats_rdma stats_rdma; + struct xsc_counters_attribute *xsc_counters_attr; + + xsc_counters_attr = container_of(attr, + struct xsc_counters_attribute, + attr); + + ret = get_hw_stats_rdma(xsc_counters_attr->dev, &stats_rdma); + if (ret || is_support_hw_pf_stats(xsc_counters_attr->dev) != stats_rdma.is_pf) + return 0; + + if (is_support_hw_pf_stats(xsc_counters_attr->dev)) { + desc = &hw_rdma_stats_pf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_pf_desc); + stats = (u8 *)&stats_rdma.stats.pf_stats; + } else { + desc = &hw_rdma_stats_vf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_vf_desc); + stats = (u8 *)&stats_rdma.stats.vf_stats; + } + + for (i = 0; i < desc_size; i++) { + value = *(u64 *)(stats + desc[i].offset); + value = be64_to_cpu(value); + count += sprintf(&buf[count], "%-26s %-20llu\n", + desc[i].format, value); + } + + return count; +} + +static ssize_t counters_value_read(struct file *file, + struct kobject *kob, + struct bin_attribute *bin_attr, + char *buf, loff_t loff, size_t size) +{ + int i; + int ret; + u8 *stats; + int bin_size; + int desc_size; + u64 *tmp_value; + struct xsc_core_device *xdev; + const struct counter_desc *desc; + struct xsc_hw_stats_rdma stats_rdma; + struct xsc_counters_bin_attribute *xsc_counters_bin_attr; + + xsc_counters_bin_attr = container_of(&bin_attr->attr, + struct xsc_counters_bin_attribute, + attr); + + if (xsc_counters_bin_attr->size > size || xsc_counters_bin_attr->size == 0) + return 0; + + xdev = (struct xsc_core_device *)xsc_counters_bin_attr->private; + ret = get_hw_stats_rdma(xdev, &stats_rdma); + if (ret || is_support_hw_pf_stats(xdev) != stats_rdma.is_pf) + return 0; + + if (is_support_hw_pf_stats(xdev)) { + desc = &hw_rdma_stats_pf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_pf_desc); + stats = (u8 *)&stats_rdma.stats.pf_stats; + } else { + desc = &hw_rdma_stats_vf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_vf_desc); + stats = (u8 *)&stats_rdma.stats.vf_stats; + } + + bin_size = desc_size * sizeof(u64); + if (xsc_counters_bin_attr->size < bin_size) + return 0; + + tmp_value = kzalloc(xsc_counters_bin_attr->size, GFP_KERNEL); + if (!tmp_value) + return 0; + + for (i = 0; i < desc_size; i++) { + tmp_value[i] = *(u64 *)(stats + desc[i].offset); + tmp_value[i] = be64_to_cpu(tmp_value[i]); + } + + memcpy(buf, tmp_value, xsc_counters_bin_attr->size); + + kfree(tmp_value); + return xsc_counters_bin_attr->size; +} + +static int counters_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + struct xsc_counters_attribute *xsc_counters_name, *xsc_counters; + struct xsc_counters_bin_attribute *xsc_counters_bin; + struct attribute_group *counters_attr_g; + struct bin_attribute **counters_bin_attrs; + struct attribute **counters_attrs; + int ret = -ENOMEM; + + xsc_counters_name = kzalloc(sizeof(*xsc_counters_name), GFP_KERNEL); + if (!xsc_counters_name) + return -ENOMEM; + + xsc_counters = kzalloc(sizeof(*xsc_counters), GFP_KERNEL); + if (!xsc_counters) + goto err_xsc_counters; + + xsc_counters_bin = kzalloc(sizeof(*xsc_counters_bin), GFP_KERNEL); + if (!xsc_counters_bin) + goto err_xsc_counters_bin; + + counters_bin_attrs = kcalloc(2, sizeof(*counters_bin_attrs), GFP_KERNEL); + if (!counters_bin_attrs) + goto err_counters_bin_attrs; + + counters_attrs = kcalloc(3, sizeof(*counters_attrs), GFP_KERNEL); + if (!counters_attrs) + goto err_counters_attrs; + + counters_attr_g = kzalloc(sizeof(*counters_attr_g), GFP_KERNEL);
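+ /* counters_attrs has three slots and counters_bin_attrs two: the last slot of each is left NULL so sysfs_create_group() sees NULL-terminated attribute arrays. */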
+ if (!counters_attr_g) + goto err_counters_attr_g; + + sysfs_attr_init(&xsc_counters_name->attr); + xsc_counters_name->attr.name = COUNTERS_NAMES_FILE_NAME; + xsc_counters_name->attr.mode = 0444; + xsc_counters_name->show = counters_names_show; + xsc_counters_name->dev = dev; + + sysfs_attr_init(&xsc_counters->attr); + xsc_counters->attr.name = COUNTERS_FILE_NAME; + xsc_counters->attr.mode = 0444; + xsc_counters->show = counters_show; + xsc_counters->dev = dev; + + sysfs_attr_init(&xsc_counters_bin->attr); + xsc_counters_bin->attr.name = COUNTERS_VALUE_FILE_NAME; + xsc_counters_bin->attr.mode = 0444; + xsc_counters_bin->read = counters_value_read; + xsc_counters_bin->private = dev; + xsc_counters_bin->size = sizeof(struct xsc_hw_stats_rdma); + + counters_bin_attrs[0] = (struct bin_attribute *)xsc_counters_bin; + counters_attrs[0] = (struct attribute *)xsc_counters_name; + counters_attrs[1] = (struct attribute *)xsc_counters; + + counters_attr_g->name = COUNTERS_ATTR_GROUP_NAME; + counters_attr_g->attrs = counters_attrs; + counters_attr_g->bin_attrs = counters_bin_attrs; + + dev->counters_priv = counters_attr_g; + + ret = sysfs_create_group(&ib_dev->dev.kobj, counters_attr_g); + if (ret) + goto err_counters_create_group; + + return 0; + +err_counters_create_group: + kfree(counters_attr_g); + counters_attr_g = NULL; + +err_counters_attr_g: + kfree(counters_attrs); + counters_attrs = NULL; + +err_counters_attrs: + kfree(counters_bin_attrs); + counters_bin_attrs = NULL; + +err_counters_bin_attrs: + kfree(xsc_counters_bin); + xsc_counters_bin = NULL; + +err_xsc_counters_bin: + kfree(xsc_counters); + xsc_counters = NULL; + +err_xsc_counters: + kfree(xsc_counters_name); + xsc_counters_name = NULL; + + return ret; +} + +static void counters_sysfs_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + struct xsc_counters_attribute *xsc_counters_name, *xsc_counters; + struct xsc_counters_bin_attribute *xsc_counters_bin; + struct bin_attribute **counters_bin_attrs; + struct attribute **counters_attrs; + struct attribute_group *counters_attr_g; + + counters_attr_g = dev->counters_priv; + if (!counters_attr_g) + return; + + counters_attrs = counters_attr_g->attrs; + counters_bin_attrs = counters_attr_g->bin_attrs; + + xsc_counters_bin = (struct xsc_counters_bin_attribute *)counters_bin_attrs[0]; + xsc_counters_name = (struct xsc_counters_attribute *)counters_attrs[0]; + xsc_counters = (struct xsc_counters_attribute *)counters_attrs[1]; + + sysfs_remove_group(&ib_dev->dev.kobj, counters_attr_g); + kfree(counters_attr_g); + counters_attr_g = NULL; + + kfree(counters_attrs); + counters_attrs = NULL; + + kfree(counters_bin_attrs); + counters_bin_attrs = NULL; + + kfree(xsc_counters_bin); + xsc_counters_bin = NULL; + + kfree(xsc_counters_name); + xsc_counters_name = NULL; + + kfree(xsc_counters); + xsc_counters = NULL; +} + +static ssize_t global_cnt_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct xsc_global_cnt_attributes *a = + container_of(attr, struct xsc_global_cnt_attributes, attr); + struct xsc_global_cnt_interface *g = + container_of(kobj, struct xsc_global_cnt_interface, kobj); + + if (!a->show) + return -EIO; + + return a->show(g, a, buf); +} + +static ssize_t global_cnt_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct xsc_global_cnt_attributes *a = + container_of(attr, struct xsc_global_cnt_attributes, attr); + struct xsc_global_cnt_interface *g = + container_of(kobj, struct xsc_global_cnt_interface,
kobj); + + if (!a->store) + return -EIO; + + return a->store(g, a, buf, size); +} + +static ssize_t global_counters_show(struct xsc_global_cnt_interface *g, + struct xsc_global_cnt_attributes *a, char *buf) +{ + int i; + int ret; + u8 *stats; + u64 value; + int desc_size; + ssize_t count = 0; + const struct counter_desc *desc; + struct xsc_hw_global_stats_mbox_in in; + struct xsc_hw_global_stats_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HW_GLOBAL_STATS); + ret = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(in), + (void *)&out, sizeof(out)); + if (ret || out.hdr.status) + return 0; + + desc = &hw_global_rdma_stats_desc[0]; + desc_size = ARRAY_SIZE(hw_global_rdma_stats_desc); + stats = (u8 *)&out.hw_stats; + + for (i = 0; i < desc_size; i++) { + value = *(u64 *)(stats + desc[i].offset); + value = be64_to_cpu(value); + count += sprintf(&buf[count], "%-26s %-20llu\n", + desc[i].format, value); + } + + return count; +} + +static ssize_t global_counters_store(struct xsc_global_cnt_interface *g, + struct xsc_global_cnt_attributes *a, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +#define GLOBAL_CNT_ATTR(_name) struct xsc_global_cnt_attributes xsc_global_cnt_attr_##_name = \ + __ATTR(_name, 0444, global_##_name##_show, global_##_name##_store) + +GLOBAL_CNT_ATTR(counters); + +static const struct sysfs_ops global_cnt_sysfs_ops = { + .show = global_cnt_attr_show, + .store = global_cnt_attr_store, +}; + +static struct attribute *global_cnt_attrs[] = { + &xsc_global_cnt_attr_counters.attr, + NULL +}; + +ATTRIBUTE_GROUPS(global_cnt); + +static const struct kobj_type global_cnt_ktype = { + .sysfs_ops = &global_cnt_sysfs_ops, + .default_groups = global_cnt_groups, +}; + +static struct xsc_global_cnt_interface *g_global_cnt_interface; + +static int global_cnt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ + struct xsc_global_cnt_interface *tmp; + int err; + + if (!xdev || !xsc_core_is_pf(xdev) || xdev->pf_id != 0) + return 0; + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + err = kobject_init_and_add(&tmp->kobj, &global_cnt_ktype, + &ib_dev->dev.kobj, GLOBAL_COUNTERS_GROUP_NAME); + if (err) + goto error_return; + + g_global_cnt_interface = tmp; + tmp->xdev = xdev; + return 0; + +error_return: + kobject_put(&tmp->kobj); + kfree(tmp); + return err; +} + +static void global_cnt_sysfs_fini(struct xsc_core_device *xdev) +{ + if (!g_global_cnt_interface || !xdev || !xsc_core_is_pf(xdev) || xdev->pf_id != 0) + return; + + kobject_put(&g_global_cnt_interface->kobj); + kfree(g_global_cnt_interface); + g_global_cnt_interface = NULL; +} + +int xsc_counters_init(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + int ret; + + ret = counters_sysfs_init(ib_dev, dev); + if (ret) + goto error_return; + + ret = global_cnt_sysfs_init(ib_dev, dev); + if (ret) + goto error_global_cnt; + + return 0; + +error_global_cnt: + counters_sysfs_fini(ib_dev, dev); +error_return: + return ret; +} + +void xsc_counters_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + counters_sysfs_fini(ib_dev, dev); + global_cnt_sysfs_fini(dev); +} diff --git a/drivers/infiniband/hw/xsc/counters.h b/drivers/infiniband/hw/xsc/counters.h new file mode 100644 index 0000000000000000000000000000000000000000..001a57b8372d0704c0e0cc976713afb5c4fc3f3f --- /dev/null +++ b/drivers/infiniband/hw/xsc/counters.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021
- 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __COUNTERS_H__ +#define __COUNTERS_H__ + +#define STRING_LEN 32 +#define XSC_DECLARE_STAT(type, fld) ""#fld, offsetof(type, fld) + +struct counter_desc { + char format[STRING_LEN]; + size_t offset; /* Byte offset */ +}; + +struct xsc_counters_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *kobj, + struct attribute *attr, const char *buf, + size_t count); + struct xsc_core_device *dev; +}; + +struct xsc_counters_bin_attribute { + struct attribute attr; + size_t size; + void *private; + ssize_t (*read)(struct file *f, struct kobject *k, struct bin_attribute *bin_attr, + char *buf, loff_t l, size_t s); + ssize_t (*write)(struct file *f, struct kobject *k, struct bin_attribute *bin_attr, + char *buf, loff_t l, size_t s); + int (*mmap)(struct file *f, struct kobject *k, struct bin_attribute *bin_attr, + struct vm_area_struct *vma); +}; + +struct xsc_global_cnt_interface { + struct xsc_core_device *xdev; + struct kobject kobj; +}; + +struct xsc_global_cnt_attributes { + struct attribute attr; + ssize_t (*show)(struct xsc_global_cnt_interface *g, struct xsc_global_cnt_attributes *a, + char *buf); + ssize_t (*store)(struct xsc_global_cnt_interface *g, struct xsc_global_cnt_attributes *a, + const char *buf, size_t count); +}; + +#endif diff --git a/drivers/infiniband/hw/xsc/cq.c b/drivers/infiniband/hw/xsc/cq.c new file mode 100644 index 0000000000000000000000000000000000000000..102902410b86509c8c17c0524dc10614bd4f7876 --- /dev/null +++ b/drivers/infiniband/hw/xsc/cq.c @@ -0,0 +1,690 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "xsc_ib.h" +#include "user.h" +#include "common/xsc_hsi.h" +#include + +enum { + CQ_OK = 0, + CQ_EMPTY = -1, + CQ_POLL_ERR = -2 +}; + +enum { + XSC_CQE_APP_TAG_MATCHING = 1, +}; + +enum { + XSC_CQE_APP_OP_TM_CONSUMED = 0x1, + XSC_CQE_APP_OP_TM_EXPECTED = 0x2, + XSC_CQE_APP_OP_TM_UNEXPECTED = 0x3, + XSC_CQE_APP_OP_TM_NO_TAG = 0x4, + XSC_CQE_APP_OP_TM_APPEND = 0x5, + XSC_CQE_APP_OP_TM_REMOVE = 0x6, + XSC_CQE_APP_OP_TM_NOOP = 0x7, + XSC_CQE_APP_OP_TM_CONSUMED_SW_RDNV = 0x9, + XSC_CQE_APP_OP_TM_CONSUMED_MSG = 0xA, + XSC_CQE_APP_OP_TM_CONSUMED_MSG_SW_RDNV = 0xB, + XSC_CQE_APP_OP_TM_MSG_COMPLETION_CANCELED = 0xC, +}; + +static const u32 xsc_msg_opcode[][2][2] = { + [XSC_MSG_OPCODE_SEND][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_SEND, + [XSC_MSG_OPCODE_SEND][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_SEND_IMMDT, + [XSC_MSG_OPCODE_SEND][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_RSP_RECV, + [XSC_MSG_OPCODE_SEND][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_RECV_IMMDT, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE_IMMDT, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_WRITE_IMMDT, + [XSC_MSG_OPCODE_RDMA_READ][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_READ, + [XSC_MSG_OPCODE_RDMA_READ][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_RDMA_READ][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_RDMA_READ][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_MAD][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_REQ_SEND, + [XSC_MSG_OPCODE_MAD][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_RSP_RECV, +}; + +static const u32 xsc_cqe_opcode[] = { + [XSC_OPCODE_RDMA_REQ_SEND] = IB_WC_SEND, + [XSC_OPCODE_RDMA_REQ_SEND_IMMDT] = IB_WC_SEND, + [XSC_OPCODE_RDMA_RSP_RECV] = IB_WC_RECV, + [XSC_OPCODE_RDMA_RSP_RECV_IMMDT] = IB_WC_RECV, + [XSC_OPCODE_RDMA_REQ_WRITE] = IB_WC_RDMA_WRITE, + [XSC_OPCODE_RDMA_REQ_WRITE_IMMDT] = IB_WC_RDMA_WRITE, + [XSC_OPCODE_RDMA_RSP_WRITE_IMMDT] = IB_WC_RECV_RDMA_WITH_IMM, + [XSC_OPCODE_RDMA_REQ_READ] = IB_WC_RDMA_READ, + [XSC_OPCODE_RDMA_MAD_REQ_SEND] = IB_WC_SEND, + [XSC_OPCODE_RDMA_MAD_RSP_RECV] = IB_WC_RECV, +}; + +int xsc_stall_num_loop = 60; +int xsc_stall_cq_poll_min = 60; +int xsc_stall_cq_poll_max = 100000; +int xsc_stall_cq_inc_step = 100; +int xsc_stall_cq_dec_step = 10; + +static inline u8 xsc_get_cqe_opcode(struct xsc_cqe *cqe) +{ + if (cqe->is_error) + return cqe->type ? 
XSC_OPCODE_RDMA_RSP_ERROR : XSC_OPCODE_RDMA_REQ_ERROR; + if (cqe->msg_opcode > XSC_MSG_OPCODE_MAD) + return XSC_OPCODE_RDMA_CQE_ERROR; + return xsc_msg_opcode[cqe->msg_opcode][cqe->type][cqe->with_immdt]; +} + +static void xsc_ib_cq_comp(struct xsc_core_cq *cq) +{ + struct ib_cq *ibcq = &to_xibcq(cq)->ibcq; + + ibcq->comp_handler(ibcq, ibcq->cq_context); +} + +static void xsc_ib_cq_event(struct xsc_core_cq *xcq, enum xsc_event type) +{ + struct xsc_ib_cq *cq = container_of(xcq, struct xsc_ib_cq, xcq); + struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device); + struct ib_cq *ibcq = &cq->ibcq; + struct ib_event event; + + if (type != XSC_EVENT_TYPE_CQ_ERROR) { + xsc_ib_err(dev, "Unexpected event type %d on CQ %06x\n", + type, xcq->cqn); + return; + } + + if (ibcq->event_handler) { + event.device = &dev->ib_dev; + event.event = IB_EVENT_CQ_ERR; + event.element.cq = ibcq; + ibcq->event_handler(&event, ibcq->cq_context); + } +} + +static void *get_cqe_from_buf(struct xsc_ib_cq_buf *buf, int n, int size) +{ + return xsc_buf_offset(&buf->buf, n * size); +} + +static void *get_cqe(struct xsc_ib_cq *cq, int n) +{ + return get_cqe_from_buf(&cq->buf, n, cq->xcq.cqe_sz); +} + +static void *get_sw_cqe(struct xsc_ib_cq *cq, int n) +{ + struct xsc_cqe *cqe; + + cqe = (struct xsc_cqe *)get_cqe(cq, n & (cq->ibcq.cqe - 1)); + + return ((cqe->owner & XSC_CQE_OWNER_MASK) ^ + !!(n & cq->ibcq.cqe)) ? NULL : cqe; +} + +static inline void handle_good_req(struct ib_wc *wc, + struct xsc_cqe *cqe, + u8 opcode) +{ + wc->opcode = xsc_cqe_opcode[opcode]; + if (opcode == XSC_OPCODE_RDMA_REQ_READ) + wc->byte_len = RD_LE_32(cqe->msg_len); + wc->status = IB_WC_SUCCESS; +} + +static void handle_responder(struct ib_wc *wc, struct xsc_cqe *cqe, + struct xsc_ib_qp *qp, u8 opcode) +{ + struct xsc_ib_wq *wq = &qp->rq; + u16 idx; + + wc->byte_len = RD_LE_32(cqe->msg_len); + wc->opcode = xsc_cqe_opcode[opcode]; + wc->status = IB_WC_SUCCESS; + + idx = wq->tail & (wq->wqe_cnt - 1); + wc->wr_id = wq->wrid[idx]; + ++wq->tail; +} + +static void *get_wqe(struct xsc_ib_qp *qp, int offset) +{ + return xsc_buf_offset(&qp->buf, offset); +} + +static void *get_recv_wqe(struct xsc_ib_qp *qp, int n) +{ + return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); +} + +static void *get_seg_wqe(void *first, int n) +{ + return first + (n << XSC_BASE_WQE_SHIFT); +} + +static void xsc_handle_rdma_mad_resp_recv(struct xsc_ib_cq *cq, + struct xsc_ib_qp **cur_qp, + struct ib_wc *wc, + struct xsc_cqe *cqe, + u8 opcode) +{ + struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device); + void *recv; + struct xsc_wqe_data_seg *data_seg; + struct iphdr *ip4h = NULL; + struct ipv6hdr *ip6h; + struct udphdr *udph; + struct ib_unpacked_eth *eth; + struct ib_unpacked_vlan *vlan; + struct ib_grh *grh; + struct ib_mad *mad; + struct rxe_bth *bth; + struct rxe_deth *deth; + unsigned int padding_sz = 0; + struct xsc_ib_wq *wq; + int idx; + u16 eth_type; + void *l3_start; + + wq = &(*cur_qp)->rq; + idx = wq->tail & (wq->wqe_cnt - 1); + + handle_responder(wc, cqe, *cur_qp, opcode); + + data_seg = get_seg_wqe(get_recv_wqe(*cur_qp, idx), 0); + recv = xsc_ib_recv_mad_sg_virt_addr(&dev->ib_dev, wc, data_seg->va); + + eth = (struct ib_unpacked_eth *)recv; + grh = (struct ib_grh *)recv; + if (eth->type == htons(ETH_P_8021Q)) { + vlan = (struct ib_unpacked_vlan *)(eth + 1); + eth_type = ntohs(vlan->type); + l3_start = vlan + 1; + + wc->vlan_id = ntohs(vlan->tag) & 0x0fff; + wc->sl = (ntohs(vlan->tag) >> 13) & 0x7; + wc->wc_flags |= IB_WC_WITH_VLAN; + } else { + eth_type =
ntohs(eth->type); + l3_start = eth + 1; + } + + if (eth_type == ETH_P_IP) { + ip4h = (struct iphdr *)l3_start; + udph = (struct udphdr *)(ip4h + 1); + } else { + ip6h = (struct ipv6hdr *)l3_start; + udph = (struct udphdr *)(ip6h + 1); + } + bth = (struct rxe_bth *)(udph + 1); + deth = (struct rxe_deth *)(bth + 1); + mad = (struct ib_mad *)(deth + 1); + + if (eth_type == ETH_P_IP) { + padding_sz = sizeof(*grh) - sizeof(*ip4h); + memmove((u8 *)(grh + 1) - sizeof(*ip4h), ip4h, sizeof(*ip4h)); + memset(grh, 0, padding_sz); + } else { + memmove(grh, ip6h, sizeof(*ip6h)); + } + memmove(grh + 1, mad, sizeof(*mad)); + + wc->wc_flags |= IB_WC_GRH; + + xsc_ib_dbg(dev, "recv cqe idx:%u, len:%u\n", wq->tail, wc->byte_len); + xsc_ib_info(dev, "qp[%u] recv MAD packet, msg_len=%u\n", (*cur_qp)->xqp.qpn, wc->byte_len); + wc->status = IB_WC_SUCCESS; +} + +static int xsc_poll_one(struct xsc_ib_cq *cq, + struct xsc_ib_qp **cur_qp, + struct ib_wc *wc) +{ + struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device); + struct xsc_core_qp *xqp; + struct xsc_ib_wq *wq; + u8 opcode; + u32 qpn; + int idx; + struct xsc_cqe *cqe; + u32 *p = NULL; + + cqe = get_sw_cqe(cq, cq->xcq.cons_index); + if (!cqe) + return -EAGAIN; + + ++cq->xcq.cons_index; + + /* Make sure we read CQ entry contents after we've checked the + * ownership bit. + */ + rmb(); + + p = (u32 *)cqe; + + qpn = cqe->qp_id; + qpn = le32_to_cpu(qpn); + if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { + /* We do not have to take the QP table lock here, + * because CQs will be locked while QPs are removed + * from the table. + */ + xqp = __xsc_qp_lookup(dev->xdev, qpn); + if (unlikely(!xqp)) { + xsc_ib_warn(dev, "CQE@CQ %d for unknown QPN %u\n", + cq->xcq.cqn, qpn); + return -EINVAL; + } + + *cur_qp = to_xibqp(xqp); + } + + memset(wc, 0, sizeof(*wc)); + wc->qp = &(*cur_qp)->ibqp; + opcode = xsc_get_cqe_opcode(cqe); + switch (opcode) { + case XSC_OPCODE_RDMA_REQ_SEND: + case XSC_OPCODE_RDMA_REQ_WRITE: + case XSC_OPCODE_RDMA_REQ_READ: + case XSC_OPCODE_RDMA_MAD_REQ_SEND: + wq = &(*cur_qp)->sq; + idx = cqe->wqe_id >> (wq->wqe_shift - XSC_BASE_WQE_SHIFT); + idx &= (wq->wqe_cnt - 1); + handle_good_req(wc, cqe, opcode); + wc->wr_id = wq->wrid[idx]; + wq->tail = wq->wqe_head[idx] + 1; + xsc_ib_dbg(dev, "wqeid:%u, wq tail:%u qpn:%u\n", idx, wq->tail, qpn); + wc->status = IB_WC_SUCCESS; + break; + case XSC_OPCODE_RDMA_RSP_RECV: + wq = &(*cur_qp)->rq; + handle_responder(wc, cqe, *cur_qp, opcode); + xsc_ib_dbg(dev, "recv cqe idx:%u, len:%u, qpn:%u\n", wq->tail, wc->byte_len, qpn); + wc->status = IB_WC_SUCCESS; + break; + + case XSC_OPCODE_RDMA_MAD_RSP_RECV: + xsc_ib_dbg(dev, "recv MAD, qpn:%u\n", qpn); + xsc_handle_rdma_mad_resp_recv(cq, cur_qp, wc, cqe, opcode); + break; + + default: + xsc_ib_err(dev, "completion error\n%08x %08x %08x %08x %08x %08x\n", + p[0], p[1], p[2], p[3], p[5], p[6]); + wc->status = IB_WC_GENERAL_ERR; + wc->wr_id = 0; + break; + } + + return 0; +} + +int xsc_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + struct xsc_ib_cq *cq = to_xcq(ibcq); + struct xsc_core_cq *xcq = &cq->xcq; + struct xsc_ib_qp *cur_qp = NULL; + int npolled = 0; + int err = 0; + unsigned long flags; + u32 next_cid; + + spin_lock_irqsave(&cq->lock, flags); + next_cid = xcq->cons_index; + + for (npolled = 0; npolled < num_entries; npolled++) { + err = xsc_poll_one(cq, &cur_qp, wc + npolled); + if (err) + break; + } + + /* make sure the CQEs are read out before updating the CI */ + rmb(); + + if (next_cid != xcq->cons_index) + xsc_cq_set_ci(xcq); + 
spin_unlock_irqrestore(&cq->lock, flags); + + return npolled; +} + +int xsc_cqe_is_empty(struct xsc_ib_cq *cq) +{ + struct xsc_cqe *cqe = get_sw_cqe(cq, cq->xcq.cons_index); + + if (!cqe) + return 1; + + return 0; +} + +int xsc_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) +{ +#ifdef MSIX_SUPPORT + union xsc_cq_doorbell db; + struct xsc_ib_cq *xcq = to_xcq(ibcq); + struct xsc_core_cq *cq = &xcq->xcq; + int ret = 0; + unsigned long irq_flags; + + spin_lock_irqsave(&xcq->lock, irq_flags); + db.val = 0; + db.cq_next_cid = cq->cons_index; + db.cq_id = cq->cqn; + if (flags & IB_CQ_NEXT_COMP) + db.arm = 0; + else if (flags & IB_CQ_SOLICITED) + db.arm = 1; /* arm next: 0, arm solicited: 1 */ + + if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && (!xsc_cqe_is_empty(xcq))) { + ret = 1; + goto out; + } + + /* make sure db.val is fully written before ringing the doorbell */ + wmb(); + writel(db.val, REG_ADDR(cq->dev, cq->arm_db)); +out: + spin_unlock_irqrestore(&xcq->lock, irq_flags); + return ret; +#else + if ((flags & IB_CQ_REPORT_MISSED_EVENTS)) + return 1; + return 0; +#endif +} + +static int alloc_cq_buf(struct xsc_ib_dev *dev, struct xsc_ib_cq_buf *buf, + int nent, int cqe_size) +{ + int err; + + err = xsc_buf_alloc(dev->xdev, nent * cqe_size, + PAGE_SIZE, &buf->buf); + if (err) + return err; + + buf->cqe_size = cqe_size; + + return 0; +} + +static void free_cq_buf(struct xsc_ib_dev *dev, struct xsc_ib_cq_buf *buf) +{ + xsc_buf_free(dev->xdev, &buf->buf); +} + +static int create_cq_user(struct xsc_ib_dev *dev, struct ib_udata *udata, + struct ib_ucontext *context, struct xsc_ib_cq *cq, + int entries, struct xsc_create_cq_mbox_in **cqb, + int *cqe_size, int *index, int *inlen) +{ + struct xsc_ib_create_cq ucmd; + int page_shift; + int npages; + int ncont; + int err; + int log_cq_sz; + int hw_npages; + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) + return -EFAULT; + + *cqe_size = ucmd.cqe_size; + + cq->buf.umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr, + entries * ucmd.cqe_size, + IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(cq->buf.umem)) { + err = PTR_ERR(cq->buf.umem); + return err; + } + + xsc_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, + &ncont, NULL); + if (ncont != npages) { + xsc_ib_dbg(dev, "bad page_shift:%d, ncont:%d\n", page_shift, ncont); + /* amber doesn't support compound pages */ + page_shift = PAGE_SHIFT; + ncont = npages; + xsc_ib_dbg(dev, "overwrite to page_shift:%d, ncont:%d\n", page_shift, ncont); + } + log_cq_sz = ilog2(entries); + hw_npages = DIV_ROUND_UP((1 << log_cq_sz) * sizeof(struct xsc_cqe), PAGE_SIZE_4K); + xsc_ib_info(dev, "addr 0x%llx, entries %d, size %u, npages %d, page_shift %d, ncont %d, hw_npages %d\n", + ucmd.buf_addr, entries, ucmd.cqe_size, npages, page_shift, ncont, hw_npages); + + *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * hw_npages; + *cqb = xsc_vzalloc(*inlen); + if (!*cqb) { + err = -ENOMEM; + goto err_umem; + } + xsc_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, hw_npages, true); + (*cqb)->ctx.pa_num = cpu_to_be16(hw_npages); + + return 0; + +err_umem: + ib_umem_release(cq->buf.umem); + return err; +} + +static void destroy_cq_user(struct xsc_ib_cq *cq, struct ib_udata *udata) +{ + ib_umem_release(cq->buf.umem); +} + +static int create_cq_kernel(struct xsc_ib_dev *dev, struct xsc_ib_cq *cq, + int entries, int cqe_size, + struct xsc_create_cq_mbox_in **cqb, + int *index, int *inlen) +{ + int err; + int i = 0; + struct xsc_cqe *cqe = NULL; + int hw_npages; + + cq->xcq.cqe_sz = cqe_size; + + err = alloc_cq_buf(dev, &cq->buf, entries,
cqe_size); + if (err) + return err; + + for (i = 0; i < entries; i++) { + cqe = (struct xsc_cqe *)get_cqe(cq, i); + cqe->owner = 1; + } + + hw_npages = DIV_ROUND_UP(entries * cqe_size, PAGE_SIZE_4K); + *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * hw_npages; + *cqb = xsc_vzalloc(*inlen); + if (!*cqb) { + err = -ENOMEM; + goto err_buf; + } + xsc_fill_page_array(&cq->buf.buf, (*cqb)->pas, hw_npages); + (*cqb)->ctx.pa_num = cpu_to_be16(hw_npages); + + return 0; + +err_buf: + free_cq_buf(dev, &cq->buf); + return err; +} + +static void destroy_cq_kernel(struct xsc_ib_dev *dev, struct xsc_ib_cq *cq) +{ + free_cq_buf(dev, &cq->buf); +} + +xsc_ib_create_cq_def() +{ + struct ib_device *ibdev = ibcq->device; + int entries = attr->cqe; + int vector = attr->comp_vector; + struct xsc_create_cq_mbox_in *cqb = NULL; + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct xsc_ib_cq *cq; + int index; + int inlen; + int cqe_size; + int irqn; + int err; + unsigned int eqn; + + entries = roundup_pow_of_two(entries); + + xsc_ib_info(dev, "entries:%d, vector:%d, max_cqes:%d\n", entries, vector, + dev->xdev->caps.max_cqes); + + if (entries > dev->xdev->caps.max_cqes) + entries = dev->xdev->caps.max_cqes; + cq = to_xcq(ibcq); + cq->ibcq.cqe = entries; + mutex_init(&cq->resize_mutex); + spin_lock_init(&cq->lock); + cq->resize_buf = NULL; + cq->resize_umem = NULL; + + if (udata) { + err = create_cq_user(dev, udata, NULL, cq, entries, + &cqb, &cqe_size, &index, &inlen); + if (err) + goto err_create; + } else { + cqe_size = sizeof(struct xsc_cqe); + err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, &index, &inlen); + if (err) + goto err_create; + } + + cq->cqe_size = cqe_size; + cqb->ctx.log_cq_sz = ilog2(entries); + cqb->ctx.glb_func_id = cpu_to_be16(dev->xdev->glb_func_id); + + err = xsc_vector2eqn(dev->xdev, vector, &eqn, &irqn); + if (err) + goto err_cqb; + + cqb->ctx.eqn = cpu_to_be16(eqn); + + err = xsc_core_create_cq(dev->xdev, &cq->xcq, cqb, inlen); + if (err) + goto err_cqb; + + xsc_ib_info(dev, "created cqn %d, vector=%d, cq_sz=%d, eqn=%d\n", + cq->xcq.cqn, vector, entries, eqn); + cq->xcq.irqn = irqn; + cq->xcq.comp = xsc_ib_cq_comp; + cq->xcq.event = xsc_ib_cq_event; + + if (udata) { + if (ib_copy_to_udata(udata, &cq->xcq.cqn, sizeof(__u32))) { + err = -EFAULT; + goto err_cmd; + } + } + + xsc_vfree(cqb); + + return 0; + +err_cmd: + xsc_core_destroy_cq(dev->xdev, &cq->xcq); + +err_cqb: + xsc_vfree(cqb); + if (udata) + destroy_cq_user(cq, udata); + else + destroy_cq_kernel(dev, cq); + +err_create: + return RET_VALUE(err); +} + +xsc_ib_destroy_cq_def() +{ + struct xsc_ib_dev *dev = to_mdev(cq->device); + struct xsc_ib_cq *xcq = to_xcq(cq); + + xsc_core_destroy_cq(dev->xdev, &xcq->xcq); + if (udata) + destroy_cq_user(xcq, udata); + else + destroy_cq_kernel(dev, xcq); + + return 0; +} + +static int is_equal_rsn(struct xsc_cqe *cqe, u32 rsn) +{ + u32 qpn = le32_to_cpu(cqe->qp_id); + + return rsn == qpn; +} + +void __xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 rsn) +{ + struct xsc_cqe *cqe, *dest; + u32 prod_index; + int nfreed = 0; + u8 owner_bit; + + if (!cq) + return; + + /* First we need to find the current producer index, so we + * know where to start cleaning from. It doesn't matter if HW + * adds new entries after this loop -- the QP we're worried + * about is already in RESET, so the new entries won't come + * from our QP and therefore don't need to be checked.
+ */ + for (prod_index = cq->xcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) + if (prod_index == cq->xcq.cons_index + cq->ibcq.cqe) + break; + + /* Now sweep backwards through the CQ, removing CQ entries + * that match our QP by copying older entries on top of them. + */ + while ((int)(--prod_index) - (int)cq->xcq.cons_index >= 0) { + cqe = (struct xsc_cqe *)get_cqe(cq, prod_index & (cq->ibcq.cqe - 1)); + if (is_equal_rsn(cqe, rsn)) { + ++nfreed; + } else if (nfreed) { + dest = (struct xsc_cqe *)get_cqe(cq, (prod_index + nfreed) & + (cq->ibcq.cqe - 1)); + owner_bit = dest->owner & XSC_CQE_OWNER_MASK; + memcpy(dest, cqe, cq->xcq.cqe_sz); + dest->owner = owner_bit | + (dest->owner & ~XSC_CQE_OWNER_MASK); + } + } + + if (nfreed) { + cq->xcq.cons_index += nfreed; + /* Make sure update of buffer contents is done before + * updating consumer index. + */ + wmb(); + xsc_cq_set_ci(&cq->xcq); + } +} + +void xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 qpn) +{ + if (!cq) + return; + + spin_lock_irq(&cq->lock); + __xsc_ib_cq_clean(cq, qpn); + spin_unlock_irq(&cq->lock); +} diff --git a/drivers/infiniband/hw/xsc/devx.c b/drivers/infiniband/hw/xsc/devx.c new file mode 100644 index 0000000000000000000000000000000000000000..fca43076bae1838296062a04a56b83072718ec0d --- /dev/null +++ b/drivers/infiniband/hw/xsc/devx.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +#include +#include +#include +#include +#include +#include "common/driver.h" +#include "xsc_ib.h" +#define UVERBS_MODULE_NAME xsc_ib +#include +#include "user.h" + +static struct xsc_ib_ucontext *devx_uattrs2uctx(struct uverbs_attr_bundle *attrs) +{ + return to_xucontext(ib_uverbs_get_ucontext(attrs)); +} + +static bool devx_is_general_cmd(void *in) +{ + struct xsc_inbox_hdr *hdr = + (struct xsc_inbox_hdr *)in; + u16 opcode = be16_to_cpu(hdr->opcode); + + switch (opcode) { + case XSC_CMD_OP_QUERY_HCA_CAP: + return true; + default: + return false; + } +} + +static int UVERBS_HANDLER(XSC_IB_METHOD_DEVX_OTHER)(struct uverbs_attr_bundle *attrs) +{ + struct xsc_ib_ucontext *c; + struct xsc_ib_dev *dev; + void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_IN); + int cmd_out_len = uverbs_attr_get_len(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_OUT); + void *cmd_out; + int err; + + c = devx_uattrs2uctx(attrs); + if (IS_ERR(c)) + return PTR_ERR(c); + dev = to_mdev(c->ibucontext.device); + + if (!devx_is_general_cmd(cmd_in)) + return -EINVAL; + + cmd_out = uverbs_zalloc(attrs, cmd_out_len); + if (IS_ERR(cmd_out)) + return PTR_ERR(cmd_out); + + err = xsc_cmd_exec(dev->xdev, cmd_in, + uverbs_attr_get_len(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_IN), + cmd_out, cmd_out_len); + if (err) + return err; + + return uverbs_copy_to(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out, cmd_out_len); +} + +DECLARE_UVERBS_NAMED_METHOD(XSC_IB_METHOD_DEVX_OTHER, + UVERBS_ATTR_PTR_IN(XSC_IB_ATTR_DEVX_OTHER_CMD_IN, + UVERBS_ATTR_MIN_SIZE(sizeof(struct xsc_inbox_hdr)), + UA_MANDATORY, + UA_ALLOC_AND_COPY), + UVERBS_ATTR_PTR_OUT(XSC_IB_ATTR_DEVX_OTHER_CMD_OUT, + UVERBS_ATTR_MIN_SIZE(sizeof(struct xsc_outbox_hdr)), + UA_MANDATORY)); + +DECLARE_UVERBS_GLOBAL_METHODS(XSC_IB_OBJECT_DEVX, + &UVERBS_METHOD(XSC_IB_METHOD_DEVX_OTHER)); + +const struct uverbs_object_tree_def *xsc_ib_get_devx_tree(void) +{ + return NULL; +} diff --git a/drivers/infiniband/hw/xsc/ib_peer_mem.h b/drivers/infiniband/hw/xsc/ib_peer_mem.h new file mode 100644 index 
0000000000000000000000000000000000000000..b955ac53bfde27503d6d6df3843bc2b78ef3152f --- /dev/null +++ b/drivers/infiniband/hw/xsc/ib_peer_mem.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#if !defined(IB_PEER_MEM_H) +#define IB_PEER_MEM_H + +#include "peer_mem.h" + +struct ib_peer_memory_statistics { + atomic64_t num_alloc_mrs; + atomic64_t num_dealloc_mrs; + atomic64_t num_reg_pages; + atomic64_t num_dereg_pages; + atomic64_t num_reg_bytes; + atomic64_t num_dereg_bytes; + unsigned long num_free_callbacks; +}; + +struct ib_ucontext; +struct ib_umem_ex; +struct invalidation_ctx; + +struct ib_peer_memory_client { + const struct peer_memory_client *peer_mem; + struct list_head core_peer_list; + int invalidation_required; + struct kref ref; + struct completion unload_comp; + /* lock is used via the invalidation flow */ + struct mutex lock; + struct list_head core_ticket_list; + u64 last_ticket; + struct ib_peer_memory_statistics stats; +}; + +enum ib_peer_mem_flags { + IB_PEER_MEM_ALLOW = 1, + IB_PEER_MEM_INVAL_SUPP = (1 << 1), +}; + +struct core_ticket { + unsigned long key; + void *context; + struct list_head ticket_list; +}; + +struct ib_peer_memory_client *ib_get_peer_client(struct ib_ucontext *context, unsigned long addr, + size_t size, unsigned long peer_mem_flags, + void **peer_client_context); + +void ib_put_peer_client(struct ib_peer_memory_client *ib_peer_client, + void *peer_client_context); + +int ib_peer_create_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, + struct ib_umem_ex *umem, + struct invalidation_ctx **invalidation_ctx); + +void ib_peer_destroy_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, + struct invalidation_ctx *invalidation_ctx); + +int ib_get_peer_private_data(struct ib_ucontext *context, __u64 peer_id, + char *peer_name); +void ib_put_peer_private_data(struct ib_ucontext *context); + +#endif diff --git a/drivers/infiniband/hw/xsc/ib_umem_ex.c b/drivers/infiniband/hw/xsc/ib_umem_ex.c new file mode 100644 index 0000000000000000000000000000000000000000..b2d57a885b65d41bf300a0b4855c60f89a07bacc --- /dev/null +++ b/drivers/infiniband/hw/xsc/ib_umem_ex.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +#include +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +#include "ib_peer_mem.h" +#endif + +#include +#include "ib_umem_ex.h" + +#if defined(IB_CORE_UMEM_EX_V1) +#define get_mm(umem_ctx) ((umem_ctx)->mm) +#elif defined(IB_CORE_UMEM_EX_V2) +#define get_mm(umem_ctx) ((umem_ctx)->owning_mm) +#endif + +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) +static struct ib_umem_ex *peer_umem_get(struct ib_peer_memory_client *ib_peer_mem, + struct ib_umem_ex *umem_ex, unsigned long addr, + int dmasync, unsigned long peer_mem_flags) +{ + int ret; + const struct peer_memory_client *peer_mem = ib_peer_mem->peer_mem; + struct invalidation_ctx *invalidation_ctx = NULL; + struct ib_umem *umem = (struct ib_umem *)umem_ex; + + umem_ex->ib_peer_mem = ib_peer_mem; + if (peer_mem_flags & IB_PEER_MEM_INVAL_SUPP) { + ret = ib_peer_create_invalidation_ctx(ib_peer_mem, umem_ex, &invalidation_ctx); + if (ret) + goto end; + } + + /* + * We always request write permissions to the pages, to force breaking of any CoW + * during the registration of the MR. 
For read-only MRs we use the "force" flag to + * indicate that CoW breaking is required but the registration should not fail if + * referencing read-only areas. + */ + ret = peer_mem->get_pages(addr, umem->length, + 1, !umem->writable, + &umem->sg_head, + umem_ex->peer_mem_client_context, + invalidation_ctx ? + invalidation_ctx->context_ticket : 0); + if (ret) + goto out; + + ret = peer_mem->dma_map(&umem->sg_head, + umem_ex->peer_mem_client_context, + umem->context->device->dma_device, + dmasync, + &umem->nmap); + if (ret) + goto put_pages; + + atomic64_add(umem->nmap, &ib_peer_mem->stats.num_reg_pages); + atomic64_add(umem->nmap * BIT(PAGE_SHIFT), &ib_peer_mem->stats.num_reg_bytes); + atomic64_inc(&ib_peer_mem->stats.num_alloc_mrs); + return umem_ex; + +put_pages: + peer_mem->put_pages(&umem->sg_head, umem_ex->peer_mem_client_context); +out: + if (invalidation_ctx) + ib_peer_destroy_invalidation_ctx(ib_peer_mem, invalidation_ctx); +end: + ib_put_peer_client(ib_peer_mem, umem_ex->peer_mem_client_context); + /* mmdrop() naming differs across kernel versions */ + mmdrop(get_mm(umem)); + kfree(umem_ex); + return ERR_PTR(ret); +} +#endif + +struct ib_umem_ex *ib_umem_ex(struct ib_umem *umem) +{ + struct ib_umem_ex *ret_umem; + + if (!umem) + return ERR_PTR(-EINVAL); + +#ifdef CONFIG_INFINIBAND_PEER_MEMORY + ret_umem = (struct ib_umem_ex *)umem; +#else + ret_umem = kzalloc(sizeof(*ret_umem), GFP_KERNEL); + if (!ret_umem) + return ERR_PTR(-ENOMEM); + + ret_umem->umem = *umem; + kfree(umem); +#endif + return ret_umem; +} + +struct ib_umem_ex *ib_client_umem_get(struct ib_ucontext *context, + unsigned long addr, + size_t size, int access, + int dmasync, u8 *peer_exists) +{ +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct ib_peer_memory_client *peer_mem_client; + struct ib_umem_ex *umem_ex; + struct ib_umem *umem; + + /* + * If the combination of the addr and size requested for this memory + * region causes an integer overflow, return error.
+ */ + if (((addr + size) < addr) || + PAGE_ALIGN(addr + size) < (addr + size)) + return ERR_PTR(-EINVAL); + + if (!can_do_mlock()) + return ERR_PTR(-EPERM); + + umem_ex = kzalloc(sizeof(*umem_ex), GFP_KERNEL); + if (!umem_ex) + return ERR_PTR(-ENOMEM); + umem = &umem_ex->umem; + + umem->context = context; + umem->length = size; + umem->address = addr; + umem->writable = ib_access_writable(access); + get_mm(umem) = current->mm; + +#if defined(IB_CORE_UMEM_EX_V1) + umem->odp_data = NULL; +#endif + + mmgrab(get_mm(umem)); + + peer_mem_client = ib_get_peer_client(context, addr, size, + IB_PEER_MEM_ALLOW | IB_PEER_MEM_INVAL_SUPP, + &umem_ex->peer_mem_client_context); + if (peer_mem_client) { + *peer_exists = 1; + umem->hugetlb = 0; + return peer_umem_get(peer_mem_client, umem_ex, addr, dmasync, + IB_PEER_MEM_ALLOW | IB_PEER_MEM_INVAL_SUPP); + } + + /* no peer-memory client claimed this range: undo the mm grab and fail */ + mmdrop(get_mm(umem)); + kfree(umem_ex); + return ERR_PTR(-ENOMEM); +#else + return NULL; +#endif +} + +void ib_umem_ex_release(struct ib_umem_ex *umem_ex) +{ + struct ib_umem *umem = (struct ib_umem *)umem_ex; +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct ib_peer_memory_client *ib_peer_mem = umem_ex->ib_peer_mem; + const struct peer_memory_client *peer_mem; + struct invalidation_ctx *invalidation_ctx; + + if (ib_peer_mem) { + peer_mem = ib_peer_mem->peer_mem; + invalidation_ctx = umem_ex->invalidation_ctx; + + if (invalidation_ctx) + ib_peer_destroy_invalidation_ctx(ib_peer_mem, invalidation_ctx); + + peer_mem->dma_unmap(&umem->sg_head, + umem_ex->peer_mem_client_context, + umem->context->device->dma_device); + peer_mem->put_pages(&umem->sg_head, + umem_ex->peer_mem_client_context); + atomic64_add(umem->nmap, &ib_peer_mem->stats.num_dereg_pages); + atomic64_add(umem->nmap * BIT(PAGE_SHIFT), + &ib_peer_mem->stats.num_dereg_bytes); + atomic64_inc(&ib_peer_mem->stats.num_dealloc_mrs); + ib_put_peer_client(ib_peer_mem, umem_ex->peer_mem_client_context); + kfree(umem_ex); + } else { + /* plain kernel ib_umem release */ + ib_umem_release(umem); + } +#else + ib_umem_release(umem); +#endif +} + +int ib_client_umem_activate_invalidation_notifier(struct ib_umem_ex *umem_ex, + umem_invalidate_func_t func, + void *cookie) +{ +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct invalidation_ctx *invalidation_ctx = umem_ex->invalidation_ctx; + int ret = 0; + + mutex_lock(&umem_ex->ib_peer_mem->lock); + if (invalidation_ctx->peer_invalidated) { + pr_err("ib_umem_activate_invalidation_notifier: pages were invalidated by peer\n"); + ret = -EINVAL; + goto end; + } + invalidation_ctx->func = func; + invalidation_ctx->cookie = cookie; + /* from this point on, any pending invalidation can be delivered */ +end: + mutex_unlock(&umem_ex->ib_peer_mem->lock); + return ret; +#else + return 0; +#endif +} diff --git a/drivers/infiniband/hw/xsc/ib_umem_ex.h b/drivers/infiniband/hw/xsc/ib_umem_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..034d1c55e5aa647a8f50a4e06b032d074e451143 --- /dev/null +++ b/drivers/infiniband/hw/xsc/ib_umem_ex.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved.
+ */ + +#ifndef XSC_IB_UMEM_EX_H +#define XSC_IB_UMEM_EX_H + +#include + +struct ib_umem_ex; +struct invalidation_ctx; + +/* ib_umem_ex: an ib_umem extended with peer-memory support */ +struct ib_umem_ex { + struct ib_umem umem; +#ifndef CONFIG_INFINIBAND_PEER_MEMORY + struct ib_peer_memory_client *ib_peer_mem; + struct invalidation_ctx *invalidation_ctx; + void *peer_mem_client_context; +#endif +}; + +/* expand an ib_umem into an ib_umem_ex by reallocation */ +struct ib_umem_ex *ib_umem_ex(struct ib_umem *umem); + +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +typedef void (*umem_invalidate_func_t)(void *invalidation_cookie, + struct ib_umem_ex *umem_ex, unsigned long addr, size_t size); + +struct invalidation_ctx { + struct ib_umem_ex *umem_ex; + u64 context_ticket; + umem_invalidate_func_t func; + void *cookie; + int peer_callback; + int inflight_invalidation; + int peer_invalidated; + struct completion comp; +}; +#endif + +struct ib_umem_ex *ib_client_umem_get(struct ib_ucontext *context, + unsigned long addr, size_t size, int access, + int dmasync, u8 *peer_exists); + +void ib_umem_ex_release(struct ib_umem_ex *umem_ex); + +int ib_client_umem_activate_invalidation_notifier(struct ib_umem_ex *umem_ex, + umem_invalidate_func_t func, + void *cookie); +#endif diff --git a/drivers/infiniband/hw/xsc/main.c b/drivers/infiniband/hw/xsc/main.c new file mode 100644 index 0000000000000000000000000000000000000000..9381b9fc426649e52039621c21b3d789035cf912 --- /dev/null +++ b/drivers/infiniband/hw/xsc/main.c @@ -0,0 +1,1201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifdef HAVE_GENERIC_KMAP_TYPE +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/xsc_cmd.h" +#include "common/driver.h" +#include "common/xsc_lag.h" + +#include +#include +#include + +#include "user.h" +#include "xsc_ib.h" +#include "xsc_rdma_ctrl.h" + +#define DRIVER_NAME "xsc_ib" +#define DRIVER_VERSION "1.0" +#define DRIVER_RELDATE "Jan 2022" + +MODULE_DESCRIPTION("Yunsilicon Amber HCA IB driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRIVER_VERSION); + +static char xsc_version[] = + DRIVER_NAME ": Yunsilicon InfiniBand driver v" + DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; + +static int xsc_ib_query_device(struct ib_device *ibdev, + struct ib_device_attr *props, + struct ib_udata *udata) +{ + struct xsc_ib_dev *dev = to_mdev(ibdev); + int max_rq_sg; + int max_sq_sg; + u64 flags; + struct xsc_ib_query_device_resp resp; + size_t resp_len; + u64 max_tso; + int err = -ENOMEM; + union xsc_ib_fw_ver fw_ver; + + memset(&resp, 0, sizeof(resp)); + memset(props, 0, sizeof(*props)); + + resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length); + /* check params */ + if (udata->outlen && udata->outlen < resp_len) + return -EINVAL; + + if (udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen)) + return -EINVAL; + + resp.response_length = resp_len; + + fw_ver.data = 0; + fw_ver.s.ver_major = dev->xdev->fw_version_major; + fw_ver.s.ver_minor = dev->xdev->fw_version_minor; + fw_ver.s.ver_patch = dev->xdev->fw_version_patch; + fw_ver.s.ver_tweak = dev->xdev->fw_version_tweak; + props->fw_ver = fw_ver.data; + + props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | + IB_DEVICE_PORT_ACTIVE_EVENT | + IB_DEVICE_SYS_IMAGE_GUID | + IB_DEVICE_RC_RNR_NAK_GEN; + props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK; +
props->kernel_cap_flags |= IBK_LOCAL_DMA_LKEY; + + flags = dev->xdev->caps.flags; + if (flags & XSC_DEV_CAP_FLAG_BAD_PKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; + if (flags & XSC_DEV_CAP_FLAG_BAD_QKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; + if (flags & XSC_DEV_CAP_FLAG_APM) + props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; + if (flags & XSC_DEV_CAP_FLAG_XRC) + props->device_cap_flags |= IB_DEVICE_XRC; + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; + + props->page_size_cap = dev->xdev->caps.min_page_sz; + props->max_mr_size = (1 << dev->xdev->caps.log_max_mtt) * PAGE_SIZE; + props->max_qp = 1 << dev->xdev->caps.log_max_qp; + props->max_qp_wr = (32 * 1024); /* hack for GPFS */ + max_rq_sg = dev->xdev->caps.max_rq_desc_sz / sizeof(struct xsc_wqe_data_seg); + max_sq_sg = (dev->xdev->caps.max_sq_desc_sz - sizeof(struct xsc_wqe_ctrl_seg_2)) / + sizeof(struct xsc_wqe_data_seg_2); + + props->max_send_sge = dev->xdev->caps.send_ds_num - XSC_CTRL_SEG_NUM - + XSC_RADDR_SEG_NUM; + props->max_recv_sge = dev->xdev->caps.recv_ds_num; + props->max_sge_rd = 1;/*max sge per read wqe*/ + props->max_cq = 1 << dev->xdev->caps.log_max_cq; + props->max_cqe = dev->xdev->caps.max_cqes - 1; + props->max_mr = 1 << dev->xdev->caps.log_max_mkey; + props->max_pd = 1 << dev->xdev->caps.log_max_pd; + props->max_qp_rd_atom = dev->xdev->caps.max_ra_req_qp; + props->max_qp_init_rd_atom = dev->xdev->caps.max_ra_res_qp; + props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; + props->max_srq = + dev->xdev->caps.log_max_srq ? (1 << dev->xdev->caps.log_max_srq) : 0; + props->max_srq_wr = dev->xdev->caps.max_srq_wqes - 1; + props->max_srq_sge = dev->xdev->caps.log_max_srq ? (max_rq_sg - 1) : 0; + props->max_fast_reg_page_list_len = (unsigned int)-1; + props->local_ca_ack_delay = dev->xdev->caps.local_ca_ack_delay; + props->atomic_cap = dev->xdev->caps.flags & XSC_DEV_CAP_FLAG_ATOMIC ? + IB_ATOMIC_HCA : IB_ATOMIC_NONE; + props->masked_atomic_cap = IB_ATOMIC_HCA; + props->max_mcast_grp = + dev->xdev->caps.log_max_mcg ? (1 << dev->xdev->caps.log_max_mcg) : 0; + props->max_mcast_qp_attach = dev->xdev->caps.max_qp_mcg; + props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * + props->max_mcast_grp; + + props->sys_image_guid = dev->xdev->board_info->guid; + props->vendor_id = dev->xdev->pdev->vendor; + props->vendor_part_id = dev->xdev->pdev->device; + props->hw_ver = ((dev->xdev->chip_ver_l & 0xffff) << 16) | + (dev->xdev->hotfix_num & 0xffff); + props->max_pkeys = 0x80; + props->max_wq_type_rq = 1 << dev->xdev->caps.log_max_qp; + + props->hca_core_clock = dev->xdev->caps.hca_core_clock * 1000;//KHz + props->rss_caps.max_rwq_indirection_tables = + dev->xdev->caps.max_rwq_indirection_tables; + props->rss_caps.max_rwq_indirection_table_size = + dev->xdev->caps.max_rwq_indirection_table_size; + props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET; + + /*response tso_caps extend param*/ + if (field_avail(typeof(resp), tso_caps, udata->outlen)) { + max_tso = dev->xdev->caps.log_max_tso ? 
(1 << dev->xdev->caps.log_max_tso) : 0; + if (max_tso) { + resp.tso_caps.max_tso = max_tso; + resp.tso_caps.supported_qpts |= 1 << IB_QPT_RAW_PACKET; + resp.response_length += sizeof(resp.tso_caps); + } + } + + /*response rss_caps extend param*/ + if (field_avail(typeof(resp), rss_caps, udata->outlen)) { + resp.rss_caps.rx_hash_function = XSC_RX_HASH_FUNC_TOEPLITZ; + resp.rss_caps.rx_hash_fields_mask = + XSC_RX_HASH_SRC_IPV4 | + XSC_RX_HASH_DST_IPV4 | + XSC_RX_HASH_SRC_IPV6 | + XSC_RX_HASH_DST_IPV6 | + XSC_RX_HASH_SRC_PORT_TCP | + XSC_RX_HASH_DST_PORT_TCP | + XSC_RX_HASH_SRC_PORT_UDP | + XSC_RX_HASH_DST_PORT_UDP | + XSC_RX_HASH_INNER; + resp.response_length += sizeof(resp.rss_caps); + } + + /*response packet pacing caps*/ + if (field_avail(typeof(resp), packet_pacing_caps, udata->outlen)) { + resp.packet_pacing_caps.qp_rate_limit_max = + dev->xdev->caps.qp_rate_limit_max; + resp.packet_pacing_caps.qp_rate_limit_min = + dev->xdev->caps.qp_rate_limit_min; + resp.packet_pacing_caps.supported_qpts |= 1 << IB_QPT_RAW_PACKET; + + resp.response_length += sizeof(resp.packet_pacing_caps); + } + + /*copy response data to user*/ + if (udata->outlen) { + err = ib_copy_to_udata(udata, &resp, resp.response_length); + if (err) { + xsc_ib_err(dev, "copy response info to udata fail,err=%d\n", err); + return err; + } + } + + return 0; +} + +void xsc_calc_link_info(struct xsc_core_device *xdev, + struct ib_port_attr *props) +{ + switch (xsc_get_link_speed(xdev)) { + case MODULE_SPEED_10G: + props->active_speed = XSC_RDMA_LINK_SPEED_10GB; + props->active_width = 1; + break; + case MODULE_SPEED_25G: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 1; + break; + case MODULE_SPEED_40G_R4: + props->active_speed = XSC_RDMA_LINK_SPEED_10GB; + props->active_width = 2; + break; + case MODULE_SPEED_50G_R: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 1; + break; + case MODULE_SPEED_50G_R2: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 1; + break; + case MODULE_SPEED_100G_R2: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 2; + break; + case MODULE_SPEED_100G_R4: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 2; + break; + case MODULE_SPEED_200G_R4: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 2; + break; + case MODULE_SPEED_200G_R8: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 4; + break; + case MODULE_SPEED_400G_R8: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 4; + break; + default: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 1; + break; + } +} + +static enum rdma_link_layer xsc_ib_port_link_layer(struct ib_device *ibdev, u32 port) +{ + return IB_LINK_LAYER_ETHERNET; +} + +int xsc_ib_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props) +{ + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct net_device *ndev = dev->netdev; + struct xsc_core_device *xdev = dev->xdev; + + if (port < 1 || port > xdev->caps.num_ports) { + xsc_ib_warn(dev, "invalid port number %d\n", port); + return -EINVAL; + } + + memset(props, 0, sizeof(*props)); + + props->state = IB_PORT_ACTIVE; + props->max_mtu = IB_MTU_4096; + props->active_mtu = min(props->max_mtu, xsc_net_to_ib_mtu(ndev->mtu)); + props->gid_tbl_len = 256; + props->port_cap_flags = 0x4010000; + props->max_msg_sz = 0x40000000; + props->bad_pkey_cntr = 0; + props->qkey_viol_cntr = 0; + props->pkey_tbl_len = 1; + 
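+	/* IB subnet management attributes do not apply to a RoCE-only device */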
+	props->lid = 0;
+	props->sm_lid = 0;
+	props->lmc = 0;
+	props->max_vl_num = 0;
+	props->sm_sl = 0;
+	props->subnet_timeout = 0;
+	props->init_type_reply = 0;
+	if (!is_support_rdma(xdev)) {
+		props->active_width = 1;
+		props->active_speed = XSC_RDMA_LINK_SPEED_25GB;
+	} else {
+		xsc_calc_link_info(xdev, props);
+	}
+
+	props->phys_state = netif_carrier_ok(ndev) ? XSC_RDMA_PHY_STATE_LINK_UP :
+		XSC_RDMA_PHY_STATE_DISABLED;
+	return 0;
+}
+
+const struct xsc_gid xsc_gid_zero;
+
+static int xsc_ib_query_gid(struct ib_device *ibdev, u32 port_num,
+			    int index, union ib_gid *gid)
+{
+	struct xsc_ib_dev *dev = to_mdev(ibdev);
+	struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl;
+
+	/* Ignore port_num */
+	memset(gid, 0, sizeof(*gid));
+	if (index >= sgid_tbl->max)
+		return -EINVAL;
+
+	memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
+
+	return 0;
+}
+
+static int xsc_ib_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+	int index = 0;
+	struct xsc_ib_dev *dev = to_mdev(attr->device);
+	struct xsc_gid *gid_raw = (struct xsc_gid *)&attr->gid;
+	struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl;
+
+	if (!sgid_tbl)
+		return -EINVAL;
+
+	if (!sgid_tbl->count)
+		return -ENOMEM;
+
+	for (index = 0; index < sgid_tbl->max; index++) {
+		if (!memcmp(&sgid_tbl->tbl[index], gid_raw, sizeof(*gid_raw)))
+			break;
+	}
+
+	if (index == sgid_tbl->max)
+		return 0;
+
+	memcpy(&sgid_tbl->tbl[index], &xsc_gid_zero, sizeof(xsc_gid_zero));
+	sgid_tbl->count--;
+	xsc_ib_info(dev, "Del gid from index:%u, count:%u\n", index, sgid_tbl->count);
+
+	return 0;
+}
+
+int xsc_ib_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+	int i = 0;
+	u32 free_idx = 0;
+	struct xsc_ib_dev *dev = to_mdev(attr->device);
+	struct xsc_gid *gid_raw = (struct xsc_gid *)&attr->gid;
+	struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl;
+
+	if (!sgid_tbl)
+		return -EINVAL;
+
+	if (sgid_tbl->count == sgid_tbl->max)
+		return -ENOMEM;
+
+	free_idx = sgid_tbl->max;
+	for (i = 0; i < sgid_tbl->max; i++) {
+		if (!memcmp(&sgid_tbl->tbl[i], gid_raw, sizeof(*gid_raw))) {
+			return 0;
+		} else if (!memcmp(&sgid_tbl->tbl[i], &xsc_gid_zero, sizeof(xsc_gid_zero)) &&
+			   free_idx == sgid_tbl->max) {
+			free_idx = i;
+		}
+	}
+
+	if (free_idx == sgid_tbl->max)
+		return -ENOMEM;
+
+	memcpy(&sgid_tbl->tbl[free_idx], gid_raw, sizeof(*gid_raw));
+	sgid_tbl->count++;
+	xsc_ib_info(dev, "Add gid to index:%u, count:%u, max:%u\n", free_idx, sgid_tbl->count,
+		    sgid_tbl->max);
+
+	return 0;
+}
+
+static int xsc_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+			     u16 *pkey)
+{
+	*pkey = 0xffff;
+	return 0;
+}
+
+struct xsc_reg_node_desc {
+	u8 desc[64];
+};
+
+static int xsc_ib_modify_device(struct ib_device *ibdev, int mask,
+				struct ib_device_modify *props)
+{
+	struct xsc_ib_dev *dev = to_mdev(ibdev);
+	struct xsc_reg_node_desc in;
+	struct xsc_reg_node_desc out;
+	int err;
+
+	/* Node descriptor updates are not wired up yet; report success.
+	 * The code below is currently unreachable and looks to be kept
+	 * for future enablement.
+	 */
+	return 0;
+
+	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+		return -EOPNOTSUPP;
+
+	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
+		return 0;
+
+	/*
+	 * If possible, pass node desc to FW, so it can generate
+	 * a 144 trap. If cmd fails, just ignore.
+	 */
+	memcpy(&in, props->node_desc, 64);
+	err = xsc_core_access_reg(dev->xdev, &in, sizeof(in), &out,
+				  sizeof(out), XSC_REG_NODE_DESC, 0, 1);
+	if (err)
+		return err;
+
+	memcpy(ibdev->node_desc, props->node_desc, 64);
+
+	return 0;
+}
+
+static int xsc_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
+			      struct ib_port_modify *props)
+{
+	struct xsc_ib_dev *dev = to_mdev(ibdev);
+	struct ib_port_attr attr;
+	u32 tmp;
+	int err;
+
+	/* Port capability mask changes are not wired up yet; report success.
+	 * The code below is currently unreachable and looks to be kept
+	 * for future enablement.
+	 */
+	return 0;
+
+	mutex_lock(&dev->cap_mask_mutex);
+
+	err = xsc_ib_query_port(ibdev, port, &attr);
+	if (err)
+		goto out;
+
+	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
+		~props->clr_port_cap_mask;
+
+	err = xsc_set_port_caps(dev->xdev, port, tmp);
+
+out:
+	mutex_unlock(&dev->cap_mask_mutex);
+	return err;
+}
+
+xsc_ib_alloc_ucontext_def()
+{
+	struct ib_device *ibdev = uctx->device;
+	struct xsc_ib_dev *dev = to_mdev(ibdev);
+	struct xsc_ib_alloc_ucontext_req req;
+	struct xsc_ib_alloc_ucontext_resp resp = {};
+	struct xsc_ib_ucontext *context;
+	int err;
+
+	if (!dev->ib_active)
+		return RET_VALUE(-EAGAIN);
+
+	err = ib_copy_from_udata(&req, udata, sizeof(req));
+	if (err)
+		return RET_VALUE(err);
+
+	resp.qp_tab_size = 1 << dev->xdev->caps.log_max_qp;
+	resp.cache_line_size = L1_CACHE_BYTES;
+	resp.max_sq_desc_sz = dev->xdev->caps.max_sq_desc_sz;
+	resp.max_rq_desc_sz = dev->xdev->caps.max_rq_desc_sz;
+	resp.max_send_wqebb = dev->xdev->caps.max_wqes;
+	resp.max_recv_wr = dev->xdev->caps.max_wqes;
+	resp.qpm_tx_db = dev->xdev->regs.tx_db;
+	resp.qpm_rx_db = dev->xdev->regs.rx_db;
+	resp.cqm_next_cid_reg = dev->xdev->regs.complete_reg;
+	resp.cqm_armdb = dev->xdev->regs.complete_db;
+	resp.send_ds_num = dev->xdev->caps.send_ds_num;
+	resp.recv_ds_num = dev->xdev->caps.recv_ds_num;
+	resp.cmds_supp_uhw |= XSC_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
+
+	context = to_xucontext(uctx);
+
+	INIT_LIST_HEAD(&context->db_page_list);
+	mutex_init(&context->db_page_mutex);
+
+	resp.num_ports = dev->xdev->caps.num_ports;
+	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+	if (err)
+		goto out_ctx;
+
+	return 0;
+
+out_ctx:
+	return RET_VALUE(err);
+}
+
+xsc_ib_dealloc_ucontext_def()
+{
+}
+
+static int xsc_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+{
+	struct xsc_ib_dev *dev = to_mdev(ibcontext->device);
+	struct xsc_core_device *xdev = dev->xdev;
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	resource_size_t reg_base;
+	resource_size_t reg_size = vma->vm_end - vma->vm_start;
+
+	xsc_core_dbg(xdev, "offset:0x%lx", offset);
+
+	if (offset == (xdev->regs.tx_db & PAGE_MASK))
+		reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) +
+			   (xdev->regs.tx_db & PAGE_MASK);
+	else if (offset == (xdev->regs.rx_db & PAGE_MASK))
+		reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) +
+			   (xdev->regs.rx_db & PAGE_MASK);
+	else if (offset == (xdev->regs.complete_reg & PAGE_MASK))
+		reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) +
+			   (xdev->regs.complete_reg & PAGE_MASK);
+	else if (offset == (xdev->regs.complete_db & PAGE_MASK))
+		reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) +
+			   (xdev->regs.complete_db & PAGE_MASK);
+	else
+		return -EINVAL;
+
+	xsc_core_dbg(xdev, "regbase:0x%llx", reg_base);
+
+	reg_base = xsc_core_is_pf(xdev) ? reg_base - 0xA0000000 : reg_base;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	return remap_pfn_range(vma, vma->vm_start, reg_base >> PAGE_SHIFT,
+			       reg_size, vma->vm_page_prot);
+}
+
+xsc_ib_alloc_pd_def()
+{
+	struct ib_device *ibdev = ibpd->device;
+	struct xsc_ib_alloc_pd_resp resp;
+	struct xsc_ib_pd *pd;
+	int err;
+
+	pd = to_mpd(ibpd);
+
+	err = xsc_core_alloc_pd(to_mdev(ibdev)->xdev, &pd->pdn);
+	if (err)
+		return RET_VALUE(err);	/* the PD object is owned by the IB core */
+
+	if (udata) {
+		resp.pdn = pd->pdn;
+		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+			xsc_core_dealloc_pd(to_mdev(ibdev)->xdev, pd->pdn);
+
+			return RET_VALUE(-EFAULT);
+		}
+	} else {
+		pd->pa_lkey = 0;
+	}
+
+	return 0;
+}
+
+xsc_ib_dealloc_pd_def()
+{
+	struct xsc_ib_dev *mdev = to_mdev(pd->device);
+	struct xsc_ib_pd *mpd = to_mpd(pd);
+
+	xsc_core_dealloc_pd(mdev->xdev, mpd->pdn);
+
+	return 0;
+}
+
+static int xsc_port_immutable(struct ib_device *ibdev, u32 port_num,
+			      struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = ib_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+	immutable->max_mad_size = IB_MGMT_MAD_SIZE * 2;
+
+	return 0;
+}
+
+static void _xsc_get_netdev(struct xsc_ib_dev *dev)
+{
+	struct net_device *netdev = (struct net_device *)(dev->xdev->netdev);
+
+	dev->netdev = netdev;
+}
+
+static struct net_device *xsc_get_netdev(struct ib_device *ibdev, u32 port_num)
+{
+	struct xsc_ib_dev *xsc_ib_dev = to_mdev(ibdev);
+	struct net_device *dev = xsc_ib_dev->netdev;
+	struct xsc_core_device *xdev = xsc_ib_dev->xdev;
+
+	if (dev) {
+		xsc_board_lag_lock(xdev);
+		if (xsc_lag_is_roce(xdev)) {
+			struct net_device *upper = NULL;
+
+			rcu_read_lock();
+			upper = netdev_master_upper_dev_get_rcu(dev);
+			if (upper) {
+				struct net_device *active;
+
+				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
+				if (active)
+					dev = active;
+			}
+			rcu_read_unlock();
+		}
+		dev_hold(dev);
+		xsc_board_lag_unlock(xdev);
+	}
+
+	return dev;
+}
+
+void xsc_get_guid(const u8 *dev_addr, u8 *guid)
+{
+	u8 mac[ETH_ALEN];
+
+	/* MAC-48 to EUI-64 mapping */
+	memcpy(mac, dev_addr, ETH_ALEN);
+	guid[0] = mac[0] ^ 2;
+	guid[1] = mac[1];
+	guid[2] = mac[2];
+	guid[3] = 0xff;
+	guid[4] = 0xfe;
+	guid[5] = mac[3];
+	guid[6] = mac[4];
+	guid[7] = mac[5];
+}
+
+static int init_node_data(struct xsc_ib_dev *dev)
+{
+	strscpy(dev->ib_dev.node_desc, "xsc_node_desc", sizeof(dev->ib_dev.node_desc));
+
+	if (unlikely(!dev->netdev->dev_addr))
+		_xsc_get_netdev(dev);
+	xsc_get_guid(dev->netdev->dev_addr, (u8 *)&dev->ib_dev.node_guid);
+
+	return 0;
+}
+
+void xsc_core_event(struct xsc_core_device *xdev, enum xsc_dev_event event,
+		    unsigned long param)
+{
+	struct xsc_priv *priv = &xdev->priv;
+	struct xsc_device_context *dev_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->ctx_lock, flags);
+
+	/* After xsc_detach_device, dev_ctx->intf is still set and dev_ctx is
+	 * still on priv->ctx_list, so notify every interface that registered
+	 * an event handler.
+	 */
+	list_for_each_entry(dev_ctx, &priv->ctx_list, list) {
+		if (dev_ctx->intf->event)
+			dev_ctx->intf->event(xdev, dev_ctx->context, event, param);
+	}
+	spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+static void xsc_ib_event(struct xsc_core_device *dev, void *context,
+			 enum xsc_dev_event event, unsigned long data)
+{
+	struct xsc_ib_dev *ibdev = (struct xsc_ib_dev *)context;
+	struct ib_event ibev;
+	u8 port = 0;
+
+	switch (event) {
+	case XSC_DEV_EVENT_SYS_ERROR:
+		ibdev->ib_active = false;
+		ibev.event = IB_EVENT_DEVICE_FATAL;
+		break;
+
+	case XSC_DEV_EVENT_PORT_UP:
+		ibev.event = IB_EVENT_PORT_ACTIVE;
+		port = *(u8 *)data;
+		break;
+
+	case XSC_DEV_EVENT_PORT_DOWN:
+		ibev.event = IB_EVENT_PORT_ERR;
+		port = *(u8 *)data;
+		break;
+
+	case XSC_DEV_EVENT_PORT_INITIALIZED:
+		/* not used by ULPs */
+		return;
+
+	case XSC_DEV_EVENT_LID_CHANGE:
+		ibev.event = IB_EVENT_LID_CHANGE;
+		port = *(u8 *)data;
+		break;
+
+	case XSC_DEV_EVENT_PKEY_CHANGE:
+		ibev.event = IB_EVENT_PKEY_CHANGE;
+		port = *(u8 *)data;
+		break;
+
+	case XSC_DEV_EVENT_GUID_CHANGE:
+		ibev.event = IB_EVENT_GID_CHANGE;
+		port = *(u8 *)data;
+		break;
+
+	case XSC_DEV_EVENT_CLIENT_REREG:
+		ibev.event = IB_EVENT_CLIENT_REREGISTER;
+		port = *(u8 *)data;
+		break;
+	}
+
+	ibev.device = &ibdev->ib_dev;
+	ibev.element.port_num = port;
+
+	if (ibdev->ib_active)
+		ib_dispatch_event(&ibev);
+}
+
+static int get_port_caps(struct xsc_ib_dev *dev)
+{
+	struct ib_device_attr *dprops = NULL;
+	struct ib_port_attr *pprops = NULL;
+	int err = -ENOMEM;
+	u32 port;
+	/* zeroed udata, used to prevent a crash when the module is loaded */
+	struct ib_udata uhw = {.inlen = 0, .outlen = 0};
+
+	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
+	if (!pprops)
+		goto out;
+
+	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
+	if (!dprops)
+		goto out;
+
+	err = xsc_ib_query_device(&dev->ib_dev, dprops, &uhw);
+	if (err) {
+		xsc_ib_warn(dev, "query_device failed %d\n", err);
+		goto out;
+	}
+
+	for (port = 1; port <= dev->xdev->caps.num_ports; port++) {
+		err = xsc_ib_query_port(&dev->ib_dev, port, pprops);
+		if (err) {
+			xsc_ib_warn(dev, "query_port %d failed %d\n", port, err);
+			break;
+		}
+		dev->xdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
+		dev->xdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
+		xsc_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
+			   dprops->max_pkeys, pprops->gid_tbl_len);
+	}
+
+out:
+	kfree(pprops);
+	kfree(dprops);
+
+	return err;
+}
+
+static int xsc_create_dev_res(struct xsc_ib_res *ib_res)
+{
+	struct xsc_ib_dev *dev;
+
+	dev = container_of(ib_res, struct xsc_ib_dev, ib_res);
+	ib_res->sgid_tbl.max = dev->xdev->caps.port[0].gid_table_len;
+
+	ib_res->sgid_tbl.tbl = kcalloc(ib_res->sgid_tbl.max, sizeof(struct xsc_gid),
+				       GFP_KERNEL);
+
+	if (!ib_res->sgid_tbl.tbl)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void xsc_destroy_dev_res(struct xsc_ib_res *ib_res)
+{
+	kfree(ib_res->sgid_tbl.tbl);
+}
+
+static int populate_specs_root(struct xsc_ib_dev *dev)
+{
+	const struct uverbs_object_tree_def **trees =
+		(const struct uverbs_object_tree_def **)dev->driver_trees;
+	size_t num_trees = 0;
+
+	trees[num_trees++] = xsc_ib_get_devx_tree();
+	WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees));
+	trees[num_trees] = NULL;
+
+	return 0;
+}
+
+static void crc_table_init(struct xsc_ib_dev *dev)
+{
+	u32 c, i, j;
+
+	/* build the standard CRC-32 lookup table (reflected polynomial 0xedb88320) */
+	for (i = 0; i < 256; i++) {
+		c = i;
+		for (j = 0; j < 8; j++) {
+			if (c & 1)
+				c = 0xedb88320L ^ (c >> 1);
+			else
+				c = c >> 1;
+		}
+		dev->crc_32_table[i] = c;
+	}
+}
+
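+/* the firmware version is reported as vMAJOR.MINOR.PATCH, with "+tweak"
+ * appended when the tweak number is non-zero
+ */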
+static void xsc_ib_get_dev_fw_str(struct ib_device *ibdev, char *str)
+{
+	struct xsc_core_device *dev = to_mdev(ibdev)->xdev;
+	u8 ver_major = dev->fw_version_major;
+	u8 ver_minor = dev->fw_version_minor;
+	u16 ver_patch = dev->fw_version_patch;
+	u32 ver_tweak = dev->fw_version_tweak;
+
+	if (ver_tweak == 0) {
+		snprintf(str, IB_FW_VERSION_NAME_MAX, "v%u.%u.%u",
+			 ver_major, ver_minor, ver_patch);
+	} else {
+		snprintf(str, IB_FW_VERSION_NAME_MAX, "v%u.%u.%u+%u",
+			 ver_major, ver_minor, ver_patch, ver_tweak);
+	}
+}
+
+static void xsc_ib_dev_setting(struct xsc_ib_dev *dev)
+{
+	dev->ib_dev.ops.owner = THIS_MODULE;
+	dev->ib_dev.ops.uverbs_abi_ver = XSC_IB_UVERBS_ABI_VERSION;
+	dev->ib_dev.ops.driver_id = (enum rdma_driver_id)RDMA_DRIVER_XSC5;
+	dev->ib_dev.ops.uverbs_no_driver_id_binding = 1;
+	dev->ib_dev.ops.query_device = xsc_ib_query_device;
+	dev->ib_dev.ops.query_port = xsc_ib_query_port;
+	dev->ib_dev.ops.query_gid = xsc_ib_query_gid;
+	dev->ib_dev.ops.add_gid = xsc_ib_add_gid;
+	dev->ib_dev.ops.del_gid = xsc_ib_del_gid;
+	dev->ib_dev.ops.query_pkey = xsc_ib_query_pkey;
+
+	dev->ib_dev.ops.modify_device = xsc_ib_modify_device;
+	dev->ib_dev.ops.modify_port = xsc_ib_modify_port;
+	dev->ib_dev.ops.alloc_ucontext = xsc_ib_alloc_ucontext;
+	dev->ib_dev.ops.dealloc_ucontext = xsc_ib_dealloc_ucontext;
+	dev->ib_dev.ops.mmap = xsc_ib_mmap;
+
+	dev->ib_dev.ops.alloc_pd = xsc_ib_alloc_pd;
+	dev->ib_dev.ops.dealloc_pd = xsc_ib_dealloc_pd;
+	dev->ib_dev.ops.create_ah = xsc_ib_create_ah;
+	dev->ib_dev.ops.query_ah = xsc_ib_query_ah;
+	dev->ib_dev.ops.destroy_ah = xsc_ib_destroy_ah;
+
+	dev->ib_dev.ops.get_link_layer = xsc_ib_port_link_layer;
+	dev->ib_dev.ops.get_netdev = xsc_get_netdev;
+
+	dev->ib_dev.ops.create_qp = xsc_ib_create_qp;
+	dev->ib_dev.ops.modify_qp = xsc_ib_modify_qp;
+	dev->ib_dev.ops.query_qp = xsc_ib_query_qp;
+	dev->ib_dev.ops.destroy_qp = xsc_ib_destroy_qp;
+	dev->ib_dev.ops.post_send = xsc_ib_post_send;
+	dev->ib_dev.ops.post_recv = xsc_ib_post_recv;
+	dev->ib_dev.ops.create_cq = xsc_ib_create_cq;
+	dev->ib_dev.ops.destroy_cq = xsc_ib_destroy_cq;
+	dev->ib_dev.ops.poll_cq = xsc_ib_poll_cq;
+	dev->ib_dev.ops.req_notify_cq = xsc_ib_arm_cq;
+	dev->ib_dev.ops.get_dma_mr = xsc_ib_get_dma_mr;
+	dev->ib_dev.ops.reg_user_mr = xsc_ib_reg_user_mr; /* optional */
+	dev->ib_dev.ops.dereg_mr = xsc_ib_dereg_mr;
+	dev->ib_dev.ops.alloc_mr = xsc_ib_alloc_mr;
+	dev->ib_dev.ops.map_mr_sg = xsc_ib_map_mr_sg;
+
+	dev->ib_dev.ops.get_port_immutable = xsc_port_immutable;
+
+	dev->ib_dev.ops.drain_sq = xsc_ib_drain_sq;
+	dev->ib_dev.ops.drain_rq = xsc_ib_drain_rq;
+	dev->ib_dev.ops.get_dev_fw_str = xsc_ib_get_dev_fw_str;
+
+	/* INIT_RDMA_OBJ_SIZE() expands to a designated ".size_xxx =" member
+	 * initializer, so each line below is an ordinary member assignment.
+	 */
+	dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_ah, xsc_ib_ah, ibah);
+	dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_cq, xsc_ib_cq, ibcq);
+	dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_pd, xsc_ib_pd, ibpd);
+	dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_ucontext, xsc_ib_ucontext, ibucontext);
+	dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_qp, xsc_ib_qp, ibqp);
+}
+
+static void xsc_get_port_state(struct net_device *ndev, enum xsc_dev_event *ev)
+{
+	*ev = XSC_DEV_EVENT_PORT_DOWN;
+	if (netif_running(ndev) && netif_carrier_ok(ndev))
+		*ev = XSC_DEV_EVENT_PORT_UP;
+}
+
+static int xsc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	struct xsc_ib_dev *ibdev = container_of(this, struct xsc_ib_dev, nb);
+	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+	enum xsc_dev_event ev;
+	u8 port = 1;
+
+	if (ndev != ibdev->netdev)
+		goto done;
+
+	xsc_ib_info(ibdev, "netdev notify event:%ld\n", event);
+	switch (event) {
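+	/* translate netdev state transitions into IB port UP/DOWN events */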
+	case NETDEV_CHANGE:
+	case NETDEV_UP:
+	case NETDEV_DOWN:
+		xsc_get_port_state(ibdev->netdev, &ev);
+		xsc_ib_event(ibdev->xdev, ibdev, ev, (unsigned long)&port);
+		break;
+	default:
+		break;
+	}
+done:
+	return NOTIFY_DONE;
+}
+
+static int xsc_register_netdev_notifier(struct xsc_ib_dev *ibdev)
+{
+	ibdev->nb.notifier_call = xsc_netdev_event;
+	return register_netdevice_notifier(&ibdev->nb);
+}
+
+static int xsc_unregister_netdev_notifier(struct xsc_ib_dev *ibdev)
+{
+	return unregister_netdevice_notifier(&ibdev->nb);
+}
+
+static int init_one(struct xsc_core_device *xdev,
+		    struct xsc_ib_dev **m_ibdev)
+{
+	struct xsc_ib_dev *dev;
+	int err;
+
+	pr_info_once("%s", xsc_version);
+
+	dev = (struct xsc_ib_dev *)ib_alloc_device(xsc_ib_dev, ib_dev);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->xdev = xdev;
+	xdev->event = xsc_core_event;
+	_xsc_get_netdev(dev);
+	err = get_port_caps(dev);
+	if (err)
+		goto err_free;
+	if (!xdev->caps.msix_enable)
+		dev->num_comp_vectors = 1;
+	else
+		dev->num_comp_vectors = xdev->dev_res->eq_table.num_comp_vectors;
+
+	if (xsc_lag_is_roce(xdev))
+		strscpy(dev->ib_dev.name, "xscale_bond_%d", IB_DEVICE_NAME_MAX);
+	else
+		strscpy(dev->ib_dev.name, "xscale_%d", IB_DEVICE_NAME_MAX);
+
+	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+	dev->ib_dev.local_dma_lkey = 0xFF;
+	dev->num_ports = xdev->caps.num_ports;
+	dev->ib_dev.phys_port_cnt = dev->num_ports;
+	dev->ib_dev.num_comp_vectors = dev->num_comp_vectors;
+	dev->ib_dev.dev.parent = &xdev->pdev->dev;
+	xsc_ib_dev_setting(dev);
+	dev->cm_dscp = DSCP_PCP_UNSET;
+	dev->cm_pcp = DSCP_PCP_UNSET;
+	dev->force_pcp = DSCP_PCP_UNSET;
+	dev->force_dscp = DSCP_PCP_UNSET;
+
+	dev->ib_dev.uverbs_cmd_mask =
+		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+		(1ull << IB_USER_VERBS_CMD_REG_MR) |
+		(1ull << IB_USER_VERBS_CMD_REREG_MR) |
+		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
+		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
+		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
+
+	init_node_data(dev);
+
+	mutex_init(&dev->cap_mask_mutex);
+	spin_lock_init(&dev->mr_lock);
+
+	err = xsc_create_dev_res(&dev->ib_res);
+	if (err)
+		goto err_free;
+
+	crc_table_init(dev);
+
+	populate_specs_root(dev);
+
+	xsc_reg_local_dma_mr(xdev);
+
+	err = ib_register_device(&dev->ib_dev, dev->ib_dev.name, dev->xdev->device);
+	if (err)
+		goto err_rsrc;
+
+	rdma_roce_rescan_device(&dev->ib_dev);
+	dev->ib_active = true;
+	*m_ibdev = dev;
+
+	xdev->xsc_ib_dev = dev;
+
+	xsc_register_netdev_notifier(dev);
+
+	xsc_counters_init(&dev->ib_dev, xdev);
+
+	xsc_priv_dev_init(&dev->ib_dev, xdev);
+
+	xsc_rtt_sysfs_init(&dev->ib_dev, xdev);
+
+	xsc_ib_sysfs_init(&dev->ib_dev, xdev);
+
+	return 0;
+
+err_rsrc:
+	xsc_destroy_dev_res(&dev->ib_res);
+
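+	/* fall through: the ib_device allocation is released below */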
+err_free: + ib_dealloc_device((struct ib_device *)dev); + + return err; +} + +static void remove_one(struct xsc_core_device *xdev, void *intf_ctx) +{ + struct xsc_ib_dev *dev = (struct xsc_ib_dev *)intf_ctx; + + xsc_rtt_sysfs_fini(xdev); + xsc_ib_sysfs_fini(&dev->ib_dev, xdev); + xsc_priv_dev_fini(&dev->ib_dev, xdev); + xsc_counters_fini(&dev->ib_dev, xdev); + xsc_unregister_netdev_notifier(dev); + ib_unregister_device(&dev->ib_dev); + ib_dealloc_device(&dev->ib_dev); +} + +static void init_iommu_state(struct xsc_ib_dev *xdev) +{ + if (xdev) { + struct iommu_domain *domain; + + xdev->iommu_state = XSC_IB_IOMMU_MAP_DISABLE; + domain = iommu_get_domain_for_dev(xdev->ib_dev.dma_device); + if (domain) { + if (domain->type & __IOMMU_DOMAIN_DMA_API) + xdev->iommu_state = XSC_IB_IOMMU_MAP_NORMAL; + } else { + /* try to allocate dma memory, if dma address is not equal to phys address, + * the iommu map is enabled, but iommu domain is unknown. + */ + dma_addr_t dma_addr; + + void *tmp = dma_alloc_coherent(xdev->ib_dev.dma_device, PAGE_SIZE, + &dma_addr, GFP_KERNEL); + if (tmp) { + if (virt_to_phys(tmp) != dma_addr) + xdev->iommu_state = XSC_IB_IOMMU_MAP_UNKNOWN_DOMAIN; + dma_free_coherent(xdev->ib_dev.dma_device, PAGE_SIZE, + tmp, dma_addr); + } + } + + if (xdev->iommu_state) + xsc_ib_dbg(xdev, "ibdev supports iommu dma map, state=%d\n", + xdev->iommu_state); + else + xsc_ib_dbg(xdev, "ibdev does not support iommu dma map\n"); + } +} + +static bool xsc_need_create_ib_device(struct xsc_core_device *dev) +{ + if (xsc_get_roce_lag_xdev(dev) == dev) + return true; + + return false; +} + +static void *xsc_add(struct xsc_core_device *xpdev) +{ + struct xsc_ib_dev *m_ibdev = NULL; + int ret = -1; + + if (!xsc_need_create_ib_device(xpdev)) + return NULL; + + pr_info("add rdma driver\n"); + + ret = init_one(xpdev, &m_ibdev); + if (ret) { + pr_err("xsc ib dev add fail, ret = %d\n", ret); + return NULL; + } + + init_iommu_state(m_ibdev); + + return m_ibdev; +} + +static void xsc_remove(struct xsc_core_device *xpdev, void *context) +{ + pr_info("remove rdma driver\n"); + remove_one(xpdev, context); +} + +static struct xsc_interface xsc_interface = { + .add = xsc_add, + .remove = xsc_remove, + .event = xsc_ib_event, + .protocol = XSC_INTERFACE_PROTOCOL_IB, +}; + +int xsc_ib_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) +{ + pr_info("xsc ib driver recv %lu event\n", action); + + if (exist_incomplete_qp_flush()) { + xsc_set_exit_flag(); + return NOTIFY_OK; + } + + xsc_remove_rdma_driver(); + + return NOTIFY_OK; +} + +struct notifier_block xsc_ib_nb = { + .notifier_call = xsc_ib_reboot_event_handler, + .next = NULL, + .priority = 2, +}; + +void xsc_remove_rdma_driver(void) +{ + xsc_rdma_ctrl_fini(); + xsc_unregister_interface(&xsc_interface); + xsc_priv_unregister_chrdev_region(); +} + +static int __init xsc_ib_init(void) +{ + int ret; + + ret = xsc_priv_alloc_chrdev_region(); + if (ret) + goto out; + + ret = xsc_register_interface(&xsc_interface); + if (ret) { + xsc_priv_unregister_chrdev_region(); + goto out; + } + + ret = xsc_rdma_ctrl_init(); + if (ret != 0) { + pr_err("failed to register port control node\n"); + xsc_unregister_interface(&xsc_interface); + xsc_priv_unregister_chrdev_region(); + goto out; + } + + register_reboot_notifier(&xsc_ib_nb); + + return 0; +out: + return ret; +} + +static void __exit xsc_ib_cleanup(void) +{ + unregister_reboot_notifier(&xsc_ib_nb); + xsc_remove_rdma_driver(); +} + +module_init(xsc_ib_init); +module_exit(xsc_ib_cleanup); diff --git 
a/drivers/infiniband/hw/xsc/mem.c b/drivers/infiniband/hw/xsc/mem.c
new file mode 100644
index 0000000000000000000000000000000000000000..cf258aa8ea51a46989d93f8f637cb3d33b206912
--- /dev/null
+++ b/drivers/infiniband/hw/xsc/mem.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include
+#include
+#include "xsc_ib.h"
+
+static inline int xsc_count_trailing_zeros(unsigned long x)
+{
+#define COUNT_TRAILING_ZEROS_0 (-1)
+
+	if (sizeof(x) == 4)
+		/* ffs() is 1-based, so subtract 1 to get the zero-based
+		 * trailing-zero count; ffs(0) - 1 also yields -1 as below
+		 */
+		return ffs(x) - 1;
+	else
+		return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0;
+}
+
+int xsc_find_chunk_cont_0(struct xsc_pa_chunk *chunk,
+			  int is_first,
+			  int is_last)
+{
+	static const int max_count = sizeof(int) << 3;
+	dma_addr_t pa, end_pa;
+	u64 va, end_va;
+	size_t length;
+	int start_count, end_count;
+	int va_start_count, va_end_count;
+
+	pa = chunk->pa;
+	va = chunk->va;
+	length = chunk->length;
+	end_pa = pa + length;
+	end_va = va + length;
+	start_count = max_count;
+	end_count = max_count;
+
+	if (!is_first) {
+		start_count = xsc_count_trailing_zeros((unsigned long)pa);
+		va_start_count = xsc_count_trailing_zeros(va);
+		start_count = min_t(int, start_count, va_start_count);
+	}
+
+	if (!is_last) {
+		end_count = xsc_count_trailing_zeros((unsigned long)end_pa);
+		va_end_count = xsc_count_trailing_zeros(end_va);
+		end_count = min_t(int, end_count, va_end_count);
+	}
+
+	return start_count > end_count ? end_count : start_count;
+}
+
+int xsc_find_best_pgsz(struct ib_umem *umem,
+		       unsigned long pgsz_bitmap,
+		       unsigned long virt,
+		       int *npages,
+		       int *shift,
+		       u64 **pas)
+{
+	struct scatterlist *sg;
+	unsigned long va;
+	dma_addr_t pa;
+	struct xsc_pa_chunk *chunk, *tmp;
+	struct list_head chunk_list;
+	int i;
+	int chunk_cnt;
+	int min_count_0 = sizeof(int) << 3;
+	int count_0;
+	int is_first = 0, is_end = 0;
+	size_t pgsz;
+	u64 mask;
+	int err = 0;
+	int pa_index;
+	u64 chunk_pa;
+	int chunk_npages;
+	unsigned long page_shift = PAGE_SHIFT;
+
+	pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, 0);
+
+	va = (virt >> page_shift) << page_shift;
+
+	INIT_LIST_HEAD(&chunk_list);
+	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+	if (!chunk) {
+		err = -ENOMEM;
+		goto err_alloc;
+	}
+	list_add_tail(&chunk->list, &chunk_list);
+
+	/* coalesce the umem scatterlist into physically contiguous chunks */
+	chunk_cnt = 1;
+	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
+		pa = sg_dma_address(sg);
+		if (i == 0) {
+			chunk->va = va;
+			chunk->pa = pa;
+			chunk->length = sg_dma_len(sg);
+			va += chunk->length;
+			continue;
+		}
+
+		if (pa == chunk->pa + chunk->length) {
+			chunk->length += sg_dma_len(sg);
+			va += sg_dma_len(sg);
+		} else {
+			chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+			if (!chunk) {
+				err = -ENOMEM;
+				goto err_alloc;
+			}
+			chunk->va = va;
+			chunk->pa = pa;
+			chunk->length = sg_dma_len(sg);
+			va += chunk->length;
+			list_add_tail(&chunk->list, &chunk_list);
+			chunk_cnt++;
+		}
+	}
+
+	i = 0;
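+	/* the usable page size is bounded by the worst-aligned chunk boundary */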
+	list_for_each_entry(chunk, &chunk_list, list) {
+		is_first = (i == 0 ? 1 : 0);
+		is_end = (i == chunk_cnt - 1 ? 1 : 0);
+		count_0 = xsc_find_chunk_cont_0(chunk, is_first, is_end);
+		if (count_0 < min_count_0)
+			min_count_0 = count_0;
+		i++;
+	}
+
+	pgsz_bitmap &= GENMASK(min_count_0, 0);
+	pgsz = rounddown_pow_of_two(pgsz_bitmap);
+	*shift = ilog2(pgsz);
+	*npages = 0;
+
+	if (chunk_cnt == 1) {
+		list_for_each_entry(chunk, &chunk_list, list) {
+			mask = GENMASK(*shift - 1, min_t(int, page_shift, *shift));
+			*npages += DIV_ROUND_UP(chunk->length + (virt & mask), pgsz);
+			*pas = vmalloc(*npages * sizeof(u64));
+			if (!*pas) {
+				err = -ENOMEM;
+				goto err_alloc;
+			}
+
+			chunk_pa = chunk->pa - (virt & mask);
+			for (i = 0; i < *npages; i++)
+				(*pas)[i] = chunk_pa + i * pgsz;
+		}
+	} else {
+		list_for_each_entry(chunk, &chunk_list, list) {
+			*npages += DIV_ROUND_UP(chunk->length, pgsz);
+		}
+
+		*pas = vmalloc(*npages * sizeof(u64));
+		if (!*pas) {
+			err = -ENOMEM;
+			goto err_alloc;
+		}
+
+		pa_index = 0;
+		list_for_each_entry(chunk, &chunk_list, list) {
+			chunk_npages = DIV_ROUND_UP(chunk->length, pgsz);
+			chunk_pa = chunk->pa;
+			for (i = 0; i < chunk_npages; i++) {
+				if (pa_index == 0) {
+					mask = GENMASK(*shift - 1,
+						       min_t(int, page_shift, *shift));
+					chunk_pa -= (virt & mask);
+				}
+				(*pas)[pa_index] = chunk_pa + i * pgsz;
+
+				pa_index++;
+			}
+		}
+	}
+
+err_alloc:
+	list_for_each_entry_safe(chunk, tmp, &chunk_list, list) {
+		list_del(&chunk->list);
+		kfree(chunk);
+	}
+	return err;
+}
+
+/**
+ * __xsc_ib_cont_pages - scan a umem for runs of contiguous compound pages
+ * @umem: umem object to scan
+ * @addr: ib virtual address requested by the user
+ * @max_page_shift: upper bound on the page shift, 0 for no limit
+ * @count: number of PAGE_SIZE pages covered by umem
+ * @shift: page shift for the compound pages found in the region
+ * @ncont: number of compound pages
+ * @order: log2 of the number of compound pages
+ */
+void __xsc_ib_cont_pages(struct ib_umem *umem, u64 addr,
+			 unsigned long max_page_shift,
+			 int *count, int *shift,
+			 int *ncont, int *order)
+{
+	unsigned long tmp;
+	unsigned long m;
+	u64 base = ~0, p = 0;
+	u64 len, pfn;
+	int i = 0;
+	struct scatterlist *sg;
+	int entry;
+	unsigned long page_shift = PAGE_SHIFT;
+
+	addr = addr >> page_shift;
+	tmp = (unsigned long)addr;
+	m = find_first_bit(&tmp, BITS_PER_LONG);
+	if (max_page_shift)
+		m = min_t(unsigned long, max_page_shift - page_shift, m);
+	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, entry) {
+		len = sg_dma_len(sg) >> page_shift;
+		pfn = sg_dma_address(sg) >> page_shift;
+		if (base + p != pfn) {
+			/* If either the offset or the new
+			 * base are unaligned update m
+			 */
+			tmp = (unsigned long)(pfn | p);
+			if (!IS_ALIGNED(tmp, 1 << m))
+				m = find_first_bit(&tmp, BITS_PER_LONG);
+
+			base = pfn;
+			p = 0;
+		}
+
+		p += len;
+		i += len;
+	}
+
+	if (i) {
+		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
+
+		if (order)
+			*order = ilog2(roundup_pow_of_two(i) >> m);
+
+		*ncont = DIV_ROUND_UP(i, (1 << m));
+	} else {
+		m = 0;
+
+		if (order)
+			*order = 0;
+
+		*ncont = 0;
+	}
+	*shift = page_shift + m;
+	*count = i;
+}
+
+void xsc_ib_cont_pages(struct ib_umem *umem, u64 addr,
+		       int *count, int *shift,
+		       int *ncont, int *order)
+{
+	/* no limit for page_shift */
+	__xsc_ib_cont_pages(umem, addr, 0, count, shift, ncont, order);
+}
+
+void __xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem,
+			   int page_shift, size_t offset, size_t num_pages,
+			   __be64 *pas, int access_flags, bool need_to_divide)
+{
+	unsigned long umem_page_shift = PAGE_SHIFT;
+	int shift = page_shift - umem_page_shift;
+	int mask = (1 << shift) - 1;
+	int i = 0;
+	int k, idx;
+	u64 cur = 0;
+	u64 base;
+	int len;
+	struct scatterlist *sg;
+	int entry;
+
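+	/* walk the DMA-mapped scatterlist and emit one big-endian address per page */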
+	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, entry) {
+		if (need_to_divide)
+			len = sg_dma_len(sg) >> PAGE_SHIFT_4K;
+		else
+			len = sg_dma_len(sg) >> umem_page_shift;
+		base = sg_dma_address(sg);
+
+		/* Skip elements below offset */
+		if (i + len < offset << shift) {
+			i += len;
+			continue;
+		}
+
+		/* Skip pages below offset */
+		if (i < offset << shift) {
+			k = (offset << shift) - i;
+			i = offset << shift;
+		} else {
+			k = 0;
+		}
+
+		for (; k < len; k++) {
+			if (!(i & mask)) {
+				if (need_to_divide)
+					cur = base + (k << PAGE_SHIFT_4K);
+				else
+					cur = base + (k << umem_page_shift);
+				cur |= access_flags;
+				idx = (i >> shift) - offset;
+
+				pas[idx] = cpu_to_be64(cur);
+				xsc_ib_dbg(dev, "pas[%d] 0x%llx\n",
+					   i >> shift, be64_to_cpu(pas[idx]));
+			}
+			i++;
+
+			/* Stop after num_pages reached */
+			if (i >> shift >= offset + num_pages)
+				return;
+		}
+	}
+}
+
+void xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem,
+			 int page_shift, __be64 *pas, int npages, bool need_to_divide)
+{
+	__xsc_ib_populate_pas(dev, umem, page_shift, 0,
+			      npages, pas, 0, need_to_divide);
+}
+
+int xsc_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
+{
+	u64 page_size;
+	u64 page_mask;
+	u64 off_size;
+	u64 off_mask;
+	u64 buf_off;
+
+	page_size = 1 << page_shift;
+	page_mask = page_size - 1;
+	buf_off = addr & page_mask;
+	off_size = page_size >> 6;
+	off_mask = off_size - 1;
+
+	if (buf_off & off_mask)
+		return -EINVAL;
+
+	*offset = buf_off >> ilog2(off_size);
+	return 0;
+}
diff --git a/drivers/infiniband/hw/xsc/mr.c b/drivers/infiniband/hw/xsc/mr.c
new file mode 100644
index 0000000000000000000000000000000000000000..2dddd3b6f7166f82f633b4197295a0a19ca7e3c8
--- /dev/null
+++ b/drivers/infiniband/hw/xsc/mr.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include "common/xsc_cmd.h" +#include +#include "ib_umem_ex.h" +#include "xsc_ib.h" + +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +static void xsc_invalidate_umem(void *invalidation_cookie, + struct ib_umem_ex *umem, + unsigned long addr, size_t size); +#endif + +enum { + DEF_CACHE_SIZE = 10, +}; + +struct ib_mr *xsc_ib_get_dma_mr(struct ib_pd *pd, int acc) +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_core_device *xdev = dev->xdev; + struct xsc_register_mr_mbox_in *in; + struct xsc_register_mr_request *req; + struct xsc_ib_mr *mr; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_free; + } + + req = &in->req; + req->acc = convert_access(acc); + req->va_base = 0; + req->map_en = !(XSC_MPT_MAP_EN); + + err = xsc_core_create_mkey(xdev, &mr->mmr); + if (err) + goto err_in; + req->mkey = cpu_to_be32(mr->mmr.key); + err = xsc_core_register_mr(xdev, &mr->mmr, in, sizeof(*in)); + if (err) + goto err_reg_mr; + kfree(in); + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->umem = NULL; + + return &mr->ibmr; +err_reg_mr: + xsc_core_destroy_mkey(xdev, &mr->mmr); +err_in: + kfree(in); + +err_free: + kfree(mr); + + return ERR_PTR(err); +} + +void xsc_fill_pas(int npages, u64 *pas, __be64 *req_pas) +{ + int i; + + for (i = 0; i < npages; i++) + req_pas[i] = cpu_to_be64(pas[i]); +} + +static struct xsc_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, + u64 length, struct ib_umem *umem, + int npages, u64 *pas, int page_shift, + int access_flags) +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_register_mr_mbox_in *in; + struct xsc_ib_mr *mr; + int inlen; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) { + err = -ENOMEM; + goto err_0; + } + + inlen = sizeof(*in) + sizeof(*in->req.pas) * npages; + in = xsc_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto err_1; + } + err = xsc_core_create_mkey(dev->xdev, &mr->mmr); + if (err) { + xsc_ib_warn(dev, "create mkey failed\n"); + goto err_2; + } + + xsc_fill_pas(npages, pas, in->req.pas); + + in->req.acc = convert_access(access_flags); + in->req.pa_num = cpu_to_be32(npages); + in->req.pdn = cpu_to_be32(to_mpd(pd)->pdn); + in->req.va_base = cpu_to_be64(virt_addr); + in->req.map_en = XSC_MPT_MAP_EN; + in->req.len = cpu_to_be32((u32)length); + in->req.page_mode = (page_shift == XSC_PAGE_SHIFT_4K ? XSC_PAGE_MODE_4K : + (page_shift == XSC_PAGE_SHIFT_64K ? XSC_PAGE_MODE_64K : + (page_shift == XSC_PAGE_SHIFT_2M ? 
XSC_PAGE_MODE_2M : XSC_PAGE_MODE_1G))); + in->req.mkey = cpu_to_be32(mr->mmr.key); + err = xsc_core_register_mr(dev->xdev, &mr->mmr, in, inlen); + if (err) { + xsc_ib_warn(dev, "register mr failed, err = %d\n", err); + goto err_reg_mr; + } + mr->umem = umem; + xsc_vfree(in); + vfree(pas); + + xsc_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key); + + return mr; +err_reg_mr: + xsc_core_destroy_mkey(dev->xdev, &mr->mmr); +err_2: + xsc_vfree(in); +err_1: + kfree(mr); +err_0: + vfree(pas); + + return ERR_PTR(err); +} + +struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata) +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_ib_mr *mr = NULL; + struct ib_umem_ex *umem_ex; + struct ib_umem *umem; + int page_shift; + int npages; + u64 *pas; + int err; + struct ib_peer_memory_client *ib_peer_mem = NULL; + struct xsc_ib_peer_id *xsc_ib_peer_id = NULL; + + xsc_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n", + start, virt_addr, length); +#ifdef CONFIG_INFINIBAND_PEER_MEMORY + umem = ib_umem_get_peer(&dev->ib_dev, start, length, + access_flags, IB_PEER_MEM_INVAL_SUPP); +#else + umem = ib_umem_get(&dev->ib_dev, start, length, access_flags); +#endif + if (IS_ERR(umem)) { + // check client peer memory +#ifdef CONFIG_INFINIBAND_PEER_MEMORY + xsc_ib_warn(dev, "umem get failed\n"); + return (void *)umem; +#else + u8 peer_exists = 0; + + umem_ex = ib_client_umem_get(pd->uobject->context, + start, length, access_flags, 0, &peer_exists); + if (!peer_exists) { + xsc_ib_dbg(dev, "umem get failed\n"); + return (void *)umem; + } + ib_peer_mem = umem_ex->ib_peer_mem; + xsc_ib_peer_id = kzalloc(sizeof(*xsc_ib_peer_id), GFP_KERNEL); + if (!xsc_ib_peer_id) { + err = -ENOMEM; + goto error; + } + init_completion(&xsc_ib_peer_id->comp); + err = ib_client_umem_activate_invalidation_notifier(umem_ex, + xsc_invalidate_umem, + xsc_ib_peer_id); + if (err) + goto error; +#endif + } else { + umem_ex = ib_umem_ex(umem); + if (IS_ERR(umem_ex)) { + err = -ENOMEM; + goto error; + } + } + umem = &umem_ex->umem; + + err = xsc_find_best_pgsz(umem, 0x40211000, start, &npages, &page_shift, &pas); + if (err) { + vfree(pas); + pas = NULL; + xsc_ib_warn(dev, "find best page size failed\n"); + goto error; + } + if (!npages) { + xsc_ib_warn(dev, "avoid zero region\n"); + err = -EINVAL; + goto error; + } + + xsc_ib_dbg(dev, "npages %d, page_shift %d\n", npages, page_shift); + + mr = reg_create(pd, virt_addr, length, umem, npages, pas, page_shift, access_flags); + if (IS_ERR(mr)) { + err = PTR_ERR(mr); + goto error; + } + + xsc_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key); + + mr->umem = umem; + mr->npages = npages; + spin_lock(&dev->mr_lock); + dev->xdev->dev_res->reg_pages += npages; + spin_unlock(&dev->mr_lock); + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->ibmr.length = length; + atomic_set(&mr->invalidated, 0); + if (ib_peer_mem) { + init_completion(&mr->invalidation_comp); + xsc_ib_peer_id->mr = mr; + mr->peer_id = xsc_ib_peer_id; + complete(&xsc_ib_peer_id->comp); + } + + return &mr->ibmr; + +error: + if (xsc_ib_peer_id) { + complete(&xsc_ib_peer_id->comp); + kfree(xsc_ib_peer_id); + xsc_ib_peer_id = NULL; + } + + ib_umem_ex_release(umem_ex); + return ERR_PTR(err); +} + +xsc_ib_dereg_mr_def() +{ + struct xsc_ib_dev *dev = to_mdev(ibmr->device); + struct xsc_ib_mr *mr = to_mmr(ibmr); + struct ib_umem *umem = mr->umem; + struct ib_umem_ex *umem_ex = (struct ib_umem_ex *)umem; + int npages = mr->npages; + int err; + + 
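+	/* only one of dereg_mr and the peer invalidation callback tears the
+	 * MR down; the loser of the atomic race below just waits
+	 */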
xsc_ib_dbg(dev, "dereg mkey = 0x%x\n", mr->mmr.key); + + if (atomic_inc_return(&mr->invalidated) > 1) { + /* In case there is inflight invalidation call pending for its termination */ + wait_for_completion(&mr->invalidation_comp); + kfree(mr); + return 0; + } + + if (mr->npages) { + err = xsc_core_dereg_mr(dev->xdev, &mr->mmr); + if (err) { + xsc_ib_warn(dev, "failed to dereg mr 0x%x (%d)\n", + mr->mmr.key, err); + atomic_set(&mr->invalidated, 0); + return err; + } + } + err = xsc_core_destroy_mkey(dev->xdev, &mr->mmr); + if (err) { + xsc_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", + mr->mmr.key, err); + atomic_set(&mr->invalidated, 0); + return err; + } + + if (umem_ex) { + ib_umem_ex_release(umem_ex); + spin_lock(&dev->mr_lock); + dev->xdev->dev_res->reg_pages -= npages; + spin_unlock(&dev->mr_lock); + } + + kfree(mr->pas); + kfree(mr); + + return 0; +} + +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +static void xsc_invalidate_umem(void *invalidation_cookie, + struct ib_umem_ex *umem, + unsigned long addr, + size_t size) +{ + struct xsc_ib_mr *mr; + struct xsc_ib_dev *dev; + struct xsc_ib_peer_id *peer_id = (struct xsc_ib_peer_id *)invalidation_cookie; + + wait_for_completion(&peer_id->comp); + if (!peer_id->mr) + return; + + mr = peer_id->mr; + /* This function is called under client peer lock so its resources are race protected */ + if (atomic_inc_return(&mr->invalidated) > 1) { + umem->invalidation_ctx->inflight_invalidation = 1; + return; + } + + umem->invalidation_ctx->peer_callback = 1; + dev = to_mdev(mr->ibmr.device); + xsc_core_destroy_mkey(dev->xdev, &mr->mmr); + xsc_core_dereg_mr(dev->xdev, &mr->mmr); + complete(&mr->invalidation_comp); +} +#endif + +xsc_ib_alloc_mr_def() +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_ib_mr *mr; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + mr->npages = 0; + mr->mmr.pd = to_mpd(pd)->pdn; + mr->pas = kcalloc(max_num_sg, sizeof(__be64), GFP_KERNEL); + if (!mr->pas) { + err = -ENOMEM; + goto err_alloc; + } + + err = xsc_core_create_mkey(dev->xdev, &mr->mmr); + if (err) + goto err_create_mkey; + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->ibmr.device = &dev->ib_dev; + + return &mr->ibmr; +err_create_mkey: + kfree(mr->pas); +err_alloc: + kfree(mr); + return ERR_PTR(err); +} + +static int xsc_set_page(struct ib_mr *ibmr, u64 pa) +{ + struct xsc_ib_mr *mmr = to_mmr(ibmr); + + mmr->pas[mmr->npages] = pa; + mmr->npages++; + return 0; +} + +u8 xsc_get_mr_page_mode(struct xsc_core_device *xdev, u32 page_shift) +{ + u8 page_mode = 0; + + page_mode = (page_shift == XSC_PAGE_SHIFT_4K ? XSC_PAGE_MODE_4K : + (page_shift == XSC_PAGE_SHIFT_64K ? XSC_PAGE_MODE_64K : + (page_shift == XSC_PAGE_SHIFT_2M ? 
XSC_PAGE_MODE_2M : XSC_PAGE_MODE_1G)));
+
+	return page_mode;
+}
+
+int xsc_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+		     int sg_nents, unsigned int *sg_offset)
+{
+	struct xsc_ib_mr *mmr = to_mmr(ibmr);
+
+	mmr->npages = 0;
+	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, xsc_set_page);
+}
+
+int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr)
+{
+	const struct ib_reg_wr *reg_wr = container_of(wr, struct ib_reg_wr, wr);
+	struct ib_mr *ibmr = reg_wr->mr;
+	struct xsc_ib_mr *mmr = to_mmr(ibmr);
+	struct xsc_register_mr_mbox_in *in;
+	int inlen;
+	int i;
+	int err;
+	__be64 *pas;
+
+	inlen = sizeof(*in) + sizeof(__be64) * mmr->npages;
+	in = kzalloc(inlen, GFP_ATOMIC);
+	if (!in)
+		return -ENOMEM;
+
+	in->req.pdn = cpu_to_be32(mmr->mmr.pd);
+	in->req.mkey = cpu_to_be32(ibmr->rkey);
+	in->req.acc = convert_access(reg_wr->access);
+	in->req.page_mode = 0;
+	in->req.map_en = XSC_MPT_MAP_EN;
+
+	if (xsc_ib_iommu_dma_map(ibmr->device)) {
+		static u32 support_page_shift[] = {12, 16, 21, 30};
+		u64 va_base;
+		u64 pa_base;
+		int len;
+		int i;
+		u32 page_shift;
+
+		for (i = 0; i < ARRAY_SIZE(support_page_shift); i++) {
+			page_shift = support_page_shift[i];
+			va_base = ALIGN_DOWN(ibmr->iova, 1 << page_shift);
+			len = ibmr->iova + ibmr->length - va_base;
+			if (len <= (1 << page_shift)) {
+				in->req.page_mode = xsc_get_mr_page_mode(dev->xdev, page_shift);
+				pa_base = ALIGN_DOWN(mmr->pas[0], (1 << page_shift));
+				in->req.pa_num = cpu_to_be32(1);
+				in->req.len = cpu_to_be32(len);
+				in->req.va_base = cpu_to_be64(va_base);
+				in->req.pas[0] = cpu_to_be64(pa_base);
+				goto out;
+			}
+		}
+
+		xsc_ib_warn(dev, "no suitable page mode found for iommu dma map, using 4K mode\n");
+	}
+
+	in->req.page_mode = xsc_get_mr_page_mode(dev->xdev, PAGE_SHIFT_4K);
+	in->req.va_base = cpu_to_be64(ibmr->iova);
+	in->req.pa_num = cpu_to_be32(mmr->npages);
+	in->req.len = cpu_to_be32(ibmr->length);
+	pas = in->req.pas;
+	for (i = 0; i < mmr->npages; i++)
+		pas[i] = cpu_to_be64(mmr->pas[i]);
+
+out:
+	xsc_ib_dbg(dev, "iova=%llx, pas=%llx, req.page_mode=%u, req.va_base=%llx, req.pas=%llx, req.len=%d, req.pa_num=%d\n",
+		   ibmr->iova,
+		   mmr->pas[0],
+		   in->req.page_mode,
+		   be64_to_cpu(in->req.va_base),
+		   be64_to_cpu(in->req.pas[0]),
+		   be32_to_cpu(in->req.len),
+		   be32_to_cpu(in->req.pa_num));
+
+	err = xsc_core_register_mr(dev->xdev, &mmr->mmr, in, sizeof(*in));
+
+	kfree(in);
+	return err;
+}
+
+int xsc_wr_invalidate_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr)
+{
+	struct xsc_core_mr mr;
+	int err = 0;
+
+	if (!wr)
+		return -EINVAL;
+	mr.key = wr->ex.invalidate_rkey;
+	err = xsc_core_dereg_mr(dev->xdev, &mr);
+	return err;
+}
+
+void xsc_reg_local_dma_mr(struct xsc_core_device *dev)
+{
+	struct xsc_register_mr_mbox_in in;
+	int err = 0;
+
+	in.req.pdn = 0;
+	in.req.pa_num = 0;
+	in.req.len = 0;
+	in.req.mkey = cpu_to_be32(0xFF);
+	in.req.acc = XSC_PERM_LOCAL_WRITE | XSC_PERM_LOCAL_READ;
+	in.req.page_mode = 0;
+	in.req.map_en = !(XSC_MPT_MAP_EN);
+	in.req.va_base = 0;
+
+	err = xsc_core_register_mr(dev, NULL, &in, sizeof(in));
+	if (err)
+		xsc_core_err(dev, "failed to register the local dma mr\n");
+}
diff --git a/drivers/infiniband/hw/xsc/peer_mem.c b/drivers/infiniband/hw/xsc/peer_mem.c
new file mode 100644
index 0000000000000000000000000000000000000000..eba572973b397dbd5ebe1ba26e126b4b9b7ab362
--- /dev/null
+++ b/drivers/infiniband/hw/xsc/peer_mem.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include "ib_peer_mem.h"
+#include
+#include "ib_umem_ex.h"
+
+static DEFINE_MUTEX(peer_memory_mutex);
+static LIST_HEAD(peer_memory_list);
+
+static void complete_peer(struct kref *kref);
+
+/* Caller should be holding the peer client lock, ib_peer_client->lock */
+static struct core_ticket *ib_peer_search_context(struct ib_peer_memory_client *ib_peer_client,
+						  u64 key)
+{
+	struct core_ticket *core_ticket;
+
+	list_for_each_entry(core_ticket, &ib_peer_client->core_ticket_list,
+			    ticket_list) {
+		if (core_ticket->key == key)
+			return core_ticket;
+	}
+
+	return NULL;
+}
+
+static int ib_invalidate_peer_memory(void *reg_handle, u64 core_context)
+{
+	struct ib_peer_memory_client *ib_peer_client = reg_handle;
+	struct invalidation_ctx *invalidation_ctx;
+	struct core_ticket *core_ticket;
+	int need_unlock = 1;
+
+	mutex_lock(&ib_peer_client->lock);
+	ib_peer_client->stats.num_free_callbacks += 1;
+	core_ticket = ib_peer_search_context(ib_peer_client, core_context);
+	if (!core_ticket)
+		goto out;
+
+	invalidation_ctx = (struct invalidation_ctx *)core_ticket->context;
+	/* If context is not ready yet, mark it to be invalidated */
+	if (!invalidation_ctx->func) {
+		invalidation_ctx->peer_invalidated = 1;
+		goto out;
+	}
+	invalidation_ctx->func(invalidation_ctx->cookie,
+			       invalidation_ctx->umem_ex, 0, 0);
+	if (invalidation_ctx->inflight_invalidation) {
+		/* init the completion to wait on before letting other threads run */
+		init_completion(&invalidation_ctx->comp);
+		mutex_unlock(&ib_peer_client->lock);
+		need_unlock = 0;
+		wait_for_completion(&invalidation_ctx->comp);
+	}
+
+	kfree(invalidation_ctx);
+out:
+	if (need_unlock)
+		mutex_unlock(&ib_peer_client->lock);
+
+	return 0;
+}
+
+static int ib_peer_insert_context(struct ib_peer_memory_client *ib_peer_client,
+				  void *context,
+				  u64 *context_ticket)
+{
+	struct core_ticket *core_ticket = kzalloc(sizeof(*core_ticket), GFP_KERNEL);
+
+	if (!core_ticket)
+		return -ENOMEM;
+
+	mutex_lock(&ib_peer_client->lock);
+	core_ticket->key = ib_peer_client->last_ticket++;
+	core_ticket->context = context;
+	list_add_tail(&core_ticket->ticket_list,
+		      &ib_peer_client->core_ticket_list);
+	*context_ticket = core_ticket->key;
+	mutex_unlock(&ib_peer_client->lock);
+
+	return 0;
+}
+
+/*
+ * Caller should be holding the peer client lock, specifically,
+ * the caller should hold ib_peer_client->lock
+ */
+static int ib_peer_remove_context(struct ib_peer_memory_client *ib_peer_client,
+				  u64 key)
+{
+	struct core_ticket *core_ticket;
+
+	list_for_each_entry(core_ticket, &ib_peer_client->core_ticket_list,
+			    ticket_list) {
+		if (core_ticket->key == key) {
+			list_del(&core_ticket->ticket_list);
+			kfree(core_ticket);
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+/*
+ * ib_peer_create_invalidation_ctx - create an invalidation context for a given umem
+ * @ib_peer_mem: peer client to be used
+ * @umem_ex: umem struct that belongs to that context
+ * @invalidation_ctx: output context
+ */
+int ib_peer_create_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem,
+				    struct ib_umem_ex *umem_ex,
+				    struct invalidation_ctx **invalidation_ctx)
+{
+	int ret;
+	struct invalidation_ctx *ctx;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ret = ib_peer_insert_context(ib_peer_mem, ctx,
+				     &ctx->context_ticket);
+	if (ret) {
+		kfree(ctx);
+		return ret;
+	}
+
+	ctx->umem_ex = umem_ex;
+	umem_ex->invalidation_ctx = ctx;
+	*invalidation_ctx = ctx;
+
+	return 0;
+}
+
+/**
+ * ib_peer_destroy_invalidation_ctx - destroy a given invalidation context
+ * @ib_peer_mem: peer client to be used
+ * @invalidation_ctx: context to be invalidated
+ */
+void ib_peer_destroy_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem,
+				      struct invalidation_ctx *invalidation_ctx)
+{
+	int peer_callback;
+	int inflight_invalidation;
+
+	/* If we are under a peer callback, the lock was already taken. */
+	if (!invalidation_ctx->peer_callback)
+		mutex_lock(&ib_peer_mem->lock);
+	ib_peer_remove_context(ib_peer_mem, invalidation_ctx->context_ticket);
+	/* Check the inflight flag only after taking the lock and removing the
+	 * ticket. From this point on, use local copies of peer_callback and
+	 * inflight_invalidation: after the complete() the invalidation_ctx
+	 * cannot be accessed any more, as it may be freed by the callback.
+	 */
+	peer_callback = invalidation_ctx->peer_callback;
+	inflight_invalidation = invalidation_ctx->inflight_invalidation;
+	if (inflight_invalidation)
+		complete(&invalidation_ctx->comp);
+
+	/* On a peer callback the lock is handled externally */
+	if (!peer_callback)
+		mutex_unlock(&ib_peer_mem->lock);
+
+	/* If we are in callback context or a callback is pending,
+	 * let it free the invalidation context.
+	 */
+	if (!peer_callback && !inflight_invalidation)
+		kfree(invalidation_ctx);
+}
+
+static int ib_memory_peer_check_mandatory(const struct peer_memory_client
+					  *peer_client)
+{
+#define PEER_MEM_MANDATORY_FUNC(x) { offsetof(struct peer_memory_client, x), #x }
+	static const struct {
+		size_t offset;
+		char *name;
+	} mandatory_table[] = {
+		PEER_MEM_MANDATORY_FUNC(acquire),
+		PEER_MEM_MANDATORY_FUNC(get_pages),
+		PEER_MEM_MANDATORY_FUNC(put_pages),
+		PEER_MEM_MANDATORY_FUNC(get_page_size),
+		PEER_MEM_MANDATORY_FUNC(dma_map),
+		PEER_MEM_MANDATORY_FUNC(dma_unmap)
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
+		if (!*(void **)((void *)peer_client + mandatory_table[i].offset)) {
+			pr_err("Peer memory %s is missing mandatory function %s\n",
+			       peer_client->name, mandatory_table[i].name);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void complete_peer(struct kref *kref)
+{
+	struct ib_peer_memory_client *ib_peer_client =
+		container_of(kref, struct ib_peer_memory_client, ref);
+
+	complete(&ib_peer_client->unload_comp);
+}
+
+void *ib_register_peer_memory_client(const struct peer_memory_client *peer_client,
+				     invalidate_peer_memory *invalidate_callback)
+{
+	struct ib_peer_memory_client *ib_peer_client;
+
+	if (ib_memory_peer_check_mandatory(peer_client))
+		return NULL;
+
+	ib_peer_client = kzalloc(sizeof(*ib_peer_client), GFP_KERNEL);
+	if (!ib_peer_client)
+		return NULL;
+
+	INIT_LIST_HEAD(&ib_peer_client->core_ticket_list);
+	mutex_init(&ib_peer_client->lock);
+	init_completion(&ib_peer_client->unload_comp);
+	kref_init(&ib_peer_client->ref);
+	ib_peer_client->peer_mem = peer_client;
+
+	/* A peer that supplies a non-NULL callback indicates that
+	 * invalidation support is required for any memory it owns.
+	 */
+	if (invalidate_callback) {
+		*invalidate_callback = ib_invalidate_peer_memory;
+		ib_peer_client->invalidation_required = 1;
+	}
+	ib_peer_client->last_ticket = 1;
+
+	mutex_lock(&peer_memory_mutex);
+	list_add_tail(&ib_peer_client->core_peer_list, &peer_memory_list);
+	mutex_unlock(&peer_memory_mutex);
+
+	return ib_peer_client;
+}
+EXPORT_SYMBOL(ib_register_peer_memory_client);
+
+void ib_unregister_peer_memory_client(void *reg_handle)
+{
+	struct ib_peer_memory_client *ib_peer_client = reg_handle;
+
+	mutex_lock(&peer_memory_mutex);
+	list_del(&ib_peer_client->core_peer_list);
+	mutex_unlock(&peer_memory_mutex);
+
+	kref_put(&ib_peer_client->ref, complete_peer);
+	wait_for_completion(&ib_peer_client->unload_comp);
+	kfree(ib_peer_client);
+}
+EXPORT_SYMBOL(ib_unregister_peer_memory_client);
+
+struct ib_peer_memory_client *ib_get_peer_client(struct ib_ucontext *context, unsigned long addr,
+						 size_t size, unsigned long peer_mem_flags,
+						 void **peer_client_context)
+{
+	struct ib_peer_memory_client *ib_peer_client = NULL;
+	int ret = 0;
+
+	mutex_lock(&peer_memory_mutex);
+	list_for_each_entry(ib_peer_client, &peer_memory_list, core_peer_list) {
+		/* A peer that requires invalidation cannot own
+		 * memory which does not support it
+		 */
+		if (ib_peer_client->invalidation_required &&
+		    !(peer_mem_flags & IB_PEER_MEM_INVAL_SUPP))
+			continue;
+		ret = ib_peer_client->peer_mem->acquire(addr, size, NULL, NULL,
+							peer_client_context);
+		if (ret > 0)
+			goto found;
+	}
+
+	ib_peer_client = NULL;
+
+found:
+	if (ib_peer_client)
+		kref_get(&ib_peer_client->ref);
+
+	mutex_unlock(&peer_memory_mutex);
+
+	return ib_peer_client;
+}
+EXPORT_SYMBOL(ib_get_peer_client);
+
+void ib_put_peer_client(struct ib_peer_memory_client *ib_peer_client,
+			void *peer_client_context)
+{
+	if (ib_peer_client->peer_mem->release)
+		ib_peer_client->peer_mem->release(peer_client_context);
+
+	kref_put(&ib_peer_client->ref, complete_peer);
+}
+EXPORT_SYMBOL(ib_put_peer_client);
+
+int ib_get_peer_private_data(struct ib_ucontext *context, u64 peer_id,
+			     char *peer_name)
+{
+	pr_warn("predefined peer memory is not supported yet\n");
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(ib_get_peer_private_data);
+
+void ib_put_peer_private_data(struct ib_ucontext *context)
+{
+	pr_warn("predefined peer memory is not supported yet\n");
+}
+EXPORT_SYMBOL(ib_put_peer_private_data);
diff --git a/drivers/infiniband/hw/xsc/peer_mem.h b/drivers/infiniband/hw/xsc/peer_mem.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e3f803ac246315558efab858a2fca505ccb26b4
--- /dev/null
+++ b/drivers/infiniband/hw/xsc/peer_mem.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#if !defined(PEER_MEM_H)
+#define PEER_MEM_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define IB_PEER_MEMORY_NAME_MAX 64
+#define IB_PEER_MEMORY_VER_MAX 16
+#define PEER_MEM_U64_CORE_CONTEXT
+
+/**
+ * struct peer_memory_client - registration information for a peer client.
+ * @name: peer client name
+ * @version: peer client version
+ * @acquire: callback function to be used by IB core to detect whether a
+ *   virtual address is under the responsibility of a specific peer client.
+ * @get_pages: callback function to be used by IB core asking the peer client to pin
+ *   the physical pages of the given address range and return that information.
+ *   It is the peer-memory equivalent of the kernel API get_user_pages().
+ * @dma_map: callback function to be used by IB core asking the peer client to fill
+ *   the dma address mapping for a given address range.
+ * @dma_unmap: callback function to be used by IB core asking the peer client to take
+ *   relevant actions to unmap the memory.
+ * @put_pages: callback function to be used by IB core asking the peer client to remove the
+ *   pinning from the given memory.
+ *   It's the peer-direct equivalent of the kernel API put_page().
+ * @get_page_size: callback function to be used by IB core to query the peer client for
+ *   the page size for the given allocation.
+ * @release: callback function to be used by IB core asking the peer client to release all
+ *   resources associated with a previous acquire call. The call will be performed
+ *   only for contexts that have been successfully acquired (i.e. acquire returned a
+ *   non-zero value).
+ *   Additionally, IB core guarantees that there will be no pages pinned through this
+ *   context when the callback is called.
+ *
+ * The subsections in this description contain a detailed description
+ * of the callback arguments and expected return values for the
+ * callbacks defined in this struct.
+ *
+ * acquire:
+ *
+ *   Callback function to be used by IB core to detect
+ *   whether a virtual address is under the responsibility
+ *   of a specific peer client.
+ *
+ *   addr [IN] - virtual address to be checked whether it belongs to the peer.
+ *
+ *   size [IN] - size of memory area starting at addr.
+ *
+ *   peer_mem_private_data [IN] - The contents of ib_ucontext->peer_mem_private_data.
+ *                This parameter allows usage of the peer-direct
+ *                API in implementations where it is impossible
+ *                to detect if the memory belongs to the device
+ *                based upon the virtual address alone. In such
+ *                cases, the peer device can create a special
+ *                ib_ucontext, which will be associated with the
+ *                relevant peer memory.
+ *
+ *   peer_mem_name [IN] - The contents of ib_ucontext->peer_mem_name.
+ *                Used to identify the peer memory client that
+ *                initialized the ib_ucontext.
+ *                This parameter is normally used along with
+ *                peer_mem_private_data.
+ *
+ *   client_context [OUT] - peer opaque data which holds a peer context for
+ *                the acquired address range; it will be provided
+ *                back to the peer client in subsequent
+ *                calls for that memory.
+ *
+ *   If the peer takes responsibility for the given address range, further
+ *   memory management calls will be directed to the callbacks of this peer
+ *   client.
+ *
+ *   Return - 1 if the peer client takes responsibility for the range,
+ *   otherwise 0. Any internal peer error should result in a zero answer;
+ *   if the address range really belongs to the peer, no owner will be
+ *   found and the application will get an error from IB core as expected.
+ *
+ * get_pages:
+ *
+ *   Callback function to be used by IB core asking the
+ *   peer client to pin the physical pages of the given
+ *   address range and return that information. It is the
+ *   peer-memory equivalent of the kernel API
+ *   get_user_pages().
+ *
+ *   addr [IN] - start virtual address of that given allocation.
+ *
+ *   size [IN] - size of memory area starting at addr.
+ *
+ *   write [IN] - indicates whether the pages will be written to by the caller.
+ *                Same meaning as in the kernel API get_user_pages; can be
+ *                ignored if not relevant.
+ *
+ *   force [IN] - indicates whether to force write access even if the user
+ *                mapping is read only. Same meaning as in the kernel API
+ *                get_user_pages; can be ignored if not relevant.
+ *
+ *   sg_head [IN/OUT] - pointer to head of struct sg_table.
+ *                The peer client should allocate a table big
+ *                enough to store all of the required entries. This
+ *                function should fill the table with physical addresses
+ *                and sizes of the memory segments composing this
+ *                memory mapping.
+ *                The table allocation can be done using sg_alloc_table.
+ *                Filling in the physical memory addresses and size can
+ *                be done using sg_set_page.
+ *
+ *   client_context [IN] - peer context for the given allocation, as received from
+ *                the acquire call.
+ *
+ *   core_context [IN] - IB core context. If the peer client wishes to
+ *                invalidate any of the pages pinned through this API,
+ *                it must provide this context as an argument to the
+ *                invalidate callback.
+ *
+ *   Return - 0 success, otherwise errno error code.
+ *
+ * dma_map:
+ *
+ *   Callback function to be used by IB core asking the peer client to fill
+ *   the dma address mapping for a given address range.
+ *
+ *   sg_head [IN/OUT] - pointer to head of struct sg_table. The peer client
+ *                should fill the dma_address & dma_length for
+ *                each scatter gather entry in the table.
+ *
+ *   client_context [IN] - peer context for the allocation mapped.
+ *
+ *   dma_device [IN] - the RDMA capable device which requires access to the
+ *                peer memory.
+ *
+ *   dmasync [IN] - flush in-flight DMA when the memory region is written.
+ *                Same meaning as with host memory mapping; can be ignored if
+ *                not relevant.
+ *
+ *   nmap [OUT] - number of mapped/set entries.
+ *
+ *   Return - 0 success, otherwise errno error code.
+ *
+ * dma_unmap:
+ *
+ *   Callback function to be used by IB core asking the peer client to take
+ *   relevant actions to unmap the memory.
+ *
+ *   sg_head [IN] - pointer to head of struct sg_table. The dma_address &
+ *                dma_length of each scatter gather entry were filled
+ *                by dma_map and are now to be unmapped.
+ *
+ *   client_context [IN] - peer context for the allocation mapped.
+ *
+ *   dma_device [IN] - the RDMA capable device which requires access to the
+ *                peer memory.
+ *
+ *   Return - 0 success, otherwise errno error code.
+ *
+ * put_pages:
+ *
+ *   Callback function to be used by IB core asking the peer client to remove the
+ *   pinning from the given memory.
+ *   It's the peer-direct equivalent of the kernel API put_page().
+ *
+ *   sg_head [IN] - pointer to head of struct sg_table.
+ *
+ *   client_context [IN] - peer context for that given allocation.
+ *
+ * get_page_size:
+ *
+ *   Callback function to be used by IB core to query the
+ *   peer client for the page size for the given
+ *   allocation.
+ *
+ *   sg_head [IN] - pointer to head of struct sg_table.
+ *
+ *   client_context [IN] - peer context for that given allocation.
+ *
+ *   Return - Page size in bytes.
+ *
+ * release:
+ *
+ *   Callback function to be used by IB core asking the peer
+ *   client to release all resources associated with a
+ *   previous acquire call. The call will be performed only
+ *   for contexts that have been successfully acquired
+ *   (i.e. acquire returned a non-zero value).
+ *   Additionally, IB core guarantees that there will be no
+ *   pages pinned through this context when the callback is
+ *   called.
+ *
+ *   client_context [IN] - peer context for the given allocation.
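+ *
+ * Example:
+ *
+ *   A minimal registration sketch. The my_*() callbacks are hypothetical
+ *   driver implementations, not part of this interface; only the callbacks
+ *   checked by ib_memory_peer_check_mandatory() are set here.
+ *
+ *     static const struct peer_memory_client my_client = {
+ *         .name          = "my_peer",
+ *         .version       = "1.0",
+ *         .acquire       = my_acquire,
+ *         .get_pages     = my_get_pages,
+ *         .put_pages     = my_put_pages,
+ *         .get_page_size = my_get_page_size,
+ *         .dma_map       = my_dma_map,
+ *         .dma_unmap     = my_dma_unmap,
+ *     };
+ *
+ *     static invalidate_peer_memory my_invalidate_cb;
+ *
+ *     reg_handle = ib_register_peer_memory_client(&my_client,
+ *                                                 &my_invalidate_cb);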
+ *
+ */
+struct peer_memory_client {
+	char name[IB_PEER_MEMORY_NAME_MAX];
+	char version[IB_PEER_MEMORY_VER_MAX];
+	int (*acquire)(unsigned long addr, size_t size, void *peer_mem_private_data,
+		       char *peer_mem_name, void **client_context);
+	int (*get_pages)(unsigned long addr,
+			 size_t size, int write, int force,
+			 struct sg_table *sg_head,
+			 void *client_context, u64 core_context);
+	int (*dma_map)(struct sg_table *sg_head, void *client_context,
+		       struct device *dma_device, int dmasync, int *nmap);
+	int (*dma_unmap)(struct sg_table *sg_head, void *client_context,
+			 struct device *dma_device);
+	void (*put_pages)(struct sg_table *sg_head, void *client_context);
+	unsigned long (*get_page_size)(void *client_context);
+	void (*release)(void *client_context);
+	void* (*get_context_private_data)(u64 peer_id);
+	void (*put_context_private_data)(void *context);
+};
+
+typedef int (*invalidate_peer_memory)(void *reg_handle, u64 core_context);
+
+void *ib_register_peer_memory_client(const struct peer_memory_client *peer_client,
+				     invalidate_peer_memory *invalidate_callback);
+void ib_unregister_peer_memory_client(void *reg_handle);
+
+#endif
diff --git a/drivers/infiniband/hw/xsc/private_dev.c b/drivers/infiniband/hw/xsc/private_dev.c
new file mode 100644
index 0000000000000000000000000000000000000000..29fe98fd6b0c838937b06c234a5d3afd1e91281c
--- /dev/null
+++ b/drivers/infiniband/hw/xsc/private_dev.c
@@ -0,0 +1,1031 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include "common/xsc_core.h"
+#include "common/xsc_ioctl.h"
+#include "common/xsc_hsi.h"
+#include "common/xsc_lag.h"
+#include "common/res_obj.h"
+#include "xsc_ib.h"
+
+#define FEATURE_ONCHIP_FT_MASK		BIT(4)
+#define FEATURE_DMA_RW_TBL_MASK		BIT(8)
+#define FEATURE_PCT_EXP_MASK		BIT(9)
+
+static int xsc_priv_dev_open(struct inode *inode, struct file *file)
+{
+	struct xsc_priv_device *priv_dev =
+		container_of(inode->i_cdev, struct xsc_priv_device, cdev);
+	struct xsc_core_device *xdev =
+		container_of(priv_dev, struct xsc_core_device, priv_device);
+	struct xsc_bdf_file *bdf_file;
+
+	bdf_file = kzalloc(sizeof(*bdf_file), GFP_KERNEL);
+	if (!bdf_file)
+		return -ENOMEM;
+
+	INIT_RADIX_TREE(&bdf_file->obj_tree, GFP_ATOMIC);
+	spin_lock_init(&bdf_file->obj_lock);
+
+	bdf_file->xdev = xdev;
+	bdf_file->key = bdf_to_key(pci_domain_nr(xdev->pdev->bus),
+				   xdev->pdev->bus->number, xdev->pdev->devfn);
+	bdf_file->restore_nic_fn = NULL;
+
+	radix_tree_preload(GFP_KERNEL);
+	spin_lock(&priv_dev->bdf_lock);
+	radix_tree_insert(&priv_dev->bdf_tree, bdf_file->key, bdf_file);
+	spin_unlock(&priv_dev->bdf_lock);
+	radix_tree_preload_end();
+	file->private_data = bdf_file;
+
+	return 0;
+}
+
+static int xsc_priv_dev_release(struct inode *inode, struct file *filp)
+{
+	struct xsc_bdf_file *bdf_file = filp->private_data;
+	struct xsc_core_device *xdev = bdf_file->xdev;
+
+	xsc_close_bdf_file(bdf_file);
+
+	if (bdf_file->restore_nic_fn) {
+		xsc_set_user_mode(xdev, false);
+		bdf_file->restore_nic_fn(xdev);
+	}
+
+	spin_lock(&xdev->priv_device.bdf_lock);
+	radix_tree_delete(&xdev->priv_device.bdf_tree, bdf_file->key);
+	spin_unlock(&xdev->priv_device.bdf_lock);
+
+	kfree(bdf_file);
+
+	return 0;
+}
+
+static long xsc_ioctl_mem_free(struct xsc_priv_device *priv_dev, struct xsc_core_device *xdev,
+			       struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr)
+{
+	struct xsc_ioctl_mem_info *minfo;
+	struct
xsc_ioctl_data_tl *tl; + struct xsc_ioctl_mbox_in *in; + struct xsc_mem_entry *m_ent; + char tname[TASK_COMM_LEN]; + int in_size; + int err = 0; + u8 lfound = 0; + + in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->len = hdr->attr.length; + err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + if (in->len > sizeof(struct xsc_ioctl_data_tl)) { + tl = (struct xsc_ioctl_data_tl *)(in->data); + if (tl->length != sizeof(struct xsc_ioctl_mem_info)) { + kvfree(in); + return -EFAULT; + } + minfo = (struct xsc_ioctl_mem_info *)(tl + 1); + if (minfo->vir_addr && minfo->phy_addr) { + memset(tname, 0, sizeof(tname)); + get_task_comm(tname, current); + + spin_lock_irq(&priv_dev->mem_lock); + list_for_each_entry(m_ent, &priv_dev->mem_list, list) { + if ((!strcmp(m_ent->task_name, tname)) && + m_ent->mem_info.mem_num == minfo->mem_num && + m_ent->mem_info.size == minfo->size) { + if (m_ent->mem_info.phy_addr == minfo->phy_addr && + m_ent->mem_info.vir_addr == minfo->vir_addr) { + lfound = 1; + list_del(&m_ent->list); + } else { + err = -ENOMEM; + } + break; + } + } + spin_unlock_irq(&priv_dev->mem_lock); + + if (lfound) { + dma_free_coherent(&xdev->pdev->dev, + minfo->size, + (void *)minfo->vir_addr, + minfo->phy_addr); + } + } else { + kvfree(in); + return -EFAULT; + } + } + + hdr->attr.error = err; + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, in->data, in->len)) + err = -EFAULT; + + kvfree(in); + return err; +} + +static long xsc_ioctl_mem_alloc(struct xsc_priv_device *priv_dev, + struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_mem_info *minfo; + struct xsc_ioctl_data_tl *tl; + struct xsc_ioctl_mbox_in *in; + struct xsc_mem_entry *m_ent; + char tname[TASK_COMM_LEN]; + u64 vaddr = 0; + u64 paddr = 0; + int in_size; + int err = 0; + u8 lfound = 0; + u8 needfree = 0; + + in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->len = hdr->attr.length; + err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + if (in->len > sizeof(struct xsc_ioctl_data_tl)) { + tl = (struct xsc_ioctl_data_tl *)(in->data); + if (tl->length != sizeof(struct xsc_ioctl_mem_info)) { + kvfree(in); + return -EFAULT; + } + minfo = (struct xsc_ioctl_mem_info *)(tl + 1); + memset(tname, 0, sizeof(tname)); + get_task_comm(tname, current); + + spin_lock_irq(&priv_dev->mem_lock); + list_for_each_entry(m_ent, &priv_dev->mem_list, list) { + if ((!strcmp(m_ent->task_name, tname)) && + m_ent->mem_info.mem_num == minfo->mem_num) { + if (m_ent->mem_info.size == minfo->size) { + minfo->phy_addr = m_ent->mem_info.phy_addr; + minfo->vir_addr = m_ent->mem_info.vir_addr; + lfound = 1; + } else { + needfree = 1; + list_del(&m_ent->list); + } + break; + } + } + spin_unlock_irq(&priv_dev->mem_lock); + + if (needfree) { + dma_free_coherent(&xdev->pdev->dev, + m_ent->mem_info.size, + (void *)m_ent->mem_info.vir_addr, + m_ent->mem_info.phy_addr); + } + + if (!lfound) { + vaddr = (u64)dma_alloc_coherent(&xdev->pdev->dev, + minfo->size, + (dma_addr_t *)&paddr, + GFP_KERNEL); + if (vaddr) { + memset((void *)vaddr, 0, minfo->size); + minfo->phy_addr = paddr; + minfo->vir_addr = vaddr; + m_ent = 
kzalloc(sizeof(*m_ent), GFP_KERNEL); + if (!m_ent) { + kvfree(in); + return -ENOMEM; + } + strscpy(m_ent->task_name, tname, sizeof(m_ent->task_name)); + m_ent->mem_info.mem_num = minfo->mem_num; + m_ent->mem_info.size = minfo->size; + m_ent->mem_info.phy_addr = paddr; + m_ent->mem_info.vir_addr = vaddr; + spin_lock_irq(&priv_dev->mem_lock); + list_add(&m_ent->list, &priv_dev->mem_list); + spin_unlock_irq(&priv_dev->mem_lock); + } else { + kvfree(in); + return -ENOMEM; + } + } + } + + hdr->attr.error = err; + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, in->data, in->len)) + err = -EFAULT; + + kvfree(in); + return err; +} + +static long xsc_priv_dev_ioctl_mem(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_priv_device *priv_dev = &xdev->priv_device; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_IOCTL_MEM_ALLOC: + return xsc_ioctl_mem_alloc(priv_dev, xdev, user_hdr, &hdr); + case XSC_IOCTL_MEM_FREE: + return xsc_ioctl_mem_free(priv_dev, xdev, user_hdr, &hdr); + default: + return -EINVAL; + } +} + +static int xsc_priv_modify_qp(struct xsc_core_device *xdev, void *in, void *out) +{ + int ret = 0, i = 0; + struct xsc_ioctl_qp_range *resp; + struct xsc_ioctl_data_tl *tl; + int insize; + struct xsc_modify_qp_mbox_in *mailin; + struct xsc_modify_qp_mbox_out mailout; + u32 qpn; + + tl = (struct xsc_ioctl_data_tl *)out; + resp = (struct xsc_ioctl_qp_range *)(tl + 1); + xsc_core_dbg(xdev, "xsc_ioctl_qp_range: qpn:%d, num:%d, opcode:%d\n", + resp->qpn, resp->num, resp->opcode); + if (resp->num == 0) { + xsc_core_err(xdev, "xsc_ioctl_qp_range: resp->num == 0\n"); + return 0; + } + qpn = resp->qpn; + insize = sizeof(struct xsc_modify_qp_mbox_in); + mailin = kvzalloc(insize, GFP_KERNEL); + if (!mailin) + return -ENOMEM; + for (i = 0; i < resp->num; i++) { + mailin->hdr.opcode = cpu_to_be16(resp->opcode); + mailin->qpn = cpu_to_be32(qpn + i); + ret = xsc_cmd_exec(xdev, mailin, insize, &mailout, sizeof(mailout)); + xsc_core_dbg(xdev, "modify qp state qpn:%d\n", qpn + i); + } + kvfree(mailin); + + return ret; +} + +static int xsc_priv_dev_ioctl_get_phy(struct xsc_core_device *xdev, + void *in, void *out) +{ + int ret = 0; + struct xsc_eswitch *esw = xdev->priv.eswitch; + struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; + struct xsc_ioctl_get_phy_info_res *resp; + u16 lag_id = xsc_get_lag_id(xdev); + + switch (tl->opmod) { + case XSC_IOCTL_OP_GET_LOCAL: + resp = (struct xsc_ioctl_get_phy_info_res *)(tl + 1); + + resp->pcie_no = xdev->pcie_no; + resp->func_id = xdev->glb_func_id; + resp->pcie_host = xdev->caps.pcie_host; + resp->mac_phy_port = xdev->mac_port; + resp->funcid_to_logic_port_off = xdev->caps.funcid_to_logic_port; + resp->lag_id = lag_id; + resp->raw_qp_id_base = xdev->caps.raweth_qp_id_base; + resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; + resp->lag_port_start = xdev->caps.lag_logic_port_ofst; + resp->send_seg_num = xdev->caps.send_ds_num; + resp->recv_seg_num = xdev->caps.recv_ds_num; + resp->raw_tpe_qp_num = xdev->caps.raw_tpe_qp_num; + resp->chip_version = xdev->chip_ver_l; + 
resp->on_chip_tbl_vld = + (xdev->feature_flag & FEATURE_ONCHIP_FT_MASK) ? 1 : 0; + resp->dma_rw_tbl_vld = + (xdev->feature_flag & FEATURE_DMA_RW_TBL_MASK) ? 1 : 0; + resp->pct_compress_vld = + (xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 1 : 0; + + xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n", + resp->pcie_no, resp->func_id, resp->pcie_host, + resp->mac_phy_port, resp->lag_id, + resp->funcid_to_logic_port_off); + resp->pf0_vf_funcid_base = xdev->caps.pf0_vf_funcid_base; + resp->pf0_vf_funcid_top = xdev->caps.pf0_vf_funcid_top; + resp->pf1_vf_funcid_base = xdev->caps.pf1_vf_funcid_base; + resp->pf1_vf_funcid_top = xdev->caps.pf1_vf_funcid_top; + resp->pcie0_pf_funcid_base = xdev->caps.pcie0_pf_funcid_base; + resp->pcie0_pf_funcid_top = xdev->caps.pcie0_pf_funcid_top; + resp->pcie1_pf_funcid_base = xdev->caps.pcie1_pf_funcid_base; + resp->pcie1_pf_funcid_top = xdev->caps.pcie1_pf_funcid_top; + resp->hca_core_clock = xdev->caps.hca_core_clock; + resp->mac_bit = xdev->caps.mac_bit; + if (xsc_core_is_pf(xdev)) { + mutex_lock(&esw->mode_lock); + resp->esw_mode = esw->mode; + mutex_unlock(&esw->mode_lock); + } else { + resp->esw_mode = 0; + } + resp->board_id = xdev->board_info->board_id; + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int xsc_priv_dev_ioctl_get_force_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *resp = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->pcp = ib_dev->force_pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_get_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *resp = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->dscp = ib_dev->force_dscp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *req = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_pcp = req->pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *req = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_dscp = req->dscp; + return 0; +} + +int xsc_priv_dev_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size) +{ + int opcode, ret = 0; + struct xsc_ioctl_attr *hdr; + + hdr = (struct xsc_ioctl_attr *)in; + opcode = hdr->opcode; + switch (opcode) { + case XSC_IOCTL_GET_PHY_INFO: + ret = xsc_priv_dev_ioctl_get_phy(xdev, in, out); + break; + case XSC_IOCTL_GET_FORCE_PCP: + xsc_core_dbg(xdev, "getting global pcp\n"); + ret = xsc_priv_dev_ioctl_get_force_pcp(xdev, in, out); + break; + case XSC_IOCTL_GET_FORCE_DSCP: + ret = xsc_priv_dev_ioctl_get_force_dscp(xdev, in, out); + break; + case XSC_IOCTL_SET_QP_STATUS: + xsc_core_dbg(xdev, "case XSC_IOCTL_SET_QP_STATUS:\n"); + ret = xsc_priv_modify_qp(xdev, in, out); + break; + case XSC_IOCTL_SET_FORCE_PCP: + xsc_core_dbg(xdev, 
"setting global pcp\n"); + ret = xsc_priv_dev_ioctl_set_force_pcp(xdev, in, out); + break; + case XSC_IOCTL_SET_FORCE_DSCP: + xsc_core_dbg(xdev, "setting global dscp\n"); + ret = xsc_priv_dev_ioctl_set_force_dscp(xdev, in, out); + break; + default: + ret = -EINVAL; + break; + } + + xsc_core_dbg(xdev, "xsc_priv_dev exec_ioctl.ret=%u\n", ret); + + return ret; +} + +static long xsc_priv_dev_ioctl_getinfo(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + struct xsc_ioctl_hdr *in; + int in_size; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + switch (hdr.attr.opcode) { + case XSC_IOCTL_GET_PHY_INFO: + case XSC_IOCTL_GET_FORCE_PCP: + case XSC_IOCTL_GET_FORCE_DSCP: + case XSC_IOCTL_SET_QP_STATUS: + case XSC_IOCTL_SET_FORCE_PCP: + case XSC_IOCTL_SET_FORCE_DSCP: + case XSC_IOCTL_GET_CONTEXT: + break; + default: + return -EINVAL; + } + in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + in->attr.opcode = hdr.attr.opcode; + in->attr.length = hdr.attr.length; + err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + err = xsc_priv_dev_exec_ioctl(xdev, &in->attr, + (in_size - offsetof(struct xsc_ioctl_hdr, attr)), + in->attr.data, + hdr.attr.length); + in->attr.error = err; + if (copy_to_user((void *)arg, in, in_size)) + err = -EFAULT; + kvfree(in); + return err; +} + +static int xsc_ioctl_flow_add_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl, + char *data, unsigned int datalen) +{ + int err = 0; + struct xsc_flow_pct_v4_add *pct_v4; + struct xsc_flow_pct_v6_add *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v4->priority, data, datalen); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v6->priority, data, datalen); + break; + default: + break; + } + + return err; +} + +static void xsc_ioctl_flow_destroy_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl) +{ + struct xsc_flow_pct_v4_del *pct_v4; + struct xsc_flow_pct_v6_del *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v4->priority); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v6->priority); + break; + default: + break; + } +} + +static int xsc_ioctl_flow_cmdq_handle_res_obj(struct xsc_bdf_file *file, + char *data, unsigned int datalen) +{ + struct xsc_ioctl_data_tl *tl; + int err = 0; + + tl = (struct xsc_ioctl_data_tl *)data; + + switch (tl->opmod) { + case XSC_IOCTL_OP_ADD: + err = xsc_ioctl_flow_add_obj(file, tl, data, datalen); + break; + case XSC_IOCTL_OP_DEL: + xsc_ioctl_flow_destroy_obj(file, tl); + break; + default: + break; + } + + return err; +} + +static int xsc_ioctl_flow_cmdq(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct 
xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + int in_size; + int out_size; + int err; + + in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->len = __cpu_to_be16(hdr->attr.length); + err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + err = xsc_ioctl_flow_cmdq_handle_res_obj(file, in->data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + out_size = sizeof(struct xsc_ioctl_mbox_out) + hdr->attr.length; + out = kvzalloc(out_size, GFP_KERNEL); + if (!out) { + kvfree(in); + return -ENOMEM; + } + memcpy(out->data, in->data, hdr->attr.length); + out->len = in->len; + err = xsc_cmd_exec(file->xdev, in, in_size, out, out_size); + + hdr->attr.error = __be32_to_cpu(out->error); + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out->data, hdr->attr.length)) + err = -EFAULT; + + kvfree(in); + kvfree(out); + return err; +} + +static int xsc_ioctl_modify_raw_qp(struct xsc_priv_device *priv_dev, + struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_modify_raw_qp_mbox_in *in; + struct xsc_modify_raw_qp_mbox_out *out; + int err; + + if (hdr->attr.length != sizeof(struct xsc_modify_raw_qp_request)) + return -EINVAL; + + in = kvzalloc(sizeof(struct xsc_modify_raw_qp_mbox_in), GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(struct xsc_modify_raw_qp_mbox_out), GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->req, user_hdr->attr.data, + sizeof(struct xsc_modify_raw_qp_request)); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->pcie_no = xdev->pcie_no; + + err = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), + out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + + hdr->attr.error = __be32_to_cpu(out->hdr.status); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static void xsc_handle_multiqp_create(struct xsc_bdf_file *file, void *in, + unsigned int inlen, void *out) +{ + u16 qp_num = 0; + int i = 0; + struct xsc_create_qp_request *req = NULL; + void *ptr = NULL; + int len = 0; + u32 qpn_base = be32_to_cpu(((struct xsc_create_multiqp_mbox_out *)out)->qpn_base); + + qp_num = be16_to_cpu(((struct xsc_create_multiqp_mbox_in *)in)->qp_num); + ptr = ((struct xsc_create_multiqp_mbox_in *)in)->data; + for (i = 0; i < qp_num; i++) { + req = (struct xsc_create_qp_request *)ptr; + len = sizeof(struct xsc_create_qp_request) + + be16_to_cpu(req->pa_num) * sizeof(u64); + xsc_alloc_qp_obj(file, qpn_base + i, (char *)req, len); + ptr += len; + } +} + +static void xsc_pci_ctrl_cmdq_handle_res_obj(struct xsc_bdf_file *file, + void *in, unsigned int inlen, void *out, int opcode) +{ + unsigned int idx; + + switch (opcode) { + case XSC_CMD_OP_ALLOC_PD: + idx = be32_to_cpu(((struct xsc_alloc_pd_mbox_out *)out)->pdn); + xsc_alloc_pd_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DEALLOC_PD: + idx = be32_to_cpu(((struct xsc_dealloc_pd_mbox_in *)in)->pdn); + xsc_destroy_pd_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_MKEY: + idx = be32_to_cpu(((struct xsc_create_mkey_mbox_out *)out)->mkey); + xsc_alloc_mr_obj(file, idx, 
in, inlen);
+		break;
+	case XSC_CMD_OP_DESTROY_MKEY:
+		idx = be32_to_cpu(((struct xsc_destroy_mkey_mbox_in *)in)->mkey);
+		xsc_destroy_mr_obj(file, idx);
+		break;
+	case XSC_CMD_OP_CREATE_CQ:
+		idx = be32_to_cpu(((struct xsc_create_cq_mbox_out *)out)->cqn);
+		xsc_alloc_cq_obj(file, idx, in, inlen);
+		break;
+	case XSC_CMD_OP_DESTROY_CQ:
+		idx = be32_to_cpu(((struct xsc_destroy_cq_mbox_in *)in)->cqn);
+		xsc_destroy_cq_obj(file, idx);
+		break;
+	case XSC_CMD_OP_CREATE_QP:
+		idx = be32_to_cpu(((struct xsc_create_qp_mbox_out *)out)->qpn);
+		xsc_alloc_qp_obj(file, idx, in, inlen);
+		break;
+	case XSC_CMD_OP_DESTROY_QP:
+		idx = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn);
+		xsc_destroy_qp_obj(file, idx);
+		break;
+	case XSC_CMD_OP_CREATE_MULTI_QP:
+		xsc_handle_multiqp_create(file, in, inlen, out);
+		break;
+	default:
+		break;
+	}
+}
+
+static long xsc_priv_dev_ioctl_cmdq(struct file *filp, unsigned long arg)
+{
+	struct xsc_bdf_file *bdf_file = filp->private_data;
+	struct xsc_priv_device *priv_dev = &bdf_file->xdev->priv_device;
+	struct xsc_core_device *xdev = bdf_file->xdev;
+	struct xsc_ioctl_hdr __user *user_hdr =
+		(struct xsc_ioctl_hdr __user *)arg;
+	struct xsc_ioctl_hdr hdr;
+	int err;
+
+	err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+	if (err)
+		return -EFAULT;
+
+	/* check valid */
+	if (hdr.check_filed != XSC_IOCTL_CHECK_FILED)
+		return -EINVAL;
+
+	/* check ioctl cmd */
+	switch (hdr.attr.opcode) {
+	case XSC_CMD_OP_IOCTL_FLOW:
+		return xsc_ioctl_flow_cmdq(bdf_file, user_hdr, &hdr);
+	case XSC_CMD_OP_MODIFY_RAW_QP:
+		return xsc_ioctl_modify_raw_qp(priv_dev, xdev, user_hdr, &hdr);
+	default:
+		return -EINVAL;
+	}
+}
+
+static long xsc_priv_dev_ioctl_cmdq_raw(struct file *filp, unsigned long arg)
+{
+	struct xsc_bdf_file *bdf_file = filp->private_data;
+	struct xsc_core_device *xdev = bdf_file->xdev;
+	struct xsc_ioctl_hdr __user *user_hdr =
+		(struct xsc_ioctl_hdr __user *)arg;
+	struct xsc_ioctl_hdr hdr;
+	int err;
+	void *in;
+	void *out;
+	u16 out_len;
+
+	err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+	if (err)
+		return -EFAULT;
+
+	/* check valid */
+	if (hdr.check_filed != XSC_IOCTL_CHECK_FILED)
+		return -EINVAL;
+
+	in = kvzalloc(hdr.attr.length, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+	out_len = min_t(u16, hdr.attr.length, (u16)MAX_MBOX_OUT_LEN);
+	out = kvzalloc(out_len, GFP_KERNEL);
+	if (!out) {
+		kvfree(in);
+		return -ENOMEM;
+	}
+
+	err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length);
+	if (err) {
+		err = -EFAULT;
+		goto err_exit;
+	}
+
+	xsc_cmd_exec(xdev, in, hdr.attr.length, out, out_len);
+	xsc_pci_ctrl_cmdq_handle_res_obj(bdf_file, in, hdr.attr.length, out, hdr.attr.opcode);
+
+	if (copy_to_user((void *)user_hdr, &hdr, sizeof(hdr)))
+		err = -EFAULT;
+	if (copy_to_user((void *)user_hdr->attr.data, out, out_len))
+		err = -EFAULT;
+err_exit:
+	kvfree(in);
+	kvfree(out);
+	return err;
+}
+
+static int xsc_ioctl_user_mode(struct file *filp, unsigned long arg)
+{
+	struct xsc_bdf_file *bdf_file = filp->private_data;
+	struct xsc_core_device *dev = bdf_file->xdev;
+	struct xsc_ioctl_hdr __user *user_hdr =
+		(struct xsc_ioctl_hdr __user *)arg;
+	struct xsc_ioctl_hdr hdr;
+	struct xsc_ioctl_user_mode_attr *attr;
+	u8 *buf;
+	int err = 0;
+
+	err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+	if (err) {
+		xsc_core_err(dev, "failed to copy user_hdr from user\n");
+		return -EFAULT;
+	}
+
+	/* check valid */
+	if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) {
+		xsc_core_err(dev, "invalid check filed %u\n", hdr.check_filed);
+		return -EINVAL;
+	}
+
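+	/* The attribute payload is variable-length; copy it in full before
+	 * dispatching on hdr.attr.opcode below.
+	 */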
+	buf = kvzalloc(hdr.attr.length, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	err = copy_from_user(buf, user_hdr->attr.data, hdr.attr.length);
+	if (err) {
+		xsc_core_err(dev, "failed to copy ioctl user data\n");
+		kvfree(buf);
+		return -EFAULT;
+	}
+
+	switch (hdr.attr.opcode) {
+	case XSC_IOCTL_OPCODE_ENABLE_USER_MODE:
+		attr = (struct xsc_ioctl_user_mode_attr *)buf;
+		xsc_set_user_mode(dev, (attr->enable ? true : false));
+		if (attr->enable)
+			bdf_file->restore_nic_fn = xsc_eth_restore_nic_hca;
+		else
+			bdf_file->restore_nic_fn = NULL;
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	kvfree(buf);
+	return err;
+}
+
+static long xsc_priv_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int err;
+
+	switch (cmd) {
+	case XSC_IOCTL_CMDQ:
+		err = xsc_priv_dev_ioctl_cmdq(filp, arg);
+		break;
+	case XSC_IOCTL_DRV_GET:
+	case XSC_IOCTL_DRV_SET:
+		// TODO refactor to split driver get and set
+		err = xsc_priv_dev_ioctl_getinfo(filp, arg);
+		break;
+	case XSC_IOCTL_MEM:
+		err = xsc_priv_dev_ioctl_mem(filp, arg);
+		break;
+	case XSC_IOCTL_CMDQ_RAW:
+		err = xsc_priv_dev_ioctl_cmdq_raw(filp, arg);
+		break;
+	case XSC_IOCTL_USER_MODE:
+		err = xsc_ioctl_user_mode(filp, arg);
+		break;
+	default:
+		err = -EFAULT;
+		break;
+	}
+	return err;
+}
+
+static const struct file_operations dev_fops = {
+	.owner = THIS_MODULE,
+	.open = xsc_priv_dev_open,
+	.unlocked_ioctl = xsc_priv_dev_ioctl,
+	.compat_ioctl = xsc_priv_dev_ioctl,
+	.release = xsc_priv_dev_release,
+};
+
+#define XSC_MAX_CDEV_NUM 1024
+static dev_t g_priv_cdev_no;
+static int g_priv_cdev_cnt;
+static char *g_priv_class_name = "xscale";
+static struct class *g_priv_class;
+DECLARE_BITMAP(g_bitmap_cdev_id, XSC_MAX_CDEV_NUM);
+
+int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev)
+{
+	int ret;
+	int dev_id = 0;
+	struct xsc_priv_device *priv_dev = &dev->priv_device;
+
+	if (g_priv_cdev_cnt >= XSC_MAX_CDEV_NUM) {
+		xsc_core_err(dev, "too many xscale cdevices\n");
+		priv_dev->devno = U32_MAX;
+		return -EBUSY;
+	}
+
+	sprintf(priv_dev->device_name, "%s", ib_dev->name);
+
+	xsc_core_dbg(dev, "device_name %s\n", priv_dev->device_name);
+
+	cdev_init(&priv_dev->cdev, &dev_fops);
+	priv_dev->cdev.owner = THIS_MODULE;
+	dev_id = find_first_zero_bit(g_bitmap_cdev_id, XSC_MAX_CDEV_NUM);
+	priv_dev->devno = g_priv_cdev_no + dev_id;
+
+	ret = cdev_add(&priv_dev->cdev, priv_dev->devno, 1);
+	if (ret) {
+		xsc_core_err(dev, "%s cdev_add error ret:%d major:%d\n",
+			     priv_dev->device_name, ret, MAJOR(priv_dev->devno));
+		return ret;
+	}
+
+	device_create(g_priv_class, NULL, priv_dev->devno,
+		      NULL, "%s!%s", g_priv_class_name, priv_dev->device_name);
+	g_priv_cdev_cnt++;
+	set_bit(dev_id, g_bitmap_cdev_id);
+
+	INIT_LIST_HEAD(&priv_dev->mem_list);
+	spin_lock_init(&priv_dev->mem_lock);
+
+	INIT_RADIX_TREE(&priv_dev->bdf_tree, GFP_ATOMIC);
+	spin_lock_init(&priv_dev->bdf_lock);
+
+	xsc_core_dbg(dev, "init success\n");
+
+	return 0;
+}
+
+void xsc_priv_dev_fini(struct ib_device *ib_dev, struct xsc_core_device *dev)
+{
+	struct xsc_priv_device *priv_dev;
+	struct cdev *char_dev;
+	struct xsc_bdf_file *bdf_file;
+	struct radix_tree_iter iter;
+	void **slot;
+	int dev_id = 0;
+
+	if (!dev || !ib_dev) {
+		pr_err("[%s:%d] device is null pointer\n", __func__, __LINE__);
+		return;
+	}
+
+	priv_dev = &dev->priv_device;
+	if (priv_dev->devno == U32_MAX)
+		return;
+
+	char_dev = &priv_dev->cdev;
+
+	dev_id = MINOR(priv_dev->devno);
+	spin_lock(&priv_dev->bdf_lock);
+	radix_tree_for_each_slot(slot, &priv_dev->bdf_tree,
&iter, 0) {
+		bdf_file = (struct xsc_bdf_file *)(*slot);
+		xsc_close_bdf_file(bdf_file);
+		radix_tree_iter_delete(&priv_dev->bdf_tree, &iter, slot);
+		kfree(bdf_file);
+	}
+	spin_unlock(&priv_dev->bdf_lock);
+	device_destroy(g_priv_class, priv_dev->devno);
+	cdev_del(&priv_dev->cdev);
+
+	clear_bit(dev_id, g_bitmap_cdev_id);
+	g_priv_cdev_cnt--;
+	xsc_core_dbg(dev, "fini success\n");
+}
+
+int xsc_priv_alloc_chrdev_region(void)
+{
+	int ret = 0;
+	char *device_name = "xscale";
+
+	ret = alloc_chrdev_region(&g_priv_cdev_no, 0, XSC_MAX_CDEV_NUM, device_name);
+	if (ret) {
+		pr_err("%s can't get major %d\n", device_name, MAJOR(g_priv_cdev_no));
+		return ret;
+	}
+	g_priv_class = class_create(g_priv_class_name);
+	g_priv_cdev_cnt = 0;
+
+	return 0;
+}
+
+void xsc_priv_unregister_chrdev_region(void)
+{
+	class_destroy(g_priv_class);
+	unregister_chrdev_region(g_priv_cdev_no, XSC_MAX_CDEV_NUM);
+}
diff --git a/drivers/infiniband/hw/xsc/qp.c b/drivers/infiniband/hw/xsc/qp.c
new file mode 100644
index 0000000000000000000000000000000000000000..6df90c841af4c9e95b68ce909538069915399482
--- /dev/null
+++ b/drivers/infiniband/hw/xsc/qp.c
@@ -0,0 +1,1939 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include
+#include
+#include "xsc_ib.h"
+#include "user.h"
+#include "common/xsc_hsi.h"
+#include "common/xsc_lag.h"
+#include
+#include
+#include
+
+/* not supported currently */
+static int wq_signature;
+
+#define MAD_QUEUE_DEPTH 128
+
+enum {
+	XSC_IB_CACHE_LINE_SIZE = 64,
+};
+
+#define MAC_INVALID 0xff
+
+#define LAG_PORT_NUM_MASK_EN		0x80000000
+#define LAG_PORT_NUM_MASK_EN_OFFSET	31
+#define LAG_PORT_NUM_MASK		0x30000
+#define LAG_PORT_NUM_OFFSET		16
+
+#define UDP_SPORT_MASK_EN		0x40000000
+#define UDP_SPORT_MASK_EN_OFFSET	30
+#define UDP_SPORT_MASK			0xffff
+#define UDP_SPORT_OFFSET		0
+
+static const u32 xsc_ib_opcode[] = {
+	[IB_WR_SEND]			= XSC_MSG_OPCODE_SEND,
+	[IB_WR_SEND_WITH_IMM]		= XSC_MSG_OPCODE_SEND,
+	[IB_WR_RDMA_WRITE]		= XSC_MSG_OPCODE_RDMA_WRITE,
+	[IB_WR_RDMA_WRITE_WITH_IMM]	= XSC_MSG_OPCODE_RDMA_WRITE,
+	[IB_WR_RDMA_READ]		= XSC_MSG_OPCODE_RDMA_READ,
+	[IB_WR_LOCAL_INV]		= XSC_MSG_OPCODE_SEND,
+	[IB_WR_REG_MR]			= XSC_MSG_OPCODE_SEND,
+	[IB_WR_SEND_WITH_INV]		= XSC_MSG_OPCODE_SEND,
+};
+
+static int is_qp0(enum ib_qp_type qp_type)
+{
+	return qp_type == IB_QPT_SMI;
+}
+
+static int is_qp1(enum ib_qp_type qp_type)
+{
+	return qp_type == IB_QPT_GSI;
+}
+
+static int is_sqp(enum ib_qp_type qp_type)
+{
+	return is_qp0(qp_type) || is_qp1(qp_type);
+}
+
+static void *get_wqe(struct xsc_ib_qp *qp, int offset)
+{
+	return xsc_buf_offset(&qp->buf, offset);
+}
+
+static void *get_recv_wqe(struct xsc_ib_qp *qp, int n)
+{
+	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
+}
+
+static void *get_seg_wqe(void *first, int n)
+{
+	return first + (n << XSC_BASE_WQE_SHIFT);
+}
+
+void *xsc_get_send_wqe(struct xsc_ib_qp *qp, int n)
+{
+	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
+}
+
+static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+{
+	int prio;
+	struct net_device *dev;
+
+	prio = rt_tos2priority(tos);
+	dev = is_vlan_dev(ndev) ?
vlan_dev_real_dev(ndev) : ndev; + if (dev->num_tc) + return netdev_get_prio_tc_map(dev, prio); + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + if (is_vlan_dev(ndev)) + return (vlan_dev_get_egress_qos_mask(ndev, prio) & + VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; +#endif + return 0; +} + +static inline void set_remote_addr_seg(struct xsc_wqe_data_seg *remote_seg, + u32 msg_len, u64 remote_addr, u32 rkey) +{ + remote_seg->in_line = 0; + WR_LE_32(remote_seg->seg_len, msg_len); + WR_LE_32(remote_seg->mkey, rkey); + WR_LE_64(remote_seg->va, remote_addr); +} + +static void set_local_data_seg(struct xsc_wqe_data_seg *data_seg, struct ib_sge *sg) +{ + data_seg->in_line = 0; + WR_LE_32(data_seg->seg_len, sg->length); + WR_LE_32(data_seg->mkey, sg->lkey); + WR_LE_64(data_seg->va, sg->addr); +} + +static int set_data_inl_seg(struct xsc_ib_qp *qp, const struct ib_send_wr *wr, void *ctrl) +{ + struct xsc_wqe_data_seg *data_seg; + unsigned int seg_index; + void *addr; + int len; + int i; + + for (i = 0, seg_index = 1; i < wr->num_sge; ++i, ++seg_index) { + if (likely(wr->sg_list[i].length)) { + addr = (void *)wr->sg_list[i].addr; + len = wr->sg_list[i].length; + + if (unlikely(len > qp->max_inline_data)) + return -ENOMEM; + + data_seg = get_seg_wqe(ctrl, seg_index); + data_seg->in_line = 1; + data_seg->len = len; + memcpy(data_seg->in_line_data, addr, len); + } + } + + return 0; +} + +static __be32 send_ieth(const struct ib_send_wr *wr) +{ + switch (wr->opcode) { + case IB_WR_SEND_WITH_IMM: + case IB_WR_RDMA_WRITE_WITH_IMM: + return wr->ex.imm_data; + default: + return 0; + } +} + +static void xsc_ib_qp_event(struct xsc_core_qp *qp, int type) +{ + struct ib_qp *ibqp = &to_xibqp(qp)->ibqp; + struct ib_event event; + + if (ibqp->event_handler) { + event.device = ibqp->device; + event.element.qp = ibqp; + switch (type) { + case XSC_EVENT_TYPE_WQ_CATAS_ERROR: + event.event = IB_EVENT_QP_FATAL; + break; + case XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + event.event = IB_EVENT_QP_REQ_ERR; + break; + case XSC_EVENT_TYPE_WQ_ACCESS_ERROR: + event.event = IB_EVENT_QP_ACCESS_ERR; + break; + default: + pr_warn("xsc_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); + return; + } + + ibqp->event_handler(&event, ibqp->qp_context); + } +} + +static int set_rq_size(struct xsc_ib_dev *dev, struct ib_qp_cap *cap, + int has_rq, struct xsc_ib_qp *qp, struct xsc_ib_create_qp *ucmd) +{ + u32 wqe_cnt = roundup_pow_of_two(cap->max_recv_wr); + + /* Sanity check RQ size before proceeding */ + if (wqe_cnt > dev->xdev->caps.max_wqes) { + xsc_ib_warn(dev, "max_recv_wr:%d exceed max rq depth\n", cap->max_recv_wr); + wqe_cnt = dev->xdev->caps.max_wqes; + } + + if (!has_rq) { + qp->rq.max_gs = 0; + qp->rq.wqe_cnt = 0; + qp->rq.wqe_shift = 0; + } else { + if (ucmd) { + qp->rq.wqe_cnt = ucmd->rq_wqe_count; + qp->rq.wqe_shift = ucmd->rq_wqe_shift; + qp->rq.max_gs = 1; + qp->rq.max_post = qp->rq.wqe_cnt; + } else { + qp->rq.wqe_cnt = wqe_cnt; + qp->rq.wqe_shift = dev->xdev->caps.recv_wqe_shift; + qp->rq.max_gs = dev->xdev->caps.recv_ds_num; + qp->rq.max_post = qp->rq.wqe_cnt; + } + } + + return 0; +} + +static int calc_sq_size(struct xsc_ib_dev *dev, struct ib_qp_init_attr *attr, + struct xsc_ib_qp *qp) +{ + int wqe_size; + int wq_size; + + if (!attr->cap.max_send_wr) { + xsc_ib_err(dev, "invalid max_send_wr:%d\n", attr->cap.max_send_wr); + return -1; + } + + wqe_size = 1 << dev->xdev->caps.send_wqe_shift; + qp->max_inline_data = (dev->xdev->caps.send_ds_num - 2) * sizeof(struct xsc_wqe_data_seg); + attr->cap.max_inline_data = qp->max_inline_data; + + 
qp->sq.wqe_cnt = roundup_pow_of_two(attr->cap.max_send_wr); + qp->sq.wqe_cnt = min_t(int, qp->sq.wqe_cnt, (int)dev->xdev->caps.max_wqes); + qp->sq.ds_cnt = qp->sq.wqe_cnt << (dev->xdev->caps.send_wqe_shift - XSC_BASE_WQE_SHIFT); + wq_size = qp->sq.wqe_cnt * wqe_size; + qp->sq.wqe_shift = ilog2(wqe_size); + qp->sq.max_gs = dev->xdev->caps.send_ds_num - XSC_CTRL_SEG_NUM - XSC_RADDR_SEG_NUM; + qp->sq.max_post = qp->sq.wqe_cnt; + + return wq_size; +} + +static int qp_has_rq(struct ib_qp_init_attr *attr) +{ + if (attr->qp_type == IB_QPT_XRC_INI || + attr->qp_type == IB_QPT_XRC_TGT || attr->srq || + !attr->cap.max_recv_wr) + return 0; + + return 1; +} + +static enum xsc_qp_state to_xsc_state(enum ib_qp_state state) +{ + switch (state) { + case IB_QPS_RESET: return XSC_QP_STATE_RST; + case IB_QPS_INIT: return XSC_QP_STATE_INIT; + case IB_QPS_RTR: return XSC_QP_STATE_RTR; + case IB_QPS_RTS: return XSC_QP_STATE_RTS; + case IB_QPS_SQD: return XSC_QP_STATE_SQD; + case IB_QPS_SQE: return XSC_QP_STATE_SQER; + case IB_QPS_ERR: return XSC_QP_STATE_ERR; + default: return -1; + } +} + +static char *qp_state_to_str(enum ib_qp_state state) +{ + switch (state) { + case IB_QPS_RESET: return "RST"; + case IB_QPS_INIT: return "INIT"; + case IB_QPS_RTR: return "RTR"; + case IB_QPS_RTS: return "RTS"; + case IB_QPS_SQD: return "SQD"; + case IB_QPS_SQE: return "SQE"; + case IB_QPS_ERR: return "ERR"; + default: return "UNKNOWN"; + } +} + +static int create_user_qp(struct xsc_ib_dev *dev, struct ib_pd *pd, + struct xsc_ib_qp *qp, struct ib_udata *udata, + struct xsc_create_qp_mbox_in **in, + struct xsc_ib_create_qp_resp *resp, int *inlen) +{ + struct xsc_ib_ucontext *context; + struct xsc_ib_create_qp ucmd; + int page_shift; + int npages; + u32 offset; + int ncont; + int err; + int hw_npages; + + err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); + if (err) { + xsc_ib_err(dev, "failed to copy from udata, err=%d\n", err); + return err; + } + xsc_ib_info(dev, "buf_addr:0x%lx db_addr:0x%lx sq cnt:%u, rq cnt:%u, rq shift:%u\n", + (uintptr_t)ucmd.buf_addr, (uintptr_t)ucmd.db_addr, + ucmd.sq_wqe_count, ucmd.rq_wqe_count, ucmd.rq_wqe_shift); + + context = to_xucontext(pd->uobject->context); + + qp->sq.ds_cnt = ucmd.sq_wqe_count; + qp->sq.wqe_cnt = ucmd.sq_wqe_count; + qp->sq.wqe_shift = XSC_BASE_WQE_SHIFT; + qp->rq.ds_cnt = ucmd.rq_wqe_count; + qp->rq.wqe_cnt = ucmd.rq_wqe_count; + qp->rq.wqe_shift = XSC_BASE_WQE_SHIFT; + + qp->buf_size = (qp->sq.wqe_cnt << qp->sq.wqe_shift) + (qp->rq.wqe_cnt << qp->rq.wqe_shift); + qp->umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr, qp->buf_size, 0); + if (IS_ERR(qp->umem)) { + xsc_ib_err(dev, "umem_get failed\n"); + err = PTR_ERR(qp->umem); + goto err_uuar; + } + + xsc_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, + &ncont, NULL); + if (ncont != npages) { + page_shift = PAGE_SHIFT; + ncont = npages; + } + + hw_npages = DIV_ROUND_UP(qp->buf_size, PAGE_SIZE_4K); + err = xsc_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset); + if (err) { + xsc_ib_err(dev, "bad offset:%d\n", offset); + goto err_umem; + } + xsc_ib_info(dev, "npage:%d, page_shift:%d, ncont:%d, offset:%d, hw_npages %d\n", + npages, page_shift, ncont, offset, hw_npages); + + *inlen = sizeof(**in) + sizeof(*((*in)->req.pas)) * hw_npages; + *in = xsc_vzalloc(*inlen); + if (!*in) { + err = -ENOMEM; + goto err_umem; + } + xsc_ib_populate_pas(dev, qp->umem, page_shift, (*in)->req.pas, hw_npages, true); + (*in)->req.pa_num = cpu_to_be16(hw_npages); + + err = ib_copy_to_udata(udata, resp, sizeof(*resp)); + if (err) 
{ + xsc_ib_err(dev, "failed to copy to udata, err=%d\n", err); + goto err_umem; + } + qp->create_type = XSC_QP_USER; + + return 0; + +err_umem: + ib_umem_release(qp->umem); + +err_uuar: + return err; +} + +static void destroy_qp_user(struct ib_pd *pd, struct xsc_ib_qp *qp) +{ + struct xsc_ib_ucontext *context; + + context = to_xucontext(pd->uobject->context); + ib_umem_release(qp->umem); +} + +#define MAX_QP1_SQ_HDR_SIZE_V2 512 +#define MAX_QP1_SQ_HDR_SIZE 86 + /* Ethernet header = 14 */ + /* ib_grh = 40 (provided by MAD) */ + /* ib_bth + ib_deth = 20 */ + /* MAD = 256 (provided by MAD) */ + /* iCRC = 4 */ +#define MAX_QP1_RQ_HDR_SIZE_V2 512 + +static int create_kernel_qp(struct xsc_ib_dev *dev, + struct ib_qp_init_attr *init_attr, + struct xsc_ib_qp *qp, + struct xsc_create_qp_mbox_in **in, int *inlen) +{ + int err; + int sq_size; + int hw_npages; + + sq_size = calc_sq_size(dev, init_attr, qp); + if (sq_size < 0) { + err = -ENOMEM; + xsc_ib_err(dev, "err %d\n", err); + return err; + } + + qp->rq.ds_cnt = qp->rq.wqe_cnt << (qp->rq.wqe_shift - XSC_BASE_WQE_SHIFT); + qp->rq.offset = 0; + qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; + qp->buf_size = qp->sq.offset + sq_size; + qp->send_psn = 0; + + err = xsc_buf_alloc(dev->xdev, qp->buf_size, PAGE_SIZE, &qp->buf); + if (err) { + xsc_ib_err(dev, "failed to alloc qp buffer,err=%d\n", err); + return err; + } + + qp->sq.qend = qp->buf.direct.buf + qp->sq.offset + sq_size; + hw_npages = DIV_ROUND_UP(qp->buf_size, PAGE_SIZE_4K); + *inlen = sizeof(**in) + sizeof(*(*in)->req.pas) * hw_npages; + *in = xsc_vzalloc(*inlen); + if (!*in) { + err = -ENOMEM; + goto err_buf; + } + + xsc_fill_page_array(&qp->buf, (*in)->req.pas, hw_npages); + (*in)->req.pa_num = cpu_to_be16(hw_npages); + + qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.wrid), GFP_KERNEL); + qp->sq.wr_data = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.wr_data), GFP_KERNEL); + qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(*qp->rq.wrid), GFP_KERNEL); + qp->sq.w_list = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.w_list), GFP_KERNEL); + qp->sq.wqe_head = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.wqe_head), GFP_KERNEL); + + if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || + !qp->sq.w_list || !qp->sq.wqe_head) { + err = -ENOMEM; + goto err_wrid; + } + qp->create_type = XSC_QP_KERNEL; + + if (init_attr->qp_type == IB_QPT_GSI) { + qp->sq.mad_index = 0; + qp->sq.mad_queue_depth = MAD_QUEUE_DEPTH; + qp->sq.hdr_size = MAX_QP1_SQ_HDR_SIZE_V2 * MAD_QUEUE_DEPTH; + qp->sq.hdr_buf = dma_alloc_coherent(dev->ib_dev.dma_device, + qp->sq.hdr_size, + &qp->sq.hdr_dma, + GFP_KERNEL); + if (!qp->sq.hdr_buf) { + err = -ENOMEM; + xsc_ib_err(dev, "Failed to create sq_hdr_buf"); + goto err_wrid; + } + } + + return 0; + +err_wrid: + kfree(qp->sq.wqe_head); + kfree(qp->sq.w_list); + kfree(qp->sq.wrid); + kfree(qp->sq.wr_data); + kfree(qp->rq.wrid); + +err_buf: + xsc_buf_free(dev->xdev, &qp->buf); + return err; +} + +static void destroy_qp_kernel(struct xsc_ib_dev *dev, struct xsc_ib_qp *qp) +{ + if (qp->sq.hdr_buf) + dma_free_coherent(dev->ib_dev.dma_device, qp->sq.hdr_size, + qp->sq.hdr_buf, qp->sq.hdr_dma); + kfree(qp->sq.wqe_head); + kfree(qp->sq.w_list); + kfree(qp->sq.wrid); + kfree(qp->sq.wr_data); + kfree(qp->rq.wrid); + xsc_buf_free(dev->xdev, &qp->buf); +} + +static u8 ib_to_xsc_qp_type(enum ib_qp_type qp_type, __u32 flags) +{ + if (qp_type == IB_QPT_RC) { + return XSC_QUEUE_TYPE_RDMA_RC; + } else if ((qp_type == IB_QPT_GSI) || (qp_type == IB_QPT_SMI)) { + return 
XSC_QUEUE_TYPE_RDMA_MAD; + } else if (qp_type == IB_QPT_RAW_PACKET) { + if (flags & XSC_QP_FLAG_RAWPACKET_TSO) + return XSC_QUEUE_TYPE_RAW_TSO; + else if (flags & XSC_QP_FLAG_RAWPACKET_TX) + return XSC_QUEUE_TYPE_RAW_TX; + else + return XSC_QUEUE_TYPE_RAW; + } else { + return XSC_QUEUE_TYPE_INVALID; + } +} + +static int create_qp_common(struct xsc_ib_dev *dev, struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, struct xsc_ib_qp *qp) +{ + struct xsc_ib_resources *devr = &dev->devr; + struct xsc_ib_create_qp_resp resp; + struct xsc_create_qp_mbox_in *in = NULL; + struct xsc_ib_create_qp ucmd; + int inlen = sizeof(*in); + int err; + char buf[256]; + char *ptr = buf; + int ret = 0; + + mutex_init(&qp->mutex); + spin_lock_init(&qp->sq.lock); + spin_lock_init(&qp->rq.lock); + spin_lock_init(&qp->lock); + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) + qp->sq_signal_bits = XSC_WQE_CTRL_CQ_UPDATE; + + if (pd && pd->uobject) { + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { + xsc_ib_err(dev, "failed to copy from udata\n"); + return -EFAULT; + } + + qp->wq_sig = !!(ucmd.flags & XSC_QP_FLAG_SIGNATURE); + qp->scat_cqe = !!(ucmd.flags & XSC_QP_FLAG_SCATTER_CQE); + } else { + qp->wq_sig = !!wq_signature; + } + + qp->has_rq = qp_has_rq(init_attr); + + err = set_rq_size(dev, &init_attr->cap, qp->has_rq, + qp, (pd && pd->uobject) ? &ucmd : NULL); + if (err) { + xsc_ib_err(dev, "failed to set rq size %d\n", err); + return err; + } + + if (pd) { + if (pd->uobject) { + err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); + if (err) + xsc_ib_err(dev, "failed to create user qp, err = %d\n", err); + } else { + err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); + if (err) + xsc_ib_err(dev, "failed to create kernel qp, err = %d\n", err); + else + qp->pa_lkey = to_mpd(pd)->pa_lkey; + } + + if (err) + return err; + } else { + in = xsc_vzalloc(sizeof(*in)); + if (!in) + return -ENOMEM; + + qp->create_type = XSC_QP_EMPTY; + } + + if (is_sqp(init_attr->qp_type)) + qp->port = init_attr->port_num; + + in->req.qp_type = init_attr->qp_type; + if (is_qp1(init_attr->qp_type)) + in->req.input_qpn = cpu_to_be16(1); + + if (init_attr->qp_type != XSC_IB_QPT_REG_UMR) + in->req.pdn = cpu_to_be32(to_mpd(pd ? 
pd : devr->p0)->pdn); + + if (qp->rq.ds_cnt) + in->req.log_rq_sz = ilog2(qp->rq.ds_cnt); + + if (qp->sq.ds_cnt) + in->req.log_sq_sz = ilog2(qp->sq.ds_cnt); + else + in->req.log_sq_sz = ilog2(0x80); + + if (init_attr->send_cq) { + qp->send_cq = init_attr->send_cq; + in->req.cqn_send = to_xcq(init_attr->send_cq)->xcq.cqn; + in->req.cqn_send = cpu_to_be16(in->req.cqn_send); +#ifndef MSIX_SUPPORT + init_attr->send_cq->comp_handler(init_attr->send_cq, + init_attr->send_cq->cq_context); +#endif + } + + if (init_attr->recv_cq) { + qp->recv_cq = init_attr->recv_cq; + in->req.cqn_recv = to_xcq(init_attr->recv_cq)->xcq.cqn; + in->req.cqn_recv = cpu_to_be16(in->req.cqn_recv); + } + + in->req.qp_type = ib_to_xsc_qp_type(init_attr->qp_type, ucmd.flags); + + if (in->req.qp_type == XSC_QUEUE_TYPE_INVALID) { + xsc_ib_err(dev, "invalid qp type:%d\n", init_attr->qp_type); + goto err_create; + } + in->req.glb_funcid = cpu_to_be16(dev->xdev->glb_func_id); + + qp->xqp.qp_type_internal = in->req.qp_type; + + err = xsc_core_create_qp(dev->xdev, &qp->xqp, in, inlen); + if (err) { + xsc_ib_err(dev, "create qp failed, err=%d\n", err); + goto err_create; + } + + qp->doorbell_qpn = qp->xqp.qpn; + + qp->xqp.event = xsc_ib_qp_event; + qp->xqp.qp_type = init_attr->qp_type; + ret += snprintf(ptr + ret, 256 - ret, "pdn=%d,", to_mpd(pd ? pd : devr->p0)->pdn); + ret += snprintf(ptr + ret, 256 - ret, "log_rq_sz=%d,", in->req.log_rq_sz); + ret += snprintf(ptr + ret, 256 - ret, "log_sq_sz=%d,", in->req.log_sq_sz); + ret += snprintf(ptr + ret, 256 - ret, "scqn=%d,", to_xcq(init_attr->send_cq)->xcq.cqn); + ret += snprintf(ptr + ret, 256 - ret, "rcqn=%d", to_xcq(init_attr->recv_cq)->xcq.cqn); + + xsc_ib_info(dev, "succeeded to create qp:%d, %s\n", qp->xqp.qpn, buf); + + xsc_vfree(in); + + return 0; + +err_create: + if (qp->create_type == XSC_QP_USER) + destroy_qp_user(pd, qp); + else if (qp->create_type == XSC_QP_KERNEL) + destroy_qp_kernel(dev, qp); + + xsc_vfree(in); + return err; +} + +static void xsc_ib_lock_cqs(struct xsc_ib_cq *send_cq, struct xsc_ib_cq *recv_cq) + __acquires(&send_cq->lock) __acquires(&recv_cq->lock) +{ + if (send_cq) { + if (recv_cq) { + if (send_cq->xcq.cqn < recv_cq->xcq.cqn) { + spin_lock_irq(&send_cq->lock); + spin_lock_nested(&recv_cq->lock, + SINGLE_DEPTH_NESTING); + } else if (send_cq->xcq.cqn == recv_cq->xcq.cqn) { + spin_lock_irq(&send_cq->lock); + __acquire(&recv_cq->lock); + } else { + spin_lock_irq(&recv_cq->lock); + spin_lock_nested(&send_cq->lock, + SINGLE_DEPTH_NESTING); + } + } else { + spin_lock_irq(&send_cq->lock); + } + } else if (recv_cq) { + spin_lock_irq(&recv_cq->lock); + } +} + +static void xsc_ib_unlock_cqs(struct xsc_ib_cq *send_cq, struct xsc_ib_cq *recv_cq) + __releases(&send_cq->lock) __releases(&recv_cq->lock) +{ + if (send_cq) { + if (recv_cq) { + if (send_cq->xcq.cqn < recv_cq->xcq.cqn) { + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else if (send_cq->xcq.cqn == recv_cq->xcq.cqn) { + __release(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else { + spin_unlock(&send_cq->lock); + spin_unlock_irq(&recv_cq->lock); + } + } else { + spin_unlock_irq(&send_cq->lock); + } + } else if (recv_cq) { + spin_unlock_irq(&recv_cq->lock); + } +} + +static struct xsc_ib_pd *get_pd(struct xsc_ib_qp *qp) +{ + return to_mpd(qp->ibqp.pd); +} + +static void get_cqs(struct xsc_ib_qp *qp, + struct xsc_ib_cq **send_cq, struct xsc_ib_cq **recv_cq) +{ + switch (qp->ibqp.qp_type) { + case IB_QPT_XRC_TGT: + *send_cq = NULL; + *recv_cq = NULL; + break; + case 
XSC_IB_QPT_REG_UMR: + case IB_QPT_XRC_INI: + *send_cq = to_xcq(qp->ibqp.send_cq); + *recv_cq = NULL; + break; + + case IB_QPT_SMI: + case IB_QPT_GSI: + case IB_QPT_RC: + case IB_QPT_UC: + case IB_QPT_UD: + case IB_QPT_RAW_IPV6: + case IB_QPT_RAW_ETHERTYPE: + *send_cq = to_xcq(qp->ibqp.send_cq); + *recv_cq = to_xcq(qp->ibqp.recv_cq); + break; + + case IB_QPT_RAW_PACKET: + case IB_QPT_MAX: + default: + *send_cq = NULL; + *recv_cq = NULL; + break; + } +} + +static void destroy_qp_common(struct xsc_ib_dev *dev, struct xsc_ib_qp *qp) +{ + struct xsc_ib_cq *send_cq, *recv_cq; + struct xsc_modify_qp_mbox_in *in; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return; + + if (qp->xqp.qp_type_internal == XSC_QUEUE_TYPE_RAW || + qp->xqp.qp_type_internal == XSC_QUEUE_TYPE_RAW_TSO || + qp->xqp.qp_type_internal == XSC_QUEUE_TYPE_RAW_TX || + qp->state != IB_QPS_RESET) + if (xsc_core_qp_modify(dev->xdev, to_xsc_state(qp->state), + XSC_QP_STATE_RST, in, sizeof(*in), &qp->xqp)) + xsc_ib_warn(dev, "modify QP %06x to RESET failed\n", qp->xqp.qpn); + + get_cqs(qp, &send_cq, &recv_cq); + + if (qp->create_type == XSC_QP_KERNEL) { + xsc_ib_lock_cqs(send_cq, recv_cq); + __xsc_ib_cq_clean(recv_cq, qp->xqp.qpn); + if (send_cq != recv_cq) + __xsc_ib_cq_clean(send_cq, qp->xqp.qpn); + xsc_ib_unlock_cqs(send_cq, recv_cq); + } + + err = xsc_core_destroy_qp(dev->xdev, &qp->xqp); + if (err) + xsc_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->xqp.qpn); + kfree(in); + + if (qp->create_type == XSC_QP_KERNEL) + destroy_qp_kernel(dev, qp); + else if (qp->create_type == XSC_QP_USER) + destroy_qp_user(&get_pd(qp)->ibpd, qp); +} + +static const char *ib_qp_type_str(enum ib_qp_type type) +{ + switch (type) { + case IB_QPT_SMI: + return "IB_QPT_SMI"; + case IB_QPT_GSI: + return "IB_QPT_GSI"; + case IB_QPT_RC: + return "IB_QPT_RC"; + case IB_QPT_UC: + return "IB_QPT_UC"; + case IB_QPT_UD: + return "IB_QPT_UD"; + case IB_QPT_RAW_IPV6: + return "IB_QPT_RAW_IPV6"; + case IB_QPT_RAW_ETHERTYPE: + return "IB_QPT_RAW_ETHERTYPE"; + case IB_QPT_XRC_INI: + return "IB_QPT_XRC_INI"; + case IB_QPT_XRC_TGT: + return "IB_QPT_XRC_TGT"; + case IB_QPT_RAW_PACKET: + return "IB_QPT_RAW_PACKET"; + case XSC_IB_QPT_REG_UMR: + return "XSC_IB_QPT_REG_UMR"; + case IB_QPT_MAX: + default: + return "Invalid QP type"; + } +} + +int xsc_ib_create_qp(struct ib_qp *ibqp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct xsc_ib_dev *dev; + struct xsc_ib_qp *qp; + struct ib_pd *pd = ibqp->pd; + int err; + + if (pd) { + dev = to_mdev(pd->device); + } else { + /* being cautious here */ + if (init_attr->qp_type != IB_QPT_XRC_TGT && + init_attr->qp_type != XSC_IB_QPT_REG_UMR) { + pr_warn("%s: no PD for transport %s\n", __func__, + ib_qp_type_str(init_attr->qp_type)); + return RET_VALUE(-EINVAL); + } + dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); + } + + if (init_attr->qp_type != IB_QPT_RAW_PACKET) { + if (!is_support_rdma(dev->xdev) || + (is_qp1(init_attr->qp_type) && !is_support_rdma_cm(dev->xdev))) { + return RET_VALUE(-EPROTONOSUPPORT); + } + } + + qp = to_xqp(ibqp); + + qp->xqp.mac_id = MAC_INVALID; + + switch (init_attr->qp_type) { + case IB_QPT_RC: + case IB_QPT_SMI: + case IB_QPT_GSI: + case IB_QPT_RAW_PACKET: + err = create_qp_common(dev, pd, init_attr, udata, qp); + if (err) { + xsc_ib_err(dev, "create_qp_common failed\n"); + return RET_VALUE(err); + } + + if (is_qp0(init_attr->qp_type)) { + qp->ibqp.qp_num = 0; + } else if (is_qp1(init_attr->qp_type)) { + qp->ibqp.qp_num = 1; + dev->xdev->gsi_qpn = 
qp->xqp.qpn; + } else { + qp->ibqp.qp_num = qp->xqp.qpn; + } + + break; + + case IB_QPT_RAW_IPV6: + case IB_QPT_RAW_ETHERTYPE: + case IB_QPT_MAX: + default: + xsc_ib_err(dev, "unsupported qp type %d\n", + init_attr->qp_type); + /* Don't support raw QPs */ + return RET_VALUE(-EINVAL); + } + + return 0; +} + +xsc_ib_destroy_qp_def() +{ + struct xsc_ib_dev *dev = to_mdev(qp->device); + struct xsc_ib_qp *xqp = to_xqp(qp); + struct xsc_core_device *xdev = dev->xdev; + struct xsc_lag *lag; + + destroy_qp_common(dev, xqp); + + xsc_board_lag_lock(xdev); + if (xqp->xqp.mac_id != MAC_INVALID && xsc_lag_is_roce(xdev)) { + lag = xsc_get_lag(xdev); + atomic_dec(&lag->qp_cnt[xqp->xqp.mac_id]); + } + xsc_board_lag_unlock(xdev); + + return 0; +} + +static inline u16 xsc_calc_udp_sport(u32 lqpn, u32 rqpn) +{ + unsigned char *p; + u8 ports[2]; + u16 sport; + u64 tqpn; + + tqpn = ((u64)(lqpn & 0xffffff)) * ((u64)(rqpn & 0xffffff)); + p = (unsigned char *)&tqpn; + ports[0] = p[0] ^ p[2] ^ p[4]; + ports[1] = p[1] ^ p[3] ^ p[5]; + sport = *((u16 *)ports) | 0xC000; + + return sport; +} + +static inline void xsc_path_set_udp_sport(struct xsc_qp_path *path, + const struct rdma_ah_attr *ah, + u32 lqpn, u32 rqpn) +{ + if ((ah->grh.flow_label & UDP_SPORT_MASK) != 0) { + if ((ah->grh.flow_label & UDP_SPORT_MASK_EN) == 0) + path->sport = cpu_to_be16(xsc_flow_label_to_udp_sport(ah->grh.flow_label)); + else + path->sport = cpu_to_be16((ah->grh.flow_label & UDP_SPORT_MASK) >> + UDP_SPORT_OFFSET); + } else { + path->sport = cpu_to_be16(xsc_calc_udp_sport(lqpn, rqpn)); + } +} + +static int xsc_set_path(struct xsc_ib_dev *dev, const struct rdma_ah_attr *ah, + struct xsc_qp_path *path, u8 port, int attr_mask, + u32 path_flags, const struct ib_qp_attr *attr, struct xsc_ib_qp *qp) +{ + struct ib_global_route *grh = rdma_ah_retrieve_grh((struct rdma_ah_attr *)ah); + union ib_gid *dgid = &grh->dgid; + const struct ib_gid_attr *sgid_attr = grh->sgid_attr; + union ib_gid *sgid = &((struct ib_gid_attr *)sgid_attr)->gid; + union { + struct sockaddr _sockaddr; + struct sockaddr_in _sockaddr_in; + struct sockaddr_in6 _sockaddr_in6; + } sgid_addr, dgid_addr; + int force_pcp, force_dscp; + char buf[256] = {0}; + char *ptr = buf; + int ret = 0; + + if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) { + if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH)) + return -EINVAL; + + if (qp->ibqp.qp_type == IB_QPT_RC || + qp->ibqp.qp_type == IB_QPT_UC || + qp->ibqp.qp_type == IB_QPT_XRC_INI || + qp->ibqp.qp_type == IB_QPT_XRC_TGT) + xsc_path_set_udp_sport(path, ah, qp->ibqp.qp_num, attr->dest_qp_num); + + if (sgid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) { + xsc_ib_err(dev, "gid type not ROCEv2\n"); + return -EINVAL; + } + + force_dscp = dev->force_dscp; + if (force_dscp == DSCP_PCP_UNSET) + path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f; + else + path->ecn_dscp = force_dscp; + path->hop_limit = grh->hop_limit; + + rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid); + + if (sgid_addr._sockaddr.sa_family == AF_INET && + dgid_addr._sockaddr.sa_family == AF_INET) { + memcpy(path->sip, &sgid_addr._sockaddr_in.sin_addr.s_addr, + sizeof(struct in_addr)); + memcpy(path->dip, &dgid_addr._sockaddr_in.sin_addr.s_addr, + sizeof(struct in_addr)); + path->af_type = AF_INET; + ret += snprintf(ptr + ret, 256 - ret, "sip=%#x,", + be32_to_cpu(path->sip[0])); + ret += snprintf(ptr + ret, 256 - ret, "dip=%#x,", + be32_to_cpu(path->dip[0])); + } else if (sgid_addr._sockaddr.sa_family == AF_INET6 && + 
dgid_addr._sockaddr.sa_family == AF_INET6) { + memcpy(path->sip, &sgid_addr._sockaddr_in6.sin6_addr.s6_addr, + sizeof(path->sip)); + memcpy(path->dip, &dgid_addr._sockaddr_in6.sin6_addr.s6_addr, + sizeof(path->dip)); + path->af_type = AF_INET6; + ret += snprintf(ptr + ret, 256 - ret, "sip=%08x%08x%08x%08x,", + be32_to_cpu(path->sip[0]), be32_to_cpu(path->sip[1]), + be32_to_cpu(path->sip[2]), be32_to_cpu(path->sip[3])); + ret += snprintf(ptr + ret, 256 - ret, "dip=%08x%08x%08x%08x,", + be32_to_cpu(path->dip[0]), be32_to_cpu(path->dip[1]), + be32_to_cpu(path->dip[2]), be32_to_cpu(path->dip[3])); + } else { + return -EINVAL; + } + + ether_addr_copy(path->smac, dev->netdev->dev_addr); + + memcpy(path->dmac, ah->roce.dmac, sizeof(ah->roce.dmac)); + ret += snprintf(ptr + ret, 256 - ret, "smac=%02x%02x%02x%02x%02x%02x,", + path->smac[0], path->smac[1], path->smac[2], + path->smac[3], path->smac[4], path->smac[5]); + ret += snprintf(ptr + ret, 256 - ret, "dmac=%02x%02x%02x%02x%02x%02x", + path->dmac[0], path->dmac[1], path->dmac[2], + path->dmac[3], path->dmac[4], path->dmac[5]); + xsc_ib_info(dev, "ib path info:%s\n", buf); + + if (is_vlan_dev(sgid_attr->ndev)) { + path->vlan_valid = 1; + path->vlan_id = cpu_to_be16(vlan_dev_vlan_id(sgid_attr->ndev)); + + force_pcp = dev->force_pcp; + if (force_pcp == DSCP_PCP_UNSET) + path->dci_cfi_prio_sl = (ah->sl & 0x7); + else + path->dci_cfi_prio_sl = force_pcp; + } else { + path->vlan_valid = 0; + } + } + xsc_ib_info(dev, "path dscp %d pcp %d\n", path->ecn_dscp, path->dci_cfi_prio_sl); + return 0; +} + +static inline u8 __xsc_get_min_qp_cnt_mac(struct xsc_lag *lag) +{ + int array_size = lag->xsc_member_cnt; + int min = atomic_read(&lag->qp_cnt[0]); + u8 mac_index = 0, i; + + for (i = 0; i < array_size; i++) { + if (atomic_read(&lag->qp_cnt[i]) < min) { + min = atomic_read(&lag->qp_cnt[i]); + mac_index = i; + } + } + + return mac_index; +} +static int __xsc_ib_modify_qp(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, int attr_mask, + enum ib_qp_state cur_state, enum ib_qp_state new_state) +{ + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_ib_cq *send_cq, *recv_cq; + struct xsc_qp_context *context; + struct xsc_modify_qp_mbox_in *in; + struct xsc_qp_path path; + int sqd_event; + int err; + struct xsc_lag *lag; + u8 lag_port_num; + char buf[256] = {0}; + char *ptr = buf; + int ret = 0; + struct xsc_core_device *xdev = dev->xdev; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + context = &qp->ctx; + + if (attr_mask & IB_QP_PATH_MTU) { + if (attr->path_mtu != IB_MTU_1024 && + attr->path_mtu != IB_MTU_4096) { + xsc_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu); + } + + context->mtu_mode = (attr->path_mtu < IB_MTU_4096) ? 0 : 1; + ret = snprintf(ptr, 256, "path_mtu=%d,", attr->path_mtu); + } + + if (attr_mask & IB_QP_DEST_QPN) { + context->remote_qpn = cpu_to_be32(attr->dest_qp_num); + ret += snprintf(ptr + ret, 256 - ret, "dest_qp_num=%d,", attr->dest_qp_num); + } + + if (attr_mask & IB_QP_AV) { + err = xsc_set_path(dev, &attr->ah_attr, &path, + attr_mask & IB_QP_PORT ? attr->port_num : qp->port, + attr_mask, 0, attr, qp); + if (err) + goto out; + + context->src_udp_port = path.sport; + context->dscp = path.ecn_dscp; + context->hop_limit = path.hop_limit; + context->ip_type = (path.af_type == AF_INET ? 
0 : 1); + context->ip_type = cpu_to_be16(context->ip_type); + memcpy(context->dip, path.dip, sizeof(context->dip)); + memcpy(context->sip, path.sip, sizeof(context->sip)); + memcpy(context->dmac, path.dmac, sizeof(path.dmac)); + memcpy(context->smac, path.smac, sizeof(path.smac)); + + context->vlan_valid = path.vlan_valid; + context->dci_cfi_prio_sl = path.dci_cfi_prio_sl; + context->vlan_id = path.vlan_id; + + xsc_board_lag_lock(xdev); + if (xsc_lag_is_roce(xdev)) { + lag = xsc_get_lag(xdev); + context->lag_id = cpu_to_be16(lag->lag_id); + context->lag_sel_en = 1; + lag_port_num = lag->xsc_member_cnt; + if ((attr->ah_attr.grh.flow_label & LAG_PORT_NUM_MASK_EN) != 0) { + context->lag_sel = ((attr->ah_attr.grh.flow_label & + LAG_PORT_NUM_MASK) >> + LAG_PORT_NUM_OFFSET) % + lag_port_num; + } else { + context->lag_sel = __xsc_get_min_qp_cnt_mac(lag); + } + + if (qp->xqp.mac_id != MAC_INVALID && + context->lag_sel != qp->xqp.mac_id) + atomic_dec(&lag->qp_cnt[qp->xqp.mac_id]); + + qp->xqp.mac_id = context->lag_sel; + atomic_inc(&lag->qp_cnt[qp->xqp.mac_id]); + } + xsc_board_lag_unlock(xdev); + } + + if (attr_mask & IB_QP_RNR_RETRY) { + context->rnr_retry = attr->rnr_retry; + ret += snprintf(ptr + ret, 256 - ret, "rnr_retry=%d,", attr->rnr_retry); + } + + if (attr_mask & IB_QP_RETRY_CNT) { + context->retry_cnt = attr->retry_cnt; + ret += snprintf(ptr + ret, 256 - ret, "retry_cnt=%d,", attr->retry_cnt); + } + + if (attr_mask & IB_QP_SQ_PSN) { + context->next_send_psn = cpu_to_be32(attr->sq_psn); + ret += snprintf(ptr + ret, 256 - ret, "sq_psn=%#x,", attr->sq_psn); + } + + if (attr_mask & IB_QP_RQ_PSN) { + context->next_recv_psn = cpu_to_be32(attr->rq_psn); + ret += snprintf(ptr + ret, 256 - ret, "rq_psn=%#x,", attr->rq_psn); + } + + if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && + attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) + sqd_event = 1; + else + sqd_event = 0; + + memcpy(&in->ctx, context, sizeof(*context)); + err = xsc_core_qp_modify(xdev, to_xsc_state(cur_state), + to_xsc_state(new_state), in, sqd_event, + &qp->xqp); + if (err) { + xsc_ib_err(dev, "failed to modify qp[%d] from %s to %s\n", + qp->xqp.qpn, qp_state_to_str(cur_state), qp_state_to_str(new_state)); + goto out; + } + + qp->state = new_state; + xsc_ib_info(dev, "succeeded to modify qp[%d] from %s to %s with attr_mask=%#x, %s\n", + qp->xqp.qpn, qp_state_to_str(cur_state), qp_state_to_str(new_state), + attr_mask, buf); + + if (attr_mask & IB_QP_ACCESS_FLAGS) + qp->atomic_rd_en = attr->qp_access_flags; + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + qp->resp_depth = attr->max_dest_rd_atomic; + if (attr_mask & IB_QP_PORT) + qp->port = attr->port_num; + if (attr_mask & IB_QP_ALT_PATH) + qp->alt_port = attr->alt_port_num; + + /* + * If we moved a kernel QP to RESET, clean up all old CQ + * entries and reinitialize the QP. 
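+ * Otherwise stale CQEs left over for the recycled QPN could be
+ * returned to the consumer on a later poll.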
+ */
+	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+		get_cqs(qp, &send_cq, &recv_cq);
+		xsc_ib_cq_clean(recv_cq, qp->xqp.qpn);
+		if (send_cq != recv_cq)
+			xsc_ib_cq_clean(send_cq, qp->xqp.qpn);
+
+		qp->rq.head = 0;
+		qp->rq.tail = 0;
+		qp->sq.head = 0;
+		qp->sq.tail = 0;
+		qp->sq.cur_post = 0;
+		qp->sq.last_poll = 0;
+	}
+
+out:
+	kfree(in);
+	return err;
+}
+
+int xsc_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_udata *udata)
+{
+	struct xsc_ib_dev *dev = to_mdev(ibqp->device);
+	struct xsc_ib_qp *qp = to_xqp(ibqp);
+	enum ib_qp_state cur_state, new_state;
+	int err = -EINVAL;
+
+	mutex_lock(&qp->mutex);
+
+	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
+	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+	if ((attr_mask & IB_QP_PORT) &&
+	    (attr->port_num == 0 || attr->port_num > dev->xdev->caps.num_ports)) {
+		xsc_ib_err(dev, "invalid port number %u\n", attr->port_num);
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+	    attr->max_rd_atomic > dev->xdev->caps.max_ra_res_qp) {
+		xsc_ib_err(dev, "rd atomic:%u exceeded\n", attr->max_rd_atomic);
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+	    attr->max_dest_rd_atomic > dev->xdev->caps.max_ra_req_qp) {
+		xsc_ib_err(dev, "dest rd atomic:%u exceeded\n", attr->max_dest_rd_atomic);
+		goto out;
+	}
+
+	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+		err = 0;
+		goto out;
+	}
+
+	err = __xsc_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+out:
+	mutex_unlock(&qp->mutex);
+	return err;
+}
+
+static int xsc_wq_overflow(struct xsc_ib_wq *wq, int nreq, struct xsc_ib_cq *cq)
+{
+	unsigned int cur;
+
+	cur = wq->head - wq->tail;
+	if (likely(cur + nreq < wq->max_post))
+		return 0;
+
+	/* Recheck under the CQ lock: polling the CQ advances wq->tail. */
+	spin_lock(&cq->lock);
+	cur = wq->head - wq->tail;
+	spin_unlock(&cq->lock);
+
+	return cur + nreq >= wq->max_post;
+}
+
+static inline void xsc_post_send_db(struct xsc_ib_qp *qp,
+				    struct xsc_core_device *xdev,
+				    int nreq)
+{
+	u16 next_pid;
+	union xsc_db_data db;
+
+	if (unlikely(!nreq))
+		return;
+
+	qp->sq.head += nreq;
+
+	next_pid = qp->sq.head << (qp->sq.wqe_shift - XSC_BASE_WQE_SHIFT);
+	db.sq_next_pid = next_pid;
+	db.sqn = qp->doorbell_qpn;
+	/*
+	 * Make sure that descriptors are written before
+	 * updating the doorbell record and ringing the doorbell.
+	 */
+	wmb();
+	writel(db.raw_data, REG_ADDR(xdev, xdev->regs.tx_db));
+}
+
+static inline u32 xsc_crc32(struct xsc_ib_dev *dev, u32 crc, u8 *buf, size_t len)
+{
+	u32 i;
+
+	for (i = 0; i < len; i++)
+		crc = dev->crc_32_table[(crc ^ buf[i]) & 0xff] ^ (crc >> 8);
+
+	return crc;
+}
+
+#define BTH_QPN_MASK	(0x00ffffff)
+#define BTH_PSN_MASK	(0x00ffffff)
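+
+/* The loop in xsc_crc32() above is a standard byte-wise, table-driven
+ * reflected CRC-32. dev->crc_32_table is filled in elsewhere in the
+ * driver; a minimal sketch of the table generation it is assumed to use
+ * (the usual Ethernet/RoCE polynomial, 0xEDB88320 in reflected form;
+ * the helper name here is hypothetical):
+ *
+ *	static void xsc_fill_crc32_table(u32 *table)
+ *	{
+ *		u32 c;
+ *		int n, k;
+ *
+ *		for (n = 0; n < 256; n++) {
+ *			c = (u32)n;
+ *			for (k = 0; k < 8; k++)
+ *				c = (c & 1) ? 0xEDB88320 ^ (c >> 1) : c >> 1;
+ *			table[n] = c;
+ *		}
+ *	}
+ */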
+
+/* Compute a partial ICRC for all the IB transport headers. */
+int xsc_icrc_hdr(struct xsc_ib_dev *dev, void *pkt, u32 size, u32 *icrc)
+{
+	struct iphdr *ip4h = NULL;
+	struct ipv6hdr *ip6h = NULL;
+	struct udphdr *udph;
+	struct ib_unpacked_eth *eth;
+	struct rxe_bth *bth;
+	struct ib_unpacked_deth *deth;
+	struct ib_unpacked_vlan *vlan;
+	u32 crc;
+	int crc_field_len;
+	__be16 l3_type;
+	u8 *l3_start;
+
+	int hdr_size;
+
+	/* The pseudo header buffer is sized for an IPv6 header since
+	 * it is bigger than the IPv4 one.
+	 */
+	u8 pshdr[sizeof(struct udphdr) +
+		 sizeof(struct ipv6hdr) +
+		 sizeof(*bth) + sizeof(*deth)];
+
+	eth = pkt;
+
+	if (eth->type == htons(ETH_P_8021Q)) {
+		vlan = (struct ib_unpacked_vlan *)(eth + 1);
+		l3_type = vlan->type;
+		l3_start = (u8 *)(vlan + 1);
+		size -= 4;
+	} else {
+		l3_type = eth->type;
+		l3_start = (u8 *)(eth + 1);
+	}
+
+	hdr_size = sizeof(struct udphdr) +
+		   (l3_type == htons(ETH_P_IP) ?
+		    sizeof(struct iphdr) : sizeof(struct ipv6hdr));
+
+	crc_field_len = hdr_size + sizeof(*bth) + sizeof(*deth);
+
+	if (crc_field_len != size) {
+		xsc_ib_err(dev, "Unmatched hdr: expect %d actual %u\n",
+			   crc_field_len, size);
+		return -EINVAL;
+	}
+
+	ip4h = (struct iphdr *)(l3_start);
+	ip6h = (struct ipv6hdr *)(l3_start);
+	udph = (struct udphdr *)(ip4h + 1);
+	bth = (struct rxe_bth *)(udph + 1);
+
+	memcpy(pshdr, l3_start, crc_field_len);
+
+	/* This seed is the result of computing a CRC with a seed of
+	 * 0xffffffff and 8 bytes of 0xff representing a masked LRH.
+	 */
+	crc = 0xdebb20e3;
+
+	if (l3_type == htons(ETH_P_IP)) { /* IPv4 */
+		memcpy(pshdr, ip4h, hdr_size);
+		ip4h = (struct iphdr *)pshdr;
+		udph = (struct udphdr *)(ip4h + 1);
+
+		ip4h->ttl = 0xff;
+		ip4h->check = CSUM_MANGLED_0;
+		ip4h->tos = 0xff;
+	} else { /* IPv6 */
+		memcpy(pshdr, ip6h, hdr_size);
+		ip6h = (struct ipv6hdr *)pshdr;
+		udph = (struct udphdr *)(ip6h + 1);
+
+		memset(ip6h->flow_lbl, 0xff, sizeof(ip6h->flow_lbl));
+		ip6h->priority = 0xf;
+		ip6h->hop_limit = 0xff;
+	}
+	udph->check = CSUM_MANGLED_0;
+
+	bth = (struct rxe_bth *)(udph + 1);
+	/* exclude bth.resv8a */
+	bth->qpn |= cpu_to_be32(~BTH_QPN_MASK);
+
+	*icrc = xsc_crc32(dev, crc, pshdr, crc_field_len);
+
+	return 0;
+}
+
+/* Routine for sending QP1 packets for RoCE v1 and v2.
+ *
+ * TODO: the SQ header buffer should be created dynamically for
+ * multiple entries.
+ */
+int build_qp1_send_v2(struct xsc_ib_dev *dev,
+		      struct xsc_ib_qp *qp,
+		      const struct ib_send_wr *wr,
+		      struct ib_sge *sge,
+		      int payload_size, u32 *crc)
+{
+	struct xsc_ib_ah *ah = container_of(ud_wr((struct ib_send_wr *)wr)->ah, struct xsc_ib_ah,
+					    ibah);
+	const struct ib_gid_attr *sgid_attr = ah->ibah.sgid_attr;
+	u16 ether_type;
+	union ib_gid dgid;
+	bool is_eth = false;
+	bool is_vlan = false;
+	bool is_grh = false;
+	bool is_udp = false;
+	u8 ip_version = 0;
+	u16 vlan_id = 0xFFFF;
+	int rc = 0;
+	int cm_pcp = 0;
+	void *hdr_buf;
+
+	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
+
+	if (!qp->sq.hdr_buf) {
+		xsc_ib_err(dev, "QP1 buffer is empty!\n");
+		return -ENOMEM;
+	}
+	hdr_buf = (u8 *)qp->sq.hdr_buf + MAX_QP1_SQ_HDR_SIZE_V2 * qp->sq.mad_index;
+
+	if (!sgid_attr || !sgid_attr->ndev) {
+		xsc_ib_err(dev, "sgid_attr or ndev is null\n");
+		return -ENXIO;
+	}
+
+	if (is_vlan_dev(sgid_attr->ndev))
+		vlan_id = vlan_dev_vlan_id(sgid_attr->ndev);
+
+	is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
+	memcpy(&dgid.raw, &ah->av.rgid, 16);
+	if (is_udp) {
+		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
+			ip_version = 4;
+			ether_type = ETH_P_IP;
+		} else {
+			ip_version = 6;
+			ether_type = ETH_P_IPV6;
+		}
+		is_grh = false;
+	} else {
+		/* RoCE v1: no IP/UDP encapsulation; the GRH stays on the
+		 * wire and the IBoE ethertype is used.
+		 */
+		ether_type = ETH_P_IBOE;
+		is_grh = true;
+	}
+
+	is_eth = true;
+	is_vlan = vlan_id && vlan_id < 0x1000;
+
+	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
+			  ip_version, is_udp, 0, &qp->qp1_hdr);
+
+	/* ETH */
+	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->av.rmac);
+	ether_addr_copy(qp->qp1_hdr.eth.smac_h, dev->netdev->dev_addr);
+
+	/* For vlan, check the sgid for vlan existence */
+	if (!is_vlan) {
+		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
+	} else {
+		if (dev->cm_pcp != DSCP_PCP_UNSET)
+			cm_pcp = dev->cm_pcp << 13;
+		else
+			cm_pcp = (iboe_tos_to_sl(sgid_attr->ndev, ah->av.tclass) << 13);
+		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
+		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id | cm_pcp);
+	}
+
+#define ECN_CAPABLE_TRANSPORT 0x2
+	if (is_grh || ip_version == 6) {
+		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
+		       sizeof(sgid_attr->gid));
+		memcpy(qp->qp1_hdr.grh.destination_gid.raw, ah->av.rgid,
+		       sizeof(ah->av.rgid));
+		qp->qp1_hdr.grh.hop_limit = ah->av.hop_limit;
+
+		if (dev->cm_dscp != DSCP_PCP_UNSET)
+			qp->qp1_hdr.grh.traffic_class = (dev->cm_dscp << 2) | ECN_CAPABLE_TRANSPORT;
+		else
+			qp->qp1_hdr.grh.traffic_class = ECN_CAPABLE_TRANSPORT;
+	}
+
+	if (ip_version == 4) {
+		if (dev->cm_dscp != DSCP_PCP_UNSET)
+			qp->qp1_hdr.ip4.tos = (dev->cm_dscp << 2) | ECN_CAPABLE_TRANSPORT;
+		else
+			qp->qp1_hdr.ip4.tos = ECN_CAPABLE_TRANSPORT;
+		qp->qp1_hdr.ip4.id = 0;
+		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
+		qp->qp1_hdr.ip4.ttl = ah->av.hop_limit;
+
+		memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
+		memcpy(&qp->qp1_hdr.ip4.daddr, ah->av.rgid + 12, 4);
+		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
+	}
+
+	if (is_udp) {
+		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
+		qp->qp1_hdr.udp.sport = htons(ah->av.udp_sport);
+		qp->qp1_hdr.udp.csum = 0;
+		xsc_ib_dbg(dev, "CM packet used udp_sport=%d\n", ah->av.udp_sport);
+	}
+
+	/* BTH */
+	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+		qp->qp1_hdr.immediate_present = 1;
+	} else {
+		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+	}
+	if (wr->send_flags & IB_SEND_SOLICITED)
+		qp->qp1_hdr.bth.solicited_event = 1;
+	/* pad_count */
+	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
+
+	/* P_key for QP1 is for all members */
+	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
+	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
+	qp->qp1_hdr.bth.ack_req = 0;
+	qp->send_psn++;
+	qp->send_psn &= BTH_PSN_MASK;
+	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
+	/* DETH */
+	/* Use the privileged Q_Key for QP1 */
+	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
+	qp->qp1_hdr.deth.source_qpn = IB_QP1;
+
+	/* Pack the QP1 to the transmit buffer */
+	sge->addr = (dma_addr_t)(qp->sq.hdr_dma + MAX_QP1_SQ_HDR_SIZE_V2 * qp->sq.mad_index);
+	sge->lkey = 0xFFFFFFFF;
+	sge->length = MAX_QP1_SQ_HDR_SIZE;
+
+	ib_ud_header_pack(&qp->qp1_hdr, hdr_buf);
+	/*
+	 * Max header buf size for IPv6 RoCE v2 is 86,
+	 * which is the same as the QP1 SQ header buffer size.
+	 * Header buf size for IPv4 RoCE v2 can be 66:
+	 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20) = 66, so
+	 * subtract 20 bytes (86 - 66) from the QP1 SQ header buf size.
+	 */
+	if (is_udp && ip_version == 4)
+		sge->length -= 20;
+	/*
+	 * Max header buf size for RoCE v1 is 78.
+	 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
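+	 * That is 14 + 4 + 40 + 20 = 78, and 86 - 78 = 8: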
+ * Subtract 8 bytes from QP1 SQ header buf size + */ + if (!is_udp) + sge->length -= 8; + + /* Subtract 4 bytes for non vlan packets */ + if (!is_vlan) + sge->length -= 4; + + rc = xsc_icrc_hdr(dev, hdr_buf, sge->length - sizeof(struct ib_unpacked_eth), crc); + if (rc) { + xsc_ib_err(dev, "CRC error: hdr size %ld\n", + sge->length - sizeof(struct ib_unpacked_eth)); + } + return rc; +} + +static void zero_send_ds(struct xsc_ib_qp *qp, int idx) +{ + void *seg; + int i; + int ds_num; + u64 *p; + + ds_num = XSC_SEND_SEG_NUM << (qp->sq.wqe_shift - XSC_SEND_WQE_SHIFT); + seg = (void *)xsc_get_send_wqe(qp, idx); + for (i = 1; i < ds_num; i++) { + p = get_seg_wqe(seg, i); + p[0] = 0; + p[1] = 0; + } +} + +int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, + const struct ib_send_wr **bad_wr) +{ + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + void *seg; + struct xsc_send_wqe_ctrl_seg *ctrl; + struct xsc_wqe_data_seg *data_seg; + u32 crc; + int nreq; + int err = 0; + int i; + unsigned int idx; + unsigned long irqflag = 0; + struct ib_sge sg; + u8 *cur_p = NULL; + u8 *mad_send_base = NULL; + struct ib_wc wc; + void *vaddr; + int sig = 0; + + if (wr->opcode == IB_WR_LOCAL_INV) { + wc.status = IB_WC_SUCCESS; + wc.wr_cqe = wr->wr_cqe; + wc.qp = ibqp; + sig = qp->sq_signal_bits == XSC_WQE_CTRL_CQ_UPDATE ? + 1 : wr->send_flags & IB_SEND_SIGNALED; + if (xsc_wr_invalidate_mr(dev, wr)) + wc.status = IB_WC_GENERAL_ERR; + + if (wr->wr_cqe && wr->wr_cqe->done && sig) + wr->wr_cqe->done(qp->send_cq, &wc); + wr = wr->next; + if (!wr) + return 0; + } + + if (wr->opcode == IB_WR_REG_MR) { + wc.status = IB_WC_SUCCESS; + wc.qp = ibqp; + sig = qp->sq_signal_bits == XSC_WQE_CTRL_CQ_UPDATE ? + 1 : wr->send_flags & IB_SEND_SIGNALED; + if (xsc_wr_reg_mr(dev, wr)) + wc.status = IB_WC_GENERAL_ERR; + if (wr->wr_cqe && wr->wr_cqe->done && sig) + wr->wr_cqe->done(qp->send_cq, &wc); + } + + spin_lock_irqsave(&qp->sq.lock, irqflag); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + unsigned int seg_index = 1; + unsigned int msg_len = 0; + struct ib_sge *sgl = &wr->sg_list[0]; + int sg_n = wr->num_sge; + + if (unlikely(wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(xsc_ib_opcode))) { + xsc_ib_err(dev, "bad opcode %d\n", wr->opcode); + err = EINVAL; + *bad_wr = wr; + goto out; + } + + if (unlikely(xsc_wq_overflow(&qp->sq, nreq, + to_xcq(qp->ibqp.send_cq)))) { + xsc_ib_err(dev, "send work queue overflow\n"); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->num_sge > qp->sq.max_gs)) { + xsc_ib_err(dev, "max gs exceeded %d (max = %d)\n", + wr->num_sge, qp->sq.max_gs); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->opcode == IB_WR_RDMA_READ && wr->num_sge > 1)) { + xsc_ib_err(dev, "rdma read, max gs exceeded %d (max = 1)\n", + wr->num_sge); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); + zero_send_ds(qp, idx); + seg = xsc_get_send_wqe(qp, idx); + ctrl = seg; + ctrl->wqe_id = cpu_to_le16(qp->sq.cur_post << + (qp->sq.wqe_shift - XSC_BASE_WQE_SHIFT)); + ctrl->ds_data_num = 0; + ctrl->se = wr->send_flags & IB_SEND_SOLICITED ? 1 : 0; + ctrl->ce = wr->send_flags & IB_SEND_SIGNALED ? 
1 : 0; + for (i = 0; i < wr->num_sge; ++i) { + if (likely(wr->sg_list[i].length)) + msg_len += wr->sg_list[i].length; + } + ctrl->msg_len = msg_len; + ctrl->with_immdt = 0; + + if (unlikely(wr->opcode == IB_WR_RDMA_READ && msg_len == 0)) { + xsc_ib_err(dev, "rdma read, msg len should not be 0\n"); + /* workaround, return success for posting zero-length read */ + err = 0; + goto out; + } + switch (ibqp->qp_type) { + case IB_QPT_RC: + ctrl->ds_data_num = wr->num_sge; + switch (wr->opcode) { + case IB_WR_SEND_WITH_INV: + case IB_WR_SEND: + break; + case IB_WR_SEND_WITH_IMM: + ctrl->with_immdt = 1; + ctrl->opcode_data = send_ieth(wr); + break; + case IB_WR_RDMA_WRITE_WITH_IMM: + ctrl->with_immdt = 1; + ctrl->opcode_data = send_ieth(wr); + case IB_WR_RDMA_READ: + case IB_WR_RDMA_WRITE: + ctrl->with_immdt = 0; + ctrl->ds_data_num++; + data_seg = get_seg_wqe(ctrl, seg_index); + set_remote_addr_seg(data_seg, + msg_len, + rdma_wr(wr)->remote_addr, + rdma_wr(wr)->rkey); + seg_index++; + break; + case IB_WR_REG_MR: + break; + default: + xsc_ib_err(dev, "debug: opcode:%u NOT supported\n", wr->opcode); + err = EPERM; + *bad_wr = wr; + goto out; + } + ctrl->msg_opcode = xsc_ib_opcode[wr->opcode]; + break; + case IB_QPT_UD: + case IB_QPT_GSI: + ctrl->msg_opcode = XSC_MSG_OPCODE_MAD; + ctrl->ds_data_num++; + data_seg = get_seg_wqe(ctrl, seg_index); + mad_send_base = (u8 *)qp->sq.hdr_buf + + MAX_QP1_SQ_HDR_SIZE_V2 * qp->sq.mad_index; + + err = build_qp1_send_v2(dev, qp, wr, &sg, msg_len, &crc); + if (err) { + *bad_wr = wr; + goto out; + } + + cur_p = mad_send_base + sg.length; + for (i = 0; i < wr->num_sge; ++i) { + if (likely(wr->sg_list[i].length)) { + vaddr = xsc_ib_send_mad_sg_virt_addr(&dev->ib_dev, wr, i); + memcpy(cur_p, vaddr, wr->sg_list[i].length); + } + cur_p += wr->sg_list[i].length; + } + crc = xsc_crc32(dev, crc, mad_send_base + sg.length, ctrl->msg_len); + ctrl->msg_len += sg.length; + seg_index++; + + *(u32 *)&mad_send_base[ctrl->msg_len] = ~crc; + ctrl->msg_len += sizeof(crc); + sg.length = ctrl->msg_len; + set_local_data_seg(data_seg, &sg); + xsc_ib_info(dev, "qp[%d] send MAD packet, msg_len:%d\n", + qp->xqp.qpn, ctrl->msg_len); + qp->sq.mad_index = (qp->sq.mad_index + 1) % MAD_QUEUE_DEPTH; + + sg_n = 0; + break; + default: + xsc_ib_err(dev, "qp type:%u NOT supported\n", ibqp->qp_type); + err = EPERM; + *bad_wr = wr; + goto out; + } + + if (wr->opcode == IB_WR_REG_MR) { + nreq--; + continue; + } + + if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { + err = set_data_inl_seg(qp, wr, ctrl); + if (unlikely(err)) { + *bad_wr = wr; + xsc_ib_err(dev, "inline layout failed, err %d\n", err); + goto out; + } + } else { + for (i = 0; i < sg_n; ++i, ++seg_index) { + if (likely(sgl[i].length)) { + data_seg = get_seg_wqe(ctrl, seg_index); + set_local_data_seg(data_seg, &sgl[i]); + } + } + } + qp->sq.wrid[idx] = wr->wr_id; + qp->sq.wqe_head[idx] = qp->sq.head + nreq; + qp->sq.cur_post += 1; + } +out: + xsc_ib_dbg(dev, "nreq:%d\n", nreq); + xsc_post_send_db(qp, dev->xdev, nreq); + spin_unlock_irqrestore(&qp->sq.lock, irqflag); + + return err; +} + +int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, + const struct ib_recv_wr **bad_wr) +{ + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + struct xsc_core_device *xdev = dev->xdev; + struct xsc_wqe_data_seg *recv_head; + struct xsc_wqe_data_seg *data_seg; + unsigned long flags; + int err = 0; + u16 next_pid = 0; + union xsc_db_data db; + int nreq; + u16 idx; + int i; + + 
spin_lock_irqsave(&qp->rq.lock, flags); + + idx = qp->rq.head & (qp->rq.wqe_cnt - 1); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (unlikely(xsc_wq_overflow(&qp->rq, nreq, to_xcq(qp->ibqp.recv_cq)))) { + xsc_ib_err(dev, "recv work queue overflow\n"); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->num_sge > qp->rq.max_gs)) { + xsc_ib_err(dev, "max gs exceeded %d (max = %d)\n", + wr->num_sge, qp->rq.max_gs); + err = EINVAL; + *bad_wr = wr; + goto out; + } + + recv_head = get_recv_wqe(qp, idx); + + for (i = 0; i < wr->num_sge; ++i) { + if (unlikely(!wr->sg_list[i].length)) + continue; + data_seg = get_seg_wqe(recv_head, i); + data_seg->in_line = 0; + WR_LE_64(data_seg->va, wr->sg_list[i].addr); + WR_LE_32(data_seg->mkey, wr->sg_list[i].lkey); + if (is_qp1(qp->xqp.qp_type)) + WR_LE_32(data_seg->seg_len, xdev->caps.rx_pkt_len_max); + else + WR_LE_32(data_seg->seg_len, wr->sg_list[i].length); + } + + qp->rq.wrid[idx] = wr->wr_id; + + idx = (idx + 1) & (qp->rq.wqe_cnt - 1); + } + +out: + if (likely(nreq)) { + qp->rq.head += nreq; + next_pid = qp->rq.head << (qp->rq.wqe_shift - XSC_BASE_WQE_SHIFT); + db.rq_next_pid = next_pid; + db.rqn = qp->doorbell_qpn; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + + writel(db.raw_data, REG_ADDR(xdev, xdev->regs.rx_db)); + } + + spin_unlock_irqrestore(&qp->rq.lock, flags); + + return err; +} + +static inline enum ib_qp_state to_ib_qp_state(enum xsc_qp_state xsc_state) +{ + switch (xsc_state) { + case XSC_QP_STATE_RST: return IB_QPS_RESET; + case XSC_QP_STATE_INIT: return IB_QPS_INIT; + case XSC_QP_STATE_RTR: return IB_QPS_RTR; + case XSC_QP_STATE_RTS: return IB_QPS_RTS; + case XSC_QP_STATE_SQ_DRAINING: + case XSC_QP_STATE_SQD: return IB_QPS_SQD; + case XSC_QP_STATE_SQER: return IB_QPS_SQE; + case XSC_QP_STATE_ERR: return IB_QPS_ERR; + default: return -1; + } +} + +static inline enum ib_mig_state to_ib_mig_state(int xsc_mig_state) +{ + switch (xsc_mig_state) { + case XSC_QP_PM_ARMED: return IB_MIG_ARMED; + case XSC_QP_PM_REARM: return IB_MIG_REARM; + case XSC_QP_PM_MIGRATED: return IB_MIG_MIGRATED; + default: return -1; + } +} + +int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr) +{ + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_query_qp_mbox_out *outb; + struct xsc_qp_context *context; + int xsc_state; + int err = 0; + + mutex_lock(&qp->mutex); + outb = kzalloc(sizeof(*outb), GFP_KERNEL); + if (!outb) { + err = -ENOMEM; + goto out; + } + context = &outb->ctx; + err = xsc_core_qp_query(dev->xdev, &qp->xqp, outb, sizeof(*outb)); + if (err) + goto out_free; + + qp_attr->qp_state = qp->state; + qp_attr->path_mtu = context->mtu_mode ? 
IB_MTU_4096 : IB_MTU_1024;
+	qp_attr->rq_psn = be32_to_cpu(context->next_recv_psn) & 0xffffff;
+	qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
+	qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff;
+	/* FIXME: xsc_state is never filled in from the queried context */
+	qp_attr->sq_draining = xsc_state == XSC_QP_STATE_SQ_DRAINING;
+	qp_attr->retry_cnt = context->retry_cnt;
+	qp_attr->rnr_retry = context->rnr_retry;
+	qp_attr->cur_qp_state = qp_attr->qp_state;
+	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
+	qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+
+	/* The user and kernel cases report the same values. */
+	qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
+	qp_attr->cap.max_send_sge = qp->sq.max_gs;
+
+	/* We don't support inline sends for kernel QPs (yet), and we
+	 * don't know what userspace's value should be.
+	 */
+	qp_attr->cap.max_inline_data = 0;
+
+	qp_init_attr->cap = qp_attr->cap;
+
+	qp_init_attr->create_flags = 0;
+	if (qp->flags & XSC_IB_QP_BLOCK_MULTICAST_LOOPBACK)
+		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
+
+	qp_init_attr->sq_sig_type = qp->sq_signal_bits & XSC_WQE_CTRL_CQ_UPDATE ?
+		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
+
+out_free:
+	kfree(outb);
+
+out:
+	mutex_unlock(&qp->mutex);
+	return err;
+}
+
+void xsc_ib_drain_rq(struct ib_qp *qp __maybe_unused)
+{
+}
+
+void xsc_ib_drain_sq(struct ib_qp *qp __maybe_unused)
+{
+}
diff --git a/drivers/infiniband/hw/xsc/rtt.c b/drivers/infiniband/hw/xsc/rtt.c
new file mode 100644
index 0000000000000000000000000000000000000000..e7a68f1ab41a7f830564f62d4057818e90a64f09
--- /dev/null
+++ b/drivers/infiniband/hw/xsc/rtt.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include
+#include
+#include "common/xsc_core.h"
+#include "common/xsc_hsi.h"
+#include "common/driver.h"
+#include "common/xsc_cmd.h"
+
+struct xsc_rtt_interface {
+	struct xsc_core_device *xdev;
+	struct kobject kobj;
+};
+
+struct xsc_rtt_attributes {
+	struct attribute attr;
+	ssize_t (*show)(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a,
+			char *buf);
+	ssize_t (*store)(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a,
+			 const char *buf, size_t count);
+};
+
+static ssize_t enable_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a,
+			   char *buf)
+{
+	int err;
+	struct xsc_inbox_hdr in;
+	struct xsc_rtt_en_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_EN);
+	err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr),
+			   (void *)&out, sizeof(struct xsc_rtt_en_mbox_out));
+	if (err || out.hdr.status) {
+		xsc_core_err(g->xdev, "Failed to get rtt en, err(%u), status(%u)\n",
+			     err, out.hdr.status);
+		return -EINVAL;
+	}
+
+	return sprintf(buf, "%u\n", out.en);
+}
+
+static ssize_t enable_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a,
+			    const char *buf, size_t count)
+{
+	int err;
+	u16 rtt_enable;
+	struct xsc_rtt_en_mbox_in in;
+	struct xsc_rtt_en_mbox_out out;
+
+	err = kstrtou16(buf, 0, &rtt_enable);
+	if (err != 0)
+		return -EINVAL;
+
+	if (rtt_enable > 1) {
+		xsc_core_err(g->xdev, "Failed to set rtt en, rtt_enable(%u) out of range[0,1]\n",
+			     rtt_enable);
+		return -EINVAL;
+	}
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_EN);
+	in.en = rtt_enable;
+
+	err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_en_mbox_in),
+			   (void *)&out, sizeof(struct xsc_rtt_en_mbox_out));
+	if (err || out.hdr.status) {
+		xsc_core_err(g->xdev, "Failed to set rtt en, err(%u), status(%u)\n",
+			     err, out.hdr.status);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t qpn_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a,
+			char *buf)
+{
+	int err, i;
+	u32 count = 0;
+	struct xsc_inbox_hdr in;
+	struct xsc_get_rtt_qpn_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_QPN);
+	err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr),
+			   (void *)&out, sizeof(struct xsc_get_rtt_qpn_mbox_out));
+	if (err || out.hdr.status) {
+		xsc_core_err(g->xdev, "Failed to get rtt qpn, err(%u), status(%u)\n",
+			     err, out.hdr.status);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < (XSC_RTT_CFG_QPN_MAX - 1); i++)
+		count += sprintf(&buf[count], "%u,", __be32_to_cpu(out.qpn[i]));
+
+	count += sprintf(&buf[count], "%u\n", __be32_to_cpu(out.qpn[i]));
+
+	return count;
+}
+
+#define RTT_CFG_QPN_FORMAT "%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u," \
+"%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u"
+
+static ssize_t qpn_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a,
+			 const char *buf, size_t count)
+{
+	int err, i, num;
+	struct xsc_rtt_qpn_mbox_in in;
+	struct xsc_rtt_qpn_mbox_out out;
+	u32 *ptr = in.qpn;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	num = sscanf(buf, RTT_CFG_QPN_FORMAT, &ptr[0], &ptr[1], &ptr[2], &ptr[3], &ptr[4],
+		     &ptr[5], &ptr[6], &ptr[7], &ptr[8], &ptr[9], &ptr[10], &ptr[11], &ptr[12],
+		     &ptr[13], &ptr[14], &ptr[15], &ptr[16], &ptr[17], &ptr[18], &ptr[19],
+		     &ptr[20], &ptr[21], &ptr[22], &ptr[23], &ptr[24], &ptr[25], &ptr[26],
+		     &ptr[27], &ptr[28], &ptr[29], &ptr[30], &ptr[31]);
+	if (num == 0)
+		return -EINVAL;
+
+	in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_QPN);
+
+	for (i = 0; i < XSC_RTT_CFG_QPN_MAX; i++)
+		in.qpn[i] = __cpu_to_be32(ptr[i]);
+
+	err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_qpn_mbox_in),
+			   (void *)&out, sizeof(struct xsc_rtt_qpn_mbox_out));
+	if (err || out.hdr.status) {
+		xsc_core_err(g->xdev, "Failed to set rtt qpn, err(%u), status(%u)\n",
+			     err, out.hdr.status);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t period_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a,
+			   char *buf)
+{
+	int err;
+	struct xsc_inbox_hdr in;
+	struct xsc_rtt_period_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_PERIOD);
+	err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr),
+			   (void *)&out, sizeof(struct xsc_rtt_period_mbox_out));
+	if (err || out.hdr.status) {
+		xsc_core_err(g->xdev, "Failed to get rtt period, err(%u), status(%u)\n",
+			     err, out.hdr.status);
+		return -EINVAL;
+	}
+
+	return sprintf(buf, "%u\n", __be32_to_cpu(out.period));
+}
+
+#define RTT_CFG_PERIOD_MAX 10000	/* ms, i.e. 10 s */
+#define RTT_CFG_PERIOD_MIN 1000		/* ms, i.e. 1 s */
+static ssize_t period_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a,
+			    const char *buf, size_t count)
+{
+	int err;
+	u32 rtt_period;
+	struct xsc_rtt_period_mbox_in in;
+	struct xsc_rtt_period_mbox_out out;
+
+	err = kstrtouint(buf, 0, &rtt_period);
+	if (err != 0)
+		return -EINVAL;
+
+	if (rtt_period > RTT_CFG_PERIOD_MAX || rtt_period < RTT_CFG_PERIOD_MIN)
+		return -EINVAL;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_PERIOD);
+	in.period = __cpu_to_be32(rtt_period);
+
+	err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_period_mbox_in),
+			   (void *)&out, sizeof(struct xsc_rtt_period_mbox_out));
+	if (err || out.hdr.status) {
+		xsc_core_err(g->xdev, "Failed to set rtt period, err(%u), status(%u)\n",
+			     err, out.hdr.status);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t result_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a,
+			   char *buf)
+{
+	int i, err;
+	u32 count = 0;
+	struct xsc_inbox_hdr in;
+	struct xsc_rtt_result_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_RESULT);
+
+	err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr),
+			   (void *)&out, sizeof(struct xsc_rtt_result_mbox_out));
+	if (err || out.hdr.status) {
+		xsc_core_err(g->xdev, "Failed to get rtt result, err(%u), status(%u)\n",
+			     err, out.hdr.status);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < (XSC_RTT_CFG_QPN_MAX - 1); i++)
+		count += sprintf(&buf[count], "%llu,", __be64_to_cpu(out.result[i]));
+
+	count += sprintf(&buf[count], "%llu\n", __be64_to_cpu(out.result[i]));
+
+	return count;
+}
+
+static ssize_t result_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a,
+			    const char *buf, size_t count)
+{
+	return -EOPNOTSUPP;
+}
+
+static ssize_t stats_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a,
+			  char *buf)
+{
+	int err;
+	u32 count = 0;
+	struct xsc_inbox_hdr in;
+	struct xsc_rtt_stats_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_STATS);
+
+	err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr),
+			   (void *)&out, sizeof(struct
xsc_rtt_stats_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to get rtt stats, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + count += sprintf(&buf[count], "rtt_succ_snd_req_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_succ_snd_req_cnt)); + count += sprintf(&buf[count], "rtt_succ_snd_rsp_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_succ_snd_rsp_cnt)); + count += sprintf(&buf[count], "rtt_fail_snd_req_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_fail_snd_req_cnt)); + count += sprintf(&buf[count], "rtt_fail_snd_rsp_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_fail_snd_rsp_cnt)); + count += sprintf(&buf[count], "rtt_rcv_req_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_rcv_req_cnt)); + count += sprintf(&buf[count], "rtt_rcv_rsp_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_rcv_rsp_cnt)); + count += sprintf(&buf[count], "rtt_rcv_unk_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_rcv_unk_cnt)); + count += sprintf(&buf[count], "rtt_grp_invalid_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_grp_invalid_cnt)); + + return count; +} + +static ssize_t stats_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +#define RTT_ATTR(_name) struct xsc_rtt_attributes xsc_rtt_attr_##_name = \ + __ATTR(rtt_probe_##_name, 0644, _name##_show, _name##_store) + +RTT_ATTR(enable); +RTT_ATTR(qpn); +RTT_ATTR(period); +RTT_ATTR(result); +RTT_ATTR(stats); + +static ssize_t rtt_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct xsc_rtt_attributes *ga = + container_of(attr, struct xsc_rtt_attributes, attr); + struct xsc_rtt_interface *g = container_of(kobj, struct xsc_rtt_interface, kobj); + + if (!ga->show) + return -EIO; + + return ga->show(g, ga, buf); +} + +static ssize_t rtt_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct xsc_rtt_attributes *ga = + container_of(attr, struct xsc_rtt_attributes, attr); + struct xsc_rtt_interface *g = container_of(kobj, struct xsc_rtt_interface, kobj); + + if (!ga->store) + return -EIO; + + return ga->store(g, ga, buf, size); +} + +static const struct sysfs_ops rtt_sysfs_ops = { + .show = rtt_attr_show, + .store = rtt_attr_store, +}; + +static struct attribute *rtt_attrs[] = { + &xsc_rtt_attr_enable.attr, + &xsc_rtt_attr_qpn.attr, + &xsc_rtt_attr_period.attr, + &xsc_rtt_attr_result.attr, + &xsc_rtt_attr_stats.attr, + NULL +}; + +ATTRIBUTE_GROUPS(rtt); + +static const struct kobj_type rtt_ktype = { + .sysfs_ops = &rtt_sysfs_ops, + .default_groups = rtt_groups, +}; + +int xsc_rtt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ + struct xsc_rtt_interface *tmp; + int err; + + if (!xdev || !xsc_core_is_pf(xdev) || xdev->pf_id != 0) + return -EACCES; + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + err = kobject_init_and_add(&tmp->kobj, &rtt_ktype, + &ib_dev->dev.kobj, "rtt"); + if (err) + goto rtt_attr_err; + + xdev->rtt_priv = tmp; + tmp->xdev = xdev; + return 0; + +rtt_attr_err: + kobject_put(&tmp->kobj); + kfree(tmp); + return err; +} + +void xsc_rtt_sysfs_fini(struct xsc_core_device *xdev) +{ + int err; + struct xsc_rtt_en_mbox_in in; + struct xsc_rtt_en_mbox_out out; + struct xsc_rtt_period_mbox_in period_in; + struct xsc_rtt_period_mbox_out period_out; + struct xsc_rtt_interface *rtt; + + if (!xdev || !xdev->rtt_priv) + return; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = 
__cpu_to_be16(XSC_CMD_OP_SET_RTT_EN);
+	in.en = 0;
+
+	err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_rtt_en_mbox_in),
+			   (void *)&out, sizeof(struct xsc_rtt_en_mbox_out));
+	if (err || out.hdr.status)
+		xsc_core_err(xdev, "Failed to disable rtt, err(%u), status(%u)\n",
+			     err, out.hdr.status);
+
+	memset(&period_in, 0, sizeof(period_in));
+	memset(&period_out, 0, sizeof(period_out));
+
+	period_in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_PERIOD);
+	period_in.period = __cpu_to_be32(RTT_CFG_PERIOD_MAX);
+
+	err = xsc_cmd_exec(xdev, (void *)&period_in, sizeof(struct xsc_rtt_period_mbox_in),
+			   (void *)&period_out, sizeof(struct xsc_rtt_period_mbox_out));
+	if (err || period_out.hdr.status)
+		xsc_core_err(xdev, "Failed to set rtt period default, err(%u), status(%u)\n",
+			     err, period_out.hdr.status);
+
+	rtt = xdev->rtt_priv;
+	kobject_put(&rtt->kobj);
+	kfree(rtt);
+	xdev->rtt_priv = NULL;
+}
diff --git a/drivers/infiniband/hw/xsc/user.h b/drivers/infiniband/hw/xsc/user.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e2b6ff542ae8de163a5190f651db3d1d4754450
--- /dev/null
+++ b/drivers/infiniband/hw/xsc/user.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef XSC_IB_USER_H
+#define XSC_IB_USER_H
+
+#include
+#include <linux/if_ether.h>	/* For ETH_ALEN. */
+#include
+
+enum xsc_ib_devx_methods {
+	XSC_IB_METHOD_DEVX_OTHER = (1U << UVERBS_ID_NS_SHIFT),
+	XSC_IB_METHOD_DEVX_QUERY_UAR,
+	XSC_IB_METHOD_DEVX_QUERY_EQN,
+};
+
+enum xsc_ib_devx_other_attrs {
+	XSC_IB_ATTR_DEVX_OTHER_CMD_IN = (1U << UVERBS_ID_NS_SHIFT),
+	XSC_IB_ATTR_DEVX_OTHER_CMD_OUT,
+};
+
+enum xsc_ib_objects {
+	XSC_IB_OBJECT_DEVX = (1U << UVERBS_ID_NS_SHIFT),
+	XSC_IB_OBJECT_DEVX_OBJ,
+	XSC_IB_OBJECT_DEVX_UMEM,
+	XSC_IB_OBJECT_FLOW_MATCHER,
+};
+
+/* Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define XSC_IB_UVERBS_ABI_VERSION 1
+
+/* Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
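+ *
+ * For example (illustrative, not part of this file): userspace passes
+ * a buffer address in a __u64 field such as xsc_ib_create_qp.buf_addr
+ * below, and the kernel converts it back with u64_to_user_ptr()
+ * instead of carrying a pointer type in the ABI struct.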
+ */ + +enum { + XSC_QP_FLAG_SIGNATURE = 1 << 0, + XSC_QP_FLAG_SCATTER_CQE = 1 << 1, + XSC_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2, + XSC_QP_FLAG_BFREG_INDEX = 1 << 3, + XSC_QP_FLAG_TYPE_DCT = 1 << 4, + XSC_QP_FLAG_TYPE_DCI = 1 << 5, + XSC_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6, + XSC_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7, + XSC_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8, + XSC_QP_FLAG_RAWPACKET_TSO = 1 << 9, + XSC_QP_FLAG_RAWPACKET_TX = 1 << 10, +}; + +struct xsc_ib_alloc_ucontext_req { + __u32 rsvd0; + __u32 rsvd1; +}; + +enum xsc_user_cmds_supp_uhw { + XSC_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0, + XSC_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1, +}; + +struct xsc_ib_alloc_ucontext_resp { + __u32 qp_tab_size; + __u32 cache_line_size; + __u16 max_sq_desc_sz; + __u16 max_rq_desc_sz; + __u32 max_send_wqebb; + __u32 max_recv_wr; + __u16 num_ports; + __u16 reserved; + __u64 qpm_tx_db; + __u64 qpm_rx_db; + __u64 cqm_next_cid_reg; + __u64 cqm_armdb; + __u32 send_ds_num; + __u32 recv_ds_num; + __u32 cmds_supp_uhw; +}; + +struct xsc_ib_create_qp { + __u64 buf_addr; + __u64 db_addr; + __u32 sq_wqe_count; + __u32 rq_wqe_count; + __u32 rq_wqe_shift; + __u32 flags; +}; + +struct xsc_ib_create_qp_resp { + __u32 uuar_index; + __u32 reserved; +}; + +struct xsc_ib_create_cq { + __u64 buf_addr; + __u64 db_addr; + __u32 cqe_size; +}; + +struct xsc_ib_create_cq_resp { + __u32 cqn; + __u32 reserved; +}; + +struct xsc_ib_create_ah_resp { + __u32 response_length; + __u8 dmac[ETH_ALEN]; + __u8 reserved[6]; +}; + +struct xsc_ib_alloc_pd_resp { + __u32 pdn; +}; + +struct xsc_ib_tso_caps { + __u32 max_tso; /* Maximum tso payload size in bytes */ + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. + * supported_qpts |= 1 << IB_QPT_UD + */ + __u32 supported_qpts; +}; + +/* RX Hash function flags */ +enum xsc_rx_hash_function_flags { + XSC_RX_HASH_FUNC_TOEPLITZ = 1 << 0, +}; + +enum xsc_rdma_link_speed { + XSC_RDMA_LINK_SPEED_2_5GB = 1 << 0, + XSC_RDMA_LINK_SPEED_5GB = 1 << 1, + XSC_RDMA_LINK_SPEED_10GB = 1 << 3, + XSC_RDMA_LINK_SPEED_14GB = 1 << 4, + XSC_RDMA_LINK_SPEED_25GB = 1 << 5, + XSC_RDMA_LINK_SPEED_50GB = 1 << 6, + XSC_RDMA_LINK_SPEED_100GB = 1 << 7, +}; + +enum xsc_rdma_phys_state { + XSC_RDMA_PHY_STATE_SLEEP = 1, + XSC_RDMA_PHY_STATE_POLLING, + XSC_RDMA_PHY_STATE_DISABLED, + XSC_RDMA_PHY_STATE_PORT_CONFIGURATION_TRAINNING, + XSC_RDMA_PHY_STATE_LINK_UP, + XSC_RDMA_PHY_STATE_LINK_ERROR_RECOVERY, + XSC_RDMA_PHY_STATE_PHY_TEST, +}; + +/* + * RX Hash flags, these flags allows to set which incoming packet's field should + * participates in RX Hash. Each flag represent certain packet's field, + * when the flag is set the field that is represented by the flag will + * participate in RX Hash calculation. + * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP + * and *TCP and *UDP flags can't be enabled together on the same QP. 
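+ *
+ * For example, a classic TCP/IPv4 4-tuple hash would set:
+ * XSC_RX_HASH_SRC_IPV4 | XSC_RX_HASH_DST_IPV4 |
+ * XSC_RX_HASH_SRC_PORT_TCP | XSC_RX_HASH_DST_PORT_TCP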
+ */ +enum xsc_rx_hash_fields { + XSC_RX_HASH_SRC_IPV4 = 1 << 0, + XSC_RX_HASH_DST_IPV4 = 1 << 1, + XSC_RX_HASH_SRC_IPV6 = 1 << 2, + XSC_RX_HASH_DST_IPV6 = 1 << 3, + XSC_RX_HASH_SRC_PORT_TCP = 1 << 4, + XSC_RX_HASH_DST_PORT_TCP = 1 << 5, + XSC_RX_HASH_SRC_PORT_UDP = 1 << 6, + XSC_RX_HASH_DST_PORT_UDP = 1 << 7, + XSC_RX_HASH_IPSEC_SPI = 1 << 8, + /* Save bits for future fields */ + XSC_RX_HASH_INNER = (1UL << 31), +}; + +struct xsc_ib_rss_caps { + __aligned_u64 rx_hash_fields_mask; /* enum xsc_rx_hash_fields */ + __u8 rx_hash_function; /* enum xsc_rx_hash_function_flags */ + __u8 reserved[7]; +}; + +enum xsc_ib_cqe_comp_res_format { + XSC_IB_CQE_RES_FORMAT_HASH = 1 << 0, + XSC_IB_CQE_RES_FORMAT_CSUM = 1 << 1, + XSC_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2, +}; + +struct xsc_ib_cqe_comp_caps { + __u32 max_num; + __u32 supported_format; /* enum xsc_ib_cqe_comp_res_format */ +}; + +enum xsc_ib_packet_pacing_cap_flags { + XSC_IB_PP_SUPPORT_BURST = 1 << 0, +}; + +struct xsc_packet_pacing_caps { + __u32 qp_rate_limit_min; + __u32 qp_rate_limit_max; /* In kpbs */ + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. + * supported_qpts |= 1 << IB_QPT_RAW_PACKET + */ + __u32 supported_qpts; + __u8 cap_flags; /* enum xsc_ib_packet_pacing_cap_flags */ + __u8 reserved[3]; +}; + +enum xsc_ib_mpw_caps { + MPW_RESERVED = 1 << 0, + XSC_IB_ALLOW_MPW = 1 << 1, + XSC_IB_SUPPORT_EMPW = 1 << 2, +}; + +enum xsc_ib_sw_parsing_offloads { + XSC_IB_SW_PARSING = 1 << 0, + XSC_IB_SW_PARSING_CSUM = 1 << 1, + XSC_IB_SW_PARSING_LSO = 1 << 2, +}; + +struct xsc_ib_sw_parsing_caps { + __u32 sw_parsing_offloads; /* enum xsc_ib_sw_parsing_offloads */ + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. + * supported_qpts |= 1 << IB_QPT_RAW_PACKET + */ + __u32 supported_qpts; +}; + +struct xsc_ib_striding_rq_caps { + __u32 min_single_stride_log_num_of_bytes; + __u32 max_single_stride_log_num_of_bytes; + __u32 min_single_wqe_log_num_of_strides; + __u32 max_single_wqe_log_num_of_strides; + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. 
+ * supported_qpts |= 1 << IB_QPT_RAW_PACKET + */ + __u32 supported_qpts; + __u32 reserved; +}; + +enum xsc_ib_query_dev_resp_flags { + /* Support 128B CQE compression */ + XSC_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0, + XSC_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1, +}; + +enum xsc_ib_tunnel_offloads { + XSC_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0, + XSC_IB_TUNNELED_OFFLOADS_GRE = 1 << 1, + XSC_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2, + XSC_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3, + XSC_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4, +}; + +struct xsc_ib_query_device_resp { + __u32 comp_mask; + __u32 response_length; + struct xsc_ib_tso_caps tso_caps; + struct xsc_ib_rss_caps rss_caps; + struct xsc_ib_cqe_comp_caps cqe_comp_caps; + struct xsc_packet_pacing_caps packet_pacing_caps; + __u32 xsc_ib_support_multi_pkt_send_wqes; + __u32 flags; /* Use enum xsc_ib_query_dev_resp_flags */ + struct xsc_ib_sw_parsing_caps sw_parsing_caps; + struct xsc_ib_striding_rq_caps striding_rq_caps; + __u32 tunnel_offloads_caps; /* enum xsc_ib_tunnel_offloads */ + __u32 reserved; +}; + +#endif /* XSC_IB_USER_H */ diff --git a/drivers/infiniband/hw/xsc/xsc_ib.h b/drivers/infiniband/hw/xsc/xsc_ib.h new file mode 100644 index 0000000000000000000000000000000000000000..0753b3ba1c328aaefc7a8ef64b4b886114502e2a --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_ib.h @@ -0,0 +1,627 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_IB_H +#define XSC_IB_H + +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/cq.h" +#include "common/qp.h" +#include +#include +#include +#include +#include + +#include "xsc_ib_compat.h" + +#define xsc_ib_dbg(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_DBG) \ + pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define xsc_ib_err(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_ERR) \ + pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define xsc_ib_warn(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_WARN) \ + pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define xsc_ib_info(dev, format, arg...) 
\
+do { \
+ if (xsc_log_level <= XSC_LOG_LEVEL_INFO) \
+ pr_info("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \
+ __func__, __LINE__, current->pid, ##arg); \
+} while (0)
+
+struct xsc_ib_ucontext {
+ struct ib_ucontext ibucontext;
+ struct list_head db_page_list;
+
+ /* protect doorbell record alloc/free
+ */
+ struct mutex db_page_mutex;
+};
+
+#define field_avail(type, fld, sz) (offsetof(type, fld) + \
+ sizeof(((type *)0)->fld) <= (sz))
+
+static inline struct xsc_ib_ucontext *to_xucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct xsc_ib_ucontext, ibucontext);
+}
+
+struct xsc_ib_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+ u32 pa_lkey;
+};
+
+/* Use macros here so that we don't have to duplicate
+ * enum ib_send_flags and enum ib_qp_type for the low-level driver
+ */
+
+#define XSC_IB_QPT_REG_UMR IB_QPT_RESERVED1
+
+enum {
+ XSC_PAGE_SHIFT_4K = 12,
+ XSC_PAGE_SHIFT_64K = 16,
+ XSC_PAGE_SHIFT_2M = 21,
+ XSC_PAGE_SHIFT_1G = 30,
+};
+
+enum {
+ XSC_PAGE_MODE_4K = 0,
+ XSC_PAGE_MODE_64K = 1,
+ XSC_PAGE_MODE_2M = 2,
+ XSC_PAGE_MODE_1G = 3,
+};
+
+struct wr_list {
+ u16 opcode;
+ u16 next;
+};
+
+struct xsc_ib_wq {
+ u64 *wrid;
+ u32 *wr_data;
+ struct wr_list *w_list;
+ unsigned long *wqe_head;
+ u16 unsig_count;
+
+ /* serialize post to the work queue
+ */
+ spinlock_t lock;
+ int wqe_cnt;
+ int ds_cnt;
+ int max_post;
+ int max_gs;
+ int offset;
+ int wqe_shift;
+ unsigned int head;
+ unsigned int tail;
+ u16 cur_post;
+ u16 last_poll;
+ void *qend;
+ void *hdr_buf;
+ u32 hdr_size;
+ dma_addr_t hdr_dma;
+ int mad_queue_depth;
+ int mad_index;
+};
+
+enum {
+ XSC_QP_USER,
+ XSC_QP_KERNEL,
+ XSC_QP_EMPTY
+};
+
+struct xsc_ib_qp {
+ struct ib_qp ibqp;
+ struct xsc_core_qp xqp;
+ struct xsc_buf buf;
+
+ struct xsc_db db;
+ struct xsc_ib_wq rq;
+
+ u32 doorbell_qpn;
+ u8 sq_signal_bits;
+ u8 fm_cache;
+ int sq_max_wqes_per_wr;
+ int sq_spare_wqes;
+ struct xsc_ib_wq sq;
+
+ struct ib_umem *umem;
+ int buf_size;
+
+ /* serialize qp state modifications
+ */
+ struct mutex mutex;
+ u16 xrcdn;
+ u32 flags;
+ u8 port;
+ u8 alt_port;
+ u8 atomic_rd_en;
+ u8 resp_depth;
+ u8 state;
+ int xsc_type;
+ int wq_sig;
+ int scat_cqe;
+ int max_inline_data;
+ int has_rq;
+
+ int create_type;
+ u32 pa_lkey;
+ /* For QP1 */
+ struct ib_ud_header qp1_hdr;
+ u32 send_psn;
+ struct xsc_qp_context ctx;
+ struct ib_cq *send_cq;
+ struct ib_cq *recv_cq;
+ /* For qp resources */
+ spinlock_t lock;
+};
+
+struct xsc_ib_cq_buf {
+ struct xsc_buf buf;
+ struct ib_umem *umem;
+ int cqe_size;
+};
+
+enum xsc_ib_qp_flags {
+ XSC_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0,
+ XSC_IB_QP_SIGNATURE_HANDLING = 1 << 1,
+};
+
+struct xsc_shared_mr_info {
+ int mr_id;
+ struct ib_umem *umem;
+};
+
+struct xsc_ib_cq {
+ struct ib_cq ibcq;
+ struct xsc_core_cq xcq;
+ struct xsc_ib_cq_buf buf;
+ struct xsc_db db;
+
+ /* serialize access to the CQ
+ */
+ spinlock_t lock;
+
+ /* protect resize cq
+ */
+ struct mutex resize_mutex;
+ struct xsc_ib_cq_resize *resize_buf;
+ struct ib_umem *resize_umem;
+ int cqe_size;
+};
+
+struct xsc_ib_xrcd {
+ struct ib_xrcd ibxrcd;
+ u32 xrcdn;
+};
+
+struct xsc_ib_peer_id;
+
+struct xsc_ib_mr {
+ struct ib_mr ibmr;
+ struct xsc_core_mr mmr;
+ struct ib_umem *umem;
+ struct xsc_shared_mr_info *smr_info;
+ struct list_head list;
+ int order;
+ __be64 *pas;
+ dma_addr_t dma;
+ int npages;
+ struct completion done;
+ enum ib_wc_status status;
+ struct xsc_ib_peer_id *peer_id;
+ atomic_t invalidated;
+ struct completion invalidation_comp;
+};
+
+struct xsc_ib_peer_id {
+ struct completion comp;
+ struct xsc_ib_mr *mr;
+};
+
+struct xsc_cache_ent {
+ struct list_head head;
+ /* sync access to the cache entry
+ */
+ spinlock_t lock;
+
+ struct dentry *dir;
+ char name[4];
+ u32 order;
+ u32 size;
+ u32 cur;
+ u32 miss;
+ u32 limit;
+
+ struct dentry *fsize;
+ struct dentry *fcur;
+ struct dentry *fmiss;
+ struct dentry *flimit;
+
+ struct xsc_ib_dev *dev;
+ struct work_struct work;
+ struct delayed_work dwork;
+};
+
+struct xsc_mr_cache {
+ struct workqueue_struct *wq;
+ struct xsc_cache_ent ent[MAX_MR_CACHE_ENTRIES];
+ int stopped;
+ struct dentry *root;
+ unsigned long last_add;
+};
+
+struct xsc_gid {
+ u8 data[16];
+};
+
+struct xsc_sgid_tbl {
+ struct xsc_gid *tbl;
+ u32 max;
+ u32 count;
+};
+
+struct xsc_ib_res {
+ struct xsc_sgid_tbl sgid_tbl;
+};
+
+struct xsc_ib_resources {
+ struct ib_cq *c0;
+ struct ib_xrcd *x0;
+ struct ib_xrcd *x1;
+ struct ib_pd *p0;
+ struct ib_srq *s0;
+};
+
+struct xsc_ib_dev {
+ struct ib_device ib_dev;
+ struct uverbs_object_tree_def *driver_trees[6];
+ struct net_device *netdev;
+ struct xsc_core_device *xdev;
+ XSC_DECLARE_DOORBELL_LOCK(uar_lock);
+ struct list_head eqs_list;
+ int num_ports;
+ int num_comp_vectors;
+ /* serialize update of capability mask
+ */
+ struct mutex cap_mask_mutex;
+ u8 ib_active;
+ /* sync used page count stats
+ */
+ spinlock_t mr_lock;
+ struct xsc_ib_res ib_res;
+ struct xsc_ib_resources devr;
+ struct xsc_mr_cache cache;
+ u32 crc_32_table[256];
+ int cm_pcp;
+ int cm_dscp;
+ int force_pcp;
+ int force_dscp;
+ int iommu_state;
+ struct notifier_block nb;
+};
+
+union xsc_ib_fw_ver {
+ u64 data;
+ struct {
+ u8 ver_major;
+ u8 ver_minor;
+ u16 ver_patch;
+ u32 ver_tweak;
+ } s;
+};
+
+struct xsc_pa_chunk {
+ struct list_head list;
+ u64 va;
+ dma_addr_t pa;
+ size_t length;
+};
+
+static inline struct xsc_ib_cq *to_xibcq(struct xsc_core_cq *xcq)
+{
+ return container_of(xcq, struct xsc_ib_cq, xcq);
+}
+
+static inline struct xsc_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
+{
+ return container_of(ibxrcd, struct xsc_ib_xrcd, ibxrcd);
+}
+
+static inline struct xsc_ib_dev *to_mdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct xsc_ib_dev, ib_dev);
+}
+
+static inline struct xsc_ib_cq *to_xcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct xsc_ib_cq, ibcq);
+}
+
+static inline struct xsc_ib_qp *to_xibqp(struct xsc_core_qp *xqp)
+{
+ return container_of(xqp, struct xsc_ib_qp, xqp);
+}
+
+static inline struct xsc_ib_pd *to_mpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct xsc_ib_pd, ibpd);
+}
+
+static inline struct xsc_ib_qp *to_xqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct xsc_ib_qp, ibqp);
+}
+
+static inline struct xsc_ib_mr *to_mmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct xsc_ib_mr, ibmr);
+}
+
+struct xsc_ib_ah {
+ struct ib_ah ibah;
+ struct xsc_av av;
+};
+
+static inline struct xsc_ib_ah *to_mah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct xsc_ib_ah, ibah);
+}
+
+static inline struct xsc_ib_dev *xdev2ibdev(struct xsc_core_device *xdev)
+{
+ return container_of((void *)xdev, struct xsc_ib_dev, xdev);
+}
+
+int xsc_ib_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
+
+int xsc_ib_create_qp(struct ib_qp *ibqp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+
+void __xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 qpn);
+void xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 qpn);
+
+int xsc_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+int xsc_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+
+int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+
+void *xsc_get_send_wqe(struct xsc_ib_qp *qp, int n);
+int xsc_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int xsc_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+struct ib_mr *xsc_ib_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata);
+int xsc_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
+void xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+ int *ncont, int *order);
+void xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, __be64 *pas, int npages, bool need_to_devide);
+const struct uverbs_object_tree_def *xsc_ib_get_devx_tree(void);
+
+int xsc_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr);
+int xsc_wr_invalidate_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr);
+int xsc_find_best_pgsz(struct ib_umem *umem, unsigned long pgsz_bitmap,
+ unsigned long addr, int *npage, int *shift, u64 **pas);
+
+void xsc_ib_drain_rq(struct ib_qp *qp);
+void xsc_ib_drain_sq(struct ib_qp *qp);
+
+static inline void init_query_mad(struct ib_smp *mad)
+{
+ mad->base_version = 1;
+ mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ mad->class_version = 1;
+ mad->method = IB_MGMT_METHOD_GET;
+}
+
+static inline u8 convert_access(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_ATOMIC ? XSC_PERM_ATOMIC : 0) |
+ (acc & IB_ACCESS_REMOTE_WRITE ? XSC_PERM_REMOTE_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? XSC_PERM_REMOTE_READ : 0) |
+ (acc & IB_ACCESS_LOCAL_WRITE ? XSC_PERM_LOCAL_WRITE : 0) |
+ XSC_PERM_LOCAL_READ;
+}
+
+static inline enum ib_mtu xsc_net_to_ib_mtu(unsigned int mtu)
+{
+ mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES +
+ IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES +
+ IB_ICRC_BYTES);
+
+ if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))
+ return IB_MTU_4096;
+ else if (mtu >= ib_mtu_enum_to_int(IB_MTU_1024))
+ return IB_MTU_1024;
+ else
+ return 0;
+}
+
+/**
+ * UDP source port selection must adhere to IANA port allocation ranges. Thus
+ * we will be using the IANA recommendation for the ephemeral port range of
+ * 49152-65535, or in hex: 0xC000-0xFFFF.
+ */
+#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
+#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
+#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
+
+/**
+ * xsc_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
+ * on the flow_label
+ *
+ * This function will convert the 20 bit flow_label input to a valid RoCE v2
+ * UDP src port 14 bit value. All RoCE V2 drivers should use this same
+ * convention.
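+ *
+ * Worked example (for illustration): flow_label 0x12345 gives
+ * fl_low = 0x12345 & 0x3fff = 0x2345 and fl_high >> 14 = 0x4, so the
+ * resulting source port is 0xc000 | (0x2345 ^ 0x4) = 0xe341, which lies
+ * inside the valid 49152-65535 range.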
+ */ +static inline u16 xsc_flow_label_to_udp_sport(u32 fl) +{ + u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000; + + fl_low ^= fl_high >> 14; + return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN); +} + +#define XSC_IB_IOMMU_MAP_DISABLE 0 +#define XSC_IB_IOMMU_MAP_UNKNOWN_DOMAIN 1 +#define XSC_IB_IOMMU_MAP_NORMAL 2 + +static inline int xsc_ib_iommu_dma_map(struct ib_device *ibdev) +{ + return to_mdev(ibdev)->iommu_state; +} + +static inline void *xsc_ib_iova_to_virt(struct ib_device *ibdev, dma_addr_t iova) +{ + phys_addr_t phyaddr; + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(ibdev->dma_device); + if (likely(domain)) { + phyaddr = iommu_iova_to_phys(domain, iova); + phyaddr |= iova & (PAGE_SIZE - 1); + } else { + phyaddr = dma_to_phys(ibdev->dma_device, iova); + } + + return phys_to_virt(phyaddr); +} + +struct ib_mad_list_head { + struct list_head list; + struct ib_cqe cqe; + struct ib_mad_queue *mad_queue; +}; + +#define IB_MAD_SEND_REQ_MAX_SG 2 +struct ib_mad_send_wr_private { + struct ib_mad_list_head mad_list; + struct list_head agent_list; + struct ib_mad_agent_private *mad_agent_priv; + struct ib_mad_send_buf send_buf; + u64 header_mapping; + u64 payload_mapping; + struct ib_ud_wr send_wr; + struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; + __be64 tid; + unsigned long timeout; + int max_retries; + int retries_left; + int retry; + int refcount; + enum ib_wc_status status; + + /* RMPP control */ + struct list_head rmpp_list; + struct ib_rmpp_segment *last_ack_seg; + struct ib_rmpp_segment *cur_seg; + int last_ack; + int seg_num; + int newwin; + int pad; +}; + +struct ib_mad_private_header { + struct ib_mad_list_head mad_list; + struct ib_mad_recv_wc recv_wc; + struct ib_wc wc; + u64 mapping; +} __packed; + +struct ib_mad_private { + struct ib_mad_private_header header; + size_t mad_size; + struct ib_grh grh; + u8 mad[]; +} __packed; + +static inline void *xsc_ib_send_mad_sg_virt_addr(struct ib_device *ibdev, + const struct ib_send_wr *wr, + int sg) +{ + struct ib_mad_send_wr_private *mad_send_wr; + struct ib_mad_list_head *mad_list; + int iommu_state = xsc_ib_iommu_dma_map(ibdev); + + /* direct dma mapping */ + if (!iommu_state) + return phys_to_virt(dma_to_phys(ibdev->dma_device, wr->sg_list[sg].addr)); + + if (iommu_state == XSC_IB_IOMMU_MAP_NORMAL) + return xsc_ib_iova_to_virt(ibdev, wr->sg_list[sg].addr); + + mad_list = container_of(wr->wr_cqe, struct ib_mad_list_head, cqe); + mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, + mad_list); + + /* sg_list[] */ + if (sg == 0) + return mad_send_wr->send_buf.mad; + + /* sg_list[1] */ + if (mad_send_wr->send_buf.seg_count) + return ib_get_rmpp_segment(&mad_send_wr->send_buf, + mad_send_wr->seg_num); + return mad_send_wr->send_buf.mad + mad_send_wr->send_buf.hdr_len; +} + +static inline void *xsc_ib_recv_mad_sg_virt_addr(struct ib_device *ibdev, + struct ib_wc *wc, + u64 sg_addr) +{ + struct ib_mad_private_header *mad_priv_hdr; + struct ib_mad_private *recv; + struct ib_mad_list_head *mad_list; + int iommu_state = xsc_ib_iommu_dma_map(ibdev); + + /* direct dma mapping */ + if (!iommu_state) + return phys_to_virt(dma_to_phys(ibdev->dma_device, sg_addr)); + + if (iommu_state == XSC_IB_IOMMU_MAP_NORMAL) + return xsc_ib_iova_to_virt(ibdev, sg_addr); + + mad_list = container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); + mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, mad_list); + recv = container_of(mad_priv_hdr, struct ib_mad_private, header); + return &recv->grh; +} + 
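+/*
+ * Illustrative sketch (not part of the driver API): typical use of the
+ * receive-side helper above when polling a MAD completion. The function
+ * name and the GRH-skipping step are assumptions for illustration only;
+ * they mirror how struct ib_mad_private is laid out above (GRH first,
+ * MAD payload right after it).
+ */
+static inline void *xsc_ib_example_mad_payload(struct ib_device *ibdev,
+ struct ib_wc *wc, u64 sg_addr)
+{
+ /* Resolve the kernel virtual address of the receive buffer for all
+ * three IOMMU states handled by xsc_ib_iommu_dma_map().
+ */
+ void *buf = xsc_ib_recv_mad_sg_virt_addr(ibdev, wc, sg_addr);
+
+ /* The buffer starts with the GRH; the MAD itself follows. */
+ return buf + sizeof(struct ib_grh);
+}
+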
+#endif /* XSC_IB_H */ diff --git a/drivers/infiniband/hw/xsc/xsc_ib_compat.h b/drivers/infiniband/hw/xsc/xsc_ib_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..9d43cfd5d41c362a7cc4704c009ab8bd2a13c0f8 --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_ib_compat.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_IB_COMPAT_H +#define XSC_IB_COMPAT_H + +/* + * adaptive to different ib_core versions + */ + +struct xsc_ib_ucontext; + +int xsc_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *ah_attr, + struct ib_udata *udata); +#define xsc_ib_create_ah_def() int xsc_ib_create_ah(\ + struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata) + +int xsc_ib_destroy_ah(struct ib_ah *ah, u32 flags); +#define xsc_ib_destroy_ah_def() int xsc_ib_destroy_ah(struct ib_ah *ah, u32 flags) +int xsc_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); +int xsc_ib_create_cq(struct ib_cq *ibcq, + const struct ib_cq_init_attr *attr, + struct ib_udata *udata); +int xsc_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); + +// from main.c static functions +int xsc_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); +void xsc_ib_dealloc_ucontext(struct ib_ucontext *ibcontext); +int xsc_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); + +int xsc_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); +#define xsc_ib_dealloc_pd_def() int xsc_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) + +int xsc_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); +#define xsc_ib_destroy_cq_def() int xsc_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) + +#define xsc_ib_destroy_qp_def() int xsc_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) +#define xsc_ib_create_cq_def() int xsc_ib_create_cq(struct ib_cq *ibcq,\ + const struct ib_cq_init_attr *attr, struct ib_udata *udata) +#define xsc_ib_dereg_mr_def() int xsc_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) +#define xsc_ib_alloc_ucontext_def() int xsc_ib_alloc_ucontext(\ + struct ib_ucontext *uctx, struct ib_udata *udata) +#define xsc_ib_dealloc_ucontext_def() void xsc_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) +#define xsc_ib_alloc_pd_def() int xsc_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) + +#define RET_VALUE(x) (x) + +#ifdef IB_ALLOC_MR_HAVE_UDATA +struct ib_mr *xsc_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg, struct ib_udata *udata); +#define xsc_ib_alloc_mr_def() struct ib_mr *xsc_ib_alloc_mr(\ + struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg, struct ib_udata *udata) +#else +struct ib_mr *xsc_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg); +#define xsc_ib_alloc_mr_def() struct ib_mr *xsc_ib_alloc_mr(\ + struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) +#endif + +#endif diff --git a/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c b/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..f94f76394b2d74f7b34f4396f926da6e42699a10 --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/xsc_cmd.h" +#include "xsc_ib.h" + +static ssize_t hca_type_show(struct device *device, struct device_attribute *attr, char *buf) +{ + struct ib_device *ib_dev = container_of(device, struct ib_device, dev); + struct xsc_core_device *dev = to_mdev(ib_dev)->xdev; + struct pci_dev *pdev = dev->pdev; + + return sprintf(buf, "%x\n", pdev->subsystem_device); +} + +static DEVICE_ATTR_RO(hca_type); + +static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, char *buf) +{ + struct ib_device *ib_dev = container_of(device, struct ib_device, dev); + struct xsc_core_device *dev = to_mdev(ib_dev)->xdev; + u32 hw_ver = 0; + + hw_ver = ((dev->chip_ver_l & 0xffff) << 16) | + (dev->hotfix_num & 0xffff); + return sprintf(buf, "0x%x\n", hw_ver); +} + +static DEVICE_ATTR_RO(hw_rev); + +static struct device_attribute *xsc_ib_attributes[] = { + &dev_attr_hca_type, + &dev_attr_hw_rev, +}; + +void xsc_ib_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ + int err = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(xsc_ib_attributes); i++) { + err = device_create_file(&ib_dev->dev, xsc_ib_attributes[i]); + if (err) + xsc_core_err(xdev, "Create sysfs file for %s failed.\n", + xsc_ib_attributes[i]->attr.name); + } +} + +void xsc_ib_sysfs_fini(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(xsc_ib_attributes); i++) + device_remove_file(&ib_dev->dev, xsc_ib_attributes[i]); +} diff --git a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..dcf934b61e9bf48154f04a6958b5190519a0885d --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c @@ -0,0 +1,715 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_port_ctrl.h" +#include "xsc_ib.h" + +#define XSC_RDMA_CTRL_NAME "rdma_ctrl" + +static void encode_cc_cmd_enable_rp(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_enable_rp *cc_cmd = (struct xsc_cc_cmd_enable_rp *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->enable = __cpu_to_be32(cc_cmd->enable); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_enable_np(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_enable_np *cc_cmd = (struct xsc_cc_cmd_enable_np *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->enable = __cpu_to_be32(cc_cmd->enable); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_init_alpha(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_init_alpha *cc_cmd = (struct xsc_cc_cmd_init_alpha *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->alpha = __cpu_to_be32(cc_cmd->alpha); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_g(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_g *cc_cmd = (struct xsc_cc_cmd_g *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->g = __cpu_to_be32(cc_cmd->g); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_ai(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_ai *cc_cmd = (struct xsc_cc_cmd_ai *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->ai = __cpu_to_be32(cc_cmd->ai); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_hai(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_hai *cc_cmd = (struct xsc_cc_cmd_hai *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->hai = __cpu_to_be32(cc_cmd->hai); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_th(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_th *cc_cmd = (struct xsc_cc_cmd_th *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->threshold = __cpu_to_be32(cc_cmd->threshold); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_bc(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_bc *cc_cmd = (struct xsc_cc_cmd_bc *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->bytecount = __cpu_to_be32(cc_cmd->bytecount); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_cnp_opcode(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_opcode *cc_cmd = (struct xsc_cc_cmd_cnp_opcode *)data; + + cc_cmd->opcode = __cpu_to_be32(cc_cmd->opcode); +} + +static void encode_cc_cmd_cnp_bth_b(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_bth_b *cc_cmd = (struct xsc_cc_cmd_cnp_bth_b *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->bth_b = __cpu_to_be32(cc_cmd->bth_b); +} + +static void encode_cc_cmd_cnp_bth_f(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_bth_f *cc_cmd = (struct xsc_cc_cmd_cnp_bth_f *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->bth_f = __cpu_to_be32(cc_cmd->bth_f); 
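+ /* Like the other CC encoders, the fields are converted to big
+ * endian before being sent over the command queue; note that the
+ * CNP BTH_B/BTH_F commands carry no per-port section field, so
+ * the mac_port argument is intentionally unused here.
+ */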
+} + +static void encode_cc_cmd_cnp_ecn(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_ecn *cc_cmd = (struct xsc_cc_cmd_cnp_ecn *)data; + + cc_cmd->ecn = __cpu_to_be32(cc_cmd->ecn); +} + +static void encode_cc_cmd_data_ecn(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_data_ecn *cc_cmd = (struct xsc_cc_cmd_data_ecn *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->ecn = __cpu_to_be32(cc_cmd->ecn); +} + +static void encode_cc_cmd_cnp_tx_interval(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_tx_interval *cc_cmd = (struct xsc_cc_cmd_cnp_tx_interval *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->interval = __cpu_to_be32(cc_cmd->interval); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_evt_rsttime(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_evt_rsttime *cc_cmd = + (struct xsc_cc_cmd_evt_rsttime *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->period = __cpu_to_be32(cc_cmd->period); +} + +static void encode_cc_cmd_cnp_dscp(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_dscp *cc_cmd = (struct xsc_cc_cmd_cnp_dscp *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->dscp = __cpu_to_be32(cc_cmd->dscp); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_cnp_pcp(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_pcp *cc_cmd = (struct xsc_cc_cmd_cnp_pcp *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->pcp = __cpu_to_be32(cc_cmd->pcp); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_evt_period_alpha(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_evt_period_alpha *cc_cmd = (struct xsc_cc_cmd_evt_period_alpha *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->period = __cpu_to_be32(cc_cmd->period); +} + +static void encode_cc_cmd_clamp_tgt_rate(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_clamp_tgt_rate *cc_cmd = (struct xsc_cc_cmd_clamp_tgt_rate *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->clamp_tgt_rate = __cpu_to_be32(cc_cmd->clamp_tgt_rate); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_max_hai_factor(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_max_hai_factor *cc_cmd = (struct xsc_cc_cmd_max_hai_factor *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->max_hai_factor = __cpu_to_be32(cc_cmd->max_hai_factor); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_scale(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_scale *cc_cmd = (struct xsc_cc_cmd_scale *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->scale = __cpu_to_be32(cc_cmd->scale); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_get_cfg(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void decode_cc_get_cfg(void *data) +{ + struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *)data; + + cc_cmd->cmd = 
__be16_to_cpu(cc_cmd->cmd); + cc_cmd->len = __be16_to_cpu(cc_cmd->len); + cc_cmd->enable_rp = __be32_to_cpu(cc_cmd->enable_rp); + cc_cmd->enable_np = __be32_to_cpu(cc_cmd->enable_np); + cc_cmd->init_alpha = __be32_to_cpu(cc_cmd->init_alpha); + cc_cmd->g = __be32_to_cpu(cc_cmd->g); + cc_cmd->ai = __be32_to_cpu(cc_cmd->ai); + cc_cmd->hai = __be32_to_cpu(cc_cmd->hai); + cc_cmd->threshold = __be32_to_cpu(cc_cmd->threshold); + cc_cmd->bytecount = __be32_to_cpu(cc_cmd->bytecount); + cc_cmd->opcode = __be32_to_cpu(cc_cmd->opcode); + cc_cmd->bth_b = __be32_to_cpu(cc_cmd->bth_b); + cc_cmd->bth_f = __be32_to_cpu(cc_cmd->bth_f); + cc_cmd->cnp_ecn = __be32_to_cpu(cc_cmd->cnp_ecn); + cc_cmd->data_ecn = __be32_to_cpu(cc_cmd->data_ecn); + cc_cmd->cnp_tx_interval = __be32_to_cpu(cc_cmd->cnp_tx_interval); + cc_cmd->evt_period_rsttime = __be32_to_cpu(cc_cmd->evt_period_rsttime); + cc_cmd->cnp_dscp = __be32_to_cpu(cc_cmd->cnp_dscp); + cc_cmd->cnp_pcp = __be32_to_cpu(cc_cmd->cnp_pcp); + cc_cmd->evt_period_alpha = __be32_to_cpu(cc_cmd->evt_period_alpha); + cc_cmd->clamp_tgt_rate = __be32_to_cpu(cc_cmd->clamp_tgt_rate); + cc_cmd->max_hai_factor = __be32_to_cpu(cc_cmd->max_hai_factor); + cc_cmd->scale = __be32_to_cpu(cc_cmd->scale); + cc_cmd->section = __be32_to_cpu(cc_cmd->section); +} + +static void encode_cc_get_stat(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_get_stat *cc_cmd = (struct xsc_cc_cmd_get_stat *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void decode_cc_get_stat(void *data) +{ + struct xsc_cc_cmd_stat *cc_cmd = (struct xsc_cc_cmd_stat *)data; + + cc_cmd->cnp_handled = __be32_to_cpu(cc_cmd->cnp_handled); + cc_cmd->alpha_recovery = __be32_to_cpu(cc_cmd->alpha_recovery); + cc_cmd->reset_timeout = __be32_to_cpu(cc_cmd->reset_timeout); + cc_cmd->reset_bytecount = __be32_to_cpu(cc_cmd->reset_bytecount); +} + +static int xsc_priv_dev_ioctl_get_force_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *resp = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->pcp = ib_dev->force_pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_get_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *resp = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->dscp = ib_dev->force_dscp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *req = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_pcp = req->pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *req = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_dscp = req->dscp; + return 0; +} + +static int xsc_priv_dev_ioctl_get_cma_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct 
xsc_ib_dev *ib_dev = xdev->xsc_ib_dev;
+ struct xsc_ioctl_cma_pcp *resp = (struct xsc_ioctl_cma_pcp *)out;
+
+ if (!xsc_core_is_pf(xdev))
+ return -EOPNOTSUPP;
+
+ resp->pcp = ib_dev->cm_pcp;
+ return 0;
+}
+
+static int xsc_priv_dev_ioctl_get_cma_dscp(struct xsc_core_device *xdev, void *in, void *out)
+{
+ struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev;
+ struct xsc_ioctl_cma_dscp *resp = (struct xsc_ioctl_cma_dscp *)out;
+
+ if (!xsc_core_is_pf(xdev))
+ return -EOPNOTSUPP;
+
+ resp->dscp = ib_dev->cm_dscp;
+ return 0;
+}
+
+static int xsc_priv_dev_ioctl_set_cma_pcp(struct xsc_core_device *xdev, void *in, void *out)
+{
+ struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev;
+ struct xsc_ioctl_cma_pcp *req = (struct xsc_ioctl_cma_pcp *)out;
+
+ if (!xsc_core_is_pf(xdev))
+ return -EOPNOTSUPP;
+
+ if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET))
+ return -EINVAL;
+
+ ib_dev->cm_pcp = req->pcp;
+ return 0;
+}
+
+static int xsc_priv_dev_ioctl_set_cma_dscp(struct xsc_core_device *xdev, void *in, void *out)
+{
+ struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev;
+ struct xsc_ioctl_cma_dscp *req = (struct xsc_ioctl_cma_dscp *)out;
+
+ if (!xsc_core_is_pf(xdev))
+ return -EOPNOTSUPP;
+
+ if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET))
+ return -EINVAL;
+
+ ib_dev->cm_dscp = req->dscp;
+ return 0;
+}
+
+static int _rdma_ctrl_ioctl_cc(struct xsc_core_device *xdev,
+ struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr,
+ u16 expect_req_size, u16 expect_resp_size,
+ void (*encode)(void *, u32), void (*decode)(void *))
+{
+ struct xsc_cc_mbox_in *in;
+ struct xsc_cc_mbox_out *out;
+ u16 user_size;
+ int err;
+
+ user_size = expect_req_size > expect_resp_size ? expect_req_size : expect_resp_size;
+ if (hdr->attr.length != user_size)
+ return -EINVAL;
+
+ in = kvzalloc(sizeof(struct xsc_cc_mbox_in) + expect_req_size, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+ out = kvzalloc(sizeof(struct xsc_cc_mbox_out) + expect_resp_size, GFP_KERNEL);
+ if (!out) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = -EFAULT;
+ if (copy_from_user(&in->data, user_hdr->attr.data, expect_req_size))
+ goto err;
+
+ in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode);
+ in->hdr.ver = cpu_to_be16(hdr->attr.ver);
+ if (encode)
+ encode((void *)in->data, xdev->mac_port);
+
+ err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out,
+ sizeof(*out) + expect_resp_size);
+ if (err)
+ goto err;
+
+ hdr->attr.error = __be32_to_cpu(out->hdr.status);
+ if (decode)
+ decode((void *)out->data);
+
+ err = -EFAULT;
+ if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr)))
+ goto err;
+ if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size))
+ goto err;
+
+ kvfree(in);
+ kvfree(out);
+ return 0;
+
+err:
+ kvfree(out);
+err_out:
+ kvfree(in);
+ return err;
+}
+
+int _rdma_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out,
+ int out_size)
+{
+ int opcode, ret = 0;
+ struct xsc_ioctl_attr *hdr;
+
+ hdr = (struct xsc_ioctl_attr *)in;
+ opcode = hdr->opcode;
+ switch (opcode) {
+ case XSC_IOCTL_GET_FORCE_PCP:
+ ret = xsc_priv_dev_ioctl_get_force_pcp(xdev, in, out);
+ break;
+ case XSC_IOCTL_GET_FORCE_DSCP:
+ ret = xsc_priv_dev_ioctl_get_force_dscp(xdev, in, out);
+ break;
+ case XSC_IOCTL_GET_CMA_PCP:
+ ret = xsc_priv_dev_ioctl_get_cma_pcp(xdev, in, out);
+ break;
+ case XSC_IOCTL_GET_CMA_DSCP:
+ ret = xsc_priv_dev_ioctl_get_cma_dscp(xdev, in, out);
+ break;
+ case XSC_IOCTL_SET_FORCE_PCP:
+ xsc_core_dbg(xdev, "setting global pcp\n");
+ ret = xsc_priv_dev_ioctl_set_force_pcp(xdev, in, out);
+ break;
+ case XSC_IOCTL_SET_FORCE_DSCP:
+ xsc_core_dbg(xdev, "setting global dscp\n");
+ ret = xsc_priv_dev_ioctl_set_force_dscp(xdev, in, out);
+ break;
+ case XSC_IOCTL_SET_CMA_PCP:
+ ret = xsc_priv_dev_ioctl_set_cma_pcp(xdev, in, out);
+ break;
+ case XSC_IOCTL_SET_CMA_DSCP:
+ ret = xsc_priv_dev_ioctl_set_cma_dscp(xdev, in, out);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static long _rdma_ctrl_ioctl_getinfo(struct xsc_core_device *xdev,
+ struct xsc_ioctl_hdr __user *user_hdr)
+{
+ struct xsc_ioctl_hdr hdr;
+ struct xsc_ioctl_hdr *in;
+ int in_size;
+ int err;
+
+ err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+ if (err)
+ return -EFAULT;
+ if (hdr.check_filed != XSC_IOCTL_CHECK_FILED)
+ return -EINVAL;
+ switch (hdr.attr.opcode) {
+ case XSC_IOCTL_GET_FORCE_PCP:
+ case XSC_IOCTL_GET_FORCE_DSCP:
+ case XSC_IOCTL_SET_FORCE_PCP:
+ case XSC_IOCTL_SET_FORCE_DSCP:
+ case XSC_IOCTL_GET_CMA_PCP:
+ case XSC_IOCTL_GET_CMA_DSCP:
+ case XSC_IOCTL_SET_CMA_PCP:
+ case XSC_IOCTL_SET_CMA_DSCP:
+ break;
+ default:
+ return -EINVAL;
+ }
+ in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length;
+ in = kvzalloc(in_size, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+ in->attr.opcode = hdr.attr.opcode;
+ in->attr.length = hdr.attr.length;
+ err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length);
+ if (err) {
+ kvfree(in);
+ return -EFAULT;
+ }
+
+ err = _rdma_ctrl_exec_ioctl(xdev, &in->attr, (in_size - sizeof(u32)), in->attr.data,
+ hdr.attr.length);
+ in->attr.error = err;
+ if (copy_to_user(user_hdr, in, in_size))
+ err = -EFAULT;
+ kvfree(in);
+ return err;
+}
+
+static long _rdma_ctrl_ioctl_cmdq(struct xsc_core_device *xdev,
+ struct xsc_ioctl_hdr __user *user_hdr)
+{
+ struct xsc_ioctl_hdr hdr;
+ int err;
+ void *in;
+ void *out;
+
+ err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+ if (err)
+ return -EFAULT;
+
+ /* check valid */
+ if (hdr.check_filed != XSC_IOCTL_CHECK_FILED)
+ return -EINVAL;
+
+ /* check ioctl cmd */
+ switch (hdr.attr.opcode) {
+ case XSC_CMD_OP_IOCTL_SET_ENABLE_RP:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_enable_rp),
+ 0, encode_cc_cmd_enable_rp, NULL);
+ case XSC_CMD_OP_IOCTL_SET_ENABLE_NP:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_enable_np),
+ 0, encode_cc_cmd_enable_np, NULL);
+ case XSC_CMD_OP_IOCTL_SET_INIT_ALPHA:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_init_alpha),
+ 0, encode_cc_cmd_init_alpha, NULL);
+ case XSC_CMD_OP_IOCTL_SET_G:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_g),
+ 0, encode_cc_cmd_g, NULL);
+ case XSC_CMD_OP_IOCTL_SET_AI:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_ai),
+ 0, encode_cc_cmd_ai, NULL);
+ case XSC_CMD_OP_IOCTL_SET_HAI:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_hai),
+ 0, encode_cc_cmd_hai, NULL);
+ case XSC_CMD_OP_IOCTL_SET_TH:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_th),
+ 0, encode_cc_cmd_th, NULL);
+ case XSC_CMD_OP_IOCTL_SET_BC_TH:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_bc),
+ 0, encode_cc_cmd_bc, NULL);
+ case XSC_CMD_OP_IOCTL_SET_CNP_OPCODE:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_cnp_opcode),
+ 0, encode_cc_cmd_cnp_opcode, NULL);
+ case XSC_CMD_OP_IOCTL_SET_CNP_BTH_B:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_cnp_bth_b),
+ 0, encode_cc_cmd_cnp_bth_b, NULL);
+ case XSC_CMD_OP_IOCTL_SET_CNP_BTH_F:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_cnp_bth_f),
+ 0, encode_cc_cmd_cnp_bth_f, NULL);
+ case XSC_CMD_OP_IOCTL_SET_CNP_ECN:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_ecn),
+ 0, encode_cc_cmd_cnp_ecn, NULL);
+ case XSC_CMD_OP_IOCTL_SET_DATA_ECN:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_data_ecn),
+ 0, encode_cc_cmd_data_ecn, NULL);
+ case XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_cnp_tx_interval),
+ 0, encode_cc_cmd_cnp_tx_interval, NULL);
+ case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_evt_rsttime),
+ 0, encode_cc_cmd_evt_rsttime, NULL);
+ case XSC_CMD_OP_IOCTL_SET_CNP_DSCP:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_cnp_dscp),
+ 0, encode_cc_cmd_cnp_dscp, NULL);
+ case XSC_CMD_OP_IOCTL_SET_CNP_PCP:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_cnp_pcp),
+ 0, encode_cc_cmd_cnp_pcp, NULL);
+ case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_evt_period_alpha),
+ 0, encode_cc_cmd_evt_period_alpha, NULL);
+ case XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_clamp_tgt_rate),
+ 0, encode_cc_cmd_clamp_tgt_rate, NULL);
+ case XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_max_hai_factor),
+ 0, encode_cc_cmd_max_hai_factor, NULL);
+ case XSC_CMD_OP_IOCTL_SET_SCALE:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr,
+ sizeof(struct xsc_cc_cmd_scale),
+ 0, encode_cc_cmd_scale, NULL);
+ case XSC_CMD_OP_IOCTL_GET_CC_CFG:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_cfg),
+ sizeof(struct xsc_cc_cmd_get_cfg),
+ encode_cc_get_cfg, decode_cc_get_cfg);
+ case XSC_CMD_OP_IOCTL_GET_CC_STAT:
+ return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_stat),
+ sizeof(struct xsc_cc_cmd_stat),
+ encode_cc_get_stat, decode_cc_get_stat);
+ default:
+ return -EINVAL;
+ }
+
+ in = kvzalloc(hdr.attr.length, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+ out = kvzalloc(hdr.attr.length, GFP_KERNEL);
+ if (!out) {
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length);
+ if (err) {
+ err = -EFAULT;
+ goto err_exit;
+ }
+
+ err = xsc_cmd_exec(xdev, in, hdr.attr.length, out, hdr.attr.length);
+ if (err)
+ goto err_exit;
+
+ if (copy_to_user((void *)user_hdr, &hdr, sizeof(hdr)))
+ err = -EFAULT;
+ if (copy_to_user((void *)user_hdr->attr.data, out, hdr.attr.length))
+ err = -EFAULT;
+err_exit:
+ kvfree(in);
+ kvfree(out);
+ return err;
+}
+
+static int _rdma_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd,
+ struct xsc_ioctl_hdr __user *user_hdr, void *data)
+{
+ struct xsc_core_device *xdev = file->xdev;
+ int err;
+
+ switch (cmd) {
+ case XSC_IOCTL_CMDQ:
+ err = _rdma_ctrl_ioctl_cmdq(xdev, user_hdr);
+ break;
+ case XSC_IOCTL_DRV_GET:
+ case XSC_IOCTL_DRV_SET:
+ // TODO refactor to split driver get and set
+ err = _rdma_ctrl_ioctl_getinfo(xdev, user_hdr);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static void _rdma_ctrl_reg_fini(void)
+{
+ xsc_port_ctrl_cb_dereg(XSC_RDMA_CTRL_NAME);
+}
+
+static int
_rdma_ctrl_reg_init(void) +{ + int ret; + + ret = xsc_port_ctrl_cb_reg(XSC_RDMA_CTRL_NAME, _rdma_ctrl_reg_cb, NULL); + if (ret != 0) + pr_err("failed to register port control node for %s\n", XSC_RDMA_CTRL_NAME); + + return ret; +} + +void xsc_rdma_ctrl_fini(void) +{ + _rdma_ctrl_reg_fini(); +} + +int xsc_rdma_ctrl_init(void) +{ + return _rdma_ctrl_reg_init(); +} diff --git a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..5049377101f9a34c3132ad4b913cbd0b3fa8ec75 --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_RDMA_CTRL_H +#define XSC_RDMA_CTRL_H + +void xsc_rdma_ctrl_fini(void); +int xsc_rdma_ctrl_init(void); + +#endif diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index d57c5adf932e3676ee9b790c582fd2f4ca8118fe..d44df703e96eefe282a6471defdfefb92a782679 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -7,6 +7,10 @@ config IOMMU_IOVA config IOMMU_API bool +config IOMMUFD_DRIVER + bool + default n + menuconfig IOMMU_SUPPORT bool "IOMMU Hardware Support" depends on MMU @@ -146,7 +150,7 @@ config OF_IOMMU # IOMMU-agnostic DMA-mapping layer config IOMMU_DMA - def_bool ARM64 || IA64 || X86 + def_bool ARM64 || IA64 || X86 || LOONGARCH select DMA_OPS select IOMMU_API select IOMMU_IOVA @@ -187,6 +191,7 @@ config MSM_IOMMU source "drivers/iommu/amd/Kconfig" source "drivers/iommu/intel/Kconfig" source "drivers/iommu/iommufd/Kconfig" +source "drivers/iommu/sw64/Kconfig" config IRQ_REMAP bool "Support for Interrupt Remapping" @@ -494,4 +499,17 @@ config SPRD_IOMMU Say Y here if you want to use the multimedia devices listed above. +# LOONGARCH IOMMU support +config LOONGARCH_IOMMU + tristate "LOONGARCH IOMMU support" + select IOMMU_API + select IOMMU_DEFAULT_PASSTHROUGH + depends on LOONGARCH + help + With this option you can enable support for LOONGARCH IOMMU hardware in + your system. An IOMMU is a hardware component which provides + remapping of DMA memory accesses from devices. With an LOONGARCH IOMMU you + can isolate the DMA memory of different devices and protect the + system from misbehaving device drivers or hardware. 
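+# Example .config fragment (illustrative): since the option is tristate,
+#   CONFIG_LOONGARCH_IOMMU=m
+# builds the driver as the loongarch_iommu.ko module.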
+ endif # IOMMU_SUPPORT diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 769e43d780ce89810033064bfd3baa8420889bed..724a56c2976a3f99f56d6bf1e6df630c9a04ad4d 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += amd/ intel/ arm/ iommufd/ +obj-y += amd/ intel/ arm/ iommufd/ sw64/ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o @@ -30,3 +30,4 @@ obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o obj-$(CONFIG_APPLE_DART) += apple-dart.o +obj-$(CONFIG_LOONGARCH_IOMMU) += loongarch_iommu.o diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig index 9b5fc3356bf2d8ac1ca9e3a8398200ae8ccd2940..8bd4c3b183ec6e475b58a1990d7b5c33ab141120 100644 --- a/drivers/iommu/amd/Kconfig +++ b/drivers/iommu/amd/Kconfig @@ -10,6 +10,7 @@ config AMD_IOMMU select IOMMU_API select IOMMU_IOVA select IOMMU_IO_PGTABLE + select IOMMUFD_DRIVER if IOMMUFD depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE help With this option you can enable support for AMD IOMMU hardware in diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 7dc30c2b56b302d8bd3cd129f410610d536059e6..dec4e5c2b66b8236fcd6faeb8497fdc9b42dfe20 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -97,7 +97,9 @@ #define FEATURE_GATS_MASK (3ULL) #define FEATURE_GAM_VAPIC BIT_ULL(21) #define FEATURE_GIOSUP BIT_ULL(48) +#define FEATURE_HASUP BIT_ULL(49) #define FEATURE_EPHSUP BIT_ULL(50) +#define FEATURE_HDSUP BIT_ULL(52) #define FEATURE_SNP BIT_ULL(63) #define FEATURE_PASID_SHIFT 32 @@ -212,6 +214,7 @@ /* macros and definitions for device table entries */ #define DEV_ENTRY_VALID 0x00 #define DEV_ENTRY_TRANSLATION 0x01 +#define DEV_ENTRY_HAD 0x07 #define DEV_ENTRY_PPR 0x34 #define DEV_ENTRY_IR 0x3d #define DEV_ENTRY_IW 0x3e @@ -370,10 +373,16 @@ #define PTE_LEVEL_PAGE_SIZE(level) \ (1ULL << (12 + (9 * (level)))) +/* + * The IOPTE dirty bit + */ +#define IOMMU_PTE_HD_BIT (6) + /* * Bit value definition for I/O PTE fields */ #define IOMMU_PTE_PR BIT_ULL(0) +#define IOMMU_PTE_HD BIT_ULL(IOMMU_PTE_HD_BIT) #define IOMMU_PTE_U BIT_ULL(59) #define IOMMU_PTE_FC BIT_ULL(60) #define IOMMU_PTE_IR BIT_ULL(61) @@ -384,6 +393,7 @@ */ #define DTE_FLAG_V BIT_ULL(0) #define DTE_FLAG_TV BIT_ULL(1) +#define DTE_FLAG_HAD (3ULL << 7) #define DTE_FLAG_GIOV BIT_ULL(54) #define DTE_FLAG_GV BIT_ULL(55) #define DTE_GLX_SHIFT (56) @@ -413,6 +423,7 @@ #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR) +#define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD) #define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK)) #define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07) @@ -563,6 +574,7 @@ struct protection_domain { int nid; /* Node ID */ u64 *gcr3_tbl; /* Guest CR3 table */ unsigned long flags; /* flags to find out type of domain */ + bool dirty_tracking; /* dirty tracking is enabled in the domain */ unsigned dev_cnt; /* devices assigned to this domain */ unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ }; diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index ef3fae113dd64302e33b04afd44c4cc9d995e2d3..f6c1f7e04d471d342d5ecc01067aeb49097dd08a 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3013,6 +3013,9 @@ static void __init 
free_iommu_resources(void) /* SB IOAPIC is always on this device in AMD systems */ #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) +/* SB IOAPIC for Hygon family 18h model 4h is on the device 0xb */ +#define IOAPIC_SB_DEVID_FAM18H_M4H ((0x00 << 8) | PCI_DEVFN(0xb, 0)) + static bool __init check_ioapic_information(void) { const char *fw_bug = FW_BUG; @@ -3038,7 +3041,12 @@ static bool __init check_ioapic_information(void) pr_err("%s: IOAPIC[%d] not in IVRS table\n", fw_bug, id); ret = false; - } else if (devid == IOAPIC_SB_DEVID) { + } else if (devid == IOAPIC_SB_DEVID || + (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf && + devid == IOAPIC_SB_DEVID_FAM18H_M4H)) { has_sb_ioapic = true; ret = true; } diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index 2892aa1b4dc1db1771b9ebe5d418a14da9e5f456..6c0621f6f572a4c4c0fb72ea1bdb5abe9d504311 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -486,6 +486,73 @@ static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned lo return (__pte & ~offset_mask) | (iova & offset_mask); } +static bool pte_test_and_clear_dirty(u64 *ptep, unsigned long size, + unsigned long flags) +{ + bool test_only = flags & IOMMU_DIRTY_NO_CLEAR; + bool dirty = false; + int i, count; + + /* + * 2.2.3.2 Host Dirty Support + * When a non-default page size is used , software must OR the + * Dirty bits in all of the replicated host PTEs used to map + * the page. The IOMMU does not guarantee the Dirty bits are + * set in all of the replicated PTEs. Any portion of the page + * may have been written even if the Dirty bit is set in only + * one of the replicated PTEs. + */ + count = PAGE_SIZE_PTE_COUNT(size); + for (i = 0; i < count && test_only; i++) { + if (test_bit(IOMMU_PTE_HD_BIT, (unsigned long *)&ptep[i])) { + dirty = true; + break; + } + } + + for (i = 0; i < count && !test_only; i++) { + if (test_and_clear_bit(IOMMU_PTE_HD_BIT, + (unsigned long *)&ptep[i])) { + dirty = true; + } + } + + return dirty; +} + +static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops); + unsigned long end = iova + size - 1; + + do { + unsigned long pgsize = 0; + u64 *ptep, pte; + + ptep = fetch_pte(pgtable, iova, &pgsize); + if (ptep) + pte = READ_ONCE(*ptep); + if (!ptep || !IOMMU_PTE_PRESENT(pte)) { + pgsize = pgsize ?: PTE_LEVEL_PAGE_SIZE(0); + iova += pgsize; + continue; + } + + /* + * Mark the whole IOVA range as dirty even if only one of + * the replicated PTEs were marked dirty. 
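+ * When IOMMU_DIRTY_NO_CLEAR is passed, pte_test_and_clear_dirty()
+ * above only tests the bits and leaves the PTEs untouched, so the
+ * walk is free of side effects.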
+ */ + if (pte_test_and_clear_dirty(ptep, pgsize, flags)) + iommu_dirty_bitmap_record(dirty, iova, pgsize); + iova += pgsize; + } while (iova < end); + + return 0; +} + /* * ---------------------------------------------------- */ @@ -527,6 +594,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo pgtable->iop.ops.map_pages = iommu_v1_map_pages; pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages; pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys; + pgtable->iop.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty; return &pgtable->iop; } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 95bd7c25ba6f366b5db2582e8cb5318491cbb523..caad10f9cee3f903d38a30df988bd7fa78551655 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "amd_iommu.h" #include "../dma-iommu.h" @@ -65,6 +66,7 @@ LIST_HEAD(hpet_map); LIST_HEAD(acpihid_map); const struct iommu_ops amd_iommu_ops; +const struct iommu_dirty_ops amd_dirty_ops; static ATOMIC_NOTIFIER_HEAD(ppr_notifier); int amd_iommu_max_glx_val = -1; @@ -1610,6 +1612,9 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid, pte_root |= 1ULL << DEV_ENTRY_PPR; } + if (domain->dirty_tracking) + pte_root |= DTE_FLAG_HAD; + if (domain->flags & PD_IOMMUV2_MASK) { u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl); u64 glx = domain->glx; @@ -2155,28 +2160,76 @@ static inline u64 dma_max_address(void) return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1); } -static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) +static bool amd_iommu_hd_support(struct amd_iommu *iommu) +{ + return iommu && (iommu->features & FEATURE_HDSUP); +} + +static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, + struct device *dev, u32 flags) { + bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; struct protection_domain *domain; + struct amd_iommu *iommu = NULL; + + if (dev) { + iommu = rlookup_amd_iommu(dev); + if (!iommu) + return ERR_PTR(-ENODEV); + } /* * Since DTE[Mode]=0 is prohibited on SNP-enabled system, * default to use IOMMU_DOMAIN_DMA[_FQ]. 
*/ if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY)) - return NULL; + return ERR_PTR(-EINVAL); + + if (dirty_tracking && !amd_iommu_hd_support(iommu)) + return ERR_PTR(-EOPNOTSUPP); domain = protection_domain_alloc(type); if (!domain) - return NULL; + return ERR_PTR(-ENOMEM); domain->domain.geometry.aperture_start = 0; domain->domain.geometry.aperture_end = dma_max_address(); domain->domain.geometry.force_aperture = true; + if (iommu) { + domain->domain.type = type; + domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap; + domain->domain.ops = iommu->iommu.ops->default_domain_ops; + + if (dirty_tracking) + domain->domain.dirty_ops = &amd_dirty_ops; + } + return &domain->domain; } +static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type) +{ + struct iommu_domain *domain; + + domain = do_iommu_domain_alloc(type, NULL, 0); + if (IS_ERR(domain)) + return NULL; + + return domain; +} + +static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev, + u32 flags) +{ + unsigned int type = IOMMU_DOMAIN_UNMANAGED; + + if (flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) + return ERR_PTR(-EOPNOTSUPP); + + return do_iommu_domain_alloc(type, dev, flags); +} + static void amd_iommu_domain_free(struct iommu_domain *dom) { struct protection_domain *domain; @@ -2214,6 +2267,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, dev_data->defer_attach = false; + /* + * Restrict to devices with compatible IOMMU hardware support + * when enforcement of dirty tracking is enabled. + */ + if (dom->dirty_ops && !amd_iommu_hd_support(iommu)) + return -EINVAL; + if (dev_data->domain) detach_device(dev); @@ -2332,6 +2392,11 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap) return true; case IOMMU_CAP_DEFERRED_FLUSH: return true; + case IOMMU_CAP_DIRTY_TRACKING: { + struct amd_iommu *iommu = rlookup_amd_iommu(dev); + + return amd_iommu_hd_support(iommu); + } default: break; } @@ -2339,6 +2404,73 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap) return false; } +static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain, + bool enable) +{ + struct protection_domain *pdomain = to_pdomain(domain); + struct dev_table_entry *dev_table; + struct iommu_dev_data *dev_data; + bool domain_flush = false; + struct amd_iommu *iommu; + unsigned long flags; + u64 pte_root; + + spin_lock_irqsave(&pdomain->lock, flags); + if (!(pdomain->dirty_tracking ^ enable)) { + spin_unlock_irqrestore(&pdomain->lock, flags); + return 0; + } + + list_for_each_entry(dev_data, &pdomain->dev_list, list) { + iommu = rlookup_amd_iommu(dev_data->dev); + if (!iommu) + continue; + + dev_table = get_dev_table(iommu); + pte_root = dev_table[dev_data->devid].data[0]; + + pte_root = (enable ? 
pte_root | DTE_FLAG_HAD : + pte_root & ~DTE_FLAG_HAD); + + /* Flush device DTE */ + dev_table[dev_data->devid].data[0] = pte_root; + device_flush_dte(dev_data); + domain_flush = true; + } + + /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */ + if (domain_flush) { + amd_iommu_domain_flush_tlb_pde(pdomain); + amd_iommu_domain_flush_complete(pdomain); + } + pdomain->dirty_tracking = enable; + spin_unlock_irqrestore(&pdomain->lock, flags); + + return 0; +} + +static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct protection_domain *pdomain = to_pdomain(domain); + struct io_pgtable_ops *ops = &pdomain->iop.iop.ops; + unsigned long lflags; + + if (!ops || !ops->read_and_clear_dirty) + return -EOPNOTSUPP; + + spin_lock_irqsave(&pdomain->lock, lflags); + if (!pdomain->dirty_tracking && dirty->bitmap) { + spin_unlock_irqrestore(&pdomain->lock, lflags); + return -EINVAL; + } + spin_unlock_irqrestore(&pdomain->lock, lflags); + + return ops->read_and_clear_dirty(ops, iova, size, flags, dirty); +} + static void amd_iommu_get_resv_regions(struct device *dev, struct list_head *head) { @@ -2461,9 +2593,15 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain) return true; } +const struct iommu_dirty_ops amd_dirty_ops = { + .set_dirty_tracking = amd_iommu_set_dirty_tracking, + .read_and_clear_dirty = amd_iommu_read_and_clear_dirty, +}; + const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, + .domain_alloc_user = amd_iommu_domain_alloc_user, .probe_device = amd_iommu_probe_device, .release_device = amd_iommu_release_device, .probe_finalize = amd_iommu_probe_finalize, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 42c5012ba8aac1cc81382773436078ad2adc3549..818d4d2344dbc1fdd08692138c59dbce0683b77e 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -35,6 +35,10 @@ #include #include +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif + #include #include "arm-smmu.h" @@ -51,6 +55,7 @@ #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 +#define SMR_MASK_SHIFT 16 static int force_stage; module_param(force_stage, int, S_IRUGO); @@ -1374,6 +1379,19 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) return ERR_PTR(-ENODEV); } +#ifdef CONFIG_ARCH_PHYTIUM +#define FWID_READ(id) (((u16)(id) >> 3) | (((id) >> SMR_MASK_SHIFT | 0x7000) << SMR_MASK_SHIFT)) + if (typeof_ft2000plus()) { + int num = fwspec->num_ids; + + for (i = 0; i < num; i++) { + u32 fwid = FWID_READ(fwspec->ids[i]); + + iommu_fwspec_add_ids(dev, &fwid, 1); + } + } +#endif + ret = -EINVAL; for (i = 0; i < fwspec->num_ids; i++) { u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); @@ -1469,7 +1487,12 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) mutex_unlock(&smmu->stream_map_mutex); return ERR_PTR(-EINVAL); } - +#ifdef CONFIG_ARCH_PHYTIUM + if (typeof_s2500()) + break; + if (typeof_ft2000plus() && !smmu->s2crs[idx].group) + continue; +#endif group = smmu->s2crs[idx].group; } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 2da969fc899004c173c8bb28caccf768c1fc7a8a..2eabbe24e8978a370e34e65e683e80650b0f9023 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -478,6 +478,25 @@ static int iova_reserve_pci_windows(struct pci_dev *dev, return 0; } +int 
iova_reserve_domain_addr(struct iommu_domain *domain, dma_addr_t start, dma_addr_t end)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad;
+ unsigned long lo, hi;
+
+ if (!cookie)
+ return -EINVAL;
+
+ iovad = &cookie->iovad;
+ lo = iova_pfn(iovad, start);
+ hi = iova_pfn(iovad, end);
+
+ reserve_iova(iovad, lo, hi);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iova_reserve_domain_addr);
+
 static int iova_reserve_iommu_regions(struct device *dev,
 struct iommu_domain *domain)
 {
@@ -607,7 +626,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 }
 ret = 0;
- goto done_unlock;
+ goto iova_reserve;
 }
 init_iova_domain(iovad, 1UL << order, base_pfn);
@@ -620,6 +639,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) ||
 iommu_dma_init_fq(domain)))
 domain->type = IOMMU_DOMAIN_DMA;
+iova_reserve:
 ret = iova_reserve_iommu_regions(dev, domain);
 done_unlock:
@@ -1610,7 +1630,7 @@ static size_t iommu_dma_max_mapping_size(struct device *dev)
 return SIZE_MAX;
 }
-static const struct dma_map_ops iommu_dma_ops = {
+static const struct dma_map_ops iommu_dmafops = {
 .flags = DMA_F_PCI_P2PDMA_SUPPORTED,
 .alloc = iommu_dma_alloc,
 .free = iommu_dma_free,
@@ -1653,7 +1673,7 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
 if (iommu_is_dma_domain(domain)) {
 if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
 goto out_err;
- dev->dma_ops = &iommu_dma_ops;
+ dev->dma_ops = &iommu_dmafops;
 }
 return;
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 2e56bd79f589d30c2787e695b5c93750fcb480e0..f5348b80652b65bdc043c2a01168789c65a2e626 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -15,6 +15,7 @@ config INTEL_IOMMU
 select DMA_OPS
 select IOMMU_API
 select IOMMU_IOVA
+ select IOMMUFD_DRIVER if IOMMUFD
 select NEED_DMA_MAP_STATE
 select DMAR_TABLE
 select SWIOTLB
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 7a38e18b18196b156d65365336ee1152afc5423c..e14d496a9cbd8884b7443aeb18cc7e748eb4ea9a 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -767,6 +767,59 @@ static void __init dmar_acpi_insert_dev_scope(u8 device_number,
 device_number, dev_name(&adev->dev));
 }
+/* Return: true if a matching device scope was inserted, false otherwise */
+bool dmar_rmrr_acpi_insert_dev_scope(u8 device_number,
+ struct acpi_device *adev,
+ void *start, void *end,
+ struct dmar_dev_scope *devices,
+ int devices_cnt)
+{
+ struct acpi_dmar_device_scope *scope;
+ struct device *tmp;
+ int i;
+ struct acpi_dmar_pci_path *path;
+
+ for (; start < end; start += scope->length) {
+ scope = start;
+ if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
+ continue;
+ if (scope->enumeration_id != device_number)
+ continue;
+ path = (void *)(scope + 1);
+ pr_info("ACPI device \"%s\" under DMAR as %02x:%02x.%d\n", dev_name(&adev->dev),
+ scope->bus, path->device, path->function);
+ for_each_dev_scope(devices, devices_cnt, i, tmp)
+ if (tmp == NULL) {
+ devices[i].bus = scope->bus;
+ devices[i].devfn = PCI_DEVFN(path->device, path->function);
+ rcu_assign_pointer(devices[i].dev, get_device(&adev->dev));
+ return true;
+ }
+ WARN_ON(i >= devices_cnt);
+ }
+ return false;
+}
+
+static int dmar_acpi_bus_add_dev(u8 device_number, struct acpi_device *adev)
+{
+ struct dmar_drhd_unit *dmaru;
+ struct acpi_dmar_hardware_unit *drhd;
+ int ret = 0;
+
+ for_each_drhd_unit(dmaru) {
+ drhd = container_of(dmaru->hdr,
struct acpi_dmar_hardware_unit, header);
+		ret = dmar_rmrr_acpi_insert_dev_scope(device_number, adev, (void *)(drhd+1),
+						      ((void *)drhd)+drhd->header.length,
+						      dmaru->devices, dmaru->devices_cnt);
+		if (ret)
+			break;
+	}
+	if (ret > 0)
+		ret = dmar_rmrr_add_acpi_dev(device_number, adev);
+
+	return ret;
+}
+
 static int __init dmar_acpi_dev_scope_init(void)
 {
 	struct acpi_dmar_andd *andd;
@@ -794,7 +847,11 @@ static int __init dmar_acpi_dev_scope_init(void)
 				       andd->device_name);
 				continue;
 			}
-			dmar_acpi_insert_dev_scope(andd->device_number, adev);
+
+			if (apply_zhaoxin_dmar_acpi_a_behavior())
+				dmar_acpi_bus_add_dev(andd->device_number, adev);
+			else
+				dmar_acpi_insert_dev_scope(andd->device_number, adev);
 		}
 	}
 	return 0;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index d6381c00bb8ddc64b0407d2239286eadc0bb0101..6f993e34bb94949b82cb41d417caa882c220aa22 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -300,6 +300,7 @@ static int iommu_skip_te_disable;
 #define IDENTMAP_AZALIA		4
 
 const struct iommu_ops intel_iommu_ops;
+static const struct iommu_dirty_ops intel_dirty_ops;
 
 static bool translation_pre_enabled(struct intel_iommu *iommu)
 {
@@ -3479,6 +3480,24 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
 	return ret;
 }
 
+int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev)
+{
+	int ret;
+	struct dmar_rmrr_unit *rmrru;
+	struct acpi_dmar_reserved_memory *rmrr;
+
+	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
+		rmrr = container_of(rmrru->hdr, struct acpi_dmar_reserved_memory, header);
+		ret = dmar_rmrr_acpi_insert_dev_scope(device_number, adev, (void *)(rmrr + 1),
+						      ((void *)rmrr) + rmrr->header.length,
+						      rmrru->devices, rmrru->devices_cnt);
+		if (ret)
+			break;
+	}
+
+	return 0;
+}
+
 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 {
 	int ret;
@@ -3737,6 +3756,43 @@ static int __init platform_optin_force_iommu(void)
 	return 1;
 }
 
+static inline int acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain,
+							   struct device *dev)
+{
+	pr_debug("creating RMRR direct mappings for ANDD dev %s\n", dev_name(dev));
+
+	return __acpi_rmrr_device_create_direct_mappings(domain, dev);
+}
+
+static inline int acpi_rmrr_andd_probe(struct device *dev)
+{
+	struct intel_iommu *iommu = NULL;
+	struct pci_dev *pci_device = NULL;
+	u8 bus, devfn;
+	int ret;
+
+	ret = iommu_probe_device(dev);
+	if (ret)
+		return ret;
+
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu) {
+		pr_warn("cannot get the iommu for ACPI device %s\n", dev_name(dev));
+		return -EINVAL;
+	}
+
+	pci_device = pci_get_domain_bus_and_slot(iommu->segment, bus, devfn);
+	if (!pci_device) {
+		pr_warn("cannot get the pci_dev for ACPI device %s\n", dev_name(dev));
+		return -EINVAL;
+	}
+	ret = acpi_rmrr_device_create_direct_mappings(iommu_get_domain_for_dev(&pci_device->dev),
+						      dev);
+	pci_dev_put(pci_device);
+
+	return ret;
+}
+
 static int __init probe_acpi_namespace_devices(void)
 {
 	struct dmar_drhd_unit *drhd;
@@ -3759,6 +3815,10 @@ static int __init probe_acpi_namespace_devices(void)
 			list_for_each_entry(pn, &adev->physical_node_list, node) {
 				ret = iommu_probe_device(pn->dev);
+
+				if (apply_zhaoxin_dmar_acpi_a_behavior())
+					ret = acpi_rmrr_andd_probe(dev);
+
 				if (ret)
 					break;
 			}
@@ -4075,6 +4135,48 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 	return NULL;
 }
 
+static struct iommu_domain *
+intel_iommu_domain_alloc_user(struct device *dev, u32 flags)
+{
+	struct iommu_domain *domain;
+	struct intel_iommu *iommu;
+	bool
dirty_tracking; + + if (flags & + (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING))) + return ERR_PTR(-EOPNOTSUPP); + + iommu = device_to_iommu(dev, NULL, NULL); + if (!iommu) + return ERR_PTR(-ENODEV); + + if ((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) && !nested_supported(iommu)) + return ERR_PTR(-EOPNOTSUPP); + + dirty_tracking = (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING); + if (dirty_tracking && !ssads_supported(iommu)) + return ERR_PTR(-EOPNOTSUPP); + + /* + * domain_alloc_user op needs to fully initialize a domain + * before return, so uses iommu_domain_alloc() here for + * simple. + */ + domain = iommu_domain_alloc(dev->bus); + if (!domain) + domain = ERR_PTR(-ENOMEM); + + if (!IS_ERR(domain) && dirty_tracking) { + if (to_dmar_domain(domain)->use_first_level) { + iommu_domain_free(domain); + return ERR_PTR(-EOPNOTSUPP); + } + domain->dirty_ops = &intel_dirty_ops; + } + + return domain; +} + static void intel_iommu_domain_free(struct iommu_domain *domain) { if (domain != &si_domain->domain && domain != &blocking_domain) @@ -4095,6 +4197,9 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) return -EINVAL; + if (domain->dirty_ops && !ssads_supported(iommu)) + return -EINVAL; + /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) @@ -4350,6 +4455,8 @@ static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap) return dmar_platform_optin(); case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: return ecap_sc_support(info->iommu->ecap); + case IOMMU_CAP_DIRTY_TRACKING: + return ssads_supported(info->iommu); default: return false; } @@ -4445,6 +4552,9 @@ static void intel_iommu_probe_finalize(struct device *dev) { set_dma_ops(dev, NULL); iommu_setup_dma_ops(dev, 0, U64_MAX); + + if (is_zhaoxin_kh40000) + kh40000_set_iommu_dma_ops(dev); } static void intel_iommu_get_resv_regions(struct device *device, @@ -4747,6 +4857,9 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) return -EOPNOTSUPP; + if (domain->dirty_ops) + return -EINVAL; + if (context_copied(iommu, info->bus, info->devfn)) return -EBUSY; @@ -4805,10 +4918,88 @@ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type) return vtd; } +static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain, + bool enable) +{ + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + struct device_domain_info *info; + int ret; + + spin_lock(&dmar_domain->lock); + if (dmar_domain->dirty_tracking == enable) + goto out_unlock; + + list_for_each_entry(info, &dmar_domain->devices, link) { + ret = intel_pasid_setup_dirty_tracking(info->iommu, + info->domain, info->dev, + IOMMU_NO_PASID, enable); + if (ret) + goto err_unwind; + } + + dmar_domain->dirty_tracking = enable; +out_unlock: + spin_unlock(&dmar_domain->lock); + + return 0; + +err_unwind: + list_for_each_entry(info, &dmar_domain->devices, link) + intel_pasid_setup_dirty_tracking(info->iommu, dmar_domain, + info->dev, IOMMU_NO_PASID, + dmar_domain->dirty_tracking); + spin_unlock(&dmar_domain->lock); + return ret; +} + +static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + unsigned long end = iova + size - 1; + unsigned 
long pgsize; + + /* + * IOMMUFD core calls into a dirty tracking disabled domain without an + * IOVA bitmap set in order to clean dirty bits in all PTEs that might + * have occurred when we stopped dirty tracking. This ensures that we + * never inherit dirtied bits from a previous cycle. + */ + if (!dmar_domain->dirty_tracking && dirty->bitmap) + return -EINVAL; + + do { + struct dma_pte *pte; + int lvl = 0; + + pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &lvl, + GFP_ATOMIC); + pgsize = level_size(lvl) << VTD_PAGE_SHIFT; + if (!pte || !dma_pte_present(pte)) { + iova += pgsize; + continue; + } + + if (dma_sl_pte_test_and_clear_dirty(pte, flags)) + iommu_dirty_bitmap_record(dirty, iova, pgsize); + iova += pgsize; + } while (iova < end); + + return 0; +} + +static const struct iommu_dirty_ops intel_dirty_ops = { + .set_dirty_tracking = intel_iommu_set_dirty_tracking, + .read_and_clear_dirty = intel_iommu_read_and_clear_dirty, +}; + const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .hw_info = intel_iommu_hw_info, .domain_alloc = intel_iommu_domain_alloc, + .domain_alloc_user = intel_iommu_domain_alloc_user, .probe_device = intel_iommu_probe_device, .probe_finalize = intel_iommu_probe_finalize, .release_device = intel_iommu_release_device, diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index e6a3e70656166a081b0184f9f8c281d86d8f27b1..49ea164cb006f54a480c7204f13c05b3f48a7882 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -48,6 +48,9 @@ #define DMA_FL_PTE_DIRTY BIT_ULL(6) #define DMA_FL_PTE_XD BIT_ULL(63) +#define DMA_SL_PTE_DIRTY_BIT 9 +#define DMA_SL_PTE_DIRTY BIT_ULL(DMA_SL_PTE_DIRTY_BIT) + #define ADDR_WIDTH_5LEVEL (57) #define ADDR_WIDTH_4LEVEL (48) @@ -539,6 +542,10 @@ enum { #define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap)) #define pasid_supported(iommu) (sm_supported(iommu) && \ ecap_pasid((iommu)->ecap)) +#define ssads_supported(iommu) (sm_supported(iommu) && \ + ecap_slads((iommu)->ecap)) +#define nested_supported(iommu) (sm_supported(iommu) && \ + ecap_nest((iommu)->ecap)) struct pasid_entry; struct pasid_state_entry; @@ -595,6 +602,7 @@ struct dmar_domain { u8 has_mappings:1; /* Has mappings configured through * iommu_map() interface. */ + u8 dirty_tracking:1; /* Dirty tracking is enabled */ spinlock_t lock; /* Protect device tracking lists */ struct list_head devices; /* all devices' list */ @@ -784,6 +792,16 @@ static inline bool dma_pte_present(struct dma_pte *pte) return (pte->val & 3) != 0; } +static inline bool dma_sl_pte_test_and_clear_dirty(struct dma_pte *pte, + unsigned long flags) +{ + if (flags & IOMMU_DIRTY_NO_CLEAR) + return (pte->val & DMA_SL_PTE_DIRTY) != 0; + + return test_and_clear_bit(DMA_SL_PTE_DIRTY_BIT, + (unsigned long *)&pte->val); +} + static inline bool dma_pte_superpage(struct dma_pte *pte) { return (pte->val & DMA_PTE_LARGE_PAGE); diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 8faa93cffac45d179d201c6bba776572d9b8dee6..06ea2dd5354215a28f7a3bb2eabbf0cc08bc7ace 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -277,6 +277,11 @@ static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits) WRITE_ONCE(*ptr, (old & ~mask) | bits); } +static inline u64 pasid_get_bits(u64 *ptr) +{ + return READ_ONCE(*ptr); +} + /* * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode * PASID entry. 
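The two dirty_ops above fix the read-side calling convention for Intel: a caller may pass IOMMU_DIRTY_NO_CLEAR to snapshot dirty state without write-clearing the second-level PTEs, which is exactly what dma_sl_pte_test_and_clear_dirty() implements. A minimal kernel-side sketch of a consumer (illustrative only, not part of the patch; snapshot_dirty is a hypothetical name, and the iommu_dirty_bitmap is assumed to have been prepared with iommu_dirty_bitmap_init(), as iommufd does later in this series):

	static int snapshot_dirty(struct iommu_domain *domain, unsigned long iova,
				  size_t size, struct iommu_dirty_bitmap *dirty)
	{
		const struct iommu_dirty_ops *ops = domain->dirty_ops;

		if (!ops || !ops->read_and_clear_dirty)
			return -EOPNOTSUPP;

		/* Record dirty pages but leave the PTE dirty bits set */
		return ops->read_and_clear_dirty(domain, iova, size,
						 IOMMU_DIRTY_NO_CLEAR, dirty);
	}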
@@ -335,6 +340,36 @@ static inline void pasid_set_fault_enable(struct pasid_entry *pe) pasid_set_bits(&pe->val[0], 1 << 1, 0); } +/* + * Enable second level A/D bits by setting the SLADE (Second Level + * Access Dirty Enable) field (Bit 9) of a scalable mode PASID + * entry. + */ +static inline void pasid_set_ssade(struct pasid_entry *pe) +{ + pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9); +} + +/* + * Disable second level A/D bits by clearing the SLADE (Second Level + * Access Dirty Enable) field (Bit 9) of a scalable mode PASID + * entry. + */ +static inline void pasid_clear_ssade(struct pasid_entry *pe) +{ + pasid_set_bits(&pe->val[0], 1 << 9, 0); +} + +/* + * Checks if second level A/D bits specifically the SLADE (Second Level + * Access Dirty Enable) field (Bit 9) of a scalable mode PASID + * entry is set. + */ +static inline bool pasid_get_ssade(struct pasid_entry *pe) +{ + return pasid_get_bits(&pe->val[0]) & (1 << 9); +} + /* * Setup the WPE(Write Protect Enable) field (Bit 132) of a * scalable mode PASID entry. @@ -630,6 +665,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY); pasid_set_fault_enable(pte); pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); + if (domain->dirty_tracking) + pasid_set_ssade(pte); pasid_set_present(pte); spin_unlock(&iommu->lock); @@ -639,6 +676,78 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, return 0; } +/* + * Set up dirty tracking on a second only or nested translation type. + */ +int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, u32 pasid, + bool enabled) +{ + struct pasid_entry *pte; + u16 did, pgtt; + + spin_lock(&iommu->lock); + + pte = intel_pasid_get_entry(dev, pasid); + if (!pte) { + spin_unlock(&iommu->lock); + dev_err_ratelimited( + dev, "Failed to get pasid entry of PASID %d\n", pasid); + return -ENODEV; + } + + did = domain_id_iommu(domain, iommu); + pgtt = pasid_pte_get_pgtt(pte); + if (pgtt != PASID_ENTRY_PGTT_SL_ONLY && + pgtt != PASID_ENTRY_PGTT_NESTED) { + spin_unlock(&iommu->lock); + dev_err_ratelimited( + dev, + "Dirty tracking not supported on translation type %d\n", + pgtt); + return -EOPNOTSUPP; + } + + if (pasid_get_ssade(pte) == enabled) { + spin_unlock(&iommu->lock); + return 0; + } + + if (enabled) + pasid_set_ssade(pte); + else + pasid_clear_ssade(pte); + spin_unlock(&iommu->lock); + + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(pte, sizeof(*pte)); + + /* + * From VT-d spec table 25 "Guidance to Software for Invalidations": + * + * - PASID-selective-within-Domain PASID-cache invalidation + * If (PGTT=SS or Nested) + * - Domain-selective IOTLB invalidation + * Else + * - PASID-selective PASID-based IOTLB invalidation + * - If (pasid is RID_PASID) + * - Global Device-TLB invalidation to affected functions + * Else + * - PASID-based Device-TLB invalidation (with S=1 and + * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions + */ + pasid_cache_invalidation_with_pasid(iommu, did, pasid); + + iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); + + /* Device IOTLB doesn't need to be flushed in caching mode. */ + if (!cap_caching_mode(iommu->cap)) + devtlb_invalidation_with_pasid(iommu, dev, pasid); + + return 0; +} + /* * Set up the scalable mode pasid entry for passthrough translation type. 
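All three SSADE helpers above funnel into pasid_set_bits(), a WRITE_ONCE()-based read-modify-write of one 64-bit word of the PASID entry, so the set/clear pair could equally be expressed as one parameterized helper; a sketch only (pasid_write_ssade is a hypothetical name, not in the patch):

	static inline void pasid_write_ssade(struct pasid_entry *pe, bool enable)
	{
		/* SLADE is bit 9 of the first 64-bit word of the PASID entry */
		pasid_set_bits(&pe->val[0], 1 << 9, enable ? 1 << 9 : 0);
	}

Either form must run under iommu->lock, and on non-coherent hardware the caller still needs the clflush_cache_range() plus the PASID-cache and IOTLB invalidations issued by intel_pasid_setup_dirty_tracking() above.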
*/ diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index 4e9e68c3c3888f6acd4c3ecff8ebc90f1db39955..958050b093aa24df0d8cebcb327b00371b64cb01 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -106,6 +106,10 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu, int intel_pasid_setup_second_level(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); +int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, u32 pasid, + bool enabled); int intel_pasid_setup_pass_through(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3f1029c0825e95cf7c1dd1122df472a4e8927167..1bb7a4a39d28ae790c3d9f47e6449f41c95e13cd 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1103,7 +1103,8 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain, map_size = 0; } } - + if (apply_zhaoxin_dmar_acpi_a_behavior()) + iova_reserve_domain_addr(domain, start, end); } if (!list_empty(&mappings) && iommu_is_dma_domain(domain)) @@ -1171,6 +1172,16 @@ static struct group_device *iommu_group_alloc_device(struct iommu_group *group, return ERR_PTR(ret); } +int __acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain, struct device *dev) +{ + int ret; + + ret = iommu_create_device_direct_mappings(domain, dev); + + return ret; +} +EXPORT_SYMBOL_GPL(__acpi_rmrr_device_create_direct_mappings); + /** * iommu_group_add_device - add a device to an iommu group * @group: the group into which to add the device (reference should be held) diff --git a/drivers/iommu/iommufd/Makefile b/drivers/iommu/iommufd/Makefile index 8aeba81800c512dc9e9eb1c32b3240080b503db1..34b446146961c29e7b24dc5cc890a5aa557a6ce8 100644 --- a/drivers/iommu/iommufd/Makefile +++ b/drivers/iommu/iommufd/Makefile @@ -11,3 +11,4 @@ iommufd-y := \ iommufd-$(CONFIG_IOMMUFD_TEST) += selftest.o obj-$(CONFIG_IOMMUFD) += iommufd.o +obj-$(CONFIG_IOMMUFD_DRIVER) += iova_bitmap.o diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c index ce78c3671539c77d27059d3aa11c7367f493eeff..2a41fd2b6ef8e13b0c39e512dd484ec5c9ebab21 100644 --- a/drivers/iommu/iommufd/device.c +++ b/drivers/iommu/iommufd/device.c @@ -540,7 +540,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev, } hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, - immediate_attach); + 0, immediate_attach); if (IS_ERR(hwpt)) { destroy_hwpt = ERR_CAST(hwpt); goto out_unlock; @@ -1185,6 +1185,10 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd) */ cmd->data_len = data_len; + cmd->out_capabilities = 0; + if (device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING)) + cmd->out_capabilities |= IOMMU_HW_CAP_DIRTY_TRACKING; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); out_free: kfree(data); diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index cf2c1504e20d843a6c00741f52c4fa6f66b01a90..72a5269984b0183ba262421486a09c0ccebf1077 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -5,6 +5,7 @@ #include #include +#include "../iommu-priv.h" #include "iommufd_private.h" void iommufd_hw_pagetable_destroy(struct iommufd_object *obj) @@ -60,6 +61,7 @@ int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt) * @ictx: iommufd context * @ioas: IOAS to associate the domain with * @idev: Device to get an 
iommu_domain for + * @flags: Flags from userspace * @immediate_attach: True if idev should be attached to the hwpt * * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT @@ -72,13 +74,18 @@ int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt) */ struct iommufd_hw_pagetable * iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, - struct iommufd_device *idev, bool immediate_attach) + struct iommufd_device *idev, u32 flags, + bool immediate_attach) { + const struct iommu_ops *ops = dev_iommu_ops(idev->dev); struct iommufd_hw_pagetable *hwpt; int rc; lockdep_assert_held(&ioas->mutex); + if (flags && !ops->domain_alloc_user) + return ERR_PTR(-EOPNOTSUPP); + hwpt = iommufd_object_alloc(ictx, hwpt, IOMMUFD_OBJ_HW_PAGETABLE); if (IS_ERR(hwpt)) return hwpt; @@ -88,10 +95,19 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, refcount_inc(&ioas->obj.users); hwpt->ioas = ioas; - hwpt->domain = iommu_domain_alloc(idev->dev->bus); - if (!hwpt->domain) { - rc = -ENOMEM; - goto out_abort; + if (ops->domain_alloc_user) { + hwpt->domain = ops->domain_alloc_user(idev->dev, flags); + if (IS_ERR(hwpt->domain)) { + rc = PTR_ERR(hwpt->domain); + hwpt->domain = NULL; + goto out_abort; + } + } else { + hwpt->domain = iommu_domain_alloc(idev->dev->bus); + if (!hwpt->domain) { + rc = -ENOMEM; + goto out_abort; + } } /* @@ -141,7 +157,9 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) struct iommufd_ioas *ioas; int rc; - if (cmd->flags || cmd->__reserved) + if ((cmd->flags & ~(IOMMU_HWPT_ALLOC_NEST_PARENT | + IOMMU_HWPT_ALLOC_DIRTY_TRACKING)) || + cmd->__reserved) return -EOPNOTSUPP; idev = iommufd_get_device(ucmd, cmd->dev_id); @@ -155,7 +173,8 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) } mutex_lock(&ioas->mutex); - hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, idev, false); + hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, + idev, cmd->flags, false); if (IS_ERR(hwpt)) { rc = PTR_ERR(hwpt); goto out_unlock; @@ -177,3 +196,50 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) iommufd_put_object(&idev->obj); return rc; } + +int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd) +{ + struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd; + struct iommufd_hw_pagetable *hwpt; + struct iommufd_ioas *ioas; + int rc = -EOPNOTSUPP; + bool enable; + + if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE) + return rc; + + hwpt = iommufd_get_hwpt(ucmd, cmd->hwpt_id); + if (IS_ERR(hwpt)) + return PTR_ERR(hwpt); + + ioas = hwpt->ioas; + enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE; + + rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt->domain, enable); + + iommufd_put_object(&hwpt->obj); + return rc; +} + +int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd) +{ + struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd; + struct iommufd_hw_pagetable *hwpt; + struct iommufd_ioas *ioas; + int rc = -EOPNOTSUPP; + + if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) || + cmd->__reserved) + return -EOPNOTSUPP; + + hwpt = iommufd_get_hwpt(ucmd, cmd->hwpt_id); + if (IS_ERR(hwpt)) + return PTR_ERR(hwpt); + + ioas = hwpt->ioas; + rc = iopt_read_and_clear_dirty_data(&ioas->iopt, hwpt->domain, + cmd->flags, cmd); + + iommufd_put_object(&hwpt->obj); + return rc; +} diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index e76b22939994812f0a05b32d9670a2bf39eb2721..9f193c933de6ef6ee58ef6e2dbb01c98c2d82a8d 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ 
b/drivers/iommu/iommufd/io_pagetable.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include "io_pagetable.h"
 #include "double_span.h"
@@ -432,6 +433,177 @@ int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
 	return 0;
 }
 
+struct iova_bitmap_fn_arg {
+	unsigned long flags;
+	struct io_pagetable *iopt;
+	struct iommu_domain *domain;
+	struct iommu_dirty_bitmap *dirty;
+};
+
+static int __iommu_read_and_clear_dirty(struct iova_bitmap *bitmap,
+					unsigned long iova, size_t length,
+					void *opaque)
+{
+	struct iopt_area *area;
+	struct iopt_area_contig_iter iter;
+	struct iova_bitmap_fn_arg *arg = opaque;
+	struct iommu_domain *domain = arg->domain;
+	struct iommu_dirty_bitmap *dirty = arg->dirty;
+	const struct iommu_dirty_ops *ops = domain->dirty_ops;
+	unsigned long last_iova = iova + length - 1;
+	unsigned long flags = arg->flags;
+	int ret;
+
+	iopt_for_each_contig_area(&iter, area, arg->iopt, iova, last_iova) {
+		unsigned long last = min(last_iova, iopt_area_last_iova(area));
+
+		ret = ops->read_and_clear_dirty(domain, iter.cur_iova,
+						last - iter.cur_iova + 1, flags,
+						dirty);
+		if (ret)
+			return ret;
+	}
+
+	if (!iopt_area_contig_done(&iter))
+		return -EINVAL;
+	return 0;
+}
+
+static int
+iommu_read_and_clear_dirty(struct iommu_domain *domain,
+			   struct io_pagetable *iopt, unsigned long flags,
+			   struct iommu_hwpt_get_dirty_bitmap *bitmap)
+{
+	const struct iommu_dirty_ops *ops = domain->dirty_ops;
+	struct iommu_iotlb_gather gather;
+	struct iommu_dirty_bitmap dirty;
+	struct iova_bitmap_fn_arg arg;
+	struct iova_bitmap *iter;
+	int ret = 0;
+
+	if (!ops || !ops->read_and_clear_dirty)
+		return -EOPNOTSUPP;
+
+	iter = iova_bitmap_alloc(bitmap->iova, bitmap->length,
+				 bitmap->page_size,
+				 u64_to_user_ptr(bitmap->data));
+	if (IS_ERR(iter))
+		return PTR_ERR(iter);
+
+	iommu_dirty_bitmap_init(&dirty, iter, &gather);
+
+	arg.flags = flags;
+	arg.iopt = iopt;
+	arg.domain = domain;
+	arg.dirty = &dirty;
+	ret = iova_bitmap_for_each(iter, &arg, __iommu_read_and_clear_dirty);
+
+	if (!(flags & IOMMU_DIRTY_NO_CLEAR))
+		iommu_iotlb_sync(domain, &gather);
+
+	iova_bitmap_free(iter);
+
+	return ret;
+}
+
+int iommufd_check_iova_range(struct io_pagetable *iopt,
+			     struct iommu_hwpt_get_dirty_bitmap *bitmap)
+{
+	size_t iommu_pgsize = iopt->iova_alignment;
+	u64 last_iova;
+
+	if (check_add_overflow(bitmap->iova, bitmap->length - 1, &last_iova))
+		return -EOVERFLOW;
+
+	if (bitmap->iova > ULONG_MAX || last_iova > ULONG_MAX)
+		return -EOVERFLOW;
+
+	if ((bitmap->iova & (iommu_pgsize - 1)) ||
+	    ((last_iova + 1) & (iommu_pgsize - 1)))
+		return -EINVAL;
+
+	if (!bitmap->page_size)
+		return -EINVAL;
+
+	if ((bitmap->iova & (bitmap->page_size - 1)) ||
+	    ((last_iova + 1) & (bitmap->page_size - 1)))
+		return -EINVAL;
+
+	return 0;
+}
+
+int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt,
+				   struct iommu_domain *domain,
+				   unsigned long flags,
+				   struct iommu_hwpt_get_dirty_bitmap *bitmap)
+{
+	int ret;
+
+	ret = iommufd_check_iova_range(iopt, bitmap);
+	if (ret)
+		return ret;
+
+	down_read(&iopt->iova_rwsem);
+	ret = iommu_read_and_clear_dirty(domain, iopt, flags, bitmap);
+	up_read(&iopt->iova_rwsem);
+
+	return ret;
+}
+
+static int iopt_clear_dirty_data(struct io_pagetable *iopt,
+				 struct iommu_domain *domain)
+{
+	const struct iommu_dirty_ops *ops = domain->dirty_ops;
+	struct iommu_iotlb_gather gather;
+	struct iommu_dirty_bitmap dirty;
+	struct iopt_area *area;
+	int ret = 0;
+
+	lockdep_assert_held_read(&iopt->iova_rwsem);
+
+	iommu_dirty_bitmap_init(&dirty, NULL, &gather);
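+	/*
+	 * Passing a NULL iova_bitmap above makes iommu_dirty_bitmap_record()
+	 * a no-op, so the walk below only clears the PTE dirty bits; this is
+	 * the "start from a clean snapshot" step used by
+	 * iopt_set_dirty_tracking(). For reference, a userspace consumer of
+	 * the read path, sketched with the uAPI names added by this series
+	 * ('iommufd' an open /dev/iommu fd, 'hwpt_id' a HWPT allocated with
+	 * IOMMU_HWPT_ALLOC_DIRTY_TRACKING), would look roughly like:
+	 *
+	 *	struct iommu_hwpt_set_dirty_tracking set = {
+	 *		.size = sizeof(set),
+	 *		.flags = IOMMU_HWPT_DIRTY_TRACKING_ENABLE,
+	 *		.hwpt_id = hwpt_id,
+	 *	};
+	 *	struct iommu_hwpt_get_dirty_bitmap get = {
+	 *		.size = sizeof(get),
+	 *		.hwpt_id = hwpt_id,
+	 *		.iova = iova,
+	 *		.length = length,
+	 *		.page_size = page_size,
+	 *		.data = (uintptr_t)bitmap_buf,
+	 *	};
+	 *
+	 *	ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &set);
+	 *	... run the workload ...
+	 *	ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get);
+	 *
+	 * where bitmap_buf provides one bit per page_size unit of the range
+	 * and iova/length must satisfy iommufd_check_iova_range() above.
+	 */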
+ + for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; + area = iopt_area_iter_next(area, 0, ULONG_MAX)) { + if (!area->pages) + continue; + + ret = ops->read_and_clear_dirty(domain, iopt_area_iova(area), + iopt_area_length(area), 0, + &dirty); + if (ret) + break; + } + + iommu_iotlb_sync(domain, &gather); + return ret; +} + +int iopt_set_dirty_tracking(struct io_pagetable *iopt, + struct iommu_domain *domain, bool enable) +{ + const struct iommu_dirty_ops *ops = domain->dirty_ops; + int ret = 0; + + if (!ops) + return -EOPNOTSUPP; + + down_read(&iopt->iova_rwsem); + + /* Clear dirty bits from PTEs to ensure a clean snapshot */ + if (enable) { + ret = iopt_clear_dirty_data(iopt, domain); + if (ret) + goto out_unlock; + } + + ret = ops->set_dirty_tracking(domain, enable); + +out_unlock: + up_read(&iopt->iova_rwsem); + return ret; +} + int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova, unsigned long length, struct list_head *pages_list) { diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index 2c58670011fe979b6da6687a9904408bba5f8a9a..034129130db3757ef58ee8a789747dcaa3586d7e 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -8,6 +8,9 @@ #include #include #include +#include +#include +#include struct iommu_domain; struct iommu_group; @@ -70,6 +73,13 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova, unsigned long length, unsigned long *unmapped); int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped); +int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt, + struct iommu_domain *domain, + unsigned long flags, + struct iommu_hwpt_get_dirty_bitmap *bitmap); +int iopt_set_dirty_tracking(struct io_pagetable *iopt, + struct iommu_domain *domain, bool enable); + void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova, unsigned long length); int iopt_table_add_domain(struct io_pagetable *iopt, @@ -222,6 +232,8 @@ int iommufd_option_rlimit_mode(struct iommu_option *cmd, struct iommufd_ctx *ictx); int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd); +int iommufd_check_iova_range(struct io_pagetable *iopt, + struct iommu_hwpt_get_dirty_bitmap *bitmap); /* * A HW pagetable is called an iommu_domain inside the kernel. 
This user object @@ -240,9 +252,20 @@ struct iommufd_hw_pagetable { struct list_head hwpt_item; }; +static inline struct iommufd_hw_pagetable * +iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id) +{ + return container_of(iommufd_get_object(ucmd->ictx, id, + IOMMUFD_OBJ_HW_PAGETABLE), + struct iommufd_hw_pagetable, obj); +} +int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd); +int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd); + struct iommufd_hw_pagetable * iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, - struct iommufd_device *idev, bool immediate_attach); + struct iommufd_device *idev, u32 flags, + bool immediate_attach); int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt); int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, struct iommufd_device *idev); diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h index 3f3644375bf13c8fa78600f1f9e15d893195af65..1f2e93d3d4e87f758f0eb67e93343bf86f813f16 100644 --- a/drivers/iommu/iommufd/iommufd_test.h +++ b/drivers/iommu/iommufd/iommufd_test.h @@ -19,6 +19,8 @@ enum { IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT, IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE, IOMMU_TEST_OP_ACCESS_REPLACE_IOAS, + IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS, + IOMMU_TEST_OP_DIRTY, }; enum { @@ -40,6 +42,10 @@ enum { MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES = 1 << 0, }; +enum { + MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0, +}; + struct iommu_test_cmd { __u32 size; __u32 op; @@ -56,6 +62,13 @@ struct iommu_test_cmd { /* out_idev_id is the standard iommufd_bind object */ __u32 out_idev_id; } mock_domain; + struct { + __u32 out_stdev_id; + __u32 out_hwpt_id; + __u32 out_idev_id; + /* Expand mock_domain to set mock device flags */ + __u32 dev_flags; + } mock_domain_flags; struct { __u32 pt_id; } mock_domain_replace; @@ -95,6 +108,14 @@ struct iommu_test_cmd { struct { __u32 ioas_id; } access_replace_ioas; + struct { + __u32 flags; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 uptr; + __aligned_u64 out_nr_dirty; + } dirty; }; __u32 last; }; diff --git a/drivers/vfio/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c similarity index 98% rename from drivers/vfio/iova_bitmap.c rename to drivers/iommu/iommufd/iova_bitmap.c index 7af5b204990bb52d6747506cac55b5414cde748c..a365e18128da52240b4ef649b9d6c607a060a2af 100644 --- a/drivers/vfio/iova_bitmap.c +++ b/drivers/iommu/iommufd/iova_bitmap.c @@ -269,6 +269,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, iova_bitmap_free(bitmap); return ERR_PTR(rc); } +EXPORT_SYMBOL_NS_GPL(iova_bitmap_alloc, IOMMUFD); /** * iova_bitmap_free() - Frees an IOVA bitmap object @@ -290,6 +291,7 @@ void iova_bitmap_free(struct iova_bitmap *bitmap) kfree(bitmap); } +EXPORT_SYMBOL_NS_GPL(iova_bitmap_free, IOMMUFD); /* * Returns the remaining bitmap indexes from mapped_total_index to process for @@ -388,6 +390,7 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, return ret; } +EXPORT_SYMBOL_NS_GPL(iova_bitmap_for_each, IOMMUFD); /** * iova_bitmap_set() - Records an IOVA range in bitmap @@ -425,4 +428,4 @@ void iova_bitmap_set(struct iova_bitmap *bitmap, cur_bit += nbits; } while (cur_bit <= last_bit); } -EXPORT_SYMBOL_GPL(iova_bitmap_set); +EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD); diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c index e71523cbd0de4352479aadeb8f33dd6d2ba87df8..d50f42a730aa36c63b408be1174d8f553880467b 100644 --- 
a/drivers/iommu/iommufd/main.c +++ b/drivers/iommu/iommufd/main.c @@ -307,6 +307,8 @@ union ucmd_buffer { struct iommu_destroy destroy; struct iommu_hw_info info; struct iommu_hwpt_alloc hwpt; + struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap; + struct iommu_hwpt_set_dirty_tracking set_dirty_tracking; struct iommu_ioas_alloc alloc; struct iommu_ioas_allow_iovas allow_iovas; struct iommu_ioas_copy ioas_copy; @@ -342,6 +344,10 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = { __reserved), IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc, __reserved), + IOCTL_OP(IOMMU_HWPT_GET_DIRTY_BITMAP, iommufd_hwpt_get_dirty_bitmap, + struct iommu_hwpt_get_dirty_bitmap, data), + IOCTL_OP(IOMMU_HWPT_SET_DIRTY_TRACKING, iommufd_hwpt_set_dirty_tracking, + struct iommu_hwpt_set_dirty_tracking, __reserved), IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl, struct iommu_ioas_alloc, out_ioas_id), IOCTL_OP(IOMMU_IOAS_ALLOW_IOVAS, iommufd_ioas_allow_iovas, @@ -552,5 +558,6 @@ MODULE_ALIAS_MISCDEV(VFIO_MINOR); MODULE_ALIAS("devname:vfio/vfio"); #endif MODULE_IMPORT_NS(IOMMUFD_INTERNAL); +MODULE_IMPORT_NS(IOMMUFD); MODULE_DESCRIPTION("I/O Address Space Management for passthrough devices"); MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 00b794d74e03be7a6641cad3741f21d746b54bee..22c60651d60fd25a68cf7d0a8302d20f68087c03 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -24,6 +24,7 @@ static struct platform_device *selftest_iommu_dev; size_t iommufd_test_memory_limit = 65536; enum { + MOCK_DIRTY_TRACK = 1, MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2, /* @@ -36,6 +37,7 @@ enum { _MOCK_PFN_START = MOCK_PFN_MASK + 1, MOCK_PFN_START_IOVA = _MOCK_PFN_START, MOCK_PFN_LAST_IOVA = _MOCK_PFN_START, + MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1, }; /* @@ -101,6 +103,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, } struct mock_iommu_domain { + unsigned long flags; struct iommu_domain domain; struct xarray pfns; }; @@ -111,6 +114,7 @@ enum selftest_obj_type { struct mock_dev { struct device dev; + unsigned long flags; }; struct selftest_obj { @@ -133,6 +137,11 @@ static void mock_domain_blocking_free(struct iommu_domain *domain) static int mock_domain_nop_attach(struct iommu_domain *domain, struct device *dev) { + struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); + + if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY)) + return -EINVAL; + return 0; } @@ -161,6 +170,69 @@ static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type) return info; } +static int mock_domain_set_dirty_tracking(struct iommu_domain *domain, + bool enable) +{ + struct mock_iommu_domain *mock = + container_of(domain, struct mock_iommu_domain, domain); + unsigned long flags = mock->flags; + + if (enable && !domain->dirty_ops) + return -EINVAL; + + /* No change? */ + if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK))) + return 0; + + flags = (enable ? 
flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);
+
+	mock->flags = flags;
+	return 0;
+}
+
+static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
+					    unsigned long iova, size_t size,
+					    unsigned long flags,
+					    struct iommu_dirty_bitmap *dirty)
+{
+	struct mock_iommu_domain *mock =
+		container_of(domain, struct mock_iommu_domain, domain);
+	unsigned long i, max = size / MOCK_IO_PAGE_SIZE;
+	void *ent, *old;
+
+	if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
+		return -EINVAL;
+
+	for (i = 0; i < max; i++) {
+		unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE;
+
+		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
+		if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) {
+			/* Clear dirty */
+			if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
+				unsigned long val;
+
+				val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
+				old = xa_store(&mock->pfns,
+					       cur / MOCK_IO_PAGE_SIZE,
+					       xa_mk_value(val), GFP_KERNEL);
+				WARN_ON_ONCE(ent != old);
+			}
+			iommu_dirty_bitmap_record(dirty, cur,
+						  MOCK_IO_PAGE_SIZE);
+		}
+	}
+
+	return 0;
+}
+
+static const struct iommu_dirty_ops dirty_ops = {
+	.set_dirty_tracking = mock_domain_set_dirty_tracking,
+	.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
+};
+
+static const struct iommu_ops mock_ops;
+
 static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type)
 {
 	struct mock_iommu_domain *mock;
@@ -177,10 +249,34 @@ static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type)
 	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
 	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
 	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
+	mock->domain.ops = mock_ops.default_domain_ops;
+	mock->domain.type = iommu_domain_type;
 	xa_init(&mock->pfns);
 	return &mock->domain;
 }
 
+static struct iommu_domain *
+mock_domain_alloc_user(struct device *dev, u32 flags)
+{
+	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
+	struct iommu_domain *domain;
+
+	if (flags &
+	    (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
+	    (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	domain = mock_domain_alloc(IOMMU_DOMAIN_UNMANAGED);
+	if (!domain)
+		return ERR_PTR(-ENOMEM);
+	if (!(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
+		domain->dirty_ops = &dirty_ops;
+	return domain;
+}
+
 static void mock_domain_free(struct iommu_domain *domain)
 {
 	struct mock_iommu_domain *mock =
@@ -258,7 +354,7 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
 
 		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
 			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
-			WARN_ON(!ent);
+
 			/*
 			 * iommufd generates unmaps that must be a strict
 			 * superset of the maps performed, so every starting
 			 * /ending IOVA should have been an IOVA passed to
 			 * map_pages.
 			 */
 			if (first) {
-				WARN_ON(!(xa_to_value(ent) &
-					  MOCK_PFN_START_IOVA));
+				WARN_ON(ent && !(xa_to_value(ent) &
+						 MOCK_PFN_START_IOVA));
 				first = false;
 			}
 			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
-				WARN_ON(!(xa_to_value(ent) &
-					  MOCK_PFN_LAST_IOVA));
+				WARN_ON(ent && !(xa_to_value(ent) &
+						 MOCK_PFN_LAST_IOVA));
 
 			iova += MOCK_IO_PAGE_SIZE;
 			ret += MOCK_IO_PAGE_SIZE;
@@ -298,7 +394,18 @@ static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
 
 static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
 {
-	return cap == IOMMU_CAP_CACHE_COHERENCY;
+	struct
mock_dev *mdev = container_of(dev, struct mock_dev, dev); + + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: + return true; + case IOMMU_CAP_DIRTY_TRACKING: + return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY); + default: + break; + } + + return false; } static void mock_domain_set_plaform_dma_ops(struct device *dev) @@ -322,6 +429,7 @@ static const struct iommu_ops mock_ops = { .pgsize_bitmap = MOCK_IO_PAGE_SIZE, .hw_info = mock_domain_hw_info, .domain_alloc = mock_domain_alloc, + .domain_alloc_user = mock_domain_alloc_user, .capable = mock_domain_capable, .set_platform_dma_ops = mock_domain_set_plaform_dma_ops, .device_group = generic_device_group, @@ -377,16 +485,20 @@ static void mock_dev_release(struct device *dev) kfree(mdev); } -static struct mock_dev *mock_dev_create(void) +static struct mock_dev *mock_dev_create(unsigned long dev_flags) { struct mock_dev *mdev; int rc; + if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY)) + return ERR_PTR(-EINVAL); + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return ERR_PTR(-ENOMEM); device_initialize(&mdev->dev); + mdev->flags = dev_flags; mdev->dev.release = mock_dev_release; mdev->dev.bus = &iommufd_mock_bus_type.bus; @@ -422,6 +534,7 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, struct iommufd_device *idev; struct selftest_obj *sobj; u32 pt_id = cmd->id; + u32 dev_flags = 0; u32 idev_id; int rc; @@ -432,7 +545,10 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, sobj->idev.ictx = ucmd->ictx; sobj->type = TYPE_IDEV; - sobj->idev.mock_dev = mock_dev_create(); + if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS) + dev_flags = cmd->mock_domain_flags.dev_flags; + + sobj->idev.mock_dev = mock_dev_create(dev_flags); if (IS_ERR(sobj->idev.mock_dev)) { rc = PTR_ERR(sobj->idev.mock_dev); goto out_sobj; @@ -992,6 +1108,73 @@ static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE); static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH == __IOMMUFD_ACCESS_RW_SLOW_PATH); +static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, + unsigned long iova, size_t length, + unsigned long page_size, void __user *uptr, + u32 flags) +{ + unsigned long bitmap_size, i, max; + struct iommu_test_cmd *cmd = ucmd->cmd; + struct iommufd_hw_pagetable *hwpt; + struct mock_iommu_domain *mock; + int rc, count = 0; + void *tmp; + + if (!page_size || !length || iova % page_size || length % page_size || + !uptr) + return -EINVAL; + + hwpt = get_md_pagetable(ucmd, mockpt_id, &mock); + if (IS_ERR(hwpt)) + return PTR_ERR(hwpt); + + if (!(mock->flags & MOCK_DIRTY_TRACK)) { + rc = -EINVAL; + goto out_put; + } + + max = length / page_size; + bitmap_size = max / BITS_PER_BYTE; + + tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT); + if (!tmp) { + rc = -ENOMEM; + goto out_put; + } + + if (copy_from_user(tmp, uptr, bitmap_size)) { + rc = -EFAULT; + goto out_free; + } + + for (i = 0; i < max; i++) { + unsigned long cur = iova + i * page_size; + void *ent, *old; + + if (!test_bit(i, (unsigned long *)tmp)) + continue; + + ent = xa_load(&mock->pfns, cur / page_size); + if (ent) { + unsigned long val; + + val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA; + old = xa_store(&mock->pfns, cur / page_size, + xa_mk_value(val), GFP_KERNEL); + WARN_ON_ONCE(ent != old); + count++; + } + } + + cmd->dirty.out_nr_dirty = count; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); +out_free: + kvfree(tmp); +out_put: + iommufd_put_object(&hwpt->obj); + return rc; +} + void iommufd_selftest_destroy(struct iommufd_object *obj) { struct 
selftest_obj *sobj = container_of(obj, struct selftest_obj, obj); @@ -1015,6 +1198,7 @@ int iommufd_test(struct iommufd_ucmd *ucmd) cmd->add_reserved.start, cmd->add_reserved.length); case IOMMU_TEST_OP_MOCK_DOMAIN: + case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS: return iommufd_test_mock_domain(ucmd, cmd); case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE: return iommufd_test_mock_domain_replace( @@ -1056,6 +1240,12 @@ int iommufd_test(struct iommufd_ucmd *ucmd) return -EINVAL; iommufd_test_memory_limit = cmd->memory_limit.limit; return 0; + case IOMMU_TEST_OP_DIRTY: + return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova, + cmd->dirty.length, + cmd->dirty.page_size, + u64_to_user_ptr(cmd->dirty.uptr), + cmd->dirty.flags); default: return -EOPNOTSUPP; } diff --git a/drivers/iommu/loongarch_iommu.c b/drivers/iommu/loongarch_iommu.c new file mode 100644 index 0000000000000000000000000000000000000000..490d68f4a91310bf118d0e0e37006b0c2efd1ef4 --- /dev/null +++ b/drivers/iommu/loongarch_iommu.c @@ -0,0 +1,1834 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Loongson IOMMU Driver + * + * Copyright (C) 2024 Loongson Technology Ltd. + * Author: Lv Chen + * Wang Yang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "loongarch_iommu.h" + +MODULE_LICENSE("GPL"); + +#define LOOP_TIMEOUT 100000 + +#define IVRS_HEADER_LENGTH 48 +#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40 +#define IVHD_DEV_ALL 0x01 +#define IVHD_DEV_SELECT 0x02 +#define IVHD_DEV_SELECT_RANGE_START 0x03 +#define IVHD_DEV_RANGE_END 0x04 +#define IVHD_DEV_ALIAS 0x42 +#define IVHD_DEV_EXT_SELECT 0x46 +#define IVHD_DEV_ACPI_HID 0xf0 + +#define IVHD_HEAD_TYPE10 0x10 +#define IVHD_HEAD_TYPE11 0x11 +#define IVHD_HEAD_TYPE40 0x40 + +#define MAX_BDF_NUM 0xffff + +#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) + +/* + * structure describing one IOMMU in the ACPI table. Typically followed by one + * or more ivhd_entrys. + */ +struct ivhd_header { + u8 type; + u8 flags; + u16 length; + u16 devid; + u16 cap_ptr; + u64 mmio_phys; + u16 pci_seg; + u16 info; + u32 efr_attr; + + /* Following only valid on IVHD type 11h and 40h */ + u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */ + u64 res; +} __packed; + +/* + * A device entry describing which devices a specific IOMMU translates and + * which requestor ids they use. 
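+ *
+ * These IVHD structures and the IVHD_* constants follow the ACPI IVRS
+ * table layout also used by the AMD IOMMU driver.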
+ */ +struct ivhd_entry { + u8 type; + u16 devid; + u8 flags; + u32 ext; + u32 hidh; + u64 cid; + u8 uidf; + u8 uidl; + u8 uid; +} __packed; + +struct iommu_callback_data { + const struct iommu_ops *ops; +}; + +LIST_HEAD(la_rlookup_iommu_list); +LIST_HEAD(la_iommu_list); /* list of all loongarch + * IOMMUs in the system + */ + +static u32 rlookup_table_size; /* size if the rlookup table */ +static int la_iommu_target_ivhd_type; +u16 la_iommu_last_bdf; /* largest PCI device id + * we have to handle + */ + +int loongarch_iommu_disable; + +#define iommu_write_regl(iommu, off, val) \ + writel(val, iommu->confbase + off) +#define iommu_read_regl(iommu, off) readl(iommu->confbase + off) + +static void switch_huge_to_page(unsigned long *ptep, unsigned long start); + +static void iommu_translate_disable(struct loongarch_iommu *iommu) +{ + u32 val; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return; + } + + /* Disable */ + val = iommu_read_regl(iommu, LA_IOMMU_PFM_CNT_EN); + val &= ~(1 << 31); + iommu_write_regl(iommu, LA_IOMMU_PFM_CNT_EN, val); + + /* Write cmd */ + val = iommu_read_regl(iommu, LA_IOMMU_CMD); + val &= 0xfffffffc; + iommu_write_regl(iommu, LA_IOMMU_CMD, val); +} + +static void iommu_translate_enable(struct loongarch_iommu *iommu) +{ + u32 val = 0; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return; + } + + /* Enable use mem */ + val = iommu_read_regl(iommu, LA_IOMMU_PFM_CNT_EN); + val |= (1 << 29); + iommu_write_regl(iommu, LA_IOMMU_PFM_CNT_EN, val); + + /* Enable */ + val = iommu_read_regl(iommu, LA_IOMMU_PFM_CNT_EN); + val |= (1 << 31); + iommu_write_regl(iommu, LA_IOMMU_PFM_CNT_EN, val); + + /* Write cmd */ + val = iommu_read_regl(iommu, LA_IOMMU_CMD); + val &= 0xfffffffc; + iommu_write_regl(iommu, LA_IOMMU_CMD, val); +} + +static bool la_iommu_capable(struct device *dev, enum iommu_cap cap) +{ + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: + return true; + default: + return false; + } +} + +static struct dom_info *to_dom_info(struct iommu_domain *dom) +{ + return container_of(dom, struct dom_info, domain); +} + +static void flush_iotlb_by_domain_id(struct loongarch_iommu *iommu, u16 domain_id, bool read) +{ + u32 val; + u32 flush_read_tlb = read ? 
1 : 0;
+
+	if (iommu == NULL) {
+		pr_err("%s iommu is NULL\n", __func__);
+		return;
+	}
+
+	val = iommu_read_regl(iommu, LA_IOMMU_EIVDB);
+	val &= ~0xf0000;
+	val |= ((u32)domain_id) << 16;
+	iommu_write_regl(iommu, LA_IOMMU_EIVDB, val);
+
+	/* Flush all */
+	val = iommu_read_regl(iommu, LA_IOMMU_VBTC);
+	val &= ~0x10f;
+	val |= (flush_read_tlb << 8) | 4;
+	iommu_write_regl(iommu, LA_IOMMU_VBTC, val);
+}
+
+static int flush_pgtable_is_busy(struct loongarch_iommu *iommu)
+{
+	u32 val;
+
+	val = iommu_read_regl(iommu, LA_IOMMU_VBTC);
+	return val & IOMMU_PGTABLE_BUSY;
+}
+
+static int iommu_flush_iotlb_by_domain(struct la_iommu_dev_data *dev_data)
+{
+	u32 retry = 0;
+	struct loongarch_iommu *iommu;
+	u16 domain_id;
+
+	if (dev_data == NULL) {
+		pr_err("%s dev_data is NULL\n", __func__);
+		return 0;
+	}
+
+	if (dev_data->iommu == NULL) {
+		pr_err("%s iommu is NULL\n", __func__);
+		return 0;
+	}
+
+	if (dev_data->iommu_entry == NULL) {
+		pr_err("%s iommu_entry is NULL\n", __func__);
+		return 0;
+	}
+
+	iommu = dev_data->iommu;
+	domain_id = dev_data->iommu_entry->id;
+
+	flush_iotlb_by_domain_id(iommu, domain_id, 0);
+	while (flush_pgtable_is_busy(iommu)) {
+		if (retry == LOOP_TIMEOUT) {
+			pr_err("LA-IOMMU: %s %d iotlb flush busy\n", __func__, __LINE__);
+			return -EIO;
+		}
+		retry++;
+		udelay(1);
+	}
+
+	retry = 0;
+	flush_iotlb_by_domain_id(iommu, domain_id, 1);
+	while (flush_pgtable_is_busy(iommu)) {
+		if (retry == LOOP_TIMEOUT) {
+			pr_err("LA-IOMMU: %s %d iotlb flush busy\n", __func__, __LINE__);
+			return -EIO;
+		}
+		retry++;
+		udelay(1);
+	}
+	iommu_translate_enable(iommu);
+	return 0;
+}
+
+static int update_dev_table(struct la_iommu_dev_data *dev_data, int flag)
+{
+	u32 val = 0;
+	int index;
+	unsigned short bdf;
+	struct loongarch_iommu *iommu;
+	u16 domain_id;
+
+	if (dev_data == NULL) {
+		pr_err("%s dev_data is NULL\n", __func__);
+		return 0;
+	}
+
+	if (dev_data->iommu == NULL) {
+		pr_err("%s iommu is NULL\n", __func__);
+		return 0;
+	}
+
+	if (dev_data->iommu_entry == NULL) {
+		pr_err("%s iommu_entry is NULL\n", __func__);
+		return 0;
+	}
+
+	iommu = dev_data->iommu;
+	domain_id = dev_data->iommu_entry->id;
+	bdf = dev_data->bdf;
+
+	/* Set device table */
+	if (flag) {
+		index = find_first_zero_bit(iommu->devtable_bitmap,
+					    MAX_ATTACHED_DEV_ID);
+		if (index < MAX_ATTACHED_DEV_ID) {
+			__set_bit(index, iommu->devtable_bitmap);
+			dev_data->index = index;
+		} else {
+			pr_err("%s get id from dev table failed\n", __func__);
+			return 0;
+		}
+
+		pr_info("%s bdf %x domain_id %d iommu devid %x iommu segment %d flag %x\n",
+			__func__, bdf, domain_id, iommu->devid,
+			iommu->segment, flag);
+
+		val = bdf & 0xffff;
+		val |= ((domain_id & 0xf) << 16);	/* domain id */
+		val |= ((index & 0xf) << 24);		/* index */
+		val |= (0x1 << 20);			/* valid */
+		iommu_write_regl(iommu, LA_IOMMU_EIVDB, val);
+
+		val = (0x1 << 31) | (0xf << 0);
+		val |= (0x1 << 29);	/* 1: use main memory */
+		iommu_write_regl(iommu, LA_IOMMU_PFM_CNT_EN, val);
+
+		val = iommu_read_regl(iommu, LA_IOMMU_CMD);
+		val &= 0xfffffffc;
+		iommu_write_regl(iommu, LA_IOMMU_CMD, val);
+	} else {
+		/* Flush device table */
+		index = dev_data->index;
+		pr_info("%s bdf %x domain_id %d iommu devid %x iommu segment %d flag %x\n",
+			__func__, bdf, domain_id, iommu->devid,
+			iommu->segment, flag);
+
+		val = iommu_read_regl(iommu, LA_IOMMU_EIVDB);
+		val &= ~(0xffffffff);
+		val |= ((index & 0xf) << 24);	/* index */
+		iommu_write_regl(iommu, LA_IOMMU_EIVDB, val);
+
+		val = iommu_read_regl(iommu, LA_IOMMU_PFM_CNT_EN);
+		val |= (0x1 << 29);	/* 1: use main memory */
+
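+		/*
+		 * The two polling loops in iommu_flush_iotlb_by_domain()
+		 * above (and the one in iommu_flush_iotlb() below) share a
+		 * pattern that could be factored out; a sketch only, using
+		 * nothing beyond the helpers defined in this file:
+		 *
+		 *	static int wait_pgtable_idle(struct loongarch_iommu *iommu)
+		 *	{
+		 *		u32 retry = 0;
+		 *
+		 *		while (flush_pgtable_is_busy(iommu)) {
+		 *			if (retry++ == LOOP_TIMEOUT)
+		 *				return -EIO;
+		 *			udelay(1);
+		 *		}
+		 *		return 0;
+		 *	}
+		 */
+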
iommu_write_regl(iommu, LA_IOMMU_PFM_CNT_EN, val); + + if (index < MAX_ATTACHED_DEV_ID) + __clear_bit(index, iommu->devtable_bitmap); + } + + iommu_flush_iotlb_by_domain(dev_data); + return 0; +} + +static void flush_iotlb(struct loongarch_iommu *iommu) +{ + u32 val; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return; + } + + /* Flush all tlb */ + val = iommu_read_regl(iommu, LA_IOMMU_VBTC); + val &= ~0x1f; + val |= 0x5; + iommu_write_regl(iommu, LA_IOMMU_VBTC, val); +} + +static int iommu_flush_iotlb(struct loongarch_iommu *iommu) +{ + u32 retry = 0; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return 0; + } + + flush_iotlb(iommu); + while (flush_pgtable_is_busy(iommu)) { + if (retry == LOOP_TIMEOUT) { + pr_err("LA-IOMMU: iotlb flush busy\n"); + return -EIO; + } + retry++; + udelay(1); + } + iommu_translate_enable(iommu); + return 0; +} + +static void la_iommu_flush_iotlb_all(struct iommu_domain *domain) +{ + struct dom_info *priv = to_dom_info(domain); + struct iommu_info *info; + + spin_lock(&priv->lock); + list_for_each_entry(info, &priv->iommu_devlist, list) + iommu_flush_iotlb(info->iommu); + spin_unlock(&priv->lock); +} + +static void do_attach(struct iommu_info *info, struct la_iommu_dev_data *dev_data) +{ + if (dev_data->count) + return; + + dev_data->count++; + dev_data->iommu_entry = info; + + spin_lock(&info->devlock); + list_add(&dev_data->list, &info->dev_list); + info->dev_cnt += 1; + spin_unlock(&info->devlock); + + update_dev_table(dev_data, 1); +} + +static void do_detach(struct la_iommu_dev_data *dev_data) +{ + struct iommu_info *info; + + if (!dev_data || !dev_data->iommu_entry || (dev_data->count == 0)) { + pr_err("%s dev_data or iommu_entry is NULL", __func__); + return; + } + dev_data->count--; + info = dev_data->iommu_entry; + list_del(&dev_data->list); + info->dev_cnt -= 1; + update_dev_table(dev_data, 0); + dev_data->iommu_entry = NULL; +} + +static void detach_all_dev_by_domain(struct iommu_info *info) +{ + struct la_iommu_dev_data *dev_data = NULL; + + spin_lock(&info->devlock); + while (!list_empty(&info->dev_list)) { + dev_data = list_first_entry(&info->dev_list, + struct la_iommu_dev_data, list); + do_detach(dev_data); + } + spin_unlock(&info->devlock); +} + +static int domain_id_alloc(struct loongarch_iommu *iommu) +{ + int id = -1; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return id; + } + spin_lock(&iommu->domain_bitmap_lock); + id = find_first_zero_bit(iommu->domain_bitmap, MAX_DOMAIN_ID); + if (id < MAX_DOMAIN_ID) + __set_bit(id, iommu->domain_bitmap); + spin_unlock(&iommu->domain_bitmap_lock); + if (id >= MAX_DOMAIN_ID) { + id = -1; + pr_err("LA-IOMMU: Alloc domain id over max domain id\n"); + } + return id; +} + +static void domain_id_free(struct loongarch_iommu *iommu, int id) +{ + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return; + } + if ((id >= 0) && (id < MAX_DOMAIN_ID)) { + spin_lock(&iommu->domain_bitmap_lock); + __clear_bit(id, iommu->domain_bitmap); + spin_unlock(&iommu->domain_bitmap_lock); + } +} + +/* + * Check whether the system has a priv. 
+ * If so, it returns 1; otherwise it returns 0.
+ */
+static int has_dom(struct loongarch_iommu *iommu)
+{
+	int ret = 0;
+
+	spin_lock(&iommu->dom_info_lock);
+	if (!list_empty(&iommu->dom_list))
+		ret = 1;
+	spin_unlock(&iommu->dom_info_lock);
+	return ret;
+}
+
+/*
+ * Look up the entry for a private domain in the iommu's domain list
+ */
+static struct dom_entry *find_domain_in_list(struct loongarch_iommu *iommu, struct dom_info *priv)
+{
+	struct dom_entry *entry, *found = NULL;
+
+	if (priv == NULL)
+		return found;
+	spin_lock(&iommu->dom_info_lock);
+	list_for_each_entry(entry, &iommu->dom_list, list) {
+		if (entry->domain_info == priv) {
+			found = entry;
+			break;
+		}
+	}
+	spin_unlock(&iommu->dom_info_lock);
+	return found;
+}
+
+/*
+ * Add a private domain to the iommu's domain list
+ */
+static void add_domain_to_list(struct loongarch_iommu *iommu, struct dom_info *priv)
+{
+	struct dom_entry *entry;
+
+	if (priv == NULL)
+		return;
+	entry = find_domain_in_list(iommu, priv);
+	if (entry != NULL)
+		return;
+	entry = kzalloc(sizeof(struct dom_entry), GFP_KERNEL);
+	if (entry == NULL)
+		return;
+	entry->domain_info = priv;
+	spin_lock(&iommu->dom_info_lock);
+	list_add(&entry->list, &iommu->dom_list);
+	spin_unlock(&iommu->dom_info_lock);
+}
+
+static void del_domain_from_list(struct loongarch_iommu *iommu, struct dom_info *priv)
+{
+	struct dom_entry *entry;
+
+	entry = find_domain_in_list(iommu, priv);
+	if (entry == NULL)
+		return;
+	spin_lock(&iommu->dom_info_lock);
+	list_del(&entry->list);
+	spin_unlock(&iommu->dom_info_lock);
+	kfree(entry);
+}
+
+static void free_pagetable(void *pt_base, int level)
+{
+	int i;
+	unsigned long *ptep, *pgtable;
+
+	ptep = pt_base;
+	if (level == IOMMU_PT_LEVEL1) {
+		kfree(pt_base);
+		return;
+	}
+	for (i = 0; i < IOMMU_PTRS_PER_LEVEL; i++, ptep++) {
+		if (!iommu_pte_present(ptep))
+			continue;
+
+		if (((level - 1) == IOMMU_PT_LEVEL1) && iommu_pte_huge(ptep)) {
+			*ptep = 0;
+			continue;
+		}
+
+		pgtable = phys_to_virt(*ptep & IOMMU_PAGE_MASK);
+		free_pagetable(pgtable, level - 1);
+	}
+	kfree(pt_base);
+}
+
+static void iommu_free_pagetable(struct dom_info *info)
+{
+	free_pagetable(info->pgd, IOMMU_LEVEL_MAX);
+	info->pgd = NULL;
+}
+
+static struct dom_info *alloc_dom_info(void)
+{
+	struct dom_info *info;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (info == NULL)
+		return NULL;
+
+	info->pgd = kzalloc(IOMMU_PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+	if (info->pgd == NULL) {
+		kfree(info);
+		return NULL;
+	}
+	INIT_LIST_HEAD(&info->iommu_devlist);
+	spin_lock_init(&info->lock);
+	mutex_init(&info->ptl_lock);
+	info->domain.geometry.aperture_start = 0;
+	info->domain.geometry.aperture_end = ~0ULL;
+	info->domain.geometry.force_aperture = true;
+
+	return info;
+}
+
+static void dom_info_free(struct dom_info *info)
+{
+	if (info->pgd != NULL) {
+		kfree(info->pgd);
+		info->pgd = NULL;
+	}
+	kfree(info);
+}
+
+static struct iommu_domain *la_iommu_domain_alloc(unsigned int type)
+{
+	struct dom_info *info;
+
+	switch (type) {
+	case IOMMU_DOMAIN_BLOCKED:
+	case IOMMU_DOMAIN_UNMANAGED:
+		info = alloc_dom_info();
+		if (info == NULL)
+			return NULL;
+		break;
+	default:
+		return NULL;
+	}
+	return &info->domain;
+}
+
+void domain_deattach_iommu(struct dom_info *priv, struct iommu_info *info)
+{
+	if ((priv == NULL) || (info == NULL) ||
+	    (info->dev_cnt != 0) || (info->iommu == NULL)) {
+		pr_err("%s invalid parameter\n", __func__);
+		return;
+	}
+	del_domain_from_list(info->iommu, priv);
+	domain_id_free(info->iommu, info->id);
+	spin_lock(&priv->lock);
+	list_del(&info->list);
+	spin_unlock(&priv->lock);
+	kfree(info);
+}
+
+static
void la_iommu_domain_free(struct iommu_domain *domain) +{ + struct dom_info *priv; + struct loongarch_iommu *iommu = NULL; + struct iommu_info *info, *tmp; + + priv = to_dom_info(domain); + spin_lock(&priv->lock); + list_for_each_entry_safe(info, tmp, &priv->iommu_devlist, list) { + if (info->dev_cnt > 0) + detach_all_dev_by_domain(info); + iommu = info->iommu; + spin_unlock(&priv->lock); + domain_deattach_iommu(priv, info); + spin_lock(&priv->lock); + iommu_flush_iotlb(iommu); + if (!has_dom(iommu)) + iommu_translate_disable(iommu); + } + spin_unlock(&priv->lock); + mutex_lock(&priv->ptl_lock); + iommu_free_pagetable(priv); + mutex_unlock(&priv->ptl_lock); + dom_info_free(priv); +} + +struct iommu_rlookup_entry *lookup_rlooptable(int pcisegment) +{ + struct iommu_rlookup_entry *rlookupentry = NULL; + + list_for_each_entry(rlookupentry, &la_rlookup_iommu_list, list) { + if (rlookupentry->pcisegment == pcisegment) + return rlookupentry; + } + return NULL; +} + +struct loongarch_iommu *find_iommu_by_dev(struct pci_dev *pdev) +{ + int pcisegment; + unsigned short devid; + struct iommu_rlookup_entry *rlookupentry = NULL; + struct loongarch_iommu *iommu = NULL; + struct pci_bus *bus = pdev->bus; + + devid = PCI_DEVID(bus->number, pdev->devfn); + pcisegment = pci_domain_nr(bus); + rlookupentry = lookup_rlooptable(pcisegment); + if (rlookupentry == NULL) { + pr_info("%s find segment %d rlookupentry failed\n", __func__, + pcisegment); + return iommu; + } + iommu = rlookupentry->rlookup_table[devid]; + if (iommu && (!iommu->confbase)) + iommu = NULL; + return iommu; +} + +struct iommu_device *iommu_init_device(struct device *dev) +{ + struct la_iommu_dev_data *dev_data; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_bus *bus = pdev->bus; + unsigned short devid; + struct loongarch_iommu *iommu = NULL; + struct iommu_device *iommu_dev = ERR_PTR(-ENODEV); + + if (!dev_is_pci(dev)) + return iommu_dev; + + if (dev->archdata.iommu != NULL || bus == NULL) { + pr_info("LA-IOMMU: bdf:0x%x has added\n", pdev->devfn); + return iommu_dev; + } + iommu = find_iommu_by_dev(pdev); + if (iommu == NULL) { + pci_info(pdev, "%s find iommu failed by dev\n", __func__); + return iommu_dev; + } + dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); + if (!dev_data) + return iommu_dev; + devid = PCI_DEVID(bus->number, pdev->devfn); + dev_data->bdf = devid; + + pci_info(pdev, "%s bdf %#x iommu dev id %#x\n", __func__, dev_data->bdf, iommu->devid); + /* The initial state is 0, and 1 is added only when attach dev */ + dev_data->count = 0; + dev_data->iommu = iommu; + dev_data->dev = dev; + dev->archdata.iommu = dev_data; + iommu_dev = &iommu->iommu_dev; + return iommu_dev; +} + +struct iommu_device *la_iommu_probe_device(struct device *dev) +{ + return iommu_init_device(dev); +} + +static struct iommu_group *la_iommu_device_group(struct device *dev) +{ + struct iommu_group *group; + + /* + * We don't support devices sharing stream IDs other than PCI RID + * aliases, since the necessary ID-to-device lookup becomes rather + * impractical given a potential sparse 32-bit stream ID space. 
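+ *
+ * This is the usual arrangement: pci_device_group() groups PCI devices
+ * that alias the same requester ID, while generic_device_group() gives
+ * any other device a group of its own.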
+ */ + if (dev_is_pci(dev)) + group = pci_device_group(dev); + else + group = generic_device_group(dev); + return group; +} + +static void la_iommu_remove_device(struct device *dev) +{ + struct la_iommu_dev_data *dev_data; + + iommu_group_remove_device(dev); + dev_data = dev->archdata.iommu; + dev->archdata.iommu = NULL; + kfree(dev_data); +} + +struct iommu_info *get_iommu_info_from_dom(struct dom_info *priv, struct loongarch_iommu *iommu) +{ + struct iommu_info *info; + + spin_lock(&priv->lock); + list_for_each_entry(info, &priv->iommu_devlist, list) { + if (info->iommu == iommu) { + spin_unlock(&priv->lock); + return info; + } + } + spin_unlock(&priv->lock); + return NULL; +} + +struct iommu_info *domain_attach_iommu(struct dom_info *priv, struct loongarch_iommu *iommu) +{ + u32 dir_ctrl; + struct iommu_info *info; + unsigned long phys; + + info = get_iommu_info_from_dom(priv, iommu); + if (info) + return info; + + info = kzalloc(sizeof(struct iommu_info), GFP_KERNEL_ACCOUNT); + if (!info) + return NULL; + + INIT_LIST_HEAD(&info->dev_list); + info->iommu = iommu; + info->id = domain_id_alloc(iommu); + if (info->id == -1) { + pr_info("%s alloc id for domain failed\n", __func__); + kfree(info); + return NULL; + } + + phys = virt_to_phys(priv->pgd); + dir_ctrl = (IOMMU_LEVEL_STRIDE << 26) | (IOMMU_LEVEL_SHIFT(2) << 20); + dir_ctrl |= (IOMMU_LEVEL_STRIDE << 16) | (IOMMU_LEVEL_SHIFT(1) << 10); + dir_ctrl |= (IOMMU_LEVEL_STRIDE << 6) | IOMMU_LEVEL_SHIFT(0); + iommu_write_regl(iommu, LA_IOMMU_DIR_CTRL(info->id), dir_ctrl); + iommu_write_regl(iommu, LA_IOMMU_PGD_HI(info->id), phys >> 32); + iommu_write_regl(iommu, LA_IOMMU_PGD_LO(info->id), phys & UINT_MAX); + + spin_lock(&priv->lock); + list_add(&info->list, &priv->iommu_devlist); + spin_unlock(&priv->lock); + add_domain_to_list(iommu, priv); + return info; +} + +static struct la_iommu_dev_data *get_devdata_from_iommu_info(struct dom_info *info, + struct loongarch_iommu *iommu, unsigned long bdf) +{ + struct iommu_info *entry; + struct la_iommu_dev_data *dev_data, *found = NULL; + + entry = get_iommu_info_from_dom(info, iommu); + if (!entry) + return found; + spin_lock(&entry->devlock); + list_for_each_entry(dev_data, &entry->dev_list, list) { + if (dev_data->bdf == bdf) { + found = dev_data; + break; + } + } + spin_unlock(&entry->devlock); + return found; +} +static void la_iommu_detach_dev(struct device *dev); + +static int la_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct dom_info *priv = to_dom_info(domain); + struct pci_dev *pdev = to_pci_dev(dev); + unsigned char busnum = pdev->bus->number; + struct la_iommu_dev_data *dev_data; + struct loongarch_iommu *iommu; + struct iommu_info *info; + unsigned short bdf; + + if (domain != NULL && domain->type == IOMMU_DOMAIN_BLOCKED) + return 0; + + la_iommu_detach_dev(dev); + + if (domain == NULL) + return 0; + + bdf = PCI_DEVID(busnum, pdev->devfn); + dev_data = (struct la_iommu_dev_data *)dev->archdata.iommu; + if (dev_data == NULL) { + pci_info(pdev, "%s dev_data is Invalid\n", __func__); + return 0; + } + + iommu = dev_data->iommu; + if (iommu == NULL) { + pci_info(pdev, "%s iommu is Invalid\n", __func__); + return 0; + } + + pci_info(pdev, "%s bdf %#x priv %lx iommu devid %#x\n", __func__, + bdf, (unsigned long)priv, iommu->devid); + dev_data = get_devdata_from_iommu_info(priv, iommu, bdf); + if (dev_data) { + pci_info(pdev, "LA-IOMMU: bdf 0x%x devfn %x has attached, count:0x%x\n", + bdf, pdev->devfn, dev_data->count); + return 0; + } + dev_data = (struct 
la_iommu_dev_data *)dev->archdata.iommu;
+
+	info = domain_attach_iommu(priv, iommu);
+	if (!info) {
+		pci_info(pdev, "domain attach iommu failed\n");
+		return 0;
+	}
+	dev_data->domain = domain;
+	do_attach(info, dev_data);
+	return 0;
+}
+
+static void la_iommu_detach_dev(struct device *dev)
+{
+	struct iommu_domain *domain;
+	struct dom_info *priv;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	unsigned char busnum = pdev->bus->number;
+	struct la_iommu_dev_data *dev_data;
+	struct loongarch_iommu *iommu;
+	struct iommu_info *iommu_entry = NULL;
+	unsigned short bdf;
+
+	bdf = PCI_DEVID(busnum, pdev->devfn);
+	dev_data = (struct la_iommu_dev_data *)dev->archdata.iommu;
+	if (dev_data == NULL) {
+		pci_info(pdev, "%s dev_data is Invalid\n", __func__);
+		return;
+	}
+
+	domain = dev_data->domain;
+	if (domain == NULL)
+		return;
+
+	priv = to_dom_info(domain);
+	iommu = dev_data->iommu;
+	if (iommu == NULL) {
+		pci_info(pdev, "%s iommu is Invalid\n", __func__);
+		return;
+	}
+	dev_data = get_devdata_from_iommu_info(priv, iommu, bdf);
+	if (dev_data == NULL) {
+		pci_info(pdev, "%s bdf %#x is not attached\n",
+			__func__, bdf);
+		return;
+	}
+
+	iommu = dev_data->iommu;
+	dev_data->dev = NULL;
+	iommu_entry = get_iommu_info_from_dom(priv, iommu);
+	if (iommu_entry == NULL) {
+		pci_info(pdev, "%s get iommu_entry failed\n", __func__);
+		return;
+	}
+
+	spin_lock(&iommu_entry->devlock);
+	do_detach(dev_data);
+	spin_unlock(&iommu_entry->devlock);
+	dev_data->domain = NULL;
+
+	pci_info(pdev, "%s iommu devid %x segment %x\n", __func__,
+			iommu->devid, iommu->segment);
+}
+
+static unsigned long *iommu_get_pte(void *pt_base, unsigned long vaddr, int level)
+{
+	int i;
+	unsigned long *ptep, *pgtable;
+
+	if (level > (IOMMU_LEVEL_MAX - 1))
+		return NULL;
+	pgtable = pt_base;
+	for (i = IOMMU_LEVEL_MAX - 1; i >= level; i--) {
+		ptep = iommu_pte_offset(pgtable, vaddr, i);
+		if (!iommu_pte_present(ptep))
+			break;
+		if (iommu_pte_huge(ptep))
+			break;
+		pgtable = phys_to_virt(*ptep & IOMMU_PAGE_MASK);
+	}
+	return ptep;
+}
+
+static int iommu_get_page_table(unsigned long *ptep)
+{
+	void *addr;
+	unsigned long pte;
+
+	if (!iommu_pte_present(ptep)) {
+		addr = kzalloc(IOMMU_PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+		if (!addr)
+			return -ENOMEM;
+		pte = virt_to_phys(addr) & IOMMU_PAGE_MASK;
+		pte |= IOMMU_PTE_RW;
+		*ptep = pte;
+	}
+	return 0;
+}
+
+static void switch_huge_to_page(unsigned long *ptep, unsigned long start);
+
+static size_t iommu_page_map(void *pt_base,
+			unsigned long start, unsigned long end,
+			phys_addr_t paddr, int level)
+{
+	unsigned long next, old, step;
+	unsigned long pte, *ptep, *pgtable;
+	int ret, huge, switch_page;
+
+	old = start;
+	ptep = iommu_pte_offset(pt_base, start, level);
+	if (level == IOMMU_PT_LEVEL0) {
+		paddr = paddr & IOMMU_PAGE_MASK;
+		do {
+			pte = paddr | IOMMU_PTE_RW;
+			*ptep = pte;
+			ptep++;
+			start += IOMMU_PAGE_SIZE;
+			paddr += IOMMU_PAGE_SIZE;
+		} while (start < end);
+
+		return start - old;
+	}
+
+	do {
+		next = iommu_ptable_end(start, end, level);
+		step = next - start;
+		huge = 0;
+		switch_page = 0;
+		if (level == IOMMU_PT_LEVEL1) {
+			if ((step == IOMMU_HPAGE_SIZE) &&
+			    (!iommu_pte_present(ptep) || iommu_pte_huge(ptep)))
+				huge = 1;
+			else if (iommu_pte_present(ptep) && iommu_pte_huge(ptep))
+				switch_page = 1;
+		}
+
+		if (switch_page)
+			switch_huge_to_page(ptep, start);
+
+		if (huge) {
+			pte = (paddr & IOMMU_HPAGE_MASK) |
+				IOMMU_PTE_RW | IOMMU_PTE_HP;
+			*ptep = pte;
+		} else {
+			ret = iommu_get_page_table(ptep);
+			if (ret != 0)
+				break;
+			pgtable = phys_to_virt(*ptep & IOMMU_PAGE_MASK);
+			iommu_page_map(pgtable, start, next, paddr, level -
1); + } + + ptep++; + paddr += step; + start = next; + } while (start < end); + return start - old; +} + +static void switch_huge_to_page(unsigned long *ptep, unsigned long start) +{ + phys_addr_t paddr = *ptep & IOMMU_HPAGE_MASK; + unsigned long next = start + IOMMU_HPAGE_SIZE; + unsigned long *pgtable; + int ret; + + *ptep = 0; + ret = iommu_get_page_table(ptep); + if (ret == 0) { + pgtable = phys_to_virt(*ptep & IOMMU_PAGE_MASK); + iommu_page_map(pgtable, start, next, paddr, 0); + } +} + +static int domain_map_page(struct dom_info *priv, unsigned long start, + phys_addr_t paddr, size_t size) +{ + int ret = 0; + phys_addr_t end; + size_t map_size; + + end = start + size; + mutex_lock(&priv->ptl_lock); + map_size = iommu_page_map(priv->pgd, start, + end, paddr, IOMMU_LEVEL_MAX - 1); + if (map_size != size) + ret = -EFAULT; + mutex_unlock(&priv->ptl_lock); + la_iommu_flush_iotlb_all(&priv->domain); + return ret; +} + +static size_t iommu_page_unmap(void *pt_base, + unsigned long start, unsigned long end, int level) +{ + unsigned long next, old; + unsigned long *ptep, *pgtable; + + old = start; + ptep = iommu_pte_offset(pt_base, start, level); + if (level == IOMMU_PT_LEVEL0) { + do { + *ptep++ = 0; + start += IOMMU_PAGE_SIZE; + } while (start < end); + } else { + do { + next = iommu_ptable_end(start, end, level); + if (!iommu_pte_present(ptep)) + continue; + + if ((level == IOMMU_PT_LEVEL1) && + iommu_pte_huge(ptep) && + ((next - start) < IOMMU_HPAGE_SIZE)) + switch_huge_to_page(ptep, start); + + if (iommu_pte_huge(ptep)) { + if ((next - start) != IOMMU_HPAGE_SIZE) + pr_err( + "Map pte on hugepage not supported now\n"); + *ptep = 0; + } else { + pgtable = phys_to_virt(*ptep & IOMMU_PAGE_MASK); + iommu_page_unmap(pgtable, start, + next, level - 1); + } + } while (ptep++, start = next, start < end); + } + return start - old; +} + +static size_t domain_unmap_page(struct dom_info *priv, + unsigned long start, size_t size) +{ + size_t unmap_len; + unsigned long end; + + end = start + size; + mutex_lock(&priv->ptl_lock); + unmap_len = iommu_page_unmap(priv->pgd, start, + end, (IOMMU_LEVEL_MAX - 1)); + mutex_unlock(&priv->ptl_lock); + la_iommu_flush_iotlb_all(&priv->domain); + return unmap_len; +} + +static int la_iommu_map(struct iommu_domain *domain, unsigned long vaddr, + phys_addr_t paddr, size_t len, int prot, gfp_t gfp) +{ + int ret; + struct dom_info *priv = to_dom_info(domain); + + ret = domain_map_page(priv, vaddr, paddr, len); + return ret; +} + +static size_t la_iommu_unmap(struct iommu_domain *domain, unsigned long vaddr, + size_t len, struct iommu_iotlb_gather *iotlb_gather) +{ + struct dom_info *priv = to_dom_info(domain); + + return domain_unmap_page(priv, vaddr, len); +} + +static phys_addr_t _iommu_iova_to_phys(struct dom_info *info, dma_addr_t vaddr) +{ + unsigned long *ptep; + unsigned long page_size, page_mask; + phys_addr_t paddr; + + mutex_lock(&info->ptl_lock); + ptep = iommu_get_pte(info->pgd, vaddr, IOMMU_PT_LEVEL0); + mutex_unlock(&info->ptl_lock); + + if (!ptep || !iommu_pte_present(ptep)) { + pr_warn_once( + "LA-IOMMU: shadow pte is null or not present with vaddr %llx\n", + vaddr); + paddr = 0; + return paddr; + } + + if (iommu_pte_huge(ptep)) { + page_size = IOMMU_HPAGE_SIZE; + page_mask = IOMMU_HPAGE_MASK; + } else { + page_size = IOMMU_PAGE_SIZE; + page_mask = IOMMU_PAGE_MASK; + } + paddr = *ptep & page_mask; + paddr |= vaddr & (page_size - 1); + return paddr; +} + +static phys_addr_t la_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t vaddr) +{ + struct 
dom_info *priv = to_dom_info(domain); + phys_addr_t phys; + + spin_lock(&priv->lock); + phys = _iommu_iova_to_phys(priv, vaddr); + spin_unlock(&priv->lock); + return phys; +} + +static void la_domain_set_plaform_dma_ops(struct device *dev) +{ + /* + * loongarch doesn't setup default domains because we can't hook into the + * normal probe path + */ +} + +const struct iommu_ops la_iommu_ops = { + .capable = la_iommu_capable, + .domain_alloc = la_iommu_domain_alloc, + .probe_device = la_iommu_probe_device, + .release_device = la_iommu_remove_device, + .device_group = la_iommu_device_group, + .pgsize_bitmap = LA_IOMMU_PGSIZE, + .owner = THIS_MODULE, + .set_platform_dma_ops = la_domain_set_plaform_dma_ops, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = la_iommu_attach_dev, + .map = la_iommu_map, + .unmap = la_iommu_unmap, + .iova_to_phys = la_iommu_iova_to_phys, + .flush_iotlb_all = la_iommu_flush_iotlb_all, + .free = la_iommu_domain_free, + } +}; + + +struct loongarch_iommu *loongarch_get_iommu_by_devid(struct pci_dev *pdev) +{ + int pcisegment; + unsigned short devid; + struct loongarch_iommu *iommu = NULL; + struct pci_bus *bus = pdev->bus; + + devid = PCI_DEVID(bus->number, pdev->devfn); + pcisegment = pci_domain_nr(pdev->bus); + list_for_each_entry(iommu, &la_iommu_list, list) { + if ((iommu->segment == pcisegment) && + (iommu->devid == devid)) { + return iommu; + } + } + return NULL; +} + +bool check_device_compat(struct pci_dev *pdev) +{ + bool compat = true; + + if ((pdev->revision == 0) && (pdev->device == 0x7a1f)) + compat = false; + return compat; +} + +static int loongarch_iommu_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int ret = 1; + int bitmap_sz = 0; + int tmp; + bool compat = false; + struct loongarch_iommu *iommu = NULL; + resource_size_t base, size; + + iommu = loongarch_get_iommu_by_devid(pdev); + if (iommu == NULL) { + pci_info(pdev, "%s can't find iommu\n", __func__); + return -ENODEV; + } + + compat = check_device_compat(pdev); + if (!compat) { + pci_info(pdev, + "%s The iommu driver is not compatible with this device\n", + __func__); + return -ENODEV; + } + + iommu->pdev = pdev; + base = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + if (!request_mem_region(base, size, "loongarch_iommu")) { + pci_err(pdev, + "%d can't reserve mmio registers base %llx size %llx\n", + __LINE__, base, size); + return -ENOMEM; + } + iommu->confbase_phy = base; + iommu->conf_size = size; + iommu->confbase = ioremap(base, size); + if (iommu->confbase == NULL) { + pci_info(pdev, "%s iommu pci dev bar0 is NULL\n", __func__); + return ret; + } + + pr_info("iommu confbase %llx pgtsize %llx\n", + (u64)iommu->confbase, size); + tmp = MAX_DOMAIN_ID / 8; + bitmap_sz = (MAX_DOMAIN_ID % 8) ? (tmp + 1) : tmp; + iommu->domain_bitmap = bitmap_zalloc(bitmap_sz, GFP_KERNEL); + if (iommu->domain_bitmap == NULL) { + pr_err("LA-IOMMU: domain bitmap alloc err bitmap_sz:%d\n", + bitmap_sz); + goto out_err; + } + + tmp = MAX_ATTACHED_DEV_ID / 8; + bitmap_sz = (MAX_ATTACHED_DEV_ID % 8) ? 
(tmp + 1) : tmp; + iommu->devtable_bitmap = bitmap_zalloc(bitmap_sz, GFP_KERNEL); + if (iommu->devtable_bitmap == NULL) { + pr_err("LA-IOMMU: devtable bitmap alloc err bitmap_sz:%d\n", + bitmap_sz); + goto out_err_1; + } + + ret = iommu_device_sysfs_add(&iommu->iommu_dev, &pdev->dev, + NULL, "ivhd-%#x", iommu->devid); + iommu_device_register(&iommu->iommu_dev, &la_iommu_ops, NULL); + return 0; + +out_err_1: + iommu->pdev = NULL; + iounmap(iommu->confbase); + iommu->confbase = NULL; + release_mem_region(iommu->confbase_phy, iommu->conf_size); + iommu->confbase_phy = 0; + iommu->conf_size = 0; + kfree(iommu->domain_bitmap); + iommu->domain_bitmap = NULL; +out_err: + return ret; +} + +static void loongarch_iommu_remove(struct pci_dev *pdev) +{ + struct loongarch_iommu *iommu = NULL; + + iommu = loongarch_get_iommu_by_devid(pdev); + if (iommu == NULL) + return; + if (iommu->domain_bitmap != NULL) { + kfree(iommu->domain_bitmap); + iommu->domain_bitmap = NULL; + } + if (iommu->devtable_bitmap != NULL) { + kfree(iommu->devtable_bitmap); + iommu->devtable_bitmap = NULL; + } + if (iommu->confbase != NULL) { + iounmap(iommu->confbase); + iommu->confbase = NULL; + } + if (iommu->confbase_phy != 0) { + release_mem_region(iommu->confbase_phy, iommu->conf_size); + iommu->confbase_phy = 0; + iommu->conf_size = 0; + } +} + +static int __init check_ivrs_checksum(struct acpi_table_header *table) +{ + int i; + u8 checksum = 0, *p = (u8 *)table; + + for (i = 0; i < table->length; ++i) + checksum += p[i]; + if (checksum != 0) { + /* ACPI table corrupt */ + pr_err("IVRS invalid checksum\n"); + return -ENODEV; + } + return 0; +} + +struct iommu_rlookup_entry *create_rlookup_entry(int pcisegment) +{ + struct iommu_rlookup_entry *rlookupentry = NULL; + + rlookupentry = kzalloc(sizeof(struct iommu_rlookup_entry), + GFP_KERNEL); + if (rlookupentry == NULL) + return rlookupentry; + + rlookupentry->pcisegment = pcisegment; + /* IOMMU rlookup table - find the IOMMU for a specific device */ + rlookupentry->rlookup_table = (void *)__get_free_pages( + GFP_KERNEL | __GFP_ZERO, + get_order(rlookup_table_size)); + if (rlookupentry->rlookup_table == NULL) { + kfree(rlookupentry); + rlookupentry = NULL; + } else { + list_add(&rlookupentry->list, &la_rlookup_iommu_list); + } + return rlookupentry; +} + +/* Writes the specific IOMMU for a device into the rlookup table */ +static void __init set_iommu_for_device(struct loongarch_iommu *iommu, + u16 devid) +{ + struct iommu_rlookup_entry *rlookupentry = NULL; + + rlookupentry = lookup_rlooptable(iommu->segment); + if (rlookupentry == NULL) + rlookupentry = create_rlookup_entry(iommu->segment); + if (rlookupentry != NULL) + rlookupentry->rlookup_table[devid] = iommu; +} + +static inline u32 get_ivhd_header_size(struct ivhd_header *h) +{ + u32 size = 0; + + switch (h->type) { + case IVHD_HEAD_TYPE10: + size = 24; + break; + case IVHD_HEAD_TYPE11: + case IVHD_HEAD_TYPE40: + size = 40; + break; + } + return size; +} + +static inline void update_last_devid(u16 devid) +{ + if (devid > la_iommu_last_bdf) + la_iommu_last_bdf = devid; +} + +/* + * This function calculates the length of a given IVHD entry + */ +static inline int ivhd_entry_length(u8 *ivhd) +{ + u32 type = ((struct ivhd_entry *)ivhd)->type; + + if (type < 0x80) { + return 0x04 << (*ivhd >> 6); + } else if (type == IVHD_DEV_ACPI_HID) { + /* For ACPI_HID, offset 21 is uid len */ + return *((u8 *)ivhd + 21) + 22; + } + return 0; +} + +/* + * After reading the highest device id from the IOMMU PCI capability header + * this 
function looks if there is a higher device id defined in the ACPI table + */ +static int __init find_last_devid_from_ivhd(struct ivhd_header *h) +{ + u8 *p = (void *)h, *end = (void *)h; + struct ivhd_entry *dev; + + u32 ivhd_size = get_ivhd_header_size(h); + + if (!ivhd_size) { + pr_err("la-iommu: Unsupported IVHD type %#x\n", h->type); + return -EINVAL; + } + + p += ivhd_size; + end += h->length; + + while (p < end) { + dev = (struct ivhd_entry *)p; + switch (dev->type) { + case IVHD_DEV_ALL: + /* Use maximum BDF value for DEV_ALL */ + update_last_devid(MAX_BDF_NUM); + break; + case IVHD_DEV_SELECT: + case IVHD_DEV_RANGE_END: + case IVHD_DEV_ALIAS: + case IVHD_DEV_EXT_SELECT: + /* all the above subfield types refer to device ids */ + update_last_devid(dev->devid); + break; + default: + break; + } + p += ivhd_entry_length(p); + } + + WARN_ON(p != end); + + return 0; +} + +/* + * Iterate over all IVHD entries in the ACPI table and find the highest device + * id which we need to handle. This is the first of three functions which parse + * the ACPI table. So we check the checksum here. + */ +static int __init find_last_devid_acpi(struct acpi_table_header *table) +{ + u8 *p = (u8 *)table, *end = (u8 *)table; + struct ivhd_header *h; + + p += IVRS_HEADER_LENGTH; + + end += table->length; + while (p < end) { + h = (struct ivhd_header *)p; + if (h->type == la_iommu_target_ivhd_type) { + int ret = find_last_devid_from_ivhd(h); + + if (ret) + return ret; + } + + if (h->length == 0) + break; + + p += h->length; + } + + if (p != end) + return -EINVAL; + return 0; +} + +/* + * Takes a pointer to an loongarch IOMMU entry in the ACPI table and + * initializes the hardware and our data structures with it. + */ +static int __init init_iommu_from_acpi(struct loongarch_iommu *iommu, + struct ivhd_header *h) +{ + u8 *p = (u8 *)h; + u8 *end = p; + u16 devid = 0, devid_start = 0; + u32 dev_i; + struct ivhd_entry *e; + u32 ivhd_size; + + /* + * Done. Now parse the device entries + */ + ivhd_size = get_ivhd_header_size(h); + if (!ivhd_size) { + pr_err("loongarch iommu: Unsupported IVHD type %#x\n", h->type); + return -EINVAL; + } + + if (h->length == 0) + return -EINVAL; + + p += ivhd_size; + end += h->length; + + while (p < end) { + e = (struct ivhd_entry *)p; + switch (e->type) { + case IVHD_DEV_ALL: + for (dev_i = 0; dev_i <= la_iommu_last_bdf; ++dev_i) + set_iommu_for_device(iommu, dev_i); + break; + case IVHD_DEV_SELECT: + + pr_info(" DEV_SELECT\t\t\t devid: %02x:%02x.%x\n", + PCI_BUS_NUM(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid)); + + devid = e->devid; + set_iommu_for_device(iommu, devid); + break; + case IVHD_DEV_SELECT_RANGE_START: + + pr_info(" DEV_SELECT_RANGE_START\t devid: %02x:%02x.%x\n", + PCI_BUS_NUM(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid)); + + devid_start = e->devid; + break; + case IVHD_DEV_RANGE_END: + + pr_info(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", + PCI_BUS_NUM(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid)); + + devid = e->devid; + for (dev_i = devid_start; dev_i <= devid; ++dev_i) + set_iommu_for_device(iommu, dev_i); + break; + default: + break; + } + + p += ivhd_entry_length(p); + } + + return 0; +} + +/* + * This function clues the initialization function for one IOMMU + * together and also allocates the command buffer and programs the + * hardware. It does NOT enable the IOMMU. This is done afterwards. 
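+ *
+ * Concretely, init_iommu_one() records the device id and PCI segment from
+ * the IVHD header and seeds the per-segment rlookup table through
+ * init_iommu_from_acpi().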
+ */ +static int __init init_iommu_one(struct loongarch_iommu *iommu, + struct ivhd_header *h) +{ + int ret; + struct iommu_rlookup_entry *rlookupentry = NULL; + + spin_lock_init(&iommu->domain_bitmap_lock); + spin_lock_init(&iommu->dom_info_lock); + + /* Add IOMMU to internal data structures */ + INIT_LIST_HEAD(&iommu->dom_list); + + list_add_tail(&iommu->list, &la_iommu_list); + + /* + * Copy data from ACPI table entry to the iommu struct + */ + iommu->devid = h->devid; + iommu->segment = h->pci_seg; + ret = init_iommu_from_acpi(iommu, h); + if (ret) { + pr_err("%s init iommu from acpi failed\n", __func__); + return ret; + } + rlookupentry = lookup_rlooptable(iommu->segment); + if (rlookupentry != NULL) { + /* + * Make sure IOMMU is not considered to translate itself. + * The IVRS table tells us so, but this is a lie! + */ + rlookupentry->rlookup_table[iommu->devid] = NULL; + } + return 0; +} + +/* + * Iterates over all IOMMU entries in the ACPI table, allocates the + * IOMMU structure and initializes it with init_iommu_one() + */ +static int __init init_iommu_all(struct acpi_table_header *table) +{ + u8 *p = (u8 *)table, *end = (u8 *)table; + struct ivhd_header *h; + struct loongarch_iommu *iommu; + int ret; + + end += table->length; + p += IVRS_HEADER_LENGTH; + + while (p < end) { + h = (struct ivhd_header *)p; + + if (h->length == 0) + break; + + if (*p == la_iommu_target_ivhd_type) { + + pr_info("device: %02x:%02x.%01x seg: %d\n", + PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), + PCI_FUNC(h->devid), h->pci_seg); + + iommu = kzalloc(sizeof(struct loongarch_iommu), + GFP_KERNEL); + if (iommu == NULL) + return -ENOMEM; + + ret = init_iommu_one(iommu, h); + if (ret) { + kfree(iommu); + pr_info("%s init iommu failed\n", __func__); + return ret; + } + } + p += h->length; + } + if (p != end) + return -EINVAL; + return 0; +} + +/** + * get_highest_supported_ivhd_type - Look up the appropriate IVHD type + * @ivrs Pointer to the IVRS header + * + * This function search through all IVDB of the maximum supported IVHD + */ +static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs) +{ + u8 *base = (u8 *)ivrs; + struct ivhd_header *ivhd = (struct ivhd_header *) + (base + IVRS_HEADER_LENGTH); + u8 last_type = ivhd->type; + u16 devid = ivhd->devid; + + while (((u8 *)ivhd - base < ivrs->length) && + (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED) && + (ivhd->length > 0)) { + u8 *p = (u8 *) ivhd; + + if (ivhd->devid == devid) + last_type = ivhd->type; + ivhd = (struct ivhd_header *)(p + ivhd->length); + } + return last_type; +} + +static inline unsigned long tbl_size(int entry_size) +{ + unsigned int shift = PAGE_SHIFT + + get_order(((int)la_iommu_last_bdf + 1) * entry_size); + + return 1UL << shift; +} + +static int __init loongarch_iommu_ivrs_init(void) +{ + struct acpi_table_header *ivrs_base; + acpi_status status; + int ret = 0; + + status = acpi_get_table("IVRS", 0, &ivrs_base); + if (status == AE_NOT_FOUND) { + pr_info("%s get ivrs table failed\n", __func__); + return -ENODEV; + } + + /* + * Validate checksum here so we don't need to do it when + * we actually parse the table + */ + ret = check_ivrs_checksum(ivrs_base); + if (ret) + goto out; + + la_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); + pr_info("Using IVHD type %#x\n", la_iommu_target_ivhd_type); + + /* + * First parse ACPI tables to find the largest Bus/Dev/Func + * we need to handle. 
Upon this information the shared data + * structures for the IOMMUs in the system will be allocated + */ + ret = find_last_devid_acpi(ivrs_base); + if (ret) { + pr_err("%s find last devid failed\n", __func__); + goto out; + } + + rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); + + /* + * now the data structures are allocated and basically initialized + * start the real acpi table scan + */ + ret = init_iommu_all(ivrs_base); +out: + /* Don't leak any ACPI memory */ + acpi_put_table(ivrs_base); + ivrs_base = NULL; + return ret; +} + +static void free_iommu_rlookup_entry(void) +{ + struct loongarch_iommu *iommu = NULL; + struct iommu_rlookup_entry *rlookupentry = NULL; + + while (!list_empty(&la_iommu_list)) { + iommu = list_first_entry(&la_iommu_list, struct loongarch_iommu, list); + list_del(&iommu->list); + kfree(iommu); + } + + while (!list_empty(&la_rlookup_iommu_list)) { + rlookupentry = list_first_entry(&la_rlookup_iommu_list, + struct iommu_rlookup_entry, list); + + list_del(&rlookupentry->list); + if (rlookupentry->rlookup_table != NULL) { + free_pages( + (unsigned long)rlookupentry->rlookup_table, + get_order(rlookup_table_size)); + + rlookupentry->rlookup_table = NULL; + } + kfree(rlookupentry); + } +} + +static int __init la_iommu_setup(char *str) +{ + if (!str) + return -EINVAL; + while (*str) { + if (!strncmp(str, "on", 2)) { + loongarch_iommu_disable = 0; + pr_info("IOMMU enabled\n"); + } else if (!strncmp(str, "off", 3)) { + loongarch_iommu_disable = 1; + pr_info("IOMMU disabled\n"); + } + str += strcspn(str, ","); + while (*str == ',') + str++; + } + return 0; +} +__setup("loongarch_iommu=", la_iommu_setup); + +static const struct pci_device_id loongson_iommu_pci_tbl[] = { + { PCI_DEVICE(0x14, 0x3c0f) }, + { PCI_DEVICE(0x14, 0x7a1f) }, + { 0, } +}; + +static struct pci_driver loongarch_iommu_driver = { + .name = "loongarch-iommu", + .id_table = loongson_iommu_pci_tbl, + .probe = loongarch_iommu_probe, + .remove = loongarch_iommu_remove, +}; + +static int __init loongarch_iommu_driver_init(void) +{ + int ret = 0; + + if (loongarch_iommu_disable == 0) { + ret = loongarch_iommu_ivrs_init(); + if (ret != 0) { + free_iommu_rlookup_entry(); + pr_err("Failed to init iommu by ivrs\n"); + } + + ret = pci_register_driver(&loongarch_iommu_driver); + if (ret != 0) { + pr_err("Failed to register IOMMU driver\n"); + return ret; + } + } + return ret; +} + +static void __exit loongarch_iommu_driver_exit(void) +{ + struct loongarch_iommu *iommu = NULL; + + if (loongarch_iommu_disable == 0) { + list_for_each_entry(iommu, &la_iommu_list, list) { + iommu_device_sysfs_remove(&iommu->iommu_dev); + iommu_device_unregister(&iommu->iommu_dev); + loongarch_iommu_remove(iommu->pdev); + } + free_iommu_rlookup_entry(); + pci_unregister_driver(&loongarch_iommu_driver); + } +} + +module_init(loongarch_iommu_driver_init); +module_exit(loongarch_iommu_driver_exit); diff --git a/drivers/iommu/loongarch_iommu.h b/drivers/iommu/loongarch_iommu.h new file mode 100644 index 0000000000000000000000000000000000000000..a411d2b34d01d70991cfeda572ba65d1f9a70403 --- /dev/null +++ b/drivers/iommu/loongarch_iommu.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Loongson IOMMU Driver + * + * Copyright (C) 2020-2021 Loongson Technology Ltd. + * Author: Lv Chen + * Wang Yang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef LOONGARCH_IOMMU_H +#define LOONGARCH_IOMMU_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#define IOVA_WIDTH 47 + +/* Bit value definition for I/O PTE fields */ +#define IOMMU_PTE_PR (1ULL << 0) /* Present */ +#define IOMMU_PTE_HP (1ULL << 1) /* HugePage */ +#define IOMMU_PTE_IR (1ULL << 2) /* Readable */ +#define IOMMU_PTE_IW (1ULL << 3) /* Writeable */ +#define IOMMU_PTE_RW (IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW) + +#define iommu_pte_present(ptep) ((*ptep != 0)) +#define iommu_pte_huge(ptep) ((*ptep) & IOMMU_PTE_HP) + +#define LA_IOMMU_PGSIZE (SZ_16K | SZ_32M) + +#define IOMMU_PT_LEVEL0 0x00 +#define IOMMU_PT_LEVEL1 0x01 + +/* IOMMU page table */ +#define IOMMU_PAGE_SHIFT PAGE_SHIFT +#define IOMMU_PAGE_SIZE (_AC(1, UL) << IOMMU_PAGE_SHIFT) +#define IOMMU_LEVEL_STRIDE (IOMMU_PAGE_SHIFT - 3) +#define IOMMU_PTRS_PER_LEVEL (IOMMU_PAGE_SIZE >> 3) +#define IOMMU_LEVEL_SHIFT(n) (((n) * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT) +#define IOMMU_LEVEL_SIZE(n) (_AC(1, UL) << (((n) * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT)) +#define IOMMU_LEVEL_MASK(n) (~(IOMMU_LEVEL_SIZE(n) - 1)) +#define IOMMU_LEVEL_MAX DIV_ROUND_UP((IOVA_WIDTH - IOMMU_PAGE_SHIFT), IOMMU_LEVEL_STRIDE) +#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1)) + +#define IOMMU_HPAGE_SIZE (1UL << IOMMU_LEVEL_SHIFT(IOMMU_PT_LEVEL1)) +#define IOMMU_HPAGE_MASK (~(IOMMU_HPAGE_SIZE - 1)) + +/* wired | index | domain | shift */ +#define LA_IOMMU_WIDS 0x10 +/* valid | busy | tlbar/aw | cmd */ +#define LA_IOMMU_VBTC 0x14 +#define IOMMU_PGTABLE_BUSY (1 << 16) +/* enable |index | valid | domain | bdf */ +#define LA_IOMMU_EIVDB 0x18 +/* enable | valid | cmd */ +#define LA_IOMMU_CMD 0x1C +#define LA_IOMMU_PGD0_LO 0x20 +#define LA_IOMMU_PGD0_HI 0x24 +#define STEP_PGD 0x8 +#define STEP_PGD_SHIFT 3 +#define LA_IOMMU_PGD_LO(domain_id) \ + (LA_IOMMU_PGD0_LO + ((domain_id) << STEP_PGD_SHIFT)) +#define LA_IOMMU_PGD_HI(domain_id) \ + (LA_IOMMU_PGD0_HI + ((domain_id) << STEP_PGD_SHIFT)) + +#define LA_IOMMU_DIR_CTRL0 0xA0 +#define LA_IOMMU_DIR_CTRL1 0xA4 +#define LA_IOMMU_DIR_CTRL(x) (LA_IOMMU_DIR_CTRL0 + ((x) << 2)) + +#define LA_IOMMU_SAFE_BASE_HI 0xE0 +#define LA_IOMMU_SAFE_BASE_LO 0xE4 +#define LA_IOMMU_EX_ADDR_LO 0xE8 +#define LA_IOMMU_EX_ADDR_HI 0xEC + +#define LA_IOMMU_PFM_CNT_EN 0x100 + +#define LA_IOMMU_RD_HIT_CNT_0 0x110 +#define LA_IOMMU_RD_MISS_CNT_O 0x114 +#define LA_IOMMU_WR_HIT_CNT_0 0x118 +#define LA_IOMMU_WR_MISS_CNT_0 0x11C +#define LA_IOMMU_RD_HIT_CNT_1 0x120 +#define LA_IOMMU_RD_MISS_CNT_1 0x124 +#define LA_IOMMU_WR_HIT_CNT_1 0x128 +#define LA_IOMMU_WR_MISS_CNT_1 0x12C +#define LA_IOMMU_RD_HIT_CNT_2 0x130 +#define LA_IOMMU_RD_MISS_CNT_2 0x134 +#define LA_IOMMU_WR_HIT_CNT_2 0x138 +#define LA_IOMMU_WR_MISS_CNT_2 0x13C + +#define MAX_DOMAIN_ID 16 +#define MAX_ATTACHED_DEV_ID 16 + +#define iommu_ptable_end(addr, end, level) \ +({ unsigned long __boundary = ((addr) + IOMMU_LEVEL_SIZE(level)) & \ + IOMMU_LEVEL_MASK(level); \ + (__boundary - 1 < (end) - 1) ? 
__boundary : (end);					\
+})
+
+/* To find an entry in an iommu page table directory */
+#define iommu_page_index(addr, level)		\
+		(((addr) >> (((level) * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT)) \
+		& (IOMMU_PTRS_PER_LEVEL - 1))
+
+struct loongarch_iommu {
+	struct list_head list;		/* for la_iommu_list */
+	spinlock_t domain_bitmap_lock;	/* Lock for domain allocating */
+	spinlock_t dom_info_lock;	/* Lock for dom_list */
+	void *domain_bitmap;		/* Bitmap of global domains */
+	void *devtable_bitmap;		/* Bitmap of devtable */
+	struct list_head dom_list;	/* List of all domain privates */
+	/* PCI device id of the IOMMU device */
+	u16 devid;
+	int segment;			/* PCI segment# */
+	/* iommu configuration register space base address */
+	void *confbase;
+	/* iommu configuration register space physical base address */
+	resource_size_t confbase_phy;
+	/* iommu configuration register space size */
+	resource_size_t conf_size;
+	struct pci_dev *pdev;
+	/* Handle for IOMMU core code */
+	struct iommu_device iommu_dev;
+};
+
+struct iommu_rlookup_entry {
+	struct list_head list;
+	struct loongarch_iommu **rlookup_table;
+	int pcisegment;
+};
+
+struct iommu_info {
+	struct list_head list;		/* for dom_info->iommu_devlist */
+	struct loongarch_iommu *iommu;
+	spinlock_t devlock;		/* priv dev list lock */
+	struct list_head dev_list;	/* List of all devices in this domain iommu */
+	unsigned int dev_cnt;		/* devices assigned to this domain iommu */
+	short id;
+};
+
+/* One VM maps to one domain; each domain has its own private data */
+struct dom_info {
+	struct list_head iommu_devlist;
+	struct iommu_domain domain;
+	struct mutex ptl_lock;		/* Lock for page table */
+	void *pgd;
+	spinlock_t lock;		/* Lock for dom_info->iommu_devlist */
+};
+
+struct dom_entry {
+	struct list_head list;		/* for loongarch_iommu->dom_list */
+	struct dom_info *domain_info;
+};
+
+/* A device for passthrough */
+struct la_iommu_dev_data {
+	struct list_head list;		/* for iommu_entry->dev_list */
+	struct loongarch_iommu *iommu;
+	struct iommu_info *iommu_entry;
+	struct iommu_domain *domain;
+	struct device *dev;
+	unsigned short bdf;
+	int count;
+	int index;			/* index in device table */
+};
+
+static inline unsigned long *iommu_pte_offset(unsigned long *ptep, unsigned long addr, int level)
+{
+	return ptep + iommu_page_index(addr, level);
+}
+#endif	/* LOONGARCH_IOMMU_H */
diff --git a/drivers/iommu/sw64/Kconfig b/drivers/iommu/sw64/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..3a6a1e994f315761cd086dd020c4d966fa80ade1
--- /dev/null
+++ b/drivers/iommu/sw64/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# SW64 IOMMU SUPPORT
+config SUNWAY_IOMMU
+	bool "Sunway IOMMU Support"
+	select IOMMU_API
+	select IOMMU_IOVA
+	select IOMMU_DMA
+	depends on SW64 && PCI && SUBARCH_C3B
+	help
+	  Support for the IOMMU on SW64 platforms. Devices can be enabled or
+	  bypassed via the boot parameters "iommu_enable" and "iommu.passthrough".
+
+# SW64 IOMMU V2 SUPPORT
+config SUNWAY_IOMMU_V2
+	bool "Sunway IOMMU V2 Support"
+	select IOMMU_API
+	select IOMMU_IOVA
+	depends on SW64 && PCI && SUBARCH_C4
+	help
+	  Support for the IOMMU V2 on SW64 platforms. Devices can be enabled or
+	  bypassed via the boot parameters "iommu_enable" and "iommu.passthrough".
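+
+# Example (bit layout as parsed by is_iommu_enable() in iommu.c):
+# "iommu_enable=0x01" sets bit (8 * node + index) = 0, i.e. it enables the
+# IOMMU on node 0, root complex 0; "iommu.passthrough=1" then makes attached
+# devices bypass DMA translation.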
diff --git a/drivers/iommu/sw64/Makefile b/drivers/iommu/sw64/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..e61b343490aa82963ec6da62fddafd7c6a8526bf
--- /dev/null
+++ b/drivers/iommu/sw64/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SUNWAY_IOMMU) += iommu.o
+obj-$(CONFIG_SUNWAY_IOMMU_V2) += iommu_v2.o
diff --git a/drivers/iommu/sw64/iommu.c b/drivers/iommu/sw64/iommu.c
new file mode 100644
index 0000000000000000000000000000000000000000..32b18f726fd919dca67ba722cc133f1ed26567cf
--- /dev/null
+++ b/drivers/iommu/sw64/iommu.c
@@ -0,0 +1,1277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * iommu.c: Generic sw64 IOMMU support
+ *
+ * This is designed and tested for 3231. If there are no changes in hardware
+ * in later chips, then it should work just as well.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include "sunway_iommu.h"
+
+#define MAX_DOMAIN_NUM 65536
+#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
+#define SW64_DMA_LIMIT (0xe0000000 - 1)
+#define SW64_BAR_ADDRESS (IO_BASE | PCI_BASE)
+
+#define SW64_IOMMU_LEVEL1_OFFSET	0x1ff
+#define SW64_IOMMU_LEVEL2_OFFSET	0x3ff
+
+#define SW64_IOMMU_GRN_8K	((0UL) << 4)	/* page size as 8KB */
+#define SW64_IOMMU_GRN_8M	((0x2UL) << 4)	/* page size as 8MB */
+#define SW64_IOMMU_PGSIZES (((1ULL) << PAGE_SHIFT) | ((1ULL) << PAGE_8M_SHIFT))
+
+#define IDENTMAP_ALL	((1U) << 0)
+#define DMA_MASK64	((1U) << 1)
+
+/* IOMMU Exceptional Status */
+enum exceptype {
+	DTE_LEVEL1 = 0x0,
+	DTE_LEVEL2,
+	PTE_LEVEL1,
+	PTE_LEVEL2,
+	UNAUTHORIZED_ACCESS,
+	ILLEGAL_RESPONSE,
+	DTE_LEVEL1_VAL,
+	DTE_LEVEL2_VAL,
+	PTE_LEVEL1_VAL,
+	PTE_LEVEL2_VAL,
+};
+
+u64 iommu_enable_cmd;	/* default IOMMU boot param: 0 */
+
+unsigned long *sunway_iommu_domain_bitmap;
+
+static DEFINE_SPINLOCK(domain_bitmap_lock);
+static DEFINE_SPINLOCK(sunway_iommu_device_table_lock);
+spinlock_t sunway_domain_lock;
+
+static LLIST_HEAD(dev_data_list);
+LIST_HEAD(sunway_domain_list);
+
+struct dma_domain {
+	struct sunway_iommu_domain sdomain;
+	struct iova_domain iovad;
+};
+const struct iommu_ops sunway_iommu_ops;
+
+static int iommu_identity_mapping;
+
+/* flush helpers */
+static void piu_flush_all(struct pci_controller *hose)
+{
+	write_piu_ior0(hose->node, hose->index, DTLB_FLUSHALL, 0);
+	write_piu_ior0(hose->node, hose->index, PTLB_FLUSHALL, 0);
+	write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHALL, 0);
+}
+
+void dev_flush_dtlb(struct sunway_iommu_domain *sdomain,
+		    struct sunway_iommu_dev *sdev_data)
+{
+	struct pci_controller *hose;
+	int devid;
+
+	list_for_each_entry(sdev_data, &sdomain->dev_list, list) {
+		hose = pci_bus_to_pci_controller(sdev_data->pdev->bus);
+		devid = sdev_data->devid;
+
+		write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, devid);
+	}
+}
+
+void flush_pcache_by_addr(struct sunway_iommu_domain *sdomain,
+			  unsigned long flush_addr)
+{
+	struct pci_controller *hose;
+	struct sunway_iommu_dev *sdev_data;
+
+	flush_addr = __pa(flush_addr);
+	list_for_each_entry(sdev_data, &sdomain->dev_list, list) {
+		hose = pci_bus_to_pci_controller(sdev_data->pdev->bus);
+
+		write_piu_ior0(hose->node, hose->index,
+				PCACHE_FLUSHPADDR, flush_addr);
+	}
+}
+
+void flush_ptlb_by_addr(struct sunway_iommu_domain *sdomain,
+			unsigned long flush_addr)
+{
+	struct pci_controller *hose;
+	struct pci_dev *pdev;
+	struct sunway_iommu_dev *sdev_data;
+	unsigned long val;
+
+	list_for_each_entry(sdev_data, &sdomain->dev_list, list) {
+		pdev = sdev_data->pdev;
+		hose = pci_bus_to_pci_controller(pdev->bus);
+
+		val = (pdev->bus->number << 8)
+			| pdev->devfn | (flush_addr << 16);
+		write_piu_ior0(hose->node, hose->index,
+				PTLB_FLUSHVADDR, val);
+	}
+}
+
+/* domain helpers */
+static struct sunway_iommu_domain *to_sunway_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct sunway_iommu_domain, domain);
+}
+
+static struct dma_domain *to_dma_domain(struct sunway_iommu_domain *sdomain)
+{
+	return container_of(sdomain, struct dma_domain, sdomain);
+}
+
+static void add_domain_to_list(struct sunway_iommu_domain *sdomain)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sunway_domain_lock, flags);
+	list_add(&sdomain->list, &sunway_domain_list);
+	spin_unlock_irqrestore(&sunway_domain_lock, flags);
+}
+
+static void del_domain_from_list(struct sunway_iommu_domain *sdomain)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sunway_domain_lock, flags);
+	list_del(&sdomain->list);
+	spin_unlock_irqrestore(&sunway_domain_lock, flags);
+}
+
+static void free_pagetable(struct sunway_iommu_domain *sdomain)
+{
+	unsigned long pde;
+	unsigned long *pde_ptr;
+	int i, pdes_one_page;
+
+	pde_ptr = sdomain->pt_root;
+	if (!pde_ptr)
+		return;
+
+	pdes_one_page = PAGE_SIZE/sizeof(pde);
+	for (i = 0; i < pdes_one_page; i++, pde_ptr++) {
+		pde = *pde_ptr;
+		if ((pde & SW64_IOMMU_ENTRY_VALID) == 0)
+			continue;
+
+		pde &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK;
+		pde |= PAGE_OFFSET;
+		free_page(pde);
+	}
+
+	free_page((unsigned long)sdomain->pt_root);
+}
+
+static void domain_id_free(int id)
+{
+	spin_lock(&domain_bitmap_lock);
+	if (id > 0)
+		__clear_bit(id, sunway_iommu_domain_bitmap);
+	spin_unlock(&domain_bitmap_lock);
+}
+
+static void dma_domain_free(struct dma_domain *dma_dom)
+{
+	if (!dma_dom)
+		return;
+
+	del_domain_from_list(&dma_dom->sdomain);
+	put_iova_domain(&dma_dom->iovad);
+	free_pagetable(&dma_dom->sdomain);
+	if (dma_dom->sdomain.id)
+		domain_id_free(dma_dom->sdomain.id);
+
+	kfree(dma_dom);
+}
+
+static void sunway_domain_free(struct sunway_iommu_domain *sdomain)
+{
+	if (!sdomain)
+		return;
+
+	del_domain_from_list(sdomain);
+	if (sdomain->id)
+		domain_id_free(sdomain->id);
+
+	kfree(sdomain);
+}
+
+static u16 sunway_domain_id_alloc(void)
+{
+	int id;
+
+	spin_lock(&domain_bitmap_lock);
+	id = find_first_zero_bit(sunway_iommu_domain_bitmap, MAX_DOMAIN_NUM);
+	if (id > 0 && id < MAX_DOMAIN_NUM)
+		__set_bit(id, sunway_iommu_domain_bitmap);
+	else
+		id = 0;
+	spin_unlock(&domain_bitmap_lock);
+
+	return id;
+}
+
+static int sunway_domain_init(struct sunway_iommu_domain *sdomain)
+{
+	spin_lock_init(&sdomain->lock);
+	mutex_init(&sdomain->api_lock);
+	sdomain->id = sunway_domain_id_alloc();
+	if (!sdomain->id)
+		return 0;
+	INIT_LIST_HEAD(&sdomain->dev_list);
+
+	return 1;
+}
+
+static struct sunway_iommu_domain *sunway_domain_alloc(void)
+{
+	struct sunway_iommu_domain *sdomain;
+
+	sdomain = kzalloc(sizeof(struct sunway_iommu_domain), GFP_KERNEL);
+	if (!sdomain)
+		return NULL;
+
+	if (!sunway_domain_init(sdomain)) {
+		kfree(sdomain);
+		return NULL;
+	}
+
+	add_domain_to_list(sdomain);
+	return sdomain;
+}
+
+static struct dma_domain *dma_domain_alloc(void)
+{
+	struct dma_domain *dma_dom;
+
+	dma_dom = kzalloc(sizeof(struct dma_domain), GFP_KERNEL);
+	if (!dma_dom)
+		return NULL;
+
+	if (!sunway_domain_init(&dma_dom->sdomain)) {
+		kfree(dma_dom);
+		return NULL;
+	}
+	dma_dom->sdomain.type = IOMMU_DOMAIN_DMA;
+
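+	/*
+	 * The level-1 page-table root allocated below is what
+	 * set_dte_entry() later installs into the device table entry
+	 * of every device attached to this domain.
+	 */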
dma_dom->sdomain.pt_root = (unsigned long *)get_zeroed_page(GFP_KERNEL); + if (dma_dom->sdomain.pt_root == NULL) { + pr_err("Allocating a new sdomain pt_root failed!\n"); + dma_domain_free(dma_dom); + return NULL; + } + + add_domain_to_list(&dma_dom->sdomain); + + return dma_dom; +} + +static void device_flush_all(struct sunway_iommu_dev *sdata) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(sdata->pdev->bus); + + if (hose == NULL) + return; + + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHDEV, sdata->devid); +} + +/* iommu_ops device attach/unattach helpers */ +static void +set_dte_entry(struct sunway_iommu_dev *sdev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct pci_dev *pdev; + struct page *page; + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_base, dte_l2_val; + + pdev = sdev->pdev; + if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) + return; + + sdev->devid = PCI_DEVID(pdev->bus->number, pdev->devfn); + iommu = sdev->iommu; + dte_l1 = iommu->iommu_dtbr + (pdev->bus->number); + dte_l1_val = *dte_l1; + + if (!dte_l1_val) { + /* Alloc a new level-2 device table page */ + page = alloc_pages_node(iommu->node, __GFP_ZERO, + get_order(PAGE_SIZE)); + if (!page) { + pr_err("Allocating a new level-2 device table page failed.\n"); + return; + } + + dte_l2_base = (unsigned long)page_address(page); + dte_l1_val = (__pa(dte_l2_base) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + *dte_l1 = dte_l1_val; + } + + dte_l2 = __va(dte_l1_val & ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + (pdev->devfn << 3); + dte_l2_val = (__pa(sdomain->pt_root) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + if (iommu_identity_mapping) { + dte_l2_val |= 0x1; + sdev->passthrough = IDENTMAP_ALL; + } + *dte_l2 = dte_l2_val; + + device_flush_all(sdev); +} + +static void +do_attach(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + sdev_data->domain = sdomain; + list_add(&sdev_data->list, &sdomain->dev_list); + + sdomain->dev_cnt++; + set_dte_entry(sdev_data, sdomain); + + pr_debug("iommu: device %d add to domain: %d\n", + sdev_data->devid, sdomain->id); +} + +static void do_detach(struct sunway_iommu_dev *sdev_data) +{ + struct sunway_iommu_domain *sdomain = sdev_data->domain; + + sdev_data->domain = NULL; + list_del(&sdev_data->list); + device_flush_all(sdev_data); + + sdomain->dev_cnt--; + pr_debug("iommu: device %d detached from domain %d\n", + sdev_data->devid, sdomain->id); +} + +static int +__attach_device(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + int ret; + + spin_lock(&sdomain->lock); + ret = -EBUSY; + if (sdev_data->domain != NULL) + goto out_unlock; + + do_attach(sdev_data, sdomain); + ret = 0; + +out_unlock: + spin_unlock(&sdomain->lock); + return ret; +} + +static void __detach_device(struct sunway_iommu_dev *sunway_dev_data) +{ + struct sunway_iommu_domain *domain; + + domain = sunway_dev_data->domain; + + spin_lock(&domain->lock); + do_detach(sunway_dev_data); + spin_unlock(&domain->lock); +} + +static int attach_device(struct device *dev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *sdev; + unsigned long flags; + int ret; + + sdev = dev_iommu_priv_get(dev); + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + ret = __attach_device(sdev, sdomain); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + return 
ret; +} + +static void detach_device(struct device *dev) +{ + struct sunway_iommu_domain *sunway_domain; + struct sunway_iommu_dev *sdev_data; + unsigned long flags; + + sdev_data = dev_iommu_priv_get(dev); + sunway_domain = sdev_data->domain; + + if (WARN_ON(!sdev_data->domain)) + return; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + __detach_device(sdev_data); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + if (!dev_is_pci(dev)) + return; +} + +static struct sunway_iommu_dev *search_dev_data(u16 devid) +{ + struct sunway_iommu_dev *sdev_data; + struct llist_node *node; + + if (llist_empty(&dev_data_list)) + return NULL; + + node = dev_data_list.first; + llist_for_each_entry(sdev_data, node, dev_data_list) { + if (sdev_data->devid == devid) + return sdev_data; + } + + return NULL; +} + +/********************************************************************** + * + * Following functions describe IOMMU init ops + * + **********************************************************************/ + +static struct sunway_iommu *sunway_iommu_early_init(struct pci_controller *hose) +{ + struct sunway_iommu *iommu; + struct page *page; + unsigned long base; + + hose->pci_iommu = kzalloc(sizeof(struct sunway_iommu), GFP_KERNEL); + if (!hose->pci_iommu) + return 0; + + iommu = hose->pci_iommu; + spin_lock_init(&iommu->dt_lock); + + iommu->node = hose->node; + if (!node_online(hose->node)) + iommu->node = -1; + + page = alloc_pages_node(iommu->node, __GFP_ZERO, get_order(PAGE_SIZE)); + if (!page) { + pr_err("Allocating a new iommu_dtbr page failed.\n"); + kfree(hose->pci_iommu); + return NULL; + } + + iommu->iommu_dtbr = page_address(page); + + iommu->hose_pt = hose; + iommu->index = hose->index; + + iommu->enabled = true; + + base = __pa(iommu->iommu_dtbr) & PAGE_MASK; + write_piu_ior0(hose->node, hose->index, DTBASEADDR, base); + + return iommu; +} + +unsigned long fetch_dte(struct sunway_iommu *iommu, unsigned long devid, + enum exceptype type) +{ + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_val; + + if (!iommu) + return 0; + dte_l1 = iommu->iommu_dtbr + (devid >> 8); + if (type == DTE_LEVEL1) + return (unsigned long)dte_l1; + + dte_l1_val = *dte_l1; + if (type == DTE_LEVEL1_VAL) + return dte_l1_val; + + dte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + dte_l1_val |= PAGE_OFFSET; + dte_l2 = (unsigned long *)(dte_l1_val + ((devid & 0xff) << 3)); + if (type == DTE_LEVEL2) + return (unsigned long)dte_l2; + + dte_l2_val = *dte_l2; + if (type == DTE_LEVEL2_VAL) + return dte_l2_val; + + return dte_l2_val; +} + +unsigned long fetch_pte(struct sunway_iommu_domain *sdomain, dma_addr_t iova, + enum exceptype type) +{ + unsigned long iova_pfn, pte_l1_val, pte_l2_val; + unsigned long *pte_l1, *pte_l2; + unsigned long pte_root; + unsigned long offset; + + if (!sdomain) + return -EINVAL; + + pte_root = __pa(sdomain->pt_root) & PAGE_MASK; + iova_pfn = iova >> PAGE_SHIFT; + pte_root = ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK)); + pte_root |= PAGE_OFFSET; + offset = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL1_OFFSET) << 3; + pte_l1 = (unsigned long *)(pte_root + offset); + if (type == PTE_LEVEL1) + return (unsigned long)pte_l1; + + pte_l1_val = *pte_l1; + if (type == PTE_LEVEL1_VAL) + return pte_l1_val; + + pte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + pte_l1_val |= PAGE_OFFSET; + offset = (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; + pte_l2 = (unsigned long *)(pte_l1_val + offset); + + if (type == PTE_LEVEL2) + return 
(unsigned long)pte_l2; + + pte_l2_val = *pte_l2; + if (type == PTE_LEVEL2_VAL) + return pte_l2_val; + + return pte_l2_val; +} + +/* IOMMU Interrupt handle */ +irqreturn_t iommu_interrupt(int irq, void *dev) +{ + struct pci_controller *hose = (struct pci_controller *)dev; + struct sunway_iommu_domain *sdomain; + struct sunway_iommu_dev *sdev; + unsigned long iommu_status; + unsigned long type; + unsigned long devid, dva; + + iommu_status = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); + if (!(iommu_status >> 63)) + return IRQ_NONE; + + type = (iommu_status >> 59) & 0x7; + devid = (iommu_status >> 37) & 0xffff; + dva = iommu_status & 0xffffffff; + pr_info("%s, iommu_status = %#lx, devid %#lx, dva %#lx, ", + __func__, iommu_status, devid, dva); + + sdev = search_dev_data(devid); + if (sdev == NULL) { + pr_info("no such dev!!!\n"); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + + return IRQ_HANDLED; + } + + sdomain = sdev->domain; + switch (type) { + case DTE_LEVEL1: + pr_info("invalid level1 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1_VAL)); + break; + case DTE_LEVEL2: + pr_info("invalid level2 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2_VAL)); + break; + case PTE_LEVEL1: + pr_info("invalid level1 pte, addr: %#lx, val:%#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL1), + fetch_pte(sdomain, dva, PTE_LEVEL1_VAL)); + break; + case PTE_LEVEL2: + pr_info("invalid level2 pte, addr: %#lx, val: %#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL2), + fetch_pte(sdomain, dva, PTE_LEVEL2_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + + case UNAUTHORIZED_ACCESS: + pr_info("unauthorized access\n"); + break; + case ILLEGAL_RESPONSE: + pr_info("illegal response\n"); + break; + default: + pr_info("unknown error\n"); + break; + } + + return IRQ_HANDLED; +} + +struct irqaction iommu_irqaction = { + .handler = iommu_interrupt, + .flags = IRQF_SHARED | IRQF_NO_THREAD, + .name = "sunway_iommu", +}; + +void sunway_enable_iommu_func(struct pci_controller *hose) +{ + unsigned int iommu_irq, err; + unsigned long iommu_conf, iommu_ctrl; + + iommu_irq = hose->int_irq; + pr_debug("%s node %ld rc %ld iommu_irq %d\n", + __func__, hose->node, hose->index, iommu_irq); + err = request_irq(iommu_irq, iommu_interrupt, + IRQF_SHARED, "sunway_iommu", hose); + if (err < 0) + pr_info("sw iommu request irq failed!\n"); + + iommu_ctrl = (1UL << 63) | (0x100UL << 10); + write_piu_ior0(hose->node, hose->index, IOMMUEXCPT_CTRL, iommu_ctrl); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + iommu_conf = iommu_conf | (0x3 << 7); + write_piu_ior0(hose->node, hose->index, PIUCONFIG0, iommu_conf); + write_piu_ior0(hose->node, hose->index, TIMEOUT_CONFIG, 0xf); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + pr_debug("SW arch configure node %ld hose-%ld iommu_conf = %#lx\n", + hose->node, hose->index, iommu_conf); +} + +static bool is_iommu_enable(struct pci_controller *hose) +{ + u64 rc_mask = 0x1; + + rc_mask <<= (8 * hose->node + hose->index); + if (iommu_enable_cmd & rc_mask) + return true; + + return false; +} + +/* iommu cpu syscore ops */ +static int iommu_cpu_suspend(void) +{ + return 0; +} + +static void iommu_cpu_resume(void) +{ + +} + +struct syscore_ops iommu_cpu_syscore_ops = { + 
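+	/*
+	 * Both hooks above are intentionally empty: translation state lives
+	 * in the PIU registers (e.g. DTBASEADDR) and would have to be saved
+	 * and reprogrammed here if suspend/resume were supported.
+	 */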
.suspend = iommu_cpu_suspend, + .resume = iommu_cpu_resume, +}; + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type); + +static int sunway_iommu_init(void) +{ + struct pci_controller *hose; + struct sunway_iommu *iommu; + int ret; + int iommu_index = 0; + + sunway_iommu_domain_bitmap = + (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(MAX_DOMAIN_NUM / 8)); + if (sunway_iommu_domain_bitmap == NULL) + return 0; + __set_bit(0, sunway_iommu_domain_bitmap); + + /* Do the loop */ + for (hose = hose_head; hose; hose = hose->next) { + if (!is_iommu_enable(hose)) { + hose->iommu_enable = false; + continue; + } + + iommu = sunway_iommu_early_init(hose); + if (!iommu) { + pr_err("Allocating sunway_iommu failed\n"); + hose->iommu_enable = false; + continue; + } + + iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "%d", + iommu_index); + iommu_index++; + sunway_enable_iommu_func(hose); + hose->iommu_enable = true; + + iommu_device_register(&iommu->iommu, &sunway_iommu_ops, NULL); + } + + ret = iova_cache_get(); + if (ret) + return ret; + + for (hose = hose_head; hose; hose = hose->next) + if (hose->iommu_enable) + piu_flush_all(hose); + + register_syscore_ops(&iommu_cpu_syscore_ops); + + return 1; +} +device_initcall(sunway_iommu_init); + +/******************************************************************************* + * + * DMA OPS Functions + * + ******************************************************************************/ + +struct sunway_iommu *get_first_iommu_from_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct sunway_iommu_dev *entry; + + entry = list_first_entry(&sdomain->dev_list, struct sunway_iommu_dev, list); + iommu = entry->iommu; + + return iommu; +} + +static unsigned long +sunway_iommu_unmap_page(struct sunway_iommu_domain *sunway_domain, + unsigned long iova, unsigned long page_size) +{ + unsigned long *pte_l2, unmapped; + + pr_debug("%s iova %#lx, page_size %#lx\n", __func__, iova, page_size); + BUG_ON(!is_power_of_2(page_size)); + + unmapped = 0; + while (unmapped < page_size) { + pte_l2 = (unsigned long *)fetch_pte(sunway_domain, iova, PTE_LEVEL2); + *pte_l2 = 0; + + flush_pcache_by_addr(sunway_domain, (unsigned long)pte_l2); + flush_ptlb_by_addr(sunway_domain, (iova >> PAGE_SHIFT)); + + iova += PAGE_SIZE; + unmapped += PAGE_SIZE; + } + + return unmapped; +} + +int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, + unsigned long bus_addr, unsigned long paddr, + size_t page_size) +{ + /* + * pde: page table entry + * pte: level 2 page table entry + * pte_root: page table root + */ + struct page *page; + struct sunway_iommu *iommu; + unsigned long pde, pte, iova_pfn; + unsigned long pdebaseaddr; + u64 *ptebasecond, ptebaseaddr; + u64 pte_root = (__pa(sunway_domain->pt_root) & PAGE_MASK); + + iova_pfn = (unsigned long)(bus_addr >> PAGE_SHIFT); + + pdebaseaddr = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL1_OFFSET) << 3; + pdebaseaddr += ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK)) + + PAGE_OFFSET; + + pde = *(unsigned long *)pdebaseaddr; + if (pde) { + ptebaseaddr = (pde & (~SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + PAGE_OFFSET; + ptebaseaddr += (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; + + goto direct_map; + } + + iommu = get_first_iommu_from_domain(sunway_domain); + if (!iommu) + return -1; + page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); + if (!page) { + pr_err("Allocating pages failed.\n"); + return -1; + } + + ptebasecond = page_address(page); + pde = 
(__pa(ptebasecond) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + + /* + * If pde exists, no need to allocate a new page. + * Atomic compare and exchange, compare the value the pointer points to + * with 0UL. If identical, store pde where the pointer points to, return + * 0UL. Otherwise, return the value the pointer points to. + */ + if (cmpxchg64((volatile u64 *)pdebaseaddr, 0ULL, pde)) { + ptebaseaddr = ((*(volatile u64 *)pdebaseaddr) + & (~SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + PAGE_OFFSET; + ptebaseaddr += (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; + free_page((unsigned long)ptebasecond); + } else { + flush_pcache_by_addr(sunway_domain, pdebaseaddr); + ptebaseaddr = (unsigned long)ptebasecond + + ((iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3); + } + +direct_map: + /* case 8K */ + if (page_size == (1UL << PAGE_SHIFT)) { + if (*(volatile u64 *)ptebaseaddr) { + pr_err("IOVA 4G overlap. IOVA is %#lx.\n", bus_addr); + return -EFAULT; + } + + pte = (paddr & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID + | SW64_IOMMU_GRN_8K | SW64_IOMMU_ENABLE; + *(volatile u64 *)ptebaseaddr = pte; + flush_pcache_by_addr(sunway_domain, ptebaseaddr); + /* case 8M */ + } else if (page_size == (1UL << PAGE_8M_SHIFT)) { + unsigned long *ptr; + int i, ptes_one_page, ptes_one_cache; + + ptr = (unsigned long *)ptebaseaddr; + ptes_one_page = PAGE_SIZE/sizeof(pte); + ptes_one_cache = L1_CACHE_BYTES/sizeof(pte); + + pte = (paddr & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID + | SW64_IOMMU_GRN_8M | SW64_IOMMU_ENABLE; + + for (i = 0; i < ptes_one_page; i++) { + if (*ptr) { + pr_err("IOVA 4G overlap. IOVA is %#lx.\n", bus_addr); + return -EFAULT; + } + + *ptr = pte; + + /* just do once flush per cache line */ + if (i % ptes_one_cache == (ptes_one_cache - 1)) + flush_pcache_by_addr(sunway_domain, (unsigned long)ptr); + ptr++; + } + } +#ifdef CONFIG_SW64_GUEST + flush_ptlb_by_addr(sunway_domain, pfn | SW64_IOMMU_MAP_FLAG); +#endif + return 0; +} + +/********************************************************************** + * + * IOMMU OPS Functions + * + **********************************************************************/ + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + switch (type) { + case IOMMU_DOMAIN_UNMANAGED: + sdomain = sunway_domain_alloc(); + if (!sdomain) { + pr_err("Allocating sunway_domain failed!\n"); + return NULL; + } + + sdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!sdomain->pt_root) { + pr_err("Allocating pt_root failed!\n"); + sunway_domain_free(sdomain); + return NULL; + } + + sdomain->domain.geometry.aperture_start = 0ULL; + sdomain->domain.geometry.aperture_end = (~0ULL); + sdomain->domain.geometry.force_aperture = true; + sdomain->type = IOMMU_DOMAIN_UNMANAGED; + break; + + case IOMMU_DOMAIN_DMA: + dma_dom = dma_domain_alloc(); + if (!dma_dom) { + pr_err("Failed to alloc dma domain!\n"); + return NULL; + } + + sdomain = &dma_dom->sdomain; + break; + + case IOMMU_DOMAIN_IDENTITY: + sdomain = sunway_domain_alloc(); + if (!sdomain) + return NULL; + + sdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!sdomain->pt_root) { + pr_err("Allocating pt_root failed!\n"); + sunway_domain_free(sdomain); + return NULL; + } + + sdomain->type = IOMMU_DOMAIN_IDENTITY; + iommu_identity_mapping = 1; + break; + + default: + return NULL; + } + + return &sdomain->domain; +} + +static void clean_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *entry; + unsigned long flags; + + 
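+	/*
+	 * Detach every device still bound to this domain under the device
+	 * table lock; __detach_device() unlinks each entry, so the list
+	 * shrinks until it is empty.
+	 */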
spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + + while (!list_empty(&sdomain->dev_list)) { + entry = list_first_entry(&sdomain->dev_list, + struct sunway_iommu_dev, list); + + __detach_device(entry); + } + + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); +} + +static void sunway_iommu_domain_free(struct iommu_domain *dom) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + sdomain = to_sunway_domain(dom); + + if (sdomain->dev_cnt > 0) + clean_domain(sdomain); + + if (!dom) + return; + + switch (dom->type) { + case IOMMU_DOMAIN_DMA: + dma_dom = to_dma_domain(sdomain); + dma_domain_free(dma_dom); + break; + + default: + free_pagetable(sdomain); + sunway_domain_free(sdomain); + break; + } + +} + +static int sunway_iommu_attach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + struct sunway_iommu_dev *sdev_data; + struct pci_dev *pdev; + struct pci_controller *hose; + int ret; + + if (!dev_is_pci(dev)) + return -ENODEV; + + pdev = to_pci_dev(dev); + if (!pdev) + return -EINVAL; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return -EINVAL; + + if (!hose->iommu_enable) + return -EINVAL; + + sdev_data = dev_iommu_priv_get(dev); + if (!sdev_data) + return -EINVAL; + + if (sdev_data->domain) + detach_device(dev); + + ret = attach_device(dev, sdomain); + + return ret; +} + +static phys_addr_t +sunway_iommu_iova_to_phys(struct iommu_domain *dom, dma_addr_t iova) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + unsigned long paddr, grn; + + if (iova >= SW64_BAR_ADDRESS) + return iova; + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL2_VAL); + + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + paddr &= ~SW64_IOMMU_ENTRY_VALID; + grn = paddr & SW64_PTE_GRN_MASK; /* get page granularity */ + paddr &= PAGE_MASK; + + switch (grn) { + case SW64_IOMMU_GRN_8M: + paddr += (iova & ~HPAGE_MASK); + break; + case SW64_IOMMU_GRN_8K: + default: + paddr += (iova & ~PAGE_MASK); + break; + } + + return paddr; +} + +static int +sunway_iommu_map_pages(struct iommu_domain *dom, unsigned long iova, + phys_addr_t paddr, size_t page_size, size_t pgcount, + int iommu_prot, gfp_t gfp, size_t *mapped) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + size_t size = pgcount << PAGE_SHIFT; + int ret; + + /* + * As VFIO cannot distinguish between normal DMA request + * and pci device BAR, check should be introduced manually + * to avoid VFIO trying to map pci config space. 
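+	 *
+	 * For instance (an illustrative reading, assuming SW64_BAR_ADDRESS
+	 * is IO_BASE | PCI_BASE as elsewhere in this driver): a VFIO slot
+	 * backed by guest RAM at a low IOVA is mapped normally, while one
+	 * covering a device BAR window is reported as success untouched:
+	 *
+	 *	if (iova >= SW64_BAR_ADDRESS)
+	 *		return 0;    (no PTE is written)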
+	 */
+	if (iova >= SW64_BAR_ADDRESS)
+		return 0;
+
+	mutex_lock(&sdomain->api_lock);
+	while (pgcount--) {
+		ret = sunway_iommu_map_page(sdomain, iova, paddr, page_size);
+		if (ret) {
+			pr_info("Failed to map page at IOVA %#lx.\n", iova);
+			mutex_unlock(&sdomain->api_lock);
+			return ret;
+		}
+		iova += page_size;
+		paddr += page_size;
+	}
+	mutex_unlock(&sdomain->api_lock);
+
+	if (mapped)
+		*mapped = size;
+
+	return 0;
+}
+
+static size_t
+sunway_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
+			 size_t page_size, size_t pgcount,
+			 struct iommu_iotlb_gather *gather)
+{
+	struct sunway_iommu_domain *sdomain = to_sunway_domain(dom);
+	size_t unmap_size;
+	size_t total_unmap = 0;
+
+	if (iova >= SW64_BAR_ADDRESS)
+		return page_size;
+
+	mutex_lock(&sdomain->api_lock);
+	while (pgcount--) {
+		unmap_size = sunway_iommu_unmap_page(sdomain, iova, page_size);
+		iova += page_size;
+		total_unmap += unmap_size;
+	}
+	mutex_unlock(&sdomain->api_lock);
+
+	return total_unmap;
+}
+
+static struct iommu_group *sunway_iommu_device_group(struct device *dev)
+{
+	return generic_device_group(dev);
+}
+
+static int iommu_init_device(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+	struct sunway_iommu *iommu;
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+
+	if (dev_iommu_priv_get(dev))
+		return 0;
+
+	sdev = kzalloc(sizeof(struct sunway_iommu_dev), GFP_KERNEL);
+	if (!sdev)
+		return -ENOMEM;
+
+	pdev = to_pci_dev(dev);
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	iommu = hose->pci_iommu;
+	llist_add(&sdev->dev_data_list, &dev_data_list);
+	sdev->pdev = pdev;
+	sdev->iommu = iommu;
+
+	dev_iommu_priv_set(dev, sdev);
+
+	return 0;
+}
+
+static void iommu_uninit_device(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+
+	sdev = dev_iommu_priv_get(dev);
+	if (!sdev)
+		return;
+
+	if (sdev->domain)
+		detach_device(dev);
+
+	dev_iommu_priv_set(dev, NULL);
+}
+
+static void sunway_iommu_release_device(struct device *dev)
+{
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+
+	pdev = to_pci_dev(dev);
+	if (!pdev)
+		return;
+
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	if (!hose->iommu_enable)
+		return;
+
+	iommu_uninit_device(dev);
+}
+
+static struct iommu_device *sunway_iommu_probe_device(struct device *dev)
+{
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+	struct sunway_iommu *iommu;
+	int ret;
+
+	if (!dev_is_pci(dev))
+		return ERR_PTR(-ENODEV);
+
+	pdev = to_pci_dev(dev);
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	if (!hose)
+		return ERR_PTR(-ENODEV);
+
+	if (!hose->iommu_enable)
+		return ERR_PTR(-ENODEV);
+
+	iommu = hose->pci_iommu;
+	if (dev_iommu_priv_get(dev))
+		return &iommu->iommu;
+
+	ret = iommu_init_device(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return &iommu->iommu;
+}
+
+static int sunway_iommu_def_domain_type(struct device *dev)
+{
+	if (dev_is_pci(dev)) {
+		if (iommu_identity_mapping)
+			return IOMMU_DOMAIN_IDENTITY;
+	}
+
+	return 0;
+}
+
+static bool sunway_iommu_capable(struct device *dev, enum iommu_cap cap)
+{
+	return false;
+}
+
+static void sunway_iommu_probe_finalize(struct device *dev)
+{
+	set_dma_ops(dev, NULL);
+	iommu_setup_dma_ops(dev, 0, SW64_DMA_LIMIT);
+}
+
+const struct iommu_ops sunway_iommu_ops = {
+	.capable = sunway_iommu_capable,
+	.domain_alloc = sunway_iommu_domain_alloc,
+	.probe_device = sunway_iommu_probe_device,
+	.probe_finalize = sunway_iommu_probe_finalize,
+	.release_device = sunway_iommu_release_device,
+	.device_group = sunway_iommu_device_group,
+	.pgsize_bitmap = 
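+	/*
+	 * An illustrative expansion of the bitmap that follows, assuming
+	 * PAGE_SHIFT == 13 and the 8K/8M granules handled in
+	 * sunway_iommu_map_page(): (1UL << 13) | (1UL << 23).
+	 */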
SW64_IOMMU_PGSIZES, + .def_domain_type = sunway_iommu_def_domain_type, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = sunway_iommu_attach_device, + .map_pages = sunway_iommu_map_pages, + .unmap_pages = sunway_iommu_unmap_pages, + .iova_to_phys = sunway_iommu_iova_to_phys, + .free = sunway_iommu_domain_free, + } +}; + +/***************************************************************************** + * + * Boot param handle + * Each bit of iommu_enable bitmap represents an rc enable, and every 8 bits + * represents one cpu node. For example, iommu_enable=0x0100 means enabling + * rc0 for cpu node 1. + * + *****************************************************************************/ +static int __init iommu_enable_setup(char *str) +{ + int ret; + unsigned long rc_bitmap = 0xffffffffUL; + + ret = kstrtoul(str, 16, &rc_bitmap); + iommu_enable_cmd = rc_bitmap; + + return ret; +} +early_param("iommu_enable", iommu_enable_setup); diff --git a/drivers/iommu/sw64/iommu_v2.c b/drivers/iommu/sw64/iommu_v2.c new file mode 100644 index 0000000000000000000000000000000000000000..f3e19e524210d5da2e362ee48b7dd658742dc29d --- /dev/null +++ b/drivers/iommu/sw64/iommu_v2.c @@ -0,0 +1,1780 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * iommu.c: Generic sw64 IOMMU support + * + * This is designed and tested for 3231. If there are no changes in hardware + * in later chips, then it should work just as well. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sunway_iommu.h" + +#define MAX_DOMAIN_NUM 65536 +#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) +#define SW64_32BIT_DMA_LIMIT (0xe0000000 - 1) +#define SW64_64BIT_DMA_LIMIT ((1UL << 41) - 1) +#define SW64_BAR_ADDRESS (IO_BASE | PCI_BASE) + +#define SW64_IOMMU_PGSIZES (((1ULL) << PAGE_SHIFT) \ + | ((1ULL) << PAGE_8M_SHIFT) \ + | ((1ULL) << PAGE_512M_SHIFT) \ + | ((1ULL) << PAGE_8G_SHIFT)) + +#define IDENTMAP_ALL ((1U) << 0) +#define DMA_MASK64 ((1U) << 1) + +#define PTE_VALID 0x8000000000000000UL +#define LAST_STAGE 0x100UL +#define PTE_GRN_8M 0x10UL +#define PTE_GRN_512M 0x20UL +#define PTE_GRN_8G 0x30UL +#define PTE_WRITEE 0x2UL +#define PTE_READE 0x1UL +#define PTE_RWE 0x3UL +#define PTE_FLAGS_MASK 0x8000000000000133UL +#define PAGE_8G_OFFSET_MASK ((1UL << PAGE_8G_SHIFT) - 1) +#define PAGE_512M_OFFSET_MASK ((1UL << PAGE_512M_SHIFT) - 1) +#define PAGE_8M_OFFSET_MASK ((1UL << PAGE_8M_SHIFT) - 1) + +/* IOMMU Exceptional Status */ +enum exceptype { + DTE_LEVEL1 = 0x0, + DTE_LEVEL2, + PTE_LEVEL1, + PTE_LEVEL2, + PTE_LEVEL3, + UNAUTHORIZED_ACCESS, + ILLEGAL_RESPONSE, + DTE_LEVEL1_VAL, + DTE_LEVEL2_VAL, + PTE_LEVEL1_VAL, + PTE_LEVEL2_VAL, + PTE_LEVEL3_VAL, +}; + +u64 iommu_enable_cmd; /* default IOMMU boot param: 0 */ + +unsigned long *sunway_iommu_domain_bitmap; + +static DEFINE_SPINLOCK(domain_bitmap_lock); +static DEFINE_SPINLOCK(sunway_iommu_device_table_lock); +spinlock_t sunway_domain_lock; + +static LLIST_HEAD(dev_data_list); +LIST_HEAD(sunway_domain_list); + +struct dma_domain { + struct sunway_iommu_domain sdomain; + struct iova_domain iovad; +}; +const struct iommu_ops sunway_iommu_ops; +static const struct dma_map_ops sunway_dma_ops; + + +/* flush helpers */ +static void piu_flush_all(struct pci_controller *hose) +{ + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, 
PTLB_FLUSHALL, 0);
+	write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHALL, 0);
+}
+
+void flush_pcache_by_addr(struct sunway_iommu_domain *sdomain, unsigned long flush_addr)
+{
+	struct pci_controller *hose;
+	struct sunway_iommu_dev *sdev;
+
+	/* convert once; converting per device would translate twice */
+	flush_addr = __pa(flush_addr);
+
+	list_for_each_entry(sdev, &sdomain->dev_list, list) {
+		hose = pci_bus_to_pci_controller(sdev->pdev->bus);
+
+		/* Set memory bar here */
+		mb();
+		write_piu_ior0(hose->node, hose->index,
+				PCACHE_FLUSHPADDR, flush_addr);
+	}
+}
+
+void flush_ptlb_by_addr(struct sunway_iommu_domain *sdomain, unsigned long flush_addr)
+{
+	struct pci_controller *hose;
+	struct sunway_iommu_dev *sdev;
+	struct pci_dev *pdev;
+	unsigned long address;
+
+	list_for_each_entry(sdev, &sdomain->dev_list, list) {
+		pdev = sdev->pdev;
+		hose = pci_bus_to_pci_controller(pdev->bus);
+
+		/* keep flush_addr intact across devices */
+		address = (pdev->bus->number << 8)
+			| pdev->devfn | (flush_addr << 16);
+		write_piu_ior0(hose->node, hose->index,
+				PTLB_FLUSHVADDR, address);
+	}
+}
+
+/* domain helpers */
+static struct sunway_iommu_domain *to_sunway_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct sunway_iommu_domain, domain);
+}
+
+static struct dma_domain *to_dma_domain(struct sunway_iommu_domain *sdomain)
+{
+	return container_of(sdomain, struct dma_domain, sdomain);
+}
+
+static void add_domain_to_list(struct sunway_iommu_domain *sdomain)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sunway_domain_lock, flags);
+	list_add(&sdomain->list, &sunway_domain_list);
+	spin_unlock_irqrestore(&sunway_domain_lock, flags);
+}
+
+static void del_domain_from_list(struct sunway_iommu_domain *sdomain)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sunway_domain_lock, flags);
+	list_del(&sdomain->list);
+	spin_unlock_irqrestore(&sunway_domain_lock, flags);
+}
+
+static void free_pagetable(struct sunway_iommu_domain *sdomain)
+{
+	unsigned long *l2_pte, *l3_pte;
+	unsigned long l2_pte_val, l3_pte_val;
+	int l2_index, l3_index, ptes_one_page;
+
+	l2_pte = sdomain->pt_root;
+	if (!l2_pte)
+		return;
+
+	ptes_one_page = PAGE_SIZE/sizeof(unsigned long);
+	for (l2_index = 0; l2_index < ptes_one_page; l2_index++, l2_pte++) {
+		l2_pte_val = *l2_pte;
+		if ((l2_pte_val & SW64_IOMMU_ENTRY_VALID) == 0)
+			continue;
+
+		l2_pte_val &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK;
+		l2_pte_val |= PAGE_OFFSET;
+		l3_pte = (unsigned long *)l2_pte_val;
+		for (l3_index = 0; l3_index < ptes_one_page; l3_index++, l3_pte++) {
+			l3_pte_val = *l3_pte;
+			if ((l3_pte_val & SW64_IOMMU_ENTRY_VALID) == 0)
+				continue;
+
+			l3_pte_val &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK;
+			l3_pte_val |= PAGE_OFFSET;
+			free_page(l3_pte_val);
+		}
+		free_page(l2_pte_val);
+	}
+
+	free_page((unsigned long)sdomain->pt_root);
+}
+
+static void domain_id_free(int id)
+{
+	spin_lock(&domain_bitmap_lock);
+	if (id > 0)
+		__clear_bit(id, sunway_iommu_domain_bitmap);
+	spin_unlock(&domain_bitmap_lock);
+}
+
+static void dma_domain_free(struct dma_domain *dma_dom)
+{
+	if (!dma_dom)
+		return;
+
+	del_domain_from_list(&dma_dom->sdomain);
+	put_iova_domain(&dma_dom->iovad);
+	free_pagetable(&dma_dom->sdomain);
+	if (dma_dom->sdomain.id)
+		domain_id_free(dma_dom->sdomain.id);
+
+	kfree(dma_dom);
+}
+
+static void sunway_domain_free(struct sunway_iommu_domain *sdomain)
+{
+	if (!sdomain)
+		return;
+
+	del_domain_from_list(sdomain);
+	if (sdomain->id)
+		domain_id_free(sdomain->id);
+
+	kfree(sdomain);
+}
+
+static u16 sunway_domain_id_alloc(void)
+{
+	int id;
+
+	spin_lock(&domain_bitmap_lock);
+	id = find_first_zero_bit(sunway_iommu_domain_bitmap,
MAX_DOMAIN_NUM); + if (id > 0 && id < MAX_DOMAIN_NUM) + __set_bit(id, sunway_iommu_domain_bitmap); + else + id = 0; + spin_unlock(&domain_bitmap_lock); + + return id; +} + +static int sunway_domain_init(struct sunway_iommu_domain *sdomain) +{ + spin_lock_init(&sdomain->lock); + mutex_init(&sdomain->api_lock); + sdomain->id = sunway_domain_id_alloc(); + if (!sdomain->id) + return -ENOMEM; + INIT_LIST_HEAD(&sdomain->dev_list); + + return 1; +} + +static struct sunway_iommu_domain *sunway_domain_alloc(void) +{ + struct sunway_iommu_domain *sdomain; + + sdomain = kzalloc(sizeof(struct sunway_iommu_domain), GFP_KERNEL); + if (!sdomain) + return NULL; + + if (!sunway_domain_init(sdomain)) { + kfree(sdomain); + return NULL; + } + + add_domain_to_list(sdomain); + return sdomain; +} + +static struct dma_domain *dma_domain_alloc(void) +{ + struct dma_domain *dma_dom; + struct page; + + dma_dom = kzalloc(sizeof(struct dma_domain), GFP_KERNEL); + if (!dma_dom) + return NULL; + + sunway_domain_init(&dma_dom->sdomain); + dma_dom->sdomain.type = IOMMU_DOMAIN_DMA; + init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_PFN(SW64_DMA_START)); + reserve_iova(&dma_dom->iovad, (0xe0000000UL >> PAGE_SHIFT), (0x100000000UL >> PAGE_SHIFT)); + + add_domain_to_list(&dma_dom->sdomain); + + return dma_dom; +} + +static void device_flush_all(struct sunway_iommu_dev *sdata) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(sdata->pdev->bus); + + if (hose == NULL) + return; + + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHDEV, sdata->devid); +} + +/* iommu_ops device attach/unattach helpers */ +static void +set_dte_entry(struct sunway_iommu_dev *sdev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct pci_dev *pdev; + struct page *dt_page, *pt_page; + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_base, dte_l2_val; + + pdev = sdev->pdev; + if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) + return; + + sdev->devid = PCI_DEVID(pdev->bus->number, pdev->devfn); + iommu = sdev->iommu; + dte_l1 = iommu->iommu_dtbr + (pdev->bus->number); + dte_l1_val = *dte_l1; + + if (!dte_l1_val) { + /* Alloc a new level-2 device table page */ + dt_page = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, + get_order(PAGE_SIZE)); + if (!dt_page) { + pr_err("Allocating a new level-2 device table page failed.\n"); + return; + } + + dte_l2_base = (unsigned long)page_address(dt_page); + dte_l1_val = (__pa(dte_l2_base) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + *dte_l1 = dte_l1_val; + } + + if (!sdomain->pt_root) { + pt_page = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, 0); + if (!pt_page) { + pr_err("Allocating pt_root failed!\n"); + return; + } + + sdomain->pt_root = page_address(pt_page); + } + + dte_l2 = __va(dte_l1_val & ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + (pdev->devfn << 3); + dte_l2_val = (__pa(sdomain->pt_root) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + if (sdomain->type == IOMMU_DOMAIN_IDENTITY) { + dte_l2_val |= 0x1; + sdev->passthrough = IDENTMAP_ALL; + } + *dte_l2 = dte_l2_val; + device_flush_all(sdev); +} + +static void +do_attach(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + sdev_data->domain = sdomain; + list_add(&sdev_data->list, &sdomain->dev_list); + + sdomain->dev_cnt++; + set_dte_entry(sdev_data, sdomain); + + pr_debug("iommu: device %d add to domain: %d\n", + sdev_data->devid, 
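+		 /*
+		  * Illustrative devid decomposition (derived from PCI_DEVID,
+		  * not new behaviour): for 0000:01:02.3, bus = 0x01 and
+		  * devfn = PCI_DEVFN(2, 3) = 0x13, so devid = 0x113.
+		  * set_dte_entry() above indexes the level-1 device table by
+		  * bus and the level-2 table by devfn * 8 bytes.
+		  */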
sdomain->id); +} + +static void do_detach(struct sunway_iommu_dev *sdev_data) +{ + struct sunway_iommu_domain *sdomain = sdev_data->domain; + + sdev_data->domain = NULL; + list_del(&sdev_data->list); + device_flush_all(sdev_data); + + sdomain->dev_cnt--; + pr_debug("iommu: device %d detached from domain %d\n", + sdev_data->devid, sdomain->id); +} + +static int +__attach_device(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + int ret; + + spin_lock(&sdomain->lock); + ret = -EBUSY; + if (sdev_data->domain != NULL) + goto out_unlock; + + do_attach(sdev_data, sdomain); + ret = 0; + +out_unlock: + spin_unlock(&sdomain->lock); + return ret; +} + +static void __detach_device(struct sunway_iommu_dev *sunway_dev_data) +{ + struct sunway_iommu_domain *domain; + + domain = sunway_dev_data->domain; + + spin_lock(&domain->lock); + do_detach(sunway_dev_data); + spin_unlock(&domain->lock); +} + +static int attach_device(struct device *dev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *sdev; + unsigned long flags; + int ret; + + sdev = dev_iommu_priv_get(dev); + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + ret = __attach_device(sdev, sdomain); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + return ret; +} + +static void detach_device(struct device *dev) +{ + struct sunway_iommu_domain *sunway_domain; + struct sunway_iommu_dev *sdev; + unsigned long flags; + + sdev = dev_iommu_priv_get(dev); + sunway_domain = sdev->domain; + + if (WARN_ON(!sdev->domain)) + return; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + __detach_device(sdev); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + if (!dev_is_pci(dev)) + return; +} + +static struct sunway_iommu_dev *search_dev_data(u16 devid) +{ + struct sunway_iommu_dev *sdev_data; + struct llist_node *node; + + if (llist_empty(&dev_data_list)) + return NULL; + + node = dev_data_list.first; + llist_for_each_entry(sdev_data, node, dev_data_list) { + if (sdev_data->devid == devid) + return sdev_data; + } + + return NULL; +} + +/* dma_ops helpers*/ +static struct sunway_iommu_domain *get_sunway_domain(struct device *dev) +{ + struct sunway_iommu_domain *sdomain; + struct iommu_domain *domain; + struct pci_dev *pdev; + struct sunway_iommu_dev *sdev; + + pdev = to_pci_dev(dev); + if (!pdev) + return ERR_PTR(-ENODEV); + + sdev = dev_iommu_priv_get(dev); + sdomain = sdev->domain; + if (sdomain == NULL) { + domain = iommu_get_domain_for_dev(dev); + sdomain = to_sunway_domain(domain); + attach_device(dev, sdomain); + } + + if (sdomain == NULL) + return ERR_PTR(-EBUSY); + + return sdomain; +} + +/********************************************************************** + * + * Following functions describe IOMMU init ops + * + **********************************************************************/ + +static struct sunway_iommu *sunway_iommu_early_init(struct pci_controller *hose) +{ + struct sunway_iommu *iommu; + struct page *page; + unsigned long base; + + hose->pci_iommu = kzalloc(sizeof(struct sunway_iommu), GFP_KERNEL); + if (!hose->pci_iommu) + return 0; + + iommu = hose->pci_iommu; + spin_lock_init(&iommu->dt_lock); + + iommu->node = hose->node; + if (!node_online(hose->node)) + iommu->node = -1; + + page = alloc_pages_node(iommu->node, __GFP_ZERO, get_order(PAGE_SIZE)); + if (!page) { + pr_err("Allocating a new iommu_dtbr page failed.\n"); + kfree(hose->pci_iommu); + return NULL; + } + iommu->iommu_dtbr = page_address(page); + + iommu->hose_pt = hose; 
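+	/*
+	 * Sizing note (illustrative, derived from the bus-number indexing
+	 * in set_dte_entry()/fetch_dte()): the level-1 device table holds
+	 * one u64 per bus, so only 256 * 8 = 2 KiB of the page programmed
+	 * into DTBASEADDR below is ever indexed.
+	 */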
+ iommu->index = hose->index; + + iommu->enabled = true; + + base = __pa(iommu->iommu_dtbr) & PAGE_MASK; + write_piu_ior0(hose->node, hose->index, DTBASEADDR, base); + + return iommu; +} + +unsigned long fetch_dte(struct sunway_iommu *iommu, unsigned long devid, + enum exceptype type) +{ + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_val; + + if (!iommu) + return 0; + dte_l1 = iommu->iommu_dtbr + (devid >> 8); + if (type == DTE_LEVEL1) + return (unsigned long)dte_l1; + + dte_l1_val = *dte_l1; + if (type == DTE_LEVEL1_VAL) + return dte_l1_val; + + dte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + dte_l1_val |= PAGE_OFFSET; + dte_l2 = (unsigned long *)(dte_l1_val + ((devid & 0xff) << 3)); + if (type == DTE_LEVEL2) + return (unsigned long)dte_l2; + + dte_l2_val = *dte_l2; + if (type == DTE_LEVEL2_VAL) + return dte_l2_val; + + return dte_l2_val; +} + +unsigned long fetch_pte(struct sunway_iommu_domain *sdomain, dma_addr_t iova, + enum exceptype type) +{ + unsigned long iova_pfn; + unsigned long pte_l1_val, pte_l2_val, pte_l3_val; + unsigned long *pte_l1, *pte_l2, *pte_l3; + unsigned long pte_root; + unsigned long offset; + + if (!sdomain) + return -EINVAL; + + pte_root = __pa(sdomain->pt_root) & PAGE_MASK; + iova_pfn = iova >> PAGE_SHIFT; + pte_root = ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK)); + pte_root |= PAGE_OFFSET; + offset = ((iova_pfn >> 20) & SW64_IOMMU_LEVEL1_OFFSET) << 3; + pte_l1 = (unsigned long *)(pte_root + offset); + if (type == PTE_LEVEL1) + return (unsigned long)pte_l1; + + pte_l1_val = *pte_l1; + if (type == PTE_LEVEL1_VAL) + return pte_l1_val; + + pte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + pte_l1_val |= PAGE_OFFSET; + offset = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL2_OFFSET) << 3; + pte_l2 = (unsigned long *)(pte_l1_val + offset); + + if (type == PTE_LEVEL2) + return (unsigned long)pte_l2; + + pte_l2_val = *pte_l2; + if (type == PTE_LEVEL2_VAL) + return pte_l2_val; + + pte_l2_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + pte_l2_val |= PAGE_OFFSET; + offset = (iova_pfn & SW64_IOMMU_LEVEL3_OFFSET) << 3; + pte_l3 = (unsigned long *)(pte_l2_val + offset); + if (type == PTE_LEVEL3) + return (unsigned long)pte_l3; + + pte_l3_val = *pte_l3; + if (type == PTE_LEVEL3_VAL) + return pte_l3_val; + + return pte_l3_val; +} + +/* IOMMU Interrupt handle */ +irqreturn_t iommu_interrupt(int irq, void *dev) +{ + struct pci_controller *hose = (struct pci_controller *)dev; + struct sunway_iommu_domain *sdomain; + struct sunway_iommu_dev *sdev; + unsigned long iommu_status; + unsigned long type; + unsigned long devid, dva; + + iommu_status = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); + if (!(iommu_status >> 63)) + return IRQ_NONE; + + type = (iommu_status >> 58) & 0xf; + devid = (iommu_status >> 36) & 0xffff; + dva = ((iommu_status & 0xffffffff) >> 3) << 13; + pr_info("%s, iommu_status = %#lx, devid %#lx, dva %#lx, ", + __func__, iommu_status, devid, dva); + + sdev = search_dev_data(devid); + if (sdev == NULL) { + pr_info("no such dev!!!\n"); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + + return IRQ_HANDLED; + } + + sdomain = sdev->domain; + switch (type) { + case DTE_LEVEL1: + pr_info("invalid level1 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1_VAL)); + break; + case DTE_LEVEL2: + pr_info("invalid level2 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, 
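+			  /*
+			   * Status word layout as decoded earlier in this
+			   * handler (an illustrative summary, not a spec
+			   * citation):
+			   *   bit  63      fault pending
+			   *   bits 58..61  exception type
+			   *   bits 36..51  devid (bus << 8 | devfn)
+			   *   bits 0..31   faulting address, dva = (v >> 3) << 13
+			   */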
DTE_LEVEL2), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2_VAL)); + break; + case PTE_LEVEL1: + pr_info("invalid level1 pte, addr: %#lx, val:%#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL1), + fetch_pte(sdomain, dva, PTE_LEVEL1_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + case PTE_LEVEL2: + pr_info("invalid level2 pte, addr: %#lx, val: %#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL2), + fetch_pte(sdomain, dva, PTE_LEVEL2_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + + case PTE_LEVEL3: + pr_info("invalid level3 pte, addr: %#lx, val: %#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL3), + fetch_pte(sdomain, dva, PTE_LEVEL3_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + default: + pr_info("iommu exception type %ld\n", type); + break; + } + + return IRQ_HANDLED; +} + +struct irqaction iommu_irqaction = { + .handler = iommu_interrupt, + .flags = IRQF_SHARED | IRQF_NO_THREAD, + .name = "sunway_iommu", +}; + +void sunway_enable_iommu_func(struct pci_controller *hose) +{ + unsigned int iommu_irq, err; + unsigned long iommu_conf, iommu_ctrl; + + iommu_irq = hose->int_irq; + pr_debug("%s node %ld rc %ld iommu_irq %d\n", + __func__, hose->node, hose->index, iommu_irq); + err = request_irq(iommu_irq, iommu_interrupt, + IRQF_SHARED, "sunway_iommu", hose); + if (err < 0) + pr_info("sw iommu request irq failed!\n"); + + iommu_ctrl = (1UL << 63) | (0x100UL << 10); + write_piu_ior0(hose->node, hose->index, IOMMUEXCPT_CTRL, iommu_ctrl); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + iommu_conf = iommu_conf | (0x3 << 7); + write_piu_ior0(hose->node, hose->index, PIUCONFIG0, iommu_conf); + write_piu_ior0(hose->node, hose->index, TIMEOUT_CONFIG, 0xf); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + pr_debug("SW arch configure node %ld hose-%ld iommu_conf = %#lx\n", + hose->node, hose->index, iommu_conf); +} + +static bool is_iommu_enable(struct pci_controller *hose) +{ + u64 rc_mask = 0x1; + + rc_mask <<= (8 * hose->node + hose->index); + if (iommu_enable_cmd & rc_mask) + return true; + + return false; +} + +/* iommu cpu syscore ops */ +static int iommu_cpu_suspend(void) +{ + return 0; +} + +static void iommu_cpu_resume(void) +{ + +} + +struct syscore_ops iommu_cpu_syscore_ops = { + .suspend = iommu_cpu_suspend, + .resume = iommu_cpu_resume, +}; + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type); + +static int sunway_iommu_init(void) +{ + struct pci_controller *hose; + struct sunway_iommu *iommu; + int ret; + int iommu_index = 0; + + sunway_iommu_domain_bitmap = + (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(MAX_DOMAIN_NUM / 8)); + if (sunway_iommu_domain_bitmap == NULL) + return 0; + __set_bit(0, sunway_iommu_domain_bitmap); + + /* Do the loop */ + for (hose = hose_head; hose; hose = hose->next) { + if (!is_iommu_enable(hose)) { + hose->iommu_enable = false; + continue; + } + + iommu = sunway_iommu_early_init(hose); + if (!iommu) { + pr_err("Allocating sunway_iommu failed\n"); + hose->iommu_enable = false; + continue; + } + + iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "%d", + iommu_index); + iommu_device_set_ops(&iommu->iommu, &sunway_iommu_ops); + iommu_device_register(&iommu->iommu); + iommu_index++; + sunway_enable_iommu_func(hose); + hose->iommu_enable = true; + } + + ret = 
iova_cache_get(); + if (ret) + return ret; + + ret = bus_set_iommu(&pci_bus_type, &sunway_iommu_ops); + if (ret) + return ret; + + for (hose = hose_head; hose; hose = hose->next) + if (hose->iommu_enable) + piu_flush_all(hose); + + register_syscore_ops(&iommu_cpu_syscore_ops); + + return 1; +} +subsys_initcall_sync(sunway_iommu_init); + +/******************************************************************************* + * + * DMA OPS Functions + * + ******************************************************************************/ + +struct sunway_iommu *get_first_iommu_from_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct sunway_iommu_dev *entry; + + entry = list_first_entry(&sdomain->dev_list, struct sunway_iommu_dev, list); + iommu = entry->iommu; + + return iommu; +} + +static unsigned long +sunway_iommu_unmap_page(struct sunway_iommu_domain *sunway_domain, + unsigned long iova, unsigned long page_size) +{ + unsigned long offset, iova_pfn; + unsigned long *pte_base, *pte; + unsigned long grn; + int level, current_level; + int tmp = 1; + + pr_debug("%s iova %#lx, page_size %#lx\n", __func__, iova, page_size); + BUG_ON(!is_power_of_2(page_size)); + + switch (page_size) { + case (1UL << 33): + level = 1; + grn = PTE_GRN_8G; + break; + case (1UL << 29): + level = 2; + grn = PTE_GRN_512M; + break; + case (1UL << 23): + level = 2; + grn = PTE_GRN_8M; + break; + default: + level = 3; + break; + } + + pte_base = sunway_domain->pt_root; + iova_pfn = iova >> PAGE_SHIFT; + offset = (iova_pfn >> 20) & 0x1ff; + current_level = 1; + while (current_level <= level) { + pte = &pte_base[offset]; + if (current_level == level) { + if (grn == PTE_GRN_512M) { + int i; + + for (i = 0; i < 64; i++) { + *(pte + i) = 0; + flush_pcache_by_addr(sunway_domain, (unsigned long)pte); + } + + } else { + *pte = 0; + flush_pcache_by_addr(sunway_domain, (unsigned long)pte); + } + flush_ptlb_by_addr(sunway_domain, (iova >> PAGE_SHIFT)); + break; + } + + pte_base = (unsigned long *)((*pte & (~PTE_FLAGS_MASK)) | PAGE_OFFSET); + offset = (iova_pfn >> (tmp--) * 10) & 0x3ff; + current_level++; + } + + return page_size; +} + +int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, + unsigned long bus_addr, unsigned long paddr, + size_t page_size) +{ + struct page *page; + struct sunway_iommu *iommu; + unsigned long iova_pfn, pte_val; + unsigned long *pte_base, *pte; + unsigned long offset, grn = 0; + int level = 0, current_level; + int tmp = 1; + + iommu = get_first_iommu_from_domain(sunway_domain); + if (!iommu) + return -1; + iova_pfn = bus_addr >> PAGE_SHIFT; + pte_base = sunway_domain->pt_root; + + switch (page_size) { + case (1UL << 33): + level = 1; + grn = PTE_GRN_8G; + break; + case (1UL << 29): + level = 2; + grn = PTE_GRN_512M; + break; + case (1UL << 23): + grn = PTE_GRN_8M; + level = 2; + break; + default: + level = 3; + break; + } + + offset = (iova_pfn >> 20) & 0x1ff; + current_level = 1; + while (current_level <= level) { + pte = &pte_base[offset]; + + if (!(*pte) || (current_level == level)) { + pte_val = PTE_VALID | PTE_RWE | grn; + if (current_level == level) { + *(volatile u64 *)(pte) = 0; + pte_val |= ((paddr & PAGE_MASK) | LAST_STAGE); + } else { + page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); + if (!page) { + pr_err("Allocating level%d page table pages failed.\n", (level + 1)); + return -ENOMEM; + } + + pte_val |= (page_to_phys(page) & PAGE_MASK); + } + + if ((grn == PTE_GRN_512M) && (current_level == 2)) { + int i; + + for (i = 0; i < 64; 
i++) { + cmpxchg64((volatile u64 *)(pte + i), 0UL, pte_val); + flush_pcache_by_addr(sunway_domain, (unsigned long)(pte + i)); + } + } else { + if (cmpxchg64((volatile u64 *)pte, 0UL, pte_val)) + free_page((unsigned long)page_address(page)); + else + flush_pcache_by_addr(sunway_domain, (unsigned long)pte); + } + } + + pte_base = (unsigned long *)__va((*pte) & (~PTE_FLAGS_MASK)); + offset = (iova_pfn >> (tmp--) * 10) & 0x3ff; + current_level++; + } + + return 0; +} + +static unsigned long +sunway_alloc_iova(struct dma_domain *dma_dom, unsigned long pages, struct pci_dev *pdev) +{ + struct device *dev; + unsigned long pfn = 0; + + pages = __roundup_pow_of_two(pages); + dev = &(pdev->dev); + if (min(dev->coherent_dma_mask, *dev->dma_mask) == DMA_BIT_MASK(32)) { + pfn = alloc_iova_fast(&dma_dom->iovad, pages, + IOVA_PFN(SW64_32BIT_DMA_LIMIT), true); + } else { + /* IOVA boundary should be 16M ~ 3.5G */ + pfn = alloc_iova_fast(&dma_dom->iovad, pages, + IOVA_PFN(SW64_64BIT_DMA_LIMIT), true); + } + + return (pfn << PAGE_SHIFT); +} + +static void sunway_free_iova(struct dma_domain *dma_dom, + unsigned long address, unsigned long pages) +{ + pages = __roundup_pow_of_two(pages); + address >>= PAGE_SHIFT; + + free_iova_fast(&dma_dom->iovad, address, pages); +} + +static dma_addr_t +__sunway_map_single(struct dma_domain *dma_dom, + struct pci_dev *pdev, phys_addr_t paddr, size_t size) +{ + dma_addr_t ret, address, start; + unsigned long npages, i; + + npages = iommu_num_pages(paddr, size, PAGE_SIZE); + + address = sunway_alloc_iova(dma_dom, npages, pdev); + if (!address) + return 0; + + start = address; + for (i = 0; i < npages; ++i) { + ret = sunway_iommu_map_page(&dma_dom->sdomain, start, + paddr, PAGE_SIZE); + if (ret) { + pr_info("error when map page.\n"); + goto out_unmap; + } + + start += PAGE_SIZE; + paddr += PAGE_SIZE; + } + + address += paddr & ~PAGE_MASK; + return address; + +out_unmap: + for (--i; i >= 0; --i) { + start -= PAGE_SIZE; + sunway_iommu_unmap_page(&dma_dom->sdomain, start, PAGE_SIZE); + } + + sunway_free_iova(dma_dom, address, npages); + return 0; +} + +static dma_addr_t +pci_iommu_map_single(struct pci_dev *pdev, + struct dma_domain *dma_dom, void *cpu_addr, size_t size) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus); + unsigned long paddr; + + if (hose == NULL) { + pr_err("%s: hose does not exist!\n", __func__); + return 0; + } + + paddr = __sunway_map_single(dma_dom, pdev, __pa(cpu_addr), size); + + pr_debug("pci_alloc_consistent: %zx -> [%px,%lx] from %ps\n", + size, cpu_addr, paddr, __builtin_return_address(0)); + + return paddr; +} + +static void *sunway_alloc_coherent(struct device *dev, + size_t size, + dma_addr_t *dma_addr, gfp_t gfp, + unsigned long attrs) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *hose; + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct sunway_iommu_dev *sdev; + struct page *page; + void *cpu_addr; + + if (!pdev) + return NULL; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return NULL; + + gfp &= ~GFP_DMA; + +try_again: + page = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, get_order(size)); + if (!page) { + pr_err("Allocating pages failed.\n"); + return NULL; + } + + cpu_addr = page_address(page); + if (!cpu_addr) { + pr_info + ("pci_alloc_consistent: get_free_pages failed from %ps\n", + __builtin_return_address(0)); + + return NULL; + } + + *dma_addr = __pa(cpu_addr); + if (!(hose->iommu_enable)) + return cpu_addr; + + sdev = 
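+	/*
+	 * The passthrough fast paths below, summarized (this mirrors the
+	 * checks that follow; it adds no new behaviour):
+	 *
+	 *	passthrough & DMA_MASK64      -> return physical address
+	 *	passthrough, mask > 32 bits   -> latch DMA_MASK64, go direct
+	 *	passthrough, mask <= 32 bits  -> fall back to arch dma_ops
+	 *	otherwise                     -> translate via the IOMMU
+	 */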
dev_iommu_priv_get(dev); + if (sdev->passthrough & DMA_MASK64) + return cpu_addr; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + return cpu_addr; + } + + __free_pages(page, get_order(size)); + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->alloc(dev, size, dma_addr, gfp, attrs); + } + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + *dma_addr = pci_iommu_map_single(pdev, dma_dom, cpu_addr, size); + if (*dma_addr == 0) { + free_pages((unsigned long)cpu_addr, get_order(size)); + if (gfp & GFP_DMA) + return NULL; + + gfp |= GFP_DMA; + goto try_again; + } + + return cpu_addr; +} + +static void +__sunway_unmap_single(struct dma_domain *dma_dom, dma_addr_t dma_addr, size_t size) +{ + dma_addr_t start; + unsigned long npages; + int i; + + npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); + dma_addr &= PAGE_MASK; + start = dma_addr; + + for (i = 0; i < npages; i++) { + sunway_iommu_unmap_page(&dma_dom->sdomain, start, PAGE_SIZE); + start += PAGE_SIZE; + } + + sunway_free_iova(dma_dom, dma_addr, npages); + pr_debug("pci_free_consistent: %zx -> [%llx] from %ps\n", + size, dma_addr, __builtin_return_address(0)); + +} + +static void +sunway_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_addr, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + + if (!pdev) + goto out_unmap; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose || !(hose->iommu_enable)) + goto out_unmap; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough) + goto out_unmap; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + __sunway_unmap_single(dma_dom, dma_addr, size); + goto out_free; + +out_unmap: + pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); + +out_free: + pr_debug("sunway_free_consistent: [%llx,%zx] from %ps\n", + dma_addr, size, __builtin_return_address(0)); + + free_pages((unsigned long)vaddr, get_order(size)); +} + +static dma_addr_t +sunway_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + phys_addr_t paddr = page_to_phys(page) + offset; + + if (!pdev) + return 0; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose || !(hose->iommu_enable)) + return paddr; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough & DMA_MASK64) + return paddr; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + return paddr; + } + + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_page(dev, page, offset, size, dir, attrs); + } + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + return pci_iommu_map_single(pdev, dma_dom, + (char *)page_address(page) + offset, size); +} + +static void +sunway_unmap_page(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_dev *pdev; + struct pci_controller *hose; + struct sunway_iommu_dev 
*sdev; + + pdev = to_pci_dev(dev); + if (!pdev) + return; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (hose == NULL) + return; + + if (!hose->iommu_enable) + return; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough) + return; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + __sunway_unmap_single(dma_dom, dma_addr, size); +} + +#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG))) +static int +sunway_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom = NULL; + struct scatterlist *sg; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + int i, out_nents = 0; + + if (dir == PCI_DMA_NONE) + BUG(); + + if (!pdev) + return 0; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return 0; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + for_each_sg(sgl, sg, nents, i) { + BUG_ON(!sg_page(sg)); + + sg_dma_address(sg) = __pa(SG_ENT_VIRT_ADDRESS(sg)); + if (!(hose->iommu_enable)) + goto check; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough & DMA_MASK64) + goto check; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + goto check; + } + + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_sg(dev, sgl, nents, dir, attrs); + } + + sg_dma_address(sg) = + pci_iommu_map_single(pdev, dma_dom, + SG_ENT_VIRT_ADDRESS(sg), sg->length); +check: + if (sg_dma_address(sg) == 0) + goto error; + + sg_dma_len(sg) = sg->length; + out_nents++; + } + + return nents; + +error: + pr_warn("pci_map_sg failed:"); + pr_warn("could not allocate dma page tables\n"); + + if (out_nents) + pci_unmap_sg(pdev, sgl, out_nents, dir); + return 0; +} + +static void +sunway_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct scatterlist *sg; + struct pci_dev *pdev; + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + dma_addr_t dma_addr; + long size; + int j; + + pdev = to_pci_dev(dev); + if (!pdev) + return; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose->iommu_enable) + return; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough) + return; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + for_each_sg(sgl, sg, nents, j) { + dma_addr = sg->dma_address; + size = sg->dma_length; + if (!size) + break; + + __sunway_unmap_single(dma_dom, dma_addr, size); + } +} + +static const struct dma_map_ops sunway_dma_ops = { + .alloc = sunway_alloc_coherent, + .free = sunway_free_coherent, + .map_sg = sunway_map_sg, + .unmap_sg = sunway_unmap_sg, + .map_page = sunway_map_page, + .unmap_page = sunway_unmap_page, + .dma_supported = dma_direct_supported, +}; + +/********************************************************************** + * + * IOMMU OPS Functions + * + **********************************************************************/ + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + switch (type) { + case IOMMU_DOMAIN_UNMANAGED: + sdomain = sunway_domain_alloc(); + if (!sdomain) { + pr_err("Allocating sunway_domain failed!\n"); + return NULL; + } + + 
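+		/*
+		 * Unmanaged domains expose the full 64-bit IOVA space; the
+		 * generic geometry fields below tell the IOMMU core that
+		 * addresses outside [aperture_start, aperture_end] must be
+		 * rejected (force_aperture). This is a summary of core
+		 * semantics, not driver-specific behaviour.
+		 */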
sdomain->domain.geometry.aperture_start = 0UL; + sdomain->domain.geometry.aperture_end = ~0ULL; + sdomain->domain.geometry.force_aperture = true; + sdomain->type = IOMMU_DOMAIN_UNMANAGED; + break; + + case IOMMU_DOMAIN_DMA: + dma_dom = dma_domain_alloc(); + if (!dma_dom) { + pr_err("Failed to alloc dma domain!\n"); + return NULL; + } + + sdomain = &dma_dom->sdomain; + break; + + case IOMMU_DOMAIN_IDENTITY: + sdomain = sunway_domain_alloc(); + if (!sdomain) + return NULL; + + sdomain->type = IOMMU_DOMAIN_IDENTITY; + break; + + default: + return NULL; + } + + return &sdomain->domain; +} + +static void clean_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *entry; + unsigned long flags; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + + while (!list_empty(&sdomain->dev_list)) { + entry = list_first_entry(&sdomain->dev_list, + struct sunway_iommu_dev, list); + + BUG_ON(!entry->domain); + __detach_device(entry); + } + + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); +} + +static void sunway_iommu_domain_free(struct iommu_domain *dom) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + sdomain = to_sunway_domain(dom); + + if (sdomain->dev_cnt > 0) + clean_domain(sdomain); + + BUG_ON(sdomain->dev_cnt != 0); + + if (!dom) + return; + + switch (dom->type) { + case IOMMU_DOMAIN_DMA: + dma_dom = to_dma_domain(sdomain); + dma_domain_free(dma_dom); + break; + + default: + free_pagetable(sdomain); + sunway_domain_free(sdomain); + break; + } + +} + +static int sunway_iommu_attach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + struct sunway_iommu_dev *sdev; + struct pci_dev *pdev; + struct pci_controller *hose; + int ret; + + pdev = to_pci_dev(dev); + if (!pdev) + return -EINVAL; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return -EINVAL; + + if (!hose->iommu_enable) + return -EINVAL; + + sdev = dev_iommu_priv_get(dev); + if (!sdev) + return -EINVAL; + + if (sdev->domain) + detach_device(dev); + + ret = attach_device(dev, sdomain); + + return ret; +} + +static void sunway_iommu_detach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_dev *sdev; + struct pci_dev *pdev = to_pci_dev(dev); + + if (!pdev) + return; + + sdev = dev_iommu_priv_get(dev); + if (sdev->domain != NULL) + detach_device(dev); +} + +static phys_addr_t +sunway_iommu_iova_to_phys(struct iommu_domain *dom, dma_addr_t iova) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + unsigned long paddr, grn; + unsigned long is_last; + + if (iova > SW64_BAR_ADDRESS) + return iova; + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL1_VAL); + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + is_last = paddr & SW64_PTE_LAST_MASK; + grn = paddr & SW64_PTE_GRN_MASK; + if (is_last) { + if (grn == PTE_GRN_8G) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_8G_OFFSET_MASK; + return paddr; + } + + return 0; + } + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL2_VAL); + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + is_last = paddr & SW64_PTE_LAST_MASK; + grn = paddr & SW64_PTE_GRN_MASK; + if (is_last) { + if (grn == PTE_GRN_512M) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_512M_OFFSET_MASK; + return paddr; + } + + if (grn == PTE_GRN_8M) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_8M_OFFSET_MASK; + return paddr; + } + + return 0; + } + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL3_VAL); + if ((paddr & 
SW64_IOMMU_ENTRY_VALID) == 0)
+		return 0;
+
+	grn = paddr & SW64_PTE_GRN_MASK;
+	if (grn != 0)
+		return 0;
+
+	paddr &= ~PTE_FLAGS_MASK;
+	paddr += iova & ~PAGE_MASK;
+	return paddr;
+}
+
+static int
+sunway_iommu_map(struct iommu_domain *dom, unsigned long iova,
+		 phys_addr_t paddr, size_t page_size, int iommu_prot, gfp_t gfp)
+{
+	struct sunway_iommu_domain *sdomain = to_sunway_domain(dom);
+	int ret;
+
+	/*
+	 * As VFIO cannot distinguish between normal DMA request
+	 * and pci device BAR, check should be introduced manually
+	 * to avoid VFIO trying to map pci config space.
+	 */
+	if (iova > SW64_BAR_ADDRESS)
+		return 0;
+
+	mutex_lock(&sdomain->api_lock);
+	ret = sunway_iommu_map_page(sdomain, iova, paddr, page_size);
+	mutex_unlock(&sdomain->api_lock);
+
+	return ret;
+}
+
+static size_t
+sunway_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+		   size_t page_size,
+		   struct iommu_iotlb_gather *gather)
+{
+	struct sunway_iommu_domain *sdomain = to_sunway_domain(dom);
+	size_t unmap_size;
+
+	if (iova > SW64_BAR_ADDRESS)
+		return page_size;
+
+	mutex_lock(&sdomain->api_lock);
+	unmap_size = sunway_iommu_unmap_page(sdomain, iova, page_size);
+	mutex_unlock(&sdomain->api_lock);
+
+	return unmap_size;
+}
+
+static struct iommu_group *sunway_iommu_device_group(struct device *dev)
+{
+	return pci_device_group(dev);
+}
+
+static void iommu_uninit_device(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+
+	sdev = dev_iommu_priv_get(dev);
+	if (!sdev)
+		return;
+
+	if (sdev->domain)
+		detach_device(dev);
+
+	dev_iommu_priv_set(dev, NULL);
+}
+
+static void sunway_iommu_release_device(struct device *dev)
+{
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+
+	pdev = to_pci_dev(dev);
+	if (!pdev)
+		return;
+
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	if (!hose->iommu_enable)
+		return;
+
+	iommu_uninit_device(dev);
+}
+
+static int iommu_init_device(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+	struct sunway_iommu *iommu;
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+
+	if (dev_iommu_priv_get(dev))
+		return 0;
+
+	sdev = kzalloc(sizeof(struct sunway_iommu_dev), GFP_KERNEL);
+	if (!sdev)
+		return -ENOMEM;
+
+	pdev = to_pci_dev(dev);
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	iommu = hose->pci_iommu;
+	llist_add(&sdev->dev_data_list, &dev_data_list);
+	sdev->pdev = pdev;
+	sdev->iommu = iommu;
+
+	dev_iommu_priv_set(dev, sdev);
+
+	return 0;
+}
+
+static struct iommu_device *sunway_iommu_probe_device(struct device *dev)
+{
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+	struct sunway_iommu *iommu;
+	int ret;
+
+	pdev = to_pci_dev(dev);
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+		return ERR_PTR(-ENODEV);
+
+	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+		return ERR_PTR(-ENODEV);
+
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	if (!hose)
+		return ERR_PTR(-ENODEV);
+
+	if (!hose->iommu_enable)
+		return ERR_PTR(-ENODEV);
+
+	if (dev_iommu_priv_get(dev)) {
+		iommu = hose->pci_iommu;
+		return &iommu->iommu;
+	}
+
+	ret = iommu_init_device(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	iommu = hose->pci_iommu;
+
+	return &iommu->iommu;
+}
+
+static int sunway_iommu_def_domain_type(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+
+	sdev = dev_iommu_priv_get(dev);
+	if (!sdev->domain)
+		return 0;
+
+	return sdev->domain->type;
+}
+
+static bool sunway_iommu_capable(enum iommu_cap cap)
+{
+	switch (cap) {
+	case IOMMU_CAP_INTR_REMAP:
+		return true;
+	default:
+		return false;
+	}
+}
+
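+/*
+ * Consumer-side sketch for the capability advertised above (illustrative
+ * only, using the generic iommu_capable() API of this kernel generation,
+ * which is how VFIO performs the same check):
+ *
+ *	if (iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP))
+ *		;	// interrupt isolation may be assumed for assignment
+ */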
+static void sunway_iommu_probe_finalize(struct device *dev) +{ + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(dev); + if (domain) + set_dma_ops(dev, &sunway_dma_ops); +} + +const struct iommu_ops sunway_iommu_ops = { + .capable = sunway_iommu_capable, + .domain_alloc = sunway_iommu_domain_alloc, + .domain_free = sunway_iommu_domain_free, + .attach_dev = sunway_iommu_attach_device, + .detach_dev = sunway_iommu_detach_device, + .probe_device = sunway_iommu_probe_device, + .probe_finalize = sunway_iommu_probe_finalize, + .release_device = sunway_iommu_release_device, + .map = sunway_iommu_map, + .unmap = sunway_iommu_unmap, + .iova_to_phys = sunway_iommu_iova_to_phys, + .device_group = sunway_iommu_device_group, + .pgsize_bitmap = SW64_IOMMU_PGSIZES, + .def_domain_type = sunway_iommu_def_domain_type, +}; + +/***************************************************************************** + * + * Boot param handle + * Each bit of iommu_enable bitmap represents an rc enable, and every 8 bits + * represents one cpu node. For example, iommu_enable=0x0100 means enabling + * rc0 for cpu node 1. + * + *****************************************************************************/ +static int __init iommu_enable_setup(char *str) +{ + int ret; + unsigned long rc_bitmap = 0xffffffffUL; + + ret = kstrtoul(str, 16, &rc_bitmap); + iommu_enable_cmd = rc_bitmap; + + return ret; +} +__setup("iommu_enable=", iommu_enable_setup); diff --git a/drivers/iommu/sw64/sunway_iommu.h b/drivers/iommu/sw64/sunway_iommu.h new file mode 100644 index 0000000000000000000000000000000000000000..94a155001d1b192e99bdbd814ef996550a31e27f --- /dev/null +++ b/drivers/iommu/sw64/sunway_iommu.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains declarations and inline functions for interfacing + * with the PCI initialization routines. 
+ */ +#include +#include +#include +#include + +struct sunway_iommu_bypass_id { + unsigned int vendor; + unsigned int device; +}; + +struct sunway_iommu { + int index; + bool enabled; + unsigned long *iommu_dtbr; + spinlock_t dt_lock; /* Device Table Lock */ + int node; /* NUMA node */ + + struct pci_controller *hose_pt; + struct iommu_device iommu; /* IOMMU core code handle */ +}; + +struct sunway_iommu_dev { + struct list_head list; /* For domain->dev_list */ + struct llist_node dev_data_list; /* Global device list */ + u16 devid; + int alias; + unsigned int passthrough; + struct sunway_iommu *iommu; + struct pci_dev *pdev; + + spinlock_t lock; /* Lock the page table mainly */ + struct sunway_iommu_domain *domain; /* Domain device is bound to */ +}; + +struct sunway_iommu_domain { + unsigned int type; + spinlock_t lock; + struct mutex api_lock; + u16 id; /* Domain ID */ + struct list_head list; /* For list of all SW domains */ + struct list_head dev_list; /* List of devices in this domain */ + struct iommu_domain domain; /* IOMMU domain handle */ + unsigned long *pt_root; /* Page Table root */ + unsigned int dev_cnt; /* Number of devices in this domain */ +}; + +struct sw64dev_table_entry { + u64 data; +}; + +struct sunway_iommu_group { + struct pci_dev *dev; + struct iommu_group *group; +}; + +#define SW64_IOMMU_ENTRY_VALID ((1UL) << 63) +#define SW64_PTE_LAST_MASK ((1UL) << 8) /*last stage valid*/ +#define SW64_DMA_START 0x1000000 +#define SW64_PTE_GRN_MASK ((0x3UL) << 4) +#define PAGE_8M_SHIFT 23 +#define PAGE_512M_SHIFT 29 +#define PAGE_8G_SHIFT 33 +#define SW64_IOMMU_ENABLE 3 +#define SW64_IOMMU_DISABLE 0 +#define SW64_IOMMU_LEVEL1_OFFSET 0x1ff +#define SW64_IOMMU_LEVEL2_OFFSET 0x3ff +#define SW64_IOMMU_LEVEL3_OFFSET 0x3ff +#define SW64_IOMMU_BYPASS 0x1 +#define SW64_IOMMU_MAP_FLAG ((0x1UL) << 20) + +#define PAGE_SHIFT_IOMMU 18 +#define PAGE_SIZE_IOMMU (_AC(1, UL) << PAGE_SHIFT_IOMMU) + +#define PCACHE_FLUSHPADDR_MASK 0xffffffffff80UL diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index e7b736800dd0234d916be0cddfd3d4b9285cdabc..027df575d57f43f61294346323bee5d57d1552c3 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -11,6 +11,39 @@ config ARM_GIC select IRQ_DOMAIN_HIERARCHY select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP +config SW64_INTC_V2 + bool "SW64 Interrupt Controller V2" + depends on UNCORE_XUELANG + default y + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + help + This enables support for the INTC chip found in SW CHIP3 systems. + The INTC controls devices interrupts and connects them to each + core's local interrupt controller. + +config SW64_LPC_INTC + bool "SW64 cpu builtin LPC Interrupt Controller" + depends on SW64_INTC_V2 + help + Say yes here to add support for the SW64 cpu builtin LPC + IRQ controller. 
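+# Example .config fragment (illustrative): the LPC controller can only be
+# selected on top of the v2 INTC it chains from:
+#   CONFIG_SW64_INTC_V2=y
+#   CONFIG_SW64_LPC_INTC=y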
+ +config SW64_IRQ_CPU + bool + depends on SW64 + default y + +config SW64_IRQ_MSI + bool + depends on SW64 && PCI_MSI + default y + +config SW64_IRQ_MSI_VT + bool + depends on SW64_IRQ_MSI + default y + config ARM_GIC_PM bool depends on PM @@ -56,6 +89,14 @@ config ARM_GIC_V3_ITS_FSL_MC depends on FSL_MC_BUS default ARM_GIC_V3_ITS +config ARM_GIC_PHYTIUM_2500 + bool + select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY + select PARTITION_PERCPU + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_MSI_IRQ_DOMAIN + config ARM_NVIC bool select IRQ_DOMAIN_HIERARCHY diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index ffd945fe71aa2ce7e97d1c7e86509886b2fecb23..787206e166fc3318a280957647c56ebb9da2bbe9 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -27,6 +27,17 @@ obj-$(CONFIG_SUN6I_R_INTC) += irq-sun6i-r.o obj-$(CONFIG_SUNXI_NMI_INTC) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o +obj-$(CONFIG_SW64_INTC_V2) += irq-sw64-intc-v2.o +obj-$(CONFIG_SW64_LPC_INTC) += irq-sw64-lpc-intc.o +obj-$(CONFIG_SW64_IRQ_CPU) += irq-sunway-cpu.o + +ifeq ($(CONFIG_UNCORE_XUELANG),y) +obj-$(CONFIG_SW64_IRQ_MSI) += irq-sunway-msi.o +else +obj-$(CONFIG_SW64_IRQ_MSI) += irq-sunway-msi-v2.o +endif + +obj-$(CONFIG_SW64_IRQ_MSI_VT) += irq-sunway-msi-vt.o obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o @@ -34,6 +45,7 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o +obj-$(CONFIG_ARM_GIC_PHYTIUM_2500) += irq-gic-phytium-2500.o irq-gic-phytium-2500-its.o obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o @@ -104,7 +116,7 @@ obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o -obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o +obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o irq-loongarch-avec.o obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c new file mode 100644 index 0000000000000000000000000000000000000000..5685f5f901a1c8f771db25eb43a4b84a38deb88a --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500-its.c @@ -0,0 +1,5766 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Phytium Corporation. + * Author: + * Wang Yinfeng + * Chen Baozi + * Chen Siyu + * Cui Fulong + * Li Yuting + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "irq-gic-common.h" + +#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) +#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) +#define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3) + +#define RD_LOCAL_LPI_ENABLED BIT(0) +#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1) +#define RD_LOCAL_MEMRESERVE_DONE BIT(2) + +static u32 lpi_id_bits; + +/* + * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to + * deal with (one configuration byte per interrupt). PENDBASE has to + * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). + */ +#define LPI_NRBITS lpi_id_bits +#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) +#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) + +#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI + +/* + * Collection structure - just an ID, and a redistributor address to + * ping. We use one per CPU as a bag of interrupts assigned to this + * CPU. + */ +struct its_collection { + u64 target_address; + u16 col_id; +}; + +/* + * The ITS_BASER structure - contains memory information, cached + * value of BASER register configuration and ITS page size. + */ +struct its_baser { + void *base; + u64 val; + u32 order; + u32 psz; +}; + +struct its_device; + +/* + * The ITS structure - contains most of the infrastructure, with the + * top-level MSI domain, the command queue, the collections, and the + * list of devices writing to it. + * + * dev_alloc_lock has to be taken for device allocations, while the + * spinlock must be taken to parse data structures such as the device + * list. 
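+ *
+ * For example (an illustrative reading of the rule above, matching the
+ * mainline GICv3 ITS code this file derives from):
+ *
+ *	mutex_lock(&its->dev_alloc_lock);
+ *	raw_spin_lock_irqsave(&its->lock, flags);
+ *	... walk its->its_device_list ...
+ *	raw_spin_unlock_irqrestore(&its->lock, flags);
+ *	mutex_unlock(&its->dev_alloc_lock);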
+ */ +struct its_node { + raw_spinlock_t lock; + struct mutex dev_alloc_lock; + struct list_head entry; + void __iomem *base; + void __iomem *sgir_base; + phys_addr_t phys_base; + struct its_cmd_block *cmd_base; + struct its_cmd_block *cmd_write; + struct its_baser tables[GITS_BASER_NR_REGS]; + struct its_collection *collections; + struct fwnode_handle *fwnode_handle; + u64 (*get_msi_base)(struct its_device *its_dev); + u64 typer; + u64 cbaser_save; + u32 ctlr_save; + u32 mpidr; + struct list_head its_device_list; + u64 flags; + unsigned long list_nr; + int numa_node; + unsigned int msi_domain_flags; + u32 pre_its_base; /* for Socionext Synquacer */ + int vlpi_redist_offset; +}; + +#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) +#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) +#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) + +#define ITS_ITT_ALIGN SZ_256 + +/* The maximum number of VPEID bits supported by VLPI commands */ +#define ITS_MAX_VPEID_BITS \ + ({ \ + int nvpeid = 16; \ + if (gic_rdists->has_rvpeid && \ + gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \ + nvpeid = 1 + (gic_rdists->gicd_typer2 & \ + GICD_TYPER2_VID); \ + \ + nvpeid; \ + }) +#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) + +/* Convert page order to size in bytes */ +#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) + +struct event_lpi_map { + unsigned long *lpi_map; + u16 *col_map; + irq_hw_number_t lpi_base; + int nr_lpis; + raw_spinlock_t vlpi_lock; + struct its_vm *vm; + struct its_vlpi_map *vlpi_maps; + int nr_vlpis; +}; + +/* + * The ITS view of a device - belongs to an ITS, owns an interrupt + * translation table, and a list of interrupts. If it some of its + * LPIs are injected into a guest (GICv4), the event_map.vm field + * indicates which one. + */ +struct its_device { + struct list_head entry; + struct its_node *its; + struct event_lpi_map event_map; + void *itt; + u32 nr_ites; + u32 device_id; + bool shared; +}; + +static struct { + raw_spinlock_t lock; + struct its_device *dev; + struct its_vpe **vpes; + int next_victim; +} vpe_proxy; + +struct cpu_lpi_count { + atomic_t managed; + atomic_t unmanaged; +}; + +static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count_ft2500); + +static LIST_HEAD(its_nodes); +static DEFINE_RAW_SPINLOCK(its_lock); +static struct rdists *gic_rdists; +static struct irq_domain *its_parent; + +static unsigned long its_list_map; +static u16 vmovp_seq_num; +static DEFINE_RAW_SPINLOCK(vmovp_lock); + +static DEFINE_IDA(its_vpeid_ida); + +#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) +#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) +#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) +#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) + +/* + * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we + * always have vSGIs mapped. 
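+ * (On GICv4.1 every vPE is mapped on every ITS at all times for vSGI
+ * delivery, so a VMOVP must reach all of them even when no vLPI is
+ * currently mapped there.)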
+ */ +static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) +{ + return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); +} + +static u16 get_its_list(struct its_vm *vm) +{ + struct its_node *its; + unsigned long its_list = 0; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (require_its_list_vmovp(vm, its)) + __set_bit(its->list_nr, &its_list); + } + + return (u16)its_list; +} + +static inline u32 its_get_event_id(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + return d->hwirq - its_dev->event_map.lpi_base; +} + +static struct its_collection *dev_event_to_col(struct its_device *its_dev, + u32 event) +{ + struct its_node *its = its_dev->its; + + return its->collections + its_dev->event_map.col_map[event]; +} + +static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev, + u32 event) +{ + if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis)) + return NULL; + + return &its_dev->event_map.vlpi_maps[event]; +} + +static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + return dev_event_to_vlpi_map(its_dev, event); + } + + return NULL; +} + +static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) +{ + raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); + return vpe->col_idx; +} + +static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); +} + +static struct irq_chip its_vpe_irq_chip; + +static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) +{ + struct its_vpe *vpe = NULL; + int cpu; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + + if (map) + vpe = map->vpe; + } + + if (vpe) { + cpu = vpe_to_cpuid_lock(vpe, flags); + } else { + /* Physical LPIs are already locked via the irq_desc lock */ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + cpu = its_dev->event_map.col_map[its_get_event_id(d)]; + /* Keep GCC quiet... */ + *flags = 0; + } + + return cpu; +} + +static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) +{ + struct its_vpe *vpe = NULL; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + + if (map) + vpe = map->vpe; + } + + if (vpe) + vpe_to_cpuid_unlock(vpe, flags); +} + +static struct its_collection *valid_col(struct its_collection *col) +{ + if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) + return NULL; + + return col; +} + +static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) +{ + if (valid_col(its->collections + vpe->col_idx)) + return vpe; + + return NULL; +} + +/* + * ITS command descriptors - parameters to be encoded in a command + * block. 
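+ *
+ * Each member of the union below mirrors the arguments of exactly one
+ * ITS command; its_mapti_cmd, for instance, carries the
+ * DevID/EventID/pINTID triplet that a MAPTI command consumes.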
+ */ +struct its_cmd_desc { + union { + struct { + struct its_device *dev; + u32 event_id; + } its_inv_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_clear_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_int_cmd; + + struct { + struct its_device *dev; + int valid; + } its_mapd_cmd; + + struct { + struct its_collection *col; + int valid; + } its_mapc_cmd; + + struct { + struct its_device *dev; + u32 phys_id; + u32 event_id; + } its_mapti_cmd; + + struct { + struct its_device *dev; + struct its_collection *col; + u32 event_id; + } its_movi_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_discard_cmd; + + struct { + struct its_collection *col; + } its_invall_cmd; + + struct { + struct its_vpe *vpe; + } its_vinvall_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + bool valid; + } its_vmapp_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 virt_id; + u32 event_id; + bool db_enabled; + } its_vmapti_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 event_id; + bool db_enabled; + } its_vmovi_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + u16 seq_num; + u16 its_list; + } its_vmovp_cmd; + + struct { + struct its_vpe *vpe; + } its_invdb_cmd; + + struct { + struct its_vpe *vpe; + u8 sgi; + u8 priority; + bool enable; + bool group; + bool clear; + } its_vsgi_cmd; + }; +}; + +/* + * The ITS command block, which is what the ITS actually parses. + */ +struct its_cmd_block { + union { + u64 raw_cmd[4]; + __le64 raw_cmd_le[4]; + }; +}; + +#define ITS_CMD_QUEUE_SZ SZ_64K +#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) + +typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) +{ + u64 mask = GENMASK_ULL(h, l); + *raw_cmd &= ~mask; + *raw_cmd |= (val << l) & mask; +} + +static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) +{ + its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); +} + +static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) +{ + its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); +} + +static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) +{ + its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); +} + +static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) +{ + its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); +} + +static void its_encode_size(struct its_cmd_block *cmd, u8 size) +{ + its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); +} + +static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); +} + +static void its_encode_valid(struct its_cmd_block *cmd, int valid) +{ + its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); +} + +static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); +} + +static void its_encode_collection(struct its_cmd_block *cmd, u16 col) +{ + its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); +} + +static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) +{ + its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); +} + +static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) +{ + its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); 
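+	/*
+	 * Like every its_encode_*() helper above, this is a thin wrapper
+	 * around its_mask_encode(): here the vINTID lands in bits [31:0]
+	 * of the third command doubleword.
+	 */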
+} + +static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) +{ + its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); +} + +static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) +{ + its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); +} + +static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) +{ + its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); +} + +static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) +{ + its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); +} + +static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); +} + +static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); +} + +static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa) +{ + its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16); +} + +static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc) +{ + its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8); +} + +static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz) +{ + its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9); +} + +static void its_encode_vmapp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0); +} + +static void its_encode_vmovp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0); +} + +static void its_encode_db(struct its_cmd_block *cmd, bool db) +{ + its_mask_encode(&cmd->raw_cmd[2], db, 63, 63); +} + +static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi) +{ + its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32); +} + +static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio) +{ + its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20); +} + +static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp) +{ + its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10); +} + +static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr) +{ + its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9); +} + +static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en) +{ + its_mask_encode(&cmd->raw_cmd[0], en, 8, 8); +} + +static inline void its_fixup_cmd(struct its_cmd_block *cmd) +{ + /* Let's fixup BE commands */ + cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]); + cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]); + cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]); + cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]); +} + +static struct its_collection *its_build_mapd_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long itt_addr; + u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); + + itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); + itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); + + its_encode_cmd(cmd, GITS_CMD_MAPD); + its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); + its_encode_size(cmd, size - 1); + its_encode_itt(cmd, itt_addr); + its_encode_valid(cmd, desc->its_mapd_cmd.valid); + + its_fixup_cmd(cmd); + + return NULL; +} + +static struct its_collection *its_build_mapc_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_MAPC); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); + its_encode_valid(cmd, desc->its_mapc_cmd.valid); + + its_fixup_cmd(cmd); + + 
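+	/*
+	 * Returning the target collection (rather than NULL) makes the
+	 * command engine chase this MAPC with a SYNC to that
+	 * redistributor.
+	 */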
return desc->its_mapc_cmd.col; +} + +static struct its_collection *its_build_mapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_mapti_cmd.dev, + desc->its_mapti_cmd.event_id); + col->col_id = col->col_id % 64; + + its_encode_cmd(cmd, GITS_CMD_MAPTI); + its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); + its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); + its_encode_collection(cmd, col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_movi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_movi_cmd.dev, + desc->its_movi_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_MOVI); + its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_movi_cmd.event_id); + its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_discard_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_discard_cmd.dev, + desc->its_discard_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_DISCARD); + its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_discard_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_inv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_int_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_clear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_invall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_INVALL); + its_encode_collection(cmd, desc->its_invall_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return desc->its_invall_cmd.col; +} + +static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_VINVALL); + its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); + + 
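+	/*
+	 * VINVALL carries no other payload: the vPE ID alone is enough
+	 * for the redistributor to re-read the configuration of every
+	 * vLPI targeting that vPE.
+	 */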
its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vinvall_cmd.vpe); +} + +static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long vpt_addr, vconf_addr; + u64 target; + bool alloc; + + its_encode_cmd(cmd, GITS_CMD_VMAPP); + its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); + its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + + if (!desc->its_vmapp_cmd.valid) { + if (is_v4_1(its)) { + alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); + its_encode_alloc(cmd, alloc); + } + + goto out; + } + + vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + + its_encode_target(cmd, target); + its_encode_vpt_addr(cmd, vpt_addr); + its_encode_vpt_size(cmd, LPI_NRBITS - 1); + + if (!is_v4_1(its)) + goto out; + + vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); + + alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); + + its_encode_alloc(cmd, alloc); + + /* + * GICv4.1 provides a way to get the VLPI state, which needs the vPE + * to be unmapped first, and in this case, we may remap the vPE + * back while the VPT is not empty. So we can't assume that the + * VPT is empty on map. This is why we never advertise PTZ. + */ + its_encode_ptz(cmd, false); + its_encode_vconf_addr(cmd, vconf_addr); + its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi); + +out: + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapp_cmd.vpe); +} + +static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled) + db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMAPTI); + its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapti_cmd.vpe); +} + +static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) + db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMOVI); + its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_db_valid(cmd, true); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovi_cmd.vpe); +} + +static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u64 target; + + target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_cmd(cmd, GITS_CMD_VMOVP); + its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); + its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); + its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); + its_encode_target(cmd, target); + + if (is_v4_1(its)) { + its_encode_db(cmd, true); + its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); + } + + its_fixup_cmd(cmd); + + return valid_vpe(its, 
desc->its_vmovp_cmd.vpe); +} + +static struct its_vpe *its_build_vinv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vint_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vclear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_invdb_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_INVDB); + its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_invdb_cmd.vpe); +} + +static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_VSGI); + its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); + its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi); + its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority); + its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group); + its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear); + its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vsgi_cmd.vpe); +} + +static u64 its_cmd_ptr_to_offset(struct its_node *its, + struct its_cmd_block *ptr) +{ + return (ptr - its->cmd_base) * sizeof(*ptr); +} + +static int its_queue_full(struct its_node *its) +{ + int widx; + int ridx; + + widx = its->cmd_write - its->cmd_base; + ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); + + /* This is incredibly unlikely to happen, unless the ITS locks up. */ + if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) + return 1; + + return 0; +} + +static struct its_cmd_block *its_allocate_entry(struct its_node *its) +{ + struct its_cmd_block *cmd; + u32 count = 1000000; /* 1s! 
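+			   (10^6 polls of the loop below, each with
+			   udelay(1) between them, give the ITS about
+			   one second to drain before we give up)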
*/ + + while (its_queue_full(its)) { + count--; + if (!count) { + pr_err_ratelimited("ITS queue not draining\n"); + return NULL; + } + cpu_relax(); + udelay(1); + } + + cmd = its->cmd_write++; + + /* Handle queue wrapping */ + if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) + its->cmd_write = its->cmd_base; + + /* Clear command */ + cmd->raw_cmd[0] = 0; + cmd->raw_cmd[1] = 0; + cmd->raw_cmd[2] = 0; + cmd->raw_cmd[3] = 0; + + return cmd; +} + +static struct its_cmd_block *its_post_commands(struct its_node *its) +{ + u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); + + writel_relaxed(wr, its->base + GITS_CWRITER); + + return its->cmd_write; +} + +static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) +{ + /* + * Make sure the commands written to memory are observable by + * the ITS. + */ + if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); + else + dsb(ishst); +} + +static int its_wait_for_range_completion(struct its_node *its, + u64 prev_idx, + struct its_cmd_block *to) +{ + u64 rd_idx, to_idx, linear_idx; + u32 count = 1000000; /* 1s! */ + + /* Linearize to_idx if the command set has wrapped around */ + to_idx = its_cmd_ptr_to_offset(its, to); + if (to_idx < prev_idx) + to_idx += ITS_CMD_QUEUE_SZ; + + linear_idx = prev_idx; + + while (1) { + s64 delta; + + rd_idx = readl_relaxed(its->base + GITS_CREADR); + + /* + * Compute the read pointer progress, taking the + * potential wrap-around into account. + */ + delta = rd_idx - prev_idx; + if (rd_idx < prev_idx) + delta += ITS_CMD_QUEUE_SZ; + + linear_idx += delta; + if (linear_idx >= to_idx) + break; + + count--; + if (!count) { + pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", + to_idx, linear_idx); + return -1; + } + prev_idx = rd_idx; + cpu_relax(); + udelay(1); + } + + return 0; +} + +/* Warning, macro hell follows */ +#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ +void name(struct its_node *its, \ + buildtype builder, \ + struct its_cmd_desc *desc) \ +{ \ + struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ + synctype *sync_obj; \ + unsigned long flags; \ + u64 rd_idx; \ + \ + raw_spin_lock_irqsave(&its->lock, flags); \ + \ + cmd = its_allocate_entry(its); \ + if (!cmd) { /* We're soooooo screewed... 
*/ \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + return; \ + } \ + sync_obj = builder(its, cmd, desc); \ + its_flush_cmd(its, cmd); \ + \ + if (sync_obj) { \ + sync_cmd = its_allocate_entry(its); \ + if (!sync_cmd) \ + goto post; \ + \ + buildfn(its, sync_cmd, sync_obj); \ + its_flush_cmd(its, sync_cmd); \ + } \ + \ +post: \ + rd_idx = readl_relaxed(its->base + GITS_CREADR); \ + next_cmd = its_post_commands(its); \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + \ + if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ + pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ +} + +static void its_build_sync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_collection *sync_col) +{ + its_encode_cmd(sync_cmd, GITS_CMD_SYNC); + its_encode_target(sync_cmd, sync_col->target_address); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, + struct its_collection, its_build_sync_cmd) + +static void its_build_vsync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_vpe *sync_vpe) +{ + its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); + its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, + struct its_vpe, its_build_vsync_cmd) + +static void its_send_int(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_int_cmd, &desc); +} + +static void its_send_clear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_clear_cmd, &desc); +} + +static void its_send_inv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_inv_cmd, &desc); +} + +static void its_send_mapd(struct its_device *dev, int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapd_cmd.dev = dev; + desc.its_mapd_cmd.valid = !!valid; + + its_send_single_command(dev->its, its_build_mapd_cmd, &desc); +} + +static void its_send_mapc(struct its_node *its, struct its_collection *col, + int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapc_cmd.col = col; + desc.its_mapc_cmd.valid = !!valid; + + its_send_single_command(its, its_build_mapc_cmd, &desc); +} + +static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_mapti_cmd.dev = dev; + desc.its_mapti_cmd.phys_id = irq_id; + desc.its_mapti_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_mapti_cmd, &desc); +} + +static void its_send_movi(struct its_device *dev, + struct its_collection *col, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_movi_cmd.dev = dev; + desc.its_movi_cmd.col = col; + desc.its_movi_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_movi_cmd, &desc); +} + +static void its_send_discard(struct its_device *dev, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_discard_cmd.dev = dev; + desc.its_discard_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_discard_cmd, &desc); +} + +static void its_send_invall(struct its_node *its, struct its_collection *col) +{ + struct its_cmd_desc desc; + + desc.its_invall_cmd.col = col; + + its_send_single_command(its, 
its_build_invall_cmd, &desc); +} + +static void its_send_vmapti(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); + struct its_cmd_desc desc; + + desc.its_vmapti_cmd.vpe = map->vpe; + desc.its_vmapti_cmd.dev = dev; + desc.its_vmapti_cmd.virt_id = map->vintid; + desc.its_vmapti_cmd.event_id = id; + desc.its_vmapti_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); +} + +static void its_send_vmovi(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); + struct its_cmd_desc desc; + + desc.its_vmovi_cmd.vpe = map->vpe; + desc.its_vmovi_cmd.dev = dev; + desc.its_vmovi_cmd.event_id = id; + desc.its_vmovi_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); +} + +static void its_send_vmapp(struct its_node *its, + struct its_vpe *vpe, bool valid) +{ + struct its_cmd_desc desc; + + desc.its_vmapp_cmd.vpe = vpe; + desc.its_vmapp_cmd.valid = valid; + desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; + + its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); +} + +static void its_send_vmovp(struct its_vpe *vpe) +{ + struct its_cmd_desc desc = {}; + struct its_node *its; + unsigned long flags; + int col_id = vpe->col_idx; + + desc.its_vmovp_cmd.vpe = vpe; + + if (!its_list_map) { + its = list_first_entry(&its_nodes, struct its_node, entry); + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + return; + } + + /* + * Yet another marvel of the architecture. If using the + * its_list "feature", we need to make sure that all ITSs + * receive all VMOVP commands in the same order. The only way + * to guarantee this is to make vmovp a serialization point. + * + * Wall <-- Head. + */ + raw_spin_lock_irqsave(&vmovp_lock, flags); + + desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); + + /* Emit VMOVPs */ + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (!require_its_list_vmovp(vpe->its_vm, its)) + continue; + + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_vinvall_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); +} + +static void its_send_vinv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VINV command. This is just a normal INV, + * with a VSYNC instead of a SYNC. + */ + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc); +} + +static void its_send_vint(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VINT command. This is just a normal INT, + * with a VSYNC instead of a SYNC. + */ + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc); +} + +static void its_send_vclear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VCLEAR command. This is just a normal CLEAR, + * with a VSYNC instead of a SYNC. 
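+ * The same pattern is used by its_send_vinv() and its_send_vint()
+ * above: the physical command encoding is reused, only the trailing
+ * synchronisation command differs.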
+ */ + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc); +} + +static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_invdb_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_invdb_cmd, &desc); +} + +/* + * irqchip functions - assumes MSI, mostly. + */ +static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + irq_hw_number_t hwirq; + void *va; + u8 *cfg; + + if (map) { + va = page_address(map->vm->vprop_page); + hwirq = map->vintid; + + /* Remember the updated property */ + map->properties &= ~clr; + map->properties |= set | LPI_PROP_GROUP1; + } else { + va = gic_rdists->prop_table_va; + hwirq = d->hwirq; + } + + cfg = va + hwirq - 8192; + *cfg &= ~clr; + *cfg |= set | LPI_PROP_GROUP1; + + /* + * Make the above write visible to the redistributors. + * And yes, we're flushing exactly: One. Single. Byte. + * Humpf... + */ + if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); + else + dsb(ishst); +} + +static void wait_for_syncr(void __iomem *rdbase) +{ + while (readl_relaxed(rdbase + GICR_SYNCR) & 1) + cpu_relax(); +} + +static void __direct_lpi_inv(struct irq_data *d, u64 val) +{ + void __iomem *rdbase; + unsigned long flags; + int cpu; + + /* Target the redistributor this LPI is currently routed to */ + cpu = irq_to_cpuid_lock(d, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVLPIR); + wait_for_syncr(rdbase); + + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + irq_to_cpuid_unlock(d, flags); +} + +static void direct_lpi_inv(struct irq_data *d) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + u64 val; + + if (map) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + WARN_ON(!is_v4_1(its_dev->its)); + + val = GICR_INVLPIR_V; + val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); + val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid); + } else { + val = d->hwirq; + } + + __direct_lpi_inv(d, val); +} + +static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + lpi_write_config(d, clr, set); + if (gic_rdists->has_direct_lpi && + (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) + direct_lpi_inv(d); + else if (!irqd_is_forwarded_to_vcpu(d)) + its_send_inv(its_dev, its_get_event_id(d)); + else + its_send_vinv(its_dev, its_get_event_id(d)); +} + +static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + struct its_vlpi_map *map; + + /* + * GICv4.1 does away with the per-LPI nonsense, nothing to do + * here. + */ + if (is_v4_1(its_dev->its)) + return; + + map = dev_event_to_vlpi_map(its_dev, event); + + if (map->db_enabled == enable) + return; + + map->db_enabled = enable; + + /* + * More fun with the architecture: + * + * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI + * value or to 1023, depending on the enable bit. But that + * would be issuing a mapping for an /existing/ DevID+EventID + * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI + * to the /same/ vPE, using this opportunity to adjust the + * doorbell. Mouahahahaha. We loves it, Precious. 
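+	 *
+	 * (A VMOVI to the same vPE is architecturally fine, and its
+	 * db_valid field lets us flip the doorbell between the real LPI
+	 * and 1023 without remapping the DevID/EventID pair.)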
+ */ + its_send_vmovi(its_dev, event); +} + +static void its_mask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, false); + + lpi_update_config(d, LPI_PROP_ENABLED, 0); +} + +static void its_unmask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, true); + + lpi_update_config(d, 0, LPI_PROP_ENABLED); +} + +static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + return atomic_read(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed); + + return atomic_read(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged); +} + +static void its_inc_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_inc(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed); + else + atomic_inc(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged); +} + +static void its_dec_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_dec(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed); + else + atomic_dec(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged); +} + +static unsigned int cpumask_pick_least_loaded(struct irq_data *d, + const struct cpumask *cpu_mask) +{ + unsigned int cpu = nr_cpu_ids, tmp; + int count = S32_MAX; + + for_each_cpu(tmp, cpu_mask) { + int this_count = its_read_lpi_count(d, tmp); + + if (this_count < count) { + cpu = tmp; + count = this_count; + } + } + + return cpu; +} + +/* + * As suggested by Thomas Gleixner in: + * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de + */ +static int its_select_cpu(struct irq_data *d, + const struct cpumask *aff_mask) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + static DEFINE_RAW_SPINLOCK(tmpmask_lock); + static struct cpumask __tmpmask; + struct cpumask *tmpmask; + unsigned long flags; + int cpu, node; + + node = its_dev->its->numa_node; + tmpmask = &__tmpmask; + + raw_spin_lock_irqsave(&tmpmask_lock, flags); + + if (!irqd_affinity_is_managed(d)) { + /* First try the NUMA node */ + if (node != NUMA_NO_NODE) { + /* + * Try the intersection of the affinity mask and the + * node mask (and the online mask, just to be safe). + */ + cpumask_and(tmpmask, cpumask_of_node(node), aff_mask); + cpumask_and(tmpmask, tmpmask, cpu_online_mask); + + /* + * Ideally, we would check if the mask is empty, and + * try again on the full node here. + * + * But it turns out that the way ACPI describes the + * affinity for ITSs only deals about memory, and + * not target CPUs, so it cannot describe a single + * ITS placed next to two NUMA nodes. + * + * Instead, just fallback on the online mask. This + * diverges from Thomas' suggestion above. 
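+		 *
+		 * In short, the search order is (node & affinity &
+		 * online), then (affinity & online), then the plain
+		 * online mask.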
+		 */
+			cpu = cpumask_pick_least_loaded(d, tmpmask);
+			if (cpu < nr_cpu_ids)
+				goto out;
+
+			/* If we can't cross sockets, give up */
+			if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
+				goto out;
+
+			/* If the above failed, expand the search */
+		}
+
+		/* Try the intersection of the affinity and online masks */
+		cpumask_and(tmpmask, aff_mask, cpu_online_mask);
+
+		/* If that doesn't fly, the online mask is the last resort */
+		if (cpumask_empty(tmpmask))
+			cpumask_copy(tmpmask, cpu_online_mask);
+
+		cpu = cpumask_pick_least_loaded(d, tmpmask);
+	} else {
+		cpumask_copy(tmpmask, aff_mask);
+
+		/* If we cannot cross sockets, limit the search to that node */
+		if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
+		    node != NUMA_NO_NODE)
+			cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
+
+		cpu = cpumask_pick_least_loaded(d, tmpmask);
+	}
+out:
+	raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
+
+	pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
+	return cpu;
+}
+
+#define MAX_MARS3_SKT_COUNT	8
+
+static int its_cpumask_select(struct its_device *its_dev,
+			      const struct cpumask *mask_val,
+			      const struct cpumask *cpu_mask)
+{
+	unsigned int skt, skt_id, i;
+	phys_addr_t its_phys_base;
+	unsigned int cpu, cpus = 0;
+
+	unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0};
+
+	for (i = 0; i < nr_cpu_ids; i++) {
+		skt = (cpu_logical_map(i) >> 16) & 0xff;
+		if (skt < MAX_MARS3_SKT_COUNT)	/* skt is unsigned; no lower-bound check needed */
+			skt_cpu_cnt[skt]++;
+		else if (skt != 0xff)
+			pr_err("socket address: %u is out of range\n", skt);
+	}
+
+	its_phys_base = its_dev->its->phys_base;
+	skt_id = (its_phys_base >> 41) & 0x7;
+
+	if (skt_id != 0) {
+		for (i = 0; i < skt_id; i++)
+			cpus += skt_cpu_cnt[i];
+	}
+
+	cpu = cpumask_any_and(mask_val, cpu_mask);
+	cpus = cpus + cpu % skt_cpu_cnt[skt_id];
+
+	if (is_kdump_kernel()) {
+		skt = (cpu_logical_map(cpu) >> 16) & 0xff;
+		if (skt_id == skt)
+			return cpu;
+
+		for (i = 0; i < nr_cpu_ids; i++) {
+			skt = (cpu_logical_map(i) >> 16) & 0xff;
+			if (skt < MAX_MARS3_SKT_COUNT) {
+				if (skt_id == skt)
+					return i;
+			} else if (skt != 0xff)
+				pr_err("socket address: %u is out of range\n", skt);
+		}
+	}
+
+	return cpus;
+}
+
+static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
+{
+	/* keep 'cpu' signed so the error check below actually works */
+	int cpu;
+	const struct cpumask *cpu_mask = cpu_online_mask;
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_collection *target_col;
+	u32 id = its_get_event_id(d);
+	int prev_cpu;
+	unsigned int skt_t1, skt_t2, cpu_idx;
+
+	/* A forwarded interrupt should use irq_set_vcpu_affinity */
+	if (irqd_is_forwarded_to_vcpu(d))
+		return -EINVAL;
+
+	prev_cpu = its_dev->event_map.col_map[id];
+	its_dec_lpi_count(d, prev_cpu);
+
+	cpu_idx = its_cpumask_select(its_dev, mask_val, cpu_mask);
+	skt_t1 = (cpu_logical_map(cpu_idx) >> 16) & 0xff;
+	if (!force)
+		cpu = its_select_cpu(d, mask_val);
+	else
+		cpu = cpumask_pick_least_loaded(d, mask_val);
+	skt_t2 = (cpu_logical_map(cpu) >> 16) & 0xff;
+	if (skt_t1 != skt_t2)
+		cpu = cpu_idx;
+
+	if (cpu < 0 || cpu >= nr_cpu_ids)
+		goto err;
+
+	/* don't set the affinity when the target cpu is the same as the current one */
+	if (cpu != prev_cpu) {
+		target_col = &its_dev->its->collections[cpu];
+		its_send_movi(its_dev, target_col, id);
+		its_dev->event_map.col_map[id] = cpu;
+		irq_data_update_effective_affinity(d, cpumask_of(cpu));
+	}
+
+	its_inc_lpi_count(d, cpu);
+
+	return IRQ_SET_MASK_OK_DONE;
+
+err:
+	its_inc_lpi_count(d, prev_cpu);
+	return -EINVAL;
+}
+
+static u64 its_irq_get_msi_base(struct its_device *its_dev)
+{
+	struct its_node *its = its_dev->its;
+
+	return its->phys_base + GITS_TRANSLATER;
+}
+
+static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_node *its;
+	u64 addr;
+
+	its = its_dev->its;
+	addr = its->get_msi_base(its_dev);
+
+	msg->address_lo		= lower_32_bits(addr);
+	msg->address_hi		= upper_32_bits(addr);
+	msg->data		= its_get_event_id(d);
+}
+
+static int its_irq_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which,
+				     bool state)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+
+	if (which != IRQCHIP_STATE_PENDING)
+		return -EINVAL;
+
+	if (irqd_is_forwarded_to_vcpu(d)) {
+		if (state)
+			its_send_vint(its_dev, event);
+		else
+			its_send_vclear(its_dev, event);
+	} else {
+		if (state)
+			its_send_int(its_dev, event);
+		else
+			its_send_clear(its_dev, event);
+	}
+
+	return 0;
+}
+
+static int its_irq_retrigger(struct irq_data *d)
+{
+	return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
+}
+
+/*
+ * Two favourable cases:
+ *
+ * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
+ *     for vSGI delivery
+ *
+ * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
+ *     and we're better off mapping all VPEs always
+ *
+ * If neither (a) nor (b) is true, then we map vPEs on demand.
+ */
+static bool gic_requires_eager_mapping(void)
+{
+	if (!its_list_map || gic_rdists->has_rvpeid)
+		return true;
+
+	return false;
+}
+
+static void its_map_vm(struct its_node *its, struct its_vm *vm)
+{
+	unsigned long flags;
+
+	if (gic_requires_eager_mapping())
+		return;
+
+	raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+	/*
+	 * If the VM wasn't mapped yet, iterate over the vpes and get
+	 * them mapped now.
+	 */
+	vm->vlpi_count[its->list_nr]++;
+
+	if (vm->vlpi_count[its->list_nr] == 1) {
+		int i;
+
+		for (i = 0; i < vm->nr_vpes; i++) {
+			struct its_vpe *vpe = vm->vpes[i];
+			struct irq_data *d = irq_get_irq_data(vpe->irq);
+
+			/* Map the VPE to the first possible CPU */
+			vpe->col_idx = cpumask_first(cpu_online_mask);
+			its_send_vmapp(its, vpe, true);
+			its_send_vinvall(its, vpe);
+			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
+{
+	unsigned long flags;
+
+	/* Not using the ITS list? Everything is always mapped.
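+	   In that case there is no per-ITS refcount to drop: the
+	   vPEs stay mapped until the VM itself is torn down.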
*/ + if (gic_requires_eager_mapping()) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + if (!--vm->vlpi_count[its->list_nr]) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) + its_send_vmapp(its, vm->vpes[i], false); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + if (!info->map) + return -EINVAL; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm) { + struct its_vlpi_map *maps; + + maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), + GFP_ATOMIC); + if (!maps) { + ret = -ENOMEM; + goto out; + } + + its_dev->event_map.vm = info->map->vm; + its_dev->event_map.vlpi_maps = maps; + } else if (its_dev->event_map.vm != info->map->vm) { + ret = -EINVAL; + goto out; + } + + /* Get our private copy of the mapping information */ + its_dev->event_map.vlpi_maps[event] = *info->map; + + if (irqd_is_forwarded_to_vcpu(d)) { + /* Already mapped, move it around */ + its_send_vmovi(its_dev, event); + } else { + /* Ensure all the VPEs are mapped on this ITS */ + its_map_vm(its_dev->its, info->map->vm); + + /* + * Flag the interrupt as forwarded so that we can + * start poking the virtual property table. + */ + irqd_set_forwarded_to_vcpu(d); + + /* Write out the property to the prop table */ + lpi_write_config(d, 0xff, info->map->properties); + + /* Drop the physical mapping */ + its_send_discard(its_dev, event); + + /* and install the virtual one */ + its_send_vmapti(its_dev, event); + + /* Increment the number of VLPIs */ + its_dev->event_map.nr_vlpis++; + } + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_vlpi_map *map; + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + map = get_vlpi_map(d); + + if (!its_dev->event_map.vm || !map) { + ret = -EINVAL; + goto out; + } + + /* Copy our mapping information to the incoming request */ + *info->map = *map; + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_unmap(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { + ret = -EINVAL; + goto out; + } + + /* Drop the virtual mapping */ + its_send_discard(its_dev, event); + + /* and restore the physical one */ + irqd_clr_forwarded_to_vcpu(d); + its_send_mapti(its_dev, d->hwirq, event); + lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | + LPI_PROP_ENABLED | + LPI_PROP_GROUP1)); + + /* Potentially unmap the VM from this ITS */ + its_unmap_vm(its_dev->its, its_dev->event_map.vm); + + /* + * Drop the refcount and make the device available again if + * this was the last VLPI. 
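+	 *
+	 * nr_vlpis counts the events of this device that are currently
+	 * forwarded to a vCPU; once it reaches zero the vlpi_maps array
+	 * can be freed and the device detached from the VM.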
+	 */
+	if (!--its_dev->event_map.nr_vlpis) {
+		its_dev->event_map.vm = NULL;
+		kfree(its_dev->event_map.vlpi_maps);
+	}
+
+out:
+	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+		return -EINVAL;
+
+	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
+		lpi_update_config(d, 0xff, info->config);
+	else
+		lpi_write_config(d, 0xff, info->config);
+	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
+
+	return 0;
+}
+
+static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_cmd_info *info = vcpu_info;
+
+	/* Need a v4 ITS */
+	if (!is_v4(its_dev->its))
+		return -EINVAL;
+
+	/* Unmap request? */
+	if (!info)
+		return its_vlpi_unmap(d);
+
+	switch (info->cmd_type) {
+	case MAP_VLPI:
+		return its_vlpi_map(d, info);
+
+	case GET_VLPI:
+		return its_vlpi_get(d, info);
+
+	case PROP_UPDATE_VLPI:
+	case PROP_UPDATE_AND_INV_VLPI:
+		return its_vlpi_prop_update(d, info);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static struct irq_chip its_irq_chip = {
+	.name			= "ITS",
+	.irq_mask		= its_mask_irq,
+	.irq_unmask		= its_unmask_irq,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= its_set_affinity,
+	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
+	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
+	.irq_retrigger		= its_irq_retrigger,
+	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
+};
+
+
+/*
+ * How we allocate LPIs:
+ *
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
+ *
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
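+ *
+ * Worked example: with a single free range [8192, 73727], allocating
+ * 32 LPIs returns base 8192 and shrinks the range to [8224, 73727];
+ * freeing those 32 IDs later merges them straight back into the
+ * adjacent range.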
+ */ +#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ + +static DEFINE_MUTEX(lpi_range_lock); +static LIST_HEAD(lpi_range_list); + +struct lpi_range { + struct list_head entry; + u32 base_id; + u32 span; +}; + +static struct lpi_range *mk_lpi_range(u32 base, u32 span) +{ + struct lpi_range *range; + + range = kmalloc(sizeof(*range), GFP_KERNEL); + if (range) { + range->base_id = base; + range->span = span; + } + + return range; +} + +static int alloc_lpi_range(u32 nr_lpis, u32 *base) +{ + struct lpi_range *range, *tmp; + int err = -ENOSPC; + + mutex_lock(&lpi_range_lock); + + list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { + if (range->span >= nr_lpis) { + *base = range->base_id; + range->base_id += nr_lpis; + range->span -= nr_lpis; + + if (range->span == 0) { + list_del(&range->entry); + kfree(range); + } + + err = 0; + break; + } + } + + mutex_unlock(&lpi_range_lock); + + pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); + return err; +} + +static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b) +{ + if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list) + return; + if (a->base_id + a->span != b->base_id) + return; + b->base_id = a->base_id; + b->span += a->span; + list_del(&a->entry); + kfree(a); +} + +static int free_lpi_range(u32 base, u32 nr_lpis) +{ + struct lpi_range *new, *old; + + new = mk_lpi_range(base, nr_lpis); + if (!new) + return -ENOMEM; + + mutex_lock(&lpi_range_lock); + + list_for_each_entry_reverse(old, &lpi_range_list, entry) { + if (old->base_id < base) + break; + } + /* + * old is the last element with ->base_id smaller than base, + * so new goes right after it. If there are no elements with + * ->base_id smaller than base, &old->entry ends up pointing + * at the head of the list, and inserting new it the start of + * the list is the right thing to do in that case as well. + */ + list_add(&new->entry, &old->entry); + /* + * Now check if we can merge with the preceding and/or + * following ranges. + */ + merge_lpi_ranges(old, new); + merge_lpi_ranges(new, list_next_entry(new, entry)); + + mutex_unlock(&lpi_range_lock); + return 0; +} + +static int __init its_lpi_init(u32 id_bits) +{ + u32 lpis = (1UL << id_bits) - 8192; + u32 numlpis; + int err; + + numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer); + + if (numlpis > 2 && !WARN_ON(numlpis > lpis)) { + lpis = numlpis; + pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", + lpis); + } + + /* + * Initializing the allocator is just the same as freeing the + * full range of LPIs. 
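+	 * (8192, because INTIDs 0-8191 are architecturally reserved for
+	 * SGIs, PPIs and SPIs; LPIs always start at INTID 8192.)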
+ */ + err = free_lpi_range(8192, lpis); + pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); + return err; +} + +static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) +{ + unsigned long *bitmap = NULL; + int err = 0; + + do { + err = alloc_lpi_range(nr_irqs, base); + if (!err) + break; + + nr_irqs /= 2; + } while (nr_irqs > 0); + + if (!nr_irqs) + err = -ENOSPC; + + if (err) + goto out; + + bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC); + if (!bitmap) + goto out; + + *nr_ids = nr_irqs; + +out: + if (!bitmap) + *base = *nr_ids = 0; + + return bitmap; +} + +static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) +{ + WARN_ON(free_lpi_range(base, nr_ids)); + bitmap_free(bitmap); +} + +static void gic_reset_prop_table(void *va) +{ + /* Priority 0xa0, Group-1, disabled */ + memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); +} + +static struct page *its_allocate_prop_table(gfp_t gfp_flags) +{ + struct page *prop_page; + + prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); + if (!prop_page) + return NULL; + + gic_reset_prop_table(page_address(prop_page)); + + return prop_page; +} + +static void its_free_prop_table(struct page *prop_page) +{ + free_pages((unsigned long)page_address(prop_page), + get_order(LPI_PROPBASE_SZ)); +} + +static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) +{ + phys_addr_t start, end, addr_end; + u64 i; + + /* + * We don't bother checking for a kdump kernel as by + * construction, the LPI tables are out of this kernel's + * memory map. + */ + if (is_kdump_kernel()) + return true; + + addr_end = addr + size - 1; + + for_each_reserved_mem_range(i, &start, &end) { + if (addr >= start && addr_end <= end) + return true; + } + + /* Not found, not a good sign... 
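+	   A pre-existing LPI table that isn't in a reserved region may
+	   be handed back to the allocator and overwritten by this
+	   kernel, hence the taint below.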
*/ + pr_warn("GIC-2500: Expected reserved range [%pa:%pa], not found\n", + &addr, &addr_end); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + return false; +} + +static int gic_reserve_range(phys_addr_t addr, unsigned long size) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + return efi_mem_reserve_persistent(addr, size); + + return 0; +} + +static int __init its_setup_lpi_prop_table(void) +{ + if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { + u64 val; + + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; + + gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); + gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ, + MEMREMAP_WB); + gic_reset_prop_table(gic_rdists->prop_table_va); + } else { + struct page *page; + + lpi_id_bits = min_t(u32, + GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); + page = its_allocate_prop_table(GFP_NOWAIT); + if (!page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + gic_rdists->prop_table_pa = page_to_phys(page); + gic_rdists->prop_table_va = page_address(page); + WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ)); + } + + pr_info("GIC-2500: using LPI property table @%pa\n", + &gic_rdists->prop_table_pa); + + return its_lpi_init(lpi_id_bits); +} + +static const char * const its_base_type_string[] = { + [GITS_BASER_TYPE_DEVICE] = "Devices", + [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", + [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", + [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", + [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", + [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", + [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", +}; + +static u64 its_read_baser(struct its_node *its, struct its_baser *baser) +{ + u32 idx = baser - its->tables; + + return gits_read_baser(its->base + GITS_BASER + (idx << 3)); +} + +static void its_write_baser(struct its_node *its, struct its_baser *baser, + u64 val) +{ + u32 idx = baser - its->tables; + + gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); + baser->val = its_read_baser(its, baser); +} + +static int its_setup_baser(struct its_node *its, struct its_baser *baser, + u64 cache, u64 shr, u32 order, bool indirect) +{ + u64 val = its_read_baser(its, baser); + u64 esz = GITS_BASER_ENTRY_SIZE(val); + u64 type = GITS_BASER_TYPE(val); + u64 baser_phys, tmp; + u32 alloc_pages, psz; + struct page *page; + void *base; + + psz = baser->psz; + alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); + if (alloc_pages > GITS_BASER_PAGES_MAX) { + pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", + &its->phys_base, its_base_type_string[type], + alloc_pages, GITS_BASER_PAGES_MAX); + alloc_pages = GITS_BASER_PAGES_MAX; + order = get_order(GITS_BASER_PAGES_MAX * psz); + } + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); + if (!page) + return -ENOMEM; + + base = (void *)page_address(page); + baser_phys = virt_to_phys(base); + + /* Check if the physical address of the memory is above 48bits */ + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { + + /* 52bit PA is supported only when PageSize=64K */ + if (psz != SZ_64K) { + pr_err("ITS: no 52bit PA support when psz=%d\n", psz); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + /* Convert 52bit PA to 48bit field */ + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); + } + +retry_baser: + val = (baser_phys | + (type << GITS_BASER_TYPE_SHIFT) | + 
((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
+	       ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
+	       cache |
+	       shr |
+	       GITS_BASER_VALID);
+
+	val |= indirect ? GITS_BASER_INDIRECT : 0x0;
+
+	switch (psz) {
+	case SZ_4K:
+		val |= GITS_BASER_PAGE_SIZE_4K;
+		break;
+	case SZ_16K:
+		val |= GITS_BASER_PAGE_SIZE_16K;
+		break;
+	case SZ_64K:
+		val |= GITS_BASER_PAGE_SIZE_64K;
+		break;
+	}
+
+	its_write_baser(its, baser, val);
+	tmp = baser->val;
+
+	if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
+		tmp &= ~GITS_BASER_SHAREABILITY_MASK;
+
+	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+		/*
+		 * Shareability didn't stick. Just use
+		 * whatever the read reported, which is likely
+		 * to be the only thing this redistributor
+		 * supports. If that's zero, make it
+		 * non-cacheable as well.
+		 */
+		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+		if (!shr) {
+			cache = GITS_BASER_nC;
+			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
+		}
+		goto retry_baser;
+	}
+
+	if (val != tmp) {
+		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
+		       &its->phys_base, its_base_type_string[type],
+		       val, tmp);
+		free_pages((unsigned long)base, order);
+		return -ENXIO;
+	}
+
+	baser->order = order;
+	baser->base = base;
+	baser->psz = psz;
+	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
+
+	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
+		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
+		its_base_type_string[type],
+		(unsigned long)virt_to_phys(base),
+		indirect ? "indirect" : "flat", (int)esz,
+		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+
+	return 0;
+}
+
+static bool its_parse_indirect_baser(struct its_node *its,
+				     struct its_baser *baser,
+				     u32 *order, u32 ids)
+{
+	u64 tmp = its_read_baser(its, baser);
+	u64 type = GITS_BASER_TYPE(tmp);
+	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
+	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
+	u32 new_order = *order;
+	u32 psz = baser->psz;
+	bool indirect = false;
+
+	/* No need to enable Indirection if the memory requirement is < (psz * 2) bytes */
+	if ((esz << ids) > (psz * 2)) {
+		/*
+		 * Find out whether hw supports a single or two-level table
+		 * by reading bit at offset '62' after writing '1' to it.
+		 */
+		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
+		indirect = !!(baser->val & GITS_BASER_INDIRECT);
+
+		if (indirect) {
+			/*
+			 * Each lvl2 table is one ITS page ('psz') in size.
+			 * For the lvl1 table size, subtract the ID bits
+			 * covered by a single lvl2 table from 'ids' as
+			 * reported by the ITS hardware; each remaining ID
+			 * then costs one lvl1 entry.
+			 */
+			ids -= ilog2(psz / (int)esz);
+			esz = GITS_LVL1_ENTRY_SIZE;
+		}
+	}
+
+	/*
+	 * Allocate as many entries as required to fit the
+	 * range of device IDs that the ITS can grok... The ID
+	 * space being incredibly sparse, this results in a
+	 * massive waste of memory if the two-level device table
+	 * feature is not supported by hardware.
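+	 * As a worked example: with ids = 20 and an 8-byte entry
+	 * size, a flat table needs (8 << 20) = 8MB of contiguous
+	 * memory, whereas an indirect one only allocates its lvl2
+	 * pages on demand.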
+ */ + new_order = max_t(u32, get_order(esz << ids), new_order); + if (new_order > MAX_ORDER) { + new_order = MAX_ORDER; + ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); + pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n", + &its->phys_base, its_base_type_string[type], + device_ids(its), ids); + } + + *order = new_order; + + return indirect; +} + +static u32 compute_common_aff(u64 val) +{ + u32 aff, clpiaff; + + aff = FIELD_GET(GICR_TYPER_AFFINITY, val); + clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val); + + return aff & ~(GENMASK(31, 0) >> (clpiaff * 8)); +} + +static u32 compute_its_aff(struct its_node *its) +{ + u64 val; + u32 svpet; + + /* + * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute + * the resulting affinity. We then use that to see if this match + * our own affinity. + */ + svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet); + val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); + return compute_common_aff(val); +} + +static struct its_node *find_sibling_its(struct its_node *cur_its) +{ + struct its_node *its; + u32 aff; + + if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer)) + return NULL; + + aff = compute_its_aff(cur_its); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser; + + if (!is_v4_1(its) || its == cur_its) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + return its; + } + + return NULL; +} + +static void its_free_tables(struct its_node *its) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (its->tables[i].base) { + free_pages((unsigned long)its->tables[i].base, + its->tables[i].order); + its->tables[i].base = NULL; + } + } +} + +static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser) +{ + u64 psz = SZ_64K; + + while (psz) { + u64 val, gpsz; + + val = its_read_baser(its, baser); + val &= ~GITS_BASER_PAGE_SIZE_MASK; + + switch (psz) { + case SZ_64K: + gpsz = GITS_BASER_PAGE_SIZE_64K; + break; + case SZ_16K: + gpsz = GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_4K: + default: + gpsz = GITS_BASER_PAGE_SIZE_4K; + break; + } + + gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT; + + val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz); + its_write_baser(its, baser, val); + + if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz) + break; + + switch (psz) { + case SZ_64K: + psz = SZ_16K; + break; + case SZ_16K: + psz = SZ_4K; + break; + case SZ_4K: + default: + return -1; + } + } + + baser->psz = psz; + return 0; +} + +static int its_alloc_tables(struct its_node *its) +{ + u64 shr = GITS_BASER_InnerShareable; + u64 cache = GITS_BASER_RaWaWb; + int err, i; + + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) + /* erratum 24313: ignore memory access type */ + cache = GITS_BASER_nCnB; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = its->tables + i; + u64 val = its_read_baser(its, baser); + u64 type = GITS_BASER_TYPE(val); + bool indirect = false; + u32 order; + + if (type == GITS_BASER_TYPE_NONE) + continue; + + if (its_probe_baser_psz(its, baser)) { + its_free_tables(its); + return -ENXIO; + } + + order = get_order(baser->psz); + + switch (type) { + case GITS_BASER_TYPE_DEVICE: + indirect = its_parse_indirect_baser(its, baser, &order, + device_ids(its)); + break; + + case GITS_BASER_TYPE_VCPU: + if 
(is_v4_1(its)) { + struct its_node *sibling; + + WARN_ON(i != 2); + sibling = find_sibling_its(its); + if (sibling != NULL) { + *baser = sibling->tables[2]; + its_write_baser(its, baser, baser->val); + continue; + } + } + + indirect = its_parse_indirect_baser(its, baser, &order, + ITS_MAX_VPEID_BITS); + break; + } + + err = its_setup_baser(its, baser, cache, shr, order, indirect); + if (err < 0) { + its_free_tables(its); + return err; + } + + /* Update settings which will be used for next BASERn */ + cache = baser->val & GITS_BASER_CACHEABILITY_MASK; + shr = baser->val & GITS_BASER_SHAREABILITY_MASK; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_its(void) +{ + struct its_node *its; + u64 val; + u32 aff; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser, addr; + + if (!is_v4_1(its)) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + /* We have a winner! */ + gic_data_rdist()->vpe_l1_base = its->tables[2].base; + + val = GICR_VPROPBASER_4_1_VALID; + if (baser & GITS_BASER_INDIRECT) + val |= GICR_VPROPBASER_4_1_INDIRECT; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, + FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); + switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { + case GIC_PAGE_SIZE_64K: + addr = GITS_BASER_ADDR_48_to_52(baser); + break; + default: + addr = baser & GENMASK_ULL(47, 12); + break; + } + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + + return val; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) +{ + u32 aff; + u64 val; + int cpu; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + for_each_possible_cpu(cpu) { + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + + if (!base || cpu == smp_processor_id()) + continue; + + val = gic_read_typer(base + GICR_TYPER); + if (aff != compute_common_aff(val)) + continue; + + /* + * At this point, we have a victim. This particular CPU + * has already booted, and has an affinity that matches + * ours wrt CommonLPIAff. Let's use its own VPROPBASER. + * Make sure we don't write the Z bit in that case. 
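+	 * The inherited table already contains live entries, so we
+	 * must not claim that the memory is zeroed.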
+ */ + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_Z; + + gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; + *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + + return val; + } + + return 0; +} + +static bool allocate_vpe_l2_table(int cpu, u32 id) +{ + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + unsigned int psz, esz, idx, npg, gpsz; + u64 val; + struct page *page; + __le64 *table; + + if (!gic_rdists->has_rvpeid) + return true; + + /* Skip non-present CPUs */ + if (!base) + return true; + + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1; + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1; + + switch (gpsz) { + default: + WARN_ON(1); + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* Don't allow vpe_id that exceeds single, flat table limit */ + if (!(val & GICR_VPROPBASER_4_1_INDIRECT)) + return (id < (npg * psz / (esz * SZ_8))); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(psz / (esz * SZ_8)); + if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = gic_data_rdist_cpu(cpu)->vpe_l1_base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to RD hardware */ + dsb(sy); + } + + return true; +} + +static int allocate_vpe_l1_table(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val, gpsz, npg, pa; + unsigned int psz = SZ_64K; + unsigned int np, epp, esz; + struct page *page; + + if (!gic_rdists->has_rvpeid) + return 0; + + /* + * if VPENDBASER.Valid is set, disable any previously programmed + * VPE by setting PendingLast while clearing Valid. This has the + * effect of making sure no doorbell will be generated and we can + * then safely clear VPROPBASER.Valid. + */ + if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + vlpi_base + GICR_VPENDBASER); + + /* + * If we can inherit the configuration from another RD, let's do + * so. Otherwise, we have to go through the allocation process. We + * assume that all RDs have the exact same requirements, as + * nothing will work otherwise. 
+ */ + val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC); + if (!gic_data_rdist()->vpe_table_mask) + return -ENOMEM; + + val = inherit_vpe_l1_table_from_its(); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + /* First probe the page size */ + val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER); + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); + + switch (gpsz) { + default: + gpsz = GIC_PAGE_SIZE_4K; + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* + * Start populating the register from scratch, including RO fields + * (which we want to print in debug cases...) + */ + val = 0; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); + + /* How many entries per GIC page? */ + esz++; + epp = psz / (esz * SZ_8); + + /* + * If we need more than just a single L1 page, flag the table + * as indirect and compute the number of required L1 pages. + */ + if (epp < ITS_MAX_VPEID) { + int nl2; + + val |= GICR_VPROPBASER_4_1_INDIRECT; + + /* Number of L2 pages required to cover the VPEID space */ + nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); + + /* Number of L1 pages to point to the L2 pages */ + npg = DIV_ROUND_UP(nl2 * SZ_8, psz); + } else { + npg = 1; + } + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1); + + /* Right, that's the number of CPU pages we need for L1 */ + np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); + + pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", + np, npg, psz, epp, esz); + page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); + if (!page) + return -ENOMEM; + + gic_data_rdist()->vpe_l1_base = page_address(page); + pa = virt_to_phys(page_address(page)); + WARN_ON(!IS_ALIGNED(pa, psz)); + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + val |= GICR_VPROPBASER_4_1_Z; + val |= GICR_VPROPBASER_4_1_VALID; + +out: + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask); + + pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n", + smp_processor_id(), val, + cpumask_pr_args(gic_data_rdist()->vpe_table_mask)); + + return 0; +} + +static int its_alloc_collections(struct its_node *its) +{ + int i; + + its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), + GFP_KERNEL); + if (!its->collections) + return -ENOMEM; + + for (i = 0; i < nr_cpu_ids; i++) + its->collections[i].target_address = ~0ULL; + + return 0; +} + +static struct page *its_allocate_pending_table(gfp_t gfp_flags) +{ + struct page *pend_page; + + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, + get_order(LPI_PENDBASE_SZ)); + if (!pend_page) + return NULL; + + /* Make sure the GIC will observe the zero-ed page */ + gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); + + return pend_page; +} + +static void its_free_pending_table(struct page *pt) +{ + free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); +} + +/* + * Booting with kdump and LPIs enabled is generally fine. 
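+ * The previous kernel's LPI tables are, by construction, outside the
+ * crash kernel's memory map, so they cannot clash with this kernel.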
Any other + * case is wrong in the absence of firmware/EFI support. + */ +static bool enabled_lpis_allowed(void) +{ + phys_addr_t addr; + u64 val; + + if (is_kdump_kernel()) + return true; + + /* Check whether the property table is in a reserved region */ + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + addr = val & GENMASK_ULL(51, 12); + + return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); +} + +static int __init allocate_lpi_tables(void) +{ + u64 val; + int err, cpu; + + /* + * If LPIs are enabled while we run this from the boot CPU, + * flag the RD tables as pre-allocated if the stars do align. + */ + val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); + if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { + gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | + RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); + pr_info("GIC-2500: Using preallocated redistributor tables\n"); + } + + err = its_setup_lpi_prop_table(); + if (err) + return err; + + /* + * We allocate all the pending tables anyway, as we may have a + * mix of RDs that have had LPIs enabled, and some that + * don't. We'll free the unused ones as each CPU comes online. + */ + for_each_possible_cpu(cpu) { + struct page *pend_page; + + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); + return -ENOMEM; + } + + gic_data_rdist_cpu(cpu)->pend_page = pend_page; + } + + return 0; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +{ + u32 count = 1000000; /* 1s! */ + bool clean; + u64 val; + + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + do { + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + if (unlikely(val & GICR_VPENDBASER_Dirty)) { + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + val |= GICR_VPENDBASER_PendingLast; + } + + return val; +} + +static void its_cpu_init_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + struct page *pend_page; + phys_addr_t paddr; + u64 val, tmp; + + if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. 
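+		 * A single property table is shared by every RD, so a
+		 * mismatch here means the firmware programmed the RDs
+		 * inconsistently.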
+ */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); + + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED; + + goto out; + } + + pend_page = gic_data_rdist()->pend_page; + paddr = page_to_phys(pend_page); + + /* set PROPBASE */ + val = (gic_rdists->prop_table_pa | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_RaWaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); + + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + + if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK; + + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { + if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. + */ + val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | + GICR_PROPBASER_CACHEABILITY_MASK); + val |= GICR_PROPBASER_nC; + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + } + pr_info_once("GIC: using cache flushing for LPI property table\n"); + gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; + } + + /* set PENDBASE */ + val = (page_to_phys(pend_page) | + GICR_PENDBASER_InnerShareable | + GICR_PENDBASER_RaWaWb); + + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + + if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK; + + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must remove the + * cacheability attributes as well. + */ + val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | + GICR_PENDBASER_CACHEABILITY_MASK); + val |= GICR_PENDBASER_nC; + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + } + + /* Enable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + + /* + * It's possible for CPU to receive VLPIs before it is + * scheduled as a vPE, especially for the first CPU, and the + * VLPI with INTID larger than 2^(IDbits+1) will be considered + * as out of range and dropped by GIC. + * So we initialize IDbits to known value to avoid VLPI drop. + */ + val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", + smp_processor_id(), val); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + /* + * Also clear Valid bit of GICR_VPENDBASER, in case some + * ancient programming gets left in and has possibility of + * corrupting memory. + */ + val = its_clear_vpend_valid(vlpi_base, 0, 0); + } + + if (allocate_vpe_l1_table()) { + /* + * If the allocation has failed, we're in massive trouble. + * Disable direct injection, and pray that no VM was + * already running... + */ + gic_rdists->has_rvpeid = false; + gic_rdists->has_vlpis = false; + } + + /* Make sure the GIC has seen the above */ + dsb(sy); +out: + gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED; + pr_info("GIC-2500: CPU%d: using %s LPI pending table @%pa\n", + smp_processor_id(), + gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ? 
+ "reserved" : "allocated", + &paddr); +} + +static void its_cpu_init_collection(struct its_node *its) +{ + int cpu = smp_processor_id(); + u64 target; + unsigned long mpid; + phys_addr_t its_phys_base; + unsigned long skt_id; + + /* avoid cross node collections and its mapping */ + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + struct device_node *cpu_node; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (its->numa_node != NUMA_NO_NODE && + its->numa_node != of_node_to_nid(cpu_node)) + return; + } + + mpid = cpu_logical_map(cpu); + its_phys_base = its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + /* + * We now have to bind each collection to its target + * redistributor. + */ + if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + /* + * This ITS wants the physical address of the + * redistributor. + */ + target = gic_data_rdist()->phys_base; + } else { + /* This ITS wants a linear CPU number. */ + target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target) << 16; + } + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + its->collections[cpu].col_id = cpu % 64; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); +} + +static void its_cpu_init_collections(void) +{ + struct its_node *its; + + raw_spin_lock(&its_lock); + + list_for_each_entry(its, &its_nodes, entry) + its_cpu_init_collection(its); + + raw_spin_unlock(&its_lock); +} + +static struct its_device *its_find_device(struct its_node *its, u32 dev_id) +{ + struct its_device *its_dev = NULL, *tmp; + unsigned long flags; + + raw_spin_lock_irqsave(&its->lock, flags); + + list_for_each_entry(tmp, &its->its_device_list, entry) { + if (tmp->device_id == dev_id) { + its_dev = tmp; + break; + } + } + + raw_spin_unlock_irqrestore(&its->lock, flags); + + return its_dev; +} + +static struct its_baser *its_get_baser(struct its_node *its, u32 type) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (GITS_BASER_TYPE(its->tables[i].val) == type) + return &its->tables[i]; + } + + return NULL; +} + +static bool its_alloc_table_entry(struct its_node *its, + struct its_baser *baser, u32 id) +{ + struct page *page; + u32 esz, idx; + __le64 *table; + + /* Don't allow device id that exceeds single, flat table limit */ + esz = GITS_BASER_ENTRY_SIZE(baser->val); + if (!(baser->val & GITS_BASER_INDIRECT)) + return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(baser->psz / esz); + if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = baser->base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), baser->psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to ITS hardware */ + dsb(sy); + } + + return true; +} + +static bool its_alloc_device_table(struct its_node *its, u32 dev_id) +{ + struct its_baser *baser; + 
+ baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); + + /* Don't allow device id that exceeds ITS hardware limit */ + if (!baser) + return (ilog2(dev_id) < device_ids(its)); + + return its_alloc_table_entry(its, baser, dev_id); +} + +static bool its_alloc_vpe_table(u32 vpe_id) +{ + struct its_node *its; + int cpu; + + /* + * Make sure the L2 tables are allocated on *all* v4 ITSs. We + * could try and only do it on ITSs corresponding to devices + * that have interrupts targeted at this VPE, but the + * complexity becomes crazy (and you have tons of memory + * anyway, right?). + */ + list_for_each_entry(its, &its_nodes, entry) { + struct its_baser *baser; + + if (!is_v4(its)) + continue; + + baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); + if (!baser) + return false; + + if (!its_alloc_table_entry(its, baser, vpe_id)) + return false; + } + + /* Non v4.1? No need to iterate RDs and go back early. */ + if (!gic_rdists->has_rvpeid) + return true; + + /* + * Make sure the L2 tables are allocated for all copies of + * the L1 table on *all* v4.1 RDs. + */ + for_each_possible_cpu(cpu) { + if (!allocate_vpe_l2_table(cpu, vpe_id)) + return false; + } + + return true; +} + +static struct its_device *its_create_device(struct its_node *its, u32 dev_id, + int nvecs, bool alloc_lpis) +{ + struct its_device *dev; + unsigned long *lpi_map = NULL; + unsigned long flags; + u16 *col_map = NULL; + void *itt; + int lpi_base; + int nr_lpis; + int nr_ites; + int sz; + + if (!its_alloc_device_table(its, dev_id)) + return NULL; + + if (WARN_ON(!is_power_of_2(nvecs))) + nvecs = roundup_pow_of_two(nvecs); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + /* + * Even if the device wants a single LPI, the ITT must be + * sized as a power of two (and you need at least one bit...). + */ + nr_ites = max(2, nvecs); + sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); + sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; + itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); + if (alloc_lpis) { + lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); + if (lpi_map) + col_map = kcalloc(nr_lpis, sizeof(*col_map), + GFP_KERNEL); + } else { + col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); + nr_lpis = 0; + lpi_base = 0; + } + + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { + kfree(dev); + kfree(itt); + bitmap_free(lpi_map); + kfree(col_map); + return NULL; + } + + gic_flush_dcache_to_poc(itt, sz); + + dev->its = its; + dev->itt = itt; + dev->nr_ites = nr_ites; + dev->event_map.lpi_map = lpi_map; + dev->event_map.col_map = col_map; + dev->event_map.lpi_base = lpi_base; + dev->event_map.nr_lpis = nr_lpis; + raw_spin_lock_init(&dev->event_map.vlpi_lock); + dev->device_id = dev_id; + INIT_LIST_HEAD(&dev->entry); + + raw_spin_lock_irqsave(&its->lock, flags); + list_add(&dev->entry, &its->its_device_list); + raw_spin_unlock_irqrestore(&its->lock, flags); + + /* Map device to its ITT */ + its_send_mapd(dev, 1); + + return dev; +} + +static void its_free_device(struct its_device *its_dev) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&its_dev->its->lock, flags); + list_del(&its_dev->entry); + raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); + kfree(its_dev->event_map.col_map); + kfree(its_dev->itt); + kfree(its_dev); +} + +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) +{ + int idx; + + /* Find a free LPI region in lpi_map and allocate them. 
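+	 * bitmap_find_free_region() hands back a power-of-2 sized,
+	 * naturally aligned region, which is also what PCI multi-MSI
+	 * allocations expect.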
*/ + idx = bitmap_find_free_region(dev->event_map.lpi_map, + dev->event_map.nr_lpis, + get_count_order(nvecs)); + if (idx < 0) + return -ENOSPC; + + *hwirq = dev->event_map.lpi_base + idx; + + return 0; +} + +static int its_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct its_node *its; + struct its_device *its_dev; + struct msi_domain_info *msi_info; + u32 dev_id; + int err = 0; + + /* + * We ignore "dev" entirely, and rely on the dev_id that has + * been passed via the scratchpad. This limits this domain's + * usefulness to upper layers that definitely know that they + * are built on top of the ITS. + */ + dev_id = info->scratchpad[0].ul; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + if (!gic_rdists->has_direct_lpi && + vpe_proxy.dev && + vpe_proxy.dev->its == its && + dev_id == vpe_proxy.dev->device_id) { + /* Bad luck. Get yourself a better implementation */ + WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", + dev_id); + return -EINVAL; + } + + mutex_lock(&its->dev_alloc_lock); + its_dev = its_find_device(its, dev_id); + if (its_dev) { + /* + * We already have seen this ID, probably through + * another alias (PCI bridge of some sort). No need to + * create the device. + */ + its_dev->shared = true; + pr_debug("Reusing ITT for devID %x\n", dev_id); + goto out; + } + + its_dev = its_create_device(its, dev_id, nvec, true); + if (!its_dev) { + err = -ENOMEM; + goto out; + } + + if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE) + its_dev->shared = true; + + pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); +out: + mutex_unlock(&its->dev_alloc_lock); + info->scratchpad[0].ptr = its_dev; + return err; +} + +static struct msi_domain_ops its_msi_domain_ops = { + .msi_prepare = its_msi_prepare, +}; + +static int its_irq_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq) +{ + struct irq_fwspec fwspec; + + if (irq_domain_get_of_node(domain->parent)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = GIC_IRQ_TYPE_LPI; + fwspec.param[1] = hwirq; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + } else if (is_fwnode_irqchip(domain->parent->fwnode)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = hwirq; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + } else { + return -EINVAL; + } + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); +} + +static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + msi_alloc_info_t *info = args; + struct its_device *its_dev = info->scratchpad[0].ptr; + struct its_node *its = its_dev->its; + struct irq_data *irqd; + irq_hw_number_t hwirq; + int err; + int i; + + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); + if (err) + return err; + + err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev)); + if (err) + return err; + + for (i = 0; i < nr_irqs; i++) { + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); + if (err) + return err; + + irq_domain_set_hwirq_and_chip(domain, virq + i, + hwirq + i, &its_irq_chip, its_dev); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + irqd_set_resend_when_in_progress(irqd); + pr_debug("ID:%d pID:%d vID:%d\n", + (int)(hwirq + i - its_dev->event_map.lpi_base), + (int)(hwirq + i), virq + i); + } + + return 0; +} + +static int its_cpumask_first(struct its_device *its_dev, + 
const struct cpumask *cpu_mask)
+{
+	unsigned int skt, skt_id, i;
+	phys_addr_t its_phys_base;
+	unsigned int cpu, cpus = 0;
+
+	unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0};
+
+	/*
+	 * Count the CPUs on each socket; the socket number lives in
+	 * bits [23:16] of the MPIDR, 0xff meaning "no socket".
+	 */
+	for (i = 0; i < nr_cpu_ids; i++) {
+		skt = (cpu_logical_map(i) >> 16) & 0xff;
+		if (skt < MAX_MARS3_SKT_COUNT)
+			skt_cpu_cnt[skt]++;
+		else if (skt != 0xff)
+			pr_err("socket address: %u is out of range.\n", skt);
+	}
+
+	/* The socket an ITS belongs to is bits [43:41] of its base address */
+	its_phys_base = its_dev->its->phys_base;
+	skt_id = (its_phys_base >> 41) & 0x7;
+
+	if (skt_id != 0)
+		for (i = 0; i < skt_id; i++)
+			cpus += skt_cpu_cnt[i];
+
+	cpu = cpumask_first(cpu_mask);
+	if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id])))
+		cpus = cpu;
+
+	if (is_kdump_kernel()) {
+		skt = (cpu_logical_map(cpu) >> 16) & 0xff;
+		if (skt_id == skt)
+			return cpu;
+		for (i = 0; i < nr_cpu_ids; i++) {
+			skt = (cpu_logical_map(i) >> 16) & 0xff;
+			if (skt < MAX_MARS3_SKT_COUNT) {
+				if (skt_id == skt)
+					return i;
+			} else if (skt != 0xff) {
+				pr_err("socket address: %u is out of range.\n", skt);
+			}
+		}
+	}
+
+	return cpus;
+}
+
+static int its_irq_domain_activate(struct irq_domain *domain,
+				   struct irq_data *d, bool reserve)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	const struct cpumask *cpu_mask = cpu_online_mask;
+	int cpu;
+
+	cpu = its_cpumask_first(its_dev, cpu_mask);
+
+	if (cpu < 0 || cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	its_inc_lpi_count(d, cpu);
+	its_dev->event_map.col_map[event] = cpu;
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	/* Map the GIC IRQ and event to the device */
+	its_send_mapti(its_dev, d->hwirq, event);
+	return 0;
+}
+
+static void its_irq_domain_deactivate(struct irq_domain *domain,
+				      struct irq_data *d)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+
+	its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
+	/* Stop the delivery of interrupts */
+	its_send_discard(its_dev, event);
+}
+
+static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+				unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_node *its = its_dev->its;
+	int i;
+
+	bitmap_release_region(its_dev->event_map.lpi_map,
+			      its_get_event_id(irq_domain_get_irq_data(domain, virq)),
+			      get_count_order(nr_irqs));
+
+	for (i = 0; i < nr_irqs; i++) {
+		struct irq_data *data = irq_domain_get_irq_data(domain,
+								virq + i);
+		/* Nuke the entry in the domain */
+		irq_domain_reset_irq_data(data);
+	}
+
+	mutex_lock(&its->dev_alloc_lock);
+
+	/*
+	 * If all interrupts have been freed, start mopping the
+	 * floor. This is conditioned on the device not being shared.
+	 */
+	if (!its_dev->shared &&
+	    bitmap_empty(its_dev->event_map.lpi_map,
+			 its_dev->event_map.nr_lpis)) {
+		its_lpi_free(its_dev->event_map.lpi_map,
+			     its_dev->event_map.lpi_base,
+			     its_dev->event_map.nr_lpis);
+
+		/* Unmap device/itt */
+		its_send_mapd(its_dev, 0);
+		its_free_device(its_dev);
+	}
+
+	mutex_unlock(&its->dev_alloc_lock);
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops its_domain_ops = {
+	.alloc = its_irq_domain_alloc,
+	.free = its_irq_domain_free,
+	.activate = its_irq_domain_activate,
+	.deactivate = its_irq_domain_deactivate,
+};
+
+/*
+ * This is insane.
+ * + * If a GICv4.0 doesn't implement Direct LPIs (which is extremely + * likely), the only way to perform an invalidate is to use a fake + * device to issue an INV command, implying that the LPI has first + * been mapped to some event on that device. Since this is not exactly + * cheap, we try to keep that mapping around as long as possible, and + * only issue an UNMAP if we're short on available slots. + * + * Broken by design(tm). + * + * GICv4.1, on the other hand, mandates that we're able to invalidate + * by writing to a MMIO register. It doesn't implement the whole of + * DirectLPI, but that's good enough. And most of the time, we don't + * even have to invalidate anything, as the redistributor can be told + * whether to generate a doorbell or not (we thus leave it enabled, + * always). + */ +static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + /* Already unmapped? */ + if (vpe->vpe_proxy_event == -1) + return; + + its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); + vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; + + /* + * We don't track empty slots at all, so let's move the + * next_victim pointer if we can quickly reuse that slot + * instead of nuking an existing entry. Not clear that this is + * always a win though, and this might just generate a ripple + * effect... Let's just hope VPEs don't migrate too often. + */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + vpe_proxy.next_victim = vpe->vpe_proxy_event; + + vpe->vpe_proxy_event = -1; +} + +static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + if (!gic_rdists->has_direct_lpi) { + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + its_vpe_db_proxy_unmap_locked(vpe); + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); + } +} + +static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + /* Already mapped? */ + if (vpe->vpe_proxy_event != -1) + return; + + /* This slot was already allocated. Kick the other VPE out. 
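+	 * The evicted vPE will simply be remapped on demand the next
+	 * time it needs an invalidation.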
*/ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); + + /* Map the new VPE instead */ + vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; + vpe->vpe_proxy_event = vpe_proxy.next_victim; + vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; + + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; + its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); +} + +static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) +{ + unsigned long flags; + struct its_collection *target_col; + + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + wait_for_syncr(rdbase); + + return; + } + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + + target_col = &vpe_proxy.dev->its->collections[to]; + its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static int its_vpe_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + int from, cpu = cpumask_first(mask_val); + unsigned long flags; + + /* + * Changing affinity is mega expensive, so let's be as lazy as + * we can and only do it if we really have to. Also, if mapped + * into the proxy device, we need to move the doorbell + * interrupt to its new location. + * + * Another thing is that changing the affinity of a vPE affects + * *other interrupts* such as all the vLPIs that are routed to + * this vPE. This means that the irq_desc lock is not enough to + * protect us, and that we must ensure nobody samples vpe->col_idx + * during the update, hence the lock below which must also be + * taken on any vLPI handling path that evaluates vpe->col_idx. + */ + from = vpe_to_cpuid_lock(vpe, &flags); + if (from == cpu) + goto out; + + vpe->col_idx = cpu; + + /* + * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD + * is sharing its VPE table with the current one. 
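+	 * The two RDs then back their vPE tables with the same
+	 * memory, so the mapping is already visible at the
+	 * destination.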
+ */ + if (gic_data_rdist_cpu(cpu)->vpe_table_mask && + cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) + goto out; + + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + +out: + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + vpe_to_cpuid_unlock(vpe, flags); + + return IRQ_SET_MASK_OK_DONE; +} + +static void its_wait_vpt_parse_complete(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (!gic_rdists->has_vpend_valid_dirty) + return; + + WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER, + val, + !(val & GICR_VPENDBASER_Dirty), + 1, 500)); +} + +static void its_vpe_schedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + /* Schedule the VPE */ + val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & + GENMASK_ULL(51, 12); + val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + val = virt_to_phys(page_address(vpe->vpt_page)) & + GENMASK_ULL(51, 16); + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_InnerShareable; + /* + * There is no good way of finding out if the pending table is + * empty as we can race against the doorbell interrupt very + * easily. So in the end, vpe->pending_last is only an + * indication that the vcpu has something pending, not one + * that the pending table is empty. A good implementation + * would be able to read its coarse map pretty quickly anyway, + * making this a tolerable issue. + */ + val |= GICR_VPENDBASER_PendingLast; + val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; + val |= GICR_VPENDBASER_Valid; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_deschedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + val = its_clear_vpend_valid(vlpi_base, 0, 0); + + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); +} + +static void its_vpe_invall(struct its_vpe *vpe) +{ + struct its_node *its; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) + continue; + + /* + * Sending a VINVALL to a single ITS is enough, as all + * we need is to reach the redistributors. 
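+		 * The invalidation takes effect at the redistributors,
+		 * which every ITS in the system shares.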
+		 */
+		its_send_vinvall(its, vpe);
+		return;
+	}
+}
+
+static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_cmd_info *info = vcpu_info;
+
+	switch (info->cmd_type) {
+	case SCHEDULE_VPE:
+		its_vpe_schedule(vpe);
+		return 0;
+
+	case DESCHEDULE_VPE:
+		its_vpe_deschedule(vpe);
+		return 0;
+
+	case COMMIT_VPE:
+		its_wait_vpt_parse_complete();
+		return 0;
+
+	case INVALL_VPE:
+		its_vpe_invall(vpe);
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static void its_vpe_send_cmd(struct its_vpe *vpe,
+			     void (*cmd)(struct its_device *, u32))
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+
+	its_vpe_db_proxy_map_locked(vpe);
+	cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
+
+	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+}
+
+static void its_vpe_send_inv(struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	if (gic_rdists->has_direct_lpi)
+		__direct_lpi_inv(d, d->parent_data->hwirq);
+	else
+		its_vpe_send_cmd(vpe, its_send_inv);
+}
+
+static void its_vpe_mask_irq(struct irq_data *d)
+{
+	/*
+	 * We need to mask the LPI, which is described by the parent
+	 * irq_data. Instead of calling into the parent (which won't
+	 * exactly do the right thing), let's simply use the
+	 * parent_data pointer. Yes, I'm naughty.
+	 */
+	lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
+	its_vpe_send_inv(d);
+}
+
+static void its_vpe_unmask_irq(struct irq_data *d)
+{
+	/* Same hack as above... */
+	lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
+	its_vpe_send_inv(d);
+}
+
+static int its_vpe_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which,
+				     bool state)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	if (which != IRQCHIP_STATE_PENDING)
+		return -EINVAL;
+
+	if (gic_rdists->has_direct_lpi) {
+		void __iomem *rdbase;
+
+		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+		if (state) {
+			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
+		} else {
+			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
+			wait_for_syncr(rdbase);
+		}
+	} else {
+		if (state)
+			its_vpe_send_cmd(vpe, its_send_int);
+		else
+			its_vpe_send_cmd(vpe, its_send_clear);
+	}
+
+	return 0;
+}
+
+static int its_vpe_retrigger(struct irq_data *d)
+{
+	return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
+}
+
+static struct irq_chip its_vpe_irq_chip = {
+	.name = "GICv4-vpe",
+	.irq_mask = its_vpe_mask_irq,
+	.irq_unmask = its_vpe_unmask_irq,
+	.irq_eoi = irq_chip_eoi_parent,
+	.irq_set_affinity = its_vpe_set_affinity,
+	.irq_retrigger = its_vpe_retrigger,
+	.irq_set_irqchip_state = its_vpe_set_irqchip_state,
+	.irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
+};
+
+static struct its_node *find_4_1_its(void)
+{
+	static struct its_node *its;
+
+	if (!its) {
+		list_for_each_entry(its, &its_nodes, entry) {
+			if (is_v4_1(its))
+				return its;
+		}
+
+		/* Oops? */
+		its = NULL;
+	}
+
+	return its;
+}
+
+static void its_vpe_4_1_send_inv(struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_node *its;
+
+	/*
+	 * GICv4.1 wants doorbells to be invalidated using the
+	 * INVDB command in order to be broadcast to all RDs. Send
+	 * it to the first valid ITS, and let the HW do its magic.
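+	 * INVDB only exists on GICv4.1, hence the search for a
+	 * v4.1-capable ITS.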
+ */ + its = find_4_1_its(); + if (its) + its_send_invdb(its, vpe); +} + +static void its_vpe_4_1_mask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_unmask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_schedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + + /* Schedule the VPE */ + val |= GICR_VPENDBASER_Valid; + val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; + val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; + val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); + + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_4_1_deschedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (info->req_db) { + unsigned long flags; + + /* + * vPE is going to block: make the vPE non-resident with + * PendingLast clear and DB set. The GIC guarantees that if + * we read-back PendingLast clear, then a doorbell will be + * delivered when an interrupt comes. + * + * Note the locking to deal with the concurrent update of + * pending_last from the doorbell interrupt handler that can + * run concurrently. + */ + raw_spin_lock_irqsave(&vpe->vpe_lock, flags); + val = its_clear_vpend_valid(vlpi_base, + GICR_VPENDBASER_PendingLast, + GICR_VPENDBASER_4_1_DB); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); + } else { + /* + * We're not blocking, so just make the vPE non-resident + * with PendingLast set, indicating that we'll be back. 
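+		 * As req_db is false, the caller does not need a
+		 * doorbell for interrupts arriving while non-resident.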
+ */ + val = its_clear_vpend_valid(vlpi_base, + 0, + GICR_VPENDBASER_PendingLast); + vpe->pending_last = true; + } +} + +static void its_vpe_4_1_invall(struct its_vpe *vpe) +{ + void __iomem *rdbase; + unsigned long flags; + u64 val; + int cpu; + + val = GICR_INVALLR_V; + val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); + + /* Target the redistributor this vPE is currently known on */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVALLR); + + wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); +} + +static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_4_1_schedule(vpe, info); + return 0; + + case DESCHEDULE_VPE: + its_vpe_4_1_deschedule(vpe, info); + return 0; + + case COMMIT_VPE: + its_wait_vpt_parse_complete(); + return 0; + + case INVALL_VPE: + its_vpe_4_1_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_vpe_4_1_irq_chip = { + .name = "GICv4.1-vpe", + .irq_mask = its_vpe_4_1_mask_irq, + .irq_unmask = its_vpe_4_1_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, +}; + +static void its_configure_sgi(struct irq_data *d, bool clear) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_desc desc; + + desc.its_vsgi_cmd.vpe = vpe; + desc.its_vsgi_cmd.sgi = d->hwirq; + desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; + desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; + desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; + desc.its_vsgi_cmd.clear = clear; + + /* + * GICv4.1 allows us to send VSGI commands to any ITS as long as the + * destination VPE is mapped there. Since we map them eagerly at + * activation time, we're pretty sure the first GICv4.1 ITS will do. + */ + its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc); +} + +static void its_sgi_mask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); +} + +static void its_sgi_unmask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = true; + its_configure_sgi(d, false); +} + +static int its_sgi_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + /* + * There is no notion of affinity for virtual SGIs, at least + * not on the host (since they can only be targeting a vPE). + * Tell the kernel we've done whatever it asked for. 
+ */ + irq_data_update_effective_affinity(d, mask_val); + return IRQ_SET_MASK_OK; +} + +static int its_sgi_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (state) { + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its = find_4_1_its(); + u64 val; + + val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); + val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq); + writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); + } else { + its_configure_sgi(d, true); + } + + return 0; +} + +static int its_sgi_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + void __iomem *base; + unsigned long flags; + u32 count = 1000000; /* 1s! */ + u32 status; + int cpu; + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + /* + * Locking galore! We can race against two different events: + * + * - Concurrent vPE affinity change: we must make sure it cannot + * happen, or we'll talk to the wrong redistributor. This is + * identical to what happens with vLPIs. + * + * - Concurrent VSGIPENDR access: As it involves accessing two + * MMIO registers, this must be made atomic one way or another. + */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K; + writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); + do { + status = readl_relaxed(base + GICR_VSGIPENDR); + if (!(status & GICR_VSGIPENDR_BUSY)) + goto out; + + count--; + if (!count) { + pr_err_ratelimited("Unable to get SGI status\n"); + goto out; + } + cpu_relax(); + udelay(1); + } while (count); + +out: + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); + + if (!count) + return -ENXIO; + + *val = !!(status & (1 << d->hwirq)); + + return 0; +} + +static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case PROP_UPDATE_VSGI: + vpe->sgi_config[d->hwirq].priority = info->priority; + vpe->sgi_config[d->hwirq].group = info->group; + its_configure_sgi(d, false); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_sgi_irq_chip = { + .name = "GICv4.1-sgi", + .irq_mask = its_sgi_mask_irq, + .irq_unmask = its_sgi_unmask_irq, + .irq_set_affinity = its_sgi_set_affinity, + .irq_set_irqchip_state = its_sgi_set_irqchip_state, + .irq_get_irqchip_state = its_sgi_get_irqchip_state, + .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity, +}; + +static int its_sgi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct its_vpe *vpe = args; + int i; + + /* Yes, we do want 16 SGIs */ + WARN_ON(nr_irqs != 16); + + for (i = 0; i < 16; i++) { + vpe->sgi_config[i].priority = 0; + vpe->sgi_config[i].enabled = false; + vpe->sgi_config[i].group = false; + + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &its_sgi_irq_chip, vpe); + irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY); + } + + return 0; +} + +static void its_sgi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + /* Nothing to do */ +} + +static int its_sgi_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + /* Write out the initial SGI configuration */ + its_configure_sgi(d, false); + 
return 0; +} + +static void its_sgi_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + /* + * The VSGI command is awkward: + * + * - To change the configuration, CLEAR must be set to false, + * leaving the pending bit unchanged. + * - To clear the pending bit, CLEAR must be set to true, leaving + * the configuration unchanged. + * + * You just can't do both at once, hence the two commands below. + */ + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); + its_configure_sgi(d, true); +} + +static const struct irq_domain_ops its_sgi_domain_ops = { + .alloc = its_sgi_irq_domain_alloc, + .free = its_sgi_irq_domain_free, + .activate = its_sgi_irq_domain_activate, + .deactivate = its_sgi_irq_domain_deactivate, +}; + +static int its_vpe_id_alloc(void) +{ + return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); +} + +static void its_vpe_id_free(u16 id) +{ + ida_simple_remove(&its_vpeid_ida, id); +} + +static int its_vpe_init(struct its_vpe *vpe) +{ + struct page *vpt_page; + int vpe_id; + + /* Allocate vpe_id */ + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) + return vpe_id; + + /* Allocate VPT */ + vpt_page = its_allocate_pending_table(GFP_KERNEL); + if (!vpt_page) { + its_vpe_id_free(vpe_id); + return -ENOMEM; + } + + if (!its_alloc_vpe_table(vpe_id)) { + its_vpe_id_free(vpe_id); + its_free_pending_table(vpt_page); + return -ENOMEM; + } + + raw_spin_lock_init(&vpe->vpe_lock); + vpe->vpe_id = vpe_id; + vpe->vpt_page = vpt_page; + if (gic_rdists->has_rvpeid) + atomic_set(&vpe->vmapp_count, 0); + else + vpe->vpe_proxy_event = -1; + + return 0; +} + +static void its_vpe_teardown(struct its_vpe *vpe) +{ + its_vpe_db_proxy_unmap(vpe); + its_vpe_id_free(vpe->vpe_id); + its_free_pending_table(vpe->vpt_page); +} + +static void its_vpe_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct its_vm *vm = domain->host_data; + int i; + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + struct its_vpe *vpe = irq_data_get_irq_chip_data(data); + + BUG_ON(vm != vpe->its_vm); + + clear_bit(data->hwirq, vm->db_bitmap); + its_vpe_teardown(vpe); + irq_domain_reset_irq_data(data); + } + + if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { + its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); + its_free_prop_table(vm->vprop_page); + } +} + +static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct irq_chip *irqchip = &its_vpe_irq_chip; + struct its_vm *vm = args; + unsigned long *bitmap; + struct page *vprop_page; + int base, nr_ids, i, err = 0; + + BUG_ON(!vm); + + bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); + if (!bitmap) + return -ENOMEM; + + if (nr_ids < nr_irqs) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vprop_page = its_allocate_prop_table(GFP_KERNEL); + if (!vprop_page) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vm->db_bitmap = bitmap; + vm->db_lpi_base = base; + vm->nr_db_lpis = nr_ids; + vm->vprop_page = vprop_page; + + if (gic_rdists->has_rvpeid) + irqchip = &its_vpe_4_1_irq_chip; + + for (i = 0; i < nr_irqs; i++) { + vm->vpes[i]->vpe_db_lpi = base + i; + err = its_vpe_init(vm->vpes[i]); + if (err) + break; + err = its_irq_gic_domain_alloc(domain, virq + i, + vm->vpes[i]->vpe_db_lpi); + 
if (err) + break; + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + irqchip, vm->vpes[i]); + set_bit(i, bitmap); + irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i)); + } + + if (err) { + if (i > 0) + its_vpe_irq_domain_free(domain, virq, i); + + its_lpi_free(bitmap, base, nr_ids); + its_free_prop_table(vprop_page); + } + + return err; +} + +static int its_vpe_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * If we use the list map, we issue VMAPP on demand... Unless + * we're on a GICv4.1 and we eagerly map the VPE on all ITSs + * so that VSGIs can work. + */ + if (!gic_requires_eager_mapping()) + return 0; + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + } + + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + + return 0; +} + +static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * If we use the list map on GICv4.0, we unmap the VPE once no + * VLPIs are associated with the VM. + */ + if (!gic_requires_eager_mapping()) + return; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + its_send_vmapp(its, vpe, false); + } + + /* + * There may be a direct read to the VPT after unmapping the + * vPE, to guarantee the validity of this, we make the VPT + * memory coherent with the CPU caches here. + */ + if (find_4_1_its() && !atomic_read(&vpe->vmapp_count)) + gic_flush_dcache_to_poc(page_address(vpe->vpt_page), + LPI_PENDBASE_SZ); +} + +static const struct irq_domain_ops its_vpe_domain_ops = { + .alloc = its_vpe_irq_domain_alloc, + .free = its_vpe_irq_domain_free, + .activate = its_vpe_irq_domain_activate, + .deactivate = its_vpe_irq_domain_deactivate, +}; + +static int its_force_quiescent(void __iomem *base) +{ + u32 count = 1000000; /* 1s */ + u32 val; + + val = readl_relaxed(base + GITS_CTLR); + /* + * GIC architecture specification requires the ITS to be both + * disabled and quiescent for writes to GITS_BASER or + * GITS_CBASER to not have UNPREDICTABLE results. 
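+	 *
+	 * Quiescence is signalled by GITS_CTLR.Quiescent becoming 1
+	 * once all in-flight ITS operations have drained; the loop
+	 * below polls for it for roughly one second before giving up.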
+	 */
+	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
+		return 0;
+
+	/* Disable the generation of all interrupts to this ITS */
+	val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
+	writel_relaxed(val, base + GITS_CTLR);
+
+	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
+	while (1) {
+		val = readl_relaxed(base + GITS_CTLR);
+		if (val & GITS_CTLR_QUIESCENT)
+			return 0;
+
+		count--;
+		if (!count)
+			return -EBUSY;
+
+		cpu_relax();
+		udelay(1);
+	}
+}
+
+static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
+{
+	struct its_node *its = data;
+
+	/* erratum 22375: only alloc 8MB table size (20 bits) */
+	its->typer &= ~GITS_TYPER_DEVBITS;
+	its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
+	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
+
+	return true;
+}
+
+static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+	struct its_node *its = data;
+
+	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+
+	return true;
+}
+
+static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
+{
+	struct its_node *its = data;
+
+	/* On QDF2400, the size of the ITE is 16 bytes */
+	its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
+	its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
+
+	return true;
+}
+
+static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
+{
+	struct its_node *its = its_dev->its;
+
+	/*
+	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
+	 * which maps 32-bit writes targeted at a separate window of
+	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
+	 * with device ID taken from bits [device_id_bits + 1:2] of
+	 * the window offset.
+	 */
+	return its->pre_its_base + (its_dev->device_id << 2);
+}
+
+static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
+{
+	struct its_node *its = data;
+	u32 pre_its_window[2];
+	u32 ids;
+
+	if (!fwnode_property_read_u32_array(its->fwnode_handle,
+					    "socionext,synquacer-pre-its",
+					    pre_its_window,
+					    ARRAY_SIZE(pre_its_window))) {
+
+		its->pre_its_base = pre_its_window[0];
+		its->get_msi_base = its_irq_get_msi_base_pre_its;
+
+		ids = ilog2(pre_its_window[1]) - 2;
+		if (device_ids(its) > ids) {
+			its->typer &= ~GITS_TYPER_DEVBITS;
+			its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
+		}
+
+		/* the pre-ITS breaks isolation, so disable MSI remapping */
+		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
+		return true;
+	}
+	return false;
+}
+
+static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
+{
+	struct its_node *its = data;
+
+	/*
+	 * Hip07 insists on using the wrong address for the VLPI
+	 * page. Trick it into doing the right thing...
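+	 * by offsetting all VLPI-related redistributor accesses by
+	 * 128K, i.e. one full RD_base + SGI_base frame pair.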
+ */ + its->vlpi_redist_offset = SZ_128K; + return true; +} + +static bool __maybe_unused its_enable_rk3588001(void *data) +{ + struct its_node *its = data; + + if (!of_machine_is_compatible("rockchip,rk3588") && + !of_machine_is_compatible("rockchip,rk3588s")) + return false; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + + return true; +} + +static bool its_set_non_coherent(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + +static const struct gic_quirk its_quirks[] = { +#ifdef CONFIG_CAVIUM_ERRATUM_22375 + { + .desc = "ITS: Cavium errata 22375, 24313", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_22375, + }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, +#endif +#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 + { + .desc = "ITS: QDF2400 erratum 0065", + .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ + .mask = 0xffffffff, + .init = its_enable_quirk_qdf2400_e0065, + }, +#endif +#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS + { + /* + * The Socionext Synquacer SoC incorporates ARM's own GIC-500 + * implementation, but with a 'pre-ITS' added that requires + * special handling in software. + */ + .desc = "ITS: Socionext Synquacer pre-ITS", + .iidr = 0x0001143b, + .mask = 0xffffffff, + .init = its_enable_quirk_socionext_synquacer, + }, +#endif +#ifdef CONFIG_HISILICON_ERRATUM_161600802 + { + .desc = "ITS: Hip07 erratum 161600802", + .iidr = 0x00000004, + .mask = 0xffffffff, + .init = its_enable_quirk_hip07_161600802, + }, +#endif +#ifdef CONFIG_ROCKCHIP_ERRATUM_3588001 + { + .desc = "ITS: Rockchip erratum RK3588001", + .iidr = 0x0201743b, + .mask = 0xffffffff, + .init = its_enable_rk3588001, + }, +#endif + { + .desc = "ITS: non-coherent attribute", + .property = "dma-noncoherent", + .init = its_set_non_coherent, + }, + { + } +}; + +static void its_enable_quirks(struct its_node *its) +{ + u32 iidr = readl_relaxed(its->base + GITS_IIDR); + + gic_enable_quirks(iidr, its_quirks, its); + + if (is_of_node(its->fwnode_handle)) + gic_enable_of_quirks(to_of_node(its->fwnode_handle), + its_quirks, its); +} + +static int its_save_disable(void) +{ + struct its_node *its; + int err = 0; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + its->ctlr_save = readl_relaxed(base + GITS_CTLR); + err = its_force_quiescent(base); + if (err) { + pr_err("ITS@%pa: failed to quiesce: %d\n", + &its->phys_base, err); + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + goto err; + } + + its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); + } + +err: + if (err) { + list_for_each_entry_continue_reverse(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + } + } + raw_spin_unlock(&its_lock); + + return err; +} + +static void its_restore_enable(void) +{ + struct its_node *its; + int ret; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + int i; + + base = its->base; + + /* + * Make sure that the ITS is disabled. If it fails to quiesce, + * don't restore it since writing to CBASER or BASER + * registers is undefined according to the GIC v3 ITS + * Specification. 
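+		 * Quiescing also ensures that no command is still in
+		 * flight while CBASER and CWRITER are reprogrammed below.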
+ * + * Firmware resuming with the ITS enabled is terminally broken. + */ + WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE); + ret = its_force_quiescent(base); + if (ret) { + pr_err("ITS@%pa: failed to quiesce on resume: %d\n", + &its->phys_base, ret); + continue; + } + + gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); + + /* + * Writing CBASER resets CREADR to 0, so make CWRITER and + * cmd_write line up with it. + */ + its->cmd_write = its->cmd_base; + gits_write_cwriter(0, base + GITS_CWRITER); + + /* Restore GITS_BASER from the value cache. */ + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = &its->tables[i]; + + if (!(baser->val & GITS_BASER_VALID)) + continue; + + its_write_baser(its, baser, baser->val); + } + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + + /* + * Reinit the collection if it's stored in the ITS. This is + * indicated by the col_id being less than the HCC field. + * CID < HCC as specified in the GIC v3 Documentation. + */ + if (its->collections[smp_processor_id()].col_id < + GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) + its_cpu_init_collection(its); + } + raw_spin_unlock(&its_lock); +} + +static struct syscore_ops its_syscore_ops = { + .suspend = its_save_disable, + .resume = its_restore_enable, +}; + +static void __init __iomem *its_map_one(struct resource *res, int *err) +{ + void __iomem *its_base; + u32 val; + + its_base = ioremap(res->start, SZ_64K); + if (!its_base) { + pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); + *err = -ENOMEM; + return NULL; + } + + val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (val != 0x30 && val != 0x40) { + pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); + *err = -ENODEV; + goto out_unmap; + } + + *err = its_force_quiescent(its_base); + if (*err) { + pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); + goto out_unmap; + } + + return its_base; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static int its_init_domain(struct its_node *its) +{ + struct irq_domain *inner_domain; + struct msi_domain_info *info; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->ops = &its_msi_domain_ops; + info->data = its; + + inner_domain = irq_domain_create_hierarchy(its_parent, + its->msi_domain_flags, 0, + its->fwnode_handle, &its_domain_ops, + info); + if (!inner_domain) { + kfree(info); + return -ENOMEM; + } + + irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); + + return 0; +} + +static int its_init_vpe_domain(void) +{ + struct its_node *its; + u32 devid; + int entries; + + if (gic_rdists->has_direct_lpi) { + pr_info("ITS: Using DirectLPI for VPE invalidation\n"); + return 0; + } + + /* Any ITS will do, even if not v4 */ + its = list_first_entry(&its_nodes, struct its_node, entry); + + entries = roundup_pow_of_two(nr_cpu_ids); + vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), + GFP_KERNEL); + if (!vpe_proxy.vpes) + return -ENOMEM; + + /* Use the last possible DevID */ + devid = GENMASK(device_ids(its) - 1, 0); + vpe_proxy.dev = its_create_device(its, devid, entries, false); + if (!vpe_proxy.dev) { + kfree(vpe_proxy.vpes); + pr_err("ITS: Can't allocate GICv4 proxy device\n"); + return -ENOMEM; + } + + BUG_ON(entries > vpe_proxy.dev->nr_ites); + + raw_spin_lock_init(&vpe_proxy.lock); + vpe_proxy.next_victim = 0; + pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", + devid, vpe_proxy.dev->nr_ites); + + return 0; +} + +static int __init 
its_compute_its_list_map(struct its_node *its) +{ + int its_number; + u32 ctlr; + + /* + * This is assumed to be done early enough that we're + * guaranteed to be single-threaded, hence no + * locking. Should this change, we should address + * this. + */ + its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); + if (its_number >= GICv4_ITS_LIST_MAX) { + pr_err("ITS@%pa: No ITSList entry available!\n", + &its->phys_base); + return -EINVAL; + } + + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr &= ~GITS_CTLR_ITS_NUMBER; + ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; + writel_relaxed(ctlr, its->base + GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); + if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { + its_number = ctlr & GITS_CTLR_ITS_NUMBER; + its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; + } + + if (test_and_set_bit(its_number, &its_list_map)) { + pr_err("ITS@%pa: Duplicate ITSList entry %d\n", + &its->phys_base, its_number); + return -EINVAL; + } + + return its_number; +} + +static int __init its_probe_one(struct its_node *its) +{ + u64 baser, tmp; + struct page *page; + u32 ctlr; + int err; + + if (is_v4(its)) { + if (!(its->typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(its); + if (err < 0) + goto out; + + its->list_nr = err; + + pr_info("ITS@%pa: Using ITS number %d\n", + &its->phys_base, err); + } else { + pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base); + } + + if (is_v4_1(its)) { + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + + its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K); + if (!its->sgir_base) { + err = -ENOMEM; + goto out; + } + + its->mpidr = readl_relaxed(its->base + GITS_MPIDR); + + pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", + &its->phys_base, its->mpidr, svpet); + } + } + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ)); + if (!page) { + err = -ENOMEM; + goto out_unmap_sgir; + } + its->cmd_base = (void *)page_address(page); + its->cmd_write = its->cmd_base; + + err = its_alloc_tables(its); + if (err) + goto out_free_cmd; + + err = its_alloc_collections(its); + if (err) + goto out_free_tables; + + baser = (virt_to_phys(its->cmd_base) | + GITS_CBASER_RaWaWb | + GITS_CBASER_InnerShareable | + (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | + GITS_CBASER_VALID); + + gits_write_cbaser(baser, its->base + GITS_CBASER); + tmp = gits_read_cbaser(its->base + GITS_CBASER); + + if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GITS_CBASER_SHAREABILITY_MASK; + + if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { + if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. 
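+			 * A non-shareable mapping would not be coherent
+			 * with the CPU caches, so drop to non-cacheable
+			 * and flush the command queue writes explicitly
+			 * (ITS_FLAGS_CMDQ_NEEDS_FLUSHING below).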
+ */ + baser &= ~(GITS_CBASER_SHAREABILITY_MASK | + GITS_CBASER_CACHEABILITY_MASK); + baser |= GITS_CBASER_nC; + gits_write_cbaser(baser, its->base + GITS_CBASER); + } + pr_info("ITS: using cache flushing for cmd queue\n"); + its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; + } + + gits_write_cwriter(0, its->base + GITS_CWRITER); + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr |= GITS_CTLR_ENABLE; + if (is_v4(its)) + ctlr |= GITS_CTLR_ImDe; + writel_relaxed(ctlr, its->base + GITS_CTLR); + + err = its_init_domain(its); + if (err) + goto out_free_tables; + + raw_spin_lock(&its_lock); + list_add(&its->entry, &its_nodes); + raw_spin_unlock(&its_lock); + + return 0; + +out_free_tables: + its_free_tables(its); +out_free_cmd: + free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +out_unmap_sgir: + if (its->sgir_base) + iounmap(its->sgir_base); +out: + pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err); + return err; +} + +static bool gic_rdists_supports_plpis(void) +{ + return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); +} + +static int redist_disable_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + u64 timeout = USEC_PER_SEC; + u64 val; + + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } + + val = readl_relaxed(rbase + GICR_CTLR); + if (!(val & GICR_CTLR_ENABLE_LPIS)) + return 0; + + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. + * + * If running with preallocated tables, there is nothing to do. + */ + if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) || + (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) + return 0; + + /* + * From that point on, we only try to do some damage control. + */ + pr_warn("GIC-2500: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + smp_processor_id()); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + /* Disable LPIs */ + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure any change to GICR_CTLR is observable by the GIC */ + dsb(sy); + + /* + * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs + * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. + * Error out if we time out waiting for RWP to clear. + */ + while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { + if (!timeout) { + pr_err("CPU%d: Timeout while disabling LPIs\n", + smp_processor_id()); + return -ETIMEDOUT; + } + udelay(1); + timeout--; + } + + /* + * After it has been written to 1, it is IMPLEMENTATION + * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be + * cleared to 0. Error out if clearing the bit failed. 
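+	 * If the bit is stuck at 1, LPIs cannot be turned off and the
+	 * pending/property tables cannot be safely reprogrammed.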
+ */ + if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { + pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); + return -EBUSY; + } + + return 0; +} + +int phytium_its_cpu_init(void) +{ + if (!list_empty(&its_nodes)) { + int ret; + + ret = redist_disable_lpis(); + if (ret) + return ret; + + its_cpu_init_lpis(); + its_cpu_init_collections(); + } + + return 0; +} + +static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work) +{ + cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state); + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; +} + +static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work, + rdist_memreserve_cpuhp_cleanup_workfn); + +/* Mark all the BASER registers as invalid before they get reprogrammed */ +static int __init its_reset_one(struct resource *res) +{ + void __iomem *its_base; + int err, i; + + its_base = its_map_one(res, &err); + if (!its_base) + return err; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) + gits_write_baser(0, its_base + GITS_BASER + (i << 3)); + + iounmap(its_base); + return 0; +} + +static const struct of_device_id its_device_id[] = { + { .compatible = "arm,gic-phytium-2500-its", }, + {}, +}; + +static struct its_node __init *its_node_init(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + void __iomem *its_base; + struct its_node *its; + int err; + + its_base = its_map_one(res, &err); + if (!its_base) + return NULL; + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) + goto out_unmap; + + raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + + its->typer = gic_read_typer(its_base + GITS_TYPER); + its->base = its_base; + its->phys_base = res->start; + its->get_msi_base = its_irq_get_msi_base; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI; + + its->numa_node = numa_node; + its->fwnode_handle = handle; + + return its; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static void its_node_destroy(struct its_node *its) +{ + iounmap(its->base); + kfree(its); +} + +static int __init its_of_probe(struct device_node *node) +{ + struct device_node *np; + struct resource res; + int err; + + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
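+	 * Hence the two scans below: a first pass resetting every ITS,
+	 * and a second pass actually probing them.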
+ */ + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np) || + !of_property_read_bool(np, "msi-controller") || + of_address_to_resource(np, 0, &res)) + continue; + + err = its_reset_one(&res); + if (err) + return err; + } + + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + struct its_node *its; + + if (!of_device_is_available(np)) + continue; + if (!of_property_read_bool(np, "msi-controller")) { + pr_warn("%pOF: no msi-controller property, ITS ignored\n", + np); + continue; + } + + if (of_address_to_resource(np, 0, &res)) { + pr_warn("%pOF: no regs?\n", np); + continue; + } + + + its = its_node_init(&res, &np->fwnode, of_node_to_nid(np)); + if (!its) + return -ENOMEM; + + its_enable_quirks(its); + err = its_probe_one(its); + if (err) { + its_node_destroy(its); + return err; + } + } + return 0; +} + +#ifdef CONFIG_ACPI + +#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) + +#ifdef CONFIG_ACPI_NUMA +struct its_srat_map { + /* numa node id */ + u32 numa_node; + /* GIC ITS ID */ + u32 its_id; +}; + +static struct its_srat_map *its_srat_maps __initdata; +static int its_in_srat __initdata; + +static int __init acpi_get_its_numa_node(u32 its_id) +{ + int i; + + for (i = 0; i < its_in_srat; i++) { + if (its_id == its_srat_maps[i].its_id) + return its_srat_maps[i].numa_node; + } + return NUMA_NO_NODE; +} + +static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + return 0; +} + +static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + int node; + struct acpi_srat_gic_its_affinity *its_affinity; + + its_affinity = (struct acpi_srat_gic_its_affinity *)header; + if (!its_affinity) + return -EINVAL; + + if (its_affinity->header.length < sizeof(*its_affinity)) { + pr_err("SRAT: Invalid header length %d in ITS affinity\n", + its_affinity->header.length); + return -EINVAL; + } + + /* + * Note that in theory a new proximity node could be created by this + * entry as it is an SRAT resource allocation structure. + * We do not currently support doing so. 
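+	 * If the PXM doesn't resolve to an already known NUMA node,
+	 * the affinity entry is reported and skipped below.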
+ */ + node = pxm_to_node(its_affinity->proximity_domain); + + if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { + pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); + return 0; + } + + its_srat_maps[its_in_srat].numa_node = node; + its_srat_maps[its_in_srat].its_id = its_affinity->its_id; + its_in_srat++; + pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", + its_affinity->proximity_domain, its_affinity->its_id, node); + + return 0; +} + +static void __init acpi_table_parse_srat_its(void) +{ + int count; + + count = acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_match_srat_its, 0); + if (count <= 0) + return; + + its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), + GFP_KERNEL); + if (!its_srat_maps) + return; + + acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_parse_srat_its, 0); +} + +/* free the its_srat_maps after ITS probing */ +static void __init acpi_its_srat_maps_free(void) +{ + kfree(its_srat_maps); +} +#else +static void __init acpi_table_parse_srat_its(void) { } +static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } +static void __init acpi_its_srat_maps_free(void) { } +#endif + +static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct fwnode_handle *dom_handle; + struct its_node *its; + struct resource res; + int err; + + its_entry = (struct acpi_madt_generic_translator *)header; + memset(&res, 0, sizeof(res)); + res.start = its_entry->base_address; + res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; + res.flags = IORESOURCE_MEM; + + dom_handle = irq_domain_alloc_fwnode(&res.start); + if (!dom_handle) { + pr_err("ITS@%pa: Unable to allocate GIC-phytium-2500 ITS domain token\n", + &res.start); + return -ENOMEM; + } + + err = iort_register_domain_token(its_entry->translation_id, res.start, + dom_handle); + if (err) { + pr_err("ITS@%pa: Unable to register GIC-phytium-2500 ITS domain token (ITS ID %d) to IORT\n", + &res.start, its_entry->translation_id); + goto dom_err; + } + + its = its_node_init(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!its) { + err = -ENOMEM; + goto node_err; + } + + err = its_probe_one(its); + if (!err) + return 0; + +node_err: + iort_deregister_domain_token(its_entry->translation_id); +dom_err: + irq_domain_free_fwnode(dom_handle); + return err; +} + +static int __init its_acpi_reset(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct resource res; + + its_entry = (struct acpi_madt_generic_translator *)header; + res = (struct resource) { + .start = its_entry->base_address, + .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1, + .flags = IORESOURCE_MEM, + }; + + return its_reset_one(&res); +} + +static void __init its_acpi_probe(void) +{ + acpi_table_parse_srat_its(); + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
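+	 * The MADT translator entries are therefore walked twice:
+	 * once to reset, and once to probe.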
+ */ + if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + its_acpi_reset, 0) > 0) + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + gic_acpi_parse_madt_its, 0); + acpi_its_srat_maps_free(); +} +#else +static void __init its_acpi_probe(void) { } +#endif + +int __init phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, + struct irq_domain *parent_domain) +{ + struct device_node *of_node; + struct its_node *its; + bool has_v4 = false; + bool has_v4_1 = false; + int err; + + gic_rdists = rdists; + + its_parent = parent_domain; + of_node = to_of_node(handle); + if (of_node) + its_of_probe(of_node); + else + its_acpi_probe(); + + if (list_empty(&its_nodes)) { + pr_warn("ITS: No ITS available, not enabling LPIs\n"); + return -ENXIO; + } + + err = allocate_lpi_tables(); + if (err) + return err; + + list_for_each_entry(its, &its_nodes, entry) { + has_v4 |= is_v4(its); + has_v4_1 |= is_v4_1(its); + } + + /* Don't bother with inconsistent systems */ + if (WARN_ON(!has_v4_1 && rdists->has_rvpeid)) + rdists->has_rvpeid = false; + + if (has_v4 & rdists->has_vlpis) { + const struct irq_domain_ops *sgi_ops; + + if (has_v4_1) + sgi_ops = &its_sgi_domain_ops; + else + sgi_ops = NULL; + + if (its_init_vpe_domain() || + its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) { + rdists->has_vlpis = false; + pr_err("ITS: Disabling GICv4 support\n"); + } + } + + register_syscore_ops(&its_syscore_ops); + + return 0; +} diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c new file mode 100644 index 0000000000000000000000000000000000000000..dbeeb795b58146d8b01834a41162e1d790f0d0bd --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500.c @@ -0,0 +1,2916 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Phytium Corporation. + * Author: + * Wang Yinfeng + * Chen Baozi + * Chen Siyu + * Cui Fulong + * Li Yuting + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */
+
+
+#define pr_fmt(fmt) "GIC-2500: " fmt
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+#include "irq-gic-common.h"
+
+#define MAX_MARS3_SOC_COUNT 8
+#define MARS3_ADDR_SKTID_SHIFT 41
+
+struct gic_dist_desc {
+	void __iomem *dist_base;
+	phys_addr_t phys_base;
+	unsigned long size;
+};
+
+static struct gic_dist_desc mars3_gic_dists[MAX_MARS3_SOC_COUNT] __read_mostly;
+
+static unsigned int mars3_sockets_bitmap = 0x1;
+
+#define mars3_irq_to_skt(hwirq) (((hwirq) - 32) % 8)
+
+#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
+
+#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
+#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
+#define FLAGS_WORKAROUND_MTK_GICR_SAVE (1ULL << 2)
+#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 3)
+
+#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
+
+struct redist_region {
+	void __iomem *redist_base;
+	phys_addr_t phys_base;
+	bool single_redist;
+};
+
+struct gic_chip_data {
+	struct fwnode_handle *fwnode;
+	phys_addr_t dist_phys_base;
+	void __iomem *dist_base;
+	struct redist_region *redist_regions;
+	struct rdists rdists;
+	struct irq_domain *domain;
+	u64 redist_stride;
+	u32 nr_redist_regions;
+	u64 flags;
+	bool has_rss;
+	unsigned int ppi_nr;
+	struct partition_desc **ppi_descs;
+};
+
+#define T241_CHIPS_MAX 4
+static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);
+
+static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum);
+
+static struct gic_chip_data gic_data __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+
+#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
+#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+
+/*
+ * The behaviours of RPR and PMR registers differ depending on the value of
+ * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
+ * distributor and redistributors depends on whether security is enabled in the
+ * GIC.
+ *
+ * When security is enabled, non-secure priority values from the (re)distributor
+ * are presented to the GIC CPUIF as follows:
+ * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
+ *
+ * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
+ * EL1 are subject to a similar operation thus matching the priorities presented
+ * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
+ * these values are unchanged by the GIC.
+ *
+ * see GICv3/GICv4 Architecture Specification (IHI0069D):
+ * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
+ *   priorities.
+ * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
+ *   interrupt.
+ */
+static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis_ft2500);
+
+extern struct static_key_false gic_nonsecure_priorities;
+
+/*
+ * When the Non-secure world has access to group 0 interrupts (as a
+ * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
+ * return the Distributor's view of the interrupt priority.
+ *
+ * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
+ * written by software is moved to the Non-secure range by the Distributor.
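+ * (i.e. (prio >> 1) | 0x80: a priority written as 0xa0 is presented
+ * as 0xd0, for example).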
+ *
+ * If both are true (which is when gic_nonsecure_priorities gets enabled),
+ * we need to shift down the priority programmed by software to match it
+ * against the value returned by ICC_RPR_EL1.
+ */
+#define GICD_INT_RPR_PRI(priority)					\
+	({								\
+		u32 __priority = (priority);				\
+		if (static_branch_unlikely(&gic_nonsecure_priorities))	\
+			__priority = 0x80 | (__priority >> 1);		\
+									\
+		__priority;						\
+	})
+
+/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
+static refcount_t *ppi_nmi_refs;
+
+static struct gic_kvm_info gic_v3_kvm_info __initdata;
+static DEFINE_PER_CPU(bool, has_rss_ft2500);
+
+#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
+#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
+
+/* Our default, arbitrary priority value. Linux only uses one anyway. */
+#define DEFAULT_PMR_VALUE 0xf0
+
+enum gic_intid_range {
+	SGI_RANGE,
+	PPI_RANGE,
+	SPI_RANGE,
+	EPPI_RANGE,
+	ESPI_RANGE,
+	LPI_RANGE,
+	__INVALID_RANGE__
+};
+
+static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
+{
+	switch (hwirq) {
+	case 0 ... 15:
+		return SGI_RANGE;
+	case 16 ... 31:
+		return PPI_RANGE;
+	case 32 ... 1019:
+		return SPI_RANGE;
+	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
+		return EPPI_RANGE;
+	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
+		return ESPI_RANGE;
+	case 8192 ... GENMASK(23, 0):
+		return LPI_RANGE;
+	default:
+		return __INVALID_RANGE__;
+	}
+}
+
+static enum gic_intid_range get_intid_range(struct irq_data *d)
+{
+	return __get_intid_range(d->hwirq);
+}
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+	return d->hwirq;
+}
+
+static inline bool gic_irq_in_rdist(struct irq_data *d)
+{
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+	case PPI_RANGE:
+	case EPPI_RANGE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
+{
+	if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
+		irq_hw_number_t hwirq = irqd_to_hwirq(d);
+		u32 chip;
+
+		/*
+		 * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
+		 * registers are directed to the chip that owns the SPI. The
+		 * alias region can also be used for writes to the
+		 * GICD_In{E} except GICD_ICENABLERn. Each chip has support
+		 * for 320 {E}SPIs. Mappings for all 4 chips:
+		 * Chip0 = 32-351
+		 * Chip1 = 352-671
+		 * Chip2 = 672-991
+		 * Chip3 = 4096-4415
+		 */
+		switch (__get_intid_range(hwirq)) {
+		case SPI_RANGE:
+			chip = (hwirq - 32) / 320;
+			break;
+		case ESPI_RANGE:
+			chip = 3;
+			break;
+		default:
+			unreachable();
+		}
+		return t241_dist_base_alias[chip];
+	}
+
+	return gic_data.dist_base;
+}
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+	case PPI_RANGE:
+	case EPPI_RANGE:
+		/* SGI+PPI -> SGI_base for this CPU */
+		return gic_data_rdist_sgi_base();
+
+	case SPI_RANGE:
+	case ESPI_RANGE:
+		/* SPI -> dist_base */
+		return gic_data.dist_base;
+
+	default:
+		return NULL;
+	}
+}
+
+static void gic_do_wait_for_rwp(void __iomem *base)
+{
+	u32 count = 1000000;	/* 1s! */
+
+	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+		count--;
+		if (!count) {
+			pr_err_ratelimited("RWP timeout, gone fishing\n");
+			return;
+		}
+		cpu_relax();
+		udelay(1);
+	}
+}
+
+/* Wait for completion of a distributor change */
+static void gic_dist_wait_for_rwp(void)
+{
+	gic_do_wait_for_rwp(gic_data.dist_base);
+}
+
+/* Wait for completion of a redistributor change */
+static void gic_redist_wait_for_rwp(void)
+{
+	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+}
+
+#ifdef CONFIG_ARM64
+
+static u64 __maybe_unused gic_read_iar(void)
+{
+	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
+		return gic_read_iar_cavium_thunderx();
+	else
+		return gic_read_iar_common();
+}
+#endif
+
+static void gic_enable_redist(bool enable)
+{
+	void __iomem *rbase;
+	u32 count = 1000000;	/* 1s! */
+	u32 val;
+	unsigned long mpidr;
+	int i;
+
+	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
+		return;
+
+	rbase = gic_data_rdist_rd_base();
+
+	val = readl_relaxed(rbase + GICR_WAKER);
+	if (enable)
+		/* Wake up this CPU redistributor */
+		val &= ~GICR_WAKER_ProcessorSleep;
+	else
+		val |= GICR_WAKER_ProcessorSleep;
+	writel_relaxed(val, rbase + GICR_WAKER);
+
+	if (!enable) {		/* Check that GICR_WAKER is writeable */
+		val = readl_relaxed(rbase + GICR_WAKER);
+		if (!(val & GICR_WAKER_ProcessorSleep))
+			return;	/* No PM support in this redistributor */
+	}
+
+	while (--count) {
+		val = readl_relaxed(rbase + GICR_WAKER);
+		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
+			break;
+		cpu_relax();
+		udelay(1);
+	}
+	if (!count)
+		pr_err_ratelimited("redistributor failed to %s...\n",
+				   enable ? "wakeup" : "sleep");
+
+	mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+	if (mpidr & 0xFFFF)	// either Aff1 or Aff0 is not zero
+		return;
+
+	rbase = rbase + 64 * SZ_128K;	// skip 64 Redistributors
+
+	for (i = 0; i < 4; i++) {
+		val = readl_relaxed(rbase + GICR_WAKER);
+		if (enable)
+			/* Wake up this CPU redistributor */
+			val &= ~GICR_WAKER_ProcessorSleep;
+		else
+			val |= GICR_WAKER_ProcessorSleep;
+		writel_relaxed(val, rbase + GICR_WAKER);
+
+		if (!enable) {	/* Check that GICR_WAKER is writeable */
+			val = readl_relaxed(rbase + GICR_WAKER);
+			if (!(val & GICR_WAKER_ProcessorSleep))
+				return;	/* No PM support in this redistributor */
+		}
+
+		count = 1000000;	/* 1s! */
+		while (--count) {
+			val = readl_relaxed(rbase + GICR_WAKER);
+			if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
+				break;
+			cpu_relax();
+			udelay(1);
+		}
+		if (!count)
+			pr_err_ratelimited("CPU MPIDR 0x%lx: redistributor %d failed to %s...\n",
+					   mpidr, 64 + i, enable ? "wakeup" : "sleep");
+
+		rbase = rbase + SZ_128K;	// next redistributor
+	}
+}
+
+/*
+ * Routines to disable, enable, EOI and route interrupts
+ */
+static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
+{
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+	case PPI_RANGE:
+	case SPI_RANGE:
+		*index = d->hwirq;
+		return offset;
+	case EPPI_RANGE:
+		/*
+		 * Contrary to the ESPI range, the EPPI range is contiguous
+		 * to the PPI range in the registers, so let's adjust the
+		 * displacement accordingly. Consistency is overrated.
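+		 * EPPI0 thus lands at index 32, immediately after the
+		 * SGI+PPI block.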
+		 */
+		*index = d->hwirq - EPPI_BASE_INTID + 32;
+		return offset;
+	case ESPI_RANGE:
+		*index = d->hwirq - ESPI_BASE_INTID;
+		switch (offset) {
+		case GICD_ISENABLER:
+			return GICD_ISENABLERnE;
+		case GICD_ICENABLER:
+			return GICD_ICENABLERnE;
+		case GICD_ISPENDR:
+			return GICD_ISPENDRnE;
+		case GICD_ICPENDR:
+			return GICD_ICPENDRnE;
+		case GICD_ISACTIVER:
+			return GICD_ISACTIVERnE;
+		case GICD_ICACTIVER:
+			return GICD_ICACTIVERnE;
+		case GICD_IPRIORITYR:
+			return GICD_IPRIORITYRnE;
+		case GICD_ICFGR:
+			return GICD_ICFGRnE;
+		case GICD_IROUTER:
+			return GICD_IROUTERnE;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	WARN_ON(1);
+	*index = d->hwirq;
+	return offset;
+}
+
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+	void __iomem *base;
+	u32 index, mask;
+
+	offset = convert_offset_index(d, offset, &index);
+	mask = 1 << (index % 32);
+
+	if (gic_irq_in_rdist(d))
+		base = gic_data_rdist_sgi_base();
+	else {
+		unsigned int skt;
+
+		skt = mars3_irq_to_skt(gic_irq(d));
+		base = mars3_gic_dists[skt].dist_base;
+	}
+
+	return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
+}
+
+static void gic_poke_irq(struct irq_data *d, u32 offset)
+{
+	void __iomem *base;
+
+	unsigned long mpidr;
+	void __iomem *rbase;
+	int i;
+	unsigned int skt;
+	u32 index, mask;
+
+	offset = convert_offset_index(d, offset, &index);
+	mask = 1 << (index % 32);
+
+	if (gic_irq_in_rdist(d)) {
+		base = gic_data_rdist_sgi_base();
+
+		writel_relaxed(mask, base + offset + (index / 32) * 4);
+		gic_redist_wait_for_rwp();
+
+		mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+		if ((mpidr & 0xFFFF) == 0) { // both Aff1 and Aff0 are zero
+			rbase = base + 64*SZ_128K; // skip 64 Redistributors
+
+			for (i = 0; i < 4; i++) {
+				writel_relaxed(mask, rbase + offset + (index / 32) * 4);
+				gic_do_wait_for_rwp(rbase - SZ_64K); // RD from SGI base
+				rbase = rbase + SZ_128K;
+			}
+		} // core 0 of each socket
+	} else {
+		skt = mars3_irq_to_skt(gic_irq(d));
+		base = mars3_gic_dists[skt].dist_base;
+		writel_relaxed(mask, base + offset + (index / 32) * 4);
+		gic_do_wait_for_rwp(base);
+	}
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+	gic_poke_irq(d, GICD_ICENABLER);
+	if (gic_irq_in_rdist(d))
+		gic_redist_wait_for_rwp();
+	else
+		gic_dist_wait_for_rwp();
+}
+
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+	gic_mask_irq(d);
+	/*
+	 * When masking a forwarded interrupt, make sure it is
+	 * deactivated as well.
+	 *
+	 * This ensures that an interrupt that is getting
+	 * disabled/masked will not get "stuck", because there is
+	 * no one to deactivate it (guest is being terminated).
+	 */
+	if (irqd_is_forwarded_to_vcpu(d))
+		gic_poke_irq(d, GICD_ICACTIVER);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+	gic_poke_irq(d, GICD_ISENABLER);
+}
+
+static inline bool gic_supports_nmi_ft2500(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+	       static_branch_likely(&supports_pseudo_nmis_ft2500);
+}
+
+static int gic_irq_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which, bool val)
+{
+	u32 reg;
+
+	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
+		return -EINVAL;
+
+	switch (which) {
+	case IRQCHIP_STATE_PENDING:
+		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
+		break;
+
+	case IRQCHIP_STATE_ACTIVE:
+		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
+		break;
+
+	case IRQCHIP_STATE_MASKED:
+		if (val) {
+			gic_mask_irq(d);
+			return 0;
+		}
+		reg = GICD_ISENABLER;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	gic_poke_irq(d, reg);
+	return 0;
+}
+
+static int gic_irq_get_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which, bool *val)
+{
+	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
+		return -EINVAL;
+
+	switch (which) {
+	case IRQCHIP_STATE_PENDING:
+		*val = gic_peek_irq(d, GICD_ISPENDR);
+		break;
+
+	case IRQCHIP_STATE_ACTIVE:
+		*val = gic_peek_irq(d, GICD_ISACTIVER);
+		break;
+
+	case IRQCHIP_STATE_MASKED:
+		*val = !gic_peek_irq(d, GICD_ISENABLER);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void gic_irq_set_prio(struct irq_data *d, u8 prio)
+{
+	void __iomem *base = gic_dist_base(d);
+	u32 offset, index;
+
+	offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
+
+	writeb_relaxed(prio, base + offset + index);
+}
+
+static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
+{
+	switch (__get_intid_range(hwirq)) {
+	case PPI_RANGE:
+		return hwirq - 16;
+	case EPPI_RANGE:
+		return hwirq - EPPI_BASE_INTID + 16;
+	default:
+		unreachable();
+	}
+}
+
+static u32 gic_get_ppi_index(struct irq_data *d)
+{
+	return __gic_get_ppi_index(d->hwirq);
+}
+
+static int gic_irq_nmi_setup(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_to_desc(d->irq);
+
+	if (!gic_supports_nmi_ft2500())
+		return -EINVAL;
+
+	if (gic_peek_irq(d, GICD_ISENABLER)) {
+		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
+		return -EINVAL;
+	}
+
+	/*
+	 * A secondary irq_chip should be in charge of LPI request,
+	 * it should not be possible to get there
+	 */
+	if (WARN_ON(gic_irq(d) >= 8192))
+		return -EINVAL;
+
+	/* desc lock should already be held */
+	if (gic_irq_in_rdist(d)) {
+		u32 idx = gic_get_ppi_index(d);
+
+		/* Setting up PPI as NMI, only switch handler for first NMI */
+		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
+			refcount_set(&ppi_nmi_refs[idx], 1);
+			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
+		}
+	} else {
+		desc->handle_irq = handle_fasteoi_nmi;
+	}
+
+	gic_irq_set_prio(d, GICD_INT_NMI_PRI);
+
+	return 0;
+}
+
+static void gic_irq_nmi_teardown(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_to_desc(d->irq);
+
+	if (WARN_ON(!gic_supports_nmi_ft2500()))
+		return;
+
+	if (gic_peek_irq(d, GICD_ISENABLER)) {
+		pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
+		return;
+	}
+
+	/*
+	 * A secondary irq_chip should be in charge of LPI request,
+	 * it should not be possible to get there
+	 */
+	if (WARN_ON(gic_irq(d) >= 8192))
+		return;
+
+	/* desc lock should already be held */
+	if (gic_irq_in_rdist(d)) {
+		u32 idx = gic_get_ppi_index(d);
+
+		/* Tearing down NMI, only switch handler for last NMI */
+		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
+			desc->handle_irq = handle_percpu_devid_irq;
+	} else {
+		desc->handle_irq = handle_fasteoi_irq;
+	}
+
+	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
+}
+
+static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
+{
+	enum gic_intid_range range;
+
+	if (!static_branch_unlikely(&gic_arm64_2941627_erratum))
+		return false;
+
+	range = get_intid_range(d);
+
+	/*
+	 * The workaround is needed if the IRQ is an SPI and
+	 * the target cpu is different from the one we are
+	 * executing on.
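+	 * In that case deactivation must go through GICD_ICACTIVER
+	 * rather than the ICC_EOIR1_EL1/ICC_DIR_EL1 path.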
+	 */
+	return (range == SPI_RANGE || range == ESPI_RANGE) &&
+	       !cpumask_test_cpu(raw_smp_processor_id(),
+				 irq_data_get_effective_affinity_mask(d));
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+	write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
+	isb();
+
+	if (gic_arm64_erratum_2941627_needed(d)) {
+		/*
+		 * Make sure the GIC stream deactivate packet
+		 * issued by ICC_EOIR1_EL1 has completed before
+		 * deactivating through GICD_IACTIVER.
+		 */
+		dsb(sy);
+		gic_poke_irq(d, GICD_ICACTIVER);
+	}
+}
+
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+	/*
+	 * No need to deactivate an LPI, or an interrupt that
+	 * is getting forwarded to a vcpu.
+	 */
+	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
+		return;
+
+	if (!gic_arm64_erratum_2941627_needed(d))
+		gic_write_dir(gic_irq(d));
+	else
+		gic_poke_irq(d, GICD_ICACTIVER);
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+	enum gic_intid_range range;
+	unsigned int irq = gic_irq(d);
+	void __iomem *base, *rbase;
+	u32 offset, index, skt;
+	int ret, i;
+	unsigned long mpidr;
+
+	range = get_intid_range(d);
+
+	/* Interrupt configuration for SGIs can't be changed */
+	if (range == SGI_RANGE)
+		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
+
+	/* SPIs have restrictions on the supported types */
+	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
+	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+		return -EINVAL;
+
+	offset = convert_offset_index(d, GICD_ICFGR, &index);
+
+	if (gic_irq_in_rdist(d)) {
+		base = gic_data_rdist_sgi_base();
+		ret = gic_configure_irq(index, type, base + offset, gic_redist_wait_for_rwp);
+
+		mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+		if ((mpidr & 0xffff) == 0) {
+			rbase = base + 64*SZ_128K;
+
+			for (i = 0; i < 4; i++) {
+				ret = gic_configure_irq(index, type, rbase + offset, NULL);
+				gic_do_wait_for_rwp(rbase - SZ_64K);
+				rbase = rbase + SZ_128K;
+			}
+		}
+	} else {
+		skt = mars3_irq_to_skt(gic_irq(d));
+		base = mars3_gic_dists[skt].dist_base;
+		ret = gic_configure_irq(index, type, base + offset, NULL);
+		gic_do_wait_for_rwp(base);
+	}
+
+	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
+		/* Misconfigured PPIs are usually not fatal */
+		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+	if (get_intid_range(d) == SGI_RANGE)
+		return -EINVAL;
+
+	if (vcpu)
+		irqd_set_forwarded_to_vcpu(d);
+	else
+		irqd_clr_forwarded_to_vcpu(d);
+	return 0;
+}
+
+static u64 gic_cpu_to_affinity(int cpu)
+{
+	u64 mpidr = cpu_logical_map(cpu);
+	u64 aff;
+
+	/* ASR8601 needs to have its affinities shifted down... */
+	if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
+		mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
+			 (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));
+
+	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+	return aff;
+}
+
+static void gic_deactivate_unhandled(u32 irqnr)
+{
+	if (static_branch_likely(&supports_deactivate_key)) {
+		if (irqnr < 8192)
+			gic_write_dir(irqnr);
+	} else {
+		write_gicreg(irqnr, ICC_EOIR1_EL1);
+		isb();
+	}
+}
+
+/*
+ * Follow a read of the IAR with any HW maintenance that needs to happen prior
+ * to invoking the relevant IRQ handler. We must do two things:
+ *
+ * (1) Ensure instruction ordering between a read of IAR and subsequent
+ *     instructions in the IRQ handler using an ISB.
+ *
+ *     It is possible for the IAR to report an IRQ which was signalled *after*
+ *     the CPU took an IRQ exception as multiple interrupts can race to be
+ *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
+ *     later interrupts could be prioritized by the GIC.
+ *
+ *     For devices which are tightly coupled to the CPU, such as PMUs, a
+ *     context synchronization event is necessary to ensure that system
+ *     register state is not stale, as these may have been indirectly written
+ *     *after* exception entry.
+ *
+ * (2) Deactivate the interrupt when EOI mode 1 is in use.
+ */
+static inline void gic_complete_ack(u32 irqnr)
+{
+	if (static_branch_likely(&supports_deactivate_key))
+		write_gicreg(irqnr, ICC_EOIR1_EL1);
+
+	isb();
+}
+
+static bool gic_rpr_is_nmi_prio(void)
+{
+	if (!gic_supports_nmi_ft2500())
+		return false;
+
+	return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
+}
+
+static bool gic_irqnr_is_special(u32 irqnr)
+{
+	return irqnr >= 1020 && irqnr <= 1023;
+}
+
+static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
+{
+	if (gic_irqnr_is_special(irqnr))
+		return;
+
+	gic_complete_ack(irqnr);
+
+	if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
+		WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
+		gic_deactivate_unhandled(irqnr);
+	}
+}
+
+static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+{
+	if (gic_irqnr_is_special(irqnr))
+		return;
+
+	gic_complete_ack(irqnr);
+
+	if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
+		WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
+		gic_deactivate_unhandled(irqnr);
+	}
+}
+
+/*
+ * An exception has been taken from a context with IRQs enabled, and this could
+ * be an IRQ or an NMI.
+ *
+ * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
+ * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
+ * after handling any NMI but before handling any IRQ.
+ *
+ * The entry code has performed IRQ entry, and if an NMI is detected we must
+ * perform NMI entry/exit around invoking the handler.
+ */
+static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
+{
+	bool is_nmi;
+	u32 irqnr;
+
+	irqnr = gic_read_iar();
+
+	is_nmi = gic_rpr_is_nmi_prio();
+
+	if (is_nmi) {
+		nmi_enter();
+		__gic_handle_nmi(irqnr, regs);
+		nmi_exit();
+	}
+
+	if (gic_prio_masking_enabled()) {
+		gic_pmr_mask_irqs();
+		gic_arch_enable_irqs();
+	}
+
+	if (!is_nmi)
+		__gic_handle_irq(irqnr, regs);
+}
+
+/*
+ * An exception has been taken from a context with IRQs disabled, which can only
+ * be an NMI.
+ *
+ * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
+ * DAIF.IF (and ICC_PMR_EL1) unchanged.
+ *
+ * The entry code has performed NMI entry.
+ */
+static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
+{
+	u64 pmr;
+	u32 irqnr;
+
+	/*
+	 * We were in a context with IRQs disabled. However, the
+	 * entry code has set PMR to a value that allows any
+	 * interrupt to be acknowledged, and not just NMIs. This can
+	 * lead to surprising effects if the NMI has been retired in
+	 * the meantime, and there is an IRQ pending. The IRQ
+	 * would then be taken in NMI context, something that nobody
+	 * wants to debug twice.
+	 *
+	 * Until we sort this, drop PMR again to a level that will
+	 * actually only allow NMIs before reading IAR, and then
+	 * restore it to what it was.
+	 */
+	pmr = gic_read_pmr();
+	gic_pmr_mask_irqs();
+	isb();
+	irqnr = gic_read_iar();
+	gic_write_pmr(pmr);
+
+	__gic_handle_nmi(irqnr, regs);
+}
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+	if (unlikely(gic_supports_nmi_ft2500() && !interrupts_enabled(regs)))
+		__gic_handle_irq_from_irqsoff(regs);
+	else
+		__gic_handle_irq_from_irqson(regs);
+}
+
+static u32 gic_get_pribits(void)
+{
+	u32 pribits;
+
+	pribits = gic_read_ctlr();
+	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
+	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
+	pribits++;
+
+	return pribits;
+}
+
+static bool gic_has_group0(void)
+{
+	u32 val;
+	u32 old_pmr;
+
+	old_pmr = gic_read_pmr();
+
+	/*
+	 * Let's find out if Group0 is under control of EL3 or not by
+	 * setting the highest possible, non-zero priority in PMR.
+	 *
+	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
+	 * order for the CPU interface to set bit 7, and keep the
+	 * actual priority in the non-secure range. In the process, it
+	 * loses the least significant bit and the actual priority
+	 * becomes 0x80. Reading it back returns 0, indicating that
+	 * we don't have access to Group0.
+	 */
+	gic_write_pmr(BIT(8 - gic_get_pribits()));
+	val = gic_read_pmr();
+
+	gic_write_pmr(old_pmr);
+
+	return val != 0;
+}
+
+static void __init gic_dist_init(void)
+{
+	unsigned int i;
+	u64 affinity;
+	void __iomem *base = gic_data.dist_base;
+	u32 val, skt;
+
+	for (skt = 0; skt < MAX_MARS3_SOC_COUNT; skt++) {
+		if (((1U << skt) & mars3_sockets_bitmap) == 0)
+			continue;
+
+		base = mars3_gic_dists[skt].dist_base;
+
+		/* Disable the distributor */
+		writel_relaxed(0, base + GICD_CTLR);
+		gic_do_wait_for_rwp(base);
+
+		/*
+		 * Configure SPIs as non-secure Group-1. This will only matter
+		 * if the GIC only has a single security state. This will not
+		 * do the right thing if the kernel is running in secure mode,
+		 * but that's not the intended use case anyway.
+		 */
+		for (i = 32; i < GIC_LINE_NR; i += 32)
+			writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
+
+		/* Extended SPI range, not handled by the GICv2/GICv3 common code */
+		for (i = 0; i < GIC_ESPI_NR; i += 32) {
+			writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
+			writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
+		}
+
+		for (i = 0; i < GIC_ESPI_NR; i += 32)
+			writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
+
+		for (i = 0; i < GIC_ESPI_NR; i += 16)
+			writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
+
+		for (i = 0; i < GIC_ESPI_NR; i += 4)
+			writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
+
+		/* Now do the common stuff */
+		gic_dist_config(base, GIC_LINE_NR, NULL);
+		gic_do_wait_for_rwp(base);
+
+		val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+		if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+			pr_info("Enabling SGIs without active state\n");
+			val |= GICD_CTLR_nASSGIreq;
+		}
+
+		/* Enable distributor with ARE, Group1, and wait for it to drain */
+		writel_relaxed(val, base + GICD_CTLR);
+		gic_dist_wait_for_rwp();
+
+		/*
+		 * Set all global interrupts to the boot CPU only. ARE must be
+		 * enabled.
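+		 * (ARE_NS was set when the distributor was enabled just
+		 * above, so the GICD_IROUTERn writes below take effect).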
+ */ + affinity = gic_cpu_to_affinity(smp_processor_id()); + for (i = 32; i < GIC_LINE_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); + + for (i = 0; i < GIC_ESPI_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); + } +} + +static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) +{ + int ret = -ENODEV; + int i; + + for (i = 0; i < gic_data.nr_redist_regions; i++) { + void __iomem *ptr = gic_data.redist_regions[i].redist_base; + u64 typer; + u32 reg; + + reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (reg != GIC_PIDR2_ARCH_GICv3 && + reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ + pr_warn("No redistributor present @%p\n", ptr); + break; + } + + do { + typer = gic_read_typer(ptr + GICR_TYPER); + ret = fn(gic_data.redist_regions + i, ptr); + if (!ret) + return 0; + + if (gic_data.redist_regions[i].single_redist) + break; + + if (gic_data.redist_stride) { + ptr += gic_data.redist_stride; + } else { + ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ + if (typer & GICR_TYPER_VLPIS) + ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ + } + } while (!(typer & GICR_TYPER_LAST)); + } + + return ret ? -ENODEV : 0; +} + +static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) +{ + unsigned long mpidr; + u64 typer; + u32 aff; + u32 aff2_skt; + u32 redist_skt; + + /* + * Convert affinity to a 32bit value that can be matched to + * GICR_TYPER bits [63:32]. + */ + mpidr = gic_cpu_to_affinity(smp_processor_id()); + + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + aff2_skt = MPIDR_AFFINITY_LEVEL(mpidr, 2) & 0x7; + redist_skt = (((u64)region->phys_base >> MARS3_ADDR_SKTID_SHIFT) & 0x7); + + if (aff2_skt != redist_skt) + return 1; + + typer = gic_read_typer(ptr + GICR_TYPER); + if ((typer >> 32) == aff) { + u64 offset = ptr - region->redist_base; + + raw_spin_lock_init(&gic_data_rdist()->rd_lock); + gic_data_rdist_rd_base() = ptr; + gic_data_rdist()->phys_base = region->phys_base + offset; + + pr_info("CPU%d: found redistributor %lx region %d:%pa\n", + smp_processor_id(), mpidr, + (int)(region - gic_data.redist_regions), + &gic_data_rdist()->phys_base); + return 0; + } + + /* Try next one */ + return 1; +} + +static int gic_populate_rdist(void) +{ + if (gic_iterate_rdists(__gic_populate_rdist) == 0) + return 0; + + /* We couldn't even deal with ourselves... */ + WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", + smp_processor_id(), + (unsigned long)cpu_logical_map(smp_processor_id())); + return -ENODEV; +} + +static int __gic_update_rdist_properties(struct redist_region *region, + void __iomem *ptr) +{ + u64 typer = gic_read_typer(ptr + GICR_TYPER); + u32 ctlr = readl_relaxed(ptr + GICR_CTLR); + + /* Boot-time cleanup */ + if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { + u64 val; + + /* Deactivate any present vPE */ + val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); + if (val & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + ptr + SZ_128K + GICR_VPENDBASER); + + /* Mark the VPE table as invalid */ + val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_VALID; + gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); + } + + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); + + /* + * TYPER.RVPEID implies some form of DirectLPI, no matter what the + * doc says... 
:-/ And CTLR.IR implies another subset of DirectLPI
+ * that the ITS driver can make use of for LPIs (and not VLPIs).
+ *
+ * These are 3 different ways to express the same thing, depending
+ * on the revision of the architecture and its relaxations over
+ * time. Just group them under the 'direct_lpi' banner.
+ */
+ gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
+ gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
+ !!(ctlr & GICR_CTLR_IR) |
+ gic_data.rdists.has_rvpeid);
+ gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
+
+ /* Detect non-sensical configurations */
+ if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
+ gic_data.rdists.has_direct_lpi = false;
+ gic_data.rdists.has_vlpis = false;
+ gic_data.rdists.has_rvpeid = false;
+ }
+
+ gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
+
+ return 1;
+}
+
+static void gic_update_rdist_properties(void)
+{
+ gic_data.ppi_nr = UINT_MAX;
+ gic_iterate_rdists(__gic_update_rdist_properties);
+ if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
+ gic_data.ppi_nr = 0;
+ pr_info("GICv3 features: %d PPIs%s%s\n",
+ gic_data.ppi_nr,
+ gic_data.has_rss ? ", RSS" : "",
+ gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");
+
+ if (gic_data.rdists.has_vlpis)
+ pr_info("GICv4 features: %s%s%s\n",
+ gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
+ gic_data.rdists.has_rvpeid ? "RVPEID " : "",
+ gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
+}
+
+/* Check whether it's single security state view */
+static inline bool gic_dist_security_disabled(void)
+{
+ return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
+static void gic_cpu_sys_reg_init(void)
+{
+ int i, cpu = smp_processor_id();
+ u64 mpidr = gic_cpu_to_affinity(cpu);
+ u64 need_rss = MPIDR_RS(mpidr);
+ bool group0;
+ u32 pribits;
+
+ /*
+ * Need to check that the SRE bit has actually been set. If
+ * not, it means that SRE is disabled at EL2. We're going to
+ * die painfully, and there is nothing we can do about it.
+ *
+ * Kindly inform the luser.
+ */
+ if (!gic_enable_sre())
+ pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+
+ pribits = gic_get_pribits();
+
+ group0 = gic_has_group0();
+
+ /* Set priority mask register */
+ if (!gic_prio_masking_enabled()) {
+ write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
+ } else if (gic_supports_nmi_ft2500()) {
+ /*
+ * Configuration mismatch with the boot CPU: the system is
+ * likely to die as interrupt masking will not work properly
+ * on all CPUs.
+ *
+ * The boot CPU calls this function before enabling NMI support,
+ * and as a result we'll never see this warning in the boot path
+ * for that CPU.
+ */
+ if (static_branch_unlikely(&gic_nonsecure_priorities))
+ WARN_ON(!group0 || gic_dist_security_disabled());
+ else
+ WARN_ON(group0 && !gic_dist_security_disabled());
+ }
+
+ /*
+ * Some firmwares hand over to the kernel with the BPR changed from
+ * its reset value (and with a value large enough to prevent
+ * any pre-emptive interrupts from working at all). Writing a zero
+ * to BPR restores its reset value.
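+ *
+ * (Writes below the architected minimum are rounded up to it, so
+ * a zero write always yields the finest pre-emption grouping the
+ * implementation supports.)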
+ */ + gic_write_bpr1(0); + + if (static_branch_likely(&supports_deactivate_key)) { + /* EOI drops priority only (mode 1) */ + gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop); + } else { + /* EOI deactivates interrupt too (mode 0) */ + gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir); + } + + /* Always whack Group0 before Group1 */ + if (group0) { + switch (pribits) { + case 8: + case 7: + write_gicreg(0, ICC_AP0R3_EL1); + write_gicreg(0, ICC_AP0R2_EL1); + fallthrough; + case 6: + write_gicreg(0, ICC_AP0R1_EL1); + fallthrough; + case 5: + case 4: + write_gicreg(0, ICC_AP0R0_EL1); + } + + isb(); + } + + switch (pribits) { + case 8: + case 7: + write_gicreg(0, ICC_AP1R3_EL1); + write_gicreg(0, ICC_AP1R2_EL1); + fallthrough; + case 6: + write_gicreg(0, ICC_AP1R1_EL1); + fallthrough; + case 5: + case 4: + write_gicreg(0, ICC_AP1R0_EL1); + } + + isb(); + + /* ... and let's hit the road... */ + gic_write_grpen1(1); + + /* Keep the RSS capability status in per_cpu variable */ + per_cpu(has_rss_ft2500, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS); + + /* Check all the CPUs have capable of sending SGIs to other CPUs */ + for_each_online_cpu(i) { + bool have_rss = per_cpu(has_rss_ft2500, i) && per_cpu(has_rss_ft2500, cpu); + + need_rss |= MPIDR_RS(gic_cpu_to_affinity(i)); + if (need_rss && (!have_rss)) + pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", + cpu, (unsigned long)mpidr, + i, (unsigned long)gic_cpu_to_affinity(i)); + } + + /** + * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0, + * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED + * UNPREDICTABLE choice of : + * - The write is ignored. + * - The RS field is treated as 0. + */ + if (need_rss && (!gic_data.has_rss)) + pr_crit_once("RSS is required but GICD doesn't support it\n"); +} + +static bool gicv3_nolpi; + +static int __init gicv3_nolpi_cfg(char *buf) +{ + return kstrtobool(buf, &gicv3_nolpi); +} +early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); + +static int gic_dist_supports_lpis(void) +{ + return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && + !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && + !gicv3_nolpi); +} + +static void gic_cpu_init(void) +{ + void __iomem *rbase; + int i; + unsigned long mpidr; + + /* Register ourselves with the rest of the world */ + if (gic_populate_rdist()) + return; + + gic_enable_redist(true); + + WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && + !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange), + "Distributor has extended ranges, but CPU%d doesn't\n", + smp_processor_id()); + + rbase = gic_data_rdist_sgi_base(); + + /* Configure SGIs/PPIs as non-secure Group-1 */ + for (i = 0; i < gic_data.ppi_nr + 16; i += 32) + writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); + + gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xFFFF) == 0) { // both Aff1 and Aff0 is zero + rbase = rbase + 64*SZ_128K; // skip 64 Redistributors + + for (i = 0; i < 4; i++) { + /* Configure SGIs/PPIs as non-secure Group-1 */ + writel_relaxed(~0, rbase + GICR_IGROUPR0); + + gic_cpu_config(rbase, gic_data.ppi_nr + 16, NULL); + gic_do_wait_for_rwp(rbase - SZ_64K); + + rbase = rbase + SZ_128K; + + } + } + + /* initialise system registers */ + gic_cpu_sys_reg_init(); +} + +#ifdef CONFIG_SMP + +#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) +#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) + +static int gic_starting_cpu(unsigned int cpu) +{ + gic_cpu_init(); + + if 
+ if (gic_dist_supports_lpis())
+ phytium_its_cpu_init();
+
+ return 0;
+}
+
+static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
+ unsigned long cluster_id)
+{
+ int next_cpu, cpu = *base_cpu;
+ unsigned long mpidr;
+ u16 tlist = 0;
+
+ mpidr = gic_cpu_to_affinity(cpu);
+
+ while (cpu < nr_cpu_ids) {
+ tlist |= 1 << (mpidr & 0xf);
+
+ next_cpu = cpumask_next(cpu, mask);
+ if (next_cpu >= nr_cpu_ids)
+ goto out;
+ cpu = next_cpu;
+
+ mpidr = gic_cpu_to_affinity(cpu);
+
+ if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
+ cpu--;
+ goto out;
+ }
+ }
+out:
+ *base_cpu = cpu;
+ return tlist;
+}
+
+#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
+ (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
+ << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
+
+static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
+{
+ u64 val;
+
+ val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
+ MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
+ irq << ICC_SGI1R_SGI_ID_SHIFT |
+ MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
+ MPIDR_TO_SGI_RS(cluster_id) |
+ tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
+
+ pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+ gic_write_sgi1r(val);
+}
+
+static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
+{
+ int cpu;
+
+ if (WARN_ON(d->hwirq >= 16))
+ return;
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ dsb(ishst);
+
+ for_each_cpu(cpu, mask) {
+ u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
+ u16 tlist;
+
+ tlist = gic_compute_target_list(&cpu, mask, cluster_id);
+ gic_send_sgi(cluster_id, tlist, d->hwirq);
+ }
+
+ /* Force the above writes to ICC_SGI1R_EL1 to be executed */
+ isb();
+}
+
+static void __init gic_smp_init(void)
+{
+ struct irq_fwspec sgi_fwspec = {
+ .fwnode = gic_data.fwnode,
+ .param_count = 1,
+ };
+ int base_sgi;
+
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+ "irqchip/arm/gicv3:starting",
+ gic_starting_cpu, NULL);
+
+ /* Register all 8 non-secure SGIs */
+ base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
+ if (WARN_ON(base_sgi <= 0))
+ return;
+
+ set_smp_ipi_range(base_sgi, 8);
+}
+
+static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val)
+{
+ unsigned int skt, irq_skt, i;
+ unsigned int cpu, cpus = 0;
+
+ unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0};
+
+ for (i = 0; i < nr_cpu_ids; i++) {
+ skt = (cpu_logical_map(i) >> 16) & 0xff;
+ if (skt < MAX_MARS3_SOC_COUNT)
+ skt_cpu_cnt[skt]++;
+ else if (skt != 0xff)
+ pr_err("socket address: %u is out of range.\n", skt);
+ }
+
+ irq_skt = mars3_irq_to_skt(gic_irq(d));
+
+ if (irq_skt != 0)
+ for (i = 0; i < irq_skt; i++)
+ cpus += skt_cpu_cnt[i];
+
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ cpus = cpus + cpu % skt_cpu_cnt[irq_skt];
+
+ if (is_kdump_kernel()) {
+ skt = (cpu_logical_map(cpu) >> 16) & 0xff;
+ if (irq_skt == skt)
+ return cpu;
+
+ for (i = 0; i < nr_cpu_ids; i++) {
+ skt = (cpu_logical_map(i) >> 16) & 0xff;
+ if (skt < MAX_MARS3_SOC_COUNT) {
+ if (irq_skt == skt)
+ return i;
+ } else if (skt != 0xff)
+ pr_err("socket address: %u is out of range.\n", skt);
+ }
+ }
+ return cpus;
+}
+
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ unsigned int cpu;
+ u32 offset, index;
+ void __iomem *reg;
+ int enabled;
+ u64 val;
+ unsigned int skt;
+
+ if (force)
+ cpu = cpumask_first(mask_val);
+ else
+ cpu = gic_cpumask_select(d, mask_val);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (gic_irq_in_rdist(d))
+ return -EINVAL;
+
+ /* If interrupt was enabled, disable it first */
+ enabled = gic_peek_irq(d, GICD_ISENABLER);
+ if (enabled)
+ gic_mask_irq(d);
+
+ offset = convert_offset_index(d, GICD_IROUTER, &index);
+
+ skt = mars3_irq_to_skt(gic_irq(d));
+ reg = mars3_gic_dists[skt].dist_base + offset + (index * 8);
+ val = gic_cpu_to_affinity(cpu);
+
+ gic_write_irouter(val, reg);
+
+ /*
+ * If the interrupt was enabled, enable it again. Otherwise,
+ * just wait for the distributor to have digested our changes.
+ */
+ if (enabled)
+ gic_unmask_irq(d);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#else
+#define gic_set_affinity NULL
+#define gic_ipi_send_mask NULL
+#define gic_smp_init() do { } while (0)
+#endif
+
+static int gic_retrigger(struct irq_data *data)
+{
+ return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
+}
+
+#ifdef CONFIG_CPU_PM
+static int gic_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ if (cmd == CPU_PM_EXIT) {
+ if (gic_dist_security_disabled())
+ gic_enable_redist(true);
+ gic_cpu_sys_reg_init();
+ } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
+ gic_write_grpen1(0);
+ gic_enable_redist(false);
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gic_cpu_pm_notifier_block = {
+ .notifier_call = gic_cpu_pm_notifier,
+};
+
+static void gic_cpu_pm_init(void)
+{
+ cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
+}
+
+#else
+static inline void gic_cpu_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
+static struct irq_chip gic_chip = {
+ .name = "GIC-phytium-2500",
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_set_affinity = gic_set_affinity,
+ .irq_retrigger = gic_retrigger,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_nmi_setup = gic_irq_nmi_setup,
+ .irq_nmi_teardown = gic_irq_nmi_teardown,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static struct irq_chip gic_eoimode1_chip = {
+ .name = "GICv3-phytium-2500",
+ .irq_mask = gic_eoimode1_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoimode1_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_set_affinity = gic_set_affinity,
+ .irq_retrigger = gic_retrigger,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
+ .irq_nmi_setup = gic_irq_nmi_setup,
+ .irq_nmi_teardown = gic_irq_nmi_teardown,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *chip = &gic_chip;
+ struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
+
+ if (static_branch_likely(&supports_deactivate_key))
+ chip = &gic_eoimode1_chip;
+
+ switch (__get_intid_range(hw)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case EPPI_RANGE:
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
+ break;
+
+ case SPI_RANGE:
+ case ESPI_RANGE:
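+ /* SPIs/ESPIs are single-target, fasteoi-handled line interrupts */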
irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + irq_set_probe(irq); + irqd_set_single_target(irqd); + break; + + case LPI_RANGE: + if (!gic_dist_supports_lpis()) + return -EPERM; + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + break; + + default: + return -EPERM; + } + + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); + return 0; +} + +static int gic_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (fwspec->param_count == 1 && fwspec->param[0] < 16) { + *hwirq = fwspec->param[0]; + *type = IRQ_TYPE_EDGE_RISING; + return 0; + } + + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count < 3) + return -EINVAL; + + switch (fwspec->param[0]) { + case 0: /* SPI */ + *hwirq = fwspec->param[1] + 32; + break; + case 1: /* PPI */ + *hwirq = fwspec->param[1] + 16; + break; + case 2: /* ESPI */ + *hwirq = fwspec->param[1] + ESPI_BASE_INTID; + break; + case 3: /* EPPI */ + *hwirq = fwspec->param[1] + EPPI_BASE_INTID; + break; + case GIC_IRQ_TYPE_LPI: /* LPI */ + *hwirq = fwspec->param[1]; + break; + case GIC_IRQ_TYPE_PARTITION: + *hwirq = fwspec->param[1]; + if (fwspec->param[1] >= 16) + *hwirq += EPPI_BASE_INTID - 16; + else + *hwirq += 16; + break; + default: + return -EINVAL; + } + + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + /* + * Make it clear that broken DTs are... broken. + * Partitioned PPIs are an unfortunate exception. + */ + WARN_ON(*type == IRQ_TYPE_NONE && + fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); + return 0; + } + + if (is_fwnode_irqchip(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + + WARN_ON(*type == IRQ_TYPE_NONE); + return 0; + } + + return -EINVAL; +} + +static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + + ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + +static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec, + irq_hw_number_t hwirq) +{ + enum gic_intid_range range; + + if (!gic_data.ppi_descs) + return false; + + if (!is_of_node(fwspec->fwnode)) + return false; + + if (fwspec->param_count < 4 || !fwspec->param[3]) + return false; + + range = __get_intid_range(hwirq); + if (range != PPI_RANGE && range != EPPI_RANGE) + return false; + + return true; +} + +static int gic_irq_domain_select(struct irq_domain *d, + struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + unsigned int type, ret, ppi_idx; + irq_hw_number_t hwirq; + + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return 0; + + /* If this is not DT, then we have a single domain */ + if 
(!is_of_node(fwspec->fwnode)) + return 1; + + ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type); + if (WARN_ON_ONCE(ret)) + return 0; + + if (!fwspec_is_partitioned_ppi(fwspec, hwirq)) + return d == gic_data.domain; + + /* + * If this is a PPI and we have a 4th (non-null) parameter, + * then we need to match the partition domain. + */ + ppi_idx = __gic_get_ppi_index(hwirq); + return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]); +} + +static const struct irq_domain_ops gic_irq_domain_ops = { + .translate = gic_irq_domain_translate, + .alloc = gic_irq_domain_alloc, + .free = gic_irq_domain_free, + .select = gic_irq_domain_select, +}; + +static int partition_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + unsigned long ppi_intid; + struct device_node *np; + unsigned int ppi_idx; + int ret; + + if (!gic_data.ppi_descs) + return -ENOMEM; + + np = of_find_node_by_phandle(fwspec->param[3]); + if (WARN_ON(!np)) + return -EINVAL; + + ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type); + if (WARN_ON_ONCE(ret)) + return 0; + + ppi_idx = __gic_get_ppi_index(ppi_intid); + ret = partition_translate_id(gic_data.ppi_descs[ppi_idx], + of_node_to_fwnode(np)); + if (ret < 0) + return ret; + + *hwirq = ret; + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static const struct irq_domain_ops partition_domain_ops = { + .translate = partition_domain_translate, + .select = gic_irq_domain_select, +}; + +static bool gic_enable_quirk_msm8996(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; + + return true; +} + +static bool gic_enable_quirk_mtk_gicr(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE; + + return true; +} + +static bool gic_enable_quirk_cavium_38539(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539; + + return true; +} + +static bool gic_enable_quirk_hip06_07(void *data) +{ + struct gic_chip_data *d = data; + + /* + * HIP06 GICD_IIDR clashes with GIC-600 product number (despite + * not being an actual ARM implementation). The saving grace is + * that GIC-600 doesn't have ESPI, so nothing to do in that case. + * HIP07 doesn't even have a proper IIDR, and still pretends to + * have ESPI. In both cases, put them right. + */ + if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { + /* Zero both ESPI and the RES0 field next to it... 
*/ + d->rdists.gicd_typer &= ~GENMASK(9, 8); + return true; + } + + return false; +} + +#define T241_CHIPN_MASK GENMASK_ULL(45, 44) +#define T241_CHIP_GICDA_OFFSET 0x1580000 +#define SMCCC_SOC_ID_T241 0x036b0241 + +static bool gic_enable_quirk_nvidia_t241(void *data) +{ + s32 soc_id = arm_smccc_get_soc_id_version(); + unsigned long chip_bmask = 0; + phys_addr_t phys; + u32 i; + + /* Check JEP106 code for NVIDIA T241 chip (036b:0241) */ + if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241)) + return false; + + /* Find the chips based on GICR regions PHYS addr */ + for (i = 0; i < gic_data.nr_redist_regions; i++) { + chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK, + (u64)gic_data.redist_regions[i].phys_base)); + } + + if (hweight32(chip_bmask) < 3) + return false; + + /* Setup GICD alias regions */ + for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) { + if (chip_bmask & BIT(i)) { + phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET; + phys |= FIELD_PREP(T241_CHIPN_MASK, i); + t241_dist_base_alias[i] = ioremap(phys, SZ_64K); + WARN_ON_ONCE(!t241_dist_base_alias[i]); + } + } + static_branch_enable(&gic_nvidia_t241_erratum); + return true; +} + +static bool gic_enable_quirk_asr8601(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001; + + return true; +} + +static bool gic_enable_quirk_arm64_2941627(void *data) +{ + static_branch_enable(&gic_arm64_2941627_erratum); + return true; +} + +static bool rd_set_non_coherent(void *data) +{ + struct gic_chip_data *d = data; + + d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + +static const struct gic_quirk gic_quirks[] = { + { + .desc = "GICv3: Qualcomm MSM8996 broken firmware", + .compatible = "qcom,msm8996-gic-v3", + .init = gic_enable_quirk_msm8996, + }, + { + .desc = "GICv3: ASR erratum 8601001", + .compatible = "asr,asr8601-gic-v3", + .init = gic_enable_quirk_asr8601, + }, + { + .desc = "GICv3: Mediatek Chromebook GICR save problem", + .property = "mediatek,broken-save-restore-fw", + .init = gic_enable_quirk_mtk_gicr, + }, + { + .desc = "GICv3: HIP06 erratum 161010803", + .iidr = 0x0204043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + .desc = "GICv3: HIP07 erratum 161010803", + .iidr = 0x00000000, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + /* + * Reserved register accesses generate a Synchronous + * External Abort. 
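+ * (When this workaround is active the driver never reads
+ * GICD_TYPER2; see the FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539
+ * check in gic_init_bases().)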
This erratum applies to: + * - ThunderX: CN88xx + * - OCTEON TX: CN83xx, CN81xx + * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx* + */ + .desc = "GICv3: Cavium erratum 38539", + .iidr = 0xa000034c, + .mask = 0xe8f00fff, + .init = gic_enable_quirk_cavium_38539, + }, + { + .desc = "GICv3: NVIDIA erratum T241-FABRIC-4", + .iidr = 0x0402043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_nvidia_t241, + }, + { + /* + * GIC-700: 2941627 workaround - IP variant [0,1] + * + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0400043b, + .mask = 0xff0e0fff, + .init = gic_enable_quirk_arm64_2941627, + }, + { + /* + * GIC-700: 2941627 workaround - IP variant [2] + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0402043b, + .mask = 0xff0f0fff, + .init = gic_enable_quirk_arm64_2941627, + }, + { + .desc = "GICv3: non-coherent attribute", + .property = "dma-noncoherent", + .init = rd_set_non_coherent, + }, + { + } +}; + +static void gic_enable_nmi_support(void) +{ + int i; + + if (!gic_prio_masking_enabled()) + return; + + if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) { + pr_warn("Skipping NMI enable due to firmware issues\n"); + return; + } + + ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); + if (!ppi_nmi_refs) + return; + + for (i = 0; i < gic_data.ppi_nr; i++) + refcount_set(&ppi_nmi_refs[i], 0); + + pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", + gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); + + /* + * How priority values are used by the GIC depends on two things: + * the security state of the GIC (controlled by the GICD_CTRL.DS bit) + * and if Group 0 interrupts can be delivered to Linux in the non-secure + * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the + * ICC_PMR_EL1 register and the priority that software assigns to + * interrupts: + * + * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority + * ----------------------------------------------------------- + * 1 | - | unchanged | unchanged + * ----------------------------------------------------------- + * 0 | 1 | non-secure | non-secure + * ----------------------------------------------------------- + * 0 | 0 | unchanged | non-secure + * + * where non-secure means that the value is right-shifted by one and the + * MSB bit set, to make it fit in the non-secure priority range. + * + * In the first two cases, where ICC_PMR_EL1 and the interrupt priority + * are both either modified or unchanged, we can use the same set of + * priorities. + * + * In the last case, where only the interrupt priorities are modified to + * be in the non-secure range, we use a different PMR value to mask IRQs + * and the rest of the values that we use remain unchanged. 
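+ *
+ * For example, a priority the kernel writes as 0x20 shows up in
+ * the non-secure range as (0x20 >> 1) | 0x80 == 0x90.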
+ */ + if (gic_has_group0() && !gic_dist_security_disabled()) + static_branch_enable(&gic_nonsecure_priorities); + + static_branch_enable(&supports_pseudo_nmis_ft2500); + + if (static_branch_likely(&supports_deactivate_key)) + gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; + else + gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; +} + +static int __init gic_init_bases(phys_addr_t dist_phys_base, + void __iomem *dist_base, + struct redist_region *rdist_regs, + u32 nr_redist_regions, + u64 redist_stride, + struct fwnode_handle *handle) +{ + u32 typer; + int err; + + if (!is_hyp_mode_available()) + static_branch_disable(&supports_deactivate_key); + + if (static_branch_likely(&supports_deactivate_key)) + pr_info("GIC: Using split EOI/Deactivate mode\n"); + + gic_data.fwnode = handle; + gic_data.dist_phys_base = dist_phys_base; + gic_data.dist_base = dist_base; + gic_data.redist_regions = rdist_regs; + gic_data.nr_redist_regions = nr_redist_regions; + gic_data.redist_stride = redist_stride; + + /* + * Find out how many interrupts are supported. + */ + typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); + gic_data.rdists.gicd_typer = typer; + + gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), + gic_quirks, &gic_data); + + pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); + pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); + + /* + * ThunderX1 explodes on reading GICD_TYPER2, in violation of the + * architecture spec (which says that reserved registers are RES0). + */ + if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539)) + gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); + + gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, + &gic_data); + gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) { + /* Disable GICv4.x features for the erratum T241-FABRIC-4 */ + gic_data.rdists.has_rvpeid = true; + gic_data.rdists.has_vlpis = true; + gic_data.rdists.has_direct_lpi = true; + gic_data.rdists.has_vpend_valid_dirty = true; + } + + if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { + err = -ENOMEM; + goto out_free; + } + + irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); + + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); + + if (typer & GICD_TYPER_MBIS) { + err = mbi_init(handle, gic_data.domain); + if (err) + pr_err("Failed to initialize MBIs\n"); + } + + set_handle_irq(gic_handle_irq); + + gic_update_rdist_properties(); + + gic_dist_init(); + gic_cpu_init(); + gic_smp_init(); + gic_cpu_pm_init(); + + if (gic_dist_supports_lpis()) { + phytium_its_init(handle, &gic_data.rdists, gic_data.domain); + phytium_its_cpu_init(); + its_lpi_memreserve_init(); + } else { + if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) + gicv2m_init(handle, gic_data.domain); + } + + gic_enable_nmi_support(); + + return 0; + +out_free: + if (gic_data.domain) + irq_domain_remove(gic_data.domain); + free_percpu(gic_data.rdists.rdist); + return err; +} + +static int __init gic_validate_dist_version(void __iomem *dist_base) +{ + u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + + if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) + return -ENODEV; + + return 0; +} + +/* Create all possible partitions at boot time */ +static void __init gic_populate_ppi_partitions(struct device_node *gic_node) +{ + struct device_node *parts_node, *child_part; + int part_idx = 0, i; + int nr_parts; + struct partition_affinity *parts; + + parts_node 
= of_get_child_by_name(gic_node, "ppi-partitions"); + if (!parts_node) + return; + + gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); + if (!gic_data.ppi_descs) + goto out_put_node; + + nr_parts = of_get_child_count(parts_node); + + if (!nr_parts) + goto out_put_node; + + parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); + if (WARN_ON(!parts)) + goto out_put_node; + + for_each_child_of_node(parts_node, child_part) { + struct partition_affinity *part; + int n; + + part = &parts[part_idx]; + + part->partition_id = of_node_to_fwnode(child_part); + + pr_info("GIC: PPI partition %pOFn[%d] { ", + child_part, part_idx); + + n = of_property_count_elems_of_size(child_part, "affinity", + sizeof(u32)); + WARN_ON(n <= 0); + + for (i = 0; i < n; i++) { + int err, cpu; + u32 cpu_phandle; + struct device_node *cpu_node; + + err = of_property_read_u32_index(child_part, "affinity", + i, &cpu_phandle); + if (WARN_ON(err)) + continue; + + cpu_node = of_find_node_by_phandle(cpu_phandle); + if (WARN_ON(!cpu_node)) + continue; + + cpu = of_cpu_node_to_id(cpu_node); + if (WARN_ON(cpu < 0)) { + of_node_put(cpu_node); + continue; + } + + pr_info("%pOF[%d] ", cpu_node, cpu); + + cpumask_set_cpu(cpu, &part->mask); + of_node_put(cpu_node); + } + + pr_info("}\n"); + part_idx++; + } + + for (i = 0; i < gic_data.ppi_nr; i++) { + unsigned int irq; + struct partition_desc *desc; + struct irq_fwspec ppi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 3, + .param = { + [0] = GIC_IRQ_TYPE_PARTITION, + [1] = i, + [2] = IRQ_TYPE_NONE, + }, + }; + + irq = irq_create_fwspec_mapping(&ppi_fwspec); + if (WARN_ON(!irq)) + continue; + desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, + irq, &partition_domain_ops); + if (WARN_ON(!desc)) + continue; + + gic_data.ppi_descs[i] = desc; + } + +out_put_node: + of_node_put(parts_node); +} + +static void __init gic_of_setup_kvm_info(struct device_node *node) +{ + int ret; + struct resource r; + u32 gicv_idx; + + gic_v3_kvm_info.type = GIC_V3; + + gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); + if (!gic_v3_kvm_info.maint_irq) + return; + + if (of_property_read_u32(node, "#redistributor-regions", + &gicv_idx)) + gicv_idx = 1; + + gicv_idx += 3; /* Also skip GICD, GICC, GICH */ + ret = of_address_to_resource(node, gicv_idx, &r); + if (!ret) + gic_v3_kvm_info.vcpu = r; + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); +} + +static void gic_request_region(resource_size_t base, resource_size_t size, + const char *name) +{ + if (!request_mem_region(base, size, name)) + pr_warn_once(FW_BUG "%s region %pa has overlapping address\n", + name, &base); +} + +static void __iomem *gic_of_iomap(struct device_node *node, int idx, + const char *name, struct resource *res) +{ + void __iomem *base; + int ret; + + ret = of_address_to_resource(node, idx, res); + if (ret) + return IOMEM_ERR_PTR(ret); + + gic_request_region(res->start, resource_size(res), name); + base = of_iomap(node, idx); + + return base ?: IOMEM_ERR_PTR(-ENOMEM); +} + +static int __init gic_of_init(struct device_node *node, struct device_node *parent) +{ + phys_addr_t dist_phys_base; + void __iomem *dist_base; + struct redist_region *rdist_regs; + struct resource res; + u64 redist_stride; + u32 nr_redist_regions; + int err, i; + unsigned long skt; + + dist_base = gic_of_iomap(node, 0, "GICD", &res); + if (IS_ERR(dist_base)) { + pr_err("%pOF: unable to map gic dist 
registers\n", node); + return PTR_ERR(dist_base); + } + + dist_phys_base = res.start; + + err = gic_validate_dist_version(dist_base); + if (err) { + pr_err("%pOF: no distributor detected, giving up\n", node); + goto out_unmap_dist; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("Error: No GIC Distributor in FDT\n"); + goto out_unmap_dist; + } + + mars3_gic_dists[0].phys_base = res.start; + mars3_gic_dists[0].size = resource_size(&res); + mars3_gic_dists[0].dist_base = dist_base; + + if (of_property_read_u32(node, "#mars3_soc_bitmap", &mars3_sockets_bitmap)) + mars3_sockets_bitmap = 0x1; + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if (((1U << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) + nr_redist_regions = 1; + + rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), + GFP_KERNEL); + if (!rdist_regs) { + err = -ENOMEM; + goto out_unmap_dist; + } + + for (i = 0; i < nr_redist_regions; i++) { + rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res); + if (IS_ERR(rdist_regs[i].redist_base)) { + pr_err("%pOF: couldn't map region %d\n", node, i); + err = -ENODEV; + goto out_unmap_rdist; + } + rdist_regs[i].phys_base = res.start; + } + + if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) + redist_stride = 0; + + err = gic_init_bases(dist_phys_base, dist_base, rdist_regs, + nr_redist_regions, redist_stride, &node->fwnode); + if (err) + goto out_unmap_rdist; + + gic_populate_ppi_partitions(node); + + if (static_branch_likely(&supports_deactivate_key)) + gic_of_setup_kvm_info(node); + return 0; + +out_unmap_rdist: + for (i = 0; i < nr_redist_regions; i++) + if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base)) + iounmap(rdist_regs[i].redist_base); + kfree(rdist_regs); +out_unmap_dist: + iounmap(dist_base); + return err; +} + +IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init); + +#ifdef CONFIG_ACPI +static struct +{ + void __iomem *dist_base; + struct redist_region *redist_regs; + u32 nr_redist_regions; + bool single_redist; + int enabled_rdists; + u32 maint_irq; + int maint_irq_mode; + phys_addr_t vcpu_base; +} acpi_data __initdata; + +static int gic_mars3_sockets_bitmap(void) +{ + unsigned int skt, i; + int skt_bitmap = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + for (i = 0; i < MAX_MARS3_SOC_COUNT; i++) + if (skt_cpu_cnt[i] > 0) + skt_bitmap |= (1 << i); + + return skt_bitmap; +} + +static void __init +gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) +{ + static int count; + + acpi_data.redist_regs[count].phys_base = phys_base; + acpi_data.redist_regs[count].redist_base = redist_base; + acpi_data.redist_regs[count].single_redist = acpi_data.single_redist; + count++; +} + +static int __init +gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_redistributor *redist = + (struct acpi_madt_generic_redistributor 
+ (struct acpi_madt_generic_redistributor *)header;
+ void __iomem *redist_base;
+
+ redist_base = ioremap(redist->base_address, redist->length);
+ if (!redist_base) {
+ pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
+ return -ENOMEM;
+ }
+ gic_request_region(redist->base_address, redist->length, "GICR");
+
+ gic_acpi_register_redist(redist->base_address, redist_base);
+ return 0;
+}
+
+static int __init
+gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
+ u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
+ void __iomem *redist_base;
+
+ /* A GICC entry with !ACPI_MADT_ENABLED is unusable, so skip it */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
+ redist_base = ioremap(gicc->gicr_base_address, size);
+ if (!redist_base)
+ return -ENOMEM;
+ gic_request_region(gicc->gicr_base_address, size, "GICR");
+
+ gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
+ return 0;
+}
+
+static int __init gic_acpi_collect_gicr_base(void)
+{
+ acpi_tbl_entry_handler redist_parser;
+ enum acpi_madt_type type;
+
+ if (acpi_data.single_redist) {
+ type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
+ redist_parser = gic_acpi_parse_madt_gicc;
+ } else {
+ type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
+ redist_parser = gic_acpi_parse_madt_redist;
+ }
+
+ /* Collect redistributor base addresses in GICR entries */
+ if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
+ return 0;
+
+ pr_info("No valid GICR entries exist\n");
+ return -ENODEV;
+}
+
+static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ /* Subtable presence means that redist exists, that's it */
+ return 0;
+}
+
+static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
+
+ /*
+ * If GICC is enabled and has a valid gicr base address, then it
+ * means the GICR base is presented via GICC.
+ */
+ if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
+ acpi_data.enabled_rdists++;
+ return 0;
+ }
+
+ /*
+ * Firmware may validly pass a disabled GICC entry; the driver
+ * should not treat this as an error and should skip the entry
+ * instead of failing the probe.
+ */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
+ return -ENODEV;
+}
+
+static int __init gic_acpi_count_gicr_regions(void)
+{
+ int count;
+
+ /*
+ * Count how many redistributor regions we have. Mixing
+ * redistributor descriptions is not allowed: GICR and GICC
+ * subtables have to be mutually exclusive.
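+ *
+ * In other words, if any GICR subtable is present, every
+ * redistributor must be described by a GICR subtable; otherwise
+ * they must all come from the GICC entries' gicr_base_address
+ * fields.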
+ */ + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, + gic_acpi_match_gicr, 0); + if (count > 0) { + acpi_data.single_redist = false; + return count; + } + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_match_gicc, 0); + if (count > 0) { + acpi_data.single_redist = true; + count = acpi_data.enabled_rdists; + } + + return count; +} + +static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, + struct acpi_probe_entry *ape) +{ + struct acpi_madt_generic_distributor *dist; + int count; + + dist = (struct acpi_madt_generic_distributor *)header; + if (dist->version != ape->driver_data) + return false; + + /* We need to do that exercise anyway, the sooner the better */ + count = gic_acpi_count_gicr_regions(); + if (count <= 0) + return false; + + acpi_data.nr_redist_regions = count; + return true; +} + +static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + int maint_irq_mode; + static int first_madt = true; + + /* Skip unusable CPUs */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? + ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; + + if (first_madt) { + first_madt = false; + + acpi_data.maint_irq = gicc->vgic_interrupt; + acpi_data.maint_irq_mode = maint_irq_mode; + acpi_data.vcpu_base = gicc->gicv_base_address; + + return 0; + } + + /* + * The maintenance interrupt and GICV should be the same for every CPU + */ + if ((acpi_data.maint_irq != gicc->vgic_interrupt) || + (acpi_data.maint_irq_mode != maint_irq_mode) || + (acpi_data.vcpu_base != gicc->gicv_base_address)) + return -EINVAL; + + return 0; +} + +static bool __init gic_acpi_collect_virt_info(void) +{ + int count; + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_parse_virt_madt_gicc, 0); + + return (count > 0); +} + +#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) +#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K) +#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K) + +static void __init gic_acpi_setup_kvm_info(void) +{ + int irq; + + if (!gic_acpi_collect_virt_info()) { + pr_warn("Unable to get hardware information used for virtualization\n"); + return; + } + + gic_v3_kvm_info.type = GIC_V3; + + irq = acpi_register_gsi(NULL, acpi_data.maint_irq, + acpi_data.maint_irq_mode, + ACPI_ACTIVE_HIGH); + if (irq <= 0) + return; + + gic_v3_kvm_info.maint_irq = irq; + + if (acpi_data.vcpu_base) { + struct resource *vcpu = &gic_v3_kvm_info.vcpu; + + vcpu->flags = IORESOURCE_MEM; + vcpu->start = acpi_data.vcpu_base; + vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; + } + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); +} + +static struct fwnode_handle *gsi_domain_handle; + +static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi) +{ + return gsi_domain_handle; +} + +static int __init +gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_generic_distributor *dist; + size_t size; + int i, err; + int skt; + + /* Get distributor base address */ + dist = (struct acpi_madt_generic_distributor *)header; + acpi_data.dist_base = ioremap(dist->base_address, + ACPI_GICV3_DIST_MEM_SIZE); + if (!acpi_data.dist_base) { + pr_err("Unable to map GICD registers\n"); + return -ENOMEM; + } + 
gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD"); + + err = gic_validate_dist_version(acpi_data.dist_base); + if (err) { + pr_err("No distributor detected at @%p, giving up\n", + acpi_data.dist_base); + goto out_dist_unmap; + } + + mars3_gic_dists[0].phys_base = dist->base_address; + mars3_gic_dists[0].size = ACPI_GICV3_DIST_MEM_SIZE; + mars3_gic_dists[0].dist_base = acpi_data.dist_base; + +#ifdef CONFIG_ACPI + mars3_sockets_bitmap = gic_mars3_sockets_bitmap(); + if (is_kdump_kernel()) + mars3_sockets_bitmap = 0x3; + + if (mars3_sockets_bitmap == 0) { + mars3_sockets_bitmap = 0x1; + pr_err("No socket, please check cpus MPIDR_AFFINITY_LEVEL!!!"); + } else + pr_info("mars3_sockets_bitmap = 0x%x\n", mars3_sockets_bitmap); +#endif + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if (((1U << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions; + acpi_data.redist_regs = kzalloc(size, GFP_KERNEL); + if (!acpi_data.redist_regs) { + err = -ENOMEM; + goto out_dist_unmap; + } + + err = gic_acpi_collect_gicr_base(); + if (err) + goto out_redist_unmap; + + gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!gsi_domain_handle) { + err = -ENOMEM; + goto out_redist_unmap; + } + + err = gic_init_bases(dist->base_address, acpi_data.dist_base, + acpi_data.redist_regs, acpi_data.nr_redist_regions, + 0, gsi_domain_handle); + if (err) + goto out_fwhandle_free; + + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id); + + if (static_branch_likely(&supports_deactivate_key)) + gic_acpi_setup_kvm_info(); + + return 0; + +out_fwhandle_free: + irq_domain_free_fwnode(gsi_domain_handle); +out_redist_unmap: + for (i = 0; i < acpi_data.nr_redist_regions; i++) + if (acpi_data.redist_regs[i].redist_base) + iounmap(acpi_data.redist_regs[i].redist_base); + kfree(acpi_data.redist_regs); +out_dist_unmap: + iounmap(acpi_data.dist_base); + return err; +} +IRQCHIP_ACPI_DECLARE(gic_phyt_2500, ACPI_MADT_TYPE_PHYTIUM_2500, + acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, + gic_acpi_init); +#endif diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index b1e60c13c1e1e7bd46948bd7341dc91b266d4576..5cacfafb76dc0af46ca52a7a2542440115ef0577 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -37,6 +37,10 @@ #include #include +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif + #include "irq-gic-common.h" #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) @@ -1736,6 +1740,11 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) msg->address_hi = upper_32_bits(addr); msg->data = its_get_event_id(d); +#ifdef CONFIG_ARCH_PHYTIUM + if (typeof_ft2000plus()) + return; +#endif + iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg); } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index e7f000f90bb4672ad85914792a5a19fd9b89aeeb..741e3775a1dbdd12404f0cf297f0d2c9dd99b6da 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -216,11 +216,10 @@ static inline void __iomem *gic_dist_base_alias(struct irq_data *d) chip = 3; break; default: - unreachable(); + BUG(); } return t241_dist_base_alias[chip]; } - 
return gic_data.dist_base; } diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c new file mode 100644 index 0000000000000000000000000000000000000000..b9202fdb6d925047f56ace77e415219a337c9f45 --- /dev/null +++ b/drivers/irqchip/irq-loongarch-avec.c @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2024 Loongson Technologies, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "irq-loongson.h" + +#define VECTORS_PER_REG 64 +#define IRR_VECTOR_MASK 0xffUL +#define IRR_INVALID_MASK 0x80000000UL +#define AVEC_MSG_OFFSET 0x100000 + +#ifdef CONFIG_SMP +struct pending_list { + struct list_head head; +}; + +bool disable_pci_irq_limit; +static struct cpumask intersect_mask; +static DEFINE_PER_CPU(struct pending_list, pending_list); +#endif + +static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map); + +struct avecintc_chip { + raw_spinlock_t lock; + struct fwnode_handle *fwnode; + struct irq_domain *domain; + struct irq_matrix *vector_matrix; + phys_addr_t msi_base_addr; +}; + +static struct avecintc_chip loongarch_avec; + +struct avecintc_data { + struct list_head entry; + unsigned int cpu; + unsigned int vec; + unsigned int prev_cpu; + unsigned int prev_vec; + unsigned int moving; +}; + +static inline void avecintc_ack_irq(struct irq_data *d) +{ +} + +static inline void avecintc_mask_irq(struct irq_data *d) +{ +} + +static inline void avecintc_unmask_irq(struct irq_data *d) +{ +} + +#ifdef CONFIG_SMP +static inline void pending_list_init(int cpu) +{ + struct pending_list *plist = per_cpu_ptr(&pending_list, cpu); + + INIT_LIST_HEAD(&plist->head); +} + +static void avecintc_sync(struct avecintc_data *adata) +{ + struct pending_list *plist; + + if (cpu_online(adata->prev_cpu)) { + plist = per_cpu_ptr(&pending_list, adata->prev_cpu); + list_add_tail(&adata->entry, &plist->head); + adata->moving = 1; + smp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR); + } +} + +static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) +{ + int cpu, ret, vector; + struct avecintc_data *adata; + + raw_spin_lock(&loongarch_avec.lock); + adata = irq_data_get_irq_chip_data(data); + + if (adata->moving) { + raw_spin_unlock(&loongarch_avec.lock); + return -EBUSY; + } + + if (adata->vec == UINT_MAX) { + raw_spin_unlock(&loongarch_avec.lock); + return -EINVAL; + } + + if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest)) { + raw_spin_unlock(&loongarch_avec.lock); + return 0; + } + + cpumask_and(&intersect_mask, dest, cpu_online_mask); + + ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu); + if (ret < 0) { + raw_spin_unlock(&loongarch_avec.lock); + return ret; + } + + vector = ret; + adata->cpu = cpu; + adata->vec = vector; + per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data); + avecintc_sync(adata); + + raw_spin_unlock(&loongarch_avec.lock); + irq_data_update_effective_affinity(data, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK; +} + +static int avecintc_cpu_online(unsigned int cpu) +{ + long value; + if (!loongarch_avec.vector_matrix) + return 0; + + raw_spin_lock(&loongarch_avec.lock); + + irq_matrix_online(loongarch_avec.vector_matrix); + + pending_list_init(cpu); + + value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); + value |= IOCSR_MISC_FUNC_AVEC_EN; + iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC); + + 
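+
+ /* The IOCSR_MISC_FUNC write above enables AVEC delivery on this CPU */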
raw_spin_unlock(&loongarch_avec.lock);
+
+ return 0;
+}
+
+static int avecintc_cpu_offline(unsigned int cpu)
+{
+ struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
+
+ if (!loongarch_avec.vector_matrix)
+ return 0;
+
+ raw_spin_lock(&loongarch_avec.lock);
+
+ if (!list_empty(&plist->head))
+ pr_warn("CPU#%d vector is busy\n", cpu);
+ irq_matrix_offline(loongarch_avec.vector_matrix);
+
+ raw_spin_unlock(&loongarch_avec.lock);
+
+ return 0;
+}
+
+void complete_irq_moving(void)
+{
+ struct pending_list *plist = this_cpu_ptr(&pending_list);
+ struct avecintc_data *adata, *tdata;
+ int cpu, vector, bias;
+ uint64_t isr;
+
+ raw_spin_lock(&loongarch_avec.lock);
+
+ list_for_each_entry_safe(adata, tdata, &plist->head, entry) {
+ cpu = adata->prev_cpu;
+ vector = adata->prev_vec;
+ bias = vector / VECTORS_PER_REG;
+ switch (bias) {
+ case 0:
+ isr = csr_read64(LOONGARCH_CSR_ISR0);
+ break;
+ case 1:
+ isr = csr_read64(LOONGARCH_CSR_ISR1);
+ break;
+ case 2:
+ isr = csr_read64(LOONGARCH_CSR_ISR2);
+ break;
+ case 3:
+ isr = csr_read64(LOONGARCH_CSR_ISR3);
+ break;
+ }
+
+ if (isr & (1UL << (vector % VECTORS_PER_REG))) {
+ smp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR);
+ continue;
+ }
+ list_del(&adata->entry);
+ irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false);
+ this_cpu_write(irq_map[vector], NULL);
+ adata->moving = 0;
+ adata->prev_cpu = adata->cpu;
+ adata->prev_vec = adata->vec;
+ }
+
+ raw_spin_unlock(&loongarch_avec.lock);
+}
+#endif
+
+static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct avecintc_data *adata = irq_data_get_irq_chip_data(d);
+
+ msg->address_hi = 0x0;
+ msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4)
+ | ((cpu_logical_map(adata->cpu) & 0xffff) << 12);
+ msg->data = 0x0;
+}
+
+static struct irq_chip avec_irq_controller = {
+ .name = "AVECINTC",
+ .irq_ack = avecintc_ack_irq,
+ .irq_mask = avecintc_mask_irq,
+ .irq_unmask = avecintc_unmask_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = avecintc_set_affinity,
+#endif
+ .irq_compose_msi_msg = avecintc_compose_msi_msg,
+};
+
+static void avecintc_irq_dispatch(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_desc *d;
+
+ chained_irq_enter(chip, desc);
+
+ while (true) {
+ unsigned long vector = csr_read64(LOONGARCH_CSR_IRR);
+
+ if (vector & IRR_INVALID_MASK)
+ break;
+
+ vector &= IRR_VECTOR_MASK;
+
+ d = this_cpu_read(irq_map[vector]);
+ if (d) {
+ generic_handle_irq_desc(d);
+ } else {
+ spurious_interrupt();
+ pr_warn("Unexpected IRQ occurs on CPU#%d [vector %ld]\n", smp_processor_id(), vector);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata)
+{
+ int cpu, ret;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&loongarch_avec.lock, flags);
+
+ ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu);
+ if (ret < 0) {
+ raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags);
+ return ret;
+ }
+
+ adata->prev_cpu = adata->cpu = cpu;
+ adata->prev_vec = adata->vec = ret;
+ per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);
+
+ raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags);
+
+ return 0;
+}
+
+static int avecintc_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs, void *arg)
+{
+ for (unsigned int i = 0; i < nr_irqs; i++) {
+ struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i);
+ struct avecintc_data *adata =
kzalloc(sizeof(*adata), GFP_KERNEL); + int ret; + + if (!adata) + return -ENOMEM; + + ret = avecintc_alloc_vector(irqd, adata); + if (ret < 0) { + kfree(adata); + return ret; + } + + irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller, + adata, handle_edge_irq, NULL, NULL); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + } + + return 0; +} + +static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&loongarch_avec.lock, flags); + + per_cpu(irq_map, adata->cpu)[adata->vec] = NULL; + irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false); + +#ifdef CONFIG_SMP + if (!adata->moving) { + raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); + return; + } + + per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL; + irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false); + list_del_init(&adata->entry); +#endif + raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); +} + +static void avecintc_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + for (unsigned int i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + if (d) { + struct avecintc_data *adata = irq_data_get_irq_chip_data(d); + + avecintc_free_vector(d, adata); + irq_domain_reset_irq_data(d); + kfree(adata); + } + } +} + +static const struct irq_domain_ops avecintc_domain_ops = { + .alloc = avecintc_domain_alloc, + .free = avecintc_domain_free, +}; + +static int __init irq_matrix_init(void) +{ + loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS); + if (!loongarch_avec.vector_matrix) + return -ENOMEM; + + for (int i = 0; i < NR_LEGACY_VECTORS; i++) + irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false); + + irq_matrix_online(loongarch_avec.vector_matrix); + + return 0; +} + +static int __init avecintc_init(struct irq_domain *parent) +{ + int ret, parent_irq; + unsigned long value; + + disable_pci_irq_limit = true; + raw_spin_lock_init(&loongarch_avec.lock); + + loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC"); + if (!loongarch_avec.fwnode) { + pr_err("Unable to allocate domain handle\n"); + ret = -ENOMEM; + goto out; + } + + loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode, + &avecintc_domain_ops, NULL); + if (!loongarch_avec.domain) { + pr_err("Unable to create IRQ domain\n"); + ret = -ENOMEM; + goto out_free_handle; + } + + parent_irq = irq_create_mapping(parent, INT_AVEC); + if (!parent_irq) { + pr_err("Failed to mapping hwirq\n"); + ret = -EINVAL; + goto out_remove_domain; + } + + ret = irq_matrix_init(); + if (ret < 0) { + pr_err("Failed to init irq matrix\n"); + goto out_remove_domain; + } + irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL); + +#ifdef CONFIG_SMP + pending_list_init(0); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING, + "irqchip/loongarch/avecintc:starting", + avecintc_cpu_online, avecintc_cpu_offline); +#endif + value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); + value |= IOCSR_MISC_FUNC_AVEC_EN; + iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC); + + return ret; + +out_remove_domain: + irq_domain_remove(loongarch_avec.domain); +out_free_handle: + irq_domain_free_fwnode(loongarch_avec.fwnode); +out: + return ret; +} + +static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_msi_pic 
*pchmsi_entry = (struct acpi_madt_msi_pic *)header; + + loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET; + + return pch_msi_acpi_init_avec(loongarch_avec.domain); +} + +static inline int __init acpi_cascade_irqdomain_init(void) +{ + return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1); +} + +int __init avecintc_acpi_init(struct irq_domain *parent) +{ + int ret = avecintc_init(parent); + if (ret < 0) { + pr_err("Failed to init IRQ domain\n"); + return ret; + } + + ret = acpi_cascade_irqdomain_init(); + if (ret < 0) { + pr_err("Failed to init cascade IRQ domain\n"); + return ret; + } + + return ret; +} diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c index b35903a06902f73e719626205b0ba765d2319269..c6e0c9849ba91cf0158f53ad72bfd6fcf8676ffd 100644 --- a/drivers/irqchip/irq-loongarch-cpu.c +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -13,6 +13,8 @@ #include #include +#include "irq-loongson.h" + static struct irq_domain *irq_domain; struct fwnode_handle *cpuintc_handle; @@ -140,10 +142,18 @@ static int __init acpi_cascade_irqdomain_init(void) if (r < 0) return r; - return 0; + if (cpu_has_avecint) + r = avecintc_acpi_init(irq_domain); + + return r; +} + +struct irq_domain *get_cpudomain(void) +{ + return irq_domain; } -static int __init cpuintc_acpi_init(union acpi_subtable_headers *header, +int __init cpuintc_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { int ret; diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 08e95fad5b12e3179d8a93e77e312618b8f86c8b..ca70e443c5a5b4df822e815226419138edcff2ee 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -17,6 +17,8 @@ #include #include +#include "irq-loongson.h" + #define EIOINTC_REG_NODEMAP 0x14a0 #define EIOINTC_REG_IPMAP 0x14c0 #define EIOINTC_REG_ENABLE 0x1600 @@ -24,6 +26,16 @@ #define EIOINTC_REG_ISR 0x1800 #define EIOINTC_REG_ROUTE 0x1c00 +#define EXTIOI_VIRT_FEATURES 0x40000000 +#define EXTIOI_HAS_VIRT_EXTENSION 0 +#define EXTIOI_HAS_ENABLE_OPTION 1 +#define EXTIOI_HAS_INT_ENCODE 2 +#define EXTIOI_HAS_CPU_ENCODE 3 +#define EXTIOI_VIRT_CONFIG 0x40000004 +#define EXTIOI_ENABLE 1 +#define EXTIOI_ENABLE_INT_ENCODE 2 +#define EXTIOI_ENABLE_CPU_ENCODE 3 + #define VEC_REG_COUNT 4 #define VEC_COUNT_PER_REG 64 #define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG) @@ -42,6 +54,7 @@ struct eiointc_priv { cpumask_t cpuspan_map; struct fwnode_handle *domain_handle; struct irq_domain *eiointc_domain; + bool cpu_encoded; }; static struct eiointc_priv *eiointc_priv[MAX_IO_PICS]; @@ -57,7 +70,9 @@ static void eiointc_enable(void) static int cpu_to_eio_node(int cpu) { - return cpu_logical_map(cpu) / CORES_PER_EIO_NODE; + int cores = (cpu_has_hypervisor ? 
MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); + + return cpu_logical_map(cpu) / cores; } static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map) @@ -88,6 +103,20 @@ static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, static DEFINE_RAW_SPINLOCK(affinity_lock); +static void virt_extioi_set_irq_route(int irq, unsigned int cpu) +{ + int data; + + /* + * get irq route info for continuous 4 vectors + * and set affinity for specified vector + */ + data = iocsr_read32(EIOINTC_REG_ROUTE + (irq & ~3)); + data &= ~(0xff << ((irq & 3) * 8)); + data |= cpu_logical_map(cpu) << ((irq & 3) * 8); + iocsr_write32(data, EIOINTC_REG_ROUTE + (irq & ~3)); +} + static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) { unsigned int cpu; @@ -110,16 +139,22 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af vector = d->hwirq; regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2); - /* Mask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), - 0x0, priv->node * CORES_PER_EIO_NODE); - - /* Set route for target vector */ - eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); - - /* Unmask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE, - 0x0, priv->node * CORES_PER_EIO_NODE); + if (priv->cpu_encoded) { + iocsr_write32(EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1F), regaddr); + virt_extioi_set_irq_route(vector, cpu); + iocsr_write32(EIOINTC_ALL_ENABLE, regaddr); + } else { + /* Mask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), + 0x0, priv->node * CORES_PER_EIO_NODE); + + /* Set route for target vector */ + eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); + + /* Unmask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE, + 0x0, priv->node * CORES_PER_EIO_NODE); + } irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -146,13 +181,14 @@ static int eiointc_router_init(unsigned int cpu) uint32_t data; uint32_t node = cpu_to_eio_node(cpu); int index = eiointc_index(node); + int cores = (cpu_has_hypervisor ? 
MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); if (index < 0) { pr_err("Error: invalid nodemap!\n"); return -1; } - if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) { + if ((cpu_logical_map(cpu) % cores) == 0) { eiointc_enable(); for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) { @@ -168,7 +204,9 @@ static int eiointc_router_init(unsigned int cpu) for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) { /* Route to Node-0 Core-0 */ - if (index == 0) + if (eiointc_priv[index]->cpu_encoded) + bit = cpu_logical_map(0); + else if (index == 0) bit = BIT(cpu_logical_map(0)); else bit = (eiointc_priv[index]->node << 4) | 1; @@ -199,6 +237,12 @@ static void eiointc_irq_dispatch(struct irq_desc *desc) for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) { pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3)); + + /* Skip handling if pending bitmap is zero */ + if (!pending) + continue; + + /* Clear the IRQs */ iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3)); while (pending) { int bit = __ffs(pending); @@ -305,23 +349,7 @@ static int eiointc_suspend(void) static void eiointc_resume(void) { - int i, j; - struct irq_desc *desc; - struct irq_data *irq_data; - eiointc_router_init(0); - - for (i = 0; i < nr_pics; i++) { - for (j = 0; j < eiointc_priv[0]->vec_count; j++) { - desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j); - if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) { - raw_spin_lock(&desc->lock); - irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc)); - eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0); - raw_spin_unlock(&desc->lock); - } - } - } } static struct syscore_ops eiointc_syscore_ops = { @@ -329,7 +357,7 @@ static struct syscore_ops eiointc_syscore_ops = { .resume = eiointc_resume, }; -static int __init pch_pic_parse_madt(union acpi_subtable_headers *header, +int __init pch_pic_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header; @@ -342,7 +370,7 @@ static int __init pch_pic_parse_madt(union acpi_subtable_headers *header, return 0; } -static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, +int __init pch_msi_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct irq_domain *parent; @@ -370,6 +398,9 @@ static int __init acpi_cascade_irqdomain_init(void) if (r < 0) return r; + if (cpu_has_avecint) + return 0; + r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1); if (r < 0) return r; @@ -380,7 +411,7 @@ static int __init acpi_cascade_irqdomain_init(void) static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, u64 node_map) { - int i; + int i, val; node_map = node_map ? 
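+ /* A node_map of zero from the caller means "no restriction": fall back to the all-ones mask so every node is spanned. */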
node_map : -1ULL; for_each_possible_cpu(i) { @@ -400,14 +431,25 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, return -ENOMEM; } + if (cpu_has_hypervisor) { + val = iocsr_read32(EXTIOI_VIRT_FEATURES); + if (val & BIT(EXTIOI_HAS_CPU_ENCODE)) { + val = iocsr_read32(EXTIOI_VIRT_CONFIG); + val |= BIT(EXTIOI_ENABLE_CPU_ENCODE); + iocsr_write32(val, EXTIOI_VIRT_CONFIG); + priv->cpu_encoded = true; + pr_info("loongson-extioi: enable cpu encoding\n"); + } + } + eiointc_priv[nr_pics++] = priv; eiointc_router_init(0); irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv); if (nr_pics == 1) { register_syscore_ops(&eiointc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING, - "irqchip/loongarch/intc:starting", + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING, + "irqchip/loongarch/eiointc:starting", eiointc_router_init, NULL); } diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c index 0bff728b25e3da85ebb1d166f0cae5b7a7f0ea51..5da02c7ad0b3075c2ac0a0aa1d9a5724d8906b9c 100644 --- a/drivers/irqchip/irq-loongson-htvec.c +++ b/drivers/irqchip/irq-loongson-htvec.c @@ -17,6 +17,8 @@ #include #include +#include "irq-loongson.h" + /* Registers */ #define HTVEC_EN_OFF 0x20 #define HTVEC_MAX_PARENT_IRQ 8 diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index 7c4fe7ab4b830e499ec36140ae6cd39ad1325ab0..2b1bd4a96665b4eab6832dd95daca4468c3d661d 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -22,6 +22,8 @@ #include #endif +#include "irq-loongson.h" + #define LIOINTC_CHIP_IRQ 32 #define LIOINTC_NUM_PARENT 4 #define LIOINTC_NUM_CORES 4 diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c index 9b35492fb6be9eacfbfe07a2b7d54f147843f094..2d4c3ec128b8f27056ccec04515c423174a1c0f9 100644 --- a/drivers/irqchip/irq-loongson-pch-lpc.c +++ b/drivers/irqchip/irq-loongson-pch-lpc.c @@ -15,6 +15,8 @@ #include #include +#include "irq-loongson.h" + /* Registers */ #define LPC_INT_CTL 0x00 #define LPC_INT_ENA 0x04 diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c index dd4d699170f4ec5fe731e0e08c48acebb629d70f..2c9f58536fce403fdf22c2898811fba859b511d9 100644 --- a/drivers/irqchip/irq-loongson-pch-msi.c +++ b/drivers/irqchip/irq-loongson-pch-msi.c @@ -15,6 +15,8 @@ #include #include +#include "irq-loongson.h" + static int nr_pics; struct pch_msi_data { @@ -266,17 +268,17 @@ IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init); #ifdef CONFIG_ACPI struct fwnode_handle *get_pch_msi_handle(int pci_segment) { - int i; + if (cpu_has_avecint) + return pch_msi_handle[0]; - for (i = 0; i < MAX_IO_PICS; i++) { + for (int i = 0; i < MAX_IO_PICS; i++) { if (msi_group[i].pci_segment == pci_segment) return pch_msi_handle[i]; } - return NULL; + return pch_msi_handle[0]; } -int __init pch_msi_acpi_init(struct irq_domain *parent, - struct acpi_madt_msi_pic *acpi_pchmsi) +int __init pch_msi_acpi_init(struct irq_domain *parent, struct acpi_madt_msi_pic *acpi_pchmsi) { int ret; struct fwnode_handle *domain_handle; @@ -289,4 +291,36 @@ int __init pch_msi_acpi_init(struct irq_domain *parent, return ret; } + +static struct irq_chip pch_msi_irq_chip_avec = { + .name = "PCH PCI MSI", + .irq_ack = irq_chip_ack_parent, +}; + +static struct msi_domain_info pch_msi_domain_info_avec = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +
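+ /* AVEC supplies per-CPU vectors straight from the matrix allocator, so both multi-MSI and MSI-X can be backed without a PCH MSI translation stage in between. */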
MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .chip = &pch_msi_irq_chip_avec, +}; + +int __init pch_msi_acpi_init_avec(struct irq_domain *parent) +{ + struct irq_domain *msi_domain; + + if (pch_msi_handle[0]) + return 0; + + pch_msi_handle[0] = parent->fwnode; + irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); + + msi_domain = pci_msi_create_irq_domain(pch_msi_handle[0], + &pch_msi_domain_info_avec, parent); + if (!msi_domain) { + pr_err("Failed to create PCI MSI domain\n"); + kfree(pch_msi_handle[0]); + return -ENOMEM; + } + + return 0; +} #endif diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 63db8e2172e017031396003c9a1bb32be8bf59ab..d2356e63e4d40ede1bd754543f08476db70f7e2f 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -17,6 +17,8 @@ #include #include +#include "irq-loongson.h" + /* Registers */ #define PCH_PIC_MASK 0x20 #define PCH_PIC_HTMSI_EN 0x40 @@ -33,6 +35,11 @@ #define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT) #define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG) #define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG) +#define PIC_UNDEF_VECTOR 255 +#define PIC_COUNT_PER_REG64 64 +#define PIC_REG64_COUNT 1 +#define PIC_REG64_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG64) +#define PIC_REG64_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG64) static int nr_pics; @@ -46,12 +53,24 @@ struct pch_pic { u32 saved_vec_en[PIC_REG_COUNT]; u32 saved_vec_pol[PIC_REG_COUNT]; u32 saved_vec_edge[PIC_REG_COUNT]; + u8 table[PIC_COUNT]; + int inuse; }; static struct pch_pic *pch_pic_priv[MAX_IO_PICS]; struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; +static inline u8 hwirq_to_bit(struct pch_pic *priv, int hirq) +{ + return priv->table[hirq]; +} + +struct irq_domain *get_pchpic_irq_domain(void) +{ + return pch_pic_priv[0]->pic_domain; +} + static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit) { u32 reg; @@ -80,45 +99,47 @@ static void pch_pic_mask_irq(struct irq_data *d) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); - pch_pic_bitset(priv, PCH_PIC_MASK, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_MASK, hwirq_to_bit(priv, d->hwirq)); irq_chip_mask_parent(d); } static void pch_pic_unmask_irq(struct irq_data *d) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); - writel(BIT(PIC_REG_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4); + writel(BIT(PIC_REG_BIT(bit)), + priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4); irq_chip_unmask_parent(d); - pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_MASK, bit); } static int pch_pic_set_type(struct irq_data *d, unsigned int type) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); int ret = 0; switch (type) { case IRQ_TYPE_EDGE_RISING: - pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_EDGE, bit); + pch_pic_bitclr(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_edge_irq); break; case IRQ_TYPE_EDGE_FALLING: - pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_EDGE, bit); + pch_pic_bitset(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_edge_irq); break; case IRQ_TYPE_LEVEL_HIGH: - pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_EDGE, bit); + 
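+ /* Level mode: EDGE is cleared; POL=0 selects active-high, while the LEVEL_LOW case below sets POL for active-low. */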
pch_pic_bitclr(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_level_irq); break; case IRQ_TYPE_LEVEL_LOW: - pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_EDGE, bit); + pch_pic_bitset(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_level_irq); break; default: @@ -133,11 +154,12 @@ static void pch_pic_ack_irq(struct irq_data *d) { unsigned int reg; struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); - reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4); - if (reg & BIT(PIC_REG_BIT(d->hwirq))) { - writel(BIT(PIC_REG_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4); + reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(bit) * 4); + if (reg & BIT(PIC_REG_BIT(bit))) { + writel(BIT(PIC_REG_BIT(bit)), + priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4); } irq_chip_ack_parent(d); } @@ -159,6 +181,8 @@ static int pch_pic_domain_translate(struct irq_domain *d, { struct pch_pic *priv = d->host_data; struct device_node *of_node = to_of_node(fwspec->fwnode); + unsigned long flags; + int i; if (of_node) { if (fwspec->param_count < 2) @@ -171,12 +195,33 @@ static int pch_pic_domain_translate(struct irq_domain *d, return -EINVAL; *hwirq = fwspec->param[0] - priv->gsi_base; + if (fwspec->param_count > 1) *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; else *type = IRQ_TYPE_NONE; } + raw_spin_lock_irqsave(&priv->pic_lock, flags); + /* Check pic-table to confirm if the hwirq has been assigned */ + for (i = 0; i < priv->inuse; i++) { + if (priv->table[i] == *hwirq) { + *hwirq = i; + break; + } + } + if (i == priv->inuse) { + /* Assign a new hwirq in pic-table */ + if (priv->inuse >= PIC_COUNT) { + pr_err("pch-pic domain has no free vectors\n"); + raw_spin_unlock_irqrestore(&priv->pic_lock, flags); + return -EINVAL; + } + priv->table[priv->inuse] = *hwirq; + *hwirq = priv->inuse++; + } + raw_spin_unlock_irqrestore(&priv->pic_lock, flags); + return 0; } @@ -194,6 +239,9 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, if (err) return err; + /* Write vector ID */ + writeb(priv->ht_vec_base + hwirq, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, hwirq))); + parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param_count = 1; parent_fwspec.param[0] = hwirq + priv->ht_vec_base; @@ -222,7 +270,7 @@ static void pch_pic_reset(struct pch_pic *priv) for (i = 0; i < PIC_COUNT; i++) { /* Write vector ID */ - writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i)); + writeb(i, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, i))); /* Hardcode route to HT0 Lo */ writeb(1, priv->base + PCH_INT_ROUTE(i)); } @@ -230,13 +278,15 @@ static void pch_pic_reset(struct pch_pic *priv) for (i = 0; i < PIC_REG_COUNT; i++) { /* Clear IRQ cause registers, mask all interrupts */ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_MASK + 4 * i); - writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_CLR + 4 * i); /* Clear auto bounce, we don't need that */ writel_relaxed(0, priv->base + PCH_PIC_AUTO0 + 4 * i); writel_relaxed(0, priv->base + PCH_PIC_AUTO1 + 4 * i); /* Enable HTMSI transformer */ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_HTMSI_EN + 4 * i); } + + for (i = 0; i < PIC_REG64_COUNT; i++) + writeq_relaxed((u64)-1, priv->base + PCH_PIC_CLR + 8 * i); } static int pch_pic_suspend(void) @@ -284,6 +334,7 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, u32 gsi_base) { struct pch_pic *priv; + int i; 
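+ /* priv->table[] maps a GSI-relative hwirq to the hardware slot it is lazily assigned in pch_pic_domain_translate(); entries start out as PIC_UNDEF_VECTOR and priv->inuse tracks the next free slot. */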
priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -294,6 +345,10 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, if (!priv->base) goto free_priv; + priv->inuse = 0; + for (i = 0; i < PIC_COUNT; i++) + priv->table[i] = PIC_UNDEF_VECTOR; + priv->ht_vec_base = vec_base; priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1; priv->gsi_base = gsi_base; diff --git a/drivers/irqchip/irq-loongson.h b/drivers/irqchip/irq-loongson.h new file mode 100644 index 0000000000000000000000000000000000000000..11fa138d1f4434ebdfb2ae49e5b781dee913c5ba --- /dev/null +++ b/drivers/irqchip/irq-loongson.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef _DRIVERS_IRQCHIP_IRQ_LOONGSON_H +#define _DRIVERS_IRQCHIP_IRQ_LOONGSON_H + +int find_pch_pic(u32 gsi); + +int liointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lio_pic *acpi_liointc); +int eiointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_eio_pic *acpi_eiointc); +int avecintc_acpi_init(struct irq_domain *parent); + +int htvec_acpi_init(struct irq_domain *parent, + struct acpi_madt_ht_pic *acpi_htvec); +int pch_lpc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lpc_pic *acpi_pchlpc); +int pch_pic_acpi_init(struct irq_domain *parent, + struct acpi_madt_bio_pic *acpi_pchpic); +int pch_msi_acpi_init(struct irq_domain *parent, + struct acpi_madt_msi_pic *acpi_pchmsi); +int pch_msi_acpi_init_avec(struct irq_domain *parent); + +#endif /* _DRIVERS_IRQCHIP_IRQ_LOONGSON_H */ diff --git a/drivers/irqchip/irq-sunway-cpu.c b/drivers/irqchip/irq-sunway-cpu.c new file mode 100644 index 0000000000000000000000000000000000000000..ff7455c0f3ec282c4a55ace6c92ad55a0aa8d9e6 --- /dev/null +++ b/drivers/irqchip/irq-sunway-cpu.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include +#include +#include + +static void handle_intx(unsigned int offset) +{ + struct pci_controller *hose; + unsigned long value; + + hose = hose_head; + for (hose = hose_head; hose; hose = hose->next) { + value = read_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7)); + if (value >> 63) { + value = value & (~(1UL << 62)); + write_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7), value); + handle_irq(hose->int_irq); + value = value | (1UL << 62); + write_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7), value); + } + + if (IS_ENABLED(CONFIG_PCIE_PME)) { + value = read_piu_ior0(hose->node, hose->index, PMEINTCONFIG); + if (value >> 63) { + handle_irq(hose->service_irq); + write_piu_ior0(hose->node, hose->index, PMEINTCONFIG, value); + } + } + + if (IS_ENABLED(CONFIG_PCIEAER)) { + value = read_piu_ior0(hose->node, hose->index, AERERRINTCONFIG); + if (value >> 63) { + handle_irq(hose->service_irq); + write_piu_ior0(hose->node, hose->index, AERERRINTCONFIG, value); + } + } + + if (hose->iommu_enable) { + value = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); + if (value >> 63) + handle_irq(hose->int_irq); + } + } +} + +static void handle_device_interrupt(unsigned long irq_info) +{ + unsigned int i; + + if (is_guest_or_emul()) { + handle_irq(irq_info); + return; + } + + for (i = 0; i < 4; i++) { + if ((irq_info >> i) & 0x1) + handle_intx(i); + } +} + +/* Performance counter hook. A module can override this to do something useful. 
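+ * dummy_perf() below just counts the event as an error; a PMU driver installs a real handler by replacing the exported perf_irq pointer.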
*/ +static void dummy_perf(unsigned long vector, struct pt_regs *regs) +{ + irq_err_count++; + pr_crit("Performance counter interrupt!\n"); +} + +void (*perf_irq)(unsigned long vector, struct pt_regs *regs) = dummy_perf; +EXPORT_SYMBOL(perf_irq); + +static void handle_fault_int(void) +{ + int node; + unsigned long value; + + node = __this_cpu_read(hard_node_id); + pr_info("enter fault int, si_fault_stat = %#lx\n", + sw64_io_read(node, SI_FAULT_STAT)); + sw64_io_write(node, SI_FAULT_INT_EN, 0); + sw64_io_write(node, DLI_RLTD_FAULT_INTEN, 0); +#if defined(CONFIG_UNCORE_XUELANG) + value = 0; +#elif defined(CONFIG_UNCORE_JUNZHANG) + value = sw64_io_read(node, FAULT_INT_CONFIG); + value |= (1 << 8); +#endif + __io_write_fault_int_en(node, value); +} + +static void handle_mt_int(void) +{ + pr_info("enter mt int\n"); +} + +static void handle_nmi_int(void) +{ + pr_info("enter nmi int\n"); +} + +static void handle_dev_int(struct pt_regs *regs) +{ + unsigned long config_val, val, stat; + int node = 0; + unsigned int hwirq; + + config_val = sw64_io_read(node, DEV_INT_CONFIG); + val = config_val & (~(1UL << 8)); + sw64_io_write(node, DEV_INT_CONFIG, val); + stat = sw64_io_read(node, MCU_DVC_INT); + + while (stat) { + hwirq = ffs(stat) - 1; + generic_handle_domain_irq(NULL, hwirq); + stat &= ~(1UL << hwirq); + } + /*do handle irq */ + + sw64_io_write(node, DEV_INT_CONFIG, config_val); +} + +asmlinkage void do_entInt(unsigned long type, unsigned long vector, + unsigned long irq_arg, struct pt_regs *regs) +{ + struct pt_regs *old_regs; + extern char __idle_start[], __idle_end[]; + + if (is_guest_or_emul()) { + if ((type & 0xffff) > 15) { + vector = type; + if (vector == 16) + type = INT_INTx; + else + type = INT_MSI; + } + } + + /* restart idle routine if it is interrupted */ + if (regs->pc > (u64)__idle_start && regs->pc < (u64)__idle_end) + regs->pc = (u64)__idle_start; + + switch (type & 0xffff) { + case INT_MSI: + old_regs = set_irq_regs(regs); + handle_pci_msi_interrupt(type, vector, irq_arg); + set_irq_regs(old_regs); + return; + case INT_INTx: + old_regs = set_irq_regs(regs); + handle_device_interrupt(vector); + set_irq_regs(old_regs); + return; + + case INT_IPI: +#ifdef CONFIG_SMP + handle_ipi(regs); + return; +#else + irq_err_count++; + pr_crit("Interprocessor interrupt? You must be kidding!\n"); +#endif + break; + case INT_RTC: + old_regs = set_irq_regs(regs); + sw64_timer_interrupt(); + set_irq_regs(old_regs); + return; + case INT_VT_SERIAL: + old_regs = set_irq_regs(regs); + handle_irq(type); + set_irq_regs(old_regs); + return; + case INT_VT_HOTPLUG: + old_regs = set_irq_regs(regs); + handle_irq(type); + set_irq_regs(old_regs); + return; + case INT_PC0: + perf_irq(PMC_PC0, regs); + return; + case INT_PC1: + perf_irq(PMC_PC1, regs); + return; + case INT_DEV: + old_regs = set_irq_regs(regs); + handle_dev_int(regs); + set_irq_regs(old_regs); + return; + case INT_FAULT: + old_regs = set_irq_regs(regs); + handle_fault_int(); + set_irq_regs(old_regs); + return; + case INT_MT: + old_regs = set_irq_regs(regs); + handle_mt_int(); + set_irq_regs(old_regs); + return; + case INT_NMI: + old_regs = set_irq_regs(regs); + handle_nmi_int(); + set_irq_regs(old_regs); + return; + default: + pr_crit("Hardware intr %ld %lx? 
uh?\n", type, vector); + } + pr_crit("PC = %016lx PS = %04lx\n", regs->pc, regs->ps); +} +EXPORT_SYMBOL(do_entInt); diff --git a/drivers/irqchip/irq-sunway-msi-v2.c b/drivers/irqchip/irq-sunway-msi-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..36790dfedb33a71d8ed2bd4f0f41eb2075d01bc6 --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi-v2.c @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +#include +#include + +static struct irq_domain *msi_default_domain; +static DEFINE_RAW_SPINLOCK(vector_lock); +DEFINE_PER_CPU(vector_irq_t, vector_irq) = { + [0 ... PERCPU_MSI_IRQS - 1] = 0, +}; + +static struct sw64_msi_chip_data *alloc_sw_msi_chip_data(struct irq_data *irq_data) +{ + struct sw64_msi_chip_data *data; + int node; + + node = irq_data_get_node(irq_data); + data = kzalloc_node(sizeof(*data), GFP_KERNEL, node); + if (!data) + return NULL; + spin_lock_init(&data->cdata_lock); + return data; +} + +static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *chip_data; + int rcid; + + chip_data = irq_data_get_irq_chip_data(data->parent_data); + rcid = cpu_to_rcid(chip_data->dst_cpu); + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = + (unsigned int)chip_data->msiaddr | + (rcid_to_msicid(rcid) << MSI_ADDR_DEST_ID_SHIFT); + msg->data = chip_data->vector; +} + +bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector) +{ + int vector, max_vector, cpu; + bool find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + if (is_guest_or_emul()) { + vector = IRQ_PENDING_MSI_VECTORS_SHIFT; + max_vector = SWVM_IRQS; + } else { + vector = 0; + max_vector = 256; + } + for (; vector < max_vector; vector++) { + while (per_cpu(vector_irq, cpu)[vector]) { + cpu = cpumask_next(cpu, search_mask); + if (cpu >= nr_cpu_ids) { + if (vector == 255) { + if (find_once_global) { + pr_warn("No global free vector\n"); + return false; + } + pr_warn("No local free vector\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } + cpu = cpumask_first(search_mask); + break; + } + } + if (!per_cpu(vector_irq, cpu)[vector]) + break; + } + + *found_cpu = cpu; + *found_vector = vector; + return true; +} + +static bool find_free_cpu_vectors(const struct cpumask *search_mask, int *found_cpu, int *found_vector, unsigned int nr_irqs) +{ + int i, vector, cpu; + bool found = false, find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + for (vector = 0; vector < 256; vector++) { + for (i = 0; i < nr_irqs; i++) + if (per_cpu(vector_irq, cpu)[vector + i]) + break; + + if (i == nr_irqs) { + found = true; + *found_cpu = cpu; + *found_vector = vector; + return found; + } + + vector += i; + } + + cpu = cpumask_next(cpu, search_mask); + if (cpu < nr_cpu_ids) + goto try_again; + else { + if (find_once_global) { + pr_warn("No global free vectors\n"); + return found; + } + pr_warn("No local free vectors\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } +} + +static int sw64_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) +{ + struct sw64_msi_chip_data *cdata; + struct irq_data *irqd; + struct msi_desc *entry; + struct cpumask searchmask; + unsigned long flags; + int vector, cpu; + int i; + struct msi_msg msg; + + /* Is this valid ? 
*/ + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + return -EINVAL; + + irqd = irq_domain_get_irq_data(msi_default_domain->parent, d->irq); + /* Don't do anything if the interrupt isn't started */ + if (!irqd_is_started(irqd)) + return IRQ_SET_MASK_OK; + + cdata = irqd->chip_data; + if (!cdata) + return -ENOMEM; + + /* + * If existing target cpu is already in the new mask and is online + * then do nothing. + */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) + return IRQ_SET_MASK_OK; + + raw_spin_lock_irqsave(&vector_lock, flags); + + cpumask_and(&searchmask, cpumask, cpu_online_mask); + if (cdata->multi_msi > 1) { + if (!find_free_cpu_vectors(&searchmask, &cpu, + &vector, cdata->multi_msi)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + } else { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + } + + /* update new setting */ + entry = irq_get_msi_desc(irqd->irq); + spin_lock(&cdata->cdata_lock); + for (i = 0; i < cdata->multi_msi; i++) + per_cpu(vector_irq, cpu)[vector + i] = entry->irq + i; + BUG_ON(irq_chip_compose_msi_msg(irqd, &msg)); + __pci_write_msi_msg(entry, &msg); + cdata->prev_vector = cdata->vector; + cdata->prev_cpu = cdata->dst_cpu; + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->move_in_progress = true; + spin_unlock(&cdata->cdata_lock); + cpumask_copy(irq_data_get_affinity_mask(irqd), &searchmask); + + raw_spin_unlock_irqrestore(&vector_lock, flags); + + return 0; +} + +static void chip_irq_ack(struct irq_data *data) +{ +} + +static struct irq_chip pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = chip_irq_ack, + .irq_compose_msi_msg = irq_msi_compose_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_set_affinity = sw64_set_affinity, +}; + +static int __assign_irq_vector(int virq, unsigned int nr_irqs, + struct irq_domain *domain, enum irq_alloc_type type) +{ + struct irq_data *irq_data; + const struct cpumask *mask; + struct cpumask searchmask; + struct sw64_msi_chip_data *cdata; + int node; + int i, vector, cpu; + unsigned long msiaddr; + + if (unlikely((nr_irqs > 1) && (!is_power_of_2(nr_irqs)))) + nr_irqs = __roundup_pow_of_two(nr_irqs); + + irq_data = irq_domain_get_irq_data(domain, virq); + BUG_ON(!irq_data); + irq_data->chip = &pci_msi_controller; + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + if (type == IRQ_ALLOC_TYPE_MSI && nr_irqs > 1) { + if (!find_free_cpu_vectors(&searchmask, &cpu, + &vector, nr_irqs)) + return -ENOSPC; + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + for (i = 0; i < nr_irqs; i++) { + per_cpu(vector_irq, cpu)[vector + i] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + irq_data->chip_data = cdata; + } + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msiaddr = MSIX_MSG_ADDR; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->multi_msi = nr_irqs; + cdata->move_in_progress = false; + } else { + for (i = 0; i < nr_irqs; i++) { + 
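+ /* MSI-X or single MSI: entries need not share one contiguous vector block, so place each one independently with its own chip data. */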
if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + irq_data->chip_data = cdata; + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msiaddr = MSIX_MSG_ADDR; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->multi_msi = 1; + cdata->move_in_progress = false; + } + } + return 0; +} + +static int assign_irq_vector(int irq, unsigned int nr_irqs, + struct irq_domain *domain, enum irq_alloc_type type) +{ + int err; + unsigned long flags; + + raw_spin_lock_irqsave(&vector_lock, flags); + err = __assign_irq_vector(irq, nr_irqs, domain, type); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return err; +} + +static void sw64_vector_free_irqs(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + int i, j; + struct irq_data *irq_data; + unsigned long flags; + unsigned int multi_msi; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + if (irq_data && irq_data->chip_data) { + struct sw64_msi_chip_data *cdata; + + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + irq_domain_reset_irq_data(irq_data); + multi_msi = cdata->multi_msi; + for (j = 0; j < multi_msi; j++) + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector + j] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + if (multi_msi > 1) + break; + } + } +} + +static void sw64_irq_free_descs(unsigned int virq, unsigned int nr_irqs) +{ + if (is_guest_or_emul()) { + vt_sw64_vector_free_irqs(virq, nr_irqs); + return irq_free_descs(virq, nr_irqs); + } + + return irq_domain_free_irqs(virq, nr_irqs); +} + +void arch_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *desc; + int i; + + for_each_pci_msi_entry(desc, dev) { + if (desc->irq) { + for (i = 0; i < desc->nvec_used; i++) + sw64_irq_free_descs(desc->irq + i, 1); + desc->irq = 0; + } + } +} + +static int sw64_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int err; + struct irq_alloc_info *info = arg; + enum irq_alloc_type msi_type; + + if (arg == NULL) + return -ENODEV; + msi_type = info->type; + err = assign_irq_vector(virq, nr_irqs, domain, msi_type); + if (err) + goto error; + return 0; +error: + sw64_vector_free_irqs(domain, virq, nr_irqs); + return err; +} + +static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct msi_desc *desc = first_pci_msi_entry(pdev); + + memset(arg, 0, sizeof(*arg)); + arg->msi_dev = pdev; + if (desc->msi_attrib.is_msix) + arg->type = IRQ_ALLOC_TYPE_MSIX; + else + arg->type = IRQ_ALLOC_TYPE_MSI; + return 0; +} + +static struct msi_domain_ops pci_msi_domain_ops = { + .msi_prepare = pci_msi_prepare, +}; + +static struct msi_domain_info pci_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .ops = &pci_msi_domain_ops, + .chip = &pci_msi_controller, + .handler = handle_edge_irq, + .handler_name = "edge", +}; + +static int sw64_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &sw64_irq_chip, handle_level_irq); + 
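+ /* Non-MSI mappings default to level-triggered handling. */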
irq_set_status_flags(virq, IRQ_LEVEL); + return 0; +} + +const struct irq_domain_ops sw64_msi_domain_ops = { + .map = sw64_irq_map, + .alloc = sw64_vector_alloc_irqs, + .free = sw64_vector_free_irqs, +}; + +int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + int err; + + if (is_guest_or_emul()) + return sw64_setup_vt_msi_irqs(pdev, nvec, type); + + if (!msi_default_domain) + return -EIO; + err = msi_domain_alloc_irqs(msi_default_domain, &pdev->dev, nvec); + return err; +} + +void arch_init_msi_domain(struct irq_domain *parent) +{ + struct irq_domain *sw64_irq_domain; + + if (is_guest_or_emul()) + return; + + sw64_irq_domain = irq_domain_add_tree(NULL, &sw64_msi_domain_ops, NULL); + BUG_ON(sw64_irq_domain == NULL); + irq_set_default_host(sw64_irq_domain); + msi_default_domain = pci_msi_create_irq_domain(NULL, + &pci_msi_domain_info, sw64_irq_domain); + if (!msi_default_domain) + pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); +} + +static void irq_move_complete(struct sw64_msi_chip_data *cdata, int cpu, int vector) +{ + if (likely(!cdata->move_in_progress)) + return; + if (cdata->dst_cpu == cpu) { + if (vector >= cdata->vector && + vector < cdata->vector + cdata->multi_msi) { + int i; + + raw_spin_lock(&vector_lock); + cdata->move_in_progress = false; + for (i = 0; i < cdata->multi_msi; i++) + per_cpu(vector_irq, cdata->prev_cpu)[cdata->prev_vector + i] = 0; + raw_spin_unlock(&vector_lock); + } + } +} + +void handle_pci_msi_interrupt(unsigned long type, unsigned long vector, unsigned long pci_msi1_addr) +{ + int i, irq, msi_index = 0; + int cpu, vector_index = 0; + unsigned long int_pci_msi[3]; + unsigned long *ptr; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + if (is_guest_or_emul()) { + cpu = smp_processor_id(); + irq = per_cpu(vector_irq, cpu)[vector]; + handle_irq(irq); + return; + } + + ptr = (unsigned long *)pci_msi1_addr; + int_pci_msi[0] = *ptr; + int_pci_msi[1] = *(ptr + 1); + int_pci_msi[2] = *(ptr + 2); + + cpu = smp_processor_id(); + + for (i = 0; i < 4; i++) { + vector_index = i * 64; + while (vector != 0) { + int irq = 0; + + msi_index = find_next_bit(&vector, 64, msi_index); + if (msi_index == 64) { + msi_index = 0; + continue; + } + + irq = per_cpu(vector_irq, cpu)[vector_index + msi_index]; + irq_data = irq_domain_get_irq_data(msi_default_domain->parent, irq); + cdata = irq_data_get_irq_chip_data(irq_data); + spin_lock(&cdata->cdata_lock); + irq_move_complete(cdata, cpu, vector_index + msi_index); + spin_unlock(&cdata->cdata_lock); + handle_irq(irq); + + vector = vector & (~(1UL << msi_index)); + } + + vector = int_pci_msi[i % 3]; + } +} diff --git a/drivers/irqchip/irq-sunway-msi-vt.c b/drivers/irqchip/irq-sunway-msi-vt.c new file mode 100644 index 0000000000000000000000000000000000000000..df8c7d72671b43b7f1d7ce0c6d239c0810afc7fe --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi-vt.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +static DEFINE_RAW_SPINLOCK(vector_lock); + +static void __vt_irq_msi_compose_msg(struct sw64_msi_chip_data *cdata, + struct msi_msg *msg) +{ + msg->address_hi = (u32)(VT_MSIX_MSG_ADDR >> 32); + msg->address_lo = (u32)(VT_MSIX_MSG_ADDR & 0xffffffff) + | VT_MSIX_ADDR_DEST_ID(cdata->dst_cpu); + msg->data = cdata->vector; +} + +static void vt_irq_msi_compose_msg(struct irq_data *irqd, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *cdata; + + cdata = irqd->chip_data; + __vt_irq_msi_compose_msg(cdata, msg); +} + +static void vt_irq_msi_update_msg(struct 
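+/* The guest MSI doorbell is a fixed window (VT_MSIX_MSG_ADDR) with the destination CPU encoded in the address and the vector carried in msg->data, so retargeting an interrupt only requires rewriting the message. */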
irq_data *irqd, + struct sw64_msi_chip_data *cdata) +{ + struct msi_msg msg[2] = { [1] = { }, }; + + __vt_irq_msi_compose_msg(cdata, msg); + pci_write_msi_msg(irqd->irq, msg); +} + +static int +vt_set_affinity(struct irq_data *irqd, const struct cpumask *cpumask, + bool force) +{ + struct sw64_msi_chip_data *cdata; + struct cpumask searchmask; + int cpu, vector; + + /* Is this valid ? */ + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + return -EINVAL; + + if (!irqd_is_started(irqd)) + return IRQ_SET_MASK_OK; + + cdata = irqd->chip_data; + if (!cdata) + return -ENOMEM; + + /* + * If existing target coreid is already in the new mask, + * and is online then do nothing. + */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) + return IRQ_SET_MASK_OK; + + cpumask_and(&searchmask, cpumask, cpu_online_mask); + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + per_cpu(vector_irq, cpu)[vector] = irqd->irq; + spin_lock(&cdata->cdata_lock); + cdata->prev_cpu = cdata->dst_cpu; + cdata->prev_vector = cdata->vector; + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->move_in_progress = true; + spin_unlock(&cdata->cdata_lock); + cpumask_copy((struct cpumask *)irq_data_get_affinity_mask(irqd), &searchmask); + vt_irq_msi_update_msg(irqd, irqd->chip_data); + + return 0; +} + +static struct irq_chip vt_pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = sw64_irq_noop, + .irq_compose_msi_msg = vt_irq_msi_compose_msg, + .irq_set_affinity = vt_set_affinity, +}; + +int chip_setup_vt_msix_irq(struct pci_dev *dev, struct msi_desc *desc) +{ + int virq, val_node = 0; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + unsigned long flags, node, rc_index; + const struct cpumask *mask; + + struct cpumask searchmask; + int cpu, vector; + + node = hose->node; + rc_index = hose->index; + mask = cpumask_of_node(node); + + raw_spin_lock_irqsave(&vector_lock, flags); + /* Find unused msi config reg in PIU-IOR0 */ + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + + virq = irq_alloc_descs_from(NR_IRQS_LEGACY, desc->nvec_used, val_node); + if (virq < 0) { + pr_err("Failed to allocate IRQ(base 16, count %d)\n", desc->nvec_used); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return virq; + } + + irq_data = irq_get_irq_data(virq); + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + + cdata = kzalloc(sizeof(*cdata), GFP_ATOMIC); /* vector_lock held */ + if (!cdata) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOMEM; + } + + per_cpu(vector_irq, cpu)[vector] = virq; + + irq_set_msi_desc(virq, desc); + irq_set_chip_and_handler_name(virq, &vt_pci_msi_controller, + handle_edge_irq, "edge"); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + + irq_data->chip_data = cdata; + + vt_irq_msi_update_msg(irq_data, irq_data->chip_data); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return 0; +}
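+/* Each MSI-X descriptor gets its own virq, CPU/vector pair and chip data here; __arch_setup_vt_msix_irqs() below walks the descriptor list and calls this once per entry. */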
+EXPORT_SYMBOL(chip_setup_vt_msix_irq); + +int chip_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct msi_desc *desc; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + unsigned long node, rc_index; + int virq = -1, val_node = 0; + unsigned long flags; + + const struct cpumask *mask; + struct cpumask searchmask; + int i, vector, cpu; + + if (type == PCI_CAP_ID_MSI && nvec > 32) + return 1; + + node = hose->node; + rc_index = hose->index; + raw_spin_lock_irqsave(&vector_lock, flags); + msi_for_each_desc(desc, &(dev->dev), MSI_DESC_ALL) { + /* Find unused msi config reg in PIU-IOR0 */ + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + virq = irq_alloc_descs_from(NR_IRQS_LEGACY, desc->nvec_used, val_node); + if (virq < 0) { + pr_err("Failed to allocate IRQ(base 16, count %d)\n", desc->nvec_used); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return virq; + } + + irq_data = irq_get_irq_data(virq); + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + for (i = 0; i < desc->nvec_used; i++) { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + + cdata = kzalloc(sizeof(*cdata), GFP_ATOMIC); /* vector_lock held */ + if (!cdata) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOMEM; + } + + per_cpu(vector_irq, cpu)[vector] = virq + i; + irq_set_msi_desc_off(virq, i, desc); + irq_set_chip_and_handler_name(virq + i, &vt_pci_msi_controller, handle_edge_irq, "edge"); + irq_data = irq_get_irq_data(virq + i); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + + irq_data->chip_data = cdata; + + vt_irq_msi_update_msg(irq_data, irq_data->chip_data); + } + } + + raw_spin_unlock_irqrestore(&vector_lock, flags); + return 0; +} +EXPORT_SYMBOL(chip_setup_vt_msi_irqs); + +void vt_sw64_vector_free_irqs(unsigned int virq, unsigned int nr_irqs) +{ + int i; + unsigned long flags; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_get_irq_data(virq + i); + if (irq_data && irq_data->chip_data) { + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + irq_data->hwirq = 0; + irq_data->chip = &no_irq_chip; + irq_data->chip_data = NULL; + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + } + } +} + +int __arch_setup_vt_msix_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct msi_desc *entry; + int ret; + + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { + ret = chip_setup_vt_msix_irq(dev, entry); + if (ret) + return ret; + } + + return 0; +} + +int sw64_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + int ret = 0; + + if (type == PCI_CAP_ID_MSI) + ret = chip_setup_vt_msi_irqs(dev, nvec, type); + else if (type == PCI_CAP_ID_MSIX) + ret = __arch_setup_vt_msix_irqs(dev, nvec, type); + else + pr_info("SW arch does not recognize MSI type %d\n", type); + + return ret; +} diff --git a/drivers/irqchip/irq-sunway-msi.c b/drivers/irqchip/irq-sunway-msi.c new file mode 100644 index
0000000000000000000000000000000000000000..060aa96711b7a96497d3704237b122c31103be03 --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +static struct irq_domain *msi_default_domain; +static DEFINE_RAW_SPINLOCK(vector_lock); +DEFINE_PER_CPU(vector_irq_t, vector_irq) = { + [0 ... PERCPU_MSI_IRQS - 1] = 0, +}; + +static struct sw64_msi_chip_data *alloc_sw_msi_chip_data(struct irq_data *irq_data) +{ + struct sw64_msi_chip_data *data; + int node; + + node = irq_data_get_node(irq_data); + data = kzalloc_node(sizeof(*data), GFP_KERNEL, node); + if (!data) + return NULL; + spin_lock_init(&data->cdata_lock); + return data; +} + +static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *chip_data; + + chip_data = irq_data_get_irq_chip_data(data->parent_data); + + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = MSI_ADDR_BASE_LO; + msg->data = chip_data->msi_config_index; +} + +bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector) +{ + int vector, max_vector, cpu; + bool find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + if (is_guest_or_emul()) { + vector = IRQ_PENDING_MSI_VECTORS_SHIFT; + max_vector = SWVM_IRQS; + } else { + vector = 0; + max_vector = 256; + } + for (; vector < max_vector; vector++) { + while (per_cpu(vector_irq, cpu)[vector]) { + cpu = cpumask_next(cpu, search_mask); + if (cpu >= nr_cpu_ids) { + if (vector == 255) { + if (find_once_global) { + pr_warn("No global free vector\n"); + return false; + } + pr_warn("No local free vector\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } + cpu = cpumask_first(search_mask); + break; + } + } + if (!per_cpu(vector_irq, cpu)[vector]) + break; + } + + *found_cpu = cpu; + *found_vector = vector; + return true; +} + +static unsigned long set_piu_msi_config(struct pci_controller *hose, int cpu, + int msiconf_index, int vector) +{ + unsigned int reg; + unsigned long msi_config; + int phy_cpu; + + msi_config = (1UL << 62) | ((unsigned long)vector << 10); + phy_cpu = cpu_to_rcid(cpu); + msi_config |= ((phy_cpu >> 5) << 6) | (phy_cpu & 0x1f); + reg = MSICONFIG0 + (unsigned long)(msiconf_index << 7); + write_piu_ior0(hose->node, hose->index, reg, msi_config); + msi_config = read_piu_ior0(hose->node, hose->index, reg); + set_bit(msiconf_index, hose->piu_msiconfig); + + return msi_config; +} + +static int sw64_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) +{ + struct sw64_msi_chip_data *cdata; + struct pci_controller *hose; + struct pci_dev *pdev; + struct irq_data *irqd; + struct msi_desc *entry; + struct cpumask searchmask; + unsigned long flags, msi_config; + int vector, cpu; + + /* Is this valid ? */ + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + return -EINVAL; + + irqd = irq_domain_get_irq_data(msi_default_domain->parent, d->irq); + /* Don't do anything if the interrupt isn't started */ + if (!irqd_is_started(irqd)) + return IRQ_SET_MASK_OK; + + cdata = irqd->chip_data; + if (!cdata) + return -ENOMEM; + + /* + * If existing target cpu is already in the new mask and is online + * then do nothing. 
+ */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) + return IRQ_SET_MASK_OK; + + raw_spin_lock_irqsave(&vector_lock, flags); + + cpumask_and(&searchmask, cpumask, cpu_online_mask); + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + + /* update new setting */ + entry = irq_get_msi_desc(irqd->irq); + pdev = (struct pci_dev *)msi_desc_to_pci_dev(entry); + hose = pci_bus_to_pci_controller(pdev->bus); + spin_lock(&cdata->cdata_lock); + per_cpu(vector_irq, cpu)[vector] = irqd->irq; + msi_config = set_piu_msi_config(hose, cpu, cdata->msi_config_index, vector); + cdata->prev_vector = cdata->vector; + cdata->prev_cpu = cdata->dst_cpu; + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msi_config = msi_config; + cdata->move_in_progress = true; + spin_unlock(&cdata->cdata_lock); + cpumask_copy((struct cpumask *)irq_data_get_affinity_mask(irqd), &searchmask); + + raw_spin_unlock_irqrestore(&vector_lock, flags); + + return 0; +} + +static void chip_irq_ack(struct irq_data *data) +{ +} + +static struct irq_chip pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = chip_irq_ack, + .irq_compose_msi_msg = irq_msi_compose_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_set_affinity = sw64_set_affinity, +}; + +static int __assign_irq_vector(int virq, unsigned int nr_irqs, + struct irq_domain *domain, struct pci_controller *hose) +{ + struct irq_data *irq_data; + const struct cpumask *mask; + struct cpumask searchmask; + struct sw64_msi_chip_data *cdata; + int msiconf_index, node; + int i, vector, cpu; + unsigned long msi_config; + int start_index; + + if (unlikely((nr_irqs > 1) && (!is_power_of_2(nr_irqs)))) + nr_irqs = __roundup_pow_of_two(nr_irqs); + + msiconf_index = bitmap_find_next_zero_area(hose->piu_msiconfig, 256, 0, + nr_irqs, nr_irqs - 1); + + if (msiconf_index >= 256) { + pr_warn("No free msi on PIU!\n"); + return -ENOSPC; + } + + start_index = msiconf_index; + irq_data = irq_domain_get_irq_data(domain, virq); + BUG_ON(!irq_data); + irq_data->chip = &pci_msi_controller; + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + for (i = 0; i < nr_irqs; i++) { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + irq_data->chip_data = cdata; + msiconf_index = start_index + i; + msi_config = set_piu_msi_config(hose, cpu, msiconf_index, vector); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->msi_config = msi_config; + cdata->msi_config_index = msiconf_index; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->move_in_progress = false; + } + return 0; +} + +static int assign_irq_vector(int irq, unsigned int nr_irqs, + struct irq_domain *domain, struct pci_controller *hose) +{ + int err; + unsigned long flags; + + 
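+ /* vector_lock serializes the per-CPU vector tables and the PIU msiconfig bitmap against set_affinity and teardown. */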
raw_spin_lock_irqsave(&vector_lock, flags); + err = __assign_irq_vector(irq, nr_irqs, domain, hose); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return err; +} + +static void sw64_vector_free_irqs(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + int i; + struct irq_data *irq_data; + struct pci_dev *pdev; + unsigned long flags; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + if (irq_data && irq_data->chip_data) { + struct sw64_msi_chip_data *cdata; + struct msi_desc *entry; + struct pci_controller *hose; + + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + entry = irq_get_msi_desc(virq + i); + if (entry) { + pdev = (struct pci_dev *)msi_desc_to_pci_dev(entry); + hose = pci_bus_to_pci_controller(pdev->bus); + clear_bit(cdata->msi_config_index, hose->piu_msiconfig); + } + irq_domain_reset_irq_data(irq_data); + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + } + } +} + +static void sw64_irq_free_descs(unsigned int virq, unsigned int nr_irqs) +{ + if (is_guest_or_emul()) { + vt_sw64_vector_free_irqs(virq, nr_irqs); + return irq_free_descs(virq, nr_irqs); + } + + return irq_domain_free_irqs(virq, nr_irqs); +} + +void arch_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *desc; + int i; + + msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) { + if (desc->irq) { + for (i = 0; i < desc->nvec_used; i++) + sw64_irq_free_descs(desc->irq + i, 1); + desc->irq = 0; + } + } +} + +static int sw64_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int err; + struct irq_alloc_info *info = arg; + struct pci_controller *hose; + + if (arg == NULL) + return -ENODEV; + + hose = pci_bus_to_pci_controller(info->msi_dev->bus); + err = assign_irq_vector(virq, nr_irqs, domain, hose); + if (err) + goto error; + return 0; +error: + sw64_vector_free_irqs(domain, virq, nr_irqs); + return err; +} + +static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + memset(arg, 0, sizeof(*arg)); + arg->msi_dev = pdev; + if (pdev->msix_enabled) + arg->type = IRQ_ALLOC_TYPE_MSIX; + else + arg->type = IRQ_ALLOC_TYPE_MSI; + return 0; +} + +static struct msi_domain_ops pci_msi_domain_ops = { + .msi_prepare = pci_msi_prepare, +}; + +static struct msi_domain_info pci_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .ops = &pci_msi_domain_ops, + .chip = &pci_msi_controller, + .handler = handle_edge_irq, + .handler_name = "edge", +}; + +static int sw64_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &sw64_irq_chip, handle_level_irq); + irq_set_status_flags(virq, IRQ_LEVEL); + return 0; +} + +const struct irq_domain_ops sw64_msi_domain_ops = { + .map = sw64_irq_map, + .alloc = sw64_vector_alloc_irqs, + .free = sw64_vector_free_irqs, +}; + +int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + int err; + + if (is_guest_or_emul()) + return sw64_setup_vt_msi_irqs(pdev, nvec, type); + + if (!msi_default_domain) + return -EIO; + + err = msi_domain_alloc_irqs_all_locked(&pdev->dev, MSI_DEFAULT_DOMAIN, nvec); + return err; +} + +void arch_init_msi_domain(struct irq_domain *parent) +{ + struct irq_domain *sw64_irq_domain; + + if (is_guest_or_emul()) + 
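+ /* Guests use the VT doorbell path, not a host MSI domain. */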
return; + + sw64_irq_domain = irq_domain_add_tree(NULL, &sw64_msi_domain_ops, NULL); + BUG_ON(sw64_irq_domain == NULL); + irq_set_default_host(sw64_irq_domain); + msi_default_domain = pci_msi_create_irq_domain(NULL, + &pci_msi_domain_info, sw64_irq_domain); + if (!msi_default_domain) + pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); +} + +int pcibios_device_add(struct pci_dev *dev) +{ + if (msi_default_domain) + dev_set_msi_domain(&dev->dev, msi_default_domain); + return 0; +} + +static void irq_move_complete(struct sw64_msi_chip_data *cdata, int cpu, int vector) +{ + if (likely(!cdata->move_in_progress)) + return; + if (vector == cdata->vector && cdata->dst_cpu == cpu) { + raw_spin_lock(&vector_lock); + cdata->move_in_progress = 0; + per_cpu(vector_irq, cdata->prev_cpu)[cdata->prev_vector] = 0; + raw_spin_unlock(&vector_lock); + } +} + +void handle_pci_msi_interrupt(unsigned long type, unsigned long vector, unsigned long pci_msi1_addr) +{ + int i, irq, piu_index, msi_index = 0; + int cpu, vector_index = 0; + unsigned long value = 0; + unsigned long int_pci_msi[3]; + unsigned long *ptr; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + if (is_guest_or_emul()) { + cpu = smp_processor_id(); + irq = per_cpu(vector_irq, cpu)[vector]; + handle_irq(irq); + return; + } + + ptr = (unsigned long *)pci_msi1_addr; + int_pci_msi[0] = *ptr; + int_pci_msi[1] = *(ptr + 1); + int_pci_msi[2] = *(ptr + 2); + + cpu = smp_processor_id(); + + for (i = 0; i < 4; i++) { + vector_index = i * 64; + while (vector != 0) { + msi_index = find_next_bit(&vector, 64, msi_index); + if (msi_index == 64) { + msi_index = 0; + continue; + } + + irq = per_cpu(vector_irq, cpu)[vector_index + msi_index]; + irq_data = irq_domain_get_irq_data(msi_default_domain->parent, irq); + cdata = irq_data_get_irq_chip_data(irq_data); + spin_lock(&cdata->cdata_lock); + irq_move_complete(cdata, cpu, vector_index + msi_index); + piu_index = cdata->msi_config_index; + value = cdata->msi_config | (1UL << 63); + write_piu_ior0(cdata->rc_node, cdata->rc_index, MSICONFIG0 + (piu_index << 7), value); + spin_unlock(&cdata->cdata_lock); + handle_irq(irq); + + vector = vector & (~(1UL << msi_index)); + } + + vector = int_pci_msi[i % 3]; + } +} diff --git a/drivers/irqchip/irq-sw64-intc-v2.c b/drivers/irqchip/irq-sw64-intc-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..bc2c8ef3ed2fca25a4742c8f4232f27854bea948 --- /dev/null +++ b/drivers/irqchip/irq-sw64-intc-v2.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +static void fake_irq_mask(struct irq_data *data) +{ +} + +static void fake_irq_unmask(struct irq_data *data) +{ +} + +static struct irq_chip onchip_intc = { + .name = "SW fake Intc", + .irq_mask = fake_irq_mask, + .irq_unmask = fake_irq_unmask, +}; + +static int sw64_intc_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + + irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq); + irq_set_status_flags(irq, IRQ_LEVEL); + return 0; +} + +static const struct irq_domain_ops sw64_intc_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = sw64_intc_domain_map, +}; + +#ifdef CONFIG_OF +static struct irq_domain *root_domain; + +static int __init +init_onchip_IRQ(struct device_node *intc, struct device_node *parent) +{ + + int node = 0; + int hwirq = 0, nirq = 8; + + if (parent) + panic("DeviceTree incore intc not a root irq controller\n"); + + root_domain = irq_domain_add_linear(intc, 8, + 
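+ /* 8 hwirqs, one per MCU_DVC_INT line (enabled via the 0xff mask below) */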
&sw64_intc_domain_ops, NULL); + + if (!root_domain) + panic("root irq domain not avail\n"); + + /* with this we don't need to export root_domain */ + irq_set_default_host(root_domain); + + for (hwirq = 0 ; hwirq < nirq ; hwirq++) + irq_create_mapping(root_domain, hwirq); + + /*enable MCU_DVC_INT_EN*/ + sw64_io_write(node, MCU_DVC_INT_EN, 0xff); + + return 0; +} + +IRQCHIP_DECLARE(sw64_intc, "sw64,sw6_irq_controller", init_onchip_IRQ); + +static int __init +init_onchip_vt_IRQ(struct device_node *intc, struct device_node *parent) +{ + if (parent) + panic("DeviceTree incore intc not a root irq controller\n"); + + root_domain = irq_domain_add_legacy(intc, 16, 0, 0, + &sw64_intc_domain_ops, NULL); + + if (!root_domain) + panic("root irq domain not avail\n"); + + /* with this we don't need to export root_domain */ + irq_set_default_host(root_domain); + + return 0; +} + +IRQCHIP_DECLARE(sw64_vt_intc, "sw64,sw6_irq_vt_controller", init_onchip_vt_IRQ); +#endif diff --git a/drivers/irqchip/irq-sw64-lpc-intc.c b/drivers/irqchip/irq-sw64-lpc-intc.c new file mode 100644 index 0000000000000000000000000000000000000000..1cbf8747824232bb62bce64432320ea50ad50451 --- /dev/null +++ b/drivers/irqchip/irq-sw64-lpc-intc.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#define LPC_NR_IRQS 16 +#define LPC_IRQ 0x4 +#define LPC_IRQ_MASK 0x8 + +struct lpc_intc_data { + struct irq_domain *domain; + struct irq_chip_generic *gc; +}; + +static void lpc_irq_mask_ack(struct irq_data *data) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct irq_chip_type *ct = irq_data_get_chip_type(data); + unsigned int mask = data->mask; + + irq_gc_lock(gc); + *ct->mask_cache |= mask; + irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); + irq_reg_writel(gc, mask, ct->regs.ack); + irq_gc_unlock(gc); +} + +static void lpc_irq_handler(struct irq_desc *desc) +{ + struct lpc_intc_data *b = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int irq; + u32 status; + + chained_irq_enter(chip, desc); + + status = irq_reg_readl(b->gc, LPC_IRQ); + + if (status == 0) { + raw_spin_lock(&desc->lock); + handle_bad_irq(desc); + raw_spin_unlock(&desc->lock); + goto out; + } + + while (status) { + irq = __ffs(status); + status &= ~BIT(irq); + generic_handle_irq(irq_find_mapping(b->domain, irq)); + } + +out: + chained_irq_exit(chip, desc); +} + +static int __init lpc_intc_of_init(struct device_node *np, + struct device_node *parent) +{ + unsigned int set = IRQ_NOPROBE | IRQ_LEVEL; + struct lpc_intc_data *data; + struct irq_chip_type *ct; + int parent_irq, ret; + void __iomem *base; + int hwirq = 0; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + base = of_iomap(np, 0); + if (!base) { + pr_err("failed to remap lpc intc registers\n"); + ret = -ENOMEM; + goto out_free; + } + + parent_irq = irq_of_parse_and_map(np, 0); + if (!parent_irq) { + pr_err("failed to find parent interrupt\n"); + ret = -EINVAL; + goto out_unmap; + } + + data->domain = irq_domain_add_linear(np, LPC_NR_IRQS, + &irq_generic_chip_ops, NULL); + if (!data->domain) { + ret = -ENOMEM; + goto out_unmap; + } + + /* Allocate a single Generic IRQ chip for this node */ + ret = irq_alloc_domain_generic_chips(data->domain, 16, 1, np->name, + handle_level_irq, 0, set, + IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("failed to allocate generic irq chip\n"); + goto out_free_domain; + } + + /* Set the IRQ chaining logic 
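+ * lpc_irq_handler() runs as the chained handler of parent_irq: it
+ * reads the LPC_IRQ status register and demultiplexes every set bit
+ * to its mapping in data->domain. A matching device tree node might
+ * look like this (register address and parent interrupt are
+ * illustrative only):
+ *
+ * lpc_intc: interrupt-controller {
+ * compatible = "sw64,lpc_intc";
+ * reg = <...>;
+ * interrupt-controller;
+ * #interrupt-cells = <1>;
+ * interrupts = <...>;
+ * };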
*/ + irq_set_chained_handler_and_data(parent_irq, + lpc_irq_handler, data); + + data->gc = irq_get_domain_generic_chip(data->domain, 0); + data->gc->reg_base = base; + data->gc->private = data; + + ct = data->gc->chip_types; + + ct->regs.ack = LPC_IRQ; + ct->regs.mask = LPC_IRQ_MASK; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask_ack = lpc_irq_mask_ack; + + for (hwirq = 0 ; hwirq < 16 ; hwirq++) + irq_create_mapping(data->domain, hwirq); + + /* Enable LPC interrupts */ + writel(0xffffebdd, base + LPC_IRQ_MASK); + + return 0; + +out_free_domain: + irq_domain_remove(data->domain); +out_unmap: + iounmap(base); +out_free: + kfree(data); + return ret; +} +IRQCHIP_DECLARE(sw_lpc_intc, "sw64,lpc_intc", lpc_intc_of_init); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 68d71b4b55bd350af0e5cefbd1713ca206d131ca..6b653487d954adef5fd8aa457654b5ed7aefb40b 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -604,6 +604,22 @@ config LPC_SCH LPC bridge function of the Intel SCH provides support for System Management Bus and General Purpose I/O. +config LPC_CHIP3 + tristate "CHIP3 LPC" + depends on UNCORE_XUELANG + select MFD_CORE + help + LPC bridge function of the chip3 provides support for + System Management Bus and General Purpose I/O. + +config SUNWAY_SUPERIO_AST2400 + tristate "SUNWAY SUPERIO AST2400" + depends on SW64 + select MFD_CORE + help + Nuvoton AST2400 Super I/O chip platform driver written + for SUNWAY LPC controller. + config INTEL_SOC_PMIC bool "Support for Crystal Cove PMIC" depends on HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index db1ba39de3b590fe37fe39a22faaec30380134e9..50b42df268ea9cb1badff2c71bfbcd11c3eecab7 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -269,6 +269,9 @@ obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o obj-$(CONFIG_MFD_ACER_A500_EC) += acer-ec-a500.o obj-$(CONFIG_MFD_QCOM_PM8008) += qcom-pm8008.o +obj-$(CONFIG_LPC_CHIP3) += lpc_sunway_chip3.o +obj-$(CONFIG_SUNWAY_SUPERIO_AST2400) += sunway_ast2400.o + obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o obj-$(CONFIG_MFD_SMPRO) += smpro-core.o diff --git a/drivers/mfd/lpc_sunway_chip3.c b/drivers/mfd/lpc_sunway_chip3.c new file mode 100644 index 0000000000000000000000000000000000000000..1bcf40d6a6f7b6d7a6d87574386fccb57f7dd1b8 --- /dev/null +++ b/drivers/mfd/lpc_sunway_chip3.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * lpc_sunway_chip3.c - LPC interface for SUNWAY CHIP3 + * + * LPC bridge function contains many other functional units, + * such as Interrupt controllers, Timers, Power Management, + * System Management, GPIO, RTC, and LPC Configuration + * Registers. 
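+ *
+ * On probe the bridge registers three MFD cells: the AST2400 Super
+ * I/O platform device and two physmap flash devices (LPC memory
+ * space and firmware hub space).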
+ * + * Copyright (c) 2014 JN + * Author: Weiqiang Su + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum features { + LPC_USE_MSI = (1 << 0), + LPC_USE_INTX = (1 << 1), +}; + +enum { + LPC_HST_BAR = 0, + LPC_MEM_BAR = 2, + LPC_FWH_BAR = 4, +}; + +enum { + LPC_CTL = 0x0, + LPC_IRQ = 0x4, + LPC_IRQ_MASK = 0x8, + LPC_STAT = 0xc, + LPC_ERR_INF = 0x10, + LPC_MEM_HADDR = 0x14, + LPC_FWH_IDSEL_R1 = 0x18, + LPC_FWH_IDSEL_R2 = 0x1c, + LPC_FWH_IDSEL_R3 = 0x20, + LPC_FWH_IDSEL_R4 = 0x24, + LPC_FWH_IDSEL_R5 = 0x28, + LPC_FWH_DEC_EN1 = 0x2c, + LPC_FWH_DEC_EN2 = 0x30, + LPC_DMA_CTL = 0x34, + LPC_CH_STAT = 0x38, + LPC_CH0_ADDR = 0x3c, + LPC_CH1_ADDR = 0x40, + LPC_CH2_ADDR = 0x44, + LPC_CH3_ADDR = 0x48, + LPC_CH0_LENG = 0x4c, + LPC_CH1_LENG = 0x50, + LPC_CH2_LENG = 0x54, + LPC_CH3_LENG = 0x58, + LPC_CH0_MODE = 0x5c, + LPC_CH1_MODE = 0x60, + LPC_CH2_MODE = 0x64, + LPC_CH3_MODE = 0x68, + LPC_CH_MASK = 0x6c, + LPC_DMA_SWRST = 0x70, +}; + +struct lpc_chip3_adapter { + void __iomem *hst_regs; + struct device *dev; + int irq; + unsigned int features; +}; + +static struct resource superio_chip3_resources[] = { + { + .flags = IORESOURCE_IO, + } +}; + +static struct resource mem_flash_resource = { + .flags = IORESOURCE_MEM, +}; + +static struct resource fw_flash_resource = { + .flags = IORESOURCE_MEM, +}; + +static struct physmap_flash_data mem_flash_data = { + .width = 1, +}; + +static struct physmap_flash_data fw_flash_data = { + .width = 1, +}; + +static struct mfd_cell lpc_chip3_cells[] = { + { + .name = "sunway_superio_ast2400", + .id = 0, + .num_resources = ARRAY_SIZE(superio_chip3_resources), + .resources = superio_chip3_resources, + }, + { + .name = "chip3-flash", + .id = 0, + .num_resources = 1, + .resources = &mem_flash_resource, + .platform_data = &mem_flash_data, + .pdata_size = sizeof(mem_flash_data), + }, + { + .name = "chip3_fwh-flash", + .id = 0, + .num_resources = 1, + .resources = &fw_flash_resource, + .platform_data = &fw_flash_data, + .pdata_size = sizeof(fw_flash_data), + } +}; + +static inline void lpc_writel(void *address, int reg_base, int value) +{ + unsigned long addr = (unsigned long)address + reg_base; + + writel(value, (void *)addr); +} + +static inline int lpc_readl(void *address, int reg_base) +{ + unsigned long addr = (unsigned long)address + reg_base; + int value = readl((void *)addr); + + return value; +} + +static void lpc_enable(struct lpc_chip3_adapter *lpc_adapter) +{ + unsigned int value; + + value = lpc_readl(lpc_adapter->hst_regs, LPC_CTL); + value |= 0x1600; + + /* LPC host enable */ + lpc_writel(lpc_adapter->hst_regs, LPC_CTL, value); +} + +static void lpc_mem_flash_init(struct platform_device *pdev, + struct lpc_chip3_adapter *lpc_adapter) +{ + mem_flash_resource.start = + (((unsigned long)(lpc_adapter->hst_regs) & (~(0xfUL << 28))) | (0x2UL << 28)); + mem_flash_resource.end = mem_flash_resource.start + SZ_256M - 1; + + writel(0x1f, lpc_adapter->hst_regs + LPC_MEM_HADDR); +} + +static void lpc_fw_flash_init(struct platform_device *pdev, + struct lpc_chip3_adapter *lpc_adapter) +{ + fw_flash_resource.start = + (((unsigned long)(lpc_adapter->hst_regs) & (~(0xfUL << 28))) | (0x3UL << 28)); + fw_flash_resource.end = fw_flash_resource.start + SZ_256M - 1; + + writel(0xff0f, lpc_adapter->hst_regs + LPC_FWH_DEC_EN1); + writel(0xffff11ff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R5); + writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R4); + writel(0xffffffff, lpc_adapter->hst_regs + 
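+ /*
+ * Open the remaining firmware-hub ID-select ranges (R3..R1) as
+ * well, so LPC firmware cycles decode across the whole 256M
+ * window set up above.
+ */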
LPC_FWH_IDSEL_R3); + writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R2); + writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R1); + +} + +static int lpc_chip3_probe(struct platform_device *pdev) +{ + int ret; + struct lpc_chip3_adapter *lpc_adapter; + struct resource *mem; + + lpc_adapter = kzalloc(sizeof(*lpc_adapter), GFP_KERNEL); + if (lpc_adapter == NULL) { + dev_err(&pdev->dev, "%s kzalloc failed !\n", __func__); + return -ENOMEM; + } + + platform_set_drvdata(pdev, lpc_adapter); + /* Get basic io resource and map it */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no mem resource?\n"); + return -EINVAL; + } + + lpc_adapter->hst_regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(lpc_adapter->hst_regs)) { + dev_err(&pdev->dev, "lpc region map failed\n"); + return PTR_ERR(lpc_adapter->hst_regs); + } + + lpc_adapter->dev = &pdev->dev; + lpc_adapter->features = 0; + + lpc_enable(lpc_adapter); + + lpc_mem_flash_init(pdev, lpc_adapter); + lpc_fw_flash_init(pdev, lpc_adapter); + + ret = mfd_add_devices(&pdev->dev, 0, + lpc_chip3_cells, ARRAY_SIZE(lpc_chip3_cells), + NULL, 0, NULL); + if (ret) + goto out_dev; + + dev_info(lpc_adapter->dev, "probe succeed !\n"); + + return ret; + +out_dev: + dev_info(lpc_adapter->dev, "probe failed !\n"); + + mfd_remove_devices(&pdev->dev); + kfree(lpc_adapter); + + return ret; +} + +static int lpc_chip3_remove(struct platform_device *pdev) +{ + struct lpc_chip3_adapter *lpc_adapter = platform_get_drvdata(pdev); + + mfd_remove_devices(&pdev->dev); + iounmap(lpc_adapter->hst_regs); + kfree(lpc_adapter); + + return 0; +} + +static const struct of_device_id chip3_lpc_of_match[] = { + {.compatible = "sunway,chip3_lpc",}, + { /* end of table */ } +}; + +MODULE_DEVICE_TABLE(of, chip3_lpc_of_match); + +#ifdef CONFIG_PM_SLEEP +unsigned int lpc_irq_ctrl_value; +unsigned int lpc_irq_irq_value; +unsigned int lpc_irq_mask_value; + +/** + * chip3_lpc_platform_suspend - Suspend an chip3_lpc-platform device + * @dev: the platform device to suspend + * + * This function stores the lpc controller register values and + * restores them when the machine wakes up. + */ +int chip3_lpc_platform_suspend(struct device *dev) +{ + struct lpc_chip3_adapter *lpc_adapter = dev_get_drvdata(dev); + + lpc_irq_ctrl_value = lpc_readl(lpc_adapter->hst_regs, LPC_CTL); + lpc_irq_irq_value = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ); + lpc_irq_mask_value = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ_MASK); + + return 0; +} + +/** + * chip3_lpc_platform_resume - Resume an chip3_lpc-platform device + * @dev: the platform device to resume + * + * This function restores the register value before the suspend. 
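+ * Only the LPC_CTL, LPC_IRQ and LPC_IRQ_MASK values captured in
+ * chip3_lpc_platform_suspend() are written back.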
+ */ +int chip3_lpc_platform_resume(struct device *dev) +{ + struct lpc_chip3_adapter *lpc_adapter = dev_get_drvdata(dev); + + lpc_writel(lpc_adapter->hst_regs, LPC_CTL, lpc_irq_ctrl_value); + lpc_writel(lpc_adapter->hst_regs, LPC_IRQ, lpc_irq_irq_value); + lpc_writel(lpc_adapter->hst_regs, LPC_IRQ_MASK, lpc_irq_mask_value); + + return 0; +} +static SIMPLE_DEV_PM_OPS(chip3_lpc_pm_ops, chip3_lpc_platform_suspend, + chip3_lpc_platform_resume); +#endif + + +static struct platform_driver chip3_lpc_platform_driver = { + .driver = { + .name = "chip3_lpc", + .of_match_table = chip3_lpc_of_match, +#ifdef CONFIG_PM_SLEEP + .pm = &chip3_lpc_pm_ops, +#endif + }, + .remove = lpc_chip3_remove, +}; + +static int __init chip3_lpc_drvinit(void) +{ + return platform_driver_probe(&chip3_lpc_platform_driver, + lpc_chip3_probe); +} + +/* + * lpc controller init configure before serial drivers; + * The lpc & ast2400 should be initialized much before + * the serial initialized functions are called. + */ +subsys_initcall_sync(chip3_lpc_drvinit); + +static void __exit chip3_lpc_drvexit(void) +{ + platform_driver_unregister(&chip3_lpc_platform_driver); +} + +module_exit(chip3_lpc_drvexit); + +MODULE_AUTHOR("Weiqiang Su "); +MODULE_DESCRIPTION("LPC Interface for CHIP3"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/sunway_ast2400.c b/drivers/mfd/sunway_ast2400.c new file mode 100644 index 0000000000000000000000000000000000000000..fbea07813643fea6b19ed0c9380ea0e422551b37 --- /dev/null +++ b/drivers/mfd/sunway_ast2400.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/drivers/mfd/sunway_ast2400.c + * + * Copyright (C) 20014 - 2015 JN + * Author: Weiqiang Su + * + * Nuvoton AST2400 Super I/O chip platform driver written for + * SUNWAY LPC controller. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static int superio_uart0_irq; +static int superio_uart1_irq; +static void pnp_enable(device_t dev) +{ + pnp_enter_conf_mode(dev); + pnp_set_logical_device(dev); + pnp_set_enable(dev, 1); + pnp_exit_conf_mode(dev); +} + +const struct pnp_mode_ops pnp_conf_mode_8787_aa = { + .enter_conf_mode = pnp_enter_conf_mode_a5a5, + .exit_conf_mode = pnp_exit_conf_mode_aa, +}; + +static struct device_operations ops = { + .enable = pnp_enable, + .ops_pnp_mode = &pnp_conf_mode_8787_aa, +}; + +static struct pnp_info pnp_dev_info[] = { + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_FDC}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_PP }, + { true, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SP1}, + { true, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SP2}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_KBC}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_CIR}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_ACPI}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_HWM_FPLED}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_VID}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_CIRWKUP}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO_PP_OD}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SVID}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_DSLP}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOA_LDN}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_WDT1}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOBASE}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO0}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO1}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO2}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO3}, + { 
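+ /*
+ * Only the two serial ports (AST2400_SP1/SP2) are flagged enabled
+ * above; the remaining logical devices are listed but left
+ * disabled.
+ */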
false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO4}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO5}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO6}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO7}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO8}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO9}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOA}, +}; + +static void superio_com1_init(struct pnp_device *device) +{ + pnp_enter_conf_mode(device); + pnp_set_logical_device(device); + pnp_set_enable(device, 1); + + pnp_write_config(device, 0x60, 0x3); + pnp_write_config(device, 0x61, 0xf8); + + pnp_write_config(device, 0x70, superio_uart0_irq); + pnp_write_config(device, 0x71, 0x1); + + pnp_write_config(device, 0xf0, 0x0); + + pnp_exit_conf_mode(device); +} + +static void superio_com2_init(struct pnp_device *device) +{ + pnp_enter_conf_mode(device); + pnp_set_logical_device(device); + pnp_set_enable(device, 1); + + pnp_write_config(device, 0x60, 0x2); + pnp_write_config(device, 0x61, 0xf8); + + pnp_write_config(device, 0x70, superio_uart1_irq); + pnp_write_config(device, 0x71, 0x1); + + pnp_write_config(device, 0xf0, 0x0); + + pnp_exit_conf_mode(device); +} + +static void pnp_enable_devices(superio_device_t superio_device, + struct device_operations *ops, + unsigned int functions, struct pnp_info *info) +{ + int i = 0; + struct pnp_info *each_info; + struct pnp_device *each_device; + + /* Setup the ops and resources on the newly allocated devices. */ + for (i = 0; i < functions; i++) { + each_info = info + i; + each_device = &each_info->pnp_device; + + /* Skip logical devices this Super I/O doesn't enable. */ + if (each_info->enabled == false) + continue; + + each_device->device = each_info->function; + each_device->ops = ops; + each_device->port = superio_device->superio_ast2400_efir; + + switch (each_device->device) { + case AST2400_SP1: + each_device->ops->init = superio_com1_init; + break; + case AST2400_SP2: + each_device->ops->init = superio_com2_init; + break; + } + + if (each_device->ops->init) + each_device->ops->init(each_device); + } +} + +static void superio_enable_devices(superio_device_t superio_device) +{ + pnp_enable_devices(superio_device, &ops, + ARRAY_SIZE(pnp_dev_info), pnp_dev_info); +} + +static int superio_ast2400_probe(struct platform_device *pdev) +{ + int err = 0; + superio_device_t superio_device; + struct resource *res; + resource_size_t physaddr = 0; + + /* allocate space for device info */ + superio_device = kzalloc(sizeof(struct superio_ast2400_device), GFP_KERNEL); + if (superio_device == NULL) { + err = -ENOMEM; + return err; + } + + res = platform_get_resource(pdev, IORESOURCE_IO, 1); + if (res) { + physaddr = res->start; + dev_info(&pdev->dev, "request memory region %pR\n", res); + } + + superio_device->dev = &pdev->dev; + superio_device->enabled = 1; + superio_device->superio_ast2400_efir = physaddr + SUPERIO_PNP_PORT; + superio_device->superio_ast2400_efdr = physaddr + SUPERIO_PNP_PORT + 1; + superio_uart0_irq = platform_get_irq_byname(pdev, "uart0_irq"); + superio_uart1_irq = platform_get_irq_byname(pdev, "uart1_irq"); + + superio_enable_devices(superio_device); + + platform_set_drvdata(pdev, superio_device); + + dev_info(superio_device->dev, "probe succeed !\n"); + + return 0; +} + +static int superio_ast2400_remove(struct platform_device *pdev) +{ + superio_device_t superio_device = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + kfree(superio_device); + + return 0; +} + +static struct 
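+/*
+ * Registered via subsys_initcall_sync() below so the Super I/O UARTs
+ * are programmed before the serial core probes them.
+ */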
platform_driver superio_nuvoton_ast2400_driver = {
+	.probe = superio_ast2400_probe,
+	.remove = superio_ast2400_remove,
+	.driver = {
+		.name = "sunway_superio_ast2400"
+	},
+};
+
+static int __init superio_nuvoton_ast2400_init(void)
+{
+	return platform_driver_register(&superio_nuvoton_ast2400_driver);
+}
+
+subsys_initcall_sync(superio_nuvoton_ast2400_init);
+
+static void __exit superio_nuvoton_ast2400_exit(void)
+{
+	platform_driver_unregister(&superio_nuvoton_ast2400_driver);
+}
+
+module_exit(superio_nuvoton_ast2400_exit);
+
+MODULE_DESCRIPTION("NUVOTON AST2400 Super I/O DRIVER");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Weiqiang Su");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index cadd4a820c03364ae1c3afd32278dcdcace8cb61..1e9def44eb09ca5eb13abb78f4dac2ca88a244a9 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -376,6 +376,14 @@ config HMC6352
 	  This driver provides support for the Honeywell HMC6352
 	  compass, providing configuration and heading data via sysfs.
 
+config SUNWAY_GED
+	tristate "Sunway generic event device for memory hotplug"
+	depends on SW64
+	depends on MEMORY_HOTPLUG
+	help
+	  This driver provides support for the Sunway generic event device,
+	  which notifies the kernel of memory hot-add and hot-remove events.
+
 config DS1682
 	tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
 	depends on I2C
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f2a4d1ff65d46a2a014b6e40ed737d26a68a25d0..ccf5456e1d880373d1776276275a8658e374af44 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_SENSORS_TSL2550)	+= tsl2550.o
 obj-$(CONFIG_DS1682)		+= ds1682.o
 obj-$(CONFIG_C2PORT)		+= c2port/
 obj-$(CONFIG_HMC6352)		+= hmc6352.o
+obj-$(CONFIG_SUNWAY_GED)	+= sunway-ged.o
 obj-y				+= eeprom/
 obj-y				+= cb710/
 obj-$(CONFIG_VMWARE_BALLOON)	+= vmw_balloon.o
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 88b91ad8e5413a6a6c361084ae85a51925bb8776..6cd73f2a487f34bd781c5c45818122a7422860ec 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -130,7 +130,8 @@ static int hw_break_val2;
 static int cont_instead_of_sstep;
 static unsigned long cont_thread_id;
 static unsigned long sstep_thread_id;
-#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC)
+#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC) \
+	|| defined(CONFIG_SW64)
 static int arch_needs_sstep_emulation = 1;
 #else
 static int arch_needs_sstep_emulation;
diff --git a/drivers/misc/sunway-ged.c b/drivers/misc/sunway-ged.c
new file mode 100644
index 0000000000000000000000000000000000000000..b4e4ca31585257961b54120024bdf049af9ae8bd
--- /dev/null
+++ b/drivers/misc/sunway-ged.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Generic Event Device for ACPI.
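+ *
+ * On SW64 the hypervisor raises an interrupt for memory hot-add and
+ * hot-remove; the handler reads the event's start address, length,
+ * slot and status from MMIO registers and calls __add_memory() or
+ * remove_memory() accordingly.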
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define OFFSET_START_ADDR 0 +#define OFFSET_LENGTH 8 +#define OFFSET_STATUS 16 +#define OFFSET_SLOT 24 + +/* Memory hotplug event */ +#define SUNWAY_MEMHOTPLUG_ADD 0x1 +#define SUNWAY_MEMHOTPLUG_REMOVE 0x2 + +struct sunway_memory_device { + struct sunway_ged_device *device; + unsigned int state; /* State of the memory device */ + struct list_head list; + + u64 start_addr; /* Memory Range start physical addr */ + u64 length; /* Memory Range length */ + u64 slot; /* Memory Range slot */ + unsigned int enabled:1; +}; + +struct sunway_ged_device { + struct device *dev; + void __iomem *membase; + void *driver_data; + spinlock_t lock; + struct list_head dev_list; +}; + +static int sunway_memory_enable_device(struct sunway_memory_device *mem_device) +{ + int num_enabled = 0; + int result = 0; + + if (mem_device->enabled) { /* just sanity check...*/ + num_enabled++; + goto out; + } + + /* + * If the memory block size is zero, please ignore it. + * Don't try to do the following memory hotplug flowchart. + */ + if (!mem_device->length) + goto out; + + lock_device_hotplug(); + /* suppose node = 0, fix me! */ + result = __add_memory(0, mem_device->start_addr, mem_device->length); + unlock_device_hotplug(); + /* + * If the memory block has been used by the kernel, add_memory() + * returns -EEXIST. If add_memory() returns the other error, it + * means that this memory block is not used by the kernel. + */ + if (result && result != -EEXIST) + goto out; + + mem_device->enabled = 1; + + /* + * Add num_enable even if add_memory() returns -EEXIST, so the + * device is bound to this driver. + */ + num_enabled++; +out: + if (!num_enabled) { + dev_err(mem_device->device->dev, "add_memory failed\n"); + return -EINVAL; + } + + return 0; +} + +static int sunway_memory_get_meminfo(struct sunway_memory_device *mem_device) +{ + struct sunway_ged_device *geddev; + + if (!mem_device) + return -EINVAL; + + if (mem_device->enabled) + return 0; + + geddev = mem_device->device; + + mem_device->start_addr = readq(geddev->membase + OFFSET_START_ADDR); + mem_device->length = readq(geddev->membase + OFFSET_LENGTH); + + return 0; +} + +static void sunway_memory_device_remove(struct sunway_ged_device *device) +{ + struct sunway_memory_device *mem_dev, *n; + unsigned long start_addr, length, slot; + + if (!device) + return; + + start_addr = readq(device->membase + OFFSET_START_ADDR); + length = readq(device->membase + OFFSET_LENGTH); + slot = readq(device->membase + OFFSET_SLOT); + + list_for_each_entry_safe(mem_dev, n, &device->dev_list, list) { + if (!mem_dev->enabled) + continue; + + if ((start_addr == mem_dev->start_addr) && + (length == mem_dev->length)) { + /* suppose node = 0, fix me! 
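+ * The NUMA node is hard-coded to 0, matching the assumption in
+ * sunway_memory_enable_device() above; deriving it from the address
+ * range would need something along the lines of
+ * memory_add_physaddr_to_nid().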
*/ + remove_memory(0, start_addr, length); + list_del(&mem_dev->list); + kfree(mem_dev); + } + } + + writeq(slot, device->membase + OFFSET_SLOT); +} + +static int sunway_memory_device_add(struct sunway_ged_device *device) +{ + struct sunway_memory_device *mem_device; + int result; + + if (!device) + return -EINVAL; + + mem_device = kzalloc(sizeof(struct sunway_memory_device), GFP_KERNEL); + if (!mem_device) + return -ENOMEM; + + INIT_LIST_HEAD(&mem_device->list); + mem_device->device = device; + + /* Get the range from the IO */ + mem_device->start_addr = readq(device->membase + OFFSET_START_ADDR); + mem_device->length = readq(device->membase + OFFSET_LENGTH); + mem_device->slot = readq(device->membase + OFFSET_SLOT); + + result = sunway_memory_enable_device(mem_device); + if (result) { + dev_err(device->dev, "sunway_memory_enable_device() error\n"); + sunway_memory_device_remove(device); + + return result; + } + + list_add_tail(&mem_device->list, &device->dev_list); + dev_dbg(device->dev, "Memory device configured\n"); + + hcall(HCALL_MEMHOTPLUG, mem_device->start_addr, 0, 0); + + return 1; +} + +static irqreturn_t sunwayged_ist(int irq, void *data) +{ + struct sunway_ged_device *sunwayged_dev = data; + unsigned int status; + + status = readl(sunwayged_dev->membase + OFFSET_STATUS); + + /* through IO status to add or remove memory device */ + if (status & SUNWAY_MEMHOTPLUG_ADD) + sunway_memory_device_add(sunwayged_dev); + + if (status & SUNWAY_MEMHOTPLUG_REMOVE) + sunway_memory_device_remove(sunwayged_dev); + + return IRQ_HANDLED; +} + +static irqreturn_t sunwayged_irq_handler(int irq, void *data) +{ + return IRQ_WAKE_THREAD; +} + +static int sunwayged_probe(struct platform_device *pdev) +{ + struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0); + struct sunway_ged_device *geddev; + struct device *dev; + int irqflags; + + if (!regs) { + dev_err(dev, "no registers defined\n"); + return -EINVAL; + } + + geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL); + if (!geddev) + return -ENOMEM; + + spin_lock_init(&geddev->lock); + geddev->membase = devm_ioremap(&pdev->dev, + regs->start, resource_size(regs)); + if (!geddev->membase) + return -ENOMEM; + + INIT_LIST_HEAD(&geddev->dev_list); + geddev->dev = &pdev->dev; + irqflags = IRQF_SHARED; + + if (request_threaded_irq(irq, sunwayged_irq_handler, sunwayged_ist, + irqflags, "SUNWAY:Ged", geddev)) { + dev_err(dev, "failed to setup event handler for irq %u\n", irq); + + return -EINVAL; + } + + platform_set_drvdata(pdev, geddev); + + return 0; +} + +static int sunwayged_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id sunwayged_of_match[] = { + {.compatible = "sw6,sunway-ged", }, + { } +}; +MODULE_DEVICE_TABLE(of, sunwayged_of_match); + +static struct platform_driver sunwayged_platform_driver = { + .driver = { + .name = "sunway-ged", + .of_match_table = sunwayged_of_match, + }, + .probe = sunwayged_probe, + .remove = sunwayged_remove, +}; +module_platform_driver(sunwayged_platform_driver); + +MODULE_AUTHOR("Lu Feifei"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Sunway ged driver"); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 5a274b99f2992131505443f461a42f7f59a3bd07..268c84e49194e534d4d702ec4f6c11520632da9d 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -69,6 +69,7 @@ config DNET To compile this driver as a module, choose M here: the module will be called dnet. 
+source "drivers/net/ethernet/bzwx/Kconfig" source "drivers/net/ethernet/dec/Kconfig" source "drivers/net/ethernet/dlink/Kconfig" source "drivers/net/ethernet/emulex/Kconfig" @@ -85,6 +86,7 @@ source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" +source "drivers/net/ethernet/yunsilicon/Kconfig" config JME tristate "JMicron(R) PCI-Express Gigabit Ethernet support" @@ -127,6 +129,7 @@ source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" +source "drivers/net/ethernet/motorcomm/Kconfig" source "drivers/net/ethernet/mscc/Kconfig" source "drivers/net/ethernet/microsoft/Kconfig" source "drivers/net/ethernet/moxa/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 0d872d4efcd10b1d29752e7fcd6aede2ec657902..423e9edd67771d60498eea0f5ff8d73a56d4cb7b 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ obj-$(CONFIG_NET_VENDOR_MICROSOFT) += microsoft/ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ +obj-$(CONFIG_NET_VENDOR_YUNSILICON) += yunsilicon/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o @@ -61,6 +62,7 @@ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ +obj-$(CONFIG_NET_VENDOR_MOTORCOMM) += motorcomm/ obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ @@ -104,3 +106,4 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ +obj-$(CONFIG_NET_VENDOR_BZWX) += bzwx/ diff --git a/drivers/net/ethernet/bzwx/Kconfig b/drivers/net/ethernet/bzwx/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..5cc757ceba64ef73585a945df46d98e5e7fe7bac --- /dev/null +++ b/drivers/net/ethernet/bzwx/Kconfig @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# BeiZhongWangXin device configuration +# + +config NET_VENDOR_BZWX + bool "BeiZhongWangXin devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about BeiZhongWangXin devices. If you say Y, you will be asked + for your specific device in the following questions. + +if NET_VENDOR_BZWX + +source "drivers/net/ethernet/bzwx/nce/Kconfig" + +endif # NET_VENDOR_BZWX diff --git a/drivers/net/ethernet/bzwx/Makefile b/drivers/net/ethernet/bzwx/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..05273f2858c508df85160f259d429196ee81e292 --- /dev/null +++ b/drivers/net/ethernet/bzwx/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the BeiZhongWangXin network device drivers. 
+# + +obj-$(CONFIG_NCE) += nce/ diff --git a/drivers/net/ethernet/bzwx/nce/Kconfig b/drivers/net/ethernet/bzwx/nce/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..694c1108f8b4da4e974bd27b62913d228f1f5cbc --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/Kconfig @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# BeiZhongWangXin device configuration + + +config NCE + tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Support" + depends on PCI + help + This selects the drivers support BeiZhongWangXin Ethernet Connection N5/N6 Series devices. + +if NCE + +config NE6X + tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Support" + default n + depends on PCI_MSI + help + This driver supports BeiZhongWangXin Ethernet Connection N5/N6 Series + of devices. + + To compile this driver as a module, choose M here. + The module will be called ncepf. + +config NE6XVF + tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Virtual Function support" + depends on PCI_MSI + depends on NE6X + help + This driver supports virtual functions for BeiZhongWangXin Ethernet Connection N5/N6 Series + Virtual Function devices. + + To compile this driver as a module, choose M here. The module + will be called ncevf. + +endif #NCE diff --git a/drivers/net/ethernet/bzwx/nce/Makefile b/drivers/net/ethernet/bzwx/nce/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..5ec82cec67b30a889c1a68d4ca38901e3a9c62ef --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the BeiZhongWangXin network device drivers. +# + +ccflags-y += -I$(srctree)/drivers/net/ethernet/bzwx/nce/comm +ccflags-y += -I$(srctree)/drivers/net/ethernet/bzwx/nce/ne6x +ccflags-y += -I$(srctree)/drivers/net/ethernet/bzwx/nce/ne6x_vf +subdir-ccflags-y += -I$(src)/comm +subdir-ccflags-y += -I$(src)/ne6x +subdir-ccflags-y += -I$(src)/ne6x_vf + +obj-$(CONFIG_NE6X) += ncepf.o +ncepf-objs := comm/txrx.o \ + ne6x/ne6x_main.o \ + ne6x/ne6x_ethtool.o \ + ne6x/ne6x_procfs.o \ + ne6x/ne6x_netlink.o \ + ne6x/ne6x_interrupt.o \ + ne6x/ne6x_reg.o \ + ne6x/ne6x_dev.o \ + ne6x/ne6x_txrx.o + +ncepf-$(CONFIG_DEBUG_FS) += ne6x/ne6x_debugfs.o +ncepf-$(CONFIG_PCI_IOV) += ne6x/ne6x_virtchnl_pf.o +ncepf-$(CONFIG_RFS_ACCEL) += ne6x/ne6x_arfs.o + +obj-$(CONFIG_NE6XVF) += ncevf.o +ncevf-objs := comm/txrx.o \ + ne6x_vf/ne6xvf_main.o \ + ne6x_vf/ne6xvf_ethtool.o \ + ne6x_vf/ne6xvf_virtchnl.o \ + ne6x_vf/ne6xvf_txrx.o + +ncevf-$(CONFIG_DEBUG_FS) += ne6x_vf/ne6xvf_debugfs.o diff --git a/drivers/net/ethernet/bzwx/nce/comm/common.h b/drivers/net/ethernet/bzwx/nce/comm/common.h new file mode 100644 index 0000000000000000000000000000000000000000..b3c35edbf124b80b2a05954499997cb09fd42bab --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/common.h @@ -0,0 +1,262 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
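+ *
+ * Definitions shared by the N5/N6 PF (ncepf) and VF (ncevf) drivers:
+ * link and PHY state, per-port bookkeeping, mailbox snapshot state
+ * and RSS configuration.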
*/ + +#ifndef _NE6X_COMMON_H +#define _NE6X_COMMON_H + +#define NE6X_MAX_U64 0xFFFFFFFFFFFFFFFFULL + +#define NE6X_MODULE_TYPE_TOTAL_BYTE 3 + +#define NE6X_AQ_LINK_UP 0x1ULL +#define NE6X_AQ_AN_COMPLETED BIT(0) + +#define PCI_VENDOR_ID_BZWX 0xD20C + +struct ne6x_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_miss; + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ + u64 rx_malform; + u64 tx_malform; +}; + +enum ne6x_phy_type { + NE6X_PHY_TYPE_UNKNOWN = 0, + NE6X_PHY_TYPE_10GBASE = 1, + NE6X_PHY_TYPE_25GBASE, + NE6X_PHY_TYPE_40GBASE, + NE6X_PHY_TYPE_100GBASE, + NE6X_PHY_TYPE_200GBASE, +}; + +#define NE6X_LINK_SPEED_10GB_SHIFT 0x1 +#define NE6X_LINK_SPEED_40GB_SHIFT 0x2 +#define NE6X_LINK_SPEED_25GB_SHIFT 0x3 +#define NE6X_LINK_SPEED_100GB_SHIFT 0x4 +#define NE6X_LINK_SPEED_200GB_SHIFT 0x5 + +enum ne6x_sdk_link_speed { + NE6X_LINK_SPEED_UNKNOWN = 0, + NE6X_LINK_SPEED_10GB = BIT(NE6X_LINK_SPEED_10GB_SHIFT), + NE6X_LINK_SPEED_40GB = BIT(NE6X_LINK_SPEED_40GB_SHIFT), + NE6X_LINK_SPEED_25GB = BIT(NE6X_LINK_SPEED_25GB_SHIFT), + NE6X_LINK_SPEED_100GB = BIT(NE6X_LINK_SPEED_100GB_SHIFT), + NE6X_LINK_SPEED_200GB = BIT(NE6X_LINK_SPEED_200GB_SHIFT), +}; + +struct ne6x_link_status { + u64 phy_type_low; + u64 phy_type_high; + + u16 max_frame_size; + u16 req_speeds; + u8 topo_media_conflict; + u8 link_cfg_err; + u8 lse_ena; /* Link Status Event notification */ + u8 link_info; + u8 an_info; + u8 ext_info; + u8 fec_info; + u8 pacing; + u32 link_speed; + u8 module_type[NE6X_MODULE_TYPE_TOTAL_BYTE]; +}; + +struct ne6x_mac_info { + u8 perm_addr[ETH_ALEN]; +}; + +struct ne6x_link_info { + u32 link; + u32 speed; +}; + +enum ne6x_media_type { + NE6X_MEDIA_UNKNOWN = 0, + NE6X_MEDIA_FIBER, + NE6X_MEDIA_BASET, + NE6X_MEDIA_BACKPLANE, + NE6X_MEDIA_DA, + NE6X_MEDIA_AUI, +}; + +struct ne6x_phy_info { + struct ne6x_link_status link_info; + struct ne6x_link_status link_info_old; + u64 phy_type_low; + u64 phy_type_high; + enum ne6x_media_type media_type; + u8 get_link_info; + u16 curr_user_speed_req; +}; + +struct ne6x_port_info { + struct ne6x_hw *hw; /* back pointer to HW instance */ + + u8 lport; + u8 hw_port_id; /* hardware port id */ + u8 hw_trunk_id; + u32 hw_queue_base_old; + u32 hw_queue_base; + u32 hw_max_queue; + + u32 queue; /* current used queue */ + struct ne6x_link_info link_status; + struct ne6x_mac_info mac; + struct ne6x_phy_info phy; +}; + +struct ne6x_bus_info { + u16 domain_num; + u16 device; + u8 func; + u8 bus_num; +}; + +struct ne6x_mbx_snap_buffer_data { + u8 state : 4; + u8 len : 4; + u8 type; + u8 data[6]; +}; + +/* Structure to track messages sent by VFs on mailbox: + * 1. vf_cntr : a counter array of VFs to track the number of + * asynchronous messages sent by each VF + * 2. vfcntr_len : number of entries in VF counter array + */ +struct ne6x_mbx_vf_counter { + u32 *vf_cntr; + u32 vfcntr_len; +}; + +/* Enum defining the different states of the mailbox snapshot in the + * PF-VF mailbox overflow detection algorithm. The + * snapshot can be in + * states: + * 1. NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT - generate a new static snapshot + * within + * the mailbox buffer. + * 2. NE6X_MAL_VF_DETECT_STATE_TRAVERSE - iterate through the mailbox snaphot + * 3. 
+ * NE6X_MAL_VF_DETECT_STATE_DETECT - track the messages sent per VF via the + * mailbox and mark any VFs sending more + * messages than the threshold limit set. + * 4. NE6X_MAL_VF_DETECT_STATE_INVALID - Invalid mailbox state set to + * 0xFFFFFFFF. + */ +enum ne6x_mbx_snapshot_state { + NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT = 0, + NE6X_MAL_VF_DETECT_STATE_TRAVERSE, + NE6X_MAL_VF_DETECT_STATE_DETECT, + NE6X_MAL_VF_DETECT_STATE_INVALID = 0xF, +}; + +struct ne6x_mbx_snapshot { + enum ne6x_mbx_snapshot_state state; + struct ne6x_mbx_vf_counter mbx_vf; +}; + +enum virtchnl_vf_config_codes { + VIRTCHNL_VF_CONFIG_TRUST = 0, + VIRTCHNL_VF_CONFIG_FORCE_LINK = 1, +}; + +struct virtchnl_vf_config { + u8 type; + u8 data[5]; +}; + +enum ne6x_adapter_state { + NE6X_ADPT_DOWN, + NE6X_ADPT_NEEDS_RESTART, + NE6X_ADPT_NETDEV_ALLOCD, + NE6X_ADPT_NETDEV_REGISTERED, + NE6X_ADPT_UMAC_FLTR_CHANGED, + NE6X_ADPT_MMAC_FLTR_CHANGED, + NE6X_ADPT_VLAN_FLTR_CHANGED, + NE6X_ADPT_PROMISC_CHANGED, + NE6X_ADPT_RELEASING, + NE6X_ADPT_RECOVER, + NE6X_ADPT_DOWN_REQUESTED, + NE6X_ADPT_OPEN, + NE6X_ADPT_NBITS /* must be last */ +}; + +struct ne6x_adapt_comm { + u16 port_info; + DECLARE_BITMAP(state, NE6X_ADPT_NBITS); +}; + +struct ne6x_vlan { + u16 tpid; + u16 vid; + u8 prio; +}; + +struct ne6x_vf_vlan { + u16 vid; + u16 tpid; +}; + +struct ne6x_macvlan { + struct list_head list; + struct net_device *vdev; + u8 mac[ETH_ALEN]; +}; + +/* values for UPT1_RSSConf.hashFunc */ +enum { + NE6X_RSS_HASH_TYPE_NONE = 0x0, + NE6X_RSS_HASH_TYPE_IPV4 = 0x01, + NE6X_RSS_HASH_TYPE_IPV4_TCP = 0x02, + NE6X_RSS_HASH_TYPE_IPV6 = 0x04, + NE6X_RSS_HASH_TYPE_IPV6_TCP = 0x08, + NE6X_RSS_HASH_TYPE_IPV4_UDP = 0x10, + NE6X_RSS_HASH_TYPE_IPV6_UDP = 0x20, +}; + +enum { + NE6X_RSS_HASH_FUNC_NONE = 0x0, + NE6X_RSS_HASH_FUNC_TOEPLITZ = 0x01, +}; + +#define NE6X_RSS_MAX_KEY_SIZE 40 +#define NE6X_RSS_MAX_IND_TABLE_SIZE 128 + +struct ne6x_rss_info { + u16 hash_type; + u16 hash_func; + u16 hash_key_size; + u16 ind_table_size; + u8 hash_key[NE6X_RSS_MAX_KEY_SIZE]; + u8 ind_table[NE6X_RSS_MAX_IND_TABLE_SIZE]; +}; + +#define NE6X_VF_VLAN(vid, tpid) ((struct ne6x_vf_vlan){vid, tpid}) + +#ifndef readq +static inline u64 readq(void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/feature.h b/drivers/net/ethernet/bzwx/nce/comm/feature.h new file mode 100644 index 0000000000000000000000000000000000000000..482b4d2d1d3993068d08702eae297ae4e2cb5995 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/feature.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. 
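+ *
+ * Three independent bit namespaces: adapter offload capabilities
+ * (NE6X_F_RSS ... NE6X_F_TX_UDP_TNL_CSUM), port feature flags
+ * (NE6X_F_*_ENABLED) and DDoS flood-detection flags (NE6X_F_*_FLOOD).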
*/ + +#ifndef _NE6X_FEATURE_H +#define _NE6X_FEATURE_H + +#define NE6X_F_RSS BIT(0) +#define NE6X_F_PROMISC BIT(1) +#define NE6X_F_RX_IPV4_CKSUM BIT(2) +#define NE6X_F_RX_UDP_CKSUM BIT(3) +#define NE6X_F_RX_TCP_CKSUM BIT(4) +#define NE6X_F_RX_SCTP_CKSUM BIT(5) +#define NE6X_F_RX_VLAN_STRIP BIT(6) +#define NE6X_F_RX_QINQ_STRIP BIT(7) +#define NE6X_F_RX_VLAN_FILTER BIT(8) +#define NE6X_F_LRO BIT(9) +#define NE6X_F_RX_DISABLE BIT(10) +#define NE6X_F_RX_FW_LLDP BIT(11) +#define NE6X_F_RX_ALLMULTI BIT(12) +#define NE6X_F_FLOW_STEERING BIT(15) +#define NE6X_F_TX_VLAN BIT(16) +#define NE6X_F_TX_IP_CKSUM BIT(17) +#define NE6X_F_TX_TCP_CKSUM BIT(18) +#define NE6X_F_TX_UDP_CKSUM BIT(19) +#define NE6X_F_TX_SCTP_CKSUM BIT(20) +#define NE6X_F_TX_TCP_SEG BIT(21) +#define NE6X_F_TX_UDP_SEG BIT(22) +#define NE6X_F_TX_QINQ BIT(23) +#define NE6X_F_TX_NIC_SWITCH BIT(24) +#define NE6X_F_TX_MAC_LEARN BIT(25) +#define NE6X_F_TX_DISABLE BIT(26) +#define NE6X_F_TX_QOSBANDWIDTH BIT(27) +#define NE6X_F_TX_UDP_TNL_SEG BIT(28) +#define NE6X_F_TX_UDP_TNL_CSUM BIT(29) + +#define NE6X_OFFLOAD_RSS NE6X_F_RSS +#define NE6X_OFFLOAD_RXCSUM (NE6X_F_RX_IPV4_CKSUM | \ + NE6X_F_RX_UDP_CKSUM | \ + NE6X_F_RX_TCP_CKSUM | \ + NE6X_F_RX_SCTP_CKSUM) +#define NE6X_OFFLOAD_TXCSUM (NE6X_F_TX_IP_CKSUM | \ + NE6X_F_TX_TCP_CKSUM | \ + NE6X_F_TX_UDP_CKSUM | \ + NE6X_F_TX_UDP_TNL_CSUM) + +#define NE6X_OFFLOAD_LRO NE6X_F_LRO +#define NE6X_OFFLOAD_TSO NE6X_F_TX_TCP_SEG +#define NE6X_OFFLOAD_UFO NE6X_F_TX_UDP_SEG +#define NE6X_OFFLOAD_SCTP_CSUM NE6X_F_TX_SCTP_CKSUM + +#define NE6X_OFFLOAD_RXD_VLAN (NE6X_F_RX_VLAN_STRIP | \ + NE6X_F_RX_QINQ_STRIP | \ + NE6X_F_RX_VLAN_FILTER) +#define NE6X_OFFLOAD_TXD_VLAN (NE6X_F_TX_VLAN | NE6X_F_TX_QINQ) +#define NE6X_OFFLOAD_L2 NE6X_F_TX_NIC_SWITCH + +#define NE6X_F_SMART_ENABLED BIT(0) +#define NE6X_F_SRIOV_ENABLED BIT(1) +#define NE6X_F_SWITCH_ENABLED BIT(2) +#define NE6X_F_L2FDB_LEARN_ENABLED BIT(3) +#define NE6X_F_VLAN_ENABLED BIT(4) +#define NE6X_F_WHITELIST_ENABLED BIT(5) +#define NE6X_F_DDOS_ENABLED BIT(6) +#define NE6X_F_TRUST_VLAN_ENABLED BIT(7) +#define NE6X_F_S_ROCE_ICRC_ENABLED BIT(8) + +#define NE6X_F_ACK_FLOOD BIT(0) +#define NE6X_F_PUSH_ACK_FLOOD BIT(1) +#define NE6X_F_SYN_ACK_FLOOD BIT(2) +#define NE6X_F_FIN_FLOOD BIT(3) +#define NE6X_F_RST_FLOOD BIT(4) +#define NE6X_F_PUSH_SYN_ACK_FLOOD BIT(5) +#define NE6X_F_UDP_FLOOD BIT(6) +#define NE6X_F_ICMP_FLOOD BIT(7) +#define NE6X_F_FRAGMENT_FLOOD BIT(8) + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/mailbox.h b/drivers/net/ethernet/bzwx/nce/comm/mailbox.h new file mode 100644 index 0000000000000000000000000000000000000000..85ae76b1321fe68691f37bc5c1a8b00061185112 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/mailbox.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. 
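+ *
+ * PF/VF mailbox protocol: virtchnl opcodes, status codes and string
+ * helpers for logging them.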
*/ + +#ifndef _NE6X_COMM_MAILBOX_H +#define _NE6X_COMM_MAILBOX_H + +enum virtchnl_ops { + VIRTCHNL_OP_UNKNOWN = 0, + VIRTCHNL_OP_VERSION = 1, + VIRTCHNL_OP_RESET_VF = 2, + VIRTCHNL_OP_GET_VF_RESOURCES = 3, + VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + VIRTCHNL_OP_CONFIG_ADPT_QUEUES = 6, + VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + VIRTCHNL_OP_ENABLE_QUEUES = 8, + VIRTCHNL_OP_DISABLE_QUEUES = 9, + VIRTCHNL_OP_ADD_ETH_ADDR = 10, + VIRTCHNL_OP_DEL_ETH_ADDR = 11, + VIRTCHNL_OP_ADD_VLAN = 12, + VIRTCHNL_OP_DEL_VLAN = 13, + /* promiscuous mode / unicast promisc / multicast promisc */ + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + VIRTCHNL_OP_EVENT = 17, /* link state */ + VIRTCHNL_OP_SET_VF_ADDR = 18, + VIRTCHNL_OP_VF_CONFIG = 19, + VIRTCHNL_OP_CONFIG_OFFLOAD = 27, + VIRTCHNL_OP_GET_VF_FEATURE = 28, + VIRTCHNL_OP_REQUEST_QUEUES = 29, + VIRTCHNL_OP_CONFIG_RSS = 30, + VIRTCHNL_OP_GET_PORT_STATUS = 31, + VIRTCHNL_OP_CHANGED_RSS = 32, + VIRTCHNL_OP_SET_VF_STATE = 33, + VIRTCHNL_OP_SET_FAST_MDOE = 34, + VIRTCHNL_OP_CONFIG_VLAN = 40, + VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD = 41, + VIRTCHNL_OP_CONFIG_MTU = 42, + VIRTCHNL_OP_CONFIG_FLOW_CTRL = 43, + + VIRTCHNL_OP_MAX, +}; + +static char local_error_buffer[64]; +static inline const char *ne6x_opcode_str(enum virtchnl_ops opcode) +{ + sprintf(local_error_buffer, "__OPCODE_UNKNOWN_OPCODE(%d)", opcode); + switch (opcode) { + case VIRTCHNL_OP_VERSION: + return "__OPCODE_GET_VERSION"; + case VIRTCHNL_OP_RESET_VF: + return "__OPCODE_RESET_VF"; + case VIRTCHNL_OP_GET_VF_RESOURCES: + return "__OPCODE_GET_VF_RESOURCES"; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + return "__OPCODE_CONFIG_TX_QUEUE"; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + return "__OPCODE_INIT_EXTENDED_CAPS"; + case VIRTCHNL_OP_CONFIG_ADPT_QUEUES: + return "__OPCODE_CONFIG_ADPT_QUEUES"; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + return "__OPCODE_CONFIG_IRQ_MAP"; + case VIRTCHNL_OP_ENABLE_QUEUES: + return "__OPCODE_ENABLE_QUEUES"; + case VIRTCHNL_OP_DISABLE_QUEUES: + return "__OPCODE_DISABLE_QUEUES"; + case VIRTCHNL_OP_ADD_ETH_ADDR: + return "__OPCODE_ADD_ETH_ADDR"; + case VIRTCHNL_OP_DEL_ETH_ADDR: + return "__OPCODE_DEL_ETH_ADDR"; + case VIRTCHNL_OP_ADD_VLAN: + return "__OPCODE_ADD_VLAN"; + case VIRTCHNL_OP_DEL_VLAN: + return "__OPCODE_DEL_VLAN"; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + return "__OPCODE_CONFIG_PROMISCUOUS_MODE"; + case VIRTCHNL_OP_EVENT: + return "__OPCODE_EVENT"; + case VIRTCHNL_OP_CONFIG_RSS: + return "__OPCODE_CONFIG_RSS"; + case VIRTCHNL_OP_CHANGED_RSS: + return "__OP_CHANGED_RSS"; + case VIRTCHNL_OP_CONFIG_OFFLOAD: + return "__OPCODE_CONFIGURE_OFFLOAD"; + case VIRTCHNL_OP_GET_VF_FEATURE: + return "VIRTCHNL_OP_GET_VF_FEATURE"; + case VIRTCHNL_OP_REQUEST_QUEUES: + return "__OPCODE_REQUEST_QUEUES"; + case VIRTCHNL_OP_GET_PORT_STATUS: + return "__OP_GET_PORT_STATUS"; + case VIRTCHNL_OP_SET_VF_ADDR: + return "__OPCODE_SET_VF_ADDR"; + case VIRTCHNL_OP_VF_CONFIG: + return "__VIRTCHNL_OP_VF_CONFIG"; + case VIRTCHNL_OP_SET_VF_STATE: + return "__VIRTCHNL_OP_SET_VF_STATE"; + case VIRTCHNL_OP_SET_FAST_MDOE: + return "__VIRTCHNL_OP_SET_FAST_MDOE"; + case VIRTCHNL_OP_CONFIG_VLAN: + return "__VIRTCHNL_OP_CONFIG_VLAN"; + case VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD: + return "__VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD"; + case VIRTCHNL_OP_CONFIG_MTU: + return "__VIRTCHNL_OP_CONFIG_MTU"; + case VIRTCHNL_OP_CONFIG_FLOW_CTRL: + return "__VIRTCHNL_OP_CONFIG_FLOW_CTRL"; + default: + return local_error_buffer; + } +} + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + 
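+ /* all failure codes are negative; 0 means success */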
VIRTCHNL_STATUS_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, +}; + +static inline const char *ne6x_mbox_status_str(enum virtchnl_status_code opcode) +{ + switch (opcode) { + case VIRTCHNL_STATUS_SUCCESS: + return "__STATUS_SUCCESS"; + case VIRTCHNL_STATUS_ERR_PARAM: + return "__STATUS_ERR_PARAM"; + case VIRTCHNL_STATUS_ERR_NO_MEMORY: + return "__STATUS_ERR_NO_MEMORY"; + case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: + return "__STATUS_ERR_OPCODE_MISMATCH"; + case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: + return "__STATUS_ERR_CQP_COMPL_ERROR"; + case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: + return "__STATUS_ERR_INVALID_VF_ID"; + case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR: + return "__STATUS_ERR_ADMIN_QUEUE_ERROR"; + case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED: + return "__STATUS_ERR_NOT_SUPPORTED"; + default: + return "__STATUS_UNKNOWN"; + } +} + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/reg.h b/drivers/net/ethernet/bzwx/nce/comm/reg.h new file mode 100644 index 0000000000000000000000000000000000000000..15a745bb06f3b04f5a706701c5b2632c7c3a81ef --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/reg.h @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_COMM_REG_H +#define _NE6X_COMM_REG_H + +#include + +#define NE6X_BAR2_VP_TDQ(__vp, __reg) \ + ((((__vp) & 0x7f) << 12) | (0 << 11) | (((__reg) & 0xff) << 3)) +#define NE6X_BAR2_VP_RDQ(__vp, __reg) \ + ((((__vp) & 0x7f) << 12) | (1 << 11) | (((__reg) & 0xff) << 3)) + +/* CIU */ +#define NE6X_VP_BASE_ADDR 0x0 +#define NE6X_VPINT_DYN_CTLN(_VPID, _OFFSET) \ + (((_VPID) << 12) + ((_OFFSET) << 4)) /* _i=0...64 * Reset: PFR */ +#define NE6X_PF_BASE_ADDR 0x138ULL +#define NE6X_PFINT_DYN_CTLN(_PFID, _OFFSET) \ + (((NE6X_PF_BASE_ADDR + (_PFID)) << 12) + ((_OFFSET) << 4)) + /* _i=0...7 */ /* Reset: PFR */ + +#define NE6X_VP_INT 0x00 +#define NE6X_VP_INT_SET 0x01 +#define NE6X_VP_INT_MASK 0x02 +#define NE6X_VP_CQ_INTSHIFT 16 +#define NE6X_CQ_BASE_ADDR 0x03 +#define NE6X_CQ_HD_POINTER 0x04 +#define NE6X_CQ_CFG 0x05 +#define NE6X_RQ_BASE_ADDR 0x07 +#define NE6X_RQ_CFG 0x08 +#define NE6X_RQ_TAIL_POINTER 0x09 +#define NE6X_VP_RELOAD 0x0a +#define NE6X_SQ_BASE_ADDR 0x0b +#define NE6X_SQ_CFG 0x0c +#define NE6X_SQ_TAIL_POINTER 0x0d +#define NE6X_CQ_TAIL_POINTER 0x11 +#define NE6X_RQ_BUFF_OFST 0x12 +#define NE6X_RQ_HD_POINTER 0x13 +#define NE6X_SQ_BUFF_OFST 0x14 +#define NE6X_SQ_HD_POINTER 0x15 +#define NE6X_RQ_OFST 0x16 +#define NE6X_SQ_OFST 0x17 +#define NE6X_RQ_BLOCK_CFG 0x1b +#define NE6X_SQ_METER_CFG0 0x1c +#define NE6X_SQ_METER_CFG1 0x1d +#define NE6X_SQ_METER_CFG2 0x1e +#define NE6X_SQ_METER_CFG3 0x1f +#define NE6X_INT_CFG 0x21 +#define NE6X_CIU_TIME_OUT_CFG 0x45 +#define NE6X_ALL_CQ_CFG 0x46 +#define NE6X_ALL_SQ_CFG 0x47 +#define NE6X_ALL_RQ_CFG 0x48 +#define NE6X_MERGE_CFG 0x49 +#define NE6X_BFD_RECV_CNT 0x4a +#define NE6X_ETH_RECV_CNT 0x4b + +#define NE6X_PF_CON_ADDR(_OFST) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((_OFST) << 4)) +#define NE6X_PF_MAILBOX_DATA 0x40 +#define NE6X_VF_MAILBOX_DATA 0x80 +#define NE6X_PF_MAILBOX_ADDR(_VP) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((NE6X_PF_MAILBOX_DATA + (_VP)) << 4)) +#define NE6X_VF_MAILBOX_ADDR(_VP) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((NE6X_VF_MAILBOX_DATA + (_VP)) << 4)) +#define 
NE6X_PF_DB_INT_REQ 0xC0 +#define NE6X_PF_DB_INT_ACK 0xC1 +#define NE6X_PF_DB_DREQ_INT 0xC2 +#define NE6X_PF_DB_DREQ_INT_SET 0xC3 +#define NE6X_PF_DB_DREQ_INT_MASK 0xC4 +#define NE6X_PF_DB_DACK_INT 0xC5 +#define NE6X_PF_DB_DACK_INT_SET 0xC6 +#define NE6X_PF_DB_DACK_INT_MASK 0xC7 + +union ne6x_vp_int { + struct vp_int { + u64 csr_ciu_int_vp : 64; + } reg; + u64 val; +}; + +union ne6x_vp_int_mask { + struct vp_int_mask { + u64 csr_ciu_mask_vp : 64; + } reg; + u64 val; +}; + +union ne6x_cq_base_addr { + struct cq_base_addr { + u64 csr_cq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_cq_cfg { + struct cq_cfg { + u64 csr_cq_len_vp : 16; + u64 csr_cq_merge_time_vp : 16; + u64 csr_cq_merge_size_vp : 4; + u64 rsv0 : 28; + } reg; + u64 val; +}; + +union ne6x_rq_base_addr { + struct rq_base_addr { + u64 csr_rq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_rq_cfg { + struct rq_cfg { + u64 csr_rq_len_vp : 16; + u64 csr_rdq_pull_en : 1; + u64 csr_rqevt_write_back_vp : 1; + u64 csr_recv_pd_type_vp : 2; + u64 csr_recv_pd_revers_en : 1; + u64 rsv0 : 11; + u64 rsv1 : 32; + } reg; + u64 val; +}; + +union ne6x_sq_base_addr { + struct sq_base_addr { + u64 csr_sq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_sq_cfg { + struct sq_cfg { + u64 csr_sq_len_vp : 16; + u64 csr_tdq_pull_en : 1; + u64 csr_sqevt_write_back_vp : 1; + u64 csr_send_pd_revers_en : 1; + u64 rsv0 : 13; + u64 rsv1 : 32; + } reg; + u64 val; +}; + +union ne6x_rq_block_cfg { + struct rq_block_cfg { + u64 csr_rdq_mop_len : 16; + u64 csr_rdq_sop_len : 16; + u64 rsv0 : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg0 { + struct sq_meter_cfg0 { + u64 csr_meter_pkt_token_num_vp : 16; + u64 csr_meter_ipg_len_vp : 8; + u64 csr_meter_refresh_en_vp : 1; + u64 csr_meter_rate_limit_en_vp : 1; + u64 csr_meter_packet_mode_vp : 1; + u64 reserved : 37; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg1 { + struct sq_meter_cfg1 { + u64 csr_meter_refresh_count_vp : 28; + u64 reserved : 4; + u64 csr_meter_refresh_interval_vp : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg2 { + struct sq_meter_cfg2 { + u64 csr_meter_resume_threshold_vp : 32; + u64 reserved : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg3 { + struct sq_meter_cfg3 { + u64 csr_meter_pause_threshold_vp : 32; + u64 reserved : 32; + } reg; + u64 val; +}; + +union ne6x_int_cfg { + struct int_cfg { + u64 csr_sq_hdle_half_int_cnt_vp : 16; + u64 csr_rq_hdle_half_int_cnt_vp : 16; + u64 csr_cq_hdle_half_int_cnt_vp : 16; + u64 rsv0 : 16; + } reg; + u64 val; +}; + +union ne6x_ciu_time_out_cfg { + struct ciu_time_out_cfg { + u64 csr_int_timer_out_cnt : 12; + u64 rsv0 : 52; + } reg; + u64 val; +}; + +union ne6x_all_cq_cfg { + struct all_cq_cfg { + u64 csr_allcq_merge_size : 4; + u64 rsv0 : 4; + u64 csr_allcq_wt_rr_cnt : 7; + u64 csr_allcq_wt_rr_flag : 1; + u64 rsv1 : 48; + } reg; + u64 val; +}; + +union ne6x_all_sq_cfg { + struct all_sq_cfg { + u64 csr_allsq_wb_trigger_info : 8; + u64 csr_allsq_csum_zero_negate : 1; + u64 csr_allsq_pull_merge_cfg : 5; + u64 rsv0 : 50; + } reg; + u64 val; +}; + +union ne6x_all_rq_cfg { + struct all_rq_cfg { + u64 csr_allrq_wb_trigger_info : 8; + u64 csr_allrq_pull_merge_cfg : 5; + u64 rsv0 : 51; + } reg; + u64 val; +}; + +union ne6x_merge_cfg { + struct merge_cfg { + u64 csr_merge_clk_cnt : 16; + u64 rsv0 : 48; + } reg; + u64 val; +}; + +union ne6x_eth_recv_cnt { + struct eth_recv_cnt { + u64 csr_eth_pkt_drop_cnt : 32; + u64 csr_eth_rdq_drop_cnt : 32; + } reg; + u64 val; +}; + +#endif diff --git 
a/drivers/net/ethernet/bzwx/nce/comm/txrx.c b/drivers/net/ethernet/bzwx/nce/comm/txrx.c new file mode 100644 index 0000000000000000000000000000000000000000..d5b386250835f1ea5da6f9df0673ad59bccc2329 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/txrx.c @@ -0,0 +1,1556 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "txrx.h" + +int ne6x_setup_tx_descriptors(struct ne6x_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int bi_size; + + if (!dev) + return -ENOMEM; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(tx_ring->tx_buf); + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + tx_ring->tx_buf = kzalloc(bi_size, GFP_KERNEL); + if (!tx_ring->tx_buf) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct ne6x_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_ring->size); + goto err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + return 0; + +err: + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + + return -ENOMEM; +} + +int ne6x_setup_cq_descriptors(struct ne6x_ring *cq_ring) +{ + struct device *dev = cq_ring->dev; + + if (!dev) + return -ENOMEM; + + /* round up to nearest 4K */ + cq_ring->size = cq_ring->count * sizeof(struct ne6x_cq_desc); + cq_ring->size = ALIGN(cq_ring->size, 4096); + cq_ring->desc = dma_alloc_coherent(dev, cq_ring->size, &cq_ring->dma, GFP_KERNEL); + if (!cq_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + cq_ring->size); + goto err; + } + + cq_ring->next_to_use = 0; + cq_ring->next_to_clean = 0; + + return 0; + +err: + return -ENOMEM; +} + +int ne6x_setup_tg_descriptors(struct ne6x_ring *tg_ring) +{ + struct device *dev = tg_ring->dev; + + if (!dev) + return -ENOMEM; + + /* round up to nearest 4K */ + tg_ring->size = tg_ring->count * sizeof(struct ne6x_tx_tag); + tg_ring->size = ALIGN(tg_ring->size, 4096); + tg_ring->desc = dma_alloc_coherent(dev, tg_ring->size, &tg_ring->dma, GFP_KERNEL); + if (!tg_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tg_ring->size); + goto err; + } + + tg_ring->next_to_use = 0; + tg_ring->next_to_clean = 0; + + return 0; + +err: + return -ENOMEM; +} + +int ne6x_setup_rx_descriptors(struct ne6x_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int err = -ENOMEM; + int bi_size; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(rx_ring->rx_buf); + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + rx_ring->rx_buf = kzalloc(bi_size, GFP_KERNEL); + if (!rx_ring->rx_buf) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ne6x_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; + + return 0; + +err: + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + return err; +} + +int ne6x_setup_tx_sgl(struct ne6x_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + + if (!dev) + return -ENOMEM; 
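+ /* per-ring scratch area used when building scatter-gather mappings */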
+ tx_ring->sgl = kzalloc(sizeof(*tx_ring->sgl), GFP_KERNEL); + + if (!tx_ring->sgl) + goto err; + + return 0; +err: + return -ENOMEM; +} + +static inline unsigned int ne6x_txd_use_count(unsigned int size) +{ + return ((size * 85) >> 20) + 1; +} + +bool __ne6x_chk_linearize(struct sk_buff *skb); +static inline bool ne6x_chk_linearize(struct sk_buff *skb, int count) +{ + /* Both TSO and single send will work if count is less than 8 */ + if (likely(count < NE6X_MAX_BUFFER_TXD)) + return false; + + if (skb_is_gso(skb)) + return __ne6x_chk_linearize(skb); + + /* we can support up to 8 data buffers for a single send */ + return count != NE6X_MAX_BUFFER_TXD; +} + +int __ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size); + +static inline int ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size) +{ + if (likely(NE6X_DESC_UNUSED(tx_ring) >= size)) + return 0; + + return __ne6x_maybe_stop_tx(tx_ring, size); +} + +static inline bool ne6x_rx_is_programming_status(u8 status) +{ + return status & 0x20; +} + +static void ne6x_reuse_rx_page(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *old_buff) +{ + u16 nta = rx_ring->next_to_alloc; + struct ne6x_rx_buf *new_buff; + + new_buff = &rx_ring->rx_buf[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static void ne6x_clean_programming_status(struct ne6x_ring *rx_ring, + union ne6x_rx_desc *rx_desc, + u8 status) +{ + u32 ntc = rx_ring->next_to_clean; + struct ne6x_rx_buf *rx_buffer; + + /* fetch, update, and store next to clean */ + rx_buffer = &rx_ring->rx_buf[ntc++]; + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NE6X_RX_DESC(rx_ring, ntc)); + + /* place unused page back on the ring */ + ne6x_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; +} + +static struct ne6x_rx_buf *ne6x_get_rx_buffer(struct ne6x_ring *rx_ring, const unsigned int size) +{ + struct ne6x_rx_buf *rx_buffer; + + rx_buffer = &rx_ring->rx_buf[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, size, + DMA_FROM_DEVICE); + + /* We have pulled a buffer for use, so decrement pagecnt_bias */ + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void ne6x_add_rx_frag(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *rx_buffer, + struct sk_buff *skb, unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = ne6x_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, + size, truesize); + + /* page is being used so we must update the page offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *ne6x_construct_skb(struct ne6x_ring *rx_ring, + struct ne6x_rx_buf *rx_buffer, + unsigned int size) +{ + void *page_addr = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ne6x_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(size); +#endif + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES)); +#endif + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, NE6X_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > NE6X_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, page_addr, NE6X_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), page_addr, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, rx_buffer->page_offset + headlen, size, + truesize); + + /* buffer is used by skb, update page_offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + /* buffer is unused, reset bias back to rx_buffer */ + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static inline bool ne6x_page_is_reusable(struct page *page) +{ + return (page_to_nid(page) == numa_mem_id()) && !page_is_pfmemalloc(page); +} + +static bool ne6x_can_reuse_rx_page(struct ne6x_rx_buf *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* Is any reuse possible? 
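+	 * The page must still be on the local NUMA node and must not come
+	 * from the pfmemalloc emergency reserve; both conditions are
+	 * checked by ne6x_page_is_reusable() below.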
*/ + if (unlikely(!ne6x_page_is_reusable(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_count(page) - pagecnt_bias) > 1)) + return false; +#else +#define NE6X_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE) - NE6X_RXBUFFER_4096) + if (rx_buffer->page_offset > NE6X_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +static void ne6x_put_rx_buffer(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *rx_buffer) +{ + if (ne6x_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + ne6x_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; +} + +static inline bool ne6x_test_staterr(union ne6x_rx_desc *rx_desc, const u8 stat_err_bits) +{ + return !!(rx_desc->wb.u.val & stat_err_bits); +} + +static bool ne6x_is_non_eop(struct ne6x_ring *rx_ring, union ne6x_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NE6X_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ +#define NE6X_RXD_EOF BIT(NE6X_RX_DESC_STATUS_EOF_SHIFT) + if (likely(ne6x_test_staterr(rx_desc, NE6X_RXD_EOF))) + return false; + + rx_ring->rx_stats.non_eop_descs++; + rx_desc->wb.u.val = 0; + + return true; +} + +static bool ne6x_cleanup_headers(struct ne6x_ring *rx_ring, struct sk_buff *skb, + union ne6x_rx_desc *rx_desc) +{ + if (unlikely(ne6x_test_staterr(rx_desc, BIT(NE6X_RX_DESC_STATUS_ERR_SHIFT)))) { + dev_kfree_skb_any(skb); + rx_ring->rx_stats.rx_mem_error++; + return true; + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static inline void ne6x_rx_hash(struct ne6x_ring *ring, union ne6x_rx_desc *rx_desc, + struct sk_buff *skb, struct rx_hdr_info *rx_hdr) +{ + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + if (rx_hdr->ol_flag.flag_bits.rx_rss_hash) + skb_set_hash(skb, rx_hdr->rss_hash, PKT_HASH_TYPE_NONE); +} + +static inline void ne6x_rx_checksum(struct ne6x_ring *rx_ring, struct sk_buff *skb, + union ne6x_rx_desc *rx_desc, + struct rx_hdr_info *rx_hdr) +{ + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 0; + skb_checksum_none_assert(skb); + + if (!(rx_ring->netdev->features & NETIF_F_RXCSUM)) + return; + + if (rx_hdr->ol_flag.flag_bits.rx_ip_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_l4_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_inner_ip_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_inner_l4_cksum_bad) { + rx_ring->rx_stats.csum_err++; + } else if (rx_hdr->ol_flag.flag_bits.rx_ip_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_l4_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_inner_ip_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_inner_l4_cksum_good) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + 
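+		/* csum_level 1 tells the stack that one additional (inner)
+		 * checksum has been verified beyond the outermost one.
+		 */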
skb->csum_level = 1; + } +} + +static inline void ne6x_process_skb_fields(struct ne6x_ring *rx_ring, + union ne6x_rx_desc *rx_desc, + struct sk_buff *skb, + struct rx_hdr_info *rx_hdr) +{ + netdev_features_t features = rx_ring->netdev->features; + bool non_zero_vlan = false; + + ne6x_rx_hash(rx_ring, rx_desc, skb, rx_hdr); + rx_hdr->vlan_tci = ntohs(rx_hdr->vlan_tci); + rx_hdr->vlan_tci_outer = ntohs(rx_hdr->vlan_tci_outer); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + if (rx_hdr->ol_flag.flag_bits.rx_vlan_striped) { + non_zero_vlan = !!(rx_hdr->vlan_tci_outer & VLAN_VID_MASK); + if (non_zero_vlan) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + (rx_hdr->vlan_tci_outer)); + } + } + } else if (features & NETIF_F_HW_VLAN_STAG_RX) { + if (rx_hdr->ol_flag.flag_bits.rx_qinq_striped) { + non_zero_vlan = !!(rx_hdr->vlan_tci_outer & VLAN_VID_MASK); + if (non_zero_vlan) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), + (rx_hdr->vlan_tci_outer)); + } + } + } + + ne6x_rx_checksum(rx_ring, skb, rx_desc, rx_hdr); + skb_record_rx_queue(skb, rx_ring->queue_index); + + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void ne6x_receive_skb(struct ne6x_ring *rx_ring, struct sk_buff *skb) +{ + struct ne6x_q_vector *q_vector = rx_ring->q_vector; + + napi_gro_receive(&q_vector->napi, skb); +} + +static bool ne6x_alloc_mapped_page(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } + + /* alloc new page for storage */ + page = dev_alloc_pages(ne6x_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, ne6x_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + NE6X_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, ne6x_rx_pg_order(rx_ring)); + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +void ne6x_tail_update(struct ne6x_ring *ring, int val) +{ + int i; + + for (i = 0; i < NE6X_TAIL_REG_NUM; i++) + writeq(val, ring->tail + i); +} + +static inline void ne6x_release_rx_desc(struct ne6x_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
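+	 * The barrier orders the descriptor writes above against the
+	 * replicated doorbell write issued by ne6x_tail_update() below.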
+ */ + wmb(); + ne6x_tail_update(rx_ring, val); +} + +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count) +{ + u16 ntu = rx_ring->next_to_use; + union ne6x_rx_desc *rx_desc; + struct ne6x_rx_buf *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return false; + + rx_desc = NE6X_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_buf[ntu]; + + do { + if (!ne6x_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->wb.u.val = 0; + rx_desc->w.buffer_mop_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->w.buffer_sop_addr = 0; + rx_desc->w.mop_mem_len = rx_ring->rx_buf_len; + rx_desc->wb.pkt_len = 0; + rx_desc->w.vp = rx_ring->reg_idx; + + rx_desc++; + bi++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = NE6X_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buf; + ntu = 0; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.u.val = 0; + + cleaned_count--; + } while (cleaned_count); + + if (rx_ring->next_to_use != ntu) + ne6x_release_rx_desc(rx_ring, ntu); + + return false; + +no_buffers: + if (rx_ring->next_to_use != ntu) + ne6x_release_rx_desc(rx_ring, ntu); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; +} + +static void ne6x_get_rx_head_info(struct sk_buff *skb, struct rx_hdr_info *rx_hdr) +{ + skb_frag_t *frag; + void *page_addr; + u32 temp_len, i; + + if (skb->data_len == 0) { + memcpy(rx_hdr, &skb->data[skb->len - 16], sizeof(struct rx_hdr_info)); + } else { + if (skb_shinfo(skb)->nr_frags > 1) { + i = skb_shinfo(skb)->nr_frags - 1; + frag = &skb_shinfo(skb)->frags[i]; + if (skb_frag_size(frag) >= 16) { + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } else if (skb_frag_size(frag) > 4) { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + memcpy((char *)rx_hdr + 16 - temp_len, page_addr, temp_len - 4); + frag = &skb_shinfo(skb)->frags[i - 1]; + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16 + + temp_len; + memcpy(rx_hdr, page_addr, 16 - temp_len); + } else { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + frag = &skb_shinfo(skb)->frags[i - 1]; + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16 + + temp_len; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } + } else { + frag = &skb_shinfo(skb)->frags[0]; + if (skb_frag_size(frag) >= 16) { + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } else if (skb_frag_size(frag) > 4) { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + memcpy((char *)rx_hdr + 16 - temp_len, page_addr, temp_len - 4); + page_addr = &skb->data[skb->len - skb->data_len - 16 + temp_len]; + memcpy(rx_hdr, page_addr, 16 - temp_len); + } else { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + page_addr = &skb->data[skb->len - skb->data_len - 16 + temp_len]; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } + } + } +} + +static void ne6x_clean_tx_desc(struct ne6x_tx_desc *tx_desc, struct ne6x_ring *ring) +{ + if (tx_desc->u.flags.tx_drop_addr) + 
ring->tx_stats.tx_drop_addr++; + + if (tx_desc->u.flags.tx_ecc_err) + ring->tx_stats.tx_ecc_err++; + + if (tx_desc->u.flags.tx_pcie_read_err) { + ring->tx_stats.tx_pcie_read_err++; + dev_info(ring->dev, "**** tx_desc: flag[0x%x], vp[%d], et[%d], ch[%d], tt[%d], sopv[%d], eopv[%d], tso[%d], l3chk[%d], l3oft[%d], l4chk[%d], l4oft[%d], pld[%d], mop[%d], sop[%d], mss[%d],mopa[%lld],sopa[%lld]\n", + tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, tx_desc->chain, + tx_desc->transmit_type, tx_desc->sop_valid, tx_desc->eop_valid, + tx_desc->tso, tx_desc->l3_csum, tx_desc->l3_ofst, tx_desc->l4_csum, + tx_desc->l4_ofst, tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, + tx_desc->mss, tx_desc->buffer_mop_addr, tx_desc->buffer_sop_addr); + } + + tx_desc->u.val = 0; + tx_desc->vp = 0; + tx_desc->event_trigger = 0; + tx_desc->chain = 0; + tx_desc->transmit_type = 0; + tx_desc->sop_valid = 0; + tx_desc->eop_valid = 0; + tx_desc->tso = 0; + tx_desc->l3_csum = 0; + tx_desc->l3_ofst = 0; + tx_desc->l4_csum = 0; + tx_desc->l4_ofst = 0; + tx_desc->pld_ofst = 0; + tx_desc->mop_cnt = 0; + tx_desc->sop_cnt = 0; + tx_desc->mss = 0; + tx_desc->buffer_mop_addr = 0; + tx_desc->buffer_sop_addr = 0; +} + +int ne6x_clean_cq_irq(struct ne6x_q_vector *q_vector, struct ne6x_ring *cq_ring, int napi_budget) +{ + struct ne6x_cq_desc *cq_desc = NULL; + struct ne6x_tx_desc *tx_desc = NULL; + struct ne6x_ring *clean_ring = NULL; + union ne6x_rx_desc *rx_desc = NULL; + int i, cq_num, off_idx, ntc; + int budget = napi_budget; + int last_expect = 0; + int total = 0; + + do { + cq_desc = NE6X_CQ_DESC(cq_ring, cq_ring->next_to_use); + cq_num = cq_desc->num; + if (!cq_num) + break; + + dma_rmb(); + cq_ring->stats.packets += cq_num; + + if (cq_desc->ctype) { + clean_ring = q_vector->rx.ring; + last_expect = clean_ring->cq_last_expect; + for (i = 0; i < cq_num; i++) { + off_idx = cq_desc->payload.rx_cq[i].cq_rx_offset; + if (unlikely(off_idx != last_expect)) { + netdev_err(cq_ring->netdev, "ne6xpf: cqrx err, need debug! cq: %d, rx: %d\n", + off_idx, last_expect); + netdev_err(cq_ring->netdev, "ne6xpf: queue: %d, vp: %d, rxq: %d\n", + cq_ring->queue_index, cq_ring->reg_idx, + clean_ring->queue_index); + } + + rx_desc = NE6X_RX_DESC(clean_ring, off_idx); + rx_desc->wb.u.val = cq_desc->payload.rx_cq[i].cq_rx_stats; + rx_desc->wb.pkt_len = cq_desc->payload.rx_cq[i].cq_rx_len; + if (rx_desc->wb.pkt_len > clean_ring->rx_buf_len) { + if (!rx_desc->wb.u.flags.rx_eop) + rx_desc->wb.pkt_len = clean_ring->rx_buf_len; + else + rx_desc->wb.pkt_len = rx_desc->wb.pkt_len % + clean_ring->rx_buf_len ? + rx_desc->wb.pkt_len % + clean_ring->rx_buf_len : + clean_ring->rx_buf_len; + } + + last_expect++; + last_expect = (last_expect < clean_ring->count) ? last_expect : 0; + } + + cq_ring->cq_stats.rx_num += cq_num; + } else { + clean_ring = q_vector->tx.ring; + last_expect = clean_ring->cq_last_expect; + for (i = 0; i < cq_num; i++) { + off_idx = cq_desc->payload.tx_cq[i].cq_tx_offset; + if (unlikely(off_idx != last_expect)) { + netdev_info(cq_ring->netdev, "ne6xpf: cqtx err, need debug! cq: %d, tx: %d\n", + off_idx, last_expect); + netdev_info(cq_ring->netdev, "ne6xpf: queue: %d, vp: %d, txq: %d\n", + cq_ring->queue_index, cq_ring->reg_idx, + clean_ring->queue_index); + } + + tx_desc = NE6X_TX_DESC(clean_ring, off_idx); + tx_desc->u.val = cq_desc->payload.tx_cq[i].cq_tx_stats; + last_expect++; + last_expect = (last_expect < clean_ring->count) ? 
last_expect : 0; + } + + cq_ring->cq_stats.tx_num += cq_num; + } + + clean_ring->cq_last_expect = last_expect; + cq_ring->cq_stats.cq_num++; + + /* clean cq desc */ + cq_desc->num = 0; + ntc = cq_ring->next_to_use + 1; + ntc = (ntc < cq_ring->count) ? ntc : 0; + cq_ring->next_to_use = ntc; + prefetch(NE6X_CQ_DESC(cq_ring, ntc)); + + budget--; + total++; + } while (likely(budget)); + + if (NE6X_DESC_UNUSED(cq_ring) < 1024) { + cq_ring->next_to_clean = cq_ring->next_to_use; + /* memory barrier updating cq ring tail */ + wmb(); + writeq(cq_ring->next_to_clean, cq_ring->tail); + } + + return total; +} + +int ne6x_clean_rx_irq(struct ne6x_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = NE6X_DESC_UNUSED(rx_ring); + struct ne6x_rx_buf *rx_buffer = NULL; + struct sk_buff *skb = rx_ring->skb; + union ne6x_rx_desc *rx_desc = NULL; + struct rx_hdr_info rx_hdr; + bool failure = false; + unsigned int size; + u8 rx_status; + + while (likely(total_rx_packets < (unsigned int)budget)) { + if (cleaned_count >= NE6X_RX_BUFFER_WRITE) { + failure = failure || ne6x_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + rx_desc = NE6X_RX_DESC(rx_ring, rx_ring->next_to_clean); + + rx_status = rx_desc->wb.u.val; + if (!rx_status) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we have + * verified the descriptor has been written back. + */ + dma_rmb(); + + if (unlikely(ne6x_rx_is_programming_status(rx_status))) { + rx_ring->rx_stats.rx_err++; + ne6x_clean_programming_status(rx_ring, rx_desc, rx_status); + cleaned_count++; + continue; + } + + size = rx_desc->wb.pkt_len; + rx_buffer = ne6x_get_rx_buffer(rx_ring, size); + + /* retrieve a buffer from the ring */ + if (skb) + ne6x_add_rx_frag(rx_ring, rx_buffer, skb, size); + else + skb = ne6x_construct_skb(rx_ring, rx_buffer, size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_buf_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + ne6x_put_rx_buffer(rx_ring, rx_buffer); + cleaned_count++; + + if (ne6x_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + if (ne6x_cleanup_headers(rx_ring, skb, rx_desc)) { + skb = NULL; + continue; + } + + ne6x_get_rx_head_info(skb, &rx_hdr); + pskb_trim(skb, skb->len - 16); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + ne6x_process_skb_fields(rx_ring, rx_desc, skb, &rx_hdr); + + ne6x_receive_skb(rx_ring, skb); + skb = NULL; + + rx_desc->wb.u.val = 0; + + /* update budget accounting */ + total_rx_packets++; + } + + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + + /* guarantee a trip back through this routine if there was a failure */ + return failure ? 
budget : (int)total_rx_packets; +} + +int ne6x_clean_tx_irq(struct ne6x_adapt_comm *comm, struct ne6x_ring *tx_ring, int napi_budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct ne6x_tx_desc *eop_desc = NULL; + u16 i = tx_ring->next_to_clean; + struct ne6x_tx_desc *tx_desc; + struct ne6x_tx_buf *tx_buf; + unsigned int budget = 256; + + tx_buf = &tx_ring->tx_buf[i]; + tx_desc = NE6X_TX_DESC(tx_ring, i); + + if (unlikely(tx_buf->jumbo_frame)) { + tx_buf->napi_budget += napi_budget; + if (!tx_buf->jumbo_finsh) + return !!budget; + + napi_budget = tx_buf->napi_budget; + } + + do { + eop_desc = tx_buf->next_to_watch; + if (!eop_desc) + break; + + prefetchw(&tx_buf->skb->users); + + if (!eop_desc->u.val) + break; + + dma_rmb(); + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + tx_buf->jumbo_frame = 0; + tx_buf->jumbo_finsh = 0; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_packets += tx_buf->gso_segs; + + /* free the skb/XDP data */ + ne6x_clean_tx_desc(tx_desc, tx_ring); + + /* free the skb */ + napi_consume_skb(tx_buf->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buf++; + tx_desc++; + i++; + if (i == tx_ring->count) { + i = 0; + tx_buf = tx_ring->tx_buf; + tx_desc = NE6X_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + + /* free the skb/XDP data */ + ne6x_clean_tx_desc(tx_desc, tx_ring); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buf++; + tx_desc++; + i++; + if (i == tx_ring->count) { + i = 0; + tx_buf = tx_ring->tx_buf; + tx_desc = NE6X_TX_DESC(tx_ring, 0); + } + + if (unlikely(tx_buf->jumbo_frame && !tx_buf->jumbo_finsh)) + break; + + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + if (total_packets) { + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + + /* notify netdev of completed buffers */ + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (NE6X_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
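+			 * The smp_mb() here pairs with the barrier in
+			 * __ne6x_maybe_stop_tx() on the transmit side.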
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && + !test_bit(NE6X_ADPT_DOWN, comm->state)) { + netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_q; + } + } + } + + return !!budget; +} + +static inline int ne6x_xmit_descriptor_count(struct sk_buff *skb) +{ + int count = 0; + + count = 1; + count += skb_shinfo(skb)->nr_frags; + + return count; +} + +int __ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(NE6X_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + + return 0; +} + +static inline u16 ne6x_gso_get_seg_hdrlen(struct sk_buff *skb) +{ + u16 gso_hdr_len; + + gso_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (unlikely(skb->encapsulation)) + gso_hdr_len = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + + return gso_hdr_len; +} + +static int ne6x_tso(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + struct sk_buff *skb = first->skb; + u8 hdrlen = 0; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL || !skb_is_gso(skb)) + return 0; + + hdrlen = ne6x_gso_get_seg_hdrlen(skb); + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + /* update gso_segs and bytecount */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * hdrlen; + + ptx_tag->tag_mss = skb_shinfo(skb)->gso_size; + + return 1; +} + +static void ne6x_tx_prepare_vlan_flags(struct ne6x_ring *tx_ring, + struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + struct sk_buff *skb = first->skb; + + /* nothing left to do, software offloaded VLAN */ + if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) + return; + + /* the VLAN ethertype/tpid is determined by adapter configuration and netdev + * feature flags, which the driver only allows either 802.1Q or 802.1ad + * VLAN offloads exclusively so we only care about the VLAN ID here + */ + if (skb_vlan_tag_present(skb)) { + if (tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) + ptx_tag->tag_vlan2 = cpu_to_be16(skb_vlan_tag_get(skb)); + else if (tx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_TX) + ptx_tag->tag_vlan1 = cpu_to_be16(skb_vlan_tag_get(skb)); + } +} + +static int ne6x_tx_csum(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + tx_ring->tx_stats.csum_good++; + return 1; +} + +static inline void ne6x_tx_desc_push(struct ne6x_tx_desc *tx_desc, + dma_addr_t dma, u32 size) +{ + tx_desc->buffer_mop_addr = cpu_to_le64(dma); + tx_desc->mop_cnt = size; + tx_desc->event_trigger = 1; +} + +void ne6x_unmap_and_free_tx_resource(struct ne6x_ring *ring, + struct ne6x_tx_buf *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +static inline void 
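+/* Record one scatter-gather segment: source buffer (or frag), offset and
+ * length. The segment is DMA-mapped later, in ne6x_xmit_jumbo().
+ */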
ne6x_fill_gso_sg(void *p, u16 offset, u16 len, struct ne6x_sg_info *sg) +{ + sg->p = p; + sg->offset = offset; + sg->len = len; +} + +int ne6x_fill_jumbo_sgl(struct ne6x_ring *tx_ring, struct sk_buff *skb) +{ + u16 sg_max_dlen = 0, dlen = 0, len = 0, offset = 0, send_dlen = 0, total_dlen = 0; + u16 subframe = 0, send_subframe = 0, sg_avail = 0, i = 0, j = 0; + u16 gso_hdr_len = ne6x_gso_get_seg_hdrlen(skb); + struct ne6x_sg_list *sgl = tx_ring->sgl; + + WARN_ON(!sgl); + + memset(sgl, 0, sizeof(struct ne6x_sg_list)); + dlen = skb_headlen(skb) - gso_hdr_len; + sgl->mss = skb_shinfo(skb)->gso_size; + sg_max_dlen = NE6X_MAX_DATA_PER_TXD - gso_hdr_len; + sg_max_dlen = ((u16)(sg_max_dlen / sgl->mss)) * sgl->mss; + total_dlen = skb->data_len + dlen; + sgl->sgl_mss_cnt = sg_max_dlen / sgl->mss; + subframe = total_dlen / sg_max_dlen; + subframe += total_dlen % sg_max_dlen ? 1 : 0; + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_FST_SG_FLAG | NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + offset = gso_hdr_len; + sg_avail = sg_max_dlen; + ++send_subframe; + i++; + while (dlen) { + len = dlen > sg_avail ? sg_avail : dlen; + ne6x_fill_gso_sg(skb->data, offset, len, &sgl->sg[i]); + offset += len; + dlen -= len; + send_dlen += len; + sg_avail -= len; + if (send_dlen == total_dlen) + goto end; + + if (!(send_dlen % sg_max_dlen)) { + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + + sgl->sg[i].flag |= NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + if (++send_subframe == subframe) + sgl->sg[i].flag |= NE6X_SG_LST_SG_FLAG; + + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + sg_avail = sg_max_dlen; + } + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + } + + for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[j]; + + dlen = skb_frag_size(f); + offset = 0; + while (dlen) { + len = dlen > sg_avail ? sg_avail : dlen; + ne6x_fill_gso_sg(f, offset, len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_FRAG_FLAG; + + offset += len; + dlen -= len; + send_dlen += len; + sg_avail -= len; + if (send_dlen == total_dlen) + goto end; + if (!(send_dlen % sg_max_dlen)) { + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + if (++send_subframe == subframe) + sgl->sg[i].flag |= NE6X_SG_LST_SG_FLAG; + sg_avail = sg_max_dlen; + } + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + } + offset = 0; + } +end: + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + sgl->sg_num = ++i; + return 0; +err: + return -1; +} + +void ne6x_fill_tx_desc(struct ne6x_tx_desc *tx_desc, u8 vp, dma_addr_t tag_dma, + dma_addr_t dma, struct ne6x_sg_info *sg) +{ + memset(tx_desc, 0, NE6X_TX_DESC_SIZE); + tx_desc->buffer_mop_addr = cpu_to_le64(dma); + tx_desc->buffer_sop_addr = (sg->flag & NE6X_SG_SOP_FLAG) ? cpu_to_le64(tag_dma) : 0; + tx_desc->mop_cnt = sg->len; + tx_desc->event_trigger = 1; + tx_desc->vp = vp; + tx_desc->sop_valid = (sg->flag & NE6X_SG_SOP_FLAG) ? 1u : 0u; + tx_desc->eop_valid = (sg->flag & NE6X_SG_EOP_FLAG) ? 1u : 0u; + tx_desc->sop_cnt = (sg->flag & NE6X_SG_SOP_FLAG) ? 
32 : 0; + if (tx_desc->eop_valid) { + tx_desc->sop_cnt = tx_desc->mop_cnt; + tx_desc->buffer_sop_addr = tx_desc->buffer_mop_addr; + tx_desc->mop_cnt = 4; + } +} + +void ne6x_fill_tx_priv_tag(struct ne6x_ring *tx_ring, struct ne6x_tx_tag *tx_tag, + int mss, struct ne6x_sg_info *sg) +{ + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)tx_ring->adpt; + + tx_tag->tag_pi1 = (comm->port_info & 0x2) ? 1 : 0; + tx_tag->tag_pi0 = (comm->port_info & 0x1) ? 1 : 0; + tx_tag->tag_vport = (comm->port_info >> 8) & 0xFF; + tx_tag->tag_mss = cpu_to_be16(mss); + tx_tag->tag_num = sg->base_mss_no | (sg->flag & NE6X_SG_JUMBO_FLAG) | + (sg->flag & NE6X_SG_LST_SG_FLAG) | + (sg->flag & NE6X_SG_FST_SG_FLAG); + tx_tag->tag_num = cpu_to_be16(tx_tag->tag_num); +} + +void ne6x_xmit_jumbo(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_ring *tag_ring, struct ne6x_tx_tag *tx_tag) +{ + int j = 0; + struct ne6x_sg_list *sgl = tx_ring->sgl; + struct ne6x_sg_info *sg; + dma_addr_t dma, tag_dma; + struct sk_buff *skb = first->skb; + struct ne6x_tx_buf *tx_bi; + struct ne6x_tx_tag *tag_desc = tx_tag; + u32 i = tx_ring->next_to_use; + struct ne6x_tx_desc *tx_desc = NE6X_TX_DESC(tx_ring, i); + + for (; j < sgl->sg_num; j++) { + sg = &sgl->sg[j]; + if (likely(sg->flag & NE6X_SG_FRAG_FLAG)) { + dma = skb_frag_dma_map(tx_ring->dev, sg->p, sg->offset, sg->len, + DMA_TO_DEVICE); + } else { + dma = dma_map_single(tx_ring->dev, sg->p + sg->offset, sg->len, + DMA_TO_DEVICE); + } + + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + tx_bi = &tx_ring->tx_buf[i]; + + dma_unmap_len_set(tx_bi, len, sg->len); + + dma_unmap_addr_set(tx_bi, dma, dma); + + if (sg->flag & NE6X_SG_SOP_FLAG) { + tag_dma = tag_ring->dma + tag_ring->next_to_use * NE6X_TX_PRIV_TAG_SIZE; + tag_desc = NE6X_TX_TAG(tag_ring, tag_ring->next_to_use); + ne6x_fill_tx_priv_tag(tx_ring, tag_desc, sgl->mss, sg); + if (++tag_ring->next_to_use == tag_ring->count) + tag_ring->next_to_use = 0; + } else { + tag_dma = 0; + } + + tx_desc = NE6X_TX_DESC(tx_ring, i); + ne6x_fill_tx_desc(tx_desc, tx_ring->reg_idx, tag_dma, dma, sg); + if (++i == tx_ring->count) + i = 0; + } + tx_ring->next_to_use = i; + ne6x_maybe_stop_tx(tx_ring, DESC_NEEDED); + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + * + * We also use this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
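+	 * It is paired with the dma_rmb() in ne6x_clean_tx_irq(), which
+	 * orders the write-back status check against the reads that follow.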
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + /* notify HW of packet */ + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + ne6x_tail_update(tx_ring, i); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + first->jumbo_finsh = 1u; + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_buf[i]; + ne6x_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + + if (i == 0) + i = tx_ring->count; + + i--; + } + + tx_ring->next_to_use = i; +} + +void ne6x_xmit_simple(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_ring *tag_ring, struct ne6x_tx_tag *tx_tag) +{ + struct sk_buff *skb = first->skb; + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)tx_ring->adpt; + struct ne6x_tx_desc *tx_desc, *first_desc; + unsigned int size = skb_headlen(skb); + u32 i = tx_ring->next_to_use; + struct ne6x_tx_tag *ttx_desc; + struct ne6x_tx_buf *tx_bi; + bool is_first = true; + int send_len = 0; + skb_frag_t *frag; + dma_addr_t dma; + __le64 mss = 0; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + first_desc = NE6X_TX_DESC(tx_ring, i); + tx_desc = NE6X_TX_DESC(tx_ring, i); + mss = tx_desc->mss; + tx_desc->sop_valid = 1; + tx_desc->eop_valid = 0; + tx_bi = first; + + ttx_desc = (struct ne6x_tx_tag *)tx_tag; + ttx_desc->tag_pi1 = (comm->port_info & 0x2) ? 1 : 0; + ttx_desc->tag_pi0 = (comm->port_info & 0x1) ? 1 : 0; + ttx_desc->tag_vport = (comm->port_info >> 8) & 0xFF; + ttx_desc->tag_mss = tx_tag->tag_mss; + ttx_desc->tag_num = 0x0; + send_len += size; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_bi, len, size); + dma_unmap_addr_set(tx_bi, dma, dma); + + ne6x_tx_desc_push(tx_desc, dma, size); + tx_desc->vp = tx_ring->reg_idx; + tx_desc->tso = 0x0; + tx_desc->l3_csum = 0x00; + tx_desc->l3_ofst = 0x00; + tx_desc->l4_csum = 0x00; + tx_desc->l4_ofst = 0x00; + tx_desc->pld_ofst = 0x00; + tx_desc->u.val = 0x0; + tx_desc->rsv4 = 0; + if (is_first) { + tx_desc->sop_valid = 1u; + is_first = false; + tx_desc->sop_cnt = 32; + tx_desc->buffer_sop_addr = cpu_to_le64(first->tag_dma); + } + + if (send_len == skb->len) { + tx_desc->eop_valid = 1u; + break; + } + + if (++i == tx_ring->count) + i = 0; + + tx_desc = NE6X_TX_DESC(tx_ring, i); + + size = skb_frag_size(frag); + send_len += size; + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); + + tx_bi = &tx_ring->tx_buf[i]; + } + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + if (++tag_ring->next_to_use == tag_ring->count) + tag_ring->next_to_use = 0; + + ne6x_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* timestamp the skb as late as possible, just prior to notifying + * the MAC that it should transmit this packet + */ + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + * + * We also use this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
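+	 * The doorbell below is skipped while netdev_xmit_more() reports
+	 * further frames pending, so tail writes are batched per burst.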
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + /* notify HW of packet */ + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + ne6x_tail_update(tx_ring, i); + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_buf[i]; + ne6x_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + + if (i == 0) + i = tx_ring->count; + + i--; + } + + tx_ring->next_to_use = i; +} + +netdev_tx_t ne6x_xmit_frame_ring(struct sk_buff *skb, struct ne6x_ring *tx_ring, + struct ne6x_ring *tag_ring, bool jumbo_frame) +{ + struct ne6x_tx_tag *tx_tagx = NE6X_TX_TAG(tag_ring, tag_ring->next_to_use); + struct ne6x_tx_buf *first; + int tso, count; + + /* prefetch the data, we'll need it later */ + prefetch(tx_tagx); + prefetch(skb->data); + + if (!jumbo_frame) { + count = ne6x_xmit_descriptor_count(skb); + } else { + if (ne6x_fill_jumbo_sgl(tx_ring, skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + count = tx_ring->sgl->sg_num; + } + /* reserve 5 descriptors to avoid tail over-write */ + if (ne6x_maybe_stop_tx(tx_ring, count + 4 + 1)) { + /* this is a hard error */ + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buf[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + /* record initial flags and protocol */ + + first->jumbo_frame = 0; + first->jumbo_finsh = 0; + first->tag_dma = tag_ring->dma + tag_ring->next_to_use * sizeof(struct ne6x_tx_tag); + memset(tx_tagx, 0x00, sizeof(*tx_tagx)); + + ne6x_tx_prepare_vlan_flags(tx_ring, first, tx_tagx); + + tso = ne6x_tso(tx_ring, first, tx_tagx); + if (tso < 0) + goto out_drop; + + tso = ne6x_tx_csum(tx_ring, first, tx_tagx); + if (tso < 0) + goto out_drop; + + tx_tagx->tag_mss = cpu_to_be16(tx_tagx->tag_mss); + + if (!jumbo_frame) { + ne6x_xmit_simple(tx_ring, first, tag_ring, tx_tagx); + } else { + first->jumbo_frame = true; + ne6x_xmit_jumbo(tx_ring, first, tag_ring, tx_tagx); + } + + return NETDEV_TX_OK; + +out_drop: + ne6x_unmap_and_free_tx_resource(tx_ring, first); + + return NETDEV_TX_OK; +} diff --git a/drivers/net/ethernet/bzwx/nce/comm/txrx.h b/drivers/net/ethernet/bzwx/nce/comm/txrx.h new file mode 100644 index 0000000000000000000000000000000000000000..8b35bc385aa512d69ab410a417006dadc1da783c --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/txrx.h @@ -0,0 +1,476 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _TXRX_H +#define _TXRX_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +#define NE6X_MAX_NUM_DESCRIPTORS (16 * 1024) +#define NE6X_DEFAULT_NUM_DESCRIPTORS (4 * 1024) +#define NE6X_MIN_NUM_DESCRIPTORS 64 +#define NE6X_REQ_DESCRIPTOR_MULTIPLE 32 + +#define NE6X_MAX_BUFFER_TXD 8 +#define NE6X_MIN_TX_LEN 60 + +#define NE6X_TAIL_REG_NUM 4 +#define NE6X_RX_BUFFER_WRITE 32 /* Must be power of 2 */ + +/* The size limit for a transmit buffer in a descriptor is 15K. 
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define NE6X_MAX_READ_REQ_SIZE 4096
+#define NE6X_MAX_DATA_PER_TXD (15500 - 32 - 4 - 1)
+#define NE6X_MAX_DATA_PER_TXD_ALIGNED \
+	(NE6X_MAX_DATA_PER_TXD & ~(NE6X_MAX_READ_REQ_SIZE - 1))
+
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define NE6X_PACKET_HDR_PAD ETH_HLEN
+#define NE6X_RXBUFFER_256 256
+#define NE6X_RXBUFFER_2048 2048
+#define NE6X_RXBUFFER_4096 4096 /* Used for large frames w/ padding */
+/* CIU buffer max len is 15K */
+#define NE6X_MAX_RXBUFFER 15360 /* largest size for single descriptor */
+#define NE6X_MIN_MTU_SIZE 128
+#define NE6X_RX_HDR_SIZE NE6X_RXBUFFER_256
+
+#define NE6X_TX_PRIV_TAG_SIZE 32
+#define NE6X_TX_DESC_SIZE 32
+/* iterator for handling rings in ring container */
+#define ne6x_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos; pos = pos->next)
+
+#define NE6X_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+#define NE6X_RX_DESC(R, i) (&(((union ne6x_rx_desc *)((R)->desc))[i]))
+#define NE6X_TX_DESC(R, i) (&(((struct ne6x_tx_desc *)((R)->desc))[i]))
+#define NE6X_TX_TAG(R, i) (&(((struct ne6x_tx_tag *)((R)->desc))[i]))
+#define NE6X_CQ_DESC(R, i) (&(((struct ne6x_cq_desc *)((R)->desc))[i]))
+
+#define NE6X_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? \
+	0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+
+struct ne6x_tx_desc_status {
+	/* pkt drop */
+	u8 tx_drop_addr : 1;
+	u8 rsv3 : 1;
+	u8 rsv2 : 1;
+	/* normal */
+	u8 tx_done : 1;
+	/* ecc error */
+	u8 tx_ecc_err : 1;
+	u8 rsv1 : 1;
+	u8 rsv0 : 1;
+	/* pcie error */
+	u8 tx_pcie_read_err : 1;
+};
+
+struct ne6x_tx_desc {
+	union {
+		/* Hardware write-back */
+		struct ne6x_tx_desc_status flags;
+		u8 val;
+	} u;
+
+	u8 rsv0 : 1;
+	u8 vp : 7;
+	u8 event_trigger : 1;
+	u8 chain : 1;
+	u8 transmit_type : 2;
+	u8 sop_valid : 1;
+	u8 eop_valid : 1;
+	u8 tso : 1;
+	u8 rsv1 : 1;
+	u8 rsv2;
+	u8 rsv3;
+
+	u8 l3_csum : 1;
+	u8 l3_ofst : 7;
+	u8 l4_csum : 1;
+	u8 l4_ofst : 7;
+	u8 pld_ofst;
+
+	__le64 mop_cnt : 24;
+	__le64 sop_cnt : 16;
+	__le64 rsv4 : 8;
+	__le64 mss : 16;
+	__le64 buffer_mop_addr;
+	__le64 buffer_sop_addr;
+};
+
+struct ne6x_tx_tag {
+	u8 resv0;
+	u8 tag_pi1 : 1;
+	u8 resv1 : 7;
+	u8 l3_csum : 1;
+	u8 l4_csum : 1;
+	u8 vxl_l3_csum : 1;
+	u8 vxl_l4_csum : 1;
+	u8 tag_resv : 3;
+	u8 tag_pi0 : 1;
+	u8 tag_vport;
+	u16 tag_vlan1; /* 1q vlan */
+	u16 tag_vlan2; /* 1ad vlan */
+
+	__le64 resv2 : 32;
+	__le64 tag_num : 16;
+	__le64 tag_mss : 16; /* mss */
+
+	u8 l3_ofst;
+	u8 l4_ofst;
+	u16 l4_len; /* l4hdr + pld_size */
+	u8 vxl_l3_ofst;
+	u8 vxl_l4_ofst;
+	u16 vxl_l4_len; /* l4hdr + pld_size */
+
+	__le64 resv3;
+};
+
+struct ne6x_tx_buf {
+	struct ne6x_tx_desc *next_to_watch;
+	struct sk_buff *skb;
+	u32 bytecount;
+	u8 jumbo_frame; /* fragment when bytecount > 15.5KB */
+	u8 jumbo_finsh; /* set to 1 once the last frame of a jumbo packet has been transmitted */
+	u16 rsv;
+	int napi_budget; /* when bytecount > 15.5KB, accumulates the NAPI budget
+			  * across transmit irq handler runs
+			  */
+	u16 gso_segs;
+	dma_addr_t tag_dma;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct ne6x_rx_desc_status {
+	u8 rx_mem_err : 1; /* MOP_MEM_ADDR/SOP_MEM_ADDR/MOP_MEM_LEN = 0, pkt must be dropped */
+	u8 rx_mem_ovflow : 1; /* SOP_MEM_OVFLOW == 1, MOP holds pkt data */
+	u8 rsv : 1;
+	u8 rx_eop : 1; /* EOP flag */
+	u8 rx_csum_err : 1; /* checksum error */
+	u8 rx_err : 1; /* Not enough descriptors */
+	u8 rx_mem_used : 1; /*
MEM_USED, Normal */ + u8 pd_type : 1; /* 0 ingress pd, 1 egress pd */ +}; + +#define NE6X_RX_DESC_STATUS_EOF_SHIFT 3 +#define NE6X_RX_DESC_STATUS_ERR_SHIFT 0 + +/* Receive Descriptor */ +union ne6x_rx_desc { + struct { + u8 rsv3; + u8 rsv2 : 1; + u8 vp : 7; + __le16 mop_mem_len; + __le16 sop_mem_len; + __le16 rsv1; + __le64 buffer_sop_addr; + __le64 buffer_mop_addr; + + __le64 rsv0; + } w; /* write */ + + struct { + union { + struct ne6x_rx_desc_status flags; + u8 val; + } u; + u8 rsv2 : 1; + u8 vp : 7; + u8 pd[24]; + __le16 rsv0; + __le16 rsv1; + __le16 pkt_len; + } wb; /* Writeback */ +}; + +struct ne6x_tx_cq_desc { + u8 cq_tx_stats; + u16 cq_tx_offset; +} __packed; + +struct ne6x_rx_cq_desc { + u8 cq_rx_stats; + u16 cq_rx_len; + u16 cq_rx_offset; +} __packed; + +struct ne6x_cq_desc { + u8 ctype : 1; + u8 rsv0 : 3; + u8 num : 4; + u8 rsv1; + + union { + struct ne6x_tx_cq_desc tx_cq[10]; + struct ne6x_rx_cq_desc rx_cq[6]; + u8 data[30]; + } payload; +}; + +struct ne6x_rx_buf { + dma_addr_t dma; + struct page *page; + u32 page_offset; + u16 pagecnt_bias; +}; + +struct ne6x_q_stats { + u64 packets; + u64 bytes; +}; + +struct ne6x_txq_stats { + u64 restart_q; + u64 tx_busy; + u64 tx_linearize; + u64 csum_err; + u64 csum_good; + u64 tx_pcie_read_err; + u64 tx_ecc_err; + u64 tx_drop_addr; +}; + +struct ne6x_rxq_stats { + u64 non_eop_descs; + u64 alloc_page_failed; + u64 alloc_buf_failed; + u64 page_reuse_count; + u64 csum_err; + u64 csum_good; + u64 rx_mem_error; + u64 rx_err; +}; + +struct ne6x_cq_stats { + u64 cq_num; + u64 tx_num; + u64 rx_num; +}; + +#define NE6X_SG_SOP_FLAG BIT(0) +#define NE6X_SG_EOP_FLAG BIT(1) +#define NE6X_SG_FST_SG_FLAG BIT(13) +#define NE6X_SG_LST_SG_FLAG BIT(14) +#define NE6X_SG_JUMBO_FLAG BIT(15) +#define NE6X_SG_FRAG_FLAG BIT(4) +#define NE6X_MAX_DESC_NUM_PER_SKB 16 + +struct ne6x_sg_info { + void *p; + u16 offset; + u16 len; + u16 flag; + u16 base_mss_no; +}; + +struct ne6x_sg_list { + u16 sg_num; + u16 mss; + u16 sgl_mss_cnt; + struct ne6x_sg_info sg[NE6X_MAX_DESC_NUM_PER_SKB]; +}; + +/* descriptor ring, associated with a adapter */ +struct ne6x_ring { + /* CL1 - 1st cacheline starts here */ + void *adpt; + struct ne6x_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + struct net_device *netdev; /* netdev ring maps to */ + struct ne6x_q_vector *q_vector; /* Backreference to associated vector */ + + u64 __iomem *tail; + + struct ne6x_sg_list *sgl; + + union { + struct ne6x_tx_buf *tx_buf; + struct ne6x_rx_buf *rx_buf; + }; + + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + + /* used in interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + u16 cq_last_expect; + + u16 queue_index; /* Queue number of ring */ + u16 rx_buf_len; + + /* stats structs */ + struct ne6x_q_stats stats; + struct u64_stats_sync syncp; + + union { + struct ne6x_txq_stats tx_stats; + struct ne6x_rxq_stats rx_stats; + struct ne6x_cq_stats cq_stats; + }; + + struct rcu_head rcu; /* to avoid race on free */ + dma_addr_t dma; /* physical address of ring */ + unsigned int size; /* length of descriptor ring in bytes */ + struct sk_buff *skb; /* When ne6x_clean_rx_ring_irq() must + * return before it sees the EOP for + * the current packet, we save that skb + * here and resume receiving this + * packet the next time + * ne6x_clean_rx_ring_irq() is called + * for this ring. 
+ */ +} ____cacheline_internodealigned_in_smp; + +struct ne6x_ring_container { + /* head of linked-list of rings */ + struct ne6x_ring *ring; + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 count; +}; + +union rx_ol_flags { + u32 ol_flags; /* Offload Feature Bits. */ + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 ol_flag_rx_vlan :1; + u32 rx_ip_cksum_bad :1; + u32 rx_ip_cksum_good :1; + u32 rx_l4_cksum_bad :1; + u32 rx_l4_cksum_good :1; + u32 rx_rss_hash :1; + u32 rx_qinq :1; + u32 rx_lro :1; + u32 rx_vlan_striped :1; + u32 rx_qinq_striped :1; + u32 rx_dvlan :1; + u32 rx_vlan_bad :1; + u32 rx_inner_ip_cksum_bad :1; + u32 rx_inner_ip_cksum_good :1; + u32 rx_inner_l4_cksum_bad :1; + u32 rx_inner_l4_cksum_good :1; + u32 rx_tnl_csum :1; + u32 rsv0 :1; + u32 tag_num :8; + u32 rsv1 :6; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u32 rsv1 :6; + u32 tag_num :8; + u32 rsv0 :1; + u32 rx_tnl_csum :1; + u32 rx_vlan_striped :1; + u32 rx_qinq_striped :1; + u32 rx_dvlan :1; + u32 rx_vlan_bad :1; + u32 rx_inner_ip_cksum_bad :1; + u32 rx_inner_ip_cksum_good :1; + u32 rx_inner_l4_cksum_bad :1; + u32 rx_inner_l4_cksum_good :1; + u32 ol_flag_rx_vlan :1; + u32 rx_ip_cksum_bad :1; + u32 rx_ip_cksum_good :1; + u32 rx_l4_cksum_bad :1; + u32 rx_l4_cksum_good :1; + u32 rx_rss_hash :1; + u32 rx_qinq :1; + u32 rx_lro :1; +#endif + } flag_bits; +}; + +struct rx_hdr_info { + union rx_ol_flags ol_flag; + u32 rss_hash; /* RSS Hash Value */ + u32 vlan_tci_outer:16; /* VLAN Outer Tag Control Identifier */ + u32 vlan_tci:16; /* VLAN Tag Control Identifier */ +}; + +#define NE6X_INT_NAME_STR_LEN (IFNAMSIZ + 16) + +/* struct that defines an interrupt vector */ +struct ne6x_q_vector { + void *adpt; + + u16 v_idx; /* index in the adpt->q_vector array. 
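+			 * reg_idx below is presumably the matching
+			 * hardware-side vector index, mirroring the
+			 * ring->reg_idx convention.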
*/ + u16 reg_idx; + + struct napi_struct napi; + + struct ne6x_ring_container rx; + struct ne6x_ring_container tx; + struct ne6x_ring_container cq; + struct ne6x_ring_container tg; + + u8 num_ringpairs; /* total number of ring pairs in vector */ + + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; + + char name[NE6X_INT_NAME_STR_LEN]; +} ____cacheline_internodealigned_in_smp; + +#define DESC_NEEDED (MAX_SKB_FRAGS + 6) + +static inline unsigned int ne6x_rx_pg_order(struct ne6x_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->rx_buf_len > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define ne6x_rx_pg_size(_ring) (PAGE_SIZE << ne6x_rx_pg_order(_ring)) + +static inline struct netdev_queue *txring_txq(const struct ne6x_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +int ne6x_clean_cq_irq(struct ne6x_q_vector *q_vector, struct ne6x_ring *cq_ring, int napi_budget); +int ne6x_clean_rx_irq(struct ne6x_ring *rx_ring, int budget); +int ne6x_clean_tx_irq(struct ne6x_adapt_comm *comm, struct ne6x_ring *tx_ring, int napi_budget); +netdev_tx_t ne6x_xmit_frame_ring(struct sk_buff *skb, struct ne6x_ring *tx_ring, + struct ne6x_ring *tag_ring, bool jumbo_frame); +void ne6x_tail_update(struct ne6x_ring *ring, int val); +int ne6x_setup_tx_descriptors(struct ne6x_ring *tx_ring); +int ne6x_setup_rx_descriptors(struct ne6x_ring *rx_ring); +int ne6x_setup_cq_descriptors(struct ne6x_ring *cq_ring); +int ne6x_setup_tg_descriptors(struct ne6x_ring *tg_ring); +int ne6x_setup_tx_sgl(struct ne6x_ring *tx_ring); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/version.h b/drivers/net/ethernet/bzwx/nce/comm/version.h new file mode 100644 index 0000000000000000000000000000000000000000..9affdb9803b1f616cb98ac4b3045100896a26663 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/version.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _VERSION_H +#define _VERSION_H + +#define VERSION "1.0.4" + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h new file mode 100644 index 0000000000000000000000000000000000000000..1206d8ab3cfd19b2eaccdf0b1b5996299a1ac173 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h @@ -0,0 +1,468 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_H +#define _NE6X_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "reg.h" +#include "feature.h" +#include "txrx.h" +#include "common.h" +#include "ne6x_txrx.h" +#include "ne6x_ethtool.h" +#include "ne6x_procfs.h" +#include "ne6x_virtchnl_pf.h" +#include "version.h" + +#define NE6X_MAX_VP_NUM 64 +#define NE6X_PF_VP0_NUM 64 +#define NE6X_PF_VP1_NUM 65 +#define NE6X_MAILBOX_VP_NUM NE6X_PF_VP0_NUM +#define NE6X_MAX_MSIX_NUM 72 +#define NE6X_MIN_MSIX 2 + +#define NE6X_NIC_INT_VP 71 +#define NE6X_NIC_INT_START_BIT 42 + +#define wr64(a, reg, value) \ + writeq((value), ((void __iomem *)((a)->hw_addr0) + (reg))) +#define rd64(a, reg) \ + readq((void __iomem *)((a)->hw_addr0) + (reg)) +#define wr64_bar4(a, reg, value) \ + writeq((value), ((void __iomem *)((a)->hw_addr4) + (reg))) +#define rd64_bar4(a, reg) \ + readq((void __iomem *)((a)->hw_addr4) + (reg)) + +#define ne6x_pf_to_dev(pf) (&((pf)->pdev->dev)) +#define ne6x_get_vf_by_id(pf, vf_id) (&((pf)->vf[vf_id])) + +#define ADPT_PPORT(adpt) ((adpt)->port_info->hw_port_id) +#define ADPT_LPORT(adpt) ((adpt)->port_info->lport) +#define ADPT_VPORT(adpt) ((adpt)->vport) +#define ADPT_VPORTCOS(adpt) ((adpt)->base_queue + 160) + +enum ne6x_adapter_type { + NE6X_ADPT_PF = 0, + NE6X_ADPT_VF, +}; + +enum ne6x_adapter_flags { + NE6X_ADPT_F_DISABLE_FW_LLDP, + NE6X_ADPT_F_LINKDOWN_ON_CLOSE, + NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, + NE6X_ADPT_F_DDOS_SWITCH, + NE6X_ADPT_F_ACL, + NE6X_ADPT_F_TRUST_VLAN, + NE6X_ADPT_F_NBITS /* must be last */ +}; + +enum ne6x_pf_state { + NE6X_TESTING, + NE6X_DOWN, + NE6X_SERVICE_SCHED, + NE6X_INT_INIT_DOWN, + NE6X_CLIENT_SERVICE_REQUESTED, + NE6X_LINK_POOLING, + NE6X_CONFIG_BUSY, + NE6X_TIMEOUT_RECOVERY_PENDING, + NE6X_PF_RESET_REQUESTED, + NE6X_CORE_RESET_REQUESTED, + NE6X_GLOBAL_RESET_REQUESTED, + NE6X_RESET_INTR_RECEIVED, + NE6X_DOWN_REQUESTED, + NE6X_VF_DIS, + NE6X_MAILBOXQ_EVENT_PENDING, + NE6X_PF_INTX, + NE6X_PF_MSI, + NE6X_PF_MSIX, + NE6X_FLAG_SRIOV_ENA, + NE6X_REMOVE, + NE6X_STATE_NBITS /* must be last */ +}; + +enum { + NE6X_ETHTOOL_FLASH_810_LOADER = 0, + NE6X_ETHTOOL_FLASH_810_APP = 1, + NE6X_ETHTOOL_FLASH_807_APP = 2, + NE6X_ETHTOOL_FLASH_NP = 3, + NE6X_ETHTOOL_FLASH_PXE = 4, + NE6X_ETHTOOL_FRU = 0xf2, +}; + +/* MAC addr list head node struct */ +struct mac_addr_head { + struct list_head list; + struct mutex mutex; /* mutex */ +}; + +/* MAC addr list node struct */ +struct mac_addr_node { + struct list_head list; + u8 addr[32]; +}; + +/* values for UPT1_RSSConf.hashFunc */ +enum { + NE6X_FW_VER_NORMAL = 0x0, + NE6X_FW_VER_WHITELIST = 0x100, +}; + +struct ne6x_lump_tracking { + u16 num_entries; + u16 list[]; +}; + +struct ne6x_hw_port_stats { + u64 mac_rx_eth_byte; + u64 mac_rx_eth; + u64 mac_rx_eth_undersize; + u64 mac_rx_eth_crc; + u64 mac_rx_eth_64b; + u64 mac_rx_eth_65_127b; + u64 mac_rx_eth_128_255b; + u64 mac_rx_eth_256_511b; + u64 mac_rx_eth_512_1023b; + u64 mac_rx_eth_1024_15360b; + u64 mac_tx_eth_byte; + u64 mac_tx_eth; + u64 mac_tx_eth_undersize; + u64 mac_tx_eth_64b; + u64 mac_tx_eth_65_127b; + u64 mac_tx_eth_128_255b; + u64 mac_tx_eth_256_511b; + u64 mac_tx_eth_512_1023b; + u64 mac_tx_eth_1024_15360b; +}; + +/* struct that defines a adapter, associated with a dev */ +struct ne6x_adapter { + struct ne6x_adapt_comm comm; + struct net_device *netdev; + struct 
ne6x_pf *back; /* back pointer to PF */ + struct ne6x_port_info *port_info; /* back pointer to port_info */ + struct ne6x_ring **rx_rings; /* Rx ring array */ + struct ne6x_ring **tx_rings; /* Tx ring array */ + struct ne6x_ring **cq_rings; /* Tx ring array */ + struct ne6x_ring **tg_rings; /* Tx tag ring array */ + struct ne6x_q_vector **q_vectors; /* q_vector array */ + + /* used for loopback test */ + char *send_buffer; + wait_queue_head_t recv_notify; + u8 recv_done; + + irqreturn_t (*irq_handler)(int irq, void *data); + + u32 tx_restart; + u32 tx_busy; + u32 rx_buf_failed; + u32 rx_page_failed; + u16 num_q_vectors; + u16 base_vector; /* IRQ base for OS reserved vectors */ + enum ne6x_adapter_type type; + struct ne6x_vf *vf; /* VF associated with this adapter */ + u16 idx; /* software index in pf->adpt[] */ + u16 max_frame; + u16 rx_buf_len; + struct rtnl_link_stats64 net_stats; + struct rtnl_link_stats64 net_stats_offsets; + struct ne6x_eth_stats eth_stats; + struct ne6x_eth_stats eth_stats_offsets; + struct ne6x_rss_info rss_info; + int rss_size; + + bool irqs_ready; + bool current_isup; /* Sync 'link up' logging */ + u16 current_speed; + u16 vport; + u16 num_queue; /* Used queues */ + u16 base_queue; /* adapter's first queue in hw array */ + u16 num_tx_desc; + u16 num_rx_desc; + u16 num_cq_desc; + u16 num_tg_desc; + + u32 hw_feature; + bool netdev_registered; + + /* unicast MAC head node */ + struct mac_addr_head uc_mac_addr; + /* multicast MAC head node */ + struct mac_addr_head mc_mac_addr; + + struct work_struct set_rx_mode_task; + + struct ne6x_hw_port_stats stats; + DECLARE_BITMAP(flags, NE6X_ADPT_F_NBITS); + + struct list_head vlan_filter_list; + struct list_head macvlan_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; + + /* aRFS members only allocated for the PF ADPT */ +#define NE6X_MAX_RFS_FILTERS 0xFFFF +#define NE6X_MAX_ARFS_LIST 1024 +#define NE6X_ARFS_LST_MASK (NE6X_MAX_ARFS_LIST - 1) + struct hlist_head *arfs_fltr_list; + struct ne6x_arfs_active_fltr_cntrs *arfs_fltr_cntrs; + spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ + atomic_t *arfs_last_fltr_id; +} ____cacheline_internodealigned_in_smp; + +struct ne6x_dev_eeprom_info { + u8 vendor_id[3]; + u8 ocp_record_version; + u8 max_power_s0; + u8 max_power_s5; + u8 hot_card_cooling_passive_tier; + u8 cold_card_cooling_passive_tier; + u8 cooling_mode; + u16 hot_standby_airflow_require; + u16 cold_standby_airflow_require; + u8 uart_configuration_1; + u8 uart_configuration_2; + u8 usb_present; + u8 manageability_type; + u8 fru_write_protection; + u8 prog_mode_power_state_supported; + u8 hot_card_cooling_active_tier; + u8 cold_card_cooling_active_tier; + u8 transceiver_ref_power_Level; + u8 transceiver_ref_temp_Level; + u8 card_thermal_tier_with_local_fan_fail; + u16 product_mode; + u8 is_pcie_exist; + u32 logic_port_to_phyical; + u8 resv[3]; + u8 number_of_physical_controllers; + u8 control_1_udid[16]; + u8 control_2_udid[16]; + u8 control_3_udid[16]; + u8 control_4_udid[16]; + u32 hw_feature; + u32 hw_flag; + u8 port_0_mac[6]; + u8 port_1_mac[6]; + u8 port_2_mac[6]; + u8 port_3_mac[6]; + u8 rsv[9]; + u32 spd_verify_value; +} __packed; + +struct ne6x_hw { + u64 __iomem *hw_addr0; + u64 __iomem *hw_addr2; + u64 __iomem *hw_addr4; + + struct ne6x_port_info *port_info; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 dvm_ena; /* double vlan enable */ + struct ne6x_pf *back; + 
struct ne6x_bus_info bus; + u16 pf_port; + + u32 expect_vp; + u32 max_queue; + + struct ne6x_mbx_snapshot mbx_snapshot; + u8 ne6x_mbx_ready_to_send[64]; +}; + +#define ne6x_hw_to_dev(ptr) (&(container_of((ptr), struct ne6x_pf, hw))->pdev->dev) + +struct ne6x_firmware_ver_info { + u32 firmware_soc_ver; + u32 firmware_np_ver; + u32 firmware_pxe_ver; +}; + +/* struct that defines the Ethernet device */ +struct ne6x_pf { + struct pci_dev *pdev; + + /* OS reserved IRQ details */ + struct msix_entry *msix_entries; + u16 ctrl_adpt_idx; /* control adapter index in pf->adpt array */ + + struct ne6x_adapter **adpt; /* adapters created by the driver */ + + struct mutex switch_mutex; /* switch_mutex */ + struct mutex mbus_comm_mutex; /* mbus_comm_mutex */ + struct timer_list serv_tmr; + struct timer_list linkscan_tmr; + unsigned long service_timer_period; + struct work_struct serv_task; + struct work_struct linkscan_work; + + /* Virtchnl/SR-IOV config info */ + struct ne6x_vf *vf; + u16 num_alloc_vfs; + u16 num_qps_per_vf; + + u16 next_adpt; /* Next free slot in pf->adpt[] - 0-based! */ + u16 num_alloc_adpt; + + DECLARE_BITMAP(state, NE6X_STATE_NBITS); + + u32 tx_timeout_count; + u32 tx_timeout_recovery_level; + unsigned long tx_timeout_last_recovery; + struct ne6x_firmware_ver_info verinfo; + struct ne6x_dev_eeprom_info sdk_spd_info; + + struct ne6x_hw hw; + struct ne6x_lump_tracking *irq_pile; +#ifdef CONFIG_DEBUG_FS + struct dentry *ne6x_dbg_pf; + struct dentry *ne6x_dbg_info_pf; +#endif /* CONFIG_DEBUG_FS */ + struct proc_dir_entry *ne6x_proc_pf; + struct list_head key_filter_list; + spinlock_t key_list_lock; /* Lock to protect accesses to key filter */ + + char link_intname[NE6X_INT_NAME_STR_LEN]; + char mailbox_intname[NE6X_INT_NAME_STR_LEN]; + bool link_int_irq_ready; + bool mailbox_int_irq_ready; + bool is_fastmode; + u32 hw_flag; + u32 dump_info; + u16 dev_type; +}; + +static inline void ne6x_adpt_setup_irqhandler(struct ne6x_adapter *adpt, + irqreturn_t (*irq_handler)(int, void *)) +{ + adpt->irq_handler = irq_handler; +} + +struct ne6x_netdev_priv { + struct ne6x_adapter *adpt; +}; + +static inline bool ne6x_is_supported_port_vlan_proto(struct ne6x_hw *hw, + u16 vlan_proto) +{ + bool is_supported = false; + + switch (vlan_proto) { + case ETH_P_8021Q: + is_supported = true; + break; + case ETH_P_8021AD: + if (hw->dvm_ena) + is_supported = true; + break; + } + + return is_supported; +} + +static inline struct ne6x_pf *ne6x_netdev_to_pf(struct net_device *netdev) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + + return np->adpt->back; +} + +static inline struct ne6x_adapter *ne6x_netdev_to_adpt(struct net_device *netdev) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + + return np->adpt; +} + +#define NE6X_VLAN(tpid, vid, prio) \ + ((struct ne6x_vlan){ tpid, vid, prio }) + +struct rtnl_link_stats64 *ne6x_get_adpt_stats_struct(struct ne6x_adapter *adpt); + +void ne6x_switch_pci_write(void *bar_base, u32 base_addr, u32 offset_addr, u64 reg_value); +u64 ne6x_switch_pci_read(void *bar_base, u32 base_addr, u32 offset_addr); +int ne6x_adpt_restart_vp(struct ne6x_adapter *adpt, bool enable); +void ne6x_update_pf_stats(struct ne6x_adapter *adpt); +void ne6x_service_event_schedule(struct ne6x_pf *pf); + +void ne6x_down(struct ne6x_adapter *adpt); +int ne6x_up(struct ne6x_adapter *adpt); +int ne6x_adpt_configure(struct ne6x_adapter *adpt); +void ne6x_adpt_close(struct ne6x_adapter *adpt); + +int ne6x_alloc_rings(struct ne6x_adapter *adpt); +int ne6x_adpt_configure_tx(struct 
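/* queue bring-up helpers; the definitions live elsewhere in the driver */ 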
ne6x_adapter *adpt); +int ne6x_adpt_configure_rx(struct ne6x_adapter *adpt); +int ne6x_adpt_configure_cq(struct ne6x_adapter *adpt); +void ne6x_adpt_clear_rings(struct ne6x_adapter *adpt); +int ne6x_adpt_setup_tx_resources(struct ne6x_adapter *adpt); +int ne6x_adpt_setup_rx_resources(struct ne6x_adapter *adpt); + +int ne6x_close(struct net_device *netdev); +int ne6x_open(struct net_device *netdev); +int ne6x_adpt_open(struct ne6x_adapter *adpt); +int ne6x_adpt_mem_alloc(struct ne6x_pf *pf, struct ne6x_adapter *adpt); +void ne6x_adpt_map_rings_to_vectors(struct ne6x_adapter *adpt); +void ne6x_adpt_reset_stats(struct ne6x_adapter *adpt); +void ne6x_adpt_free_arrays(struct ne6x_adapter *adpt, bool free_qvectors); +int ne6x_adpt_register_netdev(struct ne6x_adapter *adpt); +bool netif_is_ne6x(struct net_device *dev); + +int ne6x_validata_tx_rate(struct ne6x_adapter *adpt, int vf_id, int min_tx_rate, int max_tx_rate); + +int ne6x_del_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); +struct ne6x_vlan_filter *ne6x_add_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); + +struct ne6x_key_filter *ne6x_add_key_list(struct ne6x_pf *pf, struct ne6x_key key); +int ne6x_del_key_list(struct ne6x_pf *pf, struct ne6x_key key); +int ne6x_add_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size); +int ne6x_del_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size); + +int ne6x_adpt_add_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); +int ne6x_adpt_del_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); + +void ne6x_sync_features(struct net_device *netdev); + +int ne6x_adpt_add_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast); +int ne6x_adpt_del_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast); + +int ne6x_adpt_clear_mac_vlan(struct ne6x_adapter *adpt); +void ne6x_adpt_clear_ddos(struct ne6x_pf *pf); +void ne6x_linkscan_schedule(struct ne6x_pf *pf); + +ssize_t ne6x_proc_tps_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c new file mode 100644 index 0000000000000000000000000000000000000000..e5793f89cd4a1ba7461f44676325359cd8d62a49 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c @@ -0,0 +1,628 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_portmap.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_arfs.h" + +static void +ne6x_arfs_update_active_fltr_cntrs(struct ne6x_adapter *adpt, + struct ne6x_arfs_entry *entry, bool add); + +int ne6x_dev_add_fster_rules(struct ne6x_adapter *adpt, struct ne6x_fster_fltr *input, bool is_tun) +{ + u32 table_id = 0xffffffff; + struct ne6x_fster_table fster; + struct ne6x_fster_search_result result; + u32 *fster_data = (u32 *)&fster; + int ret = 0, index; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + memset(&fster, 0x00, sizeof(struct ne6x_fster_table)); + /* hash key */ + memcpy(&fster.ip, &input->ip, sizeof(fster.ip)); + /* hash data */ + memcpy(&fster.data, &input->data, sizeof(fster.data)); + + /* flow steer info */ + for (index = 0; index < 24; index++) + fster_data[index] = cpu_to_be32(fster_data[index]); + + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster.ip), (u32 *)&result, 32); + + if (ret == -ENOENT) { + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster), &table_id); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "insert flow steer table fail %02x\n", + ADPT_LPORT(adpt)); + } else { + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_ARFS_TABLE, result.key_index + 8, + (u32 *)&fster.data, sizeof(fster.data)); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "update flow steer table fail ret:%d\n", + ret); + } + + return 0; +} + +int ne6x_dev_del_fster_rules(struct ne6x_adapter *adpt, struct ne6x_fster_fltr *input, bool is_tun) +{ + struct ne6x_fster_table fster; + struct ne6x_fster_search_result result; + u32 *fster_data = (u32 *)&fster; + int ret = 0, index; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + memset(&fster, 0x00, sizeof(struct ne6x_fster_table)); + /* hash key */ + memcpy(&fster.ip, &input->ip, sizeof(fster.ip)); + + /* flow steer info */ + for (index = 0; index < 16; index++) + fster_data[index] = cpu_to_be32(fster_data[index]); + + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster.ip), (u32 *)&result, 32); + if (!ret) { + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_ARFS_TABLE, + (u32 *)&fster.ip, sizeof(fster.ip)); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "delete flow steer table fail ret:%d\n", + ret); + } else { + dev_err(ne6x_pf_to_dev(adpt->back), "search flow steer table fail ret:%d\n", ret); + } + return 0; +} + +static bool ne6x_is_arfs_active(struct ne6x_adapter *adpt) +{ + return !!adpt->arfs_fltr_list; +} + +static bool +ne6x_arfs_is_flow_expired(struct ne6x_adapter *adpt, struct ne6x_arfs_entry *arfs_entry) +{ +#define NE6X_ARFS_TIME_DELTA_EXPIRATION msecs_to_jiffies(5000) + if (rps_may_expire_flow(adpt->netdev, arfs_entry->fltr_info.q_index, + arfs_entry->flow_id, + arfs_entry->fltr_info.fltr_id)) + return true; + + /* expiration timer only used for UDP filters */ + if (arfs_entry->fltr_info.flow_type != NE6X_FLTR_PTYPE_NONF_IPV4_UDP && + arfs_entry->fltr_info.flow_type != NE6X_FLTR_PTYPE_NONF_IPV6_UDP) + return false; + + return time_in_range64(arfs_entry->time_activated + + NE6X_ARFS_TIME_DELTA_EXPIRATION, + arfs_entry->time_activated, get_jiffies_64()); +} + +static void +ne6x_arfs_update_flow_rules(struct ne6x_adapter *adpt, u16 idx, + struct hlist_head *add_list, + struct hlist_head *del_list) +{ + struct ne6x_arfs_entry *e; + struct hlist_node *n; + struct device *dev; + + dev = 
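/* one bucket per pass: INACTIVE entries are queued for a HW add, expired ACTIVE entries for a HW delete */ 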
ne6x_pf_to_dev(adpt->back); + + /* go through the aRFS hlist at this idx and check for needed updates */ + hlist_for_each_entry_safe(e, n, &adpt->arfs_fltr_list[idx], list_entry) { + /* check if filter needs to be added to HW */ + if (e->fltr_state == NE6X_ARFS_INACTIVE) { + enum ne6x_fltr_ptype flow_type = e->fltr_info.flow_type; + struct ne6x_arfs_entry_ptr *ep = + devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC); + + if (!ep) + continue; + INIT_HLIST_NODE(&ep->list_entry); + /* reference aRFS entry to add HW filter */ + ep->arfs_entry = e; + hlist_add_head(&ep->list_entry, add_list); + e->fltr_state = NE6X_ARFS_ACTIVE; + /* expiration timer only used for UDP flows */ + if (flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_UDP || + flow_type == NE6X_FLTR_PTYPE_NONF_IPV6_UDP) + e->time_activated = get_jiffies_64(); + } else if (e->fltr_state == NE6X_ARFS_ACTIVE) { + /* check if filter needs to be removed from HW */ + if (ne6x_arfs_is_flow_expired(adpt, e)) { + /* remove aRFS entry from hash table for delete + * and to prevent referencing it the next time + * through this hlist index + */ + hlist_del(&e->list_entry); + e->fltr_state = NE6X_ARFS_TODEL; + /* save reference to aRFS entry for delete */ + hlist_add_head(&e->list_entry, del_list); + } + } + } +} + +int ne6x_arfs_add_flow_rules(struct ne6x_adapter *adpt, struct hlist_head *add_list_head) +{ + struct ne6x_arfs_entry_ptr *ep; + struct hlist_node *n; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) { + int result; + + result = ne6x_dev_add_fster_rules(adpt, &ep->arfs_entry->fltr_info, false); + if (!result) + ne6x_arfs_update_active_fltr_cntrs(adpt, ep->arfs_entry, true); + else + dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", + result, ep->arfs_entry->fltr_state, + ep->arfs_entry->fltr_info.fltr_id, + ep->arfs_entry->flow_id, + ep->arfs_entry->fltr_info.q_index); + + hlist_del(&ep->list_entry); + devm_kfree(dev, ep); + } + + return 0; +} + +int ne6x_arfs_del_flow_rules(struct ne6x_adapter *adpt, struct hlist_head *del_list_head) +{ + struct ne6x_arfs_entry *e; + struct hlist_node *n; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + hlist_for_each_entry_safe(e, n, del_list_head, list_entry) { + int result; + + result = ne6x_dev_del_fster_rules(adpt, &e->fltr_info, false); + if (!result) + ne6x_arfs_update_active_fltr_cntrs(adpt, e, false); + else + dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", + result, e->fltr_state, e->fltr_info.fltr_id, + e->flow_id, e->fltr_info.q_index); + + /* The aRFS hash table is no longer referencing this entry */ + hlist_del(&e->list_entry); + devm_kfree(dev, e); + } + + return 0; +} + +void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf) +{ + struct ne6x_adapter *pf_adpt; + unsigned int i; + u8 idx = 0; + + ne6x_for_each_pf(pf, idx) { + HLIST_HEAD(tmp_del_list); + HLIST_HEAD(tmp_add_list); + + pf_adpt = pf->adpt[idx]; + + if (!pf_adpt) + continue; + + if (unlikely(!(pf_adpt->netdev->features & NETIF_F_NTUPLE))) + continue; + + if (!ne6x_is_arfs_active(pf_adpt)) + continue; + + spin_lock_bh(&pf_adpt->arfs_lock); + /* Once we process aRFS for the PF ADPT get out */ + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) + ne6x_arfs_update_flow_rules(pf_adpt, i, &tmp_add_list, + &tmp_del_list); + spin_unlock_bh(&pf_adpt->arfs_lock); + + /* use list of ne6x_arfs_entry(s) for delete */ + ne6x_arfs_del_flow_rules(pf_adpt, &tmp_del_list); + + /* use list of 
ne6x_arfs_entry(s) for add */ + ne6x_arfs_add_flow_rules(pf_adpt, &tmp_add_list); + } +} + +static void +ne6x_arfs_update_active_fltr_cntrs(struct ne6x_adapter *adpt, + struct ne6x_arfs_entry *entry, bool add) +{ + struct ne6x_arfs_active_fltr_cntrs *fltr_cntrs = adpt->arfs_fltr_cntrs; + + switch (entry->fltr_info.flow_type) { + case NE6X_FLTR_PTYPE_NONF_IPV4_TCP: + if (add) + atomic_inc(&fltr_cntrs->active_tcpv4_cnt); + else + atomic_dec(&fltr_cntrs->active_tcpv4_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV6_TCP: + if (add) + atomic_inc(&fltr_cntrs->active_tcpv6_cnt); + else + atomic_dec(&fltr_cntrs->active_tcpv6_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV4_UDP: + if (add) + atomic_inc(&fltr_cntrs->active_udpv4_cnt); + else + atomic_dec(&fltr_cntrs->active_udpv4_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV6_UDP: + if (add) + atomic_inc(&fltr_cntrs->active_udpv6_cnt); + else + atomic_dec(&fltr_cntrs->active_udpv6_cnt); + break; + default: + dev_err(ne6x_pf_to_dev(adpt->back), "aRFS: Failed to update filter counters, invalid filter type %d\n", + entry->fltr_info.flow_type); + } +} + +static bool +ne6x_arfs_cmp(struct ne6x_fster_fltr *fltr_info, const struct flow_keys *fk) +{ + bool is_v4; + + if (!fltr_info || !fk) + return false; + + is_v4 = (fltr_info->flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_UDP || + fltr_info->flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_TCP); + + if (fk->basic.n_proto == htons(ETH_P_IP) && is_v4) + return (fltr_info->ip.v4.proto == fk->basic.ip_proto && + fltr_info->ip.v4.src_port == fk->ports.src && + fltr_info->ip.v4.dst_port == fk->ports.dst && + fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src && + fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst); + + else if (fk->basic.n_proto == htons(ETH_P_IPV6) && !is_v4) + return (fltr_info->ip.v6.proto == fk->basic.ip_proto && + fltr_info->ip.v6.src_port == fk->ports.src && + fltr_info->ip.v6.dst_port == fk->ports.dst && + !memcmp(&fltr_info->ip.v6.src_ip, + &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)) && + !memcmp(&fltr_info->ip.v6.dst_ip, + &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr))); + + return false; +} + +static struct ne6x_arfs_entry * +ne6x_arfs_build_entry(struct ne6x_adapter *adpt, const struct flow_keys *fk, + u32 hash, u16 rxq_idx, u32 flow_id) +{ + struct ne6x_arfs_entry *arfs_entry; + struct ne6x_fster_fltr *fltr_info; + u8 ip_proto; + + arfs_entry = devm_kzalloc(ne6x_pf_to_dev(adpt->back), + sizeof(*arfs_entry), + GFP_ATOMIC | __GFP_NOWARN); + if (!arfs_entry) + return NULL; + + fltr_info = &arfs_entry->fltr_info; + fltr_info->q_index = rxq_idx; + fltr_info->dest_adpt = adpt->idx; + ip_proto = fk->basic.ip_proto; + + if (fk->basic.n_proto == htons(ETH_P_IP)) { + fltr_info->ip.v4.proto = ip_proto; + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? + NE6X_FLTR_PTYPE_NONF_IPV4_TCP : + NE6X_FLTR_PTYPE_NONF_IPV4_UDP; + fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src; + fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst; + fltr_info->ip.v4.src_port = fk->ports.src; + fltr_info->ip.v4.dst_port = fk->ports.dst; + fltr_info->ip.v4.proto = fk->basic.ip_proto; + fltr_info->ip.v4.pi = ADPT_LPORT(adpt); + } else { /* ETH_P_IPV6 */ + fltr_info->ip.v6.proto = ip_proto; + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? 
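/* only TCP and UDP flows get here; ne6x_rx_flow_steer() rejects all other protocols */ 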
+ NE6X_FLTR_PTYPE_NONF_IPV6_TCP : + NE6X_FLTR_PTYPE_NONF_IPV6_UDP; + memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)); + memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); + fltr_info->ip.v6.src_port = fk->ports.src; + fltr_info->ip.v6.dst_port = fk->ports.dst; + fltr_info->ip.v6.proto = fk->basic.ip_proto; + fltr_info->ip.v6.pi = ADPT_LPORT(adpt); + } + fltr_info->data.tab_id = 5; + fltr_info->data.port = ADPT_VPORT(adpt); + fltr_info->data.cos = cpu_to_be16(rxq_idx); + fltr_info->data.hash = hash; + + arfs_entry->flow_id = flow_id; + fltr_info->fltr_id = + atomic_inc_return(adpt->arfs_last_fltr_id) % RPS_NO_FILTER; + + return arfs_entry; +} + +void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt) +{ + struct net_device *netdev; + + if (!adpt) + return; + + netdev = adpt->netdev; + if (!netdev || !netdev->rx_cpu_rmap) + return; + + free_irq_cpu_rmap(netdev->rx_cpu_rmap); + netdev->rx_cpu_rmap = NULL; +} + +int ne6x_get_irq_num(struct ne6x_pf *pf, int idx) +{ + if (!pf->msix_entries) + return -EINVAL; + + return pf->msix_entries[idx].vector; +} + +int ne6x_set_cpu_rx_rmap(struct ne6x_adapter *adpt) +{ + struct net_device *netdev; + struct ne6x_pf *pf; + int base_idx, i; + + pf = adpt->back; + + netdev = adpt->netdev; + if (!pf || !netdev || !adpt->num_q_vectors) + return -EINVAL; + + netdev_dbg(netdev, "Setup CPU RMAP: adpt type 0x%x, ifname %s, q_vectors %d\n", + adpt->type, netdev->name, adpt->num_q_vectors); + + netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adpt->num_q_vectors); + if (unlikely(!netdev->rx_cpu_rmap)) + return -EINVAL; + + base_idx = adpt->base_vector; + for (i = 0; i < adpt->num_q_vectors; i++) { + if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, ne6x_get_irq_num(pf, base_idx + i))) { + ne6x_free_cpu_rx_rmap(adpt); + return -EINVAL; + } + } + + return 0; +} + +int ne6x_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, + u16 rxq_idx, u32 flow_id) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_arfs_entry *arfs_entry; + struct ne6x_adapter *adpt = np->adpt; + struct flow_keys fk; + struct ne6x_pf *pf; + __be16 n_proto; + u8 ip_proto; + u16 idx; + u32 hash; + int ret; + + if (unlikely(!(netdev->features & NETIF_F_NTUPLE))) + return -ENODEV; + + /* failed to allocate memory for aRFS so don't crash */ + if (unlikely(!adpt->arfs_fltr_list)) + return -ENODEV; + + pf = adpt->back; + + if (unlikely(test_bit(NE6X_DOWN, pf->state))) + return -ENODEV; + + /* aRFS only supported on Rx queues belonging to PF ADPT */ + if (rxq_idx >= adpt->num_queue) + return -EOPNOTSUPP; + + if (skb->encapsulation) + return -EPROTONOSUPPORT; + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; + + n_proto = fk.basic.n_proto; + /* Support only IPV4 and IPV6 */ + if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) || + n_proto == htons(ETH_P_IPV6)) + ip_proto = fk.basic.ip_proto; + else + return -EPROTONOSUPPORT; + + /* Support only TCP and UDP */ + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) + return -EPROTONOSUPPORT; + + /* choose the aRFS list bucket based on skb hash */ + hash = skb_get_hash_raw(skb); + idx = skb_get_hash_raw(skb) & NE6X_ARFS_LST_MASK; + /* search for entry in the bucket */ + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry(arfs_entry, &adpt->arfs_fltr_list[idx], + list_entry) { + struct ne6x_fster_fltr *fltr_info = &arfs_entry->fltr_info; + + /* keep searching for the already existing arfs_entry flow */ + if 
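/* ne6x_arfs_cmp() matches protocol, ports and addresses for the flow's address family */ 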
(!ne6x_arfs_cmp(fltr_info, &fk)) + continue; + + ret = fltr_info->fltr_id; + + if (fltr_info->q_index == rxq_idx || + arfs_entry->fltr_state != NE6X_ARFS_ACTIVE) + goto out; + + /* update the queue to forward to on an already existing flow */ + fltr_info->q_index = rxq_idx; + fltr_info->data.cos = cpu_to_be16(rxq_idx); + arfs_entry->fltr_state = NE6X_ARFS_INACTIVE; + ne6x_arfs_update_active_fltr_cntrs(adpt, arfs_entry, false); + goto out_schedule_service_task; + } + + arfs_entry = ne6x_arfs_build_entry(adpt, &fk, hash, rxq_idx, flow_id); + if (!arfs_entry) { + ret = -ENOMEM; + goto out; + } + + ret = arfs_entry->fltr_info.fltr_id; + INIT_HLIST_NODE(&arfs_entry->list_entry); + hlist_add_head(&arfs_entry->list_entry, &adpt->arfs_fltr_list[idx]); +out_schedule_service_task: + ne6x_service_event_schedule(pf); +out: + spin_unlock_bh(&adpt->arfs_lock); + return ret; +} + +static int ne6x_init_arfs_cntrs(struct ne6x_adapter *adpt) +{ + if (!adpt) + return -EINVAL; + + adpt->arfs_fltr_cntrs = kzalloc(sizeof(*adpt->arfs_fltr_cntrs), + GFP_KERNEL); + if (!adpt->arfs_fltr_cntrs) + return -ENOMEM; + + adpt->arfs_last_fltr_id = kzalloc(sizeof(*adpt->arfs_last_fltr_id), + GFP_KERNEL); + if (!adpt->arfs_last_fltr_id) { + kfree(adpt->arfs_fltr_cntrs); + adpt->arfs_fltr_cntrs = NULL; + return -ENOMEM; + } + + return 0; +} + +void ne6x_init_arfs(struct ne6x_adapter *adpt) +{ + struct hlist_head *arfs_fltr_list; + unsigned int i; + + if (!adpt) + return; + + arfs_fltr_list = kcalloc(NE6X_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), + GFP_KERNEL); + if (!arfs_fltr_list) + return; + + if (ne6x_init_arfs_cntrs(adpt)) + goto free_arfs_fltr_list; + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) + INIT_HLIST_HEAD(&arfs_fltr_list[i]); + + spin_lock_init(&adpt->arfs_lock); + + adpt->arfs_fltr_list = arfs_fltr_list; + + return; + +free_arfs_fltr_list: + kfree(arfs_fltr_list); +} + +void ne6x_clear_arfs(struct ne6x_adapter *adpt) +{ + struct device *dev; + unsigned int i; + struct ne6x_arfs_entry *r; + struct hlist_node *n; + HLIST_HEAD(tmp_del_list); + + if (!adpt || !adpt->back || !adpt->arfs_fltr_list) + return; + + dev = ne6x_pf_to_dev(adpt->back); + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) { + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry_safe(r, n, &adpt->arfs_fltr_list[i], + list_entry) { + if (r->fltr_state == NE6X_ARFS_ACTIVE || r->fltr_state == NE6X_ARFS_TODEL) { + hlist_del(&r->list_entry); + hlist_add_head(&r->list_entry, &tmp_del_list); + } + } + spin_unlock_bh(&adpt->arfs_lock); + } + + hlist_for_each_entry_safe(r, n, &tmp_del_list, list_entry) { + ne6x_dev_del_fster_rules(adpt, &r->fltr_info, false); + hlist_del(&r->list_entry); + devm_kfree(dev, r); + } + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) { + struct ne6x_arfs_entry *r; + struct hlist_node *n; + + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry_safe(r, n, &adpt->arfs_fltr_list[i], + list_entry) { + hlist_del(&r->list_entry); + devm_kfree(dev, r); + } + spin_unlock_bh(&adpt->arfs_lock); + } + + kfree(adpt->arfs_fltr_list); + adpt->arfs_fltr_list = NULL; + kfree(adpt->arfs_last_fltr_id); + adpt->arfs_last_fltr_id = NULL; + kfree(adpt->arfs_fltr_cntrs); + adpt->arfs_fltr_cntrs = NULL; +} + +void ne6x_remove_arfs(struct ne6x_adapter *adpt) +{ + if (!adpt) + return; + + ne6x_clear_arfs(adpt); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h new file mode 100644 index 0000000000000000000000000000000000000000..a24d9f19d478f72eccfcfa238f6d25bc83cd7ff4 --- /dev/null +++ 
b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_ARFS_H +#define _NE6X_ARFS_H + +/* protocol enumeration for filters */ +enum ne6x_fltr_ptype { + /* NONE - used for undef/error */ + NE6X_FLTR_PTYPE_NONF_NONE = 0, + NE6X_FLTR_PTYPE_NONF_IPV4_UDP, + NE6X_FLTR_PTYPE_NONF_IPV4_TCP, + NE6X_FLTR_PTYPE_NONF_IPV6_UDP, + NE6X_FLTR_PTYPE_NONF_IPV6_TCP, + NE6X_FLTR_PTYPE_MAX, +}; + +struct ne6x_fster_v4 { + __be32 rsv0[3]; + __be32 dst_ip; + __be32 rsv1[3]; + __be32 src_ip; + __be16 dst_port; + __be16 src_port; + __be16 rsv2; + u8 pi; + u8 proto; + u8 rsv3[24]; +}; + +#define NE6X_IPV6_ADDR_LEN_AS_U32 4 + +struct ne6x_fster_v6 { + __be32 dst_ip[NE6X_IPV6_ADDR_LEN_AS_U32]; + __be32 src_ip[NE6X_IPV6_ADDR_LEN_AS_U32]; + __be16 dst_port; + __be16 src_port; + __be16 rsv0; + u8 pi; + u8 proto; + u8 rsv1[24]; +}; + +struct ne6x_fster_data { + u8 tab_id; + u8 port; + __be16 cos; + __be32 hash; + u8 rsv0[24]; +}; + +struct ne6x_fster_table { + union { + struct ne6x_fster_v4 v4; + struct ne6x_fster_v6 v6; + } ip; + struct ne6x_fster_data data; +}; + +struct ne6x_fster_search_result { + u32 key_index; + struct ne6x_fster_data data; +}; + +struct ne6x_fster_fltr { + struct list_head fltr_node; + enum ne6x_fltr_ptype flow_type; + + union { + struct ne6x_fster_v4 v4; + struct ne6x_fster_v6 v6; + } ip; + struct ne6x_fster_data data; + + /* filter control */ + u16 q_index; + u16 dest_adpt; + u8 cnt_ena; + u16 cnt_index; + u32 fltr_id; +}; + +enum ne6x_arfs_fltr_state { + NE6X_ARFS_INACTIVE, + NE6X_ARFS_ACTIVE, + NE6X_ARFS_TODEL, +}; + +struct ne6x_arfs_entry { + struct ne6x_fster_fltr fltr_info; + struct ne6x_arfs_active_fltr_cntrs *arfs_fltr_cntrs; + struct hlist_node list_entry; + u64 time_activated; /* only valid for UDP flows */ + u32 flow_id; + /* fltr_state = 0 - NE6X_ARFS_INACTIVE: + * filter needs to be updated or programmed in HW. + * fltr_state = 1 - NE6X_ARFS_ACTIVE: + * filter is active and programmed in HW. + * fltr_state = 2 - NE6X_ARFS_TODEL: + * filter has been deleted from HW and needs to be removed from + * the aRFS hash table. 
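+ * The usual lifecycle is INACTIVE -> ACTIVE -> TODEL, driven by + * ne6x_sync_arfs_fltrs(); TCP entries expire via rps_may_expire_flow(), + * UDP entries also age out on a timer.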
+ */ + u8 fltr_state; +}; + +struct ne6x_arfs_entry_ptr { + struct ne6x_arfs_entry *arfs_entry; + struct hlist_node list_entry; +}; + +struct ne6x_arfs_active_fltr_cntrs { + atomic_t active_tcpv4_cnt; + atomic_t active_tcpv6_cnt; + atomic_t active_udpv4_cnt; + atomic_t active_udpv6_cnt; +}; + +#ifdef CONFIG_RFS_ACCEL +int +ne6x_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, + u16 rxq_idx, u32 flow_id); +void ne6x_clear_arfs(struct ne6x_adapter *adpt); +void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt); +void ne6x_init_arfs(struct ne6x_adapter *adpt); +void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf); +int ne6x_set_cpu_rx_rmap(struct ne6x_adapter *adpt); +void ne6x_remove_arfs(struct ne6x_adapter *adpt); +#else +static inline void ne6x_clear_arfs(struct ne6x_adapter *adpt) { } +static inline void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt) { } +static inline void ne6x_init_arfs(struct ne6x_adapter *adpt) { } +static inline void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf) { } +static inline void ne6x_remove_arfs(struct ne6x_adapter *adpt) { } + +static inline int ne6x_set_cpu_rx_rmap(struct ne6x_adapter __always_unused *adpt) +{ + return 0; +} + +static inline int +ne6x_rx_flow_steer(struct net_device __always_unused *netdev, + const struct sk_buff __always_unused *skb, + u16 __always_unused rxq_idx, u32 __always_unused flow_id) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_RFS_ACCEL */ + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..b945381ee8e8d2c39c30ad9aa772989823991e6b --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c @@ -0,0 +1,2397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include +#include +#include +#include +#include +#include + +#include "ne6x.h" +#include "ne6x_debugfs.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_arfs.h" + +#define NE6X_CQ_TO_OFF_TX(__desc, __idx) \ + (((__desc)->payload.data[3 * (__idx) + 1] << 0) | \ + ((__desc)->payload.data[3 * (__idx) + 2] << 8)) +#define NE6X_CQ_TO_STS_TX(__desc, __idx) ((__desc)->payload.data[3 * (__idx)]) + +#define NE6X_CQ_TO_LEN_RX(__desc, __idx) \ + (((__desc)->payload.data[5 * (__idx) + 1] << 0) | \ + ((__desc)->payload.data[5 * (__idx) + 2] << 8)) +#define NE6X_CQ_TO_STS_RX(__desc, __idx) ((__desc)->payload.data[5 * (__idx)]) +#define NE6X_CQ_TO_OFF_RX(__desc, __idx) \ + (((__desc)->payload.data[5 * (__idx) + 3] << 0) | \ + ((__desc)->payload.data[5 * (__idx) + 4] << 8)) + +#define PARA_KEY_STRING " " +#define ARRAY_P_MAX_COUNT 140 +#define HASH_KEY_SIZE 64 +#define HASH_DATA_SIZE 64 +#define TABLE_WIDHT_BIT_512 512 +#define TABLE_WIDHT_BIT_128 128 +#define TABLE_WIDHT_BIT_64 64 +#define TABLE_WIDHT_BIT_16 16 +#define TABLE_WIDHT_BIT_256 256 +#define TABLE_WIDHT_BIT_32 32 + +#define FRU_CHECK_6ASCII(x) (((x) >> 6) == 0x2) +#define ASCII628_BASE 32 +#define FRU_6BIT_8BITLENGTH(x) (((x) * 4) / 3) + +static int table_size[] = { + TABLE_WIDHT_BIT_512, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_16, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_256, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_32 +}; + +const struct ne6x_debug_info ne6x_device_info[] = { + {0xE220, "N5E025P2-PAUA", "25G"}, {0xE22C, "N5E025P2-NAUA", "25G"}, + {0xE221, "N5S025P2-PAUA", "25G"}, {0xE22D, "N5S025P2-NAUA", "25G"}, + {0xEA20, "N6E100P2-PAUA", "100G"}, {0xEA2C, "N6E100P2-NAUA", "100G"}, + {0xEA21, "N6S100P2-PAUA", "100G"}, {0xEA2D, "N6S100P2-NAUA", "100G"}, + {0xD221, "N6S025P2-PDUA", "25G"}, {0xDA21, "N6S100P2-PDUA", "100G"}, + {0x1220, "N5E025P2-PAGA", "25G"}, {0x122C, "N5E025P2-NAGA", "25G"}, + {0x1221, "N5S025P2-PAGA", "25G"}, {0x122D, "N5S025P2-NAGA", "25G"}, + {0x1A20, "N6E100P2-PAGA", "100G"}, {0x1A2C, "N6E100P2-NAGA", "100G"}, + {0x1A21, "N6S100P2-PAGA", "100G"}, {0x1A2D, "N6S100P2-NAGA", "100G"}, + {0x0221, "N6S100P2-NAGA", "100G"}, {0x0A21, "N6S100P2-PDGA", "100G"} }; + +char *my_strtok(char *p_in_string, char *p_in_delimit, char **pp_out_ret) +{ + static char *p_tmp; + char *p_strstr = NULL; + char *ret = NULL; + int for_index; + + if (!pp_out_ret) + return NULL; + + *pp_out_ret = NULL; + if (!p_in_delimit) + return p_in_string; + + if (p_in_string) + p_tmp = p_in_string; + + if (!p_tmp) + return NULL; + + ret = p_tmp; + p_strstr = strstr(p_tmp, p_in_delimit); + if (p_strstr) { + p_tmp = p_strstr + strlen(p_in_delimit); + for (for_index = 0; for_index < strlen(p_in_delimit); for_index++) + *(p_strstr + for_index) = '\0'; + } else { + p_tmp = NULL; + } + + *pp_out_ret = p_tmp; + + return ret; +} + +int my_isdigit(char in_char) +{ + if ((in_char >= '0') && (in_char <= '9')) + return 1; + else + return 0; +} + +int my_atoi(char *p_in_string) +{ + int flag = 1; + int ret = 0; + + while (my_isdigit(p_in_string[0]) == 0) + p_in_string++; + + if (*(p_in_string - 1) == '-') + flag = -1; + + while (my_isdigit(p_in_string[0]) != 0) { + ret *= 10; + ret += p_in_string[0] - '0'; + if (ret > INT_MAX || ret < INT_MIN) + return 0; + + p_in_string++; + } + + if (ret != 0) + return (flag * ret); + else + return 0; +} + +static struct dentry *ne6x_dbg_root; +u8 *ne6x_dbg_get_fru_product_part(u8 *buffer, enum fru_product_part part, u8 *len); + +void 
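/* + * The ne6x_dbg_* handlers below each parse a text command written from + * user space through debugfs. A minimal usage sketch, assuming a writable + * command node under the driver's debugfs directory (the node and path + * names are illustrative; they are created outside this hunk): + * + * echo "queue" > /sys/kernel/debug/ne6x/0000:01:00.0/cmd + * + * cmd_buf carries the raw command text and count its length. + */ 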
ne6x_dbg_show_queue(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_OFST)); + } + dev_info(&pf->pdev->dev, "----RX: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], idle:%04d, alloc:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, oft, NE6X_DESC_UNUSED(ring), ring->next_to_alloc, + ring->next_to_use, ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_OFST)); + } + dev_info(&pf->pdev->dev, "----TX: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, oft, NE6X_DESC_UNUSED(ring), ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_CQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_CQ_TAIL_POINTER)); + } + dev_info(&pf->pdev->dev, "----CQ: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, NE6X_DESC_UNUSED(ring), ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +void ne6x_dbg_show_ring(struct ne6x_pf *pf, char *cmd_buf, int 
count) +{ + int i, j, k, l; + union ne6x_rx_desc *rx_desc; + struct ne6x_tx_desc *tx_desc; + struct ne6x_cq_desc *cq_desc; + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + for (k = 0; k < ring->count; k++) { + rx_desc = NE6X_RX_DESC(ring, k); + if (!rx_desc->wb.u.val) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** rx_desc[%d], vp[%d], mml[%d], sml[%d], bsa[0x%llx], bma[0x%llx], flag[0x%x], vp[%d], pkt_len[%d]\n", + k, rx_desc->w.vp, rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, rx_desc->w.buffer_sop_addr, + rx_desc->w.buffer_mop_addr, rx_desc->wb.u.val, + rx_desc->wb.vp, rx_desc->wb.pkt_len); + } + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + for (k = 0; k < ring->count; k++) { + tx_desc = NE6X_TX_DESC(ring, k); + if (!tx_desc->buffer_mop_addr) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** tx_desc[%d], flag[0x%x], vp[%d], et[%d], ch[%d], tt[%d],sopv[%d],eopv[%d],tso[%d],l3chk[%d],l3oft[%d],l4chk[%d],l4oft[%d],pld[%d],mop[%d],sop[%d],mss[%d],mopa[%lld],sopa[%lld]\n", + k, tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, + tx_desc->chain, tx_desc->transmit_type, tx_desc->sop_valid, + tx_desc->eop_valid, tx_desc->tso, tx_desc->l3_csum, + tx_desc->l3_ofst, tx_desc->l4_csum, tx_desc->l4_ofst, + tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, + tx_desc->mss, tx_desc->buffer_mop_addr, + tx_desc->buffer_sop_addr); + } + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + for (k = 0; k < ring->count; k++) { + cq_desc = NE6X_CQ_DESC(ring, k); + if (!cq_desc->num) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, + "**** cq_desc[%d], vp[%d], ctype[%d], num[%d]\n", k, + ring->reg_idx, cq_desc->ctype, cq_desc->num); + for (l = 0; l < cq_desc->num; l++) { + if (cq_desc->ctype == 0) + dev_info(&pf->pdev->dev, + "******[TX] %d:%d val:0x%x\n", l, + NE6X_CQ_TO_OFF_TX(cq_desc, l), + NE6X_CQ_TO_STS_TX(cq_desc, l)); + else + dev_info(&pf->pdev->dev, + "******[RX] %d:%d val:0x%x len:0x%x\n", l, + NE6X_CQ_TO_OFF_RX(cq_desc, l), + NE6X_CQ_TO_STS_RX(cq_desc, l), + NE6X_CQ_TO_LEN_RX(cq_desc, l)); + } + } + } + } +} + +void ne6x_dbg_show_txtail(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int i, j; + struct ne6x_adapter *adpt; + struct ne6x_ring *ring; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] TX queue[%d] processed %llx packets\n", i, j, + readq(ring->tail + j)); + } + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + } +} + +void ne6x_dbg_show_txq(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link 
down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] TX queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + } +} + +void ne6x_dbg_show_rxq(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] RX queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +void ne6x_dbg_show_cq(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] CQ queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +void ne6x_dbg_clean_queue(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = adpt->tx_rings[j]; + rx_ring = adpt->rx_rings[j]; + cq_ring = adpt->cq_rings[j]; + + memset(&tx_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&tx_ring->tx_stats, 0, sizeof(struct ne6x_txq_stats)); + + memset(&rx_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&rx_ring->rx_stats, 0, sizeof(struct ne6x_rxq_stats)); + + memset(&cq_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&cq_ring->cq_stats, 0, sizeof(struct ne6x_cq_stats)); + } + dev_info(&pf->pdev->dev, "---------------------------adpt[%d] all ring cleaned---------------------------------------", + i); + } +} + +void ne6x_dbg_show_txring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *tx_ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------tx begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + 
dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = adpt->tx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_OFST)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d, busy:%lld\n", + i, j, head, tail, oft, NE6X_DESC_UNUSED(tx_ring), + tx_ring->next_to_use, tx_ring->next_to_clean, + tx_ring->tx_stats.tx_busy); + } + } + dev_info(&pf->pdev->dev, "+----------------------------tx end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_rxring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *rx_ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------rx begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + rx_ring = adpt->rx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_OFST)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], alloc:%04d, use:%04d, clean:%04d, cq_expect:%04d\n", + i, j, head, tail, oft, rx_ring->next_to_alloc, + rx_ring->next_to_use, rx_ring->next_to_clean, + rx_ring->cq_last_expect); + } + } + dev_info(&pf->pdev->dev, "+----------------------------rx end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_cqring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *cq_ring; + struct ne6x_adapter *adpt; + int queue_num = 0; + u64 head, tail; + int i, j; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------cq begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + cq_ring = adpt->cq_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, 
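/* queues below NE6X_PF_VP0_NUM are reached through the hw_addr0 VP register window, the rest through the rd64_bar4()/BAR4 PF window */ 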
+ NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, NE6X_DESC_UNUSED(cq_ring), cq_ring->next_to_use, + cq_ring->next_to_clean); + } + } + dev_info(&pf->pdev->dev, "+----------------------------cq end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_txdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + struct ne6x_tx_desc *tx_desc = NULL; + struct ne6x_ring *tx_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + + tx_ring = adpt->tx_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+-----------------------------------Netdev[%d] - Queue[%d] - tx_desc begin-----------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < tx_ring->count; i++) { + tx_desc = NE6X_TX_DESC(tx_ring, i); + if (!tx_desc->buffer_mop_addr && i != 0) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "tx_desc[%d]\n", i); + dev_info(&pf->pdev->dev, "struct ne6x_tx_desc\n" + "{\n" + " u8 flags : 8; [0x%x]\n" + " u8 vp : 7; [%d]\n" + " u8 event_trigger : 1; [%d]\n" + " u8 chain : 1; [%d]\n" + " u8 transmit_type : 2; [%d]\n" + " u8 sop_valid : 1; [%d]\n" + " u8 eop_valid : 1; [%d]\n" + " u8 tso : 1; [%d]\n" + " u8 l3_csum : 1; [%d]\n" + " u8 l3_ofst : 7; [%d]\n" + " u8 l4_csum : 1; [%d]\n" + " u8 l4_ofst : 7; [%d]\n" + " u8 pld_ofst; [%d]\n" + " __le64 mop_cnt : 24; [%d]\n" + " __le64 sop_cnt : 16; [%d]\n" + " __le64 mss : 16; [%d]\n" + " __le64 buffer_mop_addr; [%lld]\n" + " __le64 buffer_sop_addr; [%lld]\n" + "};\n", + tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, tx_desc->chain, + tx_desc->transmit_type, tx_desc->sop_valid, tx_desc->eop_valid, tx_desc->tso, + tx_desc->l3_csum, tx_desc->l3_ofst, tx_desc->l4_csum, tx_desc->l4_ofst, + tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, tx_desc->mss, + tx_desc->buffer_mop_addr, tx_desc->buffer_sop_addr); + } + dev_info(&pf->pdev->dev, "+------------------------------------------------Netdev[%d] - Queue[%d] - tx_desc end--------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_rxdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + union ne6x_rx_desc *rx_desc = NULL; + struct ne6x_ring *rx_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + rx_ring = 
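/* only descriptors the hardware has written back (non-zero wb flags) are dumped */ 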
adpt->rx_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+-------------------------------------------------Netdev[%d] - Queue[%2d] - rx_desc begin-------------------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < rx_ring->count; i++) { + rx_desc = NE6X_RX_DESC(rx_ring, i); + + if (!rx_desc->wb.u.val) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** Netdev[%d], Queue[%02d], rx_desc[%d], vp[%d], mml[%d], sml[%d], bsa[0x%llx], bma[0x%llx], flag[0x%x], vp[%d], p[0x%02x%02x%02x%02x%02x%02x%02x%02x], pkt_len[%d]\n", + adpt_num, queue_num, i, rx_desc->w.vp, rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, rx_desc->w.buffer_sop_addr, + rx_desc->w.buffer_mop_addr, rx_desc->wb.u.val, rx_desc->wb.vp, + rx_desc->wb.pd[0], rx_desc->wb.pd[1], rx_desc->wb.pd[2], rx_desc->wb.pd[3], + rx_desc->wb.pd[4], rx_desc->wb.pd[5], rx_desc->wb.pd[6], rx_desc->wb.pd[7], + rx_desc->wb.pkt_len); + } + dev_info(&pf->pdev->dev, "+-------------------------------------------------Netdev[%d] - Queue[%d] - rx_desc end----------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_cqdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + struct ne6x_cq_desc *cq_desc = NULL; + struct ne6x_ring *cq_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i, j; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + cq_ring = adpt->cq_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+--------------------------------------------------Netdev[%d] - Queue[%d] - cq_desc begin------------------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < cq_ring->count; i++) { + cq_desc = NE6X_CQ_DESC(cq_ring, i); + + if (!cq_desc->num) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** Netdev[%d], Queue[%02d], cq_desc[%d], vp[%d], ctype[%s], num[%d]\n", + adpt_num, queue_num, i, cq_ring->reg_idx, + cq_desc->ctype == 0 ? 
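/* ctype 0 marks a TX completion; anything else is treated as RX */ 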
"tx" : "rx", + cq_desc->num); + for (j = 0; j < cq_desc->num; j++) { + if (cq_desc->ctype == 0) + dev_info(&pf->pdev->dev, "******TX%d[%d]: val:0x%x\n", j, + NE6X_CQ_TO_OFF_TX(cq_desc, j), + NE6X_CQ_TO_STS_TX(cq_desc, j)); + else + dev_info(&pf->pdev->dev, "******RX%d[%d]: val:0x%x len:%d\n", j, + NE6X_CQ_TO_OFF_RX(cq_desc, j), + NE6X_CQ_TO_STS_RX(cq_desc, j), + NE6X_CQ_TO_LEN_RX(cq_desc, j)); + } + } + dev_info(&pf->pdev->dev, "+--------------------------------------------------Netdev[%d] - Queue[%d] - cq_desc end--------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +#ifdef CONFIG_RFS_ACCEL +void ne6x_dbg_show_arfs_cnt(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 idx = 0; + struct ne6x_adapter *pf_adpt; + struct ne6x_arfs_active_fltr_cntrs *fltr_cntrs = NULL; + + ne6x_for_each_pf(pf, idx) { + pf_adpt = pf->adpt[idx]; + fltr_cntrs = pf_adpt->arfs_fltr_cntrs; + dev_info(&pf->pdev->dev, "+---------------------------+\n"); + dev_info(&pf->pdev->dev, "pf_num:%d totle_num:%d\n\t\t\t tcp_v4_num:%d\n\t\t\t udp_v4_num:%d\n\t\t\t tcp_v6_num:%d\n\t\t\t udp_v6_num:%d\n", + idx, (atomic_read(&fltr_cntrs->active_tcpv4_cnt) + + atomic_read(&fltr_cntrs->active_udpv4_cnt) + + atomic_read(&fltr_cntrs->active_tcpv6_cnt) + + atomic_read(&fltr_cntrs->active_udpv6_cnt)), + atomic_read(&fltr_cntrs->active_tcpv4_cnt), + atomic_read(&fltr_cntrs->active_udpv4_cnt), + atomic_read(&fltr_cntrs->active_tcpv6_cnt), + atomic_read(&fltr_cntrs->active_udpv6_cnt)); + dev_info(&pf->pdev->dev, "+---------------------------+\n"); + } +} +#endif + +extern u32 ne6x_dev_crc32(const u8 *buf, u32 size); + +void ne6x_dbg_apb_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u64 offset; + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &addr); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "apb_read \n"); + return; + } + + offset = addr; + value = ne6x_reg_apb_read(pf, offset); + dev_info(&pf->pdev->dev, "offset = 0x%08X 0x%08X\n", addr, value); +} + +void ne6x_dbg_apb_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u64 offset; + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &value); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "apb_write \n"); + return; + } + + offset = addr; + ne6x_reg_apb_write(pf, offset, value); + dev_info(&pf->pdev->dev, "apb_write: 0x%llx = 0x%x\n", offset, value); +} + +void ne6x_dbg_mem_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int index = 0, cnt; + u32 *reg_data; + u64 offset; + u32 addr; + u32 size; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &size); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "mem_read \n"); + return; + } + + reg_data = kzalloc((size + 4) * 4, GFP_KERNEL); + offset = addr; + for (index = 0x00; index < size; index++) + reg_data[index] = ne6x_reg_apb_read(pf, offset + index * 4); + + for (index = 0x00; index < size / 4; index++) + dev_info(&pf->pdev->dev, "%lx: %08X %08X %08X %08X\n", + (unsigned int long)(offset + index * 16), reg_data[4 * index], + reg_data[4 * index + 1], reg_data[4 * index + 2], reg_data[4 * index + 3]); + + if ((size % 4) == 1) + dev_info(&pf->pdev->dev, "%lx: %08X\n", (unsigned int long)(offset + index * 16), + reg_data[4 * index]); + else if ((size % 4) == 2) + dev_info(&pf->pdev->dev, "%lx: %08X %08X\n", + (unsigned int long)(offset + index * 16), reg_data[4 * index], + reg_data[4 * index + 1]); + else if ((size % 4) == 3) + dev_info(&pf->pdev->dev, "%lx: %08X %08X %08X\n", + (unsigned int long)(offset + 
index * 16), reg_data[4 * index], + reg_data[4 * index + 1], reg_data[4 * index + 2]); + + kfree((void *)reg_data); +} + +void ne6x_dbg_mem_write(struct ne6x_pf *pf, char *cmd_buf, int count) {} + +void ne6x_dbg_templ_help(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + dev_info(&pf->pdev->dev, "HW_FEATURES = 0\n"); + dev_info(&pf->pdev->dev, "HW_FLAGS = 1\n"); + dev_info(&pf->pdev->dev, "RSS_TABLE_SIZE = 2\n"); + dev_info(&pf->pdev->dev, "RSS_TABLE_ENTRY_WIDTH = 3\n"); + dev_info(&pf->pdev->dev, "RSS_HASH_KEY_BLOCK_SIZE = 4\n"); + dev_info(&pf->pdev->dev, "PORT2PI_0 = 5\n"); + dev_info(&pf->pdev->dev, "PI2PORT_0 = 25\n"); + dev_info(&pf->pdev->dev, "VLAN_TYPE = 33\n"); + dev_info(&pf->pdev->dev, "PI0_BROADCAST_LEAF = 37\n"); + dev_info(&pf->pdev->dev, "PORT_OLFLAGS_0 = 53\n"); + dev_info(&pf->pdev->dev, "PORT_2_COS_0 = 121\n"); + dev_info(&pf->pdev->dev, "VPORT0_LINK_STATUS = 155\n"); + dev_info(&pf->pdev->dev, "TSO_CKSUM_DISABLE = 156\n"); + dev_info(&pf->pdev->dev, "PORT0_MTU = 157\n"); + dev_info(&pf->pdev->dev, "PORT0_QINQ = 161\n"); + dev_info(&pf->pdev->dev, "CQ_SIZE = 229\n"); +} + +void ne6x_dbg_templ_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 vport; + u32 value; + u32 type; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &vport, &type); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "temp_read \n"); + return; + } + + ne6x_reg_get_user_data(pf, vport + type, &value); + dev_info(&pf->pdev->dev, "temp_read 0x%04X value 0x%08X\n", type, value); +} + +void ne6x_dbg_templ_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 vport; + u32 value; + u32 type; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i %i", &vport, &type, &value); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "temp_write \n"); + return; + } + + ne6x_reg_set_user_data(pf, vport + type, value); + dev_info(&pf->pdev->dev, "temp_write: 0x%04x = 0x%x\n", type, value); +} + +void ne6x_dbg_soc_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &addr); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "soc_read \n"); + return; + } + + ne6x_reg_indirect_read(pf, addr, &value); + dev_info(&pf->pdev->dev, "offset = 0x%08X 0x%08X\n", addr, value); +} + +void ne6x_dbg_soc_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &value); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "soc_write \n"); + return; + } + + ne6x_reg_indirect_write(pf, addr, value); + dev_info(&pf->pdev->dev, "soc_write: 0x%08X = 0x%08X\n", addr, value); +} + +void ne6x_dbg_tab_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int array_index = 0, ret, index; + struct ne6x_debug_table *table_info; + u8 *p_str_array[10] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 10) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 2) { + dev_warn(&pf->pdev->dev, "tab_read \n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* index */ + if 
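/* like the table id above, the index accepts decimal or 0x-prefixed hex */ 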
+
+	/* index */
+	if (!strncmp(p_str_array[1], "0x", 2))
+		table_info->index = simple_strtoul(p_str_array[1], NULL, 16);
+	else
+		table_info->index = my_atoi(p_str_array[1]);
+
+	table_info->size = table_size[table_info->table];
+	ret = ne6x_reg_table_read(pf, table_info->table, table_info->index,
+				  (u32 *)&table_info->data[0], table_info->size);
+	dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success" : "timeout!");
+
+	for (index = 0x00; index < (table_info->size >> 2) / 4; index++)
+		dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X %08X\n", index * 16,
+			 table_info->data[4 * index], table_info->data[4 * index + 1],
+			 table_info->data[4 * index + 2], table_info->data[4 * index + 3]);
+
+	if (((table_info->size >> 2) % 4) == 1)
+		dev_info(&pf->pdev->dev, "%08X: %08X\n", index * 16, table_info->data[4 * index]);
+	else if (((table_info->size >> 2) % 4) == 2)
+		dev_info(&pf->pdev->dev, "%08X: %08X %08X\n", index * 16,
+			 table_info->data[4 * index], table_info->data[4 * index + 1]);
+	else if (((table_info->size >> 2) % 4) == 3)
+		dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X\n", index * 16,
+			 table_info->data[4 * index], table_info->data[4 * index + 1],
+			 table_info->data[4 * index + 2]);
+
+	kfree(table_info);
+}
+
+void ne6x_dbg_set_mac_to_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info;
+	u8 mac_addr[6];
+	int port = 0;
+	int ret;
+	int cnt;
+
+	if (strncmp(cmd_buf, "P0", 2) == 0) {
+		port = 0;
+	} else if (strncmp(cmd_buf, "P1", 2) == 0) {
+		port = 1;
+	} else {
+		dev_warn(&pf->pdev->dev, "set_port_mac P0/P1 macaddr\n");
+		dev_warn(&pf->pdev->dev, "example-- set_port_mac P0 94:f5:21:00:00:01\n");
+		return;
+	}
+
+	cnt = sscanf(&cmd_buf[2], "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX", &mac_addr[0], &mac_addr[1],
+		     &mac_addr[2], &mac_addr[3], &mac_addr[4], &mac_addr[5]);
+	if (cnt != 6) {
+		dev_warn(&pf->pdev->dev, "set_port_mac P0/P1 macaddr\n");
+		dev_warn(&pf->pdev->dev, "example-- set_port_mac P0 94:f5:24:00:00:01\n");
+		return;
+	}
+
+	if (port == 0)
+		memcpy(&psdk_spd_info->port_0_mac, &mac_addr, 6);
+	else if (port == 1)
+		memcpy(&psdk_spd_info->port_1_mac, &mac_addr, 6);
+	else if (port == 2)
+		memcpy(&psdk_spd_info->port_2_mac, &mac_addr, 6);
+	else if (port == 3)
+		memcpy(&psdk_spd_info->port_3_mac, &mac_addr, 6);
+
+	psdk_spd_info->spd_verify_value =
+		cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info,
+					   sizeof(*psdk_spd_info) - 4));
+	ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info,
+				    sizeof(*psdk_spd_info));
+	dev_info(&pf->pdev->dev, "%s: %s\n", __func__,
+		 (ret == 0) ? "set mac success!" : "set mac fail!");
+}
: "set mac fail!"); +} + +void ne6x_dbg_get_mac(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + u8 mac_addr[6]; + int port = 0; + + if (strncmp(cmd_buf, "P0", 2) == 0) { + port = 0; + } else if (strncmp(cmd_buf, "P1", 2) == 0) { + port = 1; + } else { + dev_warn(&pf->pdev->dev, "get_port_mac P0/P1\n"); + dev_warn(&pf->pdev->dev, "example-- get_port_mac P0\n"); + return; + } + + if (port == 0) + memcpy(&mac_addr, &psdk_spd_info->port_0_mac, 6); + else if (port == 1) + memcpy(&mac_addr, &psdk_spd_info->port_1_mac, 6); + else if (port == 2) + memcpy(&mac_addr, &psdk_spd_info->port_2_mac, 6); + else if (port == 3) + memcpy(&mac_addr, &psdk_spd_info->port_3_mac, 6); + else + return; + + dev_info(&pf->pdev->dev, "port %d: mac = %02x:%02x:%02x:%02x:%02x:%02x\n", port, + mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]); +} + +void ne6x_dbg_set_dev_type_to_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + u8 *p_str_array[10] = {0}; + int array_index = 0, ret; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + u16 dev_type = 0; + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 10) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 1) { + dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + if (!strncmp(p_str_array[0], "0x", 2)) { + dev_type = simple_strtoul(p_str_array[0], NULL, 16); + } else { + dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + if (dev_type != NE6000AI_2S_X16H_25G_N5 && dev_type != NE6000AI_2S_X16H_25G_N6) { + dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + psdk_spd_info->product_mode = cpu_to_be16(dev_type); + psdk_spd_info->is_pcie_exist = 0x1; + + if (dev_type == NE6000AI_2S_X16H_25G_N5) { + psdk_spd_info->number_of_physical_controllers = 2; + psdk_spd_info->logic_port_to_phyical = cpu_to_be32(0x00000800); + } else if (dev_type == NE6000AI_2S_X16H_25G_N6) { + psdk_spd_info->number_of_physical_controllers = 2; + psdk_spd_info->logic_port_to_phyical = cpu_to_be32(0x00000100); + } else { + return; + } + + psdk_spd_info->spd_verify_value = + cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info) - 4)); + ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info)); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "write eeprom mac success!" : "write eeprom mac fail!"); +} + +void ne6x_dbg_tab_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 8) { + dev_info(&pf->pdev->dev, "tab_write
...\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* index */ + if (!strncmp(p_str_array[1], "0x", 2)) + table_info->index = simple_strtoul(p_str_array[1], NULL, 16); + else + table_info->index = my_atoi(p_str_array[1]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 2); index++) { + if (!strncmp(p_str_array[index + 2], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 2], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 2]); + + table_info->size++; + } + + table_info->size = table_size[table_info->table]; + + ret = ne6x_reg_table_write(pf, table_info->table, table_info->index, + (u32 *)&table_info->data[0], table_info->size); + kfree(table_info); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : "timeout!"); +} + +void ne6x_dbg_tab_insert(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 *p_str_array[ARRAY_P_MAX_COUNT] = {0}; + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u32 table_id = 0xffffffff; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= ARRAY_P_MAX_COUNT) + break; + + if (!p_tmp_ret) + break; + } + + /* 1 + 16 + 1+++ */ + if (array_index < 24) { + dev_warn(&pf->pdev->dev, "tab_insert
\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 1], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + + ret = ne6x_reg_table_search(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, NULL, + table_info->size); + if (ret == -ENOENT) { + table_info->size = 64 + table_size[table_info->table]; + ret = ne6x_reg_table_insert(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, + &table_id); + } else { + dev_info(&pf->pdev->dev, "0x%x 0x%x 0x%x 0x%x table exist\n", table_info->data[0], + table_info->data[1], table_info->data[2], table_info->data[3]); + return; + } + if (ret == 0) + dev_info(&pf->pdev->dev, "insert rule_id = 0x%x\n", table_id); + + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : + ((ret != -ETIMEDOUT) ? "fail!" : "timeout!")); +} + +void ne6x_dbg_tab_delete(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int array_index = 0, ret, index; + struct ne6x_debug_table *table_info; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 9) { + dev_warn(&pf->pdev->dev, "tab_delete
\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 1], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + + ret = ne6x_reg_table_delete(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size); + kfree(table_info); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : "timeout!"); +} + +void ne6x_dbg_tab_search(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + dev_info(&pf->pdev->dev, "array_index = %d\n", array_index); + if (array_index < 9) { + dev_warn(&pf->pdev->dev, "tab_delete
\n"); + kfree(table_info); + return; + } + + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 1], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + ret = ne6x_reg_table_search(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, + (u32 *)&table_info->data[0], table_info->size); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "success!" : ((ret == -ENOENT) ? "not fount!" : "timeout!")); + if (ret) + return; + + for (index = 0x00; index < (table_info->size >> 2) / 4; index++) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2], table_info->data[4 * index + 3]); + + if (((table_info->size >> 2) % 4) == 1) + dev_info(&pf->pdev->dev, "%08X: %08X\n", index * 16, table_info->data[4 * index]); + else if (((table_info->size >> 2) % 4) == 2) + dev_info(&pf->pdev->dev, "%08X: %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1]); + else if (((table_info->size >> 2) % 4) == 3) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2]); + + kfree(table_info); +} + +void ne6x_dbg_get_fru_info(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct file *fp = NULL; + u8 *buffer; + int para_count; + u32 size; + + para_count = sscanf(&cmd_buf[0], "%i", &size); + if (para_count != 1) { + dev_warn(&pf->pdev->dev, "fru_read \n"); + return; + } + + if (size > 512) { + dev_warn(&pf->pdev->dev, "size must less than 512\n."); + return; + } + + buffer = kzalloc((size + 4), GFP_KERNEL); + ne6x_dev_get_fru(pf, (u32 *)buffer, size); + + fp = filp_open("/opt/share/fru.bin", O_RDWR | O_CREAT, 0644); + if (!fp) { + dev_err(&pf->pdev->dev, "can't open /opt/share/fru.bin.\n"); + return; + } + + kernel_write(fp, (char *)buffer, size, &fp->f_pos); + filp_close(fp, NULL); +} + +u32 getparam(char *cmd_buf, u32 *param, int max_cnt) +{ + int ret, i, j, tmp, tmp1, tmp2, flag = 0; + u32 count = 0, cnt = 0, cnt_t = 0; + char *p = &cmd_buf[0]; + char *char_offset; + u32 *offset; + + offset = kzalloc((max_cnt + 1) * sizeof(u32), GFP_ATOMIC); + char_offset = kzalloc((max_cnt + 1) * sizeof(char), GFP_ATOMIC); + /* count the number */ + for (i = 0; i < strlen(cmd_buf); i++) { + if (cmd_buf[i] == ',' || cmd_buf[i] == '-') { + count++; + if (cmd_buf[i] == ',') { + offset[count] = i + 1; + char_offset[count] = ','; + } else if (cmd_buf[i] == '-') { + offset[count] = i + 1; + char_offset[count] = '-'; + } + } + if (cmd_buf[i] == ' ') + break; + + if (count >= max_cnt) + break; + } + + for (i = 0; i <= count; i++) { + ret = sscanf(p, "%i", ¶m[i + cnt_t]); + if (ret == 1) { + cnt++; + if (char_offset[cnt] == '-') { + flag++; + p = &cmd_buf[offset[cnt]]; + ret = sscanf(p, "%i", ¶m[i + cnt_t + 1]); + tmp1 = param[i + cnt_t]; + tmp2 = param[i + cnt_t + 1]; + if (ret == 1) { + tmp = i + cnt_t; + for (j = 0; j <= tmp2 - tmp1; j++) + param[tmp + j] = tmp1 + j; + } + cnt_t += tmp2 - tmp1; + + cnt++; + } + p = &cmd_buf[offset[cnt]]; + } + } + + kfree(offset); 
+
+void ne6x_dbg_show_pcie_drop_counter(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	union ne6x_eth_recv_cnt eth_recv_cnt;
+	u64 __iomem *reg;
+
+	reg = (void __iomem *)pf->hw.hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ETH_RECV_CNT);
+	eth_recv_cnt.val = readq(reg);
+	dev_info(&pf->pdev->dev, "pcie drop cnt = %d\n", eth_recv_cnt.reg.csr_eth_pkt_drop_cnt +
+		 eth_recv_cnt.reg.csr_eth_rdq_drop_cnt);
+}
+
+void ne6x_dbg_clr_table(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	u32 table_id = 0, cnt;
+
+	cnt = sscanf(&cmd_buf[0], "%i", &table_id);
+	if (cnt != 1)
+		return;
+
+	if (table_id == 6)
+		ne6x_reg_clear_table(pf, table_id);
+}
+
+void ne6x_dbg_set_hw_flag_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info;
+	int flag = 0;
+	int ret;
+	int cnt;
+
+	cnt = sscanf(&cmd_buf[0], "%i", &flag);
+	if (cnt != 1) {
+		dev_warn(&pf->pdev->dev, "set_hw_flag <flag> (0: none, 1: ram white list, 2: ddr white list)\n");
+		return;
+	}
+
+	psdk_spd_info->hw_flag = cpu_to_be32(flag);
+	psdk_spd_info->spd_verify_value =
+		cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info,
+					   sizeof(struct ne6x_dev_eeprom_info) - 4));
+	ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info,
+				    sizeof(struct ne6x_dev_eeprom_info));
+	dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "set hw_flag success!"
+								  : "set hw_flag fail!");
+}
+
+void ne6x_dbg_erase_norflash(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	u32 offset;
+	u32 length;
+	int cnt;
+
+	cnt = sscanf(&cmd_buf[0], "%i %i", &offset, &length);
+	if (cnt != 2) {
+		dev_warn(&pf->pdev->dev, "norflash_erase <offset> <length>\n");
+		return;
+	}
+
+	if (!ne6x_reg_erase_norflash(pf, offset, length))
+		return;
+
+	dev_err(&pf->pdev->dev, "norflash_erase fail.\n");
+}
+
+void ne6x_dbg_write_norflash(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	u8 *ptemp_data = NULL;
+	u32 offset = 0;
+	u32 length = 0;
+	u32 temp_data = 0;
+	u8 *ptemp = NULL;
+	int i = 0;
+
+	ptemp_data = kzalloc(1024, GFP_ATOMIC);
+	if (!ptemp_data)
+		return;
+
+	while ((ptemp = strsep(&cmd_buf, " "))) {
+		if (!strncmp(ptemp, "0x", 2))
+			temp_data = simple_strtoul(ptemp, NULL, 16);
+		else
+			temp_data = my_atoi(ptemp);
+
+		if (i == 0)
+			offset = temp_data;
+		else if (i == 1)
+			length = temp_data;
+		else
+			ptemp_data[i - 2] = (u8)temp_data;
+
+		i++;
+		if (i == 1026)
+			break;
+	}
+
+	if (length > 1024 || i < 2) {
+		dev_warn(&pf->pdev->dev, "norflash_write <offset> <length> <byte0> ... (bytes split by space, max 1024)\n");
+		goto pdata_memfree;
+	}
+
+	if (!ne6x_reg_write_norflash(pf, offset, length, (u32 *)ptemp_data))
+		dev_info(&pf->pdev->dev, "write norflash success.\n");
+	else
+		dev_err(&pf->pdev->dev, "write norflash fail.\n");
+
+pdata_memfree:
+	kfree(ptemp_data);
+}
+
+void ne6x_dbg_read_norflash(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	u32 offset = 0;
+	u32 length = 0;
+	u32 buffer_len;
+	char *pdata = NULL;
+	int cnt;
+
+	cnt = sscanf(&cmd_buf[0], "%i %i", &offset, &length);
+	if (cnt != 2) {
+		dev_warn(&pf->pdev->dev, "norflash_read <offset> <length>\n");
+		return;
+	}
+
+	buffer_len = length;
+	if (length % 4)
+		buffer_len = (length / 4 + 1) * 4;
+
+	pdata = kzalloc(buffer_len, GFP_ATOMIC);
+	if (!pdata)
+		return;
+
+	if (!ne6x_reg_read_norflash(pf, offset, buffer_len, (u32 *)pdata))
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, pdata, length);
+	else
+		dev_err(&pf->pdev->dev, "read_norflash fail.\n");
+
+	kfree(pdata);
+}
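+
+/* The word-alignment round-up in ne6x_dbg_read_norflash() above,
+ *
+ *	if (length % 4)
+ *		buffer_len = (length / 4 + 1) * 4;
+ *
+ * is the open-coded form of the kernel's ALIGN() macro:
+ *
+ *	buffer_len = ALIGN(length, 4);	// e.g. 5 -> 8, 8 -> 8
+ */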
+
+void ne6x_dbg_meter_write(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	u8 *p_str_array[ARRAY_P_MAX_COUNT] = {0};
+	u32 cir, type_num, type_flag = 0;
+	u32 cir_maxnum = 0xfffff;
+	u32 cbs_maxnum = 0xffffff;
+	struct meter_table vf_bw;
+	char *p_tmp_ret;
+	int index, ret = 0;
+	int array_index = 0;
+	u8 *p_in_string = NULL;
+	u32 data[3] = {0};
+	u32 type = 0;
+
+	p_in_string = &cmd_buf[0];
+	p_tmp_ret = NULL;
+
+	while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) !=
+	       NULL) {
+		p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1;
+		array_index++;
+		if (array_index >= ARRAY_P_MAX_COUNT)
+			break;
+		if (!p_tmp_ret)
+			break;
+	}
+	if (array_index != 3) {
+		dev_warn(&pf->pdev->dev, "Incorrect input, please re-enter\n");
+		return;
+	}
+
+	for (index = 0; index < array_index; index++) {
+		if (!strncmp(p_str_array[index], "0x", 2))
+			data[index] = simple_strtoul(p_str_array[index], NULL, 16);
+		else
+			data[index] = my_atoi(p_str_array[index]);
+	}
+
+	type_num = data[0];
+	switch (type_num) {
+	case 0:
+		type_flag |= NE6X_F_ACK_FLOOD;
+		break;
+	case 1:
+		type_flag |= NE6X_F_PUSH_ACK_FLOOD;
+		break;
+	case 2:
+		type_flag |= NE6X_F_SYN_ACK_FLOOD;
+		break;
+	case 3:
+		type_flag |= NE6X_F_FIN_FLOOD;
+		break;
+	case 4:
+		type_flag |= NE6X_F_RST_FLOOD;
+		break;
+	case 5:
+		type_flag |= NE6X_F_PUSH_SYN_ACK_FLOOD;
+		break;
+	case 6:
+		type_flag |= NE6X_F_UDP_FLOOD;
+		break;
+	case 7:
+		type_flag |= NE6X_F_ICMP_FLOOD;
+		break;
+	case 8:
+		type_flag |= NE6X_F_FRAGMENT_FLOOD;
+		break;
+	default:
+		dev_err(&pf->pdev->dev, "input error: the type must be one of 0-8\n");
+		return;
+	}
+
+	if (data[1] == 1) {
+		ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type);
+		type |= type_flag;
+		ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type);
+	} else if (data[1] == 0) {
+		ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type);
+		type &= ~type_flag;
+		ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type);
+	} else {
+		dev_err(&pf->pdev->dev, "input error: please enter '0' or '1'\n");
+		return;
+	}
+
+	cir = data[2] * 1000 + 1023;
+	cir = min((cir / 1024), cir_maxnum);
+	vf_bw.cir = cir;
+	vf_bw.pir = min((cir + cir / 10), cir_maxnum);
+
+	vf_bw.cbs = min((vf_bw.cir * 10000), cbs_maxnum);
+	vf_bw.pbs = min((vf_bw.pir * 10000), cbs_maxnum);
+	ret = ne6x_reg_config_meter(pf, NE6X_METER1_TABLE |
+				    NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | type_num,
+				    (u32 *)&vf_bw, sizeof(vf_bw));
+
+	dev_info(&pf->pdev->dev, "%s: %s\n", __func__,
+		 (ret == 0) ? "write meter success!" : "write meter fail!");
+}
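+
+/* The committed-rate conversion in ne6x_dbg_meter_write() above,
+ *
+ *	cir = data[2] * 1000 + 1023;
+ *	cir = min((cir / 1024), cir_maxnum);
+ *
+ * is a round-up division by 1024 (Mbit/s into the hardware's rate unit,
+ * assuming that unit is 1.024 Mbit/s), i.e. equivalent to:
+ *
+ *	cir = min(DIV_ROUND_UP(data[2] * 1000, 1024), cir_maxnum);
+ */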
: "write meter fail!"); +} + +const struct ne6x_dbg_cmd_wr deg_cmd_wr[] = { + {"queue", ne6x_dbg_show_queue}, + {"ring", ne6x_dbg_show_ring}, + {"txq", ne6x_dbg_show_txq}, + {"rxq", ne6x_dbg_show_rxq}, + {"cq", ne6x_dbg_show_cq}, + {"clean", ne6x_dbg_clean_queue}, + {"txtail", ne6x_dbg_show_txtail}, + {"txr", ne6x_dbg_show_txring}, + {"rxr", ne6x_dbg_show_rxring}, + {"cqr", ne6x_dbg_show_cqring}, +#ifdef CONFIG_RFS_ACCEL + {"arfs", ne6x_dbg_show_arfs_cnt}, +#endif + {"apb_read", ne6x_dbg_apb_read}, + {"apb_write", ne6x_dbg_apb_write}, + {"mem_read", ne6x_dbg_mem_read}, + {"mem_write", ne6x_dbg_mem_write}, + {"soc_read", ne6x_dbg_soc_read}, + {"soc_write", ne6x_dbg_soc_write}, + {"templ_help", ne6x_dbg_templ_help}, + {"templ_read", ne6x_dbg_templ_read}, + {"templ_write", ne6x_dbg_templ_write}, + {"tab_read", ne6x_dbg_tab_read}, + {"tab_write", ne6x_dbg_tab_write}, + {"tab_insert", ne6x_dbg_tab_insert}, + {"tab_delete", ne6x_dbg_tab_delete}, + {"tab_search", ne6x_dbg_tab_search}, + {"set_port_mac", ne6x_dbg_set_mac_to_eeprom}, + {"get_port_mac", ne6x_dbg_get_mac}, + {"fru_read", ne6x_dbg_get_fru_info}, + {"pcie_dropcnt", ne6x_dbg_show_pcie_drop_counter}, + {"clear_table", ne6x_dbg_clr_table}, + {"set_hw_flag", ne6x_dbg_set_hw_flag_eeprom}, + {"norflash_erase", ne6x_dbg_erase_norflash}, + {"norflash_write", ne6x_dbg_write_norflash}, + {"norflash_read", ne6x_dbg_read_norflash}, + {"meter_write", ne6x_dbg_meter_write}, +}; + +/** + * ne6x_dbg_command_read - read for command datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ne6x_dbg_command_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + return 0; +} + +static ssize_t ne6x_dbg_info_pnsn_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + u8 *pru_name = NULL, *pru_pn = NULL, *pru_sn = NULL; + char name_pre[INFO_COL] = {0}; + char name_aft[INFO_COL] = {0}; + struct ne6x_pf *pf = NULL; + u32 buf_size = 500; + char *name = NULL; + ssize_t len = 0; + u8 *buffer_data; + u8 length = 0; + u16 device_id; + int erro = 0; + int dex = 0; + int i = 0; + + if (*ppos > 0 || count < PAGE_SIZE) + return 0; + + name = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!name) + return -ENOMEM; + + buffer_data = kzalloc(buf_size, GFP_KERNEL); + if (!buffer_data) { + kfree(name); + return -ENOMEM; + } + + pf = filp->private_data; + ne6x_dev_get_fru(pf, (u32 *)buffer_data, buf_size); + + pru_name = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_NAME, &length); + if (!pru_name) { + dev_err(&pf->pdev->dev, "get pru_name info erro"); + device_id = pf->hw.subsystem_device_id; + if (!device_id) { + dev_err(&pf->pdev->dev, "subsystem_device_id is NULL!"); + erro = 1; + goto get_buffer_end; + } + + sprintf(name_pre, "Product Name: BeiZhongWangXin"); + sprintf(name_aft, "Ethernet Adapter"); + + for (i = 0; i < ARRAY_SIZE(ne6x_device_info); i++) { + if (device_id == ne6x_device_info[i].system_id) + dex = i; + } + + if (dex != -1) { + len = sprintf(name, "%s %s %s %s\n", name_pre, + ne6x_device_info[dex].system_name, + ne6x_device_info[dex].system_speed, name_aft); + } else { + dev_warn(&pf->pdev->dev, "subsystem_device_id not match"); + erro = 1; + goto get_buffer_end; + } + + } else { + len = sprintf(name, "Product Name: %s\n", pru_name); + } + + pru_pn = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_PART_NUMBER, &length); + if (pru_pn) + len = sprintf(name, "%s[PN] Part number: %s\n", 
+
+static ssize_t ne6x_dbg_info_pnsn_read(struct file *filp, char __user *buffer,
+				       size_t count, loff_t *ppos)
+{
+	u8 *pru_name = NULL, *pru_pn = NULL, *pru_sn = NULL;
+	char name_pre[INFO_COL] = {0};
+	char name_aft[INFO_COL] = {0};
+	struct ne6x_pf *pf = NULL;
+	u32 buf_size = 500;
+	char *name = NULL;
+	ssize_t len = 0;
+	u8 *buffer_data;
+	u8 length = 0;
+	u16 device_id;
+	int err = 0;
+	int dex = -1;
+	int i = 0;
+
+	if (*ppos > 0 || count < PAGE_SIZE)
+		return 0;
+
+	name = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+
+	buffer_data = kzalloc(buf_size, GFP_KERNEL);
+	if (!buffer_data) {
+		kfree(name);
+		return -ENOMEM;
+	}
+
+	pf = filp->private_data;
+	ne6x_dev_get_fru(pf, (u32 *)buffer_data, buf_size);
+
+	pru_name = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_NAME, &length);
+	if (!pru_name) {
+		dev_err(&pf->pdev->dev, "failed to get product name from FRU\n");
+		device_id = pf->hw.subsystem_device_id;
+		if (!device_id) {
+			dev_err(&pf->pdev->dev, "subsystem_device_id is zero!");
+			err = 1;
+			goto get_buffer_end;
+		}
+
+		sprintf(name_pre, "Product Name: BeiZhongWangXin");
+		sprintf(name_aft, "Ethernet Adapter");
+
+		for (i = 0; i < ARRAY_SIZE(ne6x_device_info); i++) {
+			if (device_id == ne6x_device_info[i].system_id)
+				dex = i;
+		}
+
+		if (dex != -1) {
+			len = sprintf(name, "%s %s %s %s\n", name_pre,
+				      ne6x_device_info[dex].system_name,
+				      ne6x_device_info[dex].system_speed, name_aft);
+		} else {
+			dev_warn(&pf->pdev->dev, "subsystem_device_id not match");
+			err = 1;
+			goto get_buffer_end;
+		}
+
+	} else {
+		len = sprintf(name, "Product Name: %s\n", pru_name);
+	}
+
+	pru_pn = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_PART_NUMBER, &length);
+	if (pru_pn)
+		len += sprintf(name + len, "[PN] Part number: %s\n", pru_pn);
+
+	pru_sn = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_SERIAL_NUMBER, &length);
+	if (pru_sn)
+		len += sprintf(name + len, "[SN] Serial number: %s\n", pru_sn);
+
+	if (copy_to_user(buffer, name, len)) {
+		err = 2;
+		goto get_buffer_end;
+	}
+
+	if (!len) {
+		err = 1;
+		goto get_buffer_end;
+	}
+
+	*ppos = len;
+
+get_buffer_end:
+	kfree(pru_pn);
+	kfree(pru_sn);
+	kfree(pru_name);
+	kfree(name);
+	kfree(buffer_data);
+
+	if (err == 1)
+		return 0;
+	else if (err == 2)
+		return -EFAULT;
+
+	return len;
+}
+
+static bool ne6x_dbg_fru_checksum(const u8 *data, u32 len)
+{
+	u8 gl = 0;
+	u32 i;
+
+	for (i = 0; i < len - 1; i++)
+		gl += data[i];
+
+	gl = ~gl + 1;
+	return gl == data[len - 1];
+}
+
+static int ne6x_dbg_fru_get_offset(u8 *buffer, enum fru_type type, u8 *offset)
+{
+	u8 hd[8] = {0};
+	int i;
+
+	for (i = 0; i < 8; i++)
+		hd[i] = buffer[i];
+
+	if (!(hd[0] & 0x1))
+		return -2;
+
+	if (!ne6x_dbg_fru_checksum(hd, 8))
+		return -3;
+
+	if (type < INTER_USE_AREA || type > MUILT_AREA)
+		return -4;
+
+	*offset = hd[type + 1];
+
+	return 0;
+}
+
+static u8 *ne6x_dbg_fru_6ascii28(const u8 *data, u8 *len)
+{
+	u8 len_bit_6, len_bit_8;
+	int i, i6, byte;
+	u8 *buf = NULL;
+
+	len_bit_6 = data[0] & 0x3F;
+	len_bit_8 = FRU_6BIT_8BITLENGTH(len_bit_6);
+	buf = kzalloc(len_bit_8 + 1, GFP_ATOMIC);
+
+	if (!buf) {
+		*len = 0;
+		return NULL;
+	}
+
+	for (i = 0, i6 = 1; i6 <= len_bit_6 && i < len_bit_8 && data[i6]; i++) {
+		byte = i % 4;
+
+		switch (byte) {
+		case 0:
+			buf[i] = data[i6] & 0x3F;
+			break;
+		case 1:
+			buf[i] = (data[i6] >> 6) | (data[1 + i6] << 2);
+			i6++;
+			break;
+		case 2:
+			buf[i] = (data[i6] >> 4) | (data[1 + i6] << 4);
+			i6++;
+			break;
+		case 3:
+			buf[i] = data[i6++] >> 2;
+			break;
+		}
+
+		buf[i] &= 0x3F;
+		buf[i] += ASCII628_BASE;
+	}
+
+	*len = len_bit_8;
+
+	return buf;
+}
+
+u8 *ne6x_dbg_get_fru_product_part(u8 *buffer, enum fru_product_part part, u8 *len)
+{
+	u8 hd[2] = {0};
+	u8 *pt = NULL;
+	u8 ofst = 0;
+	u32 i = 0;
+
+	if (!buffer)
+		return NULL;
+
+	if (ne6x_dbg_fru_get_offset(buffer, PRODUCT_AREA, &ofst) != 0 || ofst == 0) {
+		*len = 0;
+		return NULL;
+	}
+
+	ofst *= 8;
+	hd[0] = buffer[ofst];
+	hd[1] = buffer[ofst + 1];
+	if (!(hd[0] & 0x1) || hd[1] == 0)
+		return NULL;
+
+	if (!ne6x_dbg_fru_checksum(&buffer[ofst], hd[1] * 8))
+		return NULL;
+
+	ofst += 3;
+
+	for (i = 0; i < part; i++)
+		ofst += 1 + (buffer[ofst] & 0x3f);
+
+	if (FRU_CHECK_6ASCII(buffer[ofst])) {
+		pt = ne6x_dbg_fru_6ascii28(&buffer[ofst], len);
+	} else {
+		*len = (buffer[ofst] & 0x3f);
+		pt = kzalloc(*len, GFP_ATOMIC);
+		if (!pt)
+			return NULL;
+
+		memcpy(pt, &buffer[ofst + 1], *len);
+	}
+
+	return pt;
+}
+
+void ne6x_dbg_update_adpt_speed(struct ne6x_adapter *adpt, u32 speed, u32 lane_mode) {}
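+
+/* FRU "6-bit ASCII" fields, as unpacked by ne6x_dbg_fru_6ascii28() above,
+ * pack four characters into every three bytes; each 6-bit code plus
+ * ASCII628_BASE yields a printable character (the IPMI convention uses
+ * 0x20 as that base).  One 3-byte group b0,b1,b2 decodes as:
+ *
+ *	c0 =  b0 & 0x3F;
+ *	c1 = ((b0 >> 6) | (b1 << 2)) & 0x3F;
+ *	c2 = ((b1 >> 4) | (b2 << 4)) & 0x3F;
+ *	c3 =  (b2 >> 2) & 0x3F;
+ */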
+
+/**
+ * ne6x_dbg_command_write - write into command datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t ne6x_dbg_command_write(struct file *filp, const char __user *buffer,
+				      size_t count, loff_t *ppos)
+{
+	struct ne6x_pf *pf = filp->private_data;
+	char *cmd_buf, *cmd_buf_tmp;
+	struct ne6x_ring *tx_ring;
+	int bytes_not_copied;
+	struct ne6x_adapter *adpt;
+	int i, cnt = 0;
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+
+	/* don't cross maximal possible value */
+	if (count >= NE6X_DEBUG_CHAR_LEN)
+		return -ENOSPC;
+
+	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!cmd_buf)
+		return -ENOMEM;
+
+	bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+	if (bytes_not_copied) {
+		kfree(cmd_buf);
+		return -EFAULT;
+	}
+	cmd_buf[count] = '\0';
+
+	cmd_buf_tmp = strchr(cmd_buf, '\n');
+	if (cmd_buf_tmp) {
+		*cmd_buf_tmp = '\0';
+		count = cmd_buf_tmp - cmd_buf + 1;
+	}
+
+	if (strncmp(cmd_buf, "updtail", 7) == 0) {
+		int idx, vp, tail;
+
+		cnt = sscanf(&cmd_buf[7], "%d %d %d", &idx, &vp, &tail);
+		if (cnt != 3) {
+			dev_warn(&pf->pdev->dev, "updtail <adpt> <vp> <tail>\n");
+			goto command_write_done;
+		}
+		adpt = pf->adpt[idx ? 1 : 0];
+		tx_ring = adpt->tx_rings[vp & 0xf];
+		ne6x_tail_update(tx_ring, tail);
+		dev_info(&pf->pdev->dev, "write: adpt = %d vp = 0x%x tail_ptr = %d\n", idx ? 1 : 0,
+			 vp, tail);
+	} else if (strncmp(cmd_buf, "memrd", 5) == 0) {
+		u32 base_addr;
+		u32 offset_addr = 0;
+		u64 value;
+		int index, vp;
+
+		cnt = sscanf(&cmd_buf[5], "%d", &vp);
+		if (cnt != 1) {
+			dev_warn(&pf->pdev->dev, "memrd <vp>\n");
+			goto command_write_done;
+		}
+
+		offset_addr = 0x0;
+		for (index = 0; index < 0x20; index++) {
+			base_addr = 0x140 + vp;
+			value = ne6x_reg_pci_read(pf, base_addr, offset_addr);
+			dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr,
+				 offset_addr, value);
+			offset_addr++;
+		}
+
+		if (base_addr == 0x13F) {
+			offset_addr = 0x21;
+			for (index = 0x21; index < 0x24; index++) {
+				base_addr = 0x140 + vp;
+				value = ne6x_reg_pci_read(pf, base_addr, offset_addr);
+				dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr,
+					 offset_addr, value);
+				offset_addr++;
+			}
+
+			offset_addr = 0x39;
+			for (index = 0x39; index < 0x4E; index++) {
+				base_addr = 0x140 + vp;
+				value = ne6x_reg_pci_read(pf, base_addr, offset_addr);
+				dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr,
+					 offset_addr, value);
+				offset_addr++;
+			}
+
+			offset_addr = 0x80;
+			for (index = 0x80; index < 0x95; index++) {
+				base_addr = 0x140 + vp;
+				value = ne6x_reg_pci_read(pf, base_addr, offset_addr);
+				dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr,
+					 offset_addr, value);
+				offset_addr++;
+			}
+
+			offset_addr = 0xA3;
+			for (index = 0xA3; index < 0xA5; index++) {
+				base_addr = 0x140 + vp;
+				value = ne6x_reg_pci_read(pf, base_addr, offset_addr);
+				dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr,
+					 offset_addr, value);
+				offset_addr++;
+			}
+		}
+	} else if (strncmp(cmd_buf, "read", 4) == 0) {
+		u32 base_addr;
+		u32 offset_addr;
+		u64 value;
+
+		cnt = sscanf(&cmd_buf[4], "%i %i", &base_addr, &offset_addr);
+		if (cnt != 2) {
+			dev_warn(&pf->pdev->dev, "read <base> <offset>\n");
+			goto command_write_done;
+		}
+
+		value = ne6x_reg_pci_read(pf, base_addr, offset_addr);
+		dev_info(&pf->pdev->dev, "read: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr,
+			 value);
+	} else if (strncmp(cmd_buf, "write", 5) == 0) {
+		u32 base_addr;
+		u32 offset_addr;
+		u64 value;
+
+		cnt = sscanf(&cmd_buf[5], "%i %i %lli ", &base_addr, &offset_addr, &value);
+		if (cnt != 3) {
+			dev_warn(&pf->pdev->dev, "write <base> <offset> <value>\n");
+			goto command_write_done;
+		}
+
+		ne6x_reg_pci_write(pf, base_addr, offset_addr, value);
+		value = ne6x_reg_pci_read(pf, base_addr, offset_addr);
+		dev_info(&pf->pdev->dev, "write: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr,
+			 value);
+	} else if (strncmp(cmd_buf, "wr", 2) == 0) {
+		u32 offset;
+		u32 value;
+
+		cnt = sscanf(&cmd_buf[2], "%i %i", &offset, &value);
+		if (cnt != 2) {
+			dev_warn(&pf->pdev->dev, "wr <offset> <value>\n");
+			goto command_write_done;
+		}
+		ne6x_reg_indirect_write(pf, offset, value);
+		dev_info(&pf->pdev->dev, "wr: 0x%x = 0x%x\n", offset, value);
+	} else if (strncmp(cmd_buf, "rr", 2) == 0) {
+		u32 offset;
+		u32 value;
+
+		cnt = sscanf(&cmd_buf[2], "%i", &offset);
+		if (cnt != 1) {
+			dev_warn(&pf->pdev->dev, "rr <offset>\n");
+			goto command_write_done;
+		}
+
+		ne6x_reg_indirect_read(pf, offset, &value);
+		dev_info(&pf->pdev->dev, "rr: 0x%x = 0x%x\n", offset, value);
+	} else if (strncmp(cmd_buf, "txd", 3) == 0) {
+		u32 adpt_num;
+		u32 queue_num;
+
+		cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &queue_num);
+		if (cnt != 2) {
+			dev_warn(&pf->pdev->dev, "txd <adpt> <queue>\n");
+			goto command_write_done;
+		}
+
+		ne6x_dbg_show_txdesc_states(adpt_num, queue_num, pf);
+	} else if (strncmp(cmd_buf, "rxd", 3) == 0) {
+		u32 adpt_num;
+		u32 queue_num;
+
+		cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &queue_num);
+		if (cnt != 2) {
+			dev_warn(&pf->pdev->dev, "rxd <adpt> <queue>\n");
+			goto command_write_done;
+		}
+
+		ne6x_dbg_show_rxdesc_states(adpt_num, queue_num, pf);
+	} else if (strncmp(cmd_buf, "cqd", 3) == 0) {
+		u32 adpt_num;
+		u32 queue_num;
+
+		cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &queue_num);
+		if (cnt != 2) {
+			dev_warn(&pf->pdev->dev, "cqd <adpt> <queue>\n");
+			goto command_write_done;
+		}
+
+		ne6x_dbg_show_cqdesc_states(adpt_num, queue_num, pf);
+	} else {
+		for (i = 0; i < count; i++) {
+			if (cmd_buf[i] == ' ') {
+				cmd_buf[i] = '\0';
+				cnt = i;
+				break;
+			}
+			if (cmd_buf[i] == '\0') {
+				cnt = i;
+				break;
+			}
+		}
+
+		for (i = 0; i < ARRAY_SIZE(deg_cmd_wr); i++) {
+			if (strncmp(cmd_buf, deg_cmd_wr[i].command, cnt) == 0) {
+				deg_cmd_wr[i].command_proc(pf, &cmd_buf[cnt + 1], count - cnt - 1);
+				goto command_write_done;
+			}
+		}
+
+		dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
+	}
+
+command_write_done:
+	kfree(cmd_buf);
+	cmd_buf = NULL;
+	return count;
+}
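+
+/* Note on the else-if chain above: commands are matched with strncmp()
+ * prefix tests, so order matters -- "write" must be tested before "wr",
+ * and "wr" before "rr", otherwise "write 0 0 0" would be swallowed by the
+ * two-character "wr" match.  The table-driven commands in deg_cmd_wr[]
+ * are only consulted after all of the built-in prefixes miss.
+ */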
+
+static const struct file_operations ne6x_dbg_command_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = ne6x_dbg_command_read,
+	.write = ne6x_dbg_command_write,
+};
+
+const struct ne6x_dbg_cmd_wr deg_netdev_ops_cmd_wr[] = {};
+
+static const struct file_operations ne6x_dbg_info_pnsn_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = ne6x_dbg_info_pnsn_read,
+};
+
+static const struct file_operations ne6x_dbg_info_tps_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = ne6x_proc_tps_read,
+};
+
+/**
+ * ne6x_dbg_netdev_ops_read - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t ne6x_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
+					size_t count, loff_t *ppos)
+{
+	return 0;
+}
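+
+/* Per-PF debugfs layout created by ne6x_dbg_pf_init() below (the root
+ * directory name comes from ne6x_driver_name, the per-device one from
+ * pci_name()):
+ *
+ *	/sys/kernel/debug/<driver>/<pci-bdf>/command
+ *	/sys/kernel/debug/<driver>/<pci-bdf>/netdev_ops
+ *	/sys/kernel/debug/<driver>/<pci-bdf>/info/product_info
+ *	/sys/kernel/debug/<driver>/<pci-bdf>/info/power_info
+ */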
+
+/**
+ * ne6x_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t ne6x_dbg_netdev_ops_write(struct file *filp,
+					 const char __user *buffer,
+					 size_t count, loff_t *ppos)
+{
+	struct ne6x_pf *pf = filp->private_data;
+	char *cmd_buf, *cmd_buf_tmp;
+	int bytes_not_copied;
+	int i;
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+
+	/* don't cross maximal possible value */
+	if (count >= NE6X_DEBUG_CHAR_LEN)
+		return -ENOSPC;
+
+	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!cmd_buf)
+		return -ENOMEM;
+
+	bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+	if (bytes_not_copied) {
+		kfree(cmd_buf);
+		return -EFAULT;
+	}
+	cmd_buf[count] = '\0';
+
+	cmd_buf_tmp = strchr(cmd_buf, '\n');
+	if (cmd_buf_tmp) {
+		*cmd_buf_tmp = '\0';
+		count = cmd_buf_tmp - cmd_buf + 1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(deg_netdev_ops_cmd_wr); i++) {
+		size_t cmd_len = strlen(deg_netdev_ops_cmd_wr[i].command);
+
+		if (strncmp(cmd_buf, deg_netdev_ops_cmd_wr[i].command, cmd_len) == 0) {
+			deg_netdev_ops_cmd_wr[i].command_proc(pf, &cmd_buf[cmd_len + 1],
+							      count - cmd_len - 1);
+			goto command_write_done;
+		}
+	}
+	dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
+
+command_write_done:
+	kfree(cmd_buf);
+	cmd_buf = NULL;
+	return count;
+}
+
+static const struct file_operations ne6x_dbg_netdev_ops_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = ne6x_dbg_netdev_ops_read,
+	.write = ne6x_dbg_netdev_ops_write,
+};
+
+/**
+ * ne6x_dbg_pf_init - setup the debugfs directory for the PF
+ * @pf: the PF that is starting up
+ **/
+void ne6x_dbg_pf_init(struct ne6x_pf *pf)
+{
+	const struct device *dev = &pf->pdev->dev;
+	const char *name = pci_name(pf->pdev);
+	struct dentry *pfile;
+
+	pf->ne6x_dbg_pf = debugfs_create_dir(name, ne6x_dbg_root);
+	if (IS_ERR(pf->ne6x_dbg_pf))
+		return;
+
+	pf->ne6x_dbg_info_pf = debugfs_create_dir("info", pf->ne6x_dbg_pf);
+	if (IS_ERR(pf->ne6x_dbg_info_pf))
+		return;
+
+	pfile = debugfs_create_file("command", 0600, pf->ne6x_dbg_pf, pf, &ne6x_dbg_command_fops);
+	if (IS_ERR(pfile))
+		goto create_failed;
+
+	pfile = debugfs_create_file("netdev_ops", 0600, pf->ne6x_dbg_pf, pf,
+				    &ne6x_dbg_netdev_ops_fops);
+	if (IS_ERR(pfile))
+		goto create_failed;
+
+	pfile = debugfs_create_file("product_info", 0600, pf->ne6x_dbg_info_pf, pf,
+				    &ne6x_dbg_info_pnsn_fops);
+	if (IS_ERR(pfile))
+		goto create_failed;
+
+	pfile = debugfs_create_file("power_info", 0600, pf->ne6x_dbg_info_pf, pf,
+				    &ne6x_dbg_info_tps_fops);
+	if (IS_ERR(pfile))
+		goto create_failed;
+
+	return;
+
+create_failed:
+	dev_err(dev, "debugfs dir/file for %s failed\n", name);
+	debugfs_remove_recursive(pf->ne6x_dbg_info_pf);
+	debugfs_remove_recursive(pf->ne6x_dbg_pf);
+}
+
+/**
+ * ne6x_dbg_pf_exit - clear out the PF's debugfs entries
+ * @pf: the PF that is stopping
+ **/
+void ne6x_dbg_pf_exit(struct ne6x_pf *pf)
+{
+	debugfs_remove_recursive(pf->ne6x_dbg_info_pf);
+	pf->ne6x_dbg_info_pf = NULL;
+
+	debugfs_remove_recursive(pf->ne6x_dbg_pf);
+	pf->ne6x_dbg_pf = NULL;
+}
+
+/**
+ * ne6x_dbg_init - start up debugfs for the driver
+ **/
+void ne6x_dbg_init(void)
+{
+	ne6x_dbg_root = debugfs_create_dir(ne6x_driver_name, NULL);
+	if (IS_ERR(ne6x_dbg_root))
+		pr_info("init of debugfs failed\n");
+}
+
+/**
+ * ne6x_dbg_exit - clean out the driver's debugfs entries
+ **/
+void ne6x_dbg_exit(void)
+{
+	debugfs_remove_recursive(ne6x_dbg_root);
+	ne6x_dbg_root = NULL;
+}
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h
new file mode 100644
index 0000000000000000000000000000000000000000..2094e52f4b6d19c03fcedef642e6a9111a88356f
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */
+
+#ifndef _NE6X_DEBUGFS_H
+#define _NE6X_DEBUGFS_H
+
+struct ne6x_debug_table {
+	int table;
+	int index;
+	int size;
+	u32 data[128];
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+enum fru_product_part {
+	MANUFACTURER_NAME = 0,
+	PRODUCT_NAME,
+	PRODUCT_PART_NUMBER, /* pn */
+	PRODUCT_VERSION,
+	PRODUCT_SERIAL_NUMBER, /* sn */
+	PRODUCT_ASSET_TAG,
+	PRODUCT_FRU_FILE_ID,
+};
+
+enum fru_type {
+	INTER_USE_AREA = 0,
+	CHASSIS_AREA,
+	BOARD_AREA,
+	PRODUCT_AREA,
+	MUILT_AREA,
+};
+
+#define NE6X_DEBUG_CHAR_LEN 1024
+
+#define INFO_ROW 20
+#define INFO_COL 50
+
+extern char ne6x_driver_name[];
+
+struct ne6x_dbg_cmd_wr {
+	char command[NE6X_DEBUG_CHAR_LEN];
+	void (*command_proc)(struct ne6x_pf *pf, char *cmd_buf, int count);
+};
+
+struct ne6x_debug_info {
+	u16 system_id;
+	char system_name[INFO_COL];
+	char system_speed[INFO_COL];
+};
+
+void ne6x_dbg_init(void);
+void ne6x_dbg_exit(void);
+
+void ne6x_dbg_pf_init(struct ne6x_pf *pf);
+void ne6x_dbg_pf_exit(struct ne6x_pf *pf);
+#else /* !CONFIG_DEBUG_FS */
+
+static inline void ne6x_dbg_init(void)
+{ }
+static inline void ne6x_dbg_exit(void)
+{ }
+static inline void ne6x_dbg_pf_init(struct ne6x_pf *pf)
+{ }
+static inline void ne6x_dbg_pf_exit(struct ne6x_pf *pf)
+{ }
+#endif /* end CONFIG_DEBUG_FS */
+
+#endif
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c
new file mode 100644
index 0000000000000000000000000000000000000000..70381bd6ebc986b130b16bfcde8af8ae5fcb20c6
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c
@@ -0,0 +1,1602 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */
+
+#include "ne6x.h"
+#include "ne6x_portmap.h"
+#include "ne6x_reg.h"
+#include "ne6x_dev.h"
+#include "reg.h"
+
+#define NE6X_SDK_CRC32_DATA_LEN 256
+
+#define NE6X_PPORT_BY_HWINFO(HWINFO, index) (((HWINFO) >> (8 * (index))) & 0xff)
+
+#define to_be32_vector(s, e, p) \
+({ \
+	int __n; \
+	u32 *__data = (u32 *)(p);\
+	for (__n = (s); __n < (e); __n++) \
+		__data[__n] = cpu_to_be32(__data[__n]); \
+})
+
+void ext_toeplitz_key(const unsigned char *key, unsigned char *ext_key)
+{
+	int i;
+
+	for (i = 0; i < 39; i++) {
+		ext_key[i] = key[i];
+		ext_key[44 + i] = (key[i] << 1) | (key[i + 1] >> 7);
+		ext_key[44 * 2 + i] = (key[i] << 2) | (key[i + 1] >> 6);
+		ext_key[44 * 3 + i] = (key[i] << 3) | (key[i + 1] >> 5);
+		ext_key[44 * 4 + i] = (key[i] << 4) | (key[i + 1] >> 4);
+		ext_key[44 * 5 + i] = (key[i] << 5) | (key[i + 1] >> 3);
+		ext_key[44 * 6 + i] = (key[i] << 6) | (key[i + 1] >> 2);
+		ext_key[44 * 7 + i] = (key[i] << 7) | (key[i + 1] >> 1);
+	}
+
+	ext_key[39] = key[39];
+	ext_key[44 + 39] = (key[39] << 1) | (key[1] >> 7);
+	ext_key[44 * 2 + 39] = (key[39] << 2) | (key[1] >> 6);
+	ext_key[44 * 3 + 39] = (key[39] << 3) | (key[1] >> 5);
+	ext_key[44 * 4 + 39] = (key[39] << 4) | (key[1] >> 4);
+	ext_key[44 * 5 + 39] = (key[39] << 5) | (key[1] >> 3);
+	ext_key[44 * 6 + 39] = (key[39] << 6) | (key[1] >> 2);
+	ext_key[44 * 7 + 39] = (key[39] << 7) | (key[1] >> 1);
+
+	for (i = 0; i < 4; i++) {
+		ext_key[40 + i] = ext_key[i];
+		ext_key[44 + 40 + i] = ext_key[44 + i];
+		ext_key[44 * 2 + 40 + i] = ext_key[44 * 2 + i];
+		ext_key[44 * 3 + 40 + i] = ext_key[44 * 3 + i];
+		ext_key[44 * 4 + 40 + i] = ext_key[44 * 4 + i];
+		ext_key[44 * 5 + 40 + i] = ext_key[44 * 5 + i];
+		ext_key[44 * 6 + 40 + i] = ext_key[44 * 6 + i];
+		ext_key[44 * 7 + 40 + i] = ext_key[44 * 7 + i];
+	}
+}
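+
+/* ext_toeplitz_key() above expands the 40-byte Toeplitz hash key into
+ * eight rows of a 44-byte stride, each row pre-shifted by one more bit,
+ * with a 4-byte wrap copied onto the end of every row.  That layout lets
+ * the hardware fetch the 32-bit hash window for any bit offset within a
+ * byte directly, so it can consume input a byte at a time instead of a
+ * bit at a time (a common Toeplitz implementation trick).
+ */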
+
+static u32 ne6x_dev_bitrev(u32 input, int bw)
+{
+	u32 var = 0;
+	int i;
+
+	for (i = 0; i < bw; i++) {
+		if (input & 0x01)
+			var |= 1 << (bw - 1 - i);
+
+		input >>= 1;
+	}
+
+	return var;
+}
+
+void ne6x_dev_crc32_init(u32 poly, u32 *table)
+{
+	u32 c;
+	int i, j;
+
+	poly = ne6x_dev_bitrev(poly, 32);
+
+	for (i = 0; i < NE6X_SDK_CRC32_DATA_LEN; i++) {
+		c = i;
+		for (j = 0; j < 8; j++) {
+			if (c & 1)
+				c = poly ^ (c >> 1);
+			else
+				c = c >> 1;
+		}
+		table[i] = c;
+	}
+}
+
+u32 ne6x_dev_crc32(const u8 *buf, u32 size)
+{
+	u32 ne6x_sdk_crc32tab[NE6X_SDK_CRC32_DATA_LEN];
+	u32 i, crc;
+
+	ne6x_dev_crc32_init(0x4C11DB7, ne6x_sdk_crc32tab);
+	crc = 0xFFFFFFFF;
+
+	for (i = 0; i < size; i++)
+		crc = ne6x_sdk_crc32tab[(crc ^ buf[i]) & 0xff] ^ (crc >> 8);
+
+	return crc ^ 0xFFFFFFFF;
+}
+
+int ne6x_dev_spd_verify(struct ne6x_dev_eeprom_info *spd_info)
+{
+	if (be32_to_cpu(spd_info->spd_verify_value) ==
+	    ne6x_dev_crc32((const u8 *)spd_info, sizeof(*spd_info) - 4))
+		return 0;
+
+	return -EINVAL;
+}
+
+int ne6x_dev_get_eeprom(struct ne6x_pf *pf)
+{
+	int retry = 3;
+
+	while (retry-- > 0) {
+		ne6x_reg_e2prom_read(pf, 0x0, (u8 *)&pf->sdk_spd_info, sizeof(pf->sdk_spd_info));
+		if (!ne6x_dev_spd_verify(&pf->sdk_spd_info))
+			return 0;
+	}
+
+	memset(&pf->sdk_spd_info, 0, sizeof(pf->sdk_spd_info));
+
+	return -EINVAL;
+}
+
+static int ne6x_dev_get_dev_info(struct ne6x_pf *pf)
+{
+	int ret;
+
+	ret = ne6x_dev_get_eeprom(pf);
+	if (!ret) {
+		pf->dev_type = be16_to_cpu(pf->sdk_spd_info.product_mode);
+		pf->hw_flag = be32_to_cpu(pf->sdk_spd_info.hw_flag);
+		if (!pf->hw_flag)
+			pf->hw_flag = 1;
+	} else {
+		dev_err(ne6x_pf_to_dev(pf), "get eeprom fail\n");
+	}
+
+	return ret;
+}
+
+int ne6x_dev_set_white_list(struct ne6x_pf *pf, bool enable)
+{
+	u32 data;
+
+	if (enable) {
+		if (pf->hw_flag == 1 || pf->hw_flag == 2) {
+			ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data);
+			data |= NE6X_F_WHITELIST_ENABLED;
+			ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data);
+		} else {
+			dev_info(ne6x_pf_to_dev(pf), "hw does not support the white list function\n");
+			return -EOPNOTSUPP;
+		}
+	} else {
+		ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data);
+		data &= ~NE6X_F_WHITELIST_ENABLED;
+		ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data);
+	}
+
+	return 0;
+}
+
+void ne6x_dev_set_ddos(struct ne6x_pf *pf, bool enable)
+{
+	u32 data;
+
+	if (enable) {
+		ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data);
+		data |= NE6X_F_DDOS_ENABLED;
+		ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data);
+	} else {
+		ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data);
+		data &= ~NE6X_F_DDOS_ENABLED;
+		ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data);
+	}
+}
+
+void ne6x_dev_set_trust_vlan(struct ne6x_pf *pf, bool enable)
+{
+	u32 data;
+
+	if (enable) {
+		ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data);
+		data |= NE6X_F_TRUST_VLAN_ENABLED;
+		ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data);
+	} else {
+		ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data);
+		data &= ~NE6X_F_TRUST_VLAN_ENABLED;
+		ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data);
+	}
+}
+
+bool ne6x_dev_get_trust_vlan(struct ne6x_pf *pf)
+{
+	u32 data;
+
+	ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data);
+	if (data & NE6X_F_TRUST_VLAN_ENABLED)
+		return true;
+	return false;
+}
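+
+/* ne6x_dev_crc32() above is the standard reflected CRC-32 (polynomial
+ * 0x04C11DB7, init and final XOR of 0xFFFFFFFF), so it should match the
+ * kernel's own helper; a sketch of the equivalence, assuming CONFIG_CRC32:
+ *
+ *	#include <linux/crc32.h>
+ *
+ *	u32 v = crc32_le(~0, buf, size) ^ ~0;	// == ne6x_dev_crc32(buf, size)
+ */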
+
+int ne6x_dev_get_pport(struct ne6x_adapter *adpt)
+{
+	u32 lport_to_phy;
+
+	if (!adpt)
+		return 0;
+
+	switch (adpt->back->dev_type) {
+	case NE6000AI_2S_X16H_25G_N5:
+		return adpt->idx;
+	default:
+		break;
+	}
+
+	lport_to_phy = adpt->back->sdk_spd_info.logic_port_to_phyical;
+
+	return NE6X_PPORT_BY_HWINFO(be32_to_cpu(lport_to_phy), adpt->idx);
+}
+
+static void ne6x_dev_set_roce_icrc_offload(struct ne6x_pf *pf, bool enable)
+{
+	u32 data;
+
+	if (enable) {
+		ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data);
+		data |= NE6X_F_S_ROCE_ICRC_ENABLED;
+		ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data);
+	} else {
+		ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data);
+		data &= ~NE6X_F_S_ROCE_ICRC_ENABLED;
+		ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data);
+	}
+}
+
+int ne6x_dev_init(struct ne6x_pf *pf)
+{
+	if (unlikely(ne6x_dev_get_dev_info(pf)))
+		return -EINVAL;
+
+	ne6x_reg_get_ver(pf, &pf->verinfo);
+	ne6x_dev_clear_vport(pf);
+	ne6x_dev_set_fast_mode(pf, false, 0);
+	ne6x_dev_set_roce_icrc_offload(pf, true);
+
+	return 0;
+}
+
+int ne6x_dev_get_mac_addr(struct ne6x_adapter *adpt, u8 *mac)
+{
+	struct ne6x_dev_eeprom_info *info = &adpt->back->sdk_spd_info;
+
+	memset(mac, 0, 6);
+	switch (adpt->idx) {
+	case 0:
+		ether_addr_copy(mac, &info->port_0_mac[0]);
+		break;
+	case 1:
+		ether_addr_copy(mac, &info->port_1_mac[0]);
+		break;
+	case 2:
+		ether_addr_copy(mac, &info->port_2_mac[0]);
+		break;
+	case 3:
+		ether_addr_copy(mac, &info->port_3_mac[0]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int ne6x_dev_get_port_num(struct ne6x_pf *pf)
+{
+	return pf->sdk_spd_info.number_of_physical_controllers;
+}
+
+int ne6x_dev_get_temperature_info(struct ne6x_pf *pf, struct ne6x_soc_temperature *temp)
+{
+	return ne6x_reg_get_soc_info(pf, NE6X_SOC_TEMPERATURE, (u32 *)temp, sizeof(*temp));
+}
+
+int ne6x_dev_get_power_consum(struct ne6x_pf *pf, struct ne6x_soc_power *power)
+{
+	return ne6x_reg_get_soc_info(pf, NE6X_SOC_POWER_CONSUM, (u32 *)power, sizeof(*power));
+}
+
+int ne6x_dev_i2c3_signal_test(struct ne6x_pf *pf, u32 *id)
+{
+	return ne6x_reg_get_soc_info(pf, NE6X_SOC_I2C3_TEST, (u32 *)id, sizeof(u32));
+}
+
+int ne6x_dev_get_fru(struct ne6x_pf *pf, u32 *buffer, u32 size)
+{
+	return ne6x_reg_get_soc_info(pf, NE6X_SOC_FRU, buffer, size);
+}
+
+int ne6x_dev_start_ddr_test(struct ne6x_pf *pf)
+{
+	return ne6x_reg_get_soc_info(pf, NE6X_SOC_DDR_TEST, NULL, 0);
+}
+
+int ne6x_dev_read_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size)
+{
+	return ne6x_reg_e2prom_read(adpt->back, offset, pbuf, size);
+}
+
+int ne6x_dev_write_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size)
+{
+	return ne6x_reg_e2prom_write(adpt->back, offset, pbuf, size);
+}
+
+int ne6x_dev_get_link_status(struct ne6x_adapter *adpt, struct ne6x_link_info *status)
+{
+	u32 link_speed = ne6x_reg_apb_read(adpt->back, 0x2087FB00 + 4 * ADPT_LPORT(adpt));
+
+	status->link = link_speed >> 16;
+	status->speed = link_speed & 0xffff;
+
+	return 0;
+}
+
+int ne6x_dev_get_sfp_status(struct ne6x_adapter *adpt, u8 *status)
+{
+	u32 sfp_state;
+
+	sfp_state = ne6x_reg_apb_read(adpt->back, 0x2087FB40 + 4 * ADPT_LPORT(adpt));
+	*status = sfp_state & 0x1;
+
+	return 0;
+}
+
+void ne6x_dev_update_status(struct ne6x_pf *pf, struct ne6x_port_info *port, bool is_up)
+{
+	u32 speed = NE6X_LINK_SPEED_25GB;
+	struct ne6x_phy_info *phy = &port->phy;
+	struct ne6x_link_status *link = &phy->link_info;
+
+	if (!is_up) {
+		link->phy_type_low = NE6X_PHY_TYPE_UNKNOWN;
+		link->link_speed = speed;
+		link->link_info &= ~NE6X_AQ_LINK_UP;
+		phy->media_type = NE6X_MEDIA_UNKNOWN;
+		return;
+	}
+
+	link->link_info |= NE6X_AQ_LINK_UP;
+	switch (speed) {
+	case NE6X_LINK_SPEED_10GB:
+		link->phy_type_low = NE6X_PHY_TYPE_10GBASE;
+		link->link_speed = NE6X_LINK_SPEED_10GB;
+		break;
+	case NE6X_LINK_SPEED_25GB:
+		link->phy_type_low = NE6X_PHY_TYPE_25GBASE;
+		link->link_speed = NE6X_LINK_SPEED_25GB;
+		break;
+	case NE6X_LINK_SPEED_40GB:
+		link->phy_type_low = NE6X_PHY_TYPE_40GBASE;
+		link->link_speed = NE6X_LINK_SPEED_40GB;
+		break;
+	case NE6X_LINK_SPEED_100GB:
+		link->phy_type_low = NE6X_PHY_TYPE_100GBASE;
+		link->link_speed = NE6X_LINK_SPEED_100GB;
+		break;
+	case NE6X_LINK_SPEED_200GB:
+		link->phy_type_low = NE6X_PHY_TYPE_200GBASE;
+		link->link_speed = NE6X_LINK_SPEED_200GB;
+		break;
+	default:
+		dev_warn(ne6x_pf_to_dev(pf), "Unrecognized link_speed (0x%x).\n", speed);
+		break;
+	}
+
+	phy->media_type = NE6X_MEDIA_FIBER;
+}
+
+int ne6x_dev_self_test_link(struct ne6x_adapter *adpt, int *verify)
+{
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_LINK_STATUS, NE6X_TALK_GET,
+				  ADPT_LPORT(adpt), (void *)verify, sizeof(int));
+}
+
+int ne6x_dev_reset_firmware(struct ne6x_adapter *adpt)
+{
+	return ne6x_reg_reset_firmware(adpt->back);
+}
+
+int ne6x_dev_get_speed(struct ne6x_adapter *adpt, u32 *speed)
+{
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SPEED, NE6X_TALK_GET,
+				  ADPT_LPORT(adpt), (void *)speed, sizeof(u32));
+}
+
+int ne6x_dev_set_speed(struct ne6x_adapter *adpt, u32 speed)
+{
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SPEED, NE6X_TALK_SET,
+				  ADPT_LPORT(adpt), (void *)&speed, sizeof(u32));
+}
+
+int ne6x_dev_get_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl)
+{
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_PAUSE, NE6X_TALK_GET,
+				  ADPT_LPORT(adpt), (void *)fctrl, sizeof(*fctrl));
+}
+
+int ne6x_dev_set_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl)
+{
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_PAUSE, NE6X_TALK_SET,
+				  ADPT_LPORT(adpt), (void *)fctrl, sizeof(*fctrl));
+}
+
+int ne6x_dev_get_mac_stats(struct ne6x_adapter *adpt)
+{
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATS, NE6X_TALK_GET,
+				  ADPT_LPORT(adpt), (void *)&adpt->stats, sizeof(adpt->stats));
+}
+
+int ne6x_dev_set_mtu(struct ne6x_adapter *adpt, u32 mtu)
+{
+	u32 max_length = mtu + 18; /* MTU plus Ethernet header (14) and FCS (4) */
+
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_MAX_FRAME, NE6X_TALK_SET,
+				  ADPT_LPORT(adpt), (void *)&max_length, sizeof(max_length));
+}
+
+int ne6x_dev_get_mtu(struct ne6x_adapter *adpt, u32 *mtu)
+{
+	u32 max_length;
+	int ret;
+
+	ret = ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_MAX_FRAME, NE6X_TALK_GET,
+				 ADPT_LPORT(adpt), (void *)&max_length, sizeof(max_length));
+	*mtu = max_length - 18;
+
+	return ret;
+}
+
+static int fastlog2(int x)
+{
+	int idx;
+
+	for (idx = 31; idx >= 0; idx--) {
+		if (x & (1 << idx))
+			break;
+	}
+
+	return idx;
+}
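+
+/* fastlog2() above computes floor(log2(x)); with the kernel's bitops the
+ * same thing is:
+ *
+ *	fastlog2(x) == fls(x) - 1	// for x > 0
+ */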
+
+int ne6x_dev_set_rss(struct ne6x_adapter *adpt, struct ne6x_rss_info *cfg)
+{
+	struct rss_table rss;
+	u32 *rss_data = (u32 *)&rss;
+	int ret, i;
+
+	memset(&rss, 0x00, sizeof(rss));
+	rss.flag = cpu_to_be32(0x01); /* valid bit */
+	rss.hash_fun = (cfg->hash_func << 24) & 0xFF000000;
+	rss.hash_fun |= (cfg->hash_type & 0xFFFFFF);
+	rss.hash_fun = cpu_to_be32(rss.hash_fun);
+	rss.queue_base = cpu_to_be32(ADPT_VPORTCOS(adpt));
+	rss.queue_def = cpu_to_be16(0x0);
+	rss.queue_size = cpu_to_be16(adpt->num_queue);
+	rss.entry_num = fastlog2(cfg->ind_table_size);
+	rss.entry_num = cpu_to_be16(rss.entry_num);
+	rss.entry_size = cpu_to_be16(0x0);
+
+	for (i = 0; i < cfg->ind_table_size; i++)
+		rss.entry_data[i] = cfg->ind_table[i];
+
+	ext_toeplitz_key(&cfg->hash_key[0], &rss.hash_key[0]);
+
+	for (i = 0; i < 128; i++)
+		rss_data[i] = cpu_to_be32(rss_data[i]);
+
+	ret = ne6x_reg_table_write(adpt->back, NE6X_REG_RSS_TABLE, ADPT_VPORT(adpt),
+				   (void *)&rss, sizeof(rss));
+	return ret;
+}
+
+int ne6x_dev_upgrade_firmware(struct ne6x_adapter *adpt, u8 region, u8 *data, int size, int flags)
+{
+	int ret;
+
+	clear_bit(NE6X_LINK_POOLING, adpt->back->state);
+	ret = ne6x_reg_upgrade_firmware(adpt->back, region, data, size);
+	set_bit(NE6X_LINK_POOLING, adpt->back->state);
+
+	return ret;
+}
+
+int ne6x_dev_get_sfp_type_len(struct ne6x_adapter *adpt, struct ne6x_sfp_mod_type_len *sfp_mode)
+{
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_TYPE_LEN, NE6X_TALK_GET,
+				  ADPT_LPORT(adpt), sfp_mode, sizeof(*sfp_mode));
+}
+
+int ne6x_dev_get_sfp_eeprom(struct ne6x_adapter *adpt, u8 *data, int offset, int size, int flags)
+{
+	return ne6x_reg_get_sfp_eeprom(adpt->back, ADPT_LPORT(adpt), data, offset, size);
+}
+
+int ne6x_dev_clear_stats(struct ne6x_adapter *adpt)
+{
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATS, NE6X_TALK_SET,
+				  ADPT_LPORT(adpt), NULL, 0);
+}
+
+/* panel port mapped to logical port */
+void ne6x_dev_set_port2pi(struct ne6x_adapter *adpt)
+{
+	u32 val = (ADPT_LPORT(adpt) << 24) | (ADPT_VPORT(adpt) << 16) |
+		  (adpt->port_info->hw_queue_base + 160);
+
+	ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT2PI_0 + ADPT_PPORT(adpt)), val);
+}
+
+/* logical port mapped to panel port */
+void ne6x_dev_set_pi2port(struct ne6x_adapter *adpt)
+{
+	ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PI2PORT_0 + ADPT_LPORT(adpt)),
+			       ADPT_PPORT(adpt));
+}
+
+/* clear vport map */
+void ne6x_dev_clear_vport(struct ne6x_pf *pf)
+{
+	int idx;
+
+	for (idx = 0; idx < 32; idx++)
+		ne6x_reg_set_user_data(pf, (NP_USER_DATA_PORT_2_COS_0 + idx), 0);
+
+	for (idx = 0; idx < 64; idx++)
+		ne6x_reg_set_user_data(pf, (NP_USER_DATA_PORT_OLFLAGS_0 + idx), 0);
+}
+
+/* automatically generating vp_base_cos */
+int ne6x_dev_set_vport(struct ne6x_adapter *adpt)
+{
+	u16 port = adpt->vport >> 1;
+	u32 val = 0;
+
+	ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), &val);
+
+	/* pf base cos */
+	if (adpt->vport & 0x1) {
+		val &= 0xFFFF;
+		val |= ((adpt->port_info->hw_queue_base + 160) << 16);
+		ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), val);
+	} else {
+		val &= 0xFFFF0000;
+		val |= (adpt->port_info->hw_queue_base + 160);
+		ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), val);
+	}
+
+	return 0;
+}
+
+int ne6x_dev_get_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp)
+{
+	pbmp_t new_pbmp;
+	int ret;
+
+	PBMP_CLEAR(new_pbmp);
+	ret = ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE,
+				  ADPT_LPORT(adpt) * 4096 + (vlan_id & 0xFFF),
+				  (void *)new_pbmp,
+				  sizeof(pbmp_t));
+
+	PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3);
+	PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2);
+	PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1);
+	PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0);
+
+	return ret;
+}
+
+int ne6x_dev_set_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp)
+{
+	pbmp_t new_pbmp;
+
+	PBMP_CLEAR(new_pbmp);
+	PBMP_DWORD_GET(new_pbmp, 0) = PBMP_DWORD_GET(pbmp, 3);
+	PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2);
+	PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1);
+	PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0);
+
+	return ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE,
+				    ADPT_LPORT(adpt) * 4096 + (vlan_id & 0xFFF),
+				    (void *)new_pbmp, sizeof(pbmp_t));
+}
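+
+/* The hardware keeps the VLAN port bitmap with its four 32-bit words in
+ * reverse order, which is why get/set_vlan_port above and vlan_add/del
+ * below each mirror the words on both the read and the write path.  A
+ * sketch of a shared helper for that reversal (hypothetical, not part of
+ * this patch):
+ *
+ *	static void ne6x_pbmp_reverse(pbmp_t dst, const pbmp_t src)
+ *	{
+ *		PBMP_DWORD_GET(dst, 0) = PBMP_DWORD_GET(src, 3);
+ *		PBMP_DWORD_GET(dst, 1) = PBMP_DWORD_GET(src, 2);
+ *		PBMP_DWORD_GET(dst, 2) = PBMP_DWORD_GET(src, 1);
+ *		PBMP_DWORD_GET(dst, 3) = PBMP_DWORD_GET(src, 0);
+ *	}
+ */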
+
+int ne6x_dev_vlan_add(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan)
+{
+	pbmp_t pbmp, new_pbmp;
+	u16 index = 0;
+
+	if (vlan->tpid == ETH_P_8021Q)
+		index = ADPT_LPORT(adpt) * 4096;
+	else if (vlan->tpid == ETH_P_8021AD)
+		index = 4 * 4096 + ADPT_LPORT(adpt) * 4096;
+
+	memset(pbmp, 0, sizeof(pbmp_t));
+	memset(new_pbmp, 0, sizeof(pbmp_t));
+
+	ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF),
+			    (void *)&new_pbmp, sizeof(pbmp));
+	PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3);
+	PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2);
+	PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1);
+	PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0);
+
+	memset(new_pbmp, 0, sizeof(pbmp));
+
+	PBMP_PORT_ADD(pbmp, adpt->vport);
+
+	PBMP_DWORD_GET(new_pbmp, 0) = PBMP_DWORD_GET(pbmp, 3);
+	PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2);
+	PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1);
+	PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0);
+
+	ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF),
+			     (void *)&new_pbmp, sizeof(pbmp));
+
+	return 0;
+}
+
+int ne6x_dev_vlan_del(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan)
+{
+	pbmp_t pbmp, new_pbmp;
+	u16 index = 0;
+
+	if (vlan->tpid == ETH_P_8021Q)
+		index = ADPT_LPORT(adpt) * 4096;
+	else if (vlan->tpid == ETH_P_8021AD)
+		index = 4 * 4096 + ADPT_LPORT(adpt) * 4096;
+
+	memset(pbmp, 0, sizeof(pbmp));
+	memset(new_pbmp, 0, sizeof(pbmp));
+
+	ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF),
+			    (void *)&new_pbmp, sizeof(pbmp));
+
+	PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3);
+	PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2);
+	PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1);
+	PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0);
+
+	memset(new_pbmp, 0, sizeof(pbmp));
+
+	PBMP_PORT_REMOVE(pbmp, adpt->vport);
+
+	PBMP_DWORD_GET(new_pbmp, 0) = PBMP_DWORD_GET(pbmp, 3);
+	PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2);
+	PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1);
+	PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0);
+
+	ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF),
+			     (void *)&new_pbmp, sizeof(pbmp));
+
+	return 0;
+}
+
+/* clear vlan table */
+int ne6x_dev_clear_vlan_map(struct ne6x_pf *pf)
+{
+	pbmp_t pbmp;
+	int index;
+
+	PBMP_CLEAR(pbmp);
+	for (index = 0; index < 8192; index++)
+		ne6x_reg_table_write(pf, NE6X_REG_VLAN_TABLE, index, (void *)pbmp, sizeof(pbmp));
+
+	return 0;
+}
+
+/* port add qinq */
+int ne6x_dev_add_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid)
+{
+	struct ne6x_vf_vlan vlan;
+	u32 val = 0;
+
+	memset(&vlan, 0, sizeof(vlan));
+
+	vlan.tpid = proto;
+	vlan.vid = vid;
+
+	memcpy(&val, &vlan, sizeof(u32));
+	ne6x_reg_set_user_data(vf->adpt->back, NP_USER_DATA_PORT0_QINQ + ADPT_VPORT(vf->adpt), val);
+
+	return 0;
+}
+
+/* port del qinq */
+int ne6x_dev_del_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid)
+{
+	ne6x_reg_set_user_data(vf->adpt->back, NP_USER_DATA_PORT0_QINQ + ADPT_VPORT(vf->adpt), 0);
+
+	return 0;
+}
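+
+/* VLAN table layout implied by the index arithmetic in vlan_add/vlan_del
+ * above: 4096 entries per logical port, with the four 802.1Q port blocks
+ * followed by the four 802.1AD blocks:
+ *
+ *	index = (tpid == ETH_P_8021AD ? 4 * 4096 : 0)
+ *	      + ADPT_LPORT(adpt) * 4096 + (vid & 0xFFF);
+ */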
ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + + if (enable) + val |= NE6X_F_RX_ALLMULTI; + else + val &= ~NE6X_F_RX_ALLMULTI; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +static void ne6x_dev_update_uc_leaf(struct l2fdb_dest_unicast *unicast, struct ne6x_adapter *adpt, + bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? SET_BIT(unicast->vp_bmp[vport / 32], vport % 32) : + CLR_BIT(unicast->vp_bmp[vport / 32], vport % 32); + + unicast->cnt = 0; +} + +int ne6x_dev_add_unicast_for_fastmode(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_fast_table db; + + memcpy(&db.mac[0], mac, 6); + db.start_cos = ADPT_VPORTCOS(adpt); + db.cos_num = adpt->num_queue; + + to_be32_vector(0, sizeof(db) / 4, &db); + + return ne6x_reg_set_unicast_for_fastmode(adpt->back, ADPT_VPORT(adpt), + (u32 *)&db, sizeof(db)); +} + +int ne6x_dev_add_unicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + u32 tid = 0xffffffff; + int ret; + + if (adpt->back->is_fastmode) + ne6x_dev_add_unicast_for_fastmode(adpt, mac); + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 16, &db); + + ret = ne6x_add_key(adpt, mac, 6); + if (!ret) { + memset(&db, 0, 128); + memcpy(&db.mac[0], mac, 6); + db.pport = ADPT_LPORT(adpt); + db.vlanid = 0; + + memset(&db.fw_info.unicast, 0, sizeof(db.fw_info.unicast)); + db.fw_info.unicast.flags = 0x1; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, true); + + to_be32_vector(0, 17, &db); + + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 128, &tid); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), + "insert unicast table %x %02x %02x %02x %02x %02x %02x fail\n", + ADPT_LPORT(adpt), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + } else { + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + db.fw_info.unicast.flags = 0x1; + db.fw_info.unicast.vp_bmp[0] = res.fw_info.unicast.vp_bmp[0]; + db.fw_info.unicast.vp_bmp[1] = res.fw_info.unicast.vp_bmp[1]; + db.fw_info.unicast.vp_bmp[2] = res.fw_info.unicast.vp_bmp[2]; + db.fw_info.unicast.cnt = res.fw_info.unicast.cnt; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, true); + + to_be32_vector(16, 17, &db); + + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + } + + return 0; +} + +static int ne6x_dev_del_unicast_for_fastmode(struct ne6x_adapter *adpt) +{ + struct l2fdb_fast_table db; + + memset(&db, 0, sizeof(db)); + + return ne6x_reg_set_unicast_for_fastmode(adpt->back, ADPT_VPORT(adpt), + (u32 *)&db, sizeof(db)); +} + +int ne6x_dev_del_unicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + int ret = 0; + + if (adpt->back->is_fastmode) + ne6x_dev_del_unicast_for_fastmode(adpt); + + ret = ne6x_del_key(adpt, mac, 6); + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + + memset(&db, 0, sizeof(db)); + memcpy(&db.mac[0], mac, 6); + db.vlanid = 0; + db.pport = ADPT_LPORT(adpt); + db.fw_info.unicast.flags = 0x1; + db.fw_info.unicast.vp_bmp[0] = res.fw_info.unicast.vp_bmp[0]; + db.fw_info.unicast.vp_bmp[1] = 
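/* keep the membership bitmap returned by the table search */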
res.fw_info.unicast.vp_bmp[1]; + db.fw_info.unicast.vp_bmp[2] = res.fw_info.unicast.vp_bmp[2]; + db.fw_info.unicast.cnt = res.fw_info.unicast.cnt; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, false); + + to_be32_vector(0, 17, &db); + + if (!ret) + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_L2FDB_TABLE, (u32 *)&db, 64); + else + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + + return 0; +} + +static void ne6x_dev_update_mc_leaf(struct l2fdb_dest_multicast *multicast, + struct ne6x_adapter *adpt, bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? SET_BIT(multicast->vp_bmp[vport / 32], vport % 32) : + CLR_BIT(multicast->vp_bmp[vport / 32], vport % 32); +} + +int ne6x_dev_add_multicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + u32 tid = 0xffffffff; + int ret; + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + ret = ne6x_add_key(adpt, mac, 6); + if (!ret) { + memset(&db, 0, sizeof(db)); + memcpy(&db.mac[0], mac, 6); + db.pport = ADPT_LPORT(adpt); + + memset(&db.fw_info.multicast, 0, sizeof(db.fw_info.multicast)); + db.fw_info.multicast.flags = 0x3; + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, true); + + to_be32_vector(0, 17, &db); + + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 128, &tid); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), + "insert multicast table %x %02x %02x %02x %02x %02x %02x fail\n", + ADPT_LPORT(adpt), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + } else { + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + + db.fw_info.multicast.flags = 0x3; + db.fw_info.multicast.vp_bmp[0] = res.fw_info.multicast.vp_bmp[0]; + db.fw_info.multicast.vp_bmp[1] = res.fw_info.multicast.vp_bmp[1]; + db.fw_info.multicast.vp_bmp[2] = res.fw_info.multicast.vp_bmp[2]; + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, true); + + to_be32_vector(16, 17, &db); + + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + } + + return 0; +} + +int ne6x_dev_del_multicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + int ret; + + ret = ne6x_del_key(adpt, mac, 6); + + memset(&db, 0, sizeof(db)); + + /* hash_key */ + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + /* mac info */ + ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + memset(&db, 0, 128); + memcpy(&db.mac[0], mac, 6); + db.vlanid = 0; + db.pport = ADPT_LPORT(adpt); + db.fw_info.multicast.flags = 0x3; + db.fw_info.multicast.vp_bmp[0] = res.fw_info.multicast.vp_bmp[0]; + db.fw_info.multicast.vp_bmp[1] = res.fw_info.multicast.vp_bmp[1]; + db.fw_info.multicast.vp_bmp[2] = res.fw_info.multicast.vp_bmp[2]; + + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, false); + + to_be32_vector(0, 17, &db); + + if (!ret) + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_L2FDB_TABLE, (u32 *)&db, 64); + else + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + + return ret; +} + +inline void ne6x_dev_update_boradcast_leaf(u32 *leaf, struct ne6x_adapter *adpt, bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? 
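/* true adds the vport bit to the leaf, false clears it */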
SET_BIT(*leaf, vport % 32) : CLR_BIT(*leaf, vport % 32); +} + +int ne6x_dev_add_broadcast_leaf(struct ne6x_adapter *adpt) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), &val); + ne6x_dev_update_boradcast_leaf(&val, adpt, true); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), val); + + return 0; +} + +int ne6x_dev_del_broadcast_leaf(struct ne6x_adapter *adpt) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), &val); + ne6x_dev_update_boradcast_leaf(&val, adpt, false); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), val); + + return 0; +} + +u32 ne6x_dev_get_features(struct ne6x_adapter *adpt) +{ + int val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + + return val; +} + +int ne6x_dev_set_features(struct ne6x_adapter *adpt, u32 val) +{ + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +int ne6x_dev_enable_rxhash(struct ne6x_adapter *adpt, int enable) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + if (enable) + val |= NE6X_F_RSS; + else + val &= ~NE6X_F_RSS; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +int ne6x_dev_set_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state fec) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_FEC, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&fec, sizeof(int)); +} + +int ne6x_dev_set_mac_inloop(struct ne6x_adapter *adpt, int enable) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_LOOPBACK, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&enable, sizeof(int)); +} + +int ne6x_dev_get_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state *fec) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_FEC, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)fec, sizeof(int)); +} + +int ne6x_dev_set_sfp_speed(struct ne6x_adapter *adpt, u32 speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_SPEED, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&speed, sizeof(u32)); +} + +int ne6x_dev_get_sfp_speed(struct ne6x_adapter *adpt, u32 *speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_SPEED, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)speed, sizeof(u32)); +} + +int ne6x_dev_set_if_state(struct ne6x_adapter *adpt, u32 state) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATE, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&state, sizeof(u32)); +} + +int ne6x_dev_get_if_state(struct ne6x_adapter *adpt, u32 *state) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATE, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)state, sizeof(u32)); +} + +int ne6x_dev_set_nic_stop(struct ne6x_pf *pf, u32 flag) +{ + return ne6x_reg_nic_stop(pf, flag); +} + +int ne6x_dev_set_nic_start(struct ne6x_pf *pf, u32 flag) +{ + return ne6x_reg_nic_start(pf, flag); +} + +int ne6x_dev_set_led(struct ne6x_adapter *adpt, bool state) +{ + return ne6x_reg_set_led(adpt->back, ADPT_LPORT(adpt), state); +} + +void ne6x_dev_transform_vf_stat_format(u32 *stat_arr, struct vf_stat *stat) +{ + u32 start_pos = 0; + + stat->rx_malform_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos 
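/* each counter is a 64-bit value stored as a high/low pair of u32s */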
+= 2; + stat->rx_drop_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_broadcast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_multicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_unicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_broadcast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_multicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_unicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 16; + stat->tx_malform_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; +} + +int ne6x_dev_get_vf_stat(struct ne6x_adapter *adpt, struct vf_stat *stat) +{ + u32 stat_arr[64]; + int ret; + + ret = ne6x_reg_table_read(adpt->back, NE6X_REG_VF_STAT_TABLE, ADPT_VPORT(adpt), + (u32 *)&stat_arr[0], sizeof(stat_arr)); + ne6x_dev_transform_vf_stat_format(stat_arr, stat); + + return ret; +} + +int ne6x_dev_reset_vf_stat(struct ne6x_adapter *adpt) +{ + u32 stat_arr[64] = {0}; + + return ne6x_reg_table_write(adpt->back, NE6X_REG_VF_STAT_TABLE, ADPT_VPORT(adpt), + (u32 *)&stat_arr[0], sizeof(stat_arr)); +} + +int ne6x_dev_check_speed(struct ne6x_adapter *adpt, u32 speed) +{ + switch (adpt->back->dev_type) { + case NE6000AI_2S_X16H_25G_N5: + case NE6000AI_2S_X16H_25G_N6: + if (speed == SPEED_25000 || speed == SPEED_10000) + return 0; + + return -EOPNOTSUPP; + case NE6000AI_2S_X16H_100G_N5: + if (speed == SPEED_40000 || speed == SPEED_100000) + return 0; + + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +int ne6x_dev_set_fw_lldp(struct ne6x_adapter *adpt, bool state) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + if (state) + val |= NE6X_F_RX_FW_LLDP; + else + val &= ~NE6X_F_RX_FW_LLDP; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +#define NE6X_METER_STEP 152 +#define NE6X_DF_METER_CBS_PBS (100 * 152) +int ne6x_dev_set_vf_bw(struct ne6x_adapter *adpt, int tx_rate) +{ + u32 val = 0, ret = 0; + u32 cir = 0, cbs = 0; + struct meter_table vf_bw; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + memset(&vf_bw, 0, sizeof(struct meter_table)); + + if (tx_rate) + val |= NE6X_F_TX_QOSBANDWIDTH; + else + val &= ~NE6X_F_TX_QOSBANDWIDTH; + + if (tx_rate) { + cir = tx_rate; + cbs = 0xffffff; + vf_bw.pbs = cbs; + vf_bw.cir = cir; + vf_bw.cbs = cbs; + vf_bw.pir = cir; + ret = ne6x_reg_config_meter(adpt->back, + NE6X_METER0_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | + ADPT_VPORT(adpt), + (u32 *)&vf_bw, sizeof(vf_bw)); + ne6x_reg_set_user_data(adpt->back, + NP_USER_DATA_PORT_OLFLAGS_0 + + ADPT_VPORT(adpt), + val); + } else { + ne6x_reg_set_user_data(adpt->back, + NP_USER_DATA_PORT_OLFLAGS_0 + + ADPT_VPORT(adpt), + val); + ret = ne6x_reg_config_meter(adpt->back, + NE6X_METER0_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | + ADPT_VPORT(adpt), + (u32 *)&vf_bw, sizeof(vf_bw)); + } + + return ret; +} + +static int ne6x_dev_reg_pattern_test(struct ne6x_pf *pf, u32 reg, u32 val_arg) +{ + struct device *dev; + u32 val, orig_val; + + orig_val = ne6x_reg_apb_read(pf, reg); + dev = ne6x_pf_to_dev(pf); + + ne6x_reg_apb_write(pf, reg, val_arg); + val = ne6x_reg_apb_read(pf, reg); + if (val != 
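/* readback must match the pattern just written */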
val_arg) { + dev_err(dev, "%s: reg pattern test failed - reg 0x%08x val 0x%08x\n", + __func__, reg, val); + return -1; + } + + ne6x_reg_apb_write(pf, reg, orig_val); + val = ne6x_reg_apb_read(pf, reg); + if (val != orig_val) { + dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n", + __func__, reg, orig_val, val); + return -1; + } + + return 0; +} + +#define NE6X_TEST_INT_SET_VALUE 0x1000000000000000 /* bit 60 */ +int ne6x_dev_test_intr(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int base = adpt->base_vector; + union ne6x_vp_int vp_int; + int ret = -1; + + if (base < NE6X_PF_VP0_NUM) { + vp_int.val = rd64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT)); + wr64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT_SET), + NE6X_TEST_INT_SET_VALUE); + vp_int.val = rd64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT)); + if (vp_int.val & NE6X_TEST_INT_SET_VALUE) { + ret = 0; + vp_int.val &= ~NE6X_TEST_INT_SET_VALUE; + wr64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT), vp_int.val); + } + } else { + vp_int.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT_SET), + NE6X_TEST_INT_SET_VALUE); + vp_int.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + if (vp_int.val & NE6X_TEST_INT_SET_VALUE) { + ret = 0; + vp_int.val &= ~NE6X_TEST_INT_SET_VALUE; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT), vp_int.val); + } + } + + return ret; +} + +int ne6x_dev_test_reg(struct ne6x_adapter *adpt) +{ + struct ne6x_diag_reg_info test_reg[4] = { + {0x20a00180, 0x5A5A5A5A}, + {0x20a00180, 0xA5A5A5A5}, + {0x20a00188, 0x00000000}, + {0x20a0018c, 0xFFFFFFFF} + }; + u32 value, reg; + int index; + + netdev_dbg(adpt->netdev, "Register test\n"); + for (index = 0; index < ARRAY_SIZE(test_reg); ++index) { + value = test_reg[index].value; + reg = test_reg[index].address; + + /* bail on failure (non-zero return) */ + if (ne6x_dev_reg_pattern_test(adpt->back, reg, value)) + return 1; + } + + return 0; +} + +#define NE6X_LOOP_TEST_TYPE 0x1234 +/* handle hook packet */ +int ne6x_dev_proto_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *ndev) +{ + struct ne6x_netdev_priv *np = netdev_priv(dev); + struct ne6x_adapter *adpt = np->adpt; + + netdev_info(dev, "recv loopback test packet success!\n"); + adpt->recv_done = true; + + kfree_skb(skb); + wake_up(&adpt->recv_notify); + + return 0; +} + +static u8 loop_dst_mac[8] = {0x00, 0x00, 0x00, 0x11, 0x11, 0x01}; +int ne6x_dev_proto_send(struct net_device *netdev, char *buf, int len) +{ + struct sk_buff *skb; + u8 *pdata = NULL; + u32 skb_len; + + skb_len = LL_RESERVED_SPACE(netdev) + len; + skb = dev_alloc_skb(skb_len); + if (!skb) + return -1; + + skb_reserve(skb, LL_RESERVED_SPACE(netdev)); + skb->dev = netdev; + skb->ip_summed = CHECKSUM_NONE; + skb->priority = 0; + pdata = skb_put(skb, len); + if (pdata) + memcpy(pdata, buf, len); + + /* send loop test packet */ + if (dev_queue_xmit(skb) < 0) { + dev_put(netdev); + kfree_skb(skb); + netdev_err(netdev, "send pkt fail.\n"); + return -1; + } + netdev_info(netdev, "send loopback test packet success!\n"); + + return 0; +} + +int ne6x_dev_test_loopback(struct ne6x_adapter *adpt) +{ + struct packet_type prot_hook; + struct ethhdr *ether_hdr; + u32 old_value; + int ret = 0; + + adpt->send_buffer = kzalloc(2048, GFP_KERNEL); + if (!adpt->send_buffer) + return -ENOMEM; + + 
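+	/* Self-test flow implemented below: switch the MAC into internal
+	 * loopback, register a hook for the private test ethertype, transmit
+	 * one frame, then wait up to 2 s for ne6x_dev_proto_recv() to see it
+	 * come back.
+	 */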
/* config mac/pcs loopback */
+	if (ne6x_dev_set_mac_inloop(adpt, true)) {
+		netdev_err(adpt->netdev, "loopback test set_mac_inloop failed\n");
+		kfree(adpt->send_buffer);
+		adpt->send_buffer = NULL;
+		return -1;
+	}
+
+	old_value = ne6x_dev_get_features(adpt);
+	ne6x_dev_set_uc_promiscuous_enable(adpt, true);
+	memset(&prot_hook, 0, sizeof(struct packet_type));
+	prot_hook.type = cpu_to_be16(NE6X_LOOP_TEST_TYPE);
+	prot_hook.dev = adpt->netdev;
+	prot_hook.func = ne6x_dev_proto_recv;
+	dev_add_pack(&prot_hook);
+	ether_hdr = (struct ethhdr *)adpt->send_buffer;
+	memcpy(ether_hdr->h_source, &adpt->port_info->mac.perm_addr[0], ETH_ALEN);
+	memcpy(ether_hdr->h_dest, loop_dst_mac, ETH_ALEN);
+	ether_hdr->h_proto = cpu_to_be16(NE6X_LOOP_TEST_TYPE);
+	adpt->send_buffer[14] = 0x45;
+	ne6x_dev_proto_send(adpt->netdev, adpt->send_buffer, 1024);
+
+	if (wait_event_interruptible_timeout(adpt->recv_notify, !!adpt->recv_done,
+					     msecs_to_jiffies(2000)) <= 0) {
+		netdev_info(adpt->netdev, "loopback test failed\n");
+		ret = -1;
+	}
+
+	adpt->recv_done = false;
+	kfree(adpt->send_buffer);
+	adpt->send_buffer = NULL;
+	/* restore promiscuous settings */
+	ne6x_dev_set_features(adpt, old_value);
+	dev_remove_pack(&prot_hook);
+	if (ne6x_dev_set_mac_inloop(adpt, false)) {
+		netdev_err(adpt->netdev, "loopback test cancel_mac_inloop failed\n");
+		return -1;
+	}
+
+	return ret;
+}
+
+int ne6x_dev_set_port_mac(struct ne6x_adapter *adpt, u8 *data)
+{
+	u8 mac_info[8] = {0};
+
+	memcpy(mac_info, data, 6);
+
+	/* send the zero-padded 8-byte buffer, not the caller's 6-byte MAC */
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_INFO, NE6X_TALK_SET, ADPT_LPORT(adpt),
+				  (void *)mac_info, sizeof(mac_info));
+}
+
+static u32 crc_table[CRC32_TABLE_SIZE]; /* 1KB */
+static void ne6x_dev_crc32_for_fw_init(void)
+{
+	u32 remainder;
+	u32 dividend;
+	s32 bit;
+
+	for (dividend = 0U; dividend < CRC32_TABLE_SIZE; ++dividend) {
+		remainder = dividend;
+		for (bit = 8; bit > 0; --bit) {
+			if ((remainder & 1U) != 0)
+				remainder = (remainder >> 1) ^ CRC32_REVERSED_POLYNOMIAL;
+			else
+				remainder >>= 1;
+		}
+
+		crc_table[dividend] = remainder;
+	}
+}
+
+static u32 ne6x_dev_crc32_for_fw(const void *message, u32 bytes)
+{
+	const u8 *buffer = (const u8 *)message;
+	u32 remainder = CRC32_INITIAL_REMAINDER;
+	u8 idx;
+
+	/* the table is small, so it is simply rebuilt on every call */
+	ne6x_dev_crc32_for_fw_init();
+
+	while (bytes-- > 0) {
+		idx = (u8)(*buffer++ ^ remainder);
+		remainder = crc_table[idx] ^ (remainder >> 8);
+	}
+
+	return remainder ^ CRC32_FINALIZE_REMAINDER;
+}
+
+static int ne6x_dev_get_fw_region(const u8 *data, u32 size, int *region)
+{
+	if (size < NE6X_FW_SIG_LENGTH)
+		return NE6X_FW_NOT_SUPPORT;
+
+	if (!memcmp(data, NE6X_FW_810_APP_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FLASH_810_APP;
+		return 0;
+	} else if (!memcmp(data, NE6X_FW_NP_APP_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FLASH_NP;
+		return 0;
+	} else if (!memcmp(data, NE6X_FW_PXE_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FLASH_PXE;
+		return 0;
+	} else if (!memcmp(data, NE6X_FW_810_LDR_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FLASH_810_LOADER;
+		return 0;
+	} else if (!memcmp(data, NE6X_FW_FRU_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FRU;
+		return 0;
+	} else if (!memcmp(data, NE6X_FW_807_APP_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FLASH_807_APP;
+		return 0;
+	} else {
+		return NE6X_FW_NOT_SUPPORT;
+	}
+}
+
+static int ne6x_dev_check_fw(const u8 *data, const u32 size, const int region)
+{
+	struct ne6x_fw_common_header *comm_hdr;
+	struct ne6x_fw_np_header *np_hdr;
+	u32 hcrc, pcrc, crc;
+
+	switch (region) {
+	case NE6X_ETHTOOL_FLASH_810_APP:
+	case NE6X_ETHTOOL_FLASH_PXE:
+	case 
NE6X_ETHTOOL_FLASH_810_LOADER: + case NE6X_ETHTOOL_FLASH_807_APP: + comm_hdr = (struct ne6x_fw_common_header *)&data[NE6X_FW_SIG_OFFSET]; + hcrc = comm_hdr->header_crc; + pcrc = comm_hdr->package_crc; + comm_hdr->header_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, sizeof(*comm_hdr)); + if (crc != hcrc) + return NE6X_FW_HEADER_CRC_ERR; + + if (comm_hdr->length != size) + return NE6X_FW_LENGTH_ERR; + + comm_hdr->package_crc = CRC32_INITIAL_REMAINDER; + comm_hdr->header_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, comm_hdr->length); + comm_hdr->package_crc = pcrc; + comm_hdr->header_crc = hcrc; + if (crc != pcrc) + return NE6X_FW_PKG_CRC_ERR; + + break; + case NE6X_ETHTOOL_FLASH_NP: + np_hdr = (struct ne6x_fw_np_header *)&data[NE6X_FW_SIG_OFFSET]; + hcrc = np_hdr->hdr_crc; + pcrc = np_hdr->pkg_crc; + np_hdr->hdr_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, sizeof(*np_hdr)); + if (crc != hcrc) + return NE6X_FW_HEADER_CRC_ERR; + + if (np_hdr->img_length != size) + return NE6X_FW_LENGTH_ERR; + + np_hdr->pkg_crc = CRC32_INITIAL_REMAINDER; + np_hdr->hdr_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, np_hdr->img_length); + np_hdr->pkg_crc = pcrc; + np_hdr->hdr_crc = hcrc; + if (crc != pcrc) + return NE6X_FW_PKG_CRC_ERR; + + break; + } + + return 0; +} + +int ne6x_dev_validate_fw(const u8 *data, const u32 size, int *region) +{ + if (ne6x_dev_get_fw_region(data, size, region)) + return NE6X_FW_NOT_SUPPORT; + + return ne6x_dev_check_fw(data, size, *region); +} + +int ne6x_dev_set_tx_rx_state(struct ne6x_adapter *adpt, int tx_state, int rx_state) +{ + u32 value = ne6x_dev_get_features(adpt); + + if (tx_state) + value &= ~NE6X_F_TX_DISABLE; + else + value |= NE6X_F_TX_DISABLE; + + if (rx_state) + value &= ~NE6X_F_RX_DISABLE; + else + value |= NE6X_F_RX_DISABLE; + + ne6x_dev_set_features(adpt, value); + + return 0; +} + +int ne6x_dev_set_fast_mode(struct ne6x_pf *pf, bool is_fast_mode, u8 number_queue) +{ + u32 mode; + + if (is_fast_mode) { + mode = pf->num_alloc_vfs; + mode |= 1 << 16; + pf->is_fastmode = true; + } else { + mode = 0; + pf->is_fastmode = false; + } + + return ne6x_reg_set_user_data(pf, NP_USER_DATA_FAST_MODE, mode); +} + +int ne6x_dev_get_dump_data_len(struct ne6x_pf *pf, u32 *size) +{ + return ne6x_reg_get_dump_data_len(pf, size); +} + +int ne6x_dev_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size) +{ + return ne6x_reg_get_dump_data(pf, data, size); +} + +int ne6x_dev_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect) +{ + return ne6x_reg_set_norflash_write_protect(pf, write_protect); +} + +int ne6x_dev_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect) +{ + return ne6x_reg_get_norflash_write_protect(pf, p_write_protect); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..02d89659623690514371dd1c9d17f6d583d88d74 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_DEV_H +#define _NE6X_DEV_H + +#include "ne6x_portmap.h" + +#define NE6000AI_2S_X16H_100G_N5 0xA050 +#define NE6000AI_2S_X16H_25G_N5 0xA030 +#define NE6000AI_2S_X16H_25G_N6 0xA031 + +#define NE6000_IF_INTERFACE_UP 1 +#define NE6000_IF_INTERFACE_DOWN 0 + +struct ne6x_flowctrl { + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; + +struct ne6x_sfp_mod_type_len { + u32 type; + u32 len; +}; + +enum { + NE6X_SOC_TEMPERATURE = 0x0, + NE6X_SOC_POWER_CONSUM, + NE6X_SOC_DDR_TEST, + NE6X_SOC_FRU, + NE6X_SOC_SERDES_SEND_BIT, + NE6X_SOC_I2C3_TEST, +}; + +struct ne6x_soc_temperature { + u32 chip_temerature; + u32 board_temperature; +}; + +struct ne6x_soc_power { + u32 cur; + u32 vol; + u32 power; +}; + +#define NE6X_FW_SIG_OFFSET 0x0 + +#define NE6X_FW_SIG_LENGTH 8 + +#define NE6X_FW_810_LDR_SIG "NE6K810L" +#define NE6X_FW_810_APP_SIG "NE6K810A" +#define NE6X_FW_807_APP_SIG "NE6K807A" +#define NE6X_FW_803_APP_SIG "NE6K803A" +#define NE6X_FW_803_LDR_SIG "NE6K803L" +#define NE6X_FW_NP_APP_SIG "NE6KNPV1" +#define NE6X_FW_TBL_SIG "NE6KTBL*" +#define NE6X_FW_PXE_SIG "NE6KPXE*" +#define NE6X_FW_FRU_SIG "NE6KFRU*" + +struct ne6x_fw_common_header { + u8 signature[NE6X_FW_SIG_LENGTH]; + u32 version; + u32 length; + u32 sections; + u32 sect_start_addr; + u32 type; + u32 build_date; + u8 reserved[16]; + u8 fw_ver[8]; + u32 package_crc; + u32 header_crc; +}; /* 64B */ + +struct ne6x_fw_np_iwidth { + char sig[4]; + u16 width; + u16 ocp; +}; /* 8B */ + +struct ne6x_fw_np_isad { + char sig[4]; + u32 isa_id; + + struct ne6x_fw_np_iwidth fp; + struct ne6x_fw_np_iwidth dp; + struct ne6x_fw_np_iwidth rp; +}; /* 32B */ + +struct ne6x_fw_np_atd { + char sig[4]; + u32 at_id; + + struct ne6x_fw_np_iwidth te; +}; /* 16B */ + +struct ne6x_fw_np_header { + char signature[NE6X_FW_SIG_LENGTH]; + u32 hdr_version; + u32 hdr_length; + + u32 rsvd; + u32 build_date; + u32 img_version; + u32 img_length; + + u32 npc_cnt; + u32 npc_offset; + u32 isa_cnt; + u32 isa_offset; + + u32 at_cnt; + u32 at_offset; + u32 atd_cnt; + u32 atd_offset; + + struct ne6x_fw_np_isad ISA[1]; + + struct ne6x_fw_np_atd ATD[1]; + + u32 cipher; /* For future use */ + u32 comp; /* For future use */ + u32 pkg_crc; + u32 hdr_crc; +}; /* 128 B */ + +#define CRC32_REVERSED_POLYNOMIAL 0xEDB88320U +#define CRC32_INITIAL_REMAINDER 0xFFFFFFFFU +#define CRC32_FINALIZE_REMAINDER 0xFFFFFFFFU +#define CRC32_TABLE_SIZE 256U + +enum { + NE6X_FW_NOT_SUPPORT = -1, + NE6X_FW_HEADER_CRC_ERR = -2, + NE6X_FW_LENGTH_ERR = -3, + NE6X_FW_PKG_CRC_ERR = -4, +}; + +struct ne6x_key_filter { + struct list_head list; + struct ne6x_key key; + struct { + u8 is_new_key : 1; /* filter is new, wait for PF answer */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 padding : 5; + u8 refcnt; + }; +}; + +struct ne6x_vlan_filter { + struct list_head list; + struct ne6x_vlan vlan; + struct { + u8 is_new_vlan : 1; /* filter is new, wait for PF answer */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 padding : 5; + u8 refcnt; + }; +}; + +enum { + NE6X_METER_SUBSET0 = 0x0, + NE6X_METER_SUBSET1, + NE6X_METER_SUBSET2, + NE6X_METER_SUBSET3, + NE6X_METER_SUBSET4, + NE6X_METER_SUBSET5, + NE6X_METER_SUBSET6, + NE6X_METER_SUBSET7, + NE6X_METER_SUBSET8, + NE6X_METER_SUBSET9, + NE6X_METER_SUBSET10, + NE6X_METER_SUBSET11, + NE6X_METER_SUBSET12, + NE6X_METER_SUBSET13, + NE6X_METER_SUBSET14, + NE6X_METER_SUBSET15, +}; + +#define NE6X_METER0_TABLE 0x00000000U +#define NE6X_METER1_TABLE 0x80000000U 
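+/* A meter is addressed by a single u32: bit 31 selects meter table 0/1,
+ * bits 30:27 pick one of 16 subsets, and the low bits carry the entry
+ * index (the vport in ne6x_dev_set_vf_bw()).
+ */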
+#define NE6X_METER_SUBSET(n) (((n) & 0xf) << 27) + +struct vf_stat { + u64 rx_drop_pkts; + u64 rx_broadcast_pkts; + u64 rx_multicast_pkts; + u64 rx_unicast_pkts; + u64 tx_broadcast_pkts; + u64 tx_multicast_pkts; + u64 tx_unicast_pkts; + u64 rx_malform_pkts; + u64 tx_malform_pkts; +}; + +enum ne6x_fec_state { + NE6X_FEC_NONE, + NE6X_FEC_RS, + NE6X_FEC_BASER, + NE6X_FEC_AUTO, +}; + +int ne6x_dev_init(struct ne6x_pf *pf); +int ne6x_dev_get_port_num(struct ne6x_pf *pf); +int ne6x_dev_get_mac_addr(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_get_mac_stats(struct ne6x_adapter *adpt); +int ne6x_dev_get_link_status(struct ne6x_adapter *adpt, struct ne6x_link_info *status); +int ne6x_dev_set_speed(struct ne6x_adapter *adpt, u32 speed); +int ne6x_dev_set_sfp_speed(struct ne6x_adapter *adpt, u32 speed); +int ne6x_dev_get_sfp_speed(struct ne6x_adapter *adpt, u32 *speed); + +int ne6x_dev_reset_firmware(struct ne6x_adapter *adpt); + +int ne6x_dev_self_test_link(struct ne6x_adapter *adpt, int *verify); + +u32 ne6x_dev_get_features(struct ne6x_adapter *adpt); +int ne6x_dev_set_features(struct ne6x_adapter *adpt, u32 value); + +int ne6x_dev_set_mtu(struct ne6x_adapter *adpt, u32 mtu); +int ne6x_dev_get_mtu(struct ne6x_adapter *adpt, u32 *mtu); + +void ne6x_dev_clear_vport(struct ne6x_pf *pf); +void ne6x_dev_set_port2pi(struct ne6x_adapter *adpt); +void ne6x_dev_set_pi2port(struct ne6x_adapter *adpt); +int ne6x_dev_set_vport(struct ne6x_adapter *adpt); + +int ne6x_dev_set_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp); +int ne6x_dev_get_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp); +int ne6x_dev_vlan_add(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan); +int ne6x_dev_vlan_del(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan); +int ne6x_dev_add_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid); +int ne6x_dev_del_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid); +int ne6x_dev_clear_vlan_map(struct ne6x_pf *pf); + +int ne6x_dev_set_rss(struct ne6x_adapter *adpt, struct ne6x_rss_info *info); + +int ne6x_dev_get_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl); +int ne6x_dev_set_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl); +int ne6x_dev_get_port_fec(struct ne6x_adapter *adpt, int *status); + +int ne6x_dev_write_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size); +int ne6x_dev_read_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size); + +int ne6x_dev_clear_stats(struct ne6x_adapter *adpt); + +int ne6x_dev_get_port_fec(struct ne6x_adapter *adpt, int *status); + +int ne6x_dev_set_uc_promiscuous_enable(struct ne6x_adapter *adpt, int enable); +int ne6x_dev_set_mc_promiscuous_enable(struct ne6x_adapter *adpt, int enable); + +int ne6x_dev_set_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state fec); +int ne6x_dev_get_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state *fec); + +int ne6x_dev_add_unicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_del_unicast(struct ne6x_adapter *adpt, u8 *mac); + +int ne6x_dev_add_multicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_del_multicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_enable_rxhash(struct ne6x_adapter *adpt, int enable); +int ne6x_dev_read_qsfp(struct ne6x_adapter *adpt, u8 regaddr, u8 *data, int len); + +int ne6x_dev_upgrade_firmware(struct ne6x_adapter *adpt, u8 region, u8 *data, int size, int flags); + +int ne6x_dev_get_sfp_type_len(struct ne6x_adapter *adpt, struct ne6x_sfp_mod_type_len *sfp_mode); + +int 
ne6x_dev_get_sfp_eeprom(struct ne6x_adapter *adpt, u8 *data, int offset, int size, int flags); + +int ne6x_dev_set_nic_stop(struct ne6x_pf *pf, u32 flag); +int ne6x_dev_set_nic_start(struct ne6x_pf *pf, u32 flag); +int ne6x_dev_get_temperature_info(struct ne6x_pf *pf, struct ne6x_soc_temperature *temp); +int ne6x_dev_get_power_consum(struct ne6x_pf *pf, struct ne6x_soc_power *power); +int ne6x_dev_get_fru(struct ne6x_pf *pf, u32 *buffer, u32 size); +int ne6x_dev_start_ddr_test(struct ne6x_pf *pf); +int ne6x_dev_i2c3_signal_test(struct ne6x_pf *pf, u32 *id); + +int ne6x_dev_set_if_state(struct ne6x_adapter *adpt, u32 state); +int ne6x_dev_get_if_state(struct ne6x_adapter *adpt, u32 *state); + +int ne6x_dev_get_sfp_status(struct ne6x_adapter *adpt, u8 *status); + +int ne6x_dev_set_led(struct ne6x_adapter *adpt, bool state); +int ne6x_dev_get_vf_stat(struct ne6x_adapter *adpt, struct vf_stat *stat); +int ne6x_dev_reset_vf_stat(struct ne6x_adapter *adpt); +int ne6x_dev_check_speed(struct ne6x_adapter *adpt, u32 speed); + +int ne6x_reg_table_update(struct ne6x_pf *pf, enum ne6x_reg_table table, u32 index, + u32 *data, int size); + +int ne6x_dev_set_fw_lldp(struct ne6x_adapter *adpt, bool state); + +int ne6x_dev_set_vf_bw(struct ne6x_adapter *adpt, int tx_rate); + +int ne6x_dev_test_loopback(struct ne6x_adapter *adpt); +int ne6x_dev_test_reg(struct ne6x_adapter *adpt); +int ne6x_dev_test_intr(struct ne6x_adapter *adpt); +int ne6x_dev_set_port_mac(struct ne6x_adapter *adpt, u8 *data); +int ne6x_dev_add_broadcast_leaf(struct ne6x_adapter *adpt); +int ne6x_dev_del_broadcast_leaf(struct ne6x_adapter *adpt); +int ne6x_dev_validate_fw(const u8 *data, const u32 size, int *region); + +int ne6x_dev_set_tx_rx_state(struct ne6x_adapter *adpt, int tx_state, int rx_state); +int ne6x_dev_set_fast_mode(struct ne6x_pf *pf, bool is_fast_mode, u8 num_queue); +int ne6x_dev_add_unicast_for_fastmode(struct ne6x_adapter *adpt, u8 *mac); + +int ne6x_dev_get_dump_data_len(struct ne6x_pf *pf, u32 *size); +int ne6x_dev_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size); +int ne6x_dev_set_white_list(struct ne6x_pf *pf, bool enable); +void ne6x_dev_set_ddos(struct ne6x_pf *pf, bool enable); +int ne6x_dev_get_pport(struct ne6x_adapter *adpt); +int ne6x_dev_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect); +int ne6x_dev_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect); + +u32 ne6x_dev_crc32(const u8 *buf, u32 size); +void ne6x_dev_set_trust_vlan(struct ne6x_pf *pf, bool enable); +bool ne6x_dev_get_trust_vlan(struct ne6x_pf *pf); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..063b734f238f08755872400e8ed81f7d9ce6de6f --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c @@ -0,0 +1,1623 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include "ne6x.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include +#include "version.h" + +static const char ne6x_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test ", "Loopback test ", "Register test ", "Interrupt test" +}; + +#define NE6X_TEST_LEN (sizeof(ne6x_gstrings_test) / ETH_GSTRING_LEN) + +static int ne6x_q_stats_len(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int stats_size, total_slen = 0; + + /* Tx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_txq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + /* Rx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_rxq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + /* CQ stats */ + stats_size = sizeof(struct ne6x_cq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + return total_slen; +} + +struct ne6x_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. + */ +#define NE6X_NETDEV_STAT(_net_stat) NE6X_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +static const struct ne6x_stats ne6x_gstrings_adpt_stats[] = { + NE6X_NETDEV_STAT(rx_packets), + NE6X_NETDEV_STAT(tx_packets), + NE6X_NETDEV_STAT(rx_bytes), + NE6X_NETDEV_STAT(tx_bytes), + NE6X_NETDEV_STAT(rx_errors), + NE6X_NETDEV_STAT(tx_errors), + NE6X_NETDEV_STAT(rx_dropped), + NE6X_NETDEV_STAT(tx_dropped), + NE6X_NETDEV_STAT(collisions), + NE6X_NETDEV_STAT(rx_length_errors), + NE6X_NETDEV_STAT(rx_crc_errors), +}; + +#define NE6X_DEVICE_ETH_STAT(_dev_eth_stat) NE6X_STAT(struct ne6x_eth_stats, \ + #_dev_eth_stat, _dev_eth_stat) + +static const struct ne6x_stats ne6x_gstrings_adpt_dev_eth_stats[] = { + NE6X_DEVICE_ETH_STAT(rx_unicast), + NE6X_DEVICE_ETH_STAT(rx_multicast), + NE6X_DEVICE_ETH_STAT(rx_broadcast), + NE6X_DEVICE_ETH_STAT(rx_discards), + NE6X_DEVICE_ETH_STAT(rx_miss), + NE6X_DEVICE_ETH_STAT(tx_unicast), + NE6X_DEVICE_ETH_STAT(tx_multicast), + NE6X_DEVICE_ETH_STAT(tx_broadcast), + NE6X_DEVICE_ETH_STAT(rx_malform), + NE6X_DEVICE_ETH_STAT(tx_malform), +}; + +#define NE6X_PF_STAT(_name, _stat) NE6X_STAT(struct ne6x_pf, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_pf_stats[] = { + NE6X_PF_STAT("tx_timeout", tx_timeout_count), +}; + +/* per-queue ring statistics */ +#define NE6X_QUEUE_STAT(_name, _stat) NE6X_STAT(struct ne6x_ring, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_tx_queue_stats[] = { + NE6X_QUEUE_STAT("tx_queue_%u_packets", stats.packets), + NE6X_QUEUE_STAT("tx_queue_%u_bytes", stats.bytes), + NE6X_QUEUE_STAT("tx_queue_%u_rst", tx_stats.restart_q), + NE6X_QUEUE_STAT("tx_queue_%u_busy", tx_stats.tx_busy), + NE6X_QUEUE_STAT("tx_queue_%u_line", tx_stats.tx_linearize), + NE6X_QUEUE_STAT("tx_queue_%u_csum_err", tx_stats.csum_err), + NE6X_QUEUE_STAT("tx_queue_%u_csum", tx_stats.csum_good), + NE6X_QUEUE_STAT("tx_queue_%u_pcie_read_err", tx_stats.tx_pcie_read_err), + NE6X_QUEUE_STAT("tx_queue_%u_ecc_err", tx_stats.tx_ecc_err), + NE6X_QUEUE_STAT("tx_queue_%u_drop_addr", tx_stats.tx_drop_addr), +}; + +static const struct ne6x_stats ne6x_gstrings_rx_queue_stats[] = { + NE6X_QUEUE_STAT("rx_queue_%u_packets", stats.packets), + NE6X_QUEUE_STAT("rx_queue_%u_bytes", stats.bytes), + NE6X_QUEUE_STAT("rx_queue_%u_no_eop", rx_stats.non_eop_descs), + NE6X_QUEUE_STAT("rx_queue_%u_alloc_pg_err", 
rx_stats.alloc_page_failed), + NE6X_QUEUE_STAT("rx_queue_%u_alloc_buf_err", rx_stats.alloc_buf_failed), + NE6X_QUEUE_STAT("rx_queue_%u_pg_reuse", rx_stats.page_reuse_count), + NE6X_QUEUE_STAT("rx_queue_%u_csum_err", rx_stats.csum_err), + NE6X_QUEUE_STAT("rx_queue_%u_csum", rx_stats.csum_good), + NE6X_QUEUE_STAT("rx_queue_%u_mem_err", rx_stats.rx_mem_error), + NE6X_QUEUE_STAT("rx_queue_%u_rx_err", rx_stats.rx_err), +}; + +static const struct ne6x_stats ne6x_gstrings_cq_queue_stats[] = { + NE6X_QUEUE_STAT("cx_queue_%u_nums", cq_stats.cq_num), + NE6X_QUEUE_STAT("cx_queue_%u_tx_nums", cq_stats.tx_num), + NE6X_QUEUE_STAT("cx_queue_%u_rx_nums", cq_stats.rx_num), +}; + +/* port mac statistics */ +#define NE6X_PORT_MAC_STAT(_name, _stat) NE6X_STAT(struct ne6x_adapter, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_port_mac_stats[] = { + NE6X_PORT_MAC_STAT("port.rx_eth_byte", stats.mac_rx_eth_byte), + NE6X_PORT_MAC_STAT("port.rx_eth", stats.mac_rx_eth), + NE6X_PORT_MAC_STAT("port.rx_eth_undersize", stats.mac_rx_eth_undersize), + NE6X_PORT_MAC_STAT("port.rx_eth_crc_err", stats.mac_rx_eth_crc), + NE6X_PORT_MAC_STAT("port.rx_eth_64b", stats.mac_rx_eth_64b), + NE6X_PORT_MAC_STAT("port.rx_eth_65_127b", stats.mac_rx_eth_65_127b), + NE6X_PORT_MAC_STAT("port.rx_eth_128_255b", stats.mac_rx_eth_128_255b), + NE6X_PORT_MAC_STAT("port.rx_eth_256_511b", stats.mac_rx_eth_256_511b), + NE6X_PORT_MAC_STAT("port.rx_eth_512_1023b", stats.mac_rx_eth_512_1023b), + NE6X_PORT_MAC_STAT("port.rx_eth_1024_15360b", stats.mac_rx_eth_1024_15360b), + NE6X_PORT_MAC_STAT("port.tx_eth_byte", stats.mac_tx_eth_byte), + NE6X_PORT_MAC_STAT("port.tx_eth", stats.mac_tx_eth), + NE6X_PORT_MAC_STAT("port.tx_eth_undersize", stats.mac_tx_eth_undersize), + NE6X_PORT_MAC_STAT("port.tx_eth_64b", stats.mac_tx_eth_64b), + NE6X_PORT_MAC_STAT("port.tx_eth_65_127b", stats.mac_tx_eth_65_127b), + NE6X_PORT_MAC_STAT("port.tx_eth_128_255b", stats.mac_tx_eth_128_255b), + NE6X_PORT_MAC_STAT("port.tx_eth_256_511b", stats.mac_tx_eth_256_511b), + NE6X_PORT_MAC_STAT("port.tx_eth_512_1023b", stats.mac_tx_eth_512_1023b), + NE6X_PORT_MAC_STAT("port.tx_eth_1024_15360b", stats.mac_tx_eth_1024_15360b), +}; + +#define NE6X_ADPT_STATS_LEN ARRAY_SIZE(ne6x_gstrings_adpt_stats) +#define NE6X_ADPT_DEV_ETH_STATS_LEN ARRAY_SIZE(ne6x_gstrings_adpt_dev_eth_stats) + +#define NE6X_PF_STATS_LEN ARRAY_SIZE(ne6x_gstrings_pf_stats) +#define NE6X_PORT_MAC_STATS_LEN ARRAY_SIZE(ne6x_gstrings_port_mac_stats) + +#define NE6X_ALL_STATS_LEN(n) \ + (NE6X_ADPT_STATS_LEN + NE6X_ADPT_DEV_ETH_STATS_LEN + \ + NE6X_PF_STATS_LEN + NE6X_PORT_MAC_STATS_LEN + ne6x_q_stats_len(n)) + +struct ne6x_priv_flag { + char name[ETH_GSTRING_LEN]; + u32 bitno; /* bit position in pf->flags */ +}; + +#define NE6X_PRIV_FLAG(_name, _bitno) { \ + .name = _name, \ + .bitno = _bitno, \ +} + +static const struct ne6x_priv_flag ne6x_gstrings_priv_flags[] = { + NE6X_PRIV_FLAG("disable-fw-lldp", NE6X_ADPT_F_DISABLE_FW_LLDP), + NE6X_PRIV_FLAG("link-down-on-close", NE6X_ADPT_F_LINKDOWN_ON_CLOSE), + NE6X_PRIV_FLAG("write-protect", NE6X_ADPT_F_NORFLASH_WRITE_PROTECT), + NE6X_PRIV_FLAG("ddos-switch", NE6X_ADPT_F_DDOS_SWITCH), + NE6X_PRIV_FLAG("white-list", NE6X_ADPT_F_ACL), + NE6X_PRIV_FLAG("trust-vlan", NE6X_ADPT_F_TRUST_VLAN), +}; + +#define NE6X_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ne6x_gstrings_priv_flags) + +static void ne6x_get_settings_link_up_fec(struct net_device *netdev, + u32 link_speed, + struct ethtool_link_ksettings *ks) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + enum ne6x_fec_state 
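/* default when the device reports no FEC */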
fec = NE6X_FEC_NONE; + + switch (link_speed) { + case NE6X_LINK_SPEED_25GB: + case NE6X_LINK_SPEED_100GB: + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + + ne6x_dev_get_fec(adpt, &fec); + if (fec == NE6X_FEC_RS) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + else if (fec == NE6X_FEC_BASER) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER); + else + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + + break; + default: + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + break; + } +} + +static void ne6x_get_settings_link_up(struct ethtool_link_ksettings *ks, struct net_device *netdev) +{ + struct ne6x_link_status *link_info; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + link_info = &adpt->port_info->phy.link_info; + switch (link_info->link_speed) { + case NE6X_LINK_SPEED_100GB: + ks->base.speed = SPEED_100000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 100000baseCR4_Full); + break; + case NE6X_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseCR4_Full); + break; + case NE6X_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); + break; + case NE6X_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); + break; + case NE6X_LINK_SPEED_200GB: + ks->base.speed = SPEED_200000; + break; + default: + netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n", + link_info->link_speed); + break; + } + + ks->base.duplex = DUPLEX_FULL; + + if (link_info->an_info & NE6X_AQ_AN_COMPLETED) + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Autoneg); + + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + + ne6x_get_settings_link_up_fec(netdev, link_info->link_speed, ks); +} + +static void ne6x_phy_type_to_ethtool(struct ne6x_adapter *adpt, + struct ethtool_link_ksettings *ks) +{ + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); +} + +static void ne6x_get_settings_link_down(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ne6x_phy_type_to_ethtool(adpt, ks); + /* With no link, speed and duplex are unknown */ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; +} + +static int ne6x_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct ne6x_link_status *hw_link_info; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); + hw_link_info = &adpt->port_info->phy.link_info; + + /* set speed and duplex */ + if (hw_link_info->link_info & NE6X_AQ_LINK_UP) + ne6x_get_settings_link_up(ks, netdev); + else + ne6x_get_settings_link_down(ks, netdev); + + if (!ne6x_dev_check_speed(adpt, SPEED_10000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); + + if 
(!ne6x_dev_check_speed(adpt, SPEED_25000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); + + if (!ne6x_dev_check_speed(adpt, SPEED_100000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseCR4_Full); + + if (!ne6x_dev_check_speed(adpt, SPEED_40000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseCR4_Full); + + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_FIBRE; + + /* Set flow control settings */ + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + + return 0; +} + +static int ne6x_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + bool if_running = netif_running(netdev); + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + u32 master = (adpt->idx == 0); + char *speed = "Unknown "; + u32 link_speed; + u32 sfp_speed; + int ret; + + if (ne6x_dev_check_speed(adpt, ks->base.speed)) { + dev_info(&pf->pdev->dev, "speed not support\n"); + return -EOPNOTSUPP; + } + + if (!master && pf->dev_type == NE6000AI_2S_X16H_25G_N5) { + dev_info(&pf->pdev->dev, "only master port can change speed\n"); + return -EOPNOTSUPP; + } + + switch (ks->base.speed) { + case SPEED_100000: + link_speed = NE6X_LINK_SPEED_100GB; + break; + case SPEED_40000: + link_speed = NE6X_LINK_SPEED_40GB; + break; + case SPEED_25000: + link_speed = NE6X_LINK_SPEED_25GB; + break; + case SPEED_10000: + link_speed = NE6X_LINK_SPEED_10GB; + break; + default: + return -EOPNOTSUPP; + } + + ret = ne6x_dev_get_sfp_speed(adpt, &sfp_speed); + if (!ret) { + switch (sfp_speed) { + case NE6X_LINK_SPEED_40GB: + speed = "40 G"; + break; + case NE6X_LINK_SPEED_100GB: + speed = "100 G"; + break; + case NE6X_LINK_SPEED_10GB: + speed = "10 G"; + break; + case NE6X_LINK_SPEED_25GB: + speed = "25 G"; + break; + case NE6X_LINK_SPEED_200GB: + speed = "200 G"; + break; + default: + break; + } + + if (sfp_speed != link_speed) + netdev_info(adpt->netdev, "speed not match, sfp support%sbps Full Duplex\n", + speed); + } + + if (if_running) + ne6x_close(adpt->netdev); + + ret = ne6x_dev_set_speed(adpt, link_speed); + if (if_running) + ne6x_open(adpt->netdev); + + return ret; +} + +static void __ne6x_add_stat_strings(u8 **p, const struct ne6x_stats stats[], + const unsigned int size, + ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +#define ne6x_add_stat_strings(p, stats, ...) 
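/* supplies ARRAY_SIZE(stats) so callers pass only the array and format args */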
\ + __ne6x_add_stat_strings(p, stats, ARRAY_SIZE(stats), ##__VA_ARGS__) + +static void ne6x_get_stat_strings(struct net_device *netdev, u8 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + unsigned int i; + + ne6x_add_stat_strings(&data, ne6x_gstrings_adpt_stats); + ne6x_add_stat_strings(&data, ne6x_gstrings_adpt_dev_eth_stats); + ne6x_add_stat_strings(&data, ne6x_gstrings_pf_stats); + + for (i = 0; i < adpt->num_queue; i++) { + ne6x_add_stat_strings(&data, ne6x_gstrings_tx_queue_stats, i); + ne6x_add_stat_strings(&data, ne6x_gstrings_rx_queue_stats, i); + ne6x_add_stat_strings(&data, ne6x_gstrings_cq_queue_stats, i); + } + + ne6x_add_stat_strings(&data, ne6x_gstrings_port_mac_stats); +} + +static void ne6x_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + unsigned int i; + u8 *p = data; + + for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", ne6x_gstrings_priv_flags[i].name); + p += ETH_GSTRING_LEN; + } +} + +static void ne6x_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + ne6x_get_stat_strings(netdev, data); + break; + case ETH_SS_TEST: + memcpy(data, ne6x_gstrings_test, NE6X_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_PRIV_FLAGS: + ne6x_get_priv_flag_strings(netdev, data); + break; + default: + break; + } +} + +static int ne6x_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return NE6X_ALL_STATS_LEN(netdev); + case ETH_SS_TEST: + return NE6X_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return NE6X_PRIV_FLAG_ARRAY_SIZE; + default: + return -EOPNOTSUPP; + } +} + +static void ne6x_get_mac_stats(struct ne6x_adapter *adpt) +{ + ne6x_dev_get_mac_stats(adpt); +} + +static void ne6x_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + unsigned int j; + int i = 0; + char *p; + + ne6x_update_pf_stats(adpt); + + for (j = 0; j < NE6X_ADPT_STATS_LEN; j++) { + p = (char *)ne6x_get_adpt_stats_struct(adpt) + + ne6x_gstrings_adpt_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_adpt_stats[j].sizeof_stat == sizeof(u64)) ? + *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < NE6X_ADPT_DEV_ETH_STATS_LEN; j++) { + p = (char *)(&adpt->eth_stats) + + ne6x_gstrings_adpt_dev_eth_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_adpt_dev_eth_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < NE6X_PF_STATS_LEN; j++) { + p = (char *)pf + ne6x_gstrings_pf_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_pf_stats[j].sizeof_stat == sizeof(u64)) ? 
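/* each stat is either u64 or u32 wide */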
+ *(u64 *)p : *(u32 *)p; + } + + /* populate per queue stats */ + rcu_read_lock(); + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = READ_ONCE(adpt->tx_rings[j]); + if (tx_ring) { + data[i++] = tx_ring->stats.packets; + data[i++] = tx_ring->stats.bytes; + data[i++] = tx_ring->tx_stats.restart_q; + data[i++] = tx_ring->tx_stats.tx_busy; + data[i++] = tx_ring->tx_stats.tx_linearize; + data[i++] = tx_ring->tx_stats.csum_err; + data[i++] = tx_ring->tx_stats.csum_good; + data[i++] = tx_ring->tx_stats.tx_pcie_read_err; + data[i++] = tx_ring->tx_stats.tx_ecc_err; + data[i++] = tx_ring->tx_stats.tx_drop_addr; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + rx_ring = READ_ONCE(adpt->rx_rings[j]); + if (rx_ring) { + data[i++] = rx_ring->stats.packets; + data[i++] = rx_ring->stats.bytes; + data[i++] = rx_ring->rx_stats.non_eop_descs; + data[i++] = rx_ring->rx_stats.alloc_page_failed; + data[i++] = rx_ring->rx_stats.alloc_buf_failed; + data[i++] = rx_ring->rx_stats.page_reuse_count; + data[i++] = rx_ring->rx_stats.csum_err; + data[i++] = rx_ring->rx_stats.csum_good; + data[i++] = rx_ring->rx_stats.rx_mem_error; + data[i++] = rx_ring->rx_stats.rx_err; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + cq_ring = READ_ONCE(adpt->cq_rings[j]); + if (cq_ring) { + data[i++] = cq_ring->cq_stats.cq_num; + data[i++] = cq_ring->cq_stats.tx_num; + data[i++] = cq_ring->cq_stats.rx_num; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + } + + rcu_read_unlock(); + + ne6x_get_mac_stats(adpt); + + for (j = 0; j < NE6X_PORT_MAC_STATS_LEN; j++) { + p = (char *)adpt + ne6x_gstrings_port_mac_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_port_mac_stats[j].sizeof_stat == sizeof(u64)) ? 
+			*(u64 *)p : *(u32 *)p;
+	}
+}
+
+extern char ne6x_driver_name[];
+
+static void ne6x_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+	u32 soc_ver = 0, np_ver = 0, erom_ver = 0;
+	struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev);
+	char nvm_version_str[32];
+	char temp_str[16] = {0};
+
+	strscpy(drvinfo->driver, ne6x_driver_name, sizeof(drvinfo->driver));
+	strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
+	memset(nvm_version_str, 0, sizeof(nvm_version_str));
+	soc_ver = pf->verinfo.firmware_soc_ver;
+	np_ver = pf->verinfo.firmware_np_ver & 0xFFFF;
+	erom_ver = pf->verinfo.firmware_pxe_ver & 0xFFFF;
+	snprintf(nvm_version_str, 20, "%d.%d.%d.%d ", (soc_ver & 0xff000000) >> 24,
+		 ((erom_ver & 0xFFFF) / 100), ((soc_ver & 0xFFFF) / 100),
+		 ((np_ver & 0xFFFF) / 100));
+	if (erom_ver % 100) {
+		snprintf(temp_str, 4, "P%d", (erom_ver % 100));
+		strncat(nvm_version_str, temp_str, 4);
+	}
+	if ((soc_ver & 0xffff) % 100) {
+		snprintf(temp_str, 4, "A%d", ((soc_ver & 0xffff) % 100));
+		strncat(nvm_version_str, temp_str, 4);
+	}
+	if (np_ver % 100) {
+		snprintf(temp_str, 4, "N%d", (np_ver % 100));
+		strncat(nvm_version_str, temp_str, 4);
+	}
+	strscpy(drvinfo->fw_version, nvm_version_str, sizeof(drvinfo->fw_version));
+	strscpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info));
+}
+
+static void ne6x_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
+{
+	struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev);
+	struct ne6x_hw *hw = &pf->hw;
+	unsigned int i, j, ri;
+	u32 *reg_buf = p;
+	u32 reg;
+
+	regs->version = 1;
+
+	/* loop through the diags reg table for what to print */
+	ri = 0;
+	for (i = 0; ne6x_reg_list[i].offset != 0; i++) {
+		for (j = 0; j < ne6x_reg_list[i].elements; j++) {
+			reg = ne6x_reg_list[i].offset + (j * ne6x_reg_list[i].stride);
+			/* only the low 32 bits of each 64-bit register are exported */
+			reg_buf[ri++] = rd64(hw, reg);
+		}
+	}
+}
+
+static void ne6x_self_test(struct net_device *dev, struct ethtool_test *eth_test, u64 *data)
+{
+	/* no test is run here; all results are left at zero */
+	memset(data, 0, sizeof(*data) * NE6X_TEST_LEN);
+}
+
+static int ne6x_get_regs_len(struct net_device *netdev)
+{
+	int reg_count = 0;
+	int i;
+
+	for (i = 0; ne6x_reg_list[i].offset != 0; i++)
+		reg_count += ne6x_reg_list[i].elements;
+
+	return reg_count * sizeof(u32);
+}
+
+static void ne6x_get_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring,
+			       struct kernel_ethtool_ringparam __always_unused *ker,
+			       struct netlink_ext_ack __always_unused *extack)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+
+	ring->rx_max_pending = NE6X_MAX_NUM_DESCRIPTORS;
+	ring->tx_max_pending = NE6X_MAX_NUM_DESCRIPTORS;
+	ring->rx_mini_max_pending = NE6X_MIN_NUM_DESCRIPTORS;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = adpt->num_rx_desc;
+	ring->tx_pending = adpt->num_tx_desc;
+	ring->rx_mini_pending = NE6X_MIN_NUM_DESCRIPTORS;
+	ring->rx_jumbo_pending = 0;
+}
+
+static int ne6x_set_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *ring,
+			      struct kernel_ethtool_ringparam __always_unused *ker,
+			      struct netlink_ext_ack __always_unused *extack)
+{
+	u32 new_rx_count, new_tx_count, new_cq_count, new_tg_count;
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_pf *pf = adpt->back;
+	int timeout = 50;
+	int err = 0;
+	int i;
+
+	if (ring->tx_pending > NE6X_MAX_NUM_DESCRIPTORS ||
+	    ring->tx_pending < NE6X_MIN_NUM_DESCRIPTORS ||
+	    ring->rx_pending > NE6X_MAX_NUM_DESCRIPTORS ||
+	    ring->rx_pending < 
NE6X_MIN_NUM_DESCRIPTORS) {
+		netdev_info(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+			    ring->tx_pending, ring->rx_pending, NE6X_MIN_NUM_DESCRIPTORS,
+			    NE6X_MAX_NUM_DESCRIPTORS);
+		return -EINVAL;
+	}
+
+	new_tx_count = ALIGN(ring->tx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE);
+	new_rx_count = ALIGN(ring->rx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE);
+	new_cq_count = new_tx_count + new_rx_count;
+	new_tg_count = new_tx_count;
+
+	if (new_tx_count == adpt->num_tx_desc && new_rx_count == adpt->num_rx_desc)
+		return 0;
+
+	while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) {
+		timeout--;
+		if (!timeout)
+			return -EBUSY;
+
+		usleep_range(1000, 2000);
+	}
+
+	if (!netif_running(adpt->netdev)) {
+		adpt->num_tx_desc = new_tx_count;
+		adpt->num_rx_desc = new_rx_count;
+		adpt->num_cq_desc = new_cq_count;
+		adpt->num_tg_desc = new_tg_count;
+		netdev_info(netdev, "Link is down, descriptor count change happens when link is brought up\n");
+		goto done;
+	}
+
+	err = ne6x_close(adpt->netdev);
+	if (err) {
+		netdev_err(netdev, "failed to close adapter %d\n", adpt->idx);
+		goto done;
+	}
+
+	netdev_info(netdev, "Descriptors change from (Tx: %d / Rx: %d) to (Tx: %d / Rx: %d)\n",
+		    adpt->tx_rings[0]->count, adpt->rx_rings[0]->count, new_tx_count, new_rx_count);
+
+	/* simple case - set for the next time the netdev is started */
+	for (i = 0; i < adpt->num_queue; i++) {
+		adpt->tx_rings[i]->count = new_tx_count;
+		adpt->rx_rings[i]->count = new_rx_count;
+		adpt->cq_rings[i]->count = new_cq_count;
+		adpt->tg_rings[i]->count = new_tg_count;
+	}
+
+	adpt->num_tx_desc = new_tx_count;
+	adpt->num_rx_desc = new_rx_count;
+	adpt->num_cq_desc = new_cq_count;
+	adpt->num_tg_desc = new_tg_count;
+
+	err = ne6x_open(adpt->netdev);
+	if (err) {
+		netdev_err(netdev, "failed to open adapter %d\n", adpt->idx);
+		goto done;
+	}
+
+done:
+	clear_bit(NE6X_CONFIG_BUSY, pf->state);
+
+	return err;
+}
+
+static void ne6x_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_flowctrl flowctrl;
+	int ret;
+
+	ret = ne6x_dev_get_flowctrl(adpt, &flowctrl);
+	if (ret)
+		return;
+
+	pause->autoneg = 0;
+	pause->rx_pause = flowctrl.rx_pause;
+	pause->tx_pause = flowctrl.tx_pause;
+}
+
+static int ne6x_set_pauseparam(struct net_device *netdev,
+			       struct ethtool_pauseparam *pause)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_flowctrl flowctrl;
+	int ret;
+
+	if (pause->autoneg)
+		return -EOPNOTSUPP;
+
+	flowctrl.autoneg = pause->autoneg;
+	flowctrl.rx_pause = pause->rx_pause;
+	flowctrl.tx_pause = pause->tx_pause;
+
+	ret = ne6x_dev_set_flowctrl(adpt, &flowctrl);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int ne6x_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ec,
+			     struct kernel_ethtool_coalesce *kernel_coal,
+			     struct netlink_ext_ack *extack)
+{
+	ec->tx_max_coalesced_frames_irq = 256;
+	ec->rx_max_coalesced_frames_irq = 256;
+	ec->use_adaptive_rx_coalesce = 0;
+	ec->use_adaptive_tx_coalesce = 0;
+	ec->rx_coalesce_usecs = 0;
+	ec->tx_coalesce_usecs = 0;
+	ec->rx_coalesce_usecs_high = 0;
+	ec->tx_coalesce_usecs_high = 0;
+
+	return 0;
+}
+
+static int ne6x_get_eeprom_len(struct net_device *netdev) { return 256; }
+
+static int ne6x_get_eeprom(struct net_device *netdev,
+			   struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_pf *pf = adpt->back;
+	struct ne6x_hw *hw = &pf->hw;
+	u8 *eeprom_buff;
+	int err = 0;
+	int ret_val;
+	u32 magic;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	magic = hw->vendor_id | (hw->device_id << 16);
+	if (eeprom->magic && eeprom->magic != magic) {
+		/* make sure it is the right magic for NVMUpdate */
+		if ((eeprom->magic >> 16) != hw->device_id)
+			err = -EINVAL;
+		else if (test_bit(NE6X_RESET_INTR_RECEIVED, pf->state))
+			err = -EBUSY;
+
+		return err;
+	}
+
+	/* normal ethtool get_eeprom support */
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	ret_val = ne6x_dev_read_eeprom(adpt, 0x0, eeprom_buff, eeprom->len);
+	if (!ret_val)
+		memcpy(bytes, eeprom_buff, eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC)
+#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+static u64 ne6x_get_rss_hash_opts(struct ne6x_adapter *adpt, u64 flow_type)
+{
+	u64 data = 0;
+
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+		if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4)
+			data |= RXH_IP_DST | RXH_IP_SRC;
+		if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_TCP)
+			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case UDP_V4_FLOW:
+		if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4)
+			data |= RXH_IP_DST | RXH_IP_SRC;
+		if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_UDP)
+			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case TCP_V6_FLOW:
+		if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6)
+			data |= RXH_IP_DST | RXH_IP_SRC;
+		if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_TCP)
+			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case UDP_V6_FLOW:
+		if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6)
+			data |= RXH_IP_DST | RXH_IP_SRC;
+		if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_UDP)
+			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case IPV4_FLOW:
+	case SCTP_V6_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case IPV6_FLOW:
+		/* Default is src/dest for IP, no matter the L4 hashing */
+		data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	}
+
+	return data;
+}
+
+static int ne6x_set_rss_hash_opts(struct ne6x_adapter *adpt, struct ethtool_rxnfc *cmd)
+{
+	u16 rss_flags = adpt->rss_info.hash_type;
+	int status;
+
+	if (cmd->data != L3_RSS_FLAGS && cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
+		return -EINVAL;
+
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+		if (cmd->data == L3_RSS_FLAGS)
+			rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_TCP;
+		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+			rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_TCP;
+		break;
+	case TCP_V6_FLOW:
+		if (cmd->data == L3_RSS_FLAGS)
+			rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_TCP;
+		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+			rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_TCP;
+		break;
+	case UDP_V4_FLOW:
+		if (cmd->data == L3_RSS_FLAGS)
+			rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_UDP;
+		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+			rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_UDP;
+		break;
+	case UDP_V6_FLOW:
+		if (cmd->data == L3_RSS_FLAGS)
+			rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_UDP;
+		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+			rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_UDP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rss_flags == adpt->rss_info.hash_type)
+		return 0;
+
+	adpt->rss_info.hash_type = rss_flags;
+
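+	/* Push the updated hash configuration to the device; the cached copy
+	 * in adpt->rss_info is what later ETHTOOL_GRXFH queries report.
+	 */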
+	status = ne6x_dev_set_rss(adpt, &adpt->rss_info);
+
+	return status ? -EIO : 0;
+}
+
+static int ne6x_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXFH:
+		info->data = ne6x_get_rss_hash_opts(adpt, info->flow_type);
+		break;
+	case ETHTOOL_GRXRINGS:
+		info->data = adpt->num_queue;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ne6x_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	int status = 0;
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = adpt->num_queue;
+		break;
+	case ETHTOOL_SRXFH:
+		status = ne6x_set_rss_hash_opts(adpt, info);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return status;
+}
+
+static u32 ne6x_get_rxfh_key_size(struct net_device *netdev)
+{
+	return NE6X_RSS_MAX_KEY_SIZE;
+}
+
+static u32 ne6x_get_rss_table_size(struct net_device *netdev)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_rss_info *rss_info = &adpt->rss_info;
+
+	return rss_info->ind_table_size;
+}
+
+static int ne6x_get_rxfh(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_rss_info *rss_info = &adpt->rss_info;
+	unsigned int n = rss_info->ind_table_size;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+
+	if (p) {
+		while (n--)
+			p[n] = rss_info->ind_table[n];
+	}
+
+	if (key)
+		memcpy(key, rss_info->hash_key, ne6x_get_rxfh_key_size(netdev));
+
+	return 0;
+}
+
+static int ne6x_set_rxfh(struct net_device *netdev, const u32 *p, const u8 *key, const u8 hfunc)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_rss_info *rss_info = &adpt->rss_info;
+	unsigned int i;
+	int status;
+
+	/* We do not allow change in unsupported parameters */
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+		return -EOPNOTSUPP;
+
+	/* Fill out the redirection table */
+	if (p) {
+		for (i = 0; i < rss_info->ind_table_size; i++)
+			rss_info->ind_table[i] = p[i];
+	}
+
+	/* Fill out the rss hash key */
+	if (key)
+		memcpy(&rss_info->hash_key[0], key, ne6x_get_rxfh_key_size(netdev));
+
+	status = ne6x_dev_set_rss(adpt, rss_info);
+
+	return status ? -EIO : 0;
+}
+
+static void ne6x_get_channels(struct net_device *netdev, struct ethtool_channels *channels)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+
+	channels->max_rx = 0;
+	channels->max_tx = 0;
+	channels->max_other = 0;
+	channels->max_combined = adpt->port_info->hw_max_queue;
+	channels->rx_count = 0;
+	channels->tx_count = 0;
+	channels->other_count = 0;
+	channels->combined_count = adpt->num_queue;
+}
+
+static int ne6x_set_channels(struct net_device *netdev, struct ethtool_channels *channels)
+{
+	int qp_remaining, q_vectors, i;
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_pf *pf = adpt->back;
+	int timeout = 50;
+	int err = 0;
+
+	if (!channels->combined_count || channels->rx_count || channels->tx_count ||
+	    channels->combined_count > pf->hw.expect_vp)
+		return -EINVAL;
+
+	if (channels->combined_count == adpt->num_queue) {
+		/* nothing to do */
+		netdev_info(netdev, "channel count unchanged, nothing to do\n");
+		return 0;
+	}
+
+	while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) {
+		timeout--;
+		if (!timeout) {
+			netdev_info(netdev, "ne6x config busy, timed out\n");
+			return -EBUSY;
+		}
+		usleep_range(1000, 2000);
+	}
+
+	/* set for the next time the netdev is started */
+	if (!netif_running(adpt->netdev)) {
+		adpt->port_info->queue = channels->combined_count;
+		adpt->num_q_vectors = adpt->port_info->queue;
+		adpt->num_queue = adpt->num_q_vectors;
+		qp_remaining = adpt->num_queue;
+		q_vectors = adpt->num_q_vectors;
+
+		for (i = 0; i < adpt->num_q_vectors; i++) {
+			adpt->q_vectors[i]->num_ringpairs =
+				DIV_ROUND_UP(qp_remaining, q_vectors - i);
+			adpt->q_vectors[i]->reg_idx =
+				adpt->q_vectors[i]->v_idx + adpt->base_vector;
+			qp_remaining--;
+		}
+
+		for (i = 0; i < adpt->rss_info.ind_table_size; i++)
+			adpt->rss_info.ind_table[i] =
+				ethtool_rxfh_indir_default(i, adpt->num_queue);
+
+		ne6x_dev_set_rss(adpt, &adpt->rss_info);
+		netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n");
+		goto done;
+	}
+
+	err = ne6x_close(adpt->netdev);
+	if (err) {
+		netdev_err(netdev, "failed to close adapter %d\n", adpt->idx);
+		goto done;
+	}
+
+	adpt->port_info->queue = channels->combined_count;
+	adpt->num_q_vectors = adpt->port_info->queue;
+	adpt->num_queue = adpt->num_q_vectors;
+	qp_remaining = adpt->num_queue;
+	q_vectors = adpt->num_q_vectors;
+
+	for (i = 0; i < adpt->num_q_vectors; i++) {
+		adpt->q_vectors[i]->num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - i);
+		adpt->q_vectors[i]->reg_idx = adpt->q_vectors[i]->v_idx + adpt->base_vector;
+		qp_remaining--;
+	}
+
+	for (i = 0; i < adpt->rss_info.ind_table_size; i++)
+		adpt->rss_info.ind_table[i] = ethtool_rxfh_indir_default(i, adpt->num_queue);
+
+	ne6x_dev_set_rss(adpt, &adpt->rss_info);
+	err = ne6x_open(adpt->netdev);
+	if (err) {
+		netdev_err(netdev, "failed to open adapter %d\n", adpt->idx);
+		goto done;
+	}
+
+done:
+	clear_bit(NE6X_CONFIG_BUSY, pf->state);
+
+	return err;
+}
+
+static int ne6x_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		ne6x_dev_set_led(adpt, true);
+		return 1;
+	case ETHTOOL_ID_ON:
+		return 0;
+	case ETHTOOL_ID_OFF:
+		return 0;
+	case ETHTOOL_ID_INACTIVE:
+		ne6x_dev_set_led(adpt, false);
+	}
+
+	return 0;
+}
+
+static int ne6x_nway_reset(struct net_device *netdev) { return 0; }
+
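+/* Online self-tests: ne6x_diag_test() records one result per
+ * enum ne6x_ethtool_test_id slot, 0 for pass and 1 for fail.
+ */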
+static u64 ne6x_link_test(struct net_device *netdev, u64 *data)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	bool link_up;
+
+	link_up = adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP;
+	/* report 1 (fail) when the port has no link */
+	*data = link_up ? 0 : 1;
+
+	return *data;
+}
+
+static void ne6x_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+
+	/* Online tests */
+	if (ne6x_link_test(netdev, &data[NE6X_ETH_TEST_LINK]))
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+
+	data[NE6X_ETH_TEST_LOOPBACK] = 0;
+	if (ne6x_dev_test_loopback(adpt)) {
+		data[NE6X_ETH_TEST_LOOPBACK] = 1;
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+	}
+
+	data[NE6X_ETH_TEST_REG] = 0;
+	if (ne6x_dev_test_reg(adpt)) {
+		data[NE6X_ETH_TEST_REG] = 1;
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+	}
+
+	data[NE6X_ETH_TEST_INT] = 0;
+	if (ne6x_dev_test_intr(adpt)) {
+		data[NE6X_ETH_TEST_INT] = 1;
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+	}
+}
+
+static int ne6x_get_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam)
+{
+	struct ne6x_link_status *hw_link_info;
+	enum ne6x_fec_state fec = NE6X_FEC_NONE;
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	int err = 0;
+
+	hw_link_info = &adpt->port_info->phy.link_info;
+	if (hw_link_info->link_info & NE6X_AQ_LINK_UP) {
+		switch (hw_link_info->link_speed) {
+		case NE6X_LINK_SPEED_25GB:
+		case NE6X_LINK_SPEED_100GB:
+			err = ne6x_dev_get_fec(adpt, &fec);
+			if (fec == NE6X_FEC_RS) {
+				fecparam->fec |= ETHTOOL_FEC_RS;
+				fecparam->active_fec = ETHTOOL_FEC_RS;
+			} else if (fec == NE6X_FEC_BASER) {
+				fecparam->fec |= ETHTOOL_FEC_BASER;
+				fecparam->active_fec = ETHTOOL_FEC_BASER;
+			} else {
+				fecparam->fec |= ETHTOOL_FEC_OFF;
+				fecparam->active_fec = ETHTOOL_FEC_OFF;
+			}
+			break;
+		default:
+			fecparam->fec |= ETHTOOL_FEC_OFF;
+			fecparam->active_fec = ETHTOOL_FEC_OFF;
+			break;
+		}
+	} else {
+		fecparam->fec |= ETHTOOL_FEC_OFF;
+		fecparam->active_fec = ETHTOOL_FEC_OFF;
+	}
+
+	return err;
+}
+
+static int ne6x_set_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam)
+{
+	enum ne6x_fec_state fec = NE6X_FEC_NONE;
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_pf *pf = adpt->back;
+	int err = 0;
+
+	switch (fecparam->fec) {
+	case ETHTOOL_FEC_AUTO:
+		dev_warn(&pf->pdev->dev, "Unsupported FEC mode: AUTO");
+		err = -EINVAL;
+		goto done;
+	case ETHTOOL_FEC_RS:
+		fec = NE6X_FEC_RS;
+		break;
+	case ETHTOOL_FEC_BASER:
+		fec = NE6X_FEC_BASER;
+		break;
+	case ETHTOOL_FEC_OFF:
+	case ETHTOOL_FEC_NONE:
+		fec = NE6X_FEC_NONE;
+		break;
+	default:
+		dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d", fecparam->fec);
+		err = -EINVAL;
+		goto done;
+	}
+
+	err = ne6x_dev_set_fec(adpt, fec);
+
+done:
+	return err;
+}
+
+static const char * const flash_region_strings[] = {
+	"810 loader",
+	"810 app",
+	"807 app",
+	"NP Image",
+	"PXE Image",
+};
+
+static int ethtool_flash_firmware(struct net_device *netdev, u32 type, const u8 *data,
+				  u32 size)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_pf *pf = adpt->back;
+	int ret;
+
+	ret = ne6x_dev_upgrade_firmware(adpt, type, (u8 *)data, size, 1);
+	if (ret)
+		dev_err(&pf->pdev->dev, "Failed to flash firmware\n");
+
+	return ret;
+}
+
+static int ethtool_flash_region(struct net_device *netdev, const u8 *data, u32 size, u32 region)
+{
+	struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev);
+	int ret;
+
+	netdev_info(netdev, "%s = 0x%x\n", __func__, region);
+
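+	/* every recognized image region goes through the same firmware-flash
+	 * helper; unknown regions are rejected with -EOPNOTSUPP
+	 */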
+	switch (region) {
+	case NE6X_ETHTOOL_FLASH_810_APP:
+	case NE6X_ETHTOOL_FLASH_NP:
+	case NE6X_ETHTOOL_FLASH_PXE:
+	case NE6X_ETHTOOL_FLASH_810_LOADER:
+	case NE6X_ETHTOOL_FRU:
+	case NE6X_ETHTOOL_FLASH_807_APP:
+		ret = ethtool_flash_firmware(netdev, region, data, size);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	if (ret && region < ARRAY_SIZE(flash_region_strings))
+		dev_info(&pf->pdev->dev, "loading %s failed, reload driver\n",
+			 flash_region_strings[region]);
+
+	return ret;
+}
+
+static int ne6x_ethtool_get_flash_region(struct net_device *netdev, const u8 *data, u32 *size)
+{
+	int region = -1;
+	int ret;
+
+	ret = ne6x_dev_validate_fw(data, *size, &region);
+	if (ret) {
+		netdev_err(netdev, "firmware error ret = %d\n", ret);
+		return -EINVAL;
+	}
+
+	return region;
+}
+
+static int ne6x_set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_pf *pf = adpt->back;
+	const struct firmware *fw;
+	unsigned int master;
+	u32 fw_size;
+	u8 *fw_data;
+	int region;
+	int ret;
+
+	master = (adpt->idx == 0);
+	if (!master) {
+		dev_info(&pf->pdev->dev, "only master port can upgrade\n");
+		return -EOPNOTSUPP;
+	}
+
+	ret = request_firmware(&fw, ef->data, &pf->pdev->dev);
+	if (ret < 0)
+		return ret;
+
+	fw_data = (u8 *)fw->data;
+	fw_size = fw->size;
+	if (fw_size > 0) {
+		region = ne6x_ethtool_get_flash_region(netdev, fw_data, &fw_size);
+		if (region < 0) {
+			ret = region;
+			goto out_free_fw;
+		}
+
+		ret = ethtool_flash_region(netdev, fw_data, fw_size, region);
+		if (ret)
+			goto out_free_fw;
+	}
+
+out_free_fw:
+	release_firmware(fw);
+	return ret;
+}
+
+#define NE6X_FIRMWARE_RESET_CHIP \
+	((ETH_RESET_MGMT | ETH_RESET_IRQ | \
+	  ETH_RESET_DMA | ETH_RESET_FILTER | \
+	  ETH_RESET_OFFLOAD | ETH_RESET_MAC | \
+	  ETH_RESET_PHY | ETH_RESET_RAM) << ETH_RESET_SHARED_SHIFT)
+
+static int ne6x_reset(struct net_device *netdev, u32 *flags)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	bool reload = false;
+	u32 req = *flags;
+
+	if (!req)
+		return -EINVAL;
+
+	if (adpt->idx != 0x0) {
+		netdev_err(netdev, "Reset is only supported on the primary port\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((req & NE6X_FIRMWARE_RESET_CHIP) == NE6X_FIRMWARE_RESET_CHIP) {
+		/* This feature is not supported in older firmware versions */
+		if (!ne6x_dev_reset_firmware(adpt)) {
+			netdev_info(netdev, "Firmware reset request successful.\n");
+			reload = true;
+			*flags &= ~NE6X_FIRMWARE_RESET_CHIP;
+		}
+	}
+
+	if (reload)
+		netdev_info(netdev, "Reload driver to complete reset\n");
+
+	return 0;
+}
+
+static int ne6x_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_sfp_mod_type_len sfp_mod;
+	int err;
+
+	err = ne6x_dev_get_sfp_type_len(adpt, &sfp_mod);
+	if (err)
+		return err;
+
+	modinfo->type = sfp_mod.type;
+	modinfo->eeprom_len = sfp_mod.len;
+	netdev_info(netdev, "type %d eeprom_len %d.\n", sfp_mod.type, sfp_mod.len);
+
+	return 0;
+}
+
+#define STD_SFP_INFO_MAX_SIZE 640
+
+static int ne6x_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
+	int err;
+
+	if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
+		return -EINVAL;
+
+	memset(data, 0, ee->len);
+	err = ne6x_dev_get_sfp_eeprom(adpt, sfp_data, ee->offset, ee->len, 0);
+	if (err)
+		return err;
+
+	memcpy(data, sfp_data + ee->offset, ee->len);
+
+	return 0;
+}
+
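+/* Private flags: bit i of the ethtool value maps to
+ * ne6x_gstrings_priv_flags[i], which names the adapter flag bit it mirrors.
+ */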
+static u32 ne6x_get_priv_flags(struct net_device *netdev)
+{
+	const struct ne6x_priv_flag *priv_flag;
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	u32 is_write_protect = false;
+	u32 i, ret_flags = 0;
+	u32 value = 0;
+
+	ne6x_dev_get_norflash_write_protect(adpt->back, &is_write_protect);
+	if (is_write_protect)
+		set_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags);
+	else
+		clear_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags);
+
+	if (ne6x_dev_get_trust_vlan(adpt->back))
+		set_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags);
+	else
+		clear_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags);
+
+	value = ne6x_dev_get_features(adpt);
+	if (value & NE6X_F_RX_FW_LLDP)
+		clear_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags);
+	else
+		set_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags);
+
+	for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) {
+		priv_flag = &ne6x_gstrings_priv_flags[i];
+		if (test_bit(priv_flag->bitno, adpt->flags))
+			ret_flags |= BIT(i);
+	}
+
+	return ret_flags;
+}
+
+static int ne6x_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+	DECLARE_BITMAP(change_flags, NE6X_ADPT_F_NBITS);
+	DECLARE_BITMAP(orig_flags, NE6X_ADPT_F_NBITS);
+	const struct ne6x_priv_flag *priv_flag;
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	int ret = 0;
+	u32 i;
+
+	if (flags >= BIT(NE6X_PRIV_FLAG_ARRAY_SIZE))
+		return -EINVAL;
+
+	bitmap_copy(orig_flags, adpt->flags, NE6X_ADPT_F_NBITS);
+
+	for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) {
+		priv_flag = &ne6x_gstrings_priv_flags[i];
+
+		if (flags & BIT(i))
+			set_bit(priv_flag->bitno, adpt->flags);
+		else
+			clear_bit(priv_flag->bitno, adpt->flags);
+	}
+
+	bitmap_xor(change_flags, adpt->flags, orig_flags, NE6X_ADPT_F_NBITS);
+
+	if (test_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, change_flags)) {
+		if (test_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags))
+			ne6x_dev_set_fw_lldp(adpt, false);
+		else
+			ne6x_dev_set_fw_lldp(adpt, true);
+	}
+
+	if (test_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, change_flags)) {
+		if (test_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags))
+			ne6x_dev_set_norflash_write_protect(adpt->back, true);
+		else
+			ne6x_dev_set_norflash_write_protect(adpt->back, false);
+	}
+
+	if (test_bit(NE6X_ADPT_F_DDOS_SWITCH, change_flags)) {
+		if (test_bit(NE6X_ADPT_F_DDOS_SWITCH, adpt->flags))
+			ne6x_dev_set_ddos(adpt->back, true);
+		else
+			ne6x_dev_set_ddos(adpt->back, false);
+	}
+
+	if (test_bit(NE6X_ADPT_F_ACL, change_flags)) {
+		if (adpt->idx != 0) {
+			netdev_err(netdev, "only adapter 0 supports the acl flag\n");
+			return -EINVAL;
+		}
+		if (test_bit(NE6X_ADPT_F_ACL, adpt->flags)) {
+			if (ne6x_dev_set_white_list(adpt->back, true))
+				return -EPERM;
+		} else {
+			ne6x_dev_set_white_list(adpt->back, false);
+		}
+	}
+
+	if (test_bit(NE6X_ADPT_F_TRUST_VLAN, change_flags)) {
+		if (test_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags))
+			ne6x_dev_set_trust_vlan(adpt->back, true);
+		else
+			ne6x_dev_set_trust_vlan(adpt->back, false);
+	}
+
+	return ret;
+}
+
+static int ne6x_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
+{
+	struct ne6x_pf *pf = ne6x_netdev_to_pf(dev);
+
+	dump->version = 1;
+	dump->flag = 0;
+
+	/* Calculate the requested preset idx length */
+	if (ne6x_dev_get_dump_data_len(pf, &dump->len)) {
+		dump->len = 0;
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static int ne6x_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, void *buffer)
+{
+	struct ne6x_pf *pf = ne6x_netdev_to_pf(dev);
+	u32 *p = buffer;
+
+	if (ne6x_dev_get_dump_data(pf, p, dump->len))
+		return -EAGAIN;
+
+	return 0;
+}
+
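+/* A single const ethtool_ops table is shared by every netdev this driver
+ * registers; ne6x_set_ethtool_ops() below attaches it.
+ */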
+static const struct ethtool_ops ne6x_ethtool_ops = {
+	.get_link_ksettings = ne6x_get_link_ksettings,
+	.set_link_ksettings = ne6x_set_link_ksettings,
+	.get_strings = ne6x_get_strings,
+	.get_sset_count = ne6x_get_sset_count,
+	.get_ethtool_stats = ne6x_get_ethtool_stats,
+	.get_drvinfo = ne6x_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_regs = ne6x_get_regs,
+	.get_regs_len = ne6x_get_regs_len,
+	.get_dump_flag = ne6x_get_dump_flag,
+	.get_dump_data = ne6x_get_dump_data,
+	.get_ringparam = ne6x_get_ringparam,
+	.set_ringparam = ne6x_set_ringparam,
+	.get_pauseparam = ne6x_get_pauseparam,
+	.set_pauseparam = ne6x_set_pauseparam,
+	.get_coalesce = ne6x_get_coalesce,
+	.get_eeprom_len = ne6x_get_eeprom_len,
+	.get_eeprom = ne6x_get_eeprom,
+	.get_rxnfc = ne6x_get_rxnfc,
+	.set_rxnfc = ne6x_set_rxnfc,
+	.get_rxfh_key_size = ne6x_get_rxfh_key_size,
+	.get_rxfh_indir_size = ne6x_get_rss_table_size,
+	.get_rxfh = ne6x_get_rxfh,
+	.set_rxfh = ne6x_set_rxfh,
+	.get_channels = ne6x_get_channels,
+	.set_channels = ne6x_set_channels,
+	.flash_device = ne6x_set_flash,
+	.reset = ne6x_reset,
+	.get_module_info = ne6x_get_module_info,
+	.get_module_eeprom = ne6x_get_module_eeprom,
+	.get_priv_flags = ne6x_get_priv_flags,
+	.set_priv_flags = ne6x_set_priv_flags,
+	.set_phys_id = ne6x_set_phys_id,
+	.nway_reset = ne6x_nway_reset,
+	.self_test = ne6x_diag_test,
+	.get_fecparam = ne6x_get_fec_param,
+	.set_fecparam = ne6x_set_fec_param,
+};
+
+void ne6x_set_ethtool_ops(struct net_device *dev)
+{
+	dev->ethtool_ops = &ne6x_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h
new file mode 100644
index 0000000000000000000000000000000000000000..54d84d65900f205773691848dfd12b142f96241b
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */
+
+#ifndef _NE6X_ETHTOOL_H
+#define _NE6X_ETHTOOL_H
+
+#define NE6X_STAT(_type, _name, _stat) \
+{ \
+	.stat_string = _name, \
+	.sizeof_stat = sizeof_field(_type, _stat), \
+	.stat_offset = offsetof(_type, _stat) \
+}
+
+enum ne6x_ethtool_test_id {
+	NE6X_ETH_TEST_LINK,
+	NE6X_ETH_TEST_LOOPBACK,
+	NE6X_ETH_TEST_REG,
+	NE6X_ETH_TEST_INT,
+	NE6X_ETH_TEST_CHIP_TEMPERATUR,
+	NE6X_ETH_TEST_BOARD_TEMPERATUR,
+	NE6X_ETH_TEST_CURRENT,
+	NE6X_ETH_TEST_VOLTAGE,
+	NE6X_ETH_TEST_POWER,
+	NE6X_ETH_TEST_I2C3,
+};
+
+void ne6x_set_ethtool_ops(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c
new file mode 100644
index 0000000000000000000000000000000000000000..91a03ef06a58df901a7d6b4cd82db05ffda8cc6e
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include "ne6x.h" +#include "ne6x_interrupt.h" + +static int ne6x_init_msix(struct ne6x_pf *pf, int budget) +{ + int actual_vector; + ssize_t size; + + actual_vector = pci_enable_msix_range(pf->pdev, pf->msix_entries, NE6X_MIN_MSIX, budget); + dev_info(&pf->pdev->dev, "%s actual_vector = %d\n", __func__, actual_vector); + if (actual_vector <= 0) { + kfree(pf->msix_entries); + pf->msix_entries = NULL; + pci_disable_msix(pf->pdev); + dev_err(&pf->pdev->dev, "error msix enable failed\n"); + return -ENODEV; + } + + size = sizeof(struct ne6x_lump_tracking) + (sizeof(u16) * actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); + kfree(pf->msix_entries); + pf->msix_entries = NULL; + pci_disable_msix(pf->pdev); + return -ENOMEM; + } + pf->irq_pile->num_entries = actual_vector; + + return 0; +} + +static int ne6x_init_intx(struct ne6x_pf *pf) +{ + int actual_vector; + ssize_t size; + + dev_info(&pf->pdev->dev, "try enable intx\n"); + actual_vector = 0x1; + + size = sizeof(struct ne6x_lump_tracking) + (sizeof(u16) * actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error intx allocating irq_pile memory\n"); + return -ENOMEM; + } + pf->irq_pile->num_entries = actual_vector; + + test_and_set_bit(NE6X_PF_INTX, pf->state); + + return 0; +} + +int ne6x_init_interrupt_scheme(struct ne6x_pf *pf) +{ + union ne6x_ciu_time_out_cfg ciu_time_out_cdg; + union ne6x_all_rq_cfg all_rq_cfg; + union ne6x_all_sq_cfg all_sq_cfg; + union ne6x_all_cq_cfg all_cq_cfg; + union ne6x_merge_cfg merge_cfg; + struct ne6x_hw *hw = &pf->hw; + u64 __iomem *reg; + int err; + int i; + + pf->msix_entries = kcalloc(NE6X_MAX_MSIX_NUM, sizeof(struct msix_entry), GFP_KERNEL); + if (!pf->msix_entries) + return -ENOMEM; + + for (i = 0; i < NE6X_MAX_MSIX_NUM; i++) + pf->msix_entries[i].entry = i; + + test_and_set_bit(NE6X_PF_MSIX, pf->state); + + if (ne6x_init_msix(pf, NE6X_MAX_MSIX_NUM)) { + clear_bit(NE6X_PF_MSIX, pf->state); + err = ne6x_init_intx(pf); + if (err) { + dev_err(&pf->pdev->dev, "error intx enable failed\n"); + return err; + } + } + + if (pf->irq_pile->num_entries >= NE6X_MAX_MSIX_NUM) { + err = ne6x_init_link_irq(pf); + if (err) { + dev_err(&pf->pdev->dev, "init int irq failed\n"); + return err; + } + } + + /* We only initialize int once, so as not to overwrite user settings */ + if (test_and_set_bit(NE6X_INT_INIT_DOWN, pf->state)) + return 0; + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_RQ_CFG); + all_rq_cfg.val = readq(reg); + all_rq_cfg.reg.csr_allrq_pull_merge_cfg = 0x10; + writeq(all_rq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_SQ_CFG); + all_sq_cfg.val = readq(reg); + all_sq_cfg.reg.csr_allsq_pull_merge_cfg = 0x10; + writeq(all_sq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_CQ_CFG); + all_cq_cfg.val = readq(reg); + all_cq_cfg.reg.csr_allcq_merge_size = 0x1; + all_cq_cfg.reg.csr_allcq_wt_rr_cnt = 0x7F; + all_cq_cfg.reg.csr_allcq_wt_rr_flag = 0x1; + writeq(all_cq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_MERGE_CFG); + merge_cfg.val = readq(reg); + merge_cfg.reg.csr_merge_clk_cnt = 800; + writeq(merge_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_CIU_TIME_OUT_CFG); + ciu_time_out_cdg.val = readq(reg); + ciu_time_out_cdg.reg.csr_int_timer_out_cnt = 0xfff; + 
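/* commit the CIU time-out configuration back to the device */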
writeq(ciu_time_out_cdg.val, reg); + + return 0; +} + +static int ne6x_adpt_alloc_q_vector(struct ne6x_adapter *adpt, int v_idx) +{ + struct ne6x_q_vector *q_vector; + + /* allocate q_vector */ + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->adpt = adpt; + q_vector->v_idx = v_idx; + + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); + + if (adpt->netdev) + netif_napi_add(adpt->netdev, &q_vector->napi, ne6x_napi_poll); + + /* tie q_vector and adpt together */ + adpt->q_vectors[v_idx] = q_vector; + return 0; +} + +static void ne6x_free_q_vector(struct ne6x_adapter *adpt, int v_idx) +{ + struct ne6x_q_vector *q_vector = adpt->q_vectors[v_idx]; + struct ne6x_ring *ring; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + if (!q_vector) { + dev_dbg(dev, "Queue vector at index %d not found\n", v_idx); + return; + } + + /* disassociate q_vector from rings */ + ne6x_for_each_ring(ring, q_vector->tx) ring->q_vector = NULL; + + ne6x_for_each_ring(ring, q_vector->rx) ring->q_vector = NULL; + + ne6x_for_each_ring(ring, q_vector->cq) ring->q_vector = NULL; + + /* only adapter w/ an associated netdev is set up w/ NAPI */ + if (adpt->netdev) + netif_napi_del(&q_vector->napi); + + adpt->q_vectors[v_idx] = NULL; + kfree(q_vector); +} + +static int ne6x_adpt_alloc_q_vectors(struct ne6x_adapter *adpt) +{ + int v_idx, num_q_vectors, err; + + /* if not MSIX, give the one vector only to the LAN adapter */ + num_q_vectors = adpt->num_q_vectors; + + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { + err = ne6x_adpt_alloc_q_vector(adpt, v_idx); + if (err) + goto err_out; + } + + return 0; + +err_out: + while (v_idx--) + ne6x_free_q_vector(adpt, v_idx); + + return err; +} + +void ne6x_adpt_free_q_vectors(struct ne6x_adapter *adpt) +{ + int v_idx; + + for (v_idx = 0; v_idx < adpt->num_q_vectors; v_idx++) + ne6x_free_q_vector(adpt, v_idx); +} + +int ne6x_adpt_setup_vectors(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + int ret = -ENOENT; + + if (adpt->q_vectors[0]) { + dev_info(&pf->pdev->dev, "adapter %d has existing q_vectors\n", adpt->idx); + return -EEXIST; + } + + if (adpt->base_vector) { + dev_info(&pf->pdev->dev, "adapter %d has non-zero base vector %d\n", adpt->idx, + adpt->base_vector); + return -EEXIST; + } + + ret = ne6x_adpt_alloc_q_vectors(adpt); + if (ret) { + dev_info(&pf->pdev->dev, "failed to allocate %d q_vector for adapter %d, ret=%d\n", + adpt->num_q_vectors, adpt->idx, ret); + adpt->num_q_vectors = 0; + goto vector_setup_out; + } + + if (adpt->num_q_vectors) + adpt->base_vector = adpt->port_info->hw_queue_base; + + if (adpt->base_vector < 0) { + dev_info(&pf->pdev->dev, "failed to get tracking for %d vectors for adapter %d, err=%d\n", + adpt->num_q_vectors, adpt->idx, adpt->base_vector); + ne6x_adpt_free_q_vectors(adpt); + ret = -ENOENT; + goto vector_setup_out; + } + +vector_setup_out: + return ret; +} + +static void ne6x_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) +{ + struct ne6x_q_vector *q_vector = + container_of(notify, struct ne6x_q_vector, affinity_notify); + + cpumask_copy(&q_vector->affinity_mask, mask); +} + +static void ne6x_irq_affinity_release(struct kref *ref) {} + +int ne6x_adpt_request_irq_msix(struct ne6x_adapter *adpt, char *basename) +{ + int q_vectors = adpt->num_q_vectors; + struct ne6x_pf *pf = adpt->back; + int base = adpt->base_vector; + int rx_int_idx = 0; + int tx_int_idx = 0; + int vector, err; + int irq_num; + int cpu; + + for (vector = 
0; vector < q_vectors; vector++) {
+		struct ne6x_q_vector *q_vector = adpt->q_vectors[vector];
+
+		irq_num = pf->msix_entries[base + vector].vector;
+
+		if (q_vector->tx.ring && q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename,
+				 "TxRx", rx_int_idx++);
+			tx_int_idx++;
+		} else if (q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename,
+				 "rx", rx_int_idx++);
+		} else if (q_vector->tx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename,
+				 "tx", tx_int_idx++);
+		} else {
+			/* skip this unused q_vector */
+			continue;
+		}
+
+		err = request_irq(irq_num, adpt->irq_handler, 0, q_vector->name, q_vector);
+		if (err) {
+			dev_info(&pf->pdev->dev, "MSIX request_irq failed, error: %d\n", err);
+			goto free_queue_irqs;
+		}
+
+		/* register for affinity change notifications */
+		q_vector->affinity_notify.notify = ne6x_irq_affinity_notify;
+		q_vector->affinity_notify.release = ne6x_irq_affinity_release;
+		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+
+		/* Spread affinity hints out across online CPUs.
+		 *
+		 * get_cpu_mask returns a static constant mask with
+		 * a permanent lifetime so it's ok to pass to
+		 * irq_set_affinity_hint without making a copy.
+		 */
+		cpu = cpumask_local_spread(q_vector->v_idx, -1);
+		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+	}
+
+	adpt->irqs_ready = true;
+	return 0;
+
+free_queue_irqs:
+	while (vector) {
+		vector--;
+		irq_num = pf->msix_entries[base + vector].vector;
+		irq_set_affinity_notifier(irq_num, NULL);
+		irq_set_affinity_hint(irq_num, NULL);
+		/* dev_id must match the q_vector passed to request_irq() above */
+		free_irq(irq_num, adpt->q_vectors[vector]);
+	}
+
+	return err;
+}
+
+static irqreturn_t ne6x_intr(int irq, void *data)
+{
+	struct ne6x_q_vector *q_vector = data;
+	struct ne6x_adapter *adpt = q_vector->adpt;
+	struct ne6x_hw *hw = &adpt->back->hw;
+	u64 reg_val;
+
+	reg_val = rd64(hw, NE6X_VPINT_DYN_CTLN(0, NE6X_VP_INT));
+	if (!(reg_val & 0x10000))
+		return IRQ_NONE;
+
+	napi_schedule(&q_vector->napi);
+	return IRQ_HANDLED;
+}
+
+int ne6x_adpt_request_irq_intx(struct ne6x_adapter *adpt, char *basename)
+{
+	struct ne6x_q_vector *q_vector = adpt->q_vectors[0];
+	struct net_device *netdev = adpt->netdev;
+	struct ne6x_pf *pf = adpt->back;
+	u32 irq = pf->pdev->irq;
+	int err;
+
+	snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-INTx", basename, "TxRx");
+
+	err = request_irq(irq, &ne6x_intr, IRQF_SHARED, netdev->name, q_vector);
+	if (err) {
+		dev_info(&pf->pdev->dev, "INTx request_irq failed, error: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+int ne6x_adpt_request_irq(struct ne6x_adapter *adpt, char *basename)
+{
+	struct ne6x_pf *pf = adpt->back;
+	int err;
+
+	if (test_bit(NE6X_PF_MSIX, pf->state))
+		err = ne6x_adpt_request_irq_msix(adpt, basename);
+	else
+		err = ne6x_adpt_request_irq_intx(adpt, basename);
+
+	if (err)
+		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
+
+	return err;
+}
+
+void ne6x_adpt_configure_msix(struct ne6x_adapter *adpt)
+{
+	union ne6x_vp_int_mask int_mask;
+	struct ne6x_pf *pf = adpt->back;
+	struct ne6x_hw *hw = &pf->hw;
+	union ne6x_int_cfg int_cfg;
+	u32 qp, nextqp;
+	int i, q;
+
+	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
+	 * and PFINT_LNKLSTn registers, e.g.:
+	 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
+	 */
+	qp = adpt->base_queue;
+
+	/* SRIOV mode VF Config OR SRIOV disabled PF Config */
+	if (qp < NE6X_PF_VP0_NUM) {
+		for (i = 0; i < adpt->num_q_vectors; i++) {
+			struct 
ne6x_q_vector *q_vector = adpt->q_vectors[i]; + + for (q = 0; q < q_vector->num_ringpairs; q++) { + nextqp = qp + i + q; + + int_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_INT_CFG)); + int_cfg.reg.csr_sq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_rq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_cq_hdle_half_int_cnt_vp = 0xffff; + wr64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_INT_CFG), int_cfg.val); + + int_mask.val = rd64(hw, + NE6X_VPINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp = NE6X_MAX_U64; + wr64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK), + int_mask.val); + } + } + } else { + /* SRIOV mode PF Config */ + for (i = 0; i < adpt->num_q_vectors; i++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[i]; + + for (q = 0; q < q_vector->num_ringpairs; q++) { + nextqp = qp - NE6X_PF_VP0_NUM + i + q; + + int_cfg.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, NE6X_INT_CFG)); + int_cfg.reg.csr_sq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_rq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_cq_hdle_half_int_cnt_vp = 0xffff; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(nextqp, NE6X_INT_CFG), + int_cfg.val); + + int_mask.val = + rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, + NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp = NE6X_MAX_U64; + wr64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK), + int_mask.val); + } + } + } +} + +static inline void ne6x_irq_dynamic_enable(struct ne6x_adapter *adpt, int vector) +{ + union ne6x_vp_int_mask int_mask; + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + + if (vector < NE6X_PF_VP0_NUM) { + int_mask.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vector, NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp &= ~(1ULL << NE6X_VP_CQ_INTSHIFT); + wr64(hw, NE6X_VPINT_DYN_CTLN(vector, NE6X_VP_INT_MASK), int_mask.val); + } else { + int_mask.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(vector - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp &= ~(1ULL << NE6X_VP_CQ_INTSHIFT); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(vector - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), + int_mask.val); + } +} + +int ne6x_adpt_enable_irq(struct ne6x_adapter *adpt) +{ + int i; + + for (i = 0; i < adpt->num_q_vectors; i++) + ne6x_irq_dynamic_enable(adpt, adpt->base_vector + i); + + return 0; +} + +void ne6x_adpt_disable_irq(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int base = adpt->base_vector; + int i; + + /* disable each interrupt */ + if (base < NE6X_PF_VP0_NUM) { + for (i = adpt->base_vector; i < (adpt->num_q_vectors + adpt->base_vector); i++) { + wr64(hw, NE6X_VPINT_DYN_CTLN(i, NE6X_VP_INT), NE6X_MAX_U64); + wr64(hw, NE6X_VPINT_DYN_CTLN(i, NE6X_VP_INT_MASK), NE6X_MAX_U64); + } + } else { + for (i = adpt->base_vector; i < (adpt->num_q_vectors + adpt->base_vector); i++) { + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(i - NE6X_PF_VP0_NUM, NE6X_VP_INT), + NE6X_MAX_U64); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(i - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK), + NE6X_MAX_U64); + } + } + + if (test_bit(NE6X_PF_MSIX, pf->state)) { + for (i = 0; i < adpt->num_q_vectors; i++) + synchronize_irq(pf->msix_entries[i + base].vector); + } else { + synchronize_irq(pf->pdev->irq); + } +} + +void ne6x_adpt_free_irq(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + int base = adpt->base_vector; + int i; + + if (!adpt->q_vectors) + return; + + if (!adpt->irqs_ready) + return; + + adpt->irqs_ready = false; + for (i = 0; i < adpt->num_q_vectors; i++) { + int irq_num; + u16 vector; + + vector = i + 
base;
+		irq_num = pf->msix_entries[vector].vector;
+
+		/* free only the irqs that were actually requested */
+		if (!adpt->q_vectors[i] || !adpt->q_vectors[i]->num_ringpairs)
+			continue;
+
+		/* clear the affinity notifier in the IRQ descriptor */
+		irq_set_affinity_notifier(irq_num, NULL);
+
+		/* remove our suggested affinity mask for this IRQ */
+		irq_set_affinity_hint(irq_num, NULL);
+
+		synchronize_irq(irq_num);
+		free_irq(irq_num, adpt->q_vectors[i]);
+	}
+}
+
+static void ne6x_reset_interrupt_capability(struct ne6x_pf *pf)
+{
+	/* If we're in Legacy mode, the interrupt was cleaned in adpt_close */
+	if (pf->msix_entries) {
+		pci_disable_msix(pf->pdev);
+		kfree(pf->msix_entries);
+		pf->msix_entries = NULL;
+	}
+
+	kfree(pf->irq_pile);
+	pf->irq_pile = NULL;
+}
+
+int ne6x_init_link_irq(struct ne6x_pf *pf)
+{
+	int irq_num;
+	int err;
+
+	snprintf(pf->link_intname, sizeof(pf->link_intname) - 1, "%s-%s-%d",
+		 dev_driver_string(&pf->pdev->dev), "link", pf->hw.bus.bus_num);
+	irq_num = pf->msix_entries[NE6X_NIC_INT_VP].vector;
+	err = request_irq(irq_num, ne6x_linkint_irq_handler, 0, pf->link_intname, pf);
+	if (!err)
+		pf->link_int_irq_ready = true;
+
+	return err;
+}
+
+int ne6x_enable_link_irq(struct ne6x_pf *pf)
+{
+	u64 int_mask = 0xffffffffffffffff;
+	u64 temp = 1;
+	int i = 0;
+
+	if (!pf->link_int_irq_ready)
+		return 0;
+
+	for (i = 0; i < pf->hw.pf_port; i++)
+		int_mask &= ~(temp << (i + NE6X_NIC_INT_START_BIT));
+
+	wr64_bar4(&pf->hw, NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK),
+		  int_mask);
+
+	return 0;
+}
+
+int ne6x_disable_link_irq(struct ne6x_pf *pf)
+{
+	u64 int_mask = 0xffffffffffffffff;
+	u64 int_val;
+
+	wr64_bar4(&pf->hw, NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK),
+		  int_mask);
+	int_val = rd64_bar4(&pf->hw,
+			    NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT));
+	wr64_bar4(&pf->hw, NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT),
+		  int_val);
+
+	return 0;
+}
+
+void ne6x_free_link_irq(struct ne6x_pf *pf)
+{
+	if (pf->link_int_irq_ready) {
+		synchronize_irq(pf->msix_entries[NE6X_NIC_INT_VP].vector);
+		free_irq(pf->msix_entries[NE6X_NIC_INT_VP].vector, pf);
+	}
+
+	pf->link_int_irq_ready = false;
+}
+
+irqreturn_t ne6x_msix_clean_vf_mbx(int irq, void *data)
+{
+	struct ne6x_pf *pf = data;
+	struct ne6x_hw *hw = &pf->hw;
+	bool have_cmd = false;
+	struct ne6x_vf *vf;
+	u64 int_val = 0;
+	u64 val;
+	int i;
+
+	val = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT));
+	ne6x_for_each_vf(pf, i) {
+		vf = &pf->vf[i];
+		if (val & (1ULL << vf->base_queue)) {
+			test_and_set_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state);
+			pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_DETECT;
+			pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i] = true;
+			have_cmd = true;
+			int_val |= (1ULL << vf->base_queue);
+		}
+	}
+
+	if (have_cmd) {
+		ne6x_service_event_schedule(pf);
+		wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), int_val);
+	}
+
+	val = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT));
+	ne6x_for_each_vf(pf, i) {
+		vf = &pf->vf[i];
+		if (val & (1ULL << vf->base_queue)) {
+			wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT),
+				  (1ULL << vf->base_queue));
+			pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+			pf->hw.ne6x_mbx_ready_to_send[i] = true;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+int ne6x_init_mailbox_irq(struct ne6x_pf *pf)
+{
+	int irq_num;
+	int err;
+
+	snprintf(pf->mailbox_intname, sizeof(pf->mailbox_intname) - 1, "%s-%s-%d",
+		 dev_driver_string(&pf->pdev->dev), 
"mailbox", pf->hw.bus.bus_num); + irq_num = pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector; + err = request_irq(irq_num, ne6x_msix_clean_vf_mbx, 0, pf->mailbox_intname, pf); + if (!err) + pf->mailbox_int_irq_ready = true; + + dev_info(&pf->pdev->dev, "reg mailbox irq id= %d,name = %s\n", irq_num, + pf->mailbox_intname); + + return err; +} + +int ne6x_disable_mailbox_irq(struct ne6x_pf *pf) +{ + struct ne6x_hw *hw = &pf->hw; + + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + return 0; +} + +void ne6x_free_mailbox_irq(struct ne6x_pf *pf) +{ + if (pf->mailbox_int_irq_ready) { + synchronize_irq(pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector); + free_irq(pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector, pf); + } + + pf->mailbox_int_irq_ready = false; +} + +void ne6x_clear_interrupt_scheme(struct ne6x_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + if (pf->adpt[i]) + ne6x_adpt_free_q_vectors(pf->adpt[i]); + } + + ne6x_disable_link_irq(pf); + ne6x_free_link_irq(pf); + ne6x_reset_interrupt_capability(pf); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h new file mode 100644 index 0000000000000000000000000000000000000000..e8d512d965a14fa1aa421ee4a2d30a31f0ef42b0 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_INTERRUPT_H +#define _NE6X_INTERRUPT_H + +#include "ne6x.h" + +int ne6x_init_interrupt_scheme(struct ne6x_pf *pf); +int ne6x_adpt_setup_vectors(struct ne6x_adapter *adpt); +void ne6x_adpt_free_q_vectors(struct ne6x_adapter *adpt); +int ne6x_adpt_request_irq(struct ne6x_adapter *adpt, char *basename); +void ne6x_adpt_configure_msix(struct ne6x_adapter *adpt); +int ne6x_adpt_enable_irq(struct ne6x_adapter *adpt); +void ne6x_adpt_free_irq(struct ne6x_adapter *adpt); +void ne6x_clear_interrupt_scheme(struct ne6x_pf *pf); +void ne6x_adpt_disable_irq(struct ne6x_adapter *adpt); +irqreturn_t ne6x_linkint_irq_handler(int irq, void *data); +int ne6x_enable_link_irq(struct ne6x_pf *pf); +int ne6x_disable_link_irq(struct ne6x_pf *pf); +int ne6x_init_link_irq(struct ne6x_pf *pf); +void ne6x_free_link_irq(struct ne6x_pf *pf); +int ne6x_init_mailbox_irq(struct ne6x_pf *pf); +void ne6x_free_mailbox_irq(struct ne6x_pf *pf); +int ne6x_disable_mailbox_irq(struct ne6x_pf *pf); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c new file mode 100644 index 0000000000000000000000000000000000000000..24e71dd689989f75902417e223d05f181a10b7e9 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c @@ -0,0 +1,3111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ne6x.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include "ne6x_debugfs.h" +#include "ne6x_arfs.h" +#include "version.h" +#include "ne6x_netlink.h" +#include "ne6x_interrupt.h" + +#define CREATE_TRACE_POINTS + +#define SUMMARY "Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Linux Driver" +#define COPYRIGHT "Copyright(c) 2020 - 2023 Chengdu BeiZhongWangXin Technology Co., Ltd." + +char ne6x_driver_name[] = "ncepf"; + +static const char ne6x_driver_string[] = SUMMARY; + +const char ne6x_driver_version_str[] = VERSION; +static const char ne6x_copyright[] = COPYRIGHT; + +/* ne6x_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ne6x_pci_tbl[] = { + {PCI_VDEVICE(BZWX, 0x5010), 0}, + {PCI_VDEVICE(BZWX, 0x5011), 0}, + {PCI_VDEVICE(BZWX, 0x6010), 0}, + {PCI_VDEVICE(BZWX, 0x6011), 0}, + /* required last entry */ + {0, 0}, +}; + +MODULE_DEVICE_TABLE(pci, ne6x_pci_tbl); +MODULE_AUTHOR("Chengdu BeiZhongWangXin Technology Co., Ltd., "); +MODULE_DESCRIPTION("Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Linux Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION); + +static struct workqueue_struct *ne6x_wq; +static const struct net_device_ops ne6x_netdev_ops; + +bool netif_is_ne6x(struct net_device *dev) +{ + return dev && (dev->netdev_ops == &ne6x_netdev_ops); +} + +int ne6x_hw_init(struct ne6x_hw *hw) +{ + int cpu_num = num_online_cpus(); + + /* max phy_port */ + hw->pf_port = ne6x_dev_get_port_num(hw->back); + /* expect vp queue */ + hw->expect_vp = NE6X_MAX_VP_NUM / hw->pf_port; + /* actal max vp queue */ + hw->max_queue = min_t(int, cpu_num, hw->expect_vp); + + hw->port_info = devm_kzalloc(ne6x_hw_to_dev(hw), sizeof(*hw->port_info), GFP_KERNEL); + if (!hw->port_info) + return -EIO; + + /* set the back pointer to HW */ + hw->port_info->hw = hw; + + if (!is_valid_ether_addr(hw->port_info->mac.perm_addr)) + eth_random_addr(hw->port_info->mac.perm_addr); + + return 0; +} + +int ne6x_aq_get_phy_capabilities(struct ne6x_adapter *adpt, bool is_up, bool get_hw_stats) +{ + struct ne6x_port_info *port_info = adpt->port_info; + + /* read link states */ + if (get_hw_stats) + ne6x_dev_get_link_status(adpt, &port_info->link_status); + + if (is_up) { + if (port_info->link_status.link) { + port_info->phy.link_info.link_info |= NE6X_AQ_LINK_UP; + + switch (port_info->link_status.speed) { + case NE6X_LINK_SPEED_10GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_10GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_25GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_40GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_100GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_100GB; + break; + case NE6X_LINK_SPEED_200GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_200GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_200GB; + break; + default: + dev_info(&adpt->back->pdev->dev, "WARNING: Unrecognized link_speed 
(0x%x).\n",
+					 port_info->link_status.speed);
+				break;
+			}
+
+			port_info->phy.media_type = NE6X_MEDIA_FIBER;
+			return 0;
+		}
+	}
+
+	port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_UNKNOWN;
+	port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_UNKNOWN;
+	port_info->phy.media_type = NE6X_MEDIA_UNKNOWN;
+	port_info->phy.link_info.link_info &= ~NE6X_AQ_LINK_UP;
+
+	return 0;
+}
+
+int ne6x_aq_get_vf_link_status(struct ne6x_adapter *adpt, bool is_up)
+{
+	struct ne6x_pf *pf = adpt->back;
+	struct ne6x_adapter *pf_adpt = pf->adpt[(adpt->port_info->lport >= pf->hw.pf_port) ?
+						(pf->hw.pf_port - 1) : adpt->port_info->lport];
+	struct ne6x_link_info *pf_link_status = &pf_adpt->port_info->link_status;
+	struct ne6x_port_info *vf_port_info = adpt->port_info;
+
+	if (is_up) {
+		if (pf_link_status->link) {
+			vf_port_info->phy.link_info.link_info |= NE6X_AQ_LINK_UP;
+
+			switch (pf_link_status->speed) {
+			case NE6X_LINK_SPEED_10GB:
+				vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_10GBASE;
+				vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_10GB;
+				break;
+			case NE6X_LINK_SPEED_25GB:
+				vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_25GBASE;
+				vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_25GB;
+				break;
+			case NE6X_LINK_SPEED_40GB:
+				vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_40GBASE;
+				vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_40GB;
+				break;
+			case NE6X_LINK_SPEED_100GB:
+				vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_100GBASE;
+				vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_100GB;
+				break;
+			case NE6X_LINK_SPEED_200GB:
+				vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_200GBASE;
+				vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_200GB;
+				break;
+			default:
+				dev_info(&adpt->back->pdev->dev, "WARNING: Unrecognized link_speed (0x%x).\n",
+					 pf_link_status->speed);
+				break;
+			}
+
+			vf_port_info->phy.media_type = NE6X_MEDIA_FIBER;
+			return 0;
+		}
+	}
+
+	vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_UNKNOWN;
+	vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_UNKNOWN;
+	vf_port_info->phy.media_type = NE6X_MEDIA_UNKNOWN;
+	vf_port_info->phy.link_info.link_info &= ~NE6X_AQ_LINK_UP;
+
+	return 0;
+}
+
+static void ne6x_adpt_link_event(struct ne6x_adapter *adpt, bool link_up)
+{
+	if (!adpt)
+		return;
+
+	if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state) || !adpt->netdev)
+		return;
+
+	if (link_up == netif_carrier_ok(adpt->netdev))
+		return;
+
+	if (link_up) {
+		netif_carrier_on(adpt->netdev);
+		netif_tx_wake_all_queues(adpt->netdev);
+	} else {
+		netif_carrier_off(adpt->netdev);
+		netif_tx_stop_all_queues(adpt->netdev);
+	}
+}
+
+void ne6x_print_link_message(struct ne6x_adapter *adpt, bool isup)
+{
+	char *speed = "Unknown ";
+	char *an = "False";
+	u16 new_speed;
+
+	if (isup)
+		new_speed = adpt->port_info->phy.link_info.link_speed;
+	else
+		new_speed = NE6X_LINK_SPEED_UNKNOWN;
+
+	if (adpt->current_isup == isup && adpt->current_speed == new_speed)
+		return;
+
+	adpt->current_isup = isup;
+	adpt->current_speed = new_speed;
+
+	if (!isup) {
+		netdev_info(adpt->netdev, "NIC Link is Down\n");
+		return;
+	}
+
+	switch (adpt->port_info->phy.link_info.link_speed) {
+	case NE6X_LINK_SPEED_40GB:
+		speed = "40 G";
+		break;
+	case NE6X_LINK_SPEED_100GB:
+		speed = "100 G";
+		break;
+	case NE6X_LINK_SPEED_10GB:
+		speed = "10 G";
+		break;
+	case NE6X_LINK_SPEED_25GB:
+		speed = "25 G";
+		break;
+	case NE6X_LINK_SPEED_200GB:
+		speed = "200 G";
+		break;
+	default:
+		break;
+	}
+
+	if 
(adpt->port_info->phy.link_info.an_info) + an = "True"; + + netdev_info(adpt->netdev, "NIC Link is Up, %sbps Full Duplex, Autoneg: %s\n", speed, an); +} + +static void ne6x_link_event(struct ne6x_pf *pf) +{ + struct ne6x_phy_info *phy_info; + struct ne6x_adapter *adpt = NULL; + u32 old_link_speed; + bool old_link; + bool link_up; + int i; +#ifdef CONFIG_PCI_IOV + struct ne6x_vf *vf; + int vf_id; +#endif + + for (i = 0; i < pf->num_alloc_adpt; i++) { + link_up = false; + adpt = pf->adpt[i]; + phy_info = &adpt->port_info->phy; + phy_info->link_info_old = phy_info->link_info; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + ne6x_aq_get_phy_capabilities(adpt, false, true); + else + ne6x_aq_get_phy_capabilities(adpt, true, true); + + /* add sfp online state begin */ + ne6x_dev_get_sfp_status(adpt, &phy_info->link_info.ext_info); + if (phy_info->link_info.ext_info != phy_info->link_info_old.ext_info) { + if (phy_info->link_info.ext_info == 0) + netdev_info(adpt->netdev, "adpt->id= %d,optical module unplugged", + adpt->idx); + else + netdev_info(adpt->netdev, "adpt->id= %d,optical module plugged", + adpt->idx); + } + + /* end sfp online state */ + old_link = !!(adpt->port_info->phy.link_info_old.link_info & NE6X_AQ_LINK_UP); + old_link_speed = adpt->port_info->phy.link_info_old.link_speed; + /* Check if the link state is up after updating link info, and treat + * this event as an UP event since the link is actually UP now. + */ + if (adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) + link_up = true; + + /* if the old link up/down is the same as the new */ + if (link_up == old_link) { + if (link_up && old_link_speed != adpt->port_info->phy.link_info.link_speed) + ne6x_print_link_message(adpt, link_up); + + continue; + } + + ne6x_adpt_link_event(adpt, link_up); + ne6x_print_link_message(adpt, link_up); + } + +#ifdef CONFIG_PCI_IOV + ne6x_for_each_vf(pf, vf_id) { + vf = &pf->vf[vf_id]; + adpt = vf->adpt; + + if (test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + if (!vf->rx_tx_state) { + adpt->port_info->phy.link_info.link_info = 0x0; + vf->rx_tx_state = true; + } + link_up = false; + phy_info = &adpt->port_info->phy; + phy_info->link_info_old = phy_info->link_info; + ne6x_aq_get_vf_link_status(adpt, true); + old_link = !!(adpt->port_info->phy.link_info_old.link_info + & NE6X_AQ_LINK_UP); + old_link_speed = adpt->port_info->phy.link_info_old.link_speed; + + if (adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) + link_up = true; + + if (link_up == old_link && + old_link_speed == adpt->port_info->phy.link_info.link_speed) + continue; + + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_DETECT; + ne6x_vc_notify_link_state(vf); + } + } +#endif +} + +static void ne6x_clean_link_status_subtask(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_LINK_POOLING, pf->state)) + return; + + ne6x_link_event(pf); +} + +void ne6x_service_event_schedule(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->serv_task); +} + +static void ne6x_adpt_reinit_locked(struct ne6x_adapter *adpt); + +static void ne6x_do_reset(struct ne6x_pf *pf, u32 reset_flags, bool lock_acquired) +{ + struct ne6x_adapter *adpt = NULL; + int i; + + WARN_ON(in_interrupt()); + + if (reset_flags & BIT_ULL(NE6X_PF_RESET_REQUESTED)) { + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_RECOVER, adpt->comm.state)) { + ne6x_adpt_reinit_locked(adpt); + clear_bit(NE6X_ADPT_RECOVER, adpt->comm.state); + } + } + } else if (reset_flags & 
BIT_ULL(NE6X_CORE_RESET_REQUESTED)) { + /* hardware reset:include PCIE,CORE.etc. */ + dev_info(&pf->pdev->dev, "timeout info: CORE reset\n"); + } else { + dev_info(&pf->pdev->dev, "bad reset request 0x%08x\n", reset_flags); + } +} + +static void ne6x_recover_hang_subtask(struct ne6x_pf *pf) +{ + u32 reset_flags = 0; + + if (test_and_clear_bit(NE6X_PF_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_PF_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_CORE_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_CORE_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_GLOBAL_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_GLOBAL_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_DOWN_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_DOWN_REQUESTED); + + /* If there's a recovery already waiting, it takes + * precedence before starting a new reset sequence. + */ + if (test_bit(NE6X_RESET_INTR_RECEIVED, pf->state)) { + clear_bit(NE6X_RESET_INTR_RECEIVED, pf->state); + test_and_clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + } + + /* If we're already down or resetting, just bail */ + if (reset_flags && !test_bit(NE6X_DOWN, pf->state) && + !test_bit(NE6X_CONFIG_BUSY, pf->state)) + ne6x_do_reset(pf, reset_flags, false); +} + +static void ne6x_service_timer(struct timer_list *t) +{ + struct ne6x_pf *pf = from_timer(pf, t, serv_tmr); + + if (pf->num_alloc_vfs) + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->service_timer_period)); + + ne6x_service_event_schedule(pf); +} + +void ne6x_linkscan_schedule(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->linkscan_work); +} + +static void ne6x_linkscan_timer(struct timer_list *t) +{ + struct ne6x_pf *pf = from_timer(pf, t, linkscan_tmr); + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) + mod_timer(&pf->linkscan_tmr, round_jiffies(jiffies + HZ)); + else + mod_timer(&pf->linkscan_tmr, round_jiffies(jiffies + HZ * 30)); + + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->linkscan_work); +} + +static void ne6x_service_task(struct work_struct *work) +{ + struct ne6x_pf *pf = container_of(work, struct ne6x_pf, serv_task); + unsigned long start_time = jiffies; + +#ifdef CONFIG_PCI_IOV + /* vf command process */ + ne6x_vc_process_vf_msg(pf); +#endif + + ne6x_recover_hang_subtask(pf); + + ne6x_sync_arfs_fltrs(pf); + + /* If the tasks have taken longer than one timer cycle or there + * is more work to be done, reschedule the service task now + * rather than wait for the timer to tick again. 
+ */ + if (time_after(jiffies, (start_time + pf->service_timer_period)) || + test_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state) || + test_bit(NE6X_RESET_INTR_RECEIVED, pf->state)) + ne6x_service_event_schedule(pf); +} + +static void ne6x_linkscan_work(struct work_struct *work) +{ + struct ne6x_pf *pf = container_of(work, struct ne6x_pf, linkscan_work); + + ne6x_clean_link_status_subtask(pf); +} + +irqreturn_t ne6x_linkint_irq_handler(int irq, void *data) +{ + struct ne6x_pf *pf = data; + u64 intval = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + + wr64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, + NE6X_VP_INT), + intval); + ne6x_linkscan_schedule(pf); + + return IRQ_HANDLED; +} + +int ne6x_pf_init(struct ne6x_pf *pf) +{ + pf->ctrl_adpt_idx = 0; + mutex_init(&pf->switch_mutex); + + /* set up periodic task facility */ + timer_setup(&pf->serv_tmr, ne6x_service_timer, 0); + pf->service_timer_period = HZ; + timer_setup(&pf->linkscan_tmr, ne6x_linkscan_timer, 0); + add_timer(&pf->serv_tmr); + + INIT_WORK(&pf->serv_task, ne6x_service_task); + INIT_WORK(&pf->linkscan_work, ne6x_linkscan_work); + + clear_bit(NE6X_SERVICE_SCHED, pf->state); + + pf->next_adpt = 0; + pf->num_alloc_adpt = pf->hw.pf_port; + pf->num_alloc_vfs = 0; + pf->mailbox_int_irq_ready = false; + pf->link_int_irq_ready = false; + + ne6x_dbg_pf_init(pf); + ne6x_proc_pf_init(pf); + + /* init key list head node */ + spin_lock_init(&pf->key_list_lock); + INIT_LIST_HEAD(&pf->key_filter_list); + + return 0; +} + +static void ne6x_set_num_rings_in_adpt(struct ne6x_adapter *adpt) +{ + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + adpt->num_tx_desc = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adpt->num_rx_desc = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adpt->num_cq_desc = adpt->num_tx_desc + adpt->num_rx_desc; + adpt->num_tg_desc = adpt->num_tx_desc; + adpt->irqs_ready = false; +} + +static irqreturn_t ne6x_msix_clean_rings(int irq, void *data) +{ + struct ne6x_q_vector *q_vector = data; + struct ne6x_adapter *adpt = (struct ne6x_adapter *)q_vector->adpt; + struct ne6x_hw *hw = &adpt->back->hw; + + if (!q_vector->tx.ring && !q_vector->rx.ring && !q_vector->cq.ring && !q_vector->tg.ring) + return IRQ_HANDLED; + + if (q_vector->reg_idx < NE6X_PF_VP0_NUM) + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT_MASK), + 0xffffffffffffffff); + else + wr64_bar4(hw, + NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), + 0xffffffffffffffff); + + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +int ne6x_adpt_mem_alloc(struct ne6x_pf *pf, struct ne6x_adapter *adpt) +{ + struct ne6x_ring **next_rings; + int ret = -ENODEV; + int size; + + /* Need to protect the allocation of the adapters at the PF level */ + mutex_lock(&pf->switch_mutex); + + adpt->netdev_registered = false; + size = sizeof(struct ne6x_ring *) * adpt->num_queue * 4; + adpt->tx_rings = kzalloc(size, GFP_KERNEL); + if (!adpt->tx_rings) + goto err_rings; + + next_rings = adpt->tx_rings + adpt->num_queue; + adpt->cq_rings = next_rings; + next_rings += adpt->num_queue; + adpt->rx_rings = next_rings; + adpt->tg_rings = adpt->rx_rings + adpt->num_queue; + + /* allocate memory for q_vector pointers */ + size = sizeof(struct ne6x_q_vector *) * adpt->num_q_vectors; + adpt->q_vectors = kzalloc(size, GFP_KERNEL); + if 
(!adpt->q_vectors) { + kfree(adpt->tx_rings); + ret = -ENOMEM; + goto err_rings; + } + + /* Setup default MSIX irq handler for adapter */ + ne6x_adpt_setup_irqhandler(adpt, ne6x_msix_clean_rings); + ret = 0; + +err_rings: + mutex_unlock(&pf->switch_mutex); + return ret; +} + +static int ne6x_force_link_state(struct ne6x_adapter *adpt, bool is_up) +{ + int err; + + err = ne6x_aq_get_phy_capabilities(adpt, is_up, true); + if (err) + return err; + + if (is_up) + test_and_set_bit(NE6X_LINK_POOLING, adpt->back->state); + + return 0; +} + +int ne6x_adpt_restart_vp(struct ne6x_adapter *adpt, bool enable) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int i, pf_q; + + pf_q = adpt->base_queue; + for (i = 0; i < adpt->num_queue; i++, pf_q++) { + if (pf_q < NE6X_PF_VP0_NUM) + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_VP_RELOAD), enable); + else + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_VP_RELOAD), + enable); + + usleep_range(1000, 2000); + if (!enable) { + ne6x_tail_update(adpt->rx_rings[i], 0); + ne6x_tail_update(adpt->tx_rings[i], 0); + } + } + + return 0; +} + +int ne6x_adpt_configure(struct ne6x_adapter *adpt) +{ + int err; + int i; + + err = ne6x_adpt_restart_vp(adpt, true); + if (!err) + err = ne6x_adpt_configure_tx(adpt); + + if (!err) + err = ne6x_adpt_configure_cq(adpt); + + if (!err) + err = ne6x_adpt_configure_rx(adpt); + + if (!err) + err = ne6x_adpt_restart_vp(adpt, false); + + if (!err) { + for (i = 0; i < adpt->num_queue && !err; i++) + ne6x_alloc_rx_buffers(adpt->rx_rings[i], + NE6X_DESC_UNUSED(adpt->rx_rings[i])); + } + + return err; +} + +static void ne6x_napi_enable_all(struct ne6x_adapter *adpt) +{ + int q_idx; + + if (!adpt->netdev) + return; + + for (q_idx = 0; q_idx < adpt->num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[q_idx]; + + if (q_vector->tx.ring || q_vector->rx.ring || q_vector->cq.ring) + napi_enable(&q_vector->napi); + } +} + +static int ne6x_up_complete(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + ne6x_adpt_configure_msix(adpt); + + clear_bit(NE6X_ADPT_DOWN, adpt->comm.state); + ne6x_napi_enable_all(adpt); + ne6x_adpt_enable_irq(adpt); + + if ((adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) && adpt->netdev) { + ne6x_print_link_message(adpt, true); + netif_tx_start_all_queues(adpt->netdev); + netif_carrier_on(adpt->netdev); + } + + /* On the next run of the service_task, notify any clients of the new + * opened netdev + */ + set_bit(NE6X_CLIENT_SERVICE_REQUESTED, pf->state); + ne6x_linkscan_schedule(pf); + + return 0; +} + +static void ne6x_napi_disable_all(struct ne6x_adapter *adpt) +{ + int q_idx; + + if (!adpt->netdev) + return; + + for (q_idx = 0; q_idx < adpt->num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[q_idx]; + + if (q_vector->tx.ring || q_vector->rx.ring || q_vector->cq.ring) + napi_disable(&q_vector->napi); + } +} + +static void ne6x_unmap_and_free_tx_resource(struct ne6x_ring *ring, struct ne6x_tx_buf *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +void ne6x_clean_tx_ring(struct 
ne6x_ring *tx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buf) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + ne6x_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_buf[i]); + + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + memset(tx_ring->tx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + if (!tx_ring->netdev) + return; + + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(txring_txq(tx_ring)); +} + +void ne6x_clean_rx_ring(struct ne6x_ring *rx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buf) + return; + + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ne6x_rx_buf *rx_bi = &rx_ring->rx_buf[i]; + + if (!rx_bi->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma, rx_bi->page_offset, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + memset(rx_ring->rx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; +} + +static void ne6x_clean_cq_ring(struct ne6x_ring *cq_ring) +{ + /* Zero out the descriptor ring */ + memset(cq_ring->desc, 0, cq_ring->size); + + cq_ring->next_to_clean = 0; + cq_ring->next_to_use = 0; +} + +void ne6x_down(struct ne6x_adapter *adpt) +{ + int i; + + /* It is assumed that the caller of this function + * sets the adpt->comm.state NE6X_ADPT_DOWN bit. 
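+ * Setting that bit first ensures the data path and the service task + * already treat the adapter as down while the rings are drained below.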
+ */ + if (adpt->netdev) { + netif_carrier_off(adpt->netdev); + netif_tx_disable(adpt->netdev); + } + + ne6x_adpt_disable_irq(adpt); + ne6x_adpt_restart_vp(adpt, true); + ne6x_force_link_state(adpt, false); + ne6x_napi_disable_all(adpt); + + for (i = 0; i < adpt->num_queue; i++) { + ne6x_clean_tx_ring(adpt->tx_rings[i]); + ne6x_clean_cq_ring(adpt->cq_rings[i]); + ne6x_clean_rx_ring(adpt->rx_rings[i]); + } +} + +void ne6x_free_rx_resources(struct ne6x_ring *rx_ring) +{ + ne6x_clean_rx_ring(rx_ring); + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + } +} + +static void ne6x_adpt_free_rx_resources(struct ne6x_adapter *adpt) +{ + int i; + + if (!adpt->rx_rings) + return; + + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->rx_rings[i] && adpt->rx_rings[i]->desc) + ne6x_free_rx_resources(adpt->rx_rings[i]); + } +} + +void ne6x_free_tx_resources(struct ne6x_ring *tx_ring) +{ + ne6x_clean_tx_ring(tx_ring); + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + + if (tx_ring->desc) { + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + } +} + +void ne6x_free_cq_resources(struct ne6x_ring *cq_ring) +{ + ne6x_clean_cq_ring(cq_ring); + if (cq_ring->desc) { + dma_free_coherent(cq_ring->dev, cq_ring->size, cq_ring->desc, cq_ring->dma); + cq_ring->desc = NULL; + } +} + +static void ne6x_adpt_free_tx_resources(struct ne6x_adapter *adpt) +{ + int i; + + if (adpt->tx_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->tx_rings[i] && adpt->tx_rings[i]->desc) + ne6x_free_tx_resources(adpt->tx_rings[i]); + kfree(adpt->tx_rings[i]->sgl); + } + } + + if (adpt->cq_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->cq_rings[i] && adpt->cq_rings[i]->desc) + ne6x_free_cq_resources(adpt->cq_rings[i]); + } + } + + if (adpt->tg_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->tg_rings[i] && adpt->tg_rings[i]->desc) + /* tg_ring == cq_ring */ + ne6x_free_cq_resources(adpt->tg_rings[i]); + } + } +} + +int ne6x_up(struct ne6x_adapter *adpt) +{ + int err; + + ne6x_force_link_state(adpt, true); + + err = ne6x_adpt_configure(adpt); + if (!err) + err = ne6x_up_complete(adpt); + + return err; +} + +int ne6x_adpt_open(struct ne6x_adapter *adpt) +{ + char int_name[NE6X_INT_NAME_STR_LEN]; + struct ne6x_pf *pf = adpt->back; + int err; + + /* allocate descriptors */ + err = ne6x_adpt_setup_tx_resources(adpt); + if (err) + goto err_setup_tx; + + err = ne6x_adpt_setup_rx_resources(adpt); + if (err) + goto err_setup_rx; + + err = ne6x_adpt_configure(adpt); + if (err) + goto err_setup_rx; + + if (adpt->netdev) { + snprintf(int_name, sizeof(int_name) - 1, "%s-%s", dev_driver_string(&pf->pdev->dev), + adpt->netdev->name); + err = ne6x_adpt_request_irq(adpt, int_name); + if (err) + goto err_setup_rx; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(adpt->netdev, adpt->num_queue); + if (err) + goto err_set_queues; + + /* When reducing the number of Tx queues, any pre-existing + * skbuffs might target a now removed queue. Older versions of + * the Linux kernel do not check for this, and it can result + * in a kernel panic. Avoid this by flushing all skbs now, so + * that we avoid attempting to transmit one that has an + * invalid queue mapping. 
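+ * qdisc_reset_all_tx_gt() below drops any such stale skbs so none + * of them can reach a queue that no longer exists.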
+ */ + qdisc_reset_all_tx_gt(adpt->netdev, 0); + + err = netif_set_real_num_rx_queues(adpt->netdev, adpt->num_queue); + if (err) + goto err_set_queues; + } else { + err = -EINVAL; + goto err_setup_rx; + } + + err = ne6x_up_complete(adpt); + if (err) + goto err_up_complete; + + ne6x_dev_set_tx_rx_state(adpt, true, true); + return 0; + +err_up_complete: + ne6x_down(adpt); +err_set_queues: + ne6x_adpt_free_irq(adpt); +err_setup_rx: + ne6x_adpt_free_rx_resources(adpt); +err_setup_tx: + ne6x_adpt_free_tx_resources(adpt); + + return err; +} + +int ne6x_open(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int err; + + netdev_info(netdev, "open !!!\n"); + set_bit(NE6X_ADPT_OPEN, adpt->comm.state); + + netif_carrier_off(netdev); + + if (ne6x_force_link_state(adpt, true)) + return -EAGAIN; + + err = ne6x_adpt_open(adpt); + if (err) + return err; + + ne6x_sync_features(netdev); + + ne6x_dev_set_if_state(adpt, NE6000_IF_INTERFACE_UP); + + return 0; +} + +void ne6x_adpt_close(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + ne6x_dev_set_tx_rx_state(adpt, false, false); + if (!test_and_set_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + ne6x_down(adpt); + + ne6x_adpt_free_irq(adpt); + ne6x_adpt_free_tx_resources(adpt); + ne6x_adpt_free_rx_resources(adpt); + set_bit(NE6X_CLIENT_SERVICE_REQUESTED, pf->state); +} + +int ne6x_close(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + clear_bit(NE6X_ADPT_OPEN, adpt->comm.state); + adpt->current_isup = false; + adpt->current_speed = NE6X_LINK_SPEED_UNKNOWN; + ne6x_adpt_close(adpt); + if (test_bit(NE6X_ADPT_F_LINKDOWN_ON_CLOSE, adpt->flags)) + ne6x_dev_set_if_state(adpt, NE6000_IF_INTERFACE_DOWN); + + netdev_info(netdev, "close !!!\n"); + + return 0; +} + +static void ne6x_adpt_reinit_locked(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + WARN_ON(in_interrupt()); + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) + usleep_range(1000, 2000); + + ne6x_down(adpt); + ne6x_up(adpt); + clear_bit(NE6X_CONFIG_BUSY, pf->state); +} + +static int ne6x_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + NE6X_PACKET_HDR_PAD; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (new_mtu < NE6X_MIN_MTU_SIZE) { + netdev_err(netdev, "mtu < MIN MTU size"); + return -EINVAL; + } + + max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if (max_frame > NE6X_MAX_RXBUFFER) { + netdev_err(netdev, "mtu > MAX MTU size"); + return -EINVAL; + } + + netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) { + if (adpt->back->num_alloc_vfs == 0) + ne6x_adpt_reinit_locked(adpt); + } + + return 0; +} + +static void ne6x_tx_timeout(struct net_device *netdev, __always_unused unsigned int txqueue) +{ + struct ne6x_ring *tx_ring = NULL; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + unsigned int hung_queue = 0; + u64 head, intr, tail; + + hung_queue = txqueue; + tx_ring = adpt->tx_rings[hung_queue]; + pf->tx_timeout_count++; + + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) + pf->tx_timeout_recovery_level = 1; /* reset after some time */ + else if (time_before(jiffies, (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) + return; /* don't do any new action before the next timeout */ + + /* don't kick off another recovery if one is already pending */ + if 
(test_and_set_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state)) + return; + + if (tx_ring) { + if (tx_ring->reg_idx < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, NE6X_SQ_HD_POINTER)); + /* Read interrupt register */ + intr = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, NE6X_VP_INT)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, + NE6X_SQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + intr = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + } + + netdev_info(netdev, "tx_timeout: adapter: %u, Q: %u, NTC: 0x%x, HEAD: 0x%llx, NTU: 0x%x, TAIL: 0x%llx, INTR: 0x%llx\n", + adpt->idx, hung_queue, tx_ring->next_to_clean, head, + tx_ring->next_to_use, tail, intr); + } + + pf->tx_timeout_last_recovery = jiffies; + netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", + pf->tx_timeout_recovery_level, hung_queue); + + switch (pf->tx_timeout_recovery_level) { + case 1: + set_bit(NE6X_ADPT_RECOVER, adpt->comm.state); + set_bit(NE6X_PF_RESET_REQUESTED, pf->state); + set_bit(NE6X_RESET_INTR_RECEIVED, pf->state); + break; + case 2: + set_bit(NE6X_CORE_RESET_REQUESTED, pf->state); + break; + default: + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n"); + set_bit(NE6X_DOWN_REQUESTED, pf->state); + set_bit(NE6X_ADPT_DOWN_REQUESTED, adpt->comm.state); + break; + } + + ne6x_service_event_schedule(pf); + pf->tx_timeout_recovery_level++; +} + +static void ne6x_get_netdev_stats_struct_tx(struct ne6x_ring *ring, struct rtnl_link_stats64 *stats) +{ + u64 bytes, packets; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + + stats->tx_packets += packets; + stats->tx_bytes += bytes; +} + +struct rtnl_link_stats64 *ne6x_get_adpt_stats_struct(struct ne6x_adapter *adpt) +{ + return &adpt->net_stats; +} + +static void ne6x_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct rtnl_link_stats64 *adpt_stats = ne6x_get_adpt_stats_struct(adpt); + struct ne6x_ring *tx_ring, *rx_ring; + u64 bytes, packets; + unsigned int start; + int i; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + if (!adpt->tx_rings) + return; + + rcu_read_lock(); + for (i = 0; i < adpt->num_queue; i++) { + tx_ring = READ_ONCE(adpt->tx_rings[i]); + if (!tx_ring) + continue; + + ne6x_get_netdev_stats_struct_tx(tx_ring, stats); + rx_ring = &tx_ring[2]; + + do { + start = u64_stats_fetch_begin(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); + + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + + adpt_stats->rx_dropped = 0; + rcu_read_unlock(); + + /* following stats updated by ne6x_watchdog_subtask() */ + stats->multicast = adpt_stats->multicast; + stats->tx_errors = adpt_stats->tx_errors; + stats->tx_dropped = adpt_stats->tx_dropped; + stats->rx_errors = adpt_stats->rx_errors; + stats->rx_dropped = adpt_stats->rx_dropped; + stats->rx_crc_errors = adpt_stats->rx_crc_errors; + stats->rx_length_errors = 
adpt_stats->rx_length_errors; +} + +void ne6x_update_pf_stats(struct ne6x_adapter *adpt) +{ + struct rtnl_link_stats64 *ons; + struct rtnl_link_stats64 *ns; /* netdev stats */ + struct ne6x_eth_stats *oes; + struct ne6x_eth_stats *es; /* device's eth stats */ + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + u32 tx_restart, tx_busy; + u32 rx_page, rx_buf; + u64 bytes, packets; + unsigned int start; + struct vf_stat vf_stat; + u64 tx_linearize; + u64 tx_force_wb; + u64 rx_p, rx_b; + u64 tx_p, tx_b; + u64 tx_e, rx_e; + u64 rx_l, rx_c; + u16 i; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + ns = ne6x_get_adpt_stats_struct(adpt); + ons = &adpt->net_stats_offsets; + es = &adpt->eth_stats; + oes = &adpt->eth_stats_offsets; + + rx_p = 0; + rx_b = 0; + tx_p = 0; + tx_b = 0; + rx_e = 0; + tx_e = 0; + rx_c = 0; + rx_l = 0; + tx_force_wb = 0; + tx_linearize = 0; + tx_busy = 0; + tx_restart = 0; + rx_page = 0; + rx_buf = 0; + + rcu_read_lock(); + for (i = 0; i < adpt->num_queue; i++) { + /* locate Tx ring */ + tx_ring = READ_ONCE(adpt->tx_rings[i]); + + do { + start = u64_stats_fetch_begin(&tx_ring->syncp); + packets = tx_ring->stats.packets; + bytes = tx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); + + tx_b += bytes; + tx_p += packets; + tx_restart += tx_ring->tx_stats.restart_q; + tx_busy += tx_ring->tx_stats.tx_busy; + tx_linearize += tx_ring->tx_stats.tx_linearize; + tx_e += tx_ring->tx_stats.csum_err + tx_ring->tx_stats.tx_drop_addr + + tx_ring->tx_stats.tx_pcie_read_err; + + rx_ring = &tx_ring[2]; + + do { + start = u64_stats_fetch_begin(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); + + rx_b += bytes; + rx_p += packets; + rx_buf += rx_ring->rx_stats.alloc_buf_failed; + rx_page += rx_ring->rx_stats.alloc_page_failed; + rx_e += rx_ring->rx_stats.csum_err + rx_ring->rx_stats.rx_err + + rx_ring->rx_stats.rx_mem_error; + rx_l += rx_ring->rx_stats.rx_mem_error; + } + + rcu_read_unlock(); + + adpt->tx_restart = tx_restart; + adpt->tx_busy = tx_busy; + adpt->rx_page_failed = rx_page; + adpt->rx_buf_failed = rx_buf; + + ns->rx_packets = rx_p; + ns->rx_bytes = rx_b; + ns->tx_packets = tx_p; + ns->tx_bytes = tx_b; + ns->tx_errors = tx_e; + ns->rx_errors = rx_e; + ns->rx_length_errors = rx_l; + ns->rx_crc_errors = rx_c; + + ns->rx_dropped = 0; + ne6x_dev_get_vf_stat(adpt, &vf_stat); + es->rx_broadcast = vf_stat.rx_broadcast_pkts; + es->rx_miss = vf_stat.rx_drop_pkts; + es->rx_multicast = vf_stat.rx_multicast_pkts; + es->rx_unicast = vf_stat.rx_unicast_pkts; + es->tx_broadcast = vf_stat.tx_broadcast_pkts; + es->tx_multicast = vf_stat.tx_multicast_pkts; + es->tx_unicast = vf_stat.tx_unicast_pkts; + es->rx_malform = vf_stat.rx_malform_pkts; + es->tx_malform = vf_stat.tx_malform_pkts; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ne6x_netpoll(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + for (i = 0; i < adpt->num_q_vectors; i++) + ne6x_msix_clean_rings(0, adpt->q_vectors[i]); +} +#endif + +static int ne6x_set_mac(struct net_device *netdev, void *p) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_mac_info *mac = &adpt->port_info->mac; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if 
(ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", addr->sa_data); + return 0; + } + + if (ether_addr_equal(mac->perm_addr, addr->sa_data)) + netdev_info(netdev, "returning to hw mac address %pM\n", mac->perm_addr); + else + netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + + ne6x_adpt_del_mac(adpt, mac->perm_addr, true); + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(mac->perm_addr, addr->sa_data, netdev->addr_len); + ne6x_adpt_add_mac(adpt, mac->perm_addr, true); + ne6x_dev_set_port_mac(adpt, mac->perm_addr); + + return 0; +} + +static int ne6x_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_vlan vlan; + int ret; + + netdev_info(netdev, "vlan_rx_add_vid proto = 0x%04X vid = %d\n", be16_to_cpu(proto), vid); + + if (!vid) + return 0; + + /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged + * packets aren't pruned by the device's internal switch on Rx + */ + vlan = NE6X_VLAN(be16_to_cpu(proto), vid, 0); + + if (vlan.vid > 0 && vlan.vid < (VLAN_N_VID - 1)) { + ret = ne6x_adpt_add_vlan(adpt, vlan); + if (!ret) + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } else { + return -EINVAL; + } + + return ret; +} + +static int ne6x_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_vlan vlan; + int ret; + + netdev_info(netdev, "vlan_rx_kill_vid proto = 0x%04X vid = %d\n", be16_to_cpu(proto), vid); + + if (!vid) + return 0; + + /* Make sure VLAN delete is successful before updating VLAN + * information + */ + vlan = NE6X_VLAN(be16_to_cpu(proto), vid, 0); + ret = ne6x_adpt_del_vlan(adpt, vlan); + if (ret) + return ret; + + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + + return 0; +} + +static struct mac_addr_node *ne6x_find_addr(struct ne6x_adapter *adpt, + const u8 *macaddr, bool is_unicast) +{ + struct mac_addr_head *addr_head = NULL; + struct mac_addr_node *addr_node = NULL; + + if (!macaddr) + return NULL; + + if (is_unicast) + addr_head = &adpt->uc_mac_addr; + else + addr_head = &adpt->mc_mac_addr; + + list_for_each_entry(addr_node, &addr_head->list, list) { + if (ether_addr_equal(macaddr, addr_node->addr)) + return addr_node; + } + + return NULL; +} + +int ne6x_adpt_add_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_adapter *adpt, u8 *mac); + struct mac_addr_head *addr_head = NULL; + struct mac_addr_node *addr_node = NULL; + int rc = 0; + + if (!addr) + return -EINVAL; + + if (is_unicast) { + addr_head = &adpt->uc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_add_unicast; + } else { + addr_head = &adpt->mc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_add_multicast; + } + + mutex_lock(&addr_head->mutex); + + if (ne6x_find_addr(adpt, addr, is_unicast)) + goto out_unlock; + + /* Update MAC list value */ + addr_node = kzalloc(sizeof(*addr_node), GFP_KERNEL); + if (!addr_node) { + rc = -ENOMEM; + goto out_unlock; + } + + ether_addr_copy(addr_node->addr, addr); + list_add_tail(&addr_node->list, &addr_head->list); + /* Send the value of the updated MAC linked list to the SDK */ + ne6x_vc_cfg_mac(adpt, addr_node->addr); + +out_unlock: + mutex_unlock(&addr_head->mutex); + + return rc; +} + +int ne6x_adpt_del_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_adapter *adpt, u8 *mac); + struct mac_addr_head *addr_head = NULL; + struct mac_addr_node *addr_node = NULL; + + if (is_unicast) { + addr_head = &adpt->uc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_del_unicast; + } else { + addr_head = &adpt->mc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_del_multicast; + } + + mutex_lock(&addr_head->mutex); + addr_node = ne6x_find_addr(adpt, addr, is_unicast); + if (!addr_node) + goto out_unlock; + + list_del(&addr_node->list); + ne6x_vc_cfg_mac(adpt, addr_node->addr); + kfree(addr_node); + +out_unlock: + mutex_unlock(&addr_head->mutex); + + return 0; +} + +static int ne6x_mc_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_add_mac(adpt, addr, false); +} + +static int ne6x_mc_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_del_mac(adpt, addr, false); +} + +static int ne6x_uc_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_add_mac(adpt, addr, true); +} + +static int ne6x_uc_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_del_mac(adpt, addr, true); +} + +void ne6x_adpt_clear_ddos(struct ne6x_pf *pf) +{ + u32 data; + + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_DDOS_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); +} + +int ne6x_adpt_clear_mac_vlan(struct ne6x_adapter *adpt) +{ + struct mac_addr_node *temp_node = NULL, *addr_node = NULL; + struct ne6x_vlan_filter *f = NULL, *temp_filter = NULL; + struct mac_addr_head *addr_head = NULL; + struct list_head temp_header; + int ret = 0; + + INIT_LIST_HEAD(&temp_header); + spin_lock_bh(&adpt->mac_vlan_list_lock); + list_for_each_entry(f, &adpt->vlan_filter_list, list) { + if (f->vlan.vid) { + temp_filter = kzalloc(sizeof(*temp_filter), GFP_ATOMIC); + if (!temp_filter) + continue; + + memcpy(temp_filter, f, sizeof(struct ne6x_vlan_filter)); + list_add_tail(&temp_filter->list, &temp_header); + } + } + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + list_for_each_entry_safe(f, temp_filter, &temp_header, list) { + if (f->vlan.vid) + ret |= ne6x_adpt_del_vlan(adpt, f->vlan); + + list_del(&f->list); + kfree(f); + } + + addr_head = &adpt->uc_mac_addr; + mutex_lock(&addr_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &addr_head->list, list) { + ret |= ne6x_dev_del_unicast(adpt, addr_node->addr); + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&addr_head->mutex); + + addr_head = &adpt->mc_mac_addr; + mutex_lock(&addr_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &addr_head->list, list) { + ret |= ne6x_dev_del_multicast(adpt, addr_node->addr); + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&addr_head->mutex); + + return ret; +} + +static void ne6x_set_rx_mode_task(struct work_struct *work) +{ + struct ne6x_adapter *adpt = container_of(work, struct ne6x_adapter, set_rx_mode_task); + struct net_device *netdev = adpt->netdev; + + /* Check for Promiscuous modes */ + if (netdev->flags & IFF_PROMISC) { + ne6x_dev_set_uc_promiscuous_enable(adpt, true); + ne6x_dev_set_mc_promiscuous_enable(adpt, true); + } else { + ne6x_dev_set_uc_promiscuous_enable(adpt, false); + ne6x_dev_set_mc_promiscuous_enable(adpt, false); + /* Check for All Multicast modes */ + if (netdev->flags & IFF_ALLMULTI) + ne6x_dev_set_mc_promiscuous_enable(adpt, true); + else + __dev_mc_sync(netdev, 
ne6x_mc_addr_sync, ne6x_mc_addr_unsync); + } + + __dev_uc_sync(netdev, ne6x_uc_addr_sync, ne6x_uc_addr_unsync); +} + +static void ne6x_set_rx_mode(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (!adpt) + return; + + queue_work(ne6x_wq, &adpt->set_rx_mode_task); +} + +static int ne6x_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (!adpt) + return -1; + + return 0; +} + +#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_STAG_TX) + +#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_HW_VLAN_STAG_FILTER) + +#define NETIF_UDP_TNL_FEATURES (NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +static netdev_features_t ne6x_fix_features(struct net_device *netdev, netdev_features_t features) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (features & NETIF_VLAN_FILTERING_FEATURES) + features |= NETIF_VLAN_FILTERING_FEATURES; + + return features; +} + +static int ne6x_set_features(struct net_device *netdev, netdev_features_t features) +{ + netdev_features_t changed = features ^ netdev->features; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u32 value; + + value = ne6x_dev_get_features(adpt); + + if (changed & (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)) { + if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + value |= NE6X_F_TX_UDP_TNL_SEG; + else + value &= ~NE6X_F_TX_UDP_TNL_SEG; + } + + if (changed & NETIF_VLAN_OFFLOAD_FEATURES || changed & NETIF_VLAN_FILTERING_FEATURES) { + /* keep cases separate because one ethertype for offloads can be + * disabled at the same time as another is disabled, so check for an + * enabled ethertype first, then check for disabled. Default to + * ETH_P_8021Q so an ethertype is specified if disabling insertion and + * stripping. 
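+ * Note that ne6x_fix_features() has already made the CTAG and STAG + * offload bits mutually exclusive, so at most one ethertype is + * configured for each direction below.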
+ */ + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + value |= NE6X_F_RX_VLAN_STRIP; + else + value &= ~NE6X_F_RX_VLAN_STRIP; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + value |= NE6X_F_TX_VLAN; + else + value &= ~NE6X_F_TX_VLAN; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + value |= NE6X_F_RX_QINQ_STRIP; + else + value &= ~NE6X_F_RX_QINQ_STRIP; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + value |= NE6X_F_TX_QINQ; + else + value &= ~NE6X_F_TX_QINQ; + + if (features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + value |= NE6X_F_RX_VLAN_FILTER; + else + value &= ~NE6X_F_RX_VLAN_FILTER; + } + + if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO)) { + if (features & NETIF_F_RXCSUM) + value |= NE6X_OFFLOAD_RXCSUM; + else + value &= ~NE6X_OFFLOAD_RXCSUM; + + /* update hardware LRO capability accordingly */ + if (features & NETIF_F_LRO) + value |= NE6X_OFFLOAD_LRO; + else + value &= ~NE6X_OFFLOAD_LRO; + } + + if (changed & (NETIF_F_TSO6 | NETIF_F_TSO)) { + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + value |= NE6X_OFFLOAD_TSO; + else + value &= ~NE6X_OFFLOAD_TSO; + } + + if (changed & NETIF_F_GSO_UDP) { + if (features & NETIF_F_GSO_UDP) + value |= NE6X_OFFLOAD_UFO; + else + value &= ~NE6X_OFFLOAD_UFO; + } + + if (changed & NETIF_F_IP_CSUM) { + if (features & NETIF_F_IP_CSUM) + value |= NE6X_OFFLOAD_TXCSUM; + else + value &= ~NE6X_OFFLOAD_TXCSUM; + } + + if (changed & NETIF_F_RXHASH) { + if (features & NETIF_F_RXHASH) + value |= NE6X_OFFLOAD_RSS; + else + value &= ~NE6X_OFFLOAD_RSS; + } + + if (changed & NETIF_F_HW_L2FW_DOFFLOAD) { + if (features & NETIF_F_HW_L2FW_DOFFLOAD) + value |= NE6X_OFFLOAD_L2; + else + value &= ~NE6X_OFFLOAD_L2; + } + + if (changed & NETIF_F_SCTP_CRC) { + if (features & NETIF_F_SCTP_CRC) + value |= NE6X_OFFLOAD_SCTP_CSUM; + else + value &= ~NE6X_OFFLOAD_SCTP_CSUM; + } + + if (changed & NETIF_F_NTUPLE) { + if (features & NETIF_F_NTUPLE) + value |= NE6X_F_FLOW_STEERING; + else + value &= ~NE6X_F_FLOW_STEERING; + } + return ne6x_dev_set_features(adpt, value); +} + +static netdev_features_t ne6x_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 64 bytes. If it is then we need to drop support for GSO. 
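+ * Clearing the GSO bits below makes the stack segment such frames + * in software instead of handing them to the hardware.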
+ */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + features &= ~NETIF_F_GSO_MASK; + + /* MACLEN can support at most 63 words */ + len = skb_network_header(skb) - skb->data; + if (len & ~(63 * 2)) + goto out_err; + + /* IPLEN and EIPLEN can support at most 127 dwords */ + len = skb_transport_header(skb) - skb_network_header(skb); + if (len & ~(127 * 4)) + goto out_err; + + /* No need to validate L4LEN as TCP is the only protocol with a + * flexible value and we support all possible values supported + * by TCP, which is at most 15 dwords + */ + return features; + +out_err: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +int ne6x_link_speed_to_rate(int link_speed) +{ + switch (link_speed) { + case NE6X_LINK_SPEED_100GB: + return SPEED_100000; + case NE6X_LINK_SPEED_40GB: + return SPEED_40000; + case NE6X_LINK_SPEED_25GB: + return SPEED_25000; + case NE6X_LINK_SPEED_10GB: + return SPEED_10000; + default: + return SPEED_25000; + } +} + +int ne6x_validata_tx_rate(struct ne6x_adapter *adpt, int vf_id, int min_tx_rate, int max_tx_rate) +{ + if (!adpt) + return -EINVAL; + + if (min_tx_rate) { + dev_err(&adpt->back->pdev->dev, "Invalid min tx rate (%d) specified for VF %d; a non-zero minimum rate is not supported.\n", + min_tx_rate, vf_id); + return -EINVAL; + } + + if (max_tx_rate > ne6x_link_speed_to_rate(adpt->port_info->phy.link_info.link_speed)) { + dev_err(&adpt->back->pdev->dev, "Invalid max tx rate (%d) specified for VF %d; it exceeds the link speed.\n", + max_tx_rate, vf_id); + return -EINVAL; + } + + return 0; +} + +static struct ne6x_key_filter *ne6x_find_key(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f; + + list_for_each_entry(f, &pf->key_filter_list, list) { + if (f->key.pi == key.pi && ether_addr_equal(f->key.mac_addr, key.mac_addr)) + return f; + } + + return NULL; +} + +struct ne6x_key_filter *ne6x_add_key_list(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f = NULL; + + spin_lock_bh(&pf->key_list_lock); + + f = ne6x_find_key(pf, key); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->key = key; + + list_add_tail(&f->list, &pf->key_filter_list); + f->add = true; + } else { + f->refcnt++; + } + +clearout: + spin_unlock_bh(&pf->key_list_lock); + + return f; +} + +int ne6x_del_key_list(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f; + + spin_lock_bh(&pf->key_list_lock); + + f = ne6x_find_key(pf, key); + if (f) { + if (f->refcnt) { + f->refcnt--; + spin_unlock_bh(&pf->key_list_lock); + return -1; + } + + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&pf->key_list_lock); + + return 0; +} + +int ne6x_add_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size) +{ + struct ne6x_key_filter *f; + struct ne6x_key key; + + memset(&key, 0, sizeof(struct ne6x_key)); + key.pi = ADPT_LPORT(adpt); + memcpy(key.mac_addr, mac_addr, size); + + f = ne6x_add_key_list(adpt->back, key); + if (!f || f->refcnt) + return -1; + + return 0; +} + +int ne6x_del_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size) +{ + struct ne6x_key key; + int ret; + + memset(&key, 0, sizeof(struct ne6x_key)); + key.pi = ADPT_LPORT(adpt); + memcpy(key.mac_addr, mac_addr, size); + + ret = ne6x_del_key_list(adpt->back, key); + if (ret) + return -1; + + return 0; +} + +static struct ne6x_vlan_filter *ne6x_find_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f; + + list_for_each_entry(f, &adpt->vlan_filter_list, list) { + if (f->vlan.vid == vlan.vid && f->vlan.tpid == vlan.tpid) + return f; + } + + return NULL; +} + +struct ne6x_vlan_filter *ne6x_add_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f = NULL; + + spin_lock_bh(&adpt->mac_vlan_list_lock); + + f = ne6x_find_vlan(adpt, vlan); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->vlan = vlan; + + list_add_tail(&f->list, &adpt->vlan_filter_list); + f->add = true; + } else { + f->refcnt++; + } + +clearout: + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + return f; +} + +int ne6x_del_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f; + + spin_lock_bh(&adpt->mac_vlan_list_lock); + + f = ne6x_find_vlan(adpt, vlan); + if (f) { + if (f->refcnt) { + f->refcnt--; + spin_unlock_bh(&adpt->mac_vlan_list_lock); + return -1; + } + + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + return 0; +} + +int ne6x_adpt_add_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f = ne6x_add_vlan_list(adpt, vlan); + + if (f && f->refcnt == 0) + ne6x_dev_vlan_add(adpt, &vlan); + + return 0; +} + +int ne6x_adpt_del_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + int ret; + + ret = ne6x_del_vlan_list(adpt, vlan); + if (ret == 0) + ne6x_dev_vlan_del(adpt, &vlan); + + return 0; +} + +int ne6x_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, + u8 qos, __be16 vlan_proto) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + u16 local_vlan_proto = ntohs(vlan_proto); + u16 vid_temp = 0, tpid_temp = 0; + struct ne6x_vlan vlan; + struct ne6x_adapter *adpt; + struct device *dev; + struct ne6x_vf *vf; + int lport; + + dev = ne6x_pf_to_dev(pf); + + if (vf_id < 0 || vf_id >= pf->num_alloc_vfs / 2 || vlan_id >= (VLAN_N_VID - 1) || qos > 7) { + dev_err(dev, "Invalid port VLAN parameters for VF %d, VLAN ID %d, QoS %d\n", + vf_id, vlan_id, qos); + return -EINVAL; + } + + if (!ne6x_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { + dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n", + local_vlan_proto); + return -EPROTONOSUPPORT; + } + + lport = ADPT_LPORT(np->adpt); + vf_id += (pf->num_alloc_vfs / 2) * lport; + + vf = ne6x_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + vf->port_vlan_info = NE6X_VLAN(local_vlan_proto, vlan_id, qos); + if (vf->port_vlan_info.prio || vf->port_vlan_info.vid) + dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n", + vlan_id, qos, local_vlan_proto, vf_id); + else + dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id); + + adpt = vf->adpt; + + dev_info(dev, "%s: net_name:%s TPID:%08x vlan_id:%d qos:%d lport:%d vport:%d vlan_id:%d tpid:%04x %d\n", + __func__, netdev->name, local_vlan_proto, vlan_id, qos, ADPT_LPORT(adpt), + ADPT_VPORT(adpt), vf->port_vlan_info.vid, vf->port_vlan_info.tpid, vf->vfp_vid); + + vlan = NE6X_VLAN(local_vlan_proto, vlan_id, qos); + + if (vlan.vid == 0) { + /* tear down the current QinQ configuration; this is the same + * whether or not the stored TPID matches the requested one + */ + vlan.vid = vf->vfp_vid; + vlan.tpid = vf->vfp_tpid; + vf->vfp_vid = 0; + vf->vfp_tpid = 0; + ne6x_dev_del_vf_qinq(vf, vlan.tpid, vlan.vid); + ne6x_adpt_del_vlan(vf->adpt, vlan); + } else if (vlan.vid > 0 && vlan.vid < (VLAN_N_VID - 1)) { + vid_temp = vlan.vid; + tpid_temp = vlan.tpid; + vlan.vid = vf->vfp_vid; + vlan.tpid = vf->vfp_tpid; + + if (vf->vfp_vid == vid_temp) { + ne6x_dev_del_vf_qinq(vf, vlan.tpid, vlan.vid); + ne6x_adpt_del_vlan(vf->adpt, vlan); + } + + vlan.vid = vid_temp; + vlan.tpid = tpid_temp; + vid_temp = (qos << VLAN_PRIO_SHIFT) | (vlan.vid & VLAN_VID_MASK); + vf->vfp_vid = vf->port_vlan_info.vid; + vf->vfp_tpid = vf->port_vlan_info.tpid; + ne6x_dev_add_vf_qinq(vf, tpid_temp, vid_temp); + ne6x_adpt_add_vlan(vf->adpt, vlan); + } else { + return -EINVAL; + } + + return 0; +} + +static void *ne6x_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_macvlan *mv = NULL; + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, vdev->dev_addr); + mv = devm_kzalloc(ne6x_pf_to_dev(adpt->back), sizeof(*mv), GFP_KERNEL); + if (!mv) + return NULL; + + ne6x_adpt_add_mac(adpt, mac, true); + INIT_LIST_HEAD(&mv->list); + mv->vdev = vdev; + ether_addr_copy(mv->mac, mac); + list_add(&mv->list, &adpt->macvlan_list); + netdev_info(netdev, "MACVLAN offloads for %s are on\n", vdev->name); + + return mv; +} + +static void ne6x_fwd_del_macvlan(struct net_device *netdev, void *accel_priv) +{ + struct ne6x_macvlan *mv = (struct ne6x_macvlan *)accel_priv; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (!accel_priv) + return; + + ne6x_adpt_del_mac(adpt, mv->mac, true); + list_del(&mv->list); + netdev_info(netdev, "MACVLAN offloads for %s are off\n", mv->vdev->name); + devm_kfree(ne6x_pf_to_dev(adpt->back), mv); +} + +static const struct net_device_ops ne6x_netdev_ops = { + .ndo_open = ne6x_open, + .ndo_stop = ne6x_close, + .ndo_start_xmit = ne6x_lan_xmit_frame, + .ndo_get_stats64 = ne6x_get_netdev_stats_struct, + .ndo_set_rx_mode = ne6x_set_rx_mode, + .ndo_set_mac_address = ne6x_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = ne6x_change_mtu, + .ndo_tx_timeout = ne6x_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ne6x_netpoll, +#endif + .ndo_set_vf_rate = ne6x_ndo_set_vf_bw, + .ndo_set_tx_maxrate = ne6x_set_tx_maxrate, + .ndo_set_vf_mac = ne6x_set_vf_mac, + .ndo_get_vf_config = ne6x_get_vf_config, + .ndo_set_vf_trust = ne6x_set_vf_trust, + .ndo_set_vf_vlan = ne6x_set_vf_port_vlan, + .ndo_set_vf_link_state = ne6x_set_vf_link_state, + .ndo_vlan_rx_add_vid = ne6x_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ne6x_vlan_rx_kill_vid, + .ndo_set_features = ne6x_set_features, + .ndo_features_check = ne6x_features_check, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = ne6x_rx_flow_steer, +#endif + .ndo_dfwd_add_station = ne6x_fwd_add_macvlan, + .ndo_dfwd_del_station = ne6x_fwd_del_macvlan, + .ndo_fix_features = ne6x_fix_features, +}; + +void ne6x_sync_features(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u32 value; + + value = ne6x_dev_get_features(adpt); + + if (netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + value |= NE6X_F_TX_UDP_TNL_SEG; + else + value &= ~NE6X_F_TX_UDP_TNL_SEG; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + value |= NE6X_F_RX_VLAN_STRIP; + else + value &= ~NE6X_F_RX_VLAN_STRIP; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_TX) + value |= NE6X_F_TX_VLAN; + else + value &= ~NE6X_F_TX_VLAN; + + if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) + value |= NE6X_F_RX_QINQ_STRIP; + else + value &= ~NE6X_F_RX_QINQ_STRIP; + + if (netdev->features & NETIF_F_HW_VLAN_STAG_TX) + value |= NE6X_F_TX_QINQ; + else + value 
&= ~NE6X_F_TX_QINQ; + + if (netdev->features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + value |= NE6X_F_RX_VLAN_FILTER; + else + value &= ~NE6X_F_RX_VLAN_FILTER; + + if (netdev->features & NETIF_F_RXCSUM) + value |= NE6X_OFFLOAD_RXCSUM; + else + value &= ~NE6X_OFFLOAD_RXCSUM; + + /* update hardware LRO capability accordingly */ + if (netdev->features & NETIF_F_LRO) + value |= NE6X_OFFLOAD_LRO; + else + value &= ~NE6X_OFFLOAD_LRO; + + if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) + value |= NE6X_OFFLOAD_TSO; + else + value &= ~NE6X_OFFLOAD_TSO; + + if (netdev->features & NETIF_F_GSO_UDP) + value |= NE6X_OFFLOAD_UFO; + else + value &= ~NE6X_OFFLOAD_UFO; + + if (netdev->features & NETIF_F_IP_CSUM) + value |= NE6X_OFFLOAD_TXCSUM; + else + value &= ~NE6X_OFFLOAD_TXCSUM; + + if (netdev->features & NETIF_F_RXHASH) + value |= NE6X_OFFLOAD_RSS; + else + value &= ~NE6X_OFFLOAD_RSS; + + if (netdev->features & NETIF_F_HW_L2FW_DOFFLOAD) + value |= NE6X_OFFLOAD_L2; + else + value &= ~NE6X_OFFLOAD_L2; + + if (netdev->features & NETIF_F_SCTP_CRC) + value |= NE6X_OFFLOAD_SCTP_CSUM; + else + value &= ~NE6X_OFFLOAD_SCTP_CSUM; + + if (netdev->features & NETIF_F_NTUPLE) + value |= NE6X_F_FLOW_STEERING; + else + value &= ~NE6X_F_FLOW_STEERING; + + ne6x_dev_set_features(adpt, value); +} + +static void ne6x_set_netdev_features(struct net_device *netdev) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + netdev_features_t vlano_features = 0u; + netdev_features_t csumo_features; + netdev_features_t dflt_features; + netdev_features_t tso_features; + + dflt_features = NETIF_F_SG | + NETIF_F_HIGHDMA | + NETIF_F_NTUPLE | + NETIF_F_RXHASH; + + csumo_features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_IPV6_CSUM; + + vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + tso_features = NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_LRO | + NETIF_F_LOOPBACK | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | + NETIF_F_GSO_UDP_L4 | + NETIF_F_GSO_SCTP | + 0; + + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; + + /* set features that user can change */ + netdev->hw_features = dflt_features | csumo_features | vlano_features | tso_features; + + /* add support for HW_CSUM on packets with MPLS header */ + netdev->mpls_features = NETIF_F_HW_CSUM; + + netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + + /* enable features */ + netdev->features |= netdev->hw_features; + /* encap and VLAN devices inherit default, csumo and tso features */ + netdev->hw_enc_features |= dflt_features | csumo_features | tso_features; + netdev->vlan_features |= dflt_features | csumo_features | tso_features; + netdev->hw_features |= NETIF_F_HW_TC; + pf->hw.dvm_ena = 0x1; + + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_FILTER; +} + +static int ne6x_config_netdev(struct ne6x_adapter *adpt) +{ + struct ne6x_rss_info *rss_info = &adpt->rss_info; + struct ne6x_pf *pf = adpt->back; + struct ne6x_netdev_priv *np; + struct net_device *netdev; + char name[IFNAMSIZ] = {0}; + int etherdev_size, index; + u8 mac_addr[ETH_ALEN]; + + if (pf->hw.bus.domain_num) + sprintf(name, "enP%dp%ds0f%d", + pf->hw.bus.domain_num, pf->hw.bus.bus_num, adpt->idx); + else + sprintf(name, "enp%ds0f%d", pf->hw.bus.bus_num, adpt->idx); + + etherdev_size = 
sizeof(struct ne6x_netdev_priv); + + netdev = alloc_netdev_mq(etherdev_size, name, NET_NAME_USER, ether_setup, adpt->num_queue); + if (!netdev) + return -ENOMEM; + + adpt->netdev = netdev; + np = netdev_priv(netdev); + np->adpt = adpt; + + /* begin rss info */ + rss_info->hash_type = NE6X_RSS_HASH_TYPE_IPV4_TCP | + NE6X_RSS_HASH_TYPE_IPV4_UDP | + NE6X_RSS_HASH_TYPE_IPV4 | + NE6X_RSS_HASH_TYPE_IPV6_TCP | + NE6X_RSS_HASH_TYPE_IPV6_UDP | + NE6X_RSS_HASH_TYPE_IPV6; + rss_info->hash_func = NE6X_RSS_HASH_FUNC_TOEPLITZ; + rss_info->hash_key_size = NE6X_RSS_MAX_KEY_SIZE; + rss_info->ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE; + netdev_rss_key_fill(rss_info->hash_key, sizeof(rss_info->hash_key)); + + for (index = 0; index < rss_info->ind_table_size; index++) + rss_info->ind_table[index] = ethtool_rxfh_indir_default(index, adpt->num_queue); + + ne6x_dev_set_rss(adpt, rss_info); /* end rss info */ + + ne6x_set_netdev_features(netdev); + + SET_NETDEV_DEV(netdev, &pf->pdev->dev); + ether_addr_copy(mac_addr, adpt->port_info->mac.perm_addr); + eth_hw_addr_set(netdev, mac_addr); + ether_addr_copy(netdev->perm_addr, mac_addr); + + netdev->netdev_ops = &ne6x_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; + ne6x_set_ethtool_ops(netdev); + + /* MTU range: 128 - 15342 */ + netdev->min_mtu = NE6X_MIN_MTU_SIZE; + netdev->max_mtu = NE6X_MAX_RXBUFFER - NE6X_PACKET_HDR_PAD - ETH_FCS_LEN; + netdev->gso_max_size = 65535; + netdev->needed_headroom = 32; + netdev->needed_tailroom = 32; + ne6x_dev_set_mtu(adpt, netdev->mtu); + ne6x_sync_features(netdev); + + return 0; +} + +static void ne6x_map_vector_to_qp(struct ne6x_adapter *adpt, int v_idx, int qp_idx) +{ + struct ne6x_q_vector *q_vector = adpt->q_vectors[v_idx]; + struct ne6x_ring *tx_ring = adpt->tx_rings[qp_idx]; + struct ne6x_ring *rx_ring = adpt->rx_rings[qp_idx]; + struct ne6x_ring *cq_ring = adpt->cq_rings[qp_idx]; + struct ne6x_ring *tg_ring = adpt->tg_rings[qp_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + + cq_ring->q_vector = q_vector; + cq_ring->next = q_vector->cq.ring; + q_vector->cq.ring = cq_ring; + q_vector->cq.count++; + + tg_ring->q_vector = q_vector; + tg_ring->next = q_vector->tg.ring; + q_vector->tg.ring = tg_ring; + q_vector->tg.count++; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; +} + +void ne6x_adpt_map_rings_to_vectors(struct ne6x_adapter *adpt) +{ + int q_vectors = adpt->num_q_vectors; + int qp_remaining = adpt->num_queue; + struct ne6x_q_vector *q_vector; + int num_ringpairs; + int v_start = 0; + int qp_idx = 0; + + /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to + * group them so there are multiple queues per vector. + * It is also important to go through all the vectors available to be + * sure that, if we don't use all of them, the remaining vectors are + * cleared. This is especially important when decreasing the + * number of queues in use. 
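+ * DIV_ROUND_UP() below recomputes the share on every iteration so the + * remaining queue pairs are spread as evenly as possible across the + * vectors that have not been assigned yet.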
+ */ + for (; v_start < q_vectors; v_start++) { + q_vector = adpt->q_vectors[v_start]; + + num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); + + q_vector->num_ringpairs = num_ringpairs; + q_vector->reg_idx = q_vector->v_idx + adpt->base_vector; + + q_vector->rx.count = 0; + q_vector->tx.count = 0; + q_vector->cq.count = 0; + q_vector->tg.count = 0; + q_vector->rx.ring = NULL; + q_vector->tx.ring = NULL; + q_vector->cq.ring = NULL; + q_vector->tg.ring = NULL; + + while (num_ringpairs--) { + ne6x_map_vector_to_qp(adpt, v_start, qp_idx); + qp_idx++; + qp_remaining--; + } + } +} + +void ne6x_adpt_reset_stats(struct ne6x_adapter *adpt) +{ + struct rtnl_link_stats64 *ns; + int i; + + if (!adpt) + return; + + ns = ne6x_get_adpt_stats_struct(adpt); + memset(ns, 0, sizeof(*ns)); + memset(&adpt->net_stats_offsets, 0, sizeof(adpt->net_stats_offsets)); + memset(&adpt->eth_stats, 0, sizeof(adpt->eth_stats)); + memset(&adpt->eth_stats_offsets, 0, sizeof(adpt->eth_stats_offsets)); + + if (adpt->rx_rings && adpt->rx_rings[0]) { + for (i = 0; i < adpt->num_queue; i++) { + memset(&adpt->rx_rings[i]->stats, 0, + sizeof(adpt->rx_rings[i]->stats)); + memset(&adpt->rx_rings[i]->rx_stats, 0, + sizeof(adpt->rx_rings[i]->rx_stats)); + memset(&adpt->rx_rings[i]->cq_stats, 0, + sizeof(adpt->rx_rings[i]->cq_stats)); + memset(&adpt->tx_rings[i]->stats, 0, + sizeof(adpt->tx_rings[i]->stats)); + memset(&adpt->tx_rings[i]->tx_stats, 0, + sizeof(adpt->tx_rings[i]->tx_stats)); + } + } +} + +static int ne6x_adpt_setup(struct ne6x_pf *pf) +{ + struct ne6x_adapter *adpt = NULL; + u32 is_write_protect = false; + struct ne6x_hw *hw = &pf->hw; + int i, ret = 0; + u32 value; + + /* PF + VP */ + pf->adpt = kcalloc(NE6X_MAX_VP_NUM + 4, sizeof(*pf->adpt), GFP_KERNEL); + if (!pf->adpt) + return -ENOMEM; + + ne6x_dev_get_norflash_write_protect(pf, &is_write_protect); + + /* Need to protect the allocation of the adapters at the PF level */ + for (i = pf->num_alloc_adpt - 1; i >= 0; i--) { + struct ne6x_vlan vlan = {0}; + + adpt = kzalloc(sizeof(*adpt), GFP_KERNEL); + if (!adpt) { + ret = -ENOMEM; + goto err_portinfo; + } + + adpt->back = pf; + pf->adpt[i] = adpt; + adpt->idx = i; + adpt->vport = NE6X_PF_VP0_NUM + i; /* vport */ + set_bit(NE6X_ADPT_DOWN, adpt->comm.state); + + value = ne6x_dev_get_features(adpt); + if (value & NE6X_F_RX_FW_LLDP) + clear_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + else + set_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + + clear_bit(NE6X_ADPT_F_LINKDOWN_ON_CLOSE, adpt->flags); + clear_bit(NE6X_ADPT_F_DDOS_SWITCH, adpt->flags); + clear_bit(NE6X_ADPT_F_ACL, adpt->flags); + + if (is_write_protect) + set_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + else + clear_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + + INIT_WORK(&adpt->set_rx_mode_task, ne6x_set_rx_mode_task); + + /* init multicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->mc_mac_addr.list); + mutex_init(&adpt->mc_mac_addr.mutex); + + /* init unicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->uc_mac_addr.list); + mutex_init(&adpt->uc_mac_addr.mutex); + + /* init vlan list head node */ + spin_lock_init(&adpt->mac_vlan_list_lock); + INIT_LIST_HEAD(&adpt->vlan_filter_list); + + INIT_LIST_HEAD(&adpt->macvlan_list); + init_waitqueue_head(&adpt->recv_notify); + + adpt->port_info = kzalloc(sizeof(*adpt->port_info), GFP_KERNEL); + if (!adpt->port_info) { + ret = -ENOMEM; + goto err_portinfo; + } + + adpt->port_info->lport = i; /* logical port */ + adpt->port_info->hw_trunk_id = i; + adpt->port_info->hw_port_id = ne6x_dev_get_pport(adpt); + 
adpt->port_info->queue = pf->hw.max_queue; + adpt->port_info->hw_max_queue = adpt->port_info->queue; + adpt->port_info->hw_queue_base = pf->hw.expect_vp * i; + adpt->comm.port_info = adpt->port_info->lport | (adpt->vport << 8); + adpt->port_info->hw = hw; + adpt->port_info->phy.curr_user_speed_req = 0x0; + + ne6x_dev_get_mac_addr(adpt, adpt->port_info->mac.perm_addr); + ne6x_set_num_rings_in_adpt(adpt); + + ret = ne6x_adpt_mem_alloc(pf, adpt); + if (ret) + goto err_netdev; + + ret = ne6x_config_netdev(adpt); + if (ret) + goto err_configdev; + + /* The unicast MAC address delivers the SDK */ + vlan = NE6X_VLAN(ETH_P_8021Q, 0xfff, 0); + ne6x_adpt_add_vlan(adpt, vlan); + ne6x_adpt_add_mac(adpt, adpt->port_info->mac.perm_addr, true); + ne6x_dev_add_broadcast_leaf(adpt); + + /* set up vectors and rings if needed */ + ret = ne6x_adpt_setup_vectors(adpt); + if (ret) + goto err_msix; + + ret = ne6x_alloc_rings(adpt); + if (ret) + goto err_rings; + + ne6x_init_arfs(adpt); + + ret = ne6x_set_cpu_rx_rmap(adpt); + if (ret) + netdev_info(adpt->netdev, "adpt rx rmap err: %d", ret); + + /* map all of the rings to the q_vectors */ + ne6x_adpt_map_rings_to_vectors(adpt); + ne6x_adpt_reset_stats(adpt); + ne6x_dev_set_port2pi(adpt); + ne6x_dev_set_pi2port(adpt); + ne6x_dev_set_vport(adpt); + ne6x_dev_set_rss(adpt, &adpt->rss_info); + } + + for (i = pf->num_alloc_adpt - 1; i >= 0; i--) { + adpt = pf->adpt[i]; + ret = ne6x_adpt_register_netdev(adpt); + if (ret) + goto err_configdev; + + adpt->netdev_registered = true; + netif_carrier_off(adpt->netdev); + /* make sure transmit queues start off as stopped */ + netif_tx_stop_all_queues(adpt->netdev); + } + + return ret; + +err_rings: + ne6x_adpt_free_q_vectors(adpt); +err_msix: + if (adpt->netdev_registered) { + adpt->netdev_registered = false; + unregister_netdev(adpt->netdev); + free_netdev(adpt->netdev); + adpt->netdev = NULL; + } +err_configdev: + kfree(adpt->tx_rings); + kfree(adpt->q_vectors); +err_netdev: + kfree(adpt->port_info); +err_portinfo: + kfree(adpt); + + return ret; +} + +int ne6x_adpt_register_netdev(struct ne6x_adapter *adpt) +{ + int ret; + + ret = register_netdev(adpt->netdev); + if (ret) { + struct net_device *device = adpt->netdev; + struct ne6x_pf *pf = adpt->back; + char name[IFNAMSIZ] = {0}; + + sprintf(name, "enp%ds0f%%d", pf->hw.bus.bus_num); + strcpy(device->name, name); + return register_netdev(adpt->netdev); + } + + return ret; +} + +void ne6x_adjust_adpt_port_max_queue(struct ne6x_pf *pf) +{ + int cpu_num = num_online_cpus(); + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) { + pf->hw.expect_vp = pf->irq_pile->num_entries / pf->hw.pf_port; + /* actal max vp queue */ + pf->hw.max_queue = min_t(int, cpu_num, pf->hw.expect_vp); + dev_info(&pf->pdev->dev, "%s:hw->expect_vp = %d hw->max_queue = %d cpu_num = %d\n", + __func__, pf->hw.expect_vp, pf->hw.max_queue, cpu_num); + } +} + +static int ne6x_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct ne6x_pf *pf; + struct ne6x_hw *hw; + u32 ioremap_len; + int err; + + if (PCI_FUNC(pdev->devfn) != 1) + return 0; + + /* initialize device for use with memory space */ + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + /* set up pci connections */ + err = pci_request_mem_regions(pdev, ne6x_driver_name); + if 
(err) { + dev_info(&pdev->dev, "pci_request_mem_regions failed %d\n", err); + goto err_pci_reg; + } + pci_set_master(pdev); + /* Now that we have a PCI connection, we need to do the + * low level device setup. This is primarily setting up + * the Admin Queue structures and then querying for the + * device's current profile information. + */ + pf = kzalloc(sizeof(*pf), GFP_KERNEL); + if (!pf) { + err = -ENOMEM; + goto err_pf_alloc; + } + pf->next_adpt = 0; + pf->pdev = pdev; + pci_set_drvdata(pdev, pf); + set_bit(NE6X_DOWN, pf->state); + + hw = &pf->hw; + hw->back = pf; + + ioremap_len = pci_resource_len(pdev, 0); + hw->hw_addr0 = ioremap(pci_resource_start(pdev, 0), ioremap_len); + if (!hw->hw_addr0) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar0 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 0), ioremap_len, err); + goto err_ioremap_hw_addr0; + } + + ioremap_len = pci_resource_len(pdev, 2); + hw->hw_addr2 = ioremap(pci_resource_start(pdev, 2), ioremap_len); + if (!hw->hw_addr2) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar2 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 2), ioremap_len, err); + goto err_ioremap_hw_addr2; + } + + ioremap_len = pci_resource_len(pdev, 4); + hw->hw_addr4 = ioremap(pci_resource_start(pdev, 4), ioremap_len); + if (!hw->hw_addr4) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar4 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 4), ioremap_len, err); + goto err_ioremap_hw_addr4; + } + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + hw->bus.domain_num = pci_domain_nr(pdev->bus); + hw->bus.bus_num = pdev->bus->number; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + + usleep_range(10, 20); + + mutex_init(&pf->mbus_comm_mutex); + if (ne6x_dev_init(pf)) { + err = -EIO; + dev_info(&pdev->dev, "sdk init failed!\n"); + goto error_sdk_init_failed; + } + usleep_range(10, 20); + + pci_save_state(pdev); + + /* hardware resource initialization */ + err = ne6x_hw_init(hw); + if (err) + goto err_unroll_alloc; + + /* driver private resource initialization */ + err = ne6x_pf_init(pf); + if (err) + goto err_pf_reset; + + /* interrupt resource initialization */ + err = ne6x_init_interrupt_scheme(pf); + if (err) + goto err_interrupt_scheme; + + ne6x_adjust_adpt_port_max_queue(pf); + + err = ne6x_adpt_setup(pf); + if (err) + goto err_adpts; + + ne6x_dev_set_nic_start(pf, 0); + add_timer(&pf->linkscan_tmr); + ne6x_enable_link_irq(pf); + pcie_print_link_status(pdev); + /* ready to go, so clear down state bit */ + clear_bit(NE6X_DOWN, pf->state); + return 0; + +err_adpts: + set_bit(NE6X_DOWN, pf->state); + ne6x_clear_interrupt_scheme(pf); +err_interrupt_scheme: + del_timer_sync(&pf->serv_tmr); +err_pf_reset: + devm_kfree(ne6x_hw_to_dev(hw), hw->port_info); + hw->port_info = NULL; +err_unroll_alloc: +error_sdk_init_failed: + iounmap(hw->hw_addr4); +err_ioremap_hw_addr4: + iounmap(hw->hw_addr2); + hw->hw_addr2 = NULL; +err_ioremap_hw_addr2: + iounmap(hw->hw_addr0); +err_ioremap_hw_addr0: + kfree(pf); +err_pf_alloc: + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +void ne6x_adpt_free_arrays(struct ne6x_adapter *adpt, bool free_qvectors) +{ + /* free the ring and vector containers */ + if (free_qvectors) { + kfree(adpt->q_vectors); + 
adpt->q_vectors = NULL;
+	}
+
+	kfree(adpt->tx_rings);
+	adpt->tx_rings = NULL;
+	adpt->rx_rings = NULL;
+	adpt->cq_rings = NULL;
+}
+
+static int ne6x_adpt_clear(struct ne6x_adapter *adpt)
+{
+	struct ne6x_pf *pf;
+
+	if (!adpt)
+		return 0;
+
+	if (!adpt->back)
+		goto free_adpt;
+
+	pf = adpt->back;
+
+	mutex_lock(&pf->switch_mutex);
+	if (!pf->adpt[adpt->idx]) {
+		dev_err(&pf->pdev->dev, "pf->adpt[%d] is NULL, just free adpt[%d](type %d)\n",
+			adpt->idx, adpt->idx, adpt->type);
+		goto unlock_adpt;
+	}
+
+	if (pf->adpt[adpt->idx] != adpt) {
+		dev_err(&pf->pdev->dev, "pf->adpt[%d](type %d) != adpt[%d](type %d): no free!\n",
+			pf->adpt[adpt->idx]->idx, pf->adpt[adpt->idx]->type, adpt->idx, adpt->type);
+		goto unlock_adpt;
+	}
+
+	/* updates the PF for this cleared adpt */
+	ne6x_adpt_free_arrays(adpt, true);
+
+	pf->adpt[adpt->idx] = NULL;
+	if (adpt->idx < pf->next_adpt)
+		pf->next_adpt = adpt->idx;
+
+unlock_adpt:
+	mutex_unlock(&pf->switch_mutex);
+free_adpt:
+	kfree(adpt);
+
+	return 0;
+}
+
+int ne6x_adpt_release(struct ne6x_adapter *adpt)
+{
+	struct mac_addr_head *mc_head = &adpt->mc_mac_addr;
+	struct mac_addr_head *uc_head = &adpt->uc_mac_addr;
+	struct mac_addr_node *temp_node, *addr_node;
+	struct ne6x_vlan_filter *vlf, *vlftmp;
+	struct ne6x_key_filter *klf, *klftmp;
+	struct ne6x_macvlan *mv, *mv_tmp;
+	struct ne6x_pf *pf = adpt->back;
+
+	if (!test_bit(NE6X_DOWN, pf->state)) {
+		dev_info(&pf->pdev->dev, "Can't remove PF adapter\n");
+		return -ENODEV;
+	}
+
+	set_bit(NE6X_ADPT_RELEASING, adpt->comm.state);
+
+	ne6x_remove_arfs(adpt);
+	ne6x_adpt_clear_ddos(pf);
+	ne6x_adpt_clear_mac_vlan(adpt);
+	ne6x_dev_del_broadcast_leaf(adpt);
+	/* release adpt multicast addr list resource */
+	mutex_lock(&mc_head->mutex);
+	list_for_each_entry_safe(addr_node, temp_node, &mc_head->list, list) {
+		list_del(&addr_node->list);
+		kfree(addr_node);
+	}
+	mutex_unlock(&mc_head->mutex);
+
+	/* release adpt unicast addr list resource */
+	mutex_lock(&uc_head->mutex);
+	list_for_each_entry_safe(addr_node, temp_node, &uc_head->list, list) {
+		list_del(&addr_node->list);
+		kfree(addr_node);
+	}
+	mutex_unlock(&uc_head->mutex);
+
+	spin_lock_bh(&adpt->mac_vlan_list_lock);
+	/* release adpt vlan list resource */
+	list_for_each_entry_safe(vlf, vlftmp, &adpt->vlan_filter_list, list) {
+		list_del(&vlf->list);
+		kfree(vlf);
+	}
+	spin_unlock_bh(&adpt->mac_vlan_list_lock);
+
+	spin_lock_bh(&adpt->back->key_list_lock);
+	/* release adpt key filter list resource */
+	list_for_each_entry_safe(klf, klftmp, &adpt->back->key_filter_list, list) {
+		list_del(&klf->list);
+		kfree(klf);
+	}
+	spin_unlock_bh(&adpt->back->key_list_lock);
+
+	list_for_each_entry_safe(mv, mv_tmp, &adpt->macvlan_list, list)
+		ne6x_fwd_del_macvlan(adpt->netdev, mv);
+
+	if (adpt->netdev_registered) {
+		adpt->netdev_registered = false;
+		if (adpt->netdev)
+			/* results in a call to ne6x_close() */
+			unregister_netdev(adpt->netdev);
+	}
+
+	ne6x_free_cpu_rx_rmap(adpt);
+	ne6x_adpt_disable_irq(adpt);
+
+	/* clear the sync flag on all filters */
+	if (adpt->netdev) {
+		__dev_uc_unsync(adpt->netdev, NULL);
+		__dev_mc_unsync(adpt->netdev, NULL);
+	}
+
+	ne6x_adpt_free_q_vectors(adpt);
+	if (adpt->netdev) {
+		free_netdev(adpt->netdev);
+		adpt->netdev = NULL;
+	}
+
+	/* add for lldp */
+	ne6x_dev_set_fw_lldp(adpt, false);
+	ne6x_adpt_clear_rings(adpt);
+	ne6x_adpt_clear(adpt);
+
+	return 0;
+}
+
+static void ne6x_remove(struct pci_dev *pdev)
+{
+	struct ne6x_pf *pf = pci_get_drvdata(pdev);
+	struct ne6x_hw *hw = &pf->hw;
+	int i;
+
+	if
(PCI_FUNC(pdev->devfn) != 1) + return; + + ne6x_proc_pf_exit(pf); + ne6x_dbg_pf_exit(pf); + + ne6x_dev_set_nic_stop(pf, 0); + +#ifdef CONFIG_PCI_IOV + if (pf->num_alloc_vfs) { + set_bit(NE6X_REMOVE, pf->state); + ne6x_sriov_configure(pdev, 0); + } +#endif + + /* no more scheduling of any task */ + set_bit(NE6X_DOWN, pf->state); + if (pf->serv_tmr.function) + del_timer_sync(&pf->serv_tmr); + + if (pf->serv_task.func) + cancel_work_sync(&pf->serv_task); + + if (pf->linkscan_tmr.function) + del_timer_sync(&pf->linkscan_tmr); + + if (pf->linkscan_work.func) + cancel_work_sync(&pf->linkscan_work); + + /* Now we can shutdown the PF's adapter, just before we kill + * adminq and hmc. + */ + for (i = 0; i < pf->num_alloc_adpt; i++) + ne6x_adpt_release(pf->adpt[i]); + + /* Clear all dynamic memory lists of rings, q_vectors, and adapters */ + rtnl_lock(); + ne6x_clear_interrupt_scheme(pf); + for (i = 0; i < pf->num_alloc_adpt; i++) { + if (pf->adpt[i]) { + ne6x_adpt_clear_rings(pf->adpt[i]); + ne6x_adpt_clear(pf->adpt[i]); + pf->adpt[i] = NULL; + } + } + rtnl_unlock(); + + kfree(pf->adpt); + + iounmap(hw->hw_addr4); + iounmap(hw->hw_addr2); + hw->hw_addr2 = NULL; + iounmap(hw->hw_addr0); + kfree(pf); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver ne6x_driver = { + .name = ne6x_driver_name, + .id_table = ne6x_pci_tbl, + .probe = ne6x_probe, + .remove = ne6x_remove, + .sriov_configure = ne6x_sriov_configure, +}; + +int __init ne6x_init_module(void) +{ + pr_info("%s: %s - version %s\n", ne6x_driver_name, ne6x_driver_string, + ne6x_driver_version_str); + pr_info("%s: %s\n", ne6x_driver_name, ne6x_copyright); + + ne6x_wq = create_singlethread_workqueue(ne6x_driver_name); + if (!ne6x_wq) { + pr_err("%s: Failed to create workqueue\n", ne6x_driver_name); + return -ENOMEM; + } + + ne6x_dbg_init(); + ne6x_proc_init(); + ne6x_netlink_init(); + + return pci_register_driver(&ne6x_driver); +} + +module_init(ne6x_init_module); + +void __exit ne6x_exit_module(void) +{ + pci_unregister_driver(&ne6x_driver); + destroy_workqueue(ne6x_wq); + ne6x_netlink_exit(); + ne6x_proc_exit(); + ne6x_dbg_exit(); +} + +module_exit(ne6x_exit_module); diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c new file mode 100644 index 0000000000000000000000000000000000000000..1e6f21b5324250e90d1a427bbe798bcf08313c4b --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/
+
+#include <net/netlink.h>
+#include <net/sock.h>
+
+#include "ne6x.h"
+#include "ne6x_reg.h"
+#include "ne6x_debugfs.h"
+#include "ne6x_dev.h"
+#include "ne6x_netlink.h"
+
+static struct sock *ne6x_nlsock;
+static DEFINE_MUTEX(ne6x_msg_mutex);
+
+static int ne6x_netlink_tab_add(struct ne6x_pf *pf, struct ne6x_rule *rule)
+{
+	struct ne6x_debug_table *table_info;
+	struct device *dev;
+	u32 table_id = 0xFFFFFFFF;
+	int err;
+
+	table_info = kzalloc(sizeof(*table_info), GFP_KERNEL);
+	if (unlikely(!table_info))
+		return -ENOMEM;
+
+	dev = ne6x_pf_to_dev(pf);
+	table_info->table = NE6X_REG_ACL_TABLE;
+	table_info->size = NE6X_HASH_KEY_SIZE;
+	memcpy(table_info->data, rule, sizeof(*rule));
+
+	err = ne6x_reg_table_search(pf, table_info->table, &table_info->data[0],
+				    table_info->size, NULL, table_info->size);
+	if (err == -ENOENT) {
+		table_info->size = NE6X_HASH_KEY_SIZE + NE6X_HASH_DATA_SIZE;
+		err = ne6x_reg_table_insert(pf, table_info->table, &table_info->data[0],
+					    table_info->size, &table_id);
+	} else {
+		dev_info(dev, "table entry already exists\n");
+		kfree(table_info);
+		return -EEXIST;
+	}
+
+	if (err == 0) {
+		dev_info(dev, "insert rule_id = 0x%x success!\n", table_id);
+	} else if (err != -ETIMEDOUT) {
+		dev_info(dev, "insert rule_id = 0x%x fail!\n", table_id);
+		err = -EIO;
+	} else {
+		dev_info(dev, "insert rule_id = 0x%x timeout!\n", table_id);
+		err = -EAGAIN;
+	}
+
+	kfree(table_info);
+	return err;
+}
+
+static int ne6x_netlink_tab_del(struct ne6x_pf *pf, struct ne6x_rule *rule)
+{
+	struct ne6x_debug_table *table_info;
+	struct device *dev;
+	int err;
+
+	table_info = kzalloc(sizeof(*table_info), GFP_KERNEL);
+	if (unlikely(!table_info))
+		return -ENOMEM;
+
+	dev = ne6x_pf_to_dev(pf);
+	table_info->table = NE6X_REG_ACL_TABLE;
+	table_info->size = NE6X_HASH_KEY_SIZE;
+	memcpy(table_info->data, rule, sizeof(*rule));
+
+	err = ne6x_reg_table_delete(pf, table_info->table, &table_info->data[0], table_info->size);
+	dev_info(dev, "%s: %s\n", __func__, (err == 0) ? "success!" : "timeout!");
+	kfree(table_info);
+
+	return err;
+}
+
+static int ne6x_netlink_meter_write(struct ne6x_pf *pf, struct ne6x_meter *meter)
+{
+	struct meter_table vf_bw;
+	struct device *dev;
+	u32 cir_maxnum = 0xfffff;
+	u32 cbs_maxnum = 0xffffff;
+	u32 type_flag = 0;
+	u32 type_map = 0;
+	u32 cir;
+	int err;
+
+	if (meter->type_num > NE6X_METER_TYPE_MAX ||
+	    meter->opcode > NE6X_METER_OPCODE_MAX)
+		return -EINVAL;
+
+	dev = ne6x_pf_to_dev(pf);
+	type_flag |= BIT(meter->type_num);
+
+	err = ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type_map);
+	if (err)
+		return err;
+
+	if (meter->opcode)
+		type_map |= type_flag;
+	else
+		type_map &= ~type_flag;
+
+	err = ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type_map);
+	if (err)
+		return err;
+
+	/* scale to the hardware rate unit (1024 Kbps), rounding up */
+	cir = meter->value * 1000 + 1023;
+	cir = min(cir / 1024, cir_maxnum);
+
+	vf_bw.cir = cir;
+	vf_bw.pir = min(cir + cir / 10, cir_maxnum);
+
+	vf_bw.cbs = min(vf_bw.cir * 10000, cbs_maxnum);
+	vf_bw.pbs = min(vf_bw.pir * 10000, cbs_maxnum);
+
+	err = ne6x_reg_config_meter(pf, NE6X_METER1_TABLE |
+				    NE6X_METER_SUBSET(NE6X_METER_SUBSET0) |
+				    meter->type_num, (u32 *)&vf_bw, sizeof(vf_bw));
+
+	dev_info(dev, "%s\n", err ? "write meter fail!"
: "write meter success!"); + + return err; +} + +static int ne6x_netlink_rcv_msg(struct nlmsghdr *nlh) +{ + char name[IFNAMSIZ] = {0}; + struct net_device *dev; + struct ne6x_pf *pf; + void *data; + int err; + + strncpy(name, nlmsg_data(nlh), IFNAMSIZ - 1); + dev = __dev_get_by_name(&init_net, name); + if (unlikely(!dev)) + return -ENODEV; + + if (unlikely(!netif_is_ne6x(dev))) + return -EOPNOTSUPP; + + pf = ne6x_netdev_to_pf(dev); + data = nlmsg_data(nlh) + IFNAMSIZ; + + switch (nlh->nlmsg_type) { + case NE6X_NLMSG_TAB_ADD: + /* if entry exists, treat it as insertion success */ + err = ne6x_netlink_tab_add(pf, data); + if (err == -EEXIST) + err = 0; + break; + case NE6X_NLMSG_TAB_DEL: + err = ne6x_netlink_tab_del(pf, data); + break; + case NE6X_NLMSG_METER_WRITE: + err = ne6x_netlink_meter_write(pf, data); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +static void ne6x_netlink_ack(struct sk_buff *in_skb, unsigned long *status) +{ + struct sk_buff *skb_out; + struct nlmsghdr *nlh; + size_t payload; + + payload = BITS_TO_LONGS(NE6X_RULE_BATCH_MAX) * sizeof(unsigned long); + skb_out = nlmsg_new(payload, GFP_KERNEL); + if (unlikely(!skb_out)) { + NETLINK_CB(in_skb).sk->sk_err = ENOBUFS; + NETLINK_CB(in_skb).sk->sk_error_report(NETLINK_CB(in_skb).sk); + return; + } + + nlh = nlmsg_put(skb_out, NETLINK_CB(in_skb).portid, 0, NLMSG_DONE, payload, 0); + if (unlikely(!nlh)) { + nlmsg_free(skb_out); + return; + } + + NETLINK_CB(skb_out).dst_group = 0; + bitmap_copy(nlmsg_data(nlh), status, NE6X_RULE_BATCH_MAX); + + nlmsg_unicast(in_skb->sk, skb_out, NETLINK_CB(in_skb).portid); +} + +static void ne6x_netlink_rcv(struct sk_buff *skb) +{ + DECLARE_BITMAP(status, NE6X_RULE_BATCH_MAX); + u32 idx = 0; + + bitmap_zero(status, NE6X_RULE_BATCH_MAX); + mutex_lock(&ne6x_msg_mutex); + while (skb->len >= nlmsg_total_size(0) && idx < NE6X_RULE_BATCH_MAX) { + struct nlmsghdr *nlh; + int msglen, err; + + nlh = nlmsg_hdr(skb); + + if (unlikely(nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)) { + set_bit(idx, status); + goto skip; + } + + err = ne6x_netlink_rcv_msg(nlh); + if (err) + set_bit(idx, status); + +skip: + msglen = NLMSG_ALIGN(nlh->nlmsg_len); + if (unlikely(msglen > skb->len)) + msglen = skb->len; + + idx++; + skb_pull(skb, msglen); + } + + ne6x_netlink_ack(skb, status); + mutex_unlock(&ne6x_msg_mutex); +} + +/** + * ne6x_netlink_init - start up netlink resource for the driver + **/ +void ne6x_netlink_init(void) +{ + struct netlink_kernel_cfg ne6x_netlink_cfg = { + .input = ne6x_netlink_rcv, + }; + + ne6x_nlsock = netlink_kernel_create(&init_net, NE6X_NETLINK, &ne6x_netlink_cfg); + if (unlikely(!ne6x_nlsock)) + pr_warn("Init of netlink failed\n"); +} + +/** + * ne6x_netlink_exit - clean out the driver's netlink resource + **/ +void ne6x_netlink_exit(void) +{ + netlink_kernel_release(ne6x_nlsock); + ne6x_nlsock = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h new file mode 100644 index 0000000000000000000000000000000000000000..61a6cd1347bde093b3d8d23d9776c81f4058caa7 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/
+
+#ifndef _NE6X_NETLINK_H
+#define _NE6X_NETLINK_H
+
+#define NE6X_NETLINK		31
+#define NE6X_HASH_KEY_SIZE	64
+#define NE6X_HASH_DATA_SIZE	64
+#define NE6X_RULE_BATCH_MAX	64
+#define NE6X_METER_TYPE_MAX	8
+#define NE6X_METER_OPCODE_MAX	1
+#define NE6X_ADDR_LEN		16
+
+/* netlink message opcodes */
+enum {
+	NE6X_NLMSG_BASE = 0x10,	/* types below 0x10 are reserved for control messages */
+	NE6X_NLMSG_TAB_ADD = NE6X_NLMSG_BASE,
+	NE6X_NLMSG_TAB_DEL,
+	NE6X_NLMSG_METER_WRITE,
+	NE6X_NLMSG_MAX
+};
+
+struct ne6x_rule {
+	u8 dst[NE6X_ADDR_LEN];
+	u8 src[NE6X_ADDR_LEN];
+	u32 proto;
+} __packed;
+
+struct ne6x_meter {
+	u8 type_num;
+	u8 opcode;
+	u32 value;
+} __packed;
+
+void ne6x_netlink_init(void);
+void ne6x_netlink_exit(void);
+
+#endif
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h
new file mode 100644
index 0000000000000000000000000000000000000000..b60470095d9902ae20eda6eb1b0580c9767b2be7
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */
+
+#ifndef _NE6X_PORTMAP_H
+#define _NE6X_PORTMAP_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define PBMP_DWORD_NUM	4
+#define PBMP_WORD_WIDTH	32
+
+typedef u32 pbmp_t[PBMP_DWORD_NUM];
+
+#define SET_BIT(DAT, POS)	((DAT) |= ((u32)0x1 << (POS)))
+#define CLR_BIT(DAT, POS)	((DAT) &= (~((u32)0x01 << (POS))))
+
+#define PBMP_DWORD_GET(bm, word)	((bm)[(word)])
+#define PBMP_CLEAR(bm) \
+	(PBMP_DWORD_GET(bm, 0) = PBMP_DWORD_GET(bm, 1) = \
+	 PBMP_DWORD_GET(bm, 2) = \
+	 PBMP_DWORD_GET(bm, 3) = 0)
+
+#define PBMP_WNET(port)	((port) / PBMP_WORD_WIDTH)
+#define PBMP_WBIT(port)	(1LU << ((port) % PBMP_WORD_WIDTH))
+
+#define PBMP_ENTRY(bm, port) \
+	(PBMP_DWORD_GET(bm, PBMP_WNET(port)))
+
+#define PBMP_PORT_REMOVE(bm, port) \
+	(PBMP_ENTRY(bm, port) &= ~(PBMP_WBIT(port)))
+
+#define PBMP_PORT_ADD(bm, port) \
+	(PBMP_ENTRY(bm, port) |= PBMP_WBIT(port))
+
+#endif
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..6015d51465c4a0affdafa9da400dd31822072cf8
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */
+
+#include <linux/proc_fs.h>
+
+#include "ne6x.h"
+#include "ne6x_reg.h"
+#include "ne6x_dev.h"
+
+static struct proc_dir_entry *ne6x_proc_root;
+
+static ssize_t ne6x_proc_tps_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct ne6x_soc_temperature temp = {0};
+	struct ne6x_soc_power power = {0};
+	struct device *dev = NULL;
+	struct ne6x_pf *pf = NULL;
+	char *info = NULL;
+	ssize_t len = 0;
+	int err;
+
+	if (*ppos > 0 || count < PAGE_SIZE)
+		return 0;
+
+	info = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	pf = filp->private_data;
+	dev = &pf->pdev->dev;
+	err = ne6x_dev_get_temperature_info(pf, &temp);
+	if (err) {
+		dev_err(dev, "get device temperature failed\n");
+	} else {
+		len += sprintf(info, "Chip temperature (°C) %d\n", temp.chip_temerature);
+		len += sprintf(info + len, "NIC temperature (°C) %d\n", temp.board_temperature);
+	}
+
+	err = ne6x_dev_get_power_consum(pf, &power);
+	if (err) {
+		dev_err(dev, "get device power failed\n");
+	} else {
+		len += sprintf(info + len, "Current (A) %d.%03d\n",
+			       power.cur / 1000, power.cur % 1000);
+		len += sprintf(info + len, "Voltage (V) %d.%03d\n",
+			       power.vol / 1000, power.vol % 1000);
+		len += sprintf(info + len, "Power (W) %d.%03d\n",
+			       power.power / 1000, power.power % 1000);
+	}
+
+	if (!len) {
+		kfree(info);
+		return len;
+	}
+
+	if (copy_to_user(buf, info, len)) {
+		kfree(info);
+		return -EFAULT;
+	}
+
+	*ppos = len;
+	kfree(info);
+	return len;
+}
+
+static ssize_t ne6x_proc_i2c_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct device *dev = NULL;
+	struct ne6x_pf *pf = NULL;
+	char info[512] = {0};
+	ssize_t len = 0;
+	u32 id = 0;
+	int err;
+
+	if (*ppos > 0 || count < 512)
+		return 0;
+
+	pf = filp->private_data;
+	dev = &pf->pdev->dev;
+	err = ne6x_dev_i2c3_signal_test(pf, &id);
+	if (err)
+		dev_err(dev, "get device i2c external info failed\n");
+	else
+		len += sprintf(info, "I2c external sig test %d\n", id & 0xff);
+
+	if (!len)
+		return len;
+
+	if (copy_to_user(buf, info, len))
+		return -EFAULT;
+
+	*ppos = len;
+	return len;
+}
+
+static int ne6x_tps_open(struct inode *inode, struct file *file)
+{
+	file->private_data = pde_data(inode);
+
+	return 0;
+}
+
+static int ne6x_i2c_open(struct inode *inode, struct file *file)
+{
+	file->private_data = pde_data(inode);
+
+	return 0;
+}
+
+static const struct proc_ops ne6x_proc_tps_fops = {
+	.proc_open = ne6x_tps_open,
+	.proc_read = ne6x_proc_tps_read,
+};
+
+static const struct proc_ops ne6x_proc_i2c_fops = {
+	.proc_open = ne6x_i2c_open,
+	.proc_read = ne6x_proc_i2c_read,
+};
+
+void ne6x_proc_pf_init(struct ne6x_pf *pf)
+{
+	struct proc_dir_entry *pfile = NULL;
+	const struct device *dev = NULL;
+	const char *name = NULL;
+
+	name = pci_name(pf->pdev);
+	dev = &pf->pdev->dev;
+	pf->ne6x_proc_pf = proc_mkdir(name, ne6x_proc_root);
+	if (!pf->ne6x_proc_pf) {
+		dev_err(dev, "proc dir %s create failed\n", name);
+		return;
+	}
+
+	pfile = proc_create_data("temperature_power_state", 0600, pf->ne6x_proc_pf,
+				 &ne6x_proc_tps_fops, pf);
+	if (!pfile) {
+		dev_err(dev, "proc file temperature_power_state create failed\n");
+		goto create_failed;
+	}
+
+	pfile = proc_create_data("i2c_test", 0600, pf->ne6x_proc_pf, &ne6x_proc_i2c_fops, pf);
+	if (!pfile) {
+		dev_err(dev, "proc file i2c_test create failed\n");
+		goto create_failed;
+	}
+
+	return;
+
+create_failed:
+	proc_remove(pf->ne6x_proc_pf);
+}
+
+void ne6x_proc_pf_exit(struct ne6x_pf *pf)
+{
+	proc_remove(pf->ne6x_proc_pf);
+	pf->ne6x_proc_pf = NULL;
+}
+
+extern char ne6x_driver_name[];
+
+void ne6x_proc_init(void)
+{
+	ne6x_proc_root = proc_mkdir(ne6x_driver_name, NULL);
+	if (!ne6x_proc_root)
+		pr_warn("init of proc failed\n");
+}
+
+void ne6x_proc_exit(void)
+{
+	proc_remove(ne6x_proc_root);
+	ne6x_proc_root = NULL;
+}
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h
new file mode 100644
index 0000000000000000000000000000000000000000..d4ce94cab66b288f905d9c5df76d74a9d5e6ed31
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */
+
+#ifndef _NE6X_PROCFS_H
+#define _NE6X_PROCFS_H
+
+struct ne6x_pf;
+
+void ne6x_proc_pf_init(struct ne6x_pf *pf);
+void ne6x_proc_pf_exit(struct ne6x_pf *pf);
+void ne6x_proc_init(void);
+void ne6x_proc_exit(void);
+
+#endif
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c
new file mode 100644
index 0000000000000000000000000000000000000000..2b7f6f24ca2514500c8b00349506a66ed4004aa6
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c
@@ -0,0 +1,1620 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "ne6x.h"
+#include "ne6x_reg.h"
+#include "ne6x_portmap.h"
+
+#define AXIA_MBUS_READ_MEMORY_COMMAND		0x07
+#define AXIA_MBUS_READ_MEMORY_ACK		0x08
+
+#define AXIA_MBUS_WRITE_MEMORY_COMMAND		0x09
+#define AXIA_MBUS_WRITE_MEMORY_ACK		0x0A
+
+#define AXIA_MBUS_READ_REGISTER_COMMAND		0x0B
+#define AXIA_MBUS_READ_REGISTER_ACK		0x0C
+
+#define AXIA_MBUS_WRITE_REGISTER_COMMAND	0x0D
+#define AXIA_MBUS_WRITE_REGISTER_ACK		0x0E
+
+#define AXIA_MBUS_RESET_FIRMWARE_COMMAND	0x0F
+#define AXIA_MBUS_RESET_FIRMWARE_ACK		0x10
+#define AXIA_MBUS_READ_TABLE_COMMAND		0x11
+#define AXIA_MBUS_READ_TABLE_ACK		0x12
+
+#define AXIA_MBUS_WRITE_TABLE_COMMAND		0x13
+#define AXIA_MBUS_WRITE_TABLE_ACK		0x14
+
+#define AXIA_MBUS_CLEARUP_COMMAND		0x15
+#define AXIA_MBUS_CLEARUP_ACK			0x16
+
+/* hash table operator */
+#define AXIA_MBUS_INSERT_COMMAND		0x17
+#define AXIA_MBUS_INSERT_ACK			0x18
+
+#define AXIA_MBUS_UPDATE_COMMAND		0x19
+#define AXIA_MBUS_UPDATE_ACK			0x1A
+
+#define AXIA_MBUS_DELETE_COMMAND		0x1B
+#define AXIA_MBUS_DELETE_ACK			0x1C
+
+#define AXIA_MBUS_LOOKUP_COMMAND		0x1D
+#define AXIA_MBUS_LOOKUP_ACK			0x1E
+
+/* data download operator */
+#define AXIA_MBUS_DOWNLOAD_COMMAND		0x21
+#define AXIA_MBUS_DOWNLOAD_ACK			0x22
+
+#define AXIA_MBUS_OPERATOR_COMMAND		0x23
+#define AXIA_MBUS_OPERATOR_ACK			0x24
+
+#define AXIA_MBUS_SETUP_PORT_COMMAND		0x25
+#define AXIA_MBUS_SETUP_PORT_ACK		0x26
+
+#define AXIA_MBUS_SETUP_TABLE_COMMAND		0x27
+#define AXIA_MBUS_SETUP_TABLE_ACK		0x28
+
+#define AXIA_MBUS_SETUP_TAPI_COMMAND		0x29
+#define AXIA_MBUS_SETUP_TAPI_ACK		0x2A
+
+#define AXIA_MBUS_SETUP_HASH_COMMAND		0x2B
+#define AXIA_MBUS_SETUP_HASH_ACK		0x2C
+
+#define AXIA_MBUS_SETUP_DTAB_COMMAND		0x2D
+#define AXIA_MBUS_SETUP_DTAB_ACK		0x2E
+
+#define AXIA_MBUS_E2PROM_READ_COMMAND		0x2F
+#define AXIA_MBUS_E2PROM_READ_ACK		0x30
+
+#define AXIA_MBUS_E2PROM_WRITE_COMMAND		0x31
+#define AXIA_MBUS_E2PROM_WRITE_ACK		0x32
+
+#define AXIA_MBUS_SET_FAN_SPEED_COMMAND		0x33
+#define AXIA_MBUS_SET_FAN_SPEED_ACK		0x34
+
+#define AXIA_MBUS_GET_FAN_SPEED_COMMAND		0x35
+#define AXIA_MBUS_GET_FAN_SPEED_ACK		0x36
+
+#define AXIA_MBUS_GET_SYSTEM_INFO_COMMAND	0x37
+#define AXIA_MBUS_GET_SYSTEM_INFO_ACK		0x38
+
+#define AXIA_MBUS_UPGRADE_PRE_COMMAND		0x39
+#define AXIA_MBUS_UPGRADE_PRE_COMMAND_ACK 0x3A +#define AXIA_MBUS_UPGRADE_COMMAND 0x3B +#define AXIA_MBUS_UPGRADE_COMMAND_ACK 0x3C + +#define AXIA_MBUS_GET_VER_COMMAND 0x3D +#define AXIA_MBUS_GET_VER_COMMAND_ACK 0x3E + +#define AXIA_MBUS_TALK_PORT_BASE 0x41 + +#define AXIA_MBUS_TALK_SET_PORT_ENABLE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 0) +#define AXIA_MBUS_TALK_SET_PORT_ENABLE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 1) + +#define AXIA_MBUS_TALK_GET_PORT_ENABLE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 2) +#define AXIA_MBUS_TALK_GET_PORT_ENABLE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 3) + +#define AXIA_MBUS_TALK_SET_PORT_DUPLEX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 0) +#define AXIA_MBUS_TALK_SET_PORT_DUPLEX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 1) + +#define AXIA_MBUS_TALK_GET_PORT_DUPLEX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 2) +#define AXIA_MBUS_TALK_GET_PORT_DUPLEX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 0) +#define AXIA_MBUS_TALK_SET_PORT_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 2) +#define AXIA_MBUS_TALK_GET_PORT_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 3) + +#define AXIA_MBUS_TALK_SET_PORT_STATS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 0) +#define AXIA_MBUS_TALK_SET_PORT_STATS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 1) + +#define AXIA_MBUS_TALK_GET_PORT_STATS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 2) +#define AXIA_MBUS_TALK_GET_PORT_STATS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SFP_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 0) +#define AXIA_MBUS_TALK_SET_PORT_SFP_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SFP_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 2) +#define AXIA_MBUS_TALK_GET_PORT_SFP_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 3) + +#define AXIA_MBUS_TALK_SET_PORT_FEC_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 0) +#define AXIA_MBUS_TALK_SET_PORT_FEC_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 1) + +#define AXIA_MBUS_TALK_GET_PORT_FEC_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 2) +#define AXIA_MBUS_TALK_GET_PORT_FEC_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SPEED_MAX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 0) +#define AXIA_MBUS_TALK_SET_PORT_SPEED_MAX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SPEED_MAX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 2) +#define AXIA_MBUS_TALK_GET_PORT_SPEED_MAX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 3) + +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 0) +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 1) + +#define 
AXIA_MBUS_TALK_GET_PORT_PAUSE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 2) +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 3) + +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ADDR_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 0) +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ADDR_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 1) + +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_ADDR_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 2) +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_ADDR_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 3) + +#define AXIA_MBUS_TALK_SET_PORT_LOOPBACK_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 0) +#define AXIA_MBUS_TALK_SET_PORT_LOOPBACK_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 1) + +#define AXIA_MBUS_TALK_GET_PORT_LOOPBACK_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 2) +#define AXIA_MBUS_TALK_GET_PORT_LOOPBACK_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 3) + +#define AXIA_MBUS_TALK_SET_PORT_MAX_FRAME_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 0) +#define AXIA_MBUS_TALK_SET_PORT_MAX_FRAME_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 1) + +#define AXIA_MBUS_TALK_GET_PORT_MAX_FRAME_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 2) +#define AXIA_MBUS_TALK_GET_PORT_MAX_FRAME_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 3) + +#define AXIA_MBUS_TALK_SET_PORT_AUTO_NEG_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 0) +#define AXIA_MBUS_TALK_SET_PORT_AUTO_NEG_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 1) + +#define AXIA_MBUS_TALK_GET_PORT_AUTO_NEG_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 2) +#define AXIA_MBUS_TALK_GET_PORT_AUTO_NEG_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 3) + +#define AXIA_MBUS_TALK_SET_PORT_INFO_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 0) +#define AXIA_MBUS_TALK_SET_PORT_INFO_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 1) + +#define AXIA_MBUS_TALK_GET_PORT_INFO_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 2) +#define AXIA_MBUS_TALK_GET_PORT_INFO_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 3) + +#define AXIA_MBUS_TALK_SET_PORT_LINK_STATUS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 0) +#define AXIA_MBUS_TALK_SET_PORT_LINK_STATUS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 1) + +#define AXIA_MBUS_TALK_GET_PORT_LINK_STATUS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 2) +#define AXIA_MBUS_TALK_GET_PORT_LINK_STATUS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 3) + +#define AXIA_MBUS_TALK_SET_PORT_DRV_I2C_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 0) +#define AXIA_MBUS_TALK_SET_PORT_DRV_I2C_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 1) + +#define AXIA_MBUS_TALK_GET_PORT_DRV_I2C_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 2) +#define AXIA_MBUS_TALK_GET_PORT_DRV_I2C_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SELF_TEST_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 0) +#define AXIA_MBUS_TALK_SET_PORT_SELF_TEST_ACK \ + 
(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SELF_TEST_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 2) +#define AXIA_MBUS_TALK_GET_PORT_SELF_TEST_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SFP_TYPE_LEN_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 0) +#define AXIA_MBUS_TALK_SET_PORT_SFP_TYPE_LEN_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SFP_TYPE_LEN_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 2) +#define AXIA_MBUS_TALK_GET_PORT_SFP_TYPE_LEN_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SFP_EEPROM_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 0) +#define AXIA_MBUS_TALK_SET_PORT_SFP_EEPROM_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 2) +#define AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 3) + +#define AXIA_MBUS_TALK_SET_PORT_STATE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 0) +#define AXIA_MBUS_TALK_SET_PORT_STATE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 1) + +#define AXIA_MBUS_TALK_GET_PORT_STATE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 2) +#define AXIA_MBUS_TALK_GET_PORT_STATE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 3) + +#define AXIA_MBUS_SET_NIC_START_COMMAND 0x9F +#define AXIA_MBUS_SET_NIC_START_ACK 0xA0 +#define AXIA_MBUS_SET_NIC_STOP_COMMAND 0xA1 +#define AXIA_MBUS_SET_NIC_STOP_ACK 0xA2 +#define AXIA_MBUS_GET_NIC_STATE_COMMAND 0xA3 +#define AXIA_MBUS_GET_NIC_STATE_ACK 0xA4 +#define AXIA_MBUS_SET_NP_USERDATA_COMMAND 0xA5 +#define AXIA_MBUS_SET_NP_USERDATA_ACK 0xA6 +#define AXIA_MBUS_GET_NP_USERDATA_COMMAND 0xA7 +#define AXIA_MBUS_GET_NP_USERDATA_ACK 0xA8 + +#define AXIA_MBUS_SET_LED_STATE_COMMAND 0xA9 +#define AXIA_MBUS_SET_LED_STATE_ACK 0xAA + +#define AXIA_MBUS_CONFIG_METER_COMMAND 0xAB +#define AXIA_MBUS_CONFIG_METER_ACK 0xAC + +#define AXIA_MBUS_CLEAR_CREDIT_COMMAND 0xAD +#define AXIA_MBUS_CLEAR_CREDIT_ACK 0xAE + +#define AXIA_MBUS_SET_FAST_L2FDB_COMMAND 0xD1 +#define AXIA_MBUS_SET_FAST_L2FDB_ACK 0xD2 + +#define AXIA_MBUS_GET_DUMP_DATA_LEN_COMMAND 0xD3 +#define AXIA_MBUS_GET_DUMP_DATA_LEN_ACK 0xD4 + +#define AXIA_MBUS_GET_DUMP_DATA_COMMAND 0xD5 +#define AXIA_MBUS_GET_DUMP_DATA_ACK 0xD6 + +#define AXIA_MBUS_CLR_TABLE_COMMAND 0xD7 +#define AXIA_MBUS_CLR_TABLE_ACK 0xD8 + +#define AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_COMMAND 0xD9 +#define AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_ACK 0xDA + +#define AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_COMMAND 0xDB +#define AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_ACK 0xDC + +#define AXIA_MBUS_OPT_NOFLASH_COMMAND 0xDD +#define AXIA_MBUS_OPT_NOFLASH_ACK 0xDE + +#define PCIE2C810_SHM_MBUS_BASE 0x20878000 +#define PCIE2C810_SHM_DATA_BASE 0x20878004 + +#define MEM_ONCHIP_64BIT 0x00 +#define MEM_ONCHIP_512BIT 0x01 +#define MEM_ONXDDR_512BIT 0x04 + +enum engine_idx { + ENGINE_DIRECT_TABLE0 = 0x1, + ENGINE_DIRECT_TABLE1, + ENGINE_HASHA_TABLE, + ENGINE_HASHB_TABLE, +}; + +struct axia_mbus_msg { + union { + u32 uint; + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 opcode : 8; + u32 dst_block : 4; + u32 src_block : 4; + u32 data_len : 14; + u32 e 
: 2;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+			u32 e : 2;
+			u32 data_len : 14;
+			u32 src_block : 4;
+			u32 dst_block : 4;
+			u32 opcode : 8;
+#endif
+		} bits;
+	} hdr;
+	u32 data[];
+} __packed;
+
+struct ne6x_diag_reg_test_info ne6x_reg_list[] = {
+	/* offset            mask                elements     stride */
+	{NE6X_VP_BASE_ADDR, 0xFFFFFFFFFFFFFFFF, NE6X_VP_INT, 0},
+	{0}
+};
+
+struct ne6x_reg_table_info {
+	u32 addr;	/* engine id as base address */
+	u32 size;	/* 00 - 15: length
+			 * 16 - 20:
+			 * 21 - 23: entry_num
+			 * 24 - 26: mem_type
+			 * 27 - 27: mem_type_bucket
+			 * 28 - 31: opcode
+			 */
+	u32 opcode_read;
+	u32 opcode_write;
+#define ADV_CMD_DISABLE	0x00
+#define ADV_CMD_ENABLE	0x01
+	u32 advanced_cmd;
+	u32 opcode_insert;
+	u32 opcode_delete;
+	u32 opcode_lookup;
+	u32 opcode_update;
+	u32 size_insert;
+	u32 size_delete;
+	u32 size_lookup;
+	u32 size_update;
+};
+
+static struct ne6x_reg_table_info table_info[] = {
+	/* addr       size (engine idx + mem_type + bucket + entry_num + length)
+	 * read  write  adv_cmd  insert  delete  lookup  update
+	 * size_insert  size_delete  size_lookup  size_update
+	 */
+	{0x00000000,
+	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (8 << 16) | 0x0200,
+	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x00, 0x00, 0x00, 0x00,
+	 0x00, 0x00, 0x00, 0x00},
+
+	{0x10000000,
+	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040,
+	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01,
+	 AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND,
+	 AXIA_MBUS_UPDATE_COMMAND, 128, 64, 64, 64},
+
+	{0x20000000,
+	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0010,
+	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00,
+	 0x00, 0x00, 0x00, 0x00},
+
+	{0x30000000,
+	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (8 << 16) | 0x0008,
+	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00,
+	 0x00, 0x00, 0x00, 0x00},
+
+	{0x40000000,
+	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (4 << 16) | 0x0100,
+	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00,
+	 0x00, 0x00, 0x00, 0x00},
+
+	{0x50000000,
+	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_512BIT << 24) | (1 << 21) | (1 << 16) | 0x0040,
+	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00,
+	 0x00, 0x00, 0x00, 0x00},
+
+	{0x60000000,
+	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040,
+	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01,
+	 AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND,
+	 AXIA_MBUS_UPDATE_COMMAND, 128, 64, 64, 64},
+
+	{0x70000000,
+	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040,
+	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01,
+	 AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND,
+	 AXIA_MBUS_UPDATE_COMMAND, 96, 64, 64, 32},
+};
+
+#define TABLE_ADDR(table)	(table_info[table].addr & 0xF0000000)
+#define TABLE_SIZE(table)	(table_info[table].size & 0x00000FFF)
+#define TABLE_XMEM(table)	(table_info[table].size & 0xFFE00000)
+#define TABLE_XNUM(table)	((table_info[table].size >> 16) & 0xF)
+
+#define TABLE_OPCODE_WRITE(table)	(table_info[table].opcode_write & 0x3F)
+#define TABLE_OPCODE_READ(table)	(table_info[table].opcode_read & 0x3F)
+#define
TABLE_ADVCMD_VALID(table) (table_info[table].advanced_cmd == 0x01) +#define TABLE_OPCODE_INSERT(table) (table_info[table].opcode_insert & 0x3F) +#define TABLE_OPCODE_DELETE(table) (table_info[table].opcode_delete & 0x3F) +#define TABLE_OPCODE_LOOKUP(table) (table_info[table].opcode_lookup & 0x3F) + +#define TABLE_OPCODE_UPDATE(table) (table_info[table].opcode_update & 0x3F) + +#define TABLE_SIZE_INSERT(table) (table_info[table].size_insert) +#define TABLE_SIZE_DELETE(table) (table_info[table].size_delete) +#define TABLE_SIZE_LOOKUP(table) (table_info[table].size_lookup) +#define TABLE_SIZE_UPDATE(table) (table_info[table].size_update) +#define TABLE_SIZE_LOOKUP_RET(table) (table_info[table].size & 0xFFF) + +#define NUM_TABLE(table) (table_info[table].table_num) + +static u64 local_module_base; + +void ne6x_reg_lock(struct ne6x_pf *pf) +{ + mutex_lock(&pf->mbus_comm_mutex); +} + +void ne6x_reg_unlock(struct ne6x_pf *pf) +{ + mutex_unlock(&pf->mbus_comm_mutex); +} + +void ne6x_switch_pci_write(void *bar_base, u32 base_addr, u32 offset_addr, u64 reg_value) +{ + unsigned int reg_offset = 0; + void __iomem *addr = NULL; + + reg_offset = (base_addr << 12) + (offset_addr << 4); + addr = bar_base + reg_offset; + writeq(reg_value, addr); +} + +u64 ne6x_switch_pci_read(void *bar_base, u32 base_addr, u32 offset_addr) +{ + unsigned int reg_offset = 0; + void __iomem *addr = NULL; + u64 val = 0; + + reg_offset = (base_addr << 12) + (offset_addr << 4); + addr = bar_base + reg_offset; + val = readq(addr); + + return val; +} + +void ne6x_reg_pci_write(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr, u64 reg_value) +{ + ne6x_switch_pci_write(pf->hw.hw_addr4, base_addr, offset_addr, reg_value); +} + +u64 ne6x_reg_pci_read(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr) +{ + return ne6x_switch_pci_read(pf->hw.hw_addr4, base_addr, offset_addr); +} + +#define BAR4_CSR_OFFSET 0x3C0 +u32 ne6x_reg_axi_read(struct ne6x_pf *pf, u32 offset) +{ + u64 reg_offset = offset & 0xFFFFFFFC; + u64 reg_value = 0x4000000000000000ULL + (reg_offset << 30); + + ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value); + reg_value = (reg_offset << 30); + ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value); + reg_value = ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0); + reg_value = ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0); + + return ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0) & 0xFFFFFFFFUL; +} + +void ne6x_reg_axi_write(struct ne6x_pf *pf, u32 offset, u32 value) +{ + u64 reg_offset = offset & 0xFFFFFFFC; + u64 reg_value = 0x4000000000000000ULL + (reg_offset << 30) + value; + + reg_offset = (reg_offset << 30); + ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value); +} + +u32 _reg_apb_read(struct ne6x_pf *pf, u64 offset) +{ + u32 offset_l = 0x27A00000 | ((offset << 4) & 0xFFFF0); + u32 offset_h; + u32 data = 0; + + if ((offset & 0xFFFFF0000ULL) != local_module_base) { + offset_h = 0x10000000 | ((offset >> 12) & 0xFFFFF0); + ne6x_reg_axi_write(pf, offset_h, 0xA1B2C3D4); + } + + data = ne6x_reg_axi_read(pf, offset_l); + + return data; +} + +void _reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value) +{ + u32 offset_l; + u32 offset_h; + + if ((offset & 0xFFFFF0000ULL) != local_module_base) { + offset_h = 0x10000000 | ((offset >> 12) & 0xFFFFF0); + ne6x_reg_axi_write(pf, offset_h, 0xA2B2C3D4); + } + + offset_l = 0x2FA00000 | ((offset << 4) & 0xFFFF0); + ne6x_reg_axi_write(pf, offset_l, value); +} + +u32 NE6X_ACCESS_TIMEOUT = 9999; +int _ne6x_reg_perform(struct ne6x_pf *pf, u32 *data, u32 *pbuf, u32 len, u32 retlen) +{ + 
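+	/* Mailbox ("mbus") handshake with the embedded C810 management CPU:
+	 *   1. write the request words into the shared-memory mailbox at
+	 *      PCIE2C810_SHM_MBUS_BASE;
+	 *   2. ring the doorbell register (0x20680014) to notify the C810;
+	 *   3. poll the mailbox header until the opcode turns even - requests
+	 *      use odd opcodes, the firmware acks with opcode + 1;
+	 *   4. on success, read retlen response words from
+	 *      PCIE2C810_SHM_DATA_BASE; hdr.bits.e != 0 flags a firmware
+	 *      error.
+	 */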
struct axia_mbus_msg resp; + int timeout = 0, index = 0; + + memset(&resp, 0, sizeof(resp)); + + /* Write Command(s) */ + for (index = 0; index < len; index++) + _reg_apb_write(pf, PCIE2C810_SHM_MBUS_BASE + 4 * index, data[index]); + + /* Start mbus mechanism, notice c810 */ + _reg_apb_write(pf, 0x20680014, 0x3FEC); + + usleep_range(200, 300); + + /* check if c810 handle completed */ + while (timeout < NE6X_ACCESS_TIMEOUT) { + resp.hdr.uint = _reg_apb_read(pf, PCIE2C810_SHM_MBUS_BASE); + + /* resp opcode is even number, request opcode is odd number */ + if ((resp.hdr.bits.opcode & 0x01) == 0x0) + break; + + timeout++; + usleep_range(200, 220); + } + + if (timeout >= NE6X_ACCESS_TIMEOUT) { + dev_info(ne6x_pf_to_dev(pf), "%s: timeout! (%d)\n", __func__, timeout); + return -ETIMEDOUT; + } + + if (resp.hdr.bits.e == 1) { + dev_info(ne6x_pf_to_dev(pf), "%s: response.bits.e = 1 !\n", __func__); + return -EAGAIN; + } + + if (!pbuf) + return 0; + + for (index = 0; index < retlen; index++) + pbuf[index] = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE + 4 * index); + + return 0; +} + +int ne6x_reg_perform(struct ne6x_pf *pf, u32 *data, u32 *pbuf, u32 len, u32 retlen) +{ + int status; + + ne6x_reg_lock(pf); + status = _ne6x_reg_perform(pf, data, pbuf, len, retlen); + ne6x_reg_unlock(pf); + + return status; +} + +u32 ne6x_reg_apb_read(struct ne6x_pf *pf, u64 offset) +{ + u32 data; + + ne6x_reg_lock(pf); + data = _reg_apb_read(pf, offset); + ne6x_reg_unlock(pf); + + return data; +} + +void ne6x_reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value) +{ + ne6x_reg_lock(pf); + _reg_apb_write(pf, offset, value); + ne6x_reg_unlock(pf); +} + +int ne6x_reg_indirect_read(struct ne6x_pf *pf, u32 addr, u32 *value) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(16, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_READ_REGISTER_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = addr; + + status = ne6x_reg_perform(pf, (u32 *)msg, value, 2, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_indirect_write(struct ne6x_pf *pf, u32 addr, u32 value) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(16, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_WRITE_REGISTER_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = addr; + msg->data[1] = value; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0); + kfree(msg); + + return status; +} + +static bool ne6x_reg_valid_table(struct ne6x_pf *pf, enum ne6x_reg_table table) +{ + if (pf->hw_flag != 0) { + if (table > NE6X_REG_ARFS_TABLE) + return false; + } else { + if (table > NE6X_REG_VF_BW_TABLE) + return false; + } + + return true; +} + +int ne6x_reg_table_read(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + if (size % TABLE_SIZE(table) != 0x00) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_READ(table)); + msg->hdr.bits.data_len = 12; + msg->data[0] = TABLE_ADDR(table) + index * TABLE_XNUM(table); + msg->data[1] = TABLE_XMEM(table) + size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)data, 3, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_table_write(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size) +{ + struct axia_mbus_msg 
*msg; + int status; + + if (TABLE_ADVCMD_VALID(table)) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_WRITE(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table) + index * TABLE_XNUM(table); + msg->data[1] = TABLE_XMEM(table) + size; + memcpy(&msg->data[2], data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + size / 4, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_table_insert(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *table_id) +{ + struct axia_mbus_msg *msg; + int status, count; + + if (TABLE_ADVCMD_VALID(table) == 0x0) + return -EINVAL; + + if (size % TABLE_SIZE_INSERT(table) != 0x00) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + count = size / TABLE_SIZE_INSERT(table); + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_INSERT(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table); + msg->data[1] = TABLE_XMEM(table) + TABLE_SIZE_INSERT(table); + memcpy((void *)&msg->data[2], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, table_id, 3 + (size >> 2), + (!table_id) ? 0 : count); + kfree(msg); + + return status; +} + +int ne6x_reg_table_delete(struct ne6x_pf *pf, enum ne6x_reg_table table, u32 *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + if (TABLE_ADVCMD_VALID(table) == 0x0) + return -EINVAL; + + if (TABLE_SIZE_DELETE(table) != size) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_DELETE(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table); + msg->data[1] = TABLE_XMEM(table) + size; + memcpy(&msg->data[2], data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (size >> 2), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_table_search(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *ret_data, int ret_size) +{ + struct axia_mbus_msg *msg; + int status; + + if (TABLE_ADVCMD_VALID(table) == 0x0) + return -EINVAL; + + if (size % TABLE_SIZE_LOOKUP(table) != 0x00) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1036, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_LOOKUP(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table); + msg->data[1] = TABLE_XMEM(table) + TABLE_SIZE_LOOKUP_RET(table); + memcpy((void *)&msg->data[2], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, ret_data, 3 + (size >> 2), ret_size / 4); + kfree(msg); + + return (status != 0) ? 
-ENOENT : status;
+}
+
+int ne6x_reg_table_update(struct ne6x_pf *pf, enum ne6x_reg_table table,
+			  u32 index, u32 *data, int size)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	if (TABLE_ADVCMD_VALID(table) == 0x0)
+		return -EINVAL;
+
+	if (size % TABLE_SIZE_UPDATE(table) != 0x00)
+		return -EINVAL;
+
+	if (!ne6x_reg_valid_table(pf, table))
+		return -EINVAL;
+
+	msg = kzalloc(1036, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_UPDATE(table));
+	msg->hdr.bits.data_len = 16 + size;
+	msg->data[0] = TABLE_ADDR(table);
+	msg->data[1] = index;
+	msg->data[2] = TABLE_SIZE_UPDATE(table);
+	memcpy((void *)&msg->data[3], (void *)data, size);
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4 + (size >> 2), 0);
+	kfree(msg);
+
+	return (status != 0) ? -ENOENT : status;
+}
+
+int ne6x_reg_talk_port(struct ne6x_pf *pf, enum ne6x_reg_talk_port talk,
+		       enum ne6x_reg_talk_opcode opcode,
+		       int port, void *pbuf, int size)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	if (((size % 4) != 0) || size > 512)
+		return -EINVAL;
+
+	msg = kzalloc(520, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = (AXIA_MBUS_TALK_PORT_BASE + 4 * talk + 2 * opcode);
+	msg->hdr.bits.data_len = 8 + size;
+	msg->data[0] = port;
+	if (pbuf)
+		memcpy(&msg->data[1], pbuf, size);
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, (opcode == NE6X_TALK_GET) ? pbuf : NULL,
+				  2 + ((opcode == NE6X_TALK_GET) ? 0 : (size >> 2)),
+				  (opcode == NE6X_TALK_GET) ? (size >> 2) : 0);
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_reset_firmware(struct ne6x_pf *pf)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	msg = kzalloc(32, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = AXIA_MBUS_RESET_FIRMWARE_COMMAND;
+	msg->hdr.bits.data_len = 4;
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 1, 0);
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_e2prom_read(struct ne6x_pf *pf, u32 offset, void *pbuf, int size)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	msg = kzalloc(1040, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	if (size > 2048)
+		size = 2048;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = AXIA_MBUS_E2PROM_READ_COMMAND;
+	msg->hdr.bits.data_len = 12;
+	msg->data[0] = offset;
+	msg->data[1] = size;
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)pbuf, 3, size / 4);
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_e2prom_write(struct ne6x_pf *pf, u32 offset, void *pbuf, int size)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	msg = kzalloc(1040, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	if (size > 1024)
+		size = 1024;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = AXIA_MBUS_E2PROM_WRITE_COMMAND;
+	msg->hdr.bits.data_len = 12 + (size / 4) * 4;
+	msg->data[0] = offset;
+	msg->data[1] = size;
+	/* payload starts after the offset and size words */
+	memcpy(&msg->data[2], pbuf, size);
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (size / 4), 0);
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_get_fan_speed(struct ne6x_pf *pf, u32 *speed)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	msg = kzalloc(32, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = AXIA_MBUS_GET_FAN_SPEED_COMMAND;
+	msg->hdr.bits.data_len = 4;
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)speed, 1, 1);
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_set_fan_speed(struct ne6x_pf *pf, u32 speed)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	msg =
kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_FAN_SPEED_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = speed; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_soc_info(struct ne6x_pf *pf, u32 class_type, u32 *ret, u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_SYSTEM_INFO_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = class_type; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)ret, 3, size >> 2); + kfree(msg); + + return status; +} + +int ne6x_reg_send_bit(struct ne6x_pf *pf, u32 port, u32 mode) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_SYSTEM_INFO_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = 4; + msg->data[1] = port; + msg->data[2] = mode; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_mem_read(struct ne6x_pf *pf, u32 addr, void *pbuf, u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + if (size > 1024) + size = 1024; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_READ_MEMORY_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = addr; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)pbuf, 3, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_mem_write(struct ne6x_pf *pf, u32 addr, void *pbuf, u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 1024) + size = 1024; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_WRITE_MEMORY_COMMAND; + msg->hdr.bits.data_len = 12 + (size / 4) * 4; + msg->data[0] = addr; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (size / 4), 0); + kfree(msg); + + return status; +} + +#define NE6X_FW_MAX_FRG_SIZE (4 * 1024) +int ne6x_reg_upgrade_firmware(struct ne6x_pf *pf, u8 region, u8 *data, int size) +{ + struct axia_mbus_msg *msg; + int offset = 0, left_size = 0, frag_size = 0; + int status = 0; + + msg = kzalloc(NE6X_FW_MAX_FRG_SIZE + 16, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + ne6x_reg_lock(pf); + /* scile begin */ + NE6X_ACCESS_TIMEOUT = 100000; + left_size = size; + while (left_size) { + frag_size = (left_size >= NE6X_FW_MAX_FRG_SIZE) ? 
NE6X_FW_MAX_FRG_SIZE : left_size;
+
+		msg->hdr.uint = 0;
+		msg->hdr.bits.opcode = AXIA_MBUS_UPGRADE_COMMAND;
+		msg->hdr.bits.data_len = 12 + frag_size;
+		msg->data[0] = region;		/* region */
+		msg->data[1] = frag_size;	/* size */
+		memcpy(&msg->data[2], data + offset, frag_size);
+
+		status |= _ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (frag_size >> 2), 0);
+		if (status)
+			goto err_upgrade;
+
+		left_size -= frag_size;
+		offset += frag_size;
+	}
+
+err_upgrade:
+	/* scile end */
+	NE6X_ACCESS_TIMEOUT = 9999;	/* restore the default access timeout */
+	ne6x_reg_unlock(pf);
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_get_ver(struct ne6x_pf *pf, struct ne6x_firmware_ver_info *version)
+{
+	struct axia_mbus_msg *msg;
+	u32 *out_buffer = (u32 *)version;
+	int status;
+
+	msg = kzalloc(40, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = AXIA_MBUS_GET_VER_COMMAND;
+	msg->hdr.bits.data_len = 4;
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, out_buffer, 1,
+				  sizeof(struct ne6x_firmware_ver_info) / sizeof(u32));
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_get_sfp_eeprom(struct ne6x_pf *pf, int port, void *pbuf, u32 offset, int size)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	msg = kzalloc(1040, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	if (size > 2048)
+		size = 2048;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_COMMAND;
+	msg->hdr.bits.data_len = 16;
+	msg->data[0] = port;
+	msg->data[1] = offset;
+	msg->data[2] = size;
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)pbuf, 4, size / 4);
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_nic_start(struct ne6x_pf *pf, u32 flag)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	msg = kzalloc(32, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = AXIA_MBUS_SET_NIC_START_COMMAND;
+	msg->hdr.bits.data_len = 8;
+	msg->data[0] = flag;
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0);
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_nic_stop(struct ne6x_pf *pf, u32 flag)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	msg = kzalloc(32, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = AXIA_MBUS_SET_NIC_STOP_COMMAND;
+	msg->hdr.bits.data_len = 8;
+	msg->data[0] = flag;
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0);
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_get_nic_state(struct ne6x_pf *pf, u32 *state)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	msg = kzalloc(32, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = AXIA_MBUS_GET_NIC_STATE_COMMAND;
+	msg->hdr.bits.data_len = 4;
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)state, 1, 1);
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_set_user_data_template(struct ne6x_pf *pf, enum np_user_data type, u32 data)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	msg = kzalloc(32, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = AXIA_MBUS_SET_NP_USERDATA_COMMAND;
+	msg->hdr.bits.data_len = 12;
+	msg->data[0] = type;
+	msg->data[1] = data;
+
+	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0);
+	kfree(msg);
+
+	return status;
+}
+
+int ne6x_reg_get_user_data_template(struct ne6x_pf *pf, enum np_user_data type, u32 *data)
+{
+	struct axia_mbus_msg *msg;
+	int status;
+
+	msg = kzalloc(32, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->hdr.uint = 0;
+	msg->hdr.bits.opcode = AXIA_MBUS_GET_NP_USERDATA_COMMAND;
msg->hdr.bits.data_len = 4; + msg->data[0] = type; + + status = ne6x_reg_perform(pf, (u32 *)msg, data, 2, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_set_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 data) +{ + return ne6x_reg_set_user_data_template(pf, type, data); +} + +int ne6x_reg_get_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 *data) +{ + return ne6x_reg_get_user_data_template(pf, type, data); +} + +int ne6x_reg_set_led(struct ne6x_pf *pf, int port, bool state) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_LED_STATE_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = port; + msg->data[1] = state; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_config_meter(struct ne6x_pf *pf, u32 meter_id, u32 *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_CONFIG_METER_COMMAND; + msg->hdr.bits.data_len = size + 8; + msg->data[0] = meter_id; + memcpy((void *)&msg->data[1], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2 + (size / 4), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_set_unicast_for_fastmode(struct ne6x_pf *pf, u32 index, u32 *data, + u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_FAST_L2FDB_COMMAND; + msg->hdr.bits.data_len = size + 8; + msg->data[0] = index; + memcpy((void *)&msg->data[1], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2 + (size / 4), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_dump_data_len(struct ne6x_pf *pf, u32 *size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_DUMP_DATA_LEN_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, size, 1, 1); + kfree(msg); + + return status; +} + +void ne6x_reg_send(struct ne6x_pf *pf, u32 cmd, u32 *data, u32 size) +{ + struct axia_mbus_msg *msg; + u32 *msg_data; + int index; + + msg = kzalloc(size + 12, GFP_KERNEL); + if (!msg) + return; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = cmd; + msg->hdr.bits.data_len = 4 + size; + memcpy((void *)&msg->data[0], (void *)data, size); + + msg_data = (u32 *)msg; + /* Write Command(s) */ + for (index = 0; index < ((size / 4) + 1); index++) + _reg_apb_write(pf, PCIE2C810_SHM_MBUS_BASE + 4 * index, msg_data[index]); + + /* Start the mbus mechanism and notify the C810 */ + _reg_apb_write(pf, 0x20680014, 0x3FEC); + usleep_range(1000, 1200); + kfree(msg); +} + +int ne6x_reg_polling(struct ne6x_pf *pf, u32 cmd, u32 *data, u32 buf_size, + u32 *real_size) +{ + int timeout = 0, offset = 0; + struct axia_mbus_msg resp; + int index, status; + + memset(&resp, 0, sizeof(resp)); + + /* check whether the C810 has completed handling the command */ + while (timeout < NE6X_ACCESS_TIMEOUT) { + resp.hdr.uint = _reg_apb_read(pf, PCIE2C810_SHM_MBUS_BASE); + if (resp.hdr.bits.opcode == cmd) + break; + + timeout++; + usleep_range(200, 220); + } + + status = (timeout >= NE6X_ACCESS_TIMEOUT) ? -ETIMEDOUT : 0;
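+ /* the e bit in the response header is the device-side error flag; it + * takes precedence over the timeout result and is mapped to -EAGAIN + */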
status = (resp.hdr.bits.e == 1) ? -EAGAIN : status; + if (status) { + dev_info(ne6x_pf_to_dev(pf), "%s: cmd %d status (%d)\n", __func__, cmd, status); + return status; + } + + switch (cmd) { + case AXIA_MBUS_GET_DUMP_DATA_ACK: + *real_size = resp.hdr.bits.data_len - sizeof(resp) - sizeof(u32); + offset = sizeof(u32); + pf->dump_info = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE); + break; + default: + *real_size = resp.hdr.bits.data_len - sizeof(resp); + offset = 0; + break; + } + + if (*real_size > buf_size) + *real_size = buf_size; + + for (index = 0; index < (*real_size) / 4; index++) + data[index] = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE + 4 * index + offset); + + return 0; +} + +int ne6x_reg_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size) +{ + u32 *temp_buff = data; + u32 left_size = size; + u32 real_size = 0; + + pf->dump_info = 0; + + ne6x_reg_lock(pf); + while (left_size > 0) { + temp_buff += real_size / 4; + ne6x_reg_send(pf, AXIA_MBUS_GET_DUMP_DATA_COMMAND, (u32 *)&pf->dump_info, 4); + if (ne6x_reg_polling(pf, AXIA_MBUS_GET_DUMP_DATA_ACK, + temp_buff, left_size, &real_size)) { + ne6x_reg_unlock(pf); + return -EAGAIN; + } + + left_size -= real_size; + } + ne6x_reg_unlock(pf); + + return 0; +} + +int ne6x_reg_clear_table(struct ne6x_pf *pf, u32 table_id) +{ + struct axia_mbus_msg *msg; + int status; + + if (!ne6x_reg_valid_table(pf, table_id)) + return -EINVAL; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + NE6X_ACCESS_TIMEOUT = 99999; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_CLR_TABLE_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = table_id; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + NE6X_ACCESS_TIMEOUT = 9999; + + return status; +} + +int ne6x_reg_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = write_protect; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(512, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, p_write_protect, 1, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_write_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *pdata) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(512, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + /* clamp the payload so it cannot overrun the 512-byte message buffer: + * the 4-byte header and the opcode/offset/length words leave room for + * 496 bytes of data + */ + if (length > 496) + length = 496; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16 + length; + msg->data[0] = NE6X_NORFLASH_OP_WRITE_E; + msg->data[1] = offset; + msg->data[2] = length; + memcpy((void *)&msg->data[3], (void *)pdata, length); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4 + (length >> 2), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_erase_norflash(struct ne6x_pf *pf, u32 offset, u32 length) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = NE6X_NORFLASH_OP_ERASE_E; + msg->data[1] = offset;
msg->data[2] = length; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_read_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *p) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = NE6X_NORFLASH_OP_READ_E; + msg->data[1] = offset; + msg->data[2] = length; + + status = ne6x_reg_perform(pf, (u32 *)msg, p, 4, length >> 2); + kfree(msg); + + return status; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..cf8a7c5767a1223b363f84beaa8cb16d2a8ae4a9 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_REG_H +#define _NE6X_REG_H + +#include <linux/types.h> + +struct ne6x_diag_reg_test_info { + u32 offset; /* the base register */ + u64 mask; /* bits that can be tested */ + u32 elements; /* number of elements if array */ + u32 stride; /* bytes between each element */ +}; + +enum ne6x_reg_table { + NE6X_REG_RSS_TABLE = 0x0, + NE6X_REG_L2FDB_TABLE, + NE6X_REG_VLAN_TABLE, + NE6X_REG_MAC_LEARN_TABLE, + NE6X_REG_VF_STAT_TABLE, + NE6X_REG_VF_BW_TABLE, + NE6X_REG_ACL_TABLE, + NE6X_REG_ARFS_TABLE, + NE6X_REG_TABLE_LAST, +}; + +enum ne6x_reg_talk_port { + NE6X_MSG_PORT_ENABLE = 0, + NE6X_MSG_PORT_DUPLEX, + NE6X_MSG_PORT_SPEED, + NE6X_MSG_PORT_STATS, + NE6X_MSG_PORT_SFP_SPEED, + NE6X_MSG_PORT_FEC, + NE6X_MSG_PORT_SPEED_MAX, + NE6X_MSG_PORT_PAUSE, + NE6X_MSG_PORT_PAUSE_ADDR, + NE6X_MSG_PORT_LOOPBACK, + NE6X_MSG_PORT_MAX_FRAME, + NE6X_MSG_PORT_AUTO_NEG, + NE6X_MSG_PORT_INFO, + NE6X_MSG_PORT_LINK_STATUS, + NE6X_MSG_PORT_DRV_I2C, + NE6X_MSG_PORT_SELF_TEST, + NE6X_MSG_PORT_SFP_TYPE_LEN, + NE6X_MSG_PORT_SFP_EEPROM, + NE6X_MSG_PORT_STATE, +}; + +enum ne6x_reg_talk_opcode { + NE6X_TALK_SET = 0, + NE6X_TALK_GET +}; + +extern struct ne6x_diag_reg_test_info ne6x_reg_list[]; + +struct table_info { + u32 addr; /* 00 - 27: max_size + * 28 - 31: engine_idx + */ + u32 size; + /* 00 - 15: length + * 16 - 20: + * 21 - 23: entry_num + * 24 - 26: mem_type + * 27 - 27: mem_type_bucket + * 28 - 31: opcode + */ + u16 opcode_read; + u16 opcode_write; +#define ADV_CMD_DISABLE 0x00 +#define ADV_CMD_ENABLE 0x01 + u32 advanced_cmd; + u16 opcode_insert; + u16 opcode_delete; + u16 opcode_update; + u16 opcode_search; + u16 size_insert; + u16 size_delete; + u16 size_search; + u16 size_update; +}; + +struct rss_table { + u32 resv; + u32 flag; + u32 hash_fun; /* 24 - 31: func, 1 - 23: type */ + u32 queue_base; + u16 queue_def; + u16 queue_size; + u16 entry_num; + u16 entry_size; + u8 entry_data[128]; + u8 hash_key[352]; + u8 resv1[8]; +}; + +struct l2fdb_dest_unicast { + u8 flags; /* bit0: static, bit1: multicast */ + u8 rsv[3]; + u32 vp_bmp[3]; + u32 cnt; /* leaf num */ + u8 resv3[44]; +}; + +struct l2fdb_dest_multicast { + u8 flags; /* bit0: static, bit1: multicast */ + u8 resv3[3]; + u32 vp_bmp[3]; + u8 resv4[48]; +}; + +struct l2fdb_search_result { + u32 key_index; + union { + struct l2fdb_dest_unicast unicast; + struct l2fdb_dest_multicast multicast; + } fw_info; +}; + +struct l2fdb_table { + u8 resv1; + u8 pport; + u8 mac[6]; + u32 vlanid; + u8 resv2[52]; + union { + struct l2fdb_dest_unicast unicast;
struct l2fdb_dest_multicast multicast; + } fw_info; /* forward info */ +}; + +struct l2fdb_fast_table { + u8 mac[6]; + u8 start_cos; + u8 cos_num; +}; + +struct meter_table { + u32 cir; + u32 cbs; + u32 pir; + u32 pbs; +}; + +enum np_user_data { + NP_USER_DATA_HW_FEATURES = 0, + NP_USER_DATA_HW_FLAGS = 1, + NP_USER_DATA_RSS_TABLE_SIZE = 2, + NP_USER_DATA_RSS_TABLE_ENTRY_WIDTH = 3, + NP_USER_DATA_RSS_HASH_KEY_BLOCK_SIZE = 4, + NP_USER_DATA_PORT2PI_0 = 5, + NP_USER_DATA_PI2PORT_0 = 25, + NP_USER_DATA_VLAN_TYPE = 33, + NP_USER_DATA_RSV_0 = 34, + NP_USER_DATA_RSV_1 = 35, + NP_USER_DATA_RSV_2 = 36, + NP_USER_DATA_PI0_BROADCAST_LEAF = 37, + NP_USER_DATA_PORT_OLFLAGS_0 = 53, + NP_USER_DATA_PORT_2_COS_0 = 121, + NP_USER_DATA_VPORT0_LINK_STATUS = 155, + NP_USER_DATA_TSO_CKSUM_DISABLE = 156, + NP_USER_DATA_PORT0_MTU = 157, + NP_USER_DATA_PORT0_QINQ = 161, + NP_USER_DATA_CQ_SIZE = 229, + NP_USER_DATA_FAST_MODE = 230, + NP_USER_DATA_SUB_FLAG = 231, + NP_USER_DATA_DDOS_FLAG = 242, + NP_USER_DATA_END = 255, +}; + +struct ne6x_diag_reg_info { + u32 address; + u32 value; +}; + +enum { + NE6X_NORFLASH_OP_WRITE_E = 0, + NE6X_NORFLASH_OP_READ_E = 1, + NE6X_NORFLASH_OP_ERASE_E = 2, + NE6X_NORFLASH_OP_E_END, +}; + +void ne6x_reg_pci_write(struct ne6x_pf *pf, u32 base_addr, + u32 offset_addr, u64 reg_value); +u64 ne6x_reg_pci_read(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr); + +u32 ne6x_reg_apb_read(struct ne6x_pf *pf, u64 offset); +void ne6x_reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value); +int ne6x_reg_reset_firmware(struct ne6x_pf *pf); + +int ne6x_reg_indirect_read(struct ne6x_pf *pf, u32 addr, u32 *value); +int ne6x_reg_indirect_write(struct ne6x_pf *pf, u32 addr, u32 value); +int ne6x_reg_table_read(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size); +int ne6x_reg_table_write(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size); +int ne6x_reg_table_insert(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *table_id); +int ne6x_reg_table_delete(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size); +int ne6x_reg_table_search(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *ret_data, int ret_size); + +int ne6x_reg_e2prom_read(struct ne6x_pf *pf, u32 offset, void *pbuf, int size); +int ne6x_reg_e2prom_write(struct ne6x_pf *pf, u32 offset, void *pbuf, int size); +int ne6x_reg_set_fan_speed(struct ne6x_pf *pf, u32 speed); +int ne6x_reg_get_fan_speed(struct ne6x_pf *pf, u32 *speed); + +int ne6x_reg_get_soc_info(struct ne6x_pf *pf, u32 class_type, u32 *ret, u32 size); +int ne6x_reg_talk_port(struct ne6x_pf *pf, enum ne6x_reg_talk_port talk, + enum ne6x_reg_talk_opcode opcode, int port, + void *pbuf, int size); +int ne6x_reg_upgrade_firmware(struct ne6x_pf *pf, u8 region, u8 *data, int size); + +int ne6x_reg_get_ver(struct ne6x_pf *pf, struct ne6x_firmware_ver_info *version); + +int ne6x_reg_get_sfp_eeprom(struct ne6x_pf *pf, int port, void *pbuf, + u32 offset, int size); + +int ne6x_reg_nic_start(struct ne6x_pf *pf, u32 flag); +int ne6x_reg_nic_stop(struct ne6x_pf *pf, u32 flag); + +int ne6x_reg_get_nic_state(struct ne6x_pf *pf, u32 *state); + +int ne6x_reg_set_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 data); +int ne6x_reg_get_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 *data); + +int ne6x_reg_set_led(struct ne6x_pf *pf,
int port, bool state); +int ne6x_reg_config_meter(struct ne6x_pf *pf, u32 meter_id, u32 *data, int size); + +int ne6x_reg_send_bit(struct ne6x_pf *pf, u32 port, u32 mode); + +int ne6x_reg_set_unicast_for_fastmode(struct ne6x_pf *pf, u32 index, + u32 *data, u32 size); +int ne6x_reg_get_dump_data_len(struct ne6x_pf *pf, u32 *size); +int ne6x_reg_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size); +int ne6x_reg_clear_table(struct ne6x_pf *pf, u32 table_id); + +int ne6x_reg_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect); +int ne6x_reg_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect); + +int ne6x_reg_write_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *pdata); +int ne6x_reg_erase_norflash(struct ne6x_pf *pf, u32 offset, u32 length); +int ne6x_reg_read_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *p); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c new file mode 100644 index 0000000000000000000000000000000000000000..bb70698eefecd465202cfe9ac37944e2b2ab87dc --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6x.h" +#include "ne6x_txrx.h" +#include "ne6x_reg.h" + +int ne6x_adpt_setup_tx_resources(struct ne6x_adapter *adpt) +{ + int i, err = 0; + + for (i = 0; i < adpt->num_queue && !err; i++) { + err = ne6x_setup_tx_descriptors(adpt->tx_rings[i]); + if (!err) + err = ne6x_setup_tg_descriptors(adpt->tg_rings[i]); + if (!err) + err = ne6x_setup_cq_descriptors(adpt->cq_rings[i]); + if (!err) + err = ne6x_setup_tx_sgl(adpt->tx_rings[i]); + } + + return err; +} + +int ne6x_adpt_setup_rx_resources(struct ne6x_adapter *adpt) +{ + int i, err = 0; + + for (i = 0; i < adpt->num_queue && !err; i++) + err = ne6x_setup_rx_descriptors(adpt->rx_rings[i]); + + return err; +} + +static inline void ne6x_update_enable_itr(struct ne6x_q_vector *q_vector) +{ + struct ne6x_adapter *adpt = (struct ne6x_adapter *)q_vector->adpt; + struct ne6x_hw *hw = &adpt->back->hw; + + u64 val = 1ULL << NE6X_VP_CQ_INTSHIFT; + + if (!test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + struct ne6x_ring *cq_ring = NULL; + + cq_ring = q_vector->cq.ring; + if (cq_ring->next_to_clean != cq_ring->next_to_use) { + cq_ring->next_to_clean = cq_ring->next_to_use; + /* memory barrier updating cq ring tail */ + wmb(); + writeq(cq_ring->next_to_clean, cq_ring->tail); + } + + if (q_vector->reg_idx < NE6X_PF_VP0_NUM) { + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT), val); + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT_MASK), ~(val)); + } else { + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT), val); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), ~(val)); + } + } +} + +int ne6x_napi_poll(struct napi_struct *napi, int budget) +{ + struct ne6x_q_vector *q_vector = container_of(napi, struct ne6x_q_vector, napi); + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)q_vector->adpt; + struct ne6x_ring *ring = NULL; + bool clean_complete = true; + int cq_budget = 16; + int work_done = 0; + int cleaned = 0; + + if (test_bit(NE6X_ADPT_DOWN, comm->state)) { + napi_complete(napi); + return 0; + } + + ring = q_vector->cq.ring; + cleaned = ne6x_clean_cq_irq(q_vector, ring, cq_budget); + if (cleaned >= cq_budget) + clean_complete = false; + + ring = q_vector->tx.ring;
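+ /* Tx completions are cleaned regardless of the NAPI budget; only the + * Rx work below counts toward it, and a budget of 0 (netpoll) skips + * Rx entirely via the tx_only path. + */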
if (!ne6x_clean_tx_irq(comm, ring, budget)) + clean_complete = false; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + goto tx_only; + + ring = q_vector->rx.ring; + cleaned = ne6x_clean_rx_irq(ring, budget); + if (cleaned >= budget) + clean_complete = false; + + work_done += cleaned; + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) { + int cpu_id = smp_processor_id(); + + /* It is possible that the interrupt affinity has changed but, + * if the cpu is pegged at 100%, polling will never exit while + * traffic continues and the interrupt will be stuck on this + * cpu. We check to make sure affinity is correct before we + * continue to poll, otherwise we must stop polling so the + * interrupt can move to the correct cpu. + */ + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + ne6x_update_enable_itr(q_vector); + /* Return budget-1 so that polling stops */ + return budget - 1; + } +tx_only: + return budget; + } + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete_done(napi, work_done); + ne6x_update_enable_itr(q_vector); + + return min(work_done, budget - 1); +} + +void ne6x_adpt_clear_rings(struct ne6x_adapter *adpt) +{ + int i; + + if (adpt->tx_rings && adpt->tx_rings[0]) { + for (i = 0; i < adpt->num_queue; i++) { + /* the four rings per queue were allocated as one block in + * ne6x_alloc_rings(), so freeing tx_rings[i] releases all of them + */ + kfree_rcu(adpt->tx_rings[i], rcu); + adpt->tx_rings[i] = NULL; + adpt->rx_rings[i] = NULL; + adpt->cq_rings[i] = NULL; + adpt->tg_rings[i] = NULL; + } + } +} + +int ne6x_alloc_rings(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_ring *ring; + int i, qpv = 4; + + /* Set basic values in the rings to be used later during open() */ + for (i = 0; i < adpt->num_queue; i++) { + /* allocate space for the Tx, CQ, Rx and TG rings in one shot */ + ring = kcalloc(qpv, sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_out; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_tx_desc; + ring->size = 0; + adpt->tx_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_cq_desc; + ring->size = 0; + adpt->cq_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_rx_desc; + ring->size = 0; + adpt->rx_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_tg_desc; + ring->size = 0; + adpt->tg_rings[i] = ring; + } + + return 0; + +err_out: + ne6x_adpt_clear_rings(adpt); + return -ENOMEM; +} + +static int ne6x_configure_tx_ring(struct ne6x_ring *ring) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev); + u16 pf_q = adpt->base_queue + ring->queue_index; + union ne6x_sq_base_addr sq_base_addr; + struct ne6x_hw *hw = &adpt->back->hw; + union ne6x_sq_cfg sq_cfg; + + /* SRIOV mode VF Config OR SRIOV disabled PF Config */ + if (pf_q < NE6X_PF_VP0_NUM) { + sq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_BASE_ADDR)); + sq_base_addr.reg.csr_sq_base_addr_vp = ring->dma; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_BASE_ADDR), sq_base_addr.val);
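+ /* judging by the field names, the SQ is then configured with its ring + * length and pull-mode doorbells, with event write-back and PD + * byte-order reversal left disabled + */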
+ + sq_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_CFG)); + sq_cfg.reg.csr_sq_len_vp = ring->count; + sq_cfg.reg.csr_tdq_pull_en = 0x1; + sq_cfg.reg.csr_sqevt_write_back_vp = 0x0; + sq_cfg.reg.csr_send_pd_revers_en = 0x0; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_CFG), sq_cfg.val); + + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_HD_POINTER), 0x0); + + /* cache tail off for easier writes later */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_TDQ(pf_q, 0x0) >> 3]; + } else { + /* SRIOV mode PF Config */ + sq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_BASE_ADDR)); + sq_base_addr.reg.csr_sq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_BASE_ADDR), + sq_base_addr.val); + + sq_cfg.val = + rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_CFG)); + sq_cfg.reg.csr_sq_len_vp = ring->count; + sq_cfg.reg.csr_tdq_pull_en = 0x1; + sq_cfg.reg.csr_sqevt_write_back_vp = 0x0; + sq_cfg.reg.csr_send_pd_revers_en = 0x0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_SQ_CFG), sq_cfg.val); + + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_SQ_HD_POINTER), 0x0); + + /* cache tail off for easier writes later */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_TDQ(pf_q, 0x0) >> 3]; + } + + return 0; +} + +int ne6x_adpt_configure_tx(struct ne6x_adapter *adpt) +{ + int err = 0; + u16 i; + + for (i = 0; (i < adpt->num_queue) && !err; i++) + err = ne6x_configure_tx_ring(adpt->tx_rings[i]); + + return err; +} + +static int ne6x_configure_cq_ring(struct ne6x_ring *ring) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev); + u16 pf_q = adpt->base_queue + ring->queue_index; + union ne6x_cq_base_addr cq_base_addr; + struct ne6x_hw *hw = &adpt->back->hw; + union ne6x_cq_cfg cq_cfg; + + /* SRIOV enabled VF config OR SRIOV disabled PF config */ + if (pf_q < NE6X_PF_VP0_NUM) { + cq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_BASE_ADDR)); + cq_base_addr.reg.csr_cq_base_addr_vp = ring->dma; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_BASE_ADDR), cq_base_addr.val); + + cq_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_CFG)); + cq_cfg.reg.csr_cq_len_vp = ring->count; + cq_cfg.reg.csr_cq_merge_time_vp = 7; + cq_cfg.reg.csr_cq_merge_size_vp = 7; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_CFG), cq_cfg.val); + + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_TAIL_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (void __iomem *)hw->hw_addr0 + + (NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_HD_POINTER)); + writeq(0, ring->tail); + } else { + /* SRIOV enable PF config */ + cq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_CQ_BASE_ADDR)); + cq_base_addr.reg.csr_cq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_BASE_ADDR), + cq_base_addr.val); + + cq_cfg.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_CFG)); + cq_cfg.reg.csr_cq_len_vp = ring->count; + cq_cfg.reg.csr_cq_merge_time_vp = 7; + cq_cfg.reg.csr_cq_merge_size_vp = 7; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_CFG), + cq_cfg.val); + + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_CQ_TAIL_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (void __iomem *)hw->hw_addr4 + + (NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_HD_POINTER)); + 
writeq(0, ring->tail); + } + + return 0; +} + +int ne6x_adpt_configure_cq(struct ne6x_adapter *adpt) +{ + int err = 0; + u16 i; + + /* set up individual rings */ + for (i = 0; i < adpt->num_queue && !err; i++) + err = ne6x_configure_cq_ring(adpt->cq_rings[i]); + + return err; +} + +static int ne6x_configure_rx_ring(struct ne6x_ring *ring) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev); + u16 pf_q = adpt->base_queue + ring->queue_index; + union ne6x_rq_block_cfg rq_block_cfg; + union ne6x_rq_base_addr rq_base_addr; + struct ne6x_hw *hw = &adpt->back->hw; + union ne6x_rq_cfg rc_cfg; + u16 rxmax = 0; + + ring->rx_buf_len = adpt->rx_buf_len; + + if (pf_q < NE6X_PF_VP0_NUM) { + rq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BASE_ADDR)); + rq_base_addr.reg.csr_rq_base_addr_vp = ring->dma; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BASE_ADDR), rq_base_addr.val); + + rxmax = min_t(u16, adpt->max_frame, ring->rx_buf_len); + rq_block_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BLOCK_CFG)); + rq_block_cfg.reg.csr_rdq_mop_len = rxmax; + rq_block_cfg.reg.csr_rdq_sop_len = 0; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BLOCK_CFG), rq_block_cfg.val); + + rc_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_CFG)); + rc_cfg.reg.csr_rq_len_vp = ring->count; + rc_cfg.reg.csr_rdq_pull_en = 0x1; + rc_cfg.reg.csr_rqevt_write_back_vp = 0x0; + rc_cfg.reg.csr_recv_pd_type_vp = 0x0; + rc_cfg.reg.csr_recv_pd_revers_en = 0x0; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_CFG), rc_cfg.val); + + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_HD_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_RDQ(pf_q, 0x0) >> 3]; + } else { + /* SRIOV enabled PF Config */ + rq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BASE_ADDR)); + rq_base_addr.reg.csr_rq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_BASE_ADDR), + rq_base_addr.val); + + rxmax = min_t(u16, adpt->max_frame, ring->rx_buf_len); + rq_block_cfg.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BLOCK_CFG)); + rq_block_cfg.reg.csr_rdq_mop_len = rxmax; + rq_block_cfg.reg.csr_rdq_sop_len = 0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BLOCK_CFG), + rq_block_cfg.val); + + rc_cfg.val = + rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_CFG)); + rc_cfg.reg.csr_rq_len_vp = ring->count; + rc_cfg.reg.csr_rdq_pull_en = 0x1; + rc_cfg.reg.csr_rqevt_write_back_vp = 0x0; + rc_cfg.reg.csr_recv_pd_type_vp = 0x0; + rc_cfg.reg.csr_recv_pd_revers_en = 0x0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_CFG), rc_cfg.val); + + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_HD_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_RDQ(pf_q, 0x0) >> 3]; + } + + return 0; +} + +int ne6x_adpt_configure_rx(struct ne6x_adapter *adpt) +{ + int err = 0; + u16 i; + + adpt->max_frame = NE6X_MAX_RXBUFFER; + adpt->rx_buf_len = NE6X_RXBUFFER_4096;
+ + /* set up individual rings */ + for (i = 0; i < adpt->num_queue && !err; i++) + err = ne6x_configure_rx_ring(adpt->rx_rings[i]); + + return err; +} + +netdev_tx_t ne6x_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_ring *tx_ring = adpt->tx_rings[skb->queue_mapping]; + struct ne6x_ring *tag_ring = adpt->tg_rings[skb->queue_mapping]; + struct sk_buff *trailer; + int tailen = 4; + int nsg; + bool jumbo_frame = true; + + /* hardware can't handle really short frames, hardware padding works + * beyond this point + */ + if (skb_put_padto(skb, NE6X_MIN_TX_LEN)) + return NETDEV_TX_OK; + + /* a non-jumbo packet needs 4 extra bytes appended for the CRC */ + if (skb->len < NE6X_MAX_DATA_PER_TXD) { + nsg = skb_cow_data(skb, tailen, &trailer); + if (unlikely(nsg < 0)) { + netdev_err(adpt->netdev, "TX: skb_cow_data() returned %d\n", nsg); + /* ndo_start_xmit must not return a negative errno; drop the frame */ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + pskb_put(skb, trailer, tailen); + jumbo_frame = false; + } + + if (netdev->gso_max_size < skb->len) + netdev_err(adpt->netdev, "%s: skb->len = %d > 15360\n", __func__, skb->len); + + return ne6x_xmit_frame_ring(skb, tx_ring, tag_ring, jumbo_frame); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h new file mode 100644 index 0000000000000000000000000000000000000000..b09563cfc4e35fef82d97b42367a8feaf01e2aef --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_TXRX_H +#define _NE6X_TXRX_H + +int ne6x_napi_poll(struct napi_struct *napi, int budget); +netdev_tx_t ne6x_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c new file mode 100644 index 0000000000000000000000000000000000000000..aca0ab3d3ee12aacf88d75efd09f5cc5ff542c14 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c @@ -0,0 +1,2388 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_portmap.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_interrupt.h" + +void ne6x_clear_vf_status(struct ne6x_vf *vf) +{ + struct ne6x_flowctrl flowctrl; + + flowctrl.rx_pause = 0; + flowctrl.tx_pause = 0; + ne6x_dev_set_flowctrl(vf->adpt, &flowctrl); + ne6x_dev_set_vf_bw(vf->adpt, 0); +} + +void ne6x_mbx_deinit_snapshot(struct ne6x_hw *hw) +{ + struct ne6x_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Free VF counter array and reset vf counter length */ + kfree(snap->mbx_vf.vf_cntr); + snap->mbx_vf.vfcntr_len = 0; +} + +int ne6x_mbx_init_snapshot(struct ne6x_hw *hw, u16 vf_count) +{ + struct ne6x_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Ensure that the number of VFs allocated is non-zero and + * is not greater than the number of supported VFs defined in + * the functional capabilities of the PF.
+ */ + if (!vf_count || vf_count > NE6X_MAX_VP_NUM) + return 1; + + snap->mbx_vf.vf_cntr = kcalloc(vf_count, sizeof(*snap->mbx_vf.vf_cntr), GFP_KERNEL); + if (!snap->mbx_vf.vf_cntr) + return 1; + + /* Setting the VF counter length to the number of allocated + * VFs for given PF's functional capabilities. + */ + snap->mbx_vf.vfcntr_len = vf_count; + snap->state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + memset(hw->ne6x_mbx_ready_to_send, true, 64); + + return 0; +} + +int ne6x_status_to_errno(int err) +{ + if (err) + return -EINVAL; + + return 0; +} + +void ne6x_set_vf_state_qs_dis(struct ne6x_vf *vf) +{ + /* Clear Rx/Tx enabled queues flag */ + if (test_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states)) + clear_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states); +} + +static void ne6x_dis_vf_qs(struct ne6x_vf *vf) +{ + ne6x_set_vf_state_qs_dis(vf); +} + +bool ne6x_is_reset_in_progress(unsigned long *state) +{ + return test_bit(NE6X_PF_RESET_REQUESTED, state) || + test_bit(NE6X_RESET_INTR_RECEIVED, state) || + test_bit(NE6X_CORE_RESET_REQUESTED, state) || + test_bit(NE6X_GLOBAL_RESET_REQUESTED, state); +} + +void ne6x_adpt_close_vf(struct ne6x_adapter *adpt, u16 vf_id) +{ + if (!test_and_set_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + clear_bit(NE6X_ADPT_DOWN, adpt->comm.state); +} + +static int ne6x_adpt_clear_vf(struct ne6x_adapter *adpt) +{ + struct mac_addr_head *mc_head = &adpt->mc_mac_addr; + struct mac_addr_head *uc_head = &adpt->uc_mac_addr; + struct mac_addr_node *temp_node, *addr_node; + struct ne6x_vlan_filter *vlf, *vlftmp; + struct ne6x_pf *pf; + + if (!adpt) + return 0; + + if (!adpt->back) + goto free_adpt; + + pf = adpt->back; + + mutex_lock(&pf->switch_mutex); + if (!pf->adpt[adpt->idx]) { + dev_err(&pf->pdev->dev, "pf->adpt[%d] is NULL, just free adpt[%d](type %d)\n", + adpt->idx, adpt->idx, adpt->type); + goto unlock_adpt; + } + + if (pf->adpt[adpt->idx] != adpt) { + dev_err(&pf->pdev->dev, "pf->adpt[%d](type %d) != adpt[%d](type %d): no free!\n", + pf->adpt[adpt->idx]->idx, pf->adpt[adpt->idx]->type, adpt->idx, adpt->type); + goto unlock_adpt; + } + + pf->adpt[adpt->idx] = NULL; + if (adpt->idx < pf->next_adpt) + pf->next_adpt = adpt->idx; + + kfree(adpt->tx_rings); + adpt->tx_rings = NULL; + + kfree(adpt->q_vectors); + adpt->q_vectors = NULL; + + kfree(adpt->port_info); + adpt->port_info = NULL; + + /* release adpt multicast addr list resource */ + mutex_lock(&mc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &mc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&mc_head->mutex); + + /* release adpt unicast addr list resource */ + mutex_lock(&uc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &uc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&uc_head->mutex); + + spin_lock_bh(&adpt->mac_vlan_list_lock); + /* release adpt vlan list resource */ + list_for_each_entry_safe(vlf, vlftmp, &adpt->vlan_filter_list, list) { + list_del(&vlf->list); + kfree(vlf); + } + spin_unlock_bh(&adpt->mac_vlan_list_lock); + +unlock_adpt: + mutex_unlock(&pf->switch_mutex); +free_adpt: + kfree(adpt); + + return 0; +} + +int ne6x_adpt_release_vf(struct ne6x_adapter *adpt, u16 vf_id) +{ + struct ne6x_pf *pf; + + if (!adpt->back) + return -ENODEV; + + pf = adpt->back; + + if (adpt->netdev && !ne6x_is_reset_in_progress(pf->state) && + (test_bit(NE6X_ADPT_NETDEV_REGISTERED, adpt->comm.state))) { + unregister_netdev(adpt->netdev); + clear_bit(NE6X_ADPT_NETDEV_REGISTERED, adpt->comm.state); + } + + 
ne6x_adpt_close_vf(adpt, vf_id); + + if (!ne6x_is_reset_in_progress(pf->state)) + ne6x_adpt_clear_vf(adpt); + + return 0; +} + +struct ne6x_adapter *ne6x_get_vf_adpt(struct ne6x_vf *vf) +{ + return vf->pf->adpt[vf->lan_adpt_idx]; +} + +static void ne6x_vf_invalidate_adpt(struct ne6x_vf *vf) +{ + vf->lan_adpt_idx = NE6X_NO_ADPT; +} + +static void ne6x_vf_adpt_release(struct ne6x_vf *vf) +{ + ne6x_adpt_clear_mac_vlan(ne6x_get_vf_adpt(vf)); + ne6x_dev_del_broadcast_leaf(ne6x_get_vf_adpt(vf)); + ne6x_dev_set_features(vf->adpt, 0); + ne6x_dev_del_vf_qinq(vf, 0, 0); + ne6x_adpt_release_vf(ne6x_get_vf_adpt(vf), vf->vf_id); + ne6x_vf_invalidate_adpt(vf); +} + +static void ne6x_free_vf_res(struct ne6x_vf *vf) +{ + /* First, disable VF's configuration API to prevent OS from + * accessing the VF's adapter after it's freed or invalidated. + */ + clear_bit(NE6X_VF_STATE_INIT, vf->vf_states); + + /* free adapter and disconnect it from the parent uplink */ + if (vf->lan_adpt_idx != NE6X_NO_ADPT) { + if (vf->tx_rate) { + ne6x_dev_set_vf_bw(ne6x_get_vf_adpt(vf), 0); + vf->tx_rate = 0; + } + + ne6x_vf_adpt_release(vf); + } +} + +static int ne6x_sriov_free_msix_res(struct ne6x_pf *pf) +{ + struct ne6x_lump_tracking *res; + + if (!pf) + return -EINVAL; + + res = pf->irq_pile; + if (!res) + return -EINVAL; + + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + return 0; +} + +void ne6x_free_vfs(struct ne6x_pf *pf) +{ + struct device *dev = ne6x_pf_to_dev(pf); + unsigned int tmp, i; + u64 reg; + + if (!pf->vf) + return; + + while (test_and_set_bit(NE6X_VF_DIS, pf->state)) + usleep_range(1000, 2000); + + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. 
+ */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); + + /* Avoid wait time by stopping all VFs at the same time */ + ne6x_for_each_vf(pf, i) { + if (test_bit(NE6X_VF_STATE_QS_ENA, pf->vf[i].vf_states)) + ne6x_dis_vf_qs(&pf->vf[i]); + } + + tmp = pf->num_alloc_vfs; + pf->num_qps_per_vf = 0; + pf->num_alloc_vfs = 0; + + for (i = 0; i < tmp; i++) { + if (test_bit(NE6X_VF_STATE_INIT, pf->vf[i].vf_states)) { + set_bit(NE6X_VF_STATE_DIS, pf->vf[i].vf_states); + ne6x_free_vf_res(&pf->vf[i]); + } + } + + if (ne6x_sriov_free_msix_res(pf)) + dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); + + ne6x_dev_clear_vport(pf); + kfree(pf->vf); + pf->vf = NULL; + + reg = rd64_bar4(&pf->hw, 0x05300); + reg &= ~0xfc000; + reg |= 0x8000; + wr64_bar4(&pf->hw, 0x05300, reg); + + clear_bit(NE6X_VF_DIS, pf->state); +} + +static int ne6x_alloc_vfs(struct ne6x_pf *pf, int num_vfs) +{ + struct ne6x_vf *vfs; + + vfs = kcalloc(num_vfs, sizeof(*vfs), GFP_KERNEL); + if (!vfs) + return -ENOMEM; + + pf->vf = vfs; + pf->num_alloc_vfs = num_vfs; + + return 0; +} + +static int ne6x_sriov_set_msix_res(struct ne6x_pf *pf, u16 num_msix_needed) +{ + int sriov_base_vector; + + sriov_base_vector = NE6X_MAX_MSIX_NUM - num_msix_needed; + + /* make sure we only grab irq_tracker entries from the list end and + * that we have enough available MSIX vectors + */ + if (sriov_base_vector < 0) + return -EINVAL; + + return 0; +} + +static int ne6x_set_per_vf_res(struct ne6x_pf *pf) +{ + struct device *dev = ne6x_pf_to_dev(pf); + u16 queue; + + if (!pf->num_alloc_vfs) + return -EINVAL; + + queue = NE6X_MAX_VP_NUM / pf->num_alloc_vfs; + + if (ne6x_sriov_set_msix_res(pf, queue * pf->num_alloc_vfs)) { + dev_err(dev, "Unable to set MSI-X resources for %d VFs\n", pf->num_alloc_vfs); + return -EINVAL; + } + + /* only allow equal Tx/Rx queue count (i.e. 
queue pairs) */ + pf->num_qps_per_vf = queue; + dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", pf->num_alloc_vfs, + pf->num_qps_per_vf, pf->num_qps_per_vf); + + return 0; +} + +static void ne6x_vc_clear_allowlist(struct ne6x_vf *vf) +{ + bitmap_zero(vf->opcodes_allowlist, VIRTCHNL_OP_MAX); +} + +/* default opcodes to communicate with VF */ +static const u32 default_allowlist_opcodes[] = { + VIRTCHNL_OP_GET_VF_RESOURCES, + VIRTCHNL_OP_VERSION, + VIRTCHNL_OP_RESET_VF, +}; + +static void ne6x_vc_allowlist_opcodes(struct ne6x_vf *vf, const u32 *opcodes, size_t size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + set_bit(opcodes[i], vf->opcodes_allowlist); +} + +void ne6x_vc_set_default_allowlist(struct ne6x_vf *vf) +{ + ne6x_vc_clear_allowlist(vf); + ne6x_vc_allowlist_opcodes(vf, default_allowlist_opcodes, + ARRAY_SIZE(default_allowlist_opcodes)); +} + +static void ne6x_set_dflt_settings_vfs(struct ne6x_pf *pf) +{ + int i; + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + vf->pf = pf; + vf->vf_id = i; + vf->base_queue = (NE6X_MAX_VP_NUM / pf->num_alloc_vfs) * i; + vf->num_vf_qs = pf->num_qps_per_vf; + vf->tx_rate = 0; + test_and_clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_vc_set_default_allowlist(vf); + } +} + +void ne6x_send_init_mbx_mesg(struct ne6x_pf *pf) +{ + struct ne6x_hw *hw = &pf->hw; + u64 reg_cfg; + int i; + + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + wr64_bar4(hw, NE6X_PF_MAILBOX_ADDR(vf->base_queue), 0x0); + reg_cfg = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK)); + reg_cfg &= ~(1ULL << vf->base_queue); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), reg_cfg); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), reg_cfg); + } +} + +static struct ne6x_port_info *ne6x_vf_get_port_info(struct ne6x_vf *vf) +{ + struct ne6x_adapter *adpt = ne6x_get_vf_adpt(vf); + + return adpt->port_info; +} + +static struct ne6x_adapter *ne6x_adpt_alloc(struct ne6x_pf *pf, u16 vf_id, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + struct ne6x_adapter *adpt = NULL; + int pf_adpt_idx; + + /* Need to protect the allocation of the adapters at the PF level */ + mutex_lock(&pf->switch_mutex); + + /* If we have already allocated our maximum number of adapters, + * pf->next_adpt will be NE6X_NO_ADPT. 
If not, pf->next_adpt index + * is available to be populated + */ + if (pf->next_adpt == NE6X_NO_ADPT) { + dev_dbg(dev, "out of adapter slots!\n"); + goto unlock_pf; + } + + adpt = kzalloc(sizeof(*adpt), GFP_KERNEL); + if (!adpt) + goto unlock_pf; + + adpt->back = pf; + adpt->type = NE6X_ADPT_VF; + set_bit(NE6X_ADPT_DOWN, adpt->comm.state); + + adpt->num_queue = pf->vf[vf_id].num_vf_qs; + adpt->num_q_vectors = pf->vf[vf_id].num_vf_qs; + /* vf_id 0 -- 63: vport: 0 -- 64: pf: 64 -- 68 */ + adpt->idx = pf->vf[vf_id].vf_id + pf->num_alloc_adpt; + adpt->vport = pf->vf[vf_id].vf_id; + adpt->port_info = kzalloc(sizeof(*adpt->port_info), GFP_KERNEL); + if (!adpt->port_info) + goto err_rings; + + /* vf attach pf alloc */ + pf_adpt_idx = pf->vf[vf_id].base_queue / (NE6X_MAX_VP_NUM / pf->hw.pf_port); + adpt->port_info->lport = pf->adpt[pf_adpt_idx]->port_info->lport; + adpt->port_info->hw_port_id = pf->adpt[pf_adpt_idx]->port_info->hw_port_id; + adpt->port_info->hw = &pf->hw; + adpt->port_info->hw_trunk_id = pf->adpt[pf_adpt_idx]->port_info->hw_trunk_id; + adpt->port_info->hw_queue_base = pf->vf[vf_id].base_queue; + adpt->port_info->hw_max_queue = pf->vf[vf_id].num_vf_qs; + adpt->base_queue = pf->vf[vf_id].base_queue; + + /* init multicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->mc_mac_addr.list); + mutex_init(&adpt->mc_mac_addr.mutex); + + /* init unicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->uc_mac_addr.list); + mutex_init(&adpt->uc_mac_addr.mutex); + + /* init vlan list head node */ + spin_lock_init(&adpt->mac_vlan_list_lock); + INIT_LIST_HEAD(&adpt->vlan_filter_list); + + pf->adpt[adpt->idx] = adpt; + + goto unlock_pf; + +err_rings: + kfree(adpt); + adpt = NULL; +unlock_pf: + mutex_unlock(&pf->switch_mutex); + return adpt; +} + +struct ne6x_adapter *ne6x_adpt_setup_vf(struct ne6x_pf *pf, u16 vf_id, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + struct ne6x_adapter *adpt; + + adpt = ne6x_adpt_alloc(pf, vf_id, num_vfs); + if (!adpt) { + dev_err(dev, "could not allocate adapter\n"); + return NULL; + } + + return adpt; +} + +static struct ne6x_adapter *ne6x_vf_adpt_setup(struct ne6x_vf *vf, u16 num_vfs) +{ + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *adpt; + + adpt = ne6x_adpt_setup_vf(pf, vf->vf_id, num_vfs); + if (!adpt) { + dev_err(ne6x_pf_to_dev(pf), "Failed to create VF adapter\n"); + ne6x_vf_invalidate_adpt(vf); + return NULL; + } + + vf->lan_adpt_idx = adpt->idx; + vf->adpt = adpt; + + return adpt; +} + +static int ne6x_init_vf_adpt_res(struct ne6x_vf *vf, u16 num_vfs) +{ + struct ne6x_adapter *adpt; + + adpt = ne6x_vf_adpt_setup(vf, num_vfs); + if (!adpt) + return -ENOMEM; + + vf->tx_rate = 0; + ne6x_dev_set_vf_bw(adpt, vf->tx_rate); + + return 0; +} + +static int ne6x_start_vfs(struct ne6x_pf *pf, u16 num_vfs) +{ + int retval, i; + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + retval = ne6x_init_vf_adpt_res(vf, num_vfs); + if (retval) { + dev_err(ne6x_pf_to_dev(pf), "Failed to initialize adapter resources for VF %d, error %d\n", + vf->vf_id, retval); + goto teardown; + } + + set_bit(NE6X_VF_STATE_INIT, vf->vf_states); + } + + ne6x_linkscan_schedule(pf); + + return 0; + +teardown: + for (i = i - 1; i >= 0; i--) { + struct ne6x_vf *vf = &pf->vf[i]; + + ne6x_vf_adpt_release(vf); + } + + return retval; +} + +static int ne6x_delete_pf_trunk(struct ne6x_pf *pf) +{ + return 0; +} + +static int ne6x_recycle_vp_resources(struct
ne6x_pf *pf) +{ + struct ne6x_adapter *adpt; + int rst, i; + u64 reg; + + rst = ne6x_delete_pf_trunk(pf); + if (rst) + return rst; + + ne6x_disable_link_irq(pf); + ne6x_free_link_irq(pf); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_OPEN, adpt->comm.state)) + ne6x_adpt_close(adpt); + } + + reg = rd64_bar4(&pf->hw, 0x05300); + reg &= ~0xfc000; + reg |= 0x7c000; + wr64_bar4(&pf->hw, 0x05300, reg); + + return 0; +} + +static int ne6x_adpt_resetup(struct ne6x_pf *pf, bool recovery) +{ + int vid, pooling, i, actual_vector = 1, size; + struct device *dev = ne6x_pf_to_dev(pf); + union ne6x_ciu_time_out_cfg ciu_time_out_cdg; + union ne6x_all_rq_cfg all_rq_cfg; + union ne6x_all_sq_cfg all_sq_cfg; + union ne6x_all_cq_cfg all_cq_cfg; + union ne6x_merge_cfg merge_cfg; + struct ne6x_hw *hw = &pf->hw; + int qp_remaining, q_vectors; + struct ne6x_adapter *adpt = NULL; + u64 __iomem *reg; + + pooling = test_bit(NE6X_LINK_POOLING, pf->state); + if (pooling) + clear_bit(NE6X_LINK_POOLING, pf->state); + + if (test_bit(NE6X_PF_MSIX, pf->state)) { + pci_disable_msix(pf->pdev); + actual_vector = pci_enable_msix_range(pf->pdev, pf->msix_entries, NE6X_MIN_MSIX, + NE6X_MAX_MSIX_NUM); + if (actual_vector < NE6X_MAX_MSIX_NUM) { + clear_bit(NE6X_PF_MSIX, pf->state); + pci_disable_msix(pf->pdev); + dev_err(dev, "%s-%d: error msix enable failed\n", __func__, __LINE__); + } + + pf->irq_pile->num_entries = actual_vector; + } else { + if (!pf->irq_pile) { + size = sizeof(struct ne6x_lump_tracking) + (sizeof(u16) * actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(dev, "error intx allocating irq_pile memory\n"); + return -ENOMEM; + } + + pf->irq_pile->num_entries = actual_vector; + } + + test_and_set_bit(NE6X_PF_INTX, pf->state); + } + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_RQ_CFG); + all_rq_cfg.val = readq(reg); + all_rq_cfg.reg.csr_allrq_pull_merge_cfg = 0x10; + writeq(all_rq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_SQ_CFG); + all_sq_cfg.val = readq(reg); + all_sq_cfg.reg.csr_allsq_pull_merge_cfg = 0x10; + writeq(all_sq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_CQ_CFG); + all_cq_cfg.val = readq(reg); + all_cq_cfg.reg.csr_allcq_merge_size = 0x1; + all_cq_cfg.reg.csr_allcq_wt_rr_cnt = 0x7F; + all_cq_cfg.reg.csr_allcq_wt_rr_flag = 0x1; + writeq(all_cq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_MERGE_CFG); + merge_cfg.val = readq(reg); + merge_cfg.reg.csr_merge_clk_cnt = 800; + writeq(merge_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_CIU_TIME_OUT_CFG); + ciu_time_out_cdg.val = readq(reg); + ciu_time_out_cdg.reg.csr_int_timer_out_cnt = 0xfff; + writeq(ciu_time_out_cdg.val, reg); + + ne6x_for_each_pf(pf, vid) { + adpt = pf->adpt[vid]; + if (recovery) { + adpt->port_info->hw_queue_base = adpt->port_info->hw_queue_base_old; + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->port_info->hw_queue_base = pf->hw.expect_vp * vid; + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->base_vector = adpt->base_queue; + adpt->port_info->hw_max_queue = pf->hw.max_queue; + adpt->port_info->queue = adpt->port_info->hw_max_queue; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + } else { + adpt->port_info->hw_queue_base_old = adpt->port_info->hw_queue_base; + adpt->port_info->hw_queue_base = 
NE6X_PF_VP1_NUM + vid; + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->base_vector = adpt->base_queue; + adpt->port_info->hw_max_queue = 1u; + adpt->port_info->queue = 1u; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + } + + for (i = 0; i < adpt->num_queue; i++) { + adpt->rx_rings[i]->reg_idx = adpt->base_queue + i; + adpt->cq_rings[i]->reg_idx = adpt->rx_rings[i]->reg_idx; + adpt->tx_rings[i]->reg_idx = adpt->cq_rings[i]->reg_idx; + } + + qp_remaining = adpt->num_queue; + q_vectors = adpt->num_q_vectors; + for (i = 0; i < adpt->num_q_vectors; i++) { + adpt->q_vectors[i]->num_ringpairs = + DIV_ROUND_UP(qp_remaining, q_vectors - i); + adpt->q_vectors[i]->reg_idx = + adpt->q_vectors[i]->v_idx + adpt->base_vector; + qp_remaining--; + } + + ne6x_adpt_reset_stats(adpt); + ne6x_dev_set_vport(adpt); + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = + ethtool_rxfh_indir_default(i, adpt->num_queue); + + ne6x_dev_set_rss(adpt, &adpt->rss_info); + ne6x_dev_set_port2pi(adpt); + rtnl_lock(); + + if (test_bit(NE6X_ADPT_OPEN, adpt->comm.state)) + ne6x_adpt_open(adpt); + + rtnl_unlock(); + } + + ne6x_init_link_irq(pf); + ne6x_enable_link_irq(pf); + + if (pooling) { + set_bit(NE6X_LINK_POOLING, pf->state); + ne6x_linkscan_schedule(pf); + } + + return 0; +} + +static int ne6x_ena_vfs(struct ne6x_pf *pf, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + int ret; + + ret = ne6x_recycle_vp_resources(pf); + if (ret) + goto err_pci_disable_sriov; + + ret = ne6x_adpt_resetup(pf, false); + if (ret) + goto err_pci_disable_sriov; + + ne6x_clr_vf_bw_for_max_vpnum(pf); + ret = ne6x_alloc_vfs(pf, num_vfs); + if (ret) + goto err_pci_disable_sriov; + + if (ne6x_set_per_vf_res(pf)) { + dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n", + num_vfs); + ret = -ENOSPC; + goto err_unroll_sriov; + } + + ne6x_set_dflt_settings_vfs(pf); + if (ne6x_start_vfs(pf, num_vfs)) { + dev_err(dev, "Failed to start VF(s)\n"); + ret = -EAGAIN; + goto err_unroll_sriov; + } + + ne6x_init_mailbox_irq(pf); + ne6x_send_init_mbx_mesg(pf); + clear_bit(NE6X_VF_DIS, pf->state); + + return 0; + +err_unroll_sriov: + kfree(pf->vf); + pf->vf = NULL; + pf->num_alloc_vfs = 0; +err_pci_disable_sriov: + pci_disable_sriov(pf->pdev); + + return ret; +} + +static int ne6x_pci_sriov_ena(struct ne6x_pf *pf, int num_vfs) +{ + int pre_existing_vfs = pci_num_vf(pf->pdev); + struct device *dev = ne6x_pf_to_dev(pf); + int err; + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + ne6x_free_vfs(pf); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + return 0; + + if (num_vfs > NE6X_MAX_VP_NUM) { + dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", num_vfs, + NE6X_MAX_VP_NUM); + return -EOPNOTSUPP; + } + + err = ne6x_ena_vfs(pf, num_vfs); + if (err) { + dev_err(dev, "Failed to enable SR-IOV: %d\n", err); + return err; + } + + if (num_vfs) + test_and_set_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + + return 0; +} + +int ne6x_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct ne6x_pf *pf = pci_get_drvdata(pdev); + struct ne6x_adapter *adpt = NULL; + struct ne6x_vf *vf = NULL; + pbmp_t port_bitmap; + int err = 0, vf_id; + int timeout = 50; + int status; + + if (!(num_vfs == 0 || num_vfs == 2 || num_vfs == 4 || num_vfs == 8 || + num_vfs == 16 || num_vfs == 32 || num_vfs == 64)) + return -EINVAL; + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) { + dev_err(ne6x_pf_to_dev(pf), "ne6x irq number < %d!\n", 
NE6X_MAX_MSIX_NUM); + return -EPERM; + } + + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) { + dev_warn(ne6x_pf_to_dev(pf), "ne6x config busy, timeout!\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + if (!num_vfs) { + set_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + if (!pci_vfs_assigned(pdev)) { + ne6x_free_vfs(pf); + ne6x_disable_mailbox_irq(pf); + ne6x_free_mailbox_irq(pf); + ne6x_mbx_deinit_snapshot(&pf->hw); + if (test_bit(NE6X_FLAG_SRIOV_ENA, pf->state)) + clear_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + + if (!test_bit(NE6X_REMOVE, pf->state)) { + ne6x_recycle_vp_resources(pf); + err = ne6x_adpt_resetup(pf, true); + } + + clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + if (err) + goto err_recovery; + + return 0; + } + + clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return -EBUSY; + } + + status = ne6x_mbx_init_snapshot(&pf->hw, num_vfs); + if (status) { + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return ne6x_status_to_errno(status); + } + + err = ne6x_pci_sriov_ena(pf, num_vfs); + if (err) { + ne6x_mbx_deinit_snapshot(&pf->hw); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return err; + } + + PBMP_CLEAR(port_bitmap); + + /* config vport, default vlan */ + ne6x_for_each_vf(pf, vf_id) { + vf = &pf->vf[vf_id]; + adpt = vf->adpt; + + /* config default vlan */ + PBMP_PORT_ADD(port_bitmap, adpt->vport); + ne6x_dev_set_vport(adpt); + adpt->hw_feature = ne6x_dev_get_features(adpt); + } + + err = pci_enable_sriov(pf->pdev, num_vfs); + if (err) + goto err_handler; + + clear_bit(NE6X_CONFIG_BUSY, pf->state); + + return num_vfs; + +err_handler: + ne6x_dev_clear_vport(pf); + /* config vport, default vlan */ + ne6x_for_each_pf(pf, vf_id) { + adpt = pf->adpt[vf_id]; + adpt->port_info->hw_queue_base = adpt->port_info->hw_queue_base_old; + ne6x_dev_set_vport(adpt); + } + + if (!pci_vfs_assigned(pdev)) { + ne6x_mbx_deinit_snapshot(&pf->hw); + ne6x_free_vfs(pf); + pf->num_alloc_vfs = 0; + if (test_bit(NE6X_FLAG_SRIOV_ENA, pf->state)) + clear_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + } + +err_recovery: + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return err; +} + +static int ne6x_validate_vf_id(struct ne6x_pf *pf, u16 vf_id) +{ + /* vf_id range is only valid for 0-255, and should always be unsigned */ + if (vf_id >= pf->num_alloc_vfs) + return -EINVAL; + + return 0; +} + +static int ne6x_validate_outer_vf_id(struct ne6x_pf *pf, u16 out_vf_id) +{ + if (out_vf_id >= (pf->num_alloc_vfs / pf->num_alloc_adpt)) + return -EINVAL; + + return 0; +} + +int ne6x_sdk_send_msg_to_vf(struct ne6x_hw *hw, u16 vfid, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_pf *pf = hw->back; + struct ne6x_vf *vf = &pf->vf[vfid]; + int timeout = 2000; + int i; + + usnap.snap.state = v_retval; + usnap.snap.len = msglen; + usnap.snap.type = v_opcode; + + for (i = 0; i < msglen && i < 6; i++) + usnap.snap.data[i] = msg[i]; + + while (!(pf->hw.ne6x_mbx_ready_to_send[vfid])) { + usleep_range(100, 200); + timeout--; + if (!timeout) + break; + } + + wr64_bar4(hw, NE6X_PF_MAILBOX_ADDR(vf->base_queue), usnap.val); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_INT_REQ), (1ULL << vf->base_queue)); + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_TRAVERSE; + pf->hw.ne6x_mbx_ready_to_send[vfid] = false; + + return 0; +} + +static int ne6x_vc_send_msg_to_vf(struct ne6x_vf *vf, u32 v_opcode, + enum virtchnl_status_code v_retval, + u8 *msg, u16 msglen) +{ + struct device *dev;
struct ne6x_pf *pf; + int aq_ret; + + if (!vf) + return -EINVAL; + + pf = vf->pf; + dev = ne6x_pf_to_dev(pf); + + if (ne6x_validate_vf_id(pf, vf->vf_id)) { + dev_err(dev, "vf id[%d] is invalid\n", vf->vf_id); + return -EINVAL; + } + + /* single place to detect unsuccessful return values */ + if (v_retval) + dev_info(dev, "VF %d failed opcode %s, retval: %s\n", vf->vf_id, + ne6x_opcode_str(v_opcode), ne6x_mbox_status_str(v_retval)); + + aq_ret = ne6x_sdk_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen); + if (aq_ret) { + dev_info(dev, "Unable to send the message to VF %d aq_err %d\n", vf->vf_id, aq_ret); + return -EIO; + } + + return 0; +} + +static int ne6x_check_vf_init(struct ne6x_pf *pf, struct ne6x_vf *vf) +{ + if (!test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + dev_err(ne6x_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n", vf->vf_id); + return -EBUSY; + } + + return 0; +} + +static int ne6x_vc_add_def_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + struct device *dev = ne6x_pf_to_dev(vf->pf); + u8 *mac_addr = vc_ether_addr->addr; + + if (!is_unicast_ether_addr(mac_addr)) { + dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + return -EPERM; + } + + if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr)) { + dev_err(dev, "VF already uses this MAC address\n"); + return -EPERM; + } + + ether_addr_copy(vf->dev_lan_addr.addr, mac_addr); + ne6x_adpt_add_mac(adpt, mac_addr, true); + + return 0; +} + +static int ne6x_vc_del_def_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, u8 *mac) +{ + return ne6x_adpt_del_mac(adpt, mac, true); +} + +static int ne6x_vc_get_vf_res_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *vfres = NULL; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *pf_adpt; + int len, ret; + + if (ne6x_check_vf_init(pf, vf)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + pf_adpt = vf->adpt; + + ne6x_vc_add_def_mac_addr(vf, pf_adpt, &vc_ether_addr); + + len = sizeof(union u_ne6x_mbx_snap_buffer_data); + vfres = kzalloc(len, GFP_KERNEL); + if (!vfres) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + goto err; + } + + vfres->snap.type = VIRTCHNL_OP_GET_VF_RESOURCES; + vfres->snap.data[0] = vf->vf_id; /* vport */ + vfres->snap.data[1] = pf_adpt->port_info->lport; /* lport */ + vfres->snap.data[2] = pf_adpt->port_info->hw_port_id; /* pport */ + vfres->snap.data[3] = pf_adpt->port_info->hw_queue_base; /* base_queue */ + vfres->snap.data[4] = pf->num_qps_per_vf; /* num_qps_per_vf */ + vfres->snap.data[5] = pf->num_alloc_vfs / pf->num_alloc_adpt; /* number of VFs per hw port */ + vfres->snap.len = 6; + vf->ready = 0; + vf->adpt->port_info->phy.link_info.link_info = 0; + vf->ready_to_link_notify = 0; + set_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states); + +err: + /* send the response back to the VF; vfres is still NULL on the early + * error paths, so answer from the snap buffer only when it exists + */ + if (!vfres) + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret, NULL, 0); + + vfres->snap.state = v_ret; + ret = ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, + vfres->snap.state, + (u8 *)vfres->snap.data, + vfres->snap.len); + kfree(vfres); + + return ret; +}
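+ +/* PF-to-VF mailbox messages travel in a single 64-bit snapshot word + * (union u_ne6x_mbx_snap_buffer_data); as the senders above show, it + * packs an opcode (type), a length, a status (state) and up to six data + * bytes, which is exactly enough to carry one MAC address per message. + */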
VIRTCHNL_OP_GET_VF_RESOURCES, + vfres->snap.state, + (u8 *)vfres->snap.data, + vfres->snap.len); + + return ret; +} + +static int ne6x_vc_add_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + u8 *mac_addr = vc_ether_addr->addr; + int ret = 0; + + if (likely(is_multicast_ether_addr(mac_addr))) { + if (is_broadcast_ether_addr(mac_addr)) + return 0; + + ne6x_adpt_add_mac(adpt, mac_addr, false); + } else { + ne6x_adpt_add_mac(adpt, mac_addr, true); + } + + return ret; +} + +static int ne6x_vc_del_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + u8 *mac_addr = vc_ether_addr->addr; + int ret = 0; + + if (likely(is_multicast_ether_addr(mac_addr))) { + if (is_broadcast_ether_addr(mac_addr)) + return 0; + + ne6x_adpt_del_mac(adpt, mac_addr, false); + } else { + ne6x_adpt_del_mac(adpt, mac_addr, true); + } + + return ret; +} + +static int ne6x_vc_handle_mac_addr_msg(struct ne6x_vf *vf, u8 *msg, bool set) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *virtchnl_ether_addr); + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *usnap; + struct virtchnl_ether_addr eth_addr; + enum virtchnl_ops vc_op; + struct ne6x_adapter *adpt; + u8 *mac_addr; + int result; + + if (set) { + vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; + ne6x_vc_cfg_mac = ne6x_vc_add_mac_addr; + } else { + vc_op = VIRTCHNL_OP_DEL_ETH_ADDR; + ne6x_vc_cfg_mac = ne6x_vc_del_mac_addr; + } + + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } + + usnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + mac_addr = usnap->snap.data; + + if (is_broadcast_ether_addr(mac_addr) || is_zero_ether_addr(mac_addr)) + goto handle_mac_exit; + + if (ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) + goto handle_mac_exit; + + ether_addr_copy(eth_addr.addr, mac_addr); + result = ne6x_vc_cfg_mac(vf, adpt, ð_addr); + if (result == -EEXIST || result == -ENOENT) { + goto handle_mac_exit; + } else if (result) { + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + goto handle_mac_exit; + } + +handle_mac_exit: + /* send the response to the VF */ + return ne6x_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); +} + +static int ne6x_vc_add_mac_addr_msg(struct ne6x_vf *vf, u8 *msg) +{ + return ne6x_vc_handle_mac_addr_msg(vf, msg, true); +} + +static int ne6x_vc_del_mac_addr_msg(struct ne6x_vf *vf, u8 *msg) +{ + return ne6x_vc_handle_mac_addr_msg(vf, msg, false); +} + +static int ne6x_vf_set_adpt_promisc(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + u8 promisc_m) +{ + int status = 0; + + dev_info(ne6x_pf_to_dev(adpt->back), "%s: adpt->vport = %d enable promiscuous <%s>\n", + __func__, adpt->vport, + (promisc_m & NE6X_UCAST_PROMISC_BITS) ? 
"unicast" : "multicast"); + + if (promisc_m & NE6X_UCAST_PROMISC_BITS) + status = ne6x_dev_set_uc_promiscuous_enable(adpt, true); + + if (promisc_m & NE6X_MCAST_PROMISC_BITS) + status = ne6x_dev_set_mc_promiscuous_enable(adpt, true); + + if (status) { + dev_err(ne6x_pf_to_dev(adpt->back), "disable Tx/Rx filter promiscuous mode off VF-%u mac: %d, trunk: 0x%x, failed, error: %d\n", + vf->vf_id, 0, adpt->port_info->hw_trunk_id, status); + return status; + } + + return 0; +} + +static int ne6x_vf_clear_adpt_promisc(struct ne6x_vf *vf, struct ne6x_adapter *adpt, u8 promisc_m) +{ + int status = 0; + + dev_info(ne6x_pf_to_dev(adpt->back), "%s: adpt->vport = %d clear promiscuous <%s>\n", + __func__, adpt->vport, + (promisc_m & NE6X_UCAST_PROMISC_BITS) ? "unicast" : "multicast"); + + if (promisc_m & NE6X_UCAST_PROMISC_BITS) + status = ne6x_dev_set_uc_promiscuous_enable(adpt, false); + + if (promisc_m & NE6X_MCAST_PROMISC_BITS) + status = ne6x_dev_set_mc_promiscuous_enable(adpt, false); + + if (status) { + dev_err(ne6x_pf_to_dev(adpt->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; + } + + return 0; +} + +static int ne6x_vc_cfg_promiscuous_mode_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *usnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)usnap->snap.data; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + bool alluni = false, allmulti = false; + int ucast_err = 0, mcast_err = 0; + struct ne6x_pf *pf = vf->pf; + u8 mcast_m, ucast_m; + struct ne6x_adapter *adpt; + struct device *dev; + + if (!test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + dev = ne6x_pf_to_dev(pf); + + if (info->flags & FLAG_VF_UNICAST_PROMISC) + alluni = true; + + if (info->flags & FLAG_VF_MULTICAST_PROMISC) + allmulti = true; + + mcast_m = NE6X_MCAST_PROMISC_BITS; + ucast_m = NE6X_UCAST_PROMISC_BITS; + + if (alluni) + ucast_err = ne6x_vf_set_adpt_promisc(vf, adpt, ucast_m); + else + ucast_err = ne6x_vf_clear_adpt_promisc(vf, adpt, ucast_m); + + if (allmulti) + mcast_err = ne6x_vf_set_adpt_promisc(vf, adpt, mcast_m); + else + mcast_err = ne6x_vf_clear_adpt_promisc(vf, adpt, mcast_m); + + if (!mcast_err) { + if (allmulti && !test_and_set_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", + vf->vf_id); + else if (!allmulti && test_and_clear_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", + vf->vf_id); + } + + if (!ucast_err) { + if (alluni && !test_and_set_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", + vf->vf_id); + else if (!alluni && test_and_clear_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", + vf->vf_id); + } + +error_param: + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, v_ret, NULL, 0); +} + +static bool ne6x_is_vf_link_up(struct ne6x_vf *vf) +{ + struct ne6x_port_info *pi = ne6x_vf_get_port_info(vf); + struct ne6x_pf *pf = vf->pf; + + if (ne6x_check_vf_init(pf, vf)) + return false; + + if (vf->link_forced) + return vf->link_up; + else + return pi->phy.link_info.link_info & 
NE6X_AQ_LINK_UP; +} + +u32 ne6x_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) +{ + u32 speed; + + switch (link_speed) { + case NE6X_LINK_SPEED_10GB: + speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + speed = NE6X_LINK_SPEED_100GB; + break; + default: + speed = NE6X_LINK_SPEED_UNKNOWN; + break; + } + + return speed; +} + +static void ne6x_set_pfe_link(struct ne6x_vf *vf, struct virtchnl_pf_event *pfe, + int ne6x_link_speed, bool link_up) +{ + pfe->link_status = link_up; + /* Speed in Mbps */ + if (link_up && vf->link_forced) + ne6x_link_speed = NE6X_LINK_SPEED_25GB; + + pfe->link_speed = ne6x_conv_link_speed_to_virtchnl(true, ne6x_link_speed); +} + +void ne6x_vc_notify_vf_link_state(struct ne6x_vf *vf) +{ + struct virtchnl_pf_event pfe = {0}; + struct ne6x_hw *hw = &vf->pf->hw; + struct ne6x_port_info *pi; + u8 data[6] = {0}; + + pi = ne6x_vf_get_port_info(vf); + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + + if (ne6x_is_vf_link_up(vf)) + ne6x_set_pfe_link(vf, &pfe, pi->phy.link_info.link_speed, true); + else + ne6x_set_pfe_link(vf, &pfe, NE6X_LINK_SPEED_UNKNOWN, false); + + data[0] = pfe.event; + data[1] = (pfe.link_speed >> 24) & 0xff; + data[2] = (pfe.link_speed >> 16) & 0xff; + data[3] = (pfe.link_speed >> 8) & 0xff; + data[4] = (pfe.link_speed >> 0) & 0xff; + data[5] = pfe.link_status; + + ne6x_sdk_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 6); +} + +void ne6x_vc_notify_link_state(struct ne6x_vf *vf) +{ + if (vf->ready_to_link_notify) + ne6x_vc_notify_vf_link_state(vf); +} + +static void ne6x_vc_notify_vf_reset(struct ne6x_vf *vf) +{ + struct virtchnl_pf_event pfe; + struct ne6x_pf *pf; + u8 data[6] = {0}; + + if (!vf) + return; + + pf = vf->pf; + if (ne6x_validate_vf_id(pf, vf->vf_id)) + return; + + /* Bail out if VF is in disabled state, neither initialized, nor active + * state - otherwise proceed with notifications + */ + if ((!test_bit(NE6X_VF_STATE_INIT, vf->vf_states) && + !test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) || + test_bit(NE6X_VF_STATE_DIS, vf->vf_states)) + return; + + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; + data[0] = pfe.event; + ne6x_sdk_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 1); +} + +static void ne6x_vc_notify_vf_trust_change(struct ne6x_vf *vf) +{ + struct virtchnl_vf_config vfconfig = {0}; + struct ne6x_hw *hw = &vf->pf->hw; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + u8 data[6] = {0}; + + dev = ne6x_pf_to_dev(pf); + vfconfig.type = VIRTCHNL_VF_CONFIG_TRUST; + if (test_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag)) + vfconfig.data[0] = 1; + else + vfconfig.data[0] = 0; + + data[0] = vfconfig.type; + data[1] = vfconfig.data[0]; + dev_info(dev, "vfconfig_type = %d,data = %d\n", data[0], data[1]); + ne6x_sdk_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_VF_CONFIG, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 2); +} + +bool ne6x_reset_vf(struct ne6x_vf *vf, bool is_vflr) +{ + struct ne6x_adapter *adpt; + + adpt = ne6x_get_vf_adpt(vf); + + if (test_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states)) + ne6x_dis_vf_qs(vf); + + if (test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) { + clear_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states); + adpt->port_info->phy.link_info.link_info = 0x0; + if (is_vflr) + vf->rx_tx_state = false; + } + + if (test_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + 
clear_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states);
+
+	if (test_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states))
+		clear_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states);
+
+	return 0;
+}
+
+static void ne6x_vc_reset_vf(struct ne6x_vf *vf, bool update_tx_rx)
+{
+	ne6x_vc_notify_vf_reset(vf);
+	ne6x_reset_vf(vf, update_tx_rx);
+}
+
+static int ne6x_vc_request_qs_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	union u_ne6x_mbx_snap_buffer_data *usnap = (union u_ne6x_mbx_snap_buffer_data *)msg;
+	u16 req_queues = (usnap->snap.data[1] << 8) | usnap->snap.data[0];
+	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+	u16 max_avail_vf_qps, max_allowed_vf_qps;
+	u8 req_reset = usnap->snap.data[2];
+	bool need_update_rx_tx = false;
+	struct ne6x_pf *pf = vf->pf;
+	u16 tx_rx_queue_left;
+	u16 num_queue_pairs = 0;
+	struct device *dev;
+	u16 cur_queues;
+
+	ne6x_clear_vf_status(vf);
+	dev = ne6x_pf_to_dev(pf);
+
+	if (!test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) {
+		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+		goto error_param;
+	}
+
+	max_allowed_vf_qps = pf->num_qps_per_vf;
+	cur_queues = vf->num_vf_qs;
+	tx_rx_queue_left = cur_queues;
+	max_avail_vf_qps = tx_rx_queue_left + cur_queues;
+
+	if (!req_queues) {
+		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n", vf->vf_id);
+	} else if (req_queues > max_allowed_vf_qps) {
+		dev_err(dev, "VF %d tried to request more than %d queues.\n", vf->vf_id,
+			max_allowed_vf_qps);
+		num_queue_pairs = max_allowed_vf_qps;
+	} else if (req_queues > cur_queues && req_queues - cur_queues > tx_rx_queue_left) {
+		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", vf->vf_id,
+			 req_queues - cur_queues, tx_rx_queue_left);
+		num_queue_pairs = min_t(u16, max_avail_vf_qps, max_allowed_vf_qps);
+	} else {
+		if (req_queues != vf->num_req_qs) {
+			vf->num_req_qs = req_queues;
+			need_update_rx_tx = true;
+		}
+		if (req_reset) {
+			ne6x_vc_reset_vf(vf, need_update_rx_tx);
+		} else {
+			vf->ready = false;
+			if (need_update_rx_tx)
+				vf->rx_tx_state = false;
+
+			vf->adpt->port_info->phy.link_info.link_info = 0x0;
+			return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+						      VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+		}
+
+		return 0;
+	}
+
+error_param:
+	/* send the response to the VF */
+	return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, v_ret, (u8 *)&num_queue_pairs,
+				      2);
+}
+
+static int ne6x_vc_config_mtu_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+	union u_ne6x_mbx_snap_buffer_data *rsvsnap;
+	struct ne6x_adapter *adpt = vf->adpt;
+	struct ne6x_pf *pf = vf->pf;
+	struct device *dev;
+	u16 *mtu;
+
+	rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg;
+	mtu = (u16 *)(rsvsnap->snap.data);
+
+	dev = ne6x_pf_to_dev(pf);
+	dev_info(dev, "%s: mtu = %d\n", __func__, *mtu);
+	ne6x_dev_set_mtu(adpt, *mtu);
+
+	return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_MTU, v_ret, NULL, 0);
+}
+
+struct virtchnl_vlan_info {
+	u16 vlan_id;
+	s16 flags;
+};
+
+static int ne6x_vc_config_vlan_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+	union u_ne6x_mbx_snap_buffer_data *rsvsnap;
+	struct virtchnl_vlan_info *dpdk_vlan;
+	struct ne6x_adapter *adpt = vf->adpt;
+	struct ne6x_pf *pf = vf->pf;
+	struct device *dev;
+	struct ne6x_vlan vlan;
+	int ret;
+
+	dev = ne6x_pf_to_dev(pf);
+	rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg;
+	dpdk_vlan = (struct virtchnl_vlan_info *)rsvsnap->snap.data;
+	if (dpdk_vlan->flags) {
+		dev_info(dev, "%s: flags = %d vlan id = %d\n", __func__,
dpdk_vlan->flags, + dpdk_vlan->vlan_id); + + vlan = NE6X_VLAN(ETH_P_8021Q, dpdk_vlan->vlan_id, 0); + ret = ne6x_adpt_add_vlan(adpt, vlan); + if (!ret) { + dev_info(dev, "%s: add vlan id success\n", __func__); + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } else { + dev_info(dev, "%s: add vlan id failed\n", __func__); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } + } else { + dev_info(dev, "%s: flags = %d vlan id = %d\n", __func__, dpdk_vlan->flags, + dpdk_vlan->vlan_id); + + vlan = NE6X_VLAN(ETH_P_8021Q, dpdk_vlan->vlan_id, 0); + ret = ne6x_adpt_del_vlan(adpt, vlan); + if (ret) { + dev_info(dev, "%s: del vlan id failed\n", __func__); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } else { + dev_info(dev, "%s: del vlan id success\n", __func__); + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } + } + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VLAN, v_ret, NULL, 0); +} + +#define ETH_VLAN_STRIP_MASK 0x0001 +#define ETH_VLAN_FILTER_MASK 0x0002 +#define ETH_QINQ_STRIP_MASK 0x0008 +#define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001 +#define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020 +#define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200 + +struct virtchnl_vlan_offload_info { + u16 mask; + u16 feature; +}; + +static int ne6x_vc_config_vlan_offload_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_vlan_offload_info *offload; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + + dev = ne6x_pf_to_dev(pf); + adpt->hw_feature = ne6x_dev_get_features(adpt); + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + offload = (struct virtchnl_vlan_offload_info *)rsvsnap->snap.data; + + if (offload->mask & ETH_VLAN_FILTER_MASK) { + dev_info(dev, "%s: ETH_VLAN_FILTER_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_VLAN_FILTER) { + dev_info(dev, "%s: ETH_VLAN_FILTER ON\n", __func__); + adpt->hw_feature |= (NE6X_F_RX_VLAN_FILTER); + } else { + dev_info(dev, "%s: ETH_VLAN_FILTER OFF\n", __func__); + adpt->hw_feature &= ~(NE6X_F_RX_VLAN_FILTER); + } + } + + if (offload->mask & ETH_VLAN_STRIP_MASK) { + dev_info(dev, "%s: ETH_VLAN_STRIP_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_VLAN_STRIP) { + dev_info(dev, "%s: ETH_VLAN_STRIP ON\n", __func__); + adpt->hw_feature |= NE6X_F_RX_VLAN_STRIP; + } else { + dev_info(dev, "%s: ETH_VLAN_STRIP OFF\n", __func__); + adpt->hw_feature &= ~NE6X_F_RX_VLAN_STRIP; + } + } + + if (offload->mask & ETH_QINQ_STRIP_MASK) { + dev_info(dev, "%s: ETH_QINQ_STRIP_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_QINQ_STRIP) { + dev_info(dev, "%s: ETH_QINQ_STRIP ON\n", __func__); + adpt->hw_feature |= NE6X_F_RX_QINQ_STRIP; + } else { + dev_info(dev, "%s: ETH_QINQ_STRIP OFF\n", __func__); + adpt->hw_feature &= ~NE6X_F_RX_QINQ_STRIP; + } + } + + ne6x_dev_set_features(adpt, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD, v_ret, NULL, 0); +} + +struct virtchnl_flow_ctrl_info { + u16 mode; + u16 high_water; +}; + +enum rte_eth_fc_mode { + RTE_FC_NONE = 0, /**< Disable flow control. */ + RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */ + RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */ + RTE_FC_FULL /**< Enable flow control on both side. 
*/
+};
+
+static int ne6x_vc_config_flow_ctrl_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+	union u_ne6x_mbx_snap_buffer_data *rsvsnap;
+	struct virtchnl_flow_ctrl_info *flow;
+	struct ne6x_adapter *adpt = vf->adpt;
+	struct ne6x_flowctrl flowctrl = {0};
+	struct ne6x_pf *pf = vf->pf;
+	struct device *dev;
+	int ret;
+
+	dev = ne6x_pf_to_dev(pf);
+	rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg;
+	flow = (struct virtchnl_flow_ctrl_info *)rsvsnap->snap.data;
+	if (flow->mode == RTE_FC_FULL) {
+		flowctrl.rx_pause = 1;
+		flowctrl.tx_pause = 1;
+	} else if (flow->mode == RTE_FC_RX_PAUSE) {
+		flowctrl.rx_pause = 1;
+	} else if (flow->mode == RTE_FC_TX_PAUSE) {
+		flowctrl.tx_pause = 1;
+	}
+
+	dev_info(dev, "%s: mode = %d high water = %d\n", __func__, flow->mode, flow->high_water);
+	ret = ne6x_dev_set_flowctrl(adpt, &flowctrl);
+	if (ret) {
+		dev_info(dev, "%s: set flow ctrl failed\n", __func__);
+		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+	}
+
+	ret = ne6x_dev_set_vf_bw(adpt, flow->high_water);
+	if (ret)
+		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+	return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_FLOW_CTRL, v_ret, NULL, 0);
+}
+
+static int ne6x_vc_config_rss_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+	union u_ne6x_mbx_snap_buffer_data *rsvsnap;
+	struct ne6x_adapter *adpt = vf->adpt;
+	u8 *data = (u8 *)&adpt->rss_info;
+	int i;
+
+	rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg;
+
+	for (i = 0; i < rsvsnap->snap.len; i++) {
+		data[adpt->rss_size] = rsvsnap->snap.data[i];
+		adpt->rss_size++;
+	}
+
+	if (adpt->rss_size >= sizeof(struct ne6x_rss_info)) {
+		adpt->rss_size = 0;
+		ne6x_dev_set_rss(adpt, &adpt->rss_info);
+	}
+
+	return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS, v_ret, NULL, 0);
+}
+
+static int ne6x_vc_changed_rss_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	union u_ne6x_mbx_snap_buffer_data *rsvsnap;
+	struct ne6x_adapter *adpt = vf->adpt;
+	int i, ret;
+
+	rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg;
+	memcpy(&adpt->num_queue, rsvsnap->snap.data, sizeof(adpt->num_queue));
+
+	if (adpt->rss_info.ind_table_size > NE6X_RSS_MAX_IND_TABLE_SIZE)
+		adpt->rss_info.ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE;
+
+	for (i = 0; i < adpt->rss_info.ind_table_size; i++)
+		adpt->rss_info.ind_table[i] = ethtool_rxfh_indir_default(i, adpt->num_queue);
+
+	ret = ne6x_dev_set_rss(adpt, &adpt->rss_info);
+	ret |= ne6x_dev_add_unicast_for_fastmode(adpt, vf->dev_lan_addr.addr);
+	ret |= ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CHANGED_RSS,
+				      VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+
+	return ret;
+}
+
+static int ne6x_vc_add_vlan_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	struct ne6x_vlan vlan;
+	u16 vlan_tpid = 0;
+	u16 vlan_id = 0;
+
+	vlan_id = *((u16 *)msg);
+	vlan_tpid = *((u16 *)(msg + 2));
+	dev_info(&vf->pf->pdev->dev, "%s:vlan tpid:%04x,vlan id:%04x\n",
+		 __func__, vlan_tpid, vlan_id);
+
+	vlan = NE6X_VLAN(vlan_tpid, vlan_id, 0);
+
+	dev_info(&vf->pf->pdev->dev, "%s:vfp_vid %04x\n", __func__, vf->vfp_vid);
+
+	ne6x_adpt_add_vlan(vf->adpt, vlan);
+
+	return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ne6x_vc_del_vlan_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	struct ne6x_vlan vlan;
+	u16 vlan_tpid = 0;
+	u16 vlan_id = 0;
+
+	vlan_id = *((u16 *)msg);
+	vlan_tpid = *((u16 *)(msg + 2));
+
+	dev_info(&vf->pf->pdev->dev, "%s:vlan tpid:%04x,vlan id:%04x\n", __func__, vlan_tpid,
vlan_id); + vlan = NE6X_VLAN(vlan_tpid, vlan_id, 0); + + ne6x_adpt_del_vlan(vf->adpt, vlan); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ne6x_vc_config_offload_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + struct ne6x_adapter *adpt = vf->adpt; + + adpt->hw_feature = rsvsnap->snap.data[3]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[2]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[1]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[0]; + + if (vf->tx_rate) + adpt->hw_feature |= NE6X_F_TX_QOSBANDWIDTH; + else + adpt->hw_feature &= ~NE6X_F_TX_QOSBANDWIDTH; + + ne6x_dev_set_features(adpt, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_OFFLOAD, VIRTCHNL_STATUS_SUCCESS, NULL, + 0); +} + +static int ne6x_vc_request_feature_msg(struct ne6x_vf *vf, u8 *msg) +{ + struct ne6x_adapter *adpt = vf->adpt; + + adpt->hw_feature = ne6x_dev_get_features(adpt); + dev_info(&vf->pf->pdev->dev, "%s: vf->vf_id =%d vport = %d lport = %d pport = %d hw_queue_base = %d hw_feature = %08X\n", + __func__, vf->vf_id, adpt->vport, adpt->port_info->lport, + adpt->port_info->hw_port_id, adpt->port_info->hw_queue_base, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_FEATURE, VIRTCHNL_STATUS_SUCCESS, + (u8 *)&adpt->hw_feature, sizeof(u32)); +} + +static int ne6x_vc_reset_vf_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + + vf->ready = false; + vf->rx_tx_state = 0; + vf->adpt->port_info->phy.link_info.link_info = false; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + ne6x_dev_set_features(vf->adpt, 0); + ne6x_dev_del_vf_qinq(vf, 0, 0); + + vf->port_vlan_info = NE6X_VLAN(0, 0, 0); + vf->link_forced = false; + vf->trusted = false; + vf->tx_rate = 0; + clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_dev_del_broadcast_leaf(ne6x_get_vf_adpt(vf)); + ne6x_adpt_clear_mac_vlan(ne6x_get_vf_adpt(vf)); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_RESET_VF, VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ne6x_get_logic_vf_id(struct net_device *netdev, int vf_id) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_pf *pf = adpt->back; + + return (adpt->idx * (pf->num_alloc_vfs / pf->num_alloc_adpt) + vf_id); +} + +int ne6x_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + struct ne6x_vf *vf; + int logic_vf_id; + int ret = 0; + + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + return ret; + + logic_vf_id = ne6x_get_logic_vf_id(netdev, vf_id); + + if (logic_vf_id >= pf->num_alloc_vfs) + return -EINVAL; + + vf = ne6x_get_vf_by_id(pf, logic_vf_id); + + netdev_info(netdev, "set vf-%d trust %s\n", vf_id, trusted ? 
"on" : "off"); + + if (!vf) { + netdev_err(netdev, "vf is NULL\n"); + return -EINVAL; + } + + /* Check if already ready ?*/ + if (!vf->ready) { + netdev_err(netdev, "vf is not ready\n"); + return (-1); + } + + /* Check if already trusted */ + if (trusted == vf->trusted) + return 0; + + vf->trusted = trusted; + + if (vf->trusted) { + set_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + } else { + clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_vf_clear_adpt_promisc(vf, ne6x_get_vf_adpt(vf), + NE6X_UCAST_PROMISC_BITS | + NE6X_MCAST_PROMISC_BITS); + } + + ne6x_vc_notify_vf_trust_change(vf); + dev_info(ne6x_pf_to_dev(pf), "VF %u is now %strusted\n", + logic_vf_id, trusted ? "" : "un"); + + return 0; +} + +int ne6x_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + int ret = 0, logic_vf_id; + struct ne6x_vf *vf; + + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + return ret; + + logic_vf_id = ne6x_get_logic_vf_id(netdev, vf_id); + + vf = ne6x_get_vf_by_id(pf, logic_vf_id); + if (!vf) + return -EINVAL; + + netdev_info(netdev, "set vf-%d link state %s\n", vf_id, + link_state == IFLA_VF_LINK_STATE_ENABLE + ? "enable" + : (link_state == IFLA_VF_LINK_STATE_DISABLE ? "disable" : "auto")); + + /* Check if already ready ?*/ + if (!vf->ready) + return (-1); + + if (!vf->trusted) + return (-1); + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + break; + default: + ret = -EINVAL; + goto out_put_vf; + } + + ne6x_vc_notify_vf_link_state(vf); + +out_put_vf: + return ret; +} + +static int ne6x_vc_modify_vf_mac(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *pf_adpt; + + if (ne6x_check_vf_init(pf, vf)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + pf_adpt = vf->adpt; + if (!pf->adpt) + dev_info(ne6x_pf_to_dev(pf), "adpt is null vf %d\n", vf->vf_id); + + /* set zero addr mean clear mac */ + if (is_zero_ether_addr(vc_ether_addr.addr)) + return ne6x_vc_del_def_mac_addr(vf, pf_adpt, vf->dev_lan_addr.addr); + + if (is_valid_ether_addr(vf->dev_lan_addr.addr)) { + ne6x_vc_del_def_mac_addr(vf, pf_adpt, vf->dev_lan_addr.addr); + memset(vf->dev_lan_addr.addr, 0, 6); + } + + ne6x_vc_add_def_mac_addr(vf, pf_adpt, &vc_ether_addr); + +err: + /* send the response back to the VF */ + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_VF_ADDR, v_ret, vc_ether_addr.addr, 6); +} + +static int ne6x_vc_set_fast_mode(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_pf *pf = vf->pf; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + + if (rsvsnap->snap.data[0]) { + vf->adpt->num_queue = rsvsnap->snap.data[1]; + v_ret = ne6x_dev_set_fast_mode(pf, true, 
vf->adpt->num_queue); + } else { + v_ret = ne6x_dev_set_fast_mode(pf, false, 0); + } + + /* send the response back to the VF */ + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_FAST_MDOE, v_ret, NULL, 0); +} + +void ne6x_vc_process_vf_msg(struct ne6x_pf *pf) +{ + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_hw *hw = &pf->hw; + struct ne6x_vf *vf = NULL; + struct ne6x_vlan vlan; + struct device *dev; + int err = 0; + int i; + + dev = ne6x_pf_to_dev(pf); + ne6x_for_each_vf(pf, i) { + if (pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i]) { + vf = &pf->vf[i]; + usnap.val = rd64_bar4(hw, NE6X_VF_MAILBOX_ADDR(vf->base_queue)); + WARN(usnap.snap.len > 6, ">>>>>>>>>>>>>>>>>>recv VF mailbox error!!!<<<<<<<<<<<<<<<<<<<"); + switch (usnap.snap.type) { + case VIRTCHNL_OP_GET_VF_RESOURCES: + err = ne6x_vc_get_vf_res_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + NULL, 0); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + NULL, 0); + vf->ready = 1; + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + err = ne6x_vc_add_mac_addr_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_DEL_ETH_ADDR: + err = ne6x_vc_del_mac_addr_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_ADD_VLAN: + err = ne6x_vc_add_vlan_msg(vf, (u8 *)&usnap.snap.data); + break; + case VIRTCHNL_OP_DEL_VLAN: + err = ne6x_vc_del_vlan_msg(vf, (u8 *)&usnap.snap.data); + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + ne6x_vc_cfg_promiscuous_mode_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_EVENT: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + NULL, 0); + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + err = ne6x_vc_request_qs_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_RSS: + err = ne6x_vc_config_rss_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_VLAN: + err = ne6x_vc_config_vlan_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD: + err = ne6x_vc_config_vlan_offload_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_MTU: + err = ne6x_vc_config_mtu_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_FLOW_CTRL: + err = ne6x_vc_config_flow_ctrl_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CHANGED_RSS: + err = ne6x_vc_changed_rss_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_OFFLOAD: + err = ne6x_vc_config_offload_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_GET_VF_FEATURE: + err = ne6x_vc_request_feature_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_RESET_VF: + err = ne6x_vc_reset_vf_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_GET_PORT_STATUS: + ne6x_dev_add_broadcast_leaf(ne6x_get_vf_adpt(vf)); + vlan = NE6X_VLAN(ETH_P_8021Q, 0xfff, 0); + ne6x_adpt_add_vlan(ne6x_get_vf_adpt(vf), vlan); + ne6x_vc_notify_vf_link_state(vf); + + if (!vf->ready_to_link_notify) + vf->ready_to_link_notify = 1; + + ne6x_linkscan_schedule(pf); + break; + case VIRTCHNL_OP_SET_VF_ADDR: + err = ne6x_vc_modify_vf_mac(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_SET_FAST_MDOE: + err = ne6x_vc_set_fast_mode(vf, (u8 *)&usnap); + break; + /* VIRTCHNL_OP_VERSION not used */ + default: + dev_err(dev, "Unsupported opcode %s from VF %d\n", + ne6x_opcode_str(usnap.snap.type), i); + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, + NULL, 0); + break; + } + pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i] = false; + } + 
if (err)
+			/* Errors from the individual message handlers are
+			 * only logged here; keep servicing the remaining
+			 * VFs with pending messages.
+			 */
+			dev_info(dev, "PF failed to process message from VF %d, opcode %d, error %d\n",
+				 i, usnap.snap.type, err);
+	}
+
+	if (test_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state))
+		clear_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state);
+}
+
+int ne6x_get_vf_config(struct net_device *netdev, int vf_id,
+		       struct ifla_vf_info *ivi)
+{
+	struct ne6x_netdev_priv *np = netdev_priv(netdev);
+	struct ne6x_adapter *adpt = np->adpt;
+	struct ne6x_pf *pf = adpt->back;
+	struct ne6x_vf *vf;
+	int logic_vfid = 0;
+	int ret = 0;
+
+	/* validate the request */
+	ret = ne6x_validate_outer_vf_id(pf, vf_id);
+	if (ret)
+		goto error_param;
+
+	logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id);
+	vf = &pf->vf[logic_vfid];
+	/* first adpt is always the LAN adpt */
+	adpt = pf->adpt[vf->lan_adpt_idx];
+	if (!adpt) {
+		ret = -ENOENT;
+		goto error_param;
+	}
+
+	ivi->vf = vf_id;
+
+	ether_addr_copy(ivi->mac, vf->dev_lan_addr.addr);
+
+	ivi->vlan = vf->port_vlan_info.vid;
+	ivi->qos = vf->port_vlan_info.prio;
+	if (vf->port_vlan_info.vid)
+		ivi->vlan_proto = cpu_to_be16(vf->port_vlan_info.tpid);
+
+	if (!vf->link_forced)
+		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+	else if (vf->link_up)
+		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+	else
+		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
+	ivi->max_tx_rate = vf->tx_rate;
+	ivi->min_tx_rate = 0;
+	if (test_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag))
+		ivi->trusted = 1;
+	else
+		ivi->trusted = 0;
+
+error_param:
+	return ret;
+}
+
+static void ne6x_calc_token_for_bw(int max_tx_rate, int *time_inv, int *token)
+{
+	if (max_tx_rate <= 100) {
+		*time_inv = 3910;
+		*token = max_tx_rate;
+	} else if (max_tx_rate <= 1000) {
+		*time_inv = 790;
+		*token = max_tx_rate / 5;
+	} else if (max_tx_rate < 5000) {
+		*time_inv = 395;
+		*token = max_tx_rate / 10;
+	} else if (max_tx_rate < 10000) {
+		*time_inv = 118;
+		*token = max_tx_rate / 33;
+	} else {
+		*time_inv = 39;
+		*token = max_tx_rate / 100;
+	}
+}
+
+int ne6x_set_vf_bw_for_max_vpnum(struct ne6x_pf *pf, int vf_id, int max_tx_rate)
+{
+	union ne6x_sq_meter_cfg0 sq_meter_cfg0;
+	union ne6x_sq_meter_cfg1 sq_meter_cfg1;
+	union ne6x_sq_meter_cfg2 sq_meter_cfg2;
+	union ne6x_sq_meter_cfg3 sq_meter_cfg3;
+	struct ne6x_hw *hw = &pf->hw;
+	int time_inv = 0;
+	int token = 0;
+
+	sq_meter_cfg3.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG3));
+	sq_meter_cfg3.reg.csr_meter_pause_threshold_vp = 1;
+	wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG3), sq_meter_cfg3.val);
+	sq_meter_cfg2.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG2));
+	sq_meter_cfg2.reg.csr_meter_resume_threshold_vp = 1;
+	wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG2), sq_meter_cfg2.val);
+
+	sq_meter_cfg1.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG1));
+	sq_meter_cfg1.reg.csr_meter_refresh_count_vp = max_tx_rate;
+
+	if (max_tx_rate) {
+		ne6x_calc_token_for_bw(max_tx_rate, &time_inv, &token);
+		sq_meter_cfg1.reg.csr_meter_refresh_count_vp = token;
+		sq_meter_cfg1.reg.csr_meter_refresh_interval_vp = time_inv;
+	} else {
+		sq_meter_cfg1.reg.csr_meter_refresh_count_vp = 0x1;
+		sq_meter_cfg1.reg.csr_meter_refresh_interval_vp = 0x1;
+	}
+
+	wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG1), sq_meter_cfg1.val);
+	sq_meter_cfg0.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG0));
+	sq_meter_cfg0.reg.csr_meter_pkt_token_num_vp = 0x1;
+	sq_meter_cfg0.reg.csr_meter_ipg_len_vp = 0x0;
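+	/* refresh_en set here is only a default; it is finalized below
+	 * depending on whether a non-zero rate limit was requested.
+	 */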
sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x1; + sq_meter_cfg0.reg.csr_meter_packet_mode_vp = 0x0; + + if (max_tx_rate) { + sq_meter_cfg0.reg.csr_meter_rate_limit_en_vp = 0x1; + sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x1; + } else { + sq_meter_cfg0.reg.csr_meter_rate_limit_en_vp = 0x0; + sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x0; + } + + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG0), sq_meter_cfg0.val); + + return 0; +} + +void ne6x_clr_vf_bw_for_max_vpnum(struct ne6x_pf *pf) +{ + int index; + + for (index = 0; index < NE6X_MAX_VP_NUM; index++) + ne6x_set_vf_bw_for_max_vpnum(pf, index, 0); +} + +int ne6x_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_pf *pf = np->adpt->back; + struct ne6x_adapter *adpt; + struct ne6x_vf *vf; + int logic_vfid; + int ret; + + /* validate the request */ + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + goto error; + + logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id); + vf = &pf->vf[logic_vfid]; + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + ret = -EINVAL; + goto error; + } + + ret = ne6x_validata_tx_rate(adpt, logic_vfid, min_tx_rate, max_tx_rate); + if (ret) { + ret = -EINVAL; + goto error; + } + + if (!test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", logic_vfid); + ret = -EAGAIN; + goto error; + } + + if (pf->num_alloc_vfs == 64) + ret = ne6x_set_vf_bw_for_max_vpnum(pf, logic_vfid, max_tx_rate); + else + ret = ne6x_dev_set_vf_bw(adpt, max_tx_rate); + + if (ret) + goto error; + + vf->tx_rate = max_tx_rate; + + return 0; +error: + return ret; +} + +int ne6x_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_pf *pf = adpt->back; + struct ne6x_vf *vf; + int logic_vfid; + int ret; + + /* validate the request */ + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + goto error_param; + + logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id); + vf = &pf->vf[logic_vfid]; + + adpt = ne6x_get_vf_adpt(vf); + if (!is_valid_ether_addr(mac)) { + dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); + ret = -EINVAL; + goto error_param; + } + + if (is_multicast_ether_addr(mac)) { + dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); + ret = -EINVAL; + goto error_param; + } + + if (ether_addr_equal(vf->dev_lan_addr.addr, mac)) { + dev_err(&pf->pdev->dev, "already use the same Ethernet address %pM for VF %d\n", + mac, vf_id); + goto error_param; + } + + /*simluate a msg from vf*/ + usnap.snap.type = VIRTCHNL_OP_SET_VF_ADDR; + usnap.snap.state = VIRTCHNL_STATUS_SUCCESS; + usnap.snap.len = 6; + memcpy(usnap.snap.data, mac, usnap.snap.len); + ret = ne6x_vc_modify_vf_mac(vf, (u8 *)&usnap); + +error_param: + return ret; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h new file mode 100644 index 0000000000000000000000000000000000000000..2f094d164fe3f7b4527c4495147afea0198b3b04 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_VIRTCHNL_PF_H +#define _NE6X_VIRTCHNL_PF_H + +#include "mailbox.h" + +#define NE6X_NO_ADPT 0xffff + +enum virtchnl_event_codes { + VIRTCHNL_EVENT_UNKNOWN = 0, + VIRTCHNL_EVENT_LINK_CHANGE, + VIRTCHNL_EVENT_RESET_IMPENDING, + VIRTCHNL_EVENT_PF_DRIVER_CLOSE, + VIRTCHNL_EVENT_DCF_ADPT_MAP_UPDATE, +}; + +struct virtchnl_pf_event { + u8 event; + u32 link_speed; + u8 link_status; +}; + +union u_ne6x_mbx_snap_buffer_data { + struct ne6x_mbx_snap_buffer_data snap; + u64 val; +}; + +/* Specific VF states */ +enum ne6x_vf_states { + NE6X_VF_STATE_INIT = 0, /* PF is initializing VF */ + NE6X_VF_STATE_ACTIVE, /* VF resources are allocated for use */ + NE6X_VF_STATE_QS_ENA, /* VF queue(s) enabled */ + NE6X_VF_STATE_DIS, + NE6X_VF_STATE_MC_PROMISC, + NE6X_VF_STATE_UC_PROMISC, + NE6X_VF_STATES_NBITS +}; + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; +}; + +struct virtchnl_promisc_info { + u16 adpt_id; + u16 flags; +}; + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +enum ne6x_promisc_flags { + NE6X_PROMISC_UCAST_RX = 0x1, + NE6X_PROMISC_UCAST_TX = 0x2, + NE6X_PROMISC_MCAST_RX = 0x4, + NE6X_PROMISC_MCAST_TX = 0x8, + NE6X_PROMISC_BCAST_RX = 0x10, + NE6X_PROMISC_BCAST_TX = 0x20, + NE6X_PROMISC_VLAN_RX = 0x40, + NE6X_PROMISC_VLAN_TX = 0x80, +}; + +#define NE6X_UCAST_PROMISC_BITS (NE6X_PROMISC_UCAST_TX | NE6X_PROMISC_UCAST_RX) +#define NE6X_MCAST_PROMISC_BITS (NE6X_PROMISC_MCAST_TX | NE6X_PROMISC_MCAST_RX) + +enum ne6x_vf_config_flag { + NE6X_VF_CONFIG_FLAG_TRUSTED = 0, + NE6X_VF_CONFIG_FLAG_LINK_FORCED, + NE6X_VF_CONFIG_FLAG_NBITS /* must be last */ +}; + +struct ne6x_key { + u8 rsv0; + u8 pi; + u8 mac_addr[6]; + u8 rsv1[56]; +}; + +/* VF information structure */ +struct ne6x_vf { + struct ne6x_pf *pf; + struct ne6x_adapter *adpt; + + u16 vf_id; /* VF ID in the PF space */ + u16 lan_adpt_idx; /* index into PF struct */ + /* first vector index of this VF in the PF space */ + u16 vfp_vid; + u16 vfp_tpid; + int tx_rate; + u8 rx_tx_state; + bool ready; + bool ready_to_link_notify; + + u16 base_queue; + u16 num_vf_qs; + u16 num_req_qs; + + struct ne6x_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */ + + u8 trusted : 1; + u8 link_forced : 1; + u8 link_up : 1; /* only valid if VF link is forced */ + + struct virtchnl_ether_addr dev_lan_addr; + DECLARE_BITMAP(vf_states, NE6X_VF_STATES_NBITS); /* VF runtime states */ + DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); + DECLARE_BITMAP(vf_config_flag, NE6X_VF_CONFIG_FLAG_NBITS); +}; + +#define ne6x_for_each_vf(pf, i) for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++) +#define ne6x_for_each_pf(pf, i) for ((i) = 0; (i) < (pf)->num_alloc_adpt; (i)++) + +#ifdef CONFIG_PCI_IOV +int ne6x_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); +int ne6x_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); + +int ne6x_sriov_configure(struct pci_dev *pdev, int num_vfs); +void ne6x_vc_process_vf_msg(struct ne6x_pf *pf); +void ne6x_vc_notify_link_state(struct ne6x_vf *vf); +int ne6x_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); +void ne6x_clr_vf_bw_for_max_vpnum(struct ne6x_pf *pf); + +struct ne6x_adapter *ne6x_get_vf_adpt(struct ne6x_vf *vf); +int ne6x_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate); +int ne6x_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); + +#else /* CONFIG_PCI_IOV */ +static inline int ne6x_sriov_configure(struct pci_dev __always_unused *pdev, + int __always_unused 
num_vfs) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_ndo_set_vf_bw(struct net_device *netdev, int vf_id, + int min_tx_rate, int max_tx_rate) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_PCI_IOV */ + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h new file mode 100644 index 0000000000000000000000000000000000000000..9ee06262f0fb3e650455fbc57b1746b3792c57bd --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h @@ -0,0 +1,555 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6XVF_H +#define _NE6XVF_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "reg.h" +#include "common.h" +#include "feature.h" +#include "txrx.h" +#include "mailbox.h" +#include "ne6xvf_virtchnl.h" + +#define NE6XVF_MAX_AQ_BUF_SIZE 4096 +#define NE6XVF_AQ_LEN 32 +#define NE6XVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ + +#define NE6XVF_REG_ADDR(_VPID, _OFST) (((_VPID) << 12) + ((_OFST) << 4)) + +#define NE6XVF_DB_STATE 0x1a +#define NE6XVF_MAILBOX_DATA 0x19 +#define NE6XVF_PF_MAILBOX_DATA 0x18 + +#define NE6XVF_QC_TAIL1(_Q) (((_Q) << 12) | (NE6X_CQ_HD_POINTER << 4)) /* _i=0...15 Reset: PFR */ +#define NE6XVF_QTX_TAIL1(_Q) (((_Q) << 12) | (0 << 11) | 0) /* _i=0...15 Reset: PFR */ +#define NE6XVF_QRX_TAIL1(_Q) (((_Q) << 12) | (1 << 11) | 0) /* _i=0...15 Reset: PFR */ + +#define ne6xvf_debug(h, m, s, ...) \ +do { \ + if (((m) & (h)->debug_mask)) \ + pr_info("ncevf %02x:%02x.%x " s, \ + (h)->bus.bus_id, (h)->bus.device, \ + (h)->bus.func, ##__VA_ARGS__); \ +} while (0) + +#define hw_dbg(h, s, ...) 
\ + pr_debug("ncevf %02x:%02x.%x " s, \ + (h)->bus.bus_id, (h)->bus.device, \ + (h)->bus.func, ##__VA_ARGS__) + +extern char ne6xvf_driver_name[]; +extern const char ne6xvf_driver_version[]; +extern struct workqueue_struct *ne6xvf_wq; + +#define ne6xvf_init_spinlock(_sp) ne6xvf_init_spinlock_d(_sp) +#define ne6xvf_acquire_spinlock(_sp) ne6xvf_acquire_spinlock_d(_sp) +#define ne6xvf_release_spinlock(_sp) ne6xvf_release_spinlock_d(_sp) +#define ne6xvf_destroy_spinlock(_sp) ne6xvf_destroy_spinlock_d(_sp) + +#define wr64(a, reg, value) writeq((value), ((a)->hw_addr0 + (reg))) +#define rd64(a, reg) readq((a)->hw_addr0 + (reg)) + +#define NE6XVF_READ_REG(hw, reg) rd64(hw, reg) +#define NE6XVF_WRITE_REG(hw, reg, value) wr64(hw, reg, value) + +#define NE6XVF_MAX_REQ_QUEUES 32 + +#define NE6XVF_RESET_WAIT_MS 10 +#define NE6XVF_RESET_WAIT_DETECTED_COUNT 50 +#define NE6XVF_RESET_WAIT_COMPLETE_COUNT 2000 + +enum ne6xvf_critical_section_t { + __NE6XVF_IN_CRITICAL_TASK, /* cannot be interrupted */ + __NE6XVF_IN_REMOVE_TASK, /* device being removed */ + __NE6XVF_TX_TSTAMP_IN_PROGRESS, /* PTP Tx timestamp request in progress */ +}; + +struct ne6xvf_vlan_filter { + struct list_head list; + struct ne6x_vf_vlan vlan; + struct { + u8 is_new_vlan : 1; /* filter is new, wait for PF answer */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 padding : 5; + }; +}; + +struct ne6xvf_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; + struct { + u8 is_new_mac : 1; /* filter is new, wait for PF decision */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 is_primary : 1; /* filter is a default VF MAC */ + u8 add_handled : 1; /* received response from PF for filter add */ + u8 padding : 3; + }; +}; + +/* Driver state. The order of these is important! 
*/ +enum ne6xvf_state_t { + __NE6XVF_STARTUP, /* driver loaded, probe complete */ + __NE6XVF_REMOVE, /* driver is being unloaded */ + __NE6XVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ + __NE6XVF_INIT_EXTENDED_CAPS, /* process extended caps which require aq msg exchange */ + __NE6XVF_INIT_CONFIG_ADAPTER, + __NE6XVF_INIT_SW, /* got resources, setting up structs */ + __NE6XVF_INIT_FAILED, /* init failed, restarting procedure */ + __NE6XVF_RESETTING, /* in reset */ + __NE6XVF_COMM_FAILED, /* communication with PF failed */ + /* Below here, watchdog is running */ + __NE6XVF_DOWN, /* ready, can be opened */ + __NE6XVF_DOWN_PENDING, /* descending, waiting for watchdog */ + __NE6XVF_TESTING, /* in ethtool self-test */ + __NE6XVF_RUNNING /* opened, working */ +}; + +struct ne6xvf_mac_info { + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u8 port_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +enum ne6xvf_bus_speed { + ne6xvf_bus_speed_unknown = 0, + ne6xvf_bus_speed_33 = 33, + ne6xvf_bus_speed_66 = 66, + ne6xvf_bus_speed_100 = 100, + ne6xvf_bus_speed_120 = 120, + ne6xvf_bus_speed_133 = 133, + ne6xvf_bus_speed_2500 = 2500, + ne6xvf_bus_speed_5000 = 5000, + ne6xvf_bus_speed_8000 = 8000, + ne6xvf_bus_speed_reserved +}; + +enum ne6xvf_bus_width { + ne6xvf_bus_width_unknown = 0, + ne6xvf_bus_width_pcie_x1 = 1, + ne6xvf_bus_width_pcie_x2 = 2, + ne6xvf_bus_width_pcie_x4 = 4, + ne6xvf_bus_width_pcie_x8 = 8, + ne6xvf_bus_width_32 = 32, + ne6xvf_bus_width_64 = 64, + ne6xvf_bus_width_reserved +}; + +enum ne6xvf_bus_type { + ne6xvf_bus_type_unknown = 0, + ne6xvf_bus_type_pci, + ne6xvf_bus_type_pcix, + ne6xvf_bus_type_pci_express, + ne6xvf_bus_type_reserved +}; + +struct ne6xvf_bus_info { + enum ne6xvf_bus_speed speed; + enum ne6xvf_bus_width width; + enum ne6xvf_bus_type type; + + u16 func; + u16 device; + u16 lan_id; + u16 bus_id; +}; + +struct ne6xvf_hw_capabilities { + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors_vf; + u32 max_mtu; + u32 chip_id; + u32 mac_id; + u32 lport; + u32 vf_id; + u32 num_vf_per_pf; +}; + +struct ne6xvf_hw { + u8 __iomem *hw_addr0; + u8 __iomem *hw_addr2; + void *back; + + /* subsystem structs */ + struct ne6xvf_mac_info mac; + struct ne6xvf_bus_info bus; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + + /* capabilities for entire device and PCI func */ + struct ne6xvf_hw_capabilities dev_caps; + + struct ne6xvf_sdk_mbx_info mbx; + + /* debug mask */ + u32 debug_mask; + char err_str[16]; +}; + +struct ne6xvf_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; + +#define NE6XVF_FLAG_RX_CSUM_ENABLED BIT(0) +#define NE6XVF_FLAG_PF_COMMS_FAILED BIT(3) +#define NE6XVF_FLAG_RESET_PENDING BIT(4) +#define NE6XVF_FLAG_RESET_NEEDED BIT(5) +#define NE6XVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) +#define NE6XVF_FLAG_PROMISC_ON BIT(13) +#define NE6XVF_FLAG_ALLMULTI_ON BIT(14) + +#define NE6XVF_FLAG_LEGACY_RX BIT(15) +#define NE6XVF_FLAG_REINIT_ITR_NEEDED BIT(16) +#define NE6XVF_FLAG_QUEUES_ENABLED BIT(17) +#define NE6XVF_FLAG_QUEUES_DISABLED BIT(18) +#define NE6XVF_FLAG_REINIT_MSIX_NEEDED BIT(20) +#define NE6XF_FLAG_REINIT_CHNL_NEEDED 
BIT(21) +#define NE6XF_FLAG_RESET_DETECTED BIT(22) +#define NE6XF_FLAG_INITIAL_MAC_SET BIT(23) + +#define NE6XVF_FLAG_AQ_ENABLE_QUEUES BIT_ULL(0) +#define NE6XVF_FLAG_AQ_ADD_MAC_FILTER BIT_ULL(2) +#define NE6XVF_FLAG_AQ_ADD_VLAN_FILTER BIT_ULL(3) +#define NE6XVF_FLAG_AQ_DEL_MAC_FILTER BIT_ULL(4) +#define NE6XVF_FLAG_AQ_DEL_VLAN_FILTER BIT_ULL(5) +#define NE6XVF_FLAG_AQ_CONFIGURE_QUEUES BIT_ULL(6) +#define NE6XVF_FLAG_AQ_MAP_VECTORS BIT_ULL(7) +#define NE6XVF_FLAG_AQ_HANDLE_RESET BIT_ULL(8) +#define NE6XVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */ +#define NE6XVF_FLAG_AQ_GET_CONFIG BIT_ULL(10) +/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ +#define NE6XVF_FLAG_AQ_GET_HENA BIT_ULL(11) +#define NE6XVF_FLAG_AQ_SET_HENA BIT_ULL(12) +#define NE6XVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13) +#define NE6XVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14) +#define NE6XVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15) +#define NE6XVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16) +#define NE6XVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17) +#define NE6XVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18) + +#define NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD BIT_ULL(38) +#define NE6XVF_FLAG_AQ_GET_FEATURE BIT_ULL(39) +#define NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS BIT_ULL(40) +#define NE6XVF_FLAG_AQ_SET_VF_MAC BIT_ULL(41) +#define NE6XVF_FLAG_AQ_CHANGED_RSS BIT_ULL(42) + +struct ne6xvf_adapter { + struct ne6x_adapt_comm comm; + struct work_struct sdk_task; + struct delayed_work watchdog_task; + wait_queue_head_t down_waitqueue; + wait_queue_head_t vc_waitqueue; + struct ne6x_q_vector *q_vectors; + struct list_head vlan_filter_list; + struct list_head mac_filter_list; + struct list_head macvlan_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; + char misc_vector_name[IFNAMSIZ + 9]; + u16 max_queues; + u16 num_active_queues; + u16 num_req_queues; + u32 hw_feature; + struct ne6x_ring *tg_rings; /* TG */ + struct ne6x_ring *cq_rings; /* CQ */ + u32 cq_desc_count; + + /* TX */ + struct ne6x_ring *tx_rings; + u32 tx_timeout_count; + u32 tx_desc_count; + + /* RX */ + struct ne6x_ring *rx_rings; + u64 hw_csum_rx_error; + u32 rx_desc_count; + int num_msix_vectors; + struct msix_entry *msix_entries; + + u32 flags; + + /* duplicates for common code */ +#define NE6XVF_FLAG_DCB_ENABLED 0 + + /* flags for admin queue service task */ + u64 aq_required; + + /* Lock to prevent possible clobbering of + * current_netdev_promisc_flags + */ + spinlock_t current_netdev_promisc_flags_lock; + + netdev_features_t current_netdev_promisc_flags; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + struct net_device_stats net_stats; + + struct ne6xvf_hw hw; /* defined in ne6xvf.h */ + + enum ne6xvf_state_t state; + enum ne6xvf_state_t last_state; + unsigned long crit_section; + + bool netdev_registered; + bool link_up; + enum ne6x_sdk_link_speed link_speed; + enum virtchnl_ops current_op; + struct virtchnl_vf_resource *vf_res; + struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + + struct ne6xvf_eth_stats current_stats; + //struct ne6xvf_vsi vsi; + u16 msg_enable; + struct ne6x_rss_info rss_info; + u8 trusted; + +#ifdef CONFIG_DEBUG_FS + struct dentry *ne6xvf_dbg_pf; +#endif /* CONFIG_DEBUG_FS */ +}; + +#ifdef CONFIG_DEBUG_FS +#define NCE_DEBUG_CHAR_LEN 1024 + +struct ne6xvf_dbg_cmd_wr { + char command[NCE_DEBUG_CHAR_LEN]; + void (*command_proc)(struct ne6xvf_adapter *pf); +}; + +void ne6xvf_dbg_pf_init(struct ne6xvf_adapter *pf); +void ne6xvf_dbg_pf_exit(struct ne6xvf_adapter *pf); 
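+/* ne6xvf_dbg_init()/ne6xvf_dbg_exit() manage the driver-wide debugfs root
+ * (ne6xvf_dbg_root); the _pf_ variants above handle a single adapter.
+ */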
+void ne6xvf_dbg_init(void); +void ne6xvf_dbg_exit(void); +#else +static inline void ne6xvf_dbg_pf_init(struct ne6xvf_adapter *pf) { } +static inline void ne6xvf_dbg_pf_exit(struct ne6xvf_adapter *pf) { } +static inline void ne6xvf_dbg_init(void) { } +static inline void ne6xvf_dbg_exit(void) { } +#endif /* CONFIG_DEBUG_FS */ + +/* Error Codes */ +enum ne6xvf_status { + NE6XVF_SUCCESS = 0, + NE6XVF_ERR_NVM = -1, + NE6XVF_ERR_NVM_CHECKSUM = -2, + NE6XVF_ERR_PHY = -3, + NE6XVF_ERR_CONFIG = -4, + NE6XVF_ERR_PARAM = -5, + NE6XVF_ERR_MAC_TYPE = -6, + NE6XVF_ERR_UNKNOWN_PHY = -7, + NE6XVF_ERR_LINK_SETUP = -8, + NE6XVF_ERR_ADAPTER_STOPPED = -9, + NE6XVF_ERR_INVALID_MAC_ADDR = -10, + NE6XVF_ERR_DEVICE_NOT_SUPPORTED = -11, + NE6XVF_ERR_MASTER_REQUESTS_PENDING = -12, + NE6XVF_ERR_INVALID_LINK_SETTINGS = -13, + NE6XVF_ERR_AUTONEG_NOT_COMPLETE = -14, + NE6XVF_ERR_RESET_FAILED = -15, + NE6XVF_ERR_SWFW_SYNC = -16, + NE6XVF_ERR_NO_AVAILABLE_VSI = -17, + NE6XVF_ERR_NO_MEMORY = -18, + NE6XVF_ERR_BAD_PTR = -19, + NE6XVF_ERR_RING_FULL = -20, + NE6XVF_ERR_INVALID_PD_ID = -21, + NE6XVF_ERR_INVALID_QP_ID = -22, + NE6XVF_ERR_INVALID_CQ_ID = -23, + NE6XVF_ERR_INVALID_CEQ_ID = -24, + NE6XVF_ERR_INVALID_AEQ_ID = -25, + NE6XVF_ERR_INVALID_SIZE = -26, + NE6XVF_ERR_INVALID_ARP_INDEX = -27, + NE6XVF_ERR_INVALID_FPM_FUNC_ID = -28, + NE6XVF_ERR_QP_INVALID_MSG_SIZE = -29, + NE6XVF_ERR_QP_TOOMANY_WRS_POSTED = -30, + NE6XVF_ERR_INVALID_FRAG_COUNT = -31, + NE6XVF_ERR_QUEUE_EMPTY = -32, + NE6XVF_ERR_INVALID_ALIGNMENT = -33, + NE6XVF_ERR_FLUSHED_QUEUE = -34, + NE6XVF_ERR_INVALID_PUSH_PAGE_INDEX = -35, + NE6XVF_ERR_INVALID_IMM_DATA_SIZE = -36, + NE6XVF_ERR_TIMEOUT = -37, + NE6XVF_ERR_OPCODE_MISMATCH = -38, + NE6XVF_ERR_CQP_COMPL_ERROR = -39, + NE6XVF_ERR_INVALID_VF_ID = -40, + NE6XVF_ERR_INVALID_HMCFN_ID = -41, + NE6XVF_ERR_BACKING_PAGE_ERROR = -42, + NE6XVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + NE6XVF_ERR_INVALID_PBLE_INDEX = -44, + NE6XVF_ERR_INVALID_SD_INDEX = -45, + NE6XVF_ERR_INVALID_PAGE_DESC_INDEX = -46, + NE6XVF_ERR_INVALID_SD_TYPE = -47, + NE6XVF_ERR_MEMCPY_FAILED = -48, + NE6XVF_ERR_INVALID_HMC_OBJ_INDEX = -49, + NE6XVF_ERR_INVALID_HMC_OBJ_COUNT = -50, + NE6XVF_ERR_INVALID_SRQ_ARM_LIMIT = -51, + NE6XVF_ERR_SRQ_ENABLED = -52, + NE6XVF_ERR_ADMIN_QUEUE_ERROR = -53, + NE6XVF_ERR_ADMIN_QUEUE_TIMEOUT = -54, + NE6XVF_ERR_BUF_TOO_SHORT = -55, + NE6XVF_ERR_ADMIN_QUEUE_FULL = -56, + NE6XVF_ERR_ADMIN_QUEUE_NO_WORK = -57, + NE6XVF_ERR_BAD_IWARP_CQE = -58, + NE6XVF_ERR_NVM_BLANK_MODE = -59, + NE6XVF_ERR_NOT_IMPLEMENTED = -60, + NE6XVF_ERR_PE_DOORBELL_NOT_ENABLED = -61, + NE6XVF_ERR_DIAG_TEST_FAILED = -62, + NE6XVF_ERR_NOT_READY = -63, + NE6XVF_NOT_SUPPORTED = -64, + NE6XVF_ERR_FIRMWARE_API_VERSION = -65, + NE6XVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, +}; + +static inline const char *ne6xvf_state_str(enum ne6xvf_state_t state) +{ + switch (state) { + case __NE6XVF_STARTUP: + return "__NE6XVF_STARTUP"; + case __NE6XVF_REMOVE: + return "__NE6XVF_REMOVE"; + case __NE6XVF_INIT_GET_RESOURCES: + return "__NE6XVF_INIT_GET_RESOURCES"; + case __NE6XVF_INIT_EXTENDED_CAPS: + return "__NE6XVF_INIT_EXTENDED_CAPS"; + case __NE6XVF_INIT_CONFIG_ADAPTER: + return "__NE6XVF_INIT_CONFIG_ADAPTER"; + case __NE6XVF_INIT_SW: + return "__NE6XVF_INIT_SW"; + case __NE6XVF_INIT_FAILED: + return "__NE6XVF_INIT_FAILED"; + case __NE6XVF_RESETTING: + return "__NE6XVF_RESETTING"; + case __NE6XVF_COMM_FAILED: + return "__NE6XVF_COMM_FAILED"; + case __NE6XVF_DOWN: + return "__NE6XVF_DOWN"; + case __NE6XVF_DOWN_PENDING: + return "__NE6XVF_DOWN_PENDING"; + case 
__NE6XVF_TESTING: + return "__NE6XVF_TESTING"; + case __NE6XVF_RUNNING: + return "__NE6XVF_RUNNING"; + default: + return "__NE6XVF_UNKNOWN_STATE"; + } +} + +static inline void ne6xvf_change_state(struct ne6xvf_adapter *adapter, enum ne6xvf_state_t state) +{ + if (adapter->state != state) { + adapter->last_state = adapter->state; + adapter->state = state; + } +} + +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} + +int ne6xvf_send_api_ver(struct ne6xvf_adapter *adapter); +int ne6xvf_send_vf_config_msg(struct ne6xvf_adapter *adapter, bool b_init); +int ne6xvf_send_vf_offload_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_send_vf_feature_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_get_vf_config(struct ne6xvf_adapter *adapter); +int ne6xvf_request_reset(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_tg_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_cq_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_tx_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_rx_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_reset_interrupt_capability(struct ne6xvf_adapter *adapter); +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count); +void ne6xvf_set_ethtool_ops(struct net_device *netdev); +void ne6xvf_request_stats(struct ne6xvf_adapter *adapter); +void ne6xvf_irq_enable(struct ne6xvf_adapter *adapter, bool flush); +int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter); +enum ne6xvf_status ne6xvf_clean_arq_element(struct ne6xvf_hw *hw, struct ne6xvf_arq_event_info *e, + u16 *pending); +void ne6xvf_virtchnl_completion(struct ne6xvf_adapter *adapter, enum virtchnl_ops v_opcode, + enum ne6xvf_status v_retval, u8 *msg, u16 msglen); +int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter); +int ne6xvf_request_feature(struct ne6xvf_adapter *adapter); +int ne6xvf_config_default_vlan(struct ne6xvf_adapter *adapter); +void ne6xvf_config_rss_info(struct ne6xvf_adapter *adapter); +void ne6xvf_changed_rss(struct ne6xvf_adapter *adapter); + +void ne6xvf_add_vlans(struct ne6xvf_adapter *adapter); +void ne6xvf_del_vlans(struct ne6xvf_adapter *adapter); +void ne6xvf_schedule_reset(struct ne6xvf_adapter *adapter); +int ne6xvf_parse_vf_resource_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_request_queues(struct ne6xvf_adapter *adapter, int num); +void ne6xvf_add_ether_addrs(struct ne6xvf_adapter *adapter); +void ne6xvf_del_ether_addrs(struct ne6xvf_adapter *adapter); +void ne6xvf_set_promiscuous(struct ne6xvf_adapter *adapter); +int ne6xvf_poll_virtchnl_msg(struct ne6xvf_adapter *adapter, struct ne6xvf_arq_event_info *event, + enum virtchnl_ops op_to_poll); +int ne6xvf_enable_queues(struct ne6xvf_adapter *adapter); +void ne6xvf_update_pf_stats(struct ne6xvf_adapter *adapter); +int ne6xvf_send_pf_msg(struct ne6xvf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len); +void ne6xvf_vchanel_get_port_link_status(struct ne6xvf_adapter *adapter); +void ne6xvf_set_vf_addr(struct ne6xvf_adapter *adapter); +int ne6xvf_close(struct net_device *netdev); +int ne6xvf_open(struct net_device *netdev); +void ne6xvf_fill_rss_lut(struct ne6xvf_adapter *adapter); +void ne6xvf_tail_update(struct ne6x_ring *ring, int val); +int ne6xvf_register_netdev(struct ne6xvf_adapter *adapter); + +#endif /* _NE6XVF_H */ diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c new file mode 100644 index 
0000000000000000000000000000000000000000..1a5851788ff6755b351ca7adee3442a783403a80 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include +#include + +#include "ne6xvf.h" + +static struct dentry *ne6xvf_dbg_root; + +void ne6xvf_showqueue(struct ne6xvf_adapter *pf) +{ + struct ne6x_ring *ring; + u64 head, tail, oft; + int i; + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->rx_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_OFST)); + dev_info(&pf->pdev->dev, "----RX: Queue[%d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], idle:%04d, alloc:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + oft, + NE6X_DESC_UNUSED(ring), + ring->next_to_alloc, + ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->tx_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_OFST)); + dev_info(&pf->pdev->dev, "----TX: Queue[%d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + oft, + NE6X_DESC_UNUSED(ring), + ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->cq_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_CQ_TAIL_POINTER)); + dev_info(&pf->pdev->dev, "----CQ: Queue[%d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + NE6X_DESC_UNUSED(ring), + ring->next_to_use, + ring->next_to_clean); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); +} + +void ne6xvf_showring(struct ne6xvf_adapter *pf) +{ + struct ne6x_tx_desc *tx_desc; + struct ne6x_cq_desc *cq_desc; + union ne6x_rx_desc *rx_desc; + struct ne6x_ring *ring; + int j, k; + + for (j = 0; j < pf->num_active_queues; j++) { + ring = &pf->rx_rings[j]; + + for (k = 0; k < ring->count; k++) { + rx_desc = NE6X_RX_DESC(ring, k); + if (!rx_desc->wb.u.val) + /* empty descriptor, skip */ + continue; + + dev_info(&pf->pdev->dev, "**** rx_desc[%d], vp[%d], m_len[%d], s_len[%d], s_addr[0x%llx], m_addr[0x%llx], flag[0x%x], vp[%d], pkt_len[%d]\n", + k, + rx_desc->w.vp, + rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, + rx_desc->w.buffer_sop_addr, + rx_desc->w.buffer_mop_addr, + rx_desc->wb.u.val, + rx_desc->wb.vp, + rx_desc->wb.pkt_len); + } + } + + for (j = 0; j < pf->num_active_queues; j++) { + ring = &pf->tx_rings[j]; + + for (k = 0; k < ring->count; k++) { + tx_desc = NE6X_TX_DESC(ring, k); + if (!tx_desc->buffer_sop_addr) + /* empty descriptor, skp */ + continue; + + dev_info(&pf->pdev->dev, "**** tx_desc[%d], flag[0x%x], vp[%d], et[%d], ch[%d], 
tt[%d],sopv[%d],eopv[%d],tso[%d],l3chk[%d],l3oft[%d],l4chk[%d],l4oft[%d],pld[%d],mop[%d],sop[%d],mss[%d],mopa[%lld],sopa[%lld]\n", + k, + tx_desc->u.val, + tx_desc->vp, + tx_desc->event_trigger, + tx_desc->chain, + tx_desc->transmit_type, + tx_desc->sop_valid, + tx_desc->eop_valid, + tx_desc->tso, + tx_desc->l3_csum, + tx_desc->l3_ofst, + tx_desc->l4_csum, + tx_desc->l4_ofst, + tx_desc->pld_ofst, + tx_desc->mop_cnt, + tx_desc->sop_cnt, + tx_desc->mss, + tx_desc->buffer_mop_addr, + tx_desc->buffer_sop_addr); + } + } + + for (j = 0; j < pf->num_active_queues; j++) { + ring = &pf->cq_rings[j]; + + for (k = 0; k < ring->count; k++) { + cq_desc = NE6X_CQ_DESC(ring, k); + if (!cq_desc->num) + /* empty descriptor, skip */ + continue; + + dev_info(&pf->pdev->dev, "**** cq_desc[%d], vp[%d], ctype[%d], num[%d]\n", + k, + ring->reg_idx, + cq_desc->ctype, + cq_desc->num); + } + } +} + +const struct ne6xvf_dbg_cmd_wr deg_vf_cmd_wr[] = { + {"queue", ne6xvf_showqueue}, + {"ring", ne6xvf_showring}, +}; + +/** + * ne6xvf_dbg_command_read - read handler for the command debugfs file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ne6xvf_dbg_command_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + return 0; +} + +/** + * ne6xvf_dbg_command_write - write handler for the command debugfs file + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ne6xvf_dbg_command_write(struct file *filp, const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ne6xvf_adapter *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int bytes_not_copied; + int i, cnt; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* don't cross maximal possible value */ + if (count >= NCE_DEBUG_CHAR_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return -ENOMEM; + + bytes_not_copied = copy_from_user(cmd_buf, buffer, count); + if (bytes_not_copied) { + kfree(cmd_buf); + return -EFAULT; + } + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + if (strncmp(cmd_buf, "read", 4) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value = 0; + + cnt = sscanf(&cmd_buf[4], "%i %i", &base_addr, &offset_addr); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + dev_info(&pf->pdev->dev, "read: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr, + value); + } else if (strncmp(cmd_buf, "write", 5) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value = 0; + + cnt = sscanf(&cmd_buf[5], "%i %i %lli ", &base_addr, &offset_addr, &value); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "write \n"); + goto command_write_done; + } + dev_info(&pf->pdev->dev, "write: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr, + value); + } else { + for (i = 0; i < ARRAY_SIZE(deg_vf_cmd_wr); i++) { + if (strncmp(cmd_buf, deg_vf_cmd_wr[i].command, count) == 0) { + deg_vf_cmd_wr[i].command_proc(pf); + goto command_write_done; + } + } + + dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); + } + +command_write_done: + kfree(cmd_buf); + cmd_buf = NULL; + return count; +} + +static const struct file_operations ne6xvf_dbg_command_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6xvf_dbg_command_read, + .write =
ne6xvf_dbg_command_write, +}; + +/** + * ne6xvf_dbg_pf_init - setup the debugfs directory for the PF + * @pf: the PF that is starting up + **/ +void ne6xvf_dbg_pf_init(struct ne6xvf_adapter *pf) +{ + const struct device *dev = &pf->pdev->dev; + const char *name = pci_name(pf->pdev); + struct dentry *pfile; + + pf->ne6xvf_dbg_pf = debugfs_create_dir(name, ne6xvf_dbg_root); + if (!pf->ne6xvf_dbg_pf) + return; + + pfile = debugfs_create_file("command", 0600, pf->ne6xvf_dbg_pf, pf, + &ne6xvf_dbg_command_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_info(dev, "debugfs dir/file for %s failed\n", name); + debugfs_remove_recursive(pf->ne6xvf_dbg_pf); +} + +/** + * ne6xvf_dbg_pf_exit - clear out the PF's debugfs entries + * @pf: the PF that is stopping + **/ +void ne6xvf_dbg_pf_exit(struct ne6xvf_adapter *pf) +{ + debugfs_remove_recursive(pf->ne6xvf_dbg_pf); + pf->ne6xvf_dbg_pf = NULL; +} + +/** + * ne6xvf_dbg_init - start up debugfs for the driver + **/ +void ne6xvf_dbg_init(void) +{ + ne6xvf_dbg_root = debugfs_create_dir(ne6xvf_driver_name, NULL); + if (!ne6xvf_dbg_root) + pr_info("init of debugfs failed\n"); +} + +/** + * ne6xvf_dbg_exit - clean out the driver's debugfs entries + **/ +void ne6xvf_dbg_exit(void) +{ + debugfs_remove_recursive(ne6xvf_dbg_root); + ne6xvf_dbg_root = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..3fbab2d870667b461b9c092adc5015cb75e91409 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6xvf.h" +#include "ne6xvf_ethtool_stats.h" +#include "ne6xvf_txrx.h" + +static const char ne6xvf_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Eeprom test (offline)", + "Interrupt test (offline)", + "Link test (on/offline)" +}; + +#define NE6XVF_TEST_LEN (sizeof(ne6xvf_gstrings_test) / ETH_GSTRING_LEN) + +static int ne6xvf_q_stats_len(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int stats_size, total_slen = 0; + + /* Tx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_txq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + /* Rx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_rxq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + /* CQ stats */ + stats_size = sizeof(struct ne6x_cq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + return total_slen; +} + +struct ne6xvf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure.
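 + * + * As an illustrative expansion sketch (not an addition to this patch), + * NE6XVF_NETDEV_STAT(rx_packets) becomes NE6XVF_STAT(struct rtnl_link_stats64, + * "rx_packets", rx_packets), i.e. { .stat_string = "rx_packets", + * .sizeof_stat = sizeof_field(struct rtnl_link_stats64, rx_packets), + * .stat_offset = offsetof(struct rtnl_link_stats64, rx_packets) }, so the + * stats code can fetch any member generically through its offset.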
+ */ +#define NE6XVF_NETDEV_STAT(_net_stat) NE6XVF_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +/* per-queue ring statistics */ +#define NE6XVF_QUEUE_STAT(_name, _stat) NE6XVF_STAT(struct ne6x_ring, _name, _stat) + +static const struct ne6xvf_stats ne6xvf_gstrings_tx_queue_stats[] = { + NE6XVF_QUEUE_STAT("tx_queue_%u_packets", stats.packets), + NE6XVF_QUEUE_STAT("tx_queue_%u_bytes", stats.bytes), + NE6XVF_QUEUE_STAT("tx_queue_%u_rst", tx_stats.restart_q), + NE6XVF_QUEUE_STAT("tx_queue_%u_busy", tx_stats.tx_busy), + NE6XVF_QUEUE_STAT("tx_queue_%u_line", tx_stats.tx_linearize), + NE6XVF_QUEUE_STAT("tx_queue_%u_csum_err", tx_stats.csum_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_csum", tx_stats.csum_good), + NE6XVF_QUEUE_STAT("tx_queue_%u_pcie_read_err", tx_stats.tx_pcie_read_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_ecc_err", tx_stats.tx_ecc_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_drop_addr", tx_stats.tx_drop_addr), +}; + +static const struct ne6xvf_stats ne6xvf_gstrings_rx_queue_stats[] = { + NE6XVF_QUEUE_STAT("rx_queue_%u_packets", stats.packets), + NE6XVF_QUEUE_STAT("rx_queue_%u_bytes", stats.bytes), + NE6XVF_QUEUE_STAT("rx_queue_%u_no_eop", rx_stats.non_eop_descs), + NE6XVF_QUEUE_STAT("rx_queue_%u_alloc_pg_err", rx_stats.alloc_page_failed), + NE6XVF_QUEUE_STAT("rx_queue_%u_alloc_buf_err", rx_stats.alloc_buf_failed), + NE6XVF_QUEUE_STAT("rx_queue_%u_pg_reuse", rx_stats.page_reuse_count), + NE6XVF_QUEUE_STAT("rx_queue_%u_csum_err", rx_stats.csum_err), + NE6XVF_QUEUE_STAT("rx_queue_%u_csum", rx_stats.csum_good), + NE6XVF_QUEUE_STAT("rx_queue_%u_mem_err", rx_stats.rx_mem_error), + NE6XVF_QUEUE_STAT("rx_queue_%u_rx_err", rx_stats.rx_err), +}; + +static const struct ne6xvf_stats ne6xvf_gstrings_cq_queue_stats[] = { + NE6XVF_QUEUE_STAT("cx_queue_%u_nums", cq_stats.cq_num), + NE6XVF_QUEUE_STAT("cx_queue_%u_tx_nums", cq_stats.tx_num), + NE6XVF_QUEUE_STAT("cx_queue_%u_rx_nums", cq_stats.rx_num), +}; + +/* port mac statistics */ +#define NE6XVF_PORT_MAC_STAT(_name, _stat) NE6XVF_STAT(struct ne6xvf_vsi, _name, _stat) + +#define NE6XVF_ALL_STATS_LEN(n) (ne6xvf_q_stats_len(n)) + +#define ne6xvf_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \ + ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_mode) + +static void ne6xvf_get_settings_link_up(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + switch (adapter->link_speed) { + case NE6X_LINK_SPEED_100GB: + ks->base.speed = SPEED_100000; + break; + case NE6X_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + break; + case NE6X_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + break; + case NE6X_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + break; + case NE6X_LINK_SPEED_200GB: + ks->base.speed = SPEED_200000; + break; + default: + netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n", + adapter->link_speed); + break; + } + ks->base.duplex = DUPLEX_FULL; +} + +/** + * ne6xvf_get_settings_link_down - Get the Link settings when link is down + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + * + * Reports link settings that can be determined when link is down + */ +static void ne6xvf_get_settings_link_down(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; +} + +/** + * ne6xvf_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * 
Reports speed/duplex settings based on media_type + */ +static int ne6xvf_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *ks) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); + + ks->base.port = PORT_NONE; + if (adapter->link_up) { + /* Report speed and duplex for the active link */ + ne6xvf_get_settings_link_up(ks, netdev); + } else { + ne6xvf_get_settings_link_down(ks, netdev); + } + + return 0; +} + +/** + * ne6xvf_set_link_ksettings - Set Speed and Duplex + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * Forcing speed/duplex is not supported on a VF; always returns -EOPNOTSUPP. + */ +static int ne6xvf_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + return -EOPNOTSUPP; +} + +static void __ne6xvf_add_stat_strings(u8 **p, const struct ne6xvf_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +#define ne6xvf_add_stat_strings(p, stats, ...) \ + __ne6xvf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ##__VA_ARGS__) + +static void ne6xvf_get_stat_strings(struct net_device *netdev, u8 *data) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + unsigned int i; + + for (i = 0; i < adapter->num_active_queues; i++) { + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_tx_queue_stats, i); + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_rx_queue_stats, i); + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_cq_queue_stats, i); + } +} + +static void ne6xvf_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + ne6xvf_get_stat_strings(netdev, data); + break; + case ETH_SS_TEST: + memcpy(data, ne6xvf_gstrings_test, NE6XVF_TEST_LEN * ETH_GSTRING_LEN); + break; + default: + break; + } +} + +static int ne6xvf_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + /* The number (and order) of strings reported *must* remain + * constant for a given netdevice. This function must not + * report a different number based on run time parameters + * (such as the number of queues in use, or the setting of + * a private ethtool flag). This is due to the nature of the + * ethtool stats API. + * + * Userspace programs such as ethtool must make 3 separate + * ioctl requests, one for size, one for the strings, and + * finally one for the stats. Since these cross into + * userspace, changes to the number or size could result in + * undefined memory access or incorrect string<->value + * correlations for statistics. + * + * Even if it appears to be safe, changes to the size or + * order of strings will suffer from race conditions and are + * not safe.
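 + * + * For example, `ethtool -S` first reads the ETH_SS_STATS count via + * ETHTOOL_GSSET_INFO, then the strings via ETHTOOL_GSTRINGS and the + * values via ETHTOOL_GSTATS; if the count changed between those three + * ioctls, the string<->value pairing would silently break.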
+ */ + return NE6XVF_ALL_STATS_LEN(netdev); + case ETH_SS_TEST: + return NE6XVF_TEST_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void ne6xvf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + unsigned int j; + int i = 0; + + ne6xvf_update_pf_stats(adapter); + + /* populate per queue stats */ + rcu_read_lock(); + for (j = 0; j < adapter->num_active_queues; j++) { + tx_ring = &adapter->tx_rings[j]; + if (tx_ring) { + data[i++] = tx_ring->stats.packets; + data[i++] = tx_ring->stats.bytes; + data[i++] = tx_ring->tx_stats.restart_q; + data[i++] = tx_ring->tx_stats.tx_busy; + data[i++] = tx_ring->tx_stats.tx_linearize; + data[i++] = tx_ring->tx_stats.csum_err; + data[i++] = tx_ring->tx_stats.csum_good; + data[i++] = tx_ring->tx_stats.tx_pcie_read_err; + data[i++] = tx_ring->tx_stats.tx_ecc_err; + data[i++] = tx_ring->tx_stats.tx_drop_addr; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + rx_ring = &adapter->rx_rings[j]; + if (rx_ring) { + data[i++] = rx_ring->stats.packets; + data[i++] = rx_ring->stats.bytes; + data[i++] = rx_ring->rx_stats.non_eop_descs; + data[i++] = rx_ring->rx_stats.alloc_page_failed; + data[i++] = rx_ring->rx_stats.alloc_buf_failed; + data[i++] = rx_ring->rx_stats.page_reuse_count; + data[i++] = rx_ring->rx_stats.csum_err; + data[i++] = rx_ring->rx_stats.csum_good; + data[i++] = rx_ring->rx_stats.rx_mem_error; + data[i++] = rx_ring->rx_stats.rx_err; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + cq_ring = &adapter->cq_rings[j]; + if (cq_ring) { + data[i++] = cq_ring->cq_stats.cq_num; + data[i++] = cq_ring->cq_stats.tx_num; + data[i++] = cq_ring->cq_stats.rx_num; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + } + rcu_read_unlock(); +} + +static void ne6xvf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + strscpy(drvinfo->driver, ne6xvf_driver_name, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, ne6xvf_driver_version, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", 4); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); +} + +static void ne6xvf_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) {} + +static void ne6xvf_self_test(struct net_device *dev, struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, sizeof(*data) * NE6XVF_TEST_LEN); +} + +static int ne6xvf_get_regs_len(struct net_device *netdev) +{ + return 0; +} + +static void ne6xvf_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->tx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->rx_mini_max_pending = NE6X_MIN_NUM_DESCRIPTORS; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_desc_count; + ring->tx_pending = adapter->tx_desc_count; + ring->rx_mini_pending = NE6X_MIN_NUM_DESCRIPTORS; + 
ring->rx_jumbo_pending = 0; +} + +static int ne6xvf_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + u32 new_rx_count, new_tx_count, new_cq_count; + int err; + + if (ring->tx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->tx_pending < NE6X_MIN_NUM_DESCRIPTORS || + ring->rx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->rx_pending < NE6X_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, NE6X_MIN_NUM_DESCRIPTORS, + NE6X_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_cq_count = new_tx_count + new_rx_count; + + if (new_tx_count == adapter->tx_desc_count && new_rx_count == adapter->rx_desc_count) + return 0; + + if (!netif_running(adapter->netdev)) { + adapter->tx_desc_count = new_tx_count; + adapter->rx_desc_count = new_rx_count; + adapter->cq_desc_count = new_cq_count; + netdev_info(netdev, "Interface is down, descriptor count change happens when it is brought up\n"); + return 0; + } + + err = ne6xvf_close(adapter->netdev); + if (err) { + netdev_err(netdev, "failed to close VF\n"); + return err; + } + netdev_info(netdev, "Descriptors change from (Tx: %d / Rx: %d) to (Tx: %d / Rx: %d)\n", + adapter->tx_rings[0].count, adapter->rx_rings[0].count, new_tx_count, + new_rx_count); + adapter->tx_desc_count = new_tx_count; + adapter->rx_desc_count = new_rx_count; + adapter->cq_desc_count = new_cq_count; + + err = ne6xvf_open(adapter->netdev); + if (err) { + netdev_err(netdev, "failed to open VF\n"); + return err; + } + + return 0; +} + +/** + * ne6xvf_get_pauseparam - Get Flow Control status + * @netdev: netdevice structure + * @pause: buffer to return pause parameters + * + * Return tx/rx-pause status + **/ +static void ne6xvf_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + pause->autoneg = 0; + pause->rx_pause = 0; + pause->tx_pause = 0; +} + +/** + * ne6xvf_get_coalesce - get a netdev's coalesce settings + * @netdev: the netdev to check + * @ec: ethtool coalesce data structure + * @kernel_coal: kernel coalesce parameters + * @extack: netlink extended ack + **/ +static int ne6xvf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + ec->tx_max_coalesced_frames_irq = 256; + ec->rx_max_coalesced_frames_irq = 256; + ec->use_adaptive_rx_coalesce = 0; + ec->use_adaptive_tx_coalesce = 0; + ec->rx_coalesce_usecs = 0; + ec->tx_coalesce_usecs = 0; + ec->rx_coalesce_usecs_high = 0; + ec->tx_coalesce_usecs_high = 0; + + return 0; +} + +static int ne6xvf_get_eeprom_len(struct net_device *netdev) +{ + return 0x64; +} + +static int ne6xvf_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + int blink_freq = 2; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return blink_freq; + case ETHTOOL_ID_ON: + break; + case ETHTOOL_ID_OFF: + break; + case ETHTOOL_ID_INACTIVE: + break; + default: + break; + } + + return 0; +} + +static int ne6xvf_nway_reset(struct net_device *netdev) +{ + return 0; +} + +static void ne6xvf_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + data[NE6XVF_ETH_TEST_LINK] = 0; + + /* Offline-only tests; pass by default */ + data[NE6XVF_ETH_TEST_REG] =
0; + data[NE6XVF_ETH_TEST_EEPROM] = 0; + data[NE6XVF_ETH_TEST_INTR] = 0; +} + +#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC) +#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3) +static int ne6xvf_get_rss_hash_opts(struct ne6xvf_adapter *adapter, u64 flow_type) +{ + u64 data = 0; + + switch (flow_type) { + case TCP_V4_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case TCP_V6_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + /* Default is src/dest for IP, no matter the L4 hashing */ + data |= RXH_IP_SRC | RXH_IP_DST; + break; + } + + return data; +} + +static int ne6xvf_set_rss_hash_opts(struct ne6xvf_adapter *adapter, struct ethtool_rxnfc *cmd) +{ + u16 rss_flags = adapter->rss_info.hash_type; + + if (cmd->data != L3_RSS_FLAGS && cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS)) + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_TCP; + break; + case TCP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_TCP; + break; + case UDP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_UDP; + break; + case UDP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_UDP; + break; + default: + return -EINVAL; + } + + if (rss_flags == adapter->rss_info.hash_type) + return 0; + + adapter->rss_info.hash_type = rss_flags; + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS; + + return 0; +} + +/** + * ne6xvf_set_rxnfc - command to set Rx flow rules. 
+ * @netdev: network interface device structure + * @info: ethtool rxnfc command + * + * Returns 0 for success and negative values for errors + */ +static int ne6xvf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXFH: + ret = ne6xvf_set_rss_hash_opts(adapter, info); + break; + default: + break; + } + + return ret; +} + +/** + * ne6xvf_get_rxnfc - command to get RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * @rule_locs: pointer to store rule locations + * + * Returns 0 if the command is supported, -EOPNOTSUPP otherwise. + **/ +static int ne6xvf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_active_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + cmd->data = ne6xvf_get_rss_hash_opts(adapter, cmd->flow_type); + ret = 0; + break; + default: + break; + } + + return ret; +} + +/** + * ne6xvf_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the hash key size. + **/ +static u32 ne6xvf_get_rxfh_key_size(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_info.hash_key_size; +} + +/** + * ne6xvf_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 ne6xvf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_info.ind_table_size; +} + +/** + * ne6xvf_get_rxfh - get the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function in use + * + * Reads the indirection table from the driver's cached copy. Always returns 0. + **/ +static int ne6xvf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + u16 i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (key) + memcpy(key, adapter->rss_info.hash_key, adapter->rss_info.hash_key_size); + + if (indir) { + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_info.ind_table_size; i++) + indir[i] = (u32)adapter->rss_info.ind_table[i]; + } + + return 0; +} + +/** + * ne6xvf_set_rxfh - set the Rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function + * + * Returns -EOPNOTSUPP if the hash function is not supported, otherwise + * returns 0 after programming the table.
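 + * + * Illustrative example (hypothetical values): with four active queues, + * writing indir[i] = i % 4 spreads flows round-robin over the queues. Note + * that each 32-bit entry is narrowed to a u8 lut slot below, so queue ids + * above 255 cannot be expressed.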
+ */ +static int ne6xvf_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!key && !indir) + return 0; + + if (key) + memcpy(&adapter->rss_info.hash_key[0], key, adapter->rss_info.hash_key_size); + + if (indir) { + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_info.ind_table_size; i++) + adapter->rss_info.ind_table[i] = (u8)(indir[i]); + } + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS; + + return 0; +} + +/** + * ne6xvf_get_channels: get the number of channels supported by the device + * @netdev: network interface device structure + * @channels: channel information structure + * + * For the purposes of our device, we only use combined channels, i.e. a tx/rx + * queue pair; no separate "other" channel is reported. + **/ +static void ne6xvf_get_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + channels->max_rx = 0; + channels->max_tx = 0; + channels->max_other = 0; + channels->max_combined = adapter->max_queues; + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + channels->combined_count = adapter->num_active_queues; +} + +/** + * ne6xvf_set_channels: set the new channel count + * @netdev: network interface device structure + * @channels: channel information structure + * + * Negotiate a new number of channels with the PF then do a reset. During + * reset we'll realloc queues and fix the RSS table. Returns 0 on success, + * negative on failure. + **/ +static int ne6xvf_set_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (!channels->combined_count || channels->rx_count || channels->tx_count || + channels->combined_count > adapter->vf_res->num_queue_pairs) + return -EINVAL; + + if (channels->combined_count == adapter->num_active_queues) { + /* nothing to do */ + netdev_info(netdev, "channel count unchanged, nothing to do\n"); + return 0; + } + + /* set for the next time the netdev is started */ + if (!netif_running(adapter->netdev)) { + adapter->num_active_queues = channels->combined_count; + + netif_set_real_num_rx_queues(adapter->netdev, adapter->num_active_queues); + netif_set_real_num_tx_queues(adapter->netdev, adapter->num_active_queues); + + ne6xvf_fill_rss_lut(adapter); + adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS; + + netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n"); + + return 0; + } + + err = ne6xvf_close(adapter->netdev); + if (err) { + netdev_err(netdev, "failed to close VF\n"); + return err; + } + + adapter->num_active_queues = channels->combined_count; + + netif_set_real_num_rx_queues(adapter->netdev, adapter->num_active_queues); + netif_set_real_num_tx_queues(adapter->netdev, adapter->num_active_queues); + + ne6xvf_fill_rss_lut(adapter); + adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS; + + err = ne6xvf_open(adapter->netdev); + if (err) { + netdev_err(netdev, "failed to open VF\n"); + return err; + } + + return 0; +} + +static const struct ethtool_ops ne6xvf_ethtool_ops = { + .get_link_ksettings = ne6xvf_get_link_ksettings, + .set_link_ksettings = ne6xvf_set_link_ksettings, + .get_strings = ne6xvf_get_strings, + .get_sset_count = ne6xvf_get_sset_count, +
.get_ethtool_stats = ne6xvf_get_ethtool_stats, + .get_drvinfo = ne6xvf_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_regs = ne6xvf_get_regs, + .get_regs_len = ne6xvf_get_regs_len, + .get_ringparam = ne6xvf_get_ringparam, + .set_ringparam = ne6xvf_set_ringparam, + .get_pauseparam = ne6xvf_get_pauseparam, + .get_coalesce = ne6xvf_get_coalesce, + .get_eeprom_len = ne6xvf_get_eeprom_len, + .get_rxnfc = ne6xvf_get_rxnfc, + .set_rxnfc = ne6xvf_set_rxnfc, + .get_rxfh_key_size = ne6xvf_get_rxfh_key_size, + .get_rxfh_indir_size = ne6xvf_get_rxfh_indir_size, + .get_rxfh = ne6xvf_get_rxfh, + .set_rxfh = ne6xvf_set_rxfh, + .get_channels = ne6xvf_get_channels, + .set_channels = ne6xvf_set_channels, + .set_phys_id = ne6xvf_set_phys_id, + .nway_reset = ne6xvf_nway_reset, + .self_test = ne6xvf_diag_test, +}; + +void ne6xvf_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &ne6xvf_ethtool_ops; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..300a90b6af55ef6f9f018f1e2c08d31a1bcb6214 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6XVF_ETHTOOL_H +#define _NE6XVF_ETHTOOL_H + +#include "ne6xvf.h" + +#define NE6XVF_STAT(_type, _name, _stat) \ +{ \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +enum ne6xvf_ethtool_test_id { + NE6XVF_ETH_TEST_REG = 0, + NE6XVF_ETH_TEST_EEPROM, + NE6XVF_ETH_TEST_INTR, + NE6XVF_ETH_TEST_LINK, +}; + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c new file mode 100644 index 0000000000000000000000000000000000000000..d72af2d4e6bdbf8f1f86a540e01a179c8e74ad10 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c @@ -0,0 +1,3310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include +#include +#include + +#include "ne6xvf.h" +#include "ne6xvf_osdep.h" +#include "ne6xvf_virtchnl.h" +#include "ne6xvf_txrx.h" +#include "version.h" + +#define CREATE_TRACE_POINTS + +#define SUMMARY \ + "Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Virtual Function Linux Driver" +#define COPYRIGHT "Copyright (c) 2020 - 2023 Chengdu BeiZhongWangXin Technology Co., Ltd."
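 + +/* + * Cross-reference note (illustrative, no functional effect): the "ncevf" + * string below is reused as the debugfs root directory name in + * ne6xvf_dbg_init() and as the driver name reported through + * ne6xvf_get_drvinfo(), so renaming it changes both user-visible paths. + */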
+ +char ne6xvf_driver_name[] = "ncevf"; +static const char ne6xvf_driver_string[] = SUMMARY; + +const char ne6xvf_driver_version[] = VERSION; +static const char ne6xvf_copyright[] = COPYRIGHT; + +static const struct pci_device_id ne6xvf_pci_tbl[] = { + {PCI_VDEVICE(BZWX, 0x501a), 0}, + {PCI_VDEVICE(BZWX, 0x601a), 0}, + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, ne6xvf_pci_tbl); + +MODULE_AUTHOR("Chengdu BeiZhongWangXin Technology Co., Ltd., "); +MODULE_DESCRIPTION(SUMMARY); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION); + +static const struct net_device_ops ne6xvf_netdev_ops; +struct workqueue_struct *ne6xvf_wq; +static void ne6xvf_sync_features(struct net_device *netdev); + +struct ne6xvf_adapter *ne6xvf_pdev_to_adapter(struct pci_dev *pdev) +{ + return netdev_priv(pci_get_drvdata(pdev)); +} + +void ne6xvf_schedule_reset(struct ne6xvf_adapter *adapter) +{ + adapter->flags |= NE6XVF_FLAG_RESET_NEEDED; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +static void ne6xvf_tx_timeout(struct net_device *netdev, __always_unused unsigned int txqueue) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + adapter->tx_timeout_count++; + ne6xvf_schedule_reset(adapter); +} + +/** + * nce_get_vsi_stats_struct - Get System Network Statistics + * @adapter: board private structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the service task. + **/ +struct net_device_stats *nce_get_vsi_stats_struct(struct ne6xvf_adapter *adapter) +{ + if (adapter->netdev) + return &adapter->netdev->stats; + else + return &adapter->net_stats; +} + +/** + * ne6xvf_update_pf_stats - Update PF port stats counters + * @adapter: adapter whose stats need updating + */ +void ne6xvf_update_pf_stats(struct ne6xvf_adapter *adapter) +{ + struct net_device_stats *ns; /* netdev stats */ + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + u64 bytes, packets; + u64 rx_p, rx_b; + u64 tx_p, tx_b; + u16 i; + + if (test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) + return; + + ns = nce_get_vsi_stats_struct(adapter); + + rx_p = 0; + rx_b = 0; + tx_p = 0; + tx_b = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_active_queues; i++) { + /* locate Tx ring */ + tx_ring = &adapter->tx_rings[i]; + + packets = tx_ring->stats.packets; + bytes = tx_ring->stats.bytes; + + tx_b += bytes; + tx_p += packets; + + rx_ring = &adapter->rx_rings[i]; + + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + rx_b += bytes; + rx_p += packets; + } + rcu_read_unlock(); + + ns->rx_packets = rx_p; + ns->rx_bytes = rx_b; + ns->tx_packets = tx_p; + ns->tx_bytes = tx_b; + + adapter->net_stats.rx_packets = rx_p; + adapter->net_stats.tx_packets = tx_p; + adapter->net_stats.rx_bytes = rx_b; + adapter->net_stats.tx_bytes = tx_b; +} + +bool ne6xvf_is_remove_in_progress(struct ne6xvf_adapter *adapter) +{ + return test_bit(__NE6XVF_IN_REMOVE_TASK, &adapter->crit_section); +} + +static void ne6xvf_sdk_task(struct work_struct *work) +{ + struct ne6xvf_adapter *adapter = container_of(work, struct ne6xvf_adapter, sdk_task); + struct ne6xvf_hw *hw = &adapter->hw; + struct ne6xvf_arq_event_info event; + enum ne6xvf_status ret, v_ret; + enum virtchnl_ops v_op; + u16 pending = 1u; + + if (ne6xvf_is_remove_in_progress(adapter)) + return; + + if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) + goto out; + + event.buf_len = NE6XVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + goto out; + + do { + ret =
ne6xvf_clean_arq_element(hw, &event, &pending); + v_op = (enum virtchnl_ops)le32_to_cpu(event.snap.type); + v_ret = (enum ne6xvf_status)le32_to_cpu(event.snap.state); + + if (ret || !v_op) + break; /* No event to process or error cleaning ARQ */ + + while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section)) + usleep_range(500, 1000); + + ne6xvf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, event.msg_len); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + if (pending != 0) + memset(event.msg_buf, 0, NE6XVF_MAX_AQ_BUF_SIZE); + } while (pending); + + if ((adapter->flags & (NE6XVF_FLAG_RESET_PENDING | NE6XVF_FLAG_RESET_NEEDED)) || + adapter->state == __NE6XVF_RESETTING) + goto freedom; + +freedom: + kfree(event.msg_buf); + +out: + return; +} + +static int ne6xvf_check_reset_complete(struct ne6xvf_hw *hw) +{ + u64 rstat; + int i; + + for (i = 0; i < NE6XVF_RESET_WAIT_COMPLETE_COUNT; i++) { + rstat = rd64(hw, NE6XVF_REG_ADDR(0, NE6X_VP_RELOAD)); + if (rstat) + return 0; + + usleep_range(10, 20); + } + + return 0; +} + +int ne6xvf_init_sdk_mbx(struct ne6xvf_hw *hw) +{ + union u_ne6x_mbx_snap_buffer_data mbx_buffer; + union u_ne6x_mbx_snap_buffer_data usnap; + u64 val; + + if (hw->mbx.init_flag) + return -1; + + hw->mbx.sq_data.state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + hw->mbx.sq_data.type = VIRTCHNL_OP_UNKNOWN; + hw->mbx.init_flag = 0x1; + + val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT)); + if (val & 0x2) { + usnap.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_PF_MAILBOX_DATA)); + mbx_buffer.snap.state = usnap.snap.state; + mbx_buffer.snap.type = usnap.snap.type; + + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_MAILBOX_DATA), mbx_buffer.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x2); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x1); + } + + usleep_range(10, 20); + val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT)); + + if (val & 0x1) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x1); + + return 0; +} + +static void ne6xvf_startup(struct ne6xvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ne6xvf_hw *hw = &adapter->hw; + int ret; + + WARN_ON(adapter->state != __NE6XVF_STARTUP); + + adapter->flags &= ~NE6XVF_FLAG_PF_COMMS_FAILED; + adapter->flags &= ~NE6XVF_FLAG_RESET_PENDING; + + ret = ne6xvf_check_reset_complete(hw); + if (ret) { + dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", ret); + goto err; + } + + ret = ne6xvf_init_sdk_mbx(hw); + if (ret) { + dev_err(&pdev->dev, "Failed to init SDK (%d)\n", ret); + goto err; + } + + ne6xvf_change_state(adapter, __NE6XVF_INIT_GET_RESOURCES); + + return; + +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); +} + +/** + * ne6xvf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES + * @adapter: board private structure + */ +int ne6xvf_parse_vf_resource_msg(struct ne6xvf_adapter *adapter) +{ + int i, num_req_queues = adapter->num_req_queues; + + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + if (adapter->vf_res->vsi_res[i].vsi_type == NE6XVF_VIRTCHNL_VSI_SRIOV) + adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + } + + if (!adapter->vsi_res) { + dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); + return -ENODEV; + } + + if (num_req_queues && num_req_queues > adapter->vsi_res->num_queue_pairs) { + /* Problem. The PF gave us fewer queues than what we had + * negotiated in our request. Need a reset to see if we can't + * get back to a working state. 
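 + * For example (hypothetical counts): if the VF asked for 8 queue pairs + * and the PF only granted 4, we fall back to 4, set + * NE6XVF_FLAG_REINIT_MSIX_NEEDED and schedule a reset rather than keep + * running with a stale MSI-X layout.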
+ */ + dev_err(&adapter->pdev->dev, "Requested %d queues, but PF only gave us %d.\n", + num_req_queues, adapter->vsi_res->num_queue_pairs); + adapter->flags |= NE6XVF_FLAG_REINIT_MSIX_NEEDED; + adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; + ne6xvf_schedule_reset(adapter); + + return -EAGAIN; + } + adapter->num_req_queues = 0; + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + return 0; +} + +/** + * ne6xvf_init_get_resources - third step of driver startup + * @adapter: board private structure + * + * Function process __NE6XVF_INIT_GET_RESOURCES driver state and + * finishes driver initialization procedure. + * When success the state is changed to __NE6XVF_DOWN + * when fails the state is changed to __NE6XVF_INIT_FAILED + **/ +static int ne6xvf_init_get_resources(struct ne6xvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int ret = 0; + + WARN_ON(adapter->state != __NE6XVF_INIT_GET_RESOURCES); + + if (!adapter->vf_res) { + adapter->vf_res = kzalloc(sizeof(*adapter->vf_res) + + sizeof(struct virtchnl_vsi_resource), + GFP_KERNEL); + if (!adapter->vf_res) + goto err; + } + + adapter->hw_feature = 0x00; + ret = ne6xvf_send_vf_config_msg(adapter, true); + if (ret) { + dev_err(&pdev->dev, "Unable to send config request (%d)\n", ret); + goto err; + } + + ret = ne6xvf_get_vf_config(adapter); + if (ret == NE6XVF_ERR_ADMIN_QUEUE_NO_WORK) { + ret = ne6xvf_send_vf_config_msg(adapter, true); + goto err_alloc; + } else if (ret == NE6XVF_ERR_PARAM) { + /* We only get ERR_PARAM if the device is in a very bad + * state or if we've been disabled for previous bad + * behavior. Either way, we're done now. + */ + dev_err(&pdev->dev, + "Unable to get VF config due to PF error condition, not retrying\n"); + return ret; + } + + if (ret) { + dev_err(&pdev->dev, "Unable to get VF config (%d)\n", ret); + goto err_alloc; + } + + ret = ne6xvf_parse_vf_resource_msg(adapter); + if (ret) { + dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n", ret); + goto err_alloc; + } + + ne6xvf_change_state(adapter, __NE6XVF_INIT_EXTENDED_CAPS); + return ret; + +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); + + return ret; +} + +/** + * ne6xvf_napi_disable_all - disable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void ne6xvf_napi_disable_all(struct ne6xvf_adapter *adapter) +{ + int q_vectors = adapter->num_msix_vectors; + struct ne6x_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = &adapter->q_vectors[q_idx]; + napi_disable(&q_vector->napi); + } +} + +static void ne6xvf_free_queues(struct ne6xvf_adapter *adapter) +{ + if (!adapter->vsi_res) + return; + + adapter->num_active_queues = 0; + kfree(adapter->tg_rings); + adapter->tg_rings = NULL; + kfree(adapter->cq_rings); + adapter->cq_rings = NULL; + kfree(adapter->tx_rings); + adapter->tx_rings = NULL; + kfree(adapter->rx_rings); + adapter->rx_rings = NULL; +} + +/** + * ne6xvf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. 
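 + * + * Sizing sketch (illustrative numbers): with 8 negotiated queue pairs on + * a 4-CPU host and no explicit request pending, num_active_queues becomes + * min(8, num_online_cpus()) = 4, and four ring structures are allocated + * for each of the tg/cq/tx/rx types below.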
+ **/ +static int ne6xvf_alloc_queues(struct ne6xvf_adapter *adapter) +{ + int i, num_active_queues; + + /* If we're in reset reallocating queues we don't actually know yet for + * certain the PF gave us the number of queues we asked for but we'll + * assume it did. Once basic reset is finished we'll confirm once we + * start negotiating config with PF. + */ + if (adapter->num_req_queues) + num_active_queues = adapter->num_req_queues; + else + num_active_queues = min_t(int, adapter->vsi_res->num_queue_pairs, + (int)(num_online_cpus())); + + adapter->tg_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + adapter->cq_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + + adapter->tx_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + if (!adapter->tx_rings) + goto err_out; + + adapter->rx_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + if (!adapter->rx_rings) + goto err_out; + + for (i = 0; i < num_active_queues; i++) { + struct ne6x_ring *tg_ring; + struct ne6x_ring *cq_ring; + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + + tg_ring = &adapter->tg_rings[i]; + tg_ring->queue_index = i; + tg_ring->netdev = adapter->netdev; + tg_ring->dev = pci_dev_to_dev(adapter->pdev); + tg_ring->adpt = adapter; + tg_ring->count = adapter->tx_desc_count; + + cq_ring = &adapter->cq_rings[i]; + cq_ring->queue_index = i; + cq_ring->netdev = adapter->netdev; + cq_ring->dev = pci_dev_to_dev(adapter->pdev); + cq_ring->adpt = adapter; + cq_ring->count = adapter->cq_desc_count; + + tx_ring = &adapter->tx_rings[i]; + tx_ring->queue_index = i; + tx_ring->netdev = adapter->netdev; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->adpt = adapter; + tx_ring->count = adapter->tx_desc_count; + + rx_ring = &adapter->rx_rings[i]; + rx_ring->queue_index = i; + rx_ring->netdev = adapter->netdev; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->adpt = adapter; + rx_ring->count = adapter->rx_desc_count; + } + + adapter->max_queues = num_active_queues; + adapter->num_active_queues = adapter->max_queues; + + return 0; + +err_out: + ne6xvf_free_queues(adapter); + return -ENOMEM; +} + +static void ne6xvf_irq_disable(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int i; + + if (!adapter->msix_entries) + return; + + for (i = 0; i < adapter->num_msix_vectors; i++) { + wr64(hw, NE6XVF_REG_ADDR(i, NE6X_VP_INT_MASK), 0xffffffffffffffff); + synchronize_irq(adapter->msix_entries[i].vector); + } +} + +static void ne6xvf_free_traffic_irqs(struct ne6xvf_adapter *adapter) +{ + int vector, irq_num, q_vectors; + + if (!adapter->msix_entries) + return; + + q_vectors = adapter->num_active_queues; + + for (vector = 0; vector < q_vectors; vector++) { + irq_num = adapter->msix_entries[vector].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, &adapter->q_vectors[vector]); + } +} + +static void ne6xvf_free_q_vectors(struct ne6xvf_adapter *adapter) +{ + int q_idx, num_q_vectors; + int napi_vectors; + + if (!adapter->q_vectors) + return; + + num_q_vectors = adapter->num_msix_vectors; + napi_vectors = adapter->num_active_queues; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = &adapter->q_vectors[q_idx]; + + if (q_idx < napi_vectors) + netif_napi_del(&q_vector->napi); + } + + kfree(adapter->q_vectors); + adapter->q_vectors = NULL; +} + +/** + * ne6xvf_disable_vf - disable a VF that failed to reset + 
* @adapter: private adapter structure + * + * Helper function to shut down the VF when a reset never finishes. + **/ +static void ne6xvf_disable_vf(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ne6xvf_vlan_filter *fv, *fvtmp; + struct ne6xvf_mac_filter *f, *ftmp; + + /* reset never finished */ + adapter->flags |= NE6XVF_FLAG_PF_COMMS_FAILED; + + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. + */ + if (!test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) { + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + ne6xvf_irq_disable(adapter); + ne6xvf_napi_disable_all(adapter); + ne6xvf_free_traffic_irqs(adapter); + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* Delete all of the filters */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + list_del(&f->list); + kfree(f); + } + + list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { + list_del(&fv->list); + kfree(fv); + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + ne6xvf_reset_interrupt_capability(adapter); + ne6xvf_free_q_vectors(adapter); + ne6xvf_free_queues(adapter); + memset(adapter->vf_res, 0, sizeof(struct virtchnl_vf_resource)); + adapter->netdev->flags &= ~IFF_UP; + adapter->flags &= ~NE6XVF_FLAG_RESET_PENDING; + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + + dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); +} + +/** + * ne6xvf_acquire_msix_vectors - Setup the MSIX capability + * @adapter: board private structure + * @vectors: number of vectors to request + * + * Work with the OS to set up the MSIX vectors needed. + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_acquire_msix_vectors(struct ne6xvf_adapter *adapter, int vectors) +{ + int v_actual; + + /* We'll want at least 3 (vector_threshold): + * 0) Other (Admin Queue and link, mostly) + * 1) TxQ[0] Cleanup + * 2) RxQ[0] Cleanup + * + * The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + v_actual = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1, vectors); + if (v_actual != vectors) { + dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts: %d\n", v_actual); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + pci_disable_msi(adapter->pdev); + return v_actual; + } + + adapter->num_msix_vectors = v_actual; + + return 0; +} + +/** + * ne6xvf_set_interrupt_capability - set MSI-X or FAIL if not supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. 
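 + * + * Budget sketch (illustrative): v_budget equals the active queue count, + * so four queues populate msix_entries[0..3]; pci_enable_msix_range() + * must grant the full budget, otherwise the entries are freed and the + * request is treated as failed.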
+ **/ +static int ne6xvf_set_interrupt_capability(struct ne6xvf_adapter *adapter) +{ + int vector, v_budget; + int err = 0; + + if (!adapter->vsi_res) + return -EIO; + + v_budget = adapter->num_active_queues; + adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) { + err = -ENOMEM; + goto out; + } + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + dev_info(&adapter->pdev->dev, "v_budget:%d, adapter->vf_res->max_vectors: %d\n", v_budget, + adapter->vf_res->max_vectors); + err = ne6xvf_acquire_msix_vectors(adapter, v_budget); +out: + netif_set_real_num_rx_queues(adapter->netdev, v_budget); + netif_set_real_num_tx_queues(adapter->netdev, v_budget); + + return err; +} + +/** + * ne6xvf_fill_rss_lut - Fill the lut with default values + * @adapter: board private structure + **/ +void ne6xvf_fill_rss_lut(struct ne6xvf_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->rss_info.ind_table_size; i++) + adapter->rss_info.ind_table[i] = i % adapter->num_active_queues; +} + +/** + * ne6xvf_init_rss - Prepare for RSS + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_init_rss(struct ne6xvf_adapter *adapter) +{ + struct ne6x_rss_info *rss_info = &adapter->rss_info; + + /* begin rss info */ + rss_info->hash_type = NE6X_RSS_HASH_TYPE_IPV4_TCP | + NE6X_RSS_HASH_TYPE_IPV4_UDP | + NE6X_RSS_HASH_TYPE_IPV4 | + NE6X_RSS_HASH_TYPE_IPV6_TCP | + NE6X_RSS_HASH_TYPE_IPV6_UDP | + NE6X_RSS_HASH_TYPE_IPV6; + rss_info->hash_func = NE6X_RSS_HASH_FUNC_TOEPLITZ; + rss_info->hash_key_size = NE6X_RSS_MAX_KEY_SIZE; + rss_info->ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE; + ne6xvf_fill_rss_lut(adapter); + netdev_rss_key_fill((void *)&adapter->rss_info.hash_key[0], + adapter->rss_info.hash_key_size); + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS; + adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS; + + return 0; +} + +/** + * ne6xvf_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
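 + * + * Mapping sketch: q_vectors[i] receives a NAPI context driven by + * ne6xvf_napi_poll and is later paired 1:1 with cq/rx/tx ring i by + * ne6xvf_map_rings_to_vectors(), e.g. vector 2 only polls the three + * rings with index 2.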
+ **/ +static int ne6xvf_alloc_q_vectors(struct ne6xvf_adapter *adapter) +{ + struct ne6x_q_vector *q_vector; + int q_idx, num_q_vectors; + + num_q_vectors = adapter->num_active_queues; + adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), GFP_KERNEL); + if (!adapter->q_vectors) + return -ENOMEM; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = &adapter->q_vectors[q_idx]; + q_vector->adpt = adapter; + q_vector->v_idx = q_idx; + q_vector->reg_idx = q_idx; + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); + netif_napi_add(adapter->netdev, &q_vector->napi, ne6xvf_napi_poll); + } + + return 0; +} + +/** + * ne6xvf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +int ne6xvf_init_interrupt_scheme(struct ne6xvf_adapter *adapter) +{ + int err; + + err = ne6xvf_alloc_queues(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + rtnl_lock(); + err = ne6xvf_set_interrupt_capability(adapter); + rtnl_unlock(); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = ne6xvf_alloc_q_vectors(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", + (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", + adapter->num_active_queues); + + return 0; + +err_alloc_q_vectors: + ne6xvf_reset_interrupt_capability(adapter); +err_set_interrupt: + ne6xvf_free_queues(adapter); +err_alloc_queues: + return err; +} + +/** + * ne6xvf_map_vector_to_cq - associate irqs with complete queues + * @adapter: board private structure + * @v_idx: interrupt number + * @r_idx: queue number + **/ +static void ne6xvf_map_vector_to_cq(struct ne6xvf_adapter *adapter, int v_idx, int r_idx) +{ + struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *cq_ring = &adapter->cq_rings[r_idx]; + + cq_ring->q_vector = q_vector; + cq_ring->next = q_vector->cq.ring; + q_vector->cq.ring = cq_ring; + q_vector->cq.count++; +} + +/** + * ne6xvf_map_vector_to_rxq - associate irqs with rx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @r_idx: queue number + **/ +static void ne6xvf_map_vector_to_rxq(struct ne6xvf_adapter *adapter, int v_idx, int r_idx) +{ + struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *rx_ring = &adapter->rx_rings[r_idx]; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; +} + +/** + * ne6xvf_map_vector_to_txq - associate irqs with tx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @t_idx: queue number + **/ +static void ne6xvf_map_vector_to_txq(struct ne6xvf_adapter *adapter, int v_idx, int t_idx) +{ + struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *tx_ring = &adapter->tx_rings[t_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + q_vector->num_ringpairs++; +} + +/** + * ne6xvf_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. 
Ideally, we'd have
+ * one vector per ring/queue; since this driver acquires one MSI-X vector
+ * per active queue, completion, Rx and Tx ring N are simply mapped to
+ * vector N. New mapping configurations would be added in here.
+ **/
+static void ne6xvf_map_rings_to_vectors(struct ne6xvf_adapter *adapter)
+{
+	int rings_remaining = adapter->num_active_queues;
+	int ridx;
+
+	for (ridx = 0; ridx < rings_remaining; ridx++) {
+		ne6xvf_map_vector_to_cq(adapter, ridx, ridx);
+		ne6xvf_map_vector_to_rxq(adapter, ridx, ridx);
+		ne6xvf_map_vector_to_txq(adapter, ridx, ridx);
+	}
+}
+
+/**
+ * ne6xvf_setup_all_tg_resources - allocate all queues Tg resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ne6xvf_setup_all_tg_resources(struct ne6xvf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		adapter->tg_rings[i].count = adapter->tx_desc_count;
+		err = ne6x_setup_tg_descriptors(&adapter->tg_rings[i]);
+		if (!err)
+			continue;
+
+		dev_err(&adapter->pdev->dev, "Allocation for Tg Queue %u failed\n", i);
+		break;
+	}
+
+	return err;
+}
+
+/**
+ * ne6xvf_setup_all_cq_resources - allocate all queues CQ resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ne6xvf_setup_all_cq_resources(struct ne6xvf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		adapter->cq_rings[i].count = adapter->tx_desc_count;
+		err = ne6x_setup_cq_descriptors(&adapter->cq_rings[i]);
+		if (!err)
+			continue;
+
+		dev_err(&adapter->pdev->dev, "Allocation for complete Queue %u failed\n", i);
+		break;
+	}
+
+	return err;
+}
+
+/**
+ * ne6xvf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ne6xvf_setup_all_tx_resources(struct ne6xvf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		adapter->tx_rings[i].count = adapter->tx_desc_count;
+		err = ne6x_setup_tx_descriptors(&adapter->tx_rings[i]);
+		if (!err)
+			err = ne6x_setup_tx_sgl(&adapter->tx_rings[i]);
+		if (!err)
+			continue;
+
+		dev_err(&adapter->pdev->dev, "Allocation for Tx Queue %u failed\n", i);
+		break;
+	}
+
+	return err;
+}
+
+/**
+ * ne6xvf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
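+ * In this driver that cleanup is handled by the error paths of
+ * ne6xvf_open() and ne6xvf_handle_reset(), which free all ring
+ * resources again on failure.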
+ * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_rx_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->rx_rings[i].count = adapter->rx_desc_count; + err = ne6x_setup_rx_descriptors(&adapter->rx_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "Allocation for Rx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t ne6xvf_msix_clean_rings(int irq, void *data) +{ + struct ne6x_q_vector *q_vector = data; + struct ne6xvf_adapter *adpt = (struct ne6xvf_adapter *)q_vector->adpt; + u64 val; + + if (!q_vector->tx.ring && !q_vector->rx.ring && !q_vector->cq.ring) + return IRQ_HANDLED; + + napi_schedule_irqoff(&q_vector->napi); + val = rd64(&adpt->hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK)); + val |= 1ULL << NE6X_VP_CQ_INTSHIFT; + wr64(&adpt->hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK), val); + + return IRQ_HANDLED; +} + +/** + * ne6xvf_irq_affinity_notify - Callback for affinity changes + * @notify: context as to what irq was changed + * @mask: the new affinity mask + * + * This is a callback function used by the irq_set_affinity_notifier function + * so that we may register to receive changes to the irq affinity masks. + **/ +static void ne6xvf_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) +{ + struct ne6x_q_vector *q_vector; + + q_vector = container_of(notify, struct ne6x_q_vector, affinity_notify); + cpumask_copy(&q_vector->affinity_mask, mask); +} + +/** + * ne6xvf_irq_affinity_release - Callback for affinity notifier release + * @ref: internal core kernel usage + * + * This is a callback function used by the irq_set_affinity_notifier function + * to inform the current notification subscriber that they will no longer + * receive notifications. + **/ +static void ne6xvf_irq_affinity_release(struct kref *ref) {} + +/** + * ne6xvf_request_traffic_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * @basename: device basename + * + * Allocates MSI-X vectors for tx and rx handling, and requests + * interrupts from the kernel. 
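+ *
+ * Each vector is named "ne6xvf-<basename>-TxRx-<n>" (or -rx-/-tx- when a
+ * vector carries only one direction) so the mapping is visible in
+ * /proc/interrupts, and an affinity hint spreads the vectors across the
+ * online CPUs.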
+ **/
+static int ne6xvf_request_traffic_irqs(struct ne6xvf_adapter *adapter, char *basename)
+{
+	unsigned int rx_int_idx = 0, tx_int_idx = 0;
+	unsigned int vector, q_vectors;
+	int irq_num, err;
+	int cpu;
+
+	ne6xvf_irq_disable(adapter);
+	/* one vector is requested per active queue */
+	q_vectors = adapter->num_active_queues;
+
+	for (vector = 0; vector < q_vectors; vector++) {
+		struct ne6x_q_vector *q_vector = &adapter->q_vectors[vector];
+
+		irq_num = adapter->msix_entries[vector].vector;
+
+		if (q_vector->tx.ring && q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name), "ne6xvf-%s-TxRx-%u",
+				 basename, rx_int_idx++);
+			tx_int_idx++;
+		} else if (q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name),
+				 "ne6xvf-%s-rx-%u", basename,
+				 rx_int_idx++);
+		} else if (q_vector->tx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name),
+				 "ne6xvf-%s-tx-%u", basename,
+				 tx_int_idx++);
+		} else {
+			/* skip this unused q_vector */
+			continue;
+		}
+
+		err = request_irq(irq_num, ne6xvf_msix_clean_rings, 0, q_vector->name, q_vector);
+		if (err) {
+			dev_info(&adapter->pdev->dev, "request_irq failed, error: %d\n", err);
+			goto free_queue_irqs;
+		}
+
+		/* register for affinity change notifications */
+		q_vector->affinity_notify.notify = ne6xvf_irq_affinity_notify;
+		q_vector->affinity_notify.release = ne6xvf_irq_affinity_release;
+		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+
+		/* Spread the IRQ affinity hints across online CPUs. Note that
+		 * get_cpu_mask returns a mask with a permanent lifetime so
+		 * it's safe to use as a hint for irq_set_affinity_hint.
+		 */
+		cpu = cpumask_local_spread(q_vector->v_idx, -1);
+		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+	}
+
+	return 0;
+
+free_queue_irqs:
+	while (vector) {
+		vector--;
+		irq_num = adapter->msix_entries[vector].vector;
+		irq_set_affinity_notifier(irq_num, NULL);
+		irq_set_affinity_hint(irq_num, NULL);
+		free_irq(irq_num, &adapter->q_vectors[vector]);
+	}
+
+	return err;
+}
+
+/**
+ * ne6xvf_configure_queues - Request PF queue configuration
+ * @adapter: adapter structure
+ *
+ * Request that the PF set up our (previously allocated) queues.
+ **/
+void ne6xvf_configure_queues(struct ne6xvf_adapter *adapter)
+{
+	unsigned int rx_buf_len = NE6X_RXBUFFER_2048;
+	struct ne6xvf_hw *hw = &adapter->hw;
+	union ne6x_sq_base_addr sq_base_addr;
+	union ne6x_rq_base_addr rq_base_addr;
+	union ne6x_rq_block_cfg rq_block_cfg;
+	union ne6x_cq_base_addr cq_base_addr;
+	union ne6x_cq_cfg cq_cfg;
+	union ne6x_sq_cfg sq_cfg;
+	union ne6x_rq_cfg rc_cfg;
+	int i;
+
+	/* Legacy Rx will always default to a 2048 buffer size.
*/ +#if (PAGE_SIZE < 8192) + if (!(adapter->flags & NE6XVF_FLAG_LEGACY_RX)) + /* For jumbo frames on systems with 4K pages we have to use + * an order 1 page, so we might as well increase the size + * of our Rx buffer to make better use of the available space + */ + rx_buf_len = NE6X_RXBUFFER_4096; +#endif + + for (i = 0; i < adapter->num_active_queues; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1); + + usleep_range(100, 120); + + for (i = 0; i < adapter->num_active_queues; i++) { + /* cq */ + /* cache tail for quicker writes, and clear the reg before use */ + adapter->cq_rings[i].tail = (u64 __iomem *)(hw->hw_addr0 + NE6XVF_QC_TAIL1(i)); + adapter->cq_rings[i].reg_idx = hw->dev_caps.base_queue + i; + + cq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_BASE_ADDR)); + cq_base_addr.reg.csr_cq_base_addr_vp = adapter->cq_rings[i].dma; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_BASE_ADDR), cq_base_addr.val); + + cq_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_CFG)); + cq_cfg.reg.csr_cq_len_vp = adapter->cq_rings[i].count; + cq_cfg.reg.csr_cq_merge_time_vp = 7; + cq_cfg.reg.csr_cq_merge_size_vp = 7; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_CFG), cq_cfg.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_TAIL_POINTER), 0x0); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_HD_POINTER), 0x0); + + /* tx */ + /* cache tail off for easier writes later */ + adapter->tx_rings[i].tail = (u64 __iomem *)(hw->hw_addr2 + NE6XVF_QTX_TAIL1(i)); + adapter->tx_rings[i].reg_idx = hw->dev_caps.base_queue + i; + + sq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_BASE_ADDR)); + sq_base_addr.reg.csr_sq_base_addr_vp = adapter->tx_rings[i].dma; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_BASE_ADDR), sq_base_addr.val); + + sq_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_CFG)); + sq_cfg.reg.csr_sq_len_vp = adapter->tx_rings[i].count; + sq_cfg.reg.csr_tdq_pull_en = 0x1; + sq_cfg.reg.csr_sqevt_write_back_vp = 0x0; + sq_cfg.reg.csr_send_pd_revers_en = 0x0; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_CFG), sq_cfg.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_HD_POINTER), 0x0); + + /* rx */ + /* cache tail for quicker writes, and clear the reg before use */ + adapter->rx_rings[i].tail = (u64 __iomem *)(hw->hw_addr2 + NE6XVF_QRX_TAIL1(i)); + adapter->rx_rings[i].rx_buf_len = rx_buf_len; + adapter->rx_rings[i].reg_idx = hw->dev_caps.base_queue + i; + + rq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BASE_ADDR)); + rq_base_addr.reg.csr_rq_base_addr_vp = adapter->rx_rings[i].dma; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BASE_ADDR), rq_base_addr.val); + + rq_block_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BLOCK_CFG)); + rq_block_cfg.reg.csr_rdq_mop_len = adapter->rx_rings[i].rx_buf_len; + rq_block_cfg.reg.csr_rdq_sop_len = 0; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BLOCK_CFG), rq_block_cfg.val); + + rc_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_CFG)); + rc_cfg.reg.csr_rq_len_vp = adapter->rx_rings[i].count; + rc_cfg.reg.csr_rdq_pull_en = 0x1; + rc_cfg.reg.csr_rqevt_write_back_vp = 0x0; + rc_cfg.reg.csr_recv_pd_type_vp = 0x0; + rc_cfg.reg.csr_recv_pd_revers_en = 0x0; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_CFG), rc_cfg.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_HD_POINTER), 0x0); + } + + for (i = 0; i < adapter->num_active_queues; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x0); + + usleep_range(100, 
120); +} + +/** + * ne6xvf_configure - set up transmit and receive data structures + * @adapter: board private structure + **/ +static void ne6xvf_configure(struct ne6xvf_adapter *adapter) +{ + int i; + + ne6xvf_configure_queues(adapter); + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_QUEUES; + + for (i = 0; i < adapter->num_active_queues; i++) { + struct ne6x_ring *ring = &adapter->rx_rings[i]; + + ne6x_alloc_rx_buffers(ring, NE6X_DESC_UNUSED(ring)); + usleep_range(1000, 2000); + } +} + +/** + * ne6xvf_napi_enable_all - enable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void ne6xvf_napi_enable_all(struct ne6xvf_adapter *adapter) +{ + int q_vectors = adapter->num_msix_vectors; + struct ne6x_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct napi_struct *napi; + + q_vector = &adapter->q_vectors[q_idx]; + napi = &q_vector->napi; + napi_enable(napi); + } +} + +/** + * ne6xvf_up_complete - Finish the last steps of bringing up a connection + * @adapter: board private structure + * + * Expects to be called while holding the __NE6XVF_IN_CRITICAL_TASK bit lock. + **/ +static void ne6xvf_up_complete(struct ne6xvf_adapter *adapter) +{ + ne6xvf_change_state(adapter, __NE6XVF_RUNNING); + clear_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + ne6xvf_napi_enable_all(adapter); + + adapter->aq_required |= NE6XVF_FLAG_AQ_ENABLE_QUEUES; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +/** + * ne6xvf_reinit_interrupt_scheme - Reallocate queues and vectors + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_reinit_interrupt_scheme(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (!test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) + ne6xvf_free_traffic_irqs(adapter); + + ne6xvf_reset_interrupt_capability(adapter); + ne6xvf_free_q_vectors(adapter); + ne6xvf_free_queues(adapter); + + err = ne6xvf_init_interrupt_scheme(adapter); + if (err) + goto err; + + netif_tx_stop_all_queues(netdev); + + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + ne6xvf_map_rings_to_vectors(adapter); +err: + return err; +} + +static void ne6xvf_get_port_link_status(struct ne6xvf_adapter *adapter); + +/** + * ne6xvf_handle_reset - Handle hardware reset + * @adapter: pointer to ne6xvf_adapter + * + * During reset we need to shut down and reinitialize the admin queue + * before we can use it to communicate with the PF again. We also clear + * and reinit the rings because that context is lost as well. + * + * This function is called in the __NE6XVF_RESETTING driver state. If a reset + * is detected and completes, the driver state changed to __NE6XVF_RUNNING or + * __NE6XVF_DOWN, else driver state will remain in __NE6XVF_RESETTING. + * + * The function is called with the NE6XVF_FLAG_RESET_PENDING flag set and it is + * cleared when a reset is detected and completes. + **/ +static void ne6xvf_handle_reset(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ne6xvf_hw *hw = &adapter->hw; + bool running; + int err, i; + + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. 
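+	 * Instead we remember whether the adapter was in __NE6XVF_RUNNING
+	 * before the reset and only restore the data path in that case.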
+ */ + running = (adapter->last_state == __NE6XVF_RUNNING); + + if (running) { + netdev->flags &= ~IFF_UP; + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + adapter->link_up = false; + ne6xvf_napi_disable_all(adapter); + } + + pci_set_master(adapter->pdev); + pci_restore_msi_state(adapter->pdev); + + ne6xvf_irq_disable(adapter); + + for (i = 0; i < adapter->num_msix_vectors; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1); + + usleep_range(100, 120); + + /* free the Tx/Rx rings and descriptors, might be better to just + * re-use them sometime in the future + */ + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + + /* Set the queues_disabled flag when VF is going through reset + * to avoid a race condition especially for ADQ i.e. when a VF ADQ is + * configured, PF resets the VF to allocate ADQ resources. When this + * happens there's a possibility to hit a condition where VF is in + * running state but the queues haven't been enabled yet. So wait for + * virtchnl success message for enable queues and then unset this flag. + * Don't allow the link to come back up until that happens. + */ + adapter->flags |= NE6XVF_FLAG_QUEUES_DISABLED; + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + adapter->aq_required = 0; + + err = ne6xvf_reinit_interrupt_scheme(adapter); + if (err) + goto reset_err; + + adapter->aq_required |= NE6XVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required |= NE6XVF_FLAG_AQ_MAP_VECTORS; + + /* We were running when the reset started, so we need + * to restore some state here. + */ + if (running) { + err = ne6xvf_setup_all_tg_resources(adapter); + if (err) + goto reset_err; + + err = ne6xvf_setup_all_cq_resources(adapter); + if (err) + goto reset_err; + + /* allocate transmit descriptors */ + err = ne6xvf_setup_all_tx_resources(adapter); + if (err) + goto reset_err; + + /* allocate receive descriptors */ + err = ne6xvf_setup_all_rx_resources(adapter); + if (err) + goto reset_err; + + if ((adapter->flags & NE6XVF_FLAG_REINIT_MSIX_NEEDED) || + (adapter->flags & NE6XVF_FLAG_REINIT_ITR_NEEDED)) { + err = ne6xvf_request_traffic_irqs(adapter, netdev->name); + if (err) + goto reset_err; + + adapter->flags &= ~NE6XVF_FLAG_REINIT_MSIX_NEEDED; + } + + ne6xvf_configure(adapter); + + /* ne6xvf_up_complete() will switch device back + * to __NE6XVF_RUNNING + */ + ne6xvf_up_complete(adapter); + + ne6xvf_irq_enable(adapter, true); + + ne6xvf_get_port_link_status(adapter); + + netdev->flags |= IFF_UP; + } else { + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + } + + adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED; + + return; + +reset_err: + if (running) { + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + ne6xvf_free_traffic_irqs(adapter); + netdev->flags &= ~IFF_UP; + } + + dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); + ne6xvf_disable_vf(adapter); +} + +/** + * ne6xvf_init_process_extended_caps - Part of driver startup + * @adapter: board private structure + * + * Function processes __NE6XVF_INIT_EXTENDED_CAPS driver state. This state + * handles negotiating capabilities for features which require an additional + * message. + * + * Once all extended capabilities exchanges are finished, the driver will + * transition into __NE6XVF_INIT_CONFIG_ADAPTER. 
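+ *
+ * No extended capability exchanges are implemented for this device yet,
+ * so this state currently just records the negotiated queue pair count
+ * and moves straight on.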
+ */
+static void ne6xvf_init_process_extended_caps(struct ne6xvf_adapter *adapter)
+{
+	WARN_ON(adapter->state != __NE6XVF_INIT_EXTENDED_CAPS);
+
+	/* When we reach here, no further extended capabilities exchanges are
+	 * necessary, so we finally transition into __NE6XVF_INIT_CONFIG_ADAPTER
+	 */
+	adapter->vsi_res->num_queue_pairs = adapter->vf_res->num_queue_pairs;
+	adapter->hw_feature = 0x00;
+	ne6xvf_change_state(adapter, __NE6XVF_INIT_CONFIG_ADAPTER);
+}
+
+/**
+ * ne6xvf_process_config - Process the config information we got from the PF
+ * @adapter: board private structure
+ *
+ * Verify that we have a valid config struct, and set up our netdev features
+ * and our VSI struct.
+ **/
+int ne6xvf_process_config(struct ne6xvf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	netdev_features_t csumo_features;
+	netdev_features_t vlano_features;
+	netdev_features_t dflt_features;
+	netdev_features_t tso_features;
+
+	dflt_features = NETIF_F_SG |
+			NETIF_F_HIGHDMA |
+			NETIF_F_RXHASH;
+
+	csumo_features = NETIF_F_RXCSUM |
+			 NETIF_F_IP_CSUM |
+			 NETIF_F_SCTP_CRC |
+			 NETIF_F_IPV6_CSUM;
+
+	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
+	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
+			 NETIF_F_HW_VLAN_CTAG_TX |
+			 NETIF_F_HW_VLAN_CTAG_RX;
+
+	tso_features = NETIF_F_TSO |
+		       NETIF_F_TSO_ECN |
+		       NETIF_F_TSO6 |
+		       NETIF_F_GSO_GRE |
+		       NETIF_F_GSO_UDP_TUNNEL |
+		       NETIF_F_LRO |
+		       NETIF_F_LOOPBACK |
+		       NETIF_F_GSO_GRE_CSUM |
+		       NETIF_F_GSO_UDP_TUNNEL_CSUM |
+		       NETIF_F_GSO_PARTIAL |
+		       NETIF_F_GSO_IPXIP4 |
+		       NETIF_F_GSO_IPXIP6 |
+		       NETIF_F_GSO_UDP_L4 |
+		       NETIF_F_GSO_SCTP;
+
+	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
+
+	/* set features that user can change */
+	netdev->hw_features = dflt_features | csumo_features | vlano_features | tso_features;
+
+	/* add support for HW_CSUM on packets with MPLS header */
+	netdev->mpls_features = NETIF_F_HW_CSUM;
+
+	netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
+
+	/* enable features */
+	netdev->features |= netdev->hw_features;
+	/* encap and VLAN devices inherit default, csumo and tso features */
+	netdev->hw_enc_features |= dflt_features | csumo_features | tso_features;
+	netdev->vlan_features |= dflt_features | csumo_features | tso_features;
+	netdev->hw_features |= NETIF_F_HW_TC;
+
+	/* advertise support but don't enable by default since only one type of
+	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
+	 * type turns on the other has to be turned off. This is enforced by the
+	 * ne6xvf_fix_features() ndo callback.
+	 */
+	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
+			       NETIF_F_HW_VLAN_STAG_TX |
+			       NETIF_F_HW_VLAN_STAG_FILTER;
+
+	netdev->gso_max_size = 65535;
+	ne6xvf_sync_features(netdev);
+
+	return 0;
+}
+
+/**
+ * ne6xvf_init_config_adapter - last part of driver startup
+ * @adapter: board private structure
+ *
+ * After all the supported capabilities are negotiated, then the
+ * __NE6XVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
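+ *
+ * This is where the netdev ops, ethtool ops and MAC address are set up,
+ * the descriptor ring sizes are chosen and the netdev is registered.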
+ */ +static void ne6xvf_init_config_adapter(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int ret; + + WARN_ON(adapter->state != __NE6XVF_INIT_CONFIG_ADAPTER); + + if (ne6xvf_process_config(adapter)) + goto err; + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + adapter->flags |= NE6XVF_FLAG_RX_CSUM_ENABLED; + + netdev->netdev_ops = &ne6xvf_netdev_ops; + ne6xvf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->min_mtu = NE6X_MIN_MTU_SIZE; + netdev->max_mtu = NE6X_MAX_RXBUFFER - ETH_HLEN - ETH_FCS_LEN; + + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", + adapter->hw.mac.addr); + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + eth_hw_addr_set(netdev, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + } + + adapter->tx_desc_count = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adapter->rx_desc_count = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adapter->cq_desc_count = adapter->tx_desc_count + adapter->rx_desc_count; + ret = ne6xvf_init_interrupt_scheme(adapter); + if (ret) + goto err_sw_init; + + ne6xvf_map_rings_to_vectors(adapter); + + netif_carrier_off(netdev); + adapter->link_up = false; + if (!adapter->netdev_registered) { + ret = ne6xvf_register_netdev(adapter); + if (ret) + goto err_register; + } + adapter->netdev_registered = true; + + netif_tx_stop_all_queues(netdev); + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + wake_up(&adapter->down_waitqueue); + ne6xvf_init_rss(adapter); + adapter->trusted = 0; + return; + +err_register: +err_sw_init: + ne6xvf_reset_interrupt_capability(adapter); +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); +} + +/** + * ne6xvf_process_aq_command - process aq_required flags + * and sends aq command + * @adapter: pointer to ne6xvf adapter structure + * + * Returns 0 on success + * Returns error code if no command was sent + * or error code if the command failed. 
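+ *
+ * Only one pending request is serviced per invocation; the watchdog
+ * reschedules itself more frequently while adapter->aq_required is
+ * non-zero, so remaining flags are handled on subsequent runs.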
+ **/ +static int ne6xvf_process_aq_command(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + + if (adapter->aq_required & NE6XVF_FLAG_AQ_GET_CONFIG) + return ne6xvf_send_vf_config_msg(adapter, false); + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD) + return ne6xvf_send_vf_offload_msg(adapter); + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_RSS) { + ne6xvf_config_rss_info(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CHANGED_RSS) { + ne6xvf_changed_rss(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_QUEUES) { + if (ne6xvf_request_queues(adapter, adapter->num_active_queues) == 0) { + usleep_range(50, 100); + if (ne6xvf_poll_virtchnl_msg(adapter, &event, + VIRTCHNL_OP_REQUEST_QUEUES) == 0) { + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_QUEUES; + } + } + return 0; + } + if (adapter->aq_required & NE6XVF_FLAG_AQ_ENABLE_QUEUES) { + ne6xvf_enable_queues(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES; + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS) { + ne6xvf_vchanel_get_port_link_status(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_SET_VF_MAC) { + ne6xvf_set_vf_addr(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_ADD_MAC_FILTER) { + ne6xvf_add_ether_addrs(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_DEL_MAC_FILTER) { + ne6xvf_del_ether_addrs(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_ADD_VLAN_FILTER) { + ne6xvf_add_vlans(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_DEL_VLAN_FILTER) { + ne6xvf_del_vlans(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_REQUEST_PROMISC) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_REQUEST_PROMISC; + ne6xvf_set_promiscuous(adapter); + + return 0; + } + return -EAGAIN; +} + +/** + * ne6xvf_asq_done - check if FW has processed the Admin Send Queue + * @hw: pointer to the hw struct + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. 
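+ *
+ * This VF implementation does not track in-flight mailbox descriptors,
+ * so it unconditionally reports true.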
+ **/
+bool ne6xvf_asq_done(struct ne6xvf_hw *hw)
+{
+	return true;
+}
+
+/**
+ * ne6xvf_register_netdev - register netdev
+ * @adapter: pointer to the ne6xvf_adapter struct
+ *
+ * Returns 0 if the netdev was registered successfully
+ **/
+int ne6xvf_register_netdev(struct ne6xvf_adapter *adapter)
+{
+	char newname[IFNAMSIZ] = {0};
+	int ret;
+	u16 domain_num;
+
+	domain_num = pci_domain_nr(adapter->pdev->bus);
+
+	/* There are PCIe devices with the same bus number but a different
+	 * PCIe domain; for those the netdev name must include the domain
+	 * number as well.
+	 */
+	if (domain_num)
+		snprintf(newname, sizeof(newname), "enP%dp%ds0f%dv%d", domain_num,
+			 adapter->hw.bus.bus_id, adapter->hw.dev_caps.lport,
+			 adapter->hw.dev_caps.vf_id % adapter->hw.dev_caps.num_vf_per_pf);
+	else
+		snprintf(newname, sizeof(newname), "enp%ds0f%dv%d", adapter->hw.bus.bus_id,
+			 adapter->hw.dev_caps.lport,
+			 adapter->hw.dev_caps.vf_id % adapter->hw.dev_caps.num_vf_per_pf);
+
+	strscpy(adapter->netdev->name, newname, IFNAMSIZ);
+	dev_info(&adapter->pdev->dev, "name: %s\n", newname);
+	ret = register_netdev(adapter->netdev);
+	if (ret) {
+		snprintf(newname, sizeof(newname), "enp%ds0f%dv%%d", adapter->hw.bus.bus_id,
+			 adapter->hw.dev_caps.lport);
+		strscpy(adapter->netdev->name, newname, IFNAMSIZ);
+		ret = register_netdev(adapter->netdev);
+	}
+	return ret;
+}
+
+static void ne6xvf_watchdog_task(struct work_struct *work)
+{
+	struct ne6xvf_adapter *adapter = container_of(work, struct ne6xvf_adapter,
+						      watchdog_task.work);
+	struct ne6xvf_hw *hw = &adapter->hw;
+
+	if (ne6xvf_is_remove_in_progress(adapter))
+		return;
+
+	if (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section))
+		goto restart_watchdog;
+
+	if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED)
+		ne6xvf_change_state(adapter, __NE6XVF_COMM_FAILED);
+
+	if ((adapter->flags & NE6XVF_FLAG_RESET_NEEDED) && adapter->state != __NE6XVF_RESETTING) {
+		adapter->flags &= ~NE6XVF_FLAG_RESET_NEEDED;
+		ne6xvf_change_state(adapter, __NE6XVF_RESETTING);
+		adapter->aq_required = 0;
+		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+	}
+
+	switch (adapter->state) {
+	case __NE6XVF_INIT_FAILED:
+		/* Try again from failed step */
+		ne6xvf_change_state(adapter, adapter->last_state);
+		clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);
+		queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, HZ);
+		return;
+	case __NE6XVF_COMM_FAILED:
+		adapter->aq_required = 0;
+		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+		clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);
+		queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(10));
+		return;
+	case __NE6XVF_RESETTING:
+		ne6xvf_handle_reset(adapter);
+		clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);
+		queue_work(ne6xvf_wq, &adapter->watchdog_task.work);
+		return;
+	case __NE6XVF_DOWN:
+	case __NE6XVF_DOWN_PENDING:
+	case __NE6XVF_TESTING:
+	case __NE6XVF_RUNNING:
+		if (adapter->current_op) {
+			if (!ne6xvf_asq_done(hw)) {
+				dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
+				ne6xvf_send_api_ver(adapter);
+			}
+		} else {
+			int ret = ne6xvf_process_aq_command(adapter);
+
+			/* An error will be returned if no commands were
+			 * processed; use this opportunity to update stats
+			 * if the error isn't -EOPNOTSUPP
+			 */
+			if (ret && ret != -EOPNOTSUPP && adapter->state == __NE6XVF_RUNNING)
+				ne6xvf_request_stats(adapter);
+		}
+		break;
+	case __NE6XVF_REMOVE:
+		clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);
+		return;
+	default:
+		break;
+	}
+
+	clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+restart_watchdog:
+	queue_work(ne6xvf_wq,
&adapter->sdk_task); + if (adapter->aq_required) + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(20)); + else + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(1000)); +} + +inline void ne6xvf_init_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_init((struct mutex *)sp); +} + +void ne6xvf_acquire_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_lock((struct mutex *)sp); +} + +void ne6xvf_release_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_unlock((struct mutex *)sp); +} + +void ne6xvf_destroy_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_destroy((struct mutex *)sp); +} + +/** + * ne6xvf_find_filter - Search filter list for specific mac filter + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. + **/ +static struct ne6xvf_mac_filter *ne6xvf_find_filter(struct ne6xvf_adapter *adapter, + const u8 *macaddr) +{ + struct ne6xvf_mac_filter *f; + + if (!macaddr) + return NULL; + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (ether_addr_equal(macaddr, f->macaddr)) + return f; + } + + return NULL; +} + +/** + * ne6xvf_add_filter - Add a mac filter to the filter list + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +static struct ne6xvf_mac_filter *ne6xvf_add_filter(struct ne6xvf_adapter *adapter, + const u8 *macaddr) +{ + struct ne6xvf_mac_filter *f; + + if (!macaddr) + return NULL; + + f = ne6xvf_find_filter(adapter, macaddr); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return f; + + ether_addr_copy(f->macaddr, macaddr); + + list_add_tail(&f->list, &adapter->mac_filter_list); + f->add = true; + f->add_handled = false; + f->is_new_mac = true; + f->is_primary = false; + adapter->aq_required |= NE6XVF_FLAG_AQ_ADD_MAC_FILTER; + } else { + f->remove = false; + } + + return f; +} + +/** + * ne6xvf_down - Shutdown the connection processing + * @adapter: board private structure + * + * Expects to be called while holding the __NE6XVF_IN_CRITICAL_TASK bit lock. + **/ +void ne6xvf_down(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ne6xvf_vlan_filter *vlf; + struct ne6xvf_mac_filter *f; + + if (adapter->state <= __NE6XVF_DOWN_PENDING) + return; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + ne6xvf_irq_disable(adapter); + ne6xvf_napi_disable_all(adapter); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* clear the sync flag on all filters */ + __dev_uc_unsync(adapter->netdev, NULL); + __dev_mc_unsync(adapter->netdev, NULL); + + /* remove all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) + f->remove = true; + + /* remove all VLAN filters */ + list_for_each_entry(vlf, &adapter->vlan_filter_list, list) + vlf->remove = true; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + if (!(adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) && + adapter->state != __NE6XVF_RESETTING) { + dev_info(&adapter->pdev->dev, "%s: state->%s\n", __func__, + ne6xvf_state_str(adapter->state)); + /* cancel any current operation */ + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + /* Schedule operations to close down the HW. Don't wait + * here for this to complete. The watchdog is still running + * and it will take care of this. 
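+		 * The filters marked for removal above are picked up by the
+		 * watchdog via the NE6XVF_FLAG_AQ_DEL_MAC_FILTER request set below.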
+		 */
+		adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_MAC_FILTER;
+
+		/* In case the queue configure or enable operations are still
+		 * pending from when the interface was opened, make sure
+		 * they're canceled here.
+		 */
+		adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES;
+		adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_QUEUES;
+	}
+
+	mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0);
+}
+
+static void ne6xvf_get_port_link_status(struct ne6xvf_adapter *adapter)
+{
+	adapter->aq_required |= NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS;
+	mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0);
+}
+
+static void ne6xvf_set_vport_state(struct ne6xvf_adapter *adapter, int tx_state, int rx_state)
+{
+	if (rx_state)
+		adapter->hw_feature &= ~NE6X_F_RX_DISABLE;
+	else
+		adapter->hw_feature |= NE6X_F_RX_DISABLE;
+
+	if (tx_state)
+		adapter->hw_feature &= ~NE6X_F_TX_DISABLE;
+	else
+		adapter->hw_feature |= NE6X_F_TX_DISABLE;
+
+	adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD;
+	mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0);
+}
+
+/**
+ * ne6xvf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog is started,
+ * and the stack is notified that the interface is ready.
+ **/
+int ne6xvf_open(struct net_device *netdev)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+	netdev_dbg(netdev, "opening device\n");
+
+	while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section))
+		usleep_range(500, 1000);
+
+	if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) {
+		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+		err = -EIO;
+		goto unlock;
+	}
+
+	if (adapter->state == __NE6XVF_RUNNING && !test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) {
+		dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
+		err = 0;
+		goto unlock;
+	}
+
+	if (adapter->state != __NE6XVF_DOWN) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	err = ne6xvf_setup_all_tg_resources(adapter);
+	if (err)
+		goto err_setup_tg;
+
+	err = ne6xvf_setup_all_cq_resources(adapter);
+	if (err)
+		goto err_setup_cq;
+
+	/* allocate transmit descriptors */
+	err = ne6xvf_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = ne6xvf_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	/* clear any pending interrupts, may auto mask */
+	err = ne6xvf_request_traffic_irqs(adapter, netdev->name);
+	if (err)
+		goto err_req_irq;
+
+	ne6xvf_configure(adapter);
+
+	ne6xvf_up_complete(adapter);
+
+	ne6xvf_irq_enable(adapter, true);
+
+	ne6xvf_get_port_link_status(adapter);
+
+	ne6xvf_set_vport_state(adapter, true, true);
+	clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	return 0;
+
+err_req_irq:
+	ne6xvf_down(adapter);
+	ne6xvf_free_traffic_irqs(adapter);
+err_setup_rx:
+	ne6xvf_free_all_rx_resources(adapter);
+err_setup_tx:
+	ne6xvf_free_all_tx_resources(adapter);
+err_setup_cq:
+	ne6xvf_free_all_cq_resources(adapter);
+err_setup_tg:
+	ne6xvf_free_all_tg_resources(adapter);
+
+unlock:
+	clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	return err;
+}
+
+/**
+ * ne6xvf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. All IRQs are freed, along with all transmit
+ * and receive resources.
+ **/
+int ne6xvf_close(struct net_device *netdev)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	struct ne6xvf_hw *hw = &adapter->hw;
+	int status;
+	int i;
+
+	netdev_dbg(netdev, "closing device\n");
+
+	while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section))
+		usleep_range(500, 1000);
+
+	if (adapter->state <= __NE6XVF_DOWN_PENDING) {
+		clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);
+		return 0;
+	}
+
+	ne6xvf_set_vport_state(adapter, false, false);
+	ne6xvf_down(adapter);
+
+	for (i = 0; i < adapter->num_msix_vectors; i++)
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1);
+
+	usleep_range(100, 120);
+
+	ne6xvf_change_state(adapter, __NE6XVF_DOWN_PENDING);
+	ne6xvf_free_traffic_irqs(adapter);
+
+	ne6xvf_free_all_tg_resources(adapter);
+	ne6xvf_free_all_cq_resources(adapter);
+	ne6xvf_free_all_tx_resources(adapter);
+	ne6xvf_free_all_rx_resources(adapter);
+
+	if (adapter->state == __NE6XVF_DOWN_PENDING)
+		ne6xvf_change_state(adapter, __NE6XVF_DOWN);
+
+	clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	/* If we're closing the interface as part of driver removal then don't
+	 * wait. The VF resources will be reinitialized when the hardware is
+	 * reset.
+	 */
+	if (ne6xvf_is_remove_in_progress(adapter))
+		return 0;
+
+	/* We explicitly don't free resources here because the hardware is
+	 * still active and can DMA into memory. Resources are cleared in
+	 * ne6xvf_virtchnl_completion() after we get confirmation from the PF
+	 * driver that the rings have been stopped.
+	 *
+	 * Also, we wait for state to transition to __NE6XVF_DOWN before
+	 * returning. State change occurs in ne6xvf_virtchnl_completion() after
+	 * VF resources are released (which occurs after PF driver processes and
+	 * responds to admin queue commands).
+	 */
+	status = wait_event_timeout(adapter->down_waitqueue, adapter->state == __NE6XVF_DOWN,
+				    msecs_to_jiffies(500));
+	if (!status)
+		netdev_dbg(netdev, "Device resources not yet released\n");
+
+	return 0;
+}
+
+/**
+ * ne6xvf_addr_sync - Callback for dev_(mc|uc)_sync to add address
+ * @netdev: the netdevice
+ * @addr: address to add
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int ne6xvf_addr_sync(struct net_device *netdev, const u8 *addr)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+
+	if (ether_addr_equal(addr, netdev->dev_addr))
+		return 0;
+
+	if (ne6xvf_add_filter(adapter, addr))
+		return 0;
+	else
+		return -ENOMEM;
+}
+
+/**
+ * ne6xvf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
+ * @netdev: the netdevice
+ * @addr: address to remove
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int ne6xvf_addr_unsync(struct net_device *netdev, const u8 *addr)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	struct ne6xvf_mac_filter *f;
+
+	/* Under some circumstances, we might receive a request to delete
+	 * our own device address from our uc list.
Because we store the + * device address in the VSI's MAC/VLAN filter list, we need to ignore + * such requests and not delete our device address from this list. + */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + f = ne6xvf_find_filter(adapter, addr); + if (f) { + f->remove = true; + adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_MAC_FILTER; + } + + return 0; +} + +/** + * ne6xvf_promiscuous_mode_changed - check if promiscuous mode bits changed + * @adapter: device specific adapter + */ +bool ne6xvf_promiscuous_mode_changed(struct ne6xvf_adapter *adapter) +{ + return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) & + (IFF_PROMISC | IFF_ALLMULTI); +} + +/** + * ne6xvf_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + **/ +static void ne6xvf_set_rx_mode(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + __dev_uc_sync(netdev, ne6xvf_addr_sync, ne6xvf_addr_unsync); + __dev_mc_sync(netdev, ne6xvf_addr_sync, ne6xvf_addr_unsync); + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + if (!adapter->trusted) { + adapter->hw_feature &= ~NE6X_F_PROMISC; + adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI; + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON; + return; + } + + if (netdev->flags & IFF_PROMISC) { + adapter->flags |= NE6XVF_FLAG_PROMISC_ON; + adapter->flags |= NE6XVF_FLAG_ALLMULTI_ON; + } else if (netdev->flags & IFF_ALLMULTI) { + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags |= NE6XVF_FLAG_ALLMULTI_ON; + } else { + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON; + } + + adapter->aq_required |= NE6XVF_FLAG_AQ_REQUEST_PROMISC; +} + +/** + * ne6xvf_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the watchdog task. 
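+ *
+ * The stats embedded in the netdev are returned when available;
+ * adapter->net_stats is only used as a fallback while no netdev
+ * pointer is set.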
+ **/
+static struct net_device_stats *ne6xvf_get_stats(struct net_device *netdev)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->netdev)
+		return &adapter->netdev->stats;
+	else
+		return &adapter->net_stats;
+}
+
+static void ne6xvf_sync_features(struct net_device *netdev)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+
+	if (netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
+		adapter->hw_feature |= NE6X_F_TX_UDP_TNL_SEG;
+	else
+		adapter->hw_feature &= ~NE6X_F_TX_UDP_TNL_SEG;
+
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		adapter->hw_feature |= NE6X_F_RX_VLAN_STRIP;
+	else
+		adapter->hw_feature &= ~NE6X_F_RX_VLAN_STRIP;
+
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
+		adapter->hw_feature |= NE6X_F_TX_VLAN;
+	else
+		adapter->hw_feature &= ~NE6X_F_TX_VLAN;
+
+	if (netdev->features & NETIF_F_HW_VLAN_STAG_RX)
+		adapter->hw_feature |= NE6X_F_RX_QINQ_STRIP;
+	else
+		adapter->hw_feature &= ~NE6X_F_RX_QINQ_STRIP;
+
+	if (netdev->features & NETIF_F_HW_VLAN_STAG_TX)
+		adapter->hw_feature |= NE6X_F_TX_QINQ;
+	else
+		adapter->hw_feature &= ~NE6X_F_TX_QINQ;
+
+	if (netdev->features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+		adapter->hw_feature |= NE6X_F_RX_VLAN_FILTER;
+	else
+		adapter->hw_feature &= ~NE6X_F_RX_VLAN_FILTER;
+
+	if (netdev->features & NETIF_F_RXCSUM)
+		adapter->hw_feature |= NE6X_OFFLOAD_RXCSUM;
+
+	if (netdev->features & NETIF_F_LRO)
+		adapter->hw_feature |= NE6X_OFFLOAD_LRO;
+
+	if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6))
+		adapter->hw_feature |= NE6X_OFFLOAD_TSO;
+
+	if (netdev->features & NETIF_F_IP_CSUM)
+		adapter->hw_feature |= NE6X_OFFLOAD_TXCSUM;
+
+	if (netdev->features & NETIF_F_RXHASH)
+		adapter->hw_feature |= NE6X_OFFLOAD_RSS;
+
+	if (netdev->features & NETIF_F_HW_L2FW_DOFFLOAD)
+		adapter->hw_feature |= NE6X_OFFLOAD_L2;
+
+	if (netdev->features & NETIF_F_SCTP_CRC)
+		adapter->hw_feature |= NE6X_OFFLOAD_SCTP_CSUM;
+	else
+		adapter->hw_feature &= ~NE6X_OFFLOAD_SCTP_CSUM;
+
+	dev_info(&adapter->pdev->dev, "%s: adapter->hw_feature = 0x%08x\n", __func__,
+		 adapter->hw_feature);
+
+	adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD;
+}
+
+#define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
+					 NETIF_F_HW_VLAN_CTAG_TX | \
+					 NETIF_F_HW_VLAN_STAG_RX | \
+					 NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
+
+#define NETIF_UDP_TNL_FEATURES		(NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+/**
+ * ne6xvf_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
+ **/
+static int ne6xvf_set_features(struct net_device *netdev, netdev_features_t features)
+{
+	netdev_features_t changed = features ^ netdev->features;
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+
+	if (changed & (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
+		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
+			adapter->hw_feature |= NE6X_F_TX_UDP_TNL_SEG;
+		else
+			adapter->hw_feature &= ~NE6X_F_TX_UDP_TNL_SEG;
+	}
+
+	if (changed & NETIF_VLAN_OFFLOAD_FEATURES || changed & NETIF_VLAN_FILTERING_FEATURES) {
+		/* keep the cases separate: only one VLAN ethertype can be
+		 * offloaded at a time, so mirror each offload and filtering
+		 * bit into the corresponding hardware feature individually.
+		 */
+		if (features & NETIF_F_HW_VLAN_CTAG_RX)
+			adapter->hw_feature |= NE6X_F_RX_VLAN_STRIP;
+		else
+			adapter->hw_feature &= ~NE6X_F_RX_VLAN_STRIP;
+
+		if (features & NETIF_F_HW_VLAN_CTAG_TX)
+			adapter->hw_feature |= NE6X_F_TX_VLAN;
+		else
+			adapter->hw_feature &= ~NE6X_F_TX_VLAN;
+
+		if (features & NETIF_F_HW_VLAN_STAG_RX)
+			adapter->hw_feature |= NE6X_F_RX_QINQ_STRIP;
+		else
+			adapter->hw_feature &= ~NE6X_F_RX_QINQ_STRIP;
+
+		if (features & NETIF_F_HW_VLAN_STAG_TX)
+			adapter->hw_feature |= NE6X_F_TX_QINQ;
+		else
+			adapter->hw_feature &= ~NE6X_F_TX_QINQ;
+
+		if (features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+			adapter->hw_feature |= NE6X_F_RX_VLAN_FILTER;
+		else
+			adapter->hw_feature &= ~NE6X_F_RX_VLAN_FILTER;
+	}
+
+	if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO)) {
+		if (features & NETIF_F_RXCSUM)
+			adapter->hw_feature |= NE6X_OFFLOAD_RXCSUM;
+		else
+			adapter->hw_feature &= ~NE6X_OFFLOAD_RXCSUM;
+
+		/* update hardware LRO capability accordingly */
+		if (features & NETIF_F_LRO)
+			adapter->hw_feature |= NE6X_OFFLOAD_LRO;
+		else
+			adapter->hw_feature &= ~NE6X_OFFLOAD_LRO;
+	}
+
+	if (changed & (NETIF_F_TSO6 | NETIF_F_TSO)) {
+		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
+			adapter->hw_feature |= NE6X_OFFLOAD_TSO;
+		else
+			adapter->hw_feature &= ~NE6X_OFFLOAD_TSO;
+	}
+
+	if (changed & NETIF_F_GSO_UDP) {
+		if (features & NETIF_F_GSO_UDP)
+			adapter->hw_feature |= NE6X_OFFLOAD_UFO;
+		else
+			adapter->hw_feature &= ~NE6X_OFFLOAD_UFO;
+	}
+
+	if (changed & NETIF_F_IP_CSUM) {
+		if (features & NETIF_F_IP_CSUM)
+			adapter->hw_feature |= NE6X_OFFLOAD_TXCSUM;
+		else
+			adapter->hw_feature &= ~NE6X_OFFLOAD_TXCSUM;
+	}
+
+	if (changed & NETIF_F_RXHASH) {
+		if (features & NETIF_F_RXHASH)
+			adapter->hw_feature |= NE6X_OFFLOAD_RSS;
+		else
+			adapter->hw_feature &= ~NE6X_OFFLOAD_RSS;
+	}
+
+	if (changed & NETIF_F_HW_L2FW_DOFFLOAD) {
+		if (features & NETIF_F_HW_L2FW_DOFFLOAD)
+			adapter->hw_feature |= NE6X_OFFLOAD_L2;
+		else
+			adapter->hw_feature &= ~NE6X_OFFLOAD_L2;
+	}
+
+	if (changed & NETIF_F_SCTP_CRC) {
+		if (features & NETIF_F_SCTP_CRC)
+			adapter->hw_feature |= NE6X_OFFLOAD_SCTP_CSUM;
+		else
+			adapter->hw_feature &= ~NE6X_OFFLOAD_SCTP_CSUM;
+	}
+
+	dev_info(&adapter->pdev->dev, "%s: adapter->hw_feature = 0x%08x\n", __func__,
+		 adapter->hw_feature);
+
+	adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD;
+	mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0);
+
+	return 0;
+}
+
+/**
+ * ne6xvf_fix_features - fix the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
+ **/
+static netdev_features_t ne6xvf_fix_features(struct net_device *netdev, netdev_features_t features)
+{
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+	if (features & NETIF_F_HW_VLAN_STAG_RX)
+		features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_TX)
+		features &= ~NETIF_F_HW_VLAN_STAG_TX;
+
+	if (features & NETIF_F_HW_VLAN_STAG_TX)
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+	if (features & NETIF_VLAN_FILTERING_FEATURES)
+		features |= NETIF_VLAN_FILTERING_FEATURES;
+
+	return features;
+}
+
+/**
+ * ne6xvf_replace_primary_mac - Replace current primary address
+ * @adapter: board private structure
+ * @new_mac: new MAC address to be applied
+ *
+ * Replace current dev_addr and send request to PF for removal of previous
+ * primary MAC address filter and addition of new primary MAC filter.
+ * Always returns 0; the actual result is reported asynchronously by the PF.
+ *
+ * Do not call this with mac_vlan_list_lock!
+ **/
+int ne6xvf_replace_primary_mac(struct ne6xvf_adapter *adapter, const u8 *new_mac)
+{
+	ether_addr_copy(adapter->hw.mac.addr, new_mac);
+	adapter->aq_required |= NE6XVF_FLAG_AQ_SET_VF_MAC;
+
+	/* schedule the watchdog task to immediately process the request */
+	queue_work(ne6xvf_wq, &adapter->watchdog_task.work);
+	return 0;
+}
+
+/**
+ * ne6xvf_set_mac - NDO callback to set port mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ne6xvf_set_mac(struct net_device *netdev, void *p)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+	int ret;
+
+	netdev_info(netdev, "set mac address %pM\n", addr->sa_data);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (is_multicast_ether_addr(addr->sa_data)) {
+		netdev_err(netdev, "Invalid Ethernet address %pM\n", addr->sa_data);
+		return -EINVAL;
+	}
+
+	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
+		netdev_info(netdev, "already using mac address %pM\n", addr->sa_data);
+		return 0;
+	}
+
+	ret = ne6xvf_replace_primary_mac(adapter, addr->sa_data);
+	if (ret)
+		return ret;
+
+	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
+					       ether_addr_equal(netdev->dev_addr, addr->sa_data),
+					       msecs_to_jiffies(2500));
+
+	/* If ret < 0, the wait was interrupted; if ret == 0, it timed out.
+	 * Otherwise the PF answered, so check whether the netdev MAC was
+	 * updated to the requested address; if not, the request was
+	 * rejected and we return -EACCES.
+	 */
+	netdev_info(netdev, "%s,%pM %pM\n", __func__, addr->sa_data, netdev->dev_addr);
+	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
+		return -EACCES;
+
+	return 0;
+}
+
+/**
+ * ne6xvf_do_ioctl - Handle network device specific ioctls
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ *
+ * Callback to handle the networking device specific ioctls, such as the
+ * SIOCGHWTSTAMP and SIOCSHWTSTAMP requests that configure Tx and Rx
+ * timestamping. No device specific ioctls are implemented yet, so this
+ * simply returns 0.
+ */
+static int ne6xvf_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	return 0;
+}
+
+/**
+ * ne6xvf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ne6xvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int max_frame;
+
+	if (new_mtu < NE6X_MIN_MTU_SIZE) {
+		netdev_err(netdev, "mtu < MIN MTU size");
+		return -EINVAL;
+	}
+
+	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+	if (max_frame > NE6X_MAX_RXBUFFER) {
+		netdev_err(netdev, "mtu > MAX MTU size");
+		return -EINVAL;
+	}
+
+	netdev_info(netdev, "changing MTU from %u to %d\n", netdev->mtu, new_mtu);
+	netdev->mtu = new_mtu;
+
+	return 0;
+}
+
+/**
+ * ne6xvf_find_vlan - Search filter list for specific vlan filter
+ * @adapter: board private structure
+ * @vlan: vlan tag
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
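+ *
+ * A filter matches only when both the VLAN ID and the TPID are equal.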
+ **/
+static struct ne6xvf_vlan_filter *ne6xvf_find_vlan(struct ne6xvf_adapter *adapter,
+						   struct ne6x_vf_vlan vlan)
+{
+	struct ne6xvf_vlan_filter *f;
+
+	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+		if (f->vlan.vid == vlan.vid && f->vlan.tpid == vlan.tpid)
+			return f;
+	}
+
+	return NULL;
+}
+
+/**
+ * ne6xvf_add_vlan_list - Add a vlan filter to the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+struct ne6xvf_vlan_filter *ne6xvf_add_vlan_list(struct ne6xvf_adapter *adapter,
+						struct ne6x_vf_vlan vlan)
+{
+	struct ne6xvf_vlan_filter *f = NULL;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	f = ne6xvf_find_vlan(adapter, vlan);
+	if (!f) {
+		f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f)
+			goto clearout;
+
+		f->vlan = vlan;
+
+		list_add_tail(&f->list, &adapter->vlan_filter_list);
+		f->add = true;
+	}
+
+clearout:
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+	return f;
+}
+
+/**
+ * ne6xvf_del_vlan_list - Remove a vlan filter from the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ **/
+void ne6xvf_del_vlan_list(struct ne6xvf_adapter *adapter, struct ne6x_vf_vlan vlan)
+{
+	struct ne6xvf_vlan_filter *f;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	f = ne6xvf_find_vlan(adapter, vlan);
+	if (f) {
+		list_del(&f->list);
+		kfree(f);
+	}
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * ne6xvf_add_vlan - Add a vlan filter and schedule it for programming
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct ne6xvf_vlan_filter *ne6xvf_add_vlan(struct ne6xvf_adapter *adapter,
+						  struct ne6x_vf_vlan vlan)
+{
+	struct ne6xvf_vlan_filter *f = NULL;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	f = ne6xvf_find_vlan(adapter, vlan);
+	if (!f) {
+		f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f)
+			goto clearout;
+
+		f->vlan = vlan;
+
+		list_add_tail(&f->list, &adapter->vlan_filter_list);
+		f->add = true;
+		adapter->aq_required |= NE6XVF_FLAG_AQ_ADD_VLAN_FILTER;
+	}
+
+clearout:
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+	return f;
+}
+
+/**
+ * ne6xvf_del_vlan - Mark a vlan filter for removal
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ **/
+static void ne6xvf_del_vlan(struct ne6xvf_adapter *adapter, struct ne6x_vf_vlan vlan)
+{
+	struct ne6xvf_vlan_filter *f;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	f = ne6xvf_find_vlan(adapter, vlan);
+	if (f) {
+		f->remove = true;
+		adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_VLAN_FILTER;
+	}
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+static int ne6xvf_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	struct ne6x_vf_vlan vlan;
+
+	netdev_info(netdev, "%s:%d: proto:%04x vid:%d\n", __func__, __LINE__,
+		    be16_to_cpu(proto), vid);
+
+	if (!vid)
+		return 0;
+
+	vlan = NE6X_VF_VLAN(vid, be16_to_cpu(proto));
+	if (!ne6xvf_add_vlan(adapter, vlan))
+		return -ENOMEM;
+
+	mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0);
+
+	return 0;
+}
+
+static int ne6xvf_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	struct ne6x_vf_vlan vlan;
+
+	netdev_info(netdev, "%s:%d: proto:%04x vid:%d\n", __func__, __LINE__,
+		    be16_to_cpu(proto), vid);
+	vlan = NE6X_VF_VLAN(vid, be16_to_cpu(proto));
+
+	ne6xvf_del_vlan(adapter,
vlan); + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); + + return 0; +} + +/** + *__ne6xvf_setup_tc - configure multiple traffic classes + * @netdev: network interface device structure + * @type_data: tc offload data + * + * This function processes the config information provided by the + * user to configure traffic classes/queue channels and packages the + * information to request the PF to setup traffic classes. + * + * Returns 0 on success. + **/ +static int __ne6xvf_setup_tc(struct net_device *netdev, void *type_data) +{ + return 0; +} + +/** + * ne6xvf_setup_tc - configure multiple traffic classes + * @dev: network interface device structure + * @type: type of offload + * @type_data: tc offload data + * + * This function is the callback to ndo_setup_tc in the + * netdev_ops. + * + * Returns 0 on success + **/ +static int ne6xvf_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) +{ + return __ne6xvf_setup_tc(dev, type_data); +} + +/** + * ne6xvf_features_check - Validate encapsulated packet conforms to limits + * @skb: skb buff + * @dev: This physical port's netdev + * @features: Offload features that the stack believes apply + **/ +static netdev_features_t ne6xvf_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 64 bytes. If it is then we need to drop support for GSO. + */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + features &= ~NETIF_F_GSO_MASK; + + /* MACLEN can support at most 63 words */ + len = skb_network_header(skb) - skb->data; + if (len & ~(63 * 2)) + goto out_err; + + /* IPLEN and EIPLEN can support at most 127 dwords */ + len = skb_transport_header(skb) - skb_network_header(skb); + if (len & ~(127 * 4)) + goto out_err; + + /* No need to validate L4LEN as TCP is the only protocol with a + * a flexible value and we support all possible values supported + * by TCP, which is at most 15 dwords + */ + + return features; + +out_err: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +/** + * ne6xvf_fwd_add_macvlan - Configure MACVLAN interface + * @netdev: Main net device to configure + * @vdev: MACVLAN subordinate device + */ +static void *ne6xvf_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_macvlan *mv = NULL; + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, vdev->dev_addr); + mv = devm_kzalloc(&adapter->pdev->dev, sizeof(*mv), GFP_KERNEL); + if (!mv) + return NULL; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + ne6xvf_addr_sync(netdev, mac); + spin_unlock_bh(&adapter->mac_vlan_list_lock); + INIT_LIST_HEAD(&mv->list); + mv->vdev = vdev; + ether_addr_copy(mv->mac, mac); + list_add(&mv->list, &adapter->macvlan_list); + netdev_info(netdev, "MACVLAN offloads for %s are on\n", vdev->name); + + return mv; +} + +/** + * ne6xvf_fwd_del_macvlan - Delete MACVLAN interface resources + * @netdev: Main net device + * @accel_priv: MACVLAN sub ordinate device + */ +static void ne6xvf_fwd_del_macvlan(struct net_device *netdev, void *accel_priv) +{ + struct ne6x_macvlan *mv = (struct ne6x_macvlan *)accel_priv; + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + if 
+		return;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+	ne6xvf_addr_unsync(netdev, mv->mac);
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+	list_del(&mv->list);
+
+	netdev_info(netdev, "MACVLAN offloads for %s are off\n", mv->vdev->name);
+	devm_kfree(&adapter->pdev->dev, mv);
+}
+
+static const struct net_device_ops ne6xvf_netdev_ops = {
+	.ndo_open = ne6xvf_open,
+	.ndo_stop = ne6xvf_close,
+	.ndo_start_xmit = ne6xvf_lan_xmit_frame,
+	.ndo_get_stats = ne6xvf_get_stats,
+	.ndo_set_rx_mode = ne6xvf_set_rx_mode,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = ne6xvf_set_mac,
+	.ndo_do_ioctl = ne6xvf_do_ioctl,
+	.ndo_change_mtu = ne6xvf_change_mtu,
+	.ndo_tx_timeout = ne6xvf_tx_timeout,
+
+	.ndo_vlan_rx_add_vid = ne6xvf_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = ne6xvf_vlan_rx_kill_vid,
+
+	.ndo_setup_tc = ne6xvf_setup_tc,
+	.ndo_features_check = ne6xvf_features_check,
+
+	.ndo_dfwd_add_station = ne6xvf_fwd_add_macvlan,
+	.ndo_dfwd_del_station = ne6xvf_fwd_del_macvlan,
+
+	.ndo_fix_features = ne6xvf_fix_features,
+	.ndo_set_features = ne6xvf_set_features,
+};
+
+static int ne6xvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct ne6xvf_adapter *adapter = NULL;
+	struct ne6xvf_hw *hw = NULL;
+	struct net_device *netdev;
+	char name[IFNAMSIZ] = {0};
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+	if (err) {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+			goto err_dma;
+		}
+	}
+
+	err = pci_request_regions(pdev, ne6xvf_driver_name);
+	if (err) {
+		dev_err(pci_dev_to_dev(pdev), "pci_request_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+
+	pci_set_master(pdev);
+
+	sprintf(name, "enp%ds%df%d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+		PCI_FUNC(pdev->devfn));
+
+	netdev = alloc_netdev_mq(sizeof(struct ne6xvf_adapter), name, NET_NAME_USER, ether_setup,
+				 NE6XVF_MAX_REQ_QUEUES);
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+
+	hw = &adapter->hw;
+	hw->back = adapter;
+
+	ne6xvf_change_state(adapter, __NE6XVF_STARTUP);
+
+	pci_save_state(pdev);
+
+	hw->hw_addr0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+	hw->hw_addr2 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
+
+	if (!hw->hw_addr0 || !hw->hw_addr2) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+	hw->bus.device = PCI_SLOT(pdev->devfn);
+	hw->bus.func = PCI_FUNC(pdev->devfn);
+	hw->bus.bus_id = pdev->bus->number;
+
+	ne6xvf_init_spinlock(&hw->mbx.mbx_spinlock);
+	spin_lock_init(&adapter->mac_vlan_list_lock);
+
+	INIT_LIST_HEAD(&adapter->mac_filter_list);
+	INIT_LIST_HEAD(&adapter->vlan_filter_list);
+	INIT_LIST_HEAD(&adapter->macvlan_list);
+
+	INIT_WORK(&adapter->sdk_task, ne6xvf_sdk_task);
+	INIT_DELAYED_WORK(&adapter->watchdog_task, ne6xvf_watchdog_task);
+
+	init_waitqueue_head(&adapter->down_waitqueue);
+
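+	/* vc_waitqueue is woken by the virtchnl completion handler when the
+	 * PF acknowledges a request such as a MAC address change.
+	 */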
init_waitqueue_head(&adapter->vc_waitqueue); + + ne6xvf_startup(adapter); + if (ne6xvf_init_get_resources(adapter)) { + err = -EIO; + goto err_ioremap; + } + + adapter->aq_required = 0; + ne6xvf_init_process_extended_caps(adapter); + ne6xvf_init_config_adapter(adapter); + + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, + msecs_to_jiffies(5 * (pdev->devfn & 0x07))); + + ne6xvf_dbg_pf_init(adapter); + + hw->debug_mask = 0xffffffff; + return 0; +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * ne6xvf_irq_enable_queues - Enable interrupt for specified queues + * @adapter: board private structure + * @mask: bitmap of queues to enable + **/ +void ne6xvf_irq_enable_queues(struct ne6xvf_adapter *adapter, u32 mask) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < adapter->num_msix_vectors; i++) + wr64(hw, NE6XVF_REG_ADDR(i, NE6X_VP_INT_MASK), ~(1ULL << NE6X_VP_CQ_INTSHIFT)); +} + +/** + * ne6xvf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + * @flush: boolean value whether to run rd32() + **/ +void ne6xvf_irq_enable(struct ne6xvf_adapter *adapter, bool flush) +{ + ne6xvf_irq_enable_queues(adapter, ~0); +} + +void ne6xvf_free_all_tg_resources(struct ne6xvf_adapter *adapter) +{ + int i; + + if (!adapter->tg_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->tg_rings[i].desc) { + struct ne6x_ring *tg_ring = &adapter->tg_rings[i]; + /* Zero out the descriptor ring */ + memset(tg_ring->desc, 0, tg_ring->size); + tg_ring->next_to_use = 0; + tg_ring->next_to_clean = 0; + + if (!tg_ring->netdev) + return; + + dma_free_coherent(tg_ring->dev, tg_ring->size, tg_ring->desc, tg_ring->dma); + tg_ring->desc = NULL; + } +} + +void ne6xvf_free_all_cq_resources(struct ne6xvf_adapter *adapter) +{ + int i; + + if (!adapter->cq_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->cq_rings[i].desc) { + struct ne6x_ring *cq_ring = &adapter->cq_rings[i]; + /* Zero out the descriptor ring */ + memset(cq_ring->desc, 0, cq_ring->size); + cq_ring->next_to_use = 0; + cq_ring->next_to_clean = 0; + + if (!cq_ring->netdev) + return; + + dma_free_coherent(cq_ring->dev, cq_ring->size, cq_ring->desc, cq_ring->dma); + cq_ring->desc = NULL; + } +} + +void ne6xvf_free_all_tx_resources(struct ne6xvf_adapter *adapter) +{ + unsigned long bi_size; + int i, idx; + + if (!adapter->tx_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->tx_rings[i].desc) { + struct ne6x_ring *tx_ring = &adapter->tx_rings[i]; + + /* ring already cleared, nothing to do */ + if (tx_ring->tx_buf) { + /* Free all the Tx ring sk_buffs */ + for (idx = 0; idx < tx_ring->count; idx++) + ne6xvf_unmap_and_free_tx_resource(tx_ring, + &tx_ring->tx_buf[idx]); + + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + memset(tx_ring->tx_buf, 0, bi_size); + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + if (tx_ring->netdev) + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + } + + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + kfree(tx_ring->sgl); + } +} + +void ne6xvf_free_all_rx_resources(struct ne6xvf_adapter *adapter) 
+{ + unsigned long bi_size; + int i, idx; + + if (!adapter->rx_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->rx_rings[i].desc) { + struct ne6x_ring *rx_ring = &adapter->rx_rings[i]; + /* ring already cleared, nothing to do */ + if (rx_ring->rx_buf) { + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* Free all the Rx ring sk_buffs */ + for (idx = 0; idx < rx_ring->count; idx++) { + struct ne6x_rx_buf *rx_bi = &rx_ring->rx_buf[idx]; + + if (!rx_bi->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma, + rx_bi->page_offset, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, + ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + memset(rx_ring->rx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; + } + + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + } +} + +void ne6xvf_reset_interrupt_capability(struct ne6xvf_adapter *adapter) +{ + if (!adapter->msix_entries) + return; + + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +static void ne6xvf_remove(struct pci_dev *pdev) +{ + struct ne6xvf_adapter *adapter = ne6xvf_pdev_to_adapter(pdev); + struct net_device *netdev = adapter->netdev; + struct ne6xvf_vlan_filter *vlf, *vlftmp; + struct ne6xvf_hw *hw = &adapter->hw; + struct ne6xvf_mac_filter *f, *ftmp; + struct ne6x_macvlan *mv, *mv_tmp; + int i; + + ne6xvf_dbg_pf_exit(adapter); + + set_bit(__NE6XVF_IN_REMOVE_TASK, &adapter->crit_section); + cancel_work_sync(&adapter->sdk_task); + cancel_delayed_work_sync(&adapter->watchdog_task); + + if (adapter->netdev_registered) { + /* This will call ne6xvf_close if the device was open previously. + * The Admin Queue and watchdog tasks have already been shut + * down at this point so the driver will rely on + * ne6xvf_request_reset below to disable the queues and handle + * any other Admin Queue-based cleanup normally done as part of + * ne6xvf_close. 
+ */ + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + + dev_info(&adapter->pdev->dev, "Removing device\n"); + + /* Shut down all the garbage mashers on the detention level */ + ne6xvf_change_state(adapter, __NE6XVF_REMOVE); + adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED; + + ne6xvf_request_reset(adapter); + + for (i = 0; i < adapter->num_active_queues; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1); + + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + + if (adapter->last_state == __NE6XVF_RESETTING || + (adapter->last_state == __NE6XVF_RUNNING && !(netdev->flags & IFF_UP))) + ne6xvf_free_traffic_irqs(adapter); + + ne6xvf_reset_interrupt_capability(adapter); + ne6xvf_free_q_vectors(adapter); + + ne6xvf_destroy_spinlock(&hw->mbx.mbx_spinlock); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + list_del(&f->list); + kfree(f); + } + + /* release vsi vlan list resource */ + list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, list) { + list_del(&vlf->list); + kfree(vlf); + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry_safe(mv, mv_tmp, &adapter->macvlan_list, list) + ne6xvf_fwd_del_macvlan(netdev, mv); + + iounmap(hw->hw_addr0); + iounmap(hw->hw_addr2); + pci_release_regions(pdev); + + ne6xvf_free_queues(adapter); + kfree(adapter->vf_res); + adapter->vf_res = NULL; + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +static struct pci_driver ne6xvf_driver = { + .name = ne6xvf_driver_name, + .id_table = ne6xvf_pci_tbl, + .probe = ne6xvf_probe, + .remove = ne6xvf_remove, +}; + +static int __init ne6xvf_init_module(void) +{ + int ret; + + pr_info("navf: %s - version %s\n", ne6xvf_driver_string, ne6xvf_driver_version); + + pr_info("%s\n", ne6xvf_copyright); + + ne6xvf_wq = create_singlethread_workqueue(ne6xvf_driver_name); + if (!ne6xvf_wq) { + pr_err("%s: Failed to create workqueue\n", ne6xvf_driver_name); + return -ENOMEM; + } + + ne6xvf_dbg_init(); + + ret = pci_register_driver(&ne6xvf_driver); + + return ret; +} + +module_init(ne6xvf_init_module); + +/** + * ne6xvf_exit_module - Driver Exit Cleanup Routine + * + * ne6xvf_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ne6xvf_exit_module(void) +{ + pci_unregister_driver(&ne6xvf_driver); + destroy_workqueue(ne6xvf_wq); + ne6xvf_dbg_exit(); +} + +module_exit(ne6xvf_exit_module); diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h new file mode 100644 index 0000000000000000000000000000000000000000..8f74f79840492ea6c077e4377d3a692f9871df13 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
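+ *
+ * OS abstraction helpers: ne6xvf_spinlock is a thin wrapper around a
+ * mutex (see ne6xvf_virtchnl.h), so these helpers may sleep and must
+ * not be used from atomic context.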
+ */
+
+#ifndef _NE6XVF_OSDEP_H
+#define _NE6XVF_OSDEP_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+void ne6xvf_init_spinlock_d(struct ne6xvf_spinlock *sp);
+void ne6xvf_destroy_spinlock_d(struct ne6xvf_spinlock *sp);
+void ne6xvf_acquire_spinlock_d(struct ne6xvf_spinlock *sp);
+void ne6xvf_release_spinlock_d(struct ne6xvf_spinlock *sp);
+
+#endif /* _NE6XVF_OSDEP_H */
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c
new file mode 100644
index 0000000000000000000000000000000000000000..7ba4a802d5b7d7d61eebfc89f289353f2722a143
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */
+
+#include "ne6xvf.h"
+#include "ne6xvf_txrx.h"
+
+/**
+ * ne6xvf_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void ne6xvf_update_enable_itr(struct ne6x_q_vector *q_vector)
+{
+	struct ne6xvf_adapter *adpt = (struct ne6xvf_adapter *)q_vector->adpt;
+	struct ne6xvf_hw *hw = &adpt->hw;
+
+	if (!test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) {
+		struct ne6x_ring *cq_ring = NULL;
+
+		cq_ring = q_vector->cq.ring;
+		if (cq_ring->next_to_clean != cq_ring->next_to_use) {
+			cq_ring->next_to_clean = cq_ring->next_to_use;
+			/* memory barrier updating cq ring tail */
+			wmb();
+			writeq(cq_ring->next_to_clean, cq_ring->tail);
+		}
+
+		wr64(hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT),
+		     (1ULL << NE6X_VP_CQ_INTSHIFT));
+		wr64(hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK),
+		     ~(1ULL << NE6X_VP_CQ_INTSHIFT));
+	}
+}
+
+/**
+ * ne6xvf_unmap_and_free_tx_resource - Release a Tx buffer
+ * @ring: the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+void ne6xvf_unmap_and_free_tx_resource(struct ne6x_ring *ring, struct ne6x_tx_buf *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+	}
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+}
+
+/**
+ * ne6xvf_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
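+ * CQ events are drained first so that the Tx and Rx cleanup below
+ * observes up-to-date completion state.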
+ *
+ * Returns the amount of work done
+ **/
+int ne6xvf_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct ne6x_q_vector *q_vector = container_of(napi, struct ne6x_q_vector, napi);
+	struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)q_vector->adpt;
+	struct ne6x_ring *ring = NULL;
+	bool clean_complete = true;
+	int cq_budget = 16;
+	int work_done = 0;
+	int cleaned = 0;
+
+	ring = q_vector->cq.ring;
+
+	if (test_bit(NE6X_ADPT_DOWN, comm->state)) {
+		napi_complete(napi);
+		return 0;
+	}
+
+	cleaned = ne6x_clean_cq_irq(q_vector, ring, cq_budget);
+	if (cleaned >= cq_budget)
+		clean_complete = false;
+
+	ring = q_vector->tx.ring;
+	if (!ne6x_clean_tx_irq(comm, ring, budget))
+		clean_complete = false;
+
+	/* Handle case where we are called by netpoll with a budget of 0 */
+	if (budget <= 0)
+		goto tx_only;
+
+	ring = q_vector->rx.ring;
+	cleaned = ne6x_clean_rx_irq(ring, budget);
+	if (cleaned >= budget)
+		clean_complete = false;
+
+	work_done += cleaned;
+
+	/* If work not completed, return budget and polling will return */
+	if (!clean_complete) {
+		int cpu_id = smp_processor_id();
+
+		/* It is possible that the interrupt affinity has changed but,
+		 * if the cpu is pegged at 100%, polling will never exit while
+		 * traffic continues and the interrupt will be stuck on this
+		 * cpu. We check to make sure affinity is correct before we
+		 * continue to poll, otherwise we must stop polling so the
+		 * interrupt can move to the correct cpu.
+		 */
+		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+			/* Tell napi that we are done polling */
+			napi_complete_done(napi, work_done);
+			ne6xvf_update_enable_itr(q_vector);
+			/* Return budget-1 so that polling stops */
+			return budget - 1;
+		}
+tx_only:
+		return budget;
+	}
+
+	/* Work is done so exit the polling mode and re-enable the interrupt */
+	napi_complete_done(napi, work_done);
+	ne6xvf_update_enable_itr(q_vector);
+
+	return min(work_done, budget - 1);
+}
+
+netdev_tx_t ne6xvf_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	struct ne6x_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
+	struct ne6x_ring *tag_ring = &adapter->tg_rings[skb->queue_mapping];
+	struct sk_buff *trailer;
+	int tailen, nsg;
+	bool jumbo_frame = true;
+
+	tailen = 4;
+
+	if (skb_put_padto(skb, NE6X_MIN_TX_LEN))
+		return NETDEV_TX_OK;
+
+	if (skb->len < NE6X_MAX_DATA_PER_TXD) {
+		nsg = skb_cow_data(skb, tailen, &trailer);
+		if (unlikely(nsg < 0)) {
+			netdev_err(netdev, "TX: skb_cow_data() returned %d\n", nsg);
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+
+		pskb_put(skb, trailer, tailen);
+		jumbo_frame = false;
+	}
+
+	if (netdev->gso_max_size < skb->len)
+		netdev_err(netdev, "%s: skb->len = %d > 15360\n", __func__, skb->len);
+
+	return ne6x_xmit_frame_ring(skb, tx_ring, tag_ring, jumbo_frame);
+}
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a10c04862a204e0d16fec97f141d501280a9273
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd.
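+ *
+ * Declarations for the VF transmit/receive fast path shared between the
+ * netdev and interrupt handling code.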
*/ + +#ifndef _NE6XVF_TXRX_H +#define _NE6XVF_TXRX_H + +void ne6xvf_unmap_and_free_tx_resource(struct ne6x_ring *ring, struct ne6x_tx_buf *tx_buffer); +int ne6xvf_napi_poll(struct napi_struct *napi, int budget); +netdev_tx_t ne6xvf_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c new file mode 100644 index 0000000000000000000000000000000000000000..e504254e212c35c7450574e121f18628e65b1be1 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c @@ -0,0 +1,1125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6xvf.h" +#include "ne6xvf_osdep.h" + +int ne6xvf_sdk_send_msg_to_pf(struct ne6xvf_hw *hw, enum virtchnl_ops v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen, + void *cmd_details) +{ + union u_ne6x_mbx_snap_buffer_data mbx_buffer; + + ne6xvf_acquire_spinlock(&hw->mbx.mbx_spinlock); + + mbx_buffer.snap.data[0] = 0; + mbx_buffer.snap.data[1] = 0; + mbx_buffer.snap.data[2] = 0; + mbx_buffer.snap.data[3] = 0; + mbx_buffer.snap.data[4] = 0; + mbx_buffer.snap.data[5] = 0; + + if (msglen) { + if (msglen > NE6XVF_SDK_LARGE_BUF) { + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + return NE6XVF_ERR_INVALID_SIZE; + } + + memcpy(mbx_buffer.snap.data, msg, msglen); + } + + mbx_buffer.snap.len = msglen; + mbx_buffer.snap.type = v_opcode; + mbx_buffer.snap.state = v_retval; + + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_MAILBOX_DATA), mbx_buffer.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x2); + + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + + return 0; +} + +int ne6xvf_send_pf_msg(struct ne6xvf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int err; + + if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) + return 0; /* nothing to see here, move along */ + + err = ne6xvf_sdk_send_msg_to_pf(hw, op, VIRTCHNL_STATUS_SUCCESS, msg, len, NULL); + if (err) + dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %d, sdk_err %s\n", + op, err, hw->err_str); + + return err; +} + +/** + * ne6xvf_clean_arq_element + * @hw: pointer to the hw struct + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. 
It can also return how many events are + * left to process through 'pending' + **/ +enum ne6xvf_status ne6xvf_clean_arq_element(struct ne6xvf_hw *hw, struct ne6xvf_arq_event_info *e, + u16 *pending) +{ + union u_ne6x_mbx_snap_buffer_data usnap; + enum ne6xvf_status ret_code = 0; + u64 val; + int i; + + ne6xvf_acquire_spinlock(&hw->mbx.mbx_spinlock); + val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT)); + if (val & 0x1) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x1); + + if (!(val & 0x2)) { + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + return NE6XVF_ERR_NOT_READY; + } + + usnap.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_PF_MAILBOX_DATA)); + e->msg_len = min_t(u16, (u16)usnap.snap.len, e->buf_len); + if (e->msg_buf && e->msg_len != 0) { + for (i = 0; i < e->msg_len && i < NE6XVF_SDK_LARGE_BUF; i++) { + e->msg_buf[i] = usnap.snap.data[i]; + e->snap.data[i] = usnap.snap.data[i]; + } + } + + e->snap.type = usnap.snap.type; + e->snap.state = usnap.snap.state; + + if (pending) + *pending = 0; + + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x2); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x1); + + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + return ret_code; +} + +/** + * ne6xvf_poll_virtchnl_msg - poll for virtchnl msg matching the requested_op + * @adapter: adapter structure + * @event: event to populate on success + * @op_to_poll: requested virtchnl op to poll for + */ +int ne6xvf_poll_virtchnl_msg(struct ne6xvf_adapter *adapter, struct ne6xvf_arq_event_info *event, + enum virtchnl_ops op_to_poll) +{ + struct ne6xvf_arq_event_info rece_event; + struct ne6xvf_hw *hw = &adapter->hw; + enum ne6xvf_status status, v_ret; + enum virtchnl_ops received_op; + int timeout = 50000; + int i; + + rece_event.buf_len = NE6XVF_MAX_AQ_BUF_SIZE; + rece_event.msg_buf = kzalloc(rece_event.buf_len, GFP_KERNEL); + if (!rece_event.msg_buf) + return NE6XVF_ERR_NO_MEMORY; + + while (1) { + /* When the SDK is empty, ne6xvf_clean_arq_element will return + * nonzero and this loop will terminate. 
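+		 * NE6XVF_ERR_NOT_READY just means the mailbox has no data yet;
+		 * it is retried below until the timeout counter expires.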
+ */ + status = ne6xvf_clean_arq_element(hw, &rece_event, NULL); + if (status) { + if (status == NE6XVF_ERR_NOT_READY && timeout) { + usleep_range(10, 12); + timeout--; + continue; + } + kfree(rece_event.msg_buf); + return status; + } + + received_op = (enum virtchnl_ops)le32_to_cpu(rece_event.snap.type); + v_ret = (enum ne6xvf_status)le32_to_cpu(rece_event.snap.state); + if (op_to_poll == received_op) { + memcpy(&event->snap, &rece_event.snap, + sizeof(struct ne6x_mbx_snap_buffer_data)); + event->msg_len = min(rece_event.msg_len, event->buf_len); + if (event->msg_buf) { + for (i = 0; i < event->msg_len && i < NE6XVF_SDK_LARGE_BUF; i++) + event->msg_buf[i] = rece_event.msg_buf[i]; + } + break; + } + + ne6xvf_virtchnl_completion(adapter, received_op, v_ret, rece_event.msg_buf, + rece_event.msg_len); + } + + kfree(rece_event.msg_buf); + status = (enum ne6xvf_status)le32_to_cpu(event->snap.state); + + return status; +} + +int ne6xvf_request_reset(struct ne6xvf_adapter *adapter) +{ + int status; + + if (!adapter->vf_res) + return 0; + /* Don't check CURRENT_OP - this is always higher priority */ + status = ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, + &adapter->vf_res->vsi_res[0].default_mac_addr[0], 6); + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + return status; +} + +int ne6xvf_send_api_ver(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + struct ne6xvf_virtchnl_version_info vvi; + + vvi.major = NE6XVF_VIRTCHNL_VERSION_MAJOR; + vvi.minor = NE6XVF_VIRTCHNL_VERSION_MINOR; + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, sizeof(vvi)); + usleep_range(10, 12); + return ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_VERSION); +} + +/** + * ne6xvf_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. + **/ +void ne6xvf_vf_parse_hw_config(struct ne6xvf_hw *hw, struct virtchnl_vf_resource *msg) +{ + struct virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + + hw->dev_caps.max_mtu = msg->max_mtu; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == NE6XVF_VIRTCHNL_VSI_SRIOV) { + ether_addr_copy(hw->mac.perm_addr, vsi_res->default_mac_addr); + ether_addr_copy(hw->mac.addr, vsi_res->default_mac_addr); + } + vsi_res++; + } +} + +/** + * ne6xvf_get_vf_config + * @adapter: private adapter structure + * + * Get VF configuration from PF and populate hw structure. Must be called after + * admin queue is initialized. Busy waits until response is received from PF, + * with maximum timeout. Response from PF is returned in the buffer for further + * processing by the caller. 
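+ *
+ * Note: adapter->vf_res must already be allocated by the caller; only the
+ * VSI and queue related fields are filled in from the PF reply here.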
+ **/ +int ne6xvf_get_vf_config(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_hw *hw = &adapter->hw; + struct ne6xvf_arq_event_info event; + int err; + + event.buf_len = sizeof(struct ne6x_mbx_snap_buffer_data); + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; + + err = ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_GET_VF_RESOURCES); + + hw->dev_caps.vf_id = event.msg_buf[0]; + hw->dev_caps.chip_id = 0x0; + hw->dev_caps.lport = event.msg_buf[1]; + hw->dev_caps.mac_id = event.msg_buf[2]; + hw->dev_caps.base_queue = event.msg_buf[3]; + hw->dev_caps.num_vf_per_pf = event.msg_buf[5]; + adapter->vf_res->num_vsis = 0x1; + adapter->vf_res->num_queue_pairs = event.msg_buf[4]; + adapter->vf_res->max_vectors = event.msg_buf[4]; + adapter->vf_res->vsi_res[0].vsi_type = NE6XVF_VIRTCHNL_VSI_SRIOV; + + adapter->comm.port_info = hw->dev_caps.lport | (hw->dev_caps.vf_id << 8); + + dev_info(&adapter->pdev->dev, "vf %d Get Resource [ lport: %d, mac_id: %d, base: %d, queue: %d, err = %d]\n", + hw->dev_caps.vf_id, hw->dev_caps.lport, hw->dev_caps.mac_id, + hw->dev_caps.base_queue, adapter->vf_res->num_queue_pairs, err); + + ne6xvf_vf_parse_hw_config(hw, adapter->vf_res); + + return err; +} + +int ne6xvf_config_default_vlan(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event; + struct ne6x_vf_vlan vlan; + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + event.buf_len = 0; + event.msg_buf = NULL; + + vlan = NE6X_VF_VLAN(0xfff, ETH_P_8021Q); + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)&vlan, sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_VLAN); + + return 0; +} + +/** + * ne6xvf_send_vf_config_msg + * @adapter: adapter structure + * + * Send VF configuration request admin queue message to the PF. The reply + * is not checked in this function. Returns 0 if the message was + * successfully sent, or one of the NE6XVF_ADMIN_QUEUE_ERROR_ statuses if not. 
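+ *
+ * When b_init is set, a locally administered MAC address (02:31:3a:xx:xx:xx)
+ * is generated and offered to the PF; otherwise the stored address is reused.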
+ **/ +int ne6xvf_send_vf_config_msg(struct ne6xvf_adapter *adapter, bool b_init) +{ + u8 mac_addr[ETH_ALEN]; + + adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_CONFIG; + if (b_init) { + eth_random_addr(mac_addr); + mac_addr[0] = 0x02; + mac_addr[1] = 0x31; + mac_addr[2] = 0x3a; + } else { + memcpy(mac_addr, adapter->vf_res->vsi_res[0].default_mac_addr, 6); + } + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, mac_addr, 6); + + /* mac addr need get for PF */ + adapter->vf_res->vsi_res[0].default_mac_addr[0] = mac_addr[0]; + adapter->vf_res->vsi_res[0].default_mac_addr[1] = mac_addr[1]; + adapter->vf_res->vsi_res[0].default_mac_addr[2] = mac_addr[2]; + adapter->vf_res->vsi_res[0].default_mac_addr[3] = mac_addr[3]; + adapter->vf_res->vsi_res[0].default_mac_addr[4] = mac_addr[4]; + adapter->vf_res->vsi_res[0].default_mac_addr[5] = mac_addr[5]; + adapter->vf_res->vsi_res[0].vsi_type = NE6XVF_VIRTCHNL_VSI_SRIOV; + + return 0; +} + +int ne6xvf_send_vf_offload_msg(struct ne6xvf_adapter *adapter) +{ + adapter->current_op = VIRTCHNL_OP_CONFIG_OFFLOAD; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD; + dev_info(&adapter->pdev->dev, "adapter->hw_feature = 0x%08X\n", adapter->hw_feature); + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_OFFLOAD, (u8 *)&adapter->hw_feature, 4); + + return 0; +} + +void ne6xvf_config_rss_info(struct ne6xvf_adapter *adapter) +{ + int count, size = sizeof(struct ne6x_rss_info); + int index, status; + u8 *plut_info = (u8 *)&adapter->rss_info; + struct ne6xvf_arq_event_info event; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot Configure RSS, command %d pending\n", + adapter->current_op); + return; + } + + adapter->current_op = VIRTCHNL_OP_CONFIG_RSS; + + count = (size + NE6XVF_SDK_LARGE_BUF - 1) / NE6XVF_SDK_LARGE_BUF; + + for (index = 0; index < count; index++) { + event.buf_len = 0; + event.msg_buf = NULL; + status = ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS, + &plut_info[index * NE6XVF_SDK_LARGE_BUF], + ((size - index * NE6XVF_SDK_LARGE_BUF) > + NE6XVF_SDK_LARGE_BUF) + ? 
NE6XVF_SDK_LARGE_BUF
+						    : (size - index * NE6XVF_SDK_LARGE_BUF));
+		ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_CONFIG_RSS);
+	}
+
+	adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_RSS;
+	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+}
+
+void ne6xvf_changed_rss(struct ne6xvf_adapter *adapter)
+{
+	struct ne6xvf_arq_event_info event;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	event.msg_buf = NULL;
+	event.buf_len = 0;
+
+	adapter->current_op = VIRTCHNL_OP_CHANGED_RSS;
+	ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CHANGED_RSS, (u8 *)&adapter->num_active_queues,
+			   sizeof(adapter->num_active_queues));
+	ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_CHANGED_RSS);
+	adapter->aq_required &= ~NE6XVF_FLAG_AQ_CHANGED_RSS;
+	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+}
+
+int ne6xvf_request_feature(struct ne6xvf_adapter *adapter)
+{
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot request feature, command %d pending\n",
+			adapter->current_op);
+		return -EBUSY;
+	}
+
+	adapter->current_op = VIRTCHNL_OP_GET_VF_FEATURE;
+	adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_FEATURE;
+	ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_FEATURE, NULL, 0);
+
+	return 0;
+}
+
+/**
+ * ne6xvf_request_stats
+ * @adapter: adapter structure
+ *
+ * Request VSI statistics from PF.
+ **/
+void ne6xvf_request_stats(struct ne6xvf_adapter *adapter)
+{
+	ne6xvf_update_pf_stats(adapter);
+}
+
+/**
+ * ne6xvf_request_queues
+ * @adapter: adapter structure
+ * @num: number of requested queues
+ *
+ * We get a default number of queues from the PF. This enables us to request a
+ * different number. Returns 0 on success, negative on failure
+ **/
+int ne6xvf_request_queues(struct ne6xvf_adapter *adapter, int num)
+{
+	struct ne6xvf_virtchnl_vf_res_request vfres;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
+			adapter->current_op);
+		return -EBUSY;
+	}
+
+	vfres.num_queue_pairs = 1;
+	vfres.need_reset = 0x0;
+
+	adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
+	adapter->flags |= NE6XVF_FLAG_REINIT_ITR_NEEDED;
+
+	return ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, (u8 *)&vfres, sizeof(vfres));
+}
+
+/**
+ * ne6xvf_enable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable all of the queues we own.
Returns 0 on success, negative on failure + **/ +int ne6xvf_enable_queues(struct ne6xvf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", + adapter->current_op); + return -EBUSY; + } + + adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES; + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, NULL, 0); + return 0; +} + +int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event; + int status; + + event.buf_len = sizeof(struct ne6x_mbx_snap_buffer_data); + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; + + status = ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_GET_VF_FEATURE); + if (status == 0) { + adapter->hw_feature = event.snap.data[3]; + adapter->hw_feature = (adapter->hw_feature << 8); + adapter->hw_feature |= event.snap.data[2]; + adapter->hw_feature = (adapter->hw_feature << 8); + adapter->hw_feature |= event.snap.data[1]; + adapter->hw_feature = (adapter->hw_feature << 8); + adapter->hw_feature |= event.snap.data[0]; + dev_info(&adapter->pdev->dev, "vf %d get feature 0x%08X\n", + adapter->hw.dev_caps.vf_id, adapter->hw_feature); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_FEATURE; + kfree(event.msg_buf); + + return status; +} + +/** + * ne6xvf_add_ether_addrs + * @adapter: adapter structure + * + * Request that the PF add one or more addresses to our filters. + **/ +void ne6xvf_add_ether_addrs(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + struct virtchnl_ether_addr_list *veal; + struct ne6xvf_mac_filter *f; + int len, i = 0, count = 0; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", + adapter->current_op); + return; + } + + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_MAC_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); + veal = kzalloc(len, GFP_ATOMIC); + if (!veal) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + veal->vsi_id = adapter->vsi_res->vsi_id; + veal->num_elements = count; + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) { + ether_addr_copy(veal->list[i].addr, f->macaddr); + i++; + f->add = false; + if (i == count) + break; + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal->list[i].addr, 6); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_ETH_ADDR); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + kfree(veal); +} + +void ne6xvf_set_vf_addr(struct ne6xvf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", + adapter->current_op); + return; + } + 
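+	/* The PF acknowledges asynchronously; the VIRTCHNL_OP_SET_VF_ADDR
+	 * completion updates netdev->dev_addr and wakes vc_waitqueue.
+	 */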
+	adapter->current_op = VIRTCHNL_OP_SET_VF_ADDR;
+	ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_SET_VF_ADDR, adapter->hw.mac.addr, 6);
+	adapter->aq_required &= ~NE6XVF_FLAG_AQ_SET_VF_MAC;
+}
+
+/**
+ * ne6xvf_del_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more addresses from our filters.
+ **/
+void ne6xvf_del_ether_addrs(struct ne6xvf_adapter *adapter)
+{
+	struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL};
+	struct virtchnl_ether_addr_list *veal;
+	struct ne6xvf_mac_filter *f, *temp;
+	int len, i = 0, count = 0;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_MAC_FILTER;
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		if (f->remove)
+			count++;
+	}
+
+	if (!count) {
+		adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_MAC_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+		return;
+	}
+
+	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+
+	len = sizeof(struct virtchnl_ether_addr_list) +
+	      (count * sizeof(struct virtchnl_ether_addr));
+	veal = kzalloc(len, GFP_ATOMIC);
+	if (!veal) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+		return;
+	}
+
+	veal->vsi_id = adapter->vsi_res->vsi_id;
+	veal->num_elements = count;
+	list_for_each_entry_safe(f, temp, &adapter->mac_filter_list, list) {
+		if (f->remove) {
+			ether_addr_copy(veal->list[i].addr, f->macaddr);
+			i++;
+			list_del(&f->list);
+			kfree(f);
+			if (i == count)
+				break;
+		}
+	}
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	for (i = 0; i < count; i++) {
+		event.buf_len = 0;
+		event.msg_buf = NULL;
+		ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal->list[i].addr, 6);
+		ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_DEL_ETH_ADDR);
+	}
+
+	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+
+	kfree(veal);
+}
+
+#define NE6XVF_MAX_SPEED_STRLEN 13
+
+/**
+ * ne6xvf_print_link_message - print link up or down
+ * @adapter: adapter structure
+ *
+ * Log a message telling the world of our wondrous link status
+ */
+static void ne6xvf_print_link_message(struct ne6xvf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int link_speed_mbps;
+	char *speed;
+
+	if (!adapter->link_up) {
+		netdev_info(netdev, "NIC Link is Down\n");
+		return;
+	}
+
+	speed = kcalloc(1, NE6XVF_MAX_SPEED_STRLEN, GFP_KERNEL);
+	if (!speed)
+		return;
+
+	switch (adapter->link_speed) {
+	case NE6X_LINK_SPEED_100GB:
+		link_speed_mbps = SPEED_100000;
+		break;
+	case NE6X_LINK_SPEED_40GB:
+		link_speed_mbps = SPEED_40000;
+		break;
+	case NE6X_LINK_SPEED_25GB:
+		link_speed_mbps = SPEED_25000;
+		break;
+	case NE6X_LINK_SPEED_10GB:
+		link_speed_mbps = SPEED_10000;
+		break;
+	default:
+		link_speed_mbps = SPEED_UNKNOWN;
+		break;
+	}
+
+	snprintf(speed, NE6XVF_MAX_SPEED_STRLEN, "%d %s", link_speed_mbps / 1000, "Gbps");
+
+	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
+
+	kfree(speed);
+}
+
+/**
+ * ne6xvf_set_promiscuous
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable promiscuous mode for our VSI.
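+ * Unicast and multicast promiscuous modes are requested independently,
+ * based on the NE6XVF_FLAG_PROMISC_ON and NE6XVF_FLAG_ALLMULTI_ON flags.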
+ **/
+void ne6xvf_set_promiscuous(struct ne6xvf_adapter *adapter)
+{
+	struct ne6xvf_virtchnl_promisc_info vpi;
+	int flags = 0;
+
+	dev_warn(&adapter->pdev->dev, "%s: ....\n", __func__);
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+
+	if (adapter->flags & NE6XVF_FLAG_PROMISC_ON) {
+		adapter->hw_feature |= NE6X_F_PROMISC;
+		flags |= FLAG_VF_UNICAST_PROMISC;
+	} else {
+		adapter->hw_feature &= ~NE6X_F_PROMISC;
+	}
+
+	if (adapter->flags & NE6XVF_FLAG_ALLMULTI_ON) {
+		adapter->hw_feature |= NE6X_F_RX_ALLMULTI;
+		flags |= FLAG_VF_MULTICAST_PROMISC;
+	} else {
+		adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI;
+	}
+
+	vpi.vsi_id = adapter->vsi_res->vsi_id;
+	vpi.flags = flags;
+
+	ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, (u8 *)&vpi, sizeof(vpi));
+}
+
+void ne6xvf_vchanel_get_port_link_status(struct ne6xvf_adapter *adapter)
+{
+	struct ne6xvf_hw *hw = &adapter->hw;
+	u8 msg[8] = {0};
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot get_link_status, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	/* pass queue info to vf */
+	msg[0] = hw->dev_caps.base_queue;
+	msg[1] = adapter->num_active_queues;
+
+	adapter->current_op = VIRTCHNL_OP_GET_PORT_STATUS;
+	ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_PORT_STATUS, msg, 2);
+	adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS;
+}
+
+/**
+ * ne6xvf_virtchnl_completion
+ * @adapter: adapter structure
+ * @v_opcode: opcode sent by PF
+ * @v_retval: retval sent by PF
+ * @msg: message sent by PF
+ * @msglen: message length
+ *
+ * Asynchronous completion function for admin queue messages. Rather than busy
+ * wait, we fire off our requests and assume that no errors will be returned.
+ * This function handles the reply messages.
+ **/
+void ne6xvf_virtchnl_completion(struct ne6xvf_adapter *adapter, enum virtchnl_ops v_opcode,
+				enum ne6xvf_status v_retval, u8 *msg, u16 msglen)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (v_opcode == VIRTCHNL_OP_EVENT) {
+		struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg;
+		bool link_up = vpe->link_status;
+		enum ne6x_sdk_link_speed old_link_speed = adapter->link_speed;
+
+		switch (vpe->event) {
+		case NE6XVF_VIRTCHNL_EVENT_LINK_CHANGE:
+			adapter->link_speed = (vpe->link_speed_0 << 24) |
+					      (vpe->link_speed_1 << 16) |
+					      (vpe->link_speed_2 << 8) |
+					      vpe->link_speed_3;
+			if (adapter->current_op == VIRTCHNL_OP_GET_PORT_STATUS)
+				adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+
+			/* we've already got the right link status, bail */
+			if (adapter->link_up == link_up) {
+				if (link_up && old_link_speed != adapter->link_speed)
+					ne6xvf_print_link_message(adapter);
+
+				break;
+			}
+
+			if (link_up) {
+				/* If we get a link up message and start queues
+				 * before our queues are configured it will
+				 * trigger a TX hang. In that case, just ignore
+				 * the link status message; we'll get another one
+				 * after we enable queues and are actually
+				 * prepared to send traffic.
+				 */
+				if (adapter->state != __NE6XVF_RUNNING)
+					break;
+
+				/* For ADQ enabled VF, we reconfigure VSIs and
+				 * re-allocate queues. Hence wait till all
+				 * queues are enabled.
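+				 * NE6XVF_FLAG_QUEUES_DISABLED is cleared once
+				 * the VIRTCHNL_OP_ENABLE_QUEUES completion is
+				 * handled.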
+				 */
+				if (adapter->flags & NE6XVF_FLAG_QUEUES_DISABLED)
+					break;
+			}
+
+			adapter->link_up = link_up;
+			if (link_up) {
+				netif_tx_start_all_queues(netdev);
+				netif_carrier_on(netdev);
+			} else {
+				netif_tx_stop_all_queues(netdev);
+				netif_carrier_off(netdev);
+			}
+			ne6xvf_print_link_message(adapter);
+			break;
+		case NE6XVF_VIRTCHNL_EVENT_RESET_IMPENDING:
+			dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
+			break;
+		default:
+			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", vpe->event);
+			break;
+		}
+		return;
+	}
+
+	if (v_opcode == VIRTCHNL_OP_VF_CONFIG) {
+		struct virtchnl_vf_config *vfconfig = (struct virtchnl_vf_config *)msg;
+
+		dev_info(&adapter->pdev->dev, "vf_config_data from the PF, type = %d, value = %d\n",
+			 vfconfig->type, vfconfig->data[0]);
+		switch (vfconfig->type) {
+		case VIRTCHNL_VF_CONFIG_TRUST:
+			adapter->trusted = vfconfig->data[0];
+			if (!adapter->trusted) {
+				adapter->hw_feature &= ~NE6X_F_PROMISC;
+				adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI;
+				adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON;
+				adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON;
+			}
+			break;
+		default:
+			break;
+		}
+		return;
+	}
+
+	if (v_retval) {
+		switch (v_opcode) {
+		case VIRTCHNL_OP_SET_VF_ADDR:
+			dev_err(&adapter->pdev->dev, "Failed to change MAC address\n");
+			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+			wake_up(&adapter->vc_waitqueue);
+			if (adapter->current_op != VIRTCHNL_OP_SET_VF_ADDR)
+				return;
+
+			break;
+		default:
+			dev_err(&adapter->pdev->dev, "PF returned error %d to our request %d\n",
+				v_retval, v_opcode);
+
+			/* Assume that the ADQ configuration caused one of the
+			 * v_opcodes in this if statement to fail. Set the
+			 * flag so the reset path can return to the pre-ADQ
+			 * configuration and traffic can resume
+			 */
+			if ((v_opcode == VIRTCHNL_OP_ENABLE_QUEUES ||
+			     v_opcode == VIRTCHNL_OP_CONFIG_IRQ_MAP ||
+			     v_opcode == VIRTCHNL_OP_CONFIG_ADPT_QUEUES)) {
+				dev_err(&adapter->pdev->dev,
+					"ADQ is enabled and opcode %d failed (%d)\n", v_opcode,
+					v_retval);
+				netdev_reset_tc(netdev);
+				adapter->flags |= NE6XVF_FLAG_REINIT_ITR_NEEDED;
+				ne6xvf_schedule_reset(adapter);
+				adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+				return;
+			}
+		}
+	}
+
+	switch (v_opcode) {
+	case VIRTCHNL_OP_SET_VF_ADDR:
+		if (!v_retval) {
+			if (msglen != 0 && msg) {
+				netif_addr_lock_bh(netdev);
+				ether_addr_copy(adapter->hw.mac.addr, msg);
+				eth_hw_addr_set(netdev, msg);
+				netif_addr_unlock_bh(netdev);
+			}
+		}
+		wake_up(&adapter->vc_waitqueue);
+		if (adapter->current_op == VIRTCHNL_OP_SET_VF_ADDR)
+			adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+
+		break;
+	case VIRTCHNL_OP_GET_VF_RESOURCES:
+		memcpy(adapter->vf_res, msg, msglen);
+		ne6xvf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
+			/* restore current mac address */
+			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+		} else {
+			netif_addr_lock_bh(netdev);
+			/* refresh current mac address if changed */
+			ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
+			netif_addr_unlock_bh(netdev);
+		}
+
+		ne6xvf_parse_vf_resource_msg(adapter);
+		break;
+	case VIRTCHNL_OP_GET_VF_FEATURE:
+		memcpy(&adapter->hw_feature, msg, 4);
+		dev_info(&adapter->pdev->dev, "%s: hw_feature = 0x%08X\n",
+			 ne6xvf_state_str(adapter->state), adapter->hw_feature);
+		break;
+	case VIRTCHNL_OP_ENABLE_QUEUES:
+		/* enable transmits */
+		if (adapter->state == __NE6XVF_RUNNING) {
+			ne6xvf_irq_enable(adapter, true);
+			/* If queues not enabled when handling link event,
+			 * then set carrier on now
+			 */
+			if
(adapter->link_up && !netif_carrier_ok(netdev)) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } + } + adapter->flags |= NE6XVF_FLAG_QUEUES_ENABLED; + adapter->flags &= ~NE6XVF_FLAG_QUEUES_DISABLED; + break; + case VIRTCHNL_OP_DISABLE_QUEUES: + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + if (adapter->state == __NE6XVF_DOWN_PENDING) + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + + adapter->flags &= ~NE6XVF_FLAG_QUEUES_ENABLED; + break; + case VIRTCHNL_OP_VERSION: + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + /* Don't display an error if we get these out of sequence. + * If the firmware needed to get kicked, we'll get these and + * it's no problem. + */ + if (v_opcode != adapter->current_op) + return; + + break; + case VIRTCHNL_OP_REQUEST_QUEUES: { + struct ne6xvf_virtchnl_vf_res_request *vfres = + (struct ne6xvf_virtchnl_vf_res_request *)msg; + if (vfres->num_queue_pairs != adapter->num_req_queues) { + dev_info(&adapter->pdev->dev, "Requested %d queues, PF can support %d\n", + adapter->num_req_queues, vfres->num_queue_pairs); + adapter->num_req_queues = 0; + adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED; + } + } break; + default: + if (adapter->current_op && v_opcode != adapter->current_op) + dev_dbg(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", + adapter->current_op, v_opcode); + + break; + } /* switch v_opcode */ + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; +} + +/** + * ne6xvf_add_vlans + * @adapter: adapter structure + * + * Request that the PF add one or more VLAN filters to our VSI. + **/ +void ne6xvf_add_vlans(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {0}; + struct ne6xvf_vlan_filter *f = NULL; + struct ne6x_vf_vlan *vlan = NULL; + int len = 0, i = 0, count = 0; + + dev_info(&adapter->pdev->dev, "%s: adapter->current_op:%d\n", __func__, + adapter->current_op); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + len = sizeof(struct ne6x_vf_vlan) * count; + vlan = kzalloc(len, GFP_ATOMIC); + if (!vlan) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + vlan[i].tpid = f->vlan.tpid; + vlan[i].vid = f->vlan.vid; + i++; + f->add = false; + f->is_new_vlan = true; + if (i == count) + break; + } + } + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)&vlan[i], + sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_VLAN); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + kfree(vlan); +} + +/** + * ne6xvf_del_vlans + * @adapter: adapter structure + * + * Request that the PF remove one or more VLAN filters from our VSI. 
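+ *
+ * Filters marked for removal are unlinked and freed under the list lock
+ * before the per-VLAN delete messages are sent to the PF.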
+ **/ +void ne6xvf_del_vlans(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {0}; + struct ne6xvf_vlan_filter *f, *ftmp; + struct ne6x_vf_vlan *vlan = NULL; + int i = 0, count = 0; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + /* since VLAN capabilities are not allowed, we dont want to send + * a VLAN delete request because it will most likely fail and + * create unnecessary errors/noise, so just free the VLAN + * filters marked for removal to enable bailing out before + * sending a virtchnl message + */ + if (f->remove) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + adapter->current_op = VIRTCHNL_OP_DEL_VLAN; + vlan = kcalloc(count, sizeof(*vlan), GFP_ATOMIC); + if (!vlan) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + vlan[i].tpid = f->vlan.tpid; + vlan[i].vid = f->vlan.vid; + i++; + list_del(&f->list); + kfree(f); + if (i == count) + break; + } + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)&vlan[i], + sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_DEL_VLAN); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + kfree(vlan); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h new file mode 100644 index 0000000000000000000000000000000000000000..1fae0b1922dcccbfee11e0cfe894703c4068240c --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
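+ *
+ * Layout of the mailbox messages and resource structures exchanged
+ * between the ne6x VF driver and the PF driver.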
*/ + +#ifndef _NE6XVF_VIRTCHNL_H +#define _NE6XVF_VIRTCHNL_H + +#define NE6XVF_SDK_LARGE_BUF 6 + +struct ne6xvf_spinlock { + /* mutext lock */ + struct mutex spinlock; +}; + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + + /* see enum virtchnl_vsi_type */ + s32 vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 type; + u8 pad; +}; + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_cap_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[]; +}; + +enum nacf_virtchnl_vsi_type { + NE6XVF_VIRTCHNL_VSI_TYPE_INVALID = 0, + NE6XVF_VIRTCHNL_VSI_SRIOV = 6, +}; + +struct virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct virtchnl_ether_addr list[]; +}; + +struct ne6xvf_arq_event_info { + struct ne6x_mbx_snap_buffer_data snap; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* VF resource request */ +struct ne6xvf_virtchnl_vf_res_request { + u16 num_queue_pairs; + u8 need_reset; + u8 rsv; +}; + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. + */ +struct ne6xvf_virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +union u_ne6x_mbx_snap_buffer_data { + struct ne6x_mbx_snap_buffer_data snap; + u64 val; +}; + +struct ne6xvf_sdk_mbx_info { + struct ne6xvf_spinlock mbx_spinlock; + struct ne6x_mbx_snap_buffer_data sq_data; + struct ne6x_mbx_snap_buffer_data cq_data; + int init_flag; +}; + +#define NE6XVF_VIRTCHNL_VERSION_MAJOR 1 +#define NE6XVF_VIRTCHNL_VERSION_MINOR 1 + +struct ne6xvf_virtchnl_version_info { + u8 major; + u8 minor; +}; + +/* VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum ne6xvf_virtchnl_event_codes { + NE6XVF_VIRTCHNL_EVENT_UNKNOWN = 0, + NE6XVF_VIRTCHNL_EVENT_LINK_CHANGE, + NE6XVF_VIRTCHNL_EVENT_RESET_IMPENDING, + NE6XVF_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, + NE6XVF_VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE, +}; + +struct virtchnl_pf_event { + u8 event; + u8 link_speed_0; + u8 link_speed_1; + u8 link_speed_2; + u8 link_speed_3; + u8 link_status; +}; + +#endif diff --git a/drivers/net/ethernet/motorcomm/Kconfig b/drivers/net/ethernet/motorcomm/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..2d058928936f04163f9563cc43e85b492f485d2c --- /dev/null +++ b/drivers/net/ethernet/motorcomm/Kconfig @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2023 Motorcomm, Inc. + +config NET_VENDOR_MOTORCOMM + bool "Motorcomm devices" + default y + depends on PCI + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Motorcomm cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_MOTORCOMM + +config YT6801 + tristate "Motorcomm YT6801 Ethernet support" + depends on PCI + help + If you have a network (Ethernet) controller of this type, say Y here. + + To compile this driver as a module, choose M here. 
The module
+	  will be called yt6801.
+
+endif # NET_VENDOR_MOTORCOMM
diff --git a/drivers/net/ethernet/motorcomm/Makefile b/drivers/net/ethernet/motorcomm/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..af0a439d54a16e06ad2f8e80f98749695f6dc7b7
--- /dev/null
+++ b/drivers/net/ethernet/motorcomm/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (c) 2023 Motorcomm, Inc.
+
+obj-$(CONFIG_YT6801) += yt6801/
diff --git a/drivers/net/ethernet/motorcomm/yt6801/Makefile b/drivers/net/ethernet/motorcomm/yt6801/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..93b5c4510eb0598f7e7914b1eecdeeef03fda31e
--- /dev/null
+++ b/drivers/net/ethernet/motorcomm/yt6801/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (c) 2023 Motorcomm, Inc.
+
+
+obj-$(CONFIG_YT6801) += yt6801.o
+
+yt6801-objs := fuxi-gmac-common.o \
+		fuxi-gmac-desc.o \
+		fuxi-gmac-ethtool.o \
+		fuxi-gmac-hw.o \
+		fuxi-gmac-net.o \
+		fuxi-gmac-pci.o \
+		fuxi-gmac-phy.o \
+		fuxi-efuse.o \
+		fuxi-gmac-debugfs.o
diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-dbg.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-dbg.h
new file mode 100644
index 0000000000000000000000000000000000000000..24282f8e2230338795cde633611b0b183ade734b
--- /dev/null
+++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-dbg.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Motorcomm Corporation. */
+
+#ifndef _MP_DBG_H
+#define _MP_DBG_H
+
+/* Message verbosity: lower values indicate higher urgency */
+#define MP_OFF 0
+#define MP_ERROR 1
+#define MP_WARN 2
+#define MP_TRACE 3
+#define MP_INFO 4
+#define MP_LOUD 5
+
+#endif /* _MP_DBG_H */
\ No newline at end of file
diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c
new file mode 100644
index 0000000000000000000000000000000000000000..ae4ca3d59ac4c06fbe1f08de34450271f53eb2f8
--- /dev/null
+++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c
@@ -0,0 +1,1344 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Motorcomm Corporation. */
+
+#include "fuxi-gmac.h"
+#include "fuxi-gmac-reg.h"
+#include "fuxi-efuse.h"
+
+/* read patch per index.
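+ * Each patch entry holds a 2-byte register offset followed by a 4-byte
+ * value in the efuse patch region.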
*/ +bool fxgmac_read_patch_from_efuse_per_index(struct fxgmac_pdata *pdata, + u8 index, u32 *offset, u32 *value) +{ + unsigned int wait, i; + u32 regval = 0; + bool succeed = false; + + if (index >= FUXI_EFUSE_MAX_ENTRY) { + FXGMAC_PR("Reading efuse out of range, index %d\n", index); + return false; + } + + if (offset) { + *offset = 0; + } + for (i = EFUSE_PATCH_ADDR_START_BYTE; i < EFUSE_PATCH_DATA_START_BYTE; + i++) { + regval = 0; + regval = FXGMAC_SET_REG_BITS( + regval, EFUSE_OP_ADDR_POS, EFUSE_OP_ADDR_LEN, + EFUSE_REGION_A_B_LENGTH + index * EFUSE_EACH_PATH_SIZE + + i); + regval = FXGMAC_SET_REG_BITS(regval, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_READ); + writereg(pdata->pAdapter, regval, + pdata->base_mem + EFUSE_OP_CTRL_0); + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + regval = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(regval, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (succeed) { + if (offset) { + *offset |= + (FXGMAC_GET_REG_BITS( + regval, EFUSE_OP_RD_DATA_POS, + EFUSE_OP_RD_DATA_LEN) + << (i << 3)); + } + } else { + FXGMAC_PR("Fail to reading efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + if (value) { + *value = 0; + } + for (i = EFUSE_PATCH_DATA_START_BYTE; i < EFUSE_EACH_PATH_SIZE; i++) { + regval = 0; + regval = FXGMAC_SET_REG_BITS( + regval, EFUSE_OP_ADDR_POS, EFUSE_OP_ADDR_LEN, + EFUSE_REGION_A_B_LENGTH + index * EFUSE_EACH_PATH_SIZE + + i); + regval = FXGMAC_SET_REG_BITS(regval, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_READ); + writereg(pdata->pAdapter, regval, + pdata->base_mem + EFUSE_OP_CTRL_0); + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + regval = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(regval, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (succeed) { + if (value) { + *value |= (FXGMAC_GET_REG_BITS( + regval, EFUSE_OP_RD_DATA_POS, + EFUSE_OP_RD_DATA_LEN) + << ((i - 2) << 3)); + } + } else { + FXGMAC_PR("Fail to reading efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + return succeed; +} + +bool fxgmac_read_patch_from_efuse(struct fxgmac_pdata *pdata, u32 offset, + u32 *value) /* read patch per index. */ +{ + u32 reg_offset, reg_val; + u32 cur_val = 0; + bool succeed = true; + u8 index = 0; + + if (offset >> 16) { + FXGMAC_PR( + "Reading efuse out of range, reg %d. reg must be 2bytes.\n", + index); + return false; + } + + for (index = 0; index < FUXI_EFUSE_MAX_ENTRY; index++) { + if (!fxgmac_read_patch_from_efuse_per_index( + pdata, index, ®_offset, ®_val)) { + succeed = false; + break; + } else if (reg_offset == offset) { + cur_val = reg_val; + } else if (0 == reg_offset && 0 == reg_val) { + break; /* first blank. We should write here. */ + } + } + + if (value) { + *value = cur_val; + } + + return succeed; +} + +bool fxgmac_write_patch_to_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, + u32 offset, u32 value) +{ + unsigned int wait, i; + u32 reg_val; + bool succeed = false; + u32 cur_reg, cur_val; + u8 max_index = FUXI_EFUSE_MAX_ENTRY; + + if (offset >> 16) { + FXGMAC_PR( + "Reading efuse out of range, reg %d. 
reg must be 2bytes.\n", + index); + return false; + } + + fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, ®_val); + if (EFUSE_LED_COMMON_SOLUTION == reg_val) { + max_index = FUXI_EFUSE_MAX_ENTRY_UNDER_LED_COMMON; + } + + if (index >= max_index) { + FXGMAC_PR("Writing efuse out of range, index %d max index %d\n", + index, max_index); + return false; + } + + if (fxgmac_read_patch_from_efuse_per_index(pdata, index, &cur_reg, + &cur_val)) { + if (cur_reg != 0 || cur_val != 0) { + FXGMAC_PR( + " The index %d has writed value, cannot rewrite it.\n", + index); + return false; + } + } else { + FXGMAC_PR("Cannot read index %d.\n", index); + return false; + } + + for (i = EFUSE_PATCH_ADDR_START_BYTE; i < EFUSE_PATCH_DATA_START_BYTE; + i++) { + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS( + reg_val, EFUSE_OP_ADDR_POS, EFUSE_OP_ADDR_LEN, + EFUSE_REGION_A_B_LENGTH + index * EFUSE_EACH_PATH_SIZE + + i); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, + (offset >> (i << 3)) & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, + pdata->base_mem + EFUSE_OP_CTRL_0); + + succeed = false; + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + for (i = 2; i < 6; i++) { + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, + 18 + index * 6 + i); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, + (value >> ((i - 2) << 3)) & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, + pdata->base_mem + EFUSE_OP_CTRL_0); + + succeed = false; + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + return succeed; +} + +bool fxgmac_write_patch_to_efuse(struct fxgmac_pdata *pdata, u32 offset, + u32 value) +{ + unsigned int wait, i; + u32 reg_offset, reg_val; + u32 cur_offset = 0, cur_val = 0; + bool succeed = false; + u8 index = 0; + + if (offset >> 16) { + FXGMAC_PR( + "Reading efuse out of range, reg %d. reg must be 2bytes.\n", + index); + return false; + } + + for (index = 0;; index++) { + if (!fxgmac_read_patch_from_efuse_per_index( + pdata, index, ®_offset, ®_val)) { + return false; + } else if (reg_offset == offset) { + cur_offset = reg_offset; + cur_val = reg_val; + } else if (0 == reg_offset && 0 == reg_val) { + break; /* first blank. We should write here. 
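+			 * The patch region is append-only: an all-zero
+			 * offset/value pair marks the first free slot,
+			 * and that is where the new entry is programmed.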
*/ + } + } + + if (cur_offset == offset) { + if (cur_val == value) { + FXGMAC_PR("0x%x -> Reg0x%x already exists, ignore.\n", + value, offset); + return true; + } else { + FXGMAC_PR( + "Reg0x%x entry current value 0x%x, reprogram.\n", + offset, value); + } + } + + for (i = EFUSE_PATCH_ADDR_START_BYTE; i < EFUSE_PATCH_DATA_START_BYTE; + i++) { + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS( + reg_val, EFUSE_OP_ADDR_POS, EFUSE_OP_ADDR_LEN, + EFUSE_REGION_A_B_LENGTH + index * EFUSE_EACH_PATH_SIZE + + i); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, + (offset >> (i << 3)) & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, + pdata->base_mem + EFUSE_OP_CTRL_0); + + succeed = false; + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + for (i = EFUSE_PATCH_DATA_START_BYTE; i < EFUSE_EACH_PATH_SIZE; i++) { + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS( + reg_val, EFUSE_OP_ADDR_POS, EFUSE_OP_ADDR_LEN, + EFUSE_REGION_A_B_LENGTH + index * EFUSE_EACH_PATH_SIZE + + i); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, + (value >> ((i - 2) << 3)) & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, + pdata->base_mem + EFUSE_OP_CTRL_0); + + succeed = false; + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + return succeed; +} + +bool fxgmac_read_mac_subsys_from_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, + u32 *subsys, u32 *revid) +{ + u32 offset = 0, value = 0; + u32 machr = 0, maclr = 0; + bool succeed = true; + u8 index = 0; + + for (index = 0;; index++) { + if (!fxgmac_read_patch_from_efuse_per_index(pdata, index, + &offset, &value)) { + succeed = false; + break; /* reach the last item. */ + } + if (0x00 == offset) { + break; /* reach the blank. 
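+			 * Entries matched before the blank (maclr, machr,
+			 * revid, subsys) are consumed below; the station
+			 * address is rebuilt from machr (bytes 0-1) and
+			 * maclr (bytes 2-5).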
*/ + } + if (MACA0LR_FROM_EFUSE == offset) { + maclr = value; + } + if (MACA0HR_FROM_EFUSE == offset) { + machr = value; + } + + if ((0x08 == offset) && revid) { + *revid = value; + } + if ((0x2C == offset) && subsys) { + *subsys = value; + } + } + if (mac_addr) { + mac_addr[5] = (u8)(maclr & 0xFF); + mac_addr[4] = (u8)((maclr >> 8) & 0xFF); + mac_addr[3] = (u8)((maclr >> 16) & 0xFF); + mac_addr[2] = (u8)((maclr >> 24) & 0xFF); + mac_addr[1] = (u8)(machr & 0xFF); + mac_addr[0] = (u8)((machr >> 8) & 0xFF); + } + + return succeed; +} + +bool fxgmac_write_mac_subsys_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, + u32 *subsys, u32 *revid) +{ + u32 machr = 0, maclr = 0, pcie_cfg_ctrl = PCIE_CFG_CTRL_DEFAULT_VAL; + bool succeed = true; + if (mac_addr) { + machr = readreg(pdata->pAdapter, + pdata->base_mem + MACA0HR_FROM_EFUSE); + maclr = readreg(pdata->pAdapter, + pdata->base_mem + MACA0LR_FROM_EFUSE); + DPRINTK("Current mac address from efuse is %02x-%02x-%02x-%02x-%02x-%02x.\n", + (machr >> 8) & 0xFF, machr & 0xFF, (maclr >> 24) & 0xFF, + (maclr >> 16) & 0xFF, (maclr >> 8) & 0xFF, + maclr & 0xFF); + + if (!fxgmac_write_patch_to_efuse(pdata, MACA0HR_FROM_EFUSE, + (((u32)mac_addr[0]) << 8) | + mac_addr[1])) { + succeed = false; + } + if (!fxgmac_write_patch_to_efuse( + pdata, MACA0LR_FROM_EFUSE, + (((u32)mac_addr[2]) << 24) | + (((u32)mac_addr[3]) << 16) | + (((u32)mac_addr[4]) << 8) | mac_addr[5])) { + succeed = false; + } + } + + if (revid) { + if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_REVID_REGISTER, + *revid)) { + succeed = false; + } + } + if (subsys) { + pcie_cfg_ctrl = FXGMAC_SET_REG_BITS( + pcie_cfg_ctrl, MGMT_PCIE_CFG_CTRL_CS_EN_POS, + MGMT_PCIE_CFG_CTRL_CS_EN_LEN, 1); + if (!fxgmac_write_patch_to_efuse(pdata, MGMT_PCIE_CFG_CTRL, + pcie_cfg_ctrl)) { + succeed = false; + } + if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_SUBSYS_REGISTER, + *subsys)) { + succeed = false; + } + pcie_cfg_ctrl = FXGMAC_SET_REG_BITS( + pcie_cfg_ctrl, MGMT_PCIE_CFG_CTRL_CS_EN_POS, + MGMT_PCIE_CFG_CTRL_CS_EN_LEN, 0); + if (!fxgmac_write_patch_to_efuse(pdata, MGMT_PCIE_CFG_CTRL, + pcie_cfg_ctrl)) { + succeed = false; + } + } + return succeed; +} + +bool fxgmac_write_mac_addr_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr) +{ + u32 machr = 0, maclr = 0; + bool succeed = true; + + if (mac_addr) { + machr = readreg(pdata->pAdapter, + pdata->base_mem + MACA0HR_FROM_EFUSE); + maclr = readreg(pdata->pAdapter, + pdata->base_mem + MACA0LR_FROM_EFUSE); + DPRINTK("Current mac address from efuse is %02x-%02x-%02x-%02x-%02x-%02x.\n", + (machr >> 8) & 0xFF, machr & 0xFF, (maclr >> 24) & 0xFF, + (maclr >> 16) & 0xFF, (maclr >> 8) & 0xFF, + maclr & 0xFF); + + if (!fxgmac_write_patch_to_efuse(pdata, MACA0HR_FROM_EFUSE, + (((u32)mac_addr[0]) << 8) | + mac_addr[1])) { + succeed = false; + } + if (!fxgmac_write_patch_to_efuse( + pdata, MACA0LR_FROM_EFUSE, + (((u32)mac_addr[2]) << 24) | + (((u32)mac_addr[3]) << 16) | + (((u32)mac_addr[4]) << 8) | mac_addr[5])) { + succeed = false; + } + } + + return succeed; +} + +bool fxgmac_read_subsys_from_efuse(struct fxgmac_pdata *pdata, u32 *subsys, + u32 *revid) +{ + u32 offset = 0, value = 0; + u8 index; + bool succeed = true; + + for (index = 0;; index++) { + if (!fxgmac_read_patch_from_efuse_per_index(pdata, index, + &offset, &value)) { + succeed = false; + break; /* reach the last item. */ + } + if (0x00 == offset) { + break; /* reach the blank. 
*/ + } + + if ((EFUSE_REVID_REGISTER == offset) && revid) { + *revid = value; + } else { + succeed = false; + } + if ((EFUSE_SUBSYS_REGISTER == offset) && subsys) { + *subsys = value; + } else { + succeed = false; + } + } + + return succeed; +} + +bool fxgmac_write_subsys_to_efuse(struct fxgmac_pdata *pdata, u32 *subsys, + u32 *revid) +{ + bool succeed = true; + + /* write subsys info */ + if (revid) { + if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_REVID_REGISTER, + *revid)) { + succeed = false; + } + } + if (subsys) { + if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_SUBSYS_REGISTER, + *subsys)) { + succeed = false; + } + } + return succeed; +} + +bool fxgmac_efuse_load(struct fxgmac_pdata *pdata) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_AUTO_LOAD); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (!succeed) { + FXGMAC_PR("Fail to loading efuse, ctrl_1 0x%08x\n", reg_val); + } + return succeed; +} + +bool fxgmac_efuse_read_data(struct fxgmac_pdata *pdata, u32 offset, u32 *value) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val = 0; + + if (value) { + *value = 0; + } + + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, offset); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_READ); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + + if (succeed) { + if (value) { + *value = FXGMAC_GET_REG_BITS(reg_val, + EFUSE_OP_RD_DATA_POS, + EFUSE_OP_RD_DATA_LEN); + } + } else { + FXGMAC_PR("Fail to reading efuse Byte%d\n", offset); + } + + return succeed; +} + +bool fxgmac_efuse_write_oob(struct fxgmac_pdata *pdata) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val, value; + + if (!fxgmac_efuse_read_data(pdata, EFUSE_OOB_ADDR, ®_val)) { + return succeed; + } + + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OOB_POS, EFUSE_OOB_LEN)) { + FXGMAC_PR("OOB Ctrl bit already exists"); + return true; + } + + value = 0; + value = FXGMAC_SET_REG_BITS(value, EFUSE_OOB_POS, EFUSE_OOB_LEN, 1); + + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, EFUSE_OOB_ADDR); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, value & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if 
(FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte OOB"); + } + + return succeed; +} + +bool fxgmac_efuse_write_led(struct fxgmac_pdata *pdata, u32 value) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val; + + if (!fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, ®_val)) { + return succeed; + } + + if (reg_val == value) { + FXGMAC_PR("Led Ctrl option already exists"); + return true; + } + + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, EFUSE_LED_ADDR); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, value & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte LED"); + } + + return succeed; +} + +bool fxgmac_efuse_write_data(struct fxgmac_pdata *pdata, u32 offset, u32 value) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val; + + if (!fxgmac_efuse_read_data(pdata, offset, ®_val)) { + return succeed; + } + + if (reg_val == value) { + FXGMAC_PR("offset 0x%x already exists", offset); + return true; + } + + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, offset & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, value & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + + if (!succeed) { + FXGMAC_PR("Fail to writing efuse 0x%x Byte LED", offset); + } + + return succeed; +} + +static void fxgmac_read_led_efuse_config(struct fxgmac_pdata *pdata, + struct led_setting *pfirst, + struct led_setting *psecond) +{ + u32 val_high = 0, val_low = 0; + + /* read first area */ + fxgmac_efuse_read_data(pdata, EFUSE_FISRT_UPDATE_ADDR, &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 1), &val_low); + pfirst->disable_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 2), &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 3), &val_low); + pfirst->disable_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 4), &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 5), &val_low); + pfirst->disable_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 6), &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 7), &val_low); + 
pfirst->disable_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 8), &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 9), &val_low); + pfirst->disable_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 10), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 11), &val_low); + pfirst->s5_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 12), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 13), &val_low); + pfirst->s5_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 14), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 15), &val_low); + pfirst->s5_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 16), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 17), &val_low); + pfirst->s5_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 18), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 19), &val_low); + pfirst->s5_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 20), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 21), &val_low); + pfirst->s3_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 22), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 23), &val_low); + pfirst->s3_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 24), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 25), &val_low); + pfirst->s3_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 26), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 27), &val_low); + pfirst->s3_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 28), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 29), &val_low); + pfirst->s3_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 30), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 31), &val_low); + pfirst->s0_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 32), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 33), &val_low); + pfirst->s0_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 34), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 35), &val_low); + pfirst->s0_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 36), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 37), &val_low); + pfirst->s0_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 38), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 39), &val_low); + pfirst->s0_led_setting[0] = ((val_high << 8) + val_low); + + /* read second area */ + fxgmac_efuse_read_data(pdata, 
EFUSE_SECOND_UPDATE_ADDR, &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 1), &val_low); + psecond->disable_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 2), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 3), &val_low); + psecond->disable_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 4), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 5), &val_low); + psecond->disable_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 6), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 7), &val_low); + psecond->disable_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 8), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 9), &val_low); + psecond->disable_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 10), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 11), + &val_low); + psecond->s5_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 12), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 13), + &val_low); + psecond->s5_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 14), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 15), + &val_low); + psecond->s5_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 16), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 17), + &val_low); + psecond->s5_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 18), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 19), + &val_low); + psecond->s5_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 20), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 21), + &val_low); + psecond->s3_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 22), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 23), + &val_low); + psecond->s3_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 24), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 25), + &val_low); + psecond->s3_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 26), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 27), + &val_low); + psecond->s3_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 28), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 29), + &val_low); + psecond->s3_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 30), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 31), + &val_low); + psecond->s0_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 32), + &val_high); + 
fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 33), + &val_low); + psecond->s0_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 34), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 35), + &val_low); + psecond->s0_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 36), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 37), + &val_low); + psecond->s0_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 38), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 39), + &val_low); + psecond->s0_led_setting[0] = ((val_high << 8) + val_low); +} + +bool fxgmac_write_led_setting_to_efuse(struct fxgmac_pdata *pdata) +{ + struct led_setting led_config_first; + struct led_setting led_config_second; + bool bfirstflag = false, bsecondflag = false; + bool bsucceed = false; + + fxgmac_read_led_efuse_config(pdata, &led_config_first, + &led_config_second); + + if (0x00 == led_config_first.s0_led_setting[0] && + 0x00 == led_config_first.s0_led_setting[1] && + 0x00 == led_config_first.s0_led_setting[2] && + 0x00 == led_config_first.s0_led_setting[3] && + 0x00 == led_config_first.s0_led_setting[4] && + 0x00 == led_config_first.s3_led_setting[0] && + 0x00 == led_config_first.s3_led_setting[1] && + 0x00 == led_config_first.s3_led_setting[2] && + 0x00 == led_config_first.s3_led_setting[3] && + 0x00 == led_config_first.s3_led_setting[4] && + 0x00 == led_config_first.s5_led_setting[0] && + 0x00 == led_config_first.s5_led_setting[1] && + 0x00 == led_config_first.s5_led_setting[2] && + 0x00 == led_config_first.s5_led_setting[3] && + 0x00 == led_config_first.s5_led_setting[4] && + 0x00 == led_config_first.disable_led_setting[0] && + 0x00 == led_config_first.disable_led_setting[1] && + 0x00 == led_config_first.disable_led_setting[2] && + 0x00 == led_config_first.disable_led_setting[3] && + 0x00 == led_config_first.disable_led_setting[4]) { + bfirstflag = true; + } + + if (0x00 == led_config_second.s0_led_setting[0] && + 0x00 == led_config_second.s0_led_setting[1] && + 0x00 == led_config_second.s0_led_setting[2] && + 0x00 == led_config_second.s0_led_setting[3] && + 0x00 == led_config_second.s0_led_setting[4] && + 0x00 == led_config_second.s3_led_setting[0] && + 0x00 == led_config_second.s3_led_setting[1] && + 0x00 == led_config_second.s3_led_setting[2] && + 0x00 == led_config_second.s3_led_setting[3] && + 0x00 == led_config_second.s3_led_setting[4] && + 0x00 == led_config_second.s5_led_setting[0] && + 0x00 == led_config_second.s5_led_setting[1] && + 0x00 == led_config_second.s5_led_setting[2] && + 0x00 == led_config_second.s5_led_setting[3] && + 0x00 == led_config_second.s5_led_setting[4] && + 0x00 == led_config_second.disable_led_setting[0] && + 0x00 == led_config_second.disable_led_setting[1] && + 0x00 == led_config_second.disable_led_setting[2] && + 0x00 == led_config_second.disable_led_setting[3] && + 0x00 == led_config_second.disable_led_setting[4]) { + bsecondflag = true; + } + + if (bfirstflag && bsecondflag) { + /* update first area */ + fxgmac_efuse_write_data( + pdata, EFUSE_FISRT_UPDATE_ADDR, + (pdata->ledconfig.disable_led_setting[4] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 1), + pdata->ledconfig.disable_led_setting[4]); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 2), + 
(pdata->ledconfig.disable_led_setting[3] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 3), + pdata->ledconfig.disable_led_setting[3]); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 4), + (pdata->ledconfig.disable_led_setting[2] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 5), + pdata->ledconfig.disable_led_setting[2]); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 6), + (pdata->ledconfig.disable_led_setting[1] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 7), + pdata->ledconfig.disable_led_setting[1]); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 8), + (pdata->ledconfig.disable_led_setting[0] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 9), + pdata->ledconfig.disable_led_setting[0]); + + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 10), + (pdata->ledconfig.s5_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 11), + pdata->ledconfig.s5_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 12), + (pdata->ledconfig.s5_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 13), + pdata->ledconfig.s5_led_setting[3]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 14), + (pdata->ledconfig.s5_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 15), + pdata->ledconfig.s5_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 16), + (pdata->ledconfig.s5_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 17), + pdata->ledconfig.s5_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 18), + (pdata->ledconfig.s5_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 19), + pdata->ledconfig.s5_led_setting[0]); + + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 20), + (pdata->ledconfig.s3_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 21), + pdata->ledconfig.s3_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 22), + (pdata->ledconfig.s3_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 23), + pdata->ledconfig.s3_led_setting[3]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 24), + (pdata->ledconfig.s3_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 25), + pdata->ledconfig.s3_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 26), + (pdata->ledconfig.s3_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 27), + pdata->ledconfig.s3_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 28), + (pdata->ledconfig.s3_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 29), + pdata->ledconfig.s3_led_setting[0]); + + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 30), + (pdata->ledconfig.s0_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 31), + pdata->ledconfig.s0_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 32), + (pdata->ledconfig.s0_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 33), + 
pdata->ledconfig.s0_led_setting[3]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 34), + (pdata->ledconfig.s0_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 35), + pdata->ledconfig.s0_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 36), + (pdata->ledconfig.s0_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 37), + pdata->ledconfig.s0_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 38), + (pdata->ledconfig.s0_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 39), + pdata->ledconfig.s0_led_setting[0]); + + bsucceed = true; + } else if (!bfirstflag && bsecondflag) { + /* update second area */ + fxgmac_efuse_write_data( + pdata, EFUSE_SECOND_UPDATE_ADDR, + (pdata->ledconfig.disable_led_setting[4] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 1), + pdata->ledconfig.disable_led_setting[4]); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 2), + (pdata->ledconfig.disable_led_setting[3] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 3), + pdata->ledconfig.disable_led_setting[3]); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 4), + (pdata->ledconfig.disable_led_setting[2] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 5), + pdata->ledconfig.disable_led_setting[2]); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 6), + (pdata->ledconfig.disable_led_setting[1] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 7), + pdata->ledconfig.disable_led_setting[1]); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 8), + (pdata->ledconfig.disable_led_setting[0] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 9), + pdata->ledconfig.disable_led_setting[0]); + + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 10), + (pdata->ledconfig.s5_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 11), + pdata->ledconfig.s5_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 12), + (pdata->ledconfig.s5_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 13), + pdata->ledconfig.s5_led_setting[3]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 14), + (pdata->ledconfig.s5_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 15), + pdata->ledconfig.s5_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 16), + (pdata->ledconfig.s5_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 17), + pdata->ledconfig.s5_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 18), + (pdata->ledconfig.s5_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 19), + pdata->ledconfig.s5_led_setting[0]); + + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 20), + (pdata->ledconfig.s3_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 21), + pdata->ledconfig.s3_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 22), + (pdata->ledconfig.s3_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 23), + 
pdata->ledconfig.s3_led_setting[3]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 24), + (pdata->ledconfig.s3_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 25), + pdata->ledconfig.s3_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 26), + (pdata->ledconfig.s3_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 27), + pdata->ledconfig.s3_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 28), + (pdata->ledconfig.s3_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 29), + pdata->ledconfig.s3_led_setting[0]); + + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 30), + (pdata->ledconfig.s0_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 31), + pdata->ledconfig.s0_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 32), + (pdata->ledconfig.s0_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 33), + pdata->ledconfig.s0_led_setting[3]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 34), + (pdata->ledconfig.s0_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 35), + pdata->ledconfig.s0_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 36), + (pdata->ledconfig.s0_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 37), + pdata->ledconfig.s0_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 38), + (pdata->ledconfig.s0_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 39), + pdata->ledconfig.s0_led_setting[0]); + + bsucceed = true; + } + + return bsucceed; +} + +bool fxgmac_read_led_setting_from_efuse(struct fxgmac_pdata *pdata) +{ + struct led_setting led_config_first; + struct led_setting led_config_second; + bool bfirstflag = false, bsecondflag = false; + bool bsucceed = false; + + fxgmac_read_led_efuse_config(pdata, &led_config_first, + &led_config_second); + + if (0x00 == led_config_first.s0_led_setting[0] && + 0x00 == led_config_first.s0_led_setting[1] && + 0x00 == led_config_first.s0_led_setting[2] && + 0x00 == led_config_first.s0_led_setting[3] && + 0x00 == led_config_first.s0_led_setting[4] && + 0x00 == led_config_first.s3_led_setting[0] && + 0x00 == led_config_first.s3_led_setting[1] && + 0x00 == led_config_first.s3_led_setting[2] && + 0x00 == led_config_first.s3_led_setting[3] && + 0x00 == led_config_first.s3_led_setting[4] && + 0x00 == led_config_first.s5_led_setting[0] && + 0x00 == led_config_first.s5_led_setting[1] && + 0x00 == led_config_first.s5_led_setting[2] && + 0x00 == led_config_first.s5_led_setting[3] && + 0x00 == led_config_first.s5_led_setting[4] && + 0x00 == led_config_first.disable_led_setting[0] && + 0x00 == led_config_first.disable_led_setting[1] && + 0x00 == led_config_first.disable_led_setting[2] && + 0x00 == led_config_first.disable_led_setting[3] && + 0x00 == led_config_first.disable_led_setting[4]) { + bfirstflag = true; + } + + if (0x00 == led_config_second.s0_led_setting[0] && + 0x00 == led_config_second.s0_led_setting[1] && + 0x00 == led_config_second.s0_led_setting[2] && + 0x00 == led_config_second.s0_led_setting[3] && + 0x00 == led_config_second.s0_led_setting[4] && + 0x00 == led_config_second.s3_led_setting[0] && + 0x00 == 
led_config_second.s3_led_setting[1] && + 0x00 == led_config_second.s3_led_setting[2] && + 0x00 == led_config_second.s3_led_setting[3] && + 0x00 == led_config_second.s3_led_setting[4] && + 0x00 == led_config_second.s5_led_setting[0] && + 0x00 == led_config_second.s5_led_setting[1] && + 0x00 == led_config_second.s5_led_setting[2] && + 0x00 == led_config_second.s5_led_setting[3] && + 0x00 == led_config_second.s5_led_setting[4] && + 0x00 == led_config_second.disable_led_setting[0] && + 0x00 == led_config_second.disable_led_setting[1] && + 0x00 == led_config_second.disable_led_setting[2] && + 0x00 == led_config_second.disable_led_setting[3] && + 0x00 == led_config_second.disable_led_setting[4]) { + bsecondflag = true; + } + + if (!bfirstflag && bsecondflag) { + /* read first area */ + memcpy(&pdata->led, &led_config_first, + sizeof(struct led_setting)); + bsucceed = true; + } else if (!bfirstflag && !bsecondflag) { + /* read second area */ + memcpy(&pdata->led, &led_config_second, + sizeof(struct led_setting)); + bsucceed = true; + } + + return bsucceed; +} \ No newline at end of file diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.h new file mode 100644 index 0000000000000000000000000000000000000000..fa0446958719c055f1e6df44e168fc9773d0c49a --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. */ + +#ifndef __FUXI_EFUSE_H__ +#define __FUXI_EFUSE_H__ + + +bool fxgmac_read_patch_from_efuse(struct fxgmac_pdata *pdata, u32 offset, u32 *value); /* read patch per register offset. */ +bool fxgmac_read_patch_from_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, u32 *offset, u32 *value); /* read patch per 0-based index. */ +bool fxgmac_write_patch_to_efuse(struct fxgmac_pdata *pdata, u32 offset, u32 value); +bool fxgmac_write_patch_to_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, u32 offset, u32 value); +bool fxgmac_read_mac_subsys_from_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, u32 *subsys, u32 *revid); +bool fxgmac_write_mac_subsys_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, u32 *subsys, u32 *revid); +bool fxgmac_write_mac_addr_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr); +bool fxgmac_read_subsys_from_efuse(struct fxgmac_pdata *pdata, u32 *subsys, u32 *revid); +bool fxgmac_write_subsys_to_efuse(struct fxgmac_pdata *pdata, u32 *subsys, u32 *revid); +bool fxgmac_efuse_load(struct fxgmac_pdata *pdata); +bool fxgmac_efuse_read_data(struct fxgmac_pdata *pdata, u32 offset, u32 *value); +bool fxgmac_efuse_write_data(struct fxgmac_pdata *pdata, u32 offset, u32 value); +bool fxgmac_efuse_write_oob(struct fxgmac_pdata *pdata); +bool fxgmac_efuse_write_led(struct fxgmac_pdata *pdata, u32 value); +bool fxgmac_read_led_setting_from_efuse(struct fxgmac_pdata *pdata); +bool fxgmac_write_led_setting_to_efuse(struct fxgmac_pdata *pdata); + +#endif /* __FUXI_EFUSE_H__ */ \ No newline at end of file diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c new file mode 100644 index 0000000000000000000000000000000000000000..63cbf948cbfa2fc524d7295a53c282844181b02b --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c @@ -0,0 +1,939 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. 
*/ + +#include +#include + +#include "fuxi-os.h" +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" + +MODULE_LICENSE("Dual BSD/GPL"); + +static int debug = 16; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "FUXI ethernet debug level (0=none,...,16=all)"); + +static unsigned char dev_addr[6] = { 0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7 }; + +static void fxgmac_read_mac_addr(struct fxgmac_pdata *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + DPRINTK("read mac from eFuse\n"); + + /* if efuse have mac addr, use it.if not, use static mac address. */ + hw_ops->read_mac_subsys_from_efuse(pdata, pdata->mac_addr, NULL, NULL); + if (ETH_IS_ZEROADDRESS(pdata->mac_addr)) { + /* Currently it uses a static mac address for test */ + memcpy(pdata->mac_addr, dev_addr, netdev->addr_len); + } +} + +static void fxgmac_default_config(struct fxgmac_pdata *pdata) +{ + pdata->tx_osp_mode = DMA_OSP_ENABLE; + pdata->tx_sf_mode = MTL_TSF_ENABLE; + pdata->rx_sf_mode = MTL_RSF_DISABLE; /* MTL_RSF_DISABLE 20210514 */ + pdata->pblx8 = DMA_PBL_X8_ENABLE; /* DMA_PBL_X8_ENABLE 20210514 */ + pdata->tx_pbl = DMA_PBL_32; + pdata->rx_pbl = DMA_PBL_32; /* DMA_PBL_32 20210514 */ + pdata->tx_threshold = MTL_TX_THRESHOLD_128; + pdata->rx_threshold = MTL_RX_THRESHOLD_128; + pdata->tx_pause = 1; + pdata->rx_pause = 1; + +#if FXGMAC_RSS_FEATURE_ENABLED + pdata->rss = 1; +#else + pdata->rss = 0; +#endif + /* open interrupt moderation default */ + pdata->intr_mod = 1; + pdata->crc_check = 1; + + /* set based on phy status. pdata->phy_speed = SPEED_1000; */ + pdata->sysclk_rate = FXGMAC_SYSCLOCK; + pdata->phy_autoeng = AUTONEG_ENABLE; /* default to autoneg */ + pdata->phy_duplex = DUPLEX_FULL; + pdata->expansion.phy_link = false; + pdata->phy_speed = SPEED_1000; + + /* default to magic */ + pdata->expansion.wol = WAKE_MAGIC; + + strscpy(pdata->drv_name, FXGMAC_DRV_NAME, sizeof(pdata->drv_name)); + strscpy(pdata->drv_ver, FXGMAC_DRV_VERSION, sizeof(pdata->drv_ver)); + + printk("FXGMAC_DRV_NAME:%s, FXGMAC_DRV_VERSION:%s\n", FXGMAC_DRV_NAME, + FXGMAC_DRV_VERSION); +} + +static void fxgmac_init_all_ops(struct fxgmac_pdata *pdata) +{ + fxgmac_init_desc_ops(&pdata->desc_ops); + fxgmac_init_hw_ops(&pdata->hw_ops); + + DPRINTK("register desc_ops and hw ops\n"); +} + +int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct net_device *netdev = pdata->netdev; + unsigned int i, dma_width; + int ret; + + /* Set all the function pointers */ + fxgmac_init_all_ops(pdata); + + /* Set default configuration data */ + fxgmac_default_config(pdata); + + /* Set irq, base_addr, MAC address, */ + netdev->irq = pdata->dev_irq; + netdev->base_addr = (unsigned long)pdata->base_mem; + fxgmac_read_mac_addr(pdata); + eth_hw_addr_set(netdev, pdata->mac_addr); + + if (save_private_reg) { + hw_ops->save_nonstick_reg(pdata); + } + + /* reset here to get hw features correctly */ + hw_ops->exit(pdata); + + /* Populate the hardware features */ + fxgmac_get_all_hw_features(pdata); + fxgmac_print_all_hw_features(pdata); + + /* TODO: Set the PHY mode to XLGMII */ + + /* Set the DMA mask */ +#ifdef CONFIG_ARM64 + dma_width = FUXI_DMA_BIT_MASK; +#else + dma_width = pdata->hw_feat.dma_width; +#endif + ret = dma_set_mask_and_coherent(pdata->dev, DMA_BIT_MASK(dma_width)); + if (ret) { + dev_err(pdata->dev, "dma_set_mask_and_coherent failed\n"); + return ret; + } + + /* Channel and ring params initializtion + * pdata->channel_count; + * 
pdata->tx_ring_count; + * pdata->rx_ring_count; + * pdata->tx_desc_count; + * pdata->rx_desc_count; + */ + BUILD_BUG_ON_NOT_POWER_OF_2(FXGMAC_TX_DESC_CNT); + pdata->tx_desc_count = FXGMAC_TX_DESC_CNT; + if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) { + dev_err(pdata->dev, "tx descriptor count (%d) is not valid\n", + pdata->tx_desc_count); + ret = -EINVAL; + return ret; + } + BUILD_BUG_ON_NOT_POWER_OF_2(FXGMAC_RX_DESC_CNT); + pdata->rx_desc_count = FXGMAC_RX_DESC_CNT; + if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) { + dev_err(pdata->dev, "rx descriptor count (%d) is not valid\n", + pdata->rx_desc_count); + ret = -EINVAL; + return ret; + } + + pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), + pdata->hw_feat.tx_ch_cnt); + pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count, + pdata->hw_feat.tx_q_cnt); + pdata->tx_q_count = pdata->tx_ring_count; + +#if !(FXGMAC_NUM_OF_TX_Q_USED) + ret = netif_set_real_num_tx_queues(netdev, pdata->tx_q_count); +#else + ret = netif_set_real_num_tx_queues( + netdev, FXGMAC_NUM_OF_TX_Q_USED /*pdata->tx_q_count*/); +#endif + + DPRINTK("num_online_cpus:%u, tx_ch_cnt:%u, tx_q_cnt:%u, tx_ring_count:%u\n", + num_online_cpus(), pdata->hw_feat.tx_ch_cnt, + pdata->hw_feat.tx_q_cnt, pdata->tx_ring_count); + + if (ret) { + dev_err(pdata->dev, "error setting real tx queue count\n"); + return ret; + } + + pdata->rx_ring_count = min_t(unsigned int, + netif_get_num_default_rss_queues(), + pdata->hw_feat.rx_ch_cnt); +#ifdef FXGMAC_ONE_CHANNEL + pdata->rx_ring_count = 1; + pdata->hw_feat.rx_q_cnt = pdata->rx_ring_count; +#else + pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count, + pdata->hw_feat.rx_q_cnt); +#endif + pdata->rx_q_count = pdata->rx_ring_count; + ret = netif_set_real_num_rx_queues(netdev, pdata->rx_q_count); + if (ret) { + dev_err(pdata->dev, "error setting real rx queue count\n"); + return ret; + } + + pdata->channel_count = + max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); + + DPRINTK("default rss queues:%u, rx_ch_cnt:%u, rx_q_cnt:%u, rx_ring_count:%u\n", + netif_get_num_default_rss_queues(), pdata->hw_feat.rx_ch_cnt, + pdata->hw_feat.rx_q_cnt, pdata->rx_ring_count); + DPRINTK("channel_count:%u, netdev tx channel_num=%u\n", + pdata->channel_count, netdev->num_tx_queues); + + /* Initialize RSS hash key and lookup table */ +#if FXGMAC_RSS_HASH_KEY_LINUX + netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); +#else + /* this is for test only. 
HW does not want to change Hash key */ + hw_ops->get_rss_hash_key(pdata, (u8 *)pdata->rss_key); +#endif + +#if FXGMAC_MSIX_CH0RXDIS_EN + for (i = 0; i < FXGMAC_RSS_MAX_TABLE_SIZE; i++) { + pdata->rss_table[i] = FXGMAC_SET_REG_BITS( + pdata->rss_table[i], MAC_RSSDR_DMCH_POS, + MAC_RSSDR_DMCH_LEN, (i % 3) + 1); /* eliminate ch0 */ + } +#else + for (i = 0; i < FXGMAC_RSS_MAX_TABLE_SIZE; i++) { + pdata->rss_table[i] = FXGMAC_SET_REG_BITS( + pdata->rss_table[i], MAC_RSSDR_DMCH_POS, + MAC_RSSDR_DMCH_LEN, + /* note, rx_ring_count should be equal to IRQ requsted + * for MSIx, 4 + */ + i % pdata->rx_ring_count); + } +#endif + + pdata->rss_options = FXGMAC_SET_REG_BITS(pdata->rss_options, + MAC_RSSCR_IP4TE_POS, + MAC_RSSCR_IP4TE_LEN, 1); + pdata->rss_options = FXGMAC_SET_REG_BITS(pdata->rss_options, + MAC_RSSCR_TCP4TE_POS, + MAC_RSSCR_TCP4TE_LEN, 1); + pdata->rss_options = FXGMAC_SET_REG_BITS(pdata->rss_options, + MAC_RSSCR_UDP4TE_POS, + MAC_RSSCR_UDP4TE_LEN, 1); + + /* config MTU supported, 20210726 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = + FXGMAC_JUMBO_PACKET_MTU + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + /* + * netdev->extended->min_mtu = netdev->min_mtu; + * netdev->extended->max_mtu = netdev->max_mtu; + */ + + DPRINTK("rss_options:0x%x\n", pdata->rss_options); + + /* Set device operations */ + netdev->netdev_ops = fxgmac_get_netdev_ops(); + netdev->ethtool_ops = fxgmac_get_ethtool_ops(); + + /* Set device features */ + if (pdata->hw_feat.tso) { + netdev->hw_features = NETIF_F_TSO; + netdev->hw_features |= NETIF_F_TSO6; + netdev->hw_features |= NETIF_F_SG; + netdev->hw_features |= NETIF_F_IP_CSUM; + netdev->hw_features |= NETIF_F_IPV6_CSUM; + } else if (pdata->hw_feat.tx_coe) { + netdev->hw_features = NETIF_F_IP_CSUM; + netdev->hw_features |= NETIF_F_IPV6_CSUM; + } + + if (pdata->hw_feat.rx_coe) { + netdev->hw_features |= NETIF_F_RXCSUM; + netdev->hw_features |= NETIF_F_GRO; + } + + if (pdata->hw_feat.rss) { + netdev->hw_features |= + NETIF_F_RXHASH; /* it is NETIF_F_RXHASH_BIT finally */ + } + + netdev->vlan_features |= netdev->hw_features; + + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + pdata->vlan_strip = 1; + if (pdata->hw_feat.sa_vlan_ins) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + } +#if FXGMAC_FILTER_SINGLE_VLAN_ENABLED + /* only can filter one vlan id */ + pdata->hw_feat.vlhash = 1; +#else + pdata->hw_feat.vlhash = 0; +#endif + + if (pdata->hw_feat.vlhash) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + pdata->vlan_filter = 1; + } + + netdev->features |= netdev->hw_features; + pdata->expansion.netdev_features = netdev->features; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* Use default watchdog timeout */ + netdev->watchdog_timeo = + msecs_to_jiffies(5000); /* refer to sunxi-gmac, 5s */ + netdev->gso_max_size = NIC_MAX_TCP_OFFLOAD_SIZE; + + /* Tx coalesce parameters initialization */ + pdata->tx_usecs = FXGMAC_INIT_DMA_TX_USECS; + pdata->tx_frames = FXGMAC_INIT_DMA_TX_FRAMES; + + /* Rx coalesce parameters initialization */ + pdata->rx_riwt = hw_ops->usec_to_riwt(pdata, FXGMAC_INIT_DMA_RX_USECS); + + pdata->rx_usecs = FXGMAC_INIT_DMA_RX_USECS; + pdata->rx_frames = FXGMAC_INIT_DMA_RX_FRAMES; + + DPRINTK("fxgmac_init callout, ok.\n"); + + return 0; +} + +static void fxgmac_init_interrupt_scheme(struct fxgmac_pdata *pdata) +{ +#ifdef CONFIG_PCI_MSI + int vectors, rc, i, req_vectors; + /* check cpu core number. 
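+	 * One vector is needed per RX channel, plus one for TX and one
+	 * for the PHY (FXGMAC_MSIX_INT_NUMS in total).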
+ * since we have 4 channels, we must ensure the number of cpu core > 4 + * otherwise, just roll back to legacy + */ + vectors = num_online_cpus(); + DPRINTK("num of cpu=%d\n", vectors); + if (vectors >= FXGMAC_MAX_DMA_CHANNELS) { + /* 0-3 for rx, 4 for tx, 5 for phy */ + req_vectors = FXGMAC_MSIX_INT_NUMS; + pdata->expansion.msix_entries = kcalloc( + req_vectors, sizeof(struct msix_entry), GFP_KERNEL); + if (!pdata->expansion.msix_entries) { + DPRINTK("MSIx, kcalloc err for msix entries, rollback to MSI..\n"); + goto enable_msi_interrupt; + } else { + for (i = 0; i < req_vectors; i++) + pdata->expansion.msix_entries[i].entry = i; + + rc = pci_enable_msix_range( + pdata->pdev, pdata->expansion.msix_entries, + req_vectors, req_vectors); + if (rc < 0) { + DPRINTK("enable MSIx failed,%d.\n", rc); + req_vectors = 0; /* indicate failure */ + } else { + req_vectors = rc; + } + + if (req_vectors >= FXGMAC_MAX_DMA_CHANNELS_PLUS_1TX) { + DPRINTK("enable MSIx ok, cpu=%d, vectors=%d.\n", + vectors, req_vectors); + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_INTERRUPT_POS, + FXGMAC_FLAG_INTERRUPT_LEN, + FXGMAC_FLAG_MSIX_ENABLED); + pdata->per_channel_irq = 1; + pdata->expansion.phy_irq = + pdata->expansion + .msix_entries[MSI_ID_PHY_OTHER] + .vector; + return; + } else if (req_vectors) { + DPRINTK("enable MSIx with only %d vector, while we need %d, rollback to MSI.\n", + req_vectors, vectors); + /* roll back to msi */ + pci_disable_msix(pdata->pdev); + kfree(pdata->expansion.msix_entries); + pdata->expansion.msix_entries = NULL; + req_vectors = 0; + } else { + DPRINTK("enable MSIx failure and clear msix entries.\n"); + /* roll back to msi */ + kfree(pdata->expansion.msix_entries); + pdata->expansion.msix_entries = NULL; + req_vectors = 0; + } + } + } + +enable_msi_interrupt: + rc = pci_enable_msi(pdata->pdev); + if (rc < 0) { + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS, + FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_LEGACY_ENABLED); + DPRINTK("enable MSI failure, rollback to LEGACY.\n"); + } else { + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS, + FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_MSI_ENABLED); + pdata->dev_irq = pdata->pdev->irq; + DPRINTK("enable MSI ok, irq=%d.\n", pdata->pdev->irq); + } +#else + pdata = pdata; +#endif +} + +int fxgmac_drv_probe(struct device *dev, struct fxgmac_resources *res) +{ + struct fxgmac_pdata *pdata; + struct net_device *netdev; + int ret; + + netdev = alloc_etherdev_mq(sizeof(struct fxgmac_pdata), + FXGMAC_MAX_DMA_CHANNELS); + + if (!netdev) { + dev_err(dev, "alloc_etherdev failed\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(netdev, dev); + dev_set_drvdata(dev, netdev); + pdata = netdev_priv(netdev); + pdata->dev = dev; + pdata->pdev = to_pci_dev(dev); + pdata->netdev = netdev; + + pdata->dev_irq = res->irq; + + /* default to legacy interrupt */ + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS, + FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_LEGACY_ENABLED); + pdata->expansion.phy_irq = pdata->dev_irq; + + fxgmac_init_interrupt_scheme(pdata); + + pdata->expansion.current_state = CURRENT_STATE_INIT; + + pdata->msg_enable = NETIF_MSG_DRV; + DPRINTK("netif msg_enable init to %08x\n", pdata->msg_enable); + + pdata->mac_regs = res->addr; + pdata->base_mem = res->addr; + pdata->mac_regs = pdata->mac_regs + FUXI_MAC_REGS_OFFSET; + + ret = 
+int fxgmac_drv_probe(struct device *dev, struct fxgmac_resources *res)
+{
+	struct fxgmac_pdata *pdata;
+	struct net_device *netdev;
+	int ret;
+
+	netdev = alloc_etherdev_mq(sizeof(struct fxgmac_pdata),
+				   FXGMAC_MAX_DMA_CHANNELS);
+	if (!netdev) {
+		dev_err(dev, "alloc_etherdev failed\n");
+		return -ENOMEM;
+	}
+
+	SET_NETDEV_DEV(netdev, dev);
+	dev_set_drvdata(dev, netdev);
+	pdata = netdev_priv(netdev);
+	pdata->dev = dev;
+	pdata->pdev = to_pci_dev(dev);
+	pdata->netdev = netdev;
+
+	pdata->dev_irq = res->irq;
+
+	/* default to legacy interrupt */
+	pdata->expansion.int_flags = FXGMAC_SET_REG_BITS(
+		pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS,
+		FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_LEGACY_ENABLED);
+	pdata->expansion.phy_irq = pdata->dev_irq;
+
+	fxgmac_init_interrupt_scheme(pdata);
+
+	pdata->expansion.current_state = CURRENT_STATE_INIT;
+
+	pdata->msg_enable = NETIF_MSG_DRV;
+	DPRINTK("netif msg_enable init to %08x\n", pdata->msg_enable);
+
+	pdata->base_mem = res->addr;
+	pdata->mac_regs = res->addr + FUXI_MAC_REGS_OFFSET;
+
+	ret = fxgmac_init(pdata, true);
+	if (ret) {
+		dev_err(dev, "fxgmac init failed\n");
+		goto err_free_netdev;
+	}
+
+	pdata->hw_ops.read_led_config(pdata);
+
+	netif_carrier_off(netdev);
+	ret = register_netdev(netdev);
+	if (ret) {
+		dev_err(dev, "net device registration failed\n");
+		goto err_free_netdev;
+	}
+	if (netif_msg_drv(pdata))
+		DPRINTK("fxgmac_drv_probe callout, netdev num_tx_q=%u\n",
+			netdev->num_tx_queues);
+
+#ifdef HAVE_FXGMAC_DEBUG_FS
+	fxgmac_dbg_init(pdata);
+	fxgmac_dbg_adapter_init(pdata);
+#endif /* HAVE_FXGMAC_DEBUG_FS */
+
+	return 0;
+
+err_free_netdev:
+	free_netdev(netdev);
+	DPRINTK("fxgmac_drv_probe callout with error\n");
+
+	return ret;
+}
+
+int fxgmac_drv_remove(struct device *dev)
+{
+	struct net_device *netdev = dev_get_drvdata(dev);
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+#ifdef HAVE_FXGMAC_DEBUG_FS
+	fxgmac_dbg_adapter_exit(pdata);
+#endif /* HAVE_FXGMAC_DEBUG_FS */
+	hw_ops->led_under_shutdown(pdata);
+
+	unregister_netdev(netdev);
+	free_netdev(netdev);
+
+	return 0;
+}
+
+void fxgmac_dump_tx_desc(struct fxgmac_pdata *pdata, struct fxgmac_ring *ring,
+			 unsigned int idx, unsigned int count,
+			 unsigned int flag)
+{
+	struct fxgmac_desc_data *desc_data;
+	struct fxgmac_dma_desc *dma_desc;
+
+	while (count--) {
+		desc_data = FXGMAC_GET_DESC_DATA(ring, idx);
+		dma_desc = desc_data->dma_desc;
+
+		netdev_dbg(pdata->netdev,
+			   "TX: dma_desc=%p, dma_desc_addr=%pad\n",
+			   desc_data->dma_desc, &desc_data->dma_desc_addr);
+		netdev_dbg(pdata->netdev,
+			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+			   le32_to_cpu(dma_desc->desc0),
+			   le32_to_cpu(dma_desc->desc1),
+			   le32_to_cpu(dma_desc->desc2),
+			   le32_to_cpu(dma_desc->desc3));
+
+		idx++;
+	}
+}
+
+void fxgmac_dump_rx_desc(struct fxgmac_pdata *pdata, struct fxgmac_ring *ring,
+			 unsigned int idx)
+{
+	struct fxgmac_desc_data *desc_data;
+	struct fxgmac_dma_desc *dma_desc;
+
+	desc_data = FXGMAC_GET_DESC_DATA(ring, idx);
+	dma_desc = desc_data->dma_desc;
+
+	netdev_dbg(pdata->netdev, "RX: dma_desc=%p, dma_desc_addr=%pad\n",
+		   desc_data->dma_desc, &desc_data->dma_desc_addr);
+	netdev_dbg(pdata->netdev,
+		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
+		   idx, le32_to_cpu(dma_desc->desc0),
+		   le32_to_cpu(dma_desc->desc1), le32_to_cpu(dma_desc->desc2),
+		   le32_to_cpu(dma_desc->desc3));
+}
+
+void fxgmac_dbg_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
+{
+	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	unsigned char buffer[128];
+	unsigned int i;
+
+	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
+
+	netdev_dbg(netdev, "%s packet of %d bytes\n", (tx_rx ?
"TX" : "RX"), + skb->len); + + netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest); + netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); + netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); + + for (i = 0; i < skb->len; i += 32) { + unsigned int len = min(skb->len - i, 32U); + + hex_dump_to_buffer(&skb->data[i], len, 32, 1, buffer, + sizeof(buffer), false); + netdev_dbg(netdev, " %#06x: %s\n", i, buffer); + } + + netdev_dbg(netdev, "\n************** SKB dump ****************\n"); +} + +void fxgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, + bool tx_rx) +{ + unsigned char buffer[128]; + unsigned int i; + + for (i = 0; i < skb->len; i += 32) { + unsigned int len = min(skb->len - i, 32U); + + hex_dump_to_buffer(&skb->data[i], len, 32, 1, buffer, + sizeof(buffer), false); + DPRINTK(" %#06x: %s\n", i, buffer); + } +} + +void fxgmac_get_all_hw_features(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_features *hw_feat = &pdata->hw_feat; + unsigned int mac_hfr0, mac_hfr1, mac_hfr2, mac_hfr3; + + mac_hfr0 = readl(pdata->mac_regs + MAC_HWF0R); + mac_hfr1 = readl(pdata->mac_regs + MAC_HWF1R); + mac_hfr2 = readl(pdata->mac_regs + MAC_HWF2R); + mac_hfr3 = readl(pdata->mac_regs + MAC_HWF3R); + + memset(hw_feat, 0, sizeof(*hw_feat)); + + hw_feat->version = readl(pdata->mac_regs + MAC_VR); + if (netif_msg_drv(pdata)) + DPRINTK("get offset 0x110, ver=%#x\n", + readl(pdata->mac_regs + 0x110)); + + /* Hardware feature register 0 */ + hw_feat->phyifsel = FXGMAC_GET_REG_BITS( + mac_hfr0, MAC_HWF0R_ACTPHYIFSEL_POS, MAC_HWF0R_ACTPHYIFSEL_LEN); + hw_feat->vlhash = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_VLHASH_POS, + MAC_HWF0R_VLHASH_LEN); + hw_feat->sma = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_SMASEL_POS, + MAC_HWF0R_SMASEL_LEN); + hw_feat->rwk = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_RWKSEL_POS, + MAC_HWF0R_RWKSEL_LEN); + hw_feat->mgk = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_MGKSEL_POS, + MAC_HWF0R_MGKSEL_LEN); + hw_feat->mmc = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_MMCSEL_POS, + MAC_HWF0R_MMCSEL_LEN); + hw_feat->aoe = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_ARPOFFSEL_POS, + MAC_HWF0R_ARPOFFSEL_LEN); + hw_feat->ts = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_TSSEL_POS, + MAC_HWF0R_TSSEL_LEN); + hw_feat->eee = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_EEESEL_POS, + MAC_HWF0R_EEESEL_LEN); + hw_feat->tx_coe = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_TXCOESEL_POS, + MAC_HWF0R_TXCOESEL_LEN); + hw_feat->rx_coe = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_RXCOESEL_POS, + MAC_HWF0R_RXCOESEL_LEN); + hw_feat->addn_mac = FXGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_ADDMACADRSEL_POS, + MAC_HWF0R_ADDMACADRSEL_LEN); + hw_feat->ts_src = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_TSSTSSEL_POS, + MAC_HWF0R_TSSTSSEL_LEN); + hw_feat->sa_vlan_ins = FXGMAC_GET_REG_BITS( + mac_hfr0, MAC_HWF0R_SAVLANINS_POS, MAC_HWF0R_SAVLANINS_LEN); + + /* Hardware feature register 1 */ + hw_feat->rx_fifo_size = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_RXFIFOSIZE_POS, MAC_HWF1R_RXFIFOSIZE_LEN); + hw_feat->tx_fifo_size = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_TXFIFOSIZE_POS, MAC_HWF1R_TXFIFOSIZE_LEN); + hw_feat->adv_ts_hi = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_ADVTHWORD_POS, MAC_HWF1R_ADVTHWORD_LEN); + hw_feat->dma_width = FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_ADDR64_POS, + MAC_HWF1R_ADDR64_LEN); + hw_feat->dcb = FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_DCBEN_POS, + MAC_HWF1R_DCBEN_LEN); + hw_feat->sph = FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_SPHEN_POS, + MAC_HWF1R_SPHEN_LEN); + hw_feat->tso = 
FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_TSOEN_POS, + MAC_HWF1R_TSOEN_LEN); + hw_feat->dma_debug = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_DBGMEMA_POS, MAC_HWF1R_DBGMEMA_LEN); +#if (FXGMAC_RSS_FEATURE_ENABLED) + hw_feat->rss = 1; +#else + /* = FXGMAC_GET_REG_BITS(mac_hfr1, + * MAC_HWF1R_RSSEN_POS, + * MAC_HWF1R_RSSEN_LEN); + */ + hw_feat->rss = 0; +#endif + /* FXGMAC_GET_REG_BITS(mac_hfr1, + * MAC_HWF1R_NUMTC_POS, + * MAC_HWF1R_NUMTC_LEN); + */ + hw_feat->tc_cnt = 3; + hw_feat->avsel = FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_AVSEL_POS, + MAC_HWF1R_AVSEL_LEN); + hw_feat->ravsel = FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_RAVSEL_POS, + MAC_HWF1R_RAVSEL_LEN); + hw_feat->hash_table_size = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_HASHTBLSZ_POS, MAC_HWF1R_HASHTBLSZ_LEN); + hw_feat->l3l4_filter_num = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_L3L4FNUM_POS, MAC_HWF1R_L3L4FNUM_LEN); + + + /* Hardware feature register 2 + * FXGMAC_GET_REG_BITS(mac_hfr2, + * MAC_HWF2R_RXQCNT_POS, + * MAC_HWF2R_RXQCNT_LEN) + */ + hw_feat->rx_q_cnt = 3; + hw_feat->tx_q_cnt = FXGMAC_GET_REG_BITS(mac_hfr2, MAC_HWF2R_TXQCNT_POS, + MAC_HWF2R_TXQCNT_LEN); + hw_feat->rx_ch_cnt = FXGMAC_GET_REG_BITS( + mac_hfr2, MAC_HWF2R_RXCHCNT_POS, MAC_HWF2R_RXCHCNT_LEN); + hw_feat->tx_ch_cnt = FXGMAC_GET_REG_BITS( + mac_hfr2, MAC_HWF2R_TXCHCNT_POS, MAC_HWF2R_TXCHCNT_LEN); + hw_feat->pps_out_num = FXGMAC_GET_REG_BITS( + mac_hfr2, MAC_HWF2R_PPSOUTNUM_POS, MAC_HWF2R_PPSOUTNUM_LEN); + hw_feat->aux_snap_num = FXGMAC_GET_REG_BITS( + mac_hfr2, MAC_HWF2R_AUXSNAPNUM_POS, MAC_HWF2R_AUXSNAPNUM_LEN); + + /* Translate the Hash Table size into actual number */ + switch (hw_feat->hash_table_size) { + case 0: + break; + case 1: + hw_feat->hash_table_size = 64; + break; + case 2: + hw_feat->hash_table_size = 128; + break; + case 3: + hw_feat->hash_table_size = 256; + break; + } + + /* Translate the address width setting into actual number */ + switch (hw_feat->dma_width) { + case 0: + hw_feat->dma_width = 32; + break; + case 1: + hw_feat->dma_width = 40; + break; + case 2: + hw_feat->dma_width = 48; + break; + default: + hw_feat->dma_width = 32; + } + + /* The Queue, Channel and TC counts are zero based so increment them + * to get the actual number + */ + hw_feat->rx_q_cnt++; + hw_feat->tx_q_cnt++; + hw_feat->rx_ch_cnt++; + hw_feat->tx_ch_cnt++; + hw_feat->tc_cnt++; + + hw_feat->hwfr3 = mac_hfr3; + DPRINTK("HWFR3: %u\n", mac_hfr3); +} + +void fxgmac_print_all_hw_features(struct fxgmac_pdata *pdata) +{ + char *str = NULL; + + DPRINTK("\n"); + DPRINTK("=====================================================\n"); + DPRINTK("\n"); + DPRINTK("HW support following features, ver=%#x\n", + pdata->hw_feat.version); + DPRINTK("\n"); + /* HW Feature Register0 */ + DPRINTK("VLAN Hash Filter Selected : %s\n", + pdata->hw_feat.vlhash ? "YES" : "NO"); + DPRINTK("SMA (MDIO) Interface : %s\n", + pdata->hw_feat.sma ? "YES" : "NO"); + DPRINTK("PMT Remote Wake-up Packet Enable : %s\n", + pdata->hw_feat.rwk ? "YES" : "NO"); + DPRINTK("PMT Magic Packet Enable : %s\n", + pdata->hw_feat.mgk ? "YES" : "NO"); + DPRINTK("RMON/MMC Module Enable : %s\n", + pdata->hw_feat.mmc ? "YES" : "NO"); + DPRINTK("ARP Offload Enabled : %s\n", + pdata->hw_feat.aoe ? "YES" : "NO"); + DPRINTK("IEEE 1588-2008 Timestamp Enabled : %s\n", + pdata->hw_feat.ts ? "YES" : "NO"); + DPRINTK("Energy Efficient Ethernet Enabled : %s\n", + pdata->hw_feat.eee ? "YES" : "NO"); + DPRINTK("Transmit Checksum Offload Enabled : %s\n", + pdata->hw_feat.tx_coe ? 
"YES" : "NO"); + DPRINTK("Receive Checksum Offload Enabled : %s\n", + pdata->hw_feat.rx_coe ? "YES" : "NO"); + DPRINTK("Additional MAC Addresses 1-31 Selected : %s\n", + pdata->hw_feat.addn_mac ? "YES" : "NO"); + + switch (pdata->hw_feat.ts_src) { + case 0: + str = "RESERVED"; + break; + case 1: + str = "INTERNAL"; + break; + case 2: + str = "EXTERNAL"; + break; + case 3: + str = "BOTH"; + break; + } + DPRINTK("Timestamp System Time Source : %s\n", str); + + DPRINTK("Source Address or VLAN Insertion Enable : %s\n", + pdata->hw_feat.sa_vlan_ins ? "YES" : "NO"); + + /* HW Feature Register1 */ + switch (pdata->hw_feat.rx_fifo_size) { + case 0: + str = "128 bytes"; + break; + case 1: + str = "256 bytes"; + break; + case 2: + str = "512 bytes"; + break; + case 3: + str = "1 KBytes"; + break; + case 4: + str = "2 KBytes"; + break; + case 5: + str = "4 KBytes"; + break; + case 6: + str = "8 KBytes"; + break; + case 7: + str = "16 KBytes"; + break; + case 8: + str = "32 kBytes"; + break; + case 9: + str = "64 KBytes"; + break; + case 10: + str = "128 KBytes"; + break; + case 11: + str = "256 KBytes"; + break; + default: + str = "RESERVED"; + } + DPRINTK("MTL Receive FIFO Size : %s\n", str); + + switch (pdata->hw_feat.tx_fifo_size) { + case 0: + str = "128 bytes"; + break; + case 1: + str = "256 bytes"; + break; + case 2: + str = "512 bytes"; + break; + case 3: + str = "1 KBytes"; + break; + case 4: + str = "2 KBytes"; + break; + case 5: + str = "4 KBytes"; + break; + case 6: + str = "8 KBytes"; + break; + case 7: + str = "16 KBytes"; + break; + case 8: + str = "32 kBytes"; + break; + case 9: + str = "64 KBytes"; + break; + case 10: + str = "128 KBytes"; + break; + case 11: + str = "256 KBytes"; + break; + default: + str = "RESERVED"; + } + DPRINTK("MTL Transmit FIFO Size : %s\n", str); + + DPRINTK("IEEE 1588 High Word Register Enable : %s\n", + pdata->hw_feat.adv_ts_hi ? "YES" : "NO"); + DPRINTK("Address width : %u\n", + pdata->hw_feat.dma_width); + DPRINTK("DCB Feature Enable : %s\n", + pdata->hw_feat.dcb ? "YES" : "NO"); + DPRINTK("Split Header Feature Enable : %s\n", + pdata->hw_feat.sph ? "YES" : "NO"); + DPRINTK("TCP Segmentation Offload Enable : %s\n", + pdata->hw_feat.tso ? "YES" : "NO"); + DPRINTK("DMA Debug Registers Enabled : %s\n", + pdata->hw_feat.dma_debug ? "YES" : "NO"); + DPRINTK("RSS Feature Enabled : %s\n", + pdata->hw_feat.rss ? "YES" : "NO"); + DPRINTK("*TODO*Number of Traffic classes : %u\n", + (pdata->hw_feat.tc_cnt)); + DPRINTK("AV Feature Enabled : %s\n", + pdata->hw_feat.avsel ? "YES" : "NO"); + DPRINTK("Rx Side Only AV Feature Enabled : %s\n", + (pdata->hw_feat.ravsel ? 
"YES" : "NO")); + DPRINTK("Hash Table Size : %u\n", + pdata->hw_feat.hash_table_size); + DPRINTK("Total number of L3 or L4 Filters : %u\n", + pdata->hw_feat.l3l4_filter_num); + + /* HW Feature Register2 */ + DPRINTK("Number of MTL Receive Queues : %u\n", + pdata->hw_feat.rx_q_cnt); + DPRINTK("Number of MTL Transmit Queues : %u\n", + pdata->hw_feat.tx_q_cnt); + DPRINTK("Number of DMA Receive Channels : %u\n", + pdata->hw_feat.rx_ch_cnt); + DPRINTK("Number of DMA Transmit Channels : %u\n", + pdata->hw_feat.tx_ch_cnt); + + switch (pdata->hw_feat.pps_out_num) { + case 0: + str = "No PPS output"; + break; + case 1: + str = "1 PPS output"; + break; + case 2: + str = "2 PPS output"; + break; + case 3: + str = "3 PPS output"; + break; + case 4: + str = "4 PPS output"; + break; + default: + str = "RESERVED"; + } + DPRINTK("Number of PPS Outputs : %s\n", str); + + switch (pdata->hw_feat.aux_snap_num) { + case 0: + str = "No auxiliary input"; + break; + case 1: + str = "1 auxiliary input"; + break; + case 2: + str = "2 auxiliary input"; + break; + case 3: + str = "3 auxiliary input"; + break; + case 4: + str = "4 auxiliary input"; + break; + default: + str = "RESERVED"; + } + DPRINTK("Number of Auxiliary Snapshot Inputs : %s", str); + + DPRINTK("\n"); + DPRINTK("=====================================================\n"); + DPRINTK("\n"); +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..4596d91b6e28284d81b51b37c133f9ad0acce314 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c @@ -0,0 +1,787 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. */ + +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" +#ifdef HAVE_FXGMAC_DEBUG_FS +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TEST_MAC_HEAD 14 +#define TEST_TCP_HEAD_LEN_OFFSET 12 +#define TEST_TCP_OFFLOAD_LEN_OFFSET 48 +#define TEST_TCP_FIX_HEAD_LEN 24 +#define TEST_TCP_MSS_OFFSET 56 + +#define DF_MAX_NIC_NUM 16 + +#ifdef HAVE_FXGMAC_DEBUG_FS + +/** + * fxgmac_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t fxgmac_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, size_t count, + loff_t *ppos) +{ + struct fxgmac_pdata *pdata = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", pdata->netdev->name, + pdata->expansion.fxgmac_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * fxgmac_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t fxgmac_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct fxgmac_pdata *pdata = filp->private_data; + int len; + + /* don't allow partial writes */ + if 
(*ppos != 0) + return 0; + if (count >= sizeof(pdata->expansion.fxgmac_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer( + pdata->expansion.fxgmac_dbg_netdev_ops_buf, + sizeof(pdata->expansion.fxgmac_dbg_netdev_ops_buf) - 1, ppos, + buffer, count); + if (len < 0) + return len; + + pdata->expansion.fxgmac_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(pdata->expansion.fxgmac_dbg_netdev_ops_buf, "tx_timeout", + 10) == 0) { + DPRINTK("tx_timeout called\n"); + } else { + FXGMAC_PR("Unknown command: %s\n", + pdata->expansion.fxgmac_dbg_netdev_ops_buf); + FXGMAC_PR("Available commands:\n"); + FXGMAC_PR(" tx_timeout\n"); + } + return count; +} +#endif + +static void fxgmac_dbg_tx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) +{ + unsigned int pktLen = 0; + struct sk_buff *skb; + pfxgmac_test_packet pPkt; + u8 *pTx_data = NULL; + u8 *pSkb_data = NULL; + u32 offload_len = 0; + u8 ipHeadLen, tcpHeadLen, headTotalLen; + static u32 lastGsoSize = 806; /* initial default value */ + + /* get fxgmac_test_packet */ + pPkt = (pfxgmac_test_packet)(pcmd_data + sizeof(struct ext_ioctl_data)); + pktLen = pPkt->length; + + /* get pkt data */ + pTx_data = (u8 *)pPkt + sizeof(fxgmac_test_packet); + + /* alloc sk_buff */ + skb = alloc_skb(pktLen, GFP_ATOMIC); + if (!skb) { + DPRINTK("alloc skb fail\n"); + return; + } + + /* copy data to skb */ + pSkb_data = skb_put(skb, pktLen); + memset(pSkb_data, 0, pktLen); + memcpy(pSkb_data, pTx_data, pktLen); + + /* set skb parameters */ + skb->dev = pdata->netdev; + skb->pkt_type = PACKET_OUTGOING; + skb->protocol = ntohs(ETH_P_IP); + skb->no_fcs = 1; + skb->ip_summed = CHECKSUM_PARTIAL; + if (skb->len > 1514) { + /* TSO packet */ + /* set tso test flag */ + pdata->expansion.fxgmac_test_tso_flag = true; + + /* get protocol head length */ + ipHeadLen = (pSkb_data[TEST_MAC_HEAD] & 0xF) * 4; + tcpHeadLen = (pSkb_data[TEST_MAC_HEAD + ipHeadLen + + TEST_TCP_HEAD_LEN_OFFSET] >> + 4 & + 0xF) * + 4; + headTotalLen = TEST_MAC_HEAD + ipHeadLen + tcpHeadLen; + offload_len = (pSkb_data[TEST_TCP_OFFLOAD_LEN_OFFSET] << 8 | + pSkb_data[TEST_TCP_OFFLOAD_LEN_OFFSET + 1]) & + 0xFFFF; + /* set tso skb parameters */ + skb->transport_header = ipHeadLen + TEST_MAC_HEAD; + skb->network_header = TEST_MAC_HEAD; + skb->inner_network_header = TEST_MAC_HEAD; + skb->mac_len = TEST_MAC_HEAD; + + /* set skb_shinfo parameters */ + if (tcpHeadLen > TEST_TCP_FIX_HEAD_LEN) { + skb_shinfo(skb)->gso_size = + (pSkb_data[TEST_TCP_MSS_OFFSET] << 8 | + pSkb_data[TEST_TCP_MSS_OFFSET + 1]) & + 0xFFFF; + } else { + skb_shinfo(skb)->gso_size = 0; + } + if (skb_shinfo(skb)->gso_size != 0) { + lastGsoSize = skb_shinfo(skb)->gso_size; + } else { + skb_shinfo(skb)->gso_size = lastGsoSize; + } + /* get segment size */ + if (offload_len % skb_shinfo(skb)->gso_size == 0) { + skb_shinfo(skb)->gso_segs = + offload_len / skb_shinfo(skb)->gso_size; + pdata->expansion.fxgmac_test_last_tso_len = + skb_shinfo(skb)->gso_size + headTotalLen; + } else { + skb_shinfo(skb)->gso_segs = + offload_len / skb_shinfo(skb)->gso_size + 1; + pdata->expansion.fxgmac_test_last_tso_len = + offload_len % skb_shinfo(skb)->gso_size + + headTotalLen; + } + pdata->expansion.fxgmac_test_tso_seg_num = + skb_shinfo(skb)->gso_segs; + + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + skb_shinfo(skb)->frag_list = NULL; + skb->csum_start = skb_headroom(skb) + TEST_MAC_HEAD + ipHeadLen; + skb->csum_offset = skb->len - TEST_MAC_HEAD - ipHeadLen; + + pdata->expansion.fxgmac_test_packet_len = + skb_shinfo(skb)->gso_size + headTotalLen; + } 
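+	/* Frames of 1514 bytes or less skip the TSO setup above and go out
+	 * as a single non-offloaded packet.
+	 */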
else {
+		/* set non-TSO packet parameters */
+		pdata->expansion.fxgmac_test_packet_len = skb->len;
+	}
+
+	/* send data */
+	if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS) {
+		DPRINTK("xmit data failed\n");
+	}
+}
+
+static void fxgmac_dbg_rx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data)
+{
+	unsigned int totalLen = 0;
+	struct sk_buff *rx_skb;
+	struct ext_ioctl_data *pcmd;
+	fxgmac_test_packet pkt;
+	void *addr = NULL;
+	u8 *rx_data = kzalloc(FXGMAC_MAX_DBG_RX_DATA, GFP_KERNEL);
+
+	if (!rx_data)
+		return;
+
+	/* initial dest data region */
+	pcmd = (struct ext_ioctl_data *)pcmd_data;
+	addr = pcmd->cmd_buf.buf;
+	while (pdata->expansion.fxgmac_test_skb_arr_in_index !=
+	       pdata->expansion.fxgmac_test_skb_arr_out_index) {
+		/* get received skb data */
+		rx_skb =
+			pdata->expansion.fxgmac_test_skb_array
+				[pdata->expansion.fxgmac_test_skb_arr_out_index];
+
+		if (rx_skb->len + sizeof(fxgmac_test_packet) + totalLen <
+		    64000) {
+			pkt.length = rx_skb->len;
+			pkt.type = 0x80;
+			pkt.buf[0].offset =
+				totalLen + sizeof(fxgmac_test_packet);
+			pkt.buf[0].length = rx_skb->len;
+
+			/* get data from skb */
+			memcpy(rx_data, rx_skb->data, rx_skb->len);
+
+			/* update next pointer */
+			if ((pdata->expansion.fxgmac_test_skb_arr_out_index +
+			     1) % FXGMAC_MAX_DBG_TEST_PKT ==
+			    pdata->expansion.fxgmac_test_skb_arr_in_index) {
+				pkt.next = NULL;
+			} else {
+				pkt.next =
+					(pfxgmac_test_packet)(addr + totalLen +
+							      sizeof(fxgmac_test_packet) +
+							      pkt.length);
+			}
+
+			/* copy data to user space */
+			if (copy_to_user((void *)(addr + totalLen),
+					 (void *)(&pkt),
+					 sizeof(fxgmac_test_packet))) {
+				DPRINTK("copy pkt data to user failed...");
+			}
+			if (copy_to_user((void *)(addr + totalLen +
+						  sizeof(fxgmac_test_packet)),
+					 (void *)rx_data, rx_skb->len)) {
+				DPRINTK("copy data to user failed...");
+			}
+
+			/* update total length */
+			totalLen += (sizeof(fxgmac_test_packet) + rx_skb->len);
+
+			/* free skb */
+			kfree_skb(rx_skb);
+			pdata->expansion.fxgmac_test_skb_array
+				[pdata->expansion.fxgmac_test_skb_arr_out_index] =
+				NULL;
+
+			/* update gCurSkbOutIndex */
+			pdata->expansion.fxgmac_test_skb_arr_out_index =
+				(pdata->expansion.fxgmac_test_skb_arr_out_index +
+				 1) %
+				FXGMAC_MAX_DBG_TEST_PKT;
+		} else {
+			DPRINTK("received data exceeds the receive buffer\n");
+			break;
+		}
+	}
+
+	kfree(rx_data);
+}
+
+/* Based on the current application scenario, we only use CMD_DATA for data.
+ * If you use another struct, you should recalculate in_total_size.
+ */
+long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	bool ret = true;
+	int regval = 0;
+	struct fxgmac_pdata *pdata = file->private_data;
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	FXGMAC_PDATA_OF_PLATFORM *ex = &pdata->expansion;
+	CMD_DATA ex_data;
+	struct ext_ioctl_data pcmd;
+	u8 *data = NULL;
+	u8 *buf = NULL;
+	int in_total_size, in_data_size, out_total_size;
+	int ioctl_cmd_size = sizeof(struct ext_ioctl_data);
+	u8 mac[ETH_ALEN] = { 0 };
+	struct sk_buff *tmpskb;
+
+	if (!arg) {
+		DPRINTK("[%s] command arg is %lx!\n", __func__, arg);
+		goto err;
+	}
+
+	/* check device type */
+	if (_IOC_TYPE(cmd) != IOC_MAGIC) {
+		DPRINTK("[%s] command type [%c] error!\n", __func__,
+			_IOC_TYPE(cmd));
+		goto err;
+	}
+
+	/* check command number */
+	if (_IOC_NR(cmd) > IOC_MAXNR) {
+		DPRINTK("[%s] command number [%d] exceeded!\n", __func__,
+			_IOC_NR(cmd));
+		goto err;
+	}
+
+	if (copy_from_user(&pcmd, (void *)arg, ioctl_cmd_size)) {
+		DPRINTK("copy data from user failed
\n"); + goto err; + } + + in_total_size = pcmd.cmd_buf.size_in; + in_data_size = in_total_size - ioctl_cmd_size; + out_total_size = pcmd.cmd_buf.size_out; + + buf = (u8 *)kzalloc(in_total_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, (void *)arg, in_total_size)) { + DPRINTK("copy data from user fail... \n"); + goto err; + } + data = buf + ioctl_cmd_size; + + if (arg != 0) { + switch (pcmd.cmd_type) { + /* ioctl diag begin */ + case FUXI_DFS_IOCTL_DIAG_BEGIN: + DPRINTK("Debugfs received diag begin command.\n"); + if (netif_running(pdata->netdev)) { + fxgmac_restart_dev(pdata); + } + + /* release last loopback test abnormal exit buffer */ + while (ex->fxgmac_test_skb_arr_in_index != + ex->fxgmac_test_skb_arr_out_index) { + tmpskb = + ex->fxgmac_test_skb_array + [ex->fxgmac_test_skb_arr_out_index]; + if (tmpskb) { + kfree_skb(tmpskb); + ex->fxgmac_test_skb_array + [ex->fxgmac_test_skb_arr_out_index] = + NULL; + } + + ex->fxgmac_test_skb_arr_out_index = + (ex->fxgmac_test_skb_arr_out_index + + 1) % + FXGMAC_MAX_DBG_TEST_PKT; + } + + /* init loopback test parameters */ + ex->fxgmac_test_skb_arr_in_index = 0; + ex->fxgmac_test_skb_arr_out_index = 0; + ex->fxgmac_test_tso_flag = false; + ex->fxgmac_test_tso_seg_num = 0; + ex->fxgmac_test_last_tso_len = 0; + ex->fxgmac_test_packet_len = 0; + break; + + /* ioctl diag end */ + case FUXI_DFS_IOCTL_DIAG_END: + DPRINTK("Debugfs received diag end command.\n"); + if (netif_running(pdata->netdev)) { + fxgmac_restart_dev(pdata); + } + break; + + /* ioctl diag tx pkt */ + case FUXI_DFS_IOCTL_DIAG_TX_PKT: + fxgmac_dbg_tx_pkt(pdata, buf); + break; + + /* ioctl diag rx pkt */ + case FUXI_DFS_IOCTL_DIAG_RX_PKT: + fxgmac_dbg_rx_pkt(pdata, buf); + break; + + /* ioctl device reset */ + case FUXI_DFS_IOCTL_DEVICE_RESET: + DPRINTK("Debugfs received device reset command.\n"); + if (netif_running(pdata->netdev)) { + fxgmac_restart_dev(pdata); + } + break; + + case FXGMAC_EFUSE_LED_TEST: + DPRINTK("Debugfs received device led test command.\n"); + memcpy(&pdata->led, data, sizeof(struct led_setting)); + fxgmac_restart_dev(pdata); + break; + + case FXGMAC_EFUSE_UPDATE_LED_CFG: + DPRINTK("Debugfs received device led update command.\n"); + memcpy(&pdata->ledconfig, data, + sizeof(struct led_setting)); + ret = hw_ops->write_led_config(pdata); + hw_ops->read_led_config(pdata); + hw_ops->led_under_active(pdata); + break; + + case FXGMAC_EFUSE_WRITE_LED: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + DPRINTK("FXGMAC_EFUSE_WRITE_LED, val = 0x%x\n", + ex_data.val0); + ret = hw_ops->write_led(pdata, ex_data.val0); + break; + + case FXGMAC_EFUSE_WRITE_OOB: + DPRINTK("FXGMAC_EFUSE_WRITE_OOB.\n"); + ret = hw_ops->write_oob(pdata); + break; + + case FXGMAC_EFUSE_READ_REGIONABC: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->read_efuse_data(pdata, ex_data.val0, + &ex_data.val1); + DPRINTK("FXGMAC_EFUSE_READ_REGIONABC, address = 0x%x, val = 0x%x\n", + ex_data.val0, ex_data.val1); + if (ret) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = + ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + break; + + case FXGMAC_EFUSE_WRITE_PATCH_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + DPRINTK("FXGMAC_EFUSE_WRITE_PATCH_REG, address = 0x%x, val = 0x%x\n", + ex_data.val0, ex_data.val1); + ret = hw_ops->write_patch_to_efuse(pdata, ex_data.val0, + ex_data.val1); + break; + + case FXGMAC_EFUSE_READ_PATCH_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = 
hw_ops->read_patch_from_efuse(pdata, ex_data.val0, + &ex_data.val1); + DPRINTK("FXGMAC_EFUSE_READ_PATCH_REG, address = 0x%x, val = 0x%x\n", + ex_data.val0, ex_data.val1); + if (ret) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = + ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + break; + + case FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->write_patch_to_efuse_per_index( + pdata, ex_data.val0, ex_data.val1, + ex_data.val2); + DPRINTK("FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX, index = %d, address = 0x%x, val = 0x%x\n", + ex_data.val0, ex_data.val1, ex_data.val2); + break; + + case FXGMAC_EFUSE_READ_PATCH_PER_INDEX: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->read_patch_from_efuse_per_index( + pdata, ex_data.val0, &ex_data.val1, + &ex_data.val2); + DPRINTK("FXGMAC_EFUSE_READ_PATCH_PER_INDEX, address = 0x%x, val = 0x%x\n", + ex_data.val1, ex_data.val2); + if (ret) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = + ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + break; + + case FXGMAC_EFUSE_LOAD: + DPRINTK("FXGMAC_EFUSE_LOAD.\n"); + ret = hw_ops->efuse_load(pdata); + break; + + case FXGMAC_GET_MAC_DATA: + ret = hw_ops->read_mac_subsys_from_efuse(pdata, mac, + NULL, NULL); + if (ret) { + memcpy(data, mac, ETH_ALEN); + out_total_size = ioctl_cmd_size + ETH_ALEN; + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + break; + + case FXGMAC_SET_MAC_DATA: + if (in_data_size != ETH_ALEN) + goto err; + memcpy(mac, data, ETH_ALEN); + ret = hw_ops->write_mac_subsys_to_efuse(pdata, mac, + NULL, NULL); + if (ret) { + eth_hw_addr_set(pdata->netdev, mac); + memcpy(pdata->mac_addr, mac, ETH_ALEN); + hw_ops->set_mac_address(pdata, mac); + hw_ops->set_mac_hash(pdata); + } + break; + + case FXGMAC_GET_SUBSYS_ID: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->read_mac_subsys_from_efuse( + pdata, NULL, &ex_data.val0, NULL); + if (ret) { + ex_data.val1 = 0xFFFF; /* invalid value */ + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = + ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + break; + + case FXGMAC_SET_SUBSYS_ID: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->write_mac_subsys_to_efuse( + pdata, NULL, &ex_data.val0, NULL); + break; + + case FXGMAC_GET_GMAC_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ex_data.val1 = hw_ops->get_gmac_register( + pdata, (u8 *)(pdata->mac_regs + ex_data.val0)); + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + break; + + case FXGMAC_SET_GMAC_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = hw_ops->set_gmac_register( + pdata, (u8 *)(pdata->mac_regs + ex_data.val0), + ex_data.val1); + ret = (regval == 0 ? true : false); + break; + + case FXGMAC_GET_PHY_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = hw_ops->read_ephy_reg(pdata, ex_data.val0, + &ex_data.val1); + if (regval != -1) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = + ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + ret = (regval == -1 ? 
false : true); + break; + + case FXGMAC_SET_PHY_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = hw_ops->write_ephy_reg(pdata, ex_data.val0, + ex_data.val1); + ret = (regval == 0 ? true : false); + break; + + case FXGMAC_GET_PCIE_LOCATION: + ex_data.val0 = pdata->pdev->bus->number; + ex_data.val1 = PCI_SLOT(pdata->pdev->devfn); + ex_data.val2 = PCI_FUNC(pdata->pdev->devfn); + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + break; + + case FXGMAC_GET_GSO_SIZE: + ex_data.val0 = pdata->netdev->gso_max_size; + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + break; + + case FXGMAC_SET_GSO_SIZE: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + pdata->netdev->gso_max_size = ex_data.val0; + break; + + case FXGMAC_SET_RX_MODERATION: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = readreg(pdata->pAdapter, + pdata->base_mem + INT_MOD); + regval = FXGMAC_SET_REG_BITS(regval, INT_MOD_RX_POS, + INT_MOD_RX_LEN, + ex_data.val0); + writereg(pdata->pAdapter, regval, + pdata->base_mem + INT_MOD); + break; + + case FXGMAC_SET_TX_MODERATION: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = readreg(pdata->pAdapter, + pdata->base_mem + INT_MOD); + regval = FXGMAC_SET_REG_BITS(regval, INT_MOD_TX_POS, + INT_MOD_TX_LEN, + ex_data.val0); + writereg(pdata->pAdapter, regval, + pdata->base_mem + INT_MOD); + break; + + case FXGMAC_GET_TXRX_MODERATION: + regval = readreg(pdata->pAdapter, + pdata->base_mem + INT_MOD); + ex_data.val0 = FXGMAC_GET_REG_BITS( + regval, INT_MOD_RX_POS, INT_MOD_RX_LEN); + ex_data.val1 = FXGMAC_GET_REG_BITS( + regval, INT_MOD_TX_POS, INT_MOD_TX_LEN); + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + break; + + default: + DPRINTK("Debugfs received invalid command: %x.\n", + pcmd.cmd_type); + ret = false; + break; + } + } + + if (buf) + kfree(buf); + return ret ? 
FXGMAC_SUCCESS : FXGMAC_FAIL; + +err: + if (buf) + kfree(buf); + return FXGMAC_FAIL; +} + +#ifdef HAVE_FXGMAC_DEBUG_FS + +static struct file_operations fxgmac_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = fxgmac_dbg_netdev_ops_read, + .write = fxgmac_dbg_netdev_ops_write, + .unlocked_ioctl = fxgmac_dbg_netdev_ops_ioctl, +}; + +/** + * fxgmac_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void fxgmac_dbg_adapter_init(struct fxgmac_pdata *pdata) +{ + const char *name = pdata->drv_name; + struct dentry *pfile; + + pdata->expansion.dbg_adapter = + debugfs_create_dir(name, pdata->expansion.fxgmac_dbg_root); + if (pdata->expansion.dbg_adapter) { + pfile = debugfs_create_file("netdev_ops", 0600, + pdata->expansion.dbg_adapter, pdata, + &fxgmac_dbg_netdev_ops_fops); + if (!pfile) + DPRINTK("debugfs netdev_ops for %s failed\n", name); + } else { + DPRINTK("debugfs entry for %s failed\n", name); + } +} + +/** + * fxgmac_dbg_adapter_exit - clear out the adapter's debugfs entries + * @adapter: board private structure + **/ +void fxgmac_dbg_adapter_exit(struct fxgmac_pdata *pdata) +{ + if (pdata->expansion.dbg_adapter) + debugfs_remove_recursive(pdata->expansion.dbg_adapter); + pdata->expansion.dbg_adapter = NULL; +} + +/** + * fxgmac_dbg_init - start up debugfs for the driver + **/ +void fxgmac_dbg_init(struct fxgmac_pdata *pdata) +{ + unsigned int i; + char num[3]; + const char debug_path[] = "/sys/kernel/debug/"; + const char file_prefix[] = "fuxi_"; + char file_path[50]; + char file_name[8]; + + /* init file_path */ + memset(file_path, '\0', sizeof(file_path)); + memcpy(file_path, debug_path, sizeof(debug_path)); + + for (i = 0; i < DF_MAX_NIC_NUM; i++) { + /* init num and filename */ + memset(num, '\0', sizeof(num)); + memset(file_name, '\0', sizeof(file_name)); + + /* int to string */ + sprintf(num, "%d", i); + + /* file name */ + memcpy(file_name, file_prefix, sizeof(file_prefix)); + memcpy(file_name + strlen(file_prefix), num, sizeof(num)); + + /* file path */ + memcpy(file_path + sizeof(debug_path) - 1, file_name, + sizeof(file_name)); + + /* whether file exist */ + pdata->expansion.fxgmac_dbg_root = + debugfs_lookup(file_name, NULL); + if (!pdata->expansion.fxgmac_dbg_root) { + /* create file */ + pdata->expansion.fxgmac_dbg_root = + debugfs_create_dir(file_name, NULL); + if (IS_ERR(pdata->expansion.fxgmac_dbg_root)) + DPRINTK("fxgmac init of debugfs failed\n"); + + break; + } + } +} + +/** + * fxgmac_dbg_exit - clean out the driver's debugfs entries + **/ +void fxgmac_dbg_exit(struct fxgmac_pdata *pdata) +{ + if (pdata->expansion.fxgmac_dbg_root) + debugfs_remove_recursive(pdata->expansion.fxgmac_dbg_root); +} + +#endif /* HAVE_XLGMAC_DEBUG_FS */ diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c new file mode 100644 index 0000000000000000000000000000000000000000..969d84eb44e2a1cc4f80ec90f3b4e71e1a5646ba --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c @@ -0,0 +1,601 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. 
*/ + +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" + +static void fxgmac_unmap_desc_data(struct fxgmac_pdata *pdata, + struct fxgmac_desc_data *desc_data) +{ + if (desc_data->skb_dma) { + if (desc_data->mapped_as_page) { + dma_unmap_page(pdata->dev, desc_data->skb_dma, + desc_data->skb_dma_len, DMA_TO_DEVICE); + } else { + dma_unmap_single(pdata->dev, desc_data->skb_dma, + desc_data->skb_dma_len, DMA_TO_DEVICE); + } + desc_data->skb_dma = 0; + desc_data->skb_dma_len = 0; + } + + if (desc_data->rx.buf.dma_base) { + dma_unmap_single(pdata->dev, desc_data->rx.buf.dma_base, + pdata->rx_buf_size, DMA_FROM_DEVICE); + desc_data->rx.buf.dma_base = 0; + } + + if (desc_data->skb) { + dev_kfree_skb_any(desc_data->skb); + desc_data->skb = NULL; + } + + memset(&desc_data->tx, 0, sizeof(desc_data->tx)); + memset(&desc_data->rx, 0, sizeof(desc_data->rx)); + + desc_data->mapped_as_page = 0; + + if (desc_data->state_saved) { + desc_data->state_saved = 0; + desc_data->state.skb = NULL; + desc_data->state.len = 0; + desc_data->state.error = 0; + } +} + +static void fxgmac_free_ring(struct fxgmac_pdata *pdata, + struct fxgmac_ring *ring) +{ + struct fxgmac_desc_data *desc_data; + unsigned int i; + + if (!ring) + return; + + if (ring->desc_data_head) { + for (i = 0; i < ring->dma_desc_count; i++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, i); + fxgmac_unmap_desc_data(pdata, desc_data); + } + + kfree(ring->desc_data_head); + ring->desc_data_head = NULL; + } + + if (ring->dma_desc_head) { + dma_free_coherent( + pdata->dev, + (sizeof(struct fxgmac_dma_desc) * ring->dma_desc_count), + ring->dma_desc_head, ring->dma_desc_head_addr); + ring->dma_desc_head = NULL; + } +} + +static int fxgmac_init_ring(struct fxgmac_pdata *pdata, + struct fxgmac_ring *ring, + unsigned int dma_desc_count) +{ + if (!ring) + return 0; + /* Descriptors */ + ring->dma_desc_count = dma_desc_count; + ring->dma_desc_head = dma_alloc_coherent( + pdata->dev, (sizeof(struct fxgmac_dma_desc) * dma_desc_count), + &ring->dma_desc_head_addr, GFP_KERNEL); + if (!ring->dma_desc_head) + return -ENOMEM; + + /* Array of descriptor data */ + ring->desc_data_head = kcalloc( + dma_desc_count, sizeof(struct fxgmac_desc_data), GFP_KERNEL); + if (!ring->desc_data_head) + return -ENOMEM; + + netif_dbg( + pdata, drv, pdata->netdev, + "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n", + ring->dma_desc_head, &ring->dma_desc_head_addr, + ring->desc_data_head); + + return 0; +} + +static void fxgmac_free_rings(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel; + unsigned int i; + + if (!pdata->channel_head) + return; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + fxgmac_free_ring(pdata, channel->tx_ring); + fxgmac_free_ring(pdata, channel->rx_ring); + } +} + +static int fxgmac_alloc_rings(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel; + unsigned int i; + int ret; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n", + channel->name); + + if (i < pdata->tx_ring_count) { + ret = fxgmac_init_ring(pdata, channel->tx_ring, + pdata->tx_desc_count); + + if (ret) { + netdev_alert(pdata->netdev, + "error initializing Tx ring"); + goto err_init_ring; + } + } + + netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n", + channel->name); + + ret = fxgmac_init_ring(pdata, channel->rx_ring, + pdata->rx_desc_count); + if (ret) { + netdev_alert(pdata->netdev, + "error initializing Rx 
ring\n"); + goto err_init_ring; + } + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_alloc_ring..ch=%u, tx_desc_cnt=%u, rx_desc_cnt=%u\n", + i, pdata->tx_desc_count, pdata->rx_desc_count); + } + if (netif_msg_drv(pdata)) + DPRINTK("alloc_rings callout ok\n"); + + return 0; + +err_init_ring: + fxgmac_free_rings(pdata); + + DPRINTK("alloc_rings callout err,%d\n", ret); + return ret; +} + +static void fxgmac_free_channels(struct fxgmac_pdata *pdata) +{ + if (!pdata->channel_head) + return; + if (netif_msg_drv(pdata)) + DPRINTK("free_channels, tx_ring=%p\n", + pdata->channel_head->tx_ring); + kfree(pdata->channel_head->tx_ring); + pdata->channel_head->tx_ring = NULL; + + if (netif_msg_drv(pdata)) + DPRINTK("free_channels, rx_ring=%p\n", + pdata->channel_head->rx_ring); + kfree(pdata->channel_head->rx_ring); + pdata->channel_head->rx_ring = NULL; + + if (netif_msg_drv(pdata)) + DPRINTK("free_channels, channel=%p\n", pdata->channel_head); + kfree(pdata->channel_head); + + pdata->channel_head = NULL; +} + +static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel_head, *channel; + struct fxgmac_ring *tx_ring, *rx_ring; + int ret = -ENOMEM; + unsigned int i; + +#ifdef CONFIG_PCI_MSI + u32 msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MSIX_POS, + FXGMAC_FLAG_MSIX_LEN); +#endif + + channel_head = kcalloc(pdata->channel_count, + sizeof(struct fxgmac_channel), GFP_KERNEL); + if (netif_msg_drv(pdata)) + DPRINTK("alloc_channels, channel_head=%p, size=%d*%ld\n", + channel_head, pdata->channel_count, + sizeof(struct fxgmac_channel)); + + if (!channel_head) + return ret; + + netif_dbg(pdata, drv, pdata->netdev, "channel_head=%p\n", channel_head); + + tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct fxgmac_ring), + GFP_KERNEL); + if (!tx_ring) + goto err_tx_ring; + + if (netif_msg_drv(pdata)) + DPRINTK("alloc_channels, tx_ring=%p, size=%d*%ld\n", tx_ring, + pdata->tx_ring_count, sizeof(struct fxgmac_ring)); + rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct fxgmac_ring), + GFP_KERNEL); + if (!rx_ring) + goto err_rx_ring; + + if (netif_msg_drv(pdata)) + DPRINTK("alloc_channels, rx_ring=%p, size=%d*%ld\n", rx_ring, + pdata->rx_ring_count, sizeof(struct fxgmac_ring)); + + for (i = 0, channel = channel_head; i < pdata->channel_count; + i++, channel++) { + snprintf(channel->name, sizeof(channel->name), "channel-%u", i); + channel->pdata = pdata; + channel->queue_index = i; + channel->dma_regs = + pdata->mac_regs + DMA_CH_BASE + (DMA_CH_INC * i); + + if (pdata->per_channel_irq) { + /* Get the per DMA interrupt */ +#ifdef CONFIG_PCI_MSI + if (msix) { + pdata->channel_irq[i] = + pdata->expansion.msix_entries[i].vector; + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + pdata->channel_irq + [FXGMAC_MAX_DMA_CHANNELS] = + pdata->expansion + .msix_entries + [FXGMAC_MAX_DMA_CHANNELS] + .vector; + + if (pdata->channel_irq + [FXGMAC_MAX_DMA_CHANNELS] < + 0) { + netdev_err( + pdata->netdev, + "get_irq %u for tx failed\n", + i + 1); + goto err_irq; + } + + channel->expansion.dma_irq_tx = + pdata->channel_irq + [FXGMAC_MAX_DMA_CHANNELS]; + DPRINTK("fxgmac_alloc_channels, for MSIx, channel %d dma_irq_tx=%u\n", + i, + channel->expansion.dma_irq_tx); + } + } +#endif + ret = pdata->channel_irq[i]; + if (ret < 0) { + netdev_err(pdata->netdev, "get_irq %u failed\n", + i + 1); + goto err_irq; + } + channel->dma_irq = ret; + DPRINTK("fxgmac_alloc_channels, for MSIx, channel %d dma_irq=%u\n", + i, channel->dma_irq); + } + + if (i < pdata->tx_ring_count) + channel->tx_ring 
= tx_ring++; + + if (i < pdata->rx_ring_count) + channel->rx_ring = rx_ring++; + + netif_dbg(pdata, drv, pdata->netdev, + "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n", + channel->name, channel->dma_regs, channel->tx_ring, + channel->rx_ring); + } + + pdata->channel_head = channel_head; + + if (netif_msg_drv(pdata)) + DPRINTK("alloc_channels callout ok\n"); + return 0; + +err_irq: + kfree(rx_ring); + +err_rx_ring: + kfree(tx_ring); + +err_tx_ring: + kfree(channel_head); + + DPRINTK("fxgmac alloc_channels callout err,%d\n", ret); + return ret; +} + +static void fxgmac_free_channels_and_rings(struct fxgmac_pdata *pdata) +{ + fxgmac_free_rings(pdata); + + fxgmac_free_channels(pdata); +} + +static int fxgmac_alloc_channels_and_rings(struct fxgmac_pdata *pdata) +{ + int ret; + + ret = fxgmac_alloc_channels(pdata); + if (ret) + goto err_alloc; + + ret = fxgmac_alloc_rings(pdata); + if (ret) + goto err_alloc; + + return 0; + +err_alloc: + fxgmac_free_channels_and_rings(pdata); + + return ret; +} + +static int fxgmac_map_rx_buffer(struct fxgmac_pdata *pdata, + struct fxgmac_ring *ring, + struct fxgmac_desc_data *desc_data) +{ + struct sk_buff *skb; + skb = __netdev_alloc_skb_ip_align(pdata->netdev, pdata->rx_buf_size, + GFP_ATOMIC); + if (!skb) { + netdev_err(pdata->netdev, "%s: Rx init fails; skb is NULL\n", + __func__); + return -ENOMEM; + } + + desc_data->skb = skb; + desc_data->rx.buf.dma_base = dma_map_single( + pdata->dev, skb->data, pdata->rx_buf_size, DMA_FROM_DEVICE); + if (dma_mapping_error(pdata->dev, desc_data->rx.buf.dma_base)) { + netdev_err(pdata->netdev, "%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + return 0; +} + +static void fxgmac_tx_desc_init(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_channel *channel; + struct fxgmac_ring *ring; + dma_addr_t dma_desc_addr; + unsigned int i, j; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->tx_ring; + if (!ring) + break; + + /* reset the tx timer status. 
20220104 */ + channel->tx_timer_active = 0; + + dma_desc = ring->dma_desc_head; + dma_desc_addr = ring->dma_desc_head_addr; + + for (j = 0; j < ring->dma_desc_count; j++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, j); + + desc_data->dma_desc = dma_desc; + desc_data->dma_desc_addr = dma_desc_addr; + + dma_desc++; + dma_desc_addr += sizeof(struct fxgmac_dma_desc); + } + + ring->cur = 0; + ring->dirty = 0; + memset(&ring->tx, 0, sizeof(ring->tx)); + + hw_ops->tx_desc_init(channel); + } +} + +static void fxgmac_rx_desc_init(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_channel *channel; + struct fxgmac_ring *ring; + dma_addr_t dma_desc_addr; + unsigned int i, j; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->rx_ring; + if (!ring) + break; + + dma_desc = ring->dma_desc_head; + dma_desc_addr = ring->dma_desc_head_addr; + + for (j = 0; j < ring->dma_desc_count; j++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, j); + + desc_data->dma_desc = dma_desc; + desc_data->dma_desc_addr = dma_desc_addr; + + if (fxgmac_map_rx_buffer(pdata, ring, desc_data)) + break; + + dma_desc++; + dma_desc_addr += sizeof(struct fxgmac_dma_desc); + } + + ring->cur = 0; + ring->dirty = 0; + + hw_ops->rx_desc_init(channel); + } +} + +static int fxgmac_map_tx_skb(struct fxgmac_channel *channel, + struct sk_buff *skb) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->tx_ring; + unsigned int start_index, cur_index; + struct fxgmac_desc_data *desc_data; + unsigned int offset, datalen, len; + struct fxgmac_pkt_info *pkt_info; + skb_frag_t *frag; + unsigned int tso, vlan; + dma_addr_t skb_dma; + unsigned int i; + + offset = 0; + start_index = ring->cur; + cur_index = ring->cur; + + pkt_info = &ring->pkt_info; + pkt_info->desc_count = 0; + pkt_info->length = 0; + + tso = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); + vlan = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); + + /* Save space for a context descriptor if needed */ + if ((tso && (pkt_info->mss != ring->tx.cur_mss)) || + (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))) { + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + } + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + + if (tso) { + /* Map the TSO header */ + skb_dma = dma_map_single(pdata->dev, skb->data, + pkt_info->header_len, DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, "dma_map_single failed\n"); + goto err_out; + } + desc_data->skb_dma = skb_dma; + desc_data->skb_dma_len = pkt_info->header_len; + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb header: index=%u, dma=%pad, len=%u\n", cur_index, + &skb_dma, pkt_info->header_len); + + offset = pkt_info->header_len; + + pkt_info->length += pkt_info->header_len; + + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + } + + /* Map the (remainder of the) packet */ + for (datalen = skb_headlen(skb) - offset; datalen;) { + len = min_t(unsigned int, datalen, FXGMAC_TX_MAX_BUF_SIZE); + + skb_dma = dma_map_single(pdata->dev, skb->data + offset, len, + DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, "dma_map_single 
failed\n"); + goto err_out; + } + desc_data->skb_dma = skb_dma; + desc_data->skb_dma_len = len; + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb data: index=%u, dma=%pad, len=%u\n", cur_index, + &skb_dma, len); + + datalen -= len; + offset += len; + + pkt_info->length += len; + + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + netif_dbg(pdata, tx_queued, pdata->netdev, "mapping frag %u\n", + i); + frag = &skb_shinfo(skb)->frags[i]; + offset = 0; + + for (datalen = skb_frag_size(frag); datalen;) { + len = min_t(unsigned int, datalen, + FXGMAC_TX_MAX_BUF_SIZE); + + skb_dma = skb_frag_dma_map(pdata->dev, frag, offset, + len, DMA_TO_DEVICE); + + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, + "skb_frag_dma_map failed\n"); + goto err_out; + } + desc_data->skb_dma = skb_dma; + desc_data->skb_dma_len = len; + desc_data->mapped_as_page = 1; + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb frag: index=%u, dma=%pad, len=%u\n", + cur_index, &skb_dma, len); + + datalen -= len; + offset += len; + + pkt_info->length += len; + + cur_index = FXGMAC_GET_ENTRY(cur_index, + ring->dma_desc_count); + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + } + } + + /* Save the skb address in the last entry. We always have some data + * that has been mapped so desc_data is always advanced past the last + * piece of mapped data - use the entry pointed to by cur_index - 1. + */ + desc_data = FXGMAC_GET_DESC_DATA( + ring, (cur_index - 1) & (ring->dma_desc_count - 1)); + desc_data->skb = skb; + + /* Save the number of descriptor entries used */ + if (start_index <= cur_index) + pkt_info->desc_count = cur_index - start_index; + else + pkt_info->desc_count = + ring->dma_desc_count - start_index + cur_index; + + return pkt_info->desc_count; + +err_out: + while (start_index < cur_index) { + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); + start_index = + FXGMAC_GET_ENTRY(start_index, ring->dma_desc_count); + fxgmac_unmap_desc_data(pdata, desc_data); + } + + return 0; +} + +void fxgmac_init_desc_ops(struct fxgmac_desc_ops *desc_ops) +{ + desc_ops->alloc_channles_and_rings = fxgmac_alloc_channels_and_rings; + desc_ops->free_channels_and_rings = fxgmac_free_channels_and_rings; + desc_ops->map_tx_skb = fxgmac_map_tx_skb; + desc_ops->map_rx_buffer = fxgmac_map_rx_buffer; + desc_ops->unmap_desc_data = fxgmac_unmap_desc_data; + desc_ops->tx_desc_init = fxgmac_tx_desc_init; + desc_ops->rx_desc_init = fxgmac_rx_desc_init; +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..05aa42f90ad832106943c4e5f3d3478ab52fcd31 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c @@ -0,0 +1,1114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. 
*/ + +#include +#include +#include + +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" + +struct fxgmac_stats_desc { + char stat_string[ETH_GSTRING_LEN]; + int stat_offset; +}; + +#define FXGMAC_STAT(str, var) \ + { \ + str, offsetof(struct fxgmac_pdata, stats.var), \ + } + +static const struct fxgmac_stats_desc fxgmac_gstring_stats[] = { + /* MMC TX counters */ + FXGMAC_STAT("tx_bytes", txoctetcount_gb), + FXGMAC_STAT("tx_bytes_good", txoctetcount_g), + FXGMAC_STAT("tx_packets", txframecount_gb), + FXGMAC_STAT("tx_packets_good", txframecount_g), + FXGMAC_STAT("tx_unicast_packets", txunicastframes_gb), + FXGMAC_STAT("tx_broadcast_packets", txbroadcastframes_gb), + FXGMAC_STAT("tx_broadcast_packets_good", txbroadcastframes_g), + FXGMAC_STAT("tx_multicast_packets", txmulticastframes_gb), + FXGMAC_STAT("tx_multicast_packets_good", txmulticastframes_g), + FXGMAC_STAT("tx_vlan_packets_good", txvlanframes_g), + FXGMAC_STAT("tx_64_byte_packets", tx64octets_gb), + FXGMAC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb), + FXGMAC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb), + FXGMAC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb), + FXGMAC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb), + FXGMAC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb), + FXGMAC_STAT("tx_underflow_errors", txunderflowerror), + FXGMAC_STAT("tx_pause_frames", txpauseframes), + FXGMAC_STAT("tx_single_collision", txsinglecollision_g), + FXGMAC_STAT("tx_multiple_collision", txmultiplecollision_g), + FXGMAC_STAT("tx_deferred_frames", txdeferredframes), + FXGMAC_STAT("tx_late_collision_frames", txlatecollisionframes), + FXGMAC_STAT("tx_excessive_collision_frames", + txexcessivecollisionframes), + FXGMAC_STAT("tx_carrier_error_frames", txcarriererrorframes), + FXGMAC_STAT("tx_excessive_deferral_error", txexcessivedeferralerror), + FXGMAC_STAT("tx_oversize_frames_good", txoversize_g), + + /* MMC RX counters */ + FXGMAC_STAT("rx_bytes", rxoctetcount_gb), + FXGMAC_STAT("rx_bytes_good", rxoctetcount_g), + FXGMAC_STAT("rx_packets", rxframecount_gb), + FXGMAC_STAT("rx_unicast_packets_good", rxunicastframes_g), + FXGMAC_STAT("rx_broadcast_packets_good", rxbroadcastframes_g), + FXGMAC_STAT("rx_multicast_packets_good", rxmulticastframes_g), + FXGMAC_STAT("rx_vlan_packets_mac", rxvlanframes_gb), + FXGMAC_STAT("rx_64_byte_packets", rx64octets_gb), + FXGMAC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb), + FXGMAC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb), + FXGMAC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb), + FXGMAC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb), + FXGMAC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb), + FXGMAC_STAT("rx_undersize_packets_good", rxundersize_g), + FXGMAC_STAT("rx_oversize_packets_good", rxoversize_g), + FXGMAC_STAT("rx_crc_errors", rxcrcerror), + FXGMAC_STAT("rx_align_error", rxalignerror), + FXGMAC_STAT("rx_crc_errors_small_packets", rxrunterror), + FXGMAC_STAT("rx_crc_errors_giant_packets", rxjabbererror), + FXGMAC_STAT("rx_length_errors", rxlengtherror), + FXGMAC_STAT("rx_out_of_range_errors", rxoutofrangetype), + FXGMAC_STAT("rx_fifo_overflow_errors", rxfifooverflow), + FXGMAC_STAT("rx_watchdog_errors", rxwatchdogerror), + FXGMAC_STAT("rx_pause_frames", rxpauseframes), + FXGMAC_STAT("rx_receive_error_frames", rxreceiveerrorframe), + FXGMAC_STAT("rx_control_frames_good", rxcontrolframe_g), + + /* Extra counters */ + FXGMAC_STAT("tx_tso_packets", tx_tso_packets), + FXGMAC_STAT("rx_split_header_packets", 
rx_split_header_packets),
+ FXGMAC_STAT("tx_process_stopped", tx_process_stopped),
+ FXGMAC_STAT("rx_process_stopped", rx_process_stopped),
+ FXGMAC_STAT("tx_buffer_unavailable", tx_buffer_unavailable),
+ FXGMAC_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
+ FXGMAC_STAT("fatal_bus_error", fatal_bus_error),
+ FXGMAC_STAT("tx_vlan_packets_net", tx_vlan_packets),
+ FXGMAC_STAT("rx_vlan_packets_net", rx_vlan_packets),
+ FXGMAC_STAT("napi_poll_isr", napi_poll_isr),
+ FXGMAC_STAT("napi_poll_txtimer", napi_poll_txtimer),
+ FXGMAC_STAT("alive_cnt_txtimer", cnt_alive_txtimer),
+
+ FXGMAC_STAT("ephy_poll_timer", ephy_poll_timer_cnt),
+ FXGMAC_STAT("mgmt_int_isr", mgmt_int_isr),
+};
+
+#define FXGMAC_STATS_COUNT ARRAY_SIZE(fxgmac_gstring_stats)
+
+static void fxgmac_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+ u32 ver = pdata->hw_feat.version;
+ u32 sver, devid, userver;
+
+ strscpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version));
+ strscpy(drvinfo->bus_info, dev_name(pdata->dev),
+ sizeof(drvinfo->bus_info));
+ /*
+ * D|DEVID: Indicates the Device family
+ * U|USERVER: User-defined Version
+ */
+ sver = FXGMAC_GET_REG_BITS(ver, MAC_VR_SVER_POS, MAC_VR_SVER_LEN);
+ devid = FXGMAC_GET_REG_BITS(ver, MAC_VR_DEVID_POS, MAC_VR_DEVID_LEN);
+ userver = FXGMAC_GET_REG_BITS(ver, MAC_VR_USERVER_POS,
+ MAC_VR_USERVER_LEN);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "S.D.U: %x.%x.%x", sver, devid, userver);
+}
+
+static u32 fxgmac_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+ return pdata->msg_enable;
+}
+
+static void fxgmac_ethtool_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+ DPRINTK("fxmac, set msglvl from %08x to %08x\n", pdata->msg_enable,
+ msglevel);
+ pdata->msg_enable = msglevel;
+}
+
+static void fxgmac_ethtool_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channel)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+#if (FXGMAC_RSS_FEATURE_ENABLED)
+ /* report maximum channels */
+ channel->max_combined = FXGMAC_MAX_DMA_CHANNELS;
+ channel->max_other = 0;
+ channel->other_count = 0;
+
+ /* record RSS queues */
+ channel->combined_count = FXGMAC_MAX_DMA_CHANNELS;
+
+ /* nothing else to report if RSS is disabled */
+ if (channel->combined_count == 1)
+ return;
+ DPRINTK("fxmac rss, get channels max=(combined %d, other %d), count(combined %d, other %d)\n",
+ channel->max_combined, channel->max_other,
+ channel->combined_count, channel->other_count);
+#endif
+
+ channel->max_rx = FXGMAC_MAX_DMA_CHANNELS;
+ channel->max_tx = FXGMAC_MAX_DMA_CHANNELS;
+ channel->rx_count = pdata->rx_q_count;
+ channel->tx_count = pdata->tx_q_count;
+ DPRINTK("fxmac, get channels max=(rx %d, tx %d), count(%d,%d)\n",
+ channel->max_rx, channel->max_tx, channel->rx_count,
+ channel->tx_count);
+}
+
+static int
+fxgmac_ethtool_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+ ec->rx_coalesce_usecs = pdata->rx_usecs;
+ ec->tx_coalesce_usecs = pdata->tx_usecs;
+ /* If more fields are reported here, supported_coalesce_params in
+ * fxgmac_ethtool_ops must be extended to match.
+ */
+ DPRINTK("fxmac, get coalesce\n");
+ return 0;
+}
+
+static int
+fxgmac_ethtool_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+ struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ unsigned int rx_frames, rx_riwt, rx_usecs;
+ unsigned int tx_frames;
+
+ /* Reject parameters this driver does not support */
+ if ((ec->rx_coalesce_usecs_irq) || (ec->rx_max_coalesced_frames_irq) ||
+ (ec->tx_coalesce_usecs_high) || (ec->tx_max_coalesced_frames_irq) ||
+ (ec->tx_coalesce_usecs_irq) || (ec->stats_block_coalesce_usecs) ||
+ (ec->pkt_rate_low) || (ec->use_adaptive_rx_coalesce) ||
+ (ec->use_adaptive_tx_coalesce) ||
+ (ec->rx_max_coalesced_frames_low) || (ec->rx_coalesce_usecs_low) ||
+ (ec->tx_coalesce_usecs_low) || (ec->tx_max_coalesced_frames_low) ||
+ (ec->pkt_rate_high) || (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) ||
+ (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ rx_usecs = ec->rx_coalesce_usecs;
+ rx_riwt = hw_ops->usec_to_riwt(pdata, rx_usecs);
+ rx_frames = ec->rx_max_coalesced_frames;
+ tx_frames = ec->tx_max_coalesced_frames;
+
+ if ((rx_riwt > FXGMAC_MAX_DMA_RIWT) ||
+ (rx_riwt < FXGMAC_MIN_DMA_RIWT) ||
+ (rx_frames > pdata->rx_desc_count))
+ return -EINVAL;
+
+ if (tx_frames > pdata->tx_desc_count)
+ return -EINVAL;
+
+ pdata->rx_riwt = rx_riwt;
+ pdata->rx_usecs = rx_usecs;
+ pdata->rx_frames = rx_frames;
+ hw_ops->config_rx_coalesce(pdata);
+
+ pdata->tx_frames = tx_frames;
+ hw_ops->config_tx_coalesce(pdata);
+
+ pdata->tx_usecs = ec->tx_coalesce_usecs;
+ hw_ops->set_interrupt_moderation(pdata);
+
+ DPRINTK("fxmac, set coalesce\n");
+ return 0;
+}
+
+#if (FXGMAC_RSS_FEATURE_ENABLED)
+static u32 fxgmac_get_rxfh_key_size(struct net_device *netdev)
+{
+ return FXGMAC_RSS_HASH_KEY_SIZE;
+}
+
+static u32 fxgmac_rss_indir_size(struct net_device *netdev)
+{
+ return FXGMAC_RSS_MAX_TABLE_SIZE;
+}
+
+static void fxgmac_get_reta(struct fxgmac_pdata *pdata, u32 *indir)
+{
+ int i, reta_size = FXGMAC_RSS_MAX_TABLE_SIZE;
+ u16 rss_m;
+#ifdef FXGMAC_ONE_CHANNLE
+ rss_m = FXGMAC_MAX_DMA_CHANNELS;
+#else
+ rss_m = FXGMAC_MAX_DMA_CHANNELS -
+ 1; /* mask for index of channel, 0-3 */
+#endif
+
+ for (i = 0; i < reta_size; i++)
+ indir[i] = pdata->rss_table[i] & rss_m;
+}
+
+static int fxgmac_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+ /* ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP)
+ * ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR)
+ * ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32)
+ */
+ if (hfunc) {
+ *hfunc = ETH_RSS_HASH_TOP;
+ DPRINTK("fxmac, get_rxfh for hash function\n");
+ }
+
+ if (indir) {
+ fxgmac_get_reta(pdata, indir);
+ DPRINTK("fxmac, get_rxfh for indirection table\n");
+ }
+
+ if (key) {
+ memcpy(key, pdata->rss_key, fxgmac_get_rxfh_key_size(netdev));
+ DPRINTK("fxmac, get_rxfh for hash key\n");
+ }
+
+ return 0;
+}
+
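+/* Program the RSS redirection table and hash key. Only the Toeplitz hash
+ * (ETH_RSS_HASH_TOP, as reported by fxgmac_get_rxfh) is supported, so any
+ * request to change hfunc is rejected.
+ */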
+static int fxgmac_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+ struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ int i;
+ u32 reta_entries = fxgmac_rss_indir_size(netdev);
+ int max_queues = FXGMAC_MAX_DMA_CHANNELS;
+
+ DPRINTK("fxmac, set_rxfh callin, indir=%lx, key=%lx, func=%02x\n",
+ (unsigned long)indir, (unsigned long)key, hfunc);
+
+ if (hfunc)
+ return -EINVAL;
+
+ /* Fill out the redirection table */
+ if (indir) {
+#if FXGMAC_MSIX_CH0RXDIS_EN
+ /* silence unused-variable warnings */
+ max_queues = max_queues;
+ reta_entries = reta_entries;
+ i = i;
+ DPRINTK("fxmac, set_rxfh, change of indirection table is not supported.\n");
+ return -EINVAL;
+#else
+ /* Double-check the user input */
+ for (i = 0; i < reta_entries; i++)
+ if (indir[i] >= max_queues)
+ return -EINVAL;
+
+ for (i = 0; i < reta_entries; i++)
+ pdata->rss_table[i] = indir[i];
+
+ hw_ops->write_rss_lookup_table(pdata);
+#endif
+ }
+
+ /* Fill out the rss hash key */
+ if (FXGMAC_RSS_HASH_KEY_LINUX && key)
+ hw_ops->set_rss_hash_key(pdata, key);
+
+ return 0;
+}
+
+static int fxgmac_get_rss_hash_opts(struct fxgmac_pdata *pdata,
+ struct ethtool_rxnfc *cmd)
+{
+ struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ u32 reg_opt;
+
+ cmd->data = 0;
+
+ reg_opt = hw_ops->get_rss_options(pdata);
+ DPRINTK("fxgmac_get_rss_hash_opts, hw=%02x, %02x\n", reg_opt,
+ pdata->rss_options);
+
+ if (reg_opt != pdata->rss_options)
+ DPRINTK("fxgmac_get_rss_hash_opts, warning, options are not consistent\n");
+
+ /* Report default options for RSS */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ if (((TCP_V4_FLOW == (cmd->flow_type)) &&
+ (FXGMAC_GET_REG_BITS(pdata->rss_options,
+ MAC_RSSCR_TCP4TE_POS,
+ MAC_RSSCR_TCP4TE_LEN))) ||
+ ((UDP_V4_FLOW == (cmd->flow_type)) &&
+ (FXGMAC_GET_REG_BITS(pdata->rss_options,
+ MAC_RSSCR_UDP4TE_POS,
+ MAC_RSSCR_UDP4TE_LEN)))) {
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ }
+ fallthrough;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (((TCP_V4_FLOW == (cmd->flow_type)) &&
+ (FXGMAC_GET_REG_BITS(pdata->rss_options,
+ MAC_RSSCR_TCP4TE_POS,
+ MAC_RSSCR_TCP4TE_LEN))) ||
+ ((UDP_V4_FLOW == (cmd->flow_type)) &&
+ (FXGMAC_GET_REG_BITS(pdata->rss_options,
+ MAC_RSSCR_UDP4TE_POS,
+ MAC_RSSCR_UDP4TE_LEN))) ||
+ (FXGMAC_GET_REG_BITS(pdata->rss_options,
+ MAC_RSSCR_IP4TE_POS,
+ MAC_RSSCR_IP4TE_LEN))) {
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ }
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ if (((TCP_V6_FLOW == (cmd->flow_type)) &&
+ (FXGMAC_GET_REG_BITS(pdata->rss_options,
+ MAC_RSSCR_TCP6TE_POS,
+ MAC_RSSCR_TCP6TE_LEN))) ||
+ ((UDP_V6_FLOW == (cmd->flow_type)) &&
+ (FXGMAC_GET_REG_BITS(pdata->rss_options,
+ MAC_RSSCR_UDP6TE_POS,
+ MAC_RSSCR_UDP6TE_LEN)))) {
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ }
+ fallthrough;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (((TCP_V6_FLOW == (cmd->flow_type)) &&
+ (FXGMAC_GET_REG_BITS(pdata->rss_options,
+ MAC_RSSCR_TCP6TE_POS,
+ MAC_RSSCR_TCP6TE_LEN))) ||
+ ((UDP_V6_FLOW == (cmd->flow_type)) &&
+ (FXGMAC_GET_REG_BITS(pdata->rss_options,
+ MAC_RSSCR_UDP6TE_POS,
+ MAC_RSSCR_UDP6TE_LEN))) ||
+ (FXGMAC_GET_REG_BITS(pdata->rss_options,
+ MAC_RSSCR_IP6TE_POS,
+ MAC_RSSCR_IP6TE_LEN))) {
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fxgmac_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = pdata->rx_q_count;
+ ret = 0;
+ DPRINTK("fxmac, get_rxnfc for rx ring cnt\n");
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = 0;
+ ret = 0;
+ DPRINTK("fxmac, get_rxnfc for classify rule cnt\n");
+ break;
+ case ETHTOOL_GRXCLSRULE:
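+ /* No Rx classification rules are stored by this driver, so
+ * rule read-back is a no-op that reports success.
+ */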
+ DPRINTK("fxmac, get_rxnfc for classify rules\n"); + ret = 0; /* ixgbe_get_ethtool_fdir_entry(adapter, cmd); */ + break; + case ETHTOOL_GRXCLSRLALL: + cmd->rule_cnt = 0; + ret = 0; + /*ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, + (u32 *)rule_locs); + */ + DPRINTK("fxmac, get_rxnfc for classify both cnt and rules\n"); + break; + case ETHTOOL_GRXFH: + ret = fxgmac_get_rss_hash_opts(pdata, cmd); + DPRINTK("fxmac, get_rxnfc for hash options\n"); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (BIT(MAC_RSSCR_UDP4TE_POS) | BIT(MAC_RSSCR_UDP6TE_POS)) +static int fxgmac_set_rss_hash_opt(struct fxgmac_pdata *pdata, + struct ethtool_rxnfc *nfc) +{ + u32 rssopt = 0; /* pdata->rss_options; */ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + DPRINTK("fxgmac_set_rss_hash_opt call in, nfc_data=%llx, cur opt=%x\n", + nfc->data, pdata->rss_options); + + /* For RSS, it does not support anything other than hashing + * to queues on src, dst IPs and L4 ports + */ + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + /* default to TCP flow and do nothting */ + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + if (TCP_V4_FLOW == (nfc->flow_type)) { + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_IP4TE_POS, + MAC_RSSCR_IP4TE_LEN, 1); + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_TCP4TE_POS, + MAC_RSSCR_TCP4TE_LEN, 1); + } + + if (TCP_V6_FLOW == (nfc->flow_type)) { + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_IP6TE_POS, + MAC_RSSCR_IP6TE_LEN, 1); + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_TCP6TE_POS, + MAC_RSSCR_TCP6TE_LEN, 1); + } + break; + + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST)) + return -EINVAL; + rssopt = FXGMAC_SET_REG_BITS(rssopt, MAC_RSSCR_IP4TE_POS, + MAC_RSSCR_IP4TE_LEN, 1); + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_UDP4TE_POS, + MAC_RSSCR_UDP4TE_LEN, 1); + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST)) + return -EINVAL; + rssopt = FXGMAC_SET_REG_BITS(rssopt, MAC_RSSCR_IP6TE_POS, + MAC_RSSCR_IP6TE_LEN, 1); + + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_UDP6TE_POS, + MAC_RSSCR_UDP6TE_LEN, 1); + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if options are changed, then update to hw */ + if (rssopt != pdata->rss_options) { + if ((rssopt & UDP_RSS_FLAGS) && + !(pdata->rss_options & UDP_RSS_FLAGS)) + DPRINTK("enabling UDP RSS: fragmented packets" + " may arrive out of order to the stack above\n"); + + DPRINTK("rss option changed from %x to %x\n", + pdata->rss_options, rssopt); + pdata->rss_options = rssopt; + hw_ops->set_rss_options(pdata); + } + + return 0; +} + +static int fxgmac_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc 
+static int fxgmac_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ /* Rx classifier rule insertion is not supported */
+ DPRINTK("set_rxnfc for rx cls rule insert: not supported\n");
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ /* Rx classifier rule deletion is not supported */
+ DPRINTK("set_rxnfc for rx cls rule del: not supported\n");
+ break;
+ case ETHTOOL_SRXFH:
+ DPRINTK("set_rxnfc for rx rss option\n");
+ ret = fxgmac_set_rss_hash_opt(pdata, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+#endif /* FXGMAC_RSS_FEATURE_ENABLED */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
+static void fxgmac_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+#else
+static void fxgmac_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+#endif
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+ DPRINTK("fxmac, get_ringparam callin\n");
+
+ ring->rx_max_pending = FXGMAC_RX_DESC_CNT;
+ ring->tx_max_pending = FXGMAC_TX_DESC_CNT;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = pdata->rx_desc_count;
+ ring->tx_pending = pdata->tx_desc_count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
+static int fxgmac_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+#else
+static int fxgmac_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+#endif
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+ struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops;
+
+ DPRINTK("fxmac, set_ringparam callin\n");
+
+ pdata->tx_desc_count = ring->tx_pending;
+ pdata->rx_desc_count = ring->rx_pending;
+
+ fxgmac_stop(pdata);
+ fxgmac_free_tx_data(pdata);
+ fxgmac_free_rx_data(pdata);
+ desc_ops->alloc_channles_and_rings(pdata);
+ fxgmac_start(pdata);
+
+ return 0;
+}
+
+#if FXGMAC_WOL_FEATURE_ENABLED
+static void fxgmac_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+ /* for future feature implementation
+ * wol->supported = WAKE_PHY | WAKE_UCAST | WAKE_MCAST |
+ * WAKE_BCAST | WAKE_MAGIC;
+ */
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC |
+ WAKE_ARP;
+#if FXGMAC_WOL_UPON_EPHY_LINK
+ wol->supported |= WAKE_PHY;
+#endif
+
+ wol->wolopts = 0;
+ if (!(pdata->hw_feat.rwk) || !device_can_wakeup(pdata->dev)) {
+ DPRINTK("fxgmac get_wol, pci does not support wakeup\n");
+ return;
+ }
+ wol->wolopts = pdata->expansion.wol;
+ DPRINTK("fxmac, get_wol, 0x%x, 0x%x\n", wol->wolopts,
+ pdata->expansion.wol);
+}
+
+static int fxgmac_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+ struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ int ret;
+
+ /* currently, we do not support these options */
+#if FXGMAC_WOL_UPON_EPHY_LINK
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
+ if (wol->wolopts & (WAKE_MAGICSECURE | WAKE_FILTER)) {
+#else
+ if (wol->wolopts & WAKE_MAGICSECURE) {
+#endif
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
+ if (wol->wolopts & (WAKE_PHY | WAKE_MAGICSECURE | WAKE_FILTER)) {
+#else
+ if (wol->wolopts & (WAKE_PHY | WAKE_MAGICSECURE)) {
+#endif
+#endif
+ DPRINTK("fxmac, set_wol, unsupported wol options, 0x%x\n",
+ wol->wolopts);
+ return -EOPNOTSUPP;
+ }
+
+ if (!(pdata->hw_feat.rwk)) {
+ DPRINTK("fxmac, set_wol, hw wol feature is n/a\n");
+ ret = (wol->wolopts ? -EOPNOTSUPP : 0);
+ return ret;
+ }
+
+ pdata->expansion.wol = 0;
+ if (wol->wolopts & WAKE_UCAST)
+ pdata->expansion.wol |= WAKE_UCAST;
+
+ if (wol->wolopts & WAKE_MCAST)
+ pdata->expansion.wol |= WAKE_MCAST;
+
+ if (wol->wolopts & WAKE_BCAST)
+ pdata->expansion.wol |= WAKE_BCAST;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ pdata->expansion.wol |= WAKE_MAGIC;
+
+ if (wol->wolopts & WAKE_PHY)
+ pdata->expansion.wol |= WAKE_PHY;
+
+ if (wol->wolopts & WAKE_ARP)
+ pdata->expansion.wol |= WAKE_ARP;
+
+ hw_ops->set_pattern_data(pdata);
+
+ hw_ops->config_wol(pdata, (!!(pdata->expansion.wol)));
+
+ DPRINTK("fxmac, set_wol, opt=0x%x, 0x%x\n", wol->wolopts,
+ pdata->expansion.wol);
+
+ return 0;
+}
+#endif /*FXGMAC_WOL_FEATURE_ENABLED*/
+
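+/* Expose the external PHY registers through ethtool --register-dump;
+ * .version encodes the 32-bit PHY ID from the two MII PHYSID registers.
+ */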
+static int fxgmac_get_regs_len(struct net_device __always_unused *netdev)
+{
+ return FXGMAC_EPHY_REGS_LEN * sizeof(u32);
+}
+
+static void fxgmac_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+ struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ u32 *regs_buff = p;
+ u8 i;
+
+ memset(p, 0, FXGMAC_EPHY_REGS_LEN * sizeof(u32));
+ for (i = REG_MII_BMCR; i < FXGMAC_EPHY_REGS_LEN; i++)
+ hw_ops->read_ephy_reg(pdata, i, (unsigned int *)&regs_buff[i]);
+
+ regs->version = regs_buff[REG_MII_PHYSID1] << 16 |
+ regs_buff[REG_MII_PHYSID2];
+}
+
+#if FXGMAC_PAUSE_FEATURE_ENABLED
+static int fxgmac_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+ struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ u32 duplex, regval, link_status;
+ u32 adv = 0xFFFFFFFF;
+
+ regval = fxgmac_ephy_autoneg_ability_get(pdata, &adv);
+ if (regval)
+ return -ETIMEDOUT;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ /* set the supported link speeds */
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+
+ /* Indicate pause support */
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause);
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, MII);
+ cmd->base.port = PORT_MII;
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &regval);
+ regval = FXGMAC_GET_REG_BITS(regval, PHY_CR_AUTOENG_POS,
+ PHY_CR_AUTOENG_LEN);
+ if (regval) {
+ if (pdata->phy_autoeng)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Autoneg);
+ else
+ clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ cmd->link_modes.advertising);
+
+ if (adv & FXGMAC_ADVERTISE_10HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Half);
+ if (adv & FXGMAC_ADVERTISE_10FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+ if (adv & FXGMAC_ADVERTISE_100HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Half);
+ if (adv & FXGMAC_ADVERTISE_100FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Full);
+ if (adv & FXGMAC_ADVERTISE_1000FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Full);
+ } else {
+ clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ cmd->link_modes.advertising);
+ switch (pdata->phy_speed) {
+ case SPEED_1000M:
+ if (pdata->phy_duplex)
+ ethtool_link_ksettings_add_link_mode(
+ cmd, advertising, 1000baseT_Full);
+ else
+ ethtool_link_ksettings_add_link_mode(
+ cmd, advertising, 1000baseT_Half);
+ break;
+ case SPEED_100M:
+ if (pdata->phy_duplex)
+ ethtool_link_ksettings_add_link_mode(
+ cmd, advertising, 100baseT_Full);
+ else
+ ethtool_link_ksettings_add_link_mode(
+ cmd, advertising, 100baseT_Half);
+ break;
+ case SPEED_10M:
+ if (pdata->phy_duplex)
+ ethtool_link_ksettings_add_link_mode(
+ cmd, advertising, 10baseT_Full);
+ else
+ ethtool_link_ksettings_add_link_mode(
+ cmd, advertising, 10baseT_Half);
+ break;
+ default:
+ break;
+ }
+ }
+ cmd->base.autoneg = pdata->phy_autoeng ? regval : 0;
+
+ hw_ops->read_ephy_reg(pdata, REG_MII_SPEC_STATUS, &regval);
+ link_status = regval & (BIT(FUXI_EPHY_LINK_STATUS_BIT));
+ if (link_status) {
+ duplex = FXGMAC_GET_REG_BITS(regval, PHY_MII_SPEC_DUPLEX_POS,
+ PHY_MII_SPEC_DUPLEX_LEN);
+ cmd->base.duplex = duplex;
+ cmd->base.speed = pdata->phy_speed;
+ } else {
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int fxgmac_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+ struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ u32 advertising, support, adv;
+ int ret;
+
+ if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+
+ pdata->phy_autoeng = cmd->base.autoneg;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+ ethtool_convert_link_mode_to_legacy_u32(&support,
+ cmd->link_modes.supported);
+ advertising &= support;
+
+ /* 1000BASE-T is only reachable through autonegotiation */
+ if (pdata->phy_autoeng || cmd->base.speed == SPEED_1000) {
+ ret = hw_ops->read_ephy_reg(pdata, REG_MII_ADVERTISE, &adv);
+ if (ret < 0)
+ return -ETIMEDOUT;
+ adv &= ~REG_BIT_ADVERTISE_100_10_CAP;
+ adv |= ethtool_adv_to_mii_adv_t(advertising);
+ ret = hw_ops->write_ephy_reg(pdata, REG_MII_ADVERTISE, adv);
+ if (ret < 0)
+ return -ETIMEDOUT;
+ ret = hw_ops->read_ephy_reg(pdata, REG_MII_CTRL1000, &adv);
+ if (ret < 0)
+ return -ETIMEDOUT;
+ adv &= ~REG_BIT_ADVERTISE_1000_CAP;
+ adv |= ethtool_adv_to_mii_ctrl1000_t(advertising);
+ ret = hw_ops->write_ephy_reg(pdata, REG_MII_CTRL1000, adv);
+ if (ret < 0)
+ return -ETIMEDOUT;
+
+ ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &adv);
+ if (ret < 0)
+ return -ETIMEDOUT;
+ adv = FXGMAC_SET_REG_BITS(adv, PHY_CR_AUTOENG_POS,
+ PHY_CR_AUTOENG_LEN, 1);
+ ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, adv);
+ if (ret < 0)
+ return -ETIMEDOUT;
+
+ ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &adv);
+ if (ret < 0)
+ return -ETIMEDOUT;
+ adv = FXGMAC_SET_REG_BITS(adv, PHY_CR_RE_AUTOENG_POS,
+ PHY_CR_RE_AUTOENG_LEN, 1);
+ ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, adv);
+ if (ret < 0)
+ return -ETIMEDOUT;
+ } else {
+ pdata->phy_duplex = cmd->base.duplex;
+ pdata->phy_speed = cmd->base.speed;
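+ /* Autoneg is off: push the forced speed, duplex and autoneg
+ * state straight into the PHY registers.
+ */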
+ fxgmac_phy_force_speed(pdata, pdata->phy_speed);
+ fxgmac_phy_force_duplex(pdata, pdata->phy_duplex);
+ fxgmac_phy_force_autoneg(pdata, pdata->phy_autoeng);
+ }
+
+ ret = fxgmac_ephy_soft_reset(pdata);
+ if (ret) {
+ netdev_err(netdev, "ephy soft reset timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void fxgmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+ pause->autoneg = 1;
+ pause->rx_pause = pdata->rx_pause;
+ pause->tx_pause = pdata->tx_pause;
+
+ DPRINTK("fxmac get_pauseparam done, rx=%d, tx=%d\n", pdata->rx_pause,
+ pdata->tx_pause);
+}
+
+static int fxgmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+ struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ unsigned int pre_rx_pause = pdata->rx_pause;
+ unsigned int pre_tx_pause = pdata->tx_pause;
+
+ pdata->rx_pause = pause->rx_pause;
+ pdata->tx_pause = pause->tx_pause;
+
+ if (pre_rx_pause != pdata->rx_pause) {
+ hw_ops->config_rx_flow_control(pdata);
+ DPRINTK("fxgmac set pause parameter, rx from %d to %d\n",
+ pre_rx_pause, pdata->rx_pause);
+ }
+ if (pre_tx_pause != pdata->tx_pause) {
+ hw_ops->config_tx_flow_control(pdata);
+ DPRINTK("fxgmac set pause parameter, tx from %d to %d\n",
+ pre_tx_pause, pdata->tx_pause);
+ }
+
+ DPRINTK("fxgmac set pause parameter, autoneg=%d, rx=%d, tx=%d\n",
+ pause->autoneg, pause->rx_pause, pause->tx_pause);
+
+ return 0;
+}
+#endif /*FXGMAC_PAUSE_FEATURE_ENABLED*/
+
+static void fxgmac_ethtool_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < FXGMAC_STATS_COUNT; i++) {
+ memcpy(data, fxgmac_gstring_stats[i].stat_string,
+ strlen(fxgmac_gstring_stats[i].stat_string));
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int fxgmac_ethtool_get_sset_count(struct net_device *netdev,
+ int stringset)
+{
+ int ret;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = FXGMAC_STATS_COUNT;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static void fxgmac_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct fxgmac_pdata *pdata = netdev_priv(netdev);
+ u8 *stat;
+ int i;
+
+#if FXGMAC_PM_FEATURE_ENABLED
+ /* do not touch the registers while the NIC is powered down */
+ if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate))
+#endif
+ {
+ pdata->hw_ops.read_mmc_stats(pdata);
+ }
+
+ for (i = 0; i < FXGMAC_STATS_COUNT; i++) {
+ stat = (u8 *)pdata + fxgmac_gstring_stats[i].stat_offset;
+ *data++ = *(u64 *)stat;
+ }
+}
+
+static const struct ethtool_ops fxgmac_ethtool_ops = {
+ .get_drvinfo = fxgmac_ethtool_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = fxgmac_ethtool_get_msglevel,
+ .set_msglevel = fxgmac_ethtool_set_msglevel,
+ .get_channels = fxgmac_ethtool_get_channels,
+ .get_coalesce = fxgmac_ethtool_get_coalesce,
+ .set_coalesce = fxgmac_ethtool_set_coalesce,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+/* The ethtool core implements set_coalesce as get-then-set: fields that
+ * get_coalesce does not fill in keep their previous values. Every field
+ * assigned in get_coalesce therefore needs its matching bit set in
+ * supported_coalesce_params, otherwise the core rejects the set request
+ * with "Operation not supported".
+ */
+#ifdef ETHTOOL_COALESCE_USECS
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+#endif
+#endif
+ .get_strings = fxgmac_ethtool_get_strings,
+ .get_sset_count = fxgmac_ethtool_get_sset_count,
+ .get_ethtool_stats = fxgmac_ethtool_get_ethtool_stats,
+ .get_regs_len = fxgmac_get_regs_len,
+ .get_regs = fxgmac_get_regs,
+ .get_ringparam = fxgmac_get_ringparam,
+ .set_ringparam = fxgmac_set_ringparam,
+#if (FXGMAC_RSS_FEATURE_ENABLED)
+ .get_rxnfc = fxgmac_get_rxnfc,
+ .set_rxnfc = fxgmac_set_rxnfc,
+ .get_rxfh_indir_size = fxgmac_rss_indir_size,
+ .get_rxfh_key_size = fxgmac_get_rxfh_key_size,
+ .get_rxfh = fxgmac_get_rxfh,
+ .set_rxfh = fxgmac_set_rxfh,
+#endif
+#if (FXGMAC_WOL_FEATURE_ENABLED)
+ .get_wol = fxgmac_get_wol,
+ .set_wol = fxgmac_set_wol,
+#endif
+#if (FXGMAC_PAUSE_FEATURE_ENABLED)
+#ifdef ETHTOOL_GLINKSETTINGS
+ .get_link_ksettings = fxgmac_get_link_ksettings,
+ .set_link_ksettings = fxgmac_set_link_ksettings,
+#endif /* ETHTOOL_GLINKSETTINGS */
+ .get_pauseparam = fxgmac_get_pauseparam,
+ .set_pauseparam = fxgmac_set_pauseparam,
+#endif
+};
+
+const struct ethtool_ops *fxgmac_get_ethtool_ops(void)
+{
+ return &fxgmac_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c
new file mode 100644
index 0000000000000000000000000000000000000000..0517968365d744bc961c2cd4bd630dbc4f691582
--- /dev/null
+++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c
@@ -0,0 +1,6256 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Motorcomm Corporation.
*/ + +#include "fuxi-os.h" +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" +#include "fuxi-efuse.h" + +void fxgmac_release_phy(struct fxgmac_pdata *pdata); +static void fxgmac_pwr_clock_ungate(struct fxgmac_pdata *pdata); +static void fxgmac_pwr_clock_gate(struct fxgmac_pdata *pdata); + +static int fxgmac_tx_complete(struct fxgmac_dma_desc *dma_desc) +{ +#if (FXGMAC_DUMMY_TX_DEBUG) + return 1; +#endif + return !FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN); +} + +static int fxgmac_disable_rx_csum(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_IPC_POS, MAC_CR_IPC_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + DPRINTK("fxgmac disable rx checksum.\n"); + return 0; +} + +static int fxgmac_enable_rx_csum(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_IPC_POS, MAC_CR_IPC_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + DPRINTK("fxgmac enable rx checksum.\n"); + return 0; +} + +static int fxgmac_set_mac_address(struct fxgmac_pdata *pdata, u8 *addr) +{ + unsigned int mac_addr_hi, mac_addr_lo; + + mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); + mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | + (addr[0] << 0); + + writereg(pdata->pAdapter, mac_addr_hi, pdata->mac_regs + MAC_MACA0HR); + writereg(pdata->pAdapter, mac_addr_lo, pdata->mac_regs + MAC_MACA0LR); + + return 0; +} + +#if !defined(DPDK) +static void fxgmac_set_mac_reg(struct fxgmac_pdata *pdata, + struct netdev_hw_addr *ha, unsigned int *mac_reg) +{ + unsigned int mac_addr_hi, mac_addr_lo; + u8 *mac_addr; + + mac_addr_lo = 0; + mac_addr_hi = 0; + + if (ha) { + mac_addr = (u8 *)&mac_addr_lo; + mac_addr[0] = ha->addr[0]; + mac_addr[1] = ha->addr[1]; + mac_addr[2] = ha->addr[2]; + mac_addr[3] = ha->addr[3]; + mac_addr = (u8 *)&mac_addr_hi; + mac_addr[0] = ha->addr[4]; + mac_addr[1] = ha->addr[5]; + + netif_dbg(pdata, drv, pdata->netdev, + "adding mac address %pM at %#x\n", ha->addr, + *mac_reg); + + mac_addr_hi = FXGMAC_SET_REG_BITS( + mac_addr_hi, MAC_MACA1HR_AE_POS, MAC_MACA1HR_AE_LEN, 1); + } + + writereg(pdata->pAdapter, mac_addr_hi, pdata->mac_regs + *mac_reg); + *mac_reg += MAC_MACA_INC; + writereg(pdata->pAdapter, mac_addr_lo, pdata->mac_regs + *mac_reg); + *mac_reg += MAC_MACA_INC; +} +#endif + +static int fxgmac_enable_tx_vlan(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANIR); + /* Indicate that VLAN Tx CTAGs come from mac_vlan_incl register */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS, + MAC_VLANIR_VLTI_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS, + MAC_VLANIR_CSVL_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLP_POS, + MAC_VLANIR_VLP_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLC_POS, + MAC_VLANIR_VLC_LEN, 2); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLT_POS, + MAC_VLANIR_VLT_LEN, pdata->vlan); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANIR); + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS, + MAC_VLANTR_VL_LEN, pdata->vlan); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR); + + return 0; +} + +static int fxgmac_disable_tx_vlan(struct fxgmac_pdata 
*pdata)
+{
+ u32 regval;
+
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANIR);
+
+ /* Take the Tx VLAN tag from the per-packet context descriptor
+ * rather than from the mac_vlan_incl register
+ */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS,
+ MAC_VLANIR_CSVL_LEN, 0);
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS,
+ MAC_VLANIR_VLTI_LEN, 1);
+ /* Disable VLAN priority control */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLP_POS,
+ MAC_VLANIR_VLP_LEN, 0);
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLC_POS,
+ MAC_VLANIR_VLC_LEN, 0);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANIR);
+
+ return 0;
+}
+
+static int fxgmac_enable_rx_vlan_stripping(struct fxgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR);
+ /* Put the VLAN tag in the Rx descriptor */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS,
+ MAC_VLANTR_EVLRXS_LEN, 1);
+ /* Don't check the VLAN type */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS,
+ MAC_VLANTR_DOVLTC_LEN, 1);
+ /* Check only C-TAG (0x8100) packets */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS,
+ MAC_VLANTR_ERSVLM_LEN, 0);
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS,
+ MAC_VLANTR_ESVL_LEN, 0);
+ /* Enable VLAN tag stripping */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
+ MAC_VLANTR_EVLS_LEN, 0x3);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR);
+ DPRINTK("fxgmac enable MAC rx vlan stripping.\n");
+
+ return 0;
+}
+
+static int fxgmac_disable_rx_vlan_stripping(struct fxgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR);
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
+ MAC_VLANTR_EVLS_LEN, 0);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR);
+ DPRINTK("fxgmac disable MAC rx vlan stripping.\n");
+
+ return 0;
+}
+
+static int fxgmac_enable_rx_vlan_filtering(struct fxgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR);
+ /* Enable VLAN filtering */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, MAC_PFR_VTFE_LEN,
+ 1);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR);
+
+#if FXGMAC_FILTER_SINGLE_VLAN_ENABLED
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR);
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS,
+ MAC_VLANTR_VL_LEN, pdata->vlan);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR);
+#else
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR);
+ /* Enable VLAN Hash Table filtering */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS,
+ MAC_VLANTR_VTHM_LEN, 1);
+ /* Disable VLAN tag inverse matching */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS,
+ MAC_VLANTR_VTIM_LEN, 0);
+ /* Only filter on the lower 12-bits of the VLAN tag */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS,
+ MAC_VLANTR_ETV_LEN, 1);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR);
+#endif
+
+ return 0;
+}
+
+static int fxgmac_disable_rx_vlan_filtering(struct fxgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR);
+ /* Disable VLAN filtering */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, MAC_PFR_VTFE_LEN,
+ 0);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR);
+
+#if FXGMAC_FILTER_SINGLE_VLAN_ENABLED
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR);
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS,
+ MAC_VLANTR_VL_LEN, pdata->vlan);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR);
+#endif
+
+ return 0;
+}
+
+#if FXGMAC_FILTER_MULTIPLE_VLAN_ENABLED
+static u32 fxgmac_vid_crc32_le(__le16 vid_le)
+{
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ u32 crc = ~0;
+ u32 temp = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= CRC32_POLY_LE;
+ }
+
+ return crc;
+}
+#endif
+
+static int fxgmac_update_vlan_hash_table(struct fxgmac_pdata *pdata)
+{
+ u16 vlan_hash_table = 0;
+ u32 regval;
+#if FXGMAC_FILTER_MULTIPLE_VLAN_ENABLED
+ __le16 vid_le;
+ u32 crc;
+ u16 vid;
+ /* Generate the VLAN Hash Table value */
+ for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+ /* Get the CRC32 value of the VLAN ID */
+ vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~fxgmac_vid_crc32_le(vid_le)) >> 28;
+
+ vlan_hash_table |= (1 << crc);
+ }
+#endif
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANHTR);
+ /* Set the VLAN Hash Table filtering register */
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS,
+ MAC_VLANHTR_VLHT_LEN, vlan_hash_table);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANHTR);
+
+ DPRINTK("fxgmac_update_vlan_hash_table done, hash table=%08x.\n",
+ vlan_hash_table);
+ return 0;
+}
+
+static int fxgmac_set_promiscuous_mode(struct fxgmac_pdata *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+ u32 regval;
+
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR);
+
+ if (FXGMAC_GET_REG_BITS(regval, MAC_PFR_PR_POS, MAC_PFR_PR_LEN) == val)
+ return 0;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "" STR_FORMAT " promiscuous mode\n",
+ enable ? "entering" : "leaving");
+
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS, MAC_PFR_PR_LEN,
+ val);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR);
+
+ DbgPrintF(MP_TRACE, "" STR_FORMAT " - promiscuous mode=%d, reg=%x.",
+ __FUNCTION__, enable, regval);
+ DbgPrintF(
+ MP_TRACE,
+ "" STR_FORMAT
+ " - note, vlan filter is called when set promiscuous mode=%d.",
+ __FUNCTION__, enable);
+
+ /* Hardware will still perform VLAN filtering in promiscuous mode */
+ if (enable) {
+ fxgmac_disable_rx_vlan_filtering(pdata);
+ } else {
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ fxgmac_enable_rx_vlan_filtering(pdata);
+ }
+
+ DPRINTK("fxgmac set promisc mode=%d\n", enable);
+ return 0;
+}
+
+static int fxgmac_enable_rx_broadcast(struct fxgmac_pdata *pdata,
+ unsigned int enable)
+{
+ /* the MAC register bit is a "disable" flag, so invert the value */
+ unsigned int val = enable ? 0 : 1;
+ u32 regval;
+
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR);
+
+ if (FXGMAC_GET_REG_BITS(regval, MAC_PFR_DBF_POS, MAC_PFR_DBF_LEN) ==
+ val)
+ return 0;
+
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_DBF_POS, MAC_PFR_DBF_LEN,
+ val);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR);
+
+ DbgPrintF(MP_TRACE, "%s - bcast en=%d, bit-val=%d, reg=%x.",
+ __FUNCTION__, enable, val, regval);
+ return 0;
+}
+
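+/* Toggle the MAC "pass all multicast" filter bit so every multicast
+ * frame is accepted without hash filtering.
+ */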
+static int fxgmac_set_all_multicast_mode(struct fxgmac_pdata *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+ u32 regval;
+
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR);
+ if (FXGMAC_GET_REG_BITS(regval, MAC_PFR_PM_POS, MAC_PFR_PM_LEN) == val)
+ return 0;
+
+ netif_dbg(pdata, drv, pdata->netdev, "" STR_FORMAT " allmulti mode\n",
+ enable ? "entering" : "leaving");
+
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS, MAC_PFR_PM_LEN,
+ val);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR);
+
+ DbgPrintF(MP_TRACE,
+ "" STR_FORMAT " - Enable all Multicast=%d, regval=%#x.",
+ __FUNCTION__, enable, regval);
+
+ return 0;
+}
+
+static void fxgmac_set_mac_addn_addrs(struct fxgmac_pdata *pdata)
+{
+#ifndef DPDK
+#if FXGMAC_FILTER_MULTIPLE_MAC_ADDR_ENABLED
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+#endif
+ unsigned int addn_macs;
+ unsigned int mac_reg;
+
+ mac_reg = MAC_MACA1HR;
+ addn_macs = pdata->hw_feat.addn_mac;
+#if FXGMAC_FILTER_MULTIPLE_MAC_ADDR_ENABLED
+ DPRINTK("fxgmac add mac addr callin\n");
+ if (netdev_uc_count(netdev) > addn_macs) {
+ fxgmac_set_promiscuous_mode(pdata, 1);
+ } else {
+ netdev_for_each_uc_addr(ha, netdev) {
+ fxgmac_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+
+ if (netdev_mc_count(netdev) > addn_macs) {
+ fxgmac_set_all_multicast_mode(pdata, 1);
+ } else {
+ netdev_for_each_mc_addr(ha, netdev) {
+ fxgmac_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+ }
+ }
+#endif
+ /* Clear remaining additional MAC address entries */
+ while (addn_macs--)
+ fxgmac_set_mac_reg(pdata, NULL, &mac_reg);
+#else
+ (void)pdata;
+#endif
+}
+
+#define GET_REG_AND_BIT_POS(reversalval, regOut, bitOut) \
+ do { \
+ regOut = (((reversalval) >> 5) & 0x7); \
+ bitOut = ((reversalval) & 0x1f); \
+ } while (0)
+
+static u32 fxgmac_crc32(unsigned char *data, int length)
+{
+ u32 crc = (u32)~0; /* initial value, 0xFFFFFFFF */
+
+ while (--length >= 0) {
+ unsigned char byte = *data++;
+ int bit;
+
+ for (bit = 8; --bit >= 0; byte >>= 1) {
+ if ((crc ^ byte) & 1) {
+ crc >>= 1;
+ crc ^= 0xedb88320;
+ } else {
+ crc >>= 1;
+ }
+ }
+ }
+
+ return ~crc;
+}
+
+/*
+ * Configure the multicast hash table, regs 0x2010~0x202c.
+ * pmc_mac: pointer to the multicast MAC; if NULL, clear all registers.
+ * b_add: 1 to set the bit, 0 to clear it.
+ */
+static void fxgmac_config_multicast_mac_hash_table(struct fxgmac_pdata *pdata,
+ unsigned char *pmc_mac,
+ int b_add)
+{
+ unsigned int hash_reg, reg_bit;
+ unsigned int j;
+ u32 crc, reversal_crc, regval;
+
+ if (!pmc_mac) {
+ for (j = 0; j < FXGMAC_MAC_HASH_TABLE_SIZE; j++) {
+ hash_reg = (MAC_HTR0 + j * MAC_HTR_INC);
+ writereg(pdata->pAdapter, 0,
+ pdata->mac_regs + hash_reg);
+ }
+ DBGPRINT(MP_TRACE,
+ ("<%s: clear all multicast mac hash table>",
+ __FUNCTION__));
+ return;
+ }
+
+ /* Use the top bits of the bit-reversed CRC32 of the multicast MAC
+ * to locate the hash register and the bit within it.
+ */
+ crc = fxgmac_crc32(pmc_mac, ETH_ALEN);
+ reversal_crc = bitrev32(crc);
+ GET_REG_AND_BIT_POS((reversal_crc >> 24), hash_reg, reg_bit);
+
+ /* Set the MAC Hash Table registers */
+ hash_reg = (MAC_HTR0 + hash_reg * MAC_HTR_INC);
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + hash_reg);
+
+ regval = FXGMAC_SET_REG_BITS(regval, reg_bit, 1, (b_add ? 1 : 0));
+
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + hash_reg);
+}
+
+static void fxgmac_set_mac_hash_table(struct fxgmac_pdata *pdata)
+{
+#ifndef DPDK
+#if FUXI_MAC_HASH_TABLE
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ fxgmac_config_multicast_mac_hash_table(pdata, ha->addr, 1);
+ }
+#endif
+#else
+ (void)pdata;
+#endif
+}
+
+static int fxgmac_add_mac_addresses(struct fxgmac_pdata *pdata)
+{
+ if (pdata->hw_feat.hash_table_size)
+ fxgmac_set_mac_hash_table(pdata);
+ else
+ fxgmac_set_mac_addn_addrs(pdata);
+
+ return 0;
+}
+
+static void fxgmac_config_mac_address(struct fxgmac_pdata *pdata)
+{
+ u32 regval;
+
+ fxgmac_set_mac_address(pdata, pdata->mac_addr);
+
+ /* Filtering is done using perfect filtering and hash filtering */
+ if (pdata->hw_feat.hash_table_size) {
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR);
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS,
+ MAC_PFR_HPF_LEN, 1);
+#if FUXI_MAC_HASH_TABLE
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS,
+ MAC_PFR_HUC_LEN, 1);
+#endif
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS,
+ MAC_PFR_HMC_LEN, 1);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR);
+ }
+}
+
+static int fxgmac_config_crc_check(struct fxgmac_pdata *pdata)
+{
+ u32 regval, value;
+
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_ECR);
+ value = (pdata->crc_check) ? 0 : 1;
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_ECR_DCRCC_POS,
+ MAC_ECR_DCRCC_LEN, value);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_ECR);
+
+ return 0;
+}
+
+static int fxgmac_config_jumbo(struct fxgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR);
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_JE_POS, MAC_CR_JE_LEN,
+ pdata->jumbo);
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR);
+
+ return 0;
+}
+
+static void fxgmac_config_checksum_offload(struct fxgmac_pdata *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_RXCSUM)
+ fxgmac_enable_rx_csum(pdata);
+ else
+ fxgmac_disable_rx_csum(pdata);
+}
+
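+/* Central VLAN setup: tag insertion source, hash-table contents and the
+ * Rx filtering/stripping toggles selected in pdata.
+ */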
+static void fxgmac_config_vlan_support(struct fxgmac_pdata *pdata)
+{
+ /* Take dynamic VLAN IDs from the Tx context descriptor */
+ fxgmac_disable_tx_vlan(pdata);
+
+ /* Set the current VLAN Hash Table register value */
+ fxgmac_update_vlan_hash_table(pdata);
+
+ /* Rx VLAN filtering is disabled by default */
+ if (pdata->vlan_filter)
+ fxgmac_enable_rx_vlan_filtering(pdata);
+ else
+ fxgmac_disable_rx_vlan_filtering(pdata);
+
+ /* Rx VLAN stripping is enabled by default */
+ if (pdata->vlan_strip)
+ fxgmac_enable_rx_vlan_stripping(pdata);
+ else
+ fxgmac_disable_rx_vlan_stripping(pdata);
+}
+
+static int fxgmac_config_rx_mode(struct fxgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ unsigned int pr_mode, am_mode;
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+ fxgmac_set_promiscuous_mode(pdata, pr_mode);
+ fxgmac_set_all_multicast_mode(pdata, am_mode);
+
+ fxgmac_add_mac_addresses(pdata);
+
+ return 0;
+}
+
+static void fxgmac_prepare_tx_stop(struct fxgmac_pdata *pdata,
+ struct fxgmac_channel *channel)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned long tx_timeout;
+ unsigned int tx_status;
+
+ /* Calculate the status register to read and the position within */
+ if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) +
+ DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) +
+ DMA_DSRX_TPS_START;
+ }
+
+#if FXGMAC_TX_HANG_TIMER_EN
+ tx_timeout = jiffies + msecs_to_jiffies(100); /* 100ms */
+#else
+ tx_timeout = jiffies + (FXGMAC_DMA_STOP_TIMEOUT * HZ);
+#endif
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = readreg(pdata->pAdapter, pdata->mac_regs + tx_dsr);
+ tx_status =
+ FXGMAC_GET_REG_BITS(tx_status, tx_pos, DMA_DSR_TPS_LEN);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ usleep_range_ex(pdata->pAdapter, 500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ channel->queue_index);
+}
+
+static void fxgmac_enable_tx(struct fxgmac_pdata *pdata)
+{
+#ifndef DPDK
+ struct fxgmac_channel *channel;
+#endif
+ unsigned int i;
+ u32 regval;
+
+#if FXGMAC_TX_HANG_TIMER_EN
+ pdata->tx_hang_restart_queuing = 0;
+#endif
+
+ /* Enable each Tx DMA channel */
+#ifndef DPDK
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ regval = readreg(pdata->pAdapter,
+ FXGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
+ DMA_CH_TCR_ST_LEN, 1);
+ writereg(pdata->pAdapter, regval,
+ FXGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+#else
+ PMD_INIT_FUNC_TRACE();
+ struct fxgmac_tx_queue *txq;
+ struct rte_eth_dev *dev = pdata->expansion.eth_dev;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Enable Tx DMA channel */
+ FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
+ }
+#endif
+
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readreg(pdata->pAdapter,
+ FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
+ MTL_Q_TQOMR_TXQEN_LEN,
+ MTL_Q_ENABLED);
+ writereg(pdata->pAdapter, regval,
+ FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ /* Enable MAC Tx */
+ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR);
+ regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_TE_POS, MAC_CR_TE_LEN, 1);
+ writereg(pdata->pAdapter, regval,
pdata->mac_regs + MAC_CR); +} + +static void fxgmac_disable_tx(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; +#endif + unsigned int i; + u32 regval; + + /* Prepare for Tx DMA channel stop */ +#ifndef DPDK + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + fxgmac_prepare_tx_stop(pdata, channel); + +#if FXGMAC_TX_HANG_TIMER_EN + pdata->tx_hang_restart_queuing = 0; +#endif + } + } + +#else + PMD_INIT_FUNC_TRACE(); + struct fxgmac_tx_queue *txq; + struct rte_eth_dev *dev = pdata->expansion.eth_dev; + + for (i = 0; i < pdata->tx_q_count; i++) { + txq = dev->data->tx_queues[i]; + fxgmac_txq_prepare_tx_stop(pdata, i); + } +#endif + + /* Disable MAC Tx */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_TE_POS, MAC_CR_TE_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + /* Disable each Tx queue */ + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, + MTL_Q_TQOMR_TXQEN_LEN, 0); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + /* Disable each Tx DMA channel */ +#ifndef DPDK + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, + DMA_CH_TCR_ST_LEN, 0); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + } + } +#else + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0); + } +#endif +} + +static void fxgmac_prepare_rx_stop(struct fxgmac_pdata *pdata, + unsigned int queue) +{ + unsigned int rx_status, prxq; + unsigned int rxqsts; + unsigned long rx_timeout; + /* The Rx engine cannot be stopped if it is actively processing + * packets. Wait for the Rx queue to empty the Rx fifo. Don't + * wait forever though... 
+ */ +#if FXGMAC_TX_HANG_TIMER_EN + rx_timeout = + jiffies + msecs_to_jiffies(500); /* 500ms, larger is better */ +#else + rx_timeout = jiffies + (FXGMAC_DMA_STOP_TIMEOUT * HZ); +#endif + while (time_before(jiffies, rx_timeout)) { + rx_status = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR)); + prxq = FXGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS, + MTL_Q_RQDR_PRXQ_LEN); + rxqsts = FXGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS, + MTL_Q_RQDR_RXQSTS_LEN); + if ((prxq == 0) && (rxqsts == 0)) + break; + + usleep_range_ex(pdata->pAdapter, 500, 1000); + } + + if (!time_before(jiffies, rx_timeout)) + netdev_info(pdata->netdev, + "timed out waiting for Rx queue %u to empty\n", + queue); +} + +static void fxgmac_enable_rx(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; +#endif + unsigned int regval, i; + + /* Enable each Rx DMA channel */ +#ifndef DPDK + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, + DMA_CH_RCR_SR_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + } + +#else + PMD_INIT_FUNC_TRACE(); + struct fxgmac_rx_queue *rxq; + struct rte_eth_dev *dev = pdata->expansion.eth_dev; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + /* Enable Rx DMA channel */ + FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1); + } +#endif + + /* Enable each Rx queue */ + regval = 0; + for (i = 0; i < pdata->rx_q_count; i++) + regval |= (0x02 << (i << 1)); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_RQC0R); + +#ifndef DPDK + /* Enable MAC Rx */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_CST_POS, MAC_CR_CST_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_ACS_POS, MAC_CR_ACS_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, MAC_CR_RE_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); +#else + /* Enable MAC Rx */ + FXGMAC_IOWRITE_BITS(pdata, MAC_ECR, DCRCC, 1); + + /* Frame is forwarded after stripping CRC to application*/ + if (pdata->expansion.crc_strip_enable) { + FXGMAC_IOWRITE_BITS(pdata, MAC_CR, CST, 1); + FXGMAC_IOWRITE_BITS(pdata, MAC_CR, ACS, 1); + } + FXGMAC_IOWRITE_BITS(pdata, MAC_CR, RE, 1); +#endif +} +static void fxgmac_enable_channel_rx(struct fxgmac_pdata *pdata, + unsigned int queue) +{ + struct fxgmac_channel *channel; + unsigned int regval; + + /* Enable Rx DMA channel */ + channel = pdata->channel_head + queue; + + if (!channel->rx_ring) + return; + regval = readreg(pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, + DMA_CH_RCR_SR_LEN, 1); + writereg(pdata->pAdapter, regval, FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + /* Enable Rx queue */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_RQC0R); + regval |= (0x02 << (queue << 1)); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_RQC0R); + + /* Enable MAC Rx */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + if (!(regval & ((0x01 << MAC_CR_CST_POS) | (0x01 << MAC_CR_ACS_POS) | + (0x01 << MAC_CR_RE_POS)))) { + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_CST_POS, + MAC_CR_CST_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_ACS_POS, + MAC_CR_ACS_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, + MAC_CR_RE_LEN, 1); 
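+ /* CST, ACS and RE were all clear: commit them in a single
+ * MAC_CR write so the receiver starts with CRC stripping
+ * enabled.
+ */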
+ writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + } +} + +static void fxgmac_disable_rx(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; +#endif + unsigned int i; + u32 regval; + + /* Disable MAC Rx */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_CST_POS, MAC_CR_CST_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_ACS_POS, MAC_CR_ACS_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, MAC_CR_RE_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + /* Prepare for Rx DMA channel stop */ +#ifndef DPDK + for (i = 0; i < pdata->rx_q_count; i++) + fxgmac_prepare_rx_stop(pdata, i); +#else + PMD_INIT_FUNC_TRACE(); + struct fxgmac_rx_queue *rxq; + struct rte_eth_dev *dev = pdata->expansion.eth_dev; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + fxgmac_prepare_rx_stop(pdata, i); + } +#endif + + /* Disable each Rx queue */ + writereg(pdata->pAdapter, 0, pdata->mac_regs + MAC_RQC0R); + + /* Disable each Rx DMA channel */ +#ifndef DPDK + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, + DMA_CH_RCR_SR_LEN, 0); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + } + } +#else + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0); + } +#endif +} + +static void fxgmac_tx_start_xmit(struct fxgmac_channel *channel, + struct fxgmac_ring *ring) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_desc_data *desc_data; + + /* Make sure everything is written before the register write */ + wmb(); + + /* Issue a poll command to Tx DMA by writing address + * of next immediate free descriptor + */ + desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); + +#if !(FXGMAC_DUMMY_TX_DEBUG) + writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)); +#else + DPRINTK("dummy tx, fxgmac_tx_start_xmit, tail reg=0x%lx, val=%08x\n", + FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO) - pdata->mac_regs, + (u32)lower_32_bits(desc_data->dma_desc_addr)); +#endif + if (netif_msg_tx_done(pdata)) + DPRINTK("tx_start_xmit: dump before wr reg, dma base=0x%016llx, reg=0x%08x, tx timer usecs=%u, tx_timer_active=%u\n", + desc_data->dma_desc_addr, + readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)), + pdata->tx_usecs, channel->tx_timer_active); + + ring->tx.xmit_more = 0; +} + +static void fxgmac_dev_xmit(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->tx_ring; + unsigned int tso_context, vlan_context; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_pkt_info *pkt_info; + unsigned int csum, tso, vlan; + int start_index = ring->cur; + int cur_index = ring->cur; + int i; + + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit callin, desc cur=%d\n", cur_index); + + pkt_info = &ring->pkt_info; + csum = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); + tso = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + 
TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
+ vlan = FXGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
+
+ if (tso && (pkt_info->mss != ring->tx.cur_mss))
+ tso_context = 1;
+ else
+ tso_context = 0;
+
+ if (tso_context && netif_msg_tx_done(pdata)) {
+ /* tso starts or stops depending on whether an MSS is set */
+ DPRINTK("fxgmac_dev_xmit, tso_%s tso=0x%x, pkt_mss=%d, cur_mss=%d\n",
+ (pkt_info->mss) ? "start" : "stop", tso, pkt_info->mss,
+ ring->tx.cur_mss);
+ }
+
+ if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))
+ vlan_context = 1;
+ else
+ vlan_context = 0;
+
+ if (vlan && (netif_msg_tx_done(pdata)))
+ DPRINTK("fxgmac_dev_xmit: pkt vlan=%d, ring vlan=%d, vlan_context=%d\n",
+ pkt_info->vlan_ctag, ring->tx.cur_vlan_ctag,
+ vlan_context);
+
+ desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index);
+ dma_desc = desc_data->dma_desc;
+
+ /* Create a context descriptor if this is a TSO pkt_info */
+ if (tso_context || vlan_context) {
+ if (tso_context) {
+ if (netif_msg_tx_done(pdata))
+ DPRINTK("fxgmac_dev_xmit, construct tso context descriptor, mss=%u\n",
+ pkt_info->mss);
+
+ /* Set the MSS size */
+ dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2, TX_CONTEXT_DESC2_MSS_POS,
+ TX_CONTEXT_DESC2_MSS_LEN, pkt_info->mss);
+
+ /* Mark it as a CONTEXT descriptor */
+ dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS,
+ TX_CONTEXT_DESC3_CTXT_LEN, 1);
+
+ /* Indicate this descriptor contains the MSS */
+ dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3, TX_CONTEXT_DESC3_TCMSSV_POS,
+ TX_CONTEXT_DESC3_TCMSSV_LEN, 1);
+
+ ring->tx.cur_mss = pkt_info->mss;
+ }
+
+ if (vlan_context) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "VLAN context descriptor, ctag=%u\n",
+ pkt_info->vlan_ctag);
+
+ /* Mark it as a CONTEXT descriptor */
+ dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS,
+ TX_CONTEXT_DESC3_CTXT_LEN, 1);
+
+ /* Set the VLAN tag */
+ dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3, TX_CONTEXT_DESC3_VT_POS,
+ TX_CONTEXT_DESC3_VT_LEN, pkt_info->vlan_ctag);
+
+ /* Indicate this descriptor contains the VLAN tag */
+ dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3, TX_CONTEXT_DESC3_VLTV_POS,
+ TX_CONTEXT_DESC3_VLTV_LEN, 1);
+
+ ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag;
+ }
+
+ cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count);
+ desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index);
+ dma_desc = desc_data->dma_desc;
+ }
+
+ /* Update buffer address (for TSO this is the header) */
+ dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma));
+ dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma));
+
+ /* Update the buffer length */
+ dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2,
+ TX_NORMAL_DESC2_HL_B1L_POS,
+ TX_NORMAL_DESC2_HL_B1L_LEN,
+ desc_data->skb_dma_len);
+
+ /* VLAN tag insertion check */
+ if (vlan) {
+ dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2, TX_NORMAL_DESC2_VTIR_POS,
+ TX_NORMAL_DESC2_VTIR_LEN, TX_NORMAL_DESC2_VLAN_INSERT);
+ pdata->stats.tx_vlan_packets++;
+ }
+
+ /* Timestamp enablement check */
+ if (FXGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_PTP_POS,
+ TX_PACKET_ATTRIBUTES_PTP_LEN))
+ dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2, TX_NORMAL_DESC2_TTSE_POS,
+ TX_NORMAL_DESC2_TTSE_LEN, 1);
+
+ /* Mark it as First Descriptor */
+ dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3,
+ TX_NORMAL_DESC3_FD_POS,
+
TX_NORMAL_DESC3_FD_LEN, 1); + + /* Mark it as a NORMAL descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN, 0); + + /* Set OWN bit if not the first descriptor */ + if (cur_index != start_index) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + if (tso) { + /* Enable TSO */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TSE_POS, + TX_NORMAL_DESC3_TSE_LEN, 1); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TCPPL_POS, + TX_NORMAL_DESC3_TCPPL_LEN, pkt_info->tcp_payload_len); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TCPHDRLEN_POS, + TX_NORMAL_DESC3_TCPHDRLEN_LEN, + pkt_info->tcp_header_len / 4); + + pdata->stats.tx_tso_packets++; + } else { + /* Enable CRC and Pad Insertion */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CPC_POS, + TX_NORMAL_DESC3_CPC_LEN, 0); + + /* Enable HW CSUM */ + if (csum) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, + TX_NORMAL_DESC3_CIC_LEN, 0x3); + + /* Set the total length to be transmitted */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_FL_POS, + TX_NORMAL_DESC3_FL_LEN, + pkt_info->length); + } + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit before more descs, desc cur=%d, start=%d, desc=%#x,%#x,%#x,%#x\n", + cur_index, start_index, dma_desc->desc0, + dma_desc->desc1, dma_desc->desc2, dma_desc->desc3); + + if (start_index <= cur_index) + i = cur_index - start_index + 1; + else + i = ring->dma_desc_count - start_index + cur_index; + + for (; i < pkt_info->desc_count; i++) { + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + + /* Update buffer address */ + dma_desc->desc0 = + cpu_to_le32(lower_32_bits(desc_data->skb_dma)); + dma_desc->desc1 = + cpu_to_le32(upper_32_bits(desc_data->skb_dma)); + + /* Update the buffer length */ + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_NORMAL_DESC2_HL_B1L_POS, + TX_NORMAL_DESC2_HL_B1L_LEN, desc_data->skb_dma_len); + + /* Set OWN bit */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + /* Mark it as NORMAL descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN, 0); + + /* Enable HW CSUM */ + if (csum) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, + TX_NORMAL_DESC3_CIC_LEN, 0x3); + } + + /* Set LAST bit for the last descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_LD_POS, + TX_NORMAL_DESC3_LD_LEN, 1); + + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2, + TX_NORMAL_DESC2_IC_POS, + TX_NORMAL_DESC2_IC_LEN, 1); + + /* Save the Tx info to report back during cleanup */ + desc_data->tx.packets = pkt_info->tx_packets; + desc_data->tx.bytes = pkt_info->tx_bytes; + + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit last descs, desc cur=%d, desc=%#x,%#x,%#x,%#x\n", + cur_index, dma_desc->desc0, dma_desc->desc1, + dma_desc->desc2, dma_desc->desc3); + + /* In case the Tx DMA engine is running, make sure everything + * is written to the descriptor(s) before setting the OWN bit + * for the first descriptor + */ + dma_wmb(); + + 
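/* Ordering note: if the first descriptor's OWN bit became visible to
+	 * the hardware before the later descriptors' fields, a running Tx
+	 * DMA engine could fetch and transmit a half-built chain; setting
+	 * the first OWN bit last, after dma_wmb(), closes that window.
+	 */
+
+	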
/* Set OWN bit for the first descriptor */
+	desc_data = FXGMAC_GET_DESC_DATA(ring, start_index);
+	dma_desc = desc_data->dma_desc;
+	dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3,
+						 TX_NORMAL_DESC3_OWN_POS,
+						 TX_NORMAL_DESC3_OWN_LEN, 1);
+
+	if (netif_msg_tx_done(pdata))
+		DPRINTK("dev_xmit first descs, start=%d, desc=%#x,%#x,%#x,%#x\n",
+			start_index, dma_desc->desc0, dma_desc->desc1,
+			dma_desc->desc2, dma_desc->desc3);
+
+	if (netif_msg_tx_queued(pdata))
+		fxgmac_dump_tx_desc(pdata, ring, start_index,
+				    pkt_info->desc_count, 1);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
+	if (netif_msg_tx_done(pdata))
+		DPRINTK("dev_xmit about to call tx_start_xmit, ring xmit_more=%d, txq_stopped=%x\n",
+			ring->tx.xmit_more,
+			netif_xmit_stopped(netdev_get_tx_queue(
+				pdata->netdev, channel->queue_index)));
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) */
+	if (netif_msg_tx_done(pdata))
+		DPRINTK("dev_xmit about to call tx_start_xmit, pkt xmit_more=%d, txq_stopped=%x\n",
+			pkt_info->skb->xmit_more,
+			netif_xmit_stopped(netdev_get_tx_queue(
+				pdata->netdev, channel->queue_index)));
+#endif
+
+	/* Make sure ownership is written to the descriptor */
+	smp_wmb();
+
+	ring->cur = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count);
+
+	fxgmac_tx_start_xmit(channel, ring);
+
+	/* reduce debug output */
+	if (netif_msg_tx_done(pdata)) {
+		DPRINTK("dev_xmit callout %s: descriptors %u to %u written\n",
+			channel->name, start_index & (ring->dma_desc_count - 1),
+			(ring->cur - 1) & (ring->dma_desc_count - 1));
+	}
+}
+
+static void fxgmac_get_rx_tstamp(struct fxgmac_pkt_info *pkt_info,
+				 struct fxgmac_dma_desc *dma_desc)
+{
+	u64 nsec;
+
+	nsec = le32_to_cpu(dma_desc->desc1);
+	nsec <<= 32;
+	nsec |= le32_to_cpu(dma_desc->desc0);
+	if (nsec != 0xffffffffffffffffULL) {
+		pkt_info->rx_tstamp = nsec;
+		pkt_info->attributes = FXGMAC_SET_REG_BITS(
+			pkt_info->attributes,
+			RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS,
+			RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN, 1);
+	}
+}
+
+static void fxgmac_tx_desc_reset(struct fxgmac_desc_data *desc_data)
+{
+	struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc;
+
+	/* Reset the Tx descriptor:
+	 * set buffer 1 (lo/hi) addresses to zero and clear all control
+	 * bits (IC, TTSE, B2L & B1L, OWN, CTXT, FD, LD, CPC, CIC, etc.)
+	 */
+	dma_desc->desc0 = 0;
+	dma_desc->desc1 = 0;
+	dma_desc->desc2 = 0;
+	dma_desc->desc3 = 0;
+
+	/* Make sure ownership is written to the descriptor */
+	dma_wmb();
+}
+
+static void fxgmac_tx_desc_init(struct fxgmac_channel *channel)
+{
+	struct fxgmac_ring *ring = channel->tx_ring;
+	struct fxgmac_desc_data *desc_data;
+	int start_index = ring->cur;
+	unsigned int i;
+
+	/* Initialize all descriptors */
+	for (i = 0; i < ring->dma_desc_count; i++) {
+		desc_data = FXGMAC_GET_DESC_DATA(ring, i);
+
+		/* Initialize Tx descriptor */
+		fxgmac_tx_desc_reset(desc_data);
+	}
+
+	writereg(channel->pdata->pAdapter, channel->pdata->tx_desc_count - 1,
+		 FXGMAC_DMA_REG(channel, DMA_CH_TDRLR));
+
+	/* Update the starting address of descriptor ring */
+	desc_data = FXGMAC_GET_DESC_DATA(ring, start_index);
+	writereg(channel->pdata->pAdapter,
+		 upper_32_bits(desc_data->dma_desc_addr),
+		 FXGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
+	writereg(channel->pdata->pAdapter,
+		 lower_32_bits(desc_data->dma_desc_addr),
+		 FXGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
+}
+
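+/* Note on ring programming: the ring-length registers are written with
+ * (descriptor count - 1); e.g. a 256-entry Tx ring programs DMA_CH_TDRLR
+ * with 255. The Rx path below (DMA_CH_RDRLR) follows the same convention.
+ */
+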
+static void fxgmac_rx_desc_reset(struct fxgmac_pdata *pdata,
+				 struct fxgmac_desc_data *desc_data,
+				 unsigned int index)
+{
+	struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc;
+
+	/* Reset the Rx descriptor:
+	 * set buffer 1 (lo/hi) to the receive buffer dma address,
+	 * clear buffer 2 (desc2), mark buffer 1 valid (BUF1V) and
+	 * buffer 2 not valid (BUF2V), then set the INTE and OWN bits
+	 */
+	dma_desc->desc0 =
+		cpu_to_le32(lower_32_bits(desc_data->rx.buf.dma_base));
+	dma_desc->desc1 =
+		cpu_to_le32(upper_32_bits(desc_data->rx.buf.dma_base));
+	dma_desc->desc2 = 0;
+	dma_desc->desc3 = 0;
+	dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3,
+						 RX_NORMAL_DESC3_INTE_POS,
+						 RX_NORMAL_DESC3_INTE_LEN, 1);
+	dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3,
+						 RX_NORMAL_DESC3_BUF2V_POS,
+						 RX_NORMAL_DESC3_BUF2V_LEN, 0);
+	dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3,
+						 RX_NORMAL_DESC3_BUF1V_POS,
+						 RX_NORMAL_DESC3_BUF1V_LEN, 1);
+
+	/* Since the Rx DMA engine is likely running, make sure everything
+	 * is written to the descriptor(s) before setting the OWN bit
+	 * for the descriptor
+	 */
+	dma_wmb();
+
+	dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3,
+						 RX_NORMAL_DESC3_OWN_POS,
+						 RX_NORMAL_DESC3_OWN_LEN, 1);
+
+	/* Make sure ownership is written to the descriptor */
+	dma_wmb();
+}
+
+static void fxgmac_rx_desc_init(struct fxgmac_channel *channel)
+{
+	struct fxgmac_pdata *pdata = channel->pdata;
+	struct fxgmac_ring *ring = channel->rx_ring;
+	unsigned int start_index = ring->cur;
+	struct fxgmac_desc_data *desc_data;
+	unsigned int i;
+
+	/* Initialize all descriptors */
+	for (i = 0; i < ring->dma_desc_count; i++) {
+		desc_data = FXGMAC_GET_DESC_DATA(ring, i);
+
+		/* Initialize Rx descriptor */
+		fxgmac_rx_desc_reset(pdata, desc_data, i);
+	}
+
+	/* Update the total number of Rx descriptors */
+	writereg(pdata->pAdapter, ring->dma_desc_count - 1,
+		 FXGMAC_DMA_REG(channel, DMA_CH_RDRLR));
+
+	/* Update the starting address of descriptor ring */
+	desc_data = FXGMAC_GET_DESC_DATA(ring, start_index);
+	writereg(pdata->pAdapter, upper_32_bits(desc_data->dma_desc_addr),
+		 FXGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
+	writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr),
+		 FXGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));
+
+	/* Update the Rx Descriptor Tail Pointer */
+	desc_data = FXGMAC_GET_DESC_DATA(
+		ring, start_index + ring->dma_desc_count - 1);
+	writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr),
+		 FXGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
+}
+
+static int fxgmac_is_context_desc(struct fxgmac_dma_desc *dma_desc)
+{
+	/* Rx and Tx share the CTXT bit, so check TDES3.CTXT */
+	return FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS,
+				      TX_NORMAL_DESC3_CTXT_LEN);
+}
+
+static int fxgmac_is_last_desc(struct fxgmac_dma_desc *dma_desc)
+{
+	/* Rx and Tx share the LD bit, so check TDES3.LD */
+	return FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_LD_POS,
+				      TX_NORMAL_DESC3_LD_LEN);
+}
+
+static int fxgmac_disable_tx_flow_control(struct fxgmac_pdata *pdata)
+{
+	unsigned int max_q_count, q_count;
+	unsigned int reg, regval;
+	unsigned int i;
+
+	/* Clear MTL flow control */
+	for (i = 0; i < pdata->rx_q_count; i++) {
+		regval = readreg(pdata->pAdapter,
+				 FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+		regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
+					     MTL_Q_RQOMR_EHFC_LEN, 0);
+		writereg(pdata->pAdapter, regval,
+			 FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+	}
+
+	/* Clear MAC flow control */
+	max_q_count = 
FXGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + regval = readreg(pdata->pAdapter, pdata->mac_regs + reg); + regval = FXGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS, + MAC_Q0TFCR_TFE_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + reg); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int fxgmac_enable_tx_flow_control(struct fxgmac_pdata *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, regval; + unsigned int i; + + /* Set MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS, + MTL_Q_RQOMR_EHFC_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + /* Set MAC flow control */ + max_q_count = FXGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + regval = readreg(pdata->pAdapter, pdata->mac_regs + reg); + + /* Enable transmit flow control */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS, + MAC_Q0TFCR_TFE_LEN, 1); + /* Set pause time */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS, + MAC_Q0TFCR_PT_LEN, 0xffff); + + writereg(pdata->pAdapter, regval, pdata->mac_regs + reg); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int fxgmac_disable_rx_flow_control(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_RFCR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, MAC_RFCR_RFE_LEN, + 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_RFCR); + + return 0; +} + +static int fxgmac_enable_rx_flow_control(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_RFCR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, MAC_RFCR_RFE_LEN, + 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_RFCR); + + return 0; +} + +static int fxgmac_config_tx_flow_control(struct fxgmac_pdata *pdata) +{ + if (pdata->tx_pause) + fxgmac_enable_tx_flow_control(pdata); + else + fxgmac_disable_tx_flow_control(pdata); + + return 0; +} + +static int fxgmac_config_rx_flow_control(struct fxgmac_pdata *pdata) +{ + if (pdata->rx_pause) + fxgmac_enable_rx_flow_control(pdata); + else + fxgmac_disable_rx_flow_control(pdata); + + return 0; +} + +static int fxgmac_config_rx_coalesce(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_RIWT)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS, + DMA_CH_RIWT_RWT_LEN, + pdata->rx_riwt); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_RIWT)); + } +#else + struct fxgmac_rx_queue *rxq; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { + rxq = pdata->expansion.eth_dev->data->rx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RIWT, RWT, pdata->rx_riwt); + } +#endif + + return 0; +} + +static void fxgmac_config_rx_fep_disable(struct fxgmac_pdata *pdata) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readreg(pdata->pAdapter, + 
FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+		/* Enable the Rx queue to forward packets with error status
+		 * (CRC error, GMII_ER, watchdog timeout or overflow)
+		 */
+		regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS,
+					     MTL_Q_RQOMR_FEP_LEN,
+					     MTL_FEP_ENABLE);
+		writereg(pdata->pAdapter, regval,
+			 FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+	}
+}
+
+static void fxgmac_config_rx_fup_enable(struct fxgmac_pdata *pdata)
+{
+	unsigned int i;
+	u32 regval;
+
+	for (i = 0; i < pdata->rx_q_count; i++) {
+		regval = readreg(pdata->pAdapter,
+				 FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+		regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
+					     MTL_Q_RQOMR_FUP_LEN, 1);
+		writereg(pdata->pAdapter, regval,
+			 FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+	}
+}
+
+static int fxgmac_config_tx_coalesce(struct fxgmac_pdata *pdata)
+{
+	/* Nothing to configure for Tx coalescing */
+	return 0;
+}
+
+static void fxgmac_config_rx_buffer_size(struct fxgmac_pdata *pdata)
+{
+#ifndef DPDK
+	struct fxgmac_channel *channel;
+	unsigned int i;
+	u32 regval;
+
+	channel = pdata->channel_head;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->rx_ring)
+			break;
+
+		regval = readreg(pdata->pAdapter,
+				 FXGMAC_DMA_REG(channel, DMA_CH_RCR));
+		regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
+					     DMA_CH_RCR_RBSZ_LEN,
+					     pdata->rx_buf_size);
+		writereg(pdata->pAdapter, regval,
+			 FXGMAC_DMA_REG(channel, DMA_CH_RCR));
+	}
+#else
+	struct fxgmac_rx_queue *rxq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) {
+		rxq = pdata->expansion.eth_dev->data->rx_queues[i];
+
+		rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
+				RTE_PKTMBUF_HEADROOM;
+		rxq->buf_size = (rxq->buf_size + FXGMAC_RX_BUF_ALIGN - 1) &
+				~(FXGMAC_RX_BUF_ALIGN - 1);
+
+		if (rxq->buf_size > pdata->rx_buf_size)
+			pdata->rx_buf_size = rxq->buf_size;
+
+		FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ, rxq->buf_size);
+	}
+#endif
+}
+
+static void fxgmac_config_tso_mode(struct fxgmac_pdata *pdata)
+{
+#ifndef DPDK
+	struct fxgmac_channel *channel;
+	unsigned int i;
+	u32 regval;
+
+	channel = pdata->channel_head;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->tx_ring)
+			break;
+
+		if (pdata->hw_feat.tso) {
+			regval = readreg(pdata->pAdapter,
+					 FXGMAC_DMA_REG(channel, DMA_CH_TCR));
+			regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS,
+						     DMA_CH_TCR_TSE_LEN, 1);
+			writereg(pdata->pAdapter, regval,
+				 FXGMAC_DMA_REG(channel, DMA_CH_TCR));
+		}
+	}
+#else
+	struct fxgmac_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->expansion.eth_dev->data->tx_queues[i];
+		FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, pdata->tx_pbl);
+	}
+#endif
+}
+
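+/* Split header is deliberately left disabled (SPH = 0) per channel below;
+ * MAC_ECR.HDSMS is still programmed so the header-split size field holds a
+ * defined value should split header ever be enabled.
+ */
+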
+static void fxgmac_config_sph_mode(struct fxgmac_pdata *pdata)
+{
+	unsigned int i;
+	u32 regval;
+
+#ifndef DPDK
+	struct fxgmac_channel *channel;
+	channel = pdata->channel_head;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->rx_ring)
+			break;
+
+		regval = readreg(pdata->pAdapter,
+				 FXGMAC_DMA_REG(channel, DMA_CH_CR));
+		regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
+					     DMA_CH_CR_SPH_LEN, 0);
+		writereg(pdata->pAdapter, regval,
+			 FXGMAC_DMA_REG(channel, DMA_CH_CR));
+	}
+#else
+	struct fxgmac_rx_queue *rxq;
+
+	for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) {
+		rxq = pdata->expansion.eth_dev->data->rx_queues[i];
+		FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_CR, SPH, pdata->rx_pbl);
+	}
+#endif
+
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_ECR);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_ECR_HDSMS_POS,
+				     MAC_ECR_HDSMS_LEN, FXGMAC_SPH_HDSMS_SIZE);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_ECR);
+}
+
+static unsigned int fxgmac_usec_to_riwt(struct fxgmac_pdata *pdata,
+					unsigned int usec)
+{
+	unsigned long rate;
+	unsigned int ret;
+
+	rate = pdata->sysclk_rate;
+
+	/* Convert the input usec value to the watchdog timer value. Each
+	 * watchdog timer value is equivalent to 256 clock cycles.
+	 * Calculate the required value as:
+	 *   (usec * (system_clock_hz / 10^6)) / 256
+	 */
+	ret = (usec * (rate / 1000000)) / 256;
+
+	return ret;
+}
+
+static unsigned int fxgmac_riwt_to_usec(struct fxgmac_pdata *pdata,
+					unsigned int riwt)
+{
+	unsigned long rate;
+	unsigned int ret;
+
+	rate = pdata->sysclk_rate;
+
+	/* Convert the input watchdog timer value to the usec value. Each
+	 * watchdog timer value is equivalent to 256 clock cycles.
+	 * Calculate the required value as:
+	 *   (riwt * 256) / (system_clock_hz / 10^6)
+	 */
+	ret = (riwt * 256) / (rate / 1000000);
+
+	return ret;
+}
+
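+/* Worked example (illustrative clock rate, not a statement about this
+ * NIC's sysclk): with a 125 MHz system clock, a 200 usec coalescing
+ * interval converts to (200 * 125) / 256 = 97 watchdog units, and 97
+ * units convert back to (97 * 256) / 125 = 198 usec; each round trip
+ * can lose up to one 256-cycle quantum to integer truncation.
+ */
+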
+static int fxgmac_config_rx_threshold(struct fxgmac_pdata *pdata,
+				      unsigned int val)
+{
+	unsigned int i;
+	u32 regval;
+
+	for (i = 0; i < pdata->rx_q_count; i++) {
+		regval = readreg(pdata->pAdapter,
+				 FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+		regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS,
+					     MTL_Q_RQOMR_RTC_LEN, val);
+		writereg(pdata->pAdapter, regval,
+			 FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+	}
+
+	return 0;
+}
+
+static void fxgmac_config_mtl_mode(struct fxgmac_pdata *pdata)
+{
+	unsigned int i;
+	u32 regval;
+
+	/* Set Tx to weighted round robin scheduling algorithm */
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MTL_OMR);
+	regval = FXGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS,
+				     MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MTL_OMR);
+
+	/* Set Tx traffic classes to use WRR algorithm with equal weights */
+	for (i = 0; i < pdata->tx_q_count /*hw_feat.tc_cnt*/; i++) {
+		regval = readreg(pdata->pAdapter,
+				 FXGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
+		regval = FXGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS,
+					     MTL_TC_QWR_QW_LEN, 1);
+		writereg(pdata->pAdapter, regval,
+			 FXGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
+	}
+
+	/* Set Rx to strict priority algorithm */
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MTL_OMR);
+	regval = FXGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS, MTL_OMR_RAA_LEN,
+				     MTL_RAA_SP);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MTL_OMR);
+}
+
+static void fxgmac_config_queue_mapping(struct fxgmac_pdata *pdata)
+{
+	unsigned int ppq, ppq_extra, prio, prio_queues;
+	unsigned int queue;
+	unsigned int reg, regval;
+	unsigned int mask;
+	unsigned int i, j;
+
+	/* Map the MTL Tx Queues to Traffic Classes
+	 * Note: Tx Queues >= Traffic Classes
+	 */
+	queue = 0;
+	DPRINTK("need to map TXq(%u) to TC\n", queue);
+
+	/* Map the 8 VLAN priority values to available MTL Rx queues */
+	prio_queues =
+		min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, pdata->rx_q_count);
+	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
+
+	reg = MAC_RQC2R;
+	regval = 0;
+	for (i = 0, prio = 0; i < prio_queues;) {
+		mask = 0;
+		for (j = 0; j < ppq; j++) {
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
+			mask |= (1 << prio);
+			prio++;
+		}
+
+		if (i < ppq_extra) {
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
+			mask |= (1 << prio);
+			prio++;
+		}
+
+		regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
+
+		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
+			continue;
+
+		writereg(pdata->pAdapter, regval, pdata->mac_regs + reg);
+		reg += MAC_RQC2_INC;
+		regval = 0;
+	}
+
+	/* Configure one-to-one MTL Rx queue to DMA Rx channel mapping,
+	 * i.e. Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11
+	 */
+	reg = MTL_RQDCM0R;
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + reg);
+	regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH |
+		   MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH);
+
+	if (pdata->rss) {
+		/* In versions after 0617, DA-based DMA channel selection
+		 * must be enabled for RSS to work, i.e. bits 4, 12, 20 and
+		 * 28 for Q0..Q3 respectively.
+		 */
+		regval |= (MTL_RQDCM0R_Q0DDMACH | MTL_RQDCM0R_Q1DDMACH |
+			   MTL_RQDCM0R_Q2DDMACH | MTL_RQDCM0R_Q3DDMACH);
+	}
+
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + reg);
+
+	reg += MTL_RQDCM_INC;
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + reg);
+	regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH |
+		   MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + reg);
+}
+
+static unsigned int fxgmac_calculate_per_queue_fifo(unsigned int fifo_size,
+						    unsigned int queue_count)
+{
+	unsigned int q_fifo_size;
+	unsigned int p_fifo;
+
+	/* Calculate the configured fifo size */
+	q_fifo_size = 1 << (fifo_size + 7);
+
+	/* The configured value may not be the actual amount of fifo RAM */
+	q_fifo_size = min_t(unsigned int, FXGMAC_MAX_FIFO, q_fifo_size);
+
+	q_fifo_size = q_fifo_size / queue_count;
+
+	/* Each increment in the queue fifo size represents 256 bytes of
+	 * fifo, with 0 representing 256 bytes. Distribute the fifo equally
+	 * between the queues.
+	 */
+	p_fifo = q_fifo_size / 256;
+	if (p_fifo)
+		p_fifo--;
+
+	return p_fifo;
+}
+
+static void fxgmac_config_tx_fifo_size(struct fxgmac_pdata *pdata)
+{
+	unsigned int fifo_size;
+	unsigned int i;
+	u32 regval;
+
+	fifo_size = fxgmac_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
+						    pdata->tx_q_count);
+
+	for (i = 0; i < pdata->tx_q_count; i++) {
+		regval = readreg(pdata->pAdapter,
+				 FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+		regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
+					     MTL_Q_TQOMR_TQS_LEN, fifo_size);
+		writereg(pdata->pAdapter, regval,
+			 FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+	}
+
+	netif_info(pdata, drv, pdata->netdev,
+		   "%d Tx hardware queues, %d byte fifo per queue\n",
+		   pdata->tx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void fxgmac_config_rx_fifo_size(struct fxgmac_pdata *pdata)
+{
+	unsigned int fifo_size;
+	unsigned int i;
+	u32 regval;
+
+	fifo_size = fxgmac_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
+						    pdata->rx_q_count);
+
+	for (i = 0; i < pdata->rx_q_count; i++) {
+		regval = readreg(pdata->pAdapter,
+				 FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+		regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
+					     MTL_Q_RQOMR_RQS_LEN, fifo_size);
+		writereg(pdata->pAdapter, regval,
+			 FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+	}
+
+	netif_info(pdata, drv, pdata->netdev,
+		   "%d Rx hardware queues, %d byte fifo per queue\n",
+		   pdata->rx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void fxgmac_config_flow_control_threshold(struct fxgmac_pdata *pdata)
+{
+	unsigned int i;
+	u32 regval;
+
+	for (i = 0; i < pdata->rx_q_count; i++) {
+		regval = readreg(pdata->pAdapter,
+				 FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+		/* Activate flow control when less than 6k left in fifo */
+		regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RFA_POS,
+					     MTL_Q_RQOMR_RFA_LEN, 6);
+		/* De-activate flow control 
when more than 10k left in fifo */ + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RFD_POS, + MTL_Q_RQOMR_RFD_LEN, 10); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } +} + +static int fxgmac_config_tx_threshold(struct fxgmac_pdata *pdata, + unsigned int val) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS, + MTL_Q_TQOMR_TTC_LEN, val); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + return 0; +} + +static int fxgmac_config_rsf_mode(struct fxgmac_pdata *pdata, unsigned int val) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS, + MTL_Q_RQOMR_RSF_LEN, val); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + return 0; +} + +static int fxgmac_config_tsf_mode(struct fxgmac_pdata *pdata, unsigned int val) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS, + MTL_Q_TQOMR_TSF_LEN, val); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + return 0; +} + +static int fxgmac_config_osp_mode(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS, + DMA_CH_TCR_OSP_LEN, + pdata->tx_osp_mode); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + } +#else + /* Force DMA to operate on second packet before closing descriptors + * of first packet + */ + struct fxgmac_tx_queue *txq; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { + txq = pdata->expansion.eth_dev->data->tx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP, + pdata->tx_osp_mode); + } +#endif + return 0; +} + +static int fxgmac_config_pblx8(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_CR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS, + DMA_CH_CR_PBLX8_LEN, pdata->pblx8); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_CR)); + } +#else + struct fxgmac_tx_queue *txq; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { + txq = pdata->expansion.eth_dev->data->tx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8, pdata->pblx8); + } +#endif + + return 0; +} + +static int fxgmac_get_tx_pbl_val(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR)); + regval = FXGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, + DMA_CH_TCR_PBL_LEN); + return regval; +} + +static int fxgmac_config_tx_pbl_val(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct 
fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, + DMA_CH_TCR_PBL_LEN, pdata->tx_pbl); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + } +#else + struct fxgmac_tx_queue *txq; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { + txq = pdata->expansion.eth_dev->data->tx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL, pdata->tx_pbl); + } +#endif + + return 0; +} + +static int fxgmac_get_rx_pbl_val(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR)); + regval = FXGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, + DMA_CH_RCR_PBL_LEN); + return regval; +} + +static int fxgmac_config_rx_pbl_val(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, + DMA_CH_RCR_PBL_LEN, pdata->rx_pbl); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + } +#else + struct fxgmac_rx_queue *rxq; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { + rxq = pdata->expansion.eth_dev->data->rx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL, pdata->rx_pbl); + } +#endif + + return 0; +} + +static u64 fxgmac_mmc_read(struct fxgmac_pdata *pdata, unsigned int reg_lo) +{ + /* bool read_hi; */ + u64 val; + val = (u64)readreg(pdata->pAdapter, pdata->mac_regs + reg_lo); + + return val; +} + +static void fxgmac_tx_mmc_int(struct fxgmac_pdata *pdata) +{ + unsigned int mmc_isr = + readreg(pdata->pAdapter, pdata->mac_regs + MMC_TISR); + struct fxgmac_stats *stats = &pdata->stats; + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXOCTETCOUNT_GB_POS, + MMC_TISR_TXOCTETCOUNT_GB_LEN)) + stats->txoctetcount_gb += + fxgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXFRAMECOUNT_GB_POS, + MMC_TISR_TXFRAMECOUNT_GB_LEN)) + stats->txframecount_gb += + fxgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXBROADCASTFRAMES_G_POS, + MMC_TISR_TXBROADCASTFRAMES_G_LEN)) + stats->txbroadcastframes_g += + fxgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXMULTICASTFRAMES_G_POS, + MMC_TISR_TXMULTICASTFRAMES_G_LEN)) + stats->txmulticastframes_g += + fxgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX64OCTETS_GB_POS, + MMC_TISR_TX64OCTETS_GB_LEN)) + stats->tx64octets_gb += + fxgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX65TO127OCTETS_GB_POS, + MMC_TISR_TX65TO127OCTETS_GB_LEN)) + stats->tx65to127octets_gb += + fxgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX128TO255OCTETS_GB_POS, + MMC_TISR_TX128TO255OCTETS_GB_LEN)) + stats->tx128to255octets_gb += + fxgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX256TO511OCTETS_GB_POS, 
+ MMC_TISR_TX256TO511OCTETS_GB_LEN)) + stats->tx256to511octets_gb += + fxgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX512TO1023OCTETS_GB_POS, + MMC_TISR_TX512TO1023OCTETS_GB_LEN)) + stats->tx512to1023octets_gb += + fxgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX1024TOMAXOCTETS_GB_POS, + MMC_TISR_TX1024TOMAXOCTETS_GB_LEN)) + stats->tx1024tomaxoctets_gb += + fxgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXUNICASTFRAMES_GB_POS, + MMC_TISR_TXUNICASTFRAMES_GB_LEN)) + stats->txunicastframes_gb += + fxgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXMULTICASTFRAMES_GB_POS, + MMC_TISR_TXMULTICASTFRAMES_GB_LEN)) + stats->txmulticastframes_gb += + fxgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXBROADCASTFRAMES_GB_POS, + MMC_TISR_TXBROADCASTFRAMES_GB_LEN)) + stats->txbroadcastframes_g += + fxgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXUNDERFLOWERROR_POS, + MMC_TISR_TXUNDERFLOWERROR_LEN)) + stats->txunderflowerror += + fxgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXSINGLECOLLISION_G_POS, + MMC_TISR_TXSINGLECOLLISION_G_LEN)) + stats->txsinglecollision_g += + fxgmac_mmc_read(pdata, MMC_TXSINGLECOLLISION_G); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXMULTIPLECOLLISION_G_POS, + MMC_TISR_TXMULTIPLECOLLISION_G_LEN)) + stats->txmultiplecollision_g += + fxgmac_mmc_read(pdata, MMC_TXMULTIPLECOLLISION_G); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXDEFERREDFRAMES_POS, + MMC_TISR_TXDEFERREDFRAMES_LEN)) + stats->txdeferredframes += + fxgmac_mmc_read(pdata, MMC_TXDEFERREDFRAMES); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXLATECOLLISIONFRAMES_POS, + MMC_TISR_TXLATECOLLISIONFRAMES_LEN)) + stats->txlatecollisionframes += + fxgmac_mmc_read(pdata, MMC_TXLATECOLLISIONFRAMES); + + if (FXGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXEXCESSIVECOLLISIONFRAMES_POS, + MMC_TISR_TXEXCESSIVECOLLISIONFRAMES_LEN)) + stats->txexcessivecollisionframes += + fxgmac_mmc_read(pdata, MMC_TXEXCESSIVECOLLSIONFRAMES); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXCARRIERERRORFRAMES_POS, + MMC_TISR_TXCARRIERERRORFRAMES_LEN)) + stats->txcarriererrorframes += + fxgmac_mmc_read(pdata, MMC_TXCARRIERERRORFRAMES); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXOCTETCOUNT_G_POS, + MMC_TISR_TXOCTETCOUNT_G_LEN)) + stats->txoctetcount_g += + fxgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXFRAMECOUNT_G_POS, + MMC_TISR_TXFRAMECOUNT_G_LEN)) + stats->txframecount_g += + fxgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXEXCESSIVEDEFERRALFRAMES_POS, + MMC_TISR_TXEXCESSIVEDEFERRALFRAMES_LEN)) + stats->txexcessivedeferralerror += + fxgmac_mmc_read(pdata, MMC_TXEXCESSIVEDEFERRALERROR); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXPAUSEFRAMES_POS, + MMC_TISR_TXPAUSEFRAMES_LEN)) + stats->txpauseframes += + fxgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXVLANFRAMES_G_POS, + MMC_TISR_TXVLANFRAMES_G_LEN)) + stats->txvlanframes_g += + fxgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXOVERSIZE_G_POS, + MMC_TISR_TXOVERSIZE_G_LEN)) + stats->txoversize_g += + fxgmac_mmc_read(pdata, MMC_TXOVERSIZEFRAMES); +} + +static 
void fxgmac_rx_mmc_int(struct fxgmac_pdata *pdata) +{ + unsigned int mmc_isr = + readreg(pdata->pAdapter, pdata->mac_regs + MMC_RISR); + struct fxgmac_stats *stats = &pdata->stats; + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXFRAMECOUNT_GB_POS, + MMC_RISR_RXFRAMECOUNT_GB_LEN)) + stats->rxframecount_gb += + fxgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOCTETCOUNT_GB_POS, + MMC_RISR_RXOCTETCOUNT_GB_LEN)) + stats->rxoctetcount_gb += + fxgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOCTETCOUNT_G_POS, + MMC_RISR_RXOCTETCOUNT_G_LEN)) + stats->rxoctetcount_g += + fxgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXBROADCASTFRAMES_G_POS, + MMC_RISR_RXBROADCASTFRAMES_G_LEN)) + stats->rxbroadcastframes_g += + fxgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXMULTICASTFRAMES_G_POS, + MMC_RISR_RXMULTICASTFRAMES_G_LEN)) + stats->rxmulticastframes_g += + fxgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXCRCERROR_POS, + MMC_RISR_RXCRCERROR_LEN)) + stats->rxcrcerror += fxgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXALIGNERROR_POS, + MMC_RISR_RXALIGNERROR_LEN)) + stats->rxalignerror += fxgmac_mmc_read(pdata, MMC_RXALIGNERROR); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXRUNTERROR_POS, + MMC_RISR_RXRUNTERROR_LEN)) + stats->rxrunterror += fxgmac_mmc_read(pdata, MMC_RXRUNTERROR); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXJABBERERROR_POS, + MMC_RISR_RXJABBERERROR_LEN)) + stats->rxjabbererror += + fxgmac_mmc_read(pdata, MMC_RXJABBERERROR); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXUNDERSIZE_G_POS, + MMC_RISR_RXUNDERSIZE_G_LEN)) + stats->rxundersize_g += + fxgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOVERSIZE_G_POS, + MMC_RISR_RXOVERSIZE_G_LEN)) + stats->rxoversize_g += fxgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX64OCTETS_GB_POS, + MMC_RISR_RX64OCTETS_GB_LEN)) + stats->rx64octets_gb += + fxgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX65TO127OCTETS_GB_POS, + MMC_RISR_RX65TO127OCTETS_GB_LEN)) + stats->rx65to127octets_gb += + fxgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX128TO255OCTETS_GB_POS, + MMC_RISR_RX128TO255OCTETS_GB_LEN)) + stats->rx128to255octets_gb += + fxgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX256TO511OCTETS_GB_POS, + MMC_RISR_RX256TO511OCTETS_GB_LEN)) + stats->rx256to511octets_gb += + fxgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX512TO1023OCTETS_GB_POS, + MMC_RISR_RX512TO1023OCTETS_GB_LEN)) + stats->rx512to1023octets_gb += + fxgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX1024TOMAXOCTETS_GB_POS, + MMC_RISR_RX1024TOMAXOCTETS_GB_LEN)) + stats->rx1024tomaxoctets_gb += + fxgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXUNICASTFRAMES_G_POS, + MMC_RISR_RXUNICASTFRAMES_G_LEN)) + stats->rxunicastframes_g += + fxgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXLENGTHERROR_POS, + MMC_RISR_RXLENGTHERROR_LEN)) + stats->rxlengtherror += + fxgmac_mmc_read(pdata, 
MMC_RXLENGTHERROR_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOUTOFRANGETYPE_POS, + MMC_RISR_RXOUTOFRANGETYPE_LEN)) + stats->rxoutofrangetype += + fxgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXPAUSEFRAMES_POS, + MMC_RISR_RXPAUSEFRAMES_LEN)) + stats->rxpauseframes += + fxgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXFIFOOVERFLOW_POS, + MMC_RISR_RXFIFOOVERFLOW_LEN)) + stats->rxfifooverflow += + fxgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXVLANFRAMES_GB_POS, + MMC_RISR_RXVLANFRAMES_GB_LEN)) + stats->rxvlanframes_gb += + fxgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXWATCHDOGERROR_POS, + MMC_RISR_RXWATCHDOGERROR_LEN)) + stats->rxwatchdogerror += + fxgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXERRORFRAMES_POS, + MMC_RISR_RXERRORFRAMES_LEN)) + stats->rxreceiveerrorframe += + fxgmac_mmc_read(pdata, MMC_RXRECEIVEERRORFRAME); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXERRORCONTROLFRAMES_POS, + MMC_RISR_RXERRORCONTROLFRAMES_LEN)) + stats->rxcontrolframe_g += + fxgmac_mmc_read(pdata, MMC_RXCONTROLFRAME_G); +} + +static void fxgmac_read_mmc_stats(struct fxgmac_pdata *pdata) +{ + struct fxgmac_stats *stats = &pdata->stats; + u32 regval; + + /* Freeze counters */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, MMC_CR_MCF_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_CR); + + stats->txoctetcount_gb += + fxgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); + + stats->txframecount_gb += + fxgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); + + stats->txbroadcastframes_g += + fxgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); + + stats->txmulticastframes_g += + fxgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); + + stats->tx64octets_gb += fxgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); + + stats->tx65to127octets_gb += + fxgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); + + stats->tx128to255octets_gb += + fxgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); + + stats->tx256to511octets_gb += + fxgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); + + stats->tx512to1023octets_gb += + fxgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); + + stats->tx1024tomaxoctets_gb += + fxgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + + stats->txunicastframes_gb += + fxgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); + + stats->txmulticastframes_gb += + fxgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + + stats->txbroadcastframes_g += + fxgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + + stats->txunderflowerror += + fxgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); + + stats->txsinglecollision_g += + fxgmac_mmc_read(pdata, MMC_TXSINGLECOLLISION_G); + + stats->txmultiplecollision_g += + fxgmac_mmc_read(pdata, MMC_TXMULTIPLECOLLISION_G); + + stats->txdeferredframes += fxgmac_mmc_read(pdata, MMC_TXDEFERREDFRAMES); + + stats->txlatecollisionframes += + fxgmac_mmc_read(pdata, MMC_TXLATECOLLISIONFRAMES); + + stats->txexcessivecollisionframes += + fxgmac_mmc_read(pdata, MMC_TXEXCESSIVECOLLSIONFRAMES); + + stats->txcarriererrorframes += + fxgmac_mmc_read(pdata, MMC_TXCARRIERERRORFRAMES); + + stats->txoctetcount_g += fxgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); + + stats->txframecount_g += fxgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); + + stats->txexcessivedeferralerror += + 
fxgmac_mmc_read(pdata, MMC_TXEXCESSIVEDEFERRALERROR); + + stats->txpauseframes += fxgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); + + stats->txvlanframes_g += fxgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); + + stats->txoversize_g += fxgmac_mmc_read(pdata, MMC_TXOVERSIZEFRAMES); + + stats->rxframecount_gb += + fxgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); + + stats->rxoctetcount_gb += + fxgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); + + stats->rxoctetcount_g += fxgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); + + stats->rxbroadcastframes_g += + fxgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); + + stats->rxmulticastframes_g += + fxgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); + + stats->rxcrcerror += fxgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); + + stats->rxalignerror += fxgmac_mmc_read(pdata, MMC_RXALIGNERROR); + + stats->rxrunterror += fxgmac_mmc_read(pdata, MMC_RXRUNTERROR); + + stats->rxjabbererror += fxgmac_mmc_read(pdata, MMC_RXJABBERERROR); + + stats->rxundersize_g += fxgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); + + stats->rxoversize_g += fxgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); + + stats->rx64octets_gb += fxgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); + + stats->rx65to127octets_gb += + fxgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); + + stats->rx128to255octets_gb += + fxgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); + + stats->rx256to511octets_gb += + fxgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); + + stats->rx512to1023octets_gb += + fxgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); + + stats->rx1024tomaxoctets_gb += + fxgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + + stats->rxunicastframes_g += + fxgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); + + stats->rxlengtherror += fxgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); + + stats->rxoutofrangetype += + fxgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); + + stats->rxpauseframes += fxgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); + + stats->rxfifooverflow += fxgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); + + stats->rxvlanframes_gb += + fxgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); + + stats->rxwatchdogerror += fxgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); + + stats->rxreceiveerrorframe += + fxgmac_mmc_read(pdata, MMC_RXRECEIVEERRORFRAME); + + stats->rxcontrolframe_g += fxgmac_mmc_read(pdata, MMC_RXCONTROLFRAME_G); + + /* Un-freeze counters */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, MMC_CR_MCF_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_CR); +} + +static void fxgmac_config_mmc(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_CR); + /* Set counters to reset on read */ + regval = FXGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS, MMC_CR_ROR_LEN, 1); + /* Reset the counters */ + regval = FXGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS, MMC_CR_CR_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_CR); + +#if defined(FUXI_MISC_INT_HANDLE_FEATURE_EN) && FUXI_MISC_INT_HANDLE_FEATURE_EN + writereg(pdata->pAdapter, 0xffffffff, + pdata->mac_regs + MMC_IPCRXINTMASK); +#endif +} + +static int fxgmac_write_rss_reg(struct fxgmac_pdata *pdata, unsigned int type, + unsigned int index, unsigned int val) +{ + int ret = 0; + type = type; + + writereg(pdata->pAdapter, val, (pdata->base_mem + index)); + + return ret; +} + +static u32 fxgmac_read_rss_options(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_RSS_CTRL); + + /* Get 
the RSS options bits */ + regval = FXGMAC_GET_REG_BITS(regval, MGMT_RSS_CTRL_OPT_POS, + MGMT_RSS_CTRL_OPT_LEN); + + return regval; +} + +static int fxgmac_write_rss_options(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_RSS_CTRL); + + /* Set the RSS options */ + regval = FXGMAC_SET_REG_BITS(regval, MGMT_RSS_CTRL_OPT_POS, + MGMT_RSS_CTRL_OPT_LEN, pdata->rss_options); + + writereg(pdata->pAdapter, regval, (pdata->base_mem + MGMT_RSS_CTRL)); + + return 0; +} + +#if !defined(DPDK) +static int fxgmac_read_rss_hash_key(struct fxgmac_pdata *pdata, u8 *key_buf) +{ + unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); + u32 *key = (u32 *)key_buf; + + while (key_regs--) { + (*key) = cpu_to_be32(readreg( + pdata->pAdapter, + pdata->base_mem + (MGMT_RSS_KEY0 + + key_regs * MGMT_RSS_KEY_REG_INC))); + + DBGPRINT( + MP_LOUD, + ("fxgmac_read_rss_hash_key: idx=%d, reg=%x, key=0x%08x\n", + key_regs, + MGMT_RSS_KEY0 + key_regs * MGMT_RSS_KEY_REG_INC, + (u32)(*key))); + key++; + } + + return 0; +} +#endif + +static int fxgmac_write_rss_hash_key(struct fxgmac_pdata *pdata) +{ + unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); + u32 *key = (u32 *)&pdata->rss_key; + int ret; + + while (key_regs--) { + ret = fxgmac_write_rss_reg( + pdata, FXGMAC_RSS_HASH_KEY_TYPE, + MGMT_RSS_KEY0 + key_regs * MGMT_RSS_KEY_REG_INC, + cpu_to_be32(*key)); + if (ret) + return ret; + key++; + } + + return 0; +} + +static int fxgmac_write_rss_lookup_table(struct fxgmac_pdata *pdata) +{ + unsigned int i, j; + u32 regval = 0; + int ret; + + for (i = 0, j = 0; i < ARRAY_SIZE(pdata->rss_table); i++, j++) { + if (j < MGMT_RSS_IDT_ENTRY_PER_REG) { + regval |= + ((pdata->rss_table[i] & MGMT_RSS_IDT_ENTRY_MASK) + << (j * 2)); + } else { + ret = fxgmac_write_rss_reg( + pdata, FXGMAC_RSS_LOOKUP_TABLE_TYPE, + MGMT_RSS_IDT + (i / MGMT_RSS_IDT_ENTRY_PER_REG - + 1) * MGMT_RSS_IDT_REG_INC, + regval); + if (ret) + return ret; + + regval = pdata->rss_table[i]; + j = 0; + } + } + + if (j == MGMT_RSS_IDT_ENTRY_PER_REG) { + /* last IDT */ + fxgmac_write_rss_reg( + pdata, FXGMAC_RSS_LOOKUP_TABLE_TYPE, + MGMT_RSS_IDT + (i / MGMT_RSS_IDT_ENTRY_PER_REG - 1) * + MGMT_RSS_IDT_REG_INC, + regval); + } + + return 0; +} + +static int fxgmac_set_rss_hash_key(struct fxgmac_pdata *pdata, const u8 *key) +{ + memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); + + return fxgmac_write_rss_hash_key(pdata); +} + +static int fxgmac_set_rss_lookup_table(struct fxgmac_pdata *pdata, + const u32 *table) +{ + unsigned int i; + u32 tval; + +#if FXGMAC_MSIX_CH0RXDIS_EN + DPRINTK("Set_rss_table, rss ctrl eth=0x%08x\n", 0); + + return 0; +#endif + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { + tval = table[i]; + pdata->rss_table[i] = FXGMAC_SET_REG_BITS(pdata->rss_table[i], + MAC_RSSDR_DMCH_POS, + MAC_RSSDR_DMCH_LEN, + tval); + } + + return fxgmac_write_rss_lookup_table(pdata); +} + +static u32 log2ex(u32 value) +{ + u32 i = 31; + while (i > 0) { + if (value & 0x80000000) { + break; + } + value <<= 1; + i--; + } + return i; +} + +static int fxgmac_enable_rss(struct fxgmac_pdata *pdata) +{ + u32 regval; + u32 size = 0; + + int ret; + + if (!pdata->hw_feat.rss) { + return -EOPNOTSUPP; + } + + /* Program the hash key */ + ret = fxgmac_write_rss_hash_key(pdata); + if (ret) { + return ret; + } + + /* Program the lookup table */ + ret = fxgmac_write_rss_lookup_table(pdata); + if (ret) { + return ret; + } + + regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_RSS_CTRL); + + /* Set RSS IDT 
table size */
+	size = log2ex(FXGMAC_RSS_MAX_TABLE_SIZE) - 1;
+	regval = FXGMAC_SET_REG_BITS(regval, MGMT_RSS_CTRL_TBL_SIZE_POS,
+				     MGMT_RSS_CTRL_TBL_SIZE_LEN, size);
+
+#if FXGMAC_MSIX_CH0RXDIS_EN
+	/* set default cpu id to 1 */
+	regval = FXGMAC_SET_REG_BITS(regval, 8, 2, 1);
+#endif
+	/* Enable RSS */
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
+				     MAC_RSSCR_RSSE_LEN, 1);
+
+	/* Set the RSS options */
+	regval = FXGMAC_SET_REG_BITS(regval, MGMT_RSS_CTRL_OPT_POS,
+				     MGMT_RSS_CTRL_OPT_LEN, pdata->rss_options);
+
+	writereg(pdata->pAdapter, regval, (pdata->base_mem + MGMT_RSS_CTRL));
+	DPRINTK("enable_rss callout, rss ctrl reg=0x%08x\n", regval);
+
+	return 0;
+}
+
+static int fxgmac_disable_rss(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+
+	if (!pdata->hw_feat.rss)
+		return -EOPNOTSUPP;
+
+#if FXGMAC_MSIX_CH0RXDIS_EN
+	DPRINTK("Disable_rss, rss ctrl eth=0x%08x\n", 0);
+
+	return 0;
+#endif
+
+	regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_RSS_CTRL);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
+				     MAC_RSSCR_RSSE_LEN, 0);
+
+	writereg(pdata->pAdapter, regval, (pdata->base_mem + MGMT_RSS_CTRL));
+	DPRINTK("disable_rss, rss ctrl reg=0x%08x\n", regval);
+
+	return 0;
+}
+
+static void fxgmac_config_rss(struct fxgmac_pdata *pdata)
+{
+	int ret;
+
+	if (!pdata->hw_feat.rss)
+		return;
+
+	if (pdata->rss)
+		ret = fxgmac_enable_rss(pdata);
+	else
+		ret = fxgmac_disable_rss(pdata);
+
+	if (ret) {
+		DBGPRINT(MP_ERROR,
+			 ("fxgmac_config_rss: error configuring RSS\n"));
+	}
+}
+
+static void fxgmac_update_aoe_ipv4addr(struct fxgmac_pdata *pdata, u8 *ip_addr)
+{
+	unsigned int regval, ipval = 0;
+
+	/* The ARP offload engine must be supported to update the address. */
+	if (!pdata->hw_feat.aoe) {
+		netdev_err(
+			pdata->netdev,
+			"cannot update ip addr, arp offload not supported.\n");
+		return;
+	}
+
+	if (ip_addr) {
+		ipval = (ip_addr[0] << 24) | (ip_addr[1] << 16) |
+			(ip_addr[2] << 8) | (ip_addr[3] << 0);
+		DPRINTK("%s, convert IP dotted-addr %s to binary 0x%08x ok.\n",
+			__FUNCTION__, ip_addr, cpu_to_be32(ipval));
+	} else {
+		/* get ipv4 addr from net device */
+		ipval = fxgmac_get_netdev_ip4addr(pdata);
+		DPRINTK("%s, Get net device binary IP ok, 0x%08x\n",
+			__FUNCTION__, cpu_to_be32(ipval));
+
+		ipval = cpu_to_be32(ipval);
+	}
+
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_ARP_PROTO_ADDR);
+	if (regval != /*cpu_to_be32*/ (ipval)) {
+		writereg(pdata->pAdapter, /*cpu_to_be32*/ (ipval),
+			 pdata->mac_regs + MAC_ARP_PROTO_ADDR);
+		DPRINTK("%s, update arp ipaddr reg from 0x%08x to 0x%08x\n",
+			__FUNCTION__, regval, /*cpu_to_be32*/ (ipval));
+	}
+}
+
+static int fxgmac_enable_arp_offload(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+
+	if (!pdata->hw_feat.aoe)
+		return -EOPNOTSUPP;
+
+	/* Enable ARP offload */
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_ARPEN_POS, MAC_CR_ARPEN_LEN,
+				     1);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR);
+
+	return 0;
+}
+
+static int fxgmac_disable_arp_offload(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+
+	if (!pdata->hw_feat.aoe)
+		return -EOPNOTSUPP;
+
+	/* Disable ARP offload */
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_ARPEN_POS, MAC_CR_ARPEN_LEN,
+				     0);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR);
+
+	return 0;
+}
+
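+/* Background for the NS offload below: the solicited-node multicast
+ * address for an IPv6 target is ff02::1:ffXX:XXXX, i.e. the fixed
+ * prefix ff02::1:ff00:0/104 plus the low 24 bits of the target
+ * address (RFC 4291). That is the address expected in solicited_addr.
+ */
+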
+/* Configure the registers for the NS offload function.
+ * Parameters:
+ * index - 0~1, index into the NS lookup table; one LUT entry is laid
+ *	out as |remote|solicited|target0|target1|
+ * remote_addr - IPv6 address from which the NS solicitation (request)
+ *	is received; commonly 0 so that any remote machine matches.
+ * solicited_addr - the solicited-node multicast group address which the
+ *	fuxi hardware computes and joins.
+ * target_addr1 - the target address carried in the NS solicitation pkt.
+ * target_addr2 - second target address, any address (with the last 6
+ *	bytes the same as the target address?).
+ */
+static int fxgmac_set_ns_offload(struct fxgmac_pdata *pdata, unsigned int index,
+				 unsigned char *remote_addr,
+				 unsigned char *solicited_addr,
+				 unsigned char *target_addr1,
+				 unsigned char *target_addr2,
+				 unsigned char *mac_addr)
+{
+	u32 regval;
+	u32 Address[4], mac_addr_hi, mac_addr_lo;
+	u8 i, remote_not_zero = 0;
+
+	regval = readreg(pdata->pAdapter, pdata->base_mem + NS_TPID_PRO);
+	regval = FXGMAC_SET_REG_BITS(regval, NS_TPID_PRO_STPID_POS,
+				     NS_TPID_PRO_STPID_LEN, 0x8100);
+	regval = FXGMAC_SET_REG_BITS(regval, NS_TPID_PRO_CTPID_POS,
+				     NS_TPID_PRO_CTPID_LEN, 0x9100);
+	writereg(pdata->pAdapter, regval, pdata->base_mem + NS_TPID_PRO);
+	regval = readreg(pdata->pAdapter,
+			 pdata->base_mem + 0x38 * index + NS_LUT_MAC_ADDR_CTL);
+	regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_DST_CMP_TYPE_POS,
+				     NS_LUT_DST_CMP_TYPE_LEN, 1);
+	regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_DST_IGNORED_POS,
+				     NS_LUT_DST_IGNORED_LEN, 1);
+	regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_REMOTE_AWARED_POS,
+				     NS_LUT_REMOTE_AWARED_LEN, 1);
+	regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_TARGET_ISANY_POS,
+				     NS_LUT_TARGET_ISANY_LEN, 0);
+	writereg(pdata->pAdapter, regval,
+		 pdata->base_mem + 0x38 * index + NS_LUT_MAC_ADDR_CTL);
+
+	/* Fill the remote / target / solicited address entries of the LUT */
+	for (i = 0; i < 16 / 4; i++) {
+		Address[i] = (remote_addr[i * 4 + 0] << 24) |
+			     (remote_addr[i * 4 + 1] << 16) |
+			     (remote_addr[i * 4 + 2] << 8) |
+			     (remote_addr[i * 4 + 3] << 0);
+		writereg(pdata->pAdapter, Address[i],
+			 pdata->base_mem + 0x38 * index + NS_LUT_ROMOTE0 +
+				 4 * i);
+		if (Address[i]) {
+			remote_not_zero = 1;
+		}
+		Address[i] = (target_addr1[i * 4 + 0] << 24) |
+			     (target_addr1[i * 4 + 1] << 16) |
+			     (target_addr1[i * 4 + 2] << 8) |
+			     (target_addr1[i * 4 + 3] << 0);
+		writereg(pdata->pAdapter, Address[i],
+			 pdata->base_mem + 0x38 * index + NS_LUT_TARGET0 +
+				 4 * i);
+		Address[i] = (solicited_addr[i * 4 + 0] << 24) |
+			     (solicited_addr[i * 4 + 1] << 16) |
+			     (solicited_addr[i * 4 + 2] << 8) |
+			     (solicited_addr[i * 4 + 3] << 0);
+		writereg(pdata->pAdapter, Address[i],
+			 pdata->base_mem + 0x38 * index + NS_LUT_SOLICITED0 +
+				 4 * i);
+		Address[i] = (target_addr2[i * 4 + 0] << 24) |
+			     (target_addr2[i * 4 + 1] << 16) |
+			     (target_addr2[i * 4 + 2] << 8) |
+			     (target_addr2[i * 4 + 3] << 0);
+		writereg(pdata->pAdapter, Address[i],
+			 pdata->base_mem + 0x10 * index + NS_LUT_TARGET4 +
+				 4 * i);
+	}
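+
+	/* Packing example: MAC 00:11:22:33:44:55 gives
+	 * mac_addr_hi = 0x00112233 and mac_addr_lo = 0x4455.
+	 */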
+	mac_addr_hi = (mac_addr[0] << 24) | (mac_addr[1] << 16) |
+		      (mac_addr[2] << 8) | (mac_addr[3] << 0);
+	mac_addr_lo = (mac_addr[4] << 8) | (mac_addr[5] << 0);
+
+	writereg(pdata->pAdapter, mac_addr_hi,
+		 pdata->base_mem + 0x38 * index + NS_LUT_MAC_ADDR);
+	if (remote_not_zero == 0) {
+		regval = readreg(pdata->pAdapter, pdata->base_mem +
+						  0x38 * index +
+						  NS_LUT_MAC_ADDR_CTL);
+		regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_REMOTE_AWARED_POS,
+					     NS_LUT_REMOTE_AWARED_LEN, 0);
+		regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_MAC_ADDR_LOW_POS,
+					     NS_LUT_MAC_ADDR_LOW_LEN,
+					     mac_addr_lo);
+		writereg(pdata->pAdapter, regval,
+			 pdata->base_mem + 0x38 * index + NS_LUT_MAC_ADDR_CTL);
+	} else {
+		regval = readreg(pdata->pAdapter, pdata->base_mem +
+						  0x38 * index +
+						  NS_LUT_MAC_ADDR_CTL);
+		regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_REMOTE_AWARED_POS,
+					     NS_LUT_REMOTE_AWARED_LEN, 1);
+		regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_MAC_ADDR_LOW_POS,
+					     NS_LUT_MAC_ADDR_LOW_LEN,
+					     mac_addr_lo);
+		writereg(pdata->pAdapter, regval,
+			 pdata->base_mem + 0x38 * index + NS_LUT_MAC_ADDR_CTL);
+	}
+	return 0;
+}
+
+static void fxgmac_update_ns_offload_ipv6addr(struct fxgmac_pdata *pdata,
+					      unsigned int param)
+{
+	struct net_device *netdev = pdata->netdev;
+	unsigned char addr_buf[5][16];
+
+	unsigned char *remote_addr = (unsigned char *)&addr_buf[0][0];
+	unsigned char *solicited_addr = (unsigned char *)&addr_buf[1][0];
+	unsigned char *target_addr1 = (unsigned char *)&addr_buf[2][0];
+	unsigned char *mac_addr = (unsigned char *)&addr_buf[4][0];
+
+	/* get ipv6 addr from net device */
+	if (NULL == fxgmac_get_netdev_ip6addr(pdata, target_addr1,
+					      solicited_addr,
+					      (FXGMAC_NS_IFA_LOCAL_LINK |
+					       FXGMAC_NS_IFA_GLOBAL_UNICAST) &
+						      param)) {
+		DPRINTK("%s, failed to get net device ipv6 addr, NS offload ignored.\n",
+			__FUNCTION__);
+
+		return;
+	}
+
+	DPRINTK("%s, Get net device binary IPv6 ok, local-link=%pI6\n",
+		__FUNCTION__, target_addr1);
+	DPRINTK("%s, Get net device binary IPv6 ok, solicited =%pI6\n",
+		__FUNCTION__, solicited_addr);
+
+	memcpy(mac_addr, netdev->dev_addr, netdev->addr_len);
+	DPRINTK("%s, Get net device MAC addr ok, ns_tab idx=%d, %02x:%02x:%02x:%02x:%02x:%02x\n",
+		__FUNCTION__, pdata->expansion.ns_offload_tab_idx, mac_addr[0],
+		mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4],
+		mac_addr[5]);
+
+	memset(remote_addr, 0, 16);
+	fxgmac_set_ns_offload(pdata, pdata->expansion.ns_offload_tab_idx++,
+			      remote_addr, solicited_addr, target_addr1,
+			      target_addr1, mac_addr);
+	if (pdata->expansion.ns_offload_tab_idx >= 2)
+		pdata->expansion.ns_offload_tab_idx = 0;
+}
+
+static int fxgmac_enable_ns_offload(struct fxgmac_pdata *pdata)
+{
+	writereg(pdata->pAdapter, 0x00000011, pdata->base_mem + NS_OF_GLB_CTL);
+	return 0;
+}
+
+static int fxgmac_disable_ns_offload(struct fxgmac_pdata *pdata)
+{
+	writereg(pdata->pAdapter, 0x00000000, pdata->base_mem + NS_OF_GLB_CTL);
+	return 0;
+}
+
+static int fxgmac_check_wake_pattern_fifo_pointer(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+	int ret = 0;
+
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_RWKFILTERST_POS,
+				     MAC_PMT_STA_RWKFILTERST_LEN, 1);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA);
+
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA);
+	regval = FXGMAC_GET_REG_BITS(regval, MAC_PMT_STA_RWKPTR_POS,
+				     MAC_PMT_STA_RWKPTR_LEN);
+	if (regval != 0) {
+		DPRINTK("Remote fifo pointer is not 0\n");
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static int fxgmac_set_wake_pattern_mask(struct fxgmac_pdata *pdata,
+					u32 filter_index, u8 register_index,
+					u32 Data)
+{
+	const u16 address_offset[16][3] = {
+		{ 0x1020, 0x1024, 0x1028 }, { 0x102c, 0x1030, 0x1034 },
+		{ 0x1038, 0x103c, 0x1040 }, { 0x1044, 0x1050, 0x1054 },
+		{ 0x1058, 0x105c, 0x1060 }, { 0x1064, 0x1068, 0x106c },
+		{ 0x1070, 0x1074, 0x1078 }, { 0x107c, 0x1080, 0x1084 },
+		{ 0x1088, 0x108c, 0x1090 }, { 0x1134, 0x113c, 0x1140 },
+		{ 0x1208, 0x1200, 0x1204 }, { 0x1218, 0x1210, 0x1214 },
+		{ 0x1228, 0x1220, 0x1224 }, { 0x1238, 0x1230, 0x1234 },
+		{ 0x1248, 0x1240, 0x1244 }, { 0x1258, 0x1250, 0x1254 },
+	};
+	if (filter_index > 15 || register_index > 2) {
+		DbgPrintF(
+			MP_TRACE,
+			"%s - Remote mask pointer is out of range, 
filter_index:%d, register_index:0x%x\n", + __FUNCTION__, filter_index, register_index); + return -1; + } + writereg(pdata->pAdapter, Data, + pdata->base_mem + + address_offset[filter_index][register_index]); + return 0; +} + +static u16 wol_crc16(u8 *pucframe, u16 uslen) +{ + int i; + + union type16 { + u16 raw; + struct { + u16 bit_0 : 1; + u16 bit_1 : 1; + u16 bit_2 : 1; + u16 bit_3 : 1; + u16 bit_4 : 1; + u16 bit_5 : 1; + u16 bit_6 : 1; + u16 bit_7 : 1; + u16 bit_8 : 1; + u16 bit_9 : 1; + u16 bit_10 : 1; + u16 bit_11 : 1; + u16 bit_12 : 1; + u16 bit_13 : 1; + u16 bit_14 : 1; + u16 bit_15 : 1; + } bits; + }; + + union type8 { + u16 raw; + + struct { + u16 bit_0 : 1; + u16 bit_1 : 1; + u16 bit_2 : 1; + u16 bit_3 : 1; + u16 bit_4 : 1; + u16 bit_5 : 1; + u16 bit_6 : 1; + u16 bit_7 : 1; + } bits; + }; + + union type16 crc, crc_comb; + union type8 next_crc, rrpe_data; + next_crc.raw = 0; + crc.raw = 0xffff; + for (i = 0; i < uslen; i++) { + rrpe_data.raw = pucframe[i]; + next_crc.bits.bit_0 = crc.bits.bit_15 ^ rrpe_data.bits.bit_0; + next_crc.bits.bit_1 = crc.bits.bit_14 ^ next_crc.bits.bit_0 ^ + rrpe_data.bits.bit_1; + next_crc.bits.bit_2 = crc.bits.bit_13 ^ next_crc.bits.bit_1 ^ + rrpe_data.bits.bit_2; + next_crc.bits.bit_3 = crc.bits.bit_12 ^ next_crc.bits.bit_2 ^ + rrpe_data.bits.bit_3; + next_crc.bits.bit_4 = crc.bits.bit_11 ^ next_crc.bits.bit_3 ^ + rrpe_data.bits.bit_4; + next_crc.bits.bit_5 = crc.bits.bit_10 ^ next_crc.bits.bit_4 ^ + rrpe_data.bits.bit_5; + next_crc.bits.bit_6 = crc.bits.bit_9 ^ next_crc.bits.bit_5 ^ + rrpe_data.bits.bit_6; + next_crc.bits.bit_7 = crc.bits.bit_8 ^ next_crc.bits.bit_6 ^ + rrpe_data.bits.bit_7; + + crc_comb.bits.bit_15 = crc.bits.bit_7 ^ next_crc.bits.bit_7; + crc_comb.bits.bit_14 = crc.bits.bit_6; + crc_comb.bits.bit_13 = crc.bits.bit_5; + crc_comb.bits.bit_12 = crc.bits.bit_4; + crc_comb.bits.bit_11 = crc.bits.bit_3; + crc_comb.bits.bit_10 = crc.bits.bit_2; + crc_comb.bits.bit_9 = crc.bits.bit_1 ^ next_crc.bits.bit_0; + crc_comb.bits.bit_8 = crc.bits.bit_0 ^ next_crc.bits.bit_1; + crc_comb.bits.bit_7 = next_crc.bits.bit_0 ^ next_crc.bits.bit_2; + crc_comb.bits.bit_6 = next_crc.bits.bit_1 ^ next_crc.bits.bit_3; + crc_comb.bits.bit_5 = next_crc.bits.bit_2 ^ next_crc.bits.bit_4; + crc_comb.bits.bit_4 = next_crc.bits.bit_3 ^ next_crc.bits.bit_5; + crc_comb.bits.bit_3 = next_crc.bits.bit_4 ^ next_crc.bits.bit_6; + crc_comb.bits.bit_2 = next_crc.bits.bit_5 ^ next_crc.bits.bit_7; + crc_comb.bits.bit_1 = next_crc.bits.bit_6; + crc_comb.bits.bit_0 = next_crc.bits.bit_7; + crc.raw = crc_comb.raw; + } + return crc.raw; +} + +static int fxgmac_set_wake_pattern(struct fxgmac_pdata *pdata, + struct wol_bitmap_pattern *wol_pattern, + u32 pattern_cnt) +{ + u32 i, j, kp, km, mask_index; + int z; + u16 map_index; + u8 mask[MAX_PATTERN_SIZE]; + u32 regval = 0; + u32 total_cnt = 0, pattern_inherited_cnt = 0; + u8 *ptdata, *ptmask; + + if (pattern_cnt > MAX_PATTERN_COUNT) { + DbgPrintF( + MP_TRACE, + "%s - Error: %d patterns, exceed %d, not supported!\n", + __FUNCTION__, pattern_cnt, MAX_PATTERN_COUNT); + return -1; + } + + /* Reset the FIFO head pointer. 
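+	 * Writing RWKFILTERST resets the remote-wakeup filter FIFO pointer;
+	 * fxgmac_check_wake_pattern_fifo_pointer() then reads RWKPTR back to
+	 * verify it is 0 before the pattern FIFO is programmed below.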
*/ + if (fxgmac_check_wake_pattern_fifo_pointer(pdata)) { + DbgPrintF( + MP_TRACE, + "%s - Warning: the remote pattern array pointer is not be 0\n", + __FUNCTION__); + return -1; + } + + for (i = 0; i < pattern_cnt; i++) { + memcpy(&pdata->pattern[i], wol_pattern + i, + sizeof(wol_pattern[0])); + if (pattern_cnt + pattern_inherited_cnt < MAX_PATTERN_COUNT) { + if (wol_pattern[i].pattern_offset || + !(wol_pattern[i].mask_info[0] & 0x01)) { + memcpy(&pdata->pattern[pattern_cnt + + pattern_inherited_cnt], + wol_pattern + i, sizeof(wol_pattern[0])); + pattern_inherited_cnt++; + } + } + } + total_cnt = pattern_cnt + pattern_inherited_cnt; + + /* + * calculate the crc-16 of the mask pattern + * print the pattern and mask for debug purpose. + */ + for (i = 0; i < total_cnt; i++) { + /* Please program pattern[i] to NIC for pattern match wakeup. + * pattern_size, pattern_info, mask_info + */ + /* save the mask pattern */ + mask_index = 0; + map_index = 0; + for (j = 0; j < pdata->pattern[i].mask_size; j++) { + for (z = 0; + z < ((j == (MAX_PATTERN_SIZE / 8 - 1)) ? 7 : 8); + z++) { + if (pdata->pattern[i].mask_info[j] & + (0x01 << z)) { + mask[map_index] = + pdata->pattern[i].pattern_info + [pdata->pattern[i] + .pattern_offset + + mask_index]; + map_index++; + } + mask_index++; + } + } + /* calculate the crc-16 of the mask pattern */ + pdata->pattern[i].pattern_crc = wol_crc16(mask, map_index); + + /* Print pattern match, for debug purpose. */ + DbgPrintF(MP_LOUD, "%s - Pattern[%d]:", __FUNCTION__, i); + for (kp = 0, km = 0; + kp < sizeof(pdata->pattern[i].pattern_info); + kp += 16, km += 2) { + ptdata = &pdata->pattern[i].pattern_info[kp]; + ptmask = &pdata->pattern[i].mask_info[km]; + DBGPRINT( + MP_LOUD, + ("\n %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x Mask %02x-%02x", + ptdata[0], ptdata[1], ptdata[2], ptdata[3], + ptdata[4], ptdata[5], ptdata[6], ptdata[7], + ptdata[8], ptdata[9], ptdata[10], ptdata[11], + ptdata[12], ptdata[13], ptdata[14], ptdata[15], + ptmask[0], ptmask[1])); + } + DbgPrintF( + MP_LOUD, + "WritePatternToNic62 the %d patterns crc = %x mask length = %d, mask_offset=%x.\n", + i, pdata->pattern[i].pattern_crc, map_index, + pdata->pattern[i].pattern_offset); + memset(mask, 0, sizeof(mask)); + } + + /* Write patterns by FIFO block. */ + for (i = 0; i < (total_cnt + 3) / 4; i++) { + /* 1. Write the first 4Bytes of Filter. */ + writereg(pdata->pAdapter, + ((pdata->pattern[i * 4 + 0].mask_info[3] & 0x7f) + << 24) | + (pdata->pattern[i * 4 + 0].mask_info[2] + << 16) | + (pdata->pattern[i * 4 + 0].mask_info[1] << 8) | + (pdata->pattern[i * 4 + 0].mask_info[0] << 0), + pdata->mac_regs + MAC_RWK_PAC); + + writereg(pdata->pAdapter, + ((pdata->pattern[i * 4 + 1].mask_info[3] & 0x7f) + << 24) | + (pdata->pattern[i * 4 + 1].mask_info[2] + << 16) | + (pdata->pattern[i * 4 + 1].mask_info[1] << 8) | + (pdata->pattern[i * 4 + 1].mask_info[0] << 0), + pdata->mac_regs + MAC_RWK_PAC); + + writereg(pdata->pAdapter, + ((pdata->pattern[i * 4 + 2].mask_info[3] & 0x7f) + << 24) | + (pdata->pattern[i * 4 + 2].mask_info[2] + << 16) | + (pdata->pattern[i * 4 + 2].mask_info[1] << 8) | + (pdata->pattern[i * 4 + 2].mask_info[0] << 0), + pdata->mac_regs + MAC_RWK_PAC); + + writereg(pdata->pAdapter, + ((pdata->pattern[i * 4 + 3].mask_info[3] & 0x7f) + << 24) | + (pdata->pattern[i * 4 + 3].mask_info[2] + << 16) | + (pdata->pattern[i * 4 + 3].mask_info[1] << 8) | + (pdata->pattern[i * 4 + 3].mask_info[0] << 0), + pdata->mac_regs + MAC_RWK_PAC); + + /* 2. 
Write the Filter Command. */ + regval = 0; + /* Set filter enable bit. */ + regval |= ((i * 4 + 0) < total_cnt) ? (0x1 << 0) : 0x0; + regval |= ((i * 4 + 1) < total_cnt) ? (0x1 << 8) : 0x0; + regval |= ((i * 4 + 2) < total_cnt) ? (0x1 << 16) : 0x0; + regval |= ((i * 4 + 3) < total_cnt) ? (0x1 << 24) : 0x0; + /* Set filter address type, 0- unicast, 1 - multicast. */ + regval |= (i * 4 + 0 >= total_cnt) ? 0x0 : + (i * 4 + 0 >= pattern_cnt) ? (0x1 << (3 + 0)) : + pdata->pattern[i * 4 + 0].pattern_offset ? + 0x0 : + !(pdata->pattern[i * 4 + 0].mask_info[0] & 0x01) ? + 0x0 : + (pdata->pattern[i * 4 + 0].pattern_info[0] & 0x01) ? + (0x1 << (3 + 0)) : + 0x0; + regval |= (i * 4 + 1 >= total_cnt) ? 0x0 : + (i * 4 + 1 >= pattern_cnt) ? (0x1 << (3 + 8)) : + pdata->pattern[i * 4 + 1].pattern_offset ? + 0x0 : + !(pdata->pattern[i * 4 + 1].mask_info[0] & 0x01) ? + 0x0 : + (pdata->pattern[i * 4 + 1].pattern_info[0] & 0x01) ? + (0x1 << (3 + 8)) : + 0x0; + regval |= (i * 4 + 2 >= total_cnt) ? 0x0 : + (i * 4 + 2 >= pattern_cnt) ? (0x1 << (3 + 16)) : + pdata->pattern[i * 4 + 2].pattern_offset ? + 0x0 : + !(pdata->pattern[i * 4 + 2].mask_info[0] & 0x01) ? + 0x0 : + (pdata->pattern[i * 4 + 2].pattern_info[0] & 0x01) ? + (0x1 << (3 + 16)) : + 0x0; + regval |= (i * 4 + 3 >= total_cnt) ? 0x0 : + (i * 4 + 3 >= pattern_cnt) ? (0x1 << (3 + 24)) : + pdata->pattern[i * 4 + 3].pattern_offset ? + 0x0 : + !(pdata->pattern[i * 4 + 3].mask_info[0] & 0x01) ? + 0x0 : + (pdata->pattern[i * 4 + 3].pattern_info[0] & 0x01) ? + (0x1 << (3 + 24)) : + 0x0; + writereg(pdata->pAdapter, regval, + pdata->mac_regs + MAC_RWK_PAC); + + /* 3. Write the mask offset. */ + writereg(pdata->pAdapter, + (pdata->pattern[i * 4 + 3].pattern_offset << 24) | + (pdata->pattern[i * 4 + 2].pattern_offset + << 16) | + (pdata->pattern[i * 4 + 1].pattern_offset + << 8) | + (pdata->pattern[i * 4 + 0].pattern_offset + << 0), + pdata->mac_regs + MAC_RWK_PAC); + + /* 4. Write the masked data CRC. 
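+		 * Two 16-bit CRCs are packed per 32-bit write, with the
+		 * lower-numbered filter in the low half-word; the values were
+		 * computed by wol_crc16() over the mask-selected bytes.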
*/
+		writereg(pdata->pAdapter,
+			 (pdata->pattern[i * 4 + 1].pattern_crc << 16) |
+				 (pdata->pattern[i * 4 + 0].pattern_crc << 0),
+			 pdata->mac_regs + MAC_RWK_PAC);
+		writereg(pdata->pAdapter,
+			 (pdata->pattern[i * 4 + 3].pattern_crc << 16) |
+				 (pdata->pattern[i * 4 + 2].pattern_crc << 0),
+			 pdata->mac_regs + MAC_RWK_PAC);
+	}
+
+	for (i = 0; i < total_cnt; i++) {
+		fxgmac_set_wake_pattern_mask(
+			pdata, i, 0,
+			((pdata->pattern[i].mask_info[7] & 0x7f) << (24 + 1)) |
+				(pdata->pattern[i].mask_info[6] << (16 + 1)) |
+				(pdata->pattern[i].mask_info[5] << (8 + 1)) |
+				(pdata->pattern[i].mask_info[4] << (0 + 1)) |
+				((pdata->pattern[i].mask_info[3] & 0x80) >>
+				 7)); /* global manager register mask bits 31~62 */
+		fxgmac_set_wake_pattern_mask(
+			pdata, i, 1,
+			((pdata->pattern[i].mask_info[11] & 0x7f) << (24 + 1)) |
+				(pdata->pattern[i].mask_info[10] << (16 + 1)) |
+				(pdata->pattern[i].mask_info[9] << (8 + 1)) |
+				(pdata->pattern[i].mask_info[8] << (0 + 1)) |
+				((pdata->pattern[i].mask_info[7] & 0x80) >>
+				 7)); /* global manager register mask bits 63~94 */
+		fxgmac_set_wake_pattern_mask(
+			pdata, i, 2,
+			((pdata->pattern[i].mask_info[15] & 0x7f) << (24 + 1)) |
+				(pdata->pattern[i].mask_info[14] << (16 + 1)) |
+				(pdata->pattern[i].mask_info[13] << (8 + 1)) |
+				(pdata->pattern[i].mask_info[12] << (0 + 1)) |
+				((pdata->pattern[i].mask_info[11] & 0x80) >>
+				 7)); /* global manager register mask bits 95~126 */
+	}
+
+	return 0;
+}
+
+static int fxgmac_enable_wake_pattern(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_RWKFILTERST_POS,
+				     MAC_PMT_STA_RWKFILTERST_LEN, 1);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_RWKPKTEN_POS,
+				     MAC_PMT_STA_RWKPKTEN_LEN, 1);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA);
+	regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL);
+	regval = FXGMAC_SET_REG_BITS(regval, WOL_PKT_EN_POS, WOL_PKT_EN_LEN, 1);
+	writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL);
+	return 0;
+}
+
+static int fxgmac_disable_wake_pattern(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_RWKFILTERST_POS,
+				     MAC_PMT_STA_RWKFILTERST_LEN, 1);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_RWKPKTEN_POS,
+				     MAC_PMT_STA_RWKPKTEN_LEN, 0);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA);
+	regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL);
+	regval = FXGMAC_SET_REG_BITS(regval, WOL_PKT_EN_POS, WOL_PKT_EN_LEN, 0);
+	writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL);
+	return 0;
+}
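+
+/*
+ * Usage sketch (illustrative only, not part of the driver): programming a
+ * single wake filter that matches the ARP EtherType (0x0806) in frame bytes
+ * 12-13. The field semantics are inferred from fxgmac_set_wake_pattern()
+ * above: bit N of mask_info[] selects byte (pattern_offset + N) of
+ * pattern_info[] for the CRC match.
+ *
+ *	static int example_arp_wake(struct fxgmac_pdata *pdata)
+ *	{
+ *		struct wol_bitmap_pattern pat = { 0 };
+ *
+ *		pat.pattern_offset = 0;
+ *		pat.mask_size = MAX_PATTERN_SIZE / 8;
+ *		pat.mask_info[1] = 0x30;      // select frame bytes 12 and 13
+ *		pat.pattern_info[12] = 0x08;  // ARP EtherType, high byte
+ *		pat.pattern_info[13] = 0x06;  // ARP EtherType, low byte
+ *
+ *		if (fxgmac_set_wake_pattern(pdata, &pat, 1))
+ *			return -1;
+ *		return fxgmac_enable_wake_pattern(pdata);
+ *	}
+ */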
+static int fxgmac_enable_wake_magic_pattern(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_MGKPKTEN_POS,
+				     MAC_PMT_STA_MGKPKTEN_LEN, 1);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA);
+	regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL);
+	regval = FXGMAC_SET_REG_BITS(regval, WOL_PKT_EN_POS, WOL_PKT_EN_LEN, 1);
+	writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL);
+
+	/* Enable the PME Enable bit. */
+	cfg_r32(pdata, REG_PM_STATCTRL, &regval);
+	regval = FXGMAC_SET_REG_BITS(regval, PM_CTRLSTAT_PME_EN_POS,
+				     PM_CTRLSTAT_PME_EN_LEN, 1);
+	cfg_w32(pdata, REG_PM_STATCTRL, regval);
+
+	return 0;
+}
+
+static int fxgmac_disable_wake_magic_pattern(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+	regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL);
+	regval = FXGMAC_SET_REG_BITS(regval, WOL_PKT_EN_POS, WOL_PKT_EN_LEN, 0);
+	writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL);
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_MGKPKTEN_POS,
+				     MAC_PMT_STA_MGKPKTEN_LEN, 0);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA);
+	return 0;
+}
+
+#if defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN
+/*
+ * Enable wake packet indication. Called before sleep/hibernation; no
+ * matching disable call is needed, since fxgmac_get_wake_packet_indication()
+ * restores normal mode once it is done.
+ */
+static void fxgmac_enable_wake_packet_indication(struct fxgmac_pdata *pdata,
+						 int en)
+{
+	u32 val_wpi_crtl0;
+
+	/* read-clear WoL event. */
+	readreg(pdata->pAdapter, pdata->base_mem + MGMT_WOL_CTRL);
+
+	/* get wake packet information */
+	val_wpi_crtl0 =
+		(u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_WPI_CTRL0);
+
+	/* prepare to write packet data by setting wpi_mode to 1 */
+	val_wpi_crtl0 =
+		FXGMAC_SET_REG_BITS(val_wpi_crtl0, MGMT_WPI_CTRL0_WPI_MODE_POS,
+				    MGMT_WPI_CTRL0_WPI_MODE_LEN,
+				    (en ? MGMT_WPI_CTRL0_WPI_MODE_WR :
+					  MGMT_WPI_CTRL0_WPI_MODE_NORMAL));
+	writereg(pdata->pAdapter, val_wpi_crtl0,
+		 pdata->base_mem + MGMT_WPI_CTRL0);
+
+	DbgPrintF(MP_TRACE, "%s - WPI pkt enable=%d, reg=%08x.\n", __FUNCTION__,
+		  en, val_wpi_crtl0);
+
+	return;
+}
+
+/*
+ * This function reads the wake-up packet after MDIS resume.
+ * input:
+ *	pdata
+ *	wpi_buf		container for the packet.
+ *	buf_size	size of the packet container; HW limits it to 14 bits,
+ *			i.e. 16KB altogether.
+ * output:
+ *	wake_reason	from HW; identifies 1) magic packet or 2) pattern
+ *			(remote wake packet), while WAKE_REASON_HW_ERR
+ *			indicates an error.
+ *	packet_size	length of the wake packet. 0 indicates an exception.
+ *
+ */
+static void fxgmac_get_wake_packet_indication(struct fxgmac_pdata *pdata,
+					      int *wake_reason,
+					      u32 *wake_pattern_number,
+					      u8 *wpi_buf, u32 buf_size,
+					      u32 *packet_size)
+{
+	u32 i, regval, val_wpi_crtl0, *dw_wpi_buf;
+	u32 data_len, data_len_dw, b_need_pkt = 0;
+
+	*wake_reason = WAKE_REASON_NONE;
+	*packet_size = 0;
+	fxgmac_release_phy(pdata);
+
+	/* try to check the wake reason. GMAC reg 20c0 only tells Magic or
+	 * remote-pattern; read from MGMT_WOL_CTRL, 1530, instead.
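+	 * MGMT_WOL_CTRL reports magic-packet, pattern-match and link-change
+	 * wake causes; for a pattern wake it also encodes which of the
+	 * filters fired, which is decoded into *wake_pattern_number below.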
+ */ + regval = (u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_WOL_CTRL); + DbgPrintF(MP_TRACE, "%s - 0x1530=%x.\n", __FUNCTION__, regval); + if (!regval) { + DbgPrintF(MP_TRACE, "%s - nothing for WPI pkt.\n", + __FUNCTION__); + return; + } + + if (regval & MGMT_WOL_CTRL_WPI_MGC_PKT) { + *wake_reason = WAKE_REASON_MAGIC; + b_need_pkt = 1; + } else if (regval & MGMT_WOL_CTRL_WPI_RWK_PKT) { + *wake_reason = WAKE_REASON_PATTERNMATCH; + b_need_pkt = 1; + *wake_pattern_number = 0; + + /* + * wake_pattern_number, HW should tell, tbd + */ + for (i = 0; i < MAX_PATTERN_COUNT; i++) { + if (regval & (MGMT_WOL_CTRL_WPI_RWK_PKT_NUMBER << i)) { + *wake_pattern_number = i; + break; + } + } + + } else if (regval & MGMT_WOL_CTRL_WPI_LINK_CHG) { + *wake_reason = WAKE_REASON_LINK; + } + + if (!b_need_pkt) { + DbgPrintF(MP_TRACE, "%s - wake by link and no WPI pkt.\n", + __FUNCTION__); + return; + } + + /* get wake packet information */ + val_wpi_crtl0 = + (u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_WPI_CTRL0); + + if (val_wpi_crtl0 & MGMT_WPI_CTRL0_WPI_FAIL) { + *wake_reason = WAKE_REASON_HW_ERR; + DbgPrintF(MP_TRACE, "%s - WPI pkt fail from hw.\n", + __FUNCTION__); + return; + } + + *packet_size = FXGMAC_GET_REG_BITS(val_wpi_crtl0, + MGMT_WPI_CTRL0_WPI_PKT_LEN_POS, + MGMT_WPI_CTRL0_WPI_PKT_LEN_LEN); + + if (0 == *packet_size) { + *wake_reason = WAKE_REASON_HW_ERR; + DbgPrintF(MP_TRACE, "%s - WPI pkt len is 0 from hw.\n", + __FUNCTION__); + return; + } + + DbgPrintF(MP_TRACE, "%s - WPI pkt len from hw, *packet_size=%u.\n", + __FUNCTION__, *packet_size); + + if (buf_size < *packet_size) { + DbgPrintF(MP_WARN, + "%s - too small buf_size=%u, WPI pkt len is %u.\n", + __FUNCTION__, buf_size, *packet_size); + data_len = buf_size; + } else { + data_len = *packet_size; + } + + /* prepare to read packet data by write wpi_mode to 2 */ + val_wpi_crtl0 = FXGMAC_SET_REG_BITS(val_wpi_crtl0, + MGMT_WPI_CTRL0_WPI_MODE_POS, + MGMT_WPI_CTRL0_WPI_MODE_LEN, + MGMT_WPI_CTRL0_WPI_MODE_RD); + writereg(pdata->pAdapter, val_wpi_crtl0, + pdata->base_mem + MGMT_WPI_CTRL0); + + dw_wpi_buf = (u32 *)wpi_buf; + data_len_dw = (data_len + 3) / 4; + + i = 0; + DbgPrintF( + MP_TRACE, + "%s - before retrieve, len=%d, len_dw=%d, reg_wpi_ctrl0=%08x.\n", + __FUNCTION__, data_len, data_len_dw, val_wpi_crtl0); + while ((0 == (val_wpi_crtl0 & MGMT_WPI_CTRL0_WPI_OP_DONE))) { + if (i < data_len_dw) { + regval = (u32)readreg(pdata->pAdapter, + pdata->base_mem + + MGMT_WPI_CTRL1_DATA); + /*dw_wpi_buf[i] = SWAP_BYTES_32(regval);*/ + dw_wpi_buf[i] = regval; + } else { + break; + } + + val_wpi_crtl0 = (u32)readreg(pdata->pAdapter, + pdata->base_mem + MGMT_WPI_CTRL0); + i++; + } + if (*packet_size <= MAC_CRC_LENGTH) { + DbgPrintF(MP_TRACE, + "%s - Warning, WPI pkt len is less 4 from hw.\n", + __FUNCTION__); + return; + } + *packet_size -= MAC_CRC_LENGTH; + + /* once read data complete and write wpi_mode to 0, normal */ + val_wpi_crtl0 = FXGMAC_SET_REG_BITS(val_wpi_crtl0, + MGMT_WPI_CTRL0_WPI_MODE_POS, + MGMT_WPI_CTRL0_WPI_MODE_LEN, + MGMT_WPI_CTRL0_WPI_MODE_NORMAL); + writereg(pdata->pAdapter, val_wpi_crtl0, + pdata->base_mem + MGMT_WPI_CTRL0); + + DbgPrintF( + MP_TRACE, + "%s - WPI done and back to normal mode, reg=%08x, read data=%dB.\n", + __FUNCTION__, val_wpi_crtl0, i * 4); + + return; +} +#endif /* FUXI_PM_WPI_READ_FEATURE_EN */ + +static int fxgmac_enable_wake_link_change(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL); + regval = FXGMAC_SET_REG_BITS(regval, 
WOL_LINKCHG_EN_POS, + WOL_LINKCHG_EN_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL); + return 0; +} +static int fxgmac_disable_wake_link_change(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL); + regval = FXGMAC_SET_REG_BITS(regval, WOL_LINKCHG_EN_POS, + WOL_LINKCHG_EN_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL); + return 0; +} + +static void fxgmac_config_wol(struct fxgmac_pdata *pdata, int en) +{ + /* enable or disable WOL. this function only set wake-up type, and power related configure + * will be in other place, see power management. + */ + if (!pdata->hw_feat.rwk) { + netdev_err(pdata->netdev, + "error configuring WOL - not supported.\n"); + return; + } + + fxgmac_disable_wake_magic_pattern(pdata); + fxgmac_disable_wake_pattern(pdata); + fxgmac_disable_wake_link_change(pdata); + + if (en) { + /* config mac address for rx of magic or ucast */ + fxgmac_set_mac_address(pdata, (u8 *)(pdata->netdev->dev_addr)); + + /* Enable Magic packet */ + if (pdata->expansion.wol & WAKE_MAGIC) { + fxgmac_enable_wake_magic_pattern(pdata); + } + + /* Enable global unicast packet */ + if (pdata->expansion.wol & WAKE_UCAST || + pdata->expansion.wol & WAKE_MCAST || + pdata->expansion.wol & WAKE_BCAST || + pdata->expansion.wol & WAKE_ARP) { + fxgmac_enable_wake_pattern(pdata); + } + + /* Enable ephy link change */ + if ((FXGMAC_WOL_UPON_EPHY_LINK) && + (pdata->expansion.wol & WAKE_PHY)) { + fxgmac_enable_wake_link_change(pdata); + } + } + device_set_wakeup_enable(/*pci_dev_to_dev*/ (pdata->dev), en); + + DPRINTK("config_wol callout\n"); +} + +static int fxgmac_get_ephy_state(struct fxgmac_pdata *pdata) +{ + u32 value; + value = readreg(pdata->pAdapter, pdata->base_mem + MGMT_EPHY_CTRL); + return value; +} + +static void fxgmac_enable_dma_interrupts(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + unsigned int dma_ch_isr, dma_ch_ier; + struct fxgmac_channel *channel; + unsigned int i; + +#ifdef NIC_NET_ADAPETERCX + u32 regval; + /* config interrupt to level signal */ + regval = (u32)readreg(pdata->pAdapter, pdata->mac_regs + DMA_MR); + regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_INTM_POS, DMA_MR_INTM_LEN, + 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_QUREAD_POS, + DMA_MR_QUREAD_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + DMA_MR); +#endif + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + /* Clear all the interrupts which are set */ + dma_ch_isr = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + writereg(pdata->pAdapter, dma_ch_isr, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + + /* Clear all interrupt enable bits */ + dma_ch_ier = 0; + + /* Enable following interrupts + * NIE - Normal Interrupt Summary Enable + * AIE - Abnormal Interrupt Summary Enable + * FBEE - Fatal Bus Error Enable + */ + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_NIE_POS, + DMA_CH_IER_NIE_LEN, 1); + /* dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + * DMA_CH_IER_AIE_POS, DMA_CH_IER_AIE_LEN, 1); + */ + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_FBEE_POS, + DMA_CH_IER_FBEE_LEN, 1); + + if (channel->tx_ring) { + /* Enable the following Tx interrupts + * TIE - Transmit Interrupt Enable (unless using + * per channel interrupts) + */ + if (!pdata->per_channel_irq) + dma_ch_ier = FXGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, 1); + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + if 
(pdata->per_channel_irq) { + dma_ch_ier = FXGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, 1); + } + } + } + if (channel->rx_ring) { + /* Enable following Rx interrupts + * RBUE - Receive Buffer Unavailable Enable + * RIE - Receive Interrupt Enable (unless using + * per channel interrupts) + */ + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_RBUE_POS, + DMA_CH_IER_RBUE_LEN, + 1); + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_RIE_POS, + DMA_CH_IER_RIE_LEN, 1); + } + + writereg(pdata->pAdapter, dma_ch_ier, + FXGMAC_DMA_REG(channel, DMA_CH_IER)); + } +#else + struct fxgmac_tx_queue *txq; + unsigned int dma_ch_isr, dma_ch_ier; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { + txq = pdata->expansion.eth_dev->data->tx_queues[i]; + + /* Clear all the interrupts which are set */ + dma_ch_isr = FXGMAC_DMA_IOREAD(txq, DMA_CH_SR); + FXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr); + + /* Clear all interrupt enable bits */ + dma_ch_ier = 0; + + /* Enable following interrupts + * NIE - Normal Interrupt Summary Enable + * AIE - Abnormal Interrupt Summary Enable + * FBEE - Fatal Bus Error Enable + */ + FXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1); /* 0 fx 1 */ + FXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1); + FXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + + /* Enable following Rx interrupts + * RBUE - Receive Buffer Unavailable Enable + * RIE - Receive Interrupt Enable (unless using + * per channel interrupts in edge triggered + * mode) + */ + FXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); /* 0 fx 1 */ + FXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); /* 0 fx 1 */ + + FXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier); + } +#endif +} + +static void fxgmac_enable_mtl_interrupts(struct fxgmac_pdata *pdata) +{ + unsigned int q_count, i; + unsigned int mtl_q_isr; + + q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); + for (i = 0; i < q_count; i++) { + /* Clear all the interrupts which are set */ + mtl_q_isr = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); + writereg(pdata->pAdapter, mtl_q_isr, + FXGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); + + /* No MTL interrupts to be enabled */ + writereg(pdata->pAdapter, 0, + FXGMAC_MTL_REG(pdata, i, MTL_Q_IER)); + } +} + +static void fxgmac_enable_mac_interrupts(struct fxgmac_pdata *pdata) +{ + unsigned int mac_ier = 0; + u32 regval; + + /* Enable Timestamp interrupt */ + mac_ier = FXGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS, + MAC_IER_TSIE_LEN, 1); + + writereg(pdata->pAdapter, mac_ier, pdata->mac_regs + MAC_IER); + + /* Enable all counter interrupts */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RIER); + regval = FXGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS, + MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_RIER); + regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TIER); + regval = FXGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS, + MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_TIER); +} + +static int fxgmac_set_fxgmii_2500_speed(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_PS_POS, MAC_CR_PS_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_FES_POS, MAC_CR_FES_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_DM_POS, MAC_CR_DM_LEN, + pdata->phy_duplex); + 
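/* MAC_CR speed encoding used by the fxgmac_set_fxgmii_*_speed helpers:
+	 * PS=0/FES=1 selects 2.5G, PS=0/FES=0 1000M, PS=1/FES=1 100M and
+	 * PS=1/FES=0 10M; DM carries the duplex setting.
+	 */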
writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR);
+
+	return 0;
+}
+
+static int fxgmac_set_fxgmii_1000_speed(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_PS_POS, MAC_CR_PS_LEN, 0);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_FES_POS, MAC_CR_FES_LEN, 0);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_DM_POS, MAC_CR_DM_LEN,
+				     pdata->phy_duplex);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR);
+
+	return 0;
+}
+
+static int fxgmac_set_fxgmii_100_speed(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_PS_POS, MAC_CR_PS_LEN, 1);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_FES_POS, MAC_CR_FES_LEN, 1);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_DM_POS, MAC_CR_DM_LEN,
+				     pdata->phy_duplex);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR);
+
+	return 0;
+}
+
+static int fxgmac_set_fxgmii_10_speed(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_PS_POS, MAC_CR_PS_LEN, 1);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_FES_POS, MAC_CR_FES_LEN, 0);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_DM_POS, MAC_CR_DM_LEN,
+				     pdata->phy_duplex);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR);
+
+	return 0;
+}
+
+/**
+ * fxgmac_check_phy_link - Get link/speed status
+ * @pdata: pointer to gmac structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the link register to determine if link is up and the current speed
+ **/
+static int fxgmac_check_phy_link(struct fxgmac_pdata *pdata, u32 *speed,
+				 bool *link_up, bool link_up_wait_to_complete)
+{
+	u16 link_reg = 0;
+
+	struct net_device *netdev = pdata->netdev;
+	if (netdev->base_addr) {
+		link_reg =
+			(u16)(*((u32 *)(netdev->base_addr + MGMT_EPHY_CTRL)));
+
+		/*
+		 * check register address 0x1004
+		 * b[6:5] ephy_pause
+		 * b[4:3] ephy_speed 0b10 1000m 0b01 100m
+		 * b[2] ephy_duplex
+		 * b[1] ephy_link
+		 * b[0] ephy_reset; must be set to 1 before the PHY is used.
+		 */
+		if (link_up)
+			*link_up = false;
+		if (link_reg & MGMT_EPHY_CTRL_STA_EPHY_RELEASE) {
+			if (link_up) {
+				*link_up = (link_reg &
+					    MGMT_EPHY_CTRL_STA_EPHY_LINKUP) ?
+ true : + false; + } + if (speed) + *speed = (link_reg & + MGMT_EPHY_CTRL_STA_SPEED_MASK) >> + MGMT_EPHY_CTRL_STA_SPEED_POS; + } else { + DPRINTK("fxgmac_check_phy_link ethernet PHY not released.\n"); + return -1; + } + } else { + DPRINTK("fxgmac_check_phy_link null base addr err\n"); + return -1; + } + + return 0; +} + +static int fxgmac_config_mac_speed(struct fxgmac_pdata *pdata) +{ + switch (pdata->phy_speed) { + case SPEED_2500: + fxgmac_set_fxgmii_2500_speed(pdata); + break; + case SPEED_1000: + fxgmac_set_fxgmii_1000_speed(pdata); + break; + case SPEED_100: + fxgmac_set_fxgmii_100_speed(pdata); + break; + case SPEED_10: + fxgmac_set_fxgmii_10_speed(pdata); + break; + } + return 0; +} + +static int fxgmac_write_ephy_reg(struct fxgmac_pdata *pdata, u32 reg_id, + u32 data) +{ + u32 regval; + u32 mdioctrl = reg_id * 0x10000 + 0x8000205; + int busy = 15; + + writereg(pdata->pAdapter, data, pdata->mac_regs + MAC_MDIO_DATA); + writereg(pdata->pAdapter, mdioctrl, pdata->mac_regs + MAC_MDIO_ADDRESS); + do { + regval = readreg(pdata->pAdapter, + pdata->mac_regs + MAC_MDIO_ADDRESS); + busy--; + } while ((regval & MAC_MDIO_ADDRESS_BUSY) && (busy)); + + DPRINTK("fxgmac_write_ephy_reg id %d %s, ctrl=0x%08x, data=0x%08x\n", + reg_id, (regval & 0x1) ? "err" : "ok", regval, data); + + return (regval & MAC_MDIO_ADDRESS_BUSY) ? -1 : 0; /* -1 indicates err */ +} + +static int fxgmac_read_ephy_reg(struct fxgmac_pdata *pdata, u32 reg_id, + u32 *data) +{ + u32 regval = 0, regret; + u32 mdioctrl = reg_id * 0x10000 + 0x800020d; + int busy = 15; + + writereg(pdata->pAdapter, mdioctrl, pdata->mac_regs + MAC_MDIO_ADDRESS); + do { + regval = readreg(pdata->pAdapter, + pdata->mac_regs + MAC_MDIO_ADDRESS); + busy--; + } while ((regval & MAC_MDIO_ADDRESS_BUSY) && (busy)); + + if (0 == (regval & MAC_MDIO_ADDRESS_BUSY)) { + regret = readreg(pdata->pAdapter, + pdata->mac_regs + MAC_MDIO_DATA); + if (data) + *data = regret; + return regret; + } + + DPRINTK("fxgmac_read_ephy_reg id=0x%02x err, busy=%d, ctrl=0x%08x.\n", + reg_id, busy, regval); + return -1; +} + +static int fxgmac_write_ephy_mmd_reg(struct fxgmac_pdata *pdata, u32 reg_id, + u32 mmd, u32 data) +{ + u32 regval; + u32 mdioctrl = (mmd << 16) + 0x8000207; + u32 regdata = (reg_id << 16) + data; + /* for phy mmd reg r/w operation, set more delay time than phy mii reg r/w */ + int busy = 60; + + writereg(pdata->pAdapter, regdata, pdata->mac_regs + MAC_MDIO_DATA); + writereg(pdata->pAdapter, mdioctrl, pdata->mac_regs + MAC_MDIO_ADDRESS); + do { + regval = readreg(pdata->pAdapter, + pdata->mac_regs + MAC_MDIO_ADDRESS); + busy--; + } while ((regval & MAC_MDIO_ADDRESS_BUSY) && (busy)); + + DPRINTK("fxgmac_write_ephy_mmd_reg id %d mmd %d %s, ctrl=0x%08x, data=0x%08x\n", + reg_id, mmd, (regval & 0x1) ? "err" : "ok", regval, data); + + return (regval & MAC_MDIO_ADDRESS_BUSY) ? 
-1 : 0; /* -1 indicates err */ +} + +static void fxgmac_config_flow_control(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + + fxgmac_config_tx_flow_control(pdata); + fxgmac_config_rx_flow_control(pdata); + + fxgmac_read_ephy_reg(pdata, REG_MII_ADVERTISE, ®val); + /* set auto negotiation advertisement pause ability */ + if (pdata->tx_pause || pdata->rx_pause) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_PAUSE_POS, + PHY_MII_ADVERTISE_PAUSE_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_ASYPAUSE_POS, + PHY_MII_ADVERTISE_ASYPAUSE_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_PAUSE_POS, + PHY_MII_ADVERTISE_PAUSE_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_ASYPAUSE_POS, + PHY_MII_ADVERTISE_ASYPAUSE_LEN, 0); + } + fxgmac_write_ephy_reg(pdata, REG_MII_ADVERTISE, regval); + /* after change the auto negotiation advertisement need to soft reset */ + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, + 1); + fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); +} + +static int fxgmac_set_ephy_autoneg_advertise(struct fxgmac_pdata *pdata, + struct fxphy_ag_adv phy_ag_adv) +{ + u32 regval = 0, ret = 0; + + if (phy_ag_adv.auto_neg_en) { + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, + PHY_CR_AUTOENG_LEN, 1); + ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); + } else { + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, + PHY_CR_AUTOENG_LEN, 0); + ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); + } + + fxgmac_read_ephy_reg(pdata, REG_MII_CTRL1000, ®val); + if (phy_ag_adv.full_1000m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_CTRL1000_1000FULL_POS, + PHY_MII_CTRL1000_1000FULL_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_CTRL1000_1000FULL_POS, + PHY_MII_CTRL1000_1000FULL_LEN, 0); + } + if (phy_ag_adv.half_1000m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_CTRL1000_1000HALF_POS, + PHY_MII_CTRL1000_1000HALF_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_CTRL1000_1000HALF_POS, + PHY_MII_CTRL1000_1000HALF_LEN, 0); + } + ret |= fxgmac_write_ephy_reg(pdata, REG_MII_CTRL1000, regval); + + fxgmac_read_ephy_reg(pdata, REG_MII_ADVERTISE, ®val); + + if (phy_ag_adv.full_100m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_100FULL_POS, + PHY_MII_ADVERTISE_100FULL_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_100FULL_POS, + PHY_MII_ADVERTISE_100FULL_LEN, 0); + } + if (phy_ag_adv.half_100m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_100HALF_POS, + PHY_MII_ADVERTISE_100HALF_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_100HALF_POS, + PHY_MII_ADVERTISE_100HALF_LEN, 0); + } + if (phy_ag_adv.full_10m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_10FULL_POS, + PHY_MII_ADVERTISE_10FULL_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_10FULL_POS, + PHY_MII_ADVERTISE_10FULL_LEN, 0); + } + if (phy_ag_adv.half_10m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_10HALF_POS, + PHY_MII_ADVERTISE_10HALF_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_10HALF_POS, + PHY_MII_ADVERTISE_10HALF_LEN, 0); + } + + ret |= fxgmac_write_ephy_reg(pdata, REG_MII_ADVERTISE, regval); + /* after 
change the auto negotiation advertisement need to soft reset */ + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, + 1); + ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); + + return ret; +} + +static int fxgmac_phy_config(struct fxgmac_pdata *pdata) +{ + struct fxphy_ag_adv phy_ag_adv; + + if (pdata->phy_autoeng) { + phy_ag_adv.auto_neg_en = 1; + } else { + phy_ag_adv.auto_neg_en = 0; + } + switch (pdata->phy_speed) { + case SPEED_1000: + phy_ag_adv.full_1000m = 1, phy_ag_adv.half_1000m = 0, + phy_ag_adv.full_100m = 1, phy_ag_adv.half_100m = 1, + phy_ag_adv.full_10m = 1, phy_ag_adv.half_10m = 1; + break; + + case SPEED_100: + phy_ag_adv.full_1000m = 0, phy_ag_adv.half_1000m = 0; + if (pdata->phy_duplex) { + phy_ag_adv.full_100m = 1; + } else { + phy_ag_adv.full_100m = 0; + } + phy_ag_adv.half_100m = 1, phy_ag_adv.full_10m = 1, + phy_ag_adv.half_10m = 1; + break; + + case SPEED_10: + phy_ag_adv.full_1000m = 0, phy_ag_adv.half_1000m = 0; + phy_ag_adv.full_100m = 0, phy_ag_adv.half_100m = 0; + if (pdata->phy_duplex) { + phy_ag_adv.full_10m = 1; + } else { + phy_ag_adv.full_10m = 0; + } + phy_ag_adv.half_10m = 1; + break; + + default: + break; + } + return fxgmac_set_ephy_autoneg_advertise(pdata, phy_ag_adv); +} + +static void fxgmac_phy_green_ethernet(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + /* GREEN */ + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_REG_PMA_DBG0_ADC); + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ENABLE_GIGA_POWER_SAVING_FOR_SHORT_CABLE); + + /* CLD */ + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_REG_CLD_REG0); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ENABLE_CLD_NP_WP); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_REG_CLD_REG1); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ENABLE_CLD_GT_HT_BT); + + /* after change green ethernet & CLD need to soft reset */ + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, + 1); + fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); +} + +static void fxgmac_phy_eee_feature(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + DMA_SBMR); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_EN_LPI_POS, + DMA_SBMR_EN_LPI_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_LPI_XIT_PKT_POS, + DMA_SBMR_LPI_XIT_PKT_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_AALE_POS, + DMA_SBMR_AALE_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + DMA_SBMR); + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_STA); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIATE_POS, MAC_LPIATE_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPITXA_POS, MAC_LPITXA_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PLS_POS, MAC_PLS_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIEN_POS, MAC_LPIEN_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_LPI_STA); + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_TIMER); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIET_POS, MAC_LPIET_LEN, + MAC_LPI_ENTRY_TIMER); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_LPI_TIMER); + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_CONTROL); + regval = FXGMAC_SET_REG_BITS(regval, MAC_TWT_POS, MAC_TWT_LEN, + MAC_TWT_TIMER); + regval = FXGMAC_SET_REG_BITS(regval, 
MAC_LST_POS, MAC_LST_LEN, + MAC_LST_TIMER); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_LPI_CONTROL); + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_MS_TIC_COUNTER); + regval = FXGMAC_SET_REG_BITS(regval, MAC_MS_TIC_POS, MAC_MS_TIC_LEN, + MAC_MS_TIC); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_MS_TIC_COUNTER); + + fxgmac_write_ephy_mmd_reg(pdata, REG_MMD_EEE_ABILITY_REG, 0x07, + REG_MMD_EEE_ABILITY_VALUE); + + /* after change EEE need to soft reset */ + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, + 1); + fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); +} + +static void fxgmac_reset_phy(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + + value = FXGMAC_SET_REG_BITS(value, MGMT_EPHY_CTRL_RESET_POS, + MGMT_EPHY_CTRL_RESET_LEN, + MGMT_EPHY_CTRL_STA_EPHY_RESET); + writereg(pdata->pAdapter, value, pdata->base_mem + MGMT_EPHY_CTRL); + usleep_range_ex(pdata->pAdapter, 1500, 1500); +} + +void fxgmac_release_phy(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + + value = FXGMAC_SET_REG_BITS(value, MGMT_EPHY_CTRL_RESET_POS, + MGMT_EPHY_CTRL_RESET_LEN, + MGMT_EPHY_CTRL_STA_EPHY_RELEASE); + writereg(pdata->pAdapter, value, pdata->base_mem + MGMT_EPHY_CTRL); + usleep_range_ex(pdata->pAdapter, 100, 150); + value = readreg(pdata->pAdapter, pdata->base_mem + MGMT_EPHY_CTRL); + DBGPRINT(MP_LOUD, ("0x1004: 0x%x\n", value)); +#ifdef AISC_MODE + fxgmac_read_ephy_reg(pdata, REG_MII_SPEC_CTRL, + &value); /* read phy specific control */ + value = FXGMAC_SET_REG_BITS(value, PHY_MII_SPEC_CTRL_CRS_ON_POS, + PHY_MII_SPEC_CTRL_CRS_ON_LEN, + 1); /* set on crs on */ + fxgmac_write_ephy_reg(pdata, REG_MII_SPEC_CTRL, + value); /* phy specific control set on crs on */ + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG3); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &value); + /* VGA bandwidth, default is 2 after reset. Set to 0 to mitigate unstable issue in 130m. 
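+	 * Like the other PHY analog tweaks here, this goes through the
+	 * extended-register indirection: the register index is written to
+	 * REG_MII_EXT_ADDR and its contents are accessed via REG_MII_EXT_DATA.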
*/ + value = FXGMAC_SET_REG_BITS(value, + MII_EXT_ANALOG_CFG3_ADC_START_CFG_POS, + MII_EXT_ANALOG_CFG3_ADC_START_CFG_LEN, + MII_EXT_ANALOG_CFG3_ADC_START_CFG_DEFAULT); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, value); + + fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, &value); + /* led index use bit0~bit5 */ + value = FXGMAC_GET_REG_BITS(value, EFUSE_LED_POS, EFUSE_LED_LEN); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG2); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ANALOG_CFG2_LED_VALUE); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG8); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ANALOG_CFG8_LED_VALUE); + + if (EFUSE_LED_COMMON_SOLUTION != value) { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED0_CFG); + switch (value) { + case EFUSE_LED_SOLUTION1: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION1); + break; + case EFUSE_LED_SOLUTION2: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION2); + break; + case EFUSE_LED_SOLUTION3: + case EFUSE_LED_SOLUTION4: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION3); + break; + default: + /* default solution */ + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION0); + break; + } + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED1_CFG); + switch (value) { + case EFUSE_LED_SOLUTION1: + case EFUSE_LED_SOLUTION4: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION1); + break; + case EFUSE_LED_SOLUTION2: + case EFUSE_LED_SOLUTION3: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION2); + break; + default: + /* default solution */ + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION0); + break; + } + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED2_CFG); + switch (value) { + case EFUSE_LED_SOLUTION1: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION0); + break; + case EFUSE_LED_SOLUTION2: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION2); + break; + case EFUSE_LED_SOLUTION3: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION3); + break; + case EFUSE_LED_SOLUTION4: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION4); + break; + default: + /* default solution */ + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION0); + break; + } + + if (EFUSE_LED_SOLUTION2 == value) { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED_BLINK_CFG); + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED_BLINK_CFG_SOLUTION2); + } + } +#endif +} + +static void fxgmac_enable_phy_check(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_PKG_CFG0); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &value); + value = FXGMAC_SET_REG_BITS(value, REG_MII_EXT_PKG_CHECK_POS, + REG_MII_EXT_PKG_CHECK_LEN, + REG_MII_EXT_PKG_ENABLE_CHECK); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_PKG_CFG0); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, value); +} + +static 
void fxgmac_disable_phy_check(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_PKG_CFG0); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &value); + value = FXGMAC_SET_REG_BITS(value, REG_MII_EXT_PKG_CHECK_POS, + REG_MII_EXT_PKG_CHECK_LEN, + REG_MII_EXT_PKG_DISABLE_CHECK); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_PKG_CFG0); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, value); +} + +static void fxgmac_setup_cable_loopback(struct fxgmac_pdata *pdata) +{ + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_SLEEP_REG_ENABLE_LOOPBACK); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_LPBK_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_LPBK_REG_ENABLE_LOOPBACK); + + fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, + REG_MII_BMCR_ENABLE_LOOPBACK); +} + +static void fxgmac_clean_cable_loopback(struct fxgmac_pdata *pdata) +{ + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_SLEEP_REG_CLEAN_LOOPBACK); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_LPBK_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_LPBK_REG_CLEAN_LOOPBACK); + + fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, + REG_MII_BMCR_DISABLE_LOOPBACK); +} + +static void fxgmac_disable_phy_sleep(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &value); + + value = FXGMAC_SET_REG_BITS(value, MII_EXT_SLEEP_CONTROL1_EN_POS, + MII_EXT_SLEEP_CONTROL1_EN_LEN, 0); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, value); +} + +static void fxgmac_enable_phy_sleep(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &value); + + value = FXGMAC_SET_REG_BITS(value, MII_EXT_SLEEP_CONTROL1_EN_POS, + MII_EXT_SLEEP_CONTROL1_EN_LEN, 1); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, value); +} + +static void fxgmac_close_phy_led(struct fxgmac_pdata *pdata) +{ + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED0_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED1_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED2_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); +} + +static void fxmgac_config_led_under_active(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, ®val); + /* led index use bit0~bit5 */ + regval = FXGMAC_GET_REG_BITS(regval, EFUSE_LED_POS, EFUSE_LED_LEN); + if (EFUSE_LED_COMMON_SOLUTION == regval) { + DbgPrintF(MP_TRACE, "%s >>>", __FUNCTION__); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s0_led_setting[0]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED0_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + 
pdata->led.s0_led_setting[1]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED1_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s0_led_setting[2]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED2_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s0_led_setting[3]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED_BLINK_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s0_led_setting[4]); + } +} + +static void fxgmac_config_led_under_sleep(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, ®val); + /* led index use bit0~bit5 */ + regval = FXGMAC_GET_REG_BITS(regval, EFUSE_LED_POS, EFUSE_LED_LEN); + if (EFUSE_LED_COMMON_SOLUTION == regval) { + DbgPrintF(MP_TRACE, "%s >>>", __FUNCTION__); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s3_led_setting[0]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED0_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s3_led_setting[1]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED1_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s3_led_setting[2]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED2_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s3_led_setting[3]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED_BLINK_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s3_led_setting[4]); + } +} + +static void fxgmac_config_led_under_shutdown(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, ®val); + /* led index use bit0~bit5 */ + regval = FXGMAC_GET_REG_BITS(regval, EFUSE_LED_POS, EFUSE_LED_LEN); + if (EFUSE_LED_COMMON_SOLUTION == regval) { + DbgPrintF(MP_TRACE, "%s >>>", __FUNCTION__); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s5_led_setting[0]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED0_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s5_led_setting[1]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED1_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s5_led_setting[2]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED2_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s5_led_setting[3]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED_BLINK_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.s5_led_setting[4]); + } +} + +static void fxgmac_config_led_under_disable(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, ®val); + /* led index use bit0~bit5 */ + regval = FXGMAC_GET_REG_BITS(regval, EFUSE_LED_POS, EFUSE_LED_LEN); + if (EFUSE_LED_COMMON_SOLUTION == regval) { + DbgPrintF(MP_TRACE, "%s >>>", __FUNCTION__); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.disable_led_setting[0]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED0_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.disable_led_setting[1]); + 
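/* The remaining LED1/LED2/blink registers below use the same
+		 * EXT_ADDR/EXT_DATA indirection; only the per-power-state
+		 * table of LED settings differs across these helpers.
+		 */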
fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED1_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.disable_led_setting[2]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED2_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.disable_led_setting[3]); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED_BLINK_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + pdata->led.disable_led_setting[4]); + } else { + /* http://redmine.motor-comm.com/issues/4101 */ + /* for disable case, reset phy to close LED */ + fxgmac_reset_phy(pdata); + } +} + +extern void fxgmac_diag_get_rx_info(struct fxgmac_channel *channel); + +static int fxgmac_dev_read(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->rx_ring; + struct net_device *netdev = pdata->netdev; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_pkt_info *pkt_info; + unsigned int err, etlt, l34t; + + static unsigned int cnt_incomplete; + + desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); + dma_desc = desc_data->dma_desc; + pkt_info = &ring->pkt_info; + + /* Check for data availability */ + if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_OWN_POS, + RX_NORMAL_DESC3_OWN_LEN)) { + return 1; + } + + /* Make sure descriptor fields are read after reading the OWN bit */ + dma_rmb(); + + if (netif_msg_rx_status(pdata)) + fxgmac_dump_rx_desc(pdata, ring, ring->cur); + + if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_CTXT_POS, + RX_NORMAL_DESC3_CTXT_LEN)) { + /* Timestamp Context Descriptor */ + fxgmac_get_rx_tstamp(pkt_info, dma_desc); + + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 1); + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, 0); + if (netif_msg_rx_status(pdata)) + DPRINTK("dev_read context desc, ch=%s\n", channel->name); + return 0; + } + + /* Normal Descriptor, be sure Context Descriptor bit is off */ + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 0); + + /* Get the header length */ + if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_FD_POS, + RX_NORMAL_DESC3_FD_LEN)) { + desc_data->rx.hdr_len = FXGMAC_GET_REG_BITS_LE( + dma_desc->desc2, RX_NORMAL_DESC2_HL_POS, + RX_NORMAL_DESC2_HL_LEN); + if (desc_data->rx.hdr_len) + pdata->stats.rx_split_header_packets++; + } + l34t = 0; + + /* Get the pkt_info length */ + desc_data->rx.len = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_PL_POS, + RX_NORMAL_DESC3_PL_LEN); + + if (!FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_LD_POS, + RX_NORMAL_DESC3_LD_LEN)) { + /* Not all the data has been transferred for this pkt_info */ + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, + RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 1); + cnt_incomplete++; + if ((cnt_incomplete < 2) && netif_msg_rx_status(pdata)) + DPRINTK("dev_read NOT last desc, pkt incomplete yet,%u\n", + cnt_incomplete); + + return 0; + } + if ((cnt_incomplete) && netif_msg_rx_status(pdata)) + DPRINTK("dev_read rx back to normal and incomplete cnt=%u\n", + cnt_incomplete); + cnt_incomplete = 0; /* when back to normal, reset cnt */ + + /* This is 
the last of the data for this pkt_info */ + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, + RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 0); + + /* Set checksum done indicator as appropriate */ + if (netdev->features & NETIF_F_RXCSUM) + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, + RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 1); + + /* Check for errors (only valid in last descriptor) */ + err = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ES_POS, + RX_NORMAL_DESC3_ES_LEN); + etlt = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ETLT_POS, + RX_NORMAL_DESC3_ETLT_LEN); + if ((err) && netif_msg_rx_status(pdata)) { + DPRINTK("dev_read:head_len=%u, pkt_len=%u, err=%u, etlt=%#x, desc2=0x%08x, desc3=0x%08x\n", + desc_data->rx.hdr_len, desc_data->rx.len, err, etlt, + dma_desc->desc2, dma_desc->desc3); + } + + if (!err || !etlt) { + /* No error if err is 0 or etlt is 0 */ + if ((etlt == 0x4 /*yzhang changed to 0x4, 0x09*/) && + (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, 1); + pkt_info->vlan_ctag = FXGMAC_GET_REG_BITS_LE( + dma_desc->desc0, RX_NORMAL_DESC0_OVT_POS, + RX_NORMAL_DESC0_OVT_LEN); + netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", + pkt_info->vlan_ctag); + } + } else { + if (etlt == 0x05 || etlt == 0x06) + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, + RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 0); + else + pkt_info->errors = FXGMAC_SET_REG_BITS( + pkt_info->errors, RX_PACKET_ERRORS_FRAME_POS, + RX_PACKET_ERRORS_FRAME_LEN, 1); + } + + return 0; +} + +static int fxgmac_enable_int(struct fxgmac_channel *channel, + enum fxgmac_int int_id) +{ + unsigned int dma_ch_ier; + + dma_ch_ier = readreg(channel->pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_IER)); + + switch (int_id) { + case FXGMAC_INT_DMA_CH_SR_TI: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, 1); + break; + case FXGMAC_INT_DMA_CH_SR_TPS: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_TXSE_POS, + DMA_CH_IER_TXSE_LEN, 1); + break; + case FXGMAC_INT_DMA_CH_SR_TBU: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_TBUE_POS, + DMA_CH_IER_TBUE_LEN, 1); + break; + case FXGMAC_INT_DMA_CH_SR_RI: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RIE_POS, + DMA_CH_IER_RIE_LEN, 1); + break; + case FXGMAC_INT_DMA_CH_SR_RBU: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_RBUE_POS, + DMA_CH_IER_RBUE_LEN, 1); + break; + case FXGMAC_INT_DMA_CH_SR_RPS: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RSE_POS, + DMA_CH_IER_RSE_LEN, 1); + break; + case FXGMAC_INT_DMA_CH_SR_TI_RI: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, 1); + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RIE_POS, + DMA_CH_IER_RIE_LEN, 1); + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_NIE_POS, + DMA_CH_IER_NIE_LEN, 1); + break; + case FXGMAC_INT_DMA_CH_SR_FBE: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_FBEE_POS, + DMA_CH_IER_FBEE_LEN, 1); + break; + case FXGMAC_INT_DMA_ALL: + dma_ch_ier |= channel->saved_ier; + break; + default: + return -1; + } + + writereg(channel->pdata->pAdapter, dma_ch_ier, + FXGMAC_DMA_REG(channel, DMA_CH_IER)); + + return 0; +} + +static int 
fxgmac_disable_int(struct fxgmac_channel *channel, + enum fxgmac_int int_id) +{ + unsigned int dma_ch_ier; + + dma_ch_ier = readreg(channel->pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_IER)); + + switch (int_id) { + case FXGMAC_INT_DMA_CH_SR_TI: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, 0); + break; + case FXGMAC_INT_DMA_CH_SR_TPS: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_TXSE_POS, + DMA_CH_IER_TXSE_LEN, 0); + break; + case FXGMAC_INT_DMA_CH_SR_TBU: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_TBUE_POS, + DMA_CH_IER_TBUE_LEN, 0); + break; + case FXGMAC_INT_DMA_CH_SR_RI: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RIE_POS, + DMA_CH_IER_RIE_LEN, 0); + break; + case FXGMAC_INT_DMA_CH_SR_RBU: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_RBUE_POS, + DMA_CH_IER_RBUE_LEN, 0); + break; + case FXGMAC_INT_DMA_CH_SR_RPS: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RSE_POS, + DMA_CH_IER_RSE_LEN, 0); + break; + case FXGMAC_INT_DMA_CH_SR_TI_RI: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, 0); + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RIE_POS, + DMA_CH_IER_RIE_LEN, 0); + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_NIE_POS, + DMA_CH_IER_NIE_LEN, 0); + break; + case FXGMAC_INT_DMA_CH_SR_FBE: + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_FBEE_POS, + DMA_CH_IER_FBEE_LEN, 0); + break; + case FXGMAC_INT_DMA_ALL: + channel->saved_ier = dma_ch_ier & FXGMAC_DMA_INTERRUPT_MASK; + dma_ch_ier &= ~FXGMAC_DMA_INTERRUPT_MASK; + break; + default: + return -1; + } + + writereg(channel->pdata->pAdapter, dma_ch_ier, + FXGMAC_DMA_REG(channel, DMA_CH_IER)); + + return 0; +} + +static int fxgmac_dismiss_DMA_int(struct fxgmac_channel *channel, int int_id) +{ + unsigned int dma_ch_isr; + + /* read the channel status and write it back to clear it (W1C) */ + dma_ch_isr = readreg(channel->pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_SR /*1160*/)); + writereg(channel->pdata->pAdapter, dma_ch_isr, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + + return 0; +} + +static void fxgmac_dismiss_MTL_Q_int(struct fxgmac_pdata *pdata) +{ + unsigned int q_count, i; + unsigned int mtl_q_isr; + + q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); + for (i = 0; i < q_count; i++) { + /* Clear all the interrupts which are set */ + mtl_q_isr = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); + writereg(pdata->pAdapter, mtl_q_isr, + FXGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); + } +} + +static int fxgmac_dismiss_MAC_int(struct fxgmac_pdata *pdata) +{ + u32 regval, regErrVal; + + /* all MAC interrupts in 0xb0; reading the status clears it */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_ISR); + /* MAC tx/rx error interrupts in 0xb8; reading the status clears it */ + regErrVal = readreg(pdata->pAdapter, pdata->mac_regs + MAC_TX_RX_STA); + return 0; +} + +static int fxgmac_dismiss_MAC_PMT_int(struct fxgmac_pdata *pdata) +{ + u32 regval; + + /* MAC PMT interrupts in 0xc0; reading the status clears it */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + return 0; +} + +static int fxgmac_dismiss_MAC_LPI_int(struct fxgmac_pdata *pdata) +{ + u32 regval; + + /* MAC LPI interrupts in MAC_LPI_STA; reading the status clears it */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_STA); + + return 0; +}
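The dismiss helpers above mix the two usual MMIO status conventions: some blocks are clear-on-read (reading MAC_ISR or MAC_PMT_STA is enough to drop the interrupt line), others are write-1-to-clear (the value just read is written straight back, as with DMA_CH_SR and MTL_Q_ISR). A minimal sketch of the two idioms, with a placeholder register pointer rather than this driver's real layout::

  #include <linux/io.h>
  #include <linux/types.h>

  static void clear_w1c_status(void __iomem *reg)
  {
          u32 status = readl(reg);        /* latch the pending bits */

          writel(status, reg);            /* write 1s back to clear them */
  }

  static void clear_on_read_status(void __iomem *reg)
  {
          (void)readl(reg);               /* the read itself clears the status */
  }

Only bits that were set at read time are cleared by the write-back, so an interrupt that fires between the read and the write is not lost.

+static int fxgmac_dismiss_MAC_DBG_int(struct fxgmac_pdata *pdata) +{ + u32 regval; + + /* MAC debug status; write the read value back to clear it */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_DBG_STA); + + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_DBG_STA);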
+ + return 0; +} + +int fxgmac_dismiss_all_int(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel; + unsigned int i, regval; + struct net_device *netdev = pdata->netdev; + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_dismiss_all_int callin\n"); + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) + fxgmac_dismiss_DMA_int(channel, 0); + + fxgmac_dismiss_MTL_Q_int(pdata); + fxgmac_dismiss_MAC_int(pdata); + fxgmac_dismiss_MAC_PMT_int(pdata); + fxgmac_dismiss_MAC_LPI_int(pdata); + fxgmac_dismiss_MAC_DBG_int(pdata); + + /* read to dismiss the module-level interrupt routed to the PCIe slot */ + if (netdev->base_addr) { + regval = (unsigned int)(*( + (u32 *)(netdev->base_addr + MGMT_INT_CTRL0))); + } + return 0; +} + +static void fxgmac_set_interrupt_moderation(struct fxgmac_pdata *pdata) +{ + u32 value = 0, time; + + pdata->intr_mod_timer = INT_MOD_IN_US; + + time = (pdata->intr_mod) ? pdata->tx_usecs : 0; + value = FXGMAC_SET_REG_BITS(value, INT_MOD_TX_POS, INT_MOD_TX_LEN, + time); + time = (pdata->intr_mod) ? pdata->rx_usecs : 0; + value = FXGMAC_SET_REG_BITS(value, INT_MOD_RX_POS, INT_MOD_RX_LEN, + time); + writereg(pdata->pAdapter, value, pdata->base_mem + INT_MOD); +} + +static void fxgmac_enable_msix_rxtxinterrupt(struct fxgmac_pdata *pdata) +{ + u32 intid; + + for (intid = 0; intid < MSIX_TBL_RXTX_NUM; intid++) { + writereg(pdata->pAdapter, 0, + pdata->base_mem + MSIX_TBL_BASE_ADDR + + MSIX_TBL_MASK_OFFSET + intid * 16); + } +} + +static void fxgmac_disable_msix_interrupt(struct fxgmac_pdata *pdata) +{ + u32 intid; + + for (intid = 0; intid < MSIX_TBL_MAX_NUM; intid++) { + writereg(pdata->pAdapter, 0x1, + pdata->base_mem + MSIX_TBL_BASE_ADDR + + MSIX_TBL_MASK_OFFSET + intid * 16); + } +} + +static void fxgmac_enable_msix_rxtxphyinterrupt(struct fxgmac_pdata *pdata) +{ + u32 intid, regval = 0; +#if !(FUXI_EPHY_INTERRUPT_D0_OFF) + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; +#endif + + for (intid = 0; intid < MSIX_TBL_RXTX_NUM; intid++) { + writereg(pdata->pAdapter, 0, + pdata->base_mem + MSIX_TBL_BASE_ADDR + + MSIX_TBL_MASK_OFFSET + intid * 16); + } + writereg(pdata->pAdapter, 0, + pdata->base_mem + MSIX_TBL_BASE_ADDR + MSIX_TBL_MASK_OFFSET + + MSI_ID_PHY_OTHER * 16); +#if !(FUXI_EPHY_INTERRUPT_D0_OFF) + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, + NULL); /* clear phy interrupt */ + regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, + PHY_INT_MASK_LINK_UP_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS, + PHY_INT_MASK_LINK_DOWN_LEN, 1); + hw_ops->write_ephy_reg( + pdata, REG_MII_INT_MASK, + regval); /* enable the phy link-up (bit 10) and link-down (bit 11) interrupts */ +#endif +} + +static void fxgmac_enable_msix_one_interrupt(struct fxgmac_pdata *pdata, + u32 intid) +{ + writereg(pdata->pAdapter, 0, + pdata->base_mem + MSIX_TBL_BASE_ADDR + MSIX_TBL_MASK_OFFSET + + intid * 16); +} + +static void fxgmac_disable_msix_one_interrupt(struct fxgmac_pdata *pdata, + u32 intid) +{ + writereg(pdata->pAdapter, 0x01, + pdata->base_mem + MSIX_TBL_BASE_ADDR + MSIX_TBL_MASK_OFFSET + + intid * 16); +} + +static bool fxgmac_enable_mgm_interrupt(struct fxgmac_pdata *pdata) +{ + writereg(pdata->pAdapter, 0xf0000000, pdata->base_mem + MGMT_INT_CTRL0); + return true; +} + +static bool fxgmac_disable_mgm_interrupt(struct fxgmac_pdata *pdata) +{ + writereg(pdata->pAdapter, 0xffff0000, pdata->base_mem + MGMT_INT_CTRL0); + return true; +}
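All of the MSI-X mask helpers above poke the same structure: per the PCI specification, the MSI-X table holds one 16-byte entry per vector (message address low/high, message data, vector control), and bit 0 of the vector-control word is the per-vector mask. That is why every access is scaled by ``intid * 16``. The sketch below assumes, as the driver's constants suggest, that MSIX_TBL_MASK_OFFSET lands on the vector-control word of entry 0::

  #include <linux/io.h>
  #include <linux/types.h>

  #define MSIX_ENTRY_SIZE    16   /* one MSI-X table entry, per the PCI spec */
  #define MSIX_VECTOR_CTRL   0xC  /* vector control word; bit 0 = mask */

  static void msix_vector_mask(void __iomem *tbl_base, u32 id, bool masked)
  {
          /* write 1 to mask the vector, 0 to let it fire */
          writel(masked ? 1 : 0,
                 tbl_base + id * MSIX_ENTRY_SIZE + MSIX_VECTOR_CTRL);
  }

This explains the 0/0x1 writes above: 0 unmasks a vector, 0x1 masks it, and the loops simply walk either the Rx/Tx vectors or the whole table.

+static int fxgmac_flush_tx_queues(struct fxgmac_pdata *pdata) +{ +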
unsigned int i, count; + u32 regval; + + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, + MTL_Q_TQOMR_FTQ_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + DPRINTK("fxgmac_flush_tx_queues, reg=0x%p, val=0x%08x\n", + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR), regval); + } + + for (i = 0; i < pdata->tx_q_count; i++) { + count = 2000; + do { + usleep_range_ex(pdata->pAdapter, 40, 50); + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_GET_REG_BITS(regval, + MTL_Q_TQOMR_FTQ_POS, + MTL_Q_TQOMR_FTQ_LEN); + + } while (--count && regval); + DPRINTK("fxgmac_flush_tx_queues wait... reg=0x%p, val=0x%08x\n", + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR), regval); + if (regval) { /*(!count)*/ + return -EBUSY; + } + } + + return 0; +} + +static void fxgmac_config_dma_bus(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + DMA_SBMR); + /* Set enhanced addressing mode */ + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS, + DMA_SBMR_EAME_LEN, 1); + /* Set the System Bus mode */ + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_FB_POS, DMA_SBMR_FB_LEN, + 0); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_4_POS, + DMA_SBMR_BLEN_4_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_8_POS, + DMA_SBMR_BLEN_8_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_16_POS, + DMA_SBMR_BLEN_16_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_32_POS, + DMA_SBMR_BLEN_32_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + DMA_SBMR); +} + +static void fxgmac_legacy_link_speed_setting(struct fxgmac_pdata *pdata) +{ + unsigned int i = 0, regval = 0; + + fxgmac_phy_config(pdata); + for (i = 0, regval = fxgmac_get_ephy_state(pdata); + (!(regval & MGMT_EPHY_CTRL_STA_EPHY_RELEASE) || + !(regval & MGMT_EPHY_CTRL_STA_EPHY_LINKUP)) && + (i < PHY_LINK_TIMEOUT); + regval = fxgmac_get_ephy_state(pdata), i++) { + usleep_range_ex(pdata->pAdapter, 2000, 2000); + } + fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, + NULL); /* clear phy interrupt. */ +} + +static void fxgmac_pre_powerdown(struct fxgmac_pdata *pdata, bool phyloopback) +{ + unsigned int regval = 0; + + fxgmac_disable_rx(pdata); + + /* HERE, WE NEED TO CONSIDER PHY CONFIG...TBD */ + DPRINTK("fxgmac_config_powerdown, phy and mac status update\n"); + /* for phy cable loopback, it can't configure phy speed, it will cause os resume again by link change although it has finished speed setting, */ + if (!phyloopback) { + /* When the Linux platform enters the s4 state, it goes through + * the suspend->resume->suspend process. The process of + * suspending again after resume is fast, and PHY + * auto-negotiation is not yet complete, so the + * auto-negotiation of PHY must be carried out again. When the + * Linux platform enters the s4 state, force speed to 10M. + */ + pdata->phy_speed = SPEED_10; + fxgmac_legacy_link_speed_setting(pdata); + } + + fxgmac_config_mac_speed(pdata); + + /* After enable OOB_WOL from efuse, mac will loopcheck phy status, and + * lead to panic sometimes. So we should disable it from powerup, + * enable it from power down. 
+ */ + regval = (u32)readreg(pdata->pAdapter, pdata->base_mem + OOB_WOL_CTRL); + regval = FXGMAC_SET_REG_BITS(regval, OOB_WOL_CTRL_DIS_POS, + OOB_WOL_CTRL_DIS_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->base_mem + OOB_WOL_CTRL); + usleep_range_ex(pdata->pAdapter, 2000, 2000); + + /* after enabling OOB_WOL, reconfigure the mac address again */ + fxgmac_set_mac_address(pdata, pdata->mac_addr); +} + +/* only supports four patterns, and the patterns are cleared on every call */ +static void fxgmac_set_pattern_data(struct fxgmac_pdata *pdata) +{ + u32 ip_addr, i = 0; + u8 type_offset, op_offset, tip_offset; + struct pattern_packet packet; + struct wol_bitmap_pattern + pattern[4]; /* for WAKE_UCAST, WAKE_BCAST, WAKE_MCAST, WAKE_ARP. */ + + memset(pattern, 0, sizeof(pattern)); + + /* config ucast */ + if (pdata->expansion.wol & WAKE_UCAST) { + pattern[i].mask_info[0] = 0x3F; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + memcpy(pattern[i].pattern_info, pdata->mac_addr, ETH_ALEN); + pattern[i].pattern_offset = 0; + i++; + } + + /* config bcast */ + if (pdata->expansion.wol & WAKE_BCAST) { + pattern[i].mask_info[0] = 0x3F; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + memset(pattern[i].pattern_info, 0xFF, ETH_ALEN); + pattern[i].pattern_offset = 0; + i++; + } + + /* config mcast */ + if (pdata->expansion.wol & WAKE_MCAST) { + pattern[i].mask_info[0] = 0x7; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + pattern[i].pattern_info[0] = 0x1; + pattern[i].pattern_info[1] = 0x0; + pattern[i].pattern_info[2] = 0x5E; + pattern[i].pattern_offset = 0; + i++; + } + + /* config arp */ + if (pdata->expansion.wol & WAKE_ARP) { + memset(pattern[i].mask_info, 0, sizeof(pattern[0].mask_info)); + type_offset = offsetof(struct pattern_packet, ar_pro); + pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; + type_offset++; + pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; + op_offset = offsetof(struct pattern_packet, ar_op); + pattern[i].mask_info[op_offset / 8] |= 1 << op_offset % 8; + op_offset++; + pattern[i].mask_info[op_offset / 8] |= 1 << op_offset % 8; + tip_offset = offsetof(struct pattern_packet, ar_tip); + pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; + + packet.ar_pro = + 0x0 << 8 | + 0x08; /* the ARP protocol type is 0x0800; note that ar_pro and ar_op are big-endian */ + packet.ar_op = + 0x1 + << 8; /* 1 is ARP request, 2 is ARP reply, 3 is RARP request, 4 is RARP reply */ + ip_addr = fxgmac_get_netdev_ip4addr(pdata); + packet.ar_tip[0] = ip_addr & 0xFF; + packet.ar_tip[1] = (ip_addr >> 8) & 0xFF; + packet.ar_tip[2] = (ip_addr >> 16) & 0xFF; + packet.ar_tip[3] = (ip_addr >> 24) & 0xFF; + memcpy(pattern[i].pattern_info, &packet, MAX_PATTERN_SIZE); + pattern[i].mask_size = sizeof(pattern[0].mask_info); + pattern[i].pattern_offset = 0; + i++; + } + + fxgmac_set_wake_pattern(pdata, pattern, i); +}
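Each wake pattern above is a byte-granular template: bit n of mask_info marks byte n of an incoming frame as significant, and pattern_info holds the bytes those positions must match (so mask 0x3F selects the first six bytes, the destination MAC). The offset arithmetic, assuming the same mask layout as wol_bitmap_pattern, reduces to::

  #include <linux/types.h>

  /* Mark frame byte <off> as must-match in a WoL bitmap pattern:
   * the bit for byte <off> lives in mask[off / 8] at position off % 8.
   */
  static void wol_mask_byte(u8 *mask, unsigned int off)
  {
          mask[off / 8] |= 1 << (off % 8);
  }

  /* Example: require both bytes of the big-endian ARP opcode to match. */
  static void wol_mask_u16(u8 *mask, unsigned int off)
  {
          wol_mask_byte(mask, off);
          wol_mask_byte(mask, off + 1);
  }

The ARP branch above applies exactly this to the protocol type, the opcode and the four target-IP bytes, so only an ARP request for the host's own address wakes the machine.

+static void fxgmac_config_powerdown(struct fxgmac_pdata *pdata, + unsigned int wol) +{ + u32 regval = 0; + + fxgmac_disable_tx(pdata); + fxgmac_disable_rx(pdata); + + /* performs the fxgmac power-down sequence + * 1. set the led + * 2. check wol + * 3. check arp offloading + * 4. disable gmac rx + * 5.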
set gmac power down + */ + + /* Close the LED when entering S3/S4/S5, except for solution 3 */ + fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, &regval); + /* the led index uses bit0~bit5 */ + regval = FXGMAC_GET_REG_BITS(regval, EFUSE_LED_POS, EFUSE_LED_LEN); + if (regval != EFUSE_LED_COMMON_SOLUTION) { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED0_CFG); + if (regval == EFUSE_LED_SOLUTION3) { + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED0_CFG_VALUE_SLEEP_SOLUTION3); + } else { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); + } + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED1_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED2_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); + } + + if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) { + netdev_err( + pdata->netdev, + "fxgmac powerstate is %lu when configuring power down.\n", + pdata->expansion.powerstate); + } + +#if FXGMAC_WOL_FEATURE_ENABLED + fxgmac_config_wol(pdata, wol); +#endif +#if FXGMAC_AOE_FEATURE_ENABLED + /* use the default arp offloading feature */ + fxgmac_update_aoe_ipv4addr(pdata, (u8 *)NULL); + fxgmac_enable_arp_offload(pdata); +#endif + +#if FXGMAC_NS_OFFLOAD_ENABLED + /* do not change the sequence below */ + fxgmac_update_ns_offload_ipv6addr(pdata, FXGMAC_NS_IFA_GLOBAL_UNICAST); + fxgmac_update_ns_offload_ipv6addr(pdata, FXGMAC_NS_IFA_LOCAL_LINK); + fxgmac_enable_ns_offload(pdata); +#endif + + /* Keep MAC Rx enabled so wake packets are still received; enable Tx + * as well when ARP offload has to reply. + */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, + MAC_CR_RE_LEN, 1); + if (pdata->hw_feat.aoe) { + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_TE_POS, + MAC_CR_TE_LEN, 1); + } + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + regval = readreg(pdata->pAdapter, pdata->base_mem + LPW_CTRL); + regval = FXGMAC_SET_REG_BITS(regval, LPW_CTRL_ASPM_LPW_EN_POS, + LPW_CTRL_ASPM_LPW_EN_LEN, + 1); /* Enable PCIE PM_L23. */ + + writereg(pdata->pAdapter, regval, pdata->base_mem + LPW_CTRL); + + /* set gmac power down */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_PWRDWN_POS, + MAC_PMT_STA_PWRDWN_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA); + + /* Adjust the sigdet threshold. + * redmine.motor-comm.com/issues/5093 + * Fixes a failure to wake the OS on some FT-D2000 platforms; this + * modification is only temporary. If the threshold stays at 55 mV, + * WoL may fail. + */ + regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_SIGDET); + regval = FXGMAC_SET_REG_BITS(regval, MGMT_SIGDET_POS, MGMT_SIGDET_LEN, + MGMT_SIGDET_40MV); + writereg(pdata->pAdapter, regval, pdata->base_mem + MGMT_SIGDET); + DPRINTK("fxgmac_config_powerdown callout, reg=0x%08x\n", regval); +}
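Nearly every register manipulation in these power helpers goes through the FXGMAC_GET_REG_BITS/FXGMAC_SET_REG_BITS pair. Their definitions are outside this hunk; a plausible equivalent, assuming POS names the field's least-significant bit and LEN its width, is::

  #define FXGMAC_GET_REG_BITS(var, pos, len)                        \
          (((var) >> (pos)) & ((0x1U << (len)) - 1))

  #define FXGMAC_SET_REG_BITS(var, pos, len, val)                   \
          (((var) & ~(((0x1U << (len)) - 1) << (pos))) |            \
           (((val) & ((0x1U << (len)) - 1)) << (pos)))

Read-modify-write through these macros is why most sequences here read a register once, update several fields, and write it back once.

+static void fxgmac_config_powerup(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + + if (test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) { + netdev_err( + pdata->netdev, + "fxgmac powerstate is %lu when configuring power up.\n", + pdata->expansion.powerstate); + } + + /* After OOB_WOL is enabled from the efuse, the MAC loop-checks the phy + * status, which sometimes leads to a panic. So disable it at power-up + * and enable it at power-down.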
+ */ + regval = (u32)readreg(pdata->pAdapter, pdata->base_mem + OOB_WOL_CTRL); + regval = FXGMAC_SET_REG_BITS(regval, OOB_WOL_CTRL_DIS_POS, + OOB_WOL_CTRL_DIS_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->base_mem + OOB_WOL_CTRL); + + /* clear WPI mode whether or not we woke via WoL; write the reset value */ + regval = + (u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_WPI_CTRL0); + + regval = FXGMAC_SET_REG_BITS(regval, MGMT_WPI_CTRL0_WPI_MODE_POS, + MGMT_WPI_CTRL0_WPI_MODE_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->base_mem + MGMT_WPI_CTRL0); + /* read the pmt_status register to de-assert pmt_intr_o */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + /* whether or not we woke via WoL, write the reset value */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_PWRDWN_POS, + MAC_PMT_STA_PWRDWN_LEN, 0); + /* write the register through to the synchronized always-on block */ + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA); + + /* Disable fast link mode */ + cfg_r32(pdata, REG_POWER_EIOS, &regval); + regval = FXGMAC_SET_REG_BITS(regval, POWER_EIOS_POS, POWER_EIOS_LEN, 0); + cfg_w32(pdata, REG_POWER_EIOS, regval); + + fxgmac_pwr_clock_gate(pdata); +} + +#if FXGMAC_SANITY_CHECK_ENABLED +/* + * fxgmac_diag_sanity_check + * check for errors such as a hung Tx queue + * Return: 0 when normal, non-zero on a fatal error + */ +static int fxgmac_diag_sanity_check(struct fxgmac_pdata *pdata) +{ + u32 reg_q_val, reg_tail_val; + static u32 reg_tail_pre; + static int cnt; + + reg_q_val = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, 0 /* Tx channel 0 */, + 0x8 /* 0x2d08 */)); + if (!(reg_q_val & 0x10)) { /* tx q is empty */ + return 0; + } + reg_tail_val = + readreg(pdata->pAdapter, + FXGMAC_DMA_REG(pdata->channel_head, DMA_CH_TDTR_LO)); + if (reg_tail_pre != reg_tail_val) { + reg_tail_pre = reg_tail_val; + cnt = 0; + } else { + cnt++; + } + + if (cnt > 10) { + reg_q_val = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, 0 /* Tx channel 0 */, + 0x8 /* 0x2d08 */)); + if (reg_q_val & 0x10) { /* double check */ + DPRINTK("fxgmac, warning, tx Q status is 0x%x and tail keeps unchanged for %d times, 0x%x\n", + reg_q_val, cnt, reg_tail_val); + return 1; + } + } + + return 0; +} +#endif + +static void fxgmac_pwr_clock_gate(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL1); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &regval); + /* turn the pll off in sleep mode */ + regval = FXGMAC_SET_REG_BITS(regval, + MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_POS, + MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_LEN, + 0); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, regval); +} + +static void fxgmac_pwr_clock_ungate(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL1); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &regval); + /* keep the pll on in sleep mode */ + regval = FXGMAC_SET_REG_BITS(regval, + MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_POS, + MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_LEN, + 1); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, regval); +}
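fxgmac_diag_sanity_check() above is a stall detector: it snapshots the Tx tail pointer in function-local statics and only reports a hang once the queue stays non-empty while the pointer is frozen for more than ten polls. The same pattern in isolation, with illustrative names and a single caller assumed::

  #include <linux/types.h>

  /* Return true when <hw_ptr> has not moved for more than ten calls
   * while work was pending.
   */
  static bool ring_seems_stuck(u32 hw_ptr, bool work_pending)
  {
          static u32 last_ptr;
          static int frozen;

          if (!work_pending || hw_ptr != last_ptr) {
                  last_ptr = hw_ptr;
                  frozen = 0;
                  return false;
          }

          return ++frozen > 10;   /* same threshold as the check above */
  }

The double read of the queue status before reporting guards against the race where the queue drains between the counter check and the warning.

+ +/* context - pointer to struct fxgmac_pdata. */ +static unsigned char fxgmac_suspend_int(void *context) +{ + u32 intid; +#if FUXI_EPHY_INTERRUPT_D0_OFF + u32 regval = 0; +#endif + u32 val_mgmt_intcrtl0; + struct fxgmac_pdata *pdata = (struct fxgmac_pdata *)context; + + val_mgmt_intcrtl0 = + (u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0); + /* disable management interrupts.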
enable only pmt interrupts. */ + val_mgmt_intcrtl0 = FXGMAC_SET_REG_BITS(val_mgmt_intcrtl0, + MGMT_INT_CTRL0_INT_MASK_POS, + MGMT_INT_CTRL0_INT_MASK_LEN, + MGMT_INT_CTRL0_INT_MASK_EX_PMT); + writereg(pdata->pAdapter, val_mgmt_intcrtl0, + pdata->base_mem + MGMT_INT_CTRL0); + + for (intid = 0; intid < MSIX_TBL_MAX_NUM; + intid++) { /* disable all msix */ + writereg(pdata->pAdapter, 0x1, + pdata->base_mem + MSIX_TBL_BASE_ADDR + + MSIX_TBL_MASK_OFFSET + intid * 16); + } + + /* enable pmt msix */ + writereg(pdata->pAdapter, 0x0, + pdata->base_mem + MSIX_TBL_BASE_ADDR + MSIX_TBL_MASK_OFFSET + + MSI_ID_PHY_OTHER * 16); + readreg(pdata->pAdapter, + pdata->base_mem + + MGMT_WOL_CTRL); /* read clear wake up reason */ + /* since Msix interrupt masked now, enable EPHY interrupt for case of link change wakeup */ + fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, + NULL); /* clear phy interrupt */ +#if FUXI_EPHY_INTERRUPT_D0_OFF + regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, + PHY_INT_MASK_LINK_UP_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS, + PHY_INT_MASK_LINK_DOWN_LEN, 1); + fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK, + regval); /* enable phy interrupt */ +#endif + + return true; +} +static int fxgmac_suspend_txrx(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + int busy = 15; + /* Prepare for Tx DMA channel stop */ + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) { + break; + } + fxgmac_prepare_tx_stop(pdata, channel); + } + + /* Disable each Tx DMA channel */ + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) { + break; + } + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, + DMA_CH_TCR_ST_LEN, 0); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + DBGPRINT(MP_TRACE, (" %s disable tx dma", __FUNCTION__)); + } + + do { + regval = + readreg(pdata->pAdapter, pdata->mac_regs + MAC_DBG_STA); + busy--; + } while ((regval & MAC_DBG_STA_TX_BUSY) && (busy)); + + if (0 != (regval & MAC_DBG_STA_TX_BUSY)) { + regval = + readreg(pdata->pAdapter, pdata->mac_regs + MAC_DBG_STA); + DbgPrintF(MP_WARN, + "warning !!!timed out waiting for Tx MAC to stop\n"); + return -1; + } + /* wait empty Tx queue */ + for (i = 0; i < pdata->tx_q_count; i++) { + do { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_TXQ_DEG)); + busy--; + } while ((regval & MTL_TXQ_DEG_TX_BUSY) && (busy)); + if (0 != (regval & MTL_TXQ_DEG_TX_BUSY)) { + regval = readreg(pdata->pAdapter, + pdata->mac_regs + MTL_TXQ_DEG); + DbgPrintF( + MP_WARN, + "warning !!!timed out waiting for tx queue %u to empty\n", + i); + return -1; + } + } + + /* Disable MAC TxRx */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_TE_POS, MAC_CR_TE_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, MAC_CR_RE_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + /* Prepare for Rx DMA channel stop */ + for (i = 0; i < pdata->rx_q_count; i++) { + fxgmac_prepare_rx_stop(pdata, i); + } + /* Disable each Rx DMA channel */ + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) { + break; + } + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = 
FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, + DMA_CH_RCR_SR_LEN, 0); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + DBGPRINT(MP_TRACE, (" %s disable rx dma", __FUNCTION__)); + } + return 0; +} +static void fxgmac_resume_int(struct fxgmac_pdata *pdata) +{ + u32 intid, regval = 0; + u32 val_mgmt_intcrtl0; + + val_mgmt_intcrtl0 = + (u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0); + /* disable management interrupts. enable only pmt interrupts. */ + val_mgmt_intcrtl0 = FXGMAC_SET_REG_BITS( + val_mgmt_intcrtl0, MGMT_INT_CTRL0_INT_MASK_POS, + MGMT_INT_CTRL0_INT_MASK_LEN, MGMT_INT_CTRL0_INT_MASK_DISABLE); + writereg(pdata->pAdapter, val_mgmt_intcrtl0, + pdata->base_mem + MGMT_INT_CTRL0); + + for (intid = 0; intid < MSIX_TBL_RXTX_NUM; intid++) { + writereg(pdata->pAdapter, 0, + pdata->base_mem + MSIX_TBL_BASE_ADDR + + MSIX_TBL_MASK_OFFSET + intid * 16); + } + + for (intid = MSIX_TBL_RXTX_NUM; intid < MSIX_TBL_MAX_NUM; + intid++) { /* disable some msix */ + writereg(pdata->pAdapter, 0, + pdata->base_mem + MSIX_TBL_BASE_ADDR + + MSIX_TBL_MASK_OFFSET + intid * 16); + } + +#if FUXI_EPHY_INTERRUPT_D0_OFF + fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK, + 0x0); /* disable phy interrupt */ + fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, + NULL); /* clear phy interrupt */ +#else + regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, + PHY_INT_MASK_LINK_UP_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS, + PHY_INT_MASK_LINK_DOWN_LEN, 1); + fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK, + regval); /* enable phy interrupt */ +#endif +} + +static int fxgmac_hw_init(struct fxgmac_pdata *pdata) +{ + struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; + int ret; + u32 regval = 0; + + if (netif_msg_drv(pdata)) { + DPRINTK("fxgmac hw init call in\n"); + } + + /* Flush Tx queues */ + ret = fxgmac_flush_tx_queues(pdata); + if (ret) { + if (netif_msg_drv(pdata)) { + DPRINTK("fxgmac_hw_init call flush tx queue err.\n"); + } + return ret; + } + + /* Initialize DMA related features */ + fxgmac_config_dma_bus(pdata); + fxgmac_config_osp_mode(pdata); + fxgmac_config_pblx8(pdata); + fxgmac_config_tx_pbl_val(pdata); + fxgmac_config_rx_pbl_val(pdata); + fxgmac_config_rx_coalesce(pdata); + fxgmac_config_tx_coalesce(pdata); + fxgmac_config_rx_buffer_size(pdata); + fxgmac_config_tso_mode(pdata); + fxgmac_config_sph_mode(pdata); + fxgmac_config_rss(pdata); + fxgmac_config_wol(pdata, pdata->expansion.wol); + + desc_ops->tx_desc_init(pdata); + desc_ops->rx_desc_init(pdata); + fxgmac_enable_dma_interrupts(pdata); + + /* Initialize MTL related features */ + fxgmac_config_mtl_mode(pdata); + fxgmac_config_queue_mapping(pdata); + fxgmac_config_tsf_mode(pdata, pdata->tx_sf_mode); + fxgmac_config_rsf_mode(pdata, pdata->rx_sf_mode); + fxgmac_config_tx_threshold(pdata, pdata->tx_threshold); + fxgmac_config_rx_threshold(pdata, pdata->rx_threshold); + fxgmac_config_tx_fifo_size(pdata); + fxgmac_config_rx_fifo_size(pdata); + fxgmac_config_flow_control_threshold(pdata); + fxgmac_config_rx_fep_disable(pdata); + fxgmac_config_rx_fup_enable(pdata); + fxgmac_enable_mtl_interrupts(pdata); + + /* Initialize MAC related features */ + fxgmac_config_mac_address(pdata); + fxgmac_config_crc_check(pdata); + fxgmac_config_rx_mode(pdata); + fxgmac_config_jumbo(pdata); + fxgmac_config_flow_control(pdata); + fxgmac_config_mac_speed(pdata); + fxgmac_config_checksum_offload(pdata); + fxgmac_config_vlan_support(pdata); + fxgmac_config_mmc(pdata); + 
fxgmac_enable_mac_interrupts(pdata); + + /* enable the EPhy link-change interrupt */ + fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, + NULL); /* clear phy interrupt */ + regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, + PHY_INT_MASK_LINK_UP_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS, + PHY_INT_MASK_LINK_DOWN_LEN, 1); + fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK, + regval); /* enable phy interrupt */ + + if (netif_msg_drv(pdata)) { + DPRINTK("fxgmac hw init callout\n"); + } + return 0; +} + +static void fxgmac_save_nonstick_reg(struct fxgmac_pdata *pdata) +{ + u32 i; + + for (i = REG_PCIE_TRIGGER; i < MSI_PBA_REG; i += 4) { + pdata->reg_nonstick[(i - REG_PCIE_TRIGGER) >> 2] = + readreg(pdata->pAdapter, pdata->base_mem + i); + } +} + +static void fxgmac_restore_nonstick_reg(struct fxgmac_pdata *pdata) +{ + u32 i; + + for (i = REG_PCIE_TRIGGER; i < MSI_PBA_REG; i += 4) { + writereg(pdata->pAdapter, + pdata->reg_nonstick[(i - REG_PCIE_TRIGGER) >> 2], + pdata->base_mem + i); + } +} + +static void fxgmac_esd_restore_pcie_cfg(struct fxgmac_pdata *pdata) +{ + cfg_w32(pdata, REG_PCI_COMMAND, pdata->expansion.cfg_pci_cmd); + cfg_w32(pdata, REG_CACHE_LINE_SIZE, + pdata->expansion.cfg_cache_line_size); + cfg_w32(pdata, REG_MEM_BASE, pdata->expansion.cfg_mem_base); + cfg_w32(pdata, REG_MEM_BASE_HI, pdata->expansion.cfg_mem_base_hi); + cfg_w32(pdata, REG_IO_BASE, pdata->expansion.cfg_io_base); + cfg_w32(pdata, REG_INT_LINE, pdata->expansion.cfg_int_line); + cfg_w32(pdata, REG_DEVICE_CTRL1, pdata->expansion.cfg_device_ctrl1); + cfg_w32(pdata, REG_PCI_LINK_CTRL, pdata->expansion.cfg_pci_link_ctrl); + cfg_w32(pdata, REG_DEVICE_CTRL2, pdata->expansion.cfg_device_ctrl2); + cfg_w32(pdata, REG_MSIX_CAPABILITY, + pdata->expansion.cfg_msix_capability); +}
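fxgmac_hw_exit() below issues a full chip reset, and the register window between REG_PCIE_TRIGGER and MSI_PBA_REG is non-sticky: the reset clears it, so the save/restore pair above has to replay it afterwards. The offset-to-slot mapping, reduced to a standalone sketch with stand-in window bounds::

  #include <linux/io.h>
  #include <linux/types.h>

  #define WIN_FIRST  0x0000   /* stands in for REG_PCIE_TRIGGER */
  #define WIN_END    0x0040   /* stands in for MSI_PBA_REG */

  static u32 backup[(WIN_END - WIN_FIRST) / 4];

  static void save_window(void __iomem *base)
  {
          u32 off;

          /* one u32 slot per 4-byte register: slot = (off - first) >> 2 */
          for (off = WIN_FIRST; off < WIN_END; off += 4)
                  backup[(off - WIN_FIRST) >> 2] = readl(base + off);
  }

+static int fxgmac_hw_exit(struct fxgmac_pdata *pdata) +{ + u32 regval; + u32 value = 0; + + cfg_r32(pdata, REG_PCI_LINK_CTRL, &regval); + pdata->pcie_link_status = + FXGMAC_GET_REG_BITS(regval, PCI_LINK_CTRL_ASPM_CONTROL_POS, + PCI_LINK_CTRL_ASPM_CONTROL_LEN); + if ((pdata->pcie_link_status & 0x02) == PCI_LINK_CTRL_L1_STATUS) { + regval = FXGMAC_SET_REG_BITS(regval, + PCI_LINK_CTRL_ASPM_CONTROL_POS, + PCI_LINK_CTRL_ASPM_CONTROL_LEN, 0); + cfg_w32(pdata, REG_PCI_LINK_CTRL, regval); + } + + /* Issue a CHIP reset */ + regval = readreg(pdata->pAdapter, pdata->base_mem + SYS_RESET_REG); + DPRINTK("CHIP_RESET 0x%x\n", regval); + /* reg 0x152c bit31: writing 1 triggers the reset; it is self-clearing, + * but an immediate read-back still returns 1. + */ + regval = FXGMAC_SET_REG_BITS(regval, SYS_RESET_POS, SYS_RESET_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->base_mem + SYS_RESET_REG); + + usleep_range_ex(pdata->pAdapter, 9000, 10000); + + /* the 0x152c reset also resets the trigger circuit and reloads the + * efuse patch (0x1004=0x16), so the ephy reset must be released again + */ + value = FXGMAC_SET_REG_BITS(value, MGMT_EPHY_CTRL_RESET_POS, + MGMT_EPHY_CTRL_RESET_LEN, + MGMT_EPHY_CTRL_STA_EPHY_RELEASE); + writereg(pdata->pAdapter, value, pdata->base_mem + MGMT_EPHY_CTRL); + usleep_range_ex(pdata->pAdapter, 100, 150); + + fxgmac_restore_nonstick_reg( + pdata); /* the chip reset clears the nonstick registers.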
*/ + + return 0; +} + +static int fxgmac_set_gmac_register(struct fxgmac_pdata *pdata, u8 *address, + unsigned int data) +{ + if (address < (u8 *)(pdata->base_mem)) { + return -1; + } + writereg(pdata->pAdapter, data, address); + return 0; +} + +static u32 fxgmac_get_gmac_register(struct fxgmac_pdata *pdata, u8 *address) +{ + u32 regval = 0; + + /* match the bounds check in fxgmac_set_gmac_register() */ + if (address >= (u8 *)(pdata->base_mem)) { + regval = readreg(pdata->pAdapter, address); + } + return regval; +}
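fxgmac_pcie_init() below assembles the LTR idle-enter and idle-exit latencies the device reports upstream. The encoding it builds field by field follows the PCIe LTR message format: a 10-bit latency value, a 3-bit scale giving a multiplier of 1 ns << (5 * scale), and a "requirement" flag; the register then packs the no-snoop copy into the high half, which is what ``(regval << 16) + regval`` does. A sketch using the spec's bit positions (the driver's POS/LEN macros, not shown in this hunk, may place the fields differently)::

  #include <linux/types.h>

  /* bits 9:0 value, bits 12:10 scale, bit 15 requirement */
  static u32 ltr_encode(u32 require, u32 scale, u32 value)
  {
          u32 half = (require << 15) | ((scale & 0x7) << 10) | (value & 0x3ff);

          return (half << 16) | half;   /* snooped and no-snoop halves */
  }

With scale = 2, for instance, each unit of the value field is worth about 1 us, which matches the _USVAL naming of the driver's constants.

+static int fxgmac_pcie_init(struct fxgmac_pdata *pdata, bool ltr_en, + bool aspm_l1ss_en, bool aspm_l1_en, + bool aspm_l0s_en) +{ + u32 regval = 0; + u32 deviceid = 0; + + cfg_r32(pdata, REG_PCI_LINK_CTRL, &regval); + if ((pdata->pcie_link_status & 0x02) == PCI_LINK_CTRL_L1_STATUS && + FXGMAC_GET_REG_BITS(regval, PCI_LINK_CTRL_ASPM_CONTROL_POS, + PCI_LINK_CTRL_ASPM_CONTROL_LEN) == 0x00) { + regval = FXGMAC_SET_REG_BITS(regval, + PCI_LINK_CTRL_ASPM_CONTROL_POS, + PCI_LINK_CTRL_ASPM_CONTROL_LEN, + pdata->pcie_link_status); + cfg_w32(pdata, REG_PCI_LINK_CTRL, regval); + } + + regval = FXGMAC_SET_REG_BITS(0, LTR_IDLE_ENTER_REQUIRE_POS, + LTR_IDLE_ENTER_REQUIRE_LEN, + LTR_IDLE_ENTER_REQUIRE); + regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_ENTER_SCALE_POS, + LTR_IDLE_ENTER_SCALE_LEN, + LTR_IDLE_ENTER_SCALE); + regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_ENTER_POS, + LTR_IDLE_ENTER_LEN, LTR_IDLE_ENTER_USVAL); + regval = (regval << 16) + regval; /* snoopy + non-snoopy */ + writereg(pdata->pAdapter, regval, pdata->base_mem + LTR_IDLE_ENTER); + + regval = FXGMAC_SET_REG_BITS(0, LTR_IDLE_EXIT_REQUIRE_POS, + LTR_IDLE_EXIT_REQUIRE_LEN, + LTR_IDLE_EXIT_REQUIRE); + regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_EXIT_SCALE_POS, + LTR_IDLE_EXIT_SCALE_LEN, + LTR_IDLE_EXIT_SCALE); + regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_EXIT_POS, + LTR_IDLE_EXIT_LEN, LTR_IDLE_EXIT_USVAL); + regval = (regval << 16) + regval; /* snoopy + non-snoopy */ + writereg(pdata->pAdapter, regval, pdata->base_mem + LTR_IDLE_EXIT); + + regval = readreg(pdata->pAdapter, pdata->base_mem + LTR_CTRL); + if (ltr_en) { + regval = FXGMAC_SET_REG_BITS(regval, LTR_CTRL_EN_POS, + LTR_CTRL_EN_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, + LTR_CTRL_IDLE_THRE_TIMER_POS, + LTR_CTRL_IDLE_THRE_TIMER_LEN, + LTR_CTRL_IDLE_THRE_TIMER_VAL); + } else { + regval = FXGMAC_SET_REG_BITS(regval, LTR_CTRL_EN_POS, + LTR_CTRL_EN_LEN, 0); + } + writereg(pdata->pAdapter, regval, pdata->base_mem + LTR_CTRL); + + regval = readreg(pdata->pAdapter, pdata->base_mem + LPW_CTRL); + regval = FXGMAC_SET_REG_BITS(regval, LPW_CTRL_ASPM_L0S_EN_POS, + LPW_CTRL_ASPM_L0S_EN_LEN, + aspm_l0s_en ? 1 : 0); + regval = FXGMAC_SET_REG_BITS(regval, LPW_CTRL_ASPM_L1_EN_POS, + LPW_CTRL_ASPM_L1_EN_LEN, + aspm_l1_en ? 1 : 0); + regval = FXGMAC_SET_REG_BITS(regval, LPW_CTRL_L1SS_EN_POS, + LPW_CTRL_L1SS_EN_LEN, + aspm_l1ss_en ?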
1 : 0); + writereg(pdata->pAdapter, regval, pdata->base_mem + LPW_CTRL); + + cfg_r32(pdata, REG_ASPM_CONTROL, &regval); + regval = FXGMAC_SET_REG_BITS(regval, ASPM_L1_IDLE_THRESHOLD_POS, + ASPM_L1_IDLE_THRESHOLD_LEN, + ASPM_L1_IDLE_THRESHOLD_1US); + cfg_w32(pdata, REG_ASPM_CONTROL, regval); + + regval = FXGMAC_SET_REG_BITS(0, PCIE_SERDES_PLL_AUTOOFF_POS, + PCIE_SERDES_PLL_AUTOOFF_LEN, 1); + writereg(pdata->pAdapter, regval, + pdata->base_mem + REG_PCIE_SERDES_PLL); + + /* fuxi needs to adjust the sigdet threshold */ + cfg_r8(pdata, REG_PCI_REVID, &regval); + cfg_r16(pdata, REG_PCI_DEVICE_ID, &deviceid); + if (regval == FUXI_REV_01 && deviceid == PCI_DEVICE_ID_FUXI) { + regval = + readreg(pdata->pAdapter, pdata->base_mem + MGMT_SIGDET); + regval = FXGMAC_SET_REG_BITS(regval, MGMT_SIGDET_POS, + MGMT_SIGDET_LEN, MGMT_SIGDET_55MV); + writereg(pdata->pAdapter, regval, + pdata->base_mem + MGMT_SIGDET); + } + + return 0; +} + +static void fxgmac_trigger_pcie(struct fxgmac_pdata *pdata, u32 code) +{ + writereg(pdata->pAdapter, code, pdata->base_mem + REG_PCIE_TRIGGER); +} + +void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) +{ + hw_ops->init = fxgmac_hw_init; + hw_ops->exit = fxgmac_hw_exit; + hw_ops->save_nonstick_reg = fxgmac_save_nonstick_reg; + hw_ops->restore_nonstick_reg = fxgmac_restore_nonstick_reg; + hw_ops->esd_restore_pcie_cfg = fxgmac_esd_restore_pcie_cfg; + + hw_ops->set_gmac_register = fxgmac_set_gmac_register; + hw_ops->get_gmac_register = fxgmac_get_gmac_register; + + hw_ops->tx_complete = fxgmac_tx_complete; + hw_ops->enable_tx = fxgmac_enable_tx; + hw_ops->disable_tx = fxgmac_disable_tx; + hw_ops->enable_rx = fxgmac_enable_rx; + hw_ops->disable_rx = fxgmac_disable_rx; + hw_ops->enable_channel_rx = fxgmac_enable_channel_rx; + hw_ops->dev_xmit = fxgmac_dev_xmit; + hw_ops->dev_read = fxgmac_dev_read; + hw_ops->config_tso = fxgmac_config_tso_mode; + hw_ops->enable_int = fxgmac_enable_int; + hw_ops->disable_int = fxgmac_disable_int; + hw_ops->set_interrupt_moderation = fxgmac_set_interrupt_moderation; + hw_ops->enable_msix_rxtxinterrupt = fxgmac_enable_msix_rxtxinterrupt; + hw_ops->disable_msix_interrupt = fxgmac_disable_msix_interrupt; + hw_ops->enable_msix_rxtxphyinterrupt = + fxgmac_enable_msix_rxtxphyinterrupt; + hw_ops->enable_msix_one_interrupt = fxgmac_enable_msix_one_interrupt; + hw_ops->disable_msix_one_interrupt = fxgmac_disable_msix_one_interrupt; + hw_ops->enable_mgm_interrupt = fxgmac_enable_mgm_interrupt; + hw_ops->disable_mgm_interrupt = fxgmac_disable_mgm_interrupt; + + hw_ops->set_mac_address = fxgmac_set_mac_address; + hw_ops->set_mac_hash = fxgmac_add_mac_addresses; + hw_ops->config_rx_mode = fxgmac_config_rx_mode; + hw_ops->enable_rx_csum = fxgmac_enable_rx_csum; + hw_ops->disable_rx_csum = fxgmac_disable_rx_csum; + + /* For MII speed configuration */ + hw_ops->config_mac_speed = fxgmac_config_mac_speed; + hw_ops->get_xlgmii_phy_status = fxgmac_check_phy_link; + + /* For descriptor related operation */ + hw_ops->tx_desc_init = fxgmac_tx_desc_init; + hw_ops->rx_desc_init = fxgmac_rx_desc_init; + hw_ops->tx_desc_reset = fxgmac_tx_desc_reset; + hw_ops->rx_desc_reset = fxgmac_rx_desc_reset; + hw_ops->is_last_desc = fxgmac_is_last_desc; + hw_ops->is_context_desc = fxgmac_is_context_desc; + hw_ops->tx_start_xmit = fxgmac_tx_start_xmit; + hw_ops->set_pattern_data = fxgmac_set_pattern_data; + hw_ops->config_wol = fxgmac_config_wol; + hw_ops->get_rss_hash_key = fxgmac_read_rss_hash_key; + hw_ops->write_rss_lookup_table = fxgmac_write_rss_lookup_table; +#if
FXGMAC_SANITY_CHECK_ENABLED + hw_ops->diag_sanity_check = fxgmac_diag_sanity_check; +#endif + + /* For Flow Control */ + hw_ops->config_tx_flow_control = fxgmac_config_tx_flow_control; + hw_ops->config_rx_flow_control = fxgmac_config_rx_flow_control; + + /*For Jumbo Frames*/ + hw_ops->enable_jumbo = fxgmac_config_jumbo; + + /* For Vlan related config */ + hw_ops->enable_tx_vlan = fxgmac_enable_tx_vlan; + hw_ops->disable_tx_vlan = fxgmac_disable_tx_vlan; + hw_ops->enable_rx_vlan_stripping = fxgmac_enable_rx_vlan_stripping; + hw_ops->disable_rx_vlan_stripping = fxgmac_disable_rx_vlan_stripping; + hw_ops->enable_rx_vlan_filtering = fxgmac_enable_rx_vlan_filtering; + hw_ops->disable_rx_vlan_filtering = fxgmac_disable_rx_vlan_filtering; + hw_ops->update_vlan_hash_table = fxgmac_update_vlan_hash_table; + + /* For RX coalescing */ + hw_ops->config_rx_coalesce = fxgmac_config_rx_coalesce; + hw_ops->config_tx_coalesce = fxgmac_config_tx_coalesce; + hw_ops->usec_to_riwt = fxgmac_usec_to_riwt; + hw_ops->riwt_to_usec = fxgmac_riwt_to_usec; + + /* For RX and TX threshold config */ + hw_ops->config_rx_threshold = fxgmac_config_rx_threshold; + hw_ops->config_tx_threshold = fxgmac_config_tx_threshold; + + /* For RX and TX Store and Forward Mode config */ + hw_ops->config_rsf_mode = fxgmac_config_rsf_mode; + hw_ops->config_tsf_mode = fxgmac_config_tsf_mode; + + /* For TX DMA Operating on Second Frame config */ + hw_ops->config_osp_mode = fxgmac_config_osp_mode; + + /* For RX and TX PBL config */ + hw_ops->config_rx_pbl_val = fxgmac_config_rx_pbl_val; + hw_ops->get_rx_pbl_val = fxgmac_get_rx_pbl_val; + hw_ops->config_tx_pbl_val = fxgmac_config_tx_pbl_val; + hw_ops->get_tx_pbl_val = fxgmac_get_tx_pbl_val; + hw_ops->config_pblx8 = fxgmac_config_pblx8; + + /* For MMC statistics support */ + hw_ops->tx_mmc_int = fxgmac_tx_mmc_int; + hw_ops->rx_mmc_int = fxgmac_rx_mmc_int; + hw_ops->read_mmc_stats = fxgmac_read_mmc_stats; + + /* For Receive Side Scaling */ + hw_ops->enable_rss = fxgmac_enable_rss; + hw_ops->disable_rss = fxgmac_disable_rss; + hw_ops->get_rss_options = fxgmac_read_rss_options; + hw_ops->set_rss_options = fxgmac_write_rss_options; + hw_ops->set_rss_hash_key = fxgmac_set_rss_hash_key; + hw_ops->set_rss_lookup_table = fxgmac_set_rss_lookup_table; + + /*For Offload*/ + hw_ops->set_arp_offload = fxgmac_update_aoe_ipv4addr; + hw_ops->enable_arp_offload = fxgmac_enable_arp_offload; + hw_ops->disable_arp_offload = fxgmac_disable_arp_offload; + + hw_ops->set_ns_offload = fxgmac_set_ns_offload; + hw_ops->enable_ns_offload = fxgmac_enable_ns_offload; + hw_ops->disable_ns_offload = fxgmac_disable_ns_offload; + + hw_ops->enable_wake_magic_pattern = fxgmac_enable_wake_magic_pattern; + hw_ops->disable_wake_magic_pattern = fxgmac_disable_wake_magic_pattern; + + hw_ops->enable_wake_link_change = fxgmac_enable_wake_link_change; + hw_ops->disable_wake_link_change = fxgmac_disable_wake_link_change; + + hw_ops->check_wake_pattern_fifo_pointer = + fxgmac_check_wake_pattern_fifo_pointer; + hw_ops->set_wake_pattern = fxgmac_set_wake_pattern; + hw_ops->enable_wake_pattern = fxgmac_enable_wake_pattern; + hw_ops->disable_wake_pattern = fxgmac_disable_wake_pattern; + hw_ops->set_wake_pattern_mask = fxgmac_set_wake_pattern_mask; +#if defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN + hw_ops->enable_wake_packet_indication = + fxgmac_enable_wake_packet_indication; + hw_ops->get_wake_packet_indication = fxgmac_get_wake_packet_indication; +#endif + + /*For phy write /read*/ + hw_ops->reset_phy = 
fxgmac_reset_phy; + hw_ops->release_phy = fxgmac_release_phy; + hw_ops->get_ephy_state = fxgmac_get_ephy_state; + hw_ops->write_ephy_reg = fxgmac_write_ephy_reg; + hw_ops->read_ephy_reg = fxgmac_read_ephy_reg; + hw_ops->set_ephy_autoneg_advertise = fxgmac_set_ephy_autoneg_advertise; + hw_ops->phy_config = fxgmac_phy_config; + hw_ops->close_phy_led = fxgmac_close_phy_led; + hw_ops->led_under_active = fxmgac_config_led_under_active; + hw_ops->led_under_sleep = fxgmac_config_led_under_sleep; + hw_ops->led_under_shutdown = fxgmac_config_led_under_shutdown; + hw_ops->led_under_disable = fxgmac_config_led_under_disable; + hw_ops->enable_phy_check = fxgmac_enable_phy_check; + hw_ops->disable_phy_check = fxgmac_disable_phy_check; + hw_ops->setup_cable_loopback = fxgmac_setup_cable_loopback; + hw_ops->clean_cable_loopback = fxgmac_clean_cable_loopback; + hw_ops->disable_phy_sleep = fxgmac_disable_phy_sleep; + hw_ops->enable_phy_sleep = fxgmac_enable_phy_sleep; + hw_ops->phy_green_ethernet = fxgmac_phy_green_ethernet; + hw_ops->phy_eee_feature = fxgmac_phy_eee_feature; + + /* For power management */ + hw_ops->pre_power_down = fxgmac_pre_powerdown; + hw_ops->config_power_down = fxgmac_config_powerdown; + hw_ops->config_power_up = fxgmac_config_powerup; + hw_ops->set_suspend_int = fxgmac_suspend_int; + hw_ops->set_resume_int = fxgmac_resume_int; + hw_ops->set_suspend_txrx = fxgmac_suspend_txrx; + hw_ops->set_pwr_clock_gate = fxgmac_pwr_clock_gate; + hw_ops->set_pwr_clock_ungate = fxgmac_pwr_clock_ungate; + + hw_ops->set_all_multicast_mode = fxgmac_set_all_multicast_mode; + hw_ops->config_multicast_mac_hash_table = + fxgmac_config_multicast_mac_hash_table; + hw_ops->set_promiscuous_mode = fxgmac_set_promiscuous_mode; + hw_ops->enable_rx_broadcast = fxgmac_enable_rx_broadcast; + + /* efuse relevant operation. */ + hw_ops->read_patch_from_efuse = + fxgmac_read_patch_from_efuse; /* read patch per register. */ + hw_ops->read_patch_from_efuse_per_index = + fxgmac_read_patch_from_efuse_per_index; /* read patch per index. */ + hw_ops->write_patch_to_efuse = fxgmac_write_patch_to_efuse; + hw_ops->write_patch_to_efuse_per_index = + fxgmac_write_patch_to_efuse_per_index; + hw_ops->read_mac_subsys_from_efuse = fxgmac_read_mac_subsys_from_efuse; + hw_ops->write_mac_subsys_to_efuse = fxgmac_write_mac_subsys_to_efuse; + hw_ops->efuse_load = fxgmac_efuse_load; + hw_ops->read_efuse_data = fxgmac_efuse_read_data; + hw_ops->write_oob = fxgmac_efuse_write_oob; + hw_ops->write_led = fxgmac_efuse_write_led; + hw_ops->write_led_config = fxgmac_write_led_setting_to_efuse; + hw_ops->read_led_config = fxgmac_read_led_setting_from_efuse; + + /* */ + hw_ops->pcie_init = fxgmac_pcie_init; + hw_ops->trigger_pcie = fxgmac_trigger_pcie; +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c new file mode 100644 index 0000000000000000000000000000000000000000..b8734efb36426425e17f9ea7c687425d62ac410d --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c @@ -0,0 +1,2329 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. 
*/ + +#include <linux/netdevice.h> +#include <linux/tcp.h> +#include <linux/interrupt.h> +#include <linux/inetdevice.h> +#include <linux/inet.h> +#include <net/addrconf.h> + +#include "fuxi-os.h" +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" + +static int fxgmac_one_poll_rx(struct napi_struct *, int); +static int fxgmac_one_poll_tx(struct napi_struct *, int); +static int fxgmac_all_poll(struct napi_struct *, int); + +unsigned int fxgmac_get_netdev_ip4addr(struct fxgmac_pdata *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct in_ifaddr *ifa; + unsigned int ipval = + 0xc0a801ca; /* hard-coded default, 192.168.1.202 */ + + rcu_read_lock(); + /* we only take the first IPv4 address */ + ifa = rcu_dereference(netdev->ip_ptr->ifa_list); + if (ifa) { + /* binary IPv4 address, big-endian (__be32) */ + ipval = (unsigned int)ifa->ifa_address; + + DPRINTK("%s, netdev %s IPv4 address %pI4, mask: %pI4\n", + __FUNCTION__, ifa->ifa_label, &ifa->ifa_address, + &ifa->ifa_mask); + } + rcu_read_unlock(); + + return ipval; +}
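fxgmac_get_netdev_ip6addr() below hands the NS-offload logic both a unicast address and its solicited-node multicast form via addrconf_addr_solict_mult() from net/addrconf.h. The mapping is fixed by RFC 4291: start from ff02::1:ff00:0/104 and append the low 24 bits of the unicast address. Open-coded for illustration::

  #include <linux/in6.h>
  #include <linux/string.h>

  /* Build ff02::1:ffXX:XXXX from the unicast address <uni>. */
  static void solicited_node_mcast(const struct in6_addr *uni,
                                   struct in6_addr *sol)
  {
          memset(sol, 0, sizeof(*sol));
          sol->s6_addr[0]  = 0xff;
          sol->s6_addr[1]  = 0x02;
          sol->s6_addr[11] = 0x01;
          sol->s6_addr[12] = 0xff;
          sol->s6_addr[13] = uni->s6_addr[13];
          sol->s6_addr[14] = uni->s6_addr[14];
          sol->s6_addr[15] = uni->s6_addr[15];
  }

Neighbor Solicitations are sent to this group rather than to the unicast address, so the wake filter has to match it as well.

+unsigned char *fxgmac_get_netdev_ip6addr(struct fxgmac_pdata *pdata, + unsigned char *ipval, + unsigned char *ip6addr_solicited, + unsigned int ifa_flag) +{ + struct net_device *netdev = pdata->netdev; + struct inet6_dev *i6dev; + struct inet6_ifaddr *ifp; + unsigned char local_ipval[16] = { 0 }; + unsigned char solicited_ipval[16] = { 0 }; + struct in6_addr *addr_ip6 = (struct in6_addr *)local_ipval; + struct in6_addr *addr_ip6_solicited = + (struct in6_addr *)solicited_ipval; + int err = -EADDRNOTAVAIL; + unsigned char *ret; + + if (ipval) { + addr_ip6 = (struct in6_addr *)ipval; + } + + if (ip6addr_solicited) { + addr_ip6_solicited = (struct in6_addr *)ip6addr_solicited; + } + + in6_pton("fe80::4808:8ffb:d93e:d753", -1, (u8 *)addr_ip6, -1, + NULL); /* hard-coded default */ + + if (ifa_flag & FXGMAC_NS_IFA_GLOBAL_UNICAST) + DPRINTK("%s FXGMAC_NS_IFA_GLOBAL_UNICAST is set, %x\n", + __FUNCTION__, ifa_flag); + + if (ifa_flag & FXGMAC_NS_IFA_LOCAL_LINK) + DPRINTK("%s FXGMAC_NS_IFA_LOCAL_LINK is set, %x\n", + __FUNCTION__, ifa_flag); + + rcu_read_lock(); + i6dev = __in6_dev_get(netdev); + if (i6dev != NULL) { + read_lock_bh(&i6dev->lock); + list_for_each_entry(ifp, &i6dev->addr_list, if_list) { + /* use the address scope to pick either a global or the link-local address */ + if (((ifa_flag & FXGMAC_NS_IFA_GLOBAL_UNICAST) && + (ifp->scope != IFA_LINK)) || + ((ifa_flag & FXGMAC_NS_IFA_LOCAL_LINK) && + (ifp->scope == IFA_LINK))) { + memcpy(addr_ip6, &ifp->addr, 16); + addrconf_addr_solict_mult(addr_ip6, + addr_ip6_solicited); + err = 0; + + break; + } + } + read_unlock_bh(&i6dev->lock); + } + rcu_read_unlock(); + + if (err) + DPRINTK("%s failed to get an ipv6 addr, using the default.\n", + __FUNCTION__); + + ret = (err ?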
NULL : ipval); + + return ret; +} + +inline unsigned int fxgmac_tx_avail_desc(struct fxgmac_ring *ring) +{ + unsigned int avail; + + if (ring->dirty > ring->cur) + avail = ring->dirty - ring->cur; + else + avail = ring->dma_desc_count - ring->cur + ring->dirty; + + return avail; +} + +inline unsigned int fxgmac_rx_dirty_desc(struct fxgmac_ring *ring) +{ + unsigned int dirty; + + if (ring->dirty <= ring->cur) + dirty = ring->cur - ring->dirty; + else + dirty = ring->dma_desc_count - ring->dirty + ring->cur; + + return dirty; +} + +static int fxgmac_maybe_stop_tx_queue(struct fxgmac_channel *channel, + struct fxgmac_ring *ring, + unsigned int count) +{ + struct fxgmac_pdata *pdata = channel->pdata; + + if (count > fxgmac_tx_avail_desc(ring)) { + netif_info( + pdata, drv, pdata->netdev, + "Tx queue stopped, not enough descriptors available\n"); + netif_stop_subqueue(pdata->netdev, channel->queue_index); + ring->tx.queue_stopped = 1; + + /* If we haven't notified the hardware because of xmit_more + * support, tell it now + */ + if (ring->tx.xmit_more) + pdata->hw_ops.tx_start_xmit(channel, ring); + if (netif_msg_tx_done(pdata)) + DPRINTK("about stop tx q, ret BUSY\n"); + + return NETDEV_TX_BUSY; + } + + return 0; +} + +static void fxgmac_prep_vlan(struct sk_buff *skb, + struct fxgmac_pkt_info *pkt_info) +{ + if (skb_vlan_tag_present(skb)) + pkt_info->vlan_ctag = skb_vlan_tag_get(skb); +} + +static int fxgmac_prep_tso(struct fxgmac_pdata *pdata, struct sk_buff *skb, + struct fxgmac_pkt_info *pkt_info) +{ + int ret; + + if (!FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN)) + return 0; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + pkt_info->tcp_header_len = tcp_hdrlen(skb); + pkt_info->tcp_payload_len = skb->len - pkt_info->header_len; + pkt_info->mss = skb_shinfo(skb)->gso_size; + + if (netif_msg_tx_done(pdata)) { + DPRINTK("header_len=%u\n", pkt_info->header_len); + DPRINTK("tcp_header_len=%u, tcp_payload_len=%u\n", + pkt_info->tcp_header_len, pkt_info->tcp_payload_len); + DPRINTK("mss=%u\n", pkt_info->mss); + } + /* Update the number of packets that will ultimately be transmitted + * along with the extra bytes for each extra packet + */ + pkt_info->tx_packets = skb_shinfo(skb)->gso_segs; + pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len; + + return 0; +} + +static int fxgmac_is_tso(struct sk_buff *skb) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + return 1; +} + +static void fxgmac_prep_tx_pkt(struct fxgmac_pdata *pdata, + struct fxgmac_ring *ring, struct sk_buff *skb, + struct fxgmac_pkt_info *pkt_info) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) + skb_frag_t *frag; +#else + struct skb_frag_struct *frag; +#endif + unsigned int context_desc; + unsigned int len; + unsigned int i; + + pkt_info->skb = skb; + + context_desc = 0; + pkt_info->desc_count = 0; + + pkt_info->tx_packets = 1; + pkt_info->tx_bytes = skb->len; + if (netif_msg_tx_done(pdata)) + DPRINTK("fxgmac_prep_tx_pkt callin, pkt desc cnt=%d, skb len=%d, skbheadlen=%d\n", + pkt_info->desc_count, skb->len, skb_headlen(skb)); + + if (fxgmac_is_tso(skb)) { + /* TSO requires an extra descriptor if mss is different */ + if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) { + context_desc = 1; + pkt_info->desc_count++; + } + if (netif_msg_tx_done(pdata)) + DPRINTK("fxgmac_is_tso=%d, ip_summed=%d, skb 
gso=%d\n", + ((skb->ip_summed == CHECKSUM_PARTIAL) && + (skb_is_gso(skb))) ? + 1 : + 0, + skb->ip_summed, skb_is_gso(skb) ? 1 : 0); + + /* TSO requires an extra descriptor for TSO header */ + pkt_info->desc_count++; + + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN, 1); + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN, 1); + if (netif_msg_tx_done(pdata)) + DPRINTK("fxgmac_prep_tx_pkt, tso, pkt desc cnt=%d\n", + pkt_info->desc_count); + } else if (skb->ip_summed == CHECKSUM_PARTIAL) + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN, 1); + + if (skb_vlan_tag_present(skb)) { + /* VLAN requires an extra descriptor if tag is different */ + if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) + /* We can share with the TSO context descriptor */ + if (!context_desc) { + context_desc = 1; + pkt_info->desc_count++; + } + + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, 1); + if (netif_msg_tx_done(pdata)) + DPRINTK("fxgmac_prep_tx_pkt, VLAN, pkt desc cnt=%d, vlan=0x%04x\n", + pkt_info->desc_count, skb_vlan_tag_get(skb)); + } + + for (len = skb_headlen(skb); len;) { + pkt_info->desc_count++; + len -= min_t(unsigned int, len, FXGMAC_TX_MAX_BUF_SIZE); + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + for (len = skb_frag_size(frag); len;) { + pkt_info->desc_count++; + len -= min_t(unsigned int, len, FXGMAC_TX_MAX_BUF_SIZE); + } + } + if (netif_msg_tx_done(pdata)) + DPRINTK("fxgmac_prep_tx_pkt callout, pkt desc cnt=%d, skb len=%d, skbheadlen=%d, frags=%d\n", + pkt_info->desc_count, skb->len, skb_headlen(skb), + skb_shinfo(skb)->nr_frags); +} + +static int fxgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) +{ + unsigned int rx_buf_size; + + if (mtu > FXGMAC_JUMBO_PACKET_MTU) { + netdev_alert(netdev, "MTU exceeds maximum supported value\n"); + return -EINVAL; + } + + rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + rx_buf_size = + clamp_val(rx_buf_size, FXGMAC_RX_MIN_BUF_SIZE, + PAGE_SIZE * 4 /* follow yonggang's suggestion */); + + rx_buf_size = (rx_buf_size + FXGMAC_RX_BUF_ALIGN - 1) & + ~(FXGMAC_RX_BUF_ALIGN - 1); + + return rx_buf_size; +} + +static void fxgmac_enable_rx_tx_ints(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct fxgmac_channel *channel; + enum fxgmac_int int_id; + unsigned int i; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (channel->tx_ring && channel->rx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_RI; + else + continue; + + hw_ops->enable_int(channel, int_id); + } +} + +static void fxgmac_phy_process(struct fxgmac_pdata *pdata) +{ + int cur_link = 0; + int regval = 0; + int cur_speed = 0; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + regval = hw_ops->get_ephy_state(pdata); + + /* We should make sure that PHY is done with the reset */ + if (regval & MGMT_EPHY_CTRL_STA_EPHY_RESET) { + pdata->expansion.phy_link = false; + return; + } + + cur_link = FXGMAC_GET_REG_BITS(regval, + 
MGMT_EPHY_CTRL_STA_EPHY_LINKUP_POS, + MGMT_EPHY_CTRL_STA_EPHY_LINKUP_LEN); + if (pdata->expansion.phy_link != cur_link) { + pdata->expansion.phy_link = cur_link; + if (pdata->expansion.phy_link) { + cur_speed = FXGMAC_GET_REG_BITS( + regval, MGMT_EPHY_CTRL_STA_SPEED_POS, + MGMT_EPHY_CTRL_STA_SPEED_LEN); + pdata->phy_speed = (cur_speed == 2) ? SPEED_1000 : + (cur_speed == 1) ? SPEED_100 : + SPEED_10; + pdata->phy_duplex = FXGMAC_GET_REG_BITS( + regval, MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_POS, + MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_LEN); + hw_ops->config_mac_speed(pdata); + + hw_ops->enable_rx(pdata); + hw_ops->enable_tx(pdata); + netif_carrier_on(pdata->netdev); + if (netif_running(pdata->netdev)) { + netif_tx_wake_all_queues(pdata->netdev); + DPRINTK("%s now is link up, mac_speed=%d.\n", + FXGMAC_DRV_NAME, pdata->phy_speed); + } + } else { + netif_carrier_off(pdata->netdev); + netif_tx_stop_all_queues(pdata->netdev); + pdata->phy_speed = SPEED_UNKNOWN; + pdata->phy_duplex = DUPLEX_UNKNOWN; + hw_ops->disable_rx(pdata); + hw_ops->disable_tx(pdata); + DPRINTK("%s now is link down\n", FXGMAC_DRV_NAME); + } + } +} + +static int fxgmac_phy_poll(struct napi_struct *napi, int budget) +{ + struct fxgmac_pdata *pdata = + container_of(napi, struct fxgmac_pdata, expansion.napi_phy); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + fxgmac_phy_process(pdata); + if (napi_complete_done(napi, 0)) + hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_PHY_OTHER); + + return 0; +} + +static irqreturn_t fxgmac_phy_isr(int irq, void *data) +{ + struct fxgmac_pdata *pdata = data; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0); + if (!(regval & MGMT_INT_CTRL0_INT_STATUS_PHY)) + return IRQ_HANDLED; + + hw_ops->disable_msix_one_interrupt(pdata, MSI_ID_PHY_OTHER); + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); + if (napi_schedule_prep(&pdata->expansion.napi_phy)) { + __napi_schedule_irqoff(&pdata->expansion.napi_phy); + } + + return IRQ_HANDLED; +} + +static irqreturn_t fxgmac_isr(int irq, void *data) +{ + unsigned int dma_isr, dma_ch_isr, mac_isr; + struct fxgmac_pdata *pdata = data; + struct fxgmac_channel *channel; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + unsigned int i, ti, ri; + u32 val; + + dma_isr = readreg(pdata->pAdapter, pdata->mac_regs + DMA_ISR); + + val = readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0); + if (!(val & MGMT_INT_CTRL0_INT_STATUS_RXTXPHY_MASK)) + return IRQ_HANDLED; + + hw_ops->disable_mgm_interrupt(pdata); + pdata->expansion.mgm_intctrl_val = val; + + pdata->stats.mgmt_int_isr++; + + for (i = 0; i < pdata->channel_count; i++) { + channel = pdata->channel_head + i; + + dma_ch_isr = readl(FXGMAC_DMA_REG(channel, DMA_CH_SR)); + netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", + i, dma_ch_isr); + + /* The TI or RI interrupt bits may still be set even if using + * per channel DMA interrupts. Check to be sure those are not + * enabled before using the private data napi structure. 
+ */ + ti = FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS, + DMA_CH_SR_TI_LEN); + ri = FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS, + DMA_CH_SR_RI_LEN); + if (!pdata->per_channel_irq && (ti || ri)) { + if (napi_schedule_prep(&pdata->expansion.napi)) { + pdata->stats.napi_poll_isr++; + /* Turn on polling */ + __napi_schedule_irqoff(&pdata->expansion.napi); + } + } + + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS, + DMA_CH_SR_TPS_LEN)) + pdata->stats.tx_process_stopped++; + + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS, + DMA_CH_SR_RPS_LEN)) + pdata->stats.rx_process_stopped++; + + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS, + DMA_CH_SR_TBU_LEN)) + pdata->stats.tx_buffer_unavailable++; + + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS, + DMA_CH_SR_RBU_LEN)) + pdata->stats.rx_buffer_unavailable++; + + /* Restart the device on a Fatal Bus Error */ + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS, + DMA_CH_SR_FBE_LEN)) { + pdata->stats.fatal_bus_error++; + schedule_work(&pdata->expansion.restart_work); + } + + /* Clear all interrupt signals */ + writel(dma_ch_isr, FXGMAC_DMA_REG(channel, DMA_CH_SR)); + } + + if (FXGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS, + DMA_ISR_MACIS_LEN)) { + mac_isr = readl(pdata->mac_regs + MAC_ISR); + + if (FXGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS, + MAC_ISR_MMCTXIS_LEN)) + hw_ops->tx_mmc_int(pdata); + + if (FXGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS, + MAC_ISR_MMCRXIS_LEN)) + hw_ops->rx_mmc_int(pdata); + + /* Clear all interrupt signals */ + writel(mac_isr, (pdata->mac_regs + MAC_ISR)); + } + + if (pdata->expansion.mgm_intctrl_val & MGMT_INT_CTRL0_INT_STATUS_PHY) { + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, &val); + if (napi_schedule_prep(&pdata->expansion.napi)) { + pdata->stats.napi_poll_isr++; + /* Turn on polling */ + __napi_schedule_irqoff(&pdata->expansion.napi); + } + } + + return IRQ_HANDLED; +} + +static irqreturn_t fxgmac_dma_isr(int irq, void *data) +{ + struct fxgmac_channel *channel = data; + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 regval; + int message_id; + + if (irq == channel->expansion.dma_irq_tx) { + message_id = MSI_ID_TXQ0; + hw_ops->disable_msix_one_interrupt(pdata, message_id); + regval = 0; + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_SR_TI_POS, + DMA_CH_SR_TI_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + if (napi_schedule_prep(&channel->expansion.napi_tx)) { + __napi_schedule_irqoff(&channel->expansion.napi_tx); + } + } else { + message_id = channel->queue_index; + hw_ops->disable_msix_one_interrupt(pdata, message_id); + regval = 0; + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_SR_RI_POS, + DMA_CH_SR_RI_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + if (napi_schedule_prep(&channel->expansion.napi_rx)) { + __napi_schedule_irqoff(&channel->expansion.napi_rx); + } + } + + return IRQ_HANDLED; +} + +#if FXGMAC_TX_HANG_TIMER_EN +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) +static void fxgmac_tx_hang_timer_handler(struct timer_list *t) +#else +static void fxgmac_tx_hang_timer_handler(unsigned long data) +#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) + struct fxgmac_channel *channel = + from_timer(channel, t, expansion.tx_hang_timer); +#else + struct fxgmac_channel *channel = (struct fxgmac_channel *)data; +#endif + +#if 
FXGMAC_TX_HANG_CHECH_DIRTY + struct fxgmac_ring *ring = channel->tx_ring; +#endif + struct fxgmac_pdata *pdata = channel->pdata; + struct net_device *netdev = pdata->netdev; + unsigned int hw_reg_cur; + unsigned int regval; + +#if FXGMAC_TX_HANG_CHECH_DIRTY + hw_reg_cur = ring->dirty; +#else + hw_reg_cur = readl( + FXGMAC_DMA_REG(channel, 0x44 /* tx desc curr pointer reg */)); +#endif + if (hw_reg_cur == channel->expansion.tx_hang_hw_cur) { + /* hw current desc still stucked */ + if (!pdata->tx_hang_restart_queuing) { + pdata->tx_hang_restart_queuing = 1; + DPRINTK("tx_hang_timer_handler: restart scheduled, at desc %u, queuing=%u.\n", + channel->expansion.tx_hang_hw_cur, + pdata->tx_hang_restart_queuing); + + netif_tx_stop_all_queues(netdev); + + /* Disable MAC Rx */ + regval = readl(pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_CST_POS, + MAC_CR_CST_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_ACS_POS, + MAC_CR_ACS_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, + MAC_CR_RE_LEN, 0); + writel(regval, pdata->mac_regs + MAC_CR); + + schedule_work(&pdata->expansion.restart_work); + } + } + + channel->expansion.tx_hang_timer_active = 0; +} + +static void fxgmac_tx_hang_timer_start(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + + /* Start the Tx hang timer */ + if (1 && !channel->expansion.tx_hang_timer_active) { + channel->expansion.tx_hang_timer_active = 1; + + /* FXGMAC_INIT_DMA_TX_USECS is desc3 polling period, we give 2 more checking period */ + mod_timer(&channel->expansion.tx_hang_timer, + jiffies + usecs_to_jiffies(FXGMAC_INIT_DMA_TX_USECS * + 10)); + } +} +#endif + +static void fxgmac_napi_enable(struct fxgmac_pdata *pdata, unsigned int add) +{ + struct fxgmac_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (add) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + netif_napi_add_weight( + pdata->netdev, + &channel->expansion.napi_rx, + fxgmac_one_poll_rx, NAPI_POLL_WEIGHT); +#else + netif_napi_add(pdata->netdev, + &channel->expansion.napi_rx, + fxgmac_one_poll_rx, + NAPI_POLL_WEIGHT); +#endif + } + napi_enable(&channel->expansion.napi_rx); + + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + netif_napi_add_weight( + pdata->netdev, + &channel->expansion.napi_tx, + fxgmac_one_poll_tx, NAPI_POLL_WEIGHT); +#else + netif_napi_add(pdata->netdev, + &channel->expansion.napi_tx, + fxgmac_one_poll_tx, + NAPI_POLL_WEIGHT); +#endif + napi_enable(&channel->expansion.napi_tx); + } + if (netif_msg_drv(pdata)) + DPRINTK("napi_enable, msix ch%d napi enabled done, add=%d\n", + i, add); + } + + /* for phy */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + netif_napi_add_weight(pdata->netdev, &pdata->expansion.napi_phy, + fxgmac_phy_poll, NAPI_POLL_WEIGHT); +#else + netif_napi_add(pdata->netdev, &pdata->expansion.napi_phy, + fxgmac_phy_poll, NAPI_POLL_WEIGHT); +#endif + napi_enable(&pdata->expansion.napi_phy); + } else { + i = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN); + if (!i) { + if (add) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + netif_napi_add_weight(pdata->netdev, + &pdata->expansion.napi, + fxgmac_all_poll, + NAPI_POLL_WEIGHT); +#else + netif_napi_add(pdata->netdev, + &pdata->expansion.napi, + fxgmac_all_poll, + NAPI_POLL_WEIGHT); +#endif + } + + 
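+			/* one shared NAPI instance serves all channels in
+			 * legacy/MSI mode; the LEGACY_NAPI_FREE flag set just
+			 * below marks it live so fxgmac_napi_disable() tears
+			 * it down exactly once
+			 */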
napi_enable(&pdata->expansion.napi); + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, 1); + } + } +} + +static void fxgmac_napi_disable(struct fxgmac_pdata *pdata, unsigned int del) +{ + struct fxgmac_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + napi_disable(&channel->expansion.napi_rx); + + if (del) { + netif_napi_del( + &channel->expansion.napi_rx); + } + + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + napi_disable( + &channel->expansion.napi_tx); + netif_napi_del( + &channel->expansion.napi_tx); + } + if (netif_msg_drv(pdata)) + DPRINTK("napi_disable, msix ch%d napi disabled done, del=%d\n", + i, del); + } + + napi_disable(&pdata->expansion.napi_phy); + netif_napi_del(&pdata->expansion.napi_phy); + } + } else { + i = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN); + if (i) { + napi_disable(&pdata->expansion.napi); + + if (del) + netif_napi_del(&pdata->expansion.napi); + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, 0); + } + } +} + +static int fxgmac_request_irqs(struct fxgmac_pdata *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct fxgmac_channel *channel; + unsigned int i; + int ret; + u32 msi, msix, need_free; + + msi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MSI_POS, FXGMAC_FLAG_MSI_LEN); + + msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MSIX_POS, FXGMAC_FLAG_MSIX_LEN); + + need_free = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN); + + if (!msix) { + if (!need_free) { + ret = devm_request_irq(pdata->dev, pdata->dev_irq, + fxgmac_isr, + msi ? 
0 : IRQF_SHARED, + netdev->name, pdata); + if (ret) { + netdev_alert( + netdev, + "error requesting irq %d, ret = %d\n", + pdata->dev_irq, ret); + return ret; + } + + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, 1); + } + } + + if (!pdata->per_channel_irq) + return 0; + + ret = devm_request_irq(pdata->dev, pdata->expansion.phy_irq, + fxgmac_phy_isr, 0, netdev->name, pdata); + if (ret) { + netdev_alert(netdev, "error requesting phy irq %d, ret = %d\n", + pdata->expansion.phy_irq, ret); + return ret; + } + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + snprintf(channel->expansion.dma_irq_name, + sizeof(channel->expansion.dma_irq_name) - 1, + "%s-ch%d-Rx-%u", netdev_name(netdev), i, + channel->queue_index); + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + snprintf(channel->expansion.dma_irq_name_tx, + sizeof(channel->expansion.dma_irq_name_tx) - 1, + "%s-ch%d-Tx-%u", netdev_name(netdev), i, + channel->queue_index); + + ret = devm_request_irq( + pdata->dev, channel->expansion.dma_irq_tx, + fxgmac_dma_isr, 0, + channel->expansion.dma_irq_name_tx, channel); + + if (ret) { + DPRINTK("fxgmac_req_irqs, err with MSIx irq request for ch %d tx, ret=%d\n", + i, ret); + /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ + devm_free_irq(pdata->dev, + channel->expansion.dma_irq_tx, + channel); + return ret; + } + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_req_irqs, MSIx irq_tx request ok, ch=%d, irq=%d,%s\n", + i, channel->expansion.dma_irq_tx, + channel->expansion.dma_irq_name_tx); + } + ret = devm_request_irq(pdata->dev, channel->dma_irq, + fxgmac_dma_isr, 0, + channel->expansion.dma_irq_name, + channel); + if (ret) { + netdev_alert(netdev, "error requesting irq %d\n", + channel->dma_irq); + goto err_irq; + } + } + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_req_irqs, MSIx irq request ok, total=%d,%d~%d\n", + i, (pdata->channel_head)[0].dma_irq, + (pdata->channel_head)[i - 1].dma_irq); + return 0; + +err_irq: + DPRINTK("fxgmac_req_irqs, err with MSIx irq request at %d, ret=%d\n", i, + ret); + + if (pdata->per_channel_irq) { + for (i--, channel--; i < pdata->channel_count; i--, channel--) { + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + devm_free_irq(pdata->dev, + channel->expansion.dma_irq_tx, + channel); + } + devm_free_irq(pdata->dev, channel->dma_irq, channel); + } + + devm_free_irq(pdata->dev, pdata->expansion.phy_irq, pdata); + } + return ret; +} + +static void fxgmac_free_irqs(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel; + unsigned int i = 0; + u32 need_free, msix; + + msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MSIX_POS, FXGMAC_FLAG_MSIX_LEN); + + need_free = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN); + + if (!msix) { + if (need_free) { + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, 0); + } + } + + if (!pdata->per_channel_irq) + return; + + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + devm_free_irq(pdata->dev, + channel->expansion.dma_irq_tx, + channel); + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_free_irqs, MSIx irq_tx clear done, ch=%d\n", + i); + } + 
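+			/* every channel owns an Rx vector; a Tx vector exists
+			 * only on channels that pass
+			 * FXGMAC_IS_CHANNEL_WITH_TX_IRQ() and was freed above
+			 */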
devm_free_irq(pdata->dev, channel->dma_irq, channel); + } + + devm_free_irq(pdata->dev, pdata->expansion.phy_irq, pdata); + } + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_free_irqs, MSIx rx irq clear done, total=%d\n", + i); +} + +void fxgmac_free_tx_data(struct fxgmac_pdata *pdata) +{ + struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; + struct fxgmac_desc_data *desc_data; + struct fxgmac_channel *channel; + struct fxgmac_ring *ring; + unsigned int i, j; + + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->tx_ring; + if (!ring) + break; + + for (j = 0; j < ring->dma_desc_count; j++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, j); + desc_ops->unmap_desc_data(pdata, desc_data); + } + } + } +} + +void fxgmac_free_rx_data(struct fxgmac_pdata *pdata) +{ + struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; + struct fxgmac_desc_data *desc_data; + struct fxgmac_channel *channel; + struct fxgmac_ring *ring; + unsigned int i, j; + + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->rx_ring; + if (!ring) + break; + + for (j = 0; j < ring->dma_desc_count; j++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, j); + desc_ops->unmap_desc_data(pdata, desc_data); + } + } + } +} + +/* + * since kernel does not clear the MSI mask bits and + * this function clear MSI mask bits when MSI is enabled. + */ +static int fxgmac_disable_pci_msi_config(struct pci_dev *pdev) +{ + u16 pcie_cap_offset; + u32 pcie_msi_mask_bits; + int ret = 0; + + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_MSI); + if (pcie_cap_offset) { + ret = pci_read_config_dword(pdev, pcie_cap_offset, + &pcie_msi_mask_bits); + if (ret) { + printk(KERN_ERR + "read pci config space MSI cap. failed, %d\n", + ret); + ret = -EFAULT; + } + } + + pcie_msi_mask_bits = FXGMAC_SET_REG_BITS(pcie_msi_mask_bits, + PCI_CAP_ID_MSI_ENABLE_POS, + PCI_CAP_ID_MSI_ENABLE_LEN, 0); + ret = pci_write_config_dword(pdev, pcie_cap_offset, pcie_msi_mask_bits); + if (ret) { + printk(KERN_ERR "write pci config space MSI mask failed, %d\n", + ret); + ret = -EFAULT; + } + + return ret; +} + +static int fxgmac_disable_pci_msix_config(struct pci_dev *pdev) +{ + u16 pcie_cap_offset; + u32 pcie_msi_mask_bits; + int ret = 0; + + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (pcie_cap_offset) { + ret = pci_read_config_dword(pdev, pcie_cap_offset, + &pcie_msi_mask_bits); + if (ret) { + printk(KERN_ERR + "read pci config space MSIX cap. failed, %d\n", + ret); + ret = -EFAULT; + } + } + + pcie_msi_mask_bits = FXGMAC_SET_REG_BITS(pcie_msi_mask_bits, + PCI_CAP_ID_MSIX_ENABLE_POS, + PCI_CAP_ID_MSIX_ENABLE_LEN, 0); + ret = pci_write_config_dword(pdev, pcie_cap_offset, pcie_msi_mask_bits); + if (ret) { + printk(KERN_ERR "write pci config space MSIX mask failed, %d\n", + ret); + ret = -EFAULT; + } + + return ret; +} + +int fxgmac_start(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct net_device *netdev = pdata->netdev; + int ret; + unsigned int pcie_low_power = 0; + u32 regval; + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac start callin here.\n"); + + /* must reset software again here, to avoid flushing tx queue error caused by the system only run probe + * when installing driver on the arm platform. 
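+	 * (probe alone leaves stale hardware state behind, which would
+	 * otherwise trip the Tx queue flush on the first start)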
+ */ + hw_ops->exit(pdata); + + if (FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_POS, + FXGMAC_FLAG_LEGACY_LEN)) { + /* + * we should disable msi and msix here when we use legacy interrupt, for two reasons: + * 1. Exit will restore msi and msix config regisiter, that may enable them. + * 2. When the driver that uses the msix interrupt by default is compiled + * into the OS, uninstall the driver through rmmod, and then install the + * driver that uses the legacy interrupt, at which time the msix enable + * will be turned on again by default after waking up from S4 on some platform. + * such as UOS platform. + */ + ret = fxgmac_disable_pci_msi_config(pdata->pdev); + ret |= fxgmac_disable_pci_msix_config(pdata->pdev); + if (ret) + return ret; + } + + hw_ops->reset_phy(pdata); + hw_ops->release_phy(pdata); + hw_ops->pcie_init(pdata, pcie_low_power & PCIE_LP_ASPM_LTR, + pcie_low_power & PCIE_LP_ASPM_L1SS, + pcie_low_power & PCIE_LP_ASPM_L1, + pcie_low_power & PCIE_LP_ASPM_L0S); + hw_ops->config_power_up(pdata); + + fxgmac_dismiss_all_int(pdata); + + ret = hw_ops->init(pdata); + if (ret) { + printk("fxgmac hw init error.\n"); + return ret; + } + fxgmac_napi_enable(pdata, 1); + + ret = fxgmac_request_irqs(pdata); + if (ret) + goto err_napi; + + hw_ops->enable_tx(pdata); + hw_ops->enable_rx(pdata); + + /* config interrupt to level signal */ + regval = (u32)readl((const volatile void *)(pdata->mac_regs + DMA_MR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_INTM_POS, DMA_MR_INTM_LEN, + 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_QUREAD_POS, + DMA_MR_QUREAD_LEN, 1); + writel(regval, pdata->mac_regs + DMA_MR); + + writel(0xF0000000, + (volatile void *)(netdev->base_addr + MGMT_INT_CTRL0)); + + hw_ops->set_interrupt_moderation(pdata); + + if (pdata->per_channel_irq) + hw_ops->enable_msix_rxtxphyinterrupt(pdata); + + fxgmac_enable_rx_tx_ints(pdata); + + hw_ops->led_under_active(pdata); + + return 0; + +err_napi: + fxgmac_napi_disable(pdata, 1); + hw_ops->exit(pdata); + DPRINTK("fxgmac start callout with irq err.\n"); + return ret; +} + +void fxgmac_stop(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct net_device *netdev = pdata->netdev; + struct fxgmac_channel *channel; + struct netdev_queue *txq; + unsigned int i; + + if (pdata->per_channel_irq) { + hw_ops->disable_msix_interrupt(pdata); + } else { + hw_ops->disable_mgm_interrupt(pdata); + } + + pdata->expansion.phy_link = false; + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + hw_ops->disable_tx(pdata); + hw_ops->disable_rx(pdata); + fxgmac_free_irqs(pdata); + fxgmac_napi_disable(pdata, 1); + + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + continue; + + txq = netdev_get_tx_queue(netdev, channel->queue_index); + netdev_tx_reset_queue(txq); + } + } + + switch (pdata->expansion.current_state) { + case CURRENT_STATE_SUSPEND: + hw_ops->led_under_sleep(pdata); + break; + case CURRENT_STATE_SHUTDOWN: + case CURRENT_STATE_RESTART: + hw_ops->led_under_shutdown(pdata); + break; + case CURRENT_STATE_CLOSE: + break; + default: + break; + } +} + +void fxgmac_restart_dev(struct fxgmac_pdata *pdata) +{ + int ret; + + /* If not running, "restart" will happen on open */ + if (!netif_running(pdata->netdev)) + return; + + pdata->expansion.current_state = CURRENT_STATE_RESTART; + fxgmac_stop(pdata); + + fxgmac_free_tx_data(pdata); + fxgmac_free_rx_data(pdata); + + ret = 
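+	      /* fxgmac_start() redoes the full init: hw exit/init, IRQ
+	       * request and NAPI setup
+	       */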
fxgmac_start(pdata);
+	if (ret) {
+		printk("fxgmac_restart_dev: fxgmac_start failed.\n");
+	}
+}
+
+static void fxgmac_restart(struct work_struct *work)
+{
+	struct fxgmac_pdata *pdata =
+		container_of(work, struct fxgmac_pdata, expansion.restart_work);
+
+	rtnl_lock();
+
+	fxgmac_restart_dev(pdata);
+
+	rtnl_unlock();
+}
+
+void fxgmac_net_powerup(struct fxgmac_pdata *pdata)
+{
+	int ret;
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+	if (netif_msg_drv(pdata))
+		DPRINTK("fxgmac_net_powerup callin\n");
+
+	/* signal that we are up now */
+	pdata->expansion.powerstate = 0; /* clear all bits as normal now */
+	if (__test_and_set_bit(FXGMAC_POWER_STATE_UP,
+			       &pdata->expansion.powerstate)) {
+		return; /* do nothing if already up */
+	}
+
+	ret = fxgmac_start(pdata);
+	if (ret) {
+		printk("fxgmac_net_powerup: fxgmac_start error\n");
+		return;
+	}
+
+	/* must run after fxgmac_start, which enables ARP offload again */
+	hw_ops->disable_arp_offload(pdata);
+
+	if (netif_msg_drv(pdata)) {
+		DPRINTK("fxgmac_net_powerup callout, powerstate=%ld.\n",
+			pdata->expansion.powerstate);
+	}
+}
+
+void fxgmac_net_powerdown(struct fxgmac_pdata *pdata, unsigned int wol)
+{
+	struct net_device *netdev = pdata->netdev;
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+	if (netif_msg_drv(pdata))
+		DPRINTK("fxgmac_net_powerdown callin here.\n");
+
+	/* signal that we are down to the interrupt handler */
+	if (__test_and_set_bit(FXGMAC_POWER_STATE_DOWN,
+			       &pdata->expansion.powerstate))
+		return; /* do nothing if already down */
+
+	if (netif_msg_drv(pdata))
+		DPRINTK("fxgmac_net_powerdown continue with down process.\n");
+	/* the phy polling timer detects this state and stops link status
+	 * polling accordingly
+	 */
+
+	__clear_bit(FXGMAC_POWER_STATE_UP, &pdata->expansion.powerstate);
+
+	/* Shut off incoming Tx traffic */
+	netif_tx_stop_all_queues(netdev);
+
+	/* call carrier off first to avoid false dev_watchdog timeouts */
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	/* Disable Rx */
+	hw_ops->disable_rx(pdata);
+
+	/* synchronize_rcu() needed for pending XDP buffers to drain */
+	synchronize_rcu();
+
+	fxgmac_stop(pdata); /* some of this work is redundant here */
+
+	/* must be called after the software reset */
+	hw_ops->pre_power_down(pdata, false);
+
+	/* set mac to low-power mode and enable wol accordingly */
+	hw_ops->config_power_down(pdata, wol);
+
+	/* handle VFs here if they are involved */
+
+	/* similar work to the restart() path; a later resume re-allocates these */
+	fxgmac_free_tx_data(pdata);
+	fxgmac_free_rx_data(pdata);
+
+	if (netif_msg_drv(pdata))
+		DPRINTK("fxgmac_net_powerdown callout, powerstate=%ld.\n",
+			pdata->expansion.powerstate);
+}
+
+static int fxgmac_open(struct net_device *netdev)
+{
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	struct fxgmac_desc_ops *desc_ops;
+	int ret;
+
+	if (netif_msg_drv(pdata))
+		DPRINTK("fxgmac_open callin\n");
+
+	desc_ops = &pdata->desc_ops;
+
+	/* TODO: Initialize the phy */
+
+	/* Calculate the Rx buffer size before allocating rings */
+	ret = fxgmac_calc_rx_buf_size(netdev, netdev->mtu);
+	if (ret < 0)
+		return ret;
+	pdata->rx_buf_size = ret;
+
+	/* Allocate the channels and rings */
+	ret = desc_ops->alloc_channles_and_rings(pdata);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&pdata->expansion.restart_work, fxgmac_restart);
+
+	ret = fxgmac_start(pdata);
+	if (ret)
+		goto err_channels_and_rings;
+
+	if (netif_msg_drv(pdata))
+		DPRINTK("fxgmac_open callout\n");
+
+	return 0;
+
+err_channels_and_rings:
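+	/* fxgmac_start() failed; release the channels and rings allocated
+	 * above
+	 */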
desc_ops->free_channels_and_rings(pdata); + DPRINTK("fxgmac_open callout with channel alloc err\n"); + return ret; +} + +static int fxgmac_close(struct net_device *netdev) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_desc_ops *desc_ops; + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_close callin\n"); + + desc_ops = &pdata->desc_ops; + + pdata->expansion.current_state = + (pdata->expansion.current_state == CURRENT_STATE_SHUTDOWN) ? + pdata->expansion.current_state : + CURRENT_STATE_CLOSE; + + /* Stop the device */ + fxgmac_stop(pdata); + + /* Free the channels and rings */ + desc_ops->free_channels_and_rings(pdata); + + pdata->hw_ops.reset_phy(pdata); + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_close callout\n"); + + return 0; +} + +#if ((LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))) +static void fxgmac_tx_timeout(struct net_device *netdev) +#else +static void fxgmac_tx_timeout(struct net_device *netdev, unsigned int unused) +#endif +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + + netdev_warn(netdev, "tx timeout, device restarting\n"); +#if FXGMAC_TX_HANG_TIMER_EN + if (!pdata->tx_hang_restart_queuing) + schedule_work(&pdata->expansion.restart_work); +#else + schedule_work(&pdata->expansion.restart_work); +#endif +} + +static int fxgmac_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_pkt_info *tx_pkt_info; + struct fxgmac_desc_ops *desc_ops; + struct fxgmac_channel *channel; + struct fxgmac_hw_ops *hw_ops; + struct netdev_queue *txq; + struct fxgmac_ring *ring; + int ret; + + desc_ops = &pdata->desc_ops; + hw_ops = &pdata->hw_ops; + + if (netif_msg_tx_done(pdata)) + DPRINTK("xmit callin, skb->len=%d, q=%d\n", skb->len, + skb->queue_mapping); + + channel = pdata->channel_head + skb->queue_mapping; + txq = netdev_get_tx_queue(netdev, channel->queue_index); + ring = channel->tx_ring; + tx_pkt_info = &ring->pkt_info; + + if (skb->len == 0) { + netif_err(pdata, tx_err, netdev, + "empty skb received from stack\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* Prepare preliminary packet info for TX */ + memset(tx_pkt_info, 0, sizeof(*tx_pkt_info)); + fxgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info); + + /* Check that there are enough descriptors available */ + ret = fxgmac_maybe_stop_tx_queue(channel, ring, + tx_pkt_info->desc_count); + if (ret) { + return ret; + } + + ret = fxgmac_prep_tso(pdata, skb, tx_pkt_info); + if (ret) { + netif_err(pdata, tx_err, netdev, + "error processing TSO packet\n"); + DPRINTK("dev_xmit, tx err for TSO\n"); + dev_kfree_skb_any(skb); + return ret; + } + fxgmac_prep_vlan(skb, tx_pkt_info); + + if (!desc_ops->map_tx_skb(channel, skb)) { + dev_kfree_skb_any(skb); + DPRINTK("xmit, map tx skb err\n"); + return NETDEV_TX_OK; + } + + /* Report on the actual number of bytes (to be) sent */ + netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes); + if (netif_msg_tx_done(pdata)) + DPRINTK("xmit, before hw_xmit, byte len=%d\n", + tx_pkt_info->tx_bytes); + + /* Configure required descriptor fields for transmission */ + hw_ops->dev_xmit(channel); +#if FXGMAC_DUMMY_TX_DEBUG + DPRINTK("tx hw_ops->dev_xmit ok\n"); +#endif + if (netif_msg_pktdata(pdata)) + fxgmac_dbg_pkt(netdev, skb, true); + + /* Stop the queue in advance if there may not be enough descriptors */ + fxgmac_maybe_stop_tx_queue(channel, ring, FXGMAC_TX_MAX_DESC_NR); + + return NETDEV_TX_OK; +} + +#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(4, 12, 0)) +static void fxgmac_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *s) +#else +static struct rtnl_link_stats64 *fxgmac_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *s) +#endif +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_stats *pstats = &pdata->stats; + +#if FXGMAC_PM_FEATURE_ENABLED + /* 20210709 for net power down */ + if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) +#endif + { + pdata->hw_ops.read_mmc_stats(pdata); + } + s->rx_packets = pstats->rxframecount_gb; + s->rx_bytes = pstats->rxoctetcount_gb; + s->rx_errors = pstats->rxframecount_gb - pstats->rxbroadcastframes_g - + pstats->rxmulticastframes_g - pstats->rxunicastframes_g; + s->multicast = pstats->rxmulticastframes_g; + s->rx_length_errors = pstats->rxlengtherror; + s->rx_crc_errors = pstats->rxcrcerror; + s->rx_fifo_errors = pstats->rxfifooverflow; + + s->tx_packets = pstats->txframecount_gb; + s->tx_bytes = pstats->txoctetcount_gb; + s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g; + s->tx_dropped = netdev->stats.tx_dropped; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + return s; +#endif +} + +static int fxgmac_set_mac_address(struct net_device *netdev, void *addr) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) + eth_hw_addr_set(netdev, saddr->sa_data); +#else + memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); +#endif + memcpy(pdata->mac_addr, saddr->sa_data, netdev->addr_len); + + hw_ops->set_mac_address(pdata, saddr->sa_data); + hw_ops->set_mac_hash(pdata); + + DPRINTK("fxgmac, set mac addr to %02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], + netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); + return 0; +} + +/* cmd = [0x89F0, 0x89FF] */ +static int fxgmac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct file f; + int ret = FXGMAC_SUCCESS; + struct fxgmac_pdata *pdata = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -ENODEV; + + f.private_data = pdata; + + switch (cmd) { + case FXGMAC_DEV_CMD: + ret = fxgmac_dbg_netdev_ops_ioctl( + &f, FXGMAC_IOCTL_DFS_COMMAND, + (unsigned long)(ifr->ifr_data)); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) +static int fxgmac_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) +{ + return fxgmac_ioctl(dev, ifr, cmd); +} +#endif + +static int fxgmac_change_mtu(struct net_device *netdev, int mtu) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + int ret; +#ifdef FXGMAC_DEBUG + int old_mtu = netdev->mtu; +#endif + + fxgmac_stop(pdata); + fxgmac_free_tx_data(pdata); + + /* We must unmap rx desc's dma before we change rx_buf_size. */ + /* Becaues the size of the unmapped DMA is set according to rx_buf_size */ + fxgmac_free_rx_data(pdata); + + pdata->jumbo = mtu > ETH_DATA_LEN ? 1 : 0; + + ret = fxgmac_calc_rx_buf_size(netdev, mtu); + if (ret < 0) + return ret; + + pdata->rx_buf_size = ret; + netdev->mtu = mtu; + + if (netif_running(netdev)) + fxgmac_start(pdata); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + DPRINTK("fxgmac, set MTU from %d to %d. 
min, max=(%d,%d)\n", old_mtu, + netdev->mtu, netdev->min_mtu, netdev->max_mtu); +#else + DPRINTK("fxgmac, set MTU from %d to %d.\n", old_mtu, netdev->mtu); +#endif + + return 0; +} + +static int fxgmac_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + set_bit(vid, pdata->active_vlans); +#if FXGMAC_FILTER_SINGLE_VLAN_ENABLED + pdata->vlan = vid; + hw_ops->enable_rx_vlan_filtering(pdata); +#else + hw_ops->update_vlan_hash_table(pdata); +#endif + DPRINTK("fxgmac, add rx vlan %d\n", vid); + return 0; +} + +static int fxgmac_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + clear_bit(vid, pdata->active_vlans); +#if FXGMAC_FILTER_SINGLE_VLAN_ENABLED + pdata->vlan = 0; + hw_ops->disable_rx_vlan_filtering(pdata); +#else + hw_ops->update_vlan_hash_table(pdata); +#endif + + DPRINTK("fxgmac, del rx vlan %d\n", vid); + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void fxgmac_poll_controller(struct net_device *netdev) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) + fxgmac_dma_isr(channel->dma_irq, channel); + } else { + disable_irq(pdata->dev_irq); + fxgmac_isr(pdata->dev_irq, pdata); + enable_irq(pdata->dev_irq); + } +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int fxgmac_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter, tso; + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + int ret = 0; + + rxhash = pdata->expansion.netdev_features & NETIF_F_RXHASH; + rxcsum = pdata->expansion.netdev_features & NETIF_F_RXCSUM; + rxvlan = pdata->expansion.netdev_features & NETIF_F_HW_VLAN_CTAG_RX; + rxvlan_filter = pdata->expansion.netdev_features & + NETIF_F_HW_VLAN_CTAG_FILTER; + tso = pdata->expansion.netdev_features & (NETIF_F_TSO | NETIF_F_TSO6); + + if ((features & (NETIF_F_TSO | NETIF_F_TSO6)) && !tso) { + printk("enable tso.\n"); + pdata->hw_feat.tso = 1; + hw_ops->config_tso(pdata); + } else if (!(features & (NETIF_F_TSO | NETIF_F_TSO6)) && tso) { + printk("disable tso.\n"); + pdata->hw_feat.tso = 0; + hw_ops->config_tso(pdata); + } + + if ((features & NETIF_F_RXHASH) && !rxhash) + ret = hw_ops->enable_rss(pdata); + else if (!(features & NETIF_F_RXHASH) && rxhash) + ret = hw_ops->disable_rss(pdata); + if (ret) + return ret; + + if ((features & NETIF_F_RXCSUM) && !rxcsum) + hw_ops->enable_rx_csum(pdata); + else if (!(features & NETIF_F_RXCSUM) && rxcsum) + hw_ops->disable_rx_csum(pdata); + + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) + hw_ops->enable_rx_vlan_stripping(pdata); + else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan) + hw_ops->disable_rx_vlan_stripping(pdata); + + if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter) + hw_ops->enable_rx_vlan_filtering(pdata); + else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter) + hw_ops->disable_rx_vlan_filtering(pdata); + + pdata->expansion.netdev_features = features; + + DPRINTK("fxgmac, set features done,%llx\n", (u64)features); + return 0; +} + +static void fxgmac_set_rx_mode(struct net_device *netdev) +{ + struct fxgmac_pdata *pdata = 
netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + hw_ops->config_rx_mode(pdata); +} + +static const struct net_device_ops fxgmac_netdev_ops = { + .ndo_open = fxgmac_open, + .ndo_stop = fxgmac_close, + .ndo_start_xmit = fxgmac_xmit, + .ndo_tx_timeout = fxgmac_tx_timeout, + .ndo_get_stats64 = fxgmac_get_stats64, + .ndo_change_mtu = fxgmac_change_mtu, + .ndo_set_mac_address = fxgmac_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = fxgmac_ioctl, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + .ndo_siocdevprivate = fxgmac_siocdevprivate, +#endif + .ndo_vlan_rx_add_vid = fxgmac_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = fxgmac_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fxgmac_poll_controller, +#endif + .ndo_set_features = fxgmac_set_features, + .ndo_set_rx_mode = fxgmac_set_rx_mode, +}; + +const struct net_device_ops *fxgmac_get_netdev_ops(void) +{ + return &fxgmac_netdev_ops; +} + +static void fxgmac_rx_refresh(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->rx_ring; + struct fxgmac_desc_data *desc_data; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + while (ring->dirty != ring->cur) { + desc_data = FXGMAC_GET_DESC_DATA(ring, ring->dirty); + hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty); + ring->dirty = + FXGMAC_GET_ENTRY(ring->dirty, ring->dma_desc_count); + } + + /* Make sure everything is written before the register write */ + wmb(); + + /* Update the Rx Tail Pointer Register with address of + * the last cleaned entry + */ + desc_data = FXGMAC_GET_DESC_DATA( + ring, (ring->dirty - 1) & (ring->dma_desc_count - 1)); + writel(lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_RDTR_LO)); +} + +static struct sk_buff *fxgmac_create_skb(struct fxgmac_pdata *pdata, + struct napi_struct *napi, + struct fxgmac_desc_data *desc_data, + unsigned int len) +{ + struct sk_buff *skb; + skb = __netdev_alloc_skb_ip_align(pdata->netdev, len, GFP_ATOMIC); + if (!skb) { + netdev_err(pdata->netdev, "%s: Rx init fails; skb is NULL\n", + __func__); + return NULL; + } + + dma_sync_single_for_cpu(pdata->dev, desc_data->rx.buf.dma_base, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, desc_data->skb->data, len); + skb_put(skb, len); + dma_sync_single_for_device(pdata->dev, desc_data->rx.buf.dma_base, len, + DMA_FROM_DEVICE); + + return skb; +} + +static int fxgmac_tx_poll(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->tx_ring; + struct net_device *netdev = pdata->netdev; + unsigned int tx_packets = 0, tx_bytes = 0; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_desc_ops *desc_ops; + struct fxgmac_hw_ops *hw_ops; + struct netdev_queue *txq; + int processed = 0; + unsigned int cur; + + static int fxgmac_restart_need; + static u32 change_cnt; + static u32 reg_cur_pre = 0xffffffff; + +#if FXGMAC_TX_HANG_TIMER_EN + static u32 reg_cur; +#endif + + desc_ops = &pdata->desc_ops; + hw_ops = &pdata->hw_ops; + + /* Nothing to do if there isn't a Tx ring for this channel */ + if (!ring) { + if (netif_msg_tx_done(pdata) && + (channel->queue_index < pdata->tx_q_count)) + DPRINTK("tx_poll, null point to ring %d\n", + channel->queue_index); + return 0; + } + if ((ring->cur != ring->dirty) && (netif_msg_tx_done(pdata))) + DPRINTK("tx_poll callin, ring_cur=%d, ring_dirty=%d, qIdx=%d\n", + ring->cur, ring->dirty, 
channel->queue_index); + + cur = ring->cur; + + /* Be sure we get ring->cur before accessing descriptor data */ + smp_rmb(); + + txq = netdev_get_tx_queue(netdev, channel->queue_index); + + while (ring->dirty != cur) { + desc_data = FXGMAC_GET_DESC_DATA(ring, ring->dirty); + dma_desc = desc_data->dma_desc; + + if (!hw_ops->tx_complete(dma_desc)) { +#if FXGMAC_TRIGGER_TX_HANG + struct net_device *netdev = pdata->netdev; +#define FXGMAC_HANG_THRESHOLD 1 + reg_cur = readl(FXGMAC_DMA_REG( + channel, 0x44 /* tx desc curr pointer reg */)); + + if (reg_cur != reg_cur_pre) { + reg_cur_pre = reg_cur; + change_cnt = 0; + } else { + change_cnt++; + } + + if (change_cnt > 2) { + DPRINTK("after complete check, cur=%d, dirty=%d, qIdx=%d, hw desc cur=%#x, pre=%#x\n", + ring->cur, ring->dirty, + channel->queue_index, reg_cur, + reg_cur_pre); + + if ((ring->cur > ring->dirty) && + ((ring->cur - ring->dirty) > + FXGMAC_HANG_THRESHOLD)) { + DPRINTK("after complete check warning..., too many TBD occupied by HW, 0xdbbb, %d.\n", + (ring->cur - ring->dirty)); + (*((u32 *)(netdev->base_addr + + 0x1000))) = 0xdbbb; + + if (!fxgmac_restart_need) { + schedule_work( + &pdata->expansion + .restart_work); + fxgmac_restart_need = 1; + change_cnt = 0; + } + } else if ((ring->cur < ring->dirty) && + ((ring->cur + (ring->dma_desc_count - + ring->dirty)) > + FXGMAC_HANG_THRESHOLD)) { + DPRINTK("after complete check warning..., too many TBD occupied by HW, 0xdb00, %d.\n", + (ring->cur + + (ring->dma_desc_count - + ring->dirty))); + (*((u32 *)(netdev->base_addr + + 0x1000))) = 0xdb00; + + if (!fxgmac_restart_need) { + schedule_work( + &pdata->expansion + .restart_work); + fxgmac_restart_need = 1; + change_cnt = 0; + } + } + } +#endif +#if FXGMAC_TX_HANG_TIMER_EN + if ((!pdata->tx_hang_restart_queuing) && + (!channel->expansion.tx_hang_timer_active)) { + reg_cur = ring->dirty; + if (reg_cur_pre != reg_cur) { + reg_cur_pre = reg_cur; + change_cnt = 0; + } else { + change_cnt++; + } + + if (change_cnt > 4) { +#if FXGMAC_TX_HANG_CHECH_DIRTY + channel->expansion.tx_hang_hw_cur = + ring->dirty; +#else + channel->expansion + .tx_hang_hw_cur = readl(FXGMAC_DMA_REG( + channel, + 0x44 /* tx desc curr pointer reg */)); +#endif + /* double check for race conditione */ + if ((!pdata->tx_hang_restart_queuing) && + (!channel->expansion + .tx_hang_timer_active)) { + DPRINTK("tx_hang polling: start timer at desc %u, timer act=%u, queuing=%u, qidx=%u.\n", + reg_cur, + channel->expansion + .tx_hang_timer_active, + pdata->tx_hang_restart_queuing, + channel->queue_index); + fxgmac_tx_hang_timer_start( + channel); + } + } + } +#endif + + break; + } + + reg_cur_pre = 0xffffffff; + fxgmac_restart_need = 0; + change_cnt = 0; + + /* Make sure descriptor fields are read after reading + * the OWN bit + */ + dma_rmb(); + + if (netif_msg_tx_done(pdata)) + fxgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0); + + if (hw_ops->is_last_desc(dma_desc)) { + tx_packets += desc_data->tx.packets; + tx_bytes += desc_data->tx.bytes; + } + + /* Free the SKB and reset the descriptor for re-use */ + desc_ops->unmap_desc_data(pdata, desc_data); + hw_ops->tx_desc_reset(desc_data); + + processed++; + ring->dirty = + FXGMAC_GET_ENTRY(ring->dirty, ring->dma_desc_count); + } + + if (!processed) + return 0; + + netdev_tx_completed_queue(txq, tx_packets, tx_bytes); + + if ((ring->tx.queue_stopped == 1) && + (fxgmac_tx_avail_desc(ring) > FXGMAC_TX_DESC_MIN_FREE)) { + ring->tx.queue_stopped = 0; + netif_tx_wake_queue(txq); + } + + if (netif_msg_tx_done(pdata)) { + 
DPRINTK("tx_poll callout, processed=%d\n", processed); + } + + return processed; +} + +static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->rx_ring; + struct net_device *netdev = pdata->netdev; + unsigned int len; + unsigned int context_next, context; + struct fxgmac_desc_data *desc_data; + struct fxgmac_pkt_info *pkt_info; + unsigned int incomplete; + struct fxgmac_hw_ops *hw_ops; + struct napi_struct *napi; + struct sk_buff *skb; + int packet_count = 0; + u32 ipce, iphe; + + hw_ops = &pdata->hw_ops; + + /* Nothing to do if there isn't a Rx ring for this channel */ + if (!ring) + return 0; + + incomplete = 0; + context_next = 0; + + napi = (pdata->per_channel_irq) ? &channel->expansion.napi_rx : + &pdata->expansion.napi; + + desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); + pkt_info = &ring->pkt_info; + + while (packet_count < budget) { + memset(pkt_info, 0, sizeof(*pkt_info)); + skb = NULL; + len = 0; + +read_again: + desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); + + if (fxgmac_rx_dirty_desc(ring) > FXGMAC_RX_DESC_MAX_DIRTY) + fxgmac_rx_refresh(channel); + + if (hw_ops->dev_read(channel)) + break; + + ring->cur = FXGMAC_GET_ENTRY(ring->cur, ring->dma_desc_count); + + incomplete = FXGMAC_GET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, + RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN); + context_next = FXGMAC_GET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN); + context = FXGMAC_GET_REG_BITS(pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_LEN); + + if (incomplete || context_next) + goto read_again; + + if (pkt_info->errors) { + netif_err(pdata, rx_err, netdev, + "error in received packet\n"); + dev_kfree_skb(skb); + goto next_packet; + } + + if (!context) { + len = desc_data->rx.len; + if (len > pdata->rx_buf_size) { + if (net_ratelimit()) + netdev_err( + pdata->netdev, + "len %d larger than size (%d)\n", + len, pdata->rx_buf_size); + pdata->netdev->stats.rx_dropped++; + goto next_packet; + } + + if (len == 0) { + if (net_ratelimit()) + netdev_err( + pdata->netdev, + "A packet of length 0 was received\n"); + pdata->netdev->stats.rx_length_errors++; + goto next_packet; + } + + if (len && !skb) { + skb = fxgmac_create_skb(pdata, napi, desc_data, + len); + if (unlikely(!skb)) { + if (net_ratelimit()) + netdev_warn( + pdata->netdev, + "create skb failed\n"); + goto next_packet; + } + } + } + + if (!skb) + goto next_packet; + + if (netif_msg_pktdata(pdata)) + fxgmac_print_pkt(netdev, skb, false); + + skb_checksum_none_assert(skb); + if (netdev->features & NETIF_F_RXCSUM) { + ipce = FXGMAC_GET_REG_BITS_LE( + desc_data->dma_desc->desc1, + RX_NORMAL_DESC1_WB_IPCE_POS, + RX_NORMAL_DESC1_WB_IPCE_LEN); + iphe = FXGMAC_GET_REG_BITS_LE( + desc_data->dma_desc->desc1, + RX_NORMAL_DESC1_WB_IPHE_POS, + RX_NORMAL_DESC1_WB_IPHE_LEN); + /* if csum error, let the stack verify checksum errors.otherwise don't verify */ + if (!ipce && !iphe && + FXGMAC_GET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, + RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + if (FXGMAC_GET_REG_BITS(pkt_info->attributes, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + pkt_info->vlan_ctag); + pdata->stats.rx_vlan_packets++; + } + + if 
(FXGMAC_GET_REG_BITS(pkt_info->attributes, + RX_PACKET_ATTRIBUTES_RSS_HASH_POS, + RX_PACKET_ATTRIBUTES_RSS_HASH_LEN)) + skb_set_hash(skb, pkt_info->rss_hash, + pkt_info->rss_hash_type); + + skb->dev = netdev; + skb->protocol = eth_type_trans(skb, netdev); + skb_record_rx_queue(skb, channel->queue_index); + + if (pdata->expansion.fxgmac_test_tso_flag) { + /* tso test */ + if (pdata->expansion.fxgmac_test_tso_seg_num == 1) { + /* last segment */ + if (pdata->expansion.fxgmac_test_last_tso_len == + skb->len + FXGMAC_TEST_MAC_HEAD_LEN) { + /* receive last segment, reset flag */ + pdata->expansion.fxgmac_test_tso_flag = + false; + pdata->expansion + .fxgmac_test_tso_seg_num = 0; + pdata->expansion.fxgmac_test_packet_len = + 0; + pdata->expansion + .fxgmac_test_last_tso_len = 0; + + /* process packet */ + if ((pdata->expansion + .fxgmac_test_skb_arr_in_index + + 1) % FXGMAC_MAX_DBG_TEST_PKT != + pdata->expansion + .fxgmac_test_skb_arr_out_index) { + struct sk_buff *tmpskb = + skb_copy(skb, + GFP_ATOMIC); + skb_push( + tmpskb, + FXGMAC_TEST_MAC_HEAD_LEN); + + pdata->expansion.fxgmac_test_skb_array + [pdata->expansion + .fxgmac_test_skb_arr_in_index] = + tmpskb; + pdata->expansion + .fxgmac_test_skb_arr_in_index = + (pdata->expansion + .fxgmac_test_skb_arr_in_index + + 1) % + FXGMAC_MAX_DBG_TEST_PKT; + } else { + DPRINTK("loopback test buffer is full."); + } + } + } else { /* non last segment */ + if (pdata->expansion.fxgmac_test_packet_len == + skb->len + FXGMAC_TEST_MAC_HEAD_LEN) { + /* receive a segment */ + pdata->expansion + .fxgmac_test_tso_seg_num--; + + /* process packet */ + if ((pdata->expansion + .fxgmac_test_skb_arr_in_index + + 1) % FXGMAC_MAX_DBG_TEST_PKT != + pdata->expansion + .fxgmac_test_skb_arr_out_index) { + struct sk_buff *tmpskb = + skb_copy(skb, + GFP_ATOMIC); + skb_push( + tmpskb, + FXGMAC_TEST_MAC_HEAD_LEN); + + pdata->expansion.fxgmac_test_skb_array + [pdata->expansion + .fxgmac_test_skb_arr_in_index] = + tmpskb; + pdata->expansion + .fxgmac_test_skb_arr_in_index = + (pdata->expansion + .fxgmac_test_skb_arr_in_index + + 1) % + FXGMAC_MAX_DBG_TEST_PKT; + } else { + DPRINTK("loopback test buffer is full."); + } + } + } + } else if (pdata->expansion.fxgmac_test_packet_len != 0) { + /* xsum and phy loopback test */ + if (pdata->expansion.fxgmac_test_packet_len == + skb->len + FXGMAC_TEST_MAC_HEAD_LEN) { + /* reset fxg_packet_len */ + pdata->expansion.fxgmac_test_packet_len = 0; + + if ((pdata->expansion + .fxgmac_test_skb_arr_in_index + + 1) % FXGMAC_MAX_DBG_TEST_PKT != + pdata->expansion + .fxgmac_test_skb_arr_out_index) { + struct sk_buff *tmpskb = + skb_copy(skb, GFP_ATOMIC); + skb_push(tmpskb, + FXGMAC_TEST_MAC_HEAD_LEN); + + pdata->expansion.fxgmac_test_skb_array + [pdata->expansion + .fxgmac_test_skb_arr_in_index] = + tmpskb; + pdata->expansion + .fxgmac_test_skb_arr_in_index = + (pdata->expansion + .fxgmac_test_skb_arr_in_index + + 1) % + FXGMAC_MAX_DBG_TEST_PKT; + } else { + DPRINTK("loopback test buffer is full."); + } + } + } + napi_gro_receive(napi, skb); + +next_packet: + packet_count++; + + pdata->netdev->stats.rx_packets++; + pdata->netdev->stats.rx_bytes += len; + } + + fxgmac_rx_refresh(channel); + + return packet_count; +} + +static int fxgmac_one_poll_tx(struct napi_struct *napi, int budget) +{ + struct fxgmac_channel *channel = + container_of(napi, struct fxgmac_channel, expansion.napi_tx); + int ret = 0; + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + ret = fxgmac_tx_poll(channel); +#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	if (napi_complete_done(napi, 0)) {
+		hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_TXQ0);
+	}
+#else
+	napi_complete(napi);
+	hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_TXQ0);
+#endif
+	return 0;
+}
+
+static int fxgmac_one_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct fxgmac_channel *channel =
+		container_of(napi, struct fxgmac_channel, expansion.napi_rx);
+	int processed = 0;
+	struct fxgmac_pdata *pdata = channel->pdata;
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+	processed = fxgmac_rx_poll(channel, budget);
+	if (processed < budget) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+		/* If no further interrupt fired while this poll was running,
+		 * the napi state is NAPIF_STATE_SCHED; napi_complete_done()
+		 * returns true and we can re-enable the IRQ without causing
+		 * an unbalanced-irq issue.
+		 * If another interrupt did fire meanwhile, napi_schedule_prep()
+		 * has set NAPIF_STATE_SCHED | NAPIF_STATE_MISSED;
+		 * napi_complete_done() then returns false and re-schedules
+		 * the poll because of NAPIF_STATE_MISSED, so re-enabling the
+		 * IRQ here would be unbalanced.
+		 */
+		if (napi_complete_done(napi, processed)) {
+			hw_ops->enable_msix_one_interrupt(pdata,
+							  channel->queue_index);
+		}
+#else
+		napi_complete(napi);
+		hw_ops->enable_msix_one_interrupt(pdata, channel->queue_index);
+#endif
+	}
+
+	return processed;
+}
+
+static int fxgmac_all_poll(struct napi_struct *napi, int budget)
+{
+	struct fxgmac_pdata *pdata =
+		container_of(napi, struct fxgmac_pdata, expansion.napi);
+	struct fxgmac_channel *channel;
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	int processed;
+	unsigned int i;
+
+	if (netif_msg_rx_status(pdata)) {
+		DPRINTK("rx all_poll callin budget=%d\n", budget);
+	}
+
+	processed = 0;
+	do {
+		channel = pdata->channel_head;
+		/* Clean up the Tx ring first; only one Tx channel is
+		 * supported in this version, so always poll channel 0.
+		 */
+		fxgmac_tx_poll(pdata->channel_head + 0);
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			processed += fxgmac_rx_poll(channel, budget);
+		}
+	} while (false);
+
+	/* The PHY event carries no packets, so it adds nothing to processed */
+	if (pdata->expansion.mgm_intctrl_val & MGMT_INT_CTRL0_INT_STATUS_PHY) {
+		fxgmac_phy_process(pdata);
+		pdata->expansion.mgm_intctrl_val &=
+			~MGMT_INT_CTRL0_INT_STATUS_PHY;
+	}
+
+	/* If we processed everything, we are done */
+	if (processed < budget) {
+		/* Turn off polling */
+		if (napi_complete_done(napi, processed))
+			hw_ops->enable_mgm_interrupt(pdata);
+	}
+
+	if ((processed) && (netif_msg_rx_status(pdata))) {
+		DPRINTK("rx all_poll callout received = %d\n", processed);
+	}
+
+	return processed;
+}
diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c
new file mode 100644
index 0000000000000000000000000000000000000000..f6f8f4f6a5e9bfa10e2f184cb511b4821e12f9da
--- /dev/null
+++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Motorcomm Corporation. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+/* for file operation */
+#include <linux/fs.h>
+
+#include "fuxi-gmac.h"
+#include "fuxi-gmac-reg.h"
+
+#define FXGMAC_DBG 0
+
+/* declarations */
+static void fxgmac_shutdown(struct pci_dev *pdev);
+
+static int fxgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+	struct device *dev = &pcidev->dev;
+	struct fxgmac_resources res;
+	int i, ret;
+
+	ret = pcim_enable_device(pcidev);
+	if (ret) {
+		dev_err(dev, "ERROR: fxgmac_probe failed to enable device\n");
+		return ret;
+	}
+
+	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+		if (pci_resource_len(pcidev, i) == 0)
+			continue;
+		ret = pcim_iomap_regions(pcidev, BIT(i), FXGMAC_DRV_NAME);
+		if (ret)
+			goto err_disable_device;
+		break;
+	}
+
+	pci_set_master(pcidev);
+
+	memset(&res, 0, sizeof(res));
+	res.irq = pcidev->irq;
+	res.addr = pcim_iomap_table(pcidev)[i];
+
+	ret = fxgmac_drv_probe(&pcidev->dev, &res);
+	if (ret)
+		goto err_disable_device;
+
+	return ret;
+
+err_disable_device:
+	pci_disable_device(pcidev);
+	return ret;
+}
+
+static void fxgmac_remove(struct pci_dev *pcidev)
+{
+	struct net_device *netdev = dev_get_drvdata(&pcidev->dev);
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+#ifdef CONFIG_PCI_MSI
+	u32 msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags,
+				       FXGMAC_FLAG_MSIX_POS,
+				       FXGMAC_FLAG_MSIX_LEN);
+#endif
+
+	fxgmac_drv_remove(&pcidev->dev);
+#ifdef CONFIG_PCI_MSI
+	if (msix) {
+		pci_disable_msix(pcidev);
+		kfree(pdata->expansion.msix_entries);
+		pdata->expansion.msix_entries = NULL;
+	}
+#endif
+
+#ifdef HAVE_FXGMAC_DEBUG_FS
+	fxgmac_dbg_exit(pdata);
+#endif /* HAVE_FXGMAC_DEBUG_FS */
+}
+
+/* for Power management, 20210628 */
+static int __fxgmac_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	u32 wufc = pdata->expansion.wol;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	DPRINTK("fxpm,_fxgmac_shutdown, callin\n");
+
+	rtnl_lock();
+
+	/* for a Linux shutdown we treat this as power-off, so WoL can be
+	 * ignored; for suspend we do need WoL for recovery
+	 */
+	fxgmac_net_powerdown(pdata, (unsigned int)!!wufc);
+	netif_device_detach(netdev);
+	rtnl_unlock();
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval) {
+		DPRINTK("fxpm,_fxgmac_shutdown, save pci state failed.\n");
+		return retval;
+	}
+#endif
+
+	DPRINTK("fxpm,_fxgmac_shutdown, save pci state done.\n");
+
+	pci_wake_from_d3(pdev, !!wufc);
+	*enable_wake = !!wufc;
+
+	pci_disable_device(pdev);
+
+	DPRINTK("fxpm,_fxgmac_shutdown callout, enable wake=%d.\n",
+		*enable_wake);
+
+	return 0;
+}
+
+static void fxgmac_shutdown(struct pci_dev *pdev)
+{
+	bool wake;
+	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+	DPRINTK("fxpm, fxgmac_shutdown callin\n");
+
+	pdata->expansion.current_state = CURRENT_STATE_SHUTDOWN;
+	__fxgmac_shutdown(pdev, &wake);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+	DPRINTK("fxpm, fxgmac_shutdown callout, system power off=%d\n",
+		(system_state == SYSTEM_POWER_OFF) ?
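+		/* prints 1 only for a real power-off, not a reboot */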
1 : 0); +} + +#ifdef CONFIG_PM +/* yzhang, 20210628 for PM */ +static int fxgmac_suspend(struct pci_dev *pdev, + pm_message_t __always_unused state) +{ + int retval; + bool wake; + struct net_device *netdev = dev_get_drvdata(&pdev->dev); + struct fxgmac_pdata *pdata = netdev_priv(netdev); + + DPRINTK("fxpm, fxgmac_suspend callin\n"); + + pdata->expansion.current_state = CURRENT_STATE_SUSPEND; + + if (netif_running(netdev)) { + retval = __fxgmac_shutdown(pdev, &wake); + if (retval) + return retval; + } else { + wake = !!(pdata->expansion.wol); + } + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + DPRINTK("fxpm, fxgmac_suspend callout to %s\n", + wake ? "sleep" : "D3hot"); + + return 0; +} + +static int fxgmac_resume(struct pci_dev *pdev) +{ + struct fxgmac_pdata *pdata; + struct net_device *netdev; + u32 err; + + DPRINTK("fxpm, fxgmac_resume callin\n"); + + netdev = dev_get_drvdata(&pdev->dev); + pdata = netdev_priv(netdev); + + pdata->expansion.current_state = CURRENT_STATE_RESUME; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(pdata->dev, + "fxgmac_resume, failed to enable PCI device from suspend\n"); + return err; + } + smp_mb__before_atomic(); + __clear_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate); + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + rtnl_lock(); + err = 0; + if (!err && netif_running(netdev)) + fxgmac_net_powerup(pdata); + + if (!err) + netif_device_attach(netdev); + + rtnl_unlock(); + + DPRINTK("fxpm, fxgmac_resume callout\n"); + + return err; +} +#endif + +static const struct pci_device_id fxgmac_pci_tbl[] = { { PCI_DEVICE(0x1f0a, + 0x6801) }, + { 0 } }; +MODULE_DEVICE_TABLE(pci, fxgmac_pci_tbl); + +static struct pci_driver fxgmac_pci_driver = { + .name = FXGMAC_DRV_NAME, + .id_table = fxgmac_pci_tbl, + .probe = fxgmac_probe, + .remove = fxgmac_remove, +#ifdef CONFIG_PM + /* currently, we only use USE_LEGACY_PM_SUPPORT */ + .suspend = fxgmac_suspend, + .resume = fxgmac_resume, +#endif + .shutdown = fxgmac_shutdown, +}; + +module_pci_driver(fxgmac_pci_driver); + +MODULE_DESCRIPTION(FXGMAC_DRV_DESC); +MODULE_VERSION(FXGMAC_DRV_VERSION); +MODULE_AUTHOR("Frank "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c new file mode 100644 index 0000000000000000000000000000000000000000..88066a110f410fe633336d024ec2e82c9d3edbda --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. 
diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c new file mode 100644 index 0000000000000000000000000000000000000000..88066a110f410fe633336d024ec2e82c9d3edbda --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Motorcomm Corporation. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "fuxi-gmac.h"
+#include "fuxi-gmac-reg.h"
+
+void fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed)
+{
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	u32 regval = 0;
+	unsigned int high_bit = 0, low_bit = 0;
+
+	switch (speed) {
+	case SPEED_1000:
+		high_bit = 1;
+		low_bit = 0;
+		break;
+	case SPEED_100:
+		high_bit = 0;
+		low_bit = 1;
+		break;
+	case SPEED_10:
+		high_bit = 0;
+		low_bit = 0;
+		break;
+	default:
+		break;
+	}
+
+	/* disable autoneg */
+	hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &regval);
+	regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS,
+				     PHY_CR_AUTOENG_LEN, 0);
+	regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_H_POS,
+				     PHY_CR_SPEED_SEL_H_LEN, high_bit);
+	regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_L_POS,
+				     PHY_CR_SPEED_SEL_L_LEN, low_bit);
+	hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval);
+}
+
+void fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex)
+{
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	u32 regval = 0;
+
+	hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &regval);
+	regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_DUPLEX_POS,
+				     PHY_CR_DUPLEX_LEN, (duplex ? 1 : 0));
+	hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval);
+}
+
+void fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg)
+{
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	u32 regval = 0;
+
+	hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &regval);
+	regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS,
+				     PHY_CR_AUTOENG_LEN, (autoneg ? 1 : 0));
+	hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval);
+}
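
[Editor's note] For orientation, a hedged sketch of how a caller might pin the link to 100M/full when autonegotiation has to be bypassed, using the three helpers above (the wrapper name and calling context are illustrative, not part of this patch):

/* Illustrative only: force 100M full-duplex with autoneg off. */
static void fxgmac_phy_fixup_100full(struct fxgmac_pdata *pdata)
{
	fxgmac_phy_force_speed(pdata, SPEED_100);	/* also clears BMCR autoneg */
	fxgmac_phy_force_duplex(pdata, 1);		/* BMCR duplex = full */
	fxgmac_phy_force_autoneg(pdata, 0);		/* keep autoneg disabled */
}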
+/*
+ * output:
+ *	cap_mask, bit definitions:
+ *	pause capability and the 100/10 capabilities follow the definition
+ *	of MII reg4; for the 1000M capability, bit0 = 1000M half and
+ *	bit1 = 1000M full, refer to MII reg9 [9:8].
+ */
+int fxgmac_ephy_autoneg_ability_get(struct fxgmac_pdata *pdata,
+				    unsigned int *cap_mask)
+{
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	static const unsigned int mii_adv_bits[] = {
+		FXGMAC_ADVERTISE_10HALF, FXGMAC_ADVERTISE_10FULL,
+		FXGMAC_ADVERTISE_100HALF, FXGMAC_ADVERTISE_100FULL,
+		FXGMAC_ADVERTISE_PAUSE_CAP, FXGMAC_ADVERTISE_PAUSE_ASYM,
+	};
+	unsigned int val;
+	unsigned int i;
+
+	if (!hw_ops->read_ephy_reg || !hw_ops->write_ephy_reg)
+		return -1;
+
+	if (hw_ops->read_ephy_reg(pdata, REG_MII_ADVERTISE, &val) < 0)
+		goto busy_exit;
+
+	/* the advertisement capabilities share the MII reg4 bit layout */
+	for (i = 0; i < ARRAY_SIZE(mii_adv_bits); i++) {
+		if (val & mii_adv_bits[i])
+			*cap_mask |= mii_adv_bits[i];
+		else
+			*cap_mask &= ~mii_adv_bits[i];
+	}
+
+	if (hw_ops->read_ephy_reg(pdata, REG_MII_CTRL1000, &val) < 0)
+		goto busy_exit;
+
+	if (val & REG_BIT_ADVERTISE_1000HALF)
+		*cap_mask |= FXGMAC_ADVERTISE_1000HALF;
+	else
+		*cap_mask &= ~FXGMAC_ADVERTISE_1000HALF;
+
+	if (val & REG_BIT_ADVERTISE_1000FULL)
+		*cap_mask |= FXGMAC_ADVERTISE_1000FULL;
+	else
+		*cap_mask &= ~FXGMAC_ADVERTISE_1000FULL;
+
+	return 0;
+
+busy_exit:
+	DPRINTK("fxgmac_ephy_autoneg_ability_get exit due to ephy reg access fail.\n");
+
+	return -1;
+}
+
+int fxgmac_ephy_soft_reset(struct fxgmac_pdata *pdata)
+{
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	unsigned int val;
+	int busy = 15;
+	int ret;
+
+	ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &val);
+	if (ret < 0)
+		goto busy_exit;
+
+	ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, (val | 0x8000));
+	if (ret < 0)
+		goto busy_exit;
+
+	do {
+		ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &val);
+		busy--;
+	} while (ret >= 0 && (val & 0x8000) && busy);
+
+	if (!(val & 0x8000))
+		return 0;
+
+	DPRINTK("fxgmac_ephy_soft_reset, timeout, busy=%d.\n", busy);
+	return -EBUSY;
+
+busy_exit:
+	DPRINTK("fxgmac_ephy_soft_reset exit due to ephy reg access fail.\n");
+
+	return ret;
+}
+
+/* Double-check the reported speed; on fiber there is no 10M, so correct
+ * for that.
+ */
+static int fxgmac_ephy_adjust_status(u32 lport, int val, int is_utp, int *speed,
+				     int *duplex)
+{
+	int speed_mode;
+
+	*speed = -1;
+	*duplex = (val & BIT(FUXI_EPHY_DUPLEX_BIT)) >> FUXI_EPHY_DUPLEX_BIT;
+	speed_mode = (val & FUXI_EPHY_SPEED_MODE) >> FUXI_EPHY_SPEED_MODE_BIT;
+	switch (speed_mode) {
+	case 0:
+		if (is_utp)
+			*speed = SPEED_10M;
+		break;
+	case 1:
+		*speed = SPEED_100M;
+		break;
+	case 2:
+		*speed = SPEED_1000M;
+		break;
+	case 3:
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
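
[Editor's note] The polling loop in fxgmac_ephy_soft_reset() above re-reads BMCR with no delay between attempts. Assuming its callers run in process context (not verified here), a bounded sleep between polls would be gentler on the MDIO bus; a sketch, needing <linux/delay.h> (the function name is illustrative):

/* Sketch only: poll BMCR reset completion with a sleep between reads. */
static int fxgmac_ephy_soft_reset_polled(struct fxgmac_pdata *pdata)
{
	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
	unsigned int val;
	int busy = 15;
	int ret;

	ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &val);
	if (ret < 0)
		return ret;

	ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, val | 0x8000);
	if (ret < 0)
		return ret;

	do {
		usleep_range(1000, 2000);	/* PHY resets typically settle in ~1 ms */
		ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &val);
	} while (ret >= 0 && (val & 0x8000) && --busy);

	if (ret < 0)
		return ret;
	return (val & 0x8000) ? -EBUSY : 0;
}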
+/*
+ * Poll the ephy link status.
+ * output:
+ *	speed: SPEED_10M, SPEED_100M, SPEED_1000M or -1;
+ *	duplex: 0 or 1, see reg 0x11, bit FUXI_EPHY_DUPLEX_BIT;
+ *	ret_link: 0 or 1, link down or up;
+ *	media: valid only when ret_link=1; (FUXI_EPHY_SMI_SEL_SDS_SGMII + 1)
+ *	for fiber, (FUXI_EPHY_SMI_SEL_PHY + 1) for UTP, -1 for link down.
+ */
+int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, int *duplex,
+			   int *ret_link, int *media)
+{
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	int link_utp = 0, link_fiber = 0;
+	unsigned int val;
+	int link;
+	int ret;
+
+	ret = hw_ops->read_ephy_reg(pdata, REG_MII_SPEC_STATUS, &val);
+	if (ret < 0)
+		goto busy_exit;
+
+	link = val & BIT(FUXI_EPHY_LINK_STATUS_BIT);
+	if (link) {
+		link_utp = 1;
+		fxgmac_ephy_adjust_status(0, val, 1, speed, duplex);
+	} else {
+		link_utp = 0;
+	}
+
+	if (link_utp || link_fiber) {
+		/* fiber takes priority */
+		if (link_utp)
+			*media = (FUXI_EPHY_SMI_SEL_PHY + 1);
+		if (link_fiber)
+			*media = (FUXI_EPHY_SMI_SEL_SDS_SGMII + 1);
+
+		*ret_link = 1;
+	} else {
+		*ret_link = 0;
+		*media = -1;
+		*speed = -1;
+		*duplex = -1;
+	}
+
+	return 0;
+
+busy_exit:
+	DPRINTK("fxgmac_ephy_status_get exit due to ephy reg access fail.\n");
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h new file mode 100644 index 0000000000000000000000000000000000000000..65d6288e6869a369f38c98e905cfea18042ee2c2 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h @@ -0,0 +1,1894 @@ +/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Motorcomm Corporation. */
+
+#ifndef __FUXI_GMAC_REG_H__
+#define __FUXI_GMAC_REG_H__
+
+#define AISC_MODE
+
+#define FUXI_REV_01 0x01 /* The first NTO version. */
+#define FUXI_REV_03 0x03 /* ECO back on 07/2023. */
+
+/* MAC register offsets */
+#define MAC_OFFSET 0x2000
+#define MAC_CR 0x0000 /* The MAC Configuration Register */
+#define MAC_ECR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_HTR0 0x0010
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_TX_RX_STA 0x00b8
+#define MAC_PMT_STA 0x00c0
+/* This is the FIFO address; the pointer is increased
+ * automatically after writing.
+ */
+#define MAC_RWK_PAC 0x00c4
+#define MAC_LPI_STA 0x00d0
+#define MAC_LPI_CONTROL 0x00d4
+#define MAC_LPI_TIMER 0x00d8
+#define MAC_MS_TIC_COUNTER 0x00dc
+#define MAC_AN_SR 0x00E4
+#define MAC_PHYIF_STA 0x00F8
+#define MAC_VR 0x0110
+#define MAC_DBG_STA 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_HWF3R 0x0128
+#define MAC_MDIO_ADDRESS 0x0200
+#define MAC_MDIO_DATA 0x0204
+#define MAC_GPIO_SR 0x020c
+#define MAC_ARP_PROTO_ADDR 0x0210
+#define MAC_CSR_SW_CTRL 0x0230
+
+/* mac[5]->bit15:8, mac[4]->bit7:0 */
+#define MAC_MACA0HR 0x0300
+/* mac[0]->bit7:0, mac[1]->bit15:8, mac[2]->bit23:16, mac[3]->bit31:24 */
+#define MAC_MACA0LR 0x0304
+
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+
+#define MAC_RSSCR 0x3c80
+#define MAC_RSSAR 0x3c88
+#define MAC_RSSDR 0x3c8c
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_POS 18
+#define MAC_HWF0R_ADDMACADRSEL_LEN 5
+#define MAC_HWF0R_ARPOFFSEL_POS 9
+#define MAC_HWF0R_ARPOFFSEL_LEN 1
+#define MAC_HWF0R_EEESEL_POS 13
+#define MAC_HWF0R_EEESEL_LEN 1
+#define MAC_HWF0R_ACTPHYIFSEL_POS 28
+#define MAC_HWF0R_ACTPHYIFSEL_LEN 3
+#define MAC_HWF0R_MGKSEL_POS 7
+#define MAC_HWF0R_MGKSEL_LEN 1
+#define MAC_HWF0R_MMCSEL_POS 8
+#define MAC_HWF0R_MMCSEL_LEN 1
+#define MAC_HWF0R_RWKSEL_POS 6
+#define MAC_HWF0R_RWKSEL_LEN 1
+#define MAC_HWF0R_RXCOESEL_POS 16
+#define MAC_HWF0R_RXCOESEL_LEN 1
+#define MAC_HWF0R_SAVLANINS_POS 27
+#define MAC_HWF0R_SAVLANINS_LEN 1
+#define MAC_HWF0R_SMASEL_POS 5
+#define MAC_HWF0R_SMASEL_LEN 1
+#define MAC_HWF0R_TSSEL_POS 12
+#define MAC_HWF0R_TSSEL_LEN 1
+#define MAC_HWF0R_TSSTSSEL_POS 25
+#define MAC_HWF0R_TSSTSSEL_LEN 2
+#define MAC_HWF0R_TXCOESEL_POS 14
+#define MAC_HWF0R_TXCOESEL_LEN 1
+#define MAC_HWF0R_VLHASH_POS 4
+#define MAC_HWF0R_VLHASH_LEN 1
+#define MAC_HWF1R_ADDR64_POS 14
+#define MAC_HWF1R_ADDR64_LEN 2
+#define MAC_HWF1R_ADVTHWORD_POS 13
+#define MAC_HWF1R_ADVTHWORD_LEN 1
+#define MAC_HWF1R_DBGMEMA_POS 19
+#define MAC_HWF1R_DBGMEMA_LEN 1
+#define MAC_HWF1R_DCBEN_POS 16
+#define MAC_HWF1R_DCBEN_LEN 1
+#define MAC_HWF1R_HASHTBLSZ_POS 24
+#define MAC_HWF1R_HASHTBLSZ_LEN 2
+#define MAC_HWF1R_L3L4FNUM_POS 27
+#define MAC_HWF1R_L3L4FNUM_LEN 4
+#define MAC_HWF1R_RAVSEL_POS 21
+#define MAC_HWF1R_RAVSEL_LEN 1
+#define MAC_HWF1R_AVSEL_POS 20
+#define MAC_HWF1R_AVSEL_LEN 1
+#define MAC_HWF1R_RXFIFOSIZE_POS 0
+#define MAC_HWF1R_RXFIFOSIZE_LEN 5
+#define MAC_HWF1R_SPHEN_POS 17
+#define MAC_HWF1R_SPHEN_LEN 1
+#define MAC_HWF1R_TSOEN_POS 18
+#define MAC_HWF1R_TSOEN_LEN 1
+#define MAC_HWF1R_TXFIFOSIZE_POS 6
+#define MAC_HWF1R_TXFIFOSIZE_LEN 5
+#define MAC_HWF2R_AUXSNAPNUM_POS 28
+#define MAC_HWF2R_AUXSNAPNUM_LEN 3
+#define MAC_HWF2R_PPSOUTNUM_POS 24
+#define MAC_HWF2R_PPSOUTNUM_LEN 3
+#define MAC_HWF2R_RXCHCNT_POS 12
+#define MAC_HWF2R_RXCHCNT_LEN 4
+#define MAC_HWF2R_RXQCNT_POS 0
+#define MAC_HWF2R_RXQCNT_LEN 4
+#define MAC_HWF2R_TXCHCNT_POS 18
+#define MAC_HWF2R_TXCHCNT_LEN 4
+#define MAC_HWF2R_TXQCNT_POS 6
+#define MAC_HWF2R_TXQCNT_LEN 4
+#define MAC_IER_TSIE_POS 12
+#define MAC_IER_TSIE_LEN 1
+#define MAC_ISR_MMCRXIS_POS 9
+#define MAC_ISR_MMCRXIS_LEN 1
+#define MAC_ISR_MMCTXIS_POS 10
+#define MAC_ISR_MMCTXIS_LEN 1
+#define MAC_ISR_PMTIS_POS 4
+#define MAC_ISR_PMTIS_LEN 1
+#define MAC_ISR_TSIS_POS 12
+#define MAC_ISR_TSIS_LEN 1
+#define 
MAC_MACA1HR_AE_POS 31 +#define MAC_MACA1HR_AE_LEN 1 +#define MAC_PFR_HMC_POS 2 +#define MAC_PFR_HMC_LEN 1 +#define MAC_PFR_HPF_POS 10 +#define MAC_PFR_HPF_LEN 1 +#define MAC_PFR_PM_POS 4 /* Pass all Multicast. */ +#define MAC_PFR_PM_LEN 1 +#define MAC_PFR_DBF_POS 5 /* Disable Broadcast Packets. */ +#define MAC_PFR_DBF_LEN 1 +/* Hash Unicast. 0x0 (DISABLE). compares the DA field with + * the values programmed in DA registers. + */ +#define MAC_PFR_HUC_POS 1 +#define MAC_PFR_HUC_LEN 1 +#define MAC_PFR_PR_POS 0 /* Enable Promiscuous Mode. */ +#define MAC_PFR_PR_LEN 1 +#define MAC_PFR_VTFE_POS 16 +#define MAC_PFR_VTFE_LEN 1 +#define MAC_Q0TFCR_PT_POS 16 +#define MAC_Q0TFCR_PT_LEN 16 +#define MAC_Q0TFCR_TFE_POS 1 +#define MAC_Q0TFCR_TFE_LEN 1 +#define MAC_CR_ARPEN_POS 31 +#define MAC_CR_ARPEN_LEN 1 +#define MAC_CR_ACS_POS 20 +#define MAC_CR_ACS_LEN 1 +#define MAC_CR_CST_POS 21 +#define MAC_CR_CST_LEN 1 +#define MAC_CR_IPC_POS 27 +#define MAC_CR_IPC_LEN 1 +#define MAC_CR_JE_POS 16 +#define MAC_CR_JE_LEN 1 +#define MAC_CR_LM_POS 12 +#define MAC_CR_LM_LEN 1 +#define MAC_CR_RE_POS 0 +#define MAC_CR_RE_LEN 1 +#define MAC_CR_PS_POS 15 +#define MAC_CR_PS_LEN 1 +#define MAC_CR_FES_POS 14 +#define MAC_CR_FES_LEN 1 +#define MAC_CR_DM_POS 13 +#define MAC_CR_DM_LEN 1 +#define MAC_CR_TE_POS 1 +#define MAC_CR_TE_LEN 1 +#define MAC_ECR_DCRCC_POS 16 +#define MAC_ECR_DCRCC_LEN 1 +#define MAC_ECR_HDSMS_POS 20 +#define MAC_ECR_HDSMS_LEN 3 +#define MAC_RFCR_PFCE_POS 8 +#define MAC_RFCR_PFCE_LEN 1 +#define MAC_RFCR_RFE_POS 0 +#define MAC_RFCR_RFE_LEN 1 +#define MAC_RFCR_UP_POS 1 +#define MAC_RFCR_UP_LEN 1 +#define MAC_RQC0R_RXQ0EN_POS 0 +#define MAC_RQC0R_RXQ0EN_LEN 2 +#define MAC_LPIIE_POS 5 +#define MAC_LPIIE_LEN 1 +#define MAC_LPIATE_POS 20 +#define MAC_LPIATE_LEN 1 +#define MAC_LPITXA_POS 19 +#define MAC_LPITXA_LEN 1 +#define MAC_PLS_POS 17 +#define MAC_PLS_LEN 1 +#define MAC_LPIEN_POS 16 +#define MAC_LPIEN_LEN 1 +#define MAC_LPI_ENTRY_TIMER 8 +#define MAC_LPIET_POS 3 +#define MAC_LPIET_LEN 17 +#define MAC_TWT_TIMER 0x10 +#define MAC_TWT_POS 0 +#define MAC_TWT_LEN 16 +#define MAC_LST_TIMER 2 +#define MAC_LST_POS 16 +#define MAC_LST_LEN 10 +#define MAC_MS_TIC 24 +#define MAC_MS_TIC_POS 0 +#define MAC_MS_TIC_LEN 12 + +/* RSS table */ +#define MAC_RSSAR_ADDRT_POS 2 +#define MAC_RSSAR_ADDRT_LEN 1 +#define MAC_RSSAR_CT_POS 1 +#define MAC_RSSAR_CT_LEN 1 +#define MAC_RSSAR_OB_POS 0 +#define MAC_RSSAR_OB_LEN 1 +#define MAC_RSSAR_RSSIA_POS 8 +#define MAC_RSSAR_RSSIA_LEN 8 +/* RSS control and options */ +/* note, below options definitions are used only for pdata->options, + * not for register, so the position is not consistent with register. 
+ * [0] ipv4 + * [1] tcpv4 + * [2] udpv4 + * [3] ipv6 + * [4] tcpv6 + * [5] udpv6 + */ +#define MAC_RSSCR_IP4TE_POS 0 +#define MAC_RSSCR_IP4TE_LEN 1 +#define MAC_RSSCR_IP6TE_POS 3 +#define MAC_RSSCR_IP6TE_LEN 1 +#define MAC_RSSCR_TCP4TE_POS 1 +#define MAC_RSSCR_TCP4TE_LEN 1 +#define MAC_RSSCR_UDP4TE_POS 2 +#define MAC_RSSCR_UDP4TE_LEN 1 +#define MAC_RSSCR_TCP6TE_POS 4 +#define MAC_RSSCR_TCP6TE_LEN 1 +#define MAC_RSSCR_UDP6TE_POS 5 +#define MAC_RSSCR_UDP6TE_LEN 1 + +/* RSS indirection table */ +#define MAC_RSSDR_DMCH_POS 0 +#define MAC_RSSDR_DMCH_LEN 2 + +#define MAC_VLANHTR_VLHT_POS 0 +#define MAC_VLANHTR_VLHT_LEN 16 +#define MAC_VLANIR_VLTI_POS 20 +#define MAC_VLANIR_VLTI_LEN 1 +#define MAC_VLANIR_CSVL_POS 19 +#define MAC_VLANIR_CSVL_LEN 1 +#define MAC_VLANIR_VLP_POS 18 +#define MAC_VLANIR_VLP_LEN 1 +#define MAC_VLANIR_VLC_POS 16 +#define MAC_VLANIR_VLC_LEN 2 +#define MAC_VLANIR_VLT_POS 0 +#define MAC_VLANIR_VLT_LEN 16 +#define MAC_VLANTR_DOVLTC_POS 20 +#define MAC_VLANTR_DOVLTC_LEN 1 +#define MAC_VLANTR_ERSVLM_POS 19 +#define MAC_VLANTR_ERSVLM_LEN 1 +#define MAC_VLANTR_ESVL_POS 18 +#define MAC_VLANTR_ESVL_LEN 1 +#define MAC_VLANTR_ETV_POS 16 +#define MAC_VLANTR_ETV_LEN 1 +#define MAC_VLANTR_EVLS_POS 21 +#define MAC_VLANTR_EVLS_LEN 2 +#define MAC_VLANTR_EVLRXS_POS 24 +#define MAC_VLANTR_EVLRXS_LEN 1 +#define MAC_VLANTR_VL_POS 0 +#define MAC_VLANTR_VL_LEN 16 +#define MAC_VLANTR_VTHM_POS 25 +#define MAC_VLANTR_VTHM_LEN 1 +#define MAC_VLANTR_VTIM_POS 17 +#define MAC_VLANTR_VTIM_LEN 1 +#define MAC_VR_DEVID_POS 16 +#define MAC_VR_DEVID_LEN 16 +#define MAC_VR_SVER_POS 0 +#define MAC_VR_SVER_LEN 8 +#define MAC_VR_USERVER_POS 8 +#define MAC_VR_USERVER_LEN 8 + +#define MAC_DBG_STA_TX_BUSY 0x70000 +#define MTL_TXQ_DEG_TX_BUSY 0x10 + +#define MAC_MDIO_ADDRESS_BUSY 1 /* bit 0 */ + +#define MAC_MDIO_ADDR_GOC_POS 2 +#define MAC_MDIO_ADDR_GOC_LEN 2 +#define MAC_MDIO_ADDR_GB_POS 0 +#define MAC_MDIO_ADDR_GB_LEN 1 + +#define MAC_MDIO_DATA_RA_POS 16 +#define MAC_MDIO_DATA_RA_LEN 16 +#define MAC_MDIO_DATA_GD_POS 0 +#define MAC_MDIO_DATA_GD_LEN 16 + +/* bit definitions for PMT and WOL, 20210622 */ +#define MAC_PMT_STA_PWRDWN_POS 0 +#define MAC_PMT_STA_PWRDWN_LEN 1 +#define MAC_PMT_STA_MGKPKTEN_POS 1 +#define MAC_PMT_STA_MGKPKTEN_LEN 1 +#define MAC_PMT_STA_RWKPKTEN_POS 2 +#define MAC_PMT_STA_RWKPKTEN_LEN 1 +#define MAC_PMT_STA_MGKPRCVD_POS 5 +#define MAC_PMT_STA_MGKPRCVD_LEN 1 +#define MAC_PMT_STA_RWKPRCVD_POS 6 +#define MAC_PMT_STA_RWKPRCVD_LEN 1 +#define MAC_PMT_STA_GLBLUCAST_POS 9 +#define MAC_PMT_STA_GLBLUCAST_LEN 1 +#define MAC_PMT_STA_RWKPTR_POS 24 +#define MAC_PMT_STA_RWKPTR_LEN 4 +#define MAC_PMT_STA_RWKFILTERST_POS 31 +#define MAC_PMT_STA_RWKFILTERST_LEN 1 +/* MMC register offsets */ +#define MMC_CR 0x0700 +#define MMC_RISR 0x0704 +#define MMC_TISR 0x0708 +#define MMC_RIER 0x070c +#define MMC_TIER 0x0710 +#define MMC_TXOCTETCOUNT_GB_LO 0x0714 +#define MMC_TXFRAMECOUNT_GB_LO 0x0718 +#define MMC_TXBROADCASTFRAMES_G_LO 0x071c +#define MMC_TXMULTICASTFRAMES_G_LO 0x0720 +#define MMC_TX64OCTETS_GB_LO 0x0724 +#define MMC_TX65TO127OCTETS_GB_LO 0x0728 +#define MMC_TX128TO255OCTETS_GB_LO 0x072c +#define MMC_TX256TO511OCTETS_GB_LO 0x0730 +#define MMC_TX512TO1023OCTETS_GB_LO 0x0734 +#define MMC_TX1024TOMAXOCTETS_GB_LO 0x0738 +#define MMC_TXUNICASTFRAMES_GB_LO 0x073c +#define MMC_TXMULTICASTFRAMES_GB_LO 0x0740 +#define MMC_TXBROADCASTFRAMES_GB_LO 0x0744 +#define MMC_TXUNDERFLOWERROR_LO 0x0748 +#define MMC_TXSINGLECOLLISION_G 0x074c +#define MMC_TXMULTIPLECOLLISION_G 0x0750 +#define MMC_TXDEFERREDFRAMES 0x0754 
+#define MMC_TXLATECOLLISIONFRAMES 0x0758 +#define MMC_TXEXCESSIVECOLLSIONFRAMES 0x075c +#define MMC_TXCARRIERERRORFRAMES 0x0760 +#define MMC_TXOCTETCOUNT_G_LO 0x0764 +#define MMC_TXFRAMECOUNT_G_LO 0x0768 +#define MMC_TXEXCESSIVEDEFERRALERROR 0x076c +#define MMC_TXPAUSEFRAMES_LO 0x0770 +#define MMC_TXVLANFRAMES_G_LO 0x0774 +#define MMC_TXOVERSIZEFRAMES 0x0778 +#define MMC_RXFRAMECOUNT_GB_LO 0x0780 +#define MMC_RXOCTETCOUNT_GB_LO 0x0784 +#define MMC_RXOCTETCOUNT_G_LO 0x0788 +#define MMC_RXBROADCASTFRAMES_G_LO 0x078c +#define MMC_RXMULTICASTFRAMES_G_LO 0x0790 +#define MMC_RXCRCERROR_LO 0x0794 +#define MMC_RXALIGNERROR 0x0798 +#define MMC_RXRUNTERROR 0x079c +#define MMC_RXJABBERERROR 0x07a0 +#define MMC_RXUNDERSIZE_G 0x07a4 +#define MMC_RXOVERSIZE_G 0x07a8 +#define MMC_RX64OCTETS_GB_LO 0x07ac +#define MMC_RX65TO127OCTETS_GB_LO 0x07b0 +#define MMC_RX128TO255OCTETS_GB_LO 0x07b4 +#define MMC_RX256TO511OCTETS_GB_LO 0x07b8 +#define MMC_RX512TO1023OCTETS_GB_LO 0x07bc +#define MMC_RX1024TOMAXOCTETS_GB_LO 0x07c0 +#define MMC_RXUNICASTFRAMES_G_LO 0x07c4 +#define MMC_RXLENGTHERROR_LO 0x07c8 +#define MMC_RXOUTOFRANGETYPE_LO 0x07cc +#define MMC_RXPAUSEFRAMES_LO 0x07d0 +#define MMC_RXFIFOOVERFLOW_LO 0x07d4 +#define MMC_RXVLANFRAMES_GB_LO 0x07d8 +#define MMC_RXWATCHDOGERROR 0x07dc +#define MMC_RXRECEIVEERRORFRAME 0x07e0 +#define MMC_RXCONTROLFRAME_G 0x07e4 + +#define MMC_IPCRXINTMASK 0x800 +#define MMC_IPCRXINT 0x808 + +/* MMC register entry bit positions and sizes */ +#define MMC_CR_CR_POS 0 +#define MMC_CR_CR_LEN 1 +#define MMC_CR_CSR_POS 1 +#define MMC_CR_CSR_LEN 1 +#define MMC_CR_ROR_POS 2 +#define MMC_CR_ROR_LEN 1 +#define MMC_CR_MCF_POS 3 +#define MMC_CR_MCF_LEN 1 +#define MMC_RIER_ALL_INTERRUPTS_POS 0 +#define MMC_RIER_ALL_INTERRUPTS_LEN 26 +#define MMC_RISR_RXFRAMECOUNT_GB_POS 0 +#define MMC_RISR_RXFRAMECOUNT_GB_LEN 1 +#define MMC_RISR_RXOCTETCOUNT_GB_POS 1 +#define MMC_RISR_RXOCTETCOUNT_GB_LEN 1 +#define MMC_RISR_RXOCTETCOUNT_G_POS 2 +#define MMC_RISR_RXOCTETCOUNT_G_LEN 1 +#define MMC_RISR_RXBROADCASTFRAMES_G_POS 3 +#define MMC_RISR_RXBROADCASTFRAMES_G_LEN 1 +#define MMC_RISR_RXMULTICASTFRAMES_G_POS 4 +#define MMC_RISR_RXMULTICASTFRAMES_G_LEN 1 +#define MMC_RISR_RXCRCERROR_POS 5 +#define MMC_RISR_RXCRCERROR_LEN 1 +#define MMC_RISR_RXALIGNERROR_POS 6 +#define MMC_RISR_RXALIGNERROR_LEN 1 +#define MMC_RISR_RXRUNTERROR_POS 7 +#define MMC_RISR_RXRUNTERROR_LEN 1 +#define MMC_RISR_RXJABBERERROR_POS 8 +#define MMC_RISR_RXJABBERERROR_LEN 1 +#define MMC_RISR_RXUNDERSIZE_G_POS 9 +#define MMC_RISR_RXUNDERSIZE_G_LEN 1 +#define MMC_RISR_RXOVERSIZE_G_POS 10 +#define MMC_RISR_RXOVERSIZE_G_LEN 1 +#define MMC_RISR_RX64OCTETS_GB_POS 11 +#define MMC_RISR_RX64OCTETS_GB_LEN 1 +#define MMC_RISR_RX65TO127OCTETS_GB_POS 12 +#define MMC_RISR_RX65TO127OCTETS_GB_LEN 1 +#define MMC_RISR_RX128TO255OCTETS_GB_POS 13 +#define MMC_RISR_RX128TO255OCTETS_GB_LEN 1 +#define MMC_RISR_RX256TO511OCTETS_GB_POS 14 +#define MMC_RISR_RX256TO511OCTETS_GB_LEN 1 +#define MMC_RISR_RX512TO1023OCTETS_GB_POS 15 +#define MMC_RISR_RX512TO1023OCTETS_GB_LEN 1 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_POS 16 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_LEN 1 +#define MMC_RISR_RXUNICASTFRAMES_G_POS 17 +#define MMC_RISR_RXUNICASTFRAMES_G_LEN 1 +#define MMC_RISR_RXLENGTHERROR_POS 18 +#define MMC_RISR_RXLENGTHERROR_LEN 1 +#define MMC_RISR_RXOUTOFRANGETYPE_POS 19 +#define MMC_RISR_RXOUTOFRANGETYPE_LEN 1 +#define MMC_RISR_RXPAUSEFRAMES_POS 20 +#define MMC_RISR_RXPAUSEFRAMES_LEN 1 +#define MMC_RISR_RXFIFOOVERFLOW_POS 21 +#define MMC_RISR_RXFIFOOVERFLOW_LEN 1 +#define 
MMC_RISR_RXVLANFRAMES_GB_POS 22 +#define MMC_RISR_RXVLANFRAMES_GB_LEN 1 +#define MMC_RISR_RXWATCHDOGERROR_POS 23 +#define MMC_RISR_RXWATCHDOGERROR_LEN 1 +#define MMC_RISR_RXERRORFRAMES_POS 24 +#define MMC_RISR_RXERRORFRAMES_LEN 1 +#define MMC_RISR_RXERRORCONTROLFRAMES_POS 25 +#define MMC_RISR_RXERRORCONTROLFRAMES_LEN 1 +#define MMC_RISR_RXLPIMICROSECOND_POS 26 /* no counter register */ +#define MMC_RISR_RXLPIMICROSECOND_LEN 1 +#define MMC_RISR_RXLPITRANSITION_POS 27 /* no counter register */ +#define MMC_RISR_RXLPITRANSITION_LEN 1 + +#define MMC_TIER_ALL_INTERRUPTS_POS 0 +#define MMC_TIER_ALL_INTERRUPTS_LEN 26 +#define MMC_TISR_TXOCTETCOUNT_GB_POS 0 +#define MMC_TISR_TXOCTETCOUNT_GB_LEN 1 +#define MMC_TISR_TXFRAMECOUNT_GB_POS 1 +#define MMC_TISR_TXFRAMECOUNT_GB_LEN 1 +#define MMC_TISR_TXBROADCASTFRAMES_G_POS 2 +#define MMC_TISR_TXBROADCASTFRAMES_G_LEN 1 +#define MMC_TISR_TXMULTICASTFRAMES_G_POS 3 +#define MMC_TISR_TXMULTICASTFRAMES_G_LEN 1 +#define MMC_TISR_TX64OCTETS_GB_POS 4 +#define MMC_TISR_TX64OCTETS_GB_LEN 1 +#define MMC_TISR_TX65TO127OCTETS_GB_POS 5 +#define MMC_TISR_TX65TO127OCTETS_GB_LEN 1 +#define MMC_TISR_TX128TO255OCTETS_GB_POS 6 +#define MMC_TISR_TX128TO255OCTETS_GB_LEN 1 +#define MMC_TISR_TX256TO511OCTETS_GB_POS 7 +#define MMC_TISR_TX256TO511OCTETS_GB_LEN 1 +#define MMC_TISR_TX512TO1023OCTETS_GB_POS 8 +#define MMC_TISR_TX512TO1023OCTETS_GB_LEN 1 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_POS 9 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_LEN 1 +#define MMC_TISR_TXUNICASTFRAMES_GB_POS 10 +#define MMC_TISR_TXUNICASTFRAMES_GB_LEN 1 +#define MMC_TISR_TXMULTICASTFRAMES_GB_POS 11 +#define MMC_TISR_TXMULTICASTFRAMES_GB_LEN 1 +#define MMC_TISR_TXBROADCASTFRAMES_GB_POS 12 +#define MMC_TISR_TXBROADCASTFRAMES_GB_LEN 1 +#define MMC_TISR_TXUNDERFLOWERROR_POS 13 +#define MMC_TISR_TXUNDERFLOWERROR_LEN 1 +#define MMC_TISR_TXSINGLECOLLISION_G_POS 14 +#define MMC_TISR_TXSINGLECOLLISION_G_LEN 1 +#define MMC_TISR_TXMULTIPLECOLLISION_G_POS 15 +#define MMC_TISR_TXMULTIPLECOLLISION_G_LEN 1 +#define MMC_TISR_TXDEFERREDFRAMES_POS 16 +#define MMC_TISR_TXDEFERREDFRAMES_LEN 1 +#define MMC_TISR_TXLATECOLLISIONFRAMES_POS 17 +#define MMC_TISR_TXLATECOLLISIONFRAMES_LEN 1 +#define MMC_TISR_TXEXCESSIVECOLLISIONFRAMES_POS 18 +#define MMC_TISR_TXEXCESSIVECOLLISIONFRAMES_LEN 1 +#define MMC_TISR_TXCARRIERERRORFRAMES_POS 19 +#define MMC_TISR_TXCARRIERERRORFRAMES_LEN 1 +#define MMC_TISR_TXOCTETCOUNT_G_POS 20 +#define MMC_TISR_TXOCTETCOUNT_G_LEN 1 +#define MMC_TISR_TXFRAMECOUNT_G_POS 21 +#define MMC_TISR_TXFRAMECOUNT_G_LEN 1 +#define MMC_TISR_TXEXCESSIVEDEFERRALFRAMES_POS 22 +#define MMC_TISR_TXEXCESSIVEDEFERRALFRAMES_LEN 1 +#define MMC_TISR_TXPAUSEFRAMES_POS 23 +#define MMC_TISR_TXPAUSEFRAMES_LEN 1 +#define MMC_TISR_TXVLANFRAMES_G_POS 24 +#define MMC_TISR_TXVLANFRAMES_G_LEN 1 +#define MMC_TISR_TXOVERSIZE_G_POS 25 +#define MMC_TISR_TXOVERSIZE_G_LEN 1 +#define MMC_TISR_TXLPIMICROSECOND_POS 26 /* no counter register */ +#define MMC_TISR_TXLPIMICROSECOND_LEN 1 +#define MMC_TISR_TXLPITRANSITION_POS 27 /* no counter register */ +#define MMC_TISR_TXLPITRANSITION_LEN 1 + +/* MTL register offsets */ +#define MTL_OMR 0x0c00 +#define MTL_FDDR 0x0c10 +#define MTL_INT_SR 0x0c20 +#define MTL_RQDCM0R 0x0c30 +#define MTL_ECC_INT_SR 0x0ccc + +#define MTL_RQDCM_INC 4 +#define MTL_RQDCM_Q_PER_REG 4 + +/* MTL register entry bit positions and sizes */ +#define MTL_OMR_ETSALG_POS 5 +#define MTL_OMR_ETSALG_LEN 2 +#define MTL_OMR_RAA_POS 2 +#define MTL_OMR_RAA_LEN 1 + +/* MTL queue register offsets + * Multiple queues can be active. 
The first queue has registers
+ * that begin at 0x0d00. Each subsequent queue has registers that
+ * are accessed using an offset of 0x40 from the previous queue.
+ */
+#define MTL_Q_BASE 0x0d00
+#define MTL_Q_INC 0x40
+#define MTL_Q_INT_CTL_SR 0x0d2c
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_RQOMR 0x30
+#define MTL_Q_RQDR 0x38
+#define MTL_Q_IER 0x2c
+#define MTL_Q_ISR 0x2c /* no isr register */
+#define MTL_TXQ_DEG 0x08 /* transmit debug */
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_POS 16
+#define MTL_Q_RQDR_PRXQ_LEN 14
+#define MTL_Q_RQDR_RXQSTS_POS 4
+#define MTL_Q_RQDR_RXQSTS_LEN 2
+#define MTL_Q_RQOMR_RFA_POS 8
+#define MTL_Q_RQOMR_RFA_LEN 6
+#define MTL_Q_RQOMR_RFD_POS 14
+#define MTL_Q_RQOMR_RFD_LEN 6
+#define MTL_Q_RQOMR_EHFC_POS 7
+#define MTL_Q_RQOMR_EHFC_LEN 1
+#define MTL_Q_RQOMR_RQS_POS 20
+#define MTL_Q_RQOMR_RQS_LEN 9
+#define MTL_Q_RQOMR_RSF_POS 5
+#define MTL_Q_RQOMR_RSF_LEN 1
+#define MTL_Q_RQOMR_FEP_POS 4
+#define MTL_Q_RQOMR_FEP_LEN 1
+#define MTL_Q_RQOMR_FUP_POS 3
+#define MTL_Q_RQOMR_FUP_LEN 1
+#define MTL_Q_RQOMR_RTC_POS 0
+#define MTL_Q_RQOMR_RTC_LEN 2
+#define MTL_Q_TQOMR_FTQ_POS 0
+#define MTL_Q_TQOMR_FTQ_LEN 1
+#define MTL_Q_TQOMR_TQS_POS 16
+#define MTL_Q_TQOMR_TQS_LEN 7
+#define MTL_Q_TQOMR_TSF_POS 1
+#define MTL_Q_TQOMR_TSF_LEN 1
+#define MTL_Q_TQOMR_TTC_POS 4
+#define MTL_Q_TQOMR_TTC_LEN 3
+#define MTL_Q_TQOMR_TXQEN_POS 2
+#define MTL_Q_TQOMR_TXQEN_LEN 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+#define MTL_FEP_DISABLE 0x00
+#define MTL_FEP_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_32 0x01
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x00
+#define MTL_TX_THRESHOLD_64 0x01
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_ETSALG_SP 0x03
+
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_EN_IF_AV 0x01
+#define MTL_Q_ENABLED 0x02
+
+#define MTL_RQDCM0R_Q0MDMACH 0x0
+#define MTL_RQDCM0R_Q1MDMACH 0x00000100
+#define MTL_RQDCM0R_Q2MDMACH 0x00020000
+#define MTL_RQDCM0R_Q3MDMACH 0x03000000
+#define MTL_RQDCM1R_Q4MDMACH 0x00000004
+#define MTL_RQDCM1R_Q5MDMACH 0x00000500
+#define MTL_RQDCM1R_Q6MDMACH 0x00060000
+#define MTL_RQDCM1R_Q7MDMACH 0x07000000
+#define MTL_RQDCM2R_Q8MDMACH 0x00000008
+#define MTL_RQDCM2R_Q9MDMACH 0x00000900
+#define MTL_RQDCM2R_Q10MDMACH 0x000A0000
+#define MTL_RQDCM2R_Q11MDMACH 0x0B000000
+
+#define MTL_RQDCM0R_Q0DDMACH 0x10
+#define MTL_RQDCM0R_Q1DDMACH 0x00001000
+#define MTL_RQDCM0R_Q2DDMACH 0x00100000
+#define MTL_RQDCM0R_Q3DDMACH 0x10000000
+#define MTL_RQDCM1R_Q4DDMACH 0x00000010
+#define MTL_RQDCM1R_Q5DDMACH 0x00001000
+#define MTL_RQDCM1R_Q6DDMACH 0x00100000
+#define MTL_RQDCM1R_Q7DDMACH 0x10000000
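
[Editor's note] The queue registers above follow a base + stride layout (0x0d00 + queue * 0x40), and the DMA channel registers further down use the same pattern with base 0x1100 and stride 0x80. A sketch of the address computation, assuming an ioremapped register base field on fxgmac_pdata (the mac_regs field and helper name are illustrative, not from this patch):

/* Illustrative: MTL_Q_TQOMR of queue 2 lives at 0x0d00 + 2 * 0x40 + 0x00 = 0x0d80. */
static inline u32 fxgmac_mtl_q_read(struct fxgmac_pdata *pdata,
				    unsigned int queue, unsigned int reg)
{
	return readl(pdata->mac_regs + MTL_Q_BASE + queue * MTL_Q_INC + reg);
}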
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. There is no separate traffic
+ * class register block: MTL_TC_BASE aliases MTL_Q_BASE, so the first
+ * class has registers that begin at 0x0d00 and each subsequent class is
+ * accessed at an offset of 0x40 from the previous one.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_POS 0
+#define MTL_TC_ETSCR_TSA_LEN 2
+#define MTL_TC_QWR_QW_POS 0
+#define MTL_TC_QWR_QW_LEN 21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+/* DMA register offsets */
+#define DMA_MR 0x1000
+#define DMA_SBMR 0x1004
+#define DMA_ISR 0x1008
+#define DMA_DSR0 0x100c
+#define DMA_DSR1 0x1010
+#define DMA_DSR2 0x1014
+#define DMA_ECC_INT_SR 0x1088
+
+/* DMA register entry bit positions and sizes */
+#define DMA_ISR_MACIS_POS 17
+#define DMA_ISR_MACIS_LEN 1
+#define DMA_ISR_MTLIS_POS 16
+#define DMA_ISR_MTLIS_LEN 1
+#define DMA_MR_SWR_POS 0
+#define DMA_MR_SWR_LEN 1
+#define DMA_MR_INTM_POS 16
+#define DMA_MR_INTM_LEN 2
+#define DMA_MR_QUREAD_POS 19
+#define DMA_MR_QUREAD_LEN 1
+
+#define DMA_SBMR_EN_LPI_POS 31
+#define DMA_SBMR_EN_LPI_LEN 1
+#define DMA_SBMR_LPI_XIT_PKT_POS 30
+#define DMA_SBMR_LPI_XIT_PKT_LEN 1
+#define DMA_SBMR_WR_OSR_LMT_POS 24
+#define DMA_SBMR_WR_OSR_LMT_LEN 6
+#define DMA_SBMR_RD_OSR_LMT_POS 16
+#define DMA_SBMR_RD_OSR_LMT_LEN 8
+#define DMA_SBMR_EAME_POS 11
+#define DMA_SBMR_EAME_LEN 1
+#define DMA_SBMR_AALE_POS 10
+#define DMA_SBMR_AALE_LEN 1
+#define DMA_SBMR_BLEN_4_POS 1
+#define DMA_SBMR_BLEN_4_LEN 1
+#define DMA_SBMR_BLEN_8_POS 2
+#define DMA_SBMR_BLEN_8_LEN 1
+#define DMA_SBMR_BLEN_16_POS 3
+#define DMA_SBMR_BLEN_16_LEN 1
+#define DMA_SBMR_BLEN_32_POS 4
+#define DMA_SBMR_BLEN_32_LEN 1
+#define DMA_SBMR_BLEN_64_POS 5
+#define DMA_SBMR_BLEN_64_LEN 1
+#define DMA_SBMR_BLEN_128_POS 6
+#define DMA_SBMR_BLEN_128_LEN 1
+#define DMA_SBMR_BLEN_256_POS 7
+#define DMA_SBMR_BLEN_256_LEN 1
+#define DMA_SBMR_FB_POS 0
+#define DMA_SBMR_FB_LEN 1
+
+/* DMA register values */
+#define DMA_DSR_RPS_LEN 4
+#define DMA_DSR_TPS_LEN 4
+#define DMA_DSR_Q_LEN (DMA_DSR_RPS_LEN + DMA_DSR_TPS_LEN)
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4 /* no definition */
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x1100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */ +#define DMA_CH_BASE 0x1100 +#define DMA_CH_INC 0x80 + +#define DMA_CH_CR 0x00 +#define DMA_CH_TCR 0x04 +#define DMA_CH_RCR 0x08 +#define DMA_CH_TDLR_HI 0x10 +#define DMA_CH_TDLR_LO 0x14 +#define DMA_CH_RDLR_HI 0x18 +#define DMA_CH_RDLR_LO 0x1c +#define DMA_CH_TDTR_LO 0x20 +#define DMA_CH_RDTR_LO 0x28 +#define DMA_CH_TDRLR 0x2c +#define DMA_CH_RDRLR 0x30 +#define DMA_CH_IER 0x34 +#define DMA_CH_RIWT 0x38 +#define DMA_CH_SR 0x60 + +/* DMA channel register entry bit positions and sizes */ +#define DMA_CH_CR_PBLX8_POS 16 +#define DMA_CH_CR_PBLX8_LEN 1 +#define DMA_CH_CR_SPH_POS 24 +#define DMA_CH_CR_SPH_LEN 1 +#define DMA_CH_IER_AIE_POS 14 +#define DMA_CH_IER_AIE_LEN 1 +#define DMA_CH_IER_FBEE_POS 12 +#define DMA_CH_IER_FBEE_LEN 1 +#define DMA_CH_IER_NIE_POS 15 +#define DMA_CH_IER_NIE_LEN 1 +#define DMA_CH_IER_RBUE_POS 7 +#define DMA_CH_IER_RBUE_LEN 1 +#define DMA_CH_IER_RIE_POS 6 +#define DMA_CH_IER_RIE_LEN 1 +#define DMA_CH_IER_RSE_POS 8 +#define DMA_CH_IER_RSE_LEN 1 +#define DMA_CH_IER_TBUE_POS 2 +#define DMA_CH_IER_TBUE_LEN 1 +#define DMA_CH_IER_TIE_POS 0 +#define DMA_CH_IER_TIE_LEN 1 +#define DMA_CH_IER_TXSE_POS 1 +#define DMA_CH_IER_TXSE_LEN 1 +#define DMA_CH_RCR_PBL_POS 16 +#define DMA_CH_RCR_PBL_LEN 6 +#define DMA_CH_RCR_RBSZ_POS 1 +#define DMA_CH_RCR_RBSZ_LEN 14 +#define DMA_CH_RCR_SR_POS 0 +#define DMA_CH_RCR_SR_LEN 1 +#define DMA_CH_RIWT_RWT_POS 0 +#define DMA_CH_RIWT_RWT_LEN 8 +#define DMA_CH_SR_FBE_POS 12 +#define DMA_CH_SR_FBE_LEN 1 +#define DMA_CH_SR_RBU_POS 7 +#define DMA_CH_SR_RBU_LEN 1 +#define DMA_CH_SR_RI_POS 6 +#define DMA_CH_SR_RI_LEN 1 +#define DMA_CH_SR_RPS_POS 8 +#define DMA_CH_SR_RPS_LEN 1 +#define DMA_CH_SR_TBU_POS 2 +#define DMA_CH_SR_TBU_LEN 1 +#define DMA_CH_SR_TI_POS 0 +#define DMA_CH_SR_TI_LEN 1 +#define DMA_CH_SR_TPS_POS 1 +#define DMA_CH_SR_TPS_LEN 1 +#define DMA_CH_TCR_OSP_POS 4 +#define DMA_CH_TCR_OSP_LEN 1 +#define DMA_CH_TCR_PBL_POS 16 +#define DMA_CH_TCR_PBL_LEN 6 +#define DMA_CH_TCR_ST_POS 0 +#define DMA_CH_TCR_ST_LEN 1 +#define DMA_CH_TCR_TSE_POS 12 +#define DMA_CH_TCR_TSE_LEN 1 + +/* DMA channel register values */ +#define DMA_OSP_DISABLE 0x00 +#define DMA_OSP_ENABLE 0x01 +#define DMA_PBL_1 1 +#define DMA_PBL_2 2 +#define DMA_PBL_4 4 +#define DMA_PBL_8 8 +#define DMA_PBL_16 16 +#define DMA_PBL_32 32 +#define DMA_PBL_64 64 +#define DMA_PBL_128 128 +#define DMA_PBL_256 256 +#define DMA_PBL_X8_DISABLE 0x00 +#define DMA_PBL_X8_ENABLE 0x01 + +/* Descriptor/Packet entry bit positions and sizes */ +#define RX_PACKET_ERRORS_CRC_POS 2 +#define RX_PACKET_ERRORS_CRC_LEN 1 +#define RX_PACKET_ERRORS_FRAME_POS 3 +#define RX_PACKET_ERRORS_FRAME_LEN 1 +#define RX_PACKET_ERRORS_LENGTH_POS 0 +#define RX_PACKET_ERRORS_LENGTH_LEN 1 +#define RX_PACKET_ERRORS_OVERRUN_POS 1 +#define RX_PACKET_ERRORS_OVERRUN_LEN 1 + +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_POS 0 +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_POS 2 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS 3 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_POS 4 +#define RX_PACKET_ATTRIBUTES_CONTEXT_LEN 1 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS 5 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN 1 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_POS 6 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_LEN 1 + +#define RX_NORMAL_DESC0_OVT_POS 0 +#define RX_NORMAL_DESC0_OVT_LEN 16 +#define RX_NORMAL_DESC2_HL_POS 0 +#define 
RX_NORMAL_DESC2_HL_LEN 10 +#define RX_NORMAL_DESC3_CDA_LEN 1 +#define RX_NORMAL_DESC3_CTXT_POS 30 +#define RX_NORMAL_DESC3_CTXT_LEN 1 +#define RX_NORMAL_DESC3_ES_POS 15 +#define RX_NORMAL_DESC3_ES_LEN 1 +#define RX_NORMAL_DESC3_ETLT_POS 16 +#define RX_NORMAL_DESC3_ETLT_LEN 3 +#define RX_NORMAL_DESC3_FD_POS 29 +#define RX_NORMAL_DESC3_FD_LEN 1 +#define RX_NORMAL_DESC3_INTE_POS 30 +#define RX_NORMAL_DESC3_INTE_LEN 1 +#define RX_NORMAL_DESC3_L34T_LEN 4 +#define RX_NORMAL_DESC3_LD_POS 28 +#define RX_NORMAL_DESC3_LD_LEN 1 +#define RX_NORMAL_DESC3_OWN_POS 31 +#define RX_NORMAL_DESC3_OWN_LEN 1 +#define RX_NORMAL_DESC3_BUF2V_POS 25 +#define RX_NORMAL_DESC3_BUF2V_LEN 1 +#define RX_NORMAL_DESC3_BUF1V_POS 24 +#define RX_NORMAL_DESC3_BUF1V_LEN 1 +#define RX_NORMAL_DESC3_PL_POS 0 +#define RX_NORMAL_DESC3_PL_LEN 15 +#define RX_NORMAL_DESC3_RSV_LEN 1 + +/* Inner VLAN Tag. Valid only when Double VLAN tag processing + * and VLAN tag stripping are enabled. + */ +#define RX_NORMAL_DESC0_WB_IVT_POS 16 +#define RX_NORMAL_DESC0_WB_IVT_LEN 16 +#define RX_NORMAL_DESC0_WB_OVT_POS 0 /* Outer VLAN Tag. */ +#define RX_NORMAL_DESC0_WB_OVT_LEN 16 +#define RX_NORMAL_DESC0_WB_OVT_VLANID_POS 0 /* Outer VLAN ID. */ +#define RX_NORMAL_DESC0_WB_OVT_VLANID_LEN 12 +#define RX_NORMAL_DESC0_WB_OVT_CFI_POS 12 /* Outer VLAN CFI. */ +#define RX_NORMAL_DESC0_WB_OVT_CFI_LEN 1 +#define RX_NORMAL_DESC0_WB_OVT_PRIO_POS 13 /* Outer VLAN Priority. */ +#define RX_NORMAL_DESC0_WB_OVT_PRIO_LEN 3 + +#define RX_NORMAL_DESC1_WB_IPCE_POS 7 /* IP Payload Error. */ +#define RX_NORMAL_DESC1_WB_IPCE_LEN 1 +#define RX_NORMAL_DESC1_WB_IPV6_POS 5 /* IPV6 Header Present. */ +#define RX_NORMAL_DESC1_WB_IPV6_LEN 1 +#define RX_NORMAL_DESC1_WB_IPV4_POS 4 /* IPV4 Header Present. */ +#define RX_NORMAL_DESC1_WB_IPV4_LEN 1 +#define RX_NORMAL_DESC1_WB_IPHE_POS 3 /* P Header Error. */ +#define RX_NORMAL_DESC1_WB_IPHE_LEN 1 +#define RX_NORMAL_DESC1_WB_PT_POS 0 +#define RX_NORMAL_DESC1_WB_PT_LEN 3 + +/* Hash Filter Status. When this bit is set, it indicates + * that the packet passed the MAC address hash filter. + */ +#define RX_NORMAL_DESC2_WB_HF_POS 18 +#define RX_NORMAL_DESC2_WB_HF_LEN 1 +/* Destination Address Filter Fail. When Flexible RX Parser + * is disabled, and this bit is set, it indicates that the packet + * failed the DA Filter in the MAC. + */ +#define RX_NORMAL_DESC2_WB_DAF_POS 17 +#define RX_NORMAL_DESC2_WB_DAF_LEN 1 + +#define RX_NORMAL_DESC3_WB_LD_POS 28 +#define RX_NORMAL_DESC3_WB_LD_LEN 1 +/* When this bit is set, it indicates that the status in + * RDES0 is valid and it is written by the DMA. + */ +#define RX_NORMAL_DESC3_WB_RS0V_POS 25 +#define RX_NORMAL_DESC3_WB_RS0V_LEN 1 +/* When this bit is set, it indicates that a Cyclic Redundancy + * Check (CRC) Error occurred on the received packet. This field + * is valid only when the LD bit of RDES3 is set. 
+ */ +#define RX_NORMAL_DESC3_WB_CE_POS 24 +#define RX_NORMAL_DESC3_WB_CE_LEN 1 + +#define RX_DESC3_L34T_IPV4_TCP 1 +#define RX_DESC3_L34T_IPV4_UDP 2 +#define RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV6_TCP 9 +#define RX_DESC3_L34T_IPV6_UDP 10 +#define RX_DESC3_L34T_IPV6_ICMP 11 + +#define RX_DESC1_PT_UDP 1 +#define RX_DESC1_PT_TCP 2 +#define RX_DESC1_PT_ICMP 3 +#define RX_DESC1_PT_AV_TAG_DATA 6 +#define RX_DESC1_PT_AV_TAG_CTRL 7 +#define RX_DESC1_PT_AV_NOTAG_CTRL 5 + +#define RX_CONTEXT_DESC3_TSA_LEN 1 +#define RX_CONTEXT_DESC3_TSD_LEN 1 + +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS 0 +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN 1 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 2 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1 +#define TX_PACKET_ATTRIBUTES_PTP_POS 3 +#define TX_PACKET_ATTRIBUTES_PTP_LEN 1 + +#define TX_CONTEXT_DESC2_MSS_POS 0 +#define TX_CONTEXT_DESC2_MSS_LEN 14 +#define TX_CONTEXT_DESC2_IVLTV_POS 16 /* Inner VLAN Tag. */ +#define TX_CONTEXT_DESC2_IVLTV_LEN 16 + +#define TX_CONTEXT_DESC3_CTXT_POS 30 +#define TX_CONTEXT_DESC3_CTXT_LEN 1 +#define TX_CONTEXT_DESC3_TCMSSV_POS 26 +#define TX_CONTEXT_DESC3_TCMSSV_LEN 1 +#define TX_CONTEXT_DESC3_IVTIR_POS 18 +#define TX_CONTEXT_DESC3_IVTIR_LEN 2 +/* Insert an inner VLAN tag with the tag value programmed + * in the MAC_Instxner_VLAN_Incl register or context + * descriptor. + */ +#define TX_CONTEXT_DESC3_IVTIR_INSERT 2 +/* Indicates that the Inner VLAN TAG, IVLTV field of context TDES2 is valid. */ +#define TX_CONTEXT_DESC3_IVLTV_POS 17 +#define TX_CONTEXT_DESC3_IVLTV_LEN 1 +/* Indicates that the VT field of context TDES3 is valid. */ +#define TX_CONTEXT_DESC3_VLTV_POS 16 +#define TX_CONTEXT_DESC3_VLTV_LEN 1 +#define TX_CONTEXT_DESC3_VT_POS 0 +#define TX_CONTEXT_DESC3_VT_LEN 16 + +/* Header Length or Buffer 1 Length. */ +#define TX_NORMAL_DESC2_HL_B1L_POS 0 +#define TX_NORMAL_DESC2_HL_B1L_LEN 14 +/* Interrupt on Completion. */ +#define TX_NORMAL_DESC2_IC_POS 31 +#define TX_NORMAL_DESC2_IC_LEN 1 +/* Transmit Timestamp Enable or External TSO Memory Write Enable. */ +#define TX_NORMAL_DESC2_TTSE_POS 30 +#define TX_NORMAL_DESC2_TTSE_LEN 1 +/* LAN Tag Insertion or Replacement. */ +#define TX_NORMAL_DESC2_VTIR_POS 14 +#define TX_NORMAL_DESC2_VTIR_LEN 2 +#define TX_NORMAL_DESC2_VLAN_INSERT 0x2 + +#define TX_NORMAL_DESC3_TCPPL_POS 0 +#define TX_NORMAL_DESC3_TCPPL_LEN 18 +/* Frame Length or TCP Payload Length. */ +#define TX_NORMAL_DESC3_FL_POS 0 +#define TX_NORMAL_DESC3_FL_LEN 15 +/* Checksum Insertion Control or TCP Payload Length. + * 2'b00: Checksum Insertion Disabled. + * 2'b01: Only IP header checksum calculation and insertion are enabled. + * 2'b10: IP header checksum and payload checksum calculation and insertion are + * enabled, but pseudo-header checksum is not calculated in hardware. + * 2'b11: IP Header checksum and payload checksum calculation and insertion are + * enabled, and pseudo - header checksum is calculated in hardware. */ +#define TX_NORMAL_DESC3_CIC_POS 16 +#define TX_NORMAL_DESC3_CIC_LEN 2 +/* TCP Segmentation Enable. */ +#define TX_NORMAL_DESC3_TSE_POS 18 +#define TX_NORMAL_DESC3_TSE_LEN 1 +/* THL: TCP/UDP Header Length.If the TSE bit is set, this field contains + * the length of the TCP / UDP header.The minimum value of this field must + * be 5 for TCP header.The value must be equal to 2 for UDP header. This + * field is valid only for the first descriptor. 
+ */ +#define TX_NORMAL_DESC3_TCPHDRLEN_POS 19 +#define TX_NORMAL_DESC3_TCPHDRLEN_LEN 4 +#define TX_NORMAL_DESC3_CPC_POS 26 /* CRC Pad Control. */ +#define TX_NORMAL_DESC3_CPC_LEN 2 +#define TX_NORMAL_DESC3_LD_POS 28 /* Last Descriptor. */ +#define TX_NORMAL_DESC3_LD_LEN 1 +#define TX_NORMAL_DESC3_FD_POS 29 /* First Descriptor. */ +#define TX_NORMAL_DESC3_FD_LEN 1 +/* Context Type.This bit should be set to 1'b0 for normal descriptor. */ +#define TX_NORMAL_DESC3_CTXT_POS 30 +#define TX_NORMAL_DESC3_CTXT_LEN 1 +#define TX_NORMAL_DESC3_OWN_POS 31 /* Own Bit. */ +#define TX_NORMAL_DESC3_OWN_LEN 1 + +/* for ephy generic register definitions */ + +#define FXGMAC_EPHY_REGS_LEN 32 /* 32 ethernet phy registers under spec */ +#define REG_MII_BMCR 0x00 /* Basic mode control register */ +#define PHY_CR_RESET_POS 15 +#define PHY_CR_RESET_LEN 1 +#define PHY_CR_SPEED_SEL_H_POS 6 +#define PHY_CR_SPEED_SEL_H_LEN 1 +#define PHY_CR_SPEED_SEL_L_POS 13 +#define PHY_CR_SPEED_SEL_L_LEN 1 +#define PHY_CR_AUTOENG_POS 12 +#define PHY_CR_AUTOENG_LEN 1 +#define PHY_CR_RE_AUTOENG_POS 9 +#define PHY_CR_RE_AUTOENG_LEN 1 +#define PHY_CR_DUPLEX_POS 8 +#define PHY_CR_DUPLEX_LEN 1 +#define REG_MII_BMCR_ENABLE_LOOPBACK 0x8140 +#define REG_MII_BMCR_DISABLE_LOOPBACK 0x9140 +#define REG_MII_BMSR 0x01 /* Basic mode status register */ +#define REG_MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define REG_MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define REG_MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define PHY_MII_ADVERTISE_ASYPAUSE_POS 11 +#define PHY_MII_ADVERTISE_ASYPAUSE_LEN 1 +#define PHY_MII_ADVERTISE_PAUSE_POS 10 +#define PHY_MII_ADVERTISE_PAUSE_LEN 1 +#define PHY_MII_ADVERTISE_100FULL_POS 8 +#define PHY_MII_ADVERTISE_100FULL_LEN 1 +#define PHY_MII_ADVERTISE_100HALF_POS 7 +#define PHY_MII_ADVERTISE_100HALF_LEN 1 +#define PHY_MII_ADVERTISE_10FULL_POS 6 +#define PHY_MII_ADVERTISE_10FULL_LEN 1 +#define PHY_MII_ADVERTISE_10HALF_POS 5 +#define PHY_MII_ADVERTISE_10HALF_LEN 1 +#define REG_MII_LPA 0x05 /* Link partner ability reg */ +#define REG_MII_EXPANSION 0x06 /* Expansion register */ +#define REG_MII_NEXT_PAGE 0x07 /* Next page register */ +#define REG_MII_LPR_NEXT_PAGE 0x08 /* LPR next page register */ +#define REG_MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define PHY_MII_CTRL1000_1000FULL_POS 9 +#define PHY_MII_CTRL1000_1000FULL_LEN 1 +#define PHY_MII_CTRL1000_1000HALF_POS 8 +#define PHY_MII_CTRL1000_1000HALF_LEN 1 +#define REG_MII_STAT1000 0x0A /* 1000BASE-T status */ +#define PHY_MII_STAT1000_CFG_ERROR_POS 15 +#define PHY_MII_STAT1000_CFG_ERROR_LEN 1 + +#define REG_MII_MMD_CTRL 0x0D /* MMD access control register */ +#define REG_MII_MMD_DATA 0x0E /* MMD access data register */ + +#define REG_MII_ESTATUS 0x0F /* Extended Status */ + +#define REG_MII_SPEC_CTRL 0x10 /* PHY specific func control */ +#define PHY_MII_SPEC_CTRL_CRS_ON_POS 3 +#define PHY_MII_SPEC_CTRL_CRS_ON_LEN 1 +#define REG_MII_SPEC_STATUS 0x11 /* PHY specific status */ +#define PHY_MII_SPEC_DUPLEX_POS 13 +#define PHY_MII_SPEC_DUPLEX_LEN 1 +#define REG_MII_INT_MASK 0x12 /* Interrupt mask register */ + +#ifdef AISC_MODE +#define PHY_INT_MASK_LINK_UP_POS 10 +#define PHY_INT_MASK_LINK_UP_LEN 1 +#define PHY_INT_MASK_LINK_DOWN_POS 11 +#define PHY_INT_MASK_LINK_DOWN_LEN 1 +#else /* FPGA_MODE */ +#define PHY_INT_MASK_LINK_UP_POS 1 +#define PHY_INT_MASK_LINK_UP_LEN 1 +#define PHY_INT_MASK_LINK_DOWN_POS 0 +#define PHY_INT_MASK_LINK_DOWN_LEN 1 +#endif +#define REG_MII_INT_STATUS 0x13 /* Interrupt status register */ +#define PHY_INT_STAT_LINK_UP_POS 1 +#define 
PHY_INT_STAT_LINK_UP_LEN 1
+#define PHY_INT_STAT_LINK_DOWN_POS 0
+#define PHY_INT_STAT_LINK_DOWN_LEN 1
+#define REG_MII_DOWNG_CTRL 0x14 /* Speed auto downgrade control */
+#define REG_MII_RERRCOUNTER 0x15 /* Receive error counter */
+
+#define REG_MII_EXT_ADDR 0x1E /* Extended reg's address */
+#define REG_MII_EXT_DATA 0x1F /* Extended reg's data */
+
+#define FXGMAC_EPHY_ID_MASK 0x0000ffff
+
+/* ephy link capability bits, from the Advertisement control register (0x04) */
+#define FXGMAC_ADVERTISE_SLCT 0x001f /* Selector bits */
+#define FXGMAC_ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define FXGMAC_ADVERTISE_1000FULL 0x0004 /* Try for 1000BASE-T full duplex */
+#define FXGMAC_ADVERTISE_1000HALF 0x0008 /* Try for 1000BASE-T half duplex */
+#define FXGMAC_ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define FXGMAC_ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define FXGMAC_ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define FXGMAC_ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define FXGMAC_ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
+#define FXGMAC_ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
+#define FXGMAC_ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
+#define FXGMAC_ADVERTISE_RESV 0x1000 /* Unused... */
+#define FXGMAC_ADVERTISE_RFAULT 0x2000 /* Say we can detect faults */
+#define FXGMAC_ADVERTISE_LPACK 0x4000 /* Ack link partner's response */
+#define FXGMAC_ADVERTISE_NPAGE 0x8000 /* Next page bit */
+
+/* 1000BASE-T Control register(0x09) */
+#define REG_BIT_ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#define REG_BIT_ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
+
+#define REG_BIT_ADVERTISE_1000_CAP (REG_BIT_ADVERTISE_1000FULL | REG_BIT_ADVERTISE_1000HALF)
+#define REG_BIT_ADVERTISE_100_10_CAP (FXGMAC_ADVERTISE_100FULL | FXGMAC_ADVERTISE_100HALF | FXGMAC_ADVERTISE_10FULL | FXGMAC_ADVERTISE_10HALF)
+
+#ifndef SPEED_1000M
+#define SPEED_1000M 1000
+#endif
+#ifndef SPEED_100M
+#define SPEED_100M 100
+#endif
+#ifndef SPEED_10M
+#define SPEED_10M 10
+#endif
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN 0xffff
+#endif
+
+#ifndef DUPLEX_FULL
+#define DUPLEX_FULL 1
+#endif
+#ifndef DUPLEX_HALF
+#define DUPLEX_HALF 0
+#endif
+
+#ifndef BIT
+#define BIT(n) (0x1 << (n))
+#endif
+
+#ifndef FUXI_EPHY_SPEED_MODE_BIT
+#define FUXI_EPHY_SPEED_MODE 0xc000
+#define FUXI_EPHY_DUPLEX 0x2000
+#define FUXI_EPHY_SPEED_MODE_BIT 14
+#define FUXI_EPHY_DUPLEX_BIT 13
+#define FUXI_EPHY_LINK_STATUS_BIT 10
+#endif
+
+#define FUXI_EPHY_SMI_SEL_PHY 0x0
+#define FUXI_EPHY_SMI_SEL_SDS_QSGMII 0x02
+#define FUXI_EPHY_SMI_SEL_SDS_SGMII 0x03
+
+#define REG_MII_EXT_ANALOG_CFG3 0x52
+#define MII_EXT_ANALOG_CFG3_ADC_START_CFG_POS 14
+#define MII_EXT_ANALOG_CFG3_ADC_START_CFG_LEN 2
+/* VGA bandwidth, default is 2 after reset. Set to 0 to mitigate
+ * unstable operation at 130 m.
+ */ +#define MII_EXT_ANALOG_CFG3_ADC_START_CFG_DEFAULT 0x0 +#define MII_EXT_ANALOG_CFG3_ON_TIME_CFG_POS 12 +#define MII_EXT_ANALOG_CFG3_ON_TIME_CFG_LEN 2 +#define MII_EXT_ANALOG_CFG3_VGA_AMP_GAIN_CFG_POS 8 +#define MII_EXT_ANALOG_CFG3_VGA_AMP_GAIN_CFG_LEN 4 +#define MII_EXT_ANALOG_CFG3_VGA_IBIAS_CFG_POS 4 +#define MII_EXT_ANALOG_CFG3_VGA_IBIAS_CFG_LEN 3 +#define MII_EXT_ANALOG_CFG3_OCP_CFG_POS 2 +#define MII_EXT_ANALOG_CFG3_OCP_CFG_LEN 2 +#define MII_EXT_ANALOG_CFG3_VGA_LPF_CFG_POS 0 +#define MII_EXT_ANALOG_CFG3_VGA_LPF_CFG_LEN 2 + +#define REG_MII_EXT_PMA_DEBUG_KCOEF 0x78 +#define MII_EXT_PMA_DEBUG_KCOEF_IPR_KCOEF_GE_LNG_POS 8 +#define MII_EXT_PMA_DEBUG_KCOEF_IPR_KCOEF_GE_LNG_LEN 6 +/* After reset, it's 0x10. We need change it to 0x20 to make it + * easier to linkup in gigabit mode with long cable. + */ +#define MII_EXT_PMA_DEBUG_KCOEF_IPR_KCOEF_GE_LNG_DEFAULT 0x20 +#define MII_EXT_PMA_DEBUG_KCOEF_IPR_KCOEF_DEFAULT_POS 0 +#define MII_EXT_PMA_DEBUG_KCOEF_IPR_KCOEF_DEFAULT_LEN 6 + +#define REG_MII_EXT_LPBK_REG 0x0a +#define REG_MII_EXT_LPBK_REG_ENABLE_LOOPBACK 0x3a18 +#define REG_MII_EXT_LPBK_REG_CLEAN_LOOPBACK 0x3a08 +#define REG_MII_EXT_SLEEP_CONTROL_REG 0x27 +#define REG_MII_EXT_SLEEP_REG_ENABLE_LOOPBACK 0x6812 +#define REG_MII_EXT_SLEEP_REG_CLEAN_LOOPBACK 0xe812 + +#define REG_MII_EXT_ANALOG_CFG2 0x51 +#define REG_MII_EXT_ANALOG_CFG2_LED_VALUE 0x4a9 +#define REG_MII_EXT_ANALOG_CFG8 0x57 +#define REG_MII_EXT_ANALOG_CFG8_LED_VALUE 0x274c + +#define REG_MII_EXT_COMMON_LED_CFG 0xA00B +#define REG_MII_EXT_COMMON_LED0_CFG 0xA00C +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION0 0x2600 +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION1 0x00 +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION2 0x20 +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION3 0x2600 +#define REG_MII_EXT_COMMON_LED1_CFG 0xA00D +#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION0 0x1800 +#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION1 0x00 +#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION2 0x40 +#define REG_MII_EXT_COMMON_LED2_CFG 0xA00E +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION0 0x00 +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION2 0x07 +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION3 0x20 +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION4 0x1800 +#define REG_MII_EXT_COMMON_LED_BLINK_CFG 0xA00F +#define REG_MII_EXT_COMMON_LED_BLINK_CFG_SOLUTION2 0x0F + +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SLEEP_SOLUTION3 0x2600 + +#define REG_MII_EXT_PKG_CFG0 0xA0 +#define REG_MII_EXT_PKG_CHECK_POS 14 +#define REG_MII_EXT_PKG_CHECK_LEN 2 +#define REG_MII_EXT_PKG_ENABLE_CHECK 0x2 +#define REG_MII_EXT_PKG_DISABLE_CHECK 0x1 +#define REG_MII_EXT_SLEEP_CONTROL1 0x27 +#define MII_EXT_SLEEP_CONTROL1_EN_POS 15 +#define MII_EXT_SLEEP_CONTROL1_EN_LEN 1 +#define MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_POS 14 +#define MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_LEN 1 +#define REG_MII_EXT_PKG_RX_VALID0 0xA3 +#define REG_MII_EXT_REG_RX_VALID1 0xA4 +#define REG_MII_EXT_REG_RX_OS0 0xA5 +#define REG_MII_EXT_REG_RX_OS1 0xA6 +#define REG_MII_EXT_REG_RX_US0 0xA7 +#define REG_MII_EXT_REG_RX_US1 0xA8 +#define REG_MII_EXT_REG_RX_ERR 0xA9 +#define REG_MII_EXT_REG_RX_0S_BAD 0xAA +#define REG_MII_EXT_REG_RX_FRAGMENT 0xAB +#define REG_MII_EXT_REG_RX_NOSFD 0xAC +#define REG_MII_EXT_REG_TX_VALID0 0xAD +#define REG_MII_EXT_REG_TX_VALID1 0xAE +#define REG_MII_EXT_REG_TX_OS0 0xAF +#define REG_MII_EXT_REG_TX_OS1 0xB0 +#define REG_MII_EXT_REG_TX_US0 0xB1 +#define REG_MII_EXT_REG_TX_US1 0xB2 +#define REG_MII_EXT_REG_TX_ERR 0xB3 +#define 
REG_MII_EXT_REG_TX_OS_BAD 0xB4 +#define REG_MII_EXT_REG_TX_FRAGMENT 0xB5 +#define REG_MII_EXT_REG_TX_NOSFD 0xB6 +#define REG_MII_EXT_REG_PMA_DBG0_ADC 0x13 +#define REG_MII_EXT_ENABLE_GIGA_POWER_SAVING_FOR_SHORT_CABLE 0x3538 +#define REG_MII_EXT_REG_CLD_REG0 0x3A0 +#define REG_MII_EXT_ENABLE_CLD_NP_WP 0xEB24 +#define REG_MII_EXT_REG_CLD_REG1 0x3CC +#define REG_MII_EXT_ENABLE_CLD_GT_HT_BT 0x7001 +#define REG_MMD_EEE_ABILITY_REG 0x3C +#define REG_MMD_EEE_ABILITY_VALUE 0x06 + +/* Below registers don't belong to GMAC, it has zero offset, not 0x2000 offset. mem_base + REG_XXX. */ +/***When issue happens, driver write this register to trigger pcie sniffer. ***/ +#define REG_PCIE_TRIGGER 0x1000 +#define PCIE_TRIGGER_CODE_TX_HANG 0x00000002 +#define PCIE_TRIGGER_CODE_LINKDOWN 0x00000003 + + +#define MGMT_EPHY_CTRL 0x1004 +/* check register address 0x1004 +* b[6:5] ephy_pause +* b[4:3] ephy_speed 0b10 1000m 0b01 100m +* b[2] ephy_duplex +* b[1] ephy_link +* b[0] ephy_reset.0-reset, 1-unreset. Should be set to 1 before use phy. +*/ +#define MGMT_EPHY_CTRL_RESET_POS 0 +#define MGMT_EPHY_CTRL_RESET_LEN 1 +#define MGMT_EPHY_CTRL_STA_EPHY_RESET 0 /* 0: reset state. */ +#define MGMT_EPHY_CTRL_STA_EPHY_RELEASE 1 /* 1: release state. */ +#define MGMT_EPHY_CTRL_STA_EPHY_LINKUP 2 /* 1: link up; 0: link down. */ +#define MGMT_EPHY_CTRL_STA_EPHY_LINKUP_POS 1 +#define MGMT_EPHY_CTRL_STA_EPHY_LINKUP_LEN 1 +#define MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_POS 2 /* ephy duplex */ +#define MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_LEN 1 + +#define MGMT_EPHY_CTRL_STA_SPEED_POS 3 +#define MGMT_EPHY_CTRL_STA_SPEED_LEN 2 +#define MGMT_EPHY_CTRL_STA_SPEED_MASK 0x18 + +#define MGMT_EPHY_CTRL_ERROR_VAULE 0xFFFFFFFF + +#define MGMT_PCIE_EP_CTRL 0x1008 + +#define MGMT_PCIE_EP_CTRL_DBI_CS_EN_POS 0 +#define MGMT_PCIE_EP_CTRL_DBI_CS_EN_LEN 1 + +#define MGMT_PCIE_CFG_CTRL 0x8BC +#define PCIE_CFG_CTRL_DEFAULT_VAL 0x7ff40 + +#define MGMT_PCIE_CFG_CTRL_CS_EN_POS 0 +#define MGMT_PCIE_CFG_CTRL_CS_EN_LEN 1 + +/***power management ***/ +#define WOL_CTL 0x100C +/* set means magic and remote packet wakeup enable */ +#define WOL_PKT_EN_POS 1 +#define WOL_PKT_EN_LEN 1 +/* set means link change wakeup enable */ +#define WOL_LINKCHG_EN_POS 0 +#define WOL_LINKCHG_EN_LEN 1 + +#define OOB_WOL_CTRL 0x1010 +#define OOB_WOL_CTRL_DIS_POS 0 +#define OOB_WOL_CTRL_DIS_LEN 1 + +/* b3:0 per rx ch interrupt + * b7:4 per tx ch interrupt + * b8 Safety interrupt signal for un-correctable error + * b9 Safety interrupt signal for correctable error + * b10 Interrupt signal to host system + * b11 Magic Packet Received or Remote Wake-up Packet Received + * b12 ethernet phy interrupt + */ +#define MGMT_INT_CTRL0 0x1100 + +/* MAC management registers bit positions and sizes */ +#define MGMT_INT_CTRL0_INT_MASK_POS 16 +#define MGMT_INT_CTRL0_INT_MASK_LEN 16 +#define MGMT_INT_CTRL0_INT_MASK_MASK 0xFFFF +#define MGMT_INT_CTRL0_INT_MASK_RXCH 0xF +#define MGMT_INT_CTRL0_INT_MASK_TXCH 0x10 +#define MGMT_INT_CTRL0_INT_MASK_EX_PMT 0xF7FF +#define MGMT_INT_CTRL0_INT_MASK_DISABLE 0xF000 + +#define MGMT_INT_CTRL0_INT_STATUS_POS 0 +#define MGMT_INT_CTRL0_INT_STATUS_LEN 16 +#define MGMT_INT_CTRL0_INT_STATUS_MASK 0xFFFF +#define MGMT_INT_CTRL0_INT_STATUS_RX 0x0001 +#define MGMT_INT_CTRL0_INT_STATUS_TX 0x0010 +#define MGMT_INI_CTRL0_INT_STATUS_TX_INVERSE 0xFFEF +#define MGMG_INT_CTRL0_INT_STATUS_PHY_INVERSE 0xFFDF +#define MGMT_INT_CTRL0_INT_STATUS_PHY 0x0020 + +#define MGMT_INT_CTRL0_INT_MASK_RXCH_POS 16 +#define MGMT_INT_CTRL0_INT_STATUS_RXCH_POS 0 +#define MGMT_INT_CTRL0_INT_STATUS_RXCH_LEN 4 
+#define MGMT_INT_CTRL0_INT_STATUS_RXCH_MASK 0xF +#define MGMT_INT_CTRL0_INT_STATUS_RXTX_LEN 5 +#define MGMT_INT_CTRL0_INT_STATUS_RXTX_MASK 0x1F +#define MGMT_INT_CTRL0_INT_STATUS_RXTXPHY_MASK 0x3F + +#define MGMT_INT_CTRL0_INT_MASK_TXCH_POS 20 +#define MGMT_INT_CTRL0_INT_STATUS_TXCH_POS 4 +#define MGMT_INT_CTRL0_INT_STATUS_TXCH_LEN 1 +#define MGMT_INT_CTRL0_INT_STATUS_TXCH_MASK 0x1 + + +/* Interrupt Ctrl1 */ +#define INT_CTRL1 0x1104 +#define INT_CTRL1_TMR_CNT_CFG_MAX_POS 0 /* Timer counter cfg max. Default 0x19, 1us. */ +#define INT_CTRL1_TMR_CNT_CFG_MAX_LEN 10 +#define INT_CTRL1_TMR_CNT_CFG_DEF_VAL 0x19 +#define INT_CTRL1_MSI_AIO_EN_POS 16 +#define INT_CTRL1_MSI_AIO_EN_LEN 1 + +/* Interrupt Moderation */ +#define INT_MOD 0x1108 +#define INT_MOD_TX_POS 16 +#define INT_MOD_TX_LEN 12 +#define INT_MOD_RX_POS 0 +#define INT_MOD_RX_LEN 12 +#define INT_MOD_IN_US 200 /*in us*/ + +/* PCIE LTR 2 working modes: +Two working mode: +1. SW trigger +LTR idle threshold timer set as 0, enable LTR enable will trigger one LTR message +Note: PCIe cfg enable should set in initilization before enable LTR. +2. HW auto trigger +LTR idle threshold timer set as one non-zero value, HW monitor system status, +when system idle timer over threshold, HW send out LTR message +system exit idle state, send out one LTR exit message. +*/ +#define LTR_CTRL 0x1130 +#define LTR_CTRL_IDLE_THRE_TIMER_POS 16 +#define LTR_CTRL_IDLE_THRE_TIMER_LEN 14 /* in 8ns units*/ +#define LTR_CTRL_IDLE_THRE_TIMER_VAL 0x3FFF +#define LTR_CTRL_EN_POS 0 +#define LTR_CTRL_EN_LEN 1 + +#define LTR_CTRL1 0x1134 /* LTR latency message, only for SW enable. */ +#define LTR_CTRL1_LTR_MSG_POS 0 +#define LTR_CTRL1_LTR_MSG_LEN 32 + +#define LTR_CTRL2 0x1138 +#define LTR_CTRL2_DBG_DATA_POS 0 +#define LTR_CTRL2_DBG_DATA_LEN 32 + +#define LTR_IDLE_ENTER 0x113C /* LTR_CTRL3, LTR latency message, only for System IDLE Start. */ +#define LTR_IDLE_ENTER_POS 0 +#define LTR_IDLE_ENTER_LEN 10 +#define LTR_IDLE_ENTER_USVAL 900 +#define LTR_IDLE_ENTER_SCALE_POS 10 +#define LTR_IDLE_ENTER_SCALE_LEN 5 +#define LTR_IDLE_ENTER_SCALE 2 /* 0-1ns, 1-32ns, 2-1024ns, 3-32,768ns, 4-1,048,576ns, 5-33,554,432ns, 110-111-Not Permitted.*/ +#define LTR_IDLE_ENTER_REQUIRE_POS 15 +#define LTR_IDLE_ENTER_REQUIRE_LEN 1 +#define LTR_IDLE_ENTER_REQUIRE 1 + +#define LTR_IDLE_EXIT 0x1140 /* LTR_CTRL4, LTR latency message, only for System IDLE End. */ +#define LTR_IDLE_EXIT_POS 0 +#define LTR_IDLE_EXIT_LEN 10 +#define LTR_IDLE_EXIT_USVAL 2 +#define LTR_IDLE_EXIT_SCALE_POS 10 +#define LTR_IDLE_EXIT_SCALE_LEN 5 +#define LTR_IDLE_EXIT_SCALE 2 +#define LTR_IDLE_EXIT_REQUIRE_POS 15 +#define LTR_IDLE_EXIT_REQUIRE_LEN 1 +#define LTR_IDLE_EXIT_REQUIRE 1 + +#define LPW_CTRL 0x1188 +#define LPW_CTRL_L1SS_EN_POS 22 +#define LPW_CTRL_L1SS_EN_LEN 1 +#define LPW_CTRL_L1SS_SEL_POS 21 /* 0 - up to both CFG0x158 and reg1188 L1ss setting. 1 - up to CFG0x158 L1ss setting. */ +#define LPW_CTRL_L1SS_SEL_LEN 1 +#define LPW_CTRL_L1SS_SEL_CFG 1 +#define LPW_CTRL_ASPM_L1_CPM_POS 19 /*L1.CPM mode enable bit. Default 0, set as 1 enable this mode. clkreq pin need to connect RC*/ +#define LPW_CTRL_ASPM_L1_CPM_LEN 1 +#define LPW_CTRL_ASPM_L0S_EN_POS 17 +#define LPW_CTRL_ASPM_L0S_EN_LEN 1 +#define LPW_CTRL_ASPM_L1_EN_POS 16 +#define LPW_CTRL_ASPM_L1_EN_LEN 1 +#define LPW_CTRL_ASPM_LPW_EN_POS 9 /* application ready to enter L23. */ +#define LPW_CTRL_ASPM_LPW_EN_LEN 1 +#define LPW_CTRL_SYS_CLK_125_SEL_POS 8 /* system 125M select: 125M or 62.5MHz. 
+#define LPW_CTRL 0x1188
+#define LPW_CTRL_L1SS_EN_POS 22
+#define LPW_CTRL_L1SS_EN_LEN 1
+#define LPW_CTRL_L1SS_SEL_POS 21 /* 0 - both the CFG 0x158 and reg 0x1188 L1ss settings apply; 1 - only the CFG 0x158 L1ss setting applies. */
+#define LPW_CTRL_L1SS_SEL_LEN 1
+#define LPW_CTRL_L1SS_SEL_CFG 1
+#define LPW_CTRL_ASPM_L1_CPM_POS 19 /* L1.CPM mode enable bit. Default 0; set to 1 to enable the mode. The CLKREQ pin must be connected to the RC. */
+#define LPW_CTRL_ASPM_L1_CPM_LEN 1
+#define LPW_CTRL_ASPM_L0S_EN_POS 17
+#define LPW_CTRL_ASPM_L0S_EN_LEN 1
+#define LPW_CTRL_ASPM_L1_EN_POS 16
+#define LPW_CTRL_ASPM_L1_EN_LEN 1
+#define LPW_CTRL_ASPM_LPW_EN_POS 9 /* application ready to enter L23. */
+#define LPW_CTRL_ASPM_LPW_EN_LEN 1
+#define LPW_CTRL_SYS_CLK_125_SEL_POS 8 /* system 125M clock select: 125 MHz or 62.5 MHz. Default: 125 MHz. */
+#define LPW_CTRL_SYS_CLK_125_SEL_LEN 1
+#define LPW_CTRL_PCIE_RADM_CG_EN_POS 5 /* clock gating enable bit for the PCIe RADM clock. Default 1; set to 1 to enable gating. */
+#define LPW_CTRL_PCIE_RADM_CG_EN_LEN 1
+#define LPW_CTRL_PCIE_CORE_CG_EN_POS 4 /* clock gating enable bit for the PCIe core clock. Default 1; set to 1 to enable gating. */
+#define LPW_CTRL_PCIE_CORE_CG_EN_LEN 1
+#define LPW_CTRL_PCIE_AXI_CG_EN_POS 3 /* clock gating enable bit for the PCIe AXI clock. Default 1; set to 1 to enable gating. */
+#define LPW_CTRL_PCIE_AXI_CG_EN_LEN 1
+#define LPW_CTRL_GMAC_AXI_CG_EN_POS 2 /* clock gating enable bit for the GMAC AXI clock. Default 1; set to 1 to enable gating. */
+#define LPW_CTRL_GMAC_AXI_CG_EN_LEN 1
+#define LPW_CTRL_MDIO2APB_CG_EN_POS 1 /* clock gating enable bit for MDIO2APB. Default 1; set to 1 to enable gating. */
+#define LPW_CTRL_MDIO2APB_CG_EN_LEN 1
+#define LPW_CTRL_OTP_CLK_ON_POS 0 /* turn on before SW OTP operations. Default 1. */
+#define LPW_CTRL_OTP_CLK_ON_LEN 1
+
+#define MSI_PBA_REG 0x1300
+#define SYS_RESET_REG 0x152C
+#define SYS_RESET_POS 31
+#define SYS_RESET_LEN 1
+
+#define REG_PCIE_PSM_STATE 0x1994 /* PCIe PHY power state. */
+#define PCIE_PSM_STATE_POS 0
+#define PCIE_PSM_STATE_LEN 4
+#define PCIE_PSM_STATE_P0 2
+#define PCIE_PSM_STATE_P0s 3
+#define PCIE_PSM_STATE_P1 4
+#define PCIE_PSM_STATE_P1_CPM 5
+#define PCIE_PSM_STATE_P1_1 6
+#define PCIE_PSM_STATE_P1_2 7
+#define PCIE_PSM_STATE_P2 8
+
+#define REG_PCIE_SERDES_STATUS 0x1998
+#define PCIE_SERDES_STATUS_DRV_ON_POS 11
+#define PCIE_SERDES_STATUS_DRV_ON_LEN 1
+#define PCIE_SERDES_STATUS_RX_PD_POS 10
+#define PCIE_SERDES_STATUS_RX_PD_LEN 1
+#define PCIE_SERDES_STATUS_PI_PD_POS 9
+#define PCIE_SERDES_STATUS_PI_PD_LEN 1
+#define PCIE_SERDES_STATUS_SIGDET_ON_POS 8
+#define PCIE_SERDES_STATUS_SIGDET_ON_LEN 1
+#define PCIE_SERDES_STATUS_TX_VCM_POS 7
+#define PCIE_SERDES_STATUS_TX_VCM_LEN 1
+#define PCIE_SERDES_STATUS_RX_RT50_POS 6
+#define PCIE_SERDES_STATUS_RX_RT50_LEN 1
+#define PCIE_SERDES_STATUS_BEACON_ON_POS 5
+#define PCIE_SERDES_STATUS_BEACON_ON_LEN 1
+#define PCIE_SERDES_STATUS_PLL_ON_POS 4
+#define PCIE_SERDES_STATUS_PLL_ON_LEN 1
+#define PCIE_SERDES_STATUS_REFCLK_ON_POS 3
+#define PCIE_SERDES_STATUS_REFCLK_ON_LEN 1
+#define PCIE_SERDES_STATUS_LDO_ON_POS 2
+#define PCIE_SERDES_STATUS_LDO_ON_LEN 1
+#define PCIE_SERDES_STATUS_HW_EN_SDS_BIAS_POS 1
+#define PCIE_SERDES_STATUS_HW_EN_SDS_BIAS_LEN 1
+#define PCIE_SERDES_STATUS_HW_BIAS_ON_POS 0
+#define PCIE_SERDES_STATUS_HW_BIAS_ON_LEN 1
+
+#define REG_PCIE_SERDES_PLL 0x199C
+#define PCIE_SERDES_PLL_AUTOOFF_POS 0
+#define PCIE_SERDES_PLL_AUTOOFF_LEN 1
+
+#define NS_OF_GLB_CTL 0x1B00
+#define NS_TPID_PRO 0x1B04
+#define NS_LUT_ROMOTE0 0x1B08
+#define NS_LUT_ROMOTE1 0x1B0C
+#define NS_LUT_ROMOTE2 0x1B10
+#define NS_LUT_ROMOTE3 0x1B14
+#define NS_LUT_TARGET0 0x1B18
+#define NS_LUT_TARGET1 0x1B1C
+#define NS_LUT_TARGET2 0x1B20
+#define NS_LUT_TARGET3 0x1B24
+#define NS_LUT_SOLICITED0 0x1B28
+#define NS_LUT_SOLICITED1 0x1B2C
+#define NS_LUT_SOLICITED2 0x1B30
+#define NS_LUT_SOLICITED3 0x1B34
+#define NS_LUT_MAC_ADDR 0x1B38
+#define NS_LUT_MAC_ADDR_CTL 0x1B3C
+#define NS_LUT_TARGET4 0x1B78
+#define NS_LUT_TARGET5 0x1B7C
+#define NS_LUT_TARGET6 0x1B80
+#define NS_LUT_TARGET7 0x1B84
+
+#define NS_OF_GLB_CTL_TX_CLK_EN_POS 2
+#define NS_OF_GLB_CTL_TX_CLK_EN_LEN 1
+#define NS_OF_GLB_CTL_RX_CLK_EN_POS 1
+#define NS_OF_GLB_CTL_RX_CLK_EN_LEN 1
+#define NS_OF_GLB_CTL_EN_POS 0
+#define NS_OF_GLB_CTL_EN_ELN 1
+#define NS_TPID_PRO_STPID_POS 16
+#define NS_TPID_PRO_STPID_LEN 16
+#define NS_TPID_PRO_CTPID_POS 0
+#define NS_TPID_PRO_CTPID_LEN 16
+#define NS_LUT_DST_CMP_TYPE_POS 19
+#define NS_LUT_DST_CMP_TYPE_LEN 1
+#define NS_LUT_DST_IGNORED_POS 18
+#define NS_LUT_DST_IGNORED_LEN 1
+#define NS_LUT_REMOTE_AWARED_POS 17
+#define NS_LUT_REMOTE_AWARED_LEN 1
+#define NS_LUT_TARGET_ISANY_POS 16
+#define NS_LUT_TARGET_ISANY_LEN 1
+#define NS_LUT_MAC_ADDR_LOW_POS 0
+#define NS_LUT_MAC_ADDR_LOW_LEN 16
+
+/* RSS implementation registers, 20210817 */
+
+/* 10 RSS key registers */
+#define MGMT_RSS_KEY0 0x1020
+#define MGMT_RSS_KEY9 0x1044
+#define MGMT_RSS_KEY_REG_INC 0x4
+
+/* RSS control register */
+#define MGMT_RSS_CTRL 0x1048
+/* b31    enable
+ * b12:10 indirection table size, 2^(val+1) entries
+ * b9:8   default queue number
+ * b7:0   hash type or options
+ */
+
+/* RSS ctrl register bit definitions.
+ * [0] ipv4
+ * [1] tcpv4
+ * [2] udpv4
+ * [3] ipv6
+ * [4] tcpv6
+ * [5] udpv6
+ * [6] for ipv4 udp, hash only on the IP fields
+ * [7] for ipv6 udp, hash only on the IP fields
+ */
+#define MGMT_RSS_CTRL_OPT_POS 0
+#define MGMT_RSS_CTRL_OPT_LEN 8
+#define MGMT_RSS_CTRL_OPT_MASK 0xFF
+#define MGMT_RSS_CTRL_IPV4_EN 0x01
+#define MGMT_RSS_CTRL_TCPV4_EN 0x02
+#define MGMT_RSS_CTRL_UDPV4_EN 0x04
+#define MGMT_RSS_CTRL_IPV6_EN 0x08
+#define MGMT_RSS_CTRL_TCPV6_EN 0x10
+#define MGMT_RSS_CTRL_UDPV6_EN 0x20
+#define MGMT_RSS_CTRL_IPV4 0x0
+
+#define MGMT_RSS_CTRL_DEFAULT_Q_POS 8
+#define MGMT_RSS_CTRL_DEFAULT_Q_LEN 2
+#define MGMT_RSS_CTRL_DEFAULT_Q_MASK 0x3
+
+#define MGMT_RSS_CTRL_TBL_SIZE_POS 10
+#define MGMT_RSS_CTRL_TBL_SIZE_LEN 3
+#define MGMT_RSS_CTRL_TBL_SIZE_MASK 0x7
+
+#define MAC_RSSCR_RSSE_POS 31
+#define MAC_RSSCR_RSSE_LEN 1
+
+/* RSS indirection table (IDT) */
+#define MGMT_RSS_IDT 0x1050
+/* b0:1 entry0
+ * b2:3 entry1
+ * ...
+ */
+#define MGMT_RSS_IDT_REG_INC 4
+#define MGMT_RSS_IDT_ENTRY_PER_REG 16
+#define MGMT_RSS_IDT_ENTRY_MASK 0x3
+#define MAC_CRC_LENGTH 4
+
+/* osc_ctrl */
+#define MGMT_XST_OSC_CTRL 0x1158
+#define MGMT_XST_OSC_CTRL_XST_OSC_SEL_POS 2
+#define MGMT_XST_OSC_CTRL_XST_OSC_SEL_LEN 1
+#define MGMT_XST_OSC_CTRL_EN_OSC_POS 1
+#define MGMT_XST_OSC_CTRL_EN_OSC_LEN 1
+#define MGMT_XST_OSC_CTRL_EN_XST_POS 0
+#define MGMT_XST_OSC_CTRL_EN_XST_LEN 1
+
+/* WPI (wake packet indication) registers, 20210826 */
+#define MGMT_WPI_CTRL0 0x1160
+/* b1:0  wpi_mode: 2'b00 - normal working mode; 2'b01 - WPI write mode,
+ *       used in sleep mode; 2'b10 - WPI read mode, used after sleep and
+ *       before returning to normal working mode;
+ * b2    ram_op_done: one RAM row read done; SW can start reading after it;
+ * b3    wpi_op_done: WPI read done for the whole packet;
+ * b17:4 wpi_pkt_len: WOL packet length, in bytes;
+ * b31   wpi_fail: error status in sleep mode;
+ */
+#define MGMT_WPI_CTRL0_WPI_MODE_POS 0
+#define MGMT_WPI_CTRL0_WPI_MODE_LEN 2
+#define MGMT_WPI_CTRL0_WPI_MODE_NORMAL 0x00 /* normal working mode. */
+/* WPI write mode, used in sleep mode. */
+#define MGMT_WPI_CTRL0_WPI_MODE_WR 0x01
+/* WPI read mode, used after sleep, before normal working mode.
*/ +#define MGMT_WPI_CTRL0_WPI_MODE_RD 0x02 +#define MGMT_WPI_CTRL0_RAM_OP_DONE 0x4 +#define MGMT_WPI_CTRL0_WPI_OP_DONE 0x8 +#define MGMT_WPI_CTRL0_WPI_PKT_LEN_POS 4 +#define MGMT_WPI_CTRL0_WPI_PKT_LEN_LEN 14 +#define MGMT_WPI_CTRL0_WPI_FAIL 0x80000000 + +#define MGMT_WPI_CTRL1_DATA 0x1164 + +#define MGMT_WOL_CTRL 0x1530 + /* b0 link_chg_status 1: waken by link-change + * b1 mgk_pkt_status 1: waken by magic-packet + * b2 rwk_pkt_status 1: waken by remote patten packet + */ +#define MGMT_WOL_CTRL_WPI_LINK_CHG 1 +#define MGMT_WOL_CTRL_WPI_MGC_PKT 2 +#define MGMT_WOL_CTRL_WPI_RWK_PKT 4 +#define MGMT_WOL_CTRL_WPI_RWK_PKT_NUMBER 0x010000 + +#define MGMT_RMK_CTRL 0x1400 + +#define MGMT_SIGDET 0x17F8 +#define MGMT_SIGDET_POS 13 +#define MGMT_SIGDET_LEN 3 +#define MGMT_SIGDET_55MV 7 +#define MGMT_SIGDET_50MV 6 +#define MGMT_SIGDET_45MV 5 /* default value */ +#define MGMT_SIGDET_40MV 4 +#define MGMT_SIGDET_35MV 3 +#define MGMT_SIGDET_30MV 2 +#define MGMT_SIGDET_25MV 1 +#define MGMT_SIGDET_20MV 0 + +#define FXGMAC_MTL_REG(pdata, n, reg) \ + ((pdata)->mac_regs + MTL_Q_BASE + ((n) * MTL_Q_INC) + (reg)) + +#define FXGMAC_DMA_REG(channel, reg) ((channel)->dma_regs + (reg)) + +#define MSI_ID_RXQ0 0 +#define MSI_ID_RXQ1 1 +#define MSI_ID_RXQ2 2 +#define MSI_ID_RXQ3 3 +#define MSI_ID_TXQ0 4 + +#if 1/* msi table modify to 6 0~3 rx 4 tx 5 phy/other */ +#define MSI_ID_PHY_OTHER 5 + +#define MSIX_TBL_MAX_NUM 6 +#define MSIX_TBL_RXTX_NUM 5 + +#else +#define MSI_ID_TXQ1 5 +#define MSI_ID_TXQ2 6 +#define MSI_ID_TXQ3 7 +#define MSI_ID_SFTUE 8 +#define MSI_ID_SFTCE 9 +#define MSI_ID_SBD 10 +#define MSI_ID_PMT 11 +#define MSI_ID_PHY 12 + +#define MSIX_TBL_MAX_NUM 16 +#define MSIX_TBL_RXTX_NUM 8 +#endif +#define MSIX_TBL_BASE_ADDR 0x1200 +#define MSIX_TBL_MASK_OFFSET 0xC +#define MSIX_TBL_DATA_OFFSET 0x8 +#define MSIX_TBL_ADDR_OFFSET 0x0 + +/******************************************************************* + efuse entry. val31:0 -> offset15:0 + offset7:0 + offset15:8 + val7:0 + val15:8 + val23:16 + val31:24 +*******************************************************************/ +#define EFUSE_OP_CTRL_0 0x1500 +#define EFUSE_OP_WR_DATA_POS 16 +#define EFUSE_OP_WR_DATA_LEN 8 +#define EFUSE_OP_ADDR_POS 8 +#define EFUSE_OP_ADDR_LEN 8 +#define EFUSE_OP_START_POS 2 +#define EFUSE_OP_START_LEN 1 +#define EFUSE_OP_MODE_POS 0 +#define EFUSE_OP_MODE_LEN 2 +#define EFUSE_OP_MODE_ROW_WRITE 0x0 +#define EFUSE_OP_MODE_ROW_READ 0x1 +#define EFUSE_OP_MODE_AUTO_LOAD 0x2 +#define EFUSE_OP_MODE_READ_BLANK 0x3 + +#define EFUSE_OP_CTRL_1 0x1504 +#define EFUSE_OP_RD_DATA_POS 24 +#define EFUSE_OP_RD_DATA_LEN 8 +#define EFUSE_OP_BIST_ERR_ADDR_POS 16 +#define EFUSE_OP_BIST_ERR_ADDR_LEN 8 +#define EFUSE_OP_BIST_ERR_CNT_POS 8 +#define EFUSE_OP_BIST_ERR_CNT_LEN 8 +#define EFUSE_OP_PGM_PASS_POS 2 +#define EFUSE_OP_PGM_PASS_LEN 1 +#define EFUSE_OP_DONE_POS 1 +#define EFUSE_OP_DONE_LEN 1 + +/* efuse layout refer to http://redmine.motor-comm.com/issues/3856 */ +#define EFUSE_FISRT_UPDATE_ADDR 255 +#define EFUSE_SECOND_UPDATE_ADDR 209 +#define FUXI_EFUSE_MAX_ENTRY 39 +#define FUXI_EFUSE_MAX_ENTRY_UNDER_LED_COMMON 24 +#define EFUSE_PATCH_ADDR_START_BYTE 0 +#define EFUSE_PATCH_DATA_START_BYTE 2 +#define EFUSE_REGION_A_B_LENGTH 18 +#define EFUSE_EACH_PATH_SIZE 6 + +#define EFUSE_REVID_REGISTER 0x0008 +#define EFUSE_SUBSYS_REGISTER 0x002C +/* mac[5]->bit7:0, mac[4]->bit15:8, mac[3]->bit23:16, mac[2]->bit31:24. */ +#define MACA0LR_FROM_EFUSE 0x1520 +/* mac[1]->bit7:0, mac[0]->bit15:8. mac[6] = + * {00, 01, 02, 03, 04, 05} 00-01-02-03-04-05. 
+ */ +#define MACA0HR_FROM_EFUSE 0x1524 + +#define EFUSE_LED_ADDR 0x00 +#define EFUSE_LED_POS 0 +#define EFUSE_LED_LEN 5 +#define EFUSE_OOB_ADDR 0x07 +#define EFUSE_OOB_POS 2 +#define EFUSE_OOB_LEN 1 +#define EFUSE_LED_SOLUTION0 0 +#define EFUSE_LED_SOLUTION1 1 +#define EFUSE_LED_SOLUTION2 2 +#define EFUSE_LED_SOLUTION3 3 +#define EFUSE_LED_SOLUTION4 4 +#define EFUSE_LED_COMMON_SOLUTION 0x1F + +/******************** Below for pcie configuration register. *********************/ +#define REG_PCI_VENDOR_ID 0x0 /* WORD reg */ +#define REG_PCI_DEVICE_ID 0x2 /* WORD reg */ +#define PCI_DEVICE_ID_FUXI 0x6801 + +#define REG_PCI_COMMAND 0x4 +#define PCI_COMMAND_IO_SPACE_POS 0 +#define PCI_COMMAND_IO_SPACE_LEN 1 +#define PCI_COMAMND_MEM_SPACE_POS 1 +#define PCI_COMAMND_MEM_SPACE_LEN 1 +#define PCI_COMMAND_MASTER_POS 2 +#define PCI_COMMAND_MASTER_LEN 1 +#define PCI_COMMAND_DIS_INT_POS 10 +#define PCI_COMMAND_DIS_INT_LEN 1 +#define PCI_COMMAND_INTX_STATUS_POS 19 +#define PCI_COMMAND_INTX_STATUS_LEN 1 + +#define REG_PCI_REVID 0x8 /* BYTE reg */ +#define REG_PCI_PROGRAM_INTF 0x9 /* BYTE reg */ /* PCI Class Program Interface */ +#define REG_PCI_SUB_CLASS 0xA /* BYTE reg */ +#define REG_PCI_BASE_CLASS 0xB /* BYTE reg */ +#define REG_CACHE_LINE_SIZE 0xC + + +#define REG_MEM_BASE 0x10 /* DWORD or QWORD reg */ +#define REG_MEM_BASE_HI 0x14 /* DWORD or QWORD reg */ + +#define REG_IO_BASE 0x20 /* DWORD reg */ + +#define REG_PCI_SUB_VENDOR_ID 0x2C /* WORD reg */ +#define REG_PCI_SUB_DEVICE_ID 0x2E /* WORD reg */ + +#define REG_INT_LINE 0x3C /* BYTE reg */ + +#define REG_PM_STATCTRL 0x44 /* WORD reg */ +#define PM_STATCTRL_PWR_STAT_POS 0 +#define PM_STATCTRL_PWR_STAT_LEN 2 +#define PM_STATCTRL_PWR_STAT_D3 3 +#define PM_STATCTRL_PWR_STAT_D0 0 +#define PM_CTRLSTAT_PME_EN_POS 8 +#define PM_CTRLSTAT_PME_EN_LEN 1 +#define PM_CTRLSTAT_DATA_SEL_POS 9 +#define PM_CTRLSTAT_DATA_SEL_LEN 4 +#define PM_CTRLSTAT_DATA_SCAL_POS 13 +#define PM_CTRLSTAT_DATA_SCAL_LEN 2 +#define PM_CTRLSTAT_PME_STAT_POS 15 +#define PM_CTRLSTAT_PME_STAT_LEN 1 + +#define REG_DEVICE_CTRL1 0x78 +#define DEVICE_CTRL1_CONTROL_POS 0 +#define DEVICE_CTRL1_CONTROL_LEN 16 +#define DEVICE_CTRL1_STATUS_POS 16 +#define DEVICE_CTRL1_STATUS_LEN 16 + +#define REG_PCI_LINK_CTRL 0x80 +#define PCI_LINK_CTRL_CONTROL_POS 0 +#define PCI_LINK_CTRL_CONTROL_LEN 16 +#define PCI_LINK_CTRL_ASPM_CONTROL_POS 0 +#define PCI_LINK_CTRL_ASPM_CONTROL_LEN 2 +#define PCI_LINK_CTRL_L1_STATUS 2 +#define PCI_LINK_CTRL_CONTROL_CPM_POS 8 /*L1.CPM mode enable bit. Default 0, set as 1 enable this mode. clkreq pin need to connect RC*/ +#define PCI_LINK_CTRL_CONTROL_CPM_LEN 1 +#define PCI_LINK_CTRL_STATUS_POS 16 +#define PCI_LINK_CTRL_STATUS_LEN 16 + +#define REG_DEVICE_CTRL2 0x98 /* WORD reg */ +#define DEVICE_CTRL2_LTR_EN_POS 10 /* Enable from BIOS side. 
*/ +#define DEVICE_CTRL2_LTR_EN_LEN 1 + +#define REG_MSIX_CAPABILITY 0xB0 + +/* ASPM L1ss PM Substates */ +#define REG_ASPM_L1SS_CAP 0x154 /* Capabilities Register */ +#define ASPM_L1SS_CAP_PCIPM_L1_2_POS 0 /* PCI-PM L1.2 Supported */ +#define ASPM_L1SS_CAP_PCIPM_L1_2_LEN 1 +#define ASPM_L1SS_CAP_PCIPM_L1_1_POS 1 /* PCI-PM L1.1 Supported */ +#define ASPM_L1SS_CAP_PCIPM_L1_1_LEN 1 +#define ASPM_L1SS_CAP_ASPM_L1_2_POS 2 /* ASPM L1.2 Supported */ +#define ASPM_L1SS_CAP_ASPM_L1_2_LEN 1 +#define ASPM_L1SS_CAP_ASPM_L1_1_POS 3 /* ASPM L1.1 Supported */ +#define ASPM_L1SS_CAP_ASPM_L1_1_LEN 1 +#define ASPM_L1SS_CAP_L1_PM_SS_POS 4 /* L1 PM Substates Supported */ +#define ASPM_L1SS_CAP_L1_PM_SS_LEN 1 +#define ASPM_L1SS_CAP_CM_RESTORE_TIME_POS 8 /* Port Common_Mode_Restore_Time */ +#define ASPM_L1SS_CAP_CM_RESTORE_TIME_LEN 8 +#define ASPM_L1SS_CAP_P_PWR_ON_SCALE_POS 16 /* Port T_POWER_ON scale */ +#define ASPM_L1SS_CAP_P_PWR_ON_SCALE_LEN 2 +#define ASPM_L1SS_CAP_P_PWR_ON_VALUE_POS 19 /* Port T_POWER_ON value */ +#define ASPM_L1SS_CAP_P_PWR_ON_VALUE_LEN 5 + +#define REG_ASPM_L1SS_CTRL1 0x158 +#define REG_ASPM_L1SS_CTRL1_VALUE 0x405e000f +#define ASPM_L1SS_CTRL1_L12_PCIPM_EN_POS 0 /* L1.2 in D3 state. */ +#define ASPM_L1SS_CTRL1_L12_PCIPM_EN_LEN 1 +#define ASPM_L1SS_CTRL1_L11_PCIPM_EN_POS 1 /* L1.1 in D3 state. */ +#define ASPM_L1SS_CTRL1_L11_PCIPM_EN_LEN 1 +#define ASPM_L1SS_CTRL1_L12_EN_POS 2 +#define ASPM_L1SS_CTRL1_L12_EN_LEN 1 +#define ASPM_L1SS_CTRL1_L11_EN_POS 3 +#define ASPM_L1SS_CTRL1_L11_EN_LEN 1 +#define ASPM_L1SS_CTRL1_CM_RESTORE_TIME_POS 8 /* Common_Mode_Restore_Time */ +#define ASPM_L1SS_CTRL1_CM_RESTORE_TIME_LEN 8 +#define ASPM_L1SS_CTRL1_LTR_L12_TH_VALUE_POS 16 /* LTR_L1.2_THRESHOLD_Value */ +#define ASPM_L1SS_CTRL1_LTR_L12_TH_VALUE_LEN 10 +#define ASPM_L1SS_CTRL1_L12_TH_SCALE_POS 29 /* LTR_L1.2_THRESHOLD_Scale */ +#define ASPM_L1SS_CTRL1_L12_TH_SCALE_LEN 3 + +#define REG_ASPM_L1SS_CTL2 0x15c /* Control 2 Register */ + +#define REG_ASPM_CONTROL 0x70C +#define ASPM_L1_IDLE_THRESHOLD_POS 27 +#define ASPM_L1_IDLE_THRESHOLD_LEN 3 +#define ASPM_L1_IDLE_THRESHOLD_1US 0 +#define ASPM_L1_IDLE_THRESHOLD_2US 1 +#define ASPM_L1_IDLE_THRESHOLD_4US 2 +#define ASPM_L1_IDLE_THRESHOLD_8US 3 /* default value after reset. */ +#define ASPM_L1_IDLE_THRESHOLD_16US 4 +#define ASPM_L1_IDLE_THRESHOLD_32US 5 +#define ASPM_L1_IDLE_THRESHOLD_64US 6 + +#define REG_POWER_EIOS 0x710 +#define POWER_EIOS_POS 7 +#define POWER_EIOS_LEN 1 + +#endif /* __FUXI_GMAC_REG_H__ */ diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h new file mode 100644 index 0000000000000000000000000000000000000000..ea01ebdadc4e373ab6c7b9efdb1815443a9d392f --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h @@ -0,0 +1,934 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. */ + +#ifndef __FUXI_GMAC_H__ +#define __FUXI_GMAC_H__ + +#include "fuxi-os.h" + +/* For fpga before 20210507 */ +#define FXGMAC_FPGA_VER_B4_0507 0 +#define FXGMAC_FPGA_VER_20210507 1 + +#define FXGMAC_DRV_NAME "yt6801" + +#define FXGMAC_DRV_DESC "Motorcomm FUXI GMAC Driver" + +#define FUXI_MAC_REGS_OFFSET 0x2000 + +/* 1: in normal D0 state, turn off ephy link change interrupt. 
*/
+#define FUXI_EPHY_INTERRUPT_D0_OFF 0
+/* 1: when receive buffers run out, create new RBDs and receive buffers;
+ * but the new RBDs would need to be contiguous with the initialized RBDs,
+ * so the feature is kept disabled.
+ */
+#define FUXI_ALLOC_NEW_RECBUFFER 0
+
+#define RESUME_MAX_TIME 3000000
+#define PHY_LINK_TIMEOUT 3000
+#define ESD_RESET_MAXIMUM 0
+
+#define REGWR_RETRY_MAXIMUM 2600
+#define PCIE_LINKDOWN_VALUE 0xFFFFFFFF
+
+#define FXGMAC_MSIX_Q_VECTORS 4
+
+#define FXGMAC_IS_CHANNEL_WITH_TX_IRQ(chId) (0 == (chId) ? 1 : 0)
+
+/* flags for the IPv6 NS offload address: link-local or global unicast */
+#define FXGMAC_NS_IFA_LOCAL_LINK 1
+#define FXGMAC_NS_IFA_GLOBAL_UNICAST 2
+
+#define FXGMAX_ASPM_WAR_EN
+/* Descriptor related parameters */
+#if FXGMAC_TX_HANG_TIMER_EN
+#define FXGMAC_TX_DESC_CNT 1024
+#else
+/* 256 to make sure the tx ring fits in the 4k range when
+ * FXGMAC_TX_HANG_TIMER_EN is 0
+ */
+#define FXGMAC_TX_DESC_CNT 256
+#endif
+#define FXGMAC_TX_DESC_MIN_FREE (FXGMAC_TX_DESC_CNT >> 3)
+#define FXGMAC_TX_DESC_MAX_PROC (FXGMAC_TX_DESC_CNT >> 1)
+#define FXGMAC_RX_DESC_CNT 1024
+#define FXGMAC_RX_DESC_MAX_DIRTY (FXGMAC_RX_DESC_CNT >> 3)
+
+/* Descriptors required for the maximum contiguous TSO/GSO packet */
+#define FXGMAC_TX_MAX_SPLIT ((GSO_MAX_SIZE / FXGMAC_TX_MAX_BUF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for an SKB */
+#define FXGMAC_TX_MAX_DESC_NR (MAX_SKB_FRAGS + FXGMAC_TX_MAX_SPLIT + 2)
+
+#define FXGMAC_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define FXGMAC_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define FXGMAC_RX_BUF_ALIGN 64
+
+/* Maximum size for splitting the header data.
+ * Keep in sync with FXGMAC_SKB_ALLOC_SIZE.
+ * 3'b000: 64 bytes, 3'b001: 128 bytes
+ * 3'b010: 256 bytes, 3'b011: 512 bytes
+ * 3'b100: 1023 bytes, 3'b101-3'b111: reserved
+ */
+#define FXGMAC_SPH_HDSMS_SIZE 3
+#define FXGMAC_SKB_ALLOC_SIZE 512
+
+/* The generic Linux driver sets the MAX_FIFO size to 131072; here the
+ * same value as the Windows driver is used.
+ */
+#define FXGMAC_MAX_FIFO 81920
+
+#define FXGMAC_MAX_DMA_CHANNELS FXGMAC_MSIX_Q_VECTORS
+#define FXGMAC_DMA_STOP_TIMEOUT 5
+#define FXGMAC_DMA_INTERRUPT_MASK 0x31c7
+#define FXGMAC_MAX_DMA_CHANNELS_PLUS_1TX (FXGMAC_MAX_DMA_CHANNELS + 1)
+
+/* Default coalescing parameters */
+#define FXGMAC_INIT_DMA_TX_USECS INT_MOD_IN_US
+#define FXGMAC_INIT_DMA_TX_FRAMES 25
+#define FXGMAC_INIT_DMA_RX_USECS INT_MOD_IN_US
+#define FXGMAC_INIT_DMA_RX_FRAMES 25
+#define FXGMAC_MAX_DMA_RIWT 0xff
+#define FXGMAC_MIN_DMA_RIWT 0x01
+
+/* Flow control queue count */
+#define FXGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* System clock is 125 MHz */
+#define FXGMAC_SYSCLOCK 125000000
+
+/* Maximum MAC address hash table size (256 bits, i.e. 8 32-bit words) */
+#define FXGMAC_MAC_HASH_TABLE_SIZE 8
+
+/* WOL pattern settings */
+#define MAX_PATTERN_SIZE 128 /* pattern length */
+#define MAX_PATTERN_COUNT 16 /* pattern count */
+#define MAX_LPP_ARP_OFFLOAD_COUNT 1
+#define MAX_LPP_NS_OFFLOAD_COUNT 2
+
+#define MAX_WPI_LENGTH_SIZE 1536 /* WPI packet. */
+#define PM_WAKE_PKT_ALIGN 8 /* try to use a 64-bit boundary. */
+
+/* Receive Side Scaling */
+#define FXGMAC_RSS_HASH_KEY_SIZE 40
+#define FXGMAC_RSS_MAX_TABLE_SIZE 128
+#define FXGMAC_RSS_LOOKUP_TABLE_TYPE 0
+#define FXGMAC_RSS_HASH_KEY_TYPE 1
+#define MAX_MSI_COUNT 16 /* max number of MSI/MSI-X vectors supported.
*/ + +#define FXGMAC_STD_PACKET_MTU 1500 +#define FXGMAC_JUMBO_PACKET_MTU 9014 + +#define NIC_MAX_TCP_OFFLOAD_SIZE 7300 +#define NIC_MIN_LSO_SEGMENT_COUNT 2 + +/* power management */ +#define FXGMAC_POWER_STATE_DOWN 0 +#define FXGMAC_POWER_STATE_UP 1 + +struct wol_bitmap_pattern { + u32 flags; + u32 pattern_size; + u32 mask_size; + u8 mask_info[MAX_PATTERN_SIZE / 8]; + u8 pattern_info[MAX_PATTERN_SIZE]; + u8 pattern_offset; + u16 pattern_crc; +}; + +struct led_setting { + u32 s0_led_setting[5]; + u32 s3_led_setting[5]; + u32 s5_led_setting[5]; + u32 disable_led_setting[5]; +}; + +typedef struct led_setting LED_SETTING; +typedef struct wol_bitmap_pattern WOL_BITMAP_PATTERN; + +/* note, maybe we should refer to NDIS_PM_WAKE_REASON_TYPE + * to avoid duplication definition.... + */ +typedef enum { + WAKE_REASON_NONE = 0, + WAKE_REASON_MAGIC, + WAKE_REASON_PATTERNMATCH, + WAKE_REASON_LINK, + WAKE_REASON_TCPSYNV4, + WAKE_REASON_TCPSYNV6, + /* for wake up method like Link-change, for that, + * GMAC cannot identify and need more checking. + */ + WAKE_REASON_TBD, + WAKE_REASON_HW_ERR, +} WAKE_REASON; + +/* Helper macro for descriptor handling + * Always use FXGMAC_GET_DESC_DATA to access the descriptor data + */ +#define FXGMAC_GET_DESC_DATA(ring, idx) ((ring)->desc_data_head + (idx)) +#define FXGMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1)) + +struct fxgmac_pdata; + +enum fxgmac_int { + FXGMAC_INT_DMA_CH_SR_TI, + FXGMAC_INT_DMA_CH_SR_TPS, + FXGMAC_INT_DMA_CH_SR_TBU, + FXGMAC_INT_DMA_CH_SR_RI, + FXGMAC_INT_DMA_CH_SR_RBU, + FXGMAC_INT_DMA_CH_SR_RPS, + FXGMAC_INT_DMA_CH_SR_TI_RI, + FXGMAC_INT_DMA_CH_SR_FBE, + FXGMAC_INT_DMA_ALL, +}; + +struct fxgmac_stats { + /* MMC TX counters */ + u64 txoctetcount_gb; + u64 txframecount_gb; + u64 txbroadcastframes_g; + u64 txmulticastframes_g; + u64 tx64octets_gb; + u64 tx65to127octets_gb; + u64 tx128to255octets_gb; + u64 tx256to511octets_gb; + u64 tx512to1023octets_gb; + u64 tx1024tomaxoctets_gb; + u64 txunicastframes_gb; + u64 txmulticastframes_gb; + u64 txbroadcastframes_gb; + u64 txunderflowerror; + u64 txsinglecollision_g; + u64 txmultiplecollision_g; + u64 txdeferredframes; + u64 txlatecollisionframes; + u64 txexcessivecollisionframes; + u64 txcarriererrorframes; + u64 txoctetcount_g; + u64 txframecount_g; + u64 txexcessivedeferralerror; + u64 txpauseframes; + u64 txvlanframes_g; + u64 txoversize_g; + + /* MMC RX counters */ + u64 rxframecount_gb; + u64 rxoctetcount_gb; + u64 rxoctetcount_g; + u64 rxbroadcastframes_g; + u64 rxmulticastframes_g; + u64 rxcrcerror; + u64 rxalignerror; + u64 rxrunterror; + u64 rxjabbererror; + u64 rxundersize_g; + u64 rxoversize_g; + u64 rx64octets_gb; + u64 rx65to127octets_gb; + u64 rx128to255octets_gb; + u64 rx256to511octets_gb; + u64 rx512to1023octets_gb; + u64 rx1024tomaxoctets_gb; + u64 rxunicastframes_g; + u64 rxlengtherror; + u64 rxoutofrangetype; + u64 rxpauseframes; + u64 rxfifooverflow; + u64 rxvlanframes_gb; + u64 rxwatchdogerror; + u64 rxreceiveerrorframe; + u64 rxcontrolframe_g; + + /* Extra counters */ + u64 tx_tso_packets; + u64 rx_split_header_packets; + u64 tx_process_stopped; + u64 rx_process_stopped; + u64 tx_buffer_unavailable; + u64 rx_buffer_unavailable; + u64 fatal_bus_error; + u64 tx_vlan_packets; + u64 rx_vlan_packets; + u64 napi_poll_isr; + u64 napi_poll_txtimer; + u64 cnt_alive_txtimer; + + u64 ephy_poll_timer_cnt; + u64 mgmt_int_isr; +}; + +struct fxgmac_ring_buf { + struct sk_buff *skb; + DMA_ADDR_T skb_dma; + unsigned int skb_len; +}; + +/* Common Tx and Rx DMA hardware descriptor */ +struct 
fxgmac_dma_desc { + __le32 desc0; + __le32 desc1; + __le32 desc2; + __le32 desc3; +}; + +/* Page allocation related values */ +struct fxgmac_page_alloc { + struct page *pages; + unsigned int pages_len; + unsigned int pages_offset; + DMA_ADDR_T pages_dma; +}; + +/* Ring entry buffer data */ +struct fxgmac_buffer_data { + struct fxgmac_page_alloc pa; + struct fxgmac_page_alloc pa_unmap; + + DMA_ADDR_T dma_base; + unsigned long dma_off; + unsigned int dma_len; +}; + +/* Tx-related desc data */ +struct fxgmac_tx_desc_data { + unsigned int packets; /* BQL packet count */ + unsigned int bytes; /* BQL byte count */ +}; + +/* Rx-related desc data */ +struct fxgmac_rx_desc_data { + struct fxgmac_buffer_data hdr; /* Header locations */ + struct fxgmac_buffer_data buf; /* Payload locations */ + + unsigned short hdr_len; /* Length of received header */ + unsigned short len; /* Length of received packet */ +}; + +struct fxgmac_pkt_info { + struct sk_buff *skb; + + unsigned int attributes; + + unsigned int errors; + + /* descriptors needed for this packet */ + unsigned int desc_count; + unsigned int length; + + unsigned int tx_packets; + unsigned int tx_bytes; + + unsigned int header_len; + unsigned int tcp_header_len; + unsigned int tcp_payload_len; + unsigned short mss; + + unsigned short vlan_ctag; + + u64 rx_tstamp; + + u32 rss_hash; + RSS_HASH_TYPE rss_hash_type; +}; + +struct fxgmac_desc_data { + /* dma_desc: Virtual address of descriptor + * dma_desc_addr: DMA address of descriptor + */ + struct fxgmac_dma_desc *dma_desc; + DMA_ADDR_T dma_desc_addr; + + /* skb: Virtual address of SKB + * skb_dma: DMA address of SKB data + * skb_dma_len: Length of SKB DMA area + */ + struct sk_buff *skb; + DMA_ADDR_T skb_dma; + unsigned int skb_dma_len; + + /* Tx/Rx -related data */ + struct fxgmac_tx_desc_data tx; + struct fxgmac_rx_desc_data rx; + + unsigned int mapped_as_page; + + /* Incomplete receive save location. If the budget is exhausted + * or the last descriptor (last normal descriptor or a following + * context descriptor) has not been DMA'd yet the current state + * of the receive processing needs to be saved. 
+ */ + unsigned int state_saved; + struct { + struct sk_buff *skb; + unsigned int len; + unsigned int error; + } state; +}; + +struct fxgmac_ring { + /* Per packet related information */ + struct fxgmac_pkt_info pkt_info; + + /* Virtual/DMA addresses of DMA descriptor list and the total count */ + struct fxgmac_dma_desc *dma_desc_head; + DMA_ADDR_T dma_desc_head_addr; + unsigned int dma_desc_count; + + /* Array of descriptor data corresponding the DMA descriptor + * (always use the FXGMAC_GET_DESC_DATA macro to access this data) + */ + struct fxgmac_desc_data *desc_data_head; + + /* Page allocation for RX buffers */ + struct fxgmac_page_alloc rx_hdr_pa; + struct fxgmac_page_alloc rx_buf_pa; + + /* Ring index values + * cur - Tx: index of descriptor to be used for current transfer + * Rx: index of descriptor to check for packet availability + * dirty - Tx: index of descriptor to check for transfer complete + * Rx: index of descriptor to check for buffer reallocation + */ + unsigned int cur; + unsigned int dirty; + + /* Coalesce frame count used for interrupt bit setting */ + unsigned int coalesce_count; + + struct { + unsigned int xmit_more; + unsigned int queue_stopped; + unsigned short cur_mss; + unsigned short cur_vlan_ctag; + } tx; +} ____cacheline_aligned; + +struct fxgmac_channel { + char name[16]; + + /* Address of private data area for device */ + struct fxgmac_pdata *pdata; + + /* Queue index and base address of queue's DMA registers */ + unsigned int queue_index; + + IOMEM dma_regs; + + /* Per channel interrupt irq number */ + u32 dma_irq; + FXGMAC_CHANNEL_OF_PLATFORM expansion; + + unsigned int saved_ier; + + unsigned int tx_timer_active; + + struct fxgmac_ring *tx_ring; + struct fxgmac_ring *rx_ring; +} ____cacheline_aligned; + +struct fxphy_ag_adv { + u8 auto_neg_en : 1; + u8 full_1000m : 1; + u8 half_1000m : 1; + u8 full_100m : 1; + u8 half_100m : 1; + u8 full_10m : 1; + u8 half_10m : 1; +}; + +struct fxgmac_desc_ops { + int (*alloc_channles_and_rings)(struct fxgmac_pdata *pdata); + void (*free_channels_and_rings)(struct fxgmac_pdata *pdata); + int (*map_tx_skb)(struct fxgmac_channel *channel, struct sk_buff *skb); + int (*map_rx_buffer)(struct fxgmac_pdata *pdata, + struct fxgmac_ring *ring, + struct fxgmac_desc_data *desc_data); + void (*unmap_desc_data)(struct fxgmac_pdata *pdata, + struct fxgmac_desc_data *desc_data); + void (*tx_desc_init)(struct fxgmac_pdata *pdata); + void (*rx_desc_init)(struct fxgmac_pdata *pdata); +}; + +struct fxgmac_hw_ops { + int (*init)(struct fxgmac_pdata *pdata); + int (*exit)(struct fxgmac_pdata *pdata); + void (*save_nonstick_reg)(struct fxgmac_pdata *pdata); + void (*restore_nonstick_reg)(struct fxgmac_pdata *pdata); + int (*set_gmac_register)(struct fxgmac_pdata *pdata, u8 *address, + unsigned int data); + u32 (*get_gmac_register)(struct fxgmac_pdata *pdata, u8 *address); + void (*esd_restore_pcie_cfg)(struct fxgmac_pdata *pdata); + + int (*tx_complete)(struct fxgmac_dma_desc *dma_desc); + + void (*enable_tx)(struct fxgmac_pdata *pdata); + void (*disable_tx)(struct fxgmac_pdata *pdata); + void (*enable_rx)(struct fxgmac_pdata *pdata); + void (*disable_rx)(struct fxgmac_pdata *pdata); + void (*enable_channel_rx)(struct fxgmac_pdata *pdata, + unsigned int queue); + + int (*enable_int)(struct fxgmac_channel *channel, + enum fxgmac_int int_id); + int (*disable_int)(struct fxgmac_channel *channel, + enum fxgmac_int int_id); + void (*set_interrupt_moderation)(struct fxgmac_pdata *pdata); + void (*enable_msix_rxtxinterrupt)(struct fxgmac_pdata 
*pdata); + void (*disable_msix_interrupt)(struct fxgmac_pdata *pdata); + void (*enable_msix_rxtxphyinterrupt)(struct fxgmac_pdata *pdata); + void (*enable_msix_one_interrupt)(struct fxgmac_pdata *pdata, + u32 intid); + void (*disable_msix_one_interrupt)(struct fxgmac_pdata *pdata, + u32 intid); + bool (*enable_mgm_interrupt)(struct fxgmac_pdata *pdata); + bool (*disable_mgm_interrupt)(struct fxgmac_pdata *pdata); + + void (*dev_xmit)(struct fxgmac_channel *channel); + int (*dev_read)(struct fxgmac_channel *channel); + + int (*set_mac_address)(struct fxgmac_pdata *pdata, u8 *addr); + int (*set_mac_hash)(struct fxgmac_pdata *pdata); + int (*config_rx_mode)(struct fxgmac_pdata *pdata); + int (*enable_rx_csum)(struct fxgmac_pdata *pdata); + int (*disable_rx_csum)(struct fxgmac_pdata *pdata); + void (*config_tso)(struct fxgmac_pdata *pdata); + + /* For MII speed configuration */ + int (*config_mac_speed)(struct fxgmac_pdata *pdata); + int (*set_xlgmii_2500_speed)(struct fxgmac_pdata *pdata); + int (*set_xlgmii_1000_speed)(struct fxgmac_pdata *pdata); + int (*set_xlgmii_100_speed)(struct fxgmac_pdata *pdata); + int (*get_xlgmii_phy_status)(struct fxgmac_pdata *pdata, u32 *speed, + bool *link_up, + bool link_up_wait_to_complete); + + /* For descriptor related operation */ + void (*tx_desc_init)(struct fxgmac_channel *channel); + void (*rx_desc_init)(struct fxgmac_channel *channel); + void (*tx_desc_reset)(struct fxgmac_desc_data *desc_data); + void (*rx_desc_reset)(struct fxgmac_pdata *pdata, + struct fxgmac_desc_data *desc_data, + unsigned int index); + int (*is_last_desc)(struct fxgmac_dma_desc *dma_desc); + int (*is_context_desc)(struct fxgmac_dma_desc *dma_desc); + void (*tx_start_xmit)(struct fxgmac_channel *channel, + struct fxgmac_ring *ring); + void (*set_pattern_data)(struct fxgmac_pdata *pdata); + void (*config_wol)(struct fxgmac_pdata *pdata, int en); + + /* For Flow Control */ + int (*config_tx_flow_control)(struct fxgmac_pdata *pdata); + int (*config_rx_flow_control)(struct fxgmac_pdata *pdata); + + /* For Jumbo Frames */ + int (*config_mtu)(struct fxgmac_pdata *pdata); + int (*enable_jumbo)(struct fxgmac_pdata *pdata); + + /* For Vlan related config */ + int (*enable_tx_vlan)(struct fxgmac_pdata *pdata); + int (*disable_tx_vlan)(struct fxgmac_pdata *pdata); + int (*enable_rx_vlan_stripping)(struct fxgmac_pdata *pdata); + int (*disable_rx_vlan_stripping)(struct fxgmac_pdata *pdata); + int (*enable_rx_vlan_filtering)(struct fxgmac_pdata *pdata); + int (*disable_rx_vlan_filtering)(struct fxgmac_pdata *pdata); + int (*update_vlan_hash_table)(struct fxgmac_pdata *pdata); + + /* For RX coalescing */ + int (*config_rx_coalesce)(struct fxgmac_pdata *pdata); + int (*config_tx_coalesce)(struct fxgmac_pdata *pdata); + unsigned int (*usec_to_riwt)(struct fxgmac_pdata *pdata, + unsigned int usec); + unsigned int (*riwt_to_usec)(struct fxgmac_pdata *pdata, + unsigned int riwt); + + /* For RX and TX threshold config */ + int (*config_rx_threshold)(struct fxgmac_pdata *pdata, + unsigned int val); + int (*config_tx_threshold)(struct fxgmac_pdata *pdata, + unsigned int val); + + /* For RX and TX Store and Forward Mode config */ + int (*config_rsf_mode)(struct fxgmac_pdata *pdata, unsigned int val); + int (*config_tsf_mode)(struct fxgmac_pdata *pdata, unsigned int val); + + /* For TX DMA Operate on Second Frame config */ + int (*config_osp_mode)(struct fxgmac_pdata *pdata); + + /* For RX and TX PBL config */ + int (*config_rx_pbl_val)(struct fxgmac_pdata *pdata); + int (*get_rx_pbl_val)(struct 
fxgmac_pdata *pdata); + int (*config_tx_pbl_val)(struct fxgmac_pdata *pdata); + int (*get_tx_pbl_val)(struct fxgmac_pdata *pdata); + int (*config_pblx8)(struct fxgmac_pdata *pdata); + + /* For MMC statistics */ + void (*rx_mmc_int)(struct fxgmac_pdata *pdata); + void (*tx_mmc_int)(struct fxgmac_pdata *pdata); + void (*read_mmc_stats)(struct fxgmac_pdata *pdata); + bool (*update_stats_counters)(struct fxgmac_pdata *pdata, + bool ephy_check_en); + + /* For Receive Side Scaling */ + int (*enable_rss)(struct fxgmac_pdata *pdata); + int (*disable_rss)(struct fxgmac_pdata *pdata); + u32 (*get_rss_options)(struct fxgmac_pdata *pdata); + int (*set_rss_options)(struct fxgmac_pdata *pdata); + int (*set_rss_hash_key)(struct fxgmac_pdata *pdata, const u8 *key); + int (*set_rss_lookup_table)(struct fxgmac_pdata *pdata, + const u32 *table); + + /*For Offload*/ + void (*set_arp_offload)(struct fxgmac_pdata *pdata, + unsigned char *ip_addr); + int (*enable_arp_offload)(struct fxgmac_pdata *pdata); + int (*disable_arp_offload)(struct fxgmac_pdata *pdata); + + /*NS offload*/ + int (*set_ns_offload)(struct fxgmac_pdata *pdata, unsigned int index, + unsigned char *remote_addr, + unsigned char *solicited_addr, + unsigned char *target_addr1, + unsigned char *target_addr2, + unsigned char *mac_addr); + int (*enable_ns_offload)(struct fxgmac_pdata *pdata); + int (*disable_ns_offload)(struct fxgmac_pdata *pdata); + + int (*enable_wake_magic_pattern)(struct fxgmac_pdata *pdata); + int (*disable_wake_magic_pattern)(struct fxgmac_pdata *pdata); + + int (*enable_wake_link_change)(struct fxgmac_pdata *pdata); + int (*disable_wake_link_change)(struct fxgmac_pdata *pdata); + + int (*check_wake_pattern_fifo_pointer)(struct fxgmac_pdata *pdata); + int (*set_wake_pattern)(struct fxgmac_pdata *pdata, + struct wol_bitmap_pattern *wol_pattern, + u32 pattern_cnt); + int (*enable_wake_pattern)(struct fxgmac_pdata *pdata); + int (*disable_wake_pattern)(struct fxgmac_pdata *pdata); + int (*set_wake_pattern_mask)(struct fxgmac_pdata *pdata, + u32 filter_index, u8 register_index, + u32 Data); +#if defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN + void (*get_wake_packet_indication)(struct fxgmac_pdata *pdata, + int *wake_reason, + u32 *wake_pattern_number, + u8 *wpi_buf, u32 buf_size, + u32 *packet_size); + void (*enable_wake_packet_indication)(struct fxgmac_pdata *pdata, + int en); +#endif + + void (*reset_phy)(struct fxgmac_pdata *pdata); + /*for release phy, phy write and read, and provide clock to GMAC. 
*/ + void (*release_phy)(struct fxgmac_pdata *pdata); + void (*enable_phy_check)(struct fxgmac_pdata *pdata); + void (*disable_phy_check)(struct fxgmac_pdata *pdata); + void (*setup_cable_loopback)(struct fxgmac_pdata *pdata); + void (*clean_cable_loopback)(struct fxgmac_pdata *pdata); + void (*disable_phy_sleep)(struct fxgmac_pdata *pdata); + void (*enable_phy_sleep)(struct fxgmac_pdata *pdata); + void (*phy_green_ethernet)(struct fxgmac_pdata *pdata); + void (*phy_eee_feature)(struct fxgmac_pdata *pdata); + int (*get_ephy_state)(struct fxgmac_pdata *pdata); + int (*write_ephy_reg)(struct fxgmac_pdata *pdata, u32 val, u32 data); + int (*read_ephy_reg)(struct fxgmac_pdata *pdata, u32 val, u32 *data); + int (*set_ephy_autoneg_advertise)(struct fxgmac_pdata *pdata, + struct fxphy_ag_adv phy_ag_adv); + int (*phy_config)(struct fxgmac_pdata *pdata); + void (*close_phy_led)(struct fxgmac_pdata *pdata); + void (*led_under_active)(struct fxgmac_pdata *pdata); + void (*led_under_sleep)(struct fxgmac_pdata *pdata); + void (*led_under_shutdown)(struct fxgmac_pdata *pdata); + void (*led_under_disable)(struct fxgmac_pdata *pdata); + + /* For power management */ + void (*pre_power_down)(struct fxgmac_pdata *pdata, bool phyloopback); + int (*diag_sanity_check)(struct fxgmac_pdata *pdata); + int (*write_rss_lookup_table)(struct fxgmac_pdata *pdata); + int (*get_rss_hash_key)(struct fxgmac_pdata *pdata, u8 *key_buf); + void (*config_power_down)(struct fxgmac_pdata *pdata, unsigned int wol); + void (*config_power_up)(struct fxgmac_pdata *pdata); + unsigned char (*set_suspend_int)(void *pdata); + void (*set_resume_int)(struct fxgmac_pdata *pdata); + int (*set_suspend_txrx)(struct fxgmac_pdata *pdata); + void (*set_pwr_clock_gate)(struct fxgmac_pdata *pdata); + void (*set_pwr_clock_ungate)(struct fxgmac_pdata *pdata); + + /* for multicast address list */ + int (*set_all_multicast_mode)(struct fxgmac_pdata *pdata, + unsigned int enable); + void (*config_multicast_mac_hash_table)(struct fxgmac_pdata *pdata, + unsigned char *pmc_mac, + int b_add); + + /* for packet filter-promiscuous and broadcast */ + int (*set_promiscuous_mode)(struct fxgmac_pdata *pdata, + unsigned int enable); + int (*enable_rx_broadcast)(struct fxgmac_pdata *pdata, + unsigned int enable); + + /* efuse relevant operation. */ + bool (*read_patch_from_efuse)(struct fxgmac_pdata *pdata, u32 offset, + u32 *value); /* read patch per index. */ + bool (*read_patch_from_efuse_per_index)( + struct fxgmac_pdata *pdata, u8 index, u32 *offset, + u32 *value); /* read patch per index. 
*/ + bool (*write_patch_to_efuse)(struct fxgmac_pdata *pdata, u32 offset, + u32 value); + bool (*write_patch_to_efuse_per_index)(struct fxgmac_pdata *pdata, + u8 index, u32 offset, u32 value); + bool (*read_mac_subsys_from_efuse)(struct fxgmac_pdata *pdata, + u8 *mac_addr, u32 *subsys, + u32 *revid); + bool (*write_mac_subsys_to_efuse)(struct fxgmac_pdata *pdata, + u8 *mac_addr, u32 *subsys, + u32 *revid); + bool (*read_mac_addr_from_efuse)(struct fxgmac_pdata *pdata, + u8 *mac_addr); + bool (*write_mac_addr_to_efuse)(struct fxgmac_pdata *pdata, + u8 *mac_addr); + bool (*efuse_load)(struct fxgmac_pdata *pdata); + bool (*read_efuse_data)(struct fxgmac_pdata *pdata, u32 offset, + u32 *value); + bool (*write_oob)(struct fxgmac_pdata *pdata); + bool (*write_led)(struct fxgmac_pdata *pdata, u32 value); + bool (*read_led_config)(struct fxgmac_pdata *pdata); + bool (*write_led_config)(struct fxgmac_pdata *pdata); + + int (*pcie_init)(struct fxgmac_pdata *pdata, bool ltr_en, + bool aspm_l1ss_en, bool aspm_l1_en, bool aspm_l0s_en); + void (*trigger_pcie)( + struct fxgmac_pdata *pdata, + u32 code); /* To trigger pcie sniffer for analysis. */ +#ifdef DPDK + int (*phy_init)(struct fxgmac_pdata *); + int (*phy_start)(struct fxgmac_pdata *); + void (*phy_stop)(struct fxgmac_pdata *); + void (*phy_status)(struct fxgmac_pdata *); + void (*an_isr)( + struct fxgmac_pdata + *); /* phy_if->an_isr For single interrupt support */ +#endif +}; + +/* This structure contains flags that indicate what hardware features + * or configurations are present in the device. + */ +struct fxgmac_hw_features { + /* HW Version */ + unsigned int version; + + /* HW Feature Register0 */ + unsigned int phyifsel; /* PHY interface support */ + unsigned int vlhash; /* VLAN Hash Filter */ + unsigned int sma; /* SMA(MDIO) Interface */ + unsigned int rwk; /* PMT remote wake-up packet */ + unsigned int mgk; /* PMT magic packet */ + unsigned int mmc; /* RMON module */ + unsigned int aoe; /* ARP Offload */ + unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */ + unsigned int eee; /* Energy Efficient Ethernet */ + unsigned int tx_coe; /* Tx Checksum Offload */ + unsigned int rx_coe; /* Rx Checksum Offload */ + unsigned int addn_mac; /* Additional MAC Addresses */ + unsigned int ts_src; /* Timestamp Source */ + unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + + /* HW Feature Register1 */ + unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ + unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */ + unsigned int adv_ts_hi; /* Advance Timestamping High Word */ + unsigned int dma_width; /* DMA width */ + unsigned int dcb; /* DCB Feature */ + unsigned int sph; /* Split Header Feature */ + unsigned int tso; /* TCP Segmentation Offload */ + unsigned int dma_debug; /* DMA Debug Registers */ + unsigned int rss; /* Receive Side Scaling */ + unsigned int tc_cnt; /* Number of Traffic Classes */ + unsigned int avsel; /* AV Feature Enable */ + unsigned int ravsel; /* Rx Side Only AV Feature Enable */ + unsigned int hash_table_size; /* Hash Table Size */ + unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ + + /* HW Feature Register2 */ + unsigned int rx_q_cnt; /* Number of MTL Receive Queues */ + unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */ + unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */ + unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */ + unsigned int pps_out_num; /* Number of PPS outputs */ + unsigned int aux_snap_num; /* Number of Aux snapshot inputs */ + + /* HW Feature 
Register3 */ + u32 hwfr3; +}; + +struct fxgmac_resources { + IOMEM addr; + int irq; +}; + +struct fxgmac_pdata { + struct net_device *netdev; + struct device *dev; + PCI_DEV *pdev; + void *pAdapter; + + struct fxgmac_hw_ops hw_ops; + struct fxgmac_desc_ops desc_ops; + + /* Device statistics */ + struct fxgmac_stats stats; + + u32 msg_enable; + u32 reg_nonstick[0x300 >> 2]; + + /* MAC registers base */ + IOMEM mac_regs; + IOMEM base_mem; + + /* Hardware features of the device */ + struct fxgmac_hw_features hw_feat; + + /* Rings for Tx/Rx on a DMA channel */ + struct fxgmac_channel *channel_head; + unsigned int channel_count; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + unsigned int tx_desc_count; + unsigned int rx_desc_count; + unsigned int tx_q_count; + unsigned int rx_q_count; + + /* Tx/Rx common settings */ + unsigned int pblx8; + + /* Tx settings */ + unsigned int tx_sf_mode; + unsigned int tx_threshold; + unsigned int tx_pbl; + unsigned int tx_osp_mode; +#if FXGMAC_TX_HANG_TIMER_EN + /* for tx hang checking. 20211227 */ + unsigned int tx_hang_restart_queuing; +#endif + + /* Rx settings */ + unsigned int rx_sf_mode; + unsigned int rx_threshold; + unsigned int rx_pbl; + + /* Tx coalescing settings */ + unsigned int tx_usecs; + unsigned int tx_frames; + + /* Rx coalescing settings */ + unsigned int rx_riwt; + unsigned int rx_usecs; + unsigned int rx_frames; + + /* Current Rx buffer size */ + unsigned int rx_buf_size; + + /* Flow control settings */ + unsigned int tx_pause; + unsigned int rx_pause; + + /* Jumbo frames */ + unsigned int mtu; + unsigned int jumbo; + + /* CRC checking */ + unsigned int crc_check; + + /* MSIX */ + unsigned int msix; + + /* RSS */ + unsigned int rss; + + /* VlanID */ + unsigned int vlan; + unsigned int vlan_exist; + unsigned int vlan_filter; + unsigned int vlan_strip; + + /* Interrupt Moderation */ + unsigned int intr_mod; + unsigned int intr_mod_timer; + + /* Device interrupt number */ + int dev_irq; + unsigned int per_channel_irq; + /* change type from int to u32 to match MSIx, p_msix_entry.vector; */ + u32 channel_irq[FXGMAC_MAX_DMA_CHANNELS_PLUS_1TX]; + + /* Netdev related settings */ + unsigned char mac_addr[ETH_ALEN]; + + /* Filtering support */ +#if FXGMAC_FILTER_MULTIPLE_VLAN_ENABLED + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif + + /* Device clocks */ + unsigned long sysclk_rate; + + /* Receive Side Scaling settings */ + u8 rss_key[FXGMAC_RSS_HASH_KEY_SIZE]; + u32 rss_table[FXGMAC_RSS_MAX_TABLE_SIZE]; + u32 rss_options; + + int phy_speed; + int phy_duplex; + int phy_autoeng; + + char drv_name[32]; + char drv_ver[32]; + + struct wol_bitmap_pattern pattern[MAX_PATTERN_COUNT]; + + struct led_setting led; + struct led_setting ledconfig; + + FXGMAC_PDATA_OF_PLATFORM expansion; + + u32 pcie_link_status; +}; + +#define FXGMAC_FLAG_MSI_CAPABLE ((u32)(1 << 0)) /* bit0 */ +#define FXGMAC_FLAG_MSI_ENABLED ((u32)(1 << 1)) /* bit1 */ +#define FXGMAC_FLAG_MSIX_CAPABLE ((u32)(1 << 2)) /* bit2 */ +#define FXGMAC_FLAG_MSIX_ENABLED ((u32)(1 << 3)) /* bit3 */ +#define FXGMAC_FLAG_LEGACY_ENABLED ((u32)(1 << 4)) /* bit4 */ + +#define FXGMAC_FLAG_INTERRUPT_POS 0 +#define FXGMAC_FLAG_INTERRUPT_LEN 5 + +#define FXGMAC_FLAG_MSI_POS 1 +#define FXGMAC_FLAG_MSI_LEN 1 +#define FXGMAC_FLAG_MSIX_POS 3 +#define FXGMAC_FLAG_MSIX_LEN 1 +#define FXGMAC_FLAG_LEGACY_POS 4 +#define FXGMAC_FLAG_LEGACY_LEN 1 +#define FXGMAC_FLAG_LEGACY_IRQ_FREE_POS 31 /* bit31 */ +#define FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN 1 +#define FXGMAC_FLAG_LEGACY_NAPI_FREE_POS 30 /* 
bit30 */ +#define FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN 1 + +void fxgmac_init_desc_ops(struct fxgmac_desc_ops *desc_ops); +void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops); +const struct net_device_ops *fxgmac_get_netdev_ops(void); +const struct ethtool_ops *fxgmac_get_ethtool_ops(void); +void fxgmac_dump_tx_desc(struct fxgmac_pdata *pdata, struct fxgmac_ring *ring, + unsigned int idx, unsigned int count, + unsigned int flag); +void fxgmac_dump_rx_desc(struct fxgmac_pdata *pdata, struct fxgmac_ring *ring, + unsigned int idx); +void fxgmac_dbg_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx); +void fxgmac_get_all_hw_features(struct fxgmac_pdata *pdata); +void fxgmac_print_all_hw_features(struct fxgmac_pdata *pdata); +int fxgmac_drv_probe(struct device *dev, struct fxgmac_resources *res); +int fxgmac_drv_remove(struct device *dev); + +#endif /* __FUXI_GMAC_H__ */ diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h new file mode 100644 index 0000000000000000000000000000000000000000..1a40267e1fa2ee7cb939c67f795b08ef799d6e71 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h @@ -0,0 +1,515 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. */ + + +#ifndef __FUXI_OS_H__ +#define __FUXI_OS_H__ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PCI_MSI +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fuxi-dbg.h" + +struct fxgmac_ring; +struct fxgmac_pdata; + +#define FXGMAC_DRV_VERSION "1.0.27" + +#define PCIE_LP_ASPM_L0S 1 +#define PCIE_LP_ASPM_L1 2 +#define PCIE_LP_ASPM_L1SS 4 +#define PCIE_LP_ASPM_LTR 8 + +#define FXGMAC_FAIL -1 +#define FXGMAC_SUCCESS 0 +#define FXGMAC_DEV_CMD (SIOCDEVPRIVATE + 1) +#define FXGMAC_IOCTL_DFS_COMMAND _IOWR('M', 0x80, struct ext_ioctl_data) + +#define FXGMAC_MAX_DBG_TEST_PKT 150 +#define FXGMAC_MAX_DBG_BUF_LEN 64000 +#define FXGMAC_MAX_DBG_RX_DATA 1600 +#define FXGMAC_NETDEV_OPS_BUF_LEN 256 + +#define FXGMAC_TEST_MAC_HEAD_LEN 14 + +#define FUXI_PM_WPI_READ_FEATURE_EN 1 + +#define RSS_Q_COUNT 4 + +#define FXGMAC_TX_HANG_TIMER_EN 0 +/* only for debug. for normal run, pls keep them both 0 + * 0: use default tx q; other: specify txq-1: 1 txq; + */ +#define FXGMAC_NUM_OF_TX_Q_USED 0 +/* 1 to enable a dummy tx, ie, no tail for gmac; */ +#define FXGMAC_DUMMY_TX_DEBUG 0 +/* 1 to trigger(write reg 0x1000) for sniffer stop */ +#define FXGMAC_TRIGGER_TX_HANG 0 + +/* driver feature configuration */ +#if FXGMAC_TX_HANG_TIMER_EN +/* 0: check hw current desc; 1: check software dirty */ +#define FXGMAC_TX_HANG_CHECH_DIRTY 0 +#endif + +/* 1:poll tx of 4 channels; 0: since only 1 tx channel supported in this + * version, poll ch 0 always. + */ + +#define FXGMAC_FULL_TX_CHANNEL 0 + +#ifdef CONFIG_ARM64 +/* when you want to run this driver on 64bit arm, you should open this, + * otherwise dma's mask cannot be set successfully. + */ +#define FUXI_DMA_BIT_MASK 64 +#endif + +#ifdef CONFIG_PCI_MSI +/* should be same as FXGMAC_MAX_DMA_CHANNELS + 1 tx_irq */ +#define FXGMAC_MAX_MSIX_Q_VECTORS (FXGMAC_MSIX_Q_VECTORS + 1) +#define FXGMAC_MSIX_CH0RXDIS_EN 0 /* set to 1 for ch0 unbalance fix; */ +#define FXGMAC_MSIX_INTCTRL_EN 1 + +#define FXGMAC_PHY_INT_NUM 1 +#define FXGMAC_MSIX_INT_NUMS (FXGMAC_MAX_MSIX_Q_VECTORS + FXGMAC_PHY_INT_NUM) +#else /* for case of no CONFIG_PCI_MSI */ +/* NO modification needed! 
for non-MSI, set to 0 always. */
+#define FXGMAC_MSIX_CH0RXDIS_EN 0
+#define FXGMAC_MSIX_INTCTRL_EN 0
+#endif
+
+/* RSS features */
+#ifdef FXGMAC_ONE_CHANNEL
+#define FXGMAC_RSS_FEATURE_ENABLED 0 /* 1: enable RSS; 0: RSS not included. */
+#else
+#define FXGMAC_RSS_FEATURE_ENABLED 1 /* 1: enable RSS; 0: RSS not included. */
+#endif
+#define FXGMAC_RSS_HASH_KEY_LINUX 1 /* 0: hard-coded default RSS key; 1: normal hash key handling from Linux. */
+
+/* WOL features */
+#define FXGMAC_WOL_FEATURE_ENABLED 1 /* 1: enable WOL; 0: WOL not included. */
+/* WOL upon link change has caused issues, so it is meant to stay disabled. */
+#define FXGMAC_WOL_UPON_EPHY_LINK 1 /* 1: enable EPHY link-change WOL; 0: EPHY link-change WOL not supported. */
+
+/* Pause features */
+#define FXGMAC_PAUSE_FEATURE_ENABLED 1 /* 1: enable flow control/pause frames; 0: flow control/pause frames not included. */
+
+/* ARP offload engine (AOE) */
+#define FXGMAC_AOE_FEATURE_ENABLED 1 /* 1: enable the ARP offload engine; 0: AOE not included. */
+
+/* NS offload engine */
+#define FXGMAC_NS_OFFLOAD_ENABLED 1 /* 1: enable NS offload for IPv6; 0: NS offload not included. */
+
+/* for FPGA versions after 20210507, which need the PHY released before the MAC tx/rx bits are set */
+#define FXGMAC_TXRX_EN_AFTER_PHY_RELEASE 1 /* 1: release the EPHY before the MAC tx/rx bits are set. */
+
+/* power management features */
+#define FXGMAC_PM_FEATURE_ENABLED 1 /* 1: enable PM; 0: PM not included. */
+
+/* sanity check */
+#define FXGMAC_SANITY_CHECK_ENABLED 0 /* 1: enable health checking. */
+
+/* VLAN ID filter */
+#define FXGMAC_FILTER_SINGLE_VLAN_ENABLED 1 /* 1: enable single VLAN ID filtering. */
+#define FXGMAC_FILTER_MULTIPLE_VLAN_ENABLED 1
+#define FUXI_MAC_HASH_TABLE 1
+#define FXGMAC_FILTER_MULTIPLE_MAC_ADDR_ENABLED 1
+#define FUXI_MISC_INT_HANDLE_FEATURE_EN 1
+
+#define HAVE_FXGMAC_DEBUG_FS
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &(((TYPE *)0)->MEMBER))
+#endif
+
+#define ETH_IS_ZEROADDRESS(Address) \
+	((((u8 *)(Address))[0] == ((u8)0x00)) \
+	&& (((u8 *)(Address))[1] == ((u8)0x00)) \
+	&& (((u8 *)(Address))[2] == ((u8)0x00)) \
+	&& (((u8 *)(Address))[3] == ((u8)0x00)) \
+	&& (((u8 *)(Address))[4] == ((u8)0x00)) \
+	&& (((u8 *)(Address))[5] == ((u8)0x00)))
+
+/* read from an 8-bit register via PCI config space */
+#define cfg_r8(_pdata, reg, pdat) pci_read_config_byte((_pdata)->pdev, (reg), (u8 *)(pdat))
+
+/* read from a 16-bit register via PCI config space */
+#define cfg_r16(_pdata, reg, pdat) pci_read_config_word((_pdata)->pdev, (reg), (u16 *)(pdat))
+
+/* read from a 32-bit register via PCI config space */
+#define cfg_r32(_pdata, reg, pdat) pci_read_config_dword((_pdata)->pdev, (reg), (u32 *)(pdat))
+
+/* write to an 8-bit register via PCI config space */
+#define cfg_w8(_pdata, reg, val) pci_write_config_byte((_pdata)->pdev, (reg), (u8)(val))
+
+/* write to a 16-bit register via PCI config space */
+#define cfg_w16(_pdata, reg, val) pci_write_config_word((_pdata)->pdev, (reg), (u16)(val))
+
+/* write to a 32-bit register via PCI config space */
+#define cfg_w32(_pdata, reg, val) pci_write_config_dword((_pdata)->pdev, (reg), (u32)(val))
+
+#define readreg(pAdapter, addr) (readl(addr))
+#define writereg(pAdapter, val, addr) (writel(val, addr))
+#define usleep_range_ex(pAdapter, a, b) (usleep_range(a, b))
+#define _CR(Record, TYPE, Field) ((TYPE *) ((char *) (Record) - (char *) &(((TYPE *) 0)->Field)))
+
+#define FXGMAC_GET_REG_BITS(var, pos, len) ({ \
+	typeof(pos) _pos = (pos); \
+	typeof(len) _len = (len); \
+	((var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \
+})
+
+#define FXGMAC_GET_REG_BITS_LE(var, pos, len) ({ \
+	typeof(pos) _pos = (pos); \
+	typeof(len) _len = (len); \
+	typeof(var) _var = le32_to_cpu((var)); \
+	((_var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \
+})
+
+#define FXGMAC_SET_REG_BITS(var, pos, len, val) ({ \
+	typeof(var) _var = (var); \
+	typeof(pos) _pos = (pos); \
+	typeof(len) _len = (len); \
+	typeof(val) _val = (val); \
+	_val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \
+	_var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \
+})
+
+#define FXGMAC_SET_REG_BITS_LE(var, pos, len, val) ({ \
+	typeof(var) _var = (var); \
+	typeof(pos) _pos = (pos); \
+	typeof(len) _len = (len); \
+	typeof(val) _val = (val); \
+	_val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \
+	_var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \
+	cpu_to_le32(_var); \
+})
+
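+/* Usage sketch (illustrative only, not part of the driver sources): these
+ * helpers pair a _POS/_LEN define with a register value. For example,
+ * decoding and updating MGMT_EPHY_CTRL from fuxi-gmac-reg.h:
+ *
+ *	u32 val = readreg(pdata->pAdapter, pdata->base_mem + MGMT_EPHY_CTRL);
+ *	u32 link = FXGMAC_GET_REG_BITS(val,
+ *				       MGMT_EPHY_CTRL_STA_EPHY_LINKUP_POS,
+ *				       MGMT_EPHY_CTRL_STA_EPHY_LINKUP_LEN);
+ *
+ *	val = FXGMAC_SET_REG_BITS(val, MGMT_EPHY_CTRL_RESET_POS,
+ *				  MGMT_EPHY_CTRL_RESET_LEN,
+ *				  MGMT_EPHY_CTRL_STA_EPHY_RELEASE);
+ *	writereg(pdata->pAdapter, val, pdata->base_mem + MGMT_EPHY_CTRL);
+ *
+ * The _LE variants do the same on little-endian descriptor words.
+ */
+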
+#define STR_FORMAT "%s"
+
+#define DbgPrintF(level, fmt, ...)
+#define DBGPRINT(Level, Fmt)
+#define DBGPRINT_RAW(Level, Fmt)
+#define DBGPRINT_S(Status, Fmt)
+#define DBGPRINT_UNICODE(Level, UString)
+#define Dump(p, cb, fAddress, ulGroup)
+
+#undef ASSERT
+#define ASSERT(x)
+
+#define DbgPrintOidName(_Oid)
+#define DbgPrintAddress(_pAddress)
+
+#define fxgmac_dump_buffer(_skb, _len, _tx_rx)
+#define DumpLine(_p, _cbLine, _fAddress, _ulGroup)
+
+#ifndef FXGMAC_DEBUG
+#define FXGMAC_DEBUG
+#endif
+
+/* For debug prints */
+#ifdef FXGMAC_DEBUG
+#define FXGMAC_PR(fmt, args...) \
+	pr_alert("[%s,%d]:" fmt, __func__, __LINE__, ## args)
+
+#define DPRINTK printk
+#else
+#define FXGMAC_PR(x...) do { } while (0)
+#define DPRINTK(x...)
+#endif
+
+#define IOC_MAGIC 'M'
+#define IOC_MAXNR (0x80 + 5)
+
+#define FUXI_DFS_IOCTL_DEVICE_INACTIVE 0x10001
+#define FUXI_DFS_IOCTL_DEVICE_RESET 0x10002
+#define FUXI_DFS_IOCTL_DIAG_BEGIN 0x10003
+#define FUXI_DFS_IOCTL_DIAG_END 0x10004
+#define FUXI_DFS_IOCTL_DIAG_TX_PKT 0x10005
+#define FUXI_DFS_IOCTL_DIAG_RX_PKT 0x10006
+
+#define FXGMAC_EFUSE_UPDATE_LED_CFG 0x10007
+#define FXGMAC_EFUSE_WRITE_LED 0x10008
+#define FXGMAC_EFUSE_WRITE_PATCH_REG 0x10009
+#define FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX 0x1000A
+#define FXGMAC_EFUSE_WRITE_OOB 0x1000B
+#define FXGMAC_EFUSE_LOAD 0x1000C
+#define FXGMAC_EFUSE_READ_REGIONABC 0x1000D
+#define FXGMAC_EFUSE_READ_PATCH_REG 0x1000E
+#define FXGMAC_EFUSE_READ_PATCH_PER_INDEX 0x1000F
+#define FXGMAC_EFUSE_LED_TEST 0x10010
+
+#define FXGMAC_GET_MAC_DATA 0x10011
+#define FXGMAC_SET_MAC_DATA 0x10012
+#define FXGMAC_GET_SUBSYS_ID 0x10013
+#define FXGMAC_SET_SUBSYS_ID 0x10014
+#define FXGMAC_GET_GMAC_REG 0x10015
+#define FXGMAC_SET_GMAC_REG 0x10016
+#define FXGMAC_GET_PHY_REG 0x10017
+#define FXGMAC_SET_PHY_REG 0x10018
+#define FXGMAC_EPHYSTATISTICS 0x10019
+#define FXGMAC_GET_STATISTICS 0x1001A
+#define FXGMAC_GET_PCIE_LOCATION 0x1001B
+
+#define FXGMAC_GET_GSO_SIZE 0x1001C
+#define FXGMAC_SET_GSO_SIZE 0x1001D
+#define FXGMAC_SET_RX_MODERATION 0x1001E
+#define FXGMAC_SET_TX_MODERATION 0x1001F
+#define FXGMAC_GET_TXRX_MODERATION 0x10020
+
+#define MAX_PKT_BUF 1
+#define FXGAMC_MAX_DATA_SIZE (1024 * 4 + 16)
+
+#ifndef PCI_CAP_ID_MSI
+#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */
+#endif
+
+#ifndef PCI_CAP_ID_MSIX
+#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
+#endif
+
+#define PCI_CAP_ID_MSI_ENABLE_POS 0x10
+#define PCI_CAP_ID_MSI_ENABLE_LEN 0x1
+#define PCI_CAP_ID_MSIX_ENABLE_POS 0x1F
+#define PCI_CAP_ID_MSIX_ENABLE_LEN 0x1
+
+#ifndef fallthrough
+#if __has_attribute(__fallthrough__)
+# define fallthrough __attribute__((__fallthrough__))
+#else
+# define fallthrough do {} while (0) /* fallthrough */
+#endif
+#endif
+
+#pragma pack(1)
+/* ideally the size of this struct should be 128 bytes. */
+struct pattern_packet {
+	u8 ether_daddr[ETH_ALEN];
+	u8 ether_saddr[ETH_ALEN];
+	u16 ether_type;
+
+	__be16 ar_hrd; /* format of hardware address */
+	__be16 ar_pro; /* format of protocol */
+	unsigned char ar_hln; /* length of hardware address */
+	unsigned char ar_pln; /* length of protocol address */
+	__be16 ar_op; /* ARP opcode (command) */
+	unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
+	unsigned char ar_sip[4]; /* sender IP address */
+	unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
+	unsigned char ar_tip[4]; /* target IP address */
+
+	u8 reverse[86];
+};
+#pragma pack()
+
+typedef enum {
+	CURRENT_STATE_SHUTDOWN = 0,
+	CURRENT_STATE_RESUME = 1,
+	CURRENT_STATE_INIT = 2,
+	CURRENT_STATE_SUSPEND = 3,
+	CURRENT_STATE_CLOSE = 4,
+	CURRENT_STATE_OPEN = 5,
+	CURRENT_STATE_RESTART = 6,
+	CURRENT_STATE_REMOVE = 7,
+} CURRENT_STATE;
+
+typedef dma_addr_t DMA_ADDR_T;
+typedef enum pkt_hash_types RSS_HASH_TYPE;
+typedef void __iomem *IOMEM;
+typedef struct pci_dev PCI_DEV;
+
+struct ext_command_buf {
+	void *buf;
+	u32 size_in;
+	u32 size_out;
+};
+
+struct ext_command_mac {
+	u32 num;
+	union {
+		u32 val32;
+		u16 val16;
+		u8 val8;
+	};
+};
+
+struct ext_command_mii {
+	u16 dev;
+	u16 num;
+	u16 val;
+};
+
+struct ext_ioctl_data {
+	u32 cmd_type;
+	struct ext_command_buf cmd_buf;
+};
+
+typedef struct _fxgmac_test_buf {
+	u8 *addr;
+	u32 offset;
+	u32 length;
+} fxgmac_test_buf, *pfxgmac_test_buf;
+
+typedef struct _fxgmac_test_packet {
+	struct _fxgmac_test_packet *next;
+	u32 length; /* total length of the packet (buffers) */
+	u32 type; /* packet type: VLAN, IP checksum, TSO, etc. */
+
+	fxgmac_test_buf buf[MAX_PKT_BUF];
+	fxgmac_test_buf sGList[MAX_PKT_BUF];
+	u16 vlanID;
+	u16 mss;
+	u32 hash;
+	u16 cpuNum;
+	u16 xsum; /* rx, ip-payload checksum */
+	u16 csumStart; /* custom checksum offset to the mac-header */
+	u16 csumPos; /* custom checksum position (to the mac_header) */
+	void *upLevelReserved[4];
+	void *lowLevelReserved[4];
+} fxgmac_test_packet, *pfxgmac_test_packet;
+
+typedef struct fxgmac_channel_of_platform {
+	char dma_irq_name[IFNAMSIZ + 32];
+
+	/* for MSI-X, to match the type of struct msix_entry.vector */
+	u32 dma_irq_tx;
+	char dma_irq_name_tx[IFNAMSIZ + 32];
+
+	/* Netdev related settings */
+	struct napi_struct napi_tx;
+
+	/* Netdev related settings */
+	struct napi_struct napi_rx;
+	struct timer_list tx_timer;
+
+#if FXGMAC_TX_HANG_TIMER_EN
+	unsigned int tx_hang_timer_active;
+	struct timer_list tx_hang_timer;
+	unsigned int tx_hang_hw_cur;
+#endif
+} FXGMAC_CHANNEL_OF_PLATFORM;
+
+typedef struct per_regisiter_info {
+	unsigned int size;
+	unsigned int address;
+	unsigned int value;
+	unsigned char data[FXGAMC_MAX_DATA_SIZE];
+} PER_REG_INFO;
+
+/* for FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX: val0 is the index, val1 the
+ * offset, val2 the value.
+ */ +typedef struct ext_command_data { + u32 val0; + u32 val1; + u32 val2; +} CMD_DATA; + +typedef struct fxgmac_pdata_of_platform { + u32 cfg_pci_cmd; + u32 cfg_cache_line_size; + u32 cfg_mem_base; + u32 cfg_mem_base_hi; + u32 cfg_io_base; + u32 cfg_int_line; + u32 cfg_device_ctrl1; + u32 cfg_pci_link_ctrl; + u32 cfg_device_ctrl2; + u32 cfg_msix_capability; + + struct work_struct restart_work; + u32 int_flags; /* legacy, msi or msix */ + int phy_irq; +#ifdef CONFIG_PCI_MSI + struct msix_entry *msix_entries; +#endif + + /* power management and wol*/ + u32 wol; /* wol options */ + unsigned long powerstate; /* power state */ + unsigned int ns_offload_tab_idx; /* for ns-offload table. 2 entries supported. */ + CURRENT_STATE current_state; + netdev_features_t netdev_features; + struct napi_struct napi; + struct napi_struct napi_phy; + u32 mgm_intctrl_val; + bool phy_link; + bool fxgmac_test_tso_flag; + u32 fxgmac_test_tso_seg_num; + u32 fxgmac_test_last_tso_len; + u32 fxgmac_test_packet_len; + volatile u32 fxgmac_test_skb_arr_in_index; + volatile u32 fxgmac_test_skb_arr_out_index; + struct sk_buff *fxgmac_test_skb_array[FXGMAC_MAX_DBG_TEST_PKT]; +#ifdef HAVE_FXGMAC_DEBUG_FS + struct dentry *dbg_adapter; + struct dentry *fxgmac_dbg_root; + char fxgmac_dbg_netdev_ops_buf[FXGMAC_NETDEV_OPS_BUF_LEN]; +#endif +} FXGMAC_PDATA_OF_PLATFORM; + +void fxgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, + bool tx_rx); +int fxgmac_dismiss_all_int(struct fxgmac_pdata *pdata); + +#ifdef HAVE_FXGMAC_DEBUG_FS +void fxgmac_dbg_adapter_init(struct fxgmac_pdata *pdata); +void fxgmac_dbg_adapter_exit(struct fxgmac_pdata *pdata); +void fxgmac_dbg_init(struct fxgmac_pdata *pdata); +void fxgmac_dbg_exit(struct fxgmac_pdata *pdata); +#endif /* HAVE_FXGMAC_DEBUG_FS */ + +void fxgmac_restart_dev(struct fxgmac_pdata *pdata); +long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); + +int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg); +/* for phy interface */ +int fxgmac_ephy_autoneg_ability_get(struct fxgmac_pdata *pdata, + unsigned int *cap_mask); +int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, + int *duplex, int *ret_link, int *media); +int fxgmac_ephy_soft_reset(struct fxgmac_pdata *pdata); +void fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed); +void fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex); +void fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg); + +unsigned int fxgmac_get_netdev_ip4addr(struct fxgmac_pdata *pdata); +unsigned char *fxgmac_get_netdev_ip6addr(struct fxgmac_pdata *pdata, + unsigned char *ipval, + unsigned char *ip6addr_solicited, + unsigned int ifa_flag); + +#if FXGMAC_PM_FEATURE_ENABLED +void fxgmac_net_powerdown(struct fxgmac_pdata *pdata, unsigned int wol); +void fxgmac_net_powerup(struct fxgmac_pdata *pdata); +#endif + +inline unsigned int fxgmac_tx_avail_desc(struct fxgmac_ring *ring); +inline unsigned int fxgmac_rx_dirty_desc(struct fxgmac_ring *ring); +int fxgmac_start(struct fxgmac_pdata *pdata); +void fxgmac_stop(struct fxgmac_pdata *pdata); +void fxgmac_free_rx_data(struct fxgmac_pdata *pdata); +void fxgmac_free_tx_data(struct fxgmac_pdata *pdata); + +#endif /* __FUXI_OS_H__ */ diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index 69e84ff7f2e5214e584c8b09722a069264acc0b0..41d18df97c8576596e602f0749c179f29b5d77fc 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ 
b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -362,7 +362,7 @@ int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm) const struct nfp_rtsym *sym; int res; - abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp); + abm->pf_id = nfp_get_pf_id(pf); /* Check if Qdisc offloads are supported */ res = nfp_pf_rtsym_read_optional(pf, NFP_RED_SUPPORT_SYM_NAME, 1); diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 5d3df28c648ffc2d1925fa84af04899af9742677..d4acaa15629d5869545ad13bfca23d003032f348 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -451,7 +451,7 @@ static int nfp_abm_init(struct nfp_app *app) nfp_err(pf->cpp, "ABM NIC requires ETH table\n"); return -EINVAL; } - if (pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->max_data_vnics != pf->eth_tbl->count && !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index f469950c726573fd1f3a40a162b9f0f03a3bc6a5..3d928dfba114f40655b728dbd25118380ae437eb 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -70,7 +70,7 @@ nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) nfp_err(pf->cpp, "No ETH table\n"); return -EINVAL; } - if (pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->max_data_vnics != pf->eth_tbl->count && !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 83eaa5ae3cd4f5adbf04475811ca91da82ebf4f2..88e8ae25f0cc67cab04db5dbe2e04e25a5888150 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -378,10 +378,10 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, enum nfp_flower_cmsg_port_vnic_type vnic_type, enum nfp_repr_type repr_type, unsigned int cnt) { - u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; + u8 nfp_pcie = nfp_get_pf_id(app->pf); enum nfp_port_type port_type; struct nfp_repr *nfp_repr; struct nfp_reprs *reprs; @@ -428,10 +428,10 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, goto err_reprs_clean; } if (repr_type == NFP_REPR_TYPE_PF) { - port->pf_id = i; + port->pf_id = app->pf->multi_pf.id; port->vnic = priv->nn->dp.ctrl_bar; } else { - port->pf_id = 0; + port->pf_id = app->pf->multi_pf.id; port->vf_id = i; port->vnic = app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ; @@ -496,24 +496,27 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; + int err, reify_cnt, phy_reprs_num; struct nfp_repr *nfp_repr; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; - int err, reify_cnt; unsigned int i; ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); if (!ctrl_skb) return -ENOMEM; + phy_reprs_num = app->pf->multi_pf.en ? 
app->pf->max_data_vnics : + eth_tbl->count; reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); if (!reprs) { err = -ENOMEM; goto err_free_ctrl_skb; } - for (i = 0; i < eth_tbl->count; i++) { - unsigned int phys_port = eth_tbl->ports[i].index; + for (i = 0; i < phy_reprs_num; i++) { + int idx = app->pf->multi_pf.en ? app->pf->multi_pf.id : i; + unsigned int phys_port = eth_tbl->ports[idx].index; struct net_device *repr; struct nfp_port *port; u32 cmsg_port_id; @@ -542,7 +545,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) nfp_repr_free(repr); goto err_reprs_clean; } - err = nfp_port_init_phy_port(app->pf, app, port, i); + err = nfp_port_init_phy_port(app->pf, app, port, idx); if (err) { kfree(repr_priv); nfp_port_free(port); @@ -609,7 +612,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { - if (id > 0) { + if (id > 0 && !app->pf->multi_pf.en) { nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n"); goto err_invalid_port; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 71301dbd8fb5ee462a29c04ac215f91712816b16..0fae86d8abe03e27311d40eaae50423ad32261b1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -69,6 +69,13 @@ static const struct pci_device_id nfp_pci_device_ids[] = { }; MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); +u8 nfp_get_pf_id(struct nfp_pf *pf) +{ + return nfp_cppcore_pcie_unit(pf->cpp) * + pf->dev_info->pf_num_per_unit + + pf->multi_pf.id; +} + int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, unsigned int default_val) { @@ -76,7 +83,7 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, int err = 0; u64 val; - snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(name, sizeof(name), format, nfp_get_pf_id(pf)); val = nfp_rtsym_read_le(pf->rtbl, name, &err); if (err) { @@ -90,15 +97,22 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, } u8 __iomem * -nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, - unsigned int min_size, struct nfp_cpp_area **area) +nfp_pf_map_rtsym_offset(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area) { char pf_symbol[256]; - snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, - nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, nfp_get_pf_id(pf)); - return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); + return nfp_rtsym_map_offset(pf->rtbl, pf_symbol, name, offset, min_size, area); +} + +u8 __iomem * +nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int min_size, struct nfp_cpp_area **area) +{ + return nfp_pf_map_rtsym_offset(pf, name, sym_fmt, 0, min_size, area); } /* Callers should hold the devlink instance lock */ @@ -218,11 +232,49 @@ static int nfp_pf_board_state_wait(struct nfp_pf *pf) return 0; } +static unsigned int nfp_pf_get_limit_vfs(struct nfp_pf *pf, + unsigned int limit_vfs_rtsym) +{ + u16 pos, offset, total; + + if (!pf->multi_pf.en || !limit_vfs_rtsym) + return limit_vfs_rtsym; + + pos = pci_find_ext_capability(pf->pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* Management firmware ensures that SR-IOV capability registers + * are initialized correctly. 
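+ * + * Worked example with hypothetical register values: on an NFP3800 + * (pf_num_per_unit == 4), suppose PF1 reads PCI_SRIOV_VF_OFFSET == 5, + * PCI_SRIOV_TOTAL_VF == 16 and limit_vfs_rtsym == 48. Then + * offset = 5 + 1 = 6, which is >= 4, so the first VF index becomes + * 6 - 4 = 2; vf_fid is set to 2 and, since 2 + 16 <= 48, all 16 VFs + * are allowed.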
+ */ + pci_read_config_word(pf->pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(pf->pdev, pos + PCI_SRIOV_TOTAL_VF, &total); + if (!total) + return 0; + + /* Offset of first VF is relative to its PF. */ + offset += pf->multi_pf.id; + if (offset < pf->dev_info->pf_num_per_unit) + return 0; + + /* For 3800, VF is numbered from max PF count. */ + offset -= pf->dev_info->pf_num_per_unit; + if (offset >= limit_vfs_rtsym) + return 0; + + pf->multi_pf.vf_fid = offset; + if (offset + total > limit_vfs_rtsym) + return limit_vfs_rtsym - offset; + + return total; +} + static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) { + unsigned int limit_vfs_rtsym; int err; - pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); + limit_vfs_rtsym = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); if (err) { /* For backwards compatibility if symbol not found allow all */ pf->limit_vfs = ~0; @@ -233,9 +285,13 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) return err; } - err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); - if (err) - nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); + pf->limit_vfs = nfp_pf_get_limit_vfs(pf, limit_vfs_rtsym); + if (pci_sriov_get_totalvfs(pf->pdev) != pf->limit_vfs) { + err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); + if (err) + nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); + } + return 0; } @@ -404,7 +460,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) if (fw) return fw; - /* Finally try the card type and media */ + /* Then try the card type */ if (!pf->eth_tbl) { dev_err(&pdev->dev, "Error: can't identify media config\n"); return NULL; @@ -418,6 +474,12 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) return NULL; } + sprintf(fw_name, "netronome/%s.nffw", fw_model); + fw = nfp_net_fw_request(pdev, pf, fw_name); + if (fw) + return fw; + + /* Finally try the card type and media */ spc = ARRAY_SIZE(fw_name); spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model); @@ -469,6 +531,118 @@ nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp, return err; } +static u8 __iomem * +nfp_get_beat_addr(struct nfp_pf *pf, int pf_id) +{ + /* Each PF has corresponding qword to beat: + * offset | usage + * 0 | magic number + * 8 | beat qword of pf0 + * 16 | beat qword of pf1 + */ + return pf->multi_pf.beat_addr + ((pf_id + 1) << 3); +} + +static void +nfp_nsp_beat_timer(struct timer_list *t) +{ + struct nfp_pf *pf = from_timer(pf, t, multi_pf.beat_timer); + + writeq(jiffies, nfp_get_beat_addr(pf, pf->multi_pf.id)); + /* Beat once per second. */ + mod_timer(&pf->multi_pf.beat_timer, jiffies + HZ); +} + +/** + * nfp_nsp_keepalive_start() - Start keepalive mechanism if needed + * @pf: NFP PF Device structure + * + * Return 0 if no error, errno otherwise + */ +static int +nfp_nsp_keepalive_start(struct nfp_pf *pf) +{ + struct nfp_resource *res; + u8 __iomem *base; + int err = 0; + u64 addr; + u32 cpp; + + if (!pf->multi_pf.en) + return 0; + + res = nfp_resource_acquire(pf->cpp, NFP_KEEPALIVE); + if (IS_ERR(res)) + return PTR_ERR(res); + + cpp = nfp_resource_cpp_id(res); + addr = nfp_resource_address(res); + + /* Allocate a fixed area for keepalive. 
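+ * Per the layout documented at nfp_get_beat_addr(), qword 0 of this + * area holds the magic number and qword N + 1 holds PF N's beat, so + * e.g. PF1's jiffies stamp lands at byte offset 16.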
*/ + base = nfp_cpp_map_area(pf->cpp, "keepalive", cpp, addr, + nfp_resource_size(res), &pf->multi_pf.beat_area); + if (IS_ERR(base)) { + nfp_err(pf->cpp, "Failed to map area for keepalive\n"); + err = PTR_ERR(base); + goto res_release; + } + + pf->multi_pf.beat_addr = base; + timer_setup(&pf->multi_pf.beat_timer, nfp_nsp_beat_timer, 0); + mod_timer(&pf->multi_pf.beat_timer, jiffies); + +res_release: + nfp_resource_release(res); + return err; +} + +static void +nfp_nsp_keepalive_stop(struct nfp_pf *pf) +{ + if (pf->multi_pf.beat_area) { + del_timer_sync(&pf->multi_pf.beat_timer); + nfp_cpp_area_release_free(pf->multi_pf.beat_area); + } +} + +static u64 +nfp_get_sibling_beat(struct nfp_pf *pf) +{ + unsigned int i = 0; + u64 beat = 0; + + if (!pf->multi_pf.beat_addr) + return 0; + + for (; i < pf->dev_info->pf_num_per_unit; i++) { + if (i == pf->multi_pf.id) + continue; + + beat += readq(nfp_get_beat_addr(pf, i)); + } + + return beat; +} + +static bool +nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp) +{ + unsigned long timeout = jiffies + HZ * 3; + u64 beat = nfp_get_sibling_beat(pf); + + if (!pf->multi_pf.en || nfp_nsp_fw_loaded(nsp) <= 0) + return false; + + while (time_is_after_jiffies(timeout)) { + if (beat != nfp_get_sibling_beat(pf)) + return true; + + msleep(500); + } + + return false; +} + /** * nfp_fw_load() - Load the firmware image * @pdev: PCI Device structure @@ -528,6 +702,13 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) if (err) return err; + err = nfp_nsp_keepalive_start(pf); + if (err) + return err; + + if (nfp_skip_fw_load(pf, nsp)) + return true; + fw = nfp_net_fw_find(pdev, pf); do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS || (fw && reset == NFP_NSP_DRV_RESET_DISK); @@ -556,7 +737,6 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) fw_loaded = true; } else if (policy != NFP_NSP_APP_FW_LOAD_DISK && nfp_nsp_has_stored_fw_load(nsp)) { - /* Don't propagate this error to stick with legacy driver * behavior, failure will be detected later during init. */ @@ -577,7 +757,9 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) * dependent on it, which could be the case if there are multiple * devices that could load firmware. */ - if (fw_loaded && ifcs == 1) + if (err < 0) + nfp_nsp_keepalive_stop(pf); + else if (fw_loaded && ifcs == 1) pf->unload_fw_on_remove = true; return err < 0 ? err : fw_loaded; @@ -629,6 +811,15 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) if (err < 0) goto exit_close_nsp; + if (pf->multi_pf.en && pf->multi_pf.id) { + err = nfp_nsp_device_activate(nsp); + if (err < 0 && err != -EOPNOTSUPP) { + dev_err(&pdev->dev, + "Failed to activate the NFP device: %d\n", err); + goto exit_close_nsp; + } + } + nfp_nsp_init_ports(pdev, pf, nsp); pf->nspi = __nfp_nsp_identify(nsp); @@ -657,6 +848,12 @@ static void nfp_fw_unload(struct nfp_pf *pf) struct nfp_nsp *nsp; int err; + if (pf->multi_pf.en && pf->multi_pf.beat_addr) { + /* NSP will unload firmware when no active PF exists. 
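+ * Writing the magic value back into the keepalive area presumably + * signals the NSP that this PF is going away; the application + * firmware is then torn down once no PF beats remain, which is why + * no explicit NSP reset is issued on this path.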
*/ + writeq(NFP_KEEPALIVE_MAGIC, pf->multi_pf.beat_addr); + return; + } + nsp = nfp_nsp_open(pf->cpp); if (IS_ERR(nsp)) { nfp_err(pf->cpp, "Reset failed, can't open NSP\n"); @@ -674,10 +871,8 @@ static void nfp_fw_unload(struct nfp_pf *pf) static int nfp_pf_find_rtsyms(struct nfp_pf *pf) { + unsigned int pf_id = nfp_get_pf_id(pf); char pf_symbol[256]; - unsigned int pf_id; - - pf_id = nfp_cppcore_pcie_unit(pf->cpp); /* Optional per-PCI PF mailbox */ snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id); @@ -703,7 +898,7 @@ static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf) int err = 0; u64 val; - snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_get_pf_id(pf)); val = nfp_rtsym_read_le(pf->rtbl, name, &err); if (err) { @@ -753,6 +948,18 @@ static void nfp_pf_cfg_hwinfo(struct nfp_pf *pf) nfp_nsp_close(nsp); } +static u8 nfp_init_pf_id(struct pci_dev *pdev) +{ + int vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR); + u8 id = 0; + + if (!vndr) + return PCI_FUNC(pdev->devfn); + + pci_read_config_byte(pdev, vndr + NFP_VNDR_PF_ID_OFFSET, &id); + return id; +} + static int nfp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { @@ -803,15 +1010,22 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_pci_priv_unset; } - pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info); + pf->multi_pf.en = pdev->multifunction; + pf->multi_pf.id = nfp_init_pf_id(pdev); + dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? "Multi" : "Single"); + + pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info, pf); if (IS_ERR(pf->cpp)) { err = PTR_ERR(pf->cpp); goto err_disable_msix; } - err = nfp_resource_table_init(pf->cpp); - if (err) - goto err_cpp_free; + /* Only PF0 has the right to reclaim locked resources. */ + if (!pf->multi_pf.id) { + err = nfp_resource_table_init(pf->cpp); + if (err) + goto err_cpp_free; + } pf->hwinfo = nfp_hwinfo_read(pf->cpp); @@ -874,6 +1088,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, nfp_mip_close(pf->mip); if (pf->unload_fw_on_remove) nfp_fw_unload(pf); + nfp_nsp_keepalive_stop(pf); kfree(pf->eth_tbl); kfree(pf->nspi); vfree(pf->dumpspec); @@ -896,12 +1111,14 @@ static int nfp_pci_probe(struct pci_dev *pdev, static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) { + bool keep_device_active; struct nfp_pf *pf; pf = pci_get_drvdata(pdev); if (!pf) return; + keep_device_active = pf->multi_pf.en && !pf->multi_pf.id; nfp_hwmon_unregister(pf); nfp_pcie_sriov_disable(pdev); @@ -914,6 +1131,7 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) if (unload_fw && pf->unload_fw_on_remove) nfp_fw_unload(pf); + nfp_nsp_keepalive_stop(pf); destroy_workqueue(pf->wq); pci_set_drvdata(pdev, NULL); kfree(pf->hwinfo); @@ -923,7 +1141,13 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) kfree(pf->nspi); devlink_free(priv_to_devlink(pf)); pci_release_regions(pdev); - pci_disable_device(pdev); + + /* In multiple pfs case, we need to keep master flag of pf 0 + * to ensure vfs of other pfs work normally because of + * hardware limitation. 
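+ * Note that keep_device_active was latched at the top of this + * function, before teardown began, so only PF0 of a multi-PF device + * skips the pci_disable_device() call below.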
+ */ + if (!keep_device_active) + pci_disable_device(pdev); } static void nfp_pci_remove(struct pci_dev *pdev) @@ -936,12 +1160,65 @@ static void nfp_pci_shutdown(struct pci_dev *pdev) __nfp_pci_shutdown(pdev, false); } +void nfp_pci_error_reset_prepare(struct pci_dev *dev) +{ + struct nfp_pf *pf = pci_get_drvdata(dev); + + if (pf) { + struct nfp_net *nn; + + if (pf->multi_pf.en && pf->multi_pf.beat_addr) { + /* Pause the heartbeat timer so it can't fire during FLR */ + del_timer_sync(&pf->multi_pf.beat_timer); + /* Write the keepalive beat once more to keep the + * firmware alive across frequent FLRs. + */ + writeq(jiffies, nfp_get_beat_addr(pf, pf->multi_pf.id)); + } + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + if (nn->dp.netdev && nn->dp.netdev->flags & IFF_UP) { + struct net_device *netdev = nn->dp.netdev; + + netdev->netdev_ops->ndo_stop(netdev); + } + } + } +} + +void nfp_pci_error_reset_done(struct pci_dev *dev) +{ + struct nfp_pf *pf = pci_get_drvdata(dev); + + if (pf) { + struct nfp_net *nn; + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + if (nn->dp.netdev && nn->dp.netdev->flags & IFF_UP) { + struct net_device *netdev = nn->dp.netdev; + + rtnl_lock(); + netdev->netdev_ops->ndo_open(netdev); + rtnl_unlock(); + } + } + if (pf->multi_pf.en && pf->multi_pf.beat_addr) + add_timer(&pf->multi_pf.beat_timer); + } +} + +static const struct pci_error_handlers nfp_pci_err_handler = { + .reset_prepare = nfp_pci_error_reset_prepare, + .reset_done = nfp_pci_error_reset_done, +}; + static struct pci_driver nfp_pci_driver = { .name = nfp_driver_name, .id_table = nfp_pci_device_ids, .probe = nfp_pci_probe, .remove = nfp_pci_remove, .shutdown = nfp_pci_shutdown, + .err_handler = &nfp_pci_err_handler, .sriov_configure = nfp_pcie_sriov_configure, }; @@ -994,6 +1271,13 @@ MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw"); +MODULE_FIRMWARE("netronome/AMDA0161-1001.nffw"); +MODULE_FIRMWARE("netronome/AMDA2000-1103.nffw"); +MODULE_FIRMWARE("netronome/AMDA2000-1104.nffw"); +MODULE_FIRMWARE("netronome/AMDA2001-1103.nffw"); +MODULE_FIRMWARE("netronome/AMDA2001-1104.nffw"); +MODULE_FIRMWARE("netronome/AMDA2002-1113.nffw"); +MODULE_FIRMWARE("netronome/AMDA2002-1114.nffw"); MODULE_AUTHOR("Corigine, Inc. "); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 14a751bfe1fe15f3648e9fd5fb7c934bad77a00b..5a01c66ddce9520dc16a7da49892c5df39c2a0f5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -84,6 +84,13 @@ struct nfp_dumpspec { * @port_refresh_work: Work entry for taking netdevs out * @shared_bufs: Array of shared buffer structures if FW has any SBs * @num_shared_bufs: Number of elements in @shared_bufs + * @multi_pf: Used in multi-PF setup + * @multi_pf.en: True when running in a multi-PF setup + * @multi_pf.id: PF index + * @multi_pf.vf_fid: ID of the first VF that belongs to this PF + * @multi_pf.beat_timer: Timer used to emit the keepalive beat + * @multi_pf.beat_area: Pointer to CPP area used for the keepalive beat + * @multi_pf.beat_addr: Pointer to mapped beat address used for keepalive + * * Fields which may change after probe are protected by devlink instance lock.
*/ @@ -141,6 +148,15 @@ struct nfp_pf { struct nfp_shared_buf *shared_bufs; unsigned int num_shared_bufs; + + struct { + bool en; + u8 id; + u8 vf_fid; + struct timer_list beat_timer; + struct nfp_cpp_area *beat_area; + u8 __iomem *beat_addr; + } multi_pf; }; extern struct pci_driver nfp_netvf_pci_driver; @@ -163,6 +179,10 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, unsigned int default_val); int nfp_net_pf_get_app_id(struct nfp_pf *pf); u8 __iomem * +nfp_pf_map_rtsym_offset(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area); +u8 __iomem * nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, unsigned int min_size, struct nfp_cpp_area **area); int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, @@ -194,4 +214,5 @@ void nfp_devlink_params_unregister(struct nfp_pf *pf); unsigned int nfp_net_lr2speed(unsigned int linkrate); unsigned int nfp_net_speed2lr(unsigned int speed); +u8 nfp_get_pf_id(struct nfp_pf *pf); #endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index fceb4abea2365dc023516351ff39436d5e0dbf6f..998761bf56af5d9e98466d327035dcb16a27d4f7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2704,6 +2704,11 @@ int nfp_net_init(struct nfp_net *nn) if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER) nn->dp.ctrl_w1 |= NFP_NET_CFG_CTRL_MCAST_FILTER; + /* Multi-PF is already enabled during pre-init, preserve control bit */ + if (nn->cap_w1 & NFP_NET_CFG_CTRL_MULTI_PF) + nn->dp.ctrl_w1 |= (nn_readl(nn, NFP_NET_CFG_CTRL_WORD1) & + NFP_NET_CFG_CTRL_MULTI_PF); + /* Stash the re-configuration queue away. First odd queue in TX Bar */ nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 3e63f6d6a563d1261a09471947d5fe5bf24f51f4..d6b127f13ed35edc46f10f19e6b10da4d3922e58 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -268,6 +268,7 @@ #define NFP_NET_CFG_CTRL_PKT_TYPE (0x1 << 0) /* Pkttype offload */ #define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */ #define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */ +#define NFP_NET_CFG_CTRL_MULTI_PF (0x1 << 5) /* Multi PF */ #define NFP_NET_CFG_CTRL_FREELIST_EN (0x1 << 6) /* Freelist enable flag bit */ #define NFP_NET_CFG_CAP_WORD1 0x00a4 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index cbe4972ba104ba0e4368d8c901618fa574dbf738..5df99c60c3b226584eec89fb10640439e4b30bff 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -141,7 +141,7 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id) { int err; - nn->id = id; + nn->id = pf->multi_pf.en ? pf->multi_pf.id : id; if (nn->port) { err = nfp_devlink_port_register(pf->app, nn->port); @@ -184,7 +184,7 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar, for (i = 0; i < pf->max_data_vnics; i++) { nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar, - stride, i); + stride, pf->multi_pf.en ? 
pf->multi_pf.id : i); if (IS_ERR(nn)) { err = PTR_ERR(nn); goto err_free_prev; @@ -293,6 +293,16 @@ static int nfp_net_pf_init_vnics(struct nfp_pf *pf) return err; } +static void nfp_net_pf_clean_vnics(struct nfp_pf *pf) +{ + struct nfp_net *nn; + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + if (nfp_net_is_data_vnic(nn)) + nfp_net_pf_clean_vnic(pf, nn); + } +} + static int nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) { @@ -463,9 +473,10 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) } } - pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar", - NFP_NET_CFG_BAR_SZ * pf->limit_vfs, - &pf->vf_cfg_bar); + pf->vf_cfg_mem = nfp_pf_map_rtsym_offset(pf, "net.vfcfg", "_pf%d_net_vf_bar", + NFP_NET_CFG_BAR_SZ * pf->multi_pf.vf_fid, + NFP_NET_CFG_BAR_SZ * pf->limit_vfs, + &pf->vf_cfg_bar); if (IS_ERR(pf->vf_cfg_mem)) { if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) { err = PTR_ERR(pf->vf_cfg_mem); @@ -474,7 +485,8 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) pf->vf_cfg_mem = NULL; } - min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ; + min_size = NFP_NET_VF_CFG_SZ * (pf->limit_vfs + pf->multi_pf.vf_fid) + + NFP_NET_VF_CFG_MB_SZ; pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2", "_pf%d_net_vf_cfg2", min_size, &pf->vfcfg_tbl2_area); @@ -684,15 +696,100 @@ int nfp_net_refresh_eth_port(struct nfp_port *port) return ret; } +static int nfp_net_pre_init(struct nfp_pf *pf, int *stride) +{ + struct nfp_net_fw_version fw_ver; + struct nfp_cpp_area *area; + u8 __iomem *ctrl_bar; + int err = 0; + + ctrl_bar = nfp_pf_map_rtsym(pf, NULL, "_pf%d_net_bar0", NFP_PF_CSR_SLICE_SIZE, &area); + if (IS_ERR(ctrl_bar)) { + nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n"); + return pf->fw_loaded ? PTR_ERR(ctrl_bar) : 1; + } + + nfp_net_get_fw_version(&fw_ver, ctrl_bar); + if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK || + fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { + nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n", + fw_ver.extend, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto end; + } + + /* Determine stride */ + if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { + *stride = 2; + nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n"); + } else { + switch (fw_ver.major) { + case 1 ... 5: + *stride = 4; + break; + default: + nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n", + fw_ver.extend, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto end; + } + } + + if (!pf->multi_pf.en) + goto end; + + /* Enable multi-PF. */ + if (readl(ctrl_bar + NFP_NET_CFG_CAP_WORD1) & NFP_NET_CFG_CTRL_MULTI_PF) { + unsigned long long addr; + u32 cfg_q, cpp_id, ret; + unsigned long timeout; + + writel(NFP_NET_CFG_CTRL_MULTI_PF, ctrl_bar + NFP_NET_CFG_CTRL_WORD1); + writel(NFP_NET_CFG_UPDATE_GEN, ctrl_bar + NFP_NET_CFG_UPDATE); + + /* Config queue is next to txq. 
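+ * Hypothetical example: if NFP_NET_CFG_START_TXQ reads back 128, the + * configuration queue is QC queue 129, and the nfp_cpp_writel() below + * bumps that queue's write pointer by one to kick the firmware into + * processing the NFP_NET_CFG_UPDATE_GEN request posted above.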
*/ + cfg_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ) + 1; + addr = nfp_qcp_queue_offset(pf->dev_info, cfg_q) + NFP_QCP_QUEUE_ADD_WPTR; + cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); + err = nfp_cpp_writel(pf->cpp, cpp_id, addr, 1); + if (err) + goto end; + + timeout = jiffies + HZ * NFP_NET_POLL_TIMEOUT; + while ((ret = readl(ctrl_bar + NFP_NET_CFG_UPDATE))) { + if (ret & NFP_NET_CFG_UPDATE_ERR) { + nfp_err(pf->cpp, "Enable multi-PF failed\n"); + err = -EIO; + break; + } + + usleep_range(250, 500); + if (time_is_before_eq_jiffies(timeout)) { + nfp_err(pf->cpp, "Enable multi-PF timeout\n"); + err = -ETIMEDOUT; + break; + } + } + } else { + nfp_err(pf->cpp, "Loaded firmware doesn't support multi-PF\n"); + err = -EINVAL; + } + +end: + nfp_cpp_area_release_free(area); + return err; +} + /* * PCI device functions */ int nfp_net_pci_probe(struct nfp_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - struct nfp_net_fw_version fw_ver; u8 __iomem *ctrl_bar, *qc_bar; - int stride; + int stride = 0; int err; INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics); @@ -703,9 +800,17 @@ int nfp_net_pci_probe(struct nfp_pf *pf) return -EINVAL; } + err = nfp_net_pre_init(pf, &stride); + if (err) + return err; + pf->max_data_vnics = nfp_net_pf_get_num_ports(pf); if ((int)pf->max_data_vnics < 0) return pf->max_data_vnics; + if (pf->multi_pf.en && pf->max_data_vnics != 1) { + nfp_err(pf->cpp, "Only one data vNIC per PF is supported in a multi-PF setup, please update FW.\n"); + return -EPERM; + } err = nfp_net_pci_map_mem(pf); if (err) @@ -718,34 +823,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf) goto err_unmap; } - nfp_net_get_fw_version(&fw_ver, ctrl_bar); - if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK || - fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { - nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n", - fw_ver.extend, fw_ver.class, - fw_ver.major, fw_ver.minor); - err = -EINVAL; - goto err_unmap; - } - - /* Determine stride */ - if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { - stride = 2; - nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n"); - } else { - switch (fw_ver.major) { - case 1 ... 5: - stride = 4; - break; - default: - nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n", - fw_ver.extend, fw_ver.class, - fw_ver.major, fw_ver.minor); - err = -EINVAL; - goto err_unmap; - } - } - err = nfp_net_pf_app_init(pf, qc_bar, stride); if (err) goto err_unmap; @@ -778,11 +855,17 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_stop_app; + err = nfp_net_pf_init_sriov(pf); + if (err) + goto err_clean_vnics; + devl_unlock(devlink); devlink_register(devlink); return 0; +err_clean_vnics: + nfp_net_pf_clean_vnics(pf); err_stop_app: nfp_net_pf_app_stop(pf); err_free_irqs: diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 6eeeb0fda91fac4d494f1b60794f1591360f41bd..67aea9445aa29cbbbbf376b31d1d5925269d0777 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -14,6 +14,9 @@ #include "nfp_net.h" #include "nfp_net_sriov.h" +/* Configurations that must precede VF creation.
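+ * Capabilities in this mask are exercised before num_vfs is known + * (nfp_net_pf_init_sriov() runs at probe time), which is why + * nfp_net_sriov_check() skips the VF-id range check for them.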
*/ +#define NFP_NET_VF_PRE_CONFIG NFP_NET_VF_CFG_MB_CAP_SPLIT + static int nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn) { @@ -29,6 +32,10 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool return -EOPNOTSUPP; } + /* No need to check vf for the pre-configurations. */ + if (cap & NFP_NET_VF_PRE_CONFIG) + return 0; + if (vf < 0 || vf >= app->pf->num_vfs) { if (warn) nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); @@ -65,7 +72,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct nfp_app *app = nfp_app_from_netdev(netdev); unsigned int vf_offset; - int err; + int err, abs_vf; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true); if (err) @@ -78,13 +85,14 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) return -EINVAL; } + abs_vf = vf + app->pf->multi_pf.vf_fid; /* Write MAC to VF entry in VF config symbol */ - vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + vf_offset = NFP_NET_VF_CFG_MB_SZ + abs_vf * NFP_NET_VF_CFG_SZ; writel(get_unaligned_be32(mac), app->pf->vfcfg_tbl2 + vf_offset); writew(get_unaligned_be16(mac + 4), app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); - err = nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); + err = nfp_net_sriov_update(app, abs_vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); if (!err) nfp_info(app->pf->cpp, "MAC %pM set on VF %d, reload the VF driver to make this change effective.\n", @@ -138,6 +146,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, if (vlan_tag && is_proto_sup) vlan_tag |= FIELD_PREP(NFP_NET_VF_CFG_VLAN_PROT, ntohs(vlan_proto)); + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; writel(vlan_tag, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); @@ -162,6 +171,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf, return -EINVAL; } + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; ratevalue = FIELD_PREP(NFP_NET_VF_CFG_MAX_RATE, max_tx_rate ? 
max_tx_rate : @@ -188,6 +198,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) return err; /* Write spoof check control bit to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -212,6 +223,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable) return err; /* Write trust control bit to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -246,6 +258,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, } /* Write link state to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -271,7 +284,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, if (err) return err; - vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + vf_offset = NFP_NET_VF_CFG_MB_SZ + (vf + app->pf->multi_pf.vf_fid) * NFP_NET_VF_CFG_SZ; mac_hi = readl(app->pf->vfcfg_tbl2 + vf_offset); mac_lo = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); @@ -309,3 +322,21 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, return 0; } + +int nfp_net_pf_init_sriov(struct nfp_pf *pf) +{ + int err; + + if (!pf->multi_pf.en || !pf->limit_vfs) + return 0; + + err = nfp_net_sriov_check(pf->app, 0, NFP_NET_VF_CFG_MB_CAP_SPLIT, "split", true); + if (err) + return err; + + writeb(pf->limit_vfs, pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_VF_CNT); + + /* Reuse NFP_NET_VF_CFG_MB_VF_NUM to pass vf_fid to FW. 
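+ * The mailbox helper writes its vf argument into the + * NFP_NET_VF_CFG_MB_VF_NUM field, so passing vf_fid here tells the + * firmware where this PF's VF range begins; the VF count itself went + * into NFP_NET_VF_CFG_MB_VF_CNT just above.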
*/ + return nfp_net_sriov_update(pf->app, pf->multi_pf.vf_fid, + NFP_NET_VF_CFG_MB_UPD_SPLIT, "split"); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h index 2d445fa199dc8884d94f8f8f9d3d7dab2dfe2c1b..8de9590188199c1ae11d56a6923c0ba527ce3fab 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -21,6 +21,7 @@ #define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_CAP_RATE (0x1 << 6) +#define NFP_NET_VF_CFG_MB_CAP_SPLIT (0x1 << 8) #define NFP_NET_VF_CFG_MB_RET 0x2 #define NFP_NET_VF_CFG_MB_UPD 0x4 #define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0) @@ -30,6 +31,8 @@ #define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_UPD_RATE (0x1 << 6) +#define NFP_NET_VF_CFG_MB_UPD_SPLIT (0x1 << 8) +#define NFP_NET_VF_CFG_MB_VF_CNT 0x6 #define NFP_NET_VF_CFG_MB_VF_NUM 0x7 /* VF config entry @@ -68,4 +71,6 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, int nfp_app_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); +int nfp_net_pf_init_sriov(struct nfp_pf *pf); + #endif /* _NFP_NET_SRIOV_H_ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 54640bcb70fbabff03bb91873710021fdc4b7c4b..dadd6844c3855c709fa79b4274a2e29f642ac2fd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -189,7 +189,8 @@ int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, port->eth_port = &pf->eth_tbl->ports[id]; port->eth_id = pf->eth_tbl->ports[id].index; - port->netdev->dev_port = id; + if (!pf->multi_pf.en) + port->netdev->dev_port = id; if (pf->mac_stats_mem) port->eth_stats = pf->mac_stats_mem + port->eth_id * NFP_MAC_STATS_SIZE; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index db94b0bddc925136748b7af2bcd0828f4f83eaba..89a131cffc48a500040eba32b9f05bb168cc5458 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -64,6 +64,10 @@ int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, /* MAC Statistics Accumulator */ #define NFP_RESOURCE_MAC_STATISTICS "mac.stat" +/* Keepalive */ +#define NFP_KEEPALIVE "nfp.beat" +#define NFP_KEEPALIVE_MAGIC 0x6e66702e62656174ULL /* ASCII of "nfp.beat" */ + int nfp_resource_table_init(struct nfp_cpp *cpp); struct nfp_resource * diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 3f10c5365c80ebb2fe079b779fee644a46ed33da..8e60e20c4fee0ccc3a1cb062a68e8d446e277b14 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -29,6 +29,7 @@ #include "nfp_cpp.h" #include "nfp_dev.h" +#include "../nfp_main.h" #include "nfp6000/nfp6000.h" @@ -532,7 +533,8 @@ static int bar_cmp(const void *aptr, const void *bptr) * BAR1.0-BAR1.7: -- * BAR2.0-BAR2.7: -- */ -static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) +static int enable_bars(struct nfp6000_pcie *nfp, u16 interface, + struct nfp_pf *pf) { const u32 barcfg_msix_general = NFP_PCIE_BAR_PCIE2CPP_MapType( @@ -611,7 +613,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 
interface) bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { - int pf; + int pf_id; msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); atomic_inc(&bar->refcnt); @@ -624,8 +626,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) switch (nfp->pdev->device) { case PCI_DEVICE_ID_NFP3800: - pf = nfp->pdev->devfn & 7; - nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf); + pf_id = pf->multi_pf.id; + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf_id); break; case PCI_DEVICE_ID_NFP4000: case PCI_DEVICE_ID_NFP5000: @@ -1309,7 +1311,8 @@ static const struct nfp_cpp_operations nfp6000_pcie_ops = { * Return: NFP CPP handle */ struct nfp_cpp * -nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info) +nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info, + struct nfp_pf *pf) { struct nfp6000_pcie *nfp; u16 interface; @@ -1353,7 +1356,7 @@ nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_i goto err_free_nfp; } - err = enable_bars(nfp, interface); + err = enable_bars(nfp, interface, pf); if (err) goto err_free_nfp; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h index 097660b673dbf251dd32d631007be3332a43feba..e992f5c910132f0ae23001c1a29945f0511728d2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h @@ -11,7 +11,14 @@ #include "nfp_cpp.h" +/* Vendor specific register layout */ +#define NFP_VNDR_HEADER_OFFSET 0x0 +#define NFP_VNDR_PF_ID_OFFSET 0x4 + +struct nfp_pf; + struct nfp_cpp * -nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info); +nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info, + struct nfp_pf *pf); #endif /* NFP6000_PCIE_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c index 0725b51c2a95cb9b46f39bdf4551d411b6e6d742..8a7c5de0de772e6ef57e7400782fe4fde1298972 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c @@ -19,6 +19,7 @@ const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = { .pcie_cfg_expbar_offset = 0x0a00, .pcie_expl_offset = 0xd000, .qc_area_sz = 0x100000, + .pf_num_per_unit = 4, }, [NFP_DEV_NFP3800_VF] = { .dma_mask = DMA_BIT_MASK(48), @@ -38,6 +39,7 @@ const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = { .pcie_cfg_expbar_offset = 0x0400, .pcie_expl_offset = 0x1000, .qc_area_sz = 0x80000, + .pf_num_per_unit = 1, }, [NFP_DEV_NFP6000_VF] = { .dma_mask = DMA_BIT_MASK(40), diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h index e4d38178de0fd6bebff3767226eb5fcb0d2ba058..d948c9c4a09a07acd51341f3cfc08e92cff84034 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h @@ -35,6 +35,7 @@ struct nfp_dev_info { u32 pcie_cfg_expbar_offset; u32 pcie_expl_offset; u32 qc_area_sz; + u8 pf_num_per_unit; }; extern const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT]; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index 49a4d3f56b56a8cec0501ded5e362fc154686651..4042352f83b08e6eae3054fbb66c9bfdc860ef4a 100644 --- 
a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -101,6 +101,10 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, u64 value); u8 __iomem * +nfp_rtsym_map_offset(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area); +u8 __iomem * nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, unsigned int min_size, struct nfp_cpp_area **area); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 7136bc48530ba08cf431339481bb352e34e8a9b6..55d799d420aa37c8e88b9390032b2208e81622b8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -37,7 +37,8 @@ #define NSP_COMMAND 0x08 #define NSP_COMMAND_OPTION GENMASK_ULL(63, 32) -#define NSP_COMMAND_CODE GENMASK_ULL(31, 16) +#define NSP_COMMAND_CODE_MJ_VER GENMASK_ULL(31, 28) +#define NSP_COMMAND_CODE GENMASK_ULL(27, 16) #define NSP_COMMAND_DMA_BUF BIT_ULL(1) #define NSP_COMMAND_START BIT_ULL(0) @@ -58,7 +59,7 @@ #define NFP_CAP_CMD_DMA_SG 0x28 #define NSP_MAGIC 0xab10 -#define NSP_MAJOR 0 +#define NSP_MAJOR 1 #define NSP_MINOR 8 #define NSP_CODE_MAJOR GENMASK(15, 12) @@ -101,6 +102,7 @@ enum nfp_nsp_cmd { SPCODE_VERSIONS = 21, /* Report FW versions */ SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */ SPCODE_READ_MEDIA = 23, /* Get either the supported or advertised media for a port */ + SPCODE_DEV_ACTIVATE = 29, /* Activate hardware for multiple pfs case */ }; struct nfp_nsp_dma_buf { @@ -247,14 +249,14 @@ static int nfp_nsp_check(struct nfp_nsp *state) state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg); state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg); - if (state->ver.major != NSP_MAJOR) { + if (state->ver.major > NSP_MAJOR) { nfp_err(cpp, "Unsupported ABI %hu.%hu\n", state->ver.major, state->ver.minor); return -EINVAL; } if (state->ver.minor < NSP_MINOR) { - nfp_err(cpp, "ABI too old to support NIC operation (%u.%hu < %u.%u), please update the management FW on the flash\n", - NSP_MAJOR, state->ver.minor, NSP_MAJOR, NSP_MINOR); + nfp_err(cpp, "ABI too old to support NIC operation (x.%u < x.%u), please update the management FW on the flash\n", + state->ver.minor, NSP_MINOR); return -EINVAL; } @@ -380,6 +382,7 @@ __nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg) err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, FIELD_PREP(NSP_COMMAND_OPTION, arg->option) | + FIELD_PREP(NSP_COMMAND_CODE_MJ_VER, state->ver.major) | FIELD_PREP(NSP_COMMAND_CODE, arg->code) | FIELD_PREP(NSP_COMMAND_DMA_BUF, arg->dma) | FIELD_PREP(NSP_COMMAND_START, 1)); @@ -730,6 +733,17 @@ int nfp_nsp_device_soft_reset(struct nfp_nsp *state) return nfp_nsp_command(state, SPCODE_SOFT_RESET); } +int nfp_nsp_device_activate(struct nfp_nsp *state) +{ + /* Older ABI versions did support this feature, however this has only + * been reliable since ABI 38. 
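+ * Callers treat the resulting -EOPNOTSUPP as non-fatal (see + * nfp_nsp_init()), so probing still succeeds against older + * management firmware.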
+ */ + if (nfp_nsp_get_abi_ver_minor(state) < 38) + return -EOPNOTSUPP; + + return nfp_nsp_command(state, SPCODE_DEV_ACTIVATE); +} + int nfp_nsp_mac_reinit(struct nfp_nsp *state) { return nfp_nsp_command(state, SPCODE_MAC_INIT); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index 6e044ac049172e3713ee70c09d0e94a110115cd7..f34b996b0749656cb7e97df198199fcc327ccaca 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -17,6 +17,7 @@ u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); int nfp_nsp_wait(struct nfp_nsp *state); int nfp_nsp_device_soft_reset(struct nfp_nsp *state); +int nfp_nsp_device_activate(struct nfp_nsp *state); int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_mac_reinit(struct nfp_nsp *state); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 2260c2403a83a177df578bfe5bcad47981cc339f..97a4417a1c1b0df00ae8946609789f3dffe6dbb5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -520,8 +520,9 @@ int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, } u8 __iomem * -nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, - unsigned int min_size, struct nfp_cpp_area **area) +nfp_rtsym_map_offset(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area) { const struct nfp_rtsym *sym; u8 __iomem *mem; @@ -540,12 +541,12 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, return (u8 __iomem *)ERR_PTR(err); } - if (sym->size < min_size) { + if (sym->size < min_size + offset) { nfp_err(rtbl->cpp, "rtsym '%s': too small\n", name); return (u8 __iomem *)ERR_PTR(-EINVAL); } - mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr, sym->size, area); + mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr + offset, sym->size - offset, area); if (IS_ERR(mem)) { nfp_err(rtbl->cpp, "rtysm '%s': failed to map: %ld\n", name, PTR_ERR(mem)); @@ -554,3 +555,10 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, return mem; } + +u8 __iomem * +nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int min_size, struct nfp_cpp_area **area) +{ + return nfp_rtsym_map_offset(rtbl, name, id, 0, min_size, area); +} diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c index 9dd5afe37f6e44ca0c4043db58514a0964246e89..e7a2d01bcbff1c2832dbab6206f4b75fe24b1579 100644 --- a/drivers/net/ethernet/netronome/nfp/nic/main.c +++ b/drivers/net/ethernet/netronome/nfp/nic/main.c @@ -12,7 +12,8 @@ static int nfp_nic_init(struct nfp_app *app) { struct nfp_pf *pf = app->pf; - if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count && + !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 
4dbc076f72d65a9b97fd95172b54f2013145a4de..f9a3f3321e596f5789905440db91fc071b06516a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -29,6 +29,7 @@ /* Synopsys Core versions */ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 +#define DWMAC_CORE_3_70 0x37 #define DWMAC_CORE_4_00 0x40 #define DWMAC_CORE_4_10 0x41 #define DWMAC_CORE_5_00 0x50 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index ee3604f58def52ce99a14ff0ee37819f2aab1aac..f03d78385ed978659047227abe2e00ffb71a6b9a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -8,15 +8,88 @@ #include #include #include "stmmac.h" +#include "dwmac_dma.h" +#include "dwmac1000.h" + +/* Normal Loongson Tx Summary */ +#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000 +/* Normal Loongson Rx Summary */ +#define DMA_INTR_ENA_NIE_RX_LOONGSON 0x00020000 + +#define DMA_INTR_NORMAL_LOONGSON (DMA_INTR_ENA_NIE_TX_LOONGSON | \ + DMA_INTR_ENA_NIE_RX_LOONGSON | \ + DMA_INTR_ENA_RIE | DMA_INTR_ENA_TIE) + +/* Abnormal Loongson Tx Summary */ +#define DMA_INTR_ENA_AIE_TX_LOONGSON 0x00010000 +/* Abnormal Loongson Rx Summary */ +#define DMA_INTR_ENA_AIE_RX_LOONGSON 0x00008000 + +#define DMA_INTR_ABNORMAL_LOONGSON (DMA_INTR_ENA_AIE_TX_LOONGSON | \ + DMA_INTR_ENA_AIE_RX_LOONGSON | \ + DMA_INTR_ENA_FBE | DMA_INTR_ENA_UNE) + +#define DMA_INTR_DEFAULT_MASK_LOONGSON (DMA_INTR_NORMAL_LOONGSON | \ + DMA_INTR_ABNORMAL_LOONGSON) + +/* Normal Loongson Tx Interrupt Summary */ +#define DMA_STATUS_NIS_TX_LOONGSON 0x00040000 +/* Normal Loongson Rx Interrupt Summary */ +#define DMA_STATUS_NIS_RX_LOONGSON 0x00020000 + +/* Abnormal Loongson Tx Interrupt Summary */ +#define DMA_STATUS_AIS_TX_LOONGSON 0x00010000 +/* Abnormal Loongson Rx Interrupt Summary */ +#define DMA_STATUS_AIS_RX_LOONGSON 0x00008000 + +/* Fatal Loongson Tx Bus Error Interrupt */ +#define DMA_STATUS_FBI_TX_LOONGSON 0x00002000 +/* Fatal Loongson Rx Bus Error Interrupt */ +#define DMA_STATUS_FBI_RX_LOONGSON 0x00001000 + +#define DMA_STATUS_MSK_COMMON_LOONGSON (DMA_STATUS_NIS_TX_LOONGSON | \ + DMA_STATUS_NIS_RX_LOONGSON | \ + DMA_STATUS_AIS_TX_LOONGSON | \ + DMA_STATUS_AIS_RX_LOONGSON | \ + DMA_STATUS_FBI_TX_LOONGSON | \ + DMA_STATUS_FBI_RX_LOONGSON) + +#define DMA_STATUS_MSK_RX_LOONGSON (DMA_STATUS_ERI | DMA_STATUS_RWT | \ + DMA_STATUS_RPS | DMA_STATUS_RU | \ + DMA_STATUS_RI | DMA_STATUS_OVF | \ + DMA_STATUS_MSK_COMMON_LOONGSON) + +#define DMA_STATUS_MSK_TX_LOONGSON (DMA_STATUS_ETI | DMA_STATUS_UNF | \ + DMA_STATUS_TJT | DMA_STATUS_TU | \ + DMA_STATUS_TPS | DMA_STATUS_TI | \ + DMA_STATUS_MSK_COMMON_LOONGSON) + +#define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03 +#define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13 +#define LOONGSON_DWMAC_CORE_1_00 0x10 /* Loongson custom IP */ +#define CHANNEL_NUM 8 + +struct loongson_data { + u32 gmac_verion; + struct device *dev; +}; + +struct stmmac_pci_info { + int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); +}; -static int loongson_default_data(struct plat_stmmacenet_data *plat) +static void loongson_default_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) { + /* Get bus_id, this can be overloaded later */ + plat->bus_id = (pci_domain_nr(pdev->bus) << 16) | + PCI_DEVID(pdev->bus->number, pdev->devfn); + plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ plat->has_gmac = 1; plat->force_sf_dma_mode = 1; - /* Set default value for 
multicast hash bins */ - plat->multicast_filter_bins = HASH_TABLE_SIZE; + plat->mac_interface = PHY_INTERFACE_MODE_GMII; /* Set default value for unicast filter entries */ plat->unicast_filter_entries = 1; @@ -24,10 +97,6 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat) /* Set the maxmtu to a default of JUMBO_LEN */ plat->maxmtu = JUMBO_LEN; - /* Set default number of RX and TX queues to use */ - plat->tx_queues_to_use = 1; - plat->rx_queues_to_use = 1; - /* Disable Priority config by default */ plat->tx_queues_cfg[0].use_prio = false; plat->rx_queues_cfg[0].use_prio = false; @@ -45,23 +114,346 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat) plat->dma_cfg->pblx8 = true; plat->multicast_filter_bins = 256; +} + +static int loongson_gmac_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + loongson_default_data(pdev, plat); + + plat->mdio_bus_data->phy_mask = 0; + plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; + + return 0; +} + +static struct stmmac_pci_info loongson_gmac_pci_info = { + .setup = loongson_gmac_data, +}; + +static void loongson_gnet_dma_init_channel(struct stmmac_priv *priv, + void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 chan) +{ + int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; + int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + u32 value; + + value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan)); + + if (dma_cfg->pblx8) + value |= DMA_BUS_MODE_MAXPBL; + + value |= DMA_BUS_MODE_USP; + value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK); + value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT); + value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); + + /* Set the Fixed burst mode */ + if (dma_cfg->fixed_burst) + value |= DMA_BUS_MODE_FB; + + /* Mixed Burst has no effect when fb is set */ + if (dma_cfg->mixed_burst) + value |= DMA_BUS_MODE_MB; + + if (dma_cfg->atds) + value |= DMA_BUS_MODE_ATDS; + + if (dma_cfg->aal) + value |= DMA_BUS_MODE_AAL; + + writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan)); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK_LOONGSON, ioaddr + + DMA_CHAN_INTR_ENA(chan)); +} + +static int loongson_gnet_dma_interrupt(struct stmmac_priv *priv, + void __iomem *ioaddr, + struct stmmac_extra_stats *x, + u32 chan, u32 dir) +{ + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); + u32 abnor_intr_status; + u32 nor_intr_status; + u32 fb_intr_status; + u32 intr_status; + int ret = 0; + + /* read the status register (CSR5) */ + intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); + + if (dir == DMA_DIR_RX) + intr_status &= DMA_STATUS_MSK_RX_LOONGSON; + else if (dir == DMA_DIR_TX) + intr_status &= DMA_STATUS_MSK_TX_LOONGSON; + + nor_intr_status = intr_status & (DMA_STATUS_NIS_TX_LOONGSON | + DMA_STATUS_NIS_RX_LOONGSON); + abnor_intr_status = intr_status & (DMA_STATUS_AIS_TX_LOONGSON | + DMA_STATUS_AIS_RX_LOONGSON); + fb_intr_status = intr_status & (DMA_STATUS_FBI_TX_LOONGSON | + DMA_STATUS_FBI_RX_LOONGSON); + + /* ABNORMAL interrupts */ + if (unlikely(abnor_intr_status)) { + if (unlikely(intr_status & DMA_STATUS_UNF)) { + ret = tx_hard_error_bump_tc; + x->tx_undeflow_irq++; + } + if (unlikely(intr_status & DMA_STATUS_TJT)) + x->tx_jabber_irq++; + if (unlikely(intr_status & DMA_STATUS_OVF)) + x->rx_overflow_irq++; + if (unlikely(intr_status & DMA_STATUS_RU)) + x->rx_buf_unav_irq++; + if (unlikely(intr_status & DMA_STATUS_RPS)) + x->rx_process_stopped_irq++; + if (unlikely(intr_status & DMA_STATUS_RWT)) + x->rx_watchdog_irq++; + if (unlikely(intr_status & 
DMA_STATUS_ETI)) + x->tx_early_irq++; + if (unlikely(intr_status & DMA_STATUS_TPS)) { + x->tx_process_stopped_irq++; + ret = tx_hard_error; + } + if (unlikely(fb_intr_status)) { + x->fatal_bus_error_irq++; + ret = tx_hard_error; + } + } + /* TX/RX NORMAL interrupts */ + if (likely(nor_intr_status)) { + if (likely(intr_status & DMA_STATUS_RI)) { + u32 value = readl(ioaddr + DMA_INTR_ENA); + /* to schedule NAPI on real RIE event. */ + if (likely(value & DMA_INTR_ENA_RIE)) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_rx; + } + } + if (likely(intr_status & DMA_STATUS_TI)) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_tx; + } + if (unlikely(intr_status & DMA_STATUS_ERI)) + x->rx_early_irq++; + } + /* Optional hardware blocks, interrupts should be disabled */ + if (unlikely(intr_status & + (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) + pr_warn("%s: unexpected status %08x\n", __func__, intr_status); + + /* Clear the interrupt by writing a logic 1 to CSR5[18:0] */ + writel((intr_status & 0x7ffff), ioaddr + DMA_CHAN_STATUS(chan)); + + return ret; +} + +static void loongson_gnet_fix_speed(void *priv, unsigned int speed, + unsigned int mode) +{ + struct loongson_data *ld = (struct loongson_data *)priv; + struct net_device *ndev = dev_get_drvdata(ld->dev); + struct stmmac_priv *ptr = netdev_priv(ndev); + + /* The controller and PHY don't work well together. + * We need to use the PS bit to check if the controller's status + * is correct and reset the PHY if necessary. + * MAC_CTRL_REG.15 is defined by the GMAC_CONTROL_PS macro. + */ + if (speed == SPEED_1000) { + if (readl(ptr->ioaddr + MAC_CTRL_REG) & + GMAC_CONTROL_PS) + /* Work around the hardware bug by restarting autoneg */ + phy_restart_aneg(ndev->phydev); + } +}
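Taken by itself, the workaround above reduces to one predicate on MAC_CTRL_REG: at 1000 Mb/s the port-select bit must be clear, otherwise autonegotiation is restarted. A minimal standalone sketch of that check (GMAC_CONTROL_PS is bit 15 per the comment above; the helper name is ours, not the driver's)::

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GMAC_CONTROL_PS (1u << 15) /* MAC_CTRL_REG.15, port select */

    /* Hypothetical helper mirroring loongson_gnet_fix_speed(): true means
     * the MAC is still in 10/100 port mode although the link came up at
     * 1000 Mb/s, so the PHY must renegotiate.
     */
    static bool gnet_needs_aneg_restart(uint32_t mac_ctrl, unsigned int speed)
    {
        return speed == 1000 && (mac_ctrl & GMAC_CONTROL_PS);
    }

    int main(void)
    {
        printf("%d\n", gnet_needs_aneg_restart(0x00008000, 1000)); /* 1 */
        printf("%d\n", gnet_needs_aneg_restart(0x00000000, 1000)); /* 0 */
        return 0;
    }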
+ +static int loongson_gnet_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + loongson_default_data(pdev, plat); + + plat->phy_interface = PHY_INTERFACE_MODE_GMII; + plat->mdio_bus_data->phy_mask = ~(u32)BIT(2); + plat->fix_mac_speed = loongson_gnet_fix_speed; + + /* GNET devices with dev revision 0x00 do not support manually + * setting the speed to 1000. + */ + if (pdev->revision == 0x00) + plat->flags |= STMMAC_FLAG_DISABLE_FORCE_1000; + return 0; } +static struct stmmac_pci_info loongson_gnet_pci_info = { + .setup = loongson_gnet_data, +}; + +static int loongson_dwmac_config_legacy(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res, + struct device_node *np) +{ + if (np) { + res->irq = of_irq_get_byname(np, "macirq"); + if (res->irq < 0) { + dev_err(&pdev->dev, "IRQ macirq not found\n"); + return -ENODEV; + } + + res->wol_irq = of_irq_get_byname(np, "eth_wake_irq"); + if (res->wol_irq < 0) { + dev_info(&pdev->dev, + "IRQ eth_wake_irq not found, using macirq\n"); + res->wol_irq = res->irq; + } + + res->lpi_irq = of_irq_get_byname(np, "eth_lpi"); + if (res->lpi_irq < 0) { + dev_err(&pdev->dev, "IRQ eth_lpi not found\n"); + return -ENODEV; + } + } else { + res->irq = pdev->irq; + res->wol_irq = res->irq; + } + + return 0; +} + +static int loongson_dwmac_config_msi(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res, + struct device_node *np) +{ + int i, ret, vecs; + + vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1); + ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI); + if (ret < 0) { + dev_info(&pdev->dev, + "MSI enable failed, falling back to legacy interrupt\n"); + return loongson_dwmac_config_legacy(pdev, plat, res, np); + } + + res->irq = pci_irq_vector(pdev, 0); + res->wol_irq = 0; + + /* INT NAME | MAC | CH7 rx | CH7 tx | ... | CH0 rx | CH0 tx | + * --------- ----- -------- -------- ... -------- -------- + * IRQ NUM | 0 | 1 | 2 | ... | 15 | 16 | + */ + for (i = 0; i < CHANNEL_NUM; i++) { + res->rx_irq[CHANNEL_NUM - 1 - i] = + pci_irq_vector(pdev, 1 + i * 2); + res->tx_irq[CHANNEL_NUM - 1 - i] = + pci_irq_vector(pdev, 2 + i * 2); + } + + plat->flags |= STMMAC_FLAG_MULTI_MSI_EN; + + return 0; +}
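The interleaved vector layout in the table above is easy to verify with a few lines of arithmetic; a minimal sketch, with printing standing in for pci_irq_vector() (vector numbering exactly as in loongson_dwmac_config_msi())::

    #include <stdio.h>

    #define CHANNEL_NUM 8

    int main(void)
    {
        int i;

        /* vector 0 is the MAC/common interrupt */
        printf("MAC   -> vector 0\n");

        /* channels are filled from the highest index down, rx before tx,
         * matching the INT NAME / IRQ NUM table in the driver comment
         */
        for (i = 0; i < CHANNEL_NUM; i++) {
            printf("CH%d rx -> vector %d\n", CHANNEL_NUM - 1 - i, 1 + i * 2);
            printf("CH%d tx -> vector %d\n", CHANNEL_NUM - 1 - i, 2 + i * 2);
        }
        return 0;
    }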
+ +static struct mac_device_info *loongson_dwmac_setup(void *apriv) +{ + struct stmmac_priv *priv = apriv; + struct mac_device_info *mac; + struct stmmac_dma_ops *dma; + struct loongson_data *ld; + struct pci_dev *pdev; + + ld = priv->plat->bsp_priv; + pdev = to_pci_dev(priv->device); + + mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); + if (!mac) + return NULL; + + dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL); + if (!dma) + return NULL; + + /* The original IP-core version is 0x37 in all Loongson GNET + * (ls2k2000 and ls7a2000), but the GNET HW designers have changed the + * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the Loongson + * ls2k2000 MAC to emphasize the differences: multiple DMA-channels, + * AV feature and GMAC_INT_STATUS CSR flags layout. Restore the + * original value so the correct HW interface is selected. + */ + if (ld->gmac_version == LOONGSON_DWMAC_CORE_1_00) { + priv->synopsys_id = DWMAC_CORE_3_70; + *dma = dwmac1000_dma_ops; + dma->init_chan = loongson_gnet_dma_init_channel; + dma->dma_interrupt = loongson_gnet_dma_interrupt; + mac->dma = dma; + } + + priv->dev->priv_flags |= IFF_UNICAST_FLT; + + /* Pre-initialize the respective "mac" fields as it's done in + * dwmac1000_setup() + */ + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + /* The GMAC device with PCI ID 0x7a03 does not support any pause mode. + * The GNET devices without CORE ID 0x10 do not support half-duplex. + */ + if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) { + mac->link.caps = MAC_10 | MAC_100 | MAC_1000; + } else { + if (ld->gmac_version == LOONGSON_DWMAC_CORE_1_00) + mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000; + else + mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10FD | MAC_100FD | MAC_1000FD; + } + + mac->link.duplex = GMAC_CONTROL_DM; + mac->link.speed10 = GMAC_CONTROL_PS; + mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES; + mac->link.speed1000 = 0; + mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES; + mac->mii.addr = GMAC_MII_ADDR; + mac->mii.data = GMAC_MII_DATA; + mac->mii.addr_shift = 11; + mac->mii.addr_mask = 0x0000F800; + mac->mii.reg_shift = 6; + mac->mii.reg_mask = 0x000007C0; + mac->mii.clk_csr_shift = 2; + mac->mii.clk_csr_mask = GENMASK(5, 2); + + return mac; +} + static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct plat_stmmacenet_data *plat; + int ret, i, bus_id, phy_mode; + struct stmmac_pci_info *info; struct stmmac_resources res; + struct loongson_data *ld; struct device_node *np; - int ret, i, phy_mode; np = dev_of_node(&pdev->dev); - if (!np) { - pr_info("dwmac_loongson_pci: No OF node\n"); - return -ENODEV; - } - plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if (!plat) return -ENOMEM; @@ -72,17 +464,13 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id if (!plat->mdio_bus_data) return -ENOMEM; - plat->mdio_node = of_get_child_by_name(np, "mdio"); - if (plat->mdio_node) { - dev_info(&pdev->dev, "Found MDIO subnode\n"); - plat->mdio_bus_data->needs_reset = true; - } - plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL); - if (!plat->dma_cfg) { - ret = -ENOMEM; - goto err_put_node; - } + if (!plat->dma_cfg) + return -ENOMEM; + + ld = devm_kzalloc(&pdev->dev, sizeof(*ld), GFP_KERNEL); + if (!ld) + return -ENOMEM; /* Enable pci device */ ret = pci_enable_device(pdev); @@ -101,55 +489,68 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id break; } - plat->bus_id = of_alias_get_id(np, "ethernet"); - if (plat->bus_id < 0) - plat->bus_id = pci_dev_id(pdev); + pci_set_master(pdev); - phy_mode = device_get_phy_mode(&pdev->dev); - if (phy_mode < 0) { - dev_err(&pdev->dev, "phy_mode not found\n"); - ret = phy_mode; + info = (struct stmmac_pci_info *)id->driver_data; + ret = info->setup(pdev, plat); + if (ret) goto err_disable_device; - } - plat->phy_interface = phy_mode; - plat->mac_interface = PHY_INTERFACE_MODE_GMII; + if (np) { + plat->mdio_node = of_get_child_by_name(np, "mdio"); + if (plat->mdio_node) { + dev_info(&pdev->dev, "Found MDIO subnode\n"); + plat->mdio_bus_data->needs_reset = true; + } + + bus_id = of_alias_get_id(np, "ethernet"); + if (bus_id >= 0) + plat->bus_id = bus_id; + + phy_mode = device_get_phy_mode(&pdev->dev); + if (phy_mode < 0) { + dev_err(&pdev->dev, "phy_mode not found\n"); + ret = phy_mode; + goto err_disable_device; + } + plat->phy_interface = phy_mode; + } - pci_set_master(pdev); + plat->bsp_priv = ld; + plat->setup = loongson_dwmac_setup; + ld->dev = &pdev->dev; - loongson_default_data(plat); - pci_enable_msi(pdev); memset(&res, 0, sizeof(res)); res.addr = pcim_iomap_table(pdev)[0]; + ld->gmac_version = readl(res.addr + GMAC_VERSION) & 0xff; - res.irq = of_irq_get_byname(np, "macirq"); - if (res.irq < 0) { - dev_err(&pdev->dev, "IRQ macirq not found\n"); - ret = 
-ENODEV; - goto err_disable_msi; - } + switch (ld->gmac_version) { + case LOONGSON_DWMAC_CORE_1_00: + plat->rx_queues_to_use = CHANNEL_NUM; + plat->tx_queues_to_use = CHANNEL_NUM; - res.wol_irq = of_irq_get_byname(np, "eth_wake_irq"); - if (res.wol_irq < 0) { - dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n"); - res.wol_irq = res.irq; - } + /* Only channel 0 supports checksum, + * so turn off checksum to enable multiple channels. + */ + for (i = 1; i < CHANNEL_NUM; i++) + plat->tx_queues_cfg[i].coe_unsupported = 1; - res.lpi_irq = of_irq_get_byname(np, "eth_lpi"); - if (res.lpi_irq < 0) { - dev_err(&pdev->dev, "IRQ eth_lpi not found\n"); - ret = -ENODEV; - goto err_disable_msi; + ret = loongson_dwmac_config_msi(pdev, plat, &res, np); + break; + default: /* 0x35 and 0x37 devices. */ + plat->tx_queues_to_use = 1; + plat->rx_queues_to_use = 1; + + ret = loongson_dwmac_config_legacy(pdev, plat, &res, np); + break; } ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) - goto err_disable_msi; + goto err_disable_device; return ret; -err_disable_msi: - pci_disable_msi(pdev); err_disable_device: pci_disable_device(pdev); err_put_node: @@ -216,7 +617,8 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend, loongson_dwmac_resume); static const struct pci_device_id loongson_dwmac_id_table[] = { - { PCI_VDEVICE(LOONGSON, 0x7a03) }, + { PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) }, + { PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) }, {} }; MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table); @@ -235,4 +637,5 @@ module_pci_driver(loongson_dwmac_driver); MODULE_DESCRIPTION("Loongson DWMAC PCI driver"); MODULE_AUTHOR("Qing Zhang "); +MODULE_AUTHOR("Yanteng Si "); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 63998d65fef8eb2696dda0c9f32ef4a64a8dd7db..1fa6406f2dd46904d8e842821763621dad5086aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -299,7 +299,7 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr) * Called from stmmac via stmmac_dma_ops->init */ static void sun8i_dwmac_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN); writel(0x1FFFFFF, ioaddr + EMAC_INT_STA); @@ -395,7 +395,7 @@ static void sun8i_dwmac_dma_start_tx(struct stmmac_priv *priv, writel(v, ioaddr + EMAC_TX_CTL1); } -static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr) +static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan) { u32 v; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index daf79cdbd3ecfe89969cf9edd03172a256007bf5..66c0c22908b17ae1c4128f19d6f4cb4c12be58d1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -70,15 +70,17 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) writel(value, ioaddr + DMA_AXI_BUS_MODE); } -static void dwmac1000_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) +static void dwmac1000_dma_init_channel(struct stmmac_priv *priv, + void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) { - u32 value = readl(ioaddr + DMA_BUS_MODE); int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; int rxpbl = dma_cfg->rxpbl 
?: dma_cfg->pbl; + u32 value; - /* - * Set the DMA PBL (Programmable Burst Length) mode. + value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan)); + + /* Set the DMA PBL (Programmable Burst Length) mode. * * Note: before stmmac core 3.50 this mode bit was 4xPBL, and * post 3.5 mode bit acts as 8*PBL. @@ -98,16 +100,16 @@ static void dwmac1000_dma_init(void __iomem *ioaddr, if (dma_cfg->mixed_burst) value |= DMA_BUS_MODE_MB; - if (atds) + if (dma_cfg->atds) value |= DMA_BUS_MODE_ATDS; if (dma_cfg->aal) value |= DMA_BUS_MODE_AAL; - writel(value, ioaddr + DMA_BUS_MODE); + writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan)); /* Mask interrupts by writing to CSR7 */ - writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(chan)); } static void dwmac1000_dma_init_rx(struct stmmac_priv *priv, @@ -116,7 +118,7 @@ static void dwmac1000_dma_init_rx(struct stmmac_priv *priv, dma_addr_t dma_rx_phy, u32 chan) { /* RX descriptor base address list must be written into DMA CSR3 */ - writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR); + writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RCV_BASE_ADDR(chan)); } static void dwmac1000_dma_init_tx(struct stmmac_priv *priv, @@ -125,7 +127,7 @@ static void dwmac1000_dma_init_tx(struct stmmac_priv *priv, dma_addr_t dma_tx_phy, u32 chan) { /* TX descriptor base address list must be written into DMA CSR4 */ - writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR); + writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); } static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) @@ -153,7 +155,7 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv, void __iomem *ioaddr, int mode, u32 channel, int fifosz, u8 qmode) { - u32 csr6 = readl(ioaddr + DMA_CONTROL); + u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel)); if (mode == SF_DMA_MODE) { pr_debug("GMAC: enable RX store and forward mode\n"); @@ -175,14 +177,14 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv, /* Configure flow control based on rx fifo size */ csr6 = dwmac1000_configure_fc(csr6, fifosz); - writel(csr6, ioaddr + DMA_CONTROL); + writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel)); } static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv, void __iomem *ioaddr, int mode, u32 channel, int fifosz, u8 qmode) { - u32 csr6 = readl(ioaddr + DMA_CONTROL); + u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel)); if (mode == SF_DMA_MODE) { pr_debug("GMAC: enable TX store and forward mode\n"); @@ -209,7 +211,7 @@ static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv, csr6 |= DMA_CONTROL_TTC_256; } - writel(csr6, ioaddr + DMA_CONTROL); + writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel)); } static void dwmac1000_dump_dma_regs(struct stmmac_priv *priv, @@ -271,12 +273,12 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr, static void dwmac1000_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr, u32 riwt, u32 queue) { - writel(riwt, ioaddr + DMA_RX_WATCHDOG); + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue)); } const struct stmmac_dma_ops dwmac1000_dma_ops = { .reset = dwmac_dma_reset, - .init = dwmac1000_dma_init, + .init_chan = dwmac1000_dma_init_channel, .init_rx_chan = dwmac1000_dma_init_rx, .init_tx_chan = dwmac1000_dma_init_tx, .axi = dwmac1000_dma_axi, @@ -294,3 +296,4 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = { .get_hw_feature = dwmac1000_get_hw_feature, .rx_watchdog = dwmac1000_rx_watchdog, }; +EXPORT_SYMBOL_GPL(dwmac1000_dma_ops); 
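The mechanical rewrites above all follow one rule: every legacy CSR moves to base + chan * 0x100. A tiny standalone sketch of that mapping (CSR offsets per dwmac_dma.h; the 0x100 stride is the DMA_CHAN_BASE_OFFSET introduced in the dwmac_dma.h hunk below; eight channels assumed, matching CHANNEL_NUM)::

    #include <stdio.h>

    #define DMA_BUS_MODE          0x00001000 /* CSR0 */
    #define DMA_INTR_ENA          0x0000101c /* CSR7 */
    #define DMA_CHAN_BASE_OFFSET  0x100      /* per-channel register stride */

    /* same arithmetic as dma_chan_base_addr() in dwmac_dma.h */
    static unsigned int dma_chan_base_addr(unsigned int base, unsigned int chan)
    {
        return base + chan * DMA_CHAN_BASE_OFFSET;
    }

    int main(void)
    {
        unsigned int chan;

        /* channel 0 lands on the legacy single-channel offsets, so the
         * old and new register maps stay compatible
         */
        for (chan = 0; chan < 8; chan++)
            printf("chan %u: CSR0 @ 0x%04x, CSR7 @ 0x%04x\n", chan,
                   dma_chan_base_addr(DMA_BUS_MODE, chan),
                   dma_chan_base_addr(DMA_INTR_ENA, chan));
        return 0;
    }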
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index dea270f60cc3e215c7ff2b36ca013a77575e29cf..f861babc06f97abf07880b3bef04fa6c42657bde 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -19,7 +19,7 @@ #include "dwmac_dma.h" static void dwmac100_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { /* Enable Application Access by writing to DMA CSR0 */ writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT), diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index a9837985a483d8eecc19afa6d52f40f3266895fe..bdb4f527289d2d4411d0c1c9b93a7417d3120df9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -69,7 +69,7 @@ static void dwmac4_core_init(struct mac_device_info *hw, init_waitqueue_head(&priv->tstamp_busy_wait); } -static void dwmac4_phylink_get_caps(struct stmmac_priv *priv) +static void dwmac4_update_caps(struct stmmac_priv *priv) { if (priv->plat->tx_queues_to_use > 1) priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD); @@ -1161,7 +1161,7 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no, const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, - .phylink_get_caps = dwmac4_phylink_get_caps, + .update_caps = dwmac4_update_caps, .set_mac = stmmac_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, @@ -1204,7 +1204,7 @@ const struct stmmac_ops dwmac410_ops = { .core_init = dwmac4_core_init, - .phylink_get_caps = dwmac4_phylink_get_caps, + .update_caps = dwmac4_update_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, @@ -1253,7 +1253,7 @@ const struct stmmac_ops dwmac510_ops = { .core_init = dwmac4_core_init, - .phylink_get_caps = dwmac4_phylink_get_caps, + .update_caps = dwmac4_update_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 7805a66a0bc08fd0ae5841d08272f35a6de43fa0..22a044d93e172f6e667e6c3a8a0830304a38f232 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -153,7 +153,7 @@ static void dwmac410_dma_init_channel(struct stmmac_priv *priv, } static void dwmac4_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 72672391675f6eb14c4098545e6fc94e5b264574..363a85469594702efa1ba6fcd46b6eae45187c15 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -22,6 +22,23 @@ #define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ #define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ +/* The following DMA defines are channel-oriented */ +#define DMA_CHAN_BASE_OFFSET 0x100 + +static inline u32 dma_chan_base_addr(u32 base, u32 chan) +{ + return base + chan * DMA_CHAN_BASE_OFFSET; +} + +#define 
DMA_CHAN_XMT_POLL_DEMAND(chan) dma_chan_base_addr(DMA_XMT_POLL_DEMAND, chan) +#define DMA_CHAN_INTR_ENA(chan) dma_chan_base_addr(DMA_INTR_ENA, chan) +#define DMA_CHAN_CONTROL(chan) dma_chan_base_addr(DMA_CONTROL, chan) +#define DMA_CHAN_STATUS(chan) dma_chan_base_addr(DMA_STATUS, chan) +#define DMA_CHAN_BUS_MODE(chan) dma_chan_base_addr(DMA_BUS_MODE, chan) +#define DMA_CHAN_RCV_BASE_ADDR(chan) dma_chan_base_addr(DMA_RCV_BASE_ADDR, chan) +#define DMA_CHAN_TX_BASE_ADDR(chan) dma_chan_base_addr(DMA_TX_BASE_ADDR, chan) +#define DMA_CHAN_RX_WATCHDOG(chan) dma_chan_base_addr(DMA_RX_WATCHDOG, chan) + /* SW Reset */ #define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ @@ -152,7 +169,7 @@ #define NUM_DWMAC1000_DMA_REGS 23 #define NUM_DWMAC4_DMA_REGS 27 -void dwmac_enable_dma_transmission(void __iomem *ioaddr); +void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan); void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx); void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -168,5 +185,4 @@ void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan, u32 dir); int dwmac_dma_reset(void __iomem *ioaddr); - #endif /* __DWMAC_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 85e18f9a22f92091bb98f1892d7bb1f5f08bcf2a..4846bf49c576a2647c38f32c3c99bad996ce8458 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -28,65 +28,65 @@ int dwmac_dma_reset(void __iomem *ioaddr) } /* CSR1 enables the transmit DMA to check for new descriptor */ -void dwmac_enable_dma_transmission(void __iomem *ioaddr) +void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan) { - writel(1, ioaddr + DMA_XMT_POLL_DEMAND); + writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan)); } void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx) { - u32 value = readl(ioaddr + DMA_INTR_ENA); + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); if (rx) value |= DMA_INTR_DEFAULT_RX; if (tx) value |= DMA_INTR_DEFAULT_TX; - writel(value, ioaddr + DMA_INTR_ENA); + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); } void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx) { - u32 value = readl(ioaddr + DMA_INTR_ENA); + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); if (rx) value &= ~DMA_INTR_DEFAULT_RX; if (tx) value &= ~DMA_INTR_DEFAULT_TX; - writel(value, ioaddr + DMA_INTR_ENA); + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); } void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); value |= DMA_CONTROL_ST; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); value &= ~DMA_CONTROL_ST; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); value 
|= DMA_CONTROL_SR; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); value &= ~DMA_CONTROL_SR; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } #ifdef DWMAC_DMA_DEBUG @@ -165,7 +165,7 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; /* read the status register (CSR5) */ - u32 intr_status = readl(ioaddr + DMA_STATUS); + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); #ifdef DWMAC_DMA_DEBUG /* Enable it to monitor DMA rx/tx status in case of critical problems */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index dd2ab6185c40e813ee4401857875d3e8478303e7..7840bc403788ef8df62870630273abc513da34de 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -20,7 +20,7 @@ static int dwxgmac2_dma_reset(void __iomem *ioaddr) } static void dwxgmac2_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 47fb8e1646c2e945a2e047585fdfc4573ccb650f..eb4003ca7f5b75f419bbe6efa7114d32bf9314e0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -167,8 +167,7 @@ struct dma_features; struct stmmac_dma_ops { /* DMA core initialization */ int (*reset)(void __iomem *ioaddr); - void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, - int atds); + void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg); void (*init_chan)(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, u32 chan); void (*init_rx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -190,7 +189,7 @@ struct stmmac_dma_ops { /* To track extra statistic (if supported) */ void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x, void __iomem *ioaddr); - void (*enable_dma_transmission) (void __iomem *ioaddr); + void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan); void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx); void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -300,8 +299,8 @@ struct stmmac_est; struct stmmac_ops { /* MAC core initialization */ void (*core_init)(struct mac_device_info *hw, struct net_device *dev); - /* Get phylink capabilities */ - void (*phylink_get_caps)(struct stmmac_priv *priv); + /* Update MAC capabilities */ + void (*update_caps)(struct stmmac_priv *priv); /* Enable the MAC RX/TX */ void (*set_mac)(void __iomem *ioaddr, bool enable); /* Enable and verify that the IPC module is supported */ @@ -423,8 +422,8 @@ struct stmmac_ops { #define stmmac_core_init(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, core_init, __args) -#define stmmac_mac_phylink_get_caps(__priv) \ - stmmac_do_void_callback(__priv, mac, phylink_get_caps, __priv) +#define stmmac_mac_update_caps(__priv) \ + stmmac_do_void_callback(__priv, mac, update_caps, __priv) #define stmmac_mac_set(__priv, __args...) 
\ stmmac_do_void_callback(__priv, mac, set_mac, __args) #define stmmac_rx_ipc(__priv, __args...) \ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 521b1b5ffebb4f784e6b479784d1b75da5a42cc7..cf83cc95169bd397b9cd26880a77205815bd5b07 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -412,6 +412,12 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, return 0; } + if (priv->plat->flags & STMMAC_FLAG_DISABLE_FORCE_1000) { + if (cmd->base.speed == SPEED_1000 && + cmd->base.autoneg != AUTONEG_ENABLE) + return -EOPNOTSUPP; + } + return phylink_ethtool_ksettings_set(priv->phylink, cmd); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d6ee90fef2eca4b4c3e62b3783739fea0bf9ca56..80acff8b69e9ab20ec248808ccd1d8989c0512e3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -936,6 +936,22 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) priv->pause, tx_cnt); } +static unsigned long stmmac_mac_get_caps(struct phylink_config *config, + phy_interface_t interface) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + + /* Refresh the MAC-specific capabilities */ + stmmac_mac_update_caps(priv); + + config->mac_capabilities = priv->hw->link.caps; + + if (priv->plat->max_speed) + phylink_limit_mac_speed(config, priv->plat->max_speed); + + return config->mac_capabilities; +} + static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { @@ -1105,6 +1121,7 @@ static void stmmac_mac_link_up(struct phylink_config *config, } static const struct phylink_mac_ops stmmac_phylink_mac_ops = { + .mac_get_caps = stmmac_mac_get_caps, .mac_select_pcs = stmmac_mac_select_pcs, .mac_config = stmmac_mac_config, .mac_link_down = stmmac_mac_link_down, @@ -1204,7 +1221,6 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) int mode = priv->plat->phy_interface; struct fwnode_handle *fwnode; struct phylink *phylink; - int max_speed; priv->phylink_config.dev = &priv->dev->dev; priv->phylink_config.type = PHYLINK_NETDEV; @@ -1225,15 +1241,6 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) xpcs_get_interfaces(priv->hw->xpcs, priv->phylink_config.supported_interfaces); - /* Get the MAC specific capabilities */ - stmmac_mac_phylink_get_caps(priv); - - priv->phylink_config.mac_capabilities = priv->hw->link.caps; - - max_speed = priv->plat->max_speed; - if (max_speed) - phylink_limit_mac_speed(&priv->phylink_config, max_speed); - fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); @@ -1926,13 +1933,18 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, dma_free_coherent(priv->device, dma_conf->dma_rx_size * sizeof(struct dma_extended_desc), rx_q->dma_erx, rx_q->dma_rx_phy); + rx_q->dma_rx = NULL; + rx_q->dma_erx = NULL; if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) xdp_rxq_info_unreg(&rx_q->xdp_rxq); kfree(rx_q->buf_pool); + rx_q->buf_pool = NULL; + if (rx_q->page_pool) page_pool_destroy(rx_q->page_pool); + rx_q->page_pool = NULL; } static void free_dma_rx_desc_resources(struct stmmac_priv *priv, @@ -1978,8 +1990,14 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); + tx_q->dma_etx = NULL; + tx_q->dma_entx = NULL; + 
tx_q->dma_tx = NULL; + kfree(tx_q->tx_skbuff_dma); + tx_q->tx_skbuff_dma = NULL; kfree(tx_q->tx_skbuff); + tx_q->tx_skbuff = NULL; } static void free_dma_tx_desc_resources(struct stmmac_priv *priv, @@ -2494,7 +2512,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) true, priv->mode, true, true, xdp_desc.len); - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); entry = tx_q->cur_tx; @@ -2926,7 +2944,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) struct stmmac_rx_queue *rx_q; struct stmmac_tx_queue *tx_q; u32 chan = 0; - int atds = 0; int ret = 0; if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { @@ -2935,7 +2952,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) } if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) - atds = 1; + priv->plat->dma_cfg->atds = 1; ret = stmmac_reset(priv, priv->ioaddr); if (ret) { @@ -2944,7 +2961,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) } /* DMA Configuration */ - stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg); if (priv->plat->axi) stmmac_axi(priv, priv->ioaddr, priv->plat->axi); @@ -4627,7 +4644,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); @@ -4846,7 +4863,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, u64_stats_update_end(&txq_stats->q_syncp); } - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); tx_q->cur_tx = entry; @@ -7183,7 +7200,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) { struct stmmac_priv *priv = netdev_priv(dev); int ret = 0, i; - int max_speed; if (netif_running(dev)) stmmac_release(dev); @@ -7197,14 +7213,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) priv->rss.table[i] = ethtool_rxfh_indir_default(i, rx_cnt); - stmmac_mac_phylink_get_caps(priv); - - priv->phylink_config.mac_capabilities = priv->hw->link.caps; - - max_speed = priv->plat->max_speed; - if (max_speed) - phylink_limit_mac_speed(&priv->phylink_config, max_speed); - stmmac_napi_add(dev); if (netif_running(dev)) diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 23cd610bd3766c2b2785e94f1faa9da54990ec00..85cdbdd44fec70d1b20e348c38368f047107eec6 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -26,7 +26,7 @@ config NGBE tristate "Wangxun(R) GbE PCI Express adapters support" depends on PCI select LIBWX - select PHYLIB + select PHYLINK help This driver supports Wangxun(R) GbE PCI Express family of adapters. 
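Stepping back to the stmmac phylink rework above: capabilities are now recomputed on every .mac_get_caps() call because dwmac4_update_caps() masks out the half-duplex modes whenever more than one TX queue is in use, and the cached value that stmmac_phy_setup() used to compute at probe time would go stale after a queue reconfiguration. A self-contained sketch of that masking (the bit values here are illustrative only; the real MAC_* capability flags live in the phylink headers)::

    #include <stdio.h>

    #define MAC_10HD    (1u << 0)  /* illustrative bit positions only */
    #define MAC_10FD    (1u << 1)
    #define MAC_100HD   (1u << 2)
    #define MAC_100FD   (1u << 3)
    #define MAC_1000HD  (1u << 4)
    #define MAC_1000FD  (1u << 5)

    /* mirrors the queue-count check in dwmac4_update_caps() */
    static unsigned int update_caps(unsigned int caps, unsigned int tx_queues)
    {
        if (tx_queues > 1)
            caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
        return caps;
    }

    int main(void)
    {
        unsigned int caps = MAC_10HD | MAC_10FD | MAC_100HD | MAC_100FD |
                            MAC_1000HD | MAC_1000FD;

        printf("1 queue : %#x\n", update_caps(caps, 1)); /* keeps half duplex */
        printf("8 queues: %#x\n", update_caps(caps, 8)); /* full duplex only */
        return 0;
    }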
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 93cb6f2294e72e235abbc90ed47aaaf91bfd2ff3..152049600148940e6f594320526e3aee8450dca9 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -3,9 +3,172 @@ #include #include +#include #include "wx_type.h" #include "wx_ethtool.h" +#include "wx_hw.h" +#include "wx_lib.h" + +struct wx_stats { + char stat_string[ETH_GSTRING_LEN]; + size_t sizeof_stat; + off_t stat_offset; +}; + +#define WX_STAT(str, m) { \ + .stat_string = str, \ + .sizeof_stat = sizeof(((struct wx *)0)->m), \ + .stat_offset = offsetof(struct wx, m) } + +static const struct wx_stats wx_gstrings_stats[] = { + WX_STAT("rx_dma_pkts", stats.gprc), + WX_STAT("tx_dma_pkts", stats.gptc), + WX_STAT("rx_dma_bytes", stats.gorc), + WX_STAT("tx_dma_bytes", stats.gotc), + WX_STAT("rx_total_pkts", stats.tpr), + WX_STAT("tx_total_pkts", stats.tpt), + WX_STAT("rx_long_length_count", stats.roc), + WX_STAT("rx_short_length_count", stats.ruc), + WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + WX_STAT("os2bmc_tx_by_host", stats.o2bspc), + WX_STAT("os2bmc_rx_by_host", stats.b2ogprc), + WX_STAT("rx_no_dma_resources", stats.rdmdrop), + WX_STAT("tx_busy", tx_busy), + WX_STAT("non_eop_descs", non_eop_descs), + WX_STAT("tx_restart_queue", restart_queue), + WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + WX_STAT("rx_csum_offload_errors", hw_csum_rx_error), + WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), +}; + +/* The driver allocates num_tx_queues and num_rx_queues symmetrically, so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. + */ +#define WX_NUM_RX_QUEUES netdev->num_tx_queues +#define WX_NUM_TX_QUEUES netdev->num_tx_queues + +#define WX_QUEUE_STATS_LEN ( \ + (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \ + (sizeof(struct wx_queue_stats) / sizeof(u64))) +#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats) +#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN) + +int wx_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return WX_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL(wx_get_sset_count); + +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, wx_gstrings_stats[i].stat_string); + for (i = 0; i < netdev->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + } + for (i = 0; i < WX_NUM_RX_QUEUES; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + } + break; + } +} +EXPORT_SYMBOL(wx_get_strings); + +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_ring *ring; + unsigned int start; + int i, j; + char *p; + + wx_update_stats(wx); + + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) { + p = (char *)wx + wx_gstrings_stats[i].stat_offset; + data[i] = (wx_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < netdev->num_tx_queues; j++) { + ring = wx->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } + for (j = 0; j < WX_NUM_RX_QUEUES; j++) { + ring = wx->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } +} +EXPORT_SYMBOL(wx_get_ethtool_stats); + +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + mac_stats->MulticastFramesXmittedOK = hwstats->mptc; + mac_stats->BroadcastFramesXmittedOK = hwstats->bptc; + mac_stats->MulticastFramesReceivedOK = hwstats->mprc; + mac_stats->BroadcastFramesReceivedOK = hwstats->bprc; +} +EXPORT_SYMBOL(wx_get_mac_stats); + +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc; + stats->rx_pause_frames = hwstats->lxonoffrxc; +} +EXPORT_SYMBOL(wx_get_pause_stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { @@ -14,5 +177,247 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) strscpy(info->driver, wx->driver_name, sizeof(info->driver)); strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); + if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) { + info->n_stats = WX_STATS_LEN - + (WX_NUM_TX_QUEUES - wx->num_tx_queues) * + (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2; + } else { + info->n_stats = WX_STATS_LEN; + } } EXPORT_SYMBOL(wx_get_drvinfo); + +int wx_nway_reset(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_nway_reset(wx->phylink); +} +EXPORT_SYMBOL(wx_nway_reset); + +int wx_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_ksettings_get(wx->phylink, cmd); +} +EXPORT_SYMBOL(wx_get_link_ksettings); + +int wx_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_ksettings_set(wx->phylink, cmd); +} +EXPORT_SYMBOL(wx_set_link_ksettings); + +void wx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct wx *wx = netdev_priv(netdev); + + phylink_ethtool_get_pauseparam(wx->phylink, pause); +} +EXPORT_SYMBOL(wx_get_pauseparam); + +int wx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_set_pauseparam(wx->phylink, pause); +} +EXPORT_SYMBOL(wx_set_pauseparam); + +void wx_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + + ring->rx_max_pending 
= WX_MAX_RXD; + ring->tx_max_pending = WX_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = wx->rx_ring_count; + ring->tx_pending = wx->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} +EXPORT_SYMBOL(wx_get_ringparam); + +int wx_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = wx->tx_work_limit; + /* only valid if in constant ITR mode */ + if (wx->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = wx->rx_itr_setting; + else + ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (wx->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = wx->tx_itr_setting; + else + ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2; + + return 0; +} +EXPORT_SYMBOL(wx_get_coalesce); + +int wx_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + u16 tx_itr_param, rx_itr_param; + struct wx_q_vector *q_vector; + u16 max_eitr; + int i; + + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EOPNOTSUPP; + } + + if (ec->tx_max_coalesced_frames_irq) + wx->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if (wx->mac.type == wx_mac_sp) + max_eitr = WX_SP_MAX_EITR; + else + max_eitr = WX_EM_MAX_EITR; + + if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) || + (ec->tx_coalesce_usecs > (max_eitr >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + wx->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + wx->rx_itr_setting = ec->rx_coalesce_usecs; + + if (wx->rx_itr_setting == 1) + rx_itr_param = WX_20K_ITR; + else + rx_itr_param = wx->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + wx->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + wx->tx_itr_setting = ec->tx_coalesce_usecs; + + if (wx->tx_itr_setting == 1) { + if (wx->mac.type == wx_mac_sp) + tx_itr_param = WX_12K_ITR; + else + tx_itr_param = WX_20K_ITR; + } else { + tx_itr_param = wx->tx_itr_setting; + } + + /* mixed Rx/Tx */ + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) + wx->tx_itr_setting = wx->rx_itr_setting; + + for (i = 0; i < wx->num_q_vectors; i++) { + q_vector = wx->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + wx_write_eitr(q_vector); + } + + return 0; +} +EXPORT_SYMBOL(wx_set_coalesce); + +u32 wx_get_msglevel(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + return wx->msg_enable; +} +EXPORT_SYMBOL(wx_get_msglevel); + +void wx_set_msglevel(struct net_device *netdev, u32 data) +{ + struct wx *wx = netdev_priv(netdev); + + wx->msg_enable = data; +} +EXPORT_SYMBOL(wx_set_msglevel); + +static unsigned int wx_max_channels(struct wx *wx) +{ + unsigned int max_combined; + + if (!wx->msix_q_entries) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else { + /* support up to max allowed queues with RSS */ + if (wx->mac.type == wx_mac_sp) + max_combined = 63; + else + 
max_combined = 8; + } + + return max_combined; +} + +void wx_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct wx *wx = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = wx_max_channels(wx); + + /* report info for other vector */ + if (wx->msix_q_entries) { + ch->max_other = 1; + ch->other_count = 1; + } + + /* record RSS queues */ + ch->combined_count = wx->ring_feature[RING_F_RSS].indices; +} +EXPORT_SYMBOL(wx_get_channels); + +int wx_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + unsigned int count = ch->combined_count; + struct wx *wx = netdev_priv(dev); + + /* verify other_count has not changed */ + if (ch->other_count != 1) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > wx_max_channels(wx)) + return -EINVAL; + + wx->ring_feature[RING_F_RSS].limit = count; + + return 0; +} +EXPORT_SYMBOL(wx_set_channels); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index e85538c69454070bbccd60a5e1f549e75677442b..fee7260384ef1fe0901e35210a499951395307d7 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -4,5 +4,40 @@ #ifndef _WX_ETHTOOL_H_ #define _WX_ETHTOOL_H_ +int wx_get_sset_count(struct net_device *netdev, int sset); +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data); +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats); +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); +int wx_nway_reset(struct net_device *netdev); +int wx_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd); +int wx_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd); +void wx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +int wx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +void wx_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack); +int wx_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +int wx_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +u32 wx_get_msglevel(struct net_device *netdev); +void wx_set_msglevel(struct net_device *netdev, u32 data); +void wx_get_channels(struct net_device *dev, + struct ethtool_channels *ch); +int wx_set_channels(struct net_device *dev, + struct ethtool_channels *ch); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 52130df26aee5360e3379a1ef154e20e12eb76e9..1db754615cca31b530ce554411b87f540f8ac371 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -12,6 +12,98 @@ #include "wx_lib.h" #include "wx_hw.h" +static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write 
the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) { + wx_err(wx, "Mdio read command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, WX_MSCC); +} + +static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) + wx_err(wx, "Mdio write command did not complete.\n"); + + return ret; +} + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22); + +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22); + +int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45); + +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45); + static void wx_intr_disable(struct wx *wx, u64 qmask) { u32 mask; @@ -57,9 +149,9 @@ void wx_irq_disable(struct wx *wx) int vector; for (vector = 0; vector < wx->num_q_vectors; vector++) - synchronize_irq(wx->msix_entries[vector].vector); + synchronize_irq(wx->msix_q_entries[vector].vector); - synchronize_irq(wx->msix_entries[vector].vector); + synchronize_irq(wx->msix_entry->vector); } else { synchronize_irq(pdev->irq); } @@ -1066,6 +1158,81 @@ static void wx_set_rxpba(struct wx *wx) wr32(wx, WX_TDM_PB_THRE(0), txpbthresh); } +#define WX_ETH_FRAMING 20 + +/** + * wx_hpbthresh - calculate high water mark for flow control + * + * @wx: board private structure to calculate for + **/ +static int wx_hpbthresh(struct wx *wx) +{ + struct net_device *dev = wx->netdev; + int link, tc, kb, marker; + u32 dv_id, rx_pba; + + /* Calculate max LAN frame size */ + link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING; + tc = link; + + /* Calculate delay value for device */ + dv_id = WX_DV(link, tc); + + /* Delay value is calculated in bit times; convert to KB */ + kb = WX_BT2KB(dv_id); + rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT; + + marker = rx_pba - kb; + + /* It is possible that the packet buffer 
is not large enough + * to provide required headroom. In this case, throw an error + * to the user and do the best we can. + */ + if (marker < 0) { + dev_warn(&wx->pdev->dev, + "Packet Buffer cannot provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n"); + marker = tc + 1; + } + + return marker; +} + +/** + * wx_lpbthresh - calculate low water mark for flow control + * + * @wx: board private structure to calculate for + **/ +static int wx_lpbthresh(struct wx *wx) +{ + struct net_device *dev = wx->netdev; + u32 dv_id; + int tc; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Calculate delay value for device */ + dv_id = WX_LOW_DV(tc); + + /* Delay value is calculated in bit times; convert to KB */ + return WX_BT2KB(dv_id); +} + +/** + * wx_pbthresh_setup - calculate and set up high/low water marks + * + * @wx: board private structure to calculate for + **/ +static void wx_pbthresh_setup(struct wx *wx) +{ + wx->fc.high_water = wx_hpbthresh(wx); + wx->fc.low_water = wx_lpbthresh(wx); + + /* Low water marks must not be larger than high water marks */ + if (wx->fc.low_water > wx->fc.high_water) + wx->fc.low_water = 0; +} + static void wx_configure_port(struct wx *wx) { u32 value, i; @@ -1430,6 +1597,72 @@ static void wx_restore_vlan(struct wx *wx) wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid); } +static void wx_store_reta(struct wx *wx) +{ + u8 *indir_tbl = wx->rss_indir_tbl; + u32 reta = 0; + u32 i; + + /* Fill out the redirection table as follows: + * - 8 bit wide entries containing 4 bit RSS index + */ + for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(wx, WX_RDB_RSSTBL(i >> 2), reta); + reta = 0; + } + } +} + +static void wx_setup_reta(struct wx *wx) +{ + u16 rss_i = wx->ring_feature[RING_F_RSS].indices; + u32 random_key_size = WX_RSS_KEY_SIZE / 4; + u32 i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < random_key_size; i++) + wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]); + + /* Fill out redirection table */ + memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl)); + + for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) { + if (j == rss_i) + j = 0; + + wx->rss_indir_tbl[i] = j; + } + + wx_store_reta(wx); +} + +static void wx_setup_mrqc(struct wx *wx) +{ + u32 rss_field = 0; + + /* Disable indicating checksum in descriptor, enables RSS hash */ + wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD); + + /* Perform hash on these packet types */ + rss_field = WX_RDB_RA_CTL_RSS_IPV4 | + WX_RDB_RA_CTL_RSS_IPV4_TCP | + WX_RDB_RA_CTL_RSS_IPV4_UDP | + WX_RDB_RA_CTL_RSS_IPV6 | + WX_RDB_RA_CTL_RSS_IPV6_TCP | + WX_RDB_RA_CTL_RSS_IPV6_UDP; + + netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key)); + + wx_setup_reta(wx); + + if (wx->rss_enabled) + rss_field |= WX_RDB_RA_CTL_RSS_EN; + + wr32(wx, WX_RDB_RA_CTL, rss_field); +} + /** * wx_configure_rx - Configure Receive Unit after Reset * @wx: pointer to private structure @@ -1462,6 +1695,8 @@ void wx_configure_rx(struct wx *wx) wr32(wx, WX_PSR_CTL, psrctl); } + wx_setup_mrqc(wx); + /* set_rx_buffer_len must be called before ring initialization */ wx_set_rx_buffer_len(wx); @@ -1492,6 +1727,7 @@ static void wx_configure_isb(struct wx *wx) void wx_configure(struct wx *wx) { wx_set_rxpba(wx); + wx_pbthresh_setup(wx); wx_configure_port(wx); wx_set_rx_mode(wx->netdev); @@ -1658,6 +1894,28 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count) } 
EXPORT_SYMBOL(wx_get_pcie_msix_counts); +/** + * wx_init_rss_key - Initialize wx RSS key + * @wx: device handle + * + * Allocates and initializes the RSS key if it is not allocated. + **/ +static int wx_init_rss_key(struct wx *wx) +{ + u32 *rss_key; + + if (!wx->rss_key) { + rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + + netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE); + wx->rss_key = rss_key; + } + + return 0; +} + int wx_sw_init(struct wx *wx) { struct pci_dev *pdev = wx->pdev; @@ -1685,14 +1943,23 @@ int wx_sw_init(struct wx *wx) wx->subsystem_device_id = swab16((u16)ssid); } + err = wx_init_rss_key(wx); + if (err < 0) { + wx_err(wx, "rss key allocation failed\n"); + return err; + } + wx->mac_table = kcalloc(wx->mac.num_rar_entries, sizeof(struct wx_mac_addr), GFP_KERNEL); if (!wx->mac_table) { wx_err(wx, "mac_table allocation failed\n"); + kfree(wx->rss_key); return -ENOMEM; } + wx->msix_in_use = false; + return 0; } EXPORT_SYMBOL(wx_sw_init); @@ -1911,6 +2178,201 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) } EXPORT_SYMBOL(wx_vlan_rx_kill_vid); +static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring) +{ + u16 reg_idx = ring->reg_idx; + u32 srrctl; + + srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + srrctl |= WX_PX_RR_CFG_DROP_EN; + + wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); +} + +static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring) +{ + u16 reg_idx = ring->reg_idx; + u32 srrctl; + + srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + srrctl &= ~WX_PX_RR_CFG_DROP_EN; + + wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); +} + +int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause) +{ + u16 pause_time = WX_DEFAULT_FCPAUSE; + u32 mflcn_reg, fccfg_reg, reg; + u32 fcrtl, fcrth; + int i; + + /* Low water mark of zero causes XOFF floods */ + if (tx_pause && wx->fc.high_water) { + if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) { + wx_err(wx, "Invalid water mark configuration\n"); + return -EINVAL; + } + } + + /* Disable any previous flow control settings */ + mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL); + mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE; + + fccfg_reg = rd32(wx, WX_RDB_RFCC); + fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X; + + if (rx_pause) + mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE; + if (tx_pause) + fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X; + + /* Set 802.3x based flow control settings. */ + wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg); + wr32(wx, WX_RDB_RFCC, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + if (tx_pause && wx->fc.high_water) { + fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE; + wr32(wx, WX_RDB_RFCL, fcrtl); + fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE; + } else { + wr32(wx, WX_RDB_RFCL, 0); + /* In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576; + } + + wr32(wx, WX_RDB_RFCH, fcrth); + + /* Configure pause time */ + reg = pause_time * 0x00010001; + wr32(wx, WX_RDB_RFCV, reg); + + /* Configure flow control refresh threshold value */ + wr32(wx, WX_RDB_RFCRT, pause_time / 2); + + /* We should set the drop enable bit if: + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. 
+ */ + if (wx->num_rx_queues > 1 && !tx_pause) { + for (i = 0; i < wx->num_rx_queues; i++) + wx_enable_rx_drop(wx, wx->rx_ring[i]); + } else { + for (i = 0; i < wx->num_rx_queues; i++) + wx_disable_rx_drop(wx, wx->rx_ring[i]); + } + + return 0; +} +EXPORT_SYMBOL(wx_fc_enable); + +/** + * wx_update_stats - Update the board statistics counters. + * @wx: board private structure + **/ +void wx_update_stats(struct wx *wx) +{ + struct wx_hw_stats *hwstats = &wx->stats; + + u64 non_eop_descs = 0, alloc_rx_buff_failed = 0; + u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0; + u64 restart_queue = 0, tx_busy = 0; + u32 i; + + /* gather some stats to the wx struct that are per queue */ + for (i = 0; i < wx->num_rx_queues; i++) { + struct wx_ring *rx_ring = wx->rx_ring[i]; + + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + } + wx->non_eop_descs = non_eop_descs; + wx->alloc_rx_buff_failed = alloc_rx_buff_failed; + wx->hw_csum_rx_error = hw_csum_rx_error; + wx->hw_csum_rx_good = hw_csum_rx_good; + + for (i = 0; i < wx->num_tx_queues; i++) { + struct wx_ring *tx_ring = wx->tx_ring[i]; + + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + } + wx->restart_queue = restart_queue; + wx->tx_busy = tx_busy; + + hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT); + hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT); + hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB); + hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB); + hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC); + hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC); + hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC); + hwstats->o2bgptc += rd32(wx, WX_TDM_OS2BMC_CNT); + hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT); + hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT); + hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT); + hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT); + + for (i = 0; i < wx->mac.max_rx_queues; i++) + hwstats->qmprc += rd32(wx, WX_PX_MPRC(i)); +} +EXPORT_SYMBOL(wx_update_stats); + +/** + * wx_clear_hw_cntrs - Generic clear hardware counters + * @wx: board private structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
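+ *
+ * Editor's note (illustrative): the per-queue WX_PX_MPRC counters are
+ * cleared by writing zero below, while the MAC counters are assumed to
+ * be clear-on-read, so a discarded read is enough to zero them:
+ *
+ *	(void)rd32(wx, WX_RDM_PKT_CNT);	-- counter reads back 0 afterwards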
+ **/ +void wx_clear_hw_cntrs(struct wx *wx) +{ + u16 i = 0; + + for (i = 0; i < wx->mac.max_rx_queues; i++) + wr32(wx, WX_PX_MPRC(i), 0); + + rd32(wx, WX_RDM_PKT_CNT); + rd32(wx, WX_TDM_PKT_CNT); + rd64(wx, WX_RDM_BYTE_CNT_LSB); + rd32(wx, WX_TDM_BYTE_CNT_LSB); + rd32(wx, WX_RDM_DRP_PKT); + rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + rd32(wx, WX_RDB_LXONTXC); + rd32(wx, WX_RDB_LXOFFTXC); + rd32(wx, WX_MAC_LXONOFFRXC); +} +EXPORT_SYMBOL(wx_clear_hw_cntrs); + /** * wx_start_hw - Prepare hardware for Tx/Rx * @wx: pointer to hardware structure diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 0b3447bc6f2fec53fac9ccd5144430380b17d46b..9e219fa717a225b4e91e688d8f5a98b5daec606b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -4,6 +4,13 @@ #ifndef _WX_HW_H_ #define _WX_HW_H_ +#include + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum); +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value); +int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum); +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value); void wx_intr_enable(struct wx *wx, u64 qmask); void wx_irq_disable(struct wx *wx); int wx_check_flash_load(struct wx *wx, u32 check_bit); @@ -34,5 +41,8 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); +int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause); +void wx_update_stats(struct wx *wx); +void wx_clear_hw_cntrs(struct wx *wx); #endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index c37500aa06379129a4db87b48bed9603b36cf722..59ac53185ab82a638793905811ae52b2e4af958f 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -421,6 +421,7 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring, return false; rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; return true; } @@ -654,6 +655,7 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector, /* exit if we failed to retrieve a buffer */ if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; break; } @@ -809,9 +811,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - netif_running(tx_ring->netdev)) + netif_running(tx_ring->netdev)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } } return !!budget; @@ -888,6 +892,7 @@ static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) /* A reprieve! 
- use start_queue because it doesn't call schedule */ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; return 0; } @@ -1465,8 +1470,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> frags[f])); - if (wx_maybe_stop_tx(tx_ring, count + 3)) + if (wx_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; @@ -1561,8 +1568,14 @@ EXPORT_SYMBOL(wx_napi_disable_all); **/ static void wx_set_rss_queues(struct wx *wx) { - wx->num_rx_queues = wx->mac.max_rx_queues; - wx->num_tx_queues = wx->mac.max_tx_queues; + struct wx_ring_feature *f; + + /* set mask for 16 queue limit of RSS */ + f = &wx->ring_feature[RING_F_RSS]; + f->indices = f->limit; + + wx->num_rx_queues = f->limit; + wx->num_tx_queues = f->limit; } static void wx_set_num_queues(struct wx *wx) @@ -1588,35 +1601,51 @@ static int wx_acquire_msix_vectors(struct wx *wx) struct irq_affinity affd = {0, }; int nvecs, i; - nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors); + /* We start by asking for one vector per queue pair */ + nvecs = max(wx->num_rx_queues, wx->num_tx_queues); + nvecs = min_t(int, nvecs, num_online_cpus()); + nvecs = min_t(int, nvecs, wx->mac.max_msix_vectors); - wx->msix_entries = kcalloc(nvecs, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!wx->msix_entries) + wx->msix_q_entries = kcalloc(nvecs, sizeof(struct msix_entry), + GFP_KERNEL); + if (!wx->msix_q_entries) return -ENOMEM; + /* One for non-queue interrupts */ + nvecs += 1; + + if (!wx->msix_in_use) { + wx->msix_entry = kcalloc(1, sizeof(struct msix_entry), + GFP_KERNEL); + if (!wx->msix_entry) { + kfree(wx->msix_q_entries); + wx->msix_q_entries = NULL; + return -ENOMEM; + } + } + nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs, nvecs, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &affd); if (nvecs < 0) { wx_err(wx, "Failed to allocate MSI-X interrupts. 
Err: %d\n", nvecs); - kfree(wx->msix_entries); - wx->msix_entries = NULL; + kfree(wx->msix_q_entries); + wx->msix_q_entries = NULL; + kfree(wx->msix_entry); + wx->msix_entry = NULL; return nvecs; } + wx->msix_entry->entry = 0; + wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0); + nvecs -= 1; for (i = 0; i < nvecs; i++) { - wx->msix_entries[i].entry = i; - wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i); + wx->msix_q_entries[i].entry = i; + wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1); } - /* one for msix_other */ - nvecs -= 1; wx->num_q_vectors = nvecs; - wx->num_rx_queues = nvecs; - wx->num_tx_queues = nvecs; return 0; } @@ -1638,9 +1667,11 @@ static int wx_set_interrupt_capability(struct wx *wx) if (ret == 0 || (ret == -ENOMEM)) return ret; - wx->num_rx_queues = 1; - wx->num_tx_queues = 1; - wx->num_q_vectors = 1; + /* Disable RSS */ + dev_warn(&wx->pdev->dev, "Disabling RSS support\n"); + wx->ring_feature[RING_F_RSS].limit = 1; + + wx_set_num_queues(wx); /* minmum one for queue, one for misc*/ nvecs = 1; @@ -1899,8 +1930,12 @@ void wx_reset_interrupt_capability(struct wx *wx) return; if (pdev->msix_enabled) { - kfree(wx->msix_entries); - wx->msix_entries = NULL; + kfree(wx->msix_q_entries); + wx->msix_q_entries = NULL; + if (!wx->msix_in_use) { + kfree(wx->msix_entry); + wx->msix_entry = NULL; + } } pci_free_irq_vectors(wx->pdev); } @@ -1972,7 +2007,7 @@ void wx_free_irq(struct wx *wx) for (vector = 0; vector < wx->num_q_vectors; vector++) { struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; + struct msix_entry *entry = &wx->msix_q_entries[vector]; /* free only the irqs that were actually requested */ if (!q_vector->rx.ring && !q_vector->tx.ring) @@ -1982,7 +2017,7 @@ void wx_free_irq(struct wx *wx) } if (wx->mac.type == wx_mac_em) - free_irq(wx->msix_entries[vector].vector, wx); + free_irq(wx->msix_entry->vector, wx); } EXPORT_SYMBOL(wx_free_irq); @@ -2059,6 +2094,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction, wr32(wx, WX_PX_MISC_IVAR, ivar); } else { /* tx or rx causes */ + msix_vector += 1; /* offset for queue vectors */ msix_vector |= WX_PX_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); @@ -2076,7 +2112,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction, * when it needs to update EITR registers at runtime. Hardware * specific quirks/differences are taken care of here. 
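 *
 * Editor's note: after this patch the misc vector owns ITR register 0,
 * so a queue vector with index v_idx programs WX_PX_ITR(v_idx + 1)
 * below, while wx_configure_vectors() writes WX_PX_ITR(0) for misc.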
*/ -static void wx_write_eitr(struct wx_q_vector *q_vector) +void wx_write_eitr(struct wx_q_vector *q_vector) { struct wx *wx = q_vector->wx; int v_idx = q_vector->v_idx; @@ -2089,7 +2125,7 @@ static void wx_write_eitr(struct wx_q_vector *q_vector) itr_reg |= WX_PX_ITR_CNT_WDIS; - wr32(wx, WX_PX_ITR(v_idx), itr_reg); + wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg); } /** @@ -2135,9 +2171,9 @@ void wx_configure_vectors(struct wx *wx) wx_write_eitr(q_vector); } - wx_set_ivar(wx, -1, 0, v_idx); + wx_set_ivar(wx, -1, 0, 0); if (pdev->msix_enabled) - wr32(wx, WX_PX_ITR(v_idx), 1950); + wr32(wx, WX_PX_ITR(0), 1950); } EXPORT_SYMBOL(wx_configure_vectors); @@ -2596,8 +2632,11 @@ void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; int i; + wx_update_stats(wx); + rcu_read_lock(); for (i = 0; i < wx->num_rx_queues; i++) { struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); @@ -2633,6 +2672,12 @@ void wx_get_stats64(struct net_device *netdev, } rcu_read_unlock(); + + hwstats = &wx->stats; + stats->rx_errors = hwstats->crcerrs + hwstats->rlec; + stats->multicast = hwstats->qmprc; + stats->rx_length_errors = hwstats->rlec; + stats->rx_crc_errors = hwstats->crcerrs; } EXPORT_SYMBOL(wx_get_stats64); @@ -2641,11 +2686,14 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) netdev_features_t changed = netdev->features ^ features; struct wx *wx = netdev_priv(netdev); - if (changed & NETIF_F_RXHASH) + if (features & NETIF_F_RXHASH) { wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, WX_RDB_RA_CTL_RSS_EN); - else + wx->rss_enabled = true; + } else { wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0); + wx->rss_enabled = false; + } netdev->features = features; @@ -2658,4 +2706,71 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) } EXPORT_SYMBOL(wx_set_features); +void wx_set_ring(struct wx *wx, u32 new_tx_count, + u32 new_rx_count, struct wx_ring *temp_ring) +{ + int i, err = 0; + + /* Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. 
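+	 *
+	 * Editor's sketch of the Tx-side sequence implemented below:
+	 *   1. memcpy() the live ring metadata into temp_ring[i]
+	 *   2. wx_setup_tx_resources(&temp_ring[i]) allocates new descriptors
+	 *   3. on failure, free what was already built and keep the old rings
+	 *   4. on success, free the old resources and memcpy() temp_ring back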
+ */ + if (new_tx_count != wx->tx_ring_count) { + for (i = 0; i < wx->num_tx_queues; i++) { + memcpy(&temp_ring[i], wx->tx_ring[i], + sizeof(struct wx_ring)); + + temp_ring[i].count = new_tx_count; + err = wx_setup_tx_resources(&temp_ring[i]); + if (err) { + wx_err(wx, "setup new tx resources failed, keep using the old config\n"); + while (i) { + i--; + wx_free_tx_resources(&temp_ring[i]); + } + return; + } + } + + for (i = 0; i < wx->num_tx_queues; i++) { + wx_free_tx_resources(wx->tx_ring[i]); + + memcpy(wx->tx_ring[i], &temp_ring[i], + sizeof(struct wx_ring)); + } + + wx->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != wx->rx_ring_count) { + for (i = 0; i < wx->num_rx_queues; i++) { + memcpy(&temp_ring[i], wx->rx_ring[i], + sizeof(struct wx_ring)); + + temp_ring[i].count = new_rx_count; + err = wx_setup_rx_resources(&temp_ring[i]); + if (err) { + wx_err(wx, "setup new rx resources failed, keep using the old config\n"); + while (i) { + i--; + wx_free_rx_resources(&temp_ring[i]); + } + return; + } + } + + for (i = 0; i < wx->num_rx_queues; i++) { + wx_free_rx_resources(wx->rx_ring[i]); + memcpy(wx->rx_ring[i], &temp_ring[i], + sizeof(struct wx_ring)); + } + + wx->rx_ring_count = new_rx_count; + } +} +EXPORT_SYMBOL(wx_set_ring); + +MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers."); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index df1f4a5951f06ccb44e583ac6a3301838e999832..ec909e876720ca05c709ffb59060d3a4e9546426 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -21,6 +21,7 @@ void wx_free_irq(struct wx *wx); int wx_setup_isb_resources(struct wx *wx); void wx_free_isb_resources(struct wx *wx); u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx); +void wx_write_eitr(struct wx_q_vector *q_vector); void wx_configure_vectors(struct wx *wx); void wx_clean_all_rx_rings(struct wx *wx); void wx_clean_all_tx_rings(struct wx *wx); @@ -29,5 +30,7 @@ int wx_setup_resources(struct wx *wx); void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats); int wx_set_features(struct net_device *netdev, netdev_features_t features); +void wx_set_ring(struct wx *wx, u32 new_tx_count, + u32 new_rx_count, struct wx_ring *temp_ring); #endif /* _NGBE_LIB_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index c555af9ed51b29e1b8b069c9c020fb182a2aadfe..b4dc4f3411174abed580580e7b2dbc0097d0eb40 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #define WX_NCSI_SUP 0x8000 @@ -59,6 +60,25 @@ #define WX_TS_ALARM_ST_DALARM BIT(1) #define WX_TS_ALARM_ST_ALARM BIT(0) +/* statistic */ +#define WX_TX_FRAME_CNT_GOOD_BAD_L 0x1181C +#define WX_TX_BC_FRAMES_GOOD_L 0x11824 +#define WX_TX_MC_FRAMES_GOOD_L 0x1182C +#define WX_RX_FRAME_CNT_GOOD_BAD_L 0x11900 +#define WX_RX_BC_FRAMES_GOOD_L 0x11918 +#define WX_RX_MC_FRAMES_GOOD_L 0x11920 +#define WX_RX_CRC_ERROR_FRAMES_L 0x11928 +#define WX_RX_LEN_ERROR_FRAMES_L 0x11978 +#define WX_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define WX_RX_OVERSIZE_FRAMES_GOOD 0x1193C +#define WX_MAC_LXONOFFRXC 0x11E0C + +/*********************** Receive DMA registers **************************/ +#define WX_RDM_DRP_PKT 0x12500 +#define WX_RDM_PKT_CNT 0x12504 +#define WX_RDM_BYTE_CNT_LSB 0x12508 
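+/* Editor's note: the *_BYTE_CNT_LSB counters above are 64-bit wide;
+ * rd64() reads the low word at the given offset and the high word at
+ * offset + 4.
+ */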
+#define WX_RDM_BMC2OS_CNT 0x12510 + /************************* Port Registers ************************************/ /* port cfg Registers */ #define WX_CFG_PORT_CTL 0x14400 @@ -94,6 +114,9 @@ #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ #define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) #define WX_TDM_RP_IDX 0x1820C +#define WX_TDM_PKT_CNT 0x18308 +#define WX_TDM_BYTE_CNT_LSB 0x1830C +#define WX_TDM_OS2BMC_CNT 0x18314 #define WX_TDM_RP_RATE 0x18404 /***************************** RDB registers *********************************/ @@ -106,6 +129,17 @@ /* statistic */ #define WX_RDB_PFCMACDAL 0x19210 #define WX_RDB_PFCMACDAH 0x19214 +#define WX_RDB_LXOFFTXC 0x19218 +#define WX_RDB_LXONTXC 0x1921C +/* Flow Control Registers */ +#define WX_RDB_RFCV 0x19200 +#define WX_RDB_RFCL 0x19220 +#define WX_RDB_RFCL_XONE BIT(31) +#define WX_RDB_RFCH 0x19260 +#define WX_RDB_RFCH_XOFFE BIT(31) +#define WX_RDB_RFCRT 0x192A0 +#define WX_RDB_RFCC 0x192A4 +#define WX_RDB_RFCC_RFCE_802_3X BIT(3) /* ring assignment */ #define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) #define WX_RDB_PL_CFG_L4HDR BIT(1) @@ -113,8 +147,16 @@ #define WX_RDB_PL_CFG_L2HDR BIT(3) #define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4) #define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5) +#define WX_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4)) +#define WX_RDB_RSSRK(_i) (0x19480 + ((_i) * 4)) #define WX_RDB_RA_CTL 0x194F4 #define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */ +#define WX_RDB_RA_CTL_RSS_IPV4_TCP BIT(16) +#define WX_RDB_RA_CTL_RSS_IPV4 BIT(17) +#define WX_RDB_RA_CTL_RSS_IPV6 BIT(20) +#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21) +#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22) +#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23) /******************************* PSR Registers *******************************/ /* psr control */ @@ -218,6 +260,8 @@ #define WX_MNG_MBOX_CTL 0x1E044 #define WX_MNG_MBOX_CTL_SWRDY BIT(0) #define WX_MNG_MBOX_CTL_FWRDY BIT(2) +#define WX_MNG_BMC2OS_CNT 0x1E090 +#define WX_MNG_OS2BMC_CNT 0x1E094 /************************************* ETH MAC *****************************/ #define WX_MAC_TX_CFG 0x11000 @@ -251,6 +295,7 @@ enum WX_MSCA_CMD_value { #define WX_MSCC_SADDR BIT(18) #define WX_MSCC_BUSY BIT(22) #define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) +#define WX_MDIO_CLAUSE_SELECT 0x11220 #define WX_MMC_CONTROL 0x11800 #define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */ @@ -278,6 +323,7 @@ enum WX_MSCA_CMD_value { #define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ #define WX_7K_ITR 595 #define WX_12K_ITR 336 +#define WX_20K_ITR 200 #define WX_SP_MAX_EITR 0x00000FF8U #define WX_EM_MAX_EITR 0x00007FFCU @@ -300,8 +346,10 @@ enum WX_MSCA_CMD_value { #define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) #define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) #define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) +#define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40)) /* PX_RR_CFG bit definitions */ #define WX_PX_RR_CFG_VLAN BIT(31) +#define WX_PX_RR_CFG_DROP_EN BIT(30) #define WX_PX_RR_CFG_SPLIT_MODE BIT(26) #define WX_PX_RR_CFG_RR_THER_SHIFT 16 #define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12) @@ -339,8 +387,46 @@ enum WX_MSCA_CMD_value { #define WX_MAC_STATE_MODIFIED 0x2 #define WX_MAC_STATE_IN_USE 0x4 +/* BitTimes (BT) conversion */ +#define WX_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024)) +#define WX_B2BT(BT) ((BT) * 8) + +/* Calculate Delay to respond to PFC */ +#define WX_PFC_D 672 +/* Calculate Cable Delay */ +#define WX_CABLE_DC 5556 /* Delay Copper */ +/* Calculate Delay incurred from higher layer */ +#define WX_HD 6144 + +/* Calculate 
Interface Delay */ +#define WX_PHY_D 12800 +#define WX_MAC_D 4096 +#define WX_XAUI_D (2 * 1024) +#define WX_ID (WX_MAC_D + WX_XAUI_D + WX_PHY_D) +/* Calculate PCI Bus delay for low thresholds */ +#define WX_PCI_DELAY 10000 + +/* Calculate delay value in bit times */ +#define WX_DV(_max_frame_link, _max_frame_tc) \ + ((36 * (WX_B2BT(_max_frame_link) + WX_PFC_D + \ + (2 * WX_CABLE_DC) + (2 * WX_ID) + WX_HD) / 25 + 1) + \ + 2 * WX_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define WX_LOW_DV(_max_frame_tc) \ + (2 * (2 * WX_B2BT(_max_frame_tc) + (36 * WX_PCI_DELAY / 25) + 1)) + +/* flow control */ +#define WX_DEFAULT_FCPAUSE 0xFFFF + #define WX_MAX_RXD 8192 #define WX_MAX_TXD 8192 +#define WX_MIN_RXD 128 +#define WX_MIN_TXD 128 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8 #define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ #define VMDQ_P(p) p @@ -766,9 +852,16 @@ struct wx_queue_stats { u64 bytes; }; +struct wx_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; +}; + struct wx_rx_queue_stats { + u64 non_eop_descs; u64 csum_good_cnt; u64 csum_err; + u64 alloc_rx_buff_failed; }; /* iterator for handling rings in ring container */ @@ -812,6 +905,7 @@ struct wx_ring { struct wx_queue_stats stats; struct u64_stats_sync syncp; union { + struct wx_tx_queue_stats tx_stats; struct wx_rx_queue_stats rx_stats; }; } ____cacheline_internodealigned_in_smp; @@ -835,6 +929,19 @@ struct wx_q_vector { struct wx_ring ring[] ____cacheline_internodealigned_in_smp; }; +struct wx_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +enum wx_ring_f_enum { + RING_F_NONE = 0, + RING_F_RSS, + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + enum wx_isb_idx { WX_ISB_HEADER, WX_ISB_MISC, @@ -843,6 +950,38 @@ enum wx_isb_idx { WX_ISB_MAX }; +struct wx_fc_info { + u32 high_water; /* Flow Ctrl High-water */ + u32 low_water; /* Flow Ctrl Low-water */ +}; + +/* Statistics counters collected by the MAC */ +struct wx_hw_stats { + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 tpr; + u64 tpt; + u64 bprc; + u64 bptc; + u64 mprc; + u64 mptc; + u64 roc; + u64 ruc; + u64 lxonoffrxc; + u64 lxontxc; + u64 lxofftxc; + u64 o2bgptc; + u64 b2ospc; + u64 o2bspc; + u64 b2ogprc; + u64 rdmdrop; + u64 crcerrs; + u64 rlec; + u64 qmprc; +}; + struct wx { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -856,6 +995,7 @@ struct wx { enum sp_media_type media_type; struct wx_eeprom_info eeprom; struct wx_addr_filter_info addr_ctrl; + struct wx_fc_info fc; struct wx_mac_addr *mac_table; u16 device_id; u16 vendor_id; @@ -876,6 +1016,8 @@ struct wx { int speed; int duplex; struct phy_device *phydev; + struct phylink *phylink; + struct phylink_config phylink_config; bool wol_hw_supported; bool ncsi_enabled; @@ -903,7 +1045,10 @@ struct wx { struct wx_q_vector *q_vector[64]; unsigned int queues_per_pool; - struct msix_entry *msix_entries; + struct msix_entry *msix_q_entries; + struct msix_entry *msix_entry; + bool msix_in_use; + struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE]; /* misc interrupt status block */ dma_addr_t isb_dma; @@ -911,17 +1056,26 @@ struct wx { u32 isb_tag[WX_ISB_MAX]; #define WX_MAX_RETA_ENTRIES 128 +#define WX_RSS_INDIR_TBL_MAX 64 u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES]; - + bool rss_enabled; #define 
WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ u32 *rss_key; u32 wol; u16 bd_number; + + struct wx_hw_stats stats; + u64 tx_busy; + u64 non_eop_descs; + u64 restart_queue; + u64 hw_csum_rx_good; + u64 hw_csum_rx_error; + u64 alloc_rx_buff_failed; }; #define WX_INTR_ALL (~0ULL) -#define WX_INTR_Q(i) BIT(i) +#define WX_INTR_Q(i) BIT((i) + 1) /* register operations */ #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) @@ -951,6 +1105,17 @@ wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) wr32(wx, reg, val); } +static inline u64 +rd64(struct wx *wx, u32 reg) +{ + u64 lsb, msb; + + lsb = rd32(wx, reg); + msb = rd32(wx, reg + 4); + + return (lsb | msb << 32); +} + /* On some domestic CPU platforms, sometimes IO is not synchronized with * flushing memory, here use readl() to flush PCI read and write. */ @@ -962,4 +1127,9 @@ wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) #define wx_dbg(wx, fmt, arg...) \ dev_dbg(&(wx)->pdev->dev, fmt, ##arg) +static inline struct wx *phylink_to_wx(struct phylink_config *config) +{ + return container_of(config, struct wx, phylink_config); +} + #endif /* _WX_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index ec0e869e9aacfe4708d082f05e9ac7f693c8bed6..cdf35733705f94fb17f142b1a9ff588091b14969 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -7,7 +7,10 @@ #include "../libwx/wx_ethtool.h" #include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_hw.h" #include "ngbe_ethtool.h" +#include "ngbe_type.h" static void ngbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -41,14 +44,92 @@ static int ngbe_set_wol(struct net_device *netdev, return 0; } +static int ngbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + struct wx_ring *temp_ring; + int i; + + new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == wx->tx_ring_count && + new_rx_count == wx->rx_ring_count) + return 0; + + if (!netif_running(wx->netdev)) { + for (i = 0; i < wx->num_tx_queues; i++) + wx->tx_ring[i]->count = new_tx_count; + for (i = 0; i < wx->num_rx_queues; i++) + wx->rx_ring[i]->count = new_rx_count; + wx->tx_ring_count = new_tx_count; + wx->rx_ring_count = new_rx_count; + + return 0; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); + temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); + if (!temp_ring) + return -ENOMEM; + + ngbe_down(wx); + + wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring); + kvfree(temp_ring); + + wx_configure(wx); + ngbe_up(wx); + + return 0; +} + +static int ngbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + int err; + + err = wx_set_channels(dev, ch); + if (err < 0) + return err; + + /* use setup TC to update any traffic class queue mapping */ + return ngbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + static const struct ethtool_ops ngbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + 
ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_drvinfo = wx_get_drvinfo, .get_link = ethtool_op_get_link, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, - .nway_reset = phy_ethtool_nway_reset, + .get_link_ksettings = wx_get_link_ksettings, + .set_link_ksettings = wx_set_link_ksettings, + .nway_reset = wx_nway_reset, .get_wol = ngbe_get_wol, .set_wol = ngbe_set_wol, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, + .get_pauseparam = wx_get_pauseparam, + .set_pauseparam = wx_set_pauseparam, + .get_ringparam = wx_get_ringparam, + .set_ringparam = ngbe_set_ringparam, + .get_coalesce = wx_get_coalesce, + .set_coalesce = wx_set_coalesce, + .get_msglevel = wx_get_msglevel, + .set_msglevel = wx_set_msglevel, + .get_channels = wx_get_channels, + .set_channels = ngbe_set_channels, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c index 6562a2de95277c73758e9a8a6651ad2c0c5c9803..6459bc1d7c2249e6f623170b772b6c6229557716 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -85,6 +85,8 @@ int ngbe_reset_hw(struct wx *wx) } ngbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + /* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index a4d63d2f3c5bbebce79e6e2f0a07713ee7d558dc..fdd6b4f70b7a5c0ee4c50e5ba9218411107909be 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -79,28 +79,6 @@ static void ngbe_init_type_code(struct wx *wx) } } -/** - * ngbe_init_rss_key - Initialize wx RSS key - * @wx: device handle - * - * Allocates and initializes the RSS key if it is not allocated. 
- **/ -static inline int ngbe_init_rss_key(struct wx *wx) -{ - u32 *rss_key; - - if (!wx->rss_key) { - rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL); - if (unlikely(!rss_key)) - return -ENOMEM; - - netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE); - wx->rss_key = rss_key; - } - - return 0; -} - /** * ngbe_sw_init - Initialize general software structures * @wx: board private structure to initialize @@ -134,8 +112,9 @@ static int ngbe_sw_init(struct wx *wx) dev_err(&pdev->dev, "Do not support MSI-X\n"); wx->mac.max_msix_vectors = msix_count; - if (ngbe_init_rss_key(wx)) - return -ENOMEM; + wx->ring_feature[RING_F_RSS].limit = min_t(int, NGBE_MAX_RSS_INDICES, + num_online_cpus()); + wx->rss_enabled = true; /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; @@ -175,7 +154,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues) if (queues) wx_intr_enable(wx, NGBE_INTR_ALL); else - wx_intr_enable(wx, NGBE_INTR_MISC(wx)); + wx_intr_enable(wx, NGBE_INTR_MISC); } /** @@ -241,7 +220,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) for (vector = 0; vector < wx->num_q_vectors; vector++) { struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; + struct msix_entry *entry = &wx->msix_q_entries[vector]; if (q_vector->tx.ring && q_vector->rx.ring) snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -259,7 +238,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) } } - err = request_irq(wx->msix_entries[vector].vector, + err = request_irq(wx->msix_entry->vector, ngbe_msix_other, 0, netdev->name, wx); if (err) { @@ -272,7 +251,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) free_queue_irqs: while (vector) { vector--; - free_irq(wx->msix_entries[vector].vector, + free_irq(wx->msix_q_entries[vector].vector, wx->q_vector[vector]); } wx_reset_interrupt_capability(wx); @@ -330,17 +309,19 @@ static void ngbe_disable_device(struct wx *wx) wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); } + + wx_update_stats(wx); } -static void ngbe_down(struct wx *wx) +void ngbe_down(struct wx *wx) { - phy_stop(wx->phydev); + phylink_stop(wx->phylink); ngbe_disable_device(wx); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); } -static void ngbe_up(struct wx *wx) +void ngbe_up(struct wx *wx) { wx_configure_vectors(wx); @@ -357,7 +338,7 @@ static void ngbe_up(struct wx *wx) if (wx->gpio_ctrl) ngbe_sfp_modules_txrx_powerctl(wx, true); - phy_start(wx->phydev); + phylink_start(wx->phylink); } /** @@ -386,7 +367,7 @@ static int ngbe_open(struct net_device *netdev) if (err) goto err_free_resources; - err = ngbe_phy_connect(wx); + err = phylink_connect_phy(wx->phylink, wx->phydev); if (err) goto err_free_irq; @@ -402,7 +383,7 @@ static int ngbe_open(struct net_device *netdev) return 0; err_dis_phy: - phy_disconnect(wx->phydev); + phylink_disconnect_phy(wx->phylink); err_free_irq: wx_free_irq(wx); err_free_resources: @@ -428,7 +409,7 @@ static int ngbe_close(struct net_device *netdev) ngbe_down(wx); wx_free_irq(wx); wx_free_resources(wx); - phy_disconnect(wx->phydev); + phylink_disconnect_phy(wx->phylink); wx_control_hw(wx, false); return 0; @@ -478,6 +459,39 @@ static void ngbe_shutdown(struct pci_dev *pdev) } } +/** + * ngbe_setup_tc - routine to configure net_device for multiple traffic + * classes. 
+ * + * @dev: net device to configure + * @tc: number of traffic classes to enable + */ +int ngbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct wx *wx = netdev_priv(dev); + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. + */ + if (netif_running(dev)) + ngbe_close(dev); + + wx_clear_interrupt_scheme(wx); + + if (tc) + netdev_set_num_tc(dev, tc); + else + netdev_reset_tc(dev); + + wx_init_interrupt_scheme(wx); + + if (netif_running(dev)) + ngbe_open(dev); + + return 0; +} + static const struct net_device_ops ngbe_netdev_ops = { .ndo_open = ngbe_open, .ndo_stop = ngbe_close, @@ -580,6 +594,7 @@ static int ngbe_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - @@ -675,14 +690,10 @@ static int ngbe_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, wx); - netif_info(wx, probe, netdev, - "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", - wx->mac_type == em_mac_type_mdi ? "Internal" : "External"); - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_register: + phylink_destroy(wx->phylink); wx_control_hw(wx, false); err_clear_interrupt_scheme: wx_clear_interrupt_scheme(wx); @@ -712,9 +723,11 @@ static void ngbe_remove(struct pci_dev *pdev) netdev = wx->netdev; unregister_netdev(netdev); + phylink_destroy(wx->phylink); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); + kfree(wx->rss_key); kfree(wx->mac_table); wx_clear_interrupt_scheme(wx); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index 5007addd119aa54f0c24ecb45525e88ed333bb66..ec54b18c5fe73b942cdeb4c362168c06e7b16a28 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -29,117 +29,6 @@ static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int re return 0; } -static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c22 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c22 command did 
not complete.\n"); - - return ret; -} - -static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, command; - u16 val; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) { struct wx *wx = bus->priv; @@ -148,7 +37,7 @@ static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) if (wx->mac_type == em_mac_type_mdi) phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum); else - phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum); + phy_data = wx_phy_read_reg_mdi_c22(bus, phy_addr, regnum); return phy_data; } @@ -162,27 +51,33 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, if (wx->mac_type == em_mac_type_mdi) ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value); else - ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); + ret = wx_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); return ret; } -static void ngbe_handle_link_change(struct net_device *dev) +static void ngbe_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) { - struct wx *wx = netdev_priv(dev); - struct phy_device *phydev; +} + +static void ngbe_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ +} + +static void ngbe_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct wx *wx = phylink_to_wx(config); u32 lan_speed, reg; - phydev = wx->phydev; - if (!(wx->link != phydev->link || - wx->speed != phydev->speed || - wx->duplex != phydev->duplex)) - return; + wx_fc_enable(wx, tx_pause, rx_pause); - wx->link = phydev->link; - wx->speed = phydev->speed; - wx->duplex = phydev->duplex; - switch (phydev->speed) { + switch (speed) { case SPEED_10: lan_speed = 0; break; @@ -194,58 +89,51 @@ static void ngbe_handle_link_change(struct net_device *dev) lan_speed = 2; break; } + wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed); - if (phydev->link) { - reg = rd32(wx, WX_MAC_TX_CFG); - reg &= ~WX_MAC_TX_CFG_SPEED_MASK; - reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; - 
wr32(wx, WX_MAC_TX_CFG, reg); - /* Re configure MAC RX */ - reg = rd32(wx, WX_MAC_RX_CFG); - wr32(wx, WX_MAC_RX_CFG, reg); - wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); - reg = rd32(wx, WX_MAC_WDG_TIMEOUT); - wr32(wx, WX_MAC_WDG_TIMEOUT, reg); - } - phy_print_status(phydev); + reg = rd32(wx, WX_MAC_TX_CFG); + reg &= ~WX_MAC_TX_CFG_SPEED_MASK; + reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; + wr32(wx, WX_MAC_TX_CFG, reg); + + /* Re configure MAC Rx */ + reg = rd32(wx, WX_MAC_RX_CFG); + wr32(wx, WX_MAC_RX_CFG, reg); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + reg = rd32(wx, WX_MAC_WDG_TIMEOUT); + wr32(wx, WX_MAC_WDG_TIMEOUT, reg); } -int ngbe_phy_connect(struct wx *wx) +static const struct phylink_mac_ops ngbe_mac_ops = { + .mac_config = ngbe_mac_config, + .mac_link_down = ngbe_mac_link_down, + .mac_link_up = ngbe_mac_link_up, +}; + +static int ngbe_phylink_init(struct wx *wx) { - int ret; + struct phylink_config *config; + phy_interface_t phy_mode; + struct phylink *phylink; - /* The MAC only has add the Tx delay and it can not be modified. - * So just disable TX delay in PHY, and it is does not matter to - * internal phy. - */ - ret = phy_connect_direct(wx->netdev, - wx->phydev, - ngbe_handle_link_change, - PHY_INTERFACE_MODE_RGMII_RXID); - if (ret) { - wx_err(wx, "PHY connect failed.\n"); - return ret; - } + config = &wx->phylink_config; + config->dev = &wx->netdev->dev; + config->type = PHYLINK_NETDEV; + config->mac_capabilities = MAC_1000FD | MAC_100FD | MAC_10FD | + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + config->mac_managed_pm = true; - return 0; -} + phy_mode = PHY_INTERFACE_MODE_RGMII_ID; + __set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces); -static void ngbe_phy_fixup(struct wx *wx) -{ - struct phy_device *phydev = wx->phydev; - struct ethtool_eee eee; - - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - phydev->mac_managed_pm = true; - if (wx->mac_type != em_mac_type_mdi) - return; - /* disable EEE, internal phy does not support eee */ - memset(&eee, 0, sizeof(eee)); - phy_ethtool_set_eee(phydev, &eee); + phylink = phylink_create(config, NULL, phy_mode, &ngbe_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + wx->phylink = phylink; + + return 0; } int ngbe_mdio_init(struct wx *wx) @@ -266,8 +154,8 @@ int ngbe_mdio_init(struct wx *wx) mii_bus->priv = wx; if (wx->mac_type == em_mac_type_rgmii) { - mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45; - mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45; + mii_bus->read_c45 = wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = wx_phy_write_reg_mdi_c45; } snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev)); @@ -280,11 +168,16 @@ int ngbe_mdio_init(struct wx *wx) return -ENODEV; phy_attached_info(wx->phydev); - ngbe_phy_fixup(wx); wx->link = 0; wx->speed = 0; wx->duplex = 0; + ret = ngbe_phylink_init(wx); + if (ret) { + wx_err(wx, "failed to init phylink: %d\n", ret); + return ret; + } + return 0; } diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h index 0a6400dd89c4c08da275e04bc6a25577045c77a3..f610b771888a5ed39283a1af7c452104ea505e03 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h @@ -7,6 +7,5 @@ #ifndef _NGBE_MDIO_H_ #define _NGBE_MDIO_H_ -int ngbe_phy_connect(struct wx *wx); int ngbe_mdio_init(struct wx *wx); #endif /* 
_NGBE_MDIO_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index 72c8cd2d557513e24c510aa3fdc89dc01871e2eb..f48ed7fc1805ab0cd89e4bcbafd9ef3406668f0f 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -59,9 +59,6 @@ #define NGBE_EEPROM_VERSION_L 0x1D #define NGBE_EEPROM_VERSION_H 0x1E -/* Media-dependent registers. */ -#define NGBE_MDIO_CLAUSE_SELECT 0x11220 - /* GPIO Registers */ #define NGBE_GPIO_DR 0x14800 #define NGBE_GPIO_DDR 0x14804 @@ -83,7 +80,7 @@ NGBE_PX_MISC_IEN_GPIO) #define NGBE_INTR_ALL 0x1FF -#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors) +#define NGBE_INTR_MISC BIT(0) #define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) #define NGBE_CFG_LAN_SPEED 0x14440 @@ -108,6 +105,7 @@ #define NGBE_FW_CMD_ST_FAIL 0x70657376 #define NGBE_MAX_FDIR_INDICES 7 +#define NGBE_MAX_RSS_INDICES 8 #define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) #define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) @@ -133,4 +131,8 @@ extern char ngbe_driver_name[]; +void ngbe_down(struct wx *wx); +void ngbe_up(struct wx *wx); +int ngbe_setup_tc(struct net_device *dev, u8 tc); + #endif /* _NGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 859da112586a427043e2f608793d4d81226bcc63..084e2faf9db192f119fd5c771163794c1d05dfd2 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -7,38 +7,93 @@ #include "../libwx/wx_ethtool.h" #include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" #include "txgbe_type.h" #include "txgbe_ethtool.h" -static int txgbe_nway_reset(struct net_device *netdev) +static int txgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { - struct txgbe *txgbe = netdev_to_txgbe(netdev); + struct wx *wx = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + struct wx_ring *temp_ring; + int i; - return phylink_ethtool_nway_reset(txgbe->phylink); -} + new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); -static int txgbe_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct txgbe *txgbe = netdev_to_txgbe(netdev); + new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == wx->tx_ring_count && + new_rx_count == wx->rx_ring_count) + return 0; + + if (!netif_running(wx->netdev)) { + for (i = 0; i < wx->num_tx_queues; i++) + wx->tx_ring[i]->count = new_tx_count; + for (i = 0; i < wx->num_rx_queues; i++) + wx->rx_ring[i]->count = new_rx_count; + wx->tx_ring_count = new_tx_count; + wx->rx_ring_count = new_rx_count; + + return 0; + } - return phylink_ethtool_ksettings_get(txgbe->phylink, cmd); + /* allocate temporary buffer to store rings in */ + i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); + temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); + if (!temp_ring) + return -ENOMEM; + + txgbe_down(wx); + + wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring); + kvfree(temp_ring); + + txgbe_up(wx); + + return 0; } -static int txgbe_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) +static int 
txgbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) { - struct txgbe *txgbe = netdev_to_txgbe(netdev); + int err; + + err = wx_set_channels(dev, ch); + if (err < 0) + return err; - return phylink_ethtool_ksettings_set(txgbe->phylink, cmd); + /* use setup TC to update any traffic class queue mapping */ + return txgbe_setup_tc(dev, netdev_get_num_tc(dev)); } static const struct ethtool_ops txgbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_drvinfo = wx_get_drvinfo, - .nway_reset = txgbe_nway_reset, + .nway_reset = wx_nway_reset, .get_link = ethtool_op_get_link, - .get_link_ksettings = txgbe_get_link_ksettings, - .set_link_ksettings = txgbe_set_link_ksettings, + .get_link_ksettings = wx_get_link_ksettings, + .set_link_ksettings = wx_set_link_ksettings, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, + .get_pauseparam = wx_get_pauseparam, + .set_pauseparam = wx_set_pauseparam, + .get_ringparam = wx_get_ringparam, + .set_ringparam = txgbe_set_ringparam, + .get_coalesce = wx_get_coalesce, + .set_coalesce = wx_set_coalesce, + .get_msglevel = wx_get_msglevel, + .set_msglevel = wx_set_msglevel, + .get_channels = wx_get_channels, + .set_channels = txgbe_set_channels, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 37274525027002a42d21fa36ea6db39b03f5cb68..d6b2b3c781b6f2b25f6349c020af38e75c77b359 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -70,114 +70,6 @@ static void txgbe_init_thermal_sensor_thresh(struct wx *wx) wr32(wx, WX_TS_DALARM_THRE, 614); } -/** - * txgbe_read_pba_string - Reads part number string from EEPROM - * @wx: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length - * - * Reads the part number string from the EEPROM. 
- **/ -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) -{ - u16 pba_ptr, offset, length, data; - int ret_val; - - if (!pba_num) { - wx_err(wx, "PBA string buffer was null\n"); - return -EINVAL; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, - &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, - &pba_ptr); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - /* if data is not ptr guard the PBA must be in legacy format which - * means pba_ptr is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (data != TXGBE_PBANUM_PTR_GUARD) { - wx_err(wx, "NVM PBA number is not stored as string\n"); - - /* we will need 11 characters to store the PBA */ - if (pba_num_size < 11) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; - } - - /* extract hex string from data and pba_ptr */ - pba_num[0] = (data >> 12) & 0xF; - pba_num[1] = (data >> 8) & 0xF; - pba_num[2] = (data >> 4) & 0xF; - pba_num[3] = data & 0xF; - pba_num[4] = (pba_ptr >> 12) & 0xF; - pba_num[5] = (pba_ptr >> 8) & 0xF; - pba_num[6] = '-'; - pba_num[7] = 0; - pba_num[8] = (pba_ptr >> 4) & 0xF; - pba_num[9] = pba_ptr & 0xF; - - /* put a null character on the end of our string */ - pba_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { - if (pba_num[offset] < 0xA) - pba_num[offset] += '0'; - else if (pba_num[offset] < 0x10) - pba_num[offset] += 'A' - 0xA; - } - - return 0; - } - - ret_val = wx_read_ee_hostif(wx, pba_ptr, &length); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - if (length == 0xFFFF || length == 0) { - wx_err(wx, "NVM PBA number section invalid length\n"); - return -EINVAL; - } - - /* check if pba_num buffer is big enough */ - if (pba_num_size < (((u32)length * 2) - 1)) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; - } - - /* trim pba length from start of string */ - pba_ptr++; - length--; - - for (offset = 0; offset < length; offset++) { - ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - pba_num[offset * 2] = (u8)(data >> 8); - pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); - } - pba_num[offset * 2] = '\0'; - - return 0; -} - /** * txgbe_calc_eeprom_checksum - Calculates and returns the checksum * @wx: pointer to hardware structure @@ -306,6 +198,8 @@ int txgbe_reset_hw(struct wx *wx) txgbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + /* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index abc729eb187ae8da87d25cd3f2d3c881199b5192..1f3ecf60e3c44d971e6bb12cc24324478cb108ca 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -6,7 +6,6 @@ int txgbe_disable_sec_tx_path(struct wx *wx); void txgbe_enable_sec_tx_path(struct wx *wx); -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size); int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val); int txgbe_reset_hw(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 
d60c26ba0ba4c98815a9609550ea3046aabbe0eb..3b151c410a5c22845a4022e88723b2facb9ef9c2 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -86,7 +86,7 @@ static void txgbe_irq_enable(struct wx *wx, bool queues) wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); + wx_intr_enable(wx, TXGBE_INTR_MISC); if (queues) wx_intr_enable(wx, TXGBE_INTR_QALL(wx)); } @@ -145,7 +145,7 @@ static int txgbe_request_msix_irqs(struct wx *wx) for (vector = 0; vector < wx->num_q_vectors; vector++) { struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; + struct msix_entry *entry = &wx->msix_q_entries[vector]; if (q_vector->tx.ring && q_vector->rx.ring) snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -168,7 +168,7 @@ static int txgbe_request_msix_irqs(struct wx *wx) free_queue_irqs: while (vector) { vector--; - free_irq(wx->msix_entries[vector].vector, + free_irq(wx->msix_q_entries[vector].vector, wx->q_vector[vector]); } wx_reset_interrupt_capability(wx); @@ -206,7 +206,6 @@ static int txgbe_request_irq(struct wx *wx) static void txgbe_up_complete(struct wx *wx) { struct net_device *netdev = wx->netdev; - struct txgbe *txgbe; wx_control_hw(wx, true); wx_configure_vectors(wx); @@ -215,8 +214,7 @@ static void txgbe_up_complete(struct wx *wx) smp_mb__before_atomic(); wx_napi_enable_all(wx); - txgbe = netdev_to_txgbe(netdev); - phylink_start(txgbe->phylink); + phylink_start(wx->phylink); /* clear any pending interrupts, may auto mask */ rd32(wx, WX_PX_IC(0)); @@ -286,20 +284,26 @@ static void txgbe_disable_device(struct wx *wx) /* Disable the Tx DMA engine */ wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0); + + wx_update_stats(wx); } -static void txgbe_down(struct wx *wx) +void txgbe_down(struct wx *wx) { - struct txgbe *txgbe = netdev_to_txgbe(wx->netdev); - txgbe_disable_device(wx); txgbe_reset(wx); - phylink_stop(txgbe->phylink); + phylink_stop(wx->phylink); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); } +void txgbe_up(struct wx *wx) +{ + wx_configure(wx); + txgbe_up_complete(wx); +} + /** * txgbe_init_type_code - Initialize the shared code * @wx: pointer to hardware structure @@ -374,6 +378,10 @@ static int txgbe_sw_init(struct wx *wx) wx_err(wx, "Do not support MSI-X\n"); wx->mac.max_msix_vectors = msix_count; + wx->ring_feature[RING_F_RSS].limit = min_t(int, TXGBE_MAX_RSS_INDICES, + num_online_cpus()); + wx->rss_enabled = true; + /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; wx->tx_itr_setting = 1; @@ -500,6 +508,41 @@ static void txgbe_shutdown(struct pci_dev *pdev) } } +/** + * txgbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @dev: net device to configure + * @tc: number of traffic classes to enable + */ +int txgbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct wx *wx = netdev_priv(dev); + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. 
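+	 *
+	 * Editor's sketch of the resulting sequence:
+	 *   txgbe_close(dev)            -- only if the interface is running
+	 *   wx_clear_interrupt_scheme() -- drop vectors sized for old queues
+	 *   netdev_set_num_tc() / netdev_reset_tc()
+	 *   wx_init_interrupt_scheme()  -- vectors sized for the new layout
+	 *   txgbe_open(dev)             -- only if it was running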
+ */ + if (netif_running(dev)) + txgbe_close(dev); + else + txgbe_reset(wx); + + wx_clear_interrupt_scheme(wx); + + if (tc) + netdev_set_num_tc(dev, tc); + else + netdev_reset_tc(dev); + + wx_init_interrupt_scheme(wx); + + if (netif_running(dev)) + txgbe_open(dev); + + return 0; +} + static const struct net_device_ops txgbe_netdev_ops = { .ndo_open = txgbe_open, .ndo_stop = txgbe_close, @@ -536,7 +579,6 @@ static int txgbe_probe(struct pci_dev *pdev, u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0; u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; u16 build = 0, major = 0, patch = 0; - u8 part_str[TXGBE_PBANUM_LENGTH]; u32 etrack_id = 0; err = pci_enable_device_mem(pdev); @@ -637,6 +679,7 @@ static int txgbe_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - @@ -734,13 +777,6 @@ static int txgbe_probe(struct pci_dev *pdev, else dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n"); - /* First try to read PBA as a string */ - err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH); - if (err) - strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH); - - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_remove_phy: @@ -781,6 +817,7 @@ static void txgbe_remove(struct pci_dev *pdev) pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); + kfree(wx->rss_key); kfree(wx->mac_table); wx_clear_interrupt_scheme(wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 4159c84035fdceb318e0617d455bccb132f51ea5..1b84d495d14e8d2cc538eca54cb1a0b4a3f87a47 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -159,7 +159,8 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe) static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config, phy_interface_t interface) { - struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); + struct txgbe *txgbe = wx->priv; if (interface == PHY_INTERFACE_MODE_10GBASER) return &txgbe->xpcs->pcs; @@ -175,7 +176,7 @@ static void txgbe_mac_config(struct phylink_config *config, unsigned int mode, static void txgbe_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); } @@ -186,9 +187,11 @@ static void txgbe_mac_link_up(struct phylink_config *config, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); u32 txcfg, wdg; + wx_fc_enable(wx, tx_pause, rx_pause); + txcfg = rd32(wx, WX_MAC_TX_CFG); txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK; @@ -217,7 +220,7 @@ static void txgbe_mac_link_up(struct phylink_config *config, static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); @@ -228,7 +231,7 @@ static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode, phy_interface_t 
interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); txgbe_enable_sec_tx_path(wx); wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); @@ -253,10 +256,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) phy_interface_t phy_mode; struct phylink *phylink; - config = devm_kzalloc(&wx->pdev->dev, sizeof(*config), GFP_KERNEL); - if (!config) - return -ENOMEM; - + config = &wx->phylink_config; config->dev = &wx->netdev->dev; config->type = PHYLINK_NETDEV; config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | @@ -287,7 +287,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) } } - txgbe->phylink = phylink; + wx->phylink = phylink; return 0; } @@ -483,11 +483,11 @@ static void txgbe_irq_handler(struct irq_desc *desc) TXGBE_PX_MISC_ETH_AN)) { u32 reg = rd32(wx, TXGBE_CFG_PORT_ST); - phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); + phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); } /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); + wx_intr_enable(wx, TXGBE_INTR_MISC); } static int txgbe_gpio_init(struct txgbe *txgbe) @@ -531,7 +531,12 @@ static int txgbe_gpio_init(struct txgbe *txgbe) sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) return -ENOMEM; - girq->parents[0] = wx->msix_entries[wx->num_q_vectors].vector; + + /* currently only supported with MSI-X interrupts */ + if (!wx->msix_entry) + return -EPERM; + + girq->parents[0] = wx->msix_entry->vector; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; @@ -647,58 +652,6 @@ static int txgbe_sfp_register(struct txgbe *txgbe) return 0; } -static int txgbe_phy_read(struct mii_bus *bus, int phy_addr, - int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int txgbe_phy_write(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, command; - u16 val; - - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - - command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - static int txgbe_ext_phy_init(struct txgbe *txgbe) { struct phy_device *phydev; @@ -715,8 +668,8 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe) return -ENOMEM; mii_bus->name = "txgbe_mii_bus"; - mii_bus->read_c45 = &txgbe_phy_read; - mii_bus->write_c45 = &txgbe_phy_write; + mii_bus->read_c45 = &wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = &wx_phy_write_reg_mdi_c45; mii_bus->parent = &pdev->dev; mii_bus->phy_mask = GENMASK(31, 1); mii_bus->priv = wx; @@ -753,6 +706,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe) int txgbe_init_phy(struct txgbe *txgbe) { + struct wx
*wx = txgbe->wx; int ret; if (txgbe->wx->media_type == sp_media_copper) @@ -760,46 +714,48 @@ int txgbe_init_phy(struct txgbe *txgbe) ret = txgbe_swnodes_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register software nodes\n"); + wx_err(wx, "failed to register software nodes\n"); return ret; } ret = txgbe_mdio_pcs_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init mdio pcs: %d\n", ret); + wx_err(wx, "failed to init mdio pcs: %d\n", ret); goto err_unregister_swnode; } ret = txgbe_phylink_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init phylink\n"); + wx_err(wx, "failed to init phylink\n"); goto err_destroy_xpcs; } ret = txgbe_gpio_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init gpio\n"); + wx_err(wx, "failed to init gpio\n"); goto err_destroy_phylink; } ret = txgbe_clock_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register clock: %d\n", ret); + wx_err(wx, "failed to register clock: %d\n", ret); goto err_destroy_phylink; } ret = txgbe_i2c_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init i2c interface: %d\n", ret); + wx_err(wx, "failed to init i2c interface: %d\n", ret); goto err_unregister_clk; } ret = txgbe_sfp_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register sfp\n"); + wx_err(wx, "failed to register sfp\n"); goto err_unregister_i2c; } + wx->msix_in_use = true; + return 0; err_unregister_i2c: @@ -808,7 +764,7 @@ int txgbe_init_phy(struct txgbe *txgbe) clkdev_drop(txgbe->clock); clk_unregister(txgbe->clk); err_destroy_phylink: - phylink_destroy(txgbe->phylink); + phylink_destroy(wx->phylink); err_destroy_xpcs: xpcs_destroy(txgbe->xpcs); err_unregister_swnode: @@ -820,8 +776,8 @@ int txgbe_init_phy(struct txgbe *txgbe) void txgbe_remove_phy(struct txgbe *txgbe) { if (txgbe->wx->media_type == sp_media_copper) { - phylink_disconnect_phy(txgbe->phylink); - phylink_destroy(txgbe->phylink); + phylink_disconnect_phy(txgbe->wx->phylink); + phylink_destroy(txgbe->wx->phylink); return; } @@ -829,7 +785,8 @@ void txgbe_remove_phy(struct txgbe *txgbe) platform_device_unregister(txgbe->i2c_dev); clkdev_drop(txgbe->clock); clk_unregister(txgbe->clk); - phylink_destroy(txgbe->phylink); + phylink_destroy(txgbe->wx->phylink); xpcs_destroy(txgbe->xpcs); software_node_unregister_node_group(txgbe->nodes.group); + txgbe->wx->msix_in_use = false; } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 51199c355f95ce4f21e2fbcb745474316f282e72..270a6fd9ad0b07361416308ec6f58c27513ce869 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -88,9 +88,6 @@ #define TXGBE_XPCS_IDA_ADDR 0x13000 #define TXGBE_XPCS_IDA_DATA 0x13004 -/* Part Number String Length */ -#define TXGBE_PBANUM_LENGTH 32 - /* Checksum and EEPROM pointers */ #define TXGBE_EEPROM_LAST_WORD 0x800 #define TXGBE_EEPROM_CHECKSUM 0x2F @@ -98,12 +95,10 @@ #define TXGBE_EEPROM_VERSION_L 0x1D #define TXGBE_EEPROM_VERSION_H 0x1E #define TXGBE_ISCSI_BOOT_CONFIG 0x07 -#define TXGBE_PBANUM0_PTR 0x05 -#define TXGBE_PBANUM1_PTR 0x06 -#define TXGBE_PBANUM_PTR_GUARD 0xFAFA #define TXGBE_MAX_MSIX_VECTORS 64 #define TXGBE_MAX_FDIR_INDICES 63 +#define TXGBE_MAX_RSS_INDICES 63 #define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) #define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) @@ -128,19 +123,16 @@ #define TXGBE_DEFAULT_RX_WORK 128 #endif -#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors) -#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) 
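The txgbe_type.h hunk here inverts the MSI-X vector layout: the misc interrupt, which previously sat one bit above the last queue vector, becomes fixed at vector 0, and the queue vectors move up to bits 1..num_q_vectors (the new TXGBE_INTR_MISC/TXGBE_INTR_QALL definitions follow in the next hunk, matching the wx->msix_entry / wx->msix_q_entries split above). A minimal userspace sketch of the new mask arithmetic, with BIT()/GENMASK() expanded by hand and a hypothetical vector count::

	#include <stdint.h>
	#include <stdio.h>

	/* Illustration only: kernel BIT()/GENMASK() expanded for userspace. */
	#define BIT(n)		(1U << (n))
	#define GENMASK(h, l)	(((~0U) >> (31 - (h))) & ((~0U) << (l)))

	int main(void)
	{
		unsigned int num_q_vectors = 4;	/* hypothetical queue vector count */
		uint32_t misc = BIT(0);			/* misc IRQ: vector 0 */
		uint32_t qall = GENMASK(num_q_vectors, 1); /* queue IRQs: 1..4 */

		printf("misc=0x%x qall=0x%x\n", misc, qall); /* prints 0x1, 0x1e */
		return 0;
	}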
+#define TXGBE_INTR_MISC BIT(0) +#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1) #define TXGBE_MAX_EITR GENMASK(11, 3) extern char txgbe_driver_name[]; -static inline struct txgbe *netdev_to_txgbe(struct net_device *netdev) -{ - struct wx *wx = netdev_priv(netdev); - - return wx->priv; -} +void txgbe_down(struct wx *wx); +void txgbe_up(struct wx *wx); +int txgbe_setup_tc(struct net_device *dev, u8 tc); #define NODE_PROP(_NAME, _PROP) \ (const struct software_node) { \ @@ -181,7 +173,6 @@ struct txgbe { struct wx *wx; struct txgbe_nodes nodes; struct dw_xpcs *xpcs; - struct phylink *phylink; struct platform_device *sfp_dev; struct platform_device *i2c_dev; struct clk_lookup *clock; diff --git a/drivers/net/ethernet/yunsilicon/Kconfig b/drivers/net/ethernet/yunsilicon/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..a387a8ddeba4b2c5274d131aa6080fa7ba586a66 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Yunsilicon driver configuration +# + +config NET_VENDOR_YUNSILICON + bool "Yunsilicon devices" + default y + depends on PCI || NET + depends on ARM64 || X86_64 + help + If you have a network (Ethernet or RDMA) device belonging to this + class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Yunsilicon devices. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_YUNSILICON + +source "drivers/net/ethernet/yunsilicon/xsc/net/Kconfig" +source "drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig" + +endif # NET_VENDOR_YUNSILICON diff --git a/drivers/net/ethernet/yunsilicon/Makefile b/drivers/net/ethernet/yunsilicon/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..0c603d2bf207e4e589ce7ed68261aff7321879d6 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Makefile for the Yunsilicon device drivers. +# + +obj-$(CONFIG_YUNSILICON_XSC_ETH) += xsc/net/ +obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc/pci/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/cq.h b/drivers/net/ethernet/yunsilicon/xsc/common/cq.h new file mode 100644 index 0000000000000000000000000000000000000000..76f0c506444649a12602889936f3c1360ed65c61 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/cq.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_CORE_CQ_H +#define XSC_CORE_CQ_H + +#include +#include "common/driver.h" +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" + +struct xsc_core_cq { + u32 cqn; + int cqe_sz; + u64 arm_db; + u64 ci_db; + struct xsc_core_device *dev; + atomic_t refcount; + struct completion free; + unsigned int vector; + int irqn; + u16 dim_us; + u16 dim_pkts; + void (*comp)(struct xsc_core_cq *cq); + void (*event)(struct xsc_core_cq *cq, enum xsc_event); + u32 cons_index; + unsigned int arm_sn; + struct xsc_rsc_debug *dbg; + int pid; + u32 reg_next_cid; + u32 reg_done_pid; + struct xsc_eq *eq; +}; + +enum { + XSC_CQE_OWNER_MASK = 1, +}; + +enum { + CQE_SIZE_64 = 0, + CQE_SIZE_128 = 1, +}; + +enum { + XSC_CQ_DB_REQ_NOT_SOL = 1, + XSC_CQ_DB_REQ_NOT = 0, +}; + +static inline void xsc_cq_arm(struct xsc_core_cq *cq, u8 solicited) +{ + union xsc_cq_doorbell db; + + db.val = 0; + db.cq_next_cid = cq->cons_index; + db.cq_id = cq->cqn; + db.arm = solicited; + + /* Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. + */ + wmb(); + writel(db.val, REG_ADDR(cq->dev, cq->arm_db)); +} + +static inline void xsc_cq_set_ci(struct xsc_core_cq *cq) +{ + struct xsc_core_device *xdev = cq->dev; + union xsc_cq_doorbell db; + + db.cq_next_cid = cq->cons_index; + db.cq_id = cq->cqn; + /* ensure the written value is visible before ringing the doorbell */ + wmb(); + + writel(db.val, REG_ADDR(xdev, cq->ci_db)); +} + +int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, + struct xsc_create_cq_mbox_in *in, int inlen); +int xsc_core_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq); +int xsc_core_query_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, + struct xsc_query_cq_mbox_out *out); +int xsc_debug_cq_add(struct xsc_core_device *dev, struct xsc_core_cq *cq); +void xsc_debug_cq_remove(struct xsc_core_device *dev, struct xsc_core_cq *cq); + +void xsc_init_cq_table(struct xsc_core_device *dev); +void xsc_cleanup_cq_table(struct xsc_core_device *dev); +#endif /* XSC_CORE_CQ_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/device.h b/drivers/net/ethernet/yunsilicon/xsc/common/device.h new file mode 100644 index 0000000000000000000000000000000000000000..1d1b0be093798ad76fa963b92b02f11a7a3d15ee --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/device.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved.
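The two inline helpers in cq.h above encode the CQ doorbell protocol: software advances cons_index as it consumes CQEs, publishes it through the consumer-index doorbell, and optionally re-arms the CQ for the next event. A hedged sketch of a completion path built on them; xsc_poll_one_cqe() is hypothetical and stands in for the driver's real CQE parsing::

	/* Hypothetical: returns 0 while a valid CQE is available. */
	static int xsc_poll_one_cqe(struct xsc_core_cq *cq);

	static void xsc_cq_completion_sketch(struct xsc_core_cq *cq, int budget)
	{
		int polled = 0;

		while (polled < budget && !xsc_poll_one_cqe(cq)) {
			cq->cons_index++;	/* feeds db.cq_next_cid */
			polled++;
		}

		xsc_cq_set_ci(cq);		/* publish the consumer index */
		if (polled < budget)		/* ring drained: re-arm events */
			xsc_cq_arm(cq, XSC_CQ_DB_REQ_NOT);
	}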
+ */ + +#ifndef XSC_DEVICE_H +#define XSC_DEVICE_H + +#include +#include + +enum { + XSC_MAX_COMMANDS = 32, + XSC_CMD_DATA_BLOCK_SIZE = 512, + XSC_PCI_CMD_XPORT = 7, +}; + +enum { + XSC_PERM_LOCAL_READ = 1 << 0, + XSC_PERM_LOCAL_WRITE = 1 << 1, + XSC_PERM_REMOTE_READ = 1 << 2, + XSC_PERM_REMOTE_WRITE = 1 << 3, + XSC_PERM_ATOMIC = 1 << 6, + XSC_PERM_UMR_EN = 1 << 7, +}; + +enum { + XSC_ACCESS_MODE_PA = 0, + XSC_ACCESS_MODE_MTT = 1, + XSC_ACCESS_MODE_KLM = 2 +}; + +enum { + XSC_MKEY_REMOTE_INVAL = 1 << 24, + XSC_MKEY_FLAG_SYNC_UMR = 1 << 29, + XSC_MKEY_BSF_EN = 1 << 30, + XSC_MKEY_LEN64 = 1 << 31, +}; + +enum { + XSC_BF_REGS_PER_PAGE = 4, + XSC_MAX_UAR_PAGES = 1 << 8, + XSC_MAX_UUARS = XSC_MAX_UAR_PAGES * XSC_BF_REGS_PER_PAGE, +}; + +enum { + XSC_DEV_CAP_FLAG_RC = 1LL << 0, + XSC_DEV_CAP_FLAG_UC = 1LL << 1, + XSC_DEV_CAP_FLAG_UD = 1LL << 2, + XSC_DEV_CAP_FLAG_XRC = 1LL << 3, + XSC_DEV_CAP_FLAG_SRQ = 1LL << 6, + XSC_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, + XSC_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, + XSC_DEV_CAP_FLAG_APM = 1LL << 17, + XSC_DEV_CAP_FLAG_ATOMIC = 1LL << 18, + XSC_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, + XSC_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, + XSC_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, + XSC_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, + XSC_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, + XSC_DEV_CAP_FLAG_DCT = 1LL << 41, + XSC_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46, +}; + +enum xsc_event { + XSC_EVENT_TYPE_COMP = 0x0, + XSC_EVENT_TYPE_COMM_EST = 0x02, // MAD + XSC_EVENT_TYPE_CQ_ERROR = 0x04, + XSC_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + XSC_EVENT_TYPE_INTERNAL_ERROR = 0x08, // TPE-private error, no corresponding IB event + XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, // IBV_EVENT_QP_REQ_ERR + XSC_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, // IBV_EVENT_QP_ACCESS_ERR +}; + +struct xsc_cmd_prot_block { + u8 data[XSC_CMD_DATA_BLOCK_SIZE]; + u8 rsvd0[48]; + __be64 next; + __be32 block_num; + u8 owner_status; // init to 0; the DMA user should set it to 1 + u8 token; + u8 ctrl_sig; + u8 sig; +}; + +#define XSC_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) + +enum xsc_traffic_types { + XSC_TT_IPV4, + XSC_TT_IPV4_TCP, + XSC_TT_IPV4_UDP, + XSC_TT_IPV6, + XSC_TT_IPV6_TCP, + XSC_TT_IPV6_UDP, + XSC_TT_IPV4_IPSEC_AH, + XSC_TT_IPV6_IPSEC_AH, + XSC_TT_IPV4_IPSEC_ESP, + XSC_TT_IPV6_IPSEC_ESP, + XSC_TT_ANY, + XSC_NUM_TT, +}; + +#define XSC_NUM_INDIR_TIRS XSC_NUM_TT + +enum { + XSC_HASH_FUNC_XOR = 0, + XSC_HASH_FUNC_TOP = 1, + XSC_HASH_FUNC_TOP_SYM = 2, + XSC_HASH_FUNC_RSV = 3, +}; + +enum { + XSC_L3_PROT_TYPE_IPV4 = 1 << 0, + XSC_L3_PROT_TYPE_IPV6 = 1 << 1, +}; + +enum { + XSC_L4_PROT_TYPE_TCP = 1 << 0, + XSC_L4_PROT_TYPE_UDP = 1 << 1, +}; + +struct xsc_tirc_config { + u8 l3_prot_type; + u8 l4_prot_type; + u32 rx_hash_fields; +}; + +static inline u8 hash_func_type(u8 hash_func) +{ + switch (hash_func) { + case ETH_RSS_HASH_TOP: + return XSC_HASH_FUNC_TOP; + case ETH_RSS_HASH_XOR: + return XSC_HASH_FUNC_XOR; + default: + return XSC_HASH_FUNC_TOP; + } +} + +#endif /* XSC_DEVICE_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h b/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h new file mode 100644 index 0000000000000000000000000000000000000000..6b9fdfb738d8f6947af63353f5f761090df77268 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved.
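device.h above carries the RSS plumbing: a traffic-type enumeration, L3/L4 protocol bits, and hash_func_type(), which folds the ethtool hash-function constants down to the hardware encoding (anything unrecognised falls back to Toeplitz). A sketch of how a TIR-context fill might use it; xsc_fill_tirc_sketch() is illustrative, not driver API::

	static u8 xsc_fill_tirc_sketch(struct xsc_tirc_config *cfg, u8 ethtool_hfunc)
	{
		cfg->l3_prot_type = XSC_L3_PROT_TYPE_IPV4;
		cfg->l4_prot_type = XSC_L4_PROT_TYPE_TCP;

		/* ETH_RSS_HASH_XOR -> XSC_HASH_FUNC_XOR, others -> Toeplitz */
		return hash_func_type(ethtool_hfunc);
	}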
+ */ + +#ifndef XSC_DOORBELL_H +#define XSC_DOORBELL_H + +#if BITS_PER_LONG == 64 +/* Assume that we can just write a 64-bit doorbell atomically. s390 + * actually doesn't have writeq() but S/390 systems don't even have + * PCI so we won't worry about it. + */ + +#define XSC_DECLARE_DOORBELL_LOCK(name) +#define XSC_INIT_DOORBELL_LOCK(ptr) do { } while (0) +#define XSC_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void xsc_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + __raw_writeq(*(u64 *)val, dest); +} + +#else + +/* Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. + */ + +#define XSC_DECLARE_DOORBELL_LOCK(name) spinlock_t name +#define XSC_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define XSC_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void xsc_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + unsigned long flags; + + spin_lock_irqsave(doorbell_lock, flags); + __raw_writel((__force u32)val[0], dest); + __raw_writel((__force u32)val[1], dest + 4); + spin_unlock_irqrestore(doorbell_lock, flags); +} + +#endif + +#endif /* XSC_DOORBELL_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/driver.h b/drivers/net/ethernet/yunsilicon/xsc/common/driver.h new file mode 100644 index 0000000000000000000000000000000000000000..03705978a85a68c84a6fa19a7d5ed466bae31efb --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/driver.h @@ -0,0 +1,341 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_DRIVER_H +#define XSC_DRIVER_H + +#include +#include +#include +#include +#include +#include +#include +#include "common/device.h" +#include "common/doorbell.h" +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" +#include "common/xsc_hsi.h" +#include "common/qpts.h" + +#define LS_64(val, field) (((u64)(val) << field ## _SHIFT) & (field ## _MASK)) +#define RS_64(val, field) ((u64)((val) & field ## _MASK) >> field ## _SHIFT) +#define LS_32(val, field) (((val) << field ## _SHIFT) & (field ## _MASK)) +#define RS_32(val, field) (((val) & field ## _MASK) >> field ## _SHIFT) + +enum { + CMD_OWNER_SW = 0x0, + CMD_OWNER_HW = 0x1, + CMD_STATUS_SUCCESS = 0, +}; + +enum { + XSC_MAX_FW_PORTS = 1, +}; + +enum { + XSC_MAX_IRQ_NAME = 32 +}; + +enum { + XSC_MAX_EQ_NAME = 20 +}; + +enum { + XSC_REG_PCAP = 0x5001, + XSC_REG_PMTU = 0x5003, + XSC_REG_PTYS = 0x5004, + XSC_REG_PAOS = 0x5006, + XSC_REG_PMAOS = 0x5012, + XSC_REG_PUDE = 0x5009, + XSC_REG_PMPE = 0x5010, + XSC_REG_PELC = 0x500e, + XSC_REG_PMLP = 0, /* TBD */ + XSC_REG_NODE_DESC = 0x6001, + XSC_REG_HOST_ENDIANNESS = 0x7004, + XSC_REG_MCIA = 0x9014, +}; + +enum dbg_rsc_type { + XSC_DBG_RSC_QP, + XSC_DBG_RSC_EQ, + XSC_DBG_RSC_CQ, +}; + +struct xsc_field_desc { + struct dentry *dent; + int i; +}; + +struct xsc_rsc_debug { + struct xsc_core_device *xdev; + void *object; + enum dbg_rsc_type type; + struct dentry *root; + struct xsc_field_desc fields[]; +}; + +struct xsc_buf_list { + void *buf; + dma_addr_t map; +}; + +struct xsc_buf { + struct xsc_buf_list direct; + struct xsc_buf_list *page_list; + int nbufs; + int npages; + int page_shift; + int size; +}; + +struct xsc_frag_buf { + struct xsc_buf_list *frags; + int npages; + int size; + u8 page_shift; +}; + +struct xsc_frag_buf_ctrl { + struct xsc_buf_list *frags; + u32 sz_m1; + u16 frag_sz_m1; + u16 strides_offset; + u8 log_sz; + u8 
log_stride; + u8 log_frag_strides; +}; + +struct xsc_cq_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct xsc_eq { + struct xsc_core_device *dev; + struct xsc_cq_table cq_table; + u32 doorbell; // offset from the BAR0/2 space start + u32 cons_index; + struct xsc_buf buf; + int size; + unsigned int irqn; + u16 eqn; + int nent; + cpumask_var_t mask; + char name[XSC_MAX_EQ_NAME]; + struct list_head list; + int index; + struct xsc_rsc_debug *dbg; +}; + +struct xsc_core_mr { + u64 iova; + u64 size; + u32 key; + u32 pd; + u32 access; +}; + +struct xsc_eq_table { + void __iomem *update_ci; + void __iomem *update_arm_ci; + struct list_head comp_eqs_list; + struct xsc_eq pages_eq; + struct xsc_eq async_eq; + struct xsc_eq cmd_eq; + int num_comp_vectors; + int eq_vec_comp_base; + /* protect EQs list + */ + spinlock_t lock; +}; + +struct xsc_irq_info { + cpumask_var_t mask; + char name[XSC_MAX_IRQ_NAME]; +}; + +struct xsc_qp_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct counter_name_map { + int index; + const char *reg_name; +}; + +struct counter_reg_map { + int index; + int reg_addr; +}; + +struct xsc_dev_resource { + struct xsc_qp_table qp_table; + struct xsc_cq_table cq_table; + struct xsc_eq_table eq_table; + struct xsc_irq_info *irq_info; + spinlock_t mkey_lock; /* protect mkey */ + u8 mkey_key; + struct mutex alloc_mutex; /* protect buffer allocation according to numa node */ + int numa_node; + int fw_pages; + int reg_pages; + struct mutex pgdir_mutex; /* protect pgdir_list */ + struct list_head pgdir_list; + struct dentry *qp_debugfs; + struct dentry *eq_debugfs; + struct dentry *cq_debugfs; + struct dentry *cmdif_debugfs; + struct dentry *qptrace_debugfs; + struct dentry *dbg_root; +}; + +struct xsc_db { + __be32 *db; + union { + struct xsc_db_pgdir *pgdir; + struct xsc_ib_user_db_page *user_page; + } u; + dma_addr_t dma; + int index; +}; + +enum { + XSC_COMP_EQ_SIZE = 1024, +}; + +/* replaced by the struct definition in OFED */ +struct xsc_db_pgdir { + struct list_head list; + unsigned long *bitmap; + __be32 *db_page; + dma_addr_t db_dma; +}; + +static inline void *xsc_buf_offset(struct xsc_buf *buf, int offset) +{ + if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) + return buf->direct.buf + offset; + else + return buf->page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} + +static inline struct xsc_core_device *pci2xdev(struct pci_dev *pdev) +{ + return pci_get_drvdata(pdev); +} + +extern struct dentry *xsc_debugfs_root; + +static inline void *xsc_vzalloc(unsigned long size) +{ + void *rtn; + + rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!rtn) + rtn = vzalloc(size); + return rtn; +} + +static inline void xsc_vfree(const void *addr) +{ + if (addr && is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +int xsc_dev_init(struct xsc_core_device *xdev); +void xsc_dev_cleanup(struct xsc_core_device *xdev); +int xsc_cmd_init(struct xsc_core_device *xdev); +void xsc_cmd_cleanup(struct xsc_core_device *xdev); +void xsc_cmd_use_events(struct xsc_core_device *xdev); +void xsc_cmd_use_polling(struct xsc_core_device *xdev); +int xsc_cmd_err_handler(struct xsc_core_device *xdev); +void xsc_cmd_resp_handler(struct xsc_core_device *xdev); +int xsc_cmd_status_to_err(struct xsc_outbox_hdr *hdr); +int _xsc_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size); +int xsc_buf_alloc(struct xsc_core_device *xdev, int size, int max_direct, + struct
xsc_buf *buf); +void xsc_buf_free(struct xsc_core_device *dev, struct xsc_buf *buf); +int xsc_core_create_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr); +int xsc_core_destroy_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr); +int xsc_core_register_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr, + struct xsc_register_mr_mbox_in *in, int inlen); +int xsc_core_dereg_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr); +void xsc_reg_local_dma_mr(struct xsc_core_device *dev); +int xsc_core_alloc_pd(struct xsc_core_device *xdev, u32 *pdn); +int xsc_core_dealloc_pd(struct xsc_core_device *xdev, u32 pdn); +void xsc_register_debugfs(void); +void xsc_unregister_debugfs(void); +int xsc_eq_init(struct xsc_core_device *dev); +void xsc_eq_cleanup(struct xsc_core_device *dev); +struct xsc_eq *xsc_eq_get(struct xsc_core_device *dev, int index); + +void xsc_fill_page_array(struct xsc_buf *buf, __be64 *pas, int npages); +void xsc_fill_page_frag_array(struct xsc_frag_buf *buf, __be64 *pas, int npages); +void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type); +int xsc_vector2eqn(struct xsc_core_device *dev, int vector, int *eqn, + unsigned int *irqn); +void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type); +int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, + int nent, const char *name); +int xsc_destroy_unmap_eq(struct xsc_core_device *dev, struct xsc_eq *eq); +int xsc_start_eqs(struct xsc_core_device *dev); +void xsc_stop_eqs(struct xsc_core_device *dev); + +int xsc_qp_debugfs_init(struct xsc_core_device *dev); +void xsc_qp_debugfs_cleanup(struct xsc_core_device *dev); +int xsc_core_access_reg(struct xsc_core_device *xdev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write); +int xsc_set_port_caps(struct xsc_core_device *xdev, int port_num, u32 caps); + +int xsc_debug_eq_add(struct xsc_core_device *xdev, struct xsc_eq *eq); +void xsc_debug_eq_remove(struct xsc_core_device *xdev, struct xsc_eq *eq); +int xsc_core_eq_query(struct xsc_core_device *dev, struct xsc_eq *eq, + struct xsc_query_eq_mbox_out *out, int outlen); +int xsc_eq_debugfs_init(struct xsc_core_device *dev); +void xsc_eq_debugfs_cleanup(struct xsc_core_device *dev); +int xsc_cq_debugfs_init(struct xsc_core_device *dev); +void xsc_cq_debugfs_cleanup(struct xsc_core_device *dev); + +const char *xsc_command_str(int command); +int xsc_cmdif_debugfs_init(struct xsc_core_device *xdev); +void xsc_cmdif_debugfs_cleanup(struct xsc_core_device *xdev); + +int xsc_qptrace_debugfs_init(struct xsc_core_device *dev); +void xsc_qptrace_debugfs_cleanup(struct xsc_core_device *dev); + +int xsc_db_alloc_node(struct xsc_core_device *xdev, struct xsc_db *db, int node); +int xsc_frag_buf_alloc_node(struct xsc_core_device *xdev, int size, + struct xsc_frag_buf *buf, int node); +void xsc_db_free(struct xsc_core_device *xdev, struct xsc_db *db); +void xsc_frag_buf_free(struct xsc_core_device *xdev, struct xsc_frag_buf *buf); + +static inline u32 xsc_mkey_to_idx(u32 mkey) +{ + return mkey >> ((MMC_MPT_TBL_MEM_DEPTH == 32768) ? 17 : 18); +} + +static inline u32 xsc_idx_to_mkey(u32 mkey_idx) +{ + return mkey_idx << ((MMC_MPT_TBL_MEM_DEPTH == 32768) ? 
17 : 18); +} + +enum { + XSC_PROF_MASK_QP_SIZE = (u64)1 << 0, + XSC_PROF_MASK_CMDIF_CSUM = (u64)1 << 1, + XSC_PROF_MASK_MR_CACHE = (u64)1 << 2, +}; + +#endif /* XSC_DRIVER_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/port.h b/drivers/net/ethernet/yunsilicon/xsc/common/port.h new file mode 100644 index 0000000000000000000000000000000000000000..a44af6c88c0678334898a963ed2bb1ca982edc0f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/port.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __XSC_PORT_H__ +#define __XSC_PORT_H__ + +enum xsc_module_id { + XSC_MODULE_ID_SFP = 0x3, + XSC_MODULE_ID_QSFP = 0xC, + XSC_MODULE_ID_QSFP_PLUS = 0xD, + XSC_MODULE_ID_QSFP28 = 0x11, + XSC_MODULE_ID_QSFP_DD = 0x18, + XSC_MODULE_ID_DSFP = 0x1B, + XSC_MODULE_ID_QSFP_PLUS_CMIS = 0x1E, +}; + +#define XSC_EEPROM_MAX_BYTES 32 +#define XSC_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff +#define XSC_I2C_ADDR_LOW 0x50 +#define XSC_I2C_ADDR_HIGH 0x51 +#define XSC_EEPROM_PAGE_LENGTH 256 +#define XSC_EEPROM_HIGH_PAGE_LENGTH 128 + +struct xsc_module_eeprom_query_params { + u16 size; + u16 offset; + u16 i2c_address; + u32 page; + u32 bank; + u32 module_number; +}; + +int xsc_query_module_eeprom(struct xsc_core_device *dev, + u16 offset, u16 size, u8 *data); +int xsc_query_module_eeprom_by_page(struct xsc_core_device *dev, + struct xsc_module_eeprom_query_params *params, + u8 *data); +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/qp.h b/drivers/net/ethernet/yunsilicon/xsc/common/qp.h new file mode 100644 index 0000000000000000000000000000000000000000..fd3d6ee4a8dfe27ba6ae2999532acd9e8884529f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/qp.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
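port.h above declares the module-EEPROM query helpers alongside the SFF module identifiers. A hedged sketch of reading the identifier byte at offset 0, typically the first step of ethtool -m support; xsc_module_id_sketch() is illustrative and error handling is trimmed::

	static int xsc_module_id_sketch(struct xsc_core_device *dev, u8 *id)
	{
		u8 data[XSC_EEPROM_MAX_BYTES];
		int err;

		/* one byte at offset 0: the SFF identifier */
		err = xsc_query_module_eeprom(dev, 0, 1, data);
		if (err)
			return err;

		*id = data[0];	/* e.g. XSC_MODULE_ID_QSFP28 == 0x11 */
		return 0;
	}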
+ */ + +#ifndef XSC_QP_H +#define XSC_QP_H + +#include "common/xsc_hsi.h" +#include "common/device.h" +#include "common/driver.h" + +enum { + XSC_QP_PM_MIGRATED = 0x3, + XSC_QP_PM_ARMED = 0x0, + XSC_QP_PM_REARM = 0x1 +}; + +enum { + XSC_WQE_CTRL_CQ_UPDATE = 2 << 2, + XSC_WQE_CTRL_SOLICITED = 1 << 1, +}; + +struct xsc_send_wqe_ctrl_seg { + __le32 msg_opcode:8; + __le32 with_immdt:1; + __le32 csum_en:2; + __le32 ds_data_num:5; + __le32 wqe_id:16; + __le32 msg_len; + union { + __le32 opcode_data; + struct { + u8 has_pph:1; + u8 so_type:1; + __le16 so_data_size:14; + u8:8; + u8 so_hdr_len:8; + }; + struct { + __le16 desc_id; + __le16 is_last_wqe:1; + __le16 dst_qp_id:15; + }; + }; + __le32 se:1; + __le32 ce:1; + __le32:30; +}; + +struct xsc_wqe_data_seg { + union { + __le32 in_line:1; + struct { + __le32:1; + __le32 seg_len:31; + __le32 mkey; + __le64 va; + }; + struct { + __le32:1; + __le32 len:7; + u8 in_line_data[15]; + }; + }; +}; + +struct xsc_wqe_ctrl_seg_2 { + __be32 opmod_idx_opcode; + __be32 qpn_ds; + u8 signature; + u8 rsvd[2]; + u8 fm_ce_se; + __be32 imm; +}; + +struct xsc_av { + union { + struct { + __be32 qkey; + __be32 reserved; + } qkey; + __be64 dc_key; + } key; + __be32 dqp_dct; + u8 stat_rate_sl; + u8 fl_mlid; + union { + __be16 rlid; + __be16 udp_sport; + }; + u8 reserved0[4]; + u8 rmac[6]; + u8 tclass; + u8 hop_limit; + __be32 grh_gid_fl; + u8 rgid[16]; +}; + +struct xsc_wqe_data_seg_2 { + __be32 byte_count; + __be32 lkey; + __be64 addr; +}; + +struct xsc_core_qp { + void (*event)(struct xsc_core_qp *qp, int type); + int qpn; + atomic_t refcount; + struct completion free; + struct xsc_rsc_debug *dbg; + int pid; + u16 qp_type; + u16 eth_queue_type; + struct dentry *trace; + struct xsc_qp_trace *trace_info; + u16 qp_type_internal; + u16 grp_id; + u8 mac_id; +}; + +struct xsc_qp_rsc { + struct list_head node; + u32 qpn; + struct completion delayed_release; + struct xsc_core_device *xdev; +}; + +struct xsc_qp_path { + u8 fl; + u8 rsvd3; + u8 free_ar; + u8 pkey_index; + u8 rsvd0; + u8 grh_mlid; + __be16 rlid; + u8 ackto_lt; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 tclass_flowlabel; + u8 rgid[16]; + u8 rsvd1[4]; + u8 sl; + u8 port; + u8 rsvd2[6]; + u8 dmac[6]; + u8 smac[6]; + __be16 af_type; + __be32 sip[4]; + __be32 dip[4]; + __be16 sport; + u8 ecn_dscp; + u8 vlan_valid; + __be16 vlan_id; + u8 dci_cfi_prio_sl; //not left moved yet. 
+}; + +static inline struct xsc_core_qp *__xsc_qp_lookup(struct xsc_core_device *xdev, u32 qpn) +{ + return radix_tree_lookup(&xdev->dev_res->qp_table.tree, qpn); +} + +int create_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp); +void destroy_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp); + +int xsc_core_create_qp(struct xsc_core_device *xdev, + struct xsc_core_qp *qp, + struct xsc_create_qp_mbox_in *in, + int inlen); +int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state, + enum xsc_qp_state new_state, + struct xsc_modify_qp_mbox_in *in, int sqd_event, + struct xsc_core_qp *qp); +int xsc_core_destroy_qp(struct xsc_core_device *xdev, + struct xsc_core_qp *qp); +int xsc_core_qp_query(struct xsc_core_device *xdev, struct xsc_core_qp *qp, + struct xsc_query_qp_mbox_out *out, int outlen); + +void xsc_init_qp_table(struct xsc_core_device *xdev); +void xsc_cleanup_qp_table(struct xsc_core_device *xdev); +int xsc_debug_qp_add(struct xsc_core_device *xdev, struct xsc_core_qp *qp); +void xsc_debug_qp_remove(struct xsc_core_device *xdev, struct xsc_core_qp *qp); + +int xsc_create_qptrace(struct xsc_core_device *xdev, struct xsc_core_qp *qp); +void xsc_remove_qptrace(struct xsc_core_device *xdev, struct xsc_core_qp *qp); + +void xsc_init_delayed_release(void); +void xsc_stop_delayed_release(void); + +int xsc_modify_qp(struct xsc_core_device *xdev, + struct xsc_modify_qp_mbox_in *in, + struct xsc_modify_qp_mbox_out *out, + u32 qpn, u16 status); + +#endif /* XSC_QP_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h b/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h new file mode 100644 index 0000000000000000000000000000000000000000..57eb829f811b1dd8b4e41f8daf2545caf3565780 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
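qp.h above tracks each hardware QP number in the radix tree that __xsc_qp_lookup() walks. A sketch of how an event demultiplexer might use it, assuming the qp_table spinlock from driver.h protects the tree; refcounting against teardown is elided::

	static void xsc_qp_event_sketch(struct xsc_core_device *xdev,
					u32 qpn, int type)
	{
		struct xsc_qp_table *table = &xdev->dev_res->qp_table;
		struct xsc_core_qp *qp;

		spin_lock(&table->lock);
		qp = __xsc_qp_lookup(xdev, qpn);
		if (qp && qp->event)
			qp->event(qp, type);	/* owner-supplied callback */
		spin_unlock(&table->lock);
	}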
+ */ + +#ifndef __QPTS_H__ +#define __QPTS_H__ + +struct __packed xsc_qp_trace { + u16 main_ver; + u16 sub_ver; + u32 pid; + u16 qp_type; + u16 af_type; + union { + u32 s_addr4; + u8 s_addr6[16]; + } s_addr; + union { + u32 d_addr4; + u8 d_addr6[16]; + } d_addr; + u16 s_port; + u16 d_port; + u32 affinity_idx; + u64 timestamp; + u32 lqpn; + u32 rqpn; +}; + +struct __packed qpt_update_affinity { + u32 aff_new; + u32 aff_old; +}; + +struct __packed qpt_update_sport { + u16 port_new; + u16 port_old; +}; + +struct __packed qpt_update_data { + u64 timestamp; + u32 qpn; + u32 bus; + u32 dev; + u32 fun; + union { + struct qpt_update_affinity affinity; + struct qpt_update_sport sport; + } update; +}; + +struct __packed xsc_qpt_update_msg { + u16 main_ver; + u16 sub_ver; + u32 type; //0:UPDATE_TYPE_SPORT; 1:UPDATE_TYPE_AFFINITY + struct qpt_update_data data; +}; + +enum { + YS_QPTRACE_UPDATE_TYPE_SPORT = 0, + YS_QPTRACE_UPDATE_TYPE_AFFINITY, +}; + +#define YS_QPTRACE_VER_MAJOR 2 +#define YS_QPTRACE_VER_MINOR 0 + +int qpts_init(void); +void qpts_fini(void); +int qpts_write_one_msg(struct xsc_qpt_update_msg *msg); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h b/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..d259d69f2211023474fb837100032f10eeb7437d --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef RES_OBJ_H +#define RES_OBJ_H + +#include +#include +#include "common/xsc_core.h" + +struct xsc_res_obj { + struct list_head node; + struct xsc_bdf_file *file; + void (*release_method)(void *obj); + char *data; + unsigned int datalen; +}; + +struct xsc_pd_obj { + struct xsc_res_obj obj; + unsigned int pdn; +}; + +struct xsc_mr_obj { + struct xsc_res_obj obj; + unsigned int mkey; +}; + +struct xsc_cq_obj { + struct xsc_res_obj obj; + unsigned int cqn; +}; + +struct xsc_qp_obj { + struct xsc_res_obj obj; + unsigned int qpn; +}; + +struct xsc_pct_obj { + struct xsc_res_obj obj; + unsigned int pct_idx; +}; + +struct xsc_wct_obj { + struct xsc_res_obj obj; + unsigned int wct_idx; +}; + +struct xsc_em_obj { + struct xsc_res_obj obj; + unsigned int em_idx[54]; +}; + +struct xsc_flow_pct_v4_add { + char key[44]; + char mask[44]; + char ad[6]; + unsigned int priority; +}; + +struct xsc_flow_pct_v4_del { + char key[44]; + char mask[44]; + unsigned int priority; +}; + +struct xsc_flow_pct_v6_add { + char key[44]; + char mask[44]; + char ad[6]; + unsigned int priority; +}; + +struct xsc_flow_pct_v6_del { + char key[44]; + char mask[44]; + unsigned int priority; +}; + +enum RES_OBJ_TYPE { + RES_OBJ_PD, + RES_OBJ_MR, + RES_OBJ_CQ, + RES_OBJ_QP, + RES_OBJ_PCT, + RES_OBJ_WCT, + RES_OBJ_EM, + RES_OBJ_MAX +}; + +static inline unsigned long xsc_idx_to_key(unsigned int obj_type, unsigned int idx) +{ + return ((unsigned long)obj_type << 32) | idx; +} + +int xsc_alloc_pd_obj(struct xsc_bdf_file *file, unsigned int pdn, + char *data, unsigned int datalen); +void xsc_destroy_pd_obj(struct xsc_bdf_file *file, unsigned int pdn); + +int xsc_alloc_mr_obj(struct xsc_bdf_file *file, unsigned int mkey, + char *data, unsigned int datalen); +void xsc_destroy_mr_obj(struct xsc_bdf_file *file, unsigned int mkey); + +int xsc_alloc_cq_obj(struct xsc_bdf_file *file, unsigned int cqn, + char *data, unsigned int datalen); +void xsc_destroy_cq_obj(struct 
xsc_bdf_file *file, unsigned int cqn); + +int xsc_alloc_qp_obj(struct xsc_bdf_file *file, unsigned int qpn, + char *data, unsigned int datalen); +void xsc_destroy_qp_obj(struct xsc_bdf_file *file, unsigned int qpn); + +int xsc_alloc_pct_obj(struct xsc_bdf_file *file, unsigned int priority, + char *data, unsigned int datalen); +void xsc_destroy_pct_obj(struct xsc_bdf_file *file, unsigned int priority); + +void xsc_close_bdf_file(struct xsc_bdf_file *file); + +void xsc_send_cmd_2rst_qp(struct xsc_core_device *xdev, unsigned int qpn); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/version.h b/drivers/net/ethernet/yunsilicon/xsc/common/version.h new file mode 100644 index 0000000000000000000000000000000000000000..8c7c6e03f5a147afb57981cadda7736cc24b2c5b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/version.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#define BRANCH_VERSION 1 +#define MAJOR_VERSION 2 +#define MINOR_VERSION 0 +#define BUILD_VERSION 367 +#define HOTFIX_NUM 446 diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/vport.h b/drivers/net/ethernet/yunsilicon/xsc/common/vport.h new file mode 100644 index 0000000000000000000000000000000000000000..dad39f12e26590d87560b770da1335db472df7e2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/vport.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_VPORT_H +#define XSC_VPORT_H + +#include "common/xsc_core.h" +#include +#include "common/xsc_fs.h" + +#define XSC_VPORT_PF_PLACEHOLDER (1u) +#define XSC_VPORT_UPLINK_PLACEHOLDER (1u) +#define XSC_VPORT_ECPF_PLACEHOLDER(dev) (xsc_ecpf_vport_exists(dev) || \ + xsc_core_is_ecpf_esw_manager(dev)) + +#define XSC_SPECIAL_VPORTS(dev) (XSC_VPORT_PF_PLACEHOLDER + \ + XSC_VPORT_UPLINK_PLACEHOLDER + \ + XSC_VPORT_ECPF_PLACEHOLDER(dev)) + +#define XSC_VPORT_MANAGER(dev) (xsc_core_is_vport_manager(dev)) + +enum { + XSC_CAP_INLINE_MODE_L2, + XSC_CAP_INLINE_MODE_VPORT_CONTEXT, + XSC_CAP_INLINE_MODE_NOT_REQUIRED, +}; + +/* The vport number for each function must remain unchanged */ +enum { + XSC_VPORT_PF = 0x0, + XSC_VPORT_FIRST_VF = 0x1, + XSC_VPORT_ECPF = 0xfffe, + XSC_VPORT_UPLINK = 0xffff, +}; + +enum { + XSC_VPORT_ADMIN_STATE_DOWN = 0x0, + XSC_VPORT_ADMIN_STATE_UP = 0x1, + XSC_VPORT_ADMIN_STATE_AUTO = 0x2, +}; + +u8 xsc_query_vport_state(struct xsc_core_device *dev, u16 opmod, u16 vport); +int xsc_modify_vport_admin_state(struct xsc_core_device *dev, u16 opmod, + u16 vport, u8 other_vport, u8 state); +int xsc_query_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr); +int xsc_query_other_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr); +int xsc_query_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 *min_inline); +void xsc_query_min_inline(struct xsc_core_device *dev, u8 *min_inline); +int xsc_modify_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 min_inline); +int xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, bool perm_mac); +int xsc_modify_other_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, bool perm_mac); +int xsc_query_nic_vport_mtu(struct xsc_core_device *dev, u16 *mtu); +int xsc_modify_nic_vport_mtu(struct xsc_core_device *dev, u16 mtu); +int
xsc_query_nic_vport_system_image_guid(struct xsc_core_device *dev, + u64 *system_image_guid); +int xsc_query_nic_vport_node_guid(struct xsc_core_device *dev, u32 vport, + u64 *node_guid); +int xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid); +int xsc_modify_other_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid); +int xsc_query_nic_vport_qkey_viol_cntr(struct xsc_core_device *dev, + u16 *qkey_viol_cntr); +int xsc_query_hca_vport_gid(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid); +int xsc_query_hca_vport_pkey(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey); +int xsc_query_hca_vport_context(struct xsc_core_device *dev, + u8 other_vport, u8 port_num, + u16 vf_num, + struct xsc_hca_vport_context *rep); +int xsc_query_hca_vport_node_guid(struct xsc_core_device *dev, + u64 *node_guid); +int xsc_query_nic_vport_mac_list(struct xsc_core_device *dev, + u16 vport, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size); +int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size); +int xsc_query_nic_vport_promisc(struct xsc_core_device *dev, + u16 vport, + int *promisc, + int *allmcast); +int xsc_modify_nic_vport_promisc(struct xsc_core_device *dev, + bool allmcast_flag, bool promisc_flag, + int allmcast, int promisc); +int xsc_modify_nic_vport_spoofchk(struct xsc_core_device *dev, + u16 vport, int spoofchk); +int xsc_modify_nic_vport_trust(struct xsc_core_device *dev, + u16 vport, bool trust); +int xsc_query_nic_vport_vlans(struct xsc_core_device *dev, u32 vport, + unsigned long *vlans); +int xsc_modify_nic_vport_vlans(struct xsc_core_device *dev, + u16 vid, bool add); +int xsc_query_vport_down_stats(struct xsc_core_device *dev, u16 vport, + u8 other_vport, u64 *rx_discard_vport_down, + u64 *tx_discard_vport_down); +int xsc_query_vport_counter(struct xsc_core_device *dev, u8 other_vport, + int vf, u8 port_num, void *out, + size_t out_sz); +int xsc_modify_hca_vport_context(struct xsc_core_device *dev, + u8 other_vport, u8 port_num, + int vf, + struct xsc_hca_vport_context *req); +int xsc_modify_vport_max_rate(struct xsc_core_device *dev, + u16 vport, u32 rate); + +u16 xsc_eswitch_get_total_vports(const struct xsc_core_device *dev); +int xsc_modify_nic_vport_context(struct xsc_core_device *dev, void *in, + int inlen); +int __xsc_query_nic_vport_context(struct xsc_core_device *dev, + u16 vport, void *out, int outlen, + int force_other); +#endif /* XSC_VPORT_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..4864cb747cdea43b8f3a7f9b417dd022a1b7728b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
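The vport interface above follows the query/modify mailbox split common to switchdev-capable NICs. A hedged sketch of setting a VF's administrative MAC, assuming VF n maps to vport XSC_VPORT_FIRST_VF + n as the enum above suggests; xsc_set_vf_mac_sketch() is illustrative only::

	static int xsc_set_vf_mac_sketch(struct xsc_core_device *dev,
					 u16 vf_num, u8 *mac)
	{
		u16 vport = XSC_VPORT_FIRST_VF + vf_num;
		u8 cur[ETH_ALEN];
		int err;

		/* read back the current administrative MAC first */
		err = xsc_query_nic_vport_mac_address(dev, vport, cur);
		if (err)
			return err;

		/* perm_mac=false: administrative, not permanent, MAC */
		return xsc_modify_nic_vport_mac_address(dev, vport, mac, false);
	}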
+ */ +/* generated time: + * Thu Feb 29 15:33:50 CST 2024 + */ + +#ifndef XSC_HW_H +#define XSC_HW_H + +//hif_irq_csr_defines.h +#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR 0xa1100070 + +//hif_cpm_csr_defines.h +#define HIF_CPM_LOCK_GET_REG_ADDR 0xa0000104 +#define HIF_CPM_LOCK_PUT_REG_ADDR 0xa0000108 +#define HIF_CPM_LOCK_AVAIL_REG_ADDR 0xa000010c +#define HIF_CPM_IDA_DATA_MEM_ADDR 0xa0000800 +#define HIF_CPM_IDA_CMD_REG_ADDR 0xa0000020 +#define HIF_CPM_IDA_ADDR_REG_ADDR 0xa0000080 +#define HIF_CPM_IDA_BUSY_REG_ADDR 0xa0000100 +#define HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH 5 +#define HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH 4 +#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH 1 +#define HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT 5 +#define HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK 0x1f +#define HIF_CPM_IDA_ADDR_REG_STRIDE 0x4 +#define HIF_CPM_CHIP_VERSION_H_REG_ADDR 0xa0000010 + +//mmc_csr_defines.h +#define MMC_MPT_TBL_MEM_DEPTH 32768 +#define MMC_MTT_TBL_MEM_DEPTH 262144 +#define MMC_MPT_TBL_MEM_WIDTH 256 +#define MMC_MTT_TBL_MEM_WIDTH 64 +#define MMC_MPT_TBL_MEM_ADDR 0xa4100000 +#define MMC_MTT_TBL_MEM_ADDR 0xa4200000 + +//clsf_dma_csr_defines.h +#define CLSF_DMA_DMA_UL_BUSY_REG_ADDR 0xa6010048 +#define CLSF_DMA_DMA_DL_DONE_REG_ADDR 0xa60100d0 +#define CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR 0xa60100c0 +#define CLSF_DMA_ERR_CODE_CLR_REG_ADDR 0xa60100d4 +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK 0x7f +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR 0xa6010020 +#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT 16 +#define CLSF_DMA_DMA_RD_ADDR_REG_ADDR 0xa6010024 +#define CLSF_DMA_INDRW_RD_START_REG_ADDR 0xa6010028 + +//hif_tbl_csr_defines.h +#define HIF_TBL_TBL_DL_BUSY_REG_ADDR 0xa1060030 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT 12 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_DL_REQ_REG_ADDR 0xa1060020 +#define HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_L_REG_ADDR 0xa1060024 +#define HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_H_REG_ADDR 0xa1060028 +#define HIF_TBL_TBL_DL_START_REG_ADDR 0xa106002c +#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_UL_REQ_REG_ADDR 0xa106007c +#define HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_L_REG_ADDR 0xa1060080 +#define HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_H_REG_ADDR 0xa1060084 +#define HIF_TBL_TBL_UL_START_REG_ADDR 0xa1060088 +#define HIF_TBL_MSG_RDY_REG_ADDR 0xa1060044 + +//hif_cmdqm_csr_defines.h +#define HIF_CMDQM_HOST_REQ_PID_MEM_ADDR 0xa1026000 +#define HIF_CMDQM_HOST_REQ_CID_MEM_ADDR 0xa1028000 +#define HIF_CMDQM_HOST_RSP_PID_MEM_ADDR 0xa102e000 +#define HIF_CMDQM_HOST_RSP_CID_MEM_ADDR 0xa1030000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0xa1022000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0xa1024000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR 0xa102a000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR 0xa102c000 +#define HIF_CMDQM_VECTOR_ID_MEM_ADDR 0xa1034000 +#define HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR 0xa1020020 +#define HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR 0xa1020028 +#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR 0xa1032000 + +//PSV use +//hif_irq_csr_defines.h +#define HIF_IRQ_CONTROL_TBL_MEM_ADDR 0xa1102000 +#define HIF_IRQ_INT_DB_REG_ADDR 0xa11000b4 +#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_ADDR 0xa1100114 +#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_ADDR 0xa11000f0 +#define 
HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_ADDR 0xa11000ec +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_ADDR 0xa11000f4 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_ADDR 0xa11000f8 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_ADDR 0xa11000fc +#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_ADDR 0xa1100100 +#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_ADDR 0xa11000e8 + +#endif /* XSC_HW_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..1d5d0e6c8c78dfcc0b8f5a796cd744eda2a0b855 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h @@ -0,0 +1,2513 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_CMD_H +#define XSC_CMD_H + +#define CMDQ_VERSION 0x32 + +#define MAX_MBOX_OUT_LEN 2048 + +#define QOS_PRIO_MAX 7 +#define QOS_DSCP_MAX 63 +#define MAC_PORT_DSCP_SHIFT 6 +#define QOS_PCP_MAX 7 +#define DSCP_PCP_UNSET 255 +#define MAC_PORT_PCP_SHIFT 3 +#define XSC_MAX_MAC_NUM 8 +#define XSC_BOARD_SN_LEN 32 +#define MAX_PKT_LEN 9800 +#define XSC_RTT_CFG_QPN_MAX 32 + +#define XSC_PCIE_LAT_CFG_INTERVAL_MAX 8 +#define XSC_PCIE_LAT_CFG_HISTOGRAM_MAX 9 +#define XSC_PCIE_LAT_EN_DISABLE 0 +#define XSC_PCIE_LAT_EN_ENABLE 1 +#define XSC_PCIE_LAT_PERIOD_MIN 1 +#define XSC_PCIE_LAT_PERIOD_MAX 20 +#define DPU_PORT_WGHT_CFG_MAX 1 + +enum { + XSC_CMD_STAT_OK = 0x0, + XSC_CMD_STAT_INT_ERR = 0x1, + XSC_CMD_STAT_BAD_OP_ERR = 0x2, + XSC_CMD_STAT_BAD_PARAM_ERR = 0x3, + XSC_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, + XSC_CMD_STAT_BAD_RES_ERR = 0x5, + XSC_CMD_STAT_RES_BUSY = 0x6, + XSC_CMD_STAT_LIM_ERR = 0x8, + XSC_CMD_STAT_BAD_RES_STATE_ERR = 0x9, + XSC_CMD_STAT_IX_ERR = 0xa, + XSC_CMD_STAT_NO_RES_ERR = 0xf, + XSC_CMD_STAT_BAD_INP_LEN_ERR = 0x50, + XSC_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, + XSC_CMD_STAT_BAD_QP_STATE_ERR = 0x10, + XSC_CMD_STAT_BAD_PKT_ERR = 0x30, + XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, +}; + +enum { + DPU_PORT_WGHT_TARGET_HOST, + DPU_PORT_WGHT_TARGET_SOC, + DPU_PORT_WGHT_TARGET_NUM, +}; + +enum { + DPU_PRIO_WGHT_TARGET_HOST2SOC, + DPU_PRIO_WGHT_TARGET_SOC2HOST, + DPU_PRIO_WGHT_TARGET_HOSTSOC2LAG, + DPU_PRIO_WGHT_TARGET_NUM, +}; + +#define XSC_AP_FEAT_UDP_SPORT_MIN 1024 +#define XSC_AP_FEAT_UDP_SPORT_MAX 65535 + +enum { + XSC_CMD_OP_QUERY_HCA_CAP = 0x100, + XSC_CMD_OP_QUERY_ADAPTER = 0x101, + XSC_CMD_OP_INIT_HCA = 0x102, + XSC_CMD_OP_TEARDOWN_HCA = 0x103, + XSC_CMD_OP_ENABLE_HCA = 0x104, + XSC_CMD_OP_DISABLE_HCA = 0x105, + XSC_CMD_OP_MODIFY_HCA = 0x106, + XSC_CMD_OP_QUERY_PAGES = 0x107, + XSC_CMD_OP_MANAGE_PAGES = 0x108, + XSC_CMD_OP_SET_HCA_CAP = 0x109, + XSC_CMD_OP_QUERY_CMDQ_VERSION = 0x10a, + XSC_CMD_OP_QUERY_MSIX_TBL_INFO = 0x10b, + XSC_CMD_OP_FUNCTION_RESET = 0x10c, + XSC_CMD_OP_DUMMY = 0x10d, + XSC_CMD_OP_SET_DEBUG_INFO = 0x10e, + XSC_CMD_OP_QUERY_PSV_FUNCID = 0x10f, + XSC_CMD_OP_ALLOC_IA_LOCK = 0x110, + XSC_CMD_OP_RELEASE_IA_LOCK = 0x111, + XSC_CMD_OP_ENABLE_RELAXED_ORDER = 0x112, + XSC_CMD_OP_QUERY_GUID = 0x113, + XSC_CMD_OP_ACTIVATE_HW_CONFIG = 0x114, + + XSC_CMD_OP_CREATE_MKEY = 0x200, + XSC_CMD_OP_QUERY_MKEY = 0x201, + XSC_CMD_OP_DESTROY_MKEY = 0x202, + XSC_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, + XSC_CMD_OP_REG_MR = 0x204, + XSC_CMD_OP_DEREG_MR = 0x205, + XSC_CMD_OP_SET_MPT = 0x206, + XSC_CMD_OP_SET_MTT = 0x207, + + XSC_CMD_OP_CREATE_EQ = 0x301, + XSC_CMD_OP_DESTROY_EQ = 0x302, + XSC_CMD_OP_QUERY_EQ = 0x303, + + XSC_CMD_OP_CREATE_CQ = 0x400, 
+ XSC_CMD_OP_DESTROY_CQ = 0x401, + XSC_CMD_OP_QUERY_CQ = 0x402, + XSC_CMD_OP_MODIFY_CQ = 0x403, + XSC_CMD_OP_ALLOC_MULTI_VIRTQ_CQ = 0x404, + XSC_CMD_OP_RELEASE_MULTI_VIRTQ_CQ = 0x405, + + XSC_CMD_OP_CREATE_QP = 0x500, + XSC_CMD_OP_DESTROY_QP = 0x501, + XSC_CMD_OP_RST2INIT_QP = 0x502, + XSC_CMD_OP_INIT2RTR_QP = 0x503, + XSC_CMD_OP_RTR2RTS_QP = 0x504, + XSC_CMD_OP_RTS2RTS_QP = 0x505, + XSC_CMD_OP_SQERR2RTS_QP = 0x506, + XSC_CMD_OP_2ERR_QP = 0x507, + XSC_CMD_OP_RTS2SQD_QP = 0x508, + XSC_CMD_OP_SQD2RTS_QP = 0x509, + XSC_CMD_OP_2RST_QP = 0x50a, + XSC_CMD_OP_QUERY_QP = 0x50b, + XSC_CMD_OP_CONF_SQP = 0x50c, + XSC_CMD_OP_MAD_IFC = 0x50d, + XSC_CMD_OP_INIT2INIT_QP = 0x50e, + XSC_CMD_OP_SUSPEND_QP = 0x50f, + XSC_CMD_OP_UNSUSPEND_QP = 0x510, + XSC_CMD_OP_SQD2SQD_QP = 0x511, + XSC_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512, + XSC_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513, + XSC_CMD_OP_QUERY_QP_COUNTER_SET = 0x514, + XSC_CMD_OP_CREATE_MULTI_QP = 0x515, + XSC_CMD_OP_ALLOC_MULTI_VIRTQ = 0x516, + XSC_CMD_OP_RELEASE_MULTI_VIRTQ = 0x517, + XSC_CMD_OP_QUERY_QP_FLUSH_STATUS = 0x518, + + XSC_CMD_OP_CREATE_PSV = 0x600, + XSC_CMD_OP_DESTROY_PSV = 0x601, + XSC_CMD_OP_QUERY_PSV = 0x602, + XSC_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603, + XSC_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604, + + XSC_CMD_OP_CREATE_SRQ = 0x700, + XSC_CMD_OP_DESTROY_SRQ = 0x701, + XSC_CMD_OP_QUERY_SRQ = 0x702, + XSC_CMD_OP_ARM_RQ = 0x703, + XSC_CMD_OP_RESIZE_SRQ = 0x704, + + XSC_CMD_OP_ALLOC_PD = 0x800, + XSC_CMD_OP_DEALLOC_PD = 0x801, + XSC_CMD_OP_ALLOC_UAR = 0x802, + XSC_CMD_OP_DEALLOC_UAR = 0x803, + + XSC_CMD_OP_ATTACH_TO_MCG = 0x806, + XSC_CMD_OP_DETACH_FROM_MCG = 0x807, + + XSC_CMD_OP_ALLOC_XRCD = 0x80e, + XSC_CMD_OP_DEALLOC_XRCD = 0x80f, + + XSC_CMD_OP_ACCESS_REG = 0x805, + + XSC_CMD_OP_MODIFY_RAW_QP = 0x81f, + + XSC_CMD_OP_ENABLE_NIC_HCA = 0x810, + XSC_CMD_OP_DISABLE_NIC_HCA = 0x811, + XSC_CMD_OP_MODIFY_NIC_HCA = 0x812, + + XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x820, + XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x821, + XSC_CMD_OP_QUERY_VPORT_STATE = 0x822, + XSC_CMD_OP_MODIFY_VPORT_STATE = 0x823, + XSC_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x824, + XSC_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x825, + XSC_CMD_OP_QUERY_HCA_VPORT_GID = 0x826, + XSC_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x827, + XSC_CMD_OP_QUERY_VPORT_COUNTER = 0x828, + XSC_CMD_OP_QUERY_PRIO_STATS = 0x829, + XSC_CMD_OP_QUERY_PHYPORT_STATE = 0x830, + XSC_CMD_OP_QUERY_EVENT_TYPE = 0x831, + XSC_CMD_OP_QUERY_LINK_INFO = 0x832, + XSC_CMD_OP_QUERY_PFC_PRIO_STATS = 0x833, + XSC_CMD_OP_MODIFY_LINK_INFO = 0x834, + XSC_CMD_OP_QUERY_FEC_PARAM = 0x835, + XSC_CMD_OP_MODIFY_FEC_PARAM = 0x836, + + XSC_CMD_OP_LAG_CREATE = 0x840, + XSC_CMD_OP_LAG_ADD_MEMBER = 0x841, + XSC_CMD_OP_LAG_REMOVE_MEMBER = 0x842, + XSC_CMD_OP_LAG_UPDATE_MEMBER_STATUS = 0x843, + XSC_CMD_OP_LAG_UPDATE_HASH_TYPE = 0x844, + XSC_CMD_OP_LAG_DESTROY = 0x845, + + XSC_CMD_OP_LAG_SET_QOS = 0x848, + XSC_CMD_OP_ENABLE_MSIX = 0x850, + + XSC_CMD_OP_IOCTL_FLOW = 0x900, + XSC_CMD_OP_IOCTL_OTHER = 0x901, + + XSC_CMD_OP_IOCTL_SET_DSCP_PMT = 0x1000, + XSC_CMD_OP_IOCTL_GET_DSCP_PMT = 0x1001, + XSC_CMD_OP_IOCTL_SET_TRUST_MODE = 0x1002, + XSC_CMD_OP_IOCTL_GET_TRUST_MODE = 0x1003, + XSC_CMD_OP_IOCTL_SET_PCP_PMT = 0x1004, + XSC_CMD_OP_IOCTL_GET_PCP_PMT = 0x1005, + XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI = 0x1006, + XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI = 0x1007, + XSC_CMD_OP_IOCTL_SET_PFC = 0x1008, + XSC_CMD_OP_IOCTL_GET_PFC = 0x1009, + XSC_CMD_OP_IOCTL_SET_RATE_LIMIT = 0x100a, + XSC_CMD_OP_IOCTL_GET_RATE_LIMIT = 0x100b, + XSC_CMD_OP_IOCTL_SET_SP = 0x100c, + XSC_CMD_OP_IOCTL_GET_SP = 0x100d, 
+ XSC_CMD_OP_IOCTL_SET_WEIGHT = 0x100e, + XSC_CMD_OP_IOCTL_GET_WEIGHT = 0x100f, + XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT = 0x1010, + XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT = 0x1011, + XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT = 0x1012, + XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT = 0x1013, + XSC_CMD_OP_IOCTL_SET_WATCHDOG_EN = 0x1014, + XSC_CMD_OP_IOCTL_GET_WATCHDOG_EN = 0x1015, + XSC_CMD_OP_IOCTL_SET_WATCHDOG_PERIOD = 0x1016, + XSC_CMD_OP_IOCTL_GET_WATCHDOG_PERIOD = 0x1017, + XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH = 0x1018, + XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS = 0x1019, + + XSC_CMD_OP_IOCTL_SET_ENABLE_RP = 0x1030, + XSC_CMD_OP_IOCTL_SET_ENABLE_NP = 0x1031, + XSC_CMD_OP_IOCTL_SET_INIT_ALPHA = 0x1032, + XSC_CMD_OP_IOCTL_SET_G = 0x1033, + XSC_CMD_OP_IOCTL_SET_AI = 0x1034, + XSC_CMD_OP_IOCTL_SET_HAI = 0x1035, + XSC_CMD_OP_IOCTL_SET_TH = 0x1036, + XSC_CMD_OP_IOCTL_SET_BC_TH = 0x1037, + XSC_CMD_OP_IOCTL_SET_CNP_OPCODE = 0x1038, + XSC_CMD_OP_IOCTL_SET_CNP_BTH_B = 0x1039, + XSC_CMD_OP_IOCTL_SET_CNP_BTH_F = 0x103a, + XSC_CMD_OP_IOCTL_SET_CNP_ECN = 0x103b, + XSC_CMD_OP_IOCTL_SET_DATA_ECN = 0x103c, + XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL = 0x103d, + XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME = 0x103e, + XSC_CMD_OP_IOCTL_SET_CNP_DSCP = 0x103f, + XSC_CMD_OP_IOCTL_SET_CNP_PCP = 0x1040, + XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA = 0x1041, + XSC_CMD_OP_IOCTL_GET_CC_CFG = 0x1042, + XSC_CMD_OP_IOCTL_GET_CC_STAT = 0x104b, + XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE = 0x1052, + XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR = 0x1053, + XSC_CMD_OP_IOCTL_SET_SCALE = 0x1054, + + XSC_CMD_OP_IOCTL_SET_HWC = 0x1060, + XSC_CMD_OP_IOCTL_GET_HWC = 0x1061, + + XSC_CMD_OP_SET_MTU = 0x1100, + XSC_CMD_OP_QUERY_ETH_MAC = 0X1101, + + XSC_CMD_OP_QUERY_HW_STATS = 0X1200, + XSC_CMD_OP_QUERY_PAUSE_CNT = 0X1201, + XSC_CMD_OP_IOCTL_QUERY_PFC_STALL_STATS = 0x1202, + XSC_CMD_OP_QUERY_HW_STATS_RDMA = 0X1203, + XSC_CMD_OP_QUERY_HW_STATS_ETH = 0X1204, + XSC_CMD_OP_QUERY_HW_GLOBAL_STATS = 0X1210, + + XSC_CMD_OP_SET_RTT_EN = 0X1220, + XSC_CMD_OP_GET_RTT_EN = 0X1221, + XSC_CMD_OP_SET_RTT_QPN = 0X1222, + XSC_CMD_OP_GET_RTT_QPN = 0X1223, + XSC_CMD_OP_SET_RTT_PERIOD = 0X1224, + XSC_CMD_OP_GET_RTT_PERIOD = 0X1225, + XSC_CMD_OP_GET_RTT_RESULT = 0X1226, + XSC_CMD_OP_GET_RTT_STATS = 0X1227, + + XSC_CMD_OP_SET_LED_STATUS = 0X1228, + + XSC_CMD_OP_AP_FEAT = 0x1400, + XSC_CMD_OP_PCIE_LAT_FEAT = 0x1401, + + XSC_CMD_OP_GET_LLDP_STATUS = 0x1500, + XSC_CMD_OP_SET_LLDP_STATUS = 0x1501, + + XSC_CMD_OP_SET_VPORT_RATE_LIMIT = 0x1600, + + XSC_CMD_OP_SET_PORT_ADMIN_STATUS = 0x1801, + XSC_CMD_OP_USER_EMU_CMD = 0x8000, + + XSC_CMD_OP_MAX +}; + +enum { + XSC_CMD_EVENT_RESP_CHANGE_LINK = 0x0001, + XSC_CMD_EVENT_RESP_TEMP_WARN = 0x0002, + XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION = 0x0004, +}; + +enum xsc_eth_qp_num_sel { + XSC_ETH_QP_NUM_8K_SEL = 0, + XSC_ETH_QP_NUM_8K_8TC_SEL, + XSC_ETH_QP_NUM_SEL_MAX, +}; + +enum xsc_eth_vf_num_sel { + XSC_ETH_VF_NUM_SEL_8 = 0, + XSC_ETH_VF_NUM_SEL_16, + XSC_ETH_VF_NUM_SEL_32, + XSC_ETH_VF_NUM_SEL_64, + XSC_ETH_VF_NUM_SEL_128, + XSC_ETH_VF_NUM_SEL_256, + XSC_ETH_VF_NUM_SEL_512, + XSC_ETH_VF_NUM_SEL_1024, + XSC_ETH_VF_NUM_SEL_MAX +}; + +enum { + LINKSPEED_MODE_UNKNOWN = -1, + LINKSPEED_MODE_10G = 10000, + LINKSPEED_MODE_25G = 25000, + LINKSPEED_MODE_40G = 40000, + LINKSPEED_MODE_50G = 50000, + LINKSPEED_MODE_100G = 100000, + LINKSPEED_MODE_200G = 200000, + LINKSPEED_MODE_400G = 400000, +}; + +enum { + MODULE_SPEED_UNKNOWN, + MODULE_SPEED_10G, + MODULE_SPEED_25G, + MODULE_SPEED_40G_R4, + MODULE_SPEED_50G_R, + MODULE_SPEED_50G_R2, + MODULE_SPEED_100G_R2, + 
MODULE_SPEED_100G_R4, + MODULE_SPEED_200G_R4, + MODULE_SPEED_200G_R8, + MODULE_SPEED_400G_R8, +}; + +enum xsc_dma_direct { + DMA_DIR_TO_MAC, + DMA_DIR_READ, + DMA_DIR_WRITE, + DMA_DIR_LOOPBACK, + DMA_DIR_MAX, +}; + +/* hw feature bitmap, 32-bit */ +enum xsc_hw_feature_flag { + XSC_HW_RDMA_SUPPORT = 0x1, + XSC_HW_PFC_PRIO_STATISTIC_SUPPORT = 0x2, + XSC_HW_THIRD_FEATURE = 0x4, + XSC_HW_PFC_STALL_STATS_SUPPORT = 0x8, + XSC_HW_RDMA_CM_SUPPORT = 0x20, + + XSC_HW_LAST_FEATURE = 0x80000000, +}; + +enum xsc_lldp_dcbx_sub_cmd { + XSC_OS_HANDLE_LLDP_STATUS = 0x1, + XSC_DCBX_STATUS +}; + +struct xsc_inbox_hdr { + __be16 opcode; + u8 rsvd[4]; + __be16 ver; +}; + +struct xsc_outbox_hdr { + u8 status; + u8 rsvd[5]; + __be16 ver; +}; + +struct xsc_alloc_ia_lock_mbox_in { + struct xsc_inbox_hdr hdr; + u8 lock_num; + u8 rsvd[7]; +}; + +#define XSC_RES_NUM_IAE_GRP 16 + +struct xsc_alloc_ia_lock_mbox_out { + struct xsc_outbox_hdr hdr; + u8 lock_idx[XSC_RES_NUM_IAE_GRP]; +}; + +struct xsc_release_ia_lock_mbox_in { + struct xsc_inbox_hdr hdr; + u8 lock_idx[XSC_RES_NUM_IAE_GRP]; +}; + +struct xsc_release_ia_lock_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_pci_driver_init_params_in { + struct xsc_inbox_hdr hdr; + __be32 s_wqe_mode; + __be32 r_wqe_mode; + __be32 local_timeout_retrans; + u8 mac_lossless_prio[XSC_MAX_MAC_NUM]; + __be32 group_mod; +}; + +struct xsc_pci_driver_init_params_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/* CQ mbox */ +struct xsc_cq_context { + __be16 eqn; + __be16 pa_num; + __be16 glb_func_id; + u8 log_cq_sz; + u8 cq_type; +}; + +struct xsc_create_cq_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_cq_context ctx; + __be64 pas[]; +}; + +struct xsc_create_cq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 cqn; + u8 rsvd[4]; +}; + +struct xsc_destroy_cq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 cqn; + u8 rsvd[4]; +}; + +struct xsc_destroy_cq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/* QP mbox */ +struct xsc_create_qp_request { + __be16 input_qpn; + __be16 pa_num; + u8 qp_type; + u8 log_sq_sz; + u8 log_rq_sz; + u8 dma_direct; /* 0 for dma read, 1 for dma write */ + __be32 pdn; + __be16 cqn_send; + __be16 cqn_recv; + __be16 glb_funcid; + /* reserved; formerly logic_port, used to transfer the logical port to fw */ + u8 rsvd[2]; + __be64 pas[]; +}; + +struct xsc_create_qp_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_create_qp_request req; +}; + +struct xsc_create_qp_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct xsc_destroy_qp_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct xsc_destroy_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_query_qp_flush_status_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; +}; + +struct xsc_query_qp_flush_status_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_qp_context { + __be32 remote_qpn; + __be32 cqn_send; + __be32 cqn_recv; + __be32 next_send_psn; + __be32 next_recv_psn; + __be32 pdn; + __be16 src_udp_port; + __be16 path_id; + u8 mtu_mode; + u8 lag_sel; + u8 lag_sel_en; + u8 retry_cnt; + u8 rnr_retry; + u8 dscp; + u8 state; + u8 hop_limit; + u8 dmac[6]; + u8 smac[6]; + __be32 dip[4]; + __be32 sip[4]; + __be16 ip_type; + __be16 grp_id; + u8 vlan_valid; + u8 dci_cfi_prio_sl; + __be16 vlan_id; + u8 qp_out_port; + u8 pcie_no; + __be16 lag_id; + __be16 func_id; + __be16 rsvd; +}; + +struct xsc_query_qp_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct xsc_query_qp_mbox_out { + 
struct xsc_outbox_hdr hdr; + struct xsc_qp_context ctx; +}; + +struct xsc_modify_qp_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; + struct xsc_qp_context ctx; + u8 no_need_wait; +}; + +struct xsc_modify_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_create_multiqp_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qp_num; + u8 qp_type; + u8 rsvd; + __be32 req_len; + u8 data[]; +}; + +struct xsc_create_multiqp_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qpn_base; +}; + +struct xsc_alloc_multi_virtq_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qp_or_cq_num; + __be16 pa_num; + __be32 rsvd; + __be32 rsvd2; +}; + +struct xsc_alloc_multi_virtq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qnum_base; + __be32 pa_list_base; + __be32 rsvd; +}; + +struct xsc_release_multi_virtq_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qp_or_cq_num; + __be16 pa_num; + __be32 qnum_base; + __be32 pa_list_base; +}; + +struct xsc_release_multi_virtq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 rsvd; + __be32 rsvd2; + __be32 rsvd3; +}; + +/* MSIX TABLE mbox */ +struct xsc_msix_table_info_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 index; + u8 rsvd[6]; +}; + +struct xsc_msix_table_info_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 addr_lo; + __be32 addr_hi; + __be32 data; +}; + +/*EQ mbox*/ +struct xsc_eq_context { + __be16 vecidx; + __be16 pa_num; + u8 log_eq_sz; + __be16 glb_func_id; + u8 is_async_eq; + u8 rsvd; +}; + +struct xsc_create_eq_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_eq_context ctx; + __be64 pas[]; +}; + +struct xsc_create_eq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 eqn; + u8 rsvd[4]; +}; + +struct xsc_destroy_eq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 eqn; + u8 rsvd[4]; + +}; + +struct xsc_destroy_eq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*PD mbox*/ +struct xsc_alloc_pd_request { + u8 rsvd[8]; +}; + +struct xsc_alloc_pd_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_alloc_pd_request req; +}; + +struct xsc_alloc_pd_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +}; + +struct xsc_dealloc_pd_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; + +}; + +struct xsc_dealloc_pd_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*MR mbox*/ +struct xsc_register_mr_request { + __be32 pdn; + __be32 pa_num; + __be32 len; + __be32 mkey; + u8 rsvd; + u8 acc; + u8 page_mode; + u8 map_en; + __be64 va_base; + __be64 pas[]; +}; + +struct xsc_register_mr_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_register_mr_request req; +}; + +struct xsc_register_mr_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +}; + +struct xsc_unregister_mr_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +}; + +struct xsc_unregister_mr_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_mpt_item { + __be32 pdn; + __be32 pa_num; + __be32 len; + __be32 mkey; + u8 rsvd[5]; + u8 acc; + u8 page_mode; + u8 map_en; + __be64 va_base; +}; + +struct xsc_set_mpt_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_mpt_item mpt_item; +}; + +struct xsc_set_mpt_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 mtt_base; + u8 rsvd[4]; +}; + +struct xsc_mtt_setting { + __be32 mtt_base; + __be32 pa_num; + __be64 pas[]; +}; + +struct xsc_set_mtt_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_mtt_setting mtt_setting; +}; + +struct xsc_set_mtt_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_create_mkey_mbox_in { + struct 
xsc_inbox_hdr hdr; + u8 rsvd[4]; +}; + +struct xsc_create_mkey_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 mkey; +}; + +struct xsc_destroy_mkey_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 mkey; +}; + +struct xsc_destroy_mkey_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd; +}; + +struct xsc_access_reg_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd0[2]; + __be16 register_id; + __be32 arg; + __be32 data[]; +}; + +struct xsc_access_reg_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + __be32 data[]; +}; + +struct xsc_mad_ifc_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 remote_lid; + u8 rsvd0; + u8 port; + u8 rsvd1[4]; + u8 data[256]; +}; + +struct xsc_mad_ifc_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + u8 data[256]; +}; + +struct xsc_query_eq_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd0[3]; + u8 eqn; + u8 rsvd1[4]; +}; + +struct xsc_query_eq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + struct xsc_eq_context ctx; +}; + +struct xsc_query_cq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +}; + +struct xsc_query_cq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[8]; + struct xsc_cq_context ctx; + u8 rsvd6[16]; + __be64 pas[]; +}; + +struct xsc_cmd_query_cmdq_ver_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_cmdq_ver_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 cmdq_ver; + u8 rsvd[6]; +}; + +struct xsc_cmd_dummy_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_dummy_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_fw_version { + u8 fw_version_major; + u8 fw_version_minor; + __be16 fw_version_patch; + __be32 fw_version_tweak; + u8 fw_version_extra_flag; + u8 rsvd[7]; +}; + +struct xsc_hca_cap { + u8 rsvd1[12]; + u8 send_seg_num; + u8 send_wqe_shift; + u8 recv_seg_num; + u8 recv_wqe_shift; + u8 log_max_srq_sz; + u8 log_max_qp_sz; + u8 log_max_mtt; + u8 log_max_qp; + u8 log_max_strq_sz; + u8 log_max_srqs; + u8 rsvd4[2]; + u8 log_max_tso; + u8 log_max_cq_sz; + u8 rsvd6; + u8 log_max_cq; + u8 log_max_eq_sz; + u8 log_max_mkey; + u8 log_max_msix; + u8 log_max_eq; + u8 max_indirection; + u8 log_max_mrw_sz; + u8 log_max_bsf_list_sz; + u8 log_max_klm_list_sz; + u8 rsvd_8_0; + u8 log_max_ra_req_dc; + u8 rsvd_8_1; + u8 log_max_ra_res_dc; + u8 rsvd9; + u8 log_max_ra_req_qp; + u8 log_max_qp_depth; + u8 log_max_ra_res_qp; + __be16 max_vfs; + __be16 raweth_qp_id_end; + __be16 raw_tpe_qp_num; + __be16 max_qp_count; + __be16 raweth_qp_id_base; + u8 rsvd13; + u8 local_ca_ack_delay; + u8 max_num_eqs; + u8 num_ports; + u8 log_max_msg; + u8 mac_port; + __be16 raweth_rss_qp_id_base; + __be16 stat_rate_support; + u8 rsvd16[2]; + __be64 flags; + u8 rsvd17; + u8 uar_sz; + u8 rsvd18; + u8 log_pg_sz; + __be16 bf_log_bf_reg_size; + __be16 msix_base; + __be16 msix_num; + __be16 max_desc_sz_sq; + u8 rsvd20[2]; + __be16 max_desc_sz_rq; + u8 rsvd21[2]; + __be16 max_desc_sz_sq_dc; + u8 rsvd22[4]; + __be16 max_qp_mcg; + u8 rsvd23; + u8 log_max_mcg; + u8 rsvd24; + u8 log_max_pd; + u8 rsvd25; + u8 log_max_xrcd; + u8 rsvd26[40]; + __be32 uar_page_sz; + u8 rsvd27[8]; + __be32 hw_feature_flag;/*enum xsc_hw_feature_flag*/ + __be16 pf0_vf_funcid_base; + __be16 pf0_vf_funcid_top; + __be16 pf1_vf_funcid_base; + __be16 pf1_vf_funcid_top; + __be16 pcie0_pf_funcid_base; + __be16 pcie0_pf_funcid_top; + __be16 pcie1_pf_funcid_base; + __be16 pcie1_pf_funcid_top; + u8 log_msx_atomic_size_qp; + u8 pcie_host; + u8 rsvd28; + u8 log_msx_atomic_size_dc; + u8 board_sn[XSC_BOARD_SN_LEN]; + u8 max_tc; + 
u8 mac_bit; + __be16 funcid_to_logic_port; + u8 rsvd29[6]; + u8 nif_port_num; + u8 reg_mr_via_cmdq; + __be32 hca_core_clock; + __be32 max_rwq_indirection_tables; /* rss_caps */ + __be32 max_rwq_indirection_table_size; /* rss_caps */ + __be32 chip_ver_h; + __be32 chip_ver_m; + __be32 chip_ver_l; + __be32 hotfix_num; + __be32 feature_flag; + __be32 rx_pkt_len_max; + __be32 glb_func_id; + __be64 tx_db; + __be64 rx_db; + __be64 complete_db; + __be64 complete_reg; + __be64 event_db; + __be32 qp_rate_limit_min; + __be32 qp_rate_limit_max; + struct xsc_fw_version fw_ver; + u8 lag_logic_port_ofst; +}; + +struct xsc_cmd_query_hca_cap_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 cpu_num; + u8 rsvd[6]; +}; + +struct xsc_cmd_query_hca_cap_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[8]; + struct xsc_hca_cap hca_cap; +}; + +struct xsc_cmd_enable_hca_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 vf_num; + __be16 max_msix_vec; + __be16 cpu_num; + u8 pp_bypass; + u8 esw_mode; +}; + +struct xsc_cmd_enable_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_cmd_disable_hca_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 vf_num; + u8 pp_bypass; + u8 esw_mode; +}; + +struct xsc_cmd_disable_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_cmd_modify_hca_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pp_bypass; + u8 esw_mode; + u8 rsvd0[6]; +}; + +struct xsc_cmd_modify_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_query_special_ctxs_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_query_special_ctxs_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 dump_fill_mkey; + __be32 reserved_lkey; +}; + +/* vport mbox */ +struct xsc_nic_vport_context { + __be32 min_wqe_inline_mode:3; + __be32 disable_mc_local_lb:1; + __be32 disable_uc_local_lb:1; + __be32 roce_en:1; + + __be32 arm_change_event:1; + __be32 event_on_mtu:1; + __be32 event_on_promisc_change:1; + __be32 event_on_vlan_change:1; + __be32 event_on_mc_address_change:1; + __be32 event_on_uc_address_change:1; + __be32 affiliation_criteria:4; + __be32 affiliated_vhca_id; + + __be16 mtu; + + __be64 system_image_guid; + __be64 port_guid; + __be64 node_guid; + + __be32 qkey_violation_counter; + + __be16 spoofchk:1; + __be16 trust:1; + __be16 promisc:1; + __be16 allmcast:1; + __be16 vlan_allowed:1; + __be16 allowed_list_type:3; + __be16 allowed_list_size:10; + + __be16 vlan_proto; + __be16 vlan; + u8 qos; + u8 permanent_address[6]; + u8 current_address[6]; + u8 current_uc_mac_address[0][2]; +}; + +enum { + XSC_HCA_VPORT_SEL_PORT_GUID = 1 << 0, + XSC_HCA_VPORT_SEL_NODE_GUID = 1 << 1, + XSC_HCA_VPORT_SEL_STATE_POLICY = 1 << 2, +}; + +struct xsc_hca_vport_context { + u32 field_select; + u32 port_physical_state:4; + u32 vport_state_policy:4; + u32 port_state:4; + u32 vport_state:4; + u32 rsvd0:16; + + u64 system_image_guid; + u64 port_guid; + u64 node_guid; + + u16 qkey_violation_counter; + u16 pkey_violation_counter; +}; + +struct xsc_query_nic_vport_context_out { + struct xsc_outbox_hdr hdr; + struct xsc_nic_vport_context nic_vport_ctx; +}; + +struct xsc_query_nic_vport_context_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 vport_number:16; + u32 allowed_list_type:3; + u32 rsvd:12; +}; + +struct xsc_modify_nic_vport_context_out { + struct xsc_outbox_hdr hdr; + __be16 outer_vlan_id; + u8 rsvd[2]; +}; + +struct xsc_modify_nic_vport_field_select { + __be32 affiliation:1; + __be32 disable_uc_local_lb:1; + __be32 disable_mc_local_lb:1; + __be32 node_guid:1; + 
__be32 port_guid:1; + __be32 min_inline:1; + __be32 mtu:1; + __be32 change_event:1; + __be32 promisc:1; + __be32 allmcast:1; + __be32 permanent_address:1; + __be32 current_address:1; + __be32 addresses_list:1; + __be32 roce_en:1; + __be32 spoofchk:1; + __be32 trust:1; + __be32 rsvd:16; +}; + +struct xsc_modify_nic_vport_context_in { + struct xsc_inbox_hdr hdr; + __be32 other_vport:1; + __be32 vport_number:16; + __be32 rsvd:15; + __be16 caps; + __be16 caps_mask; + __be16 lag_id; + + struct xsc_modify_nic_vport_field_select field_select; + struct xsc_nic_vport_context nic_vport_ctx; +}; + +struct xsc_query_hca_vport_context_out { + struct xsc_outbox_hdr hdr; + struct xsc_hca_vport_context hca_vport_ctx; +}; + +struct xsc_query_hca_vport_context_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; +}; + +struct xsc_modify_hca_vport_context_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_modify_hca_vport_context_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; + + struct xsc_hca_vport_context hca_vport_ctx; +}; + +struct xsc_array128 { + u8 array128[16]; +}; + +struct xsc_query_hca_vport_gid_out { + struct xsc_outbox_hdr hdr; + u16 gids_num; + struct xsc_array128 gid[]; +}; + +struct xsc_query_hca_vport_gid_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; + u16 gid_index; +}; + +struct xsc_pkey { + u16 pkey; +}; + +struct xsc_query_hca_vport_pkey_out { + struct xsc_outbox_hdr hdr; + struct xsc_pkey pkey[]; +}; + +struct xsc_query_hca_vport_pkey_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; + u16 pkey_index; +}; + +struct xsc_query_vport_state_out { + struct xsc_outbox_hdr hdr; + u8 admin_state:4; + u8 state:4; +}; + +struct xsc_query_vport_state_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 vport_number:16; + u32 rsvd0:15; +}; + +struct xsc_modify_vport_state_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_modify_vport_state_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 vport_number:16; + u32 rsvd0:15; + u8 admin_state:4; + u8 rsvd1:4; +}; + +struct xsc_traffic_counter { + u64 packets; + u64 bytes; +}; + +struct xsc_query_vport_counter_out { + struct xsc_outbox_hdr hdr; + struct xsc_traffic_counter received_errors; + struct xsc_traffic_counter transmit_errors; + struct xsc_traffic_counter received_ib_unicast; + struct xsc_traffic_counter transmitted_ib_unicast; + struct xsc_traffic_counter received_ib_multicast; + struct xsc_traffic_counter transmitted_ib_multicast; + struct xsc_traffic_counter received_eth_broadcast; + struct xsc_traffic_counter transmitted_eth_broadcast; + struct xsc_traffic_counter received_eth_unicast; + struct xsc_traffic_counter transmitted_eth_unicast; + struct xsc_traffic_counter received_eth_multicast; + struct xsc_traffic_counter transmitted_eth_multicast; +}; + +struct xsc_query_vport_counter_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; +}; + +/* ioctl mbox */ +struct xsc_ioctl_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 len; + __be16 rsvd; + u8 data[]; +}; + +struct xsc_ioctl_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 error; + __be16 len; + __be16 rsvd; + u8 data[]; +}; + +struct xsc_modify_raw_qp_request { + u16 qpn; + u16 lag_id; + u16 func_id; + u8 dma_direct; + u8 prio; + u8 qp_out_port; + u8 
rsvd[7]; +}; + +struct xsc_modify_raw_qp_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pcie_no; + u8 rsv[7]; + struct xsc_modify_raw_qp_request req; +}; + +struct xsc_modify_raw_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +#define ETH_ALEN 6 + +struct xsc_create_lag_request { + __be16 lag_id; + u8 lag_type; + u8 lag_sel_mode; + u8 mac_idx; + u8 netdev_addr[ETH_ALEN]; + u8 bond_mode; + u8 slave_status; +}; + +struct xsc_add_lag_member_request { + __be16 lag_id; + u8 lag_type; + u8 lag_sel_mode; + u8 mac_idx; + u8 netdev_addr[ETH_ALEN]; + u8 bond_mode; + u8 slave_status; + u8 mad_mac_idx; +}; + +struct xsc_remove_lag_member_request { + __be16 lag_id; + u8 lag_type; + u8 mac_idx; + u8 mad_mac_idx; + u8 bond_mode; + u8 is_roce_lag_xdev; + u8 not_roce_lag_xdev_mask; +}; + +struct xsc_update_lag_member_status_request { + __be16 lag_id; + u8 lag_type; + u8 mac_idx; + u8 bond_mode; + u8 slave_status; + u8 rsvd; +}; + +struct xsc_update_lag_hash_type_request { + __be16 lag_id; + u8 lag_sel_mode; + u8 rsvd[5]; +}; + +struct xsc_destroy_lag_request { + __be16 lag_id; + u8 lag_type; + u8 mac_idx; + u8 bond_mode; + u8 slave_status; + u8 rsvd[3]; +}; + +struct xsc_set_lag_qos_request { + __be16 lag_id; + u8 member_idx; + u8 lag_op; + u8 resv[4]; +}; + +struct xsc_create_lag_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_create_lag_request req; +}; + +struct xsc_create_lag_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_add_lag_member_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_add_lag_member_request req; +}; + +struct xsc_add_lag_member_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_remove_lag_member_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_remove_lag_member_request req; +}; + +struct xsc_remove_lag_member_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_update_lag_member_status_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_update_lag_member_status_request req; +}; + +struct xsc_update_lag_member_status_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_update_lag_hash_type_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_update_lag_hash_type_request req; +}; + +struct xsc_update_lag_hash_type_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_destroy_lag_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_destroy_lag_request req; +}; + +struct xsc_destroy_lag_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_set_lag_qos_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_set_lag_qos_request req; +}; + +struct xsc_set_lag_qos_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*ioctl qos*/ +struct xsc_qos_req_prfx { + u8 mac_port; + u8 rsvd[7]; +}; + +struct xsc_qos_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_qos_req_prfx req_prfx; + u8 data[]; +}; + +struct xsc_qos_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +struct xsc_prio_stats { + u64 tx_bytes; + u64 rx_bytes; + u64 tx_pkts; + u64 rx_pkts; +}; + +struct xsc_prio_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pport; +}; + +struct xsc_prio_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_prio_stats prio_stats[QOS_PRIO_MAX + 1]; +}; + +struct xsc_pfc_prio_stats { + u64 tx_pause; + u64 tx_pause_duration; + u64 rx_pause; + u64 rx_pause_duration; +}; + +struct xsc_pfc_prio_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pport; +}; + +struct xsc_pfc_prio_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_pfc_prio_stats 
prio_stats[QOS_PRIO_MAX + 1]; +}; + +struct xsc_hw_stats_rdma_pf { + /*by mac port*/ + u64 rdma_tx_pkts; + u64 rdma_tx_bytes; + u64 rdma_rx_pkts; + u64 rdma_rx_bytes; + u64 np_cnp_sent; + u64 rp_cnp_handled; + u64 np_ecn_marked_roce_packets; + u64 rp_cnp_ignored; + u64 read_rsp_out_of_seq; + u64 implied_nak_seq_err; + /*by function*/ + u64 out_of_sequence; + u64 packet_seq_err; + u64 out_of_buffer; + u64 rnr_nak_retry_err; + u64 local_ack_timeout_err; + u64 rx_read_requests; + u64 rx_write_requests; + u64 duplicate_requests; + u64 rdma_tx_pkts_func; + u64 rdma_tx_payload_bytes; + u64 rdma_rx_pkts_func; + u64 rdma_rx_payload_bytes; + /*global*/ + u64 rdma_loopback_pkts; + u64 rdma_loopback_bytes; +}; + +struct xsc_hw_stats_rdma_vf { + /*by function*/ + u64 rdma_tx_pkts_func; + u64 rdma_tx_payload_bytes; + u64 rdma_rx_pkts_func; + u64 rdma_rx_payload_bytes; + + u64 out_of_sequence; + u64 packet_seq_err; + u64 out_of_buffer; + u64 rnr_nak_retry_err; + u64 local_ack_timeout_err; + u64 rx_read_requests; + u64 rx_write_requests; + u64 duplicate_requests; +}; + +struct xsc_hw_stats_rdma { + u8 is_pf; + u8 rsv[3]; + union { + struct xsc_hw_stats_rdma_pf pf_stats; + struct xsc_hw_stats_rdma_vf vf_stats; + } stats; +}; + +struct xsc_hw_stats_eth_pf { + /*by mac port*/ + u64 rdma_tx_pkts; + u64 rdma_tx_bytes; + u64 rdma_rx_pkts; + u64 rdma_rx_bytes; + u64 tx_pause; + u64 rx_pause; + u64 rx_fcs_errors; + u64 rx_discards; + u64 tx_multicast_phy; + u64 tx_broadcast_phy; + u64 rx_multicast_phy; + u64 rx_broadcast_phy; + /*by global*/ + u64 rdma_loopback_pkts; + u64 rdma_loopback_bytes; +}; + +struct xsc_hw_stats_eth_vf { + /*by function*/ + u64 rdma_tx_pkts; + u64 rdma_tx_bytes; + u64 rdma_rx_pkts; + u64 rdma_rx_bytes; +}; + +struct xsc_hw_stats_eth { + u8 is_pf; + u8 rsv[3]; + union { + struct xsc_hw_stats_eth_pf pf_stats; + struct xsc_hw_stats_eth_vf vf_stats; + } stats; +}; + +struct xsc_hw_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 mac_port; + u8 is_lag; + u8 lag_member_num; + u8 member_port[]; +}; + +struct xsc_hw_stats_rdma_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_hw_stats_rdma hw_stats; +}; + +struct xsc_hw_stats_eth_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_hw_stats_eth hw_stats; +}; + +struct xsc_hw_global_stats_rdma { + /*by global*/ + u64 rdma_loopback_pkts; + u64 rdma_loopback_bytes; + u64 rx_icrc_encapsulated; + u64 req_cqe_error; + u64 resp_cqe_error; + u64 cqe_msg_code_error; +}; + +struct xsc_hw_global_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsv[4]; +}; + +struct xsc_hw_global_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_hw_global_stats_rdma hw_stats; +}; + +struct xsc_pfc_stall_stats { + /*by mac port*/ + u64 tx_pause_storm_triggered; +}; + +struct xsc_pfc_stall_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 mac_port; +}; + +struct xsc_pfc_stall_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_pfc_stall_stats pfc_stall_stats; +}; + +struct xsc_dscp_pmt_set { + u8 dscp; + u8 priority; + u8 rsvd[6]; +}; + +struct xsc_dscp_pmt_get { + u8 prio_map[QOS_DSCP_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_trust_mode_set { + u8 is_pcp; + u8 rsvd[7]; +}; + +struct xsc_trust_mode_get { + u8 is_pcp; + u8 rsvd[7]; +}; + +struct xsc_pcp_pmt_set { + u8 pcp; + u8 priority; + u8 rsvd[6]; +}; + +struct xsc_pcp_pmt_get { + u8 prio_map[QOS_PCP_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_default_pri_set { + u8 priority; + u8 rsvd[7]; +}; + +struct xsc_default_pri_get { + u8 priority; + u8 rsvd[7]; +}; + +#define 
PFC_WATCHDOG_EN_OFF 0 +#define PFC_WATCHDOG_EN_ON 1 +struct xsc_watchdog_en_set { + u8 en; +}; + +struct xsc_watchdog_en_get { + u8 en; +}; + +#define PFC_WATCHDOG_PERIOD_MIN 1 +#define PFC_WATCHDOG_PERIOD_MAX 4000000 +struct xsc_watchdog_period_set { + u32 period; +}; + +struct xsc_watchdog_period_get { + u32 period; +}; + +struct xsc_event_resp { + u8 resp_cmd_type; /* bitmap:0x0001: link up/down */ +}; + +struct xsc_event_linkstatus_resp { + u8 linkstatus; /*0:down, 1:up*/ +}; + +struct xsc_event_linkinfo { + u8 linkstatus; /*0:down, 1:up*/ + u8 port; + u8 duplex; + u8 autoneg; + u32 linkspeed; + u64 supported; + u64 advertising; + u64 supported_fec; /* reserved, not support currently */ + u64 advertised_fec; /* reserved, not support currently */ + u64 supported_speed[2]; + u64 advertising_speed[2]; +}; + +struct xsc_lldp_status_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 os_handle_lldp; + u8 sub_type; +}; + +struct xsc_lldp_status_mbox_out { + struct xsc_outbox_hdr hdr; + union { + __be32 os_handle_lldp; + __be32 dcbx_status; + } status; +}; + +struct xsc_vport_rate_limit_mobox_in { + struct xsc_inbox_hdr hdr; + u8 other_vport; + __be16 vport_number; + __be16 rsvd0; + __be32 rate; +}; + +struct xsc_vport_rate_limit_mobox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_event_query_type_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_event_query_type_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_event_resp ctx; +}; + +struct xsc_event_query_linkstatus_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_event_query_linkstatus_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_event_linkstatus_resp ctx; +}; + +struct xsc_event_query_linkinfo_mbox_in { + struct xsc_inbox_hdr hdr; +}; + +struct xsc_event_query_linkinfo_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_event_linkinfo ctx; +}; + +struct xsc_event_modify_linkinfo_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_event_linkinfo ctx; +}; + +struct xsc_event_modify_linkinfo_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_set_port_admin_status_mbox_in { + struct xsc_inbox_hdr hdr; + u16 admin_status; + +}; + +struct xsc_event_set_port_admin_status_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_set_led_status_mbox_in { + struct xsc_inbox_hdr hdr; + u8 port_id; +}; + +struct xsc_event_set_led_status_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_modify_fecparam_mbox_in { + struct xsc_inbox_hdr hdr; + u32 fec; +}; + +struct xsc_event_modify_fecparam_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_query_fecparam_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_event_query_fecparam_mbox_out { + struct xsc_outbox_hdr hdr; + u32 active_fec; + u32 fec_cfg; + u32 status; +}; + +#define PFC_ON_PG_PRFL_IDX 0 +#define PFC_OFF_PG_PRFL_IDX 1 +#define PFC_ON_QMU_VALUE 0 +#define PFC_OFF_QMU_VALUE 1 + +#define NIF_PFC_EN_ON 1 +#define NIF_PFC_EN_OFF 0 + +#define PFC_CFG_CHECK_TIMEOUT_US 8000000 +#define PFC_CFG_CHECK_SLEEP_TIME_US 200 +#define PFC_CFG_CHECK_MAX_RETRY_TIMES \ + (PFC_CFG_CHECK_TIMEOUT_US / PFC_CFG_CHECK_SLEEP_TIME_US) +#define PFC_CFG_CHECK_VALID_CNT 3 + +enum { + PFC_OP_ENABLE = 0, + PFC_OP_DISABLE, + PFC_OP_MODIFY, + PFC_OP_TYPE_MAX, +}; + +enum { + DROP_TH_CLEAR = 0, + DROP_TH_RECOVER, + DROP_TH_RECOVER_LOSSY, + DROP_TH_RECOVER_LOSSLESS, +}; + +struct xsc_pfc_cfg { + u8 req_prio; + u8 req_pfc_en; + u8 curr_prio; + u8 curr_pfc_en; 
+ u8 pfc_op; + u8 lossless_num; +}; + +#define LOSSLESS_NUM_INVAILD 9 +struct xsc_pfc_set { + u8 priority; + u8 pfc_on; + u8 type; + u8 src_prio; + u8 lossless_num; +}; + +#define PFC_PRIO_MAX 7 +struct xsc_pfc_get { + u8 pfc_on[PFC_PRIO_MAX + 1]; + u8 max_prio; +}; + +struct xsc_pfc_set_drop_th_mbox_in { + struct xsc_inbox_hdr hdr; + u8 prio; + u8 cfg_type; +}; + +struct xsc_pfc_set_drop_th_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_pfc_get_cfg_status_mbox_in { + struct xsc_inbox_hdr hdr; + u8 prio; +}; + +struct xsc_pfc_get_cfg_status_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_rate_limit_set { + u32 rate_cir; + u32 limit_id; + u8 limit_level; + u8 rsvd[7]; +}; + +struct xsc_rate_limit_get { + u32 rate_cir[QOS_PRIO_MAX + 1]; + u32 max_limit_id; + u8 limit_level; + u8 rsvd[3]; +}; + +struct xsc_sp_set { + u8 sp[QOS_PRIO_MAX + 1]; +}; + +struct xsc_sp_get { + u8 sp[QOS_PRIO_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_weight_set { + u8 weight[QOS_PRIO_MAX + 1]; +}; + +struct xsc_weight_get { + u8 weight[QOS_PRIO_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_dpu_port_weight_set { + u8 target; + u8 weight[DPU_PORT_WGHT_CFG_MAX + 1]; + u8 rsv[5]; +}; + +struct xsc_dpu_port_weight_get { + u8 weight[DPU_PORT_WGHT_TARGET_NUM][DPU_PORT_WGHT_CFG_MAX + 1]; + u8 rsvd[4]; +}; + +struct xsc_dpu_prio_weight_set { + u8 target; + u8 weight[QOS_PRIO_MAX + 1]; + u8 rsv[7]; +}; + +struct xsc_dpu_prio_weight_get { + u8 weight[DPU_PRIO_WGHT_TARGET_NUM][QOS_PRIO_MAX + 1]; +}; + +struct xsc_cc_mbox_in { + struct xsc_inbox_hdr hdr; + u8 data[]; +}; + +struct xsc_cc_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +struct xsc_cc_ctrl_cmd { + u16 cmd; + u16 len; + u8 val[]; +}; + +struct xsc_cc_cmd_enable_rp { + u16 cmd; + u16 len; + u32 enable; + u32 section; +}; + +struct xsc_cc_cmd_enable_np { + u16 cmd; + u16 len; + u32 enable; + u32 section; +}; + +struct xsc_cc_cmd_init_alpha { + u16 cmd; + u16 len; + u32 alpha; + u32 section; +}; + +struct xsc_cc_cmd_g { + u16 cmd; + u16 len; + u32 g; + u32 section; +}; + +struct xsc_cc_cmd_ai { + u16 cmd; + u16 len; + u32 ai; + u32 section; +}; + +struct xsc_cc_cmd_hai { + u16 cmd; + u16 len; + u32 hai; + u32 section; +}; + +struct xsc_cc_cmd_th { + u16 cmd; + u16 len; + u32 threshold; + u32 section; +}; + +struct xsc_cc_cmd_bc { + u16 cmd; + u16 len; + u32 bytecount; + u32 section; +}; + +struct xsc_cc_cmd_cnp_opcode { + u16 cmd; + u16 len; + u32 opcode; +}; + +struct xsc_cc_cmd_cnp_bth_b { + u16 cmd; + u16 len; + u32 bth_b; +}; + +struct xsc_cc_cmd_cnp_bth_f { + u16 cmd; + u16 len; + u32 bth_f; +}; + +struct xsc_cc_cmd_cnp_ecn { + u16 cmd; + u16 len; + u32 ecn; +}; + +struct xsc_cc_cmd_data_ecn { + u16 cmd; + u16 len; + u32 ecn; +}; + +struct xsc_cc_cmd_cnp_tx_interval { + u16 cmd; + u16 len; + u32 interval; // us + u32 section; +}; + +struct xsc_cc_cmd_evt_rsttime { + u16 cmd; + u16 len; + u32 period; +}; + +struct xsc_cc_cmd_cnp_dscp { + u16 cmd; + u16 len; + u32 dscp; + u32 section; +}; + +struct xsc_cc_cmd_cnp_pcp { + u16 cmd; + u16 len; + u32 pcp; + u32 section; +}; + +struct xsc_cc_cmd_evt_period_alpha { + u16 cmd; + u16 len; + u32 period; +}; + +struct xsc_cc_cmd_clamp_tgt_rate { + u16 cmd; + u16 len; + u32 clamp_tgt_rate; + u32 section; +}; + +struct xsc_cc_cmd_max_hai_factor { + u16 cmd; + u16 len; + u32 max_hai_factor; + u32 section; +}; + +struct xsc_cc_cmd_scale { + u16 cmd; + u16 len; + u32 scale; + u32 section; +}; + +struct xsc_cc_cmd_get_cfg { + u16 cmd; + u16 len; + u32 enable_rp; + u32 
enable_np; + u32 init_alpha; + u32 g; + u32 ai; + u32 hai; + u32 threshold; + u32 bytecount; + u32 opcode; + u32 bth_b; + u32 bth_f; + u32 cnp_ecn; + u32 data_ecn; + u32 cnp_tx_interval; + u32 evt_period_rsttime; + u32 cnp_dscp; + u32 cnp_pcp; + u32 evt_period_alpha; + u32 clamp_tgt_rate; + u32 max_hai_factor; + u32 scale; + u32 section; +}; + +struct xsc_cc_cmd_get_stat { + u16 cmd; + u16 len; + u32 section; +}; + +struct xsc_cc_cmd_stat { + u32 cnp_handled; + u32 alpha_recovery; + u32 reset_timeout; + u32 reset_bytecount; +}; + +struct xsc_set_mtu_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 mtu; + __be16 rx_buf_sz_min; + u8 mac_port; + u8 rsvd; +}; + +struct xsc_hwc_mbox_in { + struct xsc_inbox_hdr hdr; + u8 data[]; +}; + +struct xsc_hwc_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +struct hwc_set_t { + u8 type; + u8 s_wqe_mode; + u8 r_wqe_mode; + u8 ack_timeout; + u8 group_mode; + u8 lossless_prio[XSC_MAX_MAC_NUM]; + u8 lossless_prio_len; + u8 retry_cnt_th; + u8 adapt_to_other; + u8 alloc_qp_id_mode; + u16 vf_num_per_pf; + u16 max_vf_num_per_pf; + u8 eth_pkt_offset; + u8 rdma_pkt_offset; + u8 tso_eth_pkt_offset; + u8 tx_dedi_pref; + u8 reg_mr_via_cmdq; + u8 per_dst_grp_thr; + u8 per_dst_grp_cnt; + u8 dcbx_status[XSC_MAX_MAC_NUM]; + u8 dcbx_port_cnt; +}; + +struct hwc_get_t { + u8 cur_s_wqe_mode; + u8 next_s_wqe_mode; + u8 cur_r_wqe_mode; + u8 next_r_wqe_mode; + u8 cur_ack_timeout; + u8 next_ack_timeout; + u8 cur_group_mode; + u8 next_group_mode; + u8 cur_lossless_prio[XSC_MAX_MAC_NUM]; + u8 next_lossless_prio[XSC_MAX_MAC_NUM]; + u8 lossless_prio_len; + u8 cur_retry_cnt_th; + u8 next_retry_cnt_th; + u8 cur_adapt_to_other; + u8 next_adapt_to_other; + u16 cur_vf_num_per_pf; + u16 next_vf_num_per_pf; + u16 cur_max_vf_num_per_pf; + u16 next_max_vf_num_per_pf; + u8 cur_eth_pkt_offset; + u8 next_eth_pkt_offset; + u8 cur_rdma_pkt_offset; + u8 next_rdma_pkt_offset; + u8 cur_tso_eth_pkt_offset; + u8 next_tso_eth_pkt_offset; + u8 cur_alloc_qp_id_mode; + u8 next_alloc_qp_id_mode; + u8 cur_tx_dedi_pref; + u8 next_tx_dedi_pref; + u8 cur_reg_mr_via_cmdq; + u8 next_reg_mr_via_cmdq; + u8 cur_per_dst_grp_thr; + u8 next_per_dst_grp_thr; + u8 cur_per_dst_grp_cnt; + u8 next_per_dst_grp_cnt; + u8 cur_dcbx_status[XSC_MAX_MAC_NUM]; + u8 next_dcbx_status[XSC_MAX_MAC_NUM]; + u8 dcbx_port_cnt; +}; + +struct xsc_set_mtu_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_query_eth_mac_mbox_in { + struct xsc_inbox_hdr hdr; + u8 index; +}; + +struct xsc_query_eth_mac_mbox_out { + struct xsc_outbox_hdr hdr; + u8 mac[6]; +}; + +struct xsc_query_pause_cnt_mbox_in { + struct xsc_inbox_hdr hdr; + u16 mac_port; + u16 cnt_type; + u32 reg_addr; +}; + +struct xsc_query_pause_cnt_mbox_out { + struct xsc_outbox_hdr hdr; + u64 val; +}; + +enum { + XSC_TBM_CAP_HASH_PPH = 0, + XSC_TBM_CAP_RSS, + XSC_TBM_CAP_PP_BYPASS, + XSC_TBM_CAP_PCT_DROP_CONFIG, +}; + +struct xsc_nic_attr { + __be16 caps; + __be16 caps_mask; + u8 mac_addr[6]; +}; + +struct xsc_rss_attr { + u8 rss_en; + u8 hfunc; + __be16 rqn_base; + __be16 rqn_num; + __be32 hash_tmpl; +}; + +struct xsc_cmd_enable_nic_hca_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_nic_attr nic; + struct xsc_rss_attr rss; +}; + +struct xsc_cmd_enable_nic_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[2]; +}; + +struct xsc_nic_dis_attr { + __be16 caps; +}; + +struct xsc_cmd_disable_nic_hca_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_nic_dis_attr nic; +}; + +struct xsc_cmd_disable_nic_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + 
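+/*
+ * Usage sketch (illustration only, not part of this patch): enabling the
+ * NIC HCA is a synchronous mailbox exchange.  A caller zero-fills the
+ * in/out boxes, sets the big-endian opcode (plus whatever nic/rss
+ * attributes it needs), executes the command through xsc_cmd_exec()
+ * declared in xsc_core.h, and checks the status byte of the out header.
+ * The wrapper name and the -EIO mapping below are assumptions made for
+ * the example.
+ *
+ *	static int example_enable_nic_hca(struct xsc_core_device *xdev)
+ *	{
+ *		struct xsc_cmd_enable_nic_hca_mbox_in in = {};
+ *		struct xsc_cmd_enable_nic_hca_mbox_out out = {};
+ *		int err;
+ *
+ *		in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_NIC_HCA);
+ *		err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ *		if (err)
+ *			return err;
+ *		return out.hdr.status ? -EIO : 0;
+ *	}
+ */
+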
+enum { + XSC_RSS_HASH_KEY_UPDATE = 0, + XSC_RSS_HASH_TEMP_UPDATE, + XSC_RSS_HASH_FUNC_UPDATE, + XSC_RSS_RXQ_UPDATE, + XSC_RSS_RXQ_DROP, +}; + +struct xsc_rss_modify_attr { + u8 caps_mask; + u8 rss_en; + __be16 rqn_base; + __be16 rqn_num; + u8 hfunc; + __be32 hash_tmpl; + u8 hash_key[52]; +}; + +struct xsc_cmd_modify_nic_hca_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_nic_attr nic; + struct xsc_rss_modify_attr rss; +}; + +struct xsc_cmd_modify_nic_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_function_reset_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 glb_func_id; + u8 rsvd[6]; +}; + +struct xsc_function_reset_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +enum { + XSC_PCIE_LAT_FEAT_SET_EN = 0, + XSC_PCIE_LAT_FEAT_GET_EN, + XSC_PCIE_LAT_FEAT_SET_INTERVAL, + XSC_PCIE_LAT_FEAT_GET_INTERVAL, + XSC_PCIE_LAT_FEAT_GET_HISTOGRAM, + XSC_PCIE_LAT_FEAT_GET_PEAK, + XSC_PCIE_LAT_FEAT_HW, + XSC_PCIE_LAT_FEAT_HW_INIT, +}; + +struct xsc_pcie_lat { + u8 pcie_lat_enable; + u32 pcie_lat_interval[XSC_PCIE_LAT_CFG_INTERVAL_MAX]; + u32 pcie_lat_histogram[XSC_PCIE_LAT_CFG_HISTOGRAM_MAX]; + u32 pcie_lat_peak; +}; + +struct xsc_pcie_lat_feat_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 xsc_pcie_lat_feature_opcode; + struct xsc_pcie_lat pcie_lat; +}; + +struct xsc_pcie_lat_feat_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 xsc_pcie_lat_feature_opcode; + struct xsc_pcie_lat pcie_lat; +}; + +struct xsc_reg_mcia { + u8 module; + u8 status; + + u8 i2c_device_address; + u8 page_number; + u8 device_address; + + u8 size; + + u8 dword_0[0x20]; + u8 dword_1[0x20]; + u8 dword_2[0x20]; + u8 dword_3[0x20]; + u8 dword_4[0x20]; + u8 dword_5[0x20]; + u8 dword_6[0x20]; + u8 dword_7[0x20]; + u8 dword_8[0x20]; + u8 dword_9[0x20]; + u8 dword_10[0x20]; + u8 dword_11[0x20]; +}; + +struct xsc_rtt_en_mbox_in { + struct xsc_inbox_hdr hdr; + u8 en;//0-disable, 1-enable + u8 rsvd[7]; +}; + +struct xsc_rtt_en_mbox_out { + struct xsc_outbox_hdr hdr; + u8 en;//0-disable, 1-enable + u8 rsvd[7]; +}; + +struct xsc_rtt_qpn_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn[32]; +}; + +struct xsc_rtt_qpn_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_get_rtt_qpn_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qpn[32]; +}; + +struct xsc_rtt_period_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 period; //ms +}; + +struct xsc_rtt_period_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 period; //ms + u8 rsvd[4]; +}; + +struct xsc_rtt_result_mbox_out { + struct xsc_outbox_hdr hdr; + __be64 result[32]; +}; + +struct rtt_stats { + u64 rtt_succ_snd_req_cnt; + u64 rtt_succ_snd_rsp_cnt; + u64 rtt_fail_snd_req_cnt; + u64 rtt_fail_snd_rsp_cnt; + u64 rtt_rcv_req_cnt; + u64 rtt_rcv_rsp_cnt; + u64 rtt_rcv_unk_cnt; + u64 rtt_grp_invalid_cnt; +}; + +struct xsc_rtt_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct rtt_stats stats; +}; + +enum { + XSC_AP_FEAT_SET_UDP_SPORT = 0, +}; + +struct xsc_ap_feat_set_udp_sport { + u32 qpn; + u32 udp_sport; +}; + +struct xsc_ap { + struct xsc_ap_feat_set_udp_sport set_udp_sport; +}; + +struct xsc_ap_feat_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 xsc_ap_feature_opcode; + struct xsc_ap ap; +}; + +struct xsc_ap_feat_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 xsc_ap_feature_opcode; + struct xsc_ap ap; +}; + +struct xsc_set_debug_info_mbox_in { + struct xsc_inbox_hdr hdr; + u8 set_field; + u8 log_level; + u8 cmd_verbose; + u8 rsvd[5]; +}; + +struct xsc_set_debug_info_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + 
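+/*
+ * Usage sketch (illustration only): feature commands such as
+ * XSC_CMD_OP_AP_FEAT carry a sub-opcode inside the mailbox.  Setting the
+ * UDP source port used by a QP might look as follows; xdev, qpn, sport
+ * and err are caller-provided, and the error handling is hypothetical.
+ *
+ *	struct xsc_ap_feat_mbox_in in = {};
+ *	struct xsc_ap_feat_mbox_out out = {};
+ *
+ *	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_AP_FEAT);
+ *	in.xsc_ap_feature_opcode = cpu_to_be16(XSC_AP_FEAT_SET_UDP_SPORT);
+ *	in.ap.set_udp_sport.qpn = qpn;
+ *	in.ap.set_udp_sport.udp_sport = sport;
+ *	err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ */
+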
+struct xsc_cmd_enable_relaxed_order_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_enable_relaxed_order_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_guid_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_guid_mbox_out { + struct xsc_outbox_hdr hdr; + __be64 guid; +}; + +struct xsc_cmd_activate_hw_config_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_activate_hw_config_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +#endif /* XSC_CMD_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h new file mode 100644 index 0000000000000000000000000000000000000000..122b06a87991d4c798fed3c6aab2712232744545 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h @@ -0,0 +1,1315 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_CORE_H +#define XSC_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/xsc_macro.h" +#include "common/xsc_cmd.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_auto_hw.h" +#include "common/driver.h" +#include "common/xsc_reg.h" +#include "common/xsc_eswitch.h" + +extern uint xsc_debug_mask; +extern unsigned int xsc_log_level; + +#ifndef mmiowb +#define mmiowb() +#endif + +#define XSC_PCI_VENDOR_ID 0x1f67 + +#define XSC_MC_PF_DEV_ID 0x1011 +#define XSC_MC_VF_DEV_ID 0x1012 + +#define XSC_MF_HOST_PF_DEV_ID 0x1051 +#define XSC_MF_HOST_VF_DEV_ID 0x1052 +#define XSC_MF_SOC_PF_DEV_ID 0x1053 + +#define XSC_MS_PF_DEV_ID 0x1111 +#define XSC_MS_VF_DEV_ID 0x1112 + +#define XSC_MV_HOST_PF_DEV_ID 0x1151 +#define XSC_MV_HOST_VF_DEV_ID 0x1152 +#define XSC_MV_SOC_PF_DEV_ID 0x1153 + +#define REG_ADDR(dev, offset) \ + (xsc_core_is_pf(dev) ? ((dev->bar) + ((offset) - 0xA0000000)) : ((dev->bar) + (offset))) + +#define REG_WIDTH_TO_STRIDE(width) ((width) / 8) +#define QPM_PAM_TBL_NUM 4 +#define QPM_PAM_TBL_NUM_MASK 3 +#define QPM_PAM_TBL_INDEX_SHIFT 2 +#define QPM_PAM_PAGE_SHIFT 12 + +#define XSC_SUB_DEV_ID_MC_50 0xC050 +#define XSC_SUB_DEV_ID_MC_100 0xC100 +#define XSC_SUB_DEV_ID_MC_200 0xC200 +#define XSC_SUB_DEV_ID_MC_400S 0xC400 +#define XSC_SUB_DEV_ID_MF_50 0xF050 +#define XSC_SUB_DEV_ID_MF_200 0xF200 +#define XSC_SUB_DEV_ID_MS_50 0xA050 +#define XSC_SUB_DEV_ID_MS_100Q 0xA104 +#define XSC_SUB_DEV_ID_MS_200 0xA200 +#define XSC_SUB_DEV_ID_MS_200S 0xA201 +#define XSC_SUB_DEV_ID_MS_400M 0xA202 +#define XSC_SUB_DEV_ID_MS_200_OCP 0xA203 +#define XSC_SUB_DEV_ID_MV_100 0xD100 +#define XSC_SUB_DEV_ID_MV_200 0xD200 + +#define XSC_MAX_PRODUCT_NAME_LEN 32 + +enum { + XSC_LOG_LEVEL_DBG = 0, + XSC_LOG_LEVEL_INFO = 1, + XSC_LOG_LEVEL_WARN = 2, + XSC_LOG_LEVEL_ERR = 3, +}; + +enum { + XSC_CHIP_MC, + XSC_CHIP_MF, + XSC_CHIP_MS, + XSC_CHIP_MV, + XSC_CHIP_UNKNOWN, +}; + +#ifndef dev_fmt +#define dev_fmt(fmt) fmt +#endif + +#define xsc_dev_log(condition, level, dev, fmt, ...) \ +do { \ + if (condition) \ + dev_printk(level, dev, dev_fmt(fmt), ##__VA_ARGS__); \ +} while (0) + +#define xsc_core_dbg(__dev, format, ...) 
\ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_DBG, KERN_DEBUG, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_core_dbg_once(__dev, format, ...) \ + dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define xsc_core_dbg_mask(__dev, mask, format, ...) \ +do { \ + if ((mask) & xsc_debug_mask) \ + xsc_core_dbg(__dev, format, ##__VA_ARGS__); \ +} while (0) + +#define xsc_core_err(__dev, format, ...) \ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_ERR, KERN_ERR, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_core_err_rl(__dev, format, ...) \ + dev_err_ratelimited(&(__dev)->pdev->dev, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define xsc_core_warn(__dev, format, ...) \ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_WARN, KERN_WARNING, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_core_info(__dev, format, ...) \ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_INFO, KERN_INFO, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_pr_debug(format, ...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_DBG) \ + pr_debug(format, ##__VA_ARGS__); \ +} while (0) + +#define assert(__dev, expr) \ +do { \ + if (!(expr)) { \ + dev_err(&(__dev)->pdev->dev, \ + "Assertion failed! %s, %s, %s, line %d\n", \ + #expr, __FILE__, __func__, __LINE__); \ + } \ +} while (0) + +#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) + +#define XSC_PCIE_NO_HOST 0x0 +#define XSC_PCIE_NO_SOC 0x1 +#define XSC_PCIE_NO_UNSET 0xFF + +enum xsc_driver_mode { + HOST_MODE, + SOC_MODE, +}; + +u8 xsc_get_driver_work_mode(void); + +enum xsc_dev_event { + XSC_DEV_EVENT_SYS_ERROR, + XSC_DEV_EVENT_PORT_UP, + XSC_DEV_EVENT_PORT_DOWN, + XSC_DEV_EVENT_PORT_INITIALIZED, + XSC_DEV_EVENT_LID_CHANGE, + XSC_DEV_EVENT_PKEY_CHANGE, + XSC_DEV_EVENT_GUID_CHANGE, + XSC_DEV_EVENT_CLIENT_REREG, +}; + +enum { + /* ten seconds for the sake of bringup. 
Generally, commands must always + * complete and we may need to increase this timeout value + */ + XSC_CMD_TIMEOUT_MSEC = 10 * 1000, + XSC_CMD_WQ_MAX_NAME = 32, +}; + +enum { + XSC_MAX_NAME_LEN = 32, +}; + +enum { + XSC_MAX_PORTS = 2, +}; + +enum { + MAX_MR_CACHE_ENTRIES = 16, +}; + +enum { + XSC_CMD_DATA, /* print command payload only */ + XSC_CMD_TIME, /* print command execution time */ +}; + +enum xsc_rdma_driver_id { + RDMA_DRIVER_XSC_UNKNOWN, + RDMA_DRIVER_XSC5, + RDMA_DRIVER_XSC4, +}; + +/* mutex for interface device list */ +extern struct mutex xsc_intf_mutex; + +#define GROUP_REFER_CNT_SIZE 1024 + +struct qp_group_refer { + spinlock_t lock; /* protect refer_cnt[] */ + u16 refer_cnt[GROUP_REFER_CNT_SIZE]; +}; + +struct xsc_priv_device { + char device_name[IB_DEVICE_NAME_MAX]; + dev_t devno; + struct cdev cdev; + struct list_head mem_list; + spinlock_t mem_lock; /* protect mem_list */ + struct radix_tree_root bdf_tree; + spinlock_t bdf_lock; /* protect bdf_tree */ +}; + +enum xsc_pci_status { + XSC_PCI_STATUS_DISABLED, + XSC_PCI_STATUS_ENABLED, +}; + +enum xsc_device_state { + XSC_DEVICE_STATE_UNINITIALIZED, + XSC_DEVICE_STATE_UP, + XSC_DEVICE_STATE_INTERNAL_ERROR, +}; + +enum xsc_interface_state { + XSC_INTERFACE_STATE_UP = BIT(0), + XSC_INTERFACE_STATE_TEARDOWN = BIT(1), +}; + +enum { + XSC_INTERFACE_PROTOCOL_IB = 0, + XSC_INTERFACE_PROTOCOL_ETH = 1, +}; + +enum { + XSC_INTERFACE_ADDED, + XSC_INTERFACE_ATTACHED, +}; + +#define CONFIG_XSC_SRIOV 1 + +enum xsc_coredev_type { + XSC_COREDEV_PF, + XSC_COREDEV_VF, + XSC_COREDEV_SF +}; + +enum { + XSC_PCI_DEV_IS_VF = 1 << 0, +}; + +enum port_state_policy { + XSC_POLICY_DOWN = 0, + XSC_POLICY_UP = 1, + XSC_POLICY_FOLLOW = 2, + XSC_POLICY_INVALID = 0xffffffff +}; + +enum { + XSC_CAP_PORT_TYPE_IB = 0x0, + XSC_CAP_PORT_TYPE_ETH = 0x1, +}; + +enum xsc_inline_modes { + XSC_INLINE_MODE_NONE, + XSC_INLINE_MODE_L2, + XSC_INLINE_MODE_IP, + XSC_INLINE_MODE_TCP_UDP, +}; + +struct xsc_core_device; + +struct xsc_vf_context { + int enabled; + u64 port_guid; + u64 node_guid; + enum port_state_policy policy; +}; + +struct xsc_sriov_vf { + struct xsc_core_device *dev; + struct kobject kobj; + int vf; +}; + +struct xsc_pci_sriov { + /* standard SRIOV capability fields, mostly for debug */ + int pos; /* capability position */ + int nres; /* number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_vfs; /* total VFs of PF */ + u16 initial_vfs; /* initial VFs of PF */ + u16 num_vfs; /* number of VFs available */ + u16 offset; /* first VF Routing ID offset */ + u16 stride; /* following VF stride */ + u16 vf_device; /* VF device ID */ + u32 pgsz; /* page size for BAR alignment */ + u8 link; /* Function Dependency Link */ +}; + +struct xsc_core_sriov { + int num_vfs; + u16 max_vfs; + u16 vf_bdf_base; + u8 probe_vf; + struct xsc_vf_context *vfs_ctx; + struct kobject *config; + struct kobject *groups_config; + struct kobject node_guid_kobj; + struct xsc_sriov_vf *vfs; + struct xsc_pci_sriov pci_sriov; +}; + +struct xsc_vgroup { + struct xsc_core_device *dev; + u32 group_id; + u32 num_vports; + u32 tsar_ix; + u32 max_rate; + u32 min_rate; + u32 bw_share; + struct kobject kobj; + struct list_head list; +}; + +struct xsc_vport_info { + u8 mac[ETH_ALEN]; + u16 vlan; + u8 qos; + __be16 vlan_proto; + u64 node_guid; + int link_state; + u32 min_rate; + u32 max_rate; + u8 spoofchk; + u8 trusted; + u8 roce; + /* the admin approved vlan list */ + DECLARE_BITMAP(vlan_trunk_8021q_bitmap, VLAN_N_VID); + u32 group; +}; + +#define 
XSC_L2_ADDR_HASH_SIZE 8 + +enum xsc_eswitch_vport_event { + XSC_VPORT_UC_ADDR_CHANGE = BIT(0), + XSC_VPORT_MC_ADDR_CHANGE = BIT(1), + XSC_VPORT_PROMISC_CHANGE = BIT(2), + XSC_VPORT_VLAN_CHANGE = BIT(3), +}; + +struct xsc_vport { + struct xsc_core_device *dev; + u16 vport; + struct hlist_head uc_list[XSC_L2_ADDR_HASH_SIZE]; + struct hlist_head mc_list[XSC_L2_ADDR_HASH_SIZE]; + /* The requested vlan list from the vport side */ + DECLARE_BITMAP(req_vlan_bitmap, VLAN_N_VID); + /* Actual accepted vlans on the acl tables */ + DECLARE_BITMAP(acl_vlan_8021q_bitmap, VLAN_N_VID); + struct work_struct vport_change_handler; + + struct xsc_vport_info info; + + struct { + u8 enabled; + u32 esw_tsar_ix; + u32 bw_share; + u32 min_rate; + u32 max_rate; + } qos; + + u8 enabled; + enum xsc_eswitch_vport_event enabled_events; + u16 match_id; + u32 bond_metadata; + u16 vlan_id; + u8 vlan_qos; + __be16 vlan_proto; +}; + +struct xsc_eswitch { + struct xsc_core_device *dev; + u32 flags; + int total_vports; + int enabled_vports; + int num_vfs; + struct xsc_vport *vports; + struct workqueue_struct *work_queue; + + /* Synchronize between vport change events + * and async SRIOV admin state changes + */ + struct mutex state_lock; + + /* Protects eswitch mode changes occurring via sriov + * state change, devlink commands. + */ + struct mutex mode_lock; + int mode; + int nvports; + u16 manager_vport; + u16 first_host_vport; +}; + +struct xsc_core_health { + u8 sick; +}; + +struct xsc_priv { + char name[XSC_MAX_NAME_LEN]; + struct list_head dev_list; + struct list_head ctx_list; + spinlock_t ctx_lock; /* protect ctx_list */ + int numa_node; + struct xsc_core_sriov sriov; + struct xsc_eswitch *eswitch; + struct xsc_core_health health; +}; + +struct xsc_port_ctrl { + struct list_head node; + dev_t devid; + struct cdev cdev; + struct device *device; + struct list_head file_list; + spinlock_t file_lock; /* protect file_list */ +}; + +typedef int (*restore_func_t)(struct xsc_core_device *dev); + +struct xsc_bdf_file { + unsigned long key; + struct radix_tree_root obj_tree; + spinlock_t obj_lock; /* protect obj_tree */ + struct xsc_core_device *xdev; + restore_func_t restore_nic_fn; +}; + +struct xsc_port_ctrl_file { + struct list_head file_node; + struct radix_tree_root bdf_tree; + spinlock_t bdf_lock; /* protect bdf_tree */ + struct xsc_bdf_file *root_bdf; + struct xsc_port_ctrl *ctrl; +}; + +struct xsc_port_caps { + int gid_table_len; + int pkey_table_len; +}; + +struct xsc_caps { + u8 log_max_eq; + u8 log_max_cq; + u8 log_max_qp; + u8 log_max_mkey; + u8 log_max_pd; + u8 log_max_srq; + u8 log_max_msix; + u32 max_cqes; + u32 max_wqes; + u32 max_sq_desc_sz; + u32 max_rq_desc_sz; + u64 flags; + u16 stat_rate_support; + u32 log_max_msg; + u32 num_ports; + u32 max_ra_res_qp; + u32 max_ra_req_qp; + u32 max_srq_wqes; + u32 bf_reg_size; + u32 bf_regs_per_page; + struct xsc_port_caps port[XSC_MAX_PORTS]; + u8 ext_port_cap[XSC_MAX_PORTS]; + u32 reserved_lkey; + u8 local_ca_ack_delay; + u8 log_max_mcg; + u16 max_qp_mcg; + u32 min_page_sz; + u32 send_ds_num; + u32 send_wqe_shift; + u32 recv_ds_num; + u32 recv_wqe_shift; + u32 rx_pkt_len_max; + + u32 msix_enable:1; + u32 port_type:1; + u32 embedded_cpu:1; + u32 eswitch_manager:1; + u32 ecpf_vport_exists:1; + u32 vport_group_manager:1; + u32 sf:1; + u32 wqe_inline_mode:3; + u32 raweth_qp_id_base:15; + u32 rsvd0:7; + + u16 max_vfs; + u8 log_max_qp_depth; + u8 log_max_current_uc_list; + u8 log_max_current_mc_list; + u16 log_max_vlan_list; + u8 fdb_multi_path_to_table; + u8 
log_esw_max_sched_depth; + + u8 max_num_sf_partitions; + u8 log_max_esw_sf; + u16 sf_base_id; + + u32 max_tc:8; + u32 ets:1; + u32 dcbx:1; + u32 dscp:1; + u32 sbcam_reg:1; + u32 qos:1; + u32 port_buf:1; + u32 rsvd1:2; + u32 raw_tpe_qp_num:16; + u32 max_num_eqs:8; + u32 mac_port:8; + u32 raweth_rss_qp_id_base:16; + u16 msix_base; + u16 msix_num; + u8 log_max_mtt; + u8 log_max_tso; + u32 hca_core_clock; + u32 max_rwq_indirection_tables;/*rss_caps*/ + u32 max_rwq_indirection_table_size;/*rss_caps*/ + u16 raweth_qp_id_end; + u32 qp_rate_limit_min; + u32 qp_rate_limit_max; + u32 hw_feature_flag; + u16 pf0_vf_funcid_base; + u16 pf0_vf_funcid_top; + u16 pf1_vf_funcid_base; + u16 pf1_vf_funcid_top; + u16 pcie0_pf_funcid_base; + u16 pcie0_pf_funcid_top; + u16 pcie1_pf_funcid_base; + u16 pcie1_pf_funcid_top; + u8 nif_port_num; + u8 pcie_host; + u8 mac_bit; + u16 funcid_to_logic_port; + u8 lag_logic_port_ofst; +}; + +struct cache_ent { + /* protect block chain allocations + */ + spinlock_t lock; + struct list_head head; +}; + +struct cmd_msg_cache { + struct cache_ent large; + struct cache_ent med; + +}; + +#define CMD_FIRST_SIZE 8 +struct xsc_cmd_first { + __be32 data[CMD_FIRST_SIZE]; +}; + +struct xsc_cmd_mailbox { + void *buf; + dma_addr_t dma; + struct xsc_cmd_mailbox *next; +}; + +struct xsc_cmd_msg { + struct list_head list; + struct cache_ent *cache; + u32 len; + struct xsc_cmd_first first; + struct xsc_cmd_mailbox *next; +}; + +#define RSP_FIRST_SIZE 14 +struct xsc_rsp_first { + __be32 data[RSP_FIRST_SIZE]; //can be larger, xsc_rsp_layout +}; + +struct xsc_rsp_msg { + struct list_head list; + struct cache_ent *cache; + u32 len; + struct xsc_rsp_first first; + struct xsc_cmd_mailbox *next; +}; + +typedef void (*xsc_cmd_cbk_t)(int status, void *context); + +//hw will use this for some records(e.g. 
vf_id) +struct cmdq_rsv { + u16 vf_id; + u8 rsv[2]; +}; + +//related with hw, won't change +#define CMDQ_ENTRY_SIZE 64 + +struct xsc_cmd_layout { + struct cmdq_rsv rsv0; + __be32 inlen; + __be64 in_ptr; + __be32 in[CMD_FIRST_SIZE]; + __be64 out_ptr; + __be32 outlen; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + u8 owner_bit: 1; //rsv for hw, arm will check this bit to make sure mem written +}; + +struct xsc_rsp_layout { + struct cmdq_rsv rsv0; + __be32 out[RSP_FIRST_SIZE]; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + u8 owner_bit: 1; //rsv for hw, driver will check this bit to make sure mem written +}; + +struct xsc_cmd_work_ent { + struct xsc_cmd_msg *in; + struct xsc_rsp_msg *out; + int idx; + struct completion done; + struct xsc_cmd *cmd; + struct work_struct work; + struct xsc_cmd_layout *lay; + struct xsc_rsp_layout *rsp_lay; + int ret; + u8 status; + u8 token; + struct timespec64 ts1; + struct timespec64 ts2; +}; + +struct xsc_cmd_debug { + struct dentry *dbg_root; + struct dentry *dbg_in; + struct dentry *dbg_out; + struct dentry *dbg_outlen; + struct dentry *dbg_status; + struct dentry *dbg_run; + void *in_msg; + void *out_msg; + u8 status; + u16 inlen; + u16 outlen; +}; + +struct xsc_cmd_stats { + u64 sum; + u64 n; + struct dentry *root; + struct dentry *avg; + struct dentry *count; + /* protect command average calculations */ + spinlock_t lock; +}; + +struct xsc_cmd_reg { + u32 req_pid_addr; + u32 req_cid_addr; + u32 rsp_pid_addr; + u32 rsp_cid_addr; + u32 req_buf_h_addr; + u32 req_buf_l_addr; + u32 rsp_buf_h_addr; + u32 rsp_buf_l_addr; + u32 msix_vec_addr; + u32 element_sz_addr; + u32 q_depth_addr; + u32 interrupt_stat_addr; +}; + +enum xsc_cmd_status { + XSC_CMD_STATUS_NORMAL, + XSC_CMD_STATUS_TIMEDOUT, +}; + +struct xsc_cmd { + struct xsc_cmd_reg reg; + void *cmd_buf; + void *cq_buf; + dma_addr_t dma; + dma_addr_t cq_dma; + u16 cmd_pid; + u16 cq_cid; + u8 owner_bit; + u8 cmdif_rev; + u8 log_sz; + u8 log_stride; + int max_reg_cmds; + int events; + u32 __iomem *vector; + + spinlock_t alloc_lock; /* protect command queue allocations */ + spinlock_t token_lock; /* protect token allocations */ + spinlock_t doorbell_lock; /* protect cmdq req pid doorbell */ + u8 token; + unsigned long bitmask; + char wq_name[XSC_CMD_WQ_MAX_NAME]; + struct workqueue_struct *wq; + struct task_struct *cq_task; + struct semaphore sem; + int mode; + struct xsc_cmd_work_ent *ent_arr[XSC_MAX_COMMANDS]; + struct dma_pool *pool; + struct xsc_cmd_debug dbg; + struct cmd_msg_cache cache; + int checksum_disabled; + struct xsc_cmd_stats stats[XSC_CMD_OP_MAX]; + unsigned int irqn; + u8 ownerbit_learned; + u8 cmd_status; +}; + +struct xsc_lock { + spinlock_t lock; /* xsc spin lock */ +}; + +struct xsc_reg_addr { + u64 tx_db; + u64 rx_db; + u64 complete_db; + u64 complete_reg; + u64 event_db; + u64 cpm_get_lock; + u64 cpm_put_lock; + u64 cpm_lock_avail; + u64 cpm_data_mem; + u64 cpm_cmd; + u64 cpm_addr; + u64 cpm_busy; +}; + +struct xsc_board_info { + u32 board_id; + char board_sn[XSC_BOARD_SN_LEN]; + __be64 guid; + u8 guid_valid; + u8 hw_config_activated; +}; + +/* our core device */ +struct xsc_core_device { + struct pci_dev *pdev; + struct device *device; + struct xsc_priv priv; + struct xsc_dev_resource *dev_res; + void *xsc_ib_dev; + void *netdev; + void *eth_priv; + void *ovs_priv; + void __iomem *bar; + int bar_num; + + u8 mac_port; /* mac port */ + u8 pcie_no; /* pcie number */ + u8 pf_id; + u16 vf_id; + u16 glb_func_id; /* function id */ + + u16 gsi_qpn; /* logic qpn for gsi*/ + u16 msix_vec_base; + + 
struct mutex pci_status_mutex; /* protect pci_status */ + enum xsc_pci_status pci_status; + struct mutex intf_state_mutex; /* protect intf_state */ + unsigned long intf_state; + enum xsc_coredev_type coredev_type; + struct xsc_caps caps; + atomic_t num_qps; + struct xsc_cmd cmd; + struct xsc_lock reg_access_lock; + + void *counters_priv; + struct xsc_priv_device priv_device; + struct xsc_board_info *board_info; + void (*event)(struct xsc_core_device *dev, + enum xsc_dev_event event, unsigned long param); + + void (*event_handler)(void *adapter); + + struct xsc_reg_addr regs; + u32 chip_ver_h; + u32 chip_ver_m; + u32 chip_ver_l; + u32 hotfix_num; + u32 feature_flag; + u16 cmdq_ver; + u8 fw_version_major; + u8 fw_version_minor; + u16 fw_version_patch; + u32 fw_version_tweak; + u8 fw_version_extra_flag; + cpumask_var_t xps_cpumask; + + u8 reg_mr_via_cmdq; + u8 user_mode; + + struct xsc_port_ctrl port_ctrl; + + void *rtt_priv; + void *ap_priv; + void *pcie_lat; + + u8 bond_id; + struct list_head slave_node; +}; + +struct xsc_feature_flag { + u8 fpga_type:2; + u8 hps_ddr:2; + u8 onchip_ft:1; + u8 rdma_icrc:1; + u8 ma_xbar:1; + u8 anlt_fec:1; + u8 pp_tbl_dma:1; + u8 pct_exp:1; +}; + +struct xsc_interface { + struct list_head list; + int protocol; + + void *(*add)(struct xsc_core_device *dev); + void (*remove)(struct xsc_core_device *dev, void *context); + int (*attach)(struct xsc_core_device *dev, void *context); + void (*detach)(struct xsc_core_device *dev, void *context); + void (*event)(struct xsc_core_device *dev, void *context, + enum xsc_dev_event event, unsigned long param); + void *(*get_dev)(void *context); +}; + +struct xsc_device_context { + struct list_head list; + struct xsc_interface *intf; + void *context; + unsigned long state; +}; + +struct xsc_mem_entry { + struct list_head list; + char task_name[TASK_COMM_LEN]; + struct xsc_ioctl_mem_info mem_info; +}; + +struct xsc_device_product_info { + u16 vendor; + u16 device; + u16 subdevice; + char product_name[XSC_MAX_PRODUCT_NAME_LEN]; +}; + +#define XSC_DEVICE_PRODUCT_INFO(vend, dev, subdev, name) \ + .vendor = (vend), .device = (dev), \ + .subdevice = (subdev), .product_name = (name) + +static inline bool xsc_fw_is_available(struct xsc_core_device *dev) +{ + return dev->cmd.cmd_status == XSC_CMD_STATUS_NORMAL; +} + +int xsc_debugfs_init(struct xsc_core_device *dev); +void xsc_debugfs_fini(struct xsc_core_device *dev); +void xsc_register_debugfs(void); +void xsc_unregister_debugfs(void); + +bool xsc_device_registered(struct xsc_core_device *dev); +int xsc_register_device(struct xsc_core_device *dev); +void xsc_unregister_device(struct xsc_core_device *dev); +void xsc_attach_device(struct xsc_core_device *dev); +void xsc_detach_device(struct xsc_core_device *dev); +int xsc_register_interface(struct xsc_interface *intf); +void xsc_unregister_interface(struct xsc_interface *intf); +void xsc_reload_interface(struct xsc_core_device *dev, int protocol); +void xsc_reload_interfaces(struct xsc_core_device *dev, + int protocol1, int protocol2, + bool valid1, bool valid2); + +void xsc_remove_dev_by_protocol(struct xsc_core_device *dev, int protocol); +void xsc_add_dev_by_protocol(struct xsc_core_device *dev, int protocol); +void xsc_dev_list_lock(void); +void xsc_dev_list_unlock(void); +int xsc_dev_list_trylock(void); + +int xsc_cmd_write_reg_directly(struct xsc_core_device *dev, void *in, int in_size, void *out, + int out_size, int func_id); +int xsc_cmd_exec(struct xsc_core_device *dev, void *in, int in_size, + void *out, int out_size); +int 
xsc_create_mkey(struct xsc_core_device *xdev, void *in, void *out); +int xsc_destroy_mkey(struct xsc_core_device *xdev, void *in, void *out); +int xsc_reg_mr(struct xsc_core_device *dev, void *in, void *out); +int xsc_dereg_mr(struct xsc_core_device *dev, void *in, void *out); +int xsc_eth_reset(struct xsc_core_device *dev); +int xsc_tbm_init(struct xsc_core_device *dev); +int xsc_qos_init(struct xsc_core_device *xdev); + +bool xsc_chk_chip_ver(struct xsc_core_device *dev); + +int xsc_alloc_iae_idx(struct xsc_core_device *dev, int *iae_idx); +void xsc_release_iae_idx(struct xsc_core_device *dev, int *iae_idx); +int xsc_get_iae_idx(struct xsc_core_device *dev); + +int xsc_create_res(struct xsc_core_device *dev); +void xsc_destroy_res(struct xsc_core_device *dev); + +int xsc_counters_init(struct ib_device *ib_dev, + struct xsc_core_device *dev); +void xsc_counters_fini(struct ib_device *ib_dev, + struct xsc_core_device *dev); + +int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev); +void xsc_priv_dev_fini(struct ib_device *ib_dev, struct xsc_core_device *dev); + +int xsc_priv_alloc_chrdev_region(void); +void xsc_priv_unregister_chrdev_region(void); + +int xsc_eth_sysfs_create(struct net_device *netdev, struct xsc_core_device *dev); +void xsc_eth_sysfs_remove(struct net_device *netdev, struct xsc_core_device *dev); +int xsc_rtt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev); +void xsc_rtt_sysfs_fini(struct xsc_core_device *xdev); + +void xsc_ib_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev); +void xsc_ib_sysfs_fini(struct ib_device *ib_dev, struct xsc_core_device *xdev); + +int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, + struct xsc_caps *caps); +int xsc_cmd_enable_hca(struct xsc_core_device *dev, u16 vf_num, u16 max_msix); +int xsc_cmd_disable_hca(struct xsc_core_device *dev, u16 vf_num); +int xsc_cmd_modify_hca(struct xsc_core_device *dev); +int xsc_query_guid(struct xsc_core_device *dev); +void xsc_free_board_info(void); + +int xsc_irq_eq_create(struct xsc_core_device *dev); +int xsc_irq_eq_destroy(struct xsc_core_device *dev); + +int xsc_sriov_init(struct xsc_core_device *dev); +void xsc_sriov_cleanup(struct xsc_core_device *dev); +int xsc_sriov_attach(struct xsc_core_device *dev); +void xsc_sriov_detach(struct xsc_core_device *dev); +int xsc_core_sriov_configure(struct pci_dev *dev, int num_vfs); +int xsc_sriov_sysfs_init(struct xsc_core_device *dev); +void xsc_sriov_sysfs_cleanup(struct xsc_core_device *dev); +int xsc_create_vfs_sysfs(struct xsc_core_device *dev, int num_vfs); +void xsc_destroy_vfs_sysfs(struct xsc_core_device *dev, int num_vfs); +int xsc_create_vf_group_sysfs(struct xsc_core_device *dev, + u32 group_id, struct kobject *group_kobj); +void xsc_destroy_vf_group_sysfs(struct xsc_core_device *dev, + struct kobject *group_kobj); +u32 xsc_eth_pcie_read32_by_mac_port(struct xsc_core_device *xdev, u32 mac_port, + u32 eth_ip_inter_addr); +void xsc_eth_pcie_write32_by_mac_port(struct xsc_core_device *xdev, u32 mac_port, + u32 eth_ip_inter_addr, u32 val); +struct cpumask *xsc_comp_irq_get_affinity_mask(struct xsc_core_device *dev, int vector); +void mask_cpu_by_node(int node, struct cpumask *dstp); +int xsc_get_link_speed(struct xsc_core_device *dev); +int xsc_chip_type(struct xsc_core_device *dev); +int xsc_eth_restore_nic_hca(struct xsc_core_device *dev); + +#define XSC_ESWITCH_MANAGER(dev) ((dev)->caps.eswitch_manager) + +static inline bool xsc_sriov_is_enabled(struct xsc_core_device *dev) +{ + return 
!!pci_num_vf(dev->pdev);
+}
+
+static inline u16 xsc_core_max_vfs(const struct xsc_core_device *dev)
+{
+	return dev->priv.sriov.max_vfs;
+}
+
+static inline int xsc_core_vfs_num(const struct xsc_core_device *dev)
+{
+	return dev->priv.sriov.num_vfs;
+}
+
+static inline bool xsc_core_is_pf(const struct xsc_core_device *dev)
+{
+	return dev->coredev_type == XSC_COREDEV_PF;
+}
+
+static inline bool xsc_core_is_sf(const struct xsc_core_device *dev)
+{
+	return dev->coredev_type == XSC_COREDEV_SF;
+}
+
+static inline bool xsc_core_is_ecpf(struct xsc_core_device *dev)
+{
+	return dev->caps.embedded_cpu;
+}
+
+/* XSC_ESWITCH_MANAGER() is already defined above; do not redefine it here */
+#define ESW_ALLOWED(esw) ((esw) && XSC_ESWITCH_MANAGER((esw)->dev))
+
+static inline bool
+xsc_core_is_ecpf_esw_manager(const struct xsc_core_device *dev)
+{
+	return dev->caps.embedded_cpu && dev->caps.eswitch_manager;
+}
+
+static inline bool
+xsc_ecpf_vport_exists(const struct xsc_core_device *dev)
+{
+	return xsc_core_is_pf(dev) && dev->caps.ecpf_vport_exists;
+}
+
+static inline bool
+xsc_core_is_vport_manager(const struct xsc_core_device *dev)
+{
+	return dev->caps.vport_group_manager && xsc_core_is_pf(dev);
+}
+
+static inline bool xsc_rl_is_supported(struct xsc_core_device *dev)
+{
+	return false;
+}
+
+/* andes hardware definitions */
+#define HIF_CPM_IDA_DATA_MEM_STRIDE	0x40
+
+#define CPM_IAE_CMD_READ	0
+#define CPM_IAE_CMD_WRITE	1
+
+#define CPM_IAE_ADDR_REG_STRIDE	HIF_CPM_IDA_ADDR_REG_STRIDE
+
+#define CPM_IAE_DATA_MEM_STRIDE	HIF_CPM_IDA_DATA_MEM_STRIDE
+
+#define CPM_IAE_DATA_MEM_MAX_LEN	16
+
+struct iae_cmd {
+	union {
+		struct {
+			u32 iae_idx:HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH;
+			u32 iae_len:HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH;
+			u32 iae_r0w1:HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH;
+		};
+		unsigned int raw_data;
+	};
+};
+
+/* returns the acquired lock index in *iae_idx, or -1 if none is available */
+static inline void acquire_ia_lock(struct xsc_core_device *xdev, int *iae_idx)
+{
+	int lock_val;
+	int lock_vld;
+
+	lock_val = readl(REG_ADDR(xdev, xdev->regs.cpm_get_lock));
+	lock_vld = lock_val >> HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT;
+	if (lock_vld)
+		*iae_idx = lock_val & HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK;
+	else
+		*iae_idx = -1;
+}
+
+#define ACQUIRE_IA_LOCK(bp, iae_idx) \
+	do { \
+		int idx; \
+		acquire_ia_lock(bp, &idx); \
+		iae_idx = idx; \
+	} while (0)
+
+static inline void release_ia_lock(struct xsc_core_device *xdev, int lock_idx)
+{
+	writel(lock_idx, REG_ADDR(xdev, xdev->regs.cpm_put_lock));
+}
+
+#define RELEASE_IA_LOCK(bp, iae_idx) release_ia_lock(bp, iae_idx)
+
+static inline void ia_write_data(struct xsc_core_device *xdev, u32 *ptr, int n, int iae_idx)
+{
+	int i;
+	int offset = xdev->regs.cpm_data_mem + (iae_idx) * CPM_IAE_DATA_MEM_STRIDE;
+
+	for (i = 0; i < n; i++) {
+		writel(*(ptr++), REG_ADDR(xdev, offset));
+		offset += sizeof(*ptr);
+	}
+}
+
+static inline void ia_read_data(struct xsc_core_device *xdev, u32 *ptr, int n, int iae_idx)
+{
+	int i;
+	int offset = xdev->regs.cpm_data_mem + (iae_idx) * CPM_IAE_DATA_MEM_STRIDE;
+
+	for (i = 0; i < n; i++) {
+		*ptr++ = readl(REG_ADDR(xdev, offset));
+		offset += sizeof(*ptr);
+	}
+}
+
+static inline void ia_write_reg_addr(struct xsc_core_device *xdev, u32 reg, int iae_idx)
+{
+	int offset = xdev->regs.cpm_addr + (iae_idx) * CPM_IAE_ADDR_REG_STRIDE;
+
+	writel(reg, REG_ADDR(xdev, offset));
+}
+
+static inline void initiate_ia_cmd(struct xsc_core_device *xdev, int iae_idx, int length, int r0w1)
+{
+	struct iae_cmd cmd;
+	int addr = xdev->regs.cpm_cmd;
+
+	cmd.iae_r0w1 = r0w1;
+	cmd.iae_len = 
length - 1; + cmd.iae_idx = iae_idx; + writel(cmd.raw_data, REG_ADDR(xdev, addr)); +} + +static inline void initiate_ia_write_cmd(struct xsc_core_device *xdev, int iae_idx, int length) +{ + initiate_ia_cmd(xdev, iae_idx, length, CPM_IAE_CMD_WRITE); +} + +static inline void initiate_ia_read_cmd(struct xsc_core_device *xdev, int iae_idx, int length) +{ + initiate_ia_cmd(xdev, iae_idx, length, CPM_IAE_CMD_READ); +} + +static inline void wait_for_complete(struct xsc_core_device *xdev, int iae_idx) +{ + while ((readl(REG_ADDR(xdev, xdev->regs.cpm_busy)) & (1 << iae_idx))) + ; +} + +static inline void ia_write_reg_mr(struct xsc_core_device *xdev, u32 reg, + u32 *ptr, int n, int idx) +{ + ia_write_data(xdev, ptr, n, idx); + ia_write_reg_addr(xdev, reg, idx); + initiate_ia_write_cmd(xdev, idx, n); +} + +#define IA_WRITE_REG_MR(bp, reg, ptr, n, idx) ia_write_reg_mr(bp, reg, ptr, n, idx) + +static inline void ia_write(struct xsc_core_device *xdev, u32 reg, u32 *ptr, int n) +{ + int iae_idx; + + acquire_ia_lock(xdev, &iae_idx); + ia_write_data(xdev, ptr, n, iae_idx); + ia_write_reg_addr(xdev, reg, iae_idx); + initiate_ia_write_cmd(xdev, iae_idx, n); + release_ia_lock(xdev, iae_idx); +} + +#define IA_WRITE(bp, reg, ptr, n) ia_write(bp, reg, ptr, n) + +static inline void ia_read(struct xsc_core_device *xdev, u32 reg, u32 *ptr, int n) +{ + int iae_idx; + + acquire_ia_lock(xdev, &iae_idx); + ia_write_reg_addr(xdev, reg, iae_idx); + initiate_ia_read_cmd(xdev, iae_idx, n); + wait_for_complete(xdev, iae_idx); + ia_read_data(xdev, ptr, n, iae_idx); + release_ia_lock(xdev, iae_idx); +} + +#define IA_READ(bp, reg, ptr, n) ia_read(bp, reg, ptr, n) + +static inline u32 reg_read32(struct xsc_core_device *dev, u32 offset) +{ + u32 val = 0; + + if (xsc_core_is_pf(dev)) + val = readl(REG_ADDR(dev, offset)); + else + IA_READ(dev, offset, &val, 1); + + return val; +} + +static inline void reg_write32(struct xsc_core_device *dev, u32 offset, u32 val) +{ + u32 *ptr = &val; + + if (xsc_core_is_pf(dev)) + writel(val, REG_ADDR(dev, offset)); + else + IA_WRITE(dev, offset, ptr, 1); +} + +#define REG_RD32(dev, offset) reg_read32(dev, offset) +#define REG_WR32(dev, offset, val) reg_write32(dev, offset, val) + +static inline unsigned long bdf_to_key(unsigned int domain, unsigned int bus, unsigned int devfn) +{ + return ((unsigned long)domain << 32) | ((bus & 0xff) << 16) | (devfn & 0xff); +} + +static inline void +funcid_to_pf_vf_index(struct xsc_caps *caps, u16 func_id, u8 *pf_no, u8 *pf_id, u16 *vf_id) +{ + if (func_id >= caps->pf0_vf_funcid_base && func_id <= caps->pf0_vf_funcid_top) { + *pf_id = 0; + *pf_no = caps->pcie_host; + *vf_id = func_id - caps->pf0_vf_funcid_base; + } else if (func_id >= caps->pf1_vf_funcid_base && func_id <= caps->pf1_vf_funcid_top) { + *pf_id = 1; + *pf_no = caps->pcie_host; + *vf_id = func_id - caps->pf1_vf_funcid_base; + } else if (func_id >= caps->pcie0_pf_funcid_base && func_id <= caps->pcie0_pf_funcid_top) { + *pf_id = func_id - caps->pcie0_pf_funcid_base; + *pf_no = 0; + *vf_id = -1; + } else { + *pf_id = func_id - caps->pcie1_pf_funcid_base; + *pf_no = 1; + *vf_id = -1; + } +} + +static inline bool +is_support_rdma(struct xsc_core_device *dev) +{ + if (!dev) + return false; + + if (dev->caps.hw_feature_flag & XSC_HW_RDMA_SUPPORT) + return true; + + return false; +} + +static inline bool is_support_rdma_cm(struct xsc_core_device *dev) +{ + return dev->caps.hw_feature_flag & XSC_HW_RDMA_CM_SUPPORT; +} + +static inline bool +is_support_pfc_prio_statistic(struct xsc_core_device *dev) +{ + if 
(!dev) + return false; + + if (dev->caps.hw_feature_flag & XSC_HW_PFC_PRIO_STATISTIC_SUPPORT) + return true; + + return false; +} + +static inline bool +is_support_pfc_stall_stats(struct xsc_core_device *dev) +{ + if (!dev) + return false; + + if (dev->caps.hw_feature_flag & XSC_HW_PFC_STALL_STATS_SUPPORT) + return true; + + return false; +} + +static inline bool is_support_hw_pf_stats(struct xsc_core_device *dev) +{ + return xsc_core_is_pf(dev); +} + +static inline void xsc_set_user_mode(struct xsc_core_device *dev, u8 mode) +{ + dev->user_mode = mode; +} + +static inline u8 xsc_get_user_mode(struct xsc_core_device *dev) +{ + return dev->user_mode; +} + +void xsc_pci_exit(void); + +void xsc_remove_eth_driver(void); + +void xsc_remove_rdma_driver(void); + +void xsc_set_exit_flag(void); +bool xsc_get_exit_flag(void); +bool exist_incomplete_qp_flush(void); +#endif /* XSC_CORE_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h new file mode 100644 index 0000000000000000000000000000000000000000..9da4396d66eedabd4b2461c3460cdbc781d9aeb9 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ESWITCH_H +#define XSC_ESWITCH_H + +enum { + XSC_ESWITCH_NONE, + XSC_ESWITCH_LEGACY, + XSC_ESWITCH_OFFLOADS +}; + +enum { + REP_ETH, + REP_IB, + NUM_REP_TYPES, +}; + +enum { + REP_UNREGISTERED, + REP_REGISTERED, + REP_LOADED, +}; + +enum xsc_switchdev_event { + XSC_SWITCHDEV_EVENT_PAIR, + XSC_SWITCHDEV_EVENT_UNPAIR, +}; + +enum { + SET_VLAN_STRIP = BIT(0), + SET_VLAN_INSERT = BIT(1), + CLR_VLAN_STRIP = BIT(2), + CLR_VLAN_INSERT = BIT(3), +}; + +#endif /* XSC_ESWITCH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h new file mode 100644 index 0000000000000000000000000000000000000000..97cbded4a2f20bb98a7971de5e6420979e9b1af4 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_FS_H +#define XSC_FS_H + +#include +#include +#include + +enum xsc_list_type { + XSC_NVPRT_LIST_TYPE_UC = 0x0, + XSC_NVPRT_LIST_TYPE_MC = 0x1, + XSC_NVPRT_LIST_TYPE_VLAN = 0x2, + XSC_NVPRT_LIST_TYPE_VLAN_OFFLOAD = 0x03, +}; + +enum xsc_vlan_rule_type { + XSC_VLAN_RULE_TYPE_UNTAGGED, + XSC_VLAN_RULE_TYPE_ANY_CTAG_VID, + XSC_VLAN_RULE_TYPE_ANY_STAG_VID, + XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, + XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, +}; + +struct xsc_vlan_table { + DECLARE_BITMAP(active_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_svlans, VLAN_N_VID); + DECLARE_BITMAP(active_outer_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_outer_svlans, VLAN_N_VID); + u8 cvlan_filter_disabled; +}; + +struct xsc_l2_table { + struct hlist_head netdev_uc[XSC_L2_ADDR_HASH_SIZE]; + struct hlist_head netdev_mc[XSC_L2_ADDR_HASH_SIZE]; + u8 broadcast_enabled; + u8 allmulti_enabled; + u8 promisc_enabled; +}; + +struct xsc_flow_steering { + struct xsc_vlan_table vlan; + struct xsc_l2_table l2; +}; + +int xsc_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +int xsc_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +void xsc_set_rx_mode_work(struct work_struct *work); +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h new file mode 100644 index 0000000000000000000000000000000000000000..d1fa8b207607a2852951647c61eb04d0fcce7b81 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h @@ -0,0 +1,373 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_HSI_H +#define XSC_HSI_H + +#include + +#include +#include +#include "common/xsc_macro.h" + +#ifdef MSIX_SUPPORT +#else +#define NEED_CREATE_RX_THREAD +#endif + +#define PAGE_SHIFT_4K 12 +#define PAGE_SIZE_4K (_AC(1, UL) << PAGE_SHIFT_4K) +#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1)) + +#ifndef EQ_NUM_MAX +#define EQ_NUM_MAX 1024 +#endif +#ifndef EQ_SIZE_MAX +#define EQ_SIZE_MAX 1024 +#endif + +#define XSC_RSS_INDIR_TBL_S 256 +#define XSC_MAX_TSO_PAYLOAD 0x10000/*64kb*/ + +#define MAX_BOARD_NUM 32 + +#define DMA_LO_LE(x) __cpu_to_le32(lower_32_bits(x)) +#define DMA_HI_LE(x) __cpu_to_le32(upper_32_bits(x)) +#define DMA_REGPAIR_LE(x, val) do { \ + (x).hi = DMA_HI_LE((val)); \ + (x).lo = DMA_LO_LE((val)); \ + } while (0) + +#define WR_LE_16(x, val) (x = __cpu_to_le16(val)) +#define WR_LE_32(x, val) (x = __cpu_to_le32(val)) +#define WR_LE_64(x, val) (x = __cpu_to_le64(val)) +#define WR_LE_R64(x, val) (DMA_REGPAIR_LE(x, val)) +#define WR_BE_32(x, val) (x = __cpu_to_be32(val)) + +#define RD_LE_16(x) __le16_to_cpu(x) +#define RD_LE_32(x) __le32_to_cpu(x) +#define RD_BE_32(x) __be32_to_cpu(x) + +#define WR_REG(addr, val) mmio_write64_le(addr, val) +#define RD_REG(addr) mmio_read64_le(addr) + +#define XSC_MPT_MAP_EN 0 + +/* FIXME: 32-byte alignment for SW descriptors for Amber for now */ +#define XSC_DESC_ALIGNMENT 32 + +/* each ds holds one fragment in skb */ +#define XSC_MAX_RX_FRAGS 4 +#define XSC_RX_FRAG_SZ_ORDER 0 +#define XSC_RX_FRAG_SZ (PAGE_SIZE << XSC_RX_FRAG_SZ_ORDER) +#define DEFAULT_FRAG_SIZE (2048) + +/* message opcode */ +enum { + XSC_MSG_OPCODE_SEND = 0, + XSC_MSG_OPCODE_RDMA_WRITE = 1, + XSC_MSG_OPCODE_RDMA_READ = 2, + XSC_MSG_OPCODE_MAD = 3, + XSC_MSG_OPCODE_RDMA_ACK = 4, + XSC_MSG_OPCODE_RDMA_ACK_READ = 5, + XSC_MSG_OPCODE_RDMA_CNP = 6, + XSC_MSG_OPCODE_RAW = 7, + XSC_MSG_OPCODE_VIRTIO_NET = 
8, + XSC_MSG_OPCODE_VIRTIO_BLK = 9, + XSC_MSG_OPCODE_RAW_TPE = 10, + XSC_MSG_OPCODE_INIT_QP_REQ = 11, + XSC_MSG_OPCODE_INIT_QP_RSP = 12, + XSC_MSG_OPCODE_INIT_PATH_REQ = 13, + XSC_MSG_OPCODE_INIT_PATH_RSP = 14, +}; + +/* TODO: sw cqe opcode*/ +enum { + XSC_OPCODE_RDMA_REQ_SEND = 0, + XSC_OPCODE_RDMA_REQ_SEND_IMMDT = 1, + XSC_OPCODE_RDMA_RSP_RECV = 2, + XSC_OPCODE_RDMA_RSP_RECV_IMMDT = 3, + XSC_OPCODE_RDMA_REQ_WRITE = 4, + XSC_OPCODE_RDMA_REQ_WRITE_IMMDT = 5, + XSC_OPCODE_RDMA_RSP_WRITE_IMMDT = 6, + XSC_OPCODE_RDMA_REQ_READ = 7, + XSC_OPCODE_RDMA_REQ_ERROR = 8, + XSC_OPCODE_RDMA_RSP_ERROR = 9, + XSC_OPCODE_RDMA_CQE_ERROR = 10, + XSC_OPCODE_RDMA_MAD_REQ_SEND, + XSC_OPCODE_RDMA_MAD_RSP_RECV, +}; + +enum { + XSC_REQ = 0, + XSC_RSP = 1, +}; + +enum { + XSC_WITHOUT_IMMDT = 0, + XSC_WITH_IMMDT = 1, +}; + +enum { + XSC_ERR_CODE_NAK_RETRY = 0x40, + XSC_ERR_CODE_NAK_OPCODE = 0x41, + XSC_ERR_CODE_NAK_MR = 0x42, + XSC_ERR_CODE_NAK_OPERATION = 0x43, + XSC_ERR_CODE_NAK_RNR = 0x44, + XSC_ERR_CODE_LOCAL_MR = 0x45, + XSC_ERR_CODE_LOCAL_LEN = 0x46, + XSC_ERR_CODE_LOCAL_OPCODE = 0x47, + XSC_ERR_CODE_CQ_OVER_FLOW = 0x48, + XSC_ERR_CODE_STRG_ACC_GEN_CQE = 0x4c, + XSC_ERR_CODE_CQE_ACC = 0x4d, + XSC_ERR_CODE_FLUSH = 0x4e, + XSC_ERR_CODE_MALF_WQE_HOST = 0x50, + XSC_ERR_CODE_MALF_WQE_INFO = 0x51, + XSC_ERR_CODE_MR_NON_NAK = 0x52, + XSC_ERR_CODE_OPCODE_GEN_CQE = 0x61, + XSC_ERR_CODE_MANY_READ = 0x62, + XSC_ERR_CODE_LEN_GEN_CQE = 0x63, + XSC_ERR_CODE_MR = 0x65, + XSC_ERR_CODE_MR_GEN_CQE = 0x66, + XSC_ERR_CODE_OPERATION = 0x67, + XSC_ERR_CODE_MALF_WQE_INFO_GEN_NAK = 0x68, +}; + +/* QP type */ +enum { + XSC_QUEUE_TYPE_RDMA_RC = 0, + XSC_QUEUE_TYPE_RDMA_MAD = 1, + XSC_QUEUE_TYPE_RAW = 2, + XSC_QUEUE_TYPE_VIRTIO_NET = 3, + XSC_QUEUE_TYPE_VIRTIO_BLK = 4, + XSC_QUEUE_TYPE_RAW_TPE = 5, + XSC_QUEUE_TYPE_RAW_TSO = 6, + XSC_QUEUE_TYPE_RAW_TX = 7, + XSC_QUEUE_TYPE_INVALID = 0xFF, +}; + +/* CQ type */ +enum { + XSC_CQ_TYPE_NORMAL = 0, + XSC_CQ_TYPE_VIRTIO = 1, +}; + +enum xsc_qp_state { + XSC_QP_STATE_RST = 0, + XSC_QP_STATE_INIT = 1, + XSC_QP_STATE_RTR = 2, + XSC_QP_STATE_RTS = 3, + XSC_QP_STATE_SQER = 4, + XSC_QP_STATE_SQD = 5, + XSC_QP_STATE_ERR = 6, + XSC_QP_STATE_SQ_DRAINING = 7, + XSC_QP_STATE_SUSPENDED = 9, + XSC_QP_NUM_STATE +}; + +enum { + XSC_SEND_SEG_MAX = 32, + XSC_BASE_WQE_SHIFT = 4, + XSC_SEND_SEG_NUM = 4, + XSC_SEND_WQE_SHIFT = 6, + XSC_CTRL_SEG_NUM = 1, + XSC_RADDR_SEG_NUM = 1, +}; + +enum { + XSC_RECV_SEG_MAX = 4, + XSC_RECV_SEG_NUM = 1, + XSC_RECV_WQE_SHIFT = 4, +}; + +enum { + XSC_INLINE_SIZE_MAX = 15, +}; + +/* Descriptors that are allocated by SW and accessed by HW, 32-byte aligned + * this is to keep descriptor structures packed + */ +struct regpair { + __le32 lo; + __le32 hi; +}; + +struct xsc_cqe { + union { + u8 msg_opcode; + struct { + u8 error_code:7; + u8 is_error:1; + }; + }; + __le32 qp_id:15; + u8 rsv1:1; + u8 se:1; + u8 has_pph:1; + u8 type:1; + u8 with_immdt:1; + u8 csum_err:4; + __le32 imm_data; + __le32 msg_len; + __le32 vni; + __le64 ts:48; + __le16 wqe_id; + __le16 rsv[3]; + __le16 rsv2:15; + u8 owner:1; +}; + +/* CQ doorbell */ +union xsc_cq_doorbell { + struct{ + u32 cq_next_cid:16; + u32 cq_id:15; + u32 arm:1; + }; + u32 val; +}; + +/* EQE TBD */ +struct xsc_eqe { + u8 type; + u8 sub_type; + __le16 queue_id:15; + u8 rsv1:1; + u8 err_code; + u8 rsvd[2]; + u8 rsv2:7; + u8 owner:1; +}; + +/* EQ doorbell */ +union xsc_eq_doorbell { + struct{ + u32 eq_next_cid : 11; + u32 eq_id : 11; + u32 arm : 1; + }; + u32 val; +}; + +/*for beryl tcam table .begin*/ +#define XSC_TBM_PCT_DW_SIZE_MAX 20 
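+/* a PCT entry spans at most XSC_TBM_PCT_DW_SIZE_MAX dwords; tcam
+ * registers are laid out at XSC_TCAM_REG_ADDR_STRIDE-byte granularity
+ */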
+#define XSC_TCAM_REG_ADDR_STRIDE 4 + +enum xsc_tbm_tcam_type { + XSC_TBM_TCAM_PCT = 0, + XSC_TBM_TCAM_PRS_STAGE0, + XSC_TBM_TCAM_PRS_STAGE1, + XSC_TBM_TCAM_PRS_STAGE2, +}; + +enum xsc_tbm_tcam_oper { + XSC_TCAM_OP_X_WRITE = 0, + XSC_TCAM_OP_Y_WRITE, + XSC_TCAM_OP_ACTION_WRITE, + XSC_TCAM_OP_X_READ, + XSC_TCAM_OP_Y_READ, + XSC_TCAM_OP_ACTION_READ, + XSC_TCAM_OP_TCAM_FLUSH, + XSC_TCAM_OP_ACTION_FLUSH, + XSC_TCAM_OP_CPU_SEARCH, + XSC_TCAM_OP_LONG_X_WRT, + XSC_TCAM_OP_LONG_Y_WRT +}; + +enum xsc_tbm_prs_stage_encode { + XSC_PRS_STAGE0_HDR_TYPE_NONE = 0x00, + XSC_PRS_STAGE0_HDR_TYPE_ETH0 = 0x01, + XSC_PRS_STAGE1_HDR_TYPE_NONE = 0x10, + XSC_PRS_STAGE1_HDR_TYPE_RSV = 0x11, + XSC_PRS_STAGE1_HDR_TYPE_IPV4 = 0x12, + XSC_PRS_STAGE1_HDR_TYPE_IPV6 = 0x13, + XSC_PRS_STAGE2_HDR_TYPE_NONE = 0x20, + XSC_PRS_STAGE2_HDR_TYPE_TCP = 0x21, + XSC_PRS_STAGE2_HDR_TYPE_UDP = 0x22, + XSC_PRS_STAGE2_HDR_TYPE_GRE = 0x23, + XSC_PRS_STAGE2_HDR_TYPE_RSV = 0x24, + XSC_PRS_STAGE2_HDR_TYPE_IFA_TCP = 0x25, + XSC_PRS_STAGE2_HDR_TYPE_IFA_UDP = 0x26, + XSC_PRS_STAGE2_HDR_TYPE_IFA_GRE = 0x27, + XSC_PRS_STAGE6_HDR_TYPE_ICMP = 0x63, + XSC_PRS_STAGEX_HDR_TYPE_PAYLOAD = 0xa0, + XSC_PRS_STAGEX_HDR_TYPE_BTH = 0xa1, +}; + +enum xsc_tbm_prs_eth_hdr_type_encode { + ETH_HDR_TYPE_MAC0 = 0x0, + ETH_HDR_TYPE_MAC0_VLANA = 0x2, + ETH_HDR_TYPE_MAC0_VLANA_VLANB = 0x3, +}; + +enum xsc_tbm_pct_pkttype { + XSC_PCT_RDMA_NORMAL = 0x0, + XSC_PCT_RDMA_CNP, + XSC_PCT_RDMA_MAD, + XSC_PCT_RAW, + XSC_PCT_RAW_TPE, + XSC_PCT_VIRTIO_NET_TO_HOST, + XSC_PCT_SOC_WITH_PPH, +}; + +enum xsc_tbm_pct_inport { + XSC_PCT_PORT_NIF0 = 0x0, + XSC_PCT_PORT_NIF1, + XSC_PCT_PORT_PCIE0_PF0, + XSC_PCT_PORT_PCIE0_PF1, + XSC_PCT_PORT_PCIE1_PF0, +}; + +/*for beryl tcam table .end*/ + +/* Size of WQE */ +#define XSC_SEND_WQE_SIZE BIT(XSC_SEND_WQE_SHIFT) +#define XSC_RECV_WQE_SIZE BIT(XSC_RECV_WQE_SHIFT) + +union xsc_db_data { + struct { + __le32 sq_next_pid:16; + __le32 sqn:15; + __le32:1; + }; + struct { + __le32 rq_next_pid:13; + __le32 rqn:15; + __le32:4; + }; + struct { + __le32 cq_next_cid:16; + __le32 cqn:15; + __le32 solicited:1; + + }; + __le32 raw_data; +}; + +#define XSC_BROADCASTID_MAX 2 +#define XSC_TBM_BOMT_DESTINFO_SHIFT (XSC_BROADCASTID_MAX / 2) + +enum { + XSC_EQ_VEC_ASYNC = 0, + XSC_VEC_CMD = 1, + XSC_VEC_CMD_EVENT = 2, + XSC_DMA_READ_DONE_VEC = 3, + XSC_EQ_VEC_COMP_BASE, +}; + +struct rxe_bth { + u8 opcode; + u8 flags; + __be16 pkey; + __be32 qpn; + __be32 apsn; +}; + +struct rxe_deth { + __be32 qkey; + __be32 sqp; +}; + +#endif /* XSC_HSI_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..e2355cf91a02da774ab5c43b030936ca31b300f7 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h @@ -0,0 +1,317 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_IOCTL_H +#define XSC_IOCTL_H + +#include +#include + +/* Documentation/ioctl/ioctl-number.txt */ +#define XSC_IOCTL_MAGIC (0x1b) /* TBD */ +#define XSC_IOCTL_CMDQ \ + _IOWR(XSC_IOCTL_MAGIC, 1, struct xsc_ioctl_hdr) +#define XSC_IOCTL_DRV_GET \ + _IOR(XSC_IOCTL_MAGIC, 2, struct xsc_ioctl_hdr) +#define XSC_IOCTL_DRV_SET \ + _IOWR(XSC_IOCTL_MAGIC, 3, struct xsc_ioctl_hdr) +#define XSC_IOCTL_MEM \ + _IOWR(XSC_IOCTL_MAGIC, 4, struct xsc_ioctl_hdr) +#define XSC_IOCTL_CMDQ_RAW \ + _IOWR(XSC_IOCTL_MAGIC, 5, struct xsc_ioctl_hdr) +#define XSC_IOCTL_USER_MODE \ + _IOWR(XSC_IOCTL_MAGIC, 8, struct xsc_ioctl_hdr) + +#define XSC_IOCTL_CHECK_FILED 0x01234567 +enum { + XSC_IOCTL_OP_GET_LOCAL, + XSC_IOCTL_OP_GET_VF_INFO, + XSC_IOCTL_OP_GET_CONTEXT, + XSC_IOCTL_OP_GET_INFO_BY_BDF, + XSC_IOCTL_OP_GET_MAX +}; + +enum { + XSC_IOCTL_GET_PHY_INFO = 0x100, + XSC_IOCTL_GET_FORCE_PCP = 0x101, + XSC_IOCTL_GET_FORCE_DSCP = 0x102, + XSC_IOCTL_GET_CMA_PCP = 0x103, + XSC_IOCTL_GET_CMA_DSCP = 0x104, + XSC_IOCTL_GET_CONTEXT = 0x105, + XSC_IOCTL_GAT_MAX +}; + +enum { + XSC_IOCTL_SET_QP_STATUS = 0x200, + XSC_IOCTL_SET_FORCE_PCP = 0x201, + XSC_IOCTL_SET_FORCE_DSCP = 0x202, + XSC_IOCTL_SET_CMA_PCP = 0x203, + XSC_IOCTL_SET_CMA_DSCP = 0x204, + XSC_IOCTL_SET_MAX +}; + +enum { + XSC_IOCTL_MEM_ALLOC = 0x300, + XSC_IOCTL_MEM_FREE, + XSC_IOCTL_MEM_MAX +}; + +enum { + XSC_IOCTL_GET_VECTOR_MATRIX = 0x400, + XSC_IOCTL_SET_LOG_LEVEL = 0x401, + XSC_IOCTL_SET_CMD_VERBOSE = 0x402, + XSC_IOCTL_DRIVER_MAX +}; + +enum { + XSC_IOCTL_OPCODE_ENABLE_USER_MODE = 0x600, +}; + +enum xsc_flow_tbl_id { + XSC_FLOW_TBL_IPAT, //IN_PORT_ATTR + XSC_FLOW_TBL_IPVLANMT, //IN_PORT_VLAN_MEMBER + XSC_FLOW_TBL_IN_VLAN_M, //IN_VLAN_MAPPING + XSC_FLOW_TBL_HOST_VLAN_M, //HOST_VLAN_MAPPING + XSC_FLOW_TBL_PCT_V4, //PACKET_CLASSIFIER_V4 + XSC_FLOW_TBL_PCT_V6, //PACKET_CLASSIFIER_V6 + XSC_FLOW_TBL_WCT_KP, //WCT_KEY_PROFILE + XSC_FLOW_TBL_WCT, //WILDCARD_TBL + XSC_FLOW_TBL_FKP, //FLOW_KEY_PROFILE + XSC_FLOW_TBL_EM, //EXACT_MATCH + XSC_FLOW_TBL_FAT, //FLOW_ACTION + XSC_FLOW_TBL_TNL_ECP, //TUNNEL_ENCAP + XSC_FLOW_TBL_ERP_HDR, //ERSPAN_HDR_INFO + XSC_FLOW_TBL_MIR_IDX, //MIRROR_INDEX + XSC_FLOW_TBL_MIR, //MIRROR_TBL + XSC_FLOW_TBL_MIR_HDR, //ENCAP_MIRROR_HDR + XSC_FLOW_TBL_VER, //VERSION_TBL + XSC_FLOW_TBL_LCMT, //LCMT_TBL + XSC_FLOW_TBL_CT, //CONN_TRACK + XSC_FLOW_TBL_EPAT, //EG_PORT_ATTR + XSC_FLOW_TBL_OPVLANMT, //OUT_PORT_VLAN_MEMBER + XSC_FLOW_TBL_RSS_HASH, //RSS_HASH + XSC_FLOW_TBL_MDF_MAC, //MODIFY_MAC + XSC_FLOW_TBL_MDF_IP, //MODIFY_IP + XSC_FLOW_TBL_MDF_TPID, //MODIFY_TPID + XSC_FLOW_TBL_ECP_HDR, //ENCAP_HDR + XSC_FLOW_TBL_ECP_MAC, //ENCAP_MAC + XSC_FLOW_TBL_ECP_IP, //ENCAP_IP + XSC_FLOW_TBL_ECP_TPID, //ENCAP_TPID + XSC_FLOW_TBL_ECP_TP_TNL, //ENCAP_TP_TUNNEL + XSC_FLOW_TBL_ECP_DPORT, //ENCAP_DPORT + XSC_FLOW_TBL_VFSO, //VF_START_OFST + XSC_FLOW_TBL_IACL, //INGRESS_ACL + XSC_FLOW_TBL_IACL_CNT, //INGRESS_ACL_COUNTER + XSC_FLOW_TBL_EACL, //EGRESS_ACL + XSC_FLOW_TBL_EACL_CNT, //EGRESS_ACL_COUNTER + XSC_FLOW_TBL_EM_EXT, //EXACT_MATCH_EXT + XSC_FLOW_TBL_EM_EXT_2M_HASH_ADR, //EM_EXT_2M_HASH_ADDR + XSC_FLOW_TBL_EM_EXT_1G_HASH_ADR, //EM_EXT_1G_HASH_ADDR + XSC_FLOW_TBL_EM_EXT_2M_KEY_ADR, //EM_EXT_2M_KEY_ADDR + XSC_FLOW_TBL_EM_EXT_1G_KEY_ADR, //EM_EXT_1G_KEY_ADDR + XSC_FLOW_TBL_PG_QP_SET_ID, //PG_QP_SET_ID + XSC_FLOW_DIR_REGISTER, //DIR_REGISTER + XSC_FLOW_INDIR_REGISTER, //INDIR_REGISTER + XSC_FLOW_TBL_BM_PCT_V4, //BIM MATCH PACKET_CLASSIFIER_V4 + XSC_FLOW_TBL_BM_PCT_V6, //BIM MATCH PACKET_CLASSIFIER_V6 + XSC_FLOW_TBL_BM_WCT, //BIM MATCH WILDCARD_TBL + 
XSC_FLOW_TBL_BM_IACL,		//BIM MATCH INGRESS_ACL
+	XSC_FLOW_TBL_BMT,		//BROADCAST MEMBER
+	XSC_FLOW_TBL_BOMT,		//BROADCAST OUTPUT
+	XSC_FLOW_TBL_PST,		//PST
+	XSC_FLOW_DMA_WR,		//DMA WRITE
+	XSC_FLOW_DMA_RD,		//DMA READ
+	XSC_FLOW_PARSER_TBL,		//PARSER_TBL
+	XSC_FLOW_UDF_AWARE_TBL,		//UDF_AWARE_TBL
+	XSC_FLOW_UDF_UNAWARE_TBL,	//UDF_UNAWARE_TBL
+	XSC_FLOW_MTR_CTRL_TBL,		//MTR_CTRL_TBL
+	XSC_FLOW_MTR_FLOW_PD,		//MTR_FLOW_PD
+	XSC_FLOW_MTR_VPORT_PD,		//MTR_VPORT_PD
+	XSC_FLOW_MTR_VPG_PD,		//MTR_VPG_PD
+	XSC_FLOW_MTR_FLOW_SCAN,		//MTR_FLOW_SCAN
+	XSC_FLOW_MTR_VPORT_SCAN,	//MTR_VPORT_SCAN
+	XSC_FLOW_MTR_VPG_SCAN,		//MTR_VPG_SCAN
+	XSC_FLOW_MTR_MAPPING,		//MTR_MAPPING
+	XSC_FLOW_PRG_ACT_IDX,		//PRG_ACT_INDEX
+	XSC_FLOW_PRG_ACT0,		//PRG_ACT0
+	XSC_FLOW_PRG_ACT1,		//PRG_ACT1
+	XSC_FLOW_PRG_ACT2,		//PRG_ACT2
+	XSC_FLOW_NIF_PRI_CNT,		//NIF_PRI_CNT
+	XSC_FLOW_PRS2CLSF_SRC_PORT_CNT,	//PRS2CLSF_SRC_PORT_CNT
+	XSC_FLOW_QUEUE_RX_CNT,		//QUEUE_RX_CNT
+	XSC_FLOW_QUEUE_TX_CNT,		//QUEUE_TX_CNT
+	XSC_FLOW_MAC_LAG_PORT_SEL,	//MAC_LAG_PORT_SEL
+	XSC_FLOW_EXT_CT_CLR,		//EXT_CT_CLR
+	XSC_FLOW_IP_TBL_CFG,		//IP_TBL_CFG
+	XSC_FLOW_RSS_HASH_INIT_KEY_CFG,	//RSS_HASH_INIT_KEY_CFG
+	XSC_FLOW_QP_ID_BASE_CFG,	//QP_ID_BASE_CFG
+	XSC_FLOW_PSS_INFO,		//CLSF_CTRL_PSS_INFO
+	XSC_FLOW_SNAPSHOT,		//SNAPSHOT
+	XSC_FLOW_PSS_MATCH_KEY,		//PSS_MATCH_KEY
+	XSC_FLOW_PSS_CLR,		//PSS_CLEAR
+	XSC_FLOW_PSS_START,		//PSS_START
+	XSC_FLOW_PSS_DONE,		//PSS_DONE
+	XSC_FLOW_MAC_PORT_MTU,		//MAC_PORT_MTU
+	XSC_FLOW_ECP_PKT_LEN_INC,	//ECP_PKT_LEN_INC
+	XSC_FLOW_TCP_FLAGS_CFG,		//TCP_FLAGS_CFG
+	XSC_FLOW_DBG_CNT,		//DBG_CNT
+	XSC_FLOW_PRS_REC_PORT_UDF_SEL,
+	XSC_FLOW_TBL_MAX
+};
+
+enum xsc_other_tbl_id {
+	XSC_OTHER_TBL_MAX
+};
+
+enum xsc_ioctl_op {
+	XSC_IOCTL_OP_ADD,
+	XSC_IOCTL_OP_DEL,
+	XSC_IOCTL_OP_GET,
+	XSC_IOCTL_OP_CLR,
+	XSC_IOCTL_OP_MOD,
+	XSC_IOCTL_OP_MAX
+};
+
+struct xsc_ioctl_mem_info {
+	u32 mem_num;
+	u32 size;
+	u64 vir_addr;
+	u64 phy_addr;
+};
+
+/* get phy info */
+struct xsc_ioctl_get_phy_info_attr {
+	u16 bdf;
+	u16 rsvd;
+};
+
+struct xsc_ioctl_qp_range {
+	u16 opcode;
+	int num;
+	u32 qpn;
+};
+
+struct xsc_ioctl_get_phy_info_res {
+	u32 domain;
+	u32 bus;
+	u32 devfn;
+	u32 pcie_no;	//pcie number
+	u32 func_id;	//pf glb func id
+	u32 pcie_host;	//host pcie number
+	u32 mac_phy_port;	//mac port
+	u32 funcid_to_logic_port_off;
+	u16 lag_id;
+	u16 raw_qp_id_base;
+	u16 raw_rss_qp_id_base;
+	u16 pf0_vf_funcid_base;
+	u16 pf0_vf_funcid_top;
+	u16 pf1_vf_funcid_base;
+	u16 pf1_vf_funcid_top;
+	u16 pcie0_pf_funcid_base;
+	u16 pcie0_pf_funcid_top;
+	u16 pcie1_pf_funcid_base;
+	u16 pcie1_pf_funcid_top;
+	u16 lag_port_start;
+	u16 raw_tpe_qp_num;
+	int send_seg_num;
+	int recv_seg_num;
+	u8 on_chip_tbl_vld;
+	u8 dma_rw_tbl_vld;
+	u8 pct_compress_vld;
+	u32 chip_version;
+	u32 hca_core_clock;
+	u8 mac_bit;
+	u8 esw_mode;
+	u32 board_id;
+};
+
+struct xsc_ioctl_get_vf_info_res {
+	u16 vf_id;	//start from 1, 0 is reserved for pf
+	u16 phy_port;	//pcie0=0, pcie1=1
+	u16 pf_id;	//pf0=0, pf1=1
+	u32 func_id;
+	u32 logic_port;
+};
+
+struct xsc_alloc_ucontext_req {
+	u32 domain;
+	u32 bus;
+	u32 devfn;
+};
+
+struct xsc_ioctl_force_pcp {
+	int pcp;
+};
+
+struct xsc_ioctl_force_dscp {
+	int dscp;
+};
+
+struct xsc_alloc_ucontext_resp {
+	int max_cq;
+	int max_qp;
+	u32 max_rwq_indirection_table_size;
+	u64 qpm_tx_db;
+	u64 qpm_rx_db;
+	u64 cqm_next_cid_reg;
+	u64 cqm_armdb;
+	u32 send_ds_num;
+	u32 recv_ds_num;
+	u32 send_ds_shift;
+	u32 recv_ds_shift;
+	u32 glb_func_id;
+	u32 max_wqes;
+};
+
+struct xsc_ioctl_cma_pcp {
+	int pcp;
+};
+
+struct xsc_ioctl_cma_dscp {
+	int 
dscp;
+};
+
+struct xsc_ioctl_set_debug_info {
+	unsigned int log_level;
+	unsigned int cmd_verbose;
+};
+
+struct xsc_ioctl_user_mode_attr {
+	u8 enable;
+};
+
+/* type-value */
+struct xsc_ioctl_data_tl {
+	u16 table;	/* table id */
+	u16 opmod;	/* add/del/mod */
+	u16 length;
+	u16 rsvd;
+};
+
+/* public header */
+struct xsc_ioctl_attr {
+	u16 opcode;	/* ioctl cmd */
+	u16 length;	/* data length */
+	u32 error;	/* ioctl error info */
+	u16 ver;
+	u16 rsvd;
+	u8 data[];	/* specific table info */
+};
+
+struct xsc_ioctl_emu_hdr {
+	u16 in_length;	/* cmd req length */
+	u16 out_length;	/* cmd rsp length */
+	u8 data[];	/* emu cmd content start from here */
+};
+
+struct xsc_ioctl_hdr {
+	u32 check_filed;	/* validity verification field */
+	u32 domain;
+	u32 bus;
+	u32 devfn;
+	struct xsc_ioctl_attr attr;
+};
+
+#endif /* XSC_IOCTL_H */
diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h
new file mode 100644
index 0000000000000000000000000000000000000000..24aa39a15e9d16dd21df005275364ffe411edb91
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef XSC_LAG_H
+#define XSC_LAG_H
+
+#define XSC_BOARD_LAG_MAX	XSC_MAX_PORTS
+
+enum lag_event_type {
+	XSC_LAG_CREATE,
+	XSC_LAG_ADD_MEMBER,
+	XSC_LAG_REMOVE_MEMBER,
+	XSC_LAG_UPDATE_MEMBER_STATUS,
+	XSC_LAG_UPDATE_HASH_TYPE,
+	XSC_LAG_DESTROY,
+	XSC_LAG_EVENT_MAX
+};
+
+enum lag_slave_status {
+	XSC_LAG_SLAVE_INACTIVE,
+	XSC_LAG_SLAVE_ACTIVE,
+	XSC_LAG_SLAVE_STATUS_MAX,
+};
+
+enum {
+	XSC_SLEEP,
+	XSC_WAKEUP,
+	XSC_EXIT,
+};
+
+enum {
+	XSC_LAG_FLAG_ROCE = 1 << 0,
+	XSC_LAG_FLAG_SRIOV = 1 << 1,
+	XSC_LAG_FLAG_KERNEL = 1 << 2,
+};
+
+enum xsc_lag_hash {
+	XSC_LAG_HASH_L23,
+	XSC_LAG_HASH_L34,
+	XSC_LAG_HASH_E23,
+	XSC_LAG_HASH_E34,
+};
+
+enum {
+	QOS_LAG_OP_CREATE = 0,
+	QOS_LAG_OP_ADD_MEMBER = 1,
+	QOS_LAG_OP_DEL_MEMBER = 2,
+	QOS_LAG_OP_DESTROY = 3,
+};
+
+#define BOND_ID_INVALID		U8_MAX
+#define BOARD_ID_INVALID	U32_MAX
+#define LAG_ID_INVALID		U16_MAX
+
+#define XSC_LAG_MODE_FLAGS (XSC_LAG_FLAG_ROCE | XSC_LAG_FLAG_SRIOV | XSC_LAG_FLAG_KERNEL)
+
+struct xsc_lag {
+	struct net_device *bond_dev;
+	u8 bond_mode;
+	enum netdev_lag_tx_type tx_type;
+	enum netdev_lag_hash hash_type;
+	u8 lag_type;
+	u16 lag_id;
+	atomic_t qp_cnt[XSC_MAX_PORTS];
+	struct list_head slave_list;
+	u8 xsc_member_cnt;
+	u32 board_id;
+	int mode_changes_in_progress;
+	u8 not_roce_lag_xdev_mask;
+};
+
+struct xsc_lag_event {
+	struct list_head node;
+	enum lag_event_type event_type;
+	struct xsc_core_device *xdev;
+	struct xsc_core_device *roce_lag_xdev;
+	u8 bond_mode;
+	u8 lag_type;
+	u8 hash_type;
+	u8 lag_sel_mode;
+	u16 lag_id;
+	enum lag_slave_status slave_status;
+	u8 is_roce_lag_xdev;
+	u8 not_roce_lag_xdev_mask;
+};
+
+struct lag_event_list {
+	struct list_head head;
+	spinlock_t lock;	/* protect lag_event_list */
+	struct task_struct *bond_poll_task;
+	wait_queue_head_t wq;
+	int wait_flag;
+	u8 event_type;
+};
+
+struct xsc_board_lag {
+	struct xsc_lag xsc_lag[XSC_BOARD_LAG_MAX];
+	u32 board_id;
+	struct kref ref;
+	u8 bond_valid_mask;
+	struct lag_event_list lag_event_list;
+	struct notifier_block nb;
+	struct mutex lock;	/* protects board_lag */
+};
+
+void xsc_lag_add_xdev(struct xsc_core_device *xdev);
+void xsc_lag_remove_xdev(struct xsc_core_device *xdev);
+void xsc_lag_add_netdev(struct net_device *ndev);
+void 
xsc_lag_remove_netdev(struct net_device *ndev); +void xsc_lag_disable(struct xsc_core_device *xdev); +void xsc_lag_enable(struct xsc_core_device *xdev); +bool xsc_lag_is_roce(struct xsc_core_device *xdev); +struct xsc_lag *xsc_get_lag(struct xsc_core_device *xdev); +struct xsc_core_device *xsc_get_roce_lag_xdev(struct xsc_core_device *xdev); +u16 xsc_get_lag_id(struct xsc_core_device *xdev); +struct xsc_board_lag *xsc_board_lag_get(struct xsc_core_device *xdev); + +static inline void xsc_board_lag_lock(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (xsc_core_is_pf(xdev)) + mutex_lock(&board_lag->lock); +} + +static inline void xsc_board_lag_unlock(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (xsc_core_is_pf(xdev)) + mutex_unlock(&board_lag->lock); +} + +#endif /* XSC_LAG_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h new file mode 100644 index 0000000000000000000000000000000000000000..db23b910f8e3c51152cdf660d8d79c225ac4cd11 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_MACRO_H +#define XSC_MACRO_H + +#ifndef NO_MSIX_SUPPORT +#define MSIX_SUPPORT +#endif + +#ifndef NO_RSS_SUPPORT +#define XSC_RSS_SUPPORT +#endif + +#ifndef NO_BQL_SUPPORT +#define XSC_BQL_SUPPORT +#endif + +#endif /*XSC_MACRO_H*/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..665103ac4dfa12e4232a1478e22a623b68d44ff0 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_PORT_CTRL_H +#define XSC_PORT_CTRL_H + +/*mmap msg encode*/ +enum { + XSC_MMAP_MSG_SQDB = 0, + XSC_MMAP_MSG_RQDB = 1, + XSC_MMAP_MSG_CQDB = 2, + XSC_MMAP_MSG_ARM_CQDB = 3, +}; + +#define TRY_NEXT_CB 0x1a2b3c4d + +typedef int (*port_ctrl_cb)(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user *user_hdr, void *data); + +void xsc_port_ctrl_remove(struct xsc_core_device *dev); +int xsc_port_ctrl_probe(struct xsc_core_device *dev); +int xsc_port_ctrl_cb_reg(const char *name, port_ctrl_cb cb, void *data); +void xsc_port_ctrl_cb_dereg(const char *name); + +void xsc_port_ctrl_fini(void); +int xsc_port_ctrl_init(void); +struct xsc_core_device *xsc_pci_get_xdev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn); +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h new file mode 100644 index 0000000000000000000000000000000000000000..c200ba8928974d743cf5bab124fbd9cd13583211 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */
+
+#ifndef XSC_PP_H
+#define XSC_PP_H
+
+enum {
+	XSC_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
+	XSC_HASH_FIELD_SEL_PROTO	= 1 << 1,
+	XSC_HASH_FIELD_SEL_DST_IP	= 1 << 2,
+	XSC_HASH_FIELD_SEL_SPORT	= 1 << 3,
+	XSC_HASH_FIELD_SEL_DPORT	= 1 << 4,
+	XSC_HASH_FIELD_SEL_SRC_IPV6	= 1 << 5,
+	XSC_HASH_FIELD_SEL_DST_IPV6	= 1 << 6,
+	XSC_HASH_FIELD_SEL_SPORT_V6	= 1 << 7,
+	XSC_HASH_FIELD_SEL_DPORT_V6	= 1 << 8,
+};
+
+#define XSC_HASH_IP		(XSC_HASH_FIELD_SEL_SRC_IP |\
+				XSC_HASH_FIELD_SEL_DST_IP |\
+				XSC_HASH_FIELD_SEL_PROTO)
+#define XSC_HASH_IP_PORTS	(XSC_HASH_FIELD_SEL_SRC_IP |\
+				XSC_HASH_FIELD_SEL_DST_IP |\
+				XSC_HASH_FIELD_SEL_SPORT |\
+				XSC_HASH_FIELD_SEL_DPORT |\
+				XSC_HASH_FIELD_SEL_PROTO)
+#define XSC_HASH_IP6		(XSC_HASH_FIELD_SEL_SRC_IPV6 |\
+				XSC_HASH_FIELD_SEL_DST_IPV6 |\
+				XSC_HASH_FIELD_SEL_PROTO)
+#define XSC_HASH_IP6_PORTS	(XSC_HASH_FIELD_SEL_SRC_IPV6 |\
+				XSC_HASH_FIELD_SEL_DST_IPV6 |\
+				XSC_HASH_FIELD_SEL_SPORT_V6 |\
+				XSC_HASH_FIELD_SEL_DPORT_V6 |\
+				XSC_HASH_FIELD_SEL_PROTO)
+
+enum {
+	XSC_HASH_TMPL_IDX_IP_PORTS_IP6_PORTS = 0,
+	XSC_HASH_TMPL_IDX_IP_IP6,
+	XSC_HASH_TMPL_IDX_IP_PORTS_IP6,
+	XSC_HASH_TMPL_IDX_IP_IP6_PORTS,
+	XSC_HASH_TMPL_IDX_MAX,
+};
+
+#endif /* XSC_PP_H */
diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h
new file mode 100644
index 0000000000000000000000000000000000000000..fec39d7137f57cb17fd2eafb248c060fa90caabf
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef XSC_PPH_H
+#define XSC_PPH_H
+
+#define XSC_PPH_HEAD_LEN	64
+
+enum {
+	L4_PROTO_NONE	= 0,
+	L4_PROTO_TCP	= 1,
+	L4_PROTO_UDP	= 2,
+	L4_PROTO_ICMP	= 3,
+	L4_PROTO_GRE	= 4,
+};
+
+enum {
+	L3_PROTO_NONE	= 0,
+	L3_PROTO_IP	= 2,
+	L3_PROTO_IP6	= 3,
+};
+
+struct epp_pph {
+	u16 outer_eth_type;	//2 bytes
+	u16 inner_eth_type;	//4 bytes
+
+	u16 rsv1:1;
+	u16 outer_vlan_flag:2;
+	u16 outer_ip_type:2;
+	u16 outer_ip_ofst:5;
+	u16 outer_ip_len:6;	//6 bytes
+
+	u16 rsv2:1;
+	u16 outer_tp_type:3;
+	u16 outer_tp_csum_flag:1;
+	u16 outer_tp_ofst:7;
+	u16 ext_tunnel_type:4;	//8 bytes
+
+	u8 tunnel_ofst;		//9 bytes
+	u8 inner_mac_ofst;	//10 bytes
+
+	u32 rsv3:2;
+	u32 inner_mac_flag:1;
+	u32 inner_vlan_flag:2;
+	u32 inner_ip_type:2;
+	u32 inner_ip_ofst:8;
+	u32 inner_ip_len:6;
+	u32 inner_tp_type:2;
+	u32 inner_tp_csum_flag:1;
+	u32 inner_tp_ofst:8;	//14 bytes
+
+	u16 rsv4:1;
+	u16 payload_type:4;
+	u16 payload_ofst:8;
+	u16 pkt_type:3;		//16 bytes
+
+	u16 rsv5:2;
+	u16 pri:3;
+	u16 logical_in_port:11;
+	u16 vlan_info;
+	u8 error_bitmap:8;	//21 bytes
+
+	u8 rsv6:7;
+	u8 recirc_id_vld:1;
+	u16 recirc_id;		//24 bytes
+
+	u8 rsv7:7;
+	u8 recirc_data_vld:1;
+	u32 recirc_data;	//29 bytes
+
+	u8 rsv8:6;
+	u8 mark_tag_vld:2;
+	u16 mark_tag;		//32 bytes
+
+	u8 rsv9:4;
+	u8 upa_to_soc:1;
+	u8 upa_from_soc:1;
+	u8 upa_re_up_call:1;
+	u8 upa_pkt_drop:1;	//33 bytes
+
+	u8 ucdv;
+	u16 rsv10:2;
+	u16 pkt_len:14;		//36 bytes
+
+	u16 rsv11:2;
+	u16 pkt_hdr_ptr:14;	//38 bytes
+
+	u64 rsv12:5;
+	u64 csum_ofst:8;
+	u64 csum_val:29;
+	u64 csum_plen:14;
+	u64 rsv11_0:8;		//46 bytes
+
+	u64 rsv11_1;
+	u64 rsv11_2;
+	u16 rsv11_3;
+};
+
+#define OUTER_L3_BIT	BIT(3)
+#define OUTER_L4_BIT	BIT(2)
+#define INNER_L3_BIT	BIT(1)
+#define INNER_L4_BIT	BIT(0)
+#define OUTER_BIT	(OUTER_L3_BIT | OUTER_L4_BIT)
+#define INNER_BIT	(INNER_L3_BIT | INNER_L4_BIT)
+#define OUTER_AND_INNER	(OUTER_BIT | 
INNER_BIT)
+
+#define PACKET_UNKNOWN	BIT(4)
+
+#define EPP2SOC_PPH_EXT_TUNNEL_TYPE_OFFSET	(6UL)
+#define EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_MASK	(0xF00)
+#define EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_OFFSET	(8)
+
+#define EPP2SOC_PPH_EXT_ERROR_BITMAP_OFFSET	(20UL)
+#define EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_MASK	(0xFF)
+#define EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_OFFSET	(0)
+
+#define XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(PPH_BASE_ADDR) \
+	((*(u16 *)((u8 *)(PPH_BASE_ADDR) + EPP2SOC_PPH_EXT_TUNNEL_TYPE_OFFSET) & \
+	EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_MASK) >> EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_OFFSET)
+
+#define XSC_GET_EPP2SOC_PPH_ERROR_BITMAP(PPH_BASE_ADDR) \
+	((*(u8 *)((u8 *)(PPH_BASE_ADDR) + EPP2SOC_PPH_EXT_ERROR_BITMAP_OFFSET) & \
+	EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_MASK) >> EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_OFFSET)
+
+#define PPH_OUTER_IP_TYPE_OFF	(4UL)
+#define PPH_OUTER_IP_TYPE_MASK	(0x3)
+#define PPH_OUTER_IP_TYPE_SHIFT	(11)
+#define PPH_OUTER_IP_TYPE(base) \
+	((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_TYPE_OFF)) >> \
+	PPH_OUTER_IP_TYPE_SHIFT) & PPH_OUTER_IP_TYPE_MASK)
+
+#define PPH_OUTER_IP_OFST_OFF	(4UL)
+#define PPH_OUTER_IP_OFST_MASK	(0x1f)
+#define PPH_OUTER_IP_OFST_SHIFT	(6)
+#define PPH_OUTER_IP_OFST(base) \
+	((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_OFST_OFF)) >> \
+	PPH_OUTER_IP_OFST_SHIFT) & PPH_OUTER_IP_OFST_MASK)
+
+#define PPH_OUTER_IP_LEN_OFF	(4UL)
+#define PPH_OUTER_IP_LEN_MASK	(0x3f)
+#define PPH_OUTER_IP_LEN_SHIFT	(0)
+#define PPH_OUTER_IP_LEN(base) \
+	((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_LEN_OFF)) >> \
+	PPH_OUTER_IP_LEN_SHIFT) & PPH_OUTER_IP_LEN_MASK)
+
+#define PPH_OUTER_TP_TYPE_OFF	(6UL)
+#define PPH_OUTER_TP_TYPE_MASK	(0x7)
+#define PPH_OUTER_TP_TYPE_SHIFT	(12)
+#define PPH_OUTER_TP_TYPE(base) \
+	((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_TP_TYPE_OFF)) >> \
+	PPH_OUTER_TP_TYPE_SHIFT) & PPH_OUTER_TP_TYPE_MASK)
+
+#define PPH_PAYLOAD_OFST_OFF	(14UL)
+#define PPH_PAYLOAD_OFST_MASK	(0xff)
+#define PPH_PAYLOAD_OFST_SHIFT	(3)
+#define PPH_PAYLOAD_OFST(base) \
+	((ntohs(*(u16 *)((u8 *)(base) + PPH_PAYLOAD_OFST_OFF)) >> \
+	PPH_PAYLOAD_OFST_SHIFT) & PPH_PAYLOAD_OFST_MASK)
+
+#define PPH_CSUM_OFST_OFF	(38UL)
+#define PPH_CSUM_OFST_MASK	(0xff)
+#define PPH_CSUM_OFST_SHIFT	(51)
+#define PPH_CSUM_OFST(base) \
+	((be64_to_cpu(*(u64 *)((u8 *)(base) + PPH_CSUM_OFST_OFF)) >> \
+	PPH_CSUM_OFST_SHIFT) & PPH_CSUM_OFST_MASK)
+
+#define PPH_CSUM_VAL_OFF	(38UL)
+#define PPH_CSUM_VAL_MASK	(0xeffffff)
+#define PPH_CSUM_VAL_SHIFT	(22)
+#define PPH_CSUM_VAL(base) \
+	((be64_to_cpu(*(u64 *)((u8 *)(base) + PPH_CSUM_VAL_OFF)) >> \
+	PPH_CSUM_VAL_SHIFT) & PPH_CSUM_VAL_MASK)
+#endif /* XSC_PPH_H */
diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b2c84017c18cafc5568743aa7c94d2d27b74f2a
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */ + +#ifndef XSC_REG_H +#define XSC_REG_H +#define CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0x0 +#define CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0x4 +#define CMDQM_HOST_REQ_PID_MEM_ADDR 0x8 +#define CMDQM_HOST_REQ_CID_MEM_ADDR 0xc +#define CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR 0x10 +#define CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR 0x14 +#define CMDQM_HOST_RSP_PID_MEM_ADDR 0x18 +#define CMDQM_HOST_RSP_CID_MEM_ADDR 0x1c +#define CMDQM_HOST_VF_ERR_STS_MEM_ADDR 0x20 +#define CMDQM_VECTOR_ID_MEM_ADDR 0x24 +#define CMDQM_Q_ELEMENT_SZ_REG_ADDR 0x28 +#define CMDQM_HOST_Q_DEPTH_REG_ADDR 0x2c + +#define CPM_LOCK_GET_REG_ADDR 0x30 +#define CPM_LOCK_PUT_REG_ADDR 0x34 +#define CPM_LOCK_AVAIL_REG_ADDR 0x38 +#define CPM_IDA_DATA_MEM_ADDR 0x3c +#define CPM_IDA_CMD_REG_ADDR 0x83c +#define CPM_IDA_ADDR_REG_ADDR 0x840 +#define CPM_IDA_BUSY_REG_ADDR 0x8c0 + +#define DB_CQ_FUNC_MEM_ADDR 0x8c4 +#define DB_EQ_FUNC_MEM_ADDR 0x8c8 +#define DB_CQ_CID_DIRECT_MEM_ADDR 0x8cc +#define TX_DB_FUNC_MEM_ADDR 0x8d0 +#define RX_DB_FUNC_MEM_ADDR 0x8d4 + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig b/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..30889caa96034016e41721361f99b3911365cba2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Yunsilicon driver configuration +# + +config YUNSILICON_XSC_ETH + tristate "Yunsilicon XSC ethernet driver" + default n + depends on YUNSILICON_XSC_PCI + help + This driver provides ethernet support for + Yunsilicon XSC devices. + + To compile this driver as a module, choose M here. The module + will be called xsc_eth. diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/Makefile b/drivers/net/ethernet/yunsilicon/xsc/net/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a6b1a4a300aa8e203f83731ddf2a769e2e3fc993 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc + +obj-$(CONFIG_YUNSILICON_XSC_ETH) += xsc_eth.o + +xsc_eth-y := main.o xsc_eth_ctrl.o xsc_eth_tx.o xsc_eth_rx.o xsc_eth_txrx.o \ + ut_main.o xsc_eth_ethtool.o xsc_eth_stats.o xsc_dcbnl.o xsc_hw_comm.o \ + xsc_eth_sysfs.o xsc_fs.o xsc_eth_dim.o diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/main.c b/drivers/net/ethernet/yunsilicon/xsc/net/main.c new file mode 100644 index 0000000000000000000000000000000000000000..3ed7be4e5d7d8b7ea5eafb60da381b02a73b05b9 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/main.c @@ -0,0 +1,3397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" +#include "common/qp.h" +#include "common/xsc_lag.h" +#include "common/xsc_pp.h" + +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" +#include "xsc_eth_ethtool.h" +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include "xsc_accel.h" +#include "xsc_eth_ctrl.h" +#include "../pci/eswitch.h" + +#include "common/xsc_fs.h" +#include "common/vport.h" +#include "common/qp.h" +#include "xsc_eth_dim.h" + +MODULE_LICENSE("GPL"); + +#define MAX_VF_NUM_MINIDUMP 1024 + +static void xsc_eth_close_channel(struct xsc_channel *c, bool free_rq); +static void xsc_eth_remove(struct xsc_core_device *xdev, void *context); + +static int xsc_eth_open(struct net_device *netdev); +static int xsc_eth_close(struct net_device *netdev); +static void xsc_netdev_set_tcs(struct xsc_adapter *priv, u16 nch, u8 ntc); + +#ifdef NEED_CREATE_RX_THREAD +extern uint32_t xsc_eth_rx_thread_create(struct xsc_adapter *adapter); +#endif + +static inline void xsc_set_feature(netdev_features_t *features, + netdev_features_t feature, + bool enable) +{ + if (enable) + *features |= feature; + else + *features &= ~feature; +} + +typedef int (*xsc_feature_handler)(struct net_device *netdev, bool enable); + +static int xsc_eth_modify_qp_status(struct xsc_core_device *xdev, + u32 qpn, u16 status); + +static void xsc_eth_build_queue_param(struct xsc_adapter *adapter, + struct xsc_queue_attr *attr, u8 type) +{ + struct xsc_core_device *xdev = adapter->xdev; + + if (adapter->nic_param.sq_size == 0) + adapter->nic_param.sq_size = BIT(xdev->caps.log_max_qp_depth); + if (adapter->nic_param.rq_size == 0) + adapter->nic_param.rq_size = BIT(xdev->caps.log_max_qp_depth); + + if (type == XSC_QUEUE_TYPE_EQ) { + attr->q_type = XSC_QUEUE_TYPE_EQ; + attr->ele_num = XSC_EQ_ELE_NUM; + attr->ele_size = XSC_EQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_EQ_ELE_SZ); + attr->q_log_size = order_base_2(XSC_EQ_ELE_NUM); + } else if (type == XSC_QUEUE_TYPE_RQCQ) { + attr->q_type = XSC_QUEUE_TYPE_RQCQ; + attr->ele_num = min_t(int, XSC_RQCQ_ELE_NUM, xdev->caps.max_cqes); + attr->ele_size = XSC_RQCQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_RQCQ_ELE_SZ); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_SQCQ) { + attr->q_type = XSC_QUEUE_TYPE_SQCQ; + attr->ele_num = min_t(int, XSC_SQCQ_ELE_NUM, xdev->caps.max_cqes); + attr->ele_size = XSC_SQCQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_SQCQ_ELE_SZ); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_RQ) { + attr->q_type = XSC_QUEUE_TYPE_RQ; + attr->ele_num = adapter->nic_param.rq_size; + attr->ele_size = xdev->caps.recv_ds_num * XSC_RECV_WQE_DS; + attr->ele_log_size = order_base_2(attr->ele_size); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_SQ) { + attr->q_type = XSC_QUEUE_TYPE_SQ; + attr->ele_num = adapter->nic_param.sq_size; + attr->ele_size = xdev->caps.send_ds_num * XSC_SEND_WQE_DS; + attr->ele_log_size = order_base_2(attr->ele_size); + attr->q_log_size = order_base_2(attr->ele_num); + } +} + +static void xsc_eth_init_frags_partition(struct xsc_rq *rq) +{ + struct xsc_wqe_frag_info next_frag = {}; + struct xsc_wqe_frag_info *prev; + int i; + + next_frag.di = &rq->wqe.di[0]; + next_frag.offset = 0; + prev = NULL; + + for (i = 0; i 
< xsc_wq_cyc_get_size(&rq->wqe.wq); i++) { + struct xsc_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct xsc_wqe_frag_info *frag = + &rq->wqe.frags[i << rq->wqe.info.log_num_frags]; + int f; + + for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) { + if (next_frag.offset + frag_info[f].frag_stride > + XSC_RX_FRAG_SZ) { + next_frag.di++; + next_frag.offset = 0; + if (prev) + prev->last_in_page = 1; + } + *frag = next_frag; + + /* prepare next */ + next_frag.offset += frag_info[f].frag_stride; + prev = frag; + } + } + + if (prev) + prev->last_in_page = 1; +} + +static int xsc_eth_init_di_list(struct xsc_rq *rq, int wq_sz, int cpu) +{ + int len = wq_sz << rq->wqe.info.log_num_frags; + + rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), + GFP_KERNEL, cpu_to_node(cpu)); + if (!rq->wqe.di) + return -ENOMEM; + + xsc_eth_init_frags_partition(rq); + + return 0; +} + +static void xsc_eth_free_di_list(struct xsc_rq *rq) +{ + kvfree(rq->wqe.di); +} + +int xsc_rx_alloc_page_cache(struct xsc_rq *rq, int node, u8 log_init_sz) +{ + struct xsc_page_cache *cache = &rq->page_cache; + + cache->sz = 1 << log_init_sz; + cache->page_cache = kvzalloc_node(cache->sz * sizeof(*cache->page_cache), + GFP_KERNEL, node); + if (!cache->page_cache) + return -ENOMEM; + + return 0; +} + +void xsc_rx_free_page_cache(struct xsc_rq *rq) +{ + struct xsc_page_cache *cache = &rq->page_cache; + u32 i; + + for (i = cache->head; i != cache->tail; i = (i + 1) & (cache->sz - 1)) { + struct xsc_dma_info *dma_info = &cache->page_cache[i]; + + xsc_page_release_dynamic(rq, dma_info, false); + } + kvfree(cache->page_cache); +} + +int xsc_eth_reset(struct xsc_core_device *dev) +{ + return 0; +} + +void xsc_eth_cq_error_event(struct xsc_core_cq *xcq, enum xsc_event event) +{ + struct xsc_cq *xsc_cq = container_of(xcq, struct xsc_cq, xcq); + struct xsc_core_device *xdev = xsc_cq->xdev; + + if (event != XSC_EVENT_TYPE_CQ_ERROR) { + xsc_core_err(xdev, "Unexpected event type %d on CQ %06x\n", + event, xcq->cqn); + return; + } + + xsc_core_err(xdev, "Eth catch CQ ERROR:%x, cqn: %d\n", event, xcq->cqn); +} + +void xsc_eth_completion_event(struct xsc_core_cq *xcq) +{ + struct xsc_cq *cq = container_of(xcq, struct xsc_cq, xcq); + struct xsc_core_device *xdev = cq->xdev; + struct xsc_rq *rq = NULL; + + if (unlikely(!cq->channel)) { + xsc_core_warn(xdev, "cq%d->channel is null\n", xcq->cqn); + return; + } + + rq = &cq->channel->qp.rq[0]; + + set_bit(XSC_CHANNEL_NAPI_SCHED, &cq->channel->flags); + cq->channel->stats->poll = 0; + cq->channel->stats->poll_tx = 0; + + if (!test_bit(XSC_ETH_RQ_STATE_ENABLED, &rq->state)) + xsc_core_warn(xdev, "ch%d_cq%d, napi_flag=0x%lx\n", + cq->channel->chl_idx, xcq->cqn, cq->napi->state); + + napi_schedule(cq->napi); + cq->event_ctr++; + cq->channel->stats->events++; +} + +static inline int xsc_cmd_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *xcq) +{ + struct xsc_destroy_cq_mbox_in in; + struct xsc_destroy_cq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(xcq->cqn); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "failed to destroy cq, err=%d out.status=%u\n", + err, out.hdr.status); + return -ENOEXEC; + } + + xcq->cqn = 0; + return 0; +} + +int xsc_eth_create_cq(struct xsc_core_device *xdev, struct xsc_core_cq *xcq, + struct xsc_create_cq_mbox_in *in, int insize) +{ + int err, ret = -1; + struct 
xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_create_cq_mbox_out out; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create cq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + xcq->cqn = be32_to_cpu(out.cqn) & 0xffffff; + xcq->cons_index = 0; + xcq->arm_sn = 0; + atomic_set(&xcq->refcount, 1); + init_completion(&xcq->free); + + spin_lock_irq(&table->lock); + ret = radix_tree_insert(&table->tree, xcq->cqn, xcq); + spin_unlock_irq(&table->lock); + if (ret) + goto err_insert_cq; + return 0; + +err_insert_cq: + err = xsc_cmd_destroy_cq(xdev, xcq); + if (err) + xsc_core_warn(xdev, "failed to destroy cqn=%d, err=%d\n", xcq->cqn, err); + return ret; +} + +int xsc_eth_destroy_cq(struct xsc_core_device *xdev, struct xsc_cq *cq) +{ + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_core_cq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, cq->xcq.cqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + err = -ENOENT; + goto err_delete_cq; + } + + if (tmp != &cq->xcq) { + err = -EINVAL; + goto err_delete_cq; + } + + err = xsc_cmd_destroy_cq(xdev, &cq->xcq); + if (err) + goto err_destroy_cq; + + if (atomic_dec_and_test(&cq->xcq.refcount)) + complete(&cq->xcq.free); + wait_for_completion(&cq->xcq.free); + return 0; + +err_destroy_cq: + xsc_core_warn(xdev, "failed to destroy cqn=%d, err=%d\n", + cq->xcq.cqn, err); + return err; +err_delete_cq: + xsc_core_warn(xdev, "cqn=%d not found in tree, err=%d\n", + cq->xcq.cqn, err); + return err; +} + +void xsc_eth_free_cq(struct xsc_cq *cq) +{ + xsc_eth_wq_destroy(&cq->wq_ctrl); +} + +int xsc_eth_create_rss_qp_rqs(struct xsc_core_device *xdev, + struct xsc_create_multiqp_mbox_in *in, + int insize, + int *prqn_base) +{ + int ret; + struct xsc_create_multiqp_mbox_out out; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_MULTI_QP); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, + "failed to create rss rq, qp_num=%d, type=%d, err=%d out.status=%u\n", + in->qp_num, in->qp_type, ret, out.hdr.status); + return -ENOEXEC; + } + + *prqn_base = be32_to_cpu(out.qpn_base) & 0xffffff; + return 0; +} + +void xsc_eth_qp_event(struct xsc_core_qp *qp, int type) +{ + struct xsc_rq *rq; + struct xsc_sq *sq; + struct xsc_core_device *xdev; + + if (qp->eth_queue_type == XSC_RES_RQ) { + rq = container_of(qp, struct xsc_rq, cqp); + xdev = rq->cq.xdev; + } else if (qp->eth_queue_type == XSC_RES_SQ) { + sq = container_of(qp, struct xsc_sq, cqp); + xdev = sq->cq.xdev; + } else { + pr_err("%s:Unknown eth qp type %d\n", __func__, type); + return; + } + + switch (type) { + case XSC_EVENT_TYPE_WQ_CATAS_ERROR: + case XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case XSC_EVENT_TYPE_WQ_ACCESS_ERROR: + xsc_core_err(xdev, "%s:Async event %x on QP %d\n", __func__, type, qp->qpn); + break; + default: + xsc_core_err(xdev, "%s: Unexpected event type %d on QP %d\n", + __func__, type, qp->qpn); + return; + } +} + +int xsc_eth_create_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq, + struct xsc_create_qp_mbox_in *in, int insize) +{ + int ret = -1; + struct xsc_create_qp_mbox_out out; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create rq, err=%d out.status=%u\n", + ret, 
out.hdr.status); + return -ENOEXEC; + } + + prq->rqn = be32_to_cpu(out.qpn) & 0xffffff; + prq->cqp.event = xsc_eth_qp_event; + prq->cqp.eth_queue_type = XSC_RES_RQ; + + ret = create_resource_common(xdev, &prq->cqp); + if (ret) { + xsc_core_err(xdev, "%s:error qp:%d errno:%d\n", __func__, prq->rqn, ret); + return ret; + } + + return 0; +} + +int xsc_eth_destroy_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq) +{ + struct xsc_destroy_qp_mbox_in in; + struct xsc_destroy_qp_mbox_out out; + int err; + + err = xsc_eth_modify_qp_status(xdev, prq->rqn, XSC_CMD_OP_2RST_QP); + if (err) { + xsc_core_warn(xdev, "failed to set rq%d status=rst, err=%d\n", prq->rqn, err); + return err; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(prq->rqn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed to destroy rq%d, err=%d out.status=%u\n", + prq->rqn, err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +static void xsc_eth_free_rx_wqe(struct xsc_rq *rq) +{ + u16 wqe_ix; + struct xsc_wq_cyc *wq = &rq->wqe.wq; + + while (!xsc_wq_cyc_is_empty(wq)) { + wqe_ix = xsc_wq_cyc_get_tail(wq); + rq->dealloc_wqe(rq, wqe_ix); + xsc_wq_cyc_pop(wq); + } +} + +static void xsc_free_qp_rq(struct xsc_rq *rq) +{ + if (rq->page_cache.page_cache) + xsc_rx_free_page_cache(rq); + + kvfree(rq->wqe.frags); + kvfree(rq->wqe.di); + + if (rq->page_pool) + page_pool_destroy(rq->page_pool); + + xsc_eth_wq_destroy(&rq->wq_ctrl); +} + +int xsc_eth_create_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq, + struct xsc_create_qp_mbox_in *in, int insize) +{ + struct xsc_create_qp_mbox_out out; + int ret; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create sq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + psq->sqn = be32_to_cpu(out.qpn) & 0xffffff; + + return 0; +} + +int xsc_eth_modify_qp_sq(struct xsc_core_device *xdev, struct xsc_modify_raw_qp_mbox_in *in) +{ + struct xsc_modify_raw_qp_mbox_out out; + int ret; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_RAW_QP); + + ret = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), + &out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to modify sq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +int xsc_eth_destroy_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq) +{ + struct xsc_destroy_qp_mbox_in in; + struct xsc_destroy_qp_mbox_out out; + int err; + + err = xsc_eth_modify_qp_status(xdev, psq->sqn, XSC_CMD_OP_2RST_QP); + if (err) { + xsc_core_warn(xdev, "failed to set sq%d status=rst, err=%d\n", psq->sqn, err); + return err; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(psq->sqn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed to destroy sq%d, err=%d out.status=%u\n", + psq->sqn, err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +static void xsc_free_qp_sq_db(struct xsc_sq *sq) +{ + kvfree(sq->db.wqe_info); + kvfree(sq->db.dma_fifo); +} + +static void xsc_free_qp_sq(struct xsc_sq *sq) +{ + xsc_free_qp_sq_db(sq); + 
xsc_eth_wq_destroy(&sq->wq_ctrl);
+}
+
+static int xsc_eth_alloc_qp_sq_db(struct xsc_sq *sq, int numa)
+{
+	int wq_sz = xsc_wq_cyc_get_size(&sq->wq);
+	struct xsc_core_device *xdev = sq->cq.xdev;
+	int df_sz = wq_sz * xdev->caps.send_ds_num;
+
+	sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, sizeof(*sq->db.dma_fifo)),
+					GFP_KERNEL, numa);
+	sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.wqe_info)),
+					GFP_KERNEL, numa);
+
+	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
+		xsc_free_qp_sq_db(sq);
+		return -ENOMEM;
+	}
+
+	sq->dma_fifo_mask = df_sz - 1;
+
+	return 0;
+}
+
+static int xsc_eth_alloc_cq(struct xsc_channel *c, struct xsc_cq *pcq,
+			    struct xsc_cq_param *pcq_param)
+{
+	int ret;
+	struct xsc_core_device *xdev = c->adapter->xdev;
+	struct xsc_core_cq *core_cq = &pcq->xcq;
+	u32 i;
+	u8 q_log_size = pcq_param->cq_attr.q_log_size;
+	u8 ele_log_size = pcq_param->cq_attr.ele_log_size;
+
+	pcq_param->wq.db_numa_node = cpu_to_node(c->cpu);
+	pcq_param->wq.buf_numa_node = cpu_to_node(c->cpu);
+
+	ret = xsc_eth_cqwq_create(xdev, &pcq_param->wq,
+				  q_log_size, ele_log_size, &pcq->wq,
+				  &pcq->wq_ctrl);
+	if (ret)
+		return ret;
+
+	core_cq->cqe_sz = pcq_param->cq_attr.ele_num;
+	core_cq->comp = xsc_eth_completion_event;
+	core_cq->event = xsc_eth_cq_error_event;
+	core_cq->vector = c->chl_idx;
+
+	/* hand every CQE to hardware by setting the initial owner bit */
+	for (i = 0; i < xsc_cqwq_get_size(&pcq->wq); i++) {
+		struct xsc_cqe *cqe = xsc_cqwq_get_wqe(&pcq->wq, i);
+
+		cqe->owner = 1;
+	}
+	pcq->xdev = xdev;
+
+	return ret;
+}
+
+#ifdef NEED_CREATE_RX_THREAD
+static int xsc_eth_set_cq(struct xsc_channel *c,
+			  struct xsc_cq *pcq,
+			  struct xsc_cq_param *pcq_param)
+{
+	int ret = XSCALE_RET_SUCCESS;
+	struct xsc_create_cq_mbox_in *in;
+	int inlen;
+	int hw_npages;
+
+	hw_npages = DIV_ROUND_UP(pcq->wq_ctrl.buf.size, PAGE_SIZE_4K);
+	/* mbox size + pas size */
+	inlen = sizeof(struct xsc_create_cq_mbox_in) +
+		sizeof(__be64) * hw_npages;
+
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	/* construct param of in struct */
+	in->ctx.log_cq_sz = pcq_param->cq_attr.q_log_size;
+	in->ctx.pa_num = cpu_to_be16(hw_npages);
+	in->ctx.glb_func_id = cpu_to_be16(c->adapter->xdev->glb_func_id);
+
+	xsc_fill_page_frag_array(&pcq->wq_ctrl.buf,
+				 &in->pas[0], hw_npages);
+
+	ret = xsc_eth_create_cq(c->adapter->xdev, &pcq->xcq, in, inlen);
+
+	kvfree(in);
+	xsc_core_info(c->adapter->xdev, "create cqn%d, func_id=%d, ret=%d\n",
+		      pcq->xcq.cqn, c->adapter->xdev->glb_func_id, ret);
+	return ret;
+}
+#else
+static int xsc_eth_set_cq(struct xsc_channel *c,
+			  struct xsc_cq *pcq,
+			  struct xsc_cq_param *pcq_param)
+{
+	int ret = XSCALE_RET_SUCCESS;
+	struct xsc_core_device *xdev = c->adapter->xdev;
+	struct xsc_create_cq_mbox_in *in;
+	int inlen;
+	int eqn, irqn;
+	int hw_npages;
+
+	hw_npages = DIV_ROUND_UP(pcq->wq_ctrl.buf.size, PAGE_SIZE_4K);
+	/* mbox size + pas size */
+	inlen = sizeof(struct xsc_create_cq_mbox_in) +
+		sizeof(__be64) * hw_npages;
+
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	/* construct param of in struct */
+	ret = xsc_vector2eqn(xdev, c->chl_idx, &eqn, &irqn);
+	if (ret)
+		goto err;
+
+	in->ctx.eqn = eqn;
+	in->ctx.eqn = cpu_to_be16(in->ctx.eqn);
+	in->ctx.log_cq_sz = pcq_param->cq_attr.q_log_size;
+	in->ctx.pa_num = cpu_to_be16(hw_npages);
+	in->ctx.glb_func_id = cpu_to_be16(xdev->glb_func_id);
+
+	xsc_fill_page_frag_array(&pcq->wq_ctrl.buf, &in->pas[0], hw_npages);
+
+	ret = xsc_eth_create_cq(c->adapter->xdev, &pcq->xcq, in, inlen);
+	if (ret == 0) {
+		pcq->xcq.irqn = irqn;
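+		/* remember which EQ backs this CQ's vector so completion
+		 * events can be tied back to the channel's interrupt
+		 */
+		pcq->xcq.eq = xsc_eq_get(xdev,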
pcq->xcq.vector); + } + +err: + kvfree(in); + xsc_core_info(c->adapter->xdev, "create ch%d cqn%d, eqn=%d, func_id=%d, ret=%d\n", + c->chl_idx, pcq->xcq.cqn, eqn, xdev->glb_func_id, ret); + return ret; +} +#endif + +static int xsc_eth_open_cq(struct xsc_channel *c, + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + int ret; + + ret = xsc_eth_alloc_cq(c, pcq, pcq_param); + if (ret) + return ret; + + ret = xsc_eth_set_cq(c, pcq, pcq_param); + if (ret) + goto err_set_cq; + + xsc_cq_notify_hw_rearm(pcq); + + pcq->napi = &c->napi; + pcq->channel = c; + pcq->rx = (pcq_param->cq_attr.q_type == XSC_QUEUE_TYPE_RQCQ) ? 1 : 0; + + return 0; + +err_set_cq: + xsc_eth_free_cq(pcq); + return ret; +} + +static int xsc_eth_close_cq(struct xsc_channel *c, struct xsc_cq *pcq) +{ + int ret; + struct xsc_core_device *xdev = c->adapter->xdev; + + ret = xsc_eth_destroy_cq(xdev, pcq); + if (ret) { + xsc_core_warn(xdev, "failed to close ch%d cq%d, ret=%d\n", + c->chl_idx, pcq->xcq.cqn, ret); + return ret; + } + + xsc_eth_free_cq(pcq); + + return 0; +} + +static int xsc_eth_modify_qp_status(struct xsc_core_device *xdev, + u32 qpn, u16 status) +{ + struct xsc_modify_qp_mbox_in in; + struct xsc_modify_qp_mbox_out out; + + return xsc_modify_qp(xdev, &in, &out, qpn, status); +} + +int xsc_eth_set_hw_mtu(struct xsc_core_device *dev, u16 mtu, u16 rx_buf_sz) +{ + struct xsc_set_mtu_mbox_in in; + struct xsc_set_mtu_mbox_out out; + int ret; + + memset(&in, 0, sizeof(struct xsc_set_mtu_mbox_in)); + memset(&out, 0, sizeof(struct xsc_set_mtu_mbox_out)); + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MTU); + in.mtu = cpu_to_be16(mtu); + in.rx_buf_sz_min = cpu_to_be16(rx_buf_sz); + in.mac_port = dev->mac_port; + + ret = xsc_cmd_exec(dev, &in, sizeof(struct xsc_set_mtu_mbox_in), &out, + sizeof(struct xsc_set_mtu_mbox_out)); + if (ret || out.hdr.status) { + xsc_core_err(dev, "failed to set hw_mtu=%u rx_buf_sz=%u, err=%d, status=%d\n", + mtu, rx_buf_sz, ret, out.hdr.status); + ret = -ENOEXEC; + } + + return ret; +} + +int xsc_eth_get_mac(struct xsc_core_device *dev, char *mac) +{ + struct xsc_query_eth_mac_mbox_out *out; + struct xsc_query_eth_mac_mbox_in in; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_ETH_MAC); + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err || out->hdr.status) { + xsc_core_warn(dev, "get mac failed! 
err=%d, out.status=%u\n", err, out->hdr.status); + err = -ENOEXEC; + goto exit; + } + + memcpy(mac, out->mac, 6); + xsc_core_dbg(dev, "get mac %02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + +exit: + kfree(out); + + return err; +} + +int xsc_eth_modify_qps_channel(struct xsc_adapter *adapter, struct xsc_channel *c) +{ + int ret = 0; + int i; + + for (i = 0; i < c->qp.rq_num; i++) { + c->qp.rq[i].post_wqes(&c->qp.rq[i]); + ret = xsc_eth_modify_qp_status(adapter->xdev, c->qp.rq[i].rqn, + XSC_CMD_OP_RTR2RTS_QP); + if (ret) + return ret; + } + + for (i = 0; i < c->qp.sq_num; i++) { + ret = xsc_eth_modify_qp_status(adapter->xdev, c->qp.sq[i].sqn, + XSC_CMD_OP_RTR2RTS_QP); + if (ret) + return ret; + } + return 0; +} + +int xsc_eth_modify_qps(struct xsc_adapter *adapter, + struct xsc_eth_channels *chls) +{ + int ret; + int i; + + for (i = 0; i < chls->num_chl; i++) { + struct xsc_channel *c = &chls->c[i]; + + ret = xsc_eth_modify_qps_channel(adapter, c); + if (ret) + return ret; + } + + return 0; +} + +u32 xsc_rx_get_linear_frag_sz(u32 mtu) +{ + u32 byte_count = XSC_SW2HW_FRAG_SIZE(mtu); + + return XSC_SKB_FRAG_SZ(byte_count); +} + +bool xsc_rx_is_linear_skb(u32 mtu) +{ + u32 linear_frag_sz = xsc_rx_get_linear_frag_sz(mtu); + + return linear_frag_sz <= PAGE_SIZE; +} + +static int xsc_eth_alloc_rq(struct xsc_channel *c, + struct xsc_rq *prq, + struct xsc_rq_param *prq_param) +{ + struct xsc_adapter *adapter = c->adapter; + u8 q_log_size = prq_param->rq_attr.q_log_size; + struct page_pool_params pagepool_params = { 0 }; + u32 pool_size = 1 << q_log_size; + u8 ele_log_size = prq_param->rq_attr.ele_log_size; + struct xsc_stats *stats = c->adapter->stats; + struct xsc_channel_stats *channel_stats = + &stats->channel_stats[c->chl_idx]; + int cache_init_sz = 0; + int wq_sz; + int i, f; + int ret = 0; + + prq->stats = &channel_stats->rq; + prq_param->wq.db_numa_node = cpu_to_node(c->cpu); + + ret = xsc_eth_wq_cyc_create(c->adapter->xdev, &prq_param->wq, + q_log_size, ele_log_size, &prq->wqe.wq, + &prq->wq_ctrl); + if (ret) + return ret; + + wq_sz = xsc_wq_cyc_get_size(&prq->wqe.wq); + + prq->wqe.info = prq_param->frags_info; + prq->wqe.frags = kvzalloc_node(array_size((wq_sz << prq->wqe.info.log_num_frags), + sizeof(*prq->wqe.frags)), + GFP_KERNEL, + cpu_to_node(c->cpu)); + if (!prq->wqe.frags) { + ret = -ENOMEM; + goto err_alloc_frags; + } + + ret = xsc_eth_init_di_list(prq, wq_sz, c->cpu); + if (ret) + goto err_init_di; + + prq->buff.map_dir = DMA_FROM_DEVICE; +#ifdef XSC_PAGE_CACHE + cache_init_sz = wq_sz << prq->wqe.info.log_num_frags; + ret = xsc_rx_alloc_page_cache(prq, cpu_to_node(c->cpu), ilog2(cache_init_sz)); + if (ret) + goto err_create_pool; +#endif + + /* Create a page_pool and register it with rxq */ + pool_size = wq_sz << prq->wqe.info.log_num_frags; + pagepool_params.order = XSC_RX_FRAG_SZ_ORDER; + pagepool_params.flags = 0; /* No-internal DMA mapping in page_pool */ + pagepool_params.pool_size = pool_size; + pagepool_params.nid = cpu_to_node(c->cpu); + pagepool_params.dev = c->adapter->dev; + pagepool_params.dma_dir = prq->buff.map_dir; + + prq->page_pool = page_pool_create(&pagepool_params); + if (IS_ERR(prq->page_pool)) { + ret = PTR_ERR(prq->page_pool); + prq->page_pool = NULL; + goto err_create_pool; + } + + if (c->chl_idx == 0) + xsc_core_dbg(adapter->xdev, + "page pool: size=%d, cpu=%d, pool_numa=%d, cache_size=%d, mtu=%d, wqe_numa=%d\n", + pool_size, c->cpu, pagepool_params.nid, + cache_init_sz, adapter->nic_param.mtu, + 
prq_param->wq.buf_numa_node);
+
+	for (i = 0; i < wq_sz; i++) {
+		struct xsc_eth_rx_wqe_cyc *wqe =
+			xsc_wq_cyc_get_wqe(&prq->wqe.wq, i);
+
+		for (f = 0; f < prq->wqe.info.num_frags; f++) {
+			u32 frag_size = prq->wqe.info.arr[f].frag_size;
+
+			wqe->data[f].seg_len = cpu_to_le32(frag_size);
+			wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY);
+		}
+
+		/* unused descriptors get a zero length and an invalid key */
+		for (; f < prq->wqe.info.frags_max_num; f++) {
+			wqe->data[f].seg_len = 0;
+			wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY);
+			wqe->data[f].va = 0;
+		}
+	}
+
+	prq->post_wqes = xsc_eth_post_rx_wqes;
+	prq->handle_rx_cqe = xsc_eth_handle_rx_cqe;
+	prq->dealloc_wqe = xsc_eth_dealloc_rx_wqe;
+	prq->wqe.skb_from_cqe = xsc_rx_is_linear_skb(adapter->nic_param.mtu) ?
+					xsc_skb_from_cqe_linear :
+					xsc_skb_from_cqe_nonlinear;
+	prq->ix = c->chl_idx;
+	prq->frags_sz = adapter->nic_param.rq_frags_size;
+
+	if (adapter->nic_param.rx_dim_enabled) {
+		INIT_WORK(&prq->dim_obj.dim.work, xsc_rx_dim_work);
+		prq->dim_obj.dim.mode =
+			adapter->nic_param.rx_cq_moderation.cq_period_mode;
+		hrtimer_init(&prq->cq.cq_reduce.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		prq->cq.cq_reduce.timer.function = xsc_dim_reduce_timer_fn;
+		set_bit(XSC_ETH_RQ_STATE_AM, &prq->state);
+	}
+
+	return 0;
+
+err_create_pool:
+	/* the page cache is allocated before the page pool, so free it
+	 * here as well or a page_pool_create() failure would leak it
+	 */
+	if (prq->page_cache.page_cache) {
+		xsc_rx_free_page_cache(prq);
+		prq->page_cache.page_cache = NULL;
+	}
+	xsc_eth_free_di_list(prq);
+err_init_di:
+	kvfree(prq->wqe.frags);
+err_alloc_frags:
+	xsc_eth_wq_destroy(&prq->wq_ctrl);
+	return ret;
+}
+
+#ifdef XSC_RSS_SUPPORT
+static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter,
+				   struct xsc_rq_param *prq_param,
+				   struct xsc_eth_channels *chls,
+				   unsigned int num_chl)
+{
+	int ret = 0, err = 0;
+	struct xsc_create_multiqp_mbox_in *in;
+	struct xsc_create_qp_request *req;
+	u8 q_log_size = prq_param->rq_attr.q_log_size;
+	int paslen = 0;
+	struct xsc_rq *prq;
+	struct xsc_channel *c;
+	int rqn_base;
+	int inlen;
+	int entry_len;
+	int i, j, n;
+	int hw_npages;
+
+	for (i = 0; i < num_chl; i++) {
+		c = &chls->c[i];
+
+		for (j = 0; j < c->qp.rq_num; j++) {
+			prq = &c->qp.rq[j];
+			ret = xsc_eth_alloc_rq(c, prq, prq_param);
+			if (ret)
+				goto err_alloc_rqs;
+
+			hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, PAGE_SIZE_4K);
+			/* support different npages number smoothly */
+			entry_len = sizeof(struct xsc_create_qp_request) +
+					sizeof(__be64) * hw_npages;
+
+			paslen += entry_len;
+		}
+	}
+
+	inlen = sizeof(struct xsc_create_multiqp_mbox_in) + paslen;
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in) {
+		ret = -ENOMEM;
+		goto err_create_rss_rqs;
+	}
+
+	in->qp_num = cpu_to_be16(num_chl);
+	in->qp_type = XSC_QUEUE_TYPE_RAW;
+	in->req_len = cpu_to_be32(inlen);
+
+	req = (struct xsc_create_qp_request *)&in->data[0];
+	n = 0;
+	for (i = 0; i < num_chl; i++) {
+		c = &chls->c[i];
+		for (j = 0; j < c->qp.rq_num; j++) {
+			prq = &c->qp.rq[j];
+
+			hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, PAGE_SIZE_4K);
+			/* no use for eth */
+			req->input_qpn = cpu_to_be16(0);
+			req->qp_type = XSC_QUEUE_TYPE_RAW;
+			req->log_rq_sz = ilog2(adapter->xdev->caps.recv_ds_num) +
+					q_log_size;
+			req->pa_num = cpu_to_be16(hw_npages);
+			req->cqn_recv = cpu_to_be16(prq->cq.xcq.cqn);
+			req->cqn_send = req->cqn_recv;
+			req->glb_funcid = cpu_to_be16(adapter->xdev->glb_func_id);
+
+			xsc_fill_page_frag_array(&prq->wq_ctrl.buf, &req->pas[0], hw_npages);
+			n++;
+			req = (struct xsc_create_qp_request *)(&in->data[0] + entry_len * n);
+		}
+	}
+
+	ret = xsc_eth_create_rss_qp_rqs(adapter->xdev, in, inlen, &rqn_base);
+	kvfree(in);
+	if (ret)
+		goto err_create_rss_rqs;
+
+	n = 0;
+	for (i = 0; i < num_chl; i++) {
+		c = &chls->c[i];
+		for (j = 0; j < c->qp.rq_num; j++) {
+			prq = &c->qp.rq[j];
+			prq->rqn = rqn_base + n;
+			prq->cqp.qpn = prq->rqn;
+			prq->cqp.event = xsc_eth_qp_event;
+			prq->cqp.eth_queue_type = XSC_RES_RQ;
+			ret = create_resource_common(adapter->xdev, &prq->cqp);
+			if (ret) {
+				err = ret;
+				xsc_core_err(adapter->xdev,
+					     "create resource common error qp:%d errno:%d\n",
+					     prq->rqn, ret);
+				continue;
+			}
+
+			n++;
+		}
+	}
+	if (err)
+		return err;
+
+	adapter->channels.rqn_base = rqn_base;
+	xsc_core_info(adapter->xdev, "rqn_base=%d, rq_num=%d, state=0x%lx\n",
+		      rqn_base, num_chl, prq->state);
+	return 0;
+
+err_create_rss_rqs:
+	i = num_chl;
+err_alloc_rqs:
+	for (--i; i >= 0; i--) {
+		c = &chls->c[i];
+		for (j = 0; j < c->qp.rq_num; j++) {
+			prq = &c->qp.rq[j];
+			xsc_free_qp_rq(prq);
+		}
+	}
+	return ret;
+}
+
+#else
+static int xsc_eth_open_qp_rq(struct xsc_channel *c,
+			      struct xsc_rq *prq,
+			      struct xsc_rq_param *prq_param,
+			      u32 rq_idx)
+{
+	struct xsc_adapter *adapter = c->adapter;
+	struct xsc_core_device *xdev = adapter->xdev;
+	u8 q_log_size = prq_param->rq_attr.q_log_size;
+	struct xsc_create_qp_mbox_in *in;
+	int hw_npages;
+	int inlen;
+	int ret = 0;
+
+	ret = xsc_eth_alloc_rq(c, prq, prq_param);
+	if (ret)
+		goto out;
+
+	hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, PAGE_SIZE_4K);
+	inlen = sizeof(struct xsc_create_qp_mbox_in) +
+		sizeof(__be64) * hw_npages;
+
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in) {
+		ret = -ENOMEM;
+		goto err_alloc_rq;
+	}
+
+	in->req.input_qpn = cpu_to_be16(XSC_QPN_RQN_STUB); /* no use for eth */
+	in->req.qp_type = XSC_QUEUE_TYPE_RAW;
+	in->req.log_rq_sz = ilog2(xdev->caps.recv_ds_num) + q_log_size;
+	in->req.pa_num = cpu_to_be16(hw_npages);
+	in->req.cqn_recv = cpu_to_be16(prq->cq.xcq.cqn);
+	in->req.cqn_send = in->req.cqn_recv;
+	in->req.glb_funcid = cpu_to_be16(xdev->glb_func_id);
+
+	xsc_fill_page_frag_array(&prq->wq_ctrl.buf, &in->req.pas[0], hw_npages);
+
+	ret = xsc_eth_create_qp_rq(xdev, prq, in, inlen);
+	if (ret)
+		goto err_create_rq;
+
+	prq->cqp.qpn = prq->rqn;
+	prq->cqp.event = xsc_eth_qp_event;
+	prq->cqp.eth_queue_type = XSC_RES_RQ;
+
+	ret = create_resource_common(xdev, &prq->cqp);
+	if (ret) {
+		xsc_core_err(xdev, "failed to init rqn%d, err=%d\n",
+			     prq->rqn, ret);
+		goto err_destroy_rq;
+	}
+
+	xsc_core_info(c->adapter->xdev, "rqn=%d ch_num=%d state=0x%lx\n",
+		      prq->rqn, c->chl_idx, prq->state);
+
+	kvfree(in);
+
+	return 0;
+
+err_destroy_rq:
+	xsc_eth_destroy_qp_rq(xdev, prq);
+err_create_rq:
+	kvfree(in);
+err_alloc_rq:
+	xsc_free_qp_rq(prq);
+out:
+	return ret;
+}
+#endif
+
+static int xsc_eth_close_qp_rq(struct xsc_channel *c, struct xsc_rq *prq)
+{
+	int ret;
+	struct xsc_core_device *xdev = c->adapter->xdev;
+
+	destroy_resource_common(xdev, &prq->cqp);
+
+	ret = xsc_eth_destroy_qp_rq(xdev, prq);
+	if (ret)
+		return ret;
+
+	xsc_eth_free_rx_wqe(prq);
+	xsc_free_qp_rq(prq);
+
+	return 0;
+}
+
+static int xsc_eth_open_qp_sq(struct xsc_channel *c,
+			      struct xsc_sq *psq,
+			      struct xsc_sq_param *psq_param,
+			      u32 sq_idx)
+{
+	struct xsc_adapter *adapter = c->adapter;
+	struct xsc_core_device *xdev = adapter->xdev;
+	u8 q_log_size = psq_param->sq_attr.q_log_size;
+	u8 ele_log_size = psq_param->sq_attr.ele_log_size;
+	struct xsc_stats *stats = adapter->stats;
+	struct xsc_channel_stats *channel_stats =
+		&stats->channel_stats[c->chl_idx];
+	struct xsc_create_qp_mbox_in *in;
+	struct xsc_modify_raw_qp_mbox_in *modify_in;
+	int hw_npages;
+	int inlen;
+	int ret;
+
+	psq->stats = &channel_stats->sq[sq_idx];
+	psq_param->wq.db_numa_node = cpu_to_node(c->cpu);
+
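+	/* the SQ work queue lives on the channel's NUMA node; create it
+	 * first, then wire the hardware QP on top of it
+	 */
+	ret =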
xsc_eth_wq_cyc_create(xdev, &psq_param->wq, + q_log_size, ele_log_size, &psq->wq, + &psq->wq_ctrl); + if (ret) + return ret; + + hw_npages = DIV_ROUND_UP(psq->wq_ctrl.buf.size, PAGE_SIZE_4K); + inlen = sizeof(struct xsc_create_qp_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + ret = -ENOMEM; + goto err_sq_wq_destroy; + } + in->req.input_qpn = cpu_to_be16(XSC_QPN_SQN_STUB); /*no use for eth*/ + in->req.qp_type = XSC_QUEUE_TYPE_RAW_TSO; /*default sq is tso qp*/ + in->req.log_sq_sz = ilog2(xdev->caps.send_ds_num) + q_log_size; + in->req.pa_num = cpu_to_be16(hw_npages); + in->req.cqn_send = cpu_to_be16(psq->cq.xcq.cqn); + in->req.cqn_recv = in->req.cqn_send; + in->req.glb_funcid = cpu_to_be16(xdev->glb_func_id); + + xsc_fill_page_frag_array(&psq->wq_ctrl.buf, + &in->req.pas[0], hw_npages); + + ret = xsc_eth_create_qp_sq(xdev, psq, in, inlen); + if (ret) + goto err_sq_in_destroy; + + psq->cqp.qpn = psq->sqn; + psq->cqp.event = xsc_eth_qp_event; + psq->cqp.eth_queue_type = XSC_RES_SQ; + + ret = create_resource_common(xdev, &psq->cqp); + if (ret) { + xsc_core_err(xdev, "%s:error qp:%d errno:%d\n", + __func__, psq->sqn, ret); + goto err_sq_destroy; + } + + psq->channel = c; + psq->ch_ix = c->chl_idx; + psq->txq_ix = psq->ch_ix + sq_idx * adapter->channels.num_chl; + + /*need to querify from hardware*/ + psq->hw_mtu = XSC_ETH_HW_MTU_SEND; + psq->stop_room = 1; + + ret = xsc_eth_alloc_qp_sq_db(psq, psq_param->wq.db_numa_node); + if (ret) + goto err_sq_common_destroy; + + inlen = sizeof(struct xsc_modify_raw_qp_mbox_in); + modify_in = kvzalloc(inlen, GFP_KERNEL); + if (!modify_in) { + ret = -ENOMEM; + goto err_sq_common_destroy; + } + + modify_in->req.qp_out_port = xdev->pf_id; + modify_in->pcie_no = xdev->pcie_no; + modify_in->req.qpn = cpu_to_be16((u16)(psq->sqn)); + modify_in->req.func_id = cpu_to_be16(xdev->glb_func_id); + modify_in->req.dma_direct = DMA_DIR_TO_MAC; + modify_in->req.prio = sq_idx; + ret = xsc_eth_modify_qp_sq(xdev, modify_in); + if (ret) + goto err_sq_modify_in_destroy; + + kvfree(modify_in); + kvfree(in); + + if (adapter->nic_param.tx_dim_enabled) { + INIT_WORK(&psq->dim_obj.dim.work, xsc_tx_dim_work); + psq->dim_obj.dim.mode = adapter->nic_param.tx_cq_moderation.cq_period_mode; + hrtimer_init(&psq->cq.cq_reduce.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + psq->cq.cq_reduce.timer.function = xsc_dim_reduce_timer_fn; + set_bit(XSC_ETH_SQ_STATE_AM, &psq->state); + } + + xsc_core_info(c->adapter->xdev, + "open sq ok, ch%d_sq%d_qpn=%d, state=0x%lx, db_numa=%d, buf_numa=%d\n", + c->chl_idx, sq_idx, psq->sqn, psq->state, + psq_param->wq.db_numa_node, psq_param->wq.buf_numa_node); + + return 0; + +err_sq_modify_in_destroy: + kvfree(modify_in); + +err_sq_common_destroy: + destroy_resource_common(xdev, &psq->cqp); + +err_sq_destroy: + xsc_eth_destroy_qp_sq(xdev, psq); + +err_sq_in_destroy: + kvfree(in); + +err_sq_wq_destroy: + xsc_eth_wq_destroy(&psq->wq_ctrl); + return ret; +} + +static int xsc_eth_close_qp_sq(struct xsc_channel *c, struct xsc_sq *psq) +{ + struct xsc_core_device *xdev = c->adapter->xdev; + int ret; + + destroy_resource_common(xdev, &psq->cqp); + + ret = xsc_eth_destroy_qp_sq(xdev, psq); + if (ret) + return ret; + + xsc_free_tx_wqe(c->adapter->dev, psq); + xsc_free_qp_sq(psq); + + return 0; +} + +int xsc_eth_open_channel(struct xsc_adapter *adapter, + int idx, + struct xsc_channel *c, + struct xsc_channel_param *chl_param) +{ + int ret = 0; + struct net_device *netdev = adapter->netdev; + struct xsc_stats *stats = 
adapter->stats;
+	struct xsc_core_device *xdev = adapter->xdev;
+	int i, j, eqn, irqn;
+	const struct cpumask *aff;
+
+	c->adapter = adapter;
+	c->netdev = adapter->netdev;
+	c->chl_idx = idx;
+	c->num_tc = adapter->nic_param.num_tc;
+	c->stats = &stats->channel_stats[idx].ch;
+
+	/* 1 rq per channel, and may have multiple sqs per channel */
+	c->qp.rq_num = 1;
+	c->qp.sq_num = c->num_tc;
+
+	if (xdev->caps.msix_enable) {
+		ret = xsc_vector2eqn(xdev, c->chl_idx, &eqn, &irqn);
+		if (ret)
+			goto err;
+		aff = irq_get_affinity_mask(irqn);
+		c->aff_mask = aff;
+		c->cpu = cpumask_first(aff);
+	}
+
+	if (c->qp.sq_num > XSC_MAX_NUM_TC || c->qp.rq_num > XSC_MAX_NUM_TC) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	for (i = 0; i < c->qp.rq_num; i++) {
+		ret = xsc_eth_open_cq(c, &c->qp.rq[i].cq, &chl_param->rqcq_param);
+		if (ret) {
+			j = i - 1;
+			goto err_open_rq_cq;
+		}
+	}
+
+	for (i = 0; i < c->qp.sq_num; i++) {
+		ret = xsc_eth_open_cq(c, &c->qp.sq[i].cq, &chl_param->sqcq_param);
+		if (ret) {
+			j = i - 1;
+			goto err_open_sq_cq;
+		}
+	}
+
+#ifndef XSC_RSS_SUPPORT
+	for (i = 0; i < c->qp.rq_num; i++) {
+		ret = xsc_eth_open_qp_rq(c, &c->qp.rq[i], &chl_param->rq_param, i);
+		if (ret) {
+			j = i - 1;
+			goto err_open_rq;
+		}
+	}
+#endif
+
+	for (i = 0; i < c->qp.sq_num; i++) {
+		ret = xsc_eth_open_qp_sq(c, &c->qp.sq[i], &chl_param->sq_param, i);
+		if (ret) {
+			j = i - 1;
+			goto err_open_sq;
+		}
+	}
+
+	netif_napi_add(netdev, &c->napi, xsc_eth_napi_poll);
+
+	xsc_core_dbg(adapter->xdev, "open channel%d ok\n", idx);
+	return 0;
+
+err_open_sq:
+	for (; j >= 0; j--)
+		xsc_eth_close_qp_sq(c, &c->qp.sq[j]);
+#ifndef XSC_RSS_SUPPORT
+	j = (c->qp.rq_num - 1);
+err_open_rq:
+	for (; j >= 0; j--)
+		xsc_eth_close_qp_rq(c, &c->qp.rq[j]);
+#endif
+	/* every sq cq was opened before the sqs, so unwind all of them */
+	j = (c->qp.sq_num - 1);
+err_open_sq_cq:
+	for (; j >= 0; j--)
+		xsc_eth_close_cq(c, &c->qp.sq[j].cq);
+	j = (c->qp.rq_num - 1);
+err_open_rq_cq:
+	for (; j >= 0; j--)
+		xsc_eth_close_cq(c, &c->qp.rq[j].cq);
+err:
+	xsc_core_warn(adapter->xdev,
+		      "failed to open channel: ch%d, sq_num=%d, rq_num=%d, err=%d\n",
+		      idx, c->qp.sq_num, c->qp.rq_num, ret);
+	return ret;
+}
+
+static u32 xsc_get_rq_frag_info(struct xsc_rq_frags_info *frags_info, u32 mtu)
+{
+	u32 byte_count = XSC_SW2HW_FRAG_SIZE(mtu);
+	int frag_stride;
+	int i = 0;
+
+	if (xsc_rx_is_linear_skb(mtu)) {
+		frag_stride = xsc_rx_get_linear_frag_sz(mtu);
+		frag_stride = roundup_pow_of_two(frag_stride);
+
+		frags_info->arr[0].frag_size = byte_count;
+		frags_info->arr[0].frag_stride = frag_stride;
+		frags_info->num_frags = 1;
+		frags_info->wqe_bulk = PAGE_SIZE / frag_stride;
+		frags_info->wqe_bulk_min = frags_info->wqe_bulk;
+		goto out;
+	}
+
+	if (byte_count <= DEFAULT_FRAG_SIZE) {
+		frags_info->arr[0].frag_size = DEFAULT_FRAG_SIZE;
+		frags_info->arr[0].frag_stride = DEFAULT_FRAG_SIZE;
+		frags_info->num_frags = 1;
+	} else if (byte_count <= PAGE_SIZE_4K) {
+		frags_info->arr[0].frag_size = PAGE_SIZE_4K;
+		frags_info->arr[0].frag_stride = PAGE_SIZE_4K;
+		frags_info->num_frags = 1;
+	} else if (byte_count <= (PAGE_SIZE_4K + DEFAULT_FRAG_SIZE)) {
+		if (PAGE_SIZE < 2 * PAGE_SIZE_4K) {
+			frags_info->arr[0].frag_size = PAGE_SIZE_4K;
+			frags_info->arr[0].frag_stride = PAGE_SIZE_4K;
+			frags_info->arr[1].frag_size = PAGE_SIZE_4K;
+			frags_info->arr[1].frag_stride = PAGE_SIZE_4K;
+			frags_info->num_frags = 2;
+		} else {
+			frags_info->arr[0].frag_size = 2 * PAGE_SIZE_4K;
+			frags_info->arr[0].frag_stride = 2 * PAGE_SIZE_4K;
+			frags_info->num_frags = 1;
+		}
+	} else if (byte_count <= 2 * PAGE_SIZE_4K) {
+		if (PAGE_SIZE < 2 * PAGE_SIZE_4K) {
+
frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->arr[1].frag_size = PAGE_SIZE_4K; + frags_info->arr[1].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 2; + } else { + frags_info->arr[0].frag_size = 2 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 2 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } else { + if (PAGE_SIZE < 4 * PAGE_SIZE_4K) { + frags_info->num_frags = roundup(byte_count, PAGE_SIZE_4K) / PAGE_SIZE_4K; + for (i = 0; i < frags_info->num_frags; i++) { + frags_info->arr[i].frag_size = PAGE_SIZE_4K; + frags_info->arr[i].frag_stride = PAGE_SIZE_4K; + } + } else { + frags_info->arr[0].frag_size = 4 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 4 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } + + if (PAGE_SIZE <= PAGE_SIZE_4K) { + frags_info->wqe_bulk_min = 4; + frags_info->wqe_bulk = max_t(u8, frags_info->wqe_bulk_min, 8); + } else if (PAGE_SIZE <= 2 * PAGE_SIZE_4K) { + frags_info->wqe_bulk = 2; + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + } else { + frags_info->wqe_bulk = + PAGE_SIZE / (frags_info->num_frags * frags_info->arr[0].frag_size); + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + } + +out: + frags_info->log_num_frags = order_base_2(frags_info->num_frags); + + return frags_info->num_frags * frags_info->arr[0].frag_size; +} + +static void xsc_build_rq_frags_info(struct xsc_queue_attr *attr, + struct xsc_rq_frags_info *frags_info, + struct xsc_eth_params *params) +{ + params->rq_frags_size = xsc_get_rq_frag_info(frags_info, params->mtu); + frags_info->frags_max_num = attr->ele_size / XSC_RECV_WQE_DS; +} + +static void xsc_eth_build_channel_param(struct xsc_adapter *adapter, + struct xsc_channel_param *chl_param) +{ + xsc_eth_build_queue_param(adapter, &chl_param->rqcq_param.cq_attr, + XSC_QUEUE_TYPE_RQCQ); + chl_param->rqcq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->sqcq_param.cq_attr, + XSC_QUEUE_TYPE_SQCQ); + chl_param->sqcq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->sq_param.sq_attr, + XSC_QUEUE_TYPE_SQ); + chl_param->sq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->rq_param.rq_attr, + XSC_QUEUE_TYPE_RQ); + chl_param->rq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_build_rq_frags_info(&chl_param->rq_param.rq_attr, + &chl_param->rq_param.frags_info, + &adapter->nic_param); +} + +int xsc_eth_open_channels(struct xsc_adapter *adapter) +{ + int ret = 0; + int i; + struct xsc_channel_param *chl_param; + struct xsc_eth_channels *chls = &adapter->channels; + struct xsc_core_device *xdev = adapter->xdev; + bool free_rq = false; + + chls->num_chl = adapter->nic_param.num_channels; + chls->c = kcalloc_node(chls->num_chl, sizeof(struct xsc_channel), + GFP_KERNEL, xdev->priv.numa_node); + if (!chls->c) { + ret = -ENOMEM; + goto err; + } + + chl_param = kvzalloc(sizeof(*chl_param), GFP_KERNEL); + if (!chl_param) { + ret = -ENOMEM; + goto err_free_ch; + } + + xsc_eth_build_channel_param(adapter, chl_param); + + for (i = 0; i < chls->num_chl; i++) { + ret = xsc_eth_open_channel(adapter, i, &chls->c[i], chl_param); + if (ret) + goto err_open_channel; +#ifndef XSC_RSS_SUPPORT + free_rq = true; +#endif + } + +#ifdef XSC_RSS_SUPPORT + ret = xsc_eth_open_rss_qp_rqs(adapter, &chl_param->rq_param, chls, chls->num_chl); + if (ret) + goto err_open_channel; + free_rq = true; +#endif + + for (i = 0; i < 
chls->num_chl; i++)
+		napi_enable(&chls->c[i].napi);
+
+	/* flush cache to memory before interrupt and napi_poll running */
+	smp_wmb();
+
+	ret = xsc_eth_modify_qps(adapter, chls);
+	if (ret)
+		goto err_modify_qps;
+
+	kvfree(chl_param);
+	xsc_core_info(adapter->xdev, "open %d channels ok\n", chls->num_chl);
+	return 0;
+
+err_modify_qps:
+	/* the napis were already enabled above; disable them again before
+	 * the channels (and their napi contexts) are torn down
+	 */
+	for (i = 0; i < chls->num_chl; i++)
+		napi_disable(&chls->c[i].napi);
+	i = chls->num_chl;
+err_open_channel:
+	for (--i; i >= 0; i--)
+		xsc_eth_close_channel(&chls->c[i], free_rq);
+
+	kvfree(chl_param);
+err_free_ch:
+	kfree(chls->c);
+err:
+	xsc_core_warn(adapter->xdev, "failed to open %d channels, err=%d\n",
+		      chls->num_chl, ret);
+	chls->num_chl = 0;
+	return ret;
+}
+
+static void xsc_eth_activate_txqsq(struct xsc_channel *c)
+{
+	int tc;
+	struct xsc_sq *psq;
+
+	for (tc = 0; tc < c->num_tc; tc++) {
+		psq = &c->qp.sq[tc];
+		psq->txq = netdev_get_tx_queue(psq->channel->netdev, psq->txq_ix);
+		set_bit(XSC_ETH_SQ_STATE_ENABLED, &psq->state);
+		netdev_tx_reset_queue(psq->txq);
+		netif_tx_start_queue(psq->txq);
+	}
+}
+
+static void xsc_eth_deactivate_txqsq(struct xsc_channel *c)
+{
+	int tc;
+	struct xsc_sq *psq;
+
+	for (tc = 0; tc < c->num_tc; tc++) {
+		psq = &c->qp.sq[tc];
+		clear_bit(XSC_ETH_SQ_STATE_ENABLED, &psq->state);
+	}
+}
+
+static void xsc_activate_rq(struct xsc_channel *c)
+{
+	int i;
+
+	for (i = 0; i < c->qp.rq_num; i++)
+		set_bit(XSC_ETH_RQ_STATE_ENABLED, &c->qp.rq[i].state);
+}
+
+static void xsc_deactivate_rq(struct xsc_channel *c)
+{
+	int i;
+
+	for (i = 0; i < c->qp.rq_num; i++)
+		clear_bit(XSC_ETH_RQ_STATE_ENABLED, &c->qp.rq[i].state);
+}
+
+void xsc_eth_activate_channel(struct xsc_channel *c)
+{
+	xsc_eth_activate_txqsq(c);
+	xsc_activate_rq(c);
+}
+
+void xsc_eth_deactivate_channel(struct xsc_channel *c)
+{
+	xsc_deactivate_rq(c);
+	xsc_eth_deactivate_txqsq(c);
+}
+
+static void xsc_eth_activate_channels(struct xsc_eth_channels *chs)
+{
+	int i;
+
+	for (i = 0; i < chs->num_chl; i++)
+		xsc_eth_activate_channel(&chs->c[i]);
+}
+
+static void xsc_eth_deactivate_channels(struct xsc_eth_channels *chs)
+{
+	int i;
+
+	for (i = 0; i < chs->num_chl; i++)
+		xsc_eth_deactivate_channel(&chs->c[i]);
+
+	/* Sync with all NAPIs to wait until they stop using queues. */
+	synchronize_net();
+
+	for (i = 0; i < chs->num_chl; i++)
+		/* last doorbell out */
+		napi_disable(&chs->c[i].napi);
+}
+
+static void xsc_eth_build_tx2sq_maps(struct xsc_adapter *adapter)
+{
+	struct xsc_channel *c;
+	struct xsc_sq *psq;
+	int i, tc;
+
+	for (i = 0; i < adapter->channels.num_chl; i++) {
+		c = &adapter->channels.c[i];
+		for (tc = 0; tc < c->num_tc; tc++) {
+			psq = &c->qp.sq[tc];
+			adapter->txq2sq[psq->txq_ix] = psq;
+			adapter->channel_tc2realtxq[i][tc] =
+				i + tc * adapter->channels.num_chl;
+		}
+	}
+}
+
+void xsc_eth_activate_priv_channels(struct xsc_adapter *adapter)
+{
+	int num_txqs;
+	struct net_device *netdev = adapter->netdev;
+
+	num_txqs = adapter->channels.num_chl * adapter->nic_param.num_tc;
+	xsc_netdev_set_tcs(adapter, adapter->channels.num_chl, adapter->nic_param.num_tc);
+	netif_set_real_num_tx_queues(netdev, num_txqs);
+	netif_set_real_num_rx_queues(netdev, adapter->channels.num_chl);
+
+	xsc_eth_build_tx2sq_maps(adapter);
+	xsc_eth_activate_channels(&adapter->channels);
+	netif_tx_start_all_queues(adapter->netdev);
+}
+
+void xsc_eth_deactivate_priv_channels(struct xsc_adapter *adapter)
+{
+	netif_tx_disable(adapter->netdev);
+	xsc_eth_deactivate_channels(&adapter->channels);
+}
+
+static int xsc_eth_sw_init(struct xsc_adapter *adapter)
+{
+	int ret;
+
+	ret = xsc_eth_open_channels(adapter);
+	if (ret)
+		return ret;
+
+	xsc_eth_activate_priv_channels(adapter);
+
+	return 0;
+}
+
+static void xsc_eth_close_channel(struct xsc_channel *c, bool free_rq)
+{
+	int i;
+
+	for (i = 0; i < c->qp.rq_num; i++) {
+		if (free_rq)
+			xsc_eth_close_qp_rq(c, &c->qp.rq[i]);
+		xsc_eth_close_cq(c, &c->qp.rq[i].cq);
+		memset(&c->qp.rq[i], 0, sizeof(struct xsc_rq));
+	}
+
+	for (i = 0; i < c->qp.sq_num; i++) {
+		xsc_eth_close_qp_sq(c, &c->qp.sq[i]);
+		xsc_eth_close_cq(c, &c->qp.sq[i].cq);
+	}
+
+	netif_napi_del(&c->napi);
+}
+
+static void xsc_eth_close_channels(struct xsc_adapter *adapter)
+{
+	int i;
+	struct xsc_channel *c = NULL;
+
+	for (i = 0; i < adapter->channels.num_chl; i++) {
+		c = &adapter->channels.c[i];
+		xsc_core_dbg(adapter->xdev, "start to close channel%d\n", c->chl_idx);
+
+		xsc_eth_close_channel(c, true);
+	}
+
+	kfree(adapter->channels.c);
+	adapter->channels.num_chl = 0;
+}
+
+static void xsc_eth_sw_deinit(struct xsc_adapter *adapter)
+{
+	xsc_eth_deactivate_priv_channels(adapter);
+	xsc_eth_close_channels(adapter);
+}
+
+int xsc_eth_set_led_status(int id, struct xsc_adapter *adapter)
+{
+	int err;
+	struct xsc_event_set_led_status_mbox_in in;
+	struct xsc_event_set_led_status_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	/* set led status cmd */
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_LED_STATUS);
+	in.port_id = id;
+
+	err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out));
+	if (err || out.status) {
+		xsc_core_err(adapter->xdev, "failed to set led to %d, err=%d, status=%d\n",
+			     id, err, out.status);
+		return -1;
+	}
+
+	return 0;
+}
+
+bool xsc_eth_get_link_status(struct xsc_adapter *adapter)
+{
+	bool link_up;
+	struct xsc_core_device *xdev = adapter->xdev;
+	u16 vport = xsc_core_is_pf(xdev) ? 0 : (xdev->vf_id + 1);
+
+	link_up = xsc_query_vport_state(xdev, XSC_CMD_OP_QUERY_VPORT_STATE, vport);
+
+	xsc_core_dbg(adapter->xdev, "link_status=%d\n", link_up);
+
+	return link_up ?
true : false; +} + +int xsc_eth_get_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo) +{ + struct xsc_event_query_linkinfo_mbox_in in; + struct xsc_event_query_linkinfo_mbox_out out; + int i, err; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_LINK_INFO); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to get link info, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + memcpy(plinkinfo, &out.ctx, sizeof(*plinkinfo)); + + plinkinfo->linkspeed = be32_to_cpu(plinkinfo->linkspeed); + plinkinfo->supported = be64_to_cpu(plinkinfo->supported); + plinkinfo->advertising = be64_to_cpu(plinkinfo->advertising); + for (i = 0; i < ARRAY_SIZE(plinkinfo->supported_speed); i++) { + plinkinfo->supported_speed[i] = be64_to_cpu(plinkinfo->supported_speed[i]); + plinkinfo->advertising_speed[i] = be64_to_cpu(plinkinfo->advertising_speed[i]); + } + + return 0; +} + +int xsc_eth_set_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo) +{ + struct xsc_event_modify_linkinfo_mbox_in in; + struct xsc_event_modify_linkinfo_mbox_out out; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_LINK_INFO); + memcpy(&in.ctx, plinkinfo, sizeof(*plinkinfo)); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to set link info, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + return err; +} + +int xsc_get_link_speed(struct xsc_core_device *dev) +{ + struct xsc_adapter *adapter = netdev_priv(dev->netdev); + struct xsc_event_linkinfo linkinfo; + + if (xsc_eth_get_link_info(adapter, &linkinfo)) { + xsc_core_err(adapter->xdev, "fail to get linkspeed, return 25G\n"); + return MODULE_SPEED_25G; + } + + return linkinfo.linkspeed; +} +EXPORT_SYMBOL(xsc_get_link_speed); + +#if defined(MSIX_SUPPORT) +int xsc_eth_change_link_status(struct xsc_adapter *adapter) +{ + bool link_up; + + link_up = xsc_eth_get_link_status(adapter); + + if (link_up && !netif_carrier_ok(adapter->netdev)) { + netdev_info(adapter->netdev, "Link up\n"); + netif_carrier_on(adapter->netdev); + } else if (!link_up && netif_carrier_ok(adapter->netdev)) { + netdev_info(adapter->netdev, "Link down\n"); + netif_carrier_off(adapter->netdev); + } + + return 0; +} + +static void xsc_eth_event_work(struct work_struct *work) +{ + int err; + struct xsc_event_query_type_mbox_in in; + struct xsc_event_query_type_mbox_out out; + struct xsc_adapter *adapter = container_of(work, struct xsc_adapter, event_work); + + if (adapter->status != XSCALE_ETH_DRIVER_OK) + return; + + /*query cmd_type cmd*/ + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_EVENT_TYPE); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to query event type, err=%d, stats=%d\n", + err, out.hdr.status); + goto failed; + } + + switch (out.ctx.resp_cmd_type) { + case XSC_CMD_EVENT_RESP_CHANGE_LINK: + err = xsc_eth_change_link_status(adapter); + if (err) { + xsc_core_err(adapter->xdev, "failed to change linkstatus, err=%d\n", err); + goto failed; + } + + xsc_core_dbg(adapter->xdev, "event cmdtype=%04x\n", out.ctx.resp_cmd_type); + break; + case XSC_CMD_EVENT_RESP_TEMP_WARN: + xsc_core_warn(adapter->xdev, "[Minor]nic chip temperature high warning\n"); + break; + case XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION: + 
xsc_core_warn(adapter->xdev, "[Critical]nic chip was over-temperature\n"); + break; + default: + xsc_core_info(adapter->xdev, "unknown event cmdtype=%04x\n", + out.ctx.resp_cmd_type); + break; + } + +failed: + return; +} + +void xsc_eth_event_handler(void *arg) +{ + struct xsc_adapter *adapter = (struct xsc_adapter *)arg; + + queue_work(adapter->workq, &adapter->event_work); +} +#endif + +int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct net_device *netdev = adapter->netdev; + struct xsc_cmd_enable_nic_hca_mbox_in in = {}; + struct xsc_cmd_enable_nic_hca_mbox_out out = {}; + u16 caps = 0; + u16 caps_mask = 0; + int err; + + if (xsc_get_user_mode(xdev)) + return 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_NIC_HCA); + +#ifdef XSC_RSS_SUPPORT + in.rss.rss_en = 1; + in.rss.rqn_base = cpu_to_be16(adapter->channels.rqn_base - + xdev->caps.raweth_rss_qp_id_base); + in.rss.rqn_num = cpu_to_be16(adapter->channels.num_chl); + in.rss.hash_tmpl = cpu_to_be32(adapter->rss_params.rss_hash_tmpl); + in.rss.hfunc = hash_func_type(adapter->rss_params.hfunc); +#else + in.rss.rss_en = 0; + if (adapter->channels.c) + in.rss.rqn_base = cpu_to_be16(adapter->channels.c[0].qp.rq[0].rqn - + xdev->caps.raweth_rss_qp_id_base); +#endif + caps_mask |= BIT(XSC_TBM_CAP_RSS); + + if (netdev->features & NETIF_F_RXCSUM) + caps |= BIT(XSC_TBM_CAP_HASH_PPH); + caps_mask |= BIT(XSC_TBM_CAP_HASH_PPH); + + if (xsc_get_pp_bypass_res(adapter->xdev, false)) + caps |= BIT(XSC_TBM_CAP_PP_BYPASS); + caps_mask |= BIT(XSC_TBM_CAP_PP_BYPASS); + + if (xsc_get_pct_drop_config(xdev) && !(netdev->flags & IFF_SLAVE)) + caps |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + caps_mask |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + + memcpy(in.nic.mac_addr, netdev->dev_addr, ETH_ALEN); + + in.nic.caps = cpu_to_be16(caps); + in.nic.caps_mask = cpu_to_be16(caps_mask); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed!! err=%d, status=%d\n", err, out.hdr.status); + return -ENOEXEC; + } + + xsc_core_info(xdev, "caps=0x%x, caps_mask=0x%x\n", caps, caps_mask); + + return 0; +} + +int xsc_eth_restore_nic_hca(struct xsc_core_device *dev) +{ + return xsc_eth_enable_nic_hca((struct xsc_adapter *)dev->eth_priv); +} +EXPORT_SYMBOL(xsc_eth_restore_nic_hca); + +int xsc_eth_disable_nic_hca(struct xsc_adapter *adapter) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct net_device *netdev = adapter->netdev; + struct xsc_cmd_disable_nic_hca_mbox_in in = {}; + struct xsc_cmd_disable_nic_hca_mbox_out out = {}; + int err; + u16 caps = 0; + + if (xsc_get_user_mode(xdev)) + return 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DISABLE_NIC_HCA); + + if (xsc_get_pp_bypass_res(adapter->xdev, false)) + caps |= BIT(XSC_TBM_CAP_PP_BYPASS); + + if (xsc_get_pct_drop_config(xdev) && !(netdev->priv_flags & IFF_BONDING)) + caps |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + + in.nic.caps = cpu_to_be16(caps); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed!! 
err=%d, status=%d\n", err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +void xsc_eth_rss_params_change(struct xsc_adapter *adapter, u32 change, void *modify) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_rss_params *rss = &adapter->rss_params; + struct xsc_eth_params *params = &adapter->nic_param; + struct xsc_cmd_modify_nic_hca_mbox_in *in = + (struct xsc_cmd_modify_nic_hca_mbox_in *)modify; + u32 hash_field = 0; + int key_len; + u8 rss_caps_mask = 0; + + if (xsc_get_user_mode(xdev)) + return; + + if (change & BIT(XSC_RSS_RXQ_DROP)) { + in->rss.rqn_base = cpu_to_be16(adapter->channels.rqn_base - + xdev->caps.raweth_rss_qp_id_base); + in->rss.rqn_num = 0; + rss_caps_mask |= BIT(XSC_RSS_RXQ_DROP); + goto rss_caps; + } + + if (change & BIT(XSC_RSS_RXQ_UPDATE)) { + in->rss.rqn_base = cpu_to_be16(adapter->channels.rqn_base - + xdev->caps.raweth_rss_qp_id_base); + in->rss.rqn_num = cpu_to_be16(params->num_channels); + rss_caps_mask |= BIT(XSC_RSS_RXQ_UPDATE); + } + + if (change & BIT(XSC_RSS_HASH_KEY_UPDATE)) { + key_len = min(sizeof(in->rss.hash_key), sizeof(rss->toeplitz_hash_key)); + memcpy(&in->rss.hash_key, rss->toeplitz_hash_key, key_len); + rss_caps_mask |= BIT(XSC_RSS_HASH_KEY_UPDATE); + } + + if (change & BIT(XSC_RSS_HASH_TEMP_UPDATE)) { + hash_field = rss->rx_hash_fields[XSC_TT_IPV4_TCP] | + rss->rx_hash_fields[XSC_TT_IPV6_TCP]; + in->rss.hash_tmpl = cpu_to_be32(hash_field); + rss_caps_mask |= BIT(XSC_RSS_HASH_TEMP_UPDATE); + } + + if (change & BIT(XSC_RSS_HASH_FUNC_UPDATE)) { + in->rss.hfunc = hash_func_type(rss->hfunc); + rss_caps_mask |= BIT(XSC_RSS_HASH_FUNC_UPDATE); + } + +rss_caps: + if (rss_caps_mask) { + in->rss.caps_mask = rss_caps_mask; + in->rss.rss_en = 1; + in->nic.caps_mask = cpu_to_be16(BIT(XSC_TBM_CAP_RSS)); + in->nic.caps = in->nic.caps_mask; + } +} + +int xsc_eth_modify_nic_hca(struct xsc_adapter *adapter, u32 flags) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_cmd_modify_nic_hca_mbox_in in = {}; + struct xsc_cmd_modify_nic_hca_mbox_out out = {}; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_HCA); + + xsc_eth_rss_params_change(adapter, flags, &in); + if (in.rss.caps_mask) { + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed!! 
err=%d, status=%u\n", + err, out.hdr.status); + return -ENOEXEC; + } + } + + return 0; +} + +static void xsc_set_default_xps_cpumasks(struct xsc_adapter *priv, + struct xsc_eth_params *params) +{ +#ifdef MSIX_SUPPORT + struct xsc_core_device *xdev = priv->xdev; + int num_comp_vectors, irq; + + num_comp_vectors = priv->nic_param.comp_vectors; + cpumask_clear(xdev->xps_cpumask); + + for (irq = 0; irq < num_comp_vectors; irq++) { + mask_cpu_by_node(xdev->priv.numa_node, xdev->xps_cpumask); + netif_set_xps_queue(priv->netdev, xdev->xps_cpumask, irq); + } +#endif +} + +static int xsc_set_port_admin_status(struct xsc_adapter *adapter, + enum xsc_port_status status) +{ + struct xsc_event_set_port_admin_status_mbox_in in; + struct xsc_event_set_port_admin_status_mbox_out out; + int ret = 0; + + if (!xsc_core_is_pf(adapter->xdev)) + return 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_PORT_ADMIN_STATUS); + in.admin_status = cpu_to_be16(status); + + ret = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to set port admin status, err=%d, status=%d\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return ret; +} + +int xsc_eth_open(struct net_device *netdev) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_core_device *xdev = adapter->xdev; + int ret = XSCALE_RET_SUCCESS; + + mutex_lock(&adapter->state_lock); + if (adapter->status == XSCALE_ETH_DRIVER_OK) { + xsc_core_warn(adapter->xdev, "unnormal ndo_open when status=%d\n", + adapter->status); + goto ret; + } + + spin_lock_init(&adapter->lock); + + ret = xsc_eth_sw_init(adapter); + if (ret) + goto ret; + + ret = xsc_eth_reset(xdev); + if (ret) + goto sw_deinit; + + ret = xsc_eth_enable_nic_hca(adapter); + if (ret) + goto sw_deinit; + +#ifdef NEED_CREATE_RX_THREAD + ret = xsc_eth_rx_thread_create(adapter); + if (ret) { + xsc_core_warn(xdev, "xsc_eth_rx_thread_create failed, err=%d\n", ret); + goto sw_deinit; + } +#endif + +#if defined(MSIX_SUPPORT) + /*INIT_WORK*/ + INIT_WORK(&adapter->event_work, xsc_eth_event_work); + xdev->event_handler = xsc_eth_event_handler; + + if (xsc_eth_get_link_status(adapter)) { + netdev_info(netdev, "Link up\n"); + netif_carrier_on(adapter->netdev); + } else { + netdev_info(netdev, "Link down\n"); + } +#else + netif_carrier_on(netdev); +#endif + + adapter->status = XSCALE_ETH_DRIVER_OK; + + xsc_set_default_xps_cpumasks(adapter, &adapter->nic_param); + + xsc_set_port_admin_status(adapter, XSC_PORT_UP); + + goto ret; + +sw_deinit: + xsc_eth_sw_deinit(adapter); + +ret: + mutex_unlock(&adapter->state_lock); + xsc_core_info(xdev, "open %s %s, ret=%d\n", + netdev->name, ret ? 
"failed" : "ok", ret); + if (ret) + return XSCALE_RET_ERROR; + else + return XSCALE_RET_SUCCESS; +} + +int xsc_eth_close(struct net_device *netdev) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + mutex_lock(&adapter->state_lock); + + if (!netif_device_present(netdev)) { + ret = -ENODEV; + goto ret; + } + + if (adapter->status != XSCALE_ETH_DRIVER_OK) + goto ret; + + adapter->status = XSCALE_ETH_DRIVER_CLOSE; + +#ifdef NEED_CREATE_RX_THREAD + if (adapter->task) + kthread_stop(adapter->task); +#endif + + netif_carrier_off(adapter->netdev); + + xsc_eth_sw_deinit(adapter); + + ret = xsc_eth_disable_nic_hca(adapter); + if (ret) + xsc_core_warn(adapter->xdev, "failed to disable nic hca, err=%d\n", ret); + + xsc_set_port_admin_status(adapter, XSC_PORT_DOWN); + +ret: + mutex_unlock(&adapter->state_lock); + xsc_core_info(adapter->xdev, "close device %s %s, ret=%d\n", + adapter->netdev->name, ret ? "failed" : "ok", ret); + + return ret; +} + +static int xsc_eth_set_mac(struct net_device *netdev, void *addr) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct sockaddr *saddr = addr; + struct xsc_core_device *xdev = adapter->xdev; + int ret; + u16 vport = xsc_core_is_pf(xdev) ? 0 : (xdev->vf_id + 1); + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + ret = xsc_modify_nic_vport_mac_address(xdev, vport, saddr->sa_data, false); + if (ret) + xsc_core_err(adapter->xdev, "%s: xsc set mac addr failed\n", __func__); + + netif_addr_lock_bh(netdev); + eth_hw_addr_set(netdev, saddr->sa_data); + netif_addr_unlock_bh(netdev); + + return 0; +} + +static void xsc_netdev_set_tcs(struct xsc_adapter *priv, u16 nch, u8 ntc) +{ + int tc; + + netdev_reset_tc(priv->netdev); + + if (ntc == 1) + return; + + netdev_set_num_tc(priv->netdev, ntc); + + /* Map netdev TCs to offset 0 + * We have our own UP to TXQ mapping for QoS + */ + for (tc = 0; tc < ntc; tc++) + netdev_set_tc_queue(priv->netdev, tc, nch, 0); +} + +static int xsc_update_netdev_queues(struct xsc_adapter *priv) +{ + struct net_device *netdev = priv->netdev; + int num_txqs, num_rxqs, nch, ntc; + int old_num_txqs, old_ntc; + int err; +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES + bool disabling; +#endif + + old_num_txqs = netdev->real_num_tx_queues; + old_ntc = netdev->num_tc ? : 1; + + nch = priv->nic_param.num_channels; + ntc = priv->nic_param.num_tc; + num_txqs = nch * ntc; + num_rxqs = nch;// * priv->profile->rq_groups; + +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES + disabling = num_txqs < netdev->real_num_tx_queues; +#endif + + xsc_netdev_set_tcs(priv, nch, ntc); + + err = netif_set_real_num_tx_queues(netdev, num_txqs); + if (err) { + netdev_warn(netdev, + "netif_set_real_num_tx_queues failed, txqs=%d->%d, tc=%d->%d, err=%d\n", + old_num_txqs, num_txqs, old_ntc, ntc, err); + goto err_tcs; + } + + err = netif_set_real_num_rx_queues(netdev, num_rxqs); + if (err) { + netdev_warn(netdev, "netif_set_real_num_rx_queues failed, rxqs=%d, err=%d\n", + num_rxqs, err); + goto err_txqs; + } + +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES + if (disabling) + synchronize_net(); +#endif + + return 0; + +err_txqs: + /* netif_set_real_num_rx_queues could fail only when nch increased. Only + * one of nch and ntc is changed in this function. That means, the call + * to netif_set_real_num_tx_queues below should not fail, because it + * decreases the number of TX queues. 
+ */ + WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs)); + +err_tcs: + xsc_netdev_set_tcs(priv, old_num_txqs / old_ntc, old_ntc); + return err; +} + +void xsc_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels) +{ + int i; + + for (i = 0; i < len; i++) + indirection_rqt[i] = i % num_channels; +} + +int xsc_eth_num_channels_changed(struct xsc_adapter *priv) +{ + struct net_device *netdev = priv->netdev; + u16 count = priv->nic_param.num_channels; + int err; + + err = xsc_update_netdev_queues(priv); + if (err) + goto err; + + if (!netif_is_rxfh_configured(priv->netdev)) + xsc_build_default_indir_rqt(priv->rss_params.indirection_rqt, + XSC_INDIR_RQT_SIZE, count); + + return 0; + +err: + netdev_err(netdev, "%s: failed to change rss rxq number %d, err=%d\n", + __func__, count, err); + return err; +} + +int xsc_safe_switch_channels(struct xsc_adapter *adapter, + xsc_eth_fp_preactivate preactivate, + xsc_eth_fp_postactivate postactivate) +{ + struct net_device *netdev = adapter->netdev; + int carrier_ok; + int ret = 0; + + adapter->status = XSCALE_ETH_DRIVER_CLOSE; + + carrier_ok = netif_carrier_ok(netdev); + netif_carrier_off(netdev); +#ifdef NEED_CREATE_RX_THREAD + if (adapter->task) + kthread_stop(adapter->task); +#endif + ret = xsc_eth_modify_nic_hca(adapter, BIT(XSC_RSS_RXQ_DROP)); + if (ret) + goto close_channels; + + xsc_eth_deactivate_priv_channels(adapter); + xsc_eth_close_channels(adapter); + + if (preactivate) { + ret = preactivate(adapter); + if (ret) + goto out; + } + + ret = xsc_eth_open_channels(adapter); + if (ret) + goto close_channels; + + if (postactivate) { + ret = postactivate(adapter); + if (ret) + goto close_channels; + } + + xsc_eth_activate_priv_channels(adapter); + ret = xsc_eth_modify_nic_hca(adapter, BIT(XSC_RSS_RXQ_UPDATE)); + if (ret) + goto close_channels; + +#ifdef NEED_CREATE_RX_THREAD + ret = xsc_eth_rx_thread_create(adapter); + if (ret) + goto close_channels; +#endif + + adapter->status = XSCALE_ETH_DRIVER_OK; + + goto out; + +close_channels: + xsc_eth_deactivate_priv_channels(adapter); + xsc_eth_close_channels(adapter); + +out: + if (carrier_ok) + netif_carrier_on(netdev); + xsc_core_dbg(adapter->xdev, "channels=%d, mtu=%d, err=%d\n", + adapter->nic_param.num_channels, + adapter->nic_param.mtu, ret); + return ret; +} + +int xsc_eth_nic_mtu_changed(struct xsc_adapter *priv) +{ + u32 new_mtu = priv->nic_param.mtu; + int ret; + + ret = xsc_eth_set_hw_mtu(priv->xdev, XSC_SW2HW_MTU(new_mtu), + XSC_SW2HW_RX_PKT_LEN(new_mtu)); + + return ret; +} + +static int xsc_eth_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int ret = 0; + int max_buf_len = 0; + + if (new_mtu > netdev->max_mtu || new_mtu < netdev->min_mtu) { + netdev_err(netdev, "%s: Bad MTU (%d), valid range is: [%d..%d]\n", + __func__, new_mtu, netdev->min_mtu, netdev->max_mtu); + return -EINVAL; + } + + if (!xsc_rx_is_linear_skb(new_mtu)) { + max_buf_len = adapter->xdev->caps.recv_ds_num * PAGE_SIZE; + if (new_mtu > max_buf_len) { + netdev_err(netdev, "Bad MTU (%d), max buf len is %d\n", + new_mtu, max_buf_len); + return -EINVAL; + } + } + mutex_lock(&adapter->state_lock); + adapter->nic_param.mtu = new_mtu; + if (adapter->status != XSCALE_ETH_DRIVER_OK) { + ret = xsc_eth_nic_mtu_changed(adapter); + if (ret) + adapter->nic_param.mtu = old_mtu; + else + netdev->mtu = adapter->nic_param.mtu; + goto out; + } + + ret = xsc_safe_switch_channels(adapter, xsc_eth_nic_mtu_changed, NULL); + 
if (ret) + goto out; + + netdev->mtu = adapter->nic_param.mtu; + +out: + mutex_unlock(&adapter->state_lock); + xsc_core_info(adapter->xdev, "mtu change from %d to %d, new_mtu=%d, err=%d\n", + old_mtu, netdev->mtu, new_mtu, ret); + return ret; +} + +static void xsc_get_stats(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + + xsc_fold_sw_stats64(adapter, stats); +} + +static void xsc_set_rx_mode(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + queue_work(priv->workq, &priv->set_rx_mode_work); +} + +int xsc_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_core_sriov *sriov = &adapter->xdev->priv.sriov; + struct xsc_core_device *xdev = adapter->xdev; + int ret; + + if (vf >= sriov->num_vfs) + return -EINVAL; + + ret = xsc_eswitch_set_vport_mac(xdev->priv.eswitch, vf + 1, mac); + if (ret) + xsc_core_err(xdev, "xsc set mac addr failed\n"); + + return ret; +} + +static int xsc_set_vf_trust(struct net_device *dev, int vf, bool setting) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + return xsc_eswitch_set_vport_trust(xdev->priv.eswitch, vf + 1, setting); +} + +static int xsc_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + return xsc_eswitch_set_vport_spoofchk(xdev->priv.eswitch, vf + 1, setting); +} + +static int xsc_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_vport *evport = xsc_eswitch_get_vport(xdev->priv.eswitch, vf + 1); + int err; + + if (!(dev->features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { + xsc_core_err(xdev, "dev features not support STAG_RX %llu STAG_TX %llu\n", + dev->features & NETIF_F_HW_VLAN_STAG_RX, + dev->features & NETIF_F_HW_VLAN_STAG_TX); + return -EOPNOTSUPP; + } + + if (vlan_proto != htons(ETH_P_8021Q) && vlan_proto != htons(ETH_P_8021AD)) + return -EPROTONOSUPPORT; + + err = xsc_eswitch_set_vport_vlan(xdev->priv.eswitch, vf + 1, + vlan, qos, vlan_proto); + if (err) { + xsc_core_err(xdev, "fail to set vf %d vlan %u qos %u err=%d\n", + vf, vlan, qos, err); + return err; + } + + if (evport) { + evport->vlan_id = vlan; + evport->vlan_qos = qos; + evport->vlan_proto = vlan_proto; + } + + return 0; +} + +int xsc_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eswitch *esw = xdev->priv.eswitch; + struct xsc_core_sriov *sriov = &xdev->priv.sriov; + int err; + + if (!netif_device_present(dev) || sriov->num_vfs > MAX_VF_NUM_MINIDUMP) + return -EOPNOTSUPP; + + err = xsc_eswitch_get_vport_config(esw, vf + 1, ivi); + + return err; +} + +int xsc_set_vf_link_state(struct net_device *dev, int vf, + int link_state) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eswitch *esw = xdev->priv.eswitch; + + return xsc_eswitch_set_vport_state(esw, vf + 1, link_state); +} + +int set_feature_rxcsum(struct net_device *netdev, bool enable) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_cmd_modify_nic_hca_mbox_in in = {}; + struct 
xsc_cmd_modify_nic_hca_mbox_out out = {};
+	int err;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_HCA);
+	in.nic.caps_mask = cpu_to_be16(BIT(XSC_TBM_CAP_HASH_PPH));
+	in.nic.caps = cpu_to_be16(enable << XSC_TBM_CAP_HASH_PPH);
+
+	err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if (err || out.hdr.status) {
+		netdev_err(netdev, "failed to change rxcsum=%d, err=%d, status=%d\n",
+			   enable, err, out.hdr.status);
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+
+int set_feature_vlan_offload(struct net_device *netdev, bool enable)
+{
+	int err = 0, i;
+	struct xsc_adapter *adapter = netdev_priv(netdev);
+	struct xsc_vport *evport = NULL;
+
+	if (!enable) {
+		for (i = 0; i < adapter->xdev->priv.eswitch->num_vfs; i++) {
+			evport = xsc_eswitch_get_vport(adapter->xdev->priv.eswitch,
+						       i + 1);
+			if (evport && (evport->vlan_id || evport->vlan_qos)) {
+				evport->vlan_id = 0;
+				evport->vlan_qos = 0;
+				err = xsc_eswitch_set_vport_vlan(adapter->xdev->priv.eswitch,
+								 i + 1, evport->vlan_id,
+								 evport->vlan_qos,
+								 evport->vlan_proto);
+				if (err)
+					xsc_core_err(adapter->xdev, "failed to clear vf vlan offload, vf=%d err=%d\n",
+						     i, err);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int xsc_handle_feature(struct net_device *netdev,
+			      netdev_features_t *features,
+			      netdev_features_t wanted_features,
+			      netdev_features_t feature,
+			      xsc_feature_handler feature_handler)
+{
+	netdev_features_t changes = wanted_features ^ netdev->features;
+	bool enable = !!(wanted_features & feature);
+	int err;
+
+	if (!(changes & feature))
+		return 0;
+
+	err = feature_handler(netdev, enable);
+	if (err) {
+		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
+			   enable ? "Enable" : "Disable", &feature, err);
+		return err;
+	}
+
+	xsc_set_feature(features, feature, enable);
+
+	return 0;
+}
+
+int xsc_eth_set_features(struct net_device *netdev, netdev_features_t features)
+{
+	netdev_features_t oper_features = netdev->features;
+	int err = 0;
+
+#define XSC_HANDLE_FEATURE(feature, handler) \
+	xsc_handle_feature(netdev, &oper_features, features, feature, handler)
+
+	err |= XSC_HANDLE_FEATURE(NETIF_F_RXCSUM, set_feature_rxcsum);
+	err |= XSC_HANDLE_FEATURE(NETIF_F_HW_VLAN_STAG_RX, set_feature_vlan_offload);
+	err |= XSC_HANDLE_FEATURE(NETIF_F_HW_VLAN_STAG_TX, set_feature_vlan_offload);
+	if (err) {
+		netdev->features = oper_features;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static netdev_features_t xsc_fix_features(struct net_device *netdev,
+					  netdev_features_t features)
+{
+	/* STAG RX and TX offloads are enabled and disabled as a pair */
+	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+		features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+	return features;
+}
+
+#ifdef HAVE_NETDEVICE_OPS_SELECT_QUEUE_FALLBACK
+u16 xsc_select_queue(struct net_device *dev, struct sk_buff *skb,
+		     struct net_device *sb_dev,
+		     select_queue_fallback_t fallback)
+{
+	int txq_ix = fallback(dev, skb, NULL);
+	u16 num_channels;
+	int up = 0;
+	struct xsc_adapter *adapter = netdev_priv(dev);
+
+	if (!adapter) {
+		pr_err("%s adapter is null\n", __func__);
+		return txq_ix;
+	}
+
+	if (!netdev_get_num_tc(dev))
+		return txq_ix;
+
+	if (skb_vlan_tag_present(skb)) {
+		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+		if (adapter->nic_param.num_tc > 1)
+			up = up % (adapter->nic_param.num_tc - 1) + 1;
+		else
+			up = 0;
+	}
+
+	/* channel_ix can be larger than num_channels since
+	 * dev->num_real_tx_queues = num_channels * num_tc
+	 */
+	num_channels = adapter->channels.num_chl;
+	if (txq_ix >= num_channels)
+		txq_ix = adapter->txq2sq[txq_ix]->ch_ix;
+
+	return adapter->channel_tc2realtxq[txq_ix][up];
+}
+#else
+u16 xsc_select_queue(struct net_device *dev, struct sk_buff *skb,
+		     struct net_device *sb_dev)
+{
+	int txq_ix = 0, up = 0;
+	u16 num_channels;
+	struct xsc_adapter *adapter = netdev_priv(dev);
+
+	if (!adapter) {
+		pr_err("%s adapter is null\n", __func__);
+		return txq_ix;
+	}
+
+	txq_ix = netdev_pick_tx(dev, skb, NULL);
+	if (!netdev_get_num_tc(dev))
+		return txq_ix;
+
+	if (skb_vlan_tag_present(skb)) {
+		up = skb_vlan_tag_get_prio(skb);
+		if (adapter->nic_param.num_tc > 1)
+			up = up % (adapter->nic_param.num_tc - 1) + 1;
+		else
+			up = 0;
+	}
+
+	/* channel_ix can be larger than num_channels since
+	 * dev->num_real_tx_queues = num_channels * num_tc
+	 */
+	num_channels = adapter->channels.num_chl;
+	if (txq_ix >= num_channels)
+		txq_ix = adapter->txq2sq[txq_ix]->ch_ix;
+
+	return adapter->channel_tc2realtxq[txq_ix][up];
+}
+#endif
+
+static int xsc_get_phys_port_name(struct net_device *dev,
+				  char *buf, size_t len)
+{
+	struct xsc_adapter *adapter = netdev_priv(dev);
+	struct xsc_core_device *xdev = adapter->xdev;
+	struct xsc_core_device *pf_xdev;
+	struct net_device *pf_netdev;
+	struct pci_dev *pdev = xdev->pdev;
+	int ret = len;
+
+	if (!pdev)
+		return -EOPNOTSUPP;
+	if (!xsc_core_is_pf(xdev)) {
+		if (!pdev->physfn)
+			return -EOPNOTSUPP;
+		pf_xdev = pci_get_drvdata(pdev->physfn);
+		if (!pf_xdev || !pf_xdev->netdev)
+			return -EOPNOTSUPP;
+		pf_netdev = (struct net_device *)pf_xdev->netdev;
+		ret = snprintf(buf, len, "%s_%d",
+			       pf_netdev->name, xdev->vf_id);
+	} else {
+		return -EOPNOTSUPP;
+	}
+	if (ret >= len)
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static int xsc_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate)
+{
+	struct xsc_adapter *adapter = netdev_priv(dev);
+	struct xsc_core_sriov *sriov = &adapter->xdev->priv.sriov;
+	struct xsc_core_device *xdev = adapter->xdev;
+	struct xsc_eswitch *esw = xdev->priv.eswitch;
+	u16 vport;
+	int err = 0;
+	u32 rate = 0;
+
+	if (vf >= sriov->num_vfs)
+		return -EINVAL;
+
+	if (min_tx_rate > 0)
+		return -EOPNOTSUPP;
+
+	vport = vf + 1;
+	xsc_core_dbg(xdev, "set vf rate %d Mbps\n", max_tx_rate);
+
+	rate = (u32)max_tx_rate;
+	err = xsc_eswitch_set_vport_rate(esw, vport, rate, 0);
+	if (err) {
+		xsc_core_err(xdev, "set_vf_rate failed, 
err=%d\n", err); + return -EINVAL; + } + + return 0; +} + +static const struct net_device_ops xsc_netdev_ops = { + .ndo_open = xsc_eth_open, + .ndo_stop = xsc_eth_close, + .ndo_start_xmit = xsc_eth_xmit_start, + + .ndo_set_rx_mode = xsc_set_rx_mode, + .ndo_validate_addr = NULL, + .ndo_set_mac_address = xsc_eth_set_mac, + .ndo_change_mtu = xsc_eth_change_mtu, + + .ndo_tx_timeout = NULL, + .ndo_set_tx_maxrate = NULL, + .ndo_vlan_rx_add_vid = xsc_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = xsc_vlan_rx_kill_vid, + .ndo_do_ioctl = NULL, + .ndo_set_vf_mac = xsc_set_vf_mac, + .ndo_set_vf_vlan = xsc_set_vf_vlan, + .ndo_set_vf_rate = xsc_set_vf_rate, + .ndo_set_vf_spoofchk = xsc_set_vf_spoofchk, + .ndo_set_vf_rss_query_en = NULL, + .ndo_set_vf_trust = xsc_set_vf_trust, + .ndo_get_vf_config = xsc_get_vf_config, + .ndo_set_vf_link_state = xsc_set_vf_link_state, + .ndo_get_stats64 = xsc_get_stats, + .ndo_setup_tc = NULL, + .ndo_set_features = xsc_eth_set_features, + .ndo_fix_features = xsc_fix_features, + .ndo_fdb_add = NULL, + .ndo_bridge_setlink = NULL, + .ndo_bridge_getlink = NULL, + .ndo_dfwd_add_station = NULL, + .ndo_dfwd_del_station = NULL, + .ndo_get_phys_port_name = xsc_get_phys_port_name, + +#ifdef HAVE_NETDEVICE_OPS_UDP_TUNNEL + .ndo_udp_tunnel_add = NULL, + .ndo_udp_tunnel_del = NULL, +#endif + .ndo_features_check = NULL, + .ndo_select_queue = xsc_select_queue, +}; + +static int xsc_get_max_num_channels(struct xsc_core_device *xdev) +{ +#ifdef NEED_CREATE_RX_THREAD + return 8; +#else + return min_t(int, xdev->dev_res->eq_table.num_comp_vectors, + XSC_ETH_MAX_NUM_CHANNELS); +#endif +} + +static int xsc_eth_netdev_init(struct xsc_adapter *adapter) +{ + unsigned int node, tc, nch; + + tc = adapter->nic_param.num_tc; + nch = adapter->nic_param.max_num_ch; + node = dev_to_node(adapter->dev); + adapter->txq2sq = kcalloc_node(nch * tc, + sizeof(*adapter->txq2sq), GFP_KERNEL, node); + if (!adapter->txq2sq) + goto err_out; + + mutex_init(&adapter->state_lock); + + INIT_WORK(&adapter->set_rx_mode_work, xsc_set_rx_mode_work); + + adapter->workq = create_singlethread_workqueue("xsc_eth"); + if (!adapter->workq) + goto err_free_priv; + + netif_carrier_off(adapter->netdev); + + return 0; + +err_free_priv: + kfree(adapter->txq2sq); +err_out: + return -ENOMEM; +} + +static const struct xsc_tirc_config tirc_default_config[XSC_NUM_INDIR_TIRS] = { + [XSC_TT_IPV4] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = XSC_HASH_IP, + }, + [XSC_TT_IPV4_TCP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = XSC_L4_PROT_TYPE_TCP, + .rx_hash_fields = XSC_HASH_IP_PORTS, + }, + [XSC_TT_IPV4_UDP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = XSC_L4_PROT_TYPE_UDP, + .rx_hash_fields = XSC_HASH_IP_PORTS, + }, + [XSC_TT_IPV6] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = XSC_HASH_IP6, + }, + [XSC_TT_IPV6_TCP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = XSC_L4_PROT_TYPE_TCP, + .rx_hash_fields = XSC_HASH_IP6_PORTS, + }, + [XSC_TT_IPV6_UDP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = XSC_L4_PROT_TYPE_UDP, + .rx_hash_fields = XSC_HASH_IP6_PORTS, + }, +}; + +struct xsc_tirc_config xsc_tirc_get_default_config(enum xsc_traffic_types tt) +{ + return tirc_default_config[tt]; +} + +void xsc_build_rss_params(struct xsc_rss_params *rss_params, u16 num_channels) +{ + enum xsc_traffic_types tt; + + rss_params->hfunc = ETH_RSS_HASH_TOP; + netdev_rss_key_fill(rss_params->toeplitz_hash_key, + 
sizeof(rss_params->toeplitz_hash_key)); + + xsc_build_default_indir_rqt(rss_params->indirection_rqt, + XSC_INDIR_RQT_SIZE, num_channels); + + for (tt = 0; tt < XSC_NUM_INDIR_TIRS; tt++) { + rss_params->rx_hash_fields[tt] = + tirc_default_config[tt].rx_hash_fields; + } + rss_params->rss_hash_tmpl = XSC_HASH_IP_PORTS | XSC_HASH_IP6_PORTS; +} + +void xsc_eth_build_nic_params(struct xsc_adapter *adapter, u32 ch_num, u32 tc_num) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eth_params *params = &adapter->nic_param; + + params->mtu = SW_DEFAULT_MTU; + params->num_tc = tc_num; + + params->comp_vectors = xdev->dev_res->eq_table.num_comp_vectors; + params->max_num_ch = ch_num; + params->num_channels = ch_num; + + params->rq_max_size = BIT(xdev->caps.log_max_qp_depth); + params->sq_max_size = BIT(xdev->caps.log_max_qp_depth); + xsc_build_rss_params(&adapter->rss_params, adapter->nic_param.num_channels); + + if (params->num_channels > XSC_NET_DIM_ENABLE_THRESHOLD) { + params->rx_dim_enabled = 1; + params->tx_dim_enabled = 1; + xsc_set_rx_cq_mode_params(params, XSC_CQ_PERIOD_MODE_START_FROM_EQE); + xsc_set_tx_cq_mode_params(params, XSC_CQ_PERIOD_MODE_START_FROM_EQE); + } + + xsc_core_info(xdev, "mtu=%d, num_ch=%d(max=%d), num_tc=%d\n", + params->mtu, params->num_channels, + params->max_num_ch, params->num_tc); +} + +void xsc_eth_build_nic_netdev(struct xsc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct xsc_core_device *xdev = adapter->xdev; + + /* Set up network device as normal. */ + netdev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; + netdev->netdev_ops = &xsc_netdev_ops; + +#ifdef CONFIG_XSC_CORE_EN_DCB + netdev->dcbnl_ops = &xsc_dcbnl_ops; +#endif + eth_set_ethtool_ops(netdev); + + netdev->min_mtu = SW_MIN_MTU; + netdev->max_mtu = SW_MAX_MTU; + /*mtu - macheaderlen - ipheaderlen should be aligned in 8B*/ + netdev->mtu = SW_DEFAULT_MTU; + + netdev->vlan_features |= NETIF_F_SG; + netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;//NETIF_F_HW_CSUM; + netdev->vlan_features |= NETIF_F_GRO; + netdev->vlan_features |= NETIF_F_TSO;//NETIF_F_TSO_ECN + netdev->vlan_features |= NETIF_F_TSO6; + //todo: enable rx csum + netdev->vlan_features |= NETIF_F_RXCSUM; + netdev->vlan_features |= NETIF_F_RXHASH; + netdev->vlan_features |= NETIF_F_GSO_PARTIAL; + + netdev->hw_features = netdev->vlan_features; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + + if (xsc_vxlan_allowed(xdev) || xsc_geneve_tx_allowed(xdev) || + xsc_any_tunnel_proto_supported(xdev)) { + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->hw_enc_features |= NETIF_F_TSO; //NETIF_F_TSO_ECN + netdev->hw_enc_features |= NETIF_F_TSO6; + netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL; + } + + netdev->features |= netdev->hw_features; + netdev->features |= NETIF_F_HIGHDMA; +} + +static int xsc_eth_nic_init(struct xsc_adapter *adapter, + void *rep_priv, u32 ch_num, u32 tc_num) +{ + int err = -1; + + xsc_eth_build_nic_params(adapter, ch_num, tc_num); + + err = xsc_eth_netdev_init(adapter); + if (err) + return err; + + xsc_eth_build_nic_netdev(adapter); + + return 0; +} + +static void xsc_eth_nic_cleanup(struct xsc_adapter *adapter) +{ + destroy_workqueue(adapter->workq); + kfree(adapter->txq2sq); +} + +/* create xdev resource,pd/domain/mkey */ +int xsc_eth_create_xdev_resources(struct xsc_core_device *xdev) +{ + return 0; +} + +static int xsc_eth_init_nic_tx(struct xsc_adapter 
*adapter) +{ + /*create tis table*/ +#ifdef CONFIG_XSC_CORE_EN_DCB + xsc_dcbnl_initialize(adapter); +#endif + + return 0; +} + +static int xsc_eth_cleanup_nic_tx(struct xsc_adapter *adapter) +{ + return 0; +} + +/* init tx: create hw resource, set register according to spec */ +int xsc_eth_init_nic_rx(struct xsc_adapter *adapter) +{ + /* create rqt and tir table + * tir table:base on traffic type like ip4_tcp/ipv6_tcp/ + * each rqt table for a traffic type + */ + + return 0; +} + +static int xsc_eth_cleanup_nic_rx(struct xsc_adapter *adapter) +{ + return 0; +} + +static void xsc_eth_l2_addr_init(struct xsc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + char mac[6] = {0}; + int ret = 0; + + ret = xsc_eth_get_mac(adapter->xdev, mac); + if (ret) { + xsc_core_warn(adapter->xdev, "get mac failed %d, generate random mac...", ret); + eth_random_addr(mac); + } + dev_addr_mod(netdev, 0, mac, 6); + + if (!is_valid_ether_addr(netdev->perm_addr)) + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); +} + +static int xsc_eth_nic_enable(struct xsc_adapter *adapter) +{ + struct xsc_core_device *xdev = adapter->xdev; + + if (xsc_core_is_pf(xdev)) + xsc_lag_add_netdev(adapter->netdev); + xsc_eth_l2_addr_init(adapter); + + xsc_eth_set_hw_mtu(xdev, XSC_SW2HW_MTU(adapter->nic_param.mtu), + XSC_SW2HW_RX_PKT_LEN(adapter->nic_param.mtu)); + +#ifdef CONFIG_XSC_CORE_EN_DCB + xsc_dcbnl_init_app(adapter); +#endif + + rtnl_lock(); + netif_device_attach(adapter->netdev); + rtnl_unlock(); + + return 0; +} + +static void xsc_eth_nic_disable(struct xsc_adapter *adapter) +{ + rtnl_lock(); + if (netif_running(adapter->netdev)) + xsc_eth_close(adapter->netdev); + netif_device_detach(adapter->netdev); + rtnl_unlock(); + + if (xsc_core_is_pf(adapter->xdev)) + xsc_lag_remove_netdev(adapter->netdev); +} + +/* call init tx/rx, enable function about nic init */ +static int xsc_attach_netdev(struct xsc_adapter *adapter) +{ + int err = -1; + + err = xsc_eth_init_nic_tx(adapter); + if (err) + return err; + + err = xsc_eth_init_nic_rx(adapter); + if (err) + return err; + + err = xsc_eth_nic_enable(adapter); + if (err) + return err; + + xsc_core_info(adapter->xdev, "%s ok\n", __func__); + return 0; +} + +static void xsc_detach_netdev(struct xsc_adapter *adapter) +{ + xsc_eth_nic_disable(adapter); + + flush_workqueue(adapter->workq); + + xsc_eth_cleanup_nic_rx(adapter); + xsc_eth_cleanup_nic_tx(adapter); + adapter->status = XSCALE_ETH_DRIVER_DETACH; +} + +static int xsc_eth_attach(struct xsc_core_device *xdev, struct xsc_adapter *adapter) +{ + int err = -1; + + if (netif_device_present(adapter->netdev)) + return 0; + + err = xsc_eth_create_xdev_resources(xdev); + if (err) + return err; + + err = xsc_attach_netdev(adapter); + if (err) + return err; + + xsc_core_info(adapter->xdev, "%s ok\n", __func__); + return 0; +} + +static void xsc_eth_detach(struct xsc_core_device *xdev, struct xsc_adapter *adapter) +{ + if (!netif_device_present(adapter->netdev)) + return; + + xsc_detach_netdev(adapter); +} + +static void *xsc_eth_add(struct xsc_core_device *xdev) +{ + int err = -1; + int num_chl, num_tc; + struct net_device *netdev; + struct xsc_adapter *adapter = NULL; + void *rep_priv = NULL; + + num_chl = xsc_get_max_num_channels(xdev); + num_tc = xdev->caps.max_tc; + + /* Allocate ourselves a network device with room for our info */ + netdev = alloc_etherdev_mqs(sizeof(struct xsc_adapter), + num_chl * num_tc, num_chl); + if (unlikely(!netdev)) { + xsc_core_warn(xdev, "alloc_etherdev_mqs failed, txq=%d, 
rxq=%d\n",
+			      (num_chl * num_tc), num_chl);
+		return NULL;
+	}
+
+	/* Set up our device-specific information */
+	netdev->dev.parent = &xdev->pdev->dev;
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = xdev->pdev;
+	adapter->dev = &adapter->pdev->dev;
+	adapter->xdev = (void *)xdev;
+	xdev->eth_priv = adapter;
+
+	err = xsc_eth_nic_init(adapter, rep_priv, num_chl, num_tc);
+	if (err) {
+		xsc_core_warn(xdev, "xsc_nic_init failed, num_ch=%d, num_tc=%d, err=%d\n",
+			      num_chl, num_tc, err);
+		goto err_free_netdev;
+	}
+
+	err = xsc_eth_attach(xdev, adapter);
+	if (err) {
+		xsc_core_warn(xdev, "xsc_eth_attach failed, err=%d\n", err);
+		goto err_cleanup_netdev;
+	}
+
+	adapter->stats = kvzalloc(sizeof(*adapter->stats), GFP_KERNEL);
+	if (unlikely(!adapter->stats))
+		goto err_detach;
+
+	err = register_netdev(netdev);
+	if (err) {
+		xsc_core_warn(xdev, "register_netdev failed, err=%d\n", err);
+		goto err_reg_netdev;
+	}
+
+	err = xsc_eth_sysfs_create(netdev, xdev);
+	if (err)
+		goto err_sysfs_create;
+
+	xdev->netdev = (void *)netdev;
+	adapter->status = XSCALE_ETH_DRIVER_INIT;
+
+	return adapter;
+
+err_sysfs_create:
+	unregister_netdev(adapter->netdev);
+err_reg_netdev:
+	/* stats was allocated with kvzalloc(), so it must be freed with kvfree() */
+	kvfree(adapter->stats);
+err_detach:
+	xsc_eth_detach(xdev, adapter);
+err_cleanup_netdev:
+	xsc_eth_nic_cleanup(adapter);
+err_free_netdev:
+	free_netdev(netdev);
+
+	return NULL;
+}
+
+static void xsc_eth_remove(struct xsc_core_device *xdev, void *context)
+{
+	struct xsc_adapter *adapter = NULL;
+
+	if (!xdev)
+		return;
+
+	adapter = xdev->eth_priv;
+	if (!adapter) {
+		xsc_core_warn(xdev, "failed! adapter is null\n");
+		return;
+	}
+
+	xsc_core_info(adapter->xdev, "remove netdev %s entry\n", adapter->netdev->name);
+
+	xsc_eth_sysfs_remove(adapter->netdev, xdev);
+
+	unregister_netdev(adapter->netdev);
+
+	kvfree(adapter->stats);
+
+	xsc_eth_detach(xdev, adapter);
+	xsc_eth_nic_cleanup(adapter);
+
+	free_netdev(adapter->netdev);
+
+	xdev->netdev = NULL;
+	xdev->eth_priv = NULL;
+}
+
+static struct xsc_interface xsc_interface = {
+	.add = xsc_eth_add,
+	.remove = xsc_eth_remove,
+	.event = NULL,
+	.protocol = XSC_INTERFACE_PROTOCOL_ETH,
+};
+
+int xsc_net_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data)
+{
+	pr_info("xsc net driver recv %lu event\n", action);
+	if (xsc_get_exit_flag())
+		return NOTIFY_OK;
+	xsc_remove_eth_driver();
+
+	return NOTIFY_OK;
+}
+
+struct notifier_block xsc_net_nb = {
+	.notifier_call = xsc_net_reboot_event_handler,
+	.next = NULL,
+	.priority = 1,
+};
+
+void xsc_remove_eth_driver(void)
+{
+	pr_info("remove ethernet driver\n");
+	xsc_eth_ctrl_fini();
+	xsc_unregister_interface(&xsc_interface);
+}
+
+static __init int xsc_net_driver_init(void)
+{
+	int ret;
+
+	pr_info("add ethernet driver\n");
+	ret = xsc_register_interface(&xsc_interface);
+	if (ret != 0) {
+		pr_err("failed to register interface\n");
+		goto out;
+	}
+
+	ret = xsc_eth_ctrl_init();
+	if (ret != 0) {
+		pr_err("failed to register port control node\n");
+		xsc_unregister_interface(&xsc_interface);
+		goto out;
+	}
+
+	register_reboot_notifier(&xsc_net_nb);
+	return 0;
+out:
+	return ret;
+}
+
+static __exit void xsc_net_driver_exit(void)
+{
+	unregister_reboot_notifier(&xsc_net_nb);
+	xsc_remove_eth_driver();
+}
+
+module_init(xsc_net_driver_init);
+module_exit(xsc_net_driver_exit);
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c b/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c
new file mode 100644
index
0000000000000000000000000000000000000000..6c4afad1be8fbc6af4400917d1c2e545e7c88b98
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/percpu.h>
+
+#include "common/xsc_hsi.h"
+#include "common/xsc_core.h"
+#include "common/xsc_cmd.h"
+
+#include "xsc_eth.h"
+#include "xsc_accel.h"
+#include "xsc_eth_txrx.h"
+#include "xsc_eth_stats.h"
+#include "xsc_eth_debug.h"
+
+#ifdef NEED_CREATE_RX_THREAD
+
+extern void xsc_cq_notify_hw(struct xsc_cq *cq);
+
+DEFINE_PER_CPU(bool, txcqe_get);
+EXPORT_PER_CPU_SYMBOL(txcqe_get);
+
+u32 xsc_eth_process_napi(struct xsc_adapter *adapter)
+{
+	int work_done = 0;
+	bool err = false;
+	int budget = 1;
+	int i, chl;
+	bool errtx = false;
+	struct xsc_channel *c;
+	struct xsc_rq *prq;
+	struct xsc_ch_stats *ch_stats;
+
+	if (adapter->status == XSCALE_ETH_DRIVER_OK) {
+		for (chl = 0; chl < adapter->channels.num_chl; chl++) {
+			c = &adapter->channels.c[chl];
+			prq = &c->qp.rq[0];
+			ch_stats = c->stats;
+			ch_stats->poll++;
+
+			for (i = 0; i < c->num_tc; i++) {
+				errtx |= xsc_poll_tx_cq(&c->qp.sq[i].cq, budget);
+				ETH_DEBUG_LOG("errtx=%u.\r\n", errtx);
+				if (likely(__this_cpu_read(txcqe_get))) {
+					xsc_cq_notify_hw(&c->qp.sq[i].cq);
+					__this_cpu_write(txcqe_get, false);
+				}
+			}
+
+			work_done = xsc_poll_rx_cq(&prq->cq, budget);
+
+			ETH_DEBUG_LOG("work_done=%d.\r\n", work_done);
+
+			if (work_done != 0) {
+				xsc_cq_notify_hw(&prq->cq);
+				err |= prq->post_wqes(prq);
+
+				ETH_DEBUG_LOG("err=%u.\r\n", err);
+			} else {
+				ETH_DEBUG_LOG("no-load.\r\n");
+			}
+
+			ch_stats->arm++;
+		}
+	}
+
+	return XSCALE_RET_SUCCESS;
+}
+
+int xsc_eth_rx_thread(void *arg)
+{
+	u32 ret = XSCALE_RET_SUCCESS;
+	struct xsc_adapter *adapter = (struct xsc_adapter *)arg;
+
+	while (!kthread_should_stop()) {
+		if (need_resched())
+			schedule();
+		ret = xsc_eth_process_napi(adapter);
+		if (ret != XSCALE_RET_SUCCESS)
+			ETH_DEBUG_LOG("unexpected branch.\r\n");
+
+		ETH_DEBUG_LOG("adapter=%p\r\n", adapter);
+	}
+	ETH_DEBUG_LOG("do_exit.\r\n");
+
+	return XSCALE_RET_SUCCESS;
+}
+
+u32 g_thread_count;
+u32 xsc_eth_rx_thread_create(struct xsc_adapter *adapter)
+{
+	struct task_struct *task = NULL;
+
+	task = kthread_create(xsc_eth_rx_thread, (void *)adapter,
+			      "xsc_rx%i", g_thread_count);
+	/* kthread_create() returns ERR_PTR() on failure, never NULL */
+	if (IS_ERR(task))
+		return XSCALE_RET_ERROR;
+
+	ETH_DEBUG_LOG("thread_count=%d\r\n", g_thread_count);
+
+	kthread_bind(task, g_thread_count);
+	wake_up_process(task);
+	adapter->task = task;
+
+	g_thread_count++;
+
+	return XSCALE_RET_SUCCESS;
+}
+#endif /* NEED_CREATE_RX_THREAD */
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h
new file mode 100644
index 0000000000000000000000000000000000000000..1378be66b6156f6e6e16df29e5ffcdaf4a20a7b4
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef XSC_ACCEL_H
+#define XSC_ACCEL_H
+
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include "common/xsc_core.h"
+
+static inline void xsc_udp_gso_handle_tx_skb(struct sk_buff *skb)
+{
+	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+
+	udp_hdr(skb)->len = htons(payload_len);
+}
+
+static inline struct sk_buff *xsc_accel_handle_tx(struct sk_buff *skb)
+{
+	/* TLS and IPsec offloads are not considered here */
+	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+		xsc_udp_gso_handle_tx_skb(skb);
+	return skb;
+}
+
+static inline bool xsc_vxlan_allowed(struct xsc_core_device *dev)
+{
+	return false;
+}
+
+static inline bool xsc_geneve_tx_allowed(struct xsc_core_device *dev)
+{
+	return false;
+}
+
+static inline bool xsc_any_tunnel_proto_supported(struct xsc_core_device *dev)
+{
+	return false;
+}
+
+#endif /* XSC_ACCEL_H */
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c
new file mode 100644
index 0000000000000000000000000000000000000000..36503b3113f78769d66d5c0b9108c7a3503be58e
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c
@@ -0,0 +1,1482 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/dcbnl.h>
+#include <net/dcbnl.h>
+#include "common/xsc_core.h"
+#include "common/xsc_cmd.h"
+#include "common/vport.h"
+#include "xsc_eth.h"
+#include "xsc_eth_debug.h"
+#include "xsc_hw_comm.h"
+
+#ifndef IEEE_8021QAZ_APP_SEL_DSCP
+#define IEEE_8021QAZ_APP_SEL_DSCP 5
+#endif
+
+#define XSC_100MB (100000)
+#define XSC_1GB (1000000)
+#define XSC_RATE_LIMIT_BASE (16000)
+#define XSC_WRR_DIV_BASE 10
+#define XSC_WRR_DEFAULT_WEIGHT 10
+#define XSC_DCBX_WFQ_TOTAL_WEIGHT 100
+#define XSC_DCBX_MAX_TC 8
+
+#define XSC_CEE_STATE_UP 1
+#define XSC_CEE_STATE_DOWN 0
+
+/* Max supported cable length is 1000 meters */
+#define XSC_MAX_CABLE_LENGTH 1000
+
+enum {
+	XSC_VENDOR_TC_GROUP_NUM = 7,
+	XSC_LOWEST_PRIO_GROUP = 0,
+};
+
+#ifdef CONFIG_XSC_CORE_EN_DCB
+static int xsc_set_trust_state(struct xsc_adapter *priv, u8 trust_state);
+static int xsc_set_dscp2prio(struct xsc_adapter *priv, u8 dscp, u8 prio);
+static u8 xsc_dcbnl_setall(struct net_device *netdev);
+
+static int xsc_max_tc(struct xsc_core_device *dev)
+{
+	u8 num_tc = dev->caps.max_tc ? : 8;
+
+	if (num_tc > XSC_DCBX_MAX_TC)
+		num_tc = XSC_DCBX_MAX_TC;
+
+	return num_tc - 1;
+}
+
+static void xsc_pfc_array2bitmap(u8 *pfcbitmap, u8 *array)
+{
+	u8 i;
+
+	*pfcbitmap = 0;
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		if (array[i])
+			*pfcbitmap = *pfcbitmap | (1 << i);
+	}
+}
+
+static void xsc_pfc_bitmap2array(u8 pfcbitmap, u8 *array)
+{
+	u8 i;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		if ((pfcbitmap >> i) & 0x1)
+			array[i] = 1;
+	}
+}
+
+static int xsc_query_port_prio_tc(struct xsc_core_device *xdev, int prio, u8 *tc)
+{
+	/* user priority to tc is mapped 1:1, i.e. 0:0; 1:1; 2:2; 3:3 ... 
7:7 */ + *tc = (u8)prio; + return 0; +} + +static int xsc_set_port_prio_tc(struct xsc_core_device *xdev, u8 *prio_tc) +{ + u8 i; + + for (i = 0; i <= xsc_max_tc(xdev); i++) + prio_tc[i] = i; + + return 0; +} + +static int xsc_wfq_to_wrr_adpat(struct xsc_core_device *xdev, u8 *dst_bw, + u8 *src_bw, u8 ets_cnt, u8 min_weight) +{ + u8 i, index; + u8 max_commom_div = 1; + u8 flag[XSC_DCBX_WFQ_TOTAL_WEIGHT] = {0}; + + if (min_weight >= XSC_DCBX_WFQ_TOTAL_WEIGHT || !ets_cnt) + return 0; + + for (index = 1; index <= min_weight; index++) { + for (i = 0; i < ets_cnt; i++) { + /*any ets bw can not div by whole,flag = 1*/ + if (src_bw[i] % index) { + flag[index] = 1; + break; + } + } + } + + for (index = 1; index <= min_weight; index++) { + if (flag[index] == 0) + max_commom_div = index; + } + + xsc_core_dbg(xdev, "max_commom_div = %d, min_weight = %d\n", max_commom_div, min_weight); + + for (i = 0; i < ets_cnt; i++) { + dst_bw[i] = src_bw[i] / max_commom_div; + xsc_core_dbg(xdev, "dst_bw[%d] = %d\n", i, dst_bw[i]); + } + + return 0; +} + +static int xsc_wrr_to_wfq_adpat(struct xsc_core_device *xdev, + struct xsc_weight_get *wrr, u8 *bandwidth) +{ + u8 i, wrr_cnt = 0, index; + u16 wrr_total_weight = 0, wfq_tatal_weight = 0; + u16 portion = 0; + u16 rmndr = 0; + u16 temp[IEEE_8021QAZ_MAX_TCS] = {0}; + + /*1 calc cur wrr weight total*/ + for (i = 0; i <= wrr->max_prio; i++) { + if (wrr->weight[i] > 0) { + wrr_total_weight += wrr->weight[i]; + wrr_cnt++; + } + } + + xsc_core_dbg(xdev, "%s: wrr_total_weight = %d max_prio = %d\n", + __func__, wrr_total_weight, wrr->max_prio); + + if (!wrr_total_weight || wrr_total_weight > XSC_DCBX_WFQ_TOTAL_WEIGHT) + return -EINVAL; + + portion = XSC_DCBX_WFQ_TOTAL_WEIGHT / wrr_total_weight; + rmndr = XSC_DCBX_WFQ_TOTAL_WEIGHT % wrr_total_weight; + + /*2 calc major wfq weight*/ + for (i = 0; i <= wrr->max_prio; i++) { + if (wrr->weight[i] > 0) { + temp[i] = wrr->weight[i] * portion; + wfq_tatal_weight += temp[i]; + } + } + + xsc_core_dbg(xdev, "portion = %d, rmndr = %d, wfq_tatal = %d\n", + portion, rmndr, wfq_tatal_weight); + + /*3 average remainder to every prio*/ + if (rmndr > 0) { + for (i = 0; i < rmndr; i++) { + index = i % wrr_cnt; + temp[index] = temp[index] + 1; + } + } + for (i = 0; i <= wrr->max_prio; i++) + bandwidth[i] = (u8)temp[i]; + + return 0; +} + +static int xsc_query_port_ets_rate_limit(struct xsc_core_device *xdev, u64 *ratelimit) +{ + u8 i; + int err = 0; + struct xsc_rate_limit_get req; + struct xsc_rate_limit_get rsp; + + memset(&req, 0, sizeof(struct xsc_rate_limit_get)); + memset(&rsp, 0, sizeof(struct xsc_rate_limit_get)); + /*0--port rate limit; 1--priority rate limit*/ + req.limit_level = 1; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_RATE_LIMIT, &req, &rsp); + if (err) + return err; + + for (i = 0; i <= xsc_max_tc(xdev); i++) + ratelimit[i] = (u64)(rsp.rate_cir[i]); + + return 0; +} + +static int xsc_modify_port_ets_rate_limit(struct xsc_core_device *xdev, u64 *ratelimit) +{ + u8 i; + struct xsc_rate_limit_set req; + + memset(&req, 0, sizeof(struct xsc_rate_limit_set)); + req.limit_level = 1; + + for (i = 0; i <= xsc_max_tc(xdev); i++) { + req.rate_cir = (u32)ratelimit[i]; + req.limit_id = i; + xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_RATE_LIMIT, &req, NULL); + } + + return 0; +} + +static int xsc_query_port_bw_config(struct xsc_core_device *xdev, u8 *bandwidth) +{ + u8 i; + u8 sp_cnt = 0; + int err = 0; + struct xsc_sp_get sp_rsp; + struct xsc_weight_get weight_rsp; + + memset(&sp_rsp, 0, sizeof(struct xsc_sp_get)); + err = 
xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_SP, NULL, &sp_rsp); + if (err) + return err; + /*SP enable,bandwidth is 0*/ + for (i = 0; i <= sp_rsp.max_prio; i++) { + if (sp_rsp.sp[i]) { + sp_cnt++; + bandwidth[i] = 0; + } + } + + xsc_core_dbg(xdev, "sp_cnt = %d, max_prio = %d\n", sp_cnt, sp_rsp.max_prio); + + memset(&weight_rsp, 0, sizeof(struct xsc_weight_get)); + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_WEIGHT, NULL, &weight_rsp); + if (err) + return err; + + xsc_core_dbg(xdev, "weight_rsp.max_prio = %d\n", weight_rsp.max_prio); + for (i = 0; i <= weight_rsp.max_prio; i++) + xsc_core_dbg(xdev, "i = %d, weight = %d\n", i, weight_rsp.weight[i]); + + xsc_wrr_to_wfq_adpat(xdev, &weight_rsp, bandwidth); + + return 0; +} + +static int xsc_query_port_pfc(struct xsc_core_device *xdev, u8 *pfc_bitmap) +{ + int err = 0; + struct xsc_pfc_get rsp; + + memset(&rsp, 0, sizeof(struct xsc_pfc_get)); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_PFC, NULL, &rsp); + if (err) + return err; + + xsc_pfc_array2bitmap(pfc_bitmap, rsp.pfc_on); + + return 0; +} + +static int xsc_query_port_stats(struct xsc_core_device *xdev, struct ieee_pfc *pfc) +{ + u8 i; + int err = 0; + struct xsc_pfc_prio_stats_mbox_in req; + struct xsc_pfc_prio_stats_mbox_out rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_prio_stats_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_pfc_prio_stats_mbox_out)); + + req.pport = xdev->mac_port; + req.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_QUERY_PFC_PRIO_STATS); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_QUERY_PFC_PRIO_STATS, &req, &rsp); + if (err == 0 && rsp.hdr.status == 0) { + for (i = 0; i <= xsc_max_tc(xdev); i++) { + pfc->requests[i] = rsp.prio_stats[i].tx_pause; + pfc->indications[i] = rsp.prio_stats[i].rx_pause; + } + } + + return 0; +} + +static int xsc_query_port_pfc_stats(struct xsc_core_device *xdev, struct ieee_pfc *pfc) +{ + xsc_query_port_stats(xdev, pfc); + + xsc_query_port_pfc(xdev, &pfc->pfc_en); + + return 0; +} + +static int xsc_set_port_pfc(struct xsc_core_device *xdev, u8 pfcbitmap) +{ + u8 i; + u8 pfc_en[IEEE_8021QAZ_MAX_TCS] = {0}; + struct xsc_pfc_set req; + struct xsc_pfc_set rsp; + + xsc_pfc_bitmap2array(pfcbitmap, pfc_en); + + memset(&req, 0, sizeof(struct xsc_pfc_set)); + for (i = 0; i <= xsc_max_tc(xdev); i++) { + req.pfc_on = pfc_en[i]; + req.priority = i; + xsc_core_dbg(xdev, "%s: prio %d, pfc %d\n", __func__, i, req.pfc_on); + xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_PFC, &req, &rsp); + } + return 0; +} + +static int xsc_cmd_set_dscp2prio(struct xsc_core_device *xdev, u8 dscp, u8 prio) +{ + int err = 0; + struct xsc_dscp_pmt_set req; + + memset(&req, 0, sizeof(struct xsc_dscp_pmt_set)); + req.dscp = dscp; + req.priority = prio; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_DSCP_PMT, &req, NULL); + if (err) + return err; + + xsc_core_dbg(xdev, "%s: dscp %d mapping to prio %d\n", __func__, dscp, prio); + + return 0; +} + +static int xsc_cmd_set_trust_state(struct xsc_core_device *xdev, u8 trust_state) +{ + int err = 0; + struct xsc_trust_mode_set req; + + memset(&req, 0, sizeof(struct xsc_trust_mode_set)); + + /*set trust state,0,DSCP mdoe; 1,PCP mode*/ + if (trust_state == XSC_QPTS_TRUST_PCP) + req.is_pcp = 1; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_TRUST_MODE, &req, NULL); + if (err) + return err; + + return 0; +} + +static int xsc_cmd_get_trust_state(struct xsc_core_device *xdev, u8 *trust_state) +{ + int err; + struct xsc_trust_mode_get rsp; + + memset(&rsp, 0, sizeof(struct xsc_trust_mode_get)); + + err = 
xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_TRUST_MODE, NULL, &rsp); + if (err) + return err; + + if (rsp.is_pcp) + *trust_state = XSC_QPTS_TRUST_PCP; + else + *trust_state = XSC_QPTS_TRUST_DSCP; + + return 0; +} + +static int xsc_dcbnl_ieee_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + int err = 0; + int i; + + if (!priv->dcbx.enable || !xdev->caps.ets) + return -EOPNOTSUPP; + + memset(ets, 0, sizeof(*ets)); + ets->willing = 1; + ets->ets_cap = xsc_max_tc(priv->xdev) + 1; + for (i = 0; i < ets->ets_cap; i++) { + /*get prio->tc mapping*/ + xsc_query_port_prio_tc(xdev, i, &ets->prio_tc[i]); + } + + err = xsc_query_port_bw_config(xdev, ets->tc_tx_bw); + if (err) + return err; + + for (i = 0; i < ets->ets_cap; i++) { + if (!ets->tc_tx_bw[i]) + priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT; + else if (ets->tc_tx_bw[i] < XSC_MAX_BW_ALLOC) + priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + + xsc_core_dbg(xdev, "%s: tc%d, bw=%d\n", + __func__, i, ets->tc_tx_bw[i]); + } + + memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa)); + + return err; +} + +static void xsc_build_tc_tx_bw_sch(struct xsc_core_device *xdev, + struct ieee_ets *ets, u8 *tc_tx_bw, + u8 *tc_sp_enable, int max_tc) +{ + u8 i; + u8 ets_cnt = 0; + u8 min_weight = 0xff; + + for (i = 0; i <= max_tc; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + tc_tx_bw[i] = 1; + tc_sp_enable[i] = i + 1; + break; + case IEEE_8021QAZ_TSA_ETS: + ets_cnt++; + if (ets->tc_tx_bw[i] <= min_weight) + min_weight = ets->tc_tx_bw[i]; + break; + } + } + xsc_wfq_to_wrr_adpat(xdev, tc_tx_bw, ets->tc_tx_bw, ets_cnt, min_weight); +} + +static int xsc_set_port_tx_bw_sch(struct xsc_core_device *xdev, u8 *tc_sp_enable, u8 *tc_tx_bw) +{ + u8 i; + int err = 0; + struct xsc_sp_set req_sch; + struct xsc_weight_set req_weight; + + memset(&req_sch, 0, sizeof(struct xsc_sp_set)); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + req_sch.sp[i] = tc_sp_enable[i]; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_SP, &req_sch, NULL); + if (err) + return err; + + memset(&req_weight, 0, sizeof(struct xsc_weight_set)); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + req_weight.weight[i] = tc_tx_bw[i]; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_WEIGHT, &req_weight, NULL); + if (err) + return err; + + return 0; +} + +int xsc_dcbnl_ieee_setets_core(struct xsc_adapter *priv, struct ieee_ets *ets) +{ + struct xsc_core_device *xdev = priv->xdev; + u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = {1}; + u8 tc_sp_enable[IEEE_8021QAZ_MAX_TCS]; + int max_tc = xsc_max_tc(xdev); + int err = 0; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + memset(tc_sp_enable, 0, IEEE_8021QAZ_MAX_TCS); + xsc_build_tc_tx_bw_sch(xdev, ets, tc_tx_bw, tc_sp_enable, max_tc); + xsc_set_port_prio_tc(xdev, ets->prio_tc); + + err = xsc_set_port_tx_bw_sch(xdev, tc_sp_enable, tc_tx_bw); + if (err) + return err; + + memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); + + return err; +} + +static int xsc_dbcnl_validate_ets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + bool have_ets_tc = false; + int bw_sum = 0; + int i; + + if (!priv->dcbx.enable) + return 0; + + /* Validate Priority */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] >= XSC_MAX_PRIORITY) { + netdev_err(netdev, + "Failed to validate ETS: priority value greater than max(%d)\n", 
+ XSC_MAX_PRIORITY); + return -EINVAL; + } + } + + /* Validate Bandwidth Sum */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { + /* do not allow ets with 0 weight */ + have_ets_tc = true; + if (!ets->tc_tx_bw[i]) + return -EINVAL; + bw_sum += ets->tc_tx_bw[i]; + } + } + + xsc_core_dbg(xdev, "%s bw_sum = %d\n", __func__, bw_sum); + + if (have_ets_tc && bw_sum != 100) { + netdev_err(netdev, "Failed to validate ETS: BW sum is illegal\n"); + return -EINVAL; + } + return 0; +} + +static int xsc_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct xsc_adapter *priv = netdev_priv(dev); + int err; + + if (!priv->dcbx.enable) + return 0; + + if (!priv->xdev->caps.ets) + return -EOPNOTSUPP; + + err = xsc_dbcnl_validate_ets(dev, ets); + if (err) + return err; + + err = xsc_dcbnl_ieee_setets_core(priv, ets); + if (err) + return err; + + return 0; +} + +static int xsc_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + pfc->pfc_cap = xsc_max_tc(xdev) + 1; + pfc->pfc_en = 0; + if (xdev->caps.port_buf) + pfc->delay = priv->dcbx.cable_len; + xsc_query_port_pfc_stats(xdev, pfc); + + xsc_core_dbg(xdev, "%s: pfc_en=0x%x\n", __func__, pfc->pfc_en); + + return 0; +} + +static int xsc_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + u8 curr_pfc_en; + int ret = 0; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + /* pfc_en */ + xsc_query_port_pfc(xdev, &curr_pfc_en); + if (pfc->pfc_en != curr_pfc_en) { + ret = xsc_set_port_pfc(xdev, pfc->pfc_en); + if (ret) + return ret; + } + + xsc_core_dbg(xdev, "%s: new_pfc_en=0x%x, cur_pfc_en=0x%x\n", + __func__, pfc->pfc_en, curr_pfc_en); + return ret; +} + +static u8 xsc_dcbnl_getdcbx(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + xsc_core_dbg(xdev, "%s: dcbx->cap=0x%x\n", __func__, priv->dcbx.cap); + return priv->dcbx.cap; +} + +static u8 xsc_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_dcbx *dcbx = &priv->dcbx; + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + struct xsc_lldp_status_mbox_in req; + struct xsc_lldp_status_mbox_out rsp; + int err = 0; + + memset(&req, 0, sizeof(struct xsc_lldp_status_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_lldp_status_mbox_out)); + + req.sub_type = XSC_OS_HANDLE_LLDP_STATUS; + req.os_handle_lldp = cpu_to_be32(1); + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_SET_LLDP_STATUS, &req, &rsp); + if (err) { + xsc_core_err(xdev, "set LLDP status fail,err %d\n", err); + return err; + } + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + xsc_core_dbg(xdev, "%s: mode=%d, dcbx->cap = %d\n", __func__, mode, dcbx->cap); + + /* no support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) + return -EINVAL; + + if (mode == dcbx->cap) + return 0; + + /* ETS and PFC defaults */ + ets.ets_cap = 8; + pfc.pfc_cap = 8; + + /*mode switch, set base config*/ + if (mode & DCB_CAP_DCBX_VER_IEEE) { + xsc_dcbnl_ieee_setets(dev, &ets); + 
xsc_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + xsc_dcbnl_setall(dev); + } + + dcbx->cap = mode; + + return 0; +} + +static int xsc_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct dcb_app temp; + bool is_new; + int err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + if (!priv->xdev->caps.dscp) + return -EOPNOTSUPP; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || app->protocol >= XSC_MAX_DSCP) + return -EINVAL; + + /* Save the old entry info */ + temp.selector = IEEE_8021QAZ_APP_SEL_DSCP; + temp.protocol = app->protocol; + temp.priority = priv->dcbx_dp.dscp2prio[app->protocol]; + + /* Check if need to switch to dscp trust state */ + if (!priv->dcbx.dscp_app_cnt) { + err = xsc_set_trust_state(priv, XSC_QPTS_TRUST_DSCP); + if (err) + return err; + } + + /* Skip the fw command if new and old mapping are the same */ + if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) { + err = xsc_set_dscp2prio(priv, app->protocol, app->priority); + if (err) + goto fw_err; + } + + /* Delete the old entry if exists */ + is_new = false; + err = dcb_ieee_delapp(dev, &temp); + if (err) + is_new = true; + + /* Add new entry and update counter */ + err = dcb_ieee_setapp(dev, app); + if (err) + return err; + + if (is_new) + priv->dcbx.dscp_app_cnt++; + + return err; + +fw_err: + xsc_set_trust_state(priv, XSC_QPTS_TRUST_PCP); + return err; +} + +static int xsc_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) +{ + struct xsc_adapter *priv = netdev_priv(dev); + int err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + if (!priv->xdev->caps.dscp) + return -EOPNOTSUPP; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || app->protocol >= XSC_MAX_DSCP) + return -EINVAL; + + /* Skip if no dscp app entry */ + if (!priv->dcbx.dscp_app_cnt) + return -ENOENT; + + /* Check if the entry matches fw setting */ + if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) + return -ENOENT; + + /* Delete the app entry */ + err = dcb_ieee_delapp(dev, app); + if (err) + return err; + + /* Reset the priority mapping back to zero */ + err = xsc_set_dscp2prio(priv, app->protocol, 0); + if (err) + goto fw_err; + + priv->dcbx.dscp_app_cnt--; + + /* Check if need to switch to pcp trust state */ + if (!priv->dcbx.dscp_app_cnt) + err = xsc_set_trust_state(priv, XSC_QPTS_TRUST_PCP); + + return err; + +fw_err: + xsc_set_trust_state(priv, XSC_QPTS_TRUST_PCP); + return err; +} + +static int xsc_dcbnl_ieee_getmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + u64 max_bw_value[IEEE_8021QAZ_MAX_TCS] = {0}; + int i, err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate)); + + err = xsc_query_port_ets_rate_limit(xdev, max_bw_value); + if (err) + return err; + + for (i = 0; i <= xsc_max_tc(xdev); i++) + maxrate->tc_maxrate[i] = max_bw_value[i] * XSC_RATE_LIMIT_BASE / XSC_1GB; + + return 0; +} + +static int xsc_dcbnl_ieee_setmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + u64 max_bw_value[IEEE_8021QAZ_MAX_TCS]; + int i; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + memset(max_bw_value, 0, sizeof(max_bw_value)); + + for (i = 0; i <= xsc_max_tc(xdev); i++) { + if (!maxrate->tc_maxrate[i]) + continue; + 
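+		/* Scale the dcbnl rate into the device's XSC_RATE_LIMIT_BASE
+		 * granularity; this is the inverse of the conversion done in
+		 * xsc_dcbnl_ieee_getmaxrate() above.
+		 */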
max_bw_value[i] = maxrate->tc_maxrate[i] * XSC_1GB / XSC_RATE_LIMIT_BASE; + xsc_core_dbg(xdev, "%s: tc_%d <=> max_bw %llu * 16kbps\n", + __func__, i, max_bw_value[i]); + } + + return xsc_modify_port_ets_rate_limit(xdev, max_bw_value); +} + +static u8 xsc_dcbnl_setall(struct net_device *netdev) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + struct xsc_core_device *xdev = priv->xdev; + struct ieee_ets ets; + struct ieee_pfc pfc; + int err = -EOPNOTSUPP; + int i; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + if (!xdev->caps.ets) + goto out; + + memset(&ets, 0, sizeof(ets)); + memset(&pfc, 0, sizeof(pfc)); + + ets.ets_cap = IEEE_8021QAZ_MAX_TCS; + for (i = 0; i < CEE_DCBX_MAX_PGS; i++) { + ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i]; + ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i]; + ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i]; + } + + err = xsc_dbcnl_validate_ets(netdev, &ets); + if (err) + goto out; + + err = xsc_dcbnl_ieee_setets_core(priv, &ets); + if (err) { + netdev_err(netdev, + "%s, Failed to set ETS: %d\n", __func__, err); + goto out; + } + + /* Set PFC */ + pfc.pfc_cap = xsc_max_tc(xdev) + 1; + if (!cee_cfg->pfc_enable) + pfc.pfc_en = 0; + else + for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) + pfc.pfc_en |= cee_cfg->pfc_setting[i] << i; + + err = xsc_dcbnl_ieee_setpfc(netdev, &pfc); + if (err) { + netdev_err(netdev, + "%s, Failed to set PFC: %d\n", __func__, err); + goto out; + } +out: + return err ? XSC_DCB_NO_CHG : XSC_DCB_CHG_RESET; +} + +static u8 xsc_dcbnl_getstate(struct net_device *netdev) +{ + return XSC_CEE_STATE_UP; +} + +static void xsc_dcbnl_getpermhwaddr(struct net_device *netdev, + u8 *perm_addr) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + + if (!priv->dcbx.enable || !perm_addr) + return; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + xsc_query_nic_vport_mac_address(priv->xdev, 0, perm_addr); +} + +static void xsc_dcbnl_setpgtccfgtx(struct net_device *netdev, + int priority, u8 prio_type, + u8 pgid, u8 bw_pct, u8 up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "%s: prio=%d, type=%d, pgid=%d, bw_pct=%d, up_map=%d\n", + __func__, priority, prio_type, pgid, + bw_pct, up_map); + + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } + + cee_cfg->prio_to_pg_map[priority] = pgid; +} + +static void xsc_dcbnl_setpgtccfgrx(struct net_device *netdev, + int priority, u8 prio_type, + u8 pgid, u8 bw_pct, u8 up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "Nothing to be done pgtccfg rx, not support\n"); +} + +static void xsc_dcbnl_setpgbwgcfgtx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "%s: pgid=%d, bw_pct=%d\n", + __func__, pgid, bw_pct); + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } + + cee_cfg->pg_bw_pct[pgid] 
= bw_pct; +} + +static void xsc_dcbnl_setpgbwgcfgrx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "Nothing to be done pgbwgcfg rx, not support\n"); +} + +static void xsc_dcbnl_getpgtccfgtx(struct net_device *netdev, + int priority, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + if (!xdev->caps.ets) { + netdev_err(netdev, "%s, ets is not supported\n", __func__); + return; + } + + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + xsc_query_port_prio_tc(xdev, priority, pgid); + + *up_map = *pgid; + *prio_type = 0; + *bw_pct = 100; + + xsc_core_dbg(xdev, "%s: prio=%d, pgid=%d, bw_pct=%d\n", + __func__, priority, *pgid, *bw_pct); +} + +static void xsc_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "pgtccfgrx Nothing to get; No RX support\n"); + + *prio_type = *pgid = *bw_pct = *up_map = 0; +} + +static void xsc_dcbnl_getpgbwgcfgtx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct ieee_ets ets; + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } + + xsc_dcbnl_ieee_getets(netdev, &ets); + *bw_pct = ets.tc_tx_bw[pgid]; + xsc_core_dbg(xdev, "%s: pgid=%d, bw_pct=%d\n", + __func__, pgid, *bw_pct); +} + +static void xsc_dcbnl_setpfccfg(struct net_device *netdev, + int priority, u8 setting) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "%s: prio=%d, setting=%d\n", + __func__, priority, setting); + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (setting > 1) + return; + + cee_cfg->pfc_setting[priority] = setting; +} + +static void xsc_dcbnl_getpgbwgcfgrx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "bwgcfgrx Nothing to get; No RX support\n"); + + *bw_pct = 0; +} + +static int xsc_dcbnl_get_priority_pfc(struct net_device *netdev, + int priority, u8 *setting) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct ieee_pfc pfc; + int err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + err = xsc_dcbnl_ieee_getpfc(netdev, &pfc); + + if (err) + *setting = 0; + else + *setting = (pfc.pfc_en >> priority) & 0x01; + + xsc_core_dbg(xdev, "%s: prio=%d, setting=%d\n", + __func__, priority, *setting); + return err; +} + +static void xsc_dcbnl_getpfccfg(struct net_device *netdev, + int priority, u8 *setting) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + + if (!priv->dcbx.enable) + return; + + if (priority >= CEE_DCBX_MAX_PRIO) { + 
netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (!setting) + return; + + xsc_dcbnl_get_priority_pfc(netdev, priority, setting); +} + +static u8 xsc_dcbnl_getcap(struct net_device *netdev, + int capid, u8 *cap) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + u8 rval = 0; + + if (!priv->dcbx.enable) + return rval; + + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 1 << xsc_max_tc(xdev); + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 1 << xsc_max_tc(xdev); + break; + case DCB_CAP_ATTR_GSP: + *cap = false; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = priv->dcbx.cap | + DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_VER_IEEE; + break; + default: + *cap = 0; + rval = 1; + break; + } + + xsc_core_dbg(xdev, "%s: capid=%d, cap=%d, ret=%d\n", + __func__, capid, *cap, rval); + return rval; +} + +static int xsc_dcbnl_getnumtcs(struct net_device *netdev, + int tcs_id, u8 *num) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + switch (tcs_id) { + case DCB_NUMTCS_ATTR_PG: + case DCB_NUMTCS_ATTR_PFC: + *num = xsc_max_tc(xdev) + 1; + break; + default: + return -EINVAL; + } + + xsc_core_dbg(xdev, "%s: tcs_id=%d, tc_num=%d\n", + __func__, tcs_id, *num); + return 0; +} + +static u8 xsc_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct ieee_pfc pfc; + + if (!priv->dcbx.enable) + return XSC_CEE_STATE_DOWN; + + if (xsc_dcbnl_ieee_getpfc(netdev, &pfc)) + return XSC_CEE_STATE_DOWN; + + return pfc.pfc_en ? 
XSC_CEE_STATE_UP : XSC_CEE_STATE_DOWN;
+}
+
+static void xsc_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+	struct xsc_adapter *priv = netdev_priv(netdev);
+	struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+
+	if (!priv->dcbx.enable)
+		return;
+
+	if (state != XSC_CEE_STATE_UP && state != XSC_CEE_STATE_DOWN)
+		return;
+
+	cee_cfg->pfc_enable = state;
+}
+
+const struct dcbnl_rtnl_ops xsc_dcbnl_ops = {
+	.ieee_getets = xsc_dcbnl_ieee_getets,
+	.ieee_setets = xsc_dcbnl_ieee_setets,
+	.ieee_getmaxrate = xsc_dcbnl_ieee_getmaxrate,
+	.ieee_setmaxrate = xsc_dcbnl_ieee_setmaxrate,
+	.ieee_getpfc = xsc_dcbnl_ieee_getpfc,
+	.ieee_setpfc = xsc_dcbnl_ieee_setpfc,
+	.ieee_setapp = xsc_dcbnl_ieee_setapp,
+	.ieee_delapp = xsc_dcbnl_ieee_delapp,
+	.getdcbx = xsc_dcbnl_getdcbx,
+	.setdcbx = xsc_dcbnl_setdcbx,
+
+	/* CEE interfaces */
+	.setall = xsc_dcbnl_setall,
+	.getstate = xsc_dcbnl_getstate,
+	.getpermhwaddr = xsc_dcbnl_getpermhwaddr,
+
+	.setpgtccfgtx = xsc_dcbnl_setpgtccfgtx,
+	.setpgtccfgrx = xsc_dcbnl_setpgtccfgrx,
+	.setpgbwgcfgtx = xsc_dcbnl_setpgbwgcfgtx,
+	.setpgbwgcfgrx = xsc_dcbnl_setpgbwgcfgrx,
+
+	.getpgtccfgtx = xsc_dcbnl_getpgtccfgtx,
+	.getpgtccfgrx = xsc_dcbnl_getpgtccfgrx,
+	.getpgbwgcfgtx = xsc_dcbnl_getpgbwgcfgtx,
+	.getpgbwgcfgrx = xsc_dcbnl_getpgbwgcfgrx,
+
+	.setpfccfg = xsc_dcbnl_setpfccfg,
+	.getpfccfg = xsc_dcbnl_getpfccfg,
+	.getcap = xsc_dcbnl_getcap,
+	.getnumtcs = xsc_dcbnl_getnumtcs,
+	.getpfcstate = xsc_dcbnl_getpfcstate,
+	.setpfcstate = xsc_dcbnl_setpfcstate,
+};
+
+static void xsc_dcbnl_query_dcbx_mode(struct xsc_core_device *xdev,
+				      enum xsc_dcbx_oper_mode *mode)
+{
+	int err = 0;
+	struct xsc_lldp_status_mbox_in req;
+	struct xsc_lldp_status_mbox_out rsp;
+
+	*mode = XSC_DCBX_PARAM_VER_OPER_HOST;
+
+	memset(&req, 0, sizeof(struct xsc_lldp_status_mbox_in));
+	memset(&rsp, 0, sizeof(struct xsc_lldp_status_mbox_out));
+
+	req.sub_type = XSC_OS_HANDLE_LLDP_STATUS;
+	err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_GET_LLDP_STATUS, &req, &rsp);
+	if (err) {
+		xsc_core_err(xdev, "failed to get LLDP status, err %d\n", err);
+		return;
+	}
+
+	rsp.status.os_handle_lldp = be32_to_cpu(rsp.status.os_handle_lldp);
+	xsc_core_dbg(xdev, "%s: lldp os handle = %u\n", __func__, rsp.status.os_handle_lldp);
+	if (rsp.status.os_handle_lldp != XSC_DCBX_PARAM_VER_OPER_HOST)
+		*mode = XSC_DCBX_PARAM_VER_OPER_AUTO;
+}
+
+static void xsc_ets_init(struct xsc_adapter *priv)
+{
+	struct ieee_ets ets;
+	int err;
+	int i;
+
+	if (!priv->xdev->caps.ets)
+		return;
+	memset(&ets, 0, sizeof(ets));
+	ets.ets_cap = xsc_max_tc(priv->xdev) + 1;
+	for (i = 0; i < ets.ets_cap; i++) {
+		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+		ets.prio_tc[i] = i;
+		ets.tc_tx_bw[i] = XSC_WRR_DEFAULT_WEIGHT;
+	}
+
+	err = xsc_dcbnl_ieee_setets_core(priv, &ets);
+	if (err)
+		netdev_err(priv->netdev,
+			   "%s, Failed to init ETS: %d\n", __func__, err);
+}
+
+enum {
+	INIT,
+	DELETE,
+};
+
+static void xsc_dcbnl_dscp_app(struct xsc_adapter *priv, int action)
+{
+	struct dcb_app temp;
+	struct xsc_core_device *xdev = priv->xdev;
+	int i;
+
+	xsc_core_dbg(xdev, "%s: action=%d\n", __func__, action);
+	if (!priv->xdev->caps.dscp)
+		return;
+
+	/* No SEL_DSCP entry in non DSCP state */
+	if (priv->dcbx_dp.trust_state != XSC_QPTS_TRUST_DSCP)
+		return;
+
+	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+	for (i = 0; i < XSC_MAX_DSCP; i++) {
+		temp.protocol = i;
+		temp.priority = priv->dcbx_dp.dscp2prio[i];
+		if (action == INIT)
+			dcb_ieee_setapp(priv->netdev, &temp);
+		else
+			dcb_ieee_delapp(priv->netdev, &temp);
+	}
+
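+	/* dscp_app_cnt mirrors how many SEL_DSCP app entries are registered;
+	 * ieee_setapp/ieee_delapp use this count to decide when to switch
+	 * the port between PCP and DSCP trust state.
+	 */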
+	priv->dcbx.dscp_app_cnt = (action == INIT) ? XSC_MAX_DSCP : 0;
+}
+
+void xsc_dcbnl_init_app(struct xsc_adapter *priv)
+{
+	xsc_dcbnl_dscp_app(priv, INIT);
+}
+
+void xsc_dcbnl_delete_app(struct xsc_adapter *priv)
+{
+	xsc_dcbnl_dscp_app(priv, DELETE);
+}
+
+static int xsc_query_trust_state(struct xsc_core_device *xdev, u8 *trust)
+{
+	return xsc_cmd_get_trust_state(xdev, trust);
+}
+
+static int xsc_set_trust_state(struct xsc_adapter *priv, u8 trust_state)
+{
+	int err;
+
+	err = xsc_cmd_set_trust_state(priv->xdev, trust_state);
+	if (err)
+		return err;
+
+	priv->dcbx_dp.trust_state = trust_state;
+
+	return 0;
+}
+
+static int xsc_set_dscp2prio(struct xsc_adapter *priv, u8 dscp, u8 prio)
+{
+	int err;
+	struct xsc_core_device *xdev = priv->xdev;
+
+	xsc_core_dbg(xdev, "%s: dscp=%d, prio=%d\n",
+		     __func__, dscp, prio);
+
+	err = xsc_cmd_set_dscp2prio(priv->xdev, dscp, prio);
+	if (err)
+		return err;
+
+	priv->dcbx_dp.dscp2prio[dscp] = prio;
+	return 0;
+}
+
+static int xsc_query_dscp2prio(struct xsc_core_device *xdev, u8 *dscp2prio)
+{
+	int err;
+	struct xsc_dscp_pmt_get rsp;
+
+	memset(&rsp, 0, sizeof(rsp));
+
+	err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_DSCP_PMT, NULL, &rsp);
+	if (err)
+		return err;
+
+	memcpy(dscp2prio, rsp.prio_map, sizeof(u8) * XSC_MAX_DSCP);
+
+	return 0;
+}
+
+static int xsc_trust_initialize(struct xsc_adapter *priv)
+{
+	struct xsc_core_device *xdev = priv->xdev;
+	int err;
+
+	priv->dcbx_dp.trust_state = XSC_QPTS_TRUST_PCP;
+
+	if (!xdev->caps.dscp)
+		return 0;
+
+	err = xsc_query_trust_state(xdev, &priv->dcbx_dp.trust_state);
+	if (err)
+		return err;
+
+	err = xsc_query_dscp2prio(xdev, priv->dcbx_dp.dscp2prio);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+#define XSC_BUFFER_CELL_SHIFT 7
+static u16 xsc_query_port_buffers_cell_size(struct xsc_adapter *priv)
+{
+	return (1 << XSC_BUFFER_CELL_SHIFT);
+}
+
+static void xsc_cee_init(struct xsc_adapter *priv)
+{
+	struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+	struct xsc_core_device *xdev = priv->xdev;
+	int i, max_tc;
+	u8 pfc_bitmap;
+
+	memset(cee_cfg, 0, sizeof(*cee_cfg));
+
+	cee_cfg->pfc_enable = 1;
+
+	xsc_query_port_pfc(xdev, &pfc_bitmap);
+
+	xsc_pfc_bitmap2array(pfc_bitmap, cee_cfg->pfc_setting);
+
+	max_tc = xsc_max_tc(priv->xdev) + 1;
+	for (i = 0; i < max_tc; i++)
+		cee_cfg->prio_to_pg_map[i] = i % max_tc;
+}
+
+static u8 xsc_dcbnl_get_dcbx_status(struct xsc_core_device *xdev)
+{
+	u8 enable = 0;
+	int err;
+	struct xsc_lldp_status_mbox_in req;
+	struct xsc_lldp_status_mbox_out rsp;
+
+	memset(&req, 0, sizeof(req));
+	memset(&rsp, 0, sizeof(rsp));
+
+	req.sub_type = XSC_DCBX_STATUS;
+	err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_GET_LLDP_STATUS, &req, &rsp);
+	if (err)
+		return 0;
+
+	enable = (u8)be32_to_cpu(rsp.status.dcbx_status);
+
+	return enable;
+}
+
+void xsc_dcbnl_initialize(struct xsc_adapter *priv)
+{
+	struct xsc_dcbx *dcbx = &priv->dcbx;
+	struct xsc_core_device *xdev = priv->xdev;
+
+	xsc_trust_initialize(priv);
+
+	if (!priv->xdev->caps.qos)
+		return;
+
+	if (priv->xdev->caps.dcbx)
+		xsc_dcbnl_query_dcbx_mode(xdev, &dcbx->mode);
+
+	priv->dcbx.enable = xsc_dcbnl_get_dcbx_status(xdev);
+
+	if (priv->dcbx.enable) {
+		priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE;
+
+		if (priv->dcbx.mode == XSC_DCBX_PARAM_VER_OPER_HOST)
+			priv->dcbx.cap = priv->dcbx.cap | DCB_CAP_DCBX_HOST;
+
+		priv->dcbx.port_buff_cell_sz = xsc_query_port_buffers_cell_size(priv);
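+		/* Port buffer sizes are managed in units of this cell size,
+		 * 1 << XSC_BUFFER_CELL_SHIFT = 128 bytes; the value is
+		 * currently fixed in the driver rather than queried from
+		 * hardware.
+		 */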
priv->dcbx.manual_buffer = 0; + priv->dcbx.cable_len = XSC_DEFAULT_CABLE_LEN; + + xsc_cee_init(priv); + xsc_ets_init(priv); + } +} +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h new file mode 100644 index 0000000000000000000000000000000000000000..be7e6d89c9f6dc2555de2f5245f2cd9187a12b0a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_H +#define XSC_ETH_H + +#include "common/qp.h" +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include "common/version.h" +#include +#include "common/xsc_fs.h" + +#define XSC_INVALID_LKEY 0x100 + +#define XSCALE_ETH_PHYPORT_DOWN 0 +#define XSCALE_ETH_PHYPORT_UP 1 +#ifdef CONFIG_DCB +#define CONFIG_XSC_CORE_EN_DCB 1 +#endif +#define XSC_PAGE_CACHE 1 + +#define XSCALE_DRIVER_NAME "xsc_eth" +#define XSCALE_RET_SUCCESS 0 +#define XSCALE_RET_ERROR 1 + +enum { + XSCALE_ETH_DRIVER_INIT, + XSCALE_ETH_DRIVER_OK, + XSCALE_ETH_DRIVER_CLOSE, + XSCALE_ETH_DRIVER_DETACH, +}; + +#define XSCALE_ETH_QP_NUM_MAX 1 +#define XSCALE_RX_THREAD_MAX 128 + +enum { + XSC_BW_NO_LIMIT = 0, + XSC_100_MBPS_UNIT = 3, + XSC_GBPS_UNIT = 4, +}; + +struct xsc_cee_config { + /* bw pct for priority group */ + u8 pg_bw_pct[CEE_DCBX_MAX_PGS]; + u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO]; + u8 pfc_setting[CEE_DCBX_MAX_PRIO]; + u8 pfc_enable; +}; + +enum { + XSC_DCB_CHG_RESET, + XSC_DCB_NO_CHG, + XSC_DCB_CHG_NO_RESET, +}; + +enum xsc_qpts_trust_state { + XSC_QPTS_TRUST_PCP = 1, + XSC_QPTS_TRUST_DSCP = 2, +}; + +enum xsc_dcbx_oper_mode { + XSC_DCBX_PARAM_VER_OPER_HOST = 0x0, + XSC_DCBX_PARAM_VER_OPER_AUTO = 0x3, +}; + +enum { + XSC_PORT_BUFFER_CABLE_LEN = BIT(0), + XSC_PORT_BUFFER_PFC = BIT(1), + XSC_PORT_BUFFER_PRIO2BUFFER = BIT(2), + XSC_PORT_BUFFER_SIZE = BIT(3), +}; + +struct xsc_dcbx { + u8 enable; + enum xsc_dcbx_oper_mode mode; + struct xsc_cee_config cee_cfg; /* pending configuration */ + u8 dscp_app_cnt; + + /* The only setting that cannot be read from FW */ + u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; + u8 cap; + + /* Buffer configuration */ + u8 manual_buffer; + u32 cable_len; + u32 xoff; + u16 port_buff_cell_sz; +}; + +struct xsc_bufferx_reg { + u8 lossy; + u8 epsb; + u32 size; + u32 xoff; + u32 xon; +}; + +struct xsc_port_buffer { + u32 port_buffer_size; + u32 spare_buffer_size; + struct xsc_bufferx_reg buffer[XSC_MAX_BUFFER]; +}; + +struct xsc_dcbx_dp { + u8 dscp2prio[XSC_MAX_DSCP]; + u8 trust_state; +}; + +struct xsc_rss_params { + u32 indirection_rqt[XSC_INDIR_RQT_SIZE]; + u32 rx_hash_fields[XSC_NUM_INDIR_TIRS]; + u8 toeplitz_hash_key[52]; + u8 hfunc; + u32 rss_hash_tmpl; +}; + +struct xsc_vlan_params { + DECLARE_BITMAP(active_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_svlans, VLAN_N_VID); +}; + +struct xsc_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct device *dev; + struct xsc_core_device *xdev; + + struct xsc_eth_params nic_param; + struct xsc_rss_params rss_params; + struct xsc_vlan_params vlan_params; + + struct xsc_flow_steering fs; + + struct workqueue_struct *workq; + struct work_struct update_carrier_work; + struct work_struct set_rx_mode_work; + struct work_struct event_work; + + struct xsc_eth_channels channels; + struct xsc_sq **txq2sq; + + u32 status; + spinlock_t lock; /* adapter lock */ + + struct mutex state_lock; /* Protects Interface state */ + struct xsc_stats *stats; + + 
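+	/* DCB state: dcbx caches the negotiated DCBX/CEE configuration,
+	 * dcbx_dp the data-path view (trust state, DSCP-to-priority map).
+	 */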
struct xsc_dcbx dcbx; + struct xsc_dcbx_dp dcbx_dp; + + u32 msglevel; + + struct task_struct *task; + + int channel_tc2realtxq[XSC_ETH_MAX_NUM_CHANNELS][XSC_MAX_NUM_TC]; +}; + +struct xsc_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + u32 len; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + u32 page_offset; +#else + u16 page_offset; +#endif + u16 pagecnt_bias; +}; + +struct xsc_tx_buffer { + struct sk_buff *skb; + unsigned long *h_skb_data; + dma_addr_t dma; + u32 len; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + u32 page_offset; +#else + u16 page_offset; +#endif + u16 pagecnt_bias; +}; + +struct xsc_tx_wqe { + struct xsc_send_wqe_ctrl_seg ctrl; + struct xsc_wqe_data_seg data[]; +}; + +typedef int (*xsc_eth_fp_preactivate)(struct xsc_adapter *priv); +typedef int (*xsc_eth_fp_postactivate)(struct xsc_adapter *priv); + +int xsc_safe_switch_channels(struct xsc_adapter *adapter, + xsc_eth_fp_preactivate preactivate, + xsc_eth_fp_postactivate postactivate); +int xsc_eth_num_channels_changed(struct xsc_adapter *priv); +int xsc_eth_modify_nic_hca(struct xsc_adapter *adapter, u32 change); +bool xsc_eth_get_link_status(struct xsc_adapter *adapter); +int xsc_eth_get_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo); +int xsc_eth_set_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo); + +int xsc_eth_set_led_status(int id, struct xsc_adapter *adapter); + +/* Use this function to get max num channels after netdev was created */ +static inline int xsc_get_netdev_max_channels(struct xsc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + return min_t(unsigned int, netdev->num_rx_queues, + netdev->num_tx_queues); +} + +static inline int xsc_get_netdev_max_tc(struct xsc_adapter *adapter) +{ + return adapter->nic_param.num_tc; +} + +#ifdef CONFIG_XSC_CORE_EN_DCB +extern const struct dcbnl_rtnl_ops xsc_dcbnl_ops; +int xsc_dcbnl_ieee_setets_core(struct xsc_adapter *priv, struct ieee_ets *ets); +void xsc_dcbnl_initialize(struct xsc_adapter *priv); +void xsc_dcbnl_init_app(struct xsc_adapter *priv); +void xsc_dcbnl_delete_app(struct xsc_adapter *priv); +#endif +#endif /* XSC_ETH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h new file mode 100644 index 0000000000000000000000000000000000000000..49550e1f87d205686f8d13b0b9ad5658e78fc131 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */
+
+#ifndef XSC_ETH_COMMON_H
+#define XSC_ETH_COMMON_H
+
+#include "xsc_queue.h"
+#include "xsc_eth_compat.h"
+#include "common/xsc_pph.h"
+#include "common/xsc_hsi.h"
+
+#define SW_MIN_MTU 64
+#define SW_DEFAULT_MTU 1500
+#define SW_MAX_MTU 9600
+
+#define XSC_ETH_HW_MTU_SEND 9800 /* should be obtained from hardware */
+#define XSC_ETH_HW_MTU_RECV 9800 /* should be obtained from hardware */
+#define XSC_SW2HW_MTU(mtu) ((mtu) + 14 + 4)
+#define XSC_SW2HW_FRAG_SIZE(mtu) ((mtu) + 14 + 8 + 4 + XSC_PPH_HEAD_LEN)
+#define XSC_SW2HW_RX_PKT_LEN(mtu) ((mtu) + 14 + 256)
+
+#define XSC_RX_MAX_HEAD (256)
+#define XSC_RX_HEADROOM NET_SKB_PAD
+
+#define XSC_QPN_SQN_STUB 1025
+#define XSC_QPN_RQN_STUB 1024
+
+#define XSC_LOG_INDIR_RQT_SIZE 0x8
+
+#define XSC_INDIR_RQT_SIZE BIT(XSC_LOG_INDIR_RQT_SIZE)
+#ifdef XSC_RSS_SUPPORT
+#define XSC_ETH_MIN_NUM_CHANNELS 2
+#else
+#define XSC_ETH_MIN_NUM_CHANNELS 1
+#endif
+#define XSC_ETH_MAX_NUM_CHANNELS XSC_INDIR_RQT_SIZE
+
+#define XSC_TX_NUM_TC 1
+#define XSC_MAX_NUM_TC 8
+#define XSC_ETH_MAX_TC_TOTAL (XSC_ETH_MAX_NUM_CHANNELS * XSC_MAX_NUM_TC)
+#define XSC_ETH_MAX_QP_NUM_PER_CH (XSC_MAX_NUM_TC + 1)
+
+#define XSC_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
+			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define XSC_MIN_SKB_FRAG_SZ (XSC_SKB_FRAG_SZ(XSC_RX_HEADROOM))
+#define XSC_LOG_MAX_RX_WQE_BULK \
+	(ilog2(PAGE_SIZE / roundup_pow_of_two(XSC_MIN_SKB_FRAG_SZ)))
+
+#define XSC_MIN_LOG_RQ_SZ (1 + XSC_LOG_MAX_RX_WQE_BULK)
+#define XSC_DEF_LOG_RQ_SZ 0xa
+#define XSC_MAX_LOG_RQ_SZ 0xd
+
+#define XSC_MIN_LOG_SQ_SZ 0x6
+#define XSC_DEF_LOG_SQ_SZ 0xa
+#define XSC_MAX_LOG_SQ_SZ 0xd
+
+#define XSC_SQ_ELE_NUM_DEF BIT(XSC_DEF_LOG_SQ_SZ)
+#define XSC_RQ_ELE_NUM_DEF BIT(XSC_DEF_LOG_RQ_SZ)
+
+#define XSC_LOG_RQCQ_SZ 0xb
+#define XSC_LOG_SQCQ_SZ 0xa
+
+#define XSC_RQCQ_ELE_NUM BIT(XSC_LOG_RQCQ_SZ)
+#define XSC_SQCQ_ELE_NUM BIT(XSC_LOG_SQCQ_SZ)
+#define XSC_RQ_ELE_NUM XSC_RQ_ELE_NUM_DEF /* DS number of a WQEBB */
+#define XSC_SQ_ELE_NUM XSC_SQ_ELE_NUM_DEF /* DS number of a WQEBB */
+#define XSC_EQ_ELE_NUM XSC_SQ_ELE_NUM_DEF /* number of EQ entries (to be confirmed) */
+
+#define XSC_RQCQ_ELE_SZ 32 /* size of an RQ CQE */
+#define XSC_SQCQ_ELE_SZ 32 /* size of an SQ CQE */
+#define XSC_RQ_ELE_SZ XSC_RECV_WQE_BB
+#define XSC_SQ_ELE_SZ XSC_SEND_WQE_BB
+#define XSC_EQ_ELE_SZ 8 /* size of an EQ entry */
+
+#define XSC_CQ_POLL_BUDGET 64
+#define XSC_TX_POLL_BUDGET 128
+
+#define XSC_NET_DIM_ENABLE_THRESHOLD 16
+
+#define XSC_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
+#define XSC_MAX_PRIORITY 8
+#define XSC_MAX_DSCP 64
+#define XSC_MAX_BUFFER 8
+#define XSC_DEFAULT_CABLE_LEN 7 /* 7 meters */
+
+enum xsc_port_status {
+	XSC_PORT_DOWN = 0,
+	XSC_PORT_UP = 1,
+};
+
+/* common attributes for all queue types; some fields may be unused
+ * by particular queues
+ */
+enum xsc_queue_type {
+	XSC_QUEUE_TYPE_EQ = 0,
+	XSC_QUEUE_TYPE_RQCQ,
+	XSC_QUEUE_TYPE_SQCQ,
+	XSC_QUEUE_TYPE_RQ,
+	XSC_QUEUE_TYPE_SQ,
+	XSC_QUEUE_TYPE_MAX,
+};
+
+struct xsc_queue_attr {
+	u8 q_type;
+	u32 ele_num;
+	u32 ele_size;
+	u8 ele_log_size;
+	u8 q_log_size;
+};
+
+/* must be filled in before the queue is created */
+struct xsc_eth_eq_attr {
+	struct xsc_queue_attr xsc_eq_attr;
+};
+
+struct xsc_eth_cq_attr {
+	struct xsc_queue_attr xsc_cq_attr;
+};
+
+struct xsc_eth_rq_attr {
+	struct xsc_queue_attr xsc_rq_attr;
+};
+
+struct xsc_eth_sq_attr {
+	struct xsc_queue_attr xsc_sq_attr;
+};
+
+struct xsc_eth_qp_attr {
+	struct xsc_queue_attr xsc_qp_attr;
+};
+
+struct xsc_eth_rx_wqe_cyc {
+#ifdef DECLARE_FLEX_ARRAY
+	DECLARE_FLEX_ARRAY(struct xsc_wqe_data_seg, data);
+#else
+	struct xsc_wqe_data_seg data[0];
+#endif
+};
+
+struct xsc_eq_param {
+	struct xsc_queue_attr eq_attr;
+};
+
+struct xsc_cq_param {
+	struct xsc_wq_param wq;
+	struct cq_cmd {
+		u8 abc[16];
+	} cqc;
+	struct xsc_queue_attr cq_attr;
+};
+
+struct xsc_rq_param {
+	struct xsc_wq_param wq;
+	struct xsc_queue_attr rq_attr;
+	struct xsc_rq_frags_info frags_info;
+};
+
+struct xsc_sq_param {
+	struct xsc_wq_param wq;
+	struct xsc_queue_attr sq_attr;
+};
+
+struct xsc_qp_param {
+	struct xsc_queue_attr qp_attr;
+};
+
+struct xsc_channel_param {
+	struct xsc_cq_param rqcq_param;
+	struct xsc_cq_param sqcq_param;
+	struct xsc_rq_param rq_param;
+	struct xsc_sq_param sq_param;
+	struct xsc_qp_param qp_param;
+};
+
+struct xsc_eth_qp {
+	u16 rq_num;
+	u16 sq_num;
+	struct xsc_rq rq[XSC_MAX_NUM_TC]; /* may use only one */
+	struct xsc_sq sq[XSC_MAX_NUM_TC]; /* reserved per TC */
+};
+
+enum channel_flags {
+	XSC_CHANNEL_NAPI_SCHED = 1,
+};
+
+struct xsc_channel {
+	/* data path */
+	struct xsc_eth_qp qp;
+	struct napi_struct napi;
+	u8 num_tc;
+	int chl_idx;
+
+	/* relationship */
+	struct xsc_adapter *adapter;
+	struct net_device *netdev;
+	int cpu;
+	unsigned long flags;
+
+	/* data path - accessed per napi poll */
+	const struct cpumask *aff_mask;
+	struct irq_desc *irq_desc;
+	struct xsc_ch_stats *stats;
+} ____cacheline_aligned_in_smp;
+
+enum xsc_eth_priv_flag {
+	XSC_PFLAG_RX_NO_CSUM_COMPLETE,
+	XSC_PFLAG_SNIFFER,
+	XSC_PFLAG_DROPLESS_RQ,
+	XSC_PFLAG_RX_COPY_BREAK,
+	XSC_PFLAG_RX_CQE_BASED_MODER,
+	XSC_PFLAG_TX_CQE_BASED_MODER,
+	XSC_NUM_PFLAGS, /* Keep last */
+};
+
+#define XSC_SET_PFLAG(params, pflag, enable) \
+	do { \
+		if (enable) \
+			(params)->pflags |= BIT(pflag); \
+		else \
+			(params)->pflags &= ~(BIT(pflag)); \
+	} while (0)
+
+#define XSC_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
+
+struct xsc_eth_params {
+	u16 num_channels;
+	u16 max_num_ch;
+	u8 num_tc;
+	u32 mtu;
+	u32 hard_mtu;
+	u32 comp_vectors;
+	u32 sq_size;
+	u32 sq_max_size;
+	u8 rq_wq_type;
+	u32 rq_size;
+	u32
rq_max_size; + u32 rq_frags_size; + + u16 num_rl_txqs; + u8 rx_cqe_compress_def; + u8 tunneled_offload_en; + u8 lro_en; + u8 tx_min_inline_mode; + u8 vlan_strip_disable; + u8 scatter_fcs_en; + u8 rx_dim_enabled; + u8 tx_dim_enabled; + u32 rx_dim_usecs_low; + u32 rx_dim_frames_low; + u32 tx_dim_usecs_low; + u32 tx_dim_frames_low; + u32 lro_timeout; + u32 pflags; + + xsc_dim_cq_moder_t rx_cq_moderation; + xsc_dim_cq_moder_t tx_cq_moderation; +}; + +struct xsc_eth_channels { + struct xsc_channel *c; + unsigned int num_chl; + u32 rqn_base; +}; + +struct xsc_eth_redirect_rqt_param { + u8 is_rss; + union { + u32 rqn; /* Direct RQN (Non-RSS) */ + struct { + u8 hfunc; + struct xsc_eth_channels *channels; + } rss; /* RSS data */ + }; +}; + +union xsc_send_doorbell { + struct{ + s32 next_pid : 16; + u32 qp_num : 15; + }; + u32 send_data; +}; + +union xsc_recv_doorbell { + struct{ + s32 next_pid : 13; + u32 qp_num : 15; + }; + u32 recv_data; +}; + +#endif /* XSC_ETH_COMMON_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..5e34982faa46aece80d0052c50956b059e3badef --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_COMPAT_H +#define XSC_ETH_COMPAT_H + +#define xsc_netdev_xmit_more(skb) netdev_xmit_more() + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..ccf21b8c704bf513f15f38fcd864206d79d4c732 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c @@ -0,0 +1,654 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_port_ctrl.h" +#include "xsc_hw_comm.h" + +#define XSC_ETH_CTRL_NAME "eth_ctrl" + +struct mutex pfc_mutex; /* protect pfc operation */ + +static void encode_watchdog_set(void *data, u32 mac_port) +{ + struct xsc_watchdog_period_set *req = + (struct xsc_watchdog_period_set *)data; + + req->period = __cpu_to_be32(req->period); +} + +static void decode_watchdog_get(void *data) +{ + struct xsc_watchdog_period_get *resp = + (struct xsc_watchdog_period_get *)data; + + resp->period = __be32_to_cpu(resp->period); +} + +static void encode_rlimit_set(void *data, u32 mac_port) +{ + struct xsc_rate_limit_set *req = (struct xsc_rate_limit_set *)data; + + req->rate_cir = __cpu_to_be32(req->rate_cir); + req->limit_id = __cpu_to_be32(req->limit_id); +} + +static void decode_rlimit_get(void *data) +{ + struct xsc_rate_limit_get *resp = (struct xsc_rate_limit_get *)data; + int i; + + for (i = 0; i <= QOS_PRIO_MAX; i++) + resp->rate_cir[i] = __be32_to_cpu(resp->rate_cir[i]); + + resp->max_limit_id = __be32_to_cpu(resp->max_limit_id); +} + +static int xsc_get_port_pfc(struct xsc_core_device *xdev, u8 *pfc, u8 pfc_size) +{ + int err = 0; + struct xsc_pfc_get rsp; + + memset(&rsp, 0, sizeof(struct xsc_pfc_get)); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_PFC, NULL, &rsp); + if (err) { + xsc_core_err(xdev, "failed to get pfc, err: %d\n", err); + return err; + } + + memcpy(pfc, rsp.pfc_on, pfc_size); + + return 0; +} + +static int xsc_set_port_pfc_drop_th(struct xsc_core_device *xdev, u8 prio, u8 cfg_type) +{ + int err = 0; + struct xsc_pfc_set_drop_th_mbox_in req; + struct xsc_pfc_set_drop_th_mbox_out rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_set_drop_th_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_pfc_set_drop_th_mbox_out)); + + req.prio = prio; + req.cfg_type = cfg_type; + req.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH, &req, &rsp); + if (err) { + xsc_core_err(xdev, + "failed to set pfc drop th, err: %d, prio: %d, cfg_type: %d\n", + err, prio, cfg_type); + return err; + } + + return 0; +} + +static int xsc_set_drop_th(struct xsc_core_device *xdev, + const struct xsc_pfc_cfg *pfc_cfg, + u8 cfg_type) +{ + int err = 0; + + if (cfg_type == DROP_TH_CLEAR) { + err = xsc_set_port_pfc_drop_th(xdev, pfc_cfg->req_prio, cfg_type); + if (pfc_cfg->pfc_op == PFC_OP_MODIFY) + err |= xsc_set_port_pfc_drop_th(xdev, pfc_cfg->curr_prio, cfg_type); + } else if (cfg_type == DROP_TH_RECOVER) { + if (pfc_cfg->pfc_op == PFC_OP_DISABLE) { + err = xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->req_prio, + DROP_TH_RECOVER_LOSSY); + } else if (pfc_cfg->pfc_op == PFC_OP_ENABLE) { + err = xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->req_prio, + DROP_TH_RECOVER_LOSSLESS); + } else if (pfc_cfg->pfc_op == PFC_OP_MODIFY) { + err = xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->req_prio, + DROP_TH_RECOVER_LOSSLESS); + err |= xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->curr_prio, + DROP_TH_RECOVER_LOSSY); + } + } + + return err; +} + +static int xsc_get_port_pfc_cfg_status(struct xsc_core_device *xdev, u8 prio, int *status) +{ + int err = 0; + struct xsc_pfc_get_cfg_status_mbox_in req; + struct xsc_pfc_get_cfg_status_mbox_out rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_get_cfg_status_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_pfc_get_cfg_status_mbox_out)); + + req.prio = prio; + req.hdr.opcode 
= __cpu_to_be16(XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS, &req, &rsp); + if (err) { + xsc_core_err(xdev, "failed to get pfc cfg status, err: %d, prio: %d\n", err, prio); + return err; + } + + *status = rsp.hdr.status; + + return 0; +} + +static int xsc_get_cfg_status(struct xsc_core_device *xdev, + struct xsc_pfc_cfg *pfc_cfg, + int *status) +{ + int err = 0; + + err = xsc_get_port_pfc_cfg_status(xdev, pfc_cfg->req_prio, status); + if (pfc_cfg->pfc_op == PFC_OP_MODIFY) + err |= xsc_get_port_pfc_cfg_status(xdev, pfc_cfg->curr_prio, status); + + return err; +} + +static int xsc_wait_pfc_check_complete(struct xsc_core_device *xdev, + struct xsc_pfc_cfg *pfc_cfg) +{ + int err = 0; + int status = 0; + u32 valid_cnt = 0; + u32 retry_cnt = 0; + + while (retry_cnt < PFC_CFG_CHECK_MAX_RETRY_TIMES) { + err = xsc_get_cfg_status(xdev, pfc_cfg, &status); + + if (err || status) { + valid_cnt = 0; + } else { + valid_cnt++; + if (valid_cnt >= PFC_CFG_CHECK_VALID_CNT) + break; + } + + retry_cnt++; + usleep_range(PFC_CFG_CHECK_SLEEP_TIME_US, + PFC_CFG_CHECK_SLEEP_TIME_US + 1); + } + + if (retry_cnt >= PFC_CFG_CHECK_MAX_RETRY_TIMES) { + xsc_core_err(xdev, "pfc check timeout, req_prio: %d, curr_prio:%d\n", + pfc_cfg->req_prio, pfc_cfg->curr_prio); + err = -EFAULT; + } + + return err | status; +} + +static int xsc_set_port_pfc(struct xsc_core_device *xdev, u8 prio, + u8 pfc_on, u8 pfc_op, u8 *lossless_num) +{ + int err = 0; + struct xsc_pfc_set req; + struct xsc_pfc_set rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_set)); + req.priority = prio; + req.pfc_on = pfc_on; + req.type = pfc_op; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_PFC, &req, &rsp); + if (err) { + xsc_core_err(xdev, "failed to set pfc, err: %d, prio: %d, pfc_on: %d\n", + err, prio, pfc_on); + return err; + } + + *lossless_num = rsp.lossless_num; + + return 0; +} + +static int xsc_set_pfc(struct xsc_core_device *xdev, struct xsc_pfc_cfg *pfc_cfg) +{ + int err = 0; + u8 lossless_num = LOSSLESS_NUM_INVAILD; + + switch (pfc_cfg->pfc_op) { + case PFC_OP_DISABLE: + err = xsc_set_port_pfc(xdev, pfc_cfg->req_prio, NIF_PFC_EN_OFF, + pfc_cfg->pfc_op, &lossless_num); + break; + case PFC_OP_ENABLE: + err = xsc_set_port_pfc(xdev, pfc_cfg->req_prio, NIF_PFC_EN_ON, + pfc_cfg->pfc_op, &lossless_num); + break; + case PFC_OP_MODIFY: + err = xsc_set_port_pfc(xdev, pfc_cfg->curr_prio, NIF_PFC_EN_OFF, + pfc_cfg->pfc_op, &lossless_num); + err |= xsc_set_port_pfc(xdev, pfc_cfg->req_prio, NIF_PFC_EN_ON, + pfc_cfg->pfc_op, &lossless_num); + break; + default: + xsc_core_err(xdev, "unsupported pfc operation: %d\n", pfc_cfg->pfc_op); + err = -EINVAL; + } + + pfc_cfg->lossless_num = lossless_num; + return err; +} + +static int handle_pfc_cfg(struct xsc_core_device *xdev, + struct xsc_qos_mbox_in *in, int in_size, + struct xsc_qos_mbox_out *out, int out_size) +{ + const struct xsc_pfc_set *req = (struct xsc_pfc_set *)in->data; + struct xsc_pfc_set *rsp = (struct xsc_pfc_set *)out->data; + struct xsc_pfc_cfg pfc_cfg; + u8 curr_pfc[PFC_PRIO_MAX + 1] = {0}; + int idx; + int err = 0; + bool invalid_op = false; + + if (!mutex_trylock(&pfc_mutex)) { + xsc_core_err(xdev, "pfc is configuring by other user\n"); + return -EBUSY; + } + + memcpy(rsp, req, sizeof(struct xsc_pfc_set)); + memset(&pfc_cfg, 0, sizeof(struct xsc_pfc_cfg)); + + if (req->priority < 0 || req->priority > PFC_PRIO_MAX) { + xsc_core_err(xdev, "invalid req priority: %d\n", req->priority); + err = -EINVAL; + goto err_process; + } + + 
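+	/*
+	 * The flow below assumes at most one priority is lossless at a time:
+	 * look up the currently enabled priority (if any), then classify the
+	 * request as an enable, disable or modify operation relative to it.
+	 */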
pfc_cfg.req_prio = req->priority; + pfc_cfg.req_pfc_en = req->pfc_on; + pfc_cfg.curr_pfc_en = 0; + pfc_cfg.pfc_op = PFC_OP_TYPE_MAX; + pfc_cfg.lossless_num = LOSSLESS_NUM_INVAILD; + + err = xsc_get_port_pfc(xdev, curr_pfc, sizeof(curr_pfc)); + if (err) + goto err_process; + + for (idx = 0; idx < PFC_PRIO_MAX + 1; idx++) { + if (curr_pfc[idx] == NIF_PFC_EN_ON) { + pfc_cfg.curr_prio = idx; + pfc_cfg.curr_pfc_en = 1; + break; + } + } + + if (pfc_cfg.curr_pfc_en && pfc_cfg.req_pfc_en) { + if (pfc_cfg.curr_prio != pfc_cfg.req_prio) + pfc_cfg.pfc_op = PFC_OP_MODIFY; + else + invalid_op = true; + } else if (pfc_cfg.curr_pfc_en && !pfc_cfg.req_pfc_en) { + if (pfc_cfg.curr_prio == pfc_cfg.req_prio) + pfc_cfg.pfc_op = PFC_OP_DISABLE; + else + invalid_op = true; + } else if (!pfc_cfg.curr_pfc_en && pfc_cfg.req_pfc_en) { + pfc_cfg.pfc_op = PFC_OP_ENABLE; + } else { + invalid_op = true; + } + + if (invalid_op) { + xsc_core_err(xdev, "invalid operation, req_pfc_cfg:%d,%d curr_pfc_cfg:%d,%d\n", + pfc_cfg.req_prio, pfc_cfg.req_pfc_en, + pfc_cfg.curr_prio, pfc_cfg.curr_pfc_en); + err = 0; + goto err_process; + } + + xsc_core_dbg(xdev, "req_pfc_cfg:%d, %d curr_pfc_cfg: %d,%d, pfc_op: %d\n", + pfc_cfg.req_prio, pfc_cfg.req_pfc_en, + pfc_cfg.curr_prio, pfc_cfg.curr_pfc_en, pfc_cfg.pfc_op); + + err = xsc_set_drop_th(xdev, &pfc_cfg, DROP_TH_CLEAR); + if (err) + goto err_process; + + err = xsc_wait_pfc_check_complete(xdev, &pfc_cfg); + if (!err) + err = xsc_set_pfc(xdev, &pfc_cfg); + + err |= xsc_set_drop_th(xdev, &pfc_cfg, DROP_TH_RECOVER); + +err_process: + mutex_unlock(&pfc_mutex); + + if (pfc_cfg.pfc_op == PFC_OP_MODIFY) + rsp->src_prio = pfc_cfg.curr_prio; + else + rsp->src_prio = pfc_cfg.req_prio; + + rsp->lossless_num = pfc_cfg.lossless_num; + rsp->type = pfc_cfg.pfc_op; + out->hdr.status = err; + xsc_core_dbg(xdev, "response lossless_num: %d, src_prio: %d, type: %d, hdr status: %d\n", + rsp->lossless_num, rsp->src_prio, rsp->type, out->hdr.status); + return err; +} + +static int _eth_ctrl_ioctl_qos(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) +{ + struct xsc_qos_mbox_in *in; + struct xsc_qos_mbox_out *out; + u16 user_size; + int err; + + user_size = expect_req_size > expect_resp_size ? 
expect_req_size : expect_resp_size; + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + in->req_prfx.mac_port = xdev->mac_port; + + if (encode) + encode((void *)in->data, xdev->mac_port); + + if (hdr->attr.opcode == XSC_CMD_OP_IOCTL_SET_PFC) + err = handle_pfc_cfg(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + else + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = out->hdr.status; + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static int _eth_ctrl_ioctl_hwconfig(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) +{ + struct xsc_hwc_mbox_in *in; + struct xsc_hwc_mbox_out *out; + u16 user_size; + int err; + + user_size = expect_req_size > expect_resp_size ? expect_req_size : expect_resp_size; + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + if (encode) + encode((void *)in->data, xdev->mac_port); + + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = out->hdr.status; + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static long _eth_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + int err; + void *in; + void *out; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_dscp_pmt_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_dscp_pmt_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_trust_mode_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_trust_mode_get), NULL, NULL); + 
case XSC_CMD_OP_IOCTL_SET_PCP_PMT:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   sizeof(struct xsc_pcp_pmt_set), 0, NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_PCP_PMT:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   0, sizeof(struct xsc_pcp_pmt_get), NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   sizeof(struct xsc_default_pri_set), 0, NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   0, sizeof(struct xsc_default_pri_get), NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_PFC:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   sizeof(struct xsc_pfc_set),
+					   sizeof(struct xsc_pfc_set),
+					   NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_PFC:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   0, sizeof(struct xsc_pfc_get), NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   sizeof(struct xsc_rate_limit_set), 0,
+					   encode_rlimit_set, NULL);
+	case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   sizeof(struct xsc_rate_limit_get),
+					   sizeof(struct xsc_rate_limit_get),
+					   NULL, decode_rlimit_get);
+	case XSC_CMD_OP_IOCTL_SET_SP:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   sizeof(struct xsc_sp_set), 0, NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_SP:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   0, sizeof(struct xsc_sp_get), NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_WEIGHT:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   sizeof(struct xsc_weight_set), 0, NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_WEIGHT:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   0, sizeof(struct xsc_weight_get), NULL, NULL);
+	case XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   sizeof(struct xsc_dpu_port_weight_set), 0, NULL, NULL);
+	case XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   0, sizeof(struct xsc_dpu_port_weight_get), NULL, NULL);
+	case XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   sizeof(struct xsc_dpu_prio_weight_set), 0, NULL, NULL);
+	case XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   0, sizeof(struct xsc_dpu_prio_weight_get), NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_HWC:
+		return _eth_ctrl_ioctl_hwconfig(xdev, user_hdr, &hdr,
+						sizeof(struct hwc_set_t), sizeof(struct hwc_set_t),
+						NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_HWC:
+		return _eth_ctrl_ioctl_hwconfig(xdev, user_hdr, &hdr,
+						sizeof(struct hwc_get_t),
+						sizeof(struct hwc_get_t),
+						NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_WATCHDOG_EN:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   sizeof(struct xsc_watchdog_en_set), 0,
+					   NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_WATCHDOG_EN:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   0, sizeof(struct xsc_watchdog_en_get),
+					   NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_WATCHDOG_PERIOD:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   sizeof(struct xsc_watchdog_period_set), 0,
+					   encode_watchdog_set, NULL);
+	case XSC_CMD_OP_IOCTL_GET_WATCHDOG_PERIOD:
+		return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr,
+					   0, sizeof(struct xsc_watchdog_period_get),
+					   NULL, decode_watchdog_get);
+	default:
+		return TRY_NEXT_CB;
+	}
+
+	in = kvzalloc(hdr.attr.length, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+	out = kvzalloc(hdr.attr.length, GFP_KERNEL);
+	if (!out) {
+		kvfree(in);
+		return -ENOMEM;
+	}
+
+	err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length);
+	if (err) {
+		err = -EFAULT;
+		goto err_exit;
+	}
+
+	xsc_cmd_exec(xdev, in, hdr.attr.length, out, hdr.attr.length);
+
+	if (copy_to_user((void *)user_hdr, &hdr, sizeof(hdr)))
+		err = -EFAULT;
+	if (copy_to_user((void *)user_hdr->attr.data, out, hdr.attr.length))
+		err = -EFAULT;
+err_exit:
+	kvfree(in);
+	kvfree(out);
+	return err;
+}
+
+static int _eth_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd,
+			    struct xsc_ioctl_hdr __user *user_hdr, void *data)
+{
+	struct xsc_core_device *xdev = file->xdev;
+	int err;
+
+	switch (cmd) {
+	case XSC_IOCTL_CMDQ:
+		err = _eth_ctrl_ioctl_cmdq(xdev, user_hdr);
+		break;
+	default:
+		err = TRY_NEXT_CB;
+		break;
+	}
+
+	return err;
+}
+
+static void _eth_ctrl_reg_fini(void)
+{
+	xsc_port_ctrl_cb_dereg(XSC_ETH_CTRL_NAME);
+}
+
+static int _eth_ctrl_reg_init(void)
+{
+	int ret;
+
+	ret = xsc_port_ctrl_cb_reg(XSC_ETH_CTRL_NAME, _eth_ctrl_reg_cb, NULL);
+	if (ret != 0)
+		pr_err("failed to register port control node for %s\n", XSC_ETH_CTRL_NAME);
+
+	return ret;
+}
+
+static void _pfc_global_res_init(void)
+{
+	mutex_init(&pfc_mutex);
+}
+
+void xsc_eth_ctrl_fini(void)
+{
+	_eth_ctrl_reg_fini();
+}
+
+int xsc_eth_ctrl_init(void)
+{
+	_pfc_global_res_init();
+	return _eth_ctrl_reg_init();
+}
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h
new file mode 100644
index 0000000000000000000000000000000000000000..d7e93f0afc4197c699b47839e9e560badf5e49f2
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef XSC_ETH_CTRL_H
+#define XSC_ETH_CTRL_H
+
+void xsc_eth_ctrl_fini(void);
+int xsc_eth_ctrl_init(void);
+
+#endif /* XSC_ETH_CTRL_H */
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h
new file mode 100644
index 0000000000000000000000000000000000000000..61850c2ea9dee622de2c0ca6b43e00bc68bd2ab5
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef XSC_ETH_DEBUG_H
+#define XSC_ETH_DEBUG_H
+
+#include "common/xsc_core.h"
+#include 
+#include "xsc_eth.h"
+
+static bool debug;
+#define FUN_LINE_FMT "%s %d "
+
+#define ETH_DEBUG_LOG(fmt, ...) do { } while (0)
+
+#define XSC_MSG_LEVEL (NETIF_MSG_LINK) /* | NETIF_MSG_HW */
+
+#define xsc_eth_dbg(mlevel, priv, format, ...)			\
+do {								\
+	if (NETIF_MSG_##mlevel & (priv)->msglevel)		\
+		netdev_warn((priv)->netdev, format,		\
+			    ##__VA_ARGS__);			\
+} while (0)
+
+#define WQE_CSEG_DUMP(seg_name, seg)					\
+	do {								\
+		ETH_DEBUG_LOG("dump %s:\n", seg_name);			\
+		ETH_DEBUG_LOG("cseg->has_pph: %d\n", (seg)->has_pph);	\
+		ETH_DEBUG_LOG("cseg->so_type: %d\n", (seg)->so_type);	\
+		ETH_DEBUG_LOG("cseg->so_hdr_len: %d\n", (seg)->so_hdr_len); \
+		ETH_DEBUG_LOG("cseg->so_data_size: %d\n", (seg)->so_data_size); \
+		ETH_DEBUG_LOG("cseg->msg_opcode: %d\n", (seg)->msg_opcode); \
+		ETH_DEBUG_LOG("cseg->wqe_id: %d\n", (seg)->wqe_id);	\
+		ETH_DEBUG_LOG("cseg->ds_data_num: %d\n", (seg)->ds_data_num); \
+		ETH_DEBUG_LOG("cseg->msg_len: %d\n", (seg)->msg_len);	\
+	} while (0)
+
+#define WQE_DSEG_DUMP(seg_name, seg)					\
+	do {								\
+		ETH_DEBUG_LOG("dump %s:\n", seg_name);			\
+		ETH_DEBUG_LOG("dseg->va: %#llx\n", (seg)->va);		\
+		ETH_DEBUG_LOG("dseg->in_line: %d\n", (seg)->in_line);	\
+		ETH_DEBUG_LOG("dseg->mkey: %d\n", (seg)->mkey);		\
+		ETH_DEBUG_LOG("dseg->seg_len: %d\n", (seg)->seg_len);	\
+	} while (0)
+
+static inline void skbdata_debug_dump(struct sk_buff *skb, u16 headlen, int direct)
+{
+	char *buf;
+	int i, j;
+
+	/* check skb before dereferencing it for the log message */
+	if (!debug || !skb)
+		return;
+
+	netdev_info(skb->dev, "pkt[%s]: skb_len=%d, head_len=%d\n",
+		    (direct ? "tx" : "rx"), skb->len, headlen);
+
+	buf = skb->data;
+	for (i = 0; i < headlen; i++) {
+		if (i % 16 == 0)
+			pr_info("%#4.4x ", i);
+		pr_cont("%2.2x ", ((unsigned char *)buf)[i]);
+	}
+	pr_cont("\n");
+
+	for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+		int fsz = skb_frag_size(frag);
+
+		buf = (char *)(page_address(frag->bv_page) + frag->bv_offset);
+		for (i = 0; i < fsz; i++) {
+			if (i % 16 == 0)
+				pr_info("%#4.4x ", i);
+			pr_cont("%2.2x ", ((unsigned char *)buf)[i]);
+		}
+	}
+	pr_cont("\n");
+}
+
+#define ETH_SQ_STATE(sq)						\
+	do {								\
+		if (test_bit(__QUEUE_STATE_STACK_XOFF, &(sq)->txq->state)) \
+			ETH_DEBUG_LOG("sq is __QUEUE_STATE_STACK_XOFF\n"); \
+		else if (test_bit(__QUEUE_STATE_DRV_XOFF, &(sq)->txq->state)) \
+			ETH_DEBUG_LOG("sq is __QUEUE_STATE_DRV_XOFF\n"); \
+		else							\
+			ETH_DEBUG_LOG("sq is %ld\n", (sq)->txq->state);	\
+	} while (0)
+
+static inline void xsc_pkt_pph_dump(char *data, int len)
+{
+	int i;
+
+	if (!debug)
+		return;
+
+	for (i = 0; i < len; i++) {
+		if (i % 16 == 0)
+			pr_info("%#4.4x ", i);
+		pr_cont("%2.2x ", ((unsigned char *)data)[i]);
+	}
+}
+
+#endif /* XSC_ETH_DEBUG_H */
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.c
new file mode 100644
index 0000000000000000000000000000000000000000..3a29fa03e92ad9bd1d9def35ef7265ba0e9dce4b
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */ + +#include "xsc_eth_dim.h" +#include "xsc_queue.h" +#include "xsc_eth_stats.h" + +xsc_dim_cq_moder_t xsc_get_def_tx_moderation(u8 cq_period_mode) +{ + xsc_dim_cq_moder_t moder; + + moder.cq_period_mode = cq_period_mode; + moder.pkts = XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + moder.usec = XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + if (cq_period_mode == XSC_CQ_PERIOD_MODE_START_FROM_CQE) + moder.usec = XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; + + return moder; +} + +xsc_dim_cq_moder_t xsc_get_def_rx_moderation(u8 cq_period_mode) +{ + xsc_dim_cq_moder_t moder; + + moder.cq_period_mode = cq_period_mode; + moder.pkts = XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + moder.usec = XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + + return moder; +} + +void xsc_set_tx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode) +{ + if (params->tx_dim_enabled) + params->tx_cq_moderation = net_dim_get_tx_moderation(cq_period_mode, + XSC_DEF_TX_DIM_PROFILE_IDX); + else + params->tx_cq_moderation = xsc_get_def_tx_moderation(cq_period_mode); + + XSC_SET_PFLAG(params, XSC_PFLAG_TX_CQE_BASED_MODER, + params->tx_cq_moderation.cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); +} + +void xsc_set_rx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode) +{ + if (params->rx_dim_enabled) { + params->rx_cq_moderation = net_dim_get_rx_moderation(cq_period_mode, + XSC_DEF_RX_DIM_PROFILE_IDX); + if (cq_period_mode == XSC_CQ_PERIOD_MODE_START_FROM_EQE) + params->rx_cq_moderation.usec = + XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_EQE; + } else { + params->rx_cq_moderation = xsc_get_def_rx_moderation(cq_period_mode); + } + + params->rx_dim_usecs_low = XSC_PARAMS_RX_DIM_USECS_LOW; + params->rx_dim_frames_low = XSC_PARAMS_RX_DIM_FRAMES_LOW; + + XSC_SET_PFLAG(params, XSC_PFLAG_RX_CQE_BASED_MODER, + params->rx_cq_moderation.cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); +} + +void xsc_handle_tx_dim(struct xsc_sq *sq) +{ + xsc_dim_sample_t *sample = &sq->dim_obj.sample; + + if (unlikely(!test_bit(XSC_ETH_SQ_STATE_AM, &sq->state))) + return; + + dim_update_sample(sq->cq.event_ctr, sample->pkt_ctr, sample->byte_ctr, sample); + net_dim(&sq->dim_obj.dim, *sample); +} + +void xsc_handle_rx_dim(struct xsc_rq *rq) +{ + xsc_dim_sample_t *sample = &rq->dim_obj.sample; + + if (unlikely(!test_bit(XSC_ETH_RQ_STATE_AM, &rq->state))) + return; + + dim_update_sample(rq->cq.event_ctr, sample->pkt_ctr, sample->byte_ctr, sample); + net_dim(&rq->dim_obj.dim, *sample); +} + +static void xsc_complete_dim_work(xsc_dim_t *dim, xsc_dim_cq_moder_t moder, + struct xsc_core_device *dev, struct xsc_core_cq *xcq) +{ + xcq->dim_us = moder.usec; + xcq->dim_pkts = moder.pkts; + dim->state = XSC_DIM_START_MEASURE; +} + +void xsc_rx_dim_work(struct work_struct *work) +{ + xsc_dim_t *dim = container_of(work, xsc_dim_t, work); + struct xsc_dim *dim_obj = container_of(dim, struct xsc_dim, dim); + struct xsc_rq *rq = container_of(dim_obj, struct xsc_rq, dim_obj); + xsc_dim_cq_moder_t cur_moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + xsc_complete_dim_work(dim, cur_moder, rq->cq.xdev, &rq->cq.xcq); + rq->stats->dim_pkts = cur_moder.pkts; +} + +void xsc_tx_dim_work(struct work_struct *work) +{ + xsc_dim_t *dim = container_of(work, xsc_dim_t, work); + struct xsc_dim *dim_obj = container_of(dim, struct xsc_dim, dim); + struct xsc_sq *sq = container_of(dim_obj, struct xsc_sq, dim_obj); + xsc_dim_cq_moder_t cur_moder = + net_dim_get_tx_moderation(dim->mode, dim->profile_ix); + + 
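+	/*
+	 * Apply the profile chosen by net_dim to the CQ; xsc_complete_dim_work()
+	 * also re-arms the dim state machine for the next measurement window.
+	 */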
xsc_complete_dim_work(dim, cur_moder, sq->cq.xdev, &sq->cq.xcq); + sq->stats->dim_pkts = cur_moder.pkts; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h new file mode 100644 index 0000000000000000000000000000000000000000..1e3515db5eef996ca76a07f17ec7422f3349a970 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_DIM_H +#define XSC_ETH_DIM_H + +#include "xsc_eth_common.h" + +#define XSC_DEF_RX_DIM_PROFILE_IDX 4 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x1 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x40 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x2 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_EQE 0x40 + +#define XSC_PARAMS_RX_DIM_USECS_LOW 8 +#define XSC_PARAMS_RX_DIM_FRAMES_LOW 2 + +#define XSC_DEF_TX_DIM_PROFILE_IDX 4 +#define XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x1 +#define XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x2 +#define XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x80 +#define XSC_MAX_COAL_TIME 512 +#define XSC_MAX_COAL_FRAMES 1024 + +#define XSC_DIM_START_MEASURE DIM_START_MEASURE + +enum { + XSC_CQ_PERIOD_MODE_START_FROM_EQE = DIM_CQ_PERIOD_MODE_START_FROM_EQE, + XSC_CQ_PERIOD_MODE_START_FROM_CQE = DIM_CQ_PERIOD_MODE_START_FROM_CQE, + XSC_CQ_PERIOD_NUM_MODES +}; + +xsc_dim_cq_moder_t xsc_get_def_tx_moderation(u8 cq_period_mode); +xsc_dim_cq_moder_t xsc_get_def_rx_moderation(u8 cq_period_mode); +u8 xsc_to_net_dim_cq_period_mode(u8 cq_period_mode); +void xsc_set_tx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode); +void xsc_set_rx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode); + +void xsc_tx_dim_work(struct work_struct *work); +void xsc_rx_dim_work(struct work_struct *work); + +void xsc_handle_tx_dim(struct xsc_sq *sq); +void xsc_handle_rx_dim(struct xsc_rq *rq); + +#endif /* XSC_ETH_DIM_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..01c055372003dcd3c77ef65bfc82542420a9c88c --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c @@ -0,0 +1,1279 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "xsc_eth_stats.h" +#include "xsc_eth_debug.h" +#include "xsc_eth_ethtool.h" +#include "xsc_eth.h" +#include "common/xsc_cmd.h" +#include "common/xsc_pp.h" +#include "common/port.h" +#include "xsc_eth_dim.h" + +typedef int (*xsc_pflag_handler)(struct net_device *dev, bool enable); + +struct pflag_desc { + char name[ETH_GSTRING_LEN]; + xsc_pflag_handler handler; +}; + +static const char * const fpga_type_name[] = {"S", "L"}; +static const char * const hps_ddr_name[] = {"1", "2", "4", "unknown"}; +static const char * const onchip_ft_name[] = {"N", "O" }; +static const char * const rdma_icrc_name[] = {"N", "C" }; +static const char * const ma_xbar_name[] = {"N", "X" }; +static const char * const anlt_fec_name[] = {"N", "A" }; +static const char * const pp_tbl_dma_name[] = {"N", "D" }; +static const char * const pct_exp_name[] = {"N", "E" }; + +enum { + XSC_ST_LINK_STATE, + XSC_ST_LINK_SPEED, + XSC_ST_HEALTH_INFO, +#ifdef CONFIG_INET + XSC_ST_LOOPBACK, +#endif + XSC_ST_NUM, +}; + +const char xsc_self_tests[XSC_ST_NUM][ETH_GSTRING_LEN] = { + "Link Test", + "Speed Test", + "Health Test", +#ifdef CONFIG_INET + "Loopback Test", +#endif +}; + +static int xsc_test_loopback(struct xsc_adapter *adapter) +{ + if (adapter->status != XSCALE_ETH_DRIVER_OK) { + netdev_err(adapter->netdev, + "\tCan't perform loopback test while device is down\n"); + return -ENODEV; + } + return 0; +} + +static int xsc_test_health_info(struct xsc_adapter *adapter) +{ + struct xsc_core_health *health = &adapter->xdev->priv.health; + + return health->sick ? 1 : 0; +} + +static int xsc_test_link_state(struct xsc_adapter *adapter) +{ + u8 port_state; + + if (!netif_carrier_ok(adapter->netdev)) + return 1; + + port_state = xsc_eth_get_link_status(adapter); + return port_state == 0 ? 1 : 0; +} + +static int xsc_test_link_speed(struct xsc_adapter *adapter) +{ + struct xsc_event_linkinfo linkinfo; + + if (xsc_eth_get_link_info(adapter, &linkinfo)) + return 1; + + return 0; +} + +static int set_pflag_rx_no_csum_complete(struct net_device *dev, + bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_RX_NO_CSUM_COMPLETE, enable); + + return 0; +} + +static int set_pflag_sniffer(struct net_device *dev, bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_SNIFFER, enable); + + return 0; +} + +static int set_pflag_dropless_rq(struct net_device *dev, + bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_DROPLESS_RQ, enable); + + return 0; +} + +static int set_pflag_rx_copy_break(struct net_device *dev, + bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_RX_COPY_BREAK, enable); + + return 0; +} + +static int cqe_mode_to_period_mode(bool val) +{ + return val ? XSC_CQ_PERIOD_MODE_START_FROM_CQE : XSC_CQ_PERIOD_MODE_START_FROM_EQE; +} + +static int set_pflag_cqe_based_moder(struct net_device *dev, bool enable, + bool is_rx_cq) +{ + struct xsc_adapter *priv = netdev_priv(dev); + u8 cq_period_mode, current_cq_period_mode; + struct xsc_eth_params new_params; + int err; + + cq_period_mode = cqe_mode_to_period_mode(enable); + + current_cq_period_mode = is_rx_cq ? 
+ priv->nic_param.rx_cq_moderation.cq_period_mode : + priv->nic_param.tx_cq_moderation.cq_period_mode; + + if (cq_period_mode == current_cq_period_mode) + return 0; + + new_params = priv->nic_param; + if (is_rx_cq) + xsc_set_rx_cq_mode_params(&new_params, cq_period_mode); + else + xsc_set_tx_cq_mode_params(&new_params, cq_period_mode); + + priv->nic_param = new_params; + + err = xsc_safe_switch_channels(priv, NULL, NULL); + return err; +} + +static int set_pflag_rx_cqe_moder(struct net_device *dev, bool enable) +{ + return set_pflag_cqe_based_moder(dev, enable, true); +} + +static int set_pflag_tx_cqe_moder(struct net_device *dev, bool enable) +{ + return set_pflag_cqe_based_moder(dev, enable, false); +} + +static const struct pflag_desc xsc_priv_flags[XSC_NUM_PFLAGS] = { + { "rx_no_csum_complete", set_pflag_rx_no_csum_complete }, + { "sniffer", set_pflag_sniffer }, + { "dropless_rq", set_pflag_dropless_rq}, + { "rx_copy_break", set_pflag_rx_copy_break}, + { "rx_cqe_moder", set_pflag_rx_cqe_moder}, + { "tx_cqe_moder", set_pflag_tx_cqe_moder}, +}; + +int xsc_priv_flags_num(void) +{ + return ARRAY_SIZE(xsc_priv_flags); +} + +const char *xsc_priv_flags_name(int flag) +{ + return xsc_priv_flags[flag].name; +} + +static int xsc_handle_pflag(struct net_device *dev, + u32 wanted_flags, + enum xsc_eth_priv_flag flag) +{ + struct xsc_adapter *priv = netdev_priv(dev); + bool enable = !!(wanted_flags & BIT(flag)); + u32 changes = wanted_flags ^ priv->nic_param.pflags; + int err; + + if (!(changes & BIT(flag))) + return 0; + + err = xsc_priv_flags[flag].handler(dev, enable); + if (err) + netdev_err(dev, "%s private flag '%s' failed err %d\n", + enable ? "Enable" : "Disable", + xsc_priv_flags[flag].name, err); + + return err; +} + +int xsc_set_priv_flags(struct net_device *dev, u32 pflags) +{ + struct xsc_adapter *priv = netdev_priv(dev); + enum xsc_eth_priv_flag pflag; + int err; + + mutex_lock(&priv->state_lock); + + for (pflag = 0; pflag < XSC_NUM_PFLAGS; pflag++) { + err = xsc_handle_pflag(dev, pflags, pflag); + if (err) + break; + } + + mutex_unlock(&priv->state_lock); + + /* Need to fix some features.. */ + netdev_update_features(dev); + + return err; +} + +static int xsc_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + int size_read = 0; + u8 data[4] = {0}; + + size_read = xsc_query_module_eeprom(xdev, 0, 3, data); + if (size_read < 3) + return -EIO; + + /* data[0] = identifier byte */ + switch (data[0]) { + case XSC_MODULE_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + break; + case XSC_MODULE_ID_QSFP_PLUS: + case XSC_MODULE_ID_QSFP28: + /* data[1] = revision id */ + if (data[0] == XSC_MODULE_ID_QSFP28 || data[1] >= 0x3) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } + break; + case XSC_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case XSC_MODULE_ID_QSFP_DD: + case XSC_MODULE_ID_DSFP: + case XSC_MODULE_ID_QSFP_PLUS_CMIS: + modinfo->type = ETH_MODULE_SFF_8636; + /* Verify if module EEPROM is a flat memory. In case of flat + * memory only page 00h (0-255 bytes) can be read. Otherwise + * upper pages 01h and 02h can also be read. 
Upper pages 10h + * and 11h are currently not supported by the driver. + */ + if (data[2] & 0x80) + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + else + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", + __func__, data[0]); + return -EINVAL; + } + + return 0; +} + +static int xsc_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + int offset = ee->offset; + int size_read; + int i = 0; + + if (!ee->len) + return -EINVAL; + + memset(data, 0, ee->len); + + while (i < ee->len) { + size_read = xsc_query_module_eeprom(xdev, offset, ee->len - i, data + i); + + if (!size_read) + /* Done reading */ + return 0; + + if (size_read < 0) { + netdev_err(priv->netdev, "%s: xsc_query_eeprom failed:0x%x\n", + __func__, size_read); + return 0; + } + + i += size_read; + offset += size_read; + } + + return 0; +} + +static int xsc_get_module_eeprom_by_page(struct net_device *netdev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_module_eeprom_query_params query; + u8 *data = page_data->data; + int size_read; + int i = 0; + + if (!page_data->length) + return -EINVAL; + + memset(data, 0, page_data->length); + + query.offset = page_data->offset; + query.i2c_address = page_data->i2c_address; + query.bank = page_data->bank; + query.page = page_data->page; + while (i < page_data->length) { + query.size = page_data->length - i; + size_read = xsc_query_module_eeprom_by_page(xdev, &query, data + i); + + // Done reading, return how many bytes was read + if (!size_read) + return i; + + if (size_read == -EINVAL) + return -EINVAL; + if (size_read < 0) { + netdev_err(priv->netdev, "%s: xsc_query_module_eeprom_by_page failed:0x%x\n", + __func__, size_read); + return i; + } + + i += size_read; + query.offset += size_read; + } + + return i; +} + +u32 xsc_get_priv_flags(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + return priv->nic_param.pflags; +} + +static void xsc_set_drv_fw_version(struct ethtool_drvinfo *info, struct xsc_core_device *xdev) +{ + u8 fw_ver_major = xdev->fw_version_major; + u8 fw_ver_minor = xdev->fw_version_minor; + u16 fw_ver_patch = xdev->fw_version_patch; + u32 fw_ver_tweak = xdev->fw_version_tweak; + u8 fw_ver_extra_flag = xdev->fw_version_extra_flag; + + if (fw_ver_tweak == 0) { + if (fw_ver_extra_flag == 0) { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u", + fw_ver_major, fw_ver_minor, fw_ver_patch); + } else { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u-dirty", + fw_ver_major, fw_ver_minor, fw_ver_patch); + } + } else { + if (fw_ver_extra_flag == 0) { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u+%u", + fw_ver_major, fw_ver_minor, fw_ver_patch, fw_ver_tweak); + } else { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u+%u-dirty", + fw_ver_major, fw_ver_minor, fw_ver_patch, fw_ver_tweak); + } + } +} + +static void xsc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + snprintf(info->driver, sizeof(info->driver), "%s", XSCALE_DRIVER_NAME); + + if (HOTFIX_NUM == 0) + snprintf(info->version, sizeof(info->version), "%d.%d.%d.%d", + BRANCH_VERSION, MAJOR_VERSION, 
MINOR_VERSION, BUILD_VERSION); + else + snprintf(info->version, sizeof(info->version), "%d.%d.%d.%d.H%d", + BRANCH_VERSION, MAJOR_VERSION, MINOR_VERSION, BUILD_VERSION, HOTFIX_NUM); + + xsc_set_drv_fw_version(info, adapter->xdev); + strscpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info)); +} + +static void xsc_fill_stats_strings(struct xsc_adapter *adapter, u8 *data) +{ + int i, idx = 0; + + for (i = 0; i < xsc_num_stats_grps; i++) + idx = xsc_stats_grps[i].fill_strings(adapter, data, idx); +} + +static int xsc_self_test_num(struct xsc_adapter *adapter) +{ + return ARRAY_SIZE(xsc_self_tests); +} + +static void xsc_ethtool_get_strings(struct xsc_adapter *adapter, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + xsc_fill_stats_strings(adapter, data); + break; + + case ETH_SS_TEST: + for (i = 0; i < xsc_self_test_num(adapter); i++) + strscpy(data + i * ETH_GSTRING_LEN, + xsc_self_tests[i], + ETH_GSTRING_LEN); + break; + + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < XSC_NUM_PFLAGS; i++) + strscpy(data + i * ETH_GSTRING_LEN, + xsc_priv_flags[i].name, + ETH_GSTRING_LEN); + break; + + default: + ETH_DEBUG_LOG("wrong stringset\n"); + break; + } +} + +static void xsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + xsc_ethtool_get_strings(adapter, stringset, data); +} + +static int xsc_ethtool_get_sset_count(struct xsc_adapter *adapter, int sset) +{ + int i, num_stats = 0; + + switch (sset) { + case ETH_SS_STATS: + for (i = 0; i < xsc_num_stats_grps; i++) + num_stats += xsc_stats_grps[i].get_num_stats(adapter); + return num_stats; + case ETH_SS_PRIV_FLAGS: + return XSC_NUM_PFLAGS; + case ETH_SS_TEST: + return xsc_self_test_num(adapter); + default: + return -EOPNOTSUPP; + } +} + +static int xsc_get_sset_count(struct net_device *dev, int sset) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + return xsc_ethtool_get_sset_count(adapter, sset); +} + +static int (*xsc_st_func[XSC_ST_NUM])(struct xsc_adapter *) = { + xsc_test_link_state, + xsc_test_link_speed, + xsc_test_health_info, +#ifdef CONFIG_INET + xsc_test_loopback, +#endif +}; + +static void xsc_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf) +{ + struct xsc_adapter *priv = netdev_priv(ndev); + int i; + + memset(buf, 0, sizeof(u64) * XSC_ST_NUM); + + mutex_lock(&priv->state_lock); + netdev_info(ndev, "Self test begin..\n"); + + for (i = 0; i < XSC_ST_NUM; i++) { + netdev_info(ndev, "\t[%d] %s start..\n", + i, xsc_self_tests[i]); + buf[i] = xsc_st_func[i](priv); + netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", + i, xsc_self_tests[i], buf[i]); + } + + mutex_unlock(&priv->state_lock); + + for (i = 0; i < XSC_ST_NUM; i++) { + if (buf[i]) { + etest->flags |= ETH_TEST_FL_FAILED; + break; + } + } + netdev_info(ndev, "Self test out: status flags(0x%x)\n", + etest->flags); +} + +static void xsc_update_stats(struct xsc_adapter *adapter) +{ + int i; + + for (i = xsc_num_stats_grps - 1; i >= 0; i--) + if (xsc_stats_grps[i].update_stats) + xsc_stats_grps[i].update_stats(adapter); +} + +static void xsc_ethtool_get_ethtool_stats(struct xsc_adapter *adapter, + struct ethtool_stats *stats, u64 *data) +{ + int i, idx = 0; + + mutex_lock(&adapter->state_lock); + xsc_update_stats(adapter); + mutex_unlock(&adapter->state_lock); + + for (i = 0; i < xsc_num_stats_grps; i++) + idx = xsc_stats_grps[i].fill_stats(adapter, data, idx); +} + +static void xsc_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats 
*stats, u64 *data) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + xsc_ethtool_get_ethtool_stats(adapter, stats, data); +} + +static u32 xsc_get_msglevel(struct net_device *dev) +{ + return ((struct xsc_adapter *)netdev_priv(dev))->msglevel; +} + +static void xsc_set_msglevel(struct net_device *dev, u32 val) +{ + ((struct xsc_adapter *)netdev_priv(dev))->msglevel = val; +} + +static void xsc_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + param->rx_max_pending = 8192; /* fixed cap, kept for H3C compatibility */ + param->rx_pending = priv->nic_param.rq_size; + param->tx_max_pending = 8192; /* fixed cap, kept for H3C compatibility */ + param->tx_pending = priv->nic_param.sq_size; +} + +static int xsc_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(dev); + u32 old_rq_size, old_sq_size; + int err = 0; + + if (param->rx_jumbo_pending) { + netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n", + __func__); + return -EINVAL; + } + if (param->rx_mini_pending) { + netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n", + __func__); + return -EINVAL; + } + + if (param->rx_pending < BIT(XSC_MIN_LOG_RQ_SZ)) { + netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%ld)\n", + __func__, param->rx_pending, BIT(XSC_MIN_LOG_RQ_SZ)); + return -EINVAL; + } + if (param->rx_pending > priv->nic_param.rq_max_size) { + netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n", + __func__, param->rx_pending, priv->nic_param.rq_max_size); + param->rx_pending = priv->nic_param.rq_max_size; + } + + if (param->tx_pending < BIT(XSC_MIN_LOG_SQ_SZ)) { + netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%ld)\n", + __func__, param->tx_pending, BIT(XSC_MIN_LOG_SQ_SZ)); + return -EINVAL; + } + if (param->tx_pending > priv->nic_param.sq_max_size) { + netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n", + __func__, param->tx_pending, priv->nic_param.sq_max_size); + param->tx_pending = priv->nic_param.sq_max_size; + } + + if (param->rx_pending == priv->nic_param.rq_size && + param->tx_pending == priv->nic_param.sq_size) + return 0; + + mutex_lock(&priv->state_lock); + + if (priv->status != XSCALE_ETH_DRIVER_OK) + goto unlock; + + old_rq_size = priv->nic_param.rq_size; + old_sq_size = priv->nic_param.sq_size; + priv->nic_param.rq_size = param->rx_pending; + priv->nic_param.sq_size = param->tx_pending; + + netdev_info(priv->netdev, "%s: tx_pending(%d->%d), rx_pending(%d->%d)\n", + __func__, old_sq_size, param->tx_pending, + old_rq_size, priv->nic_param.rq_size); + err = xsc_safe_switch_channels(priv, NULL, NULL); + if (err) { + priv->nic_param.rq_size = old_rq_size; + priv->nic_param.sq_size = old_sq_size; + netdev_err(priv->netdev, "%s: set ringparams failed, err=%d\n", + __func__, err); + } + +unlock: + mutex_unlock(&priv->state_lock); + + return err; +} + +static void xsc_get_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + mutex_lock(&priv->state_lock); + + ch->max_combined = priv->nic_param.max_num_ch; + ch->combined_count = priv->nic_param.num_channels; + + mutex_unlock(&priv->state_lock); +} + +static int xsc_set_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_eth_params *params = 
&priv->nic_param; + unsigned int ch_max = params->max_num_ch; + unsigned int ch_num_old = params->num_channels; + unsigned int count = ch->combined_count; + int err = 0; + + if (!count) { + netdev_info(priv->netdev, "%s: combined_count=0 not supported\n", __func__); + return -EINVAL; + } + + if (ch->rx_count || ch->tx_count) { + netdev_info(priv->netdev, "%s: separate rx/tx count not supported\n", __func__); + return -EINVAL; + } + + if (count > ch_max) { + netdev_info(priv->netdev, "%s: count (%d) > max (%d)\n", + __func__, count, ch_max); + return -EINVAL; + } + + if (ch_num_old == count) + return 0; + + mutex_lock(&priv->state_lock); + + params->num_channels = count; + + if (priv->status != XSCALE_ETH_DRIVER_OK) { + err = xsc_eth_num_channels_changed(priv); + if (err) + params->num_channels = ch_num_old; + goto out; + } + + /* Switch to new channels, set new parameters and close old ones */ + err = xsc_safe_switch_channels(priv, NULL, xsc_eth_num_channels_changed); + +out: + mutex_unlock(&priv->state_lock); + netdev_info(priv->netdev, "set combined_cnt=%d, err=%d\n", count, err); + + return err; +} + +static int flow_type_to_traffic_type(u32 flow_type) +{ + switch (flow_type) { + case IPV4_FLOW: + return XSC_TT_IPV4; + case TCP_V4_FLOW: + return XSC_TT_IPV4_TCP; + case UDP_V4_FLOW: + return XSC_TT_IPV4_TCP; + case IPV6_FLOW: + return XSC_TT_IPV6; + case TCP_V6_FLOW: + return XSC_TT_IPV6_TCP; + case UDP_V6_FLOW: + return XSC_TT_IPV6_TCP; + case AH_V4_FLOW: + return XSC_TT_IPV4_IPSEC_AH; + case AH_V6_FLOW: + return XSC_TT_IPV6_IPSEC_AH; + case ESP_V4_FLOW: + return XSC_TT_IPV4_IPSEC_ESP; + case ESP_V6_FLOW: + return XSC_TT_IPV6_IPSEC_ESP; + default: + return -EINVAL; + } +} + +static int xsc_get_rss_hash_opt(struct xsc_adapter *priv, + struct ethtool_rxnfc *nfc) +{ + u32 hash_field = 0; + int tt; + + tt = flow_type_to_traffic_type(nfc->flow_type); + if (tt < 0) + return -EINVAL; + + hash_field = priv->rss_params.rx_hash_fields[tt]; + nfc->data = 0; + + if (hash_field & XSC_HASH_FIELD_SEL_PROTO) + nfc->data |= RXH_L3_PROTO; + if (tt == XSC_TT_IPV4_TCP) { + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IP) + nfc->data |= RXH_IP_SRC; + if (hash_field & XSC_HASH_FIELD_SEL_DST_IP) + nfc->data |= RXH_IP_DST; + if (hash_field & XSC_HASH_FIELD_SEL_SPORT) + nfc->data |= RXH_L4_B_0_1; + if (hash_field & XSC_HASH_FIELD_SEL_DPORT) + nfc->data |= RXH_L4_B_2_3; + } else if (tt == XSC_TT_IPV6_TCP) { + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IPV6) + nfc->data |= RXH_IP_SRC; + if (hash_field & XSC_HASH_FIELD_SEL_DST_IPV6) + nfc->data |= RXH_IP_DST; + if (hash_field & XSC_HASH_FIELD_SEL_SPORT_V6) + nfc->data |= RXH_L4_B_0_1; + if (hash_field & XSC_HASH_FIELD_SEL_DPORT_V6) + nfc->data |= RXH_L4_B_2_3; + } + + return 0; +} + +static int xsc_set_rss_hash_opt(struct xsc_adapter *priv, + struct ethtool_rxnfc *nfc) +{ + u32 rx_hash_field = XSC_HASH_FIELD_SEL_PROTO; + u32 change = 0; + int ret = 0; + int tt; + + tt = flow_type_to_traffic_type(nfc->flow_type); + if (tt < 0) + return -EINVAL; + + /* RSS does not support anything other than hashing to queues + * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest + * port. 
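+ * UDP flow types map onto the XSC_TT_*_TCP hash templates (see + * flow_type_to_traffic_type() above), so they report the same hash + * fields as their TCP counterparts.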
+ */ + if (nfc->flow_type != TCP_V4_FLOW && + nfc->flow_type != TCP_V6_FLOW && + nfc->flow_type != UDP_V4_FLOW && + nfc->flow_type != UDP_V6_FLOW) + return -EOPNOTSUPP; + + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EOPNOTSUPP; + + if (nfc->flow_type == TCP_V4_FLOW) { + if (nfc->data & RXH_IP_SRC) + rx_hash_field |= XSC_HASH_FIELD_SEL_SRC_IP; + if (nfc->data & RXH_IP_DST) + rx_hash_field |= XSC_HASH_FIELD_SEL_DST_IP; + if (nfc->data & RXH_L4_B_0_1) + rx_hash_field |= XSC_HASH_FIELD_SEL_SPORT; + if (nfc->data & RXH_L4_B_2_3) + rx_hash_field |= XSC_HASH_FIELD_SEL_DPORT; + } else if (nfc->flow_type == TCP_V6_FLOW) { + if (nfc->data & RXH_IP_SRC) + rx_hash_field |= XSC_HASH_FIELD_SEL_SRC_IPV6; + if (nfc->data & RXH_IP_DST) + rx_hash_field |= XSC_HASH_FIELD_SEL_DST_IPV6; + if (nfc->data & RXH_L4_B_0_1) + rx_hash_field |= XSC_HASH_FIELD_SEL_SPORT_V6; + if (nfc->data & RXH_L4_B_2_3) + rx_hash_field |= XSC_HASH_FIELD_SEL_DPORT_V6; + } else { + return 0; + } + + mutex_lock(&priv->state_lock); + if (rx_hash_field != priv->rss_params.rx_hash_fields[tt]) { + change |= BIT(XSC_RSS_HASH_TEMP_UPDATE); + priv->rss_params.rx_hash_fields[tt] = rx_hash_field; + } + + xsc_core_info(priv->xdev, "flow_type=%d, change=0x%x, hash_tmpl=0x%x\n", + nfc->flow_type, change, rx_hash_field); + if (change) + ret = xsc_eth_modify_nic_hca(priv, change); + + mutex_unlock(&priv->state_lock); + return ret; +} + +int xsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_eth_params *params = &priv->nic_param; + int err = 0; + + if (info->cmd == ETHTOOL_GRXRINGS) { + info->data = params->num_channels; + return 0; + } + + switch (info->cmd) { + case ETHTOOL_GRXFH: + err = xsc_get_rss_hash_opt(priv, info); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +int xsc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct xsc_adapter *priv = netdev_priv(dev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + err = xsc_set_rss_hash_opt(priv, cmd); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static u32 xsc_get_rxfh_key_size(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + return sizeof(priv->rss_params.toeplitz_hash_key); +} + +static u32 xsc_get_rxfh_indir_size(struct net_device *netdev) +{ + return XSC_INDIR_RQT_SIZE; +} + +int xsc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_rss_params *rss = &priv->rss_params; + + if (indir) + memcpy(indir, rss->indirection_rqt, + sizeof(rss->indirection_rqt)); + + if (key) + memcpy(key, rss->toeplitz_hash_key, + sizeof(rss->toeplitz_hash_key)); + + if (hfunc) + *hfunc = rss->hfunc; + + return 0; +} + +int xsc_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_rss_params *rss = &priv->rss_params; + u32 refresh = 0; + int err = 0; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_XOR && + hfunc != ETH_RSS_HASH_TOP) + return -EINVAL; + + mutex_lock(&priv->state_lock); + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) { + rss->hfunc = hfunc; + refresh |= BIT(XSC_RSS_HASH_FUNC_UPDATE); + } + + if (key) { + memcpy(rss->toeplitz_hash_key, key, sizeof(rss->toeplitz_hash_key)); + if (rss->hfunc == ETH_RSS_HASH_TOP) + refresh |= 
BIT(XSC_RSS_HASH_KEY_UPDATE); + } + + if (refresh > 0 && priv->status == XSCALE_ETH_DRIVER_OK) + err = xsc_eth_modify_nic_hca(priv, refresh); + + mutex_unlock(&priv->state_lock); + + return err; +} + +static int xsc_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_linkinfo linkinfo; + + if (xsc_eth_get_link_info(adapter, &linkinfo)) + return -EINVAL; + + cmd->base.port = linkinfo.port; + cmd->base.duplex = linkinfo.duplex; + cmd->base.autoneg = linkinfo.autoneg; + switch (linkinfo.linkspeed) { + case MODULE_SPEED_UNKNOWN: + cmd->base.speed = LINKSPEED_MODE_UNKNOWN; + break; + case MODULE_SPEED_10G: + cmd->base.speed = LINKSPEED_MODE_10G; + break; + case MODULE_SPEED_25G: + cmd->base.speed = LINKSPEED_MODE_25G; + break; + case MODULE_SPEED_40G_R4: + cmd->base.speed = LINKSPEED_MODE_40G; + break; + case MODULE_SPEED_50G_R: + case MODULE_SPEED_50G_R2: + cmd->base.speed = LINKSPEED_MODE_50G; + break; + case MODULE_SPEED_100G_R2: + case MODULE_SPEED_100G_R4: + cmd->base.speed = LINKSPEED_MODE_100G; + break; + case MODULE_SPEED_200G_R4: + case MODULE_SPEED_200G_R8: + cmd->base.speed = LINKSPEED_MODE_200G; + break; + case MODULE_SPEED_400G_R8: + cmd->base.speed = LINKSPEED_MODE_400G; + break; + default: + cmd->base.speed = LINKSPEED_MODE_25G; + break; + } + + /* when the link is down, report speed and duplex as unknown */ + if (!linkinfo.linkstatus) { + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = LINKSPEED_MODE_UNKNOWN; + } + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + bitmap_copy(cmd->link_modes.supported, (unsigned long *)linkinfo.supported_speed, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy(cmd->link_modes.advertising, (unsigned long *)linkinfo.advertising_speed, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + bitmap_or(cmd->link_modes.supported, cmd->link_modes.supported, + (unsigned long *)&linkinfo.supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_or(cmd->link_modes.advertising, cmd->link_modes.advertising, + (unsigned long *)&linkinfo.advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + + return 0; +} + +static int xsc_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_linkinfo linkinfo; + int err = 0, i; + + if (!adapter) { + pr_err("%s: failed to find adapter\n", __func__); + return -EINVAL; + } + + memset(&linkinfo, 0, sizeof(struct xsc_event_linkinfo)); + + linkinfo.port = cmd->base.port; + linkinfo.duplex = cmd->base.duplex; + linkinfo.autoneg = cmd->base.autoneg; + linkinfo.linkspeed = cpu_to_be32(cmd->base.speed); + + bitmap_copy((unsigned long *)linkinfo.supported_speed, + cmd->link_modes.supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy((unsigned long *)linkinfo.advertising_speed, + cmd->link_modes.advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + + for (i = 0; i < ARRAY_SIZE(linkinfo.supported_speed); i++) { + linkinfo.supported_speed[i] = be64_to_cpu(linkinfo.supported_speed[i]); + linkinfo.advertising_speed[i] = be64_to_cpu(linkinfo.advertising_speed[i]); + } + + err = xsc_eth_set_link_info(adapter, &linkinfo); + if (err) + xsc_core_err(adapter->xdev, "failed to set link info, err %d\n", err); + + return err; +} + +static int xsc_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = 
adapter->xdev; + int ret = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + xsc_eth_set_led_status(xdev->pf_id, adapter); + break; + case ETHTOOL_ID_INACTIVE: + xsc_eth_set_led_status(LED_ACT_ON_HW, adapter); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int xsc_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_modify_fecparam_mbox_in in; + struct xsc_event_modify_fecparam_mbox_out out; + u32 new_fec = fec->fec; + int err = 0; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_FEC_PARAM); + in.fec = cpu_to_be32(new_fec); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to set fec param, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + return err; +} + +static int xsc_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_query_fecparam_mbox_in in; + struct xsc_event_query_fecparam_mbox_out out; + int err = 0; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_FEC_PARAM); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to get fec param, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + fec->active_fec = be32_to_cpu(out.active_fec); + fec->fec = be32_to_cpu(out.fec_cfg); + + return err; +} + +static int xsc_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + xsc_dim_cq_moder_t *rx_moder, *tx_moder; + + rx_moder = &priv->nic_param.rx_cq_moderation; + coal->rx_coalesce_usecs = rx_moder->usec; + coal->rx_max_coalesced_frames = rx_moder->pkts; + coal->use_adaptive_rx_coalesce = priv->nic_param.rx_dim_enabled; + + tx_moder = &priv->nic_param.tx_cq_moderation; + coal->tx_coalesce_usecs = tx_moder->usec; + coal->tx_max_coalesced_frames = tx_moder->pkts; + coal->use_adaptive_tx_coalesce = priv->nic_param.tx_dim_enabled; + coal->rx_coalesce_usecs_low = priv->nic_param.rx_dim_usecs_low; + coal->rx_max_coalesced_frames_low = priv->nic_param.rx_dim_frames_low; + + kernel_coal->use_cqe_mode_rx = + XSC_GET_PFLAG(&priv->nic_param, XSC_PFLAG_RX_CQE_BASED_MODER); + kernel_coal->use_cqe_mode_tx = + XSC_GET_PFLAG(&priv->nic_param, XSC_PFLAG_TX_CQE_BASED_MODER); + + return 0; +} + +static int xsc_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + xsc_dim_cq_moder_t *rx_moder, *tx_moder; + struct xsc_eth_params new_params = {}; + int err = 0; + bool reset_rx, reset_tx; + u8 mode; + + if (coal->tx_coalesce_usecs > XSC_MAX_COAL_TIME || + coal->rx_coalesce_usecs > XSC_MAX_COAL_TIME || + coal->rx_coalesce_usecs_low > XSC_MAX_COAL_TIME) { + netdev_info(priv->netdev, "%s: maximum coalesce time supported is %u usecs\n", + __func__, XSC_MAX_COAL_TIME); + return -ERANGE; + } + + if (coal->tx_max_coalesced_frames > XSC_MAX_COAL_FRAMES || + coal->rx_max_coalesced_frames > XSC_MAX_COAL_FRAMES || + coal->rx_max_coalesced_frames_low > XSC_MAX_COAL_FRAMES) { + netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %u\n", + __func__, XSC_MAX_COAL_FRAMES); + 
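+ /* values above the supported maximum are rejected rather than clamped */ 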
return -ERANGE; + } + + mutex_lock(&priv->state_lock); + new_params = priv->nic_param; + + rx_moder = &new_params.rx_cq_moderation; + rx_moder->usec = coal->rx_coalesce_usecs; + rx_moder->pkts = coal->rx_max_coalesced_frames; + new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + new_params.rx_dim_usecs_low = coal->rx_coalesce_usecs_low; + new_params.rx_dim_frames_low = coal->rx_max_coalesced_frames_low; + + tx_moder = &new_params.tx_cq_moderation; + tx_moder->usec = coal->tx_coalesce_usecs; + tx_moder->pkts = coal->tx_max_coalesced_frames; + new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; + + if (priv->status != XSCALE_ETH_DRIVER_OK) { + priv->nic_param = new_params; + goto out; + } + + reset_rx = !!coal->use_adaptive_rx_coalesce != priv->nic_param.rx_dim_enabled; + reset_tx = !!coal->use_adaptive_tx_coalesce != priv->nic_param.tx_dim_enabled; + + if (rx_moder->cq_period_mode != kernel_coal->use_cqe_mode_rx) { + rx_moder->cq_period_mode = kernel_coal->use_cqe_mode_rx; + XSC_SET_PFLAG(&new_params, XSC_PFLAG_RX_CQE_BASED_MODER, + rx_moder->cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); + reset_rx = true; + } + if (tx_moder->cq_period_mode != kernel_coal->use_cqe_mode_tx) { + tx_moder->cq_period_mode = kernel_coal->use_cqe_mode_tx; + XSC_SET_PFLAG(&new_params, XSC_PFLAG_TX_CQE_BASED_MODER, + tx_moder->cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); + reset_tx = true; + } + + if (reset_rx) { + mode = XSC_GET_PFLAG(&new_params, XSC_PFLAG_RX_CQE_BASED_MODER); + + xsc_set_rx_cq_mode_params(&new_params, mode); + } + if (reset_tx) { + mode = XSC_GET_PFLAG(&new_params, XSC_PFLAG_TX_CQE_BASED_MODER); + + xsc_set_tx_cq_mode_params(&new_params, mode); + } + + priv->nic_param = new_params; + if (!reset_rx && !reset_tx) + goto out; + + err = xsc_safe_switch_channels(priv, NULL, NULL); + +out: + mutex_unlock(&priv->state_lock); + return err; +} + +static const struct ethtool_ops xsc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_LOW_HIGH | + ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH | + ETHTOOL_COALESCE_USE_ADAPTIVE, + .get_drvinfo = xsc_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = xsc_get_strings, + .get_sset_count = xsc_get_sset_count, + .get_ethtool_stats = xsc_get_ethtool_stats, + .get_ringparam = xsc_get_ringparam, + .set_ringparam = xsc_set_ringparam, + .set_channels = xsc_set_channels, + .get_channels = xsc_get_channels, + .get_coalesce = xsc_get_coalesce, + .set_coalesce = xsc_set_coalesce, + .get_ts_info = NULL, + .get_link_ksettings = xsc_get_link_ksettings, + .set_link_ksettings = xsc_set_link_ksettings, + .get_rxfh_key_size = xsc_get_rxfh_key_size, + .get_rxfh_indir_size = xsc_get_rxfh_indir_size, + .get_rxfh = xsc_get_rxfh, + .set_rxfh = xsc_set_rxfh, + .get_rxnfc = xsc_get_rxnfc, + .set_rxnfc = xsc_set_rxnfc, + .get_module_info = xsc_get_module_info, + .get_module_eeprom = xsc_get_module_eeprom, + .get_module_eeprom_by_page = xsc_get_module_eeprom_by_page, + .get_priv_flags = xsc_get_priv_flags, + .set_priv_flags = xsc_set_priv_flags, + .get_msglevel = xsc_get_msglevel, + .set_msglevel = xsc_set_msglevel, + .self_test = xsc_self_test, + .set_phys_id = xsc_set_phys_id, + .get_fecparam = xsc_get_fecparam, + .set_fecparam = xsc_set_fecparam, +}; + +void eth_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &xsc_ethtool_ops; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h 
b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h new file mode 100644 index 0000000000000000000000000000000000000000..eb2eb3491c148560ef9f108b6099b73a91f1f5b9 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_ETHTOOL_H +#define XSC_ETH_ETHTOOL_H + +void eth_set_ethtool_ops(struct net_device *dev); + +/* EEPROM Standards for plug in modules */ +#ifndef ETH_MODULE_SFF_8436_MAX_LEN +#define ETH_MODULE_SFF_8636_MAX_LEN 640 +#define ETH_MODULE_SFF_8436_MAX_LEN 640 +#endif + +#define LED_ACT_ON_HW 0xff + +#endif /* XSC_ETH_ETHTOOL_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c new file mode 100644 index 0000000000000000000000000000000000000000..547556aa536b99c37d2d2af13b746a1969d09045 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c @@ -0,0 +1,804 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include +#include "common/xsc_pp.h" + +#define PAGE_REF_ELEV (U16_MAX) +/* Upper bound on number of packets that share a single page */ +#define PAGE_REF_THRSD (PAGE_SIZE / 64) + +static inline void xsc_rq_notify_hw(struct xsc_rq *rq) +{ + struct xsc_core_device *xdev = rq->cq.xdev; + struct xsc_wq_cyc *wq = &rq->wqe.wq; + union xsc_recv_doorbell doorbell_value; + u64 rqwqe_id = wq->wqe_ctr << (ilog2(xdev->caps.recv_ds_num)); + + /* reverse wqe index to ds index */ + doorbell_value.next_pid = rqwqe_id; + doorbell_value.qp_num = rq->rqn; + + ETH_DEBUG_LOG("rq%d_db_val=0x%x, recv_ds=%d\n", + rq->rqn, doorbell_value.recv_data, + xdev->caps.recv_ds_num); + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + writel(doorbell_value.recv_data, REG_ADDR(xdev, xdev->regs.rx_db)); +} + +static inline void xsc_skb_set_hash(struct xsc_adapter *adapter, + struct xsc_cqe *cqe, + struct sk_buff *skb) +{ + struct xsc_rss_params *rss = &adapter->rss_params; + u32 hash_field; + bool l3_hash = false; + bool l4_hash = false; + int ht = 0; + + if (adapter->netdev->features & NETIF_F_RXHASH) { + if (skb->protocol == htons(ETH_P_IP)) { + hash_field = rss->rx_hash_fields[XSC_TT_IPV4_TCP]; + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IP || + hash_field & XSC_HASH_FIELD_SEL_DST_IP) + l3_hash = true; + + if (hash_field & XSC_HASH_FIELD_SEL_SPORT || + hash_field & XSC_HASH_FIELD_SEL_DPORT) + l4_hash = true; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hash_field = rss->rx_hash_fields[XSC_TT_IPV6_TCP]; + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IPV6 || + hash_field & XSC_HASH_FIELD_SEL_DST_IPV6) + l3_hash = true; + + if (hash_field & XSC_HASH_FIELD_SEL_SPORT_V6 || + hash_field & XSC_HASH_FIELD_SEL_DPORT_V6) + l4_hash = true; + } + + if (l3_hash && l4_hash) + ht = PKT_HASH_TYPE_L4; + else if (l3_hash) + ht = PKT_HASH_TYPE_L3; + if (ht) + skb_set_hash(skb, be32_to_cpu(cqe->vni), ht); + } +} + +static inline unsigned short from32to16(unsigned int x) +{ + /* add up 16-bit and 16-bit for 16+c bit */ + x = (x & 0xffff) + (x >> 16); + /* add up carry.. 
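(the first fold can leave a 17-bit result, so one more fold is needed)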
*/ + x = (x & 0xffff) + (x >> 16); + return x; +} + +static inline bool handle_udp_frag_csum(struct sk_buff *skb, struct epp_pph *pph) +{ +#ifdef XSC_UDP_FRAG_CSUM + char *head = (char *)pph; + struct iphdr *iph; + u8 l3_proto = PPH_OUTER_IP_TYPE(head); + u8 l4_proto = PPH_OUTER_TP_TYPE(head); + u16 csum_off = (u16)PPH_CSUM_OFST(head); + u16 csum_plen = (u16)PPH_CSUM_PLEN(head); + u8 payload_off = PPH_PAYLOAD_OFST(head); + u32 hw_csum = PPH_CSUM_VAL(head); + u16 udp_check = 0; + u16 udp_len = 0; + u32 off = 64; + __wsum csum1 = 0, csum2 = 0, csum3 = 0, csum; + +#ifdef CUM_SKB_DATA + head = (char *)skb->data; + off = 0; +#endif + + if (l4_proto != L4_PROTO_UDP && l4_proto != L4_PROTO_NONE) + return false; + + off += ETH_HLEN; + if (l3_proto == L3_PROTO_IP) { + iph = (struct iphdr *)(head + off); + if (!ip_is_fragment(iph)) + return false; + +#ifdef UDP_CSUM_DEBUG + pr_debug("ip_id=%d frag_off=0x%x l4_prt=%d l3_prt=%d iph_off=%d ip_len=%d csum_off=%d pload_off=%d\n", + ntohs(iph->id), ntohs(iph->frag_off), + l4_proto, l3_proto, PPH_OUTER_IP_OFST(head), PPH_OUTER_IP_LEN(pph), + csum_off, payload_off); +#endif + + off += iph->ihl * 4; + if (l4_proto == L4_PROTO_UDP) { + struct udphdr *uh = (struct udphdr *)(head + off); + + udp_check = uh->check; + udp_len = ntohs(uh->len); + } + + if (csum_off == 0) + csum_off = 256; + + pr_debug("%s: ip_id=%d frag_off=0x%x skb_len=%d data_len=%d csum_off=%d csum_plen=%d payload_off=%d udp_off=%d udp_len=%d udp_check=0x%x\n", + __func__, ntohs(iph->id), ntohs(iph->frag_off), + skb->len, skb->data_len, + csum_off, csum_plen, payload_off, off, udp_len, udp_check); +#ifdef CUM_RAW_DATA_DUMP + xsc_pkt_pph_dump((char *)head, 272); +#endif + + if (csum_off < off) { + csum1 = csum_partial((char *)(head + csum_off), (off - csum_off), 0); + csum2 = htons(from32to16(hw_csum)); + csum = csum_sub(csum2, csum1); + } else if (csum_off > off) { + csum2 = csum_partial((char *)(head + csum_off), csum_plen, 0); + csum1 = csum_partial((char *)(head + off), (csum_off - off), 0); + csum = htons(from32to16(hw_csum)); + csum = csum_partial((char *)(head + off), (csum_off - off), csum); + csum3 = csum_partial((char *)(head + off), (skb->len - off + 64), 0); + } else { + csum = htons(from32to16(hw_csum)); + } + skb->csum = csum_unfold(from32to16(csum)); + + ETH_DEBUG_LOG("%s: sw_cal_csum[%d:%d]=0x%x -> 0x%x\n", + __func__, off, csum_off, csum1, from32to16(csum1)); + ETH_DEBUG_LOG("%s: sw_cal_hw_csum[%d:%d]=0x%x -> 0x%x, hw_csum=0x%x -> 0x%x\n", + __func__, csum_off, csum_plen, csum2, from32to16(csum2), + hw_csum, from32to16(hw_csum)); + ETH_DEBUG_LOG("%s: sw_cal_tot_csum[%d:%d]=0x%x -> 0x%x, skb_csum=0x%x -> 0x%x\n", + __func__, off, skb->len, csum3, from32to16(csum3), csum, skb->csum); + + skb->ip_summed = CHECKSUM_COMPLETE; + + return true; + } +#endif + + return false; +} + +static inline void xsc_handle_csum(struct xsc_cqe *cqe, struct xsc_rq *rq, + struct sk_buff *skb, struct xsc_wqe_frag_info *wi) +{ + struct xsc_rq_stats *stats = rq->stats; + struct xsc_channel *c = rq->cq.channel; + struct net_device *netdev = c->adapter->netdev; + struct xsc_dma_info *dma_info = wi->di; + int offset_from = wi->offset; + struct epp_pph *hw_pph = page_address(dma_info->page) + offset_from; + + if (unlikely((netdev->features & NETIF_F_RXCSUM) == 0)) + goto csum_none; + + if (unlikely(XSC_GET_EPP2SOC_PPH_ERROR_BITMAP(hw_pph) & PACKET_UNKNOWN)) + goto csum_none; + + if (handle_udp_frag_csum(skb, hw_pph)) { + stats->csum_succ++; + goto out; + } + + if (XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + 
(!(cqe->csum_err & OUTER_AND_INNER))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 1; + skb->encapsulation = 1; + + stats->csum_unnecessary++; + } else if (XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + (!(cqe->csum_err & OUTER_BIT) && (cqe->csum_err & INNER_BIT))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 0; + skb->encapsulation = 1; + + stats->csum_unnecessary++; + } else if (!XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + (!(cqe->csum_err & OUTER_BIT))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + + stats->csum_unnecessary++; + } else { + stats->csum_err++; + } + + goto out; + +csum_none: + skb->csum = 0; + skb->ip_summed = CHECKSUM_NONE; + stats->csum_none++; +out: + return; +} + +static inline void xsc_build_rx_skb(struct xsc_cqe *cqe, + u32 cqe_bcnt, + struct xsc_rq *rq, + struct sk_buff *skb, + struct xsc_wqe_frag_info *wi) +{ + struct xsc_channel *c = rq->cq.channel; + struct net_device *netdev = c->netdev; + struct xsc_adapter *adapter = c->adapter; + + skb->mac_len = ETH_HLEN; + + skb_record_rx_queue(skb, rq->ix); + xsc_handle_csum(cqe, rq, skb, wi); + + skb->protocol = eth_type_trans(skb, netdev); + xsc_skb_set_hash(adapter, cqe, skb); +} + +static inline void xsc_complete_rx_cqe(struct xsc_rq *rq, + struct xsc_cqe *cqe, + u32 cqe_bcnt, + struct sk_buff *skb, + struct xsc_wqe_frag_info *wi) +{ + struct xsc_rq_stats *stats = rq->stats; + + stats->packets++; + stats->bytes += cqe_bcnt; + xsc_build_rx_skb(cqe, cqe_bcnt, rq, skb, wi); + + rq->dim_obj.sample.pkt_ctr = rq->stats->packets; + rq->dim_obj.sample.byte_ctr = rq->stats->bytes; +} + +static inline void xsc_add_skb_frag(struct xsc_rq *rq, + struct sk_buff *skb, + struct xsc_dma_info *di, + u32 frag_offset, u32 len, + unsigned int truesize) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + + dma_sync_single_for_cpu(dev, di->addr + frag_offset, len, DMA_FROM_DEVICE); + page_ref_inc(di->page); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + di->page, frag_offset, len, truesize); +} + +static inline void xsc_copy_skb_header(struct device *dev, + struct sk_buff *skb, + struct xsc_dma_info *dma_info, + int offset_from, u32 headlen) +{ + void *from = page_address(dma_info->page) + offset_from; + /* Aligning len to sizeof(long) optimizes memcpy performance */ + unsigned int len = ALIGN(headlen, sizeof(long)); + + dma_sync_single_for_cpu(dev, dma_info->addr + offset_from, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, from, len); +} + +static inline struct sk_buff *xsc_build_linear_skb(struct xsc_rq *rq, void *va, + u32 frag_size, u16 headroom, + u32 cqe_bcnt) +{ + struct sk_buff *skb = build_skb(va, frag_size); + + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return NULL; + } + + skb_reserve(skb, headroom); + skb_put(skb, cqe_bcnt); + + return skb; +} + +struct sk_buff *xsc_skb_from_cqe_linear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph) +{ + struct xsc_dma_info *di = wi->di; + u16 rx_headroom = rq->buff.headroom; + int pph_len = has_pph ? 
XSC_PPH_HEAD_LEN : 0; + struct sk_buff *skb; + void *va, *data; + u32 frag_size; + + va = page_address(di->page) + wi->offset; + data = va + rx_headroom + pph_len; + frag_size = XSC_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); + + dma_sync_single_range_for_cpu(rq->cq.xdev->device, di->addr, wi->offset, + frag_size, DMA_FROM_DEVICE); + prefetchw(va); /* xdp_frame data area */ + prefetch(data); + + skb = xsc_build_linear_skb(rq, va, frag_size, (rx_headroom + pph_len), + (cqe_bcnt - pph_len)); + if (unlikely(!skb)) + return NULL; + + /* queue up for recycling/reuse */ + page_ref_inc(di->page); + + return skb; +} + +struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph) +{ + struct xsc_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct xsc_wqe_frag_info *head_wi = wi; + struct xsc_wqe_frag_info *rx_wi = wi; + u16 headlen = min_t(u32, XSC_RX_MAX_HEAD, cqe_bcnt); + u16 frag_headlen = headlen; + u16 byte_cnt = cqe_bcnt - headlen; + struct sk_buff *skb; + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + struct net_device *netdev = c->adapter->netdev; + u8 fragcnt = 0; + u16 head_offset = head_wi->offset; + u16 frag_consumed_bytes = 0; + int i = 0; + +#ifndef NEED_CREATE_RX_THREAD + skb = napi_alloc_skb(rq->cq.napi, ALIGN(XSC_RX_MAX_HEAD, sizeof(long))); +#else + skb = netdev_alloc_skb(netdev, ALIGN(XSC_RX_MAX_HEAD, sizeof(long))); +#endif + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return NULL; + } + + prefetchw(skb->data); + + if (likely(has_pph)) { + headlen = min_t(u32, XSC_RX_MAX_HEAD, (cqe_bcnt - XSC_PPH_HEAD_LEN)); + frag_headlen = headlen + XSC_PPH_HEAD_LEN; + byte_cnt = cqe_bcnt - headlen - XSC_PPH_HEAD_LEN; + head_offset += XSC_PPH_HEAD_LEN; + } + + if (byte_cnt == 0 && (XSC_GET_PFLAG(&c->adapter->nic_param, XSC_PFLAG_RX_COPY_BREAK))) { + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) + wi->is_available = 1; + goto ret; + } + + for (i = 0; i < rq->wqe.info.num_frags; i++, rx_wi++) + rx_wi->is_available = 0; + + while (byte_cnt) { + /* take as many bytes as the current fragment can hold */ + frag_consumed_bytes = + min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt); + + xsc_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen, + frag_consumed_bytes, frag_info->frag_stride); + byte_cnt -= frag_consumed_bytes; + ETH_DEBUG_LOG("consumed=%d, frag_size=%d, byte_cnt=%d, cqe_bcnt=%d, addr=0x%llx\n", + frag_consumed_bytes, frag_info->frag_size, byte_cnt, + cqe_bcnt, (u64)wi->di->addr); + + /* drop any bytes left over after the last fragment to avoid + * reading past the extended WQE + */ + frag_headlen = 0; + fragcnt++; + if (fragcnt == rq->wqe.info.num_frags) { + if (byte_cnt) { + rq->stats->oversize_pkts_sw_drop += byte_cnt; + netdev_warn(netdev, + "large packet reached the maximum number of recv-wqe frags\n"); + netdev_warn(netdev, + "%u bytes dropped: frag_num=%d, headlen=%d, cqe_cnt=%d, frag0_bytes=%d, frag_size=%d\n", + byte_cnt, fragcnt, headlen, cqe_bcnt, + frag_consumed_bytes, frag_info->frag_size); + } + break; + } + + frag_info++; + wi++; + } + +ret: + /* copy header */ + xsc_copy_skb_header(dev, skb, head_wi->di, head_offset, headlen); + + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += headlen; + skb->len += headlen; + + skbdata_debug_dump(skb, headlen, 0); + + return skb; +} + +static inline bool xsc_rx_cache_is_empty(struct xsc_page_cache *cache) +{ + return cache->head == cache->tail; +} + +static inline bool xsc_page_is_reserved(struct page *page) +{ + return page_is_pfmemalloc(page) 
|| page_to_nid(page) != numa_mem_id(); +} + +static inline bool xsc_rx_cache_get(struct xsc_rq *rq, + struct xsc_dma_info *dma_info) +{ + struct xsc_page_cache *cache = &rq->page_cache; + struct xsc_rq_stats *stats = rq->stats; + struct xsc_core_device *xdev = rq->cq.xdev; + + if (unlikely(xsc_rx_cache_is_empty(cache))) { + stats->cache_empty++; + return false; + } + + if (page_ref_count(cache->page_cache[cache->head].page) != 1) { + stats->cache_busy++; + return false; + } + + stats->cache_reuse++; + *dma_info = cache->page_cache[cache->head]; + cache->head = (cache->head + 1) & (cache->sz - 1); + + dma_sync_single_for_device(&xdev->pdev->dev, dma_info->addr, + PAGE_SIZE, DMA_FROM_DEVICE); + + return true; +} + +static inline bool xsc_rx_cache_put(struct xsc_rq *rq, + struct xsc_dma_info *dma_info) +{ + struct xsc_page_cache *cache = &rq->page_cache; + struct xsc_rq_stats *stats = rq->stats; + u32 tail_next = (cache->tail + 1) & (cache->sz - 1); + + if (tail_next == cache->head) { + stats->cache_full++; + return false; + } + + if (unlikely(xsc_page_is_reserved(dma_info->page))) { + stats->cache_waive++; + return false; + } + + cache->page_cache[cache->tail] = *dma_info; + cache->tail = tail_next; + return true; +} + +void xsc_page_dma_unmap(struct xsc_rq *rq, struct xsc_dma_info *dma_info) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + + dma_unmap_page(dev, dma_info->addr, XSC_RX_FRAG_SZ, rq->buff.map_dir); +} + +static inline void xsc_put_page(struct xsc_dma_info *dma_info) +{ + put_page(dma_info->page); +} + +void xsc_page_release_dynamic(struct xsc_rq *rq, + struct xsc_dma_info *dma_info, bool recycle) +{ + if (likely(recycle)) { +#ifdef XSC_PAGE_CACHE + if (xsc_rx_cache_put(rq, dma_info)) + return; +#endif + + xsc_page_dma_unmap(rq, dma_info); + page_pool_recycle_direct(rq->page_pool, dma_info->page); + } else { + xsc_page_dma_unmap(rq, dma_info); + page_pool_put_defragged_page(rq->page_pool, + dma_info->page, + -1, true); + } +} + +static inline void xsc_put_rx_frag(struct xsc_rq *rq, + struct xsc_wqe_frag_info *frag, bool recycle) +{ + if (frag->last_in_page) + xsc_page_release_dynamic(rq, frag->di, recycle); +} + +static inline struct xsc_wqe_frag_info *get_frag(struct xsc_rq *rq, u16 ix) +{ + return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; +} + +static inline void xsc_free_rx_wqe(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, bool recycle) +{ + int i; + + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) { + if (wi->is_available && recycle) + continue; + xsc_put_rx_frag(rq, wi, recycle); + } +} + +static void xsc_dump_error_rqcqe(struct xsc_rq *rq, + struct xsc_cqe *cqe) +{ + struct xsc_channel *c = rq->cq.channel; + struct net_device *netdev = c->adapter->netdev; + u32 ci = xsc_cqwq_get_ci(&rq->cq.wq); + + net_err_ratelimited("Error cqe on dev=%s, cqn=%d, ci=%d, rqn=%d, qpn=%d, error_code=0x%x\n", + netdev->name, rq->cq.xcq.cqn, ci, + rq->rqn, cqe->qp_id, get_cqe_opcode(cqe)); +} + +void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, + struct xsc_rq *rq, struct xsc_cqe *cqe) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + struct xsc_channel *c = rq->cq.channel; + u8 cqe_opcode = get_cqe_opcode(cqe); + struct xsc_wqe_frag_info *wi; + struct sk_buff *skb; + u32 cqe_bcnt; + u16 ci; + + ci = xsc_wq_cyc_ctr2ix(wq, cqwq->cc); + wi = get_frag(rq, ci); + if (unlikely(cqe_opcode & BIT(7))) { + xsc_dump_error_rqcqe(rq, cqe); + rq->stats->cqe_err++; + goto free_wqe; + } + + cqe_bcnt = le32_to_cpu(cqe->msg_len); + if (cqe->has_pph && cqe_bcnt 
<= XSC_PPH_HEAD_LEN) { + rq->stats->wqe_err++; + goto free_wqe; + } + + if (unlikely(cqe_bcnt > rq->frags_sz)) { + if (!XSC_GET_PFLAG(&c->adapter->nic_param, XSC_PFLAG_DROPLESS_RQ)) { + rq->stats->oversize_pkts_sw_drop += cqe_bcnt; + goto free_wqe; + } + rq->stats->oversize_pkts_err++; + } + + cqe_bcnt = min_t(u32, cqe_bcnt, rq->frags_sz); + skb = rq->wqe.skb_from_cqe(rq, wi, cqe_bcnt, cqe->has_pph); + if (!skb) + goto free_wqe; + + xsc_complete_rx_cqe(rq, cqe, + cqe->has_pph == 1 ? cqe_bcnt - XSC_PPH_HEAD_LEN : cqe_bcnt, + skb, wi); + +#ifdef NEED_CREATE_RX_THREAD + netif_rx_ni(skb); +#else + napi_gro_receive(rq->cq.napi, skb); +#endif + +free_wqe: + xsc_free_rx_wqe(rq, wi, true); + xsc_wq_cyc_pop(wq); +} + +int xsc_poll_rx_cq(struct xsc_cq *cq, int budget) +{ + struct xsc_rq *rq = container_of(cq, struct xsc_rq, cq); + struct xsc_cqwq *cqwq = &cq->wq; + struct xsc_cqe *cqe; + int work_done = 0; + struct xsc_ch_stats *ch_stats = cq->channel->stats; + + if (!test_bit(XSC_ETH_RQ_STATE_ENABLED, &rq->state)) + return 0; + + while ((work_done < budget) && (cqe = xsc_cqwq_get_cqe(cqwq))) { + rq->stats->cqes++; + + rq->handle_rx_cqe(cqwq, rq, cqe); + ++work_done; + + xsc_cqwq_pop(cqwq); + } + + if (!work_done) + goto out; + + xsc_cq_notify_hw(cq); + /* ensure cq space is freed before enabling more cqes */ + wmb(); + +out: + ch_stats->poll += work_done; + if (work_done < budget) { + if (ch_stats->poll == 0) + ch_stats->poll_0++; + else if (ch_stats->poll < 64) + ch_stats->poll_1_63++; + else if (ch_stats->poll < 512) + ch_stats->poll_64_511++; + else if (ch_stats->poll < 1024) + ch_stats->poll_512_1023++; + else + ch_stats->poll_1024++; + } + + return work_done; +} + +static inline int xsc_page_alloc_mapped(struct xsc_rq *rq, + struct xsc_dma_info *dma_info) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + +#ifdef XSC_PAGE_CACHE + if (xsc_rx_cache_get(rq, dma_info)) + return 0; + + rq->stats->cache_alloc++; +#endif + + dma_info->page = page_pool_dev_alloc_pages(rq->page_pool); + if (unlikely(!dma_info->page)) + return -ENOMEM; + + dma_info->addr = dma_map_page(dev, dma_info->page, 0, + XSC_RX_FRAG_SZ, rq->buff.map_dir); + if (unlikely(dma_mapping_error(dev, dma_info->addr))) { + page_pool_recycle_direct(rq->page_pool, dma_info->page); + dma_info->page = NULL; + return -ENOMEM; + } + + return 0; +} + +static inline int xsc_get_rx_frag(struct xsc_rq *rq, + struct xsc_wqe_frag_info *frag) +{ + int err = 0; + + if (!frag->offset && !frag->is_available) + /* On first frag (offset == 0), replenish page (dma_info actually). + * Other frags that point to the same dma_info (with a different + * offset) should just use the new one without replenishing again + * by themselves. 
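 * A fragment whose is_available flag is set was kept intact by the
 * previous completion (rx copy-break), so its page is reused without
 * a new allocation.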
+ */ + err = xsc_page_alloc_mapped(rq, frag->di); + + return err; +} + +static int xsc_alloc_rx_wqe(struct xsc_rq *rq, struct xsc_eth_rx_wqe_cyc *wqe, u16 ix) +{ + struct xsc_wqe_frag_info *frag = get_frag(rq, ix); + u64 addr; + int i; + int err; + + for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) { + err = xsc_get_rx_frag(rq, frag); + if (unlikely(err)) + goto free_frags; + + addr = cpu_to_le64(frag->di->addr + frag->offset + rq->buff.headroom); + wqe->data[i].va = addr; + if (frag->offset == 0) + ETH_DEBUG_LOG("rq%d_wqe%d_frag%d off=%d last=%d refcnt=%d addr=0x%llx\n", + rq->rqn, ix, i, frag->offset, frag->last_in_page, + page_ref_count(frag->di->page), addr); + } + + return 0; + +free_frags: + while (--i >= 0) + xsc_put_rx_frag(rq, --frag, true); + + return err; +} + +void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix) +{ + struct xsc_wqe_frag_info *wi = get_frag(rq, ix); + + xsc_free_rx_wqe(rq, wi, false); +} + +static int xsc_alloc_rx_wqes(struct xsc_rq *rq, u16 ix, u8 wqe_bulk) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + struct xsc_eth_rx_wqe_cyc *wqe; + int err; + int i; + int idx; + + for (i = 0; i < wqe_bulk; i++) { + idx = xsc_wq_cyc_ctr2ix(wq, (ix + i)); + wqe = xsc_wq_cyc_get_wqe(wq, idx); + + err = xsc_alloc_rx_wqe(rq, wqe, idx); + if (unlikely(err)) { + rq->stats->buff_alloc_err++; + goto free_wqes; + } + } + + return 0; + +free_wqes: + while (--i >= 0) + xsc_eth_dealloc_rx_wqe(rq, ix + i); + + return err; +} + +bool xsc_eth_post_rx_wqes(struct xsc_rq *rq) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + u8 wqe_bulk, wqe_bulk_min; + int alloc; + u16 head; + int err = 0; + + wqe_bulk = rq->wqe.info.wqe_bulk; + wqe_bulk_min = rq->wqe.info.wqe_bulk_min; + if (xsc_wq_cyc_missing(wq) < wqe_bulk) + return false; + + do { + head = xsc_wq_cyc_get_head(wq); + + alloc = min_t(int, wqe_bulk, xsc_wq_cyc_missing(wq)); + if (alloc < wqe_bulk && alloc >= wqe_bulk_min) + alloc = alloc & 0xfffffffe; + + if (alloc > 0) { + err = xsc_alloc_rx_wqes(rq, head, alloc); + if (unlikely(err)) + break; + + xsc_wq_cyc_push_n(wq, alloc); + rq->stats->wqes += alloc; + } + } while (xsc_wq_cyc_missing(wq) >= wqe_bulk_min); + + dma_wmb(); + + /* ensure wqes are visible to device before updating doorbell record */ + xsc_rq_notify_hw(rq); + + return !!err; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..8b75ce05afb132ff7a9080ec72a23e950af3d3fc --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c @@ -0,0 +1,651 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include "common/xsc_cmd.h" +#include "common/xsc_core.h" + +#include "xsc_eth_stats.h" +#include "xsc_eth.h" + +static const struct counter_desc sw_stats_desc[] = { + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_inner_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_inner_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_unnecessary) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_none) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_succ) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_csum_partial) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_csum_partial_inner) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_queue_stopped) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_queue_dropped) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_xmit_more) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_cqes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_queue_wake) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_cqe_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_oversize_pkts_sw_drop) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_dim_us) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_dim_pkts) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, txdone_skb_null) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, txdone_skb_refcnt_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cqes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cqe_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_wqes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_wqe_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_dim_us) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_dim_pkts) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_oversize_pkts_sw_drop) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_oversize_pkts_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_buff_alloc_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_reuse) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_full) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_empty) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_busy) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_alloc) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_waive) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_ext) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_rdc) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_events) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_0) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_1_63) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_64_511) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_512_1023) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_1024) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_tx) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_arm) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_noarm) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_aff_change) }, +}; + +#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) + +static int xsc_grp_sw_get_num_stats(struct xsc_adapter *adapter) +{ + return NUM_SW_COUNTERS; +} + +static int 
xsc_grp_sw_fill_strings(struct xsc_adapter *adapter, u8 *data, int idx) +{ + int i; + + for (i = 0; i < NUM_SW_COUNTERS; i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + sw_stats_desc[i].format, + ETH_GSTRING_LEN); + return idx; +} + +static int xsc_grp_sw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) +{ + int i; + + for (i = 0; i < NUM_SW_COUNTERS; i++) + data[idx++] = XSC_READ_CTR64_CPU(&adapter->stats->sw, sw_stats_desc, i); + return idx; +} + +void xsc_grp_sw_update_stats(struct xsc_adapter *adapter) +{ + struct xsc_sw_stats *s = &adapter->stats->sw; + int max_tc = xsc_get_netdev_max_tc(adapter); + int i; + + memset(s, 0, sizeof(*s)); + + for (i = 0; i < xsc_get_netdev_max_channels(adapter); i++) { + struct xsc_channel_stats *channel_stats = + &adapter->stats->channel_stats[i]; + + struct xsc_rq_stats *rq_stats = &channel_stats->rq; + struct xsc_ch_stats *ch_stats = &channel_stats->ch; + int j; + + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + s->rx_csum_unnecessary += rq_stats->csum_unnecessary; + s->rx_csum_none += rq_stats->csum_none; + s->rx_csum_err += rq_stats->csum_err; + s->rx_csum_succ += rq_stats->csum_succ; + s->rx_cqes += rq_stats->cqes; + s->rx_cqe_err += rq_stats->cqe_err; + s->rx_wqes += rq_stats->wqes; + s->rx_wqe_err += rq_stats->wqe_err; + s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop; + s->rx_oversize_pkts_err += rq_stats->oversize_pkts_err; + s->rx_buff_alloc_err += rq_stats->buff_alloc_err; + s->rx_cache_reuse += rq_stats->cache_reuse; + s->rx_cache_full += rq_stats->cache_full; + s->rx_cache_empty += rq_stats->cache_empty; + s->rx_cache_busy += rq_stats->cache_busy; + s->rx_cache_alloc += rq_stats->cache_alloc; + s->rx_cache_waive += rq_stats->cache_waive; + s->rx_cache_ext += rq_stats->cache_ext; + s->rx_cache_rdc += rq_stats->cache_rdc; + s->rx_dim_us += rq_stats->dim_us; + s->rx_dim_pkts += rq_stats->dim_pkts; + + s->ch_events += ch_stats->events; + s->ch_poll += ch_stats->poll; + s->ch_poll_0 += ch_stats->poll_0; + s->ch_poll_1_63 += ch_stats->poll_1_63; + s->ch_poll_64_511 += ch_stats->poll_64_511; + s->ch_poll_512_1023 += ch_stats->poll_512_1023; + s->ch_poll_1024 += ch_stats->poll_1024; + s->ch_poll_tx += ch_stats->poll_tx; + s->ch_arm += ch_stats->arm; + s->ch_noarm += ch_stats->noarm; + s->ch_aff_change += ch_stats->aff_change; + + for (j = 0; j < max_tc; j++) { + struct xsc_sq_stats *sq_stats = &channel_stats->sq[j]; + + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_tso_packets += sq_stats->tso_packets; + s->tx_tso_bytes += sq_stats->tso_bytes; + s->tx_tso_inner_packets += sq_stats->tso_inner_packets; + s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; + s->tx_csum_partial += sq_stats->csum_partial; + s->tx_csum_partial_inner += sq_stats->csum_partial_inner; + s->tx_csum_none += sq_stats->csum_none; + s->tx_queue_stopped += sq_stats->stopped; + s->tx_queue_dropped += sq_stats->dropped; + s->tx_xmit_more += sq_stats->xmit_more; + s->tx_cqes += sq_stats->cqes; + s->tx_queue_wake += sq_stats->wake; + s->tx_cqe_err += sq_stats->cqe_err; + s->tx_oversize_pkts_sw_drop += sq_stats->oversize_pkts_sw_drop; + s->txdone_skb_null += sq_stats->txdone_skb_null; + s->txdone_skb_refcnt_err += sq_stats->txdone_skb_refcnt_err; + s->skb_linear += sq_stats->skb_linear; + s->tx_dim_us += sq_stats->dim_us; + s->tx_dim_pkts += sq_stats->dim_pkts; + } + } +} + +static const struct counter_desc rq_stats_desc[] = { + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, packets) }, + { 
XSC_DECLARE_RX_STAT(struct xsc_rq_stats, bytes) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_unnecessary) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_none) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_succ) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cqes) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, dim_us) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, dim_pkts) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, wqe_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, oversize_pkts_sw_drop) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, oversize_pkts_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, buff_alloc_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_reuse) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_full) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_empty) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_busy) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_alloc) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_waive) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_ext) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_rdc) }, +}; + +static const struct counter_desc sq_stats_desc[] = { + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, packets) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, bytes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_packets) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_bytes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_inner_packets) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_inner_bytes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, csum_partial) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, csum_partial_inner) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, csum_none) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, stopped) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, dropped) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, xmit_more) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, cqes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, wake) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, dim_us) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, dim_pkts) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, cqe_err) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, oversize_pkts_sw_drop) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, txdone_skb_null) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, txdone_skb_refcnt_err) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, skb_linear) }, +}; + +static const struct counter_desc ch_stats_desc[] = { + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, events) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_0) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_1_63) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_64_511) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_512_1023) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_1024) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_tx) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, arm) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, noarm) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, aff_change) }, +}; + +#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) +#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) +#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) + +static int xsc_grp_channels_get_num_stats(struct xsc_adapter *adapter) +{ + int max_nch = xsc_get_netdev_max_channels(adapter); + int max_tc = xsc_get_netdev_max_tc(adapter); 
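+ + /* one set of CH and RQ counters per channel, plus one set of SQ + * counters per channel per traffic class + */ 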
+ + return (NUM_RQ_STATS * max_nch) + + (NUM_CH_STATS * max_nch) + + (NUM_SQ_STATS * max_nch * max_tc); +} + +static int xsc_grp_channels_fill_strings(struct xsc_adapter *adapter, u8 *data, + int idx) +{ + int max_nch = xsc_get_netdev_max_channels(adapter); + int max_tc = xsc_get_netdev_max_tc(adapter); + int i, j, tc; + + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_CH_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ch_stats_desc[j].format, i); + + for (i = 0; i < max_nch; i++) { + for (j = 0; j < NUM_RQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + rq_stats_desc[j].format, i); + } + + for (tc = 0; tc < max_tc; tc++) + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_SQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + sq_stats_desc[j].format, + i + tc * max_nch); + + return idx; +} + +static int xsc_grp_channels_fill_stats(struct xsc_adapter *adapter, u64 *data, + int idx) +{ + int max_nch = xsc_get_netdev_max_channels(adapter); + int max_tc = xsc_get_netdev_max_tc(adapter); + int i, j, tc; + struct xsc_stats *stats = adapter->stats; + + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_CH_STATS; j++) + data[idx++] = + XSC_READ_CTR64_CPU(&stats->channel_stats[i].ch, + ch_stats_desc, j); + + for (i = 0; i < max_nch; i++) { + for (j = 0; j < NUM_RQ_STATS; j++) + data[idx++] = + XSC_READ_CTR64_CPU(&stats->channel_stats[i].rq, + rq_stats_desc, j); + } + + for (tc = 0; tc < max_tc; tc++) + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_SQ_STATS; j++) + data[idx++] = + XSC_READ_CTR64_CPU(&stats->channel_stats[i].sq[tc], + sq_stats_desc, j); + + return idx; +} + +static const struct counter_desc hw_prio_stats_desc[] = { + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 0), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 1), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 2), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 3), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 4), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 5), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 6), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 7), + 
XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 7), + +}; + +static const struct counter_desc hw_pfc_prio_stats_desc[] = { + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 0), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 1), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 2), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 3), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 4), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 5), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 6), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 7), +}; + +static const struct counter_desc hw_eth_stats_pf_desc[] = { + /*by mac port*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_tx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_rx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_rx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, tx_pause) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_pause) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_fcs_errors) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_discards) }, + + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, tx_multicast_phy) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, tx_broadcast_phy) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_multicast_phy) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_broadcast_phy) }, + + /*by global*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, 
rdma_loopback_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_loopback_bytes) }, +}; + +static const struct counter_desc hw_eth_stats_vf_desc[] = { + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, rdma_tx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, rdma_tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, rdma_rx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, rdma_rx_bytes) }, +}; + +static const struct counter_desc pfc_stall_stats_desc[] = { + /*by mac port*/ + { XSC_DECLARE_STAT(struct xsc_pfc_stall_stats, tx_pause_storm_triggered) }, +}; + +static int get_hw_stats_eth(struct xsc_core_device *dev, struct xsc_hw_stats_eth *stats_eth) +{ + int ret; + struct xsc_hw_stats_mbox_in in; + struct xsc_hw_stats_eth_mbox_out out; + + memset(stats_eth, 0, sizeof(*stats_eth)); + + if (!dev) + return -1; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HW_STATS_ETH); + in.mac_port = dev->mac_port; + + ret = xsc_cmd_exec(dev, (void *)&in, sizeof(in), (void *)&out, sizeof(out)); + if (ret || out.hdr.status) + return -1; + + memcpy(stats_eth, &out.hw_stats, sizeof(*stats_eth)); + return 0; +} + +static int xsc_hw_get_num_stats(struct xsc_adapter *adapter) +{ + int ret = 0; + + if (is_support_hw_pf_stats(adapter->xdev)) { + ret = ARRAY_SIZE(hw_prio_stats_desc) + ARRAY_SIZE(hw_eth_stats_pf_desc) + + (is_support_pfc_prio_statistic(adapter->xdev) ? + ARRAY_SIZE(hw_pfc_prio_stats_desc) : 0) + + (is_support_pfc_stall_stats(adapter->xdev) ? + ARRAY_SIZE(pfc_stall_stats_desc) : 0); + } else { + ret = ARRAY_SIZE(hw_eth_stats_vf_desc); + } + + return ret; +} + +static int xsc_hw_fill_strings(struct xsc_adapter *adapter, u8 *data, int idx) +{ + int i; + struct xsc_core_device *xdev; + + xdev = adapter->xdev; + + if (is_support_hw_pf_stats(xdev)) { + for (i = 0; i < ARRAY_SIZE(hw_prio_stats_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_prio_stats_desc[i].format, + ETH_GSTRING_LEN); + + if (is_support_pfc_prio_statistic(xdev)) + for (i = 0; i < ARRAY_SIZE(hw_pfc_prio_stats_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_pfc_prio_stats_desc[i].format, + ETH_GSTRING_LEN); + + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_pf_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_eth_stats_pf_desc[i].format, + ETH_GSTRING_LEN); + + if (is_support_pfc_stall_stats(xdev)) + for (i = 0; i < ARRAY_SIZE(pfc_stall_stats_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + pfc_stall_stats_desc[i].format, + ETH_GSTRING_LEN); + } else { + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_vf_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_eth_stats_vf_desc[i].format, + ETH_GSTRING_LEN); + } + + return idx; +} + +static int xsc_hw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) +{ + struct xsc_prio_stats_mbox_in in; + struct xsc_prio_stats_mbox_out out; + struct xsc_pfc_prio_stats_mbox_in pfc_prio_in; + struct xsc_pfc_prio_stats_mbox_out pfc_prio_out; + struct xsc_pfc_stall_stats_mbox_in pfc_stall_in; + struct xsc_pfc_stall_stats_mbox_out pfc_stall_out; + struct xsc_core_device *xdev; + int ret; + u32 i; + u64 val; + u8 *stats; + struct xsc_hw_stats_eth stats_eth; + int ret_s; + + xdev = adapter->xdev; + ret_s = get_hw_stats_eth(xdev, &stats_eth); + + if (is_support_hw_pf_stats(xdev)) { + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_QUERY_PRIO_STATS); + in.pport = xdev->mac_port; + + ret = 
xsc_cmd_exec(adapter->xdev, (void *)&in,
+			   sizeof(struct xsc_prio_stats_mbox_in),
+			   (void *)&out, sizeof(struct xsc_prio_stats_mbox_out));
+		if (ret == 0 && out.hdr.status == 0) {
+			for (i = 0; i < ARRAY_SIZE(hw_prio_stats_desc); i++) {
+				val = XSC_READ_CTR64_CPU(&out.prio_stats,
+							 hw_prio_stats_desc, i);
+				data[idx++] = __be64_to_cpu(val);
+			}
+		} else {
+			/* keep the values aligned with the strings on failure */
+			idx += ARRAY_SIZE(hw_prio_stats_desc);
+		}
+
+		if (is_support_pfc_prio_statistic(xdev)) {
+			memset(&pfc_prio_in, 0, sizeof(pfc_prio_in));
+			memset(&pfc_prio_out, 0, sizeof(pfc_prio_out));
+			pfc_prio_in.hdr.opcode =
+				__cpu_to_be16(XSC_CMD_OP_QUERY_PFC_PRIO_STATS);
+			pfc_prio_in.pport = xdev->mac_port;
+
+			ret = xsc_cmd_exec(adapter->xdev, (void *)&pfc_prio_in,
+					   sizeof(struct xsc_pfc_prio_stats_mbox_in),
+					   (void *)&pfc_prio_out,
+					   sizeof(struct xsc_pfc_prio_stats_mbox_out));
+			if (ret == 0 && pfc_prio_out.hdr.status == 0) {
+				for (i = 0; i < ARRAY_SIZE(hw_pfc_prio_stats_desc); i++) {
+					val = XSC_READ_CTR64_CPU(&pfc_prio_out.prio_stats,
+								 hw_pfc_prio_stats_desc,
+								 i);
+					data[idx++] = __be64_to_cpu(val);
+				}
+			} else {
+				idx += ARRAY_SIZE(hw_pfc_prio_stats_desc);
+			}
+		}
+
+		if (!ret_s && stats_eth.is_pf) {
+			stats = (u8 *)&stats_eth.stats.pf_stats;
+			for (i = 0; i < ARRAY_SIZE(hw_eth_stats_pf_desc); i++) {
+				val = XSC_READ_CTR64_CPU(stats, hw_eth_stats_pf_desc, i);
+				data[idx++] = __be64_to_cpu(val);
+			}
+		} else {
+			idx += ARRAY_SIZE(hw_eth_stats_pf_desc);
+		}
+
+		if (is_support_pfc_stall_stats(xdev)) {
+			memset(&pfc_stall_in, 0, sizeof(pfc_stall_in));
+			memset(&pfc_stall_out, 0, sizeof(pfc_stall_out));
+			pfc_stall_in.hdr.opcode =
+				__cpu_to_be16(XSC_CMD_OP_IOCTL_QUERY_PFC_STALL_STATS);
+			pfc_stall_in.mac_port = xdev->mac_port;
+
+			ret = xsc_cmd_exec(adapter->xdev,
+					   (void *)&pfc_stall_in,
+					   sizeof(struct xsc_pfc_stall_stats_mbox_in),
+					   (void *)&pfc_stall_out,
+					   sizeof(struct xsc_pfc_stall_stats_mbox_out));
+			if (ret == 0 && pfc_stall_out.hdr.status == 0) {
+				for (i = 0; i < ARRAY_SIZE(pfc_stall_stats_desc); i++) {
+					val = XSC_READ_CTR64_CPU(&pfc_stall_out.pfc_stall_stats,
+								 pfc_stall_stats_desc, i);
+					data[idx++] = __be64_to_cpu(val);
+				}
+			} else {
+				idx += ARRAY_SIZE(pfc_stall_stats_desc);
+			}
+		}
+	} else {
+		if (!ret_s && !stats_eth.is_pf) {
+			stats = (u8 *)&stats_eth.stats.vf_stats;
+			for (i = 0; i < ARRAY_SIZE(hw_eth_stats_vf_desc); i++) {
+				val = XSC_READ_CTR64_CPU(stats, hw_eth_stats_vf_desc, i);
+				data[idx++] = __be64_to_cpu(val);
+			}
+		} else {
+			idx += ARRAY_SIZE(hw_eth_stats_vf_desc);
+		}
+	}
+
+	return idx;
+}
+
+/* The stats groups are listed in the opposite order to their
+ * update_stats() calls.
+ */
+const struct xsc_stats_grp xsc_stats_grps[] = {
+	{
+		.get_num_stats = xsc_grp_sw_get_num_stats,
+		.fill_strings = xsc_grp_sw_fill_strings,
+		.fill_stats = xsc_grp_sw_fill_stats,
+		.update_stats = xsc_grp_sw_update_stats,
+	},
+
+	{
+		.get_num_stats = xsc_grp_channels_get_num_stats,
+		.fill_strings = xsc_grp_channels_fill_strings,
+		.fill_stats = xsc_grp_channels_fill_stats,
+	},
+
+	{
+		.get_num_stats = xsc_hw_get_num_stats,
+		.fill_strings = xsc_hw_fill_strings,
+		.fill_stats = xsc_hw_fill_stats,
+	},
+};
+
+const int xsc_num_stats_grps = ARRAY_SIZE(xsc_stats_grps);
+
+void xsc_fold_sw_stats64(struct xsc_adapter *adapter, struct rtnl_link_stats64 *s)
+{
+	int i, j;
+
+	for (i = 0; i < xsc_get_netdev_max_channels(adapter); i++) {
+		struct xsc_channel_stats *channel_stats = &adapter->stats->channel_stats[i];
+		struct xsc_rq_stats *rq_stats = &channel_stats->rq;
+
+		s->rx_packets += rq_stats->packets;
+		s->rx_bytes += rq_stats->bytes;
+
+		for (j = 0; j < xsc_get_netdev_max_tc(adapter); j++) {
+			struct xsc_sq_stats *sq_stats = &channel_stats->sq[j];
+
+			s->tx_packets += sq_stats->packets;
+			s->tx_bytes += sq_stats->bytes;
+			s->tx_dropped += sq_stats->dropped;
+		}
+	}
+}
diff --git 
a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..069c5d8ad0dbbda55f1a56bbf768ae2738554290 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_EN_STATS_H +#define XSC_EN_STATS_H + +#include "xsc_eth_common.h" + +#define XSC_READ_CTR64_CPU(ptr, dsc, i) \ + (*(u64 *)((char *)(ptr) + (dsc)[i].offset)) + +#define ETH_GSTRING_LEN 32 + +#define XSC_DECLARE_STAT(type, fld) ""#fld, offsetof(type, fld) +#define XSC_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) +#define XSC_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld) +#define XSC_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) + +#define XSC_DECLARE_HW_PRIO_STAT_NAME(fld, prio) (#fld "_prio"#prio) +#define XSC_DECLARE_HW_PRIO_STAT_OFFSET(type, fld, prio) \ + (offsetof(type, fld) + (sizeof(type) * (prio))) +#define XSC_DECLARE_HW_PRIO_STAT(type, fld, prio) \ + {XSC_DECLARE_HW_PRIO_STAT_NAME(fld, prio), \ + XSC_DECLARE_HW_PRIO_STAT_OFFSET(type, fld, prio)} + +struct xsc_rq_stats { + u64 packets; + u64 bytes; + u64 csum_unnecessary; + u64 csum_none; + u64 csum_err; + u64 csum_succ; + u64 cqes; + u64 cqe_err; + u64 wqes; + u64 wqe_err; + u64 oversize_pkts_sw_drop; + u64 oversize_pkts_err; + u64 buff_alloc_err; + u64 cache_reuse; + u64 cache_full; + u64 cache_empty; + u64 cache_busy; + u64 cache_alloc; + u64 cache_waive; + u64 cache_ext; + u64 cache_rdc; + u64 dim_us; + u64 dim_pkts; +}; + +struct xsc_sq_stats { + /* commonly accessed in data path */ + u64 packets; + u64 bytes; + u64 tso_packets; + u64 tso_bytes; + u64 tso_inner_packets; + u64 tso_inner_bytes; + u64 csum_partial; + u64 csum_partial_inner; + /* less likely accessed in data path */ + u64 csum_none; + u64 stopped; + u64 dropped; + u64 xmit_more; + /* dirtied @completion */ + u64 cqes; + u64 wake; + u64 cqe_err; + u64 oversize_pkts_sw_drop; + u64 txdone_skb_null; + u64 txdone_skb_refcnt_err; + u64 skb_linear; + u64 dim_us; + u64 dim_pkts; +}; + +struct xsc_ch_stats { + u64 events; + u64 poll; + u64 poll_0; + u64 poll_1_63; + u64 poll_64_511; + u64 poll_512_1023; + u64 poll_1024; + u64 poll_tx; + u64 arm; + u64 noarm; + u64 aff_change; +} ____cacheline_aligned_in_smp; + +struct xsc_adapter; +struct xsc_stats_grp { + u16 update_stats_mask; + int (*get_num_stats)(struct xsc_adapter *adapter); + int (*fill_strings)(struct xsc_adapter *adapter, u8 *data, int idx); + int (*fill_stats)(struct xsc_adapter *adapter, u64 *data, int idx); + void (*update_stats)(struct xsc_adapter *adapter); +}; + +struct counter_desc { + char format[ETH_GSTRING_LEN]; + size_t offset; /* Byte offset */ +}; + +struct xsc_sw_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 tx_tso_packets; + u64 tx_tso_bytes; + u64 tx_tso_inner_packets; + u64 tx_tso_inner_bytes; + u64 rx_csum_unnecessary; + u64 rx_csum_none; + u64 rx_csum_err; + u64 rx_csum_succ; + u64 tx_csum_none; + u64 tx_csum_partial; + u64 tx_csum_partial_inner; + u64 tx_queue_stopped; + u64 tx_queue_dropped; + u64 tx_xmit_more; + u64 tx_cqes; + u64 tx_queue_wake; + u64 tx_cqe_err; + u64 tx_oversize_pkts_sw_drop; + u64 tx_dim_us; + u64 tx_dim_pkts; + u64 txdone_skb_null; + u64 txdone_skb_refcnt_err; + u64 skb_linear; + u64 rx_cqes; + u64 rx_cqe_err; + u64 rx_wqes; + 
u64 rx_wqe_err; + u64 rx_oversize_pkts_sw_drop; + u64 rx_oversize_pkts_err; + u64 rx_buff_alloc_err; + u64 rx_cache_reuse; + u64 rx_cache_full; + u64 rx_cache_empty; + u64 rx_cache_busy; + u64 rx_cache_alloc; + u64 rx_cache_waive; + u64 rx_cache_ext; + u64 rx_cache_rdc; + u64 rx_dim_us; + u64 rx_dim_pkts; + u64 ch_events; + u64 ch_poll; + u64 ch_poll_0; + u64 ch_poll_1_63; + u64 ch_poll_64_511; + u64 ch_poll_512_1023; + u64 ch_poll_1024; + u64 ch_poll_tx; + u64 ch_arm; + u64 ch_noarm; + u64 ch_aff_change; +}; + +struct xsc_channel_stats { + struct xsc_ch_stats ch; + struct xsc_sq_stats sq[XSC_MAX_NUM_TC]; + struct xsc_rq_stats rq; +} ____cacheline_aligned_in_smp; + +struct xsc_stats { + struct xsc_sw_stats sw; + struct xsc_channel_stats channel_stats[XSC_ETH_MAX_NUM_CHANNELS]; +}; + +extern const struct xsc_stats_grp xsc_stats_grps[]; +extern const int xsc_num_stats_grps; + +void xsc_fold_sw_stats64(struct xsc_adapter *adapter, struct rtnl_link_stats64 *s); + +#endif /* XSC_EN_STATS_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..8709b22c3b879e766f14de9fe2400b92637f3354 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" + +#include "xsc_eth.h" + +static void pcie_lat_hw_work(struct work_struct *work) +{ + int err; + struct delayed_work *dwork = to_delayed_work(work); + struct xsc_pcie_lat_work *pcie_lat = container_of(dwork, struct xsc_pcie_lat_work, work); + struct xsc_core_device *xdev = pcie_lat->xdev; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_HW); + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "Failed to run pcie_lat hw, err(%u), status(%u)\n", + err, out.hdr.status); + } + schedule_delayed_work_on(smp_processor_id(), dwork, + msecs_to_jiffies(pcie_lat->period * 1000)); +} + +static void pcie_lat_hw_init(struct xsc_core_device *xdev) +{ + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_HW_INIT); + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "Failed to run pcie_lat hw, err(%u), status(%u)\n", + err, out.hdr.status); + } +} + +static ssize_t pcie_lat_enable_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = 
__cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_EN); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%hhu\n", out.pcie_lat.pcie_lat_enable); +} + +static ssize_t pcie_lat_enable_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *pcie_lat = adapter->xdev->pcie_lat; + int err; + u16 pcie_lat_enable; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + err = kstrtou16(buf, 0, &pcie_lat_enable); + if (err != 0) + return -EINVAL; + + if (pcie_lat_enable != XSC_PCIE_LAT_EN_DISABLE && + pcie_lat_enable != XSC_PCIE_LAT_EN_ENABLE) { + xsc_core_err(adapter->xdev, + "pcie_lat_enable should be set as %d or %d, cannot be %d\n", + XSC_PCIE_LAT_EN_DISABLE, XSC_PCIE_LAT_EN_ENABLE, + pcie_lat_enable); + return -EPERM; + } + + if (pcie_lat_enable == XSC_PCIE_LAT_EN_ENABLE && + pcie_lat->enable == XSC_PCIE_LAT_EN_DISABLE) { + pcie_lat_hw_init(adapter->xdev); + pcie_lat->adapter = adapter; + INIT_DELAYED_WORK(&pcie_lat->work, pcie_lat_hw_work); + schedule_delayed_work_on(smp_processor_id(), &pcie_lat->work, + msecs_to_jiffies(pcie_lat->period * 1000)); + } else if (pcie_lat_enable == XSC_PCIE_LAT_EN_DISABLE && + pcie_lat->enable == XSC_PCIE_LAT_EN_ENABLE) { + cancel_delayed_work_sync(&pcie_lat->work); + } + + pcie_lat->enable = pcie_lat_enable; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_SET_EN); + in.pcie_lat.pcie_lat_enable = pcie_lat_enable; + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to set pcie_lat en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static DEVICE_ATTR_RW(pcie_lat_enable); + +static ssize_t pcie_lat_interval_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err, i; + u32 count = 0; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_INTERVAL); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat interval, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_PCIE_LAT_CFG_INTERVAL_MAX - 1); i++) + count += sprintf(&buf[count], "%u,", + __be32_to_cpu(out.pcie_lat.pcie_lat_interval[i])); + + count += sprintf(&buf[count], "%u\n", __be32_to_cpu(out.pcie_lat.pcie_lat_interval[i])); + + return count; +} + +static DEVICE_ATTR_RO(pcie_lat_interval); + +static ssize_t 
pcie_lat_period_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *tmp = adapter->xdev->pcie_lat; + + return sprintf(buf, "%u\n", tmp->period); +} + +static ssize_t pcie_lat_period_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *tmp = adapter->xdev->pcie_lat; + int err; + u32 pcie_lat_period; + + err = kstrtouint(buf, 0, &pcie_lat_period); + if (err != 0) + return -EINVAL; + + if (pcie_lat_period < XSC_PCIE_LAT_PERIOD_MIN || + pcie_lat_period > XSC_PCIE_LAT_PERIOD_MAX) { + xsc_core_err(adapter->xdev, "pcie_lat_period should be set between [%d-%d], cannot be %d\n", + XSC_PCIE_LAT_PERIOD_MIN, XSC_PCIE_LAT_PERIOD_MAX, + pcie_lat_period); + return -EPERM; + } + + tmp->period = pcie_lat_period; + + return count; +} + +static DEVICE_ATTR_RW(pcie_lat_period); + +static ssize_t pcie_lat_histogram_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int i, err; + u32 count = 0; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_HISTOGRAM); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, + "Failed to get pcie_lat histogram, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_PCIE_LAT_CFG_HISTOGRAM_MAX - 1); i++) + count += sprintf(&buf[count], "%u,", + __be32_to_cpu(out.pcie_lat.pcie_lat_histogram[i])); + + count += sprintf(&buf[count], "%u\n", __be32_to_cpu(out.pcie_lat.pcie_lat_histogram[i])); + + return count; +} + +static DEVICE_ATTR_RO(pcie_lat_histogram); + +static ssize_t pcie_lat_peak_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_PEAK); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat peak, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%u\n", __be32_to_cpu(out.pcie_lat.pcie_lat_peak)); +} + +static DEVICE_ATTR_RO(pcie_lat_peak); + +static struct attribute *pcie_lat_attrs[] = { + &dev_attr_pcie_lat_enable.attr, + &dev_attr_pcie_lat_interval.attr, + &dev_attr_pcie_lat_period.attr, + &dev_attr_pcie_lat_histogram.attr, + &dev_attr_pcie_lat_peak.attr, + NULL, +}; + +static struct attribute_group pcie_lat_group = { + .name = "pcie_lat", + .attrs = pcie_lat_attrs, +}; + +static int xsc_pcie_lat_sysfs_init(struct net_device *dev, struct xsc_core_device *xdev) +{ + int err = 0; + struct xsc_pcie_lat_work *tmp; + + tmp = 
kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	xdev->pcie_lat = tmp;
+	tmp->xdev = xdev;
+
+	tmp->enable = XSC_PCIE_LAT_EN_DISABLE;
+	tmp->period = XSC_PCIE_LAT_PERIOD_MIN;
+
+	err = sysfs_create_group(&dev->dev.kobj, &pcie_lat_group);
+	if (err)
+		goto err_free;
+
+	return 0;
+
+err_free:
+	/* the group was never created, so only undo the allocation */
+	xdev->pcie_lat = NULL;
+	kfree(tmp);
+
+	return err;
+}
+
+static void xsc_pcie_lat_sysfs_fini(struct net_device *dev, struct xsc_core_device *xdev)
+{
+	int err;
+	struct xsc_pcie_lat_work *tmp;
+	struct xsc_pcie_lat_feat_mbox_in in;
+	struct xsc_pcie_lat_feat_mbox_out out;
+
+	tmp = xdev->pcie_lat;
+	if (!tmp)
+		return;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT);
+	in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_SET_EN);
+	in.pcie_lat.pcie_lat_enable = XSC_PCIE_LAT_EN_DISABLE;
+
+	err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in),
+			   (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out));
+	if (err || out.hdr.status)
+		xsc_core_err(xdev, "Failed to set pcie_lat disable, err(%u), status(%u)\n",
+			     err, out.hdr.status);
+
+	if (tmp->enable == XSC_PCIE_LAT_EN_ENABLE)
+		cancel_delayed_work_sync(&tmp->work);
+
+	sysfs_remove_group(&dev->dev.kobj, &pcie_lat_group);
+
+	kfree(tmp);
+	xdev->pcie_lat = NULL;
+}
+
+int xsc_eth_sysfs_create(struct net_device *dev, struct xsc_core_device *xdev)
+{
+	int err = 0;
+
+	if (xsc_core_is_pf(xdev) && xdev->pf_id == 0)
+		err = xsc_pcie_lat_sysfs_init(dev, xdev);
+
+	return err;
+}
+
+void xsc_eth_sysfs_remove(struct net_device *dev, struct xsc_core_device *xdev)
+{
+	if (xsc_core_is_pf(xdev) && xdev->pf_id == 0)
+		xsc_pcie_lat_sysfs_fini(dev, xdev);
+}
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c
new file mode 100644
index 0000000000000000000000000000000000000000..8f5b4ecd9ed9de6e0d15e775c874f1feba8c245c
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved. 
+ */ + +#include +#include +#include "xsc_eth_stats.h" +#include "xsc_eth_common.h" +#include "common/xsc_hsi.h" +#include "common/qp.h" +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" + +#define XSC_OPCODE_RAW 0x7 + +static inline void *xsc_sq_fetch_wqe(struct xsc_sq *sq, size_t size, u16 *pi) +{ + struct xsc_wq_cyc *wq = &sq->wq; + void *wqe; + + /*caution, sp->pc is default to be zero*/ + *pi = xsc_wq_cyc_ctr2ix(wq, sq->pc); + wqe = xsc_wq_cyc_get_wqe(wq, *pi); + memset(wqe, 0, size); + + return wqe; +} + +u16 xsc_tx_get_gso_ihs(struct xsc_sq *sq, struct sk_buff *skb) +{ + struct xsc_sq_stats *stats = sq->stats; + u16 ihs; + + if (skb->encapsulation) { + ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + stats->tso_inner_packets++; + stats->tso_inner_bytes += skb->len - ihs; + } else { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + ihs = skb_transport_offset(skb) + sizeof(struct udphdr); + else + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + stats->tso_packets++; + stats->tso_bytes += skb->len - ihs; + } + + return ihs; +} + +void xsc_txwqe_build_cseg_csum(struct xsc_sq *sq, + struct sk_buff *skb, + struct xsc_send_wqe_ctrl_seg *cseg) +{ + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + if (skb->encapsulation) { + cseg->csum_en = XSC_ETH_WQE_INNER_AND_OUTER_CSUM; + sq->stats->csum_partial_inner++; + } else { + cseg->csum_en = XSC_ETH_WQE_OUTER_CSUM; + sq->stats->csum_partial++; + } + } else { + cseg->csum_en = XSC_ETH_WQE_NONE_CSUM; + sq->stats->csum_none++; + } +} + +static inline struct xsc_sq_dma *xsc_dma_get(struct xsc_sq *sq, u32 i) +{ + return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; +} + +static inline void xsc_dma_push(struct xsc_sq *sq, dma_addr_t addr, u32 size, + enum xsc_dma_map_type map_type) +{ + struct xsc_sq_dma *dma = xsc_dma_get(sq, sq->dma_fifo_pc++); + + dma->addr = addr; + dma->size = size; + dma->type = map_type; + ETH_DEBUG_LOG("dma = %p, dma->addr = %#llx\n", dma, dma->addr); +} + +static inline void xsc_tx_dma_unmap(struct device *dev, struct xsc_sq_dma *dma) +{ + switch (dma->type) { + case XSC_DMA_MAP_SINGLE: + dma_unmap_single(dev, dma->addr, dma->size, DMA_TO_DEVICE); + break; + case XSC_DMA_MAP_PAGE: + dma_unmap_page(dev, dma->addr, dma->size, DMA_TO_DEVICE); + break; + default: + ETH_DEBUG_LOG("%s\n", "xsc_tx_dma_unmap unknown DMA type!\n"); + } +} + +static void xsc_dma_unmap_wqe_err(struct xsc_sq *sq, u8 num_dma) +{ + struct xsc_adapter *adapter = sq->channel->adapter; + struct device *dev = adapter->dev; + + int i; + + for (i = 0; i < num_dma; i++) { + struct xsc_sq_dma *last_pushed_dma = xsc_dma_get(sq, --sq->dma_fifo_pc); + + xsc_tx_dma_unmap(dev, last_pushed_dma); + } +} + +static void xsc_txwqe_build_csegs(struct xsc_sq *sq, struct sk_buff *skb, + u16 mss, u16 ihs, u16 headlen, + u8 opcode, u16 ds_cnt, u32 num_bytes, + struct xsc_send_wqe_ctrl_seg *cseg) +{ + struct xsc_core_device *xdev = sq->cq.xdev; + int send_wqe_ds_num_log = ilog2(xdev->caps.send_ds_num); + + xsc_txwqe_build_cseg_csum(sq, skb, cseg); + + if (mss != 0) { + cseg->has_pph = 0; + cseg->so_type = 1; + cseg->so_hdr_len = ihs; + cseg->so_data_size = cpu_to_le16(mss); + } + + cseg->msg_opcode = opcode; + cseg->wqe_id = cpu_to_le16(sq->pc << send_wqe_ds_num_log); + cseg->ds_data_num = ds_cnt - XSC_SEND_WQEBB_CTRL_NUM_DS; + cseg->msg_len = cpu_to_le32(num_bytes); + + cseg->ce = 1; + + WQE_CSEG_DUMP("cseg", cseg); +} + +static int xsc_txwqe_build_dsegs(struct xsc_sq *sq, struct sk_buff *skb, + u16 ihs, u16 headlen, + struct xsc_wqe_data_seg *dseg) +{ + 
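+	/* Map the skb for DMA and emit one data segment per mapping:
+	 * dma_map_single() for the linear part (headlen bytes), then
+	 * skb_frag_dma_map() for every page fragment. Each mapping is
+	 * pushed onto the SQ dma fifo so the completion path can unmap
+	 * it; on any mapping error, the mappings made so far are rolled
+	 * back and -ENOMEM is returned.
+	 */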
dma_addr_t dma_addr = 0; + u8 num_dma = 0; + int i; + struct xsc_adapter *adapter = sq->channel->adapter; + struct device *dev = adapter->dev; + + if (headlen) { + dma_addr = dma_map_single(dev, skb->data, headlen, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->va = cpu_to_le64(dma_addr); + dseg->mkey = cpu_to_le32(sq->mkey_be); + dseg->seg_len = cpu_to_le32(headlen); + + WQE_DSEG_DUMP("dseg-headlen", dseg); + + xsc_dma_push(sq, dma_addr, headlen, XSC_DMA_MAP_SINGLE); + num_dma++; + dseg++; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + int fsz = skb_frag_size(frag); + + dma_addr = skb_frag_dma_map(dev, frag, 0, fsz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->va = cpu_to_le64(dma_addr); + dseg->mkey = cpu_to_le32(sq->mkey_be); + dseg->seg_len = cpu_to_le32(fsz); + + WQE_DSEG_DUMP("dseg-frag", dseg); + + xsc_dma_push(sq, dma_addr, fsz, XSC_DMA_MAP_PAGE); + num_dma++; + dseg++; + } + + return num_dma; + +dma_unmap_wqe_err: + xsc_dma_unmap_wqe_err(sq, num_dma); + return -ENOMEM; +} + +static inline bool xsc_wqc_has_room_for(struct xsc_wq_cyc *wq, + u16 cc, u16 pc, u16 n) +{ + return (xsc_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); +} + +static inline void xsc_sq_notify_hw(struct xsc_wq_cyc *wq, u16 pc, + struct xsc_sq *sq) +{ + struct xsc_adapter *adapter = sq->channel->adapter; + struct xsc_core_device *xdev = adapter->xdev; + union xsc_send_doorbell doorbell_value; + int send_ds_num_log = ilog2(xdev->caps.send_ds_num); + + /*reverse wqe index to ds index*/ + doorbell_value.next_pid = pc << send_ds_num_log; + doorbell_value.qp_num = sq->sqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + ETH_DEBUG_LOG("pc = %d sqn = %d\n", pc, sq->sqn); + ETH_DEBUG_LOG("doorbell_value = %#x\n", doorbell_value.send_data); + writel(doorbell_value.send_data, REG_ADDR(xdev, xdev->regs.tx_db)); +} + +void xsc_txwqe_complete(struct xsc_sq *sq, struct sk_buff *skb, + u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma, + struct xsc_tx_wqe_info *wi) +{ + struct xsc_wq_cyc *wq = &sq->wq; + + wi->num_bytes = num_bytes; + wi->num_dma = num_dma; + wi->num_wqebbs = num_wqebbs; + wi->skb = skb; + +#ifdef XSC_BQL_SUPPORT + ETH_SQ_STATE(sq); + netdev_tx_sent_queue(sq->txq, num_bytes); + ETH_SQ_STATE(sq); +#endif + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + ETH_DEBUG_LOG("%s\n", "hw tstamp\n"); + } + + /*1*/ + sq->pc += wi->num_wqebbs; + ETH_DEBUG_LOG("%d\n", sq->pc); + + if (unlikely(!xsc_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) { + netif_tx_stop_queue(sq->txq); + sq->stats->stopped++; + ETH_DEBUG_LOG("%p %d %d %d\n", wq, sq->cc, sq->pc, sq->stop_room); + } + + ETH_DEBUG_LOG("%d %d\n", xsc_netdev_xmit_more(skb), netif_xmit_stopped(sq->txq)); + + if (!xsc_netdev_xmit_more(skb) || netif_xmit_stopped(sq->txq)) + xsc_sq_notify_hw(wq, sq->pc, sq); +} + +static void xsc_dump_error_sqcqe(struct xsc_sq *sq, + struct xsc_cqe *cqe) +{ + u32 ci = xsc_cqwq_get_ci(&sq->cq.wq); + struct net_device *netdev = sq->channel->netdev; + + net_err_ratelimited("Err cqe on dev %s cqn=0x%x ci=0x%x sqn=0x%x err_code=0x%x qpid=0x%x\n", + netdev->name, sq->cq.xcq.cqn, ci, + sq->sqn, get_cqe_opcode(cqe), cqe->qp_id); +} + +void xsc_free_tx_wqe(struct device *dev, struct xsc_sq *sq) +{ + struct 
xsc_tx_wqe_info *wi;
+	struct sk_buff *skb;
+	u16 ci, npkts = 0;
+	u32 nbytes = 0;
+	int i;
+
+	while (sq->cc != sq->pc) {
+		ci = xsc_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+		wi = &sq->db.wqe_info[ci];
+		skb = wi->skb;
+
+		if (!skb) { /* nop */
+			sq->cc++;
+			continue;
+		}
+
+		for (i = 0; i < wi->num_dma; i++) {
+			struct xsc_sq_dma *dma =
+				xsc_dma_get(sq, sq->dma_fifo_cc++);
+
+			xsc_tx_dma_unmap(dev, dma);
+		}
+
+		dev_kfree_skb_any(skb);
+		npkts++;
+		nbytes += wi->num_bytes;
+		sq->cc += wi->num_wqebbs;
+	}
+
+#ifdef XSC_BQL_SUPPORT
+	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+#endif
+}
+
+#ifdef NEED_CREATE_RX_THREAD
+DECLARE_PER_CPU(bool, txcqe_get);
+#endif
+
+bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget)
+{
+	struct xsc_adapter *adapter;
+	struct device *dev;
+	struct xsc_sq_stats *stats;
+	struct xsc_sq *sq;
+	struct xsc_cqe *cqe;
+	u32 dma_fifo_cc;
+	u32 nbytes = 0;
+	u16 npkts = 0;
+	u16 sqcc;
+	int i = 0;
+
+	sq = container_of(cq, struct xsc_sq, cq);
+	if (!test_bit(XSC_ETH_SQ_STATE_ENABLED, &sq->state))
+		return false;
+
+	adapter = sq->channel->adapter;
+	dev = adapter->dev;
+
+	cqe = xsc_cqwq_get_cqe(&cq->wq);
+	if (!cqe)
+		goto out;
+
+	stats = sq->stats;
+
+	if (unlikely(get_cqe_opcode(cqe) & BIT(7))) {
+		xsc_dump_error_sqcqe(sq, cqe);
+		stats->cqe_err++;
+		return false;
+	}
+
+#ifdef NEED_CREATE_RX_THREAD
+	__this_cpu_write(txcqe_get, true);
+#endif
+
+	sqcc = sq->cc;
+
+	/* avoid dirtying sq cache line every cqe */
+	dma_fifo_cc = sq->dma_fifo_cc;
+	i = 0;
+	do {
+		struct xsc_tx_wqe_info *wi;
+		struct sk_buff *skb;
+		int j;
+		u16 ci;
+
+		xsc_cqwq_pop(&cq->wq);
+
+		ci = xsc_wq_cyc_ctr2ix(&sq->wq, sqcc);
+		wi = &sq->db.wqe_info[ci];
+		skb = wi->skb;
+
+		/* in practice a completion may carry no skb (not only for NOPs) */
+		if (unlikely(!skb)) {
+			stats->txdone_skb_null++;
+			continue;
+		}
+
+		for (j = 0; j < wi->num_dma; j++) {
+			struct xsc_sq_dma *dma = xsc_dma_get(sq, dma_fifo_cc++);
+
+			xsc_tx_dma_unmap(dev, dma);
+		}
+
+#ifndef NEED_CREATE_RX_THREAD
+		npkts++;
+		nbytes += wi->num_bytes;
+		sqcc += wi->num_wqebbs;
+		napi_consume_skb(skb, napi_budget);
+#else
+		npkts++;
+		nbytes += wi->num_bytes;
+		sqcc += wi->num_wqebbs;
+		if (refcount_read(&skb->users) < 1)
+			stats->txdone_skb_refcnt_err++;
+		napi_consume_skb(skb, 0);
+#endif
+		ETH_DEBUG_LOG("ci=%d, sqcc=%d, pkts=%d\n", ci, sqcc, npkts);
+
+	} while ((++i <= napi_budget) && (cqe = xsc_cqwq_get_cqe(&cq->wq)));
+
+	stats->cqes += i;
+
+	xsc_cq_notify_hw(cq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	sq->dma_fifo_cc = dma_fifo_cc;
+	sq->cc = sqcc;
+	ETH_DEBUG_LOG("dma_fifo_cc=%d, sqcc=%d\n", dma_fifo_cc, sqcc);
+
+#ifdef XSC_BQL_SUPPORT
+	ETH_SQ_STATE(sq);
+	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+	ETH_SQ_STATE(sq);
+#endif
+
+	if (netif_tx_queue_stopped(sq->txq) &&
+	    xsc_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room)) {
+		netif_tx_wake_queue(sq->txq);
+		stats->wake++;
+	}
+
+out:
+	return (i == napi_budget);
+}
+
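+/* Build and post a single send WQE for @skb at producer index @pi:
+ * count the descriptors needed (control segment, optional linear
+ * part, one per page fragment), linearize the skb if they do not
+ * fit in one WQE, then fill the control and data segments and
+ * complete the WQE. Returns NETDEV_TX_OK even on drop, because the
+ * skb is always consumed.
+ */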
+static netdev_tx_t xsc_eth_xmit_frame(struct sk_buff *skb,
+				      struct xsc_sq *sq,
+				      struct xsc_tx_wqe *wqe,
+				      u16 pi)
+{
+	struct xsc_send_wqe_ctrl_seg *cseg;
+	struct xsc_wqe_data_seg *dseg;
+	struct xsc_tx_wqe_info *wi;
+	struct xsc_sq_stats *stats = sq->stats;
+	struct xsc_core_device *xdev = sq->cq.xdev;
+	u16 ds_cnt;
+	u16 mss, ihs, headlen;
+	u8 opcode;
+	u32 num_bytes;
+	int num_dma = 0;
+	u8 num_wqebbs = 0;
+
+retry_send:
+	/* Calc ihs and ds cnt, no writes to wqe yet */
+	/* the ctrl segment DS is subtracted again in ds_data_num */
+	ds_cnt = XSC_SEND_WQEBB_CTRL_NUM_DS;
+
+	/* on andes, inline headers are coupled with GSO */
+	if (skb_is_gso(skb)) {
+		opcode = XSC_OPCODE_RAW;
+		mss = skb_shinfo(skb)->gso_size;
+		ihs = xsc_tx_get_gso_ihs(sq, skb);
+		num_bytes = skb->len;
+		stats->packets += skb_shinfo(skb)->gso_segs;
+	} else {
+		opcode = XSC_OPCODE_RAW;
+		mss = 0;
+		ihs = 0;
+		num_bytes = skb->len;
+		stats->packets++;
+	}
+
+	/* linear data in skb */
+	headlen = skb->len - skb->data_len;
+	ds_cnt += !!headlen;
+	ds_cnt += skb_shinfo(skb)->nr_frags;
+	ETH_DEBUG_LOG("skb_len=%d data_len=%d nr_frags=%d mss=%d ihs=%d headlen=%d ds_cnt=%d\n",
+		      skb->len, skb->data_len, skb_shinfo(skb)->nr_frags,
+		      mss, ihs, headlen, ds_cnt);
+
+	/* dump only the linear part of the skb */
+	skbdata_debug_dump(skb, headlen, 1);
+
+	/* Check packet size. */
+	if (unlikely(mss == 0 && num_bytes > sq->hw_mtu)) {
+		sq->stats->oversize_pkts_sw_drop++;
+		goto err_drop;
+	}
+
+	num_wqebbs = DIV_ROUND_UP(ds_cnt, xdev->caps.send_ds_num);
+	/* if ds_cnt exceeds one WQE, linearize the skb and retry */
+	if (num_wqebbs != 1) {
+		sq->stats->skb_linear++;
+		if (skb_linearize(skb))
+			goto err_drop;
+		goto retry_send;
+	}
+
+	/* fill wqe */
+	wi = (struct xsc_tx_wqe_info *)&sq->db.wqe_info[pi];
+	cseg = &wqe->ctrl;
+	dseg = &wqe->data[0];
+
+	if (unlikely(num_bytes == 0))
+		goto err_drop;
+
+	xsc_txwqe_build_csegs(sq, skb, mss, ihs, headlen,
+			      opcode, ds_cnt, num_bytes, cseg);
+
+	/* the inline header is also transported by DMA */
+	num_dma = xsc_txwqe_build_dsegs(sq, skb, ihs, headlen, dseg);
+	if (unlikely(num_dma < 0))
+		goto err_drop;
+
+	xsc_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
+			   num_dma, wi);
+
+	stats->bytes += num_bytes;
+	stats->xmit_more += xsc_netdev_xmit_more(skb);
+
+	sq->dim_obj.sample.pkt_ctr = sq->stats->packets;
+	sq->dim_obj.sample.byte_ctr = sq->stats->bytes;
+
+	return NETDEV_TX_OK;
+
+err_drop:
+	ETH_DEBUG_LOG("%s: drop skb, ds_cnt=%d, num_wqebbs=%d, num_dma=%d\n",
+		      __func__, ds_cnt, num_wqebbs, num_dma);
+	stats->dropped++;
+	dev_kfree_skb_any(skb);
+
+	return NETDEV_TX_OK;
+}
+
+netdev_tx_t xsc_eth_xmit_start(struct sk_buff *skb, struct net_device *netdev)
+{
+	netdev_tx_t ret;
+	u32 queue_id;
+	struct xsc_sq *sq;
+	struct xsc_tx_wqe *wqe;
+	u16 pi;
+	struct xsc_adapter *adapter = netdev_priv(netdev);
+	struct xsc_core_device *xdev;
+
+	if (!skb) {
+		ETH_DEBUG_LOG("skb == NULL\n");
+		return NETDEV_TX_OK;
+	}
+
+	if (!adapter) {
+		ETH_DEBUG_LOG("adapter == NULL\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (adapter->status != XSCALE_ETH_DRIVER_OK) {
+		ETH_DEBUG_LOG("adapter->status = %d\n", adapter->status);
+		return NETDEV_TX_BUSY;
+	}
+
+	xdev = adapter->xdev;
+
+	queue_id = skb_get_queue_mapping(skb);
+	ETH_DEBUG_LOG("queue_id = %d\n", queue_id);
+	assert(adapter->xdev, queue_id < XSC_ETH_MAX_TC_TOTAL);
+
+	sq = adapter->txq2sq[queue_id];
+	if (!sq) {
+		ETH_DEBUG_LOG("sq = NULL\n");
+		return NETDEV_TX_BUSY;
+	}
+	ETH_DEBUG_LOG("sqn = %d\n", sq->sqn);
+
+	wqe = xsc_sq_fetch_wqe(sq, xdev->caps.send_ds_num * XSC_SEND_WQE_DS, &pi);
+	ETH_DEBUG_LOG("wqe = %p pi = %d\n", wqe, pi);
+	assert(adapter->xdev, wqe);
+
+#ifndef ANDES_DRIVER
+	skb = xsc_accel_handle_tx(skb);
+#endif
+
+	ret = xsc_eth_xmit_frame(skb, sq, wqe, pi);
+
+	ETH_DEBUG_LOG("ret = %d\n", ret);
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c
new file mode 100644
index 0000000000000000000000000000000000000000..13699c6dd0dc99ce015e787fc8d6855b3c7551cf
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c
@@ -0,0 +1,173 @@
+// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include "xsc_eth_txrx.h" +#include "xsc_eth_dim.h" + +void xsc_cq_notify_hw_rearm(struct xsc_cq *cq) +{ + union xsc_cq_doorbell db; + + ETH_DEBUG_LOG("cc = %d cqn = %d\n", cq->wq.cc, cq->xcq.cqn); + + db.val = 0; + db.cq_next_cid = cpu_to_le32(cq->wq.cc); + db.cq_id = cpu_to_le32(cq->xcq.cqn); + db.arm = 0; + + /* ensure doorbell record is visible to device before ringing the doorbell */ + wmb(); + writel(db.val, REG_ADDR(cq->xdev, cq->xdev->regs.complete_db)); + if (cq->channel && cq->channel->stats) + cq->channel->stats->arm++; +} + +void xsc_cq_notify_hw(struct xsc_cq *cq) +{ + struct xsc_core_device *xdev = cq->xdev; + union xsc_cq_doorbell db; + + ETH_DEBUG_LOG("cc = %d cqn = %d\n", cq->wq.cc, cq->xcq.cqn); + + dma_wmb(); + + db.val = 0; + db.cq_next_cid = cpu_to_le32(cq->wq.cc); + db.cq_id = cpu_to_le32(cq->xcq.cqn); + + writel(db.val, REG_ADDR(xdev, xdev->regs.complete_reg)); + if (cq->channel && cq->channel->stats) + cq->channel->stats->noarm++; +} + +static inline bool xsc_channel_no_affinity_change(struct xsc_channel *c) +{ + int current_cpu = smp_processor_id(); + + return cpumask_test_cpu(current_cpu, c->aff_mask); +} + +enum hrtimer_restart xsc_dim_reduce_timer_fn(struct hrtimer *timer) +{ + struct xsc_dim_reduce_work *reduce = (struct xsc_dim_reduce_work *)timer; + struct xsc_cq *cq = container_of(reduce, struct xsc_cq, cq_reduce); + + xsc_cq_notify_hw_rearm(cq); + + return HRTIMER_NORESTART; +} + +int xsc_eth_napi_poll(struct napi_struct *napi, int budget) +{ + struct xsc_channel *c = container_of(napi, struct xsc_channel, napi); + struct xsc_eth_params *params = &c->adapter->nic_param; + struct xsc_rq *rq = &c->qp.rq[0]; + struct xsc_sq *sq = NULL; + bool busy = false; + int work_done = 0; + int tx_budget = 0; + int i; + + rcu_read_lock(); + + clear_bit(XSC_CHANNEL_NAPI_SCHED, &c->flags); + + tx_budget = params->sq_size >> 2; + for (i = 0; i < c->num_tc; i++) + busy |= xsc_poll_tx_cq(&c->qp.sq[i].cq, tx_budget); + + /* budget=0 means: don't poll rx rings */ + if (likely(budget)) { + work_done = xsc_poll_rx_cq(&rq->cq, budget); + busy |= work_done == budget; + } + + busy |= rq->post_wqes(rq); + + if (busy) { + if (likely(xsc_channel_no_affinity_change(c))) { + rcu_read_unlock(); + return budget; + } + c->stats->aff_change++; + if (budget && work_done == budget) + work_done--; + } + +#ifdef NETDEV_NAPI_COMP_DONE_RETURN_VOID + napi_complete_done(napi, work_done); +#else + if (unlikely(!napi_complete_done(napi, work_done))) + goto out; +#endif + + for (i = 0; i < c->num_tc; i++) { + sq = &c->qp.sq[i]; + + if (test_bit(XSC_ETH_SQ_STATE_AM, &sq->state)) { + struct xsc_dim_reduce_work *reduce_sq = NULL; + u32 dim_us_tx = params->tx_cq_moderation.usec; + + xsc_handle_tx_dim(sq); + + reduce_sq = &sq->cq.cq_reduce; + if (hrtimer_is_queued(&reduce_sq->timer)) + continue; + + dim_us_tx = min_t(u32, sq->cq.xcq.dim_us, dim_us_tx); + sq->stats->dim_us = dim_us_tx; + if (dim_us_tx) { + hrtimer_start(&reduce_sq->timer, + ns_to_ktime(dim_us_tx * NSEC_PER_USEC), + HRTIMER_MODE_REL_PINNED); + continue; + } + } + xsc_cq_notify_hw_rearm(&sq->cq); + } + + if (test_bit(XSC_ETH_RQ_STATE_AM, &rq->state)) { + struct xsc_dim_reduce_work *reduce = &rq->cq.cq_reduce; + u32 dim_us = params->rx_cq_moderation.usec; + + xsc_handle_rx_dim(rq); + + if (c->stats->poll <= params->rx_dim_frames_low) { + dim_us = 0; + if 
(c->stats->poll == 0 && hrtimer_is_queued(&reduce->timer)) + goto out; + } else { + dim_us = min_t(u32, rq->cq.xcq.dim_us, dim_us); + } + rq->stats->dim_us = dim_us; + + if (dim_us) { + if (hrtimer_is_queued(&reduce->timer)) + goto out; + + reduce->dim_us = dim_us; + + if (dim_us <= params->rx_dim_usecs_low) { + udelay(dim_us); + xsc_cq_notify_hw_rearm(&rq->cq); + } else { + hrtimer_start(&reduce->timer, + ns_to_ktime(dim_us * NSEC_PER_USEC), + HRTIMER_MODE_REL_PINNED); + } + goto out; + } + } + + xsc_cq_notify_hw_rearm(&rq->cq); + +#ifndef NETDEV_NAPI_COMP_DONE_RETURN_VOID +out: +#endif + rcu_read_unlock(); + return work_done; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h new file mode 100644 index 0000000000000000000000000000000000000000..005f1ae4a55a38b880604941475d6d525e818dba --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_RXTX_H +#define XSC_RXTX_H + +#include "xsc_eth.h" +#include "common/qp.h" +#include "xsc_eth_debug.h" + +enum { + XSC_ETH_WQE_NONE_CSUM, + XSC_ETH_WQE_INNER_CSUM, + XSC_ETH_WQE_OUTER_CSUM, + XSC_ETH_WQE_INNER_AND_OUTER_CSUM, +}; + +#define ANDES_DRIVER + +static inline u32 xsc_cqwq_get_size(struct xsc_cqwq *wq) +{ + return wq->fbc.sz_m1 + 1; +} + +static inline struct xsc_cqe *xsc_cqwq_get_wqe(struct xsc_cqwq *wq, u32 ix) +{ + struct xsc_cqe *cqe = xsc_frag_buf_get_wqe(&wq->fbc, ix); + + ETH_DEBUG_LOG("cqe = %p\n", cqe); + + return cqe; +} + +static inline struct xsc_cqe *xsc_cqwq_get_cqe(struct xsc_cqwq *wq) +{ + struct xsc_cqe *cqe; + u8 cqe_ownership_bit; + u8 sw_ownership_val; + u32 ci = xsc_cqwq_get_ci(wq); + + cqe = xsc_cqwq_get_wqe(wq, ci); + + cqe_ownership_bit = cqe->owner & XSC_CQE_OWNER_MASK; + sw_ownership_val = xsc_cqwq_get_wrap_cnt(wq) & 1; + ETH_DEBUG_LOG("ci=%d, cqe_owner=%d, sw_owner=%d\n", + ci, cqe_ownership_bit, sw_ownership_val); + + if (cqe_ownership_bit != sw_ownership_val) + return NULL; + + /* ensure cqe content is read after cqe ownership bit */ + dma_rmb(); + + return cqe; +} + +void xsc_free_tx_wqe(struct device *dev, struct xsc_sq *sq); +int xsc_eth_napi_poll(struct napi_struct *napi, int budget); +bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget); +int xsc_poll_rx_cq(struct xsc_cq *cq, int budget); +void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, + struct xsc_rq *rq, struct xsc_cqe *cqe); +struct sk_buff *xsc_skb_from_cqe_linear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, u32 cqe_bcnt, u8 has_pph); +struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph); +bool xsc_eth_post_rx_wqes(struct xsc_rq *rq); +void xsc_cq_notify_hw(struct xsc_cq *cq); +void xsc_cq_notify_hw_rearm(struct xsc_cq *cq); +void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix); +netdev_tx_t xsc_eth_xmit_start(struct sk_buff *skb, struct net_device *netdev); + +void xsc_page_release_dynamic(struct xsc_rq *rq, + struct xsc_dma_info *dma_info, + bool recycle); + +enum hrtimer_restart xsc_dim_reduce_timer_fn(struct hrtimer *timer); + +#endif /* XSC_RXTX_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c new file mode 100644 index 0000000000000000000000000000000000000000..7379574f1a7e3f032fc60b44cce3f52eedb34bc8 --- /dev/null +++ 
b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include "xsc_eth.h"
+#include "common/vport.h"
+#include "common/xsc_fs.h"
+
+static int xsc_vport_context_update_vlans(struct xsc_adapter *adapter,
+					  enum xsc_vlan_rule_type rule_type,
+					  u16 vid, bool add)
+{
+	struct net_device *ndev = adapter->netdev;
+	struct xsc_core_device *xdev = adapter->xdev;
+	int err;
+
+	err = xsc_modify_nic_vport_vlans(xdev, vid, add);
+	if (err)
+		netdev_err(ndev, "Failed to modify vport vid:%d rule_type:%d err:%d\n",
+			   vid, rule_type, err);
+	return err;
+}
+
+static int xsc_add_vlan_rule(struct xsc_adapter *adapter,
+			     enum xsc_vlan_rule_type rule_type, u16 vid)
+{
+	return xsc_vport_context_update_vlans(adapter, rule_type, vid, true);
+}
+
+static void xsc_del_vlan_rule(struct xsc_adapter *adapter,
+			      enum xsc_vlan_rule_type rule_type, u16 vid)
+{
+	xsc_vport_context_update_vlans(adapter, rule_type, vid, false);
+}
+
+static int xsc_vlan_rx_add_cvid(struct xsc_adapter *adapter, u16 vid)
+{
+	int err;
+
+	set_bit(vid, adapter->fs.vlan.active_cvlans);
+
+	err = xsc_add_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+	if (err)
+		clear_bit(vid, adapter->fs.vlan.active_cvlans);
+
+	return err;
+}
+
+static int xsc_vlan_rx_add_svid(struct xsc_adapter *adapter, u16 vid)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	set_bit(vid, adapter->fs.vlan.active_svlans);
+
+	err = xsc_add_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+	if (err) {
+		clear_bit(vid, adapter->fs.vlan.active_svlans);
+		return err;
+	}
+
+	/* some netdev features depend on the S-VLAN state; re-evaluate them */
+	netdev_update_features(netdev);
+	return err;
+}
+
+int xsc_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct xsc_adapter *adapter = netdev_priv(dev);
+
+	if (!vid)
+		return 0;
+
+	if (be16_to_cpu(proto) == ETH_P_8021Q)
+		return xsc_vlan_rx_add_cvid(adapter, vid);
+	else if (be16_to_cpu(proto) == ETH_P_8021AD)
+		return xsc_vlan_rx_add_svid(adapter, vid);
+
+	return -EOPNOTSUPP;
+}
+
+int xsc_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct xsc_adapter *adapter = netdev_priv(dev);
+
+	if (!vid)
+		return 0;
+
+	if (be16_to_cpu(proto) == ETH_P_8021Q) {
+		clear_bit(vid, adapter->fs.vlan.active_cvlans);
+		xsc_del_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
+		clear_bit(vid, adapter->fs.vlan.active_svlans);
+		xsc_del_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+		netdev_update_features(dev);
+	}
+
+	return 0;
+}
+
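+/* Deferred rx-mode update, run from the adapter workqueue: compare
+ * the promisc/allmulti state requested via dev->flags against the
+ * cached state in the L2 table and issue a single vport-context
+ * command only when something actually changed.
+ */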
+void xsc_set_rx_mode_work(struct work_struct *work)
+{
+	int err = 0;
+	struct xsc_adapter *adapter = container_of(work, struct xsc_adapter,
+						   set_rx_mode_work);
+	struct net_device *dev = adapter->netdev;
+	struct xsc_l2_table *l2 = &adapter->fs.l2;
+
+	bool rx_mode_enable = (adapter->status == XSCALE_ETH_DRIVER_OK);
+	bool promisc_enabled = rx_mode_enable && (dev->flags & IFF_PROMISC);
+	bool allmulti_enabled = rx_mode_enable && (dev->flags & IFF_ALLMULTI);
+
+	bool enable_promisc = !l2->promisc_enabled && promisc_enabled;
+	bool disable_promisc = l2->promisc_enabled && !promisc_enabled;
+	bool enable_allmulti = !l2->allmulti_enabled && allmulti_enabled;
+	bool disable_allmulti = l2->allmulti_enabled && !allmulti_enabled;
+	bool change = enable_promisc | disable_promisc | enable_allmulti | disable_allmulti;
+
+	if (change) {
+		err = xsc_modify_nic_vport_promisc(adapter->xdev,
+						   (enable_allmulti | disable_allmulti),
+						   (enable_promisc | disable_promisc),
+						   allmulti_enabled, promisc_enabled);
+		if (err) {
+			xsc_core_err(adapter->xdev, "failed to set rx mode, err = %d\n", err);
+			return;
+		}
+	}
+
+	l2->promisc_enabled = promisc_enabled;
+	l2->allmulti_enabled = allmulti_enabled;
+}
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c
new file mode 100644
index 0000000000000000000000000000000000000000..32eb74563e4be442ef9e2956118250c9b39e8406
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+#include "common/xsc_core.h"
+#include "common/xsc_ioctl.h"
+#include "common/xsc_hsi.h"
+#include "common/xsc_port_ctrl.h"
+#include "common/xsc_cmd.h"
+#include "xsc_eth.h"
+#include "xsc_eth_debug.h"
+
+static void precmd_rlimit_set(void *data, u32 mac_port)
+{
+	struct xsc_rate_limit_set *req = (struct xsc_rate_limit_set *)data;
+
+	req->rate_cir = __cpu_to_be32(req->rate_cir);
+	req->limit_id = __cpu_to_be32(req->limit_id);
+}
+
+static void postcmd_rlimit_get(void *data)
+{
+	struct xsc_rate_limit_get *resp = (struct xsc_rate_limit_get *)data;
+	int i;
+
+	for (i = 0; i <= QOS_PRIO_MAX; i++)
+		resp->rate_cir[i] = __be32_to_cpu(resp->rate_cir[i]);
+
+	resp->max_limit_id = __be32_to_cpu(resp->max_limit_id);
+}
+
+static int xsc_dcbx_hw_qos_cmdq(struct xsc_core_device *xdev, u16 opcode,
+				void *input,
+				void *output,
+				u16 expect_req_size,
+				u16 expect_resp_size,
+				void (*precmdq)(void *, u32),
+				void (*postcmdq)(void *))
+{
+	struct xsc_qos_mbox_in *in;
+	struct xsc_qos_mbox_out *out;
+	int err;
+
+	in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+	out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL);
+	if (!out) {
+		kvfree(in);
+		return -ENOMEM;
+	}
+
+	if (input)
+		memcpy(&in->data, input, expect_req_size);
+
+	in->hdr.opcode = __cpu_to_be16(opcode);
+	in->req_prfx.mac_port = xdev->mac_port;
+
+	if (precmdq)
+		precmdq((void *)in->data, xdev->mac_port);
+
+	err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out,
+			   sizeof(*out) + expect_resp_size);
+	if (err)
+		goto out_free;
+
+	if (postcmdq)
+		postcmdq((void *)out->data);
+
+	if (output)
+		memcpy(output, out->data, expect_resp_size);
+
+out_free:
+	kvfree(in);
+	kvfree(out);
+	return err;
+}
+
+static int xsc_dcbx_hw_common(struct xsc_core_device *xdev, u16 opcode,
+			      void *input,
+			      void *output,
+			      u16 expect_req_size,
+			      u16 expect_resp_size,
+			      void (*precmdq)(void *, u32),
+			      void (*postcmdq)(void *))
+{
+	int ret;
+	struct xsc_inbox_hdr *hdr;
+
+	hdr = (struct xsc_inbox_hdr *)input;
+	hdr->opcode = __cpu_to_be16(opcode);
+
+	ret = xsc_cmd_exec(xdev, (void *)input, expect_req_size,
+			   (void *)output, expect_resp_size);
+
+	return ret;
+}
+
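+/* Dispatch one QoS/DCBX opcode to firmware with the request/response
+ * sizes it expects. Plain QoS opcodes go through the cmdq wrapper,
+ * which wraps the payload in a qos mailbox and stamps the mac port;
+ * the stats/LLDP/PFC-config opcodes already carry a complete mailbox
+ * and are sent as-is. A sketch of a caller (hypothetical locals)
+ * reading the per-priority rate limits:
+ *
+ *	struct xsc_rate_limit_get resp = {};
+ *	int err;
+ *
+ *	err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_RATE_LIMIT,
+ *				 NULL, &resp);
+ *	if (!err)
+ *		prio_rate = resp.rate_cir[prio];	// host byte order
+ */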
+int xsc_hw_kernel_call(struct xsc_core_device *xdev, u16 opcode, void *req, void *rsp)
+{
+	switch (opcode) {
+	case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_rate_limit_get),
+					    sizeof(struct xsc_rate_limit_get),
+					    NULL, postcmd_rlimit_get);
+	case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_rate_limit_set),
+					    0, precmd_rlimit_set, NULL);
+	case XSC_CMD_OP_IOCTL_GET_PFC:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    0, sizeof(struct xsc_pfc_get),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_PFC:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_pfc_set),
+					    sizeof(struct xsc_pfc_set),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_TRUST_MODE:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, 0,
+					    sizeof(struct xsc_trust_mode_get),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_TRUST_MODE:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_trust_mode_set), 0,
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_DSCP_PMT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    0, sizeof(struct xsc_dscp_pmt_get),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_DSCP_PMT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_dscp_pmt_set),
+					    0, NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_SP:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    0, sizeof(struct xsc_sp_get),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_SP:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_sp_set),
+					    0, NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_WEIGHT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    0, sizeof(struct xsc_weight_get),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_WEIGHT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_weight_set),
+					    0, NULL, NULL);
+	case XSC_CMD_OP_QUERY_PFC_PRIO_STATS:
+		return xsc_dcbx_hw_common(xdev, opcode, req, rsp,
+					  sizeof(struct xsc_pfc_prio_stats_mbox_in),
+					  sizeof(struct xsc_pfc_prio_stats_mbox_out),
+					  NULL, NULL);
+	case XSC_CMD_OP_GET_LLDP_STATUS:
+	case XSC_CMD_OP_SET_LLDP_STATUS:
+		return xsc_dcbx_hw_common(xdev, opcode, req, rsp,
+					  sizeof(struct xsc_lldp_status_mbox_in),
+					  sizeof(struct xsc_lldp_status_mbox_out),
+					  NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH:
+		return xsc_dcbx_hw_common(xdev, opcode, req, rsp,
+					  sizeof(struct xsc_pfc_set_drop_th_mbox_in),
+					  sizeof(struct xsc_pfc_set_drop_th_mbox_out),
+					  NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS:
+		return xsc_dcbx_hw_common(xdev, opcode, req, rsp,
+					  sizeof(struct xsc_pfc_get_cfg_status_mbox_in),
+					  sizeof(struct xsc_pfc_get_cfg_status_mbox_out),
+					  NULL, NULL);
+	default:
+		xsc_core_dbg(xdev, "unknown type=%d\n", opcode);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9043f85fa057cd159e9877da8e861c985c3e5c7
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved. 
+ */ + +#ifndef XSC_HW_COMMON_H +#define XSC_HW_COMMON_H + +int xsc_hw_kernel_call(struct xsc_core_device *xdev, u16 opcode, void *req, void *rsp); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h new file mode 100644 index 0000000000000000000000000000000000000000..bbf05a26c7407b9cadfe3dc2b87fd0761a06ffff --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h @@ -0,0 +1,280 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_QUEUE_H +#define XSC_QUEUE_H + +#include +#include + +#include + +#include "../pci/wq.h" + +enum { + XSC_SEND_WQE_DS = 16, + XSC_SEND_WQE_BB = 64, +}; + +enum { + XSC_RECV_WQE_DS = 16, + XSC_RECV_WQE_BB = 16, +}; + +#define XSC_SEND_WQEBB_NUM_DS (XSC_SEND_WQE_BB / XSC_SEND_WQE_DS) +#define XSC_LOG_SEND_WQEBB_NUM_DS ilog2(XSC_SEND_WQEBB_NUM_DS) + +#define XSC_RECV_WQEBB_NUM_DS (XSC_RECV_WQE_BB / XSC_RECV_WQE_DS) +#define XSC_LOG_RECV_WQEBB_NUM_DS ilog2(XSC_RECV_WQEBB_NUM_DS) + +#define XSC_SEND_WQEBB_CTRL_NUM_DS 1 + +enum { + XSC_ETH_RQ_STATE_ENABLED, + XSC_ETH_RQ_STATE_AM, + XSC_ETH_RQ_STATE_CACHE_REDUCE_PENDING, +}; + +enum { + XSC_ETH_SQ_STATE_ENABLED, + XSC_ETH_SQ_STATE_AM, +}; + +struct xsc_dma_info { + struct page *page; + dma_addr_t addr; +}; + +struct xsc_wqe_frag_info { + struct xsc_dma_info *di; + u32 offset; + u8 last_in_page; + u8 is_available; +}; + +struct xsc_rq_frag_info { + int frag_size; + int frag_stride; +}; + +struct xsc_rq_frags_info { + struct xsc_rq_frag_info arr[XSC_MAX_RX_FRAGS]; + u8 num_frags; + u8 log_num_frags; + u8 wqe_bulk; + u8 wqe_bulk_min; + u8 frags_max_num; +}; + +#define xsc_dim_t struct dim +#define xsc_dim_sample_t struct dim_sample +#define xsc_dim_cq_moder_t struct dim_cq_moder + +struct xsc_dim { + xsc_dim_t dim; + xsc_dim_sample_t sample; +}; + +struct xsc_dim_reduce_work { + struct hrtimer timer; + u32 dim_us; +}; + +struct xsc_cq { + /* data path - accessed per cqe */ + struct xsc_cqwq wq; + + /* data path - accessed per napi poll */ + u16 event_ctr; + struct napi_struct *napi; + struct xsc_core_cq xcq; + struct xsc_channel *channel; + + /* control */ + struct xsc_core_device *xdev; + struct xsc_wq_ctrl wq_ctrl; + u8 rx; + struct xsc_dim_reduce_work cq_reduce; +} ____cacheline_aligned_in_smp; + +struct xsc_pcie_lat_work { + struct xsc_core_device *xdev; + struct xsc_adapter *adapter; + struct delayed_work work; + u16 enable; + u32 period; +}; + +#define XSC_PAGE_CACHE_LOG_MAX_RQ_MULT 6 +#define XSC_PAGE_CACHE_REDUCE_WORK_INTERVAL 200 /* msecs */ +#define XSC_PAGE_CACHE_REDUCE_GRACE_PERIOD 1000 /* msecs */ +#define XSC_PAGE_CACHE_REDUCE_SUCCESS_CNT 4 + +struct xsc_page_cache_reduce { + struct delayed_work reduce_work; + u32 success; + unsigned long next_ts; + unsigned long grace_period; + unsigned long delay; + struct xsc_dma_info *pending; + u32 npages; +}; + +struct xsc_page_cache { + struct xsc_dma_info *page_cache; + u32 head; + u32 tail; + u32 sz; + u32 resv; +}; + +struct xsc_rq; +struct xsc_cqe; +typedef void (*xsc_fp_handle_rx_cqe)(struct xsc_cqwq *cqwq, struct xsc_rq *rq, + struct xsc_cqe *cqe); +typedef bool (*xsc_fp_post_rx_wqes)(struct xsc_rq *rq); +typedef void (*xsc_fp_dealloc_wqe)(struct xsc_rq *rq, u16 ix); +typedef struct sk_buff * (*xsc_fp_skb_from_cqe)(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, u32 cqe_bcnt, u8 has_pph); + +struct xsc_rq { + struct xsc_core_qp cqp; + struct { + struct xsc_wq_cyc 
wq; + struct xsc_wqe_frag_info *frags; + struct xsc_dma_info *di; + struct xsc_rq_frags_info info; + xsc_fp_skb_from_cqe skb_from_cqe; + } wqe; + + struct { + u16 headroom; + u8 map_dir; /* dma map direction */ + } buff; + + struct page_pool *page_pool; + struct xsc_wq_ctrl wq_ctrl; + struct xsc_cq cq; + u32 rqn; + int ix; + + unsigned long state; + struct work_struct recover_work; + struct xsc_rq_stats *stats; + struct xsc_dim dim_obj; + + u32 hw_mtu; + u32 frags_sz; + + xsc_fp_handle_rx_cqe handle_rx_cqe; + xsc_fp_post_rx_wqes post_wqes; + xsc_fp_dealloc_wqe dealloc_wqe; + struct xsc_page_cache page_cache; +} ____cacheline_aligned_in_smp; + +struct xsc_tx_wqe_info { + struct sk_buff *skb; + u32 num_bytes; + u8 num_wqebbs; + u8 num_dma; +}; + +enum xsc_dma_map_type { + XSC_DMA_MAP_SINGLE, + XSC_DMA_MAP_PAGE +}; + +struct xsc_sq_dma { + dma_addr_t addr; + u32 size; + enum xsc_dma_map_type type; +}; + +struct xsc_sq { + struct xsc_core_qp cqp; + /* dirtied @completion */ + u16 cc; + u32 dma_fifo_cc; + struct xsc_dim dim_obj; + + /* dirtied @xmit */ + u16 pc ____cacheline_aligned_in_smp; + u32 dma_fifo_pc; + + struct xsc_cq cq; + + /* read only */ + struct xsc_wq_cyc wq; + u32 dma_fifo_mask; + struct xsc_sq_stats *stats; + struct { + struct xsc_sq_dma *dma_fifo; + struct xsc_tx_wqe_info *wqe_info; + } db; + void __iomem *uar_map; + struct netdev_queue *txq; + u32 sqn; + u16 stop_room; + + __be32 mkey_be; + unsigned long state; + unsigned int hw_mtu; + + /* control path */ + struct xsc_wq_ctrl wq_ctrl; + struct xsc_channel *channel; + int ch_ix; + int txq_ix; + struct work_struct recover_work; +} ____cacheline_aligned_in_smp; + +struct rdma_opcode_data { + u32 immdt_value; +} __packed __aligned(4); + +struct raw_opcode_data { + u16 has_pph : 1; + u16 so_type : 1; + u16 so_data_size : 14; + u8 rsv; + u8 so_hdr_len; +} __packed __aligned(4); + +struct rawtype_opcode_data { + u16 desc_id; + u16 is_last_wqe : 1; + u16 dst_qp_id : 15; +} __packed __aligned(4); + +struct xsc_wqe_ctrl_seg { + u8 msg_opcode; + u8 with_immdt : 1; + u8 csum_en : 2; + u8 ds_data_num : 5; + u16 wqe_id; + u32 msg_len; + union { + struct rdma_opcode_data _rdma_opcode_data; + struct raw_opcode_data _raw_opcode_data; + struct rawtype_opcode_data _rawtype_opcode_data; + } opcode_data; + u32 se : 1; + u32 ce : 1; + u32 rsv : 30; +}; + +static inline u8 get_cqe_opcode(struct xsc_cqe *cqe) +{ + return cqe->msg_opcode; +} + +static inline void xsc_dump_err_cqe(struct xsc_core_device *dev, + struct xsc_cqe *cqe) +{ + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, cqe, + sizeof(*cqe), false); +} + +#endif /* XSC_QUEUE_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig b/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..fafa69b8a478699e4ea9e06d4880e3986467bc6a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Yunsilicon PCI configuration +# + +config YUNSILICON_XSC_PCI + tristate "Yunsilicon XSC PCI driver" + default n + select NET_DEVLINK + select PAGE_POOL + help + This driver is common for Yunsilicon XSC + ethernet and RDMA drivers. + + To compile this driver as a module, choose M here. The module + will be called xsc_pci. 
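A note on the WQE sizing constants in xsc_queue.h above: a 64-byte send WQE basic block (XSC_SEND_WQE_BB) carries four 16-byte data segments (XSC_SEND_WQE_DS), which is exactly what XSC_SEND_WQEBB_NUM_DS and XSC_LOG_SEND_WQEBB_NUM_DS encode. A minimal sketch of how such constants are typically combined (illustrative only; the helper name xsc_wqebbs_for_ds is hypothetical and not part of this patch)::

    /* WQE basic blocks needed to carry ds_cnt 16-byte data segments */
    static inline u16 xsc_wqebbs_for_ds(u16 ds_cnt)
    {
    	return DIV_ROUND_UP(ds_cnt, XSC_SEND_WQEBB_NUM_DS);
    }
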
diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..45a7d473cac795fc063c5c39df596772455031dd --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc + +obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc_pci.o + +xsc_pci-y := main.o eq.o intf.o debugfs.o alloc.o wq.o cq.o qp.o \ + cmd2.o fw.o port.o mr.o pd.o xsc_lag.o xsc_pci_ctrl.o\ + pci_irq.o vport.o sriov.o sriov_sysfs.o devlink.o eswitch.o xsc_port_ctrl.o res_obj.o qpts.o\ + fw/cmd.o \ + fw/xsc_flow.o \ + fw/xsc_res.o \ + fw/osdep.o \ + fw/xsc_mem.o diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c new file mode 100644 index 0000000000000000000000000000000000000000..cdef1b996fdfb13b7ac07b2d62fa61f18df96f0a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "common/driver.h" + +/* Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. + */ + +int xsc_buf_alloc(struct xsc_core_device *xdev, int size, int max_direct, + struct xsc_buf *buf) +{ + dma_addr_t t; + + buf->size = size; + if (size <= max_direct) { + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_alloc_coherent(&xdev->pdev->dev, + size, &t, GFP_KERNEL | __GFP_ZERO); + if (!buf->direct.buf) + return -ENOMEM; + + buf->direct.map = t; + + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } + } else { + int i; + + buf->direct.buf = NULL; + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; + buf->page_shift = PAGE_SHIFT; + buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), + GFP_KERNEL); + if (!buf->page_list) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; i++) { + buf->page_list[i].buf = + dma_alloc_coherent(&xdev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL | __GFP_ZERO); + if (!buf->page_list[i].buf) + goto err_free; + + buf->page_list[i].map = t; + } + + if (BITS_PER_LONG == 64) { + struct page **pages; + + pages = kmalloc_array(buf->nbufs, sizeof(*pages), GFP_KERNEL); + if (!pages) + goto err_free; + for (i = 0; i < buf->nbufs; i++) { + if (is_vmalloc_addr(buf->page_list[i].buf)) + pages[i] = vmalloc_to_page(buf->page_list[i].buf); + else + pages[i] = virt_to_page(buf->page_list[i].buf); + } + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + goto err_free; + } + } + + return 0; + +err_free: + xsc_buf_free(xdev, buf); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(xsc_buf_alloc); + +void xsc_buf_free(struct xsc_core_device *xdev, struct xsc_buf *buf) +{ + int i; + + if (buf->nbufs == 1) { + dma_free_coherent(&xdev->pdev->dev, buf->size, buf->direct.buf, + buf->direct.map); + } else { + if (BITS_PER_LONG == 64 && buf->direct.buf) + 
vunmap(buf->direct.buf); + + for (i = 0; i < buf->nbufs; i++) + if (buf->page_list[i].buf) + dma_free_coherent(&xdev->pdev->dev, PAGE_SIZE, + buf->page_list[i].buf, + buf->page_list[i].map); + kfree(buf->page_list); + } +} +EXPORT_SYMBOL_GPL(xsc_buf_free); + +void xsc_fill_page_array(struct xsc_buf *buf, __be64 *pas, int npages) +{ + u64 addr; + int i; + int shift = PAGE_SHIFT - PAGE_SHIFT_4K; + int mask = (1 << shift) - 1; + + for (i = 0; i < npages; i++) { + if (buf->nbufs == 1) + addr = buf->direct.map + (i << PAGE_SHIFT_4K); + else + addr = buf->page_list[i >> shift].map + ((i & mask) << PAGE_SHIFT_4K); + + pas[i] = cpu_to_be64(addr); + } +} +EXPORT_SYMBOL_GPL(xsc_fill_page_array); + +void xsc_fill_page_frag_array(struct xsc_frag_buf *buf, __be64 *pas, int npages) +{ + int i; + dma_addr_t addr; + int shift = PAGE_SHIFT - PAGE_SHIFT_4K; + int mask = (1 << shift) - 1; + + for (i = 0; i < npages; i++) { + addr = buf->frags[i >> shift].map + ((i & mask) << PAGE_SHIFT_4K); + pas[i] = cpu_to_be64(addr); + } +} +EXPORT_SYMBOL_GPL(xsc_fill_page_frag_array); + +static void *xsc_dma_zalloc_coherent_node(struct xsc_core_device *xdev, + size_t size, dma_addr_t *dma_handle, + int node) +{ + struct xsc_dev_resource *dev_res = xdev->dev_res; + struct device *device = &xdev->pdev->dev; + int original_node; + void *cpu_handle; + + /* WA for kernels that don't use numa_mem_id in alloc_pages_node */ + if (node == NUMA_NO_NODE) +#ifdef HAVE_NUMA_MEM_ID + node = numa_mem_id(); +#else + node = first_memory_node; +#endif + + mutex_lock(&dev_res->alloc_mutex); + original_node = dev_to_node(device); + set_dev_node(device, node); + cpu_handle = dma_alloc_coherent(device, size, dma_handle, + GFP_KERNEL); + set_dev_node(device, original_node); + mutex_unlock(&dev_res->alloc_mutex); + return cpu_handle; +} + +int xsc_frag_buf_alloc_node(struct xsc_core_device *xdev, int size, + struct xsc_frag_buf *buf, int node) +{ + int i; + + buf->size = size; + buf->npages = DIV_ROUND_UP(size, PAGE_SIZE); + buf->page_shift = PAGE_SHIFT; + buf->frags = kcalloc(buf->npages, sizeof(struct xsc_buf_list), + GFP_KERNEL); + if (!buf->frags) + goto err_out; + + for (i = 0; i < buf->npages; i++) { + struct xsc_buf_list *frag = &buf->frags[i]; + int frag_sz = min_t(int, size, PAGE_SIZE); + + frag->buf = xsc_dma_zalloc_coherent_node(xdev, frag_sz, + &frag->map, node); + if (!frag->buf) + goto err_free_buf; + if (frag->map & ((1 << buf->page_shift) - 1)) { + dma_free_coherent(&xdev->pdev->dev, frag_sz, + buf->frags[i].buf, buf->frags[i].map); + xsc_core_warn(xdev, "unexpected map alignment: %pad, page_shift=%d\n", + &frag->map, buf->page_shift); + goto err_free_buf; + } + size -= frag_sz; + } + + return 0; + +err_free_buf: + while (i--) + dma_free_coherent(&xdev->pdev->dev, PAGE_SIZE, buf->frags[i].buf, + buf->frags[i].map); + kfree(buf->frags); +err_out: + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(xsc_frag_buf_alloc_node); + +void xsc_frag_buf_free(struct xsc_core_device *xdev, struct xsc_frag_buf *buf) +{ + int size = buf->size; + int i; + + for (i = 0; i < buf->npages; i++) { + int frag_sz = min_t(int, size, PAGE_SIZE); + + dma_free_coherent(&xdev->pdev->dev, frag_sz, buf->frags[i].buf, + buf->frags[i].map); + size -= frag_sz; + } + kfree(buf->frags); +} +EXPORT_SYMBOL_GPL(xsc_frag_buf_free); + +static struct xsc_db_pgdir *xsc_alloc_db_pgdir(struct xsc_core_device *xdev, + int node) +{ + u32 db_per_page = PAGE_SIZE / cache_line_size(); + struct xsc_db_pgdir *pgdir; + + pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); + if (!pgdir) + return 
NULL; + + pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL); + if (!pgdir->bitmap) { + kfree(pgdir); + return NULL; + } + + bitmap_fill(pgdir->bitmap, db_per_page); + + pgdir->db_page = xsc_dma_zalloc_coherent_node(xdev, PAGE_SIZE, + &pgdir->db_dma, node); + if (!pgdir->db_page) { + bitmap_free(pgdir->bitmap); + kfree(pgdir); + return NULL; + } + + return pgdir; +} + +static int xsc_alloc_db_from_pgdir(struct xsc_db_pgdir *pgdir, + struct xsc_db *db) +{ + u32 db_per_page = PAGE_SIZE / cache_line_size(); + int offset; + int i; + + i = find_first_bit(pgdir->bitmap, db_per_page); + if (i >= db_per_page) + return -ENOMEM; + + __clear_bit(i, pgdir->bitmap); + + db->u.pgdir = pgdir; + db->index = i; + offset = db->index * cache_line_size(); + db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); + db->dma = pgdir->db_dma + offset; + + db->db[0] = 0; + db->db[1] = 0; + + return 0; +} + +int xsc_db_alloc_node(struct xsc_core_device *xdev, struct xsc_db *db, int node) +{ + struct xsc_db_pgdir *pgdir; + int ret = 0; + + mutex_lock(&xdev->dev_res->pgdir_mutex); + + list_for_each_entry(pgdir, &xdev->dev_res->pgdir_list, list) + if (!xsc_alloc_db_from_pgdir(pgdir, db)) + goto out; + + pgdir = xsc_alloc_db_pgdir(xdev, node); + if (!pgdir) { + ret = -ENOMEM; + goto out; + } + + list_add(&pgdir->list, &xdev->dev_res->pgdir_list); + + /* This should never fail -- we just allocated an empty page: */ + WARN_ON(xsc_alloc_db_from_pgdir(pgdir, db)); + +out: + mutex_unlock(&xdev->dev_res->pgdir_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(xsc_db_alloc_node); + +int xsc_db_alloc(struct xsc_core_device *xdev, struct xsc_db *db) +{ + return xsc_db_alloc_node(xdev, db, xdev->priv.numa_node); +} +EXPORT_SYMBOL_GPL(xsc_db_alloc); + +void xsc_db_free(struct xsc_core_device *xdev, struct xsc_db *db) +{ + u32 db_per_page = PAGE_SIZE / cache_line_size(); + + mutex_lock(&xdev->dev_res->pgdir_mutex); + + __set_bit(db->index, db->u.pgdir->bitmap); + + if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) { + dma_free_coherent(&xdev->pdev->dev, PAGE_SIZE, + db->u.pgdir->db_page, db->u.pgdir->db_dma); + list_del(&db->u.pgdir->list); + bitmap_free(db->u.pgdir->bitmap); + kfree(db->u.pgdir); + } + + mutex_unlock(&xdev->dev_res->pgdir_mutex); +} +EXPORT_SYMBOL_GPL(xsc_db_free); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c b/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c new file mode 100644 index 0000000000000000000000000000000000000000..9f7966169b182a08a5de7e292bdbf18f34e0a303 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c @@ -0,0 +1,2148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifdef HAVE_GENERIC_KMAP_TYPE +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/driver.h" +#include +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "tmp_cmdq_defines.h" + +enum { + CMD_IF_REV = 3, +}; + +enum { + CMD_MODE_POLLING, + CMD_MODE_EVENTS +}; + +enum { + NUM_LONG_LISTS = 2, + NUM_MED_LISTS = 64, + LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + + XSC_CMD_DATA_BLOCK_SIZE, + MED_LIST_SIZE = 16 + XSC_CMD_DATA_BLOCK_SIZE, +}; + +enum { + XSC_CMD_DELIVERY_STAT_OK = 0x0, + XSC_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, + XSC_CMD_DELIVERY_STAT_TOK_ERR = 0x2, + XSC_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, + XSC_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, + XSC_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, + XSC_CMD_DELIVERY_STAT_FW_ERR = 0x6, + XSC_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, + XSC_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, + XSC_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, + XSC_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, +}; + +static struct xsc_cmd_work_ent *alloc_cmd(struct xsc_cmd *cmd, + struct xsc_cmd_msg *in, + struct xsc_rsp_msg *out) +{ + struct xsc_cmd_work_ent *ent; + + ent = kzalloc(sizeof(*ent), GFP_KERNEL); + if (!ent) + return ERR_PTR(-ENOMEM); + + ent->in = in; + ent->out = out; + ent->cmd = cmd; + + return ent; +} + +static u8 alloc_token(struct xsc_cmd *cmd) +{ + u8 token; + + spin_lock(&cmd->token_lock); + token = cmd->token++ % 255 + 1; + spin_unlock(&cmd->token_lock); + + return token; +} + +static int alloc_ent(struct xsc_cmd *cmd) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); + if (ret < cmd->max_reg_cmds) + clear_bit(ret, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); + + return ret < cmd->max_reg_cmds ? 
ret : -ENOMEM; +} + +static void free_ent(struct xsc_cmd *cmd, int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + set_bit(idx, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); +} + +static struct xsc_cmd_layout *get_inst(struct xsc_cmd *cmd, int idx) +{ + return cmd->cmd_buf + (idx << cmd->log_stride); +} + +static struct xsc_rsp_layout *get_cq_inst(struct xsc_cmd *cmd, int idx) +{ + return cmd->cq_buf + (idx << cmd->log_stride); +} + +static u8 xor8_buf(void *buf, int len) +{ + u8 *ptr = buf; + u8 sum = 0; + int i; + + for (i = 0; i < len; i++) + sum ^= ptr[i]; + + return sum; +} + +static int verify_block_sig(struct xsc_cmd_prot_block *block) +{ + if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) + return -EINVAL; + + if (xor8_buf(block, sizeof(*block)) != 0xff) + return -EINVAL; + + return 0; +} + +static void calc_block_sig(struct xsc_cmd_prot_block *block, u8 token) +{ + block->token = token; + block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); + block->sig = ~xor8_buf(block, sizeof(*block) - 1); +} + +static void calc_chain_sig(struct xsc_cmd_mailbox *head, u8 token) +{ + struct xsc_cmd_mailbox *next = head; + + while (next) { + calc_block_sig(next->buf, token); + next = next->next; + } +} + +static void set_signature(struct xsc_cmd_work_ent *ent) +{ + ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); + calc_chain_sig(ent->in->next, ent->token); + calc_chain_sig(ent->out->next, ent->token); +} + +static void free_cmd(struct xsc_cmd_work_ent *ent) +{ + kfree(ent); +} + +static int verify_signature(struct xsc_cmd_work_ent *ent) +{ + struct xsc_cmd_mailbox *next = ent->out->next; + int err; + u8 sig; + + sig = xor8_buf(ent->rsp_lay, sizeof(*ent->rsp_lay)); + if (sig != 0xff) + return -EINVAL; + + while (next) { + err = verify_block_sig(next->buf); + if (err) + return err; + + next = next->next; + } + + return 0; +} + +static void dump_buf(void *buf, int size, int offset) +{ + __be32 *p = buf; + int i; + + for (i = 0; i < size; i += 16) { + xsc_pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), + be32_to_cpu(p[1]), be32_to_cpu(p[2]), be32_to_cpu(p[3])); + p += 4; + offset += 16; + } + xsc_pr_debug("\n"); +} + +const char *xsc_command_str(int command) +{ + switch (command) { + case XSC_CMD_OP_QUERY_HCA_CAP: + return "QUERY_HCA_CAP"; + + case XSC_CMD_OP_ENABLE_HCA: + return "ENABLE_HCA"; + + case XSC_CMD_OP_DISABLE_HCA: + return "DISABLE_HCA"; + + case XSC_CMD_OP_MODIFY_HCA: + return "MODIFY_HCA"; + + case XSC_CMD_OP_QUERY_CMDQ_VERSION: + return "QUERY_CMDQ_VERSION"; + + case XSC_CMD_OP_QUERY_MSIX_TBL_INFO: + return "QUERY_MSIX_TBL_INFO"; + + case XSC_CMD_OP_FUNCTION_RESET: + return "FUNCTION_RESET"; + + case XSC_CMD_OP_ALLOC_IA_LOCK: + return "ALLOC_IA_LOCK"; + + case XSC_CMD_OP_RELEASE_IA_LOCK: + return "RELEASE_IA_LOCK"; + + case XSC_CMD_OP_DUMMY: + return "DUMMY_CMD"; + + case XSC_CMD_OP_SET_DEBUG_INFO: + return "SET_DEBUG_INFO"; + + case XSC_CMD_OP_CREATE_MKEY: + return "CREATE_MKEY"; + + case XSC_CMD_OP_QUERY_MKEY: + return "QUERY_MKEY"; + + case XSC_CMD_OP_DESTROY_MKEY: + return "DESTROY_MKEY"; + + case XSC_CMD_OP_QUERY_SPECIAL_CONTEXTS: + return "QUERY_SPECIAL_CONTEXTS"; + + case XSC_CMD_OP_SET_MPT: + return "SET_MPT"; + + case XSC_CMD_OP_SET_MTT: + return "SET_MTT"; + + case XSC_CMD_OP_CREATE_EQ: + return "CREATE_EQ"; + + case XSC_CMD_OP_DESTROY_EQ: + return "DESTROY_EQ"; + + case XSC_CMD_OP_QUERY_EQ: + return "QUERY_EQ"; + + case 
XSC_CMD_OP_CREATE_CQ: + return "CREATE_CQ"; + + case XSC_CMD_OP_DESTROY_CQ: + return "DESTROY_CQ"; + + case XSC_CMD_OP_QUERY_CQ: + return "QUERY_CQ"; + + case XSC_CMD_OP_MODIFY_CQ: + return "MODIFY_CQ"; + + case XSC_CMD_OP_CREATE_QP: + return "CREATE_QP"; + + case XSC_CMD_OP_DESTROY_QP: + return "DESTROY_QP"; + + case XSC_CMD_OP_RST2INIT_QP: + return "RST2INIT_QP"; + + case XSC_CMD_OP_INIT2RTR_QP: + return "INIT2RTR_QP"; + + case XSC_CMD_OP_RTR2RTS_QP: + return "RTR2RTS_QP"; + + case XSC_CMD_OP_RTS2RTS_QP: + return "RTS2RTS_QP"; + + case XSC_CMD_OP_SQERR2RTS_QP: + return "SQERR2RTS_QP"; + + case XSC_CMD_OP_2ERR_QP: + return "2ERR_QP"; + + case XSC_CMD_OP_RTS2SQD_QP: + return "RTS2SQD_QP"; + + case XSC_CMD_OP_SQD2RTS_QP: + return "SQD2RTS_QP"; + + case XSC_CMD_OP_2RST_QP: + return "2RST_QP"; + + case XSC_CMD_OP_QUERY_QP: + return "QUERY_QP"; + + case XSC_CMD_OP_CONF_SQP: + return "CONF_SQP"; + + case XSC_CMD_OP_MAD_IFC: + return "MAD_IFC"; + + case XSC_CMD_OP_INIT2INIT_QP: + return "INIT2INIT_QP"; + + case XSC_CMD_OP_SQD2SQD_QP: + return "SQD2SQD_QP"; + + case XSC_CMD_OP_QUERY_QP_FLUSH_STATUS: + return "QUERY_QP_FLUSH_STATUS"; + + case XSC_CMD_OP_ALLOC_PD: + return "ALLOC_PD"; + + case XSC_CMD_OP_DEALLOC_PD: + return "DEALLOC_PD"; + + case XSC_CMD_OP_ACCESS_REG: + return "ACCESS_REG"; + + case XSC_CMD_OP_MODIFY_RAW_QP: + return "MODIFY_RAW_QP"; + + case XSC_CMD_OP_ENABLE_NIC_HCA: + return "ENABLE_NIC_HCA"; + + case XSC_CMD_OP_DISABLE_NIC_HCA: + return "DISABLE_NIC_HCA"; + + case XSC_CMD_OP_MODIFY_NIC_HCA: + return "MODIFY_NIC_HCA"; + + case XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT: + return "QUERY_NIC_VPORT_CONTEXT"; + + case XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: + return "MODIFY_NIC_VPORT_CONTEXT"; + + case XSC_CMD_OP_QUERY_VPORT_STATE: + return "QUERY_VPORT_STATE"; + + case XSC_CMD_OP_MODIFY_VPORT_STATE: + return "MODIFY_VPORT_STATE"; + + case XSC_CMD_OP_QUERY_HCA_VPORT_CONTEXT: + return "QUERY_HCA_VPORT_CONTEXT"; + + case XSC_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: + return "MODIFY_HCA_VPORT_CONTEXT"; + + case XSC_CMD_OP_QUERY_HCA_VPORT_GID: + return "QUERY_HCA_VPORT_GID"; + + case XSC_CMD_OP_QUERY_HCA_VPORT_PKEY: + return "QUERY_HCA_VPORT_PKEY"; + + case XSC_CMD_OP_QUERY_VPORT_COUNTER: + return "QUERY_VPORT_COUNTER"; + + case XSC_CMD_OP_QUERY_PRIO_STATS: + return "QUERY_PRIO_STATS"; + + case XSC_CMD_OP_QUERY_PHYPORT_STATE: + return "QUERY_PHYPORT_STATE"; + + case XSC_CMD_OP_QUERY_EVENT_TYPE: + return "QUERY_EVENT_TYPE"; + + case XSC_CMD_OP_QUERY_LINK_INFO: + return "QUERY_LINK_INFO"; + + case XSC_CMD_OP_MODIFY_LINK_INFO: + return "MODIFY_LINK_INFO"; + + case XSC_CMD_OP_MODIFY_FEC_PARAM: + return "MODIFY_FEC_PARAM"; + + case XSC_CMD_OP_QUERY_FEC_PARAM: + return "QUERY_FEC_PARAM"; + + case XSC_CMD_OP_LAG_CREATE: + return "LAG_CREATE"; + + case XSC_CMD_OP_LAG_ADD_MEMBER: + return "LAG ADD MEMBER"; + + case XSC_CMD_OP_LAG_REMOVE_MEMBER: + return "LAG REMOVE MEMBER"; + + case XSC_CMD_OP_LAG_UPDATE_MEMBER_STATUS: + return "LAG UPDATE MEMBER STATUS"; + + case XSC_CMD_OP_LAG_UPDATE_HASH_TYPE: + return "LAG UPDATE HASH TYPE"; + + case XSC_CMD_OP_LAG_DESTROY: + return "LAG_DESTROY"; + + case XSC_CMD_OP_LAG_SET_QOS: + return "LAG_SET_QOS"; + + case XSC_CMD_OP_ENABLE_MSIX: + return "ENABLE_MSIX"; + + case XSC_CMD_OP_IOCTL_FLOW: + return "CFG_FLOW_TABLE"; + + case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: + return "SET_DSCP_PMT"; + + case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: + return "GET_DSCP_PMT"; + + case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: + return "SET_TRUST_MODE"; + + case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: + return 
"GET_TRUST_MODE"; + + case XSC_CMD_OP_IOCTL_SET_PCP_PMT: + return "SET_PCP_PMT"; + + case XSC_CMD_OP_IOCTL_GET_PCP_PMT: + return "GET_PCP_PMT"; + + case XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI: + return "SET_DEFAULT_PRI"; + + case XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI: + return "GET_DEFAULT_PRI"; + + case XSC_CMD_OP_IOCTL_SET_PFC: + return "SET_PFC"; + + case XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH: + return "SET_PFC_DROP_TH"; + + case XSC_CMD_OP_IOCTL_GET_PFC: + return "GET_PFC"; + + case XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS: + return "GET_PFC_CFG_STATUS"; + + case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: + return "SET_RATE_LIMIT"; + + case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT: + return "GET_RATE_LIMIT"; + + case XSC_CMD_OP_IOCTL_SET_SP: + return "SET_SP"; + + case XSC_CMD_OP_IOCTL_GET_SP: + return "GET_SP"; + + case XSC_CMD_OP_IOCTL_SET_WEIGHT: + return "SET_WEIGHT"; + + case XSC_CMD_OP_IOCTL_GET_WEIGHT: + return "GET_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT: + return "DPU_SET_PORT_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT: + return "DPU_GET_PORT_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT: + return "DPU_SET_PRIO_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT: + return "DPU_GET_PRIO_WEIGHT"; + + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_EN: + return "SET_WATCHDOG_EN"; + + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_EN: + return "GET_WATCHDOG_EN"; + + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_PERIOD: + return "SET_WATCHDOG_PERIOD"; + + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_PERIOD: + return "GET_WATCHDOG_PERIOD"; + + case XSC_CMD_OP_IOCTL_SET_ENABLE_RP: + return "ENABLE_RP"; + + case XSC_CMD_OP_IOCTL_SET_ENABLE_NP: + return "ENABLE_NP"; + + case XSC_CMD_OP_IOCTL_SET_INIT_ALPHA: + return "SET_INIT_ALPHA"; + + case XSC_CMD_OP_IOCTL_SET_G: + return "SET_G"; + + case XSC_CMD_OP_IOCTL_SET_AI: + return "SET_AI"; + + case XSC_CMD_OP_IOCTL_SET_HAI: + return "SET_HAI"; + + case XSC_CMD_OP_IOCTL_SET_TH: + return "SET_TH"; + + case XSC_CMD_OP_IOCTL_SET_BC_TH: + return "SET_BC_TH"; + + case XSC_CMD_OP_IOCTL_SET_CNP_OPCODE: + return "SET_CNP_OPCODE"; + + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_B: + return "SET_CNP_BTH_B"; + + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_F: + return "SET_CNP_BTH_F"; + + case XSC_CMD_OP_IOCTL_SET_CNP_ECN: + return "SET_CNP_ECN"; + + case XSC_CMD_OP_IOCTL_SET_DATA_ECN: + return "SET_DATA_ECN"; + + case XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL: + return "SET_CNP_TX_INTERVAL"; + + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME: + return "SET_EVT_PERIOD_RSTTIME"; + + case XSC_CMD_OP_IOCTL_SET_CNP_DSCP: + return "SET_CNP_DSCP"; + + case XSC_CMD_OP_IOCTL_SET_CNP_PCP: + return "SET_CNP_PCP"; + + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA: + return "SET_EVT_PERIOD_ALPHA"; + + case XSC_CMD_OP_IOCTL_GET_CC_CFG: + return "GET_CC_CFG"; + + case XSC_CMD_OP_IOCTL_GET_CC_STAT: + return "GET_CC_STAT"; + + case XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE: + return "SET_CLAMP_TGT_RATE"; + + case XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR: + return "SET_MAX_HAI_FACTOR"; + + case XSC_CMD_OP_IOCTL_SET_HWC: + return "SET_HWCONFIG"; + + case XSC_CMD_OP_IOCTL_GET_HWC: + return "GET_HWCONFIG"; + + case XSC_CMD_OP_SET_MTU: + return "SET_MTU"; + + case XSC_CMD_OP_QUERY_ETH_MAC: + return "QUERY_ETH_MAC"; + + case XSC_CMD_OP_QUERY_HW_STATS: + return "QUERY_HW_STATS"; + + case XSC_CMD_OP_QUERY_PAUSE_CNT: + return "QUERY_PAUSE_CNT"; + + case XSC_CMD_OP_SET_RTT_EN: + return "SET_RTT_EN"; + + case XSC_CMD_OP_GET_RTT_EN: + return "GET_RTT_EN"; + + case XSC_CMD_OP_SET_RTT_QPN: + return "SET_RTT_QPN"; + + case XSC_CMD_OP_GET_RTT_QPN: + return 
"GET_RTT_QPN"; + + case XSC_CMD_OP_SET_RTT_PERIOD: + return "SET_RTT_PERIOD"; + + case XSC_CMD_OP_GET_RTT_PERIOD: + return "GET_RTT_PERIOD"; + + case XSC_CMD_OP_GET_RTT_RESULT: + return "GET_RTT_RESULT"; + + case XSC_CMD_OP_GET_RTT_STATS: + return "ET_RTT_STATS"; + + case XSC_CMD_OP_SET_LED_STATUS: + return "SET_LED_STATUS"; + + case XSC_CMD_OP_AP_FEAT: + return "AP_FEAT"; + + case XSC_CMD_OP_PCIE_LAT_FEAT: + return "PCIE_LAT_FEAT"; + + case XSC_CMD_OP_USER_EMU_CMD: + return "USER_EMU_CMD"; + + case XSC_CMD_OP_QUERY_PFC_PRIO_STATS: + return "QUERY_PFC_PRIO_STATS"; + + case XSC_CMD_OP_IOCTL_QUERY_PFC_STALL_STATS: + return "QUERY_PFC_STALL_STATS"; + + case XSC_CMD_OP_QUERY_HW_STATS_RDMA: + return "QUERY_HW_STATS_RDMA"; + + case XSC_CMD_OP_QUERY_HW_STATS_ETH: + return "QUERY_HW_STATS_ETH"; + + case XSC_CMD_OP_SET_VPORT_RATE_LIMIT: + return "SET_VPORT_RATE_LIMIT"; + + default: return "unknown command opcode"; + } +} + +static void dump_command(struct xsc_core_device *xdev, struct xsc_cmd_mailbox *next, + struct xsc_cmd_work_ent *ent, int input, int len) +{ + u16 op = be16_to_cpu(((struct xsc_inbox_hdr *)(ent->lay->in))->opcode); + int offset = 0; + + if (!(xsc_debug_mask & (1 << XSC_CMD_DATA))) + return; + + xsc_core_dbg(xdev, "dump command %s(0x%x) %s\n", xsc_command_str(op), op, + input ? "INPUT" : "OUTPUT"); + + if (input) { + dump_buf(ent->lay, sizeof(*ent->lay), offset); + offset += sizeof(*ent->lay); + } else { + dump_buf(ent->rsp_lay, sizeof(*ent->rsp_lay), offset); + offset += sizeof(*ent->rsp_lay); + } + + while (next && offset < len) { + xsc_core_dbg(xdev, "command block:\n"); + dump_buf(next->buf, sizeof(struct xsc_cmd_prot_block), offset); + offset += sizeof(struct xsc_cmd_prot_block); + next = next->next; + } +} + +static void cmd_work_handler(struct work_struct *work) +{ + struct xsc_cmd_work_ent *ent = container_of(work, struct xsc_cmd_work_ent, work); + struct xsc_cmd *cmd = ent->cmd; + struct xsc_core_device *xdev = container_of(cmd, struct xsc_core_device, cmd); + struct xsc_cmd_layout *lay; + struct semaphore *sem; + unsigned long flags; + + sem = &cmd->sem; + down(sem); + ent->idx = alloc_ent(cmd); + if (ent->idx < 0) { + xsc_core_err(xdev, "failed to allocate command entry\n"); + up(sem); + return; + } + + ent->token = alloc_token(cmd); + cmd->ent_arr[ent->idx] = ent; + + spin_lock_irqsave(&cmd->doorbell_lock, flags); + lay = get_inst(cmd, cmd->cmd_pid); + ent->lay = lay; + memset(lay, 0, sizeof(*lay)); + memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); + if (ent->in->next) + lay->in_ptr = cpu_to_be64(ent->in->next->dma); + lay->inlen = cpu_to_be32(ent->in->len); + if (ent->out->next) + lay->out_ptr = cpu_to_be64(ent->out->next->dma); + lay->outlen = cpu_to_be32(ent->out->len); + lay->type = XSC_PCI_CMD_XPORT; + lay->token = ent->token; + lay->idx = ent->idx; + if (!cmd->checksum_disabled) + set_signature(ent); + else + lay->sig = 0xff; + dump_command(xdev, ent->in->next, ent, 1, ent->in->len); + + ktime_get_ts64(&ent->ts1); + + /* ring doorbell after the descriptor is valid */ + wmb(); + + cmd->cmd_pid = (cmd->cmd_pid + 1) % (1 << cmd->log_sz); + writel(cmd->cmd_pid, REG_ADDR(xdev, cmd->reg.req_pid_addr)); + mmiowb(); + spin_unlock_irqrestore(&cmd->doorbell_lock, flags); +} + +static const char *deliv_status_to_str(u8 status) +{ + switch (status) { + case XSC_CMD_DELIVERY_STAT_OK: + return "no errors"; + case XSC_CMD_DELIVERY_STAT_SIGNAT_ERR: + return "signature error"; + case XSC_CMD_DELIVERY_STAT_TOK_ERR: + return "token error"; + case 
XSC_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: + return "bad block number"; + case XSC_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: + return "output pointer not aligned to block size"; + case XSC_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: + return "input pointer not aligned to block size"; + case XSC_CMD_DELIVERY_STAT_FW_ERR: + return "firmware internal error"; + case XSC_CMD_DELIVERY_STAT_IN_LENGTH_ERR: + return "command input length error"; + case XSC_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: + return "command output length error"; + case XSC_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: + return "reserved fields not cleared"; + case XSC_CMD_DELIVERY_STAT_CMD_DESCR_ERR: + return "bad command descriptor type"; + default: + return "unknown status code"; + } +} + +static u16 msg_to_opcode(struct xsc_cmd_msg *in) +{ + struct xsc_inbox_hdr *hdr = (struct xsc_inbox_hdr *)(in->first.data); + + return be16_to_cpu(hdr->opcode); +} + +static int wait_func(struct xsc_core_device *xdev, struct xsc_cmd_work_ent *ent) +{ + unsigned long timeout = msecs_to_jiffies(XSC_CMD_TIMEOUT_MSEC); + int err; + struct xsc_cmd *cmd = &xdev->cmd; + + if (!wait_for_completion_timeout(&ent->done, timeout)) + err = -ETIMEDOUT; + else + err = ent->ret; + + if (err == -ETIMEDOUT) { + cmd->cmd_status = XSC_CMD_STATUS_TIMEDOUT; + xsc_core_warn(xdev, "wait for %s(0x%x) response timeout!\n", + xsc_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); + } else if (err) { + xsc_core_dbg(xdev, "err %d, delivery status %s(%d)\n", err, + deliv_status_to_str(ent->status), ent->status); + } + + return err; +} + +/* Notes: + * 1. Callback functions may not sleep + * 2. page queue commands do not support asynchrous completion + */ +static int xsc_cmd_invoke(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, + struct xsc_rsp_msg *out, u8 *status) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_work_ent *ent; + ktime_t t1, t2, delta; + struct xsc_cmd_stats *stats; + int err = 0; + s64 ds; + u16 op; + struct semaphore *sem; + + ent = alloc_cmd(cmd, in, out); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + init_completion(&ent->done); + INIT_WORK(&ent->work, cmd_work_handler); + if (!queue_work(cmd->wq, &ent->work)) { + xsc_core_warn(xdev, "failed to queue work\n"); + err = -ENOMEM; + goto out_free; + } + + err = wait_func(xdev, ent); + if (err == -ETIMEDOUT) + goto out; + t1 = timespec64_to_ktime(ent->ts1); + t2 = timespec64_to_ktime(ent->ts2); + delta = ktime_sub(t2, t1); + ds = ktime_to_ns(delta); + op = be16_to_cpu(((struct xsc_inbox_hdr *)in->first.data)->opcode); + if (op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[op]; + spin_lock(&stats->lock); + stats->sum += ds; + ++stats->n; + spin_unlock(&stats->lock); + } + xsc_core_dbg_mask(xdev, 1 << XSC_CMD_TIME, + "fw exec time for %s is %lld nsec\n", + xsc_command_str(op), ds); + *status = ent->status; + free_cmd(ent); + + return err; + +out: + sem = &cmd->sem; + up(sem); +out_free: + free_cmd(ent); + return err; +} + +static ssize_t dbg_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + char lbuf[3]; + int err; + + if (!dbg->in_msg || !dbg->out_msg) + return -ENOMEM; + + if (copy_from_user(lbuf, buf, sizeof(lbuf))) + return -EPERM; + + lbuf[sizeof(lbuf) - 1] = 0; + + if (strcmp(lbuf, "go")) + return -EINVAL; + + err = xsc_cmd_exec(xdev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen); + + return err ? 
err : count; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = dbg_write, +}; + +static int xsc_copy_to_cmd_msg(struct xsc_cmd_msg *to, void *from, int size) +{ + struct xsc_cmd_prot_block *block; + struct xsc_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(to->first.data)); + memcpy(to->first.data, from, copy); + size -= copy; + from += copy; + + next = to->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, XSC_CMD_DATA_BLOCK_SIZE); + block = next->buf; + memcpy(block->data, from, copy); + block->owner_status = 0; + from += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static int xsc_copy_from_rsp_msg(void *to, struct xsc_rsp_msg *from, int size) +{ + struct xsc_cmd_prot_block *block; + struct xsc_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(from->first.data)); + memcpy(to, from->first.data, copy); + size -= copy; + to += copy; + + next = from->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, XSC_CMD_DATA_BLOCK_SIZE); + block = next->buf; + if (!block->owner_status) + pr_err("block ownership check failed\n"); + + memcpy(to, block->data, copy); + to += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static struct xsc_cmd_mailbox *alloc_cmd_box(struct xsc_core_device *xdev, + gfp_t flags) +{ + struct xsc_cmd_mailbox *mailbox; + + mailbox = kmalloc(sizeof(*mailbox), flags); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = dma_pool_alloc(xdev->cmd.pool, flags, + &mailbox->dma); + if (!mailbox->buf) { + xsc_core_dbg(xdev, "failed allocation\n"); + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + memset(mailbox->buf, 0, sizeof(struct xsc_cmd_prot_block)); + mailbox->next = NULL; + + return mailbox; +} + +static void free_cmd_box(struct xsc_core_device *xdev, + struct xsc_cmd_mailbox *mailbox) +{ + dma_pool_free(xdev->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} + +static struct xsc_cmd_msg *xsc_alloc_cmd_msg(struct xsc_core_device *xdev, + gfp_t flags, int size) +{ + struct xsc_cmd_mailbox *tmp, *head = NULL; + struct xsc_cmd_prot_block *block; + struct xsc_cmd_msg *msg; + int blen; + int err; + int n; + int i; + + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + blen = size - min_t(int, sizeof(msg->first.data), size); + n = (blen + XSC_CMD_DATA_BLOCK_SIZE - 1) / XSC_CMD_DATA_BLOCK_SIZE; + + for (i = 0; i < n; i++) { + tmp = alloc_cmd_box(xdev, flags); + if (IS_ERR(tmp)) { + xsc_core_warn(xdev, "failed allocating block\n"); + err = PTR_ERR(tmp); + goto err_alloc; + } + + block = tmp->buf; + tmp->next = head; + block->next = cpu_to_be64(tmp->next ? 
tmp->next->dma : 0); + block->block_num = cpu_to_be32(n - i - 1); + head = tmp; + } + msg->next = head; + msg->len = size; + return msg; + +err_alloc: + while (head) { + tmp = head->next; + free_cmd_box(xdev, head); + head = tmp; + } + kfree(msg); + + return ERR_PTR(err); +} + +static void xsc_free_cmd_msg(struct xsc_core_device *xdev, + struct xsc_cmd_msg *msg) +{ + struct xsc_cmd_mailbox *head = msg->next; + struct xsc_cmd_mailbox *next; + + while (head) { + next = head->next; + free_cmd_box(xdev, head); + head = next; + } + kfree(msg); +} + +static struct xsc_rsp_msg *xsc_alloc_rsp_msg(struct xsc_core_device *xdev, + gfp_t flags, int size) +{ + struct xsc_cmd_mailbox *tmp, *head = NULL; + struct xsc_cmd_prot_block *block; + struct xsc_rsp_msg *msg; + int blen; + int err; + int n; + int i; + + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + blen = size - min_t(int, sizeof(msg->first.data), size); + n = (blen + XSC_CMD_DATA_BLOCK_SIZE - 1) / XSC_CMD_DATA_BLOCK_SIZE; + + for (i = 0; i < n; i++) { + tmp = alloc_cmd_box(xdev, flags); + if (IS_ERR(tmp)) { + xsc_core_warn(xdev, "failed allocating block\n"); + err = PTR_ERR(tmp); + goto err_alloc; + } + + block = tmp->buf; + tmp->next = head; + block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); + block->block_num = cpu_to_be32(n - i - 1); + head = tmp; + } + msg->next = head; + msg->len = size; + return msg; + +err_alloc: + while (head) { + tmp = head->next; + free_cmd_box(xdev, head); + head = tmp; + } + kfree(msg); + + return ERR_PTR(err); +} + +static void xsc_free_rsp_msg(struct xsc_core_device *xdev, + struct xsc_rsp_msg *msg) +{ + struct xsc_cmd_mailbox *head = msg->next; + struct xsc_cmd_mailbox *next; + + while (head) { + next = head->next; + free_cmd_box(xdev, head); + head = next; + } + kfree(msg); +} + +static ssize_t data_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + void *ptr; + int err; + + if (*pos != 0) + return -EINVAL; + + kfree(dbg->in_msg); + dbg->in_msg = NULL; + dbg->inlen = 0; + + ptr = kzalloc(count, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + if (copy_from_user(ptr, buf, count)) { + err = -EPERM; + goto out; + } + dbg->in_msg = ptr; + dbg->inlen = count; + + *pos = count; + + return count; + +out: + kfree(ptr); + return err; +} + +static ssize_t data_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + int copy; + + if (*pos) + return 0; + + if (!dbg->out_msg) + return -ENOMEM; + + copy = min_t(int, count, dbg->outlen); + if (copy_to_user(buf, dbg->out_msg, copy)) + return -EPERM; + + *pos += copy; + + return copy; +} + +static const struct file_operations dfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = data_write, + .read = data_read, +}; + +static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + char outlen[8]; + int err; + + if (*pos) + return 0; + + err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); + if (err < 0) + return err; + + if (copy_to_user(buf, &outlen, err)) + return -EPERM; + + *pos += err; + + return err; +} + +static ssize_t outlen_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct xsc_core_device 
*xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + char outlen_str[8]; + int outlen; + void *ptr; + int err; + + if (*pos != 0 || count > 6) + return -EINVAL; + + kfree(dbg->out_msg); + dbg->out_msg = NULL; + dbg->outlen = 0; + + if (copy_from_user(outlen_str, buf, count)) + return -EPERM; + + outlen_str[7] = 0; + + err = kstrtoint(outlen_str, 10, &outlen); + if (err < 0) + return err; + + ptr = kzalloc(outlen, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + dbg->out_msg = ptr; + dbg->outlen = outlen; + + *pos = count; + + return count; +} + +static const struct file_operations olfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = outlen_write, + .read = outlen_read, +}; + +static void set_wqname(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + + snprintf(cmd->wq_name, sizeof(cmd->wq_name), "xsc_cmd_%s", + dev_name(&xdev->pdev->dev)); +} + +static void clean_debug_files(struct xsc_core_device *xdev) +{ + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + + if (!xsc_debugfs_root) + return; + + xsc_cmdif_debugfs_cleanup(xdev); + debugfs_remove_recursive(dbg->dbg_root); +} + +static int create_debugfs_files(struct xsc_core_device *xdev) +{ + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + int err = -ENOMEM; + + if (!xsc_debugfs_root) + return 0; + + dbg->dbg_root = debugfs_create_dir("cmd", xdev->dev_res->dbg_root); + if (!dbg->dbg_root) + return err; + + dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root, + xdev, &dfops); + if (!dbg->dbg_in) + goto err_dbg; + + dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root, + xdev, &dfops); + if (!dbg->dbg_out) + goto err_dbg; + + dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root, + xdev, &olfops); + if (!dbg->dbg_outlen) + goto err_dbg; + + debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status); + + dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, xdev, &fops); + if (!dbg->dbg_run) + goto err_dbg; + + xsc_cmdif_debugfs_init(xdev); + + return 0; + +err_dbg: + clean_debug_files(xdev); + return err; +} + +void xsc_cmd_use_events(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + flush_workqueue(cmd->wq); + + cmd->mode = CMD_MODE_EVENTS; + + while (cmd->cmd_pid != cmd->cq_cid) + msleep(20); + kthread_stop(cmd->cq_task); + cmd->cq_task = NULL; + + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +static int cmd_cq_polling(void *data); +void xsc_cmd_use_polling(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + flush_workqueue(cmd->wq); + cmd->mode = CMD_MODE_POLLING; + cmd->cq_task = kthread_create(cmd_cq_polling, (void *)xdev, "xsc_cmd_cq_polling"); + if (cmd->cq_task) + wake_up_process(cmd->cq_task); + + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +static int status_to_err(u8 status) +{ + return status ? 
-1 : 0; /* TBD more meaningful codes */ +} + +static struct xsc_cmd_msg *alloc_msg(struct xsc_core_device *xdev, int in_size) +{ + struct xsc_cmd_msg *msg = ERR_PTR(-ENOMEM); + struct xsc_cmd *cmd = &xdev->cmd; + struct cache_ent *ent = NULL; + + if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) + ent = &cmd->cache.large; + else if (in_size > 16 && in_size <= MED_LIST_SIZE) + ent = &cmd->cache.med; + + if (ent) { + spin_lock(&ent->lock); + if (!list_empty(&ent->head)) { + msg = list_entry(ent->head.next, typeof(*msg), list); + /* For cached lists, we must explicitly state what is + * the real size + */ + msg->len = in_size; + list_del(&msg->list); + } + spin_unlock(&ent->lock); + } + + if (IS_ERR(msg)) + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, in_size); + + return msg; +} + +static void free_msg(struct xsc_core_device *xdev, struct xsc_cmd_msg *msg) +{ + if (msg->cache) { + spin_lock(&msg->cache->lock); + list_add_tail(&msg->list, &msg->cache->head); + spin_unlock(&msg->cache->lock); + } else { + xsc_free_cmd_msg(xdev, msg); + } +} + +static int dummy_work(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, + struct xsc_rsp_msg *out, u16 dummy_cnt, u16 dummy_start_pid) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_work_ent **dummy_ent_arr; + struct xsc_cmd_layout *lay; + struct semaphore *sem; + int err = 0; + u16 i; + u16 free_cnt = 0; + u16 temp_pid = dummy_start_pid; + + sem = &cmd->sem; + + dummy_ent_arr = kcalloc(dummy_cnt, sizeof(struct xsc_cmd_work_ent *), GFP_KERNEL); + if (!dummy_ent_arr) { + err = -ENOMEM; + goto alloc_ent_arr_err; + } + + for (i = 0; i < dummy_cnt; i++) { + dummy_ent_arr[i] = alloc_cmd(cmd, in, out); + if (IS_ERR(dummy_ent_arr[i])) { + xsc_core_err(xdev, "failed to alloc cmd buffer\n"); + err = -ENOMEM; + free_cnt = i; + goto alloc_ent_err; + } + + down(sem); + + dummy_ent_arr[i]->idx = alloc_ent(cmd); + if (dummy_ent_arr[i]->idx < 0) { + xsc_core_err(xdev, "failed to allocate command entry\n"); + err = -1; + free_cnt = i; + goto get_cmd_ent_idx_err; + } + dummy_ent_arr[i]->token = alloc_token(cmd); + cmd->ent_arr[dummy_ent_arr[i]->idx] = dummy_ent_arr[i]; + init_completion(&dummy_ent_arr[i]->done); + + lay = get_inst(cmd, temp_pid); + dummy_ent_arr[i]->lay = lay; + memset(lay, 0, sizeof(*lay)); + memcpy(lay->in, dummy_ent_arr[i]->in->first.data, sizeof(dummy_ent_arr[i]->in)); + lay->inlen = cpu_to_be32(dummy_ent_arr[i]->in->len); + lay->outlen = cpu_to_be32(dummy_ent_arr[i]->out->len); + lay->type = XSC_PCI_CMD_XPORT; + lay->token = dummy_ent_arr[i]->token; + lay->idx = dummy_ent_arr[i]->idx; + if (!cmd->checksum_disabled) + set_signature(dummy_ent_arr[i]); + else + lay->sig = 0xff; + temp_pid = (temp_pid + 1) % (1 << cmd->log_sz); + } + + /* ring doorbell after the descriptor is valid */ + wmb(); + writel(cmd->cmd_pid, REG_ADDR(xdev, cmd->reg.req_pid_addr)); + if (readl(REG_ADDR(xdev, cmd->reg.interrupt_stat_addr)) != 0) + writel(0xF, REG_ADDR(xdev, cmd->reg.interrupt_stat_addr)); + + mmiowb(); + xsc_core_dbg(xdev, "write 0x%x to command doorbell, idx %u ~ %u\n", cmd->cmd_pid, + dummy_ent_arr[0]->idx, dummy_ent_arr[dummy_cnt - 1]->idx); + + if (wait_for_completion_timeout(&dummy_ent_arr[dummy_cnt - 1]->done, + msecs_to_jiffies(3000)) == 0) { + xsc_core_err(xdev, "dummy_cmd %d ent timeout, cmdq fail\n", dummy_cnt - 1); + err = -ETIMEDOUT; + } else { + xsc_core_dbg(xdev, "%d ent done\n", dummy_cnt); + } + + for (i = 0; i < dummy_cnt; i++) + free_cmd(dummy_ent_arr[i]); + + kfree(dummy_ent_arr); + return err; + +get_cmd_ent_idx_err: + 
free_cmd(dummy_ent_arr[free_cnt]); + up(sem); +alloc_ent_err: + for (i = 0; i < free_cnt; i++) { + free_ent(cmd, dummy_ent_arr[i]->idx); + up(sem); + free_cmd(dummy_ent_arr[i]); + } + kfree(dummy_ent_arr); +alloc_ent_arr_err: + return err; +} + +static int xsc_dummy_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size, u16 dmmy_cnt, u16 dummy_start) +{ + struct xsc_cmd_msg *inb; + struct xsc_rsp_msg *outb; + int err; + + inb = alloc_msg(xdev, in_size); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = xsc_copy_to_cmd_msg(inb, in, in_size); + if (err) { + xsc_core_warn(xdev, "err %d\n", err); + goto out_in; + } + + outb = xsc_alloc_rsp_msg(xdev, GFP_KERNEL, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto out_in; + } + + err = dummy_work(xdev, inb, outb, dmmy_cnt, dummy_start); + + if (err) + goto out_out; + + err = xsc_copy_from_rsp_msg(out, outb, out_size); + +out_out: + xsc_free_rsp_msg(xdev, outb); + +out_in: + free_msg(xdev, inb); + return err; +} + +static int xsc_send_dummy_cmd(struct xsc_core_device *xdev, u16 gap, u16 dummy_start) +{ + struct xsc_cmd_dummy_mbox_out *out; + struct xsc_cmd_dummy_mbox_in in; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto no_mem_out; + } + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DUMMY); + + err = xsc_dummy_cmd_exec(xdev, &in, sizeof(in), out, sizeof(*out), gap, dummy_start); + if (err) + goto out_out; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto out_out; + } + +out_out: + kfree(out); +no_mem_out: + return err; +} + +static int request_pid_cid_mismatch_restore(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + u16 req_pid, req_cid; + u16 gap; + + int err; + + req_pid = readl(REG_ADDR(xdev, cmd->reg.req_pid_addr)); + req_cid = readl(REG_ADDR(xdev, cmd->reg.req_cid_addr)); + if (req_pid >= (1 << cmd->log_sz) || req_cid >= (1 << cmd->log_sz)) { + xsc_core_err(xdev, "req_pid %d, req_cid %d, out of normal range!!! max value is %d\n", + req_pid, req_cid, (1 << cmd->log_sz)); + return -1; + } + + if (req_pid == req_cid) + return 0; + + gap = (req_pid > req_cid) ? 
(req_pid - req_cid) : ((1 << cmd->log_sz) + req_pid - req_cid); + xsc_core_info(xdev, "Cmdq req_pid %d, req_cid %d, send %d dummy cmds\n", + req_pid, req_cid, gap); + + err = xsc_send_dummy_cmd(xdev, gap, req_cid); + if (err) { + xsc_core_err(xdev, "Send dummy cmd failed\n"); + goto send_dummy_fail; + } + +send_dummy_fail: + return err; +} + +int _xsc_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size) +{ + struct xsc_cmd_msg *inb; + struct xsc_rsp_msg *outb; + int err; + u8 status = 0; + struct xsc_cmd *cmd = &xdev->cmd; + + if (cmd->cmd_status == XSC_CMD_STATUS_TIMEDOUT) + return -ETIMEDOUT; + + inb = alloc_msg(xdev, in_size); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = xsc_copy_to_cmd_msg(inb, in, in_size); + if (err) { + xsc_core_warn(xdev, "err %d\n", err); + goto out_in; + } + + outb = xsc_alloc_rsp_msg(xdev, GFP_KERNEL, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto out_in; + } + + err = xsc_cmd_invoke(xdev, inb, outb, &status); + if (err) + goto out_out; + + if (status) { + xsc_core_err(xdev, "opcode:%#x, err %d, status %d\n", + msg_to_opcode(inb), err, status); + err = status_to_err(status); + goto out_out; + } + + err = xsc_copy_from_rsp_msg(out, outb, out_size); + +out_out: + xsc_free_rsp_msg(xdev, outb); + +out_in: + free_msg(xdev, inb); + return err; +} +EXPORT_SYMBOL(_xsc_cmd_exec); + +static void destroy_msg_cache(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_msg *msg; + struct xsc_cmd_msg *n; + + list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { + list_del(&msg->list); + xsc_free_cmd_msg(xdev, msg); + } + + list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { + list_del(&msg->list); + xsc_free_cmd_msg(xdev, msg); + } +} + +static int create_msg_cache(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_msg *msg; + int err; + int i; + + spin_lock_init(&cmd->cache.large.lock); + INIT_LIST_HEAD(&cmd->cache.large.head); + spin_lock_init(&cmd->cache.med.lock); + INIT_LIST_HEAD(&cmd->cache.med.head); + + for (i = 0; i < NUM_LONG_LISTS; i++) { + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, LONG_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.large; + list_add_tail(&msg->list, &cmd->cache.large.head); + } + + for (i = 0; i < NUM_MED_LISTS; i++) { + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, MED_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.med; + list_add_tail(&msg->list, &cmd->cache.med.head); + } + + return 0; + +ex_err: + destroy_msg_cache(xdev); + return err; +} + +static void xsc_cmd_comp_handler(struct xsc_core_device *xdev, u8 idx, struct xsc_rsp_layout *rsp) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_work_ent *ent; + struct xsc_inbox_hdr *hdr; + + if (idx > cmd->max_reg_cmds || (cmd->bitmask & (1 << idx))) { + xsc_core_err(xdev, "idx[%d] exceed max cmds, or has no relative request.\n", idx); + return; + } + ent = cmd->ent_arr[idx]; + ent->rsp_lay = rsp; + ktime_get_ts64(&ent->ts2); + + memcpy(ent->out->first.data, ent->rsp_lay->out, sizeof(ent->rsp_lay->out)); + dump_command(xdev, ent->out->next, ent, 0, ent->out->len); + if (!cmd->checksum_disabled) + ent->ret = verify_signature(ent); + else + ent->ret = 0; + ent->status = 0; + + hdr = (struct xsc_inbox_hdr *)ent->in->first.data; + xsc_core_dbg(xdev, "delivery status:%s(%d), rsp status=%d, opcode %#x, idx:%d,%d, ret=%d\n", + 
deliv_status_to_str(ent->status), ent->status, + ((struct xsc_outbox_hdr *)ent->rsp_lay->out)->status, + __be16_to_cpu(hdr->opcode), idx, ent->lay->idx, ent->ret); + free_ent(cmd, ent->idx); + complete(&ent->done); + up(&cmd->sem); +} + +static int cmd_cq_polling(void *data) +{ + struct xsc_core_device *xdev = data; + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_rsp_layout *rsp; + u32 cq_pid; + + while (!kthread_should_stop()) { + if (need_resched()) + schedule(); + cq_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + if (cmd->cq_cid == cq_pid) { + mdelay(3); + continue; + } + + //get cqe + rsp = get_cq_inst(cmd, cmd->cq_cid); + if (!cmd->ownerbit_learned) { + cmd->ownerbit_learned = 1; + cmd->owner_bit = rsp->owner_bit; + } + if (cmd->owner_bit != rsp->owner_bit) { + //hw update cq doorbell but buf may not ready + xsc_core_err(xdev, "hw update cq doorbell but buf not ready %u %u\n", + cmd->cq_cid, cq_pid); + continue; + } + + xsc_cmd_comp_handler(xdev, rsp->idx, rsp); + + cmd->cq_cid = (cmd->cq_cid + 1) % (1 << cmd->log_sz); + + writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (cmd->cq_cid == 0) + cmd->owner_bit = !cmd->owner_bit; + } + return 0; +} + +int xsc_cmd_err_handler(struct xsc_core_device *xdev) +{ + union interrupt_stat { + struct { + u32 hw_read_req_err:1; + u32 hw_write_req_err:1; + u32 req_pid_err:1; + u32 rsp_cid_err:1; + }; + u32 raw; + } stat; + int err = 0; + int retry = 0; + + stat.raw = readl(REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + while (stat.raw != 0) { + err++; + if (stat.hw_read_req_err) { + retry = 1; + stat.hw_read_req_err = 0; + xsc_core_err(xdev, "hw report read req from host failed!\n"); + } else if (stat.hw_write_req_err) { + retry = 1; + stat.hw_write_req_err = 0; + xsc_core_err(xdev, "hw report write req to fw failed!\n"); + } else if (stat.req_pid_err) { + stat.req_pid_err = 0; + xsc_core_err(xdev, "hw report unexpected req pid!\n"); + } else if (stat.rsp_cid_err) { + stat.rsp_cid_err = 0; + xsc_core_err(xdev, "hw report unexpected rsp cid!\n"); + } else { + stat.raw = 0; + xsc_core_err(xdev, "ignore unknown interrupt!\n"); + } + } + + if (retry) + writel(xdev->cmd.cmd_pid, REG_ADDR(xdev, xdev->cmd.reg.req_pid_addr)); + + if (err) + writel(0xf, REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + + return err; +} + +void xsc_cmd_resp_handler(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_rsp_layout *rsp; + u32 cq_pid; + const int budget = 32; + int count = 0; + + while (count < budget) { + cq_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + if (cq_pid == cmd->cq_cid) + return; + + rsp = get_cq_inst(cmd, cmd->cq_cid); + if (!cmd->ownerbit_learned) { + cmd->ownerbit_learned = 1; + cmd->owner_bit = rsp->owner_bit; + } + if (cmd->owner_bit != rsp->owner_bit) { + xsc_core_err(xdev, "hw update cq doorbell but buf not ready %u %u\n", + cmd->cq_cid, cq_pid); + return; + } + + xsc_cmd_comp_handler(xdev, rsp->idx, rsp); + + cmd->cq_cid = (cmd->cq_cid + 1) % (1 << cmd->log_sz); + writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (cmd->cq_cid == 0) + cmd->owner_bit = !cmd->owner_bit; + + count++; + } +} + +static void xsc_cmd_handle_rsp_before_reload +(struct xsc_cmd *cmd, struct xsc_core_device *xdev) +{ + u32 rsp_pid, rsp_cid; + + rsp_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + rsp_cid = readl(REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (rsp_pid == rsp_cid) + return; + + cmd->cq_cid = rsp_pid; + + writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); +} + 
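All of the producer/consumer bookkeeping above (cmd_pid against req_cid, cq_cid against rsp_pid) is index arithmetic on a ring of 2^log_sz entries, and the conditional gap expression in request_pid_cid_mismatch_restore() is equivalent to a masked subtraction. A sketch for readers, under that assumption (the helper name is hypothetical and not part of the patch)::

    /* outstanding slots between producer and consumer on a 2^log_sz ring */
    static inline u16 xsc_cmdq_ring_gap(u16 pid, u16 cid, u8 log_sz)
    {
    	return (pid - cid) & ((1 << log_sz) - 1);
    }
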
+int xsc_cmd_init(struct xsc_core_device *xdev)
+{
+	int size = sizeof(struct xsc_cmd_prot_block);
+	int align = roundup_pow_of_two(size);
+	struct xsc_cmd *cmd = &xdev->cmd;
+	u32 cmd_h, cmd_l;
+	u32 err_stat;
+	int err;
+	int i;
+
+	/*
+	 * SR-IOV still needs to be adapted to this process: there are 544
+	 * cmdq resources in total, and the SoC uses them starting from id 514.
+	 */
+	if (xsc_core_is_pf(xdev)) {
+		cmd->reg.req_pid_addr = HIF_CMDQM_HOST_REQ_PID_MEM_ADDR;
+		cmd->reg.req_cid_addr = HIF_CMDQM_HOST_REQ_CID_MEM_ADDR;
+		cmd->reg.rsp_pid_addr = HIF_CMDQM_HOST_RSP_PID_MEM_ADDR;
+		cmd->reg.rsp_cid_addr = HIF_CMDQM_HOST_RSP_CID_MEM_ADDR;
+		cmd->reg.req_buf_h_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR;
+		cmd->reg.req_buf_l_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR;
+		cmd->reg.rsp_buf_h_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR;
+		cmd->reg.rsp_buf_l_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR;
+		cmd->reg.msix_vec_addr = HIF_CMDQM_VECTOR_ID_MEM_ADDR;
+		cmd->reg.element_sz_addr = HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR;
+		cmd->reg.q_depth_addr = HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR;
+		cmd->reg.interrupt_stat_addr = HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR;
+	} else {
+		cmd->reg.req_pid_addr = CMDQM_HOST_REQ_PID_MEM_ADDR;
+		cmd->reg.req_cid_addr = CMDQM_HOST_REQ_CID_MEM_ADDR;
+		cmd->reg.rsp_pid_addr = CMDQM_HOST_RSP_PID_MEM_ADDR;
+		cmd->reg.rsp_cid_addr = CMDQM_HOST_RSP_CID_MEM_ADDR;
+		cmd->reg.req_buf_h_addr = CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR;
+		cmd->reg.req_buf_l_addr = CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR;
+		cmd->reg.rsp_buf_h_addr = CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR;
+		cmd->reg.rsp_buf_l_addr = CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR;
+		cmd->reg.msix_vec_addr = CMDQM_VECTOR_ID_MEM_ADDR;
+		cmd->reg.element_sz_addr = CMDQM_Q_ELEMENT_SZ_REG_ADDR;
+		cmd->reg.q_depth_addr = CMDQM_HOST_Q_DEPTH_REG_ADDR;
+		cmd->reg.interrupt_stat_addr = CMDQM_HOST_VF_ERR_STS_MEM_ADDR;
+	}
+
+	cmd->pool = dma_pool_create("xsc_cmd", &xdev->pdev->dev, size, align, 0);
+	if (!cmd->pool)
+		return -ENOMEM;
+
+	cmd->cmd_buf = (void *)__get_free_pages(GFP_KERNEL, 0);
+	if (!cmd->cmd_buf) {
+		err = -ENOMEM;
+		goto err_free_pool;
+	}
+	cmd->cq_buf = (void *)__get_free_pages(GFP_KERNEL, 0);
+	if (!cmd->cq_buf) {
+		err = -ENOMEM;
+		goto err_free_cmd;
+	}
+
+	cmd->dma = dma_map_single(&xdev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
+				  DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&xdev->pdev->dev, cmd->dma)) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	cmd->cq_dma = dma_map_single(&xdev->pdev->dev, cmd->cq_buf, PAGE_SIZE,
+				     DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&xdev->pdev->dev, cmd->cq_dma)) {
+		err = -ENOMEM;
+		goto err_map_cmd;
+	}
+
+	cmd->cmd_pid = readl(REG_ADDR(xdev, cmd->reg.req_pid_addr));
+	cmd->cq_cid = readl(REG_ADDR(xdev, cmd->reg.rsp_cid_addr));
+	cmd->ownerbit_learned = 0;
+
+	xsc_cmd_handle_rsp_before_reload(cmd, xdev);
+
+#define ELEMENT_SIZE_LOG 6 /* 64B */
+#define Q_DEPTH_LOG	 5 /* 32 */
+
+	cmd->log_sz = Q_DEPTH_LOG;
+	cmd->log_stride = readl(REG_ADDR(xdev, cmd->reg.element_sz_addr));
+	writel(1 << cmd->log_sz, REG_ADDR(xdev, cmd->reg.q_depth_addr));
+	if (cmd->log_stride != ELEMENT_SIZE_LOG) {
+		dev_err(&xdev->pdev->dev, "firmware failed to init cmdq, log_stride=(%d, %d)\n",
+			cmd->log_stride, ELEMENT_SIZE_LOG);
+		err = -ENODEV;
+		goto err_map;
+	}
+
+	if (1 << cmd->log_sz > XSC_MAX_COMMANDS) {
+		dev_err(&xdev->pdev->dev, "firmware reports too many outstanding commands %d\n",
+			1 << cmd->log_sz);
+		err = -EINVAL;
+		goto err_map;
+	}
+
+	if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
+		dev_err(&xdev->pdev->dev, "command queue size overflow\n");
+		err = -EINVAL;
+		goto err_map;
+	}
+
+	cmd->checksum_disabled = 1;
+	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
+	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
+
+	spin_lock_init(&cmd->alloc_lock);
+	spin_lock_init(&cmd->token_lock);
+	spin_lock_init(&cmd->doorbell_lock);
+	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
+		spin_lock_init(&cmd->stats[i].lock);
+
+	sema_init(&cmd->sem, cmd->max_reg_cmds);
+
+	cmd_h = (u32)((u64)(cmd->dma) >> 32);
+	cmd_l = (u32)(cmd->dma);
+	if (cmd_l & 0xfff) {
+		dev_err(&xdev->pdev->dev, "invalid command queue address\n");
+		err = -ENOMEM;
+		goto err_map;
+	}
+
+	writel(cmd_h, REG_ADDR(xdev, cmd->reg.req_buf_h_addr));
+	writel(cmd_l, REG_ADDR(xdev, cmd->reg.req_buf_l_addr));
+
+	cmd_h = (u32)((u64)(cmd->cq_dma) >> 32);
+	cmd_l = (u32)(cmd->cq_dma);
+	if (cmd_l & 0xfff) {
+		dev_err(&xdev->pdev->dev, "invalid command queue address\n");
+		err = -ENOMEM;
+		goto err_map;
+	}
+	writel(cmd_h, REG_ADDR(xdev, cmd->reg.rsp_buf_h_addr));
+	writel(cmd_l, REG_ADDR(xdev, cmd->reg.rsp_buf_l_addr));
+
+	/* Make sure firmware sees the complete address before we proceed */
+	wmb();
+
+	xsc_core_dbg(xdev, "descriptor at dma 0x%llx 0x%llx\n",
+		     (unsigned long long)(cmd->dma), (unsigned long long)(cmd->cq_dma));
+
+	cmd->mode = CMD_MODE_POLLING;
+	cmd->cmd_status = XSC_CMD_STATUS_NORMAL;
+
+	err = create_msg_cache(xdev);
+	if (err) {
+		dev_err(&xdev->pdev->dev, "failed to create command cache\n");
+		goto err_map;
+	}
+
+	set_wqname(xdev);
+	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
+	if (!cmd->wq) {
+		dev_err(&xdev->pdev->dev, "failed to create command workqueue\n");
+		err = -ENOMEM;
+		goto err_cache;
+	}
+
+	cmd->cq_task = kthread_create(cmd_cq_polling, (void *)xdev, "xsc_cmd_cq_polling");
+	if (IS_ERR(cmd->cq_task)) {
+		dev_err(&xdev->pdev->dev, "failed to create cq task\n");
+		err = PTR_ERR(cmd->cq_task);
+		cmd->cq_task = NULL;
+		goto err_wq;
+	}
+	wake_up_process(cmd->cq_task);
+
+	err = create_debugfs_files(xdev);
+	if (err) {
+		err = -ENOMEM;
+		goto err_task;
+	}
+
+	err = request_pid_cid_mismatch_restore(xdev);
+	if (err) {
+		dev_err(&xdev->pdev->dev, "req pid/cid mismatch, restore failed\n");
+		goto err_req_restore;
+	}
+
+	/* clear abnormal state to avoid the impact of a previous error */
+	err_stat = readl(REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr));
+	if (err_stat) {
+		xsc_core_warn(xdev, "err_stat 0x%x when initializing, clear it\n", err_stat);
+		writel(0xf, REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr));
+	}
+
+	return 0;
+
+err_req_restore:
+	clean_debug_files(xdev);
+
+err_task:
+	kthread_stop(cmd->cq_task);
+
+err_wq:
+	destroy_workqueue(cmd->wq);
+
+err_cache:
+	destroy_msg_cache(xdev);
+
+err_map:
+	dma_unmap_single(&xdev->pdev->dev, cmd->cq_dma, PAGE_SIZE,
+			 DMA_BIDIRECTIONAL);
+
+err_map_cmd:
+	dma_unmap_single(&xdev->pdev->dev, cmd->dma, PAGE_SIZE,
+			 DMA_BIDIRECTIONAL);
+err_free:
+	free_pages((unsigned long)cmd->cq_buf, 0);
+
+err_free_cmd:
+	free_pages((unsigned long)cmd->cmd_buf, 0);
+
+err_free_pool:
+	dma_pool_destroy(cmd->pool);
+
+	return err;
+}
+EXPORT_SYMBOL(xsc_cmd_init);
+
+void xsc_cmd_cleanup(struct xsc_core_device *xdev)
+{
+	struct xsc_cmd *cmd = &xdev->cmd;
+
+	clean_debug_files(xdev);
+	destroy_workqueue(cmd->wq);
+	if (cmd->cq_task)
+		kthread_stop(cmd->cq_task);
+	destroy_msg_cache(xdev);
+	dma_unmap_single(&xdev->pdev->dev, cmd->cq_dma, PAGE_SIZE,
+			 DMA_BIDIRECTIONAL);
+	free_pages((unsigned long)cmd->cq_buf, 0);
+	dma_unmap_single(&xdev->pdev->dev, cmd->dma, PAGE_SIZE,
+			 DMA_BIDIRECTIONAL);
+	free_pages((unsigned long)cmd->cmd_buf, 0);
+	dma_pool_destroy(cmd->pool);
+}
+EXPORT_SYMBOL(xsc_cmd_cleanup); + +static const char *cmd_status_str(u8 status) +{ + switch (status) { + case XSC_CMD_STAT_OK: + return "OK"; + case XSC_CMD_STAT_INT_ERR: + return "internal error"; + case XSC_CMD_STAT_BAD_OP_ERR: + return "bad operation"; + case XSC_CMD_STAT_BAD_PARAM_ERR: + return "bad parameter"; + case XSC_CMD_STAT_BAD_SYS_STATE_ERR: + return "bad system state"; + case XSC_CMD_STAT_BAD_RES_ERR: + return "bad resource"; + case XSC_CMD_STAT_RES_BUSY: + return "resource busy"; + case XSC_CMD_STAT_LIM_ERR: + return "limits exceeded"; + case XSC_CMD_STAT_BAD_RES_STATE_ERR: + return "bad resource state"; + case XSC_CMD_STAT_IX_ERR: + return "bad index"; + case XSC_CMD_STAT_NO_RES_ERR: + return "no resources"; + case XSC_CMD_STAT_BAD_INP_LEN_ERR: + return "bad input length"; + case XSC_CMD_STAT_BAD_OUTP_LEN_ERR: + return "bad output length"; + case XSC_CMD_STAT_BAD_QP_STATE_ERR: + return "bad QP state"; + case XSC_CMD_STAT_BAD_PKT_ERR: + return "bad packet (discarded)"; + case XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: + return "bad size too many outstanding CQEs"; + default: + return "unknown status"; + } +} + +int xsc_cmd_status_to_err(struct xsc_outbox_hdr *hdr) +{ + if (!hdr->status) + return 0; + + pr_warn("command failed, status %s(0x%x)\n", + cmd_status_str(hdr->status), hdr->status); + + switch (hdr->status) { + case XSC_CMD_STAT_OK: return 0; + case XSC_CMD_STAT_INT_ERR: return -EIO; + case XSC_CMD_STAT_BAD_OP_ERR: return -EOPNOTSUPP; + case XSC_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; + case XSC_CMD_STAT_BAD_RES_ERR: return -EINVAL; + case XSC_CMD_STAT_RES_BUSY: return -EBUSY; + case XSC_CMD_STAT_LIM_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; + case XSC_CMD_STAT_IX_ERR: return -EINVAL; + case XSC_CMD_STAT_NO_RES_ERR: return -EAGAIN; + case XSC_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; + case XSC_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; + case XSC_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_PKT_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; + default: return -EIO; + } +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c new file mode 100644 index 0000000000000000000000000000000000000000..49a00f759b5fdecf9ac82c70c64f92d18081b6b4 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */
+
+#include
+#include
+#include "common/driver.h"
+#include "common/cq.h"
+#include
+
+void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type)
+{
+	struct xsc_cq_table *table = &xdev->dev_res->cq_table;
+	struct xsc_core_cq *cq;
+
+	spin_lock(&table->lock);
+
+	cq = radix_tree_lookup(&table->tree, cqn);
+	if (cq)
+		atomic_inc(&cq->refcount);
+
+	spin_unlock(&table->lock);
+
+	if (!cq) {
+		xsc_core_warn(xdev, "Async event for bogus CQ 0x%x\n", cqn);
+		return;
+	}
+
+	cq->event(cq, event_type);
+
+	if (atomic_dec_and_test(&cq->refcount))
+		complete(&cq->free);
+}
+
+int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq,
+		       struct xsc_create_cq_mbox_in *in, int inlen)
+{
+	int err;
+	struct xsc_cq_table *table = &dev->dev_res->cq_table;
+	struct xsc_create_cq_mbox_out out;
+	struct xsc_destroy_cq_mbox_in din;
+	struct xsc_destroy_cq_mbox_out dout;
+
+	in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ);
+	memset(&out, 0, sizeof(out));
+	err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		return xsc_cmd_status_to_err(&out.hdr);
+
+	cq->cqn = be32_to_cpu(out.cqn);
+	cq->cons_index = 0;
+	cq->arm_sn = 0;
+	cq->arm_db = dev->regs.complete_db;
+	cq->ci_db = dev->regs.complete_reg;
+	cq->dev = dev;
+	atomic_set(&cq->refcount, 1);
+	init_completion(&cq->free);
+
+	spin_lock_irq(&table->lock);
+	err = radix_tree_insert(&table->tree, cq->cqn, cq);
+	spin_unlock_irq(&table->lock);
+	if (err)
+		goto err_cmd;
+
+	cq->pid = current->pid;
+	err = xsc_debug_cq_add(dev, cq);
+	if (err)
+		xsc_core_dbg(dev, "failed adding CQ 0x%x to debugfs\n", cq->cqn);
+
+	return 0;
+
+err_cmd:
+	memset(&din, 0, sizeof(din));
+	memset(&dout, 0, sizeof(dout));
+	din.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ);
+	xsc_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+	return err;
+}
+EXPORT_SYMBOL(xsc_core_create_cq);
+
+int xsc_core_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq)
+{
+	struct xsc_cq_table *table = &dev->dev_res->cq_table;
+	struct xsc_destroy_cq_mbox_in in;
+	struct xsc_destroy_cq_mbox_out out;
+	struct xsc_core_cq *tmp;
+	int err;
+
+	spin_lock_irq(&table->lock);
+	tmp = radix_tree_delete(&table->tree, cq->cqn);
+	spin_unlock_irq(&table->lock);
+	if (!tmp) {
+		xsc_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
+		return -EINVAL;
+	}
+	if (tmp != cq) {
+		xsc_core_warn(dev, "corruption on cqn 0x%x\n", cq->cqn);
+		return -EINVAL;
+	}
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ);
+	in.cqn = cpu_to_be32(cq->cqn);
+	err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		return xsc_cmd_status_to_err(&out.hdr);
+
+	xsc_debug_cq_remove(dev, cq);
+	if (atomic_dec_and_test(&cq->refcount))
+		complete(&cq->free);
+	wait_for_completion(&cq->free);
+
+	return 0;
+}
+EXPORT_SYMBOL(xsc_core_destroy_cq);
+
+int xsc_core_query_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq,
+		      struct xsc_query_cq_mbox_out *out)
+{
+	struct xsc_query_cq_mbox_in in;
+	int err;
+
+	memset(&in, 0, sizeof(in));
+	memset(out, 0, sizeof(*out));
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_CQ);
+	in.cqn = cpu_to_be32(cq->cqn);
+	err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+	if (err)
+		return err;
+
+	if (out->hdr.status)
+		return xsc_cmd_status_to_err(&out->hdr);
+
+	return err;
+}
+EXPORT_SYMBOL(xsc_core_query_cq);
+
+void xsc_init_cq_table(struct xsc_core_device *dev)
+{
+	struct xsc_cq_table *table = &dev->dev_res->cq_table;
+
+	spin_lock_init(&table->lock);
+	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+	xsc_cq_debugfs_init(dev);
+}
+
+void xsc_cleanup_cq_table(struct xsc_core_device *dev)
+{
+	xsc_cq_debugfs_cleanup(dev);
+}
diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c b/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..5ea8d8a29107272059704a9041130f17e647818c
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include
+#include
+#include "common/xsc_core.h"
+#include "common/xsc_hsi.h"
+#include "common/driver.h"
+#include "common/qp.h"
+#include "common/cq.h"
+
+enum {
+	QP_PID,
+	QP_STATE,
+	QP_XPORT,
+	QP_MTU,
+	QP_N_RECV,
+	QP_RECV_SZ,
+	QP_N_SEND,
+	QP_LOG_PG_SZ,
+	QP_RQPN,
+};
+
+static char *qp_fields[] = {
+	[QP_PID]	= "pid",
+	[QP_STATE]	= "state",
+	[QP_XPORT]	= "transport",
+	[QP_MTU]	= "mtu",
+	[QP_N_RECV]	= "num_recv",
+	[QP_RECV_SZ]	= "rcv_wqe_sz",
+	[QP_N_SEND]	= "num_send",
+	[QP_LOG_PG_SZ]	= "log2_page_sz",
+	[QP_RQPN]	= "remote_qpn",
+};
+
+enum {
+	EQ_NUM_EQES,
+	EQ_INTR,
+	EQ_LOG_PG_SZ,
+};
+
+static char *eq_fields[] = {
+	[EQ_NUM_EQES]	= "num_eqes",
+	[EQ_INTR]	= "intr",
+	[EQ_LOG_PG_SZ]	= "log_page_size",
+};
+
+enum {
+	CQ_PID,
+	CQ_NUM_CQES,
+	CQ_LOG_PG_SZ,
+};
+
+static char *cq_fields[] = {
+	[CQ_PID]	= "pid",
+	[CQ_NUM_CQES]	= "num_cqes",
+	[CQ_LOG_PG_SZ]	= "log_page_size",
+};
+
+struct dentry *xsc_debugfs_root;
+EXPORT_SYMBOL(xsc_debugfs_root);
+
+static ssize_t xsc_debugfs_reg_read(struct file *filp, char __user *buffer,
+				    size_t count, loff_t *ppos)
+{
+	char *buf;
+	int len;
+	char xsc_debugfs_reg_buf[256] = "";
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+
+	buf = kasprintf(GFP_KERNEL, "%s: %s\n",
+			"xsc debugfs",
+			xsc_debugfs_reg_buf);
+	if (!buf)
+		return -ENOMEM;
+
+	if (count < strlen(buf)) {
+		kfree(buf);
+		return -ENOSPC;
+	}
+
+	len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+
+	kfree(buf);
+
+	return len;
+}
+
+static ssize_t xsc_debugfs_reg_write(struct file *filp,
+				     const char __user *buffer,
+				     size_t count, loff_t *ppos)
+{
+	struct xsc_core_device *xdev = filp->private_data;
+	u64 reg;
+	int cnt, len;
+	int num;
+	int offset;
+	char xsc_debugfs_reg_buf[256] = "";
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+
+	if (count >= sizeof(xsc_debugfs_reg_buf))
+		return -ENOSPC;
+
+	len = simple_write_to_buffer(xsc_debugfs_reg_buf,
+				     sizeof(xsc_debugfs_reg_buf) - 1,
+				     ppos, buffer, count);
+	if (len < 0)
+		return len;
+
+	xsc_debugfs_reg_buf[len] = '\0';
+
+	if (strncmp(xsc_debugfs_reg_buf, "write", 5) == 0) {
+		cnt = sscanf(&xsc_debugfs_reg_buf[5], "%llx %n",
+			     &reg, &offset);
+		if (cnt == 1) {
+			int tmp;
+			int value;
+			int buf[8];
+			int *ptr;
+
+			offset += 5;
+			num = 0;
+			while (1) {
+				/* %n is not counted in sscanf's return value */
+				cnt = sscanf(&xsc_debugfs_reg_buf[offset], "%x %n", &value, &tmp);
+				if (cnt != 1)
+					break;
+				xsc_core_info(xdev, "write: 0x%llx = 0x%x\n",
+					      (reg + sizeof(int) * num), value);
+				offset += tmp;
+				buf[num++] = value;
+				if (num == 8)
+					break;
+			}
+			if (num > 1) {
+				ptr = &buf[0];
+				IA_WRITE(xdev, reg, ptr, num);
+			} else if (num == 1) {
+				REG_WR32(xdev, reg, buf[0]);
+			}
+		} else {
+			xsc_core_err(xdev, "write <reg> <value>\n");
+		}
+	} else if (strncmp(xsc_debugfs_reg_buf, "read", 4) == 0) {
+		cnt = sscanf(&xsc_debugfs_reg_buf[4], "%llx %d %n", &reg, &num, &offset);
+		if (cnt == 2) {
+			int *buf;
+			int i;
+			int *ptr;
+
+			buf = kcalloc(num, sizeof(int), GFP_KERNEL);
+			if (!buf)
+				return -ENOMEM;
+			ptr = buf;
+			IA_READ(xdev, reg, ptr, num);
+			xsc_core_info(xdev, "read: 0x%llx num:%d\n", reg, num);
+			for (i = 0; i < num; i++)
+				xsc_core_info(xdev, "read:0x%llx = %#x\n",
+					      (reg + sizeof(int) * i), buf[i]);
+			kfree(buf);
+		} else if (cnt == 1) {
+			int value = REG_RD32(xdev, reg);
+
+			xsc_core_info(xdev, "read: 0x%llx = %#x\n", reg, value);
+		} else {
+			xsc_core_err(xdev, "read <reg> [<num>]\n");
+		}
+	} else {
+		xsc_core_err(xdev, "Unknown command %s\n", xsc_debugfs_reg_buf);
+		xsc_core_err(xdev, "Available commands:\n");
+		xsc_core_err(xdev, "read <reg> [<num>]\n");
+		xsc_core_err(xdev, "write <reg> <value> [<value>...]\n");
+	}
+	return count;
+}
+
+static const struct file_operations xsc_debugfs_reg_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = xsc_debugfs_reg_read,
+	.write = xsc_debugfs_reg_write,
+};
+
+int xsc_debugfs_init(struct xsc_core_device *dev)
+{
+	const char *name = pci_name(dev->pdev);
+	struct dentry *pfile;
+
+	if (!xsc_debugfs_root)
+		return -ENOMEM;
+
+	dev->dev_res->dbg_root = debugfs_create_dir(name, xsc_debugfs_root);
+	if (dev->dev_res->dbg_root) {
+		pfile = debugfs_create_file("reg_ops", 0600,
+					    dev->dev_res->dbg_root, dev,
+					    &xsc_debugfs_reg_fops);
+		if (!pfile)
+			xsc_core_err(dev, "failed to create debugfs ops for %s\n", name);
+	} else {
+		xsc_core_err(dev, "failed to create debugfs dir for %s\n", name);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void xsc_debugfs_fini(struct xsc_core_device *dev)
+{
+	if (!xsc_debugfs_root)
+		return;
+
+	debugfs_remove_recursive(dev->dev_res->dbg_root);
+}
+
+void xsc_register_debugfs(void)
+{
+	xsc_debugfs_root = debugfs_create_dir("xsc_pci", NULL);
+}
+
+void xsc_unregister_debugfs(void)
+{
+	debugfs_remove(xsc_debugfs_root);
+}
+
+int xsc_qp_debugfs_init(struct xsc_core_device *dev)
+{
+	if (!xsc_debugfs_root)
+		return 0;
+
+	atomic_set(&dev->num_qps, 0);
+
+	dev->dev_res->qp_debugfs = debugfs_create_dir("QPs", dev->dev_res->dbg_root);
+	if (!dev->dev_res->qp_debugfs)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void xsc_qp_debugfs_cleanup(struct xsc_core_device *dev)
+{
+	if (!xsc_debugfs_root)
+		return;
+
+	debugfs_remove_recursive(dev->dev_res->qp_debugfs);
+}
+
+int xsc_eq_debugfs_init(struct xsc_core_device *dev)
+{
+	if (!xsc_debugfs_root)
+		return 0;
+
+	dev->dev_res->eq_debugfs = debugfs_create_dir("EQs", dev->dev_res->dbg_root);
+	if (!dev->dev_res->eq_debugfs)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void xsc_eq_debugfs_cleanup(struct xsc_core_device *dev)
+{
+	if (!xsc_debugfs_root)
+		return;
+
+	debugfs_remove_recursive(dev->dev_res->eq_debugfs);
+}
+
+static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
+			    loff_t *pos)
+{
+	struct xsc_cmd_stats *stats;
+	u64 field = 0;
+	int ret;
+	char tbuf[22];
+
+	if (*pos)
+		return 0;
+
+	stats = filp->private_data;
+	spin_lock(&stats->lock);
+	if (stats->n)
+		field = stats->sum / stats->n;
+	spin_unlock(&stats->lock);
+	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
+	if (ret > 0) {
+		if (copy_to_user(buf, tbuf, ret))
+			return -EFAULT;
+	}
+
+	*pos += ret;
+	return ret;
+}
+
+static ssize_t average_write(struct file *filp, const char __user *buf,
+			     size_t count, loff_t *pos)
+{
+	struct xsc_cmd_stats *stats;
+
+	stats = filp->private_data;
+	spin_lock(&stats->lock);
+	stats->sum = 0;
+	stats->n = 0;
+	spin_unlock(&stats->lock);
+
+	*pos += count;
+
+	return count;
+}
+
+static const
struct file_operations stats_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = average_read, + .write = average_write, +}; + +int xsc_cmdif_debugfs_init(struct xsc_core_device *xdev) +{ + struct xsc_cmd_stats *stats; + struct xsc_cmd *cmd; + struct dentry **cmdif_debugfs; + const char *namep; + int err; + int i; + + if (!xsc_debugfs_root) + return 0; + + cmd = &xdev->cmd; + cmdif_debugfs = &xdev->dev_res->cmdif_debugfs; + *cmdif_debugfs = debugfs_create_dir("commands", xdev->dev_res->dbg_root); + if (!*cmdif_debugfs) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) { + stats = &cmd->stats[i]; + namep = xsc_command_str(i); + if (strcmp(namep, "unknown command opcode")) { + stats->root = debugfs_create_dir(namep, *cmdif_debugfs); + if (!stats->root) { + xsc_core_warn(xdev, "failed adding command %d\n", i); + err = -ENOMEM; + goto out; + } + + stats->avg = debugfs_create_file("average", 0400, + stats->root, stats, + &stats_fops); + if (!stats->avg) { + xsc_core_warn(xdev, "failed creating debugfs file\n"); + err = -ENOMEM; + goto out; + } + + debugfs_create_u64("n", 0400, stats->root, &stats->n); + } + } + + return 0; +out: + debugfs_remove_recursive(xdev->dev_res->cmdif_debugfs); + return err; +} + +void xsc_cmdif_debugfs_cleanup(struct xsc_core_device *xdev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(xdev->dev_res->cmdif_debugfs); +} + +int xsc_cq_debugfs_init(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return 0; + + dev->dev_res->cq_debugfs = debugfs_create_dir("CQs", dev->dev_res->dbg_root); + if (!dev->dev_res->cq_debugfs) + return -ENOMEM; + + return 0; +} + +void xsc_cq_debugfs_cleanup(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->cq_debugfs); +} + +int xsc_qptrace_debugfs_init(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return 0; + + dev->dev_res->qptrace_debugfs = + debugfs_create_dir("QPTrace", dev->dev_res->dbg_root); + if (!dev->dev_res->qptrace_debugfs) + return -ENOMEM; + + return 0; +} + +void xsc_qptrace_debugfs_cleanup(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->qptrace_debugfs); +} + +static u64 qp_read_field(struct xsc_core_device *dev, struct xsc_core_qp *qp, + int index) +{ + struct xsc_query_qp_mbox_out *out; + struct xsc_qp_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + err = xsc_core_qp_query(dev, qp, out, sizeof(*out)); + if (err) { + xsc_core_warn(dev, "failed to query qp\n"); + goto out; + } + + ctx = &out->ctx; + switch (index) { + case QP_PID: + param = qp->pid; + break; + case QP_MTU: + param = ctx->mtu_mode ? 
IB_MTU_1024 : IB_MTU_4096;
+		break;
+	case QP_RQPN:
+		param = be32_to_cpu(ctx->remote_qpn) & 0xffffff;
+		break;
+	}
+
+out:
+	kfree(out);
+	return param;
+}
+
+static u64 eq_read_field(struct xsc_core_device *dev, struct xsc_eq *eq,
+			 int index)
+{
+	struct xsc_query_eq_mbox_out *out;
+	struct xsc_eq_context *ctx;
+	u64 param = 0;
+	int err;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return param;
+
+	ctx = &out->ctx;
+
+	err = xsc_core_eq_query(dev, eq, out, sizeof(*out));
+	if (err) {
+		xsc_core_warn(dev, "failed to query eq\n");
+		goto out;
+	}
+
+	switch (index) {
+	case EQ_NUM_EQES:
+		break;
+	case EQ_INTR:
+		break;
+	case EQ_LOG_PG_SZ:
+		break;
+	}
+
+out:
+	kfree(out);
+	return param;
+}
+
+static u64 cq_read_field(struct xsc_core_device *dev, struct xsc_core_cq *cq,
+			 int index)
+{
+	struct xsc_query_cq_mbox_out *out;
+	struct xsc_cq_context *ctx;
+	u64 param = 0;
+	int err;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return param;
+
+	ctx = &out->ctx;
+
+	err = xsc_core_query_cq(dev, cq, out);
+	if (err) {
+		xsc_core_warn(dev, "failed to query cq\n");
+		goto out;
+	}
+
+	switch (index) {
+	case CQ_PID:
+		break;
+	case CQ_NUM_CQES:
+		break;
+	case CQ_LOG_PG_SZ:
+		break;
+	}
+
+out:
+	kfree(out);
+	return param;
+}
+
+static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
+			loff_t *pos)
+{
+	struct xsc_field_desc *desc;
+	struct xsc_rsc_debug *d;
+	char tbuf[18];
+	u64 field;
+	int ret;
+
+	if (*pos)
+		return 0;
+
+	desc = filp->private_data;
+	d = (void *)(desc - desc->i) - sizeof(*d);
+	switch (d->type) {
+	case XSC_DBG_RSC_QP:
+		field = qp_read_field(d->xdev, d->object, desc->i);
+		break;
+
+	case XSC_DBG_RSC_EQ:
+		field = eq_read_field(d->xdev, d->object, desc->i);
+		break;
+
+	case XSC_DBG_RSC_CQ:
+		field = cq_read_field(d->xdev, d->object, desc->i);
+		break;
+
+	default:
+		xsc_core_warn(d->xdev, "invalid resource type %d\n", d->type);
+		return -EINVAL;
+	}
+
+	ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
+	if (ret > 0) {
+		if (copy_to_user(buf, tbuf, ret))
+			return -EFAULT;
+	}
+
+	*pos += ret;
+	return ret;
+}
+
+static const struct file_operations fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = dbg_read,
+};
+
+static int add_res_tree(struct xsc_core_device *dev, enum dbg_rsc_type type,
+			struct dentry *root, struct xsc_rsc_debug **dbg,
+			int rsn, char **field, int nfile, void *data)
+{
+	struct xsc_rsc_debug *d;
+	char resn[32];
+	int err;
+	int i;
+
+	d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	d->xdev = dev;
+	d->object = data;
+	d->type = type;
+	sprintf(resn, "0x%x", rsn);
+	d->root = debugfs_create_dir(resn, root);
+	if (!d->root) {
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	for (i = 0; i < nfile; i++) {
+		d->fields[i].i = i;
+		d->fields[i].dent = debugfs_create_file(field[i], 0400,
+							d->root, &d->fields[i],
+							&fops);
+		if (!d->fields[i].dent) {
+			err = -ENOMEM;
+			goto out_rem;
+		}
+	}
+	*dbg = d;
+
+	return 0;
+
+out_rem:
+	debugfs_remove_recursive(d->root);
+
+out_free:
+	kfree(d);
+	return err;
+}
+
+static void rem_res_tree(struct xsc_rsc_debug *d)
+{
+	debugfs_remove_recursive(d->root);
+	kfree(d);
+}
+
+int xsc_debug_qp_add(struct xsc_core_device *dev, struct xsc_core_qp *qp)
+{
+	int err;
+
+	if (!xsc_debugfs_root)
+		return 0;
+
+	err = add_res_tree(dev, XSC_DBG_RSC_QP, dev->dev_res->qp_debugfs,
+			   &qp->dbg, qp->qpn, qp_fields,
+			   ARRAY_SIZE(qp_fields), qp);
+	if (err)
+		qp->dbg = NULL;
+
+	return err;
+}
+
+void xsc_debug_qp_remove(struct xsc_core_device *dev, struct xsc_core_qp *qp)
+{
+	if (!xsc_debugfs_root)
+		return;
+
+	if (qp->dbg)
+		rem_res_tree(qp->dbg);
+}
+
+static int set_udp_sport(u32 qpn, u32 sport, struct xsc_core_device *xdev, struct xsc_qp_trace *t)
+{
+	int err;
+	struct xsc_ap_feat_mbox_in in;
+	struct xsc_ap_feat_mbox_out out;
+	struct timespec64 ts;
+	struct xsc_qpt_update_msg msg;
+
+	ktime_get_boottime_ts64(&ts);
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_AP_FEAT);
+	in.xsc_ap_feature_opcode = __cpu_to_be16(XSC_AP_FEAT_SET_UDP_SPORT);
+	in.ap.set_udp_sport.qpn = __cpu_to_be32(qpn);
+	in.ap.set_udp_sport.udp_sport = __cpu_to_be32(sport);
+
+	err = xsc_cmd_exec(xdev, (void *)&in, sizeof(in), (void *)&out, sizeof(out));
+	if (err || out.hdr.status) {
+		xsc_core_err(xdev, "Failed to set udp_sport, err(%u), status(%u)\n", err,
+			     out.hdr.status);
+		return -EINVAL;
+	}
+
+	msg.main_ver = YS_QPTRACE_VER_MAJOR;
+	msg.sub_ver = YS_QPTRACE_VER_MINOR;
+	msg.type = YS_QPTRACE_UPDATE_TYPE_SPORT;
+	msg.data.timestamp = (u64)(u32)ts.tv_sec * MSEC_PER_SEC +
+			     ts.tv_nsec / NSEC_PER_MSEC;
+	msg.data.qpn = qpn;
+	msg.data.bus = xdev->pdev->bus->number;
+	msg.data.dev = PCI_SLOT(xdev->pdev->devfn);
+	msg.data.fun = PCI_FUNC(xdev->pdev->devfn);
+	msg.data.update.sport.port_old = t->s_port;
+	msg.data.update.sport.port_new = __cpu_to_be16(sport);
+	t->s_port = msg.data.update.sport.port_new;
+
+	qpts_write_one_msg(&msg);
+
+	xsc_core_info(xdev, "Set qpn(%u) udp_sport(%u)\n", qpn, sport);
+
+	return 0;
+}
+
+static ssize_t trace_read(struct file *filp, char __user *buf, size_t count, loff_t *pos)
+{
+	struct xsc_core_qp *qp = filp->private_data;
+	struct xsc_qp_trace *trace_info;
+	int len;
+
+	if (*pos)
+		return 0;
+
+	if (!qp || !qp->trace_info)
+		return -EIO;
+
+	trace_info = qp->trace_info;
+
+	len = sizeof(struct xsc_qp_trace);
+	if (copy_to_user(buf, trace_info, len))
+		return -EFAULT;
+
+	*pos += len;
+	return len;
+}
+
+static ssize_t trace_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos)
+{
+	struct xsc_core_qp *qp = filp->private_data;
+	struct xsc_qp_trace *trace_info;
+	struct xsc_core_device *xdev;
+	int ret = 0, len;
+	u32 sport;
+	char tmp_buf[256] = "";
+
+	ret = -EIO;
+	if (!qp || !qp->dbg || !qp->dbg->xdev || !qp->trace_info) {
+		pr_err("%s error null pointer!\n", __func__);
+		goto trace_write_out;
+	}
+
+	trace_info = qp->trace_info;
+	xdev = qp->dbg->xdev;
+
+	ret = 0;
+	/* don't allow partial writes */
+	if (*pos != 0) {
+		xsc_core_err(xdev, "Don't allow partial writes!\n");
+		goto trace_write_out;
+	}
+
+	ret = -ENOSPC;
+	if (count >= sizeof(tmp_buf)) {
+		xsc_core_err(xdev, "Count exceeds the buffer size!\n");
+		goto trace_write_out;
+	}
+
+	len = simple_write_to_buffer(tmp_buf, sizeof(tmp_buf) - 1,
+				     pos, buf, count);
+	ret = len;
+	if (len < 0) {
+		xsc_core_err(xdev, "Write to buffer error(%d)!\n", len);
+		goto trace_write_out;
+	}
+
+	tmp_buf[len] = '\0';
+
+	/* supported command: "sport <port>", e.g. "sport 10000" */
+	if (strncmp(tmp_buf, "sport", 5) == 0) {
+		ret = kstrtouint(&tmp_buf[6], 0, &sport);
+		if (ret != 0) {
+			xsc_core_err(xdev, "error arguments: sport <port>\n");
+			ret = -EINVAL;
+			goto trace_write_out;
+		}
+		ret = set_udp_sport(trace_info->lqpn, sport, xdev, trace_info);
+		if (ret) {
+			ret = -EIO;
+			goto trace_write_out;
+		}
+	} else {
+		xsc_core_err(xdev, "invalid arguments: %s\n", tmp_buf);
+		ret = -EOPNOTSUPP;
+		goto trace_write_out;
+	}
+
+	return count;
+
+trace_write_out:
+	return ret;
+}
+
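+/*
+ * debugfs "QPTrace/<qpn>" file: a read returns the QP's binary
+ * struct xsc_qp_trace snapshot; a write accepts "sport <port>" to
+ * change the UDP source port the QP uses.
+ */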
+static const struct file_operations fops_trace = { + .owner = THIS_MODULE, + .open = simple_open, + .read = trace_read, + .write = trace_write, +}; + +int xsc_create_qptrace(struct xsc_core_device *dev, struct xsc_core_qp *qp) +{ + char name[16]; + + if (!xsc_debugfs_root) + return 0; + + snprintf(name, sizeof(name), "%d", qp->qpn); + + qp->trace = debugfs_create_file(name, 0644, dev->dev_res->qptrace_debugfs, + (void *)qp, &fops_trace); + if (!qp->trace) + return -1; + + return 0; +} + +void xsc_remove_qptrace(struct xsc_core_device *dev, struct xsc_core_qp *qp) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove(qp->trace); +} + +int xsc_debug_eq_add(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + int err; + + if (!xsc_debugfs_root) + return 0; + + err = add_res_tree(dev, XSC_DBG_RSC_EQ, dev->dev_res->eq_debugfs, + &eq->dbg, eq->eqn, eq_fields, + ARRAY_SIZE(eq_fields), eq); + if (err) + eq->dbg = NULL; + + return err; +} + +void xsc_debug_eq_remove(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + if (!xsc_debugfs_root) + return; + + if (eq->dbg) + rem_res_tree(eq->dbg); +} + +int xsc_debug_cq_add(struct xsc_core_device *dev, struct xsc_core_cq *cq) +{ + int err; + + if (!xsc_debugfs_root) + return 0; + + err = add_res_tree(dev, XSC_DBG_RSC_CQ, dev->dev_res->cq_debugfs, + &cq->dbg, cq->cqn, cq_fields, + ARRAY_SIZE(cq_fields), cq); + if (err) + cq->dbg = NULL; + + return err; +} + +void xsc_debug_cq_remove(struct xsc_core_device *dev, struct xsc_core_cq *cq) +{ + if (!xsc_debugfs_root) + return; + + if (cq->dbg) + rem_res_tree(cq->dbg); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c new file mode 100644 index 0000000000000000000000000000000000000000..7ea5e1c78230948f67e0e289d5b3d522c4455878 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_core.h" +#include "devlink.h" +#include "eswitch.h" + +static const struct devlink_ops xsc_devlink_ops = { + .eswitch_mode_set = xsc_devlink_eswitch_mode_set, + .eswitch_mode_get = xsc_devlink_eswitch_mode_get, +}; + +struct devlink *xsc_devlink_alloc(struct device *dev) +{ + return devlink_alloc(&xsc_devlink_ops, sizeof(struct xsc_core_device), dev); +} + +void xsc_devlink_free(struct devlink *devlink) +{ + devlink_free(devlink); +} + +int xsc_devlink_register(struct devlink *devlink, struct device *dev) +{ + int err = 0; + + devlink_register(devlink); + return err; +} + +void xsc_devlink_unregister(struct devlink *devlink) +{ + devlink_unregister(devlink); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h new file mode 100644 index 0000000000000000000000000000000000000000..c08d04bfa989c30554857578543b06049c37aa43 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */
+
+#ifndef XSC_DEVLINK_H
+#define XSC_DEVLINK_H
+
+#include <net/devlink.h>
+
+struct devlink *xsc_devlink_alloc(struct device *dev);
+void xsc_devlink_free(struct devlink *devlink);
+int xsc_devlink_register(struct devlink *devlink, struct device *dev);
+void xsc_devlink_unregister(struct devlink *devlink);
+
+#endif /* XSC_DEVLINK_H */
diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c
new file mode 100644
index 0000000000000000000000000000000000000000..1ce0123fcdd2ee66582bdf557a6a97dcc15b8b7a
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+#include
+#include
+#include "common/driver.h"
+#include "common/cq.h"
+#include "fw/xsc_fw.h"
+#include "wq.h"
+#include "common/xsc_core.h"
+
+enum {
+	XSC_EQE_SIZE		= sizeof(struct xsc_eqe),
+	XSC_EQE_OWNER_INIT_VAL	= 0x1,
+};
+
+enum {
+	XSC_NUM_SPARE_EQE	= 0x80,
+	XSC_NUM_ASYNC_EQE	= 0x100,
+};
+
+struct map_eq_in {
+	u64	mask;
+	u32	reserved;
+	u32	unmap_eqn;
+};
+
+struct cre_des_eq {
+	u8	reserved[15];
+	u8	eqn;
+};
+
+static int xsc_cmd_destroy_eq(struct xsc_core_device *dev, u32 eqn)
+{
+	struct xsc_destroy_eq_mbox_in in;
+	struct xsc_destroy_eq_mbox_out out;
+	int err;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_EQ);
+	in.eqn = cpu_to_be32(eqn);
+	err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		goto ex;
+
+	if (out.hdr.status)
+		err = xsc_cmd_status_to_err(&out.hdr);
+
+ex:
+	return err;
+}
+
+static struct xsc_eqe *get_eqe(struct xsc_eq *eq, u32 entry)
+{
+	return xsc_buf_offset(&eq->buf, entry * XSC_EQE_SIZE);
+}
+
+static struct xsc_eqe *next_eqe_sw(struct xsc_eq *eq)
+{
+	struct xsc_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
+
+	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ?
NULL : eqe; +} + +static void eq_update_ci(struct xsc_eq *eq, int arm) +{ + union xsc_eq_doorbell db; + + db.val = 0; + db.arm = !!arm; + db.eq_next_cid = eq->cons_index; + db.eq_id = eq->eqn; + writel(db.val, REG_ADDR(eq->dev, eq->doorbell)); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +void xsc_cq_completion(struct xsc_core_device *dev, u32 cqn) +{ + struct xsc_core_cq *cq; + struct xsc_cq_table *table = &dev->dev_res->cq_table; + + rcu_read_lock(); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + rcu_read_unlock(); + + if (!cq) { + xsc_core_err(dev, "Completion event for bogus CQ, cqn=%d\n", cqn); + return; + } + + ++cq->arm_sn; + + if (!cq->comp) + xsc_core_err(dev, "cq->comp is NULL\n"); + else + cq->comp(cq); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +void xsc_eq_cq_event(struct xsc_core_device *dev, u32 cqn, int event_type) +{ + struct xsc_core_cq *cq; + struct xsc_cq_table *table = &dev->dev_res->cq_table; + + spin_lock(&table->lock); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + spin_unlock(&table->lock); + + if (unlikely(!cq)) { + xsc_core_err(dev, "Async event for bogus CQ, cqn=%d\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +static int xsc_eq_int(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + struct xsc_eqe *eqe; + int eqes_found = 0; + int set_ci = 0; + u32 cqn, qpn, queue_id; + + while ((eqe = next_eqe_sw(eq))) { + /* Make sure we read EQ entry contents after we've + * checked the ownership bit. + */ + rmb(); + switch (eqe->type) { + case XSC_EVENT_TYPE_COMP: + case XSC_EVENT_TYPE_INTERNAL_ERROR: + /* eqe is changing */ + queue_id = eqe->queue_id; + cqn = queue_id; + xsc_cq_completion(dev, cqn); + break; + + case XSC_EVENT_TYPE_CQ_ERROR: + queue_id = eqe->queue_id; + cqn = queue_id; + xsc_eq_cq_event(dev, cqn, eqe->type); + break; + case XSC_EVENT_TYPE_WQ_CATAS_ERROR: + case XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case XSC_EVENT_TYPE_WQ_ACCESS_ERROR: + queue_id = eqe->queue_id; + qpn = queue_id; + xsc_qp_event(dev, qpn, eqe->type); + break; + default: + xsc_core_warn(dev, "Unhandle event %d on EQ %d\n", eqe->type, eq->eqn); + break; + } + + ++eq->cons_index; + eqes_found = 1; + ++set_ci; + + /* The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with XSC_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. 
+ */ + if (unlikely(set_ci >= XSC_NUM_SPARE_EQE)) { + xsc_core_dbg(dev, "EQ%d eq_num=%d qpn=%d, db_noarm\n", + eq->eqn, set_ci, eqe->queue_id); + eq_update_ci(eq, 0); + set_ci = 0; + } + } + + eq_update_ci(eq, 1); + + return eqes_found; +} + +static irqreturn_t xsc_msix_handler(int irq, void *eq_ptr) +{ + struct xsc_eq *eq = eq_ptr; + struct xsc_core_device *dev = eq->dev; + + xsc_eq_int(dev, eq); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +static void init_eq_buf(struct xsc_eq *eq) +{ + struct xsc_eqe *eqe; + int i; + + for (i = 0; i < eq->nent; i++) { + eqe = get_eqe(eq, i); + eqe->owner = XSC_EQE_OWNER_INIT_VAL; + } +} + +int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, + int nent, const char *name) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + u16 msix_vec_offset = dev->msix_vec_base + vecidx; + struct xsc_create_eq_mbox_in *in; + struct xsc_create_eq_mbox_out out; + int err; + int inlen; + int hw_npages; + + eq->nent = roundup_pow_of_two(roundup(nent, XSC_NUM_SPARE_EQE)); + err = xsc_buf_alloc(dev, eq->nent * XSC_EQE_SIZE, PAGE_SIZE, &eq->buf); + if (err) + return err; + + init_eq_buf(eq); + + hw_npages = DIV_ROUND_UP(eq->nent * XSC_EQE_SIZE, PAGE_SIZE_4K); + inlen = sizeof(*in) + sizeof(in->pas[0]) * hw_npages; + in = xsc_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto err_buf; + } + memset(&out, 0, sizeof(out)); + + xsc_fill_page_array(&eq->buf, in->pas, hw_npages); + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_EQ); + in->ctx.log_eq_sz = ilog2(eq->nent); + in->ctx.vecidx = cpu_to_be16(msix_vec_offset); + in->ctx.pa_num = cpu_to_be16(hw_npages); + in->ctx.glb_func_id = cpu_to_be16(dev->glb_func_id); + in->ctx.is_async_eq = (vecidx == XSC_EQ_VEC_ASYNC ? 1 : 0); + + err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + goto err_in; + + if (out.hdr.status) { + err = -ENOSPC; + goto err_in; + } + + snprintf(dev_res->irq_info[vecidx].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + name, pci_name(dev->pdev)); + + eq->eqn = be32_to_cpu(out.eqn); + eq->irqn = pci_irq_vector(dev->pdev, vecidx); + eq->dev = dev; + eq->doorbell = dev->regs.event_db; + eq->index = vecidx; + xsc_core_dbg(dev, "msix%d request vector%d eq%d irq%d\n", + vecidx, msix_vec_offset, eq->eqn, eq->irqn); + + err = request_irq(eq->irqn, xsc_msix_handler, 0, + dev_res->irq_info[vecidx].name, eq); + if (err) + goto err_eq; + + /* EQs are created in ARMED state + */ + eq_update_ci(eq, 1); + xsc_vfree(in); + return 0; + +err_eq: + xsc_cmd_destroy_eq(dev, eq->eqn); + +err_in: + xsc_vfree(in); + +err_buf: + xsc_buf_free(dev, &eq->buf); + return err; +} +EXPORT_SYMBOL_GPL(xsc_create_map_eq); + +int xsc_destroy_unmap_eq(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + int err; + + if (!xsc_fw_is_available(dev)) + return 0; + + free_irq(eq->irqn, eq); + err = xsc_cmd_destroy_eq(dev, eq->eqn); + if (err) + xsc_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", + eq->eqn); + xsc_buf_free(dev, &eq->buf); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_destroy_unmap_eq); + +int xsc_eq_init(struct xsc_core_device *dev) +{ + int err; + + spin_lock_init(&dev->dev_res->eq_table.lock); + + err = xsc_eq_debugfs_init(dev); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_eq_init); + +void xsc_eq_cleanup(struct xsc_core_device *dev) +{ + xsc_eq_debugfs_cleanup(dev); +} +EXPORT_SYMBOL_GPL(xsc_eq_cleanup); + +int xsc_start_eqs(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + int err; + + err = 
xsc_create_map_eq(dev, &table->async_eq, XSC_EQ_VEC_ASYNC,
+				XSC_NUM_ASYNC_EQE, "xsc_async_eq");
+	if (err)
+		xsc_core_warn(dev, "failed to create async EQ %d\n", err);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_start_eqs);
+
+void xsc_stop_eqs(struct xsc_core_device *dev)
+{
+	struct xsc_eq_table *table = &dev->dev_res->eq_table;
+
+	xsc_destroy_unmap_eq(dev, &table->async_eq);
+}
+
+int xsc_core_eq_query(struct xsc_core_device *dev, struct xsc_eq *eq,
+		      struct xsc_query_eq_mbox_out *out, int outlen)
+{
+	struct xsc_query_eq_mbox_in in;
+	int err;
+
+	memset(&in, 0, sizeof(in));
+	memset(out, 0, outlen);
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_EQ);
+	in.eqn = eq->eqn;
+
+	err = xsc_cmd_exec(dev, &in, sizeof(in), out, outlen);
+	if (err)
+		return err;
+
+	if (out->hdr.status)
+		err = xsc_cmd_status_to_err(&out->hdr);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_core_eq_query);
diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c
new file mode 100644
index 0000000000000000000000000000000000000000..005c8aa93d72075d9818976c1e1e86189fee0849
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c
@@ -0,0 +1,812 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include
+#include
+#include
+#include "common/vport.h"
+#include "eswitch.h"
+#include "common/xsc_lag.h"
+
+static int xsc_eswitch_check(const struct xsc_core_device *dev)
+{
+	if (!ESW_ALLOWED(dev->priv.eswitch))
+		return -EPERM;
+	if (!dev->priv.eswitch->num_vfs)
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+struct xsc_vport *__must_check
+xsc_eswitch_get_vport(struct xsc_eswitch *esw, u16 vport_num)
+{
+	u16 idx;
+
+	if (!esw || !xsc_core_is_vport_manager(esw->dev))
+		return ERR_PTR(-EPERM);
+
+	idx = xsc_eswitch_vport_num_to_index(esw, vport_num);
+	if (idx > esw->total_vports - 1) {
+		xsc_core_dbg(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
+			     vport_num, idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return &esw->vports[idx];
+}
+EXPORT_SYMBOL(xsc_eswitch_get_vport);
+
+static int eswitch_devlink_pf_support_check(const struct xsc_eswitch *esw)
+{
+	return 0;
+}
+
+static int esw_mode_from_devlink(u16 mode, u16 *xsc_mode)
+{
+	switch (mode) {
+	case DEVLINK_ESWITCH_MODE_LEGACY:
+		*xsc_mode = XSC_ESWITCH_LEGACY;
+		break;
+	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+		*xsc_mode = XSC_ESWITCH_OFFLOADS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int esw_mode_to_devlink(u16 xsc_mode, u16 *mode)
+{
+	switch (xsc_mode) {
+	case XSC_ESWITCH_LEGACY:
+		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
+		break;
+	case XSC_ESWITCH_OFFLOADS:
+		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack)
+{
+	struct xsc_core_device *dev = devlink_priv(devlink);
+	struct xsc_eswitch *esw = dev->priv.eswitch;
+	u16 cur_xsc_mode, xsc_mode = 0;
+	int err = 0;
+
+	err = xsc_eswitch_check(dev);
+	if (err)
+		return err;
+
+	if (esw_mode_from_devlink(mode, &xsc_mode))
+		return -EINVAL;
+
+	mutex_lock(&esw->mode_lock);
+	err = eswitch_devlink_pf_support_check(esw);
+	if (err)
+		goto done;
+
+	cur_xsc_mode = esw->mode;
+
+	if (cur_xsc_mode == xsc_mode)
+		goto done;
+
+	if (xsc_host_is_dpu_mode(dev) ||
+	    (cur_xsc_mode != XSC_ESWITCH_LEGACY && xsc_mode == XSC_ESWITCH_OFFLOADS) ||
+	    (cur_xsc_mode == XSC_ESWITCH_OFFLOADS && xsc_mode == XSC_ESWITCH_LEGACY)) {
+		xsc_core_err(dev, "%s failed: cannot switch from mode %d to mode %d\n",
+			     __func__, cur_xsc_mode, xsc_mode);
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
+	xsc_lag_disable(dev);
+
+	esw->mode = xsc_mode;
+	if (esw->mode == XSC_ESWITCH_OFFLOADS)
+		xsc_cmd_modify_hca(dev);
+
+	xsc_lag_enable(dev);
+
+done:
+	mutex_unlock(&esw->mode_lock);
+	return err;
+}
+
+int xsc_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+	struct xsc_core_device *dev = devlink_priv(devlink);
+	struct xsc_eswitch *esw = dev->priv.eswitch;
+	int err = 0;
+
+	err = xsc_eswitch_check(dev);
+	if (err)
+		return err;
+
+	mutex_lock(&esw->mode_lock);
+	if (xsc_host_is_dpu_mode(dev))
+		err = -EOPNOTSUPP;
+	else
+		err = esw_mode_to_devlink(esw->mode, mode);
+	mutex_unlock(&esw->mode_lock);
+
+	return err;
+}
+
+static void esw_vport_change_handle_locked(struct xsc_vport *vport)
+{
+	struct xsc_core_device *dev = vport->dev;
+	u8 mac[ETH_ALEN];
+
+	xsc_query_other_nic_vport_mac_address(dev, vport->vport, mac);
+}
+
+static void esw_vport_change_handler(struct work_struct *work)
+{
+	struct xsc_vport *vport =
+		container_of(work, struct xsc_vport, vport_change_handler);
+	struct xsc_eswitch *esw = vport->dev->priv.eswitch;
+
+	mutex_lock(&esw->state_lock);
+	esw_vport_change_handle_locked(vport);
+	mutex_unlock(&esw->state_lock);
+}
+
+void xsc_eswitch_enable_vport(struct xsc_eswitch *esw,
+			      struct xsc_vport *vport,
+			      enum xsc_eswitch_vport_event enabled_events)
+{
+	mutex_lock(&esw->state_lock);
+	if (vport->enabled)
+		goto unlock_out;
+
+	bitmap_zero(vport->req_vlan_bitmap, VLAN_N_VID);
+	bitmap_zero(vport->acl_vlan_8021q_bitmap, VLAN_N_VID);
+	bitmap_zero(vport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID);
+
+	/* Sync with current vport context */
+	vport->enabled_events = enabled_events;
+	vport->enabled = true;
+
+	esw->enabled_vports++;
+unlock_out:
+	mutex_unlock(&esw->state_lock);
+}
+
+void xsc_eswitch_disable_vport(struct xsc_eswitch *esw,
+			       struct xsc_vport *vport)
+{
+	u16 vport_num = vport->vport;
+
+	mutex_lock(&esw->state_lock);
+	if (!vport->enabled)
+		goto done;
+
+	xsc_core_dbg(esw->dev, "Disabling vport(%d)\n", vport_num);
+	/* Mark this vport as disabled to discard new events */
+	vport->enabled = false;
+	vport->enabled_events = 0;
+	esw->enabled_vports--;
+done:
+	mutex_unlock(&esw->state_lock);
+}
+
+void xsc_eswitch_enable_pf_vf_vports(struct xsc_eswitch *esw,
+				     enum xsc_eswitch_vport_event enabled_events)
+{
+	struct xsc_vport *vport;
+	int i;
+
+	vport = xsc_eswitch_get_vport(esw, XSC_VPORT_PF);
+	xsc_eswitch_enable_vport(esw, vport, enabled_events);
+
+	xsc_esw_for_each_vf_vport(esw, i, vport, esw->num_vfs)
+		xsc_eswitch_enable_vport(esw, vport, enabled_events);
+}
+
+#define XSC_LEGACY_SRIOV_VPORT_EVENTS (XSC_VPORT_UC_ADDR_CHANGE | \
+				       XSC_VPORT_MC_ADDR_CHANGE | \
+				       XSC_VPORT_PROMISC_CHANGE | \
+				       XSC_VPORT_VLAN_CHANGE)
+
+static int esw_legacy_enable(struct xsc_eswitch *esw)
+{
+	struct xsc_vport *vport;
+	unsigned long i;
+
+	xsc_esw_for_each_vf_vport(esw, i, vport, esw->num_vfs) {
+		vport->info.link_state = XSC_VPORT_ADMIN_STATE_AUTO;
+	}
+	xsc_eswitch_enable_pf_vf_vports(esw, XSC_LEGACY_SRIOV_VPORT_EVENTS);
+	return 0;
+}
+
+int xsc_eswitch_enable_locked(struct xsc_eswitch *esw, int mode, int num_vfs)
+{
+	int err;
+
+	lockdep_assert_held(&esw->mode_lock);
+
+	esw->num_vfs = num_vfs;
+
+	if (esw->mode == XSC_ESWITCH_NONE)
+		err = esw_legacy_enable(esw);
+	else
+		err = -EOPNOTSUPP;
+
+	if (err)
+		return err;
+
+	esw->mode = mode;
+
+	xsc_core_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
+		      mode == XSC_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+		      num_vfs, esw->enabled_vports);
+
+	return 0;
+}
+
+int xsc_eswitch_enable(struct xsc_eswitch *esw, int mode, int num_vfs)
+{
+	int ret;
+
+	mutex_lock(&esw->mode_lock);
+	ret = xsc_eswitch_enable_locked(esw, mode, num_vfs);
+	mutex_unlock(&esw->mode_lock);
+	return ret;
+}
+
+void xsc_eswitch_disable_locked(struct xsc_eswitch *esw, bool clear_vf)
+{
+	lockdep_assert_held(&esw->mode_lock);
+
+	if (esw->mode == XSC_ESWITCH_NONE)
+		return;
+
+	xsc_core_info(esw->dev, "Disable: mode(%s)\n",
+		      esw->mode == XSC_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS");
+
+	esw->mode = XSC_ESWITCH_NONE;
+	esw->num_vfs = 0;
+}
+
+void xsc_eswitch_disable(struct xsc_eswitch *esw, bool clear_vf)
+{
+	if (!ESW_ALLOWED(esw))
+		return;
+
+	mutex_lock(&esw->mode_lock);
+	xsc_eswitch_disable_locked(esw, clear_vf);
+	mutex_unlock(&esw->mode_lock);
+}
+
+int xsc_eswitch_init(struct xsc_core_device *dev)
+{
+	struct xsc_eswitch *esw;
+	struct xsc_vport *vport;
+	int i, total_vports, err;
+
+	if (!XSC_VPORT_MANAGER(dev)) {
+		if (xsc_core_is_pf(dev))
+			xsc_core_err(dev, "%s XSC_VPORT_MANAGER check fail\n", __func__);
+		return 0;
+	}
+
+	total_vports = xsc_eswitch_get_total_vports(dev);
+
+	xsc_core_info(dev, "Total vports %d\n", total_vports);
+
+	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
+	if (!esw)
+		return -ENOMEM;
+
+	esw->dev = dev;
+	esw->manager_vport = xsc_eswitch_manager_vport(dev);
+	esw->first_host_vport = xsc_eswitch_first_host_vport_num(dev);
+	esw->work_queue = create_singlethread_workqueue("xsc_esw_wq");
+	if (!esw->work_queue) {
+		err = -ENOMEM;
+		goto abort;
+	}
+	esw->vports = kcalloc(total_vports, sizeof(struct xsc_vport),
+			      GFP_KERNEL);
+	if (!esw->vports) {
+		err = -ENOMEM;
+		goto abort;
+	}
+	esw->total_vports = total_vports;
+
+	mutex_init(&esw->state_lock);
+	mutex_init(&esw->mode_lock);
+
+	xsc_esw_for_all_vports(esw, i, vport) {
+		vport->vport = xsc_eswitch_index_to_vport_num(esw, i);
+		vport->info.link_state = XSC_VPORT_ADMIN_STATE_AUTO;
+		vport->info.vlan_proto = htons(ETH_P_8021Q);
+		vport->info.roce = true;
+
+		vport->dev = dev;
+		INIT_WORK(&vport->vport_change_handler,
+			  esw_vport_change_handler);
+	}
+	esw->enabled_vports = 0;
+	esw->mode = XSC_ESWITCH_NONE;
+
+	dev->priv.eswitch = esw;
+	return 0;
+
+abort:
+	if (esw->work_queue)
+		destroy_workqueue(esw->work_queue);
+	kfree(esw->vports);
+	kfree(esw);
+	return err;
+}
+
+void xsc_eswitch_cleanup(struct xsc_core_device *dev)
+{
+	if (!dev->priv.eswitch || !XSC_VPORT_MANAGER(dev))
+		return;
+
+	xsc_core_dbg(dev, "cleanup\n");
+
+	destroy_workqueue(dev->priv.eswitch->work_queue);
+	kfree(dev->priv.eswitch->vports);
+	kfree(dev->priv.eswitch);
+}
+
+#ifdef XSC_ESW_GUID_ENABLE
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+	((u8 *)node_guid)[7] = mac[0];
+	((u8 *)node_guid)[6] = mac[1];
+	((u8 *)node_guid)[5] = mac[2];
+	((u8 *)node_guid)[4] = 0xff;
+	((u8 *)node_guid)[3] = 0xfe;
+	((u8 *)node_guid)[2] = mac[3];
+	((u8 *)node_guid)[1] = mac[4];
+	((u8 *)node_guid)[0] = mac[5];
+}
+#endif
+
+int xsc_eswitch_set_vport_mac(struct xsc_eswitch *esw,
+			      u16 vport, u8 mac[ETH_ALEN])
+{
+	struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport);
+	int err = 0;
+
+#ifdef XSC_ESW_GUID_ENABLE
+	u64 node_guid;
+#endif
+
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
+
+	if (is_multicast_ether_addr(mac))
+		return -EINVAL;
+
+	mutex_lock(&esw->state_lock);
+
+	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
xsc_core_warn(esw->dev, + "Set invalid MAC while spoofchk is on, vport(%d)\n", + vport); + + err = xsc_modify_other_nic_vport_mac_address(esw->dev, vport, mac, false); + if (err) { + xsc_core_err(esw->dev, + "Failed to xsc_modify_nic_vport_mac vport(%d) err=(%d)\n", + vport, err); + goto unlock; + } + + ether_addr_copy(evport->info.mac, mac); + +#ifdef XSC_ESW_GUID_ENABLE + node_guid_gen_from_mac(&node_guid, mac); + err = xsc_modify_other_nic_vport_node_guid(esw->dev, vport, node_guid); + if (err) + xsc_core_err(esw->dev, + "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", + vport, err); + evport->info.node_guid = node_guid; +#endif + +#ifdef XSC_ESW_FDB_ENABLE + if (evport->enabled && esw->mode == XSC_ESWITCH_LEGACY) + err = esw_vport_ingress_config(esw, evport); +#endif + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_mac); + +int xsc_eswitch_get_vport_mac(struct xsc_eswitch *esw, + u16 vport, u8 *mac) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + ether_addr_copy(mac, evport->info.mac); + mutex_unlock(&esw->state_lock); + return 0; +} + +int __xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, u16 vlan, + u8 qos, __be16 proto, u8 set_flags) +{ + struct xsc_modify_nic_vport_context_in *in; + int err, in_sz; + + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + 2; + + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->field_select.addresses_list = 1; + if ((set_flags & SET_VLAN_STRIP) || (set_flags & SET_VLAN_INSERT)) + in->nic_vport_ctx.vlan_allowed = 1; + else + in->nic_vport_ctx.vlan_allowed = 0; + in->vport_number = cpu_to_be16(vport); + in->other_vport = 1; + in->nic_vport_ctx.allowed_list_type = XSC_NVPRT_LIST_TYPE_VLAN_OFFLOAD; + in->nic_vport_ctx.vlan_proto = cpu_to_be16(ntohs(proto)); + in->nic_vport_ctx.qos = qos; + in->nic_vport_ctx.vlan = cpu_to_be16(vlan); + + err = xsc_modify_nic_vport_context(esw->dev, in, in_sz); + + kfree(in); + return err; +} + +int xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + u8 set_flags = 0; + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + + if (vlan || qos) + set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; + else + set_flags = CLR_VLAN_STRIP | CLR_VLAN_INSERT; + + mutex_lock(&esw->state_lock); + if (esw->mode != XSC_ESWITCH_LEGACY) { + if (!vlan) + goto unlock; /* compatibility with libvirt */ + + err = -EOPNOTSUPP; + goto unlock; + } + + err = __xsc_eswitch_set_vport_vlan(esw, vport, vlan, qos, vlan_proto, set_flags); + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL_GPL(xsc_eswitch_set_vport_vlan); + +static int xsc_vport_link2ifla(u8 esw_link) +{ + switch (esw_link) { + case XSC_VPORT_ADMIN_STATE_DOWN: + return IFLA_VF_LINK_STATE_DISABLE; + case XSC_VPORT_ADMIN_STATE_UP: + return IFLA_VF_LINK_STATE_ENABLE; + } + return IFLA_VF_LINK_STATE_AUTO; +} + +static int xsc_ifla_link2vport(u8 ifla_link) +{ + switch (ifla_link) { + case IFLA_VF_LINK_STATE_DISABLE: + return XSC_VPORT_ADMIN_STATE_DOWN; + case IFLA_VF_LINK_STATE_ENABLE: + return XSC_VPORT_ADMIN_STATE_UP; + } + return XSC_VPORT_ADMIN_STATE_AUTO; +} + +int xsc_eswitch_set_vport_state(struct xsc_eswitch *esw, + u16 vport, int link_state) +{ + u8 xsc_link = xsc_ifla_link2vport((u8)link_state); + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int 
err = 0;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
+
+	mutex_lock(&esw->state_lock);
+	err = xsc_modify_vport_admin_state(esw->dev, XSC_CMD_OP_MODIFY_VPORT_STATE,
+					   vport, 1, xsc_link);
+	if (err) {
+		xsc_core_warn(esw->dev,
+			      "Failed to set vport %d link state %d, err = %d\n",
+			      vport, xsc_link, err);
+		goto unlock;
+	}
+
+	evport->info.link_state = xsc_link;
+
+unlock:
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+EXPORT_SYMBOL(xsc_eswitch_set_vport_state);
+
+int xsc_eswitch_set_vport_spoofchk(struct xsc_eswitch *esw,
+				   u16 vport, u8 spoofchk)
+{
+	struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport);
+	bool pschk;
+	int err = 0;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
+
+	mutex_lock(&esw->state_lock);
+	if (esw->mode != XSC_ESWITCH_LEGACY) {
+		err = -EOPNOTSUPP;
+		goto unlock;
+	}
+
+	pschk = evport->info.spoofchk;
+	evport->info.spoofchk = spoofchk;
+	if (spoofchk && !is_valid_ether_addr(evport->info.mac))
+		xsc_core_warn(esw->dev, "Spoofchk is set while MAC is invalid, vport(%d)\n",
+			      evport->vport);
+
+	if (pschk != spoofchk) {
+		err = xsc_modify_nic_vport_spoofchk(esw->dev, vport, spoofchk);
+		if (err)
+			evport->info.spoofchk = pschk;
+	}
+
+unlock:
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+EXPORT_SYMBOL(xsc_eswitch_set_vport_spoofchk);
+
+static int xsc_eswitch_update_vport_trunk(struct xsc_eswitch *esw,
+					  struct xsc_vport *evport,
+					  unsigned long *old_trunk)
+{
+	DECLARE_BITMAP(diff_vlan_bm, VLAN_N_VID);
+	int err = 0;
+
+	bitmap_xor(diff_vlan_bm, old_trunk,
+		   evport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID);
+	if (!bitmap_weight(diff_vlan_bm, VLAN_N_VID))
+		return err;
+
+	if (err)
+		bitmap_copy(evport->info.vlan_trunk_8021q_bitmap, old_trunk, VLAN_N_VID);
+
+	return err;
+}
+
+int xsc_eswitch_add_vport_trunk_range(struct xsc_eswitch *esw,
+				      int vport, u16 start_vlan, u16 end_vlan)
+{
+	DECLARE_BITMAP(prev_vport_bitmap, VLAN_N_VID);
+	struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport);
+	int err = 0;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
+
+	if (end_vlan >= VLAN_N_VID || start_vlan > end_vlan)
+		return -EINVAL;
+
+	mutex_lock(&esw->state_lock);
+
+	if (evport->info.vlan || evport->info.qos) {
+		err = -EPERM;
+		xsc_core_warn(esw->dev,
+			      "VGT+ is not allowed when operating in VST mode vport(%d)\n",
+			      vport);
+		goto unlock;
+	}
+
+	bitmap_copy(prev_vport_bitmap, evport->info.vlan_trunk_8021q_bitmap,
+		    VLAN_N_VID);
+	bitmap_set(evport->info.vlan_trunk_8021q_bitmap, start_vlan,
+		   end_vlan - start_vlan + 1);
+	err = xsc_eswitch_update_vport_trunk(esw, evport, prev_vport_bitmap);
+
+unlock:
+	mutex_unlock(&esw->state_lock);
+
+	return err;
+}
+
+int xsc_eswitch_del_vport_trunk_range(struct xsc_eswitch *esw,
+				      int vport, u16 start_vlan, u16 end_vlan)
+{
+	DECLARE_BITMAP(prev_vport_bitmap, VLAN_N_VID);
+	struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport);
+	int err = 0;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
+
+	if (end_vlan >= VLAN_N_VID || start_vlan > end_vlan)
+		return -EINVAL;
+
+	mutex_lock(&esw->state_lock);
+	bitmap_copy(prev_vport_bitmap, evport->info.vlan_trunk_8021q_bitmap,
+		    VLAN_N_VID);
+	bitmap_clear(evport->info.vlan_trunk_8021q_bitmap, start_vlan,
+		     end_vlan - start_vlan + 1);
+	err = xsc_eswitch_update_vport_trunk(esw, evport, prev_vport_bitmap);
+	mutex_unlock(&esw->state_lock);
+
+	return err;
+}
+
+int
xsc_eswitch_set_vport_trust(struct xsc_eswitch *esw, + u16 vport_num, bool setting) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport_num); + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + if (esw->mode != XSC_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } + if (setting != evport->info.trusted) { + err = xsc_modify_nic_vport_trust(esw->dev, vport_num, setting); + if (err) + goto unlock; + + evport->info.trusted = setting; + } + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_trust); + +int xsc_eswitch_set_vport_rate(struct xsc_eswitch *esw, u16 vport, + u32 max_rate, u32 min_rate) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int err = 0; + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + err = xsc_modify_vport_max_rate(evport->dev, vport, max_rate); + if (!err) { + evport->info.max_rate = max_rate; + evport->info.min_rate = min_rate; + } + mutex_unlock(&esw->state_lock); + + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_rate); + +int xsc_eswitch_get_vport_config(struct xsc_eswitch *esw, + u16 vport, struct ifla_vf_info *ivi) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + memset(ivi, 0, sizeof(*ivi)); + ivi->vf = vport - 1; + + mutex_lock(&esw->state_lock); + ether_addr_copy(ivi->mac, evport->info.mac); + + ivi->linkstate = xsc_vport_link2ifla(evport->info.link_state); + ivi->spoofchk = evport->info.spoofchk; + ivi->trusted = evport->info.trusted; + ivi->min_tx_rate = evport->info.min_rate; + ivi->max_tx_rate = evport->info.max_rate; + ivi->vlan = evport->vlan_id; + ivi->vlan_proto = evport->vlan_proto; + + mutex_unlock(&esw->state_lock); + + return 0; +} +EXPORT_SYMBOL(xsc_eswitch_get_vport_config); + +int xsc_eswitch_vport_update_group(struct xsc_eswitch *esw, int vport_num, + u32 group_id) +{ + return 0; +} + +int xsc_eswitch_set_vgroup_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate) +{ + return 0; +} + +int xsc_eswitch_set_vgroup_max_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate) +{ + return 0; +} + +int xsc_eswitch_set_vgroup_min_rate(struct xsc_eswitch *esw, int group_id, + u32 min_rate) +{ + return 0; +} + +int xsc_eswitch_modify_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, void *in, int inlen) +{ + return 0; +} + +int xsc_eswitch_query_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, void *out, int outlen) +{ + return 0; +} + +int xsc_eswitch_get_vport_stats(struct xsc_eswitch *esw, + u16 vport, struct ifla_vf_stats *vf_stats) +{ + return 0; +} + +int xsc_eswitch_query_vport_drop_stats(struct xsc_core_device *dev, + struct xsc_vport *vport, + struct xsc_vport_drop_stats *stats) +{ + return 0; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h new file mode 100644 index 0000000000000000000000000000000000000000..711e698cc0cc885ad10e78c7c2e74b54c76cea6e --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
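+ *
+ * Descriptive comment added for clarity (summarizes the declarations below):
+ * e-switch vport management interface: per-vport MAC/VLAN/spoofchk/trust/rate
+ * setters, vport-number <-> vport-array-index mapping helpers (PF first, VFs
+ * next, ECPF and uplink in the last two slots), and DPU-mode device-ID checks.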
+ */ + +#ifndef ESWITCH_H +#define ESWITCH_H + +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/vport.h" + +struct xsc_vport_drop_stats { + u64 rx_dropped; + u64 tx_dropped; +}; + +int xsc_eswitch_init(struct xsc_core_device *dev); +void xsc_eswitch_cleanup(struct xsc_core_device *dev); +int xsc_eswitch_enable_locked(struct xsc_eswitch *esw, int mode, int num_vfs); +int xsc_eswitch_enable(struct xsc_eswitch *esw, int mode, int num_vfs); +void xsc_eswitch_disable_locked(struct xsc_eswitch *esw, bool clear_vf); +void xsc_eswitch_disable(struct xsc_eswitch *esw, bool clear_vf); + +int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mod, struct netlink_ext_ack *extack); +int xsc_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); + +struct xsc_vport *__must_check +xsc_eswitch_get_vport(struct xsc_eswitch *esw, u16 vport_num); +int xsc_eswitch_get_vport_config(struct xsc_eswitch *esw, + u16 vport, struct ifla_vf_info *ivi); +int xsc_eswitch_set_vport_mac(struct xsc_eswitch *esw, + u16 vport, u8 mac[ETH_ALEN]); +int xsc_eswitch_get_vport_mac(struct xsc_eswitch *esw, + u16 vport, u8 *mac); +int xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, + u16 vlan, u8 qos, __be16 vlan_proto); +int xsc_eswitch_set_vport_state(struct xsc_eswitch *esw, + u16 vport, int link_state); +int xsc_eswitch_set_vport_spoofchk(struct xsc_eswitch *esw, + u16 vport, u8 spoofchk); +int xsc_eswitch_set_vport_trust(struct xsc_eswitch *esw, + u16 vport_num, bool setting); +int xsc_eswitch_set_vport_rate(struct xsc_eswitch *esw, u16 vport, + u32 max_rate, u32 min_rate); +int xsc_eswitch_vport_update_group(struct xsc_eswitch *esw, int vport_num, + u32 group_id); +int xsc_eswitch_set_vgroup_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate); +int xsc_eswitch_set_vgroup_max_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate); +int xsc_eswitch_set_vgroup_min_rate(struct xsc_eswitch *esw, int group_id, + u32 min_rate); +int xsc_eswitch_add_vport_trunk_range(struct xsc_eswitch *esw, + int vport, u16 start_vlan, u16 end_vlan); +int xsc_eswitch_del_vport_trunk_range(struct xsc_eswitch *esw, + int vport, u16 start_vlan, u16 end_vlan); +int xsc_eswitch_modify_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, + void *in, int inlen); +int xsc_eswitch_query_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, + void *out, int outlen); +int xsc_eswitch_get_vport_stats(struct xsc_eswitch *esw, + u16 vport, + struct ifla_vf_stats *vf_stats); +int xsc_eswitch_query_vport_drop_stats(struct xsc_core_device *dev, + struct xsc_vport *vport, + struct xsc_vport_drop_stats *stats); +int xsc_eswitch_set_vport_rate(struct xsc_eswitch *esw, u16 vport, + u32 max_rate, u32 min_rate); + +#define xsc_esw_for_all_vports(esw, i, vport) \ + for ((i) = XSC_VPORT_PF; \ + (vport) = &(esw)->vports[(i)], \ + (i) < (esw)->total_vports; (i)++) + +#define xsc_esw_for_each_vf_vport(esw, i, vport, nvfs) \ + for ((i) = XSC_VPORT_FIRST_VF; \ + (vport) = &(esw)->vports[(i)], \ + (i) <= (nvfs); (i)++) + +static inline int xsc_eswitch_uplink_idx(struct xsc_eswitch *esw) +{ + /* Uplink always locate at the last element of the array.*/ + return esw->total_vports - 1; +} + +static inline int xsc_eswitch_ecpf_idx(struct xsc_eswitch *esw) +{ + return esw->total_vports - 2; +} + +static inline int xsc_eswitch_vport_num_to_index(struct xsc_eswitch *esw, + u16 vport_num) +{ + if (vport_num == XSC_VPORT_ECPF) { + if 
(!xsc_ecpf_vport_exists(esw->dev) && + !xsc_core_is_ecpf_esw_manager(esw->dev)) + xsc_core_warn(esw->dev, "ECPF vport doesn't exist!\n"); + return xsc_eswitch_ecpf_idx(esw); + } + + if (vport_num == XSC_VPORT_UPLINK) + return xsc_eswitch_uplink_idx(esw); + + /* PF and VF vports start from 0 to max_vfs */ + return vport_num; +} + +static inline u16 xsc_eswitch_index_to_vport_num(struct xsc_eswitch *esw, + int index) +{ + if (index == xsc_eswitch_uplink_idx(esw)) + return XSC_VPORT_UPLINK; + return index; +} + +static inline u16 xsc_eswitch_manager_vport(struct xsc_core_device *dev) +{ + return xsc_core_is_ecpf_esw_manager(dev) ? + XSC_VPORT_ECPF : XSC_VPORT_PF; +} + +static inline u16 xsc_eswitch_first_host_vport_num(struct xsc_core_device *dev) +{ + return xsc_core_is_ecpf_esw_manager(dev) ? + XSC_VPORT_PF : XSC_VPORT_FIRST_VF; +} + +static inline u8 xsc_get_eswitch_mode(struct xsc_core_device *dev) +{ + struct xsc_eswitch *esw = dev->priv.eswitch; + + return ESW_ALLOWED(esw) ? esw->mode : XSC_ESWITCH_NONE; +} + +static inline bool xsc_host_is_dpu_mode(struct xsc_core_device *dev) +{ + return (dev->pdev->device == XSC_MF_HOST_PF_DEV_ID || + dev->pdev->device == XSC_MV_HOST_PF_DEV_ID); +} + +static inline bool xsc_pf_vf_is_dpu_mode(struct xsc_core_device *dev) +{ + return (dev->pdev->device == XSC_MF_HOST_PF_DEV_ID || + dev->pdev->device == XSC_MF_HOST_VF_DEV_ID || + dev->pdev->device == XSC_MV_HOST_PF_DEV_ID || + dev->pdev->device == XSC_MV_HOST_VF_DEV_ID); +} + +static inline bool xsc_get_pp_bypass_res(struct xsc_core_device *dev, bool esw_set) +{ + return esw_set || xsc_pf_vf_is_dpu_mode(dev); +} + +static inline bool xsc_get_pct_drop_config(struct xsc_core_device *dev) +{ + return (dev->pdev->device == XSC_MC_PF_DEV_ID) || + (dev->pdev->device == XSC_MF_SOC_PF_DEV_ID) || + (dev->pdev->device == XSC_MS_PF_DEV_ID) || + (dev->pdev->device == XSC_MV_SOC_PF_DEV_ID); +} + +#endif /* ESWITCH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c new file mode 100644 index 0000000000000000000000000000000000000000..91827fd56b0079c6ad30c63b20e1c7f05ca2a8ed --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
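+ *
+ * Descriptive comment added for clarity (summarizes this file): firmware
+ * command wrappers for the HCA capability query, HCA enable/disable/modify
+ * and board GUID query. Boards are tracked in a small static table keyed
+ * by board serial number.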
+ */ + +#include "common/driver.h" +#include +#include "eswitch.h" + +static struct xsc_board_info *board_info[MAX_BOARD_NUM]; + +static struct xsc_board_info *xsc_get_board_info(char *board_sn) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) { + if (!board_info[i]) + continue; + if (!strncmp(board_info[i]->board_sn, board_sn, XSC_BOARD_SN_LEN)) + return board_info[i]; + } + return NULL; +} + +static struct xsc_board_info *xsc_alloc_board_info(void) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) { + if (!board_info[i]) + break; + } + if (i == MAX_BOARD_NUM) + return NULL; + board_info[i] = vmalloc(sizeof(*board_info[i])); + if (!board_info[i]) + return NULL; + memset(board_info[i], 0, sizeof(*board_info[i])); + board_info[i]->board_id = i; + return board_info[i]; +} + +void xsc_free_board_info(void) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) + vfree(board_info[i]); +} + +int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, + struct xsc_caps *caps) +{ + struct xsc_cmd_query_hca_cap_mbox_out *out; + struct xsc_cmd_query_hca_cap_mbox_in in; + int err; + u16 t16; + struct xsc_board_info *board_info = NULL; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HCA_CAP); + in.cpu_num = cpu_to_be16(num_online_cpus()); + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto out_out; + } + + dev->glb_func_id = be32_to_cpu(out->hca_cap.glb_func_id); + caps->pf0_vf_funcid_base = be16_to_cpu(out->hca_cap.pf0_vf_funcid_base); + caps->pf0_vf_funcid_top = be16_to_cpu(out->hca_cap.pf0_vf_funcid_top); + caps->pf1_vf_funcid_base = be16_to_cpu(out->hca_cap.pf1_vf_funcid_base); + caps->pf1_vf_funcid_top = be16_to_cpu(out->hca_cap.pf1_vf_funcid_top); + caps->pcie0_pf_funcid_base = be16_to_cpu(out->hca_cap.pcie0_pf_funcid_base); + caps->pcie0_pf_funcid_top = be16_to_cpu(out->hca_cap.pcie0_pf_funcid_top); + caps->pcie1_pf_funcid_base = be16_to_cpu(out->hca_cap.pcie1_pf_funcid_base); + caps->pcie1_pf_funcid_top = be16_to_cpu(out->hca_cap.pcie1_pf_funcid_top); + caps->funcid_to_logic_port = be16_to_cpu(out->hca_cap.funcid_to_logic_port); + if (xsc_core_is_pf(dev)) { + xsc_core_dbg(dev, "pf0_vf_range(%4u, %4u), pf1_vf_range(%4u, %4u)\n", + caps->pf0_vf_funcid_base, caps->pf0_vf_funcid_top, + caps->pf1_vf_funcid_base, caps->pf1_vf_funcid_top); + xsc_core_dbg(dev, "pcie0_pf_range=(%4u, %4u), pcie1_pf_range=(%4u, %4u)\n", + caps->pcie0_pf_funcid_base, caps->pcie0_pf_funcid_top, + caps->pcie1_pf_funcid_base, caps->pcie1_pf_funcid_top); + } + caps->pcie_host = out->hca_cap.pcie_host; + caps->nif_port_num = out->hca_cap.nif_port_num; + caps->hw_feature_flag = be32_to_cpu(out->hca_cap.hw_feature_flag); + + caps->raweth_qp_id_base = be16_to_cpu(out->hca_cap.raweth_qp_id_base); + caps->raweth_qp_id_end = be16_to_cpu(out->hca_cap.raweth_qp_id_end); + caps->raweth_rss_qp_id_base = be16_to_cpu(out->hca_cap.raweth_rss_qp_id_base); + caps->raw_tpe_qp_num = be16_to_cpu(out->hca_cap.raw_tpe_qp_num); + caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz; + caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz; + caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq); + caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq); + caps->flags = be64_to_cpu(out->hca_cap.flags); + caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support); + caps->log_max_msg = out->hca_cap.log_max_msg & 
0x1f;
+	caps->num_ports = out->hca_cap.num_ports & 0xf;
+	caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
+	caps->log_max_eq = out->hca_cap.log_max_eq & 0xf;
+	caps->log_max_msix = out->hca_cap.log_max_msix & 0xf;
+	caps->mac_port = out->hca_cap.mac_port & 0xff;
+	dev->mac_port = caps->mac_port;
+	if (caps->num_ports > XSC_MAX_FW_PORTS) {
+		xsc_core_err(dev, "device has %d ports while the driver supports max %d ports\n",
+			     caps->num_ports, XSC_MAX_FW_PORTS);
+		err = -EINVAL;
+		goto out_out;
+	}
+	caps->send_ds_num = out->hca_cap.send_seg_num;
+	caps->send_wqe_shift = out->hca_cap.send_wqe_shift;
+	caps->recv_ds_num = out->hca_cap.recv_seg_num;
+	caps->recv_wqe_shift = out->hca_cap.recv_wqe_shift;
+
+	caps->embedded_cpu = 0;
+	caps->ecpf_vport_exists = 0;
+	caps->eswitch_manager = 1;
+	caps->vport_group_manager = 1;
+	caps->log_max_current_uc_list = 0;
+	caps->log_max_current_mc_list = 0;
+	caps->log_max_vlan_list = 8;
+	caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
+	caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
+	caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f;
+	caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
+	caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
+	caps->log_max_mcg = out->hca_cap.log_max_mcg;
+	caps->log_max_mtt = out->hca_cap.log_max_mtt;
+	caps->log_max_tso = out->hca_cap.log_max_tso;
+	caps->hca_core_clock = be32_to_cpu(out->hca_cap.hca_core_clock);
+	caps->max_rwq_indirection_tables =
+		be32_to_cpu(out->hca_cap.max_rwq_indirection_tables);
+	caps->max_rwq_indirection_table_size =
+		be32_to_cpu(out->hca_cap.max_rwq_indirection_table_size);
+	caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+	caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
+	caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
+	caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
+	caps->rx_pkt_len_max = be32_to_cpu(out->hca_cap.rx_pkt_len_max);
+	caps->max_vfs = be16_to_cpu(out->hca_cap.max_vfs);
+	caps->qp_rate_limit_min = be32_to_cpu(out->hca_cap.qp_rate_limit_min);
+	caps->qp_rate_limit_max = be32_to_cpu(out->hca_cap.qp_rate_limit_max);
+
+#ifdef MSIX_SUPPORT
+	caps->msix_enable = 1;
+#else
+	caps->msix_enable = 0;
+#endif
+
+	caps->msix_base = be16_to_cpu(out->hca_cap.msix_base);
+	caps->msix_num = be16_to_cpu(out->hca_cap.msix_num);
+
+	t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size);
+	if (t16 & 0x8000) {
+		caps->bf_reg_size = 1 << (t16 & 0x1f);
+		caps->bf_regs_per_page = XSC_BF_REGS_PER_PAGE;
+	} else {
+		caps->bf_reg_size = 0;
+		caps->bf_regs_per_page = 0;
+	}
+	caps->min_page_sz = ~(u32)((1 << PAGE_SHIFT) - 1);
+
+	caps->dcbx = 1;
+	caps->qos = 1;
+	caps->ets = 1;
+	caps->dscp = 1;
+	caps->max_tc = out->hca_cap.max_tc;
+	caps->log_max_qp_depth = out->hca_cap.log_max_qp_depth & 0xff;
+	caps->mac_bit = out->hca_cap.mac_bit;
+	caps->lag_logic_port_ofst = out->hca_cap.lag_logic_port_ofst;
+
+	dev->chip_ver_h = be32_to_cpu(out->hca_cap.chip_ver_h);
+	dev->chip_ver_m = be32_to_cpu(out->hca_cap.chip_ver_m);
+	dev->chip_ver_l = be32_to_cpu(out->hca_cap.chip_ver_l);
+	dev->hotfix_num = be32_to_cpu(out->hca_cap.hotfix_num);
+	dev->feature_flag = be32_to_cpu(out->hca_cap.feature_flag);
+
+	board_info = xsc_get_board_info(out->hca_cap.board_sn);
+	if (!board_info) {
+		board_info = xsc_alloc_board_info();
+		if (!board_info) {
+			/* fixed: returning directly here leaked 'out' */
+			err = -ENOMEM;
+			goto out_out;
+		}
+
+		memcpy(board_info->board_sn, out->hca_cap.board_sn, sizeof(out->hca_cap.board_sn));
+	}
+	dev->board_info = board_info;
+
+	if (xsc_core_is_pf(dev)) {
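+		/* Doorbell and completion register offsets are reported by
+		 * the firmware capability response; only the PF consumes
+		 * them here.
+		 */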
+		dev->regs.tx_db = be64_to_cpu(out->hca_cap.tx_db);
+		dev->regs.rx_db = be64_to_cpu(out->hca_cap.rx_db);
+		dev->regs.complete_db = be64_to_cpu(out->hca_cap.complete_db);
+		dev->regs.complete_reg = be64_to_cpu(out->hca_cap.complete_reg);
+		dev->regs.event_db = be64_to_cpu(out->hca_cap.event_db);
+	}
+
+	dev->fw_version_major = out->hca_cap.fw_ver.fw_version_major;
+	dev->fw_version_minor = out->hca_cap.fw_ver.fw_version_minor;
+	dev->fw_version_patch = be16_to_cpu(out->hca_cap.fw_ver.fw_version_patch);
+	dev->fw_version_tweak = be32_to_cpu(out->hca_cap.fw_ver.fw_version_tweak);
+	dev->fw_version_extra_flag = out->hca_cap.fw_ver.fw_version_extra_flag;
+	dev->reg_mr_via_cmdq = out->hca_cap.reg_mr_via_cmdq;
+out_out:
+	kfree(out);
+
+	return err;
+}
+
+int xsc_cmd_enable_hca(struct xsc_core_device *dev, u16 vf_num, u16 max_msix)
+{
+	struct xsc_cmd_enable_hca_mbox_in in;
+	struct xsc_cmd_enable_hca_mbox_out out;
+	int err;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_HCA);
+
+	in.vf_num = cpu_to_be16(vf_num);
+	in.max_msix_vec = cpu_to_be16(max_msix);
+	in.cpu_num = cpu_to_be16(num_online_cpus());
+	in.pp_bypass = xsc_get_pp_bypass_res(dev, false);
+	in.esw_mode = XSC_ESWITCH_LEGACY;
+
+	err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err || out.hdr.status) {
+		xsc_core_err(dev,
+			     "cpu's msix vec(%u) not enough for all %u vfs, err=%d, status=%d\n",
+			     max_msix, vf_num, err, out.hdr.status);
+		return -EINVAL;
+	}
+
+	return err;
+}
+
+int xsc_cmd_disable_hca(struct xsc_core_device *dev, u16 vf_num)
+{
+	struct xsc_cmd_disable_hca_mbox_in in;
+	struct xsc_cmd_disable_hca_mbox_out out;
+	int err;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DISABLE_HCA);
+	in.vf_num = cpu_to_be16(vf_num);
+	in.pp_bypass = xsc_get_pp_bypass_res(dev, false);
+	in.esw_mode = XSC_ESWITCH_NONE;
+
+	err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err || out.hdr.status) {
+		xsc_core_err(dev, "failed to disable hca, err=%d, status=%d\n",
+			     err, out.hdr.status);
+		return -EINVAL;
+	}
+
+	return err;
+}
+
+int xsc_cmd_modify_hca(struct xsc_core_device *dev)
+{
+	struct xsc_cmd_modify_hca_mbox_in in;
+	struct xsc_cmd_modify_hca_mbox_out out;
+	int err;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_HCA);
+	in.pp_bypass = xsc_get_pp_bypass_res(dev, true);
+	in.esw_mode = xsc_get_eswitch_mode(dev);
+
+	err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		err = xsc_cmd_status_to_err(&out.hdr);
+
+	return err;
+}
+
+static int xsc_cmd_query_guid(struct xsc_core_device *dev)
+{
+	struct xsc_cmd_query_guid_mbox_in in;
+	struct xsc_cmd_query_guid_mbox_out out;
+	int err;
+
+	/* fixed: zero the mailboxes like the other command wrappers do,
+	 * so no uninitialized stack bytes are sent to firmware
+	 */
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_GUID);
+	err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		return xsc_cmd_status_to_err(&out.hdr);
+	dev->board_info->guid = out.guid;
+	dev->board_info->guid_valid = 1;
+	return 0;
+}
+
+int xsc_query_guid(struct xsc_core_device *dev)
+{
+	if (dev->board_info->guid_valid)
+		return 0;
+
+	return xsc_cmd_query_guid(dev);
+}
diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h
new file mode 100644
index 0000000000000000000000000000000000000000..94d8438010309c64a42a1abb2c8fb1a8ca036a90
--- /dev/null
+++ 
b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef BITOPS_H +#define BITOPS_H + +#include +#include + +#define __round_mask(x, y) ((__typeof__(x))((y) - 1)) +#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); + +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#define clear_bit(bit, bitmap) __clear_bit(bit, bitmap) + +static inline void xsc_clear_bit(int bit, long *bitmap) +{ + clear_bit(bit, bitmap); +} + +static inline int xsc_test_bit(int bit, long *bitmap) +{ + return test_bit(bit, bitmap); +} + +static inline int xsc_test_and_set_bit(int bit, long *bitmap) +{ + return test_and_set_bit(bit, bitmap); +} + +static inline void xsc_set_bit(int bit, long *bitmap) +{ + set_bit(bit, bitmap); +} + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c new file mode 100644 index 0000000000000000000000000000000000000000..ca5e889050b3c2f896452cd404522549d5d41a22 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" + +#include "xsc_reg_struct.h" +#include "xsc_fw.h" +#include "xsc_flow.h" + +#include + +static inline void xsc_iae_lock(struct xsc_core_device *dev, int grp) +{ + spin_lock_bh(&get_xsc_res(dev)->iae_lock[grp]); +} + +static inline void xsc_iae_unlock(struct xsc_core_device *dev, int grp) +{ + spin_unlock_bh(&get_xsc_res(dev)->iae_lock[grp]); +} + +static inline int xsc_iae_idx_get(struct xsc_core_device *dev, int grp) +{ + return get_xsc_res(dev)->iae_idx[grp]; +} + +static inline int xsc_iae_grp_get(struct xsc_core_device *dev) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + return atomic_inc_return(&xres->iae_grp) & XSC_RES_IAE_GRP_MASK; +} + +static int xsc_cmd_exec_create_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_create_mkey_mbox_out *resp = out; + u32 mpt_idx = 0; + + if (alloc_mpt_entry(xdev, &mpt_idx)) + return -EINVAL; + + resp->mkey = cpu_to_be32(mpt_idx & 0xffffff); + resp->hdr.status = 0; + + return 0; +} + +int xsc_create_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + unsigned long flags; + struct xsc_resources *xres = get_xsc_res(xdev); + int ret = 0; + + xsc_acquire_lock(&xres->lock, &flags); + ret = xsc_cmd_exec_create_mkey(xdev, in, out); + xsc_release_lock(&xres->lock, flags); + return ret; +} + +static int xsc_cmd_exec_destroy_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_destroy_mkey_mbox_in *req = in; + struct xsc_destroy_mkey_mbox_out *resp = out; + u32 mkey = be32_to_cpu(req->mkey); + u32 mpt_idx = xsc_mkey_to_idx(mkey); + + dealloc_mpt_entry(xdev, &mpt_idx); + + resp->hdr.status = 0; + + return 0; +} + +int xsc_destroy_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + unsigned long flags; + struct xsc_resources *xres = get_xsc_res(xdev); + int ret = 0; + + xsc_acquire_lock(&xres->lock, &flags); + ret = xsc_cmd_exec_destroy_mkey(xdev, in, out); + 
xsc_release_lock(&xres->lock, flags); + return ret; +} + +static int xsc_cmd_exec_reg_mr(struct xsc_core_device *dev, void *in, void *out) +{ + struct xsc_register_mr_mbox_in *req = in; + struct xsc_register_mr_mbox_out *resp = out; + struct xsc_mpt_entry mpt_ent; + u32 mpt_idx = 0; + u32 mtt_base; + u64 va = be64_to_cpu(req->req.va_base); + u32 mem_size = be32_to_cpu(req->req.len); + u32 pdn = be32_to_cpu(req->req.pdn); + u32 key = be32_to_cpu(req->req.mkey); + int pa_num = be32_to_cpu(req->req.pa_num); + u32 *ptr; + u64 reg_addr; + int i; + int reg_stride; + int iae_idx, iae_grp; + + if (pa_num && alloc_mtt_entry(dev, pa_num, &mtt_base)) + return -EINVAL; + + mpt_idx = xsc_mkey_to_idx(key); + mpt_ent.va_l = va & 0xFFFFFFFF; + mpt_ent.va_h = va >> 32; + mpt_ent.mem_size = mem_size; + mpt_ent.pdn = pdn; + mpt_ent.key = key & 0xFF; + mpt_ent.mtt_base = mtt_base; + mpt_ent.acc = req->req.acc; + mpt_ent.page_mode = req->req.page_mode; + mpt_ent.mem_map_en = req->req.map_en; + mpt_ent.rsv = 0; + + get_xsc_res(dev)->mpt_entry[mpt_idx].va = va; + get_xsc_res(dev)->mpt_entry[mpt_idx].mtt_base = mtt_base; + get_xsc_res(dev)->mpt_entry[mpt_idx].page_num = pa_num; + + ptr = (u32 *)&mpt_ent; + reg_stride = REG_WIDTH_TO_STRIDE(MMC_MPT_TBL_MEM_WIDTH); + reg_addr = MMC_MPT_TBL_MEM_ADDR + + mpt_idx * roundup_pow_of_two(reg_stride); + + iae_grp = xsc_iae_grp_get(dev); + iae_idx = xsc_iae_idx_get(dev, iae_grp); + + xsc_iae_lock(dev, iae_grp); + + IA_WRITE_REG_MR(dev, reg_addr, ptr, sizeof(mpt_ent) / sizeof(u32), iae_idx); + + xsc_core_info(dev, "reg mr, write mpt[%u]: va=%llx, mem_size=%u, pdn=%u\n", + mpt_idx, va, mpt_ent.mem_size, mpt_ent.pdn); + xsc_core_info(dev, "key=%u, mtt_base=%u, acc=%u, page_mode=%u, mem_map_en=%u\n", + mpt_ent.key, mpt_ent.mtt_base, mpt_ent.acc, + mpt_ent.page_mode, mpt_ent.mem_map_en); + + for (i = 0; i < pa_num; i++) { + u64 pa = req->req.pas[i]; + + pa = be64_to_cpu(pa); + pa = pa >> PAGE_SHIFT_4K; + ptr = (u32 *)&pa; + reg_addr = MMC_MTT_TBL_MEM_ADDR + + (mtt_base + i) * REG_WIDTH_TO_STRIDE(MMC_MTT_TBL_MEM_WIDTH); + + IA_WRITE_REG_MR(dev, reg_addr, ptr, sizeof(pa) / sizeof(u32), iae_idx); + + xsc_core_info(dev, "reg mr, write mtt: pa[%u]=%llx\n", i, pa); + } + + xsc_iae_unlock(dev, iae_grp); + + resp->hdr.status = 0; + return 0; +} + +int xsc_reg_mr(struct xsc_core_device *xdev, void *in, void *out) +{ + return xsc_cmd_exec_reg_mr(xdev, in, out); +} + +static int xsc_cmd_exec_dereg_mr(struct xsc_core_device *dev, void *in, void *out) +{ + struct xsc_unregister_mr_mbox_in *req; + struct xsc_unregister_mr_mbox_out *resp; + u32 mpt_idx; + u32 mtt_base; + int pages_num; + + req = in; + resp = out; + resp->hdr.status = -EINVAL; + + mpt_idx = be32_to_cpu(req->mkey); + xsc_core_info(dev, "mpt idx:%u\n", mpt_idx); + + pages_num = get_xsc_res(dev)->mpt_entry[mpt_idx].page_num; + mtt_base = get_xsc_res(dev)->mpt_entry[mpt_idx].mtt_base; + if (pages_num > 0) { + dealloc_mtt_entry(dev, pages_num, mtt_base); + get_xsc_res(dev)->mpt_entry[mpt_idx].page_num = 0; + } else { + xsc_core_dbg(dev, "no mtt entries to be freed, mpt_idx=%d\n", mpt_idx); + } + + resp->hdr.status = 0; + return 0; +} + +int xsc_dereg_mr(struct xsc_core_device *xdev, void *in, void *out) +{ + return xsc_cmd_exec_dereg_mr(xdev, in, out); +} + +static int xsc_cmd_exec_ioctl_flow(struct xsc_core_device *dev, + void *in, void *out) +{ + struct xsc_ioctl_mbox_in *req; + struct xsc_ioctl_mbox_out *resp; + struct xsc_ioctl_data_tl *tl; + char *data; + u16 datalen; + u16 tllen = sizeof(struct xsc_ioctl_data_tl); + int opmod; 
+	int table;
+	int length;
+	int ret = -EINVAL;
+
+	req = in;
+	resp = out;
+	resp->hdr.status = -EINVAL;
+
+	data = (char *)req->data;
+	datalen = be16_to_cpu(req->len);
+
+	if (datalen < tllen)
+		goto out;
+
+	tl = (struct xsc_ioctl_data_tl *)data;
+	opmod = tl->opmod;
+	table = tl->table;
+	length = tl->length;
+
+	switch (opmod) {
+	case XSC_IOCTL_OP_ADD:
+		ret = xsc_flow_add(dev, table, length, tl + 1);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	xsc_core_dbg(dev, "table=%d, opcode=0x%x, ret=%d\n", table, opmod, ret);
+
+out:
+	resp->hdr.status = 0;
+	resp->error = cpu_to_be32(ret);
+	return ret;
+}
+
+int xsc_cmd_write_reg_directly(struct xsc_core_device *dev, void *in, int in_size, void *out,
+			       int out_size, int func_id)
+{
+	int opcode, ret = 0;
+	unsigned long flags;
+	struct xsc_inbox_hdr *hdr;
+
+	hdr = (struct xsc_inbox_hdr *)in;
+	opcode = be16_to_cpu(hdr->opcode);
+	xsc_core_dbg(dev, "opcode: %x\n", opcode);
+
+	xsc_acquire_lock(&dev->reg_access_lock, &flags);
+	switch (opcode) {
+	case XSC_CMD_OP_IOCTL_FLOW:
+		ret = xsc_cmd_exec_ioctl_flow(dev, in, out);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	/* ensure pci sequence */
+	xsc_mmiowb();
+
+	xsc_release_lock(&dev->reg_access_lock, flags);
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h
new file mode 100644
index 0000000000000000000000000000000000000000..fbc6c7699f7f7cbe9876cb724b8a22e94a5fd517
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef CMDQ_H
+#define CMDQ_H
+
+// hw will use this for some records (e.g. vf_id)
+struct cmdq_rsv {
+	u16 func_id;
+	u8 rsv[2];
+};
+
+// related with hw, won't change
+#define CMDQ_ENTRY_SIZE 64
+#define CMD_FIRST_SIZE 8
+#define RSP_FIRST_SIZE 14
+
+struct xsc_cmd_layout {
+	struct cmdq_rsv rsv0;
+	__be32 inlen;
+	__be64 in_ptr;
+	__be32 in[CMD_FIRST_SIZE];
+	__be64 out_ptr;
+	__be32 outlen;
+	u8 token;
+	u8 sig;
+	u8 idx;
+	u8 type: 7;
+	u8 owner_bit: 1; // rsv for hw, arm will check this bit to make sure mem written
+};
+
+struct xsc_rsp_layout {
+	struct cmdq_rsv rsv0;
+	__be32 out[RSP_FIRST_SIZE];
+	u8 token;
+	u8 sig;
+	u8 idx;
+	u8 type: 7;
+	u8 owner_bit: 1; // rsv for hw, driver will check this bit to make sure mem written
+};
+
+struct xsc_cmd_prot_block {
+	u8 data[512];
+	u8 rsvd0[48];
+	__be64 next;
+	__be32 block_num;
+	u8 owner_status; // fw should change this val to 1
+	u8 token;
+	u8 ctrl_sig;
+	u8 sig;
+};
+
+#endif // CMDQ_H
diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c
new file mode 100644
index 0000000000000000000000000000000000000000..9c63cdae414be98db4d1154c1e5fd7fad9323c71
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved. 
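+ *
+ * Descriptive comment added for clarity (summarizes this file): thin OS
+ * abstraction helpers (locking, memory barriers, delays) shared with the
+ * firmware-facing code under fw/.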
+ */ + +#include +#include "common/xsc_core.h" + +void xsc_lock_init(struct xsc_lock *lock) +{ + spin_lock_init(&lock->lock); +} + +void xsc_acquire_lock(struct xsc_lock *lock, unsigned long *oflags) +{ + unsigned long flags; + + spin_lock_irqsave(&lock->lock, flags); + *oflags = flags; +} + +void xsc_release_lock(struct xsc_lock *lock, unsigned long flags) +{ + spin_unlock_irqrestore(&lock->lock, flags); +} + +void xsc_mmiowb(void) +{ + mmiowb(); +} + +void xsc_wmb(void) +{ + /* mem barrier for xsc operation */ + wmb(); +} + +void xsc_msleep(int timeout) +{ + msleep(timeout); +} + +void xsc_udelay(int timeout) +{ + udelay(timeout); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h new file mode 100644 index 0000000000000000000000000000000000000000..9d858175963324a6fa2a38e045f6a84cf7848202 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef OSDEP_H +#define OSDEP_H + +#include "common/xsc_core.h" + +#define xsc_print printk + +void xsc_msleep(int timeout); + +void xsc_udelay(int timeout); + +void xsc_lock_init(struct xsc_lock *lock); + +void xsc_acquire_lock(struct xsc_lock *lock, unsigned long *flags); + +void xsc_release_lock(struct xsc_lock *lock, unsigned long flags); + +void xsc_mmiowb(void); + +void xsc_wmb(void); + +void *xsc_malloc(unsigned int size); + +void xsc_free(void *addr); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h new file mode 100644 index 0000000000000000000000000000000000000000..44a1b78489024369aad1e090de06435018374f07 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __XSC_COUNTERS_H__ +#define __XSC_COUNTERS_H__ + +/* From E-tile Hard User Guide */ +#define NIF_ETH_TX_PFC_LOW 0x83c +#define NIF_ETH_TX_PFC_HIGH 0x83d +#define NIF_ETH_RX_PFC_LOW 0x93c +#define NIF_ETH_RX_PFC_HIGH 0x93d +#define NIF_ETH_TX_CNTR_CONFIG 0x845 +#define NIF_ETH_RX_CNTR_CONFIG 0x945 +#define NIF_ETH_RX_FCSERR_LOW 0x904 +#define NIF_ETH_RX_FCSERR_HIGH 0x905 + +#define XSC_CNT_WIDTH_32_BIT 32 +#define XSC_CNT_WIDTH_64_BIT 64 +#define XSC_CNT_MASK_32 0xffffffff +#define XSC_CNT_MASK_64 0xffffffffffffffff + +struct cnt_value_64 { + u32 va_l; + u32 va_h; +}; + +struct cnt_value_96 { + u32 va_l; + u32 va_m; + u32 va_h; +}; + +enum { + XSC_CNT_TYPE_TX_PAUSE = 0, + XSC_CNT_TYPE_RX_PAUSE, +}; + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c new file mode 100644 index 0000000000000000000000000000000000000000..0623b0f7d4ecc8fd523bde50cb80145179eb7ab4 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
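+ *
+ * Descriptive comment added for clarity (summarizes this file): flow-table
+ * DMA helpers. Table downloads are split into bursts of at most
+ * XSC_DMA_WR_MAX entries; table uploads complete via an MSI-X interrupt
+ * that signals dma_read_done.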
+ */ + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" + +#include "xsc_flow.h" + +static DECLARE_COMPLETION(dma_read_done); + +static inline int xsc_dma_wr_isbusy(struct xsc_core_device *xdev) +{ + u32 busy = 0; + + do { + busy = REG_RD32(xdev, HIF_TBL_TBL_DL_BUSY_REG_ADDR); + } while (busy != 0x0); + + return busy; +} + +static inline int xsc_dma_rd_isbusy(struct xsc_core_device *xdev) +{ + u32 busy = 0; + + do { + busy = REG_RD32(xdev, CLSF_DMA_DMA_UL_BUSY_REG_ADDR); + } while (busy != 0x0); + + return busy; +} + +static inline int xsc_dma_done(struct xsc_core_device *xdev) +{ + u32 done = 0; + + do { + done = REG_RD32(xdev, CLSF_DMA_DMA_DL_DONE_REG_ADDR); + } while ((done & 0x1) != 0x1); + + return done; +} + +static inline void xsc_dma_wr_success_get(struct xsc_core_device *xdev, u32 *success, u32 size) +{ + u32 *ptr = NULL; + + ptr = success; + IA_READ(xdev, CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR, ptr, (size / sizeof(u32))); +} + +int xsc_flow_table_dma_write_add(struct xsc_core_device *xdev, + const struct tdi_dma_write_key_bits *key, + const struct tdi_dma_write_action_bits *action) +{ + u32 i = 0; + u32 busy = 0; + u32 dma_wr_num = 0; + u32 value = 0; + u32 done = 0; + u64 success[2]; + u32 data_len = 0; + u64 dma_wr_addr = 0; + + if (!xdev || !key || !action) + return -1; + + if (!action->entry_num) + return -1; + + dma_wr_num = ((action->entry_num + (XSC_DMA_WR_MAX - 1)) / XSC_DMA_WR_MAX); + + for (i = 0; i < dma_wr_num; i++) { + if ((action->entry_num % XSC_DMA_WR_MAX) && (i == (dma_wr_num - 1))) + data_len = ((action->entry_num % XSC_DMA_WR_MAX) * XSC_DMA_LEN); + else + data_len = (XSC_DMA_WR_MAX * XSC_DMA_LEN); + + busy = xsc_dma_wr_isbusy(xdev); + if (busy) + return -1; + + REG_WR32(xdev, CLSF_DMA_ERR_CODE_CLR_REG_ADDR, 1); + + value = ((data_len << HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT) | + (key->host_id << HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT) | + key->func_id); + + REG_WR32(xdev, HIF_TBL_TBL_DL_REQ_REG_ADDR, value); + + dma_wr_addr = (action->data_addr + ((i * XSC_DMA_WR_MAX) * XSC_DMA_LEN)); + value = (dma_wr_addr & HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK); + REG_WR32(xdev, HIF_TBL_TBL_DL_ADDR_L_REG_ADDR, value); + + value = ((dma_wr_addr >> 32) & HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK); + REG_WR32(xdev, HIF_TBL_TBL_DL_ADDR_H_REG_ADDR, value); + + REG_WR32(xdev, HIF_TBL_TBL_DL_START_REG_ADDR, 1); + + done = xsc_dma_done(xdev); + if (done != XSC_DMA_WR_SUCCESS) { + memset(success, 0, sizeof(success)); + xsc_dma_wr_success_get(xdev, (u32 *)&success, sizeof(success)); + xsc_core_err(xdev, "DMA write time %d status 0x%lx%lx fail.\n", i, + (unsigned long)success[1], (unsigned long)success[0]); + return -1; + } + } + + return 0; +} + +void xsc_dma_read_done_complete(void) +{ + complete(&dma_read_done); +} + +int xsc_flow_table_dma_read_add(struct xsc_core_device *xdev, + const struct tdi_dma_read_key_bits *key, + const struct tdi_dma_read_action_bits *action) +{ + u32 busy = 0; + u32 value = 0; + + if (!xdev || !key || !action) + return -1; + + if (!action->burst_num) + return -1; + + busy = xsc_dma_rd_isbusy(xdev); + if (busy) + return -1; + + value = ((key->host_id << HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT) | + key->func_id); + + REG_WR32(xdev, HIF_TBL_TBL_UL_REQ_REG_ADDR, value); + + value = (action->data_addr & HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK); + REG_WR32(xdev, HIF_TBL_TBL_UL_ADDR_L_REG_ADDR, value); + + value = ((action->data_addr >> 32) & HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK); + 
REG_WR32(xdev, HIF_TBL_TBL_UL_ADDR_H_REG_ADDR, value); + + REG_WR32(xdev, HIF_TBL_TBL_UL_START_REG_ADDR, 1); + + value = (key->tbl_id & CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK); + REG_WR32(xdev, CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR, value); + + value = ((action->burst_num << CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT) | + key->tbl_start_addr); + REG_WR32(xdev, CLSF_DMA_DMA_RD_ADDR_REG_ADDR, value); + + REG_WR32(xdev, CLSF_DMA_INDRW_RD_START_REG_ADDR, 1); + + /*wait msix interrupt */ + if (!wait_for_completion_timeout(&dma_read_done, msecs_to_jiffies(5000))) { + xsc_core_err(xdev, "wait for dma read done completion timeout.\n"); + return -ETIMEDOUT; + } + + REG_WR32(xdev, HIF_TBL_MSG_RDY_REG_ADDR, 1); + + return 0; +} + +int xsc_flow_add(struct xsc_core_device *xdev, + int table, int length, void *data) +{ + int ret = -EINVAL; + struct xsc_flow_dma_write_add *dma_wr; + struct xsc_flow_dma_read_add *dma_rd; + + switch (table) { + case XSC_FLOW_DMA_WR: + if (length == sizeof(struct xsc_flow_dma_write_add)) { + dma_wr = (struct xsc_flow_dma_write_add *)data; + ret = xsc_flow_table_dma_write_add(xdev, &dma_wr->key, &dma_wr->action); + } + break; + case XSC_FLOW_DMA_RD: + if (length == sizeof(struct xsc_flow_dma_read_add)) { + dma_rd = (struct xsc_flow_dma_read_add *)data; + ret = xsc_flow_table_dma_read_add(xdev, &dma_rd->key, &dma_rd->action); + } + break; + default: + ret = -EINVAL; + break; + } + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h new file mode 100644 index 0000000000000000000000000000000000000000..ec7c7a2c39597be0378975e823cf10d53c84e21f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_FLOW_H +#define XSC_FLOW_H + +#include "osdep.h" + +#define XSC_DMA_LEN 64 +#define XSC_DMA_WR_MAX 128 +#define XSC_DMA_WR_SUCCESS 0x3 + +/* key */ +struct tdi_dma_write_key_bits { + uint8_t host_id:1; + uint16_t func_id:11; +} __packed; + +struct tdi_dma_read_key_bits { + uint16_t tbl_start_addr:16; + uint8_t tbl_id:7; + uint8_t host_id:1; + uint16_t func_id:11; +} __packed; + +/* action */ +struct tdi_dma_write_action_bits { + uint32_t entry_num:32; + uint64_t data_addr:64; +} __packed; + +struct tdi_dma_read_action_bits { + uint16_t burst_num:16; + uint64_t data_addr:64; +} __packed; + +/* ioctl data - add */ +struct xsc_flow_dma_write_add { + struct tdi_dma_write_key_bits key; + struct tdi_dma_write_action_bits action; +}; + +struct xsc_flow_dma_read_add { + struct tdi_dma_read_key_bits key; + struct tdi_dma_read_action_bits action; +}; + +struct xsc_logic_in_port_cfg_reg { + u32 phy_port_offset:11; + u32 resv0:5; + u32 func_id_offset:11; + u32 resv1:5; + u32 aps_port_offset:11; + u32 resv2:1; + u32 aps_port_rec_flg:1; + u32 resv3:19; +}; + +int xsc_flow_add(struct xsc_core_device *xdev, + int table, int length, void *data); + +void xsc_dma_read_done_complete(void); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h new file mode 100644 index 0000000000000000000000000000000000000000..a949bb0f4a2c2f1cb08b03e4577c385b06568529 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. 
+ * All rights reserved. + */ + +#ifndef XSC_FW_H +#define XSC_FW_H + +#include "osdep.h" + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" + +struct xsc_free_list { + struct list_head list; + int start; + int end; +}; + +struct xsc_free_list_wl { + struct xsc_free_list head; + struct xsc_lock lock; +}; + +struct xsc_mpt_info { + u64 va; + u32 mtt_base; + u32 page_num; +}; + +#define XSC_RES_IAE_GRP_MASK (XSC_RES_NUM_IAE_GRP - 1) +struct xsc_resources { + int refcnt; + atomic_t iae_grp; + int iae_idx[XSC_RES_NUM_IAE_GRP]; + spinlock_t iae_lock[XSC_RES_NUM_IAE_GRP]; /* iae group lock */ +#define XSC_MAX_MPT_NUM MMC_MPT_TBL_MEM_DEPTH + struct xsc_mpt_info mpt_entry[XSC_MAX_MPT_NUM]; + int max_mpt_num; + u64 mpt_tbl[XSC_MAX_MPT_NUM >> 6]; +#define XSC_MAX_MTT_NUM MMC_MTT_TBL_MEM_DEPTH + int max_mtt_num; + struct xsc_free_list_wl mtt_list; + struct xsc_lock lock; +}; + +struct xsc_resources *get_xsc_res(struct xsc_core_device *dev); + +int xsc_alloc_res(u32 *res, u64 *res_tbl, u32 max); + +int xsc_dealloc_res(u32 *res, u64 *res_tbl); + +int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc, + u32 base_align); + +int release_to_free_list(struct xsc_free_list_wl *list, u32 release, + u32 num_released); + +int alloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx); + +int dealloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx); + +int alloc_mtt_entry(struct xsc_core_device *dev, u32 pages_num, u32 *mtt_base); + +int dealloc_mtt_entry(struct xsc_core_device *dev, int pages_num, u32 mtt_base); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..758b5c77a263219e627c5c5cc0ed2ef7b2c5ef6b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_core.h" + +void *xsc_malloc(unsigned int size) +{ + return kmalloc(size, GFP_ATOMIC); +} + +void xsc_free(void *addr) +{ + kfree(addr); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..8eab3e6803a3272e5be79218e920a10018a00e61 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_REG_DEFINE_H +#define XSC_REG_DEFINE_H + +struct xsc_mpt_entry { + u32 va_l; + u32 va_h; + u32 mem_size; + u32 pdn:24; + u32 key:8; + u32 mtt_base:18; + u32 acc:4; + u32 page_mode:2; + u32 mem_map_en:1; + u32 rsv:7; +}; + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c new file mode 100644 index 0000000000000000000000000000000000000000..8bd6916e21035a009f0fd39a47bf49816588e090 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
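+ *
+ * Descriptive comment added for clarity (summarizes this file): per-board
+ * resource pools shared by every function on a board - the MPT index
+ * bitmap, the MTT range free-list allocator and the IA engine lock groups
+ * obtained from firmware.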
+ */ + +#include "xsc_fw.h" + +struct xsc_resources *g_xres[MAX_BOARD_NUM]; + +static int xsc_alloc_free_list_res(struct xsc_free_list_wl *list, int max_num) +{ + struct xsc_free_list *free_node; + + xsc_lock_init(&list->lock); + INIT_LIST_HEAD(&list->head.list); + + free_node = xsc_malloc(sizeof(struct xsc_free_list)); + if (!free_node) + return -ENOMEM; + + free_node->start = 0; + free_node->end = free_node->start + max_num - 1; + list_add(&free_node->list, &list->head.list); + + return 0; +} + +static void xsc_destroy_free_list_res(struct xsc_free_list_wl *list) +{ + struct xsc_free_list *pos; + struct xsc_free_list *next; + + list_for_each_entry_safe(pos, next, &list->head.list, list) { + list_del(&pos->list); + xsc_free(pos); + } +} + +static int xsc_res_iae_init(struct xsc_core_device *dev) +{ + int i = 0; + int ret = 0; + struct xsc_resources *res = get_xsc_res(dev); + struct xsc_alloc_ia_lock_mbox_in in; + struct xsc_alloc_ia_lock_mbox_out out; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ALLOC_IA_LOCK); + in.lock_num = XSC_RES_NUM_IAE_GRP; + + ret = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(dev, "failed to alloc ia lock from fw, ret = %d\n", ret); + return -EINVAL; + } + + for (i = 0; i < XSC_RES_NUM_IAE_GRP; i++) { + res->iae_idx[i] = out.lock_idx[i]; + spin_lock_init(&res->iae_lock[i]); + } + + atomic_set(&res->iae_grp, 0); + + xsc_core_info(dev, "allocated %d iae groups", i); + + return 0; +} + +static void xsc_res_iae_release(struct xsc_core_device *dev) +{ + int ret = 0; + int i = 0; + struct xsc_resources *res = get_xsc_res(dev); + struct xsc_release_ia_lock_mbox_in in; + struct xsc_release_ia_lock_mbox_out out; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_RELEASE_IA_LOCK); + for (i = 0; i < XSC_RES_NUM_IAE_GRP; i++) + in.lock_idx[i] = res->iae_idx[i]; + + ret = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (ret) + xsc_core_err(dev, "failed to release ia lock, ret = %d\n", ret); +} + +int xsc_create_res(struct xsc_core_device *dev) +{ + int ret = 0; + u32 board_id = dev->board_info->board_id; + struct xsc_resources *xres = get_xsc_res(dev); + + if (xres) { + xres->refcnt++; + if (xres->refcnt > 1) + return 0; + } else { + g_xres[board_id] = vmalloc(sizeof(*g_xres[board_id])); + if (!g_xres[board_id]) + return -ENOMEM; + xres = g_xres[board_id]; + xres->refcnt = 1; + } + + xsc_lock_init(&xres->lock); + xres->max_mpt_num = XSC_MAX_MPT_NUM; + memset(xres->mpt_tbl, 0xFF, XSC_MAX_MPT_NUM >> 3); + /* reserved for local dma lkey */ + clear_bit(0, (unsigned long *)xres->mpt_tbl); + + ret = xsc_res_iae_init(dev); + if (ret) { + vfree(g_xres[board_id]); + g_xres[board_id] = NULL; + return -EINVAL; + } + + xres->max_mtt_num = XSC_MAX_MTT_NUM; + ret = xsc_alloc_free_list_res(&xres->mtt_list, xres->max_mtt_num); + if (ret) + goto err_mtt; + + return ret; + +err_mtt: + xsc_res_iae_release(dev); + vfree(g_xres[board_id]); + g_xres[board_id] = NULL; + return ret; +} + +void xsc_destroy_res(struct xsc_core_device *dev) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + if (xres) { + xres->refcnt--; + if (xres->refcnt) + return; + + xsc_destroy_free_list_res(&xres->mtt_list); + xsc_res_iae_release(dev); + vfree(g_xres[dev->board_info->board_id]); + g_xres[dev->board_info->board_id] = NULL; + } +} + +struct xsc_resources *get_xsc_res(struct xsc_core_device *dev) +{ + return g_xres[dev->board_info->board_id]; +} + +int xsc_alloc_res(u32 *res, u64 *res_tbl, u32 max) +{ + u32 bit_num; + + bit_num = find_first_bit((unsigned 
long *)res_tbl, max); + if (bit_num == max) + return -ENOMEM; + clear_bit(bit_num, (unsigned long *)res_tbl); + *res = bit_num; + return 0; +} + +int xsc_dealloc_res(u32 *res, u64 *res_tbl) +{ + if (test_and_set_bit(*res, (unsigned long *)res_tbl)) + return -EINVAL; + + *res = 0; + return 0; +} + +int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc, + u32 base_align) +{ + struct xsc_free_list *free_node; + struct xsc_free_list *next; + struct xsc_free_list *new_node; + unsigned long flags; + + *alloc = -1; + xsc_acquire_lock(&list->lock, &flags); + list_for_each_entry_safe(free_node, next, &list->head.list, list) { + int start = round_up(free_node->start, base_align); + int avail_num = free_node->end - start + 1; + + if (required < avail_num) { + if (start > free_node->start) { + new_node = xsc_malloc(sizeof(struct xsc_free_list)); + if (!new_node) { + xsc_release_lock(&list->lock, flags); + return -ENOMEM; + } + new_node->start = free_node->start; + new_node->end = start - 1; + __list_add(&new_node->list, free_node->list.prev, + &free_node->list); + } + *alloc = start; + free_node->start = start + required; + break; + } else if (required == avail_num) { + *alloc = start; + if (start > free_node->start) { + free_node->end = start - 1; + } else { + list_del(&free_node->list); + xsc_free(free_node); + } + break; + } + } + xsc_release_lock(&list->lock, flags); + + if (*alloc == -1) + return -EINVAL; + + return 0; +} + +int release_to_free_list(struct xsc_free_list_wl *list, uint32_t release, + uint32_t num_released) +{ + struct xsc_free_list *free_node = NULL; + struct xsc_free_list *next, *prev; + struct xsc_free_list *new_node; + unsigned long flags; + bool new_flag = false; + bool end_merge = false; + int ret = 0; + + xsc_acquire_lock(&list->lock, &flags); + list_for_each_entry_safe(free_node, next, &list->head.list, list) { + if (release + num_released < free_node->start) { + new_flag = true; + } else if (release + num_released == free_node->start) { + /* backward merge */ + end_merge = true; + free_node->start = release; + } + + if (new_flag || end_merge) { + /* forward merge, and backward merge if possible */ + if (free_node->list.prev == &list->head.list) + goto create_node; + + prev = list_entry(free_node->list.prev, struct xsc_free_list, list); + if (release == prev->end + 1) { + if (end_merge) { + prev->end = free_node->end; + list_del(&free_node->list); + xsc_free(free_node); + free_node = NULL; + } else { + prev->end = release + num_released - 1; + new_flag = false; + } + } + + break; + } + } + + if (list_empty(&list->head.list)) { + new_flag = true; + free_node = &list->head; + } + +create_node: + if (new_flag && free_node) { + new_node = xsc_malloc(sizeof(struct xsc_free_list)); + if (!new_node) { + ret = -ENOMEM; + goto ret; + } + new_node->start = release; + new_node->end = release + num_released - 1; + __list_add(&new_node->list, free_node->list.prev, + &free_node->list); + } +ret: + xsc_release_lock(&list->lock, flags); + return ret; +} + +int alloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + if (xsc_alloc_res(mpt_idx, xres->mpt_tbl, xres->max_mpt_num)) + return -EINVAL; + + return 0; +} + +int dealloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + if (xsc_dealloc_res(mpt_idx, xres->mpt_tbl)) + return -EINVAL; + + return 0; +} + +int alloc_mtt_entry(struct xsc_core_device *dev, u32 pages_num, u32 *mtt_base) +{ + struct 
xsc_resources *xres = get_xsc_res(dev); + int ret = alloc_from_free_list(&xres->mtt_list, pages_num, mtt_base, 1); + + xsc_core_dbg(dev, "alloc mtt for %d pages start from %d\n", + pages_num, *mtt_base); + + return ret; +} + +int dealloc_mtt_entry(struct xsc_core_device *dev, int pages_num, u32 mtt_base) +{ + struct xsc_resources *xres = get_xsc_res(dev); + int ret = release_to_free_list(&xres->mtt_list, mtt_base, pages_num); + + xsc_core_dbg(dev, "mtt release %d pages start from %d\n", + pages_num, mtt_base); + + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c b/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c new file mode 100644 index 0000000000000000000000000000000000000000..da4761565f1aab6ca6ae12f11eb81cbcd2ade204 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_core.h" + +LIST_HEAD(intf_list); +LIST_HEAD(xsc_dev_list); +DEFINE_MUTEX(xsc_intf_mutex); // protect intf_list and xsc_dev_list + +static void xsc_add_device(struct xsc_interface *intf, struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev; + + dev = container_of(priv, struct xsc_core_device, priv); + dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL); + if (!dev_ctx) + return; + + dev_ctx->intf = intf; + + dev_ctx->context = intf->add(dev); + if (dev_ctx->context) { + set_bit(XSC_INTERFACE_ADDED, &dev_ctx->state); + if (intf->attach) + set_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state); + + spin_lock_irq(&priv->ctx_lock); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irq(&priv->ctx_lock); + } else { + kfree(dev_ctx); + } +} + +static struct xsc_device_context *xsc_get_device(struct xsc_interface *intf, + struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + + /* caller of this function has mutex protection */ + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf == intf) + return dev_ctx; + + return NULL; +} + +static void xsc_remove_device(struct xsc_interface *intf, struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev = container_of(priv, struct xsc_core_device, priv); + + dev_ctx = xsc_get_device(intf, priv); + if (!dev_ctx) + return; + + spin_lock_irq(&priv->ctx_lock); + list_del(&dev_ctx->list); + spin_unlock_irq(&priv->ctx_lock); + + if (test_bit(XSC_INTERFACE_ADDED, &dev_ctx->state)) + intf->remove(dev, dev_ctx->context); + + kfree(dev_ctx); +} + +int xsc_register_interface(struct xsc_interface *intf) +{ + struct xsc_priv *priv; + + if (!intf->add || !intf->remove) + return -EINVAL; + + mutex_lock(&xsc_intf_mutex); + list_add_tail(&intf->list, &intf_list); + list_for_each_entry(priv, &xsc_dev_list, dev_list) { + xsc_add_device(intf, priv); + } + mutex_unlock(&xsc_intf_mutex); + + return 0; +} +EXPORT_SYMBOL(xsc_register_interface); + +void xsc_unregister_interface(struct xsc_interface *intf) +{ + struct xsc_priv *priv; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(priv, &xsc_dev_list, dev_list) + xsc_remove_device(intf, priv); + list_del(&intf->list); + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_unregister_interface); + +static void xsc_attach_interface(struct xsc_interface *intf, + struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev = container_of(priv, struct xsc_core_device, priv); + + dev_ctx = 
xsc_get_device(intf, priv); + if (!dev_ctx) + return; + + if (intf->attach) { + if (test_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state)) + return; + if (intf->attach(dev, dev_ctx->context)) + return; + set_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state); + } else { + if (test_bit(XSC_INTERFACE_ADDED, &dev_ctx->state)) + return; + dev_ctx->context = intf->add(dev); + if (!dev_ctx->context) + return; + set_bit(XSC_INTERFACE_ADDED, &dev_ctx->state); + } +} + +static void xsc_detach_interface(struct xsc_interface *intf, + struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev = container_of(priv, struct xsc_core_device, priv); + + dev_ctx = xsc_get_device(intf, priv); + if (!dev_ctx) + return; + + if (intf->detach) { + if (!test_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state)) + return; + intf->detach(dev, dev_ctx->context); + clear_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state); + } else { + if (!test_bit(XSC_INTERFACE_ADDED, &dev_ctx->state)) + return; + intf->remove(dev, dev_ctx->context); + clear_bit(XSC_INTERFACE_ADDED, &dev_ctx->state); + } +} + +void xsc_attach_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(intf, &intf_list, list) { + xsc_attach_interface(intf, priv); + } + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_attach_device); + +void xsc_attach_device_by_protocol(struct xsc_core_device *dev, int protocol) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) + xsc_attach_interface(intf, priv); + mutex_unlock(&xsc_intf_mutex); +} + +void xsc_detach_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(intf, &intf_list, list) + xsc_detach_interface(intf, priv); + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_detach_device); + +bool xsc_device_registered(struct xsc_core_device *dev) +{ + struct xsc_priv *priv; + bool found = false; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(priv, &xsc_dev_list, dev_list) + if (priv == &dev->priv) + found = true; + mutex_unlock(&xsc_intf_mutex); + + return found; +} + +int xsc_register_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_add_tail(&priv->dev_list, &xsc_dev_list); + list_for_each_entry(intf, &intf_list, list) + xsc_add_device(intf, priv); + mutex_unlock(&xsc_intf_mutex); + + return 0; +} +EXPORT_SYMBOL(xsc_register_device); + +void xsc_unregister_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry_reverse(intf, &intf_list, list) + xsc_remove_device(intf, priv); + list_del(&priv->dev_list); + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_unregister_device); + +void xsc_add_dev_by_protocol(struct xsc_core_device *dev, int protocol) +{ + struct xsc_interface *intf; + + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) { + xsc_add_device(intf, &dev->priv); + break; + } +} +EXPORT_SYMBOL(xsc_add_dev_by_protocol); + +void xsc_remove_dev_by_protocol(struct xsc_core_device *dev, int protocol) +{ + struct xsc_interface *intf; + + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) { 
+ xsc_remove_device(intf, &dev->priv); + break; + } +} +EXPORT_SYMBOL(xsc_remove_dev_by_protocol); + +void xsc_dev_list_lock(void) +{ + mutex_lock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_dev_list_lock); + +void xsc_dev_list_unlock(void) +{ + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_dev_list_unlock); + +int xsc_dev_list_trylock(void) +{ + return mutex_trylock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_dev_list_trylock); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/main.c b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c new file mode 100644 index 0000000000000000000000000000000000000000..0c9ba75b2d3703ddab13b7e4abb91851524fa261 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c @@ -0,0 +1,937 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/cq.h" +#include "common/qp.h" +#include "common/xsc_lag.h" +#include "common/xsc_port_ctrl.h" +#include "devlink.h" +#include "eswitch.h" +#include "fw/xsc_counters.h" +#include "xsc_pci_ctrl.h" + +unsigned int xsc_debug_mask; +module_param_named(debug_mask, xsc_debug_mask, uint, 0644); +MODULE_PARM_DESC(debug_mask, + "debug mask: 1=dump cmd data, 2=dump cmd exec time, 3=both. Default=0"); + +unsigned int xsc_log_level = XSC_LOG_LEVEL_WARN; +module_param_named(log_level, xsc_log_level, uint, 0644); +MODULE_PARM_DESC(log_level, + "lowest log level to print: 0=debug, 1=info, 2=warning, 3=error. Default=1"); +EXPORT_SYMBOL(xsc_log_level); + +static bool probe_vf = 1; +module_param_named(probe_vf, probe_vf, bool, 0644); +MODULE_PARM_DESC(probe_vf, "probe VFs or not, 0 = not probe, 1 = probe. Default = 1"); + +static bool xsc_hw_reset; + +#define DRIVER_NAME "xsc_pci" +#define DRIVER_VERSION "0.1.0" +#define ETH_DRIVER_NAME "xsc_eth" + +static const struct pci_device_id xsc_pci_id_table[] = { + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_HOST_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_SOC_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MS_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_HOST_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_SOC_PF_DEV_ID) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, xsc_pci_id_table); + +static const struct xsc_device_product_info xsc_product_list[] = { + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_50, "metaConnect-50")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_100, "metaConnect-100")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_200, "metaConnect-200")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_400S, "metaConnect-400S")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID, + XSC_SUB_DEV_ID_MF_50, "metaFusion-50")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID, + XSC_SUB_DEV_ID_MF_200, "metaFusion-200")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + 
XSC_SUB_DEV_ID_MS_50, "metaScale-50")},
+	{XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID,
+				 XSC_SUB_DEV_ID_MS_100Q, "metaScale-100Q")},
+	{XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID,
+				 XSC_SUB_DEV_ID_MS_200, "metaScale-200")},
+	{XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID,
+				 XSC_SUB_DEV_ID_MS_200S, "metaScale-200S")},
+	{XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID,
+				 XSC_SUB_DEV_ID_MS_400M, "metaScale-400M")},
+	{XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID,
+				 XSC_SUB_DEV_ID_MS_200_OCP, "metaScale-200-OCP")},
+	{XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID,
+				 XSC_SUB_DEV_ID_MV_100, "metaVisor-100")},
+	{XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID,
+				 XSC_SUB_DEV_ID_MV_200, "metaVisor-200")},
+	{0}
+};
+
+#define IS_VIRT_FUNCTION(id) ((id)->driver_data == XSC_PCI_DEV_IS_VF)
+
+static bool need_write_reg_directly(void *in)
+{
+	struct xsc_inbox_hdr *hdr;
+	struct xsc_ioctl_mbox_in *req;
+	struct xsc_ioctl_data_tl *tl;
+	char *data;
+
+	hdr = (struct xsc_inbox_hdr *)in;
+	if (unlikely(be16_to_cpu(hdr->opcode) == XSC_CMD_OP_IOCTL_FLOW)) {
+		req = (struct xsc_ioctl_mbox_in *)in;
+		data = (char *)req->data;
+		tl = (struct xsc_ioctl_data_tl *)data;
+		if (tl->opmod == XSC_IOCTL_OP_ADD) {
+			if (unlikely(tl->table == XSC_FLOW_DMA_WR || tl->table == XSC_FLOW_DMA_RD))
+				return true;
+		}
+	}
+	return false;
+}
+
+int xsc_cmd_exec(struct xsc_core_device *dev, void *in, int in_size, void *out,
+		 int out_size)
+{
+	struct xsc_inbox_hdr *hdr = (struct xsc_inbox_hdr *)in;
+
+	/* only command version 0 is implemented; force it on every request */
+	hdr->ver = 0;
+
+	if (need_write_reg_directly(in))
+		return xsc_cmd_write_reg_directly(dev, in, in_size, out,
+						  out_size, dev->glb_func_id);
+	return _xsc_cmd_exec(dev, in, in_size, out, out_size);
+}
+EXPORT_SYMBOL(xsc_cmd_exec);
+
+static int set_dma_caps(struct pci_dev *pdev)
+{
+	int err = 0;
+
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	if (err)
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	else
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+
+	if (!err)
+		dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
+
+	return err;
+}
+
+static int xsc_pci_enable_device(struct xsc_core_device *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int err = 0;
+
+	mutex_lock(&dev->pci_status_mutex);
+	if (dev->pci_status == XSC_PCI_STATUS_DISABLED) {
+		err = pci_enable_device(pdev);
+		if (!err)
+			dev->pci_status = XSC_PCI_STATUS_ENABLED;
+	}
+	mutex_unlock(&dev->pci_status_mutex);
+
+	return err;
+}
+
+static void xsc_pci_disable_device(struct xsc_core_device *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+
+	mutex_lock(&dev->pci_status_mutex);
+	if (dev->pci_status == XSC_PCI_STATUS_ENABLED) {
+		pci_disable_device(pdev);
+		dev->pci_status = XSC_PCI_STATUS_DISABLED;
+	}
+	mutex_unlock(&dev->pci_status_mutex);
+}
+
+int xsc_priv_init(struct xsc_core_device *dev)
+{
+	struct xsc_priv *priv = &dev->priv;
+
+	strscpy(priv->name, dev_name(&dev->pdev->dev), XSC_MAX_NAME_LEN);
+	priv->name[XSC_MAX_NAME_LEN - 1] = 0;
+
+	INIT_LIST_HEAD(&priv->ctx_list);
+	spin_lock_init(&priv->ctx_lock);
+	mutex_init(&dev->intf_state_mutex);
+
+	return 0;
+}
+
+int xsc_dev_res_init(struct xsc_core_device *dev)
+{
+	struct xsc_dev_resource *dev_res = NULL;
+
+	dev_res = kvzalloc(sizeof(*dev_res), GFP_KERNEL);
+	if (!dev_res)
+		return -ENOMEM;
+
+	dev->dev_res = dev_res;
+	/* init access lock */
+	spin_lock_init(&dev->reg_access_lock.lock);
+	mutex_init(&dev_res->alloc_mutex);
+	mutex_init(&dev_res->pgdir_mutex);
+	INIT_LIST_HEAD(&dev_res->pgdir_list);
+	spin_lock_init(&dev_res->mkey_lock);
+
+	return 0;
+}
+
+void xsc_dev_res_cleanup(struct xsc_core_device *dev)
+{
+	kvfree(dev->dev_res);
+	dev->dev_res = NULL;
+}
+
+void xsc_init_reg_addr(struct xsc_core_device *dev)
+{
+	if (xsc_core_is_pf(dev)) {
+		dev->regs.cpm_get_lock = HIF_CPM_LOCK_GET_REG_ADDR;
+		dev->regs.cpm_put_lock = HIF_CPM_LOCK_PUT_REG_ADDR;
+		dev->regs.cpm_lock_avail = HIF_CPM_LOCK_AVAIL_REG_ADDR;
+		dev->regs.cpm_data_mem = HIF_CPM_IDA_DATA_MEM_ADDR;
+		dev->regs.cpm_cmd = HIF_CPM_IDA_CMD_REG_ADDR;
+		dev->regs.cpm_addr = HIF_CPM_IDA_ADDR_REG_ADDR;
+		dev->regs.cpm_busy = HIF_CPM_IDA_BUSY_REG_ADDR;
+	} else {
+		dev->regs.tx_db = TX_DB_FUNC_MEM_ADDR;
+		dev->regs.rx_db = RX_DB_FUNC_MEM_ADDR;
+		dev->regs.complete_db = DB_CQ_FUNC_MEM_ADDR;
+		dev->regs.complete_reg = DB_CQ_CID_DIRECT_MEM_ADDR;
+		dev->regs.event_db = DB_EQ_FUNC_MEM_ADDR;
+		dev->regs.cpm_get_lock = CPM_LOCK_GET_REG_ADDR;
+		dev->regs.cpm_put_lock = CPM_LOCK_PUT_REG_ADDR;
+		dev->regs.cpm_lock_avail = CPM_LOCK_AVAIL_REG_ADDR;
+		dev->regs.cpm_data_mem = CPM_IDA_DATA_MEM_ADDR;
+		dev->regs.cpm_cmd = CPM_IDA_CMD_REG_ADDR;
+		dev->regs.cpm_addr = CPM_IDA_ADDR_REG_ADDR;
+		dev->regs.cpm_busy = CPM_IDA_BUSY_REG_ADDR;
+	}
+}
+
+int xsc_dev_init(struct xsc_core_device *dev)
+{
+	int err = 0;
+
+	xsc_priv_init(dev);
+
+	err = xsc_dev_res_init(dev);
+	if (err) {
+		xsc_core_err(dev, "xsc dev res init failed %d\n", err);
+		goto err_res_init;
+	}
+
+	/* create debugfs */
+	err = xsc_debugfs_init(dev);
+	if (err) {
+		xsc_core_err(dev, "xsc_debugfs_init failed %d\n", err);
+		goto err_debugfs_init;
+	}
+
+	return 0;
+
+err_debugfs_init:
+	xsc_dev_res_cleanup(dev);
+err_res_init:
+	return err;
+}
+
+void xsc_dev_cleanup(struct xsc_core_device *dev)
+{
+	xsc_debugfs_fini(dev);
+	xsc_dev_res_cleanup(dev);
+}
+
+static void xsc_product_info(struct pci_dev *pdev)
+{
+	const struct xsc_device_product_info *p_info = xsc_product_list;
+
+	while (p_info->vendor) {
+		if (pdev->device == p_info->device && pdev->subsystem_device == p_info->subdevice) {
+			pr_info("Product: %s, Vendor: Yunsilicon\n", p_info->product_name);
+			break;
+		}
+		p_info++;
+	}
+}
+
+static int xsc_pci_init(struct xsc_core_device *dev, const struct pci_device_id *id)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int err = 0;
+	int bar_num = 0;
+	void __iomem *bar_base = NULL;
+
+	mutex_init(&dev->pci_status_mutex);
+	dev->priv.numa_node = dev_to_node(&pdev->dev);
+	if (dev->priv.numa_node == -1)
+		dev->priv.numa_node = 0;
+
+	/* enable the device */
+	err = xsc_pci_enable_device(dev);
+	if (err) {
+		xsc_core_err(dev, "failed to enable PCI device: err=%d\n", err);
+		goto err_ret;
+	}
+
+	err = pci_request_region(pdev, bar_num, KBUILD_MODNAME);
+	if (err) {
+		xsc_core_err(dev, "failed to request %s pci_region=%d: err=%d\n",
+			     KBUILD_MODNAME, bar_num, err);
+		goto err_disable;
+	}
+
+	pci_set_master(pdev);
+
+	err = set_dma_caps(pdev);
+	if (err) {
+		xsc_core_err(dev, "failed to set DMA capabilities mask: err=%d\n", err);
+		goto err_clr_master;
+	}
+
+	bar_base = pci_ioremap_bar(pdev, bar_num);
+	if (!bar_base) {
+		xsc_core_err(dev, "failed to ioremap %s bar%d\n", KBUILD_MODNAME, bar_num);
+		err = -ENOMEM;
+		goto err_clr_master;
+	}
+
+	err = pci_save_state(pdev);
+	if (err) {
+		xsc_core_err(dev, "pci_save_state failed: err=%d\n", err);
+		goto err_io_unmap;
+	}
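+	/* from here on, xsc_pci_fini() is responsible for unwinding the BAR setup */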
+ + dev->bar_num = bar_num; + dev->bar = bar_base; + + xsc_init_reg_addr(dev); + + return 0; + +err_io_unmap: + pci_iounmap(pdev, bar_base); +err_clr_master: + pci_clear_master(pdev); + pci_release_region(pdev, bar_num); +err_disable: + xsc_pci_disable_device(dev); +err_ret: + return err; +} + +static void xsc_pci_fini(struct xsc_core_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + + if (dev->bar) + pci_iounmap(pdev, dev->bar); + pci_clear_master(pdev); + pci_release_region(pdev, dev->bar_num); + xsc_pci_disable_device(dev); +} + +static int xsc_check_cmdq_version(struct xsc_core_device *dev) +{ + struct xsc_cmd_query_cmdq_ver_mbox_out *out; + struct xsc_cmd_query_cmdq_ver_mbox_in in; + + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto no_mem_out; + } + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_CMDQ_VERSION); + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto out_out; + } + + if (be16_to_cpu(out->cmdq_ver) != CMDQ_VERSION) { + xsc_core_err(dev, "cmdq version check failed, expecting version %d, actual version %d\n", + CMDQ_VERSION, be16_to_cpu(out->cmdq_ver)); + err = -EINVAL; + goto out_out; + } + dev->cmdq_ver = CMDQ_VERSION; + +out_out: + kfree(out); +no_mem_out: + return err; +} + +int xsc_reset_function_resource(struct xsc_core_device *dev) +{ + struct xsc_function_reset_mbox_in in; + struct xsc_function_reset_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_FUNCTION_RESET); + in.glb_func_id = cpu_to_be16(dev->glb_func_id); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) + return -EINVAL; + + return 0; +} + +static int xsc_fpga_not_supported(struct xsc_core_device *dev) +{ +#define FPGA_VERSION_H 0x100 +#define ASIC_VERSION_H 0x20230423 + u32 ver_h; + + if (!xsc_core_is_pf(dev)) + return 0; + + ver_h = REG_RD32(dev, HIF_CPM_CHIP_VERSION_H_REG_ADDR); + if (ver_h != FPGA_VERSION_H && ver_h != ASIC_VERSION_H) { + xsc_core_err(dev, "fpga version 0x%x not supported\n", ver_h); + return 1; + } + + return 0; +} + +int xsc_chip_type(struct xsc_core_device *dev) +{ + switch (dev->pdev->device) { + case XSC_MC_PF_DEV_ID: + case XSC_MC_VF_DEV_ID: + return XSC_CHIP_MC; + case XSC_MF_HOST_PF_DEV_ID: + case XSC_MF_HOST_VF_DEV_ID: + case XSC_MF_SOC_PF_DEV_ID: + return XSC_CHIP_MF; + case XSC_MS_PF_DEV_ID: + case XSC_MS_VF_DEV_ID: + return XSC_CHIP_MS; + case XSC_MV_HOST_PF_DEV_ID: + case XSC_MV_HOST_VF_DEV_ID: + case XSC_MV_SOC_PF_DEV_ID: + return XSC_CHIP_MV; + default: + return XSC_CHIP_UNKNOWN; + } +} +EXPORT_SYMBOL(xsc_chip_type); + +#if defined(__sw_64__) +static void xsc_enable_relaxed_order(struct xsc_core_device *dev) +{ + struct xsc_cmd_enable_relaxed_order_in in; + struct xsc_cmd_enable_relaxed_order_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_RELAXED_ORDER); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto err_out; + + if (out.hdr.status) { + err = xsc_cmd_status_to_err(&out.hdr); + goto err_out; + } + + return; +err_out: + xsc_core_warn(dev, "Failed to enable relaxed order %d\n", err); +} +#endif + +static int xsc_cmd_activate_hw_config(struct xsc_core_device *dev) +{ + struct xsc_cmd_activate_hw_config_mbox_in in; + struct xsc_cmd_activate_hw_config_mbox_out out; + int err = 0; + 
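+	/* zero the mailboxes before use, as the other command wrappers in
+	 * this file do, so reserved fields do not reach the firmware as
+	 * stack garbage
+	 */
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));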
+ in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ACTIVATE_HW_CONFIG); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + dev->board_info->hw_config_activated = 1; + return 0; +} + +static int xsc_activate_hw_config(struct xsc_core_device *dev) +{ + if (dev->board_info->hw_config_activated) + return 0; + + return xsc_cmd_activate_hw_config(dev); +} + +static int xsc_init_once(struct xsc_core_device *dev) +{ + int err; + + err = xsc_cmd_init(dev); + if (err) { + xsc_core_err(dev, "Failed initializing command interface, aborting\n"); + goto err_cmd_init; + } + + err = xsc_check_cmdq_version(dev); + if (err) { + xsc_core_err(dev, "Failed to check cmdq version\n"); + goto err_cmdq_ver_chk; + } + + err = xsc_cmd_query_hca_cap(dev, &dev->caps); + if (err) { + xsc_core_err(dev, "Failed to query hca, err=%d\n", err); + goto err_cmdq_ver_chk; + } + + err = xsc_query_guid(dev); + if (err) { + xsc_core_err(dev, "failed to query guid, err=%d\n", err); + goto err_cmdq_ver_chk; + } + + err = xsc_activate_hw_config(dev); + if (err) { + xsc_core_err(dev, "failed to activate hw config, err=%d\n", err); + goto err_cmdq_ver_chk; + } + + err = xsc_reset_function_resource(dev); + if (err) { + xsc_core_err(dev, "Failed to reset function resource\n"); + goto err_cmdq_ver_chk; + } + + funcid_to_pf_vf_index(&dev->caps, dev->glb_func_id, &dev->pcie_no, + &dev->pf_id, &dev->vf_id); + xsc_init_cq_table(dev); + xsc_init_qp_table(dev); + xsc_eq_init(dev); + +#ifdef CONFIG_XSC_SRIOV + err = xsc_sriov_init(dev); + if (err) { + xsc_core_err(dev, "Failed to init sriov %d\n", err); + goto err_sriov_init; + } + err = xsc_eswitch_init(dev); + if (err) { + xsc_core_err(dev, "Failed to init eswitch %d\n", err); + goto err_eswitch_init; + } +#endif + +#if defined(__sw_64__) + xsc_enable_relaxed_order(dev); +#endif + return 0; + +#ifdef CONFIG_XSC_SRIOV +err_eswitch_init: + xsc_sriov_cleanup(dev); +err_sriov_init: + xsc_eq_cleanup(dev); + xsc_cleanup_qp_table(dev); + xsc_cleanup_cq_table(dev); +#endif +err_cmdq_ver_chk: + xsc_cmd_cleanup(dev); +err_cmd_init: + return err; +} + +static int xsc_cleanup_once(struct xsc_core_device *dev) +{ +#ifdef CONFIG_XSC_SRIOV + xsc_eswitch_cleanup(dev); + xsc_sriov_cleanup(dev); +#endif + xsc_eq_cleanup(dev); + xsc_cleanup_qp_table(dev); + xsc_cleanup_cq_table(dev); + xsc_cmd_cleanup(dev); + return 0; +} + +static int xsc_load(struct xsc_core_device *dev) +{ + int err; + + err = xsc_irq_eq_create(dev); + if (err) { + xsc_core_err(dev, "xsc_irq_eq_create failed %d\n", err); + goto err_irq_eq_create; + } + +#ifdef CONFIG_XSC_SRIOV + err = xsc_sriov_attach(dev); + if (err) { + xsc_core_err(dev, "sriov init failed %d\n", err); + goto err_sriov; + } +#endif + return 0; + +#ifdef CONFIG_XSC_SRIOV +err_sriov: + xsc_irq_eq_destroy(dev); +#endif +err_irq_eq_create: + return err; +} + +static int xsc_unload(struct xsc_core_device *dev) +{ +#ifdef CONFIG_XSC_SRIOV + xsc_sriov_detach(dev); +#endif + if (xsc_fw_is_available(dev)) + xsc_irq_eq_destroy(dev); + + return 0; +} + +int xsc_load_one(struct xsc_core_device *dev, bool boot) +{ + int err = 0; + + mutex_lock(&dev->intf_state_mutex); + if (test_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state)) { + xsc_core_warn(dev, "interface is up, NOP\n"); + goto out; + } + + if (test_bit(XSC_INTERFACE_STATE_TEARDOWN, &dev->intf_state)) { + xsc_core_warn(dev, "device is being removed, stop load\n"); + err = -ENODEV; + goto out; + } + + if (boot) { + err = xsc_init_once(dev); + 
if (err) { + xsc_core_err(dev, "xsc_init_once failed %d\n", err); + goto err_dev_init; + } + } + + err = xsc_load(dev); + if (err) { + xsc_core_err(dev, "xsc_load failed %d\n", err); + goto err_load; + } + + if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) { + err = xsc_create_res(dev); + if (err) { + xsc_core_err(dev, "Failed to create resource, err=%d\n", err); + goto err_create_res; + } + } + + if (boot) { + err = xsc_devlink_register(priv_to_devlink(dev), dev->device); + if (err) + goto err_devlink_reg; + } + + if (xsc_core_is_pf(dev)) + xsc_lag_add_xdev(dev); + + if (xsc_device_registered(dev)) { + xsc_attach_device(dev); + } else { + err = xsc_register_device(dev); + if (err) { + xsc_core_err(dev, "register device failed %d\n", err); + goto err_reg_dev; + } + } + + err = xsc_port_ctrl_probe(dev); + if (err) { + xsc_core_err(dev, "failed to probe port control node\n"); + goto err_port_ctrl; + } + + set_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state); + mutex_unlock(&dev->intf_state_mutex); + + return err; + +err_port_ctrl: + xsc_unregister_device(dev); +err_reg_dev: + if (xsc_core_is_pf(dev)) + xsc_lag_remove_xdev(dev); + if (boot) + xsc_devlink_unregister(priv_to_devlink(dev)); +err_devlink_reg: + if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) + xsc_destroy_res(dev); + +err_create_res: + xsc_unload(dev); + +err_load: + if (boot) + xsc_cleanup_once(dev); +err_dev_init: +out: + mutex_unlock(&dev->intf_state_mutex); + return err; +} + +int xsc_unload_one(struct xsc_core_device *dev, bool cleanup) +{ + xsc_port_ctrl_remove(dev); + xsc_devlink_unregister(priv_to_devlink(dev)); + if (cleanup) + xsc_unregister_device(dev); + mutex_lock(&dev->intf_state_mutex); + if (!test_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state)) { + xsc_core_warn(dev, "%s: interface is down, NOP\n", + __func__); + if (cleanup) + xsc_cleanup_once(dev); + goto out; + } + + clear_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state); + if (xsc_device_registered(dev)) + xsc_detach_device(dev); + + if (xsc_core_is_pf(dev)) + xsc_lag_remove_xdev(dev); + + if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) + xsc_destroy_res(dev); + + xsc_unload(dev); + + if (cleanup) + xsc_cleanup_once(dev); + +out: + mutex_unlock(&dev->intf_state_mutex); + + return 0; +} + +static int xsc_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + struct xsc_core_device *xdev; + struct xsc_priv *priv; + int err; + struct devlink *devlink; + + devlink = xsc_devlink_alloc(&pci_dev->dev); + if (!devlink) { + dev_err(&pci_dev->dev, "devlink alloc failed\n"); + return -ENOMEM; + } + xdev = devlink_priv(devlink); + + xsc_product_info(pci_dev); + xdev->pdev = pci_dev; + xdev->device = &pci_dev->dev; + priv = &xdev->priv; + xdev->coredev_type = (IS_VIRT_FUNCTION(id)) ? 
+ XSC_COREDEV_VF : XSC_COREDEV_PF; + xsc_core_info(xdev, "dev_type=%d is_vf=%d\n", + xdev->coredev_type, pci_dev->is_virtfn); + +#ifdef CONFIG_XSC_SRIOV + priv->sriov.probe_vf = probe_vf; + if ((IS_VIRT_FUNCTION(id)) && !probe_vf) { + xsc_core_err(xdev, "VFs are not binded to xsc driver\n"); + return 0; + } +#endif + + /* init pcie device */ + pci_set_drvdata(pci_dev, xdev); + err = xsc_pci_init(xdev, id); + if (err) { + xsc_core_err(xdev, "xsc_pci_init failed %d\n", err); + goto err_pci_init; + } + + err = xsc_dev_init(xdev); + if (err) { + xsc_core_err(xdev, "xsc_dev_init failed %d\n", err); + goto err_dev_init; + } + + if (xsc_fpga_not_supported(xdev)) { + err = -EOPNOTSUPP; + goto err_version_check; + } + + err = xsc_load_one(xdev, true); + if (err) { + xsc_core_err(xdev, "xsc_load_one failed %d\n", err); + goto err_load; + } + + request_module_nowait(ETH_DRIVER_NAME); + + return 0; + +err_load: +err_version_check: + xsc_dev_cleanup(xdev); +err_dev_init: + xsc_pci_fini(xdev); +err_pci_init: + pci_set_drvdata(pci_dev, NULL); + xsc_devlink_free(devlink); + return err; +} + +static void xsc_pci_remove(struct pci_dev *pci_dev) +{ + struct xsc_core_device *xdev = pci_get_drvdata(pci_dev); + + set_bit(XSC_INTERFACE_STATE_TEARDOWN, &xdev->intf_state); + xsc_unload_one(xdev, true); + xsc_dev_cleanup(xdev); + + xsc_pci_fini(xdev); + pci_set_drvdata(pci_dev, NULL); + xsc_devlink_free(priv_to_devlink(xdev)); +} + +static struct pci_driver xsc_pci_driver = { + .name = "xsc-pci", + .id_table = xsc_pci_id_table, + .probe = xsc_pci_probe, + .remove = xsc_pci_remove, + +#ifdef CONFIG_XSC_SRIOV + .sriov_configure = xsc_core_sriov_configure, +#endif +}; + +int xsc_pci_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) +{ + pr_info("xsc pci driver recv %lu event\n", action); + if (xsc_get_exit_flag()) + return NOTIFY_OK; + xsc_pci_exit(); + + return NOTIFY_OK; +} + +struct notifier_block xsc_pci_nb = { + .notifier_call = xsc_pci_reboot_event_handler, + .next = NULL, + .priority = 0, +}; + +void xsc_pci_exit(void) +{ + xsc_stop_delayed_release(); + pci_unregister_driver(&xsc_pci_driver); + xsc_pci_ctrl_fini(); + xsc_port_ctrl_fini(); + xsc_unregister_debugfs(); + qpts_fini(); + xsc_free_board_info(); +} + +static int __init xsc_init(void) +{ + int err; + + xsc_register_debugfs(); + + qpts_init(); + + err = xsc_port_ctrl_init(); + if (err) { + pr_err("failed to initialize port control\n"); + goto err_port_ctrl; + } + + err = xsc_pci_ctrl_init(); + if (err) { + pr_err("failed to initialize dpdk ctrl\n"); + goto err_pci_ctrl; + } + + xsc_hw_reset = false; + err = pci_register_driver(&xsc_pci_driver); + if (err) { + pr_err("failed to register pci driver\n"); + goto err_register; + } + + xsc_init_delayed_release(); + register_reboot_notifier(&xsc_pci_nb); + + return 0; + +err_register: + xsc_pci_ctrl_fini(); +err_pci_ctrl: + xsc_port_ctrl_fini(); +err_port_ctrl: + xsc_unregister_debugfs(); + qpts_fini(); + return err; +} + +static void __exit xsc_fini(void) +{ + unregister_reboot_notifier(&xsc_pci_nb); + xsc_pci_exit(); +} + +module_init(xsc_init); +module_exit(xsc_fini); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c b/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c new file mode 100644 index 0000000000000000000000000000000000000000..a834a09d23da6727f9851fed71adbaa63586e020 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon 
Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "common/driver.h"
+#include "common/xsc_cmd.h"
+
+int xsc_core_create_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr)
+{
+	struct xsc_create_mkey_mbox_in in;
+	struct xsc_create_mkey_mbox_out out;
+	int err;
+	u8 key;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	spin_lock(&dev->dev_res->mkey_lock);
+	key = 0x80 + dev->dev_res->mkey_key++;
+	spin_unlock(&dev->dev_res->mkey_lock);
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_MKEY);
+	if (dev->reg_mr_via_cmdq)
+		err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	else
+		err = xsc_create_mkey(dev, &in, &out);
+
+	if (err) {
+		xsc_core_err(dev, "cmd exec failed %d\n", err);
+		return err;
+	}
+
+	if (out.hdr.status) {
+		xsc_core_err(dev, "status %d\n", out.hdr.status);
+		return xsc_cmd_status_to_err(&out.hdr);
+	}
+
+	mr->key = xsc_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key;
+	xsc_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key);
+
+	return err;
+}
+EXPORT_SYMBOL(xsc_core_create_mkey);
+
+int xsc_core_destroy_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr)
+{
+	struct xsc_destroy_mkey_mbox_in in;
+	struct xsc_destroy_mkey_mbox_out out;
+	int err;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_MKEY);
+	in.mkey = cpu_to_be32(mr->key);
+	if (dev->reg_mr_via_cmdq)
+		err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	else
+		err = xsc_destroy_mkey(dev, &in, &out);
+
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		return xsc_cmd_status_to_err(&out.hdr);
+
+	return err;
+}
+EXPORT_SYMBOL(xsc_core_destroy_mkey);
+
+int xsc_set_mpt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd,
+			 u32 *mtt_base)
+{
+	struct xsc_set_mpt_mbox_in *in;
+	struct xsc_set_mpt_mbox_out out;
+	struct xsc_register_mr_request *req = &in_cmd->req;
+	int err;
+
+	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	if (!in) {
+		err = -ENOMEM;
+		return err;
+	}
+	in->mpt_item.pdn = req->pdn;
+	in->mpt_item.pa_num = req->pa_num;
+	in->mpt_item.len = req->len;
+	in->mpt_item.mkey = req->mkey;
+	in->mpt_item.acc = req->acc;
+	in->mpt_item.page_mode = req->page_mode;
+	in->mpt_item.map_en = req->map_en;
+	in->mpt_item.va_base = req->va_base;
+	in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MPT);
+	memset(&out, 0, sizeof(out));
+	err = xsc_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+	if (err || out.hdr.status) {
+		xsc_core_err(dev, "set mpt failed\n");
+		kfree(in);
+		return -EINVAL;
+	}
+
+	*mtt_base = be32_to_cpu(out.mtt_base);
+	kfree(in);
+	return 0;
+}
+
+int xsc_set_mtt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd,
+			 u32 mtt_base)
+{
+#define PA_NUM_PER_CMD 1024
+	struct xsc_set_mtt_mbox_in *seg_in;
+	struct xsc_set_mtt_mbox_out seg_out;
+	struct xsc_register_mr_request *req = &in_cmd->req;
+	int tot_pg_num = be32_to_cpu(req->pa_num);
+	int seg_idx, tot_seg_num, seg_pa_num;
+	int pa_idx_base = 0;
+	int i;
+	int in_len;
+	int err;
+
+	tot_seg_num = (tot_pg_num & (PA_NUM_PER_CMD - 1)) ? ((tot_pg_num >> 10) + 1) :
+		      (tot_pg_num >> 10);
+	for (seg_idx = 0; seg_idx < tot_seg_num; seg_idx++) {
+		seg_pa_num = (seg_idx != tot_seg_num - 1) ?
PA_NUM_PER_CMD : + (tot_pg_num - ((tot_seg_num - 1) << 10)); + in_len = (seg_pa_num << 3) + sizeof(*seg_in); + seg_in = kzalloc(in_len, GFP_KERNEL); + if (!seg_in) { + err = -ENOMEM; + return err; + } + seg_in->mtt_setting.mtt_base = cpu_to_be32(mtt_base); + seg_in->mtt_setting.pa_num = cpu_to_be32(seg_pa_num); + for (i = 0; i < seg_pa_num; i++) + seg_in->mtt_setting.pas[i] = req->pas[pa_idx_base + i]; + seg_in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MTT); + + memset(&seg_out, 0, sizeof(seg_out)); + xsc_core_dbg(dev, "set mtt seg %d, pa_num %d, pa_idx_base %d, tot_seg %d\n", + seg_idx, seg_pa_num, pa_idx_base, tot_seg_num); + err = xsc_cmd_exec(dev, seg_in, in_len, &seg_out, sizeof(seg_out)); + if (err || seg_out.hdr.status) { + xsc_core_err(dev, "set mtt seg %d failed\n", seg_idx); + kfree(seg_in); + return -EINVAL; + } + kfree(seg_in); + pa_idx_base += seg_pa_num; + mtt_base += seg_pa_num; + } + return 0; +} + +int xsc_dereg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd) +{ + struct xsc_unregister_mr_mbox_in in; + struct xsc_unregister_mr_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); + in.mkey = in_cmd->req.mkey; + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "cmd exec failed %d\n", err); + return -EINVAL; + } + return 0; +} + +int xsc_reg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in) +{ + u32 mtt_base; + int err; + + err = xsc_set_mpt_via_cmdq(dev, in, &mtt_base); + if (err) { + xsc_core_err(dev, "set mpt via cmdq failed\n"); + return err; + } + + err = xsc_set_mtt_via_cmdq(dev, in, mtt_base); + if (err) { + xsc_core_err(dev, "set mtt via cmdq failed\n"); + goto set_mtt_err; + } + return 0; + +set_mtt_err: + err = xsc_dereg_mr_via_cmdq(dev, in); + if (err) + xsc_core_err(dev, "dereg error mr failed\n"); + return err; +} + +int xsc_core_register_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr, + struct xsc_register_mr_mbox_in *in, int inlen) +{ + struct xsc_register_mr_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_REG_MR); + if (dev->reg_mr_via_cmdq) + err = xsc_reg_mr_via_cmdq(dev, in); + else + err = xsc_reg_mr(dev, in, &out); + + if (err) { + xsc_core_err(dev, "cmd exec failed %d\n", err); + return err; + } + if (out.hdr.status) { + xsc_core_err(dev, "status %d\n", out.hdr.status); + return xsc_cmd_status_to_err(&out.hdr); + } + + return 0; +} +EXPORT_SYMBOL(xsc_core_register_mr); + +int xsc_core_dereg_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr) +{ + struct xsc_unregister_mr_mbox_in in; + struct xsc_unregister_mr_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); + in.mkey = cpu_to_be32(xsc_mkey_to_idx(mr->key)); + if (dev->reg_mr_via_cmdq) + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + else + err = xsc_dereg_mr(dev, &in, &out); + + if (err) { + xsc_core_err(dev, "cmd exec failed %d\n", err); + return err; + } + if (out.hdr.status) { + xsc_core_err(dev, "status %d\n", out.hdr.status); + return xsc_cmd_status_to_err(&out.hdr); + } + + return 0; +} +EXPORT_SYMBOL(xsc_core_dereg_mr); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..7138c281ed203a38ce0454b840887acad4d77bcb --- /dev/null +++ 
b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "common/driver.h" +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#ifdef CONFIG_RFS_ACCEL +#include +#endif +#include "fw/xsc_flow.h" +#include "fw/xsc_fw.h" + +enum xsc_eq_type { + XSC_EQ_TYPE_COMP, + XSC_EQ_TYPE_ASYNC, +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + XSC_EQ_TYPE_PF, +#endif +}; + +struct xsc_irq { + struct atomic_notifier_head nh; + cpumask_var_t mask; + char name[XSC_MAX_IRQ_NAME]; +}; + +struct xsc_irq_table { + struct xsc_irq *irq; + int nvec; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap; +#endif +}; + +struct xsc_msix_resource *g_msix_xres; + +static irqreturn_t xsc_dma_read_msix_handler(int irq, void *dev_id) +{ + xsc_dma_read_done_complete(); + return IRQ_HANDLED; +} + +static int xsc_dma_read_msix_init(struct xsc_core_device *xdev) +{ + int err; + char *name = "xsc_dma_read_done"; + struct xsc_dev_resource *dev_res = xdev->dev_res; + int irqn; + u32 value = 0; + int vecid = 0; + + snprintf(dev_res->irq_info[XSC_DMA_READ_DONE_VEC].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + name, pci_name(xdev->pdev)); + irqn = pci_irq_vector(xdev->pdev, XSC_DMA_READ_DONE_VEC); + err = request_irq(irqn, xsc_dma_read_msix_handler, 0, + dev_res->irq_info[XSC_DMA_READ_DONE_VEC].name, (void *)xdev); + + vecid = (xdev->msix_vec_base + XSC_DMA_READ_DONE_VEC); + value = ((1 << 12) | (vecid & 0xfff)); + REG_WR32(xdev, HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR, value); + + return err; +} + +static void xsc_free_irq(struct xsc_core_device *xdev, unsigned int vector) +{ + unsigned int irqn = 0; + + irqn = pci_irq_vector(xdev->pdev, vector); + disable_irq(irqn); + + if (xsc_fw_is_available(xdev)) + free_irq(irqn, xdev); +} + +static void xsc_dma_read_msix_fini(struct xsc_core_device *xdev) +{ + if (xdev->caps.msix_enable && xsc_core_is_pf(xdev)) + xsc_free_irq(xdev, XSC_DMA_READ_DONE_VEC); +} + +struct xsc_eq *xsc_eq_get(struct xsc_core_device *dev, int i) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + struct xsc_eq *eq, *n; + struct xsc_eq *eq_ret = NULL; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + if (eq->index == i) { + eq_ret = eq; + break; + } + } + spin_unlock(&table->lock); + + return eq_ret; +} +EXPORT_SYMBOL(xsc_eq_get); + +void mask_cpu_by_node(int node, struct cpumask *dstp) +{ + int i; + + for (i = 0; i < nr_cpu_ids; i++) { + if (node == cpu_to_node(i)) + cpumask_set_cpu(i, dstp); + } +} +EXPORT_SYMBOL(mask_cpu_by_node); + +static int set_comp_irq_affinity_hint(struct xsc_core_device *dev, int i) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + int vecidx = table->eq_vec_comp_base + i; + struct xsc_eq *eq = xsc_eq_get(dev, i); + unsigned int irqn; + int ret; + + irqn = pci_irq_vector(dev->pdev, vecidx); + if (!zalloc_cpumask_var(&eq->mask, GFP_KERNEL)) { + xsc_core_err(dev, "zalloc_cpumask_var rx cpumask failed"); + return -ENOMEM; + } + + if (!zalloc_cpumask_var(&dev->xps_cpumask, GFP_KERNEL)) { + xsc_core_err(dev, "zalloc_cpumask_var tx cpumask failed"); + return -ENOMEM; + } + + mask_cpu_by_node(dev->priv.numa_node, eq->mask); + ret = irq_set_affinity_hint(irqn, eq->mask); + + return ret; +} + +static void clear_comp_irq_affinity_hint(struct xsc_core_device *dev, int i) +{ + struct xsc_eq_table *table = 
&dev->dev_res->eq_table; + int vecidx = table->eq_vec_comp_base + i; + struct xsc_eq *eq = xsc_eq_get(dev, i); + int irqn; + + irqn = pci_irq_vector(dev->pdev, vecidx); + irq_set_affinity_hint(irqn, NULL); + free_cpumask_var(eq->mask); +} + +static int set_comp_irq_affinity_hints(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + int nvec = table->num_comp_vectors; + int err; + int i; + + for (i = 0; i < nvec; i++) { + err = set_comp_irq_affinity_hint(dev, i); + if (err) + goto err_out; + } + + return 0; + +err_out: + for (i--; i >= 0; i--) + clear_comp_irq_affinity_hint(dev, i); + free_cpumask_var(dev->xps_cpumask); + + return err; +} + +static void clear_comp_irq_affinity_hints(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + int nvec = table->num_comp_vectors; + int i; + + for (i = 0; i < nvec; i++) + clear_comp_irq_affinity_hint(dev, i); + free_cpumask_var(dev->xps_cpumask); +} + +struct cpumask * +xsc_comp_irq_get_affinity_mask(struct xsc_core_device *dev, int vector) +{ + struct xsc_eq *eq = xsc_eq_get(dev, vector); + + if (unlikely(!eq)) + return NULL; + + return eq->mask; +} +EXPORT_SYMBOL(xsc_comp_irq_get_affinity_mask); + +static int xsc_alloc_irq_vectors(struct xsc_core_device *dev) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + struct xsc_eq_table *table = &dev_res->eq_table; + int nvec = dev->caps.msix_num; + int nvec_base; + int err; + + if (xsc_core_is_pf(dev)) + nvec_base = XSC_EQ_VEC_COMP_BASE; + else + /*VF device not need dma read done vector.*/ + nvec_base = (XSC_EQ_VEC_COMP_BASE - 1); + + if (nvec <= nvec_base) { + xsc_core_warn(dev, "failed to alloc irq vector(%d)\n", nvec); + return -ENOMEM; + } + + dev_res->irq_info = kcalloc(nvec, sizeof(*dev_res->irq_info), GFP_KERNEL); + if (!dev_res->irq_info) + return -ENOMEM; + + nvec = pci_alloc_irq_vectors(dev->pdev, nvec_base + 1, nvec, PCI_IRQ_MSIX); + if (nvec < 0) { + err = nvec; + goto err_free_irq_info; + } + + table->eq_vec_comp_base = nvec_base; + table->num_comp_vectors = nvec - nvec_base; + dev->msix_vec_base = dev->caps.msix_base; + xsc_core_info(dev, + "alloc msix_vec_num=%d, comp_num=%d, max_msix_num=%d, msix_vec_base=%d\n", + nvec, table->num_comp_vectors, dev->caps.msix_num, dev->msix_vec_base); + + return 0; + +err_free_irq_info: + pci_free_irq_vectors(dev->pdev); + kfree(dev_res->irq_info); + return err; +} + +static void xsc_free_irq_vectors(struct xsc_core_device *dev) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + + if (!xsc_fw_is_available(dev)) + return; + + pci_free_irq_vectors(dev->pdev); + kfree(dev_res->irq_info); +} + +int xsc_vector2eqn(struct xsc_core_device *dev, int vector, int *eqn, + unsigned int *irqn) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + struct xsc_eq *eq, *n; + int err = -ENOENT; + + if (!dev->caps.msix_enable) + return 0; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + if (eq->index == vector) { + *eqn = eq->eqn; + *irqn = eq->irqn; + err = 0; + break; + } + } + spin_unlock(&table->lock); + + return err; +} +EXPORT_SYMBOL(xsc_vector2eqn); + +static void free_comp_eqs(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + struct xsc_eq *eq, *n; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + list_del(&eq->list); + spin_unlock(&table->lock); + if (xsc_destroy_unmap_eq(dev, eq)) + xsc_core_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); + 
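		/* free the host-side EQ state even when the destroy command failed */
+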
kfree(eq); + spin_lock(&table->lock); + } + spin_unlock(&table->lock); +} + +static int alloc_comp_eqs(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + char name[XSC_MAX_IRQ_NAME]; + struct xsc_eq *eq; + int ncomp_vec; + int nent; + int err; + int i; + + INIT_LIST_HEAD(&table->comp_eqs_list); + ncomp_vec = table->num_comp_vectors; + nent = XSC_COMP_EQ_SIZE; + + for (i = 0; i < ncomp_vec; i++) { + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto clean; + } + + snprintf(name, XSC_MAX_IRQ_NAME, "xsc_comp%d", i); + err = xsc_create_map_eq(dev, eq, + i + table->eq_vec_comp_base, nent, name); + if (err) { + kfree(eq); + goto clean; + } + + eq->index = i; + spin_lock(&table->lock); + list_add_tail(&eq->list, &table->comp_eqs_list); + spin_unlock(&table->lock); + } + + return 0; + +clean: + free_comp_eqs(dev); + return err; +} + +static irqreturn_t xsc_cmd_handler(int irq, void *arg) +{ + struct xsc_core_device *dev = (struct xsc_core_device *)arg; + int err; + + disable_irq_nosync(dev->cmd.irqn); + err = xsc_cmd_err_handler(dev); + if (!err) + xsc_cmd_resp_handler(dev); + enable_irq(dev->cmd.irqn); + + return IRQ_HANDLED; +} + +int xsc_request_irq_for_cmdq(struct xsc_core_device *dev, u8 vecidx) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + + writel(dev->msix_vec_base + vecidx, REG_ADDR(dev, dev->cmd.reg.msix_vec_addr)); + + snprintf(dev_res->irq_info[vecidx].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + "xsc_cmd", pci_name(dev->pdev)); + dev->cmd.irqn = pci_irq_vector(dev->pdev, vecidx); + return request_irq(dev->cmd.irqn, xsc_cmd_handler, 0, + dev_res->irq_info[vecidx].name, dev); +} + +void xsc_free_irq_for_cmdq(struct xsc_core_device *dev) +{ + xsc_free_irq(dev, XSC_VEC_CMD); +} + +static irqreturn_t xsc_event_handler(int irq, void *arg) +{ + struct xsc_core_device *dev = (struct xsc_core_device *)arg; + + xsc_core_dbg(dev, "cmd event hint irq: %d\n", irq); + + if (!dev->eth_priv) + return IRQ_NONE; + + if (!dev->event_handler) + return IRQ_NONE; + + dev->event_handler(dev->eth_priv); + + return IRQ_HANDLED; +} + +int xsc_request_irq_for_event(struct xsc_core_device *dev) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + + snprintf(dev_res->irq_info[XSC_VEC_CMD_EVENT].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + "xsc_eth_event", pci_name(dev->pdev)); + return request_irq(pci_irq_vector(dev->pdev, XSC_VEC_CMD_EVENT), xsc_event_handler, 0, + dev_res->irq_info[XSC_VEC_CMD_EVENT].name, dev); +} + +void xsc_free_irq_for_event(struct xsc_core_device *dev) +{ + xsc_free_irq(dev, XSC_VEC_CMD_EVENT); +} + +int xsc_cmd_enable_msix(struct xsc_core_device *xdev) +{ + struct xsc_msix_table_info_mbox_in in; + struct xsc_msix_table_info_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_MSIX); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) { + xsc_core_err(xdev, "xsc_cmd_exec enable msix failed %d\n", err); + return err; + } + + return 0; +} + +int xsc_irq_eq_create(struct xsc_core_device *dev) +{ + int err; + + if (dev->caps.msix_enable == 0) + return 0; + + err = xsc_alloc_irq_vectors(dev); + if (err) { + xsc_core_err(dev, "enable msix failed, err=%d\n", err); + goto err_alloc_irq; + } + + err = xsc_start_eqs(dev); + if (err) { + xsc_core_err(dev, "failed to start EQs, err=%d\n", err); + goto err_start_eqs; + } + + err = alloc_comp_eqs(dev); + if (err) { + xsc_core_err(dev, "failed to alloc comp EQs, err=%d\n", err); + 
goto err_alloc_comp_eqs; + } + + err = xsc_request_irq_for_cmdq(dev, XSC_VEC_CMD); + if (err) { + xsc_core_err(dev, "failed to request irq for cmdq, err=%d\n", err); + goto err_request_cmd_irq; + } + + err = xsc_request_irq_for_event(dev); + if (err) { + xsc_core_err(dev, "failed to request irq for event, err=%d\n", err); + goto err_request_event_irq; + } + + if (dev->caps.msix_enable && xsc_core_is_pf(dev)) { + err = xsc_dma_read_msix_init(dev); + if (err) { + xsc_core_err(dev, "dma read msix init failed %d.\n", err); + goto err_dma_read_msix; + } + } + + err = set_comp_irq_affinity_hints(dev); + if (err) { + xsc_core_err(dev, "failed to alloc affinity hint cpumask, err=%d\n", err); + goto err_set_affinity; + } + + xsc_cmd_use_events(dev); + err = xsc_cmd_enable_msix(dev); + if (err) { + xsc_core_err(dev, "xsc_cmd_enable_msix failed %d.\n", err); + xsc_cmd_use_polling(dev); + goto err_set_affinity; + } + return 0; + +err_set_affinity: + xsc_dma_read_msix_fini(dev); +err_dma_read_msix: + xsc_free_irq_for_event(dev); +err_request_event_irq: + xsc_free_irq_for_cmdq(dev); +err_request_cmd_irq: + free_comp_eqs(dev); +err_alloc_comp_eqs: + xsc_stop_eqs(dev); +err_start_eqs: + xsc_free_irq_vectors(dev); +err_alloc_irq: + return err; +} + +int xsc_irq_eq_destroy(struct xsc_core_device *dev) +{ + if (dev->caps.msix_enable == 0) + return 0; + + xsc_stop_eqs(dev); + clear_comp_irq_affinity_hints(dev); + free_comp_eqs(dev); + + xsc_dma_read_msix_fini(dev); + xsc_free_irq_for_event(dev); + xsc_free_irq_for_cmdq(dev); + xsc_free_irq_vectors(dev); + + return 0; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c b/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c new file mode 100644 index 0000000000000000000000000000000000000000..37db01d1742f8c6bf91d0e965df3491e971ed2aa --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "common/driver.h" + +int xsc_core_alloc_pd(struct xsc_core_device *xdev, u32 *pdn) +{ + struct xsc_alloc_pd_mbox_in in; + struct xsc_alloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ALLOC_PD); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + *pdn = be32_to_cpu(out.pdn) & 0xffffff; + return err; +} +EXPORT_SYMBOL(xsc_core_alloc_pd); + +int xsc_core_dealloc_pd(struct xsc_core_device *xdev, u32 pdn) +{ + struct xsc_dealloc_pd_mbox_in in; + struct xsc_dealloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEALLOC_PD); + in.pdn = cpu_to_be32(pdn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(xsc_core_dealloc_pd); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/port.c b/drivers/net/ethernet/yunsilicon/xsc/pci/port.c new file mode 100644 index 0000000000000000000000000000000000000000..80414f3917d97c1a4c4e7fae32ae4a29b56cf641 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/port.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include "common/driver.h" +#include "common/port.h" + +int xsc_core_access_reg(struct xsc_core_device *xdev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write) +{ + struct xsc_access_reg_mbox_in *in = NULL; + struct xsc_access_reg_mbox_out *out = NULL; + int err = -ENOMEM; + + in = xsc_vzalloc(sizeof(*in) + size_in); + if (!in) + return -ENOMEM; + + out = xsc_vzalloc(sizeof(*out) + size_out); + if (!out) + goto ex1; + + memcpy(in->data, data_in, size_in); + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_ACCESS_REG); + in->arg = cpu_to_be32(arg); + in->register_id = cpu_to_be16(reg_num); + err = xsc_cmd_exec(xdev, in, sizeof(*in) + size_in, out, + sizeof(*out) + size_out); + if (err) + goto ex2; + + if (out->hdr.status) + return xsc_cmd_status_to_err(&out->hdr); + + if (!err) + memcpy(data_out, out->data, size_out); + +ex2: + xsc_vfree(out); +ex1: + xsc_vfree(in); + return err; +} +EXPORT_SYMBOL_GPL(xsc_core_access_reg); + +struct xsc_reg_pcap { + u8 rsvd0; + u8 port_num; + u8 rsvd1[2]; + __be32 caps_127_96; + __be32 caps_95_64; + __be32 caps_63_32; + __be32 caps_31_0; +}; + +int xsc_set_port_caps(struct xsc_core_device *xdev, int port_num, u32 caps) +{ + struct xsc_reg_pcap in; + struct xsc_reg_pcap out; + int err; + + memset(&in, 0, sizeof(in)); + in.caps_127_96 = cpu_to_be32(caps); + in.port_num = port_num; + + err = xsc_core_access_reg(xdev, &in, sizeof(in), &out, + sizeof(out), XSC_REG_PCAP, 0, 1); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_set_port_caps); + +static int xsc_query_module_num(struct xsc_core_device *dev, int *module_num) +{ + *module_num = dev->mac_port; + return 0; +} + +static int xsc_query_module_id(struct xsc_core_device *dev, int module_num, + u8 *module_id) +{ + struct xsc_reg_mcia in; + struct xsc_reg_mcia out; + int err, status; + u8 *ptr; + + in.i2c_device_address = XSC_I2C_ADDR_LOW; + in.module = module_num; + in.device_address = 0; + in.page_number = 0; + in.size = 1; + + err = xsc_core_access_reg(dev, &in, sizeof(in), &out, + sizeof(out), XSC_REG_MCIA, 0, 0); + if (err) + return err; + + status = out.status; + if (status) { + xsc_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + ptr = out.dword_0; + + *module_id = ptr[0]; + + return 0; +} + +static int xsc_qsfp_eeprom_page(u16 offset) +{ + if (offset < XSC_EEPROM_PAGE_LENGTH) + /* Addresses between 0-255 - page 00 */ + return 0; + + /* Addresses between 256 - 639 belongs to pages 01, 02 and 03 + * For example, offset = 400 belongs to page 02: + * 1 + ((400 - 256)/128) = 2 + */ + return 1 + ((offset - XSC_EEPROM_PAGE_LENGTH) / + XSC_EEPROM_HIGH_PAGE_LENGTH); +} + +static int xsc_qsfp_eeprom_high_page_offset(int page_num) +{ + if (!page_num) /* Page 0 always start from low page */ + return 0; + + /* High page */ + return page_num * XSC_EEPROM_HIGH_PAGE_LENGTH; +} + +static void xsc_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = XSC_I2C_ADDR_LOW; + *page_num = xsc_qsfp_eeprom_page(*offset); + *offset -= xsc_qsfp_eeprom_high_page_offset(*page_num); +} + +static void xsc_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = XSC_I2C_ADDR_LOW; + *page_num = 0; + + if (*offset < XSC_EEPROM_PAGE_LENGTH) + return; + + *i2c_addr = XSC_I2C_ADDR_HIGH; + *offset -= XSC_EEPROM_PAGE_LENGTH; +} + +static int xsc_query_mcia(struct xsc_core_device *dev, + struct xsc_module_eeprom_query_params *params, u8 *data) +{ + struct xsc_reg_mcia in; + struct xsc_reg_mcia out; + int 
status, err; + void *ptr; + u16 size; + + size = min_t(int, params->size, XSC_EEPROM_MAX_BYTES); + + in.i2c_device_address = params->i2c_address; + in.module = params->module_number; + in.device_address = params->offset; + in.page_number = params->page; + in.size = size; + + err = xsc_core_access_reg(dev, &in, sizeof(in), &out, + sizeof(out), XSC_REG_MCIA, 0, 0); + if (err) + return err; + + status = out.status; + if (status) { + xsc_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + + ptr = out.dword_0; + memcpy(data, ptr, size); + + return size; +} + +int xsc_query_module_eeprom(struct xsc_core_device *dev, + u16 offset, u16 size, u8 *data) +{ + struct xsc_module_eeprom_query_params query = {0}; + u8 module_id; + int err; + + err = xsc_query_module_num(dev, &query.module_number); + if (err) + return err; + + err = xsc_query_module_id(dev, query.module_number, &module_id); + if (err) + return err; + + switch (module_id) { + case XSC_MODULE_ID_SFP: + xsc_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); + break; + case XSC_MODULE_ID_QSFP: + case XSC_MODULE_ID_QSFP_PLUS: + case XSC_MODULE_ID_QSFP28: + case XSC_MODULE_ID_QSFP_DD: + case XSC_MODULE_ID_DSFP: + case XSC_MODULE_ID_QSFP_PLUS_CMIS: + xsc_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); + break; + default: + xsc_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; + } + + if (offset + size > XSC_EEPROM_PAGE_LENGTH) + /* Cross pages read, read until offset 256 in low page */ + size = XSC_EEPROM_PAGE_LENGTH - offset; + + query.size = size; + query.offset = offset; + + return xsc_query_mcia(dev, &query, data); +} +EXPORT_SYMBOL_GPL(xsc_query_module_eeprom); + +int xsc_query_module_eeprom_by_page(struct xsc_core_device *dev, + struct xsc_module_eeprom_query_params *params, + u8 *data) +{ + u8 module_id; + int err; + + err = xsc_query_module_num(dev, ¶ms->module_number); + if (err) + return err; + + err = xsc_query_module_id(dev, params->module_number, &module_id); + if (err) + return err; + + switch (module_id) { + case XSC_MODULE_ID_SFP: + if (params->page > 0) + return -EINVAL; + break; + case XSC_MODULE_ID_QSFP: + case XSC_MODULE_ID_QSFP28: + case XSC_MODULE_ID_QSFP_PLUS: + case XSC_MODULE_ID_QSFP_DD: + case XSC_MODULE_ID_QSFP_PLUS_CMIS: + if (params->page > 3) + return -EINVAL; + break; + case XSC_MODULE_ID_DSFP: + break; + default: + xsc_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; + } + + if (params->i2c_address != XSC_I2C_ADDR_HIGH && + params->i2c_address != XSC_I2C_ADDR_LOW) { + xsc_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address); + return -EINVAL; + } + + return xsc_query_mcia(dev, params, data); +} +EXPORT_SYMBOL_GPL(xsc_query_module_eeprom_by_page); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c new file mode 100644 index 0000000000000000000000000000000000000000..0e5d365c0b23ba5a1baf37e85aebcb82093e124a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include "common/qp.h"
+#include "common/driver.h"
+#include <linux/radix-tree.h>
+#include "common/xsc_core.h"
+
+#define GROUP_DESTROY_FLAG_SHIFT 15
+#define GROUP_DESTROY_FLAG_MASK (1 << (GROUP_DESTROY_FLAG_SHIFT))
+
+#define GROUP_OTHER_HASH_SIZE 16
+#define GROUP_CC_HASH_SIZE (1024 - GROUP_OTHER_HASH_SIZE)
+
+enum {
+	GROUP_MODE_PER_QP = 0,
+	GROUP_MODE_PER_DEST_IP,
+};
+
+struct {
+	struct list_head head;
+	spinlock_t lock; /* protect delayed_release_list */
+	struct task_struct *poll_task;
+	wait_queue_head_t wq;
+	int wait_flag;
+} delayed_release_list;
+
+enum {
+	SLEEP,
+	WAKEUP,
+	EXIT,
+};
+
+static bool exit_flag;
+
+void xsc_set_exit_flag(void)
+{
+	exit_flag = true;
+}
+EXPORT_SYMBOL_GPL(xsc_set_exit_flag);
+
+bool xsc_get_exit_flag(void)
+{
+	return exit_flag;
+}
+EXPORT_SYMBOL_GPL(xsc_get_exit_flag);
+
+bool exist_incomplete_qp_flush(void)
+{
+	return !list_empty(&delayed_release_list.head);
+}
+EXPORT_SYMBOL_GPL(exist_incomplete_qp_flush);
+
+static bool xsc_qp_flush_finished(struct xsc_core_device *xdev, u32 qpn)
+{
+	struct xsc_query_qp_flush_status_mbox_in in;
+	struct xsc_query_qp_flush_status_mbox_out out;
+	int err;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_QP_FLUSH_STATUS);
+	in.qpn = cpu_to_be32(qpn);
+	err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if ((!err && !out.hdr.status) || err == -ETIMEDOUT)
+		return true;
+
+	xsc_core_dbg(xdev, "qp[%d] flush incomplete.\n", qpn);
+	return false;
+}
+
+static int xsc_qp_flush_check(void *arg)
+{
+	struct xsc_qp_rsc *entry;
+
+	while (!kthread_should_stop()) {
+		if (need_resched())
+			schedule();
+
+		spin_lock(&delayed_release_list.lock);
+		entry = list_first_entry_or_null(&delayed_release_list.head,
+						 struct xsc_qp_rsc, node);
+		if (!entry) {
+			spin_unlock(&delayed_release_list.lock);
+			wait_event_interruptible(delayed_release_list.wq,
+						 delayed_release_list.wait_flag != SLEEP);
+			if (delayed_release_list.wait_flag == EXIT)
+				break;
+			delayed_release_list.wait_flag = SLEEP;
+			continue;
+		}
+		list_del(&entry->node);
+		spin_unlock(&delayed_release_list.lock);
+
+		if (!exit_flag && !xsc_qp_flush_finished(entry->xdev, entry->qpn)) {
+			spin_lock(&delayed_release_list.lock);
+			list_add_tail(&entry->node, &delayed_release_list.head);
+			spin_unlock(&delayed_release_list.lock);
+		} else {
+			complete(&entry->delayed_release);
+		}
+	}
+
+	return 0;
+}
+
+void xsc_init_delayed_release(void)
+{
+	INIT_LIST_HEAD(&delayed_release_list.head);
+	spin_lock_init(&delayed_release_list.lock);
+	init_waitqueue_head(&delayed_release_list.wq);
+	delayed_release_list.wait_flag = SLEEP;
+	delayed_release_list.poll_task = kthread_create(xsc_qp_flush_check, NULL, "qp flush check");
+	/* kthread_create() returns an ERR_PTR, never NULL, on failure */
+	if (IS_ERR(delayed_release_list.poll_task))
+		delayed_release_list.poll_task = NULL;
+	else
+		wake_up_process(delayed_release_list.poll_task);
+}
+
+void xsc_stop_delayed_release(void)
+{
+	delayed_release_list.wait_flag = EXIT;
+	wake_up(&delayed_release_list.wq);
+	if (delayed_release_list.poll_task)
+		kthread_stop(delayed_release_list.poll_task);
+}
+
+static void xsc_wait_qp_flush_complete(struct xsc_core_device *xdev, u32 qpn)
+{
+	struct xsc_qp_rsc qp_rsc;
+	int err = 0;
+
+	if (exit_flag)
+		return;
+
+	init_completion(&qp_rsc.delayed_release);
+	qp_rsc.qpn = qpn;
+	qp_rsc.xdev = xdev;
+	spin_lock(&delayed_release_list.lock);
+	list_add_tail(&qp_rsc.node, &delayed_release_list.head);
+	spin_unlock(&delayed_release_list.lock);
+	delayed_release_list.wait_flag = WAKEUP;
+	wake_up(&delayed_release_list.wq);
+
+	while ((err = wait_for_completion_interruptible(&qp_rsc.delayed_release))
+	       == -ERESTARTSYS) {
+		xsc_core_dbg(xdev, "qp %d wait for completion is interrupted, err = %d\n",
+			     qpn, err);
+		if (need_resched())
+			schedule();
+	}
+}
+
+int create_resource_common(struct xsc_core_device *xdev,
+			   struct xsc_core_qp *qp)
+{
+	struct xsc_qp_table *table = &xdev->dev_res->qp_table;
+	int err;
+
+	spin_lock_irq(&table->lock);
+	err = radix_tree_insert(&table->tree, qp->qpn, qp);
+	spin_unlock_irq(&table->lock);
+	if (err)
+		return err;
+
+	atomic_set(&qp->refcount, 1);
+	init_completion(&qp->free);
+	qp->pid = current->pid;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(create_resource_common);
+
+void destroy_resource_common(struct xsc_core_device *xdev,
+			     struct xsc_core_qp *qp)
+{
+	struct xsc_qp_table *table = &xdev->dev_res->qp_table;
+	unsigned long flags;
+
+	spin_lock_irqsave(&table->lock, flags);
+	radix_tree_delete(&table->tree, qp->qpn);
+	spin_unlock_irqrestore(&table->lock, flags);
+
+	if (atomic_dec_and_test(&qp->refcount))
+		complete(&qp->free);
+	wait_for_completion(&qp->free);
+}
+EXPORT_SYMBOL_GPL(destroy_resource_common);
+
+void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type)
+{
+	struct xsc_qp_table *table = &xdev->dev_res->qp_table;
+	struct xsc_core_qp *qp;
+
+	spin_lock(&table->lock);
+
+	qp = radix_tree_lookup(&table->tree, qpn);
+	if (qp)
+		atomic_inc(&qp->refcount);
+
+	spin_unlock(&table->lock);
+
+	if (!qp) {
+		xsc_core_warn(xdev, "Async event for bogus QP 0x%x\n", qpn);
+		return;
+	}
+
+	qp->event(qp, event_type);
+
+	if (atomic_dec_and_test(&qp->refcount))
+		complete(&qp->free);
+}
+
+int xsc_core_create_qp(struct xsc_core_device *xdev,
+		       struct xsc_core_qp *qp,
+		       struct xsc_create_qp_mbox_in *in,
+		       int inlen)
+{
+	struct xsc_create_qp_mbox_out out;
+	struct xsc_destroy_qp_mbox_in din;
+	struct xsc_destroy_qp_mbox_out dout;
+	int err;
+	struct timespec64 ts;
+
+	ktime_get_boottime_ts64(&ts);
+
+	memset(&dout, 0, sizeof(dout));
+	in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP);
+
+	err = xsc_cmd_exec(xdev, in, inlen, &out, sizeof(out));
+	if (err) {
+		xsc_core_err(xdev, "ret %d\n", err);
+		return err;
+	}
+
+	if (out.hdr.status) {
+		xsc_core_err(xdev, "current num of QPs %u\n", atomic_read(&xdev->num_qps));
+		return xsc_cmd_status_to_err(&out.hdr);
+	}
+	qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
+	xsc_core_info(xdev, "qpn = %u\n", qp->qpn);
+
+	qp->trace_info = kzalloc(sizeof(*qp->trace_info), GFP_KERNEL);
+	if (!qp->trace_info) {
+		err = -ENOMEM;
+		goto err_cmd;
+	}
+	qp->trace_info->pid = current->pid;
+	qp->trace_info->timestamp = (u64)(u32)ts.tv_sec * MSEC_PER_SEC +
+				    ts.tv_nsec / NSEC_PER_MSEC;
+
+	err = create_resource_common(xdev, qp);
+	if (err) {
+		xsc_core_err(xdev, "err %d\n", err);
+		goto err_trace;
+	}
+
+	err = xsc_debug_qp_add(xdev, qp);
+	if (err)
+		xsc_core_err(xdev, "failed adding QP %u to debug file system\n",
+			     qp->qpn);
+
+	atomic_inc(&xdev->num_qps);
+	return 0;
+err_trace:
+	kfree(qp->trace_info);
+err_cmd:
+	memset(&din, 0, sizeof(din));
+	memset(&dout, 0, sizeof(dout));
+	din.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP);
+	din.qpn = cpu_to_be32(qp->qpn);
+	xsc_cmd_exec(xdev, &din, sizeof(din), &dout, sizeof(dout));
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_core_create_qp);
+
+int xsc_core_destroy_qp(struct xsc_core_device *xdev,
+			struct xsc_core_qp *qp)
+{
+	struct xsc_destroy_qp_mbox_in in;
+	struct xsc_destroy_qp_mbox_out out;
+	int err;
+
+	xsc_debug_qp_remove(xdev, qp);
+	xsc_remove_qptrace(xdev, qp);
+	kfree(qp->trace_info);
+
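+	/* drop the radix-tree entry and wait for the last reference before
+	 * destroying the QP in hardware
+	 */
+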
destroy_resource_common(xdev, qp); + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qp->qpn); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + atomic_dec(&xdev->num_qps); + return 0; +} +EXPORT_SYMBOL_GPL(xsc_core_destroy_qp); + +int xsc_modify_qp(struct xsc_core_device *xdev, + struct xsc_modify_qp_mbox_in *in, + struct xsc_modify_qp_mbox_out *out, + u32 qpn, u16 status) +{ + int ret = 0; + + in->hdr.opcode = cpu_to_be16(status); + in->qpn = cpu_to_be32(qpn); + in->no_need_wait = 1; + + ret = xsc_cmd_exec(xdev, in, sizeof(*in), out, sizeof(*out)); + if ((status == XSC_CMD_OP_2RST_QP || status == XSC_CMD_OP_2ERR_QP) && + out->hdr.status) { + xsc_wait_qp_flush_complete(xdev, qpn); + out->hdr.status = 0; + } + if (ret || out->hdr.status != 0) { + xsc_core_err(xdev, "failed to modify qp %u status=%u, err=%d out.status %u\n", + qpn, status, ret, out->hdr.status); + ret = -ENOEXEC; + } + + return ret; +} +EXPORT_SYMBOL_GPL(xsc_modify_qp); + +int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state, + enum xsc_qp_state new_state, + struct xsc_modify_qp_mbox_in *in, int sqd_event, + struct xsc_core_qp *qp) +{ + static const u16 optab[XSC_QP_NUM_STATE][XSC_QP_NUM_STATE] = { + [XSC_QP_STATE_RST] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_INIT] = XSC_CMD_OP_RST2INIT_QP, + }, + [XSC_QP_STATE_INIT] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_INIT] = XSC_CMD_OP_INIT2INIT_QP, + [XSC_QP_STATE_RTR] = XSC_CMD_OP_INIT2RTR_QP, + }, + [XSC_QP_STATE_RTR] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_RTR2RTS_QP, + }, + [XSC_QP_STATE_RTS] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_RTS2RTS_QP, + [XSC_QP_STATE_SQD] = XSC_CMD_OP_RTS2SQD_QP, + }, + [XSC_QP_STATE_SQD] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_SQD2RTS_QP, + [XSC_QP_STATE_SQD] = XSC_CMD_OP_SQD2SQD_QP, + }, + [XSC_QP_STATE_SQER] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_SQERR2RTS_QP, + }, + [XSC_QP_STATE_ERR] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + } + }; + + struct xsc_modify_qp_mbox_out out; + int err = 0; + u16 op; + + if (cur_state >= XSC_QP_NUM_STATE || new_state >= XSC_QP_NUM_STATE || + !optab[cur_state][new_state]) + return -EINVAL; + + memset(&out, 0, sizeof(out)); + op = optab[cur_state][new_state]; + + if (new_state == XSC_QP_STATE_RTR) { + if (qp->qp_type_internal == XSC_QUEUE_TYPE_RDMA_RC && + ((in->ctx.ip_type == 0 && in->ctx.dip[0] == in->ctx.sip[0]) || + (in->ctx.ip_type != 0 && + memcmp(in->ctx.dip, in->ctx.sip, sizeof(in->ctx.sip)) == 0))) + in->ctx.qp_out_port = xdev->caps.nif_port_num + xdev->pcie_no; + else if (in->ctx.lag_sel_en == 0) + in->ctx.qp_out_port = xdev->pf_id; + else + in->ctx.qp_out_port = in->ctx.lag_sel; + + in->ctx.pcie_no = xdev->pcie_no; + in->ctx.func_id = cpu_to_be16(xdev->glb_func_id); + } + + err = xsc_modify_qp(xdev, in, &out, qp->qpn, op); + if (err) + return err; + + if (new_state == 
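+ /* The branch below fires on the transition to RTR: the driver
+  * snapshots the connection's addressing into qp->trace_info for the
+  * qptrace stream; 4791 is the IANA-assigned RoCEv2 UDP port.
+  */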
XSC_QP_STATE_RTR) { + qp->trace_info->main_ver = YS_QPTRACE_VER_MAJOR; + qp->trace_info->sub_ver = YS_QPTRACE_VER_MINOR; + qp->trace_info->qp_type = qp->qp_type; + qp->trace_info->s_port = in->ctx.src_udp_port; + qp->trace_info->d_port = cpu_to_be16(4791); + qp->trace_info->lqpn = qp->qpn; + qp->trace_info->rqpn = be32_to_cpu(in->ctx.remote_qpn); + qp->trace_info->affinity_idx = (in->ctx.lag_sel_en == 0 ? 0 : in->ctx.lag_sel); + qp->trace_info->af_type = (in->ctx.ip_type == 0 ? AF_INET : AF_INET6); + + if (in->ctx.ip_type == 0) { + qp->trace_info->s_addr.s_addr4 = in->ctx.sip[0]; + qp->trace_info->d_addr.d_addr4 = in->ctx.dip[0]; + } else { + memcpy(qp->trace_info->s_addr.s_addr6, in->ctx.sip, + sizeof(qp->trace_info->s_addr.s_addr6)); + memcpy(qp->trace_info->d_addr.d_addr6, in->ctx.dip, + sizeof(qp->trace_info->d_addr.d_addr6)); + } + + err = xsc_create_qptrace(xdev, qp); + if (err) + return err; + } + + return xsc_cmd_status_to_err(&out.hdr); +} +EXPORT_SYMBOL_GPL(xsc_core_qp_modify); + +int xsc_core_qp_query(struct xsc_core_device *xdev, struct xsc_core_qp *qp, + struct xsc_query_qp_mbox_out *out, int outlen) +{ + struct xsc_query_qp_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_QP); + in.qpn = cpu_to_be32(qp->qpn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + return xsc_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_core_qp_query); + +void xsc_init_qp_table(struct xsc_core_device *xdev) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + + xsc_qp_debugfs_init(xdev); + xsc_qptrace_debugfs_init(xdev); +} + +void xsc_cleanup_qp_table(struct xsc_core_device *xdev) +{ + xsc_qp_debugfs_cleanup(xdev); + xsc_qptrace_debugfs_cleanup(xdev); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c b/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c new file mode 100644 index 0000000000000000000000000000000000000000..59122a490eb851dbf9136572563c241cde0e392d --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 - 2022, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/driver.h" + +#define QPTS_ELEMENT_MAX_NUM 0x4000 //16384 = 16k + +static struct proc_dir_entry *g_entry; +static DECLARE_WAIT_QUEUE_HEAD(g_ring_buff_wait); +static struct xsc_qpt_update_msg *g_ring_buff; +static struct mutex g_ring_buff_lock; + +static DECLARE_WAIT_QUEUE_HEAD(g_remove_wait); +static u32 g_pid; + +static unsigned long R; +static unsigned long R_cur; +static unsigned long W; + +static void send_signal(int sig_no) +{ + int ret; + struct task_struct *task = NULL; + + if (g_pid < 2) { + pr_err("%s error, pid(%u) is invalid.\n", __func__, g_pid); + return; + } + + rcu_read_lock(); + task = pid_task(find_vpid(g_pid), PIDTYPE_PID); + rcu_read_unlock(); + + if (!task) { + pr_err("%s error, get pid_task failed, pid(%d).\n", __func__, g_pid); + return; + } + + ret = send_sig(sig_no, task, 0); + if (ret < 0) + pr_err("%s error, send signal(%d) failed.\n", __func__, sig_no); +} + +static int read_buff(struct xsc_qpt_update_msg *msg) +{ + mutex_lock(&g_ring_buff_lock); + if (R_cur == W) { + mutex_unlock(&g_ring_buff_lock); + return 0; + } + + *msg = g_ring_buff[R_cur]; + R_cur = (R_cur + 1) % QPTS_ELEMENT_MAX_NUM; + mutex_unlock(&g_ring_buff_lock); + + return 1; +} + +static void write_buff(struct xsc_qpt_update_msg *msg) +{ + mutex_lock(&g_ring_buff_lock); + g_ring_buff[W] = *msg; + W = (W + 1) % QPTS_ELEMENT_MAX_NUM; + if (R == W) + R = (R + 1) % QPTS_ELEMENT_MAX_NUM; + + if (R_cur == W) + R_cur = (R_cur + 1) % QPTS_ELEMENT_MAX_NUM; + + mutex_unlock(&g_ring_buff_lock); + + wake_up_interruptible(&g_ring_buff_wait); +} + +int qpts_write_one_msg(struct xsc_qpt_update_msg *msg) +{ + if (!msg) + return -1; + + write_buff(msg); + + return 0; +} +EXPORT_SYMBOL(qpts_write_one_msg); + +static int qpts_open(struct inode *inode, struct file *file) +{ + mutex_lock(&g_ring_buff_lock); + if (g_pid > 0) { + mutex_unlock(&g_ring_buff_lock); + goto end; + } + g_pid = current->pid; + R_cur = R; + mutex_unlock(&g_ring_buff_lock); + + return 0; +end: + pr_err("%s failed, pid:%d.\n", __func__, g_pid); + return -1; +} + +static int qpts_release(struct inode *inode, struct file *file) +{ + mutex_lock(&g_ring_buff_lock); + g_pid = 0; + mutex_unlock(&g_ring_buff_lock); + + wake_up_interruptible(&g_remove_wait); + + return 0; +} + +static ssize_t qpts_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + int error = -EINVAL, i = 0; + struct xsc_qpt_update_msg qpt_msg = {0}; + + if ((file->f_flags & O_NONBLOCK) && R_cur == W) + goto out; + + if (!buf || !count) { + pr_err("%s error, null buffer or count!\n", __func__); + goto out; + } + + error = wait_event_interruptible(g_ring_buff_wait, (R_cur != W)); + if (error) + goto out; + + while (!error && i < count && read_buff(&qpt_msg)) { + error = copy_to_user(buf, &qpt_msg, sizeof(qpt_msg)); + buf += sizeof(qpt_msg); + i += sizeof(qpt_msg); + } + + if (!error) + error = i; + +out: + return error; +} + +static __poll_t qpts_poll(struct file *file, poll_table *wait) +{ + poll_wait(file, &g_ring_buff_wait, wait); + + if (R_cur != W) + return EPOLLIN | EPOLLRDNORM; + + return 0; +} + +const struct proc_ops qpts_ops = { + .proc_open = qpts_open, + .proc_read = qpts_read, + .proc_poll = qpts_poll, + .proc_release = qpts_release, +}; + +int qpts_init(void) +{ + g_ring_buff = kcalloc(QPTS_ELEMENT_MAX_NUM, sizeof(struct xsc_qpt_update_msg), GFP_KERNEL); + if (!g_ring_buff) + return -ENOMEM; + + mutex_init(&g_ring_buff_lock); + + 
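+ /* Ring-buffer semantics (per read_buff()/write_buff() above): W is
+  * the producer index, R the oldest retained entry, R_cur the reader
+  * cursor, all wrapping modulo QPTS_ELEMENT_MAX_NUM under
+  * g_ring_buff_lock. Writes into a full ring advance R/R_cur, silently
+  * dropping the oldest message; e.g. in a hypothetical 4-slot ring, a
+  * 4th write leaves W=0 and pushes R and R_cur to 1, so at most
+  * N-1 = 3 messages remain readable.
+  */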
g_entry = proc_create_data("qpts_kmsg", 0400, NULL, &qpts_ops, NULL); + if (!g_entry) { + pr_err("Could not create /proc/qpts_kmsg file!\n"); + goto error_qpts_init; + } + + return 0; + +error_qpts_init: + kfree(g_ring_buff); + g_ring_buff = NULL; + return -1; +} + +void qpts_fini(void) +{ + mutex_lock(&g_ring_buff_lock); + if (!g_pid) + g_pid = 1; + mutex_unlock(&g_ring_buff_lock); + + if (g_pid > 1) { + send_signal(SIGKILL); + wait_event_interruptible(g_remove_wait, (g_pid == 0)); + } + + remove_proc_entry("qpts_kmsg", NULL); + + kfree(g_ring_buff); + g_ring_buff = NULL; + g_entry = NULL; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c b/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c new file mode 100644 index 0000000000000000000000000000000000000000..7471367ce83fe66a4021dadd62d0ac7c0a66b88e --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c @@ -0,0 +1,450 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/res_obj.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_cmd.h" +#include "common/qp.h" +#include "common/driver.h" + +static int xsc_alloc_obj(struct xsc_res_obj *obj, struct xsc_bdf_file *file, + void (*release_func)(void *), unsigned long key, + char *data, unsigned int datalen) +{ + obj->release_method = release_func; + obj->file = file; + obj->datalen = datalen; + if (datalen) { + obj->data = kmalloc(datalen, GFP_KERNEL); + if (!obj->data) + return -ENOMEM; + memcpy(obj->data, data, datalen); + } + + radix_tree_preload(GFP_KERNEL); + spin_lock(&file->obj_lock); + radix_tree_insert(&file->obj_tree, key, (void *)obj); + spin_unlock(&file->obj_lock); + radix_tree_preload_end(); + + return 0; +} + +static inline void xsc_free_obj(struct xsc_bdf_file *file, unsigned long key, + struct xsc_res_obj **obj) +{ + *obj = radix_tree_delete(&file->obj_tree, key); + if (!*obj) + return; + if ((*obj)->datalen) + kfree((*obj)->data); +} + +static void xsc_send_cmd_dealloc_pd(struct xsc_core_device *xdev, unsigned int pdn) +{ + struct xsc_dealloc_pd_mbox_in in; + struct xsc_dealloc_pd_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEALLOC_PD); + in.pdn = cpu_to_be32(pdn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to dealloc pd %d\n", pdn); +} + +static void xsc_free_pd_obj(void *obj) +{ + struct xsc_pd_obj *pd_obj = container_of(obj, struct xsc_pd_obj, obj); + struct xsc_bdf_file *file = pd_obj->obj.file; + unsigned long key; + struct xsc_res_obj *_obj; + + xsc_send_cmd_dealloc_pd(file->xdev, pd_obj->pdn); + key = xsc_idx_to_key(RES_OBJ_PD, pd_obj->pdn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(pd_obj->obj.file->xdev, "free pd obj: %d\n", pd_obj->pdn); + kfree(pd_obj); +} + +int xsc_alloc_pd_obj(struct xsc_bdf_file *file, + unsigned int pdn, char *data, unsigned int datalen) +{ + struct xsc_pd_obj *pd_obj; + unsigned long key; + int ret; + + pd_obj = kzalloc(sizeof(*pd_obj), GFP_KERNEL); + if (!pd_obj) + return -ENOMEM; + + pd_obj->pdn = pdn; + key = xsc_idx_to_key(RES_OBJ_PD, pdn); + ret = xsc_alloc_obj(&pd_obj->obj, file, xsc_free_pd_obj, key, data, datalen); + if (ret) { + kfree(pd_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc pd %d obj\n", pdn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_pd_obj); + +void xsc_destroy_pd_obj(struct xsc_bdf_file *file, unsigned int pdn) +{ + struct 
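+ /* Same tracking pattern as every RES_OBJ_* type in this file: the
+  * wrapper object is keyed by xsc_idx_to_key(type, id) in
+  * file->obj_tree, which lets xsc_close_bdf_file() reclaim anything a
+  * dying process left behind via obj->release_method().
+  */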
xsc_pd_obj *pd_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PD, pdn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + pd_obj = container_of(obj, struct xsc_pd_obj, obj); + kfree(pd_obj); + xsc_core_dbg(file->xdev, "destroy pd %d obj\n", pdn); +} +EXPORT_SYMBOL_GPL(xsc_destroy_pd_obj); + +static void xsc_send_cmd_destroy_mkey(struct xsc_core_device *xdev, unsigned int mkey) +{ + struct xsc_destroy_mkey_mbox_in in; + struct xsc_destroy_mkey_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_MKEY); + in.mkey = cpu_to_be32(mkey); + if (xdev->reg_mr_via_cmdq) + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + else + ret = xsc_destroy_mkey(xdev, &in, &out); + + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy mkey %d\n", mkey); +} + +static void xsc_send_cmd_dereg_mr(struct xsc_core_device *xdev, unsigned int mkey) +{ + struct xsc_unregister_mr_mbox_in in; + struct xsc_unregister_mr_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); + in.mkey = cpu_to_be32(mkey); + if (xdev->reg_mr_via_cmdq) + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + else + ret = xsc_dereg_mr(xdev, &in, &out); + + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to dereg mr %d\n", mkey); +} + +static void xsc_free_mr_obj(void *obj) +{ + struct xsc_mr_obj *mr_obj = container_of(obj, struct xsc_mr_obj, obj); + struct xsc_bdf_file *file = mr_obj->obj.file; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mr_obj->mkey); + struct xsc_res_obj *_obj; + + xsc_send_cmd_destroy_mkey(file->xdev, mr_obj->mkey); + xsc_send_cmd_dereg_mr(file->xdev, mr_obj->mkey); + + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free mr obj: %d\n", mr_obj->mkey); + kfree(mr_obj); +} + +int xsc_alloc_mr_obj(struct xsc_bdf_file *file, + unsigned int mkey, char *data, unsigned int datalen) +{ + struct xsc_mr_obj *mr_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mkey); + int ret; + + mr_obj = kzalloc(sizeof(*mr_obj), GFP_KERNEL); + if (!mr_obj) + return -ENOMEM; + + mr_obj->mkey = mkey; + ret = xsc_alloc_obj(&mr_obj->obj, file, xsc_free_mr_obj, key, data, datalen); + if (ret) { + kfree(mr_obj); + return ret; + } + + xsc_core_dbg(file->xdev, "alloc mr %d obj\n", mkey); + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_mr_obj); + +void xsc_destroy_mr_obj(struct xsc_bdf_file *file, unsigned int mkey) +{ + struct xsc_mr_obj *mr_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mkey); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + mr_obj = container_of(obj, struct xsc_mr_obj, obj); + kfree(mr_obj); + xsc_core_dbg(file->xdev, "destroy mr %d obj\n", mkey); +} +EXPORT_SYMBOL_GPL(xsc_destroy_mr_obj); + +static void xsc_send_cmd_destroy_cq(struct xsc_core_device *xdev, unsigned int cqn) +{ + struct xsc_destroy_cq_mbox_in in; + struct xsc_destroy_cq_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(cqn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy cq %d\n", cqn); +} + +static void xsc_free_cq_obj(void *obj) +{ + struct xsc_cq_obj *cq_obj = container_of(obj, struct xsc_cq_obj, obj); + struct xsc_bdf_file *file = cq_obj->obj.file; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cq_obj->cqn); + struct xsc_res_obj *_obj; + + 
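+ /* As with the PD/MR variants, this release callback is invoked from
+  * xsc_close_bdf_file() with file->obj_lock already held, hence
+  * xsc_free_obj() below deletes from the tree without retaking the
+  * lock: destroy the hardware CQ first, then drop the tracking entry.
+  */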
xsc_send_cmd_destroy_cq(file->xdev, cq_obj->cqn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free cq obj: %d\n", cq_obj->cqn); + kfree(cq_obj); +} + +int xsc_alloc_cq_obj(struct xsc_bdf_file *file, unsigned int cqn, + char *data, unsigned int datalen) +{ + struct xsc_cq_obj *cq_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cqn); + int ret; + + cq_obj = kzalloc(sizeof(*cq_obj), GFP_KERNEL); + if (!cq_obj) + return -ENOMEM; + + cq_obj->cqn = cqn; + ret = xsc_alloc_obj(&cq_obj->obj, file, xsc_free_cq_obj, key, data, datalen); + if (ret) { + kfree(cq_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc cq %d obj\n", cqn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_cq_obj); + +void xsc_destroy_cq_obj(struct xsc_bdf_file *file, unsigned int cqn) +{ + struct xsc_cq_obj *cq_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cqn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + cq_obj = container_of(obj, struct xsc_cq_obj, obj); + kfree(cq_obj); + xsc_core_dbg(file->xdev, "destroy cq %d obj\n", cqn); +} +EXPORT_SYMBOL_GPL(xsc_destroy_cq_obj); + +void xsc_send_cmd_2rst_qp(struct xsc_core_device *xdev, unsigned int qpn) +{ + struct xsc_modify_qp_mbox_in in; + struct xsc_modify_qp_mbox_out out; + int ret; + + ret = xsc_modify_qp(xdev, &in, &out, qpn, XSC_CMD_OP_2RST_QP); + if (ret) + xsc_core_err(xdev, "failed to reset qp %u\n", qpn); +} + +static void xsc_send_cmd_destroy_qp(struct xsc_core_device *xdev, unsigned int qpn) +{ + struct xsc_destroy_qp_mbox_in in; + struct xsc_destroy_qp_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qpn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy qp %d\n", qpn); +} + +static void xsc_free_qp_obj(void *obj) +{ + struct xsc_qp_obj *qp_obj = container_of(obj, struct xsc_qp_obj, obj); + struct xsc_bdf_file *file = qp_obj->obj.file; + unsigned long key; + struct xsc_res_obj *_obj; + + xsc_send_cmd_2rst_qp(file->xdev, qp_obj->qpn); + xsc_send_cmd_destroy_qp(file->xdev, qp_obj->qpn); + + key = xsc_idx_to_key(RES_OBJ_QP, qp_obj->qpn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free qp obj: %d\n", qp_obj->qpn); + kfree(qp_obj); +} + +int xsc_alloc_qp_obj(struct xsc_bdf_file *file, unsigned int qpn, + char *data, unsigned int datalen) +{ + struct xsc_qp_obj *qp_obj; + unsigned long key; + int ret; + + qp_obj = kzalloc(sizeof(*qp_obj), GFP_KERNEL); + if (!qp_obj) + return -ENOMEM; + + qp_obj->qpn = qpn; + key = xsc_idx_to_key(RES_OBJ_QP, qpn); + ret = xsc_alloc_obj(&qp_obj->obj, file, xsc_free_qp_obj, key, data, datalen); + if (ret) { + kfree(qp_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc qp %d obj\n", qpn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_qp_obj); + +void xsc_destroy_qp_obj(struct xsc_bdf_file *file, unsigned int qpn) +{ + struct xsc_qp_obj *qp_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_QP, qpn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + qp_obj = container_of(obj, struct xsc_qp_obj, obj); + kfree(qp_obj); + xsc_core_dbg(file->xdev, "destroy qp %d obj\n", qpn); +} +EXPORT_SYMBOL_GPL(xsc_destroy_qp_obj); + +static void xsc_send_cmd_del_pct(struct xsc_core_device *xdev, + unsigned int priority) +{ + struct xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + struct 
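+ /* Wire format assembled below (sketch): a generic ioctl mailbox
+  * carrying one type-length-value record followed by its payload:
+  *
+  *   xsc_ioctl_mbox_in.hdr/.len | xsc_ioctl_data_tl          | payload
+  *                              | .opmod  = XSC_IOCTL_OP_DEL
+  *                              | .table  = XSC_FLOW_TBL_PCT_V4
+  *                              | .length = sizeof(payload)
+  */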
xsc_ioctl_data_tl *tl; + struct xsc_flow_pct_v4_del *pct_v4; + unsigned int inlen; + unsigned int outlen; + int ret; + + inlen = sizeof(struct xsc_ioctl_mbox_in) + sizeof(struct xsc_ioctl_data_tl) + + sizeof(struct xsc_flow_pct_v4_del); + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return; + + outlen = sizeof(struct xsc_ioctl_mbox_out) + sizeof(struct xsc_ioctl_data_tl) + + sizeof(struct xsc_flow_pct_v4_del); + out = kzalloc(outlen, GFP_KERNEL); + if (!out) { + kfree(in); + return; + } + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_IOCTL_FLOW); + in->len = sizeof(struct xsc_ioctl_data_tl) + sizeof(struct xsc_flow_pct_v4_del); + in->len = cpu_to_be16(in->len); + tl = (struct xsc_ioctl_data_tl *)in->data; + tl->opmod = XSC_IOCTL_OP_DEL; + tl->table = XSC_FLOW_TBL_PCT_V4; + tl->length = sizeof(struct xsc_flow_pct_v4_del); + pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1); + pct_v4->priority = priority; + out->len = in->len; + ret = xsc_cmd_exec(xdev, in, inlen, out, outlen); + if (ret || out->hdr.status != 0) + xsc_core_err(xdev, "failed to del pct %d\n", priority); + + kfree(in); + kfree(out); +} + +static void xsc_free_pct_obj(void *obj) +{ + struct xsc_pct_obj *pct_obj = container_of(obj, struct xsc_pct_obj, obj); + struct xsc_bdf_file *file = pct_obj->obj.file; + struct xsc_res_obj *_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, pct_obj->pct_idx); + + xsc_send_cmd_del_pct(file->xdev, pct_obj->pct_idx); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free pct obj, priority:%d\n", pct_obj->pct_idx); + kfree(pct_obj); +} + +/* both pct4 and pct6 are allocated in the same tcam table, so we can delete pct6 + * by pct4 method + */ +int xsc_alloc_pct_obj(struct xsc_bdf_file *file, unsigned int priority, + char *data, unsigned int datalen) +{ + struct xsc_pct_obj *pct_obj; + int ret; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, priority); + + pct_obj = kzalloc(sizeof(*pct_obj), GFP_KERNEL); + if (!pct_obj) + return -ENOMEM; + + pct_obj->pct_idx = priority; + ret = xsc_alloc_obj(&pct_obj->obj, file, xsc_free_pct_obj, key, data, datalen); + if (ret) + kfree(pct_obj); + xsc_core_dbg(file->xdev, "alloc pct %d obj\n", priority); + return ret; +} +EXPORT_SYMBOL_GPL(xsc_alloc_pct_obj); + +void xsc_destroy_pct_obj(struct xsc_bdf_file *file, unsigned int priority) +{ + struct xsc_pct_obj *pct_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, priority); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + pct_obj = container_of(obj, struct xsc_pct_obj, obj); + kfree(pct_obj); + xsc_core_dbg(file->xdev, "destroy pct %d obj\n", priority); +} +EXPORT_SYMBOL_GPL(xsc_destroy_pct_obj); + +void xsc_close_bdf_file(struct xsc_bdf_file *file) +{ + struct radix_tree_iter iter; + void **slot; + struct xsc_res_obj *obj; + + xsc_core_warn(file->xdev, "release bdf file:%lx\n", file->key); + spin_lock(&file->obj_lock); + radix_tree_for_each_slot(slot, &file->obj_tree, &iter, 0) { + obj = (struct xsc_res_obj *)(*slot); + obj->release_method(obj); + } + spin_unlock(&file->obj_lock); +} +EXPORT_SYMBOL_GPL(xsc_close_bdf_file); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c new file mode 100644 index 0000000000000000000000000000000000000000..057be7df0f0fbacb28f01fe640488667fd311122 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon 
Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include "common/xsc_core.h" +#include "common/xsc_lag.h" +#include "common/vport.h" +#include "eswitch.h" +#include "xsc_pci_ctrl.h" + +static int xsc_device_enable_sriov(struct xsc_core_device *dev, int num_vfs) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + u16 vf; + u16 max_msix = 0; + int err; + + max_msix = xsc_get_irq_matrix_global_available(dev); + xsc_core_info(dev, "global_available=%u\n", max_msix); + err = xsc_cmd_enable_hca(dev, num_vfs, max_msix); + if (err) + return err; + + if (!XSC_ESWITCH_MANAGER(dev)) + goto enable_vfs; + + err = xsc_eswitch_enable(dev->priv.eswitch, XSC_ESWITCH_LEGACY, + num_vfs); + if (err) { + xsc_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n", err); + return err; + } + +enable_vfs: + err = xsc_create_vfs_sysfs(dev, num_vfs); + if (err) { + xsc_core_warn(dev, "failed to create SRIOV sysfs (%d)\n", err); + if (XSC_ESWITCH_MANAGER(dev)) + xsc_eswitch_disable(dev->priv.eswitch, true); + + return err; + } + + for (vf = 0; vf < num_vfs; vf++) + sriov->vfs_ctx[vf].enabled = 1; + + return 0; +} + +static void xsc_device_disable_sriov(struct xsc_core_device *dev, + int num_vfs, bool clear_vf) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + int vf, err; + + err = xsc_cmd_disable_hca(dev, (u16)num_vfs); + if (err) { + xsc_core_warn(dev, "failed to disable hca, num_vfs=%d, err=%d\n", + num_vfs, err); + return; + } + + for (vf = num_vfs - 1; vf >= 0; vf--) { + if (!sriov->vfs_ctx[vf].enabled) + continue; + + sriov->vfs_ctx[vf].enabled = 0; + } + + if (XSC_ESWITCH_MANAGER(dev)) + xsc_eswitch_disable(dev->priv.eswitch, clear_vf); + + xsc_destroy_vfs_sysfs(dev, num_vfs); +} + +static int xsc_sriov_enable(struct pci_dev *pdev, int num_vfs) +{ + struct xsc_core_device *dev = pci_get_drvdata(pdev); + int err; + + if (num_vfs > dev->caps.max_vfs) { + xsc_core_warn(dev, + "invalid sriov param, num_vfs(%d) > total_vfs(%d)\n", + num_vfs, dev->caps.max_vfs); + return -EINVAL; + } + + if (num_vfs && pci_num_vf(dev->pdev)) { + if (num_vfs == pci_num_vf(dev->pdev)) + return 0; + + xsc_core_warn(dev, "VFs already enabled. Disable before enabling %d VFs\n", + num_vfs); + return -EBUSY; + } + + xsc_lag_disable(dev); + + xsc_core_info(dev, "enable %d VFs\n", num_vfs); + + err = xsc_device_enable_sriov(dev, num_vfs); + if (err) { + xsc_core_warn(dev, "xsc_device_enable_sriov failed, err=%d\n", err); + goto device_enable_sriov_err; + } + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + xsc_core_warn(dev, "pci_enable_sriov failed, err=%d\n", err); + goto pci_enable_sriov_err; + } + + xsc_lag_enable(dev); + + return err; + +pci_enable_sriov_err: + xsc_device_disable_sriov(dev, num_vfs, true); + +device_enable_sriov_err: + xsc_lag_enable(dev); + + return err; +} + +static void xsc_sriov_disable(struct pci_dev *pdev) +{ + struct xsc_core_device *dev = pci_get_drvdata(pdev); + int num_vfs = pci_num_vf(dev->pdev); + + xsc_lag_disable(dev); + + xsc_core_info(dev, "disable %d VFs\n", num_vfs); + pci_disable_sriov(pdev); + + xsc_device_disable_sriov(dev, num_vfs, true); + + xsc_lag_enable(dev); +} + +int xsc_core_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct xsc_core_device *dev = pci_get_drvdata(pdev); + struct xsc_core_sriov *sriov = &dev->priv.sriov; + int err = 0; + + if (num_vfs) + err = xsc_sriov_enable(pdev, num_vfs); + else + xsc_sriov_disable(pdev); + + if (!err) + sriov->num_vfs = num_vfs; + return err ? 
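+ /* .sriov_configure contract: hand back the negative errno on failure,
+  * otherwise the number of VFs now enabled (0 after a disable).
+  */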
err : num_vfs; +} + +int xsc_sriov_attach(struct xsc_core_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + struct xsc_core_device *pf_xdev; + struct xsc_core_sriov *sriov; + + if (!xsc_core_is_pf(dev)) { + if (!pdev->physfn) /*for vf passthrough vm*/ + return 0; + + pf_xdev = pci_get_drvdata(pdev->physfn); + sriov = &pf_xdev->priv.sriov; + + sriov->vfs[dev->vf_id].vf = dev->vf_id; + sriov->vfs[dev->vf_id].dev = dev; + return 0; + } + + if (!dev->priv.sriov.num_vfs) + return 0; + + /* If sriov VFs exist in PCI level, enable them in device level */ + return xsc_device_enable_sriov(dev, pci_num_vf(dev->pdev)); +} + +void xsc_sriov_detach(struct xsc_core_device *dev) +{ + if (!xsc_core_is_pf(dev) || !dev->priv.sriov.num_vfs) + return; + + xsc_device_disable_sriov(dev, pci_num_vf(dev->pdev), false); +} + +static u16 xsc_get_max_vfs(struct xsc_core_device *dev) +{ + /* In RH6.8 and lower pci_sriov_get_totalvfs might return -EINVAL + * return in that case 1 + */ + return (pci_sriov_get_totalvfs(dev->pdev) < 0) ? 0 : + pci_sriov_get_totalvfs(dev->pdev); +} + +static int xsc_sriov_pci_cfg_info(struct xsc_core_device *dev, + struct xsc_pci_sriov *iov) +{ + int pos; + struct pci_dev *pdev = dev->pdev; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + xsc_core_err(dev, "%s: failed to find SRIOV capability in device\n", + __func__); + return -ENODEV; + } + + iov->pos = pos; + pci_read_config_dword(pdev, pos + PCI_SRIOV_CAP, &iov->cap); + pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); + pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); + pci_read_config_word(pdev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); + pci_read_config_word(pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &iov->vf_device); + pci_read_config_dword(pdev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); + pci_read_config_byte(pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); + + return 0; +} + +int xsc_sriov_init(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct pci_dev *pdev = dev->pdev; + struct xsc_pci_sriov *iov = &sriov->pci_sriov; + int total_vfs; + u32 vf_bus, vf_devfn; + int err; + + if (!xsc_core_is_pf(dev)) + return 0; + + err = xsc_sriov_pci_cfg_info(dev, iov); + if (err) { + xsc_core_warn(dev, "%s: pci not support sriov, err=%d\n", + __func__, err); + return 0; + } + + total_vfs = pci_sriov_get_totalvfs(pdev); + if (unlikely(iov->total_vfs == 0)) { + xsc_core_warn(dev, "%s: pci not support sriov, total_vfs=%d, cur_vfs=%d\n", + __func__, iov->total_vfs, sriov->num_vfs); + return 0; + } + sriov->max_vfs = xsc_get_max_vfs(dev); + sriov->num_vfs = pci_num_vf(pdev); + + vf_bus = pdev->bus->number + ((pdev->devfn + iov->offset) >> 8); + vf_devfn = (pdev->devfn + iov->offset) & 0xff; + sriov->vf_bdf_base = (u16)((vf_bus << 8) | vf_devfn); + + sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL); + if (!sriov->vfs_ctx) + return -ENOMEM; + + xsc_core_info(dev, "total_vfs=%d, cur_vfs=%d, vf_bdf_base=0x%02x\n", + total_vfs, sriov->num_vfs, sriov->vf_bdf_base); + xsc_core_info(dev, "vf_offset=%d, stride=%d, vf_device_id=0x%x\n", + iov->offset, iov->stride, iov->vf_device); + err = xsc_sriov_sysfs_init(dev); + if (err) { + xsc_core_warn(dev, "failed to init SRIOV sysfs, err=%d\n", err); + kfree(sriov->vfs_ctx); + return err; 
+ } + + return 0; +} + +void xsc_sriov_cleanup(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + + if (!xsc_core_is_pf(dev)) + return; + + xsc_sriov_sysfs_cleanup(dev); + kfree(sriov->vfs_ctx); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..e5b07b0b5ecc484665bb3275a2e7808d79af284a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c @@ -0,0 +1,1063 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include "common/xsc_core.h" +#include "common/vport.h" +#include "eswitch.h" + +struct vf_attributes { + struct attribute attr; + ssize_t (*show)(struct xsc_sriov_vf *vf, struct vf_attributes *attr, + char *buf); + ssize_t (*store)(struct xsc_sriov_vf *vf, struct vf_attributes *attr, + const char *buf, size_t count); +}; + +static ssize_t vf_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct vf_attributes *ga = + container_of(attr, struct vf_attributes, attr); + struct xsc_sriov_vf *g = container_of(kobj, struct xsc_sriov_vf, kobj); + + if (!ga->show) + return -EIO; + + return ga->show(g, ga, buf); +} + +static ssize_t vf_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct vf_attributes *ga = + container_of(attr, struct vf_attributes, attr); + struct xsc_sriov_vf *g = container_of(kobj, struct xsc_sriov_vf, kobj); + + if (!ga->store) + return -EIO; + + return ga->store(g, ga, buf, size); +} + +struct vf_group_attributes { + struct attribute attr; + ssize_t (*show)(struct xsc_vgroup *g, struct vf_group_attributes *attr, + char *buf); + ssize_t (*store)(struct xsc_vgroup *g, struct vf_group_attributes *attr, + const char *buf, size_t count); +}; + +static ssize_t vf_group_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct vf_group_attributes *ga = + container_of(attr, struct vf_group_attributes, attr); + struct xsc_vgroup *g = container_of(kobj, struct xsc_vgroup, kobj); + + if (!ga->show) + return -EIO; + + return ga->show(g, ga, buf); +} + +static ssize_t vf_group_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct vf_group_attributes *ga = + container_of(attr, struct vf_group_attributes, attr); + struct xsc_vgroup *g = container_of(kobj, struct xsc_vgroup, kobj); + + if (!ga->store) + return -EIO; + + return ga->store(g, ga, buf, size); +} + +static ssize_t port_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + union ib_gid gid; + int err; + u8 *p; + + err = xsc_query_hca_vport_gid(dev, 1, 1, g->vf, 0, &gid); + if (err) { + xsc_core_warn(dev, "failed to query gid at index 0 for vf %d\n", g->vf); + return err; + } + + p = &gid.raw[8]; + err = sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); + return err; +} + +static ssize_t port_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_vf_context *vfs_ctx = dev->priv.sriov.vfs_ctx; + struct xsc_hca_vport_context *in; + u64 guid = 0; + int err; + int tmp[8]; + int i; + + err = sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + &tmp[0], &tmp[1], 
&tmp[2], &tmp[3], &tmp[4], &tmp[5], &tmp[6], &tmp[7]); + if (err != 8) + return -EINVAL; + + for (i = 0; i < 8; i++) + guid += ((u64)tmp[i] << ((7 - i) * 8)); + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->field_select = XSC_HCA_VPORT_SEL_PORT_GUID; + in->port_guid = guid; + err = xsc_modify_hca_vport_context(dev, 1, 1, g->vf + 1, in); + kfree(in); + if (err) + return err; + + vfs_ctx[g->vf].port_guid = guid; + + return count; +} + +static int show_nic_node_guid(struct xsc_core_device *dev, u16 vf, + __be64 *node_guid) +{ + int err; + + err = xsc_query_nic_vport_node_guid(dev, vf + 1, node_guid); + if (!err) + *node_guid = cpu_to_be64(*node_guid); + + return err; +} + +static ssize_t node_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + __be64 guid; + + int err; + u8 *p; + + err = show_nic_node_guid(dev, g->vf, &guid); + if (err) { + xsc_core_warn(dev, "failed to query node guid for vf %d (%d)\n", + g->vf, err); + return err; + } + + p = (u8 *)&guid; + err = sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); + + return err; +} + +static int modify_nic_node_guid(struct xsc_core_device *dev, u16 vf, + u64 node_guid) +{ + return xsc_modify_other_nic_vport_node_guid(dev, vf + 1, node_guid); +} + +static ssize_t node_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + u64 guid = 0; + int err; + int tmp[8]; + int i; + + err = sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + &tmp[0], &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5], &tmp[6], &tmp[7]); + if (err != 8) + return -EINVAL; + + for (i = 0; i < 8; i++) + guid += ((u64)tmp[i] << ((7 - i) * 8)); + + err = modify_nic_node_guid(dev, g->vf, guid); + if (err) { + xsc_core_warn(dev, "failed to modify node guid for vf %d (%d)\n", + g->vf, err); + return err; + } + + return count; +} + +static const char *policy_str(enum port_state_policy policy) +{ + switch (policy) { + case XSC_POLICY_DOWN: return "Down\n"; + case XSC_POLICY_UP: return "Up\n"; + case XSC_POLICY_FOLLOW: return "Follow\n"; + default: return "Invalid policy\n"; + } +} + +static ssize_t policy_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_hca_vport_context *rep; + const char *p = NULL; + int err; + + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) + return -ENOMEM; + + err = xsc_query_hca_vport_context(dev, 1, 1, g->vf, rep); + if (err) { + xsc_core_warn(dev, "failed to query port policy for vf %d (%d)\n", + g->vf, err); + goto free; + } + p = policy_str(rep->vport_state_policy); + if (p) + sprintf(buf, "%s", p); + +free: + kfree(rep); + return p ? 
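+ /* p is still NULL only if the vport query failed, so this returns the
+  * formatted length on success and the negative query error otherwise.
+  */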
strlen(p) : err; +} + +static int strpolicy(const char *buf, enum port_state_policy *policy) +{ + if (sysfs_streq(buf, "Down")) { + *policy = XSC_POLICY_DOWN; + return 0; + } + + if (sysfs_streq(buf, "Up")) { + *policy = XSC_POLICY_UP; + return 0; + } + + if (sysfs_streq(buf, "Follow")) { + *policy = XSC_POLICY_FOLLOW; + return 0; + } + return -EINVAL; +} + +static ssize_t policy_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_vf_context *vfs_ctx = dev->priv.sriov.vfs_ctx; + struct xsc_hca_vport_context *in; + enum port_state_policy policy; + int err; + + err = strpolicy(buf, &policy); + if (err) + return err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->vport_state_policy = policy; + in->field_select = XSC_HCA_VPORT_SEL_STATE_POLICY; + err = xsc_modify_hca_vport_context(dev, 1, 1, g->vf + 1, in); + kfree(in); + if (err) + return err; + + vfs_ctx[g->vf].policy = policy; + + return count; +} + +/* ETH SRIOV SYSFS */ +static ssize_t mac_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to set VF Mac Address\n"); +} + +static ssize_t mac_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + u8 mac[ETH_ALEN]; + int err; + + err = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]); + if (err == 6) + goto set_mac; + + if (sysfs_streq(buf, "Random")) + eth_random_addr(mac); + else + return -EINVAL; + +set_mac: + err = xsc_eswitch_set_vport_mac(dev->priv.eswitch, g->vf + 1, mac); + return err ? err : count; +} + +static ssize_t vlan_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, ": set VF Vlan, Qos, Vlan Proto(default 802.1Q)\n"); +} + +static ssize_t vlan_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + char vproto_ext[5] = {'\0'}; + __be16 vlan_proto; + u16 vlan_id; + u8 qos; + int err; + + err = sscanf(buf, "%hu:%hhu:802.%4s", &vlan_id, &qos, vproto_ext); + if (err == 3) { + if ((strcmp(vproto_ext, "1AD") == 0) || + (strcmp(vproto_ext, "1ad") == 0)) + vlan_proto = htons(ETH_P_8021AD); + else if ((strcmp(vproto_ext, "1Q") == 0) || + (strcmp(vproto_ext, "1q") == 0)) + vlan_proto = htons(ETH_P_8021Q); + else + return -EINVAL; + } else { + err = sscanf(buf, "%hu:%hhu", &vlan_id, &qos); + if (err != 2) + return -EINVAL; + vlan_proto = htons(ETH_P_8021Q); + } + + err = xsc_eswitch_set_vport_vlan(dev->priv.eswitch, g->vf + 1, + vlan_id, qos, vlan_proto); + return err ? 
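+ /* Formats accepted by the sscanf calls above (illustrative writes):
+  *   echo "100:3" > vlan           - VID 100, QoS 3, 802.1Q by default
+  *   echo "100:3:802.1ad" > vlan   - same pair, tagged as S-VLAN
+  */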
err : count;
+}
+
+static const char *vlan_proto_str(u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ if (!vlan && !qos)
+ return "N/A";
+
+ switch (vlan_proto) {
+ case htons(ETH_P_8021AD): return "802.1ad";
+ case htons(ETH_P_8021Q): return "802.1Q";
+ default: return "Invalid vlan protocol";
+ }
+}
+
+static ssize_t spoofcheck_show(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ char *buf)
+{
+ return sprintf(buf,
+ "usage: write to enable|disable VF SpoofCheck\n"
+ );
+}
+
+static ssize_t spoofcheck_store(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ const char *buf,
+ size_t count)
+{
+ struct xsc_core_device *dev = g->dev;
+ bool settings;
+ int err;
+
+ if (sysfs_streq(buf, "ON"))
+ settings = true;
+ else if (sysfs_streq(buf, "OFF"))
+ settings = false;
+ else
+ return -EINVAL;
+
+ err = xsc_eswitch_set_vport_spoofchk(dev->priv.eswitch, g->vf + 1, settings);
+ return err ? err : count;
+}
+
+static ssize_t trust_show(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ char *buf)
+{
+ return sprintf(buf,
+ "usage: write to trust|untrust VF\n"
+ );
+}
+
+static ssize_t trust_store(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ const char *buf,
+ size_t count)
+{
+ struct xsc_core_device *dev = g->dev;
+ bool settings;
+ int err;
+
+ if (sysfs_streq(buf, "ON"))
+ settings = true;
+ else if (sysfs_streq(buf, "OFF"))
+ settings = false;
+ else
+ return -EINVAL;
+
+ err = xsc_eswitch_set_vport_trust(dev->priv.eswitch, g->vf + 1, settings);
+ return err ? err : count;
+}
+
+static ssize_t link_state_show(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ char *buf)
+{
+ return sprintf(buf, "usage: write to set VF State\n");
+}
+
+static ssize_t link_state_store(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ const char *buf,
+ size_t count)
+{
+ struct xsc_core_device *dev = g->dev;
+ enum port_state_policy policy;
+ int err;
+
+ err = strpolicy(buf, &policy);
+ if (err)
+ return err;
+
+ err = xsc_eswitch_set_vport_state(dev->priv.eswitch, g->vf + 1, policy);
+ return err ? err : count;
+}
+
+static ssize_t max_tx_rate_show(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ char *buf)
+{
+ return sprintf(buf,
+ "usage: write to set VF max rate\n");
+}
+
+static ssize_t max_tx_rate_store(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ const char *buf, size_t count)
+{
+ struct xsc_core_device *dev = g->dev;
+ struct xsc_eswitch *esw = dev->priv.eswitch;
+ u32 max_tx_rate;
+ u32 min_tx_rate;
+ int err;
+
+ mutex_lock(&esw->state_lock);
+ min_tx_rate = esw->vports[g->vf + 1].info.min_rate;
+ mutex_unlock(&esw->state_lock);
+
+ err = kstrtouint(buf, 10, &max_tx_rate);
+ if (err)
+ return err;
+
+ err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf + 1,
+ max_tx_rate, min_tx_rate);
+ return err ? err : count;
+}
+
+static ssize_t min_tx_rate_show(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ char *buf)
+{
+ return sprintf(buf,
+ "usage: write to set VF min rate\n");
+}
+
+static ssize_t min_tx_rate_store(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ const char *buf, size_t count)
+{
+ struct xsc_core_device *dev = g->dev;
+ struct xsc_eswitch *esw = dev->priv.eswitch;
+ u32 min_tx_rate;
+ u32 max_tx_rate;
+ int err;
+
+ mutex_lock(&esw->state_lock);
+ max_tx_rate = esw->vports[g->vf + 1].info.max_rate;
+ mutex_unlock(&esw->state_lock);
+
+ err = kstrtouint(buf, 10, &min_tx_rate);
+ if (err)
+ return err;
+
+ err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf + 1,
+ max_tx_rate, min_tx_rate);
+ return err ? err : count;
+}
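+
+/* min/max are programmed as a pair: each store above re-reads the other
+ * half of the pair under esw->state_lock and pushes both through
+ * xsc_eswitch_set_vport_rate(). An illustrative sequence (paths relative
+ * to the device's sysfs "sriov" directory):
+ *   echo 5000 > 0/max_tx_rate
+ *   echo 1000 > 0/min_tx_rate
+ * leaves VF 0 (vport 1) with min_rate=1000 and max_rate=5000.
+ */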
+
+static ssize_t min_pf_tx_rate_show(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ char *buf)
+{
+ return sprintf(buf, "usage: write to set PF min rate\n");
+}
+
+static ssize_t min_pf_tx_rate_store(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ const char *buf, size_t count)
+{
+ struct xsc_core_device *dev = g->dev;
+ struct xsc_eswitch *esw = dev->priv.eswitch;
+ u32 min_tx_rate;
+ u32 max_tx_rate;
+ int err;
+
+ mutex_lock(&esw->state_lock);
+ max_tx_rate = esw->vports[g->vf].info.max_rate;
+ mutex_unlock(&esw->state_lock);
+
+ err = kstrtouint(buf, 10, &min_tx_rate);
+ if (err)
+ return err;
+
+ err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf,
+ max_tx_rate, min_tx_rate);
+ return err ? err : count;
+}
+
+static ssize_t group_show(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ char *buf)
+{
+ return sprintf(buf,
+ "usage: write to set VF vport group\n");
+}
+
+static ssize_t group_store(struct xsc_sriov_vf *g,
+ struct vf_attributes *oa,
+ const char *buf, size_t count)
+{
+ struct xsc_core_device *dev = g->dev;
+ struct xsc_eswitch *esw = dev->priv.eswitch;
+ u32 group_id;
+ int err;
+
+ err = kstrtouint(buf, 10, &group_id);
+ if (err)
+ return err;
+
+ if (group_id > 255)
+ return -EINVAL;
+
+ err = xsc_eswitch_vport_update_group(esw, g->vf + 1, group_id);
+
+ return err ? err : count;
+}
+
+static ssize_t max_tx_rate_group_show(struct xsc_vgroup *g,
+ struct vf_group_attributes *oa,
+ char *buf)
+{
+ return sprintf(buf,
+ "usage: write to set VF group max rate\n");
+}
+
+static ssize_t max_tx_rate_group_store(struct xsc_vgroup *g,
+ struct vf_group_attributes *oa,
+ const char *buf, size_t count)
+{
+ struct xsc_core_device *dev = g->dev;
+ struct xsc_eswitch *esw = dev->priv.eswitch;
+ u32 max_rate;
+ int err;
+
+ err = kstrtouint(buf, 10, &max_rate);
+ if (err)
+ return err;
+
+ err = xsc_eswitch_set_vgroup_max_rate(esw, g->group_id, max_rate);
+
+ return err ? err : count;
+}
+
+static ssize_t min_tx_rate_group_show(struct xsc_vgroup *g,
+ struct vf_group_attributes *oa,
+ char *buf)
+{
+ return sprintf(buf,
+ "usage: write to set VF group min rate\n");
+}
+
+static ssize_t min_tx_rate_group_store(struct xsc_vgroup *g,
+ struct vf_group_attributes *oa,
+ const char *buf, size_t count)
+{
+ struct xsc_core_device *dev = g->dev;
+ struct xsc_eswitch *esw = dev->priv.eswitch;
+ u32 min_rate;
+ int err;
+
+ err = kstrtouint(buf, 10, &min_rate);
+ if (err)
+ return err;
+
+ err = xsc_eswitch_set_vgroup_min_rate(esw, g->group_id, min_rate);
+
+ return err ? err : count;
+}
+
+#define _sprintf(p, buf, format, arg...) \
+ ((PAGE_SIZE - (int)((p) - (buf))) <= 0 ? 
0 : \ + scnprintf((p), PAGE_SIZE - (int)((p) - (buf)), format, ## arg)) + +static ssize_t trunk_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + struct xsc_vport *vport = &esw->vports[g->vf + 1]; + u16 vlan_id = 0; + char *ret = buf; + + mutex_lock(&esw->state_lock); + if (!!bitmap_weight(vport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID)) { + ret += _sprintf(ret, buf, "Allowed 802.1Q VLANs:"); + for_each_set_bit(vlan_id, vport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID) + ret += _sprintf(ret, buf, " %d", vlan_id); + ret += _sprintf(ret, buf, "\n"); + } + mutex_unlock(&esw->state_lock); + + return (ssize_t)(ret - buf); +} + +static ssize_t trunk_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, + size_t count) +{ + struct xsc_core_device *dev = g->dev; + u16 start_vid, end_vid; + char op[5]; + int err; + + err = sscanf(buf, "%4s %hu %hu", op, &start_vid, &end_vid); + if (err != 3) + return -EINVAL; + + if (!strcmp(op, "add")) + err = xsc_eswitch_add_vport_trunk_range(dev->priv.eswitch, + g->vf + 1, + start_vid, end_vid); + else if (!strcmp(op, "rem")) + err = xsc_eswitch_del_vport_trunk_range(dev->priv.eswitch, + g->vf + 1, + start_vid, end_vid); + else + return -EINVAL; + + return err ? err : count; +} + +static ssize_t config_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + struct xsc_vport_info *ivi; + int vport = g->vf + 1; + char *p = buf; + + if (!esw || !xsc_core_is_vport_manager(dev)) + return -EPERM; + if (!(vport >= 0 && vport < esw->total_vports)) + return -EINVAL; + + mutex_lock(&esw->state_lock); + ivi = &esw->vports[vport].info; + p += _sprintf(p, buf, "VF : %d\n", g->vf); + p += _sprintf(p, buf, "MAC : %pM\n", ivi->mac); + p += _sprintf(p, buf, "VLAN : %d\n", ivi->vlan); + p += _sprintf(p, buf, "QoS : %d\n", ivi->qos); + p += _sprintf(p, buf, "VLAN Proto : %s\n", + vlan_proto_str(ivi->vlan, ivi->qos, ivi->vlan_proto)); + p += _sprintf(p, buf, "SpoofCheck : %s\n", ivi->spoofchk ? "ON" : "OFF"); + p += _sprintf(p, buf, "Trust : %s\n", ivi->trusted ? "ON" : "OFF"); + p += _sprintf(p, buf, "LinkState : %s", policy_str(ivi->link_state)); + p += _sprintf(p, buf, "MinTxRate : %d\n", ivi->min_rate); + p += _sprintf(p, buf, "MaxTxRate : %d\n", ivi->max_rate); + p += _sprintf(p, buf, "VGT+ : %s\n", + !!bitmap_weight(ivi->vlan_trunk_8021q_bitmap, VLAN_N_VID) ? 
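+ /* VGT+ reports ON whenever at least one 802.1Q VID is set in the
+  * vport's trunk bitmap (populated via trunk_store() above).
+  */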
+ "ON" : "OFF"); + p += _sprintf(p, buf, "RateGroup : %d\n", ivi->group); + mutex_unlock(&esw->state_lock); + + return (ssize_t)(p - buf); +} + +static ssize_t config_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static ssize_t config_group_show(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + char *p = buf; + + if (!esw || !xsc_core_is_vport_manager(dev)) + return -EPERM; + + mutex_lock(&esw->state_lock); + p += _sprintf(p, buf, "Num VFs : %d\n", g->num_vports); + p += _sprintf(p, buf, "MaxRate : %d\n", g->max_rate); + p += _sprintf(p, buf, "MinRate : %d\n", g->min_rate); + p += _sprintf(p, buf, "BWShare(Indirect cfg) : %d\n", g->bw_share); + mutex_unlock(&esw->state_lock); + + return (ssize_t)(p - buf); +} + +static ssize_t config_group_store(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static ssize_t stats_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_vport *vport = xsc_eswitch_get_vport(dev->priv.eswitch, g->vf + 1); + struct ifla_vf_stats ifi; + struct xsc_vport_drop_stats stats = {}; + int err; + char *p = buf; + + err = xsc_eswitch_get_vport_stats(dev->priv.eswitch, g->vf + 1, &ifi); + if (err) + return -EINVAL; + + err = xsc_eswitch_query_vport_drop_stats(dev, vport, &stats); + if (err) + return -EINVAL; + + p += _sprintf(p, buf, "tx_packets : %llu\n", ifi.tx_packets); + p += _sprintf(p, buf, "tx_bytes : %llu\n", ifi.tx_bytes); + p += _sprintf(p, buf, "tx_dropped : %llu\n", stats.tx_dropped); + p += _sprintf(p, buf, "rx_packets : %llu\n", ifi.rx_packets); + p += _sprintf(p, buf, "rx_bytes : %llu\n", ifi.rx_bytes); + p += _sprintf(p, buf, "rx_broadcast : %llu\n", ifi.broadcast); + p += _sprintf(p, buf, "rx_multicast : %llu\n", ifi.multicast); + p += _sprintf(p, buf, "rx_dropped : %llu\n", stats.rx_dropped); + + return (ssize_t)(p - buf); +} + +static ssize_t stats_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static ssize_t num_vfs_store(struct device *device, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = container_of(device, struct pci_dev, dev); + int req_vfs; + int err; + + if (kstrtoint(buf, 0, &req_vfs) || req_vfs < 0 || + req_vfs > pci_sriov_get_totalvfs(pdev)) + return -EINVAL; + + err = xsc_core_sriov_configure(pdev, req_vfs); + if (err < 0) + return err; + + return count; +} + +static ssize_t num_vfs_show(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = container_of(device, struct pci_dev, dev); + struct xsc_core_device *dev = pci_get_drvdata(pdev); + struct xsc_core_sriov *sriov = &dev->priv.sriov; + + return sprintf(buf, "%d\n", sriov->num_vfs); +} + +static DEVICE_ATTR_RW(num_vfs); + +static const struct sysfs_ops vf_sysfs_ops = { + .show = vf_attr_show, + .store = vf_attr_store, +}; + +static const struct sysfs_ops vf_group_sysfs_ops = { + .show = vf_group_attr_show, + .store = vf_group_attr_store, +}; + +#define VF_RATE_GROUP_ATTR(_name) struct vf_group_attributes vf_group_attr_##_name = \ + __ATTR(_name, 0644, _name##_group_show, _name##_group_store) +#define VF_ATTR(_name) struct vf_attributes vf_attr_##_name = \ + __ATTR(_name, 0644, _name##_show, _name##_store) + 
+VF_ATTR(node); +VF_ATTR(port); +VF_ATTR(policy); + +VF_ATTR(mac); +VF_ATTR(vlan); +VF_ATTR(link_state); +VF_ATTR(spoofcheck); +VF_ATTR(trust); +VF_ATTR(max_tx_rate); +VF_ATTR(min_tx_rate); +VF_ATTR(config); +VF_ATTR(trunk); +VF_ATTR(stats); +VF_ATTR(group); +VF_RATE_GROUP_ATTR(max_tx_rate); +VF_RATE_GROUP_ATTR(min_tx_rate); +VF_RATE_GROUP_ATTR(config); + +static struct attribute *vf_eth_attrs[] = { + &vf_attr_node.attr, + &vf_attr_mac.attr, + &vf_attr_vlan.attr, + &vf_attr_link_state.attr, + &vf_attr_spoofcheck.attr, + &vf_attr_trust.attr, + &vf_attr_max_tx_rate.attr, + &vf_attr_min_tx_rate.attr, + &vf_attr_config.attr, + &vf_attr_trunk.attr, + &vf_attr_stats.attr, + &vf_attr_group.attr, + NULL +}; +ATTRIBUTE_GROUPS(vf_eth); + +static struct attribute *vf_group_attrs[] = { + &vf_group_attr_max_tx_rate.attr, + &vf_group_attr_min_tx_rate.attr, + &vf_group_attr_config.attr, + NULL +}; +ATTRIBUTE_GROUPS(vf_group); + +static const struct kobj_type vf_type_eth = { + .sysfs_ops = &vf_sysfs_ops, + .default_groups = vf_eth_groups, +}; + +static const struct kobj_type vf_group = { + .sysfs_ops = &vf_group_sysfs_ops, + .default_groups = vf_group_groups, +}; + +static struct vf_attributes pf_attr_min_pf_tx_rate = + __ATTR(min_tx_rate, 0644, min_pf_tx_rate_show, min_pf_tx_rate_store); + +static struct attribute *pf_eth_attrs[] = { + &pf_attr_min_pf_tx_rate.attr, + NULL, +}; +ATTRIBUTE_GROUPS(pf_eth); + +static const struct kobj_type pf_type_eth = { + .sysfs_ops = &vf_sysfs_ops, + .default_groups = pf_eth_groups, +}; + +static struct attribute *vf_ib_attrs[] = { + &vf_attr_node.attr, + &vf_attr_port.attr, + &vf_attr_policy.attr, + NULL +}; +ATTRIBUTE_GROUPS(vf_ib); + +static const struct kobj_type vf_type_ib = { + .sysfs_ops = &vf_sysfs_ops, + .default_groups = vf_ib_groups, +}; + +static struct device_attribute *xsc_class_attributes[] = { + &dev_attr_num_vfs, +}; + +int xsc_sriov_sysfs_init(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct device *device = &dev->pdev->dev; + int err; + int i; + + sriov->config = kobject_create_and_add("sriov", &device->kobj); + if (!sriov->config) + return -ENOMEM; + + if (dev->caps.log_esw_max_sched_depth) { + sriov->groups_config = kobject_create_and_add("groups", + sriov->config); + if (!sriov->groups_config) { + err = -ENOMEM; + goto err_groups; + } + } + + for (i = 0; i < ARRAY_SIZE(xsc_class_attributes); i++) { + err = device_create_file(device, xsc_class_attributes[i]); + if (err) + goto err_attr; + } + + return 0; + +err_attr: + if (sriov->groups_config) { + kobject_put(sriov->groups_config); + sriov->groups_config = NULL; + } + +err_groups: + kobject_put(sriov->config); + sriov->config = NULL; + return err; +} + +void xsc_sriov_sysfs_cleanup(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct device *device = &dev->pdev->dev; + int i; + + for (i = 0; i < ARRAY_SIZE(xsc_class_attributes); i++) + device_remove_file(device, xsc_class_attributes[i]); + + if (dev->caps.log_esw_max_sched_depth) + kobject_put(sriov->groups_config); + kobject_put(sriov->config); + sriov->config = NULL; +} + +int xsc_create_vf_group_sysfs(struct xsc_core_device *dev, + u32 group_id, struct kobject *group_kobj) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + int err; + + err = kobject_init_and_add(group_kobj, &vf_group, sriov->groups_config, + "%d", group_id); + if (err) + return err; + + kobject_uevent(group_kobj, KOBJ_ADD); + + return 0; +} + +void xsc_destroy_vf_group_sysfs(struct 
xsc_core_device *dev, + struct kobject *group_kobj) +{ + kobject_put(group_kobj); +} + +int xsc_create_vfs_sysfs(struct xsc_core_device *dev, int num_vfs) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct xsc_sriov_vf *tmp; + static const struct kobj_type *sysfs; + int err; + int vf; + + sysfs = &vf_type_ib; + sysfs = &vf_type_eth; + + sriov->vfs = kcalloc(num_vfs + 1, sizeof(*sriov->vfs), GFP_KERNEL); + if (!sriov->vfs) + return -ENOMEM; + + for (vf = 0; vf < num_vfs; vf++) { + tmp = &sriov->vfs[vf]; + tmp->dev = dev; + tmp->vf = vf; + err = kobject_init_and_add(&tmp->kobj, sysfs, sriov->config, + "%d", vf); + if (err) + goto err_vf; + + kobject_uevent(&tmp->kobj, KOBJ_ADD); + } + + tmp = &sriov->vfs[vf]; + tmp->dev = dev; + tmp->vf = 0; + err = kobject_init_and_add(&tmp->kobj, &pf_type_eth, + sriov->config, "%s", "pf"); + if (err) { + --vf; + goto err_vf; + } + + kobject_uevent(&tmp->kobj, KOBJ_ADD); + + return 0; + +err_vf: + for (; vf >= 0; vf--) { + tmp = &sriov->vfs[vf]; + kobject_put(&tmp->kobj); + } + + kfree(sriov->vfs); + sriov->vfs = NULL; + return err; +} + +void xsc_destroy_vfs_sysfs(struct xsc_core_device *dev, int num_vfs) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct xsc_sriov_vf *tmp; + int vf; + + if (num_vfs) { + tmp = &sriov->vfs[num_vfs]; + kobject_put(&tmp->kobj); + } + for (vf = 0; vf < num_vfs; vf++) { + tmp = &sriov->vfs[vf]; + kobject_put(&tmp->kobj); + } + + kfree(sriov->vfs); + sriov->vfs = NULL; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h b/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h new file mode 100644 index 0000000000000000000000000000000000000000..7f6561c1e005def0e13c5ed73f825e71e39a52a0 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef CMDQ_DEFINE_H +#define CMDQ_DEFINE_H + +#define CMDQ_PA_REG_ADDR 0xFC00000 +#define CMDQ_PA_REG_WIDTH 64 + +#define CMDQ_LOG_SIZE_REG_ADDR 0xFC00008 +#define CMDQ_LOG_SIZE_WIDTH 4 + +#define CMDQ_DB_REG_ADDR 0xFC0000C +#define CMDQ_DB_REG_WIDTH 32 + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c new file mode 100644 index 0000000000000000000000000000000000000000..acbe7e83a9e20f251a034a6ae03097fd7cf8ac56 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c @@ -0,0 +1,954 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" +#include "eswitch.h" +#include "common/xsc_fs.h" +#include "net/xsc_eth.h" +#include "common/xsc_lag.h" + +static int _xsc_query_vport_state(struct xsc_core_device *dev, u16 opmod, + u16 vport, void *out, int outlen) +{ + struct xsc_query_vport_state_in in; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_VPORT_STATE); + in.vport_number = cpu_to_be16(vport); + if (vport) + in.other_vport = 1; + + return xsc_cmd_exec(dev, &in, sizeof(in), out, outlen); +} + +u8 xsc_query_vport_state(struct xsc_core_device *dev, u16 opmod, u16 vport) +{ + struct xsc_query_vport_state_out out; + + memset(&out, 0, sizeof(out)); + _xsc_query_vport_state(dev, opmod, vport, &out, sizeof(out)); + + return out.state; +} +EXPORT_SYMBOL(xsc_query_vport_state); + +int xsc_modify_vport_admin_state(struct xsc_core_device *dev, u16 opmod, + u16 vport, u8 other_vport, u8 state) +{ + struct xsc_modify_vport_state_in in; + struct xsc_modify_vport_state_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_VPORT_STATE); + in.vport_number = cpu_to_be16(vport); + in.other_vport = other_vport; + in.admin_state = state; + + return xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); +} + +int __xsc_query_nic_vport_context(struct xsc_core_device *dev, + u16 vport, void *out, int outlen, + int force_other) +{ + struct xsc_query_nic_vport_context_in in; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + in.vport_number = cpu_to_be16(vport); + if (vport || force_other) + in.other_vport = 1; + + return xsc_cmd_exec(dev, &in, sizeof(in), out, outlen); +} + +static int xsc_query_nic_vport_context(struct xsc_core_device *dev, u16 vport, + void *out, int outlen) +{ + return __xsc_query_nic_vport_context(dev, vport, out, outlen, 0); +} + +int xsc_modify_nic_vport_context(struct xsc_core_device *dev, void *in, + int inlen) +{ + struct xsc_modify_nic_vport_context_out out; + struct xsc_modify_nic_vport_context_in *tmp; + int err; + + memset(&out, 0, sizeof(out)); + tmp = (struct xsc_modify_nic_vport_context_in *)in; + tmp->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + + err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "fail to modify nic vport err=%d status=%d\n", + err, out.hdr.status); + } + return err; +} + +int xsc_query_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 *min_inline) +{ + struct xsc_query_nic_vport_context_out out; + int err; + + memset(&out, 0, sizeof(out)); + err = xsc_query_nic_vport_context(dev, vport, &out, sizeof(out)); + if (!err) + *min_inline = out.nic_vport_ctx.min_wqe_inline_mode; + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_min_inline); + +void xsc_query_min_inline(struct xsc_core_device *dev, + u8 *min_inline_mode) +{ + switch (dev->caps.wqe_inline_mode) { + case XSC_CAP_INLINE_MODE_VPORT_CONTEXT: + if (!xsc_query_nic_vport_min_inline(dev, 0, min_inline_mode)) + break; + fallthrough; + case XSC_CAP_INLINE_MODE_L2: + *min_inline_mode = XSC_INLINE_MODE_L2; + break; + case XSC_CAP_INLINE_MODE_NOT_REQUIRED: + *min_inline_mode = XSC_INLINE_MODE_NONE; + break; + } +} +EXPORT_SYMBOL_GPL(xsc_query_min_inline); + +int xsc_modify_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 min_inline) +{ + struct xsc_modify_nic_vport_context_in in; + + memset(&in, 0, sizeof(in)); 
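+	/* Only the fields flagged in field_select below are meant to be
+	 * applied by the firmware, so min_inline can be updated without
+	 * rewriting the rest of the vport context.
+	 */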
+	in.field_select.min_inline = 1;
+	in.vport_number = vport;
+	in.other_vport = 1;
+	in.nic_vport_ctx.min_wqe_inline_mode = min_inline;
+
+	return xsc_modify_nic_vport_context(dev, &in, sizeof(in));
+}
+
+static int __xsc_query_nic_vport_mac_address(struct xsc_core_device *dev,
+					     u16 vport, u8 *addr,
+					     int force_other)
+{
+	struct xsc_query_nic_vport_context_out out;
+	u8 *out_addr;
+	int err;
+
+	memset(&out, 0, sizeof(out));
+	out_addr = out.nic_vport_ctx.permanent_address;
+
+	err = __xsc_query_nic_vport_context(dev, vport, &out, sizeof(out),
+					    force_other);
+	if (!err)
+		ether_addr_copy(addr, out_addr);
+
+	return err;
+}
+
+int xsc_query_other_nic_vport_mac_address(struct xsc_core_device *dev,
+					  u16 vport, u8 *addr)
+{
+	return __xsc_query_nic_vport_mac_address(dev, vport, addr, 1);
+}
+EXPORT_SYMBOL_GPL(xsc_query_other_nic_vport_mac_address);
+
+int xsc_query_nic_vport_mac_address(struct xsc_core_device *dev,
+				    u16 vport, u8 *addr)
+{
+	return __xsc_query_nic_vport_mac_address(dev, vport, addr, 0);
+}
+EXPORT_SYMBOL_GPL(xsc_query_nic_vport_mac_address);
+
+static int __xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev,
+					      u16 vport, u8 *addr, int force_other, bool perm_mac)
+{
+	struct xsc_modify_nic_vport_context_in *in;
+	struct xsc_adapter *adapter = netdev_priv(dev->netdev);
+	struct xsc_vport *evport = NULL;
+	int err, in_sz, i;
+	u8 *mac_addr;
+	u16 caps = 0;
+	u16 caps_mask = 0;
+	u16 lag_id = xsc_get_lag_id(dev);
+
+	in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + 2;
+
+	in = kzalloc(in_sz, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	in->lag_id = cpu_to_be16(lag_id);
+
+	if (perm_mac) {
+		in->field_select.permanent_address = 1;
+		mac_addr = in->nic_vport_ctx.permanent_address;
+	} else {
+		in->field_select.current_address = 1;
+		mac_addr = in->nic_vport_ctx.current_address;
+	}
+
+	if (force_other) {
+		in->other_vport = 1;
+		in->vport_number = cpu_to_be16(vport);
+		evport = xsc_eswitch_get_vport(adapter->xdev->priv.eswitch, vport + 1);
+	}
+
+	if (xsc_get_pp_bypass_res(dev, false))
+		caps |= BIT(XSC_TBM_CAP_PP_BYPASS);
+	caps_mask |= BIT(XSC_TBM_CAP_PP_BYPASS);
+	in->caps = cpu_to_be16(caps);
+	in->caps_mask = cpu_to_be16(caps_mask);
+
+	ether_addr_copy(mac_addr, addr);
+
+	in->field_select.addresses_list = 1;
+	if (evport)
+		in->nic_vport_ctx.vlan = cpu_to_be16(evport->vlan_id);
+
+	in->nic_vport_ctx.vlan_allowed = 1;
+
+	err = xsc_modify_nic_vport_context(dev, in, in_sz);
+	if (err)
+		goto ret;
+
+	for (i = 0; i < VLAN_N_VID; i++) {
+		if (test_bit(i, adapter->vlan_params.active_cvlans)) {
+			in->nic_vport_ctx.vlan = cpu_to_be16(i);
+			in->nic_vport_ctx.vlan_allowed = 1;
+			err |= xsc_modify_nic_vport_context(dev, in, in_sz);
+		}
+		if (test_bit(i, adapter->vlan_params.active_svlans)) {
+			in->nic_vport_ctx.vlan = cpu_to_be16(i);
+			in->nic_vport_ctx.vlan_allowed = 1;
+			err |= xsc_modify_nic_vport_context(dev, in, in_sz);
+		}
+	}
+
+ret:
+	kfree(in);
+	return err;
+}
+
+static int __xsc_modify_vport_max_rate(struct xsc_core_device *dev,
+				       u16 vport, u32 rate)
+{
+	struct xsc_vport_rate_limit_mobox_in in;
+	struct xsc_vport_rate_limit_mobox_out out;
+	int err = 0;
+
+	memset(&in, 0, sizeof(struct xsc_vport_rate_limit_mobox_in));
+	memset(&out, 0, sizeof(struct xsc_vport_rate_limit_mobox_out));
+
+	in.vport_number = cpu_to_be16(vport);
+	if (vport)
+		in.other_vport = 1;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_VPORT_RATE_LIMIT);
+	in.rate = cpu_to_be32(rate);
+
+	err = xsc_cmd_exec(dev,
&in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "modify_vport_max_rate failed!err=%d, status=%u\n", + err, out.hdr.status); + return -EINVAL; + } + + return 0; +} + +int xsc_modify_other_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, bool perm_mac) +{ + return __xsc_modify_nic_vport_mac_address(dev, vport, addr, 1, perm_mac); +} +EXPORT_SYMBOL(xsc_modify_other_nic_vport_mac_address); + +int xsc_modify_vport_max_rate(struct xsc_core_device *dev, + u16 vport, u32 rate) +{ + return __xsc_modify_vport_max_rate(dev, vport, rate); +} +EXPORT_SYMBOL(xsc_modify_vport_max_rate); + +int xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, bool perm_mac) +{ + return __xsc_modify_nic_vport_mac_address(dev, vport, addr, 0, perm_mac); +} +EXPORT_SYMBOL(xsc_modify_nic_vport_mac_address); + +int xsc_query_nic_vport_mtu(struct xsc_core_device *dev, u16 *mtu) +{ + struct xsc_query_nic_vport_context_out out; + int err; + + memset(&out, 0, sizeof(out)); + err = xsc_query_nic_vport_context(dev, 0, &out, sizeof(out)); + if (!err) + *mtu = out.nic_vport_ctx.mtu; + + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_mtu); + +int xsc_modify_nic_vport_mtu(struct xsc_core_device *dev, u16 mtu) +{ + struct xsc_modify_nic_vport_context_in in; + int err; + + memset(&in, 0, sizeof(in)); + in.field_select.mtu = 1; + in.nic_vport_ctx.mtu = mtu; + + err = xsc_modify_nic_vport_context(dev, &in, sizeof(in)); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_mtu); + +int xsc_query_nic_vport_mac_list(struct xsc_core_device *dev, + u16 vport, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size) +{ + struct xsc_query_nic_vport_context_in in; + struct xsc_query_nic_vport_context_out *out; + int max_list_size; + int req_list_size; + int out_sz; + int err; + int i; + + req_list_size = *list_size; + + max_list_size = list_type == XSC_NVPRT_LIST_TYPE_UC ? + 1 << dev->caps.log_max_current_uc_list : + 1 << dev->caps.log_max_current_mc_list; + + if (req_list_size > max_list_size) { + xsc_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n", + req_list_size, max_list_size); + req_list_size = max_list_size; + } + + out_sz = sizeof(struct xsc_query_nic_vport_context_out) + + req_list_size * 8; + + memset(&in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + in.hdr.opcode = XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT; + in.allowed_list_type = list_type; + in.vport_number = vport; + in.other_vport = 1; + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, out_sz); + if (err) + goto out; + + req_list_size = out->nic_vport_ctx.allowed_list_size; + *list_size = req_list_size; + for (i = 0; i < req_list_size; i++) { + u8 *mac_addr = (u8 *)out->nic_vport_ctx.current_uc_mac_address[i]; + + ether_addr_copy(addr_list[i], mac_addr); + } +out: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_mac_list); + +int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size) +{ + struct xsc_modify_nic_vport_context_out out; + struct xsc_modify_nic_vport_context_in *in; + int max_list_size; + int in_sz; + int err; + int i; + + max_list_size = list_type == XSC_NVPRT_LIST_TYPE_UC ? 
+ 1 << dev->caps.log_max_current_uc_list : + 1 << dev->caps.log_max_current_mc_list; + + if (list_size > max_list_size) + return -ENOSPC; + + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + + list_size * 8; + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->hdr.opcode = XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT; + in->field_select.addresses_list = 1; + in->nic_vport_ctx.allowed_list_type = list_type; + in->nic_vport_ctx.allowed_list_size = list_size; + + for (i = 0; i < list_size; i++) { + u8 *curr_mac = + (u8 *)(in->nic_vport_ctx.current_uc_mac_address[i]); + ether_addr_copy(curr_mac, addr_list[i]); + } + + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, in_sz, &out, sizeof(out)); + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_mac_list); + +int xsc_query_nic_vport_vlans(struct xsc_core_device *dev, u32 vport, + unsigned long *vlans) +{ + struct xsc_query_nic_vport_context_in in; + struct xsc_query_nic_vport_context_out *out; + int req_list_size; + int out_sz; + int err; + int i; + + req_list_size = 1 << dev->caps.log_max_vlan_list; + out_sz = sizeof(*out) + req_list_size * 8; + + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT; + in.allowed_list_type = XSC_NVPRT_LIST_TYPE_VLAN; + in.vport_number = vport; + + if (vport) + in.other_vport = 1; + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, out_sz); + if (err) + goto out; + + req_list_size = out->nic_vport_ctx.allowed_list_size; + + for (i = 0; i < req_list_size; i++) { + u16 *vlan_addr = (u16 *)&out->nic_vport_ctx.current_uc_mac_address[i]; + + bitmap_set(vlans, (*vlan_addr & 0xfff), 1); + } +out: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_vlans); + +int xsc_modify_nic_vport_vlans(struct xsc_core_device *dev, + u16 vid, bool add) +{ + struct xsc_modify_nic_vport_context_out out; + struct xsc_modify_nic_vport_context_in *in; + int in_sz; + int err; + + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + 2; + + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + in->field_select.addresses_list = 1; + + in->nic_vport_ctx.vlan_allowed = add; + in->nic_vport_ctx.allowed_list_type = XSC_NVPRT_LIST_TYPE_VLAN; + in->nic_vport_ctx.vlan = cpu_to_be16(vid); + + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, in_sz, &out, sizeof(out)); + kfree(in); + + if (err || out.hdr.status) { + xsc_core_err(dev, "Failed to modify vlan err=%d out.status=%u", + err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_vlans); + +int xsc_query_nic_vport_system_image_guid(struct xsc_core_device *dev, + u64 *system_image_guid) +{ + struct xsc_query_nic_vport_context_out out; + + memset(&out, 0, sizeof(out)); + xsc_query_nic_vport_context(dev, 0, &out, sizeof(out)); + + *system_image_guid = out.nic_vport_ctx.system_image_guid; + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_system_image_guid); + +int xsc_query_nic_vport_node_guid(struct xsc_core_device *dev, u32 vport, + u64 *node_guid) +{ + struct xsc_query_nic_vport_context_out out; + + memset(&out, 0, sizeof(out)); + xsc_query_nic_vport_context(dev, vport, &out, sizeof(out)); + + *node_guid = out.nic_vport_ctx.node_guid; + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_node_guid); + +static int __xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 
node_guid, + int force_other) +{ + struct xsc_modify_nic_vport_context_in in; + int err; + + /* vport = 0 only if ECPF modifying Host PF */ + if (!vport && !force_other) + return -EINVAL; + if (!dev->caps.vport_group_manager) + return -EACCES; + + memset(&in, 0, sizeof(in)); + in.field_select.node_guid = 1; + in.vport_number = vport; + if (vport || force_other) + in.other_vport = 1; + + in.nic_vport_ctx.node_guid = node_guid; + + err = xsc_modify_nic_vport_context(dev, &in, sizeof(in)); + + return err; +} + +int xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid) +{ + return __xsc_modify_nic_vport_node_guid(dev, vport, node_guid, 0); +} + +int xsc_modify_other_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid) +{ + return __xsc_modify_nic_vport_node_guid(dev, vport, node_guid, 1); +} + +int xsc_query_nic_vport_qkey_viol_cntr(struct xsc_core_device *dev, + u16 *qkey_viol_cntr) +{ + struct xsc_query_nic_vport_context_out out; + + memset(&out, 0, sizeof(out)); + xsc_query_nic_vport_context(dev, 0, &out, sizeof(out)); + + *qkey_viol_cntr = out.nic_vport_ctx.qkey_violation_counter; + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_qkey_viol_cntr); + +int xsc_query_hca_vport_gid(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid) +{ + int in_sz = sizeof(struct xsc_query_hca_vport_gid_in); + int out_sz = sizeof(struct xsc_query_hca_vport_gid_out); + struct xsc_query_hca_vport_gid_in *in; + struct xsc_query_hca_vport_gid_out *out; + int is_group_manager; + union ib_gid *tmp; + int tbsz; + int nout; + int err; + + is_group_manager = dev->caps.vport_group_manager; + tbsz = dev->caps.port[port_num].gid_table_len; + xsc_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n", + vf_num, gid_index, tbsz); + + if (gid_index > tbsz && gid_index != 0xffff) + return -EINVAL; + + if (gid_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * sizeof(*gid); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + in->hdr.opcode = XSC_CMD_OP_QUERY_HCA_VPORT_GID; + if (other_vport) { + if (is_group_manager) { + in->vport_number = vf_num; + in->other_vport = 1; + } else { + err = -EPERM; + goto out; + } + } + + in->gid_index = gid_index; + in->port_num = port_num; + + err = xsc_cmd_exec(dev, in, in_sz, out, out_sz); + if (err) + goto out; + + tmp = (union ib_gid *)((void *)out + + sizeof(struct xsc_query_hca_vport_gid_out)); + gid->global.subnet_prefix = tmp->global.subnet_prefix; + gid->global.interface_id = tmp->global.interface_id; + +out: + kfree(in); + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_hca_vport_gid); + +int xsc_query_hca_vport_pkey(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey) +{ + int in_sz = sizeof(struct xsc_query_hca_vport_pkey_in); + int out_sz = sizeof(struct xsc_query_hca_vport_pkey_out); + struct xsc_query_hca_vport_pkey_in *in; + struct xsc_query_hca_vport_pkey_out *out; + int is_group_manager; + void *pkarr; + int nout; + int tbsz; + int err; + int i; + + is_group_manager = dev->caps.vport_group_manager; + + tbsz = dev->caps.port[port_num].pkey_table_len; + if (pkey_index > tbsz && pkey_index != 0xffff) + return -EINVAL; + + if (pkey_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * sizeof(*pkey); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || 
!out) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	in->hdr.opcode = XSC_CMD_OP_QUERY_HCA_VPORT_PKEY;
+	if (other_vport) {
+		if (is_group_manager) {
+			in->vport_number = vf_num;
+			in->other_vport = 1;
+		} else {
+			err = -EPERM;
+			goto out;
+		}
+	}
+	in->pkey_index = pkey_index;
+
+	if (dev->caps.num_ports == 2)
+		in->port_num = port_num;
+
+	err = xsc_cmd_exec(dev, in, in_sz, out, out_sz);
+	if (err)
+		goto out;
+
+	pkarr = out->pkey;
+	for (i = 0; i < nout; i++, pkey++, pkarr += sizeof(*pkey))
+		*pkey = *(u16 *)pkarr;
+
+out:
+	kfree(in);
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_query_hca_vport_pkey);
+
+int xsc_query_hca_vport_context(struct xsc_core_device *dev,
+				u8 other_vport, u8 port_num,
+				u16 vf_num,
+				struct xsc_hca_vport_context *rep)
+{
+	struct xsc_query_hca_vport_context_out *out = NULL;
+	struct xsc_query_hca_vport_context_in in;
+	int is_group_manager;
+	void *ctx;
+	int err;
+
+	is_group_manager = dev->caps.vport_group_manager;
+
+	memset(&in, 0, sizeof(in));
+	in.hdr.opcode = XSC_CMD_OP_QUERY_HCA_VPORT_CONTEXT;
+
+	if (other_vport) {
+		if (is_group_manager) {
+			in.other_vport = 1;
+			in.vport_number = vf_num;
+		} else {
+			err = -EPERM;
+			goto ex;
+		}
+	}
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	if (dev->caps.num_ports == 2)
+		in.port_num = port_num;
+
+	err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+	if (err)
+		goto ex;
+
+	ctx = &out->hca_vport_ctx;
+	memcpy(rep, ctx, sizeof(struct xsc_hca_vport_context));
+
+ex:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_query_hca_vport_context);
+
+int xsc_query_hca_vport_node_guid(struct xsc_core_device *dev,
+				  u64 *node_guid)
+{
+	struct xsc_hca_vport_context *rep;
+	int err;
+
+	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+	if (!rep)
+		return -ENOMEM;
+
+	err = xsc_query_hca_vport_context(dev, 0, 1, 0, rep);
+	if (!err)
+		*node_guid = rep->node_guid;
+
+	kfree(rep);
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_query_hca_vport_node_guid);
+
+int xsc_query_nic_vport_promisc(struct xsc_core_device *dev,
+				u16 vport,
+				int *promisc,
+				int *allmcast)
+{
+	struct xsc_query_nic_vport_context_out *out;
+	int err;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	err = xsc_query_nic_vport_context(dev, vport, out, sizeof(*out));
+	if (err)
+		goto out;
+
+	*promisc = out->nic_vport_ctx.promisc;
+	*allmcast = out->nic_vport_ctx.allmcast;
+
+out:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_query_nic_vport_promisc);
+
+int xsc_modify_nic_vport_promisc(struct xsc_core_device *dev,
+				 bool allmulti_flag, bool promisc_flag,
+				 int allmulti, int promisc)
+{
+	struct xsc_modify_nic_vport_context_in *in;
+	int err;
+
+	in = kvzalloc(sizeof(*in), GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	in->field_select.allmcast = allmulti_flag;
+	in->nic_vport_ctx.allmcast = allmulti;
+
+	in->field_select.promisc = promisc_flag;
+	in->nic_vport_ctx.promisc = promisc;
+
+	err = xsc_modify_nic_vport_context(dev, in, sizeof(*in));
+
+	kvfree(in);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_promisc);
+
+int xsc_modify_nic_vport_spoofchk(struct xsc_core_device *dev,
+				  u16 vport, int spoofchk)
+{
+	struct xsc_modify_nic_vport_context_in *in;
+	int err;
+
+	in = kvzalloc(sizeof(*in), GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	in->other_vport = 1;
+	in->vport_number = cpu_to_be16(vport);
+	in->field_select.spoofchk = 1;
+	in->nic_vport_ctx.spoofchk = spoofchk;
+
+	err = xsc_modify_nic_vport_context(dev, in, sizeof(*in));
+
+	kvfree(in);
+
+	return err;
+}
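+/* Note that spoof checking is always configured on behalf of a VF:
+ * xsc_modify_nic_vport_spoofchk sets other_vport unconditionally, so
+ * the PF programs the filter for the VF's vport, never for its own.
+ */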
+EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_spoofchk); + +int xsc_modify_nic_vport_trust(struct xsc_core_device *dev, + u16 vport, bool trust) +{ + struct xsc_modify_nic_vport_context_in *in; + int err; + + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->other_vport = 1; + in->vport_number = cpu_to_be16(vport); + in->field_select.trust = 1; + in->nic_vport_ctx.trust = (trust ? 1 : 0); + + err = xsc_modify_nic_vport_context(dev, in, sizeof(*in)); + + kvfree(in); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_trust); + +int xsc_query_vport_counter(struct xsc_core_device *dev, u8 other_vport, + int vf, u8 port_num, void *out, + size_t out_sz) +{ + struct xsc_query_vport_counter_in *in; + int is_group_manager; + int err; + + is_group_manager = dev->caps.vport_group_manager; + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + return err; + } + + in->hdr.opcode = XSC_CMD_OP_QUERY_VPORT_COUNTER; + if (other_vport) { + if (is_group_manager) { + in->other_vport = 1; + in->vport_number = (vf + 1); + } else { + err = -EPERM; + goto free; + } + } + + if (dev->caps.num_ports == 2) + in->port_num = port_num; + + err = xsc_cmd_exec(dev, in, sizeof(*in), out, out_sz); +free: + kvfree(in); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_vport_counter); + +int xsc_modify_hca_vport_context(struct xsc_core_device *dev, + u8 other_vport, u8 port_num, + int vf, + struct xsc_hca_vport_context *req) +{ + struct xsc_modify_hca_vport_context_in in; + struct xsc_modify_hca_vport_context_out out; + int is_group_manager; + int err; + + xsc_core_dbg(dev, "vf %d\n", vf); + is_group_manager = dev->caps.vport_group_manager; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = XSC_CMD_OP_MODIFY_HCA_VPORT_CONTEXT; + if (other_vport) { + if (is_group_manager) { + in.other_vport = 1; + in.vport_number = vf; + } else { + err = -EPERM; + goto err; + } + } + + if (dev->caps.num_ports > 1) + in.port_num = port_num; + memcpy(&in.hca_vport_ctx, req, sizeof(*req)); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); +err: + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_hca_vport_context); + +/** + * xsc_eswitch_get_total_vports - Get total vports of the eswitch + * + * @dev: Pointer to core device + * + * xsc_eswitch_get_total_vports returns total number of vports for + * the eswitch. + */ +u16 xsc_eswitch_get_total_vports(const struct xsc_core_device *dev) +{ + return XSC_SPECIAL_VPORTS(dev) + xsc_core_max_vfs(dev); +} +EXPORT_SYMBOL(xsc_eswitch_get_total_vports); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c new file mode 100644 index 0000000000000000000000000000000000000000..5d0c96f204e229134c11e5a2240a3beccb300126 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "common/driver.h" +#include "common/device.h" +#include "common/xsc_core.h" +#include "wq.h" + +u32 xsc_wq_cyc_get_size(struct xsc_wq_cyc *wq) +{ + return (u32)wq->fbc.sz_m1 + 1; +} +EXPORT_SYMBOL_GPL(xsc_wq_cyc_get_size); + +static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) +{ + return ((u32)1 << log_sz) << log_stride; +} + +int xsc_eth_cqwq_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_cqwq *wq, + struct xsc_wq_ctrl *wq_ctrl) +{ + u8 log_wq_stride = ele_log_size; + u8 log_wq_sz = q_log_size; + int err; + + err = xsc_db_alloc_node(xdev, &wq_ctrl->db, param->db_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_db_alloc_node() failed, %d\n", err); + return err; + } + + err = xsc_frag_buf_alloc_node(xdev, wq_get_byte_sz(log_wq_sz, log_wq_stride), + &wq_ctrl->buf, + param->buf_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_frag_buf_alloc_node() failed, %d\n", err); + goto err_db_free; + } + + xsc_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc); + + wq_ctrl->xdev = xdev; + + return 0; + +err_db_free: + xsc_db_free(xdev, &wq_ctrl->db); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_eth_cqwq_create); + +int xsc_eth_wq_cyc_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_wq_cyc *wq, + struct xsc_wq_ctrl *wq_ctrl) +{ + u8 log_wq_stride = ele_log_size; + u8 log_wq_sz = q_log_size; + struct xsc_frag_buf_ctrl *fbc = &wq->fbc; + int err; + + err = xsc_db_alloc_node(xdev, &wq_ctrl->db, param->db_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_db_alloc_node() failed, %d\n", err); + return err; + } + + err = xsc_frag_buf_alloc_node(xdev, wq_get_byte_sz(log_wq_sz, log_wq_stride), + &wq_ctrl->buf, param->buf_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_frag_buf_alloc_node() failed, %d\n", err); + goto err_db_free; + } + + xsc_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc); + wq->sz = xsc_wq_cyc_get_size(wq); + + wq_ctrl->xdev = xdev; + + return 0; + +err_db_free: + xsc_db_free(xdev, &wq_ctrl->db); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_eth_wq_cyc_create); + +void xsc_eth_wq_destroy(struct xsc_wq_ctrl *wq_ctrl) +{ + xsc_frag_buf_free(wq_ctrl->xdev, &wq_ctrl->buf); + xsc_db_free(wq_ctrl->xdev, &wq_ctrl->db); +} +EXPORT_SYMBOL_GPL(xsc_eth_wq_destroy); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h new file mode 100644 index 0000000000000000000000000000000000000000..8811ef1bf0f772472c583dad59349c9ce84c90b1 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __XSC_WQ_H__ +#define __XSC_WQ_H__ + +#include "common/cq.h" +#include "common/qp.h" + +struct xsc_wq_param { + int buf_numa_node; + int db_numa_node; +}; + +struct xsc_wq_ctrl { + struct xsc_core_device *xdev; + struct xsc_frag_buf buf; + struct xsc_db db; +}; + +struct xsc_wq_cyc { + struct xsc_frag_buf_ctrl fbc; + u16 sz; + u16 wqe_ctr; + u16 cur_sz; +}; + +struct xsc_cqwq { + struct xsc_frag_buf_ctrl fbc; + __be32 *db; + u32 cc; /* consumer counter */ +}; + +enum xsc_res_type { + XSC_RES_UND = 0, + XSC_RES_RQ, + XSC_RES_SQ, + XSC_RES_MAX, +}; + +u32 xsc_wq_cyc_get_size(struct xsc_wq_cyc *wq); + +int xsc_buf_alloc_node(struct xsc_core_device *dev, int size, + struct xsc_frag_buf *buf, int node); + +/*api for eth driver*/ +int xsc_eth_cqwq_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_cqwq *wq, + struct xsc_wq_ctrl *wq_ctrl); + +int xsc_eth_wq_cyc_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_wq_cyc *wq, + struct xsc_wq_ctrl *wq_ctrl); +void xsc_eth_wq_destroy(struct xsc_wq_ctrl *wq_ctrl); + +static inline void xsc_init_fbc_offset(struct xsc_buf_list *frags, + u8 log_stride, u8 log_sz, + u16 strides_offset, + struct xsc_frag_buf_ctrl *fbc) +{ + fbc->frags = frags; + fbc->log_stride = log_stride; + fbc->log_sz = log_sz; + fbc->sz_m1 = (1 << fbc->log_sz) - 1; + fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; + fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; + fbc->strides_offset = strides_offset; +} + +static inline void xsc_init_fbc(struct xsc_buf_list *frags, + u8 log_stride, u8 log_sz, + struct xsc_frag_buf_ctrl *fbc) +{ + xsc_init_fbc_offset(frags, log_stride, log_sz, 0, fbc); +} + +static inline void *xsc_frag_buf_get_wqe(struct xsc_frag_buf_ctrl *fbc, + u32 ix) +{ + unsigned int frag; + + ix += fbc->strides_offset; + frag = ix >> fbc->log_frag_strides; + + return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); +} + +static inline u32 +xsc_frag_buf_get_idx_last_contig_stride(struct xsc_frag_buf_ctrl *fbc, u32 ix) +{ + u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1; + + return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); +} + +static inline int xsc_wq_cyc_missing(struct xsc_wq_cyc *wq) +{ + return wq->sz - wq->cur_sz; +} + +static inline int xsc_wq_cyc_is_empty(struct xsc_wq_cyc *wq) +{ + return !wq->cur_sz; +} + +static inline void xsc_wq_cyc_push(struct xsc_wq_cyc *wq) +{ + wq->wqe_ctr++; + wq->cur_sz++; +} + +static inline void xsc_wq_cyc_push_n(struct xsc_wq_cyc *wq, u8 n) +{ + wq->wqe_ctr += n; + wq->cur_sz += n; +} + +static inline void xsc_wq_cyc_pop(struct xsc_wq_cyc *wq) +{ + wq->cur_sz--; +} + +static inline u16 xsc_wq_cyc_ctr2ix(struct xsc_wq_cyc *wq, u16 ctr) +{ + return ctr & wq->fbc.sz_m1; +} + +static inline u16 xsc_wq_cyc_get_head(struct xsc_wq_cyc *wq) +{ + return xsc_wq_cyc_ctr2ix(wq, wq->wqe_ctr); +} + +static inline u16 xsc_wq_cyc_get_tail(struct xsc_wq_cyc *wq) +{ + return xsc_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz); +} + +static inline void *xsc_wq_cyc_get_wqe(struct xsc_wq_cyc *wq, u16 ix) +{ + return xsc_frag_buf_get_wqe(&wq->fbc, ix); +} + +static inline u32 xsc_cqwq_ctr2ix(struct xsc_cqwq *wq, u32 ctr) +{ + return ctr & wq->fbc.sz_m1; +} + +static inline u32 xsc_cqwq_get_ci(struct xsc_cqwq *wq) +{ + return xsc_cqwq_ctr2ix(wq, wq->cc); +} + +static inline u32 xsc_cqwq_get_ctr_wrap_cnt(struct xsc_cqwq *wq, u32 ctr) +{ + return ctr >> 
wq->fbc.log_sz; +} + +static inline u32 xsc_cqwq_get_wrap_cnt(struct xsc_cqwq *wq) +{ + return xsc_cqwq_get_ctr_wrap_cnt(wq, wq->cc); +} + +static inline void xsc_cqwq_pop(struct xsc_cqwq *wq) +{ + wq->cc++; +} + +#endif /* __XSC_WQ_H__ */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c new file mode 100644 index 0000000000000000000000000000000000000000..4d12ce7f0459c5a4dab282d8ef8c9b119ff5a4ae --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c @@ -0,0 +1,1418 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include +#include "common/xsc_lag.h" +#include "common/xsc_hsi.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" +#include "net/xsc_eth.h" + +#include +#include +#include +#include + +static struct xsc_board_lag *board_lag_array[MAX_BOARD_NUM]; + +struct xsc_board_lag *xsc_board_lag_get(struct xsc_core_device *xdev) +{ + return board_lag_array[xdev->board_info->board_id]; +} +EXPORT_SYMBOL(xsc_board_lag_get); + +void xsc_board_lag_set(struct xsc_core_device *xdev, + void *board_lag) +{ + struct xsc_board_lag *board_lag_new = board_lag; + + board_lag_new->board_id = xdev->board_info->board_id; + board_lag_array[xdev->board_info->board_id] = board_lag_new; +} + +void xsc_board_lag_reset(u32 board_id) +{ + board_lag_array[board_id] = NULL; +} + +static u8 hash_type_map[] = { + [NETDEV_LAG_HASH_NONE] = XSC_LAG_HASH_L23, + [NETDEV_LAG_HASH_L2] = XSC_LAG_HASH_L23, + [NETDEV_LAG_HASH_L34] = XSC_LAG_HASH_L34, + [NETDEV_LAG_HASH_L23] = XSC_LAG_HASH_L23, + [NETDEV_LAG_HASH_E23] = XSC_LAG_HASH_E23, + [NETDEV_LAG_HASH_E34] = XSC_LAG_HASH_E34, + [NETDEV_LAG_HASH_UNKNOWN] = XSC_LAG_HASH_L23, +}; + +static inline u8 xsc_lag_hashtype_convert(enum netdev_lag_hash hash_type) +{ + return hash_type_map[hash_type]; +} + +static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond) +{ + switch (BOND_MODE(bond)) { + case BOND_MODE_ROUNDROBIN: + return NETDEV_LAG_TX_TYPE_ROUNDROBIN; + case BOND_MODE_ACTIVEBACKUP: + return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP; + case BOND_MODE_BROADCAST: + return NETDEV_LAG_TX_TYPE_BROADCAST; + case BOND_MODE_XOR: + case BOND_MODE_8023AD: + return NETDEV_LAG_TX_TYPE_HASH; + default: + return NETDEV_LAG_TX_TYPE_UNKNOWN; + } +} + +enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond) +{ + switch (bond->params.xmit_policy) { + case BOND_XMIT_POLICY_LAYER2: + return NETDEV_LAG_HASH_L23; + case BOND_XMIT_POLICY_LAYER34: + return NETDEV_LAG_HASH_L34; + case BOND_XMIT_POLICY_LAYER23: + return NETDEV_LAG_HASH_L23; + case BOND_XMIT_POLICY_ENCAP23: + return NETDEV_LAG_HASH_E23; + case BOND_XMIT_POLICY_ENCAP34: + return NETDEV_LAG_HASH_E34; + default: + return NETDEV_LAG_HASH_UNKNOWN; + } +} + +static inline bool __xsc_lag_is_active(struct xsc_lag *lag) +{ + return !!(lag->lag_type & XSC_LAG_MODE_FLAGS); +} + +static inline bool __xsc_lag_is_roce(struct xsc_lag *lag) +{ + return !!(lag->lag_type & XSC_LAG_FLAG_ROCE); +} + +static inline bool __xsc_lag_is_kernel(struct xsc_lag *lag) +{ + return !!(lag->lag_type & XSC_LAG_FLAG_KERNEL); +} + +static inline struct xsc_lag *__xsc_get_lag(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag; + + if (!xdev) + return NULL; + + board_lag = xsc_board_lag_get(xdev); + if (!board_lag || xdev->bond_id == BOND_ID_INVALID) + return NULL; + + return 
&board_lag->xsc_lag[xdev->bond_id];
+}
+
+int xsc_cmd_create_lag(struct xsc_lag_event *entry)
+{
+	struct xsc_create_lag_mbox_in in = {};
+	struct xsc_create_lag_mbox_out out = {};
+	struct xsc_core_device *xdev = entry->xdev;
+	struct net_device *netdev = xdev->netdev;
+	int ret = 0;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_CREATE);
+
+	in.req.lag_id = cpu_to_be16(entry->lag_id);
+	in.req.lag_type = entry->lag_type;
+	in.req.lag_sel_mode = entry->lag_sel_mode;
+	in.req.mac_idx = xdev->pf_id;
+	in.req.bond_mode = entry->bond_mode;
+	in.req.slave_status = entry->slave_status;
+
+	memcpy(in.req.netdev_addr, netdev->dev_addr, ETH_ALEN);
+
+	xsc_core_info(xdev, "create LAG: lag_id = %d, lag_type = %d, lag_sel_mode = %d, bond_mode = %d\n",
+		      entry->lag_id, entry->lag_type, entry->lag_sel_mode, entry->bond_mode);
+
+	ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if (ret || out.hdr.status) {
+		xsc_core_err(xdev, "failed to create LAG, err =%d out.status= %u\n",
+			     ret, out.hdr.status);
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+
+int xsc_cmd_add_lag_member(struct xsc_lag_event *entry)
+{
+	struct xsc_add_lag_member_mbox_in in = {};
+	struct xsc_add_lag_member_mbox_out out = {};
+	struct xsc_core_device *xdev = entry->xdev;
+	struct net_device *netdev = xdev->netdev;
+	int ret = 0;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_ADD_MEMBER);
+
+	in.req.lag_id = cpu_to_be16(entry->lag_id);
+	in.req.lag_type = entry->lag_type;
+	in.req.lag_sel_mode = entry->lag_sel_mode;
+	in.req.mac_idx = xdev->pf_id;
+	in.req.bond_mode = entry->bond_mode;
+	in.req.slave_status = entry->slave_status;
+	in.req.mad_mac_idx = entry->roce_lag_xdev->pf_id;
+
+	memcpy(in.req.netdev_addr, netdev->dev_addr, ETH_ALEN);
+
+	xsc_core_info(xdev, "add LAG member: lag_id = %d, lag_type = %d, bond_mode = %d\n",
+		      entry->lag_id, entry->lag_type, entry->bond_mode);
+
+	ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if (ret || out.hdr.status) {
+		xsc_core_err(xdev, "failed to add LAG member, err =%d out.status= %u\n",
+			     ret, out.hdr.status);
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+
+int xsc_cmd_remove_lag_member(struct xsc_lag_event *entry)
+{
+	struct xsc_remove_lag_member_mbox_in in = {};
+	struct xsc_remove_lag_member_mbox_out out = {};
+	struct xsc_core_device *xdev = entry->xdev;
+	int ret = 0;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_REMOVE_MEMBER);
+
+	in.req.lag_id = cpu_to_be16(entry->lag_id);
+	in.req.lag_type = entry->lag_type;
+	in.req.mac_idx = xdev->pf_id;
+	in.req.bond_mode = entry->bond_mode;
+	if (entry->lag_type & XSC_LAG_FLAG_ROCE && entry->is_roce_lag_xdev) {
+		in.req.is_roce_lag_xdev = entry->is_roce_lag_xdev;
+		in.req.mad_mac_idx = entry->roce_lag_xdev->pf_id;
+		in.req.not_roce_lag_xdev_mask = entry->not_roce_lag_xdev_mask;
+	}
+
+	xsc_core_info(xdev, "remove LAG member: lag_id = %d, lag_type = %d, bond_mode = %d\n",
+		      entry->lag_id, entry->lag_type, entry->bond_mode);
+
+	ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if (ret || out.hdr.status) {
+		xsc_core_err(xdev, "failed to remove LAG member, err =%d out.status= %u\n",
+			     ret, out.hdr.status);
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+
+int xsc_cmd_update_lag_member_status(struct xsc_lag_event *entry)
+{
+	struct xsc_update_lag_member_status_mbox_in in = {};
+	struct xsc_update_lag_member_status_mbox_out out = {};
+	struct xsc_core_device *xdev = entry->xdev;
+	int ret = 0;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_UPDATE_MEMBER_STATUS);
+
+	in.req.lag_type = entry->lag_type;
+	in.req.bond_mode =
entry->bond_mode; + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.mac_idx = xdev->pf_id; + in.req.slave_status = entry->slave_status; + + xsc_core_info(xdev, "update LAG member status: lag_id = %d, bond_mode = %d, lag_type = %d, slave_status = %d, mac_idx = %d\n", + entry->lag_id, entry->bond_mode, entry->lag_type, + entry->slave_status, xdev->pf_id); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to update LAG member status, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return ret; +} + +int xsc_cmd_update_lag_hash_type(struct xsc_lag_event *entry) +{ + struct xsc_update_lag_hash_type_mbox_in in = {}; + struct xsc_update_lag_hash_type_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_UPDATE_HASH_TYPE); + + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.lag_sel_mode = entry->lag_sel_mode; + + xsc_core_info(xdev, "update LAG hash type: lag_id = %d, lag_sel_mode = %d\n", + entry->lag_id, in.req.lag_sel_mode); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to update LAG hash type, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return ret; +} + +int xsc_cmd_destroy_lag(struct xsc_lag_event *entry) +{ + struct xsc_destroy_lag_mbox_in in = {}; + struct xsc_destroy_lag_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_DESTROY); + + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.lag_type = entry->lag_type; + in.req.mac_idx = xdev->pf_id; + in.req.bond_mode = entry->bond_mode; + + xsc_core_info(xdev, "destroy LAG: lag_id = %d\n", entry->lag_id); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to destroy LAG, err =%d out.status= %u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +static int xsc_lag_set_qos(struct xsc_core_device *xdev, u16 lag_id, u8 member_idx, u8 lag_op) +{ + struct xsc_set_lag_qos_mbox_in in; + struct xsc_set_lag_qos_mbox_out out; + struct xsc_set_lag_qos_request *req; + int ret; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + req = &in.req; + + req->lag_id = cpu_to_be16(lag_id); + req->member_idx = member_idx; + req->lag_op = lag_op; + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_SET_QOS); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + return ret; +} + +void xsc_create_lag(struct xsc_lag_event *entry) +{ + int ret = 0; + bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; + struct xsc_core_device *xdev = entry->xdev; + + if (roce_lag) + xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + + ret = xsc_cmd_create_lag(entry); + if (ret) { + xsc_core_err(xdev, "failed to create LAG, err =%d\n", ret); + goto out; + } + + if (xsc_lag_set_qos(xdev, entry->lag_id, 0, QOS_LAG_OP_CREATE)) { + xsc_core_err(xdev, "failed to create QoS LAG %u\n", entry->lag_id); + goto out; + } + + if (entry->slave_status == XSC_LAG_SLAVE_ACTIVE) { + if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_ADD_MEMBER)) + xsc_core_err(xdev, "failed to add member %u for QoS LAG %u\n", + xdev->pf_id, entry->lag_id); + } + +out: + if (roce_lag) + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); +} + +void xsc_add_lag_member(struct xsc_lag_event *entry) +{ + int ret = 
0;
+	bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE;
+	struct xsc_core_device *xdev = entry->xdev;
+
+	if (roce_lag)
+		xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB);
+
+	ret = xsc_cmd_add_lag_member(entry);
+	if (ret) {
+		xsc_core_err(xdev, "failed to add LAG member, err =%d\n", ret);
+		goto out;
+	}
+
+	if (entry->slave_status == XSC_LAG_SLAVE_ACTIVE) {
+		if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_ADD_MEMBER))
+			xsc_core_err(xdev, "failed to add member %u for QoS LAG %u\n",
+				     xdev->pf_id, entry->lag_id);
+	}
+
+	return;
+
+out:
+	if (roce_lag)
+		xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB);
+}
+
+void xsc_remove_lag_member(struct xsc_lag_event *entry)
+{
+	int ret = 0;
+	bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE;
+	struct xsc_core_device *xdev = entry->xdev;
+	struct xsc_core_device *roce_lag_xdev = entry->roce_lag_xdev;
+
+	if (roce_lag && entry->is_roce_lag_xdev)
+		xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB);
+
+	ret = xsc_cmd_remove_lag_member(entry);
+	if (ret) {
+		xsc_core_err(xdev, "failed to remove LAG member, err =%d\n", ret);
+		goto out;
+	}
+
+	if (roce_lag && entry->is_roce_lag_xdev) {
+		xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB);
+		xsc_add_dev_by_protocol(roce_lag_xdev, XSC_INTERFACE_PROTOCOL_IB);
+	}
+
+	if (roce_lag && !entry->is_roce_lag_xdev)
+		xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB);
+
+	if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_DEL_MEMBER))
+		xsc_core_err(xdev, "failed to del member %u for QoS LAG %u\n",
+			     xdev->pf_id, entry->lag_id);
+
+	return;
+
+out:
+	if (roce_lag && entry->is_roce_lag_xdev)
+		xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB);
+}
+
+void xsc_update_lag_member_status(struct xsc_lag_event *entry)
+{
+	int ret = 0;
+	struct xsc_core_device *xdev = entry->xdev;
+
+	ret = xsc_cmd_update_lag_member_status(entry);
+	if (ret)
+		xsc_core_err(xdev, "failed to update LAG member status, err =%d\n", ret);
+
+	if (entry->slave_status == XSC_LAG_SLAVE_ACTIVE) {
+		if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_ADD_MEMBER))
+			xsc_core_err(xdev, "failed to add member %u for QoS LAG %u\n",
+				     xdev->pf_id, entry->lag_id);
+	} else if (entry->slave_status == XSC_LAG_SLAVE_INACTIVE) {
+		if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_DEL_MEMBER))
+			xsc_core_err(xdev, "failed to del member %u for QoS LAG %u\n",
+				     xdev->pf_id, entry->lag_id);
+	}
+}
+
+void xsc_update_lag_hash_type(struct xsc_lag_event *entry)
+{
+	int ret = 0;
+	struct xsc_core_device *xdev = entry->xdev;
+
+	ret = xsc_cmd_update_lag_hash_type(entry);
+	if (ret)
+		xsc_core_err(xdev, "failed to update LAG hash type, err =%d\n", ret);
+}
+
+void xsc_destroy_lag(struct xsc_lag_event *entry)
+{
+	int ret = 0;
+	bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE;
+	struct xsc_core_device *xdev = entry->xdev;
+
+	if (roce_lag)
+		xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB);
+
+	ret = xsc_cmd_destroy_lag(entry);
+	if (ret) {
+		xsc_core_err(xdev, "failed to destroy LAG, err =%d\n", ret);
+		goto out;
+	}
+
+	if (xsc_lag_set_qos(xdev, entry->lag_id, 0, QOS_LAG_OP_DESTROY))
+		xsc_core_err(xdev, "failed to destroy QoS LAG %u\n", entry->lag_id);
+
+out:
+	if (roce_lag)
+		xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB);
+}
+
+static void (*handlers[XSC_LAG_EVENT_MAX])(struct xsc_lag_event *entry) = {
+	[XSC_LAG_CREATE] = xsc_create_lag,
+	[XSC_LAG_ADD_MEMBER] = xsc_add_lag_member,
+	[XSC_LAG_REMOVE_MEMBER] = xsc_remove_lag_member,
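+	/* Indexed by xsc_lag_event.event_type; xsc_do_bond_thread
+	 * checks event_type < XSC_LAG_EVENT_MAX before dispatching
+	 * through this table.
+	 */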
+	[XSC_LAG_UPDATE_MEMBER_STATUS] = xsc_update_lag_member_status,
+	[XSC_LAG_UPDATE_HASH_TYPE] = xsc_update_lag_hash_type,
+	[XSC_LAG_DESTROY] = xsc_destroy_lag,
+};
+
+static int xsc_do_bond_thread(void *arg)
+{
+	struct xsc_lag_event *entry;
+	struct xsc_board_lag *board_lag = arg;
+	struct lag_event_list *lag_event_list;
+	int status;
+
+	lag_event_list = &board_lag->lag_event_list;
+
+	while (!kthread_should_stop()) {
+		if (need_resched())
+			schedule();
+
+		spin_lock(&lag_event_list->lock);
+		entry = list_first_entry_or_null(&lag_event_list->head,
+						 struct xsc_lag_event, node);
+		if (!entry) {
+			spin_unlock(&lag_event_list->lock);
+			wait_event_interruptible(lag_event_list->wq,
+						 lag_event_list->wait_flag != XSC_SLEEP);
+			if (lag_event_list->wait_flag == XSC_EXIT)
+				break;
+			lag_event_list->wait_flag = XSC_SLEEP;
+			continue;
+		}
+
+		spin_unlock(&lag_event_list->lock);
+
+		if (entry->event_type >= XSC_LAG_EVENT_MAX)
+			goto free_entry;
+
+		status = xsc_dev_list_trylock();
+		if (!status)
+			continue;
+
+		(*handlers[entry->event_type])(entry);
+		xsc_dev_list_unlock();
+
+free_entry:
+		spin_lock(&lag_event_list->lock);
+		list_del(&entry->node);
+		spin_unlock(&lag_event_list->lock);
+		kfree(entry);
+	}
+
+	return 0;
+}
+
+static inline bool xsc_is_roce_lag_allowed(struct xsc_lag *lag)
+{
+	struct xsc_core_device *xdev;
+	bool roce_lag_support = true;
+
+	list_for_each_entry(xdev, &lag->slave_list, slave_node) {
+		roce_lag_support &= !xsc_sriov_is_enabled(xdev);
+		if (!roce_lag_support) {
+			xsc_core_info(xdev, "create ROCE LAG while sriov is open\n");
+			break;
+		}
+
+		roce_lag_support &= radix_tree_empty(&xdev->priv_device.bdf_tree);
+		if (!roce_lag_support) {
+			xsc_core_info(xdev, "create ROCE LAG while the ib device is open\n");
+			break;
+		}
+	}
+
+	return roce_lag_support;
+}
+
+static bool xsc_is_sriov_lag_allowed(struct xsc_lag *lag)
+{
+	struct xsc_core_device *xdev;
+	bool sriov_lag_support = true;
+
+	list_for_each_entry(xdev, &lag->slave_list, slave_node) {
+		sriov_lag_support &= (xdev->priv.eswitch->mode == XSC_ESWITCH_OFFLOADS);
+		if (!sriov_lag_support)
+			xsc_core_info(xdev, "create SRIOV LAG while the switchdev is not open\n");
+	}
+
+	return sriov_lag_support;
+}
+
+static u8 xsc_get_lag_type(struct xsc_lag *lag)
+{
+	u8 lag_type;
+	bool roce_lag;
+	bool sriov_lag;
+	u8 lag_mode_support;
+
+	lag_mode_support = (lag->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
+			    lag->tx_type == NETDEV_LAG_TX_TYPE_HASH);
+	roce_lag = lag_mode_support && xsc_is_roce_lag_allowed(lag);
+	sriov_lag = lag_mode_support && xsc_is_sriov_lag_allowed(lag);
+	lag_type = sriov_lag ? XSC_LAG_FLAG_SRIOV :
+		(roce_lag ?
XSC_LAG_FLAG_ROCE : XSC_LAG_FLAG_KERNEL); + + return lag_type; +} + +static inline void pack_add_and_wake_wq(struct xsc_board_lag *board_lag, + struct xsc_lag_event *entry) +{ + spin_lock(&board_lag->lag_event_list.lock); + list_add_tail(&entry->node, &board_lag->lag_event_list.head); + spin_unlock(&board_lag->lag_event_list.lock); + board_lag->lag_event_list.wait_flag = XSC_WAKEUP; + wake_up(&board_lag->lag_event_list.wq); +} + +static inline enum lag_slave_status lag_slave_status_get(struct net_device *ndev) +{ + struct slave *slave = NULL; + enum lag_slave_status slave_status = XSC_LAG_SLAVE_STATUS_MAX; + + if (!netif_is_bond_slave(ndev)) + goto out; + + rcu_read_lock(); + slave = bond_slave_get_rtnl(ndev); + rcu_read_unlock(); + if (bond_slave_is_up(slave) && bond_slave_can_tx(slave)) + slave_status = XSC_LAG_SLAVE_ACTIVE; + else + slave_status = XSC_LAG_SLAVE_INACTIVE; + +out: + return slave_status; +} + +void pack_lag_create(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct net_device *ndev = xdev->netdev; + struct xsc_lag_event *entry; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + lag->lag_type = xsc_get_lag_type(lag); + + entry->event_type = XSC_LAG_CREATE; + entry->xdev = xdev; + entry->lag_sel_mode = lag->hash_type; + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + entry->slave_status = lag_slave_status_get(ndev); + + xsc_core_info(xdev, "lag_sel_mode = %d, slave_status = %d, lag_type = %d\n", + entry->lag_sel_mode, entry->slave_status, entry->lag_type); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_create_lag(entry); +} + +void pack_lag_add_member(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_lag_event *entry; + struct net_device *ndev = xdev->netdev; + struct xsc_core_device *roce_lag_xdev = NULL; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + entry->lag_type = xsc_get_lag_type(lag); + if (entry->lag_type != lag->lag_type) { + xsc_core_err(xdev, "do not permit add slave to different type lag, xdev_lag_type = %d, lag_type = %d\n", + entry->lag_type, lag->lag_type); + + kfree(entry); + return; + } + + entry->event_type = XSC_LAG_ADD_MEMBER; + entry->xdev = xdev; + entry->lag_sel_mode = lag->hash_type; + entry->slave_status = lag_slave_status_get(ndev); + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + + roce_lag_xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + entry->roce_lag_xdev = roce_lag_xdev; + entry->not_roce_lag_xdev_mask = lag->not_roce_lag_xdev_mask; + + xsc_core_info(xdev, "lag_sel_mode = %d, slave_status = %d, lag_type = %d\n", + entry->lag_sel_mode, entry->slave_status, entry->lag_type); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_add_lag_member(entry); +} + +void pack_lag_remove_member(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_lag_event *entry; + struct xsc_core_device *roce_lag_xdev = NULL; + struct xsc_core_device *xdev_tmp = NULL; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + u8 cnt = 0; + u8 not_roce_lag_xdev_mask = 0; + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), 
GFP_KERNEL); + if (!entry) + return; + + entry->event_type = XSC_LAG_REMOVE_MEMBER; + entry->xdev = xdev; + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + + if (entry->lag_type & XSC_LAG_FLAG_ROCE) { + roce_lag_xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + if (roce_lag_xdev == xdev) { + entry->is_roce_lag_xdev = 1; + + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { + cnt++; + if (cnt == 1) + continue; + + if (cnt == 2) { + roce_lag_xdev = xdev_tmp; + continue; + } + + not_roce_lag_xdev_mask |= BIT(xdev_tmp->pf_id); + } + entry->roce_lag_xdev = roce_lag_xdev; + entry->not_roce_lag_xdev_mask = not_roce_lag_xdev_mask; + } + } + + xsc_core_info(xdev, "lag_type = %d, is_roce_lag_xdev = %d, not_roce_lag_xdev_mask = %d\n", + entry->lag_type, entry->is_roce_lag_xdev, entry->not_roce_lag_xdev_mask); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_remove_lag_member(entry); +} + +void pack_lag_update_member_status(struct xsc_lag *lag, + struct net_device *ndev, enum lag_slave_status slave_status) +{ + struct xsc_lag_event *entry; + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress || lag->lag_type & XSC_LAG_FLAG_KERNEL) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + entry->event_type = XSC_LAG_UPDATE_MEMBER_STATUS; + entry->xdev = xdev; + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + entry->slave_status = slave_status; + + xsc_core_info(xdev, "lag_id = %d, slave_status = %d\n", + entry->lag_id, entry->slave_status); + + pack_add_and_wake_wq(board_lag, entry); +} + +void pack_lag_update_hash_type(struct xsc_lag *lag, + u8 bond_id, enum netdev_lag_hash hash_type) +{ + struct xsc_lag_event *entry; + struct xsc_core_device *xdev = NULL; + struct xsc_board_lag *board_lag; + + if (lag->mode_changes_in_progress || lag->lag_type & XSC_LAG_FLAG_KERNEL) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + + board_lag = xsc_board_lag_get(xdev); + + entry->event_type = XSC_LAG_UPDATE_HASH_TYPE; + entry->xdev = xdev; + entry->lag_id = lag->lag_id; + entry->lag_sel_mode = lag->hash_type; + + xsc_core_info(xdev, "lag_id = %d, lag_sel_mode = %d\n", + entry->lag_id, entry->lag_sel_mode); + + pack_add_and_wake_wq(board_lag, entry); +} + +void pack_lag_destroy(struct xsc_lag *lag, struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_lag_event *entry; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + entry->event_type = XSC_LAG_DESTROY; + entry->xdev = xdev; + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + + lag->lag_type = 0; + + xsc_core_info(xdev, "lag_id = %d, board_id = %d, lag_type = %d\n", + lag->lag_id, lag->board_id, entry->lag_type); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_destroy_lag(entry); +} + +static u8 xsc_get_valid_bond_id(struct xsc_board_lag *board_lag) +{ + u8 bond_valid_mask = board_lag->bond_valid_mask; + u8 i; + + for (i = 0; i < XSC_BOARD_LAG_MAX; i++) { + if (!(bond_valid_mask & BIT(i))) { 
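+			/* bond_valid_mask is a bitmap of in-use bond
+			 * slots; claim the first clear bit and return
+			 * its index as this board's new bond id.
+			 */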
+ board_lag->bond_valid_mask = (bond_valid_mask | BIT(i)); + return i; + } + } + return BOND_ID_INVALID; +} + +static void xsc_lag_setup(struct xsc_board_lag *board_lag, + struct net_device *upper, struct xsc_core_device *xdev, bool no_wq) +{ + struct bonding *bond = netdev_priv(upper); + struct xsc_lag *lag = NULL; + u8 bond_id; + + bond_id = xsc_get_valid_bond_id(board_lag); + + if (bond_id == BOND_ID_INVALID) + return; + + xdev->bond_id = bond_id; + lag = &board_lag->xsc_lag[xdev->bond_id]; + + INIT_LIST_HEAD(&lag->slave_list); + list_add(&xdev->slave_node, &lag->slave_list); + lag->xsc_member_cnt = 1; + lag->bond_dev = upper; + lag->bond_mode = BOND_MODE(bond); + lag->tx_type = bond_lag_tx_type(bond); + lag->hash_type = xsc_lag_hashtype_convert(bond_lag_hash_type(bond)); + lag->board_id = xdev->board_info->board_id; + lag->lag_id = xdev->caps.lag_logic_port_ofst + xdev->bond_id; + + xsc_core_info(xdev, "lag_id = %d, board_id = %d, bond_mode = %d\n", + lag->lag_id, lag->board_id, lag->bond_mode); + + pack_lag_create(lag, xdev, false); +} + +static bool xsc_is_ndev_xsc_pf(struct net_device *slave_ndev) +{ + struct device *dev = &slave_ndev->dev; + struct pci_dev *pdev = to_pci_dev(dev->parent); + + return (pdev->device == XSC_MS_PF_DEV_ID || + pdev->device == XSC_MV_SOC_PF_DEV_ID); +} + +static u8 xsc_get_bond_board_xsc_cnt(struct net_device *upper, + u32 board_id) +{ + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + struct net_device *ndev_tmp; + u8 slave_cnt = 0; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(upper, ndev_tmp) { + if (!ndev_tmp) + continue; + if (xsc_is_ndev_xsc_pf(ndev_tmp)) { + adapter = netdev_priv(ndev_tmp); + xdev = adapter->xdev; + if (xdev->board_info->board_id == board_id) + slave_cnt++; + } + } + rcu_read_unlock(); + + return slave_cnt; +} + +static void xsc_lag_member_add(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + list_add_tail(&xdev->slave_node, &lag->slave_list); + lag->xsc_member_cnt++; + lag->not_roce_lag_xdev_mask |= BIT(xdev->pf_id); + + xsc_core_dbg(xdev, "xsc_member_cnt = %d\n", + lag->xsc_member_cnt); + + pack_lag_add_member(lag, xdev, no_wq); +} + +static void xsc_lag_member_remove(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + u8 bond_valid_mask; + + lag->xsc_member_cnt--; + + xsc_core_info(xdev, "xsc_member_cnt = %d\n", + lag->xsc_member_cnt); + + if (lag->xsc_member_cnt > 0) { + pack_lag_remove_member(lag, xdev, no_wq); + } else { + pack_lag_destroy(lag, xdev, no_wq); + + lag->lag_id = LAG_ID_INVALID; + lag->board_id = BOARD_ID_INVALID; + + bond_valid_mask = board_lag->bond_valid_mask; + board_lag->bond_valid_mask = bond_valid_mask & ~BIT(xdev->bond_id); + } + + list_del(&xdev->slave_node); + xdev->bond_id = BOND_ID_INVALID; +} + +static void xsc_lag_update_member(struct xsc_lag *lag, + struct net_device *ndev, struct net_device *upper, u8 bond_id) +{ + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + u8 xsc_slave_cnt = xsc_get_bond_board_xsc_cnt(upper, lag->board_id); + + xsc_core_dbg(xdev, "xsc_slave_cnt = %d, old_xsc_slave_cnt = %d\n", + xsc_slave_cnt, lag->xsc_member_cnt); + + if (xsc_slave_cnt > lag->xsc_member_cnt) + xsc_lag_member_add(lag, xdev, false); + + if (xsc_slave_cnt < lag->xsc_member_cnt) + xsc_lag_member_remove(lag, xdev, false); +} + +static u8 xsc_get_upper_bond_id(struct net_device *bond_ndev, + struct net_device *ndev, struct xsc_board_lag 
*board_lag, + bool hash_change) +{ + u8 i; + struct xsc_lag *lag; + u8 bond_valid_mask = board_lag->bond_valid_mask; + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + u8 bond_id = BOND_ID_INVALID; + + for (i = 0; i < XSC_BOARD_LAG_MAX; i++) { + if (bond_valid_mask & BIT(i)) { + lag = &board_lag->xsc_lag[i]; + if (!hash_change) { + adapter = netdev_priv(ndev); + xdev = adapter->xdev; + if (lag->bond_dev == bond_ndev && + lag->board_id == xdev->board_info->board_id) { + bond_id = i; + break; + } + } else { + if (lag->bond_dev == bond_ndev) { + bond_id = i; + break; + } + } + } + } + + return bond_id; +} + +static struct xsc_board_lag *xsc_board_lag_filter(struct xsc_board_lag *board_lag, + struct net_device *ndev) +{ + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + + if (xsc_is_ndev_xsc_pf(ndev)) { + adapter = netdev_priv(ndev); + xdev = adapter->xdev; + if (xdev->board_info->board_id == board_lag->board_id) + return board_lag; + } + + return NULL; +} + +static void xsc_handle_changeupper_event(struct xsc_board_lag *board_lag, + struct net_device *ndev, + struct netdev_notifier_changeupper_info *info) +{ + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + struct net_device *upper = info->upper_dev; + u8 bond_id; + struct xsc_lag *lag; + + if (!netif_is_lag_master(upper) || !ndev) + return; + + mutex_lock(&board_lag->lock); + if (!xsc_board_lag_filter(board_lag, ndev)) { + mutex_unlock(&board_lag->lock); + return; + } + + adapter = netdev_priv(ndev); + xdev = adapter->xdev; + + bond_id = xsc_get_upper_bond_id(upper, ndev, board_lag, false); + xdev->bond_id = bond_id; + + xsc_core_dbg(xdev, "bond_id = %d\n", bond_id); + + if (bond_id != BOND_ID_INVALID) { + lag = &board_lag->xsc_lag[bond_id]; + xsc_lag_update_member(lag, ndev, upper, bond_id); + if (lag->xsc_member_cnt == 0) + memset(lag, 0, sizeof(*lag)); + } else { + xsc_lag_setup(board_lag, upper, xdev, false); + } + mutex_unlock(&board_lag->lock); +} + +static void xsc_handle_changelowerstate_event(struct xsc_board_lag *board_lag, + struct net_device *ndev, + struct netdev_notifier_changelowerstate_info *info) +{ + struct netdev_lag_lower_state_info *lag_lower_info; + struct net_device *bond_dev; + struct slave *slave; + struct xsc_lag *lag; + u8 bond_id; + enum lag_slave_status slave_status = XSC_LAG_SLAVE_INACTIVE; + + if (!netif_is_lag_port(ndev) || !info->lower_state_info) + return; + + rcu_read_lock(); + slave = bond_slave_get_rtnl(ndev); + rcu_read_unlock(); + if (!slave || !slave->bond || !slave->bond->dev) + return; + + bond_dev = slave->bond->dev; + + lag_lower_info = info->lower_state_info; + if (lag_lower_info->link_up && lag_lower_info->tx_enabled) + slave_status = XSC_LAG_SLAVE_ACTIVE; + + mutex_lock(&board_lag->lock); + if (!xsc_board_lag_filter(board_lag, ndev)) { + mutex_unlock(&board_lag->lock); + return; + } + + bond_id = xsc_get_upper_bond_id(bond_dev, ndev, board_lag, false); + if (bond_id == BOND_ID_INVALID) { + mutex_unlock(&board_lag->lock); + return; + } + + lag = &board_lag->xsc_lag[bond_id]; + pack_lag_update_member_status(lag, ndev, slave_status); + mutex_unlock(&board_lag->lock); +} + +static void xsc_handle_changehash_event(struct xsc_board_lag *board_lag, + struct net_device *ndev) +{ + struct bonding *bond; + enum netdev_lag_hash hash_type; + struct xsc_lag *lag; + u8 bond_id; + + if (!netif_is_lag_master(ndev)) + return; + + bond = netdev_priv(ndev); + if (!bond_mode_uses_xmit_hash(bond)) + return; + + mutex_lock(&board_lag->lock); + bond_id = 
xsc_get_upper_bond_id(ndev, NULL, board_lag, true); + if (bond_id == BOND_ID_INVALID) { + mutex_unlock(&board_lag->lock); + return; + } + + lag = &board_lag->xsc_lag[bond_id]; + hash_type = xsc_lag_hashtype_convert(bond_lag_hash_type(bond)); + + if (hash_type != lag->hash_type) { + lag->hash_type = hash_type; + pack_lag_update_hash_type(lag, bond_id, hash_type); + } + mutex_unlock(&board_lag->lock); +} + +static int xsc_lag_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct xsc_board_lag *board_lag; + + if (event != NETDEV_CHANGE && event != NETDEV_CHANGEUPPER && + event != NETDEV_CHANGELOWERSTATE) + return NOTIFY_DONE; + + board_lag = container_of(this, struct xsc_board_lag, nb); + if (!board_lag) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGEUPPER: + xsc_handle_changeupper_event(board_lag, ndev, ptr); + break; + case NETDEV_CHANGELOWERSTATE: + xsc_handle_changelowerstate_event(board_lag, ndev, ptr); + break; + case NETDEV_CHANGE: + xsc_handle_changehash_event(board_lag, ndev); + break; + } + + return NOTIFY_DONE; +} + +static struct xsc_board_lag *xsc_board_lag_dev_alloc(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag; + struct lag_event_list *lag_event_list; + int err; + + board_lag = kzalloc(sizeof(*board_lag), GFP_KERNEL); + if (!board_lag) + return NULL; + + lag_event_list = &board_lag->lag_event_list; + + INIT_LIST_HEAD(&lag_event_list->head); + spin_lock_init(&lag_event_list->lock); + init_waitqueue_head(&lag_event_list->wq); + lag_event_list->wait_flag = XSC_SLEEP; + lag_event_list->bond_poll_task = + kthread_create(xsc_do_bond_thread, (void *)board_lag, "xsc board lag"); + if (lag_event_list->bond_poll_task) + wake_up_process(lag_event_list->bond_poll_task); + + board_lag->nb.notifier_call = xsc_lag_netdev_event; + err = register_netdevice_notifier(&board_lag->nb); + if (err) + goto err_create_notifier; + + kref_init(&board_lag->ref); + mutex_init(&board_lag->lock); + board_lag->bond_valid_mask = 0; + + return board_lag; + +err_create_notifier: + xsc_core_err(xdev, "failed to register LAG netdev notifier\n"); + board_lag->nb.notifier_call = NULL; + kthread_stop(lag_event_list->bond_poll_task); + kfree(board_lag); + + return NULL; +} + +static int __xsc_lag_add_xdev(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (!board_lag) { + board_lag = xsc_board_lag_dev_alloc(xdev); + if (!board_lag) + return -EPIPE; + xsc_board_lag_set(xdev, board_lag); + } else { + kref_get(&board_lag->ref); + } + + xdev->bond_id = BOND_ID_INVALID; + + return 0; +} + +void xsc_lag_add_xdev(struct xsc_core_device *xdev) +{ + int err; + + xsc_dev_list_lock(); + err = __xsc_lag_add_xdev(xdev); + xsc_dev_list_unlock(); + + if (err) + xsc_core_dbg(xdev, "add xdev err=%d\n", err); +} +EXPORT_SYMBOL(xsc_lag_add_xdev); + +static void xsc_lag_dev_free(struct kref *ref) +{ + struct xsc_board_lag *board_lag = container_of(ref, struct xsc_board_lag, ref); + struct lag_event_list *lag_event_list = &board_lag->lag_event_list; + + if (board_lag->nb.notifier_call) + unregister_netdevice_notifier(&board_lag->nb); + + lag_event_list->wait_flag = XSC_EXIT; + wake_up(&lag_event_list->wq); + if (lag_event_list->bond_poll_task) + kthread_stop(lag_event_list->bond_poll_task); + + board_lag->nb.notifier_call = NULL; + mutex_destroy(&board_lag->lock); + + xsc_board_lag_reset(board_lag->board_id); + kfree(board_lag); +} + +void 
xsc_lag_remove_xdev(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + xsc_dev_list_lock(); + if (board_lag) + kref_put(&board_lag->ref, xsc_lag_dev_free); + xsc_dev_list_unlock(); +} +EXPORT_SYMBOL(xsc_lag_remove_xdev); + +void xsc_lag_disable(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + struct xsc_core_device *xdev_tmp = NULL; + u8 cnt = 0; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + mutex_lock(&board_lag->lock); + lag = __xsc_get_lag(xdev); + if (!lag || !__xsc_lag_is_active(lag)) { + mutex_unlock(&board_lag->lock); + return; + } + + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { + cnt++; + if (cnt == lag->xsc_member_cnt) + pack_lag_destroy(lag, xdev_tmp, false); + else + pack_lag_remove_member(lag, xdev_tmp, false); + } + + lag->mode_changes_in_progress++; + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_disable); + +void xsc_lag_enable(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + struct xsc_core_device *xdev_tmp = NULL; + u8 cnt = 0; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + mutex_lock(&board_lag->lock); + lag = __xsc_get_lag(xdev); + if (!lag || __xsc_lag_is_active(lag)) { + mutex_unlock(&board_lag->lock); + return; + } + + lag->mode_changes_in_progress--; + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { + if (cnt == 0) + pack_lag_create(lag, xdev_tmp, false); + else + pack_lag_add_member(lag, xdev_tmp, false); + + cnt++; + } + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_enable); + +void xsc_lag_add_netdev(struct net_device *ndev) +{ + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + struct bonding *bond = NULL; + struct net_device *upper = NULL; + struct slave *slave; + u8 bond_id = BOND_ID_INVALID; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + struct xsc_lag *lag; + + if (!board_lag || ndev->reg_state != NETREG_REGISTERED || + !netif_is_bond_slave(ndev)) + return; + + rcu_read_lock(); + slave = bond_slave_get_rcu(ndev); + rcu_read_unlock(); + bond = bond_get_bond_by_slave(slave); + upper = bond->dev; + + mutex_lock(&board_lag->lock); + bond_id = xsc_get_upper_bond_id(upper, ndev, board_lag, false); + xdev->bond_id = bond_id; + lag = __xsc_get_lag(xdev); + + if (bond_id != BOND_ID_INVALID) + xsc_lag_member_add(lag, xdev, true); + else + xsc_lag_setup(board_lag, upper, xdev, true); + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_add_netdev); + +void xsc_lag_remove_netdev(struct net_device *ndev) +{ + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + struct xsc_lag *lag; + + if (!board_lag) + return; + + mutex_lock(&board_lag->lock); + lag = __xsc_get_lag(xdev); + if (!lag) + goto out; + + if (__xsc_lag_is_active(lag)) { + xsc_lag_member_remove(lag, xdev, true); + if (lag->xsc_member_cnt == 0) + memset(lag, 0, sizeof(*lag)); + } + +out: + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_remove_netdev); + +bool xsc_lag_is_roce(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + + lag = __xsc_get_lag(xdev); + if (!lag) + return false; + + return __xsc_lag_is_roce(lag); +} +EXPORT_SYMBOL(xsc_lag_is_roce); + +struct xsc_lag *xsc_get_lag(struct xsc_core_device *xdev) +{ + return __xsc_get_lag(xdev); +} +EXPORT_SYMBOL(xsc_get_lag); + +u16 xsc_get_lag_id(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + u16 lag_id 
= LAG_ID_INVALID; + + xsc_board_lag_lock(xdev); + lag = __xsc_get_lag(xdev); + if (lag && __xsc_lag_is_active(lag) && !__xsc_lag_is_kernel(lag)) + lag_id = lag->lag_id; + xsc_board_lag_unlock(xdev); + + return lag_id; +} +EXPORT_SYMBOL(xsc_get_lag_id); + +struct xsc_core_device *xsc_get_roce_lag_xdev(struct xsc_core_device *xdev) +{ + struct xsc_core_device *roce_lag_xdev; + struct xsc_lag *lag; + + xsc_board_lag_lock(xdev); + if (xsc_lag_is_roce(xdev)) { + lag = __xsc_get_lag(xdev); + roce_lag_xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + } else { + roce_lag_xdev = xdev; + } + xsc_board_lag_unlock(xdev); + + return roce_lag_xdev; +} +EXPORT_SYMBOL(xsc_get_roce_lag_xdev); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..2e63e13bc97d01079af08f277b56011782b03e54 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c @@ -0,0 +1,909 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_lag.h" +#include "common/xsc_port_ctrl.h" +#include +#include +#include +#include +#include "xsc_pci_ctrl.h" +#include "common/res_obj.h" + +#define FEATURE_ONCHIP_FT_MASK BIT(4) +#define FEATURE_DMA_RW_TBL_MASK BIT(8) +#define FEATURE_PCT_EXP_MASK BIT(19) + +#define XSC_PCI_CTRL_NAME "pci_ctrl" + +static int xsc_pci_ctrl_modify_qp(struct xsc_core_device *xdev, void *in, void *out) +{ + int ret = 0, i = 0; + struct xsc_ioctl_qp_range *resp; + struct xsc_ioctl_data_tl *tl; + int insize; + struct xsc_modify_qp_mbox_in *mailin; + struct xsc_modify_qp_mbox_out mailout; + u32 qpn; + + tl = (struct xsc_ioctl_data_tl *)out; + resp = (struct xsc_ioctl_qp_range *)(tl + 1); + xsc_core_dbg(xdev, "xsc_ioctl_qp_range: qpn:%d, num:%d, opcode:%d\n", + resp->qpn, resp->num, resp->opcode); + if (resp->num == 0) { + xsc_core_dbg(xdev, "xsc_ioctl_qp_range: resp->num ==0\n"); + return 0; + } + qpn = resp->qpn; + insize = sizeof(struct xsc_modify_qp_mbox_in); + mailin = kvzalloc(insize, GFP_KERNEL); + if (!mailin) { + xsc_core_dbg(xdev, "xsc_ioctl_qp_range: enomem\n"); + return -ENOMEM; + } + for (i = 0; i < resp->num; i++) { + mailin->hdr.opcode = cpu_to_be16(resp->opcode); + mailin->qpn = cpu_to_be32(qpn + i); + ret = xsc_cmd_exec(xdev, mailin, insize, &mailout, sizeof(mailout)); + xsc_core_dbg(xdev, "modify qp state qpn:%d\n", qpn + i); + } + kvfree(mailin); + + return ret; +} + +static struct pci_dev *xsc_pci_get_pcidev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn) +{ + return pci_get_domain_bus_and_slot(domain, bus, devfn); +} + +struct xsc_core_device *xsc_pci_get_xdev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn) +{ + struct pci_dev *pdev = NULL; + struct xsc_core_device *xdev = NULL; + + pdev = xsc_pci_get_pcidev_by_bus_and_slot(domain, bus, devfn); + if (!pdev) + return NULL; + + xdev = pci_get_drvdata(pdev); + + return xdev; +} + +static int xsc_pci_ctrl_get_phy(struct xsc_core_device *xdev, + void *in, void *out) +{ + int ret = 0; + struct xsc_eswitch *esw = xdev->priv.eswitch; + struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; + struct xsc_ioctl_get_phy_info_res *resp; + u16 lag_id = xsc_get_lag_id(xdev); + struct xsc_core_device *rl_xdev; + + 
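+	/* opmod selects whether to report the local device or the device addressed by domain/bus/devfn */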
switch (tl->opmod) { + case XSC_IOCTL_OP_GET_LOCAL: + resp = (struct xsc_ioctl_get_phy_info_res *)(tl + 1); + + resp->pcie_no = xdev->pcie_no; + resp->func_id = xdev->glb_func_id; + resp->pcie_host = xdev->caps.pcie_host; + resp->mac_phy_port = xdev->mac_port; + resp->funcid_to_logic_port_off = xdev->caps.funcid_to_logic_port; + resp->lag_id = lag_id; + resp->raw_qp_id_base = xdev->caps.raweth_qp_id_base; + resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; + resp->lag_port_start = xdev->caps.lag_logic_port_ofst; + resp->send_seg_num = xdev->caps.send_ds_num; + resp->recv_seg_num = xdev->caps.recv_ds_num; + resp->raw_tpe_qp_num = xdev->caps.raw_tpe_qp_num; + resp->chip_version = xdev->chip_ver_l; + resp->on_chip_tbl_vld = + (xdev->feature_flag & FEATURE_ONCHIP_FT_MASK) ? 1 : 0; + resp->dma_rw_tbl_vld = + (xdev->feature_flag & FEATURE_DMA_RW_TBL_MASK) ? 1 : 0; + resp->pct_compress_vld = + (xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 1 : 0; + + xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n", + resp->pcie_no, resp->func_id, resp->pcie_host, + resp->mac_phy_port, resp->lag_id, + resp->funcid_to_logic_port_off); + resp->pf0_vf_funcid_base = xdev->caps.pf0_vf_funcid_base; + resp->pf0_vf_funcid_top = xdev->caps.pf0_vf_funcid_top; + resp->pf1_vf_funcid_base = xdev->caps.pf1_vf_funcid_base; + resp->pf1_vf_funcid_top = xdev->caps.pf1_vf_funcid_top; + resp->pcie0_pf_funcid_base = xdev->caps.pcie0_pf_funcid_base; + resp->pcie0_pf_funcid_top = xdev->caps.pcie0_pf_funcid_top; + resp->pcie1_pf_funcid_base = xdev->caps.pcie1_pf_funcid_base; + resp->pcie1_pf_funcid_top = xdev->caps.pcie1_pf_funcid_top; + resp->hca_core_clock = xdev->caps.hca_core_clock; + resp->mac_bit = xdev->caps.mac_bit; + if (xsc_core_is_pf(xdev)) { + mutex_lock(&esw->mode_lock); + resp->esw_mode = esw->mode; + mutex_unlock(&esw->mode_lock); + } else { + resp->esw_mode = 0; + } + resp->board_id = xdev->board_info->board_id; + break; + + case XSC_IOCTL_OP_GET_INFO_BY_BDF: + resp = (struct xsc_ioctl_get_phy_info_res *)(tl + 1); + + xsc_core_dbg(xdev, "ioctrl get_pcidev. domain=%u, bus=%u, devfn=%u\n", + resp->domain, resp->bus, resp->devfn); + + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(resp->domain, resp->bus, resp->devfn); + if (!rl_xdev) + return -1; + + resp->pcie_no = rl_xdev->pcie_no; + resp->func_id = rl_xdev->glb_func_id; + resp->pcie_host = rl_xdev->caps.pcie_host; + resp->mac_phy_port = rl_xdev->mac_port; + resp->funcid_to_logic_port_off = rl_xdev->caps.funcid_to_logic_port; + resp->lag_id = lag_id; + resp->raw_qp_id_base = rl_xdev->caps.raweth_qp_id_base; + resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; + resp->lag_port_start = xdev->caps.lag_logic_port_ofst; + resp->send_seg_num = rl_xdev->caps.send_ds_num; + resp->recv_seg_num = rl_xdev->caps.recv_ds_num; + resp->raw_tpe_qp_num = rl_xdev->caps.raw_tpe_qp_num; + resp->chip_version = rl_xdev->chip_ver_l; + resp->on_chip_tbl_vld = + (rl_xdev->feature_flag & FEATURE_ONCHIP_FT_MASK) ? 1 : 0; + resp->dma_rw_tbl_vld = + (rl_xdev->feature_flag & FEATURE_DMA_RW_TBL_MASK) ? 1 : 0; + resp->pct_compress_vld = + (rl_xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 
1 : 0;
+
+		xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n",
+			     resp->pcie_no, resp->func_id, resp->pcie_host,
+			     resp->mac_phy_port, resp->lag_id,
+			     resp->funcid_to_logic_port_off);
+		resp->pf0_vf_funcid_base = rl_xdev->caps.pf0_vf_funcid_base;
+		resp->pf0_vf_funcid_top = rl_xdev->caps.pf0_vf_funcid_top;
+		resp->pf1_vf_funcid_base = rl_xdev->caps.pf1_vf_funcid_base;
+		resp->pf1_vf_funcid_top = rl_xdev->caps.pf1_vf_funcid_top;
+		resp->pcie0_pf_funcid_base = rl_xdev->caps.pcie0_pf_funcid_base;
+		resp->pcie0_pf_funcid_top = rl_xdev->caps.pcie0_pf_funcid_top;
+		resp->pcie1_pf_funcid_base = rl_xdev->caps.pcie1_pf_funcid_base;
+		resp->pcie1_pf_funcid_top = rl_xdev->caps.pcie1_pf_funcid_top;
+		resp->board_id = xdev->board_info->board_id;
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int xsc_pci_ctrl_get_contextinfo(struct xsc_core_device *xdev,
+					void *in, void *out)
+{
+	int ret = 0;
+	struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out;
+	struct xsc_alloc_ucontext_req *req;
+	struct xsc_alloc_ucontext_resp *resp;
+	struct xsc_core_device *rl_xdev = NULL;
+
+	if (tl->opmod != XSC_IOCTL_OP_GET_CONTEXT)
+		return -EINVAL;
+
+	req = (struct xsc_alloc_ucontext_req *)(tl + 1);
+	xsc_core_dbg(xdev, "xsc_tdi_alloc_context req:\n");
+	xsc_core_dbg(xdev, "req->domain=%u\n", req->domain);
+	xsc_core_dbg(xdev, "req->bus=%u\n", req->bus);
+	xsc_core_dbg(xdev, "req->devfn=%u\n", req->devfn);
+
+	rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(req->domain, req->bus, req->devfn);
+	if (!rl_xdev)
+		return -1;
+
+	resp = (struct xsc_alloc_ucontext_resp *)(tl + 1);
+
+	resp->max_cq = 1 << rl_xdev->caps.log_max_cq;
+	resp->max_qp = 1 << rl_xdev->caps.log_max_qp;
+	resp->max_rwq_indirection_table_size = rl_xdev->caps.max_rwq_indirection_table_size;
+	resp->qpm_tx_db = rl_xdev->regs.tx_db;
+	resp->qpm_rx_db = rl_xdev->regs.rx_db;
+	resp->cqm_next_cid_reg = rl_xdev->regs.complete_reg;
+	resp->cqm_armdb = rl_xdev->regs.complete_db;
+	resp->send_ds_num = rl_xdev->caps.send_ds_num;
+	resp->recv_ds_num = rl_xdev->caps.recv_ds_num;
+	resp->send_ds_shift = rl_xdev->caps.send_wqe_shift;
+	resp->recv_ds_shift = rl_xdev->caps.recv_wqe_shift;
+	resp->glb_func_id = rl_xdev->glb_func_id;
+
+	resp->max_wqes = rl_xdev->caps.max_wqes;
+
+	xsc_core_dbg(xdev, "xsc_tdi_alloc_context resp:\n");
+	xsc_core_dbg(xdev, "resp->max_cq=%u\n", resp->max_cq);
+	xsc_core_dbg(xdev, "resp->max_qp=%u\n", resp->max_qp);
+	xsc_core_dbg(xdev, "resp->qpm_tx_db=%llx\n", resp->qpm_tx_db);
+	xsc_core_dbg(xdev, "resp->qpm_rx_db=%llx\n", resp->qpm_rx_db);
+	xsc_core_dbg(xdev, "resp->cqm_next_cid_reg=%llx\n", resp->cqm_next_cid_reg);
+	xsc_core_dbg(xdev, "resp->cqm_armdb=%llx\n", resp->cqm_armdb);
+	xsc_core_dbg(xdev, "resp->send_ds_num=%u\n", resp->send_ds_num);
+	xsc_core_dbg(xdev, "resp->send_ds_shift=%u\n", resp->send_ds_shift);
+	xsc_core_dbg(xdev, "resp->recv_ds_num=%u\n", resp->recv_ds_num);
+	xsc_core_dbg(xdev, "resp->recv_ds_shift=%u\n", resp->recv_ds_shift);
+	xsc_core_dbg(xdev, "resp->glb_func_id=%u\n", resp->glb_func_id);
+
+	return ret;
+}
+
+int noop_pre(struct kprobe *p, struct pt_regs *regs) { return 0; }
+
+static struct kprobe kp = {
+	.symbol_name = "kallsyms_lookup_name",
+};
+
+unsigned long (*kallsyms_lookup_name_func)(const char *name) = NULL;
+
+/* use a kprobe to resolve the address of kallsyms_lookup_name */
+int find_kallsyms_lookup_name(void)
+{
+	int ret = -1;
+
+	kp.addr = 0;
+	kp.pre_handler = noop_pre;
+	ret = register_kprobe(&kp);
+	if (ret < 0)
+		return ret;
+
+	kallsyms_lookup_name_func = (void *)kp.addr;
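+	/* kp.addr now holds the resolved address; the probe itself is no longer needed */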
+	unregister_kprobe(&kp);
+	return ret;
+}
+
+u16 xsc_get_irq_matrix_global_available(struct xsc_core_device *dev)
+{
+	struct db_irq_matrix *m;
+	static unsigned long addr;
+	static int flag;
+	char *name = "vector_matrix";
+	int ret;
+
+	if (flag == 0) {
+		ret = find_kallsyms_lookup_name();
+		if (ret < 0) {
+			xsc_core_err(dev, "find kallsyms_lookup_name failed\n");
+			return 0xffff;
+		}
+
+		addr = kallsyms_lookup_name_func(name);
+		xsc_core_dbg(dev, "vector_matrix addr=0x%lx\n", addr);
+		if (addr == 0) {
+			xsc_core_err(dev, "not supported, arch maybe not X86?\n");
+			/* Return 0xffff so that, even without knowing how many
+			 * CPU vectors are left, firmware can still use the value
+			 * to decide whether an interrupt can be allocated.
+			 */
+			return 0xffff;
+		}
+		flag = 1;
+	}
+
+	m = (struct db_irq_matrix *)(*(long *)addr);
+	if (!m) {
+		xsc_core_err(dev, "vector_matrix is NULL\n");
+		return 0xffff;
+	}
+	xsc_core_info(dev, "vector_matrix global_available=%u\n", m->global_available);
+	return m->global_available;
+}
+
+int xsc_pci_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out,
+			    int out_size)
+{
+	int opcode, ret = 0;
+	struct xsc_ioctl_attr *hdr;
+
+	hdr = (struct xsc_ioctl_attr *)in;
+	opcode = hdr->opcode;
+	switch (opcode) {
+	case XSC_IOCTL_GET_PHY_INFO:
+		ret = xsc_pci_ctrl_get_phy(xdev, in, out);
+		break;
+	case XSC_IOCTL_SET_QP_STATUS:
+		xsc_core_dbg(xdev, "case XSC_IOCTL_SET_QP_STATUS:\n");
+		ret = xsc_pci_ctrl_modify_qp(xdev, in, out);
+		break;
+	case XSC_IOCTL_GET_CONTEXT:
+		xsc_core_dbg(xdev, "case XSC_IOCTL_GET_CONTEXT:\n");
+		ret = xsc_pci_ctrl_get_contextinfo(xdev, in, out);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static long xsc_pci_ctrl_setinfo(struct xsc_core_device *xdev,
+				 struct xsc_ioctl_hdr __user *user_hdr)
+{
+	struct xsc_ioctl_hdr hdr;
+	int err;
+	struct xsc_set_debug_info_mbox_in in;
+	struct xsc_set_debug_info_mbox_out out;
+	struct xsc_ioctl_set_debug_info info;
+
+	err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+	if (err) {
+		xsc_core_err(xdev, "copy user_hdr from user failed, err = %d\n", err);
+		return -EFAULT;
+	}
+
+	if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) {
+		xsc_core_err(xdev, "incorrect check field, check field=%#x\n", hdr.check_filed);
+		return -EFAULT;
+	}
+
+	if (hdr.attr.length != sizeof(info)) {
+		xsc_core_err(xdev, "unexpected length, length=%d\n", hdr.attr.length);
+		return -EFAULT;
+	}
+
+	err = copy_from_user(&info, user_hdr->attr.data, hdr.attr.length);
+	if (err) {
+		xsc_core_err(xdev, "copy attr.data from user failed, err = %d\n", err);
+		return -EFAULT;
+	}
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_DEBUG_INFO);
+	switch (hdr.attr.opcode) {
+	case XSC_IOCTL_SET_LOG_LEVEL:
+		in.set_field = 0;
+		in.log_level = info.log_level;
+		break;
+	case XSC_IOCTL_SET_CMD_VERBOSE:
+		in.set_field = 1;
+		in.cmd_verbose = info.cmd_verbose;
+		break;
+	default:
+		xsc_core_err(xdev, "invalid opcode %d\n", hdr.attr.opcode);
+		return -EINVAL;
+	}
+
+	err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if (err || out.hdr.status) {
+		xsc_core_err(xdev, "failed to set debug info to fw, err = %d, status = %d\n",
+			     err, out.hdr.status);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static long xsc_pci_ctrl_getinfo(struct xsc_core_device *xdev,
+				 struct xsc_ioctl_hdr __user *user_hdr)
+{
+	struct xsc_ioctl_hdr hdr;
+	struct xsc_ioctl_hdr *in;
+	int in_size;
+	int err;
+	u16 global_available;
+	u16 totalvfs;
+
+	err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+	if (err)
+		return -EFAULT;
+	if (hdr.check_filed != XSC_IOCTL_CHECK_FILED)
+		return -EINVAL;
+	switch (hdr.attr.opcode) {
+	case XSC_IOCTL_GET_PHY_INFO:
+	case XSC_IOCTL_SET_QP_STATUS:
+	case XSC_IOCTL_GET_CONTEXT:
+	case XSC_IOCTL_GET_VECTOR_MATRIX:
+		break;
+	default:
+		return TRY_NEXT_CB;
+	}
+	in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length;
+	in = kvzalloc(in_size, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+	in->attr.opcode = hdr.attr.opcode;
+	in->attr.length = hdr.attr.length;
+
+	if (hdr.attr.opcode == XSC_IOCTL_GET_VECTOR_MATRIX) {
+		global_available = xsc_get_irq_matrix_global_available(xdev);
+		totalvfs = (pci_sriov_get_totalvfs(xdev->pdev) < 0) ? 0 :
+				pci_sriov_get_totalvfs(xdev->pdev);
+		in->attr.error = err;
+		memcpy(in->attr.data, (void *)&global_available, sizeof(u16));
+		memcpy(in->attr.data + sizeof(u16), (void *)&totalvfs, sizeof(u16));
+		goto next;
+	}
+
+	err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length);
+	if (err) {
+		kvfree(in);
+		return -EFAULT;
+	}
+	err = xsc_pci_ctrl_exec_ioctl(xdev, &in->attr,
+				      (in_size - offsetof(struct xsc_ioctl_hdr, attr)),
+				      in->attr.data, hdr.attr.length);
+	in->attr.error = err;
+next:
+	if (copy_to_user((void *)user_hdr, in, in_size))
+		err = -EFAULT;
+	kvfree(in);
+	return err;
+}
+
+static int xsc_ioctl_flow_add_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl,
+				  char *data, unsigned int datalen)
+{
+	int err = 0;
+	struct xsc_flow_pct_v4_add *pct_v4;
+	struct xsc_flow_pct_v6_add *pct_v6;
+
+	switch (tl->table) {
+	case XSC_FLOW_TBL_PCT_V4:
+	case XSC_FLOW_TBL_BM_PCT_V4:
+		pct_v4 = (struct xsc_flow_pct_v4_add *)(tl + 1);
+		err = xsc_alloc_pct_obj(file, pct_v4->priority, data, datalen);
+		break;
+	case XSC_FLOW_TBL_PCT_V6:
+	case XSC_FLOW_TBL_BM_PCT_V6:
+		pct_v6 = (struct xsc_flow_pct_v6_add *)(tl + 1);
+		err = xsc_alloc_pct_obj(file, pct_v6->priority, data, datalen);
+		break;
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static void xsc_ioctl_flow_destroy_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl)
+{
+	struct xsc_flow_pct_v4_del *pct_v4;
+	struct xsc_flow_pct_v6_del *pct_v6;
+
+	switch (tl->table) {
+	case XSC_FLOW_TBL_PCT_V4:
+	case XSC_FLOW_TBL_BM_PCT_V4:
+		pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1);
+		xsc_destroy_pct_obj(file, pct_v4->priority);
+		break;
+	case XSC_FLOW_TBL_PCT_V6:
+	case XSC_FLOW_TBL_BM_PCT_V6:
+		pct_v6 = (struct xsc_flow_pct_v6_del *)(tl + 1);
+		xsc_destroy_pct_obj(file, pct_v6->priority);
+		break;
+	default:
+		break;
+	}
+}
+
+static int xsc_ioctl_flow_cmdq_handle_res_obj(struct xsc_bdf_file *file,
+					      char *data, unsigned int datalen)
+{
+	struct xsc_ioctl_data_tl *tl;
+	int err = 0;
+
+	tl = (struct xsc_ioctl_data_tl *)data;
+
+	switch (tl->opmod) {
+	case XSC_IOCTL_OP_ADD:
+		err = xsc_ioctl_flow_add_obj(file, tl, data, datalen);
+		break;
+	case XSC_IOCTL_OP_DEL:
+		xsc_ioctl_flow_destroy_obj(file, tl);
+		break;
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static int xsc_ioctl_flow_cmdq(struct xsc_bdf_file *file,
+			       struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr)
+{
+	struct xsc_ioctl_mbox_in *in;
+	struct xsc_ioctl_mbox_out *out;
+	int in_size;
+	int out_size;
+	int err;
+
+	in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length;
+	in = kvzalloc(in_size, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode);
+	in->hdr.ver = cpu_to_be16(hdr->attr.ver);
+	in->len = __cpu_to_be16(hdr->attr.length);
+	err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length);
+	if (err) {
+		kvfree(in);
+		return -EFAULT;
+	}
+
+	err =
xsc_ioctl_flow_cmdq_handle_res_obj(file, in->data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + out_size = sizeof(struct xsc_ioctl_mbox_out) + hdr->attr.length; + out = kvzalloc(out_size, GFP_KERNEL); + if (!out) { + kvfree(in); + return -ENOMEM; + } + memcpy(out->data, in->data, hdr->attr.length); + out->len = in->len; + err = xsc_cmd_exec(file->xdev, in, in_size, out, out_size); + + hdr->attr.error = __be32_to_cpu(out->error); + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out->data, hdr->attr.length)) + err = -EFAULT; + + kvfree(in); + kvfree(out); + return err; +} + +static int xsc_ioctl_emu_cmd(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + struct xsc_ioctl_emu_hdr *emu_hdr; + u8 *buffer; + int in_size; + int out_size; + int err; + + buffer = kvzalloc(hdr->attr.length, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + err = copy_from_user(buffer, user_hdr->attr.data, hdr->attr.length); + if (err) + goto err_copy_user_data; + + emu_hdr = (struct xsc_ioctl_emu_hdr *)buffer; + in_size = emu_hdr->in_length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_alloc_in_mem; + } + memcpy(in, emu_hdr->data, emu_hdr->in_length); + + out_size = emu_hdr->out_length; + out = kvzalloc(out_size, GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto err_alloc_out_mem; + } + + err = xsc_cmd_exec(xdev, in, in_size, out, out_size); + + hdr->attr.error = __be32_to_cpu(out->error); + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data + sizeof(struct xsc_ioctl_emu_hdr), + out, out_size)) + err = -EFAULT; + + kvfree(out); + kvfree(in); + kvfree(buffer); + return err; + +err_alloc_out_mem: + kvfree(in); +err_alloc_in_mem: +err_copy_user_data: + kvfree(buffer); + return err; +} + +static int xsc_ioctl_modify_raw_qp(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +{ + struct xsc_modify_raw_qp_mbox_in *in; + struct xsc_modify_raw_qp_mbox_out *out; + int err; + + if (hdr->attr.length != sizeof(struct xsc_modify_raw_qp_request)) + return -EINVAL; + + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->req, user_hdr->attr.data, + sizeof(struct xsc_modify_raw_qp_request)); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + in->pcie_no = xdev->pcie_no; + + err = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), + out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + + hdr->attr.error = __be32_to_cpu(out->hdr.status); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static void xsc_handle_multiqp_create(struct xsc_bdf_file *file, void *in, + unsigned int inlen, void *out) +{ + u16 qp_num = 0; + int i = 0; + struct xsc_create_qp_request *req = NULL; + void *ptr = NULL; + int len = 0; + u32 qpn_base = be32_to_cpu(((struct xsc_create_multiqp_mbox_out *)out)->qpn_base); + + qp_num = be16_to_cpu(((struct xsc_create_multiqp_mbox_in *)in)->qp_num); + ptr = ((struct xsc_create_multiqp_mbox_in *)in)->data; + for (i = 0; i < 
qp_num; i++) {
+		req = (struct xsc_create_qp_request *)ptr;
+		len = sizeof(struct xsc_create_qp_request) +
+		      be16_to_cpu(req->pa_num) * sizeof(u64);
+		xsc_alloc_qp_obj(file, qpn_base + i, (char *)req, len);
+		ptr += len;
+	}
+}
+
+static void xsc_pci_ctrl_cmdq_handle_res_obj(struct xsc_bdf_file *file, void *in,
+					     unsigned int inlen, void *out, int opcode)
+{
+	unsigned int idx;
+
+	switch (opcode) {
+	case XSC_CMD_OP_ALLOC_PD:
+		idx = be32_to_cpu(((struct xsc_alloc_pd_mbox_out *)out)->pdn);
+		xsc_alloc_pd_obj(file, idx, in, inlen);
+		break;
+	case XSC_CMD_OP_DEALLOC_PD:
+		idx = be32_to_cpu(((struct xsc_dealloc_pd_mbox_in *)in)->pdn);
+		xsc_destroy_pd_obj(file, idx);
+		break;
+	case XSC_CMD_OP_CREATE_MKEY:
+		idx = be32_to_cpu(((struct xsc_create_mkey_mbox_out *)out)->mkey);
+		xsc_alloc_mr_obj(file, idx, in, inlen);
+		break;
+	case XSC_CMD_OP_DESTROY_MKEY:
+		idx = be32_to_cpu(((struct xsc_destroy_mkey_mbox_in *)in)->mkey);
+		xsc_destroy_mr_obj(file, idx);
+		break;
+	case XSC_CMD_OP_DESTROY_CQ:
+		idx = be32_to_cpu(((struct xsc_destroy_cq_mbox_in *)in)->cqn);
+		xsc_destroy_cq_obj(file, idx);
+		break;
+	case XSC_CMD_OP_CREATE_CQ:
+		idx = be32_to_cpu(((struct xsc_create_cq_mbox_out *)out)->cqn);
+		xsc_alloc_cq_obj(file, idx, in, inlen);
+		break;
+	case XSC_CMD_OP_CREATE_QP:
+		idx = be32_to_cpu(((struct xsc_create_qp_mbox_out *)out)->qpn);
+		xsc_alloc_qp_obj(file, idx,
+				 (char *)&(((struct xsc_create_qp_mbox_in *)in)->req),
+				 inlen);
+		break;
+	case XSC_CMD_OP_DESTROY_QP:
+		idx = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn);
+		xsc_destroy_qp_obj(file, idx);
+		break;
+	case XSC_CMD_OP_CREATE_MULTI_QP:
+		xsc_handle_multiqp_create(file, in, inlen, out);
+		break;
+	default:
+		break;
+	}
+}
+
+static long xsc_pci_ctrl_cmdq(struct xsc_bdf_file *file,
+			      struct xsc_ioctl_hdr __user *user_hdr)
+{
+	struct xsc_core_device *xdev = file->xdev;
+	struct xsc_ioctl_hdr hdr;
+	int err;
+
+	err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+	if (err)
+		return -EINVAL;
+
+	/* check valid */
+	if (hdr.check_filed != XSC_IOCTL_CHECK_FILED)
+		return -EINVAL;
+
+	/* check ioctl cmd */
+	switch (hdr.attr.opcode) {
+	case XSC_CMD_OP_IOCTL_FLOW:
+		return xsc_ioctl_flow_cmdq(file, user_hdr, &hdr);
+	case XSC_CMD_OP_MODIFY_RAW_QP:
+		return xsc_ioctl_modify_raw_qp(xdev, user_hdr, &hdr);
+	case XSC_CMD_OP_USER_EMU_CMD:
+		return xsc_ioctl_emu_cmd(xdev, user_hdr, &hdr);
+	default:
+		err = TRY_NEXT_CB;
+		break;
+	}
+
+	return err;
+}
+
+static long xsc_pci_ctrl_cmdq_raw(struct xsc_bdf_file *file,
+				  struct xsc_ioctl_hdr __user *user_hdr)
+{
+	struct xsc_ioctl_hdr hdr;
+	int err;
+	void *in;
+	void *out;
+	int op;
+	struct xsc_core_device *dev = file->xdev;
+	struct xsc_create_mkey_mbox_out *resp;
+	struct xsc_unregister_mr_mbox_in *req;
+	u8 key;
+	u16 out_len;
+	int qpn = 0;
+
+	err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+	if (err) {
+		xsc_core_err(dev, "fail to copy from user user_hdr\n");
+		return -EFAULT;
+	}
+
+	/* check valid */
+	if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) {
+		xsc_core_err(dev, "invalid check field %u\n", hdr.check_filed);
+		return -EINVAL;
+	}
+
+	in = kvzalloc(hdr.attr.length, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+	out_len = min_t(u16, hdr.attr.length, (u16)MAX_MBOX_OUT_LEN);
+	out = kvzalloc(out_len, GFP_KERNEL);
+	if (!out) {
+		kvfree(in);
+		return -ENOMEM;
+	}
+
+	err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length);
+	if (err) {
+		err = -EFAULT;
+		xsc_core_err(dev, "fail to copy_from_user user hdr attr\n");
+		goto err_exit;
+	}
+
+	op = be16_to_cpu(((struct xsc_inbox_hdr *)in)->opcode);
+	switch (op) {
+	case XSC_CMD_OP_CREATE_MKEY:
+		spin_lock(&dev->dev_res->mkey_lock);
+		key = 0x80 + dev->dev_res->mkey_key++;
+		spin_unlock(&dev->dev_res->mkey_lock);
+		if (dev->reg_mr_via_cmdq)
+			err = xsc_cmd_exec(dev, in, hdr.attr.length, out, hdr.attr.length);
+		else
+			err = xsc_create_mkey(dev, in, out);
+
+		resp = (struct xsc_create_mkey_mbox_out *)out;
+		resp->mkey = xsc_idx_to_mkey(be32_to_cpu(resp->mkey) & 0xffffff) | key;
+		resp->mkey = cpu_to_be32(resp->mkey);
+		break;
+	case XSC_CMD_OP_DESTROY_MKEY:
+		if (!dev->reg_mr_via_cmdq)
+			err = xsc_destroy_mkey(dev, in, out);
+		break;
+	case XSC_CMD_OP_REG_MR:
+		if (!dev->reg_mr_via_cmdq)
+			err = xsc_reg_mr(dev, in, out);
+		break;
+	case XSC_CMD_OP_DEREG_MR:
+		req = (struct xsc_unregister_mr_mbox_in *)in;
+		req->mkey = be32_to_cpu(req->mkey);
+		req->mkey = cpu_to_be32(xsc_mkey_to_idx(req->mkey));
+		if (dev->reg_mr_via_cmdq)
+			err = xsc_cmd_exec(dev, in, hdr.attr.length, out, hdr.attr.length);
+		else
+			err = xsc_dereg_mr(dev, in, out);
+		break;
+	case XSC_CMD_OP_DESTROY_QP:
+		qpn = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn);
+		xsc_send_cmd_2rst_qp(dev, qpn);
+		err = xsc_cmd_exec(dev, in, hdr.attr.length, out, out_len);
+		break;
+	default:
+		err = xsc_cmd_exec(dev, in, hdr.attr.length, out, out_len);
+		break;
+	}
+	xsc_pci_ctrl_cmdq_handle_res_obj(file, in, hdr.attr.length, out, hdr.attr.opcode);
+
+	if (copy_to_user((void *)user_hdr->attr.data, out, out_len)) {
+		xsc_core_err(dev, "fail to copy_to_user user hdr attr\n");
+		err = -EFAULT;
+	}
+err_exit:
+	kvfree(in);
+	kvfree(out);
+	return err;
+}
+
+static int xsc_pci_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd,
+			       struct xsc_ioctl_hdr __user *user_hdr, void *data)
+{
+	int err;
+
+	switch (cmd) {
+	case XSC_IOCTL_CMDQ:
+		err = xsc_pci_ctrl_cmdq(file, user_hdr);
+		break;
+	case XSC_IOCTL_DRV_GET:
+		err = xsc_pci_ctrl_getinfo(file->xdev, user_hdr);
+		break;
+	case XSC_IOCTL_DRV_SET:
+		err = xsc_pci_ctrl_setinfo(file->xdev, user_hdr);
+		break;
+	case XSC_IOCTL_CMDQ_RAW:
+		err = xsc_pci_ctrl_cmdq_raw(file, user_hdr);
+		break;
+	default:
+		err = TRY_NEXT_CB;
+		break;
+	}
+
+	return err;
+}
+
+void xsc_pci_ctrl_fini(void)
+{
+	xsc_port_ctrl_cb_dereg(XSC_PCI_CTRL_NAME);
+}
+
+int xsc_pci_ctrl_init(void)
+{
+	int ret;
+
+	ret = xsc_port_ctrl_cb_reg(XSC_PCI_CTRL_NAME, xsc_pci_ctrl_reg_cb, NULL);
+	if (ret != 0)
+		pr_err("failed to register port control node for %s\n", XSC_PCI_CTRL_NAME);
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h
new file mode 100644
index 0000000000000000000000000000000000000000..c57caed380b7f014af53607f66f1f71004ace9c2
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */ + +#ifndef XSC_PCI_CTRL_H +#define XSC_PCI_CTRL_H + +#include +#include +#include + +//for x86 +#ifndef NR_VECTORS +#define NR_VECTORS 256 +#endif +#define IRQ_MATRIX_BITS NR_VECTORS +#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS)) + +struct db_cpumap { + unsigned int available; + unsigned int allocated; + unsigned int managed; + unsigned int managed_allocated; + u8 initialized; + u8 online; + unsigned long alloc_map[IRQ_MATRIX_SIZE]; + unsigned long managed_map[IRQ_MATRIX_SIZE]; +}; + +struct db_irq_matrix { + unsigned int matrix_bits; + unsigned int alloc_start; + unsigned int alloc_end; + unsigned int alloc_size; + unsigned int global_available; + unsigned int global_reserved; + unsigned int systembits_inalloc; + unsigned int total_allocated; + unsigned int online_maps; + struct db_cpumap __percpu *maps; + unsigned long scratch_map[IRQ_MATRIX_SIZE]; + unsigned long system_map[IRQ_MATRIX_SIZE]; +}; + +u16 xsc_get_irq_matrix_global_available(struct xsc_core_device *dev); + +int xsc_pci_ctrl_init(void); +void xsc_pci_ctrl_fini(void); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..7e5c34ebe2ddc5cf49e6900cde8a0720f0366d54 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/xsc_port_ctrl.h" +#include "common/res_obj.h" + +#define XSC_PORT_CTRL_MAX 1024 +#define XSC_PORT_CTRL_NAME_PRE "yunsilicon" +#define XSC_PORT_CTRL_NAME "port_ctrl" +#define XSC_PORT_CTRL_CB_NAME_LEN 15 +DECLARE_BITMAP(g_bitmap_dev_id, XSC_PORT_CTRL_MAX); + +struct xsc_port_ctrl_reg { + struct list_head node; + char name[XSC_PORT_CTRL_CB_NAME_LEN + 1]; + port_ctrl_cb cb; + void *data; +}; + +static dev_t g_port_ctrl_root_dev; +static struct class *g_port_ctrl_class; +static int g_port_ctrl_dev_cnt; +static struct list_head g_port_ctrl_cbs = LIST_HEAD_INIT(g_port_ctrl_cbs); +struct mutex g_port_ctrl_cbs_lock; /* protect port ctrl node list */ + +static int _port_ctrl_open(struct inode *inode, struct file *filp) +{ + struct xsc_port_ctrl *ctrl = container_of(inode->i_cdev, struct xsc_port_ctrl, cdev); + struct xsc_port_ctrl_file *file; + + file = kzalloc(sizeof(*file), GFP_KERNEL); + if (!file) + return -ENOMEM; + + INIT_RADIX_TREE(&file->bdf_tree, GFP_ATOMIC); + spin_lock_init(&file->bdf_lock); + file->ctrl = ctrl; + + file->root_bdf = kzalloc(sizeof(*file->root_bdf), GFP_KERNEL); + if (!file->root_bdf) { + kfree(file); + return -ENOMEM; + } + INIT_RADIX_TREE(&file->root_bdf->obj_tree, GFP_ATOMIC); + spin_lock_init(&file->root_bdf->obj_lock); + file->root_bdf->xdev = container_of(ctrl, struct xsc_core_device, port_ctrl); + + spin_lock(&ctrl->file_lock); + list_add_tail(&file->file_node, &ctrl->file_list); + spin_unlock(&ctrl->file_lock); + filp->private_data = file; + + xsc_core_info(file->root_bdf->xdev, "process %d open port ctrl file\n", current->pid); + + return 0; +} + +static void xsc_release_port_ctrl_file(struct xsc_port_ctrl_file *file) +{ + struct xsc_bdf_file *bdf_file; + struct radix_tree_iter iter; + void **slot; + + xsc_close_bdf_file(file->root_bdf); + kfree(file->root_bdf); + spin_lock(&file->bdf_lock); + 
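+	/* release every per-BDF file still tracked in the radix tree */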
radix_tree_for_each_slot(slot, &file->bdf_tree, &iter, 0) { + bdf_file = (struct xsc_bdf_file *)(*slot); + xsc_close_bdf_file(bdf_file); + radix_tree_iter_delete(&file->bdf_tree, &iter, slot); + kfree(bdf_file); + } + spin_unlock(&file->bdf_lock); +} + +static int _port_ctrl_release(struct inode *inode, struct file *filp) +{ + struct xsc_port_ctrl_file *file = filp->private_data; + + xsc_release_port_ctrl_file(file); + spin_lock(&file->ctrl->file_lock); + list_del(&file->file_node); + spin_unlock(&file->ctrl->file_lock); + kfree(file); + + return 0; +} + +static bool is_db_ofst(struct xsc_core_device *xdev, unsigned long offset) +{ + if (offset == (xdev->regs.tx_db & PAGE_MASK)) + return true; + else if (offset == (xdev->regs.rx_db & PAGE_MASK)) + return true; + else if (offset == (xdev->regs.complete_db & PAGE_MASK)) + return true; + else if (offset == (xdev->regs.complete_reg & PAGE_MASK)) + return true; + return false; +} + +static int _port_ctrl_mmap(struct file *filp, struct vm_area_struct *vma) +{ + resource_size_t reg_base; + unsigned long start = (unsigned long)vma->vm_start; + unsigned long size = (unsigned long)(vma->vm_end - vma->vm_start); + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + u64 addr; + u32 db_type; + u32 domain = 0; + u32 bus; + u32 devfn; + struct xsc_port_ctrl_file *file; + struct xsc_core_device *xdev; + struct xsc_core_device *rl_xdev; + u32 bdf; + + file = filp->private_data; + xdev = container_of(file->ctrl, struct xsc_core_device, port_ctrl); + + xsc_core_dbg(xdev, "_port_ctrl_map:offset=%lx\n", offset); + + bdf = offset >> 32; + db_type = bdf & 0x0000000f; + devfn = (bdf >> 4) & 0x000000ff; + bus = (bdf >> 12) & 0x000000ff; + + xsc_core_dbg(xdev, "bus=%u,devfn=%u,db_type=%u\n", bus, devfn, db_type); + + if (bdf != 0) { + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(domain, bus, devfn); + if (!rl_xdev) + return -1; + + if (db_type == XSC_MMAP_MSG_SQDB) { + addr = rl_xdev->regs.tx_db; + } else if (db_type == XSC_MMAP_MSG_RQDB) { + addr = rl_xdev->regs.rx_db; + } else if (db_type == XSC_MMAP_MSG_CQDB) { + addr = rl_xdev->regs.complete_db; + } else if (db_type == XSC_MMAP_MSG_ARM_CQDB) { + addr = rl_xdev->regs.complete_reg; + } else { + pr_err("[%s:%d] mmap err\n", __func__, __LINE__); + return -1; + } + } else { + rl_xdev = xdev; + if (is_db_ofst(xdev, offset) || !offset) + addr = offset; + else + return -EINVAL; + } + + xsc_core_dbg(xdev, "tx_db=%llx,rx_db=%llx,cq_db=%llx,cq_reg=%llx\n", + rl_xdev->regs.tx_db, rl_xdev->regs.rx_db, + rl_xdev->regs.complete_db, rl_xdev->regs.complete_reg); + + reg_base = (pci_resource_start(rl_xdev->pdev, rl_xdev->bar_num) + (addr & PAGE_MASK)); + + if (addr) { + if (xdev->chip_ver_h == 0x100) + reg_base = xsc_core_is_pf(rl_xdev) ? 
reg_base - 0xA0000000 : reg_base;
+		else
+			reg_base = reg_base - 0xA0000000;
+	}
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	if (remap_pfn_range(vma, start, (reg_base >> PAGE_SHIFT), size, vma->vm_page_prot)) {
+		pr_err("[%s:%d] remap_pfn_range err\n", __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
+
+static inline struct xsc_bdf_file *get_bdf_file(struct xsc_port_ctrl_file *file,
+						struct xsc_ioctl_hdr *hdr)
+{
+	struct xsc_core_device *xdev;
+	struct xsc_bdf_file *bdf_file;
+	struct xsc_core_device *rl_xdev;
+	unsigned long key;
+
+	xdev = container_of(file->ctrl, struct xsc_core_device, port_ctrl);
+	xsc_core_dbg(xdev, "domain=%x, bus=%x, devfn=%x\n", hdr->domain, hdr->bus, hdr->devfn);
+	if ((hdr->domain == 0 && hdr->bus == 0 && hdr->devfn == 0) ||
+	    (hdr->domain == pci_domain_nr(xdev->pdev->bus) &&
+	     hdr->bus == xdev->pdev->bus->number &&
+	     hdr->devfn == xdev->pdev->devfn))
+		return file->root_bdf;
+
+	key = bdf_to_key(hdr->domain, hdr->bus, hdr->devfn);
+	spin_lock(&file->bdf_lock);
+	bdf_file = radix_tree_lookup(&file->bdf_tree, key);
+	spin_unlock(&file->bdf_lock);
+	if (bdf_file) {
+		xsc_core_dbg(bdf_file->xdev, "find the bdf file: %lx\n", bdf_file->key);
+		return bdf_file;
+	}
+
+	rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(hdr->domain, hdr->bus, hdr->devfn);
+	if (!rl_xdev) {
+		xsc_core_err(xdev, "fail to get xdev:domain=%x, bus=%x, devfn=%x\n",
+			     hdr->domain, hdr->bus, hdr->devfn);
+		return NULL;
+	}
+
+	bdf_file = kzalloc(sizeof(*bdf_file), GFP_KERNEL);
+	if (!bdf_file)
+		return NULL;
+
+	bdf_file->key = key;
+	INIT_RADIX_TREE(&bdf_file->obj_tree, GFP_ATOMIC);
+	spin_lock_init(&bdf_file->obj_lock);
+	bdf_file->xdev = rl_xdev;
+	radix_tree_preload(GFP_KERNEL);
+	spin_lock(&file->bdf_lock);
+	radix_tree_insert(&file->bdf_tree, key, bdf_file);
+	spin_unlock(&file->bdf_lock);
+	radix_tree_preload_end();
+	xsc_core_dbg(rl_xdev, "bdf file not exist, create it and add to port ctrl file\n");
+
+	return bdf_file;
+}
+
+static long _port_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct xsc_port_ctrl_reg *p;
+	struct xsc_port_ctrl_file *file;
+	struct xsc_ioctl_hdr __user *user_hdr;
+	struct xsc_bdf_file *bdf_file;
+	struct xsc_ioctl_hdr hdr;
+	int err;
+
+	file = filp->private_data;
+	user_hdr = (struct xsc_ioctl_hdr __user *)arg;
+	err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+	if (err) {
+		pr_err("%s: fail to copy from user hdr\n", __func__);
+		return err;
+	}
+
+	bdf_file = get_bdf_file(file, &hdr);
+	if (!bdf_file) {
+		pr_err("%s: fail to find bdf file\n", __func__);
+		return -EFAULT;
+	}
+
+	list_for_each_entry(p, &g_port_ctrl_cbs, node) {
+		if (p->cb) {
+			err = p->cb(bdf_file, cmd, user_hdr, p->data);
+			if (err != TRY_NEXT_CB)
+				break;
+		}
+	}
+
+	return err;
+}
+
+static const struct file_operations g_port_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = _port_ctrl_open,
+	.mmap = _port_ctrl_mmap,
+	.unlocked_ioctl = _port_ctrl_ioctl,
+	.compat_ioctl = _port_ctrl_ioctl,
+	.release = _port_ctrl_release,
+};
+
+static struct xsc_port_ctrl_reg *_port_ctrl_cbs_get(const char *name)
+{
+	struct xsc_port_ctrl_reg *p, *found;
+
+	found = NULL;
+	list_for_each_entry(p, &g_port_ctrl_cbs, node) {
+		if (strcmp(p->name, name) == 0) {
+			found = p;
+			break;
+		}
+	}
+
+	return found;
+}
+
+static void _port_ctrl_data_fini(void)
+{
+	class_destroy(g_port_ctrl_class);
+	unregister_chrdev_region(g_port_ctrl_root_dev, XSC_PORT_CTRL_MAX);
+}
+
+static int _port_ctrl_data_init(void)
+{
+	int ret;
+	int major_devid;
+
+	ret = alloc_chrdev_region(&g_port_ctrl_root_dev, 0, XSC_PORT_CTRL_MAX,
+				  XSC_PORT_CTRL_NAME_PRE);
+	if (ret < 0) {
+		pr_err("%s can't get major id\n", XSC_PORT_CTRL_NAME_PRE);
+		return -1;
+	}
+
+	major_devid = MAJOR(g_port_ctrl_root_dev);
+	pr_info("requested major_devid %d\n", major_devid);
+
+	g_port_ctrl_class = class_create(XSC_PORT_CTRL_NAME_PRE);
+	if (IS_ERR(g_port_ctrl_class)) {
+		pr_err("failed to create class with name %s\n",
+		       XSC_PORT_CTRL_NAME_PRE);
+		unregister_chrdev_region(g_port_ctrl_root_dev, XSC_PORT_CTRL_MAX);
+		return -1;
+	}
+
+	g_port_ctrl_dev_cnt = 0;
+
+	return 0;
+}
+
+static void _port_ctrl_dev_del(struct xsc_core_device *dev)
+{
+	struct xsc_port_ctrl *ctrl;
+	struct xsc_port_ctrl_file *file, *n;
+	int dev_id = 0;
+
+	ctrl = &dev->port_ctrl;
+	if (!ctrl)
+		return;
+
+	dev_id = MINOR(ctrl->devid);
+	spin_lock(&ctrl->file_lock);
+	list_for_each_entry_safe(file, n, &ctrl->file_list, file_node) {
+		xsc_release_port_ctrl_file(file);
+		list_del(&file->file_node);
+		kfree(file);
+	}
+	spin_unlock(&ctrl->file_lock);
+
+	device_destroy(g_port_ctrl_class, ctrl->devid);
+	cdev_del(&ctrl->cdev);
+
+	clear_bit(dev_id, g_bitmap_dev_id);
+	g_port_ctrl_dev_cnt--;
+}
+
+static int _port_ctrl_dev_add(struct xsc_core_device *dev)
+{
+	struct xsc_port_ctrl *ctrl;
+	int ret;
+	int dev_id = 0;
+
+	if (g_port_ctrl_dev_cnt >= XSC_PORT_CTRL_MAX) {
+		xsc_core_err(dev, "too many port control devices\n");
+		return -ENOMEM;
+	}
+
+	ctrl = &dev->port_ctrl;
+	dev_id = find_first_zero_bit(g_bitmap_dev_id, XSC_PORT_CTRL_MAX);
+	ctrl->devid = g_port_ctrl_root_dev + dev_id;
+	ctrl->cdev.owner = THIS_MODULE;
+	INIT_LIST_HEAD(&ctrl->file_list);
+	spin_lock_init(&ctrl->file_lock);
+	cdev_init(&ctrl->cdev, &g_port_ctrl_fops);
+	ret = cdev_add(&ctrl->cdev, ctrl->devid, 1);
+	if (ret != 0) {
+		xsc_core_err(dev, "failed to add cdev\n");
+		return -ENOMEM;
+	}
+
+	ctrl->device = device_create(g_port_ctrl_class, NULL, ctrl->devid, NULL,
+				     "%s!%s_%04x:%02x:%02x.%x", XSC_PORT_CTRL_NAME_PRE,
+				     XSC_PORT_CTRL_NAME, pci_domain_nr(dev->pdev->bus),
+				     dev->pdev->bus->number,
+				     PCI_SLOT(dev->pdev->devfn),
+				     PCI_FUNC(dev->pdev->devfn));
+	if (IS_ERR(ctrl->device)) {
+		xsc_core_err(dev, "failed to create port control device\n");
+		cdev_del(&ctrl->cdev);
+		return -ENOMEM;
+	}
+
+	g_port_ctrl_dev_cnt++;
+	set_bit(dev_id, g_bitmap_dev_id);
+
+	return 0;
+}
+
+static void _port_ctrl_cb_fini(void)
+{
+	struct xsc_port_ctrl_reg *p, *n;
+
+	list_for_each_entry_safe(p, n, &g_port_ctrl_cbs, node) {
+		mutex_lock(&g_port_ctrl_cbs_lock);
+		list_del(&p->node);
+		mutex_unlock(&g_port_ctrl_cbs_lock);
+		kfree(p);
+	}
+}
+
+static int _port_ctrl_cb_init(void)
+{
+	mutex_init(&g_port_ctrl_cbs_lock);
+
+	return 0;
+}
+
+static void _port_ctrl_dev_flush(void)
+{
+}
+
+void xsc_port_ctrl_fini(void)
+{
+	_port_ctrl_dev_flush();
+	_port_ctrl_data_fini();
+	_port_ctrl_cb_fini();
+}
+
+int xsc_port_ctrl_init(void)
+{
+	int ret;
+
+	ret = _port_ctrl_data_init();
+	if (ret != 0) {
+		pr_err("failed to initialize port ctrl data\n");
+		return -1;
+	}
+
+	ret = _port_ctrl_cb_init();
+	if (ret != 0) {
+		pr_err("failed to initialize port ctrl cb\n");
+		_port_ctrl_data_fini();
+		return -1;
+	}
+
+	return 0;
+}
+
+void xsc_port_ctrl_remove(struct xsc_core_device *dev)
+{
+	_port_ctrl_dev_del(dev);
+}
+
+int xsc_port_ctrl_probe(struct xsc_core_device *dev)
+{
+	int ret = 0;
+
+	ret = _port_ctrl_dev_add(dev);
+	if (ret != 0)
+		xsc_core_err(dev, "failed to add new port control device\n");
+
+	return ret;
+}
+
+int
xsc_port_ctrl_cb_reg(const char *name, port_ctrl_cb cb, void *data) +{ + struct xsc_port_ctrl_reg *reg_node; + + if (strlen(name) > XSC_PORT_CTRL_CB_NAME_LEN) { + pr_err("the name is too long to register to port control\n"); + return -1; + } + + reg_node = _port_ctrl_cbs_get(name); + if (reg_node) { + pr_err("failed to register a duplicated node\n"); + return -1; + } + + reg_node = kmalloc(sizeof(*reg_node), GFP_KERNEL); + if (!reg_node) + return -1; + + strscpy(reg_node->name, name, sizeof(reg_node->name)); + reg_node->cb = cb; + reg_node->data = data; + INIT_LIST_HEAD(®_node->node); + + mutex_lock(&g_port_ctrl_cbs_lock); + list_add_tail(®_node->node, &g_port_ctrl_cbs); + mutex_unlock(&g_port_ctrl_cbs_lock); + + return 0; +} +EXPORT_SYMBOL(xsc_port_ctrl_cb_reg); + +void xsc_port_ctrl_cb_dereg(const char *name) +{ + struct xsc_port_ctrl_reg *p, *n; + + list_for_each_entry_safe(p, n, &g_port_ctrl_cbs, node) { + if (strcmp(p->name, name) == 0) { + mutex_lock(&g_port_ctrl_cbs_lock); + list_del(&p->node); + mutex_unlock(&g_port_ctrl_cbs_lock); + kfree(p); + break; + } + } +} +EXPORT_SYMBOL(xsc_port_ctrl_cb_dereg); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index b5f012619e42da6a05329d3af1a55a56ae01e49b..00e6a5723230a996498d075977e2b935c58bb964 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -657,6 +657,7 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl, unsigned long *supported, struct phylink_link_state *state) { + unsigned long capabilities; struct phylink_pcs *pcs; int ret; @@ -696,10 +697,17 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl, } /* Then validate the link parameters with the MAC */ - if (pl->mac_ops->validate) + if (pl->mac_ops->validate) { pl->mac_ops->validate(pl->config, supported, state); - else - phylink_generic_validate(pl->config, supported, state); + } else { + if (pl->mac_ops->mac_get_caps) + capabilities = pl->mac_ops->mac_get_caps(pl->config, + state->interface); + else + capabilities = pl->config->mac_capabilities; + + phylink_validate_mask_caps(supported, state, capabilities); + } return phylink_is_empty_linkmode(supported) ? 
-EINVAL : 0; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 52c8fd3d5c4796290e12535d4d2047d31dd3a775..9f4d9fbc2fa71d6c906241d437d681ea8ea6a0e8 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1079,6 +1079,10 @@ static irqreturn_t nvme_irq(int irq, void *data) struct nvme_queue *nvmeq = data; DEFINE_IO_COMP_BATCH(iob); +#ifdef CONFIG_LOONGARCH + /* Ensure that the data is completely in place */ + mb(); +#endif if (nvme_poll_cq(nvmeq, &iob)) { if (!rq_list_empty(iob.req_list)) nvme_pci_complete_batch(&iob); diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 6554a2e89d3612dcb8e232802269a34c42d28e83..6449056b57dd3032b0a1fbd991f4248be0801d84 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -598,3 +598,15 @@ int pci_write_config_dword(const struct pci_dev *dev, int where, return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_dword); + +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set) +{ + u32 val; + + pci_read_config_dword(dev, pos, &val); + val &= ~clear; + val |= set; + pci_write_config_dword(dev, pos, val); +} +EXPORT_SYMBOL(pci_clear_and_set_config_dword); diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index c0c3f28249907460fa7394e94253839f66a06ede..2a2a3ccd66ad3599638a21fc30df3facb71d02d5 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -342,6 +342,10 @@ config PCIE_XILINX_CPM Say 'Y' here if you want kernel support for the Xilinx Versal CPM host bridge. +config PCI_SW64 + bool + depends on SW64 && PCI + source "drivers/pci/controller/cadence/Kconfig" source "drivers/pci/controller/dwc/Kconfig" source "drivers/pci/controller/mobiveil/Kconfig" diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index 37c8663de7fe1ff7c9c948cd39f4b6ce1a912f5b..9d161c053bc4b206611b82a574c65a0c2f83bcb3 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o obj-$(CONFIG_PCIE_APPLE) += pcie-apple.o obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o +obj-$(CONFIG_PCI_SW64) += pci-sunway.o # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW obj-y += dwc/ diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index ef0b2efa9f93e0d99c23c35dcdbd9726cb040663..74d500819e92814ace3e1b9199b5d72e89b61199 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -341,6 +341,8 @@ struct dw_pcie_ep_ops { * driver. 
*/ unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no); + + CK_KABI_RESERVE(1) }; struct dw_pcie_ep_func { @@ -364,6 +366,8 @@ struct dw_pcie_ep { void __iomem *msi_mem; phys_addr_t msi_mem_phys; struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS]; + + CK_KABI_RESERVE(1) }; struct dw_pcie_ops { @@ -378,6 +382,8 @@ struct dw_pcie_ops { enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie); int (*start_link)(struct dw_pcie *pcie); void (*stop_link)(struct dw_pcie *pcie); + + CK_KABI_RESERVE(1) }; struct dw_pcie { diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index bc630ab8a283162189989cc161cfe87ba4527cb8..9349d65b9dbcfe9f019d6cb9f5c7d07eed21a9c5 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "../pci.h" @@ -32,6 +33,7 @@ #define DEV_LS7A_CONF 0x7a10 #define DEV_LS7A_GNET 0x7a13 #define DEV_LS7A_EHCI 0x7a14 +#define DEV_LS7A_OHCI 0x7a24 #define DEV_LS7A_DC2 0x7a36 #define DEV_LS7A_HDMI 0x7a37 @@ -80,6 +82,20 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_LPC, system_bus_quirk); +static void loongson_d3_quirk(struct pci_dev *pdev) +{ + pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; + pdev->no_d1d2 = 1; +} +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT3, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT4, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT5, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT6, loongson_d3_quirk); + /* * Some Loongson PCIe ports have hardware limitations on their Maximum Read * Request Size. They can't handle anything larger than this. 
Sane @@ -175,6 +191,97 @@ static void loongson_pci_msi_quirk(struct pci_dev *dev) pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk); +static void loongson_ohci_quirk(struct pci_dev *dev) +{ + if (dev->revision == 0x2) + dev->resource[0].start += 0x1000; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_OHCI, loongson_ohci_quirk); + +static void loongson_display_quirk(struct pci_dev *dev) +{ + u32 val; + u64 mask, size; + u64 max_size = 0; + int i, num = 0; + struct pci_bus *bus = dev->bus; + + if (!dev->bus->number) { + if (!(dev->vendor == PCI_VENDOR_ID_LOONGSON && dev->device == 0x7a25)) + return; + } else { + while (!pci_is_root_bus(bus->parent)) + bus = bus->parent; + + /* ensure the slot is behind a 7a2000 bridge */ + if (bus->self->vendor != PCI_VENDOR_ID_LOONGSON || bus->self->device < 0x7a39) + return; + } + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (dev->resource[i].flags & IORESOURCE_MEM) { + size = dev->resource[i].end - dev->resource[i].start; + if (size > max_size) { + max_size = size; + num = i; + } + } + } + if (!max_size) /* no memory BAR found */ + return; + mask = ~(dev->resource[num].end - dev->resource[num].start); + val = (dev->resource[num].start >> (24 - 16)) | ((mask >> 24) & 0xffff); + writel(val, (volatile void *)0x80000efdfb000174UL); + writel(0x80000000, (volatile void *)0x80000efdfb000170UL); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a25, loongson_display_quirk); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, + PCI_BASE_CLASS_DISPLAY, 16, loongson_display_quirk); + +static void pci_fixup_aspeed(struct pci_dev *pdev) +{ + struct pci_dev *bridge; + struct pci_bus *bus; + struct pci_dev *vdevp = NULL; + u16 config; + + bus = pdev->bus; + bridge = bus->self; + + /* Is VGA routed to us? */ + if (bridge && (pci_is_bridge(bridge))) { + pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config); + + /* Yes, the bridge routes VGA as the PCI bridge-to-bridge + * spec requires, so there is nothing to do. + */ + if (config & PCI_BRIDGE_CTL_VGA) + return; + + dev_warn(&pdev->dev, "VGA bridge control is not enabled\n"); + } + + /* Nothing to do if the system already has a default VGA device */ + if (vga_default_device()) + return; + + /* No default vga device yet; look for other candidates */ + while ((vdevp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, vdevp))) { + if (vdevp->vendor != 0x1a03) { + /* Another VGA device is present in the system; do nothing */ + dev_info(&pdev->dev, + "Another boot vga device: 0x%x:0x%x\n", + vdevp->vendor, vdevp->device); + return; + } + } + + vga_set_default_device(pdev); + + dev_info(&pdev->dev, + "Boot vga device set as 0x%x:0x%x\n", + pdev->vendor, pdev->device); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(0x1a03, 0x2000, + PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_aspeed); static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) { @@ -255,6 +362,36 @@ static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, return NULL; } +static int pci_loongson_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + void __iomem *addr; + + addr = bus->ops->map_bus(bus, devfn, where); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (size == 1) + *val = readb(addr); + else if (size == 2) + *val = readw(addr); + else + *val = readl(addr); + /* + * Fix up PCIe cards that are not scanned properly when the bus number + * is inconsistent between the firmware and kernel scan phases.
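
The quirks above attach to different fixup phases: DECLARE_PCI_FIXUP_EARLY runs while the device header is first parsed, DECLARE_PCI_FIXUP_FINAL late in enumeration after resource assignment, DECLARE_PCI_FIXUP_ENABLE when a driver enables the device, and the _CLASS_ variants match on a class code with a bit width rather than on exact IDs. A minimal quirk of the same shape (IDs hypothetical)::

        /* Sketch: forbid D1/D2 for one device, as the D3 quirks above do. */
        static void example_d3_quirk(struct pci_dev *dev)
        {
        	dev->no_d1d2 = 1;
        }
        DECLARE_PCI_FIXUP_ENABLE(0x1234, 0x5678, example_d3_quirk); /* hypothetical IDs */
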
+ */ + if (*val == 0x0 && where == PCI_VENDOR_ID) { + writel(*val, addr); + *val = readl(addr); + } + + + return PCIBIOS_SUCCESSFUL; +} + #ifdef CONFIG_OF static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) @@ -278,7 +415,7 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) /* LS2K/LS7A accept 8/16/32-bit PCI config operations */ static struct pci_ops loongson_pci_ops = { .map_bus = pci_loongson_map_bus, - .read = pci_generic_config_read, + .read = pci_loongson_config_read, .write = pci_generic_config_write, }; @@ -321,6 +458,7 @@ static int loongson_pci_probe(struct platform_device *pdev) struct device_node *node = dev->of_node; struct pci_host_bridge *bridge; struct resource *regs; + unsigned int num = 0; if (!node) return -ENODEV; @@ -345,7 +483,9 @@ static int loongson_pci_probe(struct platform_device *pdev) } if (priv->data->flags & FLAG_CFG1) { - regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (priv->cfg0_base) + num = 1; + regs = platform_get_resource(pdev, IORESOURCE_MEM, num); if (!regs) dev_info(dev, "missing mem resource for cfg1\n"); else { @@ -402,7 +542,7 @@ const struct pci_ecam_ops loongson_pci_ecam_ops = { .init = loongson_pci_ecam_init, .pci_ops = { .map_bus = pci_loongson_map_bus, - .read = pci_generic_config_read, + .read = pci_loongson_config_read, .write = pci_generic_config_write, } }; diff --git a/drivers/pci/controller/pci-sunway.c b/drivers/pci/controller/pci-sunway.c new file mode 100644 index 0000000000000000000000000000000000000000..036994ffde381f09d502a0c36fc6210f1f3a271c --- /dev/null +++ b/drivers/pci/controller/pci-sunway.c @@ -0,0 +1,805 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include + +void set_devint_wken(int node) +{ + unsigned long val; + + /* enable INTD wakeup */ + val = 0x80; + sw64_io_write(node, DEVINT_WKEN, val); + sw64_io_write(node, DEVINTWK_INTEN, val); +} + +#ifdef CONFIG_UNCORE_JUNZHANG +void set_adr_int(int node) +{ + sw64_io_write(node, ADR_INT_CONFIG, (0x0 << 16 | 0x3f)); + sw64_io_write(node, ADR_CTL, 0xc); +} +#endif + +void set_pcieport_service_irq(int node, int index) +{ + if (IS_ENABLED(CONFIG_PCIE_PME)) + write_piu_ior0(node, index, PMEINTCONFIG, PME_ENABLE_INTD_CORE0); + + if (IS_ENABLED(CONFIG_PCIEAER)) + write_piu_ior0(node, index, AERERRINTCONFIG, AER_ENABLE_INTD_CORE0); +} + +int chip_pcie_configure(struct pci_controller *hose) +{ + struct pci_dev *dev; + struct pci_bus *bus, *top; + struct list_head *next; + unsigned int max_read_size, smallest_max_payload; + int max_payloadsize; + unsigned long rc_index, node; + unsigned long piuconfig0, value; + unsigned int pcie_caps_offset; + unsigned int rc_conf_value; + u16 devctl, new_values; + bool rc_ari_disabled = false, found = false; + unsigned char bus_max_num; + + node = hose->node; + rc_index = hose->index; + smallest_max_payload = read_rc_conf(node, rc_index, RC_EXP_DEVCAP); + smallest_max_payload &= PCI_EXP_DEVCAP_PAYLOAD; + bus_max_num = hose->busn_space->start; + + top = hose->bus; + bus = top; + next = top->devices.next; + + for (;;) { + if (next == &bus->devices) { + /* end of this bus, go up or finish */ + if (bus == top) + break; + + next = bus->self->bus_list.next; + bus = bus->self->bus; + continue; + } + dev = list_entry(next, struct pci_dev, bus_list); + if (dev->subordinate) { + /* this is a pci-pci bridge, do its devices next */ + next = dev->subordinate->devices.next; + bus = dev->subordinate; + } else + next = dev->bus_list.next; + + if (!found) { + if 
(pci_is_root_bus(dev->bus)) { + if (list_empty(&dev->subordinate->devices)) + rc_ari_disabled = true; + } else { + if (!pci_ari_enabled(dev->bus)) { + rc_ari_disabled = true; + found = true; + } + } + } + + if (bus->busn_res.end > bus_max_num) + bus_max_num = bus->busn_res.end; + + /* Query device PCIe capability register */ + pcie_caps_offset = dev->pcie_cap; + if (pcie_caps_offset == 0) + continue; + max_payloadsize = dev->pcie_mpss; + if (max_payloadsize < smallest_max_payload) + smallest_max_payload = max_payloadsize; + } + + if (rc_ari_disabled) { + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL2); + rc_conf_value &= ~PCI_EXP_DEVCTL2_ARI; + write_rc_conf(node, rc_index, RC_EXP_DEVCTL2, rc_conf_value); + } else { + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL2); + rc_conf_value |= PCI_EXP_DEVCTL2_ARI; + write_rc_conf(node, rc_index, RC_EXP_DEVCTL2, rc_conf_value); + } + + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCAP); + rc_conf_value &= PCI_EXP_DEVCAP_PAYLOAD; + max_payloadsize = rc_conf_value; + if (max_payloadsize < smallest_max_payload) + smallest_max_payload = max_payloadsize; + + max_read_size = 0x2; /* Limit to 512B */ + value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL); + value &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); + value |= (max_read_size << 12) | (smallest_max_payload << 5); + write_rc_conf(node, rc_index, RC_EXP_DEVCTL, value); + new_values = (max_read_size << 12) | (smallest_max_payload << 5); + + piuconfig0 = read_piu_ior0(node, rc_index, PIUCONFIG0); + piuconfig0 &= ~(0x7fUL << 9); + if (smallest_max_payload == 0x2) { + piuconfig0 |= (0x20UL << 9); + write_piu_ior0(node, rc_index, PIUCONFIG0, piuconfig0); + } else { + piuconfig0 |= (0x40UL << 9); + write_piu_ior0(node, rc_index, PIUCONFIG0, piuconfig0); + } + + pr_info("Node%ld RC%ld MPSS %luB, MRRS %luB, Piuconfig0 %#lx, ARI %s\n", + node, rc_index, (1UL << smallest_max_payload) << 7, + (1UL << max_read_size) << 7, piuconfig0, + rc_ari_disabled ? "disabled" : "enabled"); + + /* Now, set the max_payload_size for all devices to that value. 
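
The payload and read-request fields programmed above are log2-encoded: a field value v means 128 << v bytes, which is exactly what the (1UL << v) << 7 expressions in the pr_info() expand to, while the << 5 and << 12 shifts place the values into the DEVCTL payload and readrq fields. For instance, smallest_max_payload == 0x2 is 512 bytes, and max_read_size = 0x2 likewise caps MRRS at 512 bytes::

        /* PCI_EXP_DEVCTL payload/readrq field encoding (per the PCIe spec). */
        static inline unsigned long devctl_field_to_bytes(unsigned int v)
        {
        	return 128UL << v;	/* 0 -> 128B, 1 -> 256B, 2 -> 512B, ... */
        }
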
*/ + bus = top; + next = top->devices.next; + for (;;) { + if (next == &bus->devices) { + /* end of this bus, go up or finish */ + if (bus == top) + break; + next = bus->self->bus_list.next; + bus = bus->self->bus; + continue; + } + dev = list_entry(next, struct pci_dev, bus_list); + if (dev->subordinate) { + /* this is a pci-pci bridge, do its devices next */ + next = dev->subordinate->devices.next; + bus = dev->subordinate; + } else + next = dev->bus_list.next; + + pcie_caps_offset = dev->pcie_cap; + if (pcie_caps_offset == 0) + continue; + + pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, &devctl); + devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); + devctl |= new_values; + pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, devctl); + } + + return bus_max_num; +} + +static int check_pci_linkup(unsigned long node, unsigned long index) +{ + unsigned long rc_debug; + + if (is_guest_or_emul()) { + if (node == 0 && index == 0) + return 0; + else + return 1; + } else { + rc_debug = read_piu_ior1(node, index, RCDEBUGINF1); + } + + return !(rc_debug == 0x111); +} + +static void set_rc_piu(unsigned long node, unsigned long index) +{ + unsigned int i __maybe_unused; + unsigned int value; + u32 rc_misc_ctrl; + + if (is_guest_or_emul()) + return; + + /* configure RC, set PCI-E root controller */ + write_rc_conf(node, index, RC_COMMAND, 0x00100007); + write_rc_conf(node, index, RC_PORT_LINK_CTL, 0x1f0020); + write_rc_conf(node, index, RC_EXP_DEVCTL, 0x2850); + write_rc_conf(node, index, RC_EXP_DEVCTL2, 0x6); + write_rc_conf(node, index, RC_ORDER_RULE_CTL, 0x0100); + + /* enable DBI_RO_WR_EN */ + rc_misc_ctrl = read_rc_conf(node, index, RC_MISC_CONTROL_1); + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl | 0x1); + + /* fix up DEVICE_ID_VENDOR_ID register */ + value = (PCI_DEVICE_ID_SW64_ROOT_BRIDGE << 16) | PCI_VENDOR_ID_JN; + write_rc_conf(node, index, RC_VENDOR_ID, value); + + /* set PCI-E root class code */ + value = read_rc_conf(node, index, RC_REVISION_ID); + write_rc_conf(node, index, RC_REVISION_ID, (PCI_CLASS_BRIDGE_HOST << 16) | value); + + /* disable DBI_RO_WR_EN */ + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl); + + write_rc_conf(node, index, RC_PRIMARY_BUS, 0xffffff); + write_piu_ior0(node, index, PIUCONFIG0, PIUCONFIG0_INIT_VAL); + + write_piu_ior1(node, index, PIUCONFIG1, 0x2); + write_piu_ior1(node, index, ERRENABLE, -1); + + /* set DMA offset value PCITODMA_OFFSET */ + write_piu_ior0(node, index, EPDMABAR, PCITODMA_OFFSET); + if (IS_ENABLED(CONFIG_PCI_MSI)) { + write_piu_ior0(node, index, MSIADDR, MSIX_MSG_ADDR); +#ifdef CONFIG_UNCORE_XUELANG + for (i = 0; i < 256; i++) + write_piu_ior0(node, index, MSICONFIG0 + (i << 7), 0); +#endif + } +} + +static void set_intx(unsigned long node, unsigned long index, + unsigned long int_conf) +{ + if (is_guest_or_emul()) + return; + +#if defined(CONFIG_UNCORE_XUELANG) + write_piu_ior0(node, index, INTACONFIG, int_conf | (0x8UL << 10)); + write_piu_ior0(node, index, INTBCONFIG, int_conf | (0x4UL << 10)); + write_piu_ior0(node, index, INTCCONFIG, int_conf | (0x2UL << 10)); + write_piu_ior0(node, index, INTDCONFIG, int_conf | (0x1UL << 10)); +#elif defined(CONFIG_UNCORE_JUNZHANG) + write_piu_ior0(node, index, INTACONFIG, int_conf | (0x1UL << 10)); + write_piu_ior0(node, index, INTBCONFIG, int_conf | (0x2UL << 10)); + write_piu_ior0(node, index, INTCCONFIG, int_conf | (0x4UL << 10)); + write_piu_ior0(node, index, INTDCONFIG, int_conf | (0x8UL << 10)); +#endif +} + +static unsigned long 
get_rc_enable(unsigned long node) +{ + unsigned long rc_enable; + + if (is_guest_or_emul()) + return 1; + + rc_enable = sw64_io_read(node, IO_START); + + return rc_enable; +} + +static int map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return hose->service_irq; + else + return hose->int_irq; +} + +static void hose_init(struct pci_controller *hose) +{ + unsigned long pci_io_base; + + hose->sparse_mem_base = 0; + hose->sparse_io_base = 0; + pci_io_base = IO_BASE | (hose->node << IO_NODE_SHIFT) + | PCI_BASE | (hose->index << IO_RC_SHIFT); + + hose->dense_mem_base = pci_io_base; + hose->dense_io_base = pci_io_base | PCI_LEGACY_IO; + hose->ep_config_space_base = __va(pci_io_base | PCI_EP_CFG); + hose->rc_config_space_base = __va(pci_io_base | PCI_RC_CFG); + + hose->mem_space->start = pci_io_base + PCI_32BIT_MEMIO; + hose->mem_space->end = hose->mem_space->start + PCI_32BIT_MEMIO_SIZE - 1; + hose->mem_space->name = "pci memory space"; + hose->mem_space->flags = IORESOURCE_MEM; + + if (request_resource(&iomem_resource, hose->mem_space) < 0) + pr_err("Failed to request MEM on hose %ld\n", hose->index); + hose->pre_mem_space->start = pci_io_base | PCI_64BIT_MEMIO; + hose->pre_mem_space->end = hose->pre_mem_space->start + PCI_64BIT_MEMIO_SIZE - 1; + hose->pre_mem_space->name = "pci pre mem space"; + hose->pre_mem_space->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_MEM_64; + + if (request_resource(&iomem_resource, hose->pre_mem_space) < 0) + pr_err("Failed to request 64bit MEM on hose %ld\n", hose->index); + hose->io_space->start = pci_io_base | PCI_LEGACY_IO; + hose->io_space->end = hose->io_space->start + PCI_LEGACY_IO_SIZE - 1; + hose->io_space->name = "pci io space"; + hose->io_space->flags = IORESOURCE_IO; + + if (request_resource(&ioport_resource, hose->io_space) < 0) + pr_err("Failed to request IO on hose %ld\n", hose->index); + hose->busn_space->name = "PCI busn"; + hose->busn_space->start = 0xff; + hose->busn_space->end = 0xff; + hose->busn_space->flags = IORESOURCE_BUS; + hose->first_busno = hose->self_busno = hose->busn_space->start; + hose->last_busno = hose->busn_space->end; + + if (is_in_host()) { + if (IS_ENABLED(CONFIG_PCI_MSI)) + memset(hose->piu_msiconfig, 0, 256/8); + } +}; + +static struct sw64_pci_init_ops chip_pci_init_ops = { + .map_irq = map_irq, + .get_rc_enable = get_rc_enable, + .hose_init = hose_init, + .set_rc_piu = set_rc_piu, + .check_pci_linkup = check_pci_linkup, + .set_intx = set_intx, +}; + +void __init setup_chip_pci_ops(void) +{ + sw64_chip_init->pci_init = chip_pci_init_ops; +} + +static unsigned long rc_linkup; +static struct pci_controller *head, **tail = &head; + +static void pci_mark_rc_linkup(unsigned long node, unsigned long index) +{ + set_bit(node * 8 + index, &rc_linkup); +} + +static int pci_get_rc_linkup(unsigned long node, unsigned long index) +{ + return test_bit(node * 8 + index, &rc_linkup); +} + +/** + * Link the specified pci controller to list + */ +extern struct pci_controller *hose_head; +static void pci_link_controller(struct pci_controller *hose) +{ + if (unlikely(!hose)) + return; + + *tail = hose; + tail = &hose->next; + + if (!hose_head) + hose_head = head; +} + +struct pci_controller *bus_num_to_pci_controller(unsigned long bus_num) +{ + struct pci_controller *hose; + + for (hose = head; hose; hose = hose->next) { + if (bus_num >= hose->first_busno && bus_num <= hose->last_busno) + return hose; 
+ } + + return NULL; +} + +struct pci_controller *pci_bus_to_pci_controller(const struct pci_bus *bus) +{ + struct pci_config_window *cfg = NULL; + + if (unlikely(!bus)) + return NULL; + + if (acpi_disabled) + return (struct pci_controller *)(bus->sysdata); + + cfg = (struct pci_config_window *)bus->sysdata; + return (struct pci_controller *)(cfg->priv); +} + +/** + * PCIe Root Complex read config space operations + */ +static int sw64_pcie_read_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data; + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = hose->rc_config_space_base; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc read addr:%px bus %d, devfn %#x, where %#x size=%d\t", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, size); + + if ((uintptr_t)where & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + /** + * Workaround for the sw6a chipset, which only supports scanning + * with devfn = 0; sw6b does not have this limit. + */ + if (unlikely(devfn > 0)) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + data = readl(cfg_iobase + ((where & ~3) << 5)); + + switch (size) { + case 1: + *val = (data >> (8 * (where & 0x3))) & 0xff; + break; + case 2: + *val = (data >> (8 * (where & 0x2))) & 0xffff; + break; + default: + *val = data; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("*val %#x\n", *val); + + return PCIBIOS_SUCCESSFUL; +} + +/** + * PCIe Root Complex write config space operations + */ +int sw64_pcie_write_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data; + u32 shift = 8 * (where & 3); + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = (void *)hose->rc_config_space_base; + + if ((uintptr_t)where & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + switch (size) { + case 1: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xff << shift); + data |= (val & 0xff) << shift; + break; + case 2: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xffff << shift); + data |= (val & 0xffff) << shift; + break; + default: + data = val; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc write addr:%px bus %d, devfn %#x, where %#x *val %#x size %d\n", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, val, size); + + writel(data, cfg_iobase + ((where & ~3) << 5)); + + return PCIBIOS_SUCCESSFUL; +} + +/** + * sw64_pcie_valid_device - check if a valid device is present on bus + * @bus : PCI bus structure + * @devfn: device/function + * + * @return: 'true' if a valid device may be present, 'false' otherwise + */ +static bool sw64_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_in_host()) { + /* Only one device down on each root complex */ + if (bus->number == hose->self_busno && devfn > 0) + return false; + } + + return true; +} + +/** + * sw64_pcie_config_read - read val from config space of PCI host controller or device + * @bus : PCI bus structure + * @devfn: device/function + * @where: offset from base + * @size : size of val + * @val[out]: the value read from PCI host controller or device + * + * @return: a PCIBIOS_* status code + */ +static int sw64_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int ret =
PCIBIOS_DEVICE_NOT_FOUND; + + if (is_guest_or_emul()) + return pci_generic_config_read(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) { + ret = sw64_pcie_read_rc_cfg(bus, devfn, where, size, val); + } else { + if (pci_get_rc_linkup(hose->node, hose->index)) + ret = pci_generic_config_read(bus, devfn, where, size, val); + else + return ret; + } + return ret; +} + +/** + * sw64_pcie_config_write - write val to config space of PCI host controller or device + * @bus : PCI bus structure + * @devfn: device/function + * @where: offset from base + * @size : size of val + * @val : the value to write to the PCI host controller or device + * + * @return: a PCIBIOS_* status code + */ +static int sw64_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_guest_or_emul()) + return pci_generic_config_write(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) + return sw64_pcie_write_rc_cfg(bus, devfn, where, size, val); + else + return pci_generic_config_write(bus, devfn, where, size, val); +} + +/** + * sw64_pcie_map_bus - get configuration base address + * @bus : PCI bus structure + * @devfn: device/function + * @where: offset from base + * + * @return: base address of the config space to be accessed, or NULL + * for an invalid device. + */ +static void __iomem *sw64_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase; + unsigned long relbus; + + if (!sw64_pcie_valid_device(bus, devfn)) + return NULL; + + /** + * ECAM of the Sunway PCI host controller is slightly + * different from the standard: + * [31:24]: bus number + * [23:19]: device number + * [18:16]: function number + * [15:12]: reserved + * [11:8] : extended config space registers + * [7:2] : legacy config space registers + */ + relbus = (bus->number << 24) | (devfn << 16) | where; + + cfg_iobase = hose->ep_config_space_base + relbus; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("addr:%px bus %d, devfn %d, where %d\n", + cfg_iobase, bus->number, devfn, where); + return cfg_iobase; +} + +#ifdef CONFIG_ACPI +int sw64_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return map_irq(dev, slot, pin); +} + +static void setup_intx_irqs(struct pci_controller *hose) +{ + unsigned long int_conf, node, val_node; + unsigned long index, irq; + int rcid; + + node = hose->node; + index = hose->index; + + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 2, val_node); + WARN_ON(irq < 0); + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq); + irq_set_status_flags(irq, IRQ_LEVEL); + hose->int_irq = irq; + irq_set_chip_and_handler(irq + 1, &dummy_irq_chip, handle_level_irq); + hose->service_irq = irq + 1; + rcid = cpu_to_rcid(0); + + pr_info_once("INTx are directed to node %d core %d.\n", + ((rcid >> 6) & 0x3), (rcid & 0x1f)); + int_conf = 1UL << 62 | rcid; /* rebase all intx on the first logical cpu */ + + set_intx(node, index, int_conf); + + set_pcieport_service_irq(node, index); +} + +static int sw64_pci_prepare_controller(struct pci_controller *hose, + struct acpi_device *adev) +{ + unsigned long long index, node; + unsigned long long rc_config_base_addr; + unsigned long long
pci_io_base_addr; + unsigned long long ep_io_base_addr; + acpi_status rc; + + /* Get node from ACPI namespace */ + node = acpi_get_node(adev->handle); + if (node == NUMA_NO_NODE) { + dev_err(&adev->dev, "unable to get node ID\n"); + return -EEXIST; + } + + /* Get index from ACPI namespace */ + rc = acpi_evaluate_integer(adev->handle, "INDX", NULL, &index); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve INDX\n"); + return -EEXIST; + } + + /** + * Get the Root Complex config space base address. + * + * For sw64, the Root Complex config space base address is different + * from the Endpoint config space base address. Use the MCFG table to + * pass the Endpoint config space base address, and define the Root + * Complex config space base address ("RCCB") separately in the ACPI + * namespace. + */ + rc = acpi_evaluate_integer(adev->handle, "RCCB", NULL, &rc_config_base_addr); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve RCCB\n"); + return -EEXIST; + } + + /* Get Root Complex I/O space base addr from ACPI namespace */ + rc = acpi_evaluate_integer(adev->handle, "RCIO", NULL, &pci_io_base_addr); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve RCIO\n"); + return -EEXIST; + } + + /* Get Endpoint I/O space base addr from ACPI namespace */ + rc = acpi_evaluate_integer(adev->handle, "EPIO", NULL, &ep_io_base_addr); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve EPIO\n"); + return -EEXIST; + } + + hose->iommu_enable = false; + hose->index = index; + hose->node = node; + + hose->sparse_mem_base = 0; + hose->sparse_io_base = 0; + hose->dense_mem_base = pci_io_base_addr; + hose->dense_io_base = ep_io_base_addr; + + hose->rc_config_space_base = __va(rc_config_base_addr); + + hose->first_busno = 0xff; + hose->last_busno = 0xff; + hose->self_busno = 0xff; + + hose->need_domain_info = 0; + +#if IS_ENABLED(CONFIG_PCI_MSI) + if (is_in_host()) + memset(hose->piu_msiconfig, 0, 256 / 8); /* 256 bits bitmap */ +#endif + + /** + * There are two prerequisites for a Sunway Root Complex + * to work: + * 1. the Root Complex is enabled + * 2. the Root Complex link is up + */ + set_rc_piu(hose->node, hose->index); + if (check_pci_linkup(hose->node, hose->index)) { + /** + * Root Complex link up failed. + * This usually means that there is no device in the slot. + */ + dev_info(&adev->dev, "node %ld RC %ld: failed to link up\n", + hose->node, hose->index); + } else { + pci_mark_rc_linkup(hose->node, hose->index); + dev_info(&adev->dev, "node %ld RC %ld: successfully linked up\n", + hose->node, hose->index); + } + + setup_intx_irqs(hose); + + pci_link_controller(hose); + + return 0; +} + +/** + * Use the info from ACPI to init pci_controller + */ +static int sw64_pci_ecam_init(struct pci_config_window *cfg) +{ + struct pci_controller *hose = NULL; + struct device *dev = cfg->parent; + struct acpi_device *adev = to_acpi_device(dev); + phys_addr_t mcfg_addr; + int ret; + + /** + * First, check whether the Root Complex is enabled. + * If the Root Complex is disabled, there's no need to continue. + * + * In the ACPI namespace, the _STA method indicates + * whether the Root Complex is enabled. + * + * The _STA has been checked when creating the acpi_device. + * Double check here to get the latest hardware status.
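
Given the nonstandard Sunway config layout described in sw64_pcie_map_bus() earlier, a register's offset is assembled with one shift-and-or. A worked example for bus 1, device 0, function 0, register 0x10::

        /* Worked example of the sw64_pcie_map_bus() address computation. */
        unsigned long relbus = (1 << 24) | (PCI_DEVFN(0, 0) << 16) | 0x10;
        /* relbus == 0x01000010; the MMIO address to access is
         * hose->ep_config_space_base + relbus. */
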
+ */ + ret = acpi_bus_get_status(adev); + if (ret) { + dev_err(dev, "unable to retrieve _STA\n"); + return ret; + } + + if (!adev->status.present) { + dev_err(dev, "RC is not enabled\n"); + return -ENODEV; + } + + hose = kzalloc(sizeof(*hose), GFP_KERNEL); + if (!hose) { + dev_err(dev, "failed to allocate pci_controller\n"); + return -ENOMEM; + } + + /* Get Endpoint config space base address from MCFG table */ + mcfg_addr = cfg->res.start - (cfg->busr.start << cfg->ops->bus_shift); + + /** + * "__va(mcfg_addr)" is equal to "cfg->win", so we can also use + * "hose->ep_config_space_base = cfg->win" here + */ + hose->ep_config_space_base = __va(mcfg_addr); + + /* Init pci_controller */ + ret = sw64_pci_prepare_controller(hose, adev); + if (ret) { + kfree(hose); + dev_err(&adev->dev, "failed to init pci controller\n"); + return ret; + } + + cfg->priv = (void *)hose; + + return 0; +} + +const struct pci_ecam_ops sw64_pci_ecam_ops = { + .bus_shift = 24, + .init = sw64_pci_ecam_init, + .pci_ops = { + .map_bus = sw64_pcie_map_bus, + .read = sw64_pcie_config_read, + .write = sw64_pcie_config_write, + } +}; +#endif diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index 053bb9fac6e3e1e5b83158e3ad54f70acc233d5b..205e6aaaa032bd28bf2df2d9a3ba9891ba740262 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -408,12 +408,38 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, return ret; } +#ifdef CONFIG_LOONGARCH +#include + +static unsigned int pci_irq_numbers = 32; + +static int __init pci_irq_limit(char *str) +{ + get_option(&str, &pci_irq_numbers); + + if (pci_irq_numbers == 0) + pci_irq_numbers = 32; + return 0; +} + +early_param("pci_irq_limit", pci_irq_limit); +#endif + int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, struct irq_affinity *affd) { int nvec; int rc; +#ifdef CONFIG_LOONGARCH + if (!disable_pci_irq_limit) { + if (maxvec > 32) { + maxvec = pci_irq_numbers; + minvec = min_t(int, pci_irq_numbers, minvec); + } + } +#endif + if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0) return -EINVAL; @@ -788,6 +814,15 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int { int hwsize, rc, nvec = maxvec; +#ifdef CONFIG_LOONGARCH + if (!disable_pci_irq_limit) { + if (maxvec > 32) { + nvec = pci_irq_numbers; + minvec = min_t(int, pci_irq_numbers, minvec); + } + } +#endif + if (maxvec < minvec) return -ERANGE; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 9c59bf03d6579f5527ae7da35030cc88962c7cf1..b699839a7d4f84b914235d70af697860134cf326 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -526,7 +526,11 @@ static void pci_device_shutdown(struct device *dev) * If it is not a kexec reboot, firmware will hit the PCI * devices with big hammer and stop their DMA any way.
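
The pci_irq_limit= early parameter added above clamps large MSI/MSI-X allocations on LoongArch (unless disable_pci_irq_limit is set): requests for more than 32 vectors are cut down to the configured limit. The effect on a typical request, with pci_irq_limit=16 on the kernel command line (values illustrative)::

        /* Sketch of the clamp in __pci_enable_msi_range() above: a driver
         * asking for 8..64 vectors is limited to at most 16. */
        maxvec = pci_irq_numbers;			/* 64 -> 16 */
        minvec = min_t(int, pci_irq_numbers, minvec);	/* 8 stays 8 */
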
*/ +#ifdef CONFIG_LOONGARCH + if (kexec_in_progress && !pci_is_bridge(pci_dev) && (pci_dev->current_state <= PCI_D3hot)) +#else if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot)) +#endif pci_clear_master(pci_dev); } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 095fa1910d36db2a82c6d3d1a316076a95b18877..4a5f5c9d189d15671d4daa7a6bd301a5138a8378 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -32,6 +32,9 @@ #include #include #include +#ifdef CONFIG_MACH_LOONGSON64 +#include +#endif #include "pci.h" DEFINE_MUTEX(pci_slot_mutex); @@ -172,6 +175,15 @@ static bool pci_bridge_d3_disable; /* Force bridge_d3 for all PCIe ports */ static bool pci_bridge_d3_force; +#ifdef CONFIG_MACH_LOONGSON64 + +#ifndef CONFIG_PM_SLEEP +suspend_state_t pm_suspend_target_state; +#define pm_suspend_target_state (PM_SUSPEND_ON) +#endif + +#endif + static int __init pcie_port_pm_setup(char *str) { if (!strcmp(str, "off")) @@ -4834,7 +4846,11 @@ int pcie_flr(struct pci_dev *dev) * 100ms, but may silently discard requests while the FLR is in * progress. Wait 100ms before trying to access the device. */ +#ifdef CONFIG_SW64 + msleep(1000); +#else msleep(100); +#endif return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS); } @@ -5229,6 +5245,69 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) PCIE_RESET_READY_POLL_MS - delay); } +static void pci_save_yitian710_regs(struct pci_dev *dev, + struct pci_saved_regs *saved) +{ + int i; + + /* nothing to do unless this is a YiTian 710 */ + if (!dev->broken_bus_reset) + return; + + /* save pcie type1 config space header */ + for (i = 0; i < 16; i++) + pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); + + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &saved->dev_ctrl); + pcie_capability_read_word(dev, PCI_EXP_RTCTL, &saved->root_ctrl); + pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &saved->dev_ctrl2); + + if (dev->acs_cap) + pci_read_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, + &saved->acs_cap_ctrl); + +#ifdef CONFIG_PCIEAER + if (dev->aer_cap) + pci_read_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND, + &saved->root_err_cmd); +#endif + + pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &saved->slot_ctrl); +} + +static void pci_restore_yitian710_regs(struct pci_dev *dev, + struct pci_saved_regs *saved) +{ + if (!dev->broken_bus_reset) + return; + + /* restore pcie type1 config space header */ + pci_restore_config_space_range(dev, 0, 15, 0, false); + + /* + * restore Device Control, Root Control Register and Device Control 2 + * in PCI Express Capability + */ + pcie_capability_write_word(dev, PCI_EXP_DEVCTL, saved->dev_ctrl); + pcie_capability_write_word(dev, PCI_EXP_RTCTL, saved->root_ctrl); + pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, saved->dev_ctrl2); + + /* restore ACS Capability Register */ + if (dev->acs_cap) + pci_write_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, + saved->acs_cap_ctrl); + +#ifdef CONFIG_PCIEAER + /* restore AER Root Error Command Register */ + if (dev->aer_cap) + pci_write_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND, + saved->root_err_cmd); +#endif + + /* restore Slot Control Register */ + pcie_capability_write_word(dev, PCI_EXP_SLTCTL, saved->slot_ctrl); +} + void pci_reset_secondary_bus(struct pci_dev *dev) { u16 ctrl; @@ -5261,9 +5340,18 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) */ int pci_bridge_secondary_bus_reset(struct pci_dev *dev) { + int rc; + struct pci_saved_regs saved = { }; + + /* save key regs for yitian710 during bus reset */ + pci_save_yitian710_regs(dev, &saved); + pcibios_reset_secondary_bus(dev); + rc = pci_bridge_wait_for_secondary_bus(dev, "bus reset"); - return pci_bridge_wait_for_secondary_bus(dev, "bus reset"); + /* restore regs for yitian710 */ + pci_restore_yitian710_regs(dev, &saved); + return rc; } EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); @@ -6223,8 +6311,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) { u16 v; int ret; +#ifdef CONFIG_MACH_LOONGSON64 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); - +#endif if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) return -EINVAL; @@ -6242,7 +6331,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) v = (ffs(rq) - 8) << 12; - if (bridge->no_inc_mrrs) { +#ifdef CONFIG_MACH_LOONGSON64 + if (pm_suspend_target_state == PM_SUSPEND_ON && + bridge->no_inc_mrrs) { int max_mrrs = pcie_get_readrq(dev); if (rq > max_mrrs) { @@ -6250,6 +6341,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) return -EINVAL; } } +#endif ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, v); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d69a17947ffcefb47cb09b0fc5f5d657ba015de1..f4b3d49ff8aa052a6c729a16fb93fe3e50e47e55 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -314,6 +314,15 @@ struct pci_sriov { u16 subsystem_device; /* VF subsystem device */ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ bool drivers_autoprobe; /* Auto probing of VFs by driver */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #ifdef CONFIG_PCI_DOE diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 0aef6dc055b9220aec5b5d674df5ad735e993fc1..44e952e1a0ab0ca8d6cc25142e83b14005f557e2 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -423,17 +423,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) } } -static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, - u32 clear, u32 set) -{ - u32 val; - - pci_read_config_dword(pdev, pos, &val); - val &= ~clear; - val |= set; - pci_write_config_dword(pdev, pos, val); -} - /* Calculate L1.2 PM substate timing parameters */ static void aspm_calc_l12_info(struct pcie_link_state *link, u32 parent_l1ss_cap, u32 child_l1ss_cap) @@ -494,10 +483,12 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK; if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); } /* Program T_POWER_ON times in both ports */ @@ -505,22 +496,26 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2); /* Program Common_Mode_Restore_Time in upstream device */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); + PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); /* Program LTR_L1.2_THRESHOLD time in both ports */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | -
PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0, - pl1_2_enables); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0, - cl1_2_enables); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, 0, + pl1_2_enables); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, 0, + cl1_2_enables); } } @@ -680,10 +675,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) */ /* Disable all L1 substates */ - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); /* * If needed, disable L1, and it gets enabled later * in pcie_config_aspm_link(). @@ -706,10 +701,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); } static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 7e84e472b3383c3c27e9ab55d6a54a02fd7be19a..5314ab5e9dc696c9d8376d9f4aa4024dbfb404b9 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -142,6 +142,7 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; flags |= IORESOURCE_MEM; + if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) flags |= IORESOURCE_PREFETCH; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index fd35ad0648a07b6165d8cdc162149bfdb87836c0..7a6db6e832008b6da251472f73b00c12cc03dc96 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -396,6 +396,50 @@ static void quirk_tigerpoint_bm_sts(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); #endif +static void loongson_pcie_msi_quirk(struct pci_dev *dev) +{ + u16 val; + u16 class; + + class = dev->class >> 8; + if (class == PCI_CLASS_BRIDGE_HOST) { + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val); + val |= PCI_MSI_FLAGS_ENABLE; + pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val); + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a59, loongson_pcie_msi_quirk); + +#define DEV_PCIE_PORT_4 0x7a39 +#define DEV_PCIE_PORT_5 0x7a49 +#define DEV_PCIE_PORT_6 0x7a59 +#define DEV_PCIE_PORT_7 0x7a69 +static void loongson_d3_and_link_quirk(struct pci_dev *dev) +{ + struct pci_bus *bus = 
dev->bus; + struct pci_dev *bridge; + static const struct pci_device_id bridge_devids[] = { + { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_4) }, + { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_5) }, + { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_6) }, + { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_7) }, + { 0, }, + }; + + /* look for the matching bridge */ + while (!pci_is_root_bus(bus)) { + bridge = bus->self; + bus = bus->parent; + if (bridge && pci_match_id(bridge_devids, bridge)) { + dev->dev_flags |= (PCI_DEV_FLAGS_NO_D3 | + PCI_DEV_FLAGS_NO_LINK_SPEED_CHANGE); + dev->no_d1d2 = 1; + break; + } + } +} +DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_d3_and_link_quirk); + /* Chipsets where PCI->PCI transfers vanish or hang */ static void quirk_nopcipci(struct pci_dev *dev) { @@ -4444,6 +4488,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000, quirk_bridge_cavm_thrx2_pcie_root); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084, quirk_bridge_cavm_thrx2_pcie_root); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON, 0x3c09, + quirk_bridge_cavm_thrx2_pcie_root); /* * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) @@ -4556,6 +4602,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); +#ifndef CONFIG_SW64 /* * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same * values for the Attribute as were supplied in the header of the @@ -4612,6 +4659,7 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, quirk_chelsio_T5_disable_root_port_attributes); +#endif /* * pci_acs_ctrl_enabled - compare desired ACS controls with those provided @@ -5111,6 +5159,13 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, +#ifdef CONFIG_ARCH_PHYTIUM + /* PLX switches (vendor ID 0x10b5) need the ACS quirk on Phytium CPUs */ + { 0x10b5, PCI_ANY_ID, pci_quirk_xgene_acs }, + /* the root complex has vendor ID 0x17cd on Phytium CPUs */ + { 0x17cd, PCI_ANY_ID, pci_quirk_xgene_acs }, +#endif + /* Broadcom multi-function device */ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs }, @@ -5164,6 +5219,8 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, /* Wangxun nics */ { PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x3c09, pci_quirk_xgene_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x3c19, pci_quirk_xgene_acs }, { 0 } }; @@ -6274,3 +6331,18 @@ static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev) pdev->d3cold_delay = 1000; } DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec); +/* + * On the Alibaba YiTian 710 SoC, the hardware always clears the PCIe config + * space and some key registers when the secondary bus is reset. As a result, + * the OS cannot recover from a fatal PCIe error, which ultimately causes an + * unexpected system error. + * + * Luckily, a simple save/restore of these registers across the bus reset + * fixes the issue.
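
The save/restore pair is exercised whenever the port undergoes a secondary bus reset. For testing, the generic sysfs reset interface can force that path by selecting the bus reset method on a device below the port (BDF illustrative)::

        # echo bus > /sys/bus/pci/devices/0000:01:00.0/reset_method
        # echo 1 > /sys/bus/pci/devices/0000:01:00.0/reset
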
+ */ +static void quirk_save_yitian710_regs(struct pci_dev *dev) +{ + dev->broken_bus_reset = 1; +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ALIBABA, 0x8000, + PCI_CLASS_BRIDGE_PCI, 8, quirk_save_yitian710_regs); diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 273d67ecf6d2530f0f31c8dd393b31b1fde560ee..ec6e0d9194a1c577c1378444470a23ff614fc101 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -217,6 +217,13 @@ config MARVELL_CN10K_DDR_PMU Enable perf support for Marvell DDR Performance monitoring event on CN10K platform. +config DWC_PCIE_PMU + tristate "Synopsys DesignWare PCIe PMU" + depends on PCI + help + Enable perf support for Synopsys DesignWare PCIe PMU performance + monitoring events on platforms including the Alibaba YiTian 710. + source "drivers/perf/arm_cspmu/Kconfig" source "drivers/perf/amlogic/Kconfig" diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 16b3ec4db916d0bcb5f3895a82c7d9b1a77e18a2..a06338e3401c9f6ebbe9593a5176e220f760a357 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o +obj-$(CONFIG_DWC_PCIE_PMU) += dwc_pcie_pmu.o obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/ obj-$(CONFIG_MESON_DDR_PMU) += amlogic/ obj-$(CONFIG_CXL_PMU) += cxl_pmu.o diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 7bd1733d79770319039c75c10ef64c67079599d2..6f5c2739fe66c74700f3853067312131a1a1a1a7 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -2435,6 +2435,7 @@ static int arm_cmn_probe(struct platform_device *pdev) struct arm_cmn *cmn; const char *name; static atomic_t id; + struct resource *cfg; int err, rootnode, this_id; cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL); @@ -2449,7 +2450,16 @@ static int arm_cmn_probe(struct platform_device *pdev) rootnode = arm_cmn600_acpi_probe(pdev, cmn); } else { rootnode = 0; - cmn->base = devm_platform_ioremap_resource(pdev, 0); + + /* + * Avoid registering the resource, as the PMU's registers are + * scattered through the CMN and may appear on either side of + * registers for other 'devices' (e.g. the MPAM MSC controls). + */ + cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!cfg) + return -EINVAL; + cmn->base = devm_ioremap(&pdev->dev, cfg->start, resource_size(cfg)); if (IS_ERR(cmn->base)) return PTR_ERR(cmn->base); if (cmn->part == PART_CMN600) diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c new file mode 100644 index 0000000000000000000000000000000000000000..9cbea9675e21a07f6369a532ed87266a19b5ebc9 --- /dev/null +++ b/drivers/perf/dwc_pcie_pmu.c @@ -0,0 +1,786 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe PMU driver + * + * Copyright (C) 2021-2023 Alibaba Inc. + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/perf_event.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/smp.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#define DWC_PCIE_VSEC_RAS_DES_ID 0x02 +#define DWC_PCIE_EVENT_CNT_CTL 0x8 + +/* + * Event Counter Data Select includes two parts: + * - 27-24: Group number (4-bit: 0..0x7) + * - 23-16: Event number (8-bit: 0..0x13) within the Group + * + * Put them together as in TRM.
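
Events are selected through the config bitfields defined above: eventid lives in config[15:0], type in config[19:16] and lane in config[27:20]. With the sysfs aliases that follow, a Group #1 bandwidth counter can be opened by name; the PMU instance name is derived from the root port's BDF, so the suffix below is illustrative::

        # perf stat -a -e dwc_rootport_3018/rx_pcie_tlp_data_payload/ -- sleep 1
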
+ */ +#define DWC_PCIE_CNT_EVENT_SEL GENMASK(27, 16) +#define DWC_PCIE_CNT_LANE_SEL GENMASK(11, 8) +#define DWC_PCIE_CNT_STATUS BIT(7) +#define DWC_PCIE_CNT_ENABLE GENMASK(4, 2) +#define DWC_PCIE_PER_EVENT_OFF 0x1 +#define DWC_PCIE_PER_EVENT_ON 0x3 +#define DWC_PCIE_EVENT_CLEAR GENMASK(1, 0) +#define DWC_PCIE_EVENT_PER_CLEAR 0x1 + +#define DWC_PCIE_EVENT_CNT_DATA 0xC + +#define DWC_PCIE_TIME_BASED_ANAL_CTL 0x10 +#define DWC_PCIE_TIME_BASED_REPORT_SEL GENMASK(31, 24) +#define DWC_PCIE_TIME_BASED_DURATION_SEL GENMASK(15, 8) +#define DWC_PCIE_DURATION_MANUAL_CTL 0x0 +#define DWC_PCIE_DURATION_1MS 0x1 +#define DWC_PCIE_DURATION_10MS 0x2 +#define DWC_PCIE_DURATION_100MS 0x3 +#define DWC_PCIE_DURATION_1S 0x4 +#define DWC_PCIE_DURATION_2S 0x5 +#define DWC_PCIE_DURATION_4S 0x6 +#define DWC_PCIE_DURATION_4US 0xFF +#define DWC_PCIE_TIME_BASED_TIMER_START BIT(0) +#define DWC_PCIE_TIME_BASED_CNT_ENABLE 0x1 + +#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW 0x14 +#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH 0x18 + +/* Event attributes */ +#define DWC_PCIE_CONFIG_EVENTID GENMASK(15, 0) +#define DWC_PCIE_CONFIG_TYPE GENMASK(19, 16) +#define DWC_PCIE_CONFIG_LANE GENMASK(27, 20) + +#define DWC_PCIE_EVENT_ID(event) FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config) +#define DWC_PCIE_EVENT_TYPE(event) FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config) +#define DWC_PCIE_EVENT_LANE(event) FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config) + +enum dwc_pcie_event_type { + DWC_PCIE_TIME_BASE_EVENT, + DWC_PCIE_LANE_EVENT, + DWC_PCIE_EVENT_TYPE_MAX, +}; + +#define DWC_PCIE_LANE_EVENT_MAX_PERIOD GENMASK_ULL(31, 0) +#define DWC_PCIE_MAX_PERIOD GENMASK_ULL(63, 0) + +struct dwc_pcie_pmu { + struct pmu pmu; + struct pci_dev *pdev; /* Root Port device */ + u16 ras_des_offset; + u32 nr_lanes; + + struct hlist_node cpuhp_node; + struct perf_event *event[DWC_PCIE_EVENT_TYPE_MAX]; + int on_cpu; +}; + +#define to_dwc_pcie_pmu(p) (container_of(p, struct dwc_pcie_pmu, pmu)) + +static int dwc_pcie_pmu_hp_state; +static struct list_head dwc_pcie_dev_info_head = + LIST_HEAD_INIT(dwc_pcie_dev_info_head); +static bool notify; + +struct dwc_pcie_dev_info { + struct platform_device *plat_dev; + struct pci_dev *pdev; + struct list_head dev_node; +}; + +struct dwc_pcie_vendor_id { + int vendor_id; +}; + +static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = { + {.vendor_id = PCI_VENDOR_ID_ALIBABA }, + {.vendor_id = PCI_VENDOR_ID_AMPERE }, + {.vendor_id = PCI_VENDOR_ID_QCOM }, + {} /* terminator */ +}; + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu)); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *dwc_pcie_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group dwc_pcie_cpumask_attr_group = { + .attrs = dwc_pcie_pmu_cpumask_attrs, +}; + +struct dwc_pcie_format_attr { + struct device_attribute attr; + u64 field; + int config; +}; + +PMU_FORMAT_ATTR(eventid, "config:0-15"); +PMU_FORMAT_ATTR(type, "config:16-19"); +PMU_FORMAT_ATTR(lane, "config:20-27"); + +static struct attribute *dwc_pcie_format_attrs[] = { + &format_attr_type.attr, + &format_attr_eventid.attr, + &format_attr_lane.attr, + NULL, +}; + +static struct attribute_group dwc_pcie_format_attrs_group = { + .name = "format", + .attrs = dwc_pcie_format_attrs, +}; + +struct dwc_pcie_event_attr { + struct 
device_attribute attr; + enum dwc_pcie_event_type type; + u16 eventid; + u8 lane; +}; + +static ssize_t dwc_pcie_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dwc_pcie_event_attr *eattr; + + eattr = container_of(attr, typeof(*eattr), attr); + + if (eattr->type == DWC_PCIE_LANE_EVENT) + return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n", + eattr->eventid, eattr->type); + else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT) + return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n", + eattr->eventid, eattr->type); + + return 0; +} + +#define DWC_PCIE_EVENT_ATTR(_name, _type, _eventid, _lane) \ + (&((struct dwc_pcie_event_attr[]) {{ \ + .attr = __ATTR(_name, 0444, dwc_pcie_event_show, NULL), \ + .type = _type, \ + .eventid = _eventid, \ + .lane = _lane, \ + }})[0].attr.attr) + +#define DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(_name, _eventid) \ + DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_TIME_BASE_EVENT, _eventid, 0) +#define DWC_PCIE_PMU_LANE_EVENT_ATTR(_name, _eventid) \ + DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_LANE_EVENT, _eventid, 0) + +static struct attribute *dwc_pcie_pmu_time_event_attrs[] = { + /* Group #0 */ + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(one_cycle, 0x00), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_L0S, 0x01), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(RX_L0S, 0x02), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L0, 0x03), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1, 0x04), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09), + + /* Group #1 */ + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_pcie_tlp_data_payload, 0x20), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_pcie_tlp_data_payload, 0x21), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_ccix_tlp_data_payload, 0x22), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_ccix_tlp_data_payload, 0x23), + + /* + * Leave it to the user to specify the lane ID to avoid generating + * a list of hundreds of events. 
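
Lane events therefore show up with an unresolved "lane=?" in their sysfs alias, and the user has to supply the lane number explicitly, for example (instance name and lane illustrative)::

        # perf stat -a -e dwc_rootport_3018/tx_memory_write,lane=1/ -- sleep 1
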
+ */ + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nullified_tlp, 0x604), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nullified_tlp, 0x605), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tlp, 0x606), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_read, 0x703), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_write, 0x704), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_read, 0x705), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_without_data, 0x706), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_with_data, 0x707), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_message_tlp, 0x708), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_atomic, 0x709), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_tlp_with_prefix, 0x70A), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_write, 0x70B), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_read, 0x70C), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_write, 0x70F), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_read, 0x710), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_without_data, 0x711), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_with_data, 0x712), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_message_tlp, 0x713), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_atomic, 0x714), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_tlp_with_prefix, 0x715), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ccix_tlp, 0x716), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ccix_tlp, 0x717), + NULL +}; + +static const struct attribute_group dwc_pcie_event_attrs_group = { + .name = "events", + .attrs = dwc_pcie_pmu_time_event_attrs, +}; + +static const struct attribute_group *dwc_pcie_attr_groups[] = { + &dwc_pcie_event_attrs_group, + &dwc_pcie_format_attrs_group, + &dwc_pcie_cpumask_attr_group, + NULL +}; + +static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu, + bool enable) +{ + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + + if (enable) + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON); + else + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF); +} + +static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu, + bool enable) +{ + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, + DWC_PCIE_TIME_BASED_TIMER_START, enable); +} + +static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 val; + + pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val); + + return val; +} + +static u64 dwc_pcie_pmu_read_time_based_counter(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + int event_id = DWC_PCIE_EVENT_ID(event); + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 lo, hi, ss; + u64 val; + + /* + * The 64-bit value of the data counter is spread across two + * registers that are not synchronized. 
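
The loop that follows is the classic high/low/high technique for reading a 64-bit counter exposed as two unsynchronized 32-bit halves: keep re-reading until the high word is stable, so a carry between the halves can never be observed torn. The same pattern in isolation::

        /* Generic sketch of the torn-read avoidance used below. */
        static u64 read_split64(u32 (*rd_hi)(void), u32 (*rd_lo)(void))
        {
        	u32 hi, lo, snap;

        	hi = rd_hi();
        	do {
        		snap = hi;
        		lo = rd_lo();
        		hi = rd_hi();
        	} while (hi != snap);

        	return ((u64)hi << 32) | lo;
        }
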
In order to read them + * atomically, ensure that the high 32 bits match before and after + * reading the low 32 bits. + */ + pci_read_config_dword(pdev, + ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, &hi); + do { + /* snapshot the high 32 bits */ + ss = hi; + + pci_read_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW, + &lo); + pci_read_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, + &hi); + } while (hi != ss); + + val = ((u64)hi << 32) | lo; + /* + * The Group#1 event measures the amount of data processed in 16-byte + * units. Simplify the end-user interface by multiplying the counter + * at the point of read. + */ + if (event_id >= 0x20 && event_id <= 0x23) + val *= 16; + + return val; +} + +static void dwc_pcie_pmu_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + u64 delta, prev, now = 0; + + do { + prev = local64_read(&hwc->prev_count); + + if (type == DWC_PCIE_LANE_EVENT) + now = dwc_pcie_pmu_read_lane_event_counter(event); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + now = dwc_pcie_pmu_read_time_based_counter(event); + + } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); + + delta = (now - prev) & DWC_PCIE_MAX_PERIOD; + /* 32-bit counter for Lane Event Counting */ + if (type == DWC_PCIE_LANE_EVENT) + delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD; + + local64_add(delta, &event->count); +} + +static int dwc_pcie_pmu_event_init(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + struct perf_event *sibling; + u32 lane; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* We don't support sampling */ + if (is_sampling_event(event)) + return -EINVAL; + + /* We cannot support task bound events */ + if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + if (event->group_leader != event && + !is_software_event(event->group_leader)) + return -EINVAL; + + for_each_sibling_event(sibling, event->group_leader) { + if (sibling->pmu != event->pmu && !is_software_event(sibling)) + return -EINVAL; + } + + if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX) + return -EINVAL; + + if (type == DWC_PCIE_LANE_EVENT) { + lane = DWC_PCIE_EVENT_LANE(event); + if (lane < 0 || lane >= pcie_pmu->nr_lanes) + return -EINVAL; + } + + event->cpu = pcie_pmu->on_cpu; + + return 0; +} + +static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + + hwc->state = 0; + local64_set(&hwc->prev_count, 0); + + if (type == DWC_PCIE_LANE_EVENT) + dwc_pcie_pmu_lane_event_enable(pcie_pmu, true); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true); +} + +static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + struct hw_perf_event *hwc = &event->hw; + + if (event->hw.state & PERF_HES_STOPPED) + return; + + if (type == DWC_PCIE_LANE_EVENT) + dwc_pcie_pmu_lane_event_enable(pcie_pmu, false); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false); + + dwc_pcie_pmu_event_update(event); + 
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ struct hw_perf_event *hwc = &event->hw;
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int lane = DWC_PCIE_EVENT_LANE(event);
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ u32 ctrl;
+
+ /* There is only one counter per event type; fail if it is already in use. */
+ if (pcie_pmu->event[type])
+ return -ENOSPC;
+
+ pcie_pmu->event[type] = event;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (type == DWC_PCIE_LANE_EVENT) {
+ /* EVENT_COUNTER_DATA_REG must be cleared manually */
+ ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
+ FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF) |
+ FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
+ } else if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ /*
+ * TIME_BASED_ANAL_DATA_REG is a 64-bit register, so it can
+ * safely be used with any manually controlled duration. It is
+ * cleared when the next measurement starts.
+ */
+ ctrl = FIELD_PREP(DWC_PCIE_TIME_BASED_REPORT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_TIME_BASED_DURATION_SEL,
+ DWC_PCIE_DURATION_MANUAL_CTL) |
+ DWC_PCIE_TIME_BASED_CNT_ENABLE;
+ pci_write_config_dword(
+ pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, ctrl);
+ }
+
+ if (flags & PERF_EF_START)
+ dwc_pcie_pmu_event_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+
+ dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE);
+ perf_event_update_userpage(event);
+ pcie_pmu->event[type] = NULL;
+}
+
+static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(dwc_pcie_pmu_hp_state, hotplug_node);
+}
+
+/*
+ * Find the cached RAS DES capability device info bound to a PCI device.
+ * @pdev: The PCI device.
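+ * Return: the matching dwc_pcie_dev_info, or NULL if @pdev was never registered.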
+ */ +static struct dwc_pcie_dev_info *dwc_pcie_find_dev_info(struct pci_dev *pdev) +{ + struct dwc_pcie_dev_info *dev_info; + + list_for_each_entry(dev_info, &dwc_pcie_dev_info_head, dev_node) + if (dev_info->pdev == pdev) + return dev_info; + + return NULL; +} + +static void dwc_pcie_unregister_pmu(void *data) +{ + struct dwc_pcie_pmu *pcie_pmu = data; + + perf_pmu_unregister(&pcie_pmu->pmu); +} + +static bool dwc_pcie_match_des_cap(struct pci_dev *pdev) +{ + const struct dwc_pcie_vendor_id *vid; + u16 vsec = 0; + u32 val; + + if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)) + return false; + + for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) { + vsec = pci_find_vsec_capability(pdev, vid->vendor_id, + DWC_PCIE_VSEC_RAS_DES_ID); + if (vsec) + break; + } + if (!vsec) + return false; + + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + if (PCI_VNDR_HEADER_REV(val) != 0x04) + return false; + + pci_dbg(pdev, + "Detected PCIe Vendor-Specific Extended Capability RAS DES\n"); + return true; +} + +static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info) +{ + platform_device_unregister(dev_info->plat_dev); + list_del(&dev_info->dev_node); + kfree(dev_info); +} + +static int dwc_pcie_register_dev(struct pci_dev *pdev) +{ + struct platform_device *plat_dev; + struct dwc_pcie_dev_info *dev_info; + u32 sbdf; + + sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn); + plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf, + pdev, sizeof(*pdev)); + + if (IS_ERR(plat_dev)) + return PTR_ERR(plat_dev); + + dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); + if (!dev_info) + return -ENOMEM; + + /* Cache platform device to handle pci device hotplug */ + dev_info->plat_dev = plat_dev; + dev_info->pdev = pdev; + list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head); + + return 0; +} + +static int dwc_pcie_pmu_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct pci_dev *pdev = to_pci_dev(dev); + struct dwc_pcie_dev_info *dev_info; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + if (!dwc_pcie_match_des_cap(pdev)) + return NOTIFY_DONE; + if (dwc_pcie_register_dev(pdev)) + return NOTIFY_BAD; + break; + case BUS_NOTIFY_DEL_DEVICE: + dev_info = dwc_pcie_find_dev_info(pdev); + if (!dev_info) + return NOTIFY_DONE; + dwc_pcie_unregister_dev(dev_info); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block dwc_pcie_pmu_nb = { + .notifier_call = dwc_pcie_pmu_notifier, +}; + +static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) +{ + struct pci_dev *pdev = plat_dev->dev.platform_data; + struct dwc_pcie_pmu *pcie_pmu; + char *name; + u32 sbdf, val; + u16 vsec; + int ret; + + vsec = pci_find_vsec_capability(pdev, pdev->vendor, + DWC_PCIE_VSEC_RAS_DES_ID); + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + sbdf = plat_dev->id; + name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf); + if (!name) + return -ENOMEM; + + pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL); + if (!pcie_pmu) + return -ENOMEM; + + pcie_pmu->pdev = pdev; + pcie_pmu->ras_des_offset = vsec; + pcie_pmu->nr_lanes = pcie_get_width_cap(pdev); + pcie_pmu->on_cpu = -1; + pcie_pmu->pmu = (struct pmu){ + .name = name, + .parent = &pdev->dev, + .module = THIS_MODULE, + .attr_groups = dwc_pcie_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .task_ctx_nr = perf_invalid_context, + .event_init = 
dwc_pcie_pmu_event_init, + .add = dwc_pcie_pmu_event_add, + .del = dwc_pcie_pmu_event_del, + .start = dwc_pcie_pmu_event_start, + .stop = dwc_pcie_pmu_event_stop, + .read = dwc_pcie_pmu_event_update, + }; + + /* Add this instance to the list used by the offline callback */ + ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state, + &pcie_pmu->cpuhp_node); + if (ret) { + pci_err(pdev, "Error %d registering hotplug @%x\n", ret, sbdf); + return ret; + } + + /* Unwind when platform driver removes */ + ret = devm_add_action_or_reset(&plat_dev->dev, + dwc_pcie_pmu_remove_cpuhp_instance, + &pcie_pmu->cpuhp_node); + if (ret) + return ret; + + ret = perf_pmu_register(&pcie_pmu->pmu, name, -1); + if (ret) { + pci_err(pdev, "Error %d registering PMU @%x\n", ret, sbdf); + return ret; + } + ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu, + pcie_pmu); + if (ret) + return ret; + + return 0; +} + +static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct dwc_pcie_pmu *pcie_pmu; + + pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node); + if (pcie_pmu->on_cpu == -1) + pcie_pmu->on_cpu = cpumask_local_spread( + 0, dev_to_node(&pcie_pmu->pdev->dev)); + + return 0; +} + +static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct dwc_pcie_pmu *pcie_pmu; + struct pci_dev *pdev; + unsigned int target; + int node; + + pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node); + /* Nothing to do if this CPU doesn't own the PMU */ + if (cpu != pcie_pmu->on_cpu) + return 0; + + pcie_pmu->on_cpu = -1; + pdev = pcie_pmu->pdev; + node = dev_to_node(&pdev->dev); + + target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + target = cpumask_any_but(cpu_online_mask, cpu); + + if (target >= nr_cpu_ids) { + pci_err(pdev, "There is no CPU to set\n"); + return 0; + } + + /* This PMU does NOT support interrupt, just migrate context. 
*/
+ perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
+ pcie_pmu->on_cpu = target;
+
+ return 0;
+}
+
+static struct platform_driver dwc_pcie_pmu_driver = {
+ .probe = dwc_pcie_pmu_probe,
+ .driver = {.name = "dwc_pcie_pmu",},
+};
+
+static int __init dwc_pcie_pmu_init(void)
+{
+ struct pci_dev *pdev = NULL;
+ int ret;
+
+ for_each_pci_dev(pdev) {
+ if (!dwc_pcie_match_des_cap(pdev))
+ continue;
+
+ ret = dwc_pcie_register_dev(pdev);
+ if (ret) {
+ pci_dev_put(pdev);
+ return ret;
+ }
+ }
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/dwc_pcie_pmu:online",
+ dwc_pcie_pmu_online_cpu,
+ dwc_pcie_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ dwc_pcie_pmu_hp_state = ret;
+
+ ret = platform_driver_register(&dwc_pcie_pmu_driver);
+ if (ret)
+ goto platform_driver_register_err;
+
+ ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
+ if (ret)
+ goto platform_driver_register_err;
+ notify = true;
+
+ return 0;
+
+platform_driver_register_err:
+ cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
+
+ return ret;
+}
+
+static void __exit dwc_pcie_pmu_exit(void)
+{
+ struct dwc_pcie_dev_info *dev_info, *tmp;
+
+ if (notify)
+ bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
+ list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
+ dwc_pcie_unregister_dev(dev_info);
+ platform_driver_unregister(&dwc_pcie_pmu_driver);
+ cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
+}
+
+module_init(dwc_pcie_pmu_init);
+module_exit(dwc_pcie_pmu_exit);
+
+MODULE_DESCRIPTION("PMU driver for DesignWare Cores PCI Express Controller");
+MODULE_AUTHOR("Shuai Xue ");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 7dfb7190580efaa47bcc8a68f73a642e23270f6f..79753411b778cb3766f100f05bb5605d239bea45 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -512,6 +512,7 @@ source "drivers/pinctrl/berlin/Kconfig"
 source "drivers/pinctrl/cirrus/Kconfig"
 source "drivers/pinctrl/freescale/Kconfig"
 source "drivers/pinctrl/intel/Kconfig"
+source "drivers/pinctrl/zhaoxin/Kconfig"
 source "drivers/pinctrl/mediatek/Kconfig"
 source "drivers/pinctrl/meson/Kconfig"
 source "drivers/pinctrl/mvebu/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index dd6cda27029492812122d2712336237cebc234f3..4275eca92488ef21dbfbfd0e02b5a9e2f7416ed8 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_PINCTRL_BERLIN) += berlin/
 obj-y += cirrus/
 obj-y += freescale/
 obj-$(CONFIG_X86) += intel/
+obj-$(CONFIG_X86) += zhaoxin/
 obj-y += mediatek/
 obj-$(CONFIG_PINCTRL_MESON) += meson/
 obj-y += mvebu/
diff --git a/drivers/pinctrl/zhaoxin/Kconfig b/drivers/pinctrl/zhaoxin/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..65f95ca80d5c719bea361b24cbc9fcf2722efbac
--- /dev/null
+++ b/drivers/pinctrl/zhaoxin/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+# Zhaoxin pin control drivers
+
+if (X86 || COMPILE_TEST)
+
+config PINCTRL_ZHAOXIN
+ tristate
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+
+config PINCTRL_KX7000
+ tristate "Zhaoxin KX7000 pinctrl and GPIO driver"
+ depends on ACPI && X86
+ default m
+ select PINCTRL_ZHAOXIN
+ help
+ This pinctrl driver provides an interface that allows the pins of
+ the Zhaoxin KX7000 chipset to be configured and used as GPIOs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pinctrl-kx7000.
+
+ If unsure, say M.
+ +endif diff --git a/drivers/pinctrl/zhaoxin/Makefile b/drivers/pinctrl/zhaoxin/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a3acfa66f196b489e0e7f398bd50cd8a0de6e0d9 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/Makefile @@ -0,0 +1,4 @@ +# zhaoxin pin control drivers + +obj-$(CONFIG_PINCTRL_ZHAOXIN) += pinctrl-zhaoxin.o +obj-$(CONFIG_PINCTRL_KX7000) += pinctrl-kx7000.o diff --git a/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c new file mode 100644 index 0000000000000000000000000000000000000000..f249dd369e7c8694efd04324dc4cb5a83f2d0491 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * zhaoxin KX7000 pinctrl/GPIO driver + * + * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved. + * + */ + +#define DRIVER_VERSION "1.0.0" + +#include +#include +#include + +#include + +#include "pinctrl-zhaoxin.h" + +#define ZX_CAL_ARRAY(a, b) \ +{ \ + .pmio_offset = (a), \ + .size = (b), \ +} + +#define PMIO_RX90 100 +#define PMIO_RX8C 200 + +#define ZX_CAL_INDEX_ARRAY(a, b, c) \ +{ \ + .reg_port_base = (PMIO_RX90), \ + .reg_data_base = (PMIO_RX8C), \ + .index = (a), \ + .cal_array = (b), \ + .size = (c), \ +} + +/* kx7000 pin define */ +static const struct pinctrl_pin_desc kx7000_pins[] = { + + PINCTRL_PIN(0, "IOD_CPUTCK"), + PINCTRL_PIN(1, "IOD_CPUTMS"), + PINCTRL_PIN(2, "IOD_CPUTRST"), + PINCTRL_PIN(3, "IOD_CPUTDO"), + PINCTRL_PIN(4, "IOD_CPUTDI"), + PINCTRL_PIN(5, "IOD_ZLSCLK0"), + PINCTRL_PIN(6, "IOD_ZLDATA0"), + PINCTRL_PIN(7, "IOD_ZLSCLK1"), + PINCTRL_PIN(8, "IOD_ZLDATA1"), + PINCTRL_PIN(9, "IOD_CLK27M"), + PINCTRL_PIN(10, "IOD_CPURST"), + PINCTRL_PIN(11, "IOD_PWORK"), + PINCTRL_PIN(12, "IOD_RSMRST"), + PINCTRL_PIN(13, "IOD_THRMTRIP"), + //GPIO range 0 + PINCTRL_PIN(14, "USBHOC0"), + PINCTRL_PIN(15, "USBHOC1"), + PINCTRL_PIN(16, "USBHOC2"), + PINCTRL_PIN(17, "USBHOC3"), + PINCTRL_PIN(18, "USBHOC4"), + PINCTRL_PIN(19, "USBHOC5"), + PINCTRL_PIN(20, "USBHOC6"), + PINCTRL_PIN(21, "USBHOC7"), + //gpio range 1 + PINCTRL_PIN(22, "USB4SBTX0"), + PINCTRL_PIN(23, "USB4SBRX0"), + PINCTRL_PIN(24, "USB4SBTX1"), + PINCTRL_PIN(25, "USB4SBRX1"), + //gpio range 2 + PINCTRL_PIN(26, "I2C1DT"), + PINCTRL_PIN(27, "I2C1CK"), + PINCTRL_PIN(28, "I2C1INT"), + //gpio range 3 + PINCTRL_PIN(29, "I2C2DT"), + PINCTRL_PIN(30, "I2C2CK"), + //gpio range 4 + PINCTRL_PIN(31, "I2C2INT"), + //gpio range 5 + PINCTRL_PIN(32, "SMBDT1"), + PINCTRL_PIN(33, "SMBCK1"), + PINCTRL_PIN(34, "SMBDT2"), + PINCTRL_PIN(35, "SMBCK2"), + PINCTRL_PIN(36, "SMBALRT"), + //gpio range 6 + PINCTRL_PIN(37, "SME_I2CDT"), + PINCTRL_PIN(38, "SME_I2CCK"), + //gpio range 7 + PINCTRL_PIN(39, "PWM"), + PINCTRL_PIN(40, "TACH"), + //gpio range 8 + PINCTRL_PIN(41, "GPIO0"), + PINCTRL_PIN(42, "GPIO1"), + PINCTRL_PIN(43, "GPIO2"), + PINCTRL_PIN(44, "GPIO3"), + PINCTRL_PIN(45, "GPIO4"), + PINCTRL_PIN(46, "GPIO5"), + PINCTRL_PIN(47, "GPIO6"), + PINCTRL_PIN(48, "GPIO7"), + PINCTRL_PIN(49, "GPIO8"), + PINCTRL_PIN(50, "GPIO9"), + PINCTRL_PIN(51, "LPCCLK"), + PINCTRL_PIN(52, "LPCDRQ1"), + //gpio range 9 + PINCTRL_PIN(53, "LPCDRQ0"), + PINCTRL_PIN(54, "LPCFRAME"), + PINCTRL_PIN(55, "LPCAD3"), + PINCTRL_PIN(56, "LPCAD2"), + PINCTRL_PIN(57, "LPCAD1"), + PINCTRL_PIN(58, "LPCAD0"), + //gpio range 10 + PINCTRL_PIN(59, "SERIRQ"), + PINCTRL_PIN(60, "AZRST"), + PINCTRL_PIN(61, "AZBITCLK"), + PINCTRL_PIN(62, "AZSDIN0"), + PINCTRL_PIN(63, "AZSDIN1"), + PINCTRL_PIN(64, "AZSDOUT"), + PINCTRL_PIN(65, "AZSYNC"), + //gpio range 11 + 
PINCTRL_PIN(66, "I2S1_SCLK"), + PINCTRL_PIN(67, "I2S1_TXD"), + PINCTRL_PIN(68, "I2S1_WS"), + PINCTRL_PIN(69, "I2S1_MCLK"), + //gpio range 12 + PINCTRL_PIN(70, "I2S1_RXD"), + //gpio range 13 + PINCTRL_PIN(71, "I2S1_INT"), + PINCTRL_PIN(72, "MSPIDI"), + PINCTRL_PIN(73, "MSPIDO"), + PINCTRL_PIN(74, "MSPIIO2"), + PINCTRL_PIN(75, "MSPIIO3"), + PINCTRL_PIN(76, "MSPICLK"), + PINCTRL_PIN(77, "MSPISS0"), + //gpio range 14 + PINCTRL_PIN(78, "MSPISS1"), + PINCTRL_PIN(79, "MSPISS2"), + //gpio range 15 + PINCTRL_PIN(80, "SPIDEVINT"), + PINCTRL_PIN(81, "BIOSSEL"), + //gpio range 16 + PINCTRL_PIN(82, "THRM"), + PINCTRL_PIN(83, "PEXWAKE"), + PINCTRL_PIN(84, "PWRBTN"), + //gpio range 17 + PINCTRL_PIN(85, "SPKR"), + PINCTRL_PIN(86, "PME"), + //gpio range 18 + PINCTRL_PIN(87, "BATLOW"), + PINCTRL_PIN(88, "EXTSMI"), + PINCTRL_PIN(89, "SUSA"), + PINCTRL_PIN(90, "SUSB"), + PINCTRL_PIN(91, "SUSC"), + PINCTRL_PIN(92, "GPWAKE"), + PINCTRL_PIN(93, "RING"), + PINCTRL_PIN(94, "LID"), + PINCTRL_PIN(95, "SLPS0"), + PINCTRL_PIN(96, "PCIRST"), + PINCTRL_PIN(97, "SVID_VREN"), + //gpio range 19 + PINCTRL_PIN(98, "INTRUDER"), + //gpio range 20 + PINCTRL_PIN(99, "GFX_I2CCLK0"), + PINCTRL_PIN(100, "GFX_I2CDAT0"), + PINCTRL_PIN(101, "GFX_I2CCLK1"), + PINCTRL_PIN(102, "GFX_I2CDAT1"), + PINCTRL_PIN(103, "GFX_I2CCLK2"), + PINCTRL_PIN(104, "GFX_I2CDAT2"), + PINCTRL_PIN(105, "GFX_I2CCLK3"), + PINCTRL_PIN(106, "GFX_I2CDAT3"), + PINCTRL_PIN(107, "GFX_GPIO0"), + PINCTRL_PIN(108, "GFX_GPIO1"), + PINCTRL_PIN(109, "GFX_GPIO2"), + PINCTRL_PIN(110, "GFX_GPIO3"), + PINCTRL_PIN(111, "CRTHSYNC"), + PINCTRL_PIN(112, "CRTVSYNC"), +}; + +#define NOT_DEFINE -30000 + +static int calibrate_int[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, + 69, 70, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 +}; + +static int calibrate_sattus[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, + 69, 70, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 +}; + +static const struct reg_cal_array kx7000_int_cal[] = { + ZX_CAL_ARRAY(0x58, 16), + ZX_CAL_ARRAY(0x5A, 2), + ZX_CAL_ARRAY(0xDA, 16), + ZX_CAL_ARRAY(0xDE, 16), +}; + +static const struct reg_calibrate int_cal[] = { + { + .reg = kx7000_int_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_int_cal), + .cal_array = calibrate_int, + .size = ARRAY_SIZE(calibrate_int), + } +}; + +static const struct reg_cal_array kx7000_status_cal[] = { + ZX_CAL_ARRAY((0x8), 16), + ZX_CAL_ARRAY((0xE), 2), + ZX_CAL_ARRAY((0xA), 16), + ZX_CAL_ARRAY((0xC), 16), +}; + +static const struct reg_calibrate status_cal[] = { + { + .reg = kx7000_status_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_status_cal), + .cal_array = calibrate_sattus, + .size = ARRAY_SIZE(calibrate_sattus), + } +}; + +static const struct reg_cal_array kx7000_mod_sel_cal[] = { + ZX_CAL_ARRAY((0x0), 16), + ZX_CAL_ARRAY((0x6), 2), + ZX_CAL_ARRAY((0x2), 16), + ZX_CAL_ARRAY((0x4), 16), +}; + +static const struct reg_calibrate mod_sel_cal[] = { + { + .reg = kx7000_mod_sel_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_mod_sel_cal), + .cal_array = calibrate_sattus, + .size = ARRAY_SIZE(calibrate_sattus), + } +}; + +static const struct index_cal_array kx7000_gpio_in_cal[] = { + ZX_CAL_INDEX_ARRAY(0x98, NULL, 71), +}; + +static const struct index_cal_array kx7000_gpio_out_cal[] = { + ZX_CAL_INDEX_ARRAY(0x90, NULL, 71), +}; + +static int calibrate_trigger[] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 18, 19, + 
20, 21, 22, 23, + 24, 25, 26, 27, + 28, 29, 30, 31, + 32, 33, 34, 35, + 36, 50, 51, 52, + 53, 54, 55, 56, + 57, 58, 59, 60, + 61, 62, 63, 64, + 65, 66, 67, 68, + 69, 70 +}; + +static const struct index_cal_array kx7000_trigger_cal[] = { + ZX_CAL_INDEX_ARRAY(0xA0, calibrate_trigger, 50), +}; + +static const struct zhaoxin_pin_topology kx7000_pin_topologys[] = { + { + .int_cal = int_cal, + .status_cal = status_cal, + .mod_sel_cal = mod_sel_cal, + .gpio_in_cal = kx7000_gpio_in_cal, + .gpio_out_cal = kx7000_gpio_out_cal, + .trigger_cal = kx7000_trigger_cal, + } +}; + +#define KX7000_GPP(s, e, g) \ +{ \ + .zhaoxin_range_pin_base = (s), \ + .zhaoxin_range_pin_size = ((e) - (s) + 1), \ + .zhaoxin_range_gpio_base = (g), \ +} + +static const struct zhaoxin_pin_map2_gpio kx7000_pinmap_gpps[] = { + KX7000_GPP(0, 13, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(14, 19, 10), + KX7000_GPP(20, 21, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(22, 25, 65), + KX7000_GPP(26, 28, 43), + KX7000_GPP(29, 30, 41), + KX7000_GPP(31, 31, 49), + KX7000_GPP(32, 36, 16), + KX7000_GPP(37, 38, 69), + KX7000_GPP(39, 40, 67), + KX7000_GPP(41, 50, 0), + KX7000_GPP(51, 52, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(53, 53, 39), + KX7000_GPP(54, 58, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(59, 59, 40), + KX7000_GPP(60, 65, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(66, 69, 35), + KX7000_GPP(70, 70, 46), + KX7000_GPP(71, 71, 64), + KX7000_GPP(72, 77, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(78, 78, 50), + KX7000_GPP(79, 79, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(80, 80, 51), + KX7000_GPP(81, 81, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(82, 82, 52), + KX7000_GPP(83, 84, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(85, 85, 53), + KX7000_GPP(86, 86, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(87, 95, 54), + KX7000_GPP(96, 97, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(98, 98, 63), + KX7000_GPP(99, 112, 21), +}; + +static const struct zhaoxin_pinctrl_soc_data kx7000_soc_data = { + .pins = kx7000_pins, + .npins = ARRAY_SIZE(kx7000_pins), + .pin_topologys = kx7000_pin_topologys, + .zhaoxin_pin_maps = kx7000_pinmap_gpps, + .pin_map_size = ARRAY_SIZE(kx7000_pinmap_gpps), +}; + +static const struct acpi_device_id kx7000_pinctrl_acpi_match[] = { + { "KX8344B", (kernel_ulong_t)&kx7000_soc_data }, + { } +}; +MODULE_DEVICE_TABLE(acpi, kx7000_pinctrl_acpi_match); + +static const struct dev_pm_ops kx7000_pinctrl_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(zhaoxin_pinctrl_suspend_noirq, zhaoxin_pinctrl_resume_noirq) +}; + +static struct platform_driver kx7000_pinctrl_driver = { + .probe = zhaoxin_pinctrl_probe_by_hid, + .driver = { + .name = "kx7000-pinctrl", + .acpi_match_table = kx7000_pinctrl_acpi_match, + .pm = &kx7000_pinctrl_pm_ops, + }, +}; + +module_platform_driver(kx7000_pinctrl_driver); + +MODULE_AUTHOR("www.zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin pinctrl driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c new file mode 100644 index 0000000000000000000000000000000000000000..1e434869d3dd8e24e33207562b95663acd49a6a1 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c @@ -0,0 +1,758 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * zhaoxin pinctrl common code + * + * Copyright(c) 2021 Shanghai Zhaoxin Corporation. All rights reserved. 
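+ * Common pinctrl/GPIO/IRQ plumbing shared by the SoC-specific drivers
+ * such as pinctrl-kx7000.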
+ * + */ + +#define DRIVER_VERSION "1.0.0" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "../core.h" +#include "pinctrl-zhaoxin.h" + +static int pin_to_hwgpio(struct pinctrl_gpio_range *range, unsigned int pin) +{ + int offset = 0; + + if (range->pins) { + for (offset = 0; offset < range->npins; offset++) + if (pin == range->pins[offset]) + break; + return range->base+offset-range->gc->base; + } else + return pin-range->pin_base+range->base-range->gc->base; +} + +static u16 zx_pad_read16(struct zhaoxin_pinctrl *pctrl, u8 index) +{ + outb(index, pctrl->pmio_rx90+pctrl->pmio_base); + return inw(pctrl->pmio_rx8c+pctrl->pmio_base); +} + +static void zx_pad_write16(struct zhaoxin_pinctrl *pctrl, u8 index, u16 value) +{ + outb(index, pctrl->pmio_rx90+pctrl->pmio_base); + outw(value, pctrl->pmio_rx8c+pctrl->pmio_base); +} + +static int zhaoxin_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->ngroups; +} + +static const char *zhaoxin_get_group_name(struct pinctrl_dev *pctldev, unsigned int group) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->groups[group].name; +} + +static int zhaoxin_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, + const unsigned int **pins, unsigned int *npins) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + *pins = pctrl->soc->groups[group].pins; + *npins = pctrl->soc->groups[group].npins; + + return 0; +} + +static void zhaoxin_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned int pin) +{ + +} + +static const struct pinctrl_ops zhaoxin_pinctrl_ops = { + .get_groups_count = zhaoxin_get_groups_count, + .get_group_name = zhaoxin_get_group_name, + .get_group_pins = zhaoxin_get_group_pins, + .pin_dbg_show = zhaoxin_pin_dbg_show, +}; + +static int zhaoxin_get_functions_count(struct pinctrl_dev *pctldev) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->nfunctions; +} + +static const char *zhaoxin_get_function_name(struct pinctrl_dev *pctldev, unsigned int function) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->functions[function].name; +} + +static int zhaoxin_get_function_groups(struct pinctrl_dev *pctldev, unsigned int function, + const char * const **groups, unsigned int *const ngroups) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + *groups = pctrl->soc->functions[function].groups; + *ngroups = pctrl->soc->functions[function].ngroups; + + return 0; +} + +static int zhaoxin_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, + unsigned int group) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + dev_dbg(pctrl->dev, "%s,group=%d,func=%d\n", __func__, group, function); + return 0; +} + +#define ZHAOXIN_PULL_UP_20K 0x80 +#define ZHAOXIN_PULL_UP_10K 0x40 +#define ZHAOXIN_PULL_UP_47K 0x20 +#define ZHAOXIN_PULL_DOWN 0x10 + +#define ZHAOXIN_PULL_UP 0xe0 + +static void zhaoxin_gpio_set_gpio_mode_and_pull(struct zhaoxin_pinctrl *pctrl, unsigned int pin, + bool isup) +{ + u16 tmp = 0; + u16 value; + u16 value_back = 0; + + if (isup) + tmp = ZHAOXIN_PULL_UP_10K|1; + else + tmp = ZHAOXIN_PULL_DOWN|1; + value = zx_pad_read16(pctrl, pin); + + /* for gpio */ + if (pin <= 0x32 && pin >= 0x29) { + if (isup) { + value &= 
(~(ZHAOXIN_PULL_DOWN));
+ value |= tmp;
+ } else {
+ value &= (~(ZHAOXIN_PULL_UP));
+ value |= tmp;
+ }
+ value &= ~(0x1);
+ zx_pad_write16(pctrl, pin, value);
+ value_back = zx_pad_read16(pctrl, pin);
+ } else { /* for pgpio */
+ if (isup) {
+ value &= (~(ZHAOXIN_PULL_DOWN));
+ value |= tmp;
+ } else {
+ value &= (~(ZHAOXIN_PULL_UP));
+ value |= tmp;
+ }
+ value |= 0x1;
+ zx_pad_write16(pctrl, pin, value);
+ value_back = zx_pad_read16(pctrl, pin);
+ }
+}
+
+static int zhaoxin_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned int pin)
+{
+ struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ int hwgpio = pin_to_hwgpio(range, pin);
+
+ dev_dbg(pctrl->dev, "%s, hwgpio=%d, pin=%d\n", __func__, hwgpio, pin);
+ zhaoxin_gpio_set_gpio_mode_and_pull(pctrl, pin, true);
+ return 0;
+}
+
+static const struct pinmux_ops zhaoxin_pinmux_ops = {
+ .get_functions_count = zhaoxin_get_functions_count,
+ .get_function_name = zhaoxin_get_function_name,
+ .get_function_groups = zhaoxin_get_function_groups,
+ .set_mux = zhaoxin_pinmux_set_mux,
+ .gpio_request_enable = zhaoxin_gpio_request_enable,
+};
+
+static int zhaoxin_config_get(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *config)
+{
+ return 0;
+}
+
+static int zhaoxin_config_set(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *configs,
+ unsigned int nconfigs)
+{
+ return 0;
+}
+
+static const struct pinconf_ops zhaoxin_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = zhaoxin_config_get,
+ .pin_config_set = zhaoxin_config_set,
+};
+
+static const struct pinctrl_desc zhaoxin_pinctrl_desc = {
+ .pctlops = &zhaoxin_pinctrl_ops,
+ .pmxops = &zhaoxin_pinmux_ops,
+ .confops = &zhaoxin_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int zhaoxin_gpio_to_pin(struct zhaoxin_pinctrl *pctrl, unsigned int offset,
+ const struct zhaoxin_pin_topology **community,
+ const struct zhaoxin_pin_map2_gpio **padgrp)
+{
+ int i;
+
+ for (i = 0; i < pctrl->pin_map_size; i++) {
+ const struct zhaoxin_pin_map2_gpio *map = &pctrl->pin_maps[i];
+
+ if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP)
+ continue;
+ if (offset >= map->zhaoxin_range_gpio_base &&
+ offset < map->zhaoxin_range_gpio_base + map->zhaoxin_range_pin_size) {
+ int pin;
+
+ pin = map->zhaoxin_range_pin_base + offset - map->zhaoxin_range_gpio_base;
+ if (padgrp)
+ *padgrp = map;
+ return pin;
+ }
+ }
+ return -EINVAL;
+}
+
+static __maybe_unused int zhaoxin_pin_to_gpio(struct zhaoxin_pinctrl *pctrl, int pin)
+{
+ const struct zhaoxin_pin_map2_gpio *pin_maps;
+
+ pin_maps = pctrl->pin_maps;
+ if (!pin_maps)
+ return -EINVAL;
+
+ return pin - pin_maps->zhaoxin_range_pin_base + pin_maps->zhaoxin_range_gpio_base;
+}
+
+static int zhaoxin_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct index_cal_array *gpio_in_cal;
+ int gap = offset/16;
+ int bit = offset%16;
+ int pin;
+ int value;
+
+ gpio_in_cal = pctrl->pin_topologys->gpio_in_cal;
+ pin = zhaoxin_gpio_to_pin(pctrl, offset, NULL, NULL);
+ value = zx_pad_read16(pctrl, gpio_in_cal->index+gap);
+ value &= (1 << bit);
+
+ return !!value;
+}
+
+static void zhaoxin_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct index_cal_array *gpio_out_cal;
+ int gap = offset/16;
+ int bit = offset%16;
+ unsigned long flags;
+ u16 org;
+ int pin;
+
+ gpio_out_cal = pctrl->pin_topologys->gpio_out_cal;
+ pin = zhaoxin_gpio_to_pin(pctrl, offset, NULL, NULL);
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ org = zx_pad_read16(pctrl, gpio_out_cal->index+gap);
+ if (value)
+ org |= (1 << bit);
+ else
+ org &= ~(1 << bit);
+ zx_pad_write16(pctrl, gpio_out_cal->index+gap, org);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int zhaoxin_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
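+ /* No separate direction register is modelled here; delegate to the pinctrl core. */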
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int zhaoxin_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ /* Drive the requested level before switching the pin to output. */
+ zhaoxin_gpio_set(chip, offset, value);
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int zhaoxin_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ return gpiochip_generic_request(gc, offset);
+}
+
+static void zhaoxin_gpio_free(struct gpio_chip *gc, unsigned int offset)
+{
+ gpiochip_generic_free(gc, offset);
+}
+
+static int zhaoxin_gpio_config(struct gpio_chip *gc, unsigned int offset, unsigned long config)
+{
+ return gpiochip_generic_config(gc, offset, config);
+}
+
+static const struct gpio_chip zhaoxin_gpio_chip = {
+ .owner = THIS_MODULE,
+ .request = zhaoxin_gpio_request,
+ .free = zhaoxin_gpio_free,
+ .direction_input = zhaoxin_gpio_direction_input,
+ .direction_output = zhaoxin_gpio_direction_output,
+ .get = zhaoxin_gpio_get,
+ .set = zhaoxin_gpio_set,
+ .set_config = zhaoxin_gpio_config,
+};
+
+static void zhaoxin_gpio_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct reg_calibrate *status_cal;
+ const struct reg_cal_array *reg_off;
+ int gpio = irqd_to_hwirq(d);
+ int i, j;
+ int offset = 0;
+ int base_offset = 0;
+ int bit_off = 0;
+ u16 value;
+ u16 value_read;
+
+ status_cal = pctrl->pin_topologys->status_cal;
+ if (gpio >= 0) {
+ for (i = 0; i < status_cal->size; i++)
+ if (gpio == status_cal->cal_array[i])
+ break;
+ for (j = 0; j < status_cal->reg_cal_size; j++) {
+ if (offset > i)
+ break;
+ offset += status_cal->reg[j].size;
+ }
+ reg_off = &status_cal->reg[j-1];
+ bit_off = i-(offset-reg_off->size);
+ base_offset = reg_off->pmio_offset;
+ value = readw(pctrl->pm_pmio_base+reg_off->pmio_offset);
+ value_read = value;
+ value |= (1 << bit_off);
+ writew(value, pctrl->pm_pmio_base+reg_off->pmio_offset);
+ }
+}
+
+static void zhaoxin_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct reg_calibrate *int_cal;
+ const struct reg_calibrate *mod_sel_cal;
+ int gpio = irqd_to_hwirq(d);
+ int i, j;
+ int offset = 0;
+ int base_offset = 0;
+ const struct reg_cal_array *reg_off, *mod;
+ int bit_off = 0;
+ u16 value;
+ u16 value1;
+
+ int_cal = pctrl->pin_topologys->int_cal;
+ mod_sel_cal = pctrl->pin_topologys->mod_sel_cal;
+
+ if (gpio >= 0) {
+ for (i = 0; i < int_cal->size; i++)
+ if (gpio == int_cal->cal_array[i])
+ break;
+ for (j = 0; j < int_cal->reg_cal_size; j++) {
+ if (offset > i)
+ break;
+ offset += int_cal->reg[j].size;
+ }
+ reg_off = &(int_cal->reg[j-1]);
+ mod = &(mod_sel_cal->reg[j-1]);
+ bit_off = i-(offset-reg_off->size);
+ base_offset = reg_off->pmio_offset;
+ value = inw(pctrl->pmio_base+reg_off->pmio_offset);
+ if (mask)
+ value &= (~(1 << bit_off));
+ else
+ value |= (1 << bit_off);
+ outw(value, pctrl->pmio_base+reg_off->pmio_offset);
+ if (mask) {
+ value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset);
+ value1 |= (1 << bit_off);
+ writew(value1, pctrl->pm_pmio_base+mod->pmio_offset);
+ } else {
+ value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset);
+ value1 |= (1 << bit_off);
+ writew(value1, pctrl->pm_pmio_base+mod->pmio_offset);
+ }
+ }
+}
+
+static void zhaoxin_gpio_irq_mask(struct irq_data *d)
+{
+ zhaoxin_gpio_irq_mask_unmask(d, true);
+}
+
+static void zhaoxin_gpio_irq_unmask(struct irq_data *d)
+{
+ zhaoxin_gpio_irq_mask_unmask(d, false);
+}
+
+/*
+ * Parent-domain interrupt handler: demultiplexes the shared IRQ to the
+ * per-GPIO virtual interrupts.
+ */
+static irqreturn_t zhaoxin_gpio_irq(int irq, void *data)
+{
+ struct zhaoxin_pinctrl *pctrl = data;
+ struct gpio_chip *gc = &pctrl->chip;
+
const struct reg_calibrate *init; + const struct reg_calibrate *stat_cal; + unsigned int i, bit_offset; + u16 status, enable; + unsigned long pending; + int index = 0; + int ret = 0; + int subirq; + unsigned int hwirq; + + init = pctrl->pin_topologys->int_cal; + stat_cal = pctrl->pin_topologys->status_cal; + for (i = 0; i < init->reg_cal_size; i++) { + pending = 0; + status = readw(pctrl->pm_pmio_base + stat_cal->reg[i].pmio_offset); + enable = inw(pctrl->pmio_base + init->reg[i].pmio_offset); + enable &= status; + pending = enable; + for_each_set_bit(bit_offset, &pending, init->reg[i].size) { + hwirq = init->cal_array[index + bit_offset]; + subirq = irq_find_mapping(gc->irq.domain, hwirq); + generic_handle_irq(subirq); + } + + ret += pending ? 1 : 0; + index += init->reg[i].size; + } + + return IRQ_RETVAL(ret); +} + +static int zhaoxin_gpio_irq_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + unsigned int gpio = irqd_to_hwirq(d); + const struct index_cal_array *trigger_cal; + unsigned int pin; + unsigned long flags; + u8 index; + int position, point; + u16 value; + bool isup = true; + + trigger_cal = pctrl->pin_topologys->trigger_cal; + pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); + if (type & IRQ_TYPE_EDGE_FALLING) + isup = true; + else if (type & IRQ_TYPE_EDGE_RISING) + isup = true; + else if (type & IRQ_TYPE_LEVEL_LOW) + isup = true; + else if (type & IRQ_TYPE_LEVEL_HIGH) + isup = false; + + zhaoxin_gpio_set_gpio_mode_and_pull(pctrl, pin, isup); + + for (position = 0; position < trigger_cal->size; position++) + if (trigger_cal->cal_array[position] == gpio) + break; + + index = trigger_cal->index + ALIGN(position+1, 4)/4-1; + point = position % 4; + + raw_spin_lock_irqsave(&pctrl->lock, flags); + + value = zx_pad_read16(pctrl, index); + + if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) + value |= TRIGGER_BOTH_EDGE << (point*4); + else if (type & IRQ_TYPE_EDGE_FALLING) + value |= TRIGGER_FALL_EDGE << (point*4); + else if (type & IRQ_TYPE_EDGE_RISING) + value |= TRIGGER_RISE_EDGE << (point*4); + else if (type & IRQ_TYPE_LEVEL_LOW) + value |= TRIGGER_LOW_LEVEL << (point*4); + else if (type & IRQ_TYPE_LEVEL_HIGH) + value |= TRIGGER_HIGH_LEVEL << (point*4); + else + dev_dbg(pctrl->dev, "%s wrang type\n", __func__); + + zx_pad_write16(pctrl, index, value); + + if (type & IRQ_TYPE_EDGE_BOTH) + irq_set_handler_locked(d, handle_edge_irq); + else if (type & IRQ_TYPE_LEVEL_MASK) + irq_set_handler_locked(d, handle_level_irq); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + return 0; +} + +static int zhaoxin_gpio_irq_wake(struct irq_data *d, unsigned int on) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + unsigned int pin; + + pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); + if (pin) { + if (on) + enable_irq_wake(pctrl->irq); + else + disable_irq_wake(pctrl->irq); + } + + return 0; +} + +static int zhaoxin_gpio_add_pin_ranges(struct gpio_chip *gc) +{ + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + int ret, i; + + for (i = 0; i < pctrl->pin_map_size; i++) { + struct zhaoxin_pin_map2_gpio *map = &pctrl->pin_maps[i]; + + if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) + continue; + ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), + map->zhaoxin_range_gpio_base, map->zhaoxin_range_pin_base, + map->zhaoxin_range_pin_size); + if (ret) { 
+ dev_err(pctrl->dev, "failed to add GPIO pin range\n"); + return ret; + } + } + + return 0; +} + +static unsigned int zhaoxin_gpio_ngpio(const struct zhaoxin_pinctrl *pctrl) +{ + const struct zhaoxin_pin_map2_gpio *pin_maps; + unsigned int ngpio = 0; + int i; + + for (i = 0; i < pctrl->pin_map_size; i++) { + pin_maps = &pctrl->pin_maps[i]; + if (pin_maps->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) + continue; + if (pin_maps->zhaoxin_range_gpio_base + pin_maps->zhaoxin_range_pin_size > ngpio) + ngpio = pin_maps->zhaoxin_range_gpio_base + + pin_maps->zhaoxin_range_pin_size; + } + + return ngpio; +} + +static int zhaoxin_gpio_probe(struct zhaoxin_pinctrl *pctrl, int irq) +{ + int ret; + struct gpio_irq_chip *girq; + + pctrl->chip = zhaoxin_gpio_chip; + + pctrl->chip.ngpio = zhaoxin_gpio_ngpio(pctrl); + pctrl->chip.label = dev_name(pctrl->dev); + pctrl->chip.parent = pctrl->dev; + pctrl->chip.base = -1; + pctrl->chip.add_pin_ranges = zhaoxin_gpio_add_pin_ranges; + + pctrl->irq = irq; + + pctrl->irqchip.name = dev_name(pctrl->dev); + pctrl->irqchip.irq_ack = zhaoxin_gpio_irq_ack; + pctrl->irqchip.irq_mask = zhaoxin_gpio_irq_mask; + pctrl->irqchip.irq_unmask = zhaoxin_gpio_irq_unmask; + pctrl->irqchip.irq_set_type = zhaoxin_gpio_irq_type; + pctrl->irqchip.irq_set_wake = zhaoxin_gpio_irq_wake; + pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND; + + ret = devm_request_irq(pctrl->dev, irq, zhaoxin_gpio_irq, IRQF_SHARED | IRQF_NO_THREAD, + dev_name(pctrl->dev), pctrl); + if (ret) { + dev_err(pctrl->dev, "failed to request interrupt\n"); + return ret; + } + girq = &pctrl->chip.irq; + girq->chip = &pctrl->irqchip; + /* This will let us handle the IRQ in the driver */ + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl); + if (ret) { + dev_err(pctrl->dev, "failed to register gpiochip\n"); + return ret; + } + + return 0; +} + +static int zhaoxin_pinctrl_pm_init(struct zhaoxin_pinctrl *pctrl) +{ + return 0; +} + +static int zhaoxin_pinctrl_probe(struct platform_device *pdev, + const struct zhaoxin_pinctrl_soc_data *soc_data) +{ + struct zhaoxin_pinctrl *pctrl; + int ret, i, irq; + struct resource *res; + void __iomem *regs; + + pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); + if (!pctrl) + return -ENOMEM; + pctrl->dev = &pdev->dev; + pctrl->soc = soc_data; + raw_spin_lock_init(&pctrl->lock); + pctrl->pin_topologys = pctrl->soc->pin_topologys; + pctrl->pin_map_size = pctrl->soc->pin_map_size; + pctrl->pin_maps = devm_kcalloc(&pdev->dev, pctrl->pin_map_size, + sizeof(*pctrl->pin_maps), GFP_KERNEL); + if (!pctrl->pin_maps) + return -ENOMEM; + for (i = 0; i < pctrl->pin_map_size; i++) { + struct zhaoxin_pin_map2_gpio *community = &pctrl->pin_maps[i]; + *community = pctrl->soc->zhaoxin_pin_maps[i]; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + pctrl->pm_pmio_base = regs; + pctrl->pmio_base = 0x800; + pctrl->pmio_rx90 = 0x90; + pctrl->pmio_rx8c = 0x8c; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = zhaoxin_pinctrl_pm_init(pctrl); + if (ret) + return ret; + pctrl->pctldesc = zhaoxin_pinctrl_desc; + pctrl->pctldesc.name = dev_name(&pdev->dev); + pctrl->pctldesc.pins = pctrl->soc->pins; + pctrl->pctldesc.npins = pctrl->soc->npins; + pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc, pctrl); + if 
(IS_ERR(pctrl->pctldev)) { + dev_err(&pdev->dev, "failed to register pinctrl driver\n"); + return PTR_ERR(pctrl->pctldev); + } + ret = zhaoxin_gpio_probe(pctrl, irq); + + if (ret) + return ret; + platform_set_drvdata(pdev, pctrl); + return 0; +} + +int zhaoxin_pinctrl_probe_by_hid(struct platform_device *pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data; + + data = device_get_match_data(&pdev->dev); + if (!data) + return -ENODATA; + + return zhaoxin_pinctrl_probe(pdev, data); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_probe_by_hid); + +int zhaoxin_pinctrl_probe_by_uid(struct platform_device *pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data; + + data = zhaoxin_pinctrl_get_soc_data(pdev); + if (IS_ERR(data)) + return PTR_ERR(data); + + return zhaoxin_pinctrl_probe(pdev, data); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_probe_by_uid); + +const struct zhaoxin_pinctrl_soc_data *zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data = NULL; + const struct zhaoxin_pinctrl_soc_data **table; + struct acpi_device *adev; + unsigned int i; + + adev = ACPI_COMPANION(&pdev->dev); + if (adev) { + const void *match = device_get_match_data(&pdev->dev); + + table = (const struct zhaoxin_pinctrl_soc_data **)match; + for (i = 0; table[i]; i++) { + if (!strcmp(adev->pnp.unique_id, table[i]->uid)) { + data = table[i]; + break; + } + } + } else { + const struct platform_device_id *id; + + id = platform_get_device_id(pdev); + if (!id) + return ERR_PTR(-ENODEV); + + table = (const struct zhaoxin_pinctrl_soc_data **)id->driver_data; + data = table[pdev->id]; + } + + return data ?: ERR_PTR(-ENODATA); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_get_soc_data); + +#ifdef CONFIG_PM_SLEEP + +int zhaoxin_pinctrl_suspend_noirq(struct device *dev) +{ + return 0; +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_suspend_noirq); + +int zhaoxin_pinctrl_resume_noirq(struct device *dev) +{ + return 0; +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_resume_noirq); +#endif + +MODULE_AUTHOR("www.zhaoxin.com"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Zhaoxin pinctrl/GPIO core driver"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h new file mode 100644 index 0000000000000000000000000000000000000000..cebea382dbe996cb1637a4328517b5407ef877df --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * zhaoxin pinctrl common code + * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved. 
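+ * Shared types and probe/PM helpers for the Zhaoxin pinctrl drivers.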
+ */ + +#ifndef PINCTRL_zhaoxin_H +#define PINCTRL_zhaoxin_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct platform_device; +struct device; + +/** + * struct zhaoxin_pingroup pin define + */ +struct zhaoxin_pingroup { + const char *name; + const unsigned int *pins; + size_t npins; + unsigned short mode; + const unsigned int *modes; +}; + +/** + * struct zhaoxin_function + */ +struct zhaoxin_function { + const char *name; + const char * const *groups; + size_t ngroups; +}; + +/** + * struct zhaoxin_pin_map2_gpio + * @zhaoxin_range_pin_base + * @size: pin number + * @zhaoxin_range_gpio_base + */ +struct zhaoxin_pin_map2_gpio { + unsigned int zhaoxin_range_pin_base; + unsigned int zhaoxin_range_pin_size; + int zhaoxin_range_gpio_base; +}; + +#define MAX_GPIO 256 + +struct reg_cal_array { + int pmio_offset; + int size; +}; + +struct reg_calibrate { + const struct reg_cal_array *reg; + const int reg_cal_size; + const int *cal_array; + const int size; +}; + +struct index_cal_array { + int reg_port_base; + int reg_data_base; + int index; + int *cal_array; + int size; +}; + +struct zhaoxin_pin_topology { + const struct reg_calibrate *int_cal; + const struct reg_calibrate *mod_sel_cal; + const struct reg_calibrate *status_cal; + const struct index_cal_array *gpio_in_cal; + const struct index_cal_array *gpio_out_cal; + const struct index_cal_array *gpio_dir_cal; + const struct index_cal_array *trigger_cal; +}; + +#define TRIGGER_FALL_EDGE 0 +#define TRIGGER_RISE_EDGE 1 +#define TRIGGER_BOTH_EDGE 2 +#define TRIGGER_LOW_LEVEL 3 +#define TRIGGER_HIGH_LEVEL 4 + +#define ZHAOXIN_GPIO_BASE_NOMAP -1 + +struct zhaoxin_pinctrl_soc_data { + const char *uid; + const struct pinctrl_pin_desc *pins; + size_t npins; + const struct zhaoxin_pingroup *groups; + size_t ngroups; + const struct zhaoxin_function *functions; + size_t nfunctions; + const struct zhaoxin_pin_topology *pin_topologys; + const struct zhaoxin_pin_map2_gpio *zhaoxin_pin_maps; + size_t pin_map_size; +}; + +const struct zhaoxin_pinctrl_soc_data * + zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev); + +struct zhaoxin_pinctrl { + struct device *dev; + raw_spinlock_t lock; + struct pinctrl_desc pctldesc; + struct pinctrl_dev *pctldev; + struct gpio_chip chip; + struct irq_chip irqchip; + const struct zhaoxin_pinctrl_soc_data *soc; + const struct zhaoxin_pin_topology *pin_topologys; + struct zhaoxin_pin_map2_gpio *pin_maps; + size_t pin_map_size; + int irq; + int pmio_base; + void __iomem *pm_pmio_base; + int pmio_rx90; + int pmio_rx8c; +}; + +int zhaoxin_pinctrl_probe_by_hid(struct platform_device *pdev); +int zhaoxin_pinctrl_probe_by_uid(struct platform_device *pdev); + +#ifdef CONFIG_PM_SLEEP +int zhaoxin_pinctrl_suspend_noirq(struct device *dev); +int zhaoxin_pinctrl_resume_noirq(struct device *dev); +#endif + +#endif /* PINCTRL_zhaoxin_H */ diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index 868b20361769c37048ef58392d9aae43abcaf021..f26534a4a83b5aedb85c616f90db739a0ceaba4a 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig @@ -9,6 +9,8 @@ source "drivers/platform/chrome/Kconfig" source "drivers/platform/mellanox/Kconfig" +source "drivers/platform/mpam/Kconfig" + source "drivers/platform/olpc/Kconfig" source "drivers/platform/surface/Kconfig" diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index 41640172975a795df917a824d0608e356588c925..54ee16e4e4d8a96e7795dfe152827693b862ad56 100644 --- a/drivers/platform/Makefile +++ 
b/drivers/platform/Makefile @@ -5,9 +5,11 @@ obj-$(CONFIG_X86) += x86/ obj-$(CONFIG_LOONGARCH) += loongarch/ +obj-$(CONFIG_SW64) += sw64/ obj-$(CONFIG_MELLANOX_PLATFORM) += mellanox/ obj-$(CONFIG_MIPS) += mips/ obj-$(CONFIG_OLPC_EC) += olpc/ obj-$(CONFIG_GOLDFISH) += goldfish/ obj-$(CONFIG_CHROME_PLATFORMS) += chrome/ obj-$(CONFIG_SURFACE_PLATFORMS) += surface/ +obj-$(CONFIG_ARM_CPU_RESCTRL) += mpam/ diff --git a/drivers/platform/loongarch/Kconfig b/drivers/platform/loongarch/Kconfig index 5633e4d73991a69e4386e2771386d7c9a0181134..9ec1a86ef7faeac6f29d6ece028e2534da435416 100644 --- a/drivers/platform/loongarch/Kconfig +++ b/drivers/platform/loongarch/Kconfig @@ -16,6 +16,14 @@ menuconfig LOONGARCH_PLATFORM_DEVICES if LOONGARCH_PLATFORM_DEVICES +config CPU_HWMON + bool "Loongson CPU HWMon Driver" + depends on MACH_LOONGSON64 + select HWMON + default y + help + Loongson-3A/3B/3C CPU HWMon (temperature sensor) driver. + config LOONGSON_LAPTOP tristate "Generic Loongson-3 Laptop Driver" depends on ACPI diff --git a/drivers/platform/loongarch/Makefile b/drivers/platform/loongarch/Makefile index f43ab03db1a2d5bde40f59cbd403c866ca7f1026..695688bed4232f2e1c99ae3be4f16e9799331e01 100644 --- a/drivers/platform/loongarch/Makefile +++ b/drivers/platform/loongarch/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_LOONGSON_LAPTOP) += loongson-laptop.o +obj-$(CONFIG_CPU_HWMON) += cpu_hwmon.o diff --git a/drivers/platform/loongarch/cpu_hwmon.c b/drivers/platform/loongarch/cpu_hwmon.c new file mode 100644 index 0000000000000000000000000000000000000000..c705d088c44a8476047491eccac51009f26d1824 --- /dev/null +++ b/drivers/platform/loongarch/cpu_hwmon.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#include +#include +#include +#include +#include + +#include + +static int nr_packages; +static struct device *cpu_hwmon_dev; + +static int loongson3_cpu_temp(int cpu) +{ + u32 reg; + + reg = iocsr_read32(LOONGARCH_IOCSR_CPUTEMP) & 0xff; + + return (int)((s8)reg) * 1000; +} + +static ssize_t cpu_temp_label(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int id = (to_sensor_dev_attr(attr))->index - 1; + + return sprintf(buf, "CPU %d Temperature\n", id); +} + +static ssize_t get_cpu_temp(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int id = (to_sensor_dev_attr(attr))->index - 1; + int value = loongson3_cpu_temp(id); + + return sprintf(buf, "%d\n", value); +} + +static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1); +static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2); +static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2); +static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3); +static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3); +static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp5_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp5_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp7_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp7_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp8_input, 0444, get_cpu_temp, NULL, 4); 
+static SENSOR_DEVICE_ATTR(temp8_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp9_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp9_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp10_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp10_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp11_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp11_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp12_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp12_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp13_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp13_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp14_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp14_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp15_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp15_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp16_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp16_label, 0444, cpu_temp_label, NULL, 4); + +static struct attribute *cpu_hwmon_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp2_label.dev_attr.attr, + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp3_label.dev_attr.attr, + &sensor_dev_attr_temp4_input.dev_attr.attr, + &sensor_dev_attr_temp4_label.dev_attr.attr, + &sensor_dev_attr_temp5_input.dev_attr.attr, + &sensor_dev_attr_temp5_label.dev_attr.attr, + &sensor_dev_attr_temp6_input.dev_attr.attr, + &sensor_dev_attr_temp6_label.dev_attr.attr, + &sensor_dev_attr_temp7_input.dev_attr.attr, + &sensor_dev_attr_temp7_label.dev_attr.attr, + &sensor_dev_attr_temp8_input.dev_attr.attr, + &sensor_dev_attr_temp8_label.dev_attr.attr, + &sensor_dev_attr_temp9_input.dev_attr.attr, + &sensor_dev_attr_temp9_label.dev_attr.attr, + &sensor_dev_attr_temp10_input.dev_attr.attr, + &sensor_dev_attr_temp10_label.dev_attr.attr, + &sensor_dev_attr_temp11_input.dev_attr.attr, + &sensor_dev_attr_temp11_label.dev_attr.attr, + &sensor_dev_attr_temp12_input.dev_attr.attr, + &sensor_dev_attr_temp12_label.dev_attr.attr, + &sensor_dev_attr_temp13_input.dev_attr.attr, + &sensor_dev_attr_temp13_label.dev_attr.attr, + &sensor_dev_attr_temp14_input.dev_attr.attr, + &sensor_dev_attr_temp14_label.dev_attr.attr, + &sensor_dev_attr_temp15_input.dev_attr.attr, + &sensor_dev_attr_temp15_label.dev_attr.attr, + &sensor_dev_attr_temp16_input.dev_attr.attr, + &sensor_dev_attr_temp16_label.dev_attr.attr, + NULL +}; +static umode_t cpu_hwmon_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + int id = i / 2; + + if (id < nr_packages) + return attr->mode; + return 0; +} + +static struct attribute_group cpu_hwmon_group = { + .attrs = cpu_hwmon_attributes, + .is_visible = cpu_hwmon_is_visible, +}; + +static const struct attribute_group *cpu_hwmon_groups[] = { + &cpu_hwmon_group, + NULL +}; + +static int cpu_initial_threshold = 72000; +static int cpu_thermal_threshold = 96000; +module_param(cpu_thermal_threshold, int, 0644); +MODULE_PARM_DESC(cpu_thermal_threshold, "cpu thermal threshold (96000 (default))"); + +static struct delayed_work thermal_work; + +static void do_thermal_timer(struct work_struct *work) +{ + int i, value, temp_max = 0; + + for (i = 0; i < 
nr_packages; i++) { + value = loongson3_cpu_temp(i); + if (value > temp_max) + temp_max = value; + } + + if (temp_max <= cpu_thermal_threshold) + schedule_delayed_work(&thermal_work, msecs_to_jiffies(5000)); + else + orderly_poweroff(true); +} + +static int __init loongson_hwmon_init(void) +{ + int i, value, temp_max = 0; + + pr_info("Loongson Hwmon Enter...\n"); + + nr_packages = loongson_sysconf.nr_cpus / + loongson_sysconf.cores_per_package; + + cpu_hwmon_dev = hwmon_device_register_with_groups(NULL, "cpu_hwmon", + NULL, cpu_hwmon_groups); + if (IS_ERR(cpu_hwmon_dev)) { + pr_err("Hwmon register fail with %ld!\n", PTR_ERR(cpu_hwmon_dev)); + return PTR_ERR(cpu_hwmon_dev); + } + + for (i = 0; i < nr_packages; i++) { + value = loongson3_cpu_temp(i); + if (value > temp_max) + temp_max = value; + } + + pr_info("Initial CPU temperature is %d (highest).\n", temp_max); + if (temp_max > cpu_initial_threshold) + cpu_thermal_threshold += temp_max - cpu_initial_threshold; + + INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer); + schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000)); + + return 0; +} + +static void __exit loongson_hwmon_exit(void) +{ + cancel_delayed_work_sync(&thermal_work); + hwmon_device_unregister(cpu_hwmon_dev); +} + +module_init(loongson_hwmon_init); +module_exit(loongson_hwmon_exit); + +MODULE_AUTHOR("Huacai Chen "); +MODULE_DESCRIPTION("Loongson CPU Hwmon driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/mpam/Kconfig b/drivers/platform/mpam/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..75f5b2454fbe45b9531e6a680ecdc55df166200b --- /dev/null +++ b/drivers/platform/mpam/Kconfig @@ -0,0 +1,8 @@ +# Confusingly, this is everything but the CPU bits of MPAM. CPU here means +# CPU resources, not containers or cgroups etc. +config ARM_CPU_RESCTRL + bool + default y + depends on ARM64 && ARCH_HAS_CPU_RESCTRL + depends on MISC_FILESYSTEMS + select RESCTRL_RMID_DEPENDS_ON_CLOSID diff --git a/drivers/platform/mpam/Makefile b/drivers/platform/mpam/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..37693be531c34151078e6e6055d0bb4eea9099bf --- /dev/null +++ b/drivers/platform/mpam/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ARM_CPU_RESCTRL) += mpam_devices.o mpam_resctrl.o diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c new file mode 100644 index 0000000000000000000000000000000000000000..0134263c88af4b6e0c5a4f477c8ccde28471a302 --- /dev/null +++ b/drivers/platform/mpam/mpam_devices.c @@ -0,0 +1,2516 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2022 Arm Ltd. + +#define pr_fmt(fmt) "mpam: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "mpam_internal.h" + +extern int ddrc_freq; + +/* + * mpam_list_lock protects the SRCU lists when writing. Once the + * mpam_enabled key is enabled these lists are read-only, + * unless the error interrupt disables the driver. + */ +static DEFINE_MUTEX(mpam_list_lock); +static LIST_HEAD(mpam_all_msc); + +struct srcu_struct mpam_srcu; + +enum mpam_machine_type mpam_current_machine; + +/* MPAM isn't available until all the MSC have been probed. 
*/ +static u32 mpam_num_msc; + +static int mpam_cpuhp_state; +static DEFINE_MUTEX(mpam_cpuhp_state_lock); + +/* + * The smallest common values for any CPU or MSC in the system. + * Generating traffic outside this range will result in screaming interrupts. + */ +u16 mpam_partid_max; +u8 mpam_pmg_max; +static bool partid_max_init, partid_max_published; +static DEFINE_SPINLOCK(partid_max_lock); + +/* + * mpam is enabled once all devices have been probed from CPU online callbacks, + * scheduled via this work_struct. If access to an MSC depends on a CPU that + * was not brought online at boot, this can happen surprisingly late. + */ +static DECLARE_WORK(mpam_enable_work, &mpam_enable); + +/* + * All mpam error interrupts indicate a software bug. On receipt, disable the + * driver. + */ +static DECLARE_WORK(mpam_broken_work, &mpam_disable); + +/* + * An MSC is a container for resources, each identified by their RIS index. + * Components are a group of RIS that control the same thing. + * Classes are the set components of the same type. + * + * e.g. The set of RIS that make up the L2 are a component. These are sometimes + * termed slices. They should be configured as if they were one MSC. + * + * e.g. The SoC probably has more than one L2, each attached to a distinct set + * of CPUs. All the L2 components are grouped as a class. + * + * When creating an MSC, struct mpam_msc is added to the all mpam_all_msc list, + * then linked via struct mpam_ris to a component and a class. + * The same MSC may exist under different class->component paths, but the RIS + * index will be unique. + */ +LIST_HEAD(mpam_classes); + +static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg) +{ + WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + return readl_relaxed(msc->mapped_hwpage + reg); +} + +static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val) +{ + WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + writel_relaxed(val, msc->mapped_hwpage + reg); +} + +#define mpam_read_partsel_reg(msc, reg) \ +({ \ + u32 ____ret; \ + \ + lockdep_assert_held_once(&msc->part_sel_lock); \ + ____ret = __mpam_read_reg(msc, MPAMF_##reg); \ + \ + ____ret; \ +}) + +#define mpam_write_partsel_reg(msc, reg, val) \ +({ \ + lockdep_assert_held_once(&msc->part_sel_lock); \ + __mpam_write_reg(msc, MPAMCFG_##reg, val); \ +}) + +#define mpam_read_monsel_reg(msc, reg) \ +({ \ + u32 ____ret; \ + \ + lockdep_assert_held_once(&msc->mon_sel_lock); \ + ____ret = __mpam_read_reg(msc, MSMON_##reg); \ + \ + ____ret; \ +}) + +#define mpam_write_monsel_reg(msc, reg, val) \ +({ \ + lockdep_assert_held_once(&msc->mon_sel_lock); \ + __mpam_write_reg(msc, MSMON_##reg, val); \ +}) + +static u64 mpam_msc_read_idr(struct mpam_msc *msc) +{ + u64 idr_high = 0, idr_low; + + lockdep_assert_held(&msc->part_sel_lock); + + idr_low = mpam_read_partsel_reg(msc, IDR); + if (FIELD_GET(MPAMF_IDR_HAS_EXT, idr_low)) + idr_high = mpam_read_partsel_reg(msc, IDR + 4); + + return (idr_high << 32) | idr_low; +} + +static void mpam_msc_zero_esr(struct mpam_msc *msc) +{ + writel_relaxed(0, msc->mapped_hwpage + MPAMF_ESR); + if (msc->has_extd_esr) + writel_relaxed(0, msc->mapped_hwpage + MPAMF_ESR + 4); +} + +static u64 mpam_msc_read_esr(struct mpam_msc *msc) +{ + u64 esr_high = 0, esr_low; + + esr_low = readl_relaxed(msc->mapped_hwpage + MPAMF_ESR); + if (msc->has_extd_esr) + esr_high = 
readl_relaxed(msc->mapped_hwpage + MPAMF_ESR + 4); + + return (esr_high << 32) | esr_low; +} + +static void __mpam_part_sel(u8 ris_idx, u16 partid, struct mpam_msc *msc) +{ + u32 partsel; + + lockdep_assert_held(&msc->part_sel_lock); + + partsel = FIELD_PREP(MPAMCFG_PART_SEL_RIS, ris_idx) | + FIELD_PREP(MPAMCFG_PART_SEL_PARTID_SEL, partid); + mpam_write_partsel_reg(msc, PART_SEL, partsel); +} + +int mpam_register_requestor(u16 partid_max, u8 pmg_max) +{ + int err = 0; + + spin_lock(&partid_max_lock); + if (!partid_max_init) { + mpam_partid_max = partid_max; + mpam_pmg_max = pmg_max; + partid_max_init = true; + } else if (!partid_max_published) { + mpam_partid_max = min(mpam_partid_max, partid_max); + mpam_pmg_max = min(mpam_pmg_max, pmg_max); + } else { + /* New requestors can't lower the values */ + if ((partid_max < mpam_partid_max) || (pmg_max < mpam_pmg_max)) + err = -EBUSY; + } + spin_unlock(&partid_max_lock); + + return err; +} +EXPORT_SYMBOL(mpam_register_requestor); + +static struct mpam_component * +mpam_component_alloc(struct mpam_class *class, int id, gfp_t gfp) +{ + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + comp = kzalloc(sizeof(*comp), gfp); + if (!comp) + return ERR_PTR(-ENOMEM); + + comp->comp_id = id; + INIT_LIST_HEAD_RCU(&comp->ris); + /* affinity is updated when ris are added */ + INIT_LIST_HEAD_RCU(&comp->class_list); + comp->class = class; + + list_add_rcu(&comp->class_list, &class->components); + + return comp; +} + +static struct mpam_component * +mpam_component_get(struct mpam_class *class, int id, bool alloc, gfp_t gfp) +{ + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(comp, &class->components, class_list) { + if (comp->comp_id == id) + return comp; + } + + if (!alloc) + return ERR_PTR(-ENOENT); + + return mpam_component_alloc(class, id, gfp); +} + +static struct mpam_class * +mpam_class_alloc(u8 level_idx, enum mpam_class_types type, gfp_t gfp) +{ + struct mpam_class *class; + + lockdep_assert_held(&mpam_list_lock); + + class = kzalloc(sizeof(*class), gfp); + if (!class) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD_RCU(&class->components); + /* affinity is updated when ris are added */ + class->level = level_idx; + class->type = type; + INIT_LIST_HEAD_RCU(&class->classes_list); + ida_init(&class->ida_csu_mon); + ida_init(&class->ida_mbwu_mon); + + list_add_rcu(&class->classes_list, &mpam_classes); + + return class; +} + +static struct mpam_class * +mpam_class_get(u8 level_idx, enum mpam_class_types type, bool alloc, gfp_t gfp) +{ + bool found = false; + struct mpam_class *class; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + if (class->type == type && class->level == level_idx) { + found = true; + break; + } + } + + if (found) + return class; + + if (!alloc) + return ERR_PTR(-ENOENT); + + return mpam_class_alloc(level_idx, type, gfp); +} + +static void mpam_class_destroy(struct mpam_class *class) +{ + lockdep_assert_held(&mpam_list_lock); + + list_del_rcu(&class->classes_list); + synchronize_srcu(&mpam_srcu); + kfree(class); +} + +static void mpam_comp_destroy(struct mpam_component *comp) +{ + struct mpam_class *class = comp->class; + + lockdep_assert_held(&mpam_list_lock); + + list_del_rcu(&comp->class_list); + synchronize_srcu(&mpam_srcu); + kfree(comp); + + if (list_empty(&class->components)) + mpam_class_destroy(class); +} + +/* synchronise_srcu() before freeing ris */ +static void mpam_ris_destroy(struct mpam_msc_ris 
*ris)
+{
+	struct mpam_component *comp = ris->comp;
+	struct mpam_class *class = comp->class;
+	struct mpam_msc *msc = ris->msc;
+
+	lockdep_assert_held(&mpam_list_lock);
+	lockdep_assert_preemption_enabled();
+
+	clear_bit(ris->ris_idx, msc->ris_idxs);
+	list_del_rcu(&ris->comp_list);
+	list_del_rcu(&ris->msc_list);
+
+	cpumask_andnot(&comp->affinity, &comp->affinity, &ris->affinity);
+	cpumask_andnot(&class->affinity, &class->affinity, &ris->affinity);
+
+	if (list_empty(&comp->ris))
+		mpam_comp_destroy(comp);
+}
+
+/*
+ * There are two ways of reaching a struct mpam_msc_ris. Via the
+ * class->component->ris, or via the msc.
+ * When destroying the msc, the other side needs unlinking and cleaning up too.
+ * synchronize_srcu() before freeing the msc.
+ */
+static void mpam_msc_destroy(struct mpam_msc *msc)
+{
+	struct mpam_msc_ris *ris, *tmp;
+
+	lockdep_assert_held(&mpam_list_lock);
+	lockdep_assert_preemption_enabled();
+
+	list_for_each_entry_safe(ris, tmp, &msc->ris, msc_list)
+		mpam_ris_destroy(ris);
+}
+
+/*
+ * The cacheinfo structures are only populated when CPUs are online.
+ * This helper walks the device tree to include offline CPUs too.
+ */
+static int get_cpumask_from_cache_id(u32 cache_id, u32 cache_level,
+				     cpumask_t *affinity)
+{
+	int cpu, err;
+	u32 iter_level;
+	int iter_cache_id;
+	struct device_node *iter;
+
+	if (!acpi_disabled) {
+		if (mpam_current_machine == MPAM_YITIAN710)
+			return acpi_pptt_get_cpumask_from_cache_id_and_level(
+					cache_id, cache_level, affinity);
+		return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity);
+	}
+
+	for_each_possible_cpu(cpu) {
+		iter = of_get_cpu_node(cpu, NULL);
+		if (!iter) {
+			pr_err("Failed to find cpu%d device node\n", cpu);
+			return -ENOENT;
+		}
+
+		while ((iter = of_find_next_cache_node(iter))) {
+			err = of_property_read_u32(iter, "cache-level",
+						   &iter_level);
+			if (err || (iter_level != cache_level)) {
+				of_node_put(iter);
+				continue;
+			}
+
+			/*
+			 * get_cpu_cacheinfo_id() isn't ready until sometime
+			 * during device_initcall(). Use cache_of_get_id().
+			 */
+			iter_cache_id = cache_of_get_id(iter);
+			if (iter_cache_id == ~0UL) {
+				of_node_put(iter);
+				continue;
+			}
+
+			if (iter_cache_id == cache_id)
+				cpumask_set_cpu(cpu, affinity);
+
+			of_node_put(iter);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * cpumask_of_node() only knows about online CPUs. This can't tell us whether
+ * a class is represented on all possible CPUs.
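+ * Instead, walk every possible CPU and match cpu_to_node() against the
+ * component's node id, as get_cpumask_from_node_id() does below.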
+ */ +static void get_cpumask_from_node_id(u32 node_id, cpumask_t *affinity) +{ + int cpu; + + for_each_possible_cpu(cpu) { + if (node_id == cpu_to_node(cpu)) + cpumask_set_cpu(cpu, affinity); + } +} + +static int get_cpumask_from_cache(struct device_node *cache, + cpumask_t *affinity) +{ + int err; + u32 cache_level; + int cache_id; + + err = of_property_read_u32(cache, "cache-level", &cache_level); + if (err) { + pr_err("Failed to read cache-level from cache node\n"); + return -ENOENT; + } + + cache_id = cache_of_get_id(cache); + if (cache_id == ~0UL) { + pr_err("Failed to calculate cache-id from cache node\n"); + return -ENOENT; + } + + return get_cpumask_from_cache_id(cache_id, cache_level, affinity); +} + +static int mpam_ris_get_affinity(struct mpam_msc *msc, cpumask_t *affinity, + enum mpam_class_types type, + struct mpam_class *class, + struct mpam_component *comp) +{ + int err; + + switch (type) { + case MPAM_CLASS_CACHE: + err = get_cpumask_from_cache_id(comp->comp_id, class->level, + affinity); + if (err) + return err; + + if (cpumask_empty(affinity)) + pr_warn_once("%s no CPUs associated with cache node", + dev_name(&msc->pdev->dev)); + + break; + case MPAM_CLASS_MEMORY: + get_cpumask_from_node_id(comp->comp_id, affinity); + if (cpumask_empty(affinity)) + pr_warn_once("%s no CPUs associated with memory node", + dev_name(&msc->pdev->dev)); + break; + case MPAM_CLASS_UNKNOWN: + return 0; + } + + cpumask_and(affinity, affinity, &msc->accessibility); + + return 0; +} + +static int mpam_ris_create_locked(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, + int component_id, gfp_t gfp) +{ + int err; + struct mpam_msc_ris *ris; + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + if (test_and_set_bit(ris_idx, msc->ris_idxs)) + return -EBUSY; + + ris = devm_kzalloc(&msc->pdev->dev, sizeof(*ris), gfp); + if (!ris) + return -ENOMEM; + + class = mpam_class_get(class_id, type, true, gfp); + if (IS_ERR(class)) + return PTR_ERR(class); + + comp = mpam_component_get(class, component_id, true, gfp); + if (IS_ERR(comp)) { + if (list_empty(&class->components)) + mpam_class_destroy(class); + return PTR_ERR(comp); + } + + err = mpam_ris_get_affinity(msc, &ris->affinity, type, class, comp); + if (err) { + if (list_empty(&class->components)) + mpam_class_destroy(class); + return err; + } + + ris->ris_idx = ris_idx; + INIT_LIST_HEAD_RCU(&ris->comp_list); + INIT_LIST_HEAD_RCU(&ris->msc_list); + ris->msc = msc; + ris->comp = comp; + + cpumask_or(&comp->affinity, &comp->affinity, &ris->affinity); + cpumask_or(&class->affinity, &class->affinity, &ris->affinity); + list_add_rcu(&ris->comp_list, &comp->ris); + list_add_rcu(&ris->msc_list, &msc->ris); + + return 0; +} + +int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, int component_id) +{ + int err; + + mutex_lock(&mpam_list_lock); + err = mpam_ris_create_locked(msc, ris_idx, type, class_id, + component_id, GFP_KERNEL); + mutex_unlock(&mpam_list_lock); + + return err; +} + +static struct mpam_msc_ris *mpam_get_or_create_ris(struct mpam_msc *msc, + u8 ris_idx) +{ + int err; + struct mpam_msc_ris *ris, *found = ERR_PTR(-ENOENT); + + lockdep_assert_held(&mpam_list_lock); + + if (!test_bit(ris_idx, msc->ris_idxs)) { + err = mpam_ris_create_locked(msc, ris_idx, MPAM_CLASS_UNKNOWN, + 0, 0, GFP_ATOMIC); + if (err) + return ERR_PTR(err); + } + + list_for_each_entry(ris, &msc->ris, msc_list) { + if (ris->ris_idx == ris_idx) { + found = ris; 
+ break; + } + } + + return found; +} + +static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) +{ + int err; + struct mpam_msc *msc = ris->msc; + struct mpam_props *props = &ris->props; + struct mpam_class *class = ris->comp->class; + + lockdep_assert_held(&msc->lock); + lockdep_assert_held(&msc->part_sel_lock); + + /* Cache Capacity Partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_CCAP_PART, ris->idr)) { + u32 ccap_features = mpam_read_partsel_reg(msc, CCAP_IDR); + + props->cmax_wd = FIELD_GET(MPAMF_CCAP_IDR_CMAX_WD, ccap_features); + if (props->cmax_wd) + mpam_set_feature(mpam_feat_ccap_part, props); + } + + /* Cache Portion partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_CPOR_PART, ris->idr)) { + u32 cpor_features = mpam_read_partsel_reg(msc, CPOR_IDR); + + props->cpbm_wd = FIELD_GET(MPAMF_CPOR_IDR_CPBM_WD, cpor_features); + if (props->cpbm_wd) + mpam_set_feature(mpam_feat_cpor_part, props); + } + + /* Memory bandwidth partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_MBW_PART, ris->idr)) { + u32 mbw_features = mpam_read_partsel_reg(msc, MBW_IDR); + + /* portion bitmap resolution */ + props->mbw_pbm_bits = FIELD_GET(MPAMF_MBW_IDR_BWPBM_WD, mbw_features); + if (props->mbw_pbm_bits && + FIELD_GET(MPAMF_MBW_IDR_HAS_PBM, mbw_features)) + mpam_set_feature(mpam_feat_mbw_part, props); + + props->bwa_wd = FIELD_GET(MPAMF_MBW_IDR_BWA_WD, mbw_features); + if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MAX, mbw_features)) + mpam_set_feature(mpam_feat_mbw_max, props); + + if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MIN, mbw_features)) + mpam_set_feature(mpam_feat_mbw_min, props); + + if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_PROP, mbw_features)) + mpam_set_feature(mpam_feat_mbw_prop, props); + } + + /* Priority partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_PRI_PART, ris->idr)) { + u32 pri_features = mpam_read_partsel_reg(msc, PRI_IDR); + + props->intpri_wd = FIELD_GET(MPAMF_PRI_IDR_INTPRI_WD, pri_features); + if (props->intpri_wd && FIELD_GET(MPAMF_PRI_IDR_HAS_INTPRI, pri_features)) { + mpam_set_feature(mpam_feat_intpri_part, props); + if (FIELD_GET(MPAMF_PRI_IDR_INTPRI_0_IS_LOW, pri_features)) + mpam_set_feature(mpam_feat_intpri_part_0_low, props); + } + + props->dspri_wd = FIELD_GET(MPAMF_PRI_IDR_DSPRI_WD, pri_features); + if (props->dspri_wd && FIELD_GET(MPAMF_PRI_IDR_HAS_DSPRI, pri_features)) { + mpam_set_feature(mpam_feat_dspri_part, props); + if (FIELD_GET(MPAMF_PRI_IDR_DSPRI_0_IS_LOW, pri_features)) + mpam_set_feature(mpam_feat_dspri_part_0_low, props); + } + } + + /* Performance Monitoring */ + if (FIELD_GET(MPAMF_IDR_HAS_MSMON, ris->idr)) { + u32 msmon_features = mpam_read_partsel_reg(msc, MSMON_IDR); + + if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_CSU, msmon_features)) { + u32 csumonidr, discard; + + /* + * If the firmware max-nrdy-us property is missing, the + * CSU counters can't be used. Should we wait forever? 
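+			 * For now, no: when the property is absent the CSU
+			 * monitors are never advertised, see the num_csu_mon
+			 * check below.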
+			 */
+			err = device_property_read_u32(&msc->pdev->dev,
+						       "arm,not-ready-us",
+						       &discard);
+
+			csumonidr = mpam_read_partsel_reg(msc, CSUMON_IDR);
+			props->num_csu_mon = FIELD_GET(MPAMF_CSUMON_IDR_NUM_MON, csumonidr);
+			if (props->num_csu_mon && !err)
+				mpam_set_feature(mpam_feat_msmon_csu, props);
+			else if (props->num_csu_mon)
+				pr_err_once("Counters are not usable because the not-ready timeout was not provided by firmware.");
+		}
+		if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_MBWU, msmon_features)) {
+			bool has_long;
+			u32 mbwumonidr = mpam_read_partsel_reg(msc, MBWUMON_IDR);
+
+			props->num_mbwu_mon = FIELD_GET(MPAMF_MBWUMON_IDR_NUM_MON, mbwumonidr);
+			if (props->num_mbwu_mon)
+				mpam_set_feature(mpam_feat_msmon_mbwu, props);
+
+			if (FIELD_GET(MPAMF_MBWUMON_IDR_HAS_RWBW, mbwumonidr))
+				mpam_set_feature(mpam_feat_msmon_mbwu_rwbw, props);
+
+			/*
+			 * Treat the long counter and its extension, lwd, as
+			 * mutually exclusive feature bits. Though these are
+			 * dependent fields at the implementation level, there
+			 * would never be a need for the
+			 * mpam_feat_msmon_mbwu_44counter (long counter) and
+			 * mpam_feat_msmon_mbwu_63counter (lwd) bits to be set
+			 * together.
+			 *
+			 * mpam_feat_msmon_mbwu isn't treated as an exclusive
+			 * bit as this feature bit would be used as the "front
+			 * facing feature bit" for any checks related to mbwu
+			 * monitors.
+			 */
+			has_long = FIELD_GET(MPAMF_MBWUMON_IDR_HAS_LONG, mbwumonidr);
+			if (props->num_mbwu_mon && has_long) {
+				if (FIELD_GET(MPAMF_MBWUMON_IDR_LWD, mbwumonidr))
+					mpam_set_feature(mpam_feat_msmon_mbwu_63counter, props);
+				else
+					mpam_set_feature(mpam_feat_msmon_mbwu_44counter, props);
+			}
+		}
+	}
+
+	if (FIELD_GET(MPAMF_IDR_HAS_IMPL_IDR, ris->idr))
+		if (mpam_current_machine == MPAM_YITIAN710 && class->type == MPAM_CLASS_MEMORY)
+			mpam_set_feature(mpam_feat_impl_msmon_mbwu, props);
+
+	/*
+	 * RIS with PARTID narrowing don't have enough storage for one
+	 * configuration per PARTID. If these are in a class we could use,
+	 * reduce the supported partid_max to match the number of intpartid.
+	 * If the class is unknown, just ignore it.
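+	 * e.g. an MSC advertising PARTID_MAX = 511 but INTPARTID_MAX = 63
+	 * only has storage for 64 configurations, and is treated as if its
+	 * partid_max were 63.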
+ */ + if (FIELD_GET(MPAMF_IDR_HAS_PARTID_NRW, ris->idr) && + class->type != MPAM_CLASS_UNKNOWN) { + u32 nrwidr = mpam_read_partsel_reg(msc, PARTID_NRW_IDR); + u16 partid_max = FIELD_GET(MPAMF_PARTID_NRW_IDR_INTPARTID_MAX, nrwidr); + + mpam_set_feature(mpam_feat_partid_nrw, props); + msc->partid_max = min(msc->partid_max, partid_max); + } +} + +static int mpam_msc_hw_probe(struct mpam_msc *msc) +{ + u64 idr; + u16 partid_max; + u8 ris_idx, pmg_max; + struct mpam_msc_ris *ris; + + lockdep_assert_held(&msc->lock); + + spin_lock(&msc->part_sel_lock); + idr = mpam_read_partsel_reg(msc, AIDR); + if ((idr & MPAMF_AIDR_ARCH_MAJOR_REV) != MPAM_ARCHITECTURE_V1) { + pr_err_once("%s does not match MPAM architecture v1.0\n", + dev_name(&msc->pdev->dev)); + spin_unlock(&msc->part_sel_lock); + return -EIO; + } + + idr = mpam_msc_read_idr(msc); + spin_unlock(&msc->part_sel_lock); + + msc->ris_max = FIELD_GET(MPAMF_IDR_RIS_MAX, idr); + + /* Use these values so partid/pmg always starts with a valid value */ + msc->partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr); + msc->pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr); + + for (ris_idx = 0; ris_idx <= msc->ris_max; ris_idx++) { + spin_lock(&msc->part_sel_lock); + __mpam_part_sel(ris_idx, 0, msc); + idr = mpam_msc_read_idr(msc); + spin_unlock(&msc->part_sel_lock); + + partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr); + pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr); + msc->partid_max = min(msc->partid_max, partid_max); + msc->pmg_max = min(msc->pmg_max, pmg_max); + msc->has_extd_esr = FIELD_GET(MPAMF_IDR_HAS_EXT_ESR, idr); + + ris = mpam_get_or_create_ris(msc, ris_idx); + if (IS_ERR(ris)) { + return PTR_ERR(ris); + } + ris->idr = idr; + + spin_lock(&msc->part_sel_lock); + __mpam_part_sel(ris_idx, 0, msc); + mpam_ris_hw_probe(ris); + spin_unlock(&msc->part_sel_lock); + } + + spin_lock(&partid_max_lock); + mpam_partid_max = min(mpam_partid_max, msc->partid_max); + mpam_pmg_max = min(mpam_pmg_max, msc->pmg_max); + spin_unlock(&partid_max_lock); + + msc->probed = true; + + return 0; +} + +struct mon_read +{ + struct mpam_msc_ris *ris; + struct mon_cfg *ctx; + enum mpam_device_features type; + u64 *val; + int err; +}; + +static bool mpam_ris_has_mbwu_long_counter(struct mpam_msc_ris *ris) +{ + return (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props) || + mpam_has_feature(mpam_feat_msmon_mbwu_44counter, &ris->props)); +} + +static u64 mpam_msc_read_mbwu_l(struct mpam_msc *msc) +{ + int retry = 3; + u32 mbwu_l_low; + u64 mbwu_l_high1, mbwu_l_high2; + + lockdep_assert_held_once(&msc->mon_sel_lock); + + WARN_ON_ONCE((MSMON_MBWU_L + sizeof(u64)) > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + mbwu_l_high2 = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L + 4); + do { + mbwu_l_high1 = mbwu_l_high2; + mbwu_l_low = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L); + mbwu_l_high2 = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L + 4); + + retry--; + } while (mbwu_l_high1 != mbwu_l_high2 && retry > 0); + + if (mbwu_l_high2 == mbwu_l_high1) + return (mbwu_l_high1 << 32) | mbwu_l_low; + return MSMON___NRDY_L; +} + +static void mpam_msc_zero_mbwu_l(struct mpam_msc *msc) +{ + lockdep_assert_held_once(&msc->mon_sel_lock); + + WARN_ON_ONCE((MSMON_MBWU_L + sizeof(u64)) > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + writel_relaxed(0, msc->mapped_hwpage + MSMON_MBWU_L); + writel_relaxed(0, msc->mapped_hwpage + MSMON_MBWU_L + 4); +} + +static void 
gen_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val,
+				   u32 *flt_val)
+{
+	struct mon_cfg *ctx = m->ctx;
+
+	switch (m->type) {
+	case mpam_feat_msmon_csu:
+		*ctl_val = MSMON_CFG_MBWU_CTL_TYPE_CSU;
+		break;
+	case mpam_feat_msmon_mbwu:
+		*ctl_val = MSMON_CFG_MBWU_CTL_TYPE_MBWU;
+		break;
+	default:
+		return;
+	}
+
+	/*
+	 * For CSU counters it's implementation-defined what happens when not
+	 * filtering by partid.
+	 */
+	*ctl_val |= MSMON_CFG_x_CTL_MATCH_PARTID;
+
+	*flt_val = FIELD_PREP(MSMON_CFG_MBWU_FLT_PARTID, ctx->partid);
+	if (m->ctx->match_pmg) {
+		*ctl_val |= MSMON_CFG_x_CTL_MATCH_PMG;
+		*flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_PMG, ctx->pmg);
+	}
+
+	/* Only filter on read/write bandwidth where the RIS supports it */
+	if (mpam_has_feature(mpam_feat_msmon_mbwu_rwbw, &m->ris->props))
+		*flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_RWBW, ctx->opts);
+}
+
+static void read_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val,
+				    u32 *flt_val)
+{
+	struct mpam_msc *msc = m->ris->msc;
+
+	switch (m->type) {
+	case mpam_feat_msmon_csu:
+		*ctl_val = mpam_read_monsel_reg(msc, CFG_CSU_CTL);
+		*flt_val = mpam_read_monsel_reg(msc, CFG_CSU_FLT);
+		break;
+	case mpam_feat_msmon_mbwu:
+		*ctl_val = mpam_read_monsel_reg(msc, CFG_MBWU_CTL);
+		*flt_val = mpam_read_monsel_reg(msc, CFG_MBWU_FLT);
+		break;
+	default:
+		return;
+	}
+}
+
+static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val,
+				     u32 flt_val)
+{
+	struct mpam_msc *msc = m->ris->msc;
+	struct msmon_mbwu_state *mbwu_state;
+
+	/*
+	 * Write the ctl_val with the enable bit cleared, reset the counter,
+	 * then enable the counter.
+	 */
+	switch (m->type) {
+	case mpam_feat_msmon_csu:
+		mpam_write_monsel_reg(msc, CFG_CSU_FLT, flt_val);
+		mpam_write_monsel_reg(msc, CFG_CSU_CTL, ctl_val);
+		mpam_write_monsel_reg(msc, CSU, 0);
+		mpam_write_monsel_reg(msc, CFG_CSU_CTL, ctl_val | MSMON_CFG_x_CTL_EN);
+		break;
+	case mpam_feat_msmon_mbwu:
+		mpam_write_monsel_reg(msc, CFG_MBWU_FLT, flt_val);
+		mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val);
+
+		if (mpam_ris_has_mbwu_long_counter(m->ris))
+			mpam_msc_zero_mbwu_l(m->ris->msc);
+		else
+			mpam_write_monsel_reg(msc, MBWU, 0);
+
+		mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val | MSMON_CFG_x_CTL_EN);
+
+		mbwu_state = &m->ris->mbwu_state[m->ctx->mon];
+		if (mbwu_state)
+			mbwu_state->prev_val = 0;
+
+		break;
+	default:
+		return;
+	}
+}
+
+static u64 mpam_msmon_overflow_val(struct mpam_msc_ris *ris)
+{
+	/* TODO: implement scaling counters */
+	if (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props))
+		return GENMASK_ULL(62, 0);
+	else if (mpam_has_feature(mpam_feat_msmon_mbwu_44counter, &ris->props))
+		return GENMASK_ULL(43, 0);
+	else
+		return GENMASK_ULL(30, 0);
+}
+
+static void __ris_msmon_read(void *arg)
+{
+	bool nrdy = false;
+	unsigned long flags;
+	bool config_mismatch;
+	struct mon_read *m = arg;
+	u64 now, overflow_val = 0;
+	struct mon_cfg *ctx = m->ctx;
+	bool reset_on_next_read = false;
+	struct mpam_msc_ris *ris = m->ris;
+	struct mpam_msc *msc = m->ris->msc;
+	struct msmon_mbwu_state *mbwu_state;
+	u32 mon_sel, ctl_val, flt_val, cur_ctl, cur_flt;
+
+	lockdep_assert_held(&msc->lock);
+
+	spin_lock_irqsave(&msc->mon_sel_lock, flags);
+	mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, ctx->mon) |
+		  FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx);
+	mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel);
+
+	if (m->type == mpam_feat_msmon_mbwu) {
+		mbwu_state = &ris->mbwu_state[ctx->mon];
+		if (mbwu_state) {
+			reset_on_next_read = mbwu_state->reset_on_next_read;
+			mbwu_state->reset_on_next_read =
false; + } + } + + /* + * Read the existing configuration to avoid re-writing the same values. + * This saves waiting for 'nrdy' on subsequent reads. + */ + read_msmon_ctl_flt_vals(m, &cur_ctl, &cur_flt); + gen_msmon_ctl_flt_vals(m, &ctl_val, &flt_val); + config_mismatch = cur_flt != flt_val || + cur_ctl != (ctl_val | MSMON_CFG_x_CTL_EN); + + if (config_mismatch || reset_on_next_read) + write_msmon_ctl_flt_vals(m, ctl_val, flt_val); + + switch (m->type) { + case mpam_feat_msmon_csu: + now = mpam_read_monsel_reg(msc, CSU); + nrdy = now & MSMON___NRDY; + now = FIELD_GET(MSMON___VALUE, now); + break; + case mpam_feat_msmon_mbwu: + /* + * If long or lwd counters are supported, use them, else revert + * to the 32 bit counter. + */ + if (mpam_ris_has_mbwu_long_counter(ris)) { + now = mpam_msc_read_mbwu_l(msc); + nrdy = now & MSMON___NRDY_L; + if (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props)) + now = FIELD_GET(MSMON___LWD_VALUE, now); + else + now = FIELD_GET(MSMON___L_VALUE, now); + } else { + now = mpam_read_monsel_reg(msc, MBWU); + nrdy = now & MSMON___NRDY; + now = FIELD_GET(MSMON___VALUE, now); + } + + if (nrdy) + break; + + if (!mbwu_state) + break; + + /* Add any pre-overflow value to the mbwu_state->val */ + if (mbwu_state->prev_val > now) + overflow_val = mpam_msmon_overflow_val(ris) - mbwu_state->prev_val; + + mbwu_state->prev_val = now; + mbwu_state->correction += overflow_val; + + /* Include bandwidth consumed before the last hardware reset */ + now += mbwu_state->correction; + break; + default: + return; + } + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + + if (nrdy) { + m->err = -EBUSY; + return; + } + + *(m->val) += now; +} + +static void __ris_impl_msmon_read(void *arg) +{ + unsigned long flags; + struct mon_read *m = arg; + u64 mb_val = 0; + struct mon_cfg *ctx = m->ctx; + struct mpam_msc *msc = m->ris->msc; + u32 custom_reg_base_addr, cycle, val; + + lockdep_assert_held(&msc->lock); + if (m->type != mpam_feat_impl_msmon_mbwu) + return; + + /* Other machine can extend this function */ + if (mpam_current_machine != MPAM_YITIAN710) + return; + + spin_lock_irqsave(&msc->part_sel_lock, flags); + + __mpam_write_reg(msc, MPAMCFG_PART_SEL, ctx->mon); + + custom_reg_base_addr = __mpam_read_reg(msc, MPAMF_IMPL_IDR); + + cycle = __mpam_read_reg(msc, custom_reg_base_addr + MPAMF_CUST_WINDW_OFFSET); + val = __mpam_read_reg(msc, custom_reg_base_addr + MPAMF_CUST_MBWC_OFFSET); + + spin_unlock_irqrestore(&msc->part_sel_lock, flags); + + if (val & MSMON___NRDY) { + m->err = -EBUSY; + return; + } + + mb_val = MBWU_GET(val); + + mb_val = mb_val * 32 * ddrc_freq * 1000000 / cycle; /* B/s */ + *(m->val) += mb_val; +} + +static int _msmon_read(struct mpam_component *comp, struct mon_read *arg) +{ + int err, idx; + struct mpam_msc *msc; + struct mpam_msc_ris *ris; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + arg->ris = ris; + + msc = ris->msc; + mutex_lock(&msc->lock); + if (arg->type == mpam_feat_msmon_csu || + arg->type == mpam_feat_msmon_mbwu) + err = smp_call_function_any(&msc->accessibility, + __ris_msmon_read, arg, true); + else if (arg->type == mpam_feat_impl_msmon_mbwu) + err = smp_call_function_any(&msc->accessibility, + __ris_impl_msmon_read, arg, true); + else + err = -EOPNOTSUPP; + mutex_unlock(&msc->lock); + if (!err && arg->err) + err = arg->err; + if (err) + break; + } + srcu_read_unlock(&mpam_srcu, idx); + + return err; +} + +int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, + enum 
mpam_device_features type, u64 *val) +{ + int err; + struct mon_read arg; + u64 wait_jiffies = 0; + struct mpam_props *cprops = &comp->class->props; + + might_sleep(); + + if (!mpam_is_enabled()) + return -EIO; + + if (!mpam_has_feature(type, cprops)) + return -EOPNOTSUPP; + + memset(&arg, 0, sizeof(arg)); + arg.ctx = ctx; + arg.type = type; + arg.val = val; + *val = 0; + + err = _msmon_read(comp, &arg); + if (err == -EBUSY) + wait_jiffies = usecs_to_jiffies(comp->class->nrdy_usec); + + while (wait_jiffies) + wait_jiffies = schedule_timeout_uninterruptible(wait_jiffies); + + if (err == -EBUSY) { + memset(&arg, 0, sizeof(arg)); + arg.ctx = ctx; + arg.type = type; + arg.val = val; + *val = 0; + + err = _msmon_read(comp, &arg); + } + + return err; +} + +void mpam_msmon_reset_all_mbwu(struct mpam_component *comp) +{ + int idx, i; + unsigned long flags; + struct mpam_msc *msc; + struct mpam_msc_ris *ris; + + if (!mpam_is_enabled()) + return; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + if (!mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props)) + continue; + + msc = ris->msc; + spin_lock_irqsave(&msc->mon_sel_lock, flags); + for (i = 0; i < ris->props.num_mbwu_mon; i++) { + ris->mbwu_state[i].correction = 0; + ris->mbwu_state[i].reset_on_next_read = true; + } + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + } + srcu_read_unlock(&mpam_srcu, idx); +} + +void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx) +{ + int idx; + unsigned long flags; + struct mpam_msc *msc; + struct mpam_msc_ris *ris; + + if (!mpam_is_enabled()) + return; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + if (!mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props)) + continue; + + msc = ris->msc; + spin_lock_irqsave(&msc->mon_sel_lock, flags); + ris->mbwu_state[ctx->mon].correction = 0; + ris->mbwu_state[ctx->mon].reset_on_next_read = true; + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + } + srcu_read_unlock(&mpam_srcu, idx); +} + +static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) +{ + u32 num_words, msb; + u32 bm = ~0; + int i; + + lockdep_assert_held(&msc->part_sel_lock); + + /* + * Write all ~0 to all but the last 32bit-word, which may + * have fewer bits... + */ + num_words = DIV_ROUND_UP(wd, 32); + for (i = 0; i < num_words - 1; i++, reg += sizeof(bm)) + __mpam_write_reg(msc, reg, bm); + + /* + * ....and then the last (maybe) partial 32bit word. When wd is a + * multiple of 32, msb should be 31 to write a full 32bit word. 
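+	 * e.g. wd = 45: one full word is written by the loop, then
+	 * msb = (45 - 1) % 32 = 12 and bm = GENMASK(12, 0) covers the low 13
+	 * bits, 32 + 13 = 45 bits in total. For wd = 32 the loop writes
+	 * nothing, and msb = 31 gives one full final word.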
+	 */
+	msb = (wd - 1) % 32;
+	bm = GENMASK(msb, 0);
+	if (bm)
+		__mpam_write_reg(msc, reg, bm);
+}
+
+static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid,
+				      struct mpam_config *cfg)
+{
+	u32 pri_val = 0;
+	u16 cmax = MPAMCFG_CMAX_CMAX;
+	struct mpam_msc *msc = ris->msc;
+	u16 bwa_fract = MPAMCFG_MBW_MAX_MAX;
+	struct mpam_props *rprops = &ris->props;
+	u16 dspri = GENMASK(rprops->dspri_wd, 0);
+	u16 intpri = GENMASK(rprops->intpri_wd, 0);
+	u32 custom_reg_base_addr;
+
+	spin_lock(&msc->part_sel_lock);
+	__mpam_part_sel(ris->ris_idx, partid, msc);
+
+	if (mpam_has_feature(mpam_feat_partid_nrw, rprops))
+		mpam_write_partsel_reg(msc, INTPARTID,
+				       (MPAMCFG_PART_SEL_INTERNAL | partid));
+
+	if (mpam_has_feature(mpam_feat_cpor_part, rprops)) {
+		if (mpam_has_feature(mpam_feat_cpor_part, cfg))
+			mpam_write_partsel_reg(msc, CPBM, cfg->cpbm);
+		else
+			mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM,
+					      rprops->cpbm_wd);
+	}
+
+	if (mpam_has_feature(mpam_feat_mbw_part, rprops)) {
+		if (mpam_has_feature(mpam_feat_mbw_part, cfg))
+			mpam_write_partsel_reg(msc, MBW_PBM, cfg->mbw_pbm);
+		else
+			mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM,
+					      rprops->mbw_pbm_bits);
+	}
+
+	if (mpam_has_feature(mpam_feat_mbw_min, rprops))
+		mpam_write_partsel_reg(msc, MBW_MIN, 0);
+
+	if (mpam_has_feature(mpam_feat_mbw_max, rprops)) {
+		if (mpam_has_feature(mpam_feat_mbw_max, cfg))
+			mpam_write_partsel_reg(msc, MBW_MAX, cfg->mbw_max);
+		else
+			mpam_write_partsel_reg(msc, MBW_MAX, bwa_fract);
+	}
+
+	if (mpam_has_feature(mpam_feat_mbw_prop, rprops))
+		mpam_write_partsel_reg(msc, MBW_PROP, bwa_fract);
+
+	if (mpam_has_feature(mpam_feat_ccap_part, rprops))
+		mpam_write_partsel_reg(msc, CMAX, cmax);
+
+	if (mpam_has_feature(mpam_feat_intpri_part, rprops) ||
+	    mpam_has_feature(mpam_feat_dspri_part, rprops)) {
+		/* aces high?
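+		 * MPAMF_PRI_IDR tells us whether 0 is the lowest or the
+		 * highest priority; either way, pick the encoding of the
+		 * highest priority: the field maximum when 0 is low, 0 when
+		 * it is not.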
*/ + if (!mpam_has_feature(mpam_feat_intpri_part_0_low, rprops)) + intpri = 0; + if (!mpam_has_feature(mpam_feat_dspri_part_0_low, rprops)) + dspri = 0; + + if (mpam_has_feature(mpam_feat_intpri_part, rprops)) + pri_val |= FIELD_PREP(MPAMCFG_PRI_INTPRI, intpri); + if (mpam_has_feature(mpam_feat_dspri_part, rprops)) + pri_val |= FIELD_PREP(MPAMCFG_PRI_DSPRI, dspri); + + mpam_write_partsel_reg(msc, PRI, pri_val); + } + + if (FIELD_GET(MPAMF_IDR_HAS_IMPL_IDR, ris->idr)) { + if (mpam_current_machine == MPAM_YITIAN710) { + custom_reg_base_addr = __mpam_read_reg(msc, MPAMF_IMPL_IDR); + __mpam_write_reg(msc, custom_reg_base_addr + + MPAMF_CUST_WINDW_OFFSET, + MBWU_WINWD_MAX); + } + } + + spin_unlock(&msc->part_sel_lock); +} + +struct reprogram_ris { + struct mpam_msc_ris *ris; + struct mpam_config *cfg; +}; + +/* Call with MSC lock held */ +static int mpam_reprogram_ris(void *_arg) +{ + u16 partid, partid_max; + struct reprogram_ris *arg = _arg; + struct mpam_msc_ris *ris = arg->ris; + struct mpam_config *cfg = arg->cfg; + + if (ris->in_reset_state) + return 0; + + spin_lock(&partid_max_lock); + partid_max = mpam_partid_max; + spin_unlock(&partid_max_lock); + for (partid = 0; partid <= partid_max; partid++) + mpam_reprogram_ris_partid(ris, partid, cfg); + + return 0; +} + +static int mpam_restore_mbwu_state(void *_ris) +{ + int i; + struct mon_read mwbu_arg; + struct mpam_msc_ris *ris = _ris; + + for (i = 0; i < ris->props.num_mbwu_mon; i++) { + if (ris->mbwu_state[i].enabled) { + mwbu_arg.ris = ris; + mwbu_arg.ctx = &ris->mbwu_state[i].cfg; + mwbu_arg.type = mpam_feat_msmon_mbwu; + + __ris_msmon_read(&mwbu_arg); + } + } + + return 0; +} + +static int mpam_save_mbwu_state(void *arg) +{ + int i; + u64 val; + struct mon_cfg *cfg; + unsigned long flags; + u32 cur_flt, cur_ctl, mon_sel; + struct mpam_msc_ris *ris = arg; + struct mpam_msc *msc = ris->msc; + struct msmon_mbwu_state *mbwu_state; + + for (i = 0; i < ris->props.num_mbwu_mon; i++) { + mbwu_state = &ris->mbwu_state[i]; + cfg = &mbwu_state->cfg; + + spin_lock_irqsave(&msc->mon_sel_lock, flags); + mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, i) | + FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx); + mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel); + + cur_flt = mpam_read_monsel_reg(msc, CFG_MBWU_FLT); + cur_ctl = mpam_read_monsel_reg(msc, CFG_MBWU_CTL); + mpam_write_monsel_reg(msc, CFG_MBWU_CTL, 0); + + if (mpam_ris_has_mbwu_long_counter(ris)) { + val = mpam_msc_read_mbwu_l(msc); + mpam_msc_zero_mbwu_l(msc); + } else { + val = mpam_read_monsel_reg(msc, MBWU); + mpam_write_monsel_reg(msc, MBWU, 0); + } + + cfg->mon = i; + cfg->pmg = FIELD_GET(MSMON_CFG_MBWU_FLT_PMG, cur_flt); + cfg->match_pmg = FIELD_GET(MSMON_CFG_x_CTL_MATCH_PMG, cur_ctl); + cfg->partid = FIELD_GET(MSMON_CFG_MBWU_FLT_PARTID, cur_flt); + mbwu_state->correction += val; + mbwu_state->enabled = FIELD_GET(MSMON_CFG_x_CTL_EN, cur_ctl); + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + } + + return 0; +} + +/* + * Called via smp_call_on_cpu() to prevent migration, while still being + * pre-emptible. + */ +static int mpam_reset_ris(void *arg) +{ + struct mpam_msc_ris *ris = arg; + struct reprogram_ris reprogram_arg; + struct mpam_config empty_cfg = { 0 }; + + if (ris->in_reset_state) + return 0; + + reprogram_arg.ris = ris; + reprogram_arg.cfg = &empty_cfg; + + mpam_reprogram_ris(&reprogram_arg); + + return 0; +} + +/* + * Get the preferred CPU for this MSC. If it is accessible from this CPU, + * this CPU is preferred. 
This can be preempted/migrated, it will only result + * in more work. + */ +static int mpam_get_msc_preferred_cpu(struct mpam_msc *msc) +{ + int cpu = raw_smp_processor_id(); + + if (cpumask_test_cpu(cpu, &msc->accessibility)) + return cpu; + + return cpumask_first_and(&msc->accessibility, cpu_online_mask); +} + +static int mpam_touch_msc(struct mpam_msc *msc, int (*fn)(void *a), void *arg) +{ + lockdep_assert_irqs_enabled(); + lockdep_assert_cpus_held(); + lockdep_assert_held(&msc->lock); + + return smp_call_on_cpu(mpam_get_msc_preferred_cpu(msc), fn, arg, true); +} + +static void mpam_reset_msc(struct mpam_msc *msc, bool online) +{ + int idx; + struct mpam_msc_ris *ris; + + lockdep_assert_held(&msc->lock); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &msc->ris, msc_list) { + mpam_touch_msc(msc, &mpam_reset_ris, ris); + + /* + * Set in_reset_state when coming online. The reset state + * for non-zero partid may be lost while the CPUs are offline. + */ + ris->in_reset_state = online; + + if (mpam_is_enabled() && !online) + mpam_touch_msc(msc, &mpam_save_mbwu_state, ris); + } + srcu_read_unlock(&mpam_srcu, idx); +} + +static void mpam_reprogram_msc(struct mpam_msc *msc) +{ + int idx; + u16 partid; + bool reset; + struct mpam_config *cfg; + struct mpam_msc_ris *ris; + + lockdep_assert_held(&msc->lock); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &msc->ris, msc_list) { + if (!mpam_is_enabled() && !ris->in_reset_state) { + mpam_touch_msc(msc, &mpam_reset_ris, ris); + ris->in_reset_state = true; + continue; + } + + reset = true; + for (partid = 0; partid <= mpam_partid_max; partid++) { + cfg = &ris->comp->cfg[partid]; + if (cfg->features) + reset = false; + + mpam_reprogram_ris_partid(ris, partid, cfg); + } + ris->in_reset_state = reset; + + if (mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props)) + mpam_touch_msc(msc, &mpam_restore_mbwu_state, ris); + } + srcu_read_unlock(&mpam_srcu, idx); +} + +static void _enable_percpu_irq(void *_irq) +{ + int *irq = _irq; + enable_percpu_irq(*irq, IRQ_TYPE_NONE); +} + +static int mpam_cpu_online(unsigned int cpu) +{ + int idx; + struct mpam_msc *msc; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(msc, &mpam_all_msc, glbl_list) { + if (!cpumask_test_cpu(cpu, &msc->accessibility)) + continue; + + mutex_lock(&msc->lock); + if (msc->reenable_error_ppi) + _enable_percpu_irq(&msc->reenable_error_ppi); + + if (atomic_fetch_inc(&msc->online_refs) == 0) + mpam_reprogram_msc(msc); + mutex_unlock(&msc->lock); + } + srcu_read_unlock(&mpam_srcu, idx); + + if (mpam_is_enabled()) + mpam_resctrl_online_cpu(cpu); + + return 0; +} + +/* Before mpam is enabled, try to probe new MSC */ +static int mpam_discovery_cpu_online(unsigned int cpu) +{ + int err = 0; + struct mpam_msc *msc; + bool new_device_probed = false; + + if (mpam_is_enabled()) + return 0; + + mutex_lock(&mpam_list_lock); + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + if (!cpumask_test_cpu(cpu, &msc->accessibility)) + continue; + + mutex_lock(&msc->lock); + if (!msc->probed) + err = mpam_msc_hw_probe(msc); + mutex_unlock(&msc->lock); + + if (!err) + new_device_probed = true; + else + break; // mpam_broken + } + mutex_unlock(&mpam_list_lock); + + if (new_device_probed && !err) + schedule_work(&mpam_enable_work); + + if (err < 0) + return err; + + return mpam_cpu_online(cpu); +} + +static int mpam_cpu_offline(unsigned int cpu) +{ + int idx; + struct mpam_msc *msc; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(msc, 
&mpam_all_msc, glbl_list) {
+		if (!cpumask_test_cpu(cpu, &msc->accessibility))
+			continue;
+
+		mutex_lock(&msc->lock);
+		if (msc->reenable_error_ppi)
+			disable_percpu_irq(msc->reenable_error_ppi);
+
+		if (atomic_dec_and_test(&msc->online_refs))
+			mpam_reset_msc(msc, false);
+		mutex_unlock(&msc->lock);
+	}
+	srcu_read_unlock(&mpam_srcu, idx);
+
+	if (mpam_is_enabled())
+		mpam_resctrl_offline_cpu(cpu);
+
+	return 0;
+}
+
+static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online))
+{
+	mutex_lock(&mpam_cpuhp_state_lock);
+	mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mpam:online",
+					     online, mpam_cpu_offline);
+	if (mpam_cpuhp_state <= 0) {
+		pr_err("Failed to register cpuhp callbacks");
+		mpam_cpuhp_state = 0;
+	}
+	mutex_unlock(&mpam_cpuhp_state_lock);
+}
+
+static int __setup_ppi(struct mpam_msc *msc)
+{
+	int cpu;
+
+	msc->error_dev_id = alloc_percpu_gfp(struct mpam_msc *, GFP_KERNEL);
+	if (!msc->error_dev_id)
+		return -ENOMEM;
+
+	for_each_cpu(cpu, &msc->accessibility) {
+		struct mpam_msc *empty = *per_cpu_ptr(msc->error_dev_id, cpu);
+
+		if (empty != NULL) {
+			pr_err_once("%s shares PPI with %s!\n",
+				    dev_name(&msc->pdev->dev),
+				    dev_name(&empty->pdev->dev));
+			return -EBUSY;
+		}
+		*per_cpu_ptr(msc->error_dev_id, cpu) = msc;
+	}
+
+	return 0;
+}
+
+static int mpam_msc_setup_error_irq(struct mpam_msc *msc)
+{
+	int irq;
+
+	irq = platform_get_irq_byname_optional(msc->pdev, "error");
+	if (irq <= 0)
+		return 0;
+
+	/* Allocate and initialise the percpu device pointer for PPI */
+	if (irq_is_percpu(irq))
+		return __setup_ppi(msc);
+
+	/* sanity check: shared interrupts can be routed anywhere? */
+	if (!cpumask_equal(&msc->accessibility, cpu_possible_mask)) {
+		pr_err_once("msc:%u is a private resource with a shared error interrupt",
+			    msc->id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static enum mpam_machine_type mpam_dt_get_machine_type(void)
+{
+	/* FIXME: not supported yet */
+	return MPAM_DEFAULT_MACHINE;
+}
+
+static int mpam_dt_count_msc(void)
+{
+	int count = 0;
+	struct device_node *np;
+
+	for_each_compatible_node(np, NULL, "arm,mpam-msc")
+		count++;
+
+	return count;
+}
+
+static int mpam_dt_parse_resource(struct mpam_msc *msc, struct device_node *np,
+				  u32 ris_idx)
+{
+	int err = 0;
+	u32 level = 0;
+	unsigned long cache_id;
+	struct device_node *cache;
+
+	do {
+		if (of_device_is_compatible(np, "arm,mpam-cache")) {
+			cache = of_parse_phandle(np, "arm,mpam-device", 0);
+			if (!cache) {
+				pr_err("Failed to read phandle\n");
+				break;
+			}
+		} else if (of_device_is_compatible(np->parent, "cache")) {
+			cache = np->parent;
+		} else {
+			/* For now, only caches are supported */
+			cache = NULL;
+			break;
+		}
+
+		err = of_property_read_u32(cache, "cache-level", &level);
+		if (err) {
+			pr_err("Failed to read cache-level\n");
+			break;
+		}
+
+		cache_id = cache_of_get_id(cache);
+		if (cache_id == ~0UL) {
+			err = -ENOENT;
+			break;
+		}
+
+		err = mpam_ris_create(msc, ris_idx, MPAM_CLASS_CACHE, level,
+				      cache_id);
+	} while (0);
+	of_node_put(cache);
+
+	return err;
+}
+
+static int mpam_dt_parse_resources(struct mpam_msc *msc, void *ignored)
+{
+	int err = 0, num_ris = 0;
+	const u32 *ris_idx_p;
+	struct device_node *iter, *np;
+
+	np = msc->pdev->dev.of_node;
+	for_each_child_of_node(np, iter) {
+		ris_idx_p = of_get_property(iter, "reg", NULL);
+		if (ris_idx_p) {
+			num_ris++;
+			err = mpam_dt_parse_resource(msc, iter, *ris_idx_p);
+			if (err) {
+				of_node_put(iter);
+				return err;
+			}
+		}
+	}
+
+	if (!num_ris)
+		err = mpam_dt_parse_resource(msc, np, 0);
+
+	return err;
+}
+
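+/*
+ * For illustration, a devicetree fragment the parser above would accept.
+ * Only the compatible strings and property names are taken from the code;
+ * the node names, values and the &l2 label are an editor's sketch:
+ *
+ *	l2: l2-cache {
+ *		compatible = "cache";
+ *		cache-level = <2>;
+ *
+ *		msc {
+ *			compatible = "arm,mpam-msc";
+ *
+ *			ris@0 {
+ *				compatible = "arm,mpam-cache";
+ *				reg = <0>;
+ *				arm,mpam-device = <&l2>;
+ *			};
+ *		};
+ *	};
+ */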
+static int get_msc_affinity(struct mpam_msc *msc) +{ + struct device_node *parent; + u32 affinity_id; + int err; + + if (!acpi_disabled) { + err = device_property_read_u32(&msc->pdev->dev, "cpu_affinity", + &affinity_id); + if (err) { + cpumask_copy(&msc->accessibility, cpu_possible_mask); + err = 0; + } else { + err = acpi_pptt_get_cpus_from_container(affinity_id, + &msc->accessibility); + } + + return err; + } + + /* This depends on the path to of_node */ + parent = of_get_parent(msc->pdev->dev.of_node); + if (parent == of_root) { + cpumask_copy(&msc->accessibility, cpu_possible_mask); + err = 0; + } else { + if (of_device_is_compatible(parent, "cache")) { + err = get_cpumask_from_cache(parent, + &msc->accessibility); + } else { + err = -EINVAL; + pr_err("Cannot determine accessibility of MSC: %s\n", + dev_name(&msc->pdev->dev)); + } + } + of_node_put(parent); + + return err; +} + +static int fw_num_msc; + +static void mpam_pcc_rx_callback(struct mbox_client *cl, void *msg) +{ + /* TODO: wake up tasks blocked on this MSC's PCC channel */ +} + +static int mpam_msc_drv_remove(struct platform_device *pdev) +{ + struct mpam_msc *msc = platform_get_drvdata(pdev); + + if (!msc) + return 0; + + mutex_lock(&mpam_list_lock); + mpam_num_msc--; + platform_set_drvdata(pdev, NULL); + list_del_rcu(&msc->glbl_list); + mpam_msc_destroy(msc); + synchronize_srcu(&mpam_srcu); + mutex_unlock(&mpam_list_lock); + + return 0; +} + +static int mpam_msc_drv_probe(struct platform_device *pdev) +{ + int err; + pgprot_t prot; + void * __iomem io; + struct mpam_msc *msc; + struct resource *msc_res; + void *plat_data = pdev->dev.platform_data; + + mutex_lock(&mpam_list_lock); + do { + msc = devm_kzalloc(&pdev->dev, sizeof(*msc), GFP_KERNEL); + if (!msc) { + err = -ENOMEM; + break; + } + + INIT_LIST_HEAD_RCU(&msc->glbl_list); + msc->pdev = pdev; + + err = device_property_read_u32(&pdev->dev, "arm,not-ready-us", + &msc->nrdy_usec); + if (err) { + /* This will prevent CSU monitors being usable */ + msc->nrdy_usec = 0; + } + + err = get_msc_affinity(msc); + if (err) + break; + if (cpumask_empty(&msc->accessibility)) { + pr_err_once("msc:%u is not accessible from any CPU!", + msc->id); + err = -EINVAL; + break; + } + + mutex_init(&msc->lock); + msc->id = mpam_num_msc++; + INIT_LIST_HEAD_RCU(&msc->ris); + spin_lock_init(&msc->part_sel_lock); + spin_lock_init(&msc->mon_sel_lock); + + err = mpam_msc_setup_error_irq(msc); + if (err) { + msc = ERR_PTR(err); + break; + } + + if (device_property_read_u32(&pdev->dev, "pcc-channel", + &msc->pcc_subspace_id)) + msc->iface = MPAM_IFACE_MMIO; + else + msc->iface = MPAM_IFACE_PCC; + + if (msc->iface == MPAM_IFACE_MMIO) { + io = devm_platform_get_and_ioremap_resource(pdev, 0, + &msc_res); + if (IS_ERR(io)) { + pr_err("Failed to map MSC base address\n"); + err = PTR_ERR(io); + break; + } + msc->mapped_hwpage_sz = msc_res->end - msc_res->start + 1; + msc->mapped_hwpage = io; + if (msc->mapped_hwpage_sz < MPAM_MIN_MMIO_SIZE) { + pr_err("MSC MMIO space size is too small\n"); + err = -EINVAL; + break; + } + } else if (msc->iface == MPAM_IFACE_PCC) { + msc->pcc_cl.dev = &pdev->dev; + msc->pcc_cl.rx_callback = mpam_pcc_rx_callback; + msc->pcc_cl.tx_block = false; + msc->pcc_cl.tx_tout = 1000; /* 1s */ + msc->pcc_cl.knows_txdone = false; + + msc->pcc_chan = pcc_mbox_request_channel(&msc->pcc_cl, + msc->pcc_subspace_id); + if (IS_ERR(msc->pcc_chan)) { + pr_err("Failed to request MSC PCC channel\n"); + err = PTR_ERR(msc->pcc_chan); + break; + } + + prot = 
__acpi_get_mem_attribute(msc->pcc_chan->shmem_base_addr); + io = ioremap_prot(msc->pcc_chan->shmem_base_addr, + msc->pcc_chan->shmem_size, pgprot_val(prot)); + if (IS_ERR(io)) { + pr_err("Failed to map MSC base address\n"); + pcc_mbox_free_channel(msc->pcc_chan); + err = PTR_ERR(io); + break; + } + + /* TODO: issue a read to update the registers */ + + msc->mapped_hwpage_sz = msc->pcc_chan->shmem_size; + msc->mapped_hwpage = io + sizeof(struct acpi_pcct_shared_memory); + } + + list_add_rcu(&msc->glbl_list, &mpam_all_msc); + platform_set_drvdata(pdev, msc); + } while (0); + mutex_unlock(&mpam_list_lock); + + if (!err) { + /* Create RIS entries described by firmware */ + if (!acpi_disabled) + err = acpi_mpam_parse_resources(msc, plat_data); + else + err = mpam_dt_parse_resources(msc, plat_data); + } + + if (err) + mpam_msc_drv_remove(pdev); + + if (!err && fw_num_msc == mpam_num_msc) + mpam_register_cpuhp_callbacks(&mpam_discovery_cpu_online); + + return err; +} + +/* + * If a resource doesn't match class feature/configuration, do the right thing. + * For 'num' properties we can just take the minimum. + * For properties where the mismatched unused bits would make a difference, we + * nobble the class feature, as we can't configure all the resources. + * e.g. The L3 cache is composed of two resources with 13 and 17 portion + * bitmaps respectively. + */ +static void +__resource_props_mismatch(struct mpam_msc_ris *ris, struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + struct mpam_props *rprops = &ris->props; + + lockdep_assert_held(&mpam_list_lock); /* we modify class */ + + /* Clear missing features */ + cprops->features &= rprops->features; + + /* Clear incompatible features */ + if (cprops->cpbm_wd != rprops->cpbm_wd) + mpam_clear_feature(mpam_feat_cpor_part, &cprops->features); + if (cprops->mbw_pbm_bits != rprops->mbw_pbm_bits) + mpam_clear_feature(mpam_feat_mbw_part, &cprops->features); + + /* bwa_wd is a count of bits, fewer bits means less precision */ + if (cprops->bwa_wd != rprops->bwa_wd) + cprops->bwa_wd = min(cprops->bwa_wd, rprops->bwa_wd); + + /* For num properties, take the minimum */ + if (cprops->num_csu_mon != rprops->num_csu_mon) + cprops->num_csu_mon = min(cprops->num_csu_mon, rprops->num_csu_mon); + if (cprops->num_mbwu_mon != rprops->num_mbwu_mon) + cprops->num_mbwu_mon = min(cprops->num_mbwu_mon, rprops->num_mbwu_mon); + + if (cprops->intpri_wd != rprops->intpri_wd) + cprops->intpri_wd = min(cprops->intpri_wd, rprops->intpri_wd); + if (cprops->dspri_wd != rprops->dspri_wd) + cprops->dspri_wd = min(cprops->dspri_wd, rprops->dspri_wd); + + /* {int,ds}pri may not have differing 0-low behaviour */ + if (mpam_has_feature(mpam_feat_intpri_part_0_low, cprops) != + mpam_has_feature(mpam_feat_intpri_part_0_low, rprops)) + mpam_clear_feature(mpam_feat_intpri_part, &cprops->features); + if (mpam_has_feature(mpam_feat_dspri_part_0_low, cprops) != + mpam_has_feature(mpam_feat_dspri_part_0_low, rprops)) + mpam_clear_feature(mpam_feat_dspri_part, &cprops->features); +} + +/* + * Copy the first component's first resources's properties and features to the + * class. __resource_props_mismatch() will remove conflicts. + * It is not possible to have a class with no components, or a component with + * no resources. 
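+ * After merging, class->props holds the intersection of what every RIS
+ * supports, so the class only advertises what can be programmed identically
+ * on all of its resources.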
+ */ +static void mpam_enable_init_class_features(struct mpam_class *class) +{ + struct mpam_msc_ris *ris; + struct mpam_component *comp; + + comp = list_first_entry_or_null(&class->components, + struct mpam_component, class_list); + if (WARN_ON(!comp)) + return; + + ris = list_first_entry_or_null(&comp->ris, + struct mpam_msc_ris, comp_list); + if (WARN_ON(!ris)) + return; + + class->props = ris->props; +} + +/* Merge all the common resource features into class. */ +static void mpam_enable_merge_features(void) +{ + struct mpam_msc_ris *ris; + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + mpam_enable_init_class_features(class); + + list_for_each_entry(comp, &class->components, class_list) { + list_for_each_entry(ris, &comp->ris, comp_list) { + __resource_props_mismatch(ris, class); + + class->nrdy_usec = max(class->nrdy_usec, + ris->msc->nrdy_usec); + } + } + } +} + +static char *mpam_errcode_names[16] = { + [0] = "No error", + [1] = "PARTID_SEL_Range", + [2] = "Req_PARTID_Range", + [3] = "MSMONCFG_ID_RANGE", + [4] = "Req_PMG_Range", + [5] = "Monitor_Range", + [6] = "intPARTID_Range", + [7] = "Unexpected_INTERNAL", + [8] = "Undefined_RIS_PART_SEL", + [9] = "RIS_No_Control", + [10] = "Undefined_RIS_MON_SEL", + [11] = "RIS_No_Monitor", + [12 ... 15] = "Reserved" +}; + +static int mpam_enable_msc_ecr(void *_msc) +{ + struct mpam_msc *msc = _msc; + + writel_relaxed(1, msc->mapped_hwpage + MPAMF_ECR); + + return 0; +} + +static int mpam_disable_msc_ecr(void *_msc) +{ + struct mpam_msc *msc = _msc; + + writel_relaxed(0, msc->mapped_hwpage + MPAMF_ECR); + + return 0; +} + +static irqreturn_t __mpam_irq_handler(int irq, struct mpam_msc *msc) +{ + u64 reg; + u16 partid; + u8 errcode, pmg, ris; + + if (WARN_ON_ONCE(!msc) || + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), + &msc->accessibility))) + return IRQ_NONE; + + reg = mpam_msc_read_esr(msc); + + errcode = FIELD_GET(MPAMF_ESR_ERRCODE, reg); + if (!errcode) + return IRQ_NONE; + + /* Clear level triggered irq */ + mpam_msc_zero_esr(msc); + + partid = FIELD_GET(MPAMF_ESR_PARTID_OR_MON, reg); + pmg = FIELD_GET(MPAMF_ESR_PMG, reg); + ris = FIELD_GET(MPAMF_ESR_RIS, reg); + + pr_err("error irq from msc:%u '%s', partid:%u, pmg: %u, ris: %u\n", + msc->id, mpam_errcode_names[errcode], partid, pmg, ris); + + /* + * To prevent this interrupt from repeatedly cancelling the scheduled + * work to disable mpam, disable the error interrupt. 
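+	 * mpam_broken_work will then unregister the remaining interrupts
+	 * and restore every MSC to its reset configuration, leaving MPAM
+	 * disabled.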
+ */ + mpam_disable_msc_ecr(msc); + + schedule_work(&mpam_broken_work); + + return IRQ_HANDLED; +} + +static irqreturn_t mpam_ppi_handler(int irq, void *dev_id) +{ + struct mpam_msc *msc = *(struct mpam_msc **)dev_id; + + return __mpam_irq_handler(irq, msc); +} + +static irqreturn_t mpam_spi_handler(int irq, void *dev_id) +{ + struct mpam_msc *msc = dev_id; + + return __mpam_irq_handler(irq, msc); +} + +static int mpam_register_irqs(void) +{ + int err, irq; + struct mpam_msc *msc; + + lockdep_assert_cpus_held(); + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + irq = platform_get_irq_byname_optional(msc->pdev, "error"); + if (irq <= 0) + continue; + + /* The MPAM spec says the interrupt can be SPI, PPI or LPI */ + /* We anticipate sharing the interrupt with other MSCs */ + if (irq_is_percpu(irq)) { + err = request_percpu_irq(irq, &mpam_ppi_handler, + "mpam:msc:error", + msc->error_dev_id); + if (err) + return err; + + mutex_lock(&msc->lock); + msc->reenable_error_ppi = irq; + smp_call_function_many(&msc->accessibility, + &_enable_percpu_irq, &irq, + true); + mutex_unlock(&msc->lock); + } else { + err = devm_request_irq(&msc->pdev->dev, irq, + &mpam_spi_handler, IRQF_SHARED, + "mpam:msc:error", msc); + if (err) + return err; + } + + mutex_lock(&msc->lock); + msc->error_irq_requested = true; + mpam_touch_msc(msc, mpam_enable_msc_ecr, msc); + msc->error_irq_hw_enabled = true; + mutex_unlock(&msc->lock); + } + + return 0; +} + +static void mpam_unregister_irqs(void) +{ + int irq; + struct mpam_msc *msc; + + cpus_read_lock(); + /* take the lock as free_irq() can sleep */ + mutex_lock(&mpam_list_lock); + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + irq = platform_get_irq_byname_optional(msc->pdev, "error"); + if (irq <= 0) + continue; + + mutex_lock(&msc->lock); + if (msc->error_irq_hw_enabled) { + mpam_touch_msc(msc, mpam_disable_msc_ecr, msc); + msc->error_irq_hw_enabled = false; + } + + if (msc->error_irq_requested) { + if (irq_is_percpu(irq)) { + msc->reenable_error_ppi = 0; + free_percpu_irq(irq, msc->error_dev_id); + } else { + devm_free_irq(&msc->pdev->dev, irq, msc); + } + msc->error_irq_requested = false; + } + mutex_unlock(&msc->lock); + } + mutex_unlock(&mpam_list_lock); + cpus_read_unlock(); +} + +static void __destroy_component_cfg(struct mpam_component *comp) +{ + unsigned long flags; + struct mpam_msc_ris *ris; + struct msmon_mbwu_state *mbwu_state; + + kfree(comp->cfg); + list_for_each_entry(ris, &comp->ris, comp_list) { + mutex_lock(&ris->msc->lock); + spin_lock_irqsave(&ris->msc->mon_sel_lock, flags); + mbwu_state = ris->mbwu_state; + ris->mbwu_state = NULL; + spin_unlock_irqrestore(&ris->msc->mon_sel_lock, flags); + mutex_unlock(&ris->msc->lock); + + kfree(mbwu_state); + } +} + +static int __allocate_component_cfg(struct mpam_component *comp) +{ + unsigned long flags; + struct mpam_msc_ris *ris; + struct msmon_mbwu_state *mbwu_state; + + if (comp->cfg) + return 0; + + comp->cfg = kcalloc(mpam_partid_max + 1, sizeof(*comp->cfg), GFP_KERNEL); + if (!comp->cfg) + return -ENOMEM; + + list_for_each_entry(ris, &comp->ris, comp_list) { + if (!ris->props.num_mbwu_mon) + continue; + + mbwu_state = kcalloc(ris->props.num_mbwu_mon, + sizeof(*ris->mbwu_state), GFP_KERNEL); + if (!mbwu_state) { + __destroy_component_cfg(comp); + return -ENOMEM; + } + + mutex_lock(&ris->msc->lock); + spin_lock_irqsave(&ris->msc->mon_sel_lock, flags); + ris->mbwu_state = mbwu_state; + spin_unlock_irqrestore(&ris->msc->mon_sel_lock, flags); + 
mutex_unlock(&ris->msc->lock); + } + + return 0; +} + +static int mpam_allocate_config(void) +{ + int err = 0; + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + list_for_each_entry(comp, &class->components, class_list) { + err = __allocate_component_cfg(comp); + if (err) + return err; + } + } + + return 0; +} + +static void mpam_enable_once(void) +{ + int err; + + /* + * If all the MSC have been probed, enabling the IRQs happens next. + * That involves cross-calling to a CPU that can reach the MSC, and + * the locks must be taken in this order: + */ + cpus_read_lock(); + mutex_lock(&mpam_list_lock); + do { + mpam_enable_merge_features(); + + err = mpam_allocate_config(); + if (err) { + pr_err("Failed to allocate configuration arrays.\n"); + break; + } + + err = mpam_register_irqs(); + if (err) { + pr_warn("Failed to register irqs: %d\n", err); + break; + } + } while (0); + mutex_unlock(&mpam_list_lock); + cpus_read_unlock(); + + if (!err) { + err = mpam_resctrl_setup(); + if (err) + pr_err("Failed to initialise resctrl: %d\n", err); + } + + if (err) { + schedule_work(&mpam_broken_work); + return; + } + + mutex_lock(&mpam_cpuhp_state_lock); + cpuhp_remove_state(mpam_cpuhp_state); + mpam_cpuhp_state = 0; + mutex_unlock(&mpam_cpuhp_state_lock); + + /* + * Once the cpuhp callbacks have been changed, mpam_partid_max can no + * longer change. + */ + spin_lock(&partid_max_lock); + partid_max_published = true; + spin_unlock(&partid_max_lock); + + static_branch_enable(&mpam_enabled); + mpam_register_cpuhp_callbacks(mpam_cpu_online); + + pr_info("MPAM enabled with %u partid and %u pmg\n", + READ_ONCE(mpam_partid_max) + 1, READ_ONCE(mpam_pmg_max) + 1); +} + +void mpam_reset_class(struct mpam_class *class) +{ + int idx; + struct mpam_msc_ris *ris; + struct mpam_component *comp; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(comp, &class->components, class_list) { + memset(comp->cfg, 0, ((mpam_partid_max + 1) * sizeof(*comp->cfg))); + + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + mutex_lock(&ris->msc->lock); + mpam_touch_msc(ris->msc, mpam_reset_ris, ris); + mutex_unlock(&ris->msc->lock); + ris->in_reset_state = true; + } + } + srcu_read_unlock(&mpam_srcu, idx); +} + +/* + * Called in response to an error IRQ. + * All of MPAMs errors indicate a software bug, restore any modified + * controls to their reset values. + */ +void mpam_disable(struct work_struct *ignored) +{ + int idx; + struct mpam_class *class; + + mutex_lock(&mpam_cpuhp_state_lock); + if (mpam_cpuhp_state) { + cpuhp_remove_state(mpam_cpuhp_state); + mpam_cpuhp_state = 0; + } + mutex_unlock(&mpam_cpuhp_state_lock); + + mpam_resctrl_exit(); + + static_branch_disable(&mpam_enabled); + + mpam_unregister_irqs(); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) + mpam_reset_class(class); + srcu_read_unlock(&mpam_srcu, idx); +} + +/* + * Enable mpam once all devices have been probed. + * Scheduled by mpam_discovery_cpu_online() once all devices have been created. + * Also scheduled when new devices are probed when new CPUs come online. 
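+ * The atomic 'once' below ensures the enable path only runs to completion
+ * one time, however often the work is re-queued.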
+ */ +void mpam_enable(struct work_struct *work) +{ + static atomic_t once; + struct mpam_msc *msc; + bool all_devices_probed = true; + + mutex_lock(&mpam_list_lock); + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + mutex_lock(&msc->lock); + if (!msc->probed) + all_devices_probed = false; + mutex_unlock(&msc->lock); + + if (!all_devices_probed) + break; + } + mutex_unlock(&mpam_list_lock); + + if (all_devices_probed && !atomic_fetch_inc(&once)) + mpam_enable_once(); +} + +struct mpam_write_config_arg { + struct mpam_msc_ris *ris; + struct mpam_component *comp; + u16 partid; +}; + +static int __write_config(void *arg) +{ + struct mpam_write_config_arg *c = arg; + + mpam_reprogram_ris_partid(c->ris, c->partid, &c->comp->cfg[c->partid]); + + return 0; +} + +/* TODO: split into write_config/sync_config */ +/* TODO: add config_dirty bitmap to drive sync_config */ +int mpam_apply_config(struct mpam_component *comp, u16 partid, + struct mpam_config *cfg) +{ + struct mpam_write_config_arg arg; + struct mpam_msc_ris *ris; + int idx; + + lockdep_assert_cpus_held(); + + if (!memcmp(&comp->cfg[partid], cfg, sizeof(*cfg))) + return 0; + + comp->cfg[partid] = *cfg; + arg.comp = comp; + arg.partid = partid; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + arg.ris = ris; + mutex_lock(&ris->msc->lock); + mpam_touch_msc(ris->msc, __write_config, &arg); + mutex_unlock(&ris->msc->lock); + } + srcu_read_unlock(&mpam_srcu, idx); + + return 0; +} + +static const struct of_device_id mpam_of_match[] = { + { .compatible = "arm,mpam-msc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mpam_of_match); + +static struct platform_driver mpam_msc_driver = { + .driver = { + .name = "mpam_msc", + .of_match_table = of_match_ptr(mpam_of_match), + }, + .probe = mpam_msc_drv_probe, + .remove = mpam_msc_drv_remove, +}; + +/* + * MSC that are hidden under caches are not created as platform devices + * as there is no cache driver. Caches are also special-cased in + * get_msc_affinity(). + */ +static void mpam_dt_create_foundling_msc(void) +{ + int err; + struct device_node *cache; + + for_each_compatible_node(cache, NULL, "cache") { + err = of_platform_populate(cache, mpam_of_match, NULL, NULL); + if (err) { + pr_err("Failed to create MSC devices under caches\n"); + } + } +} + +static int __init arm64_mpam_register_cpus(void) +{ + u64 mpamidr = read_sysreg_s(SYS_MPAMIDR_EL1); + u16 partid_max = FIELD_GET(MPAMIDR_PARTID_MAX, mpamidr); + u8 pmg_max = FIELD_GET(MPAMIDR_PMG_MAX, mpamidr); + + return mpam_register_requestor(partid_max, pmg_max); +} + +static int __init mpam_msc_driver_init(void) +{ + bool mpam_not_available = false; + int err; + + if (!mpam_cpus_have_feature()) + return -EOPNOTSUPP; + + init_srcu_struct(&mpam_srcu); + + if (!acpi_disabled) + mpam_current_machine = acpi_mpam_get_machine_type(); + else + mpam_current_machine = mpam_dt_get_machine_type(); + + if (!acpi_disabled) + fw_num_msc = acpi_mpam_count_msc(); + else + fw_num_msc = mpam_dt_count_msc(); + + if (fw_num_msc <= 0) { + pr_err("No MSC devices found in firmware\n"); + return -EINVAL; + } + + /* + * Access MPAM system registers after MPAM ACPI table is parsed, since + * some BIOSs disable MPAM system registers accessing but export MPAM in + * ID_AA64PFR0_EL1. So we can only rely on the MPAM ACPI table to + * determine whether MPAM feature is enabled. 
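+	 * This is why MPAMIDR_EL1 is only read, via arm64_mpam_register_cpus(),
+	 * once acpi_mpam_count_msc() has found at least one MSC in the table.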
+ */ + err = arm64_mpam_register_cpus(); + if (err) + return err; + + /* + * If the MPAM CPU interface is not implemented, or reserved by + * firmware, there is no point touching the rest of the hardware. + */ + spin_lock(&partid_max_lock); + if (!partid_max_init || (!mpam_partid_max && !mpam_pmg_max)) + mpam_not_available = true; + spin_unlock(&partid_max_lock); + + if (mpam_not_available) + return 0; + + if (acpi_disabled) + mpam_dt_create_foundling_msc(); + + return platform_driver_register(&mpam_msc_driver); +} +/* Must occur after arm64_mpam_register_cpus() from arch_initcall() */ +subsys_initcall(mpam_msc_driver_init); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..d84413e5e03150901f223cf90e2df82580700a76 --- /dev/null +++ b/drivers/platform/mpam/mpam_internal.h @@ -0,0 +1,585 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd. + +#ifndef MPAM_INTERNAL_H +#define MPAM_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +DECLARE_STATIC_KEY_FALSE(mpam_enabled); + +/* Value to indicate the allocated monitor is derived from the RMID index. */ +#define USE_RMID_IDX (U16_MAX + 1) + +/* + * Only these event configuration bits are supported. MPAM can't know if + * data is being written back, these will show up as a write. + */ +#define MPAM_RESTRL_EVT_CONFIG_VALID (READS_TO_LOCAL_MEM | NON_TEMP_WRITE_TO_LOCAL_MEM) + +static inline bool mpam_is_enabled(void) +{ + return static_branch_likely(&mpam_enabled); +} + +struct mpam_msc +{ + /* member of mpam_all_msc */ + struct list_head glbl_list; + + int id; + struct platform_device *pdev; + + /* Not modified after mpam_is_enabled() becomes true */ + enum mpam_msc_iface iface; + u32 pcc_subspace_id; + struct mbox_client pcc_cl; + struct pcc_mbox_chan *pcc_chan; + u32 nrdy_usec; + cpumask_t accessibility; + bool has_extd_esr; + + int reenable_error_ppi; + struct mpam_msc * __percpu *error_dev_id; + + atomic_t online_refs; + + struct mutex lock; + bool probed; + bool error_irq_requested; + bool error_irq_hw_enabled; + u16 partid_max; + u8 pmg_max; + unsigned long ris_idxs[128 / BITS_PER_LONG]; + u32 ris_max; + + /* mpam_msc_ris of this component */ + struct list_head ris; + + /* + * part_sel_lock protects access to the MSC hardware registers that are + * affected by MPAMCFG_PART_SEL. (including the ID registers) + * If needed, take msc->lock first. + */ + spinlock_t part_sel_lock; + spinlock_t mon_sel_lock; + void __iomem * mapped_hwpage; + size_t mapped_hwpage_sz; +}; + +/* + * When we compact the supported features, we don't care what they are. + * Storing them as a bitmap makes life easy. + */ +typedef u32 mpam_features_t; + +/* Bits for mpam_features_t */ +enum mpam_device_features { + mpam_feat_ccap_part = 0, + mpam_feat_cpor_part, + mpam_feat_mbw_part, + mpam_feat_mbw_min, + mpam_feat_mbw_max, + mpam_feat_mbw_prop, + mpam_feat_intpri_part, + mpam_feat_intpri_part_0_low, + mpam_feat_dspri_part, + mpam_feat_dspri_part_0_low, + mpam_feat_msmon, + mpam_feat_msmon_csu, + mpam_feat_msmon_csu_capture, + /* + * Having mpam_feat_msmon_mbwu set doesn't mean the regular 31 bit MBWU + * counter would be used. The exact counter used is decided based on the + * status of mpam_feat_msmon_mbwu_l/mpam_feat_msmon_mbwu_lwd as well. 
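+	 * (mpam_feat_msmon_mbwu_l and mpam_feat_msmon_mbwu_lwd appear below
+	 * as mpam_feat_msmon_mbwu_44counter and mpam_feat_msmon_mbwu_63counter
+	 * respectively.)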
+ */ + mpam_feat_msmon_mbwu, + mpam_feat_msmon_mbwu_44counter, + mpam_feat_msmon_mbwu_63counter, + mpam_feat_msmon_mbwu_capture, + mpam_feat_msmon_mbwu_rwbw, + mpam_feat_msmon_capt, + mpam_feat_impl_msmon_mbwu, + mpam_feat_partid_nrw, + MPAM_FEATURE_LAST, +}; +#define MPAM_ALL_FEATURES ((1<features) +#define mpam_set_feature(_feat, x) ((x)->features |= (1<<_feat)) + +static inline void mpam_clear_feature(enum mpam_device_features feat, + mpam_features_t *supported) +{ + *supported &= ~(1<lock. + * Changes to reset_on_next_read, prev_val and correction are protected by the + * msc's mon_sel_lock. + */ +struct msmon_mbwu_state { + bool enabled; + bool reset_on_next_read; + struct mon_cfg cfg; + + /* The value last read from the hardware. Used to detect overflow. */ + u64 prev_val; + + /* + * The value to add to the new reading to account for power management, + * and shifts to trigger the overflow interrupt. + */ + u64 correction; +}; + +struct mpam_msc_ris { + u8 ris_idx; + u64 idr; + struct mpam_props props; + bool in_reset_state; + + cpumask_t affinity; + + /* member of mpam_component:ris */ + struct list_head comp_list; + + /* member of mpam_msc:ris */ + struct list_head msc_list; + + /* parents: */ + struct mpam_msc *msc; + struct mpam_component *comp; + + /* msmon mbwu configuration is preserved over reset */ + struct msmon_mbwu_state *mbwu_state; +}; + +struct mpam_resctrl_dom { + struct mpam_component *comp; + struct rdt_domain resctrl_dom; + + u32 mbm_local_evt_cfg; +}; + +struct mpam_resctrl_res { + struct mpam_class *class; + struct rdt_resource resctrl_res; +}; + +static inline int mpam_alloc_csu_mon(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_csu, cprops)) + return -EOPNOTSUPP; + + return ida_alloc_range(&class->ida_csu_mon, 0, cprops->num_csu_mon - 1, + GFP_KERNEL); +} + +static inline void mpam_free_csu_mon(struct mpam_class *class, int csu_mon) +{ + ida_free(&class->ida_csu_mon, csu_mon); +} + +static inline int mpam_alloc_mbwu_mon(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_mbwu, cprops)) + return -EOPNOTSUPP; + + return ida_alloc_range(&class->ida_mbwu_mon, 0, + cprops->num_mbwu_mon - 1, GFP_KERNEL); +} + +static inline void mpam_free_mbwu_mon(struct mpam_class *class, int mbwu_mon) +{ + ida_free(&class->ida_mbwu_mon, mbwu_mon); +} + +/* List of all classes */ +extern struct list_head mpam_classes; +extern struct srcu_struct mpam_srcu; + +/* System wide partid/pmg values */ +extern u16 mpam_partid_max; +extern u8 mpam_pmg_max; + +/* Scheduled work callback to enable mpam once all MSC have been probed */ +void mpam_enable(struct work_struct *work); +void mpam_disable(struct work_struct *work); + +void mpam_reset_class(struct mpam_class *class); + +int mpam_apply_config(struct mpam_component *comp, u16 partid, + struct mpam_config *cfg); + +int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, + enum mpam_device_features, u64 *val); +void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx); +void mpam_msmon_reset_all_mbwu(struct mpam_component *comp); + +int mpam_resctrl_online_cpu(unsigned int cpu); +int mpam_resctrl_offline_cpu(unsigned int cpu); + +int mpam_resctrl_setup(void); +void mpam_resctrl_exit(void); + +/* + * MPAM MSCs have the following register layout. See: + * Arm Architecture Reference Manual Supplement - Memory System Resource + * Partitioning and Monitoring (MPAM), for Armv8-A. 
DDI 0598A.a + */ +#define MPAM_ARCHITECTURE_V1 0x10 +#define MPAM_MIN_MMIO_SIZE 0x3000 + +/* Memory mapped control pages: */ +/* ID Register offsets in the memory mapped page */ +#define MPAMF_IDR 0x0000 /* features id register */ +#define MPAMF_MSMON_IDR 0x0080 /* performance monitoring features */ +#define MPAMF_IMPL_IDR 0x0028 /* imp-def partitioning */ +#define MPAMF_CPOR_IDR 0x0030 /* cache-portion partitioning */ +#define MPAMF_CCAP_IDR 0x0038 /* cache-capacity partitioning */ +#define MPAMF_MBW_IDR 0x0040 /* mem-bw partitioning */ +#define MPAMF_PRI_IDR 0x0048 /* priority partitioning */ +#define MPAMF_CSUMON_IDR 0x0088 /* cache-usage monitor */ +#define MPAMF_MBWUMON_IDR 0x0090 /* mem-bw usage monitor */ +#define MPAMF_PARTID_NRW_IDR 0x0050 /* partid-narrowing */ +#define MPAMF_IIDR 0x0018 /* implementer id register */ +#define MPAMF_AIDR 0x0020 /* architectural id register */ + +/* Configuration and Status Register offsets in the memory mapped page */ +#define MPAMCFG_PART_SEL 0x0100 /* partid to configure: */ +#define MPAMCFG_CPBM 0x1000 /* cache-portion config */ +#define MPAMCFG_CMAX 0x0108 /* cache-capacity config */ +#define MPAMCFG_MBW_MIN 0x0200 /* min mem-bw config */ +#define MPAMCFG_MBW_MAX 0x0208 /* max mem-bw config */ +#define MPAMCFG_MBW_WINWD 0x0220 /* mem-bw accounting window config */ +#define MPAMCFG_MBW_PBM 0x2000 /* mem-bw portion bitmap config */ +#define MPAMCFG_PRI 0x0400 /* priority partitioning config */ +#define MPAMCFG_MBW_PROP 0x0500 /* mem-bw stride config */ +#define MPAMCFG_INTPARTID 0x0600 /* partid-narrowing config */ + +#define MSMON_CFG_MON_SEL 0x0800 /* monitor selector */ +#define MSMON_CFG_CSU_FLT 0x0810 /* cache-usage monitor filter */ +#define MSMON_CFG_CSU_CTL 0x0818 /* cache-usage monitor config */ +#define MSMON_CFG_MBWU_FLT 0x0820 /* mem-bw monitor filter */ +#define MSMON_CFG_MBWU_CTL 0x0828 /* mem-bw monitor config */ +#define MSMON_CSU 0x0840 /* current cache-usage */ +#define MSMON_CSU_CAPTURE 0x0848 /* last cache-usage value captured */ +#define MSMON_MBWU 0x0860 /* current mem-bw usage value */ +#define MSMON_MBWU_CAPTURE 0x0868 /* last mem-bw value captured */ +#define MSMON_MBWU_L 0x0880 /* current long mem-bw usage value */ +#define MSMON_MBWU_CAPTURE_L 0x0890 /* last long mem-bw value captured */ +#define MSMON_CAPT_EVNT 0x0808 /* signal a capture event */ +#define MPAMF_ESR 0x00F8 /* error status register */ +#define MPAMF_ECR 0x00F0 /* error control register */ + +/* MPAMF_IDR - MPAM features ID register */ +#define MPAMF_IDR_PARTID_MAX GENMASK(15, 0) +#define MPAMF_IDR_PMG_MAX GENMASK(23, 16) +#define MPAMF_IDR_HAS_CCAP_PART BIT(24) +#define MPAMF_IDR_HAS_CPOR_PART BIT(25) +#define MPAMF_IDR_HAS_MBW_PART BIT(26) +#define MPAMF_IDR_HAS_PRI_PART BIT(27) +#define MPAMF_IDR_HAS_EXT BIT(28) +#define MPAMF_IDR_HAS_IMPL_IDR BIT(29) +#define MPAMF_IDR_HAS_MSMON BIT(30) +#define MPAMF_IDR_HAS_PARTID_NRW BIT(31) +#define MPAMF_IDR_HAS_RIS BIT(32) +#define MPAMF_IDR_HAS_EXT_ESR BIT(38) +#define MPAMF_IDR_HAS_ESR BIT(39) +#define MPAMF_IDR_RIS_MAX GENMASK(59, 56) + + +/* MPAMF_MSMON_IDR - MPAM performance monitoring ID register */ +#define MPAMF_MSMON_IDR_MSMON_CSU BIT(16) +#define MPAMF_MSMON_IDR_MSMON_MBWU BIT(17) +#define MPAMF_MSMON_IDR_HAS_LOCAL_CAPT_EVNT BIT(31) + +/* MPAMF_CPOR_IDR - MPAM features cache portion partitioning ID register */ +#define MPAMF_CPOR_IDR_CPBM_WD GENMASK(15, 0) + +/* MPAMF_CCAP_IDR - MPAM features cache capacity partitioning ID register */ +#define MPAMF_CCAP_IDR_CMAX_WD GENMASK(5, 0) + +/* MPAMF_MBW_IDR - 
MPAM features memory bandwidth partitioning ID register */ +#define MPAMF_MBW_IDR_BWA_WD GENMASK(5, 0) +#define MPAMF_MBW_IDR_HAS_MIN BIT(10) +#define MPAMF_MBW_IDR_HAS_MAX BIT(11) +#define MPAMF_MBW_IDR_HAS_PBM BIT(12) +#define MPAMF_MBW_IDR_HAS_PROP BIT(13) +#define MPAMF_MBW_IDR_WINDWR BIT(14) +#define MPAMF_MBW_IDR_BWPBM_WD GENMASK(28, 16) + +/* MPAMF_PRI_IDR - MPAM features priority partitioning ID register */ +#define MPAMF_PRI_IDR_HAS_INTPRI BIT(0) +#define MPAMF_PRI_IDR_INTPRI_0_IS_LOW BIT(1) +#define MPAMF_PRI_IDR_INTPRI_WD GENMASK(9, 4) +#define MPAMF_PRI_IDR_HAS_DSPRI BIT(16) +#define MPAMF_PRI_IDR_DSPRI_0_IS_LOW BIT(17) +#define MPAMF_PRI_IDR_DSPRI_WD GENMASK(25, 20) + +/* MPAMF_CSUMON_IDR - MPAM cache storage usage monitor ID register */ +#define MPAMF_CSUMON_IDR_NUM_MON GENMASK(15, 0) +#define MPAMF_CSUMON_IDR_HAS_CAPTURE BIT(31) + +/* MPAMF_MBWUMON_IDR - MPAM memory bandwidth usage monitor ID register */ +#define MPAMF_MBWUMON_IDR_NUM_MON GENMASK(15, 0) +#define MPAMF_MBWUMON_IDR_HAS_RWBW BIT(28) +#define MPAMF_MBWUMON_IDR_LWD BIT(29) +#define MPAMF_MBWUMON_IDR_HAS_LONG BIT(30) +#define MPAMF_MBWUMON_IDR_HAS_CAPTURE BIT(31) + +/* MPAMF_PARTID_NRW_IDR - MPAM PARTID narrowing ID register */ +#define MPAMF_PARTID_NRW_IDR_INTPARTID_MAX GENMASK(15, 0) + +/* MPAMF_IIDR - MPAM implementation ID register */ +#define MPAMF_IIDR_PRODUCTID GENMASK(31, 20) +#define MPAMF_IIDR_PRODUCTID_SHIFT 20 +#define MPAMF_IIDR_VARIANT GENMASK(19, 16) +#define MPAMF_IIDR_VARIANT_SHIFT 16 +#define MPAMF_IIDR_REVISON GENMASK(15, 12) +#define MPAMF_IIDR_REVISON_SHIFT 12 +#define MPAMF_IIDR_IMPLEMENTER GENMASK(11, 0) +#define MPAMF_IIDR_IMPLEMENTER_SHIFT 0 + +/* MPAMF_AIDR - MPAM architecture ID register */ +#define MPAMF_AIDR_ARCH_MAJOR_REV GENMASK(7, 4) +#define MPAMF_AIDR_ARCH_MINOR_REV GENMASK(3, 0) + +/* MPAMCFG_PART_SEL - MPAM partition configuration selection register */ +#define MPAMCFG_PART_SEL_PARTID_SEL GENMASK(15, 0) +#define MPAMCFG_PART_SEL_INTERNAL BIT(16) +#define MPAMCFG_PART_SEL_RIS GENMASK(27, 24) + +/* MPAMCFG_CMAX - MPAM cache portion bitmap partition configuration register */ +#define MPAMCFG_CMAX_CMAX GENMASK(15, 0) + +/* + * MPAMCFG_MBW_MIN - MPAM memory minimum bandwidth partitioning configuration + * register + */ +#define MPAMCFG_MBW_MIN_MIN GENMASK(15, 0) + +/* + * MPAMCFG_MBW_MAX - MPAM memory maximum bandwidth partitioning configuration + * register + */ +#define MPAMCFG_MBW_MAX_MAX GENMASK(15, 0) +#define MPAMCFG_MBW_MAX_HARDLIM BIT(31) + +/* + * MPAMCFG_MBW_WINWD - MPAM memory bandwidth partitioning window width + * register + */ +#define MPAMCFG_MBW_WINWD_US_FRAC GENMASK(7, 0) +#define MPAMCFG_MBW_WINWD_US_INT GENMASK(23, 8) + + +/* MPAMCFG_PRI - MPAM priority partitioning configuration register */ +#define MPAMCFG_PRI_INTPRI GENMASK(15, 0) +#define MPAMCFG_PRI_DSPRI GENMASK(31, 16) + +/* + * MPAMCFG_MBW_PROP - Memory bandwidth proportional stride partitioning + * configuration register + */ +#define MPAMCFG_MBW_PROP_STRIDEM1 GENMASK(15, 0) +#define MPAMCFG_MBW_PROP_EN BIT(31) + +/* + * MPAMCFG_INTPARTID - MPAM internal partition narrowing configuration register + */ +#define MPAMCFG_INTPARTID_INTPARTID GENMASK(15, 0) +#define MPAMCFG_INTPARTID_INTERNAL BIT(16) + +/* MSMON_CFG_MON_SEL - Memory system performance monitor selection register */ +#define MSMON_CFG_MON_SEL_MON_SEL GENMASK(7, 0) +#define MSMON_CFG_MON_SEL_RIS GENMASK(27, 24) + +/* MPAMF_ESR - MPAM Error Status Register */ +#define MPAMF_ESR_PARTID_OR_MON GENMASK(15, 0) +#define MPAMF_ESR_PMG GENMASK(23, 16) 
+#define MPAMF_ESR_ERRCODE GENMASK(27, 24) +#define MPAMF_ESR_OVRWR BIT(31) +#define MPAMF_ESR_RIS GENMASK(35, 32) + +/* MPAMF_ECR - MPAM Error Control Register */ +#define MPAMF_ECR_INTEN BIT(0) + +/* Error conditions in accessing memory mapped registers */ +#define MPAM_ERRCODE_NONE 0 +#define MPAM_ERRCODE_PARTID_SEL_RANGE 1 +#define MPAM_ERRCODE_REQ_PARTID_RANGE 2 +#define MPAM_ERRCODE_MSMONCFG_ID_RANGE 3 +#define MPAM_ERRCODE_REQ_PMG_RANGE 4 +#define MPAM_ERRCODE_MONITOR_RANGE 5 +#define MPAM_ERRCODE_INTPARTID_RANGE 6 +#define MPAM_ERRCODE_UNEXPECTED_INTERNAL 7 + +/* + * MSMON_CFG_CSU_FLT - Memory system performance monitor configure cache storage + * usage monitor filter register + */ +#define MSMON_CFG_CSU_FLT_PARTID GENMASK(15, 0) +#define MSMON_CFG_CSU_FLT_PMG GENMASK(23, 16) + +/* + * MSMON_CFG_CSU_CTL - Memory system performance monitor configure cache storage + * usage monitor control register + * MSMON_CFG_MBWU_CTL - Memory system performance monitor configure memory + * bandwidth usage monitor control register + */ +#define MSMON_CFG_x_CTL_TYPE GENMASK(7, 0) +#define MSMON_CFG_x_CTL_MATCH_PARTID BIT(16) +#define MSMON_CFG_x_CTL_MATCH_PMG BIT(17) +#define MSMON_CFG_x_CTL_SCLEN BIT(19) +#define MSMON_CFG_x_CTL_SUBTYPE GENMASK(23, 20) +#define MSMON_CFG_x_CTL_OFLOW_FRZ BIT(24) +#define MSMON_CFG_x_CTL_OFLOW_INTR BIT(25) +#define MSMON_CFG_x_CTL_OFLOW_STATUS BIT(26) +#define MSMON_CFG_x_CTL_CAPT_RESET BIT(27) +#define MSMON_CFG_x_CTL_CAPT_EVNT GENMASK(30, 28) +#define MSMON_CFG_x_CTL_EN BIT(31) + +#define MSMON_CFG_MBWU_CTL_TYPE_MBWU 0x42 +#define MSMON_CFG_MBWU_CTL_TYPE_CSU 0x43 + +#define MSMON_CFG_MBWU_CTL_SUBTYPE_NONE 0 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_READ 1 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_WRITE 2 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_BOTH 3 + +#define MSMON_CFG_MBWU_CTL_SUBTYPE_MAX 3 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_MASK 0x3 + +/* + * MSMON_CFG_MBWU_FLT - Memory system performance monitor configure memory + * bandwidth usage monitor filter register + */ +#define MSMON_CFG_MBWU_FLT_PARTID GENMASK(15, 0) +#define MSMON_CFG_MBWU_FLT_PMG GENMASK(23, 16) +#define MSMON_CFG_MBWU_FLT_RWBW GENMASK(31, 30) + +/* + * MSMON_CSU - Memory system performance monitor cache storage usage monitor + * register + * MSMON_CSU_CAPTURE - Memory system performance monitor cache storage usage + * capture register + * MSMON_MBWU - Memory system performance monitor memory bandwidth usage + * monitor register + * MSMON_MBWU_CAPTURE - Memory system performance monitor memory bandwidth usage + * capture register + */ +#define MSMON___VALUE GENMASK(30, 0) +#define MSMON___NRDY BIT(31) +#define MSMON___NRDY_L BIT(63) +#define MSMON___L_VALUE GENMASK(43, 0) +#define MSMON___LWD_VALUE GENMASK(62, 0) + +/* + * MSMON_CAPT_EVNT - Memory system performance monitoring capture event + * generation register + */ +#define MSMON_CAPT_EVNT_NOW BIT(0) + +/* Used for PTG Yitian710 specific MB monitoring feature */ +#define MBWU_MASK GENMASK(23, 0) +#define MBWU_WINWD_MAX GENMASK(22, 0) +#define MBWU_GET(v) ((v) & MBWU_MASK) +#define MPAMF_CUST_MBWC_OFFSET 0x08 +#define MPAMF_CUST_WINDW_OFFSET 0x0C + +#endif /* MPAM_INTERNAL_H */ diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..98b3b1baa91e1319289e6a2050e08519f3d84e1b --- /dev/null +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -0,0 +1,1267 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd. 
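+//
+// Glue between the MPAM driver and resctrl: this file implements the
+// resctrl_arch_*() hooks using the classes and components the MPAM
+// driver core has enumerated.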
+
+#define pr_fmt(fmt) "mpam: resctrl: " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "mpam_internal.h"
+
+u64 mpam_resctrl_default_group;
+
+DECLARE_WAIT_QUEUE_HEAD(resctrl_mon_ctx_waiters);
+
+/*
+ * The classes we've picked to map to resctrl resources.
+ * Class pointer may be NULL.
+ */
+static struct mpam_resctrl_res mpam_resctrl_exports[RDT_NUM_RESOURCES];
+
+static bool exposed_alloc_capable;
+static bool exposed_mon_capable;
+static struct mpam_class *mbm_local_class;
+static struct mpam_class *mbm_total_class;
+static struct mpam_class *mbm_bps_class;
+
+/*
+ * MPAM emulates CDP by setting different PARTIDs in the I/D fields of
+ * MPAM1_EL1. This applies globally to all traffic the CPU generates.
+ */
+static bool cdp_enabled;
+
+/*
+ * If resctrl_init() succeeded, resctrl_exit() can be used to remove support
+ * for the filesystem in the event of an error.
+ */
+static bool resctrl_enabled;
+
+/*
+ * mpam_resctrl_pick_caches() needs to know the size of the caches. cacheinfo
+ * populates this from a device_initcall(). mpam_resctrl_setup() must wait.
+ */
+static bool cacheinfo_ready;
+static DECLARE_WAIT_QUEUE_HEAD(wait_cacheinfo_ready);
+
+/* A dummy mon context to use when the monitors were allocated up front */
+u32 __mon_is_rmid_idx = USE_RMID_IDX;
+void *mon_is_rmid_idx = &__mon_is_rmid_idx;
+
+bool resctrl_arch_alloc_capable(void)
+{
+	return exposed_alloc_capable;
+}
+
+bool resctrl_arch_mon_capable(void)
+{
+	return exposed_mon_capable;
+}
+
+bool resctrl_arch_is_mbm_local_enabled(void)
+{
+	return mbm_local_class;
+}
+
+bool resctrl_arch_is_mbm_total_enabled(void)
+{
+	return mbm_total_class;
+}
+
+bool resctrl_arch_is_mbm_bps_enabled(void)
+{
+	return mbm_bps_class;
+}
+
+bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level rid)
+{
+	switch (rid) {
+	case RDT_RESOURCE_L2:
+	case RDT_RESOURCE_L3:
+		return cdp_enabled;
+	case RDT_RESOURCE_MBA:
+	default:
+		/*
+		 * x86's MBA control doesn't support CDP, so user-space doesn't
+		 * expect it.
+		 */
+		return false;
+	}
+}
+
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable)
+{
+	u64 regval;
+	u32 partid, partid_i, partid_d;
+
+	cdp_enabled = enable;
+
+	partid = RESCTRL_RESERVED_CLOSID;
+
+	if (enable) {
+		partid_d = resctrl_get_config_index(partid, CDP_DATA);
+		partid_i = resctrl_get_config_index(partid, CDP_CODE);
+		regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d) |
+			 FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i);
+
+	} else {
+		regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid) |
+			 FIELD_PREP(MPAM_SYSREG_PARTID_I, partid);
+	}
+
+	WRITE_ONCE(mpam_resctrl_default_group, regval);
+
+	return 0;
+}
+
+static bool mpam_resctrl_hide_cdp(enum resctrl_res_level rid)
+{
+	return cdp_enabled && !resctrl_arch_get_cdp_enabled(rid);
+}
+
+/*
+ * MSC may raise an error interrupt if it sees an out-of-range partid/pmg,
+ * and go on to truncate the value. Regardless of what the hardware supports,
+ * only the system-wide safe value is safe to use.
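+ * e.g. with mpam_partid_max = 63 and mpam_pmg_max = 3, resctrl sees
+ * 64 closids and fls(3) = 2 bits of pmg, i.e. 64 << 2 = 256 monitor
+ * indexes, regardless of what an individual MSC advertises.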
+ */ +u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) +{ + return mpam_partid_max + 1; +} + +u32 resctrl_arch_system_num_rmid_idx(void) +{ + u8 closid_shift = fls(mpam_pmg_max); + u32 num_partid = resctrl_arch_get_num_closid(NULL); + + return num_partid << closid_shift; +} + +u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid) +{ + u8 closid_shift = fls(mpam_pmg_max); + + if (WARN_ON_ONCE(closid_shift > 8)) + closid_shift = 8; + + return (closid << closid_shift) | rmid; +} + +void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid) +{ + u8 closid_shift = fls(mpam_pmg_max); + u32 pmg_mask = ~(~0 << closid_shift); + + if (WARN_ON_ONCE(closid_shift > 8)) + closid_shift = 8; + + *closid = idx >> closid_shift; + *rmid = idx & pmg_mask; +} + +void resctrl_arch_sched_in(struct task_struct *tsk) +{ + lockdep_assert_preemption_disabled(); + + mpam_thread_switch(tsk); +} + +void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 pmg) +{ + if (WARN_ON_ONCE(closid > U16_MAX) || WARN_ON_ONCE(pmg > U8_MAX)) + return; + + if (!cdp_enabled) { + mpam_set_cpu_defaults(cpu, closid, closid, pmg, pmg); + } else { + /* + * When CDP is enabled, resctrl halves the closid range and we + * use odd/even partid for one closid. + */ + u32 partid_d = resctrl_get_config_index(closid, CDP_DATA); + u32 partid_i = resctrl_get_config_index(closid, CDP_CODE); + + mpam_set_cpu_defaults(cpu, partid_d, partid_i, pmg, pmg); + } +} + +void resctrl_arch_sync_cpu_defaults(void *info) +{ + struct resctrl_cpu_sync *r = info; + + lockdep_assert_preemption_disabled(); + + if (r) { + resctrl_arch_set_cpu_default_closid_rmid(smp_processor_id(), + r->closid, r->rmid); + } + + resctrl_arch_sched_in(current); +} + +void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid) +{ + + + if (WARN_ON_ONCE(closid > U16_MAX) || WARN_ON_ONCE(rmid > U8_MAX)) + return; + + if (!cdp_enabled) { + mpam_set_task_partid_pmg(tsk, closid, closid, rmid, rmid); + } else { + u32 partid_d = resctrl_get_config_index(closid, CDP_DATA); + u32 partid_i = resctrl_get_config_index(closid, CDP_CODE); + + mpam_set_task_partid_pmg(tsk, partid_d, partid_i, rmid, rmid); + } +} + +bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid) +{ + u64 regval = mpam_get_regval(tsk); + u32 tsk_closid = FIELD_GET(MPAM_SYSREG_PARTID_D, regval); + + if (cdp_enabled) + tsk_closid >>= 1; + + return tsk_closid == closid; +} + +/* The task's pmg is not unique, the partid must be considered too */ +bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid) +{ + u64 regval = mpam_get_regval(tsk); + u32 tsk_closid = FIELD_GET(MPAM_SYSREG_PARTID_D, regval); + u32 tsk_rmid = FIELD_GET(MPAM_SYSREG_PMG_D, regval); + + if (cdp_enabled) + tsk_closid >>= 1; + + return (tsk_closid == closid) && (tsk_rmid == rmid); +} + +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) +{ + if (l >= RDT_NUM_RESOURCES) + return NULL; + + return &mpam_resctrl_exports[l].resctrl_res; +} + +static void *resctrl_arch_mon_ctx_alloc_no_wait(struct rdt_resource *r, + int evtid) +{ + struct mpam_resctrl_res *res; + u32 *ret = kmalloc(sizeof(*ret), GFP_KERNEL); + + if (!ret) + return ERR_PTR(-ENOMEM); + + switch (evtid) { + case QOS_L3_OCCUP_EVENT_ID: + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + + *ret = mpam_alloc_csu_mon(res->class); + return ret; + case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_L3_MBM_TOTAL_EVENT_ID: + return mon_is_rmid_idx; + case QOS_MC_MBM_BPS_EVENT_ID: + if 
(mpam_current_machine == MPAM_YITIAN710) + return mon_is_rmid_idx; + return ERR_PTR(-EOPNOTSUPP); + } + + return ERR_PTR(-EOPNOTSUPP); +} + +void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid) +{ + DEFINE_WAIT(wait); + void *ret; + + might_sleep(); + + do { + prepare_to_wait(&resctrl_mon_ctx_waiters, &wait, + TASK_INTERRUPTIBLE); + ret = resctrl_arch_mon_ctx_alloc_no_wait(r, evtid); + if (PTR_ERR(ret) == -ENOSPC) + schedule(); + } while (PTR_ERR(ret) == -ENOSPC && !signal_pending(current)); + finish_wait(&resctrl_mon_ctx_waiters, &wait); + + return ret; +} + +void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, + void *arch_mon_ctx) +{ + struct mpam_resctrl_res *res; + u32 mon = *(u32 *)arch_mon_ctx; + + if (mon == USE_RMID_IDX) + return; + kfree(arch_mon_ctx); + arch_mon_ctx = NULL; + + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + + switch (evtid) { + case QOS_L3_OCCUP_EVENT_ID: + mpam_free_csu_mon(res->class, mon); + wake_up(&resctrl_mon_ctx_waiters); + return; + case QOS_L3_MBM_TOTAL_EVENT_ID: + case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_MC_MBM_BPS_EVENT_ID: + return; + } +} + +static enum mon_filter_options resctrl_evt_config_to_mpam(u32 local_evt_cfg) +{ + switch (local_evt_cfg) { + case READS_TO_LOCAL_MEM: + return COUNT_READ; + case NON_TEMP_WRITE_TO_LOCAL_MEM: + return COUNT_WRITE; + default: + return COUNT_BOTH; + } +} + +int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid, enum resctrl_event_id eventid, + u64 *val, void *arch_mon_ctx) +{ + int err; + u64 cdp_val; + struct mon_cfg cfg; + struct mpam_resctrl_dom *dom; + u32 mon = *(u32 *)arch_mon_ctx; + enum mpam_device_features type; + + resctrl_arch_rmid_read_context_check(); + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + switch (eventid) { + case QOS_L3_OCCUP_EVENT_ID: + type = mpam_feat_msmon_csu; + break; + case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_L3_MBM_TOTAL_EVENT_ID: + type = mpam_feat_msmon_mbwu; + break; + case QOS_MC_MBM_BPS_EVENT_ID: + if (mpam_current_machine == MPAM_YITIAN710) + type = mpam_feat_impl_msmon_mbwu; + break; + default: + return -EINVAL; + } + + if (mon == USE_RMID_IDX) + cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid); + else + cfg.mon = mon; + + cfg.match_pmg = true; + cfg.pmg = rmid; + cfg.opts = resctrl_evt_config_to_mpam(dom->mbm_local_evt_cfg); + + if (cdp_enabled) { + cfg.partid = closid << 1; + err = mpam_msmon_read(dom->comp, &cfg, type, val); + if (err) + return err; + + cfg.partid += 1; + err = mpam_msmon_read(dom->comp, &cfg, type, &cdp_val); + if (!err) + *val += cdp_val; + } else { + cfg.partid = closid; + err = mpam_msmon_read(dom->comp, &cfg, type, val); + } + + return err; +} + +void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid, enum resctrl_event_id eventid) +{ + struct mon_cfg cfg; + struct mpam_resctrl_dom *dom; + + if (eventid != QOS_L3_MBM_LOCAL_EVENT_ID) + return; + + cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid); + cfg.match_pmg = true; + cfg.pmg = rmid; + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + if (cdp_enabled) { + cfg.partid = closid << 1; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + + cfg.partid += 1; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + } else { + cfg.partid = closid; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + } +} + +/* + * The rmid realloc threshold should be for the smallest cache exposed to + * resctrl. 
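+ * e.g. a 32MB L3 shared by 256 partid*pmg indexes gives a realloc
+ * threshold of 128KB of cache per monitor group.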
+ */ +static void update_rmid_limits(unsigned int size) +{ + u32 num_unique_pmg = resctrl_arch_system_num_rmid_idx(); + + if (WARN_ON_ONCE(!size)) + return; + + if (resctrl_rmid_realloc_limit && size > resctrl_rmid_realloc_limit) + return; + + resctrl_rmid_realloc_limit = size; + resctrl_rmid_realloc_threshold = size / num_unique_pmg; +} + +static bool cache_has_usable_cpor(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_cpor_part, cprops)) + return false; + + /* TODO: Scaling is not yet supported */ + return (class->props.cpbm_wd <= RESCTRL_MAX_CBM); +} + +static bool cache_has_usable_csu(struct mpam_class *class) +{ + struct mpam_props *cprops; + + if (!class) + return false; + + cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_csu, cprops)) + return false; + + /* + * CSU counters settle on the value, so we can get away with + * having only one. + */ + if (!cprops->num_csu_mon) + return false; + + return (mpam_partid_max > 1) || (mpam_pmg_max != 0); +} + +bool resctrl_arch_is_llc_occupancy_enabled(void) +{ + return cache_has_usable_csu(mpam_resctrl_exports[RDT_RESOURCE_L3].class); +} + +static bool class_has_usable_mbwu(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_mbwu, cprops)) + return false; + + /* + * resctrl expects the bandwidth counters to be free running, + * which means we need as many monitors as resctrl has + * control/monitor groups. + */ + if (cprops->num_mbwu_mon < resctrl_arch_system_num_rmid_idx()) + return false; + + return (mpam_partid_max > 1) || (mpam_pmg_max != 0); +} + +static bool class_has_usable_impl_mbwu(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_impl_msmon_mbwu, cprops)) + return false; + + return true; +} + +static bool mba_class_use_mbw_part(struct mpam_props *cprops) +{ + /* TODO: Scaling is not yet supported */ + return (mpam_has_feature(mpam_feat_mbw_part, cprops) && + cprops->mbw_pbm_bits < MAX_MBA_BW); +} + +static bool class_has_usable_mba(struct mpam_props *cprops) +{ + if (mba_class_use_mbw_part(cprops) || + mpam_has_feature(mpam_feat_mbw_max, cprops)) + return true; + + return false; +} + +/* + * Calculate the percentage change from each implemented bit in the control + * This can return 0 when BWA_WD is greater than 6. (100 / (1<<7) == 0) + */ +static u32 get_mba_granularity(struct mpam_props *cprops) +{ + if (mba_class_use_mbw_part(cprops)) { + return MAX_MBA_BW / cprops->mbw_pbm_bits; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + /* + * bwa_wd is the number of bits implemented in the 0.xxx + * fixed point fraction. 1 bit is 50%, 2 is 25% etc. + */ + return max_t(u32, 1, (MAX_MBA_BW / BIT(cprops->bwa_wd))); + } + + return 0; +} + +static u32 mbw_pbm_to_percent(unsigned long mbw_pbm, struct mpam_props *cprops) +{ + u32 bit, result = 0, granularity = get_mba_granularity(cprops); + + for_each_set_bit(bit, &mbw_pbm, cprops->mbw_pbm_bits % 32) { + result += granularity; + } + + return result; +} + +static u32 mbw_max_to_percent(u16 mbw_max, struct mpam_props *cprops) +{ + u8 bit; + u32 divisor = 2, value = 0; + + for (bit = 15; bit; bit--) { + if (mbw_max & BIT(bit)) + /* + * Left shift by 16 bits to preserve the precision of + * the division operation. + */ + value += (MAX_MBA_BW << 16) / divisor; + divisor <<= 1; + } + + /* Use the upper bound of the fixed-point fraction. 
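+	 * e.g. with bwa_wd = 2, a hardware fraction of 0b11 (75%) reads
+	 * back as 75 + 25 = 100, the upper bound of its [75, 100) interval.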
+	 */
+	value = (value + (MAX_MBA_BW << (16 - cprops->bwa_wd))) >> 16;
+
+	return value;
+}
+
+static u32 percent_to_mbw_pbm(u8 pc, struct mpam_props *cprops)
+{
+	u32 granularity = get_mba_granularity(cprops);
+	u8 num_bits = pc / granularity;
+
+	if (!num_bits)
+		return 0;
+
+	/* TODO: pick bits at random to avoid contention */
+	return (1 << num_bits) - 1;
+}
+
+static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops)
+{
+	u8 bit;
+	u32 granularity, pc_ls, divisor = 2, value = 0;
+
+	if (WARN_ON_ONCE(cprops->bwa_wd > 15))
+		return MAX_MBA_BW;
+
+	/* Set the pc value to be a multiple of granularity. */
+	granularity = get_mba_granularity(cprops);
+	pc = roundup(pc, (u8) granularity);
+	if (pc > 100)
+		pc = 100;
+
+	/*
+	 * Left shift by 16 bits to preserve the precision of the division
+	 * operation.
+	 */
+	pc_ls = (u32) pc << 16;
+
+	for (bit = 15; bit; bit--) {
+		if (pc_ls >= (MAX_MBA_BW << 16) / divisor) {
+			pc_ls -= (MAX_MBA_BW << 16) / divisor;
+			value |= BIT(bit);
+		}
+		divisor <<= 1;
+
+		if (!pc_ls || !((MAX_MBA_BW << 16) / divisor))
+			break;
+	}
+
+	value &= GENMASK(15, 15 - cprops->bwa_wd + 1);
+
+	return value;
+}
+
+/* Test whether we can export MPAM_CLASS_CACHE:{2,3} */
+static void mpam_resctrl_pick_caches(void)
+{
+	int idx;
+	unsigned int cache_size;
+	struct mpam_class *class;
+	struct mpam_resctrl_res *res;
+
+	lockdep_assert_cpus_held();
+
+	idx = srcu_read_lock(&mpam_srcu);
+	list_for_each_entry_rcu(class, &mpam_classes, classes_list) {
+		struct mpam_props *cprops = &class->props;
+		bool has_cpor = cache_has_usable_cpor(class);
+
+		if (class->type != MPAM_CLASS_CACHE) {
+			pr_debug("pick_caches: Class is not a cache\n");
+			continue;
+		}
+
+		if (class->level != 2 && class->level != 3) {
+			pr_debug("pick_caches: not L2 or L3\n");
+			continue;
+		}
+
+		if (class->level == 2 && !has_cpor) {
+			pr_debug("pick_caches: L2 missing CPOR\n");
+			continue;
+		} else if (!has_cpor && !cache_has_usable_csu(class)) {
+			pr_debug("pick_caches: Cache misses CPOR and CSU\n");
+			continue;
+		}
+
+		if (!cpumask_equal(&class->affinity, cpu_possible_mask)) {
+			pr_debug("pick_caches: Class has missing CPUs\n");
+			continue;
+		}
+
+		/* Assume cache levels are the same size for all CPUs... */
+		cache_size = get_cpu_cacheinfo_size(smp_processor_id(), class->level);
+		if (!cache_size) {
+			pr_debug("pick_caches: Could not read cache size\n");
+			continue;
+		}
+
+		if (mpam_has_feature(mpam_feat_msmon_csu, cprops))
+			update_rmid_limits(cache_size);
+
+		if (class->level == 2) {
+			res = &mpam_resctrl_exports[RDT_RESOURCE_L2];
+			res->resctrl_res.name = "L2";
+		} else {
+			res = &mpam_resctrl_exports[RDT_RESOURCE_L3];
+			res->resctrl_res.name = "L3";
+		}
+		res->class = class;
+	}
+	srcu_read_unlock(&mpam_srcu, idx);
+}
+
+static void mpam_resctrl_pick_mba(void)
+{
+	struct mpam_class *class, *candidate_class = NULL;
+	struct mpam_resctrl_res *res;
+	int idx;
+
+	lockdep_assert_cpus_held();
+
+	idx = srcu_read_lock(&mpam_srcu);
+	list_for_each_entry_rcu(class, &mpam_classes, classes_list) {
+		struct mpam_props *cprops = &class->props;
+
+		if (class->level < 3)
+			continue;
+
+		if (!class_has_usable_mba(cprops))
+			continue;
+
+		if (!cpumask_equal(&class->affinity, cpu_possible_mask))
+			continue;
+
+		/*
+		 * mba_sc reads the mbm_local counter, and waggles the MBA controls.
+		 * mbm_local is implicitly part of the L3, so pick a resource to be
+		 * MBA that is as close as possible to the L3.
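+		 * e.g. prefer an MSC on the L3 itself (level 3) over one
+		 * further out in the memory system.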
+		 */
+		if (!candidate_class || class->level < candidate_class->level)
+			candidate_class = class;
+	}
+	srcu_read_unlock(&mpam_srcu, idx);
+
+	if (candidate_class) {
+		res = &mpam_resctrl_exports[RDT_RESOURCE_MBA];
+		res->class = candidate_class;
+		res->resctrl_res.name = "MB";
+	}
+}
+
+bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt)
+{
+	struct mpam_props *cprops;
+
+	switch (evt) {
+	case QOS_L3_MBM_LOCAL_EVENT_ID:
+		if (!mbm_local_class)
+			return false;
+		cprops = &mbm_local_class->props;
+
+		return mpam_has_feature(mpam_feat_msmon_mbwu_rwbw, cprops);
+	default:
+		return false;
+	}
+}
+
+void resctrl_arch_mon_event_config_read(void *info)
+{
+	struct mpam_resctrl_dom *dom;
+	struct resctrl_mon_config_info *mon_info = info;
+
+	dom = container_of(mon_info->d, struct mpam_resctrl_dom, resctrl_dom);
+	mon_info->mon_config = dom->mbm_local_evt_cfg & MAX_EVT_CONFIG_BITS;
+}
+
+void resctrl_arch_mon_event_config_write(void *info)
+{
+	struct mpam_resctrl_dom *dom;
+	struct resctrl_mon_config_info *mon_info = info;
+
+	if (mon_info->mon_config & ~MPAM_RESTRL_EVT_CONFIG_VALID) {
+		mon_info->err = -EOPNOTSUPP;
+		return;
+	}
+
+	dom = container_of(mon_info->d, struct mpam_resctrl_dom, resctrl_dom);
+	dom->mbm_local_evt_cfg = mon_info->mon_config & MPAM_RESTRL_EVT_CONFIG_VALID;
+}
+
+void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d)
+{
+	struct mpam_resctrl_dom *dom;
+
+	dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom);
+	dom->mbm_local_evt_cfg = MPAM_RESTRL_EVT_CONFIG_VALID;
+	mpam_msmon_reset_all_mbwu(dom->comp);
+}
+
+static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res)
+{
+	struct mpam_class *class = res->class;
+	struct rdt_resource *r = &res->resctrl_res;
+	bool has_mbwu = class_has_usable_mbwu(class);
+
+	/* Is this one of the two well-known caches? */
+	if (res->resctrl_res.rid == RDT_RESOURCE_L2 ||
+	    res->resctrl_res.rid == RDT_RESOURCE_L3) {
+		bool has_csu = cache_has_usable_csu(class);
+
+		r->cache_level = class->level;
+
+		/* TODO: Scaling is not yet supported */
+		r->cache.cbm_len = class->props.cpbm_wd;
+		r->cache.arch_has_sparse_bitmasks = true;
+
+		/* mpam_devices will reject empty bitmaps */
+		r->cache.min_cbm_bits = 1;
+
+		/* TODO: kill these properties off as they are derivatives */
+		r->format_str = "%d=%0*x";
+		r->fflags = RFTYPE_RES_CACHE;
+		r->default_ctrl = BIT_MASK(class->props.cpbm_wd) - 1;
+		r->data_width = (class->props.cpbm_wd + 3) / 4;
+
+		/*
+		 * Which bits are shared with other ...things...
+		 * Unknown devices use partid-0 which uses all the bitmap
+		 * fields. Until we have configured the SMMU and GIC not to do
+		 * this, 'all the bits' is the correct answer here.
+		 */
+		r->cache.shareable_bits = r->default_ctrl;
+
+		if (mpam_has_feature(mpam_feat_cpor_part, &class->props)) {
+			r->alloc_capable = true;
+			exposed_alloc_capable = true;
+		}
+
+		/*
+		 * MBWU counters may be 'local' or 'total' depending on where
+		 * they are in the topology. Counters on caches are assumed to
+		 * be local. Counters on a memory controller are assumed to
+		 * be total.
+		 */
+		if (has_mbwu && class->level >= 3) {
+			mbm_local_class = class;
+			r->mon_capable = true;
+		}
+
+		/*
+		 * CSU counters only make sense on a cache. The file is called
+		 * llc_occupancy, but it's expected to be on the L3.
+		 */
+		if (has_csu && class->type == MPAM_CLASS_CACHE &&
+		    class->level == 3) {
+			r->mon_capable = true;
+		}
+	} else if (res->resctrl_res.rid == RDT_RESOURCE_MBA) {
+		struct mpam_props *cprops = &class->props;
+
+		/* TODO: kill these properties off as they are derivatives */
+		r->format_str = "%d=%*u";
+		r->fflags = RFTYPE_RES_MB;
+		r->default_ctrl = MAX_MBA_BW;
+		r->data_width = 3;
+
+		r->membw.delay_linear = true;
+		r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
+		r->membw.bw_gran = get_mba_granularity(cprops);
+		r->membw.min_bw = r->membw.bw_gran;
+
+		/* Round up to at least 1% */
+		if (!r->membw.bw_gran)
+			r->membw.bw_gran = 1;
+
+		if (class_has_usable_mba(cprops)) {
+			r->alloc_capable = true;
+			exposed_alloc_capable = true;
+		}
+
+		if (has_mbwu && class->type == MPAM_CLASS_MEMORY) {
+			mbm_total_class = class;
+			r->mon_capable = true;
+		} else if (class_has_usable_impl_mbwu(class)) {
+			r->mon_capable = true;
+			if (mpam_current_machine == MPAM_YITIAN710)
+				mbm_bps_class = class;
+		}
+	}
+
+	if (r->mon_capable) {
+		exposed_mon_capable = true;
+
+		/*
+		 * Unfortunately, num_rmid doesn't mean anything for
+		 * mpam, and it's exposed to user-space!
+		 * num_rmid is supposed to mean the number of groups
+		 * that can be created, both control and monitor groups.
+		 * For mpam, each control group has its own pmg/rmid
+		 * space.
+		 */
+		r->num_rmid = 1;
+	}
+
+	return 0;
+}
+
+int mpam_resctrl_setup(void)
+{
+	int err = 0;
+	struct mpam_resctrl_res *res;
+	enum resctrl_res_level i;
+
+	wait_event(wait_cacheinfo_ready, cacheinfo_ready);
+
+	cpus_read_lock();
+	for (i = 0; i < RDT_NUM_RESOURCES; i++) {
+		res = &mpam_resctrl_exports[i];
+		INIT_LIST_HEAD(&res->resctrl_res.domains);
+		INIT_LIST_HEAD(&res->resctrl_res.evt_list);
+		res->resctrl_res.rid = i;
+	}
+
+	mpam_resctrl_pick_caches();
+	mpam_resctrl_pick_mba();
+	/* TODO: mpam_resctrl_pick_counters(); */
+
+	for (i = 0; i < RDT_NUM_RESOURCES; i++) {
+		res = &mpam_resctrl_exports[i];
+		if (!res->class)
+			continue;	// dummy resource
+
+		err = mpam_resctrl_resource_init(res);
+		if (err)
+			break;
+	}
+	cpus_read_unlock();
+
+	if (!err && !exposed_alloc_capable && !exposed_mon_capable)
+		err = -EOPNOTSUPP;
+
+	if (!err) {
+		if (!is_power_of_2(mpam_pmg_max + 1)) {
+			/*
+			 * If not all the partid*pmg values are valid indexes,
+			 * resctrl may allocate pmg that don't exist. This
+			 * should cause an error interrupt.
+			 */
+			pr_warn("Number of PMG is not a power of 2!
resctrl may misbehave"); + } + + err = resctrl_init(); + if (!err) + WRITE_ONCE(resctrl_enabled, true); + } + + return err; +} + +void mpam_resctrl_exit(void) +{ + if (!READ_ONCE(resctrl_enabled)) + return; + + WRITE_ONCE(resctrl_enabled, false); + resctrl_exit(); +} + +u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, enum resctrl_conf_type type) +{ + u32 partid; + struct mpam_config *cfg; + struct mpam_props *cprops; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + enum mpam_device_features configured_by; + + lockdep_assert_cpus_held(); + + if (!mpam_is_enabled()) + return r->default_ctrl; + + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + cprops = &res->class->props; + + if (mpam_resctrl_hide_cdp(r->rid)) + partid = resctrl_get_config_index(closid, CDP_CODE); + else + partid = resctrl_get_config_index(closid, type); + cfg = &dom->comp->cfg[partid]; + + switch (r->rid) { + case RDT_RESOURCE_L2: + case RDT_RESOURCE_L3: + configured_by = mpam_feat_cpor_part; + break; + case RDT_RESOURCE_MBA: + if (mba_class_use_mbw_part(cprops)) { + configured_by = mpam_feat_mbw_part; + break; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + configured_by = mpam_feat_mbw_max; + break; + } + fallthrough; + default: + return -EINVAL; + } + + if (!r->alloc_capable || partid >= resctrl_arch_get_num_closid(r) || + !mpam_has_feature(configured_by, cfg)) + return r->default_ctrl; + + switch (configured_by) { + case mpam_feat_cpor_part: + /* TODO: Scaling is not yet supported */ + return cfg->cpbm; + case mpam_feat_mbw_part: + /* TODO: Scaling is not yet supported */ + return mbw_pbm_to_percent(cfg->mbw_pbm, cprops); + case mpam_feat_mbw_max: + return mbw_max_to_percent(cfg->mbw_max, cprops); + default: + return -EINVAL; + } +} + +int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, enum resctrl_conf_type t, u32 cfg_val) +{ + int err; + u32 partid; + struct mpam_config cfg; + struct mpam_props *cprops; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + lockdep_assert_irqs_enabled(); + + /* + * NOTE: don't check the CPU as mpam_apply_config() doesn't care, + * and resctrl_arch_update_domains() depends on this. + */ + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + cprops = &res->class->props; + + partid = resctrl_get_config_index(closid, t); + if (!r->alloc_capable || partid >= resctrl_arch_get_num_closid(r)) + return -EINVAL; + + switch (r->rid) { + case RDT_RESOURCE_L2: + case RDT_RESOURCE_L3: + /* TODO: Scaling is not yet supported */ + cfg.cpbm = cfg_val; + mpam_set_feature(mpam_feat_cpor_part, &cfg); + break; + case RDT_RESOURCE_MBA: + if (mba_class_use_mbw_part(cprops)) { + cfg.mbw_pbm = percent_to_mbw_pbm(cfg_val, cprops); + mpam_set_feature(mpam_feat_mbw_part, &cfg); + break; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + cfg.mbw_max = percent_to_mbw_max(cfg_val, cprops); + mpam_set_feature(mpam_feat_mbw_max, &cfg); + break; + } + fallthrough; + default: + return -EINVAL; + } + + /* + * When CDP is enabled, but the resource doesn't support it, we need to + * apply the same configuration to the other partid. 
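+	 * i.e. apply the same struct mpam_config through both the CDP_CODE
+	 * and CDP_DATA indexes of this closid.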
+ */ + if (mpam_resctrl_hide_cdp(r->rid)) { + partid = resctrl_get_config_index(closid, CDP_CODE); + err = mpam_apply_config(dom->comp, partid, &cfg); + if (err) + return err; + + partid = resctrl_get_config_index(closid, CDP_DATA); + return mpam_apply_config(dom->comp, partid, &cfg); + + } else { + return mpam_apply_config(dom->comp, partid, &cfg); + } +} + +/* TODO: this is IPI heavy */ +int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) +{ + int err = 0; + struct rdt_domain *d; + enum resctrl_conf_type t; + struct resctrl_staged_config *cfg; + + lockdep_assert_cpus_held(); + lockdep_assert_irqs_enabled(); + + list_for_each_entry(d, &r->domains, list) { + for (t = 0; t < CDP_NUM_TYPES; t++) { + cfg = &d->staged_config[t]; + if (!cfg->have_new_ctrl) + continue; + + err = resctrl_arch_update_one(r, d, closid, t, + cfg->new_ctrl); + if (err) + return err; + } + } + + return err; +} + +void resctrl_arch_reset_resources(void) +{ + int i, idx; + struct mpam_class *class; + struct mpam_resctrl_res *res; + + lockdep_assert_cpus_held(); + + if (!mpam_is_enabled()) + return; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy resource + + if (!res->resctrl_res.alloc_capable) + continue; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) + mpam_reset_class(class); + srcu_read_unlock(&mpam_srcu, idx); + } +} + +static struct mpam_resctrl_dom * +mpam_resctrl_alloc_domain(unsigned int cpu, struct mpam_resctrl_res *res) +{ + struct mpam_resctrl_dom *dom; + struct mpam_class *class = res->class; + struct mpam_component *comp_iter, *comp; + + comp = NULL; + list_for_each_entry(comp_iter, &class->components, class_list) { + if (cpumask_test_cpu(cpu, &comp_iter->affinity)) { + comp = comp_iter; + break; + } + } + + /* cpu with unknown exported component? 
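+	 * Only classes whose affinity covers all possible CPUs are picked
+	 * for export, so failing to find a component here indicates a bug.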
*/ + if (WARN_ON_ONCE(!comp)) + return ERR_PTR(-EINVAL); + + dom = kzalloc_node(sizeof(*dom), GFP_KERNEL, cpu_to_node(cpu)); + if (!dom) + return ERR_PTR(-ENOMEM); + + dom->comp = comp; + INIT_LIST_HEAD(&dom->resctrl_dom.list); + dom->resctrl_dom.id = comp->comp_id; + dom->mbm_local_evt_cfg = MPAM_RESTRL_EVT_CONFIG_VALID; + cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); + + /* TODO: this list should be sorted */ + list_add_tail(&dom->resctrl_dom.list, &res->resctrl_res.domains); + + return dom; +} + +/* Like resctrl_get_domain_from_cpu(), but for offline CPUs */ +static struct mpam_resctrl_dom * +mpam_get_domain_from_cpu(int cpu, struct mpam_resctrl_res *res) +{ + struct rdt_domain *d; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + + list_for_each_entry(d, &res->resctrl_res.domains, list) { + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + if (cpumask_test_cpu(cpu, &dom->comp->affinity)) + return dom; + } + + return NULL; +} + +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id) +{ + struct rdt_domain *d; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + + list_for_each_entry(d, &r->domains, list) { + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + if (dom->comp->comp_id == id) + return &dom->resctrl_dom; + } + + return NULL; +} + +int mpam_resctrl_online_cpu(unsigned int cpu) +{ + int i, err; + struct mpam_resctrl_dom *dom; + struct mpam_resctrl_res *res; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy_resource; + + dom = mpam_get_domain_from_cpu(cpu, res); + if (dom) { + cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); + continue; + } + + dom = mpam_resctrl_alloc_domain(cpu, res); + if (IS_ERR(dom)) + return PTR_ERR(dom); + err = resctrl_online_domain(&res->resctrl_res, &dom->resctrl_dom); + if (err) + return err; + } + + resctrl_online_cpu(cpu); + return 0; +} + +int mpam_resctrl_offline_cpu(unsigned int cpu) +{ + int i; + struct rdt_domain *d; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + + resctrl_offline_cpu(cpu); + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy resource + + d = resctrl_get_domain_from_cpu(cpu, &res->resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + /* The last one standing was ahead of us... 
*/ + if (WARN_ON_ONCE(!d)) + continue; + + cpumask_clear_cpu(cpu, &d->cpu_mask); + + if (!cpumask_empty(&d->cpu_mask)) + continue; + + resctrl_offline_domain(&res->resctrl_res, &dom->resctrl_dom); + list_del(&d->list); + kfree(dom); + } + + return 0; +} + +static int __init __cacheinfo_ready(void) +{ + cacheinfo_ready = true; + wake_up(&wait_cacheinfo_ready); + + return 0; +} +device_initcall_sync(__cacheinfo_ready); diff --git a/drivers/platform/sw64/Makefile b/drivers/platform/sw64/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..28922224fb1763030b979bb8cd2ce204d41dfd80 --- /dev/null +++ b/drivers/platform/sw64/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PLATFORM_XUELANG) += legacy_xuelang.o diff --git a/drivers/platform/sw64/legacy_xuelang.c b/drivers/platform/sw64/legacy_xuelang.c new file mode 100644 index 0000000000000000000000000000000000000000..8a63d9edf9f230a6137c28e641c966aa5b72d16b --- /dev/null +++ b/drivers/platform/sw64/legacy_xuelang.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include +#include + +static void vt_mode_kill_arch(int mode) +{ + hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0); + + switch (mode) { + case LINUX_REBOOT_CMD_RESTART: + hcall(HCALL_RESTART, 0, 0, 0); + mb(); + break; + case LINUX_REBOOT_CMD_HALT: + case LINUX_REBOOT_CMD_POWER_OFF: + hcall(HCALL_SHUTDOWN, 0, 0, 0); + mb(); + break; + default: + break; + } +} + +void sw64_halt(void) +{ + if (is_in_host()) + cpld_write(0x64, 0x00, 0xf0); + else + vt_mode_kill_arch(LINUX_REBOOT_CMD_HALT); +} + +void sw64_poweroff(void) +{ + if (is_in_host()) + cpld_write(0x64, 0x00, 0xf0); + else + vt_mode_kill_arch(LINUX_REBOOT_CMD_POWER_OFF); +} + +void sw64_restart(void) +{ + if (is_in_host()) { + fix_jm585_reset(); + cpld_write(0x64, 0x00, 0xc3); + } else + vt_mode_kill_arch(LINUX_REBOOT_CMD_RESTART); +} + +static int sw64_reset_init(void) +{ +#ifdef CONFIG_EFI + if (BIOS_SUPPORT_RESET_CLALLBACK((void *)bios_version)) + return 0; +#endif + pm_restart = sw64_restart; + pm_power_off = sw64_poweroff; + pm_halt = sw64_halt; + return 0; +} +subsys_initcall(sw64_reset_init); diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 4ff2aa4b484bc5d6b0e60a65272c09ca2fa2c27d..4f571b79f0289d2602e82d0e277c0f0082e327ac 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -11,13 +11,16 @@ #include "ifs.h" -#define X86_MATCH(model) \ +#define X86_MATCH(model, array_gen) \ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \ - INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, NULL) + INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, array_gen) static const struct x86_cpu_id ifs_cpu_ids[] __initconst = { - X86_MATCH(SAPPHIRERAPIDS_X), - X86_MATCH(EMERALDRAPIDS_X), + X86_MATCH(SAPPHIRERAPIDS_X, ARRAY_GEN0), + X86_MATCH(EMERALDRAPIDS_X, ARRAY_GEN0), + X86_MATCH(GRANITERAPIDS_X, ARRAY_GEN0), + X86_MATCH(GRANITERAPIDS_D, ARRAY_GEN0), + X86_MATCH(ATOM_CRESTMONT_X, ARRAY_GEN1), {} }; MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids); @@ -30,6 +33,7 @@ bool *ifs_pkg_auth; static const struct ifs_test_caps scan_test = { .integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT, .test_num = IFS_TYPE_SAF, + .image_suffix = "scan", }; static const struct ifs_test_caps array_test = { @@ -37,9 +41,32 @@ static const struct ifs_test_caps array_test = { .test_num = IFS_TYPE_ARRAY_BIST, }; +static const struct ifs_test_msrs scan_msrs = { + .copy_hashes = MSR_COPY_SCAN_HASHES, + 
.copy_hashes_status = MSR_SCAN_HASHES_STATUS, + .copy_chunks = MSR_AUTHENTICATE_AND_COPY_CHUNK, + .copy_chunks_status = MSR_CHUNKS_AUTHENTICATION_STATUS, + .test_ctrl = MSR_SAF_CTRL, +}; + +static const struct ifs_test_msrs sbaf_msrs = { + .copy_hashes = MSR_COPY_SBAF_HASHES, + .copy_hashes_status = MSR_SBAF_HASHES_STATUS, + .copy_chunks = MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK, + .copy_chunks_status = MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS, + .test_ctrl = MSR_SBAF_CTRL, +}; + +static const struct ifs_test_caps sbaf_test = { + .integrity_cap_bit = MSR_INTEGRITY_CAPS_SBAF_BIT, + .test_num = IFS_TYPE_SBAF, + .image_suffix = "sbft", +}; + static struct ifs_device ifs_devices[] = { [IFS_TYPE_SAF] = { .test_caps = &scan_test, + .test_msrs = &scan_msrs, .misc = { .name = "intel_ifs_0", .minor = MISC_DYNAMIC_MINOR, @@ -54,6 +81,15 @@ static struct ifs_device ifs_devices[] = { .groups = plat_ifs_array_groups, }, }, + [IFS_TYPE_SBAF] = { + .test_caps = &sbaf_test, + .test_msrs = &sbaf_msrs, + .misc = { + .name = "intel_ifs_2", + .minor = MISC_DYNAMIC_MINOR, + .groups = plat_ifs_groups, + }, + }, }; #define IFS_NUMTESTS ARRAY_SIZE(ifs_devices) @@ -97,6 +133,7 @@ static int __init ifs_init(void) continue; ifs_devices[i].rw_data.generation = FIELD_GET(MSR_INTEGRITY_CAPS_SAF_GEN_MASK, msrval); + ifs_devices[i].rw_data.array_gen = (u32)m->driver_data; ret = misc_register(&ifs_devices[i].misc); if (ret) goto err_exit; diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index 6bc63ab705175251586312f6c595e8bb1d83f0df..5c3c0dfa1bf833031d7165fa6c0ec08f33213b95 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -126,23 +126,61 @@ * The driver does not make use of this, it only tests one core at a time. * * .. [#f1] https://github.com/intel/TBD + * + * + * Structural Based Functional Test at Field (SBAF): + * ------------------------------------------------- + * + * SBAF is a new type of testing that provides comprehensive core test + * coverage complementing Scan at Field (SAF) testing. SBAF mimics the + * manufacturing screening environment and leverages the same test suite. + * It makes use of Design For Test (DFT) observation sites and features + * to maximize coverage in minimum time. + * + * Similar to the SAF test, SBAF isolates the core under test from the + * rest of the system during execution. Upon completion, the core + * seamlessly resets to its pre-test state and resumes normal operation. + * Any machine checks or hangs encountered during the test are confined to + * the isolated core, preventing disruption to the overall system. + * + * Like the SAF test, the SBAF test is also divided into multiple batches, + * and each batch test can take hundreds of milliseconds (100-200 ms) to + * complete. If such a lengthy interruption is undesirable, it is + * recommended to relocate the time-sensitive applications to other cores. 
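+ * (e.g. time-critical tasks could be pinned away from the core under
+ * test with sched_setaffinity() before the test is started.)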
*/ #include #include #define MSR_ARRAY_BIST 0x00000105 + +#define MSR_COPY_SBAF_HASHES 0x000002b8 +#define MSR_SBAF_HASHES_STATUS 0x000002b9 +#define MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK 0x000002ba +#define MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS 0x000002bb +#define MSR_ACTIVATE_SBAF 0x000002bc +#define MSR_SBAF_STATUS 0x000002bd + #define MSR_COPY_SCAN_HASHES 0x000002c2 #define MSR_SCAN_HASHES_STATUS 0x000002c3 #define MSR_AUTHENTICATE_AND_COPY_CHUNK 0x000002c4 #define MSR_CHUNKS_AUTHENTICATION_STATUS 0x000002c5 #define MSR_ACTIVATE_SCAN 0x000002c6 #define MSR_SCAN_STATUS 0x000002c7 +#define MSR_ARRAY_TRIGGER 0x000002d6 +#define MSR_ARRAY_STATUS 0x000002d7 +#define MSR_SAF_CTRL 0x000004f0 +#define MSR_SBAF_CTRL 0x000004f8 + #define SCAN_NOT_TESTED 0 #define SCAN_TEST_PASS 1 #define SCAN_TEST_FAIL 2 #define IFS_TYPE_SAF 0 #define IFS_TYPE_ARRAY_BIST 1 +#define IFS_TYPE_SBAF 2 + +#define ARRAY_GEN0 0 +#define ARRAY_GEN1 1 /* MSR_SCAN_HASHES_STATUS bit fields */ union ifs_scan_hashes_status { @@ -158,6 +196,19 @@ union ifs_scan_hashes_status { }; }; +union ifs_scan_hashes_status_gen2 { + u64 data; + struct { + u16 chunk_size; + u16 num_chunks; + u32 error_code :8; + u32 chunks_in_stride :9; + u32 rsvd :2; + u32 max_core_limit :12; + u32 valid :1; + }; +}; + /* MSR_CHUNKS_AUTH_STATUS bit fields */ union ifs_chunks_auth_status { u64 data; @@ -170,6 +221,17 @@ union ifs_chunks_auth_status { }; }; +union ifs_chunks_auth_status_gen2 { + u64 data; + struct { + u16 valid_chunks; + u16 total_chunks; + u32 error_code :8; + u32 rsvd2 :8; + u32 max_bundle :16; + }; +}; + /* MSR_ACTIVATE_SCAN bit fields */ union ifs_scan { u64 data; @@ -223,6 +285,34 @@ union ifs_array { }; }; +/* MSR_ACTIVATE_SBAF bit fields */ +union ifs_sbaf { + u64 data; + struct { + u32 bundle_idx :9; + u32 rsvd1 :5; + u32 pgm_idx :2; + u32 rsvd2 :16; + u32 delay :31; + u32 sigmce :1; + }; +}; + +/* MSR_SBAF_STATUS bit fields */ +union ifs_sbaf_status { + u64 data; + struct { + u32 bundle_idx :9; + u32 rsvd1 :5; + u32 pgm_idx :2; + u32 rsvd2 :16; + u32 error_code :8; + u32 rsvd3 :21; + u32 test_fail :1; + u32 sbaf_status :2; + }; +}; + /* * Driver populated error-codes * 0xFD: Test timed out before completing all the chunks. 
@@ -231,9 +321,28 @@ union ifs_array { #define IFS_SW_TIMEOUT 0xFD #define IFS_SW_PARTIAL_COMPLETION 0xFE +#define IFS_SUFFIX_SZ 5 + struct ifs_test_caps { int integrity_cap_bit; int test_num; + char image_suffix[IFS_SUFFIX_SZ]; +}; + +/** + * struct ifs_test_msrs - MSRs used in IFS tests + * @copy_hashes: Copy test hash data + * @copy_hashes_status: Status of copied test hash data + * @copy_chunks: Copy chunks of the test data + * @copy_chunks_status: Status of the copied test data chunks + * @test_ctrl: Control the test attributes + */ +struct ifs_test_msrs { + u32 copy_hashes; + u32 copy_hashes_status; + u32 copy_chunks; + u32 copy_chunks_status; + u32 test_ctrl; }; /** @@ -246,6 +355,9 @@ struct ifs_test_caps { * @scan_details: opaque scan status code from h/w * @cur_batch: number indicating the currently loaded test file * @generation: IFS test generation enumerated by hardware + * @chunk_size: size of a test chunk + * @array_gen: test generation of array test + * @max_bundle: maximum bundle index */ struct ifs_data { int loaded_version; @@ -256,6 +368,9 @@ struct ifs_data { u64 scan_details; u32 cur_batch; u32 generation; + u32 chunk_size; + u32 array_gen; + u32 max_bundle; }; struct ifs_work { @@ -265,6 +380,7 @@ struct ifs_work { struct ifs_device { const struct ifs_test_caps *test_caps; + const struct ifs_test_msrs *test_msrs; struct ifs_data rw_data; struct miscdevice misc; }; @@ -285,6 +401,14 @@ static inline const struct ifs_test_caps *ifs_get_test_caps(struct device *dev) return d->test_caps; } +static inline const struct ifs_test_msrs *ifs_get_test_msrs(struct device *dev) +{ + struct miscdevice *m = dev_get_drvdata(dev); + struct ifs_device *d = container_of(m, struct ifs_device, misc); + + return d->test_msrs; +} + extern bool *ifs_pkg_auth; int ifs_load_firmware(struct device *dev); int do_core_test(int cpu, struct device *dev); diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 53d957d4eea4d119da81f1c081c803028904a178..de54bd1a5970bf7c078c4a2d55558c5c7887dd0e 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -2,6 +2,7 @@ /* Copyright(c) 2022 Intel Corporation. */ #include +#include #include #include @@ -26,6 +27,11 @@ union meta_data { #define IFS_HEADER_SIZE (sizeof(struct microcode_header_intel)) #define META_TYPE_IFS 1 +#define INVALIDATE_STRIDE 0x1UL +#define IFS_GEN_STRIDE_AWARE 2 +#define AUTH_INTERRUPTED_ERROR 5 +#define IFS_AUTH_RETRY_CT 10 + static struct microcode_header_intel *ifs_header_ptr; /* pointer to the ifs image header */ static u64 ifs_hash_ptr; /* Address of ifs metadata (hash) */ static u64 ifs_test_image_ptr; /* 256B aligned address of test pattern */ @@ -44,7 +50,10 @@ static const char * const scan_hash_status[] = { static const char * const scan_authentication_status[] = { [0] = "No error reported", [1] = "Attempt to authenticate a chunk which is already marked as authentic", - [2] = "Chunk authentication error. The hash of chunk did not match expected value" + [2] = "Chunk authentication error. 
The hash of chunk did not match expected value", + [3] = "Reserved", + [4] = "Chunk outside the current stride", + [5] = "Authentication flow interrupted", }; #define MC_HEADER_META_TYPE_END (0) @@ -80,6 +89,23 @@ static struct metadata_header *find_meta_data(void *ucode, unsigned int meta_typ return NULL; } +static void hashcopy_err_message(struct device *dev, u32 err_code) +{ + if (err_code >= ARRAY_SIZE(scan_hash_status)) + dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code); + else + dev_err(dev, "Hash copy error : %s\n", scan_hash_status[err_code]); +} + +static void auth_err_message(struct device *dev, u32 err_code) +{ + if (err_code >= ARRAY_SIZE(scan_authentication_status)) + dev_err(dev, "invalid error code 0x%x for authentication\n", err_code); + else + dev_err(dev, "Chunk authentication error : %s\n", + scan_authentication_status[err_code]); +} + /* * To copy scan hashes and authenticate test chunks, the initiating cpu must point * to the EDX:EAX to the test image in linear address. @@ -92,15 +118,17 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) union ifs_scan_hashes_status hashes_status; union ifs_chunks_auth_status chunk_status; struct device *dev = local_work->dev; + const struct ifs_test_msrs *msrs; int i, num_chunks, chunk_size; struct ifs_data *ifsd; u64 linear_addr, base; u32 err_code; ifsd = ifs_get_data(dev); + msrs = ifs_get_test_msrs(dev); /* run scan hash copy */ - wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr); - rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data); + wrmsrl(msrs->copy_hashes, ifs_hash_ptr); + rdmsrl(msrs->copy_hashes_status, hashes_status.data); /* enumerate the scan image information */ num_chunks = hashes_status.num_chunks; @@ -109,11 +137,7 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) if (!hashes_status.valid) { ifsd->loading_error = true; - if (err_code >= ARRAY_SIZE(scan_hash_status)) { - dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code); - goto done; - } - dev_err(dev, "Hash copy error : %s", scan_hash_status[err_code]); + hashcopy_err_message(dev, err_code); goto done; } @@ -125,21 +149,15 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) linear_addr = base + i * chunk_size; linear_addr |= i; - wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, linear_addr); - rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + wrmsrl(msrs->copy_chunks, linear_addr); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); ifsd->valid_chunks = chunk_status.valid_chunks; err_code = chunk_status.error_code; if (err_code) { ifsd->loading_error = true; - if (err_code >= ARRAY_SIZE(scan_authentication_status)) { - dev_err(dev, - "invalid error code 0x%x for authentication\n", err_code); - goto done; - } - dev_err(dev, "Chunk authentication error %s\n", - scan_authentication_status[err_code]); + auth_err_message(dev, err_code); goto done; } } @@ -147,16 +165,118 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) complete(&ifs_done); } +static int get_num_chunks(int gen, union ifs_scan_hashes_status_gen2 status) +{ + return gen >= IFS_GEN_STRIDE_AWARE ? 
status.chunks_in_stride : status.num_chunks; +} + +static bool need_copy_scan_hashes(struct ifs_data *ifsd) +{ + return !ifsd->loaded || + ifsd->generation < IFS_GEN_STRIDE_AWARE || + ifsd->loaded_version != ifs_header_ptr->rev; +} + +static int copy_hashes_authenticate_chunks_gen2(struct device *dev) +{ + union ifs_scan_hashes_status_gen2 hashes_status; + union ifs_chunks_auth_status_gen2 chunk_status; + u32 err_code, valid_chunks, total_chunks; + const struct ifs_test_msrs *msrs; + int i, num_chunks, chunk_size; + union meta_data *ifs_meta; + int starting_chunk_nr; + struct ifs_data *ifsd; + u64 linear_addr, base; + u64 chunk_table[2]; + int retry_count; + + ifsd = ifs_get_data(dev); + msrs = ifs_get_test_msrs(dev); + + if (need_copy_scan_hashes(ifsd)) { + wrmsrl(msrs->copy_hashes, ifs_hash_ptr); + rdmsrl(msrs->copy_hashes_status, hashes_status.data); + + /* enumerate the scan image information */ + chunk_size = hashes_status.chunk_size * SZ_1K; + err_code = hashes_status.error_code; + + num_chunks = get_num_chunks(ifsd->generation, hashes_status); + + if (!hashes_status.valid) { + hashcopy_err_message(dev, err_code); + return -EIO; + } + ifsd->loaded_version = ifs_header_ptr->rev; + ifsd->chunk_size = chunk_size; + } else { + num_chunks = ifsd->valid_chunks; + chunk_size = ifsd->chunk_size; + } + + if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) { + wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); + if (chunk_status.valid_chunks != 0) { + dev_err(dev, "Couldn't invalidate installed stride - %d\n", + chunk_status.valid_chunks); + return -EIO; + } + } + + base = ifs_test_image_ptr; + ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS); + starting_chunk_nr = ifs_meta->starting_chunk; + + /* scan data authentication and copy chunks to secured memory */ + for (i = 0; i < num_chunks; i++) { + retry_count = IFS_AUTH_RETRY_CT; + linear_addr = base + i * chunk_size; + + chunk_table[0] = starting_chunk_nr + i; + chunk_table[1] = linear_addr; + do { + local_irq_disable(); + wrmsrl(msrs->copy_chunks, (u64)chunk_table); + local_irq_enable(); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); + err_code = chunk_status.error_code; + } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count); + + if (err_code) { + ifsd->loading_error = true; + auth_err_message(dev, err_code); + return -EIO; + } + } + + valid_chunks = chunk_status.valid_chunks; + total_chunks = chunk_status.total_chunks; + + if (valid_chunks != total_chunks) { + ifsd->loading_error = true; + dev_err(dev, "Couldn't authenticate all the chunks. 
Authenticated %d total %d.\n", + valid_chunks, total_chunks); + return -EIO; + } + ifsd->valid_chunks = valid_chunks; + ifsd->max_bundle = chunk_status.max_bundle; + + return 0; +} + static int validate_ifs_metadata(struct device *dev) { + const struct ifs_test_caps *test = ifs_get_test_caps(dev); struct ifs_data *ifsd = ifs_get_data(dev); union meta_data *ifs_meta; char test_file[64]; int ret = -EINVAL; - snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.scan", + snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.%s", boot_cpu_data.x86, boot_cpu_data.x86_model, - boot_cpu_data.x86_stepping, ifsd->cur_batch); + boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix); ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS); if (!ifs_meta) { @@ -179,6 +299,19 @@ static int validate_ifs_metadata(struct device *dev) return ret; } + if (ifs_meta->chunks_per_stride && + (ifs_meta->starting_chunk % ifs_meta->chunks_per_stride != 0)) { + dev_warn(dev, "Starting chunk num %u not a multiple of chunks_per_stride %u\n", + ifs_meta->starting_chunk, ifs_meta->chunks_per_stride); + return ret; + } + + if (ifs_meta->test_type != test->test_num) { + dev_warn(dev, "Metadata test_type %d mismatches with device type\n", + ifs_meta->test_type); + return ret; + } + return 0; } @@ -199,7 +332,9 @@ static int scan_chunks_sanity_check(struct device *dev) return ret; ifsd->loading_error = false; - ifsd->loaded_version = ifs_header_ptr->rev; + + if (ifsd->generation > 0) + return copy_hashes_authenticate_chunks_gen2(dev); /* copy the scan hash and authenticate per package */ cpus_read_lock(); @@ -219,6 +354,7 @@ static int scan_chunks_sanity_check(struct device *dev) ifs_pkg_auth[curr_pkg] = 1; } ret = 0; + ifsd->loaded_version = ifs_header_ptr->rev; out: cpus_read_unlock(); @@ -227,7 +363,7 @@ static int scan_chunks_sanity_check(struct device *dev) static int image_sanity_check(struct device *dev, const struct microcode_header_intel *data) { - struct ucode_cpu_info uci; + struct cpu_signature sig; /* Provide a specific error message when loading an older/unsupported image */ if (data->hdrver != MC_HEADER_TYPE_IFS) { @@ -240,11 +376,9 @@ static int image_sanity_check(struct device *dev, const struct microcode_header_ return -EINVAL; } - intel_cpu_collect_info(&uci); + intel_collect_cpu_info(&sig); - if (!intel_find_matching_signature((void *)data, - uci.cpu_sig.sig, - uci.cpu_sig.pf)) { + if (!intel_find_matching_signature((void *)data, &sig)) { dev_err(dev, "cpu signature, processor flags not matching\n"); return -EINVAL; } @@ -263,11 +397,11 @@ int ifs_load_firmware(struct device *dev) unsigned int expected_size; const struct firmware *fw; char scan_path[64]; - int ret = -EINVAL; + int ret; - snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan", + snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.%s", test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model, - boot_cpu_data.x86_stepping, ifsd->cur_batch); + boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix); ret = request_firmware_direct(&fw, scan_path, dev); if (ret) { diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index c7a5bf24bef35e3c15004c97fbe7eaebfbe40241..f978dd05d4d8b2ed117762b4cf1af6fdc9582f4e 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -23,6 +23,19 @@ /* Max retries on the same chunk */ #define MAX_IFS_RETRIES 5 +struct run_params { + 
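+	/* Run state handed to doscan() on each CPU via stop_core_cpuslocked() */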
struct ifs_data *ifsd; + union ifs_scan *activate; + union ifs_status status; +}; + +struct sbaf_run_params { + struct ifs_data *ifsd; + int *retry_cnt; + union ifs_sbaf *activate; + union ifs_sbaf_status status; +}; + /* * Number of TSC cycles that a logical CPU will wait for the other * logical CPU on the core in the WRMSR(ACTIVATE_SCAN). @@ -40,6 +53,8 @@ enum ifs_status_err_code { IFS_UNASSIGNED_ERROR_CODE = 7, IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8, IFS_INTERRUPTED_DURING_EXECUTION = 9, + IFS_UNASSIGNED_ERROR_CODE_0xA = 0xA, + IFS_CORRUPTED_CHUNK = 0xB, }; static const char * const scan_test_status[] = { @@ -55,10 +70,25 @@ static const char * const scan_test_status[] = { [IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT] = "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently", [IFS_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SCAN start", + [IFS_UNASSIGNED_ERROR_CODE_0xA] = "Unassigned error code 0xA", + [IFS_CORRUPTED_CHUNK] = "Scan operation aborted due to corrupted image. Try reloading", }; static void message_not_tested(struct device *dev, int cpu, union ifs_status status) { + struct ifs_data *ifsd = ifs_get_data(dev); + + /* + * control_error is set when the microcode runs into a problem + * loading the image from the reserved BIOS memory, or it has + * been corrupted. Reloading the image may fix this issue. + */ + if (status.control_error) { + dev_warn(dev, "CPU(s) %*pbl: Scan controller error. Batch: %02x version: 0x%x\n", + cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version); + return; + } + if (status.error_code < ARRAY_SIZE(scan_test_status)) { dev_info(dev, "CPU(s) %*pbl: SCAN operation did not start. %s\n", cpumask_pr_args(cpu_smt_mask(cpu)), @@ -80,16 +110,6 @@ static void message_fail(struct device *dev, int cpu, union ifs_status status) { struct ifs_data *ifsd = ifs_get_data(dev); - /* - * control_error is set when the microcode runs into a problem - * loading the image from the reserved BIOS memory, or it has - * been corrupted. Reloading the image may fix this issue. - */ - if (status.control_error) { - dev_err(dev, "CPU(s) %*pbl: could not execute from loaded scan image. Batch: %02x version: 0x%x\n", - cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version); - } - /* * signature_error is set when the output from the scan chains does not * match the expected signature. This might be a transient problem (e.g. @@ -123,24 +143,64 @@ static bool can_restart(union ifs_status status) case IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS: case IFS_CORE_NOT_CAPABLE_CURRENTLY: case IFS_UNASSIGNED_ERROR_CODE: + case IFS_UNASSIGNED_ERROR_CODE_0xA: + case IFS_CORRUPTED_CHUNK: break; } return false; } +#define SPINUNIT 100 /* 100 nsec */ +static atomic_t array_cpus_in; +static atomic_t scan_cpus_in; +static atomic_t sbaf_cpus_in; + +/* + * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus() + */ +static void wait_for_sibling_cpu(atomic_t *t, long long timeout) +{ + int cpu = smp_processor_id(); + const struct cpumask *smt_mask = cpu_smt_mask(cpu); + int all_cpus = cpumask_weight(smt_mask); + + atomic_inc(t); + while (atomic_read(t) < all_cpus) { + if (timeout < SPINUNIT) + return; + ndelay(SPINUNIT); + timeout -= SPINUNIT; + touch_nmi_watchdog(); + } +} + /* * Execute the scan. Called "simultaneously" on all threads of a core * at high priority using the stop_cpus mechanism. 
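 *
 * All SMT siblings first rendezvous in wait_for_sibling_cpu(): each thread
 * increments scan_cpus_in and spins in SPINUNIT (100 ns) steps, up to the
 * given timeout, until the count reaches the weight of the core's SMT mask,
 * so that the activation WRMSR is issued by the siblings close together.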
*/ static int doscan(void *data) { - int cpu = smp_processor_id(); - u64 *msrs = data; + int cpu = smp_processor_id(), start, stop; + struct run_params *params = data; + union ifs_status status; + struct ifs_data *ifsd; int first; + ifsd = params->ifsd; + + if (ifsd->generation) { + start = params->activate->gen2.start; + stop = params->activate->gen2.stop; + } else { + start = params->activate->gen0.start; + stop = params->activate->gen0.stop; + } + /* Only the first logical CPU on a core reports result */ first = cpumask_first(cpu_smt_mask(cpu)); + wait_for_sibling_cpu(&scan_cpus_in, NSEC_PER_SEC); + /* * This WRMSR will wait for other HT threads to also write * to this MSR (at most for activate.delay cycles). Then it @@ -149,12 +209,14 @@ static int doscan(void *data) * take up to 200 milliseconds (in the case where all chunks * are processed in a single pass) before it retires. */ - wrmsrl(MSR_ACTIVATE_SCAN, msrs[0]); + wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data); + rdmsrl(MSR_SCAN_STATUS, status.data); - if (cpu == first) { - /* Pass back the result of the scan */ - rdmsrl(MSR_SCAN_STATUS, msrs[1]); - } + trace_ifs_status(ifsd->cur_batch, start, stop, status.data); + + /* Pass back the result of the scan */ + if (cpu == first) + params->status = status; return 0; } @@ -173,7 +235,7 @@ static void ifs_test_core(int cpu, struct device *dev) struct ifs_data *ifsd; int to_start, to_stop; int status_chunk; - u64 msrvals[2]; + struct run_params params; int retries; ifsd = ifs_get_data(dev); @@ -184,6 +246,8 @@ static void ifs_test_core(int cpu, struct device *dev) to_start = 0; to_stop = ifsd->valid_chunks - 1; + params.ifsd = ifs_get_data(dev); + if (ifsd->generation) { activate.gen2.start = to_start; activate.gen2.stop = to_stop; @@ -201,12 +265,11 @@ static void ifs_test_core(int cpu, struct device *dev) break; } - msrvals[0] = activate.data; - stop_core_cpuslocked(cpu, doscan, msrvals); + params.activate = &activate; + atomic_set(&scan_cpus_in, 0); + stop_core_cpuslocked(cpu, doscan, ¶ms); - status.data = msrvals[1]; - - trace_ifs_status(cpu, to_start, to_stop, status.data); + status = params.status; /* Some cases can be retried, give up for others */ if (!can_restart(status)) @@ -233,10 +296,10 @@ static void ifs_test_core(int cpu, struct device *dev) /* Update status for this core */ ifsd->scan_details = status.data; - if (status.control_error || status.signature_error) { + if (status.signature_error) { ifsd->status = SCAN_TEST_FAIL; message_fail(dev, cpu, status); - } else if (status.error_code) { + } else if (status.control_error || status.error_code) { ifsd->status = SCAN_NOT_TESTED; message_not_tested(dev, cpu, status); } else { @@ -244,34 +307,14 @@ static void ifs_test_core(int cpu, struct device *dev) } } -#define SPINUNIT 100 /* 100 nsec */ -static atomic_t array_cpus_out; - -/* - * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus() - */ -static void wait_for_sibling_cpu(atomic_t *t, long long timeout) -{ - int cpu = smp_processor_id(); - const struct cpumask *smt_mask = cpu_smt_mask(cpu); - int all_cpus = cpumask_weight(smt_mask); - - atomic_inc(t); - while (atomic_read(t) < all_cpus) { - if (timeout < SPINUNIT) - return; - ndelay(SPINUNIT); - timeout -= SPINUNIT; - touch_nmi_watchdog(); - } -} - static int do_array_test(void *data) { union ifs_array *command = data; int cpu = smp_processor_id(); int first; + wait_for_sibling_cpu(&array_cpus_in, NSEC_PER_SEC); + /* * Only one logical CPU on a core needs to trigger the Array test via MSR 
write. */ @@ -283,9 +326,6 @@ static int do_array_test(void *data) rdmsrl(MSR_ARRAY_BIST, command->data); } - /* Tests complete faster if the sibling is spinning here */ - wait_for_sibling_cpu(&array_cpus_out, NSEC_PER_SEC); - return 0; } @@ -306,7 +346,7 @@ static void ifs_array_test_core(int cpu, struct device *dev) timed_out = true; break; } - atomic_set(&array_cpus_out, 0); + atomic_set(&array_cpus_in, 0); stop_core_cpuslocked(cpu, do_array_test, &command); if (command.ctrl_result) @@ -323,6 +363,257 @@ static void ifs_array_test_core(int cpu, struct device *dev) ifsd->status = SCAN_TEST_PASS; } +#define ARRAY_GEN1_TEST_ALL_ARRAYS 0x0ULL +#define ARRAY_GEN1_STATUS_FAIL 0x1ULL + +static int do_array_test_gen1(void *status) +{ + int cpu = smp_processor_id(); + int first; + + first = cpumask_first(cpu_smt_mask(cpu)); + + if (cpu == first) { + wrmsrl(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); + rdmsrl(MSR_ARRAY_STATUS, *((u64 *)status)); + } + + return 0; +} + +static void ifs_array_test_gen1(int cpu, struct device *dev) +{ + struct ifs_data *ifsd = ifs_get_data(dev); + u64 status = 0; + + stop_core_cpuslocked(cpu, do_array_test_gen1, &status); + ifsd->scan_details = status; + + if (status & ARRAY_GEN1_STATUS_FAIL) + ifsd->status = SCAN_TEST_FAIL; + else + ifsd->status = SCAN_TEST_PASS; +} + +#define SBAF_STATUS_PASS 0 +#define SBAF_STATUS_SIGN_FAIL 1 +#define SBAF_STATUS_INTR 2 +#define SBAF_STATUS_TEST_FAIL 3 + +enum sbaf_status_err_code { + IFS_SBAF_NO_ERROR = 0, + IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN = 1, + IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS = 2, + IFS_SBAF_UNASSIGNED_ERROR_CODE3 = 3, + IFS_SBAF_INVALID_BUNDLE_INDEX = 4, + IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS = 5, + IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY = 6, + IFS_SBAF_UNASSIGNED_ERROR_CODE7 = 7, + IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8, + IFS_SBAF_INTERRUPTED_DURING_EXECUTION = 9, + IFS_SBAF_INVALID_PROGRAM_INDEX = 0xA, + IFS_SBAF_CORRUPTED_CHUNK = 0xB, + IFS_SBAF_DID_NOT_START = 0xC, +}; + +static const char * const sbaf_test_status[] = { + [IFS_SBAF_NO_ERROR] = "SBAF no error", + [IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN] = "Other thread could not join.", + [IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS] = "Interrupt occurred prior to SBAF coordination.", + [IFS_SBAF_UNASSIGNED_ERROR_CODE3] = "Unassigned error code 0x3", + [IFS_SBAF_INVALID_BUNDLE_INDEX] = "Non-valid sbaf bundles. Reload test image", + [IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS] = "Mismatch in arguments between threads T0/T1.", + [IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY] = "Core not capable of performing SBAF currently", + [IFS_SBAF_UNASSIGNED_ERROR_CODE7] = "Unassigned error code 0x7", + [IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT] = "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently", + [IFS_SBAF_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SBAF start", + [IFS_SBAF_INVALID_PROGRAM_INDEX] = "SBAF program index not valid", + [IFS_SBAF_CORRUPTED_CHUNK] = "SBAF operation aborted due to corrupted chunk", + [IFS_SBAF_DID_NOT_START] = "SBAF operation did not start", +}; + +static void sbaf_message_not_tested(struct device *dev, int cpu, u64 status_data) +{ + union ifs_sbaf_status status = (union ifs_sbaf_status)status_data; + + if (status.error_code < ARRAY_SIZE(sbaf_test_status)) { + dev_info(dev, "CPU(s) %*pbl: SBAF operation did not start. 
%s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + sbaf_test_status[status.error_code]); + } else if (status.error_code == IFS_SW_TIMEOUT) { + dev_info(dev, "CPU(s) %*pbl: software timeout during scan\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } else if (status.error_code == IFS_SW_PARTIAL_COMPLETION) { + dev_info(dev, "CPU(s) %*pbl: %s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + "Not all SBAF bundles executed. Maximum forward progress retries exceeded"); + } else { + dev_info(dev, "CPU(s) %*pbl: SBAF unknown status %llx\n", + cpumask_pr_args(cpu_smt_mask(cpu)), status.data); + } +} + +static void sbaf_message_fail(struct device *dev, int cpu, union ifs_sbaf_status status) +{ + /* Failed signature check is set when SBAF signature did not match the expected value */ + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL) { + dev_err(dev, "CPU(s) %*pbl: Failed signature check\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } + + /* Failed to reach end of test */ + if (status.sbaf_status == SBAF_STATUS_TEST_FAIL) { + dev_err(dev, "CPU(s) %*pbl: Failed to complete test\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } +} + +static bool sbaf_bundle_completed(union ifs_sbaf_status status) +{ + return !(status.sbaf_status || status.error_code); +} + +static bool sbaf_can_restart(union ifs_sbaf_status status) +{ + enum sbaf_status_err_code err_code = status.error_code; + + /* Signature for chunk is bad, or scan test failed */ + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL || + status.sbaf_status == SBAF_STATUS_TEST_FAIL) + return false; + + switch (err_code) { + case IFS_SBAF_NO_ERROR: + case IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN: + case IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS: + case IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT: + case IFS_SBAF_INTERRUPTED_DURING_EXECUTION: + return true; + case IFS_SBAF_UNASSIGNED_ERROR_CODE3: + case IFS_SBAF_INVALID_BUNDLE_INDEX: + case IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS: + case IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY: + case IFS_SBAF_UNASSIGNED_ERROR_CODE7: + case IFS_SBAF_INVALID_PROGRAM_INDEX: + case IFS_SBAF_CORRUPTED_CHUNK: + case IFS_SBAF_DID_NOT_START: + break; + } + return false; +} + +/* + * Execute the SBAF test. Called "simultaneously" on all threads of a core + * at high priority using the stop_cpus mechanism. + */ +static int dosbaf(void *data) +{ + struct sbaf_run_params *run_params = data; + int cpu = smp_processor_id(); + union ifs_sbaf_status status; + struct ifs_data *ifsd; + int first; + + ifsd = run_params->ifsd; + + /* Only the first logical CPU on a core reports result */ + first = cpumask_first(cpu_smt_mask(cpu)); + wait_for_sibling_cpu(&sbaf_cpus_in, NSEC_PER_SEC); + + /* + * This WRMSR will wait for other HT threads to also write + * to this MSR (at most for activate.delay cycles). Then it + * starts scan of each requested bundle. The core test happens + * during the "execution" of the WRMSR. 
+ */ + wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data); + rdmsrl(MSR_SBAF_STATUS, status.data); + trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status); + + /* Pass back the result of the test */ + if (cpu == first) + run_params->status = status; + + return 0; +} + +static void ifs_sbaf_test_core(int cpu, struct device *dev) +{ + struct sbaf_run_params run_params; + union ifs_sbaf_status status = {}; + union ifs_sbaf activate; + unsigned long timeout; + struct ifs_data *ifsd; + int stop_bundle; + int retries; + + ifsd = ifs_get_data(dev); + + activate.data = 0; + activate.delay = IFS_THREAD_WAIT; + + timeout = jiffies + 2 * HZ; + retries = MAX_IFS_RETRIES; + activate.bundle_idx = 0; + stop_bundle = ifsd->max_bundle; + + while (activate.bundle_idx <= stop_bundle) { + if (time_after(jiffies, timeout)) { + status.error_code = IFS_SW_TIMEOUT; + break; + } + + atomic_set(&sbaf_cpus_in, 0); + + run_params.ifsd = ifsd; + run_params.activate = &activate; + run_params.retry_cnt = &retries; + stop_core_cpuslocked(cpu, dosbaf, &run_params); + + status = run_params.status; + + if (sbaf_bundle_completed(status)) { + activate.bundle_idx = status.bundle_idx + 1; + activate.pgm_idx = 0; + retries = MAX_IFS_RETRIES; + continue; + } + + /* Some cases can be retried, give up for others */ + if (!sbaf_can_restart(status)) + break; + + if (status.pgm_idx == activate.pgm_idx) { + /* If no progress retry */ + if (--retries == 0) { + if (status.error_code == IFS_NO_ERROR) + status.error_code = IFS_SW_PARTIAL_COMPLETION; + break; + } + } else { + /* if some progress, more pgms remaining in bundle, reset retries */ + retries = MAX_IFS_RETRIES; + activate.bundle_idx = status.bundle_idx; + activate.pgm_idx = status.pgm_idx; + } + } + + /* Update status for this core */ + ifsd->scan_details = status.data; + + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL || + status.sbaf_status == SBAF_STATUS_TEST_FAIL) { + ifsd->status = SCAN_TEST_FAIL; + sbaf_message_fail(dev, cpu, status); + } else if (status.error_code || status.sbaf_status == SBAF_STATUS_INTR || + (activate.bundle_idx < stop_bundle)) { + ifsd->status = SCAN_NOT_TESTED; + sbaf_message_not_tested(dev, cpu, status.data); + } else { + ifsd->status = SCAN_TEST_PASS; + } +} + /* * Initiate per core test. It wakes up work queue threads on the target cpu and * its sibling cpu. Once all sibling threads wake up, the scan test gets executed and @@ -351,7 +642,16 @@ int do_core_test(int cpu, struct device *dev) ifs_test_core(cpu, dev); break; case IFS_TYPE_ARRAY_BIST: - ifs_array_test_core(cpu, dev); + if (ifsd->array_gen == ARRAY_GEN0) + ifs_array_test_core(cpu, dev); + else + ifs_array_test_gen1(cpu, dev); + break; + case IFS_TYPE_SBAF: + if (!ifsd->loaded) + ret = -EPERM; + else + ifs_sbaf_test_core(cpu, dev); break; default: ret = -EINVAL; diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index f32a233470deabe89d5f279ceda23a256fb23dc9..4b53940a64e288f379357eb784a3c49601f48d7f 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -17,7 +17,7 @@ #include "../vsec.h" #include "class.h" -#define PMT_XA_START 0 +#define PMT_XA_START 1 #define PMT_XA_MAX INT_MAX #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) #define GUID_SPR_PUNIT 0x9956f43f @@ -31,7 +31,7 @@ bool intel_pmt_is_early_client_hw(struct device *dev) * differences from the server platforms (which use the Out Of Band * Management Services Module OOBMSM). 
*/ - return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW); + return !!(ivdev->quirks & VSEC_QUIRK_EARLY_HW); } EXPORT_SYMBOL_NS_GPL(intel_pmt_is_early_client_hw, INTEL_PMT); @@ -159,11 +159,12 @@ static struct class intel_pmt_class = { }; static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, - struct device *dev, + struct intel_vsec_device *ivdev, struct resource *disc_res) { - struct pci_dev *pci_dev = to_pci_dev(dev->parent); + struct pci_dev *pci_dev = ivdev->pcidev; + struct device *dev = &ivdev->auxdev.dev; + struct intel_pmt_header *header = &entry->header; u8 bir; /* @@ -215,6 +216,13 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, break; case ACCESS_BARID: + /* Use the provided base address if it exists */ + if (ivdev->base_addr) { + entry->base_addr = ivdev->base_addr + + GET_ADDRESS(header->base_offset); + break; + } + /* * If another BAR was specified then the base offset * represents the offset within that BAR. SO retrieve the @@ -239,6 +247,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns, struct device *parent) { + struct intel_vsec_device *ivdev = dev_to_ivdev(parent); struct resource res = {0}; struct device *dev; int ret; @@ -262,7 +271,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, if (ns->attr_grp) { ret = sysfs_create_group(entry->kobj, ns->attr_grp); if (ret) - goto fail_sysfs; + goto fail_sysfs_create_group; } /* if size is 0 assume no data buffer, so no file needed */ @@ -287,13 +296,23 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, entry->pmt_bin_attr.size = entry->size; ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr); - if (!ret) - return 0; + if (ret) + goto fail_ioremap; + if (ns->pmt_add_endpoint) { + ret = ns->pmt_add_endpoint(entry, ivdev->pcidev); + if (ret) + goto fail_add_endpoint; + } + + return 0; + +fail_add_endpoint: + sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr); fail_ioremap: if (ns->attr_grp) sysfs_remove_group(entry->kobj, ns->attr_grp); -fail_sysfs: +fail_sysfs_create_group: device_unregister(dev); fail_dev_create: xa_erase(ns->xa, entry->devid); @@ -305,7 +324,6 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa struct intel_vsec_device *intel_vsec_dev, int idx) { struct device *dev = &intel_vsec_dev->auxdev.dev; - struct intel_pmt_header header; struct resource *disc_res; int ret; @@ -315,16 +333,15 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa if (IS_ERR(entry->disc_table)) return PTR_ERR(entry->disc_table); - ret = ns->pmt_header_decode(entry, &header, dev); + ret = ns->pmt_header_decode(entry, dev); if (ret) return ret; - ret = intel_pmt_populate_entry(entry, &header, dev, disc_res); + ret = intel_pmt_populate_entry(entry, intel_vsec_dev, disc_res); if (ret) return ret; return intel_pmt_dev_register(entry, ns, dev); - } EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_create, INTEL_PMT); diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h index db11d58867ce2f1ed36a6fd04a0a9bc443092acd..d23c63b73ab7d0f81c5718ce10bfecba766fc0ba 100644 --- a/drivers/platform/x86/intel/pmt/class.h +++ b/drivers/platform/x86/intel/pmt/class.h @@ -9,6 +9,7 @@ #include #include "../vsec.h" +#include "telemetry.h" /* PMT access types */ #define ACCESS_BARID 2 @@ -18,7 +19,26 @@ #define GET_BIR(v) ((v) & GENMASK(2, 0)) #define GET_ADDRESS(v) ((v) & GENMASK(31, 3)) +struct pci_dev; + 
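+/*
+ * A telem_endpoint is refcounted: it is created in pmt_telem_add_endpoint(),
+ * users take a reference via pmt_telem_register_endpoint() and drop it via
+ * pmt_telem_unregister_endpoint(), and the memory is freed only once the
+ * last kref is put (see pmt_telem_ep_release()).
+ */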
+struct telem_endpoint { + struct pci_dev *pcidev; + struct telem_header header; + void __iomem *base; + bool present; + struct kref kref; +}; + +struct intel_pmt_header { + u32 base_offset; + u32 size; + u32 guid; + u8 access_type; +}; + struct intel_pmt_entry { + struct telem_endpoint *ep; + struct intel_pmt_header header; struct bin_attribute pmt_bin_attr; struct kobject *kobj; void __iomem *disc_table; @@ -29,20 +49,14 @@ struct intel_pmt_entry { int devid; }; -struct intel_pmt_header { - u32 base_offset; - u32 size; - u32 guid; - u8 access_type; -}; - struct intel_pmt_namespace { const char *name; struct xarray *xa; const struct attribute_group *attr_grp; int (*pmt_header_decode)(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct device *dev); + int (*pmt_add_endpoint)(struct intel_pmt_entry *entry, + struct pci_dev *pdev); }; bool intel_pmt_is_early_client_hw(struct device *dev); diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c index bbb3d61d09f46976cceeb1cc6d8d3645a019219b..4014c02cafdb53c1b8dd5bf0c7ae51693889bfce 100644 --- a/drivers/platform/x86/intel/pmt/crashlog.c +++ b/drivers/platform/x86/intel/pmt/crashlog.c @@ -223,10 +223,10 @@ static const struct attribute_group pmt_crashlog_group = { }; static int pmt_crashlog_header_decode(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct device *dev) { void __iomem *disc_table = entry->disc_table; + struct intel_pmt_header *header = &entry->header; struct crashlog_entry *crashlog; if (!pmt_crashlog_supported(entry)) diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c index 39cbc87cc28a89f452e1e9ccbceb6cc2b0f22666..09258564dfc4df197eefe156599124b5df53409f 100644 --- a/drivers/platform/x86/intel/pmt/telemetry.c +++ b/drivers/platform/x86/intel/pmt/telemetry.c @@ -30,6 +30,15 @@ /* Used by client hardware to identify a fixed telemetry entry*/ #define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000 +#define NUM_BYTES_QWORD(v) ((v) << 3) +#define SAMPLE_ID_OFFSET(v) ((v) << 3) + +#define NUM_BYTES_DWORD(v) ((v) << 2) +#define SAMPLE_ID_OFFSET32(v) ((v) << 2) + +/* Protects access to the xarray of telemetry endpoint handles */ +static DEFINE_MUTEX(ep_lock); + enum telem_type { TELEM_TYPE_PUNIT = 0, TELEM_TYPE_CRASHLOG, @@ -58,10 +67,10 @@ static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry, } static int pmt_telem_header_decode(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct device *dev) { void __iomem *disc_table = entry->disc_table; + struct intel_pmt_header *header = &entry->header; if (pmt_telem_region_overlaps(entry, dev)) return 1; @@ -84,21 +93,195 @@ static int pmt_telem_header_decode(struct intel_pmt_entry *entry, return 0; } +static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry, + struct pci_dev *pdev) +{ + struct telem_endpoint *ep; + + /* Endpoint lifetimes are managed by kref, not devres */ + entry->ep = kzalloc(sizeof(*(entry->ep)), GFP_KERNEL); + if (!entry->ep) + return -ENOMEM; + + ep = entry->ep; + ep->pcidev = pdev; + ep->header.access_type = entry->header.access_type; + ep->header.guid = entry->header.guid; + ep->header.base_offset = entry->header.base_offset; + ep->header.size = entry->header.size; + ep->base = entry->base; + ep->present = true; + + kref_init(&ep->kref); + + return 0; +} + static DEFINE_XARRAY_ALLOC(telem_array); static struct intel_pmt_namespace pmt_telem_ns = { .name = "telem", .xa = &telem_array, .pmt_header_decode = 
pmt_telem_header_decode, + .pmt_add_endpoint = pmt_telem_add_endpoint, }; +/* Called when all users unregister and the device is removed */ +static void pmt_telem_ep_release(struct kref *kref) +{ + struct telem_endpoint *ep; + + ep = container_of(kref, struct telem_endpoint, kref); + kfree(ep); +} + +unsigned long pmt_telem_get_next_endpoint(unsigned long start) +{ + struct intel_pmt_entry *entry; + unsigned long found_idx; + + mutex_lock(&ep_lock); + xa_for_each_start(&telem_array, found_idx, entry, start) { + /* + * Return first found index after start. + * 0 is not valid id. + */ + if (found_idx > start) + break; + } + mutex_unlock(&ep_lock); + + return found_idx == start ? 0 : found_idx; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_get_next_endpoint, INTEL_PMT_TELEMETRY); + +struct telem_endpoint *pmt_telem_register_endpoint(int devid) +{ + struct intel_pmt_entry *entry; + unsigned long index = devid; + + mutex_lock(&ep_lock); + entry = xa_find(&telem_array, &index, index, XA_PRESENT); + if (!entry) { + mutex_unlock(&ep_lock); + return ERR_PTR(-ENXIO); + } + + kref_get(&entry->ep->kref); + mutex_unlock(&ep_lock); + + return entry->ep; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_register_endpoint, INTEL_PMT_TELEMETRY); + +void pmt_telem_unregister_endpoint(struct telem_endpoint *ep) +{ + kref_put(&ep->kref, pmt_telem_ep_release); +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_unregister_endpoint, INTEL_PMT_TELEMETRY); + +int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info) +{ + struct intel_pmt_entry *entry; + unsigned long index = devid; + int err = 0; + + if (!info) + return -EINVAL; + + mutex_lock(&ep_lock); + entry = xa_find(&telem_array, &index, index, XA_PRESENT); + if (!entry) { + err = -ENXIO; + goto unlock; + } + + info->pdev = entry->ep->pcidev; + info->header = entry->ep->header; + +unlock: + mutex_unlock(&ep_lock); + return err; + +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_get_endpoint_info, INTEL_PMT_TELEMETRY); + +int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count) +{ + u32 offset, size; + + if (!ep->present) + return -ENODEV; + + offset = SAMPLE_ID_OFFSET(id); + size = ep->header.size; + + if (offset + NUM_BYTES_QWORD(count) > size) + return -EINVAL; + + memcpy_fromio(data, ep->base + offset, NUM_BYTES_QWORD(count)); + + return ep->present ? 0 : -EPIPE; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_read, INTEL_PMT_TELEMETRY); + +int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count) +{ + u32 offset, size; + + if (!ep->present) + return -ENODEV; + + offset = SAMPLE_ID_OFFSET32(id); + size = ep->header.size; + + if (offset + NUM_BYTES_DWORD(count) > size) + return -EINVAL; + + memcpy_fromio(data, ep->base + offset, NUM_BYTES_DWORD(count)); + + return ep->present ? 
0 : -EPIPE;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_read32, INTEL_PMT_TELEMETRY);
+
+struct telem_endpoint *
+pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, u32 guid, u16 pos)
+{
+	int devid = 0;
+	int inst = 0;
+	int err = 0;
+
+	while ((devid = pmt_telem_get_next_endpoint(devid))) {
+		struct telem_endpoint_info ep_info;
+
+		err = pmt_telem_get_endpoint_info(devid, &ep_info);
+		if (err)
+			return ERR_PTR(err);
+
+		if (ep_info.header.guid == guid && ep_info.pdev == pcidev) {
+			if (inst == pos)
+				return pmt_telem_register_endpoint(devid);
+			++inst;
+		}
+	}
+
+	return ERR_PTR(-ENXIO);
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_find_and_register_endpoint, INTEL_PMT_TELEMETRY);
+
 static void pmt_telem_remove(struct auxiliary_device *auxdev)
 {
 	struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev);
 	int i;

-	for (i = 0; i < priv->num_entries; i++)
-		intel_pmt_dev_destroy(&priv->entry[i], &pmt_telem_ns);
-}
+	mutex_lock(&ep_lock);
+	for (i = 0; i < priv->num_entries; i++) {
+		struct intel_pmt_entry *entry = &priv->entry[i];
+
+		kref_put(&entry->ep->kref, pmt_telem_ep_release);
+		intel_pmt_dev_destroy(entry, &pmt_telem_ns);
+	}
+	mutex_unlock(&ep_lock);
+}

 static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
 {
@@ -117,7 +300,9 @@ static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxilia
 	for (i = 0; i < intel_vsec_dev->num_resources; i++) {
 		struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];

+		mutex_lock(&ep_lock);
 		ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
+		mutex_unlock(&ep_lock);
 		if (ret < 0)
 			goto abort_probe;
 		if (ret)
diff --git a/drivers/platform/x86/intel/pmt/telemetry.h b/drivers/platform/x86/intel/pmt/telemetry.h
new file mode 100644
index 0000000000000000000000000000000000000000..d45af5512b4e2dd8de7f35a0b02f84482b39efdf
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/telemetry.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TELEMETRY_H
+#define _TELEMETRY_H
+
+/* Telemetry types */
+#define PMT_TELEM_TELEMETRY	0
+#define PMT_TELEM_CRASHLOG	1
+
+struct telem_endpoint;
+struct pci_dev;
+
+struct telem_header {
+	u8	access_type;
+	u16	size;
+	u32	guid;
+	u32	base_offset;
+};
+
+struct telem_endpoint_info {
+	struct pci_dev		*pdev;
+	struct telem_header	header;
+};
+
+/**
+ * pmt_telem_get_next_endpoint() - Get next device id for a telemetry endpoint
+ * @start: starting devid to look from
+ *
+ * This function can be used in a while loop predicate to retrieve the devid
+ * of all available telemetry endpoints. pmt_telem_get_endpoint_info() and
+ * pmt_telem_register_endpoint() can be used inside the loop to examine
+ * endpoint info and register to receive a pointer to the endpoint. The pointer
+ * is then usable in the telemetry read calls to access the telemetry data.
+ *
+ * Return:
+ * * devid      - devid of the next present endpoint from start
+ * * 0          - when no more endpoints are present after start
+ */
+unsigned long pmt_telem_get_next_endpoint(unsigned long start);
+
+/**
+ * pmt_telem_register_endpoint() - Register a telemetry endpoint
+ * @devid: device id/handle of the telemetry endpoint
+ *
+ * Increments the kref usage counter for the endpoint.
+ *
+ * Return:
+ * * endpoint   - On success returns pointer to the telemetry endpoint
+ * * -ENXIO     - telemetry endpoint not found
+ */
+struct telem_endpoint *pmt_telem_register_endpoint(int devid);
+
+/**
+ * pmt_telem_unregister_endpoint() - Unregister a telemetry endpoint
+ * @ep: Telemetry endpoint returned by pmt_telem_register_endpoint()
+ *
+ * Decrements the kref usage counter for the endpoint.
+ */
+void pmt_telem_unregister_endpoint(struct telem_endpoint *ep);
+
+/**
+ * pmt_telem_get_endpoint_info() - Get info for an endpoint from its devid
+ * @devid: device id/handle of the telemetry endpoint
+ * @info:  Endpoint info structure to be populated
+ *
+ * Return:
+ * * 0          - Success
+ * * -ENXIO     - telemetry endpoint not found for the devid
+ * * -EINVAL    - @info is NULL
+ */
+int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info);
+
+/**
+ * pmt_telem_find_and_register_endpoint() - Get a telemetry endpoint from
+ * pci_dev device, guid and pos
+ * @pcidev: PCI device inside the Intel vsec
+ * @guid:   GUID of the telemetry space
+ * @pos:    Instance of the guid
+ *
+ * Return:
+ * * endpoint   - On success returns pointer to the telemetry endpoint
+ * * -ENXIO     - telemetry endpoint not found
+ */
+struct telem_endpoint *pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev,
+							     u32 guid, u16 pos);
+
+/**
+ * pmt_telem_read() - Read qwords from counter sram using sample id
+ * @ep:    Telemetry endpoint to be read
+ * @id:    The beginning sample id of the metric(s) to be read
+ * @data:  Allocated qword buffer
+ * @count: Number of qwords requested
+ *
+ * Callers must ensure reads are aligned. When the call returns -ENODEV,
+ * the device has been removed and callers should unregister the telemetry
+ * endpoint.
+ *
+ * Return:
+ * * 0          - Success
+ * * -ENODEV    - The device is not present.
+ * * -EINVAL    - The offset is out of bounds
+ * * -EPIPE     - The device was removed during the read. Data written
+ *                but should be considered invalid.
+ */
+int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count);
+
+/**
+ * pmt_telem_read32() - Read dwords from counter sram using sample id
+ * @ep:    Telemetry endpoint to be read
+ * @id:    The beginning sample id of the metric(s) to be read
+ * @data:  Allocated dword buffer
+ * @count: Number of dwords requested
+ *
+ * Callers must ensure reads are aligned. When the call returns -ENODEV,
+ * the device has been removed and callers should unregister the telemetry
+ * endpoint.
+ *
+ * Return:
+ * * 0          - Success
+ * * -ENODEV    - The device is not present.
+ * * -EINVAL    - The offset is out of bounds
+ * * -EPIPE     - The device was removed during the read. Data written
+ *                but should be considered invalid.
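+ *
+ * A minimal usage sketch (the GUID and sample layout are hypothetical,
+ * for illustration only)::
+ *
+ *	struct telem_endpoint *ep;
+ *	u32 buf[4];
+ *
+ *	ep = pmt_telem_find_and_register_endpoint(pcidev, 0x1a067102, 0);
+ *	if (!IS_ERR(ep)) {
+ *		if (!pmt_telem_read32(ep, 0, buf, 4))
+ *			process_samples(buf);	/* hypothetical consumer */
+ *		pmt_telem_unregister_endpoint(ep);
+ *	}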
+ */ +int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count); + +#endif diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 9040a3d39924bb154de7af1323cf840b20e067d3..bd51dee9418fc98d7d51c3325fa3f50e0585f2d0 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -653,10 +653,6 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, /* Lock to prevent module registration when already opened by user space */ static DEFINE_MUTEX(punit_misc_dev_open_lock); -/* Lock to allow one shared misc device for all ISST interfaces */ -static DEFINE_MUTEX(punit_misc_dev_reg_lock); -static int misc_usage_count; -static int misc_device_ret; static int misc_device_open; static int isst_if_open(struct inode *inode, struct file *file) @@ -720,55 +716,25 @@ static struct miscdevice isst_if_char_driver = { .fops = &isst_if_char_driver_ops, }; -static const struct x86_cpu_id hpm_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL), - X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL), - {} -}; - static int isst_misc_reg(void) { - mutex_lock(&punit_misc_dev_reg_lock); - if (misc_device_ret) - goto unlock_exit; - - if (!misc_usage_count) { - const struct x86_cpu_id *id; - - id = x86_match_cpu(hpm_cpu_ids); - if (id) - isst_hpm_support = true; - - misc_device_ret = isst_if_cpu_info_init(); - if (misc_device_ret) - goto unlock_exit; + int ret; - misc_device_ret = misc_register(&isst_if_char_driver); - if (misc_device_ret) { - isst_if_cpu_info_exit(); - goto unlock_exit; - } - } - misc_usage_count++; + ret = isst_if_cpu_info_init(); + if (ret) + return ret; -unlock_exit: - mutex_unlock(&punit_misc_dev_reg_lock); + ret = misc_register(&isst_if_char_driver); + if (ret) + isst_if_cpu_info_exit(); - return misc_device_ret; + return ret; } static void isst_misc_unreg(void) { - mutex_lock(&punit_misc_dev_reg_lock); - if (misc_usage_count) - misc_usage_count--; - if (!misc_usage_count && !misc_device_ret) { - misc_deregister(&isst_if_char_driver); - isst_if_cpu_info_exit(); - } - mutex_unlock(&punit_misc_dev_reg_lock); + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); } /** @@ -788,11 +754,12 @@ static void isst_misc_unreg(void) */ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) { - int ret; - if (device_type >= ISST_IF_DEV_MAX) return -EINVAL; + if (device_type < ISST_IF_DEV_TPMI && isst_hpm_support) + return -ENODEV; + mutex_lock(&punit_misc_dev_open_lock); /* Device is already open, we don't want to add new callbacks */ if (misc_device_open) { @@ -807,15 +774,6 @@ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) punit_callbacks[device_type].registered = 1; mutex_unlock(&punit_misc_dev_open_lock); - ret = isst_misc_reg(); - if (ret) { - /* - * No need of mutex as the misc device register failed - * as no one can open device yet. Hence no contention. 
- */ - punit_callbacks[device_type].registered = 0; - return ret; - } return 0; } EXPORT_SYMBOL_GPL(isst_if_cdev_register); @@ -831,7 +789,6 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register); */ void isst_if_cdev_unregister(int device_type) { - isst_misc_unreg(); mutex_lock(&punit_misc_dev_open_lock); punit_callbacks[device_type].def_ioctl = NULL; punit_callbacks[device_type].registered = 0; @@ -841,4 +798,51 @@ void isst_if_cdev_unregister(int device_type) } EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); +#define SST_HPM_SUPPORTED 0x01 +#define SST_MBOX_SUPPORTED 0x02 + +static const struct x86_cpu_id isst_cpu_ids[] = { + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, SST_HPM_SUPPORTED), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, SST_HPM_SUPPORTED), + X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, 0), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, SST_HPM_SUPPORTED), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, SST_HPM_SUPPORTED), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0), + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 0), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, SST_MBOX_SUPPORTED), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, isst_cpu_ids); + +static int __init isst_if_common_init(void) +{ + const struct x86_cpu_id *id; + + id = x86_match_cpu(isst_cpu_ids); + if (!id) + return -ENODEV; + + if (id->driver_data == SST_HPM_SUPPORTED) { + isst_hpm_support = true; + } else if (id->driver_data == SST_MBOX_SUPPORTED) { + u64 data; + + /* Can fail only on some Skylake-X generations */ + if (rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data) || + rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data)) + return -ENODEV; + } + + return isst_misc_reg(); +} +module_init(isst_if_common_init) + +static void __exit isst_if_common_exit(void) +{ + isst_misc_unreg(); +} +module_exit(isst_if_common_exit) + +MODULE_DESCRIPTION("ISST common interface module"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h index 1004f2c9cca86bf2f47d24e13ddf55a6d80ad7d0..378055fe1d16faf1b4fb0fe822c933c6d6b6cb50 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h @@ -16,6 +16,9 @@ #define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_1 0x3251 #define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1 0x3259 +#define MSR_OS_MAILBOX_INTERFACE 0xB0 +#define MSR_OS_MAILBOX_DATA 0xB1 + /* * Validate maximum commands in a single request. 
* This is enough to handle command to every core in one ioctl, or all diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c index 1b6eab07106810cf1b77ec1348ce4fcb7f895c88..48b608eaca5f587b708c5dc5397c80cced1083d5 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c @@ -21,8 +21,6 @@ #include "isst_if_common.h" -#define MSR_OS_MAILBOX_INTERFACE 0xB0 -#define MSR_OS_MAILBOX_DATA 0xB1 #define MSR_OS_MAILBOX_BUSY_BIT 31 /* diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c index ff49025ec0856ab587709a352442159bc97144c7..3f4343147dadb0e5d732761f216a36e7f64b2d5d 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c @@ -18,16 +18,17 @@ struct isst_mmio_range { int beg; int end; + int size; }; static struct isst_mmio_range mmio_range_devid_0[] = { - {0x04, 0x14}, - {0x20, 0xD0}, + {0x04, 0x14, 0x18}, + {0x20, 0xD0, 0xD4}, }; static struct isst_mmio_range mmio_range_devid_1[] = { - {0x04, 0x14}, - {0x20, 0x11C}, + {0x04, 0x14, 0x18}, + {0x20, 0x11C, 0x120}, }; struct isst_if_device { @@ -93,6 +94,7 @@ static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct isst_if_device *punit_dev; struct isst_if_cmd_cb cb; u32 mmio_base, pcu_base; + struct resource r; u64 base_addr; int ret; @@ -114,13 +116,16 @@ static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pcu_base &= GENMASK(10, 0); base_addr = (u64)mmio_base << 23 | (u64) pcu_base << 12; - punit_dev->punit_mmio = devm_ioremap(&pdev->dev, base_addr, 256); - if (!punit_dev->punit_mmio) - return -ENOMEM; + + punit_dev->mmio_range = (struct isst_mmio_range *) ent->driver_data; + + r = DEFINE_RES_MEM(base_addr, punit_dev->mmio_range[1].size); + punit_dev->punit_mmio = devm_ioremap_resource(&pdev->dev, &r); + if (IS_ERR(punit_dev->punit_mmio)) + return PTR_ERR(punit_dev->punit_mmio); mutex_init(&punit_dev->mutex); pci_set_drvdata(pdev, punit_dev); - punit_dev->mmio_range = (struct isst_mmio_range *) ent->driver_data; memset(&cb, 0, sizeof(cb)); cb.cmd_size = sizeof(struct isst_if_io_reg); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 63faa2ea8327b2c7d64857ad47ddd31c43d022cd..4045823071091a1af76d3dfc40481dc1b80e4788 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -17,12 +17,15 @@ * the hardware mapping. 
*/ +#define dev_fmt(fmt) "tpmi_sst: " fmt + #include #include #include #include #include #include +#include #include #include @@ -30,7 +33,8 @@ #include "isst_if_common.h" /* Supported SST hardware version by this driver */ -#define ISST_HEADER_VERSION 1 +#define ISST_MAJOR_VERSION 0 +#define ISST_MINOR_VERSION 1 /* * Used to indicate if value read from MMIO needs to get multiplied @@ -233,6 +237,7 @@ struct perf_level { * @saved_clos_configs: Save SST-CP CLOS configuration to store restore for suspend/resume * @saved_clos_assocs: Save SST-CP CLOS association to store restore for suspend/resume * @saved_pp_control: Save SST-PP control information to store restore for suspend/resume + * @write_blocked: Write operation is blocked, so can't change SST state * * This structure is used store complete SST information for a power_domain. This information * is used to read/write request for any SST IOCTL. Each physical CPU package can have multiple @@ -258,22 +263,36 @@ struct tpmi_per_power_domain_info { u64 saved_clos_configs[4]; u64 saved_clos_assocs[4]; u64 saved_pp_control; + bool write_blocked; }; +/* Supported maximum partitions */ +#define SST_MAX_PARTITIONS 2 + /** * struct tpmi_sst_struct - Store sst info for a package * @package_id: Package id for this aux device instance * @number_of_power_domains: Number of power_domains pointed by power_domain_info pointer * @power_domain_info: Pointer to power domains information + * @cdie_mask: Mask of compute dies present in a partition from hardware. + * This mask is not present in the version 1 information header. + * @io_dies: Number of IO dies in a partition. This will be 0 for TPMI + * version 1 information header. + * @partition_mask: Mask of all partitions. + * @partition_mask_current: Current partition mask as some may have been unbound. * * This structure is used store full SST information for a package. - * Each package has a unique OOB PCI device, which enumerates TPMI. - * Each Package will have multiple power_domains. + * Each package has one or multiple OOB PCI devices. Each package can contain multiple + * power domains. 
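+ *
+ * On a partitioned package each partition enumerates as a separate
+ * auxiliary device, so tpmi_sst_dev_add() can run more than once per
+ * package; @partition_mask_current loses a partition's bit when that
+ * device is unbound, and domain ID mapping fails once it no longer
+ * matches @partition_mask.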
*/ struct tpmi_sst_struct { int package_id; - int number_of_power_domains; - struct tpmi_per_power_domain_info *power_domain_info; + struct tpmi_per_power_domain_info *power_domain_info[SST_MAX_PARTITIONS]; + u16 cdie_mask[SST_MAX_PARTITIONS]; + u8 number_of_power_domains[SST_MAX_PARTITIONS]; + u8 io_dies[SST_MAX_PARTITIONS]; + u8 partition_mask; + u8 partition_mask_current; }; /** @@ -310,12 +329,11 @@ static int sst_add_perf_profiles(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info, int levels) { + struct device *dev = &auxdev->dev; u64 perf_level_offsets; int i; - pd_info->perf_levels = devm_kcalloc(&auxdev->dev, levels, - sizeof(struct perf_level), - GFP_KERNEL); + pd_info->perf_levels = devm_kcalloc(dev, levels, sizeof(struct perf_level), GFP_KERNEL); if (!pd_info->perf_levels) return 0; @@ -346,27 +364,32 @@ static int sst_add_perf_profiles(struct auxiliary_device *auxdev, static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info) { + struct device *dev = &auxdev->dev; int i, mask, levels; *((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base); pd_info->sst_header.cp_offset *= 8; pd_info->sst_header.pp_offset *= 8; - if (pd_info->sst_header.interface_version != ISST_HEADER_VERSION) { - dev_err(&auxdev->dev, "SST: Unsupported version:%x\n", - pd_info->sst_header.interface_version); + if (pd_info->sst_header.interface_version == TPMI_VERSION_INVALID) + return -ENODEV; + + if (TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version) != ISST_MAJOR_VERSION) { + dev_err(dev, "SST: Unsupported major version:%lx\n", + TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version)); return -ENODEV; } + if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) != ISST_MINOR_VERSION) + dev_info(dev, "SST: Ignore: Unsupported minor version:%lx\n", + TPMI_MINOR_VERSION(pd_info->sst_header.interface_version)); + /* Read SST CP Header */ *((u64 *)&pd_info->cp_header) = readq(pd_info->sst_base + pd_info->sst_header.cp_offset); /* Read PP header */ *((u64 *)&pd_info->pp_header) = readq(pd_info->sst_base + pd_info->sst_header.pp_offset); - /* Force level_en_mask level 0 */ - pd_info->pp_header.level_en_mask |= 0x01; - mask = 0x01; levels = 0; for (i = 0; i < 8; ++i) { @@ -380,6 +403,126 @@ static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domai return 0; } +static u8 isst_instance_count(struct tpmi_sst_struct *sst_inst) +{ + u8 i, max_part, count = 0; + + /* Partition mask starts from bit 0 and contains 1s only */ + max_part = hweight8(sst_inst->partition_mask); + for (i = 0; i < max_part; i++) + count += sst_inst->number_of_power_domains[i]; + + return count; +} + +/** + * map_cdies() - Map user domain ID to compute domain ID + * @sst_inst: TPMI Instance + * @id: User domain ID + * @partition: Resolved partition + * + * Helper function to map_partition_power_domain_id() to resolve compute + * domain ID and partition. Use hardware provided cdie_mask for a partition + * as is to resolve a compute domain ID. + * + * Return: %-EINVAL on error, otherwise mapped domain ID >= 0. 
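+ *
+ * For example (hypothetical masks), with cdie_mask = {0x3, 0xc} a user
+ * ID of 2 falls in partition 1 and maps to index 2 - ffs(0xc) + 1 = 0.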
+ */ +static int map_cdies(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition) +{ + u8 i, max_part; + + max_part = hweight8(sst_inst->partition_mask); + for (i = 0; i < max_part; i++) { + if (!(sst_inst->cdie_mask[i] & BIT(id))) + continue; + + *partition = i; + return id - ffs(sst_inst->cdie_mask[i]) + 1; + } + + return -EINVAL; +} + +/** + * map_partition_power_domain_id() - Map user domain ID to partition domain ID + * @sst_inst: TPMI Instance + * @id: User domain ID + * @partition: Resolved partition + * + * In a partitioned system a CPU package has two separate MMIO ranges (Under + * two PCI devices). But the CPU package compute die/power domain IDs are + * unique in a package. User space can get compute die/power domain ID from + * CPUID and MSR 0x54 for a CPU. So, those IDs need to be preserved even if + * they are present in two different partitions with its own order. + * + * For example for command ISST_IF_COUNT_TPMI_INSTANCES, the valid_mask + * is 111111b for a 4 compute and 2 IO dies system. This is presented as + * provided by the hardware in a non-partitioned system with the following + * order: + * I1-I0-C3-C2-C1-C0 + * Here: "C": for compute and "I" for IO die. + * Compute dies are always present first in TPMI instances, as they have + * to map to the real power domain/die ID of a system. In a non-partitioned + * system there is no way to identify compute and IO die boundaries from + * this driver without reading each CPU's mapping. + * + * The same order needs to be preserved, even if those compute dies are + * distributed among multiple partitions. For example: + * Partition 1 can contain: I1-C1-C0 + * Partition 2 can contain: I2-C3-C2 + * + * This will require a conversion of user space IDs to the actual index into + * array of stored power domains for each partition. For the above example + * this function will return partition and index as follows: + * + * ============= ========= ===== ======== + * User space ID Partition Index Die type + * ============= ========= ===== ======== + * 0 0 0 Compute + * 1 0 1 Compute + * 2 1 0 Compute + * 3 1 1 Compute + * 4 0 2 IO + * 5 1 2 IO + * ============= ========= ===== ======== + * + * Return: %-EINVAL on error, otherwise mapped domain ID >= 0. + */ +static int map_partition_power_domain_id(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition) +{ + u8 i, io_start_id, max_part; + + *partition = 0; + + /* If any PCI device for partition is unbound, treat this as failure */ + if (sst_inst->partition_mask != sst_inst->partition_mask_current) + return -EINVAL; + + max_part = hweight8(sst_inst->partition_mask); + + /* IO Index begin here */ + io_start_id = fls(sst_inst->cdie_mask[max_part - 1]); + + if (id < io_start_id) + return map_cdies(sst_inst, id, partition); + + for (i = 0; i < max_part; i++) { + u8 io_id; + + io_id = id - io_start_id; + if (io_id < sst_inst->io_dies[i]) { + u8 cdie_range; + + cdie_range = fls(sst_inst->cdie_mask[i]) - ffs(sst_inst->cdie_mask[i]) + 1; + *partition = i; + return cdie_range + io_id; + } + io_start_id += sst_inst->io_dies[i]; + } + + return -EINVAL; +} + /* * Map a package and power_domain id to SST information structure unique for a power_domain. * The caller should call under isst_tpmi_dev_lock. 
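Worked check of the mapping table above (assumed values: cdie_mask = {0x3, 0xc}, io_dies = {1, 1}): io_start_id = fls(0xc) = 4, so user IDs 0-3 resolve through map_cdies(); for ID 4, io_id = 0 lands in partition 0 at index fls(0x3) - ffs(0x3) + 1 + 0 = 2, and for ID 5 the loop advances io_start_id to 5, so io_id = 0 lands in partition 1, also at index 2, matching the last two rows.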
@@ -388,19 +531,20 @@ static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_dom { struct tpmi_per_power_domain_info *power_domain_info; struct tpmi_sst_struct *sst_inst; + u8 part; - if (pkg_id < 0 || pkg_id > isst_common.max_index || - pkg_id >= topology_max_packages()) + if (!in_range(pkg_id, 0, topology_max_packages()) || pkg_id > isst_common.max_index) return NULL; sst_inst = isst_common.sst_inst[pkg_id]; if (!sst_inst) return NULL; - if (power_domain_id < 0 || power_domain_id >= sst_inst->number_of_power_domains) + power_domain_id = map_partition_power_domain_id(sst_inst, power_domain_id, &part); + if (power_domain_id < 0) return NULL; - power_domain_info = &sst_inst->power_domain_info[power_domain_id]; + power_domain_info = &sst_inst->power_domain_info[part][power_domain_id]; if (power_domain_info && !power_domain_info->sst_base) return NULL; @@ -455,10 +599,10 @@ static long isst_if_core_power_state(void __user *argp) struct tpmi_per_power_domain_info *power_domain_info; struct isst_core_power core_power; - if (disable_dynamic_sst_features()) + if (copy_from_user(&core_power, argp, sizeof(core_power))) return -EFAULT; - if (copy_from_user(&core_power, argp, sizeof(core_power))) + if (core_power.get_set && disable_dynamic_sst_features()) return -EFAULT; power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id); @@ -510,6 +654,9 @@ static long isst_if_clos_param(void __user *argp) return -EINVAL; if (clos_param.get_set) { + if (power_domain_info->write_blocked) + return -EPERM; + _write_cp_info("clos.min_freq", clos_param.min_freq_mhz, (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH, @@ -569,6 +716,7 @@ static long isst_if_clos_assoc(void __user *argp) struct tpmi_sst_struct *sst_inst; int offset, shift, cpu; u64 val, mask, clos; + u8 part; if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc))) return -EFAULT; @@ -592,10 +740,14 @@ static long isst_if_clos_assoc(void __user *argp) sst_inst = isst_common.sst_inst[pkg_id]; - if (clos_assoc.power_domain_id > sst_inst->number_of_power_domains) + punit_id = map_partition_power_domain_id(sst_inst, punit_id, &part); + if (punit_id < 0) return -EINVAL; - power_domain_info = &sst_inst->power_domain_info[punit_id]; + power_domain_info = &sst_inst->power_domain_info[part][punit_id]; + + if (assoc_cmds.get_set && power_domain_info->write_blocked) + return -EPERM; offset = SST_CLOS_ASSOC_0_OFFSET + (punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE; @@ -695,6 +847,8 @@ static int isst_if_get_perf_level(void __user *argp) { struct isst_perf_level_info perf_level; struct tpmi_per_power_domain_info *power_domain_info; + unsigned long level_mask; + u8 level, support; if (copy_from_user(&perf_level, argp, sizeof(perf_level))) return -EFAULT; @@ -704,7 +858,7 @@ static int isst_if_get_perf_level(void __user *argp) return -EINVAL; perf_level.max_level = power_domain_info->max_level; - perf_level.level_mask = power_domain_info->pp_header.allowed_level_mask; + perf_level.level_mask = power_domain_info->pp_header.level_en_mask; perf_level.feature_rev = power_domain_info->pp_header.feature_rev; _read_pp_info("current_level", perf_level.current_level, SST_PP_STATUS_OFFSET, SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) @@ -714,12 +868,34 @@ static int isst_if_get_perf_level(void __user *argp) SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE) perf_level.enabled = 
!!(power_domain_info->sst_header.cap_mask & BIT(1)); - _read_bf_level_info("bf_support", perf_level.sst_bf_support, 0, 0, - SST_BF_FEATURE_SUPPORTED_START, SST_BF_FEATURE_SUPPORTED_WIDTH, - SST_MUL_FACTOR_NONE); - _read_tf_level_info("tf_support", perf_level.sst_tf_support, 0, 0, - SST_TF_FEATURE_SUPPORTED_START, SST_TF_FEATURE_SUPPORTED_WIDTH, - SST_MUL_FACTOR_NONE); + level_mask = perf_level.level_mask; + perf_level.sst_bf_support = 0; + for_each_set_bit(level, &level_mask, BITS_PER_BYTE) { + /* + * Read BF support for a level. Read output is updated + * to "support" variable by the below macro. + */ + _read_bf_level_info("bf_support", support, level, 0, SST_BF_FEATURE_SUPPORTED_START, + SST_BF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE); + + /* If supported set the bit for the level */ + if (support) + perf_level.sst_bf_support |= BIT(level); + } + + perf_level.sst_tf_support = 0; + for_each_set_bit(level, &level_mask, BITS_PER_BYTE) { + /* + * Read TF support for a level. Read output is updated + * to "support" variable by the below macro. + */ + _read_tf_level_info("tf_support", support, level, 0, SST_TF_FEATURE_SUPPORTED_START, + SST_TF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE); + + /* If supported set the bit for the level */ + if (support) + perf_level.sst_tf_support |= BIT(level); + } if (copy_to_user(argp, &perf_level, sizeof(perf_level))) return -EFAULT; @@ -747,6 +923,9 @@ static int isst_if_set_perf_level(void __user *argp) if (!power_domain_info) return -EINVAL; + if (power_domain_info->write_blocked) + return -EPERM; + if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level))) return -EINVAL; @@ -804,6 +983,9 @@ static int isst_if_set_perf_feature(void __user *argp) if (!power_domain_info) return -EINVAL; + if (power_domain_info->write_blocked) + return -EPERM; + _write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET, SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE) @@ -1115,18 +1297,28 @@ static int isst_if_get_tpmi_instance_count(void __user *argp) if (tpmi_inst.socket_id >= topology_max_packages()) return -EINVAL; - tpmi_inst.count = isst_common.sst_inst[tpmi_inst.socket_id]->number_of_power_domains; - sst_inst = isst_common.sst_inst[tpmi_inst.socket_id]; + + tpmi_inst.count = isst_instance_count(sst_inst); + tpmi_inst.valid_mask = 0; - for (i = 0; i < sst_inst->number_of_power_domains; ++i) { + for (i = 0; i < tpmi_inst.count; i++) { struct tpmi_per_power_domain_info *pd_info; + u8 part; + int pd; - pd_info = &sst_inst->power_domain_info[i]; + pd = map_partition_power_domain_id(sst_inst, i, &part); + if (pd < 0) + continue; + + pd_info = &sst_inst->power_domain_info[part][pd]; if (pd_info->sst_base) tpmi_inst.valid_mask |= BIT(i); } + if (!tpmi_inst.valid_mask) + tpmi_inst.count = 0; + if (copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst))) return -EFAULT; @@ -1252,91 +1444,174 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, int tpmi_sst_dev_add(struct auxiliary_device *auxdev) { + struct tpmi_per_power_domain_info *pd_info; + bool read_blocked = 0, write_blocked = 0; struct intel_tpmi_plat_info *plat_info; + struct device *dev = &auxdev->dev; struct tpmi_sst_struct *tpmi_sst; - int i, ret, pkg = 0, inst = 0; - int num_resources; + u8 i, num_resources, io_die_cnt; + int ret, pkg = 0, inst = 0; + bool first_enum = false; + u16 cdie_mask; + u8 partition; + + ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked); + if (ret) + dev_info(dev, "Can't read 
feature status: ignoring read/write blocked status\n"); + + if (read_blocked) { + dev_info(dev, "Firmware has blocked reads, exiting\n"); + return -ENODEV; + } plat_info = tpmi_get_platform_data(auxdev); if (!plat_info) { - dev_err(&auxdev->dev, "No platform info\n"); + dev_err(dev, "No platform info\n"); return -EINVAL; } pkg = plat_info->package_id; if (pkg >= topology_max_packages()) { - dev_err(&auxdev->dev, "Invalid package id :%x\n", pkg); + dev_err(dev, "Invalid package id :%x\n", pkg); return -EINVAL; } - if (isst_common.sst_inst[pkg]) - return -EEXIST; + partition = plat_info->partition; + if (partition >= SST_MAX_PARTITIONS) { + dev_err(&auxdev->dev, "Invalid partition :%x\n", partition); + return -EINVAL; + } num_resources = tpmi_get_resource_count(auxdev); if (!num_resources) return -EINVAL; - tpmi_sst = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_sst), GFP_KERNEL); - if (!tpmi_sst) - return -ENOMEM; + mutex_lock(&isst_tpmi_dev_lock); + + if (isst_common.sst_inst[pkg]) { + tpmi_sst = isst_common.sst_inst[pkg]; + } else { + /* + * tpmi_sst instance is for a package. So needs to be + * allocated only once for both partitions. We can't use + * devm_* allocation here as each partition is a + * different device, which can be unbound. + */ + tpmi_sst = kzalloc(sizeof(*tpmi_sst), GFP_KERNEL); + if (!tpmi_sst) { + ret = -ENOMEM; + goto unlock_exit; + } + first_enum = true; + } + + ret = 0; + + pd_info = devm_kcalloc(dev, num_resources, sizeof(*pd_info), GFP_KERNEL); + if (!pd_info) { + ret = -ENOMEM; + goto unlock_free; + } - tpmi_sst->power_domain_info = devm_kcalloc(&auxdev->dev, num_resources, - sizeof(*tpmi_sst->power_domain_info), - GFP_KERNEL); - if (!tpmi_sst->power_domain_info) - return -ENOMEM; + /* Get the IO die count, if cdie_mask is present */ + if (plat_info->cdie_mask) { + u8 cdie_range; - tpmi_sst->number_of_power_domains = num_resources; + cdie_mask = plat_info->cdie_mask; + cdie_range = fls(cdie_mask) - ffs(cdie_mask) + 1; + io_die_cnt = num_resources - cdie_range; + } else { + /* + * This is a synthetic mask, careful when assuming that + * they are compute dies only. + */ + cdie_mask = (1 << num_resources) - 1; + io_die_cnt = 0; + } for (i = 0; i < num_resources; ++i) { struct resource *res; res = tpmi_get_resource_at_index(auxdev, i); if (!res) { - tpmi_sst->power_domain_info[i].sst_base = NULL; + pd_info[i].sst_base = NULL; continue; } - tpmi_sst->power_domain_info[i].package_id = pkg; - tpmi_sst->power_domain_info[i].power_domain_id = i; - tpmi_sst->power_domain_info[i].auxdev = auxdev; - tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(&auxdev->dev, res); - if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base)) - return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base); - - ret = sst_main(auxdev, &tpmi_sst->power_domain_info[i]); - if (ret) { - devm_iounmap(&auxdev->dev, tpmi_sst->power_domain_info[i].sst_base); - tpmi_sst->power_domain_info[i].sst_base = NULL; + pd_info[i].package_id = pkg; + pd_info[i].power_domain_id = i; + pd_info[i].auxdev = auxdev; + pd_info[i].write_blocked = write_blocked; + pd_info[i].sst_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pd_info[i].sst_base)) { + ret = PTR_ERR(pd_info[i].sst_base); + goto unlock_free; + } + + if (sst_main(auxdev, &pd_info[i])) { + /* + * This entry is not valid, hardware can partially + * populate dies. In this case MMIO will have 0xFFs. + * Also possible some pre-production hardware has + * invalid data. But don't fail and continue to use + * other dies with valid data. 
+ */ + devm_iounmap(dev, pd_info[i].sst_base); + pd_info[i].sst_base = NULL; continue; } ++inst; } - if (!inst) - return -ENODEV; + if (!inst) { + ret = -ENODEV; + goto unlock_free; + } tpmi_sst->package_id = pkg; + + tpmi_sst->power_domain_info[partition] = pd_info; + tpmi_sst->number_of_power_domains[partition] = num_resources; + tpmi_sst->cdie_mask[partition] = cdie_mask; + tpmi_sst->io_dies[partition] = io_die_cnt; + tpmi_sst->partition_mask |= BIT(partition); + tpmi_sst->partition_mask_current |= BIT(partition); + auxiliary_set_drvdata(auxdev, tpmi_sst); - mutex_lock(&isst_tpmi_dev_lock); if (isst_common.max_index < pkg) isst_common.max_index = pkg; isst_common.sst_inst[pkg] = tpmi_sst; + +unlock_free: + if (ret && first_enum) + kfree(tpmi_sst); +unlock_exit: mutex_unlock(&isst_tpmi_dev_lock); - return 0; + return ret; } EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, INTEL_TPMI_SST); void tpmi_sst_dev_remove(struct auxiliary_device *auxdev) { struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev); + struct intel_tpmi_plat_info *plat_info; + + plat_info = tpmi_get_platform_data(auxdev); + if (!plat_info) + return; mutex_lock(&isst_tpmi_dev_lock); - isst_common.sst_inst[tpmi_sst->package_id] = NULL; + tpmi_sst->power_domain_info[plat_info->partition] = NULL; + tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition); + /* Free the package instance when the all partitions are removed */ + if (!tpmi_sst->partition_mask_current) { + isst_common.sst_inst[tpmi_sst->package_id] = NULL; + kfree(tpmi_sst); + } mutex_unlock(&isst_tpmi_dev_lock); } EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, INTEL_TPMI_SST); @@ -1344,9 +1619,16 @@ EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, INTEL_TPMI_SST); void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev) { struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev); - struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info; + struct tpmi_per_power_domain_info *power_domain_info; + struct intel_tpmi_plat_info *plat_info; void __iomem *cp_base; + plat_info = tpmi_get_platform_data(auxdev); + if (!plat_info) + return; + + power_domain_info = tpmi_sst->power_domain_info[plat_info->partition]; + cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset; power_domain_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET); @@ -1365,9 +1647,16 @@ EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, INTEL_TPMI_SST); void tpmi_sst_dev_resume(struct auxiliary_device *auxdev) { struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev); - struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info; + struct tpmi_per_power_domain_info *power_domain_info; + struct intel_tpmi_plat_info *plat_info; void __iomem *cp_base; + plat_info = tpmi_get_platform_data(auxdev); + if (!plat_info) + return; + + power_domain_info = tpmi_sst->power_domain_info[plat_info->partition]; + cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset; writeq(power_domain_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET); @@ -1382,7 +1671,7 @@ void tpmi_sst_dev_resume(struct auxiliary_device *auxdev) } EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, INTEL_TPMI_SST); -#define ISST_TPMI_API_VERSION 0x02 +#define ISST_TPMI_API_VERSION 0x03 int tpmi_sst_init(void) { @@ -1439,4 +1728,5 @@ EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, INTEL_TPMI_SST); MODULE_IMPORT_NS(INTEL_TPMI); MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN); +MODULE_DESCRIPTION("ISST TPMI interface module"); 
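[Editor's note: one package is now backed by up to two auxiliary devices, one per partition, so tpmi_sst_dev_add() allocates the shared tpmi_sst_struct only on first enumeration and tpmi_sst_dev_remove() frees it only when partition_mask_current drops to zero. A toy userspace model of that bookkeeping, with simplified names and no locking (the real code holds isst_tpmi_dev_lock)::

  /* Toy model of the per-package lifetime rules in tpmi_sst_dev_add()/_remove() */
  #include <stdlib.h>

  #define SST_MAX_PARTITIONS 2

  struct pkg_inst {
      unsigned int partition_mask;          /* partitions ever enumerated */
      unsigned int partition_mask_current;  /* partitions currently bound */
  };

  static struct pkg_inst *pkg;  /* stands in for isst_common.sst_inst[pkg_id] */

  static int partition_add(unsigned int partition)
  {
      if (partition >= SST_MAX_PARTITIONS)
          return -1;

      if (!pkg) {                           /* first partition allocates */
          pkg = calloc(1, sizeof(*pkg));
          if (!pkg)
              return -1;
      }
      pkg->partition_mask |= 1u << partition;
      pkg->partition_mask_current |= 1u << partition;
      return 0;
  }

  static void partition_remove(unsigned int partition)
  {
      pkg->partition_mask_current &= ~(1u << partition);
      if (!pkg->partition_mask_current) {   /* last partition frees */
          free(pkg);
          pkg = NULL;
      }
  }

This is also why map_partition_power_domain_id() fails when the two masks differ: while one partition's PCI device is unbound, part of the user-visible ID space has no backing power_domain_info array.]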
MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index 4c42c28bdd3d492efe53166a230acfdac7f1d309..060f9b86bc030a495a761c91163681addb1e36bf 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -128,6 +128,9 @@ struct intel_tpmi_info { * @dev: PCI device number * @bus: PCI bus number * @pkg: CPU Package id + * @segment: PCI segment id + * @partition: Package Partition id + * @cdie_mask: Bitmap of compute dies in the current partition * @reserved: Reserved for future use * @lock: When set to 1 the register is locked and becomes read-only * until next reset. Not for use by the OS driver. @@ -139,22 +142,39 @@ struct tpmi_info_header { u64 dev:5; u64 bus:8; u64 pkg:8; - u64 reserved:39; + u64 segment:8; + u64 partition:2; + u64 cdie_mask:16; + u64 reserved:13; u64 lock:1; } __packed; -/* - * List of supported TMPI IDs. - * Some TMPI IDs are not used by Linux, so the numbers are not consecutive. +/** + * struct tpmi_feature_state - Structure to read hardware state of a feature + * @enabled: Enable state of a feature, 1: enabled, 0: disabled + * @reserved_1: Reserved for future use + * @write_blocked: Writes are blocked means all write operations are ignored + * @read_blocked: Reads are blocked means will read 0xFFs + * @pcs_select: Interface used by out of band software, not used in OS + * @reserved_2: Reserved for future use + * @id: TPMI ID of the feature + * @reserved_3: Reserved for future use + * @locked: When set to 1, OS can't change this register. + * + * The structure is used to read hardware state of a TPMI feature. This + * information is used for debug and restricting operations for this feature. */ -enum intel_tpmi_id { - TPMI_ID_RAPL = 0, /* Running Average Power Limit */ - TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */ - TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */ - TPMI_ID_SST = 5, /* Speed Select Technology */ - TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */ - TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */ -}; +struct tpmi_feature_state { + u32 enabled:1; + u32 reserved_1:3; + u32 write_blocked:1; + u32 read_blocked:1; + u32 pcs_select:1; + u32 reserved_2:1; + u32 id:8; + u32 reserved_3:15; + u32 locked:1; +} __packed; /* * The size from hardware is in u32 units. 
This size is from a trusted hardware, @@ -202,6 +222,7 @@ EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI); #define TPMI_CONTROL_STATUS_OFFSET 0x00 #define TPMI_COMMAND_OFFSET 0x08 +#define TMPI_CONTROL_DATA_VAL_OFFSET 0x0c /* * Spec is calling for max 1 seconds to get ownership at the worst @@ -230,7 +251,6 @@ EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI); /* TPMI command data registers */ #define TMPI_CONTROL_DATA_CMD GENMASK_ULL(7, 0) -#define TMPI_CONTROL_DATA_VAL GENMASK_ULL(63, 32) #define TPMI_CONTROL_DATA_VAL_FEATURE GENMASK_ULL(48, 40) /* Command to send via control interface */ @@ -240,9 +260,6 @@ EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI); #define TPMI_CMD_LEN_MASK GENMASK_ULL(18, 16) -#define TPMI_STATE_DISABLED BIT_ULL(0) -#define TPMI_STATE_LOCKED BIT_ULL(31) - /* Mutex to complete get feature status without interruption */ static DEFINE_MUTEX(tpmi_dev_lock); @@ -256,7 +273,7 @@ static int tpmi_wait_for_owner(struct intel_tpmi_info *tpmi_info, u8 owner) } static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int feature_id, - int *locked, int *disabled) + struct tpmi_feature_state *feature_state) { u64 control, data; int ret; @@ -306,17 +323,8 @@ static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int featu } /* Response is ready */ - data = readq(tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET); - data = FIELD_GET(TMPI_CONTROL_DATA_VAL, data); - - *disabled = 0; - *locked = 0; - - if (!(data & TPMI_STATE_DISABLED)) - *disabled = 1; - - if (data & TPMI_STATE_LOCKED) - *locked = 1; + memcpy_fromio(feature_state, tpmi_info->tpmi_control_mem + TMPI_CONTROL_DATA_VAL_OFFSET, + sizeof(*feature_state)); ret = 0; @@ -330,39 +338,54 @@ static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int featu return ret; } -int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, - int *locked, int *disabled) +int tpmi_get_feature_status(struct auxiliary_device *auxdev, + int feature_id, bool *read_blocked, bool *write_blocked) { struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent); struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev); + struct tpmi_feature_state feature_state; + int ret; - return tpmi_read_feature_status(tpmi_info, feature_id, locked, disabled); + ret = tpmi_read_feature_status(tpmi_info, feature_id, &feature_state); + if (ret) + return ret; + + *read_blocked = feature_state.read_blocked; + *write_blocked = feature_state.write_blocked; + + return 0; } EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI); static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused) { struct intel_tpmi_info *tpmi_info = s->private; + int locked, disabled, read_blocked, write_blocked; + struct tpmi_feature_state feature_state; struct intel_tpmi_pm_feature *pfs; - int locked, disabled, ret, i; + int ret, i; seq_printf(s, "tpmi PFS start offset 0x:%llx\n", tpmi_info->pfs_start); - seq_puts(s, "tpmi_id\t\tentries\t\tsize\t\tcap_offset\tattribute\tvsec_offset\tlocked\tdisabled\n"); + seq_puts(s, "tpmi_id\t\tentries\t\tsize\t\tcap_offset\tattribute\tvsec_offset\tlocked\tdisabled\tread_blocked\twrite_blocked\n"); for (i = 0; i < tpmi_info->feature_count; ++i) { pfs = &tpmi_info->tpmi_features[i]; - ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &locked, - &disabled); + ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state); if (ret) { locked = 'U'; disabled = 'U'; + read_blocked 
= 'U'; + write_blocked = 'U'; } else { - disabled = disabled ? 'Y' : 'N'; - locked = locked ? 'Y' : 'N'; + disabled = feature_state.enabled ? 'N' : 'Y'; + locked = feature_state.locked ? 'Y' : 'N'; + read_blocked = feature_state.read_blocked ? 'Y' : 'N'; + write_blocked = feature_state.write_blocked ? 'Y' : 'N'; } - seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\n", + seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\t\t%c\t\t%c\n", pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries, pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset, - pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled); + pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled, + read_blocked, write_blocked); } return 0; @@ -568,9 +591,21 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info, struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev; char feature_id_name[TPMI_FEATURE_NAME_LEN]; struct intel_vsec_device *feature_vsec_dev; + struct tpmi_feature_state feature_state; struct resource *res, *tmp; const char *name; - int i; + int i, ret; + + ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state); + if (ret) + return ret; + + /* + * If not enabled, continue to look at other features in the PFS, so return -EOPNOTSUPP. + * This will not cause failure of loading of this driver. + */ + if (!feature_state.enabled) + return -EOPNOTSUPP; name = intel_tpmi_name(pfs->pfs_header.tpmi_id); if (!name) @@ -636,28 +671,44 @@ static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info) } #define TPMI_INFO_BUS_INFO_OFFSET 0x08 +#define TPMI_INFO_MAJOR_VERSION 0x00 +#define TPMI_INFO_MINOR_VERSION 0x02 static int tpmi_process_info(struct intel_tpmi_info *tpmi_info, struct intel_tpmi_pm_feature *pfs) { struct tpmi_info_header header; void __iomem *info_mem; + u64 feature_header; + int ret = 0; - info_mem = ioremap(pfs->vsec_offset + TPMI_INFO_BUS_INFO_OFFSET, - pfs->pfs_header.entry_size * sizeof(u32) - TPMI_INFO_BUS_INFO_OFFSET); + info_mem = ioremap(pfs->vsec_offset, pfs->pfs_header.entry_size * sizeof(u32)); if (!info_mem) return -ENOMEM; - memcpy_fromio(&header, info_mem, sizeof(header)); + feature_header = readq(info_mem); + if (TPMI_MAJOR_VERSION(feature_header) != TPMI_INFO_MAJOR_VERSION) { + ret = -ENODEV; + goto error_info_header; + } + + memcpy_fromio(&header, info_mem + TPMI_INFO_BUS_INFO_OFFSET, sizeof(header)); tpmi_info->plat_info.package_id = header.pkg; tpmi_info->plat_info.bus_number = header.bus; tpmi_info->plat_info.device_number = header.dev; tpmi_info->plat_info.function_number = header.fn; + if (TPMI_MINOR_VERSION(feature_header) >= TPMI_INFO_MINOR_VERSION) { + tpmi_info->plat_info.cdie_mask = header.cdie_mask; + tpmi_info->plat_info.partition = header.partition; + tpmi_info->plat_info.segment = header.segment; + } + +error_info_header: iounmap(info_mem); - return 0; + return ret; } static int tpmi_fetch_pfs_header(struct intel_tpmi_pm_feature *pfs, u64 start, int size) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c index 33bb58dc3f78c30a304a7a35595666152c34e908..4e880585cbe4d0c552a4f1854348c903d6f6b0f6 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c @@ -19,9 +19,8 @@ static int uncore_instance_count; static DEFINE_IDA(intel_uncore_ida); /* callbacks for 
actual HW read/write */ -static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned int *max); -static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max); -static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq); +static int (*uncore_read)(struct uncore_data *data, unsigned int *value, enum uncore_index index); +static int (*uncore_write)(struct uncore_data *data, unsigned int input, enum uncore_index index); static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -44,27 +43,22 @@ static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr return sprintf(buf, "%u\n", data->package_id); } -static ssize_t show_min_max_freq_khz(struct uncore_data *data, - char *buf, int min_max) +static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index index) { - unsigned int min, max; + unsigned int value; int ret; mutex_lock(&uncore_lock); - ret = uncore_read(data, &min, &max); + ret = uncore_read(data, &value, index); mutex_unlock(&uncore_lock); if (ret) return ret; - if (min_max) - return sprintf(buf, "%u\n", max); - - return sprintf(buf, "%u\n", min); + return sprintf(buf, "%u\n", value); } -static ssize_t store_min_max_freq_khz(struct uncore_data *data, - const char *buf, ssize_t count, - int min_max) +static ssize_t store_attr(struct uncore_data *data, const char *buf, ssize_t count, + enum uncore_index index) { unsigned int input; int ret; @@ -73,7 +67,7 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data, return -EINVAL; mutex_lock(&uncore_lock); - ret = uncore_write(data, input, min_max); + ret = uncore_write(data, input, index); mutex_unlock(&uncore_lock); if (ret) @@ -82,56 +76,32 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data, return count; } -static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf) -{ - unsigned int freq; - int ret; - - mutex_lock(&uncore_lock); - ret = uncore_read_freq(data, &freq); - mutex_unlock(&uncore_lock); - if (ret) - return ret; - - return sprintf(buf, "%u\n", freq); -} - -#define store_uncore_min_max(name, min_max) \ +#define store_uncore_attr(name, index) \ static ssize_t store_##name(struct kobject *kobj, \ struct kobj_attribute *attr, \ const char *buf, size_t count) \ { \ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\ \ - return store_min_max_freq_khz(data, buf, count, \ - min_max); \ + return store_attr(data, buf, count, index); \ } -#define show_uncore_min_max(name, min_max) \ +#define show_uncore_attr(name, index) \ static ssize_t show_##name(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf)\ { \ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\ \ - return show_min_max_freq_khz(data, buf, min_max); \ - } - -#define show_uncore_perf_status(name) \ - static ssize_t show_##name(struct kobject *kobj, \ - struct kobj_attribute *attr, char *buf)\ - { \ - struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\ - \ - return show_perf_status_freq_khz(data, buf); \ + return show_attr(data, buf, index); \ } -store_uncore_min_max(min_freq_khz, 0); -store_uncore_min_max(max_freq_khz, 1); +store_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ); +store_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ); -show_uncore_min_max(min_freq_khz, 0); -show_uncore_min_max(max_freq_khz, 1); +show_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ); 
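[Editor's note: each show_uncore_attr()/store_uncore_attr() invocation above generates one sysfs handler that funnels into the common show_attr()/store_attr() with an enum uncore_index selector. Reconstructed from the macro definition above (not literal patch content), show_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ) expands to::

  static ssize_t show_min_freq_khz(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
  {
      struct uncore_data *data = container_of(attr, struct uncore_data,
                                              min_freq_khz_kobj_attr);

      return show_attr(data, buf, UNCORE_INDEX_MIN_FREQ);
  }

The enum replaces the old boolean min_max selector and the separate read_freq callback, so a single read/write callback pair can serve the min, max and current frequency attributes.]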
+show_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ); -show_uncore_perf_status(current_freq_khz); +show_uncore_attr(current_freq_khz, UNCORE_INDEX_CURRENT_FREQ); #define show_uncore_data(member_name) \ static ssize_t show_##member_name(struct kobject *kobj, \ @@ -198,7 +168,7 @@ static int create_attr_group(struct uncore_data *data, char *name) data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr; data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr; - ret = uncore_read_freq(data, &freq); + ret = uncore_read(data, &freq, UNCORE_INDEX_CURRENT_FREQ); if (!ret) data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr; @@ -238,7 +208,8 @@ int uncore_freq_add_entry(struct uncore_data *data, int cpu) sprintf(data->name, "package_%02d_die_%02d", data->package_id, data->die_id); } - uncore_read(data, &data->initial_min_freq_khz, &data->initial_max_freq_khz); + uncore_read(data, &data->initial_min_freq_khz, UNCORE_INDEX_MIN_FREQ); + uncore_read(data, &data->initial_max_freq_khz, UNCORE_INDEX_MAX_FREQ); ret = create_attr_group(data, data->name); if (ret) { @@ -269,15 +240,15 @@ void uncore_freq_remove_die_entry(struct uncore_data *data) } EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, INTEL_UNCORE_FREQUENCY); -int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max), - int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int set_max), - int (*read_freq)(struct uncore_data *data, unsigned int *freq)) +int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value, + enum uncore_index index), + int (*write)(struct uncore_data *data, unsigned int input, + enum uncore_index index)) { mutex_lock(&uncore_lock); - uncore_read = read_control_freq; - uncore_write = write_control_freq; - uncore_read_freq = read_freq; + uncore_read = read; + uncore_write = write; if (!uncore_root_kobj) { struct device *dev_root = bus_get_dev_root(&cpu_subsys); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h index 0e5bf507e555209a69ba61e8e8eaaf7392209bfa..4c245b945e4ea1e2e6d05d519f82d28fdf61cf03 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h @@ -66,9 +66,16 @@ struct uncore_data { #define UNCORE_DOMAIN_ID_INVALID -1 -int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max), - int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int min_max), - int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq)); +enum uncore_index { + UNCORE_INDEX_MIN_FREQ, + UNCORE_INDEX_MAX_FREQ, + UNCORE_INDEX_CURRENT_FREQ, +}; + +int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value, + enum uncore_index index), + int (*write)(struct uncore_data *data, unsigned int input, + enum uncore_index index)); void uncore_freq_common_exit(void); int uncore_freq_add_entry(struct uncore_data *data, int cpu); void uncore_freq_remove_die_entry(struct uncore_data *data); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 1050221645482456a6cb1714f851bd128a0ecc42..9fa3037c03d15975d9d88ccbbd8d638797cbd07b 100644 --- 
a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -28,7 +28,8 @@ #include "uncore-frequency-common.h" -#define UNCORE_HEADER_VERSION 1 +#define UNCORE_MAJOR_VERSION 0 +#define UNCORE_MINOR_VERSION 2 #define UNCORE_HEADER_INDEX 0 #define UNCORE_FABRIC_CLUSTER_OFFSET 8 @@ -65,28 +66,34 @@ struct tpmi_uncore_struct { int min_ratio; struct tpmi_uncore_power_domain_info *pd_info; struct tpmi_uncore_cluster_info root_cluster; + bool write_blocked; }; -#define UNCORE_GENMASK_MIN_RATIO GENMASK_ULL(21, 15) -#define UNCORE_GENMASK_MAX_RATIO GENMASK_ULL(14, 8) -#define UNCORE_GENMASK_CURRENT_RATIO GENMASK_ULL(6, 0) +/* Bit definitions for STATUS register */ +#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0) + +/* Bit definitions for CONTROL register */ +#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(14, 8) +#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(21, 15) /* Helper function to read MMIO offset for max/min control frequency */ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, - unsigned int *min, unsigned int *max) + unsigned int *value, enum uncore_index index) { u64 control; control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); - *max = FIELD_GET(UNCORE_GENMASK_MAX_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER; - *min = FIELD_GET(UNCORE_GENMASK_MIN_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER; + if (index == UNCORE_INDEX_MAX_FREQ) + *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; + else + *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; } -#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_GENMASK_MAX_RATIO) +#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK) -/* Callback for sysfs read for max/min frequencies. Called under mutex locks */ -static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, - unsigned int *max) +/* Helper for sysfs read for max/min frequencies. Called under mutex locks */ +static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value, + enum uncore_index index) { struct tpmi_uncore_cluster_info *cluster_info; @@ -94,10 +101,11 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, if (cluster_info->root_domain) { struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root; - int i, _min = 0, _max = 0; + unsigned int min, max, v; + int i; - *min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER; - *max = 0; + min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER; + max = 0; /* * Get the max/min by looking at each cluster. 
Get the lowest @@ -108,35 +116,41 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) { read_control_freq(&uncore_root->pd_info[i].cluster_infos[j], - &_min, &_max); - if (*min > _min) - *min = _min; - if (*max < _max) - *max = _max; + &v, index); + if (v < min) + min = v; + if (v > max) + max = v; } } + + if (index == UNCORE_INDEX_MIN_FREQ) + *value = min; + else + *value = max; + return 0; } - read_control_freq(cluster_info, min, max); + read_control_freq(cluster_info, value, index); return 0; } /* Helper function to write MMIO offset for max/min control frequency */ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input, - unsigned int min_max) + unsigned int index) { u64 control; control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); - if (min_max) { - control &= ~UNCORE_GENMASK_MAX_RATIO; - control |= FIELD_PREP(UNCORE_GENMASK_MAX_RATIO, input); + if (index == UNCORE_INDEX_MAX_FREQ) { + control &= ~UNCORE_MAX_RATIO_MASK; + control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input); } else { - control &= ~UNCORE_GENMASK_MIN_RATIO; - control |= FIELD_PREP(UNCORE_GENMASK_MIN_RATIO, input); + control &= ~UNCORE_MIN_RATIO_MASK; + control |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input); } writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX)); @@ -144,7 +158,7 @@ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, un /* Callback for sysfs write for max/min frequencies. Called under mutex locks */ static int uncore_write_control_freq(struct uncore_data *data, unsigned int input, - unsigned int min_max) + enum uncore_index index) { struct tpmi_uncore_cluster_info *cluster_info; struct tpmi_uncore_struct *uncore_root; @@ -156,6 +170,9 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); uncore_root = cluster_info->uncore_root; + if (uncore_root->write_blocked) + return -EPERM; + /* Update each cluster in a package */ if (cluster_info->root_domain) { struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root; @@ -166,10 +183,10 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) write_control_freq(&uncore_root->pd_info[i].cluster_infos[j], - input, min_max); + input, index); } - if (min_max) + if (index == UNCORE_INDEX_MAX_FREQ) uncore_root->max_ratio = input; else uncore_root->min_ratio = input; @@ -177,18 +194,20 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu return 0; } - if (min_max && uncore_root->max_ratio && uncore_root->max_ratio < input) + if (index == UNCORE_INDEX_MAX_FREQ && uncore_root->max_ratio && + uncore_root->max_ratio < input) return -EINVAL; - if (!min_max && uncore_root->min_ratio && uncore_root->min_ratio > input) + if (index == UNCORE_INDEX_MIN_FREQ && uncore_root->min_ratio && + uncore_root->min_ratio > input) return -EINVAL; - write_control_freq(cluster_info, input, min_max); + write_control_freq(cluster_info, input, index); return 0; } -/* Callback for sysfs read for the current uncore frequency. Called under mutex locks */ +/* Helper for sysfs read for the current uncore frequency. 
Called under mutex locks */ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) { struct tpmi_uncore_cluster_info *cluster_info; @@ -199,11 +218,29 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) return -ENODATA; status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX); - *freq = FIELD_GET(UNCORE_GENMASK_CURRENT_RATIO, status) * UNCORE_FREQ_KHZ_MULTIPLIER; + *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, status) * UNCORE_FREQ_KHZ_MULTIPLIER; return 0; } +/* Callback for sysfs read for TPMI uncore values. Called under mutex locks. */ +static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index) +{ + switch (index) { + case UNCORE_INDEX_MIN_FREQ: + case UNCORE_INDEX_MAX_FREQ: + return uncore_read_control_freq(data, value, index); + + case UNCORE_INDEX_CURRENT_FREQ: + return uncore_read_freq(data, value); + + default: + break; + } + + return -EOPNOTSUPP; +} + static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore) { int i; @@ -232,20 +269,29 @@ static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore) static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) { + bool read_blocked = 0, write_blocked = 0; struct intel_tpmi_plat_info *plat_info; struct tpmi_uncore_struct *tpmi_uncore; bool uncore_sysfs_added = false; int ret, i, pkg = 0; int num_resources; + ret = tpmi_get_feature_status(auxdev, TPMI_ID_UNCORE, &read_blocked, &write_blocked); + if (ret) + dev_info(&auxdev->dev, "Can't read feature status: ignoring blocked status\n"); + + if (read_blocked) { + dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n"); + return -ENODEV; + } + /* Get number of power domains, which is equal to number of resources */ num_resources = tpmi_get_resource_count(auxdev); if (!num_resources) return -EINVAL; /* Register callbacks to uncore core */ - ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq, - uncore_read_freq); + ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq); if (ret) return ret; @@ -266,6 +312,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ } tpmi_uncore->power_domain_count = num_resources; + tpmi_uncore->write_blocked = write_blocked; /* Get the package ID from the TPMI core */ plat_info = tpmi_get_platform_data(auxdev); @@ -303,12 +350,21 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ /* Check for version and skip this resource if there is mismatch */ header = readq(pd_info->uncore_base); pd_info->ufs_header_ver = header & UNCORE_VERSION_MASK; - if (pd_info->ufs_header_ver != UNCORE_HEADER_VERSION) { - dev_info(&auxdev->dev, "Uncore: Unsupported version:%d\n", - pd_info->ufs_header_ver); + + if (pd_info->ufs_header_ver == TPMI_VERSION_INVALID) continue; + + if (TPMI_MAJOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MAJOR_VERSION) { + dev_err(&auxdev->dev, "Uncore: Unsupported major version:%lx\n", + TPMI_MAJOR_VERSION(pd_info->ufs_header_ver)); + ret = -ENODEV; + goto remove_clusters; } + if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION) + dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n", + TPMI_MINOR_VERSION(pd_info->ufs_header_ver)); + /* Get Cluster ID Mask */ cluster_mask = FIELD_GET(UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK, header); if (!cluster_mask) { diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c 
b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index a3b25253b6fdebe0e3d29f9af3ac95bab7cf9915..c68e69d7b242cc0ef118b8b253a3d63964734d01 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -14,6 +14,7 @@ * Author: Srinivas Pandruvada */ +#include #include #include #include @@ -36,8 +37,13 @@ static enum cpuhp_state uncore_hp_state __read_mostly; #define MSR_UNCORE_PERF_STATUS 0x621 #define UNCORE_FREQ_KHZ_MULTIPLIER 100000 -static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, - unsigned int *max) +#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(6, 0) +#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(14, 8) + +#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0) + +static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value, + enum uncore_index index) { u64 cap; int ret; @@ -49,20 +55,22 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, if (ret) return ret; - *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER; - *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER; + if (index == UNCORE_INDEX_MAX_FREQ) + *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER; + else + *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER; return 0; } static int uncore_write_control_freq(struct uncore_data *data, unsigned int input, - unsigned int min_max) + enum uncore_index index) { int ret; u64 cap; input /= UNCORE_FREQ_KHZ_MULTIPLIER; - if (!input || input > 0x7F) + if (!input || input > FIELD_MAX(UNCORE_MAX_RATIO_MASK)) return -EINVAL; if (data->control_cpu < 0) @@ -72,12 +80,12 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu if (ret) return ret; - if (min_max) { - cap &= ~0x7F; - cap |= input; + if (index == UNCORE_INDEX_MAX_FREQ) { + cap &= ~UNCORE_MAX_RATIO_MASK; + cap |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input); } else { - cap &= ~GENMASK(14, 8); - cap |= (input << 8); + cap &= ~UNCORE_MIN_RATIO_MASK; + cap |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input); } ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); @@ -101,11 +109,28 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) if (ret) return ret; - *freq = (ratio & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER; + *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, ratio) * UNCORE_FREQ_KHZ_MULTIPLIER; return 0; } +static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index) +{ + switch (index) { + case UNCORE_INDEX_MIN_FREQ: + case UNCORE_INDEX_MAX_FREQ: + return uncore_read_control_freq(data, value, index); + + case UNCORE_INDEX_CURRENT_FREQ: + return uncore_read_freq(data, value); + + default: + break; + } + + return -EOPNOTSUPP; +} + /* Caller provides protection */ static struct uncore_data *uncore_get_instance(unsigned int cpu) { @@ -235,8 +260,7 @@ static int __init intel_uncore_init(void) if (!uncore_instances) return -ENOMEM; - ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq, - uncore_read_freq); + ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq); if (ret) goto err_free; diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 343ab6a82c01773b33c1bae3df5e2f1e7503e269..47aea5f14ce1cda80744fad218521921b8165310 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -15,6 +15,7 @@ #include #include 
+#include #include #include #include @@ -24,13 +25,6 @@ #include "vsec.h" -/* Intel DVSEC offsets */ -#define INTEL_DVSEC_ENTRIES 0xA -#define INTEL_DVSEC_SIZE 0xB -#define INTEL_DVSEC_TABLE 0xC -#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0)) -#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3)) -#define TABLE_OFFSET_SHIFT 3 #define PMT_XA_START 0 #define PMT_XA_MAX INT_MAX #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) @@ -39,34 +33,6 @@ static DEFINE_IDA(intel_vsec_ida); static DEFINE_IDA(intel_vsec_sdsi_ida); static DEFINE_XARRAY_ALLOC(auxdev_array); -/** - * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers. - * @rev: Revision ID of the VSEC/DVSEC register space - * @length: Length of the VSEC/DVSEC register space - * @id: ID of the feature - * @num_entries: Number of instances of the feature - * @entry_size: Size of the discovery table for each feature - * @tbir: BAR containing the discovery tables - * @offset: BAR offset of start of the first discovery table - */ -struct intel_vsec_header { - u8 rev; - u16 length; - u16 id; - u8 num_entries; - u8 entry_size; - u8 tbir; - u32 offset; -}; - -enum intel_vsec_id { - VSEC_ID_TELEMETRY = 2, - VSEC_ID_WATCHER = 3, - VSEC_ID_CRASHLOG = 4, - VSEC_ID_SDSI = 65, - VSEC_ID_TPMI = 66, -}; - static const char *intel_vsec_name(enum intel_vsec_id id) { switch (id) { @@ -137,6 +103,9 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev; int ret, id; + if (!parent) + return -EINVAL; + ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev, PMT_XA_LIMIT, GFP_KERNEL); if (ret < 0) { @@ -155,9 +124,6 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, return id; } - if (!parent) - parent = &pdev->dev; - auxdev->id = id; auxdev->name = name; auxdev->dev.parent = parent; @@ -175,23 +141,27 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, return ret; } - ret = devm_add_action_or_reset(parent, intel_vsec_remove_aux, + return devm_add_action_or_reset(parent, intel_vsec_remove_aux, auxdev); - if (ret < 0) - return ret; - - return 0; } EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC); static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header, struct intel_vsec_platform_info *info) { - struct intel_vsec_device *intel_vsec_dev; - struct resource *res, *tmp; + struct intel_vsec_device __free(kfree) *intel_vsec_dev = NULL; + struct resource __free(kfree) *res = NULL; + struct resource *tmp; + struct device *parent; unsigned long quirks = info->quirks; + u64 base_addr; int i; + if (info->parent) + parent = info->parent; + else + parent = &pdev->dev; + if (!intel_vsec_supported(header->id, info->caps)) return -EINVAL; @@ -210,37 +180,50 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he return -ENOMEM; res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL); - if (!res) { - kfree(intel_vsec_dev); + if (!res) return -ENOMEM; - } if (quirks & VSEC_QUIRK_TABLE_SHIFT) header->offset >>= TABLE_OFFSET_SHIFT; + if (info->base_addr) + base_addr = info->base_addr; + else + base_addr = pdev->resource[header->tbir].start; + /* * The DVSEC/VSEC contains the starting offset and count for a block of * discovery tables. Create a resource array of these tables to the * auxiliary device driver. 
*/ for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) { - tmp->start = pdev->resource[header->tbir].start + - header->offset + i * (header->entry_size * sizeof(u32)); + tmp->start = base_addr + header->offset + i * (header->entry_size * sizeof(u32)); tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1; tmp->flags = IORESOURCE_MEM; + + /* Check resource is not in use */ + if (!request_mem_region(tmp->start, resource_size(tmp), "")) + return -EBUSY; + + release_mem_region(tmp->start, resource_size(tmp)); } intel_vsec_dev->pcidev = pdev; - intel_vsec_dev->resource = res; + intel_vsec_dev->resource = no_free_ptr(res); intel_vsec_dev->num_resources = header->num_entries; - intel_vsec_dev->info = info; + intel_vsec_dev->quirks = info->quirks; + intel_vsec_dev->base_addr = info->base_addr; if (header->id == VSEC_ID_SDSI) intel_vsec_dev->ida = &intel_vsec_sdsi_ida; else intel_vsec_dev->ida = &intel_vsec_ida; - return intel_vsec_add_aux(pdev, NULL, intel_vsec_dev, + /* + * Pass the ownership of intel_vsec_dev and resource within it to + * intel_vsec_add_aux() + */ + return intel_vsec_add_aux(pdev, parent, no_free_ptr(intel_vsec_dev), intel_vsec_name(header->id)); } @@ -253,10 +236,7 @@ static bool intel_vsec_walk_header(struct pci_dev *pdev, for ( ; *header; header++) { ret = intel_vsec_add_dev(pdev, *header, info); - if (ret) - dev_info(&pdev->dev, "Could not add device for VSEC id %d\n", - (*header)->id); - else + if (!ret) have_devices = true; } @@ -358,6 +338,16 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev, return have_devices; } +void intel_vsec_register(struct pci_dev *pdev, + struct intel_vsec_platform_info *info) +{ + if (!pdev || !info) + return; + + intel_vsec_walk_header(pdev, info); +} +EXPORT_SYMBOL_NS_GPL(intel_vsec_register, INTEL_VSEC); + static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct intel_vsec_platform_info *info; diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h index 0a6201b4a0e9064523ba9873cb04096da52223a8..e23e76129691a58bd81de468983f2e9e9d4da025 100644 --- a/drivers/platform/x86/intel/vsec.h +++ b/drivers/platform/x86/intel/vsec.h @@ -11,9 +11,45 @@ #define VSEC_CAP_SDSI BIT(3) #define VSEC_CAP_TPMI BIT(4) +/* Intel DVSEC offsets */ +#define INTEL_DVSEC_ENTRIES 0xA +#define INTEL_DVSEC_SIZE 0xB +#define INTEL_DVSEC_TABLE 0xC +#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0)) +#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3)) +#define TABLE_OFFSET_SHIFT 3 + struct pci_dev; struct resource; +enum intel_vsec_id { + VSEC_ID_TELEMETRY = 2, + VSEC_ID_WATCHER = 3, + VSEC_ID_CRASHLOG = 4, + VSEC_ID_SDSI = 65, + VSEC_ID_TPMI = 66, +}; + +/** + * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers. 
+ * @rev: Revision ID of the VSEC/DVSEC register space + * @length: Length of the VSEC/DVSEC register space + * @id: ID of the feature + * @num_entries: Number of instances of the feature + * @entry_size: Size of the discovery table for each feature + * @tbir: BAR containing the discovery tables + * @offset: BAR offset of start of the first discovery table + */ +struct intel_vsec_header { + u8 rev; + u16 length; + u16 id; + u8 num_entries; + u8 entry_size; + u8 tbir; + u32 offset; +}; + enum intel_vsec_quirks { /* Watcher feature not supported */ VSEC_QUIRK_NO_WATCHER = BIT(0), @@ -33,9 +69,11 @@ enum intel_vsec_quirks { /* Platform specific data */ struct intel_vsec_platform_info { + struct device *parent; struct intel_vsec_header **headers; unsigned long caps; unsigned long quirks; + u64 base_addr; }; struct intel_vsec_device { @@ -43,11 +81,12 @@ struct intel_vsec_device { struct pci_dev *pcidev; struct resource *resource; struct ida *ida; - struct intel_vsec_platform_info *info; int num_resources; int id; /* xa */ void *priv_data; size_t priv_data_size; + unsigned long quirks; + u64 base_addr; }; int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, @@ -63,4 +102,7 @@ static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device { return container_of(auxdev, struct intel_vsec_device, auxdev); } + +void intel_vsec_register(struct pci_dev *pdev, + struct intel_vsec_platform_info *info); #endif diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index f1de4111e98d9d2224147625e9d2742c7fe818ce..3350054632f1e87e522268d7a92d83a1245376d0 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -5,27 +5,29 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include +#include +#include +#include +#include #include -#include #include -#include -#include -#include #include -#include -#include -#include -#include +#include +#include +#include +#include #include -#include -#include #include -#include +#include +#include +#include +#include -#include #include #include +#include /* bitmasks for RAPL MSRs, used by primitive access functions */ #define ENERGY_STATUS_MASK 0xffffffff @@ -1505,6 +1507,586 @@ static int rapl_detect_domains(struct rapl_package *rp) return 0; } +#ifdef CONFIG_PERF_EVENTS + +/* + * Support for RAPL PMU + * + * Register a PMU if any of the registered RAPL Packages have the requirement + * of exposing its energy counters via Perf PMU. + * + * PMU Name: + * power + * + * Events: + * Name Event id RAPL Domain + * energy_cores 0x01 RAPL_DOMAIN_PP0 + * energy_pkg 0x02 RAPL_DOMAIN_PACKAGE + * energy_ram 0x03 RAPL_DOMAIN_DRAM + * energy_gpu 0x04 RAPL_DOMAIN_PP1 + * energy_psys 0x05 RAPL_DOMAIN_PLATFORM + * + * Unit: + * Joules + * + * Scale: + * 2.3283064365386962890625e-10 + * The same RAPL domain in different RAPL Packages may have different + * energy units. Use 2.3283064365386962890625e-10 (2^-32) Joules as + * the fixed unit for all energy counters, and covert each hardware + * counter increase to N times of PMU event counter increases. + * + * This is fully compatible with the current MSR RAPL PMU. This means that + * userspace programs like turbostat can use the same code to handle RAPL Perf + * PMU, no matter what RAPL Interface driver (MSR/TPMI, etc) is running + * underlying on the platform. + * + * Note that RAPL Packages can be probed/removed dynamically, and the events + * supported by each TPMI RAPL device can be different. 
Thus the RAPL PMU + * support is done on demand, which means + * 1. PMU is registered only if it is needed by a RAPL Package. PMU events for + * unsupported counters are not exposed. + * 2. PMU is unregistered and registered when a new RAPL Package is probed and + * supports new counters that are not supported by current PMU. + * 3. PMU is unregistered when all registered RAPL Packages don't need PMU. + */ + +struct rapl_pmu { + struct pmu pmu; /* Perf PMU structure */ + u64 timer_ms; /* Maximum expiration time to avoid counter overflow */ + unsigned long domain_map; /* Events supported by current registered PMU */ + bool registered; /* Whether the PMU has been registered or not */ +}; + +static struct rapl_pmu rapl_pmu; + +/* PMU helpers */ + +static int get_pmu_cpu(struct rapl_package *rp) +{ + int cpu; + + if (!rp->has_pmu) + return nr_cpu_ids; + + /* Only TPMI RAPL is supported for now */ + if (rp->priv->type != RAPL_IF_TPMI) + return nr_cpu_ids; + + /* TPMI RAPL uses any CPU in the package for PMU */ + for_each_online_cpu(cpu) + if (topology_physical_package_id(cpu) == rp->id) + return cpu; + + return nr_cpu_ids; +} + +static bool is_rp_pmu_cpu(struct rapl_package *rp, int cpu) +{ + if (!rp->has_pmu) + return false; + + /* Only TPMI RAPL is supported for now */ + if (rp->priv->type != RAPL_IF_TPMI) + return false; + + /* TPMI RAPL uses any CPU in the package for PMU */ + return topology_physical_package_id(cpu) == rp->id; +} + +static struct rapl_package_pmu_data *event_to_pmu_data(struct perf_event *event) +{ + struct rapl_package *rp = event->pmu_private; + + return &rp->pmu_data; +} + +/* PMU event callbacks */ + +static u64 event_read_counter(struct perf_event *event) +{ + struct rapl_package *rp = event->pmu_private; + u64 val; + int ret; + + /* Return 0 for unsupported events */ + if (event->hw.idx < 0) + return 0; + + ret = rapl_read_data_raw(&rp->domains[event->hw.idx], ENERGY_COUNTER, false, &val); + + /* Return 0 for failed read */ + if (ret) + return 0; + + return val; +} + +static void __rapl_pmu_event_start(struct perf_event *event) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + event->hw.state = 0; + + list_add_tail(&event->active_entry, &data->active_list); + + local64_set(&event->hw.prev_count, event_read_counter(event)); + if (++data->n_active == 1) + hrtimer_start(&data->hrtimer, data->timer_interval, + HRTIMER_MODE_REL_PINNED); +} + +static void rapl_pmu_event_start(struct perf_event *event, int mode) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + unsigned long flags; + + raw_spin_lock_irqsave(&data->lock, flags); + __rapl_pmu_event_start(event); + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static u64 rapl_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + u64 prev_raw_count, new_raw_count; + s64 delta, sdelta; + + /* + * Follow the generic code to drain hwc->prev_count. + * The loop is not expected to run for multiple times. + */ + prev_raw_count = local64_read(&hwc->prev_count); + do { + new_raw_count = event_read_counter(event); + } while (!local64_try_cmpxchg(&hwc->prev_count, + &prev_raw_count, new_raw_count)); + + + /* + * Now we have the new raw value and have updated the prev + * timestamp already. We can now calculate the elapsed delta + * (event-)time and add that to the generic event. 
+ */ + delta = new_raw_count - prev_raw_count; + + /* + * Scale delta to the smallest unit (2^-32) + * users must then scale back: count * 1/(2^32) to get Joules, + * i.e. use ldexp(count, -32). + * Watts = Joules/Time delta + */ + sdelta = delta * data->scale[event->hw.flags]; + + local64_add(sdelta, &event->count); + + return new_raw_count; +} + +static void rapl_pmu_event_stop(struct perf_event *event, int mode) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + raw_spin_lock_irqsave(&data->lock, flags); + + /* Mark event as deactivated and stopped */ + if (!(hwc->state & PERF_HES_STOPPED)) { + WARN_ON_ONCE(data->n_active <= 0); + if (--data->n_active == 0) + hrtimer_cancel(&data->hrtimer); + + list_del(&event->active_entry); + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + } + + /* Check if update of sw counter is necessary */ + if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of an event + * that we are disabling: + */ + rapl_event_update(event); + hwc->state |= PERF_HES_UPTODATE; + } + + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static int rapl_pmu_event_add(struct perf_event *event, int mode) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + raw_spin_lock_irqsave(&data->lock, flags); + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (mode & PERF_EF_START) + __rapl_pmu_event_start(event); + + raw_spin_unlock_irqrestore(&data->lock, flags); + + return 0; +} + +static void rapl_pmu_event_del(struct perf_event *event, int flags) +{ + rapl_pmu_event_stop(event, PERF_EF_UPDATE); +} + +/* RAPL PMU event ids, same as shown in sysfs */ +enum perf_rapl_events { + PERF_RAPL_PP0 = 1, /* all cores */ + PERF_RAPL_PKG, /* entire package */ + PERF_RAPL_RAM, /* DRAM */ + PERF_RAPL_PP1, /* gpu */ + PERF_RAPL_PSYS, /* psys */ + PERF_RAPL_MAX +}; +#define RAPL_EVENT_MASK GENMASK(7, 0) + +static const int event_to_domain[PERF_RAPL_MAX] = { + [PERF_RAPL_PP0] = RAPL_DOMAIN_PP0, + [PERF_RAPL_PKG] = RAPL_DOMAIN_PACKAGE, + [PERF_RAPL_RAM] = RAPL_DOMAIN_DRAM, + [PERF_RAPL_PP1] = RAPL_DOMAIN_PP1, + [PERF_RAPL_PSYS] = RAPL_DOMAIN_PLATFORM, +}; + +static int rapl_pmu_event_init(struct perf_event *event) +{ + struct rapl_package *pos, *rp = NULL; + u64 cfg = event->attr.config & RAPL_EVENT_MASK; + int domain, idx; + + /* Only look at RAPL events */ + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Check for supported events only */ + if (!cfg || cfg >= PERF_RAPL_MAX) + return -EINVAL; + + if (event->cpu < 0) + return -EINVAL; + + /* Find out which Package the event belongs to */ + list_for_each_entry(pos, &rapl_packages, plist) { + if (is_rp_pmu_cpu(pos, event->cpu)) { + rp = pos; + break; + } + } + if (!rp) + return -ENODEV; + + /* Find out which RAPL Domain the event belongs to */ + domain = event_to_domain[cfg]; + + event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; + event->pmu_private = rp; /* Which package */ + event->hw.flags = domain; /* Which domain */ + + event->hw.idx = -1; + /* Find out the index in rp->domains[] to get domain pointer */ + for (idx = 0; idx < rp->nr_domains; idx++) { + if (rp->domains[idx].id == domain) { + event->hw.idx = idx; + break; + } + } + + return 0; +} + +static void rapl_pmu_event_read(struct perf_event *event) +{ + rapl_event_update(event); +} + +static enum
hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) +{ + struct rapl_package_pmu_data *data = + container_of(hrtimer, struct rapl_package_pmu_data, hrtimer); + struct perf_event *event; + unsigned long flags; + + if (!data->n_active) + return HRTIMER_NORESTART; + + raw_spin_lock_irqsave(&data->lock, flags); + + list_for_each_entry(event, &data->active_list, active_entry) + rapl_event_update(event); + + raw_spin_unlock_irqrestore(&data->lock, flags); + + hrtimer_forward_now(hrtimer, data->timer_interval); + + return HRTIMER_RESTART; +} + +/* PMU sysfs attributes */ + +/* + * There are no default events, but we need to create "events" group (with + * empty attrs) before updating it with detected events. + */ +static struct attribute *attrs_empty[] = { + NULL, +}; + +static struct attribute_group pmu_events_group = { + .name = "events", + .attrs = attrs_empty, +}; + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rapl_package *rp; + cpumask_var_t cpu_mask; + int cpu; + int ret; + + if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; + + cpus_read_lock(); + + cpumask_clear(cpu_mask); + + /* Choose a cpu for each RAPL Package */ + list_for_each_entry(rp, &rapl_packages, plist) { + cpu = get_pmu_cpu(rp); + if (cpu < nr_cpu_ids) + cpumask_set_cpu(cpu, cpu_mask); + } + cpus_read_unlock(); + + ret = cpumap_print_to_pagebuf(true, buf, cpu_mask); + + free_cpumask_var(cpu_mask); + + return ret; +} + +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group pmu_cpumask_group = { + .attrs = pmu_cpumask_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-7"); +static struct attribute *pmu_format_attr[] = { + &format_attr_event.attr, + NULL +}; + +static struct attribute_group pmu_format_group = { + .name = "format", + .attrs = pmu_format_attr, +}; + +static const struct attribute_group *pmu_attr_groups[] = { + &pmu_events_group, + &pmu_cpumask_group, + &pmu_format_group, + NULL +}; + +#define RAPL_EVENT_ATTR_STR(_name, v, str) \ +static struct perf_pmu_events_attr event_attr_##v = { \ + .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \ + .event_str = str, \ +} + +RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); +RAPL_EVENT_ATTR_STR(energy-pkg, rapl_pkg, "event=0x02"); +RAPL_EVENT_ATTR_STR(energy-ram, rapl_ram, "event=0x03"); +RAPL_EVENT_ATTR_STR(energy-gpu, rapl_gpu, "event=0x04"); +RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05"); + +RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_unit_cores, "Joules"); +RAPL_EVENT_ATTR_STR(energy-pkg.unit, rapl_unit_pkg, "Joules"); +RAPL_EVENT_ATTR_STR(energy-ram.unit, rapl_unit_ram, "Joules"); +RAPL_EVENT_ATTR_STR(energy-gpu.unit, rapl_unit_gpu, "Joules"); +RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_unit_psys, "Joules"); + +RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_scale_cores, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_scale_pkg, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_scale_ram, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_scale_gpu, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_scale_psys, "2.3283064365386962890625e-10"); + +#define RAPL_EVENT_GROUP(_name, domain) \ +static struct attribute *pmu_attr_##_name[] = { \ + &event_attr_rapl_##_name.attr.attr, \ + &event_attr_rapl_unit_##_name.attr.attr, \ + 
&event_attr_rapl_scale_##_name.attr.attr, \ + NULL \ +}; \ +static umode_t is_visible_##_name(struct kobject *kobj, struct attribute *attr, int event) \ +{ \ + return rapl_pmu.domain_map & BIT(domain) ? attr->mode : 0; \ +} \ +static struct attribute_group pmu_group_##_name = { \ + .name = "events", \ + .attrs = pmu_attr_##_name, \ + .is_visible = is_visible_##_name, \ +} + +RAPL_EVENT_GROUP(cores, RAPL_DOMAIN_PP0); +RAPL_EVENT_GROUP(pkg, RAPL_DOMAIN_PACKAGE); +RAPL_EVENT_GROUP(ram, RAPL_DOMAIN_DRAM); +RAPL_EVENT_GROUP(gpu, RAPL_DOMAIN_PP1); +RAPL_EVENT_GROUP(psys, RAPL_DOMAIN_PLATFORM); + +static const struct attribute_group *pmu_attr_update[] = { + &pmu_group_cores, + &pmu_group_pkg, + &pmu_group_ram, + &pmu_group_gpu, + &pmu_group_psys, + NULL +}; + +static int rapl_pmu_update(struct rapl_package *rp) +{ + int ret = 0; + + /* Return if the PMU already covers all events supported by the current RAPL Package */ + if (rapl_pmu.registered && !(rp->domain_map & (~rapl_pmu.domain_map))) + goto end; + + /* Unregister the previously registered PMU */ + if (rapl_pmu.registered) + perf_pmu_unregister(&rapl_pmu.pmu); + + rapl_pmu.registered = false; + rapl_pmu.domain_map |= rp->domain_map; + + memset(&rapl_pmu.pmu, 0, sizeof(struct pmu)); + rapl_pmu.pmu.attr_groups = pmu_attr_groups; + rapl_pmu.pmu.attr_update = pmu_attr_update; + rapl_pmu.pmu.task_ctx_nr = perf_invalid_context; + rapl_pmu.pmu.event_init = rapl_pmu_event_init; + rapl_pmu.pmu.add = rapl_pmu_event_add; + rapl_pmu.pmu.del = rapl_pmu_event_del; + rapl_pmu.pmu.start = rapl_pmu_event_start; + rapl_pmu.pmu.stop = rapl_pmu_event_stop; + rapl_pmu.pmu.read = rapl_pmu_event_read; + rapl_pmu.pmu.module = THIS_MODULE; + rapl_pmu.pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT; + ret = perf_pmu_register(&rapl_pmu.pmu, "power", -1); + if (ret) { + pr_info("Failed to register PMU\n"); + return ret; + } + + rapl_pmu.registered = true; +end: + rp->has_pmu = true; + return ret; +} + +int rapl_package_add_pmu(struct rapl_package *rp) +{ + struct rapl_package_pmu_data *data = &rp->pmu_data; + int idx; + + if (rp->has_pmu) + return -EEXIST; + + guard(cpus_read_lock)(); + + for (idx = 0; idx < rp->nr_domains; idx++) { + struct rapl_domain *rd = &rp->domains[idx]; + int domain = rd->id; + u64 val; + + if (!test_bit(domain, &rp->domain_map)) + continue; + + /* + * The RAPL PMU granularity is 2^-32 Joules + * data->scale[]: times of 2^-32 Joules for each ENERGY COUNTER increase + */ + val = rd->energy_unit * (1ULL << 32); + do_div(val, ENERGY_UNIT_SCALE * 1000000); + data->scale[domain] = val; + + if (!rapl_pmu.timer_ms) { + struct rapl_primitive_info *rpi = get_rpi(rp, ENERGY_COUNTER); + + /* + * Calculate the timer rate: + * Use a reference of 200W for scaling the timeout to avoid counter + * overflows.
+ * + * max_count = rpi->mask >> rpi->shift + 1 + * max_energy_pj = max_count * rd->energy_unit + * max_time_sec = (max_energy_pj / 1000000000) / 200w + * + * rapl_pmu.timer_ms = max_time_sec * 1000 / 2 + */ + val = (rpi->mask >> rpi->shift) + 1; + val *= rd->energy_unit; + do_div(val, 1000000 * 200 * 2); + rapl_pmu.timer_ms = val; + + pr_debug("%llu ms overflow timer\n", rapl_pmu.timer_ms); + } + + pr_debug("Domain %s: hw unit %lld * 2^-32 Joules\n", rd->name, data->scale[domain]); + } + + /* Initialize per package PMU data */ + raw_spin_lock_init(&data->lock); + INIT_LIST_HEAD(&data->active_list); + data->timer_interval = ms_to_ktime(rapl_pmu.timer_ms); + hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + data->hrtimer.function = rapl_hrtimer_handle; + + return rapl_pmu_update(rp); +} +EXPORT_SYMBOL_GPL(rapl_package_add_pmu); + +void rapl_package_remove_pmu(struct rapl_package *rp) +{ + struct rapl_package *pos; + + if (!rp->has_pmu) + return; + + guard(cpus_read_lock)(); + + list_for_each_entry(pos, &rapl_packages, plist) { + /* PMU is still needed */ + if (pos->has_pmu && pos != rp) + return; + } + + perf_pmu_unregister(&rapl_pmu.pmu); + memset(&rapl_pmu, 0, sizeof(struct rapl_pmu)); +} +EXPORT_SYMBOL_GPL(rapl_package_remove_pmu); +#endif + /* called from CPU hotplug notifier, hotplug lock held */ void rapl_remove_package_cpuslocked(struct rapl_package *rp) { diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c index 1c48dba0ba96af4236773e380d8befe0ef88f44d..645fd1dc51a982edd6ac1edebfb598eed7f79384 100644 --- a/drivers/powercap/intel_rapl_tpmi.c +++ b/drivers/powercap/intel_rapl_tpmi.c @@ -313,6 +313,8 @@ static int intel_rapl_tpmi_probe(struct auxiliary_device *auxdev, goto err; } + rapl_package_add_pmu(trp->rp); + auxiliary_set_drvdata(auxdev, trp); return 0; @@ -325,6 +327,7 @@ static void intel_rapl_tpmi_remove(struct auxiliary_device *auxdev) { struct tpmi_rapl_package *trp = auxiliary_get_drvdata(auxdev); + rapl_package_remove_pmu(trp->rp); rapl_remove_package(trp->rp); trp_release(trp); } diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index b8d4f61f14be4fd65c55b92f0b8dc2882c865f59..63a846e63b9b1b9574e0821cc7b5e6cb1d84e7c7 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -53,6 +53,11 @@ struct ptp_clock { struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */ bool is_virtual_clock; bool has_cycles; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define info_to_vclock(d) container_of((d), struct ptp_vclock, info) diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 92f46a6312c24aedb2d450edbf0938fe61d3850c..6f270577df86d9bbdffd4725f1f9575e85994875 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -984,6 +984,13 @@ config RTC_DRV_ALPHA Direct support for the real-time clock found on every Alpha system, specifically MC146818 compatibles. If in doubt, say Y. +config RTC_DRV_SW64_VIRT + bool "SW64 Hypervisor based RTC" + depends on SW64 + default y + help + Get support for the Hypervisor based RTC on SW64 systems. 
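For reference, a minimal userspace consumer of the PMU registered above might look like the sketch below (illustrative only, not part of the patch: it assumes the "power" PMU is backed by this driver, uses the energy_pkg event id 0x02 from the table above, and requires perf privileges; error handling is kept minimal)::

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            struct perf_event_attr attr = { 0 };
            uint64_t count;
            int type, fd;
            FILE *f;

            /* PMU type id as registered by perf_pmu_register(..., "power", -1) */
            f = fopen("/sys/bus/event_source/devices/power/type", "r");
            if (!f || fscanf(f, "%d", &type) != 1)
                    return 1;
            fclose(f);

            attr.size = sizeof(attr);
            attr.type = type;
            attr.config = 0x02; /* energy_pkg, from the Events table above */

            /* RAPL events are package-wide and require cpu >= 0 (see event_init) */
            fd = syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
            if (fd < 0)
                    return 1;

            sleep(1);
            if (read(fd, &count, sizeof(count)) != sizeof(count))
                    return 1;

            /* Counts are in the fixed 2^-32 Joules unit exported via .scale */
            printf("energy_pkg: %.6f Joules\n", (double)count / 4294967296.0);
            return 0;
    }

Because the unit is fixed at 2^-32 Joules regardless of the hardware energy unit, the same consumer works unchanged for MSR- and TPMI-backed packages, which is the compatibility property the comment block above calls out.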
+ config RTC_DRV_DS1216 tristate "Dallas DS1216" depends on SNI_RM diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index fd209883ee2efd118c992f010fde9935c186c816..7711f79787acce325da642b9b9d3c03ea81c4c66 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -10,6 +10,10 @@ obj-$(CONFIG_RTC_CLASS) += rtc-core.o obj-$(CONFIG_RTC_MC146818_LIB) += rtc-mc146818-lib.o rtc-core-y := class.o interface.o +ifdef CONFIG_RTC_DRV_SW64_VIRT +rtc-core-y += rtc-sw64-virt-platform.o +endif + rtc-core-$(CONFIG_RTC_NVMEM) += nvmem.o rtc-core-$(CONFIG_RTC_INTF_DEV) += dev.o rtc-core-$(CONFIG_RTC_INTF_PROC) += proc.o @@ -168,6 +172,7 @@ obj-$(CONFIG_RTC_DRV_ST_LPC) += rtc-st-lpc.o obj-$(CONFIG_RTC_DRV_STM32) += rtc-stm32.o obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o +obj-$(CONFIG_RTC_DRV_SW64_VIRT) += rtc-sw64-virt.o obj-$(CONFIG_RTC_DRV_SUN6I) += rtc-sun6i.o obj-$(CONFIG_RTC_DRV_SUNPLUS) += rtc-sunplus.o obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index 651bf3c279c7462151096cf8565a56fffab9052d..6b5947ec6e55bfbd6482296f745b160e6b706430 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -11,6 +11,21 @@ #define UIP_RECHECK_DELAY 100 /* usec */ #define UIP_RECHECK_DELAY_MS (USEC_PER_MSEC / UIP_RECHECK_DELAY) #define UIP_RECHECK_LOOPS_MS(x) (x / UIP_RECHECK_DELAY_MS) +#ifdef CONFIG_X86 +static inline bool follow_mc146818_divider_reset(void) +{ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (boot_cpu_data.x86 <= 7 && boot_cpu_data.x86_model <= 59)) + return false; + return true; +} +#else +static inline bool follow_mc146818_divider_reset(void) +{ + return true; +} +#endif /* * Execute a function while the UIP (Update-in-progress) bit of the RTC is @@ -280,12 +295,13 @@ int mc146818_set_time(struct rtc_time *time) spin_lock_irqsave(&rtc_lock, flags); save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - if (apply_amd_register_a_behavior()) - CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); - else - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - + if (follow_mc146818_divider_reset()) { + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); + if (apply_amd_register_a_behavior()) + CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); + else + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + } #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); #endif @@ -302,7 +318,8 @@ int mc146818_set_time(struct rtc_time *time) #endif CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + if (follow_mc146818_divider_reset()) + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); spin_unlock_irqrestore(&rtc_lock, flags); diff --git a/drivers/rtc/rtc-sw64-guest.c b/drivers/rtc/rtc-sw64-guest.c new file mode 100644 index 0000000000000000000000000000000000000000..5d86ce20a1fbb8f493cb8dfeda0b917e3e8d1f2a --- /dev/null +++ b/drivers/rtc/rtc-sw64-guest.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Lu Feifei + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#define RTC_IO_ADDR (0x804910000000ULL) + +static int sw_guest_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + + ioaddr = 
ioremap(RTC_IO_ADDR, sizeof(long)); + if (!ioaddr) + return -ENOMEM; + rtc_time64_to_tm(*ioaddr, tm); + iounmap(ioaddr); + return 0; +} + +static const struct rtc_class_ops rtc_sw_guest_ops = { + .read_time = sw_guest_read_time, +}; + +static int __init rtc_sw_guest_probe(struct platform_device *pdev) +{ + struct rtc_device *rtc; + + rtc = devm_rtc_device_register(&pdev->dev, "sw_guest", + &rtc_sw_guest_ops, THIS_MODULE); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + platform_set_drvdata(pdev, rtc); + return 0; +} + +static struct platform_driver rtc_sw_guest_driver = { + .driver = { + .name = "rtc_sw_guest", + }, +}; + +module_platform_driver_probe(rtc_sw_guest_driver, rtc_sw_guest_probe); + +MODULE_AUTHOR("Lu Feifei "); +MODULE_DESCRIPTION("SW GUEST RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rtc_sw_guest"); diff --git a/drivers/rtc/rtc-sw64-virt-platform.c b/drivers/rtc/rtc-sw64-virt-platform.c new file mode 100644 index 0000000000000000000000000000000000000000..3db9ff2f0e646b17463d5550f6f431cfbbbf9e88 --- /dev/null +++ b/drivers/rtc/rtc-sw64-virt-platform.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +static struct platform_device rtc_sw64_virt_device = { + .name = "rtc_sw64_virt", + .id = -1, +}; + +static int __init rtc_sw64_virt_init(void) +{ + if (is_in_host()) + return 0; + + if (platform_device_register(&rtc_sw64_virt_device) < 0) + pr_err("unable to register rtc device...\n"); + /* not necessarily an error */ + return 0; +} +module_init(rtc_sw64_virt_init); diff --git a/drivers/rtc/rtc-sw64-virt.c b/drivers/rtc/rtc-sw64-virt.c new file mode 100644 index 0000000000000000000000000000000000000000..23c93d7ddbae7281d04f4f86137018cc297b77f4 --- /dev/null +++ b/drivers/rtc/rtc-sw64-virt.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* rtc-sw64-virt.c: Hypervisor-based RTC for SW64 systems.
+ * + * Copyright (C) 2021 Lu Feifei + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#define RTC_IO_ADDR (0x804910000000ULL) + +/* Hypervisor clock at the last set_time(), and the time the guest requested */ +static unsigned long vtime_old, vtime_new; + +static int sw64_virt_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + unsigned long vtime_now; + long vtime_offset; + + ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); + if (!ioaddr) + return -ENOMEM; + vtime_now = *ioaddr; + iounmap(ioaddr); + if (vtime_new) { + vtime_offset = vtime_new - vtime_old; + vtime_now += vtime_offset; + } + rtc_time64_to_tm(vtime_now, tm); + return 0; +} + +static int sw64_virt_set_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + + ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); + if (!ioaddr) + return -ENOMEM; + vtime_old = *ioaddr; + iounmap(ioaddr); + + vtime_new = rtc_tm_to_time64(tm); + return 0; +} + +static const struct rtc_class_ops rtc_sw64_virt_ops = { + .read_time = sw64_virt_read_time, + .set_time = sw64_virt_set_time, +}; + +static int __init rtc_sw64_virt_probe(struct platform_device *pdev) +{ + struct rtc_device *rtc; + + rtc = devm_rtc_device_register(&pdev->dev, "sw64_virt", + &rtc_sw64_virt_ops, THIS_MODULE); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + platform_set_drvdata(pdev, rtc); + return 0; +} + +static struct platform_driver rtc_sw64_virt_driver = { + .driver = { + .name = "rtc_sw64_virt", + }, +}; + +module_platform_driver_probe(rtc_sw64_virt_driver, rtc_sw64_virt_probe); + +MODULE_AUTHOR("Lu Feifei "); +MODULE_DESCRIPTION("Sunway virtual RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rtc_sw64_virt"); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 23bce8995a55756305179148153610c9955ce960..cd658370af46f8f1d80e3acaeadc08e5f8f38cf4 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -489,6 +489,7 @@ source "drivers/scsi/esas2r/Kconfig" source "drivers/scsi/megaraid/Kconfig.megaraid" source "drivers/scsi/mpt3sas/Kconfig" source "drivers/scsi/mpi3mr/Kconfig" +source "drivers/scsi/leapioraid/Kconfig" source "drivers/scsi/smartpqi/Kconfig" config SCSI_HPTIOP diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index f055bfd54a6832b3da7b08e6c56a129ff320041f..ab9ce4a6bae528375d6a6d697a1cf52a38f53fa6 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -100,6 +100,7 @@ obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ obj-$(CONFIG_MEGARAID_SAS) += megaraid/ obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/ obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/ +obj-$(CONFIG_SCSI_LEAPIORAID) += leapioraid/ obj-$(CONFIG_SCSI_ACARD) += atp870u.o obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o obj-$(CONFIG_SCSI_INITIO) += initio.o diff --git a/drivers/scsi/leapioraid/Kconfig b/drivers/scsi/leapioraid/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..744f3b633c03ac74414adf535403bd3b5bddd36c --- /dev/null +++ b/drivers/scsi/leapioraid/Kconfig @@ -0,0 +1,13 @@ +# +# Kernel configuration file for the LEAPIORAID +# + +config SCSI_LEAPIORAID + tristate "LeapIO RAID Adapter" + depends on PCI && SCSI + select SCSI_SAS_ATTRS + select RAID_ATTRS + select IRQ_POLL + help + This driver supports the LeapIO RAID controller, which provides a PCI Express Gen4 + host interface and supports SAS/SATA HDDs and SSDs.
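A note on the rtc-sw64-virt.c read/set pair above: set_time() never writes the hypervisor's RTC register; it only latches the pair (hypervisor time, requested time), and read_time() re-applies the difference on every read. A self-contained model of that arithmetic (illustrative only, not part of the patch)::

    #include <stdio.h>

    /* Stand-in for the hypervisor-owned counter at RTC_IO_ADDR */
    static unsigned long hw_rtc = 1000000000UL;

    /* Same roles as the driver globals */
    static unsigned long vtime_old, vtime_new;

    static void model_set_time(unsigned long t)
    {
            vtime_old = hw_rtc;     /* hypervisor clock when the guest set its time */
            vtime_new = t;          /* time the guest asked for */
    }

    static unsigned long model_read_time(void)
    {
            if (!vtime_new)         /* clock never set: report hypervisor time */
                    return hw_rtc;
            return hw_rtc + (vtime_new - vtime_old);
    }

    int main(void)
    {
            model_set_time(2000000000UL);   /* guest moves its clock forward */
            hw_rtc += 60;                   /* one minute elapses */
            printf("%lu\n", model_read_time());     /* prints 2000000060 */
            return 0;
    }

One consequence, visible in the driver as well: the guest clock keeps ticking at the hypervisor's rate, and setting the clock to the epoch (vtime_new == 0) is indistinguishable from never having set it.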
diff --git a/drivers/scsi/leapioraid/Makefile b/drivers/scsi/leapioraid/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..1a3786a56cb7a3f3d40dea076d9a45eeae3decbd --- /dev/null +++ b/drivers/scsi/leapioraid/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the LEAPIORAID drivers. +# + +obj-$(CONFIG_SCSI_LEAPIORAID) += leapioraid.o +leapioraid-objs += leapioraid_func.o \ + leapioraid_os.o \ + leapioraid_transport.o \ + leapioraid_app.o diff --git a/drivers/scsi/leapioraid/leapioraid.h b/drivers/scsi/leapioraid/leapioraid.h new file mode 100644 index 0000000000000000000000000000000000000000..30908fffe43befddf9d1484541c2536db5a0e6ab --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid.h @@ -0,0 +1,2026 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + */ + +#ifndef LEAPIORAID_H +#define LEAPIORAID_H + +typedef u8 U8; +typedef __le16 U16; +typedef __le32 U32; +typedef __le64 U64 __aligned(4); + +#define LEAPIORAID_IOC_STATE_RESET (0x00000000) +#define LEAPIORAID_IOC_STATE_READY (0x10000000) +#define LEAPIORAID_IOC_STATE_OPERATIONAL (0x20000000) +#define LEAPIORAID_IOC_STATE_FAULT (0x40000000) +#define LEAPIORAID_IOC_STATE_COREDUMP (0x50000000) +#define LEAPIORAID_IOC_STATE_MASK (0xF0000000) + +struct LeapioraidSysInterfaceRegs_t { + U32 Doorbell; + U32 WriteSequence; + U32 HostDiagnostic; + U32 Reserved1; + U32 DiagRWData; + U32 DiagRWAddressLow; + U32 DiagRWAddressHigh; + U32 Reserved2[5]; + U32 HostInterruptStatus; + U32 HostInterruptMask; + U32 DCRData; + U32 DCRAddress; + U32 Reserved3[2]; + U32 ReplyFreeHostIndex; + U32 Reserved4[8]; + U32 ReplyPostHostIndex; + U32 Reserved5; + U32 HCBSize; + U32 HCBAddressLow; + U32 HCBAddressHigh; + U32 Reserved6[12]; + U32 Scratchpad[4]; + U32 RequestDescriptorPostLow; + U32 RequestDescriptorPostHigh; + U32 AtomicRequestDescriptorPost; + U32 IocLogBufPosition; + U32 HostLogBufPosition; + U32 Reserved7[11]; +}; + +#define LEAPIORAID_DOORBELL_USED (0x08000000) +#define LEAPIORAID_DOORBELL_DATA_MASK (0x0000FFFF) +#define LEAPIORAID_DOORBELL_FUNCTION_SHIFT (24) +#define LEAPIORAID_DOORBELL_ADD_DWORDS_SHIFT (16) + +#define LEAPIORAID_DIAG_RESET_ADAPTER (0x00000004) + +#define LEAPIORAID_HIS_SYS2IOC_DB_STATUS (0x80000000) +#define LEAPIORAID_HIS_IOC2SYS_DB_STATUS (0x00000001) + +#define LEAPIORAID_RPHI_MSIX_INDEX_SHIFT (24) + +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08) +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) + +struct LEAPIORAID_DEFAULT_REQUEST_DESCRIPTOR { + U8 RequestFlags; + U8 MSIxIndex; + U16 SMID; + U16 LMID; + U16 DescriptorTypeDependent; +}; + +struct LEAPIORAID_HIGH_PRIORITY_REQUEST_DESCRIPTOR { + U8 RequestFlags; + U8 MSIxIndex; + U16 SMID; + U16 LMID; + U16 Reserved1; +}; + +struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR { + U8 RequestFlags; + U8 MSIxIndex; + U16 SMID; + U16 LMID; + U16 DevHandle; +}; + +typedef +struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR + LEAPIORAID_FP_SCSI_IO_REQUEST_DESCRIPTOR; + +union LeapioraidReqDescUnion_t { + struct LEAPIORAID_DEFAULT_REQUEST_DESCRIPTOR Default; + struct LEAPIORAID_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; + struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; + LEAPIORAID_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; + U64 Words; +}; + +struct LeapioraidAtomicReqDesc_t { + U8 RequestFlags; + U8 MSIxIndex; + U16 SMID; 
+}; + +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) + +struct LeapioraidDefaultRepDesc_t { + U8 ReplyFlags; + U8 MSIxIndex; + U16 DescriptorTypeDependent1; + U32 DescriptorTypeDependent2; +}; + +struct LEAPIORAID_ADDRESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; + U8 MSIxIndex; + U16 SMID; + U32 ReplyFrameAddress; +}; + +struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; + U8 MSIxIndex; + U16 SMID; + U16 TaskTag; + U16 Reserved1; +}; + +typedef +struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR + LEAPIORAID_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR; + +union LeapioraidRepDescUnion_t { + struct LeapioraidDefaultRepDesc_t Default; + struct LEAPIORAID_ADDRESS_REPLY_DESCRIPTOR AddressReply; + struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; + LEAPIORAID_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; + U64 Words; +}; + +#define LEAPIORAID_FUNC_SCSI_IO_REQUEST (0x00) +#define LEAPIORAID_FUNC_SCSI_TASK_MGMT (0x01) +#define LEAPIORAID_FUNC_IOC_INIT (0x02) +#define LEAPIORAID_FUNC_IOC_FACTS (0x03) +#define LEAPIORAID_FUNC_CONFIG (0x04) +#define LEAPIORAID_FUNC_PORT_FACTS (0x05) +#define LEAPIORAID_FUNC_PORT_ENABLE (0x06) +#define LEAPIORAID_FUNC_EVENT_NOTIFICATION (0x07) +#define LEAPIORAID_FUNC_EVENT_ACK (0x08) +#define LEAPIORAID_FUNC_FW_DOWNLOAD (0x09) +#define LEAPIORAID_FUNC_FW_UPLOAD (0x12) +#define LEAPIORAID_FUNC_RAID_ACTION (0x15) +#define LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH (0x16) +#define LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR (0x18) +#define LEAPIORAID_FUNC_SMP_PASSTHROUGH (0x1A) +#define LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL (0x1B) +#define LEAPIORAID_FUNC_IO_UNIT_CONTROL (0x1B) +#define LEAPIORAID_FUNC_SATA_PASSTHROUGH (0x1C) +#define LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET (0x40) +#define LEAPIORAID_FUNC_HANDSHAKE (0x42) +#define LEAPIORAID_FUNC_LOG_INIT (0x57) + +#define LEAPIORAID_IOCSTATUS_MASK (0x7FFF) +#define LEAPIORAID_IOCSTATUS_SUCCESS (0x0000) +#define LEAPIORAID_IOCSTATUS_INVALID_FUNCTION (0x0001) +#define LEAPIORAID_IOCSTATUS_BUSY (0x0002) +#define LEAPIORAID_IOCSTATUS_INVALID_SGL (0x0003) +#define LEAPIORAID_IOCSTATUS_INTERNAL_ERROR (0x0004) +#define LEAPIORAID_IOCSTATUS_INVALID_VPID (0x0005) +#define LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) +#define LEAPIORAID_IOCSTATUS_INVALID_FIELD (0x0007) +#define LEAPIORAID_IOCSTATUS_INVALID_STATE (0x0008) +#define LEAPIORAID_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) +#define LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER (0x000A) + +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021) +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022) +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_DATA (0x0023) +#define LEAPIORAID_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024) +#define LEAPIORAID_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025) + +#define LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040) +#define LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042) +#define LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043) +#define LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044) +#define LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045) +#define LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046) +#define LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047) +#define 
LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048) +#define LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049) +#define LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A) +#define LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B) +#define LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) + +#define LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR (0x004D) +#define LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E) +#define LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F) + +#define LEAPIORAID_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062) +#define LEAPIORAID_IOCSTATUS_TARGET_ABORTED (0x0063) +#define LEAPIORAID_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064) +#define LEAPIORAID_IOCSTATUS_TARGET_NO_CONNECTION (0x0065) +#define LEAPIORAID_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A) +#define LEAPIORAID_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D) +#define LEAPIORAID_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E) +#define LEAPIORAID_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F) +#define LEAPIORAID_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070) +#define LEAPIORAID_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071) + +#define LEAPIORAID_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090) +#define LEAPIORAID_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091) +#define LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000) + +struct LeapioraidReqHeader_t { + U16 FunctionDependent1; + U8 ChainOffset; + U8 Function; + U16 FunctionDependent2; + U8 FunctionDependent3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; +}; + +struct LeapioraidDefaultRep_t { + U16 FunctionDependent1; + U8 MsgLength; + U8 Function; + U16 FunctionDependent2; + U8 FunctionDependent3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U16 FunctionDependent5; + U16 IOCStatus; + U32 IOCLogInfo; +}; + +struct LEAPIORAID_VERSION_STRUCT { + U8 Dev; + U8 Unit; + U8 Minor; + U8 Major; +}; + +union LEAPIORAID_VERSION_UNION { + struct LEAPIORAID_VERSION_STRUCT Struct; + U32 Word; +}; + +struct LeapioSGESimple32_t { + U32 FlagsLength; + U32 Address; +}; + +struct LeapioSGESimple64_t { + U32 FlagsLength; + U64 Address; +}; + +struct LEAPIORAID_SGE_SIMPLE_UNION { + U32 FlagsLength; + union { + U32 Address32; + U64 Address64; + } u; +}; + +struct LEAPIORAID_SGE_CHAIN_UNION { + U16 Length; + U8 NextChainOffset; + U8 Flags; + union { + U32 Address32; + U64 Address64; + } u; +}; + +#define LEAPIORAID_SGE_FLAGS_LAST_ELEMENT (0x80) +#define LEAPIORAID_SGE_FLAGS_END_OF_BUFFER (0x40) +#define LEAPIORAID_SGE_FLAGS_END_OF_LIST (0x01) +#define LEAPIORAID_SGE_FLAGS_SHIFT (24) +#define LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT (0x10) +#define LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS (0x00) +#define LEAPIORAID_SGE_FLAGS_HOST_TO_IOC (0x04) +#define LEAPIORAID_SGE_FLAGS_32_BIT_ADDRESSING (0x00) +#define LEAPIORAID_SGE_FLAGS_64_BIT_ADDRESSING (0x02) + +struct LEAPIORAID_IEEE_SGE_SIMPLE32 { + U32 Address; + U32 FlagsLength; +}; + +struct LEAPIORAID_IEEE_SGE_SIMPLE64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 Reserved2; + U8 Flags; +}; + +union LEAPIORAID_IEEE_SGE_SIMPLE_UNION { + struct LEAPIORAID_IEEE_SGE_SIMPLE32 Simple32; + struct LEAPIORAID_IEEE_SGE_SIMPLE64 Simple64; +}; + +union LEAPIORAID_IEEE_SGE_CHAIN_UNION { + struct LEAPIORAID_IEEE_SGE_SIMPLE32 Chain32; + struct LEAPIORAID_IEEE_SGE_SIMPLE64 Chain64; +}; + +struct LEAPIORAID_IEEE_SGE_CHAIN64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 NextChainOffset; + U8 Flags; +}; + +union LEAPIORAID_IEEE_SGE_IO_UNION { + struct LEAPIORAID_IEEE_SGE_SIMPLE64 IeeeSimple; + struct LEAPIORAID_IEEE_SGE_CHAIN64 IeeeChain; +}; + +#define 
LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST (0x40) +#define LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00) +#define LEAPIORAID_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) +#define LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) + +union LEAPIORAID_SIMPLE_SGE_UNION { + struct LEAPIORAID_SGE_SIMPLE_UNION LeapioSimple; + union LEAPIORAID_IEEE_SGE_SIMPLE_UNION IeeeSimple; +}; + +union LEAPIORAID_SGE_IO_UNION { + struct LEAPIORAID_SGE_SIMPLE_UNION LeapioSimple; + struct LEAPIORAID_SGE_CHAIN_UNION LeapioChain; + union LEAPIORAID_IEEE_SGE_SIMPLE_UNION IeeeSimple; + union LEAPIORAID_IEEE_SGE_CHAIN_UNION IeeeChain; +}; + +struct LEAPIORAID_CONFIG_PAGE_HEADER { + U8 PageVersion; + U8 PageLength; + U8 PageNumber; + U8 PageType; +}; + +struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER { + U8 PageVersion; + U8 Reserved1; + U8 PageNumber; + U8 PageType; + U16 ExtPageLength; + U8 ExtPageType; + U8 Reserved2; +}; + +#define LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT (0x00) +#define LEAPIORAID_CONFIG_PAGETYPE_IOC (0x01) +#define LEAPIORAID_CONFIG_PAGETYPE_BIOS (0x02) +#define LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME (0x08) +#define LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING (0x09) +#define LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A) +#define LEAPIORAID_CONFIG_PAGETYPE_EXTENDED (0x0F) +#define LEAPIORAID_CONFIG_PAGETYPE_MASK (0x0F) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY (0x13) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_LOG (0x14) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A) + +#define LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000) + +#define LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000) +#define LEAPIORAID_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000) + +#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000) +#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000) +#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000) +#define LEAPIORAID_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16) +#define LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000) +#define LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000) +#define LEAPIORAID_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000) +#define LEAPIORAID_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000) + +struct LeapioraidCfgReq_t { + U8 Action; + U8 SGLFlags; + U8 ChainOffset; + U8 Function; + U16 ExtPageLength; + U8 ExtPageType; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U8 Reserved2; + U8 ProxyVF_ID; + U16 Reserved4; + U32 Reserved3; + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 PageAddress; + union LEAPIORAID_SGE_IO_UNION PageBufferSGE; +}; + +#define LEAPIORAID_CONFIG_ACTION_PAGE_HEADER (0x00) +#define LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT (0x01) +#define LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02) +#define LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04) + +struct LeapioraidCfgRep_t { + U8 Action; + U8 SGLFlags; + U8 MsgLength; + U8 Function; + U16 ExtPageLength; + U8 ExtPageType; + U8 MsgFlags; + U8 VP_ID; 
+ U8 VF_ID; + U16 Reserved1; + U16 Reserved2; + U16 IOCStatus; + U32 IOCLogInfo; + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; +}; + +struct LeapioraidManP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U8 ChipName[16]; + U8 ChipRevision[8]; + U8 BoardName[16]; + U8 BoardAssembly[16]; + U8 BoardTracerNumber[16]; +}; + +struct LEAPIORAID_MANPAGE7_CONNECTOR_INFO { + U32 Pinout; + U8 Connector[16]; + U8 Location; + U8 ReceptacleID; + U16 Slot; + U16 Slotx2; + U16 Slotx4; +}; + +struct LeapioraidIOUnitP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U64 UniqueValue; + union LEAPIORAID_VERSION_UNION NvdataVersionDefault; + union LEAPIORAID_VERSION_UNION NvdataVersionPersistent; +}; + +struct LeapioraidIOUnitP1_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Flags; +}; + +#define LEAPIORAID_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100) +#define LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020) + +struct LEAPIORAID_IOUNIT8_SENSOR { + U16 Flags; + U16 Reserved1; + U16 Threshold[4]; + U32 Reserved2; + U32 Reserved3; + U32 Reserved4; +}; + +struct LeapioraidIOUnitP8_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Reserved1; + U32 Reserved2; + U8 NumSensors; + U8 PollingInterval; + U16 Reserved3; + struct LEAPIORAID_IOUNIT8_SENSOR Sensor[]; +}; + +struct LeapioraidIOCP1_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Flags; + U32 CoalescingTimeout; + U8 CoalescingDepth; + U8 PCISlotNum; + U8 PCIBusNum; + U8 PCIDomainSegment; + U32 Reserved1; + U32 ProductSpecific; +}; + +struct LeapioraidIOCP8_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U8 NumDevsPerEnclosure; + U8 Reserved1; + U16 Reserved2; + U16 MaxPersistentEntries; + U16 MaxNumPhysicalMappedIDs; + U16 Flags; + U16 Reserved3; + U16 IRVolumeMappingFlags; + U16 Reserved4; + U32 Reserved5; +}; + +#define LEAPIORAID_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003) +#define LEAPIORAID_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000) + +struct LEAPIORAID_BOOT_DEVICE_ADAPTER_ORDER { + U32 Reserved1; + U32 Reserved2; + U32 Reserved3; + U32 Reserved4; + U32 Reserved5; + U32 Reserved6; +}; + +struct LEAPIORAID_BOOT_DEVICE_SAS_WWID { + U64 SASAddress; + U8 LUN[8]; + U32 Reserved1; + U32 Reserved2; +}; + +struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT { + U64 EnclosureLogicalID; + U32 Reserved1; + U32 Reserved2; + U16 SlotNumber; + U16 Reserved3; + U32 Reserved4; +}; + +struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME { + U64 DeviceName; + U8 LUN[8]; + U32 Reserved1; + U32 Reserved2; +}; + +union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE { + struct LEAPIORAID_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder; + struct LEAPIORAID_BOOT_DEVICE_SAS_WWID SasWwid; + struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot; + struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME DeviceName; +}; + +struct LeapioraidBiosP2_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Reserved1; + U32 Reserved2; + U32 Reserved3; + U32 Reserved4; + U32 Reserved5; + U32 Reserved6; + U8 ReqBootDeviceForm; + U8 Reserved7; + U16 Reserved8; + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; + U8 ReqAltBootDeviceForm; + U8 Reserved9; + U16 Reserved10; + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; + U8 CurrentBootDeviceForm; + U8 Reserved11; + U16 Reserved12; + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; +}; + +#define LEAPIORAID_BIOSPAGE2_FORM_MASK (0x0F) +#define LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00) +#define LEAPIORAID_BIOSPAGE2_FORM_SAS_WWID (0x05) +#define 
LEAPIORAID_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06) +#define LEAPIORAID_BIOSPAGE2_FORM_DEVICE_NAME (0x07) + +struct LEAPIORAID_ADAPTER_INFO { + U8 PciBusNumber; + U8 PciDeviceAndFunctionNumber; + U16 AdapterFlags; +}; + +struct LEAPIORAID_ADAPTER_ORDER_AUX { + U64 WWID; + U32 Reserved1; + U32 Reserved2; +}; + +struct LeapioraidBiosP3_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 GlobalFlags; + U32 BiosVersion; + struct LEAPIORAID_ADAPTER_INFO AdapterOrder[4]; + U32 Reserved1; + struct LEAPIORAID_ADAPTER_ORDER_AUX AdapterOrderAux[4]; +}; + +struct LEAPIORAID_RAIDVOL0_PHYS_DISK { + U8 RAIDSetNum; + U8 PhysDiskMap; + U8 PhysDiskNum; + U8 Reserved; +}; + +struct LEAPIORAID_RAIDVOL0_SETTINGS { + U16 Settings; + U8 HotSparePool; + U8 Reserved; +}; + +struct LeapioraidRaidVolP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U16 DevHandle; + U8 VolumeState; + U8 VolumeType; + U32 VolumeStatusFlags; + struct LEAPIORAID_RAIDVOL0_SETTINGS VolumeSettings; + U64 MaxLBA; + U32 StripeSize; + U16 BlockSize; + U16 Reserved1; + U8 SupportedPhysDisks; + U8 ResyncRate; + U16 DataScrubDuration; + U8 NumPhysDisks; + U8 Reserved2; + U8 Reserved3; + U8 InactiveStatus; + struct LEAPIORAID_RAIDVOL0_PHYS_DISK PhysDisk[]; +}; + +#define LEAPIORAID_RAID_VOL_STATE_MISSING (0x00) +#define LEAPIORAID_RAID_VOL_STATE_FAILED (0x01) +#define LEAPIORAID_RAID_VOL_STATE_INITIALIZING (0x02) +#define LEAPIORAID_RAID_VOL_STATE_ONLINE (0x03) +#define LEAPIORAID_RAID_VOL_STATE_DEGRADED (0x04) +#define LEAPIORAID_RAID_VOL_STATE_OPTIMAL (0x05) +#define LEAPIORAID_RAID_VOL_TYPE_RAID0 (0x00) +#define LEAPIORAID_RAID_VOL_TYPE_RAID1E (0x01) +#define LEAPIORAID_RAID_VOL_TYPE_RAID1 (0x02) +#define LEAPIORAID_RAID_VOL_TYPE_RAID10 (0x05) +#define LEAPIORAID_RAID_VOL_TYPE_UNKNOWN (0xFF) + +#define LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) + +struct LeapioraidRaidVolP1_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U16 DevHandle; + U16 Reserved0; + U8 GUID[24]; + U8 Name[16]; + U64 WWID; + U32 Reserved1; + U32 Reserved2; +}; + +struct LEAPIORAID_RAIDPHYSDISK0_SETTINGS { + U16 Reserved1; + U8 HotSparePool; + U8 Reserved2; +}; + +struct LEAPIORAID_RAIDPHYSDISK0_INQUIRY_DATA { + U8 VendorID[8]; + U8 ProductID[16]; + U8 ProductRevLevel[4]; + U8 SerialNum[32]; +}; + +struct LeapioraidRaidPDP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U16 DevHandle; + U8 Reserved1; + U8 PhysDiskNum; + struct LEAPIORAID_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; + U32 Reserved2; + struct LEAPIORAID_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; + U32 Reserved3; + U8 PhysDiskState; + U8 OfflineReason; + U8 IncompatibleReason; + U8 PhysDiskAttributes; + U32 PhysDiskStatusFlags; + U64 DeviceMaxLBA; + U64 HostMaxLBA; + U64 CoercedMaxLBA; + U16 BlockSize; + U16 Reserved5; + U32 Reserved6; +}; + +#define LEAPIORAID_RAID_PD_STATE_NOT_CONFIGURED (0x00) +#define LEAPIORAID_RAID_PD_STATE_NOT_COMPATIBLE (0x01) +#define LEAPIORAID_RAID_PD_STATE_OFFLINE (0x02) +#define LEAPIORAID_RAID_PD_STATE_ONLINE (0x03) +#define LEAPIORAID_RAID_PD_STATE_HOT_SPARE (0x04) +#define LEAPIORAID_RAID_PD_STATE_DEGRADED (0x05) +#define LEAPIORAID_RAID_PD_STATE_REBUILDING (0x06) +#define LEAPIORAID_RAID_PD_STATE_OPTIMAL (0x07) + +#define LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) +#define LEAPIORAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00) +#define LEAPIORAID_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define LEAPIORAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02) +#define LEAPIORAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) +#define 
LEAPIORAID_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) +#define LEAPIORAID_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) +#define LEAPIORAID_SAS_NEG_LINK_RATE_1_5 (0x08) +#define LEAPIORAID_SAS_NEG_LINK_RATE_3_0 (0x09) +#define LEAPIORAID_SAS_NEG_LINK_RATE_6_0 (0x0A) +#define LEAPIORAID_SAS_NEG_LINK_RATE_12_0 (0x0B) + +#define LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY (0x00001000) + +#define LEAPIORAID_SAS_PRATE_MIN_RATE_MASK (0x0F) +#define LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK (0x0F) + +struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA { + U8 Port; + U8 PortFlags; + U8 PhyFlags; + U8 NegotiatedLinkRate; + U32 ControllerPhyDeviceInfo; + U16 AttachedDevHandle; + U16 ControllerDevHandle; + U32 DiscoveryStatus; + U32 Reserved; +}; + +struct LeapioraidSasIOUnitP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U32 Reserved1; + U8 NumPhys; + U8 Reserved2; + U16 Reserved3; + struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA PhyData[]; +}; + +#define LEAPIORAID_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08) +#define LEAPIORAID_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01) +#define LEAPIORAID_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10) +#define LEAPIORAID_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) + +struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA { + U8 Port; + U8 PortFlags; + U8 PhyFlags; + U8 MaxMinLinkRate; + U32 ControllerPhyDeviceInfo; + U16 MaxTargetPortConnectTime; + U16 Reserved1; +}; + +struct LeapioraidSasIOUnitP1_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U16 ControlFlags; + U16 SASNarrowMaxQueueDepth; + U16 AdditionalControlFlags; + U16 SASWideMaxQueueDepth; + U8 NumPhys; + U8 SATAMaxQDepth; + U8 ReportDeviceMissingDelay; + U8 IODeviceMissingDelay; + struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA PhyData[]; +}; + +#define LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F) +#define LEAPIORAID_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80) +#define LEAPIORAID_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) +#define LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) + +struct LeapioraidExpanderP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U8 PhysicalPort; + U8 ReportGenLength; + U16 EnclosureHandle; + U64 SASAddress; + U32 DiscoveryStatus; + U16 DevHandle; + U16 ParentDevHandle; + U16 ExpanderChangeCount; + U16 ExpanderRouteIndexes; + U8 NumPhys; + U8 SASLevel; + U16 Flags; + U16 STPBusInactivityTimeLimit; + U16 STPMaxConnectTimeLimit; + U16 STP_SMP_NexusLossTime; + U16 MaxNumRoutedSasAddresses; + U64 ActiveZoneManagerSASAddress; + U16 ZoneLockInactivityLimit; + U16 Reserved1; + U8 TimeToReducedFunc; + U8 InitialTimeToReducedFunc; + U8 MaxReducedFuncTime; + U8 Reserved2; +}; + +struct LeapioraidExpanderP1_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U8 PhysicalPort; + U8 Reserved1; + U16 Reserved2; + U8 NumPhys; + U8 Phy; + U16 NumTableEntriesProgrammed; + U8 ProgrammedLinkRate; + U8 HwLinkRate; + U16 AttachedDevHandle; + U32 PhyInfo; + U32 AttachedDeviceInfo; + U16 ExpanderDevHandle; + U8 ChangeCount; + U8 NegotiatedLinkRate; + U8 PhyIdentifier; + U8 AttachedPhyIdentifier; + U8 Reserved3; + U8 DiscoveryInfo; + U32 AttachedPhyInfo; + U8 ZoneGroup; + U8 SelfConfigStatus; + U16 Reserved4; +}; + +struct LeapioraidSasDevP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U16 Slot; + U16 EnclosureHandle; + U64 SASAddress; + U16 ParentDevHandle; + U8 PhyNum; + U8 AccessStatus; + U16 DevHandle; + U8 AttachedPhyIdentifier; + U8 ZoneGroup; + U32 DeviceInfo; + U16 Flags; + U8 PhysicalPort; + U8 MaxPortConnections; + U64 DeviceName; + U8 PortGroups; + U8 DmaGroup; + U8 
ControlGroup; + U8 EnclosureLevel; + U8 ConnectorName[4]; + U32 Reserved3; +}; + +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) + +struct LeapioraidSasPhyP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U16 OwnerDevHandle; + U16 Reserved1; + U16 AttachedDevHandle; + U8 AttachedPhyIdentifier; + U8 Reserved2; + U32 AttachedPhyInfo; + U8 ProgrammedLinkRate; + U8 HwLinkRate; + U8 ChangeCount; + U8 Flags; + U32 PhyInfo; + U8 NegotiatedLinkRate; + U8 Reserved3; + U16 Reserved4; +}; + +struct LeapioraidSasPhyP1_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U32 Reserved1; + U32 InvalidDwordCount; + U32 RunningDisparityErrorCount; + U32 LossDwordSynchCount; + U32 PhyResetProblemCount; +}; + +struct LeapioraidSasEncP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U32 Reserved1; + U64 EnclosureLogicalID; + U16 Flags; + U16 EnclosureHandle; + U16 NumSlots; + U16 StartSlot; + U8 ChassisSlot; + U8 EnclosureLevel; + U16 SEPDevHandle; + U8 OEMRD; + U8 Reserved1a; + U16 Reserved2; + U32 Reserved3; +}; + +#define LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) + +struct LEAPIORAID_RAIDCONFIG0_CONFIG_ELEMENT { + U16 ElementFlags; + U16 VolDevHandle; + U8 HotSparePool; + U8 PhysDiskNum; + U16 PhysDiskDevHandle; +}; + +#define LEAPIORAID_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F) +#define LEAPIORAID_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001) +#define LEAPIORAID_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002) +#define LEAPIORAID_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003) + +struct LeapioraidRaidCfgP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U8 NumHotSpares; + U8 NumPhysDisks; + U8 NumVolumes; + U8 ConfigNum; + U32 Flags; + U8 ConfigGUID[24]; + U32 Reserved1; + U8 NumElements; + U8 Reserved2; + U16 Reserved3; + struct LEAPIORAID_RAIDCONFIG0_CONFIG_ELEMENT 
ConfigElement[]; +}; + +struct LeapioraidFWImgHeader_t { + U32 Signature; + U32 Signature0; + U32 Signature1; + U32 Signature2; + union LEAPIORAID_VERSION_UNION LEAPIOVersion; + union LEAPIORAID_VERSION_UNION FWVersion; + union LEAPIORAID_VERSION_UNION NVDATAVersion; + union LEAPIORAID_VERSION_UNION PackageVersion; + U16 VendorID; + U16 ProductID; + U16 ProtocolFlags; + U16 Reserved26; + U32 IOCCapabilities; + U32 ImageSize; + U32 NextImageHeaderOffset; + U32 Checksum; + U32 Reserved38; + U32 Reserved3C; + U32 Reserved40; + U32 Reserved44; + U32 Reserved48; + U32 Reserved4C; + U32 Reserved50; + U32 Reserved54; + U32 Reserved58; + U32 Reserved5C; + U32 BootFlags; + U32 FirmwareVersionNameWhat; + U8 FirmwareVersionName[32]; + U32 VendorNameWhat; + U8 VendorName[32]; + U32 PackageNameWhat; + U8 PackageName[32]; + U32 ReservedD0; + U32 ReservedD4; + U32 ReservedD8; + U32 ReservedDC; + U32 ReservedE0; + U32 ReservedE4; + U32 ReservedE8; + U32 ReservedEC; + U32 ReservedF0; + U32 ReservedF4; + U32 ReservedF8; + U32 ReservedFC; +}; + +struct LEAPIORAID_HASH_EXCLUSION_FORMAT { + U32 Offset; + U32 Size; +}; + +struct LeapioraidComptImgHeader_t { + U32 Signature0; + U32 LoadAddress; + U32 DataSize; + U32 StartAddress; + U32 Signature1; + U32 FlashOffset; + U32 FlashSize; + U32 VersionStringOffset; + U32 BuildDateStringOffset; + U32 BuildTimeStringOffset; + U32 EnvironmentVariableOffset; + U32 ApplicationSpecific; + U32 Signature2; + U32 HeaderSize; + U32 Crc; + U8 NotFlashImage; + U8 Compressed; + U16 Reserved3E; + U32 SecondaryFlashOffset; + U32 Reserved44; + U32 Reserved48; + union LEAPIORAID_VERSION_UNION RMCInterfaceVersion; + union LEAPIORAID_VERSION_UNION Reserved50; + union LEAPIORAID_VERSION_UNION FWVersion; + union LEAPIORAID_VERSION_UNION NvdataVersion; + struct LEAPIORAID_HASH_EXCLUSION_FORMAT HashExclusion[4]; + U32 NextImageHeaderOffset; + U32 Reserved80[32]; +}; + +struct LEAPIORAID_SCSI_IO_CDB_EEDP32 { + U8 CDB[20]; + __be32 PrimaryReferenceTag; + U16 PrimaryApplicationTag; + U16 PrimaryApplicationTagMask; + U32 TransferLength; +}; + +union LEAPIO_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + struct LEAPIORAID_SCSI_IO_CDB_EEDP32 EEDP32; + struct LEAPIORAID_SGE_SIMPLE_UNION SGE; +}; + +struct LeapioSCSIIOReq_t { + U16 DevHandle; + U8 ChainOffset; + U8 Function; + U16 Reserved1; + U8 Reserved2; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U32 SenseBufferLowAddress; + U16 SGLFlags; + U8 SenseBufferLength; + U8 Reserved4; + U8 SGLOffset0; + U8 SGLOffset1; + U8 SGLOffset2; + U8 SGLOffset3; + U32 SkipCount; + U32 DataLength; + U32 BidirectionalDataLength; + U16 IoFlags; + U16 EEDPFlags; + U32 EEDPBlockSize; + U32 SecondaryReferenceTag; + U16 SecondaryApplicationTag; + U16 ApplicationTagTranslationMask; + U8 LUN[8]; + U32 Control; + union LEAPIO_SCSI_IO_CDB_UNION CDB; + union LEAPIORAID_SGE_IO_UNION SGL; +}; + +#define LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00) + +#define LEAPIORAID_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26) +#define LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER (0x00000000) +#define LEAPIORAID_SCSIIO_CONTROL_WRITE (0x01000000) +#define LEAPIORAID_SCSIIO_CONTROL_READ (0x02000000) +#define LEAPIORAID_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000) +#define LEAPIORAID_SCSIIO_CONTROL_CMDPRI_SHIFT (11) +#define LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ (0x00000000) +#define LEAPIORAID_SCSIIO_CONTROL_ORDEREDQ (0x00000200) +#define LEAPIORAID_SCSIIO_CONTROL_TLR_ON (0x00000040) + +union LEAPIORAID_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + struct LEAPIORAID_SCSI_IO_CDB_EEDP32 EEDP32; + struct 
LEAPIORAID_IEEE_SGE_SIMPLE64 SGE; +}; + +struct LeapioraidSCSIIOReq_t { + U16 DevHandle; + U8 ChainOffset; + U8 Function; + U16 Reserved1; + U8 Reserved2; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U32 SenseBufferLowAddress; + U8 DMAFlags; + U8 Reserved5; + U8 SenseBufferLength; + U8 Reserved4; + U8 SGLOffset0; + U8 SGLOffset1; + U8 SGLOffset2; + U8 SGLOffset3; + U32 SkipCount; + U32 DataLength; + U32 BidirectionalDataLength; + U16 IoFlags; + U16 EEDPFlags; + U16 EEDPBlockSize; + U16 Reserved6; + U32 SecondaryReferenceTag; + U16 SecondaryApplicationTag; + U16 ApplicationTagTranslationMask; + U8 LUN[8]; + U32 Control; + union LEAPIORAID_SCSI_IO_CDB_UNION CDB; + union LEAPIORAID_IEEE_SGE_IO_UNION SGL; +}; + +struct LeapioraidSCSIIORep_t { + U16 DevHandle; + U8 MsgLength; + U8 Function; + U16 Reserved1; + U8 Reserved2; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U8 SCSIStatus; + U8 SCSIState; + U16 IOCStatus; + U32 IOCLogInfo; + U32 TransferCount; + U32 SenseCount; + U32 ResponseInfo; + U16 TaskTag; + U16 SCSIStatusQualifier; + U32 BidirectionalTransferCount; + U32 EEDPErrorOffset; + U16 EEDPObservedAppTag; + U16 EEDPObservedGuard; + U32 EEDPObservedRefTag; +}; + +#define LEAPIORAID_SCSI_STATUS_GOOD (0x00) +#define LEAPIORAID_SCSI_STATUS_CHECK_CONDITION (0x02) +#define LEAPIORAID_SCSI_STATUS_CONDITION_MET (0x04) +#define LEAPIORAID_SCSI_STATUS_BUSY (0x08) +#define LEAPIORAID_SCSI_STATUS_INTERMEDIATE (0x10) +#define LEAPIORAID_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14) +#define LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT (0x18) +#define LEAPIORAID_SCSI_STATUS_COMMAND_TERMINATED (0x22) +#define LEAPIORAID_SCSI_STATUS_TASK_SET_FULL (0x28) +#define LEAPIORAID_SCSI_STATUS_ACA_ACTIVE (0x30) +#define LEAPIORAID_SCSI_STATUS_TASK_ABORTED (0x40) +#define LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID (0x10) +#define LEAPIORAID_SCSI_STATE_TERMINATED (0x08) +#define LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS (0x04) +#define LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED (0x02) +#define LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID (0x01) + +struct LeapioraidSCSITmgReq_t { + U16 DevHandle; + U8 ChainOffset; + U8 Function; + U8 Reserved1; + U8 TaskType; + U8 Reserved2; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U8 LUN[8]; + U32 Reserved4[7]; + U16 TaskMID; + U16 Reserved5; +}; + +#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) +#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) +#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) +#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) +#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) +#define LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00) + +struct LeapioraidSCSITmgRep_t { + U16 DevHandle; + U8 MsgLength; + U8 Function; + U8 ResponseCode; + U8 TaskType; + U8 Reserved1; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved2; + U16 Reserved3; + U16 IOCStatus; + U32 IOCLogInfo; + U32 TerminationCount; + U32 ResponseInfo; +}; + +#define LEAPIORAID_SCSITASKMGMT_RSP_TM_COMPLETE (0x00) +#define LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME (0x02) +#define LEAPIORAID_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04) +#define LEAPIORAID_SCSITASKMGMT_RSP_TM_FAILED (0x05) +#define LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) +#define LEAPIORAID_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) +#define LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) + +struct LeapioraidSepReq_t { + U16 DevHandle; + U8 ChainOffset; + U8 Function; + U8 Action; + U8 Flags; + U8 Reserved1; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 
Reserved2; + U32 SlotStatus; + U32 Reserved3; + U32 Reserved4; + U32 Reserved5; + U16 Slot; + U16 EnclosureHandle; +}; + +#define LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS (0x00) +#define LEAPIORAID_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00) +#define LEAPIORAID_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01) +#define LEAPIORAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040) + +struct LeapioraidSepRep_t { + U16 DevHandle; + U8 MsgLength; + U8 Function; + U8 Action; + U8 Flags; + U8 Reserved1; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved2; + U16 Reserved3; + U16 IOCStatus; + U32 IOCLogInfo; + U32 SlotStatus; + U32 Reserved4; + U16 Slot; + U16 EnclosureHandle; +}; + +struct LeapioraidIOCInitReq_t { + U8 WhoInit; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 MsgVersion; + U16 HeaderVersion; + U32 Reserved5; + U16 ConfigurationFlags; + U8 HostPageSize; + U8 HostMSIxVectors; + U16 Reserved8; + U16 SystemRequestFrameSize; + U16 ReplyDescriptorPostQueueDepth; + U16 ReplyFreeQueueDepth; + U32 SenseBufferAddressHigh; + U32 SystemReplyAddressHigh; + U64 SystemRequestFrameBaseAddress; + U64 ReplyDescriptorPostQueueAddress; + U64 ReplyFreeQueueAddress; + U64 TimeStamp; +}; + +#define LEAPIORAID_WHOINIT_HOST_DRIVER (0x04) +#define LEAPIORAID_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) + +struct LeapioraidIOCInitRDPQArrayEntry { + U64 RDPQBaseAddress; + U32 Reserved1; + U32 Reserved2; +}; + +struct LeapioraidIOCInitRep_t { + U8 WhoInit; + U8 Reserved1; + U8 MsgLength; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 Reserved5; + U16 IOCStatus; + U32 IOCLogInfo; +}; + +struct LeapioraidIOCLogReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U64 BufAddr; + U32 BufSize; +}; + +struct LeapioraidIOCLogRep_t { + U16 Reserved1; + U8 MsgLength; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 Reserved5; + U16 IOCStatus; + U32 IOCLogInfo; +}; + +struct LeapioraidIOCFactsReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; +}; + +struct LeapioraidIOCFactsRep_t { + U16 MsgVersion; + U8 MsgLength; + U8 Function; + U16 HeaderVersion; + U8 IOCNumber; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U16 IOCExceptions; + U16 IOCStatus; + U32 IOCLogInfo; + U8 MaxChainDepth; + U8 WhoInit; + U8 NumberOfPorts; + U8 MaxMSIxVectors; + U16 RequestCredit; + U16 ProductID; + U32 IOCCapabilities; + union LEAPIORAID_VERSION_UNION FWVersion; + U16 IOCRequestFrameSize; + U16 IOCMaxChainSegmentSize; + U16 MaxInitiators; + U16 MaxTargets; + U16 MaxSasExpanders; + U16 MaxEnclosures; + U16 ProtocolFlags; + U16 HighPriorityCredit; + U16 MaxReplyDescriptorPostQueueDepth; + U8 ReplyFrameSize; + U8 MaxVolumes; + U16 MaxDevHandle; + U16 MaxPersistentEntries; + U16 MinDevHandle; + U8 CurrentHostPageSize; + U8 Reserved4; + U8 SGEModifierMask; + U8 SGEModifierValue; + U8 SGEModifierShift; + U8 Reserved5; +}; + +#define LEAPIORAID_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) +#define LEAPIORAID_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) +#define LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) +#define LEAPIORAID_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) +#define LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID 
(0x00001000) +#define LEAPIORAID_IOCFACTS_CAPABILITY_TLR (0x00000800) +#define LEAPIORAID_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) +#define LEAPIORAID_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) +#define LEAPIORAID_IOCFACTS_CAPABILITY_EEDP (0x00000040) +#define LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) +#define LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) +#define LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) + +struct LeapioraidPortFactsReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 PortNumber; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; +}; + +struct LeapioraidPortFactsRep_t { + U16 Reserved1; + U8 MsgLength; + U8 Function; + U16 Reserved2; + U8 PortNumber; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U16 IOCStatus; + U32 IOCLogInfo; + U8 Reserved5; + U8 PortType; + U16 Reserved6; + U16 MaxPostedCmdBuffers; + U16 Reserved7; +}; + +struct LeapioraidPortEnableReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U8 Reserved2; + U8 PortFlags; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; +}; + +struct LeapioraidPortEnableRep_t { + U16 Reserved1; + U8 MsgLength; + U8 Function; + U8 Reserved2; + U8 PortFlags; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 Reserved5; + U16 IOCStatus; + U32 IOCLogInfo; +}; + +#define LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS (4) +struct LeapioraidEventNotificationReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U32 Reserved5; + U32 Reserved6; + U32 EventMasks[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS]; + U16 SASBroadcastPrimitiveMasks; + U16 SASNotifyPrimitiveMasks; + U32 Reserved8; +}; + +struct LeapioraidEventNotificationRep_t { + U16 EventDataLength; + U8 MsgLength; + U8 Function; + U16 Reserved1; + U8 AckRequired; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved2; + U16 Reserved3; + U16 IOCStatus; + U32 IOCLogInfo; + U16 Event; + U16 Reserved4; + U32 EventContext; + U32 EventData[]; +}; + +#define LEAPIORAID_EVENT_NOTIFICATION_ACK_REQUIRED (0x01) +#define LEAPIORAID_EVENT_LOG_DATA (0x0001) +#define LEAPIORAID_EVENT_STATE_CHANGE (0x0002) +#define LEAPIORAID_EVENT_HARD_RESET_RECEIVED (0x0005) +#define LEAPIORAID_EVENT_EVENT_CHANGE (0x000A) +#define LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) +#define LEAPIORAID_EVENT_IR_OPERATION_STATUS (0x0014) +#define LEAPIORAID_EVENT_SAS_DISCOVERY (0x0016) +#define LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017) +#define LEAPIORAID_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018) +#define LEAPIORAID_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019) +#define LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C) +#define LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D) +#define LEAPIORAID_EVENT_IR_VOLUME (0x001E) +#define LEAPIORAID_EVENT_IR_PHYSICAL_DISK (0x001F) +#define LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) +#define LEAPIORAID_EVENT_LOG_ENTRY_ADDED (0x0021) +#define LEAPIORAID_EVENT_SAS_QUIESCE (0x0025) +#define LEAPIORAID_EVENT_TEMP_THRESHOLD (0x0027) +#define LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035) + +struct LeapioraidEventDataSasDeviceStatusChange_t { + U16 TaskTag; + U8 ReasonCode; + U8 PhysicalPort; + U8 ASC; + U8 ASCQ; + U16 DevHandle; + U32 Reserved2; + U64 SASAddress; + U8 LUN[8]; +}; + +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED 
(0x07) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12) + +struct LeapioraidEventDataIrOpStatus_t { + U16 VolDevHandle; + U16 Reserved1; + U8 RAIDOperation; + U8 PercentComplete; + U16 Reserved2; + U32 ElapsedSeconds; +}; + +#define LEAPIORAID_EVENT_IR_RAIDOP_RESYNC (0x00) +#define LEAPIORAID_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01) +#define LEAPIORAID_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02) +#define LEAPIORAID_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03) +#define LEAPIORAID_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04) + +struct LeapioraidEventDataIrVol_t { + U16 VolDevHandle; + U8 ReasonCode; + U8 Reserved1; + U32 NewValue; + U32 PreviousValue; +}; + +#define LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03) +struct LeapioraidEventDataIrPhyDisk_t { + U16 Reserved1; + U8 ReasonCode; + U8 PhysDiskNum; + U16 PhysDiskDevHandle; + U16 Reserved2; + U16 Slot; + U16 EnclosureHandle; + U32 NewValue; + U32 PreviousValue; +}; + +#define LEAPIORAID_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03) + +struct LeapioraidEventIrCfgEle_t { + U16 ElementFlags; + U16 VolDevHandle; + U8 ReasonCode; + U8 PhysDiskNum; + U16 PhysDiskDevHandle; +}; + +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F) +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000) +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001) +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED (0x01) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED (0x02) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE (0x04) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE (0x05) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED (0x08) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED (0x09) + +struct LeapioraidEventDataIrCfgChangeList_t { + U8 NumElements; + U8 Reserved1; + U8 Reserved2; + U8 ConfigNum; + U32 Flags; + struct LeapioraidEventIrCfgEle_t ConfigElement[]; +}; + +#define LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001) +struct LeapioraidEventDataSasDiscovery_t { + U8 Flags; + U8 ReasonCode; + U8 PhysicalPort; + U8 Reserved1; + U32 DiscoveryStatus; +}; + +#define LEAPIORAID_EVENT_SAS_DISC_RC_STARTED (0x01) + +struct LeapioraidEventDataSasBroadcastPrimitive_t { + U8 PhyNum; + U8 Port; + U8 PortWidth; + U8 Primitive; +}; + +#define LEAPIORAID_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04) + +struct LEAPIORAID_EVENT_SAS_TOPO_PHY_ENTRY { + U16 AttachedDevHandle; + U8 LinkRate; + U8 PhyStatus; +}; + +struct LeapioraidEventDataSasTopoChangeList_t { + U16 EnclosureHandle; + U16 ExpanderDevHandle; + U8 NumPhys; + U8 Reserved1; + U16 Reserved2; + U8 
NumEntries; + U8 StartPhyNum; + U8 ExpStatus; + U8 PhysicalPort; + struct LEAPIORAID_EVENT_SAS_TOPO_PHY_ENTRY PHY[]; +}; + +#define LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED (0x01) +#define LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) +#define LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING (0x03) +#define LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04) +#define LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_MASK (0x0F) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05) + +struct LeapioraidEventDataSasEnclDevStatusChange_t { + U16 EnclosureHandle; + U8 ReasonCode; + U8 PhysicalPort; + U64 EnclosureLogicalID; + U16 NumSlots; + U16 StartSlot; + U32 PhyBits; +}; + +#define LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED (0x01) +#define LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) + +struct LeapioraidEventDataSasDeviceDiscoveryError_t { + U16 DevHandle; + U8 ReasonCode; + U8 PhysicalPort; + U32 Reserved1[2]; + U64 SASAddress; + U32 Reserved2[2]; +}; + +#define LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01) +#define LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02) + +struct LeapioraidEventAckReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 Event; + U16 Reserved5; + U32 EventContext; +}; + +struct LeapioraidFWUploadReq_t { + U8 ImageType; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U32 Reserved5; + U32 Reserved6; + U32 Reserved7; + U32 ImageOffset; + U32 ImageSize; + union LEAPIORAID_IEEE_SGE_IO_UNION SGL; +}; + +struct LeapioraidFWUploadRep_t { + U8 ImageType; + U8 Reserved1; + U8 MsgLength; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 Reserved5; + U16 IOCStatus; + U32 IOCLogInfo; + U32 ActualImageSize; +}; + +struct LeapioraidIoUnitControlReq_t { + U8 Operation; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 DevHandle; + U8 IOCParameter; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U8 PhyNum; + U8 PrimFlags; + U32 Primitive; + U8 LookupMethod; + U8 Reserved5; + U16 SlotNumber; + U64 LookupAddress; + U32 IOCParameterValue; + U32 IOCParameterValue2; + U32 Reserved8; +}; + +#define LEAPIORAID_CTRL_OP_REMOVE_DEVICE (0x0D) + +struct LeapioraidIoUnitControlRep_t { + U8 Operation; + U8 Reserved1; + U8 MsgLength; + U8 Function; + U16 DevHandle; + U8 IOCParameter; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U16 IOCStatus; + U32 IOCLogInfo; +}; + +struct LEAPIORAID_RAID_ACTION_RATE_DATA { + U8 RateToChange; + U8 RateOrMode; + U16 DataScrubDuration; +}; + +struct LEAPIORAID_RAID_ACTION_START_RAID_FUNCTION { + U8 RAIDFunction; + U8 Flags; + U16 Reserved1; +}; + +struct LEAPIORAID_RAID_ACTION_STOP_RAID_FUNCTION { + U8 RAIDFunction; + U8 Flags; + U16 Reserved1; +}; + +struct LEAPIORAID_RAID_ACTION_HOT_SPARE { + U8 HotSparePool; + U8 Reserved1; + U16 DevHandle; +}; + +struct LEAPIORAID_RAID_ACTION_FW_UPDATE_MODE { + U8 Flags; + U8 DeviceFirmwareUpdateModeTimeout; + U16 Reserved1; +}; + +union LEAPIORAID_RAID_ACTION_DATA { + U32 Word; + struct LEAPIORAID_RAID_ACTION_RATE_DATA Rates; + struct 
LEAPIORAID_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction; + struct LEAPIORAID_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction; + struct LEAPIORAID_RAID_ACTION_HOT_SPARE HotSpare; + struct LEAPIORAID_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode; +}; + +struct LeapioraidRaidActionReq_t { + U8 Action; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 VolDevHandle; + U8 PhysDiskNum; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved2; + U32 Reserved3; + union LEAPIORAID_RAID_ACTION_DATA ActionDataWord; + struct LEAPIORAID_SGE_SIMPLE_UNION ActionDataSGE; +}; + +struct LEAPIORAID_RAID_VOL_INDICATOR { + U64 TotalBlocks; + U64 BlocksRemaining; + U32 Flags; + U32 ElapsedSeconds; +}; + +struct LEAPIORAID_RAID_COMPATIBILITY_RESULT_STRUCT { + U8 State; + U8 Reserved1; + U16 Reserved2; + U32 GenericAttributes; + U32 OEMSpecificAttributes; + U32 Reserved3; + U32 Reserved4; +}; + +union LEAPIORAID_RAID_ACTION_REPLY_DATA { + U32 Word[6]; + struct LEAPIORAID_RAID_VOL_INDICATOR RaidVolumeIndicator; + U16 VolDevHandle; + U8 VolumeState; + U8 PhysDiskNum; + struct LEAPIORAID_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult; +}; + +struct LeapioraidRaidActionRep_t { + U8 Action; + U8 Reserved1; + U8 MsgLength; + U8 Function; + U16 VolDevHandle; + U8 PhysDiskNum; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved2; + U16 Reserved3; + U16 IOCStatus; + U32 IOCLogInfo; + union LEAPIORAID_RAID_ACTION_REPLY_DATA ActionData; +}; + +#define LEAPIORAID_SAS_DEVICE_INFO_SEP (0x00004000) +#define LEAPIORAID_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000) +#define LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET (0x00000400) +#define LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET (0x00000200) +#define LEAPIORAID_SAS_DEVICE_INFO_SMP_TARGET (0x00000100) +#define LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080) +#define LEAPIORAID_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040) +#define LEAPIORAID_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020) +#define LEAPIORAID_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010) +#define LEAPIORAID_SAS_DEVICE_INFO_SATA_HOST (0x00000008) +#define LEAPIORAID_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) +#define LEAPIORAID_SAS_DEVICE_INFO_NO_DEVICE (0x00000000) +#define LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE (0x00000001) +#define LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002) +#define LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + +struct LeapioraidSmpPassthroughReq_t { + U8 PassthroughFlags; + U8 PhysicalPort; + U8 ChainOffset; + U8 Function; + U16 RequestDataLength; + U8 SGLFlags; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U32 Reserved2; + U64 SASAddress; + U32 Reserved3; + U32 Reserved4; + union LEAPIORAID_SIMPLE_SGE_UNION SGL; +}; + +struct LeapioraidSmpPassthroughRep_t { + U8 PassthroughFlags; + U8 PhysicalPort; + U8 MsgLength; + U8 Function; + U16 ResponseDataLength; + U8 SGLFlags; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U8 Reserved2; + U8 SASStatus; + U16 IOCStatus; + U32 IOCLogInfo; + U32 Reserved3; + U8 ResponseData[4]; +}; + +struct LeapioraidSasIoUnitControlReq_t { + U8 Operation; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 DevHandle; + U8 IOCParameter; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U8 PhyNum; + U8 PrimFlags; + U32 Primitive; + U8 LookupMethod; + U8 Reserved5; + U16 SlotNumber; + U64 LookupAddress; + U32 IOCParameterValue; + U32 Reserved7; + U32 Reserved8; +}; + +#define LEAPIORAID_SAS_OP_PHY_LINK_RESET (0x06) +#define LEAPIORAID_SAS_OP_PHY_HARD_RESET (0x07) +#define LEAPIORAID_SAS_OP_REMOVE_DEVICE 
(0x0D) +struct LeapioraidSasIoUnitControlRep_t { + U8 Operation; + U8 Reserved1; + U8 MsgLength; + U8 Function; + U16 DevHandle; + U8 IOCParameter; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U16 IOCStatus; + U32 IOCLogInfo; +}; +#endif diff --git a/drivers/scsi/leapioraid/leapioraid_app.c b/drivers/scsi/leapioraid/leapioraid_app.c new file mode 100644 index 0000000000000000000000000000000000000000..6e7f6bf87778280ab226f990312181bf20a303f7 --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_app.c @@ -0,0 +1,2225 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Management Module Support for MPT (Message Passing Technology) based + * controllers + * + * Copyright (C) 2013-2021 LSI Corporation + * Copyright (C) 2013-2021 Avago Technologies + * Copyright (C) 2013-2021 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/compat.h> +#include <linux/poll.h> +#include <linux/io.h> +#include <linux/uaccess.h> + +#ifdef __KERNEL__ +#include <linux/miscdevice.h> +#endif +#include "leapioraid_func.h" + +#define LEAPIORAID_DEV_NAME "leapioraid_ctl" + +#define LEAPIORAID_MAGIC_NUMBER 'L' +#define LEAPIORAID_IOCTL_DEFAULT_TIMEOUT (10) + +#define LEAPIORAID_IOCINFO \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 17, struct leapio_ioctl_iocinfo) +#define LEAPIORAID_COMMAND \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 20, struct leapio_ioctl_command) +#ifdef CONFIG_COMPAT +#define LEAPIORAID_COMMAND32 \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 20, struct leapio_ioctl_command32) +#endif +#define LEAPIORAID_EVENTQUERY \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 21, struct leapio_ioctl_eventquery) +#define LEAPIORAID_EVENTENABLE \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 22, struct leapio_ioctl_eventenable) +#define LEAPIORAID_EVENTREPORT \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 23, struct leapio_ioctl_eventreport) +#define LEAPIORAID_HARDRESET \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 24, struct leapio_ioctl_diag_reset) +#define LEAPIORAID_BTDHMAPPING \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 31, struct leapio_ioctl_btdh_mapping) + +struct leapio_ioctl_header { + uint32_t ioc_number; + uint32_t port_number; + uint32_t max_data_size; +}; + +struct leapio_ioctl_diag_reset { + struct leapio_ioctl_header hdr; +}; + +struct leapio_ioctl_pci_info { + union { + struct { + uint32_t device:5; + uint32_t function:3; + uint32_t bus:24; + } bits; + uint32_t word; + } u; + uint32_t segment_id; +}; + +struct leapio_ioctl_iocinfo { + struct leapio_ioctl_header hdr; + uint32_t adapter_type; + uint32_t port_number; + uint32_t pci_id; + uint32_t hw_rev; + uint32_t subsystem_device; + uint32_t subsystem_vendor; + uint32_t rsvd0; + uint32_t firmware_version; + uint32_t bios_version; + uint8_t driver_version[32]; + uint8_t rsvd1; + uint8_t scsi_id; + uint16_t rsvd2; + struct leapio_ioctl_pci_info pci_information; +}; + +#define LEAPIORAID_CTL_EVENT_LOG_SIZE (200) +struct leapio_ioctl_eventquery { + struct leapio_ioctl_header hdr; + uint16_t event_entries; + uint16_t rsvd; + uint32_t event_types[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS]; +}; + +struct leapio_ioctl_eventenable { + struct leapio_ioctl_header hdr; + uint32_t event_types[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS]; +}; + +#define LEAPIORAID_EVENT_DATA_SIZE (192) +struct LEAPIORAID_IOCTL_EVENTS { + uint32_t event; + uint32_t context; + uint8_t data[LEAPIORAID_EVENT_DATA_SIZE]; +}; + +struct leapio_ioctl_eventreport { + struct leapio_ioctl_header hdr; + struct LEAPIORAID_IOCTL_EVENTS event_data[]; +}; + +struct leapio_ioctl_command { + struct leapio_ioctl_header hdr; + uint32_t timeout; + void __user *reply_frame_buf_ptr; + void __user *data_in_buf_ptr; + void __user *data_out_buf_ptr; + void __user *sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[]; +}; + +#ifdef CONFIG_COMPAT +struct leapio_ioctl_command32 { + struct 
leapio_ioctl_header hdr; + uint32_t timeout; + uint32_t reply_frame_buf_ptr; + uint32_t data_in_buf_ptr; + uint32_t data_out_buf_ptr; + uint32_t sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[]; +}; +#endif + +struct leapio_ioctl_btdh_mapping { + struct leapio_ioctl_header hdr; + uint32_t id; + uint32_t bus; + uint16_t handle; + uint16_t rsvd; +}; + +static struct fasync_struct *leapioraid_async_queue; +static DECLARE_WAIT_QUEUE_HEAD(leapioraid_ctl_poll_wait); + +enum leapioraid_block_state { + NON_BLOCKING, + BLOCKING, +}; + +static void +leapioraid_ctl_display_some_debug( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + char *calling_function_name, + struct LeapioraidDefaultRep_t *mpi_reply) +{ + struct LeapioraidCfgReq_t *mpi_request; + char *desc = NULL; + + if (!(ioc->logging_level & LEAPIORAID_DEBUG_IOCTL)) + return; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + switch (mpi_request->Function) { + case LEAPIORAID_FUNC_SCSI_IO_REQUEST: + { + struct LeapioSCSIIOReq_t *scsi_request = + (struct LeapioSCSIIOReq_t *) mpi_request; + snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH, + "scsi_io, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case LEAPIORAID_FUNC_SCSI_TASK_MGMT: + desc = "task_mgmt"; + break; + case LEAPIORAID_FUNC_IOC_INIT: + desc = "ioc_init"; + break; + case LEAPIORAID_FUNC_IOC_FACTS: + desc = "ioc_facts"; + break; + case LEAPIORAID_FUNC_CONFIG: + { + struct LeapioraidCfgReq_t *config_request = + (struct LeapioraidCfgReq_t *) mpi_request; + snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH, + "config, type(0x%02x), ext_type(0x%02x), number(%d)", + (config_request->Header.PageType & + LEAPIORAID_CONFIG_PAGETYPE_MASK), + config_request->ExtPageType, + config_request->Header.PageNumber); + desc = ioc->tmp_string; + break; + } + case LEAPIORAID_FUNC_PORT_FACTS: + desc = "port_facts"; + break; + case LEAPIORAID_FUNC_PORT_ENABLE: + desc = "port_enable"; + break; + case LEAPIORAID_FUNC_EVENT_NOTIFICATION: + desc = "event_notification"; + break; + case LEAPIORAID_FUNC_FW_DOWNLOAD: + desc = "fw_download"; + break; + case LEAPIORAID_FUNC_FW_UPLOAD: + desc = "fw_upload"; + break; + case LEAPIORAID_FUNC_RAID_ACTION: + desc = "raid_action"; + break; + case LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH: + { + struct LeapioSCSIIOReq_t *scsi_request = + (struct LeapioSCSIIOReq_t *) mpi_request; + snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH, + "raid_pass, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL: + desc = "sas_iounit_cntl"; + break; + case LEAPIORAID_FUNC_SATA_PASSTHROUGH: + desc = "sata_pass"; + break; + case LEAPIORAID_FUNC_SMP_PASSTHROUGH: + desc = "smp_passthrough"; + break; + } + if (!desc) + return; + pr_info("%s %s: %s, smid(%d)\n", + ioc->name, calling_function_name, desc, smid); + if (!mpi_reply) + return; + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + pr_info( + "%s \tiocstatus(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST || + mpi_request->Function == + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH) { + struct LeapioraidSCSIIORep_t *scsi_reply = + (struct LeapioraidSCSIIORep_t *) 
mpi_reply; + struct leapioraid_sas_device *sas_device = NULL; + + sas_device = leapioraid_get_sdev_by_handle(ioc, + le16_to_cpu(scsi_reply->DevHandle)); + if (sas_device) { + pr_info("%s \tsas_address(0x%016llx), phy(%d)\n", + ioc->name, (unsigned long long) + sas_device->sas_address, sas_device->phy); + if (sas_device->enclosure_handle != 0) + pr_info( + "%s \tenclosure_logical_id(0x%016llx), slot(%d)\n", + ioc->name, (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + leapioraid_sas_device_put(sas_device); + } + if (scsi_reply->SCSIState || scsi_reply->SCSIStatus) + pr_info( + "%s \tscsi_state(0x%02x), scsi_status (0x%02x)\n", + ioc->name, scsi_reply->SCSIState, scsi_reply->SCSIStatus); + } +} + +u8 +leapioraid_ctl_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + struct LeapioraidSCSIIORep_t *scsiio_reply; + const void *sense_data; + u32 sz; + + if (ioc->ctl_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->ctl_cmds.smid != smid) + return 1; + ioc->ctl_cmds.status |= LEAPIORAID_CMD_COMPLETE; + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + memcpy(ioc->ctl_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + ioc->ctl_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + if (mpi_reply->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST || + mpi_reply->Function == + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH) { + scsiio_reply = (struct LeapioraidSCSIIORep_t *) mpi_reply; + if (scsiio_reply->SCSIState & + LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) { + sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, + le32_to_cpu(scsiio_reply->SenseCount)); + sense_data = + leapioraid_base_get_sense_buffer(ioc, smid); + memcpy(ioc->ctl_cmds.sense, sense_data, sz); + } + } + } + leapioraid_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply); + ioc->ctl_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->ctl_cmds.done); + return 1; +} + +static int leapioraid_ctl_check_event_type( + struct LEAPIORAID_ADAPTER *ioc, u16 event) +{ + u16 i; + u32 desired_event; + + if (event >= 128 || !event || !ioc->event_log) + return 0; + desired_event = (1 << (event % 32)); + if (!desired_event) + desired_event = 1; + i = event / 32; + return desired_event & ioc->event_type[i]; +} + +void +leapioraid_ctl_add_to_event_log( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventNotificationRep_t *mpi_reply) +{ + struct LEAPIORAID_IOCTL_EVENTS *event_log; + u16 event; + int i; + u32 sz, event_data_sz; + u8 send_aen = 0; + + if (!ioc->event_log) + return; + event = le16_to_cpu(mpi_reply->Event); + if (leapioraid_ctl_check_event_type(ioc, event)) { + i = ioc->event_context % LEAPIORAID_CTL_EVENT_LOG_SIZE; + event_log = ioc->event_log; + event_log[i].event = event; + event_log[i].context = ioc->event_context++; + event_data_sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; + sz = min_t(u32, event_data_sz, LEAPIORAID_EVENT_DATA_SIZE); + memset(event_log[i].data, 0, LEAPIORAID_EVENT_DATA_SIZE); + memcpy(event_log[i].data, mpi_reply->EventData, sz); + send_aen = 1; + } + if (event == LEAPIORAID_EVENT_LOG_ENTRY_ADDED || + (send_aen && !ioc->aen_event_read_flag)) { + ioc->aen_event_read_flag = 1; + wake_up_interruptible(&leapioraid_ctl_poll_wait); + if (leapioraid_async_queue) + kill_fasync(&leapioraid_async_queue, SIGIO, POLL_IN); + } +} + +u8 +leapioraid_ctl_event_callback( + struct LEAPIORAID_ADAPTER *ioc, u8 msix_index, + u32 reply) +{ + struct LeapioraidEventNotificationRep_t *mpi_reply; + + mpi_reply 
= leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) + leapioraid_ctl_add_to_event_log(ioc, mpi_reply); + return 1; +} + +static int +leapioraid_ctl_verify_adapter( + int ioc_number, struct LEAPIORAID_ADAPTER **iocpp) +{ + struct LEAPIORAID_ADAPTER *ioc; + + spin_lock(&leapioraid_gioc_lock); + list_for_each_entry(ioc, &leapioraid_ioc_list, list) { + if (ioc->id != ioc_number) + continue; + spin_unlock(&leapioraid_gioc_lock); + *iocpp = ioc; + return ioc_number; + } + spin_unlock(&leapioraid_gioc_lock); + *iocpp = NULL; + return -1; +} + +void +leapioraid_ctl_clear_outstanding_ioctls(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->ctl_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->ctl_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->ctl_cmds.smid); + complete(&ioc->ctl_cmds.done); + } +} + +void +leapioraid_ctl_reset_handler(struct LEAPIORAID_ADAPTER *ioc, int reset_phase) +{ + switch (reset_phase) { + case LEAPIORAID_IOC_PRE_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n", ioc->name, + __func__)); + break; + case LEAPIORAID_IOC_AFTER_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n", ioc->name, + __func__)); + leapioraid_ctl_clear_outstanding_ioctls(ioc); + break; + case LEAPIORAID_IOC_DONE_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n", ioc->name, + __func__)); + break; + } +} + +static int +leapioraid_ctl_fasync(int fd, struct file *filep, int mode) +{ + return fasync_helper(fd, filep, mode, &leapioraid_async_queue); +} + +int +leapioraid_ctl_release(struct inode *inode, struct file *filep) +{ + return fasync_helper(-1, filep, 0, &leapioraid_async_queue); +} + +static unsigned int +leapioraid_ctl_poll(struct file *filep, poll_table *wait) +{ + struct LEAPIORAID_ADAPTER *ioc; + + poll_wait(filep, &leapioraid_ctl_poll_wait, wait); + spin_lock(&leapioraid_gioc_lock); + list_for_each_entry(ioc, &leapioraid_ioc_list, list) { + if (ioc->aen_event_read_flag) { + spin_unlock(&leapioraid_gioc_lock); + return POLLIN | POLLRDNORM; + } + } + spin_unlock(&leapioraid_gioc_lock); + return 0; +} + +static int +leapioraid_ctl_set_task_mid(struct LEAPIORAID_ADAPTER *ioc, + struct leapio_ioctl_command *karg, + struct LeapioraidSCSITmgReq_t *tm_request) +{ + u8 found = 0; + u16 smid; + u16 handle; + struct scsi_cmnd *scmd; + struct LEAPIORAID_DEVICE *priv_data; + struct LeapioraidSCSITmgRep_t *tm_reply; + u32 sz; + u32 lun; + char *desc = NULL; + struct leapioraid_scsiio_tracker *st = NULL; + + if (tm_request->TaskType == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + desc = "abort_task"; + else if (tm_request->TaskType == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + desc = "query_task"; + else + return 0; + lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); + handle = le16_to_cpu(tm_request->DevHandle); + for (smid = ioc->shost->can_queue; smid && !found; smid--) { + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (scmd == NULL || scmd->device == NULL || + scmd->device->hostdata == NULL) + continue; + if (lun != scmd->device->lun) + continue; + priv_data = scmd->device->hostdata; + if (priv_data->sas_target == NULL) + continue; + if (priv_data->sas_target->handle != handle) + continue; + st = leapioraid_base_scsi_cmd_priv(scmd); + if ((!st) || (st->smid == 0)) + continue; + if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) { + tm_request->TaskMID = cpu_to_le16(st->smid); + found = 1; + } + } + if (!found) { + dctlprintk(ioc, 
pr_info( + "%s %s: handle(0x%04x), lun(%d), no active mid!!\n", + ioc->name, desc, + le16_to_cpu(tm_request->DevHandle), + lun)); + tm_reply = ioc->ctl_cmds.reply; + tm_reply->DevHandle = tm_request->DevHandle; + tm_reply->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + tm_reply->TaskType = tm_request->TaskType; + tm_reply->MsgLength = + sizeof(struct LeapioraidSCSITmgRep_t) / 4; + tm_reply->VP_ID = tm_request->VP_ID; + tm_reply->VF_ID = tm_request->VF_ID; + sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz); + if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply, + sz)) + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + return 1; + } + dctlprintk(ioc, pr_info( + "%s %s: handle(0x%04x), lun(%d), task_mid(%d)\n", + ioc->name, desc, + le16_to_cpu(tm_request->DevHandle), lun, + le16_to_cpu(tm_request->TaskMID))); + return 0; +} + +static long +leapioraid_ctl_do_command(struct LEAPIORAID_ADAPTER *ioc, + struct leapio_ioctl_command karg, void __user *mf) +{ + struct LeapioraidReqHeader_t *mpi_request = NULL, *request; + struct LeapioraidDefaultRep_t *mpi_reply; + u16 smid; + unsigned long timeout; + u8 issue_reset; + u32 sz, sz_arg; + void *psge; + void *data_out = NULL; + dma_addr_t data_out_dma = 0; + size_t data_out_sz = 0; + void *data_in = NULL; + dma_addr_t data_in_dma = 0; + size_t data_in_sz = 0; + long ret; + u16 device_handle = LEAPIORAID_INVALID_DEVICE_HANDLE; + + issue_reset = 0; + if (ioc->ctl_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: ctl_cmd in use\n", + ioc->name, __func__); + ret = -EAGAIN; + goto out; + } + ret = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (ret) + goto out; + mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL); + if (!mpi_request) { + ret = -ENOMEM; + goto out; + } + if (karg.data_sge_offset * 4 > ioc->request_sz || + karg.data_sge_offset > (UINT_MAX / 4)) { + ret = -EINVAL; + goto out; + } + if (copy_from_user(mpi_request, mf, karg.data_sge_offset * 4)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, + __func__); + ret = -EFAULT; + goto out; + } + if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT) { + smid = leapioraid_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); + if (!smid) { + pr_err( + "%s %s: failed obtaining a smid\n", ioc->name, + __func__); + ret = -EAGAIN; + goto out; + } + } else { + smid = ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL; + } + ret = 0; + ioc->ctl_cmds.status = LEAPIORAID_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + request = leapioraid_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + memcpy(request, mpi_request, karg.data_sge_offset * 4); + ioc->ctl_cmds.smid = smid; + data_out_sz = karg.data_out_size; + data_in_sz = karg.data_in_size; + if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST || + mpi_request->Function == LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH + || mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT + || mpi_request->Function == LEAPIORAID_FUNC_SATA_PASSTHROUGH) { + device_handle = le16_to_cpu(mpi_request->FunctionDependent1); + if (!device_handle || (device_handle > ioc->facts.MaxDevHandle)) { + ret = -EINVAL; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + } + if (data_out_sz) { + data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz, + &data_out_dma, GFP_ATOMIC); + if (!data_out) { + ret = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + if (copy_from_user(data_out, karg.data_out_buf_ptr, + data_out_sz)) { + pr_err("failure at %s:%d/%s()!\n", 
__FILE__, + __LINE__, __func__); + ret = -EFAULT; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + } + if (data_in_sz) { + data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz, + &data_in_dma, GFP_ATOMIC); + if (!data_in) { + ret = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + } + psge = (void *)request + (karg.data_sge_offset * 4); + leapioraid_ctl_display_some_debug(ioc, smid, "ctl_request", NULL); + init_completion(&ioc->ctl_cmds.done); + switch (mpi_request->Function) { + case LEAPIORAID_FUNC_SCSI_IO_REQUEST: + case LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH: + { + struct LeapioSCSIIOReq_t *scsiio_request = + (struct LeapioSCSIIOReq_t *) request; + scsiio_request->SenseBufferLength = + SCSI_SENSE_BUFFERSIZE; + scsiio_request->SenseBufferLowAddress = + leapioraid_base_get_sense_buffer_dma(ioc, smid); + memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + if (test_bit + (device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, + pr_info( + "%s handle(0x%04x) :ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); + leapioraid_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + if (mpi_request->Function == + LEAPIORAID_FUNC_SCSI_IO_REQUEST) + ioc->put_smid_scsi_io(ioc, smid, device_handle); + else + ioc->put_smid_default(ioc, smid); + break; + } + case LEAPIORAID_FUNC_SCSI_TASK_MGMT: + { + struct LeapioraidSCSITmgReq_t *tm_request = + (struct LeapioraidSCSITmgReq_t *) request; + dtmprintk(ioc, + pr_info("%s TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n", + ioc->name, + le16_to_cpu(tm_request->DevHandle), + tm_request->TaskType)); + ioc->got_task_abort_from_ioctl = 1; + if (tm_request->TaskType == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + tm_request->TaskType == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { + if (leapioraid_ctl_set_task_mid(ioc, &karg, tm_request)) { + leapioraid_base_free_smid(ioc, smid); + ioc->got_task_abort_from_ioctl = 0; + goto out; + } + } + ioc->got_task_abort_from_ioctl = 0; + if (test_bit + (device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, + pr_info( + "%s handle(0x%04x) :ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); + leapioraid_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + leapioraid_scsihost_set_tm_flag(ioc, + le16_to_cpu(tm_request->DevHandle)); + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + ioc->put_smid_hi_priority(ioc, smid, 0); + break; + } + case LEAPIORAID_FUNC_SMP_PASSTHROUGH: + { + struct LeapioraidSmpPassthroughReq_t *smp_request = + (struct LeapioraidSmpPassthroughReq_t *) mpi_request; + u8 *data; + + if (!ioc->multipath_on_hba) + smp_request->PhysicalPort = 0xFF; + if (smp_request->PassthroughFlags & + 0x80) + data = (u8 *) &smp_request->SGL; + else { + if (unlikely(data_out == NULL)) { + pr_err( + "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + leapioraid_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + data = data_out; + } + if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) { + ioc->ioc_link_reset_in_progress = 1; + ioc->ignore_loginfos = 1; + } + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } + case LEAPIORAID_FUNC_SATA_PASSTHROUGH: + { + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + if (test_bit + (device_handle, 
ioc->device_remove_in_progress)) { + dtmprintk(ioc, + pr_info( + "%s handle(0x%04x) :ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); + leapioraid_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + ioc->put_smid_default(ioc, smid); + break; + } + case LEAPIORAID_FUNC_FW_DOWNLOAD: + case LEAPIORAID_FUNC_FW_UPLOAD: + { + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } + case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL: + { + struct LeapioraidSasIoUnitControlReq_t *sasiounit_request = + (struct LeapioraidSasIoUnitControlReq_t *) mpi_request; + if (sasiounit_request->Operation == + LEAPIORAID_SAS_OP_PHY_HARD_RESET + || sasiounit_request->Operation == + LEAPIORAID_SAS_OP_PHY_LINK_RESET) { + ioc->ioc_link_reset_in_progress = 1; + ioc->ignore_loginfos = 1; + } + } + fallthrough; + default: + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } + timeout = karg.timeout; + if (timeout < LEAPIORAID_IOCTL_DEFAULT_TIMEOUT) + timeout = LEAPIORAID_IOCTL_DEFAULT_TIMEOUT; + wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout * HZ); + if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT) { + struct LeapioraidSCSITmgReq_t *tm_request = + (struct LeapioraidSCSITmgReq_t *) mpi_request; + leapioraid_scsihost_clear_tm_flag(ioc, + le16_to_cpu(tm_request->DevHandle)); + } else if ((mpi_request->Function == LEAPIORAID_FUNC_SMP_PASSTHROUGH + || mpi_request->Function == + LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL) + && ioc->ioc_link_reset_in_progress) { + ioc->ioc_link_reset_in_progress = 0; + ioc->ignore_loginfos = 0; + } + if (!(ioc->ctl_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + karg.data_sge_offset, issue_reset); + goto issue_host_reset; + } + mpi_reply = ioc->ctl_cmds.reply; + if (mpi_reply->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT && + (ioc->logging_level & LEAPIORAID_DEBUG_TM)) { + struct LeapioraidSCSITmgRep_t *tm_reply = + (struct LeapioraidSCSITmgRep_t *) mpi_reply; + pr_info( + "%s TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n", + ioc->name, + le16_to_cpu(tm_reply->IOCStatus), + le32_to_cpu(tm_reply->IOCLogInfo), + le32_to_cpu(tm_reply->TerminationCount)); + } + if (data_in_sz) { + if (copy_to_user(karg.data_in_buf_ptr, data_in, data_in_sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + if (karg.max_reply_bytes) { + sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz); + if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, + sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + if (karg.max_sense_bytes && (mpi_request->Function == + LEAPIORAID_FUNC_SCSI_IO_REQUEST + || mpi_request->Function == + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH)) { + if (karg.sense_data_ptr == NULL) { + pr_err( + "%s Response buffer provided by application is NULL; Response data will not be returned.\n", + ioc->name); + goto out; + } + sz_arg = SCSI_SENSE_BUFFERSIZE; + sz = min_t(u32, karg.max_sense_bytes, sz_arg); + if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, sz)) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } +issue_host_reset: + if (issue_reset) { + ret = -ENODATA; + if ((mpi_request->Function == 
LEAPIORAID_FUNC_SCSI_IO_REQUEST + || mpi_request->Function == + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH + || mpi_request->Function == + LEAPIORAID_FUNC_SATA_PASSTHROUGH)) { + pr_err( + "%s issue target reset: handle = (0x%04x)\n", + ioc->name, + le16_to_cpu(mpi_request->FunctionDependent1)); + leapioraid_halt_firmware(ioc, 0); + leapioraid_scsihost_issue_locked_tm(ioc, + le16_to_cpu + (mpi_request->FunctionDependent1), + 0, 0, 0, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + smid, 30, + LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET); + } else + leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + } +out: + if (data_in) + dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in, + data_in_dma); + if (data_out) + dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out, + data_out_dma); + kfree(mpi_request); + ioc->ctl_cmds.status = LEAPIORAID_CMD_NOT_USED; + return ret; +} + +static long +leapioraid_ctl_getiocinfo( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_iocinfo karg; + u8 revision; + + dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + memset(&karg, 0, sizeof(karg)); + if (ioc->pfacts) + karg.port_number = ioc->pfacts[0].PortNumber; + pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); + karg.hw_rev = revision; + karg.pci_id = ioc->pdev->device; + karg.subsystem_device = ioc->pdev->subsystem_device; + karg.subsystem_vendor = ioc->pdev->subsystem_vendor; + karg.pci_information.u.bits.bus = ioc->pdev->bus->number; + karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn); + karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn); + karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus); + karg.firmware_version = ioc->facts.FWVersion.Word; + strscpy(karg.driver_version, ioc->driver_name, sizeof(karg.driver_version)); + strcat(karg.driver_version, "-"); + strcat(karg.driver_version, LEAPIORAID_DRIVER_VERSION); + karg.adapter_type = 0x07; + karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +static long +leapioraid_ctl_eventquery( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_eventquery karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + karg.event_entries = LEAPIORAID_CTL_EVENT_LOG_SIZE; + memcpy(karg.event_types, ioc->event_type, + LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +static long +leapioraid_ctl_eventenable( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_eventenable karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + memcpy(ioc->event_type, karg.event_types, + LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + leapioraid_base_validate_event_type(ioc, ioc->event_type); + if (ioc->event_log) + return 0; + ioc->event_context = 0; + ioc->aen_event_read_flag = 0; + ioc->event_log = kcalloc(LEAPIORAID_CTL_EVENT_LOG_SIZE, + 
sizeof(struct LEAPIORAID_IOCTL_EVENTS), + GFP_KERNEL); + if (!ioc->event_log) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENOMEM; + } + return 0; +} + +static long +leapioraid_ctl_eventreport( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_eventreport karg; + u32 number_bytes, max_events, max; + struct leapio_ioctl_eventreport __user *uarg = arg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + number_bytes = karg.hdr.max_data_size - + sizeof(struct leapio_ioctl_header); + max_events = number_bytes / sizeof(struct LEAPIORAID_IOCTL_EVENTS); + max = min_t(u32, LEAPIORAID_CTL_EVENT_LOG_SIZE, max_events); + if (!max || !ioc->event_log) + return -ENODATA; + number_bytes = max * sizeof(struct LEAPIORAID_IOCTL_EVENTS); + if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + ioc->aen_event_read_flag = 0; + return 0; +} + +static long +leapioraid_ctl_do_reset( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_diag_reset karg; + int retval; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + if (ioc->shost_recovery || + ioc->pci_error_recovery || ioc->is_driver_loading || + ioc->remove_host) + return -EAGAIN; + dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + ioc->reset_from_user = 1; + scsi_block_requests(ioc->shost); + retval = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + scsi_unblock_requests(ioc->shost); + pr_info("%s ioctl: host reset: %s\n", + ioc->name, ((!retval) ? 
"SUCCESS" : "FAILED")); + return 0; +} + +static int +leapioraid_ctl_btdh_search_sas_device(struct LEAPIORAID_ADAPTER *ioc, + struct leapio_ioctl_btdh_mapping *btdh) +{ + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->sas_device_list)) + return rc; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == sas_device->handle) { + btdh->bus = sas_device->channel; + btdh->id = sas_device->id; + rc = 1; + goto out; + } else if (btdh->bus == sas_device->channel && btdh->id == + sas_device->id && btdh->handle == 0xFFFF) { + btdh->handle = sas_device->handle; + rc = 1; + goto out; + } + } +out: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +static int +leapioraid_ctl_btdh_search_raid_device(struct LEAPIORAID_ADAPTER *ioc, + struct leapio_ioctl_btdh_mapping *btdh) +{ + struct leapioraid_raid_device *raid_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->raid_device_list)) + return rc; + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == raid_device->handle) { + btdh->bus = raid_device->channel; + btdh->id = raid_device->id; + rc = 1; + goto out; + } else if (btdh->bus == raid_device->channel && btdh->id == + raid_device->id && btdh->handle == 0xFFFF) { + btdh->handle = raid_device->handle; + rc = 1; + goto out; + } + } +out: + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return rc; +} + +static long +leapioraid_ctl_btdh_mapping( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_btdh_mapping karg; + int rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + dctlprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + rc = leapioraid_ctl_btdh_search_sas_device(ioc, &karg); + if (!rc) + leapioraid_ctl_btdh_search_raid_device(ioc, &karg); + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +#ifdef CONFIG_COMPAT +static long +leapioraid_ctl_compat_command( + struct LEAPIORAID_ADAPTER *ioc, unsigned int cmd, + void __user *arg) +{ + struct leapio_ioctl_command32 karg32; + struct leapio_ioctl_command32 __user *uarg; + struct leapio_ioctl_command karg; + + if (_IOC_SIZE(cmd) != sizeof(struct leapio_ioctl_command32)) + return -EINVAL; + uarg = (struct leapio_ioctl_command32 __user *)arg; + if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + memset(&karg, 0, sizeof(struct leapio_ioctl_command)); + karg.hdr.ioc_number = karg32.hdr.ioc_number; + karg.hdr.port_number = karg32.hdr.port_number; + karg.hdr.max_data_size = karg32.hdr.max_data_size; + karg.timeout = karg32.timeout; + karg.max_reply_bytes = karg32.max_reply_bytes; + karg.data_in_size = karg32.data_in_size; + karg.data_out_size = karg32.data_out_size; + karg.max_sense_bytes = karg32.max_sense_bytes; + karg.data_sge_offset = karg32.data_sge_offset; + karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); + karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); + karg.data_out_buf_ptr = 
compat_ptr(karg32.data_out_buf_ptr); + karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); + return leapioraid_ctl_do_command(ioc, karg, &uarg->mf); +} +#endif + +static long +leapioraid_ctl_ioctl_main( + struct file *file, unsigned int cmd, void __user *arg, + u8 compat) +{ + struct LEAPIORAID_ADAPTER *ioc; + struct leapio_ioctl_header ioctl_header; + enum leapioraid_block_state state; + long ret = -ENOIOCTLCMD; + + if (copy_from_user(&ioctl_header, (char __user *)arg, + sizeof(struct leapio_ioctl_header))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + if (leapioraid_ctl_verify_adapter(ioctl_header.ioc_number, + &ioc) == -1 || !ioc) + return -ENODEV; + mutex_lock(&ioc->pci_access_mutex); + if (ioc->shost_recovery || + ioc->pci_error_recovery || ioc->is_driver_loading || + ioc->remove_host) { + ret = -EAGAIN; + goto unlock_pci_access; + } + state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING; + if (state == NON_BLOCKING) { + if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { + ret = -EAGAIN; + goto unlock_pci_access; + } + } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { + ret = -ERESTARTSYS; + goto unlock_pci_access; + } + switch (cmd) { + case LEAPIORAID_IOCINFO: + if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_iocinfo)) + ret = leapioraid_ctl_getiocinfo(ioc, arg); + break; +#ifdef CONFIG_COMPAT + case LEAPIORAID_COMMAND32: +#endif + case LEAPIORAID_COMMAND: + { + struct leapio_ioctl_command __user *uarg; + struct leapio_ioctl_command karg; + +#ifdef CONFIG_COMPAT + if (compat) { + ret = + leapioraid_ctl_compat_command(ioc, cmd, arg); + break; + } +#endif + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + ret = -EFAULT; + break; + } + if (karg.hdr.ioc_number != ioctl_header.ioc_number) { + ret = -EINVAL; + break; + } + if (_IOC_SIZE(cmd) == + sizeof(struct leapio_ioctl_command)) { + uarg = arg; + ret = + leapioraid_ctl_do_command(ioc, karg, + &uarg->mf); + } + break; + } + case LEAPIORAID_EVENTQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_eventquery)) + ret = leapioraid_ctl_eventquery(ioc, arg); + break; + case LEAPIORAID_EVENTENABLE: + if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_eventenable)) + ret = leapioraid_ctl_eventenable(ioc, arg); + break; + case LEAPIORAID_EVENTREPORT: + ret = leapioraid_ctl_eventreport(ioc, arg); + break; + case LEAPIORAID_HARDRESET: + if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_diag_reset)) + ret = leapioraid_ctl_do_reset(ioc, arg); + break; + case LEAPIORAID_BTDHMAPPING: + if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_btdh_mapping)) + ret = leapioraid_ctl_btdh_mapping(ioc, arg); + break; + default: + dctlprintk(ioc, pr_err( + "%s unsupported ioctl opcode(0x%08x)\n", + ioc->name, cmd)); + break; + } + mutex_unlock(&ioc->ctl_cmds.mutex); +unlock_pci_access: + mutex_unlock(&ioc->pci_access_mutex); + return ret; +} + +static long +leapioraid_ctl_ioctl( + struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + ret = leapioraid_ctl_ioctl_main(file, cmd, (void __user *)arg, 0); + return ret; +} + +#ifdef CONFIG_COMPAT +static long +leapioraid_ctl_ioctl_compat( + struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + ret = leapioraid_ctl_ioctl_main(file, cmd, (void __user *)arg, 1); + return ret; +} +#endif + +static ssize_t +version_fw_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + 
struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF); +} +static DEVICE_ATTR_RO(version_fw); + +static ssize_t +version_bios_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + + return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", + (version & 0xFF000000) >> 24, + (version & 0x00FF0000) >> 16, + (version & 0x0000FF00) >> 8, version & 0x000000FF); +} +static DEVICE_ATTR_RO(version_bios); + +static ssize_t +version_leapioraid_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", + ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); +} +static DEVICE_ATTR_RO(version_leapioraid); + +static ssize_t +version_product_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); +} +static DEVICE_ATTR_RO(version_product); + +static ssize_t +version_nvdata_persistent_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", + le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word)); +} +static DEVICE_ATTR_RO(version_nvdata_persistent); + +static ssize_t +version_nvdata_default_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", + le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word)); +} +static DEVICE_ATTR_RO(version_nvdata_default); + +static ssize_t +board_name_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName); +} +static DEVICE_ATTR_RO(board_name); + +static ssize_t +board_assembly_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly); +} +static DEVICE_ATTR_RO(board_assembly); + +static ssize_t +board_tracer_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber); +} +static DEVICE_ATTR_RO(board_tracer); + +static ssize_t +io_delay_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER 
*ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay); +} +static DEVICE_ATTR_RO(io_delay); + +static ssize_t +device_delay_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); +} +static DEVICE_ATTR_RO(device_delay); + +static ssize_t +fw_queue_depth_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit); +} +static DEVICE_ATTR_RO(fw_queue_depth); + +static ssize_t +host_sas_address_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)ioc->sas_hba.sas_address); +} +static DEVICE_ATTR_RO(host_sas_address); + +static ssize_t +logging_level_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level); +} + +static ssize_t +logging_level_store( + struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + int val = 0; + + if (kstrtoint(buf, 0, &val)) + return -EINVAL; + ioc->logging_level = val; + pr_info("%s logging_level=%08xh\n", ioc->name, + ioc->logging_level); + return strlen(buf); +} +static DEVICE_ATTR_RW(logging_level); + +static ssize_t +fwfault_debug_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug); +} + +static ssize_t +fwfault_debug_store( + struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + int val = 0; + + if (kstrtoint(buf, 0, &val)) + return -EINVAL; + ioc->fwfault_debug = val; + pr_info("%s fwfault_debug=%d\n", ioc->name, + ioc->fwfault_debug); + return strlen(buf); +} +static DEVICE_ATTR_RW(fwfault_debug); + +static +struct leapioraid_raid_device *leapioraid_ctl_raid_device_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->handle != handle) + continue; + r = raid_device; + goto out; + } +out: + return r; +} + +u8 +leapioraid_ctl_tm_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + u8 rc; + unsigned long flags; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + u16 smid_task_abort; + u16 handle; + struct LeapioraidSCSITmgReq_t *mpi_request; + struct LeapioraidSCSITmgRep_t *mpi_reply = + leapioraid_base_get_reply_virt_addr(ioc, reply); + + rc = 1; + if 
(unlikely(!mpi_reply)) { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return rc; + } + handle = le16_to_cpu(mpi_reply->DevHandle); + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + smid_task_abort = 0; + if (mpi_reply->TaskType == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + smid_task_abort = le16_to_cpu(mpi_request->TaskMID); + } + pr_info("\tcomplete: sas_addr(0x%016llx), handle(0x%04x), smid(%d), term(%d)\n", + (unsigned long long)sas_device->sas_address, handle, + (smid_task_abort ? smid_task_abort : smid), + le32_to_cpu(mpi_reply->TerminationCount)); + leapioraid_sas_device_put(sas_device); + } + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_ctl_raid_device_find_by_handle(ioc, handle); + if (raid_device) + pr_info("\tcomplete: wwid(0x%016llx), handle(0x%04x), smid(%d), term(%d)\n", + (unsigned long long)raid_device->wwid, handle, + smid, le32_to_cpu(mpi_reply->TerminationCount)); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + ioc->terminated_tm_count += le32_to_cpu(mpi_reply->TerminationCount); + if (ioc->out_of_frames) { + rc = 0; + leapioraid_base_free_smid(ioc, smid); + ioc->out_of_frames = 0; + wake_up(&ioc->no_frames_tm_wq); + } + ioc->pending_tm_count--; + if (!ioc->pending_tm_count) + wake_up(&ioc->pending_tm_wq); + return rc; +} + +static void +leapioraid_ctl_tm_sysfs(struct LEAPIORAID_ADAPTER *ioc, u8 task_type) +{ + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + struct LeapioraidSCSITmgReq_t *mpi_request; + u16 smid, handle, hpr_smid; + struct LEAPIORAID_DEVICE *device_priv_data; + struct LEAPIORAID_TARGET *target_priv_data; + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + unsigned long flags; + int tm_count; + int lun; + u32 doorbell; + struct leapioraid_scsiio_tracker *st; + u8 tr_method = 0x00; + + if (list_empty(&ioc->sas_device_list)) + return; + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->shost_recovery || ioc->remove_host) { + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + pr_err( + "%s %s: busy: host reset in progress, try again later\n", + ioc->name, __func__); + return; + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + scsi_block_requests(ioc->shost); + init_waitqueue_head(&ioc->pending_tm_wq); + ioc->ignore_loginfos = 1; + ioc->pending_tm_count = 0; + ioc->terminated_tm_count = 0; + ioc->out_of_frames = 0; + tm_count = 0; + switch (task_type) { + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + if (list_empty(&ioc->hpr_free_list)) { + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + } + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + st = leapioraid_base_scsi_cmd_priv(scmd); + if ((!st) || (st->cb_idx == 0xFF) || (st->smid == 0)) + continue; + lun = scmd->device->lun; + device_priv_data = scmd->device->hostdata; + if (!device_priv_data || !device_priv_data->sas_target) + continue; + target_priv_data = device_priv_data->sas_target; + if (!target_priv_data) + continue; + if (target_priv_data->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT || + target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) + continue; + handle = device_priv_data->sas_target->handle; + hpr_smid = 
leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err( + "%s %s: out of hi-priority requests!!\n", + ioc->name, __func__); + goto out_of_frames; + } + mpi_request = + leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + mpi_request->TaskMID = cpu_to_le16(st->smid); + int_to_scsilun(lun, + (struct scsi_lun *)mpi_request->LUN); + starget_printk(KERN_INFO, + device_priv_data->sas_target->starget, + "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long) + device_priv_data->sas_target->sas_address, handle, st->smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) + goto fault_in_progress; + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + } + if (!sas_device->starget) + continue; + if (test_bit(sas_device->handle, ioc->pd_handles)) + continue; + hpr_smid = leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err( + "%s %s: out of hi-priority requests!!\n", + ioc->name, __func__); + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + goto out_of_frames; + } + mpi_request = + leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = + cpu_to_le16(sas_device->handle); + mpi_request->TaskType = + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + starget_printk(KERN_INFO, + sas_device->starget, + "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long)sas_device->sas_address, + sas_device->handle, + hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + spin_lock_irqsave(&ioc->raid_device_lock, + flags); + } + if (!raid_device->starget) + continue; + hpr_smid = leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err("%s %s: out of hi-priority requests!!\n", + 
ioc->name, __func__); + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + goto out_of_frames; + } + mpi_request = + leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = + cpu_to_le16(raid_device->handle); + mpi_request->TaskType = + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + starget_printk(KERN_INFO, + raid_device->starget, + "sending tm: wwid(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long)raid_device->wwid, + raid_device->handle, hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + shost_for_each_device(sdev, ioc->shost) { + if (list_empty(&ioc->hpr_free_list)) { + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + } + device_priv_data = sdev->hostdata; + if (!device_priv_data || !device_priv_data->sas_target) + continue; + target_priv_data = device_priv_data->sas_target; + if (!target_priv_data) + continue; + if (target_priv_data->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + continue; + if ((target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) + && (task_type == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) + continue; + handle = device_priv_data->sas_target->handle; + hpr_smid = leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err("%s %s: out of hi-priority requests!!\n", + ioc->name, __func__); + scsi_device_put(sdev); + goto out_of_frames; + } + mpi_request = + leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = task_type; + mpi_request->MsgFlags = tr_method; + int_to_scsilun(sdev->lun, (struct scsi_lun *) + mpi_request->LUN); + sdev_printk(KERN_INFO, sdev, + "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long)target_priv_data->sas_address, + handle, hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + scsi_device_put(sdev); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + break; + } +out_of_frames: + if (ioc->pending_tm_count) + wait_event_timeout(ioc->pending_tm_wq, + !ioc->pending_tm_count, 30 * HZ); + pr_info("%s task management requests issued(%d)\n", + ioc->name, tm_count); + pr_info("%s number of IOs terminated(%d)\n", + ioc->name, ioc->terminated_tm_count); +fault_in_progress: + scsi_unblock_requests(ioc->shost); + ioc->ignore_loginfos = 0; +} + +static ssize_t +task_management_store( + struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct 
Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + int opcode = 0; + + if (kstrtoint(buf, 0, &opcode)) + return -EINVAL; + switch (opcode) { + case 1: + ioc->reset_from_user = 1; + scsi_block_requests(ioc->shost); + pr_err("%s sysfs: diag reset issued: %s\n", ioc->name, + ((!leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER)) + ? "SUCCESS" : "FAILED")); + scsi_unblock_requests(ioc->shost); + break; + case 2: + ioc->reset_from_user = 1; + scsi_block_requests(ioc->shost); + pr_err("%s sysfs: message unit reset issued: %s\n", ioc->name, + ((!leapioraid_base_hard_reset_handler(ioc, + SOFT_RESET)) ? + "SUCCESS" : "FAILED")); + scsi_unblock_requests(ioc->shost); + break; + case 3: + pr_err("%s sysfs: TASKTYPE_ABORT_TASK:\n", ioc->name); + ioc->got_task_abort_from_sysfs = 1; + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK); + ioc->got_task_abort_from_sysfs = 0; + break; + case 4: + pr_err("%s sysfs: TASKTYPE_TARGET_RESET:\n", ioc->name); + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET); + break; + case 5: + pr_err("%s sysfs: TASKTYPE_LOGICAL_UNIT_RESET:\n", ioc->name); + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); + break; + case 6: + pr_info("%s sysfs: TASKTYPE_ABRT_TASK_SET:\n", ioc->name); + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET); + break; + default: + pr_info("%s unsupported opcode(%d)\n", + ioc->name, opcode); + break; + } + return strlen(buf); +} +static DEVICE_ATTR_WO(task_management); + +static ssize_t +ioc_reset_count_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count); +} +static DEVICE_ATTR_RO(ioc_reset_count); + +static ssize_t +reply_queue_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + u8 reply_queue_count; + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + if ((ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) + reply_queue_count = ioc->reply_queue_count; + else + reply_queue_count = 1; + return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); +} +static DEVICE_ATTR_RO(reply_queue_count); + +static ssize_t +drv_support_bitmap_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap); +} +static DEVICE_ATTR_RO(drv_support_bitmap); + +static ssize_t +enable_sdev_max_qd_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd); +} + +static ssize_t +enable_sdev_max_qd_store(struct device *cdev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + int val = 0; + struct scsi_device *sdev; + 
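+ /* Writing 0 restores the per-device queue depths derived below from volume type or port width; writing 1 raises every device to shost->can_queue. Example usage (host number hypothetical): echo 1 > /sys/class/scsi_host/host0/enable_sdev_max_qd */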
struct leapioraid_raid_device *raid_device; + int qdepth; + + if (kstrtoint(buf, 0, &val)) + return -EINVAL; + switch (val) { + case 0: + ioc->enable_sdev_max_qd = 0; + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + continue; + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + raid_device = + leapioraid_raid_device_find_by_handle(ioc, + sas_target_priv_data->handle); + switch (raid_device->volume_type) { + case LEAPIORAID_RAID_VOL_TYPE_RAID0: + if (raid_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = + LEAPIORAID_SAS_QUEUE_DEPTH; + else + qdepth = + LEAPIORAID_SATA_QUEUE_DEPTH; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1E: + case LEAPIORAID_RAID_VOL_TYPE_RAID1: + case LEAPIORAID_RAID_VOL_TYPE_RAID10: + case LEAPIORAID_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + } + } else + qdepth = + (sas_target_priv_data->sas_dev->port_type > + 1) ? ioc->max_wideport_qd : ioc->max_narrowport_qd; + leapioraid__scsihost_change_queue_depth(sdev, qdepth); + } + break; + case 1: + ioc->enable_sdev_max_qd = 1; + shost_for_each_device(sdev, ioc->shost) { + leapioraid__scsihost_change_queue_depth(sdev, + shost->can_queue); + } + break; + default: + return -EINVAL; + } + return strlen(buf); +} +static DEVICE_ATTR_RW(enable_sdev_max_qd); + +static struct attribute *leapioraid_host_attrs[] = { + &dev_attr_version_fw.attr, + &dev_attr_version_bios.attr, + &dev_attr_version_leapioraid.attr, + &dev_attr_version_product.attr, + &dev_attr_version_nvdata_persistent.attr, + &dev_attr_version_nvdata_default.attr, + &dev_attr_board_name.attr, + &dev_attr_board_assembly.attr, + &dev_attr_board_tracer.attr, + &dev_attr_io_delay.attr, + &dev_attr_device_delay.attr, + &dev_attr_logging_level.attr, + &dev_attr_fwfault_debug.attr, + &dev_attr_fw_queue_depth.attr, + &dev_attr_host_sas_address.attr, + &dev_attr_task_management.attr, + &dev_attr_ioc_reset_count.attr, + &dev_attr_reply_queue_count.attr, + &dev_attr_drv_support_bitmap.attr, + &dev_attr_enable_sdev_max_qd.attr, + NULL, +}; + +static const struct attribute_group leapioraid_host_attr_group = { + .attrs = leapioraid_host_attrs +}; + +const struct attribute_group *leapioraid_host_groups[] = { + &leapioraid_host_attr_group, + NULL +}; + +static ssize_t +sas_address_show( + struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf( + buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)sas_device_priv_data->sas_target->sas_address); +} +static DEVICE_ATTR_RO(sas_address); + +static ssize_t +sas_device_handle_show( + struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "0x%04x\n", + sas_device_priv_data->sas_target->handle); +} +static DEVICE_ATTR_RO(sas_device_handle); + +static ssize_t +sas_ncq_prio_enable_show( + struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", + sas_device_priv_data->ncq_prio_enable); +} + +static ssize_t 
+sas_ncq_prio_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + int ncq_prio_enable = 0; + + if (kstrtoint(buf, 0, &ncq_prio_enable)) + return -EINVAL; + if (!leapioraid_scsihost_ncq_prio_supp(sdev)) + return -EINVAL; + sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; + return strlen(buf); +} +static DEVICE_ATTR_RW(sas_ncq_prio_enable); + +static struct attribute *leapioraid_dev_attrs[] = { + &dev_attr_sas_address.attr, + &dev_attr_sas_device_handle.attr, + &dev_attr_sas_ncq_prio_enable.attr, + NULL, +}; +static const struct attribute_group leapioraid_dev_attr_group = { + .attrs = leapioraid_dev_attrs +}; +const struct attribute_group *leapioraid_dev_groups[] = { + &leapioraid_dev_attr_group, + NULL +}; + +static const struct +file_operations leapioraid_ctl_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = leapioraid_ctl_ioctl, + .poll = leapioraid_ctl_poll, + .fasync = leapioraid_ctl_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = leapioraid_ctl_ioctl_compat, +#endif +}; + +static struct miscdevice leapioraid_ctl_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = LEAPIORAID_DEV_NAME, + .fops = &leapioraid_ctl_fops, +}; + +void leapioraid_ctl_init(void) +{ + leapioraid_async_queue = NULL; + if (misc_register(&leapioraid_ctl_dev) < 0) + pr_err("%s can't register misc device\n", + LEAPIORAID_DRIVER_NAME); + init_waitqueue_head(&leapioraid_ctl_poll_wait); +} + +void leapioraid_ctl_exit(void) +{ + struct LEAPIORAID_ADAPTER *ioc; + + list_for_each_entry(ioc, &leapioraid_ioc_list, list) { + kfree(ioc->event_log); + } + misc_deregister(&leapioraid_ctl_dev); +} diff --git a/drivers/scsi/leapioraid/leapioraid_func.c b/drivers/scsi/leapioraid/leapioraid_func.c new file mode 100644 index 0000000000000000000000000000000000000000..97e0f893ab4c50f025495d7bd1ac57e43828339b --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_func.c @@ -0,0 +1,7075 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. + * + * Copyright (C) 2013-2021 LSI Corporation + * Copyright (C) 2013-2021 Avago Technologies + * Copyright (C) 2013-2021 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "leapioraid_func.h" +#include +#include +#include + +static char *dest_ip = "127.0.0.1"; +module_param(dest_ip, charp, 0000); +MODULE_PARM_DESC(dest_ip, "Destination IP address"); + +static u16 port_no = 6666; +module_param(port_no, ushort, 0000); +MODULE_PARM_DESC(port_no, "Destination Port number"); +static struct sockaddr_in dest_addr; +static struct socket *sock; +static struct msghdr msg; + +#define LEAPIORAID_LOG_POLLING_INTERVAL 1 +static LEAPIORAID_CALLBACK leapioraid_callbacks[LEAPIORAID_MAX_CALLBACKS]; +#define LEAPIORAID_FAULT_POLLING_INTERVAL 1000 +#define LEAPIORAID_MAX_HBA_QUEUE_DEPTH 1024 + +static int smp_affinity_enable = 1; +module_param(smp_affinity_enable, int, 0444); +MODULE_PARM_DESC(smp_affinity_enable, + "SMP affinity feature enable/disable. Default: enable(1)"); + +static int max_msix_vectors = -1; +module_param(max_msix_vectors, int, 0444); +MODULE_PARM_DESC(max_msix_vectors, "max msix vectors"); + +static int irqpoll_weight = -1; +module_param(irqpoll_weight, int, 0444); +MODULE_PARM_DESC(irqpoll_weight, + "irq poll weight (default = one fourth of HBA queue depth)"); + +static int leapioraid_fwfault_debug; + +static int perf_mode = -1; + +static int poll_queues; +module_param(poll_queues, int, 0444); +MODULE_PARM_DESC(poll_queues, + "Number of queues to be used for io_uring poll mode.\n\t\t" + "This parameter is effective only if host_tagset_enable=1.\n\t\t" + "When poll_queues is enabled, perf_mode is set to latency mode.\n\t\t"); + +enum leapioraid_perf_mode { + LEAPIORAID_PERF_MODE_DEFAULT = -1, + LEAPIORAID_PERF_MODE_BALANCED = 0, + LEAPIORAID_PERF_MODE_IOPS = 1, + LEAPIORAID_PERF_MODE_LATENCY = 2, +}; + +static void +leapioraid_base_clear_outstanding_leapioraid_commands( + struct LEAPIORAID_ADAPTER *ioc); +static +int leapioraid_base_wait_on_iocstate(struct LEAPIORAID_ADAPTER *ioc, + u32 ioc_state, int timeout); + +static int +leapioraid_scsihost_set_fwfault_debug( + const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct LEAPIORAID_ADAPTER *ioc; + + if (ret) + return ret; + pr_info("setting fwfault_debug(%d)\n", + leapioraid_fwfault_debug); + spin_lock(&leapioraid_gioc_lock); + list_for_each_entry(ioc, &leapioraid_ioc_list, list) + ioc->fwfault_debug = leapioraid_fwfault_debug; + spin_unlock(&leapioraid_gioc_lock); + return 0; +} + +module_param_call( + leapioraid_fwfault_debug, + leapioraid_scsihost_set_fwfault_debug, + param_get_int, &leapioraid_fwfault_debug, 0644); + +static inline u32 +leapioraid_base_readl_aero( + const void __iomem *addr, u8 retry_count) +{ + u32 i = 0, ret_val; + + do { + ret_val = readl(addr); + i++; + } while (ret_val == 0 && i < retry_count); + return ret_val; +} + +u8 +leapioraid_base_check_cmd_timeout( + struct LEAPIORAID_ADAPTER *ioc, + U8 status, void *mpi_request, int sz) +{ + u8 issue_reset = 0; + + if (!(status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + pr_err("%s Command %s\n", ioc->name, + ((issue_reset == + 0) ? "terminated due to Host Reset" : "Timeout")); + leapioraid_debug_dump_mf(mpi_request, sz); + return issue_reset; +} + +static int +leapioraid_remove_dead_ioc_func(void *arg) +{ + struct LEAPIORAID_ADAPTER *ioc = (struct LEAPIORAID_ADAPTER *)arg; + struct pci_dev *pdev; + + if (ioc == NULL) + return -1; + pdev = ioc->pdev; + if (pdev == NULL) + return -1; +#if defined(DISABLE_RESET_SUPPORT) + ssleep(2); +#endif + + pci_stop_and_remove_bus_device(pdev); + return 0; +} + +u8 +leapioraid_base_pci_device_is_unplugged(struct LEAPIORAID_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + struct pci_bus *bus = pdev->bus; + int devfn = pdev->devfn; + u32 vendor_id; + + if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendor_id)) + return 1; + if (vendor_id == 0xffffffff || vendor_id == 0x00000000 || + vendor_id == 0x0000ffff || vendor_id == 0xffff0000) + return 1; + if ((vendor_id & 0xffff) == 0x0001) + return 1; + return 0; +} + +u8 +leapioraid_base_pci_device_is_available(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->pci_error_recovery + || leapioraid_base_pci_device_is_unplugged(ioc)) + return 0; + return 1; +} + +static void +leapioraid_base_sync_drv_fw_timestamp(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidIoUnitControlReq_t *mpi_request; + struct LeapioraidIoUnitControlRep_t *mpi_reply; + u16 smid; + ktime_t current_time; + u64 TimeStamp = 0; + u8 issue_reset = 0; + + mutex_lock(&ioc->scsih_cmds.mutex); + if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s: scsih_cmd in use %s\n", ioc->name, __func__); + goto out; + } + ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING; + smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + pr_err("%s: failed obtaining a smid %s\n", ioc->name, __func__); + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidIoUnitControlReq_t)); + mpi_request->Function = 
LEAPIORAID_FUNC_IO_UNIT_CONTROL; + mpi_request->Operation = 0x0F; + mpi_request->IOCParameter = 0x81; + current_time = ktime_get_real(); + TimeStamp = ktime_to_ms(current_time); + mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF); + mpi_request->IOCParameterValue2 = cpu_to_le32(TimeStamp >> 32); + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + dinitprintk(ioc, pr_err( + "%s Io Unit Control Sync TimeStamp (sending), @time %lld ms\n", + ioc->name, TimeStamp)); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + 10 * HZ); + if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, + mpi_request, + sizeof + (struct LeapioraidSasIoUnitControlReq_t) + / 4, issue_reset); + goto issue_host_reset; + } + if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + dinitprintk(ioc, pr_err( + "%s Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + } +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; +out: + mutex_unlock(&ioc->scsih_cmds.mutex); +} + +static int +leapioraid_udp_init(void) +{ + int ret; + u32 ip; + + if (sock) + return 0; + if (!in4_pton(dest_ip, -1, (u8 *) &ip, -1, NULL)) { + pr_err("Invalid IP address: %s, set to default: 127.0.0.1\n", + dest_ip); + dest_ip = "127.0.0.1"; + in4_pton(dest_ip, -1, (u8 *) &ip, -1, NULL); /* re-parse so ip is not left uninitialized */ + } + ret = + sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, IPPROTO_UDP, + &sock); + memset(&dest_addr, 0, sizeof(dest_addr)); + dest_addr.sin_family = AF_INET; + dest_addr.sin_addr.s_addr = ip; + dest_addr.sin_port = htons(port_no); + memset(&msg, 0, sizeof(msg)); + msg.msg_name = &dest_addr; + msg.msg_namelen = sizeof(struct sockaddr_in); + return ret; +} + +static void +leapioraid_udp_exit(void) +{ + if (sock) + sock_release(sock); +} + +static int +leapioraid_send_udp_pkg(void *buf, U32 datasize) +{ + int ret; + struct kvec vec; + + vec.iov_len = datasize; + vec.iov_base = buf; + ret = kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len); + if (ret <= 0) { + pr_err_ratelimited("Sending UDP packet failed: errno = %d\n", + ret); + return 0; + } else { + return ret; + } +} + +static void +leapioraid_base_pcie_log_work(struct work_struct *work) +{ + struct LEAPIORAID_ADAPTER *ioc = + container_of(work, struct LEAPIORAID_ADAPTER, pcie_log_work.work); + unsigned long flags; + u32 host_logbuf_position, ioc_logbuf_position; + u32 datasize, offset, send_sz, actual_send_sz; + + while (true) { + host_logbuf_position = + ioc->base_readl(&ioc->chip->HostLogBufPosition, 0); + ioc_logbuf_position = + ioc->base_readl(&ioc->chip->IocLogBufPosition, 0); + datasize = ioc_logbuf_position - host_logbuf_position; + offset = host_logbuf_position % SYS_LOG_BUF_SIZE; + if (datasize == 0) { + goto rearm_timer; + } else if (datasize > SYS_LOG_BUF_SIZE) { + pr_err("log thread error: data size overflow\n"); + return; + } + + if (offset + datasize > SYS_LOG_BUF_SIZE) + send_sz = SYS_LOG_BUF_SIZE - offset; + else + send_sz = datasize; + + if (send_sz > MAX_UPD_PAYLOAD_SZ) + send_sz = MAX_UPD_PAYLOAD_SZ; + + actual_send_sz = + leapioraid_send_udp_pkg(ioc->log_buffer + offset, send_sz); + host_logbuf_position += actual_send_sz; + writel(host_logbuf_position, &ioc->chip->HostLogBufPosition); + } +rearm_timer: + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if 
(ioc->pcie_log_work_q) + queue_delayed_work(ioc->pcie_log_work_q, + &ioc->pcie_log_work, + msecs_to_jiffies(LEAPIORAID_LOG_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +void +leapioraid_base_start_log_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->pcie_log_work_q) + return; + leapioraid_udp_init(); + INIT_DELAYED_WORK(&ioc->pcie_log_work, leapioraid_base_pcie_log_work); + snprintf(ioc->pcie_log_work_q_name, + sizeof(ioc->pcie_log_work_q_name), "poll_%s%u_status", + ioc->driver_name, ioc->id); + ioc->pcie_log_work_q = + create_singlethread_workqueue(ioc->pcie_log_work_q_name); + if (!ioc->pcie_log_work_q) { + pr_err("%s %s: failed (line=%d)\n", ioc->name, + __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->pcie_log_work_q) + queue_delayed_work(ioc->pcie_log_work_q, + &ioc->pcie_log_work, + msecs_to_jiffies(LEAPIORAID_LOG_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +void +leapioraid_base_stop_log_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + wq = ioc->pcie_log_work_q; + ioc->pcie_log_work_q = NULL; + leapioraid_udp_exit(); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->pcie_log_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +static void +leapioraid_base_fault_reset_work(struct work_struct *work) +{ + struct LEAPIORAID_ADAPTER *ioc = + container_of(work, struct LEAPIORAID_ADAPTER, + fault_reset_work.work); + unsigned long flags; + u32 doorbell; + int rc; + struct task_struct *p; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) || + ioc->pci_error_recovery || ioc->remove_host) + goto rearm_timer; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_MASK) { + pr_err( + "%s SAS host is non-operational!\n", ioc->name); + if (ioc->non_operational_loop++ < 5) { + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + goto rearm_timer; + } + ioc->remove_host = 1; + leapioraid_base_pause_mq_polling(ioc); + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + p = kthread_run(leapioraid_remove_dead_ioc_func, ioc, + "%s_dead_ioc_%d", ioc->driver_name, ioc->id); + if (IS_ERR(p)) + pr_err( + "%s %s: Running leapioraid_dead_ioc thread failed!\n", + ioc->name, __func__); + else + pr_err( + "%s %s: Running leapioraid_dead_ioc thread succeeded!\n", + ioc->name, __func__); + return; + } + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP) { + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? 
+ ioc->manu_pg11.CoreDumpTOSec : + 15; + timeout /= (LEAPIORAID_FAULT_POLLING_INTERVAL / 1000); + if (ioc->ioc_coredump_loop == 0) { + leapioraid_base_coredump_info(ioc, doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, + flags); + leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_clear_outstanding_scsi_tm_commands + (ioc); + leapioraid_base_mask_interrupts(ioc); + leapioraid_base_clear_outstanding_leapioraid_commands(ioc); + leapioraid_ctl_clear_outstanding_ioctls(ioc); + } + drsprintk(ioc, + pr_info("%s %s: CoreDump loop %d.\n", + ioc->name, __func__, ioc->ioc_coredump_loop)); + if (ioc->ioc_coredump_loop++ < timeout) { + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + goto rearm_timer; + } + } + if (ioc->ioc_coredump_loop) { + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_COREDUMP) + pr_err( + "%s %s: CoreDump completed. LoopCount: %d\n", + ioc->name, __func__, ioc->ioc_coredump_loop); + else + pr_err( + "%s %s: CoreDump timed out. LoopCount: %d\n", + ioc->name, __func__, ioc->ioc_coredump_loop); + ioc->ioc_coredump_loop = 0xFF; + } + ioc->non_operational_loop = 0; + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_OPERATIONAL) { + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + pr_warn("%s %s: hard reset: %s\n", ioc->name, + __func__, (rc == 0) ? "success" : "failed"); + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, + doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + } else if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) + leapioraid_base_coredump_info(ioc, + doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + if (rc + && (doorbell & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_OPERATIONAL) + return; + } + ioc->ioc_coredump_loop = 0; + if (ioc->time_sync_interval && + ++ioc->timestamp_update_count >= ioc->time_sync_interval) { + ioc->timestamp_update_count = 0; + leapioraid_base_sync_drv_fw_timestamp(ioc); + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); +rearm_timer: + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +static void +leapioraid_base_hba_hot_unplug_work(struct work_struct *work) +{ + struct LEAPIORAID_ADAPTER *ioc = + container_of(work, struct LEAPIORAID_ADAPTER, + hba_hot_unplug_work.work); + unsigned long flags; + + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + if (ioc->shost_recovery || ioc->pci_error_recovery) + goto rearm_timer; + if (leapioraid_base_pci_device_is_unplugged(ioc)) { + if (ioc->remove_host) { + pr_err("%s The host is being removed!\n", + ioc->name); + goto rearm_timer; + } + ioc->remove_host = 1; + leapioraid_base_clear_outstanding_leapioraid_commands(ioc); + leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_clear_outstanding_scsi_tm_commands(ioc); + leapioraid_ctl_clear_outstanding_ioctls(ioc); + } +rearm_timer: + if (ioc->hba_hot_unplug_work_q) + queue_delayed_work(ioc->hba_hot_unplug_work_q, + &ioc->hba_hot_unplug_work, + msecs_to_jiffies + (1000)); + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); +} + +void +leapioraid_base_start_watchdog(struct 
LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->fault_reset_work_q) + return; + ioc->timestamp_update_count = 0; + INIT_DELAYED_WORK(&ioc->fault_reset_work, + leapioraid_base_fault_reset_work); + snprintf(ioc->fault_reset_work_q_name, + sizeof(ioc->fault_reset_work_q_name), "poll_%s%u_status", + ioc->driver_name, ioc->id); + ioc->fault_reset_work_q = + create_singlethread_workqueue(ioc->fault_reset_work_q_name); + if (!ioc->fault_reset_work_q) { + pr_err("%s %s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->open_pcie_trace) + leapioraid_base_start_log_watchdog(ioc); +} + +void +leapioraid_base_stop_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + wq = ioc->fault_reset_work_q; + ioc->fault_reset_work_q = NULL; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } + if (ioc->open_pcie_trace) + leapioraid_base_stop_log_watchdog(ioc); +} + +void +leapioraid_base_start_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->hba_hot_unplug_work_q) + return; + INIT_DELAYED_WORK(&ioc->hba_hot_unplug_work, + leapioraid_base_hba_hot_unplug_work); + snprintf(ioc->hba_hot_unplug_work_q_name, + sizeof(ioc->hba_hot_unplug_work_q_name), + "poll_%s%u_hba_unplug", ioc->driver_name, ioc->id); + ioc->hba_hot_unplug_work_q = + create_singlethread_workqueue(ioc->hba_hot_unplug_work_q_name); + if (!ioc->hba_hot_unplug_work_q) { + pr_err("%s %s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + if (ioc->hba_hot_unplug_work_q) + queue_delayed_work(ioc->hba_hot_unplug_work_q, + &ioc->hba_hot_unplug_work, + msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); +} + +void +leapioraid_base_stop_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + wq = ioc->hba_hot_unplug_work_q; + ioc->hba_hot_unplug_work_q = NULL; + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->hba_hot_unplug_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +static void +leapioraid_base_stop_smart_polling(struct LEAPIORAID_ADAPTER *ioc) +{ + struct workqueue_struct *wq; + + wq = ioc->smart_poll_work_q; + ioc->smart_poll_work_q = NULL; + if (wq) { + if (!cancel_delayed_work(&ioc->smart_poll_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +void +leapioraid_base_fault_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code) +{ + pr_err("%s fault_state(0x%04x)!\n", + ioc->name, fault_code); +} + +void +leapioraid_base_coredump_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code) +{ + pr_err("%s coredump_state(0x%04x)!\n", + ioc->name, fault_code); +} + +int +leapioraid_base_wait_for_coredump_completion(struct LEAPIORAID_ADAPTER *ioc, + const char *caller) +{ + u8 timeout = + (ioc->manu_pg11.CoreDumpTOSec) ? 
ioc->manu_pg11.CoreDumpTOSec : 15; + int ioc_state = + leapioraid_base_wait_on_iocstate(ioc, LEAPIORAID_IOC_STATE_FAULT, + timeout); + + if (ioc_state) + pr_err("%s %s: CoreDump timed out. (ioc_state=0x%x)\n", + ioc->name, caller, ioc_state); + else + pr_info("%s %s: CoreDump completed. (ioc_state=0x%x)\n", + ioc->name, caller, ioc_state); + return ioc_state; +} + +void +leapioraid_halt_firmware(struct LEAPIORAID_ADAPTER *ioc, u8 set_fault) +{ + u32 doorbell; + + if ((!ioc->fwfault_debug) && (!set_fault)) + return; + if (!set_fault) + dump_stack(); + doorbell = + ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) + == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, doorbell); + } else if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) + leapioraid_base_coredump_info(ioc, + doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + else { + writel(0xC0FFEE00, &ioc->chip->Doorbell); + if (!set_fault) + pr_err("%s Firmware is halted due to command timeout\n", + ioc->name); + } + if (set_fault) + return; + if (ioc->fwfault_debug == 2) { + for (;;) + ; + } else + panic("panic in %s\n", __func__); +} + +static void +leapioraid_base_group_cpus_on_irq(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_adapter_reply_queue *reply_q; + unsigned int i, cpu, group, nr_cpus, nr_msix, index = 0; + int iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + int unmanaged_q_count = ioc->high_iops_queues + iopoll_q_count; + + cpu = cpumask_first(cpu_online_mask); + nr_msix = ioc->reply_queue_count - unmanaged_q_count; + nr_cpus = num_online_cpus(); + group = nr_cpus / nr_msix; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) + continue; + if (cpu >= nr_cpus) + break; + if (index < nr_cpus % nr_msix) + group++; + for (i = 0; i < group; i++) { + ioc->cpu_msix_table[cpu] = reply_q->msix_index; + cpu = cpumask_next(cpu, cpu_online_mask); + } + index++; + } +} + +static void +leapioraid_base_sas_ioc_info(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidDefaultRep_t *mpi_reply, + struct LeapioraidReqHeader_t *request_hdr) +{ + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + char *desc = NULL; + u16 frame_sz; + char *func_str = NULL; + + if (request_hdr->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST || + request_hdr->Function == LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH + || request_hdr->Function == LEAPIORAID_FUNC_EVENT_NOTIFICATION) + return; + if (ioc_status == LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE) + return; + switch (ioc_status) { + case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION: + desc = "invalid function"; + break; + case LEAPIORAID_IOCSTATUS_BUSY: + desc = "busy"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_SGL: + desc = "invalid sgl"; + break; + case LEAPIORAID_IOCSTATUS_INTERNAL_ERROR: + desc = "internal error"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_VPID: + desc = "invalid vpid"; + break; + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES: + desc = "insufficient resources"; + break; + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER: + desc = "insufficient power"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_FIELD: + desc = "invalid field"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_STATE: + desc = "invalid state"; + break; + case LEAPIORAID_IOCSTATUS_OP_STATE_NOT_SUPPORTED: + desc = "op state not supported"; + break; + 
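+ /* configuration-page status codes follow; SCSI/target and EEDP codes are mapped further below */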
case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_ACTION: + desc = "config invalid action"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_TYPE: + desc = "config invalid type"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_DATA: + desc = "config invalid data"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_NO_DEFAULTS: + desc = "config no defaults"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_CANT_COMMIT: + desc = "config can not commit"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN: + case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN: + case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED: + case LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED: + case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED: + break; + case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR: + if (!ioc->disable_eedp_support) + desc = "eedp guard error"; + break; + case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR: + if (!ioc->disable_eedp_support) + desc = "eedp ref tag error"; + break; + case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR: + if (!ioc->disable_eedp_support) + desc = "eedp app tag error"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_INVALID_IO_INDEX: + desc = "target invalid io index"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_ABORTED: + desc = "target aborted"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_NO_CONN_RETRYABLE: + desc = "target no conn retryable"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_NO_CONNECTION: + desc = "target no connection"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH: + desc = "target xfer count mismatch"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_DATA_OFFSET_ERROR: + desc = "target data offset error"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA: + desc = "target too much write data"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_IU_TOO_SHORT: + desc = "target iu too short"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT: + desc = "target ack nak timeout"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_NAK_RECEIVED: + desc = "target nak received"; + break; + case LEAPIORAID_IOCSTATUS_SAS_SMP_REQUEST_FAILED: + desc = "smp request failed"; + break; + case LEAPIORAID_IOCSTATUS_SAS_SMP_DATA_OVERRUN: + desc = "smp data overrun"; + break; + default: + break; + } + if (!desc) + return; + switch (request_hdr->Function) { + case LEAPIORAID_FUNC_CONFIG: + frame_sz = sizeof(struct LeapioraidCfgReq_t) + ioc->sge_size; + func_str = "config_page"; + break; + case LEAPIORAID_FUNC_SCSI_TASK_MGMT: + frame_sz = sizeof(struct LeapioraidSCSITmgReq_t); + func_str = "task_mgmt"; + break; + case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL: + frame_sz = sizeof(struct LeapioraidSasIoUnitControlReq_t); + func_str = "sas_iounit_ctl"; + break; + case LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR: + frame_sz = sizeof(struct LeapioraidSepReq_t); + func_str = "enclosure"; + break; + case LEAPIORAID_FUNC_IOC_INIT: + frame_sz = sizeof(struct LeapioraidIOCInitReq_t); + func_str = "ioc_init"; + break; + case LEAPIORAID_FUNC_PORT_ENABLE: + frame_sz = sizeof(struct LeapioraidPortEnableReq_t); + func_str = "port_enable"; + break; + case LEAPIORAID_FUNC_SMP_PASSTHROUGH: + frame_sz = + sizeof(struct LeapioraidSmpPassthroughReq_t) + ioc->sge_size; + func_str = 
"smp_passthru"; + break; + default: + frame_sz = 32; + func_str = "unknown"; + break; + } + pr_warn("%s ioc_status: %s(0x%04x), request(0x%p), (%s)\n", + ioc->name, desc, ioc_status, request_hdr, func_str); + leapioraid_debug_dump_mf(request_hdr, frame_sz / 4); +} + +static void +leapioraid_base_display_event_data(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventNotificationRep_t *mpi_reply) +{ + char *desc = NULL; + u16 event; + + if (!(ioc->logging_level & LEAPIORAID_DEBUG_EVENTS)) + return; + event = le16_to_cpu(mpi_reply->Event); + if (ioc->warpdrive_msg) { + switch (event) { + case LEAPIORAID_EVENT_IR_OPERATION_STATUS: + case LEAPIORAID_EVENT_IR_VOLUME: + case LEAPIORAID_EVENT_IR_PHYSICAL_DISK: + case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST: + case LEAPIORAID_EVENT_LOG_ENTRY_ADDED: + return; + } + } + switch (event) { + case LEAPIORAID_EVENT_LOG_DATA: + desc = "Log Data"; + break; + case LEAPIORAID_EVENT_STATE_CHANGE: + desc = "Status Change"; + break; + case LEAPIORAID_EVENT_HARD_RESET_RECEIVED: + desc = "Hard Reset Received"; + break; + case LEAPIORAID_EVENT_EVENT_CHANGE: + desc = "Event Change"; + break; + case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE: + desc = "Device Status Change"; + break; + case LEAPIORAID_EVENT_IR_OPERATION_STATUS: + desc = "IR Operation Status"; + break; + case LEAPIORAID_EVENT_SAS_DISCOVERY: + { + struct LeapioraidEventDataSasDiscovery_t *event_data = + (struct LeapioraidEventDataSasDiscovery_t *) mpi_reply->EventData; + pr_info("%s SAS Discovery: (%s)", + ioc->name, + (event_data->ReasonCode == + LEAPIORAID_EVENT_SAS_DISC_RC_STARTED) ? "start" : + "stop"); + if (event_data->DiscoveryStatus) + pr_info("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_info("\n"); + return; + } + case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE: + desc = "SAS Broadcast Primitive"; + break; + case LEAPIORAID_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: + desc = "SAS Init Device Status Change"; + break; + case LEAPIORAID_EVENT_SAS_INIT_TABLE_OVERFLOW: + desc = "SAS Init Table Overflow"; + break; + case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + desc = "SAS Topology Change List"; + break; + case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + desc = "SAS Enclosure Device Status Change"; + break; + case LEAPIORAID_EVENT_IR_VOLUME: + desc = "IR Volume"; + break; + case LEAPIORAID_EVENT_IR_PHYSICAL_DISK: + desc = "IR Physical Disk"; + break; + case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST: + desc = "IR Configuration Change List"; + break; + case LEAPIORAID_EVENT_LOG_ENTRY_ADDED: + desc = "Log Entry Added"; + break; + case LEAPIORAID_EVENT_TEMP_THRESHOLD: + desc = "Temperature Threshold"; + break; + case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + desc = "SAS Device Discovery Error"; + break; + } + if (!desc) + return; + pr_info("%s %s\n", ioc->name, desc); +} + +static void +leapioraid_base_sas_log_info(struct LEAPIORAID_ADAPTER *ioc, u32 log_info) +{ + union loginfo_type { + u32 loginfo; + struct { + u32 subcode:16; + u32 code:8; + u32 originator:4; + u32 bus_type:4; + } dw; + }; + union loginfo_type sas_loginfo; + char *originator_str = NULL; + + sas_loginfo.loginfo = log_info; + if (sas_loginfo.dw.bus_type != 3) + return; + if (log_info == 0x31170000) + return; + if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == + 0x31140000 || log_info == 0x31130000)) + return; + switch (sas_loginfo.dw.originator) { + case 0: + originator_str = "IOP"; + break; + case 1: + originator_str = "PL"; + break; + case 2: + if 
(ioc->warpdrive_msg) + originator_str = "WarpDrive"; + else + originator_str = "IR"; + break; + } + pr_warn("%s log_info(0x%08x):\n\t\t" + "originator(%s), code(0x%02x), sub_code(0x%04x)\n", + ioc->name, + log_info, + originator_str, + sas_loginfo.dw.code, + sas_loginfo.dw.subcode); +} + +static void +leapioraid_base_display_reply_info(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + u16 ioc_status; + u32 loginfo = 0; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (unlikely(!mpi_reply)) { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + if ((ioc_status & LEAPIORAID_IOCSTATUS_MASK) && + (ioc->logging_level & LEAPIORAID_DEBUG_REPLY)) { + leapioraid_base_sas_ioc_info(ioc, mpi_reply, + leapioraid_base_get_msg_frame(ioc, + smid)); + } + if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { + loginfo = le32_to_cpu(mpi_reply->IOCLogInfo); + leapioraid_base_sas_log_info(ioc, loginfo); + } +} + +u8 +leapioraid_base_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply && mpi_reply->Function == LEAPIORAID_FUNC_EVENT_ACK) + return leapioraid_check_for_pending_internal_cmds(ioc, smid); + if (ioc->base_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + ioc->base_cmds.status |= LEAPIORAID_CMD_COMPLETE; + if (mpi_reply) { + ioc->base_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + memcpy(ioc->base_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + } + ioc->base_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->base_cmds.done); + return 1; +} + +static u8 +leapioraid_base_async_event( + struct LEAPIORAID_ADAPTER *ioc, u8 msix_index, u32 reply) +{ + struct LeapioraidEventNotificationRep_t *mpi_reply; + struct LeapioraidEventAckReq_t *ack_request; + u16 smid; + struct leapioraid_event_ack_list *delayed_event_ack; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + if (mpi_reply->Function != LEAPIORAID_FUNC_EVENT_NOTIFICATION) + return 1; + leapioraid_base_display_event_data(ioc, mpi_reply); + if (!(mpi_reply->AckRequired & LEAPIORAID_EVENT_NOTIFICATION_ACK_REQUIRED)) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + delayed_event_ack = + kzalloc(sizeof(*delayed_event_ack), GFP_ATOMIC); + if (!delayed_event_ack) + goto out; + INIT_LIST_HEAD(&delayed_event_ack->list); + delayed_event_ack->Event = mpi_reply->Event; + delayed_event_ack->EventContext = mpi_reply->EventContext; + list_add_tail(&delayed_event_ack->list, + &ioc->delayed_event_ack_list); + dewtprintk(ioc, pr_err( + "%s DELAYED: EVENT ACK: event (0x%04x)\n", + ioc->name, + le16_to_cpu(mpi_reply->Event))); + goto out; + } + ack_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(ack_request, 0, sizeof(struct LeapioraidEventAckReq_t)); + ack_request->Function = LEAPIORAID_FUNC_EVENT_ACK; + ack_request->Event = mpi_reply->Event; + ack_request->EventContext = mpi_reply->EventContext; + ack_request->VF_ID = 0; + ack_request->VP_ID = 0; + ioc->put_smid_default(ioc, smid); +out: + leapioraid_scsihost_event_callback(ioc, msix_index, reply); + leapioraid_ctl_event_callback(ioc, msix_index, reply); + return 1; +} + +inline +struct leapioraid_scsiio_tracker *leapioraid_base_scsi_cmd_priv( + struct 
scsi_cmnd *scmd) +{ + return scsi_cmd_priv(scmd); +} + +struct leapioraid_scsiio_tracker *leapioraid_get_st_from_smid( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + struct scsi_cmnd *cmd; + + if (WARN_ON(!smid) || WARN_ON(smid >= ioc->hi_priority_smid)) + return NULL; + cmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (cmd) + return leapioraid_base_scsi_cmd_priv(cmd); + return NULL; +} + +static u8 +leapioraid_base_get_cb_idx(struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + int i; + u16 ctl_smid = ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL; + u16 discovery_smid = + ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY; + u8 cb_idx = 0xFF; + + if (smid < ioc->hi_priority_smid) { + struct leapioraid_scsiio_tracker *st; + + if (smid < ctl_smid) { + st = leapioraid_get_st_from_smid(ioc, smid); + if (st) + cb_idx = st->cb_idx; + } else if (smid < discovery_smid) + cb_idx = ioc->ctl_cb_idx; + else + cb_idx = ioc->scsih_cb_idx; + } else if (smid < ioc->internal_smid) { + i = smid - ioc->hi_priority_smid; + cb_idx = ioc->hpr_lookup[i].cb_idx; + } else if (smid <= ioc->hba_queue_depth) { + i = smid - ioc->internal_smid; + cb_idx = ioc->internal_lookup[i].cb_idx; + } + return cb_idx; +} + +void +leapioraid_base_pause_mq_polling(struct LEAPIORAID_ADAPTER *ioc) +{ + int iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->blk_mq_poll_queues[qid].pause, 1); + for (qid = 0; qid < iopoll_q_count; qid++) { + while (atomic_read(&ioc->blk_mq_poll_queues[qid].busy)) { + cpu_relax(); + udelay(500); + } + } +} + +void +leapioraid_base_resume_mq_polling(struct LEAPIORAID_ADAPTER *ioc) +{ + int iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->blk_mq_poll_queues[qid].pause, 0); +} + +void +leapioraid_base_mask_interrupts(struct LEAPIORAID_ADAPTER *ioc) +{ + u32 him_register; + + ioc->mask_interrupts = 1; + him_register = + ioc->base_readl(&ioc->chip->HostInterruptMask, + LEAPIORAID_READL_RETRY_COUNT_OF_THREE); + him_register |= + 0x00000001 + 0x00000008 + 0x40000000; + writel(him_register, &ioc->chip->HostInterruptMask); + ioc->base_readl(&ioc->chip->HostInterruptMask, + LEAPIORAID_READL_RETRY_COUNT_OF_THREE); +} + +void +leapioraid_base_unmask_interrupts(struct LEAPIORAID_ADAPTER *ioc) +{ + u32 him_register; + + him_register = + ioc->base_readl(&ioc->chip->HostInterruptMask, + LEAPIORAID_READL_RETRY_COUNT_OF_THREE); + him_register &= ~0x00000008; + writel(him_register, &ioc->chip->HostInterruptMask); + ioc->mask_interrupts = 0; +} + +union leapioraid_reply_descriptor { + u64 word; + struct { + u32 low; + u32 high; + } u; +}; + +static int +leapioraid_base_process_reply_queue( + struct leapioraid_adapter_reply_queue *reply_q) +{ + union leapioraid_reply_descriptor rd; + u64 completed_cmds; + u8 request_descript_type; + u16 smid; + u8 cb_idx; + u32 reply; + u8 msix_index = reply_q->msix_index; + struct LEAPIORAID_ADAPTER *ioc = reply_q->ioc; + union LeapioraidRepDescUnion_t *rpf; + u8 rc; + + completed_cmds = 0; + if (!atomic_add_unless(&reply_q->busy, 1, 1)) + return completed_cmds; + rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index]; + request_descript_type = rpf->Default.ReplyFlags + & LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK; + if (request_descript_type == LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED) { + atomic_dec(&reply_q->busy); + return 1; + } + cb_idx = 0xFF; + do { + 
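+		/*
+		 * Consume reply descriptors one at a time: dispatch each
+		 * SMID to its registered callback, hand address replies
+		 * back to the reply free queue, and mark the descriptor
+		 * unused (all 0xFF) so its slot can be reused.
+		 */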
rd.word = le64_to_cpu(rpf->Words); + if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) + goto out; + reply = 0; + smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); + if (request_descript_type == + LEAPIORAID_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS || + request_descript_type == + LEAPIORAID_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { + cb_idx = leapioraid_base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < LEAPIORAID_MAX_CALLBACKS)) && + (likely(leapioraid_callbacks[cb_idx] != NULL))) { + rc = leapioraid_callbacks[cb_idx] (ioc, smid, + msix_index, 0); + if (rc) + leapioraid_base_free_smid(ioc, smid); + } + } else if (request_descript_type == + LEAPIORAID_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { + reply = + le32_to_cpu(rpf->AddressReply.ReplyFrameAddress); + if (reply > ioc->reply_dma_max_address + || reply < ioc->reply_dma_min_address) + reply = 0; + if (smid) { + cb_idx = leapioraid_base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < LEAPIORAID_MAX_CALLBACKS)) && + (likely(leapioraid_callbacks[cb_idx] != NULL))) { + rc = leapioraid_callbacks[cb_idx] (ioc, + smid, + msix_index, + reply); + if (reply) + leapioraid_base_display_reply_info + (ioc, smid, msix_index, + reply); + if (rc) + leapioraid_base_free_smid(ioc, + smid); + } + } else { + leapioraid_base_async_event(ioc, msix_index, reply); + } + if (reply) { + ioc->reply_free_host_index = + (ioc->reply_free_host_index == + (ioc->reply_free_queue_depth - 1)) ? + 0 : ioc->reply_free_host_index + 1; + ioc->reply_free[ioc->reply_free_host_index] = + cpu_to_le32(reply); + wmb(); /* Make sure that all write ops are in order */ + writel(ioc->reply_free_host_index, + &ioc->chip->ReplyFreeHostIndex); + } + } + rpf->Words = cpu_to_le64(ULLONG_MAX); + reply_q->reply_post_host_index = + (reply_q->reply_post_host_index == + (ioc->reply_post_queue_depth - 1)) ? 
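+		/* wrap the circular reply post host index */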
0 : + reply_q->reply_post_host_index + 1; + request_descript_type = + reply_q->reply_post_free[reply_q->reply_post_host_index].Default.ReplyFlags + & LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK; + completed_cmds++; + if (completed_cmds >= ioc->thresh_hold) { + if (ioc->combined_reply_queue) { + writel(reply_q->reply_post_host_index | + ((msix_index & 7) << + LEAPIORAID_RPHI_MSIX_INDEX_SHIFT), + ioc->replyPostRegisterIndex[msix_index / + 8]); + } else { + writel(reply_q->reply_post_host_index | + (msix_index << + LEAPIORAID_RPHI_MSIX_INDEX_SHIFT), + &ioc->chip->ReplyPostHostIndex); + } + if (!reply_q->is_blk_mq_poll_q && + !reply_q->irq_poll_scheduled) { + reply_q->irq_poll_scheduled = true; + irq_poll_sched(&reply_q->irqpoll); + } + atomic_dec(&reply_q->busy); + return completed_cmds; + } + if (request_descript_type == LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED) + goto out; + if (!reply_q->reply_post_host_index) + rpf = reply_q->reply_post_free; + else + rpf++; + } while (1); +out: + if (!completed_cmds) { + atomic_dec(&reply_q->busy); + return completed_cmds; + } + wmb(); /* Make sure that all write ops are in order */ + if (ioc->combined_reply_queue) { + writel(reply_q->reply_post_host_index | ((msix_index & 7) << + LEAPIORAID_RPHI_MSIX_INDEX_SHIFT), + ioc->replyPostRegisterIndex[msix_index / 8]); + } else { + writel(reply_q->reply_post_host_index | (msix_index << + LEAPIORAID_RPHI_MSIX_INDEX_SHIFT), + &ioc->chip->ReplyPostHostIndex); + } + atomic_dec(&reply_q->busy); + return completed_cmds; +} + +int leapioraid_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + struct LEAPIORAID_ADAPTER *ioc = + (struct LEAPIORAID_ADAPTER *)shost->hostdata; + struct leapioraid_adapter_reply_queue *reply_q; + int num_entries = 0; + int qid = queue_num - ioc->iopoll_q_start_index; + + if (atomic_read(&ioc->blk_mq_poll_queues[qid].pause) || + !atomic_add_unless(&ioc->blk_mq_poll_queues[qid].busy, 1, 1)) + return 0; + reply_q = ioc->blk_mq_poll_queues[qid].reply_q; + num_entries = leapioraid_base_process_reply_queue(reply_q); + atomic_dec(&ioc->blk_mq_poll_queues[qid].busy); + return num_entries; +} + +static irqreturn_t +leapioraid_base_interrupt(int irq, void *bus_id) +{ + struct leapioraid_adapter_reply_queue *reply_q = bus_id; + struct LEAPIORAID_ADAPTER *ioc = reply_q->ioc; + + if (ioc->mask_interrupts) + return IRQ_NONE; + if (reply_q->irq_poll_scheduled) + return IRQ_HANDLED; + return ((leapioraid_base_process_reply_queue(reply_q) > 0) ? 
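+	    /* claim the interrupt only when descriptors were processed */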
+ IRQ_HANDLED : IRQ_NONE); +} + +static +int leapioraid_base_irqpoll(struct irq_poll *irqpoll, int budget) +{ + struct leapioraid_adapter_reply_queue *reply_q; + int num_entries = 0; + + reply_q = container_of(irqpoll, + struct leapioraid_adapter_reply_queue, irqpoll); + if (reply_q->irq_line_enable) { + disable_irq_nosync(reply_q->os_irq); + reply_q->irq_line_enable = false; + } + num_entries = leapioraid_base_process_reply_queue(reply_q); + if (num_entries < budget) { + irq_poll_complete(irqpoll); + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + enable_irq(reply_q->os_irq); + } + return num_entries; +} + +static void +leapioraid_base_init_irqpolls(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_adapter_reply_queue *reply_q, *next; + + if (list_empty(&ioc->reply_queue_list)) + return; + list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + if (reply_q->is_blk_mq_poll_q) + continue; + irq_poll_init(&reply_q->irqpoll, ioc->thresh_hold, + leapioraid_base_irqpoll); + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + reply_q->os_irq = pci_irq_vector(ioc->pdev, + reply_q->msix_index); + } +} + +static inline int +leapioraid_base_is_controller_msix_enabled(struct LEAPIORAID_ADAPTER *ioc) +{ + return (ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable; +} + +void +leapioraid_base_sync_reply_irqs(struct LEAPIORAID_ADAPTER *ioc, u8 poll) +{ + struct leapioraid_adapter_reply_queue *reply_q; + + if (!leapioraid_base_is_controller_msix_enabled(ioc)) + return; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) + return; + if (reply_q->msix_index == 0) + continue; + if (reply_q->is_blk_mq_poll_q) { + leapioraid_base_process_reply_queue(reply_q); + continue; + } + synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); + if (reply_q->irq_poll_scheduled) { + irq_poll_disable(&reply_q->irqpoll); + irq_poll_enable(&reply_q->irqpoll); + if (reply_q->irq_poll_scheduled) { + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + enable_irq(reply_q->os_irq); + } + } + if (poll) + leapioraid_base_process_reply_queue(reply_q); + } +} + +void +leapioraid_base_release_callback_handler(u8 cb_idx) +{ + leapioraid_callbacks[cb_idx] = NULL; +} + +u8 +leapioraid_base_register_callback_handler(LEAPIORAID_CALLBACK cb_func) +{ + u8 cb_idx; + + for (cb_idx = LEAPIORAID_MAX_CALLBACKS - 1; cb_idx; cb_idx--) + if (leapioraid_callbacks[cb_idx] == NULL) + break; + leapioraid_callbacks[cb_idx] = cb_func; + return cb_idx; +} + +void +leapioraid_base_initialize_callback_handler(void) +{ + u8 cb_idx; + + for (cb_idx = 0; cb_idx < LEAPIORAID_MAX_CALLBACKS; cb_idx++) + leapioraid_base_release_callback_handler(cb_idx); +} + +static void +leapioraid_base_build_zero_len_sge( + struct LEAPIORAID_ADAPTER *ioc, void *paddr) +{ + u32 flags_length = (u32) ((LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | + LEAPIORAID_SGE_FLAGS_END_OF_BUFFER | + LEAPIORAID_SGE_FLAGS_END_OF_LIST | + LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT) << + LEAPIORAID_SGE_FLAGS_SHIFT); + + ioc->base_add_sg_single(paddr, flags_length, -1); +} + +static void +leapioraid_base_add_sg_single_32(void *paddr, u32 flags_length, + dma_addr_t dma_addr) +{ + struct LeapioSGESimple32_t *sgel = paddr; + + flags_length |= (LEAPIORAID_SGE_FLAGS_32_BIT_ADDRESSING | + LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS) << + LEAPIORAID_SGE_FLAGS_SHIFT; + sgel->FlagsLength = 
cpu_to_le32(flags_length); + sgel->Address = cpu_to_le32(dma_addr); +} + +static void +leapioraid_base_add_sg_single_64(void *paddr, u32 flags_length, + dma_addr_t dma_addr) +{ + struct LeapioSGESimple64_t *sgel = paddr; + + flags_length |= (LEAPIORAID_SGE_FLAGS_64_BIT_ADDRESSING | + LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS) << + LEAPIORAID_SGE_FLAGS_SHIFT; + sgel->FlagsLength = cpu_to_le32(flags_length); + sgel->Address = cpu_to_le64(dma_addr); +} + +static +struct leapioraid_chain_tracker *leapioraid_base_get_chain_buffer_tracker( + struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + struct leapioraid_chain_tracker *chain_req; + struct leapioraid_scsiio_tracker *st = leapioraid_base_scsi_cmd_priv(scmd); + u16 smid = st->smid; + u8 chain_offset = + atomic_read(&ioc->chain_lookup[smid - 1].chain_offset); + + if (chain_offset == ioc->chains_needed_per_io) + return NULL; + chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset]; + atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset); + return chain_req; +} + +static void +leapioraid_base_build_sg(struct LEAPIORAID_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, + dma_addr_t data_in_dma, size_t data_in_sz) +{ + u32 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + leapioraid_base_build_zero_len_sge(ioc, psge); + return; + } + if (data_out_sz && data_in_sz) { + sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_SGE_FLAGS_END_OF_BUFFER | + LEAPIORAID_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + psge += ioc->sge_size; + sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | + LEAPIORAID_SGE_FLAGS_END_OF_BUFFER | + LEAPIORAID_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, data_in_dma); + } else if (data_out_sz) { + sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | + LEAPIORAID_SGE_FLAGS_END_OF_BUFFER | + LEAPIORAID_SGE_FLAGS_END_OF_LIST | + LEAPIORAID_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + } else if (data_in_sz) { + sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | + LEAPIORAID_SGE_FLAGS_END_OF_BUFFER | + LEAPIORAID_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, data_in_dma); + } +} + +u32 +leapioraid_base_mod64(u64 dividend, u32 divisor) +{ + u32 remainder; + + if (!divisor) { + pr_err("leapioraid : DIVISOR is zero, in div fn\n"); + return 0; + } + remainder = do_div(dividend, divisor); + return remainder; +} + +static void +leapioraid_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, + u32 length, dma_addr_t dma_addr) +{ + struct LEAPIORAID_IEEE_SGE_CHAIN64 *sgel = paddr; + + sgel->Flags = flags; + sgel->NextChainOffset = chain_offset; + sgel->Length = cpu_to_le32(length); + sgel->Address = cpu_to_le64(dma_addr); +} + +static void +leapioraid_base_build_zero_len_sge_ieee(struct LEAPIORAID_ADAPTER *ioc, + void *paddr) +{ + u8 sgl_flags = (LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR | + LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST); + + leapioraid_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); +} + +static int 
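+/*
+ * Build an IEEE SGL for a SCSI command: DMA-map the scatterlist,
+ * place as many simple elements as fit in the request frame, then
+ * continue through chain buffers.  Returns 0 on success, -ENOMEM
+ * when the DMA mapping fails, or -1 when no chain tracker is free.
+ */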
+leapioraid_base_build_sg_scmd_ieee(struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid) +{ + struct LeapioraidSCSIIOReq_t *mpi_request; + dma_addr_t chain_dma; + struct scatterlist *sg_scmd; + void *sg_local, *chain; + u32 chain_offset; + u32 chain_length; + int sges_left; + u32 sges_in_segment; + u8 simple_sgl_flags; + u8 simple_sgl_flags_last; + u8 chain_sgl_flags; + struct leapioraid_chain_tracker *chain_req; + + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + simple_sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR; + simple_sgl_flags_last = simple_sgl_flags | + LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST; + chain_sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_CHAIN_ELEMENT | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR; + + sg_scmd = scsi_sglist(scmd); + sges_left = scsi_dma_map(scmd); + if (sges_left < 0) { + pr_err_ratelimited + ("sd %s: scsi_dma_map failed: request for %d bytes!\n", + dev_name(&scmd->device->sdev_gendev), scsi_bufflen(scmd)); + return -ENOMEM; + } + sg_local = &mpi_request->SGL; + sges_in_segment = (ioc->request_sz - + offsetof(struct LeapioraidSCSIIOReq_t, + SGL)) / ioc->sge_size_ieee; + if (sges_left <= sges_in_segment) + goto fill_in_last_segment; + mpi_request->ChainOffset = (sges_in_segment - 1) + + (offsetof(struct LeapioraidSCSIIOReq_t, SGL) / ioc->sge_size_ieee); + while (sges_in_segment > 1) { + leapioraid_base_add_sg_single_ieee(sg_local, simple_sgl_flags, + 0, sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + chain_req = leapioraid_base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + do { + sges_in_segment = (sges_left <= + ioc->max_sges_in_chain_message) ? sges_left : + ioc->max_sges_in_chain_message; + chain_offset = (sges_left == sges_in_segment) ? 
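+		/* a chain offset of 0 marks the last chain segment */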
+ 0 : sges_in_segment; + chain_length = sges_in_segment * ioc->sge_size_ieee; + if (chain_offset) + chain_length += ioc->sge_size_ieee; + leapioraid_base_add_sg_single_ieee(sg_local, chain_sgl_flags, + chain_offset, chain_length, + chain_dma); + sg_local = chain; + if (!chain_offset) + goto fill_in_last_segment; + while (sges_in_segment) { + leapioraid_base_add_sg_single_ieee(sg_local, + simple_sgl_flags, 0, + sg_dma_len(sg_scmd), + sg_dma_address + (sg_scmd)); + + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + chain_req = leapioraid_base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + } while (1); +fill_in_last_segment: + while (sges_left > 0) { + if (sges_left == 1) + leapioraid_base_add_sg_single_ieee(sg_local, + simple_sgl_flags_last, + 0, + sg_dma_len(sg_scmd), + sg_dma_address + (sg_scmd)); + else + leapioraid_base_add_sg_single_ieee(sg_local, + simple_sgl_flags, 0, + sg_dma_len(sg_scmd), + sg_dma_address + (sg_scmd)); + + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + } + return 0; +} + +static void +leapioraid_base_build_sg_ieee(struct LEAPIORAID_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, + dma_addr_t data_in_dma, size_t data_in_sz) +{ + u8 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + leapioraid_base_build_zero_len_sge_ieee(ioc, psge); + return; + } + if (data_out_sz && data_in_sz) { + sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR; + leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0, + data_out_sz, data_out_dma); + psge += ioc->sge_size_ieee; + sgl_flags |= LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST; + leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0, + data_in_sz, data_in_dma); + } else if (data_out_sz) { + sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR; + leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0, + data_out_sz, data_out_dma); + } else if (data_in_sz) { + sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR; + leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0, + data_in_sz, data_in_dma); + } +} + +#define leapioraid_convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) +static int +leapioraid_base_config_dma_addressing(struct LEAPIORAID_ADAPTER *ioc, + struct pci_dev *pdev) +{ + struct sysinfo s; + char *desc = "64"; + u64 consistant_dma_mask = DMA_BIT_MASK(64); + u64 dma_mask = DMA_BIT_MASK(64); + + consistant_dma_mask = DMA_BIT_MASK(63); + dma_mask = DMA_BIT_MASK(63); + desc = "63"; + ioc->dma_mask = 63; + if (ioc->use_32bit_dma) + consistant_dma_mask = DMA_BIT_MASK(32); + if (sizeof(dma_addr_t) > 4) { + if (!dma_set_mask(&pdev->dev, dma_mask) && + !dma_set_coherent_mask(&pdev->dev, consistant_dma_mask)) { + ioc->base_add_sg_single = + &leapioraid_base_add_sg_single_64; + ioc->sge_size = sizeof(struct LeapioSGESimple64_t); + if (!ioc->use_32bit_dma) + goto out; + return 0; + } + } + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) + && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { + ioc->base_add_sg_single = &leapioraid_base_add_sg_single_32; + ioc->sge_size = sizeof(struct LeapioSGESimple32_t); + desc = "32"; + ioc->dma_mask = 32; + } else + return -ENODEV; +out: + si_meminfo(&s); + pr_info("%s %s BIT PCI BUS DMA 
ADDRESSING SUPPORTED, total mem (%ld kB)\n", + ioc->name, desc, leapioraid_convert_to_kb(s.totalram)); + return 0; +} + +int +leapioraid_base_check_and_get_msix_vectors(struct pci_dev *pdev) +{ + int base; + u16 message_control, msix_vector_count; + + base = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (!base) + return -EINVAL; + pci_read_config_word(pdev, base + 2, &message_control); + msix_vector_count = (message_control & 0x3FF) + 1; + return msix_vector_count; +} + +enum leapioraid_pci_bus_speed { + LEAPIORAID_PCIE_SPEED_2_5GT = 0x14, + LEAPIORAID_PCIE_SPEED_5_0GT = 0x15, + LEAPIORAID_PCIE_SPEED_8_0GT = 0x16, + LEAPIORAID_PCIE_SPEED_16_0GT = 0x17, + LEAPIORAID_PCI_SPEED_UNKNOWN = 0xff, +}; + +const unsigned char leapioraid_pcie_link_speed[] = { + LEAPIORAID_PCI_SPEED_UNKNOWN, + LEAPIORAID_PCIE_SPEED_2_5GT, + LEAPIORAID_PCIE_SPEED_5_0GT, + LEAPIORAID_PCIE_SPEED_8_0GT, + LEAPIORAID_PCIE_SPEED_16_0GT, + LEAPIORAID_PCI_SPEED_UNKNOWN, + LEAPIORAID_PCI_SPEED_UNKNOWN, + LEAPIORAID_PCI_SPEED_UNKNOWN, + LEAPIORAID_PCI_SPEED_UNKNOWN, + LEAPIORAID_PCI_SPEED_UNKNOWN, + LEAPIORAID_PCI_SPEED_UNKNOWN, + LEAPIORAID_PCI_SPEED_UNKNOWN, + LEAPIORAID_PCI_SPEED_UNKNOWN, + LEAPIORAID_PCI_SPEED_UNKNOWN, + LEAPIORAID_PCI_SPEED_UNKNOWN, + LEAPIORAID_PCI_SPEED_UNKNOWN +}; + +static void +leapioraid_base_check_and_enable_high_iops_queues( + struct LEAPIORAID_ADAPTER *ioc, + int hba_msix_vector_count, + int iopoll_q_count) +{ + u16 lnksta; + enum leapioraid_pci_bus_speed speed; + + if (perf_mode == LEAPIORAID_PERF_MODE_IOPS || + perf_mode == LEAPIORAID_PERF_MODE_LATENCY || iopoll_q_count) { + ioc->high_iops_queues = 0; + return; + } + if (perf_mode == LEAPIORAID_PERF_MODE_DEFAULT) { + pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta); + speed = leapioraid_pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + dev_info(&ioc->pdev->dev, "PCIe device speed is %s\n", + speed == LEAPIORAID_PCIE_SPEED_2_5GT ? "2.5GHz" : + speed == LEAPIORAID_PCIE_SPEED_5_0GT ? "5.0GHz" : + speed == LEAPIORAID_PCIE_SPEED_8_0GT ? "8.0GHz" : + speed == LEAPIORAID_PCIE_SPEED_16_0GT ? 
"16.0GHz" : + "Unknown"); + if (speed < LEAPIORAID_PCIE_SPEED_16_0GT) { + ioc->high_iops_queues = 0; + return; + } + } + if (!reset_devices && + hba_msix_vector_count == LEAPIORAID_GEN35_MAX_MSIX_QUEUES && + num_online_cpus() >= LEAPIORAID_HIGH_IOPS_REPLY_QUEUES && + max_msix_vectors == -1) + ioc->high_iops_queues = LEAPIORAID_HIGH_IOPS_REPLY_QUEUES; + else + ioc->high_iops_queues = 0; +} + +void +leapioraid_base_disable_msix(struct LEAPIORAID_ADAPTER *ioc) +{ + if (!ioc->msix_enable) + return; + pci_free_irq_vectors(ioc->pdev); + kfree(ioc->blk_mq_poll_queues); + ioc->msix_enable = 0; +} + +void +leapioraid_base_free_irq(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_adapter_reply_queue *reply_q, *next; + + if (list_empty(&ioc->reply_queue_list)) + return; + list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + list_del(&reply_q->list); + if (reply_q->is_blk_mq_poll_q) { + kfree(reply_q); + continue; + } + irq_poll_disable(&reply_q->irqpoll); + if (ioc->smp_affinity_enable) + irq_set_affinity_hint(pci_irq_vector(ioc->pdev, + reply_q->msix_index), NULL); + free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index), + reply_q); + kfree(reply_q); + } +} + +static int +leapioraid_base_request_irq(struct LEAPIORAID_ADAPTER *ioc, u8 index) +{ + struct leapioraid_adapter_reply_queue *reply_q; + int r; + u8 qid; + + reply_q = kzalloc(sizeof(struct leapioraid_adapter_reply_queue), + GFP_KERNEL); + if (!reply_q) + return -ENOMEM; + + reply_q->ioc = ioc; + reply_q->msix_index = index; + atomic_set(&reply_q->busy, 0); + if (index >= ioc->iopoll_q_start_index) { + qid = index - ioc->iopoll_q_start_index; + snprintf(reply_q->name, LEAPIORAID_NAME_LENGTH, "%s%u-mq-poll%u", + ioc->driver_name, ioc->id, qid); + reply_q->is_blk_mq_poll_q = 1; + ioc->blk_mq_poll_queues[qid].reply_q = reply_q; + INIT_LIST_HEAD(&reply_q->list); + list_add_tail(&reply_q->list, &ioc->reply_queue_list); + return 0; + } + if (ioc->msix_enable) + snprintf(reply_q->name, LEAPIORAID_NAME_LENGTH, "%s%u-msix%u", + ioc->driver_name, ioc->id, index); + else + snprintf(reply_q->name, LEAPIORAID_NAME_LENGTH, "%s%d", + ioc->driver_name, ioc->id); + r = request_irq(pci_irq_vector(ioc->pdev, index), leapioraid_base_interrupt, + IRQF_SHARED, reply_q->name, reply_q); + if (r) { + pr_err("%s unable to allocate interrupt %d!\n", reply_q->name, + pci_irq_vector(ioc->pdev, index)); + kfree(reply_q); + return -EBUSY; + } + + INIT_LIST_HEAD(&reply_q->list); + list_add_tail(&reply_q->list, &ioc->reply_queue_list); + return 0; +} + +static int leapioraid_base_alloc_irq_vectors(struct LEAPIORAID_ADAPTER *ioc) +{ + int i, irq_flags = PCI_IRQ_MSIX; + struct irq_affinity desc = {.pre_vectors = ioc->high_iops_queues }; + struct irq_affinity *descp = &desc; + int nr_msix_vectors = ioc->iopoll_q_start_index; + + if (ioc->smp_affinity_enable) + irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; + else + descp = NULL; + dinitprintk(ioc, pr_err( + "%s high_iops_queues: %d,\n\t\t" + "reply_queue_count: %d, nr_msix_vectors: %d\n", + ioc->name, + ioc->high_iops_queues, + ioc->reply_queue_count, + nr_msix_vectors)); + i = pci_alloc_irq_vectors_affinity( + ioc->pdev, + ioc->high_iops_queues, + nr_msix_vectors, irq_flags, descp); + return i; +} + +static int +leapioraid_base_enable_msix(struct LEAPIORAID_ADAPTER *ioc) +{ + int r, i, msix_vector_count, local_max_msix_vectors; + int iopoll_q_count = 0; + + ioc->msix_load_balance = false; + msix_vector_count = + leapioraid_base_check_and_get_msix_vectors(ioc->pdev); + if (msix_vector_count <= 0) 
{ + dfailprintk(ioc, pr_info("%s msix not supported\n", ioc->name)); + goto try_ioapic; + } + dinitprintk(ioc, pr_err( + "%s MSI-X vectors supported: %d, no of cores: %d\n", + ioc->name, msix_vector_count, ioc->cpu_count)); + ioc->reply_queue_count = min_t(int, ioc->cpu_count, msix_vector_count); + if (!ioc->rdpq_array_enable && max_msix_vectors == -1) { + if (reset_devices) + local_max_msix_vectors = 1; + else + local_max_msix_vectors = 8; + } else + local_max_msix_vectors = max_msix_vectors; + if (local_max_msix_vectors == 0) + goto try_ioapic; + if (!ioc->combined_reply_queue) { + pr_err( + "%s combined reply queue is off, so enabling msix load balance\n", + ioc->name); + ioc->msix_load_balance = true; + } + if (ioc->msix_load_balance) + ioc->smp_affinity_enable = 0; + if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1) + ioc->shost->host_tagset = 0; + if (ioc->shost->host_tagset) + iopoll_q_count = poll_queues; + if (iopoll_q_count) { + ioc->blk_mq_poll_queues = kcalloc(iopoll_q_count, + sizeof(struct + leapioraid_blk_mq_poll_queue), + GFP_KERNEL); + if (!ioc->blk_mq_poll_queues) + iopoll_q_count = 0; + } + leapioraid_base_check_and_enable_high_iops_queues(ioc, + msix_vector_count, + iopoll_q_count); + ioc->reply_queue_count = + min_t(int, ioc->reply_queue_count + ioc->high_iops_queues, + msix_vector_count); + if (local_max_msix_vectors > 0) + ioc->reply_queue_count = min_t(int, local_max_msix_vectors, + ioc->reply_queue_count); + if (iopoll_q_count) { + if (ioc->reply_queue_count < (iopoll_q_count + 1)) + iopoll_q_count = 0; + ioc->reply_queue_count = + min(ioc->reply_queue_count + iopoll_q_count, + msix_vector_count); + } + ioc->iopoll_q_start_index = ioc->reply_queue_count - iopoll_q_count; + r = leapioraid_base_alloc_irq_vectors(ioc); + if (r < 0) { + pr_warn( + "%s pci_alloc_irq_vectors failed (r=%d) !!!\n", + ioc->name, r); + goto try_ioapic; + } + ioc->msix_enable = 1; + for (i = 0; i < ioc->reply_queue_count; i++) { + r = leapioraid_base_request_irq(ioc, i); + if (r) { + leapioraid_base_free_irq(ioc); + leapioraid_base_disable_msix(ioc); + goto try_ioapic; + } + } + dinitprintk(ioc, + pr_info("%s High IOPs queues : %s\n", + ioc->name, + ioc->high_iops_queues ? 
"enabled" : "disabled")); + return 0; +try_ioapic: + ioc->high_iops_queues = 0; + dinitprintk(ioc, pr_err( + "%s High IOPs queues : disabled\n", ioc->name)); + ioc->reply_queue_count = 1; + ioc->iopoll_q_start_index = ioc->reply_queue_count - 0; + r = leapioraid_base_request_irq(ioc, 0); + return r; +} + +static void +leapioraid_base_import_managed_irqs_affinity( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_adapter_reply_queue *reply_q; + unsigned int cpu, nr_msix; + int local_numa_node; + unsigned int index = 0; + + nr_msix = ioc->reply_queue_count; + if (!nr_msix) + return; + if (ioc->smp_affinity_enable) { + if (ioc->high_iops_queues) { + local_numa_node = dev_to_node(&ioc->pdev->dev); + for (index = 0; index < ioc->high_iops_queues; index++) { + irq_set_affinity_hint(pci_irq_vector(ioc->pdev, + index), + cpumask_of_node + (local_numa_node)); + } + } + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + const cpumask_t *mask; + + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) + continue; + mask = pci_irq_get_affinity(ioc->pdev, + reply_q->msix_index); + if (!mask) { + dinitprintk(ioc, pr_warn( + "%s no affinity for msi %x\n", + ioc->name, + reply_q->msix_index)); + goto fall_back; + } + for_each_cpu_and(cpu, mask, cpu_online_mask) { + if (cpu >= ioc->cpu_msix_table_sz) + break; + ioc->cpu_msix_table[cpu] = reply_q->msix_index; + } + } + return; + } +fall_back: + leapioraid_base_group_cpus_on_irq(ioc); +} + +static void +leapioraid_base_assign_reply_queues(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_adapter_reply_queue *reply_q; + int reply_queue; + + if (!leapioraid_base_is_controller_msix_enabled(ioc)) + return; + if (ioc->msix_load_balance) + return; + memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); + if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) { + ioc->reply_queue_count = ioc->facts.MaxMSIxVectors; + reply_queue = 0; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + reply_q->msix_index = reply_queue; + if (++reply_queue == ioc->reply_queue_count) + reply_queue = 0; + } + } + leapioraid_base_import_managed_irqs_affinity(ioc); +} + +static int +leapioraid_base_wait_for_doorbell_int( + struct LEAPIORAID_ADAPTER *ioc, int timeout) +{ + u32 cntdn, count; + u32 int_status; + + count = 0; + cntdn = 1000 * timeout; + do { + int_status = + ioc->base_readl(&ioc->chip->HostInterruptStatus, + LEAPIORAID_READL_RETRY_COUNT_OF_THREE); + if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) { + dhsprintk(ioc, pr_info( + "%s %s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, + timeout)); + return 0; + } + usleep_range(1000, 1100); + count++; + } while (--cntdn); + pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n", + ioc->name, __func__, count, int_status); + return -EFAULT; +} + +static int +leapioraid_base_spin_on_doorbell_int(struct LEAPIORAID_ADAPTER *ioc, + int timeout) +{ + u32 cntdn, count; + u32 int_status; + + count = 0; + cntdn = 2000 * timeout; + do { + int_status = + ioc->base_readl(&ioc->chip->HostInterruptStatus, + LEAPIORAID_READL_RETRY_COUNT_OF_THREE); + if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) { + dhsprintk(ioc, pr_info( + "%s %s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, + timeout)); + return 0; + } + udelay(500); + count++; + } while (--cntdn); + pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n", + ioc->name, __func__, count, int_status); + return -EFAULT; +} + +static int 
+leapioraid_base_wait_for_doorbell_ack(struct LEAPIORAID_ADAPTER *ioc, + int timeout) +{ + u32 cntdn, count; + u32 int_status; + u32 doorbell; + + count = 0; + cntdn = 1000 * timeout; + do { + int_status = + ioc->base_readl(&ioc->chip->HostInterruptStatus, + LEAPIORAID_READL_RETRY_COUNT_OF_THREE); + if (!(int_status & LEAPIORAID_HIS_SYS2IOC_DB_STATUS)) { + dhsprintk(ioc, pr_info( + "%s %s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, + timeout)); + return 0; + } else if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) { + doorbell = + ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, doorbell); + return -EFAULT; + } + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + leapioraid_base_coredump_info(ioc, doorbell); + return -EFAULT; + } + } else if (int_status == 0xFFFFFFFF) + goto out; + usleep_range(1000, 1100); + count++; + } while (--cntdn); +out: + pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n", + ioc->name, __func__, count, int_status); + return -EFAULT; +} + +static int +leapioraid_base_wait_for_doorbell_not_used(struct LEAPIORAID_ADAPTER *ioc, + int timeout) +{ + u32 cntdn, count; + u32 doorbell_reg; + + count = 0; + cntdn = 1000 * timeout; + do { + doorbell_reg = + ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + if (!(doorbell_reg & LEAPIORAID_DOORBELL_USED)) { + dhsprintk(ioc, pr_info( + "%s %s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, + timeout)); + return 0; + } + usleep_range(1000, 1100); + count++; + } while (--cntdn); + pr_err("%s %s: failed due to timeout count(%d), doorbell_reg(%x)!\n", + ioc->name, __func__, count, doorbell_reg); + return -EFAULT; +} + +static int +leapioraid_base_handshake_req_reply_wait(struct LEAPIORAID_ADAPTER *ioc, + int request_bytes, u32 *request, + int reply_bytes, u16 *reply, + int timeout) +{ + struct LeapioraidDefaultRep_t *default_reply + = (struct LeapioraidDefaultRep_t *) reply; + int i; + u8 failed; + __le32 *mfp; + + if ((ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY) & LEAPIORAID_DOORBELL_USED)) { + pr_err("%s doorbell is in use (line=%d)\n", ioc->name, __LINE__); + return -EFAULT; + } + if (ioc->base_readl(&ioc->chip->HostInterruptStatus, + LEAPIORAID_READL_RETRY_COUNT_OF_THREE) & + LEAPIORAID_HIS_IOC2SYS_DB_STATUS) + writel(0, &ioc->chip->HostInterruptStatus); + writel(((LEAPIORAID_FUNC_HANDSHAKE << LEAPIORAID_DOORBELL_FUNCTION_SHIFT) + | ((request_bytes / 4) << LEAPIORAID_DOORBELL_ADD_DWORDS_SHIFT)), + &ioc->chip->Doorbell); + if ((leapioraid_base_spin_on_doorbell_int(ioc, 5))) { + pr_err("%s doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + writel(0, &ioc->chip->HostInterruptStatus); + if ((leapioraid_base_wait_for_doorbell_ack(ioc, 5))) { + pr_err("%s doorbell handshake ack failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + for (i = 0, failed = 0; i < request_bytes / 4 && !failed; i++) { + writel((u32) (request[i]), &ioc->chip->Doorbell); + if ((leapioraid_base_wait_for_doorbell_ack(ioc, 5))) + failed = 1; + } + if (failed) { + pr_err("%s doorbell handshake sending request failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + if ((leapioraid_base_wait_for_doorbell_int(ioc, timeout))) { + pr_err("%s doorbell handshake int failed (line=%d)\n", + 
ioc->name, __LINE__); + return -EFAULT; + } + reply[0] = + (u16) (ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY) + & LEAPIORAID_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + if ((leapioraid_base_wait_for_doorbell_int(ioc, 5))) { + pr_err("%s doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + reply[1] = + (u16) (ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY) + & LEAPIORAID_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + for (i = 2; i < default_reply->MsgLength * 2; i++) { + if ((leapioraid_base_wait_for_doorbell_int(ioc, 5))) { + pr_err("%s doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + if (i >= reply_bytes / 2) + ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + else + reply[i] = + (u16) (ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY) + & LEAPIORAID_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + } + if (leapioraid_base_wait_for_doorbell_int(ioc, 5)) { + pr_err("%s doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + if (leapioraid_base_wait_for_doorbell_not_used(ioc, 5) != 0) { + dhsprintk(ioc, + pr_info("%s doorbell is in use (line=%d)\n", + ioc->name, __LINE__)); + } + writel(0, &ioc->chip->HostInterruptStatus); + if (ioc->logging_level & LEAPIORAID_DEBUG_INIT) { + mfp = (__le32 *) reply; + pr_info("%s \toffset:data\n", ioc->name); + for (i = 0; i < reply_bytes / 4; i++) + pr_info("%s \t[0x%02x]:%08x\n", + ioc->name, i * 4, le32_to_cpu(mfp[i])); + } + return 0; +} + +static int +leapioraid_base_wait_on_iocstate( + struct LEAPIORAID_ADAPTER *ioc, u32 ioc_state, + int timeout) +{ + u32 count, cntdn; + u32 current_state; + + count = 0; + cntdn = 1000 * timeout; + do { + current_state = leapioraid_base_get_iocstate(ioc, 1); + if (current_state == ioc_state) + return 0; + if (count && current_state == LEAPIORAID_IOC_STATE_FAULT) + break; + usleep_range(1000, 1100); + count++; + } while (--cntdn); + return current_state; +} + +static inline void +leapioraid_base_dump_reg_set(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned int i, sz = 256; + u32 __iomem *reg = (u32 __iomem *) ioc->chip; + + pr_info("%s System Register set:\n", ioc->name); + for (i = 0; i < (sz / sizeof(u32)); i++) + pr_info("%08x: %08x\n", (i * 4), readl(®[i])); +} + +int +leapioraid_base_unlock_and_get_host_diagnostic( + struct LEAPIORAID_ADAPTER *ioc, + u32 *host_diagnostic) +{ + u32 count; + + *host_diagnostic = 0; + count = 0; + do { + drsprintk(ioc, pr_info("%s write magic sequence\n", ioc->name)); + writel(0x0, &ioc->chip->WriteSequence); + writel(0xF, &ioc->chip->WriteSequence); + writel(0x4, &ioc->chip->WriteSequence); + writel(0xB, &ioc->chip->WriteSequence); + writel(0x2, &ioc->chip->WriteSequence); + writel(0x7, &ioc->chip->WriteSequence); + writel(0xD, &ioc->chip->WriteSequence); + msleep(100); + if (count++ > 20) { + pr_err("%s Giving up writing magic sequence after 20 retries\n", + ioc->name); + leapioraid_base_dump_reg_set(ioc); + return -EFAULT; + } + *host_diagnostic = + ioc->base_readl(&ioc->chip->HostDiagnostic, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + drsprintk(ioc, pr_info( + "%s wrote magic sequence: cnt(%d), host_diagnostic(0x%08x)\n", + ioc->name, count, *host_diagnostic)); + } while ((*host_diagnostic & 0x00000080) == 0); + return 0; +} + +void 
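+/*
+ * Write 0 to the write-sequence register, re-locking the host
+ * diagnostic register that the magic sequence above unlocked.
+ */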
+leapioraid_base_lock_host_diagnostic(struct LEAPIORAID_ADAPTER *ioc) +{ + drsprintk(ioc, pr_info("%s disable writes to the diagnostic register\n", + ioc->name)); + writel(0x0, &ioc->chip->WriteSequence); +} + +static int +leapioraid_base_diag_reset(struct LEAPIORAID_ADAPTER *ioc) +{ + u32 host_diagnostic; + u32 ioc_state; + u32 count; + u32 hcb_size; + + pr_info("%s sending diag reset !!\n", ioc->name); + drsprintk(ioc, + pr_info("%s Locking pci cfg space access\n", + ioc->name)); + pci_cfg_access_lock(ioc->pdev); + drsprintk(ioc, pr_info("%s clear interrupts\n", + ioc->name)); + mutex_lock(&ioc->hostdiag_unlock_mutex); + if (leapioraid_base_unlock_and_get_host_diagnostic + (ioc, &host_diagnostic)) { + mutex_unlock(&ioc->hostdiag_unlock_mutex); + goto out; + } + hcb_size = + ioc->base_readl(&ioc->chip->HCBSize, LEAPIORAID_READL_RETRY_COUNT_OF_THREE); + drsprintk(ioc, + pr_info("%s diag reset: issued\n", + ioc->name)); + writel(host_diagnostic | LEAPIORAID_DIAG_RESET_ADAPTER, + &ioc->chip->HostDiagnostic); +#if defined(DISABLE_RESET_SUPPORT) + count = 0; + do { + msleep(50); + host_diagnostic = + ioc->base_readl(&ioc->chip->HostDiagnostic, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + if (host_diagnostic == 0xFFFFFFFF) + goto out; + else if (count++ >= 300) + goto out; + if (!(count % 20)) + pr_info("waiting on diag reset bit to clear, count = %d\n", + (count / 20)); + } while (host_diagnostic & LEAPIORAID_DIAG_RESET_ADAPTER); +#else + msleep(50); + for (count = 0; count < (300000 / 256); count++) { + host_diagnostic = + ioc->base_readl(&ioc->chip->HostDiagnostic, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + if (host_diagnostic == 0xFFFFFFFF) { + pr_err("%s Invalid host diagnostic register value\n", + ioc->name); + leapioraid_base_dump_reg_set(ioc); + goto out; + } + if (!(host_diagnostic & LEAPIORAID_DIAG_RESET_ADAPTER)) + break; + + msleep(256); + } +#endif + if (host_diagnostic & 0x00000100) { + drsprintk(ioc, pr_info( + "%s restart IOC assuming HCB Address points to good F/W\n", + ioc->name)); + host_diagnostic &= ~0x00001800; + host_diagnostic |= 0x00000800; + writel(host_diagnostic, &ioc->chip->HostDiagnostic); + drsprintk(ioc, pr_err( + "%s re-enable the HCDW\n", ioc->name)); + writel(hcb_size | 0x00000001, + &ioc->chip->HCBSize); + } + drsprintk(ioc, pr_info("%s restart the adapter\n", + ioc->name)); + writel(host_diagnostic & ~0x00000002, + &ioc->chip->HostDiagnostic); + leapioraid_base_lock_host_diagnostic(ioc); + mutex_unlock(&ioc->hostdiag_unlock_mutex); + drsprintk(ioc, pr_info("%s Wait for FW to go to the READY state\n", + ioc->name)); + ioc_state = + leapioraid_base_wait_on_iocstate( + ioc, LEAPIORAID_IOC_STATE_READY, 20); + if (ioc_state) { + pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + leapioraid_base_dump_reg_set(ioc); + goto out; + } + drsprintk(ioc, pr_err( + "%s Unlocking pci cfg space access\n", ioc->name)); + pci_cfg_access_unlock(ioc->pdev); + if (ioc->open_pcie_trace) + leapioraid_base_trace_log_init(ioc); + pr_info("%s diag reset: SUCCESS\n", ioc->name); + return 0; +out: + drsprintk(ioc, pr_err( + "%s Unlocking pci cfg space access\n", ioc->name)); + pci_cfg_access_unlock(ioc->pdev); + pr_err("%s diag reset: FAILED\n", ioc->name); + mutex_unlock(&ioc->hostdiag_unlock_mutex); + return -EFAULT; +} + +static int +leapioraid_base_wait_for_iocstate( + struct LEAPIORAID_ADAPTER *ioc, int timeout) +{ + u32 ioc_state; + int rc; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if 
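+	/* nothing to wait for when the PCI device is unreachable */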
(!leapioraid_base_pci_device_is_available(ioc)) + return 0; + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); + if (((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_READY) || + (ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_OPERATIONAL) + return 0; + if (ioc_state & LEAPIORAID_DOORBELL_USED) { + dhsprintk(ioc, + pr_info("%s unexpected doorbell active!\n", ioc->name)); + goto issue_diag_reset; + } + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + pr_err("%s %s: Skipping the diag reset here. (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + return -EFAULT; + } + ioc_state = + leapioraid_base_wait_on_iocstate(ioc, LEAPIORAID_IOC_STATE_READY, + timeout); + if (ioc_state) { + pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + return -EFAULT; + } +issue_diag_reset: + rc = leapioraid_base_diag_reset(ioc); + return rc; +} + +int +leapioraid_base_check_for_fault_and_issue_reset( + struct LEAPIORAID_ADAPTER *ioc) +{ + u32 ioc_state; + int rc = -EFAULT; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (!leapioraid_base_pci_device_is_available(ioc)) + return rc; + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + leapioraid_base_mask_interrupts(ioc); + rc = leapioraid_base_diag_reset(ioc); + } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + leapioraid_base_coredump_info(ioc, + ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + leapioraid_base_wait_for_coredump_completion(ioc, __func__); + leapioraid_base_mask_interrupts(ioc); + rc = leapioraid_base_diag_reset(ioc); + } + return rc; +} + +static int +leapioraid_base_get_ioc_facts(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidIOCFactsReq_t mpi_request; + struct LeapioraidIOCFactsRep_t mpi_reply; + struct leapioraid_facts *facts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + r = leapioraid_base_wait_for_iocstate(ioc, 10); + if (r) { + pr_err( + "%s %s: failed getting to correct state\n", ioc->name, + __func__); + return r; + } + mpi_reply_sz = sizeof(struct LeapioraidIOCFactsRep_t); + mpi_request_sz = sizeof(struct LeapioraidIOCFactsReq_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = LEAPIORAID_FUNC_IOC_FACTS; + r = leapioraid_base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *) &mpi_request, + mpi_reply_sz, + (u16 *) &mpi_reply, 5); + if (r != 0) { + pr_err("%s %s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + facts = &ioc->facts; + memset(facts, 0, sizeof(struct leapioraid_facts)); + facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); + facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); + facts->IOCNumber = mpi_reply.IOCNumber; + pr_info("%s IOC Number : %d\n", ioc->name, facts->IOCNumber); + ioc->IOCNumber = facts->IOCNumber; + facts->VP_ID = mpi_reply.VP_ID; + facts->VF_ID = mpi_reply.VF_ID; + 
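+	/*
+	 * Cache the remaining IOCFacts fields, converting little-endian
+	 * reply fields to host byte order as they are copied.
+	 */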
facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); + facts->MaxChainDepth = mpi_reply.MaxChainDepth; + facts->WhoInit = mpi_reply.WhoInit; + facts->NumberOfPorts = mpi_reply.NumberOfPorts; + facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; + if (ioc->msix_enable && (facts->MaxMSIxVectors <= 16)) + ioc->combined_reply_queue = 0; + facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); + facts->MaxReplyDescriptorPostQueueDepth = + le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); + facts->ProductID = le16_to_cpu(mpi_reply.ProductID); + facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); + if ((facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) + ioc->ir_firmware = 1; + if ((facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) + && (!reset_devices)) + ioc->rdpq_array_capable = 1; + else + ioc->rdpq_array_capable = 0; + if (facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_ATOMIC_REQ) + ioc->atomic_desc_capable = 1; + else + ioc->atomic_desc_capable = 0; + + facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); + facts->IOCRequestFrameSize = le16_to_cpu(mpi_reply.IOCRequestFrameSize); + facts->IOCMaxChainSegmentSize = + le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); + facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); + facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); + ioc->shost->max_id = -1; + facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); + facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); + facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); + facts->HighPriorityCredit = le16_to_cpu(mpi_reply.HighPriorityCredit); + facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; + facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); + facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; + ioc->page_size = 1 << facts->CurrentHostPageSize; + if (ioc->page_size == 1) { + pr_err( + "%s CurrentHostPageSize is 0: Setting host page to 4k\n", + ioc->name); + ioc->page_size = 1 << 12; + } + dinitprintk(ioc, + pr_info("%s CurrentHostPageSize(%d)\n", + ioc->name, facts->CurrentHostPageSize)); + dinitprintk(ioc, + pr_info("%s hba queue depth(%d), max chains per io(%d)\n", + ioc->name, facts->RequestCredit, facts->MaxChainDepth)); + dinitprintk(ioc, + pr_info("%s request frame size(%d), reply frame size(%d)\n", + ioc->name, + facts->IOCRequestFrameSize * 4, + facts->ReplyFrameSize * 4)); + return 0; +} + +static void +leapioraid_base_unmap_resources(struct LEAPIORAID_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + + pr_info("%s %s\n", ioc->name, __func__); + leapioraid_base_free_irq(ioc); + leapioraid_base_disable_msix(ioc); + kfree(ioc->replyPostRegisterIndex); + mutex_lock(&ioc->pci_access_mutex); + if (ioc->chip_phys) { + iounmap(ioc->chip); + ioc->chip_phys = 0; + } + + pci_release_selected_regions(ioc->pdev, ioc->bars); + pci_disable_device(pdev); + mutex_unlock(&ioc->pci_access_mutex); +} + +int +leapioraid_base_map_resources(struct LEAPIORAID_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + u32 memap_sz; + u32 pio_sz; + int i, r = 0, rc; + u64 pio_chip = 0; + phys_addr_t chip_phys = 0; + struct leapioraid_adapter_reply_queue *reply_q; + int iopoll_q_count = 0; + + dinitprintk(ioc, pr_info("%s %s\n", + ioc->name, __func__)); + + ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (pci_enable_device_mem(pdev)) { + pr_warn("%s pci_enable_device_mem: failed\n", ioc->name); + return -ENODEV; + } + if (pci_request_selected_regions(pdev, 
ioc->bars, ioc->driver_name)) { + pr_warn("%s pci_request_selected_regions: failed\n", ioc->name); + r = -ENODEV; + goto out_fail; + } + + pci_set_master(pdev); + + if (leapioraid_base_config_dma_addressing(ioc, pdev) != 0) { + pr_warn("%s no suitable DMA mask for %s\n", + ioc->name, pci_name(pdev)); + r = -ENODEV; + goto out_fail; + } + for (i = 0, memap_sz = 0, pio_sz = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + if (pio_sz) + continue; + pio_chip = (u64) pci_resource_start(pdev, i); + pio_sz = pci_resource_len(pdev, i); + } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { + if (memap_sz) + continue; + ioc->chip_phys = pci_resource_start(pdev, i); + chip_phys = ioc->chip_phys; + memap_sz = pci_resource_len(pdev, i); + ioc->chip = ioremap(ioc->chip_phys, memap_sz); + if (ioc->chip == NULL) { + pr_err("%s unable to map adapter memory!\n", + ioc->name); + r = -EINVAL; + goto out_fail; + } + } + } + leapioraid_base_mask_interrupts(ioc); + r = leapioraid_base_get_ioc_facts(ioc); + if (r) { + rc = leapioraid_base_check_for_fault_and_issue_reset(ioc); + if (rc || (leapioraid_base_get_ioc_facts(ioc))) + goto out_fail; + } + if (!ioc->rdpq_array_enable_assigned) { + ioc->rdpq_array_enable = ioc->rdpq_array_capable; + ioc->rdpq_array_enable_assigned = 1; + } + r = leapioraid_base_enable_msix(ioc); + if (r) + goto out_fail; + iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + for (i = 0; i < iopoll_q_count; i++) { + atomic_set(&ioc->blk_mq_poll_queues[i].busy, 0); + atomic_set(&ioc->blk_mq_poll_queues[i].pause, 0); + } + if (!ioc->is_driver_loading) + leapioraid_base_init_irqpolls(ioc); + if (ioc->combined_reply_queue) { + ioc->replyPostRegisterIndex = kcalloc(ioc->nc_reply_index_count, + sizeof(resource_size_t *), + GFP_KERNEL); + if (!ioc->replyPostRegisterIndex) { + pr_err("%s allocation for reply Post Register Index failed!!!\n", + ioc->name); + r = -ENOMEM; + goto out_fail; + } + + for (i = 0; i < ioc->nc_reply_index_count; i++) { + ioc->replyPostRegisterIndex[i] = (resource_size_t *) + ((u8 *) &ioc->chip->Doorbell + + 0x0000030C + + (i * 0x10)); + } + } + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (reply_q->msix_index >= ioc->iopoll_q_start_index) { + pr_info("%s enabled: index: %d\n", + reply_q->name, reply_q->msix_index); + continue; + } + pr_info("%s %s: IRQ %d\n", + reply_q->name, + ((ioc->msix_enable) ? 
"PCI-MSI-X enabled" : + "IO-APIC enabled"), pci_irq_vector(ioc->pdev, + reply_q->msix_index)); + } + pr_info("%s iomem(%pap), mapped(0x%p), size(%d)\n", + ioc->name, &chip_phys, ioc->chip, memap_sz); + pr_info("%s ioport(0x%016llx), size(%d)\n", + ioc->name, (unsigned long long)pio_chip, pio_sz); + + pci_save_state(pdev); + return 0; +out_fail: + leapioraid_base_unmap_resources(ioc); + return r; +} + +void *leapioraid_base_get_msg_frame( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->request + (smid * ioc->request_sz)); +} + +void *leapioraid_base_get_sense_buffer( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); +} + +__le32 +leapioraid_base_get_sense_buffer_dma( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + return cpu_to_le32(ioc->sense_dma + ((smid - 1) * + SCSI_SENSE_BUFFERSIZE)); +} + +__le64 +leapioraid_base_get_sense_buffer_dma_64(struct LEAPIORAID_ADAPTER *ioc, + u16 smid) +{ + return cpu_to_le64(ioc->sense_dma + ((smid - 1) * + SCSI_SENSE_BUFFERSIZE)); +} + +void *leapioraid_base_get_reply_virt_addr(struct LEAPIORAID_ADAPTER *ioc, + u32 phys_addr) +{ + if (!phys_addr) + return NULL; + return ioc->reply + (phys_addr - (u32) ioc->reply_dma); +} + +static inline u8 +leapioraid_base_get_msix_index( + struct LEAPIORAID_ADAPTER *ioc, struct scsi_cmnd *scmd) +{ + if (ioc->msix_load_balance) + return ioc->reply_queue_count ? + leapioraid_base_mod64(atomic64_add_return(1, &ioc->total_io_cnt), + ioc->reply_queue_count) : 0; + if (scmd && ioc->shost->nr_hw_queues > 1) { + u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + + return blk_mq_unique_tag_to_hwq(tag) + ioc->high_iops_queues; + } + return ioc->cpu_msix_table[raw_smp_processor_id()]; +} + +inline unsigned long +leapioraid_base_sdev_nr_inflight_request(struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + return scsi_device_busy(scmd->device); +} + +static inline u8 +leapioraid_base_get_high_iops_msix_index(struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + if (leapioraid_base_sdev_nr_inflight_request(ioc, scmd) > + LEAPIORAID_DEVICE_HIGH_IOPS_DEPTH) + return + leapioraid_base_mod64((atomic64_add_return + (1, + &ioc->high_iops_outstanding) / + LEAPIORAID_HIGH_IOPS_BATCH_COUNT), + LEAPIORAID_HIGH_IOPS_REPLY_QUEUES); + return leapioraid_base_get_msix_index(ioc, scmd); +} + +u16 +leapioraid_base_get_smid(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct leapioraid_request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->internal_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + pr_err("%s %s: smid not available\n", + ioc->name, __func__); + return 0; + } + request = list_entry(ioc->internal_free_list.next, + struct leapioraid_request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +u16 +leapioraid_base_get_smid_scsiio(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd) +{ + struct leapioraid_scsiio_tracker *request; + u16 smid; + u32 tag = scsi_cmd_to_rq(scmd)->tag; + u32 unique_tag; + + unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + tag = blk_mq_unique_tag_to_tag(unique_tag); + ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag); + request = leapioraid_base_scsi_cmd_priv(scmd); + smid = tag + 1; + request->cb_idx = cb_idx; + request->smid = 
smid; + request->scmd = scmd; + return smid; +} + +u16 +leapioraid_base_get_smid_hpr(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct leapioraid_request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return 0; + } + request = list_entry(ioc->hpr_free_list.next, + struct leapioraid_request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +static void +leapioraid_base_recovery_check(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->shost_recovery && ioc->pending_io_count) { + if (ioc->pending_io_count == 1) + wake_up(&ioc->reset_wq); + ioc->pending_io_count--; + } +} + +void +leapioraid_base_clear_st(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_scsiio_tracker *st) +{ + if (!st) + return; + if (WARN_ON(st->smid == 0)) + return; + st->cb_idx = 0xFF; + st->direct_io = 0; + st->scmd = NULL; + atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0); +} + +void +leapioraid_base_free_smid(struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + unsigned long flags; + int i; + struct leapioraid_scsiio_tracker *st; + void *request; + + if (smid < ioc->hi_priority_smid) { + st = leapioraid_get_st_from_smid(ioc, smid); + if (!st) { + leapioraid_base_recovery_check(ioc); + return; + } + request = leapioraid_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + leapioraid_base_clear_st(ioc, st); + leapioraid_base_recovery_check(ioc); + ioc->io_queue_num[smid - 1] = 0xFFFF; + return; + } + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (smid < ioc->internal_smid) { + i = smid - ioc->hi_priority_smid; + ioc->hpr_lookup[i].cb_idx = 0xFF; + list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list); + } else if (smid <= ioc->hba_queue_depth) { + i = smid - ioc->internal_smid; + ioc->internal_lookup[i].cb_idx = 0xFF; + list_add(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); + } + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); +} + +#if defined(writeq) && defined(CONFIG_64BIT) +static inline void +leapioraid_base_writeq( + __u64 b, void __iomem *addr, spinlock_t *writeq_lock) +{ + writeq(b, addr); +} +#else +static inline void +leapioraid_base_writeq( + __u64 b, void __iomem *addr, spinlock_t *writeq_lock) +{ + unsigned long flags; + __u64 data_out = b; + + spin_lock_irqsave(writeq_lock, flags); + writel((u32) (data_out), addr); + writel((u32) (data_out >> 32), (addr + 4)); + spin_unlock_irqrestore(writeq_lock, flags); +} +#endif + +static u8 +leapioraid_base_set_and_get_msix_index( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + struct leapioraid_scsiio_tracker *st; + + st = (smid < + ioc->hi_priority_smid) ? 
(leapioraid_get_st_from_smid(ioc, + smid)) + : (NULL); + if (st == NULL) + return leapioraid_base_get_msix_index(ioc, NULL); + st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd); + return st->msix_io; +} + +static void +leapioraid_base_put_smid_scsi_io(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u16 handle) +{ + union LeapioraidReqDescUnion_t descriptor; + u64 *request = (u64 *) &descriptor; + + descriptor.SCSIIO.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.SCSIIO.MSIxIndex + = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +static void +leapioraid_base_put_smid_fast_path(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u16 handle) +{ + union LeapioraidReqDescUnion_t descriptor; + u64 *request = (u64 *) &descriptor; + + descriptor.SCSIIO.RequestFlags = + LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; + descriptor.SCSIIO.MSIxIndex + = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +static void +leapioraid_base_put_smid_hi_priority(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u16 msix_task) +{ + union LeapioraidReqDescUnion_t descriptor; + u64 *request; + + request = (u64 *) &descriptor; + descriptor.HighPriority.RequestFlags = + LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; + descriptor.HighPriority.MSIxIndex = msix_task; + descriptor.HighPriority.SMID = cpu_to_le16(smid); + descriptor.HighPriority.LMID = 0; + descriptor.HighPriority.Reserved1 = 0; + leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +static void +leapioraid_base_put_smid_default(struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + union LeapioraidReqDescUnion_t descriptor; + u64 *request; + + request = (u64 *) &descriptor; + descriptor.Default.RequestFlags = + LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; + descriptor.Default.MSIxIndex + = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.Default.SMID = cpu_to_le16(smid); + descriptor.Default.LMID = 0; + descriptor.Default.DescriptorTypeDependent = 0; + leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +static void +leapioraid_base_put_smid_scsi_io_atomic(struct LEAPIORAID_ADAPTER *ioc, + u16 smid, u16 handle) +{ + struct LeapioraidAtomicReqDesc_t descriptor; + u32 *request = (u32 *) &descriptor; + + descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +static void +leapioraid_base_put_smid_fast_path_atomic(struct LEAPIORAID_ADAPTER *ioc, + u16 smid, u16 handle) +{ + struct LeapioraidAtomicReqDesc_t descriptor; + u32 *request = (u32 *) &descriptor; + + descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; + descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +static void 
+leapioraid_base_put_smid_hi_priority_atomic(struct LEAPIORAID_ADAPTER *ioc, + u16 smid, u16 msix_task) +{ + struct LeapioraidAtomicReqDesc_t descriptor; + u32 *request = (u32 *) &descriptor; + + descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; + descriptor.MSIxIndex = msix_task; + descriptor.SMID = cpu_to_le16(smid); + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +static void +leapioraid_base_put_smid_default_atomic(struct LEAPIORAID_ADAPTER *ioc, + u16 smid) +{ + struct LeapioraidAtomicReqDesc_t descriptor; + u32 *request = (u32 *)(&descriptor); + + descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; + descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +static int +leapioraid_base_display_fwpkg_version(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidFWImgHeader_t *fw_img_hdr; + struct LeapioraidComptImgHeader_t *cmp_img_hdr; + struct LeapioraidFWUploadReq_t *mpi_request; + struct LeapioraidFWUploadRep_t mpi_reply; + int r = 0, issue_diag_reset = 0; + u32 package_version = 0; + void *fwpkg_data = NULL; + dma_addr_t fwpkg_data_dma; + u16 smid, ioc_status; + size_t data_length; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) { + pr_err("%s %s: internal command already in use\n", ioc->name, + __func__); + return -EAGAIN; + } + data_length = sizeof(struct LeapioraidFWImgHeader_t); + fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &fwpkg_data_dma, GFP_ATOMIC); + if (!fwpkg_data) + return -ENOMEM; + + smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + r = -EAGAIN; + goto out; + } + ioc->base_cmds.status = LEAPIORAID_CMD_PENDING; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidFWUploadReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_FW_UPLOAD; + mpi_request->ImageType = 0x01; + mpi_request->ImageSize = data_length; + ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma, + data_length); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, 15 * HZ); + dinitprintk(ioc, pr_info("%s %s: complete\n", + ioc->name, __func__)); + if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidFWUploadReq_t) / 4); + issue_diag_reset = 1; + } else { + memset(&mpi_reply, 0, sizeof(struct LeapioraidFWUploadRep_t)); + if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + memcpy(&mpi_reply, ioc->base_cmds.reply, + sizeof(struct LeapioraidFWUploadRep_t)); + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) { + fw_img_hdr = + (struct LeapioraidFWImgHeader_t *) fwpkg_data; + if (le32_to_cpu(fw_img_hdr->Signature) == + 0xEB000042) { + cmp_img_hdr = + (struct LeapioraidComptImgHeader_t + *) (fwpkg_data); + package_version = + le32_to_cpu(cmp_img_hdr->ApplicationSpecific); + } else + package_version = + le32_to_cpu(fw_img_hdr->PackageVersion.Word); + if (package_version) + pr_err( + "%s FW Package Version(%02d.%02d.%02d.%02d)\n", + ioc->name, + ((package_version) & 
0xFF000000) + >> 24, + ((package_version) & 0x00FF0000) + >> 16, + ((package_version) & 0x0000FF00) + >> 8, + (package_version) & 0x000000FF); + } else { + leapioraid_debug_dump_mf(&mpi_reply, + sizeof(struct LeapioraidFWUploadRep_t) / + 4); + } + } + } + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; +out: + if (fwpkg_data) + dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data, + fwpkg_data_dma); + if (issue_diag_reset) { + if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED) + return -EFAULT; + if (leapioraid_base_check_for_fault_and_issue_reset(ioc)) + return -EFAULT; + r = -EAGAIN; + } + return r; +} + +static void +leapioraid_base_display_ioc_capabilities(struct LEAPIORAID_ADAPTER *ioc) +{ + int i = 0; + char desc[17] = { 0 }; + u8 revision; + u32 iounit_pg1_flags; + + pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); + strscpy(desc, ioc->manu_pg0.ChipName, sizeof(desc)); + pr_info("%s %s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n", + ioc->name, desc, + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF, revision); + pr_info("%s Protocol=(", ioc->name); + if (ioc->facts.ProtocolFlags & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { + pr_info("Initiator"); + i++; + } + if (ioc->facts.ProtocolFlags & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_TARGET) { + pr_info("%sTarget", i ? "," : ""); + i++; + } + i = 0; + pr_info("), "); + pr_info("Capabilities=("); + if ((!ioc->warpdrive_msg) && (ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) { + pr_info("Raid"); + i++; + } + if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_TLR) { + pr_info("%sTLR", i ? "," : ""); + i++; + } + if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_MULTICAST) { + pr_info("%sMulticast", i ? "," : ""); + i++; + } + if (ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { + pr_info("%sBIDI Target", i ? "," : ""); + i++; + } + if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_EEDP) { + pr_info("%sEEDP", i ? "," : ""); + i++; + } + if (ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { + pr_info("%sTask Set Full", i ? "," : ""); + i++; + } + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); + if (!(iounit_pg1_flags & LEAPIORAID_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { + pr_info("%sNCQ", i ? 
"," : ""); + i++; + } + pr_info(")\n"); +} + +static int +leapioraid_base_update_ioc_page1_inlinewith_perf_mode( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidIOCP1_t ioc_pg1; + struct LeapioraidCfgRep_t mpi_reply; + int rc; + + rc = leapioraid_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy); + if (rc) + return rc; + memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(struct LeapioraidIOCP1_t)); + switch (perf_mode) { + case LEAPIORAID_PERF_MODE_DEFAULT: + case LEAPIORAID_PERF_MODE_BALANCED: + if (ioc->high_iops_queues) { + pr_err( + "%s Enable int coalescing only for first %d reply queues\n", + ioc->name, LEAPIORAID_HIGH_IOPS_REPLY_QUEUES); + ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 | + ((1 << + LEAPIORAID_HIGH_IOPS_REPLY_QUEUES + / 8) - 1)); + rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, + &ioc_pg1); + if (rc) + return rc; + pr_err("%s performance mode: balanced\n", ioc->name); + return 0; + } + fallthrough; + case LEAPIORAID_PERF_MODE_LATENCY: + ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa); + ioc_pg1.Flags |= cpu_to_le32(0x00000001); + ioc_pg1.ProductSpecific = 0; + rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); + if (rc) + return rc; + pr_err("%s performance mode: latency\n", ioc->name); + break; + case LEAPIORAID_PERF_MODE_IOPS: + pr_err( + "%s performance mode: iops with coalescing timeout: 0x%x\n", + ioc->name, le32_to_cpu(ioc_pg1.CoalescingTimeout)); + ioc_pg1.Flags |= cpu_to_le32(0x00000001); + ioc_pg1.ProductSpecific = 0; + rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); + if (rc) + return rc; + break; + } + return 0; +} + +static int +leapioraid_base_assign_fw_reported_qd(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL; + int sz; + int rc = 0; + + ioc->max_wideport_qd = LEAPIORAID_SAS_QUEUE_DEPTH; + ioc->max_narrowport_qd = LEAPIORAID_SAS_QUEUE_DEPTH; + ioc->max_sata_qd = LEAPIORAID_SATA_QUEUE_DEPTH; + + sz = offsetof(struct LeapioraidSasIOUnitP1_t, PhyData); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return rc; + } + rc = leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz); + if (rc) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->max_wideport_qd = + (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ? + le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) : + LEAPIORAID_SAS_QUEUE_DEPTH; + ioc->max_narrowport_qd = + (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ? + le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) : + LEAPIORAID_SAS_QUEUE_DEPTH; + ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ? 
+ sas_iounit_pg1->SATAMaxQDepth : LEAPIORAID_SATA_QUEUE_DEPTH; +out: + dinitprintk(ioc, pr_err( + "%s MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x\n", + ioc->name, ioc->max_wideport_qd, + ioc->max_narrowport_qd, ioc->max_sata_qd)); + kfree(sas_iounit_pg1); + return rc; +} + +static int +leapioraid_base_static_config_pages(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidCfgRep_t mpi_reply; + u32 iounit_pg1_flags; + int rc; + + rc = leapioraid_config_get_manufacturing_pg0(ioc, &mpi_reply, + &ioc->manu_pg0); + if (rc) + return rc; + if (ioc->ir_firmware) { + rc = leapioraid_config_get_manufacturing_pg10(ioc, &mpi_reply, + &ioc->manu_pg10); + if (rc) + return rc; + } + rc = leapioraid_config_get_manufacturing_pg11(ioc, &mpi_reply, + &ioc->manu_pg11); + if (rc) + return rc; + + ioc->time_sync_interval = + ioc->manu_pg11.TimeSyncInterval & 0x7F; + if (ioc->time_sync_interval) { + if (ioc->manu_pg11.TimeSyncInterval & 0x80) + ioc->time_sync_interval = + ioc->time_sync_interval * 3600; + else + ioc->time_sync_interval = + ioc->time_sync_interval * 60; + dinitprintk(ioc, pr_info( + "%s Driver-FW TimeSync interval is %d seconds.\n\t\t" + "ManuPg11 TimeSync Unit is in %s's", + ioc->name, + ioc->time_sync_interval, + ((ioc->manu_pg11.TimeSyncInterval & 0x80) + ? "Hour" : "Minute"))); + } + rc = leapioraid_base_assign_fw_reported_qd(ioc); + if (rc) + return rc; + rc = leapioraid_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); + if (rc) + return rc; + rc = leapioraid_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); + if (rc) + return rc; + rc = leapioraid_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); + if (rc) + return rc; + rc = leapioraid_config_get_iounit_pg0(ioc, &mpi_reply, + &ioc->iounit_pg0); + if (rc) + return rc; + rc = leapioraid_config_get_iounit_pg1(ioc, &mpi_reply, + &ioc->iounit_pg1); + if (rc) + return rc; + rc = leapioraid_config_get_iounit_pg8(ioc, &mpi_reply, + &ioc->iounit_pg8); + if (rc) + return rc; + leapioraid_base_display_ioc_capabilities(ioc); + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); + if ((ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING)) + iounit_pg1_flags &= + ~LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; + else + iounit_pg1_flags |= + LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; + ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); + rc = leapioraid_config_set_iounit_pg1(ioc, &mpi_reply, + &ioc->iounit_pg1); + if (rc) + return rc; + if (ioc->iounit_pg8.NumSensors) + ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors; + + rc = leapioraid_base_update_ioc_page1_inlinewith_perf_mode(ioc); + if (rc) + return rc; + + return 0; +} + +void +leapioraid_free_enclosure_list(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_enclosure_node *enclosure_dev, *enclosure_dev_next; + + list_for_each_entry_safe(enclosure_dev, + enclosure_dev_next, &ioc->enclosure_list, + list) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } +} + +static void +leapioraid_base_release_memory_pools(struct LEAPIORAID_ADAPTER *ioc) +{ + int i, j; + int dma_alloc_count = 0; + struct leapioraid_chain_tracker *ct; + int count = ioc->rdpq_array_enable ? 
ioc->reply_queue_count : 1; + + dexitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (ioc->request) { + dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz, + ioc->request, ioc->request_dma); + dexitprintk(ioc, + pr_info("%s request_pool(0x%p): free\n", + ioc->name, ioc->request)); + ioc->request = NULL; + } + if (ioc->sense) { + dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); + dma_pool_destroy(ioc->sense_dma_pool); + dexitprintk(ioc, pr_info("%s sense_pool(0x%p): free\n", + ioc->name, ioc->sense)); + ioc->sense = NULL; + } + if (ioc->reply) { + dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); + dma_pool_destroy(ioc->reply_dma_pool); + dexitprintk(ioc, pr_info("%s reply_pool(0x%p): free\n", + ioc->name, ioc->reply)); + ioc->reply = NULL; + } + if (ioc->reply_free) { + dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, + ioc->reply_free_dma); + dma_pool_destroy(ioc->reply_free_dma_pool); + dexitprintk(ioc, pr_info("%s reply_free_pool(0x%p): free\n", + ioc->name, ioc->reply_free)); + ioc->reply_free = NULL; + } + if (ioc->reply_post) { + dma_alloc_count = DIV_ROUND_UP(count, + LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK); + for (i = 0; i < count; i++) { + if (i % LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0 + && dma_alloc_count) { + if (ioc->reply_post[i].reply_post_free) { + dma_pool_free(ioc->reply_post_free_dma_pool, + ioc->reply_post[i].reply_post_free, + ioc->reply_post[i].reply_post_free_dma); + pr_err( + "%s reply_post_free_pool(0x%p): free\n", + ioc->name, + ioc->reply_post[i].reply_post_free); + ioc->reply_post[i].reply_post_free = + NULL; + } + --dma_alloc_count; + } + } + dma_pool_destroy(ioc->reply_post_free_dma_pool); + if (ioc->reply_post_free_array && ioc->rdpq_array_enable) { + dma_pool_free(ioc->reply_post_free_array_dma_pool, + ioc->reply_post_free_array, + ioc->reply_post_free_array_dma); + ioc->reply_post_free_array = NULL; + } + dma_pool_destroy(ioc->reply_post_free_array_dma_pool); + kfree(ioc->reply_post); + } + if (ioc->config_page) { + dexitprintk(ioc, pr_err( + "%s config_page(0x%p): free\n", ioc->name, + ioc->config_page)); + dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz, + ioc->config_page, ioc->config_page_dma); + } + kfree(ioc->hpr_lookup); + kfree(ioc->internal_lookup); + if (ioc->chain_lookup) { + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; + j < ioc->chains_needed_per_io; j++) { + ct = &ioc->chain_lookup[i].chains_per_smid[j]; + if (ct && ct->chain_buffer) + dma_pool_free(ioc->chain_dma_pool, + ct->chain_buffer, + ct->chain_buffer_dma); + } + kfree(ioc->chain_lookup[i].chains_per_smid); + } + dma_pool_destroy(ioc->chain_dma_pool); + kfree(ioc->chain_lookup); + ioc->chain_lookup = NULL; + } + kfree(ioc->io_queue_num); + ioc->io_queue_num = NULL; +} + +static int +leapioraid_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz) +{ + dma_addr_t end_address; + + end_address = start_address + pool_sz - 1; + if (upper_32_bits(start_address) == upper_32_bits(end_address)) + return 1; + else + return 0; +} + +static inline int +leapioraid_base_reduce_hba_queue_depth(struct LEAPIORAID_ADAPTER *ioc) +{ + int reduce_sz = 64; + + if ((ioc->hba_queue_depth - reduce_sz) > + (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc->hba_queue_depth -= reduce_sz; + return 0; + } else + return -ENOMEM; +} + +static int +leapioraid_base_allocate_reply_post_free_array(struct LEAPIORAID_ADAPTER *ioc, + int reply_post_free_array_sz) +{ + 
ioc->reply_post_free_array_dma_pool = + dma_pool_create("reply_post_free_array pool", + &ioc->pdev->dev, reply_post_free_array_sz, 16, 0); + if (!ioc->reply_post_free_array_dma_pool) { + dinitprintk(ioc, + pr_err + ("reply_post_free_array pool: dma_pool_create failed\n")); + return -ENOMEM; + } + ioc->reply_post_free_array = + dma_pool_alloc(ioc->reply_post_free_array_dma_pool, + GFP_KERNEL, &ioc->reply_post_free_array_dma); + if (!ioc->reply_post_free_array) { + dinitprintk(ioc, + pr_err + ("reply_post_free_array pool: dma_pool_alloc failed\n")); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->reply_post_free_array_dma, + reply_post_free_array_sz)) { + dinitprintk(ioc, pr_err( + "Bad Reply Free Pool! Reply Free (0x%p)\n\t\t" + "Reply Free dma = (0x%llx)\n", + ioc->reply_free, + (unsigned long long)ioc->reply_free_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + return 0; +} + +static int +base_alloc_rdpq_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + int i = 0; + u32 dma_alloc_count = 0; + int reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(struct LeapioraidDefaultRepDesc_t); + int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1; + + ioc->reply_post = + kcalloc(count, sizeof(struct leapioraid_reply_post_struct), GFP_KERNEL); + if (!ioc->reply_post) { + pr_err("%s reply_post_free pool: kcalloc failed\n", ioc->name); + return -ENOMEM; + } + dma_alloc_count = DIV_ROUND_UP( + count, LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK); + ioc->reply_post_free_dma_pool = + dma_pool_create("reply_post_free pool", &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_post_free_dma_pool) { + pr_err("reply_post_free pool: dma_pool_create failed\n"); + return -ENOMEM; + } + for (i = 0; i < count; i++) { + if ((i % LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) { + ioc->reply_post[i].reply_post_free = + dma_pool_zalloc(ioc->reply_post_free_dma_pool, + GFP_KERNEL, + &ioc->reply_post[i].reply_post_free_dma); + if (!ioc->reply_post[i].reply_post_free) { + pr_err("reply_post_free pool: dma_pool_alloc failed\n"); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region + (ioc->reply_post[i].reply_post_free_dma, sz)) { + dinitprintk(ioc, pr_err( + "%s bad Replypost free pool(0x%p) dma = (0x%llx)\n", + ioc->name, + ioc->reply_post[i].reply_post_free, + (unsigned long long) + ioc->reply_post[i].reply_post_free_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + dma_alloc_count--; + } else { + ioc->reply_post[i].reply_post_free = + (union LeapioraidRepDescUnion_t *) + ((long)ioc->reply_post[i - 1].reply_post_free + + reply_post_free_sz); + ioc->reply_post[i].reply_post_free_dma = (dma_addr_t) + (ioc->reply_post[i - 1].reply_post_free_dma + + reply_post_free_sz); + } + } + return 0; +} + +static int +leapioraid_base_allocate_chain_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + int i = 0, j = 0; + struct leapioraid_chain_tracker *ctr; + + ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, + ioc->chain_segment_sz, 16, 0); + if (!ioc->chain_dma_pool) { + pr_err("%s chain_dma_pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; + j < ioc->chains_needed_per_io; j++) { + ctr = &ioc->chain_lookup[i].chains_per_smid[j]; + ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool, + GFP_KERNEL, + &ctr->chain_buffer_dma); + if (!ctr->chain_buffer) + return -EAGAIN; + if (!leapioraid_check_same_4gb_region + (ctr->chain_buffer_dma, 
ioc->chain_segment_sz)) { + pr_err( + "%s buffers not in same 4G! buff=(0x%p) dma=(0x%llx)\n", + ioc->name, + ctr->chain_buffer, + (unsigned long long)ctr->chain_buffer_dma); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + } + } + dinitprintk(ioc, pr_info( + "%s chain_lookup depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->scsiio_depth, + ioc->chain_segment_sz, + ((ioc->scsiio_depth * + (ioc->chains_needed_per_io - + ioc->chains_per_prp_buffer) * + ioc->chain_segment_sz)) / 1024)); + return 0; +} + +static int +leapioraid_base_allocate_sense_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + ioc->sense_dma_pool = + dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0); + if (!ioc->sense_dma_pool) { + pr_err("%s sense pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, + GFP_KERNEL, &ioc->sense_dma); + if (!ioc->sense) { + pr_err("%s sense pool: dma_pool_alloc failed\n", ioc->name); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->sense_dma, sz)) { + dinitprintk(ioc, + pr_err("Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n", + ioc->sense, + (unsigned long long)ioc->sense_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + pr_err( + "%s sense pool(0x%p) - dma(0x%llx): depth(%d),\n\t\t" + "element_size(%d), pool_size (%d kB)\n", + ioc->name, + ioc->sense, + (unsigned long long)ioc->sense_dma, + ioc->scsiio_depth, + SCSI_SENSE_BUFFERSIZE, sz / 1024); + return 0; +} + +static int +leapioraid_base_allocate_reply_free_dma_pool(struct LEAPIORAID_ADAPTER *ioc, + int sz) +{ + ioc->reply_free_dma_pool = + dma_pool_create("reply_free pool", &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_free_dma_pool) { + pr_err("%s reply_free pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, + GFP_KERNEL, &ioc->reply_free_dma); + if (!ioc->reply_free) { + pr_err("%s reply_free pool: dma_pool_alloc failed\n", ioc->name); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->reply_free_dma, sz)) { + dinitprintk(ioc, pr_err( + "Bad Reply Free Pool! Reply Free (0x%p)\n\t\t" + "Reply Free dma = (0x%llx)\n", + ioc->reply_free, + (unsigned long long)ioc->reply_free_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + memset(ioc->reply_free, 0, sz); + dinitprintk(ioc, pr_info( + "%s reply_free pool(0x%p): depth(%d),\n\t\t" + "element_size(%d), pool_size(%d kB)\n", + ioc->name, + ioc->reply_free, + ioc->reply_free_queue_depth, 4, + sz / 1024)); + dinitprintk(ioc, + pr_info("%s reply_free_dma (0x%llx)\n", + ioc->name, (unsigned long long)ioc->reply_free_dma)); + return 0; +} + +static int +leapioraid_base_allocate_reply_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + ioc->reply_dma_pool = dma_pool_create("reply pool", + &ioc->pdev->dev, sz, 4, 0); + if (!ioc->reply_dma_pool) { + pr_err("%s reply pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, + &ioc->reply_dma); + if (!ioc->reply) { + pr_err("%s reply pool: dma_pool_alloc failed\n", ioc->name); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->reply_dma, sz)) { + dinitprintk(ioc, + pr_err("Bad Reply Pool! 
Reply (0x%p) Reply dma = (0x%llx)\n", + ioc->reply, + (unsigned long long)ioc->reply_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + ioc->reply_dma_min_address = (u32) (ioc->reply_dma); + ioc->reply_dma_max_address = (u32) (ioc->reply_dma) + sz; + pr_err( + "%s reply pool(0x%p) - dma(0x%llx): depth(%d)\n\t\t" + "frame_size(%d), pool_size(%d kB)\n", + ioc->name, + ioc->reply, + (unsigned long long)ioc->reply_dma, + ioc->reply_free_queue_depth, + ioc->reply_sz, + sz / 1024); + return 0; +} + +static int +leapioraid_base_allocate_memory_pools(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_facts *facts; + u16 max_sge_elements; + u16 chains_needed_per_io; + u32 sz, total_sz, reply_post_free_sz, rc = 0; + u32 retry_sz; + u32 rdpq_sz = 0, sense_sz = 0, reply_post_free_array_sz = 0; + u16 max_request_credit; + unsigned short sg_tablesize; + u16 sge_size; + int i = 0; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + retry_sz = 0; + facts = &ioc->facts; + sg_tablesize = LEAPIORAID_SG_DEPTH; + if (reset_devices) + sg_tablesize = min_t(unsigned short, sg_tablesize, + LEAPIORAID_KDUMP_MIN_PHYS_SEGMENTS); + if (sg_tablesize < LEAPIORAID_MIN_PHYS_SEGMENTS) + sg_tablesize = LEAPIORAID_MIN_PHYS_SEGMENTS; + else if (sg_tablesize > LEAPIORAID_MAX_PHYS_SEGMENTS) { + sg_tablesize = min_t(unsigned short, sg_tablesize, + LEAPIORAID_MAX_SG_SEGMENTS); + pr_warn( + "%s sg_tablesize(%u) is bigger than kernel defined %s(%u)\n", + ioc->name, + sg_tablesize, LEAPIORAID_MAX_PHYS_SEGMENTS_STRING, + LEAPIORAID_MAX_PHYS_SEGMENTS); + } + ioc->shost->sg_tablesize = sg_tablesize; + ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)), + (facts->RequestCredit / 4)); + if (ioc->internal_depth < LEAPIORAID_INTERNAL_CMDS_COUNT) { + if (facts->RequestCredit <= (LEAPIORAID_INTERNAL_CMDS_COUNT + + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + pr_err( + "%s RequestCredits not enough, it has %d credits\n", + ioc->name, + facts->RequestCredit); + return -ENOMEM; + } + ioc->internal_depth = 10; + } + ioc->hi_priority_depth = ioc->internal_depth - (5); + if (reset_devices) + max_request_credit = min_t(u16, facts->RequestCredit, + (LEAPIORAID_KDUMP_SCSI_IO_DEPTH + + ioc->internal_depth)); + else + max_request_credit = min_t(u16, facts->RequestCredit, + LEAPIORAID_MAX_HBA_QUEUE_DEPTH); +retry: + ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth; + ioc->request_sz = facts->IOCRequestFrameSize * 4; + ioc->reply_sz = facts->ReplyFrameSize * 4; + if (facts->IOCMaxChainSegmentSize) + ioc->chain_segment_sz = + facts->IOCMaxChainSegmentSize * LEAPIORAID_MAX_CHAIN_ELEMT_SZ; + else + ioc->chain_segment_sz = + LEAPIORAID_DEFAULT_NUM_FWCHAIN_ELEMTS * LEAPIORAID_MAX_CHAIN_ELEMT_SZ; + sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee); +retry_allocation: + total_sz = 0; + max_sge_elements = + ioc->request_sz - + ((sizeof(struct LeapioraidSCSIIOReq_t) - + sizeof(union LEAPIORAID_IEEE_SGE_IO_UNION)) + 2 * sge_size); + ioc->max_sges_in_main_message = max_sge_elements / sge_size; + max_sge_elements = ioc->chain_segment_sz - sge_size; + ioc->max_sges_in_chain_message = max_sge_elements / sge_size; + chains_needed_per_io = ((ioc->shost->sg_tablesize - + ioc->max_sges_in_main_message) / + ioc->max_sges_in_chain_message) + + 1; + if (chains_needed_per_io > facts->MaxChainDepth) { + chains_needed_per_io = facts->MaxChainDepth; + ioc->shost->sg_tablesize = min_t(u16, + ioc->max_sges_in_main_message + + (ioc->max_sges_in_chain_message * + chains_needed_per_io), + ioc->shost->sg_tablesize); + } 
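+	/*
+	 * Descriptive note (editor-added comment, derived from the code
+	 * below): lock in the chain segments needed per I/O, then size
+	 * the reply queues from the HBA queue depth. The reply free
+	 * queue carries 64 spare entries, and the reply descriptor post
+	 * queue (queue depth + reply free depth + 1) is rounded up to a
+	 * multiple of 16 and capped at the maximum reported by IOC
+	 * Facts; when the cap is hit, hba_queue_depth is recomputed to
+	 * fit underneath it.
+	 */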
+ ioc->chains_needed_per_io = chains_needed_per_io; + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; + ioc->reply_post_queue_depth = ioc->hba_queue_depth + + ioc->reply_free_queue_depth + 1; + if (ioc->reply_post_queue_depth % 16) + ioc->reply_post_queue_depth += + 16 - (ioc->reply_post_queue_depth % 16); + if (ioc->reply_post_queue_depth > + facts->MaxReplyDescriptorPostQueueDepth) { + ioc->reply_post_queue_depth = + facts->MaxReplyDescriptorPostQueueDepth - + (facts->MaxReplyDescriptorPostQueueDepth % 16); + ioc->hba_queue_depth = + ((ioc->reply_post_queue_depth - 64) / 2) - 1; + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; + } + pr_info( + "%s scatter gather: sge_in_main_msg(%d),\n\t\t" + "sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n", + ioc->name, + ioc->max_sges_in_main_message, + ioc->max_sges_in_chain_message, + ioc->shost->sg_tablesize, + ioc->chains_needed_per_io); + ioc->scsiio_depth = ioc->hba_queue_depth - + ioc->hi_priority_depth - ioc->internal_depth; + ioc->shost->can_queue = + ioc->scsiio_depth - LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT; + dinitprintk(ioc, pr_info("%s scsi host: can_queue depth (%d)\n", ioc->name, + ioc->shost->can_queue)); + sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); + sz += (ioc->hi_priority_depth * ioc->request_sz); + sz += (ioc->internal_depth * ioc->request_sz); + ioc->request_dma_sz = sz; + ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz, + &ioc->request_dma, GFP_KERNEL); + if (!ioc->request) { + if (ioc->scsiio_depth < LEAPIORAID_SAS_QUEUE_DEPTH) { + rc = -ENOMEM; + goto out; + } + retry_sz = 64; + if ((ioc->hba_queue_depth - retry_sz) > + (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc->hba_queue_depth -= retry_sz; + goto retry_allocation; + } else { + rc = -ENOMEM; + goto out; + } + } + memset(ioc->request, 0, sz); + if (retry_sz) + pr_err( + "%s request pool: dma_alloc_consistent succeed:\n\t\t" + "hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n", + ioc->name, + ioc->hba_queue_depth, + ioc->chains_needed_per_io, + ioc->request_sz, + sz / 1024); + ioc->hi_priority = + ioc->request + ((ioc->scsiio_depth + 1) * ioc->request_sz); + ioc->hi_priority_dma = + ioc->request_dma + ((ioc->scsiio_depth + 1) * ioc->request_sz); + ioc->internal = + ioc->hi_priority + (ioc->hi_priority_depth * ioc->request_sz); + ioc->internal_dma = + ioc->hi_priority_dma + (ioc->hi_priority_depth * ioc->request_sz); + pr_info( + "%s request pool(0x%p) - dma(0x%llx):\n\t\t" + "depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->name, + ioc->request, + (unsigned long long)ioc->request_dma, + ioc->hba_queue_depth, + ioc->request_sz, + (ioc->hba_queue_depth * ioc->request_sz) / 1024); + total_sz += sz; + ioc->io_queue_num = kcalloc(ioc->scsiio_depth, sizeof(u16), GFP_KERNEL); + if (!ioc->io_queue_num) { + rc = -ENOMEM; + goto out; + } + dinitprintk(ioc, pr_info("%s scsiio(0x%p): depth(%d)\n", + ioc->name, ioc->request, ioc->scsiio_depth)); + ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, + sizeof(struct leapioraid_request_tracker), GFP_KERNEL); + if (!ioc->hpr_lookup) { + rc = -ENOMEM; + goto out; + } + ioc->hi_priority_smid = ioc->scsiio_depth + 1; + dinitprintk(ioc, pr_info( + "%s hi_priority(0x%p): depth(%d), start smid(%d)\n", + ioc->name, ioc->hi_priority, ioc->hi_priority_depth, + ioc->hi_priority_smid)); + ioc->internal_lookup = + kcalloc(ioc->internal_depth, sizeof(struct leapioraid_request_tracker), + GFP_KERNEL); + if (!ioc->internal_lookup) { + pr_err("%s internal_lookup: kcalloc 
failed\n", + ioc->name); + rc = -ENOMEM; + goto out; + } + ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; + dinitprintk(ioc, pr_info( + "%s internal(0x%p): depth(%d), start smid(%d)\n", + ioc->name, ioc->internal, ioc->internal_depth, + ioc->internal_smid)); + sz = ioc->scsiio_depth * sizeof(struct leapioraid_chain_lookup); + ioc->chain_lookup = kzalloc(sz, GFP_KERNEL); + if (!ioc->chain_lookup) { + if ((max_request_credit - 64) > + (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + max_request_credit -= 64; + leapioraid_base_release_memory_pools(ioc); + goto retry; + } else { + pr_err( + "%s chain_lookup: __get_free_pages failed\n", + ioc->name); + rc = -ENOMEM; + goto out; + } + } + sz = ioc->chains_needed_per_io * sizeof(struct leapioraid_chain_tracker); + for (i = 0; i < ioc->scsiio_depth; i++) { + ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL); + if (!ioc->chain_lookup[i].chains_per_smid) { + if ((max_request_credit - 64) > + (ioc->internal_depth + + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + max_request_credit -= 64; + leapioraid_base_release_memory_pools(ioc); + goto retry; + } else { + pr_err("%s chain_lookup: kzalloc failed\n", ioc->name); + rc = -ENOMEM; + goto out; + } + } + } + ioc->chains_per_prp_buffer = 0; + rc = leapioraid_base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) { + if (ioc->use_32bit_dma && ioc->dma_mask > 32) + goto try_32bit_dma; + else { + if ((max_request_credit - 64) > + (ioc->internal_depth + + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + max_request_credit -= 64; + leapioraid_base_release_memory_pools(ioc); + goto retry_allocation; + } else { + pr_err("%s chain_lookup: dma_pool_alloc failed\n", ioc->name); + return -ENOMEM; + } + } + } + total_sz += ioc->chain_segment_sz * + ((ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) * + ioc->scsiio_depth); + sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; + rc = leapioraid_base_allocate_sense_dma_pool(ioc, sense_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sense_sz; + sz = ioc->reply_free_queue_depth * ioc->reply_sz; + rc = leapioraid_base_allocate_reply_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sz; + sz = ioc->reply_free_queue_depth * 4; + rc = leapioraid_base_allocate_reply_free_dma_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sz; + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(struct LeapioraidDefaultRepDesc_t); + rdpq_sz = reply_post_free_sz * LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK; + if ((leapioraid_base_is_controller_msix_enabled(ioc) + && !ioc->rdpq_array_enable) + || (ioc->reply_queue_count < LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK)) + rdpq_sz = reply_post_free_sz * ioc->reply_queue_count; + rc = base_alloc_rdpq_dma_pool(ioc, rdpq_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + else { + if (ioc->rdpq_array_enable && rc == 0) { + reply_post_free_array_sz = ioc->reply_queue_count * + sizeof(struct LeapioraidIOCInitRDPQArrayEntry); + rc = leapioraid_base_allocate_reply_post_free_array( + ioc, reply_post_free_array_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + } + } + total_sz += rdpq_sz; + ioc->config_page_sz = 512; + ioc->config_page = 
dma_alloc_coherent(&ioc->pdev->dev, + ioc->config_page_sz, + &ioc->config_page_dma, + GFP_KERNEL); + if (!ioc->config_page) { + pr_err("%s config page: dma_pool_alloc failed\n", ioc->name); + rc = -ENOMEM; + goto out; + } + pr_err("%s config page(0x%p) - dma(0x%llx): size(%d)\n", + ioc->name, ioc->config_page, + (unsigned long long)ioc->config_page_dma, + ioc->config_page_sz); + total_sz += ioc->config_page_sz; + pr_info("%s Allocated physical memory: size(%d kB)\n", + ioc->name, total_sz / 1024); + pr_info( + "%s Current IOC Queue Depth(%d), Max Queue Depth(%d)\n", + ioc->name, + ioc->shost->can_queue, + facts->RequestCredit); + return 0; +try_32bit_dma: + leapioraid_base_release_memory_pools(ioc); + if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { + if (leapioraid_base_config_dma_addressing(ioc, ioc->pdev) != 0) { + pr_err("Setting 32 bit coherent DMA mask Failed %s\n", + pci_name(ioc->pdev)); + return -ENODEV; + } + } else if (leapioraid_base_reduce_hba_queue_depth(ioc) != 0) + return -ENOMEM; + goto retry_allocation; +out: + return rc; +} + +static void +leapioraid_base_flush_ios_and_panic( + struct LEAPIORAID_ADAPTER *ioc, u16 fault_code) +{ + ioc->adapter_over_temp = 1; + leapioraid_base_stop_smart_polling(ioc); + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_flush_running_cmds(ioc); + leapioraid_print_fault_code(ioc, fault_code); +} + +u32 +leapioraid_base_get_iocstate(struct LEAPIORAID_ADAPTER *ioc, int cooked) +{ + u32 s, sc; + + s = ioc->base_readl( + &ioc->chip->Doorbell, LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + sc = s & LEAPIORAID_IOC_STATE_MASK; + if (sc != LEAPIORAID_IOC_STATE_MASK) { + if ((sc == LEAPIORAID_IOC_STATE_FAULT) && + ((s & LEAPIORAID_DOORBELL_DATA_MASK) == + LEAPIORAID_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED)) { + leapioraid_base_flush_ios_and_panic(ioc, + s & + LEAPIORAID_DOORBELL_DATA_MASK); + panic("TEMPERATURE FAULT: STOPPING; panic in %s\n", + __func__); + } + } + return cooked ? 
sc : s; +} + +static int +leapioraid_base_send_ioc_reset( + struct LEAPIORAID_ADAPTER *ioc, u8 reset_type, int timeout) +{ + u32 ioc_state; + int r = 0; + unsigned long flags; + + if (reset_type != LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET) { + pr_err("%s %s: unknown reset_type\n", + ioc->name, __func__); + return -EFAULT; + } + if (!(ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_EVENT_REPLAY)) + return -EFAULT; + pr_info("%s sending message unit reset !!\n", + ioc->name); + writel(reset_type << LEAPIORAID_DOORBELL_FUNCTION_SHIFT, + &ioc->chip->Doorbell); + if ((leapioraid_base_wait_for_doorbell_ack(ioc, 15))) + r = -EFAULT; + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP + && (ioc->is_driver_loading == 1 + || ioc->fault_reset_work_q == NULL)) { + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + leapioraid_base_coredump_info(ioc, ioc_state); + leapioraid_base_wait_for_coredump_completion(ioc, __func__); + r = -EFAULT; + goto out; + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (r != 0) + goto out; + ioc_state = + leapioraid_base_wait_on_iocstate(ioc, LEAPIORAID_IOC_STATE_READY, + timeout); + if (ioc_state) { + pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + r = -EFAULT; + goto out; + } +out: + pr_info("%s message unit reset: %s\n", + ioc->name, ((r == 0) ? "SUCCESS" : "FAILED")); + return r; +} + +int +leapioraid_wait_for_ioc_to_operational(struct LEAPIORAID_ADAPTER *ioc, + int wait_count) +{ + int wait_state_count = 0; + u32 ioc_state; + + if (leapioraid_base_pci_device_is_unplugged(ioc)) + return -EFAULT; + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + while (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + if (leapioraid_base_pci_device_is_unplugged(ioc)) + return -EFAULT; + if (ioc->is_driver_loading) + return -ETIME; + if (wait_state_count++ == wait_count) { + pr_err( + "%s %s: failed due to ioc not operational\n", + ioc->name, __func__); + return -EFAULT; + } + ssleep(1); + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + pr_info("%s %s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info("%s %s: ioc is operational\n", + ioc->name, __func__); + return 0; +} + +int +leapioraid_base_sas_iounit_control(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSasIoUnitControlRep_t *mpi_reply, + struct LeapioraidSasIoUnitControlReq_t *mpi_request) +{ + u16 smid; + u8 issue_reset; + int rc; + void *request; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + mutex_lock(&ioc->base_cmds.mutex); + if (ioc->base_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: base_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = 0; + ioc->base_cmds.status = LEAPIORAID_CMD_PENDING; + request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(struct LeapioraidSasIoUnitControlReq_t)); + if (mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_LINK_RESET) + 
ioc->ioc_link_reset_in_progress = 1; + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if ((mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_LINK_RESET) && + ioc->ioc_link_reset_in_progress) + ioc->ioc_link_reset_in_progress = 0; + if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof + (struct LeapioraidSasIoUnitControlReq_t) + / 4, issue_reset); + goto issue_host_reset; + } + if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(struct LeapioraidSasIoUnitControlRep_t)); + else + memset(mpi_reply, 0, sizeof(struct LeapioraidSasIoUnitControlRep_t)); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + goto out; +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + rc = -EFAULT; +out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +int +leapioraid_base_scsi_enclosure_processor(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSepRep_t *mpi_reply, + struct LeapioraidSepReq_t *mpi_request) +{ + u16 smid; + u8 issue_reset; + int rc; + void *request; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + mutex_lock(&ioc->base_cmds.mutex); + if (ioc->base_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: base_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = 0; + ioc->base_cmds.status = LEAPIORAID_CMD_PENDING; + request = leapioraid_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(struct LeapioraidSepReq_t)); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof(struct LeapioraidSepReq_t) / 4, + issue_reset); + goto issue_host_reset; + } + if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(struct LeapioraidSepRep_t)); + else + memset(mpi_reply, 0, sizeof(struct LeapioraidSepRep_t)); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + goto out; +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + rc = -EFAULT; +out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +static int +leapioraid_base_get_port_facts(struct LEAPIORAID_ADAPTER *ioc, int port) +{ + struct LeapioraidPortFactsReq_t mpi_request; + struct LeapioraidPortFactsRep_t mpi_reply; + struct leapioraid_port_facts *pfacts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + mpi_reply_sz = sizeof(struct LeapioraidPortFactsRep_t); + mpi_request_sz = sizeof(struct LeapioraidPortFactsReq_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = LEAPIORAID_FUNC_PORT_FACTS; + 
mpi_request.PortNumber = port; + r = leapioraid_base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *) &mpi_request, + mpi_reply_sz, + (u16 *) &mpi_reply, 5); + if (r != 0) { + pr_err("%s %s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + pfacts = &ioc->pfacts[port]; + memset(pfacts, 0, sizeof(struct leapioraid_port_facts)); + pfacts->PortNumber = mpi_reply.PortNumber; + pfacts->VP_ID = mpi_reply.VP_ID; + pfacts->VF_ID = mpi_reply.VF_ID; + pfacts->MaxPostedCmdBuffers = + le16_to_cpu(mpi_reply.MaxPostedCmdBuffers); + return 0; +} + +static int +leapioraid_base_send_ioc_init(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidIOCInitReq_t mpi_request; + struct LeapioraidIOCInitRep_t mpi_reply; + int i, r = 0; + ktime_t current_time; + u16 ioc_status; + u32 reply_post_free_ary_sz; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + memset(&mpi_request, 0, sizeof(struct LeapioraidIOCInitReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_IOC_INIT; + mpi_request.WhoInit = LEAPIORAID_WHOINIT_HOST_DRIVER; + mpi_request.VF_ID = 0; + mpi_request.VP_ID = 0; + mpi_request.MsgVersion = cpu_to_le16(0x0206); + mpi_request.HeaderVersion = cpu_to_le16(0x3A00); + mpi_request.HostPageSize = 12; + if (leapioraid_base_is_controller_msix_enabled(ioc)) + mpi_request.HostMSIxVectors = ioc->reply_queue_count; + mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz / 4); + mpi_request.ReplyDescriptorPostQueueDepth = + cpu_to_le16(ioc->reply_post_queue_depth); + mpi_request.ReplyFreeQueueDepth = + cpu_to_le16(ioc->reply_free_queue_depth); + mpi_request.SenseBufferAddressHigh = + cpu_to_le32((u64) ioc->sense_dma >> 32); + mpi_request.SystemReplyAddressHigh = + cpu_to_le32((u64) ioc->reply_dma >> 32); + mpi_request.SystemRequestFrameBaseAddress = + cpu_to_le64((u64) ioc->request_dma); + mpi_request.ReplyFreeQueueAddress = + cpu_to_le64((u64) ioc->reply_free_dma); + if (ioc->rdpq_array_enable) { + reply_post_free_ary_sz = ioc->reply_queue_count * + sizeof(struct LeapioraidIOCInitRDPQArrayEntry); + memset(ioc->reply_post_free_array, 0, reply_post_free_ary_sz); + for (i = 0; i < ioc->reply_queue_count; i++) + ioc->reply_post_free_array[i].RDPQBaseAddress = + cpu_to_le64((u64) ioc->reply_post[i].reply_post_free_dma); + mpi_request.MsgFlags = LEAPIORAID_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64) ioc->reply_post_free_array_dma); + } else { + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64) ioc->reply_post[0].reply_post_free_dma); + } + mpi_request.ConfigurationFlags |= 0x0002; + current_time = ktime_get_real(); + mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); + if (ioc->logging_level & LEAPIORAID_DEBUG_INIT) { + + pr_info("%s \toffset:data\n", ioc->name); + leapioraid_debug_dump_mf(&mpi_request, + sizeof(struct LeapioraidIOCInitReq_t) / 4); + + } + r = leapioraid_base_handshake_req_reply_wait(ioc, + sizeof + (struct LeapioraidIOCInitReq_t), + (u32 *) &mpi_request, + sizeof + (struct LeapioraidIOCInitRep_t), + (u16 *) &mpi_reply, 30); + if (r != 0) { + pr_err("%s %s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS || mpi_reply.IOCLogInfo) { + pr_err("%s %s: failed\n", ioc->name, + __func__); + r = -EIO; + } + ioc->timestamp_update_count = 0; + return r; +} + +int +leapioraid_base_trace_log_init(struct LEAPIORAID_ADAPTER *ioc) +{ + 
struct LeapioraidIOCLogReq_t mpi_request; + struct LeapioraidIOCLogRep_t mpi_reply; + u16 ioc_status; + u32 r; + + dinitprintk(ioc, + pr_info("%s %s\n", ioc->name, __func__)); + if (ioc->log_buffer == NULL) { + ioc->log_buffer = + dma_alloc_coherent(&ioc->pdev->dev, SYS_LOG_BUF_SIZE, + &ioc->log_buffer_dma, GFP_KERNEL); + } + memset(&mpi_request, 0, sizeof(struct LeapioraidIOCLogReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_LOG_INIT; + mpi_request.BufAddr = ioc->log_buffer_dma; + mpi_request.BufSize = SYS_LOG_BUF_SIZE; + r = leapioraid_base_handshake_req_reply_wait(ioc, + sizeof + (struct LeapioraidIOCLogReq_t), + (u32 *) &mpi_request, + sizeof + (struct LeapioraidIOCLogRep_t), + (u16 *) &mpi_reply, 30); + if (r != 0) { + pr_err("%s %s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS || mpi_reply.IOCLogInfo) { + pr_err("%s %s: failed\n", ioc->name, + __func__); + r = -EIO; + } + return r; +} + +static int +leapioraid_base_trace_log_exit(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->log_buffer) + dma_free_coherent(&ioc->pdev->dev, SYS_LOG_BUF_SIZE, + ioc->log_buffer, ioc->log_buffer_dma); + return 0; +} + +u8 +leapioraid_port_enable_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + u16 ioc_status; + + if (ioc->port_enable_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + if (mpi_reply->Function != LEAPIORAID_FUNC_PORT_ENABLE) + return 1; + ioc->port_enable_cmds.status &= ~LEAPIORAID_CMD_PENDING; + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_COMPLETE; + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + memcpy(ioc->port_enable_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + ioc->port_enable_failed = 1; + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_COMPLETE_ASYNC) { + ioc->port_enable_cmds.status &= ~LEAPIORAID_CMD_COMPLETE_ASYNC; + if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) { + leapioraid_port_enable_complete(ioc); + return 1; + } + + ioc->start_scan_failed = ioc_status; + ioc->start_scan = 0; + return 1; + } + complete(&ioc->port_enable_cmds.done); + return 1; +} + +static int +leapioraid_base_send_port_enable(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidPortEnableReq_t *mpi_request; + struct LeapioraidPortEnableRep_t *mpi_reply; + int r = 0; + u16 smid; + u16 ioc_status; + + pr_info("%s sending port enable !!\n", ioc->name); + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) { + pr_err( + "%s %s: internal command already in use\n", ioc->name, + __func__); + return -EAGAIN; + } + smid = leapioraid_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + ioc->port_enable_cmds.status = LEAPIORAID_CMD_PENDING; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidPortEnableReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_PORT_ENABLE; + init_completion(&ioc->port_enable_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300 * HZ); + if 
(!(ioc->port_enable_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidPortEnableReq_t) / 4); + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_RESET) + r = -EFAULT; + else + r = -ETIME; + goto out; + } + mpi_reply = ioc->port_enable_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s %s: failed with (ioc_status=0x%08x)\n", ioc->name, + __func__, ioc_status); + r = -EFAULT; + goto out; + } +out: + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + pr_info("%s port enable: %s\n", ioc->name, ((r == 0) ? + "SUCCESS" + : + "FAILED")); + return r; +} + +int +leapioraid_port_enable(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidPortEnableReq_t *mpi_request; + u16 smid; + + pr_info("%s sending port enable !!\n", ioc->name); + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) { + pr_err( + "%s %s: internal command already in use\n", ioc->name, + __func__); + return -EAGAIN; + } + smid = leapioraid_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + ioc->drv_internal_flags |= LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED; + ioc->port_enable_cmds.status = LEAPIORAID_CMD_PENDING; + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_COMPLETE_ASYNC; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidPortEnableReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_PORT_ENABLE; + ioc->put_smid_default(ioc, smid); + return 0; +} + +static int +leapioraid_base_determine_wait_on_discovery(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->ir_firmware) + return 1; + if (!ioc->bios_pg3.BiosVersion) + return 0; + if ((ioc->bios_pg2.CurrentBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK) == + LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + (ioc->bios_pg2.ReqBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK) == + LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + (ioc->bios_pg2.ReqAltBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK) == + LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED) + return 0; + return 1; +} + +static void +leapioraid_base_unmask_events(struct LEAPIORAID_ADAPTER *ioc, u16 event) +{ + u32 desired_event; + + if (event >= 128) + return; + desired_event = (1 << (event % 32)); + if (event < 32) + ioc->event_masks[0] &= ~desired_event; + else if (event < 64) + ioc->event_masks[1] &= ~desired_event; + else if (event < 96) + ioc->event_masks[2] &= ~desired_event; + else if (event < 128) + ioc->event_masks[3] &= ~desired_event; +} + +static int +leapioraid_base_event_notification(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidEventNotificationReq_t *mpi_request; + u16 smid; + int r = 0; + int i, issue_diag_reset = 0; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) { + pr_err( + "%s %s: internal command already in use\n", ioc->name, + __func__); + return -EAGAIN; + } + smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + ioc->base_cmds.status = LEAPIORAID_CMD_PENDING; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct 
LeapioraidEventNotificationReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_EVENT_NOTIFICATION; + mpi_request->VF_ID = 0; + mpi_request->VP_ID = 0; + for (i = 0; i < LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + mpi_request->EventMasks[i] = cpu_to_le32(ioc->event_masks[i]); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, 30 * HZ); + if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidEventNotificationReq_t) / 4); + if (ioc->base_cmds.status & LEAPIORAID_CMD_RESET) + r = -EFAULT; + else + issue_diag_reset = 1; + } else + dinitprintk(ioc, pr_info("%s %s: complete\n", + ioc->name, __func__)); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (issue_diag_reset) { + if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED) + return -EFAULT; + if (leapioraid_base_check_for_fault_and_issue_reset(ioc)) + return -EFAULT; + r = -EAGAIN; + } + return r; +} + +void +leapioraid_base_validate_event_type(struct LEAPIORAID_ADAPTER *ioc, + u32 *event_type) +{ + int i, j; + u32 event_mask, desired_event; + u8 send_update_to_fw; + + for (i = 0, send_update_to_fw = 0; i < + LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++) { + event_mask = ~event_type[i]; + desired_event = 1; + for (j = 0; j < 32; j++) { + if (!(event_mask & desired_event) && + (ioc->event_masks[i] & desired_event)) { + ioc->event_masks[i] &= ~desired_event; + send_update_to_fw = 1; + } + desired_event = (desired_event << 1); + } + } + if (!send_update_to_fw) + return; + mutex_lock(&ioc->base_cmds.mutex); + leapioraid_base_event_notification(ioc); + mutex_unlock(&ioc->base_cmds.mutex); +} + +int +leapioraid_base_make_ioc_ready(struct LEAPIORAID_ADAPTER *ioc, + enum reset_type type) +{ + u32 ioc_state; + int rc; + int count; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (!leapioraid_base_pci_device_is_available(ioc)) + return 0; + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); + count = 0; + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_RESET) { + while ((ioc_state & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_READY) { + if (count++ == 10) { + pr_err( + "%s %s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + return -EFAULT; + } + ssleep(1); + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + } + } + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_READY) + return 0; + if (ioc_state & LEAPIORAID_DOORBELL_USED) { + pr_info("%s unexpected doorbell active!\n", + ioc->name); + goto issue_diag_reset; + } + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP) { + if (ioc->ioc_coredump_loop != 0xFF) { + leapioraid_base_coredump_info(ioc, ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + leapioraid_base_wait_for_coredump_completion(ioc, + __func__); + } + goto issue_diag_reset; + } + if (type == FORCE_BIG_HAMMER) + goto issue_diag_reset; + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_OPERATIONAL) + if (! 
+ (leapioraid_base_send_ioc_reset + (ioc, LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET, 15))) { + return 0; + } +issue_diag_reset: + rc = leapioraid_base_diag_reset(ioc); + return rc; +} + +static int +leapioraid_base_make_ioc_operational(struct LEAPIORAID_ADAPTER *ioc) +{ + int r, rc, i, index; + unsigned long flags; + u32 reply_address; + u16 smid; + struct leapioraid_tr_list *delayed_tr, *delayed_tr_next; + struct leapioraid_sc_list *delayed_sc, *delayed_sc_next; + struct leapioraid_event_ack_list *delayed_event_ack, *delayed_event_ack_next; + struct leapioraid_adapter_reply_queue *reply_q; + union LeapioraidRepDescUnion_t *reply_post_free_contig; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_volume_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_internal_tm_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + list_for_each_entry_safe(delayed_sc, delayed_sc_next, + &ioc->delayed_sc_list, list) { + list_del(&delayed_sc->list); + kfree(delayed_sc); + } + list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next, + &ioc->delayed_event_ack_list, list) { + list_del(&delayed_event_ack->list); + kfree(delayed_event_ack); + } + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + INIT_LIST_HEAD(&ioc->hpr_free_list); + smid = ioc->hi_priority_smid; + for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { + ioc->hpr_lookup[i].cb_idx = 0xFF; + ioc->hpr_lookup[i].smid = smid; + list_add_tail(&ioc->hpr_lookup[i].tracker_list, + &ioc->hpr_free_list); + } + INIT_LIST_HEAD(&ioc->internal_free_list); + smid = ioc->internal_smid; + for (i = 0; i < ioc->internal_depth; i++, smid++) { + ioc->internal_lookup[i].cb_idx = 0xFF; + ioc->internal_lookup[i].smid = smid; + list_add_tail(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); + } + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + for (i = 0, reply_address = (u32) ioc->reply_dma; + i < ioc->reply_free_queue_depth; i++, reply_address += + ioc->reply_sz) { + ioc->reply_free[i] = cpu_to_le32(reply_address); + } + if (ioc->is_driver_loading) + leapioraid_base_assign_reply_queues(ioc); + index = 0; + reply_post_free_contig = ioc->reply_post[0].reply_post_free; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (ioc->rdpq_array_enable) { + reply_q->reply_post_free = + ioc->reply_post[index++].reply_post_free; + } else { + reply_q->reply_post_free = reply_post_free_contig; + reply_post_free_contig += ioc->reply_post_queue_depth; + } + reply_q->reply_post_host_index = 0; + for (i = 0; i < ioc->reply_post_queue_depth; i++) + reply_q->reply_post_free[i].Words = + cpu_to_le64(ULLONG_MAX); + if (!leapioraid_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_free_queue; + } +skip_init_reply_post_free_queue: + r = leapioraid_base_send_ioc_init(ioc); + if (r) { + if (!ioc->is_driver_loading) + return r; + rc = leapioraid_base_check_for_fault_and_issue_reset(ioc); + if (rc || (leapioraid_base_send_ioc_init(ioc))) + return r; + } + ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; + writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (ioc->combined_reply_queue) { + for (i = 
0; i < ioc->nc_reply_index_count; i++) + writel((reply_q->msix_index & 7) << + LEAPIORAID_RPHI_MSIX_INDEX_SHIFT, + ioc->replyPostRegisterIndex[i]); + } else { + writel(reply_q->msix_index << LEAPIORAID_RPHI_MSIX_INDEX_SHIFT, + &ioc->chip->ReplyPostHostIndex); + } + if (!leapioraid_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_host_index; + } +skip_init_reply_post_host_index: + leapioraid_base_unmask_interrupts(ioc); + r = leapioraid_base_display_fwpkg_version(ioc); + if (r) + return r; + r = leapioraid_base_static_config_pages(ioc); + if (r) + return r; + r = leapioraid_base_event_notification(ioc); + if (r) + return r; + leapioraid_base_start_hba_unplug_watchdog(ioc); + if (!ioc->shost_recovery) { + ioc->wait_for_discovery_to_complete = + leapioraid_base_determine_wait_on_discovery(ioc); + return r; + } + r = leapioraid_base_send_port_enable(ioc); + if (r) + return r; + return r; +} + +void +leapioraid_base_free_resources(struct LEAPIORAID_ADAPTER *ioc) +{ + dexitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (!ioc->chip_phys) + return; + leapioraid_base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + leapioraid_base_make_ioc_ready(ioc, SOFT_RESET); + ioc->shost_recovery = 0; + leapioraid_base_unmap_resources(ioc); +} + +int +leapioraid_base_attach(struct LEAPIORAID_ADAPTER *ioc) +{ + int r, rc, i; + int cpu_id, last_cpu_id = 0; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + ioc->cpu_count = num_online_cpus(); + for_each_online_cpu(cpu_id) + last_cpu_id = cpu_id; + ioc->cpu_msix_table_sz = last_cpu_id + 1; + ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); + ioc->reply_queue_count = 1; + if (!ioc->cpu_msix_table) { + r = -ENOMEM; + goto out_free_resources; + } + ioc->rdpq_array_enable_assigned = 0; + ioc->use_32bit_dma = 0; + ioc->dma_mask = 64; + ioc->base_readl = &leapioraid_base_readl_aero; + ioc->smp_affinity_enable = smp_affinity_enable; + r = leapioraid_base_map_resources(ioc); + if (r) + goto out_free_resources; + pci_set_drvdata(ioc->pdev, ioc->shost); + r = leapioraid_base_get_ioc_facts(ioc); + if (r) { + rc = leapioraid_base_check_for_fault_and_issue_reset(ioc); + if (rc || (leapioraid_base_get_ioc_facts(ioc))) + goto out_free_resources; + } + + ioc->build_sg_scmd = &leapioraid_base_build_sg_scmd_ieee; + ioc->build_sg = &leapioraid_base_build_sg_ieee; + ioc->build_zero_len_sge = + &leapioraid_base_build_zero_len_sge_ieee; + ioc->sge_size_ieee = sizeof(struct LEAPIORAID_IEEE_SGE_SIMPLE64); + if (ioc->high_iops_queues) + ioc->get_msix_index_for_smlio = + &leapioraid_base_get_high_iops_msix_index; + else + ioc->get_msix_index_for_smlio = &leapioraid_base_get_msix_index; + + if (ioc->atomic_desc_capable) { + ioc->put_smid_default = + &leapioraid_base_put_smid_default_atomic; + ioc->put_smid_scsi_io = + &leapioraid_base_put_smid_scsi_io_atomic; + ioc->put_smid_fast_path = + &leapioraid_base_put_smid_fast_path_atomic; + ioc->put_smid_hi_priority = + &leapioraid_base_put_smid_hi_priority_atomic; + } else { + ioc->put_smid_default = &leapioraid_base_put_smid_default; + ioc->put_smid_scsi_io = &leapioraid_base_put_smid_scsi_io; + ioc->put_smid_fast_path = &leapioraid_base_put_smid_fast_path; + ioc->put_smid_hi_priority = + &leapioraid_base_put_smid_hi_priority; + } + ioc->build_sg_mpi = &leapioraid_base_build_sg; + ioc->build_zero_len_sge_mpi = &leapioraid_base_build_zero_len_sge; + r = leapioraid_base_make_ioc_ready(ioc, SOFT_RESET); + if (r) + goto out_free_resources; + if (ioc->open_pcie_trace) { + r = 
leapioraid_base_trace_log_init(ioc);
+		if (r) {
+			pr_err("log init failed\n");
+			goto out_free_resources;
+		}
+	}
+	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
+			      sizeof(struct leapioraid_port_facts), GFP_KERNEL);
+	if (!ioc->pfacts) {
+		r = -ENOMEM;
+		goto out_free_resources;
+	}
+	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
+		r = leapioraid_base_get_port_facts(ioc, i);
+		if (r) {
+			rc = leapioraid_base_check_for_fault_and_issue_reset(ioc);
+			if (rc || (leapioraid_base_get_port_facts(ioc, i)))
+				goto out_free_resources;
+		}
+	}
+	r = leapioraid_base_allocate_memory_pools(ioc);
+	if (r)
+		goto out_free_resources;
+	if (irqpoll_weight > 0)
+		ioc->thresh_hold = irqpoll_weight;
+	else
+		ioc->thresh_hold = ioc->hba_queue_depth / 4;
+	leapioraid_base_init_irqpolls(ioc);
+	init_waitqueue_head(&ioc->reset_wq);
+	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+	if (ioc->facts.MaxDevHandle % 8)
+		ioc->pd_handles_sz++;
+	ioc->pd_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL);
+	if (!ioc->pd_handles) {
+		r = -ENOMEM;
+		goto out_free_resources;
+	}
+	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL);
+	if (!ioc->blocking_handles) {
+		r = -ENOMEM;
+		goto out_free_resources;
+	}
+	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
+	if (ioc->facts.MaxDevHandle % 8)
+		ioc->pend_os_device_add_sz++;
+	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
+					  GFP_KERNEL);
+	if (!ioc->pend_os_device_add) {
+		r = -ENOMEM;
+		goto out_free_resources;
+	}
+	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
+	ioc->device_remove_in_progress =
+	    kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
+	if (!ioc->device_remove_in_progress) {
+		r = -ENOMEM;
+		goto out_free_resources;
+	}
+	ioc->tm_tr_retry_sz = ioc->facts.MaxDevHandle * sizeof(u8);
+	ioc->tm_tr_retry = kzalloc(ioc->tm_tr_retry_sz, GFP_KERNEL);
+	if (!ioc->tm_tr_retry) {
+		r = -ENOMEM;
+		goto out_free_resources;
+	}
+	ioc->fwfault_debug = leapioraid_fwfault_debug;
+	mutex_init(&ioc->base_cmds.mutex);
+	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+	ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED;
+	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+	ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
+	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+	ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED;
+	mutex_init(&ioc->transport_cmds.mutex);
+	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+	ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
+	mutex_init(&ioc->scsih_cmds.mutex);
+	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+	ioc->tm_cmds.status = LEAPIORAID_CMD_NOT_USED;
+	mutex_init(&ioc->tm_cmds.mutex);
+	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+	ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED;
+	mutex_init(&ioc->config_cmds.mutex);
+	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+	ioc->ctl_cmds.status = LEAPIORAID_CMD_NOT_USED;
+	mutex_init(&ioc->ctl_cmds.mutex);
+
+	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
+	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
+	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
+	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
+		r = -ENOMEM;
+		goto out_free_resources;
+	}
+	for (i = 0; i < LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+		ioc->event_masks[i] = -1;
+	leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_SAS_DISCOVERY);
+	leapioraid_base_unmask_events(ioc,
+				      LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE);
+
leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_VOLUME); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_PHYSICAL_DISK); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_OPERATION_STATUS); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_LOG_ENTRY_ADDED); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_TEMP_THRESHOLD); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR); + r = leapioraid_base_make_ioc_operational(ioc); + if (r == -EAGAIN) + r = leapioraid_base_make_ioc_operational(ioc); + if (r) + goto out_free_resources; + memcpy(&ioc->prev_fw_facts, &ioc->facts, + sizeof(struct leapioraid_facts)); + ioc->non_operational_loop = 0; + ioc->ioc_coredump_loop = 0; + ioc->got_task_abort_from_ioctl = 0; + ioc->got_task_abort_from_sysfs = 0; + return 0; +out_free_resources: + ioc->remove_host = 1; + leapioraid_base_free_resources(ioc); + leapioraid_base_release_memory_pools(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->tm_tr_retry); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->pfacts); + ioc->ctl_cmds.reply = NULL; + ioc->base_cmds.reply = NULL; + ioc->tm_cmds.reply = NULL; + ioc->scsih_cmds.reply = NULL; + ioc->transport_cmds.reply = NULL; + ioc->config_cmds.reply = NULL; + ioc->pfacts = NULL; + return r; +} + +void +leapioraid_base_detach(struct LEAPIORAID_ADAPTER *ioc) +{ + dexitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (ioc->open_pcie_trace) + leapioraid_base_trace_log_exit(ioc); + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + leapioraid_base_free_resources(ioc); + leapioraid_base_release_memory_pools(ioc); + leapioraid_free_enclosure_list(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->tm_tr_retry); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); + kfree(ioc->pfacts); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); +} + +static void +leapioraid_base_clear_outstanding_leapioraid_commands(struct LEAPIORAID_ADAPTER + *ioc) +{ + struct leapioraid_internal_qcmd *scsih_qcmd, *scsih_qcmd_next; + unsigned long flags; + + if (ioc->transport_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->transport_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->transport_cmds.smid); + complete(&ioc->transport_cmds.done); + } + if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->base_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->base_cmds.smid); + 
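/* wake the thread waiting on base_cmds.done; it sees LEAPIORAID_CMD_RESET and fails the command instead of timing out */ +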
complete(&ioc->base_cmds.done); + } + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->port_enable_failed = 1; + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->port_enable_cmds.smid); + if (ioc->is_driver_loading) { + ioc->start_scan_failed = + LEAPIORAID_IOCSTATUS_INTERNAL_ERROR; + ioc->start_scan = 0; + } else + complete(&ioc->port_enable_cmds.done); + } + if (ioc->config_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->config_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->config_cmds.smid); + ioc->config_cmds.smid = USHORT_MAX; + complete(&ioc->config_cmds.done); + } + spin_lock_irqsave(&ioc->scsih_q_internal_lock, flags); + list_for_each_entry_safe(scsih_qcmd, scsih_qcmd_next, + &ioc->scsih_q_intenal_cmds, list) { + if ((scsih_qcmd->status) & LEAPIORAID_CMD_PENDING) { + scsih_qcmd->status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, scsih_qcmd->smid); + } + } + spin_unlock_irqrestore(&ioc->scsih_q_internal_lock, flags); +} + +static void +leapioraid_base_reset_handler(struct LEAPIORAID_ADAPTER *ioc, int reset_phase) +{ + leapioraid_scsihost_reset_handler(ioc, reset_phase); + leapioraid_ctl_reset_handler(ioc, reset_phase); + switch (reset_phase) { + case LEAPIORAID_IOC_PRE_RESET_PHASE: + dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n", + ioc->name, __func__)); + break; + case LEAPIORAID_IOC_AFTER_RESET_PHASE: + dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n", + ioc->name, __func__)); + leapioraid_base_clear_outstanding_leapioraid_commands(ioc); + break; + case LEAPIORAID_IOC_DONE_RESET_PHASE: + dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n", + ioc->name, __func__)); + break; + } +} + +void +leapioraid_wait_for_commands_to_complete(struct LEAPIORAID_ADAPTER *ioc) +{ + u32 ioc_state; + unsigned long flags; + u16 i; + struct leapioraid_scsiio_tracker *st; + + ioc->pending_io_count = 0; + if (!leapioraid_base_pci_device_is_available(ioc)) { + pr_err("%s %s: pci error recovery reset or pci device unplug occurred\n", + ioc->name, __func__); + return; + } + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_OPERATIONAL) + return; + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + for (i = 1; i <= ioc->scsiio_depth; i++) { + st = leapioraid_get_st_from_smid(ioc, i); + if (st && st->smid != 0) { + if (st->cb_idx != 0xFF) + ioc->pending_io_count++; + } + } + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + if (!ioc->pending_io_count) + return; + wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ); +} + +static int +leapioraid_base_check_ioc_facts_changes(struct LEAPIORAID_ADAPTER *ioc) +{ + u16 pd_handles_sz, tm_tr_retry_sz; + void *pd_handles = NULL, *blocking_handles = NULL; + void *pend_os_device_add = NULL, *device_remove_in_progress = NULL; + u8 *tm_tr_retry = NULL; + struct leapioraid_facts *old_facts = &ioc->prev_fw_facts; + + if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) { + pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + pd_handles_sz++; + pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, + GFP_KERNEL); + if (!pd_handles) { + pr_err( + "%s Unable to allocate the memory for pd_handles of sz: %d\n", + ioc->name, pd_handles_sz); + return -ENOMEM; + } + memset(pd_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->pd_handles = pd_handles; + blocking_handles = + 
krealloc(ioc->blocking_handles, pd_handles_sz, GFP_KERNEL); + if (!blocking_handles) { + pr_err( + "%s Unable to allocate the memory for blocking_handles of sz: %d\n", + ioc->name, pd_handles_sz); + return -ENOMEM; + } + memset(blocking_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->blocking_handles = blocking_handles; + ioc->pd_handles_sz = pd_handles_sz; + pend_os_device_add = + krealloc(ioc->pend_os_device_add, pd_handles_sz, + GFP_KERNEL); + if (!pend_os_device_add) { + pr_err( + "%s Unable to allocate the memory for pend_os_device_add of sz: %d\n", + ioc->name, pd_handles_sz); + return -ENOMEM; + } + memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0, + (pd_handles_sz - ioc->pend_os_device_add_sz)); + ioc->pend_os_device_add = pend_os_device_add; + ioc->pend_os_device_add_sz = pd_handles_sz; + device_remove_in_progress = + krealloc(ioc->device_remove_in_progress, pd_handles_sz, + GFP_KERNEL); + if (!device_remove_in_progress) { + pr_err( + "%s Unable to allocate the memory for device_remove_in_progress of sz: %d\n", + ioc->name, pd_handles_sz); + return -ENOMEM; + } + memset(device_remove_in_progress + + ioc->device_remove_in_progress_sz, 0, + (pd_handles_sz - ioc->device_remove_in_progress_sz)); + ioc->device_remove_in_progress = device_remove_in_progress; + ioc->device_remove_in_progress_sz = pd_handles_sz; + tm_tr_retry_sz = ioc->facts.MaxDevHandle * sizeof(u8); + tm_tr_retry = krealloc(ioc->tm_tr_retry, tm_tr_retry_sz, + GFP_KERNEL); + if (!tm_tr_retry) { + pr_err( + "%s Unable to allocate the memory for tm_tr_retry of sz: %d\n", + ioc->name, tm_tr_retry_sz); + return -ENOMEM; + } + memset(tm_tr_retry + ioc->tm_tr_retry_sz, 0, + (tm_tr_retry_sz - ioc->tm_tr_retry_sz)); + ioc->tm_tr_retry = tm_tr_retry; + ioc->tm_tr_retry_sz = tm_tr_retry_sz; + } + memcpy(&ioc->prev_fw_facts, &ioc->facts, + sizeof(struct leapioraid_facts)); + return 0; +} + +int +leapioraid_base_hard_reset_handler( + struct LEAPIORAID_ADAPTER *ioc, + enum reset_type type) +{ + int r; + unsigned long flags; + + dtmprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + if (!mutex_trylock(&ioc->reset_in_progress_mutex)) { + do { + ssleep(1); + } while (ioc->shost_recovery == 1); + dtmprintk(ioc, + pr_info("%s %s: exit\n", ioc->name, + __func__)); + return ioc->ioc_reset_status; + } + if (!leapioraid_base_pci_device_is_available(ioc)) { + pr_err( + "%s %s: pci error recovery reset or pci device unplug occurred\n", + ioc->name, __func__); + if (leapioraid_base_pci_device_is_unplugged(ioc)) { + leapioraid_base_pause_mq_polling(ioc); + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + leapioraid_base_resume_mq_polling(ioc); + } + r = 0; + goto out_unlocked; + } + leapioraid_halt_firmware(ioc, 0); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + leapioraid_base_get_iocstate(ioc, 0); + leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_PRE_RESET_PHASE); + leapioraid_wait_for_commands_to_complete(ioc); + leapioraid_base_mask_interrupts(ioc); + leapioraid_base_pause_mq_polling(ioc); + r = leapioraid_base_make_ioc_ready(ioc, type); + if (r) + goto out; + leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_AFTER_RESET_PHASE); + if (ioc->is_driver_loading && ioc->port_enable_failed) { + ioc->remove_host = 1; + r = -EFAULT; + goto out; + } + r = leapioraid_base_get_ioc_facts(ioc); + if (r) + goto out; + r = leapioraid_base_check_ioc_facts_changes(ioc); + if (r) { + pr_err( + 
"%s Some of the parameters got changed in this\n\t\t" + "new firmware image and it requires system reboot\n", + ioc->name); + goto out; + } + if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) + panic( + "%s: Issue occurred with flashing controller firmware.\n\t\t" + "Please reboot the system and ensure that the correct\n\t\t" + "firmware version is running\n", + ioc->name); + r = leapioraid_base_make_ioc_operational(ioc); + if (!r) + leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_DONE_RESET_PHASE); +out: + pr_info("%s %s: %s\n", + ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->ioc_reset_status = r; + ioc->shost_recovery = 0; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + ioc->ioc_reset_count++; + mutex_unlock(&ioc->reset_in_progress_mutex); +#if defined(DISABLE_RESET_SUPPORT) + if (r != 0) { + struct task_struct *p; + + ioc->remove_host = 1; + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + p = kthread_run(leapioraid_remove_dead_ioc_func, ioc, + "leapioraid_dead_ioc_%d", ioc->id); + if (IS_ERR(p)) + pr_err( + "%s %s: Running leapioraid_dead_ioc thread failed !!!!\n", + ioc->name, __func__); + else + pr_err( + "%s %s: Running leapioraid_dead_ioc thread success !!!!\n", + ioc->name, __func__); + } +#else + if (r != 0) + ioc->schedule_dead_ioc_flush_running_cmds(ioc); +#endif + leapioraid_base_resume_mq_polling(ioc); +out_unlocked: + dtmprintk(ioc, pr_info("%s %s: exit\n", ioc->name, + __func__)); + return r; +} + +struct config_request { + u16 sz; + void *page; + dma_addr_t page_dma; +}; + +static void +leapioraid_config_display_some_debug(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + char *calling_function_name, + struct LeapioraidDefaultRep_t *mpi_reply) +{ + struct LeapioraidCfgReq_t *mpi_request; + char *desc = NULL; + + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + switch (mpi_request->Header.PageType & LEAPIORAID_CONFIG_PAGETYPE_MASK) { + case LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT: + desc = "io_unit"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_IOC: + desc = "ioc"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_BIOS: + desc = "bios"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME: + desc = "raid_volume"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING: + desc = "manufacturing"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK: + desc = "physdisk"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_EXTENDED: + switch (mpi_request->ExtPageType) { + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT: + desc = "sas_io_unit"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER: + desc = "sas_expander"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE: + desc = "sas_device"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY: + desc = "sas_phy"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_LOG: + desc = "log"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE: + desc = "enclosure"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG: + desc = "raid_config"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_DRIVER_MAPPING: + desc = "driver_mapping"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PORT: + desc = "sas_port"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING: + desc = "ext_manufacturing"; + break; + } + break; + } + if (!desc) + return; + pr_info("%s %s: %s(%d), action(%d), form(0x%08x), smid(%d)\n", + ioc->name, calling_function_name, desc, + mpi_request->Header.PageNumber, mpi_request->Action, + 
le32_to_cpu(mpi_request->PageAddress), smid); + if (!mpi_reply) + return; + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + pr_err( + "%s \tiocstatus(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); +} + +static int +leapioraid_config_alloc_config_dma_memory(struct LEAPIORAID_ADAPTER *ioc, + struct config_request *mem) +{ + int r = 0; + + if (mem->sz > ioc->config_page_sz) { + mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz, + &mem->page_dma, GFP_KERNEL); + if (!mem->page) + r = -ENOMEM; + } else { + mem->page = ioc->config_page; + mem->page_dma = ioc->config_page_dma; + } + ioc->config_vaddr = mem->page; + return r; +} + +static void +leapioraid_config_free_config_dma_memory(struct LEAPIORAID_ADAPTER *ioc, + struct config_request *mem) +{ + if (mem->sz > ioc->config_page_sz) + dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page, + mem->page_dma); +} + +u8 +leapioraid_config_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + if (ioc->config_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->config_cmds.smid != smid) + return 1; + ioc->config_cmds.status |= LEAPIORAID_CMD_COMPLETE; + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + ioc->config_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + memcpy(ioc->config_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + } + ioc->config_cmds.status &= ~LEAPIORAID_CMD_PENDING; + if (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG) + leapioraid_config_display_some_debug( + ioc, smid, "config_done", mpi_reply); + ioc->config_cmds.smid = USHORT_MAX; + complete(&ioc->config_cmds.done); + return 1; +} + +static int +leapioraid_config_request( + struct LEAPIORAID_ADAPTER *ioc, struct LeapioraidCfgReq_t *mpi_request, + struct LeapioraidCfgRep_t *mpi_reply, int timeout, + void *config_page, u16 config_page_sz) +{ + u16 smid; + struct LeapioraidCfgReq_t *config_request; + int r; + u8 retry_count, issue_host_reset = 0; + struct config_request mem; + u32 ioc_status = UINT_MAX; + u8 issue_reset; + + mutex_lock(&ioc->config_cmds.mutex); + if (ioc->config_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: config_cmd in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->config_cmds.mutex); + return -EAGAIN; + } + retry_count = 0; + memset(&mem, 0, sizeof(struct config_request)); + mpi_request->VF_ID = 0; + mpi_request->VP_ID = 0; + if (config_page) { + mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion; + mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber; + mpi_request->Header.PageType = mpi_reply->Header.PageType; + mpi_request->Header.PageLength = mpi_reply->Header.PageLength; + mpi_request->ExtPageLength = mpi_reply->ExtPageLength; + mpi_request->ExtPageType = mpi_reply->ExtPageType; + if (mpi_request->Header.PageLength) + mem.sz = mpi_request->Header.PageLength * 4; + else + mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; + r = leapioraid_config_alloc_config_dma_memory(ioc, &mem); + if (r != 0) + goto out; + if (mpi_request->Action == + LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT || + mpi_request->Action == + LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM) { + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + LEAPIORAID_CONFIG_COMMON_WRITE_SGLFLAGS + | mem.sz, mem.page_dma); + memcpy(mem.page, config_page, + min_t(u16, mem.sz, config_page_sz)); + } else { + memset(config_page, 0, config_page_sz); + 
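/* read path: clear the caller's buffer now; it is filled from the DMA page only after the read action succeeds below */ +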
ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + LEAPIORAID_CONFIG_COMMON_SGLFLAGS + | mem.sz, mem.page_dma); + memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz)); + } + } +retry_config: + if (retry_count) { + if (retry_count > 2) { + r = -EFAULT; + goto free_mem; + } + pr_info("%s %s: attempting retry (%d)\n", + ioc->name, __func__, retry_count); + } + r = leapioraid_wait_for_ioc_to_operational(ioc, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT); + if (r) { + if (r == -ETIME) + issue_host_reset = 1; + goto free_mem; + } + smid = leapioraid_base_get_smid(ioc, ioc->config_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED; + r = -EAGAIN; + goto free_mem; + } + r = 0; + memset(mpi_reply, 0, sizeof(struct LeapioraidCfgRep_t)); + memset(ioc->config_cmds.reply, 0, sizeof(struct LeapioraidCfgRep_t)); + ioc->config_cmds.status = LEAPIORAID_CMD_PENDING; + config_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->config_cmds.smid = smid; + memcpy(config_request, mpi_request, sizeof(struct LeapioraidCfgReq_t)); + if (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG) + leapioraid_config_display_some_debug(ioc, smid, "config_request", NULL); + init_completion(&ioc->config_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->config_cmds.done, timeout * HZ); + if (!(ioc->config_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request no reply", + NULL); + leapioraid_check_cmd_timeout(ioc, ioc->config_cmds.status, + mpi_request, + sizeof(struct LeapioraidCfgReq_t) / 4, + issue_reset); + pr_info("%s issue_reset=%d\n", __func__, issue_reset); + retry_count++; + if (ioc->config_cmds.smid == smid) + leapioraid_base_free_smid(ioc, smid); + if (ioc->config_cmds.status & LEAPIORAID_CMD_RESET) + goto retry_config; + if (ioc->shost_recovery || ioc->pci_error_recovery) { + issue_host_reset = 0; + r = -EFAULT; + } else + issue_host_reset = 1; + goto free_mem; + } + if (ioc->config_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + memcpy(mpi_reply, ioc->config_cmds.reply, + sizeof(struct LeapioraidCfgRep_t)); + if ((mpi_request->Header.PageType & 0xF) != + (mpi_reply->Header.PageType & 0xF)) { + if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, ioc->request_sz / 4); + leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + panic( + "%s %s: Firmware BUG: mpi_reply mismatch:\n\t\t" + "Requested PageType(0x%02x) Reply PageType(0x%02x)\n", + ioc->name, + __func__, + (mpi_request->Header.PageType & 0xF), + (mpi_reply->Header.PageType & 0xF)); + } + if (((mpi_request->Header.PageType & 0xF) == + LEAPIORAID_CONFIG_PAGETYPE_EXTENDED) && + mpi_request->ExtPageType != mpi_reply->ExtPageType) { + if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, ioc->request_sz / 4); + leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + panic( + "%s %s: Firmware BUG: mpi_reply mismatch:\n\t\t" + "Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", + ioc->name, + __func__, + mpi_request->ExtPageType, + mpi_reply->ExtPageType); + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + } + if (retry_count) + pr_info("%s %s: 
retry (%d) completed!!\n", + ioc->name, __func__, retry_count); + if ((ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) && + config_page && mpi_request->Action == + LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT) { + u8 *p = (u8 *) mem.page; + + if (p) { + if ((mpi_request->Header.PageType & 0xF) != + (p[3] & 0xF)) { + if (! + (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, + ioc->request_sz / 4); + leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + leapioraid_debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz) / + 4); + panic( + "%s %s: Firmware BUG: config page mismatch:\n\t\t" + "Requested PageType(0x%02x) Reply PageType(0x%02x)\n", + ioc->name, + __func__, + (mpi_request->Header.PageType & 0xF), + (p[3] & 0xF)); + } + if (((mpi_request->Header.PageType & 0xF) == + LEAPIORAID_CONFIG_PAGETYPE_EXTENDED) && + (mpi_request->ExtPageType != p[6])) { + if (! + (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, + ioc->request_sz / 4); + leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + leapioraid_debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz) / + 4); + panic( + "%s %s: Firmware BUG: config page mismatch:\n\t\t" + "Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", + ioc->name, + __func__, + mpi_request->ExtPageType, + p[6]); + } + } + memcpy(config_page, mem.page, min_t(u16, mem.sz, + config_page_sz)); + } +free_mem: + if (config_page) + leapioraid_config_free_config_dma_memory(ioc, &mem); +out: + ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_unlock(&ioc->config_cmds.mutex); + if (issue_host_reset) { + if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED) { + leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + r = -EFAULT; + } else { + if (leapioraid_base_check_for_fault_and_issue_reset + (ioc)) + return -EFAULT; + r = -EAGAIN; + } + } + return r; +} + +int +leapioraid_config_get_manufacturing_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManP0_t * + config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_manufacturing_pg10(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP10_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + 
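/* same two-step sequence as the other page accessors: PAGE_HEADER first, then READ_CURRENT */ +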
mpi_request.Header.PageNumber = 10; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_set_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_bios_pg2(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP2_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 2; + mpi_request.Header.PageVersion = 0x04; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_bios_pg3(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP3_t 
*config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = 0x01; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP0_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x02; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x04; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_set_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x04; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = 
leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_iounit_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP8_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_set_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_ioc_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP8_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = 0x00; + 
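/* the PAGE_HEADER action returns only the header in the reply, so a zero-length SGE is enough */ +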
ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_sas_device_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasDevP0_t *config_page, + u32 form, u32 handle) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE; + mpi_request.Header.PageVersion = 0x09; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_number_hba_phys(struct LEAPIORAID_ADAPTER *ioc, + u8 *num_phys) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + u16 ioc_status; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t config_page; + + *num_phys = 0; + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x05; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(struct LeapioraidSasIOUnitP0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) + *num_phys = config_page.NumPhys; + } +out: + return r; +} + +int +leapioraid_config_get_sas_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP0_t *config_page, + u16 sz) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x05; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, 
&mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz); +out: + return r; +} + +int +leapioraid_config_get_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP1_t *config_page, + u16 sz) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x09; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz); +out: + return r; +} + +int +leapioraid_config_set_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP1_t *config_page, + u16 sz) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x09; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT; + leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz); +out: + return r; +} + +int +leapioraid_config_get_expander_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidExpanderP0_t *config_page, + u32 form, u32 handle) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x06; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, 
&mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_expander_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidExpanderP1_t *config_page, + u32 phy_number, u16 handle) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x02; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = + cpu_to_le32(LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM | + (phy_number << LEAPIORAID_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | + handle); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_enclosure_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasEncP0_t *config_page, + u32 form, u32 handle) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x04; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_phy_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasPhyP0_t *config_page, + u32 phy_number) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x03; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = + cpu_to_le32(LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, 
config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_phy_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasPhyP1_t *config_page, + u32 phy_number) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x01; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = + cpu_to_le32(LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_raid_volume_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidVolP1_t *config_page, + u32 form, u32 handle) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x03; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_number_pds(struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u8 *num_pds) +{ + struct LeapioraidCfgReq_t mpi_request; + struct LeapioraidRaidVolP0_t config_page; + struct LeapioraidCfgRep_t mpi_reply; + int r; + u16 ioc_status; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + *num_pds = 0; + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x0A; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = + cpu_to_le32(LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE | handle); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(struct LeapioraidRaidVolP0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) + *num_pds = config_page.NumPhysDisks; + } +out: + 
return r; +} + +int +leapioraid_config_get_raid_volume_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidVolP0_t *config_page, + u32 form, u32 handle, u16 sz) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x0A; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz); +out: + return r; +} + +int +leapioraid_config_get_phys_disk_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidPDP0_t *config_page, + u32 form, u32 form_specific) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x05; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = cpu_to_le32(form | form_specific); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_volume_handle(struct LEAPIORAID_ADAPTER *ioc, + u16 pd_handle, u16 *volume_handle) +{ + struct LeapioraidRaidCfgP0_t *config_page = NULL; + struct LeapioraidCfgReq_t mpi_request; + struct LeapioraidCfgRep_t mpi_reply; + int r, i, config_page_sz; + u16 ioc_status; + int config_num; + u16 element_type; + u16 phys_disk_dev_handle; + + *volume_handle = 0; + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG; + mpi_request.Header.PageVersion = 0x00; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4); + config_page = kmalloc(config_page_sz, GFP_KERNEL); + if (!config_page) { + r = -1; + goto out; + } + config_num = 0xff; + while (1) { + mpi_request.PageAddress = cpu_to_le32(config_num + + LEAPIORAID_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM); + r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply, + 
LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, + config_page, config_page_sz); + if (r) + goto out; + r = -1; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status == LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE) { + *volume_handle = 0; + r = 0; + goto out; + } else if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < config_page->NumElements; i++) { + element_type = + le16_to_cpu(config_page->ConfigElement[i].ElementFlags) & + LEAPIORAID_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE; + if (element_type == + LEAPIORAID_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT + || element_type == + LEAPIORAID_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) { + phys_disk_dev_handle = + le16_to_cpu(config_page->ConfigElement[i].PhysDiskDevHandle); + if (phys_disk_dev_handle == pd_handle) { + *volume_handle = + le16_to_cpu + (config_page->ConfigElement[i].VolDevHandle); + r = 0; + goto out; + } + } else if (element_type == + LEAPIORAID_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) { + *volume_handle = 0; + r = 0; + goto out; + } + } + config_num = config_page->ConfigNum; + } +out: + kfree(config_page); + return r; +} + +int +leapioraid_config_get_volume_wwid(struct LEAPIORAID_ADAPTER *ioc, + u16 volume_handle, u64 *wwid) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidRaidVolP1_t raid_vol_pg1; + + *wwid = 0; + if (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply, + &raid_vol_pg1, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + volume_handle))) { + *wwid = le64_to_cpu(raid_vol_pg1.WWID); + return 0; + } else + return -1; +} diff --git a/drivers/scsi/leapioraid/leapioraid_func.h b/drivers/scsi/leapioraid/leapioraid_func.h new file mode 100644 index 0000000000000000000000000000000000000000..76babcb40766cb8d0e9f9fa88d15574f99156128 --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_func.h @@ -0,0 +1,1258 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. + * + * Copyright (C) 2013-2021 LSI Corporation + * Copyright (C) 2013-2021 Avago Technologies + * Copyright (C) 2013-2021 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#ifndef LEAPIORAID_FUNC_H_INCLUDED +#define LEAPIORAID_FUNC_H_INCLUDED + +#include "leapioraid.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef fallthrough +#define fallthrough +#endif + +#define SYS_LOG_BUF_SIZE (0x20000) +#define MAX_UPD_PAYLOAD_SZ (0x4000) + +#define LEAPIORAID_DRIVER_NAME "LeapIoRaid" +#define LEAPIORAID_AUTHOR "LeapIO Inc." +#define LEAPIORAID_DESCRIPTION "LEAPIO RAID Driver" +#define LEAPIORAID_DRIVER_VERSION "1.02.02.00" +#define LEAPIORAID_MAJOR_VERSION (1) +#define LEAPIORAID_MINOR_VERSION (02) +#define LEAPIORAID_BUILD_VERSION (02) +#define LEAPIORAID_RELEASE_VERSION (00) + +#define LEAPIORAID_VENDOR_ID (0xD405) +#define LEAPIORAID_DEVICE_ID_1 (0x1000) +#define LEAPIORAID_DEVICE_ID_2 (0x1001) + +#define LEAPIORAID_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE + +#define LEAPIORAID_MIN_PHYS_SEGMENTS (16) +#define LEAPIORAID_KDUMP_MIN_PHYS_SEGMENTS (32) + +#define LEAPIORAID_MAX_SG_SEGMENTS SG_MAX_SEGMENTS +#define LEAPIORAID_MAX_PHYS_SEGMENTS_STRING "SG_CHUNK_SIZE" + +#define LEAPIORAID_SG_DEPTH LEAPIORAID_MAX_PHYS_SEGMENTS + + +#define LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT 15 +#define LEAPIORAID_CONFIG_COMMON_SGLFLAGS ((LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | \ + LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | LEAPIORAID_SGE_FLAGS_END_OF_BUFFER \ + | LEAPIORAID_SGE_FLAGS_END_OF_LIST) << LEAPIORAID_SGE_FLAGS_SHIFT) +#define LEAPIORAID_CONFIG_COMMON_WRITE_SGLFLAGS ((LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | \ + LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | LEAPIORAID_SGE_FLAGS_END_OF_BUFFER \ + | LEAPIORAID_SGE_FLAGS_END_OF_LIST | LEAPIORAID_SGE_FLAGS_HOST_TO_IOC) \ + << LEAPIORAID_SGE_FLAGS_SHIFT) + +#define LEAPIORAID_SATA_QUEUE_DEPTH (32) +#define LEAPIORAID_SAS_QUEUE_DEPTH (64) +#define LEAPIORAID_RAID_QUEUE_DEPTH (64) +#define LEAPIORAID_KDUMP_SCSI_IO_DEPTH (64) +#define LEAPIORAID_RAID_MAX_SECTORS (128) + +#define LEAPIORAID_NAME_LENGTH (48) +#define LEAPIORAID_DRIVER_NAME_LENGTH (24) +#define LEAPIORAID_STRING_LENGTH (64) + +#define LEAPIORAID_FRAME_START_OFFSET (256) +#define LEAPIORAID_REPLY_FREE_POOL_SIZE (512) +#define LEAPIORAID_MAX_CALLBACKS (32) +#define LEAPIORAID_MAX_HBA_NUM_PHYS (16) + +#define LEAPIORAID_INTERNAL_CMDS_COUNT (10) +#define LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT (3) +#define LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL (1) +#define LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY (2) + +#define LEAPIORAID_INVALID_DEVICE_HANDLE (0xFFFF) +#define LEAPIORAID_MAX_CHAIN_ELEMT_SZ (16) +#define LEAPIORAID_DEFAULT_NUM_FWCHAIN_ELEMTS (8) +#define LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY (30) +#define LEAPIORAID_READL_RETRY_COUNT_OF_THREE (3) + +#define LEAPIORAID_IOC_PRE_RESET_PHASE (1) +#define LEAPIORAID_IOC_AFTER_RESET_PHASE (2) +#define LEAPIORAID_IOC_DONE_RESET_PHASE (3) + +#define LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT (0x01) +#define LEAPIORAID_TARGET_FLAGS_VOLUME (0x02) +#define LEAPIORAID_TARGET_FASTPATH_IO (0x08) + +#define LEAPIORAID_DEVICE_HIGH_IOPS_DEPTH (8) +#define 
LEAPIORAID_HIGH_IOPS_REPLY_QUEUES (8) +#define LEAPIORAID_HIGH_IOPS_BATCH_COUNT (16) +#define LEAPIORAID_GEN35_MAX_MSIX_QUEUES (128) +#define LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK (16) + +#define LEAPIORAID_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED (0x2810) + +#ifndef DID_TRANSPORT_DISRUPTED +#define DID_TRANSPORT_DISRUPTED DID_BUS_BUSY +#endif +#ifndef ULLONG_MAX +#define ULLONG_MAX (~0ULL) +#endif +#ifndef USHORT_MAX +#define USHORT_MAX ((u16)(~0U)) +#endif +#ifndef UINT_MAX +#define UINT_MAX (~0U) +#endif + +static inline void *leapioraid_shost_private(struct Scsi_Host *shost) +{ + return (void *)shost->hostdata; +} + +struct LeapioraidManuP10_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U8 OEMIdentifier; + U8 Reserved1; + U16 Reserved2; + U32 Reserved3; + U32 GenericFlags0; + U32 GenericFlags1; + U32 Reserved4; + U32 OEMSpecificFlags0; + U32 OEMSpecificFlags1; + U32 Reserved5[18]; +}; + +struct LeapioraidManuP11_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + __le32 Reserved1; + u8 Reserved2; + u8 EEDPTagMode; + u8 Reserved3; + u8 Reserved4; + __le32 Reserved5[8]; + u16 AddlFlags2; + u8 AddlFlags3; + u8 Reserved6; + __le32 Reserved7[7]; + u8 AbortTO; + u8 NumPerDevEvents; + u8 HostTraceBufferDecrementSizeKB; + u8 HostTraceBufferFlags; + u16 HostTraceBufferMaxSizeKB; + u16 HostTraceBufferMinSizeKB; + u8 CoreDumpTOSec; + u8 TimeSyncInterval; + u16 Reserved9; + __le32 Reserved10; +}; + +struct LEAPIORAID_TARGET { + struct scsi_target *starget; + u64 sas_address; + struct leapioraid_raid_device *raid_device; + u16 handle; + int num_luns; + u32 flags; + u8 deleted; + u8 tm_busy; + struct leapioraid_hba_port *port; + struct leapioraid_sas_device *sas_dev; +}; + +#define LEAPIORAID_DEVICE_FLAGS_INIT (0x01) +#define LEAPIORAID_DEVICE_TLR_ON (0x02) + +struct LEAPIORAID_DEVICE { + struct LEAPIORAID_TARGET *sas_target; + unsigned int lun; + u32 flags; + u8 configured_lun; + u8 block; + u8 deleted; + u8 tlr_snoop_check; + u8 ignore_delay_remove; + u8 ncq_prio_enable; + unsigned long ata_command_pending; +}; + +#define LEAPIORAID_CMND_PENDING_BIT (0) +#define LEAPIORAID_CMD_NOT_USED (0x8000) +#define LEAPIORAID_CMD_COMPLETE (0x0001) +#define LEAPIORAID_CMD_PENDING (0x0002) +#define LEAPIORAID_CMD_REPLY_VALID (0x0004) +#define LEAPIORAID_CMD_RESET (0x0008) +#define LEAPIORAID_CMD_COMPLETE_ASYNC (0x0010) + +struct leapioraid_internal_cmd { + struct mutex mutex; + struct completion done; + void *reply; + void *sense; + u16 status; + u16 smid; +}; + +struct leapioraid_scsi_io_transfer { + u16 handle; + u8 is_raid; + enum dma_data_direction dir; + u32 data_length; + dma_addr_t data_dma; + u8 sense[SCSI_SENSE_BUFFERSIZE]; + u32 lun; + u8 cdb_length; + u8 cdb[32]; + u8 timeout; + u8 VF_ID; + u8 VP_ID; + u8 valid_reply; + u32 sense_length; + u16 ioc_status; + u8 scsi_state; + u8 scsi_status; + u32 log_info; + u32 transfer_length; +}; + +struct leapioraid_internal_qcmd { + struct list_head list; + void *request; + void *reply; + void *sense; + u16 status; + u16 smid; + struct leapioraid_scsi_io_transfer *transfer_packet; +}; + +#define LEAPIORAID_WIDE_PORT_API (1) +#define LEAPIORAID_WIDE_PORT_API_PLUS (1) + +struct leapioraid_sas_device { + struct list_head list; + struct scsi_target *starget; + u64 sas_address; + u64 device_name; + u16 handle; + u64 sas_address_parent; + u16 enclosure_handle; + u64 enclosure_logical_id; + u16 volume_handle; + u64 volume_wwid; + u32 device_info; + int id; + int channel; + u16 slot; + u8 phy; + u8 responding; + u8 fast_path; + u8 pfa_led_on; + struct kref 
refcount; + u8 *serial_number; + u8 pend_sas_rphy_add; + u8 enclosure_level; + u8 chassis_slot; + u8 is_chassis_slot_valid; + u8 connector_name[5]; + u8 ssd_device; + u8 supports_sata_smart; + u8 port_type; + struct leapioraid_hba_port *port; + struct sas_rphy *rphy; +}; + +static inline +void leapioraid_sas_device_get(struct leapioraid_sas_device *s) +{ + kref_get(&s->refcount); +} + +static inline +void leapioraid_sas_device_free(struct kref *r) +{ + kfree(container_of(r, struct leapioraid_sas_device, refcount)); +} + +static inline +void leapioraid_sas_device_put(struct leapioraid_sas_device *s) +{ + kref_put(&s->refcount, leapioraid_sas_device_free); +} + +struct leapioraid_raid_device { + struct list_head list; + struct scsi_target *starget; + struct scsi_device *sdev; + u64 wwid; + u16 handle; + u16 block_sz; + int id; + int channel; + u8 volume_type; + u8 num_pds; + u8 responding; + u8 percent_complete; + u8 direct_io_enabled; + u8 stripe_exponent; + u8 block_exponent; + u64 max_lba; + u32 stripe_sz; + u32 device_info; + u16 pd_handle[8]; +}; + +struct leapioraid_boot_device { + int channel; + void *device; +}; + +struct leapioraid_sas_port { + struct list_head port_list; + u8 num_phys; + struct leapioraid_hba_port *hba_port; + struct sas_identify remote_identify; + struct sas_rphy *rphy; +#if defined(LEAPIORAID_WIDE_PORT_API) + struct sas_port *port; +#endif + struct list_head phy_list; +}; + +struct leapioraid_sas_phy { + struct list_head port_siblings; + struct sas_identify identify; + struct sas_identify remote_identify; + struct sas_phy *phy; + u8 phy_id; + u16 handle; + u16 attached_handle; + u8 phy_belongs_to_port; + u8 hba_vphy; + struct leapioraid_hba_port *port; +}; + +struct leapioraid_raid_sas_node { + struct list_head list; + struct device *parent_dev; + u8 num_phys; + u64 sas_address; + u16 handle; + u64 sas_address_parent; + u16 enclosure_handle; + u64 enclosure_logical_id; + u8 responding; + u8 nr_phys_allocated; + struct leapioraid_hba_port *port; + struct leapioraid_sas_phy *phy; + struct list_head sas_port_list; + struct sas_rphy *rphy; +}; + +struct leapioraid_enclosure_node { + struct list_head list; + struct LeapioraidSasEncP0_t pg0; +}; + +enum reset_type { + FORCE_BIG_HAMMER, + SOFT_RESET, +}; + +struct leapioraid_chain_tracker { + void *chain_buffer; + dma_addr_t chain_buffer_dma; +}; + +struct leapioraid_chain_lookup { + struct leapioraid_chain_tracker *chains_per_smid; + atomic_t chain_offset; +}; + +struct leapioraid_scsiio_tracker { + u16 smid; + struct scsi_cmnd *scmd; + u8 cb_idx; + u8 direct_io; + struct list_head chain_list; + u16 msix_io; +}; + +struct leapioraid_request_tracker { + u16 smid; + u8 cb_idx; + struct list_head tracker_list; +}; + +struct leapioraid_tr_list { + struct list_head list; + u16 handle; + u16 state; +}; + +struct leapioraid_sc_list { + struct list_head list; + u16 handle; +}; + +struct leapioraid_event_ack_list { + struct list_head list; + U16 Event; + U32 EventContext; +}; + +struct leapioraid_adapter_reply_queue { + struct LEAPIORAID_ADAPTER *ioc; + u8 msix_index; + u32 reply_post_host_index; + union LeapioraidRepDescUnion_t *reply_post_free; + char name[LEAPIORAID_NAME_LENGTH]; + atomic_t busy; + cpumask_var_t affinity_hint; + u32 os_irq; + struct irq_poll irqpoll; + bool irq_poll_scheduled; + bool irq_line_enable; + bool is_blk_mq_poll_q; + struct list_head list; +}; + +struct leapioraid_blk_mq_poll_queue { + atomic_t busy; + atomic_t pause; + struct leapioraid_adapter_reply_queue *reply_q; +}; + +union 
leapioraid_version_union { + struct LEAPIORAID_VERSION_STRUCT Struct; + u32 Word; +}; + +typedef void (*LEAPIORAID_ADD_SGE)(void *paddr, u32 flags_length, + dma_addr_t dma_addr); +typedef int (*LEAPIORAID_BUILD_SG_SCMD)(struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid); +typedef void (*LEAPIORAID_BUILD_SG)(struct LEAPIORAID_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, + dma_addr_t data_in_dma, size_t data_in_sz); +typedef void (*LEAPIORAID_BUILD_ZERO_LEN_SGE)(struct LEAPIORAID_ADAPTER *ioc, + void *paddr); +typedef void (*PUT_SMID_IO_FP_HIP_TA)(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u16 funcdep); +typedef void (*PUT_SMID_DEFAULT)(struct LEAPIORAID_ADAPTER *ioc, u16 smid); +typedef u32(*BASE_READ_REG) (const void __iomem *addr, + u8 retry_count); +typedef u8(*GET_MSIX_INDEX) (struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd); + +struct leapioraid_facts { + u16 MsgVersion; + u16 HeaderVersion; + u8 IOCNumber; + u8 VP_ID; + u8 VF_ID; + u16 IOCExceptions; + u16 IOCStatus; + u32 IOCLogInfo; + u8 MaxChainDepth; + u8 WhoInit; + u8 NumberOfPorts; + u8 MaxMSIxVectors; + u16 RequestCredit; + u16 ProductID; + u32 IOCCapabilities; + union leapioraid_version_union FWVersion; + u16 IOCRequestFrameSize; + u16 IOCMaxChainSegmentSize; + u16 MaxInitiators; + u16 MaxTargets; + u16 MaxSasExpanders; + u16 MaxEnclosures; + u16 ProtocolFlags; + u16 HighPriorityCredit; + u16 MaxReplyDescriptorPostQueueDepth; + u8 ReplyFrameSize; + u8 MaxVolumes; + u16 MaxDevHandle; + u16 MaxPersistentEntries; + u16 MinDevHandle; + u8 CurrentHostPageSize; +}; + +struct leapioraid_port_facts { + u8 PortNumber; + u8 VP_ID; + u8 VF_ID; + u8 PortType; + u16 MaxPostedCmdBuffers; +}; + +struct leapioraid_reply_post_struct { + union LeapioraidRepDescUnion_t *reply_post_free; + dma_addr_t reply_post_free_dma; +}; + +struct leapioraid_virtual_phy { + struct list_head list; + u64 sas_address; + u32 phy_mask; + u8 flags; +}; + +#define LEAPIORAID_VPHY_FLAG_DIRTY_PHY (0x01) +struct leapioraid_hba_port { + struct list_head list; + u64 sas_address; + u32 phy_mask; + u8 port_id; + u8 flags; + u32 vphys_mask; + struct list_head vphys_list; +}; + +#define LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT (0x01) +#define LEAPIORAID_HBA_PORT_FLAG_NEW_PORT (0x02) +#define LEAPIORAID_MULTIPATH_DISABLED_PORT_ID (0xFF) + +typedef void (*LEAPIORAID_FLUSH_RUNNING_CMDS)(struct LEAPIORAID_ADAPTER * + ioc); + +struct LEAPIORAID_ADAPTER { + struct list_head list; + struct Scsi_Host *shost; + u8 id; + u8 IOCNumber; + int cpu_count; + char name[LEAPIORAID_NAME_LENGTH]; + char driver_name[LEAPIORAID_DRIVER_NAME_LENGTH]; + char tmp_string[LEAPIORAID_STRING_LENGTH]; + struct pci_dev *pdev; + struct LeapioraidSysInterfaceRegs_t __iomem *chip; + phys_addr_t chip_phys; + int logging_level; + int fwfault_debug; + u8 ir_firmware; + int bars; + u8 mask_interrupts; + struct mutex pci_access_mutex; + char fault_reset_work_q_name[48]; + char hba_hot_unplug_work_q_name[48]; + struct workqueue_struct *fault_reset_work_q; + struct workqueue_struct *hba_hot_unplug_work_q; + struct delayed_work fault_reset_work; + struct delayed_work hba_hot_unplug_work; + struct workqueue_struct *smart_poll_work_q; + struct delayed_work smart_poll_work; + u8 adapter_over_temp; + char firmware_event_name[48]; + struct workqueue_struct *firmware_event_thread; + spinlock_t fw_event_lock; + struct list_head fw_event_list; + struct leapioraid_fw_event_work *current_event; + u8 fw_events_cleanup; + int aen_event_read_flag; + u8 broadcast_aen_busy; + u16 
broadcast_aen_pending; + u8 shost_recovery; + u8 got_task_abort_from_ioctl; + u8 got_task_abort_from_sysfs; + struct mutex reset_in_progress_mutex; + struct mutex hostdiag_unlock_mutex; + spinlock_t ioc_reset_in_progress_lock; + spinlock_t hba_hot_unplug_lock; + u8 ioc_link_reset_in_progress; + int ioc_reset_status; + u8 ignore_loginfos; + u8 remove_host; + u8 pci_error_recovery; + u8 wait_for_discovery_to_complete; + u8 is_driver_loading; + u8 port_enable_failed; + u8 start_scan; + u16 start_scan_failed; + u8 msix_enable; + u8 *cpu_msix_table; + resource_size_t **reply_post_host_index; + u16 cpu_msix_table_sz; + u32 ioc_reset_count; + LEAPIORAID_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds; + u32 non_operational_loop; + u8 ioc_coredump_loop; + u32 timestamp_update_count; + u32 time_sync_interval; + u8 multipath_on_hba; + atomic64_t total_io_cnt; + atomic64_t high_iops_outstanding; + bool msix_load_balance; + u16 thresh_hold; + u8 high_iops_queues; + u8 iopoll_q_start_index; + u32 drv_internal_flags; + u32 drv_support_bitmap; + u32 dma_mask; + bool enable_sdev_max_qd; + bool use_32bit_dma; + struct leapioraid_blk_mq_poll_queue *blk_mq_poll_queues; + u8 scsi_io_cb_idx; + u8 tm_cb_idx; + u8 transport_cb_idx; + u8 scsih_cb_idx; + u8 ctl_cb_idx; + u8 ctl_tm_cb_idx; + u8 base_cb_idx; + u8 port_enable_cb_idx; + u8 config_cb_idx; + u8 tm_tr_cb_idx; + u8 tm_tr_volume_cb_idx; + u8 tm_tr_internal_cb_idx; + u8 tm_sas_control_cb_idx; + struct leapioraid_internal_cmd base_cmds; + struct leapioraid_internal_cmd port_enable_cmds; + struct leapioraid_internal_cmd transport_cmds; + struct leapioraid_internal_cmd scsih_cmds; + struct leapioraid_internal_cmd tm_cmds; + struct leapioraid_internal_cmd ctl_cmds; + struct leapioraid_internal_cmd config_cmds; + struct list_head scsih_q_intenal_cmds; + spinlock_t scsih_q_internal_lock; + LEAPIORAID_ADD_SGE base_add_sg_single; + LEAPIORAID_BUILD_SG_SCMD build_sg_scmd; + LEAPIORAID_BUILD_SG build_sg; + LEAPIORAID_BUILD_ZERO_LEN_SGE build_zero_len_sge; + u16 sge_size_ieee; + LEAPIORAID_BUILD_SG build_sg_mpi; + LEAPIORAID_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi; + u32 event_type[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS]; + u32 event_context; + void *event_log; + u32 event_masks[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS]; + u8 disable_eedp_support; + u8 tm_custom_handling; + u16 max_shutdown_latency; + u16 max_wideport_qd; + u16 max_narrowport_qd; + u8 max_sata_qd; + struct leapioraid_facts facts; + struct leapioraid_facts prev_fw_facts; + struct leapioraid_port_facts *pfacts; + struct LeapioraidManP0_t manu_pg0; + struct LeapioraidManuP10_t manu_pg10; + struct LeapioraidManuP11_t manu_pg11; + struct LeapioraidBiosP2_t bios_pg2; + struct LeapioraidBiosP3_t bios_pg3; + struct LeapioraidIOCP8_t ioc_pg8; + struct LeapioraidIOUnitP0_t iounit_pg0; + struct LeapioraidIOUnitP1_t iounit_pg1; + struct LeapioraidIOUnitP8_t iounit_pg8; + struct LeapioraidIOCP1_t ioc_pg1_copy; + struct leapioraid_boot_device req_boot_device; + struct leapioraid_boot_device req_alt_boot_device; + struct leapioraid_boot_device current_boot_device; + struct leapioraid_raid_sas_node sas_hba; + struct list_head sas_expander_list; + struct list_head enclosure_list; + spinlock_t sas_node_lock; + struct list_head sas_device_list; + struct list_head sas_device_init_list; + spinlock_t sas_device_lock; + struct list_head pcie_device_list; + struct list_head pcie_device_init_list; + spinlock_t pcie_device_lock; + struct list_head raid_device_list; + spinlock_t raid_device_lock; + u8 
io_missing_delay; + u16 device_missing_delay; + int sas_id; + int pcie_target_id; + void *blocking_handles; + void *pd_handles; + u16 pd_handles_sz; + void *pend_os_device_add; + u16 pend_os_device_add_sz; + u16 config_page_sz; + void *config_page; + dma_addr_t config_page_dma; + void *config_vaddr; + u16 hba_queue_depth; + u16 sge_size; + u16 scsiio_depth; + u16 request_sz; + u8 *request; + dma_addr_t request_dma; + u32 request_dma_sz; + spinlock_t scsi_lookup_lock; + int pending_io_count; + wait_queue_head_t reset_wq; + int pending_tm_count; + u32 terminated_tm_count; + wait_queue_head_t pending_tm_wq; + u8 out_of_frames; + wait_queue_head_t no_frames_tm_wq; + u16 *io_queue_num; + u32 page_size; + struct leapioraid_chain_lookup *chain_lookup; + struct list_head free_chain_list; + struct dma_pool *chain_dma_pool; + u16 max_sges_in_main_message; + u16 max_sges_in_chain_message; + u16 chains_needed_per_io; + u16 chain_segment_sz; + u16 chains_per_prp_buffer; + u16 hi_priority_smid; + u8 *hi_priority; + dma_addr_t hi_priority_dma; + u16 hi_priority_depth; + struct leapioraid_request_tracker *hpr_lookup; + struct list_head hpr_free_list; + u16 internal_smid; + u8 *internal; + dma_addr_t internal_dma; + u16 internal_depth; + struct leapioraid_request_tracker *internal_lookup; + struct list_head internal_free_list; + u8 *sense; + dma_addr_t sense_dma; + struct dma_pool *sense_dma_pool; + u16 reply_sz; + u8 *reply; + dma_addr_t reply_dma; + u32 reply_dma_max_address; + u32 reply_dma_min_address; + struct dma_pool *reply_dma_pool; + u16 reply_free_queue_depth; + __le32 *reply_free; + dma_addr_t reply_free_dma; + struct dma_pool *reply_free_dma_pool; + u32 reply_free_host_index; + u16 reply_post_queue_depth; + struct leapioraid_reply_post_struct *reply_post; + struct dma_pool *reply_post_free_dma_pool; + struct dma_pool *reply_post_free_array_dma_pool; + struct LeapioraidIOCInitRDPQArrayEntry *reply_post_free_array; + dma_addr_t reply_post_free_array_dma; + u8 reply_queue_count; + struct list_head reply_queue_list; + u8 rdpq_array_capable; + u8 rdpq_array_enable; + u8 rdpq_array_enable_assigned; + u8 combined_reply_queue; + u8 nc_reply_index_count; + u8 smp_affinity_enable; + resource_size_t **replyPostRegisterIndex; + struct list_head delayed_tr_list; + struct list_head delayed_tr_volume_list; + struct list_head delayed_internal_tm_list; + struct list_head delayed_sc_list; + struct list_head delayed_event_ack_list; + u32 ring_buffer_offset; + u32 ring_buffer_sz; + u8 reset_from_user; + u8 hide_ir_msg; + u8 warpdrive_msg; + u8 mfg_pg10_hide_flag; + u8 hide_drives; + u8 atomic_desc_capable; + BASE_READ_REG base_readl; + PUT_SMID_IO_FP_HIP_TA put_smid_scsi_io; + PUT_SMID_IO_FP_HIP_TA put_smid_fast_path; + PUT_SMID_IO_FP_HIP_TA put_smid_hi_priority; + PUT_SMID_DEFAULT put_smid_default; + GET_MSIX_INDEX get_msix_index_for_smlio; + void *device_remove_in_progress; + u16 device_remove_in_progress_sz; + u8 *tm_tr_retry; + u32 tm_tr_retry_sz; + u8 temp_sensors_count; + struct list_head port_table_list; + u8 *log_buffer; + dma_addr_t log_buffer_dma; + char pcie_log_work_q_name[48]; + struct workqueue_struct *pcie_log_work_q; + struct delayed_work pcie_log_work; + u32 open_pcie_trace; +}; + +#define LEAPIORAID_DEBUG (0x00000001) +#define LEAPIORAID_DEBUG_MSG_FRAME (0x00000002) +#define LEAPIORAID_DEBUG_SG (0x00000004) +#define LEAPIORAID_DEBUG_EVENTS (0x00000008) +#define LEAPIORAID_DEBUG_EVENT_WORK_TASK (0x00000010) +#define LEAPIORAID_DEBUG_INIT (0x00000020) +#define LEAPIORAID_DEBUG_EXIT (0x00000040) 
+#define LEAPIORAID_DEBUG_FAIL (0x00000080)
+#define LEAPIORAID_DEBUG_TM (0x00000100)
+#define LEAPIORAID_DEBUG_REPLY (0x00000200)
+#define LEAPIORAID_DEBUG_HANDSHAKE (0x00000400)
+#define LEAPIORAID_DEBUG_CONFIG (0x00000800)
+#define LEAPIORAID_DEBUG_DL (0x00001000)
+#define LEAPIORAID_DEBUG_RESET (0x00002000)
+#define LEAPIORAID_DEBUG_SCSI (0x00004000)
+#define LEAPIORAID_DEBUG_IOCTL (0x00008000)
+#define LEAPIORAID_DEBUG_CSMISAS (0x00010000)
+#define LEAPIORAID_DEBUG_SAS (0x00020000)
+#define LEAPIORAID_DEBUG_TRANSPORT (0x00040000)
+#define LEAPIORAID_DEBUG_TASK_SET_FULL (0x00080000)
+/* wide-port transport logging, consumed by dsastransport(); next free bit */
+#define LEAPIORAID_DEBUG_SAS_WIDE (0x00100000)
+
+#define LEAPIORAID_CHECK_LOGGING(IOC, CMD, BITS) \
+{ \
+	if (IOC->logging_level & BITS) \
+		CMD; \
+}
+
+#define dprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG)
+#define dsgprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SG)
+#define devtprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EVENTS)
+#define dewtprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EVENT_WORK_TASK)
+#define dinitprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_INIT)
+#define dexitprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EXIT)
+#define dfailprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_FAIL)
+#define dtmprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TM)
+#define dreplyprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_REPLY)
+#define dhsprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_HANDSHAKE)
+#define dcprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_CONFIG)
+#define ddlprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_DL)
+#define drsprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_RESET)
+#define dsprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SCSI)
+#define dctlprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_IOCTL)
+#define dcsmisasprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_CSMISAS)
+#define dsasprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SAS)
+#define dsastransport(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SAS_WIDE)
+#define dmfprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_MSG_FRAME)
+#define dtsfprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TASK_SET_FULL)
+#define dtransportprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TRANSPORT)
+
+static inline void
+leapioraid_debug_dump_mf(void *mpi_request, int sz)
+{
+	int i;
+	__le32 *mfp = (__le32 *) mpi_request;
+
+	pr_info("mf:\n\t");
+	for (i = 0; i < sz; i++) {
+		if (i && ((i % 8) == 0))
+			pr_info("\n\t");
+		pr_info("%08x ", le32_to_cpu(mfp[i]));
+	}
+	pr_info("\n");
+}
+
+static inline void
+leapioraid_debug_dump_reply(void *mpi_request, int sz)
+{
+	int i;
+	__le32 *mfp = (__le32 *) mpi_request;
+
+	pr_info("reply:\n\t");
+	for (i = 0; i < sz; i++) {
+		if (i && ((i % 8) == 0))
+			pr_info("\n\t");
+		pr_info("%08x ", le32_to_cpu(mfp[i]));
+	}
+	pr_info("\n");
+}
+
+static inline void
+leapioraid_debug_dump_config(void *mpi_request, int sz)
+{
+	int i;
+	__le32 *mfp = (__le32 *) mpi_request;
+
+	pr_info("config:\n\t");
+	for (i = 0; i < sz; i++) {
+		if (i && ((i % 8) == 0))
+			pr_info("\n\t");
+		pr_info("%08x ", le32_to_cpu(mfp[i]));
+	}
+	
pr_info("\n"); +} + +#define LEAPIORAID_DRV_INTERNAL_BITMAP_BLK_MQ (0x00000001) +#define LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED (0x00000002) + +typedef u8(*LEAPIORAID_CALLBACK) (struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); + +#define SCSIH_MAP_QUEUE(shost) static void leapioraid_scsihost_map_queues(shost) + +extern struct list_head leapioraid_ioc_list; +extern spinlock_t leapioraid_gioc_lock; +void leapioraid_base_start_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_stop_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_start_log_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_stop_log_watchdog(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_trace_log_init(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_attach(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_detach(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_map_resources(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_free_resources(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_free_enclosure_list(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_hard_reset_handler(struct LEAPIORAID_ADAPTER *ioc, + enum reset_type type); +void *leapioraid_base_get_msg_frame(struct LEAPIORAID_ADAPTER *ioc, u16 smid); +void *leapioraid_base_get_sense_buffer(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +__le32 leapioraid_base_get_sense_buffer_dma(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +__le64 leapioraid_base_get_sense_buffer_dma_64(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +void leapioraid_base_sync_reply_irqs(struct LEAPIORAID_ADAPTER *ioc, u8 poll); +u16 leapioraid_base_get_smid_hpr(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx); +u16 leapioraid_base_get_smid_scsiio(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd); +u16 leapioraid_base_get_smid(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx); +void leapioraid_base_free_smid(struct LEAPIORAID_ADAPTER *ioc, u16 smid); +void leapioraid_base_initialize_callback_handler(void); +u8 leapioraid_base_register_callback_handler(LEAPIORAID_CALLBACK cb_func); +void leapioraid_base_release_callback_handler(u8 cb_idx); +u8 leapioraid_base_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +u8 leapioraid_port_enable_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +void *leapioraid_base_get_reply_virt_addr(struct LEAPIORAID_ADAPTER *ioc, + u32 phys_addr); +u32 leapioraid_base_get_iocstate(struct LEAPIORAID_ADAPTER *ioc, int cooked); +int leapioraid_base_check_and_get_msix_vectors(struct pci_dev *pdev); +void leapioraid_base_fault_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code); +#define leapioraid_print_fault_code(ioc, fault_code) \ + do { \ + pr_err("%s fault info from func: %s\n", ioc->name, __func__); \ + leapioraid_base_fault_info(ioc, fault_code); \ + } while (0) +void leapioraid_base_coredump_info(struct LEAPIORAID_ADAPTER *ioc, + u16 fault_code); +int leapioraid_base_wait_for_coredump_completion(struct LEAPIORAID_ADAPTER *ioc, + const char *caller); +int leapioraid_base_sas_iounit_control(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSasIoUnitControlRep_t * + mpi_reply, + struct LeapioraidSasIoUnitControlReq_t * + mpi_request); +int leapioraid_base_scsi_enclosure_processor(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSepRep_t *mpi_reply, + struct LeapioraidSepReq_t *mpi_request); +void leapioraid_base_validate_event_type(struct LEAPIORAID_ADAPTER *ioc, + u32 *event_type); +void leapioraid_halt_firmware(struct 
LEAPIORAID_ADAPTER *ioc, u8 set_fault); +struct leapioraid_scsiio_tracker *leapioraid_get_st_from_smid( + struct LEAPIORAID_ADAPTER *ioc, u16 smid); +void leapioraid_base_clear_st(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_scsiio_tracker *st); +struct leapioraid_scsiio_tracker *leapioraid_base_scsi_cmd_priv( + struct scsi_cmnd *scmd); +int +leapioraid_base_check_for_fault_and_issue_reset(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_port_enable(struct LEAPIORAID_ADAPTER *ioc); +u8 leapioraid_base_pci_device_is_unplugged(struct LEAPIORAID_ADAPTER *ioc); +u8 leapioraid_base_pci_device_is_available(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_free_irq(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_disable_msix(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_wait_for_commands_to_complete(struct LEAPIORAID_ADAPTER *ioc); +u8 leapioraid_base_check_cmd_timeout(struct LEAPIORAID_ADAPTER *ioc, + u8 status, void *mpi_request, int sz); +#define leapioraid_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \ + do { \ + pr_err("%s In func: %s\n", ioc->name, __func__); \ + issue_reset = leapioraid_base_check_cmd_timeout(ioc, status, mpi_request, sz); \ + } while (0) +int leapioraid_wait_for_ioc_to_operational(struct LEAPIORAID_ADAPTER *ioc, + int wait_count); +void leapioraid_base_start_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_stop_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_make_ioc_ready(struct LEAPIORAID_ADAPTER *ioc, + enum reset_type type); +void leapioraid_base_mask_interrupts(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_unmask_interrupts(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); +void leapioraid_base_pause_mq_polling(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_resume_mq_polling(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_unlock_and_get_host_diagnostic(struct LEAPIORAID_ADAPTER + *ioc, u32 *host_diagnostic); +void leapioraid_base_lock_host_diagnostic(struct LEAPIORAID_ADAPTER *ioc); +extern char driver_name[LEAPIORAID_NAME_LENGTH]; +struct scsi_cmnd *leapioraid_scsihost_scsi_lookup_get(struct LEAPIORAID_ADAPTER + *ioc, u16 smid); +u8 leapioraid_scsihost_event_callback(struct LEAPIORAID_ADAPTER *ioc, + u8 msix_index, u32 reply); +void leapioraid_scsihost_reset_handler(struct LEAPIORAID_ADAPTER *ioc, + int reset_phase); +int leapioraid_scsihost_issue_tm(struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task, u8 timeout, u8 tr_method); +int leapioraid_scsihost_issue_locked_tm(struct LEAPIORAID_ADAPTER *ioc, + u16 handle, uint channel, uint id, + uint lun, u8 type, u16 smid_task, + u8 timeout, u8 tr_method); +void leapioraid_scsihost_set_tm_flag(struct LEAPIORAID_ADAPTER *ioc, + u16 handle); +void leapioraid_scsihost_clear_tm_flag(struct LEAPIORAID_ADAPTER *ioc, + u16 handle); +void leapioraid_expander_remove( + struct LEAPIORAID_ADAPTER *ioc, u64 sas_address, + struct leapioraid_hba_port *port); +void leapioraid_device_remove_by_sas_address(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port); +u8 leapioraid_check_for_pending_internal_cmds(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +struct leapioraid_hba_port *leapioraid_get_port_by_id( + struct LEAPIORAID_ADAPTER *ioc, u8 port, u8 skip_dirty_flag); +struct leapioraid_virtual_phy *leapioraid_get_vphy_by_phy( + struct LEAPIORAID_ADAPTER *ioc, struct 
leapioraid_hba_port *port, u32 phy); +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle); +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port); +struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr_and_rphy( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct sas_rphy *rphy); +struct leapioraid_sas_device *leapioraid_get_sdev_by_addr( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port); +struct leapioraid_sas_device *leapioraid_get_sdev_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle); +void leapioraid_scsihost_flush_running_cmds(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_port_enable_complete(struct LEAPIORAID_ADAPTER *ioc); +struct leapioraid_raid_device *leapioraid_raid_device_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle); +void leapioraid_scsihost_sas_device_remove(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device); +void leapioraid_scsihost_clear_outstanding_scsi_tm_commands( + struct LEAPIORAID_ADAPTER *ioc); +u32 leapioraid_base_mod64(u64 dividend, u32 divisor); +void +leapioraid__scsihost_change_queue_depth(struct scsi_device *sdev, int qdepth); +u8 leapioraid_scsihost_ncq_prio_supp(struct scsi_device *sdev); +u8 leapioraid_config_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +int leapioraid_config_get_number_hba_phys(struct LEAPIORAID_ADAPTER *ioc, + u8 *num_phys); +int leapioraid_config_get_manufacturing_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManP0_t * + config_page); +int leapioraid_config_get_manufacturing_pg10(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP10_t + *config_page); +int leapioraid_config_get_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page); +int leapioraid_config_set_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page); +int leapioraid_config_get_bios_pg2(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP2_t *config_page); +int leapioraid_config_get_bios_pg3(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP3_t *config_page); +int leapioraid_config_get_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP0_t *config_page); +int leapioraid_config_get_sas_device_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasDevP0_t *config_page, + u32 form, u32 handle); +int leapioraid_config_get_sas_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP0_t *config_page, + u16 sz); +int leapioraid_config_get_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page); +int leapioraid_config_set_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page); +int leapioraid_config_get_iounit_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP8_t 
*config_page); +int leapioraid_config_get_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP1_t *config_page, + u16 sz); +int leapioraid_config_set_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP1_t *config_page, + u16 sz); +int leapioraid_config_get_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page); +int leapioraid_config_set_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page); +int leapioraid_config_get_ioc_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP8_t *config_page); +int leapioraid_config_get_expander_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidExpanderP0_t *config_page, + u32 form, u32 handle); +int leapioraid_config_get_expander_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidExpanderP1_t *config_page, + u32 phy_number, u16 handle); +int leapioraid_config_get_enclosure_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasEncP0_t * + config_page, u32 form, u32 handle); +int leapioraid_config_get_phy_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasPhyP0_t *config_page, + u32 phy_number); +int leapioraid_config_get_phy_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasPhyP1_t *config_page, + u32 phy_number); +int leapioraid_config_get_raid_volume_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidVolP1_t *config_page, + u32 form, u32 handle); +int leapioraid_config_get_number_pds(struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 *num_pds); +int leapioraid_config_get_raid_volume_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidVolP0_t *config_page, + u32 form, u32 handle, u16 sz); +int leapioraid_config_get_phys_disk_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidPDP0_t * + config_page, u32 form, + u32 form_specific); +int leapioraid_config_get_volume_handle(struct LEAPIORAID_ADAPTER *ioc, + u16 pd_handle, u16 *volume_handle); +int leapioraid_config_get_volume_wwid(struct LEAPIORAID_ADAPTER *ioc, + u16 volume_handle, u64 *wwid); +extern const struct attribute_group *leapioraid_host_groups[]; +extern const struct attribute_group *leapioraid_dev_groups[]; +void leapioraid_ctl_init(void); +void leapioraid_ctl_exit(void); +u8 leapioraid_ctl_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +u8 leapioraid_ctl_tm_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +void leapioraid_ctl_reset_handler(struct LEAPIORAID_ADAPTER *ioc, + int reset_phase); +u8 leapioraid_ctl_event_callback(struct LEAPIORAID_ADAPTER *ioc, u8 msix_index, + u32 reply); +void leapioraid_ctl_add_to_event_log(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventNotificationRep_t * + mpi_reply); +void leapioraid_ctl_clear_outstanding_ioctls(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_ctl_release(struct inode *inode, struct file *filep); +void ctl_init(void); +void ctl_exit(void); +u8 leapioraid_transport_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 
msix_index, u32 reply); +struct leapioraid_sas_port *leapioraid_transport_port_add( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u64 sas_address, + struct leapioraid_hba_port *port); +void leapioraid_transport_port_remove(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u64 sas_address_parent, + struct leapioraid_hba_port *port); +int leapioraid_transport_add_host_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidSasPhyP0_t phy_pg0, + struct device *parent_dev); +int leapioraid_transport_add_expander_phy(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidExpanderP1_t expander_pg1, + struct device *parent_dev); +void leapioraid_transport_update_links(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u16 handle, + u8 phy_number, u8 link_rate, + struct leapioraid_hba_port *port); +extern struct sas_function_template leapioraid_transport_functions; +extern struct scsi_transport_template *leapioraid_transport_template; +void +leapioraid_transport_del_phy_from_an_existing_port(struct LEAPIORAID_ADAPTER + *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy + *leapioraid_phy); +#if defined(LEAPIORAID_WIDE_PORT_API) +void +leapioraid_transport_add_phy_to_an_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy + *leapioraid_phy, + u64 sas_address, + struct leapioraid_hba_port *port); +#endif +#endif diff --git a/drivers/scsi/leapioraid/leapioraid_os.c b/drivers/scsi/leapioraid/leapioraid_os.c new file mode 100644 index 0000000000000000000000000000000000000000..368a3c859a04be91e27c4780ea2d0eca2929f4cd --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_os.c @@ -0,0 +1,9823 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Scsi Host Layer for MPT (Message Passing Technology) based controllers + * + * Copyright (C) 2013-2021 LSI Corporation + * Copyright (C) 2013-2021 Avago Technologies + * Copyright (C) 2013-2021 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "leapioraid_func.h"
+#include
+
+#define RAID_CHANNEL 1
+
+static void leapioraid_scsihost_expander_node_remove(
+	struct LEAPIORAID_ADAPTER *ioc,
+	struct leapioraid_raid_sas_node *sas_expander);
+static void leapioraid_firmware_event_work(struct work_struct *work);
+static void leapioraid_firmware_event_work_delayed(struct work_struct *work);
+static enum device_responsive_state
+leapioraid_scsihost_inquiry_vpd_sn(struct LEAPIORAID_ADAPTER *ioc, u16 handle,
+				   u8 **serial_number);
+static enum device_responsive_state
+leapioraid_scsihost_inquiry_vpd_supported_pages(struct LEAPIORAID_ADAPTER *ioc,
+						u16 handle, u32 lun, void *data,
+						u32 data_length);
+static enum device_responsive_state leapioraid_scsihost_ata_pass_thru_idd(
+	struct LEAPIORAID_ADAPTER *ioc,
+	u16 handle,
+	u8 *is_ssd_device,
+	u8 tr_timeout,
+	u8 tr_method);
+static enum device_responsive_state
+leapioraid_scsihost_wait_for_target_to_become_ready(
+	struct LEAPIORAID_ADAPTER *ioc,
+	u16 handle, u8 retry_count, u8 is_pd,
+	u8 tr_timeout, u8 tr_method);
+static enum device_responsive_state
+leapioraid_scsihost_wait_for_device_to_become_ready(
+	struct LEAPIORAID_ADAPTER *ioc,
+	u16 handle, u8 retry_count, u8 is_pd,
+	int lun, u8 tr_timeout, u8 tr_method);
+static void leapioraid_scsihost_remove_device(
+	struct LEAPIORAID_ADAPTER *ioc,
+	struct leapioraid_sas_device *sas_device);
+static int leapioraid_scsihost_add_device(
+	struct LEAPIORAID_ADAPTER *ioc, u16 handle,
+	u8 retry_count, u8 is_pd);
+static u8 leapioraid_scsihost_check_for_pending_tm(
+	struct LEAPIORAID_ADAPTER *ioc, u16 smid);
+static void leapioraid_scsihost_send_event_to_turn_on_pfa_led(
+	struct LEAPIORAID_ADAPTER *ioc, u16 handle);
+static void leapioraid_scsihost_complete_devices_scanning(
+	struct LEAPIORAID_ADAPTER *ioc);
+
+LIST_HEAD(leapioraid_ioc_list);
+DEFINE_SPINLOCK(leapioraid_gioc_lock);
+
+MODULE_AUTHOR(LEAPIORAID_AUTHOR);
+MODULE_DESCRIPTION(LEAPIORAID_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LEAPIORAID_DRIVER_VERSION);
+
+static u8 scsi_io_cb_idx = -1;
+static u8 tm_cb_idx = -1;
+static u8 ctl_cb_idx = -1;
+static u8 ctl_tm_cb_idx = -1;
+static u8 base_cb_idx = -1;
+static u8 port_enable_cb_idx = -1;
+static u8 transport_cb_idx = -1;
+static u8 scsih_cb_idx = -1;
+static u8 config_cb_idx = -1;
+static int leapioraid_ids;
+static u8 tm_tr_cb_idx = -1;
+static u8 tm_tr_volume_cb_idx = -1;
+static u8 tm_tr_internal_cb_idx = -1;
+static u8 tm_sas_control_cb_idx = -1;
+static u32 logging_level;
+
+MODULE_PARM_DESC(logging_level,
+		 " bits for enabling additional logging info (default=0)");
+
+static int open_pcie_trace;
+module_param(open_pcie_trace, int, 0444);
+MODULE_PARM_DESC(open_pcie_trace,
+		 "enable pcie trace logging (1=enabled, default=0=disabled)");
+
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0444); 
+MODULE_PARM_DESC(disable_discovery, "disable discovery"); + +static struct raid_template *leapioraid_raid_template; + +enum device_responsive_state { + DEVICE_READY, + DEVICE_RETRY, + DEVICE_RETRY_UA, + DEVICE_START_UNIT, + DEVICE_STOP_UNIT, + DEVICE_ERROR, +}; + +struct sense_info { + u8 skey; + u8 asc; + u8 ascq; +}; + +#define LEAPIORAID_TURN_ON_PFA_LED (0xFFFC) +#define LEAPIORAID_PORT_ENABLE_COMPLETE (0xFFFD) +#define LEAPIORAID_REMOVE_UNRESPONDING_DEVICES (0xFFFF) + +struct leapioraid_fw_event_work { + struct list_head list; + struct work_struct work; + u8 cancel_pending_work; + struct delayed_work delayed_work; + u8 delayed_work_active; + struct LEAPIORAID_ADAPTER *ioc; + u16 device_handle; + u8 VF_ID; + u8 VP_ID; + u8 ignore; + u16 event; + struct kref refcount; + void *event_data; + u8 *retries; +}; + +static void +leapioraid_fw_event_work_free(struct kref *r) +{ + struct leapioraid_fw_event_work *fw_work; + + fw_work = container_of( + r, struct leapioraid_fw_event_work, refcount); + kfree(fw_work->event_data); + kfree(fw_work->retries); + kfree(fw_work); +} + +static void +leapioraid_fw_event_work_get( + struct leapioraid_fw_event_work *fw_work) +{ + kref_get(&fw_work->refcount); +} + +static void +leapioraid_fw_event_work_put(struct leapioraid_fw_event_work *fw_work) +{ + kref_put(&fw_work->refcount, leapioraid_fw_event_work_free); +} + +static +struct leapioraid_fw_event_work *leapioraid_alloc_fw_event_work(int len) +{ + struct leapioraid_fw_event_work *fw_event; + + fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC); + if (!fw_event) + return NULL; + kref_init(&fw_event->refcount); + return fw_event; +} + +static int +leapioraid_scsihost_set_debug_level( + const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct LEAPIORAID_ADAPTER *ioc; + + if (ret) + return ret; + pr_info("setting logging_level(0x%08x)\n", logging_level); + spin_lock(&leapioraid_gioc_lock); + list_for_each_entry(ioc, &leapioraid_ioc_list, list) + ioc->logging_level = logging_level; + spin_unlock(&leapioraid_gioc_lock); + return 0; +} + +module_param_call(logging_level, + leapioraid_scsihost_set_debug_level, param_get_int, + &logging_level, 0644); + +static inline int +leapioraid_scsihost_srch_boot_sas_address(u64 sas_address, + struct LEAPIORAID_BOOT_DEVICE_SAS_WWID *boot_device) +{ + return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0; +} + +static inline int +leapioraid_scsihost_srch_boot_device_name(u64 device_name, + struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME *boot_device) +{ + return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0; +} + +static inline int +leapioraid_scsihost_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, + struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT *boot_device) +{ + return (enclosure_logical_id == + le64_to_cpu(boot_device->EnclosureLogicalID) + && slot_number == le16_to_cpu(boot_device->SlotNumber)) ? 
1 : 0; +} + +static void +leapioraid_scsihost_display_enclosure_chassis_info( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device, + struct scsi_device *sdev, + struct scsi_target *starget) +{ + if (sdev) { + if (sas_device->enclosure_handle != 0) + sdev_printk(KERN_INFO, sdev, + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + sdev_printk(KERN_INFO, sdev, + "enclosure level(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n", + sas_device->chassis_slot); + } else if (starget) { + if (sas_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + starget_printk(KERN_INFO, starget, + "enclosure level(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + starget_printk(KERN_INFO, starget, + "chassis slot(0x%04x)\n", sas_device->chassis_slot); + } else { + if (sas_device->enclosure_handle != 0) + pr_info("%s enclosure logical id(0x%016llx), slot(%d)\n", + ioc->name, + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + pr_info("%s enclosure level(0x%04x),connector name( %s)\n", + ioc->name, + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + pr_info("%s chassis slot(0x%04x)\n", + ioc->name, sas_device->chassis_slot); + } +} + +struct leapioraid_hba_port *leapioraid_get_port_by_id( + struct LEAPIORAID_ADAPTER *ioc, + u8 port_id, u8 skip_dirty_flag) +{ + struct leapioraid_hba_port *port, *port_next; + + if (!ioc->multipath_on_hba) + port_id = LEAPIORAID_MULTIPATH_DISABLED_PORT_ID; + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (port->port_id != port_id) + continue; + if (port->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT) + continue; + return port; + } + if (skip_dirty_flag) { + port = port_next = NULL; + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + if (port->port_id != port_id) + continue; + return port; + } + } + if (unlikely(!ioc->multipath_on_hba)) { + port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_ATOMIC); + if (!port) + return NULL; + + port->port_id = LEAPIORAID_MULTIPATH_DISABLED_PORT_ID; + pr_err( + "%s hba_port entry: %p, port: %d is added to hba_port list\n", + ioc->name, port, port->port_id); + list_add_tail(&port->list, &ioc->port_table_list); + return port; + } + return NULL; +} + +struct leapioraid_virtual_phy *leapioraid_get_vphy_by_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port, u32 phy) +{ + struct leapioraid_virtual_phy *vphy, *vphy_next; + + if (!port->vphys_mask) + return NULL; + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) { + if (vphy->phy_mask & (1 << phy)) + return vphy; + } + return NULL; +} + +static int +leapioraid_scsihost_is_boot_device(u64 sas_address, u64 device_name, + u64 enclosure_logical_id, u16 slot, u8 form, + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE *boot_device) +{ + int rc = 0; + + switch (form) { + case LEAPIORAID_BIOSPAGE2_FORM_SAS_WWID: + if (!sas_address) + break; + rc = 
leapioraid_scsihost_srch_boot_sas_address(sas_address, + &boot_device->SasWwid); + break; + case LEAPIORAID_BIOSPAGE2_FORM_ENCLOSURE_SLOT: + if (!enclosure_logical_id) + break; + rc = leapioraid_scsihost_srch_boot_encl_slot( + enclosure_logical_id, + slot, + &boot_device->EnclosureSlot); + break; + case LEAPIORAID_BIOSPAGE2_FORM_DEVICE_NAME: + if (!device_name) + break; + rc = leapioraid_scsihost_srch_boot_device_name(device_name, + &boot_device->DeviceName); + break; + case LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED: + break; + } + return rc; +} + +static int +leapioraid_scsihost_get_sas_address( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u64 *sas_address) +{ + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 ioc_status; + + *sas_address = 0; + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) { + if ((handle <= ioc->sas_hba.num_phys) && + (!(le32_to_cpu(sas_device_pg0.DeviceInfo) & + LEAPIORAID_SAS_DEVICE_INFO_SEP))) + *sas_address = ioc->sas_hba.sas_address; + else + *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + return 0; + } + if (ioc_status == LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE) + return -ENXIO; + pr_err("%s handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n", + ioc->name, handle, ioc_status, + __FILE__, __LINE__, __func__); + return -EIO; +} + +static void +leapioraid_scsihost_determine_boot_device( + struct LEAPIORAID_ADAPTER *ioc, void *device, + u32 channel) +{ + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + u64 sas_address; + u64 device_name; + u64 enclosure_logical_id; + u16 slot; + + if (!ioc->is_driver_loading) + return; + if (!ioc->bios_pg3.BiosVersion) + return; + if (channel == RAID_CHANNEL) { + raid_device = device; + sas_address = raid_device->wwid; + device_name = 0; + enclosure_logical_id = 0; + slot = 0; + } else { + sas_device = device; + sas_address = sas_device->sas_address; + device_name = sas_device->device_name; + enclosure_logical_id = sas_device->enclosure_logical_id; + slot = sas_device->slot; + } + if (!ioc->req_boot_device.device) { + if (leapioraid_scsihost_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedBootDevice)) { + dinitprintk(ioc, + pr_err( + "%s %s: req_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->req_boot_device.device = device; + ioc->req_boot_device.channel = channel; + } + } + if (!ioc->req_alt_boot_device.device) { + if (leapioraid_scsihost_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqAltBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedAltBootDevice)) { + dinitprintk(ioc, + pr_err( + "%s %s: req_alt_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->req_alt_boot_device.device = device; + ioc->req_alt_boot_device.channel = channel; + } + } + if (!ioc->current_boot_device.device) { + if (leapioraid_scsihost_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.CurrentBootDeviceForm & + 
LEAPIORAID_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.CurrentBootDevice)) { + dinitprintk(ioc, + pr_err( + "%s %s: current_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->current_boot_device.device = device; + ioc->current_boot_device.channel = channel; + } + } +} + +static +struct leapioraid_sas_device *__leapioraid_get_sdev_from_target( + struct LEAPIORAID_ADAPTER *ioc, + struct LEAPIORAID_TARGET *tgt_priv) +{ + struct leapioraid_sas_device *ret; + + assert_spin_locked(&ioc->sas_device_lock); + ret = tgt_priv->sas_dev; + if (ret) + leapioraid_sas_device_get(ret); + return ret; +} + +static +struct leapioraid_sas_device *leapioraid_get_sdev_from_target( + struct LEAPIORAID_ADAPTER *ioc, + struct LEAPIORAID_TARGET *tgt_priv) +{ + struct leapioraid_sas_device *ret; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + ret = __leapioraid_get_sdev_from_target(ioc, tgt_priv); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return ret; +} + +static +struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_device *sas_device; + + if (!port) + return NULL; + assert_spin_locked(&ioc->sas_device_lock); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->sas_address == sas_address && + sas_device->port == port) + goto found_device; + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->sas_address == sas_address && + sas_device->port == port) + goto found_device; + return NULL; +found_device: + leapioraid_sas_device_get(sas_device); + return sas_device; +} + +struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr_and_rphy( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct sas_rphy *rphy) +{ + struct leapioraid_sas_device *sas_device; + + assert_spin_locked(&ioc->sas_device_lock); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->sas_address == sas_address && + (sas_device->rphy == rphy)) + goto found_device; + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->sas_address == sas_address && + (sas_device->rphy == rphy)) + goto found_device; + return NULL; +found_device: + leapioraid_sas_device_get(sas_device); + return sas_device; +} + +struct leapioraid_sas_device *leapioraid_get_sdev_by_addr( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_device *sas_device = NULL; + unsigned long flags; + + if (!port) + return sas_device; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return sas_device; +} + +static struct leapioraid_sas_device *__leapioraid_get_sdev_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_sas_device *sas_device; + + assert_spin_locked(&ioc->sas_device_lock); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->handle == handle) + goto found_device; + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->handle == handle) + goto found_device; + return NULL; +found_device: + leapioraid_sas_device_get(sas_device); + return sas_device; +} + +struct leapioraid_sas_device *leapioraid_get_sdev_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct 
leapioraid_sas_device *sas_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return sas_device; +} + +void +leapioraid_scsihost_sas_device_remove(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + int was_on_sas_device_list = 0; + + if (!sas_device) + return; + pr_info("%s %s: removing handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address); + leapioraid_scsihost_display_enclosure_chassis_info( + ioc, sas_device, NULL, NULL); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + was_on_sas_device_list = 1; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (was_on_sas_device_list) { + kfree(sas_device->serial_number); + leapioraid_sas_device_put(sas_device); + } +} + +static void +leapioraid_scsihost_device_remove_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int was_on_sas_device_list = 0; + + if (ioc->shost_recovery) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + was_on_sas_device_list = 1; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (was_on_sas_device_list) { + leapioraid_scsihost_remove_device(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + } +} + +void +leapioraid_device_remove_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int was_on_sas_device_list = 0; + + if (ioc->shost_recovery) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port); + if (sas_device) { + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + was_on_sas_device_list = 1; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (was_on_sas_device_list) { + leapioraid_scsihost_remove_device(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + } +} + +static void +leapioraid_scsihost_sas_device_add( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + + dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + leapioraid_sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (ioc->hide_drives) { + clear_bit(sas_device->handle, ioc->pend_os_device_add); + return; + } + if (!leapioraid_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent, + sas_device->port)) { + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + if (!ioc->is_driver_loading) { + 
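+			/*
+			 * A transport port was registered above but no
+			 * scsi_target ever attached. Outside of initial
+			 * driver load nothing will claim the device later,
+			 * so undo the port registration and drop the device.
+			 */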
leapioraid_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } + } else + clear_bit(sas_device->handle, ioc->pend_os_device_add); +} + +static void +leapioraid_scsihost_sas_device_init_add( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + + dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + leapioraid_sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_init_list); + leapioraid_scsihost_determine_boot_device(ioc, sas_device, 0); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +static +struct leapioraid_raid_device *leapioraid_scsihost_raid_device_find_by_id( + struct LEAPIORAID_ADAPTER *ioc, int id, int channel) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->id == id && raid_device->channel == channel) { + r = raid_device; + goto out; + } + } +out: + return r; +} + +struct leapioraid_raid_device *leapioraid_raid_device_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->handle != handle) + continue; + r = raid_device; + goto out; + } +out: + return r; +} + +static +struct leapioraid_raid_device *leapioraid_scsihost_raid_device_find_by_wwid( + struct LEAPIORAID_ADAPTER *ioc, u64 wwid) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->wwid != wwid) + continue; + r = raid_device; + goto out; + } +out: + return r; +} + +static void +leapioraid_scsihost_raid_device_add(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_device *raid_device) +{ + unsigned long flags; + u8 protection_mask; + + dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, + __func__, raid_device->handle, + (unsigned long long)raid_device->wwid)); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_add_tail(&raid_device->list, &ioc->raid_device_list); + if (!ioc->disable_eedp_support) { + protection_mask = scsi_host_get_prot(ioc->shost); + if (protection_mask & SHOST_DIX_TYPE0_PROTECTION) { + scsi_host_set_prot(ioc->shost, protection_mask & 0x77); + pr_err( + "%s: Disabling DIX0 because of unsupport!\n", + ioc->name); + } + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +static void +leapioraid_scsihost_raid_device_remove(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_device *raid_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_del(&raid_device->list); + kfree(raid_device); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_sas_node *sas_expander, *r; + + r = NULL; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->handle != handle) + continue; + r = sas_expander; + goto 
out; + } +out: + return r; +} + +static +struct leapioraid_enclosure_node *leapioraid_scsihost_enclosure_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle) +{ + struct leapioraid_enclosure_node *enclosure_dev, *r; + + r = NULL; + list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { + if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) + continue; + r = enclosure_dev; + goto out; + } +out: + return r; +} + +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct leapioraid_raid_sas_node *sas_expander, *r; + + r = NULL; + if (!port) + return r; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->sas_address != sas_address || + sas_expander->port != port) + continue; + r = sas_expander; + goto out; + } +out: + return r; +} + +static void +leapioraid_scsihost_expander_node_add(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_expander) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&sas_expander->list, &ioc->sas_expander_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +} + +static int +leapioraid_scsihost_is_sas_end_device(u32 device_info) +{ + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE && + ((device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) | + (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET) | + (device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE))) + return 1; + else + return 0; +} + +static u8 +leapioraid_scsihost_scsi_lookup_find_by_target( + struct LEAPIORAID_ADAPTER *ioc, int id, + int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && scmd->device->channel == channel) + return 1; + } + return 0; +} + +static u8 +leapioraid_scsihost_scsi_lookup_find_by_lun( + struct LEAPIORAID_ADAPTER *ioc, int id, + unsigned int lun, int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && + scmd->device->channel == channel && + scmd->device->lun == lun) + return 1; + } + return 0; +} + +struct scsi_cmnd *leapioraid_scsihost_scsi_lookup_get( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + struct scsi_cmnd *scmd = NULL; + struct leapioraid_scsiio_tracker *st; + struct LeapioraidSCSIIOReq_t *mpi_request; + u32 unique_tag = smid - 1; + + if (smid > 0 && smid <= ioc->shost->can_queue) { + unique_tag = + ioc->io_queue_num[smid - + 1] << BLK_MQ_UNIQUE_TAG_BITS | (smid - 1); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + if (!mpi_request->DevHandle) + return scmd; + scmd = scsi_host_find_tag(ioc->shost, unique_tag); + if (scmd) { + st = leapioraid_base_scsi_cmd_priv(scmd); + if ((!st) || (st->cb_idx == 0xFF) || (st->smid == 0)) + scmd = NULL; + } + } + return scmd; +} + +static void +leapioraid_scsihost_display_sdev_qd(struct scsi_device *sdev) +{ + if (sdev->inquiry_len <= 7) + return; + sdev_printk(KERN_INFO, sdev, + "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, sdev->tagged_supported, + sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1)); +} + +static int +leapioraid_scsihost_change_queue_depth( + struct scsi_device *sdev, int 
qdepth)
+{
+	struct Scsi_Host *shost = sdev->host;
+	int max_depth;
+	struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
+	struct LEAPIORAID_DEVICE *sas_device_priv_data;
+	struct LEAPIORAID_TARGET *sas_target_priv_data;
+	struct leapioraid_sas_device *sas_device;
+	unsigned long flags;
+
+	max_depth = shost->can_queue;
+
+	/* Only clamp the queue depth for SATA end devices; when the user
+	 * asked for the maximum via enable_sdev_max_qd, skip the clamp.
+	 */
+	if (ioc->enable_sdev_max_qd)
+		goto not_sata;
+
+	sas_device_priv_data = sdev->hostdata;
+	if (!sas_device_priv_data)
+		goto not_sata;
+	sas_target_priv_data = sas_device_priv_data->sas_target;
+	if (!sas_target_priv_data)
+		goto not_sata;
+	if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME))
+		goto not_sata;
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device =
+	    __leapioraid_get_sdev_from_target(ioc, sas_target_priv_data);
+	if (sas_device) {
+		if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE)
+			max_depth = LEAPIORAID_SATA_QUEUE_DEPTH;
+		leapioraid_sas_device_put(sas_device);
+	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+not_sata:
+	if (!sdev->tagged_supported)
+		max_depth = 1;
+	if (qdepth > max_depth)
+		qdepth = max_depth;
+	scsi_change_queue_depth(sdev, qdepth);
+	leapioraid_scsihost_display_sdev_qd(sdev);
+	return sdev->queue_depth;
+}
+
+void
+leapioraid__scsihost_change_queue_depth(
+	struct scsi_device *sdev, int qdepth)
+{
+	struct Scsi_Host *shost = sdev->host;
+	struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
+
+	if (ioc->enable_sdev_max_qd)
+		qdepth = shost->can_queue;
+	leapioraid_scsihost_change_queue_depth(sdev, qdepth);
+}
+
+static int
+leapioraid_scsihost_target_alloc(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+	struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
+	struct LEAPIORAID_TARGET *sas_target_priv_data;
+	struct leapioraid_sas_device *sas_device;
+	struct leapioraid_raid_device *raid_device;
+	unsigned long flags;
+	struct sas_rphy *rphy;
+
+	sas_target_priv_data =
+	    kzalloc(sizeof(struct LEAPIORAID_TARGET), GFP_KERNEL);
+	if (!sas_target_priv_data)
+		return -ENOMEM;
+	starget->hostdata = sas_target_priv_data;
+	sas_target_priv_data->starget = starget;
+	sas_target_priv_data->handle = LEAPIORAID_INVALID_DEVICE_HANDLE;
+	if (starget->channel == RAID_CHANNEL) {
+		spin_lock_irqsave(&ioc->raid_device_lock, flags);
+		raid_device = leapioraid_scsihost_raid_device_find_by_id(
+			ioc, starget->id, starget->channel);
+		if (raid_device) {
+			sas_target_priv_data->handle = raid_device->handle;
+			sas_target_priv_data->sas_address = raid_device->wwid;
+			sas_target_priv_data->flags |=
+			    LEAPIORAID_TARGET_FLAGS_VOLUME;
+			raid_device->starget = starget;
+		}
+		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+		return 0;
+	}
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	rphy = dev_to_rphy(starget->dev.parent);
+	sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc,
+			rphy->identify.sas_address, rphy);
+	if (sas_device) {
+		sas_target_priv_data->handle = sas_device->handle;
+		sas_target_priv_data->sas_address = sas_device->sas_address;
+		sas_target_priv_data->port = sas_device->port;
+		sas_target_priv_data->sas_dev = sas_device;
+		sas_device->starget = starget;
+		sas_device->id = starget->id;
+		sas_device->channel = starget->channel;
+		if (test_bit(sas_device->handle, ioc->pd_handles))
+			sas_target_priv_data->flags |=
+			    LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT;
+		if (sas_device->fast_path)
+			sas_target_priv_data->flags |=
+			    LEAPIORAID_TARGET_FASTPATH_IO;
+	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	return 0;
+} + +static void +leapioraid_scsihost_target_destroy(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + unsigned long flags; + + sas_target_priv_data = starget->hostdata; + if (!sas_target_priv_data) + return; + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id( + ioc, starget->id, starget->channel); + if (raid_device) { + raid_device->starget = NULL; + raid_device->sdev = NULL; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + goto out; + } + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = + __leapioraid_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device && (sas_device->starget == starget) + && (sas_device->id == starget->id) + && (sas_device->channel == starget->channel)) + sas_device->starget = NULL; + if (sas_device) { + sas_target_priv_data->sas_dev = NULL; + leapioraid_sas_device_put(sas_device); + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +out: + kfree(sas_target_priv_data); + starget->hostdata = NULL; +} + +static int +leapioraid_scsihost_slave_alloc(struct scsi_device *sdev) +{ + struct Scsi_Host *shost; + struct LEAPIORAID_ADAPTER *ioc; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_target *starget; + struct leapioraid_raid_device *raid_device; + struct leapioraid_sas_device *sas_device; + unsigned long flags; + + sas_device_priv_data = + kzalloc(sizeof(*sas_device_priv_data), GFP_KERNEL); + if (!sas_device_priv_data) + return -ENOMEM; + sas_device_priv_data->lun = sdev->lun; + sas_device_priv_data->flags = LEAPIORAID_DEVICE_FLAGS_INIT; + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns++; + sas_device_priv_data->sas_target = sas_target_priv_data; + sdev->hostdata = sas_device_priv_data; + if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT)) + sdev->no_uld_attach = 1; + shost = dev_to_shost(&starget->dev); + ioc = leapioraid_shost_private(shost); + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id(ioc, + starget->id, + starget->channel); + if (raid_device) + raid_device->sdev = sdev; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } + if (!(sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, + sas_target_priv_data->sas_address, + sas_target_priv_data->port); + if (sas_device && (sas_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : sas_device->starget set to starget @ %d\n", + __func__, __LINE__); + sas_device->starget = starget; + } + if (sas_device) + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + return 0; +} + +static void +leapioraid_scsihost_slave_destroy(struct scsi_device *sdev) +{ + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct scsi_target *starget; + struct Scsi_Host *shost; + struct LEAPIORAID_ADAPTER *ioc; + struct leapioraid_sas_device *sas_device; + unsigned long flags; + + if 
(!sdev->hostdata) + return; + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns--; + shost = dev_to_shost(&starget->dev); + ioc = leapioraid_shost_private(shost); + if (!(sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_from_target(ioc, + sas_target_priv_data); + if (sas_device && !sas_target_priv_data->num_luns) + sas_device->starget = NULL; + if (sas_device) + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + +static void +leapioraid_scsihost_display_sata_capabilities( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, struct scsi_device *sdev) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + u32 ioc_status; + u16 flags; + u32 device_info; + + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + flags = le16_to_cpu(sas_device_pg0.Flags); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + sdev_printk(KERN_INFO, sdev, + "atapi(%s), ncq(%s), asyn_notify(%s),\n\t\t" + "smart(%s), fua(%s), sw_preserve(%s)\n", + (device_info & LEAPIORAID_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : + "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" + : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) + ? "y" : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? + "y" : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" + : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : + "n"); +} + +static int +leapioraid_scsihost_is_raid(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return (sdev->channel == RAID_CHANNEL) ? 
1 : 0; +} + +static void +leapioraid_scsihost_get_resync(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(sdev->host); + static struct leapioraid_raid_device *raid_device; + unsigned long flags; + struct LeapioraidRaidVolP0_t vol_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 volume_status_flags; + u8 percent_complete; + u16 handle; + + percent_complete = 0; + handle = 0; + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id( + ioc, sdev->id, sdev->channel); + if (raid_device) { + handle = raid_device->handle; + percent_complete = raid_device->percent_complete; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!handle) + goto out; + if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + handle, + sizeof + (struct LeapioraidRaidVolP0_t))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + percent_complete = 0; + goto out; + } + volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (!(volume_status_flags & + LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) + percent_complete = 0; +out: + raid_set_resync(leapioraid_raid_template, dev, percent_complete); +} + +static void +leapioraid_scsihost_get_state(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(sdev->host); + static struct leapioraid_raid_device *raid_device; + unsigned long flags; + struct LeapioraidRaidVolP0_t vol_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 volstate; + enum raid_state state = RAID_STATE_UNKNOWN; + u16 handle = 0; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id( + ioc, sdev->id, sdev->channel); + if (raid_device) + handle = raid_device->handle; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!raid_device) + goto out; + if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + handle, + sizeof + (struct LeapioraidRaidVolP0_t))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (volstate & LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { + state = RAID_STATE_RESYNCING; + goto out; + } + switch (vol_pg0.VolumeState) { + case LEAPIORAID_RAID_VOL_STATE_OPTIMAL: + case LEAPIORAID_RAID_VOL_STATE_ONLINE: + state = RAID_STATE_ACTIVE; + break; + case LEAPIORAID_RAID_VOL_STATE_DEGRADED: + state = RAID_STATE_DEGRADED; + break; + case LEAPIORAID_RAID_VOL_STATE_FAILED: + case LEAPIORAID_RAID_VOL_STATE_MISSING: + state = RAID_STATE_OFFLINE; + break; + } +out: + raid_set_state(leapioraid_raid_template, dev, state); +} + +static void +leapioraid_scsihost_set_level(struct LEAPIORAID_ADAPTER *ioc, + struct scsi_device *sdev, u8 volume_type) +{ + enum raid_level level = RAID_LEVEL_UNKNOWN; + + switch (volume_type) { + case LEAPIORAID_RAID_VOL_TYPE_RAID0: + level = RAID_LEVEL_0; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID10: + case LEAPIORAID_RAID_VOL_TYPE_RAID1E: + level = RAID_LEVEL_10; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1: + level = RAID_LEVEL_1; + break; + } + raid_set_level(leapioraid_raid_template, &sdev->sdev_gendev, level); +} + +static int +leapioraid_scsihost_get_volume_capabilities( + struct LEAPIORAID_ADAPTER 
*ioc, + struct leapioraid_raid_device *raid_device) +{ + struct LeapioraidRaidVolP0_t *vol_pg0; + struct LeapioraidRaidPDP0_t pd_pg0; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 sz; + u8 num_pds; + + if ((leapioraid_config_get_number_pds(ioc, raid_device->handle, + &num_pds)) || !num_pds) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + return 1; + } + raid_device->num_pds = num_pds; + sz = offsetof(struct LeapioraidRaidVolP0_t, PhysDisk) + (num_pds * + sizeof + (struct LEAPIORAID_RAIDVOL0_PHYS_DISK)); + vol_pg0 = kzalloc(sz, GFP_KERNEL); + if (!vol_pg0) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + return 1; + } + if ((leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + raid_device->handle, sz))) { + dfailprintk(ioc, + pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + kfree(vol_pg0); + return 1; + } + raid_device->volume_type = vol_pg0->VolumeType; + if (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, + LEAPIORAID_PHYSDISK_PGAD_FORM_PHYSDISKNUM, + vol_pg0->PhysDisk[0].PhysDiskNum))) { + if (! + (leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, + le16_to_cpu(pd_pg0.DevHandle)))) { + raid_device->device_info = + le32_to_cpu(sas_device_pg0.DeviceInfo); + } + } + kfree(vol_pg0); + return 0; +} + +static void +leapioraid_scsihost_enable_tlr( + struct LEAPIORAID_ADAPTER *ioc, struct scsi_device *sdev) +{ + u8 data[30]; + u8 page_len, ii; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + + if (sdev->type != TYPE_TAPE) + return; + if (!(ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_TLR)) + return; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + return; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + return; + if (leapioraid_scsihost_inquiry_vpd_supported_pages(ioc, + sas_target_priv_data->handle, + sdev->lun, data, + sizeof(data)) != + DEVICE_READY) { + sas_device = + leapioraid_get_sdev_by_addr(ioc, + sas_target_priv_data->sas_address, + sas_target_priv_data->port); + if (sas_device) { + sdev_printk(KERN_INFO, sdev, + "%s: DEVICE NOT READY: handle(0x%04x),\n\t\t" + "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", + __func__, + sas_device->handle, + (unsigned long long)sas_device->sas_address, + sas_device->phy, + (unsigned long long)sas_device->device_name); + leapioraid_scsihost_display_enclosure_chassis_info(NULL, + sas_device, + sdev, NULL); + leapioraid_sas_device_put(sas_device); + } + return; + } + page_len = data[3]; + for (ii = 4; ii < page_len + 4; ii++) { + if (data[ii] == 0x90) { + sas_device_priv_data->flags |= LEAPIORAID_DEVICE_TLR_ON; + return; + } + } +} + +static void +leapioraid_scsihost_enable_ssu_on_sata( + struct leapioraid_sas_device *sas_device, + struct scsi_device *sdev) +{ + if (!(sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE)) + return; + if (sas_device->ssd_device) { + sdev->manage_system_start_stop = 1; + sdev->manage_runtime_start_stop = 1; + } +} + +static int +leapioraid_scsihost_slave_configure(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct LEAPIORAID_ADAPTER *ioc = 
leapioraid_shost_private(shost); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + unsigned long flags; + int qdepth; + u8 ssp_target = 0; + char *ds = ""; + char *r_level = ""; + u16 handle, volume_handle = 0; + u64 volume_wwid = 0; + u8 *serial_number = NULL; + enum device_responsive_state retval; + u8 count = 0; + + qdepth = 1; + sas_device_priv_data = sdev->hostdata; + sas_device_priv_data->configured_lun = 1; + sas_device_priv_data->flags &= ~LEAPIORAID_DEVICE_FLAGS_INIT; + sas_target_priv_data = sas_device_priv_data->sas_target; + handle = sas_target_priv_data->handle; + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = + leapioraid_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!raid_device) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + if (leapioraid_scsihost_get_volume_capabilities(ioc, raid_device)) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + if (raid_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = LEAPIORAID_SAS_QUEUE_DEPTH; + ds = "SSP"; + } else { + qdepth = LEAPIORAID_SATA_QUEUE_DEPTH; + if (raid_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + else + ds = "STP"; + } + switch (raid_device->volume_type) { + case LEAPIORAID_RAID_VOL_TYPE_RAID0: + r_level = "RAID0"; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1E: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + if (ioc->manu_pg10.OEMIdentifier && + (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & + 0x00000004) && + !(raid_device->num_pds % 2)) + r_level = "RAID10"; + else + r_level = "RAID1E"; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + r_level = "RAID1"; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID10: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + r_level = "RAID10"; + break; + case LEAPIORAID_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + r_level = "RAIDX"; + break; + } + if (!ioc->warpdrive_msg) + sdev_printk( + KERN_INFO, sdev, + "%s: handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n", + r_level, raid_device->handle, + (unsigned long long)raid_device->wwid, + raid_device->num_pds, ds); + if (shost->max_sectors > LEAPIORAID_RAID_MAX_SECTORS) { + blk_queue_max_hw_sectors(sdev->request_queue, + LEAPIORAID_RAID_MAX_SECTORS); + sdev_printk(KERN_INFO, sdev, + "Set queue's max_sector to: %u\n", + LEAPIORAID_RAID_MAX_SECTORS); + } + leapioraid__scsihost_change_queue_depth(sdev, qdepth); + leapioraid_scsihost_set_level(ioc, sdev, raid_device->volume_type); + return 0; + } + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + if (leapioraid_config_get_volume_handle(ioc, handle, + &volume_handle)) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + if (volume_handle && leapioraid_config_get_volume_wwid(ioc, + volume_handle, + &volume_wwid)) { + dfailprintk(ioc, + pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + } + leapioraid_scsihost_inquiry_vpd_sn(ioc, handle, &serial_number); + 
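+	/*
+	 * Re-acquire the device under the lock and attach the volume
+	 * mapping and the VPD serial number fetched above; the lookup
+	 * reference is dropped once the device details are logged.
+	 */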
spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, + sas_device_priv_data->sas_target->sas_address, + sas_device_priv_data->sas_target->port); + if (!sas_device) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + kfree(serial_number); + return 1; + } + sas_device->volume_handle = volume_handle; + sas_device->volume_wwid = volume_wwid; + sas_device->serial_number = serial_number; + if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = (sas_device->port_type > 1) ? + ioc->max_wideport_qd : ioc->max_narrowport_qd; + ssp_target = 1; + if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SEP) { + sdev_printk(KERN_WARNING, sdev, + "set ignore_delay_remove for handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->ignore_delay_remove = 1; + ds = "SES"; + } else + ds = "SSP"; + } else { + qdepth = ioc->max_sata_qd; + if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET) + ds = "STP"; + else if (sas_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + } + sdev_printk( + KERN_INFO, sdev, + "%s: handle(0x%04x), sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", + ds, handle, (unsigned long long)sas_device->sas_address, + sas_device->phy, + (unsigned long long)sas_device->device_name); + leapioraid_scsihost_display_enclosure_chassis_info( + NULL, sas_device, sdev, NULL); + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!ssp_target) { + leapioraid_scsihost_display_sata_capabilities(ioc, handle, sdev); + do { + retval = leapioraid_scsihost_ata_pass_thru_idd(ioc, handle, + &sas_device->ssd_device, 30, 0); + } while ((retval == DEVICE_RETRY || retval == DEVICE_RETRY_UA) + && count++ < 3); + } + leapioraid_scsihost_enable_ssu_on_sata(sas_device, sdev); + if (serial_number) + sdev_printk(KERN_INFO, sdev, "serial_number(%s)\n", + serial_number); + leapioraid__scsihost_change_queue_depth(sdev, qdepth); + if (ssp_target) { + sas_read_port_mode_page(sdev); + leapioraid_scsihost_enable_tlr(ioc, sdev); + } + + return 0; +} + +static int +leapioraid_scsihost_bios_param( + struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int params[]) +{ + int heads; + int sectors; + sector_t cylinders; + ulong dummy; + + heads = 64; + sectors = 32; + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + if ((ulong) capacity >= 0x200000) { + heads = 255; + sectors = 63; + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + } + params[0] = heads; + params[1] = sectors; + params[2] = cylinders; + return 0; +} + +static void +leapioraid_scsihost_response_code( + struct LEAPIORAID_ADAPTER *ioc, u8 response_code) +{ + char *desc; + + switch (response_code) { + case LEAPIORAID_SCSITASKMGMT_RSP_TM_COMPLETE: + desc = "task management request completed"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME: + desc = "invalid frame"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: + desc = "task management request not supported"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_FAILED: + desc = "task management request failed"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED: + desc = "task management request succeeded"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_INVALID_LUN: + desc = "invalid lun"; + 
break; + case 0xA: + desc = "overlapped tag attempted"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: + desc = "task queued, however not sent to target"; + break; + default: + desc = "unknown"; + break; + } + pr_warn("%s response_code(0x%01x): %s\n", + ioc->name, response_code, desc); +} + +static u8 +leapioraid_scsihost_tm_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + if (ioc->tm_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->tm_cmds.smid != smid) + return 1; + ioc->tm_cmds.status |= LEAPIORAID_CMD_COMPLETE; + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength * 4); + ioc->tm_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + } + ioc->tm_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->tm_cmds.done); + return 1; +} + +void +leapioraid_scsihost_set_tm_flag( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + u8 skip = 0; + + shost_for_each_device(sdev, ioc->shost) { + if (skip) + continue; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle == handle) { + sas_device_priv_data->sas_target->tm_busy = 1; + skip = 1; + ioc->ignore_loginfos = 1; + } + } +} + +void +leapioraid_scsihost_clear_tm_flag( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + u8 skip = 0; + + shost_for_each_device(sdev, ioc->shost) { + if (skip) + continue; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle == handle) { + sas_device_priv_data->sas_target->tm_busy = 0; + skip = 1; + ioc->ignore_loginfos = 0; + } + } +} + +static int +leapioraid_scsihost_tm_cmd_map_status( + struct LEAPIORAID_ADAPTER *ioc, uint channel, + uint id, uint lun, u8 type, u16 smid_task) +{ + if (smid_task <= ioc->shost->can_queue) { + switch (type) { + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + if (! + (leapioraid_scsihost_scsi_lookup_find_by_target + (ioc, id, channel))) + return SUCCESS; + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + if (! 
+ (leapioraid_scsihost_scsi_lookup_find_by_lun + (ioc, id, lun, channel))) + return SUCCESS; + break; + default: + return SUCCESS; + } + } else if (smid_task == ioc->scsih_cmds.smid) { + if ((ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE) || + (ioc->scsih_cmds.status & LEAPIORAID_CMD_NOT_USED)) + return SUCCESS; + } else if (smid_task == ioc->ctl_cmds.smid) { + if ((ioc->ctl_cmds.status & LEAPIORAID_CMD_COMPLETE) || + (ioc->ctl_cmds.status & LEAPIORAID_CMD_NOT_USED)) + return SUCCESS; + } + return FAILED; +} + +static int +leapioraid_scsihost_tm_post_processing(struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task) +{ + int rc; + + rc = leapioraid_scsihost_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); + if (rc == SUCCESS) + return rc; + pr_err( + "%s Poll finish of smid(%d),task_type(0x%02x),handle(0x%04x)\n", + ioc->name, + smid_task, + type, + handle); + leapioraid_base_mask_interrupts(ioc); + leapioraid_base_sync_reply_irqs(ioc, 1); + leapioraid_base_unmask_interrupts(ioc); + return leapioraid_scsihost_tm_cmd_map_status( + ioc, channel, id, lun, type, smid_task); +} + +int +leapioraid_scsihost_issue_tm( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task, u8 timeout, u8 tr_method) +{ + struct LeapioraidSCSITmgReq_t *mpi_request; + struct LeapioraidSCSITmgRep_t *mpi_reply; + struct LeapioraidSCSIIOReq_t *request; + u16 smid = 0; + u32 ioc_state; + struct leapioraid_scsiio_tracker *scsi_lookup = NULL; + int rc; + u16 msix_task = 0; + u8 issue_reset = 0; + + lockdep_assert_held(&ioc->tm_cmds.mutex); + if (ioc->tm_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_info("%s %s: tm_cmd busy!!!\n", + __func__, ioc->name); + return FAILED; + } + if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return FAILED; + } + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + if (ioc_state & LEAPIORAID_DOORBELL_USED) { + pr_info("%s unexpected doorbell active!\n", + ioc->name); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + leapioraid_base_coredump_info(ioc, + ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? 
SUCCESS : FAILED; + } + smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + return FAILED; + } + if (type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + scsi_lookup = leapioraid_get_st_from_smid(ioc, smid_task); + dtmprintk(ioc, pr_info( + "%s sending tm: handle(0x%04x),\n\t\t" + "task_type(0x%02x), timeout(%d) tr_method(0x%x) smid(%d)\n", + ioc->name, + handle, + type, + timeout, + tr_method, + smid_task)); + ioc->tm_cmds.status = LEAPIORAID_CMD_PENDING; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->tm_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t)); + memset(ioc->tm_cmds.reply, 0, sizeof(struct LeapioraidSCSITmgRep_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = type; + mpi_request->MsgFlags = tr_method; + if (type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + mpi_request->TaskMID = cpu_to_le16(smid_task); + int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); + leapioraid_scsihost_set_tm_flag(ioc, handle); + init_completion(&ioc->tm_cmds.done); + if ((type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) && + (scsi_lookup && (scsi_lookup->msix_io < ioc->reply_queue_count))) + msix_task = scsi_lookup->msix_io; + else + msix_task = 0; + ioc->put_smid_hi_priority(ioc, smid, msix_task); + wait_for_completion_timeout(&ioc->tm_cmds.done, timeout * HZ); + if (!(ioc->tm_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->tm_cmds.status, mpi_request, + sizeof + (struct LeapioraidSCSITmgReq_t) + / 4, issue_reset); + if (issue_reset) { + rc = leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + rc = (!rc) ? 
SUCCESS : FAILED; + goto out; + } + } + leapioraid_base_sync_reply_irqs(ioc, 0); + if (ioc->tm_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->tm_cmds.reply; + dtmprintk(ioc, pr_info( + "%s complete tm: ioc_status(0x%04x),\n\t\t" + "loginfo(0x%08x), term_count(0x%08x)\n", + ioc->name, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + if (ioc->logging_level & LEAPIORAID_DEBUG_TM) { + leapioraid_scsihost_response_code( + ioc, mpi_reply->ResponseCode); + if (mpi_reply->IOCStatus) + leapioraid_debug_dump_mf( + mpi_request, + sizeof(struct LeapioraidSCSITmgReq_t) / 4); + } + } + switch (type) { + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + rc = SUCCESS; + request = leapioraid_base_get_msg_frame(ioc, smid_task); + if (le16_to_cpu(request->DevHandle) != handle) + break; + pr_err( + "%s Task abort tm failed:\n\t\t" + "handle(0x%04x), timeout(%d),\n\t\t" + "tr_method(0x%x), smid(%d), msix_index(%d)\n", + ioc->name, + handle, + timeout, + tr_method, + smid_task, + msix_task); + rc = FAILED; + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + rc = leapioraid_scsihost_tm_post_processing( + ioc, handle, channel, id, lun, type, smid_task); + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK: + rc = SUCCESS; + break; + default: + rc = FAILED; + break; + } +out: + leapioraid_scsihost_clear_tm_flag(ioc, handle); + ioc->tm_cmds.status = LEAPIORAID_CMD_NOT_USED; + return rc; +} + +int +leapioraid_scsihost_issue_locked_tm( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task, u8 timeout, u8 tr_method) +{ + int ret; + + mutex_lock(&ioc->tm_cmds.mutex); + ret = leapioraid_scsihost_issue_tm( + ioc, handle, channel, id, lun, type, + smid_task, timeout, tr_method); + mutex_unlock(&ioc->tm_cmds.mutex); + return ret; +} + +static void +leapioraid_scsihost_tm_display_info( + struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *priv_target = starget->hostdata; + struct leapioraid_sas_device *sas_device = NULL; + unsigned long flags; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->warpdrive_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + scsi_print_command(scmd); + if (priv_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + starget_printk( + KERN_INFO, starget, "%s handle(0x%04x), %s wwid(0x%016llx)\n", + device_str, + priv_target->handle, device_str, + (unsigned long long)priv_target->sas_address); + } else { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = + __leapioraid_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + if (priv_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + starget_printk(KERN_INFO, starget, + "volume handle(0x%04x), volume wwid(0x%016llx)\n", + sas_device->volume_handle, + (unsigned long long)sas_device->volume_wwid); + } + starget_printk(KERN_INFO, starget, + "%s: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address, sas_device->phy); + leapioraid_scsihost_display_enclosure_chassis_info(NULL, + sas_device, + NULL, starget); + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } +} + +static int 
+leapioraid_scsihost_abort(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + u16 handle; + int r; + struct leapioraid_scsiio_tracker *st + = leapioraid_base_scsi_cmd_priv(scmd); + u8 timeout = 30; + + sdev_printk( + KERN_INFO, scmd->device, + "attempting task abort! scmd(0x%p), outstanding for %u ms & timeout %u ms\n", + scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), + (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000); + leapioraid_scsihost_tm_display_info(ioc, scmd); + if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", + ((ioc->remove_host) ? ("shost is getting removed!") + : ("pci device been removed!")), scmd); + if (st && st->smid) + leapioraid_base_free_smid(ioc, st->smid); + scmd->result = DID_NO_CONNECT << 16; + r = FAILED; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + if (st == NULL || st->cb_idx == 0xFF) { + sdev_printk(KERN_INFO, scmd->device, + "No ref at driver, assuming scmd(0x%p) might have completed\n", + scmd); + scmd->result = DID_RESET << 16; + r = SUCCESS; + goto out; + } + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT || + sas_device_priv_data->sas_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + leapioraid_halt_firmware(ioc, 0); + handle = sas_device_priv_data->sas_target->handle; + r = leapioraid_scsihost_issue_locked_tm( + ioc, handle, + scmd->device->channel, + scmd->device->id, + scmd->device->lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + st->smid, timeout, 0); +out: + sdev_printk( + KERN_INFO, scmd->device, + "task abort: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + return r; +} + +static int +leapioraid_scsihost_dev_reset(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct leapioraid_sas_device *sas_device = NULL; + u16 handle; + u8 tr_method = 0; + u8 tr_timeout = 30; + int r; + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *target_priv_data = starget->hostdata; + + sdev_printk(KERN_INFO, scmd->device, + "attempting device reset! scmd(0x%p)\n", scmd); + leapioraid_scsihost_tm_display_info(ioc, scmd); + if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", + ((ioc->remove_host) ? ("shost is getting removed!") + : ("pci device been removed!")), scmd); + scmd->result = DID_NO_CONNECT << 16; + r = FAILED; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! 
scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + handle = 0; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = leapioraid_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + r = leapioraid_scsihost_issue_locked_tm(ioc, handle, + scmd->device->channel, + scmd->device->id, + scmd->device->lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, + 0, tr_timeout, tr_method); +out: + sdev_printk(KERN_INFO, scmd->device, + "device reset: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + if (sas_device) + leapioraid_sas_device_put(sas_device); + return r; +} + +static int +leapioraid_scsihost_target_reset(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct leapioraid_sas_device *sas_device = NULL; + u16 handle; + u8 tr_method = 0; + u8 tr_timeout = 30; + int r; + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *target_priv_data = starget->hostdata; + + starget_printk(KERN_INFO, starget, + "attempting target reset! scmd(0x%p)\n", scmd); + leapioraid_scsihost_tm_display_info(ioc, scmd); + if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", + ((ioc->remove_host) ? ("shost is getting removed!") + : ("pci device been removed!")), scmd); + scmd->result = DID_NO_CONNECT << 16; + r = FAILED; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + starget_printk(KERN_INFO, starget, + "target been deleted! scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + handle = 0; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = leapioraid_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + r = leapioraid_scsihost_issue_locked_tm(ioc, handle, + scmd->device->channel, + scmd->device->id, 0, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + 0, tr_timeout, tr_method); +out: + starget_printk(KERN_INFO, starget, + "target reset: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + if (sas_device) + leapioraid_sas_device_put(sas_device); + return r; +} + +static int +leapioraid_scsihost_host_reset(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + int r, retval; + + pr_info("%s attempting host reset! scmd(0x%p)\n", + ioc->name, scmd); + scsi_print_command(scmd); + if (ioc->is_driver_loading || ioc->remove_host) { + pr_info("%s Blocking the host reset\n", + ioc->name); + r = FAILED; + goto out; + } + retval = leapioraid_base_hard_reset_handler( + ioc, FORCE_BIG_HAMMER); + r = (retval < 0) ? 
FAILED : SUCCESS; +out: + pr_info("%s host reset: %s scmd(0x%p)\n", + ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), + scmd); + return r; +} + +static void +leapioraid_scsihost_fw_event_add(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + unsigned long flags; + + if (ioc->firmware_event_thread == NULL) + return; + spin_lock_irqsave(&ioc->fw_event_lock, flags); + leapioraid_fw_event_work_get(fw_event); + INIT_LIST_HEAD(&fw_event->list); + list_add_tail(&fw_event->list, &ioc->fw_event_list); + INIT_WORK(&fw_event->work, leapioraid_firmware_event_work); + leapioraid_fw_event_work_get(fw_event); + queue_work(ioc->firmware_event_thread, &fw_event->work); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +static void +leapioraid_scsihost_fw_event_del_from_list( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&fw_event->list)) { + list_del_init(&fw_event->list); + leapioraid_fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +static void +leapioraid_scsihost_fw_event_requeue( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event, unsigned long delay) +{ + unsigned long flags; + + if (ioc->firmware_event_thread == NULL) + return; + spin_lock_irqsave(&ioc->fw_event_lock, flags); + leapioraid_fw_event_work_get(fw_event); + list_add_tail(&fw_event->list, &ioc->fw_event_list); + if (!fw_event->delayed_work_active) { + fw_event->delayed_work_active = 1; + INIT_DELAYED_WORK(&fw_event->delayed_work, + leapioraid_firmware_event_work_delayed); + } + queue_delayed_work(ioc->firmware_event_thread, &fw_event->delayed_work, + msecs_to_jiffies(delay)); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +static void +leapioraid_scsihost_error_recovery_delete_devices( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_fw_event_work *fw_event; + + fw_event = leapioraid_alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = LEAPIORAID_REMOVE_UNRESPONDING_DEVICES; + fw_event->ioc = ioc; + leapioraid_scsihost_fw_event_add(ioc, fw_event); + leapioraid_fw_event_work_put(fw_event); +} + +void +leapioraid_port_enable_complete(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_fw_event_work *fw_event; + + fw_event = leapioraid_alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = LEAPIORAID_PORT_ENABLE_COMPLETE; + fw_event->ioc = ioc; + leapioraid_scsihost_fw_event_add(ioc, fw_event); + leapioraid_fw_event_work_put(fw_event); +} + +static struct leapioraid_fw_event_work *dequeue_next_fw_event( + struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct leapioraid_fw_event_work *fw_event = NULL; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&ioc->fw_event_list)) { + fw_event = list_first_entry(&ioc->fw_event_list, + struct leapioraid_fw_event_work, list); + list_del_init(&fw_event->list); + leapioraid_fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + return fw_event; +} + +static void +leapioraid_scsihost_fw_event_cleanup_queue( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_fw_event_work *fw_event; + bool rc = false; + + if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) || + !ioc->firmware_event_thread || in_interrupt()) + return; + + ioc->fw_events_cleanup = 1; + if (ioc->shost_recovery && ioc->current_event) + ioc->current_event->ignore = 1; + 
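/* + * Drain both the queued events and the in-flight current_event. + * cancel_*_work_sync() returning true means the work was still + * pending, so drop the reference taken when it was queued. + */ +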
while ((fw_event = dequeue_next_fw_event(ioc)) || + (fw_event = ioc->current_event)) { + if (fw_event == ioc->current_event && + ioc->current_event->event != + LEAPIORAID_REMOVE_UNRESPONDING_DEVICES) { + ioc->current_event = NULL; + continue; + } + if (fw_event->event == LEAPIORAID_PORT_ENABLE_COMPLETE) { + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_RESET; + ioc->start_scan = 0; + } + if (fw_event->delayed_work_active) + rc = cancel_delayed_work_sync(&fw_event->delayed_work); + else + rc = cancel_work_sync(&fw_event->work); + if (rc) + leapioraid_fw_event_work_put(fw_event); + } + ioc->fw_events_cleanup = 0; +} + +static void +leapioraid_scsihost_internal_device_block( + struct scsi_device *sdev, + struct LEAPIORAID_DEVICE + *sas_device_priv_data) +{ + int r = 0; + + sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 1; + + r = scsi_internal_device_block_nowait(sdev); + if (r == -EINVAL) + sdev_printk(KERN_WARNING, sdev, + "device_block failed with return(%d) for handle(0x%04x)\n", + r, sas_device_priv_data->sas_target->handle); +} + +static void +leapioraid_scsihost_internal_device_unblock(struct scsi_device *sdev, + struct LEAPIORAID_DEVICE + *sas_device_priv_data) +{ + int r = 0; + + sdev_printk(KERN_WARNING, sdev, + "device_unblock and setting to running, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 0; + + r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); + if (r == -EINVAL) { + sdev_printk(KERN_WARNING, sdev, + "device_unblock failed with return(%d)\n\t\t" + "for handle(0x%04x) performing a block followed by an unblock\n", + r, + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 1; + r = scsi_internal_device_block_nowait(sdev); + if (r) + sdev_printk(KERN_WARNING, sdev, + "retried device_block failed with return(%d)\n\t\t" + "for handle(0x%04x)\n", + r, + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 0; + + r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); + if (r) + sdev_printk(KERN_WARNING, sdev, + "retried device_unblock failed\n\t\t" + "with return(%d) for handle(0x%04x)\n", + r, + sas_device_priv_data->sas_target->handle); + } +} + +static void +leapioraid_scsihost_ublock_io_all_device( + struct LEAPIORAID_ADAPTER *ioc, u8 no_turs) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target; + enum device_responsive_state rc; + struct scsi_device *sdev; + struct leapioraid_sas_device *sas_device = NULL; + int count; + u8 tr_timeout = 30; + u8 tr_method = 0; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target = sas_device_priv_data->sas_target; + if (!sas_target || sas_target->deleted) + continue; + if (!sas_device_priv_data->block) + continue; + count = 0; + if (no_turs) { + sdev_printk(KERN_WARNING, sdev, + "device_unblocked, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + leapioraid_scsihost_internal_device_unblock(sdev, + sas_device_priv_data); + continue; + } + do { + rc = leapioraid_scsihost_wait_for_device_to_become_ready( + ioc, + sas_target->handle, + 0, + (sas_target->flags + & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT), + sdev->lun, + tr_timeout, + tr_method); + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT + || rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) + ssleep(1); + } while ((rc == DEVICE_RETRY || rc == 
DEVICE_START_UNIT || + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) + && count++ < 144); + sas_device_priv_data->block = 0; + if (rc != DEVICE_READY) + sas_device_priv_data->deleted = 1; + leapioraid_scsihost_internal_device_unblock( + sdev, sas_device_priv_data); + if (rc != DEVICE_READY) { + sdev_printk(KERN_WARNING, sdev, + "%s: device_offlined, handle(0x%04x)\n", + __func__, + sas_device_priv_data->sas_target->handle); + scsi_device_set_state(sdev, SDEV_OFFLINE); + sas_device = leapioraid_get_sdev_by_addr(ioc, + sas_device_priv_data->sas_target->sas_address, + sas_device_priv_data->sas_target->port); + if (sas_device) { + leapioraid_scsihost_display_enclosure_chassis_info( + NULL, + sas_device, + sdev, + NULL); + leapioraid_sas_device_put(sas_device); + } + } else + sdev_printk(KERN_WARNING, sdev, + "device_unblocked, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + } +} + +static void +leapioraid_scsihost_ublock_io_device_wait( + struct LEAPIORAID_ADAPTER *ioc, u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target; + enum device_responsive_state rc; + struct scsi_device *sdev; + int count, host_reset_completion_count; + struct leapioraid_sas_device *sas_device; + u8 tr_timeout = 30; + u8 tr_method = 0; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target = sas_device_priv_data->sas_target; + if (!sas_target) + continue; + if (sas_target->sas_address != sas_address || + sas_target->port != port) + continue; + if (sdev->sdev_state == SDEV_OFFLINE) { + sas_device_priv_data->block = 1; + sas_device_priv_data->deleted = 0; + scsi_device_set_state(sdev, SDEV_RUNNING); + scsi_internal_device_block_nowait(sdev); + } + } + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target = sas_device_priv_data->sas_target; + if (!sas_target) + continue; + if (sas_target->sas_address != sas_address || + sas_target->port != port) + continue; + if (!sas_device_priv_data->block) + continue; + count = 0; + do { + host_reset_completion_count = 0; + rc = leapioraid_scsihost_wait_for_device_to_become_ready( + ioc, + sas_target->handle, + 0, + (sas_target->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT), + sdev->lun, + tr_timeout, + tr_method); + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT + || rc == DEVICE_STOP_UNIT + || rc == DEVICE_RETRY_UA) { + do { + msleep(500); + host_reset_completion_count++; + } while (rc == DEVICE_RETRY && + ioc->shost_recovery); + if (host_reset_completion_count > 1) { + rc = leapioraid_scsihost_wait_for_device_to_become_ready( + ioc, sas_target->handle, 0, + (sas_target->flags + & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT), + sdev->lun, tr_timeout, tr_method); + if (rc == DEVICE_RETRY + || rc == DEVICE_START_UNIT + || rc == DEVICE_STOP_UNIT + || rc == DEVICE_RETRY_UA) + msleep(500); + } + continue; + } + } while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) + && count++ <= 144); + sas_device_priv_data->block = 0; + if (rc != DEVICE_READY) + sas_device_priv_data->deleted = 1; + + scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); + + if (rc != DEVICE_READY) { + sdev_printk(KERN_WARNING, sdev, + "%s: device_offlined, handle(0x%04x)\n", + __func__, + sas_device_priv_data->sas_target->handle); + sas_device = + leapioraid_get_sdev_by_handle(ioc, + 
sas_device_priv_data->sas_target->handle); + if (sas_device) { + leapioraid_scsihost_display_enclosure_chassis_info(NULL, + sas_device, + sdev, + NULL); + leapioraid_sas_device_put(sas_device); + } + scsi_device_set_state(sdev, SDEV_OFFLINE); + } else { + sdev_printk(KERN_WARNING, sdev, + "device_unblocked, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + } + } +} + +static void +leapioraid_scsihost_ublock_io_device( + struct LEAPIORAID_ADAPTER *ioc, u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + continue; + if (sas_device_priv_data->sas_target->sas_address + != sas_address || + sas_device_priv_data->sas_target->port != port) + continue; + if (sas_device_priv_data->block) { + leapioraid_scsihost_internal_device_unblock(sdev, + sas_device_priv_data); + } + scsi_device_set_state(sdev, SDEV_OFFLINE); + } +} + +static void leapioraid_scsihost_block_io_all_device( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->block) + continue; + if (sas_device_priv_data->ignore_delay_remove) { + sdev_printk(KERN_INFO, sdev, + "%s skip device_block for SES handle(0x%04x)\n", + __func__, + sas_device_priv_data->sas_target->handle); + continue; + } + leapioraid_scsihost_internal_device_block( + sdev, sas_device_priv_data); + } +} + +static void +leapioraid_scsihost_block_io_device( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + struct leapioraid_sas_device *sas_device; + + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle != handle) + continue; + if (sas_device_priv_data->block) + continue; + if (sas_device && sas_device->pend_sas_rphy_add) + continue; + if (sas_device_priv_data->ignore_delay_remove) { + sdev_printk(KERN_INFO, sdev, + "%s skip device_block for SES handle(0x%04x)\n", + __func__, + sas_device_priv_data->sas_target->handle); + continue; + } + leapioraid_scsihost_internal_device_block( + sdev, sas_device_priv_data); + } + if (sas_device) + leapioraid_sas_device_put(sas_device); +} + +static void +leapioraid_scsihost_block_io_to_children_attached_to_ex( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_expander) +{ + struct leapioraid_sas_port *leapioraid_port; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_sas_node *expander_sibling; + unsigned long flags; + + if (!sas_expander) + return; + list_for_each_entry(leapioraid_port, + &sas_expander->sas_port_list, port_list) { + if (leapioraid_port->remote_identify.device_type == + SAS_END_DEVICE) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, + leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + if (sas_device) { + set_bit(sas_device->handle, + ioc->blocking_handles); + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + } + 
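/* + * Second pass: recurse into any attached edge or fanout + * expanders so that end devices further down the subtree are + * marked for blocking as well. + */ +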
list_for_each_entry(leapioraid_port, + &sas_expander->sas_port_list, port_list) { + if (leapioraid_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + leapioraid_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) { + expander_sibling = + leapioraid_scsihost_expander_find_by_sas_address + (ioc, leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + leapioraid_scsihost_block_io_to_children_attached_to_ex( + ioc, expander_sibling); + } + } +} + +static void +leapioraid_scsihost_block_io_to_children_attached_directly( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasTopoChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_RC_MASK; + if (reason_code == + LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) + leapioraid_scsihost_block_io_device(ioc, handle); + } +} + +static void +leapioraid_scsihost_tm_tr_send( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LeapioraidSCSITmgReq_t *mpi_request; + u16 smid; + struct leapioraid_sas_device *sas_device = NULL; + struct LEAPIORAID_TARGET *sas_target_priv_data = NULL; + u64 sas_address = 0; + unsigned long flags; + struct leapioraid_tr_list *delayed_tr; + u32 ioc_state; + struct leapioraid_hba_port *port = NULL; + u8 tr_method = 0; + + if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info( + "%s %s: host in pci error recovery: handle(0x%04x)\n", + __func__, ioc->name, handle)); + return; + } + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, pr_info( + "%s %s: host is not operational: handle(0x%04x)\n", + __func__, ioc->name, handle)); + return; + } + if (test_bit(handle, ioc->pd_handles)) + return; + clear_bit(handle, ioc->pend_os_device_add); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device && sas_device->starget && sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + sas_address = sas_device->sas_address; + port = sas_device->port; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) + tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + + if (sas_target_priv_data) { + dewtprintk(ioc, pr_err( + "%s %s: setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, handle, + (unsigned long long)sas_address)); + if (sas_device) { + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info( + ioc, + sas_device, + NULL, + NULL)); + } + leapioraid_scsihost_ublock_io_device(ioc, sas_address, port); + sas_target_priv_data->handle = + LEAPIORAID_INVALID_DEVICE_HANDLE; + } + smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + goto out; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); + dewtprintk(ioc, pr_err( + "%s DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + goto out; + } + dewtprintk(ioc, pr_info( + "%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, + smid, ioc->tm_tr_cb_idx)); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + 
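/* + * Build a high-priority target-reset TM request for the + * departing handle; the bit set in device_remove_in_progress + * stays set until the follow-up IO unit control (remove + * device) completes. + */ +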
memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + mpi_request->MsgFlags = tr_method; + set_bit(handle, ioc->device_remove_in_progress); + ioc->put_smid_hi_priority(ioc, smid, 0); +out: + if (sas_device) + leapioraid_sas_device_put(sas_device); +} + +static u8 +leapioraid_scsihost_tm_tr_complete( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + u16 handle; + struct LeapioraidSCSITmgReq_t *mpi_request_tm; + struct LeapioraidSCSITmgRep_t *mpi_reply = + leapioraid_base_get_reply_virt_addr(ioc, reply); + struct LeapioraidSasIoUnitControlReq_t *mpi_request; + u16 smid_sas_ctrl; + u32 ioc_state; + struct leapioraid_sc_list *delayed_sc; + + if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info( + "%s %s: host in pci error recovery\n", __func__, + ioc->name)); + return 1; + } + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, pr_info( + "%s %s: host is not operational\n", __func__, ioc->name)); + return 1; + } + if (unlikely(!mpi_reply)) { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return 1; + } + mpi_request_tm = leapioraid_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, pr_err( + "%s spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + ioc->name, handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); + return 0; + } + dewtprintk(ioc, pr_err( + "%s tr_complete: handle(0x%04x), (open) smid(%d),\n\t\t" + "ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", + ioc->name, + handle, + smid, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + smid_sas_ctrl = + leapioraid_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); + if (!smid_sas_ctrl) { + delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC); + if (!delayed_sc) + return leapioraid_scsihost_check_for_pending_tm(ioc, smid); + INIT_LIST_HEAD(&delayed_sc->list); + delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle); + list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list); + dewtprintk(ioc, pr_err( + "%s DELAYED:sc:handle(0x%04x), (open)\n", + ioc->name, handle)); + return leapioraid_scsihost_check_for_pending_tm(ioc, smid); + } + dewtprintk(ioc, pr_info( + "%s sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, + smid_sas_ctrl, ioc->tm_sas_control_cb_idx)); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid_sas_ctrl); + memset(mpi_request, 0, sizeof(struct LeapioraidIoUnitControlReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_IO_UNIT_CONTROL; + mpi_request->Operation = LEAPIORAID_CTRL_OP_REMOVE_DEVICE; + mpi_request->DevHandle = mpi_request_tm->DevHandle; + ioc->put_smid_default(ioc, smid_sas_ctrl); + return leapioraid_scsihost_check_for_pending_tm(ioc, smid); +} + +inline bool +leapioraid_scsihost_allow_scmd_to_device( + struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + if (ioc->pci_error_recovery) + return false; + if (ioc->adapter_over_temp) + return false; + if (ioc->remove_host) { + if (leapioraid_base_pci_device_is_unplugged(ioc)) + return false; + switch (scmd->cmnd[0]) { + case SYNCHRONIZE_CACHE: + case START_STOP: + return true; + default: + return false; + } + 
} + return true; +} + +static u8 +leapioraid_scsihost_sas_control_complete( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply = + leapioraid_base_get_reply_virt_addr(ioc, reply); + u16 dev_handle; + + if (likely(mpi_reply)) { + dev_handle + = ((struct LeapioraidIoUnitControlRep_t *)mpi_reply)->DevHandle; + dewtprintk(ioc, pr_err( + "%s sc_complete:handle(0x%04x), (open) smid(%d),\n\t\t" + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + le16_to_cpu(dev_handle), + smid, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + if (le16_to_cpu(mpi_reply->IOCStatus) == + LEAPIORAID_IOCSTATUS_SUCCESS) { + clear_bit(le16_to_cpu(dev_handle), + ioc->device_remove_in_progress); + ioc->tm_tr_retry[le16_to_cpu(dev_handle)] = 0; + } else if (ioc->tm_tr_retry[le16_to_cpu(dev_handle)] < 3) { + dewtprintk(ioc, pr_err( + "%s re-initiating tm_tr_send:handle(0x%04x)\n", + ioc->name, + le16_to_cpu(dev_handle))); + ioc->tm_tr_retry[le16_to_cpu(dev_handle)]++; + leapioraid_scsihost_tm_tr_send(ioc, le16_to_cpu(dev_handle)); + } else { + dewtprintk(ioc, pr_err( + "%s Exiting out of tm_tr_send retries:handle(0x%04x)\n", + ioc->name, + le16_to_cpu(dev_handle))); + ioc->tm_tr_retry[le16_to_cpu(dev_handle)] = 0; + clear_bit(le16_to_cpu(dev_handle), + ioc->device_remove_in_progress); + } + } else { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + } + return leapioraid_check_for_pending_internal_cmds(ioc, smid); +} + +static void +leapioraid_scsihost_tm_tr_volume_send( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LeapioraidSCSITmgReq_t *mpi_request; + u16 smid; + struct leapioraid_tr_list *delayed_tr; + + if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info( + "%s %s: host reset in progress!\n", __func__, ioc->name)); + return; + } + smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + return; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list); + dewtprintk(ioc, pr_err( + "%s DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + return; + } + dewtprintk(ioc, pr_info( + "%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, + smid, ioc->tm_tr_volume_cb_idx)); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + ioc->put_smid_hi_priority(ioc, smid, 0); +} + +static u8 +leapioraid_scsihost_tm_volume_tr_complete( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + u16 handle; + struct LeapioraidSCSITmgReq_t *mpi_request_tm; + struct LeapioraidSCSITmgRep_t *mpi_reply = + leapioraid_base_get_reply_virt_addr(ioc, reply); + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info( + "%s %s: host reset in progress!\n", __func__, ioc->name)); + return 1; + } + if (unlikely(!mpi_reply)) { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return 1; + } + mpi_request_tm = leapioraid_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != 
le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, pr_err( + "%s spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + ioc->name, handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); + return 0; + } + dewtprintk(ioc, pr_err( + "%s tr_complete:handle(0x%04x), (open) smid(%d),\n\t\t" + "ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", + ioc->name, + handle, + smid, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + return leapioraid_scsihost_check_for_pending_tm(ioc, smid); +} + +static void +leapioraid_scsihost_tm_internal_tr_send( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_tr_list *delayed_tr; + struct LeapioraidSCSITmgReq_t *mpi_request; + u16 smid; + u8 tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + + smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_internal_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + return; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, + &ioc->delayed_internal_tm_list); + dewtprintk(ioc, + pr_err( + "%s DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + return; + } + dewtprintk(ioc, pr_info( + "%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, + smid, ioc->tm_tr_internal_cb_idx)); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + mpi_request->MsgFlags = tr_method; + ioc->put_smid_hi_priority(ioc, smid, 0); +} + +static u8 +leapioraid_scsihost_tm_internal_tr_complete( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidSCSITmgRep_t *mpi_reply = + leapioraid_base_get_reply_virt_addr(ioc, reply); + + if (likely(mpi_reply)) { + dewtprintk(ioc, pr_err( + "%s tr_complete:handle(0x%04x),\n\t\t" + "(open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + le16_to_cpu(mpi_reply->DevHandle), + smid, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + } else { + pr_err("%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return 1; + } + return leapioraid_scsihost_check_for_pending_tm(ioc, smid); +} + +static void +leapioraid_scsihost_issue_delayed_event_ack( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + U16 event, U32 event_context) +{ + struct LeapioraidEventAckReq_t *ack_request; + int i = smid - ioc->internal_smid; + unsigned long flags; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + dewtprintk(ioc, pr_info( + "%s EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n", + ioc->name, le16_to_cpu(event), + smid, ioc->base_cb_idx)); + ack_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(ack_request, 0, sizeof(struct LeapioraidEventAckReq_t)); + ack_request->Function = LEAPIORAID_FUNC_EVENT_ACK; + ack_request->Event = event; + ack_request->EventContext = event_context; + ack_request->VF_ID = 0; + ack_request->VP_ID = 0; + ioc->put_smid_default(ioc, smid); +} + +static void +leapioraid_scsihost_issue_delayed_sas_io_unit_ctrl( + struct LEAPIORAID_ADAPTER *ioc, + u16 smid, u16 handle) +{ + struct 
LeapioraidSasIoUnitControlReq_t *mpi_request; + u32 ioc_state; + int i = smid - ioc->internal_smid; + unsigned long flags; + + if (ioc->remove_host) { + dewtprintk(ioc, pr_info( + "%s %s: host has been removed\n", __func__, ioc->name)); + return; + } else if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info( + "%s %s: host in pci error recovery\n", __func__, + ioc->name)); + return; + } + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, pr_info( + "%s %s: host is not operational\n", __func__, ioc->name)); + return; + } + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + dewtprintk(ioc, pr_info( + "%s sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, + smid, ioc->tm_sas_control_cb_idx)); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(struct LeapioraidIoUnitControlReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_IO_UNIT_CONTROL; + mpi_request->Operation = 0x0D; + mpi_request->DevHandle = cpu_to_le16(handle); + ioc->put_smid_default(ioc, smid); +} + +u8 +leapioraid_check_for_pending_internal_cmds(struct LEAPIORAID_ADAPTER *ioc, + u16 smid) +{ + struct leapioraid_sc_list *delayed_sc; + struct leapioraid_event_ack_list *delayed_event_ack; + + if (!list_empty(&ioc->delayed_event_ack_list)) { + delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next, + struct leapioraid_event_ack_list, list); + leapioraid_scsihost_issue_delayed_event_ack(ioc, smid, + delayed_event_ack->Event, + delayed_event_ack->EventContext); + list_del(&delayed_event_ack->list); + kfree(delayed_event_ack); + return 0; + } + if (!list_empty(&ioc->delayed_sc_list)) { + delayed_sc = list_entry(ioc->delayed_sc_list.next, + struct leapioraid_sc_list, list); + leapioraid_scsihost_issue_delayed_sas_io_unit_ctrl(ioc, smid, + delayed_sc->handle); + list_del(&delayed_sc->list); + kfree(delayed_sc); + return 0; + } + return 1; +} + +static u8 +leapioraid_scsihost_check_for_pending_tm( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + struct leapioraid_tr_list *delayed_tr; + + if (!list_empty(&ioc->delayed_tr_volume_list)) { + delayed_tr = list_entry(ioc->delayed_tr_volume_list.next, + struct leapioraid_tr_list, list); + leapioraid_base_free_smid(ioc, smid); + leapioraid_scsihost_tm_tr_volume_send(ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + if (!list_empty(&ioc->delayed_tr_list)) { + delayed_tr = list_entry(ioc->delayed_tr_list.next, + struct leapioraid_tr_list, list); + leapioraid_base_free_smid(ioc, smid); + leapioraid_scsihost_tm_tr_send(ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + if (!list_empty(&ioc->delayed_internal_tm_list)) { + delayed_tr = list_entry(ioc->delayed_internal_tm_list.next, + struct leapioraid_tr_list, list); + leapioraid_base_free_smid(ioc, smid); + leapioraid_scsihost_tm_internal_tr_send( + ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + return 1; +} + +static void +leapioraid_scsihost_check_topo_delete_events( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasTopoChangeList_t *event_data) +{ + struct leapioraid_fw_event_work *fw_event; + struct LeapioraidEventDataSasTopoChangeList_t *local_event_data; + u16 expander_handle; + struct leapioraid_raid_sas_node *sas_expander; + 
unsigned long flags; + int i, reason_code; + u16 handle; + + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_RC_MASK; + if (reason_code == + LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING) + leapioraid_scsihost_tm_tr_send(ioc, handle); + } + expander_handle = le16_to_cpu(event_data->ExpanderDevHandle); + if (expander_handle < ioc->sas_hba.num_phys) { + leapioraid_scsihost_block_io_to_children_attached_directly( + ioc, event_data); + return; + } + if (event_data->ExpStatus == + LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = leapioraid_scsihost_expander_find_by_handle( + ioc, expander_handle); + leapioraid_scsihost_block_io_to_children_attached_to_ex( + ioc, sas_expander); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + do { + handle = find_first_bit(ioc->blocking_handles, + ioc->facts.MaxDevHandle); + if (handle < ioc->facts.MaxDevHandle) + leapioraid_scsihost_block_io_device(ioc, handle); + } while (test_and_clear_bit(handle, ioc->blocking_handles)); + } else if (event_data->ExpStatus == + LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING) + leapioraid_scsihost_block_io_to_children_attached_directly( + ioc, event_data); + if (event_data->ExpStatus != LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING) + return; + spin_lock_irqsave(&ioc->fw_event_lock, flags); + list_for_each_entry(fw_event, &ioc->fw_event_list, list) { + if (fw_event->event != LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST || + fw_event->ignore) + continue; + local_event_data = fw_event->event_data; + if (local_event_data->ExpStatus == + LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED || + local_event_data->ExpStatus == + LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING) { + if (le16_to_cpu(local_event_data->ExpanderDevHandle) == + expander_handle) { + dewtprintk(ioc, pr_err( + "%s setting ignoring flag\n", + ioc->name)); + fw_event->ignore = 1; + } + } + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +static void +leapioraid_scsihost_set_volume_delete_flag( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_device *raid_device; + struct LEAPIORAID_TARGET *sas_target_priv_data; + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_raid_device_find_by_handle( + ioc, handle); + if (raid_device && raid_device->starget && + raid_device->starget->hostdata) { + sas_target_priv_data = raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + dewtprintk(ioc, pr_err( + "%s setting delete flag: handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, handle, + (unsigned long long)raid_device->wwid)); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +static void +leapioraid_scsihost_set_volume_handle_for_tr( + u16 handle, u16 *a, u16 *b) +{ + if (!handle || handle == *a || handle == *b) + return; + if (!*a) + *a = handle; + else if (!*b) + *b = handle; +} + +static void +leapioraid_scsihost_check_ir_config_unhide_events( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataIrCfgChangeList_t *event_data) +{ + struct LeapioraidEventIrCfgEle_t *element; + int i; + u16 handle, volume_handle, a, b; + struct leapioraid_tr_list *delayed_tr; + + a = 0; + b = 0; + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if 
(le32_to_cpu(event_data->Flags) & + LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == + LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED || + element->ReasonCode == LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED) { + volume_handle = le16_to_cpu(element->VolDevHandle); + leapioraid_scsihost_set_volume_delete_flag(ioc, volume_handle); + leapioraid_scsihost_set_volume_handle_for_tr( + volume_handle, &a, &b); + } + } + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (le32_to_cpu(event_data->Flags) & + LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE) { + volume_handle = le16_to_cpu(element->VolDevHandle); + leapioraid_scsihost_set_volume_handle_for_tr( + volume_handle, &a, &b); + } + } + if (a) + leapioraid_scsihost_tm_tr_volume_send(ioc, a); + if (b) + leapioraid_scsihost_tm_tr_volume_send(ioc, b); + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode != LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE) + continue; + handle = le16_to_cpu(element->PhysDiskDevHandle); + volume_handle = le16_to_cpu(element->VolDevHandle); + clear_bit(handle, ioc->pd_handles); + if (!volume_handle) + leapioraid_scsihost_tm_tr_send(ioc, handle); + else if (volume_handle == a || volume_handle == b) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + BUG_ON(!delayed_tr); + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); + dewtprintk(ioc, pr_err( + "%s DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + } else + leapioraid_scsihost_tm_tr_send(ioc, handle); + } +} + +static void +leapioraid_scsihost_check_volume_delete_events( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataIrVol_t *event_data) +{ + u32 state; + + if (event_data->ReasonCode != LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + state = le32_to_cpu(event_data->NewValue); + if (state == LEAPIORAID_RAID_VOL_STATE_MISSING || state == + LEAPIORAID_RAID_VOL_STATE_FAILED) + leapioraid_scsihost_set_volume_delete_flag( + ioc, le16_to_cpu(event_data->VolDevHandle)); +} + +static int +leapioraid_scsihost_set_satl_pending( + struct scsi_cmnd *scmd, bool pending) +{ + struct LEAPIORAID_DEVICE *priv = scmd->device->hostdata; + + if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) + return 0; + if (pending) + return test_and_set_bit(LEAPIORAID_CMND_PENDING_BIT, + &priv->ata_command_pending); + clear_bit(LEAPIORAID_CMND_PENDING_BIT, &priv->ata_command_pending); + return 0; +} + +void +leapioraid_scsihost_flush_running_cmds( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct scsi_cmnd *scmd; + struct leapioraid_scsiio_tracker *st; + u16 smid; + u16 count = 0; + + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + count++; + st = leapioraid_base_scsi_cmd_priv(scmd); + if (st && st->smid == 0) + continue; + leapioraid_scsihost_set_satl_pending(scmd, false); + leapioraid_base_get_msg_frame(ioc, smid); + scsi_dma_unmap(scmd); + + leapioraid_base_clear_st(ioc, st); + if ((!leapioraid_base_pci_device_is_available(ioc)) || + (ioc->ioc_reset_status != 0) + || ioc->adapter_over_temp || ioc->remove_host) + scmd->result = DID_NO_CONNECT << 16; + else + scmd->result = DID_RESET 
<< 16; + scsi_done(scmd); + } + dtmprintk(ioc, pr_info("%s completing %d cmds\n", + ioc->name, count)); +} + +static inline u8 scsih_is_io_belongs_to_RT_class( + struct scsi_cmnd *scmd) +{ + struct request *rq = scsi_cmd_to_rq(scmd); + + return (IOPRIO_PRIO_CLASS(req_get_ioprio(rq)) == IOPRIO_CLASS_RT); +} + +static int +leapioraid_scsihost_qcmd( + struct Scsi_Host *shost, struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct LeapioraidSCSIIOReq_t *mpi_request; + u32 mpi_control; + u16 smid; + u16 handle; + int rc = 0; + + if (ioc->logging_level & LEAPIORAID_DEBUG_SCSI) + scsi_print_command(scmd); + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + if (!(leapioraid_scsihost_allow_scmd_to_device(ioc, scmd))) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + sas_target_priv_data = sas_device_priv_data->sas_target; + handle = sas_target_priv_data->handle; + if (handle == LEAPIORAID_INVALID_DEVICE_HANDLE) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + if (sas_device_priv_data->block && + scmd->device->host->shost_state == SHOST_RECOVERY && + scmd->cmnd[0] == TEST_UNIT_READY) { + scsi_build_sense(scmd, 0, UNIT_ATTENTION, + 0x29, 0x07); + scsi_done(scmd); + goto out; + } + if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } else if (sas_target_priv_data->deleted || + sas_device_priv_data->deleted) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } else if (sas_target_priv_data->tm_busy || sas_device_priv_data->block) { + rc = SCSI_MLQUEUE_DEVICE_BUSY; + goto out; + } + do { + if (test_bit(LEAPIORAID_CMND_PENDING_BIT, + &sas_device_priv_data->ata_command_pending)) { + rc = SCSI_MLQUEUE_DEVICE_BUSY; + goto out; + } + } while (leapioraid_scsihost_set_satl_pending(scmd, true)); + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + mpi_control = LEAPIORAID_SCSIIO_CONTROL_READ; + else if (scmd->sc_data_direction == DMA_TO_DEVICE) + mpi_control = LEAPIORAID_SCSIIO_CONTROL_WRITE; + else + mpi_control = LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER; + mpi_control |= LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ; + if (sas_device_priv_data->ncq_prio_enable) { + if (scsih_is_io_belongs_to_RT_class(scmd)) + mpi_control |= 1 << LEAPIORAID_SCSIIO_CONTROL_CMDPRI_SHIFT; + } + if ((sas_device_priv_data->flags & LEAPIORAID_DEVICE_TLR_ON) && + scmd->cmd_len != 32) + mpi_control |= LEAPIORAID_SCSIIO_CONTROL_TLR_ON; + smid = leapioraid_base_get_smid_scsiio( + ioc, ioc->scsi_io_cb_idx, scmd); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = SCSI_MLQUEUE_HOST_BUSY; + leapioraid_scsihost_set_satl_pending(scmd, false); + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + if (scmd->cmd_len == 32) + mpi_control |= 4 << LEAPIORAID_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; + mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + mpi_request->Function = + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH; + else + mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->DataLength = 
cpu_to_le32(scsi_bufflen(scmd)); + mpi_request->Control = cpu_to_le32(mpi_control); + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); + mpi_request->MsgFlags = LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + mpi_request->SenseBufferLowAddress = + leapioraid_base_get_sense_buffer_dma(ioc, smid); + mpi_request->SGLOffset0 = offsetof(struct LeapioraidSCSIIOReq_t, SGL) / 4; + int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) + mpi_request->LUN); + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + if (mpi_request->DataLength) { + if (ioc->build_sg_scmd(ioc, scmd, smid)) { + leapioraid_base_free_smid(ioc, smid); + rc = SCSI_MLQUEUE_HOST_BUSY; + leapioraid_scsihost_set_satl_pending(scmd, false); + goto out; + } + } else + ioc->build_zero_len_sge(ioc, &mpi_request->SGL); + if (likely(mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST)) { + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FASTPATH_IO) { + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | 0x4000); + ioc->put_smid_fast_path(ioc, smid, handle); + } else + ioc->put_smid_scsi_io(ioc, smid, + le16_to_cpu(mpi_request->DevHandle)); + } else + ioc->put_smid_default(ioc, smid); +out: + return rc; +} + +static void +leapioraid_scsihost_normalize_sense( + char *sense_buffer, struct sense_info *data) +{ + if ((sense_buffer[0] & 0x7F) >= 0x72) { + data->skey = sense_buffer[1] & 0x0F; + data->asc = sense_buffer[2]; + data->ascq = sense_buffer[3]; + } else { + data->skey = sense_buffer[2] & 0x0F; + data->asc = sense_buffer[12]; + data->ascq = sense_buffer[13]; + } +} + +static void +leapioraid_scsihost_scsi_ioc_info( + struct LEAPIORAID_ADAPTER *ioc, struct scsi_cmnd *scmd, + struct LeapioraidSCSIIORep_t *mpi_reply, u16 smid, + u8 scsi_status, u16 error_response_count) +{ + u32 response_info; + u8 *response_bytes; + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + u8 scsi_state = mpi_reply->SCSIState; + char *desc_ioc_state = NULL; + char *desc_scsi_status = NULL; + char *desc_scsi_state = ioc->tmp_string; + u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + struct leapioraid_sas_device *sas_device = NULL; + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *priv_target = starget->hostdata; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->warpdrive_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + if (log_info == 0x31170000) + return; + switch (ioc_status) { + case LEAPIORAID_IOCSTATUS_SUCCESS: + desc_ioc_state = "success"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION: + desc_ioc_state = "invalid function"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR: + desc_ioc_state = "scsi recovered error"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + desc_ioc_state = "scsi invalid dev handle"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + desc_ioc_state = "scsi device not there"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN: + desc_ioc_state = "scsi data overrun"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN: + desc_ioc_state = "scsi data underrun"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR: + desc_ioc_state = "scsi io data error"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR: + desc_ioc_state = "scsi protocol error"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED: + desc_ioc_state = "scsi task terminated"; + break; + case 
LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + desc_ioc_state = "scsi residual mismatch"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + desc_ioc_state = "scsi task mgmt failed"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED: + desc_ioc_state = "scsi ioc terminated"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED: + desc_ioc_state = "scsi ext terminated"; + break; + case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR: + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp guard error"; + break; + } + fallthrough; + case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR: + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp ref tag error"; + break; + } + fallthrough; + case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR: + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp app tag error"; + break; + } + fallthrough; + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER: + desc_ioc_state = "insufficient power"; + break; + default: + desc_ioc_state = "unknown"; + break; + } + switch (scsi_status) { + case LEAPIORAID_SCSI_STATUS_GOOD: + desc_scsi_status = "good"; + break; + case LEAPIORAID_SCSI_STATUS_CHECK_CONDITION: + desc_scsi_status = "check condition"; + break; + case LEAPIORAID_SCSI_STATUS_CONDITION_MET: + desc_scsi_status = "condition met"; + break; + case LEAPIORAID_SCSI_STATUS_BUSY: + desc_scsi_status = "busy"; + break; + case LEAPIORAID_SCSI_STATUS_INTERMEDIATE: + desc_scsi_status = "intermediate"; + break; + case LEAPIORAID_SCSI_STATUS_INTERMEDIATE_CONDMET: + desc_scsi_status = "intermediate condmet"; + break; + case LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT: + desc_scsi_status = "reservation conflict"; + break; + case LEAPIORAID_SCSI_STATUS_COMMAND_TERMINATED: + desc_scsi_status = "command terminated"; + break; + case LEAPIORAID_SCSI_STATUS_TASK_SET_FULL: + desc_scsi_status = "task set full"; + break; + case LEAPIORAID_SCSI_STATUS_ACA_ACTIVE: + desc_scsi_status = "aca active"; + break; + case LEAPIORAID_SCSI_STATUS_TASK_ABORTED: + desc_scsi_status = "task aborted"; + break; + default: + desc_scsi_status = "unknown"; + break; + } + desc_scsi_state[0] = '\0'; + if (!scsi_state) + desc_scsi_state = " "; + if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID) + strcat(desc_scsi_state, "response info "); + if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED) + strcat(desc_scsi_state, "state terminated "); + if (scsi_state & LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS) + strcat(desc_scsi_state, "no status "); + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED) + strcat(desc_scsi_state, "autosense failed "); + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) + strcat(desc_scsi_state, "autosense valid "); + scsi_print_command(scmd); + if (priv_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + pr_warn("%s \t%s wwid(0x%016llx)\n", + ioc->name, device_str, + (unsigned long long)priv_target->sas_address); + } else { + sas_device = leapioraid_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + pr_warn( + "%s \t%s: sas_address(0x%016llx), phy(%d)\n", + ioc->name, __func__, (unsigned long long) + sas_device->sas_address, sas_device->phy); + leapioraid_scsihost_display_enclosure_chassis_info(ioc, + sas_device, + NULL, NULL); + leapioraid_sas_device_put(sas_device); + } + } + pr_warn( + "%s \thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n", + ioc->name, le16_to_cpu(mpi_reply->DevHandle), desc_ioc_state, + ioc_status, smid); + pr_warn("%s \trequest_len(%d), underflow(%d), resid(%d)\n", + ioc->name, scsi_bufflen(scmd), scmd->underflow, + 
scsi_get_resid(scmd)); + pr_warn("%s \ttag(%d), transfer_count(%d), sc->result(0x%08x)\n", + ioc->name, + le16_to_cpu(mpi_reply->TaskTag), + le32_to_cpu(mpi_reply->TransferCount), scmd->result); + pr_warn("%s \tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n", + ioc->name, desc_scsi_status, + scsi_status, desc_scsi_state, scsi_state); + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + + leapioraid_scsihost_normalize_sense(scmd->sense_buffer, &data); + pr_warn( + "%s \t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", + ioc->name, + data.skey, data.asc, data.ascq, + le32_to_cpu(mpi_reply->SenseCount)); + } + if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID) { + response_info = le32_to_cpu(mpi_reply->ResponseInfo); + response_bytes = (u8 *) &response_info; + leapioraid_scsihost_response_code(ioc, response_bytes[0]); + } +} + +static void +leapioraid_scsihost_turn_on_pfa_led( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LeapioraidSepRep_t mpi_reply; + struct LeapioraidSepReq_t mpi_request; + struct leapioraid_sas_device *sas_device; + + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (!sas_device) + return; + memset(&mpi_request, 0, sizeof(struct LeapioraidSepReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = + cpu_to_le32(LEAPIORAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); + mpi_request.DevHandle = cpu_to_le16(handle); + mpi_request.Flags = LEAPIORAID_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; + if ((leapioraid_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + sas_device->pfa_led_on = 1; + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, pr_info( + "%s enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + goto out; + } +out: + leapioraid_sas_device_put(sas_device); +} + +static void +leapioraid_scsihost_turn_off_pfa_led(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + struct LeapioraidSepRep_t mpi_reply; + struct LeapioraidSepReq_t mpi_request; + + memset(&mpi_request, 0, sizeof(struct LeapioraidSepReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = 0; + mpi_request.Slot = cpu_to_le16(sas_device->slot); + mpi_request.DevHandle = 0; + mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle); + mpi_request.Flags = LEAPIORAID_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS; + if ((leapioraid_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, pr_info( + "%s enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + return; + } +} + +static void +leapioraid_scsihost_send_event_to_turn_on_pfa_led( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle) +{ + struct leapioraid_fw_event_work *fw_event; + + fw_event = leapioraid_alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = LEAPIORAID_TURN_ON_PFA_LED; + fw_event->device_handle = handle; + 
fw_event->ioc = ioc; + leapioraid_scsihost_fw_event_add(ioc, fw_event); + leapioraid_fw_event_work_put(fw_event); +} + +static void +leapioraid_scsihost_smart_predicted_fault( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 from_sata_smart_polling) +{ + struct scsi_target *starget; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct LeapioraidEventNotificationRep_t *event_reply; + struct LeapioraidEventDataSasDeviceStatusChange_t *event_data; + struct leapioraid_sas_device *sas_device; + ssize_t sz; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (!sas_device) + goto out_unlock; + + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + || ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME))) + goto out_unlock; + leapioraid_scsihost_display_enclosure_chassis_info(NULL, sas_device, NULL, + starget); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (from_sata_smart_polling) + leapioraid_scsihost_send_event_to_turn_on_pfa_led(ioc, handle); + sz = offsetof(struct LeapioraidEventNotificationRep_t, EventData) + + sizeof(struct LeapioraidEventDataSasDeviceStatusChange_t); + event_reply = kzalloc(sz, GFP_ATOMIC); + if (!event_reply) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + event_reply->Function = LEAPIORAID_FUNC_EVENT_NOTIFICATION; + event_reply->Event = + cpu_to_le16(LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE); + event_reply->MsgLength = sz / 4; + event_reply->EventDataLength = + cpu_to_le16(sizeof(struct LeapioraidEventDataSasDeviceStatusChange_t) / 4); + event_data = (struct LeapioraidEventDataSasDeviceStatusChange_t *) + event_reply->EventData; + event_data->ReasonCode = LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA; + event_data->ASC = 0x5D; + event_data->DevHandle = cpu_to_le16(handle); + event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); + leapioraid_ctl_add_to_event_log(ioc, event_reply); + kfree(event_reply); +out: + if (sas_device) + leapioraid_sas_device_put(sas_device); + return; +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + goto out; +} + +static u8 +leapioraid_scsihost_io_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidSCSIIOReq_t *mpi_request; + struct LeapioraidSCSIIORep_t *mpi_reply; + struct scsi_cmnd *scmd; + u16 ioc_status, error_response_count = 0; + u32 xfer_cnt; + u8 scsi_state; + u8 scsi_status; + u32 log_info; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + u32 response_code = 0; + struct leapioraid_scsiio_tracker *st; + + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (scmd == NULL) + return 1; + leapioraid_scsihost_set_satl_pending(scmd, false); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply == NULL) { + scmd->result = DID_OK << 16; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + sas_device_priv_data->sas_target->deleted) { + scmd->result = DID_NO_CONNECT << 16; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + st = leapioraid_base_scsi_cmd_priv(scmd); + if (st->direct_io && ((ioc_status & LEAPIORAID_IOCSTATUS_MASK) + != LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED)) { + st->scmd = scmd; 
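+ /* + * A direct I/O that failed with any status other than task + * terminated is retried through the volume: clear the direct + * flag, restore the original CDB and volume handle, resubmit. + */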
+ st->direct_io = 0; + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + mpi_request->DevHandle = + cpu_to_le16(sas_device_priv_data->sas_target->handle); + ioc->put_smid_scsi_io(ioc, smid, + sas_device_priv_data->sas_target->handle); + return 0; + } + scsi_state = mpi_reply->SCSIState; + if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID) + response_code = le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; + if (!sas_device_priv_data->tlr_snoop_check) { + sas_device_priv_data->tlr_snoop_check++; + if ((sas_device_priv_data->flags & LEAPIORAID_DEVICE_TLR_ON) && + response_code == LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME) + sas_device_priv_data->flags &= ~LEAPIORAID_DEVICE_TLR_ON; + } + if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + else + log_info = 0; + ioc_status &= LEAPIORAID_IOCSTATUS_MASK; + scsi_status = mpi_reply->SCSIStatus; + xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); + if (ioc_status == LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN + && xfer_cnt == 0 + && (scsi_status == LEAPIORAID_SCSI_STATUS_BUSY + || scsi_status == LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT + || scsi_status == LEAPIORAID_SCSI_STATUS_TASK_SET_FULL)) { + ioc_status = LEAPIORAID_IOCSTATUS_SUCCESS; + } + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + const void *sense_data = leapioraid_base_get_sense_buffer(ioc, + smid); + u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, + le32_to_cpu(mpi_reply->SenseCount)); + memcpy(scmd->sense_buffer, sense_data, sz); + leapioraid_scsihost_normalize_sense(scmd->sense_buffer, &data); + if (data.asc == 0x5D) + leapioraid_scsihost_smart_predicted_fault(ioc, + le16_to_cpu(mpi_reply->DevHandle), + 0); + } + switch (ioc_status) { + case LEAPIORAID_IOCSTATUS_BUSY: + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES: + scmd->result = SAM_STAT_BUSY; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + scmd->result = DID_NO_CONNECT << 16; + break; + case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED: + if (sas_device_priv_data->block) { + scmd->result = DID_TRANSPORT_DISRUPTED << 16; + goto out; + } + if (log_info == 0x31110630) { + if (scmd->retries > 2) { + scmd->result = DID_NO_CONNECT << 16; + scsi_device_set_state(scmd->device, + SDEV_OFFLINE); + } else { + scmd->result = DID_SOFT_ERROR << 16; + scmd->device->expecting_cc_ua = 1; + } + break; + } else if (log_info == 0x32010081) { + scmd->result = DID_RESET << 16; + break; + } else if ((scmd->device->channel == RAID_CHANNEL) && + (scsi_state == (LEAPIORAID_SCSI_STATE_TERMINATED | + LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS))) { + scmd->result = DID_RESET << 16; + break; + } + scmd->result = DID_SOFT_ERROR << 16; + break; + case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED: + case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED: + scmd->result = DID_RESET << 16; + break; + case LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt)) + scmd->result = DID_SOFT_ERROR << 16; + else + scmd->result = (DID_OK << 16) | scsi_status; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN: + scmd->result = (DID_OK << 16) | scsi_status; + if ((scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID)) + break; + if (xfer_cnt < scmd->underflow) { + if (scsi_status == SAM_STAT_BUSY) + scmd->result = SAM_STAT_BUSY; + else + scmd->result = DID_SOFT_ERROR << 16; + } else if (scsi_state & (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED | + 
LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS)) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) { + mpi_reply->SCSIState = + LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID; + mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION; + scsi_build_sense(scmd, 0, + ILLEGAL_REQUEST, 0x20, + 0); + } + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN: + scsi_set_resid(scmd, 0); + fallthrough; + case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR: + case LEAPIORAID_IOCSTATUS_SUCCESS: + scmd->result = (DID_OK << 16) | scsi_status; + if (response_code == + LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME || + (scsi_state & (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED | + LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS))) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + break; + case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR: + case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR: + fallthrough; + case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR: + fallthrough; + case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION: + case LEAPIORAID_IOCSTATUS_INVALID_SGL: + case LEAPIORAID_IOCSTATUS_INTERNAL_ERROR: + case LEAPIORAID_IOCSTATUS_INVALID_FIELD: + case LEAPIORAID_IOCSTATUS_INVALID_STATE: + case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER: + default: + scmd->result = DID_SOFT_ERROR << 16; + break; + } + if (scmd->result && (ioc->logging_level & LEAPIORAID_DEBUG_REPLY)) + leapioraid_scsihost_scsi_ioc_info( + ioc, scmd, mpi_reply, smid, scsi_status, + error_response_count); +out: + scsi_dma_unmap(scmd); + leapioraid_base_free_smid(ioc, smid); + scsi_done(scmd); + return 0; +} + +static void +leapioraid_scsihost_update_vphys_after_reset( + struct LEAPIORAID_ADAPTER *ioc) +{ + u16 sz, ioc_status; + int i; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found = 0, port_id; + struct LeapioraidSasPhyP0_t phy_pg0; + struct leapioraid_hba_port *port, *port_next, *mport; + struct leapioraid_virtual_phy *vphy, *vphy_next; + struct leapioraid_sas_device *sas_device; + + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, + list) { + vphy->flags |= LEAPIORAID_VPHY_FLAG_DIRTY_PHY; + } + } + sz = offsetof(struct LeapioraidSasIOUnitP0_t, PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + continue; + if (!(le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) + & LEAPIORAID_SAS_DEVICE_INFO_SEP)) + continue; + if ((leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + pr_err("%s failure at %s:%d/%s()!\n", + 
ioc->name, __FILE__, __LINE__, __func__); + continue; + } + if (! + (le32_to_cpu(phy_pg0.PhyInfo) & + LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + attached_handle = + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (leapioraid_scsihost_get_sas_address + (ioc, attached_handle, &attached_sas_addr) + != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + continue; + } + found = 0; + port = port_next = NULL; + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, + list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + if (! + (vphy->flags & LEAPIORAID_VPHY_FLAG_DIRTY_PHY)) + continue; + if (vphy->sas_address != attached_sas_addr) + continue; + if (!(vphy->phy_mask & (1 << i))) + vphy->phy_mask = (1 << i); + port_id = sas_iounit_pg0->PhyData[i].Port; + mport = + leapioraid_get_port_by_id(ioc, port_id, 1); + if (!mport) { + mport = + kzalloc(sizeof(struct leapioraid_hba_port), + GFP_KERNEL); + if (!mport) { + pr_err( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, + __LINE__, __func__); + break; + } + mport->port_id = port_id; + pr_err( + "%s %s: hba_port entry: %p, port: %d is added to hba_port list\n", + ioc->name, __func__, mport, + mport->port_id); + list_add_tail(&mport->list, + &ioc->port_table_list); + } + if (port != mport) { + if (!mport->vphys_mask) + INIT_LIST_HEAD(&mport->vphys_list); + mport->vphys_mask |= (1 << i); + port->vphys_mask &= ~(1 << i); + list_move(&vphy->list, + &mport->vphys_list); + sas_device = + leapioraid_get_sdev_by_addr(ioc, + attached_sas_addr, + port); + if (sas_device) + sas_device->port = mport; + } + if (mport->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT) { + mport->sas_address = 0; + mport->phy_mask = 0; + mport->flags &= + ~LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT; + } + vphy->flags &= ~LEAPIORAID_VPHY_FLAG_DIRTY_PHY; + found = 1; + break; + } + if (found) + break; + } + } +out: + kfree(sas_iounit_pg0); +} + +static u8 +leapioraid_scsihost_get_port_table_after_reset( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port_table) +{ + u16 sz, ioc_status; + int i, j; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found = 0, port_count = 0, port_id; + + sz = offsetof(struct LeapioraidSasIOUnitP0_t, PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return port_count; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + found = 0; + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + continue; + attached_handle = + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (leapioraid_scsihost_get_sas_address + (ioc, attached_handle, &attached_sas_addr) + != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + continue; + } + for (j = 0; j < port_count; j++) { + port_id = sas_iounit_pg0->PhyData[i].Port; + if ((port_table[j].port_id == port_id) && + (port_table[j].sas_address == 
attached_sas_addr)) { + port_table[j].phy_mask |= (1 << i); + found = 1; + break; + } + } + if (found) + continue; + port_id = sas_iounit_pg0->PhyData[i].Port; + port_table[port_count].port_id = port_id; + port_table[port_count].phy_mask = (1 << i); + port_table[port_count].sas_address = attached_sas_addr; + port_count++; + } +out: + kfree(sas_iounit_pg0); + return port_count; +} + +enum hba_port_matched_codes { + NOT_MATCHED = 0, + MATCHED_WITH_ADDR_AND_PHYMASK, + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, + MATCHED_WITH_ADDR_AND_SUBPHYMASK, + MATCHED_WITH_ADDR, +}; +static int +leapioraid_scsihost_look_and_get_matched_port_entry( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port_entry, + struct leapioraid_hba_port **matched_port_entry, + int *count) +{ + struct leapioraid_hba_port *port_table_entry, *matched_port = NULL; + enum hba_port_matched_codes matched_code = NOT_MATCHED; + int lcount = 0; + + *matched_port_entry = NULL; + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + if (!(port_table_entry->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT)) + continue; + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask == port_entry->phy_mask)) { + matched_code = MATCHED_WITH_ADDR_AND_PHYMASK; + matched_port = port_table_entry; + break; + } + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask & port_entry->phy_mask) + && (port_table_entry->port_id == port_entry->port_id)) { + matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT; + matched_port = port_table_entry; + continue; + } + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask & port_entry->phy_mask)) { + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK; + matched_port = port_table_entry; + continue; + } + if (port_table_entry->sas_address == port_entry->sas_address) { + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK) + continue; + matched_code = MATCHED_WITH_ADDR; + matched_port = port_table_entry; + lcount++; + } + } + *matched_port_entry = matched_port; + if (matched_code == MATCHED_WITH_ADDR) + *count = lcount; + return matched_code; +} + +static void +leapioraid_scsihost_del_phy_part_of_anther_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port_table, + int index, u8 port_count, int offset) +{ + struct leapioraid_raid_sas_node *sas_node = &ioc->sas_hba; + u32 i, found = 0; + + for (i = 0; i < port_count; i++) { + if (i == index) + continue; + if (port_table[i].phy_mask & (1 << offset)) { + leapioraid_transport_del_phy_from_an_existing_port( + ioc, + sas_node, + &sas_node->phy + [offset]); + found = 1; + break; + } + } + if (!found) + port_table[index].phy_mask |= (1 << offset); +} + +static void +leapioraid_scsihost_add_or_del_phys_from_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *hba_port_entry, + struct leapioraid_hba_port *port_table, + int index, u8 port_count) +{ + u32 phy_mask, offset = 0; + struct leapioraid_raid_sas_node *sas_node = &ioc->sas_hba; + + phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask; + for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) { + if (phy_mask & (1 << offset)) { + if (!(port_table[index].phy_mask & (1 << offset))) { + leapioraid_scsihost_del_phy_part_of_anther_port( + ioc, + port_table, + index, + 
port_count,
+ offset);
+ } else {
+#if defined(LEAPIORAID_WIDE_PORT_API)
+ if (sas_node->phy[offset].phy_belongs_to_port)
+ leapioraid_transport_del_phy_from_an_existing_port
+ (ioc, sas_node,
+ &sas_node->phy[offset]);
+ leapioraid_transport_add_phy_to_an_existing_port
+ (ioc, sas_node, &sas_node->phy[offset],
+ hba_port_entry->sas_address,
+ hba_port_entry);
+#endif
+ }
+ }
+ }
+}
+
+static void
+leapioraid_scsihost_del_dirty_vphy(struct LEAPIORAID_ADAPTER *ioc)
+{
+ struct leapioraid_hba_port *port, *port_next;
+ struct leapioraid_virtual_phy *vphy, *vphy_next;
+
+ list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) {
+ if (!port->vphys_mask)
+ continue;
+ list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list,
+ list) {
+ if (vphy->flags & LEAPIORAID_VPHY_FLAG_DIRTY_PHY) {
+ drsprintk(ioc, pr_err(
+ "%s Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
+ ioc->name, vphy,
+ port->port_id,
+ vphy->phy_mask));
+ port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+ }
+ if (!port->vphys_mask && !port->sas_address)
+ port->flags |= LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT;
+ }
+}
+
+static void
+leapioraid_scsihost_del_dirty_port_entries(
+ struct LEAPIORAID_ADAPTER *ioc)
+{
+ struct leapioraid_hba_port *port, *port_next;
+
+ list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) {
+ if (!(port->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT) ||
+ port->flags & LEAPIORAID_HBA_PORT_FLAG_NEW_PORT)
+ continue;
+ drsprintk(ioc, pr_err(
+ "%s Deleting port table entry %p having Port id: %d\t, Phy_mask 0x%08x\n",
+ ioc->name, port, port->port_id,
+ port->phy_mask));
+ list_del(&port->list);
+ kfree(port);
+ }
+}
+
+static void
+leapioraid_scsihost_sas_port_refresh(struct LEAPIORAID_ADAPTER *ioc)
+{
+ u8 port_count = 0;
+ struct leapioraid_hba_port *port_table;
+ struct leapioraid_hba_port *port_table_entry;
+ struct leapioraid_hba_port *port_entry = NULL;
+ int i, j, ret, count = 0, lcount = 0;
+ u64 sas_addr;
+ u8 num_phys;
+
+ drsprintk(ioc, pr_err(
+ "%s updating ports for sas_host(0x%016llx)\n",
+ ioc->name,
+ (unsigned long long)ioc->sas_hba.sas_address));
+ leapioraid_config_get_number_hba_phys(ioc, &num_phys);
+ if (!num_phys) {
+ pr_err("%s failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ if (num_phys > ioc->sas_hba.nr_phys_allocated) {
+ pr_err("%s failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ ioc->sas_hba.num_phys = num_phys;
+ port_table = kcalloc(ioc->sas_hba.num_phys,
+ sizeof(struct leapioraid_hba_port), GFP_KERNEL);
+ if (!port_table)
+ return;
+ port_count = leapioraid_scsihost_get_port_table_after_reset(
+ ioc, port_table);
+ if (!port_count) {
+ /* nothing to refresh; don't leak the scratch table */
+ kfree(port_table);
+ return;
+ }
+ drsprintk(ioc,
+ pr_info("%s New Port table\n", ioc->name));
+ for (j = 0; j < port_count; j++)
+ drsprintk(ioc, pr_err(
+ "%s Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
+ ioc->name, port_table[j].port_id,
+ port_table[j].phy_mask,
+ port_table[j].sas_address));
+ list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
+ port_table_entry->flags |= LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT;
+ }
+ drsprintk(ioc,
+ pr_info("%s Old Port table\n", ioc->name));
+ port_table_entry = NULL;
+ list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
+ drsprintk(ioc, pr_err(
+ "%s Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
+ ioc->name, port_table_entry->port_id,
+ port_table_entry->phy_mask,
+ port_table_entry->sas_address));
+ }
+ for (j = 0; j < port_count; j++) {
+ ret = leapioraid_scsihost_look_and_get_matched_port_entry(ioc,
+ &port_table[j],
+ &port_entry,
+ &count);
+ if (!port_entry) {
+ drsprintk(ioc, pr_err(
+ "%s No Matched entry for sas_addr(0x%16llx), Port:%d\n",
+ ioc->name,
+ port_table[j].sas_address,
+ port_table[j].port_id));
+ continue;
+ }
+ switch (ret) {
+ case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
+ case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
+ leapioraid_scsihost_add_or_del_phys_from_existing_port(ioc,
+ port_entry,
+ port_table,
+ j,
+ port_count);
+ break;
+ case MATCHED_WITH_ADDR:
+ sas_addr = port_table[j].sas_address;
+ for (i = 0; i < port_count; i++) {
+ if (port_table[i].sas_address == sas_addr)
+ lcount++;
+ }
+ if ((count > 1) || (lcount > 1))
+ port_entry = NULL;
+ else
+ leapioraid_scsihost_add_or_del_phys_from_existing_port
+ (ioc, port_entry, port_table, j,
+ port_count);
+ }
+ if (!port_entry)
+ continue;
+ if (port_entry->port_id != port_table[j].port_id)
+ port_entry->port_id = port_table[j].port_id;
+ port_entry->flags &= ~LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT;
+ port_entry->phy_mask = port_table[j].phy_mask;
+ }
+ port_table_entry = NULL;
+ /* the scratch table is never linked anywhere; free it */
+ kfree(port_table);
+}
+
+static
+struct leapioraid_virtual_phy *leapioraid_scsihost_alloc_vphy(
+ struct LEAPIORAID_ADAPTER *ioc,
+ u8 port_id, u8 phy_num)
+{
+ struct leapioraid_virtual_phy *vphy;
+ struct leapioraid_hba_port *port;
+
+ port = leapioraid_get_port_by_id(ioc, port_id, 0);
+ if (!port)
+ return NULL;
+ vphy = leapioraid_get_vphy_by_phy(ioc, port, phy_num);
+ if (!vphy) {
+ vphy = kzalloc(sizeof(struct leapioraid_virtual_phy), GFP_KERNEL);
+ if (!vphy)
+ return NULL;
+ if (!port->vphys_mask)
+ INIT_LIST_HEAD(&port->vphys_list);
+ port->vphys_mask |= (1 << phy_num);
+ vphy->phy_mask |= (1 << phy_num);
+ list_add_tail(&vphy->list, &port->vphys_list);
+ pr_info(
+ "%s vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
+ ioc->name, vphy, port->port_id, phy_num);
+ }
+ return vphy;
+}
+
+static void
+leapioraid_scsihost_sas_host_refresh(struct LEAPIORAID_ADAPTER *ioc)
+{
+ u16 sz;
+ u16 ioc_status;
+ int i;
+ struct LeapioraidCfgRep_t mpi_reply;
+ struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u8 link_rate, port_id;
+ struct leapioraid_hba_port *port;
+ struct LeapioraidSasPhyP0_t phy_pg0;
+
+ dtmprintk(ioc, pr_err(
+ "%s updating handles for sas_host(0x%016llx)\n",
+ ioc->name,
+ (unsigned long long)ioc->sas_hba.sas_address));
+ sz = offsetof(struct LeapioraidSasIOUnitP0_t,
+ PhyData) +
+ (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err("%s failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
+ if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
+ if (i == 0)
+ ioc->sas_hba.handle =
+ le16_to_cpu(sas_iounit_pg0->PhyData[0].ControllerDevHandle);
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ if (!(leapioraid_get_port_by_id(ioc, port_id, 0))) {
+ port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_KERNEL);
+ if (!port)
+ goto out;
+
+ port->port_id = port_id;
+ pr_info(
+ "%s hba_port entry: %p, port: %d is added to hba_port list\n",
+ ioc->name, port, port->port_id);
+ if
(ioc->shost_recovery) + port->flags = LEAPIORAID_HBA_PORT_FLAG_NEW_PORT; + list_add_tail(&port->list, &ioc->port_table_list); + } + if (le32_to_cpu + (sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) + & LEAPIORAID_SAS_DEVICE_INFO_SEP + && (link_rate >= LEAPIORAID_SAS_NEG_LINK_RATE_1_5)) { + if ((leapioraid_config_get_phy_pg0 + (ioc, &mpi_reply, &phy_pg0, i))) { + pr_err( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + continue; + } + if (! + (le32_to_cpu(phy_pg0.PhyInfo) & + LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + if (!leapioraid_scsihost_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + attached_handle = + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (attached_handle + && link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + link_rate = LEAPIORAID_SAS_NEG_LINK_RATE_1_5; + ioc->sas_hba.phy[i].port = + leapioraid_get_port_by_id(ioc, port_id, 0); + if (!ioc->sas_hba.phy[i].phy) { + if ((leapioraid_config_get_phy_pg0 + (ioc, &mpi_reply, &phy_pg0, i))) { + pr_err( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + continue; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + continue; + } + ioc->sas_hba.phy[i].phy_id = i; + leapioraid_transport_add_host_phy(ioc, + &ioc->sas_hba.phy[i], + phy_pg0, + ioc->sas_hba.parent_dev); + continue; + } + leapioraid_transport_update_links(ioc, ioc->sas_hba.sas_address, + attached_handle, i, link_rate, + ioc->sas_hba.phy[i].port); + } +out: + kfree(sas_iounit_pg0); +} + +static void +leapioraid_scsihost_sas_host_add(struct LEAPIORAID_ADAPTER *ioc) +{ + int i; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL; + struct LeapioraidSasPhyP0_t phy_pg0; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidSasEncP0_t enclosure_pg0; + u16 ioc_status; + u16 sz; + u8 device_missing_delay; + u8 num_phys, port_id; + struct leapioraid_hba_port *port; + + leapioraid_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.nr_phys_allocated = + max_t(u8, LEAPIORAID_MAX_HBA_NUM_PHYS, num_phys); + ioc->sas_hba.phy = + kcalloc(ioc->sas_hba.nr_phys_allocated, + sizeof(struct leapioraid_sas_phy), + GFP_KERNEL); + if (!ioc->sas_hba.phy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.num_phys = num_phys; + sz = offsetof(struct LeapioraidSasIOUnitP0_t, + PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + sz = offsetof(struct 
LeapioraidSasIOUnitP1_t, + PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->io_missing_delay = sas_iounit_pg1->IODeviceMissingDelay; + device_missing_delay = sas_iounit_pg1->ReportDeviceMissingDelay; + if (device_missing_delay & LEAPIORAID_SASIOUNIT1_REPORT_MISSING_UNIT_16) + ioc->device_missing_delay = (device_missing_delay & + LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) + * 16; + else + ioc->device_missing_delay = device_missing_delay & + LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if ((leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + if (i == 0) + ioc->sas_hba.handle = + le16_to_cpu(sas_iounit_pg0->PhyData[0].ControllerDevHandle); + port_id = sas_iounit_pg0->PhyData[i].Port; + if (!(leapioraid_get_port_by_id(ioc, port_id, 0))) { + port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_KERNEL); + if (!port) + goto out; + + port->port_id = port_id; + pr_info( + "%s hba_port entry: %p, port: %d is added to hba_port list\n", + ioc->name, port, port->port_id); + list_add_tail(&port->list, &ioc->port_table_list); + } + if ((le32_to_cpu(phy_pg0.PhyInfo) & + LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY) + && (phy_pg0.NegotiatedLinkRate >> 4) >= + LEAPIORAID_SAS_NEG_LINK_RATE_1_5) { + if (!leapioraid_scsihost_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + ioc->sas_hba.phy[i].phy_id = i; + ioc->sas_hba.phy[i].port = + leapioraid_get_port_by_id(ioc, port_id, 0); + leapioraid_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], + phy_pg0, + ioc->sas_hba.parent_dev); + } + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->sas_hba.enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + pr_info( + "%s host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + ioc->name, + ioc->sas_hba.handle, + (unsigned long long)ioc->sas_hba.sas_address, + ioc->sas_hba.num_phys); + if (ioc->sas_hba.enclosure_handle) { + if (!(leapioraid_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_pg0, + LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE, + ioc->sas_hba.enclosure_handle))) + ioc->sas_hba.enclosure_logical_id = + le64_to_cpu(enclosure_pg0.EnclosureLogicalID); + } +out: + 
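/* sas_iounit_pg1 may still be NULL here; kfree(NULL) is a safe no-op */
+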
kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); +} + +static int +leapioraid_scsihost_expander_add( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_sas_node *sas_expander; + struct leapioraid_enclosure_node *enclosure_dev; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidExpanderP0_t expander_pg0; + struct LeapioraidExpanderP1_t expander_pg1; + u32 ioc_status; + u16 parent_handle; + u64 sas_address, sas_address_parent = 0; + int i; + unsigned long flags; + u8 port_id; + struct leapioraid_sas_port *leapioraid_port = NULL; + int rc = 0; + + if (!handle) + return -1; + if (ioc->shost_recovery || ioc->pci_error_recovery) + return -1; + if ((leapioraid_config_get_expander_pg0( + ioc, &mpi_reply, &expander_pg0, + LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL, + handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); + if (leapioraid_scsihost_get_sas_address( + ioc, parent_handle, &sas_address_parent) + != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + port_id = expander_pg0.PhysicalPort; + if (sas_address_parent != ioc->sas_hba.sas_address) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = + leapioraid_scsihost_expander_find_by_sas_address( + ioc, + sas_address_parent, + leapioraid_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (!sas_expander) { + rc = leapioraid_scsihost_expander_add(ioc, parent_handle); + if (rc != 0) + return rc; + } + } + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + sas_expander = leapioraid_scsihost_expander_find_by_sas_address( + ioc, + sas_address, + leapioraid_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (sas_expander) + return 0; + sas_expander = kzalloc(sizeof(struct leapioraid_raid_sas_node), + GFP_KERNEL); + if (!sas_expander) + return -1; + + sas_expander->handle = handle; + sas_expander->num_phys = expander_pg0.NumPhys; + sas_expander->sas_address_parent = sas_address_parent; + sas_expander->sas_address = sas_address; + sas_expander->port = leapioraid_get_port_by_id(ioc, port_id, 0); + if (!sas_expander->port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + pr_info( + "%s expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + ioc->name, + handle, parent_handle, + (unsigned long long)sas_expander->sas_address, + sas_expander->num_phys); + if (!sas_expander->num_phys) { + rc = -1; + goto out_fail; + } + sas_expander->phy = kcalloc(sas_expander->num_phys, + sizeof(struct leapioraid_sas_phy), GFP_KERNEL); + if (!sas_expander->phy) { + rc = -1; + goto out_fail; + } + INIT_LIST_HEAD(&sas_expander->sas_port_list); + leapioraid_port = leapioraid_transport_port_add( + ioc, handle, + sas_address_parent, + sas_expander->port); + if (!leapioraid_port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->parent_dev = &leapioraid_port->rphy->dev; + sas_expander->rphy = leapioraid_port->rphy; + for (i = 0; i 
< sas_expander->num_phys; i++) {
+ if ((leapioraid_config_get_expander_pg1(
+ ioc, &mpi_reply,
+ &expander_pg1, i,
+ handle))) {
+ pr_err("%s failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+ sas_expander->phy[i].port =
+ leapioraid_get_port_by_id(ioc, port_id, 0);
+ if ((leapioraid_transport_add_expander_phy
+ (ioc, &sas_expander->phy[i], expander_pg1,
+ sas_expander->parent_dev))) {
+ pr_err("%s failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ }
+ if (sas_expander->enclosure_handle) {
+ enclosure_dev =
+ leapioraid_scsihost_enclosure_find_by_handle(
+ ioc,
+ sas_expander->enclosure_handle);
+ if (enclosure_dev)
+ sas_expander->enclosure_logical_id =
+ le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
+ }
+ leapioraid_scsihost_expander_node_add(ioc, sas_expander);
+ return 0;
+out_fail:
+ if (leapioraid_port)
+ leapioraid_transport_port_remove(ioc,
+ sas_expander->sas_address,
+ sas_address_parent,
+ sas_expander->port);
+ kfree(sas_expander);
+ return rc;
+}
+
+void
+leapioraid_expander_remove(
+ struct LEAPIORAID_ADAPTER *ioc,
+ u64 sas_address, struct leapioraid_hba_port *port)
+{
+ struct leapioraid_raid_sas_node *sas_expander;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+ if (!port)
+ return;
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = leapioraid_scsihost_expander_find_by_sas_address(
+ ioc, sas_address, port);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (sas_expander)
+ leapioraid_scsihost_expander_node_remove(
+ ioc, sas_expander);
+}
+
+static u8
+leapioraid_scsihost_done(
+ struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ struct LeapioraidDefaultRep_t *mpi_reply;
+
+ mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->scsih_cmds.status == LEAPIORAID_CMD_NOT_USED)
+ return 1;
+ if (ioc->scsih_cmds.smid != smid)
+ return 1;
+ ioc->scsih_cmds.status |= LEAPIORAID_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->scsih_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength * 4);
+ ioc->scsih_cmds.status |= LEAPIORAID_CMD_REPLY_VALID;
+ }
+ ioc->scsih_cmds.status &= ~LEAPIORAID_CMD_PENDING;
+ complete(&ioc->scsih_cmds.done);
+ return 1;
+}
+
+static int
+leapioraid_scsi_send_scsi_io(
+ struct LEAPIORAID_ADAPTER *ioc,
+ struct leapioraid_scsi_io_transfer *transfer_packet,
+ u8 tr_timeout, u8 tr_method)
+{
+ struct LeapioraidSCSIIORep_t *mpi_reply;
+ struct LeapioSCSIIOReq_t *mpi_request;
+ u16 smid;
+ u8 issue_reset = 0;
+ int rc;
+ void *priv_sense;
+ u32 mpi_control;
+ void *psge;
+ dma_addr_t data_out_dma = 0;
+ dma_addr_t data_in_dma = 0;
+ size_t data_in_sz = 0;
+ size_t data_out_sz = 0;
+ u16 handle;
+ u8 retry_count = 0, host_reset_count = 0;
+ int tm_return_code;
+
+ if (ioc->pci_error_recovery) {
+ pr_err("%s %s: pci error recovery in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+ if (ioc->shost_recovery) {
+ pr_info("%s %s: host recovery in progress!\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+ handle = transfer_packet->handle;
+ if (handle == LEAPIORAID_INVALID_DEVICE_HANDLE) {
+ pr_info("%s %s: no device!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+ mutex_lock(&ioc->scsih_cmds.mutex);
+ if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) {
+ pr_err("%s %s: scsih_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+retry_loop:
+ if (test_bit(handle,
ioc->device_remove_in_progress)) { + pr_info("%s %s: device removal in progress\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = ioc->shost->can_queue + + LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY; + rc = 0; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioSCSIIOReq_t)); + if (transfer_packet->is_raid) + mpi_request->Function = + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH; + else + mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST; + mpi_request->DevHandle = cpu_to_le16(handle); + switch (transfer_packet->dir) { + case DMA_TO_DEVICE: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_WRITE; + data_out_dma = transfer_packet->data_dma; + data_out_sz = transfer_packet->data_length; + break; + case DMA_FROM_DEVICE: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_READ; + data_in_dma = transfer_packet->data_dma; + data_in_sz = transfer_packet->data_length; + break; + case DMA_BIDIRECTIONAL: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_BIDIRECTIONAL; + BUG(); + break; + default: + case DMA_NONE: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER; + break; + } + psge = &mpi_request->SGL; + ioc->build_sg( + ioc, psge, data_out_dma, + data_out_sz, data_in_dma, + data_in_sz); + mpi_request->Control = cpu_to_le32(mpi_control | + LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ); + mpi_request->DataLength = cpu_to_le32(transfer_packet->data_length); + mpi_request->MsgFlags = LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + mpi_request->SenseBufferLowAddress = + leapioraid_base_get_sense_buffer_dma(ioc, smid); + priv_sense = leapioraid_base_get_sense_buffer(ioc, smid); + mpi_request->SGLOffset0 = offsetof(struct LeapioSCSIIOReq_t, SGL) / 4; + mpi_request->IoFlags = cpu_to_le16(transfer_packet->cdb_length); + int_to_scsilun(transfer_packet->lun, (struct scsi_lun *) + mpi_request->LUN); + memcpy(mpi_request->CDB.CDB32, transfer_packet->cdb, + transfer_packet->cdb_length); + init_completion(&ioc->scsih_cmds.done); + if (likely(mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST)) + ioc->put_smid_scsi_io(ioc, smid, handle); + else + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + transfer_packet->timeout * HZ); + if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, + mpi_request, + sizeof(struct LeapioSCSIIOReq_t) / 4, + issue_reset); + goto issue_target_reset; + } + if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + transfer_packet->valid_reply = 1; + mpi_reply = ioc->scsih_cmds.reply; + transfer_packet->sense_length = + le32_to_cpu(mpi_reply->SenseCount); + if (transfer_packet->sense_length) + memcpy(transfer_packet->sense, priv_sense, + transfer_packet->sense_length); + transfer_packet->transfer_length = + le32_to_cpu(mpi_reply->TransferCount); + transfer_packet->ioc_status = + le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + transfer_packet->scsi_state = mpi_reply->SCSIState; + transfer_packet->scsi_status = mpi_reply->SCSIStatus; + transfer_packet->log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + } + goto out; +issue_target_reset: + if (issue_reset) { + pr_info("%s issue target reset: handle(0x%04x)\n", ioc->name, handle); + tm_return_code = + leapioraid_scsihost_issue_locked_tm(ioc, handle, + 
0xFFFFFFFF, 0xFFFFFFFF,
+ 0,
+ LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ smid, tr_timeout,
+ tr_method);
+ if (tm_return_code == SUCCESS) {
+ pr_err(
+ "%s target reset completed: handle (0x%04x)\n",
+ ioc->name, handle);
+ if (((ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)
+ && retry_count++ < 3)
+ || ((ioc->scsih_cmds.status & LEAPIORAID_CMD_RESET)
+ && host_reset_count++ == 0)) {
+ pr_info("%s issue retry: handle (0x%04x)\n",
+ ioc->name, handle);
+ goto retry_loop;
+ }
+ } else
+ pr_err("%s target reset didn't complete: handle(0x%04x)\n",
+ ioc->name, handle);
+ rc = -EFAULT;
+ } else
+ rc = -EAGAIN;
+out:
+ ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+ return rc;
+}
+
+static enum device_responsive_state
+leapioraid_scsihost_determine_disposition(
+ struct LEAPIORAID_ADAPTER *ioc,
+ struct leapioraid_scsi_io_transfer *transfer_packet)
+{
+ /* not static: a shared return slot would race between adapters */
+ enum device_responsive_state rc;
+ struct sense_info sense_info = { 0, 0, 0 };
+ u8 check_sense = 0;
+ char *desc = NULL;
+
+ if (!transfer_packet->valid_reply)
+ return DEVICE_READY;
+ switch (transfer_packet->ioc_status) {
+ case LEAPIORAID_IOCSTATUS_BUSY:
+ case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED:
+ rc = DEVICE_RETRY;
+ break;
+ case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED:
+ if (transfer_packet->log_info == 0x31170000) {
+ rc = DEVICE_RETRY;
+ break;
+ }
+ if (transfer_packet->cdb[0] == REPORT_LUNS)
+ rc = DEVICE_READY;
+ else
+ rc = DEVICE_RETRY;
+ break;
+ case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case LEAPIORAID_IOCSTATUS_SUCCESS:
+ if (!transfer_packet->scsi_state &&
+ !transfer_packet->scsi_status) {
+ rc = DEVICE_READY;
+ break;
+ }
+ if (transfer_packet->scsi_state &
+ LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) {
+ rc = DEVICE_ERROR;
+ check_sense = 1;
+ break;
+ }
+ if (transfer_packet->scsi_state &
+ (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED |
+ LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS |
+ LEAPIORAID_SCSI_STATE_TERMINATED)) {
+ rc = DEVICE_RETRY;
+ break;
+ }
+ if (transfer_packet->scsi_status >= LEAPIORAID_SCSI_STATUS_BUSY) {
+ rc = DEVICE_RETRY;
+ break;
+ }
+ rc = DEVICE_READY;
+ break;
+ case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ if (transfer_packet->scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED)
+ rc = DEVICE_RETRY;
+ else
+ rc = DEVICE_ERROR;
+ break;
+ case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER:
+ default:
+ rc = DEVICE_ERROR;
+ break;
+ }
+ if (check_sense) {
+ leapioraid_scsihost_normalize_sense(
+ transfer_packet->sense, &sense_info);
+ if (sense_info.skey == UNIT_ATTENTION)
+ rc = DEVICE_RETRY_UA;
+ else if (sense_info.skey == NOT_READY) {
+ if (sense_info.asc == 0x3a)
+ rc = DEVICE_READY;
+ else if (sense_info.asc == 0x04) {
+ if (sense_info.ascq == 0x03 ||
+ sense_info.ascq == 0x0b ||
+ sense_info.ascq == 0x0c) {
+ rc = DEVICE_ERROR;
+ } else
+ rc = DEVICE_START_UNIT;
+ } else if (sense_info.asc == 0x3e && !sense_info.ascq)
+ rc = DEVICE_START_UNIT;
+ } else if (sense_info.skey == ILLEGAL_REQUEST &&
+ transfer_packet->cdb[0] == REPORT_LUNS) {
+ rc = DEVICE_READY;
+ } else if (sense_info.skey == MEDIUM_ERROR) {
+ if (sense_info.asc == 0x31)
+ rc = DEVICE_READY;
+ } else if (sense_info.skey == HARDWARE_ERROR) {
+ if (sense_info.asc == 0x19)
+ rc = DEVICE_READY;
+ }
+ }
+ if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) {
+ switch (rc) {
+ case DEVICE_READY:
+ desc = "ready";
+ break;
+ case DEVICE_RETRY:
+ desc = "retry";
+ break;
+ case DEVICE_RETRY_UA:
+ desc = "retry_ua";
+ break;
+ case DEVICE_START_UNIT:
+ desc = "start_unit";
+ break;
+ case DEVICE_STOP_UNIT:
+ desc = "stop_unit";
+ break;
+ case DEVICE_ERROR:
+ desc = "error";
+ break;
+ }
+ pr_info(
+ "%s \tioc_status(0x%04x), loginfo(0x%08x),\n\t\t"
+ "scsi_status(0x%02x), scsi_state(0x%02x), rc(%s)\n",
+ ioc->name,
+ transfer_packet->ioc_status,
+ transfer_packet->log_info,
+ transfer_packet->scsi_status,
+ transfer_packet->scsi_state,
+ desc);
+ if (check_sense)
+ pr_info("%s \t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x]\n",
+ ioc->name,
+ sense_info.skey, sense_info.asc,
+ sense_info.ascq);
+ }
+ return rc;
+}
+
+static enum device_responsive_state
+leapioraid_scsihost_inquiry_vpd_sn(
+ struct LEAPIORAID_ADAPTER *ioc, u16 handle,
+ u8 **serial_number)
+{
+ struct leapioraid_scsi_io_transfer *transfer_packet;
+ enum device_responsive_state rc;
+ u8 *inq_data;
+ int return_code;
+ u32 data_length;
+ u8 len;
+ u8 tr_timeout = 30;
+ u8 tr_method = 0;
+
+ inq_data = NULL;
+ transfer_packet
+ = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), GFP_KERNEL);
+ if (!transfer_packet) {
+ pr_err("%s failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = DEVICE_RETRY;
+ goto out;
+ }
+ data_length = 252;
+ inq_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
+ &transfer_packet->data_dma, GFP_ATOMIC);
+ if (!inq_data) {
+ rc = DEVICE_RETRY;
+ goto out;
+ }
+
+ rc = DEVICE_READY;
+ memset(inq_data, 0, data_length);
+ transfer_packet->handle = handle;
+ transfer_packet->dir = DMA_FROM_DEVICE;
+ transfer_packet->data_length = data_length;
+ transfer_packet->cdb_length = 6;
+ transfer_packet->cdb[0] = INQUIRY;
+ transfer_packet->cdb[1] = 1;
+ transfer_packet->cdb[2] = 0x80;
+ transfer_packet->cdb[4] = data_length;
+ transfer_packet->timeout = 30;
+ tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+ return_code =
+ leapioraid_scsi_send_scsi_io(
+ ioc, transfer_packet, tr_timeout, tr_method);
+ switch (return_code) {
+ case 0:
+ rc = leapioraid_scsihost_determine_disposition(
+ ioc, transfer_packet);
+ if (rc == DEVICE_READY) {
+ len = strlen(&inq_data[4]) + 1;
+ *serial_number = kmalloc(len, GFP_KERNEL);
+ if (*serial_number)
+ /* bound by the allocation, not sizeof(a pointer) */
+ strscpy(*serial_number, &inq_data[4], len);
+ }
+ break;
+ case -EAGAIN:
+ rc = DEVICE_RETRY;
+ break;
+ case -EFAULT:
+ default:
+ pr_err("%s failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = DEVICE_ERROR;
+ break;
+ }
+out:
+ if (inq_data)
+ dma_free_coherent(&ioc->pdev->dev, data_length, inq_data,
+ transfer_packet->data_dma);
+ kfree(transfer_packet);
+ return rc;
+}
+
+static enum device_responsive_state
+leapioraid_scsihost_inquiry_vpd_supported_pages(
+ struct LEAPIORAID_ADAPTER *ioc,
+ u16 handle, u32 lun, void *data,
+ u32 data_length)
+{
+ struct leapioraid_scsi_io_transfer *transfer_packet;
+ enum device_responsive_state rc;
+ void *inq_data;
+ int return_code;
+
+ inq_data = NULL;
+ transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer),
+ GFP_KERNEL);
+ if (!transfer_packet) {
+ rc = DEVICE_RETRY;
+ goto out;
+ }
+ inq_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
+ &transfer_packet->data_dma, GFP_ATOMIC);
+ if (!inq_data) {
+ rc = DEVICE_RETRY;
+ goto out;
+ }
+ rc = DEVICE_READY;
+ memset(inq_data, 0, data_length);
+ transfer_packet->handle = handle;
+ transfer_packet->dir = DMA_FROM_DEVICE;
+ transfer_packet->data_length = data_length;
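+ /* EVPD INQUIRY, page code 0x00: the list of supported VPD pages */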
+ transfer_packet->cdb_length = 6; + transfer_packet->lun = lun; + transfer_packet->cdb[0] = INQUIRY; + transfer_packet->cdb[1] = 1; + transfer_packet->cdb[4] = data_length; + transfer_packet->timeout = 30; + return_code = leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, 30, 0); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + if (rc == DEVICE_READY) + memcpy(data, inq_data, data_length); + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + if (inq_data) + dma_free_coherent(&ioc->pdev->dev, data_length, inq_data, + transfer_packet->data_dma); + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_report_luns( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, void *data, + u32 data_length, u8 retry_count, u8 is_pd, u8 tr_timeout, + u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + void *lun_data; + int return_code; + int retries; + + lun_data = NULL; + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + lun_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &transfer_packet->data_dma, GFP_ATOMIC); + if (!lun_data) { + rc = DEVICE_RETRY; + goto out; + } + for (retries = 0; retries < 4; retries++) { + rc = DEVICE_ERROR; + pr_info("%s REPORT_LUNS: handle(0x%04x), retries(%d)\n", + ioc->name, handle, retries); + memset(lun_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 12; + transfer_packet->cdb[0] = REPORT_LUNS; + transfer_packet->cdb[6] = (data_length >> 24) & 0xFF; + transfer_packet->cdb[7] = (data_length >> 16) & 0xFF; + transfer_packet->cdb[8] = (data_length >> 8) & 0xFF; + transfer_packet->cdb[9] = data_length & 0xFF; + transfer_packet->timeout = 30; + transfer_packet->is_raid = is_pd; + return_code = + leapioraid_scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, + tr_method); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition(ioc, + transfer_packet); + if (rc == DEVICE_READY) { + memcpy(data, lun_data, data_length); + goto out; + } else if (rc == DEVICE_ERROR) + goto out; + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + } +out: + if (lun_data) + dma_free_coherent(&ioc->pdev->dev, data_length, lun_data, + transfer_packet->data_dma); + kfree(transfer_packet); + if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_RETRY_UA) && retry_count >= 144) + rc = DEVICE_ERROR; + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_start_unit( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, u32 lun, + u8 is_pd, u8 tr_timeout, u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + int return_code; + + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + + rc = DEVICE_READY; + transfer_packet->handle = handle; + transfer_packet->dir = DMA_NONE; + transfer_packet->lun = lun; + transfer_packet->cdb_length 
= 6; + transfer_packet->cdb[0] = START_STOP; + transfer_packet->cdb[1] = 1; + transfer_packet->cdb[4] = 1; + transfer_packet->timeout = 30; + transfer_packet->is_raid = is_pd; + pr_info("%s START_UNIT: handle(0x%04x), lun(%d)\n", + ioc->name, handle, lun); + return_code = + leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_test_unit_ready( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, u32 lun, + u8 is_pd, u8 tr_timeout, u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + int return_code; + int sata_init_failure = 0; + + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + rc = DEVICE_READY; + transfer_packet->handle = handle; + transfer_packet->dir = DMA_NONE; + transfer_packet->lun = lun; + transfer_packet->cdb_length = 6; + transfer_packet->cdb[0] = TEST_UNIT_READY; + transfer_packet->timeout = 30; + transfer_packet->is_raid = is_pd; +sata_init_retry: + pr_info("%s TEST_UNIT_READY: handle(0x%04x), lun(%d)\n", + ioc->name, handle, lun); + return_code = + leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + if (rc == DEVICE_RETRY && + transfer_packet->log_info == 0x31111000) { + if (!sata_init_failure++) { + pr_err( + "%s SATA Initialization Timeout,sending a retry\n", + ioc->name); + rc = DEVICE_READY; + goto sata_init_retry; + } else { + pr_err( + "%s SATA Initialization Failed\n", + ioc->name); + rc = DEVICE_ERROR; + } + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_ata_pass_thru_idd( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 *is_ssd_device, u8 tr_timeout, u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + u16 *idd_data; + int return_code; + u32 data_length; + + idd_data = NULL; + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + data_length = 512; + idd_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &transfer_packet->data_dma, GFP_ATOMIC); + if (!idd_data) { + rc = DEVICE_RETRY; + goto out; + } + rc = DEVICE_READY; + memset(idd_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 12; + transfer_packet->cdb[0] = ATA_12; + transfer_packet->cdb[1] = 0x8; + transfer_packet->cdb[2] = 0xd; + transfer_packet->cdb[3] = 0x1; + transfer_packet->cdb[9] = 0xec; + transfer_packet->timeout = 30; + return_code = leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, 30, 0); + switch (return_code) { + case 0: + rc = 
leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + if (rc == DEVICE_READY) { + if (le16_to_cpu(idd_data[217]) == 1) + *is_ssd_device = 1; + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + if (idd_data) { + dma_free_coherent(&ioc->pdev->dev, data_length, idd_data, + transfer_packet->data_dma); + } + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_wait_for_device_to_become_ready( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u8 retry_count, u8 is_pd, + int lun, u8 tr_timeout, u8 tr_method) +{ + enum device_responsive_state rc; + + if (ioc->pci_error_recovery) + return DEVICE_ERROR; + if (ioc->shost_recovery) + return DEVICE_RETRY; + rc = leapioraid_scsihost_test_unit_ready( + ioc, handle, lun, is_pd, tr_timeout, + tr_method); + if (rc == DEVICE_READY || rc == DEVICE_ERROR) + return rc; + else if (rc == DEVICE_START_UNIT) { + rc = leapioraid_scsihost_start_unit( + ioc, handle, lun, is_pd, tr_timeout, + tr_method); + if (rc == DEVICE_ERROR) + return rc; + rc = leapioraid_scsihost_test_unit_ready( + ioc, handle, lun, is_pd, + tr_timeout, tr_method); + } + if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_RETRY_UA) && retry_count >= 144) + rc = DEVICE_ERROR; + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_wait_for_target_to_become_ready( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u8 retry_count, u8 is_pd, + u8 tr_timeout, u8 tr_method) +{ + enum device_responsive_state rc; + struct scsi_lun *lun_data; + u32 length, num_luns; + u8 *data; + int lun; + struct scsi_lun *lunp; + + lun_data = + kcalloc(255, sizeof(struct scsi_lun), GFP_KERNEL); + if (!lun_data) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return DEVICE_RETRY; + } + rc = leapioraid_scsihost_report_luns(ioc, handle, lun_data, + 255 * sizeof(struct scsi_lun), + retry_count, is_pd, tr_timeout, tr_method); + if (rc != DEVICE_READY) + goto out; + data = (u8 *) lun_data; + length = ((data[0] << 24) | (data[1] << 16) | + (data[2] << 8) | (data[3] << 0)); + num_luns = (length / sizeof(struct scsi_lun)); + lunp = &lun_data[1]; + lun = (num_luns) ? 
scsilun_to_int(&lun_data[1]) : 0; + rc = leapioraid_scsihost_wait_for_device_to_become_ready( + ioc, handle, retry_count, + is_pd, lun, tr_timeout, + tr_method); + if (rc == DEVICE_ERROR) { + struct scsi_lun *lunq; + + for (lunq = lunp++; lunq <= &lun_data[num_luns]; lunq++) { + rc = leapioraid_scsihost_wait_for_device_to_become_ready(ioc, + handle, + retry_count, + is_pd, + scsilun_to_int + (lunq), + tr_timeout, + tr_method); + if (rc != DEVICE_ERROR) + goto out; + } + } +out: + kfree(lun_data); + return rc; +} + +static u8 +leapioraid_scsihost_check_access_status( + struct LEAPIORAID_ADAPTER *ioc, u64 sas_address, + u16 handle, u8 access_status) +{ + u8 rc = 1; + char *desc = NULL; + + switch (access_status) { + case LEAPIORAID_SAS_DEVICE0_ASTATUS_NO_ERRORS: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION: + rc = 0; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED: + desc = "sata capability failed"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT: + desc = "sata affiliation conflict"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE: + desc = "route not addressable"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE: + desc = "smp error not addressable"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED: + desc = "device blocked"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_DIAG: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_PIO_SN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MAX: + desc = "sata initialization failed"; + break; + default: + desc = "unknown"; + break; + } + if (!rc) + return 0; + pr_err( + "%s discovery errors(%s): sas_address(0x%016llx),\n\t\t" + "handle(0x%04x)\n", + ioc->name, + desc, + (unsigned long long)sas_address, + handle); + return rc; +} + +static void +leapioraid_scsihost_check_device(struct LEAPIORAID_ADAPTER *ioc, + u64 parent_sas_address, u16 handle, u8 phy_number, + u8 link_rate) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct leapioraid_sas_device *sas_device = NULL; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + u32 ioc_status; + unsigned long flags; + u64 sas_address; + struct scsi_target *starget; + struct LEAPIORAID_TARGET *sas_target_priv_data; + u32 device_info; + u8 *serial_number = NULL; + u8 *original_serial_number = NULL; + int rc; + struct leapioraid_hba_port *port; + + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) + return; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + return; + if (phy_number != sas_device_pg0.PhyNum) + return; + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(leapioraid_scsihost_is_sas_end_device(device_info))) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + port = leapioraid_get_port_by_id(ioc, 
sas_device_pg0.PhysicalPort, 0); + if (!port) + goto out_unlock; + sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port); + if (!sas_device) + goto out_unlock; + if (unlikely(sas_device->handle != handle)) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + starget_printk(KERN_INFO, starget, + "handle changed from(0x%04x) to (0x%04x)!!!\n", + sas_device->handle, handle); + sas_target_priv_data->handle = handle; + sas_device->handle = handle; + if (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + sas_device_pg0.EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0.ConnectorName, 4); + sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + sas_device->is_chassis_slot_valid = 0; + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + sas_device->enclosure_handle); + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + } + if (!(le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { + pr_err("%s device is not present handle(0x%04x), flags!!!\n", + ioc->name, handle); + goto out_unlock; + } + if (leapioraid_scsihost_check_access_status(ioc, sas_address, handle, + sas_device_pg0.AccessStatus)) + goto out_unlock; + original_serial_number = sas_device->serial_number; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + leapioraid_scsihost_ublock_io_device_wait(ioc, sas_address, port); + if (!original_serial_number) + goto out; + if (leapioraid_scsihost_inquiry_vpd_sn(ioc, handle, &serial_number) == + DEVICE_READY && serial_number) { + rc = strcmp(original_serial_number, serial_number); + kfree(serial_number); + if (!rc) + goto out; + leapioraid_device_remove_by_sas_address(ioc, sas_address, port); + leapioraid_transport_update_links(ioc, parent_sas_address, + handle, phy_number, link_rate, + port); + leapioraid_scsihost_add_device(ioc, handle, 0, 0); + } + goto out; +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +out: + if (sas_device) + leapioraid_sas_device_put(sas_device); +} + +static int +leapioraid_scsihost_add_device( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, u8 retry_count, + u8 is_pd) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct leapioraid_sas_device *sas_device; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + u32 ioc_status; + u64 sas_address; + u32 device_info; + enum device_responsive_state rc; + u8 connector_name[5], port_id; + + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 0; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 0; + } + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(leapioraid_scsihost_is_sas_end_device(device_info))) 
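+ /* anything that is not a SAS/SATA end device (e.g. an expander) is never added through this path */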
+ return 0; + set_bit(handle, ioc->pend_os_device_add); + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + if (!(le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { + pr_err("%s device is not present handle(0x%04x)!!!\n", + ioc->name, handle); + return 0; + } + if (leapioraid_scsihost_check_access_status( + ioc, sas_address, handle, + sas_device_pg0.AccessStatus)) + return 0; + port_id = sas_device_pg0.PhysicalPort; + sas_device = leapioraid_get_sdev_by_addr(ioc, + sas_address, + leapioraid_get_port_by_id(ioc, port_id, 0)); + if (sas_device) { + clear_bit(handle, ioc->pend_os_device_add); + leapioraid_sas_device_put(sas_device); + return 0; + } + if (le16_to_cpu(sas_device_pg0.EnclosureHandle)) { + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + le16_to_cpu + (sas_device_pg0.EnclosureHandle)); + if (enclosure_dev == NULL) + pr_info( + "%s Enclosure handle(0x%04x) doesn't\n\t\t" + "match with enclosure device!\n", + ioc->name, + le16_to_cpu(sas_device_pg0.EnclosureHandle)); + } + if (!ioc->wait_for_discovery_to_complete) { + pr_info( + "%s detecting: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", + ioc->name, handle, + (unsigned long long)sas_address, + sas_device_pg0.PhyNum); + rc = leapioraid_scsihost_wait_for_target_to_become_ready( + ioc, handle, + retry_count, + is_pd, 30, 0); + if (rc != DEVICE_READY) { + if (le16_to_cpu(sas_device_pg0.EnclosureHandle) != 0) + dewtprintk(ioc, + pr_info("%s %s: device not ready: slot(%d)\n", + ioc->name, __func__, + le16_to_cpu(sas_device_pg0.Slot))); + if (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + memcpy(connector_name, + sas_device_pg0.ConnectorName, 4); + connector_name[4] = '\0'; + dewtprintk(ioc, + pr_info( + "%s %s: device not ready: enclosure level(0x%04x), connector name( %s)\n", + ioc->name, __func__, + sas_device_pg0.EnclosureLevel, + connector_name)); + } + if ((enclosure_dev) + && (le16_to_cpu(enclosure_dev->pg0.Flags) & + LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID)) + pr_err( + "%s chassis slot(0x%04x)\n", ioc->name, + enclosure_dev->pg0.ChassisSlot); + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT + || rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) + return 1; + else if (rc == DEVICE_ERROR) + return 0; + } + } + sas_device = kzalloc(sizeof(struct leapioraid_sas_device), + GFP_KERNEL); + if (!sas_device) + return 0; + + kref_init(&sas_device->refcount); + sas_device->handle = handle; + if (leapioraid_scsihost_get_sas_address(ioc, + le16_to_cpu(sas_device_pg0.ParentDevHandle), + &sas_device->sas_address_parent) != 0) + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + if (sas_device->enclosure_handle != 0) + sas_device->slot = le16_to_cpu(sas_device_pg0.Slot); + sas_device->device_info = device_info; + sas_device->sas_address = sas_address; + sas_device->port = leapioraid_get_port_by_id(ioc, port_id, 0); + if (!sas_device->port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + sas_device->phy = sas_device_pg0.PhyNum; + sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
+ 1 : 0; + sas_device->supports_sata_smart = + (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED); + if (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = sas_device_pg0.EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0.ConnectorName, 4); + sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + sas_device->is_chassis_slot_valid = 0; + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); + sas_device->port_type = sas_device_pg0.MaxPortConnections; + pr_info( + "%s handle(0x%04x) sas_address(0x%016llx) port_type(0x%02x)\n", + ioc->name, handle, sas_device->sas_address, + sas_device->port_type); + if (ioc->wait_for_discovery_to_complete) + leapioraid_scsihost_sas_device_init_add(ioc, sas_device); + else + leapioraid_scsihost_sas_device_add(ioc, sas_device); +out: + leapioraid_sas_device_put(sas_device); + return 0; +} + +static void +leapioraid_scsihost_remove_device(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + struct LEAPIORAID_TARGET *sas_target_priv_data; + + if (sas_device->pfa_led_on) { + leapioraid_scsihost_turn_off_pfa_led(ioc, sas_device); + sas_device->pfa_led_on = 0; + } + dewtprintk(ioc, pr_info( + "%s %s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info( + ioc, sas_device, NULL, NULL)); + if (sas_device->starget && sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + leapioraid_scsihost_ublock_io_device( + ioc, sas_device->sas_address, + sas_device->port); + sas_target_priv_data->handle = + LEAPIORAID_INVALID_DEVICE_HANDLE; + } + if (!ioc->hide_drives) + leapioraid_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + pr_info("%s removing handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, sas_device->handle, + (unsigned long long)sas_device->sas_address); + leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); + dewtprintk(ioc, pr_info( + "%s %s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, sas_device->handle, + (unsigned long long) + sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info( + ioc, sas_device, NULL, NULL)); + kfree(sas_device->serial_number); +} + +static void +leapioraid_scsihost_sas_topology_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasTopoChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + u8 phy_number; + char *status_str = NULL; + u8 link_rate, prev_link_rate; + + switch (event_data->ExpStatus) { + case LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED: + status_str = "add"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING: + status_str = "remove"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING: + case 0: + status_str = "responding"; + break; + case 
LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: + status_str = "remove delay"; + break; + default: + status_str = "unknown status"; + break; + } + pr_info("%s sas topology change: (%s)\n", + ioc->name, status_str); + pr_info( + "\thandle(0x%04x), enclosure_handle(0x%04x)\n\t\t" + "start_phy(%02d), count(%d)\n", + le16_to_cpu(event_data->ExpanderDevHandle), + le16_to_cpu(event_data->EnclosureHandle), + event_data->StartPhyNum, + event_data->NumEntries); + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + phy_number = event_data->StartPhyNum + i; + reason_code = event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_RC_MASK; + switch (reason_code) { + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED: + status_str = "target add"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + status_str = "target remove"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: + status_str = "delay target remove"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED: + status_str = "link rate change"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_NO_CHANGE: + status_str = "target responding"; + break; + default: + status_str = "unknown"; + break; + } + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; + pr_info( + "\tphy(%02d), attached_handle(0x%04x): %s:\n\t\t" + "link rate: new(0x%02x), old(0x%02x)\n", + phy_number, + handle, + status_str, + link_rate, + prev_link_rate); + } +} + +static int +leapioraid_scsihost_sas_topology_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + int i; + u16 parent_handle, handle; + u16 reason_code; + u8 phy_number, max_phys; + struct leapioraid_raid_sas_node *sas_expander; + struct leapioraid_sas_device *sas_device; + u64 sas_address; + unsigned long flags; + u8 link_rate, prev_link_rate; + int rc; + int requeue_event; + struct leapioraid_hba_port *port; + struct LeapioraidEventDataSasTopoChangeList_t *event_data = + fw_event->event_data; + + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + leapioraid_scsihost_sas_topology_change_event_debug( + ioc, event_data); + if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) + return 0; + if (!ioc->sas_hba.num_phys) + leapioraid_scsihost_sas_host_add(ioc); + else + leapioraid_scsihost_sas_host_refresh(ioc); + if (fw_event->ignore) { + dewtprintk(ioc, + pr_info("%s ignoring expander event\n", + ioc->name)); + return 0; + } + parent_handle = le16_to_cpu(event_data->ExpanderDevHandle); + port = leapioraid_get_port_by_id(ioc, event_data->PhysicalPort, 0); + if (event_data->ExpStatus == LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED) + if (leapioraid_scsihost_expander_add(ioc, parent_handle) != 0) + return 0; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = leapioraid_scsihost_expander_find_by_handle( + ioc, parent_handle); + if (sas_expander) { + sas_address = sas_expander->sas_address; + max_phys = sas_expander->num_phys; + port = sas_expander->port; + } else if (parent_handle < ioc->sas_hba.num_phys) { + sas_address = ioc->sas_hba.sas_address; + max_phys = ioc->sas_hba.num_phys; + } else { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) { + if (fw_event->ignore) { + dewtprintk(ioc, pr_info( + "%s ignoring expander event\n", + ioc->name)); + 
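/* ignore was set after this event was queued, typically because the expander itself is being removed, so drop the remaining PHY entries */ +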
return 0; + } + if (ioc->remove_host || ioc->pci_error_recovery) + return 0; + phy_number = event_data->StartPhyNum + i; + if (phy_number >= max_phys) + continue; + reason_code = event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_RC_MASK; + if ((event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code != + LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) + continue; + if (fw_event->delayed_work_active && (reason_code == + LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) { + dewtprintk(ioc, + pr_info( + "%s ignoring Targ not responding\n\t\t" + "event phy in re-queued event processing\n", + ioc->name)); + continue; + } + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; + switch (reason_code) { + case LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED: + if (ioc->shost_recovery) + break; + if (link_rate == prev_link_rate) + break; + leapioraid_transport_update_links(ioc, sas_address, + handle, phy_number, + link_rate, port); + if (link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + break; + leapioraid_scsihost_check_device(ioc, sas_address, handle, + phy_number, link_rate); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, + handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) { + leapioraid_sas_device_put(sas_device); + break; + } + if (!test_bit(handle, ioc->pend_os_device_add)) + break; + dewtprintk(ioc, pr_err( + "%s handle(0x%04x) device not found:\n\t\t" + "convert event to a device add\n", + ioc->name, handle)); + event_data->PHY[i].PhyStatus &= 0xF0; + event_data->PHY[i].PhyStatus |= + LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED; + fallthrough; + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED: + if (ioc->shost_recovery) + break; + leapioraid_transport_update_links(ioc, sas_address, + handle, phy_number, + link_rate, port); + if (link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + break; + rc = leapioraid_scsihost_add_device(ioc, handle, + fw_event->retries[i], 0); + if (rc) { + fw_event->retries[i]++; + requeue_event = 1; + } else { + event_data->PHY[i].PhyStatus |= + LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT; + } + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + leapioraid_scsihost_device_remove_by_handle(ioc, handle); + break; + } + } + if (event_data->ExpStatus == LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING + && sas_expander) + leapioraid_expander_remove(ioc, sas_address, port); + return requeue_event; +} + +static void +leapioraid_scsihost_sas_device_status_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasDeviceStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA: + reason_str = "smart data"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: + reason_str = "unsupported device discovered"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: + reason_str = "internal device reset"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: + reason_str = "internal task abort"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: + reason_str = "internal task abort set"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: + reason_str = "internal clear task set"; + break; + case 
LEAPIORAID_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: + reason_str = "internal query task"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: + reason_str = "sata init failure"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: + reason_str = "internal device reset complete"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: + reason_str = "internal task abort complete"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: + reason_str = "internal async notification"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality complete"; + break; + default: + reason_str = "unknown reason"; + break; + } + pr_info("%s device status change: (%s)\n" + "\thandle(0x%04x), sas address(0x%016llx), tag(%d)", + ioc->name, reason_str, le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + le16_to_cpu(event_data->TaskTag)); + if (event_data->ReasonCode == LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA) + pr_info("%s , ASC(0x%x), ASCQ(0x%x)\n", + ioc->name, event_data->ASC, event_data->ASCQ); + pr_info("\n"); +} + +static void +leapioraid_scsihost_sas_device_status_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasDeviceStatusChange_t *event_data) +{ + struct LEAPIORAID_TARGET *target_priv_data; + struct leapioraid_sas_device *sas_device; + u64 sas_address; + unsigned long flags; + + if ((ioc->facts.HeaderVersion >> 8) < 0xC) + return; + if (event_data->ReasonCode != + LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && + event_data->ReasonCode != + LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(event_data->SASAddress); + sas_device = __leapioraid_get_sdev_by_addr( + ioc, sas_address, + leapioraid_get_port_by_id(ioc, event_data->PhysicalPort, 0)); + if (!sas_device || !sas_device->starget) + goto out; + target_priv_data = sas_device->starget->hostdata; + if (!target_priv_data) + goto out; + if (event_data->ReasonCode == + LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) + target_priv_data->tm_busy = 1; + else + target_priv_data->tm_busy = 0; + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + pr_err( + "%s %s tm_busy flag for handle(0x%04x)\n", ioc->name, + (target_priv_data->tm_busy == 1) ? 
"Enable" : "Disable", + target_priv_data->handle); +out: + if (sas_device) + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +static void +leapioraid_scsihost_sas_enclosure_dev_status_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasEnclDevStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED: + reason_str = "enclosure add"; + break; + case LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + reason_str = "enclosure remove"; + break; + default: + reason_str = "unknown reason"; + break; + } + pr_info( + "%s enclosure status change: (%s)\n\thandle(0x%04x),\n\t\t" + "enclosure logical id(0x%016llx) number slots(%d)\n", + ioc->name, + reason_str, + le16_to_cpu(event_data->EnclosureHandle), + (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID), + le16_to_cpu(event_data->StartSlot)); +} + +static void +leapioraid_scsihost_sas_enclosure_dev_status_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + struct LeapioraidEventDataSasEnclDevStatusChange_t *event_data = + fw_event->event_data; + int rc; + + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + leapioraid_scsihost_sas_enclosure_dev_status_change_event_debug( + ioc, fw_event->event_data); + if (ioc->shost_recovery) + return; + event_data->EnclosureHandle = le16_to_cpu(event_data->EnclosureHandle); + if (event_data->EnclosureHandle) + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + event_data->EnclosureHandle); + switch (event_data->ReasonCode) { + case LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED: + if (!enclosure_dev) { + enclosure_dev = + kzalloc(sizeof(struct leapioraid_enclosure_node), GFP_KERNEL); + if (!enclosure_dev) { + pr_err("%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + rc = leapioraid_config_get_enclosure_pg0(ioc, + &mpi_reply, + &enclosure_dev->pg0, + LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE, + event_data->EnclosureHandle); + if (rc + || (le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK)) { + kfree(enclosure_dev); + return; + } + list_add_tail(&enclosure_dev->list, + &ioc->enclosure_list); + } + break; + case LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + if (enclosure_dev) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } + break; + default: + break; + } +} + +static void +leapioraid_scsihost_sas_broadcast_primitive_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + u16 smid, handle; + u32 lun; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + u32 termination_count; + u32 query_count; + struct LeapioraidSCSITmgRep_t *mpi_reply; + struct LeapioraidEventDataSasBroadcastPrimitive_t *event_data = + fw_event->event_data; + u16 ioc_status; + unsigned long flags; + int r; + u8 max_retries = 0; + u8 task_abort_retries; + struct leapioraid_scsiio_tracker *st; + + mutex_lock(&ioc->tm_cmds.mutex); + dewtprintk(ioc, + pr_info( + "%s %s: enter: phy number(%d), width(%d)\n", + ioc->name, __func__, + event_data->PhyNum, event_data->PortWidth)); + leapioraid_scsihost_block_io_all_device(ioc); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + mpi_reply = ioc->tm_cmds.reply; +broadcast_aen_retry: + if (max_retries++ == 5) { + dewtprintk(ioc, 
pr_info("%s %s: giving up\n", + ioc->name, __func__)); + goto out; + } else if (max_retries > 1) + dewtprintk(ioc, pr_info("%s %s: %d retry\n", + ioc->name, __func__, max_retries - 1)); + termination_count = 0; + query_count = 0; + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + if (ioc->shost_recovery) + goto out; + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + st = leapioraid_base_scsi_cmd_priv(scmd); + if (!st || st->smid == 0) + continue; + sdev = scmd->device; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + continue; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + continue; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_VOLUME) + continue; + handle = sas_device_priv_data->sas_target->handle; + lun = sas_device_priv_data->lun; + query_count++; + if (ioc->shost_recovery) + goto out; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + r = leapioraid_scsihost_issue_tm(ioc, handle, 0, 0, lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK, + st->smid, 30, 0); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "leapioraid_scsihost_issue_tm:\n\t\t" + "FAILED when sending QUERY_TASK: scmd(%p)\n", + scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, + "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n", + ioc_status, scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + if (mpi_reply->ResponseCode == + LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED || + mpi_reply->ResponseCode == + LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + continue; + } + task_abort_retries = 0; +tm_retry: + if (task_abort_retries++ == 60) { + dewtprintk(ioc, pr_err( + "%s %s: ABORT_TASK: giving up\n", + ioc->name, __func__)); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + if (ioc->shost_recovery) + goto out_no_lock; + r = leapioraid_scsihost_issue_tm(ioc, handle, sdev->channel, + sdev->id, sdev->lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + st->smid, 30, 0); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "ABORT_TASK: FAILED : scmd(%p)\n", scmd); + goto tm_retry; + } + if (task_abort_retries > 1) + sdev_printk(KERN_WARNING, sdev, + "leapioraid_scsihost_issue_tm:\n\t\t" + "ABORT_TASK: RETRIES (%d): scmd(%p)\n", + task_abort_retries - 1, + scmd); + termination_count += le32_to_cpu(mpi_reply->TerminationCount); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + } + if (ioc->broadcast_aen_pending) { + dewtprintk(ioc, + pr_info("%s %s: loop back due to pending AEN\n", + ioc->name, __func__)); + ioc->broadcast_aen_pending = 0; + goto broadcast_aen_retry; + } +out: + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); +out_no_lock: + dewtprintk(ioc, pr_err( + "%s %s - exit, query_count = %d termination_count = %d\n", + ioc->name, __func__, query_count, + termination_count)); + ioc->broadcast_aen_busy = 0; + if (!ioc->shost_recovery) + leapioraid_scsihost_ublock_io_all_device(ioc, 1); + mutex_unlock(&ioc->tm_cmds.mutex); +} + +static void +leapioraid_scsihost_sas_discovery_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct 
LeapioraidEventDataSasDiscovery_t *event_data + = fw_event->event_data; + + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) { + pr_info("%s sas discovery event: (%s)", + ioc->name, + (event_data->ReasonCode == + LEAPIORAID_EVENT_SAS_DISC_RC_STARTED) ? "start" : "stop"); + if (event_data->DiscoveryStatus) + pr_info("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_info("\n"); + } + if (event_data->ReasonCode == LEAPIORAID_EVENT_SAS_DISC_RC_STARTED && + !ioc->sas_hba.num_phys) { + if (disable_discovery > 0 && ioc->shost_recovery) { + while (ioc->shost_recovery) + ssleep(1); + } + leapioraid_scsihost_sas_host_add(ioc); + } +} + +static void +leapioraid_scsihost_sas_device_discovery_error_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidEventDataSasDeviceDiscoveryError_t *event_data = + fw_event->event_data; + + switch (event_data->ReasonCode) { + case LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_FAILED: + pr_warn( + "%s SMP command sent to the expander(handle:0x%04x,\n\t\t" + "sas_address:0x%016llx,physical_port:0x%02x) has failed\n", + ioc->name, + le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + event_data->PhysicalPort); + break; + case LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_TIMEOUT: + pr_warn( + "%s SMP command sent to the expander(handle:0x%04x,\n\t\t" + "sas_address:0x%016llx,physical_port:0x%02x) has timed out\n", + ioc->name, + le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + event_data->PhysicalPort); + break; + default: + break; + } +} + +static int +leapioraid_scsihost_ir_fastpath( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 phys_disk_num) +{ + struct LeapioraidRaidActionReq_t *mpi_request; + struct LeapioraidRaidActionRep_t *mpi_reply; + u16 smid; + u8 issue_reset = 0; + int rc = 0; + u16 ioc_status; + u32 log_info; + + mutex_lock(&ioc->scsih_cmds.mutex); + if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: scsih_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING; + smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + rc = -EAGAIN; + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidRaidActionReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_RAID_ACTION; + mpi_request->Action = 0x24; + mpi_request->PhysDiskNum = phys_disk_num; + dewtprintk(ioc, pr_info( + "%s IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n", + ioc->name, handle, phys_disk_num)); + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, 10 * HZ); + if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, + mpi_request, + sizeof(struct LeapioraidRaidActionReq_t) + / 4, issue_reset); + rc = -EFAULT; + goto out; + } + if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + else + log_info = 0; + ioc_status &= LEAPIORAID_IOCSTATUS_MASK; 
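+ /* a non-SUCCESS ioc_status means the firmware rejected the fast-path RAID_ACTION for this physical disk */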
+ if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + dewtprintk(ioc, pr_err( + "%s IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n", + ioc->name, ioc_status, + log_info)); + rc = -EFAULT; + } else + dewtprintk(ioc, pr_err( + "%s IR RAID_ACTION: completed successfully\n", + ioc->name)); + } +out: + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return rc; +} + +static void +leapioraid_scsihost_reprobe_lun( + struct scsi_device *sdev, void *no_uld_attach) +{ + int rc; + + sdev->no_uld_attach = no_uld_attach ? 1 : 0; + sdev_printk(KERN_INFO, sdev, "%s raid component\n", + sdev->no_uld_attach ? "hiding" : "exposing"); + rc = scsi_device_reprobe(sdev); + pr_info("%s rc=%d\n", __func__, rc); +} + +static void +leapioraid_scsihost_sas_volume_add(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_raid_device *raid_device; + unsigned long flags; + u64 wwid; + u16 handle = le16_to_cpu(element->VolDevHandle); + int rc; + + leapioraid_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + pr_err("%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_wwid( + ioc, wwid); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (raid_device) + return; + raid_device = kzalloc(sizeof(struct leapioraid_raid_device), + GFP_KERNEL); + if (!raid_device) + return; + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + leapioraid_scsihost_raid_device_add(ioc, raid_device); + if (!ioc->wait_for_discovery_to_complete) { + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + } else { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + leapioraid_scsihost_determine_boot_device( + ioc, raid_device, RAID_CHANNEL); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +static void +leapioraid_scsihost_sas_volume_delete( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_device *raid_device; + unsigned long flags; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct scsi_target *starget = NULL; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_raid_device_find_by_handle(ioc, handle); + if (raid_device) { + if (raid_device->starget) { + starget = raid_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 1; + } + pr_info("%s removing handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, raid_device->handle, + (unsigned long long)raid_device->wwid); + list_del(&raid_device->list); + kfree(raid_device); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (starget) + scsi_remove_target(&starget->dev); +} + +static void +leapioraid_scsihost_sas_pd_expose( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_sas_device *sas_device; + struct scsi_target *starget = NULL; + struct LEAPIORAID_TARGET *sas_target_priv_data; + unsigned long flags; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + 
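/* the disk is leaving the volume: drop its RAID-component state so the reprobe below exposes it to the SCSI midlayer again */ +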
sas_device->volume_handle = 0; + sas_device->volume_wwid = 0; + clear_bit(handle, ioc->pd_handles); + if (sas_device->starget && sas_device->starget->hostdata) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->flags &= + ~LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT; + sas_device->pfa_led_on = 0; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) + return; + if (starget) + starget_for_each_device(starget, NULL, leapioraid_scsihost_reprobe_lun); +} + +static void +leapioraid_scsihost_sas_pd_hide( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_sas_device *sas_device; + struct scsi_target *starget = NULL; + struct LEAPIORAID_TARGET *sas_target_priv_data; + unsigned long flags; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + u16 volume_handle = 0; + u64 volume_wwid = 0; + + leapioraid_config_get_volume_handle(ioc, handle, &volume_handle); + if (volume_handle) + leapioraid_config_get_volume_wwid(ioc, volume_handle, + &volume_wwid); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + set_bit(handle, ioc->pd_handles); + if (sas_device->starget && sas_device->starget->hostdata) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->flags |= + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT; + sas_device->volume_handle = volume_handle; + sas_device->volume_wwid = volume_wwid; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) + return; + leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum); + if (starget) + starget_for_each_device(starget, (void *)1, + leapioraid_scsihost_reprobe_lun); +} + +static void +leapioraid_scsihost_sas_pd_delete(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + + leapioraid_scsihost_device_remove_by_handle(ioc, handle); +} + +static void +leapioraid_scsihost_sas_pd_add(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_sas_device *sas_device; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + u32 ioc_status; + u64 sas_address; + u16 parent_handle; + + set_bit(handle, ioc->pd_handles); + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum); + leapioraid_sas_device_put(sas_device); + return; + } + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!leapioraid_scsihost_get_sas_address(ioc, parent_handle, &sas_address)) + leapioraid_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, + LEAPIORAID_SAS_NEG_LINK_RATE_1_5, + leapioraid_get_port_by_id(ioc, + sas_device_pg0.PhysicalPort, + 
0)); + leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum); + leapioraid_scsihost_add_device(ioc, handle, 0, 1); +} + +static void +leapioraid_scsihost_sas_ir_config_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataIrCfgChangeList_t *event_data) +{ + struct LeapioraidEventIrCfgEle_t *element; + u8 element_type; + int i; + char *reason_str = NULL, *element_str = NULL; + + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + pr_info("%s raid config change: (%s), elements(%d)\n", + ioc->name, + (le32_to_cpu(event_data->Flags) & + LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? "foreign" : + "native", event_data->NumElements); + for (i = 0; i < event_data->NumElements; i++, element++) { + switch (element->ReasonCode) { + case LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED: + reason_str = "add"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED: + reason_str = "remove"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_NO_CHANGE: + reason_str = "no change"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE: + reason_str = "hide"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE: + reason_str = "unhide"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + reason_str = "volume_created"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + reason_str = "volume_deleted"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED: + reason_str = "pd_created"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED: + reason_str = "pd_deleted"; + break; + default: + reason_str = "unknown reason"; + break; + } + element_type = le16_to_cpu(element->ElementFlags) & + LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; + switch (element_type) { + case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: + element_str = "volume"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: + element_str = "phys disk"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: + element_str = "hot spare"; + break; + default: + element_str = "unknown element"; + break; + } + pr_info( + "\t(%s:%s), vol handle(0x%04x), pd handle(0x%04x), pd num(0x%02x)\n", + element_str, + reason_str, le16_to_cpu(element->VolDevHandle), + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } +} + +static void +leapioraid_scsihost_sas_ir_config_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidEventIrCfgEle_t *element; + int i; + u8 foreign_config; + struct LeapioraidEventDataIrCfgChangeList_t *event_data + = fw_event->event_data; + + if ((ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + && !ioc->warpdrive_msg) + leapioraid_scsihost_sas_ir_config_change_event_debug(ioc, event_data); + foreign_config = (le32_to_cpu(event_data->Flags) & + LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
1 : 0; + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + if (ioc->shost_recovery) { + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode == + LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE) + leapioraid_scsihost_ir_fastpath(ioc, + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } + return; + } + for (i = 0; i < event_data->NumElements; i++, element++) { + switch (element->ReasonCode) { + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + case LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED: + if (!foreign_config) + leapioraid_scsihost_sas_volume_add(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + case LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED: + if (!foreign_config) + leapioraid_scsihost_sas_volume_delete(ioc, + le16_to_cpu + (element->VolDevHandle)); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED: + leapioraid_scsihost_sas_pd_hide(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED: + leapioraid_scsihost_sas_pd_expose(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE: + leapioraid_scsihost_sas_pd_add(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE: + leapioraid_scsihost_sas_pd_delete(ioc, element); + break; + } + } +} + +static void +leapioraid_scsihost_sas_ir_volume_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + u64 wwid; + unsigned long flags; + struct leapioraid_raid_device *raid_device; + u16 handle; + u32 state; + int rc; + struct LeapioraidEventDataIrVol_t *event_data + = fw_event->event_data; + + if (ioc->shost_recovery) + return; + if (event_data->ReasonCode != LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + handle = le16_to_cpu(event_data->VolDevHandle); + state = le32_to_cpu(event_data->NewValue); + if (!ioc->warpdrive_msg) + dewtprintk(ioc, + pr_info("%s %s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + ioc->name, + __func__, handle, + le32_to_cpu(event_data->PreviousValue), + state)); + switch (state) { + case LEAPIORAID_RAID_VOL_STATE_MISSING: + case LEAPIORAID_RAID_VOL_STATE_FAILED: + leapioraid_scsihost_sas_volume_delete(ioc, handle); + break; + case LEAPIORAID_RAID_VOL_STATE_ONLINE: + case LEAPIORAID_RAID_VOL_STATE_DEGRADED: + case LEAPIORAID_RAID_VOL_STATE_OPTIMAL: + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = + leapioraid_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (raid_device) + break; + leapioraid_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + pr_err( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + break; + } + raid_device = kzalloc(sizeof(struct leapioraid_raid_device), + GFP_KERNEL); + if (!raid_device) + break; + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + leapioraid_scsihost_raid_device_add(ioc, raid_device); + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + break; + case LEAPIORAID_RAID_VOL_STATE_INITIALIZING: + default: + break; + } +} + +static void +leapioraid_scsihost_sas_ir_physical_disk_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + u16 handle, parent_handle; + u32 state; + struct leapioraid_sas_device *sas_device; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; 
+ u32 ioc_status; + struct LeapioraidEventDataIrPhyDisk_t *event_data + = fw_event->event_data; + u64 sas_address; + + if (ioc->shost_recovery) + return; + if (event_data->ReasonCode != + LEAPIORAID_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) + return; + handle = le16_to_cpu(event_data->PhysDiskDevHandle); + state = le32_to_cpu(event_data->NewValue); + if (!ioc->warpdrive_msg) + dewtprintk(ioc, + pr_info("%s %s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + ioc->name, + __func__, handle, + le32_to_cpu(event_data->PreviousValue), + state)); + switch (state) { + case LEAPIORAID_RAID_PD_STATE_ONLINE: + case LEAPIORAID_RAID_PD_STATE_DEGRADED: + case LEAPIORAID_RAID_PD_STATE_REBUILDING: + case LEAPIORAID_RAID_PD_STATE_OPTIMAL: + case LEAPIORAID_RAID_PD_STATE_HOT_SPARE: + set_bit(handle, ioc->pd_handles); + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + leapioraid_sas_device_put(sas_device); + return; + } + if ((leapioraid_config_get_sas_device_pg0( + ioc, &mpi_reply, + &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, + handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!leapioraid_scsihost_get_sas_address + (ioc, parent_handle, &sas_address)) + leapioraid_transport_update_links(ioc, sas_address, + handle, + sas_device_pg0.PhyNum, + LEAPIORAID_SAS_NEG_LINK_RATE_1_5, + leapioraid_get_port_by_id + (ioc, + sas_device_pg0.PhysicalPort, 0)); + leapioraid_scsihost_add_device(ioc, handle, 0, 1); + break; + case LEAPIORAID_RAID_PD_STATE_OFFLINE: + case LEAPIORAID_RAID_PD_STATE_NOT_CONFIGURED: + case LEAPIORAID_RAID_PD_STATE_NOT_COMPATIBLE: + default: + break; + } +} + +static void +leapioraid_scsihost_sas_ir_operation_status_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataIrOpStatus_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->RAIDOperation) { + case LEAPIORAID_EVENT_IR_RAIDOP_RESYNC: + reason_str = "resync"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: + reason_str = "online capacity expansion"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: + reason_str = "consistency check"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_BACKGROUND_INIT: + reason_str = "background init"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: + reason_str = "make data consistent"; + break; + } + if (!reason_str) + return; + pr_info( + "%s raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n", + ioc->name, reason_str, + le16_to_cpu(event_data->VolDevHandle), + event_data->PercentComplete); +} + +static void +leapioraid_scsihost_sas_ir_operation_status_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidEventDataIrOpStatus_t *event_data + = fw_event->event_data; + struct leapioraid_raid_device *raid_device; + unsigned long flags; + u16 handle; + + if ((ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + && !ioc->warpdrive_msg) + leapioraid_scsihost_sas_ir_operation_status_event_debug( + ioc, event_data); + if (event_data->RAIDOperation == LEAPIORAID_EVENT_IR_RAIDOP_RESYNC) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + handle = 
le16_to_cpu(event_data->VolDevHandle); + raid_device = + leapioraid_raid_device_find_by_handle(ioc, handle); + if (raid_device) + raid_device->percent_complete = + event_data->PercentComplete; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +static void +leapioraid_scsihost_prep_device_scan(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (sas_device_priv_data && sas_device_priv_data->sas_target) + sas_device_priv_data->sas_target->deleted = 1; + } +} + +static void +leapioraid_scsihost_update_device_qdepth(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct leapioraid_sas_device *sas_device; + struct scsi_device *sdev; + u16 qdepth; + + pr_info("%s Update Devices with FW Reported QD\n", + ioc->name); + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (sas_device_priv_data && sas_device_priv_data->sas_target) { + sas_device = sas_device_priv_data->sas_target->sas_dev; + if (sas_device && + sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = + (sas_device->port_type > + 1) ? ioc->max_wideport_qd : ioc->max_narrowport_qd; + else if (sas_device + && sas_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + qdepth = ioc->max_sata_qd; + else + continue; + leapioraid__scsihost_change_queue_depth(sdev, qdepth); + } + } +} + +static void +leapioraid_scsihost_mark_responding_sas_device( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSasDevP0_t *sas_device_pg0) +{ + struct LEAPIORAID_TARGET *sas_target_priv_data = NULL; + struct scsi_target *starget; + struct leapioraid_sas_device *sas_device; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + unsigned long flags; + struct leapioraid_hba_port *port; + + port = leapioraid_get_port_by_id(ioc, sas_device_pg0->PhysicalPort, 0); + if (sas_device_pg0->EnclosureHandle) { + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + le16_to_cpu + (sas_device_pg0->EnclosureHandle)); + if (enclosure_dev == NULL) + pr_info( + "%s Enclosure handle(0x%04x) doesn't match with enclosure device!\n", + ioc->name, le16_to_cpu(sas_device_pg0->EnclosureHandle)); + } + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if ((sas_device->sas_address == + le64_to_cpu(sas_device_pg0->SASAddress)) + && (sas_device->slot == le16_to_cpu(sas_device_pg0->Slot)) + && (sas_device->port == port)) { + sas_device->responding = 1; + starget = sas_device->starget; + if (starget && starget->hostdata) { + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->tm_busy = 0; + sas_target_priv_data->deleted = 0; + } else + sas_target_priv_data = NULL; + if (starget) { + starget_printk(KERN_INFO, starget, + "handle(0x%04x), sas_address(0x%016llx), port: %d\n", + sas_device->handle, + (unsigned long long)sas_device->sas_address, + sas_device->port->port_id); + if (sas_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + } + if (le16_to_cpu(sas_device_pg0->Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + sas_device_pg0->EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0->ConnectorName, 4); + 
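/* ConnectorName is a fixed four-byte field with no terminator of its own, so NUL-terminate the local copy by hand */ +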
sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0->EnclosureHandle); + sas_device->is_chassis_slot_valid = 0; + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + if (sas_device->handle == + le16_to_cpu(sas_device_pg0->DevHandle)) + goto out; + pr_info("\thandle changed from(0x%04x)!!!\n", + sas_device->handle); + sas_device->handle = + le16_to_cpu(sas_device_pg0->DevHandle); + if (sas_target_priv_data) + sas_target_priv_data->handle = + le16_to_cpu(sas_device_pg0->DevHandle); + goto out; + } + } +out: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +static void +leapioraid_scsihost_create_enclosure_list_after_reset( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_enclosure_node *enclosure_dev; + struct LeapioraidCfgRep_t mpi_reply; + u16 enclosure_handle; + int rc; + + leapioraid_free_enclosure_list(ioc); + enclosure_handle = 0xFFFF; + do { + enclosure_dev = + kzalloc(sizeof(struct leapioraid_enclosure_node), GFP_KERNEL); + if (!enclosure_dev) { + pr_err("%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + rc = leapioraid_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_dev->pg0, + LEAPIORAID_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE, + enclosure_handle); + if (rc || (le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK)) { + kfree(enclosure_dev); + return; + } + list_add_tail(&enclosure_dev->list, &ioc->enclosure_list); + enclosure_handle = + le16_to_cpu(enclosure_dev->pg0.EnclosureHandle); + } while (1); +} + +static void +leapioraid_scsihost_search_responding_sas_devices( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u16 handle; + u32 device_info; + + pr_info("%s search for end-devices: start\n", + ioc->name); + if (list_empty(&ioc->sas_device_list)) + goto out; + handle = 0xFFFF; + while (!(leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_info( + "%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(sas_device_pg0.DevHandle); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(leapioraid_scsihost_is_sas_end_device(device_info))) + continue; + leapioraid_scsihost_mark_responding_sas_device( + ioc, &sas_device_pg0); + } +out: + pr_info("%s search for end-devices: complete\n", + ioc->name); +} + +static void +leapioraid_scsihost_mark_responding_raid_device( + struct LEAPIORAID_ADAPTER *ioc, u64 wwid, u16 handle) +{ + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct scsi_target *starget; + struct leapioraid_raid_device *raid_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->wwid == wwid && raid_device->starget) { + starget = raid_device->starget; 
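+ /* a responding volume was matched by WWID: revive its target and, further down, resync the firmware handle if it moved */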
+ if (starget && starget->hostdata) { + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 0; + } else + sas_target_priv_data = NULL; + raid_device->responding = 1; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + starget_printk(KERN_INFO, raid_device->starget, + "handle(0x%04x), wwid(0x%016llx)\n", + handle, + (unsigned long long)raid_device->wwid); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + if (raid_device->handle == handle) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + return; + } + pr_info("\thandle changed from(0x%04x)!!!\n", + raid_device->handle); + raid_device->handle = handle; + if (sas_target_priv_data) + sas_target_priv_data->handle = handle; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return; + } + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +static void +leapioraid_scsihost_search_responding_raid_devices( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidRaidVolP1_t volume_pg1; + struct LeapioraidRaidVolP0_t volume_pg0; + struct LeapioraidRaidPDP0_t pd_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u16 handle; + u8 phys_disk_num; + + if (!ioc->ir_firmware) + return; + pr_info("%s search for raid volumes: start\n", + ioc->name); + if (list_empty(&ioc->raid_device_list)) + goto out; + handle = 0xFFFF; + while (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply, + &volume_pg1, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_info("%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(volume_pg1.DevHandle); + if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, + &volume_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + handle, + sizeof + (struct LeapioraidRaidVolP0_t))) + continue; + if (volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_OPTIMAL || + volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_ONLINE || + volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_DEGRADED) + leapioraid_scsihost_mark_responding_raid_device(ioc, + le64_to_cpu + (volume_pg1.WWID), + handle); + } + phys_disk_num = 0xFF; + memset(ioc->pd_handles, 0, ioc->pd_handles_sz); + while (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, + LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_info("%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + set_bit(handle, ioc->pd_handles); + } +out: + pr_info("%s search for responding raid volumes: complete\n", + ioc->name); +} + +static void +leapioraid_scsihost_mark_responding_expander( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidExpanderP0_t *expander_pg0) +{ + struct leapioraid_raid_sas_node *sas_expander; + unsigned long flags; + int i; + u8 port_id = expander_pg0->PhysicalPort; + struct leapioraid_hba_port *port = leapioraid_get_port_by_id( + ioc, port_id, 0); + struct leapioraid_enclosure_node *enclosure_dev = NULL; + u16 handle = le16_to_cpu(expander_pg0->DevHandle); + u16 enclosure_handle = 
le16_to_cpu(expander_pg0->EnclosureHandle); + u64 sas_address = le64_to_cpu(expander_pg0->SASAddress); + + if (enclosure_handle) + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + enclosure_handle); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->sas_address != sas_address || + (sas_expander->port != port)) + continue; + sas_expander->responding = 1; + if (enclosure_dev) { + sas_expander->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + sas_expander->enclosure_handle = + le16_to_cpu(expander_pg0->EnclosureHandle); + } + if (sas_expander->handle == handle) + goto out; + pr_info( + "\texpander(0x%016llx): handle changed from(0x%04x) to (0x%04x)!!!\n", + (unsigned long long)sas_expander->sas_address, + sas_expander->handle, handle); + sas_expander->handle = handle; + for (i = 0; i < sas_expander->num_phys; i++) + sas_expander->phy[i].handle = handle; + goto out; + } +out: + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +} + +static void +leapioraid_scsihost_search_responding_expanders( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidExpanderP0_t expander_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u64 sas_address; + u16 handle; + u8 port; + + pr_info("%s search for expanders: start\n", + ioc->name); + if (list_empty(&ioc->sas_expander_list)) + goto out; + handle = 0xFFFF; + while (! + (leapioraid_config_get_expander_pg0 + (ioc, &mpi_reply, &expander_pg0, + LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_info( + "%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(expander_pg0.DevHandle); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + port = expander_pg0.PhysicalPort; + pr_info( + "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", + handle, + (unsigned long long)sas_address, + ((ioc->multipath_on_hba) ? 
+ (port) : (LEAPIORAID_MULTIPATH_DISABLED_PORT_ID))); + leapioraid_scsihost_mark_responding_expander( + ioc, &expander_pg0); + } +out: + pr_info("%s search for expanders: complete\n", + ioc->name); +} + +static void +leapioraid_scsihost_remove_unresponding_devices( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_sas_device *sas_device, *sas_device_next; + struct leapioraid_raid_sas_node *sas_expander, *sas_expander_next; + struct leapioraid_raid_device *raid_device, *raid_device_next; + struct list_head tmp_list; + unsigned long flags; + LIST_HEAD(head); + + pr_info("%s removing unresponding devices: start\n", + ioc->name); + pr_err("%s removing unresponding devices: sas end-devices\n", + ioc->name); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_init_list, list) { + list_del_init(&sas_device->list); + leapioraid_sas_device_put(sas_device); + } + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_list, list) { + if (!sas_device->responding) + list_move_tail(&sas_device->list, &head); + else + sas_device->responding = 0; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + list_for_each_entry_safe(sas_device, sas_device_next, &head, list) { + leapioraid_scsihost_remove_device(ioc, sas_device); + list_del_init(&sas_device->list); + leapioraid_sas_device_put(sas_device); + } + if (ioc->ir_firmware) { + pr_info("%s removing unresponding devices: volumes\n", + ioc->name); + list_for_each_entry_safe(raid_device, raid_device_next, + &ioc->raid_device_list, list) { + if (!raid_device->responding) + leapioraid_scsihost_sas_volume_delete(ioc, + raid_device->handle); + else + raid_device->responding = 0; + } + } + pr_err("%s removing unresponding devices: expanders\n", + ioc->name); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + INIT_LIST_HEAD(&tmp_list); + list_for_each_entry_safe(sas_expander, sas_expander_next, + &ioc->sas_expander_list, list) { + if (!sas_expander->responding) + list_move_tail(&sas_expander->list, &tmp_list); + else + sas_expander->responding = 0; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + list_for_each_entry_safe( + sas_expander, sas_expander_next, &tmp_list, list) { + leapioraid_scsihost_expander_node_remove(ioc, sas_expander); + } + pr_err("%s removing unresponding devices: complete\n", ioc->name); + leapioraid_scsihost_ublock_io_all_device(ioc, 0); +} + +static void +leapioraid_scsihost_refresh_expander_links( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_expander, u16 handle) +{ + struct LeapioraidExpanderP1_t expander_pg1; + struct LeapioraidCfgRep_t mpi_reply; + int i; + + for (i = 0; i < sas_expander->num_phys; i++) { + if ((leapioraid_config_get_expander_pg1(ioc, &mpi_reply, + &expander_pg1, i, + handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + leapioraid_transport_update_links(ioc, + sas_expander->sas_address, + le16_to_cpu(expander_pg1.AttachedDevHandle), + i, + expander_pg1.NegotiatedLinkRate >> 4, + sas_expander->port); + } +} + +static void +leapioraid_scsihost_scan_for_devices_after_reset( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidExpanderP0_t expander_pg0; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidRaidVolP1_t *volume_pg1; + struct LeapioraidRaidVolP0_t *volume_pg0; + struct LeapioraidRaidPDP0_t pd_pg0; + struct LeapioraidEventIrCfgEle_t element; + struct LeapioraidCfgRep_t mpi_reply; + u8 
phys_disk_num, port_id; + u16 ioc_status; + u16 handle, parent_handle; + u64 sas_address; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_sas_node *expander_device; + static struct leapioraid_raid_device *raid_device; + u8 retry_count; + unsigned long flags; + + volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL); + if (!volume_pg0) + return; + + volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL); + if (!volume_pg1) { + kfree(volume_pg0); + return; + } + pr_info("%s scan devices: start\n", ioc->name); + leapioraid_scsihost_sas_host_refresh(ioc); + pr_info("%s \tscan devices: expanders start\n", + ioc->name); + handle = 0xFFFF; + while (! + (leapioraid_config_get_expander_pg0 + (ioc, &mpi_reply, &expander_pg0, + LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(expander_pg0.DevHandle); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + port_id = expander_pg0.PhysicalPort; + expander_device = + leapioraid_scsihost_expander_find_by_sas_address( + ioc, + le64_to_cpu + (expander_pg0.SASAddress), + leapioraid_get_port_by_id + (ioc, + port_id, + 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (expander_device) + leapioraid_scsihost_refresh_expander_links( + ioc, expander_device, handle); + else { + pr_err( + "%s \tBEFORE adding expander:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(expander_pg0.SASAddress)); + leapioraid_scsihost_expander_add(ioc, handle); + pr_info( + "%s \tAFTER adding expander:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(expander_pg0.SASAddress)); + } + } + pr_info("%s \tscan devices: expanders complete\n", + ioc->name); + if (!ioc->ir_firmware) + goto skip_to_sas; + pr_info("%s \tscan devices: phys disk start\n", + ioc->name); + phys_disk_num = 0xFF; + while (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, + LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from phys disk scan:\n\t\t" + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + leapioraid_sas_device_put(sas_device); + continue; + } + if (leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, + handle) != 0) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!leapioraid_scsihost_get_sas_address(ioc, parent_handle, + &sas_address)) { + pr_err( + "%s \tBEFORE adding phys disk:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, 
(unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + port_id = sas_device_pg0.PhysicalPort; + leapioraid_transport_update_links(ioc, sas_address, + handle, + sas_device_pg0.PhyNum, + LEAPIORAID_SAS_NEG_LINK_RATE_1_5, + leapioraid_get_port_by_id + (ioc, port_id, 0)); + set_bit(handle, ioc->pd_handles); + retry_count = 0; + while (leapioraid_scsihost_add_device + (ioc, handle, retry_count++, 1)) { + ssleep(1); + } + pr_err( + "%s \tAFTER adding phys disk:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + pr_info("%s \tscan devices: phys disk complete\n", + ioc->name); + pr_info("%s \tscan devices: volumes start\n", + ioc->name); + handle = 0xFFFF; + while (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply, + volume_pg1, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(volume_pg1->DevHandle); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_wwid( + ioc, le64_to_cpu(volume_pg1->WWID)); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (raid_device) + continue; + if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, + volume_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + handle, + sizeof + (struct LeapioraidRaidVolP0_t))) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + if (volume_pg0->VolumeState == LEAPIORAID_RAID_VOL_STATE_OPTIMAL || + volume_pg0->VolumeState == LEAPIORAID_RAID_VOL_STATE_ONLINE || + volume_pg0->VolumeState == + LEAPIORAID_RAID_VOL_STATE_DEGRADED) { + memset(&element, 0, + sizeof(struct LeapioraidEventIrCfgEle_t)); + element.ReasonCode = LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED; + element.VolDevHandle = volume_pg1->DevHandle; + pr_info("%s \tBEFORE adding volume: handle (0x%04x)\n", + ioc->name, volume_pg1->DevHandle); + leapioraid_scsihost_sas_volume_add(ioc, &element); + pr_info("%s \tAFTER adding volume: handle (0x%04x)\n", + ioc->name, volume_pg1->DevHandle); + } + } + pr_info("%s \tscan devices: volumes complete\n", + ioc->name); +skip_to_sas: + pr_info("%s \tscan devices: sas end devices start\n", + ioc->name); + handle = 0xFFFF; + while (!(leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from sas end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(sas_device_pg0.DevHandle); + if (! 
+ (leapioraid_scsihost_is_sas_end_device + (le32_to_cpu(sas_device_pg0.DeviceInfo)))) + continue; + port_id = sas_device_pg0.PhysicalPort; + sas_device = leapioraid_get_sdev_by_addr(ioc, + le64_to_cpu + (sas_device_pg0.SASAddress), + leapioraid_get_port_by_id + (ioc, port_id, 0)); + if (sas_device) { + leapioraid_sas_device_put(sas_device); + continue; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!leapioraid_scsihost_get_sas_address + (ioc, parent_handle, &sas_address)) { + pr_err( + "%s \tBEFORE adding sas end device:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + leapioraid_transport_update_links(ioc, sas_address, + handle, + sas_device_pg0.PhyNum, + LEAPIORAID_SAS_NEG_LINK_RATE_1_5, + leapioraid_get_port_by_id + (ioc, port_id, 0)); + retry_count = 0; + while (leapioraid_scsihost_add_device + (ioc, handle, retry_count++, 0)) { + ssleep(1); + } + pr_err( + "%s \tAFTER adding sas end device:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + pr_err("%s \tscan devices: sas end devices complete\n", ioc->name); + kfree(volume_pg0); + kfree(volume_pg1); + pr_info("%s scan devices: complete\n", ioc->name); +} + +void +leapioraid_scsihost_clear_outstanding_scsi_tm_commands( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_internal_qcmd *scsih_qcmd, *scsih_qcmd_next; + unsigned long flags; + + if (ioc->scsih_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->scsih_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->scsih_cmds.smid); + complete(&ioc->scsih_cmds.done); + } + if (ioc->tm_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->tm_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->tm_cmds.smid); + complete(&ioc->tm_cmds.done); + } + spin_lock_irqsave(&ioc->scsih_q_internal_lock, flags); + list_for_each_entry_safe(scsih_qcmd, scsih_qcmd_next, + &ioc->scsih_q_intenal_cmds, list) { + scsih_qcmd->status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, scsih_qcmd->smid); + } + spin_unlock_irqrestore(&ioc->scsih_q_internal_lock, flags); + memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz); + memset(ioc->device_remove_in_progress, 0, + ioc->device_remove_in_progress_sz); + memset(ioc->tm_tr_retry, 0, ioc->tm_tr_retry_sz); + leapioraid_scsihost_fw_event_cleanup_queue(ioc); + leapioraid_scsihost_flush_running_cmds(ioc); +} + +void +leapioraid_scsihost_reset_handler(struct LEAPIORAID_ADAPTER *ioc, + int reset_phase) +{ + switch (reset_phase) { + case LEAPIORAID_IOC_PRE_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n", + ioc->name, __func__)); + break; + case LEAPIORAID_IOC_AFTER_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n", + ioc->name, __func__)); + leapioraid_scsihost_clear_outstanding_scsi_tm_commands(ioc); + break; + case LEAPIORAID_IOC_DONE_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n", + ioc->name, __func__)); + if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) { + if (ioc->multipath_on_hba) { + leapioraid_scsihost_sas_port_refresh(ioc); + leapioraid_scsihost_update_vphys_after_reset(ioc); + } + leapioraid_scsihost_prep_device_scan(ioc); + leapioraid_scsihost_create_enclosure_list_after_reset(ioc); + leapioraid_scsihost_search_responding_sas_devices(ioc); + 
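/*
+			 * Volumes and expanders are matched against the
+			 * refreshed firmware pages next; devices left
+			 * unmarked are torn down by the delete-devices step.
+			 */
+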
leapioraid_scsihost_search_responding_raid_devices(ioc); + leapioraid_scsihost_search_responding_expanders(ioc); + leapioraid_scsihost_error_recovery_delete_devices(ioc); + } + break; + } +} + +static void +leapioraid_fw_work(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + ioc->current_event = fw_event; + leapioraid_scsihost_fw_event_del_from_list(ioc, fw_event); + if (ioc->remove_host || ioc->pci_error_recovery) { + leapioraid_fw_event_work_put(fw_event); + ioc->current_event = NULL; + return; + } + switch (fw_event->event) { + case LEAPIORAID_REMOVE_UNRESPONDING_DEVICES: + while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery) { + if (ioc->remove_host || ioc->fw_events_cleanup) + goto out; + ssleep(1); + } + leapioraid_scsihost_remove_unresponding_devices(ioc); + leapioraid_scsihost_del_dirty_vphy(ioc); + leapioraid_scsihost_del_dirty_port_entries(ioc); + leapioraid_scsihost_update_device_qdepth(ioc); + leapioraid_scsihost_scan_for_devices_after_reset(ioc); + if (ioc->is_driver_loading) + leapioraid_scsihost_complete_devices_scanning(ioc); + break; + case LEAPIORAID_PORT_ENABLE_COMPLETE: + ioc->start_scan = 0; + dewtprintk(ioc, pr_info( + "%s port enable: complete from worker thread\n", + ioc->name)); + break; + case LEAPIORAID_TURN_ON_PFA_LED: + leapioraid_scsihost_turn_on_pfa_led(ioc, fw_event->device_handle); + break; + case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + if (leapioraid_scsihost_sas_topology_change_event(ioc, fw_event)) { + leapioraid_scsihost_fw_event_requeue(ioc, fw_event, 1000); + ioc->current_event = NULL; + return; + } + break; + case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE: + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + leapioraid_scsihost_sas_device_status_change_event_debug( + ioc, + (struct LeapioraidEventDataSasDeviceStatusChange_t *) + fw_event->event_data); + break; + case LEAPIORAID_EVENT_SAS_DISCOVERY: + leapioraid_scsihost_sas_discovery_event( + ioc, fw_event); + break; + case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + leapioraid_scsihost_sas_device_discovery_error_event( + ioc, fw_event); + break; + case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE: + leapioraid_scsihost_sas_broadcast_primitive_event( + ioc, fw_event); + break; + case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + leapioraid_scsihost_sas_enclosure_dev_status_change_event( + ioc, fw_event); + break; + case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST: + leapioraid_scsihost_sas_ir_config_change_event( + ioc, fw_event); + break; + case LEAPIORAID_EVENT_IR_VOLUME: + leapioraid_scsihost_sas_ir_volume_event( + ioc, fw_event); + break; + case LEAPIORAID_EVENT_IR_PHYSICAL_DISK: + leapioraid_scsihost_sas_ir_physical_disk_event( + ioc, fw_event); + break; + case LEAPIORAID_EVENT_IR_OPERATION_STATUS: + leapioraid_scsihost_sas_ir_operation_status_event( + ioc, fw_event); + break; + default: + break; + } +out: + leapioraid_fw_event_work_put(fw_event); + ioc->current_event = NULL; +} + +static void +leapioraid_firmware_event_work(struct work_struct *work) +{ + struct leapioraid_fw_event_work *fw_event = container_of(work, + struct leapioraid_fw_event_work, + work); + + leapioraid_fw_work(fw_event->ioc, fw_event); +} + +static void +leapioraid_firmware_event_work_delayed(struct work_struct *work) +{ + struct leapioraid_fw_event_work *fw_event = container_of(work, + struct leapioraid_fw_event_work, + delayed_work.work); + + leapioraid_fw_work(fw_event->ioc, fw_event); +} + +u8 +leapioraid_scsihost_event_callback(struct LEAPIORAID_ADAPTER 
*ioc, + u8 msix_index, u32 reply) +{ + struct leapioraid_fw_event_work *fw_event; + struct LeapioraidEventNotificationRep_t *mpi_reply; + u16 event; + u16 sz; + + if (ioc->pci_error_recovery) + return 1; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (unlikely(!mpi_reply)) { + pr_err("%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return 1; + } + event = le16_to_cpu(mpi_reply->Event); + switch (event) { + case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE: + { + struct LeapioraidEventDataSasBroadcastPrimitive_t *baen_data = + (struct LeapioraidEventDataSasBroadcastPrimitive_t *) + mpi_reply->EventData; + if (baen_data->Primitive != + LEAPIORAID_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) + return 1; + if (ioc->broadcast_aen_busy) { + ioc->broadcast_aen_pending++; + return 1; + } + ioc->broadcast_aen_busy = 1; + break; + } + case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + leapioraid_scsihost_check_topo_delete_events( + ioc, + (struct LeapioraidEventDataSasTopoChangeList_t *) + mpi_reply->EventData); + if (ioc->shost_recovery) + return 1; + break; + case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST: + leapioraid_scsihost_check_ir_config_unhide_events( + ioc, + (struct LeapioraidEventDataIrCfgChangeList_t *) + mpi_reply->EventData); + break; + case LEAPIORAID_EVENT_IR_VOLUME: + leapioraid_scsihost_check_volume_delete_events( + ioc, + (struct LeapioraidEventDataIrVol_t *) + mpi_reply->EventData); + break; + case LEAPIORAID_EVENT_LOG_ENTRY_ADDED: + fallthrough; + case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE: + leapioraid_scsihost_sas_device_status_change_event( + ioc, + (struct LeapioraidEventDataSasDeviceStatusChange_t *) + mpi_reply->EventData); + break; + case LEAPIORAID_EVENT_IR_OPERATION_STATUS: + case LEAPIORAID_EVENT_SAS_DISCOVERY: + case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + case LEAPIORAID_EVENT_IR_PHYSICAL_DISK: + break; + default: + return 1; + } + fw_event = leapioraid_alloc_fw_event_work(0); + if (!fw_event) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 1; + } + sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; + fw_event->event_data = kzalloc(sz, GFP_ATOMIC); + if (!fw_event->event_data) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + leapioraid_fw_event_work_put(fw_event); + return 1; + } + if (event == LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST) { + struct LeapioraidEventDataSasTopoChangeList_t *topo_event_data = + (struct LeapioraidEventDataSasTopoChangeList_t *) + mpi_reply->EventData; + fw_event->retries = kzalloc(topo_event_data->NumEntries, + GFP_ATOMIC); + if (!fw_event->retries) { + kfree(fw_event->event_data); + leapioraid_fw_event_work_put(fw_event); + return 1; + } + } + memcpy(fw_event->event_data, mpi_reply->EventData, sz); + fw_event->ioc = ioc; + fw_event->VF_ID = mpi_reply->VF_ID; + fw_event->VP_ID = mpi_reply->VP_ID; + fw_event->event = event; + leapioraid_scsihost_fw_event_add(ioc, fw_event); + leapioraid_fw_event_work_put(fw_event); + return 1; +} + +static void +leapioraid_scsihost_expander_node_remove( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_expander) +{ + struct leapioraid_sas_port *leapioraid_port, *next; + unsigned long flags; + int port_id; + + list_for_each_entry_safe(leapioraid_port, next, + &sas_expander->sas_port_list, port_list) { + if (ioc->shost_recovery) + return; + if 
(leapioraid_port->remote_identify.device_type == + SAS_END_DEVICE) + leapioraid_device_remove_by_sas_address(ioc, + leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + else if (leapioraid_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE + || leapioraid_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + leapioraid_expander_remove(ioc, + leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + } + port_id = sas_expander->port->port_id; + leapioraid_transport_port_remove(ioc, sas_expander->sas_address, + sas_expander->sas_address_parent, + sas_expander->port); + pr_info( + "%s expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", + ioc->name, + sas_expander->handle, + (unsigned long long)sas_expander->sas_address, + port_id); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_del(&sas_expander->list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + kfree(sas_expander->phy); + kfree(sas_expander); +} + +static void +leapioraid_scsihost_ir_shutdown(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidRaidActionReq_t *mpi_request; + struct LeapioraidRaidActionRep_t *mpi_reply; + u16 smid; + + if (!ioc->ir_firmware) + return; + + if (list_empty(&ioc->raid_device_list)) + return; + if (leapioraid_base_pci_device_is_unplugged(ioc)) + return; + mutex_lock(&ioc->scsih_cmds.mutex); + if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: scsih_cmd in use\n", + ioc->name, __func__); + goto out; + } + ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING; + smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidRaidActionReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_RAID_ACTION; + mpi_request->Action = 0x20; + if (!ioc->warpdrive_msg) + pr_info("%s IR shutdown (sending)\n", + ioc->name); + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, 10 * HZ); + if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + goto out; + } + if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + if (!ioc->warpdrive_msg) + pr_info( + "%s IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + } +out: + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + +static int +leapioraid_scsihost_get_shost_and_ioc(struct pci_dev *pdev, + struct Scsi_Host **shost, + struct LEAPIORAID_ADAPTER **ioc) +{ + *shost = pci_get_drvdata(pdev); + if (*shost == NULL) { + dev_err(&pdev->dev, "pdev's driver data is null\n"); + return -ENXIO; + } + *ioc = leapioraid_shost_private(*shost); + if (*ioc == NULL) { + dev_err(&pdev->dev, "shost's private data is null\n"); + return -ENXIO; + } + return 0; +} + +static void +leapioraid_scsihost_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + struct leapioraid_sas_port *leapioraid_port, *next_port; + struct leapioraid_raid_device *raid_device, *next; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct workqueue_struct 
*wq; + unsigned long flags; + struct leapioraid_hba_port *port, *port_next; + struct leapioraid_virtual_phy *vphy, *vphy_next; + struct LeapioraidCfgRep_t mpi_reply; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to remove device\n"); + return; + } + + while (ioc->is_driver_loading) + ssleep(1); + + ioc->remove_host = 1; + leapioraid_wait_for_commands_to_complete(ioc); + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + if (leapioraid_base_pci_device_is_unplugged(ioc)) { + leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_flush_running_cmds(ioc); + } + leapioraid_scsihost_fw_event_cleanup_queue(ioc); + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, + &ioc->ioc_pg1_copy); + leapioraid_scsihost_ir_shutdown(ioc); + sas_remove_host(shost); + scsi_remove_host(shost); + list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, + list) { + if (raid_device->starget) { + sas_target_priv_data = raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + scsi_remove_target(&raid_device->starget->dev); + } + pr_info("%s removing handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, raid_device->handle, + (unsigned long long)raid_device->wwid); + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + } + list_for_each_entry_safe(leapioraid_port, next_port, + &ioc->sas_hba.sas_port_list, port_list) { + if (leapioraid_port->remote_identify.device_type == + SAS_END_DEVICE) + leapioraid_device_remove_by_sas_address(ioc, + leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + else if (leapioraid_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE + || leapioraid_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + leapioraid_expander_remove(ioc, + leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + } + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (port->vphys_mask) { + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + list_del(&vphy->list); + kfree(vphy); + } + } + list_del(&port->list); + kfree(port); + } + if (ioc->sas_hba.num_phys) { + kfree(ioc->sas_hba.phy); + ioc->sas_hba.phy = NULL; + ioc->sas_hba.num_phys = 0; + } + leapioraid_base_detach(ioc); + spin_lock(&leapioraid_gioc_lock); + list_del(&ioc->list); + spin_unlock(&leapioraid_gioc_lock); + scsi_host_put(shost); +} + +static void +leapioraid_scsihost_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + struct workqueue_struct *wq; + unsigned long flags; + struct LeapioraidCfgRep_t mpi_reply; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to shutdown device\n"); + return; + } + ioc->remove_host = 1; + leapioraid_wait_for_commands_to_complete(ioc); + leapioraid_scsihost_fw_event_cleanup_queue(ioc); + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, + &ioc->ioc_pg1_copy); + leapioraid_scsihost_ir_shutdown(ioc); + leapioraid_base_mask_interrupts(ioc); + 
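/*
+	 * With interrupts masked, bring the IOC back to the ready state
+	 * via a soft reset, then release the IRQ and MSI-X resources.
+	 */
+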
ioc->shost_recovery = 1; + leapioraid_base_make_ioc_ready(ioc, SOFT_RESET); + ioc->shost_recovery = 0; + leapioraid_base_free_irq(ioc); + leapioraid_base_disable_msix(ioc); +} + +static void +leapioraid_scsihost_probe_boot_devices(struct LEAPIORAID_ADAPTER *ioc) +{ + u32 channel; + void *device; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + u16 handle; + u64 sas_address_parent; + u64 sas_address; + unsigned long flags; + int rc; + struct leapioraid_hba_port *port; + u8 protection_mask; + + if (!ioc->bios_pg3.BiosVersion) + return; + + device = NULL; + if (ioc->req_boot_device.device) { + device = ioc->req_boot_device.device; + channel = ioc->req_boot_device.channel; + } else if (ioc->req_alt_boot_device.device) { + device = ioc->req_alt_boot_device.device; + channel = ioc->req_alt_boot_device.channel; + } else if (ioc->current_boot_device.device) { + device = ioc->current_boot_device.device; + channel = ioc->current_boot_device.channel; + } + if (!device) + return; + if (channel == RAID_CHANNEL) { + raid_device = device; + if (raid_device->starget) + return; + if (!ioc->disable_eedp_support) { + protection_mask = scsi_host_get_prot(ioc->shost); + if (protection_mask & SHOST_DIX_TYPE0_PROTECTION) { + scsi_host_set_prot(ioc->shost, + protection_mask & 0x77); + pr_err( + "%s: Disabling DIX0 because of unsupport!\n", + ioc->name); + } + } + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + } else { + sas_device = device; + if (sas_device->starget) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + handle = sas_device->handle; + sas_address_parent = sas_device->sas_address_parent; + sas_address = sas_device->sas_address; + port = sas_device->port; + list_move_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (!port) + return; + + if (ioc->hide_drives) + return; + + if (!leapioraid_transport_port_add(ioc, handle, + sas_address_parent, port)) { + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + if (!ioc->is_driver_loading) { + leapioraid_transport_port_remove(ioc, + sas_address, + sas_address_parent, + port); + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } + } + } +} + +static void +leapioraid_scsihost_probe_raid(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_raid_device *raid_device, *raid_next; + int rc; + + list_for_each_entry_safe(raid_device, raid_next, + &ioc->raid_device_list, list) { + if (raid_device->starget) + continue; + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + } +} + +static +struct leapioraid_sas_device *leapioraid_get_next_sas_device( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_sas_device *sas_device = NULL; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&ioc->sas_device_init_list)) { + sas_device = list_first_entry(&ioc->sas_device_init_list, + struct leapioraid_sas_device, list); + leapioraid_sas_device_get(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return sas_device; +} + +static void +leapioraid_sas_device_make_active(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&sas_device->list)) { + 
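/*
+		 * Still on the init list: drop the init-list reference
+		 * before taking a new one for the active device list.
+		 */
+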
list_del_init(&sas_device->list); + leapioraid_sas_device_put(sas_device); + } + leapioraid_sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +static void +leapioraid_scsihost_probe_sas(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_sas_device *sas_device; + + while ((sas_device = leapioraid_get_next_sas_device(ioc))) { + if (ioc->hide_drives) { + leapioraid_sas_device_make_active(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + continue; + } + if (!leapioraid_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent, + sas_device->port)) { + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + continue; + } else if (!sas_device->starget) { + if (!ioc->is_driver_loading) { + leapioraid_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + continue; + } + } + leapioraid_sas_device_make_active(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + } +} + +static void +leapioraid_scsihost_probe_devices(struct LEAPIORAID_ADAPTER *ioc) +{ + u16 volume_mapping_flags; + + if (!(ioc->facts.ProtocolFlags + & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) + return; + leapioraid_scsihost_probe_boot_devices(ioc); + + if (ioc->ir_firmware) { + volume_mapping_flags = + le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & + LEAPIORAID_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; + if (volume_mapping_flags == + LEAPIORAID_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { + leapioraid_scsihost_probe_raid(ioc); + leapioraid_scsihost_probe_sas(ioc); + } else { + leapioraid_scsihost_probe_sas(ioc); + leapioraid_scsihost_probe_raid(ioc); + } + } else { + leapioraid_scsihost_probe_sas(ioc); + } +} + +static void +leapioraid_scsihost_scan_start(struct Scsi_Host *shost) +{ + struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost); + int rc; + + if (disable_discovery > 0) + return; + ioc->start_scan = 1; + rc = leapioraid_port_enable(ioc); + if (rc != 0) + pr_info("%s port enable: FAILED\n", + ioc->name); +} + +void +leapioraid_scsihost_complete_devices_scanning(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->wait_for_discovery_to_complete) { + ioc->wait_for_discovery_to_complete = 0; + leapioraid_scsihost_probe_devices(ioc); + } + leapioraid_base_start_watchdog(ioc); + ioc->is_driver_loading = 0; +} + +static int +leapioraid_scsihost_scan_finished( + struct Scsi_Host *shost, unsigned long time) +{ + struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost); + u32 ioc_state; + int issue_hard_reset = 0; + + if (disable_discovery > 0) { + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + goto out; + } + if (time >= (300 * HZ)) { + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + pr_info("%s port enable: FAILED with timeout (timeout=300s)\n", + ioc->name); + ioc->is_driver_loading = 0; + goto out; + } + if (ioc->start_scan) { + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, + ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + issue_hard_reset = 1; + goto out; + } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + leapioraid_base_coredump_info(ioc, + ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + 
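/*
+			 * Give the firmware time to finish writing its
+			 * coredump before escalating to an adapter reset.
+			 */
+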
leapioraid_base_wait_for_coredump_completion(ioc, + __func__); + issue_hard_reset = 1; + goto out; + } + return 0; + } + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_RESET) { + pr_err("%s port enable: aborted due to diag reset\n", + ioc->name); + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + goto out; + } + if (ioc->start_scan_failed) { + pr_info("%s port enable: FAILED with (ioc_status=0x%08x)\n", + ioc->name, ioc->start_scan_failed); + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + ioc->remove_host = 1; + goto out; + } + pr_info("%s port enable: SUCCESS\n", ioc->name); + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + leapioraid_scsihost_complete_devices_scanning(ioc); +out: + if (issue_hard_reset) { + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (leapioraid_base_hard_reset_handler(ioc, SOFT_RESET)) + ioc->is_driver_loading = 0; + } + return 1; +} + +SCSIH_MAP_QUEUE(struct Scsi_Host *shost) +{ + struct LEAPIORAID_ADAPTER *ioc = + (struct LEAPIORAID_ADAPTER *)shost->hostdata; + struct blk_mq_queue_map *map; + int i, qoff, offset; + int nr_msix_vectors = ioc->iopoll_q_start_index; + int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors; + + if (shost->nr_hw_queues == 1) + return; + for (i = 0, qoff = 0; i < shost->nr_maps; i++) { + map = &shost->tag_set.map[i]; + map->nr_queues = 0; + offset = 0; + if (i == HCTX_TYPE_DEFAULT) { + map->nr_queues = + nr_msix_vectors - ioc->high_iops_queues; + offset = ioc->high_iops_queues; + } else if (i == HCTX_TYPE_POLL) + map->nr_queues = iopoll_q_count; + if (!map->nr_queues) + BUG_ON(i == HCTX_TYPE_DEFAULT); + map->queue_offset = qoff; + if (i != HCTX_TYPE_POLL) + blk_mq_pci_map_queues(map, ioc->pdev, offset); + else + blk_mq_map_queues(map); + qoff += map->nr_queues; + } +} + +static struct scsi_host_template leapioraid_driver_template = { + .module = THIS_MODULE, + .name = "LEAPIO RAID Host", + .proc_name = LEAPIORAID_DRIVER_NAME, + .queuecommand = leapioraid_scsihost_qcmd, + .target_alloc = leapioraid_scsihost_target_alloc, + .slave_alloc = leapioraid_scsihost_slave_alloc, + .slave_configure = leapioraid_scsihost_slave_configure, + .target_destroy = leapioraid_scsihost_target_destroy, + .slave_destroy = leapioraid_scsihost_slave_destroy, + .scan_finished = leapioraid_scsihost_scan_finished, + .scan_start = leapioraid_scsihost_scan_start, + .change_queue_depth = leapioraid_scsihost_change_queue_depth, + .eh_abort_handler = leapioraid_scsihost_abort, + .eh_device_reset_handler = leapioraid_scsihost_dev_reset, + .eh_target_reset_handler = leapioraid_scsihost_target_reset, + .eh_host_reset_handler = leapioraid_scsihost_host_reset, + .bios_param = leapioraid_scsihost_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = LEAPIORAID_SG_DEPTH, + .max_sectors = 128, + .max_segment_size = 0xffffffff, + .cmd_per_lun = 128, + .shost_groups = leapioraid_host_groups, + .sdev_groups = leapioraid_dev_groups, + .track_queue_depth = 1, + .cmd_size = sizeof(struct leapioraid_scsiio_tracker), + .map_queues = leapioraid_scsihost_map_queues, + .mq_poll = leapioraid_blk_mq_poll, +}; + +static struct raid_function_template leapioraid_raid_functions = { + .cookie = &leapioraid_driver_template, + .is_raid = leapioraid_scsihost_is_raid, + .get_resync = leapioraid_scsihost_get_resync, + .get_state = leapioraid_scsihost_get_state, +}; + +static int +leapioraid_scsihost_probe( + struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct LEAPIORAID_ADAPTER *ioc; + struct Scsi_Host 
*shost = NULL; + int rv; + + shost = scsi_host_alloc(&leapioraid_driver_template, + sizeof(struct LEAPIORAID_ADAPTER)); + if (!shost) + return -ENODEV; + ioc = shost_priv(shost); + memset(ioc, 0, sizeof(struct LEAPIORAID_ADAPTER)); + ioc->id = leapioraid_ids++; + sprintf(ioc->driver_name, "%s", LEAPIORAID_DRIVER_NAME); + + ioc->combined_reply_queue = 1; + ioc->nc_reply_index_count = 16; + ioc->multipath_on_hba = 1; + + ioc = leapioraid_shost_private(shost); + INIT_LIST_HEAD(&ioc->list); + spin_lock(&leapioraid_gioc_lock); + list_add_tail(&ioc->list, &leapioraid_ioc_list); + spin_unlock(&leapioraid_gioc_lock); + ioc->shost = shost; + ioc->pdev = pdev; + + ioc->scsi_io_cb_idx = scsi_io_cb_idx; + ioc->tm_cb_idx = tm_cb_idx; + ioc->ctl_cb_idx = ctl_cb_idx; + ioc->ctl_tm_cb_idx = ctl_tm_cb_idx; + ioc->base_cb_idx = base_cb_idx; + ioc->port_enable_cb_idx = port_enable_cb_idx; + ioc->transport_cb_idx = transport_cb_idx; + ioc->scsih_cb_idx = scsih_cb_idx; + ioc->config_cb_idx = config_cb_idx; + ioc->tm_tr_cb_idx = tm_tr_cb_idx; + ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; + ioc->tm_tr_internal_cb_idx = tm_tr_internal_cb_idx; + ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; + + ioc->logging_level = logging_level; + ioc->schedule_dead_ioc_flush_running_cmds = + &leapioraid_scsihost_flush_running_cmds; + ioc->open_pcie_trace = open_pcie_trace; + ioc->enable_sdev_max_qd = 0; + ioc->max_shutdown_latency = 6; + ioc->drv_support_bitmap |= 0x00000001; + ioc->drv_support_bitmap |= 0x00000002; + + mutex_init(&ioc->reset_in_progress_mutex); + mutex_init(&ioc->hostdiag_unlock_mutex); + mutex_init(&ioc->pci_access_mutex); + spin_lock_init(&ioc->ioc_reset_in_progress_lock); + spin_lock_init(&ioc->scsi_lookup_lock); + spin_lock_init(&ioc->sas_device_lock); + spin_lock_init(&ioc->sas_node_lock); + spin_lock_init(&ioc->fw_event_lock); + spin_lock_init(&ioc->raid_device_lock); + spin_lock_init(&ioc->scsih_q_internal_lock); + spin_lock_init(&ioc->hba_hot_unplug_lock); + INIT_LIST_HEAD(&ioc->sas_device_list); + INIT_LIST_HEAD(&ioc->port_table_list); + INIT_LIST_HEAD(&ioc->sas_device_init_list); + INIT_LIST_HEAD(&ioc->sas_expander_list); + INIT_LIST_HEAD(&ioc->enclosure_list); + INIT_LIST_HEAD(&ioc->fw_event_list); + INIT_LIST_HEAD(&ioc->raid_device_list); + INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); + INIT_LIST_HEAD(&ioc->delayed_tr_list); + INIT_LIST_HEAD(&ioc->delayed_sc_list); + INIT_LIST_HEAD(&ioc->delayed_event_ack_list); + INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + INIT_LIST_HEAD(&ioc->delayed_internal_tm_list); + INIT_LIST_HEAD(&ioc->scsih_q_intenal_cmds); + INIT_LIST_HEAD(&ioc->reply_queue_list); + sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); + + shost->max_cmd_len = 32; + shost->max_lun = 8; + shost->transportt = leapioraid_transport_template; + shost->unique_id = ioc->id; + + ioc->drv_internal_flags |= LEAPIORAID_DRV_INTERNAL_BITMAP_BLK_MQ; + + ioc->disable_eedp_support = 1; + snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), + "fw_event_%s%u", ioc->driver_name, ioc->id); + ioc->firmware_event_thread = + alloc_ordered_workqueue(ioc->firmware_event_name, 0); + if (!ioc->firmware_event_thread) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto out_thread_fail; + } + + shost->host_tagset = 0; + ioc->is_driver_loading = 1; + if ((leapioraid_base_attach(ioc))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto out_attach_fail; + } + ioc->hide_drives 
= 0; + + shost->nr_hw_queues = 1; + rv = scsi_add_host(shost, &pdev->dev); + if (rv) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + spin_lock(&leapioraid_gioc_lock); + list_del(&ioc->list); + spin_unlock(&leapioraid_gioc_lock); + goto out_add_shost_fail; + } + + scsi_scan_host(shost); + + return 0; +out_add_shost_fail: + leapioraid_base_detach(ioc); +out_attach_fail: + destroy_workqueue(ioc->firmware_event_thread); +out_thread_fail: + spin_lock(&leapioraid_gioc_lock); + list_del(&ioc->list); + spin_unlock(&leapioraid_gioc_lock); + scsi_host_put(shost); + return rv; +} + +#ifdef CONFIG_PM +static int +leapioraid_scsihost_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + pci_power_t device_state; + int rc; + + rc = leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc); + if (rc) { + dev_err(&pdev->dev, "unable to suspend device\n"); + return rc; + } + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + scsi_block_requests(shost); + device_state = pci_choose_state(pdev, state); + leapioraid_scsihost_ir_shutdown(ioc); + pr_info("%s pdev=0x%p, slot=%s, entering operating state [D%d]\n", + ioc->name, pdev, + pci_name(pdev), device_state); + pci_save_state(pdev); + leapioraid_base_free_resources(ioc); + pci_set_power_state(pdev, device_state); + return 0; +} + +static int +leapioraid_scsihost_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + pci_power_t device_state = pdev->current_state; + int r; + + r = leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc); + if (r) { + dev_err(&pdev->dev, "unable to resume device\n"); + return r; + } + pr_info("%s pdev=0x%p, slot=%s, previous operating state [D%d]\n", + ioc->name, pdev, + pci_name(pdev), device_state); + pci_set_power_state(pdev, PCI_D0); + pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + ioc->pdev = pdev; + r = leapioraid_base_map_resources(ioc); + if (r) + return r; + pr_err("%s issuing hard reset as part of OS resume\n", + ioc->name); + leapioraid_base_hard_reset_handler(ioc, SOFT_RESET); + scsi_unblock_requests(shost); + leapioraid_base_start_watchdog(ioc); + leapioraid_base_start_hba_unplug_watchdog(ioc); + return 0; +} +#endif + +static pci_ers_result_t +leapioraid_scsihost_pci_error_detected( + struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "device unavailable\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pr_err("%s PCI error: detected callback, state(%d)!!\n", + ioc->name, state); + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + ioc->pci_error_recovery = 1; + scsi_block_requests(ioc->shost); + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + leapioraid_base_free_resources(ioc); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + ioc->pci_error_recovery = 1; + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_flush_running_cmds(ioc); + return PCI_ERS_RESULT_DISCONNECT; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t +leapioraid_scsihost_pci_slot_reset(struct pci_dev *pdev) +{ + struct 
Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + int rc; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to perform slot reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pr_err("%s PCI error: slot reset callback!!\n", + ioc->name); + ioc->pci_error_recovery = 0; + ioc->pdev = pdev; + pci_restore_state(pdev); + rc = leapioraid_base_map_resources(ioc); + if (rc) + return PCI_ERS_RESULT_DISCONNECT; + pr_info("%s issuing hard reset as part of PCI slot reset\n", + ioc->name); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + pr_info("%s hard reset: %s\n", + ioc->name, (rc == 0) ? "success" : "failed"); + if (!rc) + return PCI_ERS_RESULT_RECOVERED; + else + return PCI_ERS_RESULT_DISCONNECT; +} + +static void +leapioraid_scsihost_pci_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to resume device\n"); + return; + } + pr_err("%s PCI error: resume callback!!\n", + ioc->name); + + pci_aer_clear_nonfatal_status(pdev); + + leapioraid_base_start_watchdog(ioc); + leapioraid_base_start_hba_unplug_watchdog(ioc); + scsi_unblock_requests(ioc->shost); +} + +static pci_ers_result_t +leapioraid_scsihost_pci_mmio_enabled(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to enable mmio\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pr_err("%s: PCI error: mmio enabled callback!!!\n", + ioc->name); + return PCI_ERS_RESULT_RECOVERED; +} + +u8 leapioraid_scsihost_ncq_prio_supp(struct scsi_device *sdev) +{ + u8 ncq_prio_supp = 0; + + struct scsi_vpd *vpd; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (!vpd || vpd->len < 214) + goto out; + ncq_prio_supp = (vpd->data[213] >> 4) & 1; +out: + rcu_read_unlock(); + return ncq_prio_supp; +} + +static const struct pci_device_id leapioraid_pci_table[] = { + { 0x1556, 0x1111, PCI_ANY_ID, PCI_ANY_ID }, + { LEAPIORAID_VENDOR_ID, LEAPIORAID_DEVICE_ID_1, PCI_ANY_ID, PCI_ANY_ID }, + { LEAPIORAID_VENDOR_ID, LEAPIORAID_DEVICE_ID_2, PCI_ANY_ID, PCI_ANY_ID }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, leapioraid_pci_table); +static struct pci_error_handlers leapioraid_err_handler = { + .error_detected = leapioraid_scsihost_pci_error_detected, + .mmio_enabled = leapioraid_scsihost_pci_mmio_enabled, + .slot_reset = leapioraid_scsihost_pci_slot_reset, + .resume = leapioraid_scsihost_pci_resume, +}; + +static struct pci_driver leapioraid_driver = { + .name = LEAPIORAID_DRIVER_NAME, + .id_table = leapioraid_pci_table, + .probe = leapioraid_scsihost_probe, + .remove = leapioraid_scsihost_remove, + .shutdown = leapioraid_scsihost_shutdown, + .err_handler = &leapioraid_err_handler, +#ifdef CONFIG_PM + .suspend = leapioraid_scsihost_suspend, + .resume = leapioraid_scsihost_resume, +#endif +}; + +static int +leapioraid_scsihost_init(void) +{ + leapioraid_ids = 0; + leapioraid_base_initialize_callback_handler(); + + scsi_io_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_io_done); + tm_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_tm_done); + base_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_base_done); + port_enable_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_port_enable_done); + 
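/*
+	 * Each registration returns a callback index; probe() copies the
+	 * indexes into the adapter so the reply path can dispatch
+	 * completions to the matching handler.
+	 */
+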
transport_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_transport_done); + scsih_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_done); + config_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_config_done); + ctl_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_ctl_done); + ctl_tm_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_ctl_tm_done); + tm_tr_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_tm_tr_complete); + tm_tr_volume_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_tm_volume_tr_complete); + tm_tr_internal_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_tm_internal_tr_complete); + tm_sas_control_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_sas_control_complete); + + return 0; +} + +static void +leapioraid_scsihost_exit(void) +{ + leapioraid_base_release_callback_handler(scsi_io_cb_idx); + leapioraid_base_release_callback_handler(tm_cb_idx); + leapioraid_base_release_callback_handler(base_cb_idx); + leapioraid_base_release_callback_handler(port_enable_cb_idx); + leapioraid_base_release_callback_handler(transport_cb_idx); + leapioraid_base_release_callback_handler(scsih_cb_idx); + leapioraid_base_release_callback_handler(config_cb_idx); + leapioraid_base_release_callback_handler(ctl_cb_idx); + leapioraid_base_release_callback_handler(ctl_tm_cb_idx); + leapioraid_base_release_callback_handler(tm_tr_cb_idx); + leapioraid_base_release_callback_handler(tm_tr_volume_cb_idx); + leapioraid_base_release_callback_handler(tm_tr_internal_cb_idx); + leapioraid_base_release_callback_handler(tm_sas_control_cb_idx); + + raid_class_release(leapioraid_raid_template); + sas_release_transport(leapioraid_transport_template); +} + +static int __init leapioraid_init(void) +{ + int error; + + pr_info("%s version %s loaded\n", LEAPIORAID_DRIVER_NAME, + LEAPIORAID_DRIVER_VERSION); + leapioraid_transport_template = + sas_attach_transport(&leapioraid_transport_functions); + + if (!leapioraid_transport_template) + return -ENODEV; + + leapioraid_raid_template = + raid_class_attach(&leapioraid_raid_functions); + if (!leapioraid_raid_template) { + sas_release_transport(leapioraid_transport_template); + return -ENODEV; + } + + error = leapioraid_scsihost_init(); + if (error) { + leapioraid_scsihost_exit(); + return error; + } + leapioraid_ctl_init(); + error = pci_register_driver(&leapioraid_driver); + if (error) + leapioraid_scsihost_exit(); + return error; +} + +static void __exit leapioraid_exit(void) +{ + pr_info("leapioraid_ids version %s unloading\n", + LEAPIORAID_DRIVER_VERSION); + leapioraid_ctl_exit(); + pci_unregister_driver(&leapioraid_driver); + leapioraid_scsihost_exit(); +} + +module_init(leapioraid_init); +module_exit(leapioraid_exit); diff --git a/drivers/scsi/leapioraid/leapioraid_transport.c b/drivers/scsi/leapioraid/leapioraid_transport.c new file mode 100644 index 0000000000000000000000000000000000000000..e7ff263a8b6e71e3bd0926faa0c1539162bc7541 --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_transport.c @@ -0,0 +1,1926 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SAS Transport Layer for MPT (Message Passing Technology) based controllers + * + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "leapioraid_func.h" + +static +struct leapioraid_raid_sas_node *leapioraid_transport_sas_node_find_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, struct leapioraid_hba_port *port) +{ + if (ioc->sas_hba.sas_address == sas_address) + return &ioc->sas_hba; + else + return leapioraid_scsihost_expander_find_by_sas_address(ioc, + sas_address, + port); +} + +static inline u8 +leapioraid_transport_get_port_id_by_sas_phy(struct sas_phy *phy) +{ + u8 port_id = 0xFF; + struct leapioraid_hba_port *port = phy->hostdata; + + if (port) + port_id = port->port_id; + else + BUG(); + return port_id; +} + +static int +leapioraid_transport_find_parent_node( + struct LEAPIORAID_ADAPTER *ioc, struct sas_phy *phy) +{ + unsigned long flags; + struct leapioraid_hba_port *port = phy->hostdata; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (leapioraid_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address, + port) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return 0; +} + +static u8 +leapioraid_transport_get_port_id_by_rphy(struct LEAPIORAID_ADAPTER *ioc, + struct sas_rphy *rphy) +{ + struct leapioraid_raid_sas_node *sas_expander; + struct leapioraid_sas_device *sas_device; + unsigned long flags; + u8 port_id = 0xFF; + + if (!rphy) + return port_id; + if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE || + rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->rphy == 
rphy) { + port_id = sas_expander->port->port_id; + break; + } + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + } else if (rphy->identify.device_type == SAS_END_DEVICE) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr_and_rphy( + ioc, rphy->identify.sas_address, rphy); + if (sas_device) { + port_id = sas_device->port->port_id; + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + return port_id; +} + +static enum sas_linkrate +leapioraid_transport_convert_phy_link_rate(u8 link_rate) +{ + enum sas_linkrate rc; + + switch (link_rate) { + case LEAPIORAID_SAS_NEG_LINK_RATE_1_5: + rc = SAS_LINK_RATE_1_5_GBPS; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_3_0: + rc = SAS_LINK_RATE_3_0_GBPS; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_6_0: + rc = SAS_LINK_RATE_6_0_GBPS; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_12_0: + rc = SAS_LINK_RATE_12_0_GBPS; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_PHY_DISABLED: + rc = SAS_PHY_DISABLED; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED: + rc = SAS_LINK_RATE_FAILED; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_PORT_SELECTOR: + rc = SAS_SATA_PORT_SELECTOR; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS: + default: + case LEAPIORAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE: + case LEAPIORAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE: + rc = SAS_LINK_RATE_UNKNOWN; + break; + } + return rc; +} + +static int +leapioraid_transport_set_identify( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + struct sas_identify *identify) +{ + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 device_info; + u32 ioc_status; + + if ((ioc->shost_recovery && !ioc->is_driver_loading) + || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n", + ioc->name, handle, + ioc_status, __FILE__, __LINE__, __func__); + return -EIO; + } + memset(identify, 0, sizeof(struct sas_identify)); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + identify->phy_identifier = sas_device_pg0.PhyNum; + switch (device_info & LEAPIORAID_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { + case LEAPIORAID_SAS_DEVICE_INFO_NO_DEVICE: + identify->device_type = SAS_PHY_UNUSED; + break; + case LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE: + identify->device_type = SAS_END_DEVICE; + break; + case LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER: + identify->device_type = SAS_EDGE_EXPANDER_DEVICE; + break; + case LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER: + identify->device_type = SAS_FANOUT_EXPANDER_DEVICE; + break; + } + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SMP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & 
LEAPIORAID_SAS_DEVICE_INFO_SATA_HOST) + identify->initiator_port_protocols |= SAS_PROTOCOL_SATA; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SMP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + identify->target_port_protocols |= SAS_PROTOCOL_SATA; + return 0; +} + +u8 +leapioraid_transport_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (ioc->transport_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->transport_cmds.smid != smid) + return 1; + ioc->transport_cmds.status |= LEAPIORAID_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->transport_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + ioc->transport_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + } + ioc->transport_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->transport_cmds.done); + return 1; +} + +#if defined(LEAPIORAID_WIDE_PORT_API) +struct leapioraid_rep_manu_request { + u8 smp_frame_type; + u8 function; + u8 reserved; + u8 request_length; +}; + +struct leapioraid_rep_manu_reply { + u8 smp_frame_type; + u8 function; + u8 function_result; + u8 response_length; + u16 expander_change_count; + u8 reserved0[2]; + u8 sas_format; + u8 reserved2[3]; + u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; + u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; + u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; + u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; + u16 component_id; + u8 component_revision_id; + u8 reserved3; + u8 vendor_specific[8]; +}; + +static int +leapioraid_transport_expander_report_manufacture( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct sas_expander_device *edev, + u8 port_id) +{ + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + struct leapioraid_rep_manu_reply *manufacture_reply; + struct leapioraid_rep_manu_request *manufacture_request; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + dma_addr_t data_in_dma; + size_t data_in_sz; + size_t data_out_sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + mutex_lock(&ioc->transport_cmds.mutex); + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = 0; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + data_out_sz = sizeof(struct leapioraid_rep_manu_request); + data_in_sz = sizeof(struct leapioraid_rep_manu_reply); + data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, + &data_out_dma, GFP_ATOMIC); + if (!data_out) { + rc = -ENOMEM; + leapioraid_base_free_smid(ioc, 
smid); + goto out; + } + data_in_dma = data_out_dma + sizeof(struct leapioraid_rep_manu_request); + manufacture_request = data_out; + manufacture_request->smp_frame_type = 0x40; + manufacture_request->function = 1; + manufacture_request->reserved = 0; + manufacture_request->request_length = 0; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = port_id; + mpi_request->SASAddress = cpu_to_le64(sas_address); + mpi_request->RequestDataLength = cpu_to_le16(data_out_sz); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + dtransportprintk(ioc, + pr_info("%s report_manufacture - send to sas_addr(0x%016llx)\n", + ioc->name, + (unsigned long long)sas_address)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + dtransportprintk(ioc, + pr_info("%s report_manufacture - complete\n", ioc->name)); + if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + u8 *tmp; + + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, pr_err( + "%s report_manufacture - reply data transfer size(%d)\n", + ioc->name, + le16_to_cpu(mpi_reply->ResponseDataLength))); + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct leapioraid_rep_manu_reply)) + goto out; + manufacture_reply = data_out + sizeof(struct leapioraid_rep_manu_request); + strscpy(edev->vendor_id, manufacture_reply->vendor_id, + sizeof(edev->vendor_id)); + strscpy(edev->product_id, manufacture_reply->product_id, + sizeof(edev->product_id)); + strscpy(edev->product_rev, manufacture_reply->product_rev, + sizeof(edev->product_rev)); + edev->level = manufacture_reply->sas_format & 1; + if (edev->level) { + strscpy(edev->component_vendor_id, + manufacture_reply->component_vendor_id, + sizeof(edev->component_vendor_id)); + tmp = (u8 *) &manufacture_reply->component_id; + edev->component_id = tmp[0] << 8 | tmp[1]; + edev->component_revision_id = + manufacture_reply->component_revision_id; + } + } else + dtransportprintk(ioc, pr_err( + "%s report_manufacture - no reply\n", + ioc->name)); +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, + data_out, data_out_dma); + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} +#endif + +static void +leapioraid_transport_delete_port(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_port *leapioraid_port) +{ + u64 sas_address = leapioraid_port->remote_identify.sas_address; + struct leapioraid_hba_port *port = leapioraid_port->hba_port; + enum sas_device_type device_type = + leapioraid_port->remote_identify.device_type; + +#if defined(LEAPIORAID_WIDE_PORT_API) + dev_info(&leapioraid_port->port->dev, + "remove: sas_addr(0x%016llx)\n", + (unsigned long long)sas_address); +#endif + ioc->logging_level |= LEAPIORAID_DEBUG_TRANSPORT; + if (device_type == SAS_END_DEVICE) + leapioraid_device_remove_by_sas_address(ioc, sas_address, 
port); + else if (device_type == SAS_EDGE_EXPANDER_DEVICE || + device_type == SAS_FANOUT_EXPANDER_DEVICE) + leapioraid_expander_remove(ioc, sas_address, port); + ioc->logging_level &= ~LEAPIORAID_DEBUG_TRANSPORT; +} + +#if defined(LEAPIORAID_WIDE_PORT_API) +static void +leapioraid_transport_delete_phy(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_port *leapioraid_port, + struct leapioraid_sas_phy *leapioraid_phy) +{ + u64 sas_address = leapioraid_port->remote_identify.sas_address; + + dev_info(&leapioraid_phy->phy->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long)sas_address, leapioraid_phy->phy_id); + list_del(&leapioraid_phy->port_siblings); + leapioraid_port->num_phys--; + sas_port_delete_phy(leapioraid_port->port, leapioraid_phy->phy); + leapioraid_phy->phy_belongs_to_port = 0; +} + +static void +leapioraid_transport_add_phy(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_port *leapioraid_port, + struct leapioraid_sas_phy *leapioraid_phy) +{ + u64 sas_address = leapioraid_port->remote_identify.sas_address; + + dev_info(&leapioraid_phy->phy->dev, + "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long) + sas_address, leapioraid_phy->phy_id); + list_add_tail(&leapioraid_phy->port_siblings, + &leapioraid_port->phy_list); + leapioraid_port->num_phys++; + sas_port_add_phy(leapioraid_port->port, leapioraid_phy->phy); + leapioraid_phy->phy_belongs_to_port = 1; +} + +void +leapioraid_transport_add_phy_to_an_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy *leapioraid_phy, + u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_port *leapioraid_port; + struct leapioraid_sas_phy *phy_srch; + + if (leapioraid_phy->phy_belongs_to_port == 1) + return; + if (!port) + return; + list_for_each_entry(leapioraid_port, &sas_node->sas_port_list, + port_list) { + if (leapioraid_port->remote_identify.sas_address != sas_address) + continue; + if (leapioraid_port->hba_port != port) + continue; + list_for_each_entry(phy_srch, &leapioraid_port->phy_list, + port_siblings) { + if (phy_srch == leapioraid_phy) + return; + } + leapioraid_transport_add_phy(ioc, leapioraid_port, leapioraid_phy); + return; + } +} +#endif + +void +leapioraid_transport_del_phy_from_an_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy *leapioraid_phy) +{ + struct leapioraid_sas_port *leapioraid_port, *next; + struct leapioraid_sas_phy *phy_srch; + + if (leapioraid_phy->phy_belongs_to_port == 0) + return; + list_for_each_entry_safe(leapioraid_port, next, + &sas_node->sas_port_list, port_list) { + list_for_each_entry(phy_srch, &leapioraid_port->phy_list, + port_siblings) { + if (phy_srch != leapioraid_phy) + continue; +#if defined(LEAPIORAID_WIDE_PORT_API) + if (leapioraid_port->num_phys == 1 + && !ioc->shost_recovery) + leapioraid_transport_delete_port(ioc, leapioraid_port); + else + leapioraid_transport_delete_phy(ioc, leapioraid_port, + leapioraid_phy); +#else + leapioraid_transport_delete_port(ioc, leapioraid_port); +#endif + return; + } + } +} + +static void +leapioraid_transport_sanity_check( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, u64 sas_address, + struct leapioraid_hba_port *port) +{ + int i; + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != sas_address + || sas_node->phy[i].port != port) + continue; + if 
(sas_node->phy[i].phy_belongs_to_port == 1) + leapioraid_transport_del_phy_from_an_existing_port(ioc, + sas_node, + &sas_node->phy + [i]); + } +} + +struct leapioraid_sas_port *leapioraid_transport_port_add( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u64 sas_address, + struct leapioraid_hba_port *hba_port) +{ + struct leapioraid_sas_phy *leapioraid_phy, *next; + struct leapioraid_sas_port *leapioraid_port; + unsigned long flags; + struct leapioraid_raid_sas_node *sas_node; + struct sas_rphy *rphy; + struct leapioraid_sas_device *sas_device = NULL; + int i; +#if defined(LEAPIORAID_WIDE_PORT_API) + struct sas_port *port; +#endif + struct leapioraid_virtual_phy *vphy = NULL; + + if (!hba_port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return NULL; + } + leapioraid_port = kzalloc(sizeof(struct leapioraid_sas_port), GFP_KERNEL); + if (!leapioraid_port) + return NULL; + INIT_LIST_HEAD(&leapioraid_port->port_list); + INIT_LIST_HEAD(&leapioraid_port->phy_list); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = leapioraid_transport_sas_node_find_by_sas_address( + ioc, + sas_address, + hba_port); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (!sas_node) { + pr_err("%s %s: Could not find parent sas_address(0x%016llx)!\n", + ioc->name, + __func__, (unsigned long long)sas_address); + goto out_fail; + } + if ((leapioraid_transport_set_identify(ioc, handle, + &leapioraid_port->remote_identify))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + if (leapioraid_port->remote_identify.device_type == SAS_PHY_UNUSED) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + leapioraid_port->hba_port = hba_port; + leapioraid_transport_sanity_check(ioc, sas_node, + leapioraid_port->remote_identify.sas_address, + hba_port); + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != + leapioraid_port->remote_identify.sas_address || + sas_node->phy[i].port != hba_port) + continue; + list_add_tail(&sas_node->phy[i].port_siblings, + &leapioraid_port->phy_list); + leapioraid_port->num_phys++; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if (!sas_node->phy[i].hba_vphy) { + hba_port->phy_mask |= (1 << i); + continue; + } + vphy = leapioraid_get_vphy_by_phy(ioc, hba_port, i); + if (!vphy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + } + } + if (!leapioraid_port->num_phys) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device = leapioraid_get_sdev_by_addr(ioc, + leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + if (!sas_device) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + sas_device->pend_sas_rphy_add = 1; + } +#if defined(LEAPIORAID_WIDE_PORT_API) + if (!sas_node->parent_dev) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + port = sas_port_alloc_num(sas_node->parent_dev); + if ((sas_port_add(port))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + list_for_each_entry(leapioraid_phy, &leapioraid_port->phy_list, + port_siblings) { + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) 
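+		/* transport debug: trace each phy as it is attached to this wide port */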
+ dev_info(&port->dev, + "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n", + handle, + (unsigned long long) + leapioraid_port->remote_identify.sas_address, + leapioraid_phy->phy_id); + sas_port_add_phy(port, leapioraid_phy->phy); + leapioraid_phy->phy_belongs_to_port = 1; + leapioraid_phy->port = hba_port; + } + leapioraid_port->port = port; + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + rphy = sas_end_device_alloc(port); + sas_device->rphy = rphy; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if (!vphy) + hba_port->sas_address = sas_device->sas_address; + else + vphy->sas_address = sas_device->sas_address; + } + } else { + rphy = sas_expander_alloc(port, + leapioraid_port->remote_identify.device_type); + if (sas_node->handle <= ioc->sas_hba.num_phys) + hba_port->sas_address = + leapioraid_port->remote_identify.sas_address; + } +#else + leapioraid_phy = + list_entry(leapioraid_port->phy_list.next, struct leapioraid_sas_phy, + port_siblings); + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + rphy = sas_end_device_alloc(leapioraid_phy->phy); + sas_device->rphy = rphy; + } else + rphy = sas_expander_alloc(leapioraid_phy->phy, + leapioraid_port->remote_identify.device_type); +#endif + rphy->identify = leapioraid_port->remote_identify; + if ((sas_rphy_add(rphy))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + } + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device->pend_sas_rphy_add = 0; + leapioraid_sas_device_put(sas_device); + } + dev_info(&rphy->dev, + "%s: added: handle(0x%04x), sas_addr(0x%016llx)\n", + __func__, handle, (unsigned long long) + leapioraid_port->remote_identify.sas_address); + leapioraid_port->rphy = rphy; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&leapioraid_port->port_list, &sas_node->sas_port_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +#if defined(LEAPIORAID_WIDE_PORT_API) + if (leapioraid_port->remote_identify.device_type == + LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER || + leapioraid_port->remote_identify.device_type == + LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER) + leapioraid_transport_expander_report_manufacture(ioc, + leapioraid_port->remote_identify.sas_address, + rphy_to_expander_device + (rphy), + hba_port->port_id); +#endif + return leapioraid_port; +out_fail: + list_for_each_entry_safe(leapioraid_phy, next, + &leapioraid_port->phy_list, port_siblings) + list_del(&leapioraid_phy->port_siblings); + kfree(leapioraid_port); + return NULL; +} + +void +leapioraid_transport_port_remove(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u64 sas_address_parent, + struct leapioraid_hba_port *port) +{ + int i; + unsigned long flags; + struct leapioraid_sas_port *leapioraid_port, *next; + struct leapioraid_raid_sas_node *sas_node; + u8 found = 0; +#if defined(LEAPIORAID_WIDE_PORT_API) + struct leapioraid_sas_phy *leapioraid_phy, *next_phy; +#endif + struct leapioraid_hba_port *hba_port, *hba_port_next = NULL; + struct leapioraid_virtual_phy *vphy, *vphy_next = NULL; + + if (!port) + return; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = leapioraid_transport_sas_node_find_by_sas_address( + ioc, + sas_address_parent, + port); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + list_for_each_entry_safe(leapioraid_port, next, + &sas_node->sas_port_list, port_list) { + if (leapioraid_port->remote_identify.sas_address != sas_address) + continue; + if 
(leapioraid_port->hba_port != port) + continue; + found = 1; + list_del(&leapioraid_port->port_list); + goto out; + } +out: + if (!found) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + if ((sas_node->handle <= ioc->sas_hba.num_phys) && + (ioc->multipath_on_hba)) { + if (port->vphys_mask) { + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + if (vphy->sas_address != sas_address) + continue; + pr_err( + "%s remove vphy entry: %p of port:%p,\n\t\t" + "from %d port's vphys list\n", + ioc->name, + vphy, + port, + port->port_id); + port->vphys_mask &= ~vphy->phy_mask; + list_del(&vphy->list); + kfree(vphy); + } + if (!port->vphys_mask && !port->sas_address) { + pr_err( + "%s remove hba_port entry: %p port: %d\n\t\t" + "from hba_port list\n", + ioc->name, + port, + port->port_id); + list_del(&port->list); + kfree(port); + } + } + list_for_each_entry_safe(hba_port, hba_port_next, + &ioc->port_table_list, list) { + if (hba_port != port) + continue; + if (hba_port->sas_address != sas_address) + continue; + if (!port->vphys_mask) { + pr_err( + "%s remove hba_port entry: %p port: %d\n\t\t" + "from hba_port list\n", + ioc->name, + hba_port, + hba_port->port_id); + list_del(&hba_port->list); + kfree(hba_port); + } else { + pr_err( + "%s clearing sas_address from hba_port entry: %p\n\t\t" + "port: %d from hba_port list\n", + ioc->name, + hba_port, + hba_port->port_id); + port->sas_address = 0; + } + break; + } + } + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address == sas_address) { + memset(&sas_node->phy[i].remote_identify, 0, + sizeof(struct sas_identify)); + sas_node->phy[i].hba_vphy = 0; + } + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +#if defined(LEAPIORAID_WIDE_PORT_API) + list_for_each_entry_safe(leapioraid_phy, next_phy, + &leapioraid_port->phy_list, port_siblings) { + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&leapioraid_port->port->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) + leapioraid_port->remote_identify.sas_address, + leapioraid_phy->phy_id); + leapioraid_phy->phy_belongs_to_port = 0; + if (!ioc->remove_host) + sas_port_delete_phy(leapioraid_port->port, + leapioraid_phy->phy); + list_del(&leapioraid_phy->port_siblings); + } + if (!ioc->remove_host) + sas_port_delete(leapioraid_port->port); + pr_info("%s %s: removed: sas_addr(0x%016llx)\n", + ioc->name, __func__, (unsigned long long)sas_address); +#else + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&leapioraid_port->rphy->dev, + "remove: sas_addr(0x%016llx)\n", + (unsigned long long)sas_address); + if (!ioc->remove_host) + sas_rphy_delete(leapioraid_port->rphy); + pr_info("%s %s: removed: sas_addr(0x%016llx)\n", + ioc->name, __func__, (unsigned long long)sas_address); +#endif + kfree(leapioraid_port); +} + +int +leapioraid_transport_add_host_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidSasPhyP0_t phy_pg0, + struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = leapioraid_phy->phy_id; + + INIT_LIST_HEAD(&leapioraid_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if ((leapioraid_transport_set_identify(ioc, leapioraid_phy->handle, + &leapioraid_phy->identify))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + 
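+		/* identify data could not be read; drop the phy allocated above before failing */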
sas_phy_free(phy); + return -1; + } + phy->identify = leapioraid_phy->identify; + leapioraid_phy->attached_handle = + le16_to_cpu(phy_pg0.AttachedDevHandle); + if (leapioraid_phy->attached_handle) + leapioraid_transport_set_identify( + ioc, leapioraid_phy->attached_handle, + &leapioraid_phy->remote_identify); + phy->identify.phy_identifier = leapioraid_phy->phy_id; + phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & + LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.HwLinkRate & + LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.HwLinkRate >> 4); + phy->minimum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & + LEAPIORAID_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + phy->hostdata = leapioraid_phy->port; +#if !defined(LEAPIORAID_WIDE_PORT_API_PLUS) + phy->local_attached = 1; +#endif +#if !defined(LEAPIORAID_WIDE_PORT_API) + phy->port_identifier = phy_index; +#endif + if ((sas_phy_add(phy))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + leapioraid_phy->handle, (unsigned long long) + leapioraid_phy->identify.sas_address, + leapioraid_phy->attached_handle, (unsigned long long) + leapioraid_phy->remote_identify.sas_address); + leapioraid_phy->phy = phy; + return 0; +} + +int +leapioraid_transport_add_expander_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidExpanderP1_t expander_pg1, + struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = leapioraid_phy->phy_id; + + INIT_LIST_HEAD(&leapioraid_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if ((leapioraid_transport_set_identify(ioc, leapioraid_phy->handle, + &leapioraid_phy->identify))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = leapioraid_phy->identify; + leapioraid_phy->attached_handle = + le16_to_cpu(expander_pg1.AttachedDevHandle); + if (leapioraid_phy->attached_handle) + leapioraid_transport_set_identify( + ioc, leapioraid_phy->attached_handle, + &leapioraid_phy->remote_identify); + phy->identify.phy_identifier = leapioraid_phy->phy_id; + phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.NegotiatedLinkRate & + LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.HwLinkRate & + LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.HwLinkRate >> 4); + phy->minimum_linkrate = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate & + LEAPIORAID_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate >> 4); + phy->hostdata = leapioraid_phy->port; +#if 
!defined(LEAPIORAID_WIDE_PORT_API) + phy->port_identifier = phy_index; +#endif + if ((sas_phy_add(phy))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + leapioraid_phy->handle, (unsigned long long) + leapioraid_phy->identify.sas_address, + leapioraid_phy->attached_handle, (unsigned long long) + leapioraid_phy->remote_identify.sas_address); + leapioraid_phy->phy = phy; + return 0; +} + +void +leapioraid_transport_update_links(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u16 handle, u8 phy_number, + u8 link_rate, struct leapioraid_hba_port *port) +{ + unsigned long flags; + struct leapioraid_raid_sas_node *sas_node; + struct leapioraid_sas_phy *leapioraid_phy; + struct leapioraid_hba_port *hba_port = NULL; + + if (ioc->shost_recovery || ioc->pci_error_recovery) + return; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = leapioraid_transport_sas_node_find_by_sas_address(ioc, + sas_address, port); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + leapioraid_phy = &sas_node->phy[phy_number]; + leapioraid_phy->attached_handle = handle; + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (handle && (link_rate >= LEAPIORAID_SAS_NEG_LINK_RATE_1_5)) { + leapioraid_transport_set_identify(ioc, handle, + &leapioraid_phy->remote_identify); +#if defined(LEAPIORAID_WIDE_PORT_API) + if ((sas_node->handle <= ioc->sas_hba.num_phys) && + (ioc->multipath_on_hba)) { + list_for_each_entry(hba_port, + &ioc->port_table_list, list) { + if (hba_port->sas_address == sas_address && + hba_port == port) + hba_port->phy_mask |= + (1 << leapioraid_phy->phy_id); + } + } + leapioraid_transport_add_phy_to_an_existing_port(ioc, sas_node, + leapioraid_phy, + leapioraid_phy->remote_identify.sas_address, + port); +#endif + } else + memset(&leapioraid_phy->remote_identify, 0, sizeof(struct + sas_identify)); + if (leapioraid_phy->phy) + leapioraid_phy->phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate(link_rate); + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&leapioraid_phy->phy->dev, + "refresh: parent sas_addr(0x%016llx),\n" + "\tlink_rate(0x%02x), phy(%d)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + (unsigned long long)sas_address, + link_rate, phy_number, handle, (unsigned long long) + leapioraid_phy->remote_identify.sas_address); +} + +static inline void *phy_to_ioc(struct sas_phy *phy) +{ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + + return leapioraid_shost_private(shost); +} + +static inline void *rphy_to_ioc(struct sas_rphy *rphy) +{ + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); + + return leapioraid_shost_private(shost); +} + +struct leapioraid_phy_error_log_request { + u8 smp_frame_type; + u8 function; + u8 allocated_response_length; + u8 request_length; + u8 reserved_1[5]; + u8 phy_identifier; + u8 reserved_2[2]; +}; + +struct leapioraid_phy_error_log_reply { + u8 smp_frame_type; + u8 function; + u8 function_result; + u8 response_length; + __be16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 reserved_2[2]; + __be32 invalid_dword; + __be32 running_disparity_error; + __be32 loss_of_dword_sync; + __be32 phy_reset_problem; +}; + +static int +leapioraid_transport_get_expander_phy_error_log( 
+ struct LEAPIORAID_ADAPTER *ioc, struct sas_phy *phy) +{ + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + struct leapioraid_phy_error_log_request *phy_error_log_request; + struct leapioraid_phy_error_log_reply *phy_error_log_reply; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + mutex_lock(&ioc->transport_cmds.mutex); + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + sz = sizeof(struct leapioraid_phy_error_log_request) + + sizeof(struct leapioraid_phy_error_log_reply); + data_out = + dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, + GFP_ATOMIC); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + rc = -EINVAL; + memset(data_out, 0, sz); + phy_error_log_request = data_out; + phy_error_log_request->smp_frame_type = 0x40; + phy_error_log_request->function = 0x11; + phy_error_log_request->request_length = 2; + phy_error_log_request->allocated_response_length = 0; + phy_error_log_request->phy_identifier = phy->number; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_sas_phy(phy); + mpi_request->VF_ID = 0; + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct leapioraid_phy_error_log_request)); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct leapioraid_phy_error_log_request), + data_out_dma + sizeof(struct leapioraid_phy_error_log_request), + sizeof(struct leapioraid_phy_error_log_reply)); + dtransportprintk(ioc, pr_info( + "%s phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n", + ioc->name, + (unsigned long long)phy->identify.sas_address, + phy->number)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + dtransportprintk(ioc, pr_info("%s phy_error_log - complete\n", ioc->name)); + if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, pr_err( + "%s phy_error_log - reply data transfer size(%d)\n", + ioc->name, + le16_to_cpu(mpi_reply->ResponseDataLength))); + if 
(le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct leapioraid_phy_error_log_reply)) + goto out; + phy_error_log_reply = data_out + + sizeof(struct leapioraid_phy_error_log_request); + dtransportprintk(ioc, pr_err( + "%s phy_error_log - function_result(%d)\n", + ioc->name, + phy_error_log_reply->function_result)); + phy->invalid_dword_count = + be32_to_cpu(phy_error_log_reply->invalid_dword); + phy->running_disparity_error_count = + be32_to_cpu(phy_error_log_reply->running_disparity_error); + phy->loss_of_dword_sync_count = + be32_to_cpu(phy_error_log_reply->loss_of_dword_sync); + phy->phy_reset_problem_count = + be32_to_cpu(phy_error_log_reply->phy_reset_problem); + rc = 0; + } else + dtransportprintk(ioc, pr_err( + "%s phy_error_log - no reply\n", + ioc->name)); +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma); + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +static int +leapioraid_transport_get_linkerrors(struct sas_phy *phy) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasPhyP1_t phy_pg1; + int rc = 0; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return leapioraid_transport_get_expander_phy_error_log(ioc, phy); + if ((leapioraid_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1, + phy->number))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + pr_info("%s phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + phy->number, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount); + phy->running_disparity_error_count = + le32_to_cpu(phy_pg1.RunningDisparityErrorCount); + phy->loss_of_dword_sync_count = + le32_to_cpu(phy_pg1.LossDwordSynchCount); + phy->phy_reset_problem_count = + le32_to_cpu(phy_pg1.PhyResetProblemCount); + return 0; +} + +static int +leapioraid_transport_get_enclosure_identifier( + struct sas_rphy *rphy, u64 *identifier) +{ + struct LEAPIORAID_ADAPTER *ioc = rphy_to_ioc(rphy); + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); + if (sas_device) { + *identifier = sas_device->enclosure_logical_id; + rc = 0; + leapioraid_sas_device_put(sas_device); + } else { + *identifier = 0; + rc = -ENXIO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +static int +leapioraid_transport_get_bay_identifier(struct sas_rphy *rphy) +{ + struct LEAPIORAID_ADAPTER *ioc = rphy_to_ioc(rphy); + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); + if (sas_device) { + rc = sas_device->slot; + leapioraid_sas_device_put(sas_device); + } else { + rc = -ENXIO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +struct leapioraid_phy_control_request { + u8 smp_frame_type; + u8 function; + u8 allocated_response_length; + u8 
request_length; + u16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 phy_operation; + u8 reserved_2[13]; + u64 attached_device_name; + u8 programmed_min_physical_link_rate; + u8 programmed_max_physical_link_rate; + u8 reserved_3[6]; +}; + +struct leapioraid_phy_control_reply { + u8 smp_frame_type; + u8 function; + u8 function_result; + u8 response_length; +}; + +#define LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET (0x01) +#define LEAPIORAID_SMP_PHY_CONTROL_HARD_RESET (0x02) +#define LEAPIORAID_SMP_PHY_CONTROL_DISABLE (0x03) +static int +leapioraid_transport_expander_phy_control( + struct LEAPIORAID_ADAPTER *ioc, + struct sas_phy *phy, u8 phy_operation) +{ + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + struct leapioraid_phy_control_request *phy_control_request; + struct leapioraid_phy_control_reply *phy_control_reply; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + mutex_lock(&ioc->transport_cmds.mutex); + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + sz = sizeof(struct leapioraid_phy_control_request) + + sizeof(struct leapioraid_phy_control_reply); + data_out = + dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, + GFP_ATOMIC); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + rc = -EINVAL; + memset(data_out, 0, sz); + /* build the SMP PHY CONTROL request (frame type 0x40, function 0x91) */ + phy_control_request = data_out; + phy_control_request->smp_frame_type = 0x40; + phy_control_request->function = 0x91; + phy_control_request->request_length = 9; + phy_control_request->allocated_response_length = 0; + phy_control_request->phy_identifier = phy->number; + phy_control_request->phy_operation = phy_operation; + phy_control_request->programmed_min_physical_link_rate = + phy->minimum_linkrate << 4; + phy_control_request->programmed_max_physical_link_rate = + phy->maximum_linkrate << 4; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_sas_phy(phy); + mpi_request->VF_ID = 0; + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct leapioraid_phy_control_request)); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct leapioraid_phy_control_request), + data_out_dma + sizeof(struct leapioraid_phy_control_request), + sizeof(struct leapioraid_phy_control_reply)); + dtransportprintk(ioc, pr_info( + "%s phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", + ioc->name, + (unsigned long long)phy->identify.sas_address, + phy->number,
phy_operation)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + dtransportprintk(ioc, pr_info( + "%s phy_control - complete\n", ioc->name)); + if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, pr_err( + "%s phy_control - reply data transfer size(%d)\n", + ioc->name, + le16_to_cpu(mpi_reply->ResponseDataLength))); + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct leapioraid_phy_control_reply)) + goto out; + phy_control_reply = data_out + + sizeof(struct leapioraid_phy_control_request); + dtransportprintk(ioc, pr_err( + "%s phy_control - function_result(%d)\n", + ioc->name, + phy_control_reply->function_result)); + rc = 0; + } else + dtransportprintk(ioc, pr_err( + "%s phy_control - no reply\n", + ioc->name)); +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma); + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +static int +leapioraid_transport_phy_reset(struct sas_phy *phy, int hard_reset) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidSasIoUnitControlRep_t mpi_reply; + struct LeapioraidSasIoUnitControlReq_t mpi_request; + int rc = 0; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return leapioraid_transport_expander_phy_control(ioc, phy, + (hard_reset == + 1) ? + LEAPIORAID_SMP_PHY_CONTROL_HARD_RESET + : + LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET); + memset(&mpi_request, 0, sizeof(struct LeapioraidSasIoUnitControlReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL; + mpi_request.Operation = hard_reset ? + LEAPIORAID_SAS_OP_PHY_HARD_RESET : LEAPIORAID_SAS_OP_PHY_LINK_RESET; + mpi_request.PhyNum = phy->number; + if ((leapioraid_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + pr_info("%s phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + phy->number, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + return 0; +} + +static int +leapioraid_transport_phy_enable(struct sas_phy *phy, int enable) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u16 sz; + int rc = 0; + int i, discovery_active; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return leapioraid_transport_expander_phy_control(ioc, phy, + (enable == + 1) ? 
+ LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET + : + LEAPIORAID_SMP_PHY_CONTROL_DISABLE); + sz = offsetof(struct LeapioraidSasIOUnitP0_t, + PhyData) + + (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys; i++) { + if (sas_iounit_pg0->PhyData[i].PortFlags & + LEAPIORAID_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) { + pr_err( + "%s discovery is active on port = %d, phy = %d:\n\t\t" + "unable to enable/disable phys, try again later!\n", + ioc->name, + sas_iounit_pg0->PhyData[i].Port, + i); + discovery_active = 1; + } + } + if (discovery_active) { + rc = -EAGAIN; + goto out; + } + sz = offsetof(struct LeapioraidSasIOUnitP1_t, + PhyData) + + (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + sas_iounit_pg1->PhyData[i].Port = + sas_iounit_pg0->PhyData[i].Port; + sas_iounit_pg1->PhyData[i].PortFlags = + (sas_iounit_pg0->PhyData[i].PortFlags & + LEAPIORAID_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG); + sas_iounit_pg1->PhyData[i].PhyFlags = + (sas_iounit_pg0->PhyData[i].PhyFlags & + (LEAPIORAID_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED + + LEAPIORAID_SASIOUNIT0_PHYFLAGS_PHY_DISABLED)); + } + if (enable) + sas_iounit_pg1->PhyData[phy->number].PhyFlags + &= ~LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + else + sas_iounit_pg1->PhyData[phy->number].PhyFlags + |= LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + leapioraid_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, + sz); + if (enable) + leapioraid_transport_phy_reset(phy, 0); +out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); + return rc; +} + +static int +leapioraid_transport_phy_speed( + struct sas_phy *phy, struct sas_phy_linkrates *rates) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL; + struct LeapioraidSasPhyP0_t phy_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u16 sz; + int i; + int rc = 0; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (!rates->minimum_linkrate) + rates->minimum_linkrate = phy->minimum_linkrate; + else if (rates->minimum_linkrate < phy->minimum_linkrate_hw) + rates->minimum_linkrate = phy->minimum_linkrate_hw; + if 
(!rates->maximum_linkrate) + rates->maximum_linkrate = phy->maximum_linkrate; + else if (rates->maximum_linkrate > phy->maximum_linkrate_hw) + rates->maximum_linkrate = phy->maximum_linkrate_hw; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) { + phy->minimum_linkrate = rates->minimum_linkrate; + phy->maximum_linkrate = rates->maximum_linkrate; + return leapioraid_transport_expander_phy_control(ioc, phy, + LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET); + } + sz = offsetof(struct LeapioraidSasIOUnitP1_t, + PhyData) + + (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if (phy->number != i) { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (ioc->sas_hba.phy[i].phy->minimum_linkrate + + (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4)); + } else { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (rates->minimum_linkrate + + (rates->maximum_linkrate << 4)); + } + } + if (leapioraid_config_set_sas_iounit_pg1 + (ioc, &mpi_reply, sas_iounit_pg1, sz)) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + leapioraid_transport_phy_reset(phy, 0); + if (!leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + phy->number)) { + phy->minimum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & + LEAPIORAID_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & + LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + } +out: + kfree(sas_iounit_pg1); + return rc; +} + +static int +leapioraid_transport_map_smp_buffer( + struct device *dev, struct bsg_buffer *buf, + dma_addr_t *dma_addr, size_t *dma_len, void **p) +{ + if (buf->sg_cnt > 1) { + *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr, + GFP_KERNEL); + if (!*p) + return -ENOMEM; + *dma_len = buf->payload_len; + } else { + if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL)) + return -ENOMEM; + *dma_addr = sg_dma_address(buf->sg_list); + *dma_len = sg_dma_len(buf->sg_list); + *p = NULL; + } + return 0; +} + +static void +leapioraid_transport_unmap_smp_buffer( + struct device *dev, struct bsg_buffer *buf, + dma_addr_t dma_addr, void *p) +{ + if (p) + dma_free_coherent(dev, buf->payload_len, p, dma_addr); + else + dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL); +} + +static void +leapioraid_transport_smp_handler( + struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) +{ + struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost); + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + int rc; + u16 smid; + u32 ioc_state; + void *psge; + dma_addr_t dma_addr_in; + dma_addr_t dma_addr_out; + void *addr_in = 
NULL; + void *addr_out = NULL; + size_t dma_len_in; + size_t dma_len_out; + u16 wait_state_count; + unsigned int reslen = 0; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + rc = -EFAULT; + goto job_done; + } + rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); + if (rc) + goto job_done; + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + rc = -EAGAIN; + goto job_done; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_transport_map_smp_buffer( + &ioc->pdev->dev, &job->request_payload, + &dma_addr_out, &dma_len_out, &addr_out); + if (rc) + goto out; + if (addr_out) { + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, addr_out, + job->request_payload.payload_len); + } + rc = leapioraid_transport_map_smp_buffer( + &ioc->pdev->dev, &job->reply_payload, + &dma_addr_in, &dma_len_in, &addr_in); + if (rc) + goto unmap_out; + wait_state_count = 0; + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + while (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err( + "%s %s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto unmap_in; + } + ssleep(1); + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + pr_info( + "%s %s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info("%s %s: ioc is operational\n", + ioc->name, __func__); + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto unmap_in; + } + rc = 0; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_rphy( + ioc, rphy); + mpi_request->SASAddress = (rphy) ? 
+ cpu_to_le64(rphy->identify.sas_address) : + cpu_to_le64(ioc->sas_hba.sas_address); + mpi_request->RequestDataLength = cpu_to_le16(dma_len_out - 4); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in, + dma_len_in - 4); + dtransportprintk(ioc, pr_info( + "%s %s - sending smp request\n", ioc->name, + __func__)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s : timeout\n", __func__, ioc->name); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) { + leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + rc = -ETIMEDOUT; + goto unmap_in; + } + } + dtransportprintk(ioc, pr_info( + "%s %s - complete\n", ioc->name, __func__)); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID)) { + dtransportprintk(ioc, pr_info( + "%s %s - no reply\n", ioc->name, + __func__)); + rc = -ENXIO; + goto unmap_in; + } + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, + pr_info( + "%s %s - reply data transfer size(%d)\n", + ioc->name, __func__, + le16_to_cpu(mpi_reply->ResponseDataLength))); + memcpy(job->reply, mpi_reply, sizeof(*mpi_reply)); + job->reply_len = sizeof(*mpi_reply); + reslen = le16_to_cpu(mpi_reply->ResponseDataLength); + if (addr_in) { + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, addr_in, + job->reply_payload.payload_len); + } + rc = 0; +unmap_in: + leapioraid_transport_unmap_smp_buffer( + &ioc->pdev->dev, &job->reply_payload, + dma_addr_in, addr_in); +unmap_out: + leapioraid_transport_unmap_smp_buffer( + &ioc->pdev->dev, &job->request_payload, + dma_addr_out, addr_out); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_unlock(&ioc->transport_cmds.mutex); +job_done: + bsg_job_done(job, rc, reslen); +} + +struct sas_function_template leapioraid_transport_functions = { + .get_linkerrors = leapioraid_transport_get_linkerrors, + .get_enclosure_identifier = leapioraid_transport_get_enclosure_identifier, + .get_bay_identifier = leapioraid_transport_get_bay_identifier, + .phy_reset = leapioraid_transport_phy_reset, + .phy_enable = leapioraid_transport_phy_enable, + .set_phy_speed = leapioraid_transport_phy_speed, + .smp_handler = leapioraid_transport_smp_handler, +}; + +struct scsi_transport_template *leapioraid_transport_template; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 8a83f3fc2b865e7c6a430168c86b2f5fb5c5822e..d4b97f0a50131abac54b0a8652932e66de4acb3a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3589,6 +3589,14 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex, d_val.u.high != cpu_to_le32(UINT_MAX)) { smid = le16_to_cpu(reply_desc->SMID); +#ifdef CONFIG_SUBARCH_C3B + if (smid == 0xffff) { + smid = d_val.u.low >> 16; + if (smid == 0xffff) + break; + } +#endif + cmd_fusion = fusion->cmd_list[smid - 1]; scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) cmd_fusion->io_request; diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index 6f30988229692ccb12e60233ef9dbe8f9c36c1f8..5f5d9d663fef6443c0952a517ba085d62f1a18ee 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -133,6 +133,11 @@ config 
ARCH_TEGRA_234_SOC help Enable support for the NVIDIA Tegra234 SoC. +config ARCH_TEGRA_241_SOC + bool "NVIDIA Tegra241 SoC" + help + Enable support for the NVIDIA Tegra241 SoC. + endif endif diff --git a/drivers/soc/tegra/cbb/tegra194-cbb.c b/drivers/soc/tegra/cbb/tegra194-cbb.c index cf6886f362d387154911960fb76ac45ad6ec5741..9cbc562ae7d37aa2ea25856b69040a303e08bba1 100644 --- a/drivers/soc/tegra/cbb/tegra194-cbb.c +++ b/drivers/soc/tegra/cbb/tegra194-cbb.c @@ -2293,7 +2293,7 @@ static int tegra194_cbb_probe(struct platform_device *pdev) return tegra_cbb_register(&cbb->base); } -static int tegra194_cbb_remove(struct platform_device *pdev) +static void tegra194_cbb_remove(struct platform_device *pdev) { struct tegra194_cbb *cbb = platform_get_drvdata(pdev); struct tegra_cbb *noc, *tmp; @@ -2311,8 +2311,6 @@ static int tegra194_cbb_remove(struct platform_device *pdev) } spin_unlock_irqrestore(&cbb_lock, flags); - - return 0; } static int __maybe_unused tegra194_cbb_resume_noirq(struct device *dev) @@ -2332,7 +2330,7 @@ static const struct dev_pm_ops tegra194_cbb_pm = { static struct platform_driver tegra194_cbb_driver = { .probe = tegra194_cbb_probe, - .remove = tegra194_cbb_remove, + .remove_new = tegra194_cbb_remove, .driver = { .name = "tegra194-cbb", .of_match_table = of_match_ptr(tegra194_cbb_match), diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index a2c28f493a75e52a0e620dd5fa35c2636d40d4ae..b6bfd6729df3904adbe5affd1208e59f9a5608dc 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -3,11 +3,13 @@ * Copyright (c) 2013-2023, NVIDIA CORPORATION. All rights reserved. */ +#include #include #include #include #include #include +#include #include #include #include @@ -113,6 +115,28 @@ static void tegra_fuse_restore(void *base) fuse->clk = NULL; } +static void tegra_fuse_print_sku_info(struct tegra_sku_info *tegra_sku_info) +{ + pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n", + tegra_revision_name[tegra_sku_info->revision], + tegra_sku_info->sku_id, tegra_sku_info->cpu_process_id, + tegra_sku_info->soc_process_id); + pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n", + tegra_sku_info->cpu_speedo_id, tegra_sku_info->soc_speedo_id); +} + +static int tegra_fuse_add_lookups(struct tegra_fuse *fuse) +{ + fuse->lookups = kmemdup_array(fuse->soc->lookups, sizeof(*fuse->lookups), + fuse->soc->num_lookups, GFP_KERNEL); + if (!fuse->lookups) + return -ENOMEM; + + nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups); + + return 0; +} + static int tegra_fuse_probe(struct platform_device *pdev) { void __iomem *base = fuse->base; @@ -130,15 +154,46 @@ static int tegra_fuse_probe(struct platform_device *pdev) return PTR_ERR(fuse->base); fuse->phys = res->start; - fuse->clk = devm_clk_get(&pdev->dev, "fuse"); - if (IS_ERR(fuse->clk)) { - if (PTR_ERR(fuse->clk) != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to get FUSE clock: %ld", - PTR_ERR(fuse->clk)); + /* Initialize the soc data and lookups if using ACPI boot. 
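+ * On ACPI boot the early device-tree initcall cannot discover the chip, so the chip ID is read here via tegra_acpi_init_apbmisc() and the matching fuse SoC data is selected from it.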
*/ + if (is_acpi_node(dev_fwnode(&pdev->dev)) && !fuse->soc) { + u8 chip; - return PTR_ERR(fuse->clk); + tegra_acpi_init_apbmisc(); + + chip = tegra_get_chip_id(); + switch (chip) { +#if defined(CONFIG_ARCH_TEGRA_194_SOC) + case TEGRA194: + fuse->soc = &tegra194_fuse_soc; + break; +#endif +#if defined(CONFIG_ARCH_TEGRA_234_SOC) + case TEGRA234: + fuse->soc = &tegra234_fuse_soc; + break; +#endif +#if defined(CONFIG_ARCH_TEGRA_241_SOC) + case TEGRA241: + fuse->soc = &tegra241_fuse_soc; + break; +#endif + default: + return dev_err_probe(&pdev->dev, -EINVAL, "Unsupported SoC: %02x\n", chip); + } + + fuse->soc->init(fuse); + tegra_fuse_print_sku_info(&tegra_sku_info); + tegra_soc_device_register(); + + err = tegra_fuse_add_lookups(fuse); + if (err) + return dev_err_probe(&pdev->dev, err, "failed to add FUSE lookups\n"); } + fuse->clk = devm_clk_get_optional(&pdev->dev, "fuse"); + if (IS_ERR(fuse->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(fuse->clk), "failed to get FUSE clock\n"); + platform_set_drvdata(pdev, fuse); fuse->dev = &pdev->dev; @@ -179,12 +234,8 @@ static int tegra_fuse_probe(struct platform_device *pdev) } fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse"); - if (IS_ERR(fuse->rst)) { - err = PTR_ERR(fuse->rst); - dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n", - fuse->rst); - return err; - } + if (IS_ERR(fuse->rst)) + return dev_err_probe(&pdev->dev, PTR_ERR(fuse->rst), "failed to get FUSE reset\n"); /* * FUSE clock is enabled at a boot time, hence this resume/suspend @@ -262,10 +313,17 @@ static const struct dev_pm_ops tegra_fuse_pm = { SET_SYSTEM_SLEEP_PM_OPS(tegra_fuse_suspend, tegra_fuse_resume) }; +static const struct acpi_device_id tegra_fuse_acpi_match[] = { + { "NVDA200F" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, tegra_fuse_acpi_match); + static struct platform_driver tegra_fuse_driver = { .driver = { .name = "tegra-fuse", .of_match_table = tegra_fuse_match, + .acpi_match_table = tegra_fuse_acpi_match, .pm = &tegra_fuse_pm, .suppress_bind_attrs = true, }, @@ -287,7 +345,16 @@ u32 __init tegra_fuse_read_early(unsigned int offset) int tegra_fuse_readl(unsigned long offset, u32 *value) { - if (!fuse->read || !fuse->clk) + if (!fuse->dev) + return -EPROBE_DEFER; + + /* + * Wait for fuse->clk to be initialized if device-tree boot is used. 
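+ * On ACPI boot the clock is optional and fuse->clk may legitimately remain NULL, so only the presence of fuse->read is checked there.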
+ */ + if (is_of_node(dev_fwnode(fuse->dev)) && !fuse->clk) + return -EPROBE_DEFER; + + if (!fuse->read) return -EPROBE_DEFER; if (IS_ERR(fuse->clk)) @@ -343,7 +410,8 @@ const struct attribute_group tegra_soc_attr_group = { }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \ - IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) + IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \ + IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC) static ssize_t platform_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -370,7 +438,7 @@ const struct attribute_group tegra194_soc_attr_group = { }; #endif -struct device * __init tegra_soc_device_register(void) +struct device *tegra_soc_device_register(void) { struct soc_device_attribute *attr; struct soc_device *dev; @@ -407,6 +475,7 @@ static int __init tegra_init_fuse(void) const struct of_device_id *match; struct device_node *np; struct resource regs; + int err; tegra_init_apbmisc(); @@ -497,22 +566,13 @@ static int __init tegra_init_fuse(void) fuse->soc->init(fuse); - pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n", - tegra_revision_name[tegra_sku_info.revision], - tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id, - tegra_sku_info.soc_process_id); - pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n", - tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id); + tegra_fuse_print_sku_info(&tegra_sku_info); - if (fuse->soc->lookups) { - size_t size = sizeof(*fuse->lookups) * fuse->soc->num_lookups; - - fuse->lookups = kmemdup(fuse->soc->lookups, size, GFP_KERNEL); - if (fuse->lookups) - nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups); - } + err = tegra_fuse_add_lookups(fuse); + if (err) + pr_err("failed to add FUSE lookups\n"); - return 0; + return err; } early_initcall(tegra_init_fuse); diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c index e94d46372a6396d14ee5dab31f0aa378a18bc9e1..eb14e5ff5a0aa8b5023b742b14e99320b47b7084 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra30.c +++ b/drivers/soc/tegra/fuse/fuse-tegra30.c @@ -38,7 +38,8 @@ defined(CONFIG_ARCH_TEGRA_210_SOC) || \ defined(CONFIG_ARCH_TEGRA_186_SOC) || \ defined(CONFIG_ARCH_TEGRA_194_SOC) || \ - defined(CONFIG_ARCH_TEGRA_234_SOC) + defined(CONFIG_ARCH_TEGRA_234_SOC) || \ + defined(CONFIG_ARCH_TEGRA_241_SOC) static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset) { if (WARN_ON(!fuse->base)) @@ -678,3 +679,23 @@ const struct tegra_fuse_soc tegra234_fuse_soc = { .clk_suspend_on = false, }; #endif + +#if defined(CONFIG_ARCH_TEGRA_241_SOC) +static const struct tegra_fuse_info tegra241_fuse_info = { + .read = tegra30_fuse_read, + .size = 0x16008, + .spare = 0xcf0, +}; + +static const struct nvmem_keepout tegra241_fuse_keepouts[] = { + { .start = 0xc, .end = 0x1600c } +}; + +const struct tegra_fuse_soc tegra241_fuse_soc = { + .init = tegra30_fuse_init, + .info = &tegra241_fuse_info, + .keepouts = tegra241_fuse_keepouts, + .num_keepouts = ARRAY_SIZE(tegra241_fuse_keepouts), + .soc_attr_group = &tegra194_soc_attr_group, +}; +#endif diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h index 90f23be738947a5b5ebd7e8dc03e01bdd5d6b434..9fee6ad6ad9e98852c9d47079cdf49433e55fdb7 100644 --- a/drivers/soc/tegra/fuse/fuse.h +++ b/drivers/soc/tegra/fuse/fuse.h @@ -69,6 +69,7 @@ struct tegra_fuse { void tegra_init_revision(void); void tegra_init_apbmisc(void); +void tegra_acpi_init_apbmisc(void); u32 __init tegra_fuse_read_spare(unsigned int spare); u32 __init tegra_fuse_read_early(unsigned int 
offset); @@ -123,7 +124,8 @@ extern const struct tegra_fuse_soc tegra186_fuse_soc; #endif #if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \ - IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) + IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \ + IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC) extern const struct attribute_group tegra194_soc_attr_group; #endif @@ -135,4 +137,8 @@ extern const struct tegra_fuse_soc tegra194_fuse_soc; extern const struct tegra_fuse_soc tegra234_fuse_soc; #endif +#ifdef CONFIG_ARCH_TEGRA_241_SOC +extern const struct tegra_fuse_soc tegra241_fuse_soc; +#endif + #endif diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c index da970f3dbf35620ac64f3d6255301ee4807a8f57..e2ca5d55fd31259452f444e7d846d9cc145f5e8c 100644 --- a/drivers/soc/tegra/fuse/tegra-apbmisc.c +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c @@ -3,9 +3,11 @@ * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. */ +#include #include #include #include +#include #include #include @@ -62,6 +64,7 @@ bool tegra_is_silicon(void) switch (tegra_get_chip_id()) { case TEGRA194: case TEGRA234: + case TEGRA241: case TEGRA264: if (tegra_get_platform() == 0) return true; @@ -160,9 +163,34 @@ void __init tegra_init_revision(void) tegra_sku_info.platform = tegra_get_platform(); } -void __init tegra_init_apbmisc(void) +static void tegra_init_apbmisc_resources(struct resource *apbmisc, + struct resource *straps) { void __iomem *strapping_base; + + apbmisc_base = ioremap(apbmisc->start, resource_size(apbmisc)); + if (apbmisc_base) + chipid = readl_relaxed(apbmisc_base + 4); + else + pr_err("failed to map APBMISC registers\n"); + + strapping_base = ioremap(straps->start, resource_size(straps)); + if (strapping_base) { + strapping = readl_relaxed(strapping_base); + iounmap(strapping_base); + } else { + pr_err("failed to map strapping options registers\n"); + } +} + +/** + * tegra_init_apbmisc - Initializes Tegra APBMISC and Strapping registers. + * + * This is called during early init as some of the old 32-bit ARM code needs + * information from the APBMISC registers very early during boot. + */ +void __init tegra_init_apbmisc(void) +{ struct resource apbmisc, straps; struct device_node *np; @@ -219,23 +247,73 @@ void __init tegra_init_apbmisc(void) } } - apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc)); - if (!apbmisc_base) { - pr_err("failed to map APBMISC registers\n"); - } else { - chipid = readl_relaxed(apbmisc_base + 4); + tegra_init_apbmisc_resources(&apbmisc, &straps); + long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code"); + +put: + of_node_put(np); +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id apbmisc_acpi_match[] = { + { "NVDA2010" }, + { /* sentinel */ } +}; + +void tegra_acpi_init_apbmisc(void) +{ + struct resource *resources[2] = { NULL }; + struct resource_entry *rentry; + struct acpi_device *adev = NULL; + struct list_head resource_list; + int rcount = 0; + int ret; + + adev = acpi_dev_get_first_match_dev(apbmisc_acpi_match[0].id, NULL, -1); + if (!adev) + return; + + INIT_LIST_HEAD(&resource_list); + + ret = acpi_dev_get_memory_resources(adev, &resource_list); + if (ret < 0) { + pr_err("failed to get APBMISC memory resources"); + goto out_put_acpi_dev; } - strapping_base = ioremap(straps.start, resource_size(&straps)); - if (!strapping_base) { - pr_err("failed to map strapping options registers\n"); - } else { - strapping = readl_relaxed(strapping_base); - iounmap(strapping_base); + /* + * Get required memory resources. 
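+ * The ACPI description is expected to expose exactly two memory windows, in this order: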
+ * + * resources[0]: apbmisc. + * resources[1]: straps. + */ + resource_list_for_each_entry(rentry, &resource_list) { + if (rcount >= ARRAY_SIZE(resources)) + break; + + resources[rcount++] = rentry->res; } - long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code"); + if (!resources[0]) { + pr_err("failed to get APBMISC registers\n"); + goto out_free_resource_list; + } -put: - of_node_put(np); + if (!resources[1]) { + pr_err("failed to get strapping options registers\n"); + goto out_free_resource_list; + } + + tegra_init_apbmisc_resources(resources[0], resources[1]); + +out_free_resource_list: + acpi_dev_free_resource_list(&resource_list); + +out_put_acpi_dev: + acpi_dev_put(adev); +} +#else +void tegra_acpi_init_apbmisc(void) +{ } +#endif diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 162f52456f654e3f3d06ebc0fcc77a43f701ce76..a08c377933c5052f6a1a8081e05cb4905870d507 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -3,7 +3,7 @@ * drivers/soc/tegra/pmc.c * * Copyright (c) 2010 Google, Inc - * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved. * * Author: * Colin Cross @@ -384,6 +384,7 @@ struct tegra_pmc_soc { bool has_blink_output; bool has_usb_sleepwalk; bool supports_core_domain; + bool has_single_mmio_aperture; }; /** @@ -1393,13 +1394,6 @@ tegra_pmc_core_pd_set_performance_state(struct generic_pm_domain *genpd, return 0; } -static unsigned int -tegra_pmc_core_pd_opp_to_performance_state(struct generic_pm_domain *genpd, - struct dev_pm_opp *opp) -{ - return dev_pm_opp_get_level(opp); -} - static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np) { struct generic_pm_domain *genpd; @@ -1412,7 +1406,6 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np) genpd->name = "core"; genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state; - genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state; err = devm_pm_opp_set_regulators(pmc->dev, rname); if (err) @@ -1445,7 +1438,7 @@ static int tegra_powergate_init(struct tegra_pmc *pmc, struct device_node *parent) { struct of_phandle_args child_args, parent_args; - struct device_node *np, *child; + struct device_node *np; int err = 0; /* @@ -1464,12 +1457,10 @@ static int tegra_powergate_init(struct tegra_pmc *pmc, if (!np) return 0; - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { err = tegra_powergate_add(pmc, child); - if (err < 0) { - of_node_put(child); + if (err < 0) break; - } if (of_parse_phandle_with_args(child, "power-domains", "#power-domain-cells", @@ -1481,10 +1472,8 @@ static int tegra_powergate_init(struct tegra_pmc *pmc, err = of_genpd_add_subdomain(&parent_args, &child_args); of_node_put(parent_args.np); - if (err) { - of_node_put(child); + if (err) break; - } } of_node_put(np); @@ -1785,30 +1774,6 @@ static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id) return TEGRA_IO_PAD_VOLTAGE_3V3; } -/** - * tegra_io_rail_power_on() - enable power to I/O rail - * @id: Tegra I/O pad ID for which to enable power - * - * See also: tegra_io_pad_power_enable() - */ -int tegra_io_rail_power_on(unsigned int id) -{ - return tegra_io_pad_power_enable(id); -} -EXPORT_SYMBOL(tegra_io_rail_power_on); - -/** - * tegra_io_rail_power_off() - disable power to I/O rail - * @id: Tegra I/O pad ID for which to disable power - * - * See also: tegra_io_pad_power_disable() - */ -int 
tegra_io_rail_power_off(unsigned int id) -{ - return tegra_io_pad_power_disable(id); -} -EXPORT_SYMBOL(tegra_io_rail_power_off); - #ifdef CONFIG_PM_SLEEP enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void) { @@ -2917,31 +2882,29 @@ static int tegra_pmc_probe(struct platform_device *pdev) if (IS_ERR(base)) return PTR_ERR(base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wake"); - if (res) { - pmc->wake = devm_ioremap_resource(&pdev->dev, res); + if (pmc->soc->has_single_mmio_aperture) { + pmc->wake = base; + pmc->aotag = base; + pmc->scratch = base; + } else { + pmc->wake = devm_platform_ioremap_resource_byname(pdev, "wake"); if (IS_ERR(pmc->wake)) return PTR_ERR(pmc->wake); - } else { - pmc->wake = base; - } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aotag"); - if (res) { - pmc->aotag = devm_ioremap_resource(&pdev->dev, res); + pmc->aotag = devm_platform_ioremap_resource_byname(pdev, "aotag"); if (IS_ERR(pmc->aotag)) return PTR_ERR(pmc->aotag); - } else { - pmc->aotag = base; - } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scratch"); - if (res) { - pmc->scratch = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pmc->scratch)) - return PTR_ERR(pmc->scratch); - } else { - pmc->scratch = base; + /* "scratch" is an optional aperture */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "scratch"); + if (res) { + pmc->scratch = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pmc->scratch)) + return PTR_ERR(pmc->scratch); + } else { + pmc->scratch = NULL; + } } pmc->clk = devm_clk_get_optional(&pdev->dev, "pclk"); @@ -2953,12 +2916,15 @@ static int tegra_pmc_probe(struct platform_device *pdev) * PMC should be last resort for restarting since it soft-resets * CPU without resetting everything else. 
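 * The notifier is therefore registered only when the scratch aperture is present, since the reboot mode is kept in a scratch register.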
*/ - err = devm_register_reboot_notifier(&pdev->dev, - &tegra_pmc_reboot_notifier); - if (err) { - dev_err(&pdev->dev, "unable to register reboot notifier, %d\n", - err); - return err; + if (pmc->scratch) { + err = devm_register_reboot_notifier(&pdev->dev, + &tegra_pmc_reboot_notifier); + if (err) { + dev_err(&pdev->dev, + "unable to register reboot notifier, %d\n", + err); + return err; + } } err = devm_register_sys_off_handler(&pdev->dev, @@ -3332,6 +3298,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = { .num_pmc_clks = 0, .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra30_powergates[] = { @@ -3393,6 +3360,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra114_powergates[] = { @@ -3450,6 +3418,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra124_powergates[] = { @@ -3594,6 +3563,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra210_powergates[] = { @@ -3757,6 +3727,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const struct tegra_io_pad_soc tegra186_io_pads[] = { @@ -3954,6 +3925,7 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = { .num_pmc_clks = 0, .has_blink_output = false, .has_usb_sleepwalk = false, + .has_single_mmio_aperture = false, }; static const struct tegra_io_pad_soc tegra194_io_pads[] = { @@ -4094,6 +4066,7 @@ static const char * const tegra194_reset_sources[] = { }; static const struct tegra_wake_event tegra194_wake_events[] = { + TEGRA_WAKE_GPIO("eqos", 20, 0, TEGRA194_MAIN_GPIO(G, 4)), TEGRA_WAKE_IRQ("pmu", 24, 209), TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)), TEGRA_WAKE_IRQ("rtc", 73, 10), @@ -4139,6 +4112,7 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = { .num_pmc_clks = 0, .has_blink_output = false, .has_usb_sleepwalk = false, + .has_single_mmio_aperture = false, }; static const struct tegra_io_pad_soc tegra234_io_pads[] = { @@ -4228,6 +4202,8 @@ static const char * const tegra234_reset_sources[] = { }; static const struct tegra_wake_event tegra234_wake_events[] = { + TEGRA_WAKE_GPIO("sd-wake", 8, 0, TEGRA234_MAIN_GPIO(G, 7)), + TEGRA_WAKE_GPIO("eqos", 20, 0, TEGRA234_MAIN_GPIO(G, 4)), TEGRA_WAKE_IRQ("pmu", 24, 209), TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)), TEGRA_WAKE_GPIO("mgbe", 56, 0, TEGRA234_MAIN_GPIO(Y, 3)), @@ -4267,6 +4243,7 @@ static const struct tegra_pmc_soc tegra234_pmc_soc = { .pmc_clks_data = NULL, .num_pmc_clks = 0, .has_blink_output = false, + .has_single_mmio_aperture = false, }; static const struct of_device_id tegra_pmc_match[] = { diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3ce0fd5df8e9ca471e67de3653091aace94f31ad..60826b7ed21eca3cc9c2234654195ceac3c27b5a 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -1179,6 +1179,12 @@ config SPI_AMD # # Add new SPI master controllers in alphabetical order 
above this line # +config SPI_CHIP3 + tristate "Memory-mapped io interface driver for SUNWAY CHIP3 SPI core" + depends on UNCORE_XUELANG + help + general driver for SPI controller core from DesignWare + comment "SPI Multiplexer support" diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 6af54842b9fa4f2b95355ada8418ad54173649b3..26bf16fcf890c229e60e7191a10f4c62ca505986 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -48,6 +48,7 @@ spi-dw-y := spi-dw-core.o spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o obj-$(CONFIG_SPI_DW_BT1) += spi-dw-bt1.o obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o +obj-$(CONFIG_SPI_CHIP3) += spi-chip3.o spi-chip3-mmio.o obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o obj-$(CONFIG_SPI_FALCON) += spi-falcon.o diff --git a/drivers/spi/spi-chip3-mmio.c b/drivers/spi/spi-chip3-mmio.c new file mode 100644 index 0000000000000000000000000000000000000000..a907f13d4ae5cf6fd3047febd81ca2594320bb5f --- /dev/null +++ b/drivers/spi/spi-chip3-mmio.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Memory-mapped interface driver for SUNWAY CHIP3 SPI Core + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-chip3.h" + +#define DRIVER_NAME "sunway_chip3_spi" + +struct chip3_spi_mmio { + struct chip3_spi dws; + struct clk *clk; + void *priv; +}; + +static int chip3_spi_mmio_probe(struct platform_device *pdev) +{ + int (*init_func)(struct platform_device *pdev, + struct chip3_spi_mmio *dwsmmio); + struct chip3_spi_mmio *dwsmmio; + struct chip3_spi *dws; + struct resource *mem; + int ret; + int num_cs; + + dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct chip3_spi_mmio), + GFP_KERNEL); + if (!dwsmmio) + return -ENOMEM; + + dws = &dwsmmio->dws; + + /* Get basic io resource and map it */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dws->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(dws->regs)) { + dev_err(&pdev->dev, "SPI region map failed\n"); + return PTR_ERR(dws->regs); + } + + dws->irq = platform_get_irq(pdev, 0); + if (dws->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return dws->irq; /* -ENXIO */ + } + + dwsmmio->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(dwsmmio->clk)) + return PTR_ERR(dwsmmio->clk); + ret = clk_prepare_enable(dwsmmio->clk); + if (ret) + return ret; + + dws->bus_num = pdev->id; + dws->max_freq = clk_get_rate(dwsmmio->clk); + + device_property_read_u32(&pdev->dev, "reg-io-width", + &dws->reg_io_width); + + num_cs = 4; + device_property_read_u32(&pdev->dev, "num-cs", &num_cs); + dws->num_cs = num_cs; + + if (pdev->dev.of_node) { + int i; + + for (i = 0; i < dws->num_cs; i++) { + int cs_gpio = of_get_named_gpio(pdev->dev.of_node, + "cs-gpios", i); + + if (cs_gpio == -EPROBE_DEFER) { + ret = cs_gpio; + goto out; + } + + if (gpio_is_valid(cs_gpio)) { + ret = devm_gpio_request(&pdev->dev, cs_gpio, + dev_name(&pdev->dev)); + if (ret) + goto out; + } + } + } + + init_func = device_get_match_data(&pdev->dev); + if (init_func) { + ret = init_func(pdev, dwsmmio); + if (ret) + goto out; + } + + ret = chip3_spi_add_host(&pdev->dev, dws); + if (ret) + goto out; + + platform_set_drvdata(pdev, dwsmmio); + + return 0; +out: + clk_disable_unprepare(dwsmmio->clk); + return ret; +} + +static int chip3_spi_mmio_remove(struct platform_device *pdev) +{ + struct chip3_spi_mmio *dwsmmio = platform_get_drvdata(pdev); + + chip3_spi_remove_host(&dwsmmio->dws); + 
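+ /* teardown mirrors the probe error path: unregister the host before gating its clock */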
clk_disable_unprepare(dwsmmio->clk); + + return 0; +} + +static const struct of_device_id chip3_spi_mmio_of_match[] = { + { .compatible = "sunway,chip3-spi", }, + { /* end of table */} +}; +MODULE_DEVICE_TABLE(of, chip3_spi_mmio_of_match); + +static struct platform_driver chip3_spi_mmio_driver = { + .probe = chip3_spi_mmio_probe, + .remove = chip3_spi_mmio_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = chip3_spi_mmio_of_match, + }, +}; +module_platform_driver(chip3_spi_mmio_driver); + +MODULE_AUTHOR("Platform@wxiat.com"); +MODULE_DESCRIPTION("Memory-mapped I/O interface driver for Sunway CHIP3"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-chip3.c b/drivers/spi/spi-chip3.c new file mode 100644 index 0000000000000000000000000000000000000000..8186c84eca8c3bdd411ff4dfd19fdc21f7b30a00 --- /dev/null +++ b/drivers/spi/spi-chip3.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SUNWAY CHIP3 SPI core controller driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-chip3.h" + +/* Slave spi_dev related */ +struct chip_data { + u8 tmode; /* TR/TO/RO/EEPROM */ + u8 type; /* SPI/SSP/MicroWire */ + + u8 poll_mode; /* 1 means use poll mode */ + + u16 clk_div; /* baud rate divider */ + u32 speed_hz; /* baud rate */ + void (*cs_control)(u32 command); +}; + +static void chip3_spi_handle_err(struct spi_controller *master, + struct spi_message *msg) +{ + struct chip3_spi *dws = spi_controller_get_devdata(master); + + spi_reset_chip(dws); +} + +static size_t chip3_spi_max_length(struct spi_device *spi) +{ + struct chip3_spi *dws = spi_controller_get_devdata(spi->master); + + return dws->fifo_len; +} + +static int chip3_spi_transfer_one_message(struct spi_controller *master, + struct spi_message *m) +{ + struct chip3_spi *dws = spi_controller_get_devdata(master); + struct spi_transfer *t = NULL; + u16 clk_div; + u32 freq; + u32 speed_hz; + u32 status; + u32 len = 0; + int ret = 0; + int i = 0; + + spi_enable_chip(dws, 0); + + /* Handle per transfer options for bpw and speed. 
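+ * The divider is rounded up to an even value (the low bit of BAUDR is discarded, as in DesignWare SSI), so the effective rate never exceeds the request. + * A worked example, assuming max_freq = 100 MHz and a 9 MHz request: DIV_ROUND_UP(100 MHz, 9 MHz) = 12, (12 + 1) & 0xfffe = 12, so the bus runs at 100 MHz / 12 = 8.33 MHz.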
*/ + freq = clamp(m->spi->max_speed_hz, 0U, dws->max_freq); + clk_div = (DIV_ROUND_UP(dws->max_freq, freq) + 1) & 0xfffe; + speed_hz = dws->max_freq / clk_div; + + if (dws->current_freq != speed_hz) { + spi_set_clk(dws, clk_div); + dws->current_freq = speed_hz; + } + + dws->n_bytes = 1; + + /* For poll mode just disable all interrupts */ + spi_mask_intr(dws, 0xff); + + chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_TRANSMIT_RECEIVE); + + spi_enable_chip(dws, 1); + + list_for_each_entry(t, &m->transfers, transfer_list) { + /* Reject messages that do not fit into the FIFO buffer */ + if (len + t->len > dws->fifo_len) { + pr_err("SPI transfer overflow.\n"); + m->actual_length = 0; + m->status = -EIO; + ret = -EIO; + goto way_out; + } + + if (t->tx_buf) + memcpy(&dws->buf[len], t->tx_buf, t->len); + else + memset(&dws->buf[len], 0, t->len); + + /* advance the staging offset only after this segment is copied */ + len += t->len; + } + + chip3_writel(dws, CHIP3_SPI_SER, 0x0); + for (i = 0; i < len; i++) + chip3_writel(dws, CHIP3_SPI_DR, dws->buf[i]); + chip3_writel(dws, CHIP3_SPI_SER, BIT(m->spi->chip_select)); + + do { + status = chip3_readl(dws, CHIP3_SPI_SR); + } while (status & SR_BUSY); + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->rx_buf) { + for (i = 0; i < t->len; i++, t->rx_buf += 1) + *(u8 *)t->rx_buf = chip3_readl(dws, CHIP3_SPI_DR); + } else { + for (i = 0; i < t->len; i++) + chip3_readl(dws, CHIP3_SPI_DR); + } + } + + m->actual_length = len; + m->status = 0; + spi_finalize_current_message(master); + +way_out: + return ret; +} + +static int chip3_spi_adjust_mem_op_size(struct spi_mem *mem, + struct spi_mem_op *op) +{ + struct chip3_spi *dws = spi_controller_get_devdata(mem->spi->controller); + size_t len; + + len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; + + op->data.nbytes = min((size_t)op->data.nbytes, (dws->fifo_len - len)); + if (!op->data.nbytes) + return -EINVAL; + + return 0; +} + +static int chip3_spi_init_mem_buf(struct chip3_spi *dws, + const struct spi_mem_op *op) +{ + int ret = 0; + int i, j, len; + + /* Calculate the total length of the transfer. */ + len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; + + /* Reject operations that do not fit into the FIFO buffer */ + if (len + op->data.nbytes > dws->fifo_len) { + ret = -EIO; + goto way_out; + } + + /* + * Collect the operation code, address and dummy bytes into the single + * buffer. If it's a transfer with data to be sent, also copy it into + * the single buffer. + */ + for (i = 0; i < sizeof(op->cmd.opcode); i++) + dws->buf[i] = op->cmd.opcode; + for (j = 0; j < op->addr.nbytes; i++, j++) + dws->buf[i] = op->addr.val >> (8 * (op->addr.nbytes - i)); + for (j = 0; j < op->dummy.nbytes; i++, j++) + dws->buf[i] = 0xff; + + if (op->data.dir == SPI_MEM_DATA_OUT) { + memcpy(&dws->buf[i], op->data.buf.out, op->data.nbytes); + len += op->data.nbytes; + } + + dws->tx_len = len; + + if (op->data.dir == SPI_MEM_DATA_IN) { + dws->rx = op->data.buf.in; + dws->rx_len = op->data.nbytes; + } else { + dws->rx = NULL; + dws->rx_len = 0; + } + +way_out: + return ret; +} + +static int chip3_spi_exec_mem_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct chip3_spi *dws = spi_controller_get_devdata(mem->spi->controller); + u16 clk_div; + int ret = 0; + int i; + unsigned short value; + u32 freq; + u32 speed_hz; + + ret = chip3_spi_init_mem_buf(dws, op); + if (ret) + return ret; + + spi_enable_chip(dws, 0); + + /* Handle per transfer options for bpw and speed. 
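+ * (the divider is computed with the same even-value rounding as in chip3_spi_transfer_one_message() above)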
*/ + freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_freq); + clk_div = (DIV_ROUND_UP(dws->max_freq, freq) + 1) & 0xfffe; + speed_hz = dws->max_freq / clk_div; + + if (dws->current_freq != speed_hz) { + spi_set_clk(dws, clk_div); + dws->current_freq = speed_hz; + } + + dws->n_bytes = 1; + + /* For poll mode just disable all interrupts */ + spi_mask_intr(dws, 0xff); + + if ((dws->tx_len != 0) && (dws->rx_len != 0)) { + chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_EEPROM_READ); + chip3_writel(dws, CHIP3_SPI_CTRL1, (dws->rx_len - 1)); + } else { + chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_TRANSMIT_ONLY); + } + + spi_enable_chip(dws, 1); + + chip3_writel(dws, CHIP3_SPI_SER, 0x0); + for (i = 0; i < dws->tx_len; i++) + chip3_writel(dws, CHIP3_SPI_DR, dws->buf[i]); + chip3_writel(dws, CHIP3_SPI_SER, BIT(mem->spi->chip_select)); + + value = chip3_readl(dws, CHIP3_SPI_SR); + while (value & SR_BUSY) + value = chip3_readl(dws, CHIP3_SPI_SR); + + for (i = 0; i < dws->rx_len; dws->rx += dws->n_bytes, i++) + *(u8 *)dws->rx = chip3_readl(dws, CHIP3_SPI_DR); + + return ret; +} + +/* This may be called twice for each spi dev */ +static int chip3_spi_setup(struct spi_device *spi) +{ + struct chip3_spi_chip *chip_info = NULL; + struct chip_data *chip; + u32 poll_mode = 0; + struct device_node *np = spi->dev.of_node; + + /* Only alloc on first setup */ + chip = spi_get_ctldata(spi); + if (!chip) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + spi_set_ctldata(spi, chip); + } + + /* + * Protocol drivers may change the chip settings, so... + * if chip_info exists, use it + */ + chip_info = spi->controller_data; + + /* chip_info doesn't always exist */ + if (chip_info) { + if (chip_info->cs_control) + chip->cs_control = chip_info->cs_control; + + chip->poll_mode = chip_info->poll_mode; + chip->type = chip_info->type; + } else { + if (np) { + of_property_read_u32(np, "poll_mode", &poll_mode); + chip->poll_mode = poll_mode; + } + + } + + chip->tmode = SPI_TMOD_TR; + return 0; +} + +static void chip3_spi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + + kfree(chip); + spi_set_ctldata(spi, NULL); +} + +/* Restart the controller, disable all interrupts, clean rx fifo */ +static void spi_hw_init(struct device *dev, struct chip3_spi *dws) +{ + spi_reset_chip(dws); + + /* + * Try to detect the FIFO depth if not set by interface driver, + * the depth could be from 2 to 256 from HW spec + */ + if (!dws->fifo_len) { + u32 fifo; + + for (fifo = 1; fifo < 256; fifo++) { + chip3_writel(dws, CHIP3_SPI_TXFLTR, fifo); + if (fifo != chip3_readl(dws, CHIP3_SPI_TXFLTR)) + break; + } + chip3_writel(dws, CHIP3_SPI_TXFLTR, 0); + + dws->fifo_len = (fifo == 1) ? 
0 : fifo; + dev_info(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); + } +} + +static const struct spi_controller_mem_ops chip3_mem_ops = { + .adjust_op_size = chip3_spi_adjust_mem_op_size, + .exec_op = chip3_spi_exec_mem_op, +}; + + +int chip3_spi_add_host(struct device *dev, struct chip3_spi *dws) +{ + struct spi_controller *master; + int ret; + + BUG_ON(dws == NULL); + + master = spi_alloc_master(dev, 0); + if (!master) + return -ENOMEM; + + dws->master = master; + dws->type = SSI_MOTO_SPI; + + spi_controller_set_devdata(master, dws); + + master->mode_bits = SPI_CPOL | SPI_CPHA; + master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); + master->bus_num = dws->bus_num; + master->num_chipselect = dws->num_cs; + master->setup = chip3_spi_setup; + master->cleanup = chip3_spi_cleanup; + master->transfer_one_message = chip3_spi_transfer_one_message; + master->handle_err = chip3_spi_handle_err; + master->max_speed_hz = dws->max_freq; + master->dev.of_node = dev->of_node; + master->flags = SPI_CONTROLLER_GPIO_SS; + master->max_transfer_size = chip3_spi_max_length; + master->max_message_size = chip3_spi_max_length; + + master->mem_ops = &chip3_mem_ops; + + /* Basic HW init */ + spi_hw_init(dev, dws); + + ret = devm_spi_register_controller(dev, master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + spi_enable_chip(dws, 0); + free_irq(dws->irq, master); + } + + return ret; +} +EXPORT_SYMBOL_GPL(chip3_spi_add_host); + +void chip3_spi_remove_host(struct chip3_spi *dws) +{ + spi_shutdown_chip(dws); + + free_irq(dws->irq, dws->master); +} +EXPORT_SYMBOL_GPL(chip3_spi_remove_host); + +int chip3_spi_suspend_host(struct chip3_spi *dws) +{ + int ret; + + ret = spi_controller_suspend(dws->master); + if (ret) + return ret; + + spi_shutdown_chip(dws); + return 0; +} +EXPORT_SYMBOL_GPL(chip3_spi_suspend_host); + +int chip3_spi_resume_host(struct chip3_spi *dws) +{ + int ret; + + spi_hw_init(&dws->master->dev, dws); + ret = spi_controller_resume(dws->master); + if (ret) + dev_err(&dws->master->dev, "failed to start queue (%d)\n", ret); + return ret; +} +EXPORT_SYMBOL_GPL(chip3_spi_resume_host); + +MODULE_AUTHOR("Platform@wxiat.com"); +MODULE_DESCRIPTION("Driver for Sunway CHIP3 SPI controller core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-chip3.h b/drivers/spi/spi-chip3.h new file mode 100644 index 0000000000000000000000000000000000000000..88e49a9091a5d6bb8265fd0b8dc513d8e582b582 --- /dev/null +++ b/drivers/spi/spi-chip3.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CHIP3_SPI_HEADER_H +#define CHIP3_SPI_HEADER_H + +#include +#include +#include +#include + +/* Register offsets */ +#define CHIP3_SPI_CTRL0 (0x00<<7) +#define CHIP3_SPI_CTRL1 (0x04<<7) +#define CHIP3_SPI_SSIENR (0x08<<7) +#define CHIP3_SPI_MWCR (0x0c<<7) +#define CHIP3_SPI_SER (0x10<<7) +#define CHIP3_SPI_BAUDR (0x14<<7) +#define CHIP3_SPI_TXFLTR (0x18<<7) +#define CHIP3_SPI_RXFLTR (0x1c<<7) +#define CHIP3_SPI_TXFLR (0x20<<7) +#define CHIP3_SPI_RXFLR (0x24<<7) +#define CHIP3_SPI_SR (0x28<<7) +#define CHIP3_SPI_IMR (0x2c<<7) +#define CHIP3_SPI_ISR (0x30<<7) +#define CHIP3_SPI_RISR (0x34<<7) +#define CHIP3_SPI_TXOICR (0x38<<7) +#define CHIP3_SPI_RXOICR (0x3c<<7) +#define CHIP3_SPI_RXUICR (0x40<<7) +#define CHIP3_SPI_MSTICR (0x44<<7) +#define CHIP3_SPI_ICR (0x48<<7) +#define CHIP3_SPI_DMACR (0x4c<<7) +#define CHIP3_SPI_DMATDLR (0x50<<7) +#define CHIP3_SPI_DMARDLR (0x54<<7) +#define CHIP3_SPI_IDR (0x58<<7) +#define CHIP3_SPI_VERSION (0x5c<<7) +#define CHIP3_SPI_DR (0x60<<7) 
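+ +/* + * The register offsets above are the DesignWare SSI map shifted left by 7: + * this CHIP3 integration spaces registers on a 128-byte stride, so e.g. the + * data register lands at 0x60 * 128 = 0x3000. An illustrative compile-time + * check (not part of the original driver, assumes linux/build_bug.h) would be: + * + * static_assert(CHIP3_SPI_DR == 0x3000); + */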
+ +/* Bit fields in CTRLR0 */ +#define SPI_DFS_OFFSET 0 + +#define SPI_FRF_OFFSET 4 +#define SPI_FRF_SPI 0x0 +#define SPI_FRF_SSP 0x1 +#define SPI_FRF_MICROWIRE 0x2 +#define SPI_FRF_RESV 0x3 + +#define SPI_MODE_OFFSET 6 +#define SPI_SCPH_OFFSET 6 +#define SPI_SCOL_OFFSET 7 + +#define SPI_TMOD_OFFSET 8 +#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET) +#define SPI_TMOD_TR 0x0 /* xmit & recv */ +#define SPI_TMOD_TO 0x1 /* xmit only */ +#define SPI_TMOD_RO 0x2 /* recv only */ +#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ + +#define SPI_SLVOE_OFFSET 10 +#define SPI_SRL_OFFSET 11 +#define SPI_CFS_OFFSET 12 + +/* Bit fields in SR, 7 bits */ +#define SR_MASK 0x7f /* cover 7 bits */ +#define SR_BUSY (1 << 0) +#define SR_TF_NOT_FULL (1 << 1) +#define SR_TF_EMPT (1 << 2) +#define SR_RF_NOT_EMPT (1 << 3) +#define SR_RF_FULL (1 << 4) +#define SR_TX_ERR (1 << 5) +#define SR_DCOL (1 << 6) + +/* Bit fields in ISR, IMR, RISR, 7 bits */ +#define SPI_INT_TXEI (1 << 0) +#define SPI_INT_TXOI (1 << 1) +#define SPI_INT_RXUI (1 << 2) +#define SPI_INT_RXOI (1 << 3) +#define SPI_INT_RXFI (1 << 4) +#define SPI_INT_MSTI (1 << 5) + +/* Bit fields in DMACR */ +#define SPI_DMA_RDMAE (1 << 0) +#define SPI_DMA_TDMAE (1 << 1) + +/* TX RX interrupt level threshold, max can be 256 */ +#define SPI_INT_THRESHOLD 32 + +/* The depth of the FIFO buffer is 256, so the max transfer length is 256. */ +#define MAX_LEN 256 + +/* The mode of spi controller. */ +#define SPI_TRANSMIT_RECEIVE 0x0c7 +#define SPI_EEPROM_READ 0x3c7 +#define SPI_TRANSMIT_ONLY 0x1c7 + +enum chip3_ssi_type { + SSI_MOTO_SPI = 0, + SSI_TI_SSP, + SSI_NS_MICROWIRE, +}; + +struct chip3_spi; + +struct chip3_spi { + struct spi_controller *master; + enum chip3_ssi_type type; + + void __iomem *regs; + unsigned long paddr; + int irq; + u32 fifo_len; /* depth of the FIFO buffer */ + u32 max_freq; /* max bus freq supported */ + + u32 reg_io_width; /* DR I/O width in bytes */ + u16 bus_num; + u16 num_cs; /* supported slave numbers */ + void (*set_cs)(struct spi_device *spi, bool enable); + + /* Current message transfer state info */ + size_t len; + void *tx; + unsigned int tx_len; + void *rx; + unsigned int rx_len; + u8 n_bytes; /* current is a 1/2 bytes op */ + u32 current_freq; /* frequency in hz */ + + u8 buf[MAX_LEN]; + + /* Bus interface info */ + void *priv; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif +}; + +static inline u32 chip3_readl(struct chip3_spi *dws, u32 offset) +{ + return __raw_readl(dws->regs + offset); +} + +static inline u16 chip3_readw(struct chip3_spi *dws, u32 offset) +{ + return __raw_readw(dws->regs + offset); +} + +static inline void chip3_writel(struct chip3_spi *dws, u32 offset, u32 val) +{ + __raw_writel(val, dws->regs + offset); +} + +static inline void chip3_writew(struct chip3_spi *dws, u32 offset, u16 val) +{ + __raw_writew(val, dws->regs + offset); +} + +static inline u32 chip3_read_io_reg(struct chip3_spi *dws, u32 offset) +{ + switch (dws->reg_io_width) { + case 2: + return chip3_readw(dws, offset); + case 4: + default: + return chip3_readl(dws, offset); + } +} + +static inline void chip3_write_io_reg(struct chip3_spi *dws, u32 offset, u32 val) +{ + switch (dws->reg_io_width) { + case 2: + chip3_writew(dws, offset, val); + break; + case 4: + default: + chip3_writel(dws, offset, val); + break; + } +} + +static inline void spi_enable_chip(struct chip3_spi *dws, int enable) +{ + chip3_writel(dws, CHIP3_SPI_SSIENR, (enable ? 
1 : 0)); +} + +static inline void spi_set_clk(struct chip3_spi *dws, u16 div) +{ + chip3_writel(dws, CHIP3_SPI_BAUDR, div); +} + +/* Disable IRQ bits */ +static inline void spi_mask_intr(struct chip3_spi *dws, u32 mask) +{ + u32 new_mask; + + new_mask = chip3_readl(dws, CHIP3_SPI_IMR) & ~mask; + chip3_writel(dws, CHIP3_SPI_IMR, new_mask); +} + +/* Enable IRQ bits */ +static inline void spi_umask_intr(struct chip3_spi *dws, u32 mask) +{ + u32 new_mask; + + new_mask = chip3_readl(dws, CHIP3_SPI_IMR) | mask; + chip3_writel(dws, CHIP3_SPI_IMR, new_mask); +} + +/* + * Disable the SPI controller, mask all interrupts, then re-enable the + * controller. The transmit and receive FIFO buffers are cleared when the + * device is disabled. + */ +static inline void spi_reset_chip(struct chip3_spi *dws) +{ + spi_enable_chip(dws, 0); + spi_mask_intr(dws, 0xff); + spi_enable_chip(dws, 1); +} + +static inline void spi_shutdown_chip(struct chip3_spi *dws) +{ + spi_enable_chip(dws, 0); + spi_set_clk(dws, 0); +} + +/* + * Each SPI slave device that works with the chip3_api controller should + * have such a structure declaring its working mode (poll or PIO/DMA), + * which can be saved in the "controller_data" member of the + * struct spi_device. + */ +struct chip3_spi_chip { + u8 poll_mode; /* 1 for controller polling mode */ + u8 type; /* SPI/SSP/MicroWire */ + u8 chip_select; + void (*cs_control)(u32 command); +}; + +extern int chip3_spi_add_host(struct device *dev, struct chip3_spi *dws); +extern void chip3_spi_remove_host(struct chip3_spi *dws); +extern int chip3_spi_suspend_host(struct chip3_spi *dws); +extern int chip3_spi_resume_host(struct chip3_spi *dws); + +/* platform related setup */ +extern int chip3_spi_mid_init(struct chip3_spi *dws); /* Intel MID platforms */ +#endif /* CHIP3_SPI_HEADER_H */ diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 4ec99a55ac305a4829e894030226dacb404b0d2d..8ec4667c48d497d505a256494d573245a461b841 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -54,6 +54,9 @@ module_param_named(can_queue, tcm_loop_can_queue, uint, 0644); static unsigned int tcm_loop_cmd_per_lun = 1024; module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644); +static unsigned short tcm_loop_sg_tablesize = 256; +module_param_named(sg_tablesize, tcm_loop_sg_tablesize, ushort, 0644); + /* * Called from struct target_core_fabric_ops->check_stop_free() */ @@ -301,7 +304,6 @@ static const struct scsi_host_template tcm_loop_driver_template = { .eh_device_reset_handler = tcm_loop_device_reset, .eh_target_reset_handler = tcm_loop_target_reset, .this_id = -1, - .sg_tablesize = 256, .max_sectors = 0xFFFF, .dma_boundary = PAGE_SIZE - 1, .module = THIS_MODULE, @@ -339,6 +341,7 @@ static int tcm_loop_driver_probe(struct device *dev) sh->nr_hw_queues = tcm_loop_nr_hw_queues; sh->can_queue = tcm_loop_can_queue; sh->cmd_per_lun = tcm_loop_cmd_per_lun; + sh->sg_tablesize = tcm_loop_sg_tablesize; host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 2e100b76914a06d37e757cc53fb3fadeefcb41cd..31d3bd1e3ebf8caea472012635a77e150c7b9f20 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include #include @@ -73,6 +75,7 @@ */ #define DATA_PAGES_PER_BLK_DEF 1 #define 
DATA_AREA_PAGES_DEF (256 * 1024) +#define ZC_DATA_AREA_PAGES_DEF (256 * 1024) #define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT)) #define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT)) @@ -123,6 +126,8 @@ struct tcmu_dev { #define TCMU_DEV_BIT_BLOCKED 2 #define TCMU_DEV_BIT_TMR_NOTIFY 3 #define TCMU_DEV_BIT_PLUGGED 4 +#define TCMU_DEV_BIT_READ_BYPASS_DATA_AREA 5 +#define TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA 6 unsigned long flags; struct uio_info uio_info; @@ -139,10 +144,12 @@ struct tcmu_dev { /* Must add data_off and mb_addr to get the address */ size_t data_off; int data_area_mb; + uint32_t zc_max_blocks; uint32_t max_blocks; size_t mmap_pages; struct mutex cmdr_lock; + struct rw_semaphore i_mmap_sem; struct list_head qfull_queue; struct list_head tmr_queue; @@ -153,6 +160,12 @@ struct tcmu_dev { uint32_t data_pages_per_blk; uint32_t data_blk_size; + uint32_t zc_dbi_max; + uint32_t zc_dbi_thresh; + unsigned long *zc_data_bitmap; + uint32_t read_zc_size; + uint32_t write_zc_size; + struct xarray commands; struct timer_list cmd_timer; @@ -178,6 +191,12 @@ struct tcmu_cmd { struct tcmu_dev *tcmu_dev; struct list_head queue_entry; + /* for zero_copy */ + struct mm_struct *vma_vm_mm; + struct vm_area_struct *vma; + struct iovec *iov; + int iov_cnt; + uint16_t cmd_id; /* Can't use se_cmd when cleaning up expired cmds, because if @@ -193,7 +212,11 @@ struct tcmu_cmd { #define TCMU_CMD_BIT_EXPIRED 0 #define TCMU_CMD_BIT_KEEP_BUF 1 +#define TCMU_CMD_BIT_ZEROCOPY 2 +#define TCMU_CMD_BIT_BYPASS_DATA_AREA 3 unsigned long flags; + + struct mutex cmd_lock; }; struct tcmu_tmr { @@ -497,10 +520,38 @@ static struct genl_family tcmu_genl_family __ro_after_init = { static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + unsigned long *data_bitmap; uint32_t i; + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags)) + data_bitmap = udev->zc_data_bitmap; + else + data_bitmap = udev->data_bitmap; + for (i = 0; i < len; i++) - clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); + clear_bit(tcmu_cmd->dbi[i], data_bitmap); +} + +static inline int tcmu_get_zc_empty_block(struct tcmu_dev *udev, + struct tcmu_cmd *tcmu_cmd, + int prev_dbi, int *iov_cnt) +{ + int dbi; + + dbi = find_first_zero_bit(udev->zc_data_bitmap, udev->zc_dbi_thresh); + if (dbi == udev->zc_dbi_thresh) + return -1; + + if (dbi > udev->zc_dbi_max) + udev->zc_dbi_max = dbi; + + set_bit(dbi, udev->zc_data_bitmap); + tcmu_cmd_set_dbi(tcmu_cmd, dbi); + + if (dbi != prev_dbi + 1) + *iov_cnt += 1; + + return dbi; } static inline int tcmu_get_empty_block(struct tcmu_dev *udev, @@ -552,7 +603,8 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev, } static int tcmu_get_empty_blocks(struct tcmu_dev *udev, - struct tcmu_cmd *tcmu_cmd, int length) + struct tcmu_cmd *tcmu_cmd, int length, + bool zero_copy) { /* start value of dbi + 1 must not be a valid dbi */ int dbi = -2; @@ -561,7 +613,10 @@ static int tcmu_get_empty_blocks(struct tcmu_dev *udev, for (; length > 0; length -= blk_size) { blk_data_len = min_t(uint32_t, length, blk_size); - dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, + if (zero_copy) + dbi = tcmu_get_zc_empty_block(udev, tcmu_cmd, dbi, &iov_cnt); + else + dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, &iov_cnt); if (dbi < 0) return -1; @@ -569,8 +624,40 @@ static int tcmu_get_empty_blocks(struct tcmu_dev *udev, return iov_cnt; } +static void tcmu_cmd_zerocopy_unmap(struct tcmu_cmd *cmd) +{ + struct mm_struct *mm; 
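+ /* cmd->vma_vm_mm and cmd->vma were captured in tcmu_ioctl_cmd_zerocopy(); zap every mapped iovec from that address space so the data pages can safely be reused */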
+ struct vm_area_struct *vma; + struct iovec *iov = cmd->iov; + unsigned long address; + int i; + + mm = cmd->vma_vm_mm; + vma = cmd->vma; + if (!mm) + return; + + if (mmget_not_zero(mm)) { + mmap_read_lock(mm); + for (i = 0; i < cmd->iov_cnt; i++) { + address = (unsigned long)iov->iov_base; + zap_page_range_single(vma, address, iov->iov_len, NULL); + iov++; + } + mmap_read_unlock(mm); + mmput(mm); + } + + cmd->vma_vm_mm = NULL; + cmd->vma = NULL; + mmdrop(mm); + kfree(cmd->iov); +} + static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) { + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags)) + tcmu_cmd_zerocopy_unmap(tcmu_cmd); kfree(tcmu_cmd->dbi); kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); } @@ -630,11 +717,67 @@ static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd, dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length); } +static void tcmu_set_cmd_bypass_data_area(struct tcmu_cmd *tcmu_cmd) +{ + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + + /* + * Zero copy is map sg pages to userspace, and bypass data area + * is copy data between sg pages and userspace buffer, so they + * are completely different. + */ + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags)) + return; + + if (se_cmd->data_direction == DMA_FROM_DEVICE && + test_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags)) + set_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); + + if (se_cmd->data_direction == DMA_TO_DEVICE && + test_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags)) + set_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); +} + +static void tcmu_set_cmd_do_zero_copy(struct tcmu_cmd *tcmu_cmd) +{ + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + struct scatterlist *data_sg = se_cmd->t_data_sg, *sg; + unsigned int data_nents = se_cmd->t_data_nents; + int i; + + if ((se_cmd->se_cmd_flags & SCF_BIDI) || !se_cmd->data_length || + !IS_ALIGNED(se_cmd->data_length, PAGE_SIZE)) + return; + + if ((se_cmd->data_direction == DMA_FROM_DEVICE) && + (!udev->read_zc_size || + se_cmd->data_length < (udev->read_zc_size << 10))) + return; + + if ((se_cmd->data_direction == DMA_TO_DEVICE) && + (!udev->write_zc_size || + se_cmd->data_length < (udev->write_zc_size << 10))) + return; + + /* Now, check every sg pages is aligned. 
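+ * Each segment must start and end on a page boundary because the backing pages are inserted straight into the daemon's VMA; a partially filled page would expose unrelated data to userspace.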
*/ + for_each_sg(data_sg, sg, data_nents, i) { + if ((sg->offset && !IS_ALIGNED(sg->offset, PAGE_SIZE)) || + !IS_ALIGNED(sg->length, PAGE_SIZE)) + break; + } + if (i == data_nents) + set_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); +} + static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) { struct se_device *se_dev = se_cmd->se_dev; struct tcmu_dev *udev = TCMU_DEV(se_dev); struct tcmu_cmd *tcmu_cmd; + bool zero_copy; + bool bypass_data_area; tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO); if (!tcmu_cmd) @@ -643,13 +786,24 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) INIT_LIST_HEAD(&tcmu_cmd->queue_entry); tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; - - tcmu_cmd_set_block_cnts(tcmu_cmd); - tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), - GFP_NOIO); - if (!tcmu_cmd->dbi) { - kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); - return NULL; + mutex_init(&tcmu_cmd->cmd_lock); + + tcmu_set_cmd_do_zero_copy(tcmu_cmd); + tcmu_set_cmd_bypass_data_area(tcmu_cmd); + + zero_copy = test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); + bypass_data_area = test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); + if (zero_copy || !bypass_data_area) { + tcmu_cmd_set_block_cnts(tcmu_cmd); + tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), + GFP_NOIO); + if (!tcmu_cmd->dbi) { + kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); + return NULL; + } + } else { + tcmu_cmd->dbi_cnt = 0; + tcmu_cmd->dbi = NULL; } return tcmu_cmd; @@ -852,37 +1006,51 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size) * Called with ring lock held. */ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, - int *iov_bidi_cnt) + int *iov_bidi_cnt, bool zero_copy) { int space, iov_cnt = 0, ret = 0; + unsigned long *data_bitmap; + uint32_t *dbi_thresh, max_blocks; - if (!cmd->dbi_cnt) + if (test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &cmd->flags)) goto wr_iov_cnts; + if (zero_copy) { + data_bitmap = udev->zc_data_bitmap; + dbi_thresh = &udev->zc_dbi_thresh; + max_blocks = udev->zc_max_blocks; + } else { + data_bitmap = udev->data_bitmap; + dbi_thresh = &udev->dbi_thresh; + max_blocks = udev->max_blocks; + } + /* try to check and get the data blocks as needed */ - space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); + space = spc_bitmap_free(data_bitmap, *dbi_thresh); if (space < cmd->dbi_cnt) { - unsigned long blocks_left = - (udev->max_blocks - udev->dbi_thresh) + space; + unsigned long blocks_left = max_blocks - *dbi_thresh + space; if (blocks_left < cmd->dbi_cnt) { - pr_debug("no data space: only %lu available, but ask for %u\n", + pr_debug("no data space[%s]: only %lu available, but ask for %u\n", ++ zero_copy ? 
"zero copy" : "non zero copy", blocks_left * udev->data_blk_size, cmd->dbi_cnt * udev->data_blk_size); return -1; } - udev->dbi_thresh += cmd->dbi_cnt; - if (udev->dbi_thresh > udev->max_blocks) - udev->dbi_thresh = udev->max_blocks; + *dbi_thresh += cmd->dbi_cnt; + if (*dbi_thresh > max_blocks) + *dbi_thresh = max_blocks; } - iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length); + iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length, + zero_copy); if (iov_cnt < 0) return -1; if (cmd->dbi_bidi_cnt) { - ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi); + ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi, + zero_copy); if (ret < 0) return -1; } @@ -1023,6 +1191,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) uint32_t blk_size = udev->data_blk_size; /* size of data buffer needed */ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size; + bool zero_copy = test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); + bool bypass_data_area = test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); *scsi_err = TCM_NO_SENSE; @@ -1046,7 +1216,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) return -1; } - iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt); + iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt, zero_copy); if (iov_cnt < 0) goto free_and_queue; @@ -1095,16 +1265,18 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) tcmu_cmd_reset_dbi_cur(tcmu_cmd); iov = &entry->req.iov[0]; - if (se_cmd->data_direction == DMA_TO_DEVICE || - se_cmd->se_cmd_flags & SCF_BIDI) - scatter_data_area(udev, tcmu_cmd, &iov); - else - tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); + if (zero_copy || !bypass_data_area) { + if (((se_cmd->data_direction == DMA_TO_DEVICE) && !zero_copy) || + se_cmd->se_cmd_flags & SCF_BIDI) + scatter_data_area(udev, tcmu_cmd, &iov); + else + tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); + } entry->req.iov_cnt = iov_cnt - iov_bidi_cnt; /* Handle BIDI commands */ - if (se_cmd->se_cmd_flags & SCF_BIDI) { + if ((se_cmd->se_cmd_flags & SCF_BIDI) && !bypass_data_area) { iov++; tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); entry->req.iov_bidi_cnt = iov_bidi_cnt; @@ -1113,6 +1285,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); entry->hdr.cmd_id = tcmu_cmd->cmd_id; + if (zero_copy) { + int i; + struct iovec *tiov; + + tiov = &entry->req.iov[0]; + for (i = 0; i < entry->req.iov_cnt; i++) { + tiov->iov_base = tiov->iov_base + + (TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT); + tiov++; + } + entry->hdr.kflags |= TCMU_KFLAG_ZERO_COPY; + } + + if (bypass_data_area) + entry->hdr.kflags |= TCMU_KFLAG_BYPASS_DATA_AREA; tcmu_hdr_set_len(&entry->hdr.len_op, command_size); @@ -1368,6 +1555,15 @@ static bool tcmu_handle_completion(struct tcmu_cmd *cmd, else se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; } + + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) { + tcmu_cmd_zerocopy_unmap(cmd); + goto done; + } + + if (test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &cmd->flags)) + goto done; + if (se_cmd->se_cmd_flags & SCF_BIDI) { /* Get Data-In buffer before clean up */ gather_data_area(udev, cmd, true, read_len); @@ -1522,11 +1718,15 @@ static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) if (!time_after_eq(jiffies, cmd->deadline)) return; + mutex_lock(&cmd->cmd_lock); + if 
(test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) + tcmu_cmd_zerocopy_unmap(cmd); set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); list_del_init(&cmd->queue_entry); se_cmd = cmd->se_cmd; se_cmd->priv = NULL; cmd->se_cmd = NULL; + mutex_unlock(&cmd->cmd_lock); pr_debug("Timing out inflight cmd %u on dev %s.\n", cmd->cmd_id, cmd->tcmu_dev->name); @@ -1619,10 +1819,14 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF; udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; + udev->zc_max_blocks = ZC_DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; udev->cmdr_size = CMDR_SIZE_DEF; udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); mutex_init(&udev->cmdr_lock); + udev->read_zc_size = 0; + udev->write_zc_size = 0; + init_rwsem(&udev->i_mmap_sem); INIT_LIST_HEAD(&udev->node); INIT_LIST_HEAD(&udev->timedout_entry); @@ -1740,6 +1944,7 @@ static void tcmu_dev_kref_release(struct kref *kref) tcmu_blocks_release(udev, 0, udev->dbi_max); bitmap_free(udev->data_bitmap); + bitmap_free(udev->zc_data_bitmap); mutex_unlock(&udev->cmdr_lock); pr_debug("dev_kref_release\n"); @@ -1840,12 +2045,12 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) { struct page *page; - mutex_lock(&udev->cmdr_lock); + down_read(&udev->i_mmap_sem); page = xa_load(&udev->data_pages, dpi); if (likely(page)) { get_page(page); lock_page(page); - mutex_unlock(&udev->cmdr_lock); + up_read(&udev->i_mmap_sem); return page; } @@ -1855,7 +2060,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) */ pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n", dpi, udev->name); - mutex_unlock(&udev->cmdr_lock); + up_read(&udev->i_mmap_sem); return NULL; } @@ -1928,7 +2133,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) { struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); + vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP); vma->vm_ops = &tcmu_vm_ops; vma->vm_private_data = udev; @@ -1942,6 +2147,109 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) return 0; } +#define TCMU_ZEROCOPY_PAGE_BATCH 32 + +static inline int tcmu_zerocopy_one_seg(struct iovec *iov, + struct vm_area_struct *vma, + struct sg_page_iter *sgiter) +{ + struct page *pages[TCMU_ZEROCOPY_PAGE_BATCH]; + unsigned int len = iov->iov_len; + unsigned long address = (unsigned long)iov->iov_base; + unsigned long pages_remaining, pg_index = 0; + struct page *page; + int ret; + + while (len > 0) { + __sg_page_iter_next(sgiter); + page = sg_page_iter_page(sgiter); + pages[pg_index++] = page; + len -= PAGE_SIZE; + if (pg_index == TCMU_ZEROCOPY_PAGE_BATCH || !len) { + pages_remaining = pg_index; + ret = vm_insert_pages_mkspecial(vma, address, pages, + &pages_remaining); + if (ret < 0) { + pr_err("vm insert pages failed, error code: %d\n", ret); + return ret; + } + address = address + pg_index * PAGE_SIZE; + pg_index = 0; + } + } + + return 0; +} + +long tcmu_ioctl_cmd_zerocopy(struct tcmu_dev *udev, unsigned long arg) +{ + struct tcmu_cmd *cmd; + struct se_cmd *se_cmd; + struct scatterlist *data_sg; + unsigned int data_nents; + struct tcmu_cmd_zerocopy zc; + struct iovec *iov, *tiov; + struct sg_page_iter sgiter; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int i, ret = 0; + + if (copy_from_user(&zc, (struct tcmu_cmd_zerocopy __user *)arg, sizeof(zc))) + return 
-EFAULT; + + if (zc.iov_cnt <= 0) + return -EINVAL; + + iov = kmalloc_array(zc.iov_cnt, sizeof(struct iovec), GFP_KERNEL); + if (!iov) + return -ENOMEM; + if (copy_from_user(iov, zc.iov, sizeof(struct iovec) * zc.iov_cnt)) { + kfree(iov); + return -EFAULT; + } + + mutex_lock(&udev->cmdr_lock); + mmap_read_lock(mm); + cmd = xa_load(&udev->commands, zc.cmd_id); + if (!cmd) { + ret = -EINVAL; + kfree(iov); + pr_err("tcmu zero copy: cmd_id %d not found\n", zc.cmd_id); + goto out; + } + se_cmd = cmd->se_cmd; + + vma = find_vma(current->mm, (unsigned long)iov->iov_base); + if (!vma) { + ret = -EINVAL; + kfree(iov); + pr_err("tcmu zero copy: invalid iov_base\n"); + goto out; + } + data_sg = se_cmd->t_data_sg; + data_nents = se_cmd->t_data_nents; + __sg_page_iter_start(&sgiter, data_sg, data_nents, 0); + tiov = iov; + for (i = 0; i < zc.iov_cnt; i++) { + ret = tcmu_zerocopy_one_seg(tiov, vma, &sgiter); + if (ret < 0) { + kfree(iov); + goto out; + } + tiov++; + } + + cmd->iov = iov; + cmd->iov_cnt = zc.iov_cnt; + cmd->vma_vm_mm = vma->vm_mm; + cmd->vma = vma; + mmgrab(cmd->vma_vm_mm); +out: + mmap_read_unlock(mm); + mutex_unlock(&udev->cmdr_lock); + return ret; +} + static int tcmu_open(struct uio_info *info, struct inode *inode) { struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); @@ -2000,6 +2308,108 @@ static int tcmu_release(struct uio_info *info, struct inode *inode) return 0; } +static long tcmu_do_copy_data(struct tcmu_cmd *tcmu_cmd, + struct iovec __user *uiovec, + unsigned int vcnt, + bool is_copy_to_sgl) +{ + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov = iovstack; + struct iov_iter iter; + ssize_t ret; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + struct scatterlist *data_sg, *sg; + int i; + unsigned int data_nents; + + if (se_cmd->se_cmd_flags & SCF_BIDI) { + data_sg = se_cmd->t_bidi_data_sg; + data_nents = se_cmd->t_bidi_data_nents; + } else { + data_sg = se_cmd->t_data_sg; + data_nents = se_cmd->t_data_nents; + } + + ret = import_iovec(is_copy_to_sgl ? ITER_SOURCE : ITER_DEST, + uiovec, vcnt, ARRAY_SIZE(iovstack), &iov, &iter); + if (ret < 0) { + pr_err("import iovec failed.\n"); + return -EFAULT; + } + + for_each_sg(data_sg, sg, data_nents, i) { + if (is_copy_to_sgl) + ret = copy_page_from_iter(sg_page(sg), sg->offset, sg->length, &iter); + else + ret = copy_page_to_iter(sg_page(sg), sg->offset, sg->length, &iter); + if (ret < 0) { + pr_err("copy failed.\n"); + break; + } + } + kfree(iov); + return ret < 0 ? 
-EFAULT : 0; +} + +static long tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, + unsigned long arg, + bool is_copy_to_sgl) +{ + struct tcmu_data_xfer __user *uxfer = (struct tcmu_data_xfer __user *)arg; + struct tcmu_data_xfer xfer; + struct tcmu_cmd *tcmu_cmd; + long ret; + + if (copy_from_user(&xfer, uxfer, sizeof(xfer))) + return -EFAULT; + + tcmu_cmd = xa_load(&udev->commands, xfer.cmd_id); + if (!tcmu_cmd) { + pr_err("Can not find tcmu command, cmd_id:%d\n", xfer.cmd_id); + set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); + return -EFAULT; + } + + mutex_lock(&tcmu_cmd->cmd_lock); + if (!test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags)) { + ret = -EINVAL; + goto out; + } + + if (test_bit(TCMU_CMD_BIT_EXPIRED, &tcmu_cmd->flags)) { + pr_err("Command is expired, cmd_id:%d\n", xfer.cmd_id); + ret = -EFAULT; + goto out; + } + + ret = tcmu_do_copy_data(tcmu_cmd, xfer.iovec, + xfer.iov_cnt, is_copy_to_sgl); +out: + mutex_unlock(&tcmu_cmd->cmd_lock); + return ret; +} + +static long tcmu_ioctl(struct uio_info *info, unsigned int cmd, unsigned long arg) +{ + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); + long ret; + + switch (cmd) { + case TCMU_IOCTL_CMD_COPY_TO_SGL: + ret = tcmu_bypass_data_area_copy_data(udev, arg, true); + break; + case TCMU_IOCTL_CMD_COPY_FROM_SGL: + ret = tcmu_bypass_data_area_copy_data(udev, arg, false); + break; + case TCMU_IOCTL_CMD_ZEROCOPY: + ret = tcmu_ioctl_cmd_zerocopy(udev, arg); + break; + default: + ret = -EINVAL; + } + return ret; +} + static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) { struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; @@ -2200,6 +2610,7 @@ static int tcmu_configure_device(struct se_device *dev) struct uio_info *info; struct tcmu_mailbox *mb; size_t data_size; + size_t zc_data_size; int ret = 0; ret = tcmu_update_uio_info(udev); @@ -2210,10 +2621,11 @@ static int tcmu_configure_device(struct se_device *dev) mutex_lock(&udev->cmdr_lock); udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL); + udev->zc_data_bitmap = bitmap_zalloc(udev->zc_max_blocks, GFP_KERNEL); mutex_unlock(&udev->cmdr_lock); - if (!udev->data_bitmap) { + if (!udev->data_bitmap || !udev->zc_data_bitmap) { ret = -ENOMEM; - goto err_bitmap_alloc; + goto err_vzalloc; } mb = vzalloc(udev->cmdr_size + CMDR_OFF); @@ -2227,9 +2639,12 @@ static int tcmu_configure_device(struct se_device *dev) udev->cmdr = (void *)mb + CMDR_OFF; udev->data_off = udev->cmdr_size + CMDR_OFF; data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT; - udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT; + zc_data_size = (udev->zc_max_blocks * udev->data_pages_per_blk) << PAGE_SHIFT; + udev->mmap_pages = (data_size + zc_data_size + udev->cmdr_size + + CMDR_OFF) >> PAGE_SHIFT; udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE; udev->dbi_thresh = 0; /* Default in Idle state */ + udev->zc_dbi_thresh = 0; /* Default in Idle state */ /* Initialise the mailbox of the ring buffer */ mb->version = TCMU_MAILBOX_VERSION; @@ -2247,7 +2662,8 @@ static int tcmu_configure_device(struct se_device *dev) info->mem[0].name = "tcm-user command & data buffer"; info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; - info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF; + info->mem[0].size = data_size + zc_data_size + + udev->cmdr_size + CMDR_OFF; info->mem[0].memtype = UIO_MEM_NONE; info->irqcontrol = tcmu_irqcontrol; @@ -2256,6 +2672,7 @@ static int tcmu_configure_device(struct se_device *dev) info->mmap = 
tcmu_mmap; info->open = tcmu_open; info->release = tcmu_release; + info->ioctl = tcmu_ioctl; ret = uio_register_device(tcmu_root_device, info); if (ret) @@ -2302,7 +2719,8 @@ static int tcmu_configure_device(struct se_device *dev) err_vzalloc: bitmap_free(udev->data_bitmap); udev->data_bitmap = NULL; -err_bitmap_alloc: + bitmap_free(udev->zc_data_bitmap); + udev->zc_data_bitmap = NULL; kfree(info->name); info->name = NULL; @@ -3137,6 +3555,130 @@ static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *pa } CONFIGFS_ATTR_WO(tcmu_, free_kept_buf); +static ssize_t tcmu_read_bypass_data_area_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + if (test_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags)) + return snprintf(page, PAGE_SIZE, "%s\n", "true"); + else + return snprintf(page, PAGE_SIZE, "%s\n", "false"); +} + +static ssize_t tcmu_read_bypass_data_area_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + bool bypass_data_area; + int ret; + + ret = kstrtobool(page, &bypass_data_area); + if (ret < 0) + return ret; + + if (bypass_data_area) + set_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags); + else + clear_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags); + + return count; +} +CONFIGFS_ATTR(tcmu_, read_bypass_data_area); + +static ssize_t tcmu_write_bypass_data_area_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + if (test_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags)) + return snprintf(page, PAGE_SIZE, "%s\n", "true"); + else + return snprintf(page, PAGE_SIZE, "%s\n", "false"); +} + +static ssize_t tcmu_write_bypass_data_area_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + bool bypass_data_area; + int ret; + + ret = kstrtobool(page, &bypass_data_area); + if (ret < 0) + return ret; + + if (bypass_data_area) + set_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags); + else + clear_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags); + + return count; +} +CONFIGFS_ATTR(tcmu_, write_bypass_data_area); + +static ssize_t tcmu_read_zc_size_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%ukb\n", udev->read_zc_size); +} + +static ssize_t tcmu_read_zc_size_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + uint32_t read_zc_size; + int ret; + + ret = kstrtou32(page, 0, &read_zc_size); + if (ret < 0) + return ret; + + udev->read_zc_size = read_zc_size; + + return count; +} +CONFIGFS_ATTR(tcmu_, read_zc_size); + +static ssize_t tcmu_write_zc_size_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); 
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%ukb\n", udev->write_zc_size); +} + +static ssize_t tcmu_write_zc_size_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + uint32_t write_zc_size; + int ret; + + ret = kstrtou32(page, 0, &write_zc_size); + if (ret < 0) + return ret; + + udev->write_zc_size = write_zc_size; + + return count; +} +CONFIGFS_ATTR(tcmu_, write_zc_size); + static struct configfs_attribute *tcmu_attrib_attrs[] = { &tcmu_attr_cmd_time_out, &tcmu_attr_qfull_time_out, @@ -3148,6 +3690,10 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = { &tcmu_attr_emulate_write_cache, &tcmu_attr_tmr_notification, &tcmu_attr_nl_reply_supported, + &tcmu_attr_read_bypass_data_area, + &tcmu_attr_write_bypass_data_area, + &tcmu_attr_read_zc_size, + &tcmu_attr_write_zc_size, NULL, }; @@ -3212,6 +3758,7 @@ static void find_free_blocks(void) continue; } + down_write(&udev->i_mmap_sem); end = udev->dbi_max + 1; block = find_last_bit(udev->data_bitmap, end); if (block == udev->dbi_max) { @@ -3219,6 +3766,7 @@ static void find_free_blocks(void) * The last bit is dbi_max, so it is not possible to * reclaim any blocks. */ + up_write(&udev->i_mmap_sem); mutex_unlock(&udev->cmdr_lock); continue; } else if (block == end) { @@ -3246,6 +3794,7 @@ static void find_free_blocks(void) off = udev->data_off + (loff_t)start * udev->data_blk_size; unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); + up_write(&udev->i_mmap_sem); mutex_unlock(&udev->cmdr_lock); total_pages_freed += pages_freed; diff --git a/drivers/tty/serial/8250/8250_sunway.c b/drivers/tty/serial/8250/8250_sunway.c new file mode 100644 index 0000000000000000000000000000000000000000..9e3db232c8325528c24d76be8138d5e83bb555ae --- /dev/null +++ b/drivers/tty/serial/8250/8250_sunway.c @@ -0,0 +1,786 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Synopsys SUNWAY 8250 driver. + * + * Copyright 2011 Picochip, Jamie Iles. + * Copyright 2013 Intel Corporation + * + * The Synopsys SUNWAY 8250 has an extra feature whereby it detects if the + * LCR is written whilst busy. If it is, then a busy detect interrupt is + * raised, the LCR needs to be rewritten and the uart status register read. 
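+ *
+ * In this driver that dance is handled by sunway8250_check_lcr(): the LCR
+ * write is re-issued until it takes effect, with sunway8250_force_idle()
+ * clearing the FIFOs and draining the receiver between attempts.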
+ */ +#include <linux/acpi.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/property.h> +#include <linux/reset.h> +#include <linux/slab.h> +#include <linux/workqueue.h> + +#include <asm/byteorder.h> + +#include "8250.h" + +/* Offsets for the DesignWare specific registers */ +#define SUNWAY_UART_USR 0x1f /* UART Status Register */ +#define SUNWAY_UART_DLF 0xc0 /* Divisor Latch Fraction Register */ +#define SUNWAY_UART_CPR 0xf4 /* Component Parameter Register */ +#define SUNWAY_UART_UCV 0xf8 /* UART Component Version */ + +/* Component Parameter Register bits */ +#define SUNWAY_UART_CPR_ABP_DATA_WIDTH (3 << 0) +#define SUNWAY_UART_CPR_AFCE_MODE (1 << 4) +#define SUNWAY_UART_CPR_THRE_MODE (1 << 5) +#define SUNWAY_UART_CPR_SIR_MODE (1 << 6) +#define SUNWAY_UART_CPR_SIR_LP_MODE (1 << 7) +#define SUNWAY_UART_CPR_ADDITIONAL_FEATURES (1 << 8) +#define SUNWAY_UART_CPR_FIFO_ACCESS (1 << 9) +#define SUNWAY_UART_CPR_FIFO_STAT (1 << 10) +#define SUNWAY_UART_CPR_SHADOW (1 << 11) +#define SUNWAY_UART_CPR_ENCODED_PARMS (1 << 12) +#define SUNWAY_UART_CPR_DMA_EXTRA (1 << 13) +#define SUNWAY_UART_CPR_FIFO_MODE (0xff << 16) +/* Helper for fifo size calculation */ +#define SUNWAY_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16) + +/* DesignWare specific register fields */ +#define SUNWAY_UART_MCR_SIRE BIT(6) + +struct sunway8250_data { + u8 usr_reg; + u8 dlf_size; + int line; + int msr_mask_on; + int msr_mask_off; + struct clk *clk; + struct clk *pclk; + struct reset_control *rst; + struct uart_8250_dma dma; + + unsigned int skip_autocfg:1; + unsigned int uart_16550_compatible:1; +}; + +static inline u32 sunway8250_readl_ext(struct uart_port *p, int offset) +{ + if (p->iotype == UPIO_MEM32BE) + return ioread32be(p->membase + offset); + return readl(p->membase + offset); +} + +static inline void sunway8250_writel_ext(struct uart_port *p, int offset, u32 reg) +{ + if (p->iotype == UPIO_MEM32BE) + iowrite32be(reg, p->membase + offset); + else + writel(reg, p->membase + offset); +} + +static inline int sunway8250_modify_msr(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + /* Override any modem control signals if needed */ + if (offset == UART_MSR) { + value |= d->msr_mask_on; + value &= ~d->msr_mask_off; + } + + return value; +} + +static void sunway8250_force_idle(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + + serial8250_clear_and_reinit_fifos(up); + (void)p->serial_in(p, UART_RX); +} + +static void sunway8250_check_lcr(struct uart_port *p, int value) +{ + void __iomem *offset = p->membase + (UART_LCR << p->regshift); + int tries = 1000; + + /* Make sure LCR write wasn't ignored */ + while (tries--) { + unsigned int lcr = p->serial_in(p, UART_LCR); + + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) + return; + + sunway8250_force_idle(p); + +#ifdef CONFIG_64BIT + if (p->type == PORT_OCTEON) { + __raw_writeq(value & 0xff, offset); + continue; + } +#endif + if (p->iotype == UPIO_MEM32) + writel(value, offset); + else if (p->iotype == UPIO_MEM32BE) + iowrite32be(value, offset); + else + writeb(value, offset); + } + /* + * FIXME: this deadlocks if port->lock is already held + * dev_err(p->dev, "Couldn't set LCR to %d\n", value); + */ +} + +/* Returns once the transmitter is empty or we run out of retries */ +static void sunway8250_tx_wait_empty(struct uart_port *p) +{ + unsigned int tries = 20000; + unsigned int delay_threshold = tries - 1000; + unsigned int lsr; + + while (tries--) { + lsr = readb(p->membase + (UART_LSR << p->regshift)); + if (lsr & UART_LSR_TEMT) 
+ break; + + /* + * The device is first given a chance to empty without delay, + * to avoid slowdowns at high bitrates. If after 1000 tries + * the buffer has still not emptied, allow more time for low- + * speed links. + */ + if (tries < delay_threshold) + udelay(1); + } +} + +static void sunway8250_serial_out38x(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + /* Allow the TX to drain before we reconfigure */ + if (offset == UART_LCR) + sunway8250_tx_wait_empty(p); + + writeb(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + + +static void sunway8250_serial_out(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + writeb(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in(struct uart_port *p, int offset) +{ + unsigned int value = readb(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +#ifdef CONFIG_64BIT +static unsigned int sunway8250_serial_inq(struct uart_port *p, int offset) +{ + unsigned int value; + + value = (u8)__raw_readq(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +static void sunway8250_serial_outq(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + value &= 0xff; + __raw_writeq(value, p->membase + (offset << p->regshift)); + /* Read back to ensure register write ordering. */ + __raw_readq(p->membase + (UART_LCR << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} +#endif /* CONFIG_64BIT */ + +static void sunway8250_serial_out32(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + writel(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in32(struct uart_port *p, int offset) +{ + unsigned int value = readl(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +static void sunway8250_serial_out32be(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + iowrite32be(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in32be(struct uart_port *p, int offset) +{ + unsigned int value = ioread32be(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + + +static int sunway8250_handle_irq(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + struct sunway8250_data *d = p->private_data; + unsigned int iir = p->serial_in(p, UART_IIR); + unsigned int status; + unsigned long flags; + + /* + * There are ways to get Designware-based UARTs into a state where + * they are asserting UART_IIR_RX_TIMEOUT but there is no actual + * data available. If we see such a case then we'll do a bogus + * read. If we don't do this then the "RX TIMEOUT" interrupt will + * fire forever. + * + * This problem has only been observed so far when not in DMA mode + * so we limit the workaround only to non-DMA mode. 
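+ *
+ * The workaround itself is the LSR check below: if neither data-ready
+ * nor break is pending, a dummy read of UART_RX clears the stuck
+ * timeout condition.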
+ */ + if (!up->dma && ((iir & 0x3f) == UART_IIR_RX_TIMEOUT)) { + spin_lock_irqsave(&p->lock, flags); + status = p->serial_in(p, UART_LSR); + + if (!(status & (UART_LSR_DR | UART_LSR_BI))) + (void) p->serial_in(p, UART_RX); + + spin_unlock_irqrestore(&p->lock, flags); + } + + if (serial8250_handle_irq(p, iir)) + return 1; + + if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { + /* Clear the USR */ + (void)p->serial_in(p, d->usr_reg); + + return 1; + } + + return 0; +} + +static void +sunway8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old) +{ + if (!state) + pm_runtime_get_sync(port->dev); + + serial8250_do_pm(port, state, old); + + if (state) + pm_runtime_put_sync_suspend(port->dev); +} + +static void sunway8250_set_termios(struct uart_port *p, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud = tty_termios_baud_rate(termios); + struct sunway8250_data *d = p->private_data; + long rate; + int ret; + + if (IS_ERR(d->clk)) + goto out; + + clk_disable_unprepare(d->clk); + rate = clk_round_rate(d->clk, baud * 16); + if (rate < 0) + ret = rate; + else if (rate == 0) + ret = -ENOENT; + else + ret = clk_set_rate(d->clk, rate); + clk_prepare_enable(d->clk); + + if (!ret) + p->uartclk = rate; + +out: + p->status &= ~UPSTAT_AUTOCTS; + if (termios->c_cflag & CRTSCTS) + p->status |= UPSTAT_AUTOCTS; + + serial8250_do_set_termios(p, termios, old); +} + +static void sunway8250_set_ldisc(struct uart_port *p, struct ktermios *termios) +{ + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int mcr = p->serial_in(p, UART_MCR); + + if (up->capabilities & UART_CAP_IRDA) { + if (termios->c_line == N_IRDA) + mcr |= SUNWAY_UART_MCR_SIRE; + else + mcr &= ~SUNWAY_UART_MCR_SIRE; + + p->serial_out(p, UART_MCR, mcr); + } + serial8250_do_set_ldisc(p, termios); +} + +/* + * sunway8250_fallback_dma_filter will prevent the UART from getting just any free + * channel on platforms that have DMA engines, but don't have any channels + * assigned to the UART. + * + * REVISIT: This is a work around for limitation in the DMA Engine API. Once the + * core problem is fixed, this function is no longer needed. 
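+ *
+ * With this filter in place every offered channel is rejected, so the
+ * port falls back to PIO unless sunway8250_quirks() installs the iDMA
+ * filter for platforms that provide an "lpss_priv" resource.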
+ */ +static bool sunway8250_fallback_dma_filter(struct dma_chan *chan, void *param) +{ + return false; +} + +static bool sunway8250_idma_filter(struct dma_chan *chan, void *param) +{ + return param == chan->device->dev; +} + +/* + * divisor = div(I) + div(F) + * "I" means integer, "F" means fractional + * quot = div(I) = clk / (16 * baud) + * frac = div(F) * 2^dlf_size + * + * let rem = clk % (16 * baud) + * we have: div(F) * (16 * baud) = rem + * so frac = 2^dlf_size * rem / (16 * baud) = (rem << dlf_size) / (16 * baud) + */ +static unsigned int sunway8250_get_divisor(struct uart_port *p, + unsigned int baud, + unsigned int *frac) +{ + unsigned int quot, rem, base_baud = baud * 16; + struct sunway8250_data *d = p->private_data; + + quot = p->uartclk / base_baud; + rem = p->uartclk % base_baud; + *frac = DIV_ROUND_CLOSEST(rem << d->dlf_size, base_baud); + + return quot; +} + +static void sunway8250_set_divisor(struct uart_port *p, unsigned int baud, + unsigned int quot, unsigned int quot_frac) +{ + sunway8250_writel_ext(p, SUNWAY_UART_DLF, quot_frac); + serial8250_do_set_divisor(p, baud, quot, quot_frac); +} + +static void sunway8250_quirks(struct uart_port *p, struct sunway8250_data *data) +{ + if (p->dev->of_node) { + struct device_node *np = p->dev->of_node; + int id; + + /* get index of serial line, if found in DT aliases */ + id = of_alias_get_id(np, "serial"); + if (id >= 0) + p->line = id; +#ifdef CONFIG_64BIT + if (of_device_is_compatible(np, "cavium,octeon-3860-uart")) { + p->serial_in = sunway8250_serial_inq; + p->serial_out = sunway8250_serial_outq; + p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE; + p->type = PORT_OCTEON; + data->usr_reg = 0x27; + data->skip_autocfg = true; + } +#endif + if (of_device_is_big_endian(p->dev->of_node)) { + p->iotype = UPIO_MEM32BE; + p->serial_in = sunway8250_serial_in32be; + p->serial_out = sunway8250_serial_out32be; + } + if (of_device_is_compatible(np, "marvell,armada-38x-uart")) + p->serial_out = sunway8250_serial_out38x; + + } else if (acpi_dev_present("APMC0D08", NULL, -1)) { + p->iotype = UPIO_MEM32; + p->regshift = 2; + p->serial_in = sunway8250_serial_in32; + data->uart_16550_compatible = true; + } + + /* Platforms with iDMA 64-bit */ + if (platform_get_resource_byname(to_platform_device(p->dev), + IORESOURCE_MEM, "lpss_priv")) { + data->dma.rx_param = p->dev->parent; + data->dma.tx_param = p->dev->parent; + data->dma.fn = sunway8250_idma_filter; + } +} + +static void sunway8250_setup_port(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + u32 reg; + + /* + * If the Component Version Register returns zero, we know that + * ADDITIONAL_FEATURES are not enabled. No need to go any further. 
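+ *
+ * Otherwise the code below sizes the Divisor Latch Fraction register by
+ * writing all-ones to SUNWAY_UART_DLF and reading it back; fls() of the
+ * result gives the implemented fraction width in bits.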
+ */ + reg = sunway8250_readl_ext(p, SUNWAY_UART_UCV); + if (!reg) + return; + + dev_dbg(p->dev, "Designware UART version %c.%c%c\n", + (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff); + + sunway8250_writel_ext(p, SUNWAY_UART_DLF, ~0U); + reg = sunway8250_readl_ext(p, SUNWAY_UART_DLF); + sunway8250_writel_ext(p, SUNWAY_UART_DLF, 0); + + if (reg) { + struct sunway8250_data *d = p->private_data; + + d->dlf_size = fls(reg); + p->get_divisor = sunway8250_get_divisor; + p->set_divisor = sunway8250_set_divisor; + } + + reg = sunway8250_readl_ext(p, SUNWAY_UART_CPR); + if (!reg) + return; + + /* Select the type based on fifo */ + if (reg & SUNWAY_UART_CPR_FIFO_MODE) { + p->type = PORT_16550A; + p->flags |= UPF_FIXED_TYPE; + p->fifosize = SUNWAY_UART_CPR_FIFO_SIZE(reg); + up->capabilities = UART_CAP_FIFO; + } + + if (reg & SUNWAY_UART_CPR_AFCE_MODE) + up->capabilities |= UART_CAP_AFE; + + if (reg & SUNWAY_UART_CPR_SIR_MODE) + up->capabilities |= UART_CAP_IRDA; +} + +static int sunway8250_probe(struct platform_device *pdev) +{ + struct uart_8250_port uart = {}; + struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0); + struct uart_port *p = &uart.port; + struct device *dev = &pdev->dev; + struct sunway8250_data *data; + int err; + u32 val; + + if (!regs) { + dev_err(dev, "no registers defined\n"); + return -EINVAL; + } + + if (irq < 0) { + if (irq != -EPROBE_DEFER) + dev_err(dev, "cannot get irq\n"); + irq = 0; // Set serial poll mode + } + + spin_lock_init(&p->lock); + p->mapbase = regs->start; + p->irq = irq; + p->handle_irq = sunway8250_handle_irq; + p->pm = sunway8250_do_pm; + p->type = PORT_8250; + p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT; + p->dev = dev; + p->iotype = UPIO_MEM; + p->serial_in = sunway8250_serial_in; + p->serial_out = sunway8250_serial_out; + p->set_ldisc = sunway8250_set_ldisc; + p->set_termios = sunway8250_set_termios; + + p->membase = devm_ioremap(dev, regs->start, resource_size(regs)); + if (!p->membase) + return -ENOMEM; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dma.fn = sunway8250_fallback_dma_filter; + data->usr_reg = SUNWAY_UART_USR; + p->private_data = data; + + data->uart_16550_compatible = device_property_read_bool(dev, + "snps,uart-16550-compatible"); + + err = device_property_read_u32(dev, "reg-shift", &val); + if (!err) + p->regshift = val; + + err = device_property_read_u32(dev, "reg-io-width", &val); + if (!err && val == 4) { + p->iotype = UPIO_MEM32; + p->serial_in = sunway8250_serial_in32; + p->serial_out = sunway8250_serial_out32; + } + + if (device_property_read_bool(dev, "dcd-override")) { + /* Always report DCD as active */ + data->msr_mask_on |= UART_MSR_DCD; + data->msr_mask_off |= UART_MSR_DDCD; + } + + if (device_property_read_bool(dev, "dsr-override")) { + /* Always report DSR as active */ + data->msr_mask_on |= UART_MSR_DSR; + data->msr_mask_off |= UART_MSR_DDSR; + } + + if (device_property_read_bool(dev, "cts-override")) { + /* Always report CTS as active */ + data->msr_mask_on |= UART_MSR_CTS; + data->msr_mask_off |= UART_MSR_DCTS; + } + + if (device_property_read_bool(dev, "ri-override")) { + /* Always report Ring indicator as inactive */ + data->msr_mask_off |= UART_MSR_RI; + data->msr_mask_off |= UART_MSR_TERI; + } + + /* Always ask for fixed clock rate from a property. */ + device_property_read_u32(dev, "clock-frequency", &p->uartclk); + + /* If there is separate baudclk, get the rate from it. 
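+ * Try the named "baudclk" first and fall back to the device's default
+ * clock; an -EPROBE_DEFER from either lookup is propagated so the probe
+ * can be retried once the clock provider shows up.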
*/ + data->clk = devm_clk_get(dev, "baudclk"); + if (IS_ERR(data->clk) && PTR_ERR(data->clk) != -EPROBE_DEFER) + data->clk = devm_clk_get(dev, NULL); + if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (!IS_ERR_OR_NULL(data->clk)) { + err = clk_prepare_enable(data->clk); + if (err) + dev_warn(dev, "could not enable optional baudclk: %d\n", + err); + else + p->uartclk = clk_get_rate(data->clk); + } + + /* If no clock rate is defined, fail. */ + if (!p->uartclk) { + dev_err(dev, "clock rate not defined\n"); + err = -EINVAL; + goto err_clk; + } + + data->pclk = devm_clk_get(dev, "apb_pclk"); + if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_clk; + } + if (!IS_ERR(data->pclk)) { + err = clk_prepare_enable(data->pclk); + if (err) { + dev_err(dev, "could not enable apb_pclk\n"); + goto err_clk; + } + } + + data->rst = devm_reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(data->rst)) { + err = PTR_ERR(data->rst); + goto err_pclk; + } + reset_control_deassert(data->rst); + + sunway8250_quirks(p, data); + + /* If the Busy Functionality is not implemented, don't handle it */ + if (data->uart_16550_compatible) + p->handle_irq = NULL; + + if (!data->skip_autocfg) + sunway8250_setup_port(p); + + /* If we have a valid fifosize, try hooking up DMA */ + if (p->fifosize) { + data->dma.rxconf.src_maxburst = p->fifosize / 4; + data->dma.txconf.dst_maxburst = p->fifosize / 4; + uart.dma = &data->dma; + } + + data->line = serial8250_register_8250_port(&uart); + if (data->line < 0) { + err = data->line; + goto err_reset; + } + + platform_set_drvdata(pdev, data); + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +err_reset: + reset_control_assert(data->rst); + +err_pclk: + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + +err_clk: + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + return err; +} + +static int sunway8250_remove(struct platform_device *pdev) +{ + struct sunway8250_data *data = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + + serial8250_unregister_port(data->line); + + reset_control_assert(data->rst); + + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sunway8250_suspend(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + serial8250_suspend_port(data->line); + + return 0; +} + +static int sunway8250_resume(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + serial8250_resume_port(data->line); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int sunway8250_runtime_suspend(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + + return 0; +} + +static int sunway8250_runtime_resume(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + if (!IS_ERR(data->pclk)) + clk_prepare_enable(data->pclk); + + if (!IS_ERR(data->clk)) + clk_prepare_enable(data->clk); + + return 0; +} +#endif + +static const struct dev_pm_ops sunway8250_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sunway8250_suspend, sunway8250_resume) + SET_RUNTIME_PM_OPS(sunway8250_runtime_suspend, 
sunway8250_runtime_resume, NULL) +}; + +static const struct of_device_id sunway8250_of_match[] = { + { .compatible = "sw6,sunway-apb-uart" }, + { .compatible = "cavium,octeon-3860-uart" }, + { .compatible = "marvell,armada-38x-uart" }, + { .compatible = "renesas,rzn1-uart" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sunway8250_of_match); + +static const struct acpi_device_id sunway8250_acpi_match[] = { + { "INT33C4", 0 }, + { "INT33C5", 0 }, + { "INT3434", 0 }, + { "INT3435", 0 }, + { "80860F0A", 0 }, + { "8086228A", 0 }, + { "APMC0D08", 0}, + { "AMD0020", 0 }, + { "AMDI0020", 0 }, + { "BRCM2032", 0 }, + { "HISI0031", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, sunway8250_acpi_match); + +static struct platform_driver sunway8250_platform_driver = { + .driver = { + .name = "sunway-apb-uart", + .pm = &sunway8250_pm_ops, + .of_match_table = sunway8250_of_match, + .acpi_match_table = ACPI_PTR(sunway8250_acpi_match), + }, + .probe = sunway8250_probe, + .remove = sunway8250_remove, +}; + +module_platform_driver(sunway8250_platform_driver); + +MODULE_AUTHOR("Jamie Iles"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Synopsys DesignWare 8250 serial port driver"); +MODULE_ALIAS("platform:sunway-apb-uart"); diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index ee17cf5c44c6b046227093d2892716e506d3e49d..e8edd9388d762807b493e1c92566084662b82fc8 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -407,6 +407,13 @@ config SERIAL_8250_DW Selecting this option will enable handling of the extra features present in the Synopsys DesignWare APB UART. +config SERIAL_8250_SUNWAY + tristate "Support for SW6B Builtin Synopsys DesignWare 8250 quirks" + depends on SERIAL_8250 && SW64 + help + Selecting this option will enable handling of the extra features + present in the Synopsys DesignWare APB UART of SW6. 
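As an aside, the fractional baud-rate arithmetic this driver inherits from the DesignWare core (see the comment above sunway8250_get_divisor() in 8250_sunway.c) is easy to check in isolation. The sketch below is a stand-alone user-space rendering of that computation, not driver code; the helper name, the 100 MHz clock and the 4-bit DLF width are illustrative assumptions::

  #include <stdio.h>

  /* Stand-alone rendering of the quot/frac split used by
   * sunway8250_get_divisor(); dlf_size is the DLF width in bits. */
  static unsigned int get_divisor(unsigned int clk, unsigned int baud,
                                  unsigned int dlf_size, unsigned int *frac)
  {
          unsigned int base_baud = baud * 16;
          unsigned int rem = clk % base_baud;

          /* DIV_ROUND_CLOSEST(rem << dlf_size, base_baud) */
          *frac = ((rem << dlf_size) + base_baud / 2) / base_baud;

          return clk / base_baud;
  }

  int main(void)
  {
          unsigned int frac;
          unsigned int quot = get_divisor(100000000, 115200, 4, &frac);

          /* 100 MHz / (16 * 115200) = 54.25 -> quot = 54, frac = 4 (4/16) */
          printf("quot=%u frac=%u\n", quot, frac);
          return 0;
  }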
+ config SERIAL_8250_EM tristate "Support for Emma Mobile integrated serial port" depends on SERIAL_8250 && HAVE_CLK diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile index 628b75be312ea531d3aed9674793f15f3659dfa6..8186ea891405db2c8c792ec59b74499d25eed546 100644 --- a/drivers/tty/serial/8250/Makefile +++ b/drivers/tty/serial/8250/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o obj-$(CONFIG_SERIAL_8250_DFL) += 8250_dfl.o obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o +obj-$(CONFIG_SERIAL_8250_SUNWAY) += 8250_sunway.o obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o obj-$(CONFIG_SERIAL_8250_IOC3) += 8250_ioc3.o obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 2d572f6c8ec833023450136e3e2278d46d8937ae..ed942097ee3351732c3e019068c504613d80a30e 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -12,6 +12,8 @@ * Base Functions */ +#include +#include #include #include #include @@ -218,7 +220,9 @@ static ssize_t name_show(struct device *dev, struct uio_device *idev = dev_get_drvdata(dev); int ret; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; dev_err(dev, "the device has been unregistered\n"); @@ -228,7 +232,7 @@ static ssize_t name_show(struct device *dev, ret = sprintf(buf, "%s\n", idev->info->name); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } static DEVICE_ATTR_RO(name); @@ -239,7 +243,9 @@ static ssize_t version_show(struct device *dev, struct uio_device *idev = dev_get_drvdata(dev); int ret; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; dev_err(dev, "the device has been unregistered\n"); @@ -249,7 +255,7 @@ static ssize_t version_show(struct device *dev, ret = sprintf(buf, "%s\n", idev->info->version); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } static DEVICE_ATTR_RO(version); @@ -489,16 +495,20 @@ static int uio_open(struct inode *inode, struct file *filep) listener->event_count = atomic_read(&idev->event); filep->private_data = listener; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) { + ret = -EINVAL; + goto err_infoopen; + } + if (!idev->info) { - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); ret = -EINVAL; goto err_infoopen; } if (idev->info->open) ret = idev->info->open(idev->info, inode); - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); if (ret) goto err_infoopen; @@ -531,10 +541,12 @@ static int uio_release(struct inode *inode, struct file *filep) struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (idev->info && idev->info->release) ret = idev->info->release(idev->info, inode); - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); module_put(idev->owner); kfree(listener); @@ -548,10 +560,12 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait) struct uio_device *idev = listener->dev; __poll_t ret = 0; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info || !idev->info->irq) ret = -EIO; - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); if (ret) return 
ret; @@ -577,13 +591,17 @@ static ssize_t uio_read(struct file *filep, char __user *buf, add_wait_queue(&idev->wait, &wait); do { - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) { + retval = -EINVAL; + break; + } + if (!idev->info || !idev->info->irq) { retval = -EIO; - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); break; } - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); set_current_state(TASK_INTERRUPTIBLE); @@ -631,7 +649,9 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, if (copy_from_user(&irq_on, buf, count)) return -EFAULT; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { retval = -EINVAL; goto out; @@ -650,7 +670,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, retval = idev->info->irqcontrol(idev->info, irq_on); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return retval ? retval : sizeof(s32); } @@ -675,7 +695,9 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) vm_fault_t ret = 0; int mi; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return VM_FAULT_SIGBUS; + if (!idev->info) { ret = VM_FAULT_SIGBUS; goto out; @@ -702,8 +724,7 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) vmf->page = page; out: - mutex_unlock(&idev->info_lock); - + percpu_ref_put(&idev->info_ref); return ret; } @@ -772,7 +793,9 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) vma->vm_private_data = idev; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; goto out; @@ -811,10 +834,30 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) } out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } +static long uio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct uio_listener *listener = filp->private_data; + struct uio_device *idev = listener->dev; + long retval = 0; + + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + + if (!idev->info || !idev->info->ioctl) { + retval = -EINVAL; + goto out; + } + + retval = idev->info->ioctl(idev->info, cmd, arg); +out: + percpu_ref_put(&idev->info_ref); + return retval; +} + static const struct file_operations uio_fops = { .owner = THIS_MODULE, .open = uio_open, @@ -825,6 +868,8 @@ static const struct file_operations uio_fops = { .poll = uio_poll, .fasync = uio_fasync, .llseek = noop_llseek, + .unlocked_ioctl = uio_ioctl, + .compat_ioctl = uio_ioctl, }; static int uio_major_init(void) @@ -907,6 +952,14 @@ static void uio_device_release(struct device *dev) kfree(idev); } +static void uio_info_free(struct percpu_ref *ref) +{ + struct uio_device *idev = container_of(ref, struct uio_device, info_ref); + + complete(&idev->free_done); +} + + /** * __uio_register_device - register a new userspace IO device * @owner: module that creates the new device @@ -937,10 +990,18 @@ int __uio_register_device(struct module *owner, idev->owner = owner; idev->info = info; - mutex_init(&idev->info_lock); init_waitqueue_head(&idev->wait); atomic_set(&idev->event, 0); + ret = percpu_ref_init(&idev->info_ref, uio_info_free, 0, GFP_KERNEL); + if (ret) { + pr_err("percpu_ref init failed!\n"); + kfree(idev); + return ret; + } + init_completion(&idev->confirm_done); + init_completion(&idev->free_done); + ret = uio_get_minor(idev); if (ret) { 
kfree(idev); @@ -1036,6 +1097,13 @@ int __devm_uio_register_device(struct module *owner, } EXPORT_SYMBOL_GPL(__devm_uio_register_device); +static void uio_confirm_info(struct percpu_ref *ref) +{ + struct uio_device *idev = container_of(ref, struct uio_device, info_ref); + + complete(&idev->confirm_done); +} + /** * uio_unregister_device - unregister a industrial IO device * @info: UIO device capabilities @@ -1052,14 +1120,16 @@ void uio_unregister_device(struct uio_info *info) idev = info->uio_dev; minor = idev->minor; - mutex_lock(&idev->info_lock); + percpu_ref_kill_and_confirm(&idev->info_ref, uio_confirm_info); + wait_for_completion(&idev->confirm_done); + wait_for_completion(&idev->free_done); + /* now, we can set info to NULL */ uio_dev_del_attributes(idev); if (info->irq && info->irq != UIO_IRQ_CUSTOM) free_irq(info->irq, idev); idev->info = NULL; - mutex_unlock(&idev->info_lock); wake_up_interruptible(&idev->wait); kill_fasync(&idev->async_queue, SIGIO, POLL_HUP); diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile index 7d338e9c0657e9aa594c9e3b26f8828645e2067b..8ee58be1fb37ebae836645e3ac4b5c9a32dca086 100644 --- a/drivers/usb/core/Makefile +++ b/drivers/usb/core/Makefile @@ -9,7 +9,7 @@ usbcore-y += devio.o notify.o generic.o quirks.o devices.o usbcore-y += phy.o port.o usbcore-$(CONFIG_OF) += of.o -usbcore-$(CONFIG_USB_PCI) += hcd-pci.o +usbcore-$(CONFIG_USB_PCI) += hcd-pci.o usbcore-$(CONFIG_ACPI) += usb-acpi.o ifdef CONFIG_USB_ONBOARD_HUB diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 990280688b254d913d843bde17dae0a8be203778..df8f91e6a2c7f1aa14fbde177b8886396978da7f 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -48,6 +48,9 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd, struct pci_dev *companion; struct usb_hcd *companion_hcd; unsigned int slot = PCI_SLOT(pdev->devfn); +#if IS_ENABLED(CONFIG_X86) + struct pci_driver *drv; +#endif /* * Iterate through other PCI functions in the same slot. @@ -60,6 +63,18 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd, PCI_SLOT(companion->devfn) != slot) continue; +#if IS_ENABLED(CONFIG_X86) + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + drv = companion->driver; + if (drv && + strncmp(drv->name, "uhci_hcd", sizeof("uhci_hcd") - 1) && + strncmp(drv->name, "ohci-pci", sizeof("ohci-pci") - 1) && + strncmp(drv->name, "ehci-pci", sizeof("ehci-pci") - 1)) + continue; + } +#endif + /* * Companion device should be either UHCI,OHCI or EHCI host * controller, otherwise skip. 
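Stepping back to the uio ioctl hook added above: together with the tcmu TCMU_IOCTL_CMD_COPY_TO_SGL/COPY_FROM_SGL handlers earlier in this series, it implies a user-space calling sequence along the following lines. This is only a sketch: the ioctl request value and the exact layout of struct tcmu_data_xfer live in this series' uapi header, which is not shown here, so both are stand-in assumptions::

  /* Illustrative only: the request number below is a placeholder for the
   * real TCMU_IOCTL_CMD_COPY_FROM_SGL definition in the uapi header, and
   * the struct layout mirrors the fields the kernel side dereferences
   * (cmd_id, iov_cnt, iovec). */
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <sys/uio.h>

  #ifndef TCMU_IOCTL_CMD_COPY_FROM_SGL
  #define TCMU_IOCTL_CMD_COPY_FROM_SGL 0   /* placeholder, not the real value */
  #endif

  struct tcmu_data_xfer {          /* assumed layout */
          uint16_t cmd_id;         /* cmd_id of the ring entry to serve */
          uint32_t iov_cnt;        /* number of user buffers */
          struct iovec *iovec;     /* destination buffers for READ data */
  };

  /* Ask the kernel to copy a READ payload out of the command's SGL. */
  static int copy_read_payload(int uio_fd, uint16_t cmd_id,
                               struct iovec *iov, uint32_t iov_cnt)
  {
          struct tcmu_data_xfer xfer = {
                  .cmd_id = cmd_id,
                  .iov_cnt = iov_cnt,
                  .iovec = iov,
          };

          /* Dispatched via uio_ioctl() -> info->ioctl() == tcmu_ioctl(). */
          return ioctl(uio_fd, TCMU_IOCTL_CMD_COPY_FROM_SGL, &xfer);
  }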
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 2665832f9addff97c4ec520cb7f7bb66de82dac0..498497cace207c1d96841474e2b7d4fd28880aef 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -1283,3 +1283,130 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); + +#ifdef CONFIG_SW64 +#include +#define XHCI_STS_FATAL (1 << 2) +#define XHCI_STS_EINT (1 << 3) +#define XHCI_STS_PORT (1 << 4) +#define XHCI_STS_SRE (1 << 10) +#define STS_RW1C_BITS (XHCI_STS_FATAL | XHCI_STS_EINT | XHCI_STS_PORT | XHCI_STS_SRE) + +static void +fixup_usb_xhci_reset(struct pci_dev *dev) +{ + void __iomem *op_reg_base; + int timeout; + u32 xhci_command; + u32 tmp, val; + void __iomem *base; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + unsigned long offset; + int ext_cap_offset; + int retries = 3; + + pci_read_config_dword(dev, PCI_COMMAND, &tmp); + tmp |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_dword(dev, PCI_COMMAND, tmp); + + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &tmp); + if (tmp & PCI_BASE_ADDRESS_MEM_TYPE_MASK) { + pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val); + offset = (unsigned long)(val) << 32 | (tmp & (~0xf)); + } else + offset = (unsigned long)(tmp & (~0xf)); + + if (offset == 0) + return; + + base = (void *)__va(SW64_PCI_IO_BASE(hose->node, hose->index) | offset); + + ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY); + if (!ext_cap_offset) + goto hc_init; + + val = readl(base + ext_cap_offset); + + if ((dev->vendor == PCI_VENDOR_ID_TI && dev->device == 0x8241) || + (dev->vendor == PCI_VENDOR_ID_RENESAS + && dev->device == 0x0014)) { + val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED; + writel(val, base + ext_cap_offset); + } + + if (val & XHCI_HC_BIOS_OWNED) { + writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); + + timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, + 0, 1000000, 10); + if (timeout) { + pr_err("xHCI BIOS handoff failed (BIOS bug ?) %08x\n", val); + writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset); + } + } + + val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + val &= XHCI_LEGACY_DISABLE_SMI; + val |= XHCI_LEGACY_SMI_EVENTS; + writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + +hc_init: + if (dev->vendor == PCI_VENDOR_ID_INTEL) + usb_enable_intel_xhci_ports(dev); + + op_reg_base = base + XHCI_HC_LENGTH(readl(base)); + + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, + 5000000, 10); + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + pr_err("xHCI HW not ready after 5 sec (HC bug?) 
status = 0x%x\n", val); + } + + xhci_command = readl(op_reg_base + XHCI_CMD_OFFSET); + xhci_command |= 0x2; + writel(xhci_command, op_reg_base + XHCI_CMD_OFFSET); + + timeout = handshake(op_reg_base + XHCI_CMD_OFFSET, + 0x2, 0, 10 * 1000 * 1000, 125); + if (timeout) + pr_err("xHCI BIOS handoff time out\n"); + +retry: + val = readl(op_reg_base + XHCI_STS_OFFSET); + val |= STS_RW1C_BITS; + writel(val, op_reg_base + XHCI_STS_OFFSET); + val = readl(op_reg_base + XHCI_STS_OFFSET); + + if ((val & STS_RW1C_BITS) && retries--) { + pr_err("clear USB Status Register (status = %#x) failed, retry\n", val); + goto retry; + } + + val = readl(op_reg_base + XHCI_CMD_OFFSET); + val &= ~(XHCI_CMD_RUN | XHCI_IRQS); + writel(val, op_reg_base + XHCI_CMD_OFFSET); + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1, + XHCI_MAX_HALT_USEC, 125); + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + pr_err("xHCI HW did not halt within %d usec status = 0x%x\n", + XHCI_MAX_HALT_USEC, val); + } + + xhci_command = readl(op_reg_base + XHCI_CMD_OFFSET); + xhci_command |= 0x2; + writel(xhci_command, op_reg_base + XHCI_CMD_OFFSET); + + timeout = handshake(op_reg_base + XHCI_CMD_OFFSET, + 0x2, 0, 10 * 1000 * 1000, 125); + if (timeout) + pr_err("xHCI BIOS handoff time out\n"); + + pci_read_config_dword(dev, PCI_COMMAND, &tmp); + tmp &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_dword(dev, PCI_COMMAND, tmp); +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB_XHCI, 0, fixup_usb_xhci_reset); +#endif diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 340d9597d1ab05bbcf461bebee8f49a073a53cd4..5e6521323c3f92566961beb7c6a64a4dc48a1642 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -463,6 +463,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_BROKEN_STREAMS; } + if (pdev->vendor == PCI_VENDOR_ID_ETRON && + pdev->device == PCI_DEVICE_ID_EJ188) + xhci->quirks |= XHCI_NO_SOFT_RETRY; + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0014) { xhci->quirks |= XHCI_ZERO_64B_REGS; diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index 6bda6dbb48784b7047c467388575f9e35c8e2c0b..b9b459d9b07331ef8ca83aa8f4e793aeddce2c57 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -39,7 +39,7 @@ config VFIO_GROUP config VFIO_CONTAINER bool "Support for the VFIO container /dev/vfio/vfio" - select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) + select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64 || SW64 || LOONGARCH) depends on VFIO_GROUP default y help diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile index c82ea032d3521268138811a1cc1b718755c90c26..68c05705200fce8fc9824a8521bbe554e5c130f7 100644 --- a/drivers/vfio/Makefile +++ b/drivers/vfio/Makefile @@ -1,8 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_VFIO) += vfio.o -vfio-y += vfio_main.o \ - iova_bitmap.o +vfio-y += vfio_main.o vfio-$(CONFIG_VFIO_DEVICE_CDEV) += device_cdev.o vfio-$(CONFIG_VFIO_GROUP) += group.o vfio-$(CONFIG_IOMMUFD) += iommufd.o diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index 8125e5f37832c40adbf25a6868389c78639e42cc..1b5ca334e225333ee22d3e226920106afd6d4154 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -65,4 +65,8 @@ source "drivers/vfio/pci/hisilicon/Kconfig" source "drivers/vfio/pci/pds/Kconfig" +source "drivers/vfio/pci/qat/Kconfig" + +source 
"drivers/vfio/pci/nvgrace-gpu/Kconfig" + endmenu diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile index 45167be462d8f601c2da3924fd6848ef6c059cf9..a84c7f823b373f0b4d24d7f40239fef25566a78d 100644 --- a/drivers/vfio/pci/Makefile +++ b/drivers/vfio/pci/Makefile @@ -13,3 +13,7 @@ obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5/ obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/ obj-$(CONFIG_PDS_VFIO_PCI) += pds/ + +obj-$(CONFIG_NVGRACE_GPU_VFIO_PCI) += nvgrace-gpu/ + +obj-$(CONFIG_QAT_VFIO_PCI) += qat/ diff --git a/drivers/vfio/pci/mlx5/Kconfig b/drivers/vfio/pci/mlx5/Kconfig index 7088edc4fb28d88f5603e8f68462993123eece46..c3ced56b7787650ce8b82039b419413e81deedfa 100644 --- a/drivers/vfio/pci/mlx5/Kconfig +++ b/drivers/vfio/pci/mlx5/Kconfig @@ -3,6 +3,7 @@ config MLX5_VFIO_PCI tristate "VFIO support for MLX5 PCI devices" depends on MLX5_CORE select VFIO_PCI_CORE + select IOMMUFD_DRIVER help This provides migration support for MLX5 devices using the VFIO framework. diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c index 42ec574a86221074b2b201d4546e116adfd79179..5cf2b491d15a01467cc82a5df624ffc494da8b20 100644 --- a/drivers/vfio/pci/mlx5/main.c +++ b/drivers/vfio/pci/mlx5/main.c @@ -1376,6 +1376,7 @@ static struct pci_driver mlx5vf_pci_driver = { module_pci_driver(mlx5vf_pci_driver); +MODULE_IMPORT_NS(IOMMUFD); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Max Gurtovoy "); MODULE_AUTHOR("Yishai Hadas "); diff --git a/drivers/vfio/pci/nvgrace-gpu/Kconfig b/drivers/vfio/pci/nvgrace-gpu/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..a7f624b37e410b4cf1b6409531692895f17214c6 --- /dev/null +++ b/drivers/vfio/pci/nvgrace-gpu/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +config NVGRACE_GPU_VFIO_PCI + tristate "VFIO support for the GPU in the NVIDIA Grace Hopper Superchip" + depends on ARM64 || (COMPILE_TEST && 64BIT) + select VFIO_PCI_CORE + help + VFIO support for the GPU in the NVIDIA Grace Hopper Superchip is + required to assign the GPU device to userspace using KVM/qemu/etc. + + If you don't know what to do here, say N. diff --git a/drivers/vfio/pci/nvgrace-gpu/Makefile b/drivers/vfio/pci/nvgrace-gpu/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..3ca8c187897a970c2d462ec4ac57238ede91a3ca --- /dev/null +++ b/drivers/vfio/pci/nvgrace-gpu/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_NVGRACE_GPU_VFIO_PCI) += nvgrace-gpu-vfio-pci.o +nvgrace-gpu-vfio-pci-y := main.o diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c new file mode 100644 index 0000000000000000000000000000000000000000..a467085038f0c533b8799af9e4f80136136d1abb --- /dev/null +++ b/drivers/vfio/pci/nvgrace-gpu/main.c @@ -0,0 +1,890 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved + */ + +#include +#include + +/* + * The device memory usable to the workloads running in the VM is cached + * and showcased as a 64b device BAR (comprising of BAR4 and BAR5 region) + * to the VM and is represented as usemem. + * Moreover, the VM GPU device driver needs a non-cacheable region to + * support the MIG feature. This region is also exposed as a 64b BAR + * (comprising of BAR2 and BAR3 region) and represented as resmem. 
+ */ +#define RESMEM_REGION_INDEX VFIO_PCI_BAR2_REGION_INDEX +#define USEMEM_REGION_INDEX VFIO_PCI_BAR4_REGION_INDEX + +/* Memory size expected as non cached and reserved by the VM driver */ +#define RESMEM_SIZE SZ_1G + +/* A hardwired and constant ABI value between the GPU FW and VFIO driver. */ +#define MEMBLK_SIZE SZ_512M + +/* + * The state of the two device memory region - resmem and usemem - is + * saved as struct mem_region. + */ +struct mem_region { + phys_addr_t memphys; /* Base physical address of the region */ + size_t memlength; /* Region size */ + size_t bar_size; /* Reported region BAR size */ + __le64 bar_val; /* Emulated BAR offset registers */ + union { + void *memaddr; + void __iomem *ioaddr; + }; /* Base virtual address of the region */ +}; + +struct nvgrace_gpu_pci_core_device { + struct vfio_pci_core_device core_device; + /* Cached and usable memory for the VM. */ + struct mem_region usemem; + /* Non cached memory carved out from the end of device memory */ + struct mem_region resmem; + /* Lock to control device memory kernel mapping */ + struct mutex remap_lock; +}; + +static void nvgrace_gpu_init_fake_bar_emu_regs(struct vfio_device *core_vdev) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + + nvdev->resmem.bar_val = 0; + nvdev->usemem.bar_val = 0; +} + +/* Choose the structure corresponding to the fake BAR with a given index. */ +static struct mem_region * +nvgrace_gpu_memregion(int index, + struct nvgrace_gpu_pci_core_device *nvdev) +{ + if (index == USEMEM_REGION_INDEX) + return &nvdev->usemem; + + if (index == RESMEM_REGION_INDEX) + return &nvdev->resmem; + + return NULL; +} + +static int nvgrace_gpu_open_device(struct vfio_device *core_vdev) +{ + struct vfio_pci_core_device *vdev = + container_of(core_vdev, struct vfio_pci_core_device, vdev); + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + int ret; + + ret = vfio_pci_core_enable(vdev); + if (ret) + return ret; + + if (nvdev->usemem.memlength) { + nvgrace_gpu_init_fake_bar_emu_regs(core_vdev); + mutex_init(&nvdev->remap_lock); + } + + vfio_pci_core_finish_enable(vdev); + + return 0; +} + +static void nvgrace_gpu_close_device(struct vfio_device *core_vdev) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + + /* Unmap the mapping to the device memory cached region */ + if (nvdev->usemem.memaddr) { + memunmap(nvdev->usemem.memaddr); + nvdev->usemem.memaddr = NULL; + } + + /* Unmap the mapping to the device memory non-cached region */ + if (nvdev->resmem.ioaddr) { + iounmap(nvdev->resmem.ioaddr); + nvdev->resmem.ioaddr = NULL; + } + + mutex_destroy(&nvdev->remap_lock); + + vfio_pci_core_close_device(core_vdev); +} + +static int nvgrace_gpu_mmap(struct vfio_device *core_vdev, + struct vm_area_struct *vma) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + struct mem_region *memregion; + unsigned long start_pfn; + u64 req_len, pgoff, end; + unsigned int index; + int ret = 0; + + index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + + memregion = nvgrace_gpu_memregion(index, nvdev); + if (!memregion) + return vfio_pci_core_mmap(core_vdev, vma); + + /* + * Request to mmap the BAR. 
Map to the CPU accessible memory on the + * GPU using the memory information gathered from the system ACPI + * tables. + */ + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + + if (check_sub_overflow(vma->vm_end, vma->vm_start, &req_len) || + check_add_overflow(PHYS_PFN(memregion->memphys), pgoff, &start_pfn) || + check_add_overflow(PFN_PHYS(pgoff), req_len, &end)) + return -EOVERFLOW; + + /* + * Check that the mapping request does not go beyond available device + * memory size + */ + if (end > memregion->memlength) + return -EINVAL; + + /* + * The carved out region of the device memory needs the NORMAL_NC + * property. Communicate as such to the hypervisor. + */ + if (index == RESMEM_REGION_INDEX) { + /* + * The nvgrace-gpu module has no issues with uncontained + * failures on NORMAL_NC accesses. VM_ALLOW_ANY_UNCACHED is + * set to communicate to the KVM to S2 map as NORMAL_NC. + * This opens up guest usage of NORMAL_NC for this mapping. + */ + vm_flags_set(vma, VM_ALLOW_ANY_UNCACHED); + + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + } + + /* + * Perform a PFN map to the memory and back the device BAR by the + * GPU memory. + * + * The available GPU memory size may not be power-of-2 aligned. The + * remainder is only backed by vfio_device_ops read/write handlers. + * + * During device reset, the GPU is safely disconnected from the CPU + * and accesses to the BAR are returned immediately, preventing a + * machine check. + */ + ret = remap_pfn_range(vma, vma->vm_start, start_pfn, + req_len, vma->vm_page_prot); + if (ret) + return ret; + + vma->vm_pgoff = start_pfn; + + return 0; +} + +static long +nvgrace_gpu_ioctl_get_region_info(struct vfio_device *core_vdev, + unsigned long arg) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + unsigned long minsz = offsetofend(struct vfio_region_info, offset); + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + struct vfio_region_info_cap_sparse_mmap *sparse; + struct vfio_region_info info; + struct mem_region *memregion; + u32 size; + int ret; + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + /* + * Request to determine the BAR region information. Send the + * GPU memory information. + */ + memregion = nvgrace_gpu_memregion(info.index, nvdev); + if (!memregion) + return vfio_pci_core_ioctl(core_vdev, + VFIO_DEVICE_GET_REGION_INFO, arg); + + size = struct_size(sparse, areas, 1); + + /* + * Setup for sparse mapping for the device memory. Only the + * available device memory on the hardware is shown as a + * mappable region. + */ + sparse = kzalloc(size, GFP_KERNEL); + if (!sparse) + return -ENOMEM; + + sparse->nr_areas = 1; + sparse->areas[0].offset = 0; + sparse->areas[0].size = memregion->memlength; + sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP; + sparse->header.version = 1; + + ret = vfio_info_add_capability(&caps, &sparse->header, size); + kfree(sparse); + if (ret) + return ret; + + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + /* + * The region memory size may not be power-of-2 aligned. + * Given that the memory is exposed as a BAR and may not be + * aligned, round up to the next power-of-2. 
+ */ + info.size = memregion->bar_size; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE | + VFIO_REGION_INFO_FLAG_MMAP; + + if (caps.size) { + info.flags |= VFIO_REGION_INFO_FLAG_CAPS; + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + info.cap_offset = 0; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user((void __user *)arg + + sizeof(info), caps.buf, + caps.size)) { + kfree(caps.buf); + return -EFAULT; + } + info.cap_offset = sizeof(info); + } + kfree(caps.buf); + } + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; +} + +static long nvgrace_gpu_ioctl(struct vfio_device *core_vdev, + unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case VFIO_DEVICE_GET_REGION_INFO: + return nvgrace_gpu_ioctl_get_region_info(core_vdev, arg); + case VFIO_DEVICE_IOEVENTFD: + return -ENOTTY; + case VFIO_DEVICE_RESET: + nvgrace_gpu_init_fake_bar_emu_regs(core_vdev); + fallthrough; + default: + return vfio_pci_core_ioctl(core_vdev, cmd, arg); + } +} + +static __le64 +nvgrace_gpu_get_read_value(size_t bar_size, u64 flags, __le64 val64) +{ + u64 tmp_val; + + tmp_val = le64_to_cpu(val64); + tmp_val &= ~(bar_size - 1); + tmp_val |= flags; + + return cpu_to_le64(tmp_val); +} + +/* + * Both the usable (usemem) and the reserved (resmem) device memory region + * are exposed as a 64b fake device BARs in the VM. These fake BARs must + * respond to the accesses on their respective PCI config space offsets. + * + * resmem BAR owns PCI_BASE_ADDRESS_2 & PCI_BASE_ADDRESS_3. + * usemem BAR owns PCI_BASE_ADDRESS_4 & PCI_BASE_ADDRESS_5. + */ +static ssize_t +nvgrace_gpu_read_config_emu(struct vfio_device *core_vdev, + char __user *buf, size_t count, loff_t *ppos) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; + struct mem_region *memregion = NULL; + __le64 val64; + size_t register_offset; + loff_t copy_offset; + size_t copy_count; + int ret; + + ret = vfio_pci_core_read(core_vdev, buf, count, ppos); + if (ret < 0) + return ret; + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_2, + sizeof(val64), + ©_offset, ©_count, + ®ister_offset)) + memregion = nvgrace_gpu_memregion(RESMEM_REGION_INDEX, nvdev); + else if (vfio_pci_core_range_intersect_range(pos, count, + PCI_BASE_ADDRESS_4, + sizeof(val64), + ©_offset, ©_count, + ®ister_offset)) + memregion = nvgrace_gpu_memregion(USEMEM_REGION_INDEX, nvdev); + + if (memregion) { + val64 = nvgrace_gpu_get_read_value(memregion->bar_size, + PCI_BASE_ADDRESS_MEM_TYPE_64 | + PCI_BASE_ADDRESS_MEM_PREFETCH, + memregion->bar_val); + if (copy_to_user(buf + copy_offset, + (void *)&val64 + register_offset, copy_count)) { + /* + * The position has been incremented in + * vfio_pci_core_read. Reset the offset back to the + * starting position. 
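The masking in nvgrace_gpu_get_read_value() is what lets standard BAR sizing work against these emulated registers: the guest writes all-ones, reads the value back, and recovers the size from the zeroed low bits. Illustrative arithmetic, ignoring the split of a 64-bit BAR across two config dwords::

	u64 wrote = ~0ULL;				/* guest sizing write */
	u64 rdbk  = (wrote & ~(bar_size - 1)) |		/* emulated read-back */
		    PCI_BASE_ADDRESS_MEM_TYPE_64 |
		    PCI_BASE_ADDRESS_MEM_PREFETCH;
	u64 size  = ~(rdbk & PCI_BASE_ADDRESS_MEM_MASK) + 1;	/* == bar_size */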
+ */ + *ppos -= count; + return -EFAULT; + } + } + + return count; +} + +static ssize_t +nvgrace_gpu_write_config_emu(struct vfio_device *core_vdev, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; + struct mem_region *memregion = NULL; + size_t register_offset; + loff_t copy_offset; + size_t copy_count; + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_2, + sizeof(u64), ©_offset, + ©_count, ®ister_offset)) + memregion = nvgrace_gpu_memregion(RESMEM_REGION_INDEX, nvdev); + else if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_4, + sizeof(u64), ©_offset, + ©_count, ®ister_offset)) + memregion = nvgrace_gpu_memregion(USEMEM_REGION_INDEX, nvdev); + + if (memregion) { + if (copy_from_user((void *)&memregion->bar_val + register_offset, + buf + copy_offset, copy_count)) + return -EFAULT; + *ppos += copy_count; + return copy_count; + } + + return vfio_pci_core_write(core_vdev, buf, count, ppos); +} + +/* + * Ad hoc map the device memory in the module kernel VA space. Primarily needed + * as vfio does not require the userspace driver to only perform accesses through + * mmaps of the vfio-pci BAR regions and such accesses should be supported using + * vfio_device_ops read/write implementations. + * + * The usemem region is cacheable memory and hence is memremaped. + * The resmem region is non-cached and is mapped using ioremap_wc (NORMAL_NC). + */ +static int +nvgrace_gpu_map_device_mem(int index, + struct nvgrace_gpu_pci_core_device *nvdev) +{ + struct mem_region *memregion; + int ret = 0; + + memregion = nvgrace_gpu_memregion(index, nvdev); + if (!memregion) + return -EINVAL; + + mutex_lock(&nvdev->remap_lock); + + if (memregion->memaddr) + goto unlock; + + if (index == USEMEM_REGION_INDEX) + memregion->memaddr = memremap(memregion->memphys, + memregion->memlength, + MEMREMAP_WB); + else + memregion->ioaddr = ioremap_wc(memregion->memphys, + memregion->memlength); + + if (!memregion->memaddr) + ret = -ENOMEM; + +unlock: + mutex_unlock(&nvdev->remap_lock); + + return ret; +} + +/* + * Read the data from the device memory (mapped either through ioremap + * or memremap) into the user buffer. + */ +static int +nvgrace_gpu_map_and_read(struct nvgrace_gpu_pci_core_device *nvdev, + char __user *buf, size_t mem_count, loff_t *ppos) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + u64 offset = *ppos & VFIO_PCI_OFFSET_MASK; + int ret; + + if (!mem_count) + return 0; + + /* + * Handle read on the BAR regions. Map to the target device memory + * physical address and copy to the request read buffer. + */ + ret = nvgrace_gpu_map_device_mem(index, nvdev); + if (ret) + return ret; + + if (index == USEMEM_REGION_INDEX) { + if (copy_to_user(buf, + (u8 *)nvdev->usemem.memaddr + offset, + mem_count)) + ret = -EFAULT; + } else { + /* + * The hardware ensures that the system does not crash when + * the device memory is accessed with the memory enable + * turned off. It synthesizes ~0 on such read. So there is + * no need to check or support the disablement/enablement of + * BAR through PCI_COMMAND config space register. Pass + * test_mem flag as false. + */ + ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false, + nvdev->resmem.ioaddr, + buf, offset, mem_count, + 0, 0, false); + } + + return ret; +} + +/* + * Read count bytes from the device memory at an offset. 
The actual device + * memory size (available) may not be a power-of-2. So the driver fakes + * the size to a power-of-2 (reported) when exposing to a user space driver. + * + * Reads starting beyond the reported size generate -EINVAL; reads extending + * beyond the actual device size is filled with ~0; reads extending beyond + * the reported size are truncated. + */ +static ssize_t +nvgrace_gpu_read_mem(struct nvgrace_gpu_pci_core_device *nvdev, + char __user *buf, size_t count, loff_t *ppos) +{ + u64 offset = *ppos & VFIO_PCI_OFFSET_MASK; + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + struct mem_region *memregion; + size_t mem_count, i; + u8 val = 0xFF; + int ret; + + /* No need to do NULL check as caller does. */ + memregion = nvgrace_gpu_memregion(index, nvdev); + + if (offset >= memregion->bar_size) + return -EINVAL; + + /* Clip short the read request beyond reported BAR size */ + count = min(count, memregion->bar_size - (size_t)offset); + + /* + * Determine how many bytes to be actually read from the device memory. + * Read request beyond the actual device memory size is filled with ~0, + * while those beyond the actual reported size is skipped. + */ + if (offset >= memregion->memlength) + mem_count = 0; + else + mem_count = min(count, memregion->memlength - (size_t)offset); + + ret = nvgrace_gpu_map_and_read(nvdev, buf, mem_count, ppos); + if (ret) + return ret; + + /* + * Only the device memory present on the hardware is mapped, which may + * not be power-of-2 aligned. A read to an offset beyond the device memory + * size is filled with ~0. + */ + for (i = mem_count; i < count; i++) { + ret = put_user(val, (unsigned char __user *)(buf + i)); + if (ret) + return ret; + } + + *ppos += count; + return count; +} + +static ssize_t +nvgrace_gpu_read(struct vfio_device *core_vdev, + char __user *buf, size_t count, loff_t *ppos) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + + if (nvgrace_gpu_memregion(index, nvdev)) + return nvgrace_gpu_read_mem(nvdev, buf, count, ppos); + + if (index == VFIO_PCI_CONFIG_REGION_INDEX) + return nvgrace_gpu_read_config_emu(core_vdev, buf, count, ppos); + + return vfio_pci_core_read(core_vdev, buf, count, ppos); +} + +/* + * Write the data to the device memory (mapped either through ioremap + * or memremap) from the user buffer. + */ +static int +nvgrace_gpu_map_and_write(struct nvgrace_gpu_pci_core_device *nvdev, + const char __user *buf, size_t mem_count, + loff_t *ppos) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + int ret; + + if (!mem_count) + return 0; + + ret = nvgrace_gpu_map_device_mem(index, nvdev); + if (ret) + return ret; + + if (index == USEMEM_REGION_INDEX) { + if (copy_from_user((u8 *)nvdev->usemem.memaddr + pos, + buf, mem_count)) + return -EFAULT; + } else { + /* + * The hardware ensures that the system does not crash when + * the device memory is accessed with the memory enable + * turned off. It drops such writes. So there is no need to + * check or support the disablement/enablement of BAR + * through PCI_COMMAND config space register. Pass test_mem + * flag as false. + */ + ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false, + nvdev->resmem.ioaddr, + (char __user *)buf, pos, mem_count, + 0, 0, true); + } + + return ret; +} + +/* + * Write count bytes to the device memory at a given offset. 
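A concrete illustration of these rules with made-up sizes, taking memlength = 3.5 GiB so that the reported bar_size rounds up to 4 GiB::

	read of 1 MiB at offset 3.0 GiB        -> 1 MiB of device data
	read of 1 MiB at offset 3.5 GiB        -> 1 MiB of 0xff filler
	read of 2 MiB at offset 4 GiB - 1 MiB  -> truncated to 1 MiB of filler
	read of 1 MiB at offset 4.0 GiB        -> -EINVAL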
The actual device + * memory size (available) may not be a power-of-2. So the driver fakes the + * size to a power-of-2 (reported) when exposing to a user space driver. + * + * Writes extending beyond the reported size are truncated; writes starting + * beyond the reported size generate -EINVAL. + */ +static ssize_t +nvgrace_gpu_write_mem(struct nvgrace_gpu_pci_core_device *nvdev, + size_t count, loff_t *ppos, const char __user *buf) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + u64 offset = *ppos & VFIO_PCI_OFFSET_MASK; + struct mem_region *memregion; + size_t mem_count; + int ret = 0; + + /* No need to do NULL check as caller does. */ + memregion = nvgrace_gpu_memregion(index, nvdev); + + if (offset >= memregion->bar_size) + return -EINVAL; + + /* Clip short the write request beyond reported BAR size */ + count = min(count, memregion->bar_size - (size_t)offset); + + /* + * Determine how many bytes to be actually written to the device memory. + * Do not write to the offset beyond available size. + */ + if (offset >= memregion->memlength) + goto exitfn; + + /* + * Only the device memory present on the hardware is mapped, which may + * not be power-of-2 aligned. Drop access outside the available device + * memory on the hardware. + */ + mem_count = min(count, memregion->memlength - (size_t)offset); + + ret = nvgrace_gpu_map_and_write(nvdev, buf, mem_count, ppos); + if (ret) + return ret; + +exitfn: + *ppos += count; + return count; +} + +static ssize_t +nvgrace_gpu_write(struct vfio_device *core_vdev, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + + if (nvgrace_gpu_memregion(index, nvdev)) + return nvgrace_gpu_write_mem(nvdev, count, ppos, buf); + + if (index == VFIO_PCI_CONFIG_REGION_INDEX) + return nvgrace_gpu_write_config_emu(core_vdev, buf, count, ppos); + + return vfio_pci_core_write(core_vdev, buf, count, ppos); +} + +static const struct vfio_device_ops nvgrace_gpu_pci_ops = { + .name = "nvgrace-gpu-vfio-pci", + .init = vfio_pci_core_init_dev, + .release = vfio_pci_core_release_dev, + .open_device = nvgrace_gpu_open_device, + .close_device = nvgrace_gpu_close_device, + .ioctl = nvgrace_gpu_ioctl, + .device_feature = vfio_pci_core_ioctl_feature, + .read = nvgrace_gpu_read, + .write = nvgrace_gpu_write, + .mmap = nvgrace_gpu_mmap, + .request = vfio_pci_core_request, + .match = vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, + .detach_ioas = vfio_iommufd_physical_detach_ioas, +}; + +static const struct vfio_device_ops nvgrace_gpu_pci_core_ops = { + .name = "nvgrace-gpu-vfio-pci-core", + .init = vfio_pci_core_init_dev, + .release = vfio_pci_core_release_dev, + .open_device = nvgrace_gpu_open_device, + .close_device = vfio_pci_core_close_device, + .ioctl = vfio_pci_core_ioctl, + .device_feature = vfio_pci_core_ioctl_feature, + .read = vfio_pci_core_read, + .write = vfio_pci_core_write, + .mmap = vfio_pci_core_mmap, + .request = vfio_pci_core_request, + .match = vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, + .detach_ioas = vfio_iommufd_physical_detach_ioas, +}; + +static int +nvgrace_gpu_fetch_memory_property(struct pci_dev *pdev, + u64 
*pmemphys, u64 *pmemlength) +{ + int ret; + + /* + * The memory information is present in the system ACPI tables as DSD + * properties nvidia,gpu-mem-base-pa and nvidia,gpu-mem-size. + */ + ret = device_property_read_u64(&pdev->dev, "nvidia,gpu-mem-base-pa", + pmemphys); + if (ret) + return ret; + + if (*pmemphys > type_max(phys_addr_t)) + return -EOVERFLOW; + + ret = device_property_read_u64(&pdev->dev, "nvidia,gpu-mem-size", + pmemlength); + if (ret) + return ret; + + if (*pmemlength > type_max(size_t)) + return -EOVERFLOW; + + /* + * If the C2C link is not up due to an error, the coherent device + * memory size is returned as 0. Fail in such case. + */ + if (*pmemlength == 0) + return -ENOMEM; + + return ret; +} + +static int +nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev, + struct nvgrace_gpu_pci_core_device *nvdev, + u64 memphys, u64 memlength) +{ + int ret = 0; + + /* + * The VM GPU device driver needs a non-cacheable region to support + * the MIG feature. Since the device memory is mapped as NORMAL cached, + * carve out a region from the end with a different NORMAL_NC + * property (called as reserved memory and represented as resmem). This + * region then is exposed as a 64b BAR (region 2 and 3) to the VM, while + * exposing the rest (termed as usable memory and represented using usemem) + * as cacheable 64b BAR (region 4 and 5). + * + * devmem (memlength) + * |-------------------------------------------------| + * | | + * usemem.memphys resmem.memphys + */ + nvdev->usemem.memphys = memphys; + + /* + * The device memory exposed to the VM is added to the kernel by the + * VM driver module in chunks of memory block size. Only the usable + * memory (usemem) is added to the kernel for usage by the VM + * workloads. Make the usable memory size memblock aligned. + */ + if (check_sub_overflow(memlength, RESMEM_SIZE, + &nvdev->usemem.memlength)) { + ret = -EOVERFLOW; + goto done; + } + + /* + * The USEMEM part of the device memory has to be MEMBLK_SIZE + * aligned. This is a hardwired ABI value between the GPU FW and + * VFIO driver. The VM device driver is also aware of it and make + * use of the value for its calculation to determine USEMEM size. + */ + nvdev->usemem.memlength = round_down(nvdev->usemem.memlength, + MEMBLK_SIZE); + if (nvdev->usemem.memlength == 0) { + ret = -EINVAL; + goto done; + } + + if ((check_add_overflow(nvdev->usemem.memphys, + nvdev->usemem.memlength, + &nvdev->resmem.memphys)) || + (check_sub_overflow(memlength, nvdev->usemem.memlength, + &nvdev->resmem.memlength))) { + ret = -EOVERFLOW; + goto done; + } + + /* + * The memory regions are exposed as BARs. Calculate and save + * the BAR size for them. + */ + nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength); + nvdev->resmem.bar_size = roundup_pow_of_two(nvdev->resmem.memlength); +done: + return ret; +} + +static int nvgrace_gpu_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + const struct vfio_device_ops *ops = &nvgrace_gpu_pci_core_ops; + struct nvgrace_gpu_pci_core_device *nvdev; + u64 memphys, memlength; + int ret; + + ret = nvgrace_gpu_fetch_memory_property(pdev, &memphys, &memlength); + if (!ret) + ops = &nvgrace_gpu_pci_ops; + + nvdev = vfio_alloc_device(nvgrace_gpu_pci_core_device, core_device.vdev, + &pdev->dev, ops); + if (IS_ERR(nvdev)) + return PTR_ERR(nvdev); + + dev_set_drvdata(&pdev->dev, &nvdev->core_device); + + if (ops == &nvgrace_gpu_pci_ops) { + /* + * Device memory properties are identified in the host ACPI + * table. 
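Worked example of the carve-out, assuming an illustrative 16 GiB of device memory reported by ACPI::

	usemem.memlength = round_down(SZ_16G - RESMEM_SIZE,
				      MEMBLK_SIZE);		/* 15 GiB */
	resmem.memphys   = usemem.memphys + usemem.memlength;
	resmem.memlength = SZ_16G - usemem.memlength;		/*  1 GiB */
	usemem.bar_size  = roundup_pow_of_two(usemem.memlength);/* 16 GiB */
	resmem.bar_size  = roundup_pow_of_two(resmem.memlength);/*  1 GiB */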
Set the nvgrace_gpu_pci_core_device structure. + */ + ret = nvgrace_gpu_init_nvdev_struct(pdev, nvdev, + memphys, memlength); + if (ret) + goto out_put_vdev; + } + + ret = vfio_pci_core_register_device(&nvdev->core_device); + if (ret) + goto out_put_vdev; + + return ret; + +out_put_vdev: + vfio_put_device(&nvdev->core_device.vdev); + return ret; +} + +static void nvgrace_gpu_remove(struct pci_dev *pdev) +{ + struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev); + + vfio_pci_core_unregister_device(core_device); + vfio_put_device(&core_device->vdev); +} + +static const struct pci_device_id nvgrace_gpu_vfio_pci_table[] = { + /* GH200 120GB */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2342) }, + /* GH200 480GB */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2345) }, + /* GH200 SKU */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2348) }, + {} +}; + +MODULE_DEVICE_TABLE(pci, nvgrace_gpu_vfio_pci_table); + +static struct pci_driver nvgrace_gpu_vfio_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = nvgrace_gpu_vfio_pci_table, + .probe = nvgrace_gpu_probe, + .remove = nvgrace_gpu_remove, + .err_handler = &vfio_pci_core_err_handlers, + .driver_managed_dma = true, +}; + +module_pci_driver(nvgrace_gpu_vfio_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ankit Agrawal "); +MODULE_AUTHOR("Aniket Agashe "); +MODULE_DESCRIPTION("VFIO NVGRACE GPU PF - User Level driver for NVIDIA devices with CPU coherently accessible device memory"); diff --git a/drivers/vfio/pci/pds/Kconfig b/drivers/vfio/pci/pds/Kconfig index 6eceef7b028aae9b8b7a8cb49614e88525f4bade..fec9b167c7b9ac98ae24dddd9265e30d95942e7d 100644 --- a/drivers/vfio/pci/pds/Kconfig +++ b/drivers/vfio/pci/pds/Kconfig @@ -5,6 +5,7 @@ config PDS_VFIO_PCI tristate "VFIO support for PDS PCI devices" depends on PDS_CORE && PCI_IOV select VFIO_PCI_CORE + select IOMMUFD_DRIVER help This provides generic PCI support for PDS devices using the VFIO framework. diff --git a/drivers/vfio/pci/pds/pci_drv.c b/drivers/vfio/pci/pds/pci_drv.c index caffa1a2cf591e9428f4cf960f2d9265ee86c3a9..a34dda5166293583337372fc0059129c726a9f45 100644 --- a/drivers/vfio/pci/pds/pci_drv.c +++ b/drivers/vfio/pci/pds/pci_drv.c @@ -204,6 +204,7 @@ static struct pci_driver pds_vfio_pci_driver = { module_pci_driver(pds_vfio_pci_driver); +MODULE_IMPORT_NS(IOMMUFD); MODULE_DESCRIPTION(PDS_VFIO_DRV_DESCRIPTION); MODULE_AUTHOR("Brett Creeley "); MODULE_LICENSE("GPL"); diff --git a/drivers/vfio/pci/qat/Kconfig b/drivers/vfio/pci/qat/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..bf52cfa4b595c3fdee279ec760bc7bade3142187 --- /dev/null +++ b/drivers/vfio/pci/qat/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config QAT_VFIO_PCI + tristate "VFIO support for QAT VF PCI devices" + select VFIO_PCI_CORE + depends on CRYPTO_DEV_QAT_4XXX + help + This provides migration support for Intel(R) QAT Virtual Function + using the VFIO framework. + + To compile this as a module, choose M here: the module + will be called qat_vfio_pci. If you don't know what to do here, + say N. 
diff --git a/drivers/vfio/pci/qat/Makefile b/drivers/vfio/pci/qat/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..5fe5c4ec19d3022b054c9d1d6159e536d62505a5 --- /dev/null +++ b/drivers/vfio/pci/qat/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_QAT_VFIO_PCI) += qat_vfio_pci.o +qat_vfio_pci-y := main.o diff --git a/drivers/vfio/pci/qat/main.c b/drivers/vfio/pci/qat/main.c new file mode 100644 index 0000000000000000000000000000000000000000..e36740a282e7bcbe2c69479704744d553fefa6ac --- /dev/null +++ b/drivers/vfio/pci/qat/main.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * The migration data of each Intel QAT VF device is encapsulated into a + * 4096 bytes block. The data consists of two parts. + * The first is a pre-configured set of attributes of the VF being migrated, + * which are only set when it is created. This can be migrated during pre-copy + * stage and used for a device compatibility check. + * The second is the VF state. This includes the required MMIO regions and + * the shadow states maintained by the QAT PF driver. This part can only be + * saved when the VF is fully quiesced and be migrated during stop-copy stage. + * Both these 2 parts of data are saved in hierarchical structures including + * a preamble section and several raw state sections. + * When the pre-configured part of the migration data is fully retrieved from + * user space, the preamble section are used to validate the correctness of + * the data blocks and check the version compatibility. The raw state sections + * are then used to do a device compatibility check. + * When the device transits from RESUMING state, the VF states are extracted + * from the raw state sections of the VF state part of the migration data and + * then loaded into the device. 
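The exact layout of this block is private to the QAT PF driver; purely as a mental model (all field names below are hypothetical, not the actual ABI), the hierarchy described above can be pictured as::

	/* Hypothetical illustration only, not the real QAT format. */
	struct preamble {
		u32 magic;	/* validates the data block        */
		u32 version;	/* checked for compatibility       */
		u32 size;	/* bytes of raw sections following */
	};

	/* 4 KiB block = [preamble][raw config sections]  <- pre-copy
	 *             + [preamble][raw state sections]   <- stop-copy */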
+ */ + +struct qat_vf_migration_file { + struct file *filp; + /* protects migration region context */ + struct mutex lock; + bool disabled; + struct qat_vf_core_device *qat_vdev; + ssize_t filled_size; +}; + +struct qat_vf_core_device { + struct vfio_pci_core_device core_device; + struct qat_mig_dev *mdev; + /* protects migration state */ + struct mutex state_mutex; + enum vfio_device_mig_state mig_state; + struct qat_vf_migration_file *resuming_migf; + struct qat_vf_migration_file *saving_migf; +}; + +static int qat_vf_pci_open_device(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = + container_of(core_vdev, struct qat_vf_core_device, + core_device.vdev); + struct vfio_pci_core_device *vdev = &qat_vdev->core_device; + int ret; + + ret = vfio_pci_core_enable(vdev); + if (ret) + return ret; + + ret = qat_vfmig_open(qat_vdev->mdev); + if (ret) { + vfio_pci_core_disable(vdev); + return ret; + } + qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + + vfio_pci_core_finish_enable(vdev); + + return 0; +} + +static void qat_vf_disable_fd(struct qat_vf_migration_file *migf) +{ + mutex_lock(&migf->lock); + migf->disabled = true; + migf->filp->f_pos = 0; + migf->filled_size = 0; + mutex_unlock(&migf->lock); +} + +static void qat_vf_disable_fds(struct qat_vf_core_device *qat_vdev) +{ + if (qat_vdev->resuming_migf) { + qat_vf_disable_fd(qat_vdev->resuming_migf); + fput(qat_vdev->resuming_migf->filp); + qat_vdev->resuming_migf = NULL; + } + + if (qat_vdev->saving_migf) { + qat_vf_disable_fd(qat_vdev->saving_migf); + fput(qat_vdev->saving_migf->filp); + qat_vdev->saving_migf = NULL; + } +} + +static void qat_vf_pci_close_device(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = container_of(core_vdev, + struct qat_vf_core_device, core_device.vdev); + + qat_vfmig_close(qat_vdev->mdev); + qat_vf_disable_fds(qat_vdev); + vfio_pci_core_close_device(core_vdev); +} + +static long qat_vf_precopy_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct qat_vf_migration_file *migf = filp->private_data; + struct qat_vf_core_device *qat_vdev = migf->qat_vdev; + struct qat_mig_dev *mig_dev = qat_vdev->mdev; + struct vfio_precopy_info info; + loff_t *pos = &filp->f_pos; + unsigned long minsz; + int ret = 0; + + if (cmd != VFIO_MIG_GET_PRECOPY_INFO) + return -ENOTTY; + + minsz = offsetofend(struct vfio_precopy_info, dirty_bytes); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + if (info.argsz < minsz) + return -EINVAL; + + mutex_lock(&qat_vdev->state_mutex); + if (qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY && + qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) { + mutex_unlock(&qat_vdev->state_mutex); + return -EINVAL; + } + + mutex_lock(&migf->lock); + if (migf->disabled) { + ret = -ENODEV; + goto out; + } + + if (*pos > mig_dev->setup_size) { + ret = -EINVAL; + goto out; + } + + info.dirty_bytes = 0; + info.initial_bytes = mig_dev->setup_size - *pos; + +out: + mutex_unlock(&migf->lock); + mutex_unlock(&qat_vdev->state_mutex); + if (ret) + return ret; + return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; +} + +static ssize_t qat_vf_save_read(struct file *filp, char __user *buf, + size_t len, loff_t *pos) +{ + struct qat_vf_migration_file *migf = filp->private_data; + struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev; + ssize_t done = 0; + loff_t *offs; + int ret; + + if (pos) + return -ESPIPE; + offs = &filp->f_pos; + + mutex_lock(&migf->lock); + if (*offs > migf->filled_size || *offs < 0) { + done = -EINVAL; + goto out_unlock; + } + + if (migf->disabled) { + done = -ENODEV; + goto out_unlock; + } + + len = min_t(size_t, migf->filled_size - *offs, len); + if (len) { + ret = copy_to_user(buf, mig_dev->state + *offs, len); + if (ret) { + done = -EFAULT; + goto out_unlock; + } + *offs += len; + done = len; + } + +out_unlock: + mutex_unlock(&migf->lock); + return done; +} + +static int qat_vf_release_file(struct inode *inode, struct file *filp) +{ + struct qat_vf_migration_file *migf = filp->private_data; + + qat_vf_disable_fd(migf); + mutex_destroy(&migf->lock); + kfree(migf); + + return 0; +} + +static const struct file_operations qat_vf_save_fops = { + .owner = THIS_MODULE, + .read = qat_vf_save_read, + .unlocked_ioctl = qat_vf_precopy_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = qat_vf_release_file, + .llseek = no_llseek, +}; + +static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev, + struct qat_vf_migration_file *migf) +{ + int ret; + + ret = qat_vfmig_save_state(qat_vdev->mdev); + if (ret) + return ret; + migf->filled_size = qat_vdev->mdev->state_size; + + return 0; +} + +static int qat_vf_save_setup(struct qat_vf_core_device *qat_vdev, + struct qat_vf_migration_file *migf) +{ + int ret; + + ret = qat_vfmig_save_setup(qat_vdev->mdev); + if (ret) + return ret; + migf->filled_size = qat_vdev->mdev->setup_size; + + return 0; +} + +/* + * Allocate a file handler for user space and then save the migration data for + * the device being migrated. If this is called in the pre-copy stage, save the + * pre-configured device data. Otherwise, if this is called in the stop-copy + * stage, save the device state. In both cases, update the data size which can + * then be read from user space. 
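On the user-space side, the saving file descriptor is drained with plain read()s, optionally after asking how much pre-copy data is pending; a hedged sketch using the uapi from <linux/vfio.h>::

	struct vfio_precopy_info pc = { .argsz = sizeof(pc) };

	ioctl(saving_fd, VFIO_MIG_GET_PRECOPY_INFO, &pc);
	/* pc.initial_bytes: setup bytes still to read; dirty_bytes is 0 */

	char buf[4096];
	ssize_t n;

	while ((n = read(saving_fd, buf, sizeof(buf))) > 0)
		;	/* forward the bytes to the target device */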
+ */ +static struct qat_vf_migration_file * +qat_vf_save_device_data(struct qat_vf_core_device *qat_vdev, bool pre_copy) +{ + struct qat_vf_migration_file *migf; + int ret; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_save_fops, + migf, O_RDONLY); + ret = PTR_ERR_OR_ZERO(migf->filp); + if (ret) { + kfree(migf); + return ERR_PTR(ret); + } + + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + + if (pre_copy) + ret = qat_vf_save_setup(qat_vdev, migf); + else + ret = qat_vf_save_state(qat_vdev, migf); + if (ret) { + fput(migf->filp); + return ERR_PTR(ret); + } + + migf->qat_vdev = qat_vdev; + + return migf; +} + +static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf, + size_t len, loff_t *pos) +{ + struct qat_vf_migration_file *migf = filp->private_data; + struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev; + loff_t end, *offs; + ssize_t done = 0; + int ret; + + if (pos) + return -ESPIPE; + offs = &filp->f_pos; + + if (*offs < 0 || + check_add_overflow((loff_t)len, *offs, &end)) + return -EOVERFLOW; + + if (end > mig_dev->state_size) + return -ENOMEM; + + mutex_lock(&migf->lock); + if (migf->disabled) { + done = -ENODEV; + goto out_unlock; + } + + ret = copy_from_user(mig_dev->state + *offs, buf, len); + if (ret) { + done = -EFAULT; + goto out_unlock; + } + *offs += len; + migf->filled_size += len; + + /* + * Load the pre-configured device data first to check if the target + * device is compatible with the source device. + */ + ret = qat_vfmig_load_setup(mig_dev, migf->filled_size); + if (ret && ret != -EAGAIN) { + done = ret; + goto out_unlock; + } + done = len; + +out_unlock: + mutex_unlock(&migf->lock); + return done; +} + +static const struct file_operations qat_vf_resume_fops = { + .owner = THIS_MODULE, + .write = qat_vf_resume_write, + .release = qat_vf_release_file, + .llseek = no_llseek, +}; + +static struct qat_vf_migration_file * +qat_vf_resume_device_data(struct qat_vf_core_device *qat_vdev) +{ + struct qat_vf_migration_file *migf; + int ret; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_resume_fops, migf, O_WRONLY); + ret = PTR_ERR_OR_ZERO(migf->filp); + if (ret) { + kfree(migf); + return ERR_PTR(ret); + } + + migf->qat_vdev = qat_vdev; + migf->filled_size = 0; + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + + return migf; +} + +static int qat_vf_load_device_data(struct qat_vf_core_device *qat_vdev) +{ + return qat_vfmig_load_state(qat_vdev->mdev); +} + +static struct file *qat_vf_pci_step_device_state(struct qat_vf_core_device *qat_vdev, u32 new) +{ + u32 cur = qat_vdev->mig_state; + int ret; + + /* + * As the device is not capable of just stopping P2P DMAs, suspend the + * device completely once any of the P2P states are reached. + * When it is suspended, all its MMIO registers can still be operated + * correctly, jobs submitted through ring are queued while no jobs are + * processed by the device. The MMIO states can be safely migrated to + * the target VF during stop-copy stage and restored correctly in the + * target VF. All queued jobs can be resumed then. 
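These arcs are driven from user space through the migration feature ioctl; a minimal sketch of stepping the device into STOP_COPY (uapi names from <linux/vfio.h>, error handling elided)::

	struct {
		struct vfio_device_feature feat;
		struct vfio_device_feature_mig_state mig;
	} req = {
		.feat.argsz = sizeof(req),
		.feat.flags = VFIO_DEVICE_FEATURE_SET |
			      VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE,
		.mig.device_state = VFIO_DEVICE_STATE_STOP_COPY,
	};

	ioctl(device_fd, VFIO_DEVICE_FEATURE, &req);
	/* on success, req.mig.data_fd is the saving file descriptor */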
+ */ + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { + ret = qat_vfmig_suspend(qat_vdev->mdev); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) { + qat_vfmig_resume(qat_vdev->mdev); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) || + (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P)) + return NULL; + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) { + struct qat_vf_migration_file *migf; + + migf = qat_vf_save_device_data(qat_vdev, false); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + qat_vdev->saving_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) { + struct qat_vf_migration_file *migf; + + migf = qat_vf_resume_device_data(qat_vdev); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + qat_vdev->resuming_migf = migf; + return migf->filp; + } + + if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) { + qat_vf_disable_fds(qat_vdev); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) || + (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { + struct qat_vf_migration_file *migf; + + migf = qat_vf_save_device_data(qat_vdev, true); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + qat_vdev->saving_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) { + struct qat_vf_migration_file *migf = qat_vdev->saving_migf; + + if (!migf) + return ERR_PTR(-EINVAL); + ret = qat_vf_save_state(qat_vdev, migf); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) { + ret = qat_vf_load_device_data(qat_vdev); + if (ret) + return ERR_PTR(ret); + + qat_vf_disable_fds(qat_vdev); + return NULL; + } + + /* vfio_mig_get_next_state() does not use arcs other than the above */ + WARN_ON(true); + return ERR_PTR(-EINVAL); +} + +static void qat_vf_reset_done(struct qat_vf_core_device *qat_vdev) +{ + qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + qat_vfmig_reset(qat_vdev->mdev); + qat_vf_disable_fds(qat_vdev); +} + +static struct file *qat_vf_pci_set_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state new_state) +{ + struct qat_vf_core_device *qat_vdev = container_of(vdev, + struct qat_vf_core_device, core_device.vdev); + enum vfio_device_mig_state next_state; + struct file *res = NULL; + int ret; + + mutex_lock(&qat_vdev->state_mutex); + while (new_state != qat_vdev->mig_state) { + ret = vfio_mig_get_next_state(vdev, qat_vdev->mig_state, + new_state, &next_state); + if (ret) { + res = ERR_PTR(ret); + break; + } + res = qat_vf_pci_step_device_state(qat_vdev, next_state); + if (IS_ERR(res)) + break; + qat_vdev->mig_state = next_state; + if (WARN_ON(res && new_state != qat_vdev->mig_state)) { + fput(res); + res = ERR_PTR(-EINVAL); + break; + } + } + mutex_unlock(&qat_vdev->state_mutex); + + return 
res; +} + +static int qat_vf_pci_get_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state *curr_state) +{ + struct qat_vf_core_device *qat_vdev = container_of(vdev, + struct qat_vf_core_device, core_device.vdev); + + mutex_lock(&qat_vdev->state_mutex); + *curr_state = qat_vdev->mig_state; + mutex_unlock(&qat_vdev->state_mutex); + + return 0; +} + +static int qat_vf_pci_get_data_size(struct vfio_device *vdev, + unsigned long *stop_copy_length) +{ + struct qat_vf_core_device *qat_vdev = container_of(vdev, + struct qat_vf_core_device, core_device.vdev); + + mutex_lock(&qat_vdev->state_mutex); + *stop_copy_length = qat_vdev->mdev->state_size; + mutex_unlock(&qat_vdev->state_mutex); + + return 0; +} + +static const struct vfio_migration_ops qat_vf_pci_mig_ops = { + .migration_set_state = qat_vf_pci_set_device_state, + .migration_get_state = qat_vf_pci_get_device_state, + .migration_get_data_size = qat_vf_pci_get_data_size, +}; + +static void qat_vf_pci_release_dev(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = container_of(core_vdev, + struct qat_vf_core_device, core_device.vdev); + + qat_vfmig_cleanup(qat_vdev->mdev); + qat_vfmig_destroy(qat_vdev->mdev); + mutex_destroy(&qat_vdev->state_mutex); + vfio_pci_core_release_dev(core_vdev); +} + +static int qat_vf_pci_init_dev(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = container_of(core_vdev, + struct qat_vf_core_device, core_device.vdev); + struct qat_mig_dev *mdev; + struct pci_dev *parent; + int ret, vf_id; + + core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | + VFIO_MIGRATION_PRE_COPY; + core_vdev->mig_ops = &qat_vf_pci_mig_ops; + + ret = vfio_pci_core_init_dev(core_vdev); + if (ret) + return ret; + + mutex_init(&qat_vdev->state_mutex); + + parent = pci_physfn(qat_vdev->core_device.pdev); + vf_id = pci_iov_vf_id(qat_vdev->core_device.pdev); + if (vf_id < 0) { + ret = -ENODEV; + goto err_rel; + } + + mdev = qat_vfmig_create(parent, vf_id); + if (IS_ERR(mdev)) { + ret = PTR_ERR(mdev); + goto err_rel; + } + + ret = qat_vfmig_init(mdev); + if (ret) + goto err_destroy; + + qat_vdev->mdev = mdev; + + return 0; + +err_destroy: + qat_vfmig_destroy(mdev); +err_rel: + vfio_pci_core_release_dev(core_vdev); + return ret; +} + +static const struct vfio_device_ops qat_vf_pci_ops = { + .name = "qat-vf-vfio-pci", + .init = qat_vf_pci_init_dev, + .release = qat_vf_pci_release_dev, + .open_device = qat_vf_pci_open_device, + .close_device = qat_vf_pci_close_device, + .ioctl = vfio_pci_core_ioctl, + .read = vfio_pci_core_read, + .write = vfio_pci_core_write, + .mmap = vfio_pci_core_mmap, + .request = vfio_pci_core_request, + .match = vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, + .detach_ioas = vfio_iommufd_physical_detach_ioas, +}; + +static struct qat_vf_core_device *qat_vf_drvdata(struct pci_dev *pdev) +{ + struct vfio_pci_core_device *core_device = pci_get_drvdata(pdev); + + return container_of(core_device, struct qat_vf_core_device, core_device); +} + +static void qat_vf_pci_aer_reset_done(struct pci_dev *pdev) +{ + struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev); + + if (!qat_vdev->mdev) + return; + + mutex_lock(&qat_vdev->state_mutex); + qat_vf_reset_done(qat_vdev); + mutex_unlock(&qat_vdev->state_mutex); +} + +static int +qat_vf_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = 
&pdev->dev; + struct qat_vf_core_device *qat_vdev; + int ret; + + qat_vdev = vfio_alloc_device(qat_vf_core_device, core_device.vdev, dev, &qat_vf_pci_ops); + if (IS_ERR(qat_vdev)) + return PTR_ERR(qat_vdev); + + pci_set_drvdata(pdev, &qat_vdev->core_device); + ret = vfio_pci_core_register_device(&qat_vdev->core_device); + if (ret) + goto out_put_device; + + return 0; + +out_put_device: + vfio_put_device(&qat_vdev->core_device.vdev); + return ret; +} + +static void qat_vf_vfio_pci_remove(struct pci_dev *pdev) +{ + struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev); + + vfio_pci_core_unregister_device(&qat_vdev->core_device); + vfio_put_device(&qat_vdev->core_device.vdev); +} + +static const struct pci_device_id qat_vf_vfio_pci_table[] = { + /* Intel QAT GEN4 4xxx VF device */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) }, + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4943) }, + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4945) }, + {} +}; +MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table); + +static const struct pci_error_handlers qat_vf_err_handlers = { + .reset_done = qat_vf_pci_aer_reset_done, + .error_detected = vfio_pci_core_aer_err_detected, +}; + +static struct pci_driver qat_vf_vfio_pci_driver = { + .name = "qat_vfio_pci", + .id_table = qat_vf_vfio_pci_table, + .probe = qat_vf_vfio_pci_probe, + .remove = qat_vf_vfio_pci_remove, + .err_handler = &qat_vf_err_handlers, + .driver_managed_dma = true, +}; +module_pci_driver(qat_vf_vfio_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Xin Zeng "); +MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT GEN4 device family"); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index a2ad4f7c716bf31f39c6de6413c87ede0cf2138a..ea2745c1ac5e686caba729c80ddf91ce790cff8e 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -1978,3 +1978,45 @@ ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf, return done; } + +/** + * vfio_pci_core_range_intersect_range() - Determine overlap between a buffer + * and register offset ranges. + * @buf_start: start offset of the buffer + * @buf_cnt: number of buffer bytes + * @reg_start: start register offset + * @reg_cnt: number of register bytes + * @buf_offset: start offset of overlap in the buffer + * @intersect_count: number of overlapping bytes + * @register_offset: start offset of overlap in register + * + * Returns: true if there is overlap, false if not. + * The overlap start and size is returned through function args. 
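Worked example with illustrative numbers: a 4-byte config access at offset 0x22 against an 8-byte emulated BAR register window starting at 0x20::

	vfio_pci_core_range_intersect_range(0x22, 4, 0x20, 8,
					    &buf_offset, &count, &reg_offset);
	/* returns true: buf_offset = 0, count = 4, reg_offset = 2,
	 * i.e. copy 4 bytes starting at byte 2 of the emulated register */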
+ */ +bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt, + loff_t reg_start, size_t reg_cnt, + loff_t *buf_offset, + size_t *intersect_count, + size_t *register_offset) +{ + if (buf_start <= reg_start && + buf_start + buf_cnt > reg_start) { + *buf_offset = reg_start - buf_start; + *intersect_count = min_t(size_t, reg_cnt, + buf_start + buf_cnt - reg_start); + *register_offset = 0; + return true; + } + + if (buf_start > reg_start && + buf_start < reg_start + reg_cnt) { + *buf_offset = 0; + *intersect_count = min_t(size_t, buf_cnt, + reg_start + reg_cnt - buf_start); + *register_offset = buf_start - reg_start; + return true; + } + + return false; +} +EXPORT_SYMBOL_GPL(vfio_pci_core_range_intersect_range); diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index a8f259bc2f4d0c392e06fd879049c72789385c5b..2df176edc642542d05fdebad76332bec36878e0e 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1882,8 +1882,25 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma /* * See remap_pfn_range(), called from vfio_pci_fault() but we can't * change vm_flags within the fault handler. Set them now. + * + * VM_ALLOW_ANY_UNCACHED: The VMA flag is implemented for ARM64, + * allowing KVM stage 2 device mapping attributes to use Normal-NC + * rather than DEVICE_nGnRE, which allows guest mappings + * supporting write-combining attributes (WC). ARM does not + * architecturally guarantee this is safe, and indeed some MMIO + * regions like the GICv2 VCPU interface can trigger uncontained + * faults if Normal-NC is used. + * + * To safely use VFIO in KVM the platform must guarantee full + * safety in the guest where no action taken against a MMIO + * mapping can trigger an uncontained failure. The assumption is + * that most VFIO PCI platforms support this for both mapping types, + * at least in common flows, based on some expectations of how + * PCI IP is integrated. Hence VM_ALLOW_ANY_UNCACHED is set in + * the VMA flags. */ - vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); + vm_flags_set(vma, VM_ALLOW_ANY_UNCACHED | VM_IO | VM_PFNMAP | + VM_DONTEXPAND | VM_DONTDUMP); vma->vm_ops = &vfio_pci_mmap_ops; return 0; diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index e27de61ac9fe75f5818dc8d7386270c592c05a07..15484e27b26ffa894531c17c5a4d466950f24601 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -94,10 +94,10 @@ VFIO_IOREAD(32) * reads with -1. This is intended for handling MSI-X vector tables and * leftover space for ROM BARs. 
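For instance, when vfio_pci_bar_rw() (below) covers a BAR containing the MSI-X table, it passes that window as the excluded range; with an illustrative table at offset 0x2000 of size 0x1000::

	x_start = vdev->msix_offset;			/* 0x2000 */
	x_end   = vdev->msix_offset + vdev->msix_size;	/* 0x3000 */
	/* a read hitting 0x2004 is filled with -1, a write is dropped */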
*/ -static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, - void __iomem *io, char __user *buf, - loff_t off, size_t count, size_t x_start, - size_t x_end, bool iswrite) +ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, + void __iomem *io, char __user *buf, + loff_t off, size_t count, size_t x_start, + size_t x_end, bool iswrite) { ssize_t done = 0; int ret; @@ -199,6 +199,7 @@ static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, return done; } +EXPORT_SYMBOL_GPL(vfio_pci_core_do_io_rw); static int vfio_pci_setup_barmap(struct vfio_pci_core_device *vdev, int bar) { @@ -276,8 +277,8 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, x_end = vdev->msix_offset + vdev->msix_size; } - done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos, - count, x_start, x_end, iswrite); + done = vfio_pci_core_do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos, + count, x_start, x_end, iswrite); if (done >= 0) *ppos += done; @@ -345,7 +346,8 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, * probing, so we don't currently worry about access in relation * to the memory enable bit in the command register. */ - done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite); + done = vfio_pci_core_do_io_rw(vdev, false, iomem, buf, off, count, + 0, 0, iswrite); vga_put(vdev->pdev, rsrc); diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c index 40732e8ed4c6fb018ddfb27fbeaff37775dcc233..a96d97da367daa87a9e5920f36216d64f2c1afc0 100644 --- a/drivers/vfio/vfio_main.c +++ b/drivers/vfio/vfio_main.c @@ -1693,6 +1693,7 @@ static void __exit vfio_cleanup(void) module_init(vfio_init); module_exit(vfio_cleanup); +MODULE_IMPORT_NS(IOMMUFD); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR(DRIVER_AUTHOR); diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 325298573e120120a1b1219375489fbfdb8f2cb6..2ada2b100c51cd16767c5b8da3d66b22b78b8821 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1950,6 +1950,19 @@ config FB_SM712 called sm712fb. If you want to compile it as a module, say M here and read . +config FB_LS2K500 + tristate "Loongson LS2K500 frame buffer support" + depends on FB && PCI + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + help + Frame buffer driver for the Loongson LS7A Platform-Bridge. + + This driver is also available as a module. + If you want to compile it as a module, say M here and read + . 
+ source "drivers/video/fbdev/omap/Kconfig" source "drivers/video/fbdev/omap2/Kconfig" source "drivers/video/fbdev/mmp/Kconfig" diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index 70569f7027ed7b2110c51b1d01f21858a51ee3ce..d3fbb185daa34599e14d397c7df4664774322e9c 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -128,3 +128,4 @@ obj-$(CONFIG_FB_SIMPLE) += simplefb.o # the test framebuffer is last obj-$(CONFIG_FB_VIRTUAL) += vfb.o +obj-$(CONFIG_FB_LS2K500) += ls2k500sfb.o diff --git a/drivers/video/fbdev/ls2k500sfb.c b/drivers/video/fbdev/ls2k500sfb.c new file mode 100644 index 0000000000000000000000000000000000000000..00a83ea7c1e3c3aee2a14140e0628879cfd35179 --- /dev/null +++ b/drivers/video/fbdev/ls2k500sfb.c @@ -0,0 +1,792 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * + * linux/drivers/video/ls2k500sfb.c + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char mode_option[32] = "1280x1024-32@2M"; +module_param_string(mode, mode_option, sizeof(mode_option), 0444); +static int useshell; +module_param(useshell, int, 0664); +static int totty = 18; +module_param(totty, int, 0664); +static int resetdelay = 60; +module_param(resetdelay, int, 0664); +static int resetbootwait = 10; +module_param(resetbootwait, int, 0664); +static int GPIO = 14; +module_param(GPIO, int, 0664); +struct ls2k500sfb_struct { + struct pci_dev *dev; + struct platform_device *pd; + struct workqueue_struct *wq; + struct work_struct work; + struct delayed_work redraw_work; + int running; + unsigned long reset_time; + char *penv; + char saved_env[16]; +}; + +static int saved_console; +static unsigned long mscycles; +static atomic_t waiting_for_pciebreak_ipi; + +static int switch_console(int console) +{ + struct file *filp; + + filp = filp_open("/dev/tty1", O_RDWR, 0); + if (IS_ERR(filp)) + return -ENODEV; + + vfs_ioctl(filp, VT_ACTIVATE, console + 1); + filp_close(filp, NULL); + return 0; +} +static void ls2k500sfb_pciebreak_func(void *unused) +{ + atomic_dec(&waiting_for_pciebreak_ipi); + + while (atomic_read(&waiting_for_pciebreak_ipi)) + cpu_relax(); +} + +static void pciebreak_smp_send_stop(int ms) +{ + /* Wait at most 100 msecond for the other cpus to stop */ + unsigned long max_cycles = mscycles * ms; + unsigned long start_time = get_cycles(); + + atomic_set(&waiting_for_pciebreak_ipi, num_online_cpus()); + smp_call_function(ls2k500sfb_pciebreak_func, NULL, false); + while ((atomic_read(&waiting_for_pciebreak_ipi) > 1) + && get_cycles() - start_time < max_cycles) { + cpu_relax(); + } + if (atomic_read(&waiting_for_pciebreak_ipi) > 1) + pr_emerg("Non-pciebreaking CPUs did not react to IPI\n"); +} +static void ls2k500sfb_redraw_fn(struct work_struct *work) +{ + struct ls2k500sfb_struct *priv = + container_of(work, struct ls2k500sfb_struct, redraw_work.work); + /*restore resolution info */ + if (memcmp(priv->penv, priv->saved_env, sizeof(priv->saved_env))) + memcpy(priv->penv, priv->saved_env, sizeof(priv->saved_env)); + switch_console(saved_console); +} + +static unsigned long event_jiffies; +static void 
ls2k500sfb_events_fn(struct work_struct *work) +{ + struct ls2k500sfb_struct *priv = container_of(work, struct ls2k500sfb_struct, work); + struct pci_dev *pdev = priv->dev; + struct pci_dev *ppdev = pdev->bus->self; + uint32_t i, d, timeout, retry = 0; + static const uint32_t index[] = { + 0x10, 0x14, 0x18, 0x1c, 0x20, 0x24, 0x30, 0x3c, 0x54, 0x58, 0x78, 0x7c, 0x80, 4 + }; + + static uint32_t data[sizeof(index) / 4]; + static const uint32_t cindex[] = { 0x10, 0x3c, 4 }; + + static uint32_t cdata[sizeof(cindex) / 4]; + static uint32_t d80c, d71c, ctrl; + static void *p; + + if (!priv->running) { + for (i = 0; i < ARRAY_SIZE(index); i++) + pci_read_config_dword(ppdev, index[i], &data[i]); + for (i = 0; i < ARRAY_SIZE(cindex); i++) + pci_read_config_dword(pdev, cindex[i], &cdata[i]); + if (ppdev->vendor == 0x14) { + pci_read_config_dword(ppdev, 0x80c, &d80c); + d80c = (d80c & ~(3 << 17)) | (1 << 17); + + pci_read_config_dword(ppdev, 0x71c, &d71c); + d71c |= 1 << 26; + + p = pci_iomap(ppdev, 0, 0x100); + } + ctrl = readl(p); + return; + } + local_bh_disable(); + pciebreak_smp_send_stop(100); + wmb(); /* flush all write before we disable pcie window */ + pci_write_config_dword(ppdev, 0x18, 0); + pci_write_config_dword(ppdev, 0x1c, 0); + pci_write_config_dword(ppdev, 0x20, 0); + event_jiffies = jiffies; + atomic_set(&waiting_for_pciebreak_ipi, 0); + wmb(); /* flush all write after change pcie window */ + local_bh_enable(); + if (ppdev->vendor == 0x14) { + timeout = 10000; + while (timeout) { + pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (!d) + break; + mdelay(1); + timeout--; + }; + if (!timeout) + pr_info("bar not clear 0\n"); + + pci_read_config_dword(ppdev, 0x0, &d); + pr_info("pcie port deviceid=0x%x recover begin\n", d); +retrain: + while (1) { + pci_write_config_dword(ppdev, index[0], data[0]); + pci_read_config_dword(ppdev, index[0], &d); + d &= ~0xf; + if (d) + break; + mdelay(1); + } + + while (1) { + for (i = 0; i < ARRAY_SIZE(index); i++) { + if (index[i] != 0x18 && index[i] != 0x1c && index[i] != 0x20) + pci_write_config_dword(ppdev, index[i], data[i]); + } + pci_write_config_dword(ppdev, 0x80c, d80c); + pci_write_config_dword(ppdev, 0x71c, d71c); + + pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (d) + break; + mdelay(1); + } + + timeout = 10000; + + writel(ctrl | 0x8, p); + while (1) { + d = readl(p + 0xc); + if ((d & 0x11) == 0x11) { + break; + } else if (!timeout) { + pr_info("pcie train failed status=0x%x\n", d); + goto out; + } + mdelay(1); + timeout--; + } + + + pr_info("pcie recovered done\n"); + + if (!retry) { + /*wait u-boot ddr config */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ*resetbootwait); + set_current_state(TASK_RUNNING); + pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (!d) { + retry = 1; + goto retrain; + } + } + } else { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ*resetbootwait); + set_current_state(TASK_RUNNING); + } + local_bh_disable(); + pciebreak_smp_send_stop(10000); + wmb(); /* flush all write before we update pcie window */ + for (i = 0; i < ARRAY_SIZE(index); i++) + pci_write_config_dword(ppdev, index[i], data[i]); + + for (i = 0; i < ARRAY_SIZE(cindex); i++) + pci_write_config_dword(pdev, cindex[i], cdata[i]); + atomic_set(&waiting_for_pciebreak_ipi, 0); + wmb(); /* flush all write after we update pcie window */ + local_bh_enable(); + + + pr_info("redraw console\n"); + + saved_console = fg_console; + switch_console(fg_console > 0?fg_console - 1 : fg_console + 1); + 
queue_delayed_work(priv->wq, &priv->redraw_work, HZ); +out: + priv->running = 0; +} + +irqreturn_t ls2k500sfb_interrupt(int irq, void *arg) +{ + struct ls2k500sfb_struct *priv = arg; + struct pci_dev *pdev = priv->dev; + + if (irq == pdev->irq) + pr_info("ls2k500sfb pcie interrupt\n"); + else + pr_info("ls2k500sfb gpio interrupt\n"); + if (system_state != SYSTEM_RUNNING) + return IRQ_HANDLED; + + if (!priv->running) { + if (!resetdelay || time_after(jiffies, priv->reset_time + resetdelay * HZ)) { + priv->running = 1; + queue_work(priv->wq, &priv->work); + } + priv->reset_time = jiffies; + } + return IRQ_HANDLED; +} + +#ifdef CONFIG_LOONGARCH +#define GPIO_OEN ((void *)IO_BASE+0x1fe00000+0x500) +#define GPIO_FUNCEN ((void *)IO_BASE+0x1fe00000+0x504) +#define GPIO_OUT ((void *)IO_BASE+0x1fe00000+0x508) +#define GPIO_IN ((void *)IO_BASE+0x1fe00000+0x50c) +#define GPIO_INTPOL ((void *)IO_BASE+0x1fe00000+0x510) +#define GPIO_INTEN ((void *)IO_BASE+0x1fe00000+0x514) + +static int gpiochip_match_name(struct gpio_chip *chip, void *data) +{ + const char *name = data; + + return !strcmp(chip->label, name); +} +static int get_gpio_irq_from_acpi_table(int gpio) +{ + struct gpio_chip *chip; + struct gpio_desc *desc; + + chip = gpiochip_find("LOON0007:00", gpiochip_match_name); + if (!chip) + return -ENOENT; + desc = gpiochip_request_own_desc(chip, gpio, "reboot", GPIO_LOOKUP_FLAGS_DEFAULT, GPIOD_IN); + if (!desc) + return -ENOENT; + return gpiod_to_irq(desc); +} + +static int get_gpio_irq_from_acpi_gsi(int gpio) +{ + int gsi = 16 + (gpio & 7); + + return acpi_register_gsi(NULL, gsi, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW); +} + +static int register_gpio_reboot_handler(struct ls2k500sfb_struct *priv) +{ + int irq = get_gpio_irq_from_acpi_table(GPIO); + + if (irq < 0) { + irq = get_gpio_irq_from_acpi_gsi(GPIO); + pr_notice("gsi gpio irq %d\n", irq); + } else + pr_notice("acpi gpio irq %d\n", irq); + writel(readl(GPIO_OEN) | (0x1 << GPIO), GPIO_OEN); + writel(readl(GPIO_FUNCEN) & ~(0x1 << GPIO), GPIO_FUNCEN); + writel(readl(GPIO_INTPOL) & ~(0x1 << GPIO), GPIO_INTPOL); + writel(readl(GPIO_INTEN) | (0x1 << GPIO), GPIO_INTEN); + if (request_irq(irq, ls2k500sfb_interrupt, IRQF_SHARED | IRQF_TRIGGER_FALLING, + "ls2k500sfb", priv)) + pr_err("request_irq(%d) failed\n", irq); + return 0; +} +#endif + +static const struct fb_fix_screeninfo simplefb_fix = { + .id = "simple", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, + .accel = FB_ACCEL_NONE, +}; + +static const struct fb_var_screeninfo simplefb_var = { + .height = -1, + .width = -1, + .activate = FB_ACTIVATE_NOW, + .vmode = FB_VMODE_NONINTERLACED, +}; + +#define PSEUDO_PALETTE_SIZE 16 +struct simplefb_par { + char *penv; + char *preg; + u32 palette[PSEUDO_PALETTE_SIZE]; +}; + +static u_long get_line_length(int xres_virtual, int bpp) +{ + u_long length; + + length = xres_virtual * bpp; + length = (length + 31) & ~31; + length >>= 3; + return length; +} + +static int simplefb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + u_long line_length; + + /* + * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal! 
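get_line_length() pads each scanline to a 32-bit boundary before converting bits to bytes; two worked examples::

	get_line_length(1280, 32) == 5120	/* 40960 bits, already aligned */
	get_line_length(799, 16)  == 1600	/* 12784 bits padded to 12800  */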
+ * as FB_VMODE_SMOOTH_XPAN is only used internally + */ + + if (var->vmode & FB_VMODE_CONUPDATE) { + var->vmode |= FB_VMODE_YWRAP; + var->xoffset = info->var.xoffset; + var->yoffset = info->var.yoffset; + } + + /* + * Some very basic checks + */ + if (!var->xres) + var->xres = 1; + if (!var->yres) + var->yres = 1; + if (var->xres > var->xres_virtual) + var->xres_virtual = var->xres; + if (var->yres > var->yres_virtual) + var->yres_virtual = var->yres; + if (var->bits_per_pixel <= 16) + var->bits_per_pixel = 16; + else if (var->bits_per_pixel <= 32) + var->bits_per_pixel = 32; + else + return -EINVAL; + + if (var->xres_virtual < var->xoffset + var->xres) + var->xres_virtual = var->xoffset + var->xres; + if (var->yres_virtual < var->yoffset + var->yres) + var->yres_virtual = var->yoffset + var->yres; + + /* + * Memory limit + */ + line_length = + get_line_length(var->xres_virtual, var->bits_per_pixel); + if (line_length * var->yres_virtual > info->fix.smem_len) + return -ENOMEM; + + /* + * Now that we checked it we alter var. The reason being is that the video + * mode passed in might not work but slight changes to it might make it + * work. This way we let the user know what is acceptable. + */ + switch (var->bits_per_pixel) { + case 16: /* BGR 565 */ + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 32: /* BGRA 8888 */ + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + break; + } + var->red.msb_right = 0; + var->green.msb_right = 0; + var->blue.msb_right = 0; + var->transp.msb_right = 0; + + return 0; +} + +static int simplefb_set_par(struct fb_info *info) +{ + struct simplefb_par *par = info->par; + int reg_val; + + info->fix.line_length = get_line_length(info->var.xres_virtual, + info->var.bits_per_pixel); + sprintf(par->penv, "video=%dx%d-%d@2M", + info->var.xres_virtual, + info->var.yres_virtual, + info->var.bits_per_pixel); + + reg_val = readl(par->preg); + writel(reg_val + 1, par->preg); + + return 0; +} + +static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *info) +{ + u32 *pal = info->pseudo_palette; + u32 cr = red >> (16 - info->var.red.length); + u32 cg = green >> (16 - info->var.green.length); + u32 cb = blue >> (16 - info->var.blue.length); + u32 value; + + if (regno >= PSEUDO_PALETTE_SIZE) + return -EINVAL; + + value = (cr << info->var.red.offset) | + (cg << info->var.green.offset) | + (cb << info->var.blue.offset); + if (info->var.transp.length > 0) { + u32 mask = (1 << info->var.transp.length) - 1; + + mask <<= info->var.transp.offset; + value |= mask; + } + pal[regno] = value; + + return 0; +} + + +static void simplefb_destroy(struct fb_info *info) +{ + if (info->screen_base) + iounmap(info->screen_base); +} + +static const struct fb_ops simplefb_ops = { + .owner = THIS_MODULE, + .fb_destroy = simplefb_destroy, + .fb_setcolreg = simplefb_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_check_var = simplefb_check_var, + .fb_set_par = simplefb_set_par, +}; + +static struct simplefb_format simplefb_formats[] = SIMPLEFB_FORMATS; + +struct simplefb_params { + u32 width; + u32 height; + u32 stride; + struct simplefb_format *format; +}; + +static int 
simplefb_parse_pd(struct platform_device *pdev, + struct simplefb_params *params) +{ + struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev); + int i; + + params->width = pd->width; + params->height = pd->height; + params->stride = pd->stride; + + params->format = NULL; + for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) { + if (strcmp(pd->format, simplefb_formats[i].name)) + continue; + + params->format = &simplefb_formats[i]; + break; + } + + if (!params->format) { + dev_err(&pdev->dev, "Invalid format value\n"); + return -EINVAL; + } + + return 0; +} + +static int simplefb_probe(struct platform_device *pdev) +{ + int ret; + struct simplefb_params params; + struct fb_info *info; + struct simplefb_par *par; + struct resource *mem, *envmem, *regmem; + + ret = simplefb_parse_pd(pdev, ¶ms); + + if (ret) + return ret; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + envmem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + regmem = platform_get_resource(pdev, IORESOURCE_MEM, 2); + if (!mem || !envmem || !regmem) { + dev_err(&pdev->dev, "No memory resource\n"); + return -EINVAL; + } + + info = framebuffer_alloc(sizeof(struct simplefb_par), &pdev->dev); + if (!info) + return -ENOMEM; + platform_set_drvdata(pdev, info); + + par = info->par; + par->penv = ioremap(envmem->start, resource_size(envmem)); + par->preg = ioremap(regmem->start, resource_size(regmem)); + + info->fix = simplefb_fix; + info->fix.smem_start = mem->start; + info->fix.smem_len = resource_size(mem); + info->fix.line_length = params.stride; + + info->var = simplefb_var; + info->var.xres = params.width; + info->var.yres = params.height; + info->var.xres_virtual = params.width; + info->var.yres_virtual = params.height; + info->var.bits_per_pixel = params.format->bits_per_pixel; + info->var.red = params.format->red; + info->var.green = params.format->green; + info->var.blue = params.format->blue; + info->var.transp = params.format->transp; + + ret = devm_aperture_acquire_for_platform_device(pdev, + info->fix.smem_start, + info->fix.smem_len); + if (ret) { + dev_info(&pdev->dev, "cannot acquire aperture\n"); + goto error_fb_release; + } + + info->fbops = &simplefb_ops; + info->flags = 0; + info->screen_base = ioremap_wc(info->fix.smem_start, + info->fix.smem_len); + if (!info->screen_base) { + ret = -ENOMEM; + goto error_fb_release; + } + info->pseudo_palette = par->palette; + + dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n", + info->fix.smem_start, info->fix.smem_len, + info->screen_base); + dev_info(&pdev->dev, "format=%s, mode=%dx%dx%d, linelength=%d\n", + params.format->name, + info->var.xres, info->var.yres, + info->var.bits_per_pixel, info->fix.line_length); + + ret = register_framebuffer(info); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret); + goto error_fb_release; + } else + dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node); + + local_irq_disable(); + mscycles = get_cycles(); + mdelay(1); + mscycles = get_cycles() - mscycles; + local_irq_enable(); + + return ret; +error_fb_release: + framebuffer_release(info); + return ret; +} + +static int simplefb_remove(struct platform_device *pdev) +{ + struct fb_info *info = platform_get_drvdata(pdev); + + unregister_framebuffer(info); + framebuffer_release(info); + + return 0; +} + +static struct platform_driver simplefb_driver = { + .driver = { + .name = "virt-framebuffer", + }, + .probe = simplefb_probe, + .remove = simplefb_remove, +}; + +static void *kcs_data[2] = {&event_jiffies, 
&mscycles};
+static int ls2k500sfb_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	struct simplefb_platform_data mode;
+	struct resource res[3];
+	struct platform_device *pd;
+	struct ls2k500sfb_struct *priv;
+	long phybase, videooffset, videomemorysize;
+	char *pmode = mode_option;
+	int depth;
+	char *penv;
+	int ret, i;
+
+	if (!dev->bus->number || pci_enable_device(dev))
+		return -ENODEV;
+	priv = kzalloc(sizeof(struct ls2k500sfb_struct), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->dev = dev;
+
+	/* the last 16MB of the pcimem BAR are free; the framebuffer sits 2MB into them */
+	phybase = pci_resource_start(dev, 0);
+	phybase += pci_resource_len(dev, 0) - 0x1000000;
+	penv = ioremap(phybase, 0x100000);
+	/* env strings start at the beginning of that last 16MB; the first env is "video" */
+	if (!strncmp(penv, "video=", 6))
+		pmode = penv + 6;
+
+	priv->penv = penv + 6;
+	memcpy(priv->saved_env, priv->penv, sizeof(priv->saved_env));
+
+	mode.width = simple_strtoul(pmode, &pmode, 0);
+	pmode++;
+	mode.height = simple_strtoul(pmode, &pmode, 0);
+	pmode++;
+	depth = simple_strtoul(pmode, &pmode, 0);
+	if (pmode && pmode[0]) {
+		pmode++;
+		videooffset = simple_strtoul(pmode, &pmode, 0);
+		if (pmode && pmode[0]) {
+			switch (pmode[0]) {
+			case 'M':
+			case 'm':
+				videooffset *= 0x100000;
+				break;
+			case 'K':
+			case 'k':
+				videooffset *= 1024;
+				break;
+			}
+		}
+	} else
+		videooffset = 0x200000;
+	mode.stride = mode.width * depth / 8;
+	mode.format = depth == 32 ? "a8r8g8b8" : "r5g6b5";
+
+	videomemorysize = 0x400000;
+
+	memset(res, 0, sizeof(res));
+	res[0].start = phybase + videooffset;
+	res[0].end = phybase + videooffset + videomemorysize - 1;
+	res[0].flags = IORESOURCE_MEM;
+	res[0].parent = &dev->resource[0];
+
+	res[1].start = phybase;
+	res[1].end = phybase + 64 - 1;
+	res[1].flags = IORESOURCE_MEM;
+	res[1].parent = &dev->resource[0];
+
+	res[2].start = phybase + 0x00f00014;
+	res[2].end = phybase + 0x00f0001c - 1;
+	res[2].flags = IORESOURCE_MEM;
+	res[2].parent = &dev->resource[0];
+
+	priv->pd = pd = platform_device_register_resndata(NULL, "virt-framebuffer", 0,
+							  res, 3, &mode, sizeof(mode));
+
+	ret = platform_driver_register(&simplefb_driver);
+	if (ret)
+		return ret;
+	priv->wq = create_singlethread_workqueue("ls2k500sfb wq");
+	INIT_WORK(&priv->work, ls2k500sfb_events_fn);
+	INIT_DELAYED_WORK(&priv->redraw_work, ls2k500sfb_redraw_fn);
+
+	ls2k500sfb_events_fn(&priv->work);
+	if (request_irq(dev->irq, ls2k500sfb_interrupt, IRQF_SHARED | IRQF_TRIGGER_RISING,
+			"ls2k500sfb", priv))
+		pr_err("request_irq(%d) failed\n", dev->irq);
+#ifdef CONFIG_LOONGARCH
+	register_gpio_reboot_handler(priv);
+#endif
+	pci_set_drvdata(dev, priv);
+	for (i = 0; i < 5; i++) {
+		res[0].start = phybase + 0x00f00000 + 0x1c*i;
+		res[0].end = phybase + 0x00f00000 + 0x1c*(i+1) - 1;
+		platform_device_register_resndata(NULL, "ipmi_ls2k500_si", i, res, 1,
+						  kcs_data, sizeof(kcs_data));
+	}
+
+	return PTR_ERR_OR_ZERO(pd);
+}
+
+static void ls2k500sfb_remove(struct pci_dev *dev)
+{
+	struct ls2k500sfb_struct *priv = pci_get_drvdata(dev);
+
+	platform_device_del(priv->pd);
+}
+
+static struct pci_device_id ls2k500sfb_devices[] = {
+	{PCI_DEVICE(0x14, 0x1a05)},
+	{0, 0, 0, 0, 0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, ls2k500sfb_devices);
+
+static struct pci_driver ls2k500sfb_driver = {
+	.name = "ls2k500sfb",
+	.id_table = ls2k500sfb_devices,
+	.probe = ls2k500sfb_probe,
+	.remove = ls2k500sfb_remove,
+	.driver = {
+		.name = "ls2k500sfb",
+	},
+};
+
+static int __init ls2k500sfb_init(void)
+{
+	return pci_register_driver(&ls2k500sfb_driver);
+}
+
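+/*
+ * Note: ls2k500sfb_probe() derives the initial display mode from a firmware
+ * environment string of the form "video=<xres>x<yres>-<bpp>@<offset>", e.g.
+ * "video=1280x1024-32@2M" (a hypothetical example value). Width, height and
+ * depth are parsed with simple_strtoul(); the optional offset accepts a
+ * 'K'/'M' suffix and defaults to 0x200000 when absent.
+ */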
+module_init(ls2k500sfb_init); + +#ifdef MODULE +static void __exit ls2k500sfb_exit(void) +{ + pci_unregister_driver(&ls2k500sfb_driver); +} + +module_exit(ls2k500sfb_exit); +#endif + +MODULE_LICENSE("GPL"); diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig index f79ab13a5c28b0cd7a71d41ba8d3177914946351..b1c4efa00182e0f40a8fa199620da19fb615918a 100644 --- a/drivers/virt/Kconfig +++ b/drivers/virt/Kconfig @@ -54,4 +54,6 @@ source "drivers/virt/coco/sev-guest/Kconfig" source "drivers/virt/coco/tdx-guest/Kconfig" +source "drivers/virt/coco/csv-guest/Kconfig" + endif diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile index e9aa6fc96fab7242a9963d9a834722af24e4b6a2..62681a26075878b0bef4d2513bf9f36120a4cdd2 100644 --- a/drivers/virt/Makefile +++ b/drivers/virt/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_ACRN_HSM) += acrn/ obj-$(CONFIG_EFI_SECRET) += coco/efi_secret/ obj-$(CONFIG_SEV_GUEST) += coco/sev-guest/ obj-$(CONFIG_INTEL_TDX_GUEST) += coco/tdx-guest/ +obj-$(CONFIG_CSV_GUEST) += coco/csv-guest/ diff --git a/drivers/virt/coco/csv-guest/Kconfig b/drivers/virt/coco/csv-guest/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..f14f6766e5aefb6cec91226231d053297e0354ab --- /dev/null +++ b/drivers/virt/coco/csv-guest/Kconfig @@ -0,0 +1,12 @@ +config CSV_GUEST + tristate "HYGON CSV Guest driver" + default m + depends on HYGON_CSV + help + CSV firmware provides the guest a mechanism to communicate with + the PSP without risk from a malicious hypervisor who wishes to read, + alter, drop or replay the messages sent. The driver provides + userspace interface to communicate with the PSP to request the + attestation report and more. + + If you choose 'M' here, this module will be called csv-guest. diff --git a/drivers/virt/coco/csv-guest/Makefile b/drivers/virt/coco/csv-guest/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a1c3a1499fc6f6e997d1629df4e1677b31f1a28d --- /dev/null +++ b/drivers/virt/coco/csv-guest/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CSV_GUEST) += csv-guest.o diff --git a/drivers/virt/coco/csv-guest/csv-guest.c b/drivers/virt/coco/csv-guest/csv-guest.c new file mode 100644 index 0000000000000000000000000000000000000000..6a77c68b19b4a2f7849772dcdaaca55e3baacb5f --- /dev/null +++ b/drivers/virt/coco/csv-guest/csv-guest.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Userspace interface for CSV guest driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: fangbaoshun + */ +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "csv-guest.h" + +/* Mutex to serialize the command handling. 
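+ * csv_guest_ioctl() takes this mutex around get_report(), so only one
+ * attestation request is processed at a time.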
*/
+static DEFINE_MUTEX(csv_cmd_mutex);
+
+static int csv_get_report(unsigned long arg)
+{
+	u8 *csv_report;
+	long ret;
+	struct csv_report_req req;
+
+	if (copy_from_user(&req, (void __user *)arg,
+			   sizeof(struct csv_report_req)))
+		return -EFAULT;
+
+	if (req.len < CSV_REPORT_INPUT_DATA_LEN || !req.report_data)
+		return -EINVAL;
+
+	csv_report = kzalloc(req.len, GFP_KERNEL);
+	if (!csv_report) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Save user input data */
+	if (copy_from_user(csv_report, req.report_data, CSV_REPORT_INPUT_DATA_LEN)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	/* Generate CSV_REPORT using the "KVM_HC_VM_ATTESTATION" VMMCALL */
+	ret = kvm_hypercall2(KVM_HC_VM_ATTESTATION, __pa(csv_report), req.len);
+	if (ret)
+		goto out;
+
+	if (copy_to_user(req.report_data, csv_report, req.len))
+		ret = -EFAULT;
+
+out:
+	kfree(csv_report);
+	return ret;
+}
+
+static int csv3_get_report(unsigned long arg)
+{
+	struct csv_report_req input;
+	struct page *page = NULL;
+	struct csv3_data_attestation_report *cmd_buff = NULL;
+	void *req_buff = NULL;
+	void *resp_buff = NULL;
+	int ret;
+
+	if (copy_from_user(&input, (void __user *)arg, sizeof(input)))
+		return -EFAULT;
+
+	if (!input.len || !input.report_data)
+		return -EINVAL;
+
+	/* Use alloc_page for alignment */
+	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+	if (!page)
+		return -ENOMEM;
+	cmd_buff = (struct csv3_data_attestation_report *)page_address(page);
+
+	/*
+	 * Query the firmware to get the minimum lengths of the request
+	 * buffer and the response buffer.
+	 */
+	ret = csv3_issue_request_report(__pa(cmd_buff), sizeof(*cmd_buff));
+	if (ret)	/* bail out if the length query itself failed */
+		goto err;
+
+	/*
+	 * input.len must be at least the maximum of the req and resp
+	 * buffer lengths; otherwise return an error.
+	 */
+	if (input.len < max(cmd_buff->req_len, cmd_buff->resp_len)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* Use alloc_page for alignment */
+	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+	if (!page) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	req_buff = page_address(page);
+
+	/* Use alloc_page for alignment */
+	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+	if (!page) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	resp_buff = page_address(page);
+
+	/* Copy user's input data */
+	if (copy_from_user(req_buff, input.report_data, cmd_buff->req_len)) {
+		ret = -EFAULT;
+		goto err;
+	}
+
+	/*
+	 * The req_len and resp_len fields have already been filled by
+	 * firmware when we queried the lengths above.
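+	 * Only req_gpa and resp_gpa still need to be filled in here; the
+	 * second csv3_issue_request_report() call below then performs the
+	 * actual report generation into resp_buff.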
+	 */
+	cmd_buff->req_gpa = __pa(req_buff);
+	cmd_buff->resp_gpa = __pa(resp_buff);
+
+	ret = csv3_issue_request_report(__pa(cmd_buff), sizeof(*cmd_buff));
+	if (ret || cmd_buff->fw_error_code) {
+		pr_err("%s: failed to generate report, fw_error:%#x ret:%d\n",
+		       __func__, cmd_buff->fw_error_code, ret);
+		ret = -EIO;
+		goto err;
+	}
+
+	/* Copy attestation report to user */
+	if (copy_to_user(input.report_data, resp_buff, cmd_buff->resp_len))
+		ret = -EFAULT;
+
+err:
+	if (resp_buff)
+		free_page((unsigned long)resp_buff);
+	if (req_buff)
+		free_page((unsigned long)req_buff);
+	if (cmd_buff)
+		free_page((unsigned long)cmd_buff);
+
+	return ret;
+}
+
+static int get_report(unsigned long arg)
+{
+	int ret = -ENOTTY;
+
+	lockdep_assert_held(&csv_cmd_mutex);
+
+	if (csv3_active())
+		ret = csv3_get_report(arg);
+	else if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+		ret = csv_get_report(arg);
+	return ret;
+}
+
+static long csv_guest_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret = -ENOTTY;
+
+	mutex_lock(&csv_cmd_mutex);
+
+	switch (cmd) {
+	case CSV_CMD_GET_REPORT:
+		ret = get_report(arg);
+		break;
+	default:
+		break;
+	}
+
+	mutex_unlock(&csv_cmd_mutex);
+
+	return ret;
+}
+
+static const struct file_operations csv_guest_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = csv_guest_ioctl,
+	.compat_ioctl = csv_guest_ioctl,
+};
+
+static struct miscdevice csv_guest_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "csv-guest",
+	.fops = &csv_guest_fops,
+	.mode = 0777,
+};
+
+static int __init csv_guest_init(void)
+{
+	/* This driver only works in CSV guest VMs. */
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+		return -ENODEV;
+
+	return misc_register(&csv_guest_dev);
+}
+
+static void __exit csv_guest_exit(void)
+{
+	misc_deregister(&csv_guest_dev);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("HYGON CSV Guest Driver");
+module_init(csv_guest_init);
+module_exit(csv_guest_exit);
diff --git a/drivers/virt/coco/csv-guest/csv-guest.h b/drivers/virt/coco/csv-guest/csv-guest.h
new file mode 100644
index 0000000000000000000000000000000000000000..337211b928db470aee1129f1b4a90e2b61955291
--- /dev/null
+++ b/drivers/virt/coco/csv-guest/csv-guest.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Userspace interface for CSV guest driver
+ *
+ * Copyright (C) Hygon Info Technologies Ltd.
+ */
+
+#ifndef __VIRT_CSVGUEST_H__
+#define __VIRT_CSVGUEST_H__
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Lengths of the user input data used in the VMMCALL */
+#define CSV_REPORT_USER_DATA_LEN	64
+#define CSV_REPORT_MNONCE_LEN		16
+#define CSV_REPORT_HASH_LEN		32
+#define CSV_REPORT_INPUT_DATA_LEN	(CSV_REPORT_USER_DATA_LEN + CSV_REPORT_MNONCE_LEN \
+					 + CSV_REPORT_HASH_LEN)
+
+/**
+ * struct csv_report_req - Request struct for CSV_CMD_GET_REPORT IOCTL.
+ *
+ * @report_data: User buffer with REPORT_DATA to be included into CSV_REPORT; it is also
+ *		 where the CSV_REPORT output from VMMCALL[KVM_HC_VM_ATTESTATION] is stored.
+ * @len:	 Length of the user buffer.
+ */
+struct csv_report_req {
+	u8 *report_data;
+	int len;
+};
+
+/*
+ * CSV_CMD_GET_REPORT - Get CSV_REPORT using VMMCALL[KVM_HC_VM_ATTESTATION]
+ *
+ * Return 0 on success, -EIO on VMMCALL execution failure, and
+ * standard errno on other general error cases.
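+ *
+ * A minimal userspace sketch (illustrative only: the device path follows
+ * from the "csv-guest" misc device name, and 4096 is an arbitrary example
+ * length assumed to satisfy the firmware-required minimums):
+ *
+ *	u8 buf[4096] = {};	// first CSV_REPORT_INPUT_DATA_LEN bytes
+ *				// hold the caller's REPORT_DATA input
+ *	struct csv_report_req req = { .report_data = buf, .len = sizeof(buf) };
+ *	int fd = open("/dev/csv-guest", O_RDWR);
+ *
+ *	if (fd < 0 || ioctl(fd, CSV_CMD_GET_REPORT, &req))
+ *		err(1, "CSV_CMD_GET_REPORT");
+ *	// on success, buf holds the attestation report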
+ */ +#define CSV_CMD_GET_REPORT _IOWR('D', 1, struct csv_report_req) + +#endif /* __VIRT_CSVGUEST_H__ */ diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 0a53a61231c2944092d3ecac74d5caf40aaca2d3..da89d498d14eeea7874286ee8618a06f222ad655 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -117,7 +117,7 @@ config VIRTIO_BALLOON config VIRTIO_MEM tristate "Virtio mem driver" - depends on X86_64 || ARM64 + depends on X86_64 || ARM64 || SW64 depends on VIRTIO depends on MEMORY_HOTPLUG depends on MEMORY_HOTREMOVE diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 80669e05bf0ee4dfc2abebbfaa6cc29ca15336ee..e4e0829eac469ce971de0a7fb88708310dad835b 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #ifdef DEBUG @@ -251,6 +252,21 @@ static bool virtqueue_use_indirect(const struct vring_virtqueue *vq, return (vq->indirect && total_sg > 1 && vq->vq.num_free); } +static bool vring_force_dma_api; + +#ifdef MODULE +module_param(vring_force_dma_api, bool, 0640); +#else +static int __init vring_dma_api_setup(char *str) +{ + vring_force_dma_api = true; + printk(KERN_INFO "Force vring dma api enabled\n"); + + return 0; +} +__setup("vring_force_dma_api", vring_dma_api_setup); +#endif + /* * Modern virtio devices have feature bits to specify whether they need a * quirk and bypass the IOMMU. If not there, just use the DMA API. @@ -279,6 +295,13 @@ static bool virtqueue_use_indirect(const struct vring_virtqueue *vq, static bool vring_use_dma_api(const struct virtio_device *vdev) { + /* + * Prior to xdragon platform 20181230 release (e.g. 0930 release), we + * need this hack to get ENI hotplug to work. + */ + if (vring_force_dma_api) + return true; + if (!virtio_has_dma_quirk(vdev)) return true; diff --git a/fs/Kconfig b/fs/Kconfig index aa7e03cc1941cb3e6145d95886b99b83b677a69b..845e18c97ad50a8936ebde3d9b236b806d91ab2e 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -267,6 +267,7 @@ config HUGETLBFS config HUGETLB_PAGE def_bool HUGETLBFS + select XARRAY_MULTI config HUGETLB_PAGE_OPTIMIZE_VMEMMAP def_bool HUGETLB_PAGE @@ -325,6 +326,7 @@ source "fs/omfs/Kconfig" source "fs/hpfs/Kconfig" source "fs/qnx4/Kconfig" source "fs/qnx6/Kconfig" +source "fs/resctrl/Kconfig" source "fs/romfs/Kconfig" source "fs/pstore/Kconfig" source "fs/sysv/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index f9541f40be4e08fbdee72f39e8c0c7ef856fb3f6..b62375770deed116ab26f3ce7f37198b54d09c01 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -129,3 +129,4 @@ obj-$(CONFIG_EFIVAR_FS) += efivarfs/ obj-$(CONFIG_EROFS_FS) += erofs/ obj-$(CONFIG_VBOXSF_FS) += vboxsf/ obj-$(CONFIG_ZONEFS_FS) += zonefs/ +obj-$(CONFIG_RESCTRL_FS) += resctrl/ diff --git a/fs/attr.c b/fs/attr.c index a8ae5f6d9b16786dec162bcdd7150b3203863769..5f0211c6f983e0d8be10757ed83f97134cf1fbf5 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -19,8 +19,6 @@ #include #include -#include "internal.h" - /** * setattr_should_drop_sgid - determine whether the setgid bit needs to be * removed diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index fb2c8d14327ae160d0ac97b49010a7e889671363..9016f46f98ab23a1fc7b89369f2311650011ac46 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1037,6 +1037,7 @@ static int load_elf_binary(struct linux_binprm *bprm) unsigned long k, vaddr; unsigned long total_size = 0; unsigned long alignment; + int exec_order = file_exec_order(); if (elf_ppnt->p_type != PT_LOAD) continue; @@ -1161,6 +1162,10 @@ static int 
load_elf_binary(struct linux_binprm *bprm) retval = -EINVAL; goto out_free_dentry; } + + if (exec_order > 0 && interpreter && + total_size >= (PAGE_SIZE << exec_order)) + load_bias &= ~((PAGE_SIZE << exec_order) - 1); } error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, diff --git a/fs/d_path.c b/fs/d_path.c index 5f4da5c8d5db3e957b1034312e9a2a257b6018c9..df50090b6a0f78b696510a2624262657520235db 100644 --- a/fs/d_path.c +++ b/fs/d_path.c @@ -196,6 +196,58 @@ static int prepend_path(const struct path *path, return error; } +static int prepend_path_locked(const struct path *path, + const struct path *root, + struct prepend_buffer *p) +{ + struct prepend_buffer b; + unsigned seq = 0; + int error; + + rcu_read_lock(); +restart: + b = *p; + read_seqbegin_or_lock(&rename_lock, &seq); + error = __prepend_path(path->dentry, real_mount(path->mnt), root, &b); + if (!(seq & 1)) + rcu_read_unlock(); + if (need_seqretry(&rename_lock, seq)) { + seq = 1; + goto restart; + } + done_seqretry(&rename_lock, seq); + + if (unlikely(error == 3)) + b = *p; + + if (b.len == p->len) + prepend_char(&b, '/'); + + *p = b; + return error; +} + +/* + * d_absolute_path_locked - return the absolute path of a dentry + * + * @path: path to report + * @buf: buffer to return value in + * @buflen: buffer length + * + * Write absolute pathname like d_absolute_path() except with mount_lock held. + */ +char *d_absolute_path_locked(const struct path *path, char *buf, int buflen) +{ + struct path root = {}; + DECLARE_BUFFER(b, buf, buflen); + + prepend_char(&b, 0); + if (unlikely(prepend_path_locked(path, &root, &b) > 1)) + return ERR_PTR(-EINVAL); + return extract_string(&b); +} +EXPORT_SYMBOL(d_absolute_path_locked); + /** * __d_path - return the path of a dentry * @path: the dentry/vfsmount to report diff --git a/fs/dax.c b/fs/dax.c index 8c09578fa03573e1df14856367ba175e67739491..400ad547dee1c151eaf6dc5f1a468ff69be2592b 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -322,23 +322,34 @@ static unsigned long dax_end_pfn(void *entry) static inline bool dax_page_is_shared(struct page *page) { - return page->mapping == PAGE_MAPPING_DAX_SHARED; + return (unsigned long)READ_ONCE(page->mapping) & PAGE_MAPPING_DAX_SHARED; } /* * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the * refcount. */ -static inline void dax_page_share_get(struct page *page) +static inline void dax_page_share_get(struct page *page, + struct address_space *mapping, pgoff_t index) { - if (page->mapping != PAGE_MAPPING_DAX_SHARED) { + struct address_space *oldmapping = READ_ONCE(page->mapping); + + if (!((unsigned long)oldmapping & PAGE_MAPPING_DAX_SHARED)) { /* * Reset the index if the page was already mapped * regularly before. 
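+	 * (With this change, PAGE_MAPPING_DAX_SHARED is ORed into
+	 * page->mapping as a flag bit instead of replacing the pointer
+	 * outright, so the owning mapping remains recoverable by masking
+	 * the bit off, while page->share keeps counting the sharers.)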
*/ - if (page->mapping) + if (oldmapping) page->share = 1; - page->mapping = PAGE_MAPPING_DAX_SHARED; + + if (test_bit(AS_FSDAX_NORMAP, &mapping->flags)) { + /* Note that we (ab)use page->private to keep index for now */ + WRITE_ONCE(page->private, index); + /* paired with smp_mb() in xfs_dax_notify_ddev_failure2() */ + smp_mb(); + } + WRITE_ONCE(page->mapping, + (void *)((unsigned long)mapping | PAGE_MAPPING_DAX_SHARED)); } page->share++; } @@ -367,7 +378,7 @@ static void dax_associate_entry(void *entry, struct address_space *mapping, struct page *page = pfn_to_page(pfn); if (shared) { - dax_page_share_get(page); + dax_page_share_get(page, mapping, index); } else { WARN_ON_ONCE(page->mapping); page->mapping = mapping; @@ -1063,6 +1074,46 @@ int dax_writeback_mapping_range(struct address_space *mapping, } EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); +int dax_copy_range(struct block_device *bdev, struct dax_device *dax_dev, + u64 src_addr, u64 dst_addr, size_t size) +{ + const sector_t src_sector = src_addr >> SECTOR_SHIFT; + const sector_t dst_sector = dst_addr >> SECTOR_SHIFT; + pgoff_t spgoff, dpgoff; + int id, rc; + long length; + void *saddr, *daddr; + + rc = bdev_dax_pgoff(bdev, src_sector, size, &spgoff); + if (rc) + return rc; + + rc = bdev_dax_pgoff(bdev, dst_sector, size, &dpgoff); + if (rc) + return rc; + + id = dax_read_lock(); + length = dax_direct_access(dax_dev, spgoff, PHYS_PFN(size), DAX_ACCESS, + &saddr, NULL); + if (length < 0) { + rc = length; + goto out; + } + + length = dax_direct_access(dax_dev, dpgoff, PHYS_PFN(size), DAX_ACCESS, + &daddr, NULL); + if (length < 0) { + rc = length; + goto out; + } + + rc = copy_mc_to_kernel(daddr, saddr, size); +out: + dax_read_unlock(id); + return rc; +} +EXPORT_SYMBOL_GPL(dax_copy_range); + static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, size_t size, void **kaddr, pfn_t *pfnp) { diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index f6dc961e6c2bc501bab4bc44f51877c6df92119d..cf99d819b63d6c37d12e49e7f4640d9ed6753f66 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -21,7 +21,7 @@ config EROFS_FS performance under extremely memory pressure without extra cost. See the documentation at - for more details. + and the web pages at for more details. If unsure, say N. @@ -74,6 +74,23 @@ config EROFS_FS_SECURITY If you are not using a security module, say N. +config EROFS_FS_BACKED_BY_FILE + bool "File-backed EROFS filesystem support" + depends on EROFS_FS + default y + help + This allows EROFS to use filesystem image files directly, without + the intercession of loopback block devices or likewise. It is + particularly useful for container images with numerous blobs and + other sandboxes, where loop devices behave intricately. It can also + be used to simplify error-prone lifetime management of unnecessary + virtual block devices. + + Note that this feature, along with ongoing fanotify pre-content + hooks, will eventually replace "EROFS over fscache." + + If you don't want to enable this feature, say N. + config EROFS_FS_ZIP bool "EROFS Data Compression Support" depends on EROFS_FS @@ -91,13 +108,10 @@ config EROFS_FS_ZIP_LZMA select XZ_DEC_MICROLZMA help Saying Y here includes support for reading EROFS file systems - containing LZMA compressed data, specifically called microLZMA. it - gives better compression ratios than the LZ4 algorithm, at the + containing LZMA compressed data, specifically called microLZMA. 
It + gives better compression ratios than the default LZ4 format, at the expense of more CPU overhead. - LZMA support is an experimental feature for now and so most file - systems will be readable without selecting this option. - If unsure, say N. config EROFS_FS_ZIP_DEFLATE @@ -115,6 +129,21 @@ config EROFS_FS_ZIP_DEFLATE If unsure, say N. +config EROFS_FS_ZIP_ZSTD + bool "EROFS Zstandard compressed data support" + depends on EROFS_FS_ZIP + select ZSTD_DECOMPRESS + help + Saying Y here includes support for reading EROFS file systems + containing Zstandard compressed data. It gives better compression + ratios than the default LZ4 format, while it costs more CPU + overhead. + + Zstandard support is an experimental feature for now and so most + file systems will be readable without selecting this option. + + If unsure, say N. + config EROFS_FS_ONDEMAND bool "EROFS fscache-based on-demand read support" depends on CACHEFILES_ONDEMAND && (EROFS_FS=m && FSCACHE || EROFS_FS=y && FSCACHE=y) diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile index 994d0b9deddf151ddabce80dfd378c3d95ba4fc2..4331d53c7109550a0518f2ed8df456deecdd2f8c 100644 --- a/fs/erofs/Makefile +++ b/fs/erofs/Makefile @@ -1,9 +1,11 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_EROFS_FS) += erofs.o -erofs-objs := super.o inode.o data.o namei.o dir.o utils.o sysfs.o +erofs-objs := super.o inode.o data.o namei.o dir.o sysfs.o erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o -erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o pcpubuf.o +erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o zutil.o erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o +erofs-$(CONFIG_EROFS_FS_ZIP_ZSTD) += decompressor_zstd.o +erofs-$(CONFIG_EROFS_FS_BACKED_BY_FILE) += fileio.o erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 279933e007d21798549df035b4aa595597f225b6..7bfe251680ec028d33c4ca36f2ed43c1fa27497f 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -11,13 +11,12 @@ struct z_erofs_decompress_req { struct super_block *sb; struct page **in, **out; - unsigned short pageofs_in, pageofs_out; unsigned int inputsize, outputsize; - /* indicate the algorithm will be used for decompression */ - unsigned int alg; + unsigned int alg; /* the algorithm for decompression */ bool inplace_io, partial_decoding, fillgaps; + gfp_t gfp; /* allocation flags for extra temporary buffers */ }; struct z_erofs_decompressor { @@ -25,6 +24,8 @@ struct z_erofs_decompressor { void *data, int size); int (*decompress)(struct z_erofs_decompress_req *rq, struct page **pagepool); + int (*init)(void); + void (*exit)(void); char *name; }; @@ -53,17 +54,14 @@ struct z_erofs_decompressor { */ /* - * short-lived pages are pages directly from buddy system with specific - * page->private (no need to set PagePrivate since these are non-LRU / - * non-movable pages and bypass reclaim / migration code). + * Currently, short-lived pages are pages directly from buddy system + * with specific page->private (Z_EROFS_SHORTLIVED_PAGE). + * In the future world of Memdescs, it should be type 0 (Misc) memory + * which type can be checked with a new helper. 
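+ *
+ * For instance (mirroring what the decompressors in this series do), a
+ * short-lived page is tagged right after allocation:
+ *
+ *	page = erofs_allocpage(pagepool, gfp);
+ *	set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);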
*/ static inline bool z_erofs_is_shortlived_page(struct page *page) { - if (page->private != Z_EROFS_SHORTLIVED_PAGE) - return false; - - DBG_BUGON(page->mapping); - return true; + return page->private == Z_EROFS_SHORTLIVED_PAGE; } static inline bool z_erofs_put_shortlivedpage(struct page **pagepool, @@ -71,35 +69,32 @@ static inline bool z_erofs_put_shortlivedpage(struct page **pagepool, { if (!z_erofs_is_shortlived_page(page)) return false; - - /* short-lived pages should not be used by others at the same time */ - if (page_ref_count(page) > 1) { - put_page(page); - } else { - /* follow the pcluster rule above. */ - erofs_pagepool_add(pagepool, page); - } + erofs_pagepool_add(pagepool, page); return true; } -#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) -static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi, - struct page *page) -{ - return page->mapping == MNGD_MAPPING(sbi); -} +extern const struct z_erofs_decompressor z_erofs_lzma_decomp; +extern const struct z_erofs_decompressor z_erofs_deflate_decomp; +extern const struct z_erofs_decompressor z_erofs_zstd_decomp; +extern const struct z_erofs_decompressor *z_erofs_decomp[]; + +struct z_erofs_stream_dctx { + struct z_erofs_decompress_req *rq; + unsigned int inpages, outpages; /* # of {en,de}coded pages */ + int no, ni; /* the current {en,de}coded page # */ + unsigned int avail_out; /* remaining bytes in the decoded buffer */ + unsigned int inbuf_pos, inbuf_sz; + /* current status of the encoded buffer */ + u8 *kin, *kout; /* buffer mapped pointers */ + void *bounce; /* bounce buffer for inplace I/Os */ + bool bounced; /* is the bounce buffer used now? */ +}; + +int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst, + void **src, struct page **pgpl); int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, unsigned int padbufsize); -extern const struct z_erofs_decompressor erofs_decompressors[]; - -/* prototypes for specific algorithms */ -int z_erofs_load_lzma_config(struct super_block *sb, - struct erofs_super_block *dsb, void *data, int size); -int z_erofs_load_deflate_config(struct super_block *sb, - struct erofs_super_block *dsb, void *data, int size); -int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool); -int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool); +int __init z_erofs_init_decompressor(void); +void z_erofs_exit_decompressor(void); #endif diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 19ab9bb3a9a0e12178ba508d3517460578b74a32..a286a57ee9d9a5129414e7b50cdb72b31794d853 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -5,17 +5,15 @@ * Copyright (C) 2021, Alibaba Cloud */ #include "internal.h" -#include #include -#include #include void erofs_unmap_metabuf(struct erofs_buf *buf) { - if (buf->kmap_type == EROFS_KMAP) - kunmap_local(buf->base); + if (!buf->base) + return; + kunmap_local(buf->base); buf->base = NULL; - buf->kmap_type = EROFS_NO_KMAP; } void erofs_put_metabuf(struct erofs_buf *buf) @@ -23,45 +21,30 @@ void erofs_put_metabuf(struct erofs_buf *buf) if (!buf->page) return; erofs_unmap_metabuf(buf); - put_page(buf->page); + folio_put(page_folio(buf->page)); buf->page = NULL; } -/* - * Derive the block size from inode->i_blkbits to make compatible with - * anonymous inode in fscache mode. 
- */ -void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr, +void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, enum erofs_kmap_type type) { - struct inode *inode = buf->inode; - erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits; pgoff_t index = offset >> PAGE_SHIFT; - struct page *page = buf->page; - struct folio *folio; - unsigned int nofs_flag; + struct folio *folio = NULL; - if (!page || page->index != index) { + if (buf->page) { + folio = page_folio(buf->page); + if (folio_file_page(folio, index) != buf->page) + erofs_unmap_metabuf(buf); + } + if (!folio || !folio_contains(folio, index)) { erofs_put_metabuf(buf); - - nofs_flag = memalloc_nofs_save(); - folio = read_cache_folio(inode->i_mapping, index, NULL, NULL); - memalloc_nofs_restore(nofs_flag); + folio = read_mapping_folio(buf->mapping, index, buf->file); if (IS_ERR(folio)) return folio; - - /* should already be PageUptodate, no need to lock page */ - page = folio_file_page(folio, index); - buf->page = page; - } - if (buf->kmap_type == EROFS_NO_KMAP) { - if (type == EROFS_KMAP) - buf->base = kmap_local_page(page); - buf->kmap_type = type; - } else if (buf->kmap_type != type) { - DBG_BUGON(1); - return ERR_PTR(-EFAULT); } + buf->page = folio_file_page(folio, index); + if (!buf->base && type == EROFS_KMAP) + buf->base = kmap_local_page(buf->page); if (type == EROFS_NO_KMAP) return NULL; return buf->base + (offset & ~PAGE_MASK); @@ -69,54 +52,50 @@ void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr, void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb) { - if (erofs_is_fscache_mode(sb)) - buf->inode = EROFS_SB(sb)->s_fscache->inode; + struct erofs_sb_info *sbi = EROFS_SB(sb); + + buf->file = NULL; + if (erofs_is_fileio_mode(sbi)) { + buf->file = sbi->fdev; /* some fs like FUSE needs it */ + buf->mapping = buf->file->f_mapping; + } else if (erofs_is_fscache_mode(sb)) + buf->mapping = sbi->s_fscache->inode->i_mapping; else - buf->inode = sb->s_bdev->bd_inode; + buf->mapping = sb->s_bdev->bd_inode->i_mapping; } void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, - erofs_blk_t blkaddr, enum erofs_kmap_type type) + erofs_off_t offset, enum erofs_kmap_type type) { erofs_init_metabuf(buf, sb); - return erofs_bread(buf, blkaddr, type); + return erofs_bread(buf, offset, type); } static int erofs_map_blocks_flatmode(struct inode *inode, struct erofs_map_blocks *map) { - erofs_blk_t nblocks, lastblk; - u64 offset = map->m_la; struct erofs_inode *vi = EROFS_I(inode); struct super_block *sb = inode->i_sb; bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE); + erofs_blk_t lastblk = erofs_iblks(inode) - tailendpacking; - nblocks = erofs_iblks(inode); - lastblk = nblocks - tailendpacking; - - /* there is no hole in flatmode */ - map->m_flags = EROFS_MAP_MAPPED; - if (offset < erofs_pos(sb, lastblk)) { + map->m_flags = EROFS_MAP_MAPPED; /* no hole in flat inodes */ + if (map->m_la < erofs_pos(sb, lastblk)) { map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la; - map->m_plen = erofs_pos(sb, lastblk) - offset; - } else if (tailendpacking) { + map->m_plen = erofs_pos(sb, lastblk) - map->m_la; + } else { + DBG_BUGON(!tailendpacking); map->m_pa = erofs_iloc(inode) + vi->inode_isize + - vi->xattr_isize + erofs_blkoff(sb, offset); - map->m_plen = inode->i_size - offset; + vi->xattr_isize + erofs_blkoff(sb, map->m_la); + map->m_plen = inode->i_size - map->m_la; /* inline data should be located in the same meta block */ if (erofs_blkoff(sb, map->m_pa) + map->m_plen 
> sb->s_blocksize) { - erofs_err(sb, "inline data cross block boundary @ nid %llu", - vi->nid); + erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid); DBG_BUGON(1); return -EFSCORRUPTED; } map->m_flags |= EROFS_MAP_META; - } else { - erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx", - vi->nid, inode->i_size, map->m_la); - DBG_BUGON(1); - return -EIO; } return 0; } @@ -138,7 +117,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) if (map->m_la >= inode->i_size) { /* leave out-of-bound access unmapped */ map->m_flags = 0; - map->m_plen = 0; + map->m_plen = map->m_llen; goto out; } @@ -156,7 +135,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, unit) + unit * chunknr; - kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP); + kaddr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP); if (IS_ERR(kaddr)) { err = PTR_ERR(kaddr); goto out; @@ -167,7 +146,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) /* handle block map */ if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) { - __le32 *blkaddr = kaddr + erofs_blkoff(sb, pos); + __le32 *blkaddr = kaddr; if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) { map->m_flags = 0; @@ -178,7 +157,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) goto out_unlock; } /* parse chunk indexes */ - idx = kaddr + erofs_blkoff(sb, pos); + idx = kaddr; switch (le32_to_cpu(idx->blkaddr)) { case EROFS_NULL_ADDR: map->m_flags = 0; @@ -199,16 +178,33 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) return err; } +static void erofs_fill_from_devinfo(struct super_block *sb, + struct erofs_map_dev *map, + struct erofs_device_info *dif) +{ + map->m_bdev = dif->bdev_handle ? dif->bdev_handle->bdev : NULL; + map->m_fp = dif->file; + if (!map->m_bdev && !map->m_fp) { + erofs_err(sb, "invalid device handle and file for path %s", dif->path); + DBG_BUGON(1); + } + map->m_daxdev = dif->dax_dev; + map->m_dax_part_off = dif->dax_part_off; + map->m_fscache = dif->fscache; +} + int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) { struct erofs_dev_context *devs = EROFS_SB(sb)->devs; struct erofs_device_info *dif; + erofs_off_t startoff, length; int id; map->m_bdev = sb->s_bdev; map->m_daxdev = EROFS_SB(sb)->dax_dev; map->m_dax_part_off = EROFS_SB(sb)->dax_part_off; map->m_fscache = EROFS_SB(sb)->s_fscache; + map->m_fp = EROFS_SB(sb)->fdev; if (map->m_deviceid) { down_read(&devs->rwsem); @@ -222,29 +218,20 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) up_read(&devs->rwsem); return 0; } - map->m_bdev = dif->bdev_handle ? dif->bdev_handle->bdev : NULL; - map->m_daxdev = dif->dax_dev; - map->m_dax_part_off = dif->dax_part_off; - map->m_fscache = dif->fscache; + erofs_fill_from_devinfo(sb, map, dif); up_read(&devs->rwsem); } else if (devs->extra_devices && !devs->flatdev) { down_read(&devs->rwsem); idr_for_each_entry(&devs->tree, dif, id) { - erofs_off_t startoff, length; - if (!dif->mapped_blkaddr) continue; + startoff = erofs_pos(sb, dif->mapped_blkaddr); length = erofs_pos(sb, dif->blocks); - if (map->m_pa >= startoff && map->m_pa < startoff + length) { map->m_pa -= startoff; - map->m_bdev = dif->bdev_handle ? 
- dif->bdev_handle->bdev : NULL; - map->m_daxdev = dif->dax_dev; - map->m_dax_part_off = dif->dax_part_off; - map->m_fscache = dif->fscache; + erofs_fill_from_devinfo(sb, map, dif); break; } } @@ -253,6 +240,42 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) return 0; } +/* + * bit 30: I/O error occurred on this folio + * bit 0 - 29: remaining parts to complete this folio + */ +#define EROFS_ONLINEFOLIO_EIO (1 << 30) + +void erofs_onlinefolio_init(struct folio *folio) +{ + union { + atomic_t o; + void *v; + } u = { .o = ATOMIC_INIT(1) }; + + folio->private = u.v; /* valid only if file-backed folio is locked */ +} + +void erofs_onlinefolio_split(struct folio *folio) +{ + atomic_inc((atomic_t *)&folio->private); +} + +void erofs_onlinefolio_end(struct folio *folio, int err) +{ + int orig, v; + + do { + orig = atomic_read((atomic_t *)&folio->private); + v = (orig - 1) | (err ? EROFS_ONLINEFOLIO_EIO : 0); + } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig); + + if (v & ~EROFS_ONLINEFOLIO_EIO) + return; + folio->private = 0; + folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO)); +} + static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, unsigned int flags, struct iomap *iomap, struct iomap *srcmap) { @@ -298,11 +321,10 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, struct erofs_buf buf = __EROFS_BUF_INITIALIZER; iomap->type = IOMAP_INLINE; - ptr = erofs_read_metabuf(&buf, sb, - erofs_blknr(sb, mdev.m_pa), EROFS_KMAP); + ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, EROFS_KMAP); if (IS_ERR(ptr)) return PTR_ERR(ptr); - iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa); + iomap->inline_data = ptr; iomap->private = buf.base; } else { iomap->type = IOMAP_MAPPED; @@ -322,7 +344,6 @@ static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length, struct erofs_buf buf = { .page = kmap_to_page(ptr), .base = ptr, - .kmap_type = EROFS_KMAP, }; DBG_BUGON(iomap->type != IOMAP_INLINE); @@ -383,27 +404,14 @@ static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) if (IS_DAX(inode)) return dax_iomap_rw(iocb, to, &erofs_iomap_ops); #endif - if (iocb->ki_flags & IOCB_DIRECT) { - struct block_device *bdev = inode->i_sb->s_bdev; - unsigned int blksize_mask; - - if (bdev) - blksize_mask = bdev_logical_block_size(bdev) - 1; - else - blksize_mask = i_blocksize(inode) - 1; - - if ((iocb->ki_pos | iov_iter_count(to) | - iov_iter_alignment(to)) & blksize_mask) - return -EINVAL; - + if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_sb->s_bdev) return iomap_dio_rw(iocb, to, &erofs_iomap_ops, NULL, 0, NULL, 0); - } return filemap_read(iocb, to, 0); } /* for uncompressed (aligned) files and raw access for other files */ -const struct address_space_operations erofs_raw_access_aops = { +const struct address_space_operations erofs_aops = { .read_folio = erofs_read_folio, .readahead = erofs_readahead, .bmap = erofs_bmap, @@ -445,8 +453,32 @@ static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma) #define erofs_file_mmap generic_file_readonly_mmap #endif +static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence) +{ + struct inode *inode = file->f_mapping->host; + const struct iomap_ops *ops = &erofs_iomap_ops; + + if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) +#ifdef CONFIG_EROFS_FS_ZIP + ops = &z_erofs_iomap_report_ops; +#else + return generic_file_llseek(file, offset, whence); +#endif + + if (whence == SEEK_HOLE) + offset = 
iomap_seek_hole(inode, offset, ops); + else if (whence == SEEK_DATA) + offset = iomap_seek_data(inode, offset, ops); + else + return generic_file_llseek(file, offset, whence); + + if (offset < 0) + return offset; + return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); +} + const struct file_operations erofs_file_fops = { - .llseek = generic_file_llseek, + .llseek = erofs_file_llseek, .read_iter = erofs_file_read_iter, .mmap = erofs_file_mmap, .get_unmapped_area = thp_get_unmapped_area, diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index aa59788a61e6e4530c264b2baaa637ad8ed7e1d4..3e76908b8ea7d1ace560b946a21f51f34de40bbd 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -2,9 +2,9 @@ /* * Copyright (C) 2019 HUAWEI, Inc. * https://www.huawei.com/ + * Copyright (C) 2024 Alibaba Cloud */ #include "compress.h" -#include #include #ifndef LZ4_DISTANCE_MAX /* history window size */ @@ -55,7 +55,7 @@ static int z_erofs_load_lz4_config(struct super_block *sb, sbi->lz4.max_distance_pages = distance ? DIV_ROUND_UP(distance, PAGE_SIZE) + 1 : LZ4_MAX_DISTANCE_PAGES; - return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks); + return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks); } /* @@ -110,10 +110,10 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx, if (top) { victim = availables[--top]; - get_page(victim); } else { - victim = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + victim = __erofs_allocpage(pagepool, rq->gfp, true); + if (!victim) + return -ENOMEM; set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE); } rq->out[i] = victim; @@ -159,7 +159,7 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx, docopy: /* Or copy compressed data which can be overlapped to per-CPU buffer */ in = rq->in; - src = erofs_get_pcpubuf(ctx->inpages); + src = z_erofs_get_gbuf(ctx->inpages); if (!src) { DBG_BUGON(1); kunmap_local(inpage); @@ -260,7 +260,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, } else if (maptype == 1) { vm_unmap_ram(src, ctx->inpages); } else if (maptype == 2) { - erofs_put_pcpubuf(src); + z_erofs_put_gbuf(src); } else if (maptype != 3) { DBG_BUGON(1); return -EFAULT; @@ -315,73 +315,169 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, struct page **pagepool) { - const unsigned int inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; - const unsigned int outpages = + const unsigned int nrpages_in = + PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT; + const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int righthalf = min_t(unsigned int, rq->outputsize, - PAGE_SIZE - rq->pageofs_out); - const unsigned int lefthalf = rq->outputsize - righthalf; - const unsigned int interlaced_offset = - rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 
0 : rq->pageofs_out; - u8 *src; - - if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) { - DBG_BUGON(1); - return -EFSCORRUPTED; + const unsigned int bs = rq->sb->s_blocksize; + unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt; + u8 *kin; + + if (rq->outputsize > rq->inputsize) + return -EOPNOTSUPP; + if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) { + cur = bs - (rq->pageofs_out & (bs - 1)); + pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK; + cur = min(cur, rq->outputsize); + if (cur && rq->out[0]) { + kin = kmap_local_page(rq->in[nrpages_in - 1]); + if (rq->out[0] == rq->in[nrpages_in - 1]) { + memmove(kin + rq->pageofs_out, kin + pi, cur); + flush_dcache_page(rq->out[0]); + } else { + memcpy_to_page(rq->out[0], rq->pageofs_out, + kin + pi, cur); + } + kunmap_local(kin); + } + rq->outputsize -= cur; } - if (rq->out[0] == *rq->in) { - DBG_BUGON(rq->pageofs_out); - return 0; + for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) { + insz = min_t(unsigned int, PAGE_SIZE - rq->pageofs_in, rq->outputsize); + rq->outputsize -= insz; + if (!rq->in[ni]) + continue; + kin = kmap_local_page(rq->in[ni]); + pi = 0; + do { + no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT; + po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK; + DBG_BUGON(no >= nrpages_out); + cnt = min_t(unsigned int, insz - pi, PAGE_SIZE - po); + if (rq->out[no] == rq->in[ni]) { + memmove(kin + po, + kin + rq->pageofs_in + pi, cnt); + flush_dcache_page(rq->out[no]); + } else if (rq->out[no]) { + memcpy_to_page(rq->out[no], po, + kin + rq->pageofs_in + pi, cnt); + } + pi += cnt; + } while (pi < insz); + kunmap_local(kin); } + DBG_BUGON(ni > nrpages_in); + return 0; +} - src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in; - if (rq->out[0]) - memcpy_to_page(rq->out[0], rq->pageofs_out, - src + interlaced_offset, righthalf); - - if (outpages > inpages) { - DBG_BUGON(!rq->out[outpages - 1]); - if (rq->out[outpages - 1] != rq->in[inpages - 1]) { - memcpy_to_page(rq->out[outpages - 1], 0, src + - (interlaced_offset ? 
0 : righthalf), - lefthalf); - } else if (!interlaced_offset) { - memmove(src, src + righthalf, lefthalf); - flush_dcache_page(rq->in[inpages - 1]); +int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst, + void **src, struct page **pgpl) +{ + struct z_erofs_decompress_req *rq = dctx->rq; + struct super_block *sb = rq->sb; + struct page **pgo, *tmppage; + unsigned int j; + + if (!dctx->avail_out) { + if (++dctx->no >= dctx->outpages || !rq->outputsize) { + erofs_err(sb, "insufficient space for decompressed data"); + return -EFSCORRUPTED; } + + if (dctx->kout) + kunmap_local(dctx->kout); + dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out); + rq->outputsize -= dctx->avail_out; + pgo = &rq->out[dctx->no]; + if (!*pgo && rq->fillgaps) { /* deduped */ + *pgo = erofs_allocpage(pgpl, rq->gfp); + if (!*pgo) { + dctx->kout = NULL; + return -ENOMEM; + } + set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE); + } + if (*pgo) { + dctx->kout = kmap_local_page(*pgo); + *dst = dctx->kout + rq->pageofs_out; + } else { + *dst = dctx->kout = NULL; + } + rq->pageofs_out = 0; + } + + if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) { + if (++dctx->ni >= dctx->inpages) { + erofs_err(sb, "invalid compressed data"); + return -EFSCORRUPTED; + } + if (dctx->kout) /* unlike kmap(), take care of the orders */ + kunmap_local(dctx->kout); + kunmap_local(dctx->kin); + + dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE); + rq->inputsize -= dctx->inbuf_sz; + dctx->kin = kmap_local_page(rq->in[dctx->ni]); + *src = dctx->kin; + dctx->bounced = false; + if (dctx->kout) { + j = (u8 *)*dst - dctx->kout; + dctx->kout = kmap_local_page(rq->out[dctx->no]); + *dst = dctx->kout + j; + } + dctx->inbuf_pos = 0; + } + + /* + * Handle overlapping: Use the given bounce buffer if the input data is + * under processing; Or utilize short-lived pages from the on-stack page + * pool, where pages are shared among the same request. Note that only + * a few inplace I/O pages need to be doubled. 
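+	 *
+	 * For example (hypothetical indices): if rq->in[ni] is the page
+	 * currently being decoded and it doubles as rq->out[no], decoding
+	 * in place would clobber unread input, so the input is staged
+	 * through the bounce buffer; any later rq->in[j] aliasing the same
+	 * output page is duplicated into a short-lived copy instead.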
+ */ + if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) { + memcpy(dctx->bounce, *src, dctx->inbuf_sz); + *src = dctx->bounce; + dctx->bounced = true; + } + + for (j = dctx->ni + 1; j < dctx->inpages; ++j) { + if (rq->out[dctx->no] != rq->in[j]) + continue; + tmppage = erofs_allocpage(pgpl, rq->gfp); + if (!tmppage) + return -ENOMEM; + set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); + copy_highpage(tmppage, rq->in[j]); + rq->in[j] = tmppage; } - kunmap_local(src); return 0; } -const struct z_erofs_decompressor erofs_decompressors[] = { - [Z_EROFS_COMPRESSION_SHIFTED] = { +const struct z_erofs_decompressor *z_erofs_decomp[] = { + [Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) { .decompress = z_erofs_transform_plain, .name = "shifted" }, - [Z_EROFS_COMPRESSION_INTERLACED] = { + [Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) { .decompress = z_erofs_transform_plain, .name = "interlaced" }, - [Z_EROFS_COMPRESSION_LZ4] = { + [Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) { .config = z_erofs_load_lz4_config, .decompress = z_erofs_lz4_decompress, + .init = z_erofs_gbuf_init, + .exit = z_erofs_gbuf_exit, .name = "lz4" }, #ifdef CONFIG_EROFS_FS_ZIP_LZMA - [Z_EROFS_COMPRESSION_LZMA] = { - .config = z_erofs_load_lzma_config, - .decompress = z_erofs_lzma_decompress, - .name = "lzma" - }, + [Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp, #endif #ifdef CONFIG_EROFS_FS_ZIP_DEFLATE - [Z_EROFS_COMPRESSION_DEFLATE] = { - .config = z_erofs_load_deflate_config, - .decompress = z_erofs_deflate_decompress, - .name = "deflate" - }, + [Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp, +#endif +#ifdef CONFIG_EROFS_FS_ZIP_ZSTD + [Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp, #endif }; @@ -409,6 +505,7 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb) offset = EROFS_SUPER_OFFSET + sbi->sb_size; alg = 0; for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) { + const struct z_erofs_decompressor *dec = z_erofs_decomp[alg]; void *data; if (!(algs & 1)) @@ -420,16 +517,13 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb) break; } - if (alg >= ARRAY_SIZE(erofs_decompressors) || - !erofs_decompressors[alg].config) { + if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) { + ret = dec->config(sb, dsb, data, size); + } else { erofs_err(sb, "algorithm %d isn't enabled on this kernel", alg); ret = -EOPNOTSUPP; - } else { - ret = erofs_decompressors[alg].config(sb, - dsb, data, size); } - kfree(data); if (ret) break; @@ -437,3 +531,28 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb) erofs_put_metabuf(&buf); return ret; } + +int __init z_erofs_init_decompressor(void) +{ + int i, err; + + for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) { + err = z_erofs_decomp[i] ? 
z_erofs_decomp[i]->init() : 0; + if (err) { + while (i--) + if (z_erofs_decomp[i]) + z_erofs_decomp[i]->exit(); + return err; + } + } + return 0; +} + +void z_erofs_exit_decompressor(void) +{ + int i; + + for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) + if (z_erofs_decomp[i]) + z_erofs_decomp[i]->exit(); +} diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index aac2c837ef350bf549c62bee02747b34ac08e690..5070d2fcc737043e1abccc8f0b3ec2efd71b8cb8 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-or-later -#include #include #include "compress.h" @@ -16,7 +15,7 @@ static DECLARE_WAIT_QUEUE_HEAD(z_erofs_deflate_wq); module_param_named(deflate_streams, z_erofs_deflate_nstrms, uint, 0444); -void z_erofs_deflate_exit(void) +static void z_erofs_deflate_exit(void) { /* there should be no running fs instance */ while (z_erofs_deflate_avail_strms) { @@ -42,7 +41,7 @@ void z_erofs_deflate_exit(void) } } -int __init z_erofs_deflate_init(void) +static int __init z_erofs_deflate_init(void) { /* by default, use # of possible CPUs instead */ if (!z_erofs_deflate_nstrms) @@ -50,7 +49,7 @@ int __init z_erofs_deflate_init(void) return 0; } -int z_erofs_load_deflate_config(struct super_block *sb, +static int z_erofs_load_deflate_config(struct super_block *sb, struct erofs_super_block *dsb, void *data, int size) { struct z_erofs_deflate_cfgs *dfl = data; @@ -98,27 +97,26 @@ int z_erofs_load_deflate_config(struct super_block *sb, return -ENOMEM; } -int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool) +static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl) { - const unsigned int nrpages_out = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int nrpages_in = - PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; struct super_block *sb = rq->sb; - unsigned int insz, outsz, pofs; + struct z_erofs_stream_dctx dctx = { + .rq = rq, + .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, + .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) + >> PAGE_SHIFT, + .no = -1, .ni = 0, + }; struct z_erofs_deflate *strm; - u8 *kin, *kout = NULL; - bool bounced = false; - int no = -1, ni = 0, j = 0, zerr, err; + int zerr, err; /* 1. get the exact DEFLATE compressed size */ - kin = kmap_local_page(*rq->in); - err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, - min_t(unsigned int, rq->inputsize, - sb->s_blocksize - rq->pageofs_in)); + dctx.kin = kmap_local_page(*rq->in); + err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in, + min(rq->inputsize, sb->s_blocksize - rq->pageofs_in)); if (err) { - kunmap_local(kin); + kunmap_local(dctx.kin); return err; } @@ -135,98 +133,35 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, spin_unlock(&z_erofs_deflate_lock); /* 3. 
multi-call decompress */ - insz = rq->inputsize; - outsz = rq->outputsize; zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS); if (zerr != Z_OK) { err = -EIO; goto failed_zinit; } - pofs = rq->pageofs_out; - strm->z.avail_in = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in); - insz -= strm->z.avail_in; - strm->z.next_in = kin + rq->pageofs_in; + rq->fillgaps = true; /* DEFLATE doesn't support NULL output buffer */ + strm->z.avail_in = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in); + rq->inputsize -= strm->z.avail_in; + strm->z.next_in = dctx.kin + rq->pageofs_in; strm->z.avail_out = 0; + dctx.bounce = strm->bounce; while (1) { - if (!strm->z.avail_out) { - if (++no >= nrpages_out || !outsz) { - erofs_err(sb, "insufficient space for decompressed data"); - err = -EFSCORRUPTED; - break; - } - - if (kout) - kunmap_local(kout); - strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs); - outsz -= strm->z.avail_out; - if (!rq->out[no]) { - rq->out[no] = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); - set_page_private(rq->out[no], - Z_EROFS_SHORTLIVED_PAGE); - } - kout = kmap_local_page(rq->out[no]); - strm->z.next_out = kout + pofs; - pofs = 0; - } - - if (!strm->z.avail_in && insz) { - if (++ni >= nrpages_in) { - erofs_err(sb, "invalid compressed data"); - err = -EFSCORRUPTED; - break; - } - - if (kout) { /* unlike kmap(), take care of the orders */ - j = strm->z.next_out - kout; - kunmap_local(kout); - } - kunmap_local(kin); - strm->z.avail_in = min_t(u32, insz, PAGE_SIZE); - insz -= strm->z.avail_in; - kin = kmap_local_page(rq->in[ni]); - strm->z.next_in = kin; - bounced = false; - if (kout) { - kout = kmap_local_page(rq->out[no]); - strm->z.next_out = kout + j; - } - } - - /* - * Handle overlapping: Use bounced buffer if the compressed - * data is under processing; Or use short-lived pages from the - * on-stack pagepool where pages share among the same request - * and not _all_ inplace I/O pages are needed to be doubled. - */ - if (!bounced && rq->out[no] == rq->in[ni]) { - memcpy(strm->bounce, strm->z.next_in, strm->z.avail_in); - strm->z.next_in = strm->bounce; - bounced = true; - } - - for (j = ni + 1; j < nrpages_in; ++j) { - struct page *tmppage; - - if (rq->out[no] != rq->in[j]) - continue; - - DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb), - rq->in[j])); - tmppage = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); - set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); - copy_highpage(tmppage, rq->in[j]); - rq->in[j] = tmppage; - } + dctx.avail_out = strm->z.avail_out; + dctx.inbuf_sz = strm->z.avail_in; + err = z_erofs_stream_switch_bufs(&dctx, + (void **)&strm->z.next_out, + (void **)&strm->z.next_in, pgpl); + if (err) + break; + strm->z.avail_out = dctx.avail_out; + strm->z.avail_in = dctx.inbuf_sz; zerr = zlib_inflate(&strm->z, Z_SYNC_FLUSH); - if (zerr != Z_OK || !(outsz + strm->z.avail_out)) { + if (zerr != Z_OK || !(rq->outputsize + strm->z.avail_out)) { if (zerr == Z_OK && rq->partial_decoding) break; - if (zerr == Z_STREAM_END && !outsz) + if (zerr == Z_STREAM_END && !rq->outputsize) break; erofs_err(sb, "failed to decompress %d in[%u] out[%u]", zerr, rq->inputsize, rq->outputsize); @@ -234,13 +169,12 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, break; } } - if (zlib_inflateEnd(&strm->z) != Z_OK && !err) err = -EIO; - if (kout) - kunmap_local(kout); + if (dctx.kout) + kunmap_local(dctx.kout); failed_zinit: - kunmap_local(kin); + kunmap_local(dctx.kin); /* 4. 
push back DEFLATE stream context to the global list */ spin_lock(&z_erofs_deflate_lock); strm->next = z_erofs_deflate_head; @@ -249,3 +183,11 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, wake_up(&z_erofs_deflate_wq); return err; } + +const struct z_erofs_decompressor z_erofs_deflate_decomp = { + .config = z_erofs_load_deflate_config, + .decompress = z_erofs_deflate_decompress, + .init = z_erofs_deflate_init, + .exit = z_erofs_deflate_exit, + .name = "deflate", +}; diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index ba4ec73f4aaec8e8b82bf89c537159269353d8e8..40666815046f2b7a6f8f5b7e6afd9e089ed73d3c 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -1,12 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include -#include #include "compress.h" struct z_erofs_lzma { struct z_erofs_lzma *next; struct xz_dec_microlzma *state; - struct xz_buf buf; u8 bounce[PAGE_SIZE]; }; @@ -19,7 +17,7 @@ static DECLARE_WAIT_QUEUE_HEAD(z_erofs_lzma_wq); module_param_named(lzma_streams, z_erofs_lzma_nstrms, uint, 0444); -void z_erofs_lzma_exit(void) +static void z_erofs_lzma_exit(void) { /* there should be no running fs instance */ while (z_erofs_lzma_avail_strms) { @@ -47,7 +45,7 @@ void z_erofs_lzma_exit(void) } } -int __init z_erofs_lzma_init(void) +static int __init z_erofs_lzma_init(void) { unsigned int i; @@ -71,7 +69,7 @@ int __init z_erofs_lzma_init(void) return 0; } -int z_erofs_load_lzma_config(struct super_block *sb, +static int z_erofs_load_lzma_config(struct super_block *sb, struct erofs_super_block *dsb, void *data, int size) { static DEFINE_MUTEX(lzma_resize_mutex); @@ -96,8 +94,6 @@ int z_erofs_load_lzma_config(struct super_block *sb, return -EINVAL; } - erofs_info(sb, "EXPERIMENTAL MicroLZMA in use. Use at your own risk!"); - /* in case 2 z_erofs_load_lzma_config() race to avoid deadlock */ mutex_lock(&lzma_resize_mutex); @@ -150,26 +146,28 @@ int z_erofs_load_lzma_config(struct super_block *sb, return err; } -int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool) +static int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl) { - const unsigned int nrpages_out = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int nrpages_in = - PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; - unsigned int inlen, outlen, pageofs; + struct super_block *sb = rq->sb; + struct z_erofs_stream_dctx dctx = { + .rq = rq, + .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, + .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) + >> PAGE_SHIFT, + .no = -1, .ni = 0, + }; + struct xz_buf buf = {}; struct z_erofs_lzma *strm; - u8 *kin; - bool bounced = false; - int no, ni, j, err = 0; + enum xz_ret xz_err; + int err; /* 1. get the exact LZMA compressed size */ - kin = kmap(*rq->in); - err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, - min_t(unsigned int, rq->inputsize, - rq->sb->s_blocksize - rq->pageofs_in)); + dctx.kin = kmap_local_page(*rq->in); + err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in, + min(rq->inputsize, sb->s_blocksize - rq->pageofs_in)); if (err) { - kunmap(*rq->in); + kunmap_local(dctx.kin); return err; } @@ -186,104 +184,45 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, spin_unlock(&z_erofs_lzma_lock); /* 3. 
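multi-call decompress (see the note just below) */

Note: each backend now exports a const struct z_erofs_decompressor ops
table (z_erofs_deflate_decomp above, z_erofs_lzma_decomp and
z_erofs_zstd_decomp below) instead of ad-hoc global entry points, and
the core dispatches on the on-disk algorithm id. A minimal sketch of
that dispatch (the table layout is simplified; only the member names
follow the diff)::

    struct decompressor {
            int     (*config)(const void *dsb, void *data, int size);
            int     (*decompress)(void *rq, void **pagepool);
            int     (*init)(void);
            void    (*exit)(void);
            const char *name;
    };

    /* one slot per algorithm id: LZ4=0, LZMA=1, DEFLATE=2, ZSTD=3 */
    static const struct decompressor *decomp[4];

    static int run_decompress(int alg, void *rq, void **pgpl)
    {
            if (alg < 0 || alg >= 4 || !decomp[alg])
                    return -1;      /* unsupported algorithm */
            return decomp[alg]->decompress(rq, pgpl);
    }

/* 3. 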
multi-call decompress */ - inlen = rq->inputsize; - outlen = rq->outputsize; - xz_dec_microlzma_reset(strm->state, inlen, outlen, + xz_dec_microlzma_reset(strm->state, rq->inputsize, rq->outputsize, !rq->partial_decoding); - pageofs = rq->pageofs_out; - strm->buf.in = kin + rq->pageofs_in; - strm->buf.in_pos = 0; - strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE - rq->pageofs_in); - inlen -= strm->buf.in_size; - strm->buf.out = NULL; - strm->buf.out_pos = 0; - strm->buf.out_size = 0; - - for (ni = 0, no = -1;;) { - enum xz_ret xz_err; - - if (strm->buf.out_pos == strm->buf.out_size) { - if (strm->buf.out) { - kunmap(rq->out[no]); - strm->buf.out = NULL; - } - - if (++no >= nrpages_out || !outlen) { - erofs_err(rq->sb, "decompressed buf out of bound"); - err = -EFSCORRUPTED; - break; - } - strm->buf.out_pos = 0; - strm->buf.out_size = min_t(u32, outlen, - PAGE_SIZE - pageofs); - outlen -= strm->buf.out_size; - if (!rq->out[no] && rq->fillgaps) { /* deduped */ - rq->out[no] = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); - set_page_private(rq->out[no], - Z_EROFS_SHORTLIVED_PAGE); - } - if (rq->out[no]) - strm->buf.out = kmap(rq->out[no]) + pageofs; - pageofs = 0; - } else if (strm->buf.in_pos == strm->buf.in_size) { - kunmap(rq->in[ni]); - - if (++ni >= nrpages_in || !inlen) { - erofs_err(rq->sb, "compressed buf out of bound"); - err = -EFSCORRUPTED; - break; - } - strm->buf.in_pos = 0; - strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE); - inlen -= strm->buf.in_size; - kin = kmap(rq->in[ni]); - strm->buf.in = kin; - bounced = false; - } + buf.in_size = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in); + rq->inputsize -= buf.in_size; + buf.in = dctx.kin + rq->pageofs_in; + dctx.bounce = strm->bounce; + do { + dctx.avail_out = buf.out_size - buf.out_pos; + dctx.inbuf_sz = buf.in_size; + dctx.inbuf_pos = buf.in_pos; + err = z_erofs_stream_switch_bufs(&dctx, (void **)&buf.out, + (void **)&buf.in, pgpl); + if (err) + break; - /* - * Handle overlapping: Use bounced buffer if the compressed - * data is under processing; Otherwise, Use short-lived pages - * from the on-stack pagepool where pages share with the same - * request. 
- */ - if (!bounced && rq->out[no] == rq->in[ni]) { - memcpy(strm->bounce, strm->buf.in, strm->buf.in_size); - strm->buf.in = strm->bounce; - bounced = true; + if (buf.out_size == buf.out_pos) { + buf.out_size = dctx.avail_out; + buf.out_pos = 0; } - for (j = ni + 1; j < nrpages_in; ++j) { - struct page *tmppage; + buf.in_size = dctx.inbuf_sz; + buf.in_pos = dctx.inbuf_pos; - if (rq->out[no] != rq->in[j]) - continue; - - DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb), - rq->in[j])); - tmppage = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); - set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); - copy_highpage(tmppage, rq->in[j]); - rq->in[j] = tmppage; - } - xz_err = xz_dec_microlzma_run(strm->state, &strm->buf); - DBG_BUGON(strm->buf.out_pos > strm->buf.out_size); - DBG_BUGON(strm->buf.in_pos > strm->buf.in_size); + xz_err = xz_dec_microlzma_run(strm->state, &buf); + DBG_BUGON(buf.out_pos > buf.out_size); + DBG_BUGON(buf.in_pos > buf.in_size); if (xz_err != XZ_OK) { - if (xz_err == XZ_STREAM_END && !outlen) + if (xz_err == XZ_STREAM_END && !rq->outputsize) break; - erofs_err(rq->sb, "failed to decompress %d in[%u] out[%u]", + erofs_err(sb, "failed to decompress %d in[%u] out[%u]", xz_err, rq->inputsize, rq->outputsize); err = -EFSCORRUPTED; break; } - } - if (no < nrpages_out && strm->buf.out) - kunmap(rq->out[no]); - if (ni < nrpages_in) - kunmap(rq->in[ni]); + } while (1); + + if (dctx.kout) + kunmap_local(dctx.kout); + kunmap_local(dctx.kin); /* 4. push back LZMA stream context to the global list */ spin_lock(&z_erofs_lzma_lock); strm->next = z_erofs_lzma_head; @@ -292,3 +231,11 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, wake_up(&z_erofs_lzma_wq); return err; } + +const struct z_erofs_decompressor z_erofs_lzma_decomp = { + .config = z_erofs_load_lzma_config, + .decompress = z_erofs_lzma_decompress, + .init = z_erofs_lzma_init, + .exit = z_erofs_lzma_exit, + .name = "lzma" +}; diff --git a/fs/erofs/decompressor_zstd.c b/fs/erofs/decompressor_zstd.c new file mode 100644 index 0000000000000000000000000000000000000000..7e177304967e19ecd1ab009c701200165f9d9246 --- /dev/null +++ b/fs/erofs/decompressor_zstd.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include "compress.h" + +struct z_erofs_zstd { + struct z_erofs_zstd *next; + u8 bounce[PAGE_SIZE]; + void *wksp; + unsigned int wkspsz; +}; + +static DEFINE_SPINLOCK(z_erofs_zstd_lock); +static unsigned int z_erofs_zstd_max_dictsize; +static unsigned int z_erofs_zstd_nstrms, z_erofs_zstd_avail_strms; +static struct z_erofs_zstd *z_erofs_zstd_head; +static DECLARE_WAIT_QUEUE_HEAD(z_erofs_zstd_wq); + +module_param_named(zstd_streams, z_erofs_zstd_nstrms, uint, 0444); + +static struct z_erofs_zstd *z_erofs_isolate_strms(bool all) +{ + struct z_erofs_zstd *strm; + +again: + spin_lock(&z_erofs_zstd_lock); + strm = z_erofs_zstd_head; + if (!strm) { + spin_unlock(&z_erofs_zstd_lock); + wait_event(z_erofs_zstd_wq, READ_ONCE(z_erofs_zstd_head)); + goto again; + } + z_erofs_zstd_head = all ? 
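NULL : strm->next;

Note: all three multi-call backends keep a small preallocated pool of
decompression contexts on a spinlock-protected singly linked list plus a
waitqueue; z_erofs_isolate_strms(true) drains the whole pool (module
exit or dictionary resize) while isolate(false) borrows one stream,
sleeping until another user pushes one back. A plain-C model of the
borrow path (kernel locking replaced by comments)::

    struct strm { struct strm *next; };

    static struct strm *head;

    static struct strm *isolate(int all)
    {
            struct strm *s;

            for (;;) {
                    /* spin_lock(&pool_lock); */
                    s = head;
                    if (s) {
                            head = all ? NULL : s->next;
                            /* spin_unlock(&pool_lock); */
                            return s;
                    }
                    /* spin_unlock(&pool_lock);
                     * wait_event(wq, head != NULL); */
            }
    }

+	z_erofs_zstd_head = all ? 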
NULL : strm->next; + spin_unlock(&z_erofs_zstd_lock); + return strm; +} + +static void z_erofs_zstd_exit(void) +{ + while (z_erofs_zstd_avail_strms) { + struct z_erofs_zstd *strm, *n; + + for (strm = z_erofs_isolate_strms(true); strm; strm = n) { + n = strm->next; + + kvfree(strm->wksp); + kfree(strm); + --z_erofs_zstd_avail_strms; + } + } +} + +static int __init z_erofs_zstd_init(void) +{ + /* by default, use # of possible CPUs instead */ + if (!z_erofs_zstd_nstrms) + z_erofs_zstd_nstrms = num_possible_cpus(); + + for (; z_erofs_zstd_avail_strms < z_erofs_zstd_nstrms; + ++z_erofs_zstd_avail_strms) { + struct z_erofs_zstd *strm; + + strm = kzalloc(sizeof(*strm), GFP_KERNEL); + if (!strm) { + z_erofs_zstd_exit(); + return -ENOMEM; + } + spin_lock(&z_erofs_zstd_lock); + strm->next = z_erofs_zstd_head; + z_erofs_zstd_head = strm; + spin_unlock(&z_erofs_zstd_lock); + } + return 0; +} + +static int z_erofs_load_zstd_config(struct super_block *sb, + struct erofs_super_block *dsb, void *data, int size) +{ + static DEFINE_MUTEX(zstd_resize_mutex); + struct z_erofs_zstd_cfgs *zstd = data; + unsigned int dict_size, wkspsz; + struct z_erofs_zstd *strm, *head = NULL; + void *wksp; + + if (!zstd || size < sizeof(struct z_erofs_zstd_cfgs) || zstd->format) { + erofs_err(sb, "unsupported zstd format, size=%u", size); + return -EINVAL; + } + + if (zstd->windowlog > ilog2(Z_EROFS_ZSTD_MAX_DICT_SIZE) - 10) { + erofs_err(sb, "unsupported zstd window log %u", zstd->windowlog); + return -EINVAL; + } + dict_size = 1U << (zstd->windowlog + 10); + + /* in case 2 z_erofs_load_zstd_config() race to avoid deadlock */ + mutex_lock(&zstd_resize_mutex); + if (z_erofs_zstd_max_dictsize >= dict_size) { + mutex_unlock(&zstd_resize_mutex); + return 0; + } + + /* 1. collect/isolate all streams for the following check */ + while (z_erofs_zstd_avail_strms) { + struct z_erofs_zstd *n; + + for (strm = z_erofs_isolate_strms(true); strm; strm = n) { + n = strm->next; + strm->next = head; + head = strm; + --z_erofs_zstd_avail_strms; + } + } + + /* 2. walk each isolated stream and grow max dict_size if needed */ + wkspsz = zstd_dstream_workspace_bound(dict_size); + for (strm = head; strm; strm = strm->next) { + wksp = kvmalloc(wkspsz, GFP_KERNEL); + if (!wksp) + break; + kvfree(strm->wksp); + strm->wksp = wksp; + strm->wkspsz = wkspsz; + } + + /* 3. push back all to the global list and update max dict_size */ + spin_lock(&z_erofs_zstd_lock); + DBG_BUGON(z_erofs_zstd_head); + z_erofs_zstd_head = head; + spin_unlock(&z_erofs_zstd_lock); + z_erofs_zstd_avail_strms = z_erofs_zstd_nstrms; + wake_up_all(&z_erofs_zstd_wq); + if (!strm) + z_erofs_zstd_max_dictsize = dict_size; + mutex_unlock(&zstd_resize_mutex); + return strm ? -ENOMEM : 0; +} + +static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl) +{ + struct super_block *sb = rq->sb; + struct z_erofs_stream_dctx dctx = { + .rq = rq, + .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, + .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) + >> PAGE_SHIFT, + .no = -1, .ni = 0, + }; + zstd_in_buffer in_buf = { NULL, 0, 0 }; + zstd_out_buffer out_buf = { NULL, 0, 0 }; + struct z_erofs_zstd *strm; + zstd_dstream *stream; + int zerr, err; + + /* 1. get the exact compressed size */ + dctx.kin = kmap_local_page(*rq->in); + err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in, + min(rq->inputsize, sb->s_blocksize - rq->pageofs_in)); + if (err) { + kunmap_local(dctx.kin); + return err; + } + + /* 2. 
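get an available ZSTD context */

Note: the on-disk z_erofs_zstd_cfgs stores windowLog relative to
ZSTD_WINDOWLOG_ABSOLUTEMIN (10), so the dictionary size is
1 << (windowlog + 10), rejected above 1 MiB (Z_EROFS_ZSTD_MAX_DICT_SIZE),
and every pooled stream then grows its workspace to
zstd_dstream_workspace_bound(dict_size). A runnable check of that
arithmetic (the input value is an example)::

    #include <stdio.h>

    int main(void)
    {
            unsigned int windowlog = 10;    /* example on-disk value */
            unsigned int dict_size = 1u << (windowlog + 10);

            if (dict_size > 1024 * 1024)    /* Z_EROFS_ZSTD_MAX_DICT_SIZE */
                    return 1;               /* unsupported window */
            printf("dict_size=%u\n", dict_size);    /* 1048576 */
            return 0;
    }

/* 2. 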
get an available ZSTD context */ + strm = z_erofs_isolate_strms(false); + + /* 3. multi-call decompress */ + stream = zstd_init_dstream(z_erofs_zstd_max_dictsize, strm->wksp, strm->wkspsz); + if (!stream) { + err = -EIO; + goto failed_zinit; + } + + rq->fillgaps = true; /* ZSTD doesn't support NULL output buffer */ + in_buf.size = min_t(u32, rq->inputsize, PAGE_SIZE - rq->pageofs_in); + rq->inputsize -= in_buf.size; + in_buf.src = dctx.kin + rq->pageofs_in; + dctx.bounce = strm->bounce; + + do { + dctx.avail_out = out_buf.size - out_buf.pos; + dctx.inbuf_sz = in_buf.size; + dctx.inbuf_pos = in_buf.pos; + err = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst, + (void **)&in_buf.src, pgpl); + if (err) + break; + + if (out_buf.size == out_buf.pos) { + out_buf.size = dctx.avail_out; + out_buf.pos = 0; + } + in_buf.size = dctx.inbuf_sz; + in_buf.pos = dctx.inbuf_pos; + + zerr = zstd_decompress_stream(stream, &out_buf, &in_buf); + if (zstd_is_error(zerr) || (!zerr && rq->outputsize)) { + erofs_err(sb, "failed to decompress in[%u] out[%u]: %s", + rq->inputsize, rq->outputsize, + zerr ? zstd_get_error_name(zerr) : "unexpected end of stream"); + err = -EFSCORRUPTED; + break; + } + } while (rq->outputsize || out_buf.pos < out_buf.size); + + if (dctx.kout) + kunmap_local(dctx.kout); +failed_zinit: + kunmap_local(dctx.kin); + /* 4. push back ZSTD stream context to the global list */ + spin_lock(&z_erofs_zstd_lock); + strm->next = z_erofs_zstd_head; + z_erofs_zstd_head = strm; + spin_unlock(&z_erofs_zstd_lock); + wake_up(&z_erofs_zstd_wq); + return err; +} + +const struct z_erofs_decompressor z_erofs_zstd_decomp = { + .config = z_erofs_load_zstd_config, + .decompress = z_erofs_zstd_decompress, + .init = z_erofs_zstd_init, + .exit = z_erofs_zstd_exit, + .name = "zstd", +}; diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c index b80abec0531aadc10d0e0ba9a6a4db94d81061d5..c3b90abdee37af537514449a6b25baae314e04eb 100644 --- a/fs/erofs/dir.c +++ b/fs/erofs/dir.c @@ -8,19 +8,15 @@ static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, void *dentry_blk, struct erofs_dirent *de, - unsigned int nameoff, unsigned int maxsize) + unsigned int nameoff0, unsigned int maxsize) { - const struct erofs_dirent *end = dentry_blk + nameoff; + const struct erofs_dirent *end = dentry_blk + nameoff0; while (de < end) { - const char *de_name; + unsigned char d_type = fs_ftype_to_dtype(de->file_type); + unsigned int nameoff = le16_to_cpu(de->nameoff); + const char *de_name = (char *)dentry_blk + nameoff; unsigned int de_namelen; - unsigned char d_type; - - d_type = fs_ftype_to_dtype(de->file_type); - - nameoff = le16_to_cpu(de->nameoff); - de_name = (char *)dentry_blk + nameoff; /* the last dirent in the block? 
*/ if (de + 1 >= end) @@ -52,21 +48,20 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct super_block *sb = dir->i_sb; unsigned long bsz = sb->s_blocksize; - const size_t dirsize = i_size_read(dir); - unsigned int i = erofs_blknr(sb, ctx->pos); unsigned int ofs = erofs_blkoff(sb, ctx->pos); int err = 0; bool initial = true; - buf.inode = dir; - while (ctx->pos < dirsize) { + buf.mapping = dir->i_mapping; + while (ctx->pos < dir->i_size) { + erofs_off_t dbstart = ctx->pos - ofs; struct erofs_dirent *de; unsigned int nameoff, maxsize; - de = erofs_bread(&buf, i, EROFS_KMAP); + de = erofs_bread(&buf, dbstart, EROFS_KMAP); if (IS_ERR(de)) { erofs_err(sb, "fail to readdir of logical block %u of nid %llu", - i, EROFS_I(dir)->nid); + erofs_blknr(sb, dbstart), EROFS_I(dir)->nid); err = PTR_ERR(de); break; } @@ -79,25 +74,19 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) break; } - maxsize = min_t(unsigned int, dirsize - ctx->pos + ofs, bsz); - + maxsize = min_t(unsigned int, dir->i_size - dbstart, bsz); /* search dirents at the arbitrary position */ if (initial) { initial = false; - ofs = roundup(ofs, sizeof(struct erofs_dirent)); - ctx->pos = erofs_pos(sb, i) + ofs; - if (ofs >= nameoff) - goto skip_this; + ctx->pos = dbstart + ofs; } err = erofs_fill_dentries(dir, ctx, de, (void *)de + ofs, nameoff, maxsize); if (err) break; -skip_this: - ctx->pos = erofs_pos(sb, i) + maxsize; - ++i; + ctx->pos = dbstart + maxsize; ofs = 0; } erofs_put_metabuf(&buf); diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index a03ec70ba6f29d14ec93d52645b7ff706ea51867..c8f2ae845bd29e84a5bf2739f49971370f8db15a 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -288,14 +288,18 @@ struct erofs_dirent { #define EROFS_NAME_LEN 255 -/* maximum supported size of a physical compression cluster */ +/* maximum supported encoded size of a physical compressed cluster */ #define Z_EROFS_PCLUSTER_MAX_SIZE (1024 * 1024) +/* maximum supported decoded size of a physical compressed cluster */ +#define Z_EROFS_PCLUSTER_MAX_DSIZE (12 * 1024 * 1024) + /* available compression algorithm types (for h_algorithmtype) */ enum { Z_EROFS_COMPRESSION_LZ4 = 0, Z_EROFS_COMPRESSION_LZMA = 1, Z_EROFS_COMPRESSION_DEFLATE = 2, + Z_EROFS_COMPRESSION_ZSTD = 3, Z_EROFS_COMPRESSION_MAX }; #define Z_EROFS_ALL_COMPR_ALGS ((1 << Z_EROFS_COMPRESSION_MAX) - 1) @@ -322,6 +326,15 @@ struct z_erofs_deflate_cfgs { u8 reserved[5]; } __packed; +/* 6 bytes (+ length field = 8 bytes) */ +struct z_erofs_zstd_cfgs { + u8 format; + u8 windowlog; /* windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN(10) */ + u8 reserved[4]; +} __packed; + +#define Z_EROFS_ZSTD_MAX_DICT_SIZE Z_EROFS_PCLUSTER_MAX_SIZE + /* * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on) * e.g. 
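for 4k logical cluster size, 4B if compacted 2B is off (see the note just below);

Note on the erofs_readdir() rework above: directory positions are plain
byte offsets now, so the containing block start is derived directly from
ctx->pos and erofs_bread() is called with that absolute offset instead of
a block number. A runnable model of the per-iteration arithmetic
(example values)::

    #include <stdio.h>

    int main(void)
    {
            unsigned long long pos = 12345, bsz = 4096, isize = 20000;
            unsigned long long ofs = pos & (bsz - 1);   /* erofs_blkoff(): 57 */
            unsigned long long dbstart = pos - ofs;     /* block start: 12288 */
            unsigned long long maxsize =
                    isize - dbstart < bsz ? isize - dbstart : bsz;  /* 4096 */

            printf("dbstart=%llu maxsize=%llu\n", dbstart, maxsize);
            return 0;
    }

 * e.g. 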
for 4k logical cluster size, 4B if compacted 2B is off; @@ -396,8 +409,7 @@ enum { Z_EROFS_LCLUSTER_TYPE_MAX }; -#define Z_EROFS_LI_LCLUSTER_TYPE_BITS 2 -#define Z_EROFS_LI_LCLUSTER_TYPE_BIT 0 +#define Z_EROFS_LI_LCLUSTER_TYPE_MASK (Z_EROFS_LCLUSTER_TYPE_MAX - 1) /* (noncompact only, HEAD) This pcluster refers to partial decompressed data */ #define Z_EROFS_LI_PARTIAL_REF (1 << 15) @@ -451,8 +463,6 @@ static inline void erofs_check_ondisk_layout_definitions(void) sizeof(struct z_erofs_lcluster_index)); BUILD_BUG_ON(sizeof(struct erofs_deviceslot) != 128); - BUILD_BUG_ON(BIT(Z_EROFS_LI_LCLUSTER_TYPE_BITS) < - Z_EROFS_LCLUSTER_TYPE_MAX - 1); /* exclude old compiler versions like gcc 7.5.0 */ BUILD_BUG_ON(__builtin_constant_p(fmh) ? fmh != cpu_to_le64(1ULL << 63) : 0); diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c new file mode 100644 index 0000000000000000000000000000000000000000..3af96b1e2c2aa8b11308532c02acf66229b54e55 --- /dev/null +++ b/fs/erofs/fileio.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2024, Alibaba Cloud + */ +#include "internal.h" +#include + +struct erofs_fileio_rq { + struct bio_vec bvecs[BIO_MAX_VECS]; + struct bio bio; + struct kiocb iocb; +}; + +struct erofs_fileio { + struct erofs_map_blocks map; + struct erofs_map_dev dev; + struct erofs_fileio_rq *rq; +}; + +static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret) +{ + struct erofs_fileio_rq *rq = + container_of(iocb, struct erofs_fileio_rq, iocb); + struct folio_iter fi; + + if (ret > 0) { + if (ret != rq->bio.bi_iter.bi_size) { + bio_advance(&rq->bio, ret); + zero_fill_bio(&rq->bio); + } + ret = 0; + } + if (rq->bio.bi_end_io) { + rq->bio.bi_end_io(&rq->bio); + } else { + bio_for_each_folio_all(fi, &rq->bio) { + DBG_BUGON(folio_test_uptodate(fi.folio)); + erofs_onlinefolio_end(fi.folio, ret); + } + } + bio_uninit(&rq->bio); + kfree(rq); +} + +static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq) +{ + struct iov_iter iter; + int ret; + + if (!rq) + return; + rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT; + rq->iocb.ki_ioprio = get_current_ioprio(); + rq->iocb.ki_complete = erofs_fileio_ki_complete; + rq->iocb.ki_flags = (rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT) ? 
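IOCB_DIRECT : 0;

Note: file-backed mounts keep struct bio only for segment bookkeeping
and submit a kiocb through vfs_iocb_iter_read() on the backing file; on
completion, a short read (e.g. hitting EOF inside the request) is not an
error and the remaining bytes are simply zero-filled. A minimal model of
that completion rule (names are illustrative)::

    #include <string.h>

    static int complete_read(char *buf, size_t want, long ret)
    {
            if (ret < 0)
                    return (int)ret;        /* real I/O error */
            if ((size_t)ret != want)        /* short read: zero the tail */
                    memset(buf + ret, 0, want - (size_t)ret);
            return 0;
    }

+	rq->iocb.ki_flags = (rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT) ? 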
+ IOCB_DIRECT : 0; + iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt, + rq->bio.bi_iter.bi_size); + ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter); + if (ret != -EIOCBQUEUED) + erofs_fileio_ki_complete(&rq->iocb, ret); +} + +static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev) +{ + struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq), + GFP_KERNEL | __GFP_NOFAIL); + + bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ); + rq->iocb.ki_filp = mdev->m_fp; + return rq; +} + +struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) +{ + return &erofs_fileio_rq_alloc(mdev)->bio; +} + +void erofs_fileio_submit_bio(struct bio *bio) +{ + return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq, + bio)); +} + +static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio) +{ + struct inode *inode = folio_inode(folio); + struct erofs_map_blocks *map = &io->map; + unsigned int cur = 0, end = folio_size(folio), len, attached = 0; + loff_t pos = folio_pos(folio), ofs; + struct iov_iter iter; + struct bio_vec bv; + int err = 0; + + erofs_onlinefolio_init(folio); + while (cur < end) { + if (!in_range(pos + cur, map->m_la, map->m_llen)) { + map->m_la = pos + cur; + map->m_llen = end - cur; + err = erofs_map_blocks(inode, map); + if (err) + break; + } + + ofs = folio_pos(folio) + cur - map->m_la; + len = min_t(loff_t, map->m_llen - ofs, end - cur); + if (map->m_flags & EROFS_MAP_META) { + struct erofs_buf buf = __EROFS_BUF_INITIALIZER; + void *src; + + src = erofs_read_metabuf(&buf, inode->i_sb, + map->m_pa + ofs, EROFS_KMAP); + if (IS_ERR(src)) { + err = PTR_ERR(src); + break; + } + bvec_set_folio(&bv, folio, len, cur); + iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len); + if (copy_to_iter(src, len, &iter) != len) { + erofs_put_metabuf(&buf); + err = -EIO; + break; + } + erofs_put_metabuf(&buf); + } else if (!(map->m_flags & EROFS_MAP_MAPPED)) { + folio_zero_segment(folio, cur, cur + len); + attached = 0; + } else { + if (io->rq && (map->m_pa + ofs != io->dev.m_pa || + map->m_deviceid != io->dev.m_deviceid)) { +io_retry: + erofs_fileio_rq_submit(io->rq); + io->rq = NULL; + } + + if (!io->rq) { + io->dev = (struct erofs_map_dev) { + .m_pa = io->map.m_pa + ofs, + .m_deviceid = io->map.m_deviceid, + }; + err = erofs_map_dev(inode->i_sb, &io->dev); + if (err) + break; + io->rq = erofs_fileio_rq_alloc(&io->dev); + io->rq->bio.bi_iter.bi_sector = io->dev.m_pa >> 9; + attached = 0; + } + if (!attached++) + erofs_onlinefolio_split(folio); + if (!bio_add_folio(&io->rq->bio, folio, len, cur)) + goto io_retry; + io->dev.m_pa += len; + } + cur += len; + } + erofs_onlinefolio_end(folio, err); + return err; +} + +static int erofs_fileio_read_folio(struct file *file, struct folio *folio) +{ + struct erofs_fileio io = {}; + int err; + + trace_erofs_read_folio(folio, true); + err = erofs_fileio_scan_folio(&io, folio); + erofs_fileio_rq_submit(io.rq); + return err; +} + +static void erofs_fileio_readahead(struct readahead_control *rac) +{ + struct inode *inode = rac->mapping->host; + struct erofs_fileio io = {}; + struct folio *folio; + int err; + + trace_erofs_readpages(inode, readahead_index(rac), + readahead_count(rac), true); + while ((folio = readahead_folio(rac))) { + err = erofs_fileio_scan_folio(&io, folio); + if (err && err != -EINTR) + erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu", + folio->index, EROFS_I(inode)->nid); + } + erofs_fileio_rq_submit(io.rq); +} + +const struct address_space_operations 
erofs_fileio_aops = { + .read_folio = erofs_fileio_read_folio, + .readahead = erofs_fileio_readahead, +}; diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index afc37c9029ce78b9fc3d5e5663d80090968731de..fda16eedafb57858adeea0f15c9736044ba8239f 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -25,9 +25,15 @@ static struct file_system_type erofs_anon_fs_type = { .kill_sb = kill_anon_super, }; -struct erofs_fscache_request { - struct erofs_fscache_request *primary; - struct netfs_cache_resources cache_resources; +struct erofs_fscache_io { + struct netfs_cache_resources cres; + struct iov_iter iter; + netfs_io_terminated_t end_io; + void *private; + refcount_t ref; +}; + +struct erofs_fscache_rq { struct address_space *mapping; /* The mapping being accessed */ loff_t start; /* Start position */ size_t len; /* Length of the request */ @@ -36,44 +42,17 @@ struct erofs_fscache_request { refcount_t ref; }; -static struct erofs_fscache_request *erofs_fscache_req_alloc(struct address_space *mapping, - loff_t start, size_t len) +static bool erofs_fscache_io_put(struct erofs_fscache_io *io) { - struct erofs_fscache_request *req; - - req = kzalloc(sizeof(struct erofs_fscache_request), GFP_KERNEL); - if (!req) - return ERR_PTR(-ENOMEM); - - req->mapping = mapping; - req->start = start; - req->len = len; - refcount_set(&req->ref, 1); - - return req; -} - -static struct erofs_fscache_request *erofs_fscache_req_chain(struct erofs_fscache_request *primary, - size_t len) -{ - struct erofs_fscache_request *req; - - /* use primary request for the first submission */ - if (!primary->submitted) { - refcount_inc(&primary->ref); - return primary; - } - - req = erofs_fscache_req_alloc(primary->mapping, - primary->start + primary->submitted, len); - if (!IS_ERR(req)) { - req->primary = primary; - refcount_inc(&primary->ref); - } - return req; + if (!refcount_dec_and_test(&io->ref)) + return false; + if (io->cres.ops) + io->cres.ops->end_operation(&io->cres); + kfree(io); + return true; } -static void erofs_fscache_req_complete(struct erofs_fscache_request *req) +static void erofs_fscache_req_complete(struct erofs_fscache_rq *req) { struct folio *folio; bool failed = req->error; @@ -93,120 +72,196 @@ static void erofs_fscache_req_complete(struct erofs_fscache_request *req) rcu_read_unlock(); } -static void erofs_fscache_req_put(struct erofs_fscache_request *req) +static void erofs_fscache_req_put(struct erofs_fscache_rq *req) { - if (refcount_dec_and_test(&req->ref)) { - if (req->cache_resources.ops) - req->cache_resources.ops->end_operation(&req->cache_resources); - if (!req->primary) - erofs_fscache_req_complete(req); - else - erofs_fscache_req_put(req->primary); - kfree(req); - } + if (!refcount_dec_and_test(&req->ref)) + return; + erofs_fscache_req_complete(req); + kfree(req); +} + +static struct erofs_fscache_rq *erofs_fscache_req_alloc(struct address_space *mapping, + loff_t start, size_t len) +{ + struct erofs_fscache_rq *req = kzalloc(sizeof(*req), GFP_KERNEL); + + if (!req) + return NULL; + req->mapping = mapping; + req->start = start; + req->len = len; + refcount_set(&req->ref, 1); + return req; +} + +static void erofs_fscache_req_io_put(struct erofs_fscache_io *io) +{ + struct erofs_fscache_rq *req = io->private; + + if (erofs_fscache_io_put(io)) + erofs_fscache_req_put(req); } -static void erofs_fscache_subreq_complete(void *priv, +static void erofs_fscache_req_end_io(void *priv, ssize_t transferred_or_error, bool was_async) { - struct erofs_fscache_request *req = priv; + struct 
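erofs_fscache_io *io = priv;

Note: the old chained-request scheme (a primary request plus cloned
sub-requests) is replaced by one refcounted erofs_fscache_rq per
read/readahead call and one refcounted erofs_fscache_io per cache read;
each io pins the request, and the folios are completed only when the
last reference is dropped. A plain-C model of that two-level
refcounting (refcount_t replaced by a bare int)::

    struct rq { int ref; int error; };
    struct io { int ref; struct rq *req; };

    static void rq_put(struct rq *req)
    {
            if (--req->ref == 0) {
                    /* unlock/complete all folios, then free req */
            }
    }

    static void io_put(struct io *io)
    {
            if (--io->ref == 0) {
                    struct rq *req = io->req;

                    /* end the cache operation, free io */
                    rq_put(req);
            }
    }

+	struct 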
erofs_fscache_io *io = priv; + struct erofs_fscache_rq *req = io->private; - if (IS_ERR_VALUE(transferred_or_error)) { - if (req->primary) - req->primary->error = transferred_or_error; - else - req->error = transferred_or_error; - } - erofs_fscache_req_put(req); + if (IS_ERR_VALUE(transferred_or_error)) + req->error = transferred_or_error; + erofs_fscache_req_io_put(io); +} + +static struct erofs_fscache_io *erofs_fscache_req_io_alloc(struct erofs_fscache_rq *req) +{ + struct erofs_fscache_io *io = kzalloc(sizeof(*io), GFP_KERNEL); + + if (!io) + return NULL; + io->end_io = erofs_fscache_req_end_io; + io->private = req; + refcount_inc(&req->ref); + refcount_set(&io->ref, 1); + return io; } /* - * Read data from fscache (cookie, pstart, len), and fill the read data into - * page cache described by (req->mapping, lstart, len). @pstart describeis the - * start physical address in the cache file. + * Read data from fscache described by cookie at pstart physical address + * offset, and fill the read data into buffer described by io->iter. */ -static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie, - struct erofs_fscache_request *req, loff_t pstart, size_t len) +static int erofs_fscache_read_io_async(struct fscache_cookie *cookie, + loff_t pstart, struct erofs_fscache_io *io) { enum netfs_io_source source; - struct super_block *sb = req->mapping->host->i_sb; - struct netfs_cache_resources *cres = &req->cache_resources; - struct iov_iter iter; - loff_t lstart = req->start + req->submitted; - size_t done = 0; + struct netfs_cache_resources *cres = &io->cres; + struct iov_iter *iter = &io->iter; int ret; - DBG_BUGON(len > req->len - req->submitted); - ret = fscache_begin_read_operation(cres, cookie); if (ret) return ret; - while (done < len) { - loff_t sstart = pstart + done; - size_t slen = len - done; + while (iov_iter_count(iter)) { + size_t orig_count = iov_iter_count(iter), len = orig_count; unsigned long flags = 1 << NETFS_SREQ_ONDEMAND; source = cres->ops->prepare_ondemand_read(cres, - sstart, &slen, LLONG_MAX, &flags, 0); - if (WARN_ON(slen == 0)) + pstart, &len, LLONG_MAX, &flags, 0); + if (WARN_ON(len == 0)) source = NETFS_INVALID_READ; if (source != NETFS_READ_FROM_CACHE) { - erofs_err(sb, "failed to fscache prepare_read (source %d)", source); + erofs_err(NULL, "prepare_ondemand_read failed (source %d)", source); return -EIO; } - refcount_inc(&req->ref); - iov_iter_xarray(&iter, ITER_DEST, &req->mapping->i_pages, - lstart + done, slen); - - ret = fscache_read(cres, sstart, &iter, NETFS_READ_HOLE_FAIL, - erofs_fscache_subreq_complete, req); + iov_iter_truncate(iter, len); + refcount_inc(&io->ref); + ret = fscache_read(cres, pstart, iter, NETFS_READ_HOLE_FAIL, + io->end_io, io); if (ret == -EIOCBQUEUED) ret = 0; if (ret) { - erofs_err(sb, "failed to fscache_read (ret %d)", ret); + erofs_err(NULL, "fscache_read failed (ret %d)", ret); return ret; } + if (WARN_ON(iov_iter_count(iter))) + return -EIO; - done += slen; + iov_iter_reexpand(iter, orig_count - len); + pstart += len; } - DBG_BUGON(done != len); return 0; } -static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio) +struct erofs_fscache_bio { + struct erofs_fscache_io io; + struct bio bio; /* w/o bdev to share bio_add_page/endio() */ + struct bio_vec bvecs[BIO_MAX_VECS]; +}; + +static void erofs_fscache_bio_endio(void *priv, + ssize_t transferred_or_error, bool was_async) +{ + struct erofs_fscache_bio *io = priv; + + if (IS_ERR_VALUE(transferred_or_error)) + io->bio.bi_status = 
errno_to_blk_status(transferred_or_error); + io->bio.bi_end_io(&io->bio); + BUILD_BUG_ON(offsetof(struct erofs_fscache_bio, io) != 0); + erofs_fscache_io_put(&io->io); +} + +struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev) { + struct erofs_fscache_bio *io; + + io = kmalloc(sizeof(*io), GFP_KERNEL | __GFP_NOFAIL); + bio_init(&io->bio, NULL, io->bvecs, BIO_MAX_VECS, REQ_OP_READ); + io->io.private = mdev->m_fscache->cookie; + io->io.end_io = erofs_fscache_bio_endio; + refcount_set(&io->io.ref, 1); + return &io->bio; +} + +void erofs_fscache_submit_bio(struct bio *bio) +{ + struct erofs_fscache_bio *io = container_of(bio, + struct erofs_fscache_bio, bio); int ret; - struct erofs_fscache *ctx = folio_mapping(folio)->host->i_private; - struct erofs_fscache_request *req; - req = erofs_fscache_req_alloc(folio_mapping(folio), + iov_iter_bvec(&io->io.iter, ITER_DEST, io->bvecs, bio->bi_vcnt, + bio->bi_iter.bi_size); + ret = erofs_fscache_read_io_async(io->io.private, + bio->bi_iter.bi_sector << 9, &io->io); + erofs_fscache_io_put(&io->io); + if (!ret) + return; + bio->bi_status = errno_to_blk_status(ret); + bio->bi_end_io(bio); +} + +static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio) +{ + struct erofs_fscache *ctx = folio->mapping->host->i_private; + int ret = -ENOMEM; + struct erofs_fscache_rq *req; + struct erofs_fscache_io *io; + + req = erofs_fscache_req_alloc(folio->mapping, folio_pos(folio), folio_size(folio)); - if (IS_ERR(req)) { + if (!req) { folio_unlock(folio); - return PTR_ERR(req); + return ret; } - ret = erofs_fscache_read_folios_async(ctx->cookie, req, - folio_pos(folio), folio_size(folio)); + io = erofs_fscache_req_io_alloc(req); + if (!io) { + req->error = ret; + goto out; + } + iov_iter_xarray(&io->iter, ITER_DEST, &folio->mapping->i_pages, + folio_pos(folio), folio_size(folio)); + + ret = erofs_fscache_read_io_async(ctx->cookie, folio_pos(folio), io); if (ret) req->error = ret; + erofs_fscache_req_io_put(io); +out: erofs_fscache_req_put(req); return ret; } -static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary) +static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req) { - struct address_space *mapping = primary->mapping; + struct address_space *mapping = req->mapping; struct inode *inode = mapping->host; struct super_block *sb = inode->i_sb; - struct erofs_fscache_request *req; + struct erofs_fscache_io *io; struct erofs_map_blocks map; struct erofs_map_dev mdev; - struct iov_iter iter; - loff_t pos = primary->start + primary->submitted; + loff_t pos = req->start + req->submitted; size_t count; int ret; @@ -217,35 +272,32 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary) if (map.m_flags & EROFS_MAP_META) { struct erofs_buf buf = __EROFS_BUF_INITIALIZER; - erofs_blk_t blknr; - size_t offset, size; + struct iov_iter iter; + size_t size = map.m_llen; void *src; - /* For tail packing layout, the offset may be non-zero. 
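*/

Note: the metadata buffer helpers (erofs_bread()/erofs_read_metabuf())
now take an absolute byte offset and return a pointer already adjusted
into the mapped block, so callers such as this one no longer split
positions into (block number, in-block offset) pairs by hand. A
simplified model of the old vs. new calling convention (helper names are
stand-ins)::

    #include <stdint.h>

    #define BLKBITS 12      /* example: 4KiB blocks */

    static inline uint64_t blknr(uint64_t a)  { return a >> BLKBITS; }
    static inline uint32_t blkoff(uint64_t a) { return a & ((1u << BLKBITS) - 1); }

    /* old: p = read_metabuf(buf, blknr(pos)); use p + blkoff(pos)
     * new: p = read_metabuf(buf, pos);        use p directly      */

-	/* For tail packing layout, the offset may be non-zero. 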
*/ - offset = erofs_blkoff(sb, map.m_pa); - blknr = erofs_blknr(sb, map.m_pa); - size = map.m_llen; - - src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP); + src = erofs_read_metabuf(&buf, sb, map.m_pa, EROFS_KMAP); if (IS_ERR(src)) return PTR_ERR(src); iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE); - if (copy_to_iter(src + offset, size, &iter) != size) { + if (copy_to_iter(src, size, &iter) != size) { erofs_put_metabuf(&buf); return -EFAULT; } iov_iter_zero(PAGE_SIZE - size, &iter); erofs_put_metabuf(&buf); - primary->submitted += PAGE_SIZE; + req->submitted += PAGE_SIZE; return 0; } - count = primary->len - primary->submitted; + count = req->len - req->submitted; if (!(map.m_flags & EROFS_MAP_MAPPED)) { + struct iov_iter iter; + iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count); iov_iter_zero(count, &iter); - primary->submitted += count; + req->submitted += count; return 0; } @@ -260,18 +312,19 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary) if (ret) return ret; - req = erofs_fscache_req_chain(primary, count); - if (IS_ERR(req)) - return PTR_ERR(req); + io = erofs_fscache_req_io_alloc(req); + if (!io) + return -ENOMEM; + iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count); + ret = erofs_fscache_read_io_async(mdev.m_fscache->cookie, + mdev.m_pa + (pos - map.m_la), io); + erofs_fscache_req_io_put(io); - ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie, - req, mdev.m_pa + (pos - map.m_la), count); - erofs_fscache_req_put(req); - primary->submitted += count; + req->submitted += count; return ret; } -static int erofs_fscache_data_read(struct erofs_fscache_request *req) +static int erofs_fscache_data_read(struct erofs_fscache_rq *req) { int ret; @@ -280,20 +333,19 @@ static int erofs_fscache_data_read(struct erofs_fscache_request *req) if (ret) req->error = ret; } while (!ret && req->submitted < req->len); - return ret; } static int erofs_fscache_read_folio(struct file *file, struct folio *folio) { - struct erofs_fscache_request *req; + struct erofs_fscache_rq *req; int ret; - req = erofs_fscache_req_alloc(folio_mapping(folio), + req = erofs_fscache_req_alloc(folio->mapping, folio_pos(folio), folio_size(folio)); - if (IS_ERR(req)) { + if (!req) { folio_unlock(folio); - return PTR_ERR(req); + return -ENOMEM; } ret = erofs_fscache_data_read(req); @@ -303,14 +355,14 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio) static void erofs_fscache_readahead(struct readahead_control *rac) { - struct erofs_fscache_request *req; + struct erofs_fscache_rq *req; if (!readahead_count(rac)) return; req = erofs_fscache_req_alloc(rac->mapping, readahead_pos(rac), readahead_length(rac)); - if (IS_ERR(req)) + if (!req) return; /* The request completion will drop refs on the folios. 
*/ @@ -473,7 +525,7 @@ static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb inode->i_size = OFFSET_MAX; inode->i_mapping->a_ops = &erofs_fscache_meta_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); + mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL); inode->i_blkbits = EROFS_SB(sb)->blkszbits; inode->i_private = ctx; diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 9e40bee3682f7d6a17a60d13875e0b4f80100f41..c8ef6ebf92080621da14ace412f97e1cdf4d6e4e 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -5,39 +5,54 @@ * Copyright (C) 2021, Alibaba Cloud */ #include "xattr.h" - #include -static void *erofs_read_inode(struct erofs_buf *buf, - struct inode *inode, unsigned int *ofs) +static int erofs_fill_symlink(struct inode *inode, void *kaddr, + unsigned int m_pofs) +{ + struct erofs_inode *vi = EROFS_I(inode); + loff_t off; + + m_pofs += vi->xattr_isize; + /* check if it cannot be handled with fast symlink scheme */ + if (vi->datalayout != EROFS_INODE_FLAT_INLINE || + check_add_overflow(m_pofs, inode->i_size, &off) || + off > i_blocksize(inode)) + return 0; + + inode->i_link = kmemdup_nul(kaddr + m_pofs, inode->i_size, GFP_KERNEL); + return inode->i_link ? 0 : -ENOMEM; +} + +static int erofs_read_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; struct erofs_sb_info *sbi = EROFS_SB(sb); struct erofs_inode *vi = EROFS_I(inode); const erofs_off_t inode_loc = erofs_iloc(inode); - erofs_blk_t blkaddr, nblks = 0; void *kaddr; struct erofs_inode_compact *dic; struct erofs_inode_extended *die, *copied = NULL; - unsigned int ifmt; - int err; + union erofs_inode_i_u iu; + struct erofs_buf buf = __EROFS_BUF_INITIALIZER; + unsigned int ifmt, ofs; + int err = 0; blkaddr = erofs_blknr(sb, inode_loc); - *ofs = erofs_blkoff(sb, inode_loc); + ofs = erofs_blkoff(sb, inode_loc); - kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP); + kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), EROFS_KMAP); if (IS_ERR(kaddr)) { erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld", vi->nid, PTR_ERR(kaddr)); - return kaddr; + return PTR_ERR(kaddr); } - dic = kaddr + *ofs; + dic = kaddr + ofs; ifmt = le16_to_cpu(dic->i_format); - if (ifmt & ~EROFS_I_ALL) { - erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu", + erofs_err(sb, "unsupported i_format %u of nid %llu", ifmt, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -45,7 +60,7 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->datalayout = erofs_inode_datalayout(ifmt); if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) { - erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu", + erofs_err(sb, "unsupported datalayout %u of nid %llu", vi->datalayout, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -55,119 +70,105 @@ static void *erofs_read_inode(struct erofs_buf *buf, case EROFS_INODE_LAYOUT_EXTENDED: vi->inode_isize = sizeof(struct erofs_inode_extended); /* check if the extended inode acrosses block boundary */ - if (*ofs + vi->inode_isize <= sb->s_blocksize) { - *ofs += vi->inode_isize; + if (ofs + vi->inode_isize <= sb->s_blocksize) { + ofs += vi->inode_isize; die = (struct erofs_inode_extended *)dic; } else { - const unsigned int gotten = sb->s_blocksize - *ofs; + const unsigned int gotten = sb->s_blocksize - ofs; - copied = kmalloc(vi->inode_isize, GFP_NOFS); + copied = kmalloc(vi->inode_isize, GFP_KERNEL); if (!copied) { err = -ENOMEM; goto err_out; } memcpy(copied, dic, gotten); - kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1, + kaddr = 
erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr + 1), EROFS_KMAP); if (IS_ERR(kaddr)) { erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld", vi->nid, PTR_ERR(kaddr)); kfree(copied); - return kaddr; + return PTR_ERR(kaddr); } - *ofs = vi->inode_isize - gotten; - memcpy((u8 *)copied + gotten, kaddr, *ofs); + ofs = vi->inode_isize - gotten; + memcpy((u8 *)copied + gotten, kaddr, ofs); die = copied; } vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount); inode->i_mode = le16_to_cpu(die->i_mode); - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr); - break; - case S_IFCHR: - case S_IFBLK: - inode->i_rdev = - new_decode_dev(le32_to_cpu(die->i_u.rdev)); - break; - case S_IFIFO: - case S_IFSOCK: - inode->i_rdev = 0; - break; - default: - goto bogusimode; - } + iu = die->i_u; i_uid_write(inode, le32_to_cpu(die->i_uid)); i_gid_write(inode, le32_to_cpu(die->i_gid)); set_nlink(inode, le32_to_cpu(die->i_nlink)); - - /* extended inode has its own timestamp */ + /* each extended inode has its own timestamp */ inode_set_ctime(inode, le64_to_cpu(die->i_mtime), le32_to_cpu(die->i_mtime_nsec)); inode->i_size = le64_to_cpu(die->i_size); - - /* total blocks for compressed files */ - if (erofs_inode_is_data_compressed(vi->datalayout)) - nblks = le32_to_cpu(die->i_u.compressed_blocks); - else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) - /* fill chunked inode summary info */ - vi->chunkformat = le16_to_cpu(die->i_u.c.format); kfree(copied); - copied = NULL; break; case EROFS_INODE_LAYOUT_COMPACT: vi->inode_isize = sizeof(struct erofs_inode_compact); - *ofs += vi->inode_isize; + ofs += vi->inode_isize; vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount); inode->i_mode = le16_to_cpu(dic->i_mode); - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr); - break; - case S_IFCHR: - case S_IFBLK: - inode->i_rdev = - new_decode_dev(le32_to_cpu(dic->i_u.rdev)); - break; - case S_IFIFO: - case S_IFSOCK: - inode->i_rdev = 0; - break; - default: - goto bogusimode; - } + iu = dic->i_u; i_uid_write(inode, le16_to_cpu(dic->i_uid)); i_gid_write(inode, le16_to_cpu(dic->i_gid)); set_nlink(inode, le16_to_cpu(dic->i_nlink)); - /* use build time for compact inodes */ inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec); inode->i_size = le32_to_cpu(dic->i_size); - if (erofs_inode_is_data_compressed(vi->datalayout)) - nblks = le32_to_cpu(dic->i_u.compressed_blocks); - else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) - vi->chunkformat = le16_to_cpu(dic->i_u.c.format); break; default: - erofs_err(inode->i_sb, - "unsupported on-disk inode version %u of nid %llu", + erofs_err(sb, "unsupported on-disk inode version %u of nid %llu", erofs_inode_version(ifmt), vi->nid); err = -EOPNOTSUPP; goto err_out; } - if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { + if (unlikely(inode->i_size < 0)) { + erofs_err(sb, "negative i_size @ nid %llu", vi->nid); + err = -EFSCORRUPTED; + goto err_out; + } + switch (inode->i_mode & S_IFMT) { + case S_IFREG: + case S_IFDIR: + case S_IFLNK: + vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr); + if(S_ISLNK(inode->i_mode)) { + err = erofs_fill_symlink(inode, kaddr, ofs); + if (err) + goto err_out; + } + break; + case S_IFCHR: + case S_IFBLK: + inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev)); + break; + case S_IFIFO: + case S_IFSOCK: + inode->i_rdev = 0; + break; + default: + erofs_err(sb, 
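"bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid);

Note: both inode layouts now stash the on-disk i_u union and decode it
once after the common fields, replacing the duplicated per-layout
switches. A minimal sketch of the single decode step (the union loosely
mirrors the on-disk one; octal S_IF* values are spelled out for
self-containment)::

    #include <stdint.h>

    union inode_u {
            uint32_t raw_blkaddr;           /* REG/DIR/LNK: data block */
            uint32_t rdev;                  /* CHR/BLK: device number */
            uint32_t compressed_blocks;     /* compressed size in blocks */
            struct { uint16_t format; } c;  /* chunk-based layout */
    };

    static int decode(unsigned int mode, union inode_u iu)
    {
            switch (mode & 0170000) {                 /* S_IFMT */
            case 0100000: case 0040000: case 0120000: /* REG/DIR/LNK */
                    return (int)iu.raw_blkaddr;
            case 0020000: case 0060000:               /* CHR/BLK */
                    return (int)iu.rdev;
            case 0010000: case 0140000:               /* FIFO/SOCK */
                    return 0;
            default:
                    return -1;                        /* EFSCORRUPTED */
            }
    }

+		erofs_err(sb, 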
"bogus i_mode (%o) @ nid %llu", inode->i_mode, + vi->nid); + err = -EFSCORRUPTED; + goto err_out; + } + + /* total blocks for compressed files */ + if (erofs_inode_is_data_compressed(vi->datalayout)) { + nblks = le32_to_cpu(iu.compressed_blocks); + } else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { + /* fill chunked inode summary info */ + vi->chunkformat = le16_to_cpu(iu.c.format); if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) { - erofs_err(inode->i_sb, - "unsupported chunk format %x of nid %llu", + erofs_err(sb, "unsupported chunk format %x of nid %llu", vi->chunkformat, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -188,61 +189,23 @@ static void *erofs_read_inode(struct erofs_buf *buf, inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9; else inode->i_blocks = nblks << (sb->s_blocksize_bits - 9); - return kaddr; - -bogusimode: - erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu", - inode->i_mode, vi->nid); - err = -EFSCORRUPTED; err_out: - DBG_BUGON(1); - kfree(copied); - erofs_put_metabuf(buf); - return ERR_PTR(err); -} - -static int erofs_fill_symlink(struct inode *inode, void *kaddr, - unsigned int m_pofs) -{ - struct erofs_inode *vi = EROFS_I(inode); - loff_t off; - char *lnk; - - m_pofs += vi->xattr_isize; - /* check if it cannot be handled with fast symlink scheme */ - if (vi->datalayout != EROFS_INODE_FLAT_INLINE || inode->i_size < 0 || - check_add_overflow(m_pofs, inode->i_size, &off) || - off > i_blocksize(inode)) { - inode->i_op = &erofs_symlink_iops; - return 0; - } - - lnk = kmalloc(inode->i_size + 1, GFP_KERNEL); - if (!lnk) - return -ENOMEM; - - memcpy(lnk, kaddr + m_pofs, inode->i_size); - lnk[inode->i_size] = '\0'; - - inode->i_link = lnk; - inode->i_op = &erofs_fast_symlink_iops; - return 0; + DBG_BUGON(err); + erofs_put_metabuf(&buf); + return err; } static int erofs_fill_inode(struct inode *inode) { struct erofs_inode *vi = EROFS_I(inode); - struct erofs_buf buf = __EROFS_BUF_INITIALIZER; - void *kaddr; - unsigned int ofs; - int err = 0; + int err; trace_erofs_fill_inode(inode); /* read inode base data from disk */ - kaddr = erofs_read_inode(&buf, inode, &ofs); - if (IS_ERR(kaddr)) - return PTR_ERR(kaddr); + err = erofs_read_inode(inode); + if (err) + return err; /* setup the new inode */ switch (inode->i_mode & S_IFMT) { @@ -259,9 +222,10 @@ static int erofs_fill_inode(struct inode *inode) inode_nohighmem(inode); break; case S_IFLNK: - err = erofs_fill_symlink(inode, kaddr, ofs); - if (err) - goto out_unlock; + if (inode->i_link) + inode->i_op = &erofs_fast_symlink_iops; + else + inode->i_op = &erofs_symlink_iops; inode_nohighmem(inode); break; case S_IFCHR: @@ -270,33 +234,33 @@ static int erofs_fill_inode(struct inode *inode) case S_IFSOCK: inode->i_op = &erofs_generic_iops; init_special_inode(inode, inode->i_mode, inode->i_rdev); - goto out_unlock; + return 0; default: - err = -EFSCORRUPTED; - goto out_unlock; + return -EFSCORRUPTED; } + mapping_set_large_folios(inode->i_mapping); if (erofs_inode_is_data_compressed(vi->datalayout)) { #ifdef CONFIG_EROFS_FS_ZIP - if (!erofs_is_fscache_mode(inode->i_sb) && - inode->i_sb->s_blocksize_bits == PAGE_SHIFT) { - inode->i_mapping->a_ops = &z_erofs_aops; - err = 0; - goto out_unlock; - } -#endif + DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT, + erofs_info, inode->i_sb, + "EXPERIMENTAL EROFS subpage compressed block support in use. 
Use at your own risk!"); + inode->i_mapping->a_ops = &z_erofs_aops; +#else err = -EOPNOTSUPP; - goto out_unlock; - } - inode->i_mapping->a_ops = &erofs_raw_access_aops; - mapping_set_large_folios(inode->i_mapping); +#endif + } else { + inode->i_mapping->a_ops = &erofs_aops; #ifdef CONFIG_EROFS_FS_ONDEMAND - if (erofs_is_fscache_mode(inode->i_sb)) - inode->i_mapping->a_ops = &erofs_fscache_access_aops; + if (erofs_is_fscache_mode(inode->i_sb)) + inode->i_mapping->a_ops = &erofs_fscache_access_aops; +#endif +#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE + if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb))) + inode->i_mapping->a_ops = &erofs_fileio_aops; #endif + } -out_unlock: - erofs_put_metabuf(&buf); return err; } @@ -353,14 +317,29 @@ int erofs_getattr(struct mnt_idmap *idmap, const struct path *path, unsigned int query_flags) { struct inode *const inode = d_inode(path->dentry); + struct block_device *bdev = inode->i_sb->s_bdev; + bool compressed = + erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout); - if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) + if (compressed) stat->attributes |= STATX_ATTR_COMPRESSED; - stat->attributes |= STATX_ATTR_IMMUTABLE; stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_IMMUTABLE); + /* + * Return the DIO alignment restrictions if requested. + * + * In EROFS, STATX_DIOALIGN is only supported in bdev-based mode + * and uncompressed inodes, otherwise we report no DIO support. + */ + if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) { + stat->result_mask |= STATX_DIOALIGN; + if (bdev && !compressed) { + stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; + stat->dio_offset_align = bdev_logical_block_size(bdev); + } + } generic_fillattr(idmap, request_mask, inode, stat); return 0; } diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 787cc9ff902944d17ccd967b855fb5164d8efe7f..12ee38400aa21ed2d34b43b3047ebfda33626a7d 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -8,8 +8,10 @@ #define __EROFS_INTERNAL_H #include +#include #include #include +#include #include #include #include @@ -18,18 +20,12 @@ #include #include "erofs_fs.h" -/* redefine pr_fmt "erofs: " */ -#undef pr_fmt -#define pr_fmt(fmt) "erofs: " fmt - -__printf(3, 4) void _erofs_err(struct super_block *sb, - const char *function, const char *fmt, ...); +__printf(2, 3) void _erofs_printk(struct super_block *sb, const char *fmt, ...); #define erofs_err(sb, fmt, ...) \ - _erofs_err(sb, __func__, fmt "\n", ##__VA_ARGS__) -__printf(3, 4) void _erofs_info(struct super_block *sb, - const char *function, const char *fmt, ...); + _erofs_printk(sb, KERN_ERR fmt "\n", ##__VA_ARGS__) #define erofs_info(sb, fmt, ...) 
\ - _erofs_info(sb, __func__, fmt "\n", ##__VA_ARGS__) + _erofs_printk(sb, KERN_INFO fmt "\n", ##__VA_ARGS__) + #ifdef CONFIG_EROFS_FS_DEBUG #define DBG_BUGON BUG_ON #else @@ -48,6 +44,7 @@ struct erofs_device_info { char *path; struct erofs_fscache *fscache; struct bdev_handle *bdev_handle; + struct file *file; struct dax_device *dax_dev; u64 dax_part_off; @@ -62,15 +59,12 @@ enum { }; struct erofs_mount_opts { -#ifdef CONFIG_EROFS_FS_ZIP /* current strategy of how to use managed cache */ unsigned char cache_strategy; /* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */ unsigned int sync_decompress; - /* threshold for decompression synchronously */ unsigned int max_sync_decompress_pages; -#endif unsigned int mount_opt; }; @@ -131,6 +125,7 @@ struct erofs_sb_info { struct erofs_sb_lz4_info lz4; #endif /* CONFIG_EROFS_FS_ZIP */ + struct file *fdev; struct inode *packed_inode; struct erofs_dev_context *devs; struct dax_device *dax_dev; @@ -191,9 +186,15 @@ struct erofs_sb_info { #define set_opt(opt, option) ((opt)->mount_opt |= EROFS_MOUNT_##option) #define test_opt(opt, option) ((opt)->mount_opt & EROFS_MOUNT_##option) +static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi) +{ + return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->fdev; +} + static inline bool erofs_is_fscache_mode(struct super_block *sb) { - return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && !sb->s_bdev; + return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && + !erofs_is_fileio_mode(EROFS_SB(sb)) && !sb->s_bdev; } enum { @@ -214,16 +215,14 @@ enum erofs_kmap_type { }; struct erofs_buf { - struct inode *inode; + struct address_space *mapping; + struct file *file; struct page *page; void *base; - enum erofs_kmap_type kmap_type; }; #define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL }) -#define ROOT_NID(sb) ((sb)->root_nid) - -#define erofs_blknr(sb, addr) ((addr) >> (sb)->s_blocksize_bits) +#define erofs_blknr(sb, addr) ((erofs_blk_t)((addr) >> (sb)->s_blocksize_bits)) #define erofs_blkoff(sb, addr) ((addr) & ((sb)->s_blocksize - 1)) #define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits) #define erofs_iblks(i) (round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits) @@ -315,17 +314,13 @@ static inline unsigned int erofs_inode_datalayout(unsigned int ifmt) return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK; } -/* - * Different from grab_cache_page_nowait(), reclaiming is never triggered - * when allocating new pages. - */ -static inline -struct page *erofs_grab_cache_page_nowait(struct address_space *mapping, - pgoff_t index) +/* reclaiming is never triggered when allocating new folios. 
*/ +static inline struct folio *erofs_grab_folio_nowait(struct address_space *as, + pgoff_t index) { - return pagecache_get_page(mapping, index, + return __filemap_get_folio(as, index, FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, - readahead_gfp_mask(mapping) & ~__GFP_RECLAIM); + readahead_gfp_mask(as) & ~__GFP_RECLAIM); } /* Has a disk mapping */ @@ -372,6 +367,7 @@ struct erofs_map_dev { struct erofs_fscache *m_fscache; struct block_device *m_bdev; struct dax_device *m_daxdev; + struct file *m_fp; u64 m_dax_part_off; erofs_off_t m_pa; @@ -380,7 +376,8 @@ struct erofs_map_dev { extern const struct super_operations erofs_sops; -extern const struct address_space_operations erofs_raw_access_aops; +extern const struct address_space_operations erofs_aops; +extern const struct address_space_operations erofs_fileio_aops; extern const struct address_space_operations z_erofs_aops; extern const struct address_space_operations erofs_fscache_access_aops; @@ -402,15 +399,18 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, erofs_off_t *offset, int *lengthp); void erofs_unmap_metabuf(struct erofs_buf *buf); void erofs_put_metabuf(struct erofs_buf *buf); -void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr, +void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, enum erofs_kmap_type type); void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb); void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, - erofs_blk_t blkaddr, enum erofs_kmap_type type); + erofs_off_t offset, enum erofs_kmap_type type); int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev); int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len); int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map); +void erofs_onlinefolio_init(struct folio *folio); +void erofs_onlinefolio_split(struct folio *folio); +void erofs_onlinefolio_end(struct folio *folio, int err); struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid); int erofs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, @@ -438,7 +438,11 @@ void erofs_unregister_sysfs(struct super_block *sb); int __init erofs_init_sysfs(void); void erofs_exit_sysfs(void); -struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp); +struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv); +static inline struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp) +{ + return __erofs_allocpage(pagepool, gfp, false); +} static inline void erofs_pagepool_add(struct page **pagepool, struct page *page) { set_page_private(page, (unsigned long)*pagepool); @@ -457,17 +461,17 @@ void erofs_shrinker_register(struct super_block *sb); void erofs_shrinker_unregister(struct super_block *sb); int __init erofs_init_shrinker(void); void erofs_exit_shrinker(void); -int __init z_erofs_init_zip_subsystem(void); -void z_erofs_exit_zip_subsystem(void); -int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, - struct erofs_workgroup *egrp); +int __init z_erofs_init_subsystem(void); +void z_erofs_exit_subsystem(void); +int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, + struct erofs_workgroup *egrp); int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, int flags); -void *erofs_get_pcpubuf(unsigned int requiredpages); -void erofs_put_pcpubuf(void *ptr); -int erofs_pcpubuf_growsize(unsigned int nrpages); -void __init erofs_pcpubuf_init(void); -void 
erofs_pcpubuf_exit(void); +void *z_erofs_get_gbuf(unsigned int requiredpages); +void z_erofs_put_gbuf(void *ptr); +int z_erofs_gbuf_growsize(unsigned int nrpages); +int __init z_erofs_gbuf_init(void); +void z_erofs_gbuf_exit(void); int erofs_init_managed_cache(struct super_block *sb); int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb); #else @@ -475,28 +479,18 @@ static inline void erofs_shrinker_register(struct super_block *sb) {} static inline void erofs_shrinker_unregister(struct super_block *sb) {} static inline int erofs_init_shrinker(void) { return 0; } static inline void erofs_exit_shrinker(void) {} -static inline int z_erofs_init_zip_subsystem(void) { return 0; } -static inline void z_erofs_exit_zip_subsystem(void) {} -static inline void erofs_pcpubuf_init(void) {} -static inline void erofs_pcpubuf_exit(void) {} +static inline int z_erofs_init_subsystem(void) { return 0; } +static inline void z_erofs_exit_subsystem(void) {} static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; } #endif /* !CONFIG_EROFS_FS_ZIP */ -#ifdef CONFIG_EROFS_FS_ZIP_LZMA -int __init z_erofs_lzma_init(void); -void z_erofs_lzma_exit(void); -#else -static inline int z_erofs_lzma_init(void) { return 0; } -static inline int z_erofs_lzma_exit(void) { return 0; } -#endif /* !CONFIG_EROFS_FS_ZIP_LZMA */ - -#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE -int __init z_erofs_deflate_init(void); -void z_erofs_deflate_exit(void); +#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE +struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev); +void erofs_fileio_submit_bio(struct bio *bio); #else -static inline int z_erofs_deflate_init(void) { return 0; } -static inline int z_erofs_deflate_exit(void) { return 0; } -#endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */ +static inline struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) { return NULL; } +static inline void erofs_fileio_submit_bio(struct bio *bio) {} +#endif #ifdef CONFIG_EROFS_FS_ONDEMAND int erofs_fscache_register_fs(struct super_block *sb); @@ -505,6 +499,8 @@ void erofs_fscache_unregister_fs(struct super_block *sb); struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb, char *name, unsigned int flags); void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache); +struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev); +void erofs_fscache_submit_bio(struct bio *bio); #else static inline int erofs_fscache_register_fs(struct super_block *sb) { @@ -522,6 +518,8 @@ struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb, static inline void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache) { } +static inline struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev) { return NULL; } +static inline void erofs_fscache_submit_bio(struct bio *bio) {} #endif #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c index f0110a78acb2078aa2ce6eae13e39481e46b7ea9..c94d0c1608a81e2383185f854f5d0ca5303f113e 100644 --- a/fs/erofs/namei.c +++ b/fs/erofs/namei.c @@ -99,8 +99,8 @@ static void *erofs_find_target_block(struct erofs_buf *target, struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct erofs_dirent *de; - buf.inode = dir; - de = erofs_bread(&buf, mid, EROFS_KMAP); + buf.mapping = dir->i_mapping; + de = erofs_bread(&buf, erofs_pos(dir->i_sb, mid), EROFS_KMAP); if (!IS_ERR(de)) { const int nameoff = nameoff_from_disk(de->nameoff, bsz); const int ndirents = nameoff / sizeof(*de); @@ -171,7 +171,7 @@ int 
erofs_namei(struct inode *dir, const struct qstr *name, erofs_nid_t *nid, qn.name = name->name; qn.end = name->name + name->len; - buf.inode = dir; + buf.mapping = dir->i_mapping; ndirents = 0; de = erofs_find_target_block(&buf, dir, &qn, &ndirents); diff --git a/fs/erofs/pcpubuf.c b/fs/erofs/pcpubuf.c deleted file mode 100644 index c7a4b1d77069d9e7b749a08aec36988e7fa92d3f..0000000000000000000000000000000000000000 --- a/fs/erofs/pcpubuf.c +++ /dev/null @@ -1,148 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) Gao Xiang - * - * For low-latency decompression algorithms (e.g. lz4), reserve consecutive - * per-CPU virtual memory (in pages) in advance to store such inplace I/O - * data if inplace decompression is failed (due to unmet inplace margin for - * example). - */ -#include "internal.h" - -struct erofs_pcpubuf { - raw_spinlock_t lock; - void *ptr; - struct page **pages; - unsigned int nrpages; -}; - -static DEFINE_PER_CPU(struct erofs_pcpubuf, erofs_pcb); - -void *erofs_get_pcpubuf(unsigned int requiredpages) - __acquires(pcb->lock) -{ - struct erofs_pcpubuf *pcb = &get_cpu_var(erofs_pcb); - - raw_spin_lock(&pcb->lock); - /* check if the per-CPU buffer is too small */ - if (requiredpages > pcb->nrpages) { - raw_spin_unlock(&pcb->lock); - put_cpu_var(erofs_pcb); - /* (for sparse checker) pretend pcb->lock is still taken */ - __acquire(pcb->lock); - return NULL; - } - return pcb->ptr; -} - -void erofs_put_pcpubuf(void *ptr) __releases(pcb->lock) -{ - struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, smp_processor_id()); - - DBG_BUGON(pcb->ptr != ptr); - raw_spin_unlock(&pcb->lock); - put_cpu_var(erofs_pcb); -} - -/* the next step: support per-CPU page buffers hotplug */ -int erofs_pcpubuf_growsize(unsigned int nrpages) -{ - static DEFINE_MUTEX(pcb_resize_mutex); - static unsigned int pcb_nrpages; - struct page *pagepool = NULL; - int delta, cpu, ret, i; - - mutex_lock(&pcb_resize_mutex); - delta = nrpages - pcb_nrpages; - ret = 0; - /* avoid shrinking pcpubuf, since no idea how many fses rely on */ - if (delta <= 0) - goto out; - - for_each_possible_cpu(cpu) { - struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu); - struct page **pages, **oldpages; - void *ptr, *old_ptr; - - pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL); - if (!pages) { - ret = -ENOMEM; - break; - } - - for (i = 0; i < nrpages; ++i) { - pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL); - if (!pages[i]) { - ret = -ENOMEM; - oldpages = pages; - goto free_pagearray; - } - } - ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL); - if (!ptr) { - ret = -ENOMEM; - oldpages = pages; - goto free_pagearray; - } - raw_spin_lock(&pcb->lock); - old_ptr = pcb->ptr; - pcb->ptr = ptr; - oldpages = pcb->pages; - pcb->pages = pages; - i = pcb->nrpages; - pcb->nrpages = nrpages; - raw_spin_unlock(&pcb->lock); - - if (!oldpages) { - DBG_BUGON(old_ptr); - continue; - } - - if (old_ptr) - vunmap(old_ptr); -free_pagearray: - while (i) - erofs_pagepool_add(&pagepool, oldpages[--i]); - kfree(oldpages); - if (ret) - break; - } - pcb_nrpages = nrpages; - erofs_release_pages(&pagepool); -out: - mutex_unlock(&pcb_resize_mutex); - return ret; -} - -void __init erofs_pcpubuf_init(void) -{ - int cpu; - - for_each_possible_cpu(cpu) { - struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu); - - raw_spin_lock_init(&pcb->lock); - } -} - -void erofs_pcpubuf_exit(void) -{ - int cpu, i; - - for_each_possible_cpu(cpu) { - struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu); - - if (pcb->ptr) { - vunmap(pcb->ptr); - pcb->ptr = 
NULL; - } - if (!pcb->pages) - continue; - - for (i = 0; i < pcb->nrpages; ++i) - if (pcb->pages[i]) - put_page(pcb->pages[i]); - kfree(pcb->pages); - pcb->pages = NULL; - } -} diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 113414e6f35b964ffdb66e1002bfca98f609440f..96c77f5e097169ea4d63d2e411bcb645fa3ac164 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -4,15 +4,13 @@ * https://www.huawei.com/ * Copyright (C) 2021, Alibaba Cloud */ -#include #include -#include #include #include #include #include -#include #include +#include #include "xattr.h" #define CREATE_TRACE_POINTS @@ -20,31 +18,22 @@ static struct kmem_cache *erofs_inode_cachep __read_mostly; -void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...) +void _erofs_printk(struct super_block *sb, const char *fmt, ...) { struct va_format vaf; va_list args; + int level; va_start(args, fmt); - vaf.fmt = fmt; + level = printk_get_level(fmt); + vaf.fmt = printk_skip_level(fmt); vaf.va = &args; - - pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf); - va_end(args); -} - -void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - pr_info("(device %s): %pV", sb->s_id, &vaf); + if (sb) + printk("%c%cerofs (device %s): %pV", + KERN_SOH_ASCII, level, sb->s_id, &vaf); + else + printk("%c%cerofs: %pV", KERN_SOH_ASCII, level, &vaf); va_end(args); } @@ -105,22 +94,6 @@ static void erofs_free_inode(struct inode *inode) kmem_cache_free(erofs_inode_cachep, vi); } -static bool check_layout_compatibility(struct super_block *sb, - struct erofs_super_block *dsb) -{ - const unsigned int feature = le32_to_cpu(dsb->feature_incompat); - - EROFS_SB(sb)->feature_incompat = feature; - - /* check if current kernel meets all mandatory requirements */ - if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) { - erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel", - feature & ~EROFS_ALL_FEATURE_INCOMPAT); - return false; - } - return true; -} - /* read variable-sized metadata, offset will be aligned by 4-byte */ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, erofs_off_t *offset, int *lengthp) @@ -129,11 +102,11 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, int len, i, cnt; *offset = round_up(*offset, 4); - ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP); + ptr = erofs_bread(buf, *offset, EROFS_KMAP); if (IS_ERR(ptr)) return ptr; - len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]); + len = le16_to_cpu(*(__le16 *)ptr); if (!len) len = U16_MAX + 1; buffer = kmalloc(len, GFP_KERNEL); @@ -145,12 +118,12 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, for (i = 0; i < len; i += cnt) { cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset), len - i); - ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP); + ptr = erofs_bread(buf, *offset, EROFS_KMAP); if (IS_ERR(ptr)) { kfree(buffer); return ptr; } - memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt); + memcpy(buffer + i, ptr, cnt); *offset += cnt; } return buffer; @@ -175,12 +148,11 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, struct erofs_fscache *fscache; struct erofs_deviceslot *dis; struct bdev_handle *bdev_handle; - void *ptr; + struct file *file; - ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP); - if (IS_ERR(ptr)) - return PTR_ERR(ptr); - dis = ptr + 
erofs_blkoff(sb, *pos); + dis = erofs_read_metabuf(buf, sb, *pos, EROFS_KMAP); + if (IS_ERR(dis)) + return PTR_ERR(dis); if (!sbi->devs->flatdev && !dif->path) { if (!dis->tag[0]) { @@ -198,13 +170,24 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, return PTR_ERR(fscache); dif->fscache = fscache; } else if (!sbi->devs->flatdev) { - bdev_handle = bdev_open_by_path(dif->path, BLK_OPEN_READ, - sb->s_type, NULL); - if (IS_ERR(bdev_handle)) - return PTR_ERR(bdev_handle); - dif->bdev_handle = bdev_handle; - dif->dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, - &dif->dax_part_off, NULL, NULL); + if (erofs_is_fileio_mode(sbi)) { + file = filp_open(dif->path, O_RDONLY | O_LARGEFILE, 0); + if (IS_ERR(file)) + return PTR_ERR(file); + if (!S_ISREG(file_inode(file)->i_mode)) { + fput(file); + return -EINVAL; + } + dif->file = file; + } else { + bdev_handle = bdev_open_by_path(dif->path, BLK_OPEN_READ, + sb->s_type, NULL); + if (IS_ERR(bdev_handle)) + return PTR_ERR(bdev_handle); + dif->bdev_handle = bdev_handle; + dif->dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, + &dif->dax_part_off, NULL, NULL); + } } dif->blocks = le32_to_cpu(dis->blocks); @@ -278,7 +261,7 @@ static int erofs_scan_devices(struct super_block *sb, static int erofs_read_superblock(struct super_block *sb) { - struct erofs_sb_info *sbi; + struct erofs_sb_info *sbi = EROFS_SB(sb); struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct erofs_super_block *dsb; void *data; @@ -290,9 +273,7 @@ static int erofs_read_superblock(struct super_block *sb) return PTR_ERR(data); } - sbi = EROFS_SB(sb); dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET); - ret = -EINVAL; if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) { erofs_err(sb, "cannot find valid erofs superblock"); @@ -317,8 +298,12 @@ static int erofs_read_superblock(struct super_block *sb) } ret = -EINVAL; - if (!check_layout_compatibility(sb, dsb)) + sbi->feature_incompat = le32_to_cpu(dsb->feature_incompat); + if (sbi->feature_incompat & ~EROFS_ALL_FEATURE_INCOMPAT) { + erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel", + sbi->feature_incompat & ~EROFS_ALL_FEATURE_INCOMPAT); goto out; + } sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE; if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) { @@ -342,7 +327,7 @@ static int erofs_read_superblock(struct super_block *sb) sbi->build_time = le64_to_cpu(dsb->build_time); sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec); - memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid)); + super_set_uuid(sb, (void *)dsb->uuid, sizeof(dsb->uuid)); ret = strscpy(sbi->volume_name, dsb->volume_name, sizeof(dsb->volume_name)); @@ -575,6 +560,22 @@ static const struct export_operations erofs_export_ops = { .get_parent = erofs_get_parent, }; +static void erofs_set_sysfs_name(struct super_block *sb) +{ + struct erofs_sb_info *sbi = EROFS_SB(sb); + + if (sbi->domain_id) + super_set_sysfs_name_generic(sb, "%s,%s", sbi->domain_id, + sbi->fsid); + else if (sbi->fsid) + super_set_sysfs_name_generic(sb, "%s", sbi->fsid); + else if (erofs_is_fileio_mode(sbi)) + super_set_sysfs_name_generic(sb, "%s", + bdi_dev_name(sb->s_bdi)); + else + super_set_sysfs_name_id(sb); +} + static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) { struct inode *inode; @@ -587,14 +588,15 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_op = &erofs_sops; sbi->blkszbits = PAGE_SHIFT; - if (erofs_is_fscache_mode(sb)) { + if (!sb->s_bdev) { 
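
A reviewer's aside: for superblocks without a backing block device (both the fscache mode and the new file-backed mode take this branch), the block size cannot be derived from a bdev, so the page size is used and a standalone BDI is set up. A minimal sketch of that pattern, with hypothetical demo_* names rather than the actual erofs code:

	/* illustrative only: bdev-less fill_super setup */
	static int demo_fill_super(struct super_block *sb, struct fs_context *fc)
	{
		int err;

		if (!sb->s_bdev) {
			sb->s_blocksize = PAGE_SIZE;	/* no bdev to query */
			sb->s_blocksize_bits = PAGE_SHIFT;
			err = super_setup_bdi(sb);	/* per-sb BDI for readahead */
			if (err)
				return err;
		}
		return 0;
	}
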
sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; - err = erofs_fscache_register_fs(sb); - if (err) - return err; - + if (erofs_is_fscache_mode(sb)) { + err = erofs_fscache_register_fs(sb); + if (err) + return err; + } err = super_setup_bdi(sb); if (err) return err; @@ -618,7 +620,11 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) errorfc(fc, "unsupported blksize for fscache mode"); return -EINVAL; } - if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) { + + if (erofs_is_fileio_mode(sbi)) { + sb->s_blocksize = 1 << sbi->blkszbits; + sb->s_blocksize_bits = sbi->blkszbits; + } else if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) { errorfc(fc, "failed to set erofs blksize"); return -EINVAL; } @@ -635,7 +641,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) } sb->s_time_gran = 1; - sb->s_xattr = erofs_xattr_handlers; + sb->s_xattr = (const struct xattr_handler **)erofs_xattr_handlers; sb->s_export_op = &erofs_export_ops; if (test_opt(&sbi->opt, POSIX_ACL)) @@ -647,13 +653,13 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) xa_init(&sbi->managed_pslots); #endif - inode = erofs_iget(sb, ROOT_NID(sbi)); + inode = erofs_iget(sb, sbi->root_nid); if (IS_ERR(inode)) return PTR_ERR(inode); if (!S_ISDIR(inode->i_mode)) { erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)", - ROOT_NID(sbi), inode->i_mode); + sbi->root_nid, inode->i_mode); iput(inode); return -EINVAL; } @@ -679,22 +685,41 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) if (err) return err; + erofs_set_sysfs_name(sb); err = erofs_register_sysfs(sb); if (err) return err; - erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi)); + erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid); return 0; } static int erofs_fc_get_tree(struct fs_context *fc) { struct erofs_sb_info *sbi = fc->s_fs_info; + int ret; if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) return get_tree_nodev(fc, erofs_fc_fill_super); - return get_tree_bdev(fc, erofs_fc_fill_super); + ret = get_tree_bdev_flags(fc, erofs_fc_fill_super, + IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) ? 
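+ GET_TREE_BDEV_QUIET_LOOKUP : 0);

GET_TREE_BDEV_QUIET_LOOKUP keeps the failed blockdev lookup quiet so that a regular-file source can be retried below without logging a spurious error. Reduced to a sketch with hypothetical demo_* names (the real fallback logic follows in this hunk):

	/* try a block device first; fall back to a backing file on -ENOTBLK */
	static int demo_get_tree(struct fs_context *fc)
	{
		int ret = get_tree_bdev_flags(fc, demo_fill_super,
					      GET_TREE_BDEV_QUIET_LOOKUP);

		if (ret != -ENOTBLK)
			return ret;
		/* the source is not a bdev: treat it as a file-backed image */
		return get_tree_nodev(fc, demo_fill_super);
	}
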
+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE + if (ret == -ENOTBLK) { + if (!fc->source) + return invalf(fc, "No source specified"); + sbi->fdev = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0); + if (IS_ERR(sbi->fdev)) + return PTR_ERR(sbi->fdev); + + if (S_ISREG(file_inode(sbi->fdev)->i_mode) && + sbi->fdev->f_mapping->a_ops->read_folio) + return get_tree_nodev(fc, erofs_fc_fill_super); + fput(sbi->fdev); + } +#endif + return ret; } static int erofs_fc_reconfigure(struct fs_context *fc) @@ -726,6 +751,8 @@ static int erofs_release_device_info(int id, void *ptr, void *data) fs_put_dax(dif->dax_dev, NULL); if (dif->bdev_handle) bdev_release(dif->bdev_handle); + if (dif->file) + fput(dif->file); erofs_fscache_unregister_cookie(dif->fscache); dif->fscache = NULL; kfree(dif->path); @@ -788,7 +815,7 @@ static void erofs_kill_sb(struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); - if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) + if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) || sbi->fdev) kill_anon_super(sb); else kill_block_super(sb); @@ -798,6 +825,8 @@ static void erofs_kill_sb(struct super_block *sb) erofs_fscache_unregister_fs(sb); kfree(sbi->fsid); kfree(sbi->domain_id); + if (sbi->fdev) + fput(sbi->fdev); kfree(sbi); sb->s_fs_info = NULL; } @@ -848,16 +877,7 @@ static int __init erofs_module_init(void) if (err) goto shrinker_err; - err = z_erofs_lzma_init(); - if (err) - goto lzma_err; - - err = z_erofs_deflate_init(); - if (err) - goto deflate_err; - - erofs_pcpubuf_init(); - err = z_erofs_init_zip_subsystem(); + err = z_erofs_init_subsystem(); if (err) goto zip_err; @@ -874,12 +894,8 @@ static int __init erofs_module_init(void) fs_err: erofs_exit_sysfs(); sysfs_err: - z_erofs_exit_zip_subsystem(); + z_erofs_exit_subsystem(); zip_err: - z_erofs_deflate_exit(); -deflate_err: - z_erofs_lzma_exit(); -lzma_err: erofs_exit_shrinker(); shrinker_err: kmem_cache_destroy(erofs_inode_cachep); @@ -894,34 +910,29 @@ static void __exit erofs_module_exit(void) rcu_barrier(); erofs_exit_sysfs(); - z_erofs_exit_zip_subsystem(); - z_erofs_deflate_exit(); - z_erofs_lzma_exit(); + z_erofs_exit_subsystem(); erofs_exit_shrinker(); kmem_cache_destroy(erofs_inode_cachep); - erofs_pcpubuf_exit(); } static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct erofs_sb_info *sbi = EROFS_SB(sb); - u64 id = 0; - - if (!erofs_is_fscache_mode(sb)) - id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = sb->s_magic; buf->f_bsize = sb->s_blocksize; buf->f_blocks = sbi->total_blocks; buf->f_bfree = buf->f_bavail = 0; - buf->f_files = ULLONG_MAX; buf->f_ffree = ULLONG_MAX - sbi->inos; - buf->f_namelen = EROFS_NAME_LEN; - buf->f_fsid = u64_to_fsid(id); + if (uuid_is_null(&sb->s_uuid)) + buf->f_fsid = u64_to_fsid(!sb->s_bdev ?
0 : + huge_encode_dev(sb->s_bdev->bd_dev)); + else + buf->f_fsid = uuid_to_fsid(sb->s_uuid.b); return 0; } @@ -930,26 +941,14 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root) struct erofs_sb_info *sbi = EROFS_SB(root->d_sb); struct erofs_mount_opts *opt = &sbi->opt; -#ifdef CONFIG_EROFS_FS_XATTR - if (test_opt(opt, XATTR_USER)) - seq_puts(seq, ",user_xattr"); - else - seq_puts(seq, ",nouser_xattr"); -#endif -#ifdef CONFIG_EROFS_FS_POSIX_ACL - if (test_opt(opt, POSIX_ACL)) - seq_puts(seq, ",acl"); - else - seq_puts(seq, ",noacl"); -#endif -#ifdef CONFIG_EROFS_FS_ZIP - if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED) - seq_puts(seq, ",cache_strategy=disabled"); - else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) - seq_puts(seq, ",cache_strategy=readahead"); - else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND) - seq_puts(seq, ",cache_strategy=readaround"); -#endif + if (IS_ENABLED(CONFIG_EROFS_FS_XATTR)) + seq_puts(seq, test_opt(opt, XATTR_USER) ? + ",user_xattr" : ",nouser_xattr"); + if (IS_ENABLED(CONFIG_EROFS_FS_POSIX_ACL)) + seq_puts(seq, test_opt(opt, POSIX_ACL) ? ",acl" : ",noacl"); + if (IS_ENABLED(CONFIG_EROFS_FS_ZIP)) + seq_printf(seq, ",cache_strategy=%s", + erofs_param_cache_strategy[opt->cache_strategy].name); if (test_opt(opt, DAX_ALWAYS)) seq_puts(seq, ",dax=always"); if (test_opt(opt, DAX_NEVER)) diff --git a/fs/erofs/sysfs.c b/fs/erofs/sysfs.c index 435e515c0792012fb0de8c5268c7f7042cedf811..63cffd0fd26195ac4f62e22f07ac273b97a88b4d 100644 --- a/fs/erofs/sysfs.c +++ b/fs/erofs/sysfs.c @@ -205,34 +205,16 @@ static struct kobject erofs_feat = { int erofs_register_sysfs(struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); - char *name; - char *str = NULL; int err; - if (erofs_is_fscache_mode(sb)) { - if (sbi->domain_id) { - str = kasprintf(GFP_KERNEL, "%s,%s", sbi->domain_id, - sbi->fsid); - if (!str) - return -ENOMEM; - name = str; - } else { - name = sbi->fsid; - } - } else { - name = sb->s_id; - } sbi->s_kobj.kset = &erofs_root; init_completion(&sbi->s_kobj_unregister); - err = kobject_init_and_add(&sbi->s_kobj, &erofs_sb_ktype, NULL, "%s", name); - kfree(str); - if (err) - goto put_sb_kobj; - return 0; - -put_sb_kobj: - kobject_put(&sbi->s_kobj); - wait_for_completion(&sbi->s_kobj_unregister); + err = kobject_init_and_add(&sbi->s_kobj, &erofs_sb_ktype, NULL, "%s", + sb->s_sysfs_name); + if (err) { + kobject_put(&sbi->s_kobj); + wait_for_completion(&sbi->s_kobj_unregister); + } return err; } diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c index 09d341675e890e6bc9a975cc0024bf5429b78ca6..a90d7d649739051db77fb543cd69090512535346 100644 --- a/fs/erofs/xattr.c +++ b/fs/erofs/xattr.c @@ -81,13 +81,13 @@ static int erofs_init_inode_xattrs(struct inode *inode) it.pos = erofs_iloc(inode) + vi->inode_isize; /* read in shared xattr array (non-atomic, see kmalloc below) */ - it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos), EROFS_KMAP); + it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP); if (IS_ERR(it.kaddr)) { ret = PTR_ERR(it.kaddr); goto out_unlock; } - ih = it.kaddr + erofs_blkoff(sb, it.pos); + ih = it.kaddr; vi->xattr_name_filter = le32_to_cpu(ih->h_name_filter); vi->xattr_shared_count = ih->h_shared_count; vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count, @@ -102,16 +102,14 @@ static int erofs_init_inode_xattrs(struct inode *inode) it.pos += sizeof(struct erofs_xattr_ibody_header); for (i = 0; i < vi->xattr_shared_count; ++i) { - it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos), - 
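EROFS_KMAP);

Worth calling out for reviewers: the reworked erofs_bread() now takes an absolute byte offset and returns a pointer already adjusted inside the block, so every caller drops its manual erofs_blknr()/erofs_blkoff() arithmetic. The calling convention before and after, condensed from the hunks in this patch:

	/* before: split pos into a block number and re-add the in-block offset */
	kaddr = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP);
	if (!IS_ERR(kaddr))
		v = le32_to_cpu(*(__le32 *)(kaddr + erofs_blkoff(sb, pos)));

	/* after: pass the byte offset; the returned pointer is pre-adjusted */
	kaddr = erofs_bread(&buf, pos, EROFS_KMAP);
	if (!IS_ERR(kaddr))
		v = le32_to_cpu(*(__le32 *)kaddr);
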
+ it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP); if (IS_ERR(it.kaddr)) { kfree(vi->xattr_shared_xattrs); vi->xattr_shared_xattrs = NULL; ret = PTR_ERR(it.kaddr); goto out_unlock; } - vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *) - (it.kaddr + erofs_blkoff(sb, it.pos))); + vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *)it.kaddr); it.pos += sizeof(__le32); } erofs_put_metabuf(&it.buf); @@ -168,7 +166,7 @@ const struct xattr_handler __maybe_unused erofs_xattr_security_handler = { }; #endif -const struct xattr_handler *erofs_xattr_handlers[] = { +const struct xattr_handler * const erofs_xattr_handlers[] = { &erofs_xattr_user_handler, &erofs_xattr_trusted_handler, #ifdef CONFIG_EROFS_FS_SECURITY @@ -185,12 +183,11 @@ static int erofs_xattr_copy_to_buffer(struct erofs_xattr_iter *it, void *src; for (processed = 0; processed < len; processed += slice) { - it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos), - EROFS_KMAP); + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); if (IS_ERR(it->kaddr)) return PTR_ERR(it->kaddr); - src = it->kaddr + erofs_blkoff(sb, it->pos); + src = it->kaddr; slice = min_t(unsigned int, sb->s_blocksize - erofs_blkoff(sb, it->pos), len - processed); memcpy(it->buffer + it->buffer_ofs, src, slice); @@ -208,8 +205,7 @@ static int erofs_listxattr_foreach(struct erofs_xattr_iter *it) int err; /* 1. handle xattr entry */ - entry = *(struct erofs_xattr_entry *) - (it->kaddr + erofs_blkoff(it->sb, it->pos)); + entry = *(struct erofs_xattr_entry *)it->kaddr; it->pos += sizeof(struct erofs_xattr_entry); base_index = entry.e_name_index; @@ -259,8 +255,7 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it) unsigned int slice, processed, value_sz; /* 1. handle xattr entry */ - entry = *(struct erofs_xattr_entry *) - (it->kaddr + erofs_blkoff(sb, it->pos)); + entry = *(struct erofs_xattr_entry *)it->kaddr; it->pos += sizeof(struct erofs_xattr_entry); value_sz = le16_to_cpu(entry.e_value_size); @@ -291,8 +286,7 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it) /* 2.
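handle xattr name */

Since an xattr name can straddle a block boundary, the match below runs slice by slice: each iteration re-reads the metabuf at the current position and compares at most the bytes left in this block. A condensed, self-contained rendering of the loop shape (demo_match_name is a hypothetical helper, not in the patch):

	/* match `name' (len bytes) against on-disk bytes at byte position pos */
	static int demo_match_name(struct super_block *sb, struct erofs_buf *buf,
				   const char *name, unsigned int len, erofs_off_t pos)
	{
		unsigned int processed = 0, slice;
		void *kaddr;

		while (processed < len) {
			kaddr = erofs_bread(buf, pos, EROFS_KMAP);
			if (IS_ERR(kaddr))
				return PTR_ERR(kaddr);
			slice = min_t(unsigned int,
				      sb->s_blocksize - erofs_blkoff(sb, pos),
				      len - processed);
			if (memcmp(name + processed, kaddr, slice))
				return -ENOATTR;	/* mismatch */
			processed += slice;
			pos += slice;
		}
		return 0;
	}
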
for (processed = 0; processed < entry.e_name_len; processed += slice) { - it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos), - EROFS_KMAP); + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); if (IS_ERR(it->kaddr)) return PTR_ERR(it->kaddr); @@ -300,7 +294,7 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it) sb->s_blocksize - erofs_blkoff(sb, it->pos), entry.e_name_len - processed); if (memcmp(it->name.name + it->infix_len + processed, - it->kaddr + erofs_blkoff(sb, it->pos), slice)) + it->kaddr, slice)) return -ENOATTR; it->pos += slice; } @@ -336,13 +330,11 @@ static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it, it->pos = erofs_iloc(inode) + vi->inode_isize + xattr_header_sz; while (remaining) { - it->kaddr = erofs_bread(&it->buf, erofs_blknr(it->sb, it->pos), - EROFS_KMAP); + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); if (IS_ERR(it->kaddr)) return PTR_ERR(it->kaddr); - entry_sz = erofs_xattr_entry_size(it->kaddr + - erofs_blkoff(it->sb, it->pos)); + entry_sz = erofs_xattr_entry_size(it->kaddr); /* xattr on-disk corruption: xattr entry beyond xattr_isize */ if (remaining < entry_sz) { DBG_BUGON(1); @@ -375,8 +367,7 @@ static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it, for (i = 0; i < vi->xattr_shared_count; ++i) { it->pos = erofs_pos(sb, sbi->xattr_blkaddr) + vi->xattr_shared_xattrs[i] * sizeof(__le32); - it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos), - EROFS_KMAP); + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); if (IS_ERR(it->kaddr)) return PTR_ERR(it->kaddr); @@ -492,7 +483,7 @@ int erofs_xattr_prefixes_init(struct super_block *sb) return -ENOMEM; if (sbi->packed_inode) - buf.inode = sbi->packed_inode; + buf.mapping = sbi->packed_inode->i_mapping; else erofs_init_metabuf(&buf, sb); diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h index f16283cb8c9301c1f0ce81d5bd321ca4c04d8c72..b246cd0e135ed448ff87c6d8c24959836e7e6e4f 100644 --- a/fs/erofs/xattr.h +++ b/fs/erofs/xattr.h @@ -23,7 +23,7 @@ static inline const char *erofs_xattr_prefix(unsigned int idx, { const struct xattr_handler *handler = NULL; - static const struct xattr_handler *xattr_handler_map[] = { + static const struct xattr_handler * const xattr_handler_map[] = { [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler, #ifdef CONFIG_EROFS_FS_POSIX_ACL [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access, @@ -44,7 +44,7 @@ static inline const char *erofs_xattr_prefix(unsigned int idx, return xattr_prefix(handler); } -extern const struct xattr_handler *erofs_xattr_handlers[]; +extern const struct xattr_handler * const erofs_xattr_handlers[]; int erofs_xattr_prefixes_init(struct super_block *sb); void erofs_xattr_prefixes_cleanup(struct super_block *sb); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 1c0e6167d8e73b8fc35d9f8ca1e0972023bc1c71..a569ff9dfd04424596e79b59c578b349fea07680 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -56,6 +56,9 @@ struct z_erofs_pcluster { /* L: total number of bvecs */ unsigned int vcnt; + /* I: pcluster size (compressed size) in bytes */ + unsigned int pclustersize; + /* I: page offset of start position of decompression */ unsigned short pageofs_out; @@ -70,14 +73,6 @@ struct z_erofs_pcluster { struct rcu_head rcu; }; - union { - /* I: physical cluster size in pages */ - unsigned short pclusterpages; - - /* I: tailpacking inline compressed size */ - unsigned short tailpacking_size; - }; - /* I: compression algorithm format */ unsigned char algorithmformat; @@ -87,6
+82,9 @@ struct z_erofs_pcluster { /* L: indicate several pageofs_outs or not */ bool multibases; + /* L: whether extra buffer allocations are best-effort */ + bool besteffort; + /* A: compressed bvecs (can be cached or inplaced pages) */ struct z_erofs_bvec compressed_bvecs[]; }; @@ -115,52 +113,13 @@ static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl) static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl) { - if (z_erofs_is_inline_pcluster(pcl)) - return 1; - return pcl->pclusterpages; + return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT; } -/* - * bit 30: I/O error occurred on this page - * bit 0 - 29: remaining parts to complete this page - */ -#define Z_EROFS_PAGE_EIO (1 << 30) - -static inline void z_erofs_onlinepage_init(struct page *page) +#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) +static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo) { - union { - atomic_t o; - unsigned long v; - } u = { .o = ATOMIC_INIT(1) }; - - set_page_private(page, u.v); - smp_wmb(); - SetPagePrivate(page); -} - -static inline void z_erofs_onlinepage_split(struct page *page) -{ - atomic_inc((atomic_t *)&page->private); -} - -static void z_erofs_onlinepage_endio(struct page *page, int err) -{ - int orig, v; - - DBG_BUGON(!PagePrivate(page)); - - do { - orig = atomic_read((atomic_t *)&page->private); - v = (orig - 1) | (err ? Z_EROFS_PAGE_EIO : 0); - } while (atomic_cmpxchg((atomic_t *)&page->private, orig, v) != orig); - - if (!(v & ~Z_EROFS_PAGE_EIO)) { - set_page_private(page, 0); - ClearPagePrivate(page); - if (!(v & Z_EROFS_PAGE_EIO)) - SetPageUptodate(page); - unlock_page(page); - } + return fo->mapping == MNGD_MAPPING(sbi); } #define Z_EROFS_ONSTACK_PAGES 32 @@ -237,7 +196,8 @@ static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter, struct page *nextpage = *candidate_bvpage; if (!nextpage) { - nextpage = erofs_allocpage(pagepool, GFP_NOFS); + nextpage = __erofs_allocpage(pagepool, GFP_KERNEL, + true); if (!nextpage) return -ENOMEM; set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE); @@ -298,21 +258,21 @@ static int z_erofs_create_pcluster_pool(void) return 0; } -static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages) +static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size) { - int i; + unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT; + struct z_erofs_pcluster_slab *pcs = pcluster_pool; - for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { - struct z_erofs_pcluster_slab *pcs = pcluster_pool + i; + for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) { struct z_erofs_pcluster *pcl; if (nrpages > pcs->maxpages) continue; - pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS); + pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL); if (!pcl) return ERR_PTR(-ENOMEM); - pcl->pclusterpages = nrpages; + pcl->pclustersize = size; return pcl; } return ERR_PTR(-EINVAL); @@ -451,44 +411,51 @@ static inline int erofs_cpu_hotplug_init(void) { return 0; } static inline void erofs_cpu_hotplug_destroy(void) {} #endif -void z_erofs_exit_zip_subsystem(void) +void z_erofs_exit_subsystem(void) { erofs_cpu_hotplug_destroy(); erofs_destroy_percpu_workers(); destroy_workqueue(z_erofs_workqueue); z_erofs_destroy_pcluster_pool(); + z_erofs_exit_decompressor(); } -int __init z_erofs_init_zip_subsystem(void) +int __init z_erofs_init_subsystem(void) { - int err = z_erofs_create_pcluster_pool(); + int err = z_erofs_init_decompressor(); + + if (err) + goto err_decompressor; + err = 
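z_erofs_create_pcluster_pool();

The merged z_erofs_init_subsystem() keeps the usual kernel init idiom: every step that can fail unwinds all previously completed steps in reverse order through a goto ladder. Reduced to its bare shape with hypothetical step_* names:

	static int __init demo_init(void)
	{
		int err = step_a_init();

		if (err)
			return err;
		err = step_b_init();
		if (err)
			goto err_b;
		err = step_c_init();
		if (err)
			goto err_c;
		return 0;
	err_c:
		step_b_exit();	/* unwind in reverse order */
	err_b:
		step_a_exit();
		return err;
	}
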
if (err) - goto out_error_pcluster_pool; + goto err_pcluster_pool; z_erofs_workqueue = alloc_workqueue("erofs_worker", WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus()); if (!z_erofs_workqueue) { err = -ENOMEM; - goto out_error_workqueue_init; + goto err_workqueue_init; } err = erofs_init_percpu_workers(); if (err) - goto out_error_pcpu_worker; + goto err_pcpu_worker; err = erofs_cpu_hotplug_init(); if (err < 0) - goto out_error_cpuhp_init; + goto err_cpuhp_init; return err; -out_error_cpuhp_init: +err_cpuhp_init: erofs_destroy_percpu_workers(); -out_error_pcpu_worker: +err_pcpu_worker: destroy_workqueue(z_erofs_workqueue); -out_error_workqueue_init: +err_workqueue_init: z_erofs_destroy_pcluster_pool(); -out_error_pcluster_pool: +err_pcluster_pool: + z_erofs_exit_decompressor(); +err_decompressor: return err; } @@ -559,6 +526,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) { struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode)); struct z_erofs_pcluster *pcl = fe->pcl; + unsigned int pclusterpages = z_erofs_pclusterpages(pcl); bool shouldalloc = z_erofs_should_alloc_cache(fe); bool standalone = true; /* @@ -569,42 +537,40 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; unsigned int i; - if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) + if (i_blocksize(fe->inode) != PAGE_SIZE || + fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) return; - for (i = 0; i < pcl->pclusterpages; ++i) { - struct page *page; - void *t; /* mark pages just found for debugging */ - struct page *newpage = NULL; + for (i = 0; i < pclusterpages; ++i) { + struct page *page, *newpage; - /* the compressed page was loaded before */ + /* Inaccurate check w/o locking to avoid unneeded lookups */ if (READ_ONCE(pcl->compressed_bvecs[i].page)) continue; page = find_get_page(mc, pcl->obj.index + i); - - if (page) { - t = (void *)((unsigned long)page | 1); - } else { + if (!page) { /* I/O is needed, no possible to decompress directly */ standalone = false; if (!shouldalloc) continue; /* - * try to use cached I/O if page allocation - * succeeds or fallback to in-place I/O instead - * to avoid any direct reclaim. + * Try cached I/O if allocation succeeds or fall back to + * in-place I/O instead to avoid any direct reclaim. */ newpage = erofs_allocpage(&fe->pagepool, gfp); if (!newpage) continue; set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE); - t = (void *)((unsigned long)newpage | 1); } - - if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t)) + spin_lock(&pcl->obj.lockref.lock); + if (!pcl->compressed_bvecs[i].page) { + pcl->compressed_bvecs[i].page = page ?
page : newpage; + spin_unlock(&pcl->obj.lockref.lock); continue; + } + spin_unlock(&pcl->obj.lockref.lock); if (page) put_page(page); @@ -620,36 +586,31 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; } -/* called by erofs_shrinker to get rid of all compressed_pages */ -int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, - struct erofs_workgroup *grp) +/* (erofs_shrinker) disconnect cached encoded data with pclusters */ +int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, + struct erofs_workgroup *grp) { struct z_erofs_pcluster *const pcl = container_of(grp, struct z_erofs_pcluster, obj); + unsigned int pclusterpages = z_erofs_pclusterpages(pcl); + struct folio *folio; int i; DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); - /* - * refcount of workgroup is now freezed as 0, - * therefore no need to worry about available decompression users. - */ - for (i = 0; i < pcl->pclusterpages; ++i) { - struct page *page = pcl->compressed_bvecs[i].page; - - if (!page) - continue; - - /* block other users from reclaiming or migrating the page */ - if (!trylock_page(page)) - return -EBUSY; - - if (!erofs_page_is_managed(sbi, page)) - continue; + /* Each cached folio contains one page unless bs > ps is supported */ + for (i = 0; i < pclusterpages; ++i) { + if (pcl->compressed_bvecs[i].page) { + folio = page_folio(pcl->compressed_bvecs[i].page); + /* Avoid reclaiming or migrating this folio */ + if (!folio_trylock(folio)) + return -EBUSY; - /* barrier is implied in the following 'unlock_page' */ - WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); - detach_page_private(page); - unlock_page(page); + if (!erofs_folio_is_managed(sbi, folio)) + continue; + pcl->compressed_bvecs[i].page = NULL; + folio_detach_private(folio); + folio_unlock(folio); + } } return 0; } @@ -657,28 +618,26 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) { struct z_erofs_pcluster *pcl = folio_get_private(folio); + struct z_erofs_bvec *bvec = pcl->compressed_bvecs; + struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl); bool ret; - int i; if (!folio_test_private(folio)) return true; ret = false; spin_lock(&pcl->obj.lockref.lock); - if (pcl->obj.lockref.count > 0) - goto out; - - DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); - for (i = 0; i < pcl->pclusterpages; ++i) { - if (pcl->compressed_bvecs[i].page == &folio->page) { - WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); - ret = true; - break; + if (pcl->obj.lockref.count <= 0) { + DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); + for (; bvec < end; ++bvec) { + if (bvec->page && page_folio(bvec->page) == folio) { + bvec->page = NULL; + folio_detach_private(folio); + ret = true; + break; + } } } - if (ret) - folio_detach_private(folio); -out: spin_unlock(&pcl->obj.lockref.lock); return ret; } @@ -697,7 +656,7 @@ static void z_erofs_cache_invalidate_folio(struct folio *folio, DBG_BUGON(stop > folio_size(folio) || stop < length); if (offset == 0 && stop == folio_size(folio)) - while (!z_erofs_cache_release_folio(folio, GFP_NOFS)) + while (!z_erofs_cache_release_folio(folio, 0)) cond_resched(); } @@ -716,36 +675,30 @@ int erofs_init_managed_cache(struct super_block *sb) set_nlink(inode, 1); inode->i_size = OFFSET_MAX; inode->i_mapping->a_ops = &z_erofs_cache_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); + mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL); EROFS_SB(sb)->managed_cache = 
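inode;

The managed cache is this kernel-internal pseudo inode whose page cache holds compressed folios; under memory pressure, erofs_try_to_free_all_cached_folios() above detaches them with only a trylock so the shrinker never blocks, returning -EBUSY to retry later. That trylock discipline, sketched with hypothetical cached[]/nrfolios names:

	/* shrinker path: never sleep on a folio lock */
	for (i = 0; i < nrfolios; ++i) {
		struct folio *folio = cached[i];

		if (!folio)
			continue;
		if (!folio_trylock(folio))
			return -EBUSY;		/* contended: back off this round */
		cached[i] = NULL;
		folio_detach_private(folio);	/* drop the pcluster binding */
		folio_unlock(folio);
	}
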
return 0; } -static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe, - struct z_erofs_bvec *bvec) -{ - struct z_erofs_pcluster *const pcl = fe->pcl; - - while (fe->icur > 0) { - if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page, - NULL, bvec->page)) { - pcl->compressed_bvecs[fe->icur] = *bvec; - return true; - } - } - return false; -} - /* callers must be with pcluster lock held */ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, struct z_erofs_bvec *bvec, bool exclusive) { + struct z_erofs_pcluster *pcl = fe->pcl; int ret; if (exclusive) { /* give priority for inplaceio to use file pages first */ - if (z_erofs_try_inplace_io(fe, bvec)) + spin_lock(&pcl->obj.lockref.lock); + while (fe->icur > 0) { + if (pcl->compressed_bvecs[--fe->icur].page) + continue; + pcl->compressed_bvecs[fe->icur] = *bvec; + spin_unlock(&pcl->obj.lockref.lock); return 0; + } + spin_unlock(&pcl->obj.lockref.lock); + /* otherwise, check if it can be used as a bvpage */ if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED && !fe->candidate_bvpage) @@ -757,41 +710,23 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, return ret; } -static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f) -{ - struct z_erofs_pcluster *pcl = f->pcl; - z_erofs_next_pcluster_t *owned_head = &f->owned_head; - - /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */ - if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL, - *owned_head) == Z_EROFS_PCLUSTER_NIL) { - *owned_head = &pcl->next; - /* so we can attach this pcluster to our submission chain. */ - f->mode = Z_EROFS_PCLUSTER_FOLLOWED; - return; - } - - /* type 2, it belongs to an ongoing chain */ - f->mode = Z_EROFS_PCLUSTER_INFLIGHT; -} - static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) { struct erofs_map_blocks *map = &fe->map; + struct super_block *sb = fe->inode->i_sb; bool ztailpacking = map->m_flags & EROFS_MAP_META; struct z_erofs_pcluster *pcl; struct erofs_workgroup *grp; int err; if (!(map->m_flags & EROFS_MAP_ENCODED) || - (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) { + (!ztailpacking && !erofs_blknr(sb, map->m_pa))) { DBG_BUGON(1); return -EFSCORRUPTED; } /* no available pcluster, let's allocate one */ - pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 : - map->m_plen >> PAGE_SHIFT); + pcl = z_erofs_alloc_pcluster(map->m_plen); if (IS_ERR(pcl)) return PTR_ERR(pcl); @@ -815,9 +750,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) if (ztailpacking) { pcl->obj.index = 0; /* which indicates ztailpacking */ - pcl->tailpacking_size = map->m_plen; } else { - pcl->obj.index = map->m_pa >> PAGE_SHIFT; + pcl->obj.index = erofs_blknr(sb, map->m_pa); grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj); if (IS_ERR(grp)) { @@ -851,7 +785,6 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) int ret; DBG_BUGON(fe->pcl); - /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */ DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL); @@ -871,7 +804,15 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) if (ret == -EEXIST) { mutex_lock(&fe->pcl->lock); + /* check if this pcluster hasn't been linked into any chain. */ + if (cmpxchg(&fe->pcl->next, Z_EROFS_PCLUSTER_NIL, + fe->owned_head) == Z_EROFS_PCLUSTER_NIL) { - z_erofs_try_to_claim_pcluster(fe); + /* ..
so it can be attached to our submission chain */ + fe->owned_head = &fe->pcl->next; + fe->mode = Z_EROFS_PCLUSTER_FOLLOWED; + } else { /* otherwise, it belongs to an inflight chain */ + fe->mode = Z_EROFS_PCLUSTER_INFLIGHT; + } } else if (ret) { return ret; } @@ -884,7 +825,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) } else { void *mptr; - mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP); + mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, EROFS_NO_KMAP); if (IS_ERR(mptr)) { ret = PTR_ERR(mptr); erofs_err(sb, "failed to get inline data %d", ret); @@ -941,7 +882,7 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe) fe->pcl = NULL; } -static int z_erofs_read_fragment(struct super_block *sb, struct page *page, +static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio, unsigned int cur, unsigned int end, erofs_off_t pos) { struct inode *packed_inode = EROFS_SB(sb)->packed_inode; @@ -952,115 +893,112 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page, if (!packed_inode) return -EFSCORRUPTED; - buf.inode = packed_inode; + buf.mapping = packed_inode->i_mapping; for (; cur < end; cur += cnt, pos += cnt) { - cnt = min_t(unsigned int, end - cur, - sb->s_blocksize - erofs_blkoff(sb, pos)); - src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP); + cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos)); + src = erofs_bread(&buf, pos, EROFS_KMAP); if (IS_ERR(src)) { erofs_put_metabuf(&buf); return PTR_ERR(src); } - memcpy_to_page(page, cur, src + erofs_blkoff(sb, pos), cnt); + memcpy_to_folio(folio, cur, src, cnt); } erofs_put_metabuf(&buf); return 0; } -static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, - struct page *page) +static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f, + struct folio *folio, bool ra) { - struct inode *const inode = fe->inode; - struct erofs_map_blocks *const map = &fe->map; - const loff_t offset = page_offset(page); - bool tight = true, exclusive; - unsigned int cur, end, len, split; + struct inode *const inode = f->inode; + struct erofs_map_blocks *const map = &f->map; + const loff_t offset = folio_pos(folio); + const unsigned int bs = i_blocksize(inode); + unsigned int end = folio_size(folio), split = 0, cur, pgs; + bool tight, excl; int err = 0; - z_erofs_onlinepage_init(page); - - split = 0; - end = PAGE_SIZE; -repeat: - if (offset + end - 1 < map->m_la || - offset + end - 1 >= map->m_la + map->m_llen) { - z_erofs_pcluster_end(fe); - map->m_la = offset + end - 1; - map->m_llen = 0; - err = z_erofs_map_blocks_iter(inode, map, 0); - if (err) - goto out; - } - - cur = offset > map->m_la ? 
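0 : map->m_la - offset;

The chain claiming in the hunk above is a single lockless cmpxchg on pcl->next: only if it still reads Z_EROFS_PCLUSTER_NIL (the pcluster belongs to no chain) does this thread link the pcluster into its own submission chain; otherwise another chain already owns it and the mode degrades to INFLIGHT. Mirroring the patch:

	/* one atomic attempt to append the pcluster to our chain */
	if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
		    f->owned_head) == Z_EROFS_PCLUSTER_NIL) {
		f->owned_head = &pcl->next;		/* we own it now */
		f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
	} else {
		f->mode = Z_EROFS_PCLUSTER_INFLIGHT;	/* someone else's chain */
	}
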
- /* bump split parts first to avoid several separate cases */ - ++split; - - if (!(map->m_flags & EROFS_MAP_MAPPED)) { - zero_user_segment(page, cur, end); - tight = false; - goto next_part; - } - - if (map->m_flags & EROFS_MAP_FRAGMENT) { - erofs_off_t fpos = offset + cur - map->m_la; - - len = min_t(unsigned int, map->m_llen - fpos, end - cur); - err = z_erofs_read_fragment(inode->i_sb, page, cur, cur + len, - EROFS_I(inode)->z_fragmentoff + fpos); - if (err) - goto out; - tight = false; - goto next_part; - } + tight = (bs == PAGE_SIZE); + erofs_onlinefolio_init(folio); + do { + if (offset + end - 1 < map->m_la || + offset + end - 1 >= map->m_la + map->m_llen) { + z_erofs_pcluster_end(f); + map->m_la = offset + end - 1; + map->m_llen = 0; + err = z_erofs_map_blocks_iter(inode, map, 0); + if (err) + break; + } - if (!fe->pcl) { - err = z_erofs_pcluster_begin(fe); - if (err) - goto out; - } + cur = offset > map->m_la ? 0 : map->m_la - offset; + pgs = round_down(cur, PAGE_SIZE); + /* bump split parts first to avoid several separate cases */ + ++split; + + if (!(map->m_flags & EROFS_MAP_MAPPED)) { + folio_zero_segment(folio, cur, end); + tight = false; + } else if (map->m_flags & EROFS_MAP_FRAGMENT) { + erofs_off_t fpos = offset + cur - map->m_la; + + err = z_erofs_read_fragment(inode->i_sb, folio, cur, + cur + min(map->m_llen - fpos, end - cur), + EROFS_I(inode)->z_fragmentoff + fpos); + if (err) + break; + tight = false; + } else { + if (!f->pcl) { + err = z_erofs_pcluster_begin(f); + if (err) + break; + f->pcl->besteffort |= !ra; + } - /* - * Ensure the current partial page belongs to this submit chain rather - * than other concurrent submit chains or the noio(bypass) chain since - * those chains are handled asynchronously thus the page cannot be used - * for inplace I/O or bvpage (should be processed in a strict order.) - */ - tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); - exclusive = (!cur && ((split <= 1) || tight)); - if (cur) - tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED); - - err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) { - .page = page, - .offset = offset - map->m_la, - .end = end, - }), exclusive); - if (err) - goto out; - - z_erofs_onlinepage_split(page); - if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) - fe->pcl->multibases = true; - if (fe->pcl->length < offset + end - map->m_la) { - fe->pcl->length = offset + end - map->m_la; - fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK; - } - if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && - !(map->m_flags & EROFS_MAP_PARTIAL_REF) && - fe->pcl->length == map->m_llen) - fe->pcl->partial = false; -next_part: - /* shorten the remaining extent to update progress */ - map->m_llen = offset + cur - map->m_la; - map->m_flags &= ~EROFS_MAP_FULL_MAPPED; - - end = cur; - if (end > 0) - goto repeat; + pgs = round_down(end - 1, PAGE_SIZE); + /* + * Ensure this partial page belongs to this submit chain + * rather than other concurrent submit chains or + * noio(bypass) chains since those chains are handled + * asynchronously thus it cannot be used for inplace I/O + * or bvpage (should be processed in the strict order.)
+ */ + tight &= (f->mode >= Z_EROFS_PCLUSTER_FOLLOWED); + excl = false; + if (cur <= pgs) { + excl = (split <= 1) || tight; + cur = pgs; + } -out: - z_erofs_onlinepage_endio(page, err); + err = z_erofs_attach_page(f, &((struct z_erofs_bvec) { + .page = folio_page(folio, pgs >> PAGE_SHIFT), + .offset = offset + pgs - map->m_la, + .end = end - pgs, }), excl); + if (err) + break; + + erofs_onlinefolio_split(folio); + if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) + f->pcl->multibases = true; + if (f->pcl->length < offset + end - map->m_la) { + f->pcl->length = offset + end - map->m_la; + f->pcl->pageofs_out = map->m_la & ~PAGE_MASK; + } + if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && + !(map->m_flags & EROFS_MAP_PARTIAL_REF) && + f->pcl->length == map->m_llen) + f->pcl->partial = false; + } + /* shorten the remaining extent to update progress */ + map->m_llen = offset + cur - map->m_la; + map->m_flags &= ~EROFS_MAP_FULL_MAPPED; + if (cur <= pgs) { + split = cur < pgs; + tight = (bs == PAGE_SIZE); + } + } while ((end = cur) > 0); + erofs_onlinefolio_end(folio, err); return err; } @@ -1081,7 +1019,7 @@ static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi, static bool z_erofs_page_is_invalidated(struct page *page) { - return !page->mapping && !z_erofs_is_shortlived_page(page); + return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page); } struct z_erofs_decompress_backend { @@ -1163,7 +1101,7 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be, cur += len; } kunmap_local(dst); - z_erofs_onlinepage_endio(bvi->bvec.page, err); + erofs_onlinefolio_end(page_folio(bvi->bvec.page), err); list_del(p); kfree(bvi); } @@ -1206,34 +1144,28 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i]; struct page *page = bvec->page; - /* compressed pages ought to be present before decompressing */ - if (!page) { - DBG_BUGON(1); + /* compressed data ought to be valid when decompressing */ + if (IS_ERR(page) || !page) { + bvec->page = NULL; /* clear the failure reason */ + err = page ? 
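PTR_ERR(page) : -EIO;

A note on erofs_onlinefolio_init/_split/_end used throughout this file: they generalize the removed z_erofs_onlinepage_* helpers, keeping an atomic count of outstanding parts plus a sticky error bit in folio->private, with the final _end call marking the folio uptodate (absent errors) and unlocking it. A sketch assuming the same single-error-bit encoding as the old onlinepage code shown earlier in this diff:

	#define DEMO_EIO	(1 << 30)	/* error bit, as in the old helpers */

	static void demo_end(struct folio *folio, int err)
	{
		int orig, v;

		do {	/* drop one part; errors are sticky */
			orig = atomic_read((atomic_t *)&folio->private);
			v = (orig - 1) | (err ? DEMO_EIO : 0);
		} while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);

		if (!(v & ~DEMO_EIO)) {	/* last part: finalize the folio */
			folio->private = NULL;	/* real helpers also clear the flag */
			if (!(v & DEMO_EIO))
				folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
	}
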
continue; } be->compressed_pages[i] = page; - if (z_erofs_is_inline_pcluster(pcl)) { + if (z_erofs_is_inline_pcluster(pcl) || + erofs_folio_is_managed(EROFS_SB(be->sb), page_folio(page))) { if (!PageUptodate(page)) err = -EIO; continue; } DBG_BUGON(z_erofs_page_is_invalidated(page)); - if (!z_erofs_is_shortlived_page(page)) { - if (erofs_page_is_managed(EROFS_SB(be->sb), page)) { - if (!PageUptodate(page)) - err = -EIO; - continue; - } - z_erofs_do_decompressed_bvec(be, bvec); - *overlapped = true; - } + if (z_erofs_is_shortlived_page(page)) + continue; + z_erofs_do_decompressed_bvec(be, bvec); + *overlapped = true; } - - if (err) - return err; - return 0; + return err; } static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, @@ -1242,10 +1174,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, struct erofs_sb_info *const sbi = EROFS_SB(be->sb); struct z_erofs_pcluster *pcl = be->pcl; unsigned int pclusterpages = z_erofs_pclusterpages(pcl); - const struct z_erofs_decompressor *decompressor = - &erofs_decompressors[pcl->algorithmformat]; - unsigned int i, inputsize; - int err2; + const struct z_erofs_decompressor *decomp = + z_erofs_decomp[pcl->algorithmformat]; + int i, j, jtop, err2; struct page *page; bool overlapped; @@ -1279,40 +1210,34 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, err2 = z_erofs_parse_in_bvecs(be, &overlapped); if (err2) err = err2; - if (err) - goto out; - - if (z_erofs_is_inline_pcluster(pcl)) - inputsize = pcl->tailpacking_size; - else - inputsize = pclusterpages * PAGE_SIZE; - - err = decompressor->decompress(&(struct z_erofs_decompress_req) { + if (!err) + err = decomp->decompress(&(struct z_erofs_decompress_req) { .sb = be->sb, .in = be->compressed_pages, .out = be->decompressed_pages, .pageofs_in = pcl->pageofs_in, .pageofs_out = pcl->pageofs_out, - .inputsize = inputsize, + .inputsize = pcl->pclustersize, .outputsize = pcl->length, .alg = pcl->algorithmformat, .inplace_io = overlapped, .partial_decoding = pcl->partial, .fillgaps = pcl->multibases, + .gfp = pcl->besteffort ?
GFP_KERNEL : + GFP_NOWAIT | __GFP_NORETRY }, be->pagepool); -out: /* must handle all compressed pages before actual file pages */ if (z_erofs_is_inline_pcluster(pcl)) { page = pcl->compressed_bvecs[0].page; WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL); put_page(page); } else { + /* managed folios are still left in compressed_bvecs[] */ for (i = 0; i < pclusterpages; ++i) { - /* consider shortlived pages added when decompressing */ page = be->compressed_pages[i]; - - if (erofs_page_is_managed(sbi, page)) + if (!page || + erofs_folio_is_managed(sbi, page_folio(page))) continue; (void)z_erofs_put_shortlivedpage(be->pagepool, page); WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); @@ -1321,27 +1246,38 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, if (be->compressed_pages < be->onstack_pages || be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES) kvfree(be->compressed_pages); - z_erofs_fill_other_copies(be, err); + jtop = 0; + z_erofs_fill_other_copies(be, err); for (i = 0; i < be->nr_pages; ++i) { page = be->decompressed_pages[i]; if (!page) continue; DBG_BUGON(z_erofs_page_is_invalidated(page)); - - /* recycle all individual short-lived pages */ - if (z_erofs_put_shortlivedpage(be->pagepool, page)) + if (!z_erofs_is_shortlived_page(page)) { + erofs_onlinefolio_end(page_folio(page), err); + continue; + } + if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) { + erofs_pagepool_add(be->pagepool, page); continue; - z_erofs_onlinepage_endio(page, err); + } + for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j) + ; + if (j >= jtop) /* this bounce page is newly detected */ + be->decompressed_pages[jtop++] = page; } - + while (jtop) + erofs_pagepool_add(be->pagepool, + be->decompressed_pages[--jtop]); if (be->decompressed_pages != be->onstack_pages) kvfree(be->decompressed_pages); pcl->length = 0; pcl->partial = true; pcl->multibases = false; + pcl->besteffort = false; pcl->bvset.nextpage = NULL; pcl->vcnt = 0; @@ -1351,8 +1287,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, return err; } -static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, - struct page **pagepool) +static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, + struct page **pagepool) { struct z_erofs_decompress_backend be = { .sb = io->sb, @@ -1361,6 +1297,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, LIST_HEAD_INIT(be.decompressed_secondary_bvecs), }; z_erofs_next_pcluster_t owned = io->head; + int err = io->eio ? -EIO : 0; while (owned != Z_EROFS_PCLUSTER_TAIL) { DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); @@ -1368,12 +1305,13 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, be.pcl = container_of(owned, struct z_erofs_pcluster, next); owned = READ_ONCE(be.pcl->next); - z_erofs_decompress_pcluster(&be, io->eio ? 
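-EIO : 0);

The new .gfp field above is what makes readahead decompression "best effort": opportunistic requests allocate extra buffers with GFP_NOWAIT | __GFP_NORETRY and are allowed to simply fail, while synchronous reads (pcl->besteffort) use GFP_KERNEL and may block in reclaim. The selection, condensed as an illustration:

	/* allocation strictness follows the request type */
	gfp_t gfp = pcl->besteffort ? GFP_KERNEL	/* may enter reclaim */
				    : GFP_NOWAIT | __GFP_NORETRY; /* opportunistic */
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;	/* a later synchronous read will retry harder */
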
+ err = z_erofs_decompress_pcluster(&be, err) ?: err; if (z_erofs_is_inline_pcluster(be.pcl)) z_erofs_free_pcluster(be.pcl); else erofs_workgroup_put(&be.pcl->obj); } + return err; } static void z_erofs_decompressqueue_work(struct work_struct *work) @@ -1435,113 +1373,112 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, z_erofs_decompressqueue_work(&io->u.work); } -static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, - unsigned int nr, - struct page **pagepool, - struct address_space *mc) +static void z_erofs_fill_bio_vec(struct bio_vec *bvec, + struct z_erofs_decompress_frontend *f, + struct z_erofs_pcluster *pcl, + unsigned int nr, + struct address_space *mc) { - const pgoff_t index = pcl->obj.index; gfp_t gfp = mapping_gfp_mask(mc); bool tocache = false; - + struct z_erofs_bvec zbv; struct address_space *mapping; - struct page *oldpage, *page; - int justfound; + struct folio *folio; + struct page *page; + int bs = i_blocksize(f->inode); + /* Except for inplace folios, the entire folio can be used for I/Os */ + bvec->bv_offset = 0; + bvec->bv_len = PAGE_SIZE; repeat: - page = READ_ONCE(pcl->compressed_bvecs[nr].page); - oldpage = page; - - if (!page) - goto out_allocpage; + spin_lock(&pcl->obj.lockref.lock); + zbv = pcl->compressed_bvecs[nr]; + spin_unlock(&pcl->obj.lockref.lock); + if (!zbv.page) + goto out_allocfolio; - justfound = (unsigned long)page & 1UL; - page = (struct page *)((unsigned long)page & ~1UL); + bvec->bv_page = zbv.page; + DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page)); + folio = page_folio(zbv.page); /* - * preallocated cached pages, which is used to avoid direct reclaim - * otherwise, it will go inplace I/O path instead. + * Handle preallocated cached folios. We tried to allocate such folios + * without triggering direct reclaim. If allocation failed, inplace + * file-backed folios will be used instead. */ - if (page->private == Z_EROFS_PREALLOCATED_PAGE) { - WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); - set_page_private(page, 0); + if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) { tocache = true; goto out_tocache; } - mapping = READ_ONCE(page->mapping); + mapping = READ_ONCE(folio->mapping); /* - * file-backed online pages in plcuster are all locked steady, - * therefore it is impossible for `mapping' to be NULL. + * File-backed folios for inplace I/Os are all locked steady, + * therefore it is impossible for `mapping` to be NULL. */ - if (mapping && mapping != mc) - /* ought to be unmanaged pages */ - goto out; - - /* directly return for shortlived page as well */ - if (z_erofs_is_shortlived_page(page)) - goto out; - - lock_page(page); - - /* only true if page reclaim goes wrong, should never happen */ - DBG_BUGON(justfound && PagePrivate(page)); - - /* the page is still in manage cache */ - if (page->mapping == mc) { - WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); - - if (!PagePrivate(page)) { - /* - * impossible to be !PagePrivate(page) for - * the current restriction as well if - * the page is already in compressed_bvecs[] - */ - DBG_BUGON(!justfound); + if (mapping && mapping != mc) { + if (zbv.offset < 0) + bvec->bv_offset = round_up(-zbv.offset, bs); + bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset; + return; + } - justfound = 0; - set_page_private(page, (unsigned long)pcl); - SetPagePrivate(page); + folio_lock(folio); + if (likely(folio->mapping == mc)) { + /* + * The cached folio is still in managed cache but without + * a valid `->private` pcluster hint.
Let's reconnect them. + */ + if (!folio_test_private(folio)) { + folio_attach_private(folio, pcl); + /* compressed_bvecs[] already takes a ref before */ + folio_put(folio); } - - /* no need to submit io if it is already up-to-date */ - if (PageUptodate(page)) { - unlock_page(page); - page = NULL; + if (likely(folio->private == pcl)) { + /* don't submit cache I/Os again if already uptodate */ + if (folio_test_uptodate(folio)) { + folio_unlock(folio); + bvec->bv_page = NULL; + } + return; } - goto out; + /* + * Already linked with another pcluster, which only appears in + * crafted images by fuzzers for now. But handle this anyway. + */ + tocache = false; /* use temporary short-lived pages */ + } else { + DBG_BUGON(1); /* referenced managed folios can't be truncated */ + tocache = true; } - - /* - * the managed page has been truncated, it's unsafe to - * reuse this one, let's allocate a new cache-managed page. - */ - DBG_BUGON(page->mapping); - DBG_BUGON(!justfound); - - tocache = true; - unlock_page(page); - put_page(page); -out_allocpage: - page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL); - if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page, - oldpage, page)) { - erofs_pagepool_add(pagepool, page); + folio_unlock(folio); + folio_put(folio); +out_allocfolio: + page = __erofs_allocpage(&f->pagepool, gfp, true); + spin_lock(&pcl->obj.lockref.lock); + if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) { + if (page) + erofs_pagepool_add(&f->pagepool, page); + spin_unlock(&pcl->obj.lockref.lock); cond_resched(); goto repeat; } + pcl->compressed_bvecs[nr].page = page ? page : ERR_PTR(-ENOMEM); + spin_unlock(&pcl->obj.lockref.lock); + bvec->bv_page = page; + if (!page) + return; + folio = page_folio(page); out_tocache: - if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) { - /* turn into temporary page if fails (1 ref) */ - set_page_private(page, Z_EROFS_SHORTLIVED_PAGE); - goto out; + if (!tocache || bs != PAGE_SIZE || + filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) { + /* turn into a temporary shortlived folio (1 ref) */ + folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE; + return; } - attach_page_private(page, pcl); - /* drop a refcount added by allocpage (then we have 2 refs here) */ - put_page(page); - -out: /* the only exit (for tracing and debugging) */ - return page; + folio_attach_private(folio, pcl); + /* drop a refcount added by allocpage (then 2 refs in total here) */ + folio_put(folio); } static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb, @@ -1596,29 +1533,29 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, qtail[JQ_BYPASS] = &pcl->next; } -static void z_erofs_decompressqueue_endio(struct bio *bio) +static void z_erofs_endio(struct bio *bio) { struct z_erofs_decompressqueue *q = bio->bi_private; blk_status_t err = bio->bi_status; - struct bio_vec *bvec; - struct bvec_iter_all iter_all; + struct folio_iter fi; - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *page = bvec->bv_page; + bio_for_each_folio_all(fi, bio) { + struct folio *folio = fi.folio; - DBG_BUGON(PageUptodate(page)); - DBG_BUGON(z_erofs_page_is_invalidated(page)); + DBG_BUGON(folio_test_uptodate(folio)); + DBG_BUGON(z_erofs_page_is_invalidated(&folio->page)); + if (!erofs_folio_is_managed(EROFS_SB(q->sb), folio)) + continue; - if (erofs_page_is_managed(EROFS_SB(q->sb), page)) { - if (!err) - SetPageUptodate(page); - unlock_page(page); - } + if (!err) + folio_mark_uptodate(folio); + folio_unlock(folio); } if (err) q->eio 
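= true;

z_erofs_endio() walks each completed bio folio by folio; only managed-cache folios are marked uptodate and unlocked here, while inplace file-backed folios are finalized later through erofs_onlinefolio_end(). The iteration pattern, stripped of the managed-cache check for illustration:

	static void demo_endio(struct bio *bio)
	{
		struct folio_iter fi;

		bio_for_each_folio_all(fi, bio) {
			if (!bio->bi_status)
				folio_mark_uptodate(fi.folio);
			folio_unlock(fi.folio);
		}
		bio_put(bio);
	}
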
= true; z_erofs_decompress_kickoff(q, -1); - bio_put(bio); + if (bio->bi_bdev) + bio_put(bio); } static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, @@ -1631,17 +1568,13 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, struct z_erofs_decompressqueue *q[NR_JOBQUEUES]; z_erofs_next_pcluster_t owned_head = f->owned_head; /* bio is NULL initially, so no need to initialize last_{index,bdev} */ - pgoff_t last_index; - struct block_device *last_bdev; + erofs_off_t last_pa; unsigned int nr_bios = 0; struct bio *bio = NULL; unsigned long pflags; int memstall = 0; - /* - * if managed cache is enabled, bypass jobqueue is needed, - * no need to read from device for all pclusters in this queue. - */ + /* No need to read from device for pclusters in the bypass queue. */ q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL); q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg); @@ -1654,7 +1587,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, do { struct erofs_map_dev mdev; struct z_erofs_pcluster *pcl; - pgoff_t cur, end; + erofs_off_t cur, end; + struct bio_vec bvec; unsigned int i = 0; bool bypass = true; @@ -1673,21 +1607,20 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, }; (void)erofs_map_dev(sb, &mdev); - cur = erofs_blknr(sb, mdev.m_pa); - end = cur + pcl->pclusterpages; - + cur = mdev.m_pa; + end = cur + pcl->pclustersize; do { - struct page *page; - - page = pickup_page_for_submission(pcl, i++, - &f->pagepool, mc); - if (!page) - continue; + bvec.bv_page = NULL; + if (bio && (cur != last_pa || + bio->bi_bdev != mdev.m_bdev)) { +drain_io: + if (erofs_is_fileio_mode(EROFS_SB(sb))) + erofs_fileio_submit_bio(bio); + else if (erofs_is_fscache_mode(sb)) + erofs_fscache_submit_bio(bio); + else + submit_bio(bio); - if (bio && (cur != last_index + 1 || - last_bdev != mdev.m_bdev)) { -submit_bio_retry: - submit_bio(bio); if (memstall) { psi_memstall_leave(&pflags); memstall = 0; @@ -1695,31 +1628,43 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, bio = NULL; } - if (unlikely(PageWorkingset(page)) && !memstall) { + if (!bvec.bv_page) { + z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc); + if (!bvec.bv_page) + continue; + if (cur + bvec.bv_len > end) + bvec.bv_len = end - cur; + DBG_BUGON(bvec.bv_len < sb->s_blocksize); + } + + if (unlikely(PageWorkingset(bvec.bv_page)) && + !memstall) { psi_memstall_enter(&pflags); memstall = 1; } if (!bio) { - bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS, - REQ_OP_READ, GFP_NOIO); - bio->bi_end_io = z_erofs_decompressqueue_endio; - - last_bdev = mdev.m_bdev; - bio->bi_iter.bi_sector = (sector_t)cur << - (sb->s_blocksize_bits - 9); + if (erofs_is_fileio_mode(EROFS_SB(sb))) + bio = erofs_fileio_bio_alloc(&mdev); + else if (erofs_is_fscache_mode(sb)) + bio = erofs_fscache_bio_alloc(&mdev); + else + bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS, + REQ_OP_READ, GFP_NOIO); + bio->bi_end_io = z_erofs_endio; + bio->bi_iter.bi_sector = cur >> 9; bio->bi_private = q[JQ_SUBMIT]; if (readahead) bio->bi_opf |= REQ_RAHEAD; ++nr_bios; } - if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) - goto submit_bio_retry; - - last_index = cur; + if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len, + bvec.bv_offset)) + goto drain_io; + last_pa = cur + bvec.bv_len; bypass = false; - } while (++cur < end); + } while ((cur += bvec.bv_len) < end); if (!bypass) qtail[JQ_SUBMIT] = &pcl->next; @@ -1728,7 +1673,12 @@ static void z_erofs_submit_queue(struct 
z_erofs_decompress_frontend *f, } while (owned_head != Z_EROFS_PCLUSTER_TAIL); if (bio) { - submit_bio(bio); + if (erofs_is_fileio_mode(EROFS_SB(sb))) + erofs_fileio_submit_bio(bio); + else if (erofs_is_fscache_mode(sb)) + erofs_fscache_submit_bio(bio); + else + submit_bio(bio); if (memstall) psi_memstall_leave(&pflags); } @@ -1744,26 +1694,28 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios); } -static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f, - bool force_fg, bool ra) +static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f, + unsigned int ra_folios) { struct z_erofs_decompressqueue io[NR_JOBQUEUES]; + struct erofs_sb_info *sbi = EROFS_I_SB(f->inode); + bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios); + int err; if (f->owned_head == Z_EROFS_PCLUSTER_TAIL) - return; - z_erofs_submit_queue(f, io, &force_fg, ra); + return 0; + z_erofs_submit_queue(f, io, &force_fg, !!ra_folios); /* handle bypass queue (no i/o pclusters) immediately */ - z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool); - + err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool); if (!force_fg) - return; + return err; /* wait until all bios are completed */ wait_for_completion_io(&io[JQ_SUBMIT].u.done); /* handle synchronous decompress queue in the caller context */ - z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool); + return z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool) ?: err; } /* @@ -1798,7 +1750,6 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, end = round_up(end, PAGE_SIZE); } else { end = round_up(map->m_la, PAGE_SIZE); - if (!map->m_llen) return; } @@ -1806,15 +1757,15 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, cur = map->m_la + map->m_llen - 1; while ((cur >= end) && (cur < i_size_read(inode))) { pgoff_t index = cur >> PAGE_SHIFT; - struct page *page; + struct folio *folio; - page = erofs_grab_cache_page_nowait(inode->i_mapping, index); - if (page) { - if (PageUptodate(page)) - unlock_page(page); + folio = erofs_grab_folio_nowait(inode->i_mapping, index); + if (!IS_ERR_OR_NULL(folio)) { + if (folio_test_uptodate(folio)) + folio_unlock(folio); else - (void)z_erofs_do_read_page(f, page); - put_page(page); + z_erofs_scan_folio(f, folio, !!rac); + folio_put(folio); } if (cur < PAGE_SIZE) @@ -1826,7 +1777,6 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, static int z_erofs_read_folio(struct file *file, struct folio *folio) { struct inode *const inode = folio->mapping->host; - struct erofs_sb_info *const sbi = EROFS_I_SB(inode); struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); int err; @@ -1834,13 +1784,12 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio) f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT; z_erofs_pcluster_readmore(&f, NULL, true); - err = z_erofs_do_read_page(&f, &folio->page); + err = z_erofs_scan_folio(&f, folio, false); z_erofs_pcluster_readmore(&f, NULL, false); z_erofs_pcluster_end(&f); - /* if some compressed cluster ready, need submit them anyway */ - z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false); - + /* if some pclusters are ready, we need to submit them anyway */ + err = z_erofs_runqueue(&f, 0) ?: err; if (err && err != -EINTR) erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu", err, folio->index, EROFS_I(inode)->nid); @@ -1853,7 +1802,6 @@ static int z_erofs_read_folio(struct file *file,
struct folio *folio) static void z_erofs_readahead(struct readahead_control *rac) { struct inode *const inode = rac->mapping->host; - struct erofs_sb_info *const sbi = EROFS_I_SB(inode); struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); struct folio *head = NULL, *folio; unsigned int nr_folios; @@ -1875,7 +1823,7 @@ static void z_erofs_readahead(struct readahead_control *rac) folio = head; head = folio_get_private(folio); - err = z_erofs_do_read_page(&f, &folio->page); + err = z_erofs_scan_folio(&f, folio, true); if (err && err != -EINTR) erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu", folio->index, EROFS_I(inode)->nid); @@ -1883,7 +1831,7 @@ static void z_erofs_readahead(struct readahead_control *rac) z_erofs_pcluster_readmore(&f, rac, false); z_erofs_pcluster_end(&f); - z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_folios), true); + (void)z_erofs_runqueue(&f, nr_folios); erofs_put_metabuf(&f.map.buf); erofs_release_pages(&f.pagepool); } diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 76566c2cbf63eb0f524782848392a5268b4af337..60414b8028e44c756bd7064566b59679418f6737 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -10,8 +10,6 @@ struct z_erofs_maprecorder { struct inode *inode; struct erofs_map_blocks *map; - void *kaddr; - unsigned long lcn; /* compression extent information gathered */ u8 type, headtype; @@ -31,22 +29,17 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, vi->inode_isize + vi->xattr_isize) + lcn * sizeof(struct z_erofs_lcluster_index); struct z_erofs_lcluster_index *di; - unsigned int advise, type; - - m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb, - erofs_blknr(inode->i_sb, pos), EROFS_KMAP); - if (IS_ERR(m->kaddr)) - return PTR_ERR(m->kaddr); + unsigned int advise; - m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index); + di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, EROFS_KMAP); + if (IS_ERR(di)) + return PTR_ERR(di); m->lcn = lcn; - di = m->kaddr + erofs_blkoff(inode->i_sb, pos); + m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index); advise = le16_to_cpu(di->di_advise); - type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) & - ((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1); - switch (type) { - case Z_EROFS_LCLUSTER_TYPE_NONHEAD: + m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK; + if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { m->clusterofs = 1 << vi->z_logical_clusterbits; m->delta[0] = le16_to_cpu(di->di_u.delta[0]); if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) { @@ -55,29 +48,19 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, DBG_BUGON(1); return -EFSCORRUPTED; } - m->compressedblks = m->delta[0] & - ~Z_EROFS_LI_D0_CBLKCNT; + m->compressedblks = m->delta[0] & ~Z_EROFS_LI_D0_CBLKCNT; m->delta[0] = 1; } m->delta[1] = le16_to_cpu(di->di_u.delta[1]); - break; - case Z_EROFS_LCLUSTER_TYPE_PLAIN: - case Z_EROFS_LCLUSTER_TYPE_HEAD1: - case Z_EROFS_LCLUSTER_TYPE_HEAD2: - if (advise & Z_EROFS_LI_PARTIAL_REF) - m->partialref = true; + } else { + m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF); m->clusterofs = le16_to_cpu(di->di_clusterofs); if (m->clusterofs >= 1 << vi->z_logical_clusterbits) { DBG_BUGON(1); return -EFSCORRUPTED; } m->pblk = le32_to_cpu(di->di_u.blkaddr); - break; - default: - DBG_BUGON(1); - return -EOPNOTSUPP; } - m->type = type; return 0; } @@ -120,10 +103,10 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, { struct erofs_inode *const vi = EROFS_I(m->inode); const unsigned int lclusterbits = 
vi->z_logical_clusterbits; - unsigned int vcnt, base, lo, lobits, encodebits, nblk, eofs; - int i; - u8 *in, type; + unsigned int vcnt, lo, lobits, encodebits, nblk, bytes; bool big_pcluster; + u8 *in, type; + int i; if (1 << amortizedshift == 4 && lclusterbits <= 14) vcnt = 2; @@ -132,17 +115,19 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, else return -EOPNOTSUPP; + in = erofs_read_metabuf(&m->map->buf, m->inode->i_sb, pos, EROFS_KMAP); + if (IS_ERR(in)) + return PTR_ERR(in); + /* it doesn't equal to round_up(..) */ m->nextpackoff = round_down(pos, vcnt << amortizedshift) + (vcnt << amortizedshift); big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1; lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U); encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; - eofs = erofs_blkoff(m->inode->i_sb, pos); - base = round_down(eofs, vcnt << amortizedshift); - in = m->kaddr + base; - - i = (eofs - base) >> amortizedshift; + bytes = pos & ((vcnt << amortizedshift) - 1); + in -= bytes; + i = bytes >> amortizedshift; lo = decode_compactedbits(lobits, in, encodebits * i, &type); m->type = type; @@ -266,10 +251,6 @@ static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m, amortizedshift = 2; out: pos += lcn * (1 << amortizedshift); - m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb, - erofs_blknr(inode->i_sb, pos), EROFS_KMAP); - if (IS_ERR(m->kaddr)) - return PTR_ERR(m->kaddr); return unpack_compacted_index(m, amortizedshift, pos, lookahead); } @@ -562,7 +543,8 @@ static int z_erofs_do_map_blocks(struct inode *inode, if ((flags & EROFS_GET_BLOCKS_FIEMAP) || ((flags & EROFS_GET_BLOCKS_READMORE) && (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA || - map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE) && + map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE || + map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) && map->m_llen >= i_blocksize(inode))) { err = z_erofs_get_extent_decompressedlen(&m); if (!err) @@ -581,7 +563,6 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) int err, headnr; erofs_off_t pos; struct erofs_buf buf = __EROFS_BUF_INITIALIZER; - void *kaddr; struct z_erofs_map_header *h; if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) { @@ -601,13 +582,12 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) goto out_unlock; pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8); - kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP); - if (IS_ERR(kaddr)) { - err = PTR_ERR(kaddr); + h = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP); + if (IS_ERR(h)) { + err = PTR_ERR(h); goto out_unlock; } - h = kaddr + erofs_blkoff(sb, pos); /* * if the highest bit of the 8-byte map header is set, the whole file * is stored in the packed inode. The rest bits keeps z_fragmentoff. 
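The zmap.c hunks above all apply one refactor: erofs_read_metabuf() now takes the byte position directly and returns a pointer that is already adjusted inside the metadata block, so callers drop the separate erofs_blknr()/erofs_blkoff() arithmetic (and the cached m->kaddr field). A before/after sketch of the calling convention, using names from the hunks with the surrounding definitions assumed::

    /* before: block-number lookup; the caller applies the intra-block offset */
    kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
                               erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
    if (IS_ERR(kaddr))
            return PTR_ERR(kaddr);
    di = kaddr + erofs_blkoff(inode->i_sb, pos);

    /* after: one call; the returned pointer already points at 'pos' */
    di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, EROFS_KMAP);
    if (IS_ERR(di))
            return PTR_ERR(di);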
@@ -699,34 +679,32 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, struct erofs_inode *const vi = EROFS_I(inode); int err = 0; - trace_z_erofs_map_blocks_iter_enter(inode, map, flags); - - /* when trying to read beyond EOF, leave it unmapped */ - if (map->m_la >= inode->i_size) { + trace_erofs_map_blocks_enter(inode, map, flags); + if (map->m_la >= inode->i_size) { /* post-EOF unmapped extent */ map->m_llen = map->m_la + 1 - inode->i_size; map->m_la = inode->i_size; map->m_flags = 0; - goto out; - } - - err = z_erofs_fill_inode_lazy(inode); - if (err) - goto out; - - if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) && - !vi->z_tailextent_headlcn) { - map->m_la = 0; - map->m_llen = inode->i_size; - map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED | - EROFS_MAP_FRAGMENT; - goto out; + } else { + err = z_erofs_fill_inode_lazy(inode); + if (!err) { + if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) && + !vi->z_tailextent_headlcn) { + map->m_la = 0; + map->m_llen = inode->i_size; + map->m_flags = EROFS_MAP_MAPPED | + EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT; + } else { + err = z_erofs_do_map_blocks(inode, map, flags); + } + } + if (!err && (map->m_flags & EROFS_MAP_ENCODED) && + unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE || + map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE)) + err = -EOPNOTSUPP; + if (err) + map->m_llen = 0; } - - err = z_erofs_do_map_blocks(inode, map, flags); -out: - if (err) - map->m_llen = 0; - trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err); + trace_erofs_map_blocks_exit(inode, map, flags, err); return err; } diff --git a/fs/erofs/utils.c b/fs/erofs/zutil.c similarity index 57% rename from fs/erofs/utils.c rename to fs/erofs/zutil.c index 4256a85719a1d25fbe3f0aa33820fafe3ad01d45..1262809b41046c7116d524e41d72375bb1ff2fc6 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/zutil.c @@ -5,16 +5,189 @@ */ #include "internal.h" -struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp) +struct z_erofs_gbuf { + spinlock_t lock; + void *ptr; + struct page **pages; + unsigned int nrpages; +}; + +static struct z_erofs_gbuf *z_erofs_gbufpool, *z_erofs_rsvbuf; +static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages, + z_erofs_rsv_nrpages; + +module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444); +module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444); + +static atomic_long_t erofs_global_shrink_cnt; /* for all mounted instances */ +/* protected by 'erofs_sb_list_lock' */ +static unsigned int shrinker_run_no; + +/* protects the mounted 'erofs_sb_list' */ +static DEFINE_SPINLOCK(erofs_sb_list_lock); +static LIST_HEAD(erofs_sb_list); + +static unsigned int z_erofs_gbuf_id(void) +{ + return raw_smp_processor_id() % z_erofs_gbuf_count; +} + +void *z_erofs_get_gbuf(unsigned int requiredpages) + __acquires(gbuf->lock) +{ + struct z_erofs_gbuf *gbuf; + + migrate_disable(); + gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()]; + spin_lock(&gbuf->lock); + /* check if the buffer is too small */ + if (requiredpages > gbuf->nrpages) { + spin_unlock(&gbuf->lock); + migrate_enable(); + /* (for sparse checker) pretend gbuf->lock is still taken */ + __acquire(gbuf->lock); + return NULL; + } + return gbuf->ptr; +} + +void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock) +{ + struct z_erofs_gbuf *gbuf; + + gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()]; + DBG_BUGON(gbuf->ptr != ptr); + spin_unlock(&gbuf->lock); + migrate_enable(); +} + +int z_erofs_gbuf_growsize(unsigned int nrpages) +{ + static 
DEFINE_MUTEX(gbuf_resize_mutex); + struct page **tmp_pages = NULL; + struct z_erofs_gbuf *gbuf; + void *ptr, *old_ptr; + int last, i, j; + + mutex_lock(&gbuf_resize_mutex); + /* avoid shrinking gbufs, since we don't know how many fses rely on them */ + if (nrpages <= z_erofs_gbuf_nrpages) { + mutex_unlock(&gbuf_resize_mutex); + return 0; + } + + for (i = 0; i < z_erofs_gbuf_count; ++i) { + gbuf = &z_erofs_gbufpool[i]; + tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL); + if (!tmp_pages) + goto out; + + for (j = 0; j < gbuf->nrpages; ++j) + tmp_pages[j] = gbuf->pages[j]; + do { + last = j; + j = alloc_pages_bulk_array(GFP_KERNEL, nrpages, + tmp_pages); + if (last == j) + goto out; + } while (j != nrpages); + + ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL); + if (!ptr) + goto out; + + spin_lock(&gbuf->lock); + kfree(gbuf->pages); + gbuf->pages = tmp_pages; + old_ptr = gbuf->ptr; + gbuf->ptr = ptr; + gbuf->nrpages = nrpages; + spin_unlock(&gbuf->lock); + if (old_ptr) + vunmap(old_ptr); + } + z_erofs_gbuf_nrpages = nrpages; +out: + if (i < z_erofs_gbuf_count && tmp_pages) { + for (j = 0; j < nrpages; ++j) + if (tmp_pages[j] && (j >= gbuf->nrpages || + tmp_pages[j] != gbuf->pages[j])) + __free_page(tmp_pages[j]); + kfree(tmp_pages); + } + mutex_unlock(&gbuf_resize_mutex); + return i < z_erofs_gbuf_count ? -ENOMEM : 0; +} + +int __init z_erofs_gbuf_init(void) +{ + unsigned int i, total = num_possible_cpus(); + + if (z_erofs_gbuf_count) + total = min(z_erofs_gbuf_count, total); + z_erofs_gbuf_count = total; + + /* The last (special) global buffer is the reserved buffer */ + total += !!z_erofs_rsv_nrpages; + + z_erofs_gbufpool = kcalloc(total, sizeof(*z_erofs_gbufpool), + GFP_KERNEL); + if (!z_erofs_gbufpool) + return -ENOMEM; + + if (z_erofs_rsv_nrpages) { + z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1]; + z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages, + sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL); + if (!z_erofs_rsvbuf->pages) { + z_erofs_rsvbuf = NULL; + z_erofs_rsv_nrpages = 0; + } + } + for (i = 0; i < total; ++i) + spin_lock_init(&z_erofs_gbufpool[i].lock); + return 0; +} + +void z_erofs_gbuf_exit(void) +{ + int i, j; + + for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) { + struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i]; + + if (gbuf->ptr) { + vunmap(gbuf->ptr); + gbuf->ptr = NULL; + } + + if (!gbuf->pages) + continue; + + for (j = 0; j < gbuf->nrpages; ++j) + if (gbuf->pages[j]) + put_page(gbuf->pages[j]); + kfree(gbuf->pages); + gbuf->pages = NULL; + } + kfree(z_erofs_gbufpool); +} + +struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv) { struct page *page = *pagepool; if (page) { - DBG_BUGON(page_ref_count(page) != 1); *pagepool = (struct page *)page_private(page); - } else { - page = alloc_page(gfp); + } else if (tryrsv && z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages) { + spin_lock(&z_erofs_rsvbuf->lock); + if (z_erofs_rsvbuf->nrpages) + page = z_erofs_rsvbuf->pages[--z_erofs_rsvbuf->nrpages]; + spin_unlock(&z_erofs_rsvbuf->lock); } + if (!page) + page = alloc_page(gfp); + DBG_BUGON(page && page_ref_count(page) != 1); return page; } @@ -24,14 +197,22 @@ void erofs_release_pages(struct page **pagepool) struct page *page = *pagepool; *pagepool = (struct page *)page_private(page); + /* try to fill reserved global pool first */ + if (z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages < + z_erofs_rsv_nrpages) { + spin_lock(&z_erofs_rsvbuf->lock); + if (z_erofs_rsvbuf->nrpages < z_erofs_rsv_nrpages) { + z_erofs_rsvbuf->pages[z_erofs_rsvbuf->nrpages++]
+ = page; + spin_unlock(&z_erofs_rsvbuf->lock); + continue; + } + spin_unlock(&z_erofs_rsvbuf->lock); + } put_page(page); } } -#ifdef CONFIG_EROFS_FS_ZIP -/* global shrink count (for all mounted EROFS instances) */ -static atomic_long_t erofs_global_shrink_cnt; - static bool erofs_workgroup_get(struct erofs_workgroup *grp) { if (lockref_get_not_zero(&grp->lockref)) @@ -81,7 +262,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb, repeat: xa_lock(&sbi->managed_pslots); pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index, - NULL, grp, GFP_NOFS); + NULL, grp, GFP_KERNEL); if (pre) { if (xa_is_err(pre)) { pre = ERR_PTR(xa_err(pre)); @@ -129,7 +310,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, * the XArray. Otherwise some cached pages could be still attached to * the orphan old workgroup when the new one is available in the tree. */ - if (erofs_try_to_free_all_cached_pages(sbi, grp)) + if (erofs_try_to_free_all_cached_folios(sbi, grp)) goto out; /* @@ -171,13 +352,6 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, return freed; } -/* protected by 'erofs_sb_list_lock' */ -static unsigned int shrinker_run_no; - -/* protects the mounted 'erofs_sb_list' */ -static DEFINE_SPINLOCK(erofs_sb_list_lock); -static LIST_HEAD(erofs_sb_list); - void erofs_shrinker_register(struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); @@ -279,4 +453,3 @@ void erofs_exit_shrinker(void) { unregister_shrinker(&erofs_shrinker_info); } -#endif /* !CONFIG_EROFS_FS_ZIP */ diff --git a/fs/eventfd.c b/fs/eventfd.c index 33a918f9566c310c663776e6c8983695e7476d18..3318d311e64813a83b52d87a99ae3691f66a9241 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -41,6 +41,9 @@ struct eventfd_ctx { __u64 count; unsigned int flags; int id; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask) diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 96a048d3f51bf5165df3e94ba92e9ade0840b1b6..3839b6057027ef7351710c1d66c151bb6bf7778d 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -50,6 +50,8 @@ #define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) +extern int sysctl_hardlink_cross_projid __read_mostly; + static struct buffer_head *ext4_append(handle_t *handle, struct inode *inode, ext4_lblk_t *block) @@ -3553,7 +3555,8 @@ static int ext4_link(struct dentry *old_dentry, if (err) return err; - if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) && + if (!sysctl_hardlink_cross_projid && + (ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) && (!projid_eq(EXT4_I(dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid))) return -EXDEV; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 71ced0ada9a2e5c2893906cef21179a7a235a5b5..32ed37fe40b8264e22334e85a0cad79579e4278a 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4976,6 +4976,7 @@ static int ext4_load_and_init_journal(struct super_block *sb, } set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); + set_task_ioprio(sbi->s_journal->j_checkpoint_task, ctx->journal_ioprio); sbi->s_journal->j_submit_inode_data_buffers = ext4_journal_submit_inode_data_buffers; @@ -5361,7 +5362,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) sb->s_qcop = &ext4_qctl_operations; sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; #endif - memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); + super_set_uuid(sb, es->s_uuid, sizeof(es->s_uuid)); 
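The ext4 hunk just above (and the matching f2fs hunk below) replaces an open-coded memcpy() into sb->s_uuid with super_set_uuid(). Assuming the common helper that records the UUID length alongside the bytes, the shape for any filesystem's fill_super is the following sketch; the examplefs names are placeholders::

    static int examplefs_fill_super(struct super_block *sb,
                                    struct examplefs_super_block *es)
    {
            /* one call publishes both the UUID bytes and their length */
            super_set_uuid(sb, es->s_uuid, sizeof(es->s_uuid));
            return 0;
    }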
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ mutex_init(&sbi->s_orphan_lock); @@ -6559,6 +6560,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb) if (sbi->s_journal) { ext4_init_journal_params(sb, sbi->s_journal); set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); + set_task_ioprio(sbi->s_journal->j_checkpoint_task, ctx->journal_ioprio); } /* Flush outstanding errors before changing fs state */ diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index b72fa103b9632a191417f7765764069ffc5194be..0b780d67b4661aa3ee4fc19ca0c771654ad3e66c 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -4448,7 +4448,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) sb->s_time_gran = 1; sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); - memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid)); + super_set_uuid(sb, (void *) raw_super->uuid, sizeof(raw_super->uuid)); sb->s_iflags |= SB_I_CGROUPWB; /* init f2fs-specific super block info */ diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 0a498bc60f5573478c6ff7191af0247ba44a83ff..4e9c22e10b96b8d0cbc03c6e08af5ef1e06c0f8d 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -858,6 +858,16 @@ void wbc_detach_inode(struct writeback_control *wbc) inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX); inode->i_wb_frn_history = history; + /* + * Without the wb list lock, i_wb can switch at any point, so we may + * judge the wrong wb anyway. + * + * The wb is switched to the root memcg unconditionally. We expect + * the correct wb (the best candidate) to be picked up in the next round. + */ + if (wb == inode->i_wb && wb_dying(wb) && !(inode->i_state & I_DIRTY_ALL)) + inode_switch_wbs(inode, root_mem_cgroup->css.id); + wb_put(wbc->wb); wbc->wb = NULL; } diff --git a/fs/fs_context.c b/fs/fs_context.c index 98589aae52085c82d4e8951ca63be928cb33cec8..8cc839a46f45a66192e6fb724a57250b91c14d05 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -378,6 +378,7 @@ void fc_drop_locked(struct fs_context *fc) fc->root = NULL; deactivate_locked_super(sb); } +EXPORT_SYMBOL_GPL(fc_drop_locked); static void legacy_fs_context_free(struct fs_context *fc); diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig index 038ed0b9aaa5d619cf498c55ffd0069dce2abf14..ad36e8915364f252c81c6f12b687dc8d22a370c8 100644 --- a/fs/fuse/Kconfig +++ b/fs/fuse/Kconfig @@ -52,3 +52,14 @@ config FUSE_DAX If you want to allow mounting a Virtio Filesystem with the "dax" option, answer Y. + +config VIRT_FUSE + tristate "FUSE device virtualization extension" + depends on FUSE_FS + help + This FUSE extension provides virtualized FUSE devices for container + workloads. Each virtualized FUSE device supports only one instance + of a FUSE filesystem, with special treatment of user namespaces. + + If you want to support FUSE device virtualization for containers, + answer Y or M. diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index 0c48b35c058d78d20966443de6a56928020729b4..0afc54209c494fd8bdd8d470073a344f99f439c3 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -3,11 +3,17 @@ # # Makefile for the FUSE filesystem.
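The wbc_detach_inode() hunk above stops clean inodes from pinning a dying bdi_writeback indefinitely: once the owning memcg wb is being torn down, the inode is handed over to the root memcg's wb, and the normal foreign-inode detection is expected to re-home it on a later write. A condensed sketch of the added check, with the helper semantics as used in the hunk::

    /* only a clean inode still attached to this dying wb is moved;
     * the root memcg wb always exists, so it is a safe fallback */
    if (wb == inode->i_wb &&                /* i_wb may have switched already */
        wb_dying(wb) &&                     /* owning memcg/blkcg going away */
        !(inode->i_state & I_DIRTY_ALL))    /* nothing dirty left to migrate */
            inode_switch_wbs(inode, root_mem_cgroup->css.id);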
# +# Needed for trace events +ccflags-y = -I$(src) + obj-$(CONFIG_FUSE_FS) += fuse.o obj-$(CONFIG_CUSE) += cuse.o obj-$(CONFIG_VIRTIO_FS) += virtiofs.o +obj-$(CONFIG_VIRT_FUSE) += virtfuse.o fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o +fuse-y += iomode.o fuse-$(CONFIG_FUSE_DAX) += dax.o +fuse-$(CONFIG_SYSCTL) += sysctl.o virtiofs-y := virtio_fs.o diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c index 3d192b80a561965e5f902f95e3dcf974308d1b36..04cfd8fee992e4c7117ac4f2cdf00acc117f51a9 100644 --- a/fs/fuse/acl.c +++ b/fs/fuse/acl.c @@ -146,8 +146,8 @@ int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, * be stripped. */ if (fc->posix_acl && - !vfsgid_in_group_p(i_gid_into_vfsgid(&nop_mnt_idmap, inode)) && - !capable_wrt_inode_uidgid(&nop_mnt_idmap, inode, CAP_FSETID)) + !in_group_or_capable(&nop_mnt_idmap, inode, + i_gid_into_vfsgid(&nop_mnt_idmap, inode))) extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID; ret = fuse_setxattr(inode, name, value, size, 0, extra_flags); diff --git a/fs/fuse/control.c b/fs/fuse/control.c index ab62e46242568a5e82c55ec546333d8f1d90d161..1bf928e277fecf55c81c7e0c992cca61b934014b 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -174,11 +174,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file, if (!fc) goto out; - down_read(&fc->killsb); - spin_lock(&fc->bg_lock); - fc->congestion_threshold = val; - spin_unlock(&fc->bg_lock); - up_read(&fc->killsb); + WRITE_ONCE(fc->congestion_threshold, val); fuse_conn_put(fc); out: return ret; diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index b6cad106c37e44258bd6e4433cd4aaedfbb98f65..40f3e640e0559aac0e543076202249f6ae335857 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -303,8 +303,8 @@ struct cuse_init_args { struct fuse_args_pages ap; struct cuse_init_in in; struct cuse_init_out out; - struct page *page; - struct fuse_page_desc desc; + struct folio *folio; + struct fuse_folio_desc desc; }; /** @@ -322,7 +322,7 @@ static void cuse_process_init_reply(struct fuse_mount *fm, struct fuse_args_pages *ap = &ia->ap; struct cuse_conn *cc = fc_to_cc(fc), *pos; struct cuse_init_out *arg = &ia->out; - struct page *page = ap->pages[0]; + struct folio *folio = ap->folios[0]; struct cuse_devinfo devinfo = { }; struct device *dev; struct cdev *cdev; @@ -339,7 +339,7 @@ static void cuse_process_init_reply(struct fuse_mount *fm, /* parse init reply */ cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL; - rc = cuse_parse_devinfo(page_address(page), ap->args.out_args[1].size, + rc = cuse_parse_devinfo(folio_address(folio), ap->args.out_args[1].size, &devinfo); if (rc) goto err; @@ -407,7 +407,7 @@ static void cuse_process_init_reply(struct fuse_mount *fm, kobject_uevent(&dev->kobj, KOBJ_ADD); out: kfree(ia); - __free_page(page); + folio_put(folio); return; err_cdev: @@ -425,7 +425,7 @@ static void cuse_process_init_reply(struct fuse_mount *fm, static int cuse_send_init(struct cuse_conn *cc) { int rc; - struct page *page; + struct folio *folio; struct fuse_mount *fm = &cc->fm; struct cuse_init_args *ia; struct fuse_args_pages *ap; @@ -433,13 +433,14 @@ static int cuse_send_init(struct cuse_conn *cc) BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE); rc = -ENOMEM; - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) + + folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 0); + if (!folio) goto err; ia = kzalloc(sizeof(*ia), GFP_KERNEL); if (!ia) - goto err_free_page; + goto err_free_folio; ap = &ia->ap; ia->in.major = FUSE_KERNEL_VERSION; @@ -455,18 +456,18 @@ static int 
cuse_send_init(struct cuse_conn *cc) ap->args.out_args[1].size = CUSE_INIT_INFO_MAX; ap->args.out_argvar = true; ap->args.out_pages = true; - ap->num_pages = 1; - ap->pages = &ia->page; + ap->num_folios = 1; + ap->folios = &ia->folio; ap->descs = &ia->desc; - ia->page = page; + ia->folio = folio; ia->desc.length = ap->args.out_args[1].size; ap->args.end = cuse_process_init_reply; rc = fuse_simple_background(fm, &ap->args, GFP_KERNEL); if (rc) { kfree(ia); -err_free_page: - __free_page(page); +err_free_folio: + folio_put(folio); } err: return rc; diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index 12ef91d170bb3091ac35a33d2b9dc38330b00948..9abbc2f2894f905099b48862d776083e6075fbba 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -774,16 +774,6 @@ ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) return ret; } -static int fuse_dax_writepages(struct address_space *mapping, - struct writeback_control *wbc) -{ - - struct inode *inode = mapping->host; - struct fuse_conn *fc = get_fuse_conn(inode); - - return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc); -} - static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order, bool write) { @@ -1323,7 +1313,6 @@ bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi) } static const struct address_space_operations fuse_dax_file_aops = { - .writepages = fuse_dax_writepages, .direct_IO = noop_direct_IO, .dirty_folio = noop_dirty_folio, }; diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 8573d79ef29c80c85ddaedfbb6d9f98675fcab41..52b65377c0c3e0519b9b4e926448380faa383dbf 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -22,6 +22,9 @@ #include #include +#define CREATE_TRACE_POINTS +#include "fuse_trace.h" + MODULE_ALIAS_MISCDEV(FUSE_MINOR); MODULE_ALIAS("devname:fuse"); @@ -31,6 +34,8 @@ MODULE_ALIAS("devname:fuse"); static struct kmem_cache *fuse_req_cachep; +static void end_requests(struct list_head *head); + static struct fuse_dev *fuse_get_dev(struct file *file) { /* @@ -192,11 +197,22 @@ unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args) } EXPORT_SYMBOL_GPL(fuse_len_args); -u64 fuse_get_unique(struct fuse_iqueue *fiq) +static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq) { fiq->reqctr += FUSE_REQ_ID_STEP; return fiq->reqctr; } + +u64 fuse_get_unique(struct fuse_iqueue *fiq) +{ + u64 ret; + + spin_lock(&fiq->lock); + ret = fuse_get_unique_locked(fiq); + spin_unlock(&fiq->lock); + + return ret; +} EXPORT_SYMBOL_GPL(fuse_get_unique); static unsigned int fuse_req_hash(u64 unique) @@ -215,22 +231,70 @@ __releases(fiq->lock) spin_unlock(&fiq->lock); } +static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget) +{ + spin_lock(&fiq->lock); + if (fiq->connected) { + fiq->forget_list_tail->next = forget; + fiq->forget_list_tail = forget; + fuse_dev_wake_and_unlock(fiq); + } else { + kfree(forget); + spin_unlock(&fiq->lock); + } +} + +static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) +{ + spin_lock(&fiq->lock); + if (list_empty(&req->intr_entry)) { + list_add_tail(&req->intr_entry, &fiq->interrupts); + /* + * Pairs with smp_mb() implied by test_and_set_bit() + * from fuse_request_end(). 
+ */ + smp_mb(); + if (test_bit(FR_FINISHED, &req->flags)) { + list_del_init(&req->intr_entry); + spin_unlock(&fiq->lock); + } else { + fuse_dev_wake_and_unlock(fiq); + } + } else { + spin_unlock(&fiq->lock); + } +} + +static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req) +{ + spin_lock(&fiq->lock); + if (fiq->connected) { + if (req->in.h.opcode != FUSE_NOTIFY_REPLY) + req->in.h.unique = fuse_get_unique_locked(fiq); + list_add_tail(&req->list, &fiq->pending); + fuse_dev_wake_and_unlock(fiq); + } else { + spin_unlock(&fiq->lock); + req->out.h.error = -ENOTCONN; + clear_bit(FR_PENDING, &req->flags); + fuse_request_end(req); + } +} + const struct fuse_iqueue_ops fuse_dev_fiq_ops = { - .wake_forget_and_unlock = fuse_dev_wake_and_unlock, - .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock, - .wake_pending_and_unlock = fuse_dev_wake_and_unlock, + .send_forget = fuse_dev_queue_forget, + .send_interrupt = fuse_dev_queue_interrupt, + .send_req = fuse_dev_queue_req, }; EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops); -static void queue_request_and_unlock(struct fuse_iqueue *fiq, - struct fuse_req *req) -__releases(fiq->lock) +static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req) { req->in.h.len = sizeof(struct fuse_in_header) + fuse_len_args(req->args->in_numargs, (struct fuse_arg *) req->args->in_args); - list_add_tail(&req->list, &fiq->pending); - fiq->ops->wake_pending_and_unlock(fiq); + trace_fuse_request_send(req); + fiq->ops->send_req(fiq, req); } void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, @@ -241,15 +305,7 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, forget->forget_one.nodeid = nodeid; forget->forget_one.nlookup = nlookup; - spin_lock(&fiq->lock); - if (fiq->connected) { - fiq->forget_list_tail->next = forget; - fiq->forget_list_tail = forget; - fiq->ops->wake_forget_and_unlock(fiq); - } else { - kfree(forget); - spin_unlock(&fiq->lock); - } + fiq->ops->send_forget(fiq, forget); } static void flush_bg_queue(struct fuse_conn *fc) @@ -263,9 +319,7 @@ static void flush_bg_queue(struct fuse_conn *fc) req = list_first_entry(&fc->bg_queue, struct fuse_req, list); list_del(&req->list); fc->active_background++; - spin_lock(&fiq->lock); - req->in.h.unique = fuse_get_unique(fiq); - queue_request_and_unlock(fiq, req); + fuse_send_one(fiq, req); } } @@ -286,6 +340,7 @@ void fuse_request_end(struct fuse_req *req) if (test_and_set_bit(FR_FINISHED, &req->flags)) goto put_request; + trace_fuse_request_end(req); /* * test_and_set_bit() implies smp_mb() between bit * changing and below FR_INTERRUPTED check. Pairs with @@ -335,29 +390,12 @@ static int queue_interrupt(struct fuse_req *req) { struct fuse_iqueue *fiq = &req->fm->fc->iq; - spin_lock(&fiq->lock); /* Check for we've sent request to interrupt this req */ - if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) { - spin_unlock(&fiq->lock); + if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) return -EINVAL; - } - if (list_empty(&req->intr_entry)) { - list_add_tail(&req->intr_entry, &fiq->interrupts); - /* - * Pairs with smp_mb() implied by test_and_set_bit() - * from fuse_request_end(). 
- */ - smp_mb(); - if (test_bit(FR_FINISHED, &req->flags)) { - list_del_init(&req->intr_entry); - spin_unlock(&fiq->lock); - return 0; - } - fiq->ops->wake_interrupt_and_unlock(fiq); - } else { - spin_unlock(&fiq->lock); - } + fiq->ops->send_interrupt(fiq, req); + return 0; } @@ -412,21 +450,15 @@ static void __fuse_request_send(struct fuse_req *req) struct fuse_iqueue *fiq = &req->fm->fc->iq; BUG_ON(test_bit(FR_BACKGROUND, &req->flags)); - spin_lock(&fiq->lock); - if (!fiq->connected) { - spin_unlock(&fiq->lock); - req->out.h.error = -ENOTCONN; - } else { - req->in.h.unique = fuse_get_unique(fiq); - /* acquire extra reference, since request is still needed - after fuse_request_end() */ - __fuse_get_request(req); - queue_request_and_unlock(fiq, req); - request_wait_answer(req); - /* Pairs with smp_wmb() in fuse_request_end() */ - smp_rmb(); - } + /* acquire extra reference, since request is still needed after + fuse_request_end() */ + __fuse_get_request(req); + fuse_send_one(fiq, req); + + request_wait_answer(req); + /* Pairs with smp_wmb() in fuse_request_end() */ + smp_rmb(); } static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) @@ -581,7 +613,6 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm, { struct fuse_req *req; struct fuse_iqueue *fiq = &fm->fc->iq; - int err = 0; req = fuse_get_req(fm, false); if (IS_ERR(req)) @@ -592,16 +623,9 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm, fuse_args_to_req(req, args); - spin_lock(&fiq->lock); - if (fiq->connected) { - queue_request_and_unlock(fiq, req); - } else { - err = -ENODEV; - spin_unlock(&fiq->lock); - fuse_put_request(req); - } + fuse_send_one(fiq, req); - return err; + return 0; } /* @@ -773,7 +797,6 @@ static int fuse_check_folio(struct folio *folio) (folio->flags & PAGE_FLAGS_CHECK_AT_PREP & ~(1 << PG_locked | 1 << PG_referenced | - 1 << PG_uptodate | 1 << PG_lru | 1 << PG_active | 1 << PG_workingset | @@ -818,9 +841,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) newfolio = page_folio(buf->page); - if (!folio_test_uptodate(newfolio)) - folio_mark_uptodate(newfolio); - + folio_clear_uptodate(newfolio); folio_clear_mappedtodisk(newfolio); if (fuse_check_folio(newfolio) != 0) @@ -980,17 +1001,27 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes, struct fuse_req *req = cs->req; struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args); - - for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) { + for (i = 0; i < ap->num_folios && (nbytes || zeroing); i++) { int err; unsigned int offset = ap->descs[i].offset; unsigned int count = min(nbytes, ap->descs[i].length); + struct page *orig, *pagep; + + orig = pagep = &ap->folios[i]->page; - err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing); + err = fuse_copy_page(cs, &pagep, offset, count, zeroing); if (err) return err; nbytes -= count; + + /* + * fuse_copy_page may have moved a page from a pipe instead of + * copying into our given page, so update the folios if it was + * replaced. + */ + if (pagep != orig) + ap->folios[i] = page_folio(pagep); } return 0; } @@ -1076,9 +1107,9 @@ __releases(fiq->lock) return err ? 
err : reqsize; } -struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, - unsigned int max, - unsigned int *countp) +static struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, + unsigned int max, + unsigned int *countp) { struct fuse_forget_link *head = fiq->forget_list_head.next; struct fuse_forget_link **newhead = &head; @@ -1097,7 +1128,6 @@ struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, return head; } -EXPORT_SYMBOL(fuse_dequeue_forget); static int fuse_read_single_forget(struct fuse_iqueue *fiq, struct fuse_copy_state *cs, @@ -1112,7 +1142,7 @@ __releases(fiq->lock) struct fuse_in_header ih = { .opcode = FUSE_FORGET, .nodeid = forget->forget_one.nodeid, - .unique = fuse_get_unique(fiq), + .unique = fuse_get_unique_locked(fiq), .len = sizeof(ih) + sizeof(arg), }; @@ -1143,7 +1173,7 @@ __releases(fiq->lock) struct fuse_batch_forget_in arg = { .count = 0 }; struct fuse_in_header ih = { .opcode = FUSE_BATCH_FORGET, - .unique = fuse_get_unique(fiq), + .unique = fuse_get_unique_locked(fiq), .len = sizeof(ih) + sizeof(arg), }; @@ -1607,24 +1637,25 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, num = outarg.size; while (num) { + struct folio *folio; struct page *page; unsigned int this_num; - err = -ENOMEM; - page = find_or_create_page(mapping, index, - mapping_gfp_mask(mapping)); - if (!page) + folio = filemap_grab_folio(mapping, index); + err = PTR_ERR(folio); + if (IS_ERR(folio)) goto out_iput; - this_num = min_t(unsigned, num, PAGE_SIZE - offset); + page = &folio->page; + this_num = min_t(unsigned, num, folio_size(folio) - offset); err = fuse_copy_page(cs, &page, offset, this_num, 0); - if (!PageUptodate(page) && !err && offset == 0 && - (this_num == PAGE_SIZE || file_size == end)) { - zero_user_segment(page, this_num, PAGE_SIZE); - SetPageUptodate(page); + if (!folio_test_uptodate(folio) && !err && offset == 0 && + (this_num == folio_size(folio) || file_size == end)) { + folio_zero_segment(folio, this_num, folio_size(folio)); + folio_mark_uptodate(folio); } - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); if (err) goto out_iput; @@ -1656,7 +1687,7 @@ static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args, struct fuse_retrieve_args *ra = container_of(args, typeof(*ra), ap.args); - release_pages(ra->ap.pages, ra->ap.num_pages); + release_pages(ra->ap.folios, ra->ap.num_folios); kfree(ra); } @@ -1670,7 +1701,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, unsigned int num; unsigned int offset; size_t total_len = 0; - unsigned int num_pages; + unsigned int num_pages, cur_pages = 0; struct fuse_conn *fc = fm->fc; struct fuse_retrieve_args *ra; size_t args_size = sizeof(*ra); @@ -1689,15 +1720,15 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; num_pages = min(num_pages, fc->max_pages); - args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0])); + args_size += num_pages * (sizeof(ap->folios[0]) + sizeof(ap->descs[0])); ra = kzalloc(args_size, GFP_KERNEL); if (!ra) return -ENOMEM; ap = &ra->ap; - ap->pages = (void *) (ra + 1); - ap->descs = (void *) (ap->pages + num_pages); + ap->folios = (void *) (ra + 1); + ap->descs = (void *) (ap->folios + num_pages); args = &ap->args; args->nodeid = outarg->nodeid; @@ -1708,19 +1739,20 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, index = outarg->offset >> PAGE_SHIFT; - while 
(num && ap->num_pages < num_pages) { - struct page *page; + while (num && cur_pages < num_pages) { + struct folio *folio; unsigned int this_num; - page = find_get_page(mapping, index); - if (!page) + folio = filemap_get_folio(mapping, index); + if (IS_ERR(folio)) break; this_num = min_t(unsigned, num, PAGE_SIZE - offset); - ap->pages[ap->num_pages] = page; - ap->descs[ap->num_pages].offset = offset; - ap->descs[ap->num_pages].length = this_num; - ap->num_pages++; + ap->folios[ap->num_folios] = folio; + ap->descs[ap->num_folios].offset = offset; + ap->descs[ap->num_folios].length = this_num; + ap->num_folios++; + cur_pages++; offset = 0; num -= this_num; @@ -1777,6 +1809,69 @@ static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size, return err; } +/* + * Resending all processing queue requests. + * + * When a FUSE daemon panics and fails over, it is possible for some inflight + * requests to be lost and never returned. As a result, applications awaiting + * replies would become stuck forever. To address this, we can use notification + * to trigger resending of these pending requests to the FUSE daemon, ensuring + * they are properly processed again. + * + * Please note that this strategy is applicable only to idempotent requests or + * if the FUSE daemon takes careful measures to avoid processing duplicated + * non-idempotent requests. + */ +static void fuse_resend(struct fuse_conn *fc) +{ + struct fuse_dev *fud; + struct fuse_req *req, *next; + struct fuse_iqueue *fiq = &fc->iq; + LIST_HEAD(to_queue); + unsigned int i; + + spin_lock(&fc->lock); + if (!fc->connected) { + spin_unlock(&fc->lock); + return; + } + + list_for_each_entry(fud, &fc->devices, entry) { + struct fuse_pqueue *fpq = &fud->pq; + + spin_lock(&fpq->lock); + for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) + list_splice_tail_init(&fpq->processing[i], &to_queue); + spin_unlock(&fpq->lock); + } + spin_unlock(&fc->lock); + + list_for_each_entry_safe(req, next, &to_queue, list) { + set_bit(FR_PENDING, &req->flags); + clear_bit(FR_SENT, &req->flags); + /* mark the request as a resend request */ + req->in.h.unique |= FUSE_UNIQUE_RESEND; + } + + spin_lock(&fiq->lock); + if (!fiq->connected) { + spin_unlock(&fiq->lock); + list_for_each_entry(req, &to_queue, list) + clear_bit(FR_PENDING, &req->flags); + end_requests(&to_queue); + return; + } + /* iq and pq requests are both oldest to newest */ + list_splice(&to_queue, &fiq->pending); + fuse_dev_wake_and_unlock(fiq); +} + +static int fuse_notify_resend(struct fuse_conn *fc) +{ + fuse_resend(fc); + return 0; +} + static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, unsigned int size, struct fuse_copy_state *cs) { @@ -1802,6 +1897,9 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, case FUSE_NOTIFY_DELETE: return fuse_notify_delete(fc, size, cs); + case FUSE_NOTIFY_RESEND: + return fuse_notify_resend(fc); + default: fuse_copy_finish(cs); return -EINVAL; @@ -2253,43 +2351,50 @@ static int fuse_device_clone(struct fuse_conn *fc, struct file *new) return 0; } -static long fuse_dev_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long fuse_dev_ioctl_clone(struct file *file, __u32 __user *argp) { int res; int oldfd; struct fuse_dev *fud = NULL; struct fd f; + if (get_user(oldfd, argp)) + return -EFAULT; + + f = fdget(oldfd); + if (!f.file) + return -EINVAL; + + /* + * Check against file->f_op because CUSE + * uses the same ioctl handler.
+ */ + if (f.file->f_op == file->f_op) + fud = fuse_get_dev(f.file); + + res = -EINVAL; + if (fud) { + mutex_lock(&fuse_mutex); + res = fuse_device_clone(fud->fc, file); + mutex_unlock(&fuse_mutex); + } + + fdput(f); + return res; +} + +static long fuse_dev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + switch (cmd) { case FUSE_DEV_IOC_CLONE: - if (get_user(oldfd, (__u32 __user *)arg)) - return -EFAULT; - - f = fdget(oldfd); - if (!f.file) - return -EINVAL; + return fuse_dev_ioctl_clone(file, argp); - /* - * Check against file->f_op because CUSE - * uses the same ioctl handler. - */ - if (f.file->f_op == file->f_op) - fud = fuse_get_dev(f.file); - - res = -EINVAL; - if (fud) { - mutex_lock(&fuse_mutex); - res = fuse_device_clone(fud->fc, file); - mutex_unlock(&fuse_mutex); - } - fdput(f); - break; default: - res = -ENOTTY; - break; + return -ENOTTY; } - return res; } const struct file_operations fuse_dev_operations = { diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 95f9913a35373184b7b50805aa0e50ea967aa601..ccf08647359506f73a62cbebad6743a8f9d932d0 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -634,7 +634,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, goto out_err; err = -ENOMEM; - ff = fuse_file_alloc(fm); + ff = fuse_file_alloc(fm, true); if (!ff) goto out_put_forget_req; @@ -696,13 +696,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, d_instantiate(entry, inode); fuse_change_entry_timeout(entry, &outentry); fuse_dir_changed(dir); - err = finish_open(file, entry, generic_file_open); + err = generic_file_open(inode, file); + if (!err) { + file->private_data = ff; + err = finish_open(file, entry, fuse_finish_open); + } if (err) { fi = get_fuse_inode(inode); fuse_sync_release(fi, ff, flags); } else { - file->private_data = ff; - fuse_finish_open(inode, file); if (fm->fc->atomic_o_trunc && trunc) truncate_pagecache(inode, 0); else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) @@ -1490,7 +1492,7 @@ static int fuse_perm_getattr(struct inode *inode, int mask) * * 1) Local access checking ('default_permissions' mount option) based * on file mode. This is the plain old disk filesystem permission - * modell. + * model. * * 2) "Remote" access checking, where server is responsible for * checking permission in each inode operation. 
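For context on the FUSE_DEV_IOC_CLONE path factored out above: userspace passes the established session fd by pointer, which is why the helper begins with get_user() before fdget(). The user-side call looks like the following sketch (error handling trimmed; the ioctl number comes from the uapi <linux/fuse.h>)::

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fuse.h>

    /* clone an established /dev/fuse session so that another worker
     * thread can read and answer requests on the same connection */
    int clone_fuse_fd(int session_fd)
    {
            int clone_fd = open("/dev/fuse", O_RDWR);

            if (clone_fd < 0)
                    return -1;
            if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &session_fd) < 0) {
                    close(clone_fd);
                    return -1;
            }
            return clone_fd;
    }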
An exception to this @@ -1561,13 +1563,13 @@ static int fuse_permission(struct mnt_idmap *idmap, return err; } -static int fuse_readlink_page(struct inode *inode, struct page *page) +static int fuse_readlink_page(struct inode *inode, struct folio *folio) { struct fuse_mount *fm = get_fuse_mount(inode); - struct fuse_page_desc desc = { .length = PAGE_SIZE - 1 }; + struct fuse_folio_desc desc = { .length = PAGE_SIZE - 1 }; struct fuse_args_pages ap = { - .num_pages = 1, - .pages = &page, + .num_folios = 1, + .folios = &folio, .descs = &desc, }; char *link; @@ -1590,7 +1592,7 @@ static int fuse_readlink_page(struct inode *inode, struct page *page) if (WARN_ON(res >= PAGE_SIZE)) return -EIO; - link = page_address(page); + link = folio_address(folio); link[res] = '\0'; return 0; @@ -1600,7 +1602,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *callback) { struct fuse_conn *fc = get_fuse_conn(inode); - struct page *page; + struct folio *folio; int err; err = -EIO; @@ -1614,20 +1616,20 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, if (!dentry) goto out_err; - page = alloc_page(GFP_KERNEL); + folio = folio_alloc(GFP_KERNEL, 0); err = -ENOMEM; - if (!page) + if (!folio) goto out_err; - err = fuse_readlink_page(inode, page); + err = fuse_readlink_page(inode, folio); if (err) { - __free_page(page); + folio_put(folio); goto out_err; } - set_delayed_call(callback, page_put_link, page); + set_delayed_call(callback, page_put_link, &folio->page); - return page_address(page); + return folio_address(folio); out_err: return ERR_PTR(err); @@ -1635,7 +1637,32 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, static int fuse_dir_open(struct inode *inode, struct file *file) { - return fuse_open_common(inode, file, true); + struct fuse_mount *fm = get_fuse_mount(inode); + int err; + + if (fuse_is_bad(inode)) + return -EIO; + + err = generic_file_open(inode, file); + if (err) + return err; + + err = fuse_do_open(fm, get_node_id(inode), file, true); + if (!err) { + struct fuse_file *ff = file->private_data; + + /* + * Keep handling FOPEN_STREAM and FOPEN_NONSEEKABLE for + * directories for backward compatibility, though it's unlikely + * to be useful. 
+ */ + if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE)) + nonseekable_open(inode, file); + if (!(ff->open_flags & FOPEN_KEEP_CACHE)) + invalidate_inode_pages2(inode->i_mapping); + } + + return err; } static int fuse_dir_release(struct inode *inode, struct file *file) @@ -2172,7 +2199,7 @@ void fuse_init_dir(struct inode *inode) static int fuse_symlink_read_folio(struct file *null, struct folio *folio) { - int err = fuse_readlink_page(folio->mapping->host, &folio->page); + int err = fuse_readlink_page(folio->mapping->host, folio); if (!err) folio_mark_uptodate(folio); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ceb9f7d230388258e5313ac3db21fc10b0c2bd22..ccc91256c4d6edbda35834848d09bf1d7566fcd8 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -19,6 +19,7 @@ #include #include #include +#include static int fuse_send_open(struct fuse_mount *fm, u64 nodeid, unsigned int open_flags, int opcode, @@ -55,7 +56,7 @@ struct fuse_release_args { struct inode *inode; }; -struct fuse_file *fuse_file_alloc(struct fuse_mount *fm) +struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release) { struct fuse_file *ff; @@ -64,15 +65,16 @@ struct fuse_file *fuse_file_alloc(struct fuse_mount *fm) return NULL; ff->fm = fm; - ff->release_args = kzalloc(sizeof(*ff->release_args), - GFP_KERNEL_ACCOUNT); - if (!ff->release_args) { - kfree(ff); - return NULL; + if (release) { + ff->release_args = kzalloc(sizeof(*ff->release_args), + GFP_KERNEL_ACCOUNT); + if (!ff->release_args) { + kfree(ff); + return NULL; + } } INIT_LIST_HEAD(&ff->write_entry); - mutex_init(&ff->readdir.lock); refcount_set(&ff->count, 1); RB_CLEAR_NODE(&ff->polled_node); init_waitqueue_head(&ff->poll_wait); @@ -85,7 +87,6 @@ struct fuse_file *fuse_file_alloc(struct fuse_mount *fm) void fuse_file_free(struct fuse_file *ff) { kfree(ff->release_args); - mutex_destroy(&ff->readdir.lock); kfree(ff); } @@ -104,14 +105,17 @@ static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args, kfree(ra); } -static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir) +static void fuse_file_put(struct fuse_file *ff, bool sync) { if (refcount_dec_and_test(&ff->count)) { - struct fuse_args *args = &ff->release_args->args; + struct fuse_release_args *ra = ff->release_args; + struct fuse_args *args = (ra ? &ra->args : NULL); - if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) { - /* Do nothing when client does not implement 'open' */ - fuse_release_end(ff->fm, args, 0); + if (ra && ra->inode) + fuse_file_io_release(ff, ra->inode); + + if (!args) { + /* Do nothing when server does not implement 'open' */ } else if (sync) { fuse_simple_request(ff->fm, args); fuse_release_end(ff->fm, args, 0); @@ -131,15 +135,16 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, struct fuse_conn *fc = fm->fc; struct fuse_file *ff; int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; + bool open = isdir ? !fc->no_opendir : !fc->no_open; - ff = fuse_file_alloc(fm); + ff = fuse_file_alloc(fm, open); if (!ff) return ERR_PTR(-ENOMEM); ff->fh = 0; /* Default for no-open */ ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0); - if (isdir ? 
!fc->no_opendir : !fc->no_open) { + if (open) { struct fuse_open_out outarg; int err; @@ -147,11 +152,13 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, if (!err) { ff->fh = outarg.fh; ff->open_flags = outarg.open_flags; - } else if (err != -ENOSYS) { fuse_file_free(ff); return ERR_PTR(err); } else { + /* No release needed */ + kfree(ff->release_args); + ff->release_args = NULL; if (isdir) fc->no_opendir = 1; else @@ -194,40 +201,50 @@ static void fuse_link_write_file(struct file *file) spin_unlock(&fi->lock); } -void fuse_finish_open(struct inode *inode, struct file *file) +int fuse_finish_open(struct inode *inode, struct file *file) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = get_fuse_conn(inode); + int err; + + err = fuse_file_io_open(file, inode); + if (err) + return err; if (ff->open_flags & FOPEN_STREAM) stream_open(inode, file); else if (ff->open_flags & FOPEN_NONSEEKABLE) nonseekable_open(inode, file); - if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { - struct fuse_inode *fi = get_fuse_inode(inode); - - spin_lock(&fi->lock); - fi->attr_version = atomic64_inc_return(&fc->attr_version); - i_size_write(inode, 0); - spin_unlock(&fi->lock); - file_update_time(file); - fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); - } if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache) fuse_link_write_file(file); + + return 0; +} + +static void fuse_truncate_update_attr(struct inode *inode, struct file *file) +{ + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fi->lock); + fi->attr_version = atomic64_inc_return(&fc->attr_version); + i_size_write(inode, 0); + spin_unlock(&fi->lock); + file_update_time(file); + fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); } -int fuse_open_common(struct inode *inode, struct file *file, bool isdir) +static int fuse_open(struct inode *inode, struct file *file) { struct fuse_mount *fm = get_fuse_mount(inode); + struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = fm->fc; + struct fuse_file *ff; int err; - bool is_wb_truncate = (file->f_flags & O_TRUNC) && - fc->atomic_o_trunc && - fc->writeback_cache; - bool dax_truncate = (file->f_flags & O_TRUNC) && - fc->atomic_o_trunc && FUSE_IS_DAX(inode); + bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc; + bool is_wb_truncate = is_truncate && fc->writeback_cache; + bool dax_truncate = is_truncate && FUSE_IS_DAX(inode); if (fuse_is_bad(inode)) return -EIO; @@ -249,16 +266,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) if (is_wb_truncate || dax_truncate) fuse_set_nowrite(inode); - err = fuse_do_open(fm, get_node_id(inode), file, isdir); - if (!err) - fuse_finish_open(inode, file); + err = fuse_do_open(fm, get_node_id(inode), file, false); + if (!err) { + ff = file->private_data; + err = fuse_finish_open(inode, file); + if (err) + fuse_sync_release(fi, ff, file->f_flags); + else if (is_truncate) + fuse_truncate_update_attr(inode, file); + } if (is_wb_truncate || dax_truncate) fuse_release_nowrite(inode); if (!err) { - struct fuse_file *ff = file->private_data; - - if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) + if (is_truncate) truncate_pagecache(inode, 0); else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) invalidate_inode_pages2(inode->i_mapping); @@ -273,7 +294,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) } static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, - unsigned int 
flags, int opcode) + unsigned int flags, int opcode, bool sync) { struct fuse_conn *fc = ff->fm->fc; struct fuse_release_args *ra = ff->release_args; @@ -291,6 +312,9 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, wake_up_interruptible_all(&ff->poll_wait); + if (!ra) + return; + ra->inarg.fh = ff->fh; ra->inarg.flags = flags; ra->args.in_numargs = 1; @@ -300,6 +324,13 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, ra->args.nodeid = ff->nodeid; ra->args.force = true; ra->args.nocreds = true; + + /* + * Hold inode until release is finished. + * From fuse_sync_release() the refcount is 1 and everything's + * synchronous, so we are fine with not doing igrab() here. + */ + ra->inode = sync ? NULL : igrab(&fi->inode); } void fuse_file_release(struct inode *inode, struct fuse_file *ff, @@ -309,14 +340,12 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff, struct fuse_release_args *ra = ff->release_args; int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; - fuse_prepare_release(fi, ff, open_flags, opcode); + fuse_prepare_release(fi, ff, open_flags, opcode, false); - if (ff->flock) { + if (ra && ff->flock) { ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id); } - /* Hold inode until release is finished */ - ra->inode = igrab(inode); /* * Normally this will send the RELEASE request, however if @@ -327,7 +356,7 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff, * synchronous RELEASE is allowed (and desirable) in this case * because the server can be trusted not to screw up. */ - fuse_file_put(ff, ff->fm->fc->destroy, isdir); + fuse_file_put(ff, ff->fm->fc->destroy); } void fuse_release_common(struct file *file, bool isdir) @@ -336,11 +365,6 @@ void fuse_release_common(struct file *file, bool isdir) (fl_owner_t) file, isdir); } -static int fuse_open(struct inode *inode, struct file *file) -{ - return fuse_open_common(inode, file, false); -} - static int fuse_release(struct inode *inode, struct file *file) { struct fuse_conn *fc = get_fuse_conn(inode); @@ -362,12 +386,8 @@ void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, unsigned int flags) { WARN_ON(refcount_read(&ff->count) > 1); - fuse_prepare_release(fi, ff, flags, FUSE_RELEASE); - /* - * iput(NULL) is a no-op and since the refcount is 1 and everything's - * synchronous, we are fine with not doing igrab() here" - */ - fuse_file_put(ff, true, false); + fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true); + fuse_file_put(ff, true); } EXPORT_SYMBOL_GPL(fuse_sync_release); @@ -416,7 +436,7 @@ static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi, wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry); WARN_ON(get_fuse_inode(wpa->inode) != fi); curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT; - if (idx_from >= curr_index + wpa->ia.ap.num_pages) + if (idx_from >= curr_index + wpa->ia.ap.num_folios) n = n->rb_right; else if (idx_to < curr_index) n = n->rb_left; @@ -428,9 +448,6 @@ static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi, /* * Check if any page in a range is under writeback - * - * This is currently done by walking the list of writepage requests - * for the inode, which can be pretty inefficient. 
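The rb-tree walk in fuse_find_writeback() above is a textbook interval-disjointness test, and the pages-to-folios conversion leaves the rule untouched: a request covering pages [curr_index, curr_index + num_folios) misses the query range only if it lies entirely to one side. A standalone plain-C model of the rule (illustrative sketch, not part of the patch)::

  #include <stdbool.h>
  #include <stdio.h>

  /* A request covers pages [start, start + len); the query is the
   * inclusive range [from, to]. They overlap unless the request lies
   * entirely to one side; these are the two comparisons steering the
   * rb-tree walk above. */
  static bool overlaps(unsigned long start, unsigned long len,
                       unsigned long from, unsigned long to)
  {
          if (from >= start + len)        /* descend right */
                  return false;
          if (to < start)                 /* descend left */
                  return false;
          return true;
  }

  int main(void)
  {
          /* Request covers pages 8..11: page 10 hits, page 12 misses. */
          printf("%d %d\n", overlaps(8, 4, 10, 10), overlaps(8, 4, 12, 12));
          return 0;
  }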
*/ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from, pgoff_t idx_to) @@ -438,6 +455,9 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from, struct fuse_inode *fi = get_fuse_inode(inode); bool found; + if (RB_EMPTY_ROOT(&fi->writepages)) + return false; + spin_lock(&fi->lock); found = fuse_find_writeback(fi, idx_from, idx_to); spin_unlock(&fi->lock); @@ -463,6 +483,21 @@ static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index) wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index)); } +static inline bool fuse_folio_is_writeback(struct inode *inode, + struct folio *folio) +{ + pgoff_t last = folio_next_index(folio) - 1; + return fuse_range_is_writeback(inode, folio_index(folio), last); +} + +static void fuse_wait_on_folio_writeback(struct inode *inode, + struct folio *folio) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + + wait_event(fi->page_waitq, !fuse_folio_is_writeback(inode, folio)); +} + /* * Wait for all pending writepages on the inode to finish. * @@ -625,16 +660,20 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, args->out_args[0].size = count; } -static void fuse_release_user_pages(struct fuse_args_pages *ap, +static void fuse_release_user_pages(struct fuse_args_pages *ap, ssize_t nres, bool should_dirty) { unsigned int i; - for (i = 0; i < ap->num_pages; i++) { + for (i = 0; i < ap->num_folios; i++) { if (should_dirty) - set_page_dirty_lock(ap->pages[i]); - put_page(ap->pages[i]); + folio_mark_dirty_lock(ap->folios[i]); + if (ap->args.is_pinned) + unpin_folio(ap->folios[i]); } + + if (nres > 0 && ap->args.invalidate_vmap) + invalidate_kernel_vmap_range(ap->args.vmap_base, nres); } static void fuse_io_release(struct kref *kref) @@ -704,16 +743,16 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) } static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io, - unsigned int npages) + unsigned int nfolios) { struct fuse_io_args *ia; ia = kzalloc(sizeof(*ia), GFP_KERNEL); if (ia) { ia->io = io; - ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL, - &ia->ap.descs); - if (!ia->ap.pages) { + ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL, + &ia->ap.descs); + if (!ia->ap.folios) { kfree(ia); ia = NULL; } @@ -723,7 +762,7 @@ static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io, static void fuse_io_free(struct fuse_io_args *ia) { - kfree(ia->ap.pages); + kfree(ia->ap.folios); kfree(ia); } @@ -733,25 +772,29 @@ static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args, struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args); struct fuse_io_priv *io = ia->io; ssize_t pos = -1; - - fuse_release_user_pages(&ia->ap, io->should_dirty); + size_t nres; if (err) { /* Nothing */ } else if (io->write) { if (ia->write.out.size > ia->write.in.size) { err = -EIO; - } else if (ia->write.in.size != ia->write.out.size) { - pos = ia->write.in.offset - io->offset + - ia->write.out.size; + } else { + nres = ia->write.out.size; + if (ia->write.in.size != ia->write.out.size) + pos = ia->write.in.offset - io->offset + + ia->write.out.size; } } else { u32 outsize = args->out_args[0].size; + nres = outsize; if (ia->read.in.size != outsize) pos = ia->read.in.offset - io->offset + outsize; } + fuse_release_user_pages(&ia->ap, err ?: nres, io->should_dirty); + fuse_aio_complete(io, err, pos); fuse_io_free(ia); } @@ -822,33 +865,33 @@ static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read, 
* reached the client fs yet. So the hole is not present there. */ if (!fc->writeback_cache) { - loff_t pos = page_offset(ap->pages[0]) + num_read; + loff_t pos = folio_pos(ap->folios[0]) + num_read; fuse_read_update_size(inode, pos, attr_ver); } } -static int fuse_do_readpage(struct file *file, struct page *page) +static int fuse_do_readfolio(struct file *file, struct folio *folio) { - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct fuse_mount *fm = get_fuse_mount(inode); - loff_t pos = page_offset(page); - struct fuse_page_desc desc = { .length = PAGE_SIZE }; + loff_t pos = folio_pos(folio); + struct fuse_folio_desc desc = { .length = PAGE_SIZE }; struct fuse_io_args ia = { .ap.args.page_zeroing = true, .ap.args.out_pages = true, - .ap.num_pages = 1, - .ap.pages = &page, + .ap.num_folios = 1, + .ap.folios = &folio, .ap.descs = &desc, }; ssize_t res; u64 attr_ver; /* - * Page writeback can extend beyond the lifetime of the - * page-cache page, so make sure we read a properly synced - * page. + * With the temporary pages that are used to complete writeback, we can + * have writeback that extends beyond the lifetime of the folio. So + * make sure we read a properly synced folio. */ - fuse_wait_on_page_writeback(inode, page->index); + fuse_wait_on_folio_writeback(inode, folio); attr_ver = fuse_get_attr_version(fm->fc); @@ -866,25 +909,24 @@ static int fuse_do_readpage(struct file *file, struct page *page) if (res < desc.length) fuse_short_read(inode, attr_ver, res, &ia.ap); - SetPageUptodate(page); + folio_mark_uptodate(folio); return 0; } static int fuse_read_folio(struct file *file, struct folio *folio) { - struct page *page = &folio->page; - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; int err; err = -EIO; if (fuse_is_bad(inode)) goto out; - err = fuse_do_readpage(file, page); + err = fuse_do_readfolio(file, folio); fuse_invalidate_atime(inode); out: - unlock_page(page); + folio_unlock(folio); return err; } @@ -898,8 +940,8 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, size_t num_read = args->out_args[0].size; struct address_space *mapping = NULL; - for (i = 0; mapping == NULL && i < ap->num_pages; i++) - mapping = ap->pages[i]->mapping; + for (i = 0; mapping == NULL && i < ap->num_folios; i++) + mapping = ap->folios[i]->mapping; if (mapping) { struct inode *inode = mapping->host; @@ -913,18 +955,10 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, fuse_invalidate_atime(inode); } - for (i = 0; i < ap->num_pages; i++) { - struct page *page = ap->pages[i]; - - if (!err) - SetPageUptodate(page); - else - SetPageError(page); - unlock_page(page); - put_page(page); - } + for (i = 0; i < ap->num_folios; i++) + folio_end_read(ap->folios[i], !err); if (ia->ff) - fuse_file_put(ia->ff, false, false); + fuse_file_put(ia->ff, false); fuse_io_free(ia); } @@ -934,8 +968,9 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) struct fuse_file *ff = file->private_data; struct fuse_mount *fm = ff->fm; struct fuse_args_pages *ap = &ia->ap; - loff_t pos = page_offset(ap->pages[0]); - size_t count = ap->num_pages << PAGE_SHIFT; + loff_t pos = folio_pos(ap->folios[0]); + /* Currently, all folios in FUSE are one page */ + size_t count = ap->num_folios << PAGE_SHIFT; ssize_t res; int err; @@ -946,7 +981,7 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) /* Don't overflow end offset */ if (pos + (count - 
1) == LLONG_MAX) { count--; - ap->descs[ap->num_pages - 1].length--; + ap->descs[ap->num_folios - 1].length--; } WARN_ON((loff_t) (pos + count) < 0); @@ -968,18 +1003,36 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) static void fuse_readahead(struct readahead_control *rac) { struct inode *inode = rac->mapping->host; + struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); - unsigned int i, max_pages, nr_pages = 0; + unsigned int max_pages, nr_pages; + pgoff_t first = readahead_index(rac); + pgoff_t last = first + readahead_count(rac) - 1; if (fuse_is_bad(inode)) return; + wait_event(fi->page_waitq, !fuse_range_is_writeback(inode, first, last)); + max_pages = min_t(unsigned int, fc->max_pages, fc->max_read / PAGE_SIZE); - for (;;) { + /* + * This is only accurate the first time through, since readahead_folio() + * doesn't update readahead_count() from the previous folio until the + * next call. Grab nr_pages here so we know how many pages we're going + * to have to process. This means that we will exit here with + * readahead_count() == folio_nr_pages(last_folio), but we will have + * consumed all of the folios, and read_pages() will call + * readahead_folio() again which will clean up the rac. + */ + nr_pages = readahead_count(rac); + + while (nr_pages) { struct fuse_io_args *ia; struct fuse_args_pages *ap; + struct folio *folio; + unsigned cur_pages = min(max_pages, nr_pages); if (fc->num_background >= fc->congestion_threshold && rac->ra->async_size >= readahead_count(rac)) @@ -989,23 +1042,19 @@ static void fuse_readahead(struct readahead_control *rac) */ break; - nr_pages = readahead_count(rac) - nr_pages; - if (nr_pages > max_pages) - nr_pages = max_pages; - if (nr_pages == 0) - break; - ia = fuse_io_alloc(NULL, nr_pages); + ia = fuse_io_alloc(NULL, cur_pages); if (!ia) return; ap = &ia->ap; - nr_pages = __readahead_batch(rac, ap->pages, nr_pages); - for (i = 0; i < nr_pages; i++) { - fuse_wait_on_page_writeback(inode, - readahead_index(rac) + i); - ap->descs[i].length = PAGE_SIZE; + + while (ap->num_folios < cur_pages) { + folio = readahead_folio(rac); + ap->folios[ap->num_folios] = folio; + ap->descs[ap->num_folios].length = folio_size(folio); + ap->num_folios++; } - ap->num_pages = nr_pages; fuse_send_readpages(ia, rac->file); + nr_pages -= cur_pages; } } @@ -1122,8 +1171,8 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, bool short_write; int err; - for (i = 0; i < ap->num_pages; i++) - fuse_wait_on_page_writeback(inode, ap->pages[i]->index); + for (i = 0; i < ap->num_folios; i++) + fuse_wait_on_folio_writeback(inode, ap->folios[i]); fuse_write_args_fill(ia, ff, pos, count); ia->write.in.flags = fuse_write_flags(iocb); @@ -1137,24 +1186,24 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, short_write = ia->write.out.size < count; offset = ap->descs[0].offset; count = ia->write.out.size; - for (i = 0; i < ap->num_pages; i++) { - struct page *page = ap->pages[i]; + for (i = 0; i < ap->num_folios; i++) { + struct folio *folio = ap->folios[i]; if (err) { - ClearPageUptodate(page); + folio_clear_uptodate(folio); } else { - if (count >= PAGE_SIZE - offset) - count -= PAGE_SIZE - offset; + if (count >= folio_size(folio) - offset) + count -= folio_size(folio) - offset; else { if (short_write) - ClearPageUptodate(page); + folio_clear_uptodate(folio); count = 0; } offset = 0; } - if (ia->write.page_locked && (i == ap->num_pages - 1)) - unlock_page(page); - put_page(page); + if 
(ia->write.folio_locked && (i == ap->num_folios - 1)) + folio_unlock(folio); + folio_put(folio); } return err; @@ -1168,6 +1217,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, struct fuse_args_pages *ap = &ia->ap; struct fuse_conn *fc = get_fuse_conn(mapping->host); unsigned offset = pos & (PAGE_SIZE - 1); + unsigned int nr_pages = 0; size_t count = 0; int err; @@ -1176,7 +1226,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, do { size_t tmp; - struct page *page; + struct folio *folio; pgoff_t index = pos >> PAGE_SHIFT; size_t bytes = min_t(size_t, PAGE_SIZE - offset, iov_iter_count(ii)); @@ -1188,27 +1238,30 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, if (fault_in_iov_iter_readable(ii, bytes)) break; - err = -ENOMEM; - page = grab_cache_page_write_begin(mapping, index); - if (!page) + folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, + mapping_gfp_mask(mapping)); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); break; + } if (mapping_writably_mapped(mapping)) - flush_dcache_page(page); + flush_dcache_folio(folio); - tmp = copy_page_from_iter_atomic(page, offset, bytes, ii); - flush_dcache_page(page); + tmp = copy_folio_from_iter_atomic(folio, offset, bytes, ii); + flush_dcache_folio(folio); if (!tmp) { - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); goto again; } err = 0; - ap->pages[ap->num_pages] = page; - ap->descs[ap->num_pages].length = tmp; - ap->num_pages++; + ap->folios[ap->num_folios] = folio; + ap->descs[ap->num_folios].length = tmp; + ap->num_folios++; + nr_pages++; count += tmp; pos += tmp; @@ -1218,18 +1271,18 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, /* If we copied full page, mark it uptodate */ if (tmp == PAGE_SIZE) - SetPageUptodate(page); + folio_mark_uptodate(folio); - if (PageUptodate(page)) { - unlock_page(page); + if (folio_test_uptodate(folio)) { + folio_unlock(folio); } else { - ia->write.page_locked = true; + ia->write.folio_locked = true; break; } if (!fc->big_writes) break; } while (iov_iter_count(ii) && count < fc->max_write && - ap->num_pages < max_pages && offset == 0); + nr_pages < max_pages && offset == 0); return count > 0 ? 
count : err; } @@ -1263,8 +1316,8 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii) unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii), fc->max_pages); - ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs); - if (!ap->pages) { + ap->folios = fuse_folios_alloc(nr_pages, GFP_KERNEL, &ap->descs); + if (!ap->folios) { err = -ENOMEM; break; } @@ -1286,7 +1339,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii) err = -EIO; } } - kfree(ap->pages); + kfree(ap->folios); } while (!err && iov_iter_count(ii)); fuse_write_update_attr(inode, pos, res); @@ -1298,13 +1351,93 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii) return res; } +static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter) +{ + struct inode *inode = file_inode(iocb->ki_filp); + + return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode); +} + +/* + * @return true if an exclusive lock for direct IO writes is needed + */ +static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct fuse_file *ff = file->private_data; + struct inode *inode = file_inode(iocb->ki_filp); + struct fuse_inode *fi = get_fuse_inode(inode); + + /* Server side has to advise that it supports parallel dio writes. */ + if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES)) + return true; + + /* + * Append will need to know the eventual EOF - always needs an + * exclusive lock. + */ + if (iocb->ki_flags & IOCB_APPEND) + return true; + + /* shared locks are not allowed with parallel page cache IO */ + if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state)) + return true; + + /* Parallel dio beyond EOF is not supported, at least for now. */ + if (fuse_io_past_eof(iocb, from)) + return true; + + return false; +} + +static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, + bool *exclusive) +{ + struct inode *inode = file_inode(iocb->ki_filp); + struct fuse_file *ff = iocb->ki_filp->private_data; + + *exclusive = fuse_dio_wr_exclusive_lock(iocb, from); + if (*exclusive) { + inode_lock(inode); + } else { + inode_lock_shared(inode); + /* + * New parallel dio is allowed only if the inode is not in + * caching mode and denies new opens in caching mode. This + * check should be performed only after taking shared inode + * lock. Previous past eof check was without inode lock and + * might have raced, so check it again.
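The four early returns in fuse_dio_wr_exclusive_lock() collapse into one predicate. The sketch below models the decision in plain C; the struct and field names are stand-ins for the kernel state being tested, not real FUSE structures::

  #include <stdbool.h>

  struct dio_write_ctx {
          bool server_allows_parallel_dio; /* FOPEN_PARALLEL_DIRECT_WRITES */
          bool is_append;                  /* IOCB_APPEND */
          bool inode_in_caching_mode;      /* FUSE_I_CACHE_IO_MODE set */
          bool extends_eof;                /* fuse_io_past_eof() */
  };

  /* Any one condition forces the exclusive inode lock; otherwise the
   * write may run under the shared lock, subject to the re-check that
   * fuse_dio_lock() performs after taking the shared lock. */
  static bool needs_exclusive_lock(const struct dio_write_ctx *c)
  {
          return !c->server_allows_parallel_dio ||
                 c->is_append ||
                 c->inode_in_caching_mode ||
                 c->extends_eof;
  }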
+ */ + if (fuse_io_past_eof(iocb, from) || + fuse_file_uncached_io_start(inode, ff) != 0) { + inode_unlock_shared(inode); + inode_lock(inode); + *exclusive = true; + } + } +} + +static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive) +{ + struct inode *inode = file_inode(iocb->ki_filp); + struct fuse_file *ff = iocb->ki_filp->private_data; + + if (exclusive) { + inode_unlock(inode); + } else { + /* Allow opens in caching mode after last parallel dio end */ + fuse_file_uncached_io_end(inode, ff); + inode_unlock_shared(inode); + } +} + static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; ssize_t written = 0; struct inode *inode = mapping->host; - ssize_t err; + ssize_t err, count; struct fuse_conn *fc = get_fuse_conn(inode); if (fc->writeback_cache) { @@ -1326,15 +1459,13 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) writethrough: inode_lock(inode); - err = generic_write_checks(iocb, from); + err = count = generic_write_checks(iocb, from); if (err <= 0) goto out; - err = file_remove_privs(file); - if (err) - goto out; + task_io_account_write(count); - err = file_update_time(file); + err = kiocb_modified(iocb); if (err) goto out; @@ -1368,55 +1499,97 @@ static inline size_t fuse_get_frag_size(const struct iov_iter *ii, static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, size_t *nbytesp, int write, - unsigned int max_pages) + unsigned int max_pages, + bool use_pages_for_kvec_io) { + bool flush_or_invalidate = false; + unsigned int nr_pages = 0; size_t nbytes = 0; /* # bytes already packed in req */ ssize_t ret = 0; - /* Special case for kernel I/O: can copy directly into the buffer */ + /* Special case for kernel I/O: can copy directly into the buffer. + * However if the implementation of fuse_conn requires pages instead of + * pointer (e.g., virtio-fs), use iov_iter_extract_pages() instead. + */ if (iov_iter_is_kvec(ii)) { - unsigned long user_addr = fuse_get_user_addr(ii); - size_t frag_size = fuse_get_frag_size(ii, *nbytesp); + void *user_addr = (void *)fuse_get_user_addr(ii); - if (write) - ap->args.in_args[1].value = (void *) user_addr; - else - ap->args.out_args[0].value = (void *) user_addr; + if (!use_pages_for_kvec_io) { + size_t frag_size = fuse_get_frag_size(ii, *nbytesp); - iov_iter_advance(ii, frag_size); - *nbytesp = frag_size; - return 0; + if (write) + ap->args.in_args[1].value = user_addr; + else + ap->args.out_args[0].value = user_addr; + + iov_iter_advance(ii, frag_size); + *nbytesp = frag_size; + return 0; + } + + if (is_vmalloc_addr(user_addr)) { + ap->args.vmap_base = user_addr; + flush_or_invalidate = true; + } + } + + /* + * Until there is support for iov_iter_extract_folios(), we have to + * manually extract pages using iov_iter_extract_pages() and then + * copy that to a folios array. 
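When the kvec is backed by vmalloc memory, the code above records vmap_base and arranges cache maintenance around the request: flush before the server reads the buffer (a WRITE), invalidate after the server has filled it (a READ, done from fuse_release_user_pages()). A condensed sketch of that pairing; the two helper functions are illustrative, while flush_kernel_vmap_range() and invalidate_kernel_vmap_range() are the real kernel APIs::

  #include <linux/highmem.h>

  /* Illustrative helpers, kernel context assumed. */
  static void vmap_buf_before_send(void *vmap_base, int len, bool write)
  {
          /* CPU-written data must reach memory before the server
           * (e.g. the virtio-fs device) reads the buffer. */
          if (write)
                  flush_kernel_vmap_range(vmap_base, len);
  }

  static void vmap_buf_after_reply(void *vmap_base, int nread, bool write)
  {
          /* Drop stale cachelines before the CPU reads data the
           * server wrote into the vmalloc area. */
          if (!write && nread > 0)
                  invalidate_kernel_vmap_range(vmap_base, nread);
  }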
+ */ + struct page **pages = kzalloc(max_pages * sizeof(struct page *), + GFP_KERNEL); + if (!pages) { + ret = -ENOMEM; + goto out; } - while (nbytes < *nbytesp && ap->num_pages < max_pages) { - unsigned npages; + while (nbytes < *nbytesp && nr_pages < max_pages) { + unsigned nfolios, i; size_t start; - ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages], - *nbytesp - nbytes, - max_pages - ap->num_pages, - &start); + + ret = iov_iter_extract_pages(ii, &pages, + *nbytesp - nbytes, + max_pages - nr_pages, + 0, &start); if (ret < 0) break; nbytes += ret; - ret += start; - npages = DIV_ROUND_UP(ret, PAGE_SIZE); + nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE); - ap->descs[ap->num_pages].offset = start; - fuse_page_descs_length_init(ap->descs, ap->num_pages, npages); + for (i = 0; i < nfolios; i++) { + struct folio *folio = page_folio(pages[i]); + unsigned int offset = start + + (folio_page_idx(folio, pages[i]) << PAGE_SHIFT); + unsigned int len = min_t(unsigned int, ret, PAGE_SIZE - start); - ap->num_pages += npages; - ap->descs[ap->num_pages - 1].length -= - (PAGE_SIZE - ret) & (PAGE_SIZE - 1); + ap->descs[ap->num_folios].offset = offset; + ap->descs[ap->num_folios].length = len; + ap->folios[ap->num_folios] = folio; + start = 0; + ret -= len; + ap->num_folios++; + } + + nr_pages += nfolios; } + kfree(pages); + + if (write && flush_or_invalidate) + flush_kernel_vmap_range(ap->args.vmap_base, nbytes); + ap->args.invalidate_vmap = !write && flush_or_invalidate; + ap->args.is_pinned = iov_iter_extract_will_pin(ii); ap->args.user_pages = true; if (write) ap->args.in_pages = true; else ap->args.out_pages = true; +out: *nbytesp = nbytes; return ret < 0 ? ret : 0; @@ -1478,7 +1651,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, size_t nbytes = min(count, nmax); err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write, - max_pages); + max_pages, fc->use_pages_for_kvec_io); if (err && !nbytes) break; @@ -1492,7 +1665,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, } if (!io->async || nres < 0) { - fuse_release_user_pages(&ia->ap, io->should_dirty); + fuse_release_user_pages(&ia->ap, nres, io->should_dirty); fuse_io_free(ia); } ia = NULL; @@ -1546,7 +1719,7 @@ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to) { ssize_t res; - if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) { + if (!is_sync_kiocb(iocb)) { res = fuse_direct_IO(iocb, to); } else { struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); @@ -1557,63 +1730,27 @@ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to) return res; } -static bool fuse_direct_write_extending_i_size(struct kiocb *iocb, - struct iov_iter *iter) -{ - struct inode *inode = file_inode(iocb->ki_filp); - - return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode); -} - static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); - struct file *file = iocb->ki_filp; - struct fuse_file *ff = file->private_data; - struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); ssize_t res; - bool exclusive_lock = - !(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) || - get_fuse_conn(inode)->direct_io_allow_mmap || - iocb->ki_flags & IOCB_APPEND || - fuse_direct_write_extending_i_size(iocb, from); - - /* - * Take exclusive lock if - * - Parallel direct writes are disabled - a user space decision - * - Parallel direct writes are enabled and i_size is being extended. 
- * - Shared mmap on direct_io file is supported (FUSE_DIRECT_IO_ALLOW_MMAP). - * This might not be needed at all, but needs further investigation. - */ - if (exclusive_lock) - inode_lock(inode); - else { - inode_lock_shared(inode); - - /* A race with truncate might have come up as the decision for - * the lock type was done without holding the lock, check again. - */ - if (fuse_direct_write_extending_i_size(iocb, from)) { - inode_unlock_shared(inode); - inode_lock(inode); - exclusive_lock = true; - } - } + bool exclusive; + fuse_dio_lock(iocb, from, &exclusive); res = generic_write_checks(iocb, from); if (res > 0) { - if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) { + task_io_account_write(res); + if (!is_sync_kiocb(iocb)) { res = fuse_direct_IO(iocb, from); } else { + struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); + res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE); fuse_write_update_attr(inode, iocb->ki_pos, res); } } - if (exclusive_lock) - inode_unlock(inode); - else - inode_unlock_shared(inode); + fuse_dio_unlock(iocb, exclusive); return res; } @@ -1662,30 +1799,34 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa) if (wpa->bucket) fuse_sync_bucket_dec(wpa->bucket); - for (i = 0; i < ap->num_pages; i++) - __free_page(ap->pages[i]); + for (i = 0; i < ap->num_folios; i++) + folio_put(ap->folios[i]); - if (wpa->ia.ff) - fuse_file_put(wpa->ia.ff, false, false); + fuse_file_put(wpa->ia.ff, false); - kfree(ap->pages); + kfree(ap->folios); kfree(wpa); } -static void fuse_writepage_finish(struct fuse_mount *fm, - struct fuse_writepage_args *wpa) +static void fuse_writepage_finish_stat(struct inode *inode, struct folio *folio) +{ + struct backing_dev_info *bdi = inode_to_bdi(inode); + + dec_wb_stat(&bdi->wb, WB_WRITEBACK); + node_stat_sub_folio(folio, NR_WRITEBACK_TEMP); + wb_writeout_inc(&bdi->wb); +} + +static void fuse_writepage_finish(struct fuse_writepage_args *wpa) { struct fuse_args_pages *ap = &wpa->ia.ap; struct inode *inode = wpa->inode; struct fuse_inode *fi = get_fuse_inode(inode); - struct backing_dev_info *bdi = inode_to_bdi(inode); int i; - for (i = 0; i < ap->num_pages; i++) { - dec_wb_stat(&bdi->wb, WB_WRITEBACK); - dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP); - wb_writeout_inc(&bdi->wb); - } + for (i = 0; i < ap->num_folios; i++) + fuse_writepage_finish_stat(inode, ap->folios[i]); + wake_up(&fi->page_waitq); } @@ -1699,7 +1840,8 @@ __acquires(fi->lock) struct fuse_inode *fi = get_fuse_inode(wpa->inode); struct fuse_write_in *inarg = &wpa->ia.write.in; struct fuse_args *args = &wpa->ia.ap.args; - __u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE; + /* Currently, all folios in FUSE are one page */ + __u64 data_size = wpa->ia.ap.num_folios * PAGE_SIZE; int err; fi->writectr++; @@ -1732,19 +1874,15 @@ __acquires(fi->lock) out_free: fi->writectr--; rb_erase(&wpa->writepages_entry, &fi->writepages); - fuse_writepage_finish(fm, wpa); + fuse_writepage_finish(wpa); spin_unlock(&fi->lock); /* After rb_erase() aux request list is private */ for (aux = wpa->next; aux; aux = next) { - struct backing_dev_info *bdi = inode_to_bdi(aux->inode); - next = aux->next; aux->next = NULL; - - dec_wb_stat(&bdi->wb, WB_WRITEBACK); - dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP); - wb_writeout_inc(&bdi->wb); + fuse_writepage_finish_stat(aux->inode, + aux->ia.ap.folios[0]); fuse_writepage_free(aux); } @@ -1779,11 +1917,11 @@ static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root, struct fuse_writepage_args *wpa) 
{ pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT; - pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1; + pgoff_t idx_to = idx_from + wpa->ia.ap.num_folios - 1; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; - WARN_ON(!wpa->ia.ap.num_pages); + WARN_ON(!wpa->ia.ap.num_folios); while (*p) { struct fuse_writepage_args *curr; pgoff_t curr_index; @@ -1794,7 +1932,7 @@ static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root, WARN_ON(curr->inode != wpa->inode); curr_index = curr->ia.write.in.offset >> PAGE_SHIFT; - if (idx_from >= curr_index + curr->ia.ap.num_pages) + if (idx_from >= curr_index + curr->ia.ap.num_folios) p = &(*p)->rb_right; else if (idx_to < curr_index) p = &(*p)->rb_left; @@ -1839,7 +1977,6 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args, wpa->next = next->next; next->next = NULL; - next->ia.ff = fuse_file_get(wpa->ia.ff); tree_insert(&fi->writepages, next); /* @@ -1868,7 +2005,7 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args, fuse_send_writepage(fm, next, inarg->offset + inarg->size); } fi->writectr--; - fuse_writepage_finish(fm, wpa); + fuse_writepage_finish(wpa); spin_unlock(&fi->lock); fuse_writepage_free(wpa); } @@ -1914,7 +2051,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc) ff = __fuse_write_file_get(fi); err = fuse_flush_times(inode, ff); if (ff) - fuse_file_put(ff, false, false); + fuse_file_put(ff, false); return err; } @@ -1927,9 +2064,9 @@ static struct fuse_writepage_args *fuse_writepage_args_alloc(void) wpa = kzalloc(sizeof(*wpa), GFP_NOFS); if (wpa) { ap = &wpa->ia.ap; - ap->num_pages = 0; - ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs); - if (!ap->pages) { + ap->num_folios = 0; + ap->folios = fuse_folios_alloc(1, GFP_NOFS, &ap->descs); + if (!ap->folios) { kfree(wpa); wpa = NULL; } @@ -1952,49 +2089,77 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc, rcu_read_unlock(); } -static int fuse_writepage_locked(struct page *page) +static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio, + struct folio *tmp_folio, uint32_t folio_index) { - struct address_space *mapping = page->mapping; - struct inode *inode = mapping->host; + struct inode *inode = folio->mapping->host; + struct fuse_args_pages *ap = &wpa->ia.ap; + + folio_copy(tmp_folio, folio); + + ap->folios[folio_index] = tmp_folio; + ap->descs[folio_index].offset = 0; + ap->descs[folio_index].length = PAGE_SIZE; + + inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); + node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP); +} + +static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio, + struct fuse_file *ff) +{ + struct inode *inode = folio->mapping->host; struct fuse_conn *fc = get_fuse_conn(inode); - struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_writepage_args *wpa; struct fuse_args_pages *ap; - struct page *tmp_page; - int error = -ENOMEM; - - set_page_writeback(page); wpa = fuse_writepage_args_alloc(); if (!wpa) - goto err; + return NULL; + + fuse_writepage_add_to_bucket(fc, wpa); + fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio), 0); + wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; + wpa->inode = inode; + wpa->ia.ff = ff; + ap = &wpa->ia.ap; + ap->args.in_pages = true; + ap->args.end = fuse_writepage_end; + + return wpa; +} + +static int fuse_writepage_locked(struct folio *folio) +{ + struct address_space *mapping = folio->mapping; + struct inode *inode = 
mapping->host; + struct fuse_inode *fi = get_fuse_inode(inode); + struct fuse_writepage_args *wpa; + struct fuse_args_pages *ap; + struct folio *tmp_folio; + struct fuse_file *ff; + int error = -ENOMEM; - tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); - if (!tmp_page) - goto err_free; + tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0); + if (!tmp_folio) + goto err; error = -EIO; - wpa->ia.ff = fuse_write_file_get(fi); - if (!wpa->ia.ff) + ff = fuse_write_file_get(fi); + if (!ff) goto err_nofile; - fuse_writepage_add_to_bucket(fc, wpa); - fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0); + wpa = fuse_writepage_args_setup(folio, ff); + error = -ENOMEM; + if (!wpa) + goto err_writepage_args; - copy_highpage(tmp_page, page); - wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; - wpa->next = NULL; - ap->args.in_pages = true; - ap->num_pages = 1; - ap->pages[0] = tmp_page; - ap->descs[0].offset = 0; - ap->descs[0].length = PAGE_SIZE; - ap->args.end = fuse_writepage_end; - wpa->inode = inode; + ap = &wpa->ia.ap; + ap->num_folios = 1; - inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); - inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); + folio_start_writeback(folio); + fuse_writepage_args_page_fill(wpa, folio, tmp_folio, 0); spin_lock(&fi->lock); tree_insert(&fi->writepages, wpa); @@ -2002,78 +2167,49 @@ static int fuse_writepage_locked(struct page *page) fuse_flush_writepages(inode); spin_unlock(&fi->lock); - end_page_writeback(page); + folio_end_writeback(folio); return 0; +err_writepage_args: + fuse_file_put(ff, false); err_nofile: - __free_page(tmp_page); -err_free: - kfree(wpa); + folio_put(tmp_folio); err: - mapping_set_error(page->mapping, error); - end_page_writeback(page); + mapping_set_error(folio->mapping, error); return error; } -static int fuse_writepage(struct page *page, struct writeback_control *wbc) -{ - struct fuse_conn *fc = get_fuse_conn(page->mapping->host); - int err; - - if (fuse_page_is_writeback(page->mapping->host, page->index)) { - /* - * ->writepages() should be called for sync() and friends. 
We - * should only get here on direct reclaim and then we are - * allowed to skip a page which is already in flight - */ - WARN_ON(wbc->sync_mode == WB_SYNC_ALL); - - redirty_page_for_writepage(wbc, page); - unlock_page(page); - return 0; - } - - if (wbc->sync_mode == WB_SYNC_NONE && - fc->num_background >= fc->congestion_threshold) - return AOP_WRITEPAGE_ACTIVATE; - - err = fuse_writepage_locked(page); - unlock_page(page); - - return err; -} - struct fuse_fill_wb_data { struct fuse_writepage_args *wpa; struct fuse_file *ff; struct inode *inode; - struct page **orig_pages; - unsigned int max_pages; + struct folio **orig_folios; + unsigned int max_folios; }; static bool fuse_pages_realloc(struct fuse_fill_wb_data *data) { struct fuse_args_pages *ap = &data->wpa->ia.ap; struct fuse_conn *fc = get_fuse_conn(data->inode); - struct page **pages; - struct fuse_page_desc *descs; - unsigned int npages = min_t(unsigned int, - max_t(unsigned int, data->max_pages * 2, - FUSE_DEFAULT_MAX_PAGES_PER_REQ), + struct folio **folios; + struct fuse_folio_desc *descs; + unsigned int nfolios = min_t(unsigned int, + max_t(unsigned int, data->max_folios * 2, + FUSE_DEFAULT_MAX_PAGES_PER_REQ), fc->max_pages); - WARN_ON(npages <= data->max_pages); + WARN_ON(nfolios <= data->max_folios); - pages = fuse_pages_alloc(npages, GFP_NOFS, &descs); - if (!pages) + folios = fuse_folios_alloc(nfolios, GFP_NOFS, &descs); + if (!folios) return false; - memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages); - memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages); - kfree(ap->pages); - ap->pages = pages; + memcpy(folios, ap->folios, sizeof(struct folio *) * ap->num_folios); + memcpy(descs, ap->descs, sizeof(struct fuse_folio_desc) * ap->num_folios); + kfree(ap->folios); + ap->folios = folios; ap->descs = descs; - data->max_pages = npages; + data->max_folios = nfolios; return true; } @@ -2083,17 +2219,16 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data) struct fuse_writepage_args *wpa = data->wpa; struct inode *inode = data->inode; struct fuse_inode *fi = get_fuse_inode(inode); - int num_pages = wpa->ia.ap.num_pages; + int num_folios = wpa->ia.ap.num_folios; int i; - wpa->ia.ff = fuse_file_get(data->ff); spin_lock(&fi->lock); list_add_tail(&wpa->queue_entry, &fi->queued_writes); fuse_flush_writepages(inode); spin_unlock(&fi->lock); - for (i = 0; i < num_pages; i++) - end_page_writeback(data->orig_pages[i]); + for (i = 0; i < num_folios; i++) + folio_end_writeback(data->orig_folios[i]); } /* @@ -2104,15 +2239,15 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data) * swapping the new temp page with the old one. 
*/ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, - struct page *page) + struct folio *folio) { struct fuse_inode *fi = get_fuse_inode(new_wpa->inode); struct fuse_writepage_args *tmp; struct fuse_writepage_args *old_wpa; struct fuse_args_pages *new_ap = &new_wpa->ia.ap; - WARN_ON(new_ap->num_pages != 0); - new_ap->num_pages = 1; + WARN_ON(new_ap->num_folios != 0); + new_ap->num_folios = 1; spin_lock(&fi->lock); old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa); @@ -2126,9 +2261,9 @@ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, WARN_ON(tmp->inode != new_wpa->inode); curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT; - if (curr_index == page->index) { - WARN_ON(tmp->ia.ap.num_pages != 1); - swap(tmp->ia.ap.pages[0], new_ap->pages[0]); + if (curr_index == folio->index) { + WARN_ON(tmp->ia.ap.num_folios != 1); + swap(tmp->ia.ap.folios[0], new_ap->folios[0]); break; } } @@ -2141,22 +2276,19 @@ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, spin_unlock(&fi->lock); if (tmp) { - struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode); - - dec_wb_stat(&bdi->wb, WB_WRITEBACK); - dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP); - wb_writeout_inc(&bdi->wb); + fuse_writepage_finish_stat(new_wpa->inode, + folio); fuse_writepage_free(new_wpa); } return false; } -static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page, +static bool fuse_writepage_need_send(struct fuse_conn *fc, struct folio *folio, struct fuse_args_pages *ap, struct fuse_fill_wb_data *data) { - WARN_ON(!ap->num_pages); + WARN_ON(!ap->num_folios); /* * Being under writeback is unlikely but possible. For example direct @@ -2164,23 +2296,23 @@ static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page, * the pages are faulted with get_user_pages(), and then after the read * completed. */ - if (fuse_page_is_writeback(data->inode, page->index)) + if (fuse_folio_is_writeback(data->inode, folio)) return true; /* Reached max pages */ - if (ap->num_pages == fc->max_pages) + if (ap->num_folios == fc->max_pages) return true; /* Reached max write bytes */ - if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write) + if ((ap->num_folios + 1) * PAGE_SIZE > fc->max_write) return true; /* Discontinuity */ - if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index) + if (data->orig_folios[ap->num_folios - 1]->index + 1 != folio_index(folio)) return true; /* Need to grow the pages array? If so, did the expansion fail? 
*/ - if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data)) + if (ap->num_folios == data->max_folios && !fuse_pages_realloc(data)) return true; return false; @@ -2195,7 +2327,7 @@ static int fuse_writepages_fill(struct folio *folio, struct inode *inode = data->inode; struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); - struct page *tmp_page; + struct folio *tmp_folio; int err; if (!data->ff) { @@ -2205,14 +2337,14 @@ static int fuse_writepages_fill(struct folio *folio, goto out_unlock; } - if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) { + if (wpa && fuse_writepage_need_send(fc, folio, ap, data)) { fuse_writepages_send(data); data->wpa = NULL; } err = -ENOMEM; - tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); - if (!tmp_page) + tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0); + if (!tmp_folio) goto out_unlock; /* @@ -2224,40 +2356,25 @@ static int fuse_writepages_fill(struct folio *folio, * This is ensured by holding the page lock in page_mkwrite() while * checking fuse_page_is_writeback(). We already hold the page lock * since clear_page_dirty_for_io() and keep it held until we add the - * request to the fi->writepages list and increment ap->num_pages. + * request to the fi->writepages list and increment ap->num_folios. * After this fuse_page_is_writeback() will indicate that the page is * under writeback, so we can release the page lock. */ if (data->wpa == NULL) { err = -ENOMEM; - wpa = fuse_writepage_args_alloc(); + wpa = fuse_writepage_args_setup(folio, data->ff); if (!wpa) { - __free_page(tmp_page); + folio_put(tmp_folio); goto out_unlock; } - fuse_writepage_add_to_bucket(fc, wpa); - - data->max_pages = 1; - + fuse_file_get(wpa->ia.ff); + data->max_folios = 1; ap = &wpa->ia.ap; - fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0); - wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; - wpa->next = NULL; - ap->args.in_pages = true; - ap->args.end = fuse_writepage_end; - ap->num_pages = 0; - wpa->inode = inode; } folio_start_writeback(folio); - copy_highpage(tmp_page, &folio->page); - ap->pages[ap->num_pages] = tmp_page; - ap->descs[ap->num_pages].offset = 0; - ap->descs[ap->num_pages].length = PAGE_SIZE; - data->orig_pages[ap->num_pages] = &folio->page; - - inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); - inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); + fuse_writepage_args_page_fill(wpa, folio, tmp_folio, ap->num_folios); + data->orig_folios[ap->num_folios] = folio; err = 0; if (data->wpa) { @@ -2266,9 +2383,9 @@ static int fuse_writepages_fill(struct folio *folio, * fuse_page_is_writeback(). 
*/ spin_lock(&fi->lock); - ap->num_pages++; + ap->num_folios++; spin_unlock(&fi->lock); - } else if (fuse_writepage_add(wpa, &folio->page)) { + } else if (fuse_writepage_add(wpa, folio)) { data->wpa = wpa; } else { folio_end_writeback(folio); @@ -2300,21 +2417,21 @@ static int fuse_writepages(struct address_space *mapping, data.ff = NULL; err = -ENOMEM; - data.orig_pages = kcalloc(fc->max_pages, - sizeof(struct page *), - GFP_NOFS); - if (!data.orig_pages) + data.orig_folios = kcalloc(fc->max_pages, + sizeof(struct folio *), + GFP_NOFS); + if (!data.orig_folios) goto out; err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); if (data.wpa) { - WARN_ON(!data.wpa->ia.ap.num_pages); + WARN_ON(!data.wpa->ia.ap.num_folios); fuse_writepages_send(&data); } if (data.ff) - fuse_file_put(data.ff, false, false); + fuse_file_put(data.ff, false); - kfree(data.orig_pages); + kfree(data.orig_folios); out: return err; } @@ -2328,41 +2445,42 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping, { pgoff_t index = pos >> PAGE_SHIFT; struct fuse_conn *fc = get_fuse_conn(file_inode(file)); - struct page *page; + struct folio *folio; loff_t fsize; int err = -ENOMEM; WARN_ON(!fc->writeback_cache); - page = grab_cache_page_write_begin(mapping, index); - if (!page) + folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, + mapping_gfp_mask(mapping)); + if (IS_ERR(folio)) goto error; - fuse_wait_on_page_writeback(mapping->host, page->index); + fuse_wait_on_page_writeback(mapping->host, folio->index); - if (PageUptodate(page) || len == PAGE_SIZE) + if (folio_test_uptodate(folio) || len >= folio_size(folio)) goto success; /* - * Check if the start this page comes after the end of file, in which - * case the readpage can be optimized away. + * Check if the start of this folio comes after the end of file, + * in which case the readpage can be optimized away. */ fsize = i_size_read(mapping->host); - if (fsize <= (pos & PAGE_MASK)) { - size_t off = pos & ~PAGE_MASK; + if (fsize <= folio_pos(folio)) { + size_t off = offset_in_folio(folio, pos); if (off) - zero_user_segment(page, 0, off); + folio_zero_segment(folio, 0, off); goto success; } - err = fuse_do_readpage(file, page); + err = fuse_do_readfolio(file, folio); if (err) goto cleanup; success: - *pagep = page; + *pagep = &folio->page; return 0; cleanup: - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); error: return err; } @@ -2371,29 +2489,30 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { - struct inode *inode = page->mapping->host; + struct folio *folio = page_folio(page); + struct inode *inode = folio->mapping->host; /* Haven't copied anything? Skip zeroing, size extending, dirtying. 
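The tail-zeroing rule in fuse_write_end() below is easiest to see with concrete numbers: when the folio is not uptodate, only the bytes from the end of the copied data to the end of the page are zeroed. A worked plain-C example, which prints "zero bytes [300, 4096)"::

  #include <stdio.h>

  #define PAGE_SIZE 4096UL
  #define PAGE_MASK (~(PAGE_SIZE - 1))

  int main(void)
  {
          /* 200 bytes were copied at offset 100 of the page at 8192. */
          unsigned long pos = 8192 + 100 + 200;   /* end of copied data */
          size_t endoff = pos & ~PAGE_MASK;       /* 300: offset in page */

          if (endoff) /* not page-aligned: zero the tail of the page */
                  printf("zero bytes [%zu, %lu)\n", endoff, PAGE_SIZE);
          return 0;
  }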
*/ if (!copied) goto unlock; pos += copied; - if (!PageUptodate(page)) { + if (!folio_test_uptodate(folio)) { /* Zero any unwritten bytes at the end of the page */ size_t endoff = pos & ~PAGE_MASK; if (endoff) - zero_user_segment(page, endoff, PAGE_SIZE); - SetPageUptodate(page); + folio_zero_segment(folio, endoff, PAGE_SIZE); + folio_mark_uptodate(folio); } if (pos > inode->i_size) i_size_write(inode, pos); - set_page_dirty(page); + folio_mark_dirty(folio); unlock: - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); return copied; } @@ -2406,7 +2525,7 @@ static int fuse_launder_folio(struct folio *folio) /* Serialize with pending writeback for the same page */ fuse_wait_on_page_writeback(inode, folio->index); - err = fuse_writepage_locked(&folio->page); + err = fuse_writepage_locked(folio); if (!err) fuse_wait_on_page_writeback(inode, folio->index); } @@ -2442,17 +2561,17 @@ static void fuse_vma_close(struct vm_area_struct *vma) */ static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf) { - struct page *page = vmf->page; + struct folio *folio = page_folio(vmf->page); struct inode *inode = file_inode(vmf->vma->vm_file); file_update_time(vmf->vma->vm_file); - lock_page(page); - if (page->mapping != inode->i_mapping) { - unlock_page(page); + folio_lock(folio); + if (folio->mapping != inode->i_mapping) { + folio_unlock(folio); return VM_FAULT_NOPAGE; } - fuse_wait_on_page_writeback(inode, page->index); + fuse_wait_on_folio_writeback(inode, folio); return VM_FAULT_LOCKED; } @@ -2467,11 +2586,16 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fm->fc; + int rc; /* DAX mmap is superior to direct_io mmap */ if (FUSE_IS_DAX(file_inode(file))) return fuse_dax_mmap(file, vma); + /* + * FOPEN_DIRECT_IO handling is special compared to O_DIRECT, + * as it does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP. + */ if (ff->open_flags & FOPEN_DIRECT_IO) { /* * Can't provide the coherency needed for MAP_SHARED @@ -2486,6 +2610,15 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) /* MAP_PRIVATE */ return generic_file_mmap(file, vma); } + + /* + * First mmap of direct_io file enters caching inode io mode. + * Also waits for parallel dio writers to go into serial mode + * (exclusive instead of shared lock).
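fuse_file_cached_io_start(), declared later in fuse_i.h and implemented in iomode.c, hinges on the sign of fi->iocachectr: positive counts page-cache (cached IO) users, negative counts parallel-dio users, and the two modes exclude each other. A plain-C model of that exclusion, assuming this sign convention; the kernel versions block on direct_io_waitq rather than failing::

  /* Plain-C model, not the kernel implementation. */
  struct iomode_state {
          int iocachectr; /* >0: cached-IO users, <0: parallel-dio users */
  };

  static int start_cached_io(struct iomode_state *s)
  {
          if (s->iocachectr < 0)
                  return -1;      /* wait for dio writers to drain */
          s->iocachectr++;        /* e.g. first mmap of a direct_io file */
          return 0;
  }

  static int start_uncached_io(struct iomode_state *s)
  {
          if (s->iocachectr > 0)
                  return -1;      /* caching mode: take the exclusive lock */
          s->iocachectr--;
          return 0;
  }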
+ */ + rc = fuse_file_cached_io_start(file_inode(file), ff); + if (rc) + return rc; + } if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) @@ -2589,10 +2722,6 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) return -ENOLCK; } - /* Unlock on close is handled by the flush method */ - if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX) - return 0; - fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg); err = fuse_simple_request(fm, &args); @@ -3234,10 +3363,10 @@ static const struct file_operations fuse_file_operations = { static const struct address_space_operations fuse_file_aops = { .read_folio = fuse_read_folio, .readahead = fuse_readahead, - .writepage = fuse_writepage, .writepages = fuse_writepages, .launder_folio = fuse_launder_folio, .dirty_folio = filemap_dirty_folio, + .migrate_folio = filemap_migrate_folio, .bmap = fuse_bmap, .direct_IO = fuse_direct_IO, .write_begin = fuse_write_begin, @@ -3254,7 +3383,9 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags) INIT_LIST_HEAD(&fi->write_files); INIT_LIST_HEAD(&fi->queued_writes); fi->writectr = 0; + fi->iocachectr = 0; init_waitqueue_head(&fi->page_waitq); + init_waitqueue_head(&fi->direct_io_waitq); fi->writepages = RB_ROOT; if (IS_ENABLED(CONFIG_FUSE_DAX)) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 4ce1a6fdc94f030d84aa6049aadef048b4d3ceef..f4a47da5575b84ae68ea4096d5d72ee12a7f7fed 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -31,13 +31,11 @@ #include #include #include +#include /** Default max number of pages that can be used in a single read request */ #define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32 -/** Maximum of max_pages received in init_out */ -#define FUSE_MAX_MAX_PAGES 256 - /** Bias for fi->writectr, meaning new writepages must not be sent */ #define FUSE_NOWRITE INT_MIN @@ -47,6 +45,9 @@ /** Number of dentries for each connection in the control filesystem */ #define FUSE_CTL_NUM_DENTRIES 5 +/** Maximum of max_pages received in init_out */ +extern unsigned int fuse_max_pages_limit; + /** List of active connections */ extern struct list_head fuse_conn_list; @@ -111,7 +112,7 @@ struct fuse_inode { u64 attr_version; union { - /* Write related fields (regular file only) */ + /* read/write io cache (regular file only) */ struct { /* Files usable in writepage. Protected by fi->lock */ struct list_head write_files; @@ -123,9 +124,15 @@ struct fuse_inode { * (FUSE_NOWRITE) means more writes are blocked */ int writectr; + /** Number of files/maps using page cache */ + int iocachectr; + /* Waitq for writepage completion */ wait_queue_head_t page_waitq; + /* waitq for direct-io completion */ + wait_queue_head_t direct_io_waitq; + /* List of writepage requests (pending or sent) */ struct rb_root writepages; }; @@ -187,6 +194,8 @@ enum { FUSE_I_BAD, /* Has btime */ FUSE_I_BTIME, + /* Wants or already has page cache IO */ + FUSE_I_CACHE_IO_MODE, }; struct fuse_conn; @@ -221,12 +230,6 @@ struct fuse_file { /* Readdir related */ struct { - /* - * Protects below fields against (crazy) parallel readdir on - * same open file. Uncontended in the normal case. - */ - struct mutex lock; - /* Dir stream position */ loff_t pos; @@ -244,6 +247,9 @@ struct fuse_file { /** Wait queue head for poll */ wait_queue_head_t poll_wait; + /** Does file hold a fi->iocachectr refcount? */ + enum { IOM_NONE, IOM_CACHED, IOM_UNCACHED } iomode; + /** Has flock been performed on this file?
*/ bool flock:1; }; @@ -260,8 +266,8 @@ struct fuse_arg { void *value; }; -/** FUSE page descriptor */ -struct fuse_page_desc { +/** FUSE folio descriptor */ +struct fuse_folio_desc { unsigned int length; unsigned int offset; }; @@ -283,16 +289,20 @@ struct fuse_args { bool page_replace:1; bool may_block:1; bool is_ext:1; + bool is_pinned:1; + bool invalidate_vmap:1; struct fuse_in_arg in_args[3]; struct fuse_arg out_args[2]; void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error); + /* Used for kvec iter backed by vmalloc address */ + void *vmap_base; }; struct fuse_args_pages { struct fuse_args args; - struct page **pages; - struct fuse_page_desc *descs; - unsigned int num_pages; + struct folio **folios; + struct fuse_folio_desc *descs; + unsigned int num_folios; }; #define FUSE_ARGS(args) struct fuse_args args = {} @@ -410,22 +420,19 @@ struct fuse_iqueue; */ struct fuse_iqueue_ops { /** - * Signal that a forget has been queued + * Send one forget */ - void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq) - __releases(fiq->lock); + void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link); /** - * Signal that an INTERRUPT request has been queued + * Send interrupt for request */ - void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq) - __releases(fiq->lock); + void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req); /** - * Signal that a request has been queued + * Send one request */ - void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq) - __releases(fiq->lock); + void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req); /** * Clean up when fuse_iqueue is destroyed @@ -818,13 +825,16 @@ struct fuse_conn { /* Is statx not implemented by fs? */ unsigned int no_statx:1; + /* Use pages instead of pointer for kernel I/O */ + unsigned int use_pages_for_kvec_io:1; + /** The number of requests waiting for completion */ atomic_t num_waiting; /** Negotiated minor version */ unsigned minor; - /** Entry on the fuse_mount_list */ + /** Entry on the fuse_conn_list */ struct list_head entry; /** Device ID from the root super block */ @@ -948,25 +958,25 @@ static inline bool fuse_is_bad(struct inode *inode) return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state)); } -static inline struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags, - struct fuse_page_desc **desc) +static inline struct folio **fuse_folios_alloc(unsigned int nfolios, gfp_t flags, + struct fuse_folio_desc **desc) { - struct page **pages; + struct folio **folios; - pages = kzalloc(npages * (sizeof(struct page *) + - sizeof(struct fuse_page_desc)), flags); - *desc = (void *) (pages + npages); + folios = kzalloc(nfolios * (sizeof(struct folio *) + + sizeof(struct fuse_folio_desc)), flags); + *desc = (void *) (folios + nfolios); - return pages; + return folios; } -static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs, - unsigned int index, - unsigned int nr_pages) +static inline void fuse_folio_descs_length_init(struct fuse_folio_desc *descs, + unsigned int index, + unsigned int nr_folios) { int i; - for (i = index; i < index + nr_pages; i++) + for (i = index; i < index + nr_folios; i++) descs[i].length = PAGE_SIZE - descs[i].offset; } @@ -1003,10 +1013,6 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, struct fuse_forget_link *fuse_alloc_forget(void); -struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, - unsigned int max, - unsigned int *countp); - /* * Initialize READ or READDIR 
request */ @@ -1019,7 +1025,7 @@ struct fuse_io_args { struct { struct fuse_write_in in; struct fuse_write_out out; - bool page_locked; + bool folio_locked; } write; }; struct fuse_args_pages ap; @@ -1031,14 +1037,9 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, size_t count, int opcode); -/** - * Send OPEN or OPENDIR request - */ -int fuse_open_common(struct inode *inode, struct file *file, bool isdir); - -struct fuse_file *fuse_file_alloc(struct fuse_mount *fm); +struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release); void fuse_file_free(struct fuse_file *ff); -void fuse_finish_open(struct inode *inode, struct file *file); +int fuse_finish_open(struct inode *inode, struct file *file); void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, unsigned int flags); @@ -1348,11 +1349,38 @@ int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa); int fuse_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa); -/* file.c */ +/* iomode.c */ +int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff); +int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff); +void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff); +int fuse_file_io_open(struct file *file, struct inode *inode); +void fuse_file_io_release(struct fuse_file *ff, struct inode *inode); + +/* file.c */ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, unsigned int open_flags, bool isdir); void fuse_file_release(struct inode *inode, struct fuse_file *ff, unsigned int open_flags, fl_owner_t id, bool isdir); +typedef int (*fuse_mount_cb_t)(struct file *file); +extern fuse_mount_cb_t fuse_mount_callback; + +#if IS_ENABLED(CONFIG_VIRT_FUSE) +static inline bool is_virtfuse_device(struct file *file) +{ + return iminor(file_inode(file)) != FUSE_MINOR; +} +#else +static inline bool is_virtfuse_device(struct file *file) { return false; } +#endif + +#ifdef CONFIG_SYSCTL +extern int fuse_sysctl_register(void); +extern void fuse_sysctl_unregister(void); +#else +#define fuse_sysctl_register() (0) +#define fuse_sysctl_unregister() do { } while (0) +#endif /* CONFIG_SYSCTL */ + #endif /* _FS_FUSE_I_H */ diff --git a/fs/fuse/fuse_trace.h b/fs/fuse/fuse_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..bbe9ddd8c71696ddcbca055f6c4c451661bb4444 --- /dev/null +++ b/fs/fuse/fuse_trace.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fuse + +#if !defined(_TRACE_FUSE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FUSE_H + +#include + +#define OPCODES \ + EM( FUSE_LOOKUP, "FUSE_LOOKUP") \ + EM( FUSE_FORGET, "FUSE_FORGET") \ + EM( FUSE_GETATTR, "FUSE_GETATTR") \ + EM( FUSE_SETATTR, "FUSE_SETATTR") \ + EM( FUSE_READLINK, "FUSE_READLINK") \ + EM( FUSE_SYMLINK, "FUSE_SYMLINK") \ + EM( FUSE_MKNOD, "FUSE_MKNOD") \ + EM( FUSE_MKDIR, "FUSE_MKDIR") \ + EM( FUSE_UNLINK, "FUSE_UNLINK") \ + EM( FUSE_RMDIR, "FUSE_RMDIR") \ + EM( FUSE_RENAME, "FUSE_RENAME") \ + EM( FUSE_LINK, "FUSE_LINK") \ + EM( FUSE_OPEN, "FUSE_OPEN") \ + EM( FUSE_READ, "FUSE_READ") \ + EM( FUSE_WRITE, "FUSE_WRITE") \ + EM( FUSE_STATFS, "FUSE_STATFS") \ + EM( FUSE_RELEASE, "FUSE_RELEASE") \ + EM( FUSE_FSYNC, "FUSE_FSYNC") \ + EM( FUSE_SETXATTR, "FUSE_SETXATTR") \ + EM( FUSE_GETXATTR, "FUSE_GETXATTR") \ + EM( FUSE_LISTXATTR, "FUSE_LISTXATTR") \ + EM( FUSE_REMOVEXATTR, "FUSE_REMOVEXATTR") \ + EM( FUSE_FLUSH, "FUSE_FLUSH") \ + EM( FUSE_INIT, 
"FUSE_INIT") \ + EM( FUSE_OPENDIR, "FUSE_OPENDIR") \ + EM( FUSE_READDIR, "FUSE_READDIR") \ + EM( FUSE_RELEASEDIR, "FUSE_RELEASEDIR") \ + EM( FUSE_FSYNCDIR, "FUSE_FSYNCDIR") \ + EM( FUSE_GETLK, "FUSE_GETLK") \ + EM( FUSE_SETLK, "FUSE_SETLK") \ + EM( FUSE_SETLKW, "FUSE_SETLKW") \ + EM( FUSE_ACCESS, "FUSE_ACCESS") \ + EM( FUSE_CREATE, "FUSE_CREATE") \ + EM( FUSE_INTERRUPT, "FUSE_INTERRUPT") \ + EM( FUSE_BMAP, "FUSE_BMAP") \ + EM( FUSE_DESTROY, "FUSE_DESTROY") \ + EM( FUSE_IOCTL, "FUSE_IOCTL") \ + EM( FUSE_POLL, "FUSE_POLL") \ + EM( FUSE_NOTIFY_REPLY, "FUSE_NOTIFY_REPLY") \ + EM( FUSE_BATCH_FORGET, "FUSE_BATCH_FORGET") \ + EM( FUSE_FALLOCATE, "FUSE_FALLOCATE") \ + EM( FUSE_READDIRPLUS, "FUSE_READDIRPLUS") \ + EM( FUSE_RENAME2, "FUSE_RENAME2") \ + EM( FUSE_LSEEK, "FUSE_LSEEK") \ + EM( FUSE_COPY_FILE_RANGE, "FUSE_COPY_FILE_RANGE") \ + EM( FUSE_SETUPMAPPING, "FUSE_SETUPMAPPING") \ + EM( FUSE_REMOVEMAPPING, "FUSE_REMOVEMAPPING") \ + EM( FUSE_SYNCFS, "FUSE_SYNCFS") \ + EM( FUSE_TMPFILE, "FUSE_TMPFILE") \ + EM( FUSE_STATX, "FUSE_STATX") \ + EMe(CUSE_INIT, "CUSE_INIT") + +/* + * This will turn the above table into TRACE_DEFINE_ENUM() for each of the + * entries. + */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +OPCODES + +/* Now we redfine it with the table that __print_symbolic needs. */ +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} + +TRACE_EVENT(fuse_request_send, + TP_PROTO(const struct fuse_req *req), + + TP_ARGS(req), + + TP_STRUCT__entry( + __field(dev_t, connection) + __field(uint64_t, unique) + __field(enum fuse_opcode, opcode) + __field(uint32_t, len) + ), + + TP_fast_assign( + __entry->connection = req->fm->fc->dev; + __entry->unique = req->in.h.unique; + __entry->opcode = req->in.h.opcode; + __entry->len = req->in.h.len; + ), + + TP_printk("connection %u req %llu opcode %u (%s) len %u ", + __entry->connection, __entry->unique, __entry->opcode, + __print_symbolic(__entry->opcode, OPCODES), __entry->len) +); + +TRACE_EVENT(fuse_request_end, + TP_PROTO(const struct fuse_req *req), + + TP_ARGS(req), + + TP_STRUCT__entry( + __field(dev_t, connection) + __field(uint64_t, unique) + __field(uint32_t, len) + __field(int32_t, error) + ), + + TP_fast_assign( + __entry->connection = req->fm->fc->dev; + __entry->unique = req->in.h.unique; + __entry->len = req->out.h.len; + __entry->error = req->out.h.error; + ), + + TP_printk("connection %u req %llu len %u error %d", __entry->connection, + __entry->unique, __entry->len, __entry->error) +); + +#endif /* _TRACE_FUSE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 735abf426a0640f2be5df6a2f275fc9bda9d8482..093b7070406e38e1fb812797024834e788db4695 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -35,6 +35,8 @@ DEFINE_MUTEX(fuse_mutex); static int set_global_limit(const char *val, const struct kernel_param *kp); +unsigned int fuse_max_pages_limit = 1024; + unsigned max_user_bgreq; module_param_call(max_user_bgreq, set_global_limit, param_get_uint, &max_user_bgreq, 0644); @@ -63,6 +65,9 @@ MODULE_PARM_DESC(max_user_congthresh, static struct file_system_type fuseblk_fs_type; #endif +fuse_mount_cb_t fuse_mount_callback; +EXPORT_SYMBOL_GPL(fuse_mount_callback); + struct fuse_forget_link *fuse_alloc_forget(void) { return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT); @@ -944,7 +949,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm, fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); fc->user_ns = get_user_ns(user_ns); fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; - fc->max_pages_limit = FUSE_MAX_MAX_PAGES; + fc->max_pages_limit = fuse_max_pages_limit; INIT_LIST_HEAD(&fc->mounts); list_add(&fm->fc_entry, &fc->mounts); @@ -996,7 +1001,7 @@ static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode) attr.mode = mode; attr.ino = FUSE_ROOT_ID; attr.nlink = 1; - return fuse_iget(sb, 1, 0, &attr, 0, 0); + return fuse_iget(sb, FUSE_ROOT_ID, 0, &attr, 0, 0); } struct fuse_inode_handle { @@ -1138,6 +1143,11 @@ static struct dentry *fuse_get_parent(struct dentry *child) return parent; } +/* only for fid encoding; no support for file handle */ +static const struct export_operations fuse_export_fid_operations = { + .encode_fh = fuse_encode_fh, +}; + static const struct export_operations fuse_export_operations = { .fh_to_dentry = fuse_fh_to_dentry, .fh_to_parent = fuse_fh_to_parent, @@ -1312,6 +1322,10 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, fc->create_supp_group = 1; if (flags & FUSE_DIRECT_IO_ALLOW_MMAP) fc->direct_io_allow_mmap = 1; + if (flags & FUSE_NO_EXPORT_SUPPORT) + fm->sb->s_export_op = &fuse_export_fid_operations; + if (flags & FUSE_DELETE_STALE) + fc->delete_stale = 1; } else { ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; @@ -1358,7 +1372,9 @@ void fuse_send_init(struct fuse_mount *fm) FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA | FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT | FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP | - FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP; + FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP | + FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | + FUSE_DELETE_STALE; #ifdef CONFIG_FUSE_DAX if (fm->fc->dax) flags |= FUSE_MAP_ALIGNMENT; @@ -1515,8 +1531,8 @@ static void fuse_fill_attr_from_inode(struct fuse_attr *attr, .ctimensec = ctime.tv_nsec, .mode = fi->inode.i_mode, .nlink = fi->inode.i_nlink, - .uid = fi->inode.i_uid.val, - .gid = fi->inode.i_gid.val, + .uid = __kuid_val(fi->inode.i_uid), + .gid = __kgid_val(fi->inode.i_gid), .rdev = fi->inode.i_rdev, .blksize = 1u << fi->inode.i_blkbits, }; @@ -1553,6 +1569,7 @@ static int fuse_fill_super_submount(struct super_block *sb, sb->s_bdi = bdi_get(parent_sb->s_bdi); sb->s_xattr = parent_sb->s_xattr; + sb->s_export_op = parent_sb->s_export_op; sb->s_time_gran = parent_sb->s_time_gran; sb->s_blocksize = parent_sb->s_blocksize; sb->s_blocksize_bits = parent_sb->s_blocksize_bits; @@ -1753,10 +1770,12 @@ static int fuse_fill_super(struct super_block *sb, struct
fs_context *fsc) /* * Require mount to happen from the same user namespace which - * opened /dev/fuse to prevent potential attacks. + * opened /dev/fuse to prevent potential attacks. While for + * virtual fuse, the mount is always bound to init_user_ns. */ - if ((ctx->file->f_op != &fuse_dev_operations) || - (ctx->file->f_cred->user_ns != sb->s_user_ns)) + if (!is_virtfuse_device(ctx->file) && + ((ctx->file->f_op != &fuse_dev_operations) || + (ctx->file->f_cred->user_ns != sb->s_user_ns))) return -EINVAL; ctx->fudptr = &ctx->file->private_data; @@ -1791,6 +1810,7 @@ static int fuse_get_tree(struct fs_context *fsc) struct fuse_conn *fc; struct fuse_mount *fm; struct super_block *sb; + bool is_virtfuse; int err; fc = kmalloc(sizeof(*fc), GFP_KERNEL); @@ -1827,16 +1847,29 @@ static int fuse_get_tree(struct fs_context *fsc) * Allow creating a fuse mount with an already initialized fuse * connection */ + is_virtfuse = is_virtfuse_device(ctx->file); fud = READ_ONCE(ctx->file->private_data); - if (ctx->file->f_op == &fuse_dev_operations && fud) { + if ((ctx->file->f_op == &fuse_dev_operations || is_virtfuse) && fud) { fsc->sget_key = fud->fc; sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super); err = PTR_ERR_OR_ZERO(sb); if (!IS_ERR(sb)) fsc->root = dget(sb->s_root); } else { + /* bind sb to init_user_ns for virtfuse */ + if (is_virtfuse) + fsc->global = true; err = get_tree_nodev(fsc, fuse_fill_super); } + + if (is_virtfuse && !err) { + if (WARN_ON(!fuse_mount_callback)) + err = -EINVAL; + else + err = fuse_mount_callback(ctx->file); + if (err) + fc_drop_locked(fsc); + } out: if (fsc->s_fs_info) fuse_mount_destroy(fm); @@ -2014,8 +2047,14 @@ static int __init fuse_fs_init(void) if (err) goto out3; + err = fuse_sysctl_register(); + if (err) + goto out4; + return 0; + out4: + unregister_filesystem(&fuse_fs_type); out3: unregister_fuseblk(); out2: @@ -2026,6 +2065,7 @@ static int __init fuse_fs_init(void) static void fuse_fs_cleanup(void) { + fuse_sysctl_unregister(); unregister_filesystem(&fuse_fs_type); unregister_fuseblk(); diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c index 726640fa439e04a93277697fef4d359d8f7f6822..27115c618e94a9a57159655e135d6bda1d4846ed 100644 --- a/fs/fuse/ioctl.c +++ b/fs/fuse/ioctl.c @@ -201,12 +201,12 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); err = -ENOMEM; - ap.pages = fuse_pages_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs); + ap.folios = fuse_folios_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs); iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); - if (!ap.pages || !iov_page) + if (!ap.folios || !iov_page) goto out; - fuse_page_descs_length_init(ap.descs, 0, fm->fc->max_pages); + fuse_folio_descs_length_init(ap.descs, 0, fm->fc->max_pages); /* * If restricted, initialize IO parameters as encoded in @cmd. 
@@ -244,14 +244,13 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, err = -ENOMEM; if (max_pages > fm->fc->max_pages) goto out; - while (ap.num_pages < max_pages) { - ap.pages[ap.num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); - if (!ap.pages[ap.num_pages]) + while (ap.num_folios < max_pages) { + ap.folios[ap.num_folios] = folio_alloc(GFP_KERNEL | __GFP_HIGHMEM, 0); + if (!ap.folios[ap.num_folios]) goto out; - ap.num_pages++; + ap.num_folios++; } - /* okay, let's send it to the client */ ap.args.opcode = FUSE_IOCTL; ap.args.nodeid = ff->nodeid; @@ -265,8 +264,8 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, err = -EFAULT; iov_iter_init(&ii, ITER_SOURCE, in_iov, in_iovs, in_size); - for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) { - c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii); + for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_folios); i++) { + c = copy_folio_from_iter(ap.folios[i], 0, PAGE_SIZE, &ii); if (c != PAGE_SIZE && iov_iter_count(&ii)) goto out; } @@ -304,7 +303,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) goto out; - vaddr = kmap_local_page(ap.pages[0]); + vaddr = kmap_local_folio(ap.folios[0], 0); err = fuse_copy_ioctl_iovec(fm->fc, iov_page, vaddr, transferred, in_iovs + out_iovs, (flags & FUSE_IOCTL_COMPAT) != 0); @@ -332,17 +331,17 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, err = -EFAULT; iov_iter_init(&ii, ITER_DEST, out_iov, out_iovs, transferred); - for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) { - c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii); + for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_folios); i++) { + c = copy_folio_to_iter(ap.folios[i], 0, PAGE_SIZE, &ii); if (c != PAGE_SIZE && iov_iter_count(&ii)) goto out; } err = 0; out: free_page((unsigned long) iov_page); - while (ap.num_pages) - __free_page(ap.pages[--ap.num_pages]); - kfree(ap.pages); + while (ap.num_folios) + folio_put(ap.folios[--ap.num_folios]); + kfree(ap.folios); return err ? err : outarg.result; } diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c new file mode 100644 index 0000000000000000000000000000000000000000..ea47c76b9df114fb0c83ee1650722f2ada2c27d7 --- /dev/null +++ b/fs/fuse/iomode.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * FUSE inode io modes. + * + * Copyright (c) 2024 CTERA Networks. + */ + +#include "fuse_i.h" + +#include +#include +#include +#include + +/* + * Return true if need to wait for new opens in caching mode. + */ +static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi) +{ + return READ_ONCE(fi->iocachectr) < 0; +} + +/* + * Start cached io mode. + * + * Blocks new parallel dio writes and waits for the in-progress parallel dio + * writes to complete. + */ +int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + + /* There are no io modes if server does not implement open */ + if (!ff->release_args) + return 0; + + spin_lock(&fi->lock); + /* + * Setting the bit advises new direct-io writes to use an exclusive + * lock - without it the wait below might be forever. 
+ */ + while (fuse_is_io_cache_wait(fi)) { + set_bit(FUSE_I_CACHE_IO_MODE, &fi->state); + spin_unlock(&fi->lock); + wait_event(fi->direct_io_waitq, !fuse_is_io_cache_wait(fi)); + spin_lock(&fi->lock); + } + WARN_ON(ff->iomode == IOM_UNCACHED); + if (ff->iomode == IOM_NONE) { + ff->iomode = IOM_CACHED; + if (fi->iocachectr == 0) + set_bit(FUSE_I_CACHE_IO_MODE, &fi->state); + fi->iocachectr++; + } + spin_unlock(&fi->lock); + return 0; +} + +static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fi->lock); + WARN_ON(fi->iocachectr <= 0); + WARN_ON(ff->iomode != IOM_CACHED); + ff->iomode = IOM_NONE; + fi->iocachectr--; + if (fi->iocachectr == 0) + clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state); + spin_unlock(&fi->lock); +} + +/* Start strictly uncached io mode where cache access is not allowed */ +int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + int err = 0; + + spin_lock(&fi->lock); + if (fi->iocachectr > 0) { + err = -ETXTBSY; + goto unlock; + } + WARN_ON(ff->iomode != IOM_NONE); + fi->iocachectr--; + ff->iomode = IOM_UNCACHED; +unlock: + spin_unlock(&fi->lock); + return err; +} + +void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fi->lock); + WARN_ON(fi->iocachectr >= 0); + WARN_ON(ff->iomode != IOM_UNCACHED); + ff->iomode = IOM_NONE; + fi->iocachectr++; + if (!fi->iocachectr) + wake_up(&fi->direct_io_waitq); + spin_unlock(&fi->lock); +} + +/* Request access to submit new io to inode via open file */ +int fuse_file_io_open(struct file *file, struct inode *inode) +{ + struct fuse_file *ff = file->private_data; + int err; + + /* + * io modes are not relevant with DAX and with server that does not + * implement open. + */ + if (FUSE_IS_DAX(inode) || !ff->release_args) + return 0; + + /* + * FOPEN_PARALLEL_DIRECT_WRITES requires FOPEN_DIRECT_IO. + */ + if (!(ff->open_flags & FOPEN_DIRECT_IO)) + ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES; + + /* + * First caching file open enters caching inode io mode. + * + * Note that if user opens a file with O_DIRECT, but server did + * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT, + * so we put the inode in caching mode to prevent parallel dio. + */ + if (ff->open_flags & FOPEN_DIRECT_IO) + return 0; + + err = fuse_file_cached_io_start(inode, ff); + if (err) + goto fail; + + return 0; + +fail: + pr_debug("failed to open file in requested io mode (open_flags=0x%x, err=%i).\n", + ff->open_flags, err); + /* + * The file open mode determines the inode io mode. + * Using incorrect open mode is a server mistake, which results in + * user visible failure of open() with EIO error. + */ + return -EIO; +}
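/*
 * Editor's illustration, not part of the patch: iocachectr above is a signed
 * counter -- positive while caching opens exist, negative while parallel-dio
 * opens exist -- and each side refuses to start while the counter carries the
 * opposite sign. A minimal userspace sketch of that protocol; no locking or
 * waitqueue is shown (the real code holds fi->lock and, on the caching side,
 * sleeps on direct_io_waitq instead of failing), and the function names are
 * simplified stand-ins:
 */
#include <errno.h>
#include <stdio.h>

/* > 0: caching opens, < 0: parallel-dio opens, == 0: neither */
static int iocachectr;

static int cached_io_start(void)
{
	if (iocachectr < 0)
		return -EBUSY;	/* fuse_file_cached_io_start() waits here */
	iocachectr++;
	return 0;
}

static int uncached_io_start(void)
{
	if (iocachectr > 0)
		return -ETXTBSY;	/* caching users exist: deny parallel dio */
	iocachectr--;
	return 0;
}

int main(void)
{
	printf("cached open:   %d\n", cached_io_start());	/* 0, counter is now 1 */
	printf("uncached open: %d\n", uncached_io_start());	/* -ETXTBSY, mode conflict */
	return 0;
}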
+ +/* No more pending io and no new io possible to inode via open/mmapped file */ +void fuse_file_io_release(struct fuse_file *ff, struct inode *inode) +{ + /* + * Last parallel dio close allows caching inode io mode. + * Last caching file close exits caching inode io mode. + */ + switch (ff->iomode) { + case IOM_NONE: + /* Nothing to do */ + break; + case IOM_UNCACHED: + fuse_file_uncached_io_end(inode, ff); + break; + case IOM_CACHED: + fuse_file_cached_io_end(inode, ff); + break; + } +} diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index 9e6d587b3e67311f231baf434979abb8e40f6776..7ebdba80fa5261aa662176d2f4fcbe723d94c065 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -331,23 +331,23 @@ static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx) { int plus; ssize_t res; - struct page *page; + struct folio *folio; struct inode *inode = file_inode(file); struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_io_args ia = {}; struct fuse_args_pages *ap = &ia.ap; - struct fuse_page_desc desc = { .length = PAGE_SIZE }; + struct fuse_folio_desc desc = { .length = PAGE_SIZE }; u64 attr_version = 0; bool locked; - page = alloc_page(GFP_KERNEL); - if (!page) + folio = folio_alloc(GFP_KERNEL, 0); + if (!folio) return -ENOMEM; plus = fuse_use_readdirplus(inode, ctx); ap->args.out_pages = true; - ap->num_pages = 1; - ap->pages = &page; + ap->num_folios = 1; + ap->folios = &folio; ap->descs = &desc; if (plus) { attr_version = fuse_get_attr_version(fm->fc); @@ -367,15 +367,15 @@ static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx) if (ff->open_flags & FOPEN_CACHE_DIR) fuse_readdir_cache_end(file, ctx->pos); } else if (plus) { - res = parse_dirplusfile(page_address(page), res, + res = parse_dirplusfile(folio_address(folio), res, file, ctx, attr_version); } else { - res = parse_dirfile(page_address(page), res, file, + res = parse_dirfile(folio_address(folio), res, file, ctx); } } - __free_page(page); + folio_put(folio); fuse_invalidate_atime(inode); return res; } @@ -590,15 +590,11 @@ int fuse_readdir(struct file *file, struct dir_context *ctx) if (fuse_is_bad(inode)) return -EIO; - mutex_lock(&ff->readdir.lock); - err = UNCACHED; if (ff->open_flags & FOPEN_CACHE_DIR) err = fuse_readdir_cached(file, ctx); if (err == UNCACHED) err = fuse_readdir_uncached(file, ctx); - mutex_unlock(&ff->readdir.lock); - return err; } diff --git a/fs/fuse/sysctl.c b/fs/fuse/sysctl.c new file mode 100644 index 0000000000000000000000000000000000000000..b272bb333005a8ca4e5f10b2e1a659f3f38b660a --- /dev/null +++ b/fs/fuse/sysctl.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/fs/fuse/sysctl.c + * + * Sysctl interface to fuse parameters + */ +#include <linux/sysctl.h> + +#include "fuse_i.h" + +static struct ctl_table_header *fuse_table_header; + +/* Bound by fuse_init_out max_pages, which is a u16 */ +static unsigned int sysctl_fuse_max_pages_limit = 65535; + +static struct ctl_table fuse_sysctl_table[] = { + { + .procname = "max_pages_limit", + .data = &fuse_max_pages_limit, + .maxlen = sizeof(fuse_max_pages_limit), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_ONE, + .extra2 = &sysctl_fuse_max_pages_limit, + }, +}; + +int fuse_sysctl_register(void) +{ + fuse_table_header = register_sysctl("fs/fuse", fuse_sysctl_table); + if (!fuse_table_header) + return -ENOMEM; + return 0; +} + +void fuse_sysctl_unregister(void) +{ + unregister_sysctl_table(fuse_table_header); + fuse_table_header = NULL; +} diff --git a/fs/fuse/virtfuse.c b/fs/fuse/virtfuse.c new file mode 100644 index 0000000000000000000000000000000000000000..b6ef6fbf449005e7827cb9e3dbebf5be1ac18bc7 --- /dev/null +++ b/fs/fuse/virtfuse.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2022, Alibaba Cloud + *
+ * Virtual FUSE Device + */ + +#include +#include +#include +#include +#include +#include +#include +#include "fuse_i.h" +#include "../mount.h" + +static uint virtfuse_dev_count = 64; +module_param_named(max_devices, virtfuse_dev_count, uint, 0644); +MODULE_PARM_DESC(max_devices, "Maximum number of devices supported"); + +struct virtfuse_dev { + char name[16]; /* adequate space for "virtfuse%d" */ + struct miscdevice dev; + atomic_t refcount; + spinlock_t lock; + struct fuse_conn *fc; +}; + +static struct virtfuse_dev *virtfuse_devices; +static struct file_operations virtfuse_fops; + +static inline struct virtfuse_dev *virtfuse_dev_get(struct file *file) +{ + dev_t devt = file_inode(file)->i_rdev; + struct virtfuse_dev *vfud; + int i; + + for (i = 0; i < virtfuse_dev_count; i++) { + vfud = &virtfuse_devices[i]; + if (vfud->dev.this_device->devt == devt) + return vfud; + } + + pr_err("virtfuse: failed to find virtfuse for minor %d\n", MINOR(devt)); + return NULL; +} + +static int virtfuse_dev_release(struct inode *inode, struct file *file) +{ + struct fuse_dev *fud = READ_ONCE(file->private_data); + struct virtfuse_dev *vfud; + + if (!fud) + return 0; + + vfud = virtfuse_dev_get(file); + if (!vfud) + return -EUCLEAN; + + /* + * 1. For the initial fuse mount after RESET, the mount may fail + * halfway and thus virtfuse_dev_alloc() is not called yet. + * + * 2. When the old fuse daemon has exited and RESET has not been + * done yet, refcount is zero while vfud->fc is still there. In + * this case, if a new fuse daemon tries to mount, the mount + * will fail and virtfuse_dev_release() will be called then. + */ + spin_lock(&vfud->lock); + if (vfud->fc && vfud->fc == fud->fc) + WARN_ON(atomic_dec_if_positive(&vfud->refcount) < 0); + spin_unlock(&vfud->lock); + + return fuse_dev_release(inode, file); +} + +static int virtfuse_dev_alloc(struct file *file) +{ + struct virtfuse_dev *vfud = virtfuse_dev_get(file); + struct fuse_dev *fud = READ_ONCE(file->private_data); + int ret = 0; + + if (!vfud) + return -EUCLEAN; + + spin_lock(&vfud->lock); + if (!vfud->fc) { + /* the initial fuse mount after RESET */ + WARN_ON(atomic_read(&vfud->refcount) != 0); + atomic_set(&vfud->refcount, 1); + vfud->fc = fuse_conn_get(fud->fc); + } else if (atomic_read(&vfud->refcount) == 0) { + pr_err_ratelimited("%s: please reset before mount\n", vfud->dev.name); + ret = -EBUSY; + } else if (fud->fc != vfud->fc) { + pr_err_ratelimited("%s: can't be mounted multiple times\n", vfud->dev.name); + ret = -EBUSY; + } + spin_unlock(&vfud->lock); + return ret; +} + +static int virtfuse_dev_clone(struct file *file, unsigned long arg) +{ + int fd, ret; + struct file *old; + + if (get_user(fd, (__u32 __user *)arg)) + return -EFAULT; + + old = fget(fd); + if (!old) + return -EINVAL; + /* + * Don't clone fuse_conn between normal fuse device and virtfuse, + * or different virtfuse. 
+ */ + if (file_inode(old)->i_rdev != file_inode(file)->i_rdev) { + fput(old); + return -EINVAL; + } + + ret = fuse_dev_operations.unlocked_ioctl(file, FUSE_DEV_IOC_CLONE, arg); + if (!ret) + atomic_inc(&virtfuse_dev_get(file)->refcount); + fput(old); + return ret; +} + +static int virtfuse_clone(struct file *file) +{ + struct virtfuse_dev *vfud; + struct fuse_conn *fc; + struct fuse_dev *fud; + int err; + + if (file->private_data) + return -EEXIST; + + vfud = virtfuse_dev_get(file); + if (!vfud) + return -EUCLEAN; + + spin_lock(&vfud->lock); + if (!vfud->fc) { + spin_unlock(&vfud->lock); + return -ENODATA; + } + + /* acquire temporary refcount */ + fc = fuse_conn_get(vfud->fc); + atomic_inc(&vfud->refcount); + spin_unlock(&vfud->lock); + + /* follow fuse_device_clone() to clone the connection */ + fud = fuse_dev_alloc_install(fc); + if (fud) { + atomic_inc(&vfud->refcount); + file->private_data = fud; + atomic_inc(&fc->dev_count); + err = 0; + } else { + err = -ENOMEM; + } + + /* drop temporary refcount */ + atomic_dec(&vfud->refcount); + fuse_conn_put(fc); + return err; +} + +static int virtfuse_reset(struct file *file) +{ + struct virtfuse_dev *vfud = virtfuse_dev_get(file); + struct fuse_conn *fc = NULL; + + if (!vfud) + return -EUCLEAN; + + if (atomic_read(&vfud->refcount)) + return -EBUSY; + + spin_lock(&vfud->lock); + if (vfud->fc) { + fc = vfud->fc; + vfud->fc = NULL; + } + spin_unlock(&vfud->lock); + + if (fc) + fuse_conn_put(fc); + return 0; +} + +static int fillbuf(char *buf, unsigned int len, unsigned int *pcount, + const char *fmt, ...) +{ + va_list args; + unsigned int count = *pcount; + int step; + + va_start(args, fmt); + step = vsnprintf(buf + count, len - count, fmt, args); + va_end(args); + if (step >= len - count) + return -EMSGSIZE; + + *pcount += step; + return 0; +} + +static int virtfuse_get_mounts(struct file *file, unsigned long arg) +{ + struct virtfuse_mounts_buf vbuf, __user *u_vbuf; + struct virtfuse_dev *vfud = virtfuse_dev_get(file); + struct fuse_conn *fc = NULL; + struct fuse_mount *fm; + struct super_block *sb; + struct mount *mnt; + unsigned int count = 0, len; + int order, step, ret = 0; + char *buf, *name, *p; + void __user *u_buf; + + if (!vfud) + return -EUCLEAN; + + u_vbuf = (struct virtfuse_mounts_buf __user *)arg; + u_buf = (void __user *)u_vbuf->buf; + if (copy_from_user(&vbuf, u_vbuf, sizeof(vbuf)) != 0) + return -EFAULT; + + len = vbuf.len; + if (len <= 1) + return -EMSGSIZE; + + /* init the user buffer as an empty string */ + if (clear_user(u_buf, 1) != 0) + return -EFAULT; + + spin_lock(&vfud->lock); + if (vfud->fc) + fc = fuse_conn_get(vfud->fc); + spin_unlock(&vfud->lock); + if (!fc) + return 0; + + down_read(&fc->killsb); + fm = list_first_entry_or_null(&fc->mounts, struct fuse_mount, fc_entry); + if (!fm || !fm->sb) + goto out_up_killsb; + sb = fm->sb; + + name = __getname(); + if (!name) { + ret = -ENOMEM; + goto out_up_killsb; + } + + order = get_order(len); + buf = (void *)__get_free_pages(GFP_KERNEL, order); + if (!buf) { + ret = -ENOMEM; + goto out_putname; + } + + /* connection state */ + ret = fillbuf(buf, len, &count, "%s\n", + fc->connected ? "Connected" : "Aborted"); + if (ret) + goto out_free_pages; + + /* open coded lock_mount_hash() */ + write_seqlock(&mount_lock); + + list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { + struct path path = { + .dentry = mnt->mnt.mnt_root, + .mnt = &mnt->mnt + }; + + /* skip slave mounts */ + if (mnt->mnt_master) + continue; + + /* skip private mounts, e.g. 
from clone_private_mount() */ + if (!mnt->mnt_ns) + continue; + + /* mountpoint */ + p = d_absolute_path_locked(&path, name, PATH_MAX); + if (IS_ERR(p)) { + ret = PTR_ERR(p); + break; + } + ret = fillbuf(buf, len, &count, "%s %s", + mnt->mnt_devname ? : "none", p); + if (ret) + break; + + /* fstype */ + if (sb->s_subtype && sb->s_subtype[0]) + sprintf(name, "%s.%s", sb->s_type->name, sb->s_subtype); + else + sprintf(name, "%s", sb->s_type->name); + ret = fillbuf(buf, len, &count, " %s", name); + if (ret) + break; + + /* mount options */ + step = sprintf(name, "%s,user_id=%u,group_id=%u", + __mnt_is_readonly(&mnt->mnt) ? "ro" : "rw", + from_kuid_munged(fc->user_ns, fc->user_id), + from_kgid_munged(fc->user_ns, fc->group_id)); + if (fc->default_permissions) + step += sprintf(name + step, ",default_permissions"); + if (fc->allow_other) + step += sprintf(name + step, ",allow_other"); + ret = fillbuf(buf, len, &count, " %s\n", name); + if (ret) + break; + } + + /* open coded unlock_mount_hash() */ + write_sequnlock(&mount_lock); + + /* also copy the trailing null (ensured by vsnprintf) */ + if (!ret && (copy_to_user(u_buf, buf, count + 1) != 0)) + ret = -EFAULT; + +out_free_pages: + free_pages((unsigned long)buf, order); +out_putname: + __putname(name); +out_up_killsb: + up_read(&fc->killsb); + fuse_conn_put(fc); + return ret; +} + +static long virtfuse_dev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case FUSE_DEV_IOC_CLONE: + return virtfuse_dev_clone(file, arg); + case VIRTFUSE_IOC_CLONE: + return virtfuse_clone(file); + case VIRTFUSE_IOC_RESET: + return virtfuse_reset(file); + case VIRTFUSE_IOC_GET_MOUNTS: + return virtfuse_get_mounts(file, arg); + default: + return fuse_dev_operations.unlocked_ioctl(file, cmd, arg); + } +} + +static void virtfuse_free_devices(void) +{ + struct virtfuse_dev *vfud; + int i; + + for (i = 0; i < virtfuse_dev_count; i++) { + vfud = &virtfuse_devices[i]; + if (vfud->dev.this_device) + misc_deregister(&vfud->dev); + WARN_ON(atomic_read(&vfud->refcount) != 0); + } + kfree(virtfuse_devices); + virtfuse_devices = NULL; +} + +static int __init virtfuse_init(void) +{ + struct virtfuse_dev *vfud; + int i, ret; + + if (virtfuse_dev_count == 0) { + pr_err("virtfuse: max_devices is zero\n"); + return -EINVAL; + } else if (virtfuse_dev_count > VIRT_FUSE_MAX_DEVICES) { + pr_err("virtfuse: max_devices is too big, max %d\n", + VIRT_FUSE_MAX_DEVICES); + return -EINVAL; + } + + virtfuse_fops = fuse_dev_operations; + virtfuse_fops.owner = THIS_MODULE; + virtfuse_fops.compat_ioctl = virtfuse_dev_ioctl; + virtfuse_fops.unlocked_ioctl = virtfuse_dev_ioctl; + virtfuse_fops.release = virtfuse_dev_release; + + virtfuse_devices = kcalloc(virtfuse_dev_count, + sizeof(struct virtfuse_dev), GFP_KERNEL); + if (virtfuse_devices == NULL) + return -ENOMEM; + + for (i = 0; i < virtfuse_dev_count; i++) { + vfud = &virtfuse_devices[i]; + spin_lock_init(&vfud->lock); + snprintf(vfud->name, sizeof(vfud->name), "virtfuse%d", i); + + vfud->dev.name = vfud->name; + vfud->dev.minor = MISC_DYNAMIC_MINOR; + vfud->dev.fops = &virtfuse_fops; + + ret = misc_register(&vfud->dev); + if (ret) { + pr_err("virtfuse: failed to create virtfuse%d\n", i); + vfud->dev.this_device = NULL; + virtfuse_free_devices(); + return ret; + } + } + + fuse_mount_callback = virtfuse_dev_alloc; + return 0; +} + +static void __exit virtfuse_exit(void) +{ + fuse_mount_callback = NULL; + virtfuse_free_devices(); +} + +module_init(virtfuse_init); +module_exit(virtfuse_exit); + 
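/*
 * Editor's illustration, not part of the patch: virtfuse_init() above starts
 * from a byte copy of fuse_dev_operations and overrides only the entries it
 * extends, and virtfuse_dev_ioctl() falls back to the original handler for
 * commands it does not recognize. A toy userspace sketch of this
 * copy-and-override pattern (struct ops and the command value 42 are made up
 * for the demo):
 */
#include <stdio.h>

struct ops {
	int (*ioctl)(int cmd);
};

static int base_ioctl(int cmd)
{
	printf("base handler: cmd %d\n", cmd);
	return 0;
}

static const struct ops base_ops = { .ioctl = base_ioctl };

/* intercept one private command, delegate everything else */
static int wrapped_ioctl(int cmd)
{
	if (cmd == 42) {
		printf("wrapper handler: cmd %d\n", cmd);
		return 0;
	}
	return base_ops.ioctl(cmd);
}

int main(void)
{
	struct ops mine = base_ops;	/* start from a copy of the base table */

	mine.ioctl = wrapped_ioctl;	/* override only the entry we extend */
	mine.ioctl(42);			/* handled by the wrapper */
	mine.ioctl(7);			/* falls through to the base handler */
	return 0;
}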
+MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Virtual FUSE Device"); +MODULE_AUTHOR("Jingbo Xu "); +MODULE_AUTHOR("Jiang Liu "); diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index d84dacbdce2c9de254e286685e9746679537bbff..6706b7be86522732ffe1d575b294f6159b4ada0f 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include #include @@ -31,6 +33,9 @@ static DEFINE_MUTEX(virtio_fs_mutex); static LIST_HEAD(virtio_fs_instances); +/* The /sys/fs/virtio_fs/ kset */ +static struct kset *virtio_fs_kset; + enum { VQ_HIPRIO, VQ_REQUEST @@ -45,17 +50,19 @@ struct virtio_fs_vq { struct work_struct done_work; struct list_head queued_reqs; struct list_head end_reqs; /* End these requests */ - struct delayed_work dispatch_work; + struct work_struct dispatch_work; struct fuse_dev *fud; bool connected; long in_flight; struct completion in_flight_zero; /* No inflight requests */ + struct kobject *kobj; char name[VQ_NAME_LEN]; } ____cacheline_aligned_in_smp; /* A virtio-fs device instance */ struct virtio_fs { - struct kref refcount; + struct kobject kobj; + struct kobject *mqs_kobj; struct list_head list; /* on virtio_fs_instances */ char *tag; struct virtio_fs_vq *vqs; @@ -63,6 +70,8 @@ struct virtio_fs { unsigned int num_request_queues; /* number of request queues */ struct dax_device *dax_dev; + unsigned int *mq_map; /* index = cpu id, value = request vq id */ + /* DAX memory window where file contents are mapped */ void *window_kaddr; phys_addr_t window_phys_addr; @@ -87,7 +96,8 @@ struct virtio_fs_req_work { }; static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, - struct fuse_req *req, bool in_flight); + struct fuse_req *req, bool in_flight, + gfp_t gfp); static const struct constant_table dax_param_enums[] = { {"always", FUSE_DAX_ALWAYS }, @@ -161,27 +171,125 @@ static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq) complete(&fsvq->in_flight_zero); } -static void release_virtio_fs_obj(struct kref *ref) +static ssize_t tag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj); + + return sysfs_emit(buf, "%s\n", fs->tag); +} + +static struct kobj_attribute virtio_fs_tag_attr = __ATTR_RO(tag); + +static struct attribute *virtio_fs_attrs[] = { + &virtio_fs_tag_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(virtio_fs); + +static void virtio_fs_ktype_release(struct kobject *kobj) { - struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount); + struct virtio_fs *vfs = container_of(kobj, struct virtio_fs, kobj); + kfree(vfs->mq_map); kfree(vfs->vqs); kfree(vfs); } +static const struct kobj_type virtio_fs_ktype = { + .release = virtio_fs_ktype_release, + .sysfs_ops = &kobj_sysfs_ops, + .default_groups = virtio_fs_groups, +}; + +static struct virtio_fs_vq *virtio_fs_kobj_to_vq(struct virtio_fs *fs, + struct kobject *kobj) +{ + int i; + + for (i = 0; i < fs->nvqs; i++) { + if (kobj == fs->vqs[i].kobj) + return &fs->vqs[i]; + } + return NULL; +} + +static ssize_t name_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj); + struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj); + + if (!fsvq) + return -EINVAL; + return sysfs_emit(buf, "%s\n", fsvq->name); +} + +static struct kobj_attribute virtio_fs_vq_name_attr = __ATTR_RO(name); + +static ssize_t cpu_list_show(struct kobject *kobj, + struct kobj_attribute *attr, 
char *buf) +{ + struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj); + struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj); + unsigned int cpu, qid; + const size_t size = PAGE_SIZE - 1; + bool first = true; + int ret = 0, pos = 0; + + if (!fsvq) + return -EINVAL; + + qid = fsvq->vq->index; + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { + if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid)) { + if (first) + ret = snprintf(buf + pos, size - pos, "%u", cpu); + else + ret = snprintf(buf + pos, size - pos, ", %u", cpu); + + if (ret >= size - pos) + break; + first = false; + pos += ret; + } + } + ret = snprintf(buf + pos, size + 1 - pos, "\n"); + return pos + ret; +} + +static struct kobj_attribute virtio_fs_vq_cpu_list_attr = __ATTR_RO(cpu_list); + +static struct attribute *virtio_fs_vq_attrs[] = { + &virtio_fs_vq_name_attr.attr, + &virtio_fs_vq_cpu_list_attr.attr, + NULL +}; + +static struct attribute_group virtio_fs_vq_attr_group = { + .attrs = virtio_fs_vq_attrs, +}; + /* Make sure virtiofs_mutex is held */ +static void virtio_fs_put_locked(struct virtio_fs *fs) +{ + lockdep_assert_held(&virtio_fs_mutex); + + kobject_put(&fs->kobj); +} + static void virtio_fs_put(struct virtio_fs *fs) { - kref_put(&fs->refcount, release_virtio_fs_obj); + mutex_lock(&virtio_fs_mutex); + virtio_fs_put_locked(fs); + mutex_unlock(&virtio_fs_mutex); } static void virtio_fs_fiq_release(struct fuse_iqueue *fiq) { struct virtio_fs *vfs = fiq->priv; - mutex_lock(&virtio_fs_mutex); virtio_fs_put(vfs); - mutex_unlock(&virtio_fs_mutex); } static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq) @@ -202,7 +310,7 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq) } flush_work(&fsvq->done_work); - flush_delayed_work(&fsvq->dispatch_work); + flush_work(&fsvq->dispatch_work); } static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs) @@ -242,27 +350,107 @@ static void virtio_fs_start_all_queues(struct virtio_fs *fs) } } +static void virtio_fs_delete_queues_sysfs(struct virtio_fs *fs) +{ + struct virtio_fs_vq *fsvq; + int i; + + for (i = 0; i < fs->nvqs; i++) { + fsvq = &fs->vqs[i]; + kobject_put(fsvq->kobj); + } +} + +static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs) +{ + struct virtio_fs_vq *fsvq; + char buff[12]; + int i, j, ret; + + for (i = 0; i < fs->nvqs; i++) { + fsvq = &fs->vqs[i]; + + sprintf(buff, "%d", i); + fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj); + if (!fsvq->kobj) { + ret = -ENOMEM; + goto out_del; + } + + ret = sysfs_create_group(fsvq->kobj, &virtio_fs_vq_attr_group); + if (ret) { + kobject_put(fsvq->kobj); + goto out_del; + } + } + + return 0; + +out_del: + for (j = 0; j < i; j++) { + fsvq = &fs->vqs[j]; + kobject_put(fsvq->kobj); + } + return ret; +} + /* Add a new instance to the list or return -EEXIST if tag name exists*/ -static int virtio_fs_add_instance(struct virtio_fs *fs) +static int virtio_fs_add_instance(struct virtio_device *vdev, + struct virtio_fs *fs) { struct virtio_fs *fs2; - bool duplicate = false; + int ret; mutex_lock(&virtio_fs_mutex); list_for_each_entry(fs2, &virtio_fs_instances, list) { - if (strcmp(fs->tag, fs2->tag) == 0) - duplicate = true; + if (strcmp(fs->tag, fs2->tag) == 0) { + mutex_unlock(&virtio_fs_mutex); + return -EEXIST; + } } - if (!duplicate) - list_add_tail(&fs->list, &virtio_fs_instances); + /* Use the virtio_device's index as a unique identifier, there is no + * need to allocate our own identifiers because the virtio_fs instance + * is only visible to userspace as long as the
underlying virtio_device + * exists. + */ + fs->kobj.kset = virtio_fs_kset; + ret = kobject_add(&fs->kobj, NULL, "%d", vdev->index); + if (ret < 0) + goto out_unlock; + + fs->mqs_kobj = kobject_create_and_add("mqs", &fs->kobj); + if (!fs->mqs_kobj) { + ret = -ENOMEM; + goto out_del; + } + + ret = sysfs_create_link(&fs->kobj, &vdev->dev.kobj, "device"); + if (ret < 0) + goto out_put; + + ret = virtio_fs_add_queues_sysfs(fs); + if (ret) + goto out_remove; + + list_add_tail(&fs->list, &virtio_fs_instances); mutex_unlock(&virtio_fs_mutex); - if (duplicate) - return -EEXIST; + kobject_uevent(&fs->kobj, KOBJ_ADD); + return 0; + +out_remove: + sysfs_remove_link(&fs->kobj, "device"); +out_put: + kobject_put(fs->mqs_kobj); +out_del: + kobject_del(&fs->kobj); +out_unlock: + mutex_unlock(&virtio_fs_mutex); + return ret; } /* Return the virtio_fs with a given tag, or NULL */ @@ -274,7 +462,7 @@ static struct virtio_fs *virtio_fs_find_instance(const char *tag) list_for_each_entry(fs, &virtio_fs_instances, list) { if (strcmp(fs->tag, tag) == 0) { - kref_get(&fs->refcount); + kobject_get(&fs->kobj); goto found; } } @@ -333,6 +521,7 @@ static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs) return -EINVAL; } + dev_info(&vdev->dev, "discovered new tag: %s\n", fs->tag); return 0; } @@ -355,7 +544,11 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work) kfree(req); dec_in_flight_req(fsvq); } - } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq))); + } while (!virtqueue_enable_cb(vq)); + + if (!list_empty(&fsvq->queued_reqs)) + schedule_work(&fsvq->dispatch_work); + spin_unlock(&fsvq->lock); } @@ -363,7 +556,7 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work) { struct fuse_req *req; struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, - dispatch_work.work); + dispatch_work); int ret; pr_debug("virtio-fs: worker %s called.\n", __func__); @@ -383,6 +576,8 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work) /* Dispatch pending requests */ while (1) { + unsigned int flags; + spin_lock(&fsvq->lock); req = list_first_entry_or_null(&fsvq->queued_reqs, struct fuse_req, list); @@ -393,13 +588,13 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work) list_del_init(&req->list); spin_unlock(&fsvq->lock); - ret = virtio_fs_enqueue_req(fsvq, req, true); + flags = memalloc_nofs_save(); + ret = virtio_fs_enqueue_req(fsvq, req, true, GFP_KERNEL); + memalloc_nofs_restore(flags); if (ret < 0) { - if (ret == -ENOMEM || ret == -ENOSPC) { + if (ret == -ENOSPC) { spin_lock(&fsvq->lock); list_add_tail(&req->list, &fsvq->queued_reqs); - schedule_delayed_work(&fsvq->dispatch_work, - msecs_to_jiffies(1)); spin_unlock(&fsvq->lock); return; } @@ -442,12 +637,10 @@ static int send_forget_request(struct virtio_fs_vq *fsvq, ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC); if (ret < 0) { - if (ret == -ENOMEM || ret == -ENOSPC) { + if (ret == -ENOSPC) { pr_debug("virtio-fs: Could not queue FORGET: err=%d. 
Will try later\n", ret); list_add_tail(&forget->list, &fsvq->queued_reqs); - schedule_delayed_work(&fsvq->dispatch_work, - msecs_to_jiffies(1)); if (!in_flight) inc_in_flight_req(fsvq); /* Queue is full */ @@ -479,7 +672,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work) { struct virtio_fs_forget *forget; struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, - dispatch_work.work); + dispatch_work); pr_debug("virtio-fs: worker %s called.\n", __func__); while (1) { spin_lock(&fsvq->lock); @@ -498,7 +691,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work) } /* Allocate and copy args into req->argbuf */ -static int copy_args_to_argbuf(struct fuse_req *req) +static int copy_args_to_argbuf(struct fuse_req *req, gfp_t gfp) { struct fuse_args *args = req->args; unsigned int offset = 0; @@ -512,7 +705,7 @@ static int copy_args_to_argbuf(struct fuse_req *req) len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) + fuse_len_args(num_out, args->out_args); - req->argbuf = kmalloc(len, GFP_ATOMIC); + req->argbuf = kmalloc(len, gfp); if (!req->argbuf) return -ENOMEM; @@ -572,7 +765,7 @@ static void virtio_fs_request_complete(struct fuse_req *req, struct fuse_args *args; struct fuse_args_pages *ap; unsigned int len, i, thislen; - struct page *page; + struct folio *folio; /* * TODO verify that server properly follows FUSE protocol @@ -584,12 +777,12 @@ static void virtio_fs_request_complete(struct fuse_req *req, if (args->out_pages && args->page_zeroing) { len = args->out_args[args->out_numargs - 1].size; ap = container_of(args, typeof(*ap), args); - for (i = 0; i < ap->num_pages; i++) { + for (i = 0; i < ap->num_folios; i++) { thislen = ap->descs[i].length; if (len < thislen) { WARN_ON(ap->descs[i].offset); - page = ap->pages[i]; - zero_user_segment(page, len, thislen); + folio = ap->folios[i]; + folio_zero_segment(folio, len, thislen); len = 0; } else { len -= thislen; @@ -637,7 +830,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work) list_move_tail(&req->list, &reqs); spin_unlock(&fpq->lock); } - } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq))); + } while (!virtqueue_enable_cb(vq)); spin_unlock(&fsvq->lock); /* End requests */ @@ -657,6 +850,50 @@ static void virtio_fs_requests_done_work(struct work_struct *work) virtio_fs_request_complete(req, fsvq); } } + + /* Try to push previously queued requests, as the queue might no longer be full */ + spin_lock(&fsvq->lock); + if (!list_empty(&fsvq->queued_reqs)) + schedule_work(&fsvq->dispatch_work); + spin_unlock(&fsvq->lock); +} + +static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs) +{ + const struct cpumask *mask, *masks; + unsigned int q, cpu; + + /* First attempt to map using existing transport layer affinities + * e.g. 
PCIe MSI-X + */ + if (!vdev->config->get_vq_affinity) + goto fallback; + + for (q = 0; q < fs->num_request_queues; q++) { + mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q); + if (!mask) + goto fallback; + + for_each_cpu(cpu, mask) + fs->mq_map[cpu] = q + VQ_REQUEST; + } + + return; +fallback: + /* Attempt to map evenly in groups over the CPUs */ + masks = group_cpus_evenly(fs->num_request_queues); + /* If even this fails we default to all CPUs use first request queue */ + if (!masks) { + for_each_possible_cpu(cpu) + fs->mq_map[cpu] = VQ_REQUEST; + return; + } + + for (q = 0; q < fs->num_request_queues; q++) { + for_each_cpu(cpu, &masks[q]) + fs->mq_map[cpu] = q + VQ_REQUEST; + } + kfree(masks); } /* Virtqueue interrupt handler */ @@ -680,12 +917,12 @@ static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name, if (vq_type == VQ_REQUEST) { INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work); - INIT_DELAYED_WORK(&fsvq->dispatch_work, - virtio_fs_request_dispatch_work); + INIT_WORK(&fsvq->dispatch_work, + virtio_fs_request_dispatch_work); } else { INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work); - INIT_DELAYED_WORK(&fsvq->dispatch_work, - virtio_fs_hiprio_dispatch_work); + INIT_WORK(&fsvq->dispatch_work, + virtio_fs_hiprio_dispatch_work); } } @@ -695,6 +932,11 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, { struct virtqueue **vqs; vq_callback_t **callbacks; + /* Specify pre_vectors to ensure that the queues before the + * request queues (e.g. hiprio) don't claim any of the CPUs in + * the multi-queue mapping and interrupt affinities + */ + struct irq_affinity desc = { .pre_vectors = VQ_REQUEST }; const char **names; unsigned int i; int ret = 0; @@ -704,6 +946,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, if (fs->num_request_queues == 0) return -EINVAL; + /* Truncate nr of request queues to nr_cpu_id */ + fs->num_request_queues = min_t(unsigned int, fs->num_request_queues, + nr_cpu_ids); fs->nvqs = VQ_REQUEST + fs->num_request_queues; fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL); if (!fs->vqs) @@ -713,7 +958,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]), GFP_KERNEL); names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL); - if (!vqs || !callbacks || !names) { + fs->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*fs->mq_map), GFP_KERNEL, + dev_to_node(&vdev->dev)); + if (!vqs || !callbacks || !names || !fs->mq_map) { ret = -ENOMEM; goto out; } @@ -733,7 +980,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, names[i] = fs->vqs[i].name; } - ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL); + ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, &desc); if (ret < 0) goto out; @@ -745,8 +992,10 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, kfree(names); kfree(callbacks); kfree(vqs); - if (ret) + if (ret) { kfree(fs->vqs); + kfree(fs->mq_map); + } return ret; } @@ -875,7 +1124,7 @@ static int virtio_fs_probe(struct virtio_device *vdev) fs = kzalloc(sizeof(*fs), GFP_KERNEL); if (!fs) return -ENOMEM; - kref_init(&fs->refcount); + kobject_init(&fs->kobj, &virtio_fs_ktype); vdev->priv = fs; ret = virtio_fs_read_tag(vdev, fs); @@ -886,7 +1135,7 @@ static int virtio_fs_probe(struct virtio_device *vdev) if (ret < 0) goto out; - /* TODO vq affinity */ + virtio_fs_map_queues(vdev, fs); ret = virtio_fs_setup_dax(vdev, fs); if (ret < 0) @@ -897,7 +1146,7 @@ static int 
virtio_fs_probe(struct virtio_device *vdev) */ virtio_device_ready(vdev); - ret = virtio_fs_add_instance(fs); + ret = virtio_fs_add_instance(vdev, fs); if (ret < 0) goto out_vqs; @@ -906,11 +1155,10 @@ static int virtio_fs_probe(struct virtio_device *vdev) out_vqs: virtio_reset_device(vdev); virtio_fs_cleanup_vqs(vdev); - kfree(fs->vqs); out: vdev->priv = NULL; - kfree(fs); + kobject_put(&fs->kobj); return ret; } @@ -934,6 +1182,10 @@ static void virtio_fs_remove(struct virtio_device *vdev) mutex_lock(&virtio_fs_mutex); /* This device is going away. No one should get new reference */ list_del_init(&fs->list); + virtio_fs_delete_queues_sysfs(fs); + sysfs_remove_link(&fs->kobj, "device"); + kobject_put(fs->mqs_kobj); + kobject_del(&fs->kobj); virtio_fs_stop_all_queues(fs); virtio_fs_drain_all_queues_locked(fs); virtio_reset_device(vdev); @@ -941,7 +1193,7 @@ static void virtio_fs_remove(struct virtio_device *vdev) vdev->priv = NULL; /* Put device reference on virtio_fs object */ - virtio_fs_put(fs); + virtio_fs_put_locked(fs); mutex_unlock(&virtio_fs_mutex); } @@ -981,22 +1233,13 @@ static struct virtio_driver virtio_fs_driver = { #endif }; -static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq) -__releases(fiq->lock) +static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link) { - struct fuse_forget_link *link; struct virtio_fs_forget *forget; struct virtio_fs_forget_req *req; - struct virtio_fs *fs; - struct virtio_fs_vq *fsvq; - u64 unique; - - link = fuse_dequeue_forget(fiq, 1, NULL); - unique = fuse_get_unique(fiq); - - fs = fiq->priv; - fsvq = &fs->vqs[VQ_HIPRIO]; - spin_unlock(&fiq->lock); + struct virtio_fs *fs = fiq->priv; + struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO]; + u64 unique = fuse_get_unique(fiq); /* Allocate a buffer for the request */ forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL); @@ -1016,8 +1259,7 @@ __releases(fiq->lock) kfree(link); } -static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq) -__releases(fiq->lock) +static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) { /* * TODO interrupts. @@ -1026,19 +1268,18 @@ __releases(fiq->lock) * Exceptions are blocking lock operations; for example fcntl(F_SETLKW) * with shared lock between host and guest. 
*/ - spin_unlock(&fiq->lock); } /* Count number of scatter-gather elements required */ -static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs, - unsigned int num_pages, - unsigned int total_len) +static unsigned int sg_count_fuse_folios(struct fuse_folio_desc *folio_descs, + unsigned int num_folios, + unsigned int total_len) { unsigned int i; unsigned int this_len; - for (i = 0; i < num_pages && total_len; i++) { - this_len = min(page_descs[i].length, total_len); + for (i = 0; i < num_folios && total_len; i++) { + this_len = min(folio_descs[i].length, total_len); total_len -= this_len; } @@ -1057,8 +1298,8 @@ static unsigned int sg_count_fuse_req(struct fuse_req *req) if (args->in_pages) { size = args->in_args[args->in_numargs - 1].size; - total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages, - size); + total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios, + size); } if (!test_bit(FR_ISREPLY, &req->flags)) @@ -1071,27 +1312,27 @@ static unsigned int sg_count_fuse_req(struct fuse_req *req) if (args->out_pages) { size = args->out_args[args->out_numargs - 1].size; - total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages, - size); + total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios, + size); } return total_sgs; } -/* Add pages to scatter-gather list and return number of elements used */ -static unsigned int sg_init_fuse_pages(struct scatterlist *sg, - struct page **pages, - struct fuse_page_desc *page_descs, - unsigned int num_pages, - unsigned int total_len) +/* Add folios to scatter-gather list and return number of elements used */ +static unsigned int sg_init_fuse_folios(struct scatterlist *sg, + struct folio **folios, + struct fuse_folio_desc *folio_descs, + unsigned int num_folios, + unsigned int total_len) { unsigned int i; unsigned int this_len; - for (i = 0; i < num_pages && total_len; i++) { + for (i = 0; i < num_folios && total_len; i++) { sg_init_table(&sg[i], 1); - this_len = min(page_descs[i].length, total_len); - sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset); + this_len = min(folio_descs[i].length, total_len); + sg_set_folio(&sg[i], folios[i], this_len, folio_descs[i].offset); total_len -= this_len; } @@ -1116,10 +1357,10 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg, sg_init_one(&sg[total_sgs++], argbuf, len); if (argpages) - total_sgs += sg_init_fuse_pages(&sg[total_sgs], - ap->pages, ap->descs, - ap->num_pages, - args[numargs - 1].size); + total_sgs += sg_init_fuse_folios(&sg[total_sgs], + ap->folios, ap->descs, + ap->num_folios, + args[numargs - 1].size); if (len_used) *len_used = len; @@ -1129,7 +1370,8 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg, /* Add a request to a virtqueue and kick the device */ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, - struct fuse_req *req, bool in_flight) + struct fuse_req *req, bool in_flight, + gfp_t gfp) { /* requests need at least 4 elements */ struct scatterlist *stack_sgs[6]; @@ -1150,8 +1392,8 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, /* Does the sglist fit on the stack? 
*/ total_sgs = sg_count_fuse_req(req); if (total_sgs > ARRAY_SIZE(stack_sgs)) { - sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC); - sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC); + sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), gfp); + sg = kmalloc_array(total_sgs, sizeof(sg[0]), gfp); if (!sgs || !sg) { ret = -ENOMEM; goto out; @@ -1159,7 +1401,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, } /* Use a bounce buffer since stack args cannot be mapped */ - ret = copy_args_to_argbuf(req); + ret = copy_args_to_argbuf(req, gfp); if (ret < 0) goto out; @@ -1231,33 +1473,31 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, return ret; } -static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq) -__releases(fiq->lock) +static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req) { - unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */ + unsigned int queue_id; struct virtio_fs *fs; - struct fuse_req *req; struct virtio_fs_vq *fsvq; int ret; - WARN_ON(list_empty(&fiq->pending)); - req = list_last_entry(&fiq->pending, struct fuse_req, list); + if (req->in.h.opcode != FUSE_NOTIFY_REPLY) + req->in.h.unique = fuse_get_unique(fiq); + clear_bit(FR_PENDING, &req->flags); - list_del_init(&req->list); - WARN_ON(!list_empty(&fiq->pending)); - spin_unlock(&fiq->lock); fs = fiq->priv; + queue_id = fs->mq_map[raw_smp_processor_id()]; - pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n", - __func__, req->in.h.opcode, req->in.h.unique, + pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n", + __func__, req->in.h.opcode, req->in.h.unique, req->in.h.nodeid, req->in.h.len, - fuse_len_args(req->args->out_numargs, req->args->out_args)); + fuse_len_args(req->args->out_numargs, req->args->out_args), + queue_id); fsvq = &fs->vqs[queue_id]; - ret = virtio_fs_enqueue_req(fsvq, req, false); + ret = virtio_fs_enqueue_req(fsvq, req, false, GFP_ATOMIC); if (ret < 0) { - if (ret == -ENOMEM || ret == -ENOSPC) { + if (ret == -ENOSPC) { /* * Virtqueue full. Retry submission from worker * context as we might be holding fc->bg_lock. @@ -1265,8 +1505,6 @@ __releases(fiq->lock) spin_lock(&fsvq->lock); list_add_tail(&req->list, &fsvq->queued_reqs); inc_in_flight_req(fsvq); - schedule_delayed_work(&fsvq->dispatch_work, - msecs_to_jiffies(1)); spin_unlock(&fsvq->lock); return; } @@ -1276,17 +1514,17 @@ __releases(fiq->lock) /* Can't end request in submission context. 
Use a worker */ spin_lock(&fsvq->lock); list_add_tail(&req->list, &fsvq->end_reqs); - schedule_delayed_work(&fsvq->dispatch_work, 0); + schedule_work(&fsvq->dispatch_work); spin_unlock(&fsvq->lock); return; } } static const struct fuse_iqueue_ops virtio_fs_fiq_ops = { - .wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock, - .wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock, - .wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock, - .release = virtio_fs_fiq_release, + .send_forget = virtio_fs_send_forget, + .send_interrupt = virtio_fs_send_interrupt, + .send_req = virtio_fs_send_req, + .release = virtio_fs_fiq_release, }; static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx) @@ -1458,6 +1696,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc) fc->delete_stale = true; fc->auto_submounts = true; fc->sync_fs = true; + fc->use_pages_for_kvec_io = true; /* Tell FUSE to split requests that exceed the virtqueue's size */ fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit, @@ -1486,9 +1725,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc) out_err: kfree(fc); - mutex_lock(&virtio_fs_mutex); virtio_fs_put(fs); - mutex_unlock(&virtio_fs_mutex); return err; } @@ -1520,21 +1757,56 @@ static struct file_system_type virtio_fs_type = { .kill_sb = virtio_kill_sb, }; +static int virtio_fs_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) +{ + const struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj); + + add_uevent_var(env, "TAG=%s", fs->tag); + return 0; +} + +static const struct kset_uevent_ops virtio_fs_uevent_ops = { + .uevent = virtio_fs_uevent, +}; + +static int __init virtio_fs_sysfs_init(void) +{ + virtio_fs_kset = kset_create_and_add("virtiofs", &virtio_fs_uevent_ops, + fs_kobj); + if (!virtio_fs_kset) + return -ENOMEM; + return 0; +} + +static void virtio_fs_sysfs_exit(void) +{ + kset_unregister(virtio_fs_kset); + virtio_fs_kset = NULL; +} + static int __init virtio_fs_init(void) { int ret; - ret = register_virtio_driver(&virtio_fs_driver); + ret = virtio_fs_sysfs_init(); if (ret < 0) return ret; + ret = register_virtio_driver(&virtio_fs_driver); + if (ret < 0) + goto sysfs_exit; + ret = register_filesystem(&virtio_fs_type); - if (ret < 0) { - unregister_virtio_driver(&virtio_fs_driver); - return ret; - } + if (ret < 0) + goto unregister_virtio_driver; return 0; + +unregister_virtio_driver: + unregister_virtio_driver(&virtio_fs_driver); +sysfs_exit: + virtio_fs_sysfs_exit(); + return ret; } module_init(virtio_fs_init); @@ -1542,6 +1814,7 @@ static void __exit virtio_fs_exit(void) { unregister_filesystem(&virtio_fs_type); unregister_virtio_driver(&virtio_fs_driver); + virtio_fs_sysfs_exit(); } module_exit(virtio_fs_exit); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index f4c066aa24b963cca1760ccb476a7b065c033627..65da3ae6d48e4c53e85d147d9f69b7dfe8bdb674 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -215,7 +215,7 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf) memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN); memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN); - memcpy(&s->s_uuid, str->sb_uuid, 16); + super_set_uuid(s, str->sb_uuid, 16); } /** diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index ac519515ef6c06c9ff552ffe9d6539ba39ece14f..36ac4e536bcb86df0e78efc0e90ded18cbb00fb3 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -345,7 +345,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, 
struct iov_iter *to) ssize_t retval = 0; while (iov_iter_count(to)) { - struct page *page; + struct folio *folio; size_t nr, copied, want; /* nr is the maximum number of bytes to copy from this page */ @@ -363,18 +363,18 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) } nr = nr - offset; - /* Find the page */ - page = find_lock_page(mapping, index); - if (unlikely(page == NULL)) { + /* Find the folio */ + folio = filemap_lock_hugetlb_folio(h, mapping, index); + if (IS_ERR(folio)) { /* * We have a HOLE, zero out the user-buffer for the * length of the hole or request. */ copied = iov_iter_zero(nr, to); } else { - unlock_page(page); + folio_unlock(folio); - if (!PageHWPoison(page)) + if (!folio_test_hwpoison(folio)) want = nr; else { /* @@ -382,19 +382,19 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) * touching the 1st raw HWPOISON subpage after * offset. */ - want = adjust_range_hwpoison(page, offset, nr); + want = adjust_range_hwpoison(&folio->page, offset, nr); if (want == 0) { - put_page(page); + folio_put(folio); retval = -EIO; break; } } /* - * We have the page, copy it to user space buffer. + * We have the folio, copy it to user space buffer. */ - copied = copy_page_to_iter(page, offset, want, to); - put_page(page); + copied = copy_folio_to_iter(folio, offset, want, to); + folio_put(folio); } offset += copied; retval += copied; @@ -672,21 +672,20 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, { struct hstate *h = hstate_inode(inode); struct address_space *mapping = &inode->i_data; - const pgoff_t start = lstart >> huge_page_shift(h); - const pgoff_t end = lend >> huge_page_shift(h); + const pgoff_t end = lend >> PAGE_SHIFT; struct folio_batch fbatch; pgoff_t next, index; int i, freed = 0; bool truncate_op = (lend == LLONG_MAX); folio_batch_init(&fbatch); - next = start; + next = lstart >> PAGE_SHIFT; while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { for (i = 0; i < folio_batch_count(&fbatch); ++i) { struct folio *folio = fbatch.folios[i]; u32 hash = 0; - index = folio->index; + index = folio->index >> huge_page_order(h); hash = hugetlb_fault_mutex_hash(mapping, index); mutex_lock(&hugetlb_fault_mutex_table[hash]); @@ -704,7 +703,9 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, } if (truncate_op) - (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed); + (void)hugetlb_unreserve_pages(inode, + lstart >> huge_page_shift(h), + LONG_MAX, freed); } static void hugetlbfs_evict_inode(struct inode *inode) @@ -752,7 +753,7 @@ static void hugetlbfs_zero_partial_page(struct hstate *h, pgoff_t idx = start >> huge_page_shift(h); struct folio *folio; - folio = filemap_lock_folio(mapping, idx); + folio = filemap_lock_hugetlb_folio(h, mapping, idx); if (IS_ERR(folio)) return; @@ -897,7 +898,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, mutex_lock(&hugetlb_fault_mutex_table[hash]); /* See if already present in mapping to avoid alloc/free */ - folio = filemap_get_folio(mapping, index); + folio = filemap_get_folio(mapping, index << huge_page_order(h)); if (!IS_ERR(folio)) { folio_put(folio); mutex_unlock(&hugetlb_fault_mutex_table[hash]); diff --git a/fs/inode.c b/fs/inode.c index 030e07b169c27674154df57e2870e9a438742bc7..2c44dda61a690d254849f71292aaf268ded0ff77 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -2609,6 +2609,7 @@ bool in_group_or_capable(struct mnt_idmap *idmap, return true; return false; } 
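The hugetlbfs hunks above repeatedly convert between the two index spaces in play: the page cache indexes hugetlb folios in PAGE_SIZE units, while the fault-mutex hash and the reservation code work in huge-page units. The two conversions in isolation, assuming only that h is the file's hstate::

	/* page-cache (base-page) index -> huge-page index */
	pgoff_t hindex = folio->index >> huge_page_order(h);

	/* huge-page index -> page-cache index, e.g. for filemap_get_folio() */
	pgoff_t pindex = hindex << huge_page_order(h);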
+EXPORT_SYMBOL(in_group_or_capable); /** * mode_strip_sgid - handle the sgid bit for non-directories diff --git a/fs/ioctl.c b/fs/ioctl.c index 76cf22ac97d76256104a6dbc4f819247168ce769..1d5abfdf0f22a626560b9ae6bb95309f8c146be5 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -763,6 +763,33 @@ static int ioctl_fssetxattr(struct file *file, void __user *argp) return err; } +static int ioctl_getfsuuid(struct file *file, void __user *argp) +{ + struct super_block *sb = file_inode(file)->i_sb; + struct fsuuid2 u = { .len = sb->s_uuid_len, }; + + if (!sb->s_uuid_len) + return -ENOIOCTLCMD; + + memcpy(&u.uuid[0], &sb->s_uuid, sb->s_uuid_len); + + return copy_to_user(argp, &u, sizeof(u)) ? -EFAULT : 0; +} + +static int ioctl_get_fs_sysfs_path(struct file *file, void __user *argp) +{ + struct super_block *sb = file_inode(file)->i_sb; + + if (!strlen(sb->s_sysfs_name)) + return -ENOIOCTLCMD; + + struct fs_sysfs_path u = {}; + + u.len = scnprintf(u.name, sizeof(u.name), "%s/%s", sb->s_type->name, sb->s_sysfs_name); + + return copy_to_user(argp, &u, sizeof(u)) ? -EFAULT : 0; +} + /* * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d. * It's just a simple helper for sys_ioctl and compat_sys_ioctl. @@ -845,6 +872,12 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd, case FS_IOC_FSSETXATTR: return ioctl_fssetxattr(filp, argp); + case FS_IOC_GETFSUUID: + return ioctl_getfsuuid(filp, argp); + + case FS_IOC_GETFSSYSFSPATH: + return ioctl_get_fs_sysfs_path(filp, argp); + default: if (S_ISREG(inode->i_mode)) return file_ioctl(filp, cmd, argp); diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 8fda66c98a610f0970d69851dfa8d4b32bdee63f..64965c4d4e4393315c9dfe46696154fa5f06fad4 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -88,7 +88,18 @@ __releases(&journal->j_state_lock) spin_unlock(&journal->j_list_lock); write_unlock(&journal->j_state_lock); if (chkpt) { - jbd2_log_do_checkpoint(journal); + DEFINE_WAIT(wait); + + prepare_to_wait( + &journal->j_wait_done_checkpoint, &wait, + TASK_UNINTERRUPTIBLE); + mutex_unlock(&journal->j_checkpoint_mutex); + wake_up(&journal->j_wait_checkpoint); + schedule(); + mutex_lock(&journal->j_checkpoint_mutex); + finish_wait(&journal->j_wait_done_checkpoint, + &wait); + jbd2_debug(1, "wake up checkpoint thread.\n"); } else if (jbd2_cleanup_journal_tail(journal) <= 0) { /* * We were able to recover space or the diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 0cd7439470fc43c46278826606af1b7fc98220cb..97c2da3758a4a58b35fea1c6847f193ec8a686ae 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -438,6 +438,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) journal->j_fc_off = 0; J_ASSERT(commit_transaction->t_state == T_RUNNING); commit_transaction->t_state = T_LOCKED; + WRITE_ONCE(commit_transaction->t_locked_time, jiffies); trace_jbd2_commit_locking(journal, commit_transaction); stats.run.rs_wait = commit_transaction->t_max_wait; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index dfbb8f73861f646efefe77a969d69c3e80339334..735828ef1e7bbdebf5c4b80f2e7a197d008391dd 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -191,6 +191,9 @@ static int kjournald2(void *arg) if (journal->j_flags & JBD2_UNMOUNT) goto end_loop; + if (kthread_should_stop()) + goto end_loop; + jbd2_debug(1, "commit_sequence=%u, commit_request=%u\n", journal->j_commit_sequence, journal->j_commit_request); @@ -261,9 +264,40 @@ static int kjournald2(void *arg) return 0; } +static int jbd2_checkpoint_thread(void *arg) +{ + 
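The checkpoint.c hunk above hands the actual checkpoint off to the dedicated thread defined below, and it avoids lost wakeups by queueing itself on the "done" waitqueue before kicking the worker. The bare handshake, as a sketch with hypothetical work_wq/done_wq waitqueues::

	DEFINE_WAIT(wait);

	/*
	 * Register as a waiter first, so a wake_up() issued between the
	 * kick and the sleep cannot be lost.
	 */
	prepare_to_wait(&done_wq, &wait, TASK_UNINTERRUPTIBLE);
	wake_up(&work_wq);	/* kick the worker */
	schedule();		/* sleep until the worker signals done */
	finish_wait(&done_wq, &wait);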
journal_t *journal = arg; + DEFINE_WAIT(wait); + + jbd2_debug(1, "%s\n", __func__); + journal->j_checkpoint_task = current; + +loop: + prepare_to_wait(&journal->j_wait_checkpoint, &wait, + TASK_INTERRUPTIBLE); + wake_up_all(&journal->j_wait_done_checkpoint); + schedule(); + finish_wait(&journal->j_wait_checkpoint, &wait); + + if (journal->j_flags & JBD2_UNMOUNT) + goto end_loop; + + mutex_lock(&journal->j_checkpoint_mutex); + jbd2_log_do_checkpoint(journal); + mutex_unlock(&journal->j_checkpoint_mutex); + + goto loop; + +end_loop: + journal->j_checkpoint_task = NULL; + wake_up_all(&journal->j_wait_done_checkpoint); + jbd2_debug(1, "%s exiting.\n", __func__); + return 0; +} + static int jbd2_journal_start_thread(journal_t *journal) { - struct task_struct *t; + struct task_struct *t, *t_ckpt; t = kthread_run(kjournald2, journal, "jbd2/%s", journal->j_devname); @@ -271,6 +305,17 @@ static int jbd2_journal_start_thread(journal_t *journal) return PTR_ERR(t); wait_event(journal->j_wait_done_commit, journal->j_task != NULL); + + t_ckpt = kthread_run(jbd2_checkpoint_thread, journal, "jbd2-ckpt/%s", + journal->j_devname); + if (IS_ERR(t_ckpt)) { + kthread_stop(t); + return PTR_ERR(t_ckpt); + } + + wait_event(journal->j_wait_done_checkpoint, + journal->j_checkpoint_task != NULL); + return 0; } @@ -286,6 +331,14 @@ static void journal_kill_thread(journal_t *journal) write_lock(&journal->j_state_lock); } write_unlock(&journal->j_state_lock); + + while (journal->j_checkpoint_task) { + mutex_lock(&journal->j_checkpoint_mutex); + wake_up(&journal->j_wait_checkpoint); + wait_event(journal->j_wait_done_checkpoint, + journal->j_checkpoint_task == NULL); + mutex_unlock(&journal->j_checkpoint_mutex); + } } /* @@ -389,6 +442,9 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, } kunmap_local(mapped_data); + /* force copy-out */ + if (need_copy_out == 0 && journal->j_force_copy) + need_copy_out = 1; /* * Do we need to do a data copy? */ @@ -1204,25 +1260,78 @@ static const struct seq_operations jbd2_seq_info_ops = { .show = jbd2_seq_info_show, }; -static int jbd2_seq_info_open(struct inode *inode, struct file *file) +static void *jbd2_seq_stats_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? 
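jbd2_journal_start_thread() above now spawns a second kthread and has to stop the first one if the second fails to start. The shape of that pattern as a sketch, with first_fn/second_fn as placeholder thread functions::

	static int start_thread_pair(void *data)
	{
		struct task_struct *a, *b;

		a = kthread_run(first_fn, data, "example/a");
		if (IS_ERR(a))
			return PTR_ERR(a);

		b = kthread_run(second_fn, data, "example/b");
		if (IS_ERR(b)) {
			kthread_stop(a);	/* undo the first start */
			return PTR_ERR(b);
		}
		return 0;
	}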
NULL : SEQ_START_TOKEN; +} + +static void *jbd2_seq_stats_next(struct seq_file *seq, void *v, loff_t *pos) +{ + (*pos)++; + return NULL; +} + +static int jbd2_seq_stats_show(struct seq_file *seq, void *v) +{ + struct jbd2_stats_proc_session *s = seq->private; + + if (v != SEQ_START_TOKEN) + return 0; + + seq_printf(seq, "%lu %lu %d %lu %lu %lu %lu %lu %lu %llu %u %u %u %d %d\n", + s->stats->ts_tid, s->stats->ts_requested, + s->journal->j_max_transaction_buffers, s->stats->run.rs_wait, + s->stats->run.rs_request_delay, s->stats->run.rs_running, + s->stats->run.rs_locked, s->stats->run.rs_flushing, + s->stats->run.rs_logging, + div_u64(s->journal->j_average_commit_time, NSEC_PER_MSEC), + s->stats->run.rs_handle_count, s->stats->run.rs_blocks, + s->stats->run.rs_blocks_logged, HZ, jiffies_to_msecs(HZ)); + return 0; +} + +static void jbd2_seq_stats_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations jbd2_seq_stats_ops = { + .start = jbd2_seq_stats_start, + .next = jbd2_seq_stats_next, + .stop = jbd2_seq_stats_stop, + .show = jbd2_seq_stats_show, +}; + +static struct jbd2_stats_proc_session *__jbd2_seq_open(struct inode *inode, + struct file *file) { journal_t *journal = pde_data(inode); struct jbd2_stats_proc_session *s; - int rc, size; + int size; s = kmalloc(sizeof(*s), GFP_KERNEL); if (s == NULL) - return -ENOMEM; + return ERR_PTR(-ENOMEM); size = sizeof(struct transaction_stats_s); s->stats = kmalloc(size, GFP_KERNEL); if (s->stats == NULL) { kfree(s); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } spin_lock(&journal->j_history_lock); memcpy(s->stats, &journal->j_stats, size); s->journal = journal; spin_unlock(&journal->j_history_lock); + return s; +} + +static int jbd2_seq_info_open(struct inode *inode, struct file *file) +{ + struct jbd2_stats_proc_session *s; + int rc; + + s = __jbd2_seq_open(inode, file); + if (IS_ERR(s)) + return PTR_ERR(s); rc = seq_open(file, &jbd2_seq_info_ops); if (rc == 0) { @@ -1233,7 +1342,6 @@ static int jbd2_seq_info_open(struct inode *inode, struct file *file) kfree(s); } return rc; - } static int jbd2_seq_info_release(struct inode *inode, struct file *file) @@ -1252,6 +1360,143 @@ static const struct proc_ops jbd2_info_proc_ops = { .proc_release = jbd2_seq_info_release, }; +static int jbd2_seq_stats_open(struct inode *inode, struct file *file) +{ + struct jbd2_stats_proc_session *s; + int rc; + + s = __jbd2_seq_open(inode, file); + if (IS_ERR(s)) + return PTR_ERR(s); + + rc = seq_open(file, &jbd2_seq_stats_ops); + if (rc == 0) { + struct seq_file *m = file->private_data; + + m->private = s; + } else { + kfree(s->stats); + kfree(s); + } + return rc; +} + +static int jbd2_seq_stats_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct jbd2_stats_proc_session *s = seq->private; + + kfree(s->stats); + kfree(s); + return seq_release(inode, file); +} + +static const struct proc_ops jbd2_stats_proc_ops = { + .proc_open = jbd2_seq_stats_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = jbd2_seq_stats_release, +}; + +static int jbd2_seq_force_copy_show(struct seq_file *m, void *v) +{ + journal_t *journal = m->private; + + seq_printf(m, "%u\n", journal->j_force_copy); + return 0; +} + +static int jbd2_seq_force_copy_open(struct inode *inode, struct file *filp) +{ + journal_t *journal = pde_data(inode); + + return single_open(filp, jbd2_seq_force_copy_show, journal); +} + +/* Worst case buffer size needed for holding an integer. 
*/ +#define PROC_NUMBUF 13 + +static ssize_t jbd2_seq_force_copy_write(struct file *file, + const char __user *buf, size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + journal_t *journal = pde_data(inode); + char buffer[PROC_NUMBUF]; + unsigned int force_copy; + int err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + err = kstrtouint(strstrip(buffer), 0, &force_copy); + if (err) + goto out; + journal->j_force_copy = force_copy; +out: + return err < 0 ? err : count; +} + +static const struct proc_ops jbd2_force_copy_proc_ops = { + .proc_open = jbd2_seq_force_copy_open, + .proc_read = seq_read, + .proc_write = jbd2_seq_force_copy_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int jbd2_seq_stall_thresh_show(struct seq_file *m, void *v) +{ + journal_t *journal = m->private; + + seq_printf(m, "%lu\n", journal->j_stall_thresh); + return 0; +} + +static int jbd2_seq_stall_thresh_open(struct inode *inode, struct file *filp) +{ + journal_t *journal = pde_data(inode); + + return single_open(filp, jbd2_seq_stall_thresh_show, journal); +} + +static ssize_t jbd2_seq_stall_thresh_write(struct file *file, + const char __user *buf, size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + journal_t *journal = pde_data(inode); + char buffer[PROC_NUMBUF]; + unsigned long long stall_thresh; + int err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + err = kstrtoull(strstrip(buffer), 0, &stall_thresh); + if (err) + goto out; + WRITE_ONCE(journal->j_stall_thresh, stall_thresh); +out: + return err < 0 ? 
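The force_copy writer above and the stall_thresh writer below share the stock procfs write pattern: bound the copy to a small stack buffer, keep room for the NUL, strip whitespace, then parse with a kstrto*() helper. Condensed, reusing the PROC_NUMBUF bound defined above::

	char kbuf[PROC_NUMBUF] = { 0 };
	unsigned int val;

	if (count > sizeof(kbuf) - 1)
		count = sizeof(kbuf) - 1;	/* leave room for the NUL */
	if (copy_from_user(kbuf, ubuf, count))
		return -EFAULT;
	if (kstrtouint(strstrip(kbuf), 0, &val))
		return -EINVAL;
	/* publish val, then return count to consume the whole write */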
err : count; +} + +static const struct proc_ops jbd2_stall_thresh_proc_ops = { + .proc_open = jbd2_seq_stall_thresh_open, + .proc_read = seq_read, + .proc_write = jbd2_seq_stall_thresh_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + static struct proc_dir_entry *proc_jbd2_stats; static void jbd2_stats_proc_init(journal_t *journal) @@ -1260,12 +1505,21 @@ static void jbd2_stats_proc_init(journal_t *journal) if (journal->j_proc_entry) { proc_create_data("info", S_IRUGO, journal->j_proc_entry, &jbd2_info_proc_ops, journal); + proc_create_data("force_copy", 0644, journal->j_proc_entry, + &jbd2_force_copy_proc_ops, journal); + proc_create_data("stats", 0444, journal->j_proc_entry, + &jbd2_stats_proc_ops, journal); + proc_create_data("stall_thresh", 0644, journal->j_proc_entry, + &jbd2_stall_thresh_proc_ops, journal); } } static void jbd2_stats_proc_exit(journal_t *journal) { remove_proc_entry("info", journal->j_proc_entry); + remove_proc_entry("force_copy", journal->j_proc_entry); + remove_proc_entry("stats", journal->j_proc_entry); + remove_proc_entry("stall_thresh", journal->j_proc_entry); remove_proc_entry(journal->j_devname, proc_jbd2_stats); } @@ -1584,6 +1838,8 @@ static journal_t *journal_init_common(struct block_device *bdev, init_waitqueue_head(&journal->j_wait_transaction_locked); init_waitqueue_head(&journal->j_wait_done_commit); + init_waitqueue_head(&journal->j_wait_checkpoint); + init_waitqueue_head(&journal->j_wait_done_checkpoint); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); init_waitqueue_head(&journal->j_wait_reserved); @@ -1599,6 +1855,7 @@ static journal_t *journal_init_common(struct block_device *bdev, journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE); journal->j_min_batch_time = 0; journal->j_max_batch_time = 15000; /* 15ms */ + journal->j_stall_thresh = JBD2_DEFAULT_TRANS_STALL_THRESH; atomic_set(&journal->j_reserved_credits, 0); lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle", &jbd2_trans_commit_key, 0); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 76adab83cac368e389e096330d08ab276fc513a9..8594f28c96af66fb4c2b5afde393b55afb2ab2d6 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -463,6 +463,11 @@ static handle_t *new_handle(int nblocks) return NULL; handle->h_total_credits = nblocks; handle->h_ref = 1; + handle->h_pre_start_jiffies = jiffies; +#ifdef CONFIG_SCHEDSTATS + handle->h_sched_wait_sum = current->stats.wait_sum; + handle->h_io_wait_sum = current->stats.iowait_sum; +#endif return handle; } @@ -1093,7 +1098,9 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, if (buffer_shadow(bh)) { JBUFFER_TRACE(jh, "on shadow: sleep"); spin_unlock(&jh->b_state_lock); + task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio); wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE); + task_clear_wait_res(); goto repeat; } @@ -1927,6 +1934,40 @@ int jbd2_journal_stop(handle_t *handle) wait_for_commit = 1; } + do { + unsigned long transaction_locked_time, delta; + unsigned long journal_space_wait; + u64 sched_wait, io_wait; + + transaction_locked_time = READ_ONCE(transaction->t_locked_time); + if (!transaction_locked_time) + break; + + delta = jiffies_to_msecs(jiffies - transaction_locked_time); + if (delta < READ_ONCE(journal->j_stall_thresh)) + break; + + journal_space_wait = handle->h_start_jiffies - + handle->h_pre_start_jiffies; +#ifdef CONFIG_SCHEDSTATS + sched_wait = current->stats.wait_sum - + handle->h_sched_wait_sum; + 
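The slow-handle accounting that starts above and finishes below sits in a do { ... } while (0) block so each disqualifying condition can bail out with a plain break instead of deepening the nesting. The control shape on its own, with stand-in t_locked_time/stall_thresh values::

	do {
		unsigned long locked = READ_ONCE(t_locked_time);
		unsigned long delta;

		if (!locked)
			break;		/* transaction was never locked */

		delta = jiffies_to_msecs(jiffies - locked);
		if (delta < READ_ONCE(stall_thresh))
			break;		/* not slow enough to report */

		/* ... gather the remaining stats and emit the tracepoint ... */
	} while (0);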
io_wait = current->stats.iowait_sum - + handle->h_io_wait_sum; +#else + sched_wait = 0; + io_wait = 0; +#endif + trace_jbd2_slow_handle_stats(journal->j_fs_dev->bd_dev, + transaction->t_tid, handle->h_type, handle->h_line_no, + jiffies - handle->h_start_jiffies, handle->h_sync, + handle->h_requested_credits, + handle->h_requested_credits - handle->h_total_credits, + delta, jiffies_to_msecs(journal_space_wait), + div_u64(sched_wait, NSEC_PER_MSEC), + div_u64(io_wait, NSEC_PER_MSEC)); + } while (0); + /* * Once stop_this_handle() drops t_updates, the transaction could start * committing on us and eventually disappear. So we must not diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index c4bf26142eec9baf2d955166ccb892705a1834a8..433b10d066698ec9e19fe2495fbc5e9d64f18a3a 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -360,7 +360,9 @@ int kernfs_get_tree(struct fs_context *fc) } sb->s_flags |= SB_ACTIVE; - uuid_gen(&sb->s_uuid); + uuid_t uuid; + uuid_gen(&uuid); + super_set_uuid(sb, uuid.b, sizeof(uuid)); down_write(&root->kernfs_supers_rwsem); list_add(&info->node, &info->root->supers); diff --git a/fs/mount.h b/fs/mount.h index 130c07c2f8d258165b7056646dee76dd4c1e7d13..97a62879ef8e01127c6ad2bdb39335d4a21c0652 100644 --- a/fs/mount.h +++ b/fs/mount.h @@ -4,6 +4,7 @@ #include #include #include +#include struct mnt_namespace { struct ns_common ns; @@ -77,6 +78,8 @@ struct mount { int mnt_expiry_mark; /* true if marked for expiry */ struct hlist_head mnt_pins; struct hlist_head mnt_stuck_children; + + CK_KABI_RESERVE(1) } __randomize_layout; #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */ diff --git a/fs/namei.c b/fs/namei.c index beffbb02a24e67b24906a6a26cfbfb28198e4cd8..ef117f0adce7319f5985d087adb86e57b0ea9f36 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1033,6 +1033,9 @@ static int sysctl_protected_hardlinks __read_mostly; static int sysctl_protected_fifos __read_mostly; static int sysctl_protected_regular __read_mostly; +int sysctl_hardlink_cross_projid __read_mostly; +EXPORT_SYMBOL_GPL(sysctl_hardlink_cross_projid); + #ifdef CONFIG_SYSCTL static struct ctl_table namei_sysctls[] = { { @@ -1071,6 +1074,16 @@ static struct ctl_table namei_sysctls[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_TWO, }, + { + .procname = "hardlink_cross_projid", + .data = &sysctl_hardlink_cross_projid, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + + }, { } }; diff --git a/fs/namespace.c b/fs/namespace.c index b4385e2413d5996bacb12f9374046efe18bb27f5..45463bc554280f1ba742b9ac12f4ece4aad7cf92 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -98,6 +98,7 @@ EXPORT_SYMBOL_GPL(fs_kobj); * tree or hash is modified or when a vfsmount structure is modified. */ __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); +EXPORT_SYMBOL_GPL(mount_lock); static inline void lock_mount_hash(void) { diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index b3eca08f15b13e306d6cfab7c2f0d2a5000d2e46..4147f18dfe86709ef53b4bd60abf89d2f26dea24 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1794,6 +1794,13 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd42_write_res *result; __be32 status; + /* + * Currently, async COPY is not reliable. Force all COPY + * requests to be synchronous to avoid client application + * hangs waiting for COPY completion. 
+ */ + nfsd4_copy_set_sync(copy, true); + result = &copy->cp_res; nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 9f6bbb4a0844aaeeac7257eb7a9930e9e2134f58..fb6b8bf3c0a8f5a7729e4577573d2b8c1d701b1e 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -2029,8 +2029,8 @@ static int ocfs2_initialize_super(struct super_block *sb, cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits); bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits); sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits); - memcpy(&sb->s_uuid, di->id2.i_super.s_uuid, - sizeof(di->id2.i_super.s_uuid)); + super_set_uuid(sb, di->id2.i_super.s_uuid, + sizeof(di->id2.i_super.s_uuid)); osb->osb_dx_mask = (1 << (cbits - bbits)) - 1; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 2c056d737c27c3492938f6b9a18b5e73282e1853..a86de37f18c2af4a83365dd1f2ff471a49c37a73 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -403,7 +403,8 @@ static int ovl_lower_dir(const char *name, struct path *path, (ofs->config.index && ofs->config.upperdir)) && !fh_type) { ofs->config.index = false; ofs->config.nfs_export = false; - pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n", + pr_warn_ratelimited("fs on '%s' does not support file handles, " + "falling back to index=off,nfs_export=off.\n", name); } ofs->nofh |= !fh_type; @@ -526,11 +527,15 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir, static int ovl_report_in_use(struct ovl_fs *ofs, const char *name) { if (ofs->config.index) { - pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n", + pr_err("%s is in-use as upperdir/workdir of another mount, " + "mount with '-o index=off' to override exclusive " + "upperdir protection.\n", name); return -EBUSY; } else { - pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n", + pr_warn_ratelimited("%s is in-use as upperdir/workdir of " + "another mount, accessing files from both mounts will " + "result in undefined behavior.\n", name); return 0; } diff --git a/fs/proc/array.c b/fs/proc/array.c index 34a47fb0c57f2570a4f7cb1f45373ddaf2afa883..d7142ab120df7d4b3fc258b82d49c6192a8082f0 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -484,6 +484,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, int exit_code = task->exit_code; struct signal_struct *sig = task->signal; unsigned int seq = 1; + struct task_struct *init_tsk; state = *get_task_state(task); vsize = eip = esp = 0; @@ -586,6 +587,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, start_time = nsec_to_clock_t(timens_add_boottime_ns(task->start_boottime)); + /* + * While uptime in a container is fixed to the container start + * time, the task start time needs to be fixed too, otherwise the + * wrong start time will show in "ps". 
+ */ + rcu_read_lock(); + if (in_rich_container(current)) { + init_tsk = task_active_pid_ns(current)->child_reaper; + start_time -= nsec_to_clock_t(init_tsk->start_boottime); + } + rcu_read_unlock(); + seq_put_decimal_ull(m, "", pid_nr_ns(pid, ns)); seq_puts(m, " ("); proc_task_name(m, task, false); diff --git a/fs/proc/base.c b/fs/proc/base.c index 699f085d4de7d7894973d57564f52142e226371c..ac4a9d53a13d5e71f1afa183c4b1cdc78118c217 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -603,6 +603,15 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, return 0; } +static int proc_wait_res(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) +{ + seq_printf(m, "%d %px %lu %lu\n", task->wait_res_type, task->wait_folio, + task->wait_moment, jiffies); + + return 0; +} + struct limit_names { const char *name; const char *unit; @@ -3411,6 +3420,7 @@ static const struct pid_entry tgid_base_stuff[] = { ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages), ONE("ksm_stat", S_IRUSR, proc_pid_ksm_stat), #endif + ONE("wait_res", 0444, proc_wait_res), }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) @@ -3750,6 +3760,7 @@ static const struct pid_entry tid_base_stuff[] = { ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages), ONE("ksm_stat", S_IRUSR, proc_pid_ksm_stat), #endif + ONE("wait_res", 0444, proc_wait_res), }; static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx) diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index 817981e57223ef62a362f9c57319c125e1e9b6b8..7a7e443a58c8fd31456290b272d767c76d3b05c5 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -9,19 +9,43 @@ #include #include #include +#include #include "internal.h" static int loadavg_proc_show(struct seq_file *m, void *v) { unsigned long avnrun[3]; + unsigned int nr_R = 0; + struct cpumask cpuset_allowed; + int i; - get_avenrun(avnrun, FIXED_1/200, 0); + rcu_read_lock(); + if (in_rich_container(current)) { + struct task_struct *init_tsk; + enum rich_container_source from; + + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + + rich_container_source(&from); + rich_container_get_avenrun(from, init_tsk, avnrun, FIXED_1/200, 0, false); + rich_container_get_cpuset_cpus(&cpuset_allowed); + for_each_cpu(i, &cpuset_allowed) + nr_R += rich_container_get_running(from, init_tsk, i); + put_task_struct(init_tsk); + } else { + get_avenrun(avnrun, FIXED_1/200, 0); + nr_R = nr_running(); + } + rcu_read_unlock(); seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %u/%d %d\n", LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]), LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), - nr_running(), nr_threads, + nr_R, nr_threads, idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); return 0; } diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 45af9a989d4040135a4fe18acc9b5ef055a2affc..4b090aeacfc27badd5c9290f201cbe9d1ab83231 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -20,6 +20,7 @@ #include #include #include "internal.h" +#include void __attribute__((weak)) arch_report_meminfo(struct seq_file *m) { @@ -35,43 +36,85 @@ static int meminfo_proc_show(struct seq_file *m, void *v) { struct sysinfo i; unsigned long committed; - long cached; - long available; - unsigned long pages[NR_LRU_LISTS]; unsigned long sreclaimable, sunreclaim; int lru; - si_meminfo(&i); - si_swapinfo(&i); - committed = 
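loadavg_proc_show() above swaps the global avenrun and nr_running() for per-container figures supplied by the rich_container_* helpers introduced elsewhere in this series, but the fixed-point decode it prints is unchanged. For reference, how the stock values are read and formatted::

	unsigned long avnrun[3];

	get_avenrun(avnrun, FIXED_1/200, 0);	/* +1/200 rounding, no shift */

	/* 11-bit fixed point: integer part plus two decimal places */
	pr_info("load: %lu.%02lu\n",
		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]));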
vm_memory_committed(); - - cached = global_node_page_state(NR_FILE_PAGES) - - total_swapcache_pages() - i.bufferram; - if (cached < 0) - cached = 0; + struct mem_cgroup *memcg = NULL; + struct sysinfo_ext ext; + struct rich_container_ext *rich = NULL; + unsigned long commit_limit; + +#ifdef CONFIG_MEMCG + rcu_read_lock(); + if (in_rich_container(current)) { + memcg = rich_container_get_memcg(); + rich = rich_container_get_ext(); + } + rcu_read_unlock(); +#endif - for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) - pages[lru] = global_node_page_state(NR_LRU_BASE + lru); + if (!memcg) { + si_meminfo(&i); + si_swapinfo(&i); - available = si_mem_available(); + ext.cached = global_node_page_state(NR_FILE_PAGES) - + total_swapcache_pages() - i.bufferram; + if (ext.cached < 0) + ext.cached = 0; + + for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) { + ext.lrupages[lru] = + global_node_page_state(NR_LRU_BASE + lru); + } + ext.available = si_mem_available(); + ext.file_dirty = global_node_page_state(NR_FILE_DIRTY); + ext.writeback = global_node_page_state(NR_WRITEBACK); + ext.anon_mapped = global_node_page_state(NR_ANON_MAPPED); + ext.file_mapped = global_node_page_state(NR_FILE_MAPPED); + ext.slab_reclaimable = + global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B); + ext.slab_unreclaimable = + global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B); + ext.kernel_stack_kb = + global_node_page_state(NR_KERNEL_STACK_KB); + ext.writeback_temp = global_node_page_state(NR_WRITEBACK_TEMP); + ext.anon_thps = global_node_page_state(NR_ANON_THPS); + ext.shmem_thps = global_node_page_state(NR_SHMEM_THPS); + ext.shmem_pmd_mapped = + global_node_page_state(NR_SHMEM_PMDMAPPED); + } else { + memcg_meminfo(memcg, &i, &ext); + } + + if (rich && memcg) { +#ifdef CONFIG_MEMCG + commit_limit = rich_container_vm_commit_limit(rich, memcg); +#else + commit_limit = vm_commit_limit(); +#endif + committed = percpu_counter_read_positive(&rich->vm_committed_as); + } else { + commit_limit = vm_commit_limit(); + committed = percpu_counter_read_positive(&vm_committed_as); + } sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B); sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B); show_val_kb(m, "MemTotal: ", i.totalram); show_val_kb(m, "MemFree: ", i.freeram); - show_val_kb(m, "MemAvailable: ", available); + show_val_kb(m, "MemAvailable: ", ext.available); show_val_kb(m, "Buffers: ", i.bufferram); - show_val_kb(m, "Cached: ", cached); + show_val_kb(m, "Cached: ", ext.cached); show_val_kb(m, "SwapCached: ", total_swapcache_pages()); - show_val_kb(m, "Active: ", pages[LRU_ACTIVE_ANON] + - pages[LRU_ACTIVE_FILE]); - show_val_kb(m, "Inactive: ", pages[LRU_INACTIVE_ANON] + - pages[LRU_INACTIVE_FILE]); - show_val_kb(m, "Active(anon): ", pages[LRU_ACTIVE_ANON]); - show_val_kb(m, "Inactive(anon): ", pages[LRU_INACTIVE_ANON]); - show_val_kb(m, "Active(file): ", pages[LRU_ACTIVE_FILE]); - show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]); - show_val_kb(m, "Unevictable: ", pages[LRU_UNEVICTABLE]); + show_val_kb(m, "Active: ", ext.lrupages[LRU_ACTIVE_ANON] + + ext.lrupages[LRU_ACTIVE_FILE]); + show_val_kb(m, "Inactive: ", ext.lrupages[LRU_INACTIVE_ANON] + + ext.lrupages[LRU_INACTIVE_FILE]); + show_val_kb(m, "Active(anon): ", ext.lrupages[LRU_ACTIVE_ANON]); + show_val_kb(m, "Inactive(anon): ", ext.lrupages[LRU_INACTIVE_ANON]); + show_val_kb(m, "Active(file): ", ext.lrupages[LRU_ACTIVE_FILE]); + show_val_kb(m, "Inactive(file): ", ext.lrupages[LRU_INACTIVE_FILE]); + show_val_kb(m, "Unevictable: ", 
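The meminfo rework above funnels both sources (global vmstat counters or a memcg) through one snapshot structure, so the printing code that follows never cares where the numbers came from. A minimal sketch of the global-side fill, with a hypothetical snapshot type and ignoring the buffer pages the real code also subtracts::

	struct mem_snapshot {
		long cached;
		unsigned long lru[NR_LRU_LISTS];
	};

	static void fill_global(struct mem_snapshot *s)
	{
		int lru;

		s->cached = global_node_page_state(NR_FILE_PAGES) -
			    total_swapcache_pages();
		if (s->cached < 0)
			s->cached = 0;	/* counters can transiently skew */
		for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
			s->lru[lru] = global_node_page_state(NR_LRU_BASE + lru);
	}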
ext.lrupages[LRU_UNEVICTABLE]); show_val_kb(m, "Mlocked: ", global_zone_page_state(NR_MLOCK)); #ifdef CONFIG_HIGHMEM @@ -95,22 +138,19 @@ static int meminfo_proc_show(struct seq_file *m, void *v) (unsigned long)atomic_read(&zswap_stored_pages) << (PAGE_SHIFT - 10)); #endif - show_val_kb(m, "Dirty: ", - global_node_page_state(NR_FILE_DIRTY)); - show_val_kb(m, "Writeback: ", - global_node_page_state(NR_WRITEBACK)); - show_val_kb(m, "AnonPages: ", - global_node_page_state(NR_ANON_MAPPED)); - show_val_kb(m, "Mapped: ", - global_node_page_state(NR_FILE_MAPPED)); + show_val_kb(m, "Dirty: ", ext.file_dirty); + show_val_kb(m, "Writeback: ", ext.writeback); + show_val_kb(m, "AnonPages: ", ext.anon_mapped); + show_val_kb(m, "Mapped: ", ext.file_mapped); show_val_kb(m, "Shmem: ", i.sharedram); show_val_kb(m, "KReclaimable: ", sreclaimable + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE)); - show_val_kb(m, "Slab: ", sreclaimable + sunreclaim); - show_val_kb(m, "SReclaimable: ", sreclaimable); - show_val_kb(m, "SUnreclaim: ", sunreclaim); - seq_printf(m, "KernelStack: %8lu kB\n", - global_node_page_state(NR_KERNEL_STACK_KB)); + show_val_kb(m, "Slab: ", + ext.slab_reclaimable + ext.slab_unreclaimable); + + show_val_kb(m, "SReclaimable: ", ext.slab_reclaimable); + show_val_kb(m, "SUnreclaim: ", ext.slab_unreclaimable); + seq_printf(m, "KernelStack: %8lu kB\n", ext.kernel_stack_kb); #ifdef CONFIG_SHADOW_CALL_STACK seq_printf(m, "ShadowCallStack:%8lu kB\n", global_node_page_state(NR_KERNEL_SCS_KB)); @@ -123,9 +163,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) show_val_kb(m, "NFS_Unstable: ", 0); show_val_kb(m, "Bounce: ", global_zone_page_state(NR_BOUNCE)); - show_val_kb(m, "WritebackTmp: ", - global_node_page_state(NR_WRITEBACK_TEMP)); - show_val_kb(m, "CommitLimit: ", vm_commit_limit()); + show_val_kb(m, "WritebackTmp: ", ext.writeback_temp); + show_val_kb(m, "CommitLimit: ", commit_limit); show_val_kb(m, "Committed_AS: ", committed); seq_printf(m, "VmallocTotal: %8lu kB\n", (unsigned long)VMALLOC_TOTAL >> 10); @@ -141,12 +180,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE - show_val_kb(m, "AnonHugePages: ", - global_node_page_state(NR_ANON_THPS)); - show_val_kb(m, "ShmemHugePages: ", - global_node_page_state(NR_SHMEM_THPS)); - show_val_kb(m, "ShmemPmdMapped: ", - global_node_page_state(NR_SHMEM_PMDMAPPED)); + show_val_kb(m, "AnonHugePages: ", ext.anon_thps * HPAGE_PMD_NR); + show_val_kb(m, "ShmemHugePages: ", ext.shmem_thps * HPAGE_PMD_NR); + show_val_kb(m, "ShmemPmdMapped: ", ext.shmem_pmd_mapped * HPAGE_PMD_NR); show_val_kb(m, "FileHugePages: ", global_node_page_state(NR_FILE_THPS)); show_val_kb(m, "FilePmdMapped: ", @@ -168,6 +204,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v) arch_report_meminfo(m); +#ifdef CONFIG_MEMCG + if (memcg) + css_put(&memcg->css); +#endif + return 0; } diff --git a/fs/proc/stat.c b/fs/proc/stat.c index da60956b29156451740530f9e37ba76b39849e56..9c1d734f006991ac2e8bc81f28797ef1ba6b65a3 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #ifndef arch_irq_stat_cpu #define arch_irq_stat_cpu(cpu) 0 @@ -38,7 +40,7 @@ u64 get_idle_time(struct kernel_cpustat *kcs, int cpu) return idle; } -static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu) +u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu) { u64 iowait, iowait_usecs = -1ULL; @@ -81,13 +83,19 @@ static void show_all_irqs(struct seq_file *p) static int 
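show_stat() below aggregates a kernel_cpustat snapshot per CPU before printing the summary line. The fetch-and-sum loop in isolation, shown for the user field only::

	u64 user = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct kernel_cpustat kcs;

		kcpustat_cpu_fetch(&kcs, cpu);	/* coherent per-CPU snapshot */
		user += kcs.cpustat[CPUTIME_USER];
	}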
show_stat(struct seq_file *p, void *v) { - int i, j; + int i, j, seq = 0; u64 user, nice, system, idle, iowait, irq, softirq, steal; u64 guest, guest_nice; u64 sum = 0; u64 sum_softirq = 0; unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; struct timespec64 boottime; + struct cpumask cpuset_allowed; + unsigned int nr_runnable = 0; + struct task_struct *init_tsk = NULL; + struct cpuacct_usage_result res; + enum rich_container_source from; + bool rich_container; user = nice = system = idle = iowait = irq = softirq = steal = 0; @@ -96,24 +104,55 @@ static int show_stat(struct seq_file *p, void *v) /* shift boot timestamp according to the timens offset */ timens_sub_boottime(&boottime); + rcu_read_lock(); + rich_container = in_rich_container(current); + if (rich_container) { + /* fix btime in containers */ + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + boottime.tv_sec += init_tsk->start_time / NSEC_PER_SEC; + + rich_container_get_cpuset_cpus(&cpuset_allowed); + rich_container_source(&from); + for_each_cpu(i, &cpuset_allowed) { + rich_container_get_usage(from, init_tsk, i, &res); + user += res.user; + nice += res.nice; + system += res.system; + idle += res.idle; + iowait += res.iowait; + irq += res.irq; + softirq += res.softirq; + steal += res.steal; + guest += res.guest; + guest_nice += res.guest_nice; + } + } else { + for_each_possible_cpu(i) { + struct kernel_cpustat kcpustat; + u64 *cpustat = kcpustat.cpustat; + + kcpustat_cpu_fetch(&kcpustat, i); + + user += cpustat[CPUTIME_USER]; + nice += cpustat[CPUTIME_NICE]; + system += cpustat[CPUTIME_SYSTEM]; + idle += get_idle_time(&kcpustat, i); + iowait += get_iowait_time(&kcpustat, i); + irq += cpustat[CPUTIME_IRQ]; + softirq += cpustat[CPUTIME_SOFTIRQ]; + steal += cpustat[CPUTIME_STEAL]; + guest += cpustat[CPUTIME_GUEST]; + guest_nice += cpustat[CPUTIME_GUEST_NICE]; + } + } + rcu_read_unlock(); + for_each_possible_cpu(i) { - struct kernel_cpustat kcpustat; - u64 *cpustat = kcpustat.cpustat; - - kcpustat_cpu_fetch(&kcpustat, i); - - user += cpustat[CPUTIME_USER]; - nice += cpustat[CPUTIME_NICE]; - system += cpustat[CPUTIME_SYSTEM]; - idle += get_idle_time(&kcpustat, i); - iowait += get_iowait_time(&kcpustat, i); - irq += cpustat[CPUTIME_IRQ]; - softirq += cpustat[CPUTIME_SOFTIRQ]; - steal += cpustat[CPUTIME_STEAL]; - guest += cpustat[CPUTIME_GUEST]; - guest_nice += cpustat[CPUTIME_GUEST_NICE]; - sum += kstat_cpu_irqs_sum(i); - sum += arch_irq_stat_cpu(i); + sum += kstat_cpu_irqs_sum(i); + sum += arch_irq_stat_cpu(i); for (j = 0; j < NR_SOFTIRQS; j++) { unsigned int softirq_stat = kstat_softirqs_cpu(j, i); @@ -136,40 +175,85 @@ static int show_stat(struct seq_file *p, void *v) seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); seq_putc(p, '\n'); - for_each_online_cpu(i) { - struct kernel_cpustat kcpustat; - u64 *cpustat = kcpustat.cpustat; - - kcpustat_cpu_fetch(&kcpustat, i); - - /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ - user = cpustat[CPUTIME_USER]; - nice = cpustat[CPUTIME_NICE]; - system = cpustat[CPUTIME_SYSTEM]; - idle = get_idle_time(&kcpustat, i); - iowait = get_iowait_time(&kcpustat, i); - irq = cpustat[CPUTIME_IRQ]; - softirq = cpustat[CPUTIME_SOFTIRQ]; - steal = cpustat[CPUTIME_STEAL]; - guest = cpustat[CPUTIME_GUEST]; - guest_nice = cpustat[CPUTIME_GUEST_NICE]; - seq_printf(p, "cpu%d", i); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); - 
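In the rich-container branch below, the per-CPU lines are renumbered with a local counter so a container pinned to, say, physical CPUs 3 and 5 still reads cpu0 and cpu1 from its own /proc/stat. The renumbering idiom by itself, with the time fields elided::

	int cpu, seq = 0;

	for_each_cpu(cpu, &cpuset_allowed)		/* physical ids: 3, 5, ... */
		seq_printf(p, "cpu%d\n", seq++);	/* printed as 0, 1, ... */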
seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); - seq_putc(p, '\n'); + rcu_read_lock(); + if (rich_container) { + for_each_cpu(i, &cpuset_allowed) { + rich_container_get_usage(from, init_tsk, i, &res); + + seq_printf(p, "cpu%d", seq++); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.user)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.nice)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.system)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.idle)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.iowait)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.irq)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.softirq)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.steal)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.guest)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.guest_nice)); + seq_putc(p, '\n'); + } + } else { + for_each_online_cpu(i) { + struct kernel_cpustat kcpustat; + u64 *cpustat = kcpustat.cpustat; + + kcpustat_cpu_fetch(&kcpustat, i); + + /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ + user = cpustat[CPUTIME_USER]; + nice = cpustat[CPUTIME_NICE]; + system = cpustat[CPUTIME_SYSTEM]; + idle = get_idle_time(&kcpustat, i); + iowait = get_iowait_time(&kcpustat, i); + irq = cpustat[CPUTIME_IRQ]; + softirq = cpustat[CPUTIME_SOFTIRQ]; + steal = cpustat[CPUTIME_STEAL]; + guest = cpustat[CPUTIME_GUEST]; + guest_nice = cpustat[CPUTIME_GUEST_NICE]; + + seq_printf(p, "cpu%d", i); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(guest_nice)); + seq_putc(p, '\n'); + } } + rcu_read_unlock(); + seq_put_decimal_ull(p, "intr ", (unsigned long long)sum); show_all_irqs(p); + rcu_read_lock(); + if (rich_container) { + for_each_cpu(i, &cpuset_allowed) + nr_runnable += rich_container_get_running(from, init_tsk, i); + } else + nr_runnable = nr_running(); + rcu_read_unlock(); + + if (rich_container) + put_task_struct(init_tsk); + seq_printf(p, "\nctxt %llu\n" "btime %llu\n" @@ -179,7 +263,7 @@ static int show_stat(struct seq_file *p, void *v) nr_context_switches(), (unsigned long long)boottime.tv_sec, total_forks, - nr_running(), + nr_runnable, nr_iowait()); seq_put_decimal_ull(p, "softirq ", (unsigned long long)sum_softirq); diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index b8640f36ebf8ab19d48fe9acc1e9bd8a161cbf28..5a081bad7782a7ed9d882259f93ec3a0e85eebe7 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -865,7 +865,8 @@ static int show_smap(struct seq_file *m, void *v) __show_smap(m, &mss, false); seq_printf(m, "THPeligible: %8u\n", - hugepage_vma_check(vma, vma->vm_flags, true, false, true)); + !!thp_vma_allowable_orders(vma, 
vma->vm_flags, true, false, + true, THP_ORDERS_ALL)); if (arch_pkeys_enabled()) seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index b5343d209381aae892d2423602607f1ce05e479e..591909b4d1113aa01a9f9bf18dceae833d749bd4 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include "internal.h" static int uptime_proc_show(struct seq_file *m, void *v) @@ -17,16 +19,39 @@ static int uptime_proc_show(struct seq_file *m, void *v) u32 rem; int i; + ktime_get_boottime_ts64(&uptime); + timens_add_boottime(&uptime); + idle_nsec = 0; - for_each_possible_cpu(i) { - struct kernel_cpustat kcs; - kcpustat_cpu_fetch(&kcs, i); - idle_nsec += get_idle_time(&kcs, i); - } + rcu_read_lock(); + if (in_rich_container(current)) { + enum rich_container_source from; + struct task_struct *init_tsk; + struct cpuacct_usage_result res; - ktime_get_boottime_ts64(&uptime); - timens_add_boottime(&uptime); + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + + rich_container_source(&from); + for_each_possible_cpu(i) { + rich_container_get_usage(from, init_tsk, i, &res); + idle_nsec += res.idle; + } + uptime = timespec64_sub(uptime, + ns_to_timespec64(init_tsk->start_time)); + put_task_struct(init_tsk); + } else { + for_each_possible_cpu(i) { + struct kernel_cpustat kcs; + + kcpustat_cpu_fetch(&kcs, i); + idle_nsec += get_idle_time(&kcs, i); + } + } + rcu_read_unlock(); idle.tv_sec = div_u64_rem(idle_nsec, NSEC_PER_SEC, &rem); idle.tv_nsec = rem; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 67562c78e57d53f0c032e302e0b30e1575ad87c6..f4bb4accdd7750fba0f601b76c41d17c90ba7504 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -83,6 +83,7 @@ #include "../internal.h" /* ugh */ #include +#include /* * There are five quota SMP locks: @@ -1293,6 +1294,13 @@ static void flush_warnings(struct dquot_warn *warn) static int ignore_hardlimit(struct dquot *dquot) { struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; + bool rich_container; + + rcu_read_lock(); + rich_container = in_rich_container(current); + rcu_read_unlock(); + if (rich_container) + return 0; return capable(CAP_SYS_RESOURCE) && (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || diff --git a/fs/remap_range.c b/fs/remap_range.c index 87ae4f0dc3aa01c6099ef2fa7a66b5d84bdb9703..2fdc13c901ef30cc0186ffa7cf875c307075846d 100644 --- a/fs/remap_range.c +++ b/fs/remap_range.c @@ -315,10 +315,18 @@ __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, if (!same_inode) inode_dio_wait(inode_out); - ret = filemap_write_and_wait_range(inode_in->i_mapping, - pos_in, pos_in + *len - 1); - if (ret) - return ret; + if (remap_flags & REMAP_FILE_FAST_REFLINK) { + ret = fast_reflink_apply(inode_in->i_mapping, + pos_in >> PAGE_SHIFT, + (pos_in + *len - 1) >> PAGE_SHIFT); + if (ret) + return ret; + } else { + ret = filemap_write_and_wait_range(inode_in->i_mapping, + pos_in, pos_in + *len - 1); + if (ret) + return ret; + } ret = filemap_write_and_wait_range(inode_out->i_mapping, pos_out, pos_out + *len - 1); diff --git a/fs/resctrl/Kconfig b/fs/resctrl/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..36a1ddbe6c216faf4ca69eeff8273b2400481491 --- /dev/null +++ b/fs/resctrl/Kconfig @@ -0,0 +1,23 @@ +config RESCTRL_FS + bool "CPU Resource Control Filesystem (resctrl)" + depends on ARCH_HAS_CPU_RESCTRL + select KERNFS + 
select PROC_CPU_RESCTRL if PROC_FS + help + Resctrl is a filesystem interface + to control allocation and + monitoring of system resources + used by the CPUs. + +config RESCTRL_FS_PSEUDO_LOCK + bool + help + Software mechanism to pin data in a cache portion using + micro-architecture specific knowledge. + +config RESCTRL_RMID_DEPENDS_ON_CLOSID + bool + help + Enabled by the architecture when the RMID values depend on the CLOSID. + This causes the closid allocator to search for a CLOSID with a clean + RMID. diff --git a/fs/resctrl/Makefile b/fs/resctrl/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..10fcfb0fdb102a4329e2cbbfdc119e3b288d7db2 --- /dev/null +++ b/fs/resctrl/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_RESCTRL_FS) += rdtgroup.o ctrlmondata.o monitor.o +obj-$(CONFIG_RESCTRL_FS_PSEUDO_LOCK) += pseudo_lock.o diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c new file mode 100644 index 0000000000000000000000000000000000000000..f5cdabe2ee9e5740afc47a7d4dfc0ed118248648 --- /dev/null +++ b/fs/resctrl/ctrlmondata.c @@ -0,0 +1,532 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Resource Director Technology (RDT) + * - Cache Allocation code. + * + * Copyright (C) 2016 Intel Corporation + * + * Authors: + * Fenghua Yu + * Tony Luck + * + * More information about RDT can be found in the Intel (R) x86 Architecture + * Software Developer Manual June 2016, volume 3, section 17.17. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include "internal.h" + +struct rdt_parse_data { + struct rdtgroup *rdtgrp; + char *buf; +}; + +typedef int(ctrlval_parser_t)(struct rdt_parse_data *data, + struct resctrl_schema *s, struct rdt_domain *d); + +/* + * Check whether MBA bandwidth percentage value is correct. The value is + * checked against the minimum and maximum bandwidth values specified by the + * hardware. The allocated bandwidth percentage is rounded to the next + * control step available on the hardware. + */ +static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r) +{ + int ret; + u32 bw; + + /* + * Only linear delay values are supported for current Intel SKUs. + */ + if (!r->membw.delay_linear && r->membw.arch_needs_linear) { + rdt_last_cmd_puts("No support for non-linear MB domains\n"); + return false; + } + + ret = kstrtou32(buf, 10, &bw); + if (ret) { + rdt_last_cmd_printf("Invalid MB value %s\n", buf); + return false; + } + + /* Nothing else to do if software controller is enabled. */ + if (is_mba_sc(r)) { + *data = bw; + return true; + } + + if (bw < r->membw.min_bw || bw > r->default_ctrl) { + rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n", bw, + r->membw.min_bw, r->default_ctrl); + return false; + } + + *data = roundup(bw, (unsigned long)r->membw.bw_gran); + return true; +} + +static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, + struct rdt_domain *d) +{ + struct resctrl_staged_config *cfg; + u32 closid = data->rdtgrp->closid; + struct rdt_resource *r = s->res; + u32 bw_val; + + cfg = &d->staged_config[s->conf_type]; + if (cfg->have_new_ctrl) { + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); + return -EINVAL; + } + + if (!bw_validate(data->buf, &bw_val, r)) + return -EINVAL; + + if (is_mba_sc(r)) { + d->mbps_val[closid] = bw_val; + return 0; + } + + cfg->new_ctrl = bw_val; + cfg->have_new_ctrl = true; + + return 0; +} + +/* + * Check whether a cache bit mask is valid. 
+ * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID: + * - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1 + * - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1 + * + * Haswell does not support a non-contiguous 1s value and additionally + * requires at least two bits set. + * AMD allows non-contiguous bitmasks. + */ +static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) +{ + unsigned long first_bit, zero_bit, val; + unsigned int cbm_len = r->cache.cbm_len; + int ret; + + ret = kstrtoul(buf, 16, &val); + if (ret) { + rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); + return false; + } + + if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) { + rdt_last_cmd_puts("Mask out of range\n"); + return false; + } + + first_bit = find_first_bit(&val, cbm_len); + zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); + + /* Are non-contiguous bitmasks allowed? */ + if (!r->cache.arch_has_sparse_bitmasks && + (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) { + rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", + val); + return false; + } + + if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { + rdt_last_cmd_printf("Need at least %d bits in the mask\n", + r->cache.min_cbm_bits); + return false; + } + + *data = val; + return true; +} + +/* + * Read one cache bit mask (hex). Check that it is valid for the current + * resource type. + */ +static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, + struct rdt_domain *d) +{ + struct rdtgroup *rdtgrp = data->rdtgrp; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + u32 cbm_val; + + cfg = &d->staged_config[s->conf_type]; + if (cfg->have_new_ctrl) { + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); + return -EINVAL; + } + + /* + * Cannot set up more than one pseudo-locked region in a cache + * hierarchy. + */ + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && + rdtgroup_pseudo_locked_in_hierarchy(d)) { + rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n"); + return -EINVAL; + } + + if (!cbm_validate(data->buf, &cbm_val, r)) + return -EINVAL; + + if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || + rdtgrp->mode == RDT_MODE_SHAREABLE) && + rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) { + rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n"); + return -EINVAL; + } + + /* + * The CBM may not overlap with the CBM of another closid if + * either is exclusive. + */ + if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) { + rdt_last_cmd_puts("Overlaps with exclusive group\n"); + return -EINVAL; + } + + if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) { + if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + rdt_last_cmd_puts("Overlaps with other group\n"); + return -EINVAL; + } + } + + cfg->new_ctrl = cbm_val; + cfg->have_new_ctrl = true; + + return 0; +} + +static ctrlval_parser_t *get_parser(struct rdt_resource *res) +{ + if (res->fflags & RFTYPE_RES_CACHE) + return &parse_cbm; + else + return &parse_bw; +} + +/* + * For each domain in this resource we expect to find a series of: + * id=mask + * separated by ";". The "id" is in decimal, and must match one of + * the "id"s for this resource. 
+ */ +static int parse_line(char *line, struct resctrl_schema *s, + struct rdtgroup *rdtgrp) +{ + ctrlval_parser_t *parse_ctrlval = get_parser(s->res); + enum resctrl_conf_type t = s->conf_type; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + struct rdt_parse_data data; + char *dom = NULL, *id; + struct rdt_domain *d; + unsigned long dom_id; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && + (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) { + rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); + return -EINVAL; + } + +next: + if (!line || line[0] == '\0') + return 0; + dom = strsep(&line, ";"); + id = strsep(&dom, "="); + if (!dom || kstrtoul(id, 10, &dom_id)) { + rdt_last_cmd_puts("Missing '=' or non-numeric domain\n"); + return -EINVAL; + } + dom = strim(dom); + list_for_each_entry(d, &r->domains, list) { + if (d->id == dom_id) { + data.buf = dom; + data.rdtgrp = rdtgrp; + if (parse_ctrlval(&data, s, d)) + return -EINVAL; + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + cfg = &d->staged_config[t]; + /* + * In pseudo-locking setup mode and just + * parsed a valid CBM that should be + * pseudo-locked. Only one locked region per + * resource group and domain so just do + * the required initialization for single + * region and return. + */ + rdtgrp->plr->s = s; + rdtgrp->plr->d = d; + rdtgrp->plr->cbm = cfg->new_ctrl; + d->plr = rdtgrp->plr; + return 0; + } + goto next; + } + } + return -EINVAL; +} + +static int rdtgroup_parse_resource(char *resname, char *tok, + struct rdtgroup *rdtgrp) +{ + struct resctrl_schema *s; + + list_for_each_entry(s, &resctrl_schema_all, list) { + if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid) + return parse_line(tok, s, rdtgrp); + } + rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname); + return -EINVAL; +} + +ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct resctrl_schema *s; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + char *tok, *resname; + int ret = 0; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + buf[nbytes - 1] = '\0'; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + rdt_last_cmd_clear(); + + /* + * No changes to pseudo-locked region allowed. It has to be removed + * and re-created instead. + */ + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + ret = -EINVAL; + rdt_last_cmd_puts("Resource group is pseudo-locked\n"); + goto out; + } + + rdt_staged_configs_clear(); + + while ((tok = strsep(&buf, "\n")) != NULL) { + resname = strim(strsep(&tok, ":")); + if (!tok) { + rdt_last_cmd_puts("Missing ':'\n"); + ret = -EINVAL; + goto out; + } + if (tok[0] == '\0') { + rdt_last_cmd_printf("Missing '%s' value\n", resname); + ret = -EINVAL; + goto out; + } + ret = rdtgroup_parse_resource(resname, tok, rdtgrp); + if (ret) + goto out; + } + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + + /* + * Writes to mba_sc resources update the software controller, + * not the control MSR. 
+ */ + if (is_mba_sc(r)) + continue; + + ret = resctrl_arch_update_domains(r, rdtgrp->closid); + if (ret) + goto out; + } + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + /* + * If pseudo-locking fails we keep the resource group in + * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service + * active and updated for just the domain the pseudo-locked + * region was requested for. + */ + ret = rdtgroup_pseudo_lock_create(rdtgrp); + } + +out: + rdt_staged_configs_clear(); + rdtgroup_kn_unlock(of->kn); + return ret ?: nbytes; +} + +static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid) +{ + struct rdt_resource *r = schema->res; + struct rdt_domain *dom; + bool sep = false; + u32 ctrl_val; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + seq_printf(s, "%*s:", max_name_width, schema->name); + list_for_each_entry(dom, &r->domains, list) { + if (sep) + seq_puts(s, ";"); + + if (is_mba_sc(r)) + ctrl_val = dom->mbps_val[closid]; + else + ctrl_val = resctrl_arch_get_config(r, dom, closid, + schema->conf_type); + + seq_printf(s, r->format_str, dom->id, max_data_width, + ctrl_val); + sep = true; + } + seq_puts(s, "\n"); +} + +int rdtgroup_schemata_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct resctrl_schema *schema; + struct rdtgroup *rdtgrp; + int ret = 0; + u32 closid; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + list_for_each_entry(schema, &resctrl_schema_all, list) { + seq_printf(s, "%s:uninitialized\n", schema->name); + } + } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + seq_printf(s, "%s:%d=%x\n", + rdtgrp->plr->s->res->name, + rdtgrp->plr->d->id, + rdtgrp->plr->cbm); + } + } else { + closid = rdtgrp->closid; + list_for_each_entry(schema, &resctrl_schema_all, list) { + if (closid < schema->num_closid) + show_doms(s, schema, closid); + } + } + } else { + ret = -ENOENT; + } + rdtgroup_kn_unlock(of->kn); + return ret; +} + +static int smp_mon_event_count(void *arg) +{ + mon_event_count(arg); + + return 0; +} + +void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, + struct rdt_domain *d, struct rdtgroup *rdtgrp, + int evtid, int first) +{ + int cpu; + + /* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + /* + * Setup the parameters to pass to mon_event_count() to read the data. + */ + rr->rgrp = rdtgrp; + rr->evtid = evtid; + rr->r = r; + rr->d = d; + rr->val = 0; + rr->first = first; + rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid); + if (IS_ERR(rr->arch_mon_ctx)) { + rr->err = -EINVAL; + return; + } + + cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU); + + /* + * cpumask_any_housekeeping() prefers housekeeping CPUs, but + * are all the CPUs nohz_full? If yes, pick a CPU to IPI. + * MPAM's resctrl_arch_rmid_read() is unable to read the + * counters on some platforms if its called in IRQ context. 
+ */ + if (tick_nohz_full_cpu(cpu)) + smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); + else + smp_call_on_cpu(cpu, smp_mon_event_count, rr, false); + + resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx); +} + +int rdtgroup_mondata_show(struct seq_file *m, void *arg) +{ + struct kernfs_open_file *of = m->private; + u32 resid, evtid, domid; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + union mon_data_bits md; + struct rdt_domain *d; + struct rmid_read rr; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + ret = -ENOENT; + goto out; + } + + md.priv = of->kn->priv; + resid = md.u.rid; + domid = md.u.domid; + evtid = md.u.evtid; + + r = resctrl_arch_get_resource(resid); + d = resctrl_arch_find_domain(r, domid); + if (IS_ERR_OR_NULL(d)) { + ret = -ENOENT; + goto out; + } + + mon_event_read(&rr, r, d, rdtgrp, evtid, false); + + if (rr.err == -EIO) + seq_puts(m, "Error\n"); + else if (rr.err == -EINVAL) + seq_puts(m, "Unavailable\n"); + else + seq_printf(m, "%llu\n", rr.val); + +out: + rdtgroup_kn_unlock(of->kn); + return ret; +} diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h new file mode 100644 index 0000000000000000000000000000000000000000..b8afeee36fff0fb2b2ad7991fd076bca7973ff9c --- /dev/null +++ b/fs/resctrl/internal.h @@ -0,0 +1,341 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_RESCTRL_INTERNAL_H +#define _FS_RESCTRL_INTERNAL_H + +#include +#include +#include +#include +#include +#include + +#include + +/** + * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that + * aren't marked nohz_full + * @mask: The mask to pick a CPU from. + * @exclude_cpu:The CPU to avoid picking. + * + * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping + * CPUs that don't use nohz_full, these are preferred. Pass + * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs. + * + * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available. + */ +static inline unsigned int +cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu) +{ + unsigned int cpu, hk_cpu; + + if (exclude_cpu == RESCTRL_PICK_ANY_CPU) + cpu = cpumask_any(mask); + else + cpu = cpumask_any_but(mask, exclude_cpu); + + /* Only continue if tick_nohz_full_mask has been initialized. */ + if (!tick_nohz_full_enabled()) + return cpu; + + /* If the CPU picked isn't marked nohz_full nothing more needs doing. 
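+	 * For example, with @mask = {2,3} and CPU 3 marked nohz_full, CPU 2
+	 * is preferred; a nohz_full CPU can only be returned when @mask
+	 * contains no housekeeping CPUs.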
*/ + if (cpu < nr_cpu_ids && !tick_nohz_full_cpu(cpu)) + return cpu; + + /* Try to find a CPU that isn't nohz_full to use in preference */ + hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask); + if (hk_cpu == exclude_cpu) + hk_cpu = cpumask_nth_andnot(1, mask, tick_nohz_full_mask); + + if (hk_cpu < nr_cpu_ids) + cpu = hk_cpu; + + return cpu; +} + +struct rdt_fs_context { + struct kernfs_fs_context kfc; + bool enable_cdpl2; + bool enable_cdpl3; + bool enable_mba_mbps; + bool enable_debug; +}; + +static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) +{ + struct kernfs_fs_context *kfc = fc->fs_private; + + return container_of(kfc, struct rdt_fs_context, kfc); +} + +/** + * struct mon_evt - Entry in the event list of a resource + * @evtid: event id + * @name: name of the event + * @configurable: true if the event is configurable + * @list: entry in &rdt_resource->evt_list + */ +struct mon_evt { + enum resctrl_event_id evtid; + char *name; + bool configurable; + struct list_head list; +}; + +/** + * union mon_data_bits - Monitoring details for each event file + * @priv: Used to store monitoring event data in @u + * as kernfs private data + * @rid: Resource id associated with the event file + * @evtid: Event id associated with the event file + * @domid: The domain to which the event file belongs + * @u: Name of the bit fields struct + */ +union mon_data_bits { + void *priv; + struct { + unsigned int rid : 10; + enum resctrl_event_id evtid : 8; + unsigned int domid : 14; + } u; +}; + +struct rmid_read { + struct rdtgroup *rgrp; + struct rdt_resource *r; + struct rdt_domain *d; + enum resctrl_event_id evtid; + bool first; + int err; + u64 val; + void *arch_mon_ctx; +}; + +extern struct list_head resctrl_schema_all; +extern bool resctrl_mounted; + +enum rdt_group_type { + RDTCTRL_GROUP = 0, + RDTMON_GROUP, + RDT_NUM_GROUP, +}; + +/** + * enum rdtgrp_mode - Mode of a RDT resource group + * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations + * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed + * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking + * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations + * allowed AND the allocations are Cache Pseudo-Locked + * @RDT_NUM_MODES: Total number of modes + * + * The mode of a resource group enables control over the allowed overlap + * between allocations associated with different resource groups (classes + * of service). User is able to modify the mode of a resource group by + * writing to the "mode" resctrl file associated with the resource group. + * + * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by + * writing the appropriate text to the "mode" file. A resource group enters + * "pseudo-locked" mode after the schemata is written while the resource + * group is in "pseudo-locksetup" mode. + */ +enum rdtgrp_mode { + RDT_MODE_SHAREABLE = 0, + RDT_MODE_EXCLUSIVE, + RDT_MODE_PSEUDO_LOCKSETUP, + RDT_MODE_PSEUDO_LOCKED, + + /* Must be last */ + RDT_NUM_MODES, +}; + +/** + * struct mongroup - store mon group's data in resctrl fs. + * @mon_data_kn: kernfs node for the mon_data directory + * @parent: parent rdtgrp + * @crdtgrp_list: child rdtgroup node list + * @rmid: rmid for this rdtgroup + */ +struct mongroup { + struct kernfs_node *mon_data_kn; + struct rdtgroup *parent; + struct list_head crdtgrp_list; + u32 rmid; +}; + +/** + * struct rdtgroup - store rdtgroup's data in resctrl file system. 
+ * @kn: kernfs node + * @rdtgroup_list: linked list for all rdtgroups + * @closid: closid for this rdtgroup + * @cpu_mask: CPUs assigned to this rdtgroup + * @flags: status bits + * @waitcount: how many cpus expect to find this + * group when they acquire rdtgroup_mutex + * @type: indicates type of this rdtgroup - either + * monitor only or ctrl_mon group + * @mon: mongroup related data + * @mode: mode of resource group + * @plr: pseudo-locked region + */ +struct rdtgroup { + struct kernfs_node *kn; + struct list_head rdtgroup_list; + u32 closid; + struct cpumask cpu_mask; + int flags; + atomic_t waitcount; + enum rdt_group_type type; + struct mongroup mon; + enum rdtgrp_mode mode; + struct pseudo_lock_region *plr; +}; + +/* List of all resource groups */ +extern struct list_head rdt_all_groups; + +extern int max_name_width, max_data_width; + +/** + * struct rftype - describe each file in the resctrl file system + * @name: File name + * @mode: Access mode + * @kf_ops: File operations + * @flags: File specific RFTYPE_FLAGS_* flags + * @fflags: File specific RFTYPE_* flags + * @seq_show: Show content of the file + * @write: Write to the file + */ +struct rftype { + char *name; + umode_t mode; + const struct kernfs_ops *kf_ops; + unsigned long flags; + unsigned long fflags; + + int (*seq_show)(struct kernfs_open_file *of, + struct seq_file *sf, void *v); + /* + * write() is the generic write callback which maps directly to + * kernfs write operation and overrides all other operations. + * Maximum write size is determined by ->max_write_len. + */ + ssize_t (*write)(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off); +}; + +/** + * struct mbm_state - status for each MBM counter in each domain + * @prev_bw_bytes: Previous bytes value read for bandwidth calculation + * @prev_bw: The most recent bandwidth in MBps + */ +struct mbm_state { + u64 prev_bw_bytes; + u32 prev_bw; +}; + +static inline bool is_mba_sc(struct rdt_resource *r) +{ + if (!r) + r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + + /* + * The software controller support is only applicable to MBA resource. + * Make sure to check for resource type. 
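+	 * Callers that do not have a resource to hand may pass NULL: the
+	 * MBA resource is then looked up here, e.g. the is_mba_sc(NULL)
+	 * calls made from the MBM overflow handler.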
+ */ + if (r->rid != RDT_RESOURCE_MBA) + return false; + + return r->membw.mba_sc; +} + +extern struct mutex rdtgroup_mutex; +extern struct rdtgroup rdtgroup_default; +extern struct dentry *debugfs_resctrl; + +void rdt_last_cmd_clear(void); +void rdt_last_cmd_puts(const char *s); +__printf(1, 2) +void rdt_last_cmd_printf(const char *fmt, ...); + +struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn); +void rdtgroup_kn_unlock(struct kernfs_node *kn); +int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name); +int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, + umode_t mask); +ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off); +int rdtgroup_schemata_show(struct kernfs_open_file *of, + struct seq_file *s, void *v); +bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, + unsigned long cbm, int closid, bool exclusive); +unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, + unsigned long cbm); +enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); +int rdtgroup_tasks_assigned(struct rdtgroup *r); +int closids_supported(void); +void closid_free(int closid); +int alloc_rmid(u32 closid); +void free_rmid(u32 closid, u32 rmid); +void resctrl_mon_resource_exit(void); +void mon_event_count(void *info); +int rdtgroup_mondata_show(struct seq_file *m, void *arg); +void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, + struct rdt_domain *d, struct rdtgroup *rdtgrp, + int evtid, int first); +int resctrl_mon_resource_init(void); +void mbm_setup_overflow_handler(struct rdt_domain *dom, + unsigned long delay_ms, + int exclude_cpu); +void mbm_handle_overflow(struct work_struct *work); +bool is_mba_sc(struct rdt_resource *r); +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu); +void cqm_handle_limbo(struct work_struct *work); +bool has_busy_rmid(struct rdt_domain *d); +void __check_limbo(struct rdt_domain *d, bool force_free); +void mbm_config_rftype_init(const char *config); +void rdt_staged_configs_clear(void); +bool closid_allocated(unsigned int closid); +int resctrl_find_cleanest_closid(void); + +#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK +int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); +int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); +bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); +bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); +int rdt_pseudo_lock_init(void); +void rdt_pseudo_lock_release(void); +int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); +void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); +#else +static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) +{ + return false; +} + +static inline bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) +{ + return false; +} + +static inline int rdt_pseudo_lock_init(void) { return 0; } +static inline void rdt_pseudo_lock_release(void) { } +static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { } +#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */ + +#endif /* _FS_RESCTRL_INTERNAL_H */ diff --git a/fs/resctrl/monitor.c 
b/fs/resctrl/monitor.c
new file mode 100644
index 0000000000000000000000000000000000000000..ea4183cc48ade854467e6f794370fa849bf967f4
--- /dev/null
+++ b/fs/resctrl/monitor.c
@@ -0,0 +1,860 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Resource Director Technology(RDT)
+ * - Monitoring code
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author:
+ *	Vikas Shivappa
+ *
+ * This replaces the perf-based cqm.c, but reuses a lot of the code and
+ * data structures originally from Peter Zijlstra and Matt Fleming.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * struct rmid_entry - dirty tracking for all RMID.
+ * @closid:	The CLOSID for this entry.
+ * @rmid:	The RMID for this entry.
+ * @busy:	The number of domains with cached data using this RMID.
+ * @list:	Member of the rmid_free_lru list when busy == 0.
+ *
+ * Depending on the architecture the correct monitor is accessed using
+ * both @closid and @rmid, or @rmid only.
+ *
+ * Take the rdtgroup_mutex when accessing.
+ */
+struct rmid_entry {
+	u32			closid;
+	u32			rmid;
+	int			busy;
+	struct list_head	list;
+};
+
+/*
+ * @rmid_free_lru - A least recently used list of free RMIDs
+ *	These RMIDs are guaranteed to have an occupancy less than the
+ *	threshold occupancy
+ */
+static LIST_HEAD(rmid_free_lru);
+
+/*
+ * @closid_num_dirty_rmid	The number of dirty RMID each CLOSID has.
+ *	Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
+ *	Indexed by CLOSID. Protected by rdtgroup_mutex.
+ */
+static u32 *closid_num_dirty_rmid;
+
+/*
+ * @rmid_limbo_count - count of currently unused but (potentially)
+ *	dirty RMIDs.
+ *	This counts RMIDs that no one is currently using but that
+ *	may have an occupancy value > resctrl_rmid_realloc_threshold. User can
+ *	change the threshold occupancy value.
+ */
+static unsigned int rmid_limbo_count;
+
+/*
+ * @rmid_entry - The entry in the limbo and free lists.
+ */
+static struct rmid_entry	*rmid_ptrs;
+
+/*
+ * This is the threshold cache occupancy in bytes at which we will consider an
+ * RMID available for re-allocation.
+ */
+unsigned int resctrl_rmid_realloc_threshold;
+
+/*
+ * This is the maximum value for the reallocation threshold, in bytes.
+ */
+unsigned int resctrl_rmid_realloc_limit;
+
+/*
+ * x86 and arm64 differ in their handling of monitoring.
+ * x86's RMID are independent numbers, there is only one source of traffic
+ * with an RMID value of '1'.
+ * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of
+ * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
+ * value is no longer unique.
+ * To account for this, resctrl uses an index. On x86 this is just the RMID,
+ * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
+ *
+ * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
+ * must accept an attempt to read every index.
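+ * For example, an architecture could encode the index as
+ * (closid * num_rmid) + rmid; the exact encoding is private to
+ * resctrl_arch_rmid_idx_encode() and resctrl_arch_rmid_idx_decode().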
+ */ +static inline struct rmid_entry *__rmid_entry(u32 idx) +{ + struct rmid_entry *entry; + u32 closid, rmid; + + entry = &rmid_ptrs[idx]; + resctrl_arch_rmid_idx_decode(idx, &closid, &rmid); + + WARN_ON_ONCE(entry->closid != closid); + WARN_ON_ONCE(entry->rmid != rmid); + + return entry; +} + +static void limbo_release_entry(struct rmid_entry *entry) +{ + lockdep_assert_held(&rdtgroup_mutex); + + rmid_limbo_count--; + list_add_tail(&entry->list, &rmid_free_lru); + + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]--; +} + +/* + * Check the RMIDs that are marked as busy for this domain. If the + * reported LLC occupancy is below the threshold clear the busy bit and + * decrement the count. If the busy count gets to zero on an RMID, we + * free the RMID + */ +void __check_limbo(struct rdt_domain *d, bool force_free) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + struct rmid_entry *entry; + u32 idx, cur_idx = 1; + void *arch_mon_ctx; + bool rmid_dirty; + u64 val = 0; + + arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID); + if (IS_ERR(arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(arch_mon_ctx)); + return; + } + + /* + * Skip RMID 0 and start from RMID 1 and check all the RMIDs that + * are marked as busy for occupancy < threshold. If the occupancy + * is less than the threshold decrement the busy counter of the + * RMID and move it to the free list when the counter reaches 0. + */ + for (;;) { + idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx); + if (idx >= idx_limit) + break; + + entry = __rmid_entry(idx); + if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, + QOS_L3_OCCUP_EVENT_ID, &val, + arch_mon_ctx)) { + rmid_dirty = true; + } else { + rmid_dirty = (val >= resctrl_rmid_realloc_threshold); + } + + if (force_free || !rmid_dirty) { + clear_bit(idx, d->rmid_busy_llc); + if (!--entry->busy) + limbo_release_entry(entry); + } + cur_idx = idx + 1; + } + + resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx); +} + +bool has_busy_rmid(struct rdt_domain *d) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + + return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; +} + +static struct rmid_entry *resctrl_find_free_rmid(u32 closid) +{ + struct rmid_entry *itr; + u32 itr_idx, cmp_idx; + + if (list_empty(&rmid_free_lru)) + return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC); + + list_for_each_entry(itr, &rmid_free_lru, list) { + /* + * Get the index of this free RMID, and the index it would need + * to be if it were used with this CLOSID. + * If the CLOSID is irrelevant on this architecture, the two + * index values are always the same on every entry and thus the + * very first entry will be returned. + */ + itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid); + cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid); + + if (itr_idx == cmp_idx) + return itr; + } + + return ERR_PTR(-ENOSPC); +} + +/** + * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated + * RMID are clean, or the CLOSID that has + * the most clean RMID. + * + * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID + * may not be able to allocate clean RMID. To avoid this the allocator will + * choose the CLOSID with the most clean RMID. 
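+ * "Clean" means the RMID is on the free list; dirty RMID sit on the limbo
+ * list and are counted per-CLOSID in closid_num_dirty_rmid[].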
+ *
+ * When the CLOSID and RMID are independent numbers, the first free CLOSID will
+ * be returned.
+ */
+int resctrl_find_cleanest_closid(void)
+{
+	u32 cleanest_closid = ~0;
+	int i = 0;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
+		return -EIO;
+
+	for (i = 0; i < closids_supported(); i++) {
+		int num_dirty;
+
+		if (closid_allocated(i))
+			continue;
+
+		num_dirty = closid_num_dirty_rmid[i];
+		if (num_dirty == 0)
+			return i;
+
+		if (cleanest_closid == ~0)
+			cleanest_closid = i;
+
+		if (num_dirty < closid_num_dirty_rmid[cleanest_closid])
+			cleanest_closid = i;
+	}
+
+	if (cleanest_closid == ~0)
+		return -ENOSPC;
+
+	return cleanest_closid;
+}
+
+/*
+ * For MPAM the RMID value is not unique, and has to be considered with
+ * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
+ * allows all domains to be managed by a single free list.
+ * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler.
+ */
+int alloc_rmid(u32 closid)
+{
+	struct rmid_entry *entry;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	entry = resctrl_find_free_rmid(closid);
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
+
+	list_del(&entry->list);
+	return entry->rmid;
+}
+
+static void add_rmid_to_limbo(struct rmid_entry *entry)
+{
+	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+	struct rdt_domain *d;
+	u32 idx;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	/* Walking r->domains, ensure it can't race with cpuhp */
+	lockdep_assert_cpus_held();
+
+	idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
+
+	entry->busy = 0;
+	list_for_each_entry(d, &r->domains, list) {
+		/*
+		 * For the first limbo RMID in the domain,
+		 * set up the limbo worker.
+		 */
+		if (!has_busy_rmid(d))
+			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL,
+						RESCTRL_PICK_ANY_CPU);
+		set_bit(idx, d->rmid_busy_llc);
+		entry->busy++;
+	}
+
+	rmid_limbo_count++;
+	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
+		closid_num_dirty_rmid[entry->closid]++;
+}
+
+void free_rmid(u32 closid, u32 rmid)
+{
+	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
+	struct rmid_entry *entry;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	/*
+	 * Do not allow the default rmid to be free'd. Comparing by index
+	 * allows architectures that ignore the closid parameter to avoid an
+	 * unnecessary check.
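+	 * On x86 the index is simply the RMID, so this comparison reduces
+	 * to rmid == RESCTRL_RESERVED_RMID.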
+ */ + if (!resctrl_arch_mon_capable() || + idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID)) + return; + + entry = __rmid_entry(idx); + + if (resctrl_arch_is_llc_occupancy_enabled()) + add_rmid_to_limbo(entry); + else + list_add_tail(&entry->list, &rmid_free_lru); +} + +static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid, + u32 rmid, enum resctrl_event_id evtid) +{ + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + + switch (evtid) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return &d->mbm_total[idx]; + case QOS_L3_MBM_LOCAL_EVENT_ID: + return &d->mbm_local[idx]; + default: + return NULL; + } +} + +static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) +{ + struct mbm_state *m; + u64 tval = 0; + + if (rr->first) { + resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid); + m = get_mbm_state(rr->d, closid, rmid, rr->evtid); + if (m) + memset(m, 0, sizeof(struct mbm_state)); + return 0; + } + + rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid, + &tval, rr->arch_mon_ctx); + if (rr->err) + return rr->err; + + rr->val += tval; + + return 0; +} + +/* + * mbm_bw_count() - Update bw count from values previously read by + * __mon_event_count(). + * @closid: The closid used to identify the cached mbm_state. + * @rmid: The rmid used to identify the cached mbm_state. + * @rr: The struct rmid_read populated by __mon_event_count(). + * + * Supporting function to calculate the memory bandwidth + * and delta bandwidth in MBps. The chunks value previously read by + * __mon_event_count() is compared with the chunks value from the previous + * invocation. This must be called once per second to maintain values in MBps. + */ +static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) +{ + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + struct mbm_state *m = &rr->d->mbm_local[idx]; + u64 cur_bw, bytes, cur_bytes; + + cur_bytes = rr->val; + bytes = cur_bytes - m->prev_bw_bytes; + m->prev_bw_bytes = cur_bytes; + + cur_bw = bytes / SZ_1M; + + m->prev_bw = cur_bw; +} + +/* + * This is scheduled by mon_event_read() to read the CQM/MBM counters + * on a domain. + */ +void mon_event_count(void *info) +{ + struct rdtgroup *rdtgrp, *entry; + struct rmid_read *rr = info; + struct list_head *head; + int ret; + + rdtgrp = rr->rgrp; + + ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr); + + /* + * For Ctrl groups read data from child monitor groups and + * add them together. Count events which are read successfully. + * Discard the rmid_read's reporting errors. + */ + head = &rdtgrp->mon.crdtgrp_list; + + if (rdtgrp->type == RDTCTRL_GROUP) { + list_for_each_entry(entry, head, mon.crdtgrp_list) { + if (__mon_event_count(entry->closid, entry->mon.rmid, + rr) == 0) + ret = 0; + } + } + + /* + * __mon_event_count() calls for newly created monitor groups may + * report -EINVAL/Unavailable if the monitor hasn't seen any traffic. + * Discard error if any of the monitor event reads succeeded. + */ + if (ret == 0) + rr->err = 0; +} + +/* + * Feedback loop for MBA software controller (mba_sc) + * + * mba_sc is a feedback loop where we periodically read MBM counters and + * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so + * that: + * + * current bandwidth(cur_bw) < user specified bandwidth(user_bw) + * + * This uses the MBM counters to measure the bandwidth and MBA throttle + * MSRs to control the bandwidth for a particular rdtgrp. 
It builds on the
+ * fact that resctrl rdtgroups have both monitoring and control.
+ *
+ * The checks run every 1s, simply tagging along the MBM overflow timer.
+ * The 1s interval makes the calculation of bandwidth simpler.
+ *
+ * Although MBA's goal is to restrict the bandwidth to a maximum, there may
+ * be a need to increase the bandwidth to avoid unnecessarily restricting
+ * the L2 <-> L3 traffic.
+ *
+ * Since MBA controls the L2 external bandwidth whereas MBM measures the
+ * L3 external bandwidth, the following sequence could lead to such a
+ * situation.
+ *
+ * Consider an rdtgroup which had high L3 <-> memory traffic in its initial
+ * phases: mba_sc kicks in and reduces the bandwidth percentage values, but
+ * after some time the rdtgroup has mostly L2 <-> L3 traffic.
+ *
+ * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
+ * throttle MSRs already have low percentage values. To avoid
+ * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
+ */
+static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
+{
+	u32 closid, rmid, cur_msr_val, new_msr_val;
+	struct mbm_state *pmbm_data, *cmbm_data;
+	struct rdt_resource *r_mba;
+	struct rdt_domain *dom_mba;
+	u32 cur_bw, user_bw, idx;
+	struct list_head *head;
+	struct rdtgroup *entry;
+
+	if (!resctrl_arch_is_mbm_local_enabled())
+		return;
+
+	r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
+
+	closid = rgrp->closid;
+	rmid = rgrp->mon.rmid;
+	idx = resctrl_arch_rmid_idx_encode(closid, rmid);
+	pmbm_data = &dom_mbm->mbm_local[idx];
+
+	dom_mba = resctrl_get_domain_from_cpu(smp_processor_id(), r_mba);
+	if (!dom_mba) {
+		pr_warn_once("Failure to get domain for MBA update\n");
+		return;
+	}
+
+	cur_bw = pmbm_data->prev_bw;
+	user_bw = dom_mba->mbps_val[closid];
+
+	/* MBA resource doesn't support CDP */
+	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
+
+	/*
+	 * For Ctrl groups read data from child monitor groups.
+	 */
+	head = &rgrp->mon.crdtgrp_list;
+	list_for_each_entry(entry, head, mon.crdtgrp_list) {
+		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
+		cur_bw += cmbm_data->prev_bw;
+	}
+
+	/*
+	 * Scale up/down the bandwidth linearly for the ctrl group. The
+	 * bandwidth step is the bandwidth granularity specified by the
+	 * hardware.
+	 * Always increase throttling if current bandwidth is above the
+	 * target set by user.
+	 * But avoid thrashing up and down on every poll by checking
+	 * whether a decrease in throttling is likely to push the group
+	 * back over target. E.g. if currently throttling to 30% of bandwidth
+	 * on a system with 10% granularity steps, check whether moving to
+	 * 40% would go past the limit by multiplying current bandwidth by
+	 * "(30 + 10) / 30".
+	 */
+	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
+		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
+	} else if (cur_msr_val < MAX_MBA_BW &&
+		   (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
+		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
+	} else {
+		return;
+	}
+
+	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
+}
+
+static void mbm_update(struct rdt_resource *r, struct rdt_domain *d,
+		       u32 closid, u32 rmid)
+{
+	struct rmid_read rr;
+
+	rr.first = false;
+	rr.r = r;
+	rr.d = d;
+
+	/*
+	 * This is protected from concurrent reads from user space, as both
+	 * the user read path and this path hold the global mutex.
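+	 * The global mutex is rdtgroup_mutex: the overflow handler takes it
+	 * directly, while user space readers take it via
+	 * rdtgroup_kn_lock_live() in rdtgroup_mondata_show().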
+ */ + if (resctrl_arch_is_mbm_total_enabled()) { + rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; + rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + + __mon_event_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); + } + if (resctrl_arch_is_mbm_local_enabled()) { + rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; + rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + + __mon_event_count(closid, rmid, &rr); + + /* + * Call the MBA software controller only for the + * control groups and when user has enabled + * the software controller explicitly. + */ + if (is_mba_sc(NULL)) + mbm_bw_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); + } +} + +/* + * Handler to scan the limbo list and move the RMIDs + * to free list whose occupancy < threshold_occupancy. + */ +void cqm_handle_limbo(struct work_struct *work) +{ + unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); + struct rdt_domain *d; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + d = container_of(work, struct rdt_domain, cqm_limbo.work); + + __check_limbo(d, false); + + if (has_busy_rmid(d)) { + d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); + schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, + delay); + } + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +/** + * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this + * domain. + * @dom: The domain the limbo handler should run for. + * @delay_ms: How far in the future the handler should run. + * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. + */ +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) +{ + unsigned long delay = msecs_to_jiffies(delay_ms); + int cpu; + + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); + dom->cqm_work_cpu = cpu; + + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); +} + +void mbm_handle_overflow(struct work_struct *work) +{ + unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL); + struct rdtgroup *prgrp, *crgrp; + struct list_head *head; + struct rdt_resource *r; + struct rdt_domain *d; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + /* + * If the filesystem has been unmounted this work no longer needs to + * run. + */ + if (!resctrl_mounted || !resctrl_arch_mon_capable()) + goto out_unlock; + + r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + d = container_of(work, struct rdt_domain, mbm_over.work); + + list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { + mbm_update(r, d, prgrp->closid, prgrp->mon.rmid); + + head = &prgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) + mbm_update(r, d, crgrp->closid, crgrp->mon.rmid); + + if (is_mba_sc(NULL)) + update_mba_bw(prgrp, d); + } + + /* + * Re-check for housekeeping CPUs. This allows the overflow handler to + * move off a nohz_full CPU quickly. 
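+	 * For example, if the work was left on a nohz_full CPU because no
+	 * housekeeping CPU in the domain was online, the next expiry of the
+	 * timer re-selects a housekeeping CPU here once one is available.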
+ */ + d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); + schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +/** + * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this + * domain. + * @dom: The domain the overflow handler should run for. + * @delay_ms: How far in the future the handler should run. + * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. + */ +void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) +{ + unsigned long delay = msecs_to_jiffies(delay_ms); + int cpu; + + /* + * When a domain comes online there is no guarantee the filesystem is + * mounted. If not, there is no need to catch counter overflow. + */ + if (!resctrl_mounted || !resctrl_arch_mon_capable()) + return; + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); + dom->mbm_work_cpu = cpu; + + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->mbm_over, delay); +} + +static int dom_data_init(struct rdt_resource *r) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + u32 num_closid = resctrl_arch_get_num_closid(r); + struct rmid_entry *entry = NULL; + int err = 0, i; + u32 idx; + + mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + u32 *tmp; + + /* + * If the architecture hasn't provided a sanitised value here, + * this may result in larger arrays than necessary. Resctrl will + * use a smaller system wide value based on the resources in + * use. + */ + tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + err = -ENOMEM; + goto out_unlock; + } + + closid_num_dirty_rmid = tmp; + } + + rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL); + if (!rmid_ptrs) { + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + err = -ENOMEM; + goto out_unlock; + } + + for (i = 0; i < idx_limit; i++) { + entry = &rmid_ptrs[i]; + INIT_LIST_HEAD(&entry->list); + + resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid); + list_add_tail(&entry->list, &rmid_free_lru); + } + + /* + * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and + * are always allocated. These are used for the rdtgroup_default + * control group, which will be setup later in rdtgroup_init(). + */ + idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID); + entry = __rmid_entry(idx); + list_del(&entry->list); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; +} + +static void dom_data_exit(struct rdt_resource *r) +{ + if (!r->mon_capable) + return; + + mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + + kfree(rmid_ptrs); + rmid_ptrs = NULL; + + mutex_unlock(&rdtgroup_mutex); +} + +static struct mon_evt llc_occupancy_event = { + .name = "llc_occupancy", + .evtid = QOS_L3_OCCUP_EVENT_ID, +}; + +static struct mon_evt mbm_total_event = { + .name = "mbm_total_bytes", + .evtid = QOS_L3_MBM_TOTAL_EVENT_ID, +}; + +static struct mon_evt mbm_local_event = { + .name = "mbm_local_bytes", + .evtid = QOS_L3_MBM_LOCAL_EVENT_ID, +}; + +static struct mon_evt mbm_bps_event = { + .name = "mbm_local_bytes", + .evtid = QOS_MC_MBM_BPS_EVENT_ID, +}; + +/* + * Initialize the event list for the resource. 
+ *
+ * Note that MBM events are also part of RDT_RESOURCE_L3 resource
+ * because as per the SDM the total and local memory bandwidth
+ * are enumerated as part of L3 monitoring.
+ */
+static void l3_mon_evt_init(struct rdt_resource *r)
+{
+	INIT_LIST_HEAD(&r->evt_list);
+
+	if (resctrl_arch_is_llc_occupancy_enabled())
+		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
+	if (resctrl_arch_is_mbm_total_enabled())
+		list_add_tail(&mbm_total_event.list, &r->evt_list);
+	if (resctrl_arch_is_mbm_local_enabled())
+		list_add_tail(&mbm_local_event.list, &r->evt_list);
+}
+
+static void mc_mon_evt_init(struct rdt_resource *r)
+{
+	INIT_LIST_HEAD(&r->evt_list);
+
+	if (resctrl_arch_is_mbm_bps_enabled())
+		list_add_tail(&mbm_bps_event.list, &r->evt_list);
+}
+
+int resctrl_mon_resource_init(void)
+{
+	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+	int ret;
+
+	if (!r->mon_capable)
+		return 0;
+
+	ret = dom_data_init(r);
+	if (ret)
+		return ret;
+
+	l3_mon_evt_init(r);
+
+	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
+		mbm_total_event.configurable = true;
+		mbm_config_rftype_init("mbm_total_bytes_config");
+	}
+	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) {
+		mbm_local_event.configurable = true;
+		mbm_config_rftype_init("mbm_local_bytes_config");
+	}
+
+	r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
+	mc_mon_evt_init(r);
+
+	return 0;
+}
+
+void resctrl_mon_resource_exit(void)
+{
+	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+
+	dom_data_exit(r);
+}
diff --git a/fs/resctrl/pseudo_lock.c b/fs/resctrl/pseudo_lock.c
new file mode 100644
index 0000000000000000000000000000000000000000..077c2abb6edd90cad1e1e237dd4c1861cf1dc025
--- /dev/null
+++ b/fs/resctrl/pseudo_lock.c
@@ -0,0 +1,1122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Resource Director Technology (RDT)
+ *
+ * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Author: Reinette Chatre
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/kthread.h>
+#include <linux/mman.h>
+#include <linux/perf_event.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/perf_event.h>
+#include <asm/resctrl.h>
+
+#include "internal.h"
+
+/*
+ * Major number assigned to and shared by all devices exposing
+ * pseudo-locked regions.
+ */
+static unsigned int pseudo_lock_major;
+static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);
+
+static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
+{
+	const struct rdtgroup *rdtgrp;
+
+	rdtgrp = dev_get_drvdata(dev);
+	if (mode)
+		*mode = 0600;
+	return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
+}
+
+static const struct class pseudo_lock_class = {
+	.name = "pseudo_lock",
+	.devnode = pseudo_lock_devnode,
+};
+
+/**
+ * pseudo_lock_minor_get - Obtain available minor number
+ * @minor: Pointer to where new minor number will be stored
+ *
+ * A bitmask is used to track available minor numbers. Here the next free
+ * minor number is marked as unavailable and returned.
+ *
+ * Return: 0 on success, <0 on failure.
+ */ +static int pseudo_lock_minor_get(unsigned int *minor) +{ + unsigned long first_bit; + + first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS); + + if (first_bit == MINORBITS) + return -ENOSPC; + + __clear_bit(first_bit, &pseudo_lock_minor_avail); + *minor = first_bit; + + return 0; +} + +/** + * pseudo_lock_minor_release - Return minor number to available + * @minor: The minor number made available + */ +static void pseudo_lock_minor_release(unsigned int minor) +{ + __set_bit(minor, &pseudo_lock_minor_avail); +} + +/** + * region_find_by_minor - Locate a pseudo-lock region by inode minor number + * @minor: The minor number of the device representing pseudo-locked region + * + * When the character device is accessed we need to determine which + * pseudo-locked region it belongs to. This is done by matching the minor + * number of the device to the pseudo-locked region it belongs. + * + * Minor numbers are assigned at the time a pseudo-locked region is associated + * with a cache instance. + * + * Return: On success return pointer to resource group owning the pseudo-locked + * region, NULL on failure. + */ +static struct rdtgroup *region_find_by_minor(unsigned int minor) +{ + struct rdtgroup *rdtgrp, *rdtgrp_match = NULL; + + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { + if (rdtgrp->plr && rdtgrp->plr->minor == minor) { + rdtgrp_match = rdtgrp; + break; + } + } + return rdtgrp_match; +} + +/** + * struct pseudo_lock_pm_req - A power management QoS request list entry + * @list: Entry within the @pm_reqs list for a pseudo-locked region + * @req: PM QoS request + */ +struct pseudo_lock_pm_req { + struct list_head list; + struct dev_pm_qos_request req; +}; + +static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) +{ + struct pseudo_lock_pm_req *pm_req, *next; + + list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { + dev_pm_qos_remove_request(&pm_req->req); + list_del(&pm_req->list); + kfree(pm_req); + } +} + +/** + * pseudo_lock_cstates_constrain - Restrict cores from entering C6 + * @plr: Pseudo-locked region + * + * To prevent the cache from being affected by power management entering + * C6 has to be avoided. This is accomplished by requesting a latency + * requirement lower than lowest C6 exit latency of all supported + * platforms as found in the cpuidle state tables in the intel_idle driver. + * At this time it is possible to do so with a single latency requirement + * for all supported platforms. + * + * Since Goldmont is supported, which is affected by X86_BUG_MONITOR, + * the ACPI latencies need to be considered while keeping in mind that C2 + * may be set to map to deeper sleep states. In this case the latency + * requirement needs to prevent entering C2 also. 
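+ * The request made below is for a resume latency of 30 us on every CPU of
+ * the pseudo-locked region's domain, a value below the C6 exit latency of
+ * all supported platforms.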
+ * + * Return: 0 on success, <0 on failure + */ +static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) +{ + struct pseudo_lock_pm_req *pm_req; + int cpu; + int ret; + + for_each_cpu(cpu, &plr->d->cpu_mask) { + pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); + if (!pm_req) { + rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n"); + ret = -ENOMEM; + goto out_err; + } + ret = dev_pm_qos_add_request(get_cpu_device(cpu), + &pm_req->req, + DEV_PM_QOS_RESUME_LATENCY, + 30); + if (ret < 0) { + rdt_last_cmd_printf("Failed to add latency req CPU%d\n", + cpu); + kfree(pm_req); + ret = -1; + goto out_err; + } + list_add(&pm_req->list, &plr->pm_reqs); + } + + return 0; + +out_err: + pseudo_lock_cstates_relax(plr); + return ret; +} + +/** + * pseudo_lock_region_clear - Reset pseudo-lock region data + * @plr: pseudo-lock region + * + * All content of the pseudo-locked region is reset - any memory allocated + * freed. + * + * Return: void + */ +static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) +{ + plr->size = 0; + plr->line_size = 0; + kfree(plr->kmem); + plr->kmem = NULL; + plr->s = NULL; + if (plr->d) + plr->d->plr = NULL; + plr->d = NULL; + plr->cbm = 0; + plr->debugfs_dir = NULL; +} + +/** + * pseudo_lock_region_init - Initialize pseudo-lock region information + * @plr: pseudo-lock region + * + * Called after user provided a schemata to be pseudo-locked. From the + * schemata the &struct pseudo_lock_region is on entry already initialized + * with the resource, domain, and capacity bitmask. Here the information + * required for pseudo-locking is deduced from this data and &struct + * pseudo_lock_region initialized further. This information includes: + * - size in bytes of the region to be pseudo-locked + * - cache line size to know the stride with which data needs to be accessed + * to be pseudo-locked + * - a cpu associated with the cache instance on which the pseudo-locking + * flow can be executed + * + * Return: 0 on success, <0 on failure. Descriptive error will be written + * to last_cmd_status buffer. + */ +static int pseudo_lock_region_init(struct pseudo_lock_region *plr) +{ + struct cpu_cacheinfo *ci; + int ret; + int i; + + /* Pick the first cpu we find that is associated with the cache. */ + plr->cpu = cpumask_first(&plr->d->cpu_mask); + + if (!cpu_online(plr->cpu)) { + rdt_last_cmd_printf("CPU %u associated with cache not online\n", + plr->cpu); + ret = -ENODEV; + goto out_region; + } + + ci = get_cpu_cacheinfo(plr->cpu); + + plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); + + for (i = 0; i < ci->num_leaves; i++) { + if (ci->info_list[i].level == plr->s->res->cache_level) { + plr->line_size = ci->info_list[i].coherency_line_size; + return 0; + } + } + + ret = -1; + rdt_last_cmd_puts("Unable to determine cache line size\n"); +out_region: + pseudo_lock_region_clear(plr); + return ret; +} + +/** + * pseudo_lock_init - Initialize a pseudo-lock region + * @rdtgrp: resource group to which new pseudo-locked region will belong + * + * A pseudo-locked region is associated with a resource group. When this + * association is created the pseudo-locked region is initialized. The + * details of the pseudo-locked region are not known at this time so only + * allocation is done and association established. 
+ * + * Return: 0 on success, <0 on failure + */ +static int pseudo_lock_init(struct rdtgroup *rdtgrp) +{ + struct pseudo_lock_region *plr; + + plr = kzalloc(sizeof(*plr), GFP_KERNEL); + if (!plr) + return -ENOMEM; + + init_waitqueue_head(&plr->lock_thread_wq); + INIT_LIST_HEAD(&plr->pm_reqs); + rdtgrp->plr = plr; + return 0; +} + +/** + * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked + * @plr: pseudo-lock region + * + * Initialize the details required to set up the pseudo-locked region and + * allocate the contiguous memory that will be pseudo-locked to the cache. + * + * Return: 0 on success, <0 on failure. Descriptive error will be written + * to last_cmd_status buffer. + */ +static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) +{ + int ret; + + ret = pseudo_lock_region_init(plr); + if (ret < 0) + return ret; + + /* + * We do not yet support contiguous regions larger than + * KMALLOC_MAX_SIZE. + */ + if (plr->size > KMALLOC_MAX_SIZE) { + rdt_last_cmd_puts("Requested region exceeds maximum size\n"); + ret = -E2BIG; + goto out_region; + } + + plr->kmem = kzalloc(plr->size, GFP_KERNEL); + if (!plr->kmem) { + rdt_last_cmd_puts("Unable to allocate memory\n"); + ret = -ENOMEM; + goto out_region; + } + + ret = 0; + goto out; +out_region: + pseudo_lock_region_clear(plr); +out: + return ret; +} + +/** + * pseudo_lock_free - Free a pseudo-locked region + * @rdtgrp: resource group to which pseudo-locked region belonged + * + * The pseudo-locked region's resources have already been released, or not + * yet created at this point. Now it can be freed and disassociated from the + * resource group. + * + * Return: void + */ +static void pseudo_lock_free(struct rdtgroup *rdtgrp) +{ + pseudo_lock_region_clear(rdtgrp->plr); + kfree(rdtgrp->plr); + rdtgrp->plr = NULL; +} + +/** + * rdtgroup_monitor_in_progress - Test if monitoring in progress + * @rdtgrp: resource group being queried + * + * Return: 1 if monitor groups have been created for this resource + * group, 0 otherwise. + */ +static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp) +{ + return !list_empty(&rdtgrp->mon.crdtgrp_list); +} + +/** + * rdtgroup_locksetup_user_restrict - Restrict user access to group + * @rdtgrp: resource group needing access restricted + * + * A resource group used for cache pseudo-locking cannot have cpus or tasks + * assigned to it. This is communicated to the user by restricting access + * to all the files that can be used to make such changes. + * + * Permissions restored with rdtgroup_locksetup_user_restore() + * + * Return: 0 on success, <0 on failure. If a failure occurs during the + * restriction of access an attempt will be made to restore permissions but + * the state of the mode of these files will be uncertain when a failure + * occurs. 
+ */ +static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp) +{ + int ret; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); + if (ret) + return ret; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); + if (ret) + goto err_tasks; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); + if (ret) + goto err_cpus; + + if (resctrl_arch_mon_capable()) { + ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups"); + if (ret) + goto err_cpus_list; + } + + ret = 0; + goto out; + +err_cpus_list: + rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); +err_cpus: + rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); +err_tasks: + rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); +out: + return ret; +} + +/** + * rdtgroup_locksetup_user_restore - Restore user access to group + * @rdtgrp: resource group needing access restored + * + * Restore all file access previously removed using + * rdtgroup_locksetup_user_restrict() + * + * Return: 0 on success, <0 on failure. If a failure occurs during the + * restoration of access an attempt will be made to restrict permissions + * again but the state of the mode of these files will be uncertain when + * a failure occurs. + */ +static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp) +{ + int ret; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); + if (ret) + return ret; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); + if (ret) + goto err_tasks; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); + if (ret) + goto err_cpus; + + if (resctrl_arch_mon_capable()) { + ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777); + if (ret) + goto err_cpus_list; + } + + ret = 0; + goto out; + +err_cpus_list: + rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); +err_cpus: + rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); +err_tasks: + rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); +out: + return ret; +} + +/** + * rdtgroup_locksetup_enter - Resource group enters locksetup mode + * @rdtgrp: resource group requested to enter locksetup mode + * + * A resource group enters locksetup mode to reflect that it would be used + * to represent a pseudo-locked region and is in the process of being set + * up to do so. A resource group used for a pseudo-locked region would + * lose the closid associated with it so we cannot allow it to have any + * tasks or cpus assigned nor permit tasks or cpus to be assigned in the + * future. Monitoring of a pseudo-locked region is not allowed either. + * + * The above and more restrictions on a pseudo-locked region are checked + * for and enforced before the resource group enters the locksetup mode. + * + * Returns: 0 if the resource group successfully entered locksetup mode, <0 + * on failure. On failure the last_cmd_status buffer is updated with text to + * communicate details of failure to the user. + */ +int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) +{ + int ret; + + /* + * The default resource group can neither be removed nor lose the + * default closid associated with it. + */ + if (rdtgrp == &rdtgroup_default) { + rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); + return -EINVAL; + } + + /* + * Cache Pseudo-locking not supported when CDP is enabled. + * + * Some things to consider if you would like to enable this + * support (using L3 CDP as example): + * - When CDP is enabled two separate resources are exposed, + * L3DATA and L3CODE, but they are actually on the same cache. 
+	 * The implication for pseudo-locking is that if a
+	 * pseudo-locked region is created on a domain of one
+	 * resource (eg. L3CODE), then a pseudo-locked region cannot
+	 * be created on that same domain of the other resource
+	 * (eg. L3DATA). This is because the creation of a
+	 * pseudo-locked region involves a call to wbinvd that will
+	 * affect all cache allocations on that particular domain.
+	 * - Considering the previous, it may be possible to only
+	 * expose one of the CDP resources to pseudo-locking and
+	 * hide the other. For example, we could consider only
+	 * exposing L3DATA; since the L3 cache is unified it is
+	 * still possible to place instructions there and execute them.
+	 * - If only one region is exposed to pseudo-locking we should
+	 * still keep in mind that the availability of a portion of the
+	 * cache for pseudo-locking should take into account both resources.
+	 * Similarly, if a pseudo-locked region is created in one
+	 * resource, the portion of the cache used by it should be made
+	 * unavailable to all future allocations from both resources.
+	 */
+	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
+	    resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
+		rdt_last_cmd_puts("CDP enabled\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Not knowing the bits to disable prefetching implies that this
+	 * platform does not support Cache Pseudo-Locking.
+	 */
+	if (resctrl_arch_get_prefetch_disable_bits() == 0) {
+		rdt_last_cmd_puts("Pseudo-locking not supported\n");
+		return -EINVAL;
+	}
+
+	if (rdtgroup_monitor_in_progress(rdtgrp)) {
+		rdt_last_cmd_puts("Monitoring in progress\n");
+		return -EINVAL;
+	}
+
+	if (rdtgroup_tasks_assigned(rdtgrp)) {
+		rdt_last_cmd_puts("Tasks assigned to resource group\n");
+		return -EINVAL;
+	}
+
+	if (!cpumask_empty(&rdtgrp->cpu_mask)) {
+		rdt_last_cmd_puts("CPUs assigned to resource group\n");
+		return -EINVAL;
+	}
+
+	if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
+		rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
+		return -EIO;
+	}
+
+	ret = pseudo_lock_init(rdtgrp);
+	if (ret) {
+		rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
+		goto out_release;
+	}
+
+	/*
+	 * If this system is capable of monitoring, an rmid would have been
+	 * allocated when the control group was created. This is not needed
+	 * anymore once this group is used for pseudo-locking. This call is
+	 * safe on platforms not capable of monitoring.
+	 */
+	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
+
+	ret = 0;
+	goto out;
+
+out_release:
+	rdtgroup_locksetup_user_restore(rdtgrp);
+out:
+	return ret;
+}
+
+/**
+ * rdtgroup_locksetup_exit - Resource group exits locksetup mode
+ * @rdtgrp: resource group
+ *
+ * When a resource group exits locksetup mode the earlier restrictions are
+ * lifted.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
+{
+	int ret;
+
+	if (resctrl_arch_mon_capable()) {
+		ret = alloc_rmid(rdtgrp->closid);
+		if (ret < 0) {
+			rdt_last_cmd_puts("Out of RMIDs\n");
+			return ret;
+		}
+		rdtgrp->mon.rmid = ret;
+	}
+
+	ret = rdtgroup_locksetup_user_restore(rdtgrp);
+	if (ret) {
+		free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
+		return ret;
+	}
+
+	pseudo_lock_free(rdtgrp);
+	return 0;
+}
+
+/**
+ * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
+ * @d: RDT domain
+ * @cbm: CBM to test
+ *
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
+ * pseudo-locked region on @d.
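+ * For example, a @cbm of 0x0180 overlaps an existing pseudo-locked region
+ * with a cbm of 0x00f0, because bit 7 is set in both masks.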
+ * + * @cbm is unsigned long, even if only 32 bits are used, to make the + * bitmap functions work correctly. + * + * Return: true if @cbm overlaps with pseudo-locked region on @d, false + * otherwise. + */ +bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) +{ + unsigned int cbm_len; + unsigned long cbm_b; + + if (d->plr) { + cbm_len = d->plr->s->res->cache.cbm_len; + cbm_b = d->plr->cbm; + if (bitmap_intersects(&cbm, &cbm_b, cbm_len)) + return true; + } + return false; +} + +/** + * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy + * @d: RDT domain under test + * + * The setup of a pseudo-locked region affects all cache instances within + * the hierarchy of the region. It is thus essential to know if any + * pseudo-locked regions exist within a cache hierarchy to prevent any + * attempts to create new pseudo-locked regions in the same hierarchy. + * + * Return: true if a pseudo-locked region exists in the hierarchy of @d or + * if it is not possible to test due to memory allocation issue, + * false otherwise. + */ +bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) +{ + cpumask_var_t cpu_with_psl; + enum resctrl_res_level i; + struct rdt_resource *r; + struct rdt_domain *d_i; + bool ret = false; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL)) + return true; + + /* + * First determine which cpus have pseudo-locked regions + * associated with them. + */ + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + + list_for_each_entry(d_i, &r->domains, list) { + if (d_i->plr) + cpumask_or(cpu_with_psl, cpu_with_psl, + &d_i->cpu_mask); + } + } + + /* + * Next test if new pseudo-locked region would intersect with + * existing region. + */ + if (cpumask_intersects(&d->cpu_mask, cpu_with_psl)) + ret = true; + + free_cpumask_var(cpu_with_psl); + return ret; +} + +/** + * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region + * @rdtgrp: Resource group to which the pseudo-locked region belongs. + * @sel: Selector of which measurement to perform on a pseudo-locked region. + * + * The measurement of latency to access a pseudo-locked region should be + * done from a cpu that is associated with that pseudo-locked region. + * Determine which cpu is associated with this region and start a thread on + * that cpu to perform the measurement, wait for that thread to complete. 
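+ * @sel selects the measurement: 1 for access latency, 2 for L2 cache
+ * residency, 3 for L3 cache residency, matching the
+ * resctrl_arch_measure_*() helpers invoked below.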
+ * + * Return: 0 on success, <0 on failure + */ +static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) +{ + struct pseudo_lock_region *plr = rdtgrp->plr; + struct task_struct *thread; + unsigned int cpu; + int ret = -1; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + if (rdtgrp->flags & RDT_DELETED) { + ret = -ENODEV; + goto out; + } + + if (!plr->d) { + ret = -ENODEV; + goto out; + } + + plr->thread_done = 0; + cpu = cpumask_first(&plr->d->cpu_mask); + if (!cpu_online(cpu)) { + ret = -ENODEV; + goto out; + } + + plr->cpu = cpu; + + if (sel == 1) + thread = kthread_create_on_node(resctrl_arch_measure_cycles_lat_fn, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else if (sel == 2) + thread = kthread_create_on_node(resctrl_arch_measure_l2_residency, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else if (sel == 3) + thread = kthread_create_on_node(resctrl_arch_measure_l3_residency, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else + goto out; + + if (IS_ERR(thread)) { + ret = PTR_ERR(thread); + goto out; + } + kthread_bind(thread, cpu); + wake_up_process(thread); + + ret = wait_event_interruptible(plr->lock_thread_wq, + plr->thread_done == 1); + if (ret < 0) + goto out; + + ret = 0; + +out: + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return ret; +} + +static ssize_t pseudo_lock_measure_trigger(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rdtgroup *rdtgrp = file->private_data; + size_t buf_size; + char buf[32]; + int ret; + int sel; + + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + ret = kstrtoint(buf, 10, &sel); + if (ret == 0) { + if (sel != 1 && sel != 2 && sel != 3) + return -EINVAL; + ret = debugfs_file_get(file->f_path.dentry); + if (ret) + return ret; + ret = pseudo_lock_measure_cycles(rdtgrp, sel); + if (ret == 0) + ret = count; + debugfs_file_put(file->f_path.dentry); + } + + return ret; +} + +static const struct file_operations pseudo_measure_fops = { + .write = pseudo_lock_measure_trigger, + .open = simple_open, + .llseek = default_llseek, +}; + +/** + * rdtgroup_pseudo_lock_create - Create a pseudo-locked region + * @rdtgrp: resource group to which pseudo-lock region belongs + * + * Called when a resource group in the pseudo-locksetup mode receives a + * valid schemata that should be pseudo-locked. Since the resource group is + * in pseudo-locksetup mode the &struct pseudo_lock_region has already been + * allocated and initialized with the essential information. If a failure + * occurs the resource group remains in the pseudo-locksetup mode with the + * &struct pseudo_lock_region associated with it, but cleared from all + * information and ready for the user to re-attempt pseudo-locking by + * writing the schemata again. + * + * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0 + * on failure. Descriptive error will be written to last_cmd_status buffer. 
+ */
+int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
+{
+	struct pseudo_lock_region *plr = rdtgrp->plr;
+	struct task_struct *thread;
+	unsigned int new_minor;
+	struct device *dev;
+	int ret;
+
+	ret = pseudo_lock_region_alloc(plr);
+	if (ret < 0)
+		return ret;
+
+	ret = pseudo_lock_cstates_constrain(plr);
+	if (ret < 0) {
+		ret = -EINVAL;
+		goto out_region;
+	}
+
+	plr->thread_done = 0;
+
+	plr->closid = rdtgrp->closid;
+	thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, plr,
+					cpu_to_node(plr->cpu),
+					"pseudo_lock/%u", plr->cpu);
+	if (IS_ERR(thread)) {
+		ret = PTR_ERR(thread);
+		rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
+		goto out_cstates;
+	}
+
+	kthread_bind(thread, plr->cpu);
+	wake_up_process(thread);
+
+	ret = wait_event_interruptible(plr->lock_thread_wq,
+				       plr->thread_done == 1);
+	if (ret < 0) {
+		/*
+		 * If the thread does not get on the CPU for whatever
+		 * reason and the process which sets up the region is
+		 * interrupted then this will leave the thread in runnable
+		 * state and once it gets on the CPU it will dereference
+		 * the cleared, but not freed, plr struct resulting in an
+		 * empty pseudo-locking loop.
+		 */
+		rdt_last_cmd_puts("Locking thread interrupted\n");
+		goto out_cstates;
+	}
+
+	ret = pseudo_lock_minor_get(&new_minor);
+	if (ret < 0) {
+		rdt_last_cmd_puts("Unable to obtain a new minor number\n");
+		goto out_cstates;
+	}
+
+	/*
+	 * Unlock access but do not release the reference. The
+	 * pseudo-locked region will still be here on return.
+	 *
+	 * The mutex has to be released temporarily to avoid a potential
+	 * deadlock with the mm->mmap_lock which is obtained in the
+	 * device_create() and debugfs_create_dir() callpath below as well as
+	 * before the mmap() callback is called.
+	 */
+	mutex_unlock(&rdtgroup_mutex);
+
+	if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
+		plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
+						      debugfs_resctrl);
+		if (!IS_ERR_OR_NULL(plr->debugfs_dir))
+			debugfs_create_file("pseudo_lock_measure", 0200,
+					    plr->debugfs_dir, rdtgrp,
+					    &pseudo_measure_fops);
+	}
+
+	dev = device_create(&pseudo_lock_class, NULL,
+			    MKDEV(pseudo_lock_major, new_minor),
+			    rdtgrp, "%s", rdtgrp->kn->name);
+
+	mutex_lock(&rdtgroup_mutex);
+
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		rdt_last_cmd_printf("Failed to create character device: %d\n",
+				    ret);
+		goto out_debugfs;
+	}
+
+	/* We released the mutex - check if group was removed while we did so */
+	if (rdtgrp->flags & RDT_DELETED) {
+		ret = -ENODEV;
+		goto out_device;
+	}
+
+	plr->minor = new_minor;
+
+	rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
+	closid_free(rdtgrp->closid);
+	rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
+	rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);
+
+	ret = 0;
+	goto out;
+
+out_device:
+	device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
+out_debugfs:
+	debugfs_remove_recursive(plr->debugfs_dir);
+	pseudo_lock_minor_release(new_minor);
+out_cstates:
+	pseudo_lock_cstates_relax(plr);
+out_region:
+	pseudo_lock_region_clear(plr);
+out:
+	return ret;
+}
+
+/**
+ * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
+ * @rdtgrp: resource group to which the pseudo-locked region belongs
+ *
+ * The removal of a pseudo-locked region can be initiated when the resource
+ * group is removed via "rmdir" from user space or when the resctrl
+ * filesystem is unmounted. On removal the resource group does
+ * not go back to pseudo-locksetup mode before it is removed, instead it is
+ * removed directly.
There is thus asymmetry with the creation where the + * &struct pseudo_lock_region is removed here while it was not created in + * rdtgroup_pseudo_lock_create(). + * + * Return: void + */ +void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) +{ + struct pseudo_lock_region *plr = rdtgrp->plr; + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + /* + * Default group cannot be a pseudo-locked region so we can + * free closid here. + */ + closid_free(rdtgrp->closid); + goto free; + } + + pseudo_lock_cstates_relax(plr); + debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); + device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); + pseudo_lock_minor_release(plr->minor); + +free: + pseudo_lock_free(rdtgrp); +} + +static int pseudo_lock_dev_open(struct inode *inode, struct file *filp) +{ + struct rdtgroup *rdtgrp; + + mutex_lock(&rdtgroup_mutex); + + rdtgrp = region_find_by_minor(iminor(inode)); + if (!rdtgrp) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + + filp->private_data = rdtgrp; + atomic_inc(&rdtgrp->waitcount); + /* Perform a non-seekable open - llseek is not supported */ + filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + + mutex_unlock(&rdtgroup_mutex); + + return 0; +} + +static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) +{ + struct rdtgroup *rdtgrp; + + mutex_lock(&rdtgroup_mutex); + rdtgrp = filp->private_data; + WARN_ON(!rdtgrp); + if (!rdtgrp) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + filp->private_data = NULL; + atomic_dec(&rdtgrp->waitcount); + mutex_unlock(&rdtgroup_mutex); + return 0; +} + +static int pseudo_lock_dev_mremap(struct vm_area_struct *area) +{ + /* Not supported */ + return -EINVAL; +} + +static const struct vm_operations_struct pseudo_mmap_ops = { + .mremap = pseudo_lock_dev_mremap, +}; + +static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long vsize = vma->vm_end - vma->vm_start; + unsigned long off = vma->vm_pgoff << PAGE_SHIFT; + struct pseudo_lock_region *plr; + struct rdtgroup *rdtgrp; + unsigned long physical; + unsigned long psize; + + mutex_lock(&rdtgroup_mutex); + + rdtgrp = filp->private_data; + WARN_ON(!rdtgrp); + if (!rdtgrp) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + + plr = rdtgrp->plr; + + if (!plr->d) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + + /* + * Task is required to run with affinity to the cpus associated + * with the pseudo-locked region. If this is not the case the task + * may be scheduled elsewhere and invalidate entries in the + * pseudo-locked region. + */ + if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { + mutex_unlock(&rdtgroup_mutex); + return -EINVAL; + } + + physical = __pa(plr->kmem) >> PAGE_SHIFT; + psize = plr->size - off; + + if (off > plr->size) { + mutex_unlock(&rdtgroup_mutex); + return -ENOSPC; + } + + /* + * Ensure changes are carried directly to the memory being mapped, + * do not allow copy-on-write mapping. 
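+	 *
+	 * A user space consumer is therefore expected to map the region
+	 * with a shared mapping, for example (illustrative):
+	 *
+	 *	fd = open("/dev/pseudo_lock/newlock", O_RDWR);
+	 *	buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
+	 *		   MAP_SHARED, fd, 0);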
+	 */
+	if (!(vma->vm_flags & VM_SHARED)) {
+		mutex_unlock(&rdtgroup_mutex);
+		return -EINVAL;
+	}
+
+	if (vsize > psize) {
+		mutex_unlock(&rdtgroup_mutex);
+		return -ENOSPC;
+	}
+
+	memset(plr->kmem + off, 0, vsize);
+
+	if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
+			    vsize, vma->vm_page_prot)) {
+		mutex_unlock(&rdtgroup_mutex);
+		return -EAGAIN;
+	}
+	vma->vm_ops = &pseudo_mmap_ops;
+	mutex_unlock(&rdtgroup_mutex);
+	return 0;
+}
+
+static const struct file_operations pseudo_lock_dev_fops = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+	.read =		NULL,
+	.write =	NULL,
+	.open =		pseudo_lock_dev_open,
+	.release =	pseudo_lock_dev_release,
+	.mmap =		pseudo_lock_dev_mmap,
+};
+
+int rdt_pseudo_lock_init(void)
+{
+	int ret;
+
+	ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
+	if (ret < 0)
+		return ret;
+
+	pseudo_lock_major = ret;
+
+	ret = class_register(&pseudo_lock_class);
+	if (ret) {
+		unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+		return ret;
+	}
+
+	return 0;
+}
+
+void rdt_pseudo_lock_release(void)
+{
+	class_unregister(&pseudo_lock_class);
+	unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+	pseudo_lock_major = 0;
+}
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
new file mode 100644
index 0000000000000000000000000000000000000000..643ea199c428a2b525002227997d206330adf12f
--- /dev/null
+++ b/fs/resctrl/rdtgroup.c
@@ -0,0 +1,4031 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * User interface for Resource Allocation in Resource Director Technology(RDT)
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/fs_parser.h>
+#include <linux/sysfs.h>
+#include <linux/kernfs.h>
+#include <linux/seq_buf.h>
+#include <linux/seq_file.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task.h>
+#include <linux/slab.h>
+#include <linux/task_work.h>
+#include <linux/user_namespace.h>
+
+#include <uapi/linux/magic.h>
+
+#include <asm/resctrl.h>
+#include "internal.h"
+
+/* Mutex to protect rdtgroup access. */
+DEFINE_MUTEX(rdtgroup_mutex);
+
+static struct kernfs_root *rdt_root;
+struct rdtgroup rdtgroup_default;
+LIST_HEAD(rdt_all_groups);
+
+/* list of entries for the schemata file */
+LIST_HEAD(resctrl_schema_all);
+
+/* The filesystem can only be mounted once. */
+bool resctrl_mounted;
+
+/* Kernel fs node for "info" directory under root */
+static struct kernfs_node *kn_info;
+
+/* Kernel fs node for "mon_groups" directory under root */
+static struct kernfs_node *kn_mongrp;
+
+/* Kernel fs node for "mon_data" directory under root */
+static struct kernfs_node *kn_mondata;
+
+/*
+ * Used to store the max resource name width and max resource data width
+ * to display the schemata in a tabular format
+ */
+int max_name_width, max_data_width;
+
+static struct seq_buf last_cmd_status;
+static char last_cmd_status_buf[512];
+
+static int rdtgroup_setup_root(struct rdt_fs_context *ctx);
+static void rdtgroup_destroy_root(void);
+
+struct dentry *debugfs_resctrl;
+
+static bool resctrl_debug;
+
+void rdt_last_cmd_clear(void)
+{
+	lockdep_assert_held(&rdtgroup_mutex);
+	seq_buf_clear(&last_cmd_status);
+}
+
+void rdt_last_cmd_puts(const char *s)
+{
+	lockdep_assert_held(&rdtgroup_mutex);
+	seq_buf_puts(&last_cmd_status, s);
+}
+
+void rdt_last_cmd_printf(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	lockdep_assert_held(&rdtgroup_mutex);
+	seq_buf_vprintf(&last_cmd_status, fmt, ap);
+	va_end(ap);
+}
+
+void rdt_staged_configs_clear(void)
+{
+	enum resctrl_res_level i;
+	struct rdt_resource *r;
+	struct rdt_domain *dom;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	for (i = 0; i < RDT_NUM_RESOURCES; i++) {
+		r = resctrl_arch_get_resource(i);
+		if (!r->alloc_capable)
+			continue;
+
+		list_for_each_entry(dom, &r->domains, list)
+			memset(dom->staged_config, 0, sizeof(dom->staged_config));
+	}
+}
+
+static bool resctrl_is_mbm_enabled(void)
+{
+	return (resctrl_arch_is_mbm_total_enabled() ||
+		resctrl_arch_is_mbm_local_enabled());
+}
+
+static bool resctrl_is_mbm_event(int e)
+{
+	return (e >= QOS_L3_MBM_TOTAL_EVENT_ID &&
+		e <= QOS_L3_MBM_LOCAL_EVENT_ID);
+}
+
+/*
+ * Trivial allocator for CLOSIDs. Use BITMAP APIs to manipulate a bitmap
+ * of free CLOSIDs.
+ *
+ * Using a global CLOSID across all resources has some advantages and
+ * some drawbacks:
+ * + We can simply set current's closid to assign a task to a resource
+ *   group.
+ * + Context switch code can avoid extra memory references deciding which
+ *   CLOSID to load into the PQR_ASSOC MSR
+ * - We give up some options in configuring resource groups across multi-socket
+ *   systems.
+ * - Our choices on how to configure each resource become progressively more
+ *   limited as the number of resources grows.
+ */
+static unsigned long *closid_free_map;
+static int closid_free_map_len;
+
+int closids_supported(void)
+{
+	return closid_free_map_len;
+}
+
+static void closid_init(void)
+{
+	struct resctrl_schema *s;
+	u32 rdt_min_closid = ~0;
+
+	/* Compute rdt_min_closid across all resources */
+	list_for_each_entry(s, &resctrl_schema_all, list)
+		rdt_min_closid = min(rdt_min_closid, s->num_closid);
+
+	closid_free_map = bitmap_alloc(rdt_min_closid, GFP_KERNEL);
+	bitmap_fill(closid_free_map, rdt_min_closid);
+
+	/* RESCTRL_RESERVED_CLOSID is always reserved for the default group */
+	__clear_bit(RESCTRL_RESERVED_CLOSID, closid_free_map);
+	closid_free_map_len = rdt_min_closid;
+}
+
+static int closid_alloc(void)
+{
+	int cleanest_closid;
+	u32 closid;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) &&
+	    resctrl_arch_is_llc_occupancy_enabled()) {
+		cleanest_closid = resctrl_find_cleanest_closid();
+		if (cleanest_closid < 0)
+			return cleanest_closid;
+		closid = cleanest_closid;
+	} else {
+		closid = find_first_bit(closid_free_map, closid_free_map_len);
+		if (closid == closid_free_map_len)
+			return -ENOSPC;
+	}
+
+	__clear_bit(closid, closid_free_map);
+
+	return closid;
+}
+
+void closid_free(int closid)
+{
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	__set_bit(closid, closid_free_map);
+}
+
+/**
+ * closid_allocated - test if provided closid is in use
+ * @closid: closid to be tested
+ *
+ * Return: true if @closid is currently associated with a resource group,
+ * false if @closid is free
+ */
+bool closid_allocated(unsigned int closid)
+{
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	return !test_bit(closid, closid_free_map);
+}
+
+/**
+ * rdtgroup_mode_by_closid - Return mode of resource group with closid
+ * @closid: closid of the resource group
+ *
+ * Each resource group is associated with a @closid. Here the mode
+ * of a resource group can be queried by searching for it using its closid.
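+ * For example, rdt_bit_usage_show() further down uses this to decide how
+ * the CBM bits of each allocated CLOSID should be annotated.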
+ *
+ * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
+ */
+enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
+{
+	struct rdtgroup *rdtgrp;
+
+	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+		if (rdtgrp->closid == closid)
+			return rdtgrp->mode;
+	}
+
+	return RDT_NUM_MODES;
+}
+
+static const char * const rdt_mode_str[] = {
+	[RDT_MODE_SHAREABLE]		= "shareable",
+	[RDT_MODE_EXCLUSIVE]		= "exclusive",
+	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
+	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
+};
+
+/**
+ * rdtgroup_mode_str - Return the string representation of mode
+ * @mode: the resource group mode as &enum rdtgrp_mode
+ *
+ * Return: string representation of valid mode, "unknown" otherwise
+ */
+static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
+{
+	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
+		return "unknown";
+
+	return rdt_mode_str[mode];
+}
+
+/* set uid and gid of rdtgroup dirs and files to that of the creator */
+static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
+{
+	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
+				.ia_uid = current_fsuid(),
+				.ia_gid = current_fsgid(), };
+
+	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
+	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
+		return 0;
+
+	return kernfs_setattr(kn, &iattr);
+}
+
+static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
+{
+	struct kernfs_node *kn;
+	int ret;
+
+	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
+				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+				  0, rft->kf_ops, rft, NULL, NULL);
+	if (IS_ERR(kn))
+		return PTR_ERR(kn);
+
+	ret = rdtgroup_kn_set_ugid(kn);
+	if (ret) {
+		kernfs_remove(kn);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
+{
+	struct kernfs_open_file *of = m->private;
+	struct rftype *rft = of->kn->priv;
+
+	if (rft->seq_show)
+		return rft->seq_show(of, m, arg);
+	return 0;
+}
+
+static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
+				   size_t nbytes, loff_t off)
+{
+	struct rftype *rft = of->kn->priv;
+
+	if (rft->write)
+		return rft->write(of, buf, nbytes, off);
+
+	return -EINVAL;
+}
+
+static const struct kernfs_ops rdtgroup_kf_single_ops = {
+	.atomic_write_len	= PAGE_SIZE,
+	.write			= rdtgroup_file_write,
+	.seq_show		= rdtgroup_seqfile_show,
+};
+
+static const struct kernfs_ops kf_mondata_ops = {
+	.atomic_write_len	= PAGE_SIZE,
+	.seq_show		= rdtgroup_mondata_show,
+};
+
+static bool is_cpu_list(struct kernfs_open_file *of)
+{
+	struct rftype *rft = of->kn->priv;
+
+	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
+}
+
+static int rdtgroup_cpus_show(struct kernfs_open_file *of,
+			      struct seq_file *s, void *v)
+{
+	struct rdtgroup *rdtgrp;
+	struct cpumask *mask;
+	int ret = 0;
+
+	rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+	if (rdtgrp) {
+		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+			if (!rdtgrp->plr->d) {
+				rdt_last_cmd_clear();
+				rdt_last_cmd_puts("Cache domain offline\n");
+				ret = -ENODEV;
+			} else {
+				mask = &rdtgrp->plr->d->cpu_mask;
+				seq_printf(s, is_cpu_list(of) ?
+					   "%*pbl\n" : "%*pb\n",
+					   cpumask_pr_args(mask));
+			}
+		} else {
+			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
+				   cpumask_pr_args(&rdtgrp->cpu_mask));
+		}
+	} else {
+		ret = -ENOENT;
+	}
+	rdtgroup_kn_unlock(of->kn);
+
+	return ret;
+}
+
+/*
+ * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
+ *
+ * Per task closids/rmids must have been set up before calling this function.
+ * @r may be NULL.
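+ *
+ * For example, when CPUs are dropped from a control group they are given
+ * back to the default group and each such CPU reloads its defaults:
+ *
+ *	update_closid_rmid(tmpmask, &rdtgroup_default);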
+ */ +static void +update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) +{ + struct resctrl_cpu_sync defaults; + struct resctrl_cpu_sync *defaults_p = NULL; + + if (r) { + defaults.closid = r->closid; + defaults.rmid = r->mon.rmid; + defaults_p = &defaults; + } + + on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_defaults, defaults_p, + 1); +} + +static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, + cpumask_var_t tmpmask) +{ + struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; + struct list_head *head; + + /* Check whether cpus belong to parent ctrl group */ + cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); + return -EINVAL; + } + + /* Check whether cpus are dropped from this group */ + cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); + if (!cpumask_empty(tmpmask)) { + /* Give any dropped cpus to parent rdtgroup */ + cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); + update_closid_rmid(tmpmask, prgrp); + } + + /* + * If we added cpus, remove them from previous group that owned them + * and update per-cpu rmid + */ + cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + head = &prgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + if (crgrp == rdtgrp) + continue; + cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, + tmpmask); + } + update_closid_rmid(tmpmask, rdtgrp); + } + + /* Done pushing/pulling - update this group with new mask */ + cpumask_copy(&rdtgrp->cpu_mask, newmask); + + return 0; +} + +static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) +{ + struct rdtgroup *crgrp; + + cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); + /* update the child mon group masks as well*/ + list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) + cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); +} + +static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, + cpumask_var_t tmpmask, cpumask_var_t tmpmask1) +{ + struct rdtgroup *r, *crgrp; + struct list_head *head; + + /* Check whether cpus are dropped from this group */ + cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); + if (!cpumask_empty(tmpmask)) { + /* Can't drop from default group */ + if (rdtgrp == &rdtgroup_default) { + rdt_last_cmd_puts("Can't drop CPUs from default group\n"); + return -EINVAL; + } + + /* Give any dropped cpus to rdtgroup_default */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, tmpmask); + update_closid_rmid(tmpmask, &rdtgroup_default); + } + + /* + * If we added cpus, remove them from previous group and + * the prev group's child groups that owned them + * and update per-cpu closid/rmid. + */ + cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { + if (r == rdtgrp) + continue; + cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); + if (!cpumask_empty(tmpmask1)) + cpumask_rdtgrp_clear(r, tmpmask1); + } + update_closid_rmid(tmpmask, rdtgrp); + } + + /* Done pushing/pulling - update this group with new mask */ + cpumask_copy(&rdtgrp->cpu_mask, newmask); + + /* + * Clear child mon group masks since there is a new parent mask + * now and update the rmid for the cpus the child lost. 
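+	 *
+	 * This path is reached from user space by writing a CPU mask or
+	 * list to the group, for example (illustrative):
+	 *
+	 *	# echo 0-3 > /sys/fs/resctrl/grp/cpus_list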
+ */ + head = &rdtgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); + update_closid_rmid(tmpmask, rdtgrp); + cpumask_clear(&crgrp->cpu_mask); + } + + return 0; +} + +static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + cpumask_var_t tmpmask, newmask, tmpmask1; + struct rdtgroup *rdtgrp; + int ret; + + if (!buf) + return -EINVAL; + + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) + return -ENOMEM; + if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { + free_cpumask_var(tmpmask); + return -ENOMEM; + } + if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { + free_cpumask_var(tmpmask); + free_cpumask_var(newmask); + return -ENOMEM; + } + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + ret = -ENOENT; + goto unlock; + } + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto unlock; + } + + if (is_cpu_list(of)) + ret = cpulist_parse(buf, newmask); + else + ret = cpumask_parse(buf, newmask); + + if (ret) { + rdt_last_cmd_puts("Bad CPU list/mask\n"); + goto unlock; + } + + /* check that user didn't specify any offline cpus */ + cpumask_andnot(tmpmask, newmask, cpu_online_mask); + if (!cpumask_empty(tmpmask)) { + ret = -EINVAL; + rdt_last_cmd_puts("Can only assign online CPUs\n"); + goto unlock; + } + + if (rdtgrp->type == RDTCTRL_GROUP) + ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); + else if (rdtgrp->type == RDTMON_GROUP) + ret = cpus_mon_write(rdtgrp, newmask, tmpmask); + else + ret = -EINVAL; + +unlock: + rdtgroup_kn_unlock(of->kn); + free_cpumask_var(tmpmask); + free_cpumask_var(newmask); + free_cpumask_var(tmpmask1); + + return ret ?: nbytes; +} + +/** + * rdtgroup_remove - the helper to remove resource group safely + * @rdtgrp: resource group to remove + * + * On resource group creation via a mkdir, an extra kernfs_node reference is + * taken to ensure that the rdtgroup structure remains accessible for the + * rdtgroup_kn_unlock() calls where it is removed. + * + * Drop the extra reference here, then free the rdtgroup structure. + * + * Return: void + */ +static void rdtgroup_remove(struct rdtgroup *rdtgrp) +{ + kernfs_put(rdtgrp->kn); + kfree(rdtgrp); +} + +static void _update_task_closid_rmid(void *task) +{ + /* + * If the task is still current on this CPU, update PQR_ASSOC MSR. + * Otherwise, the MSR is updated when the task is scheduled in. + */ + if (task == current) + resctrl_arch_sched_in(task); +} + +static void update_task_closid_rmid(struct task_struct *t) +{ + if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) + smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); + else + _update_task_closid_rmid(t); +} + +static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp) +{ + u32 closid, rmid = rdtgrp->mon.rmid; + + if (rdtgrp->type == RDTCTRL_GROUP) + closid = rdtgrp->closid; + else if (rdtgrp->type == RDTMON_GROUP) + closid = rdtgrp->mon.parent->closid; + else + return false; + + return resctrl_arch_match_closid(tsk, closid) && + resctrl_arch_match_rmid(tsk, closid, rmid); +} + +static int __rdtgroup_move_task(struct task_struct *tsk, + struct rdtgroup *rdtgrp) +{ + /* If the task is already in rdtgrp, no need to move the task. */ + if (task_in_rdtgroup(tsk, rdtgrp)) + return 0; + + /* + * Set the task's closid/rmid before the PQR_ASSOC MSR can be + * updated by them. 
+ * + * For ctrl_mon groups, move both closid and rmid. + * For monitor groups, can move the tasks only from + * their parent CTRL group. + */ + if (rdtgrp->type == RDTMON_GROUP && + !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) { + rdt_last_cmd_puts("Can't move task to different control group\n"); + return -EINVAL; + } + + if (rdtgrp->type == RDTMON_GROUP) + resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid, + rdtgrp->mon.rmid); + else + resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid, + rdtgrp->mon.rmid); + + /* + * Ensure the task's closid and rmid are written before determining if + * the task is current that will decide if it will be interrupted. + * This pairs with the full barrier between the rq->curr update and + * resctrl_arch_sched_in() during context switch. + */ + smp_mb(); + + /* + * By now, the task's closid and rmid are set. If the task is current + * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource + * group go into effect. If the task is not current, the MSR will be + * updated when the task is scheduled in. + */ + update_task_closid_rmid(tsk); + + return 0; +} + +static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) +{ + return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) && + resctrl_arch_match_closid(t, r->closid)); +} + +static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) +{ + return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) && + resctrl_arch_match_rmid(t, r->mon.parent->closid, + r->mon.rmid)); +} + +/** + * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group + * @r: Resource group + * + * Return: 1 if tasks have been assigned to @r, 0 otherwise + */ +int rdtgroup_tasks_assigned(struct rdtgroup *r) +{ + struct task_struct *p, *t; + int ret = 0; + + lockdep_assert_held(&rdtgroup_mutex); + + rcu_read_lock(); + for_each_process_thread(p, t) { + if (is_closid_match(t, r) || is_rmid_match(t, r)) { + ret = 1; + break; + } + } + rcu_read_unlock(); + + return ret; +} + +static int rdtgroup_task_write_permission(struct task_struct *task, + struct kernfs_open_file *of) +{ + const struct cred *tcred = get_task_cred(task); + const struct cred *cred = current_cred(); + int ret = 0; + + /* + * Even if we're attaching all tasks in the thread group, we only + * need to check permissions on one of them. 
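+	 *
+	 * For example (illustrative), a non-root user writing another
+	 * user's pid into a group's "tasks" file fails here with -EPERM.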
+ */ + if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && + !uid_eq(cred->euid, tcred->uid) && + !uid_eq(cred->euid, tcred->suid)) { + rdt_last_cmd_printf("No permission to move task %d\n", task->pid); + ret = -EPERM; + } + + put_cred(tcred); + return ret; +} + +static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, + struct kernfs_open_file *of) +{ + struct task_struct *tsk; + int ret; + + rcu_read_lock(); + if (pid) { + tsk = find_task_by_vpid(pid); + if (!tsk) { + rcu_read_unlock(); + rdt_last_cmd_printf("No task %d\n", pid); + return -ESRCH; + } + } else { + tsk = current; + } + + get_task_struct(tsk); + rcu_read_unlock(); + + ret = rdtgroup_task_write_permission(tsk, of); + if (!ret) + ret = __rdtgroup_move_task(tsk, rdtgrp); + + put_task_struct(tsk); + return ret; +} + +static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdtgroup *rdtgrp; + char *pid_str; + int ret = 0; + pid_t pid; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + rdt_last_cmd_clear(); + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto unlock; + } + + while (buf && buf[0] != '\0' && buf[0] != '\n') { + pid_str = strim(strsep(&buf, ",")); + + if (kstrtoint(pid_str, 0, &pid)) { + rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str); + ret = -EINVAL; + break; + } + + if (pid < 0) { + rdt_last_cmd_printf("Invalid pid %d\n", pid); + ret = -EINVAL; + break; + } + + ret = rdtgroup_move_task(pid, rdtgrp, of); + if (ret) { + rdt_last_cmd_printf("Error while processing task %d\n", pid); + break; + } + } + +unlock: + rdtgroup_kn_unlock(of->kn); + + return ret ?: nbytes; +} + +static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) +{ + struct task_struct *p, *t; + pid_t pid; + + rcu_read_lock(); + for_each_process_thread(p, t) { + if (is_closid_match(t, r) || is_rmid_match(t, r)) { + pid = task_pid_vnr(t); + if (pid) + seq_printf(s, "%d\n", pid); + } + } + rcu_read_unlock(); +} + +static int rdtgroup_tasks_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + show_rdt_tasks(rdtgrp, s); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static int rdtgroup_closid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->closid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static int rdtgroup_rmid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->mon.rmid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +#ifdef CONFIG_PROC_CPU_RESCTRL + +/* + * A task can only be part of one resctrl control group and of one monitor + * group which is associated to that control group. + * + * 1) res: + * mon: + * + * resctrl is not available. + * + * 2) res:/ + * mon: + * + * Task is part of the root resctrl control group, and it is not associated + * to any monitor group. + * + * 3) res:/ + * mon:mon0 + * + * Task is part of the root resctrl control group and monitor group mon0. 
+ * + * 4) res:group0 + * mon: + * + * Task is part of resctrl control group group0, and it is not associated + * to any monitor group. + * + * 5) res:group0 + * mon:mon1 + * + * Task is part of resctrl control group group0 and monitor group mon1. + */ +int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, + struct pid *pid, struct task_struct *tsk) +{ + struct rdtgroup *rdtg; + int ret = 0; + + mutex_lock(&rdtgroup_mutex); + + /* Return empty if resctrl has not been mounted. */ + if (!resctrl_mounted) { + seq_puts(s, "res:\nmon:\n"); + goto unlock; + } + + list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) { + struct rdtgroup *crg; + + /* + * Task information is only relevant for shareable + * and exclusive groups. + */ + if (rdtg->mode != RDT_MODE_SHAREABLE && + rdtg->mode != RDT_MODE_EXCLUSIVE) + continue; + + if (!resctrl_arch_match_closid(tsk, rdtg->closid)) + continue; + + seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", + rdtg->kn->name); + seq_puts(s, "mon:"); + list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, + mon.crdtgrp_list) { + if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid, + crg->mon.rmid)) + continue; + seq_printf(s, "%s", crg->kn->name); + break; + } + seq_putc(s, '\n'); + goto unlock; + } + /* + * The above search should succeed. Otherwise return + * with an error. + */ + ret = -ENOENT; +unlock: + mutex_unlock(&rdtgroup_mutex); + + return ret; +} +#endif + +static int rdt_last_cmd_status_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + int len; + + mutex_lock(&rdtgroup_mutex); + len = seq_buf_used(&last_cmd_status); + if (len) + seq_printf(seq, "%.*s", len, last_cmd_status_buf); + else + seq_puts(seq, "ok\n"); + mutex_unlock(&rdtgroup_mutex); + return 0; +} + +static int rdt_num_closids_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + + seq_printf(seq, "%u\n", s->num_closid); + return 0; +} + +static int rdt_default_ctrl_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%x\n", r->default_ctrl); + return 0; +} + +static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->cache.min_cbm_bits); + return 0; +} + +static int rdt_shareable_bits_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%x\n", r->cache.shareable_bits); + return 0; +} + +/* + * rdt_bit_usage_show - Display current usage of resources + * + * A domain is a shared resource that can now be allocated differently. Here + * we display the current regions of the domain as an annotated bitmask. 
+ * For each domain of this resource its allocation bitmask + * is annotated as below to indicate the current usage of the corresponding bit: + * 0 - currently unused + * X - currently available for sharing and used by software and hardware + * H - currently used by hardware only but available for software use + * S - currently used and shareable by software only + * E - currently used exclusively by one resource group + * P - currently pseudo-locked by one resource group + */ +static int rdt_bit_usage_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + /* + * Use unsigned long even though only 32 bits are used to ensure + * test_bit() is used safely. + */ + unsigned long sw_shareable = 0, hw_shareable = 0; + unsigned long exclusive = 0, pseudo_locked = 0; + struct rdt_resource *r = s->res; + struct rdt_domain *dom; + int i, hwb, swb, excl, psl; + enum rdtgrp_mode mode; + bool sep = false; + u32 ctrl_val; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + hw_shareable = r->cache.shareable_bits; + list_for_each_entry(dom, &r->domains, list) { + if (sep) + seq_putc(seq, ';'); + sw_shareable = 0; + exclusive = 0; + seq_printf(seq, "%d=", dom->id); + for (i = 0; i < closids_supported(); i++) { + if (!closid_allocated(i)) + continue; + ctrl_val = resctrl_arch_get_config(r, dom, i, + s->conf_type); + mode = rdtgroup_mode_by_closid(i); + switch (mode) { + case RDT_MODE_SHAREABLE: + sw_shareable |= ctrl_val; + break; + case RDT_MODE_EXCLUSIVE: + exclusive |= ctrl_val; + break; + case RDT_MODE_PSEUDO_LOCKSETUP: + /* + * RDT_MODE_PSEUDO_LOCKSETUP is possible + * here but not included since the CBM + * associated with this CLOSID in this mode + * is not initialized and no task or cpu can be + * assigned this CLOSID. + */ + break; + case RDT_MODE_PSEUDO_LOCKED: + case RDT_NUM_MODES: + WARN(1, + "invalid mode for closid %d\n", i); + break; + } + } + for (i = r->cache.cbm_len - 1; i >= 0; i--) { + pseudo_locked = dom->plr ? 
dom->plr->cbm : 0; + hwb = test_bit(i, &hw_shareable); + swb = test_bit(i, &sw_shareable); + excl = test_bit(i, &exclusive); + psl = test_bit(i, &pseudo_locked); + if (hwb && swb) + seq_putc(seq, 'X'); + else if (hwb && !swb) + seq_putc(seq, 'H'); + else if (!hwb && swb) + seq_putc(seq, 'S'); + else if (excl) + seq_putc(seq, 'E'); + else if (psl) + seq_putc(seq, 'P'); + else /* Unused bits remain */ + seq_putc(seq, '0'); + } + sep = true; + } + seq_putc(seq, '\n'); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return 0; +} + +static int rdt_min_bw_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->membw.min_bw); + return 0; +} + +static int rdt_num_rmids_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + + seq_printf(seq, "%d\n", r->num_rmid); + + return 0; +} + +static int rdt_mon_features_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + struct mon_evt *mevt; + + list_for_each_entry(mevt, &r->evt_list, list) { + seq_printf(seq, "%s\n", mevt->name); + if (mevt->configurable) + seq_printf(seq, "%s_config\n", mevt->name); + } + + return 0; +} + +static int rdt_bw_gran_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->membw.bw_gran); + return 0; +} + +static int rdt_delay_linear_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->membw.delay_linear); + return 0; +} + +static int max_threshold_occ_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold); + + return 0; +} + +static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD) + seq_puts(seq, "per-thread\n"); + else + seq_puts(seq, "max\n"); + + return 0; +} + +static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + unsigned int bytes; + int ret; + + ret = kstrtouint(buf, 0, &bytes); + if (ret) + return ret; + + if (bytes > resctrl_rmid_realloc_limit) + return -EINVAL; + + resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes); + + return nbytes; +} + +/* + * rdtgroup_mode_show - Display mode of this resource group + */ +static int rdtgroup_mode_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); + + rdtgroup_kn_unlock(of->kn); + return 0; +} + +static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type) +{ + switch (my_type) { + case CDP_CODE: + return CDP_DATA; + case CDP_DATA: + return CDP_CODE; + default: + case CDP_NONE: + return CDP_NONE; + } +} + +static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource 
*r = s->res;
+
+	seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);
+
+	return 0;
+}
+
+/**
+ * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
+ * @r: Resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @type: CDP type of @r.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Checks if provided @cbm intended to be used for @closid on domain
+ * @d overlaps with any other closids or other hardware usage associated
+ * with this domain. If @exclusive is true then only overlaps with
+ * resource groups in exclusive mode will be considered. If @exclusive
+ * is false then overlaps with any resource group or hardware entities
+ * will be considered.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: false if CBM does not overlap, true if it does.
+ */
+static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+				    unsigned long cbm, int closid,
+				    enum resctrl_conf_type type, bool exclusive)
+{
+	enum rdtgrp_mode mode;
+	unsigned long ctrl_b;
+	int i;
+
+	/* Check for any overlap with regions used by hardware directly */
+	if (!exclusive) {
+		ctrl_b = r->cache.shareable_bits;
+		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
+			return true;
+	}
+
+	/* Check for overlap with other resource groups */
+	for (i = 0; i < closids_supported(); i++) {
+		ctrl_b = resctrl_arch_get_config(r, d, i, type);
+		mode = rdtgroup_mode_by_closid(i);
+		if (closid_allocated(i) && i != closid &&
+		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
+			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
+				if (exclusive) {
+					if (mode == RDT_MODE_EXCLUSIVE)
+						return true;
+					continue;
+				}
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+/**
+ * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
+ * @s: Schema for the resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Resources that can be allocated using a CBM can use the CBM to control
+ * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
+ * for overlap. The overlap test is not limited to the specific resource for
+ * which the CBM is intended though - when dealing with CDP resources that
+ * share the underlying hardware the overlap check should also be performed
+ * on the CDP resource sharing the hardware.
+ *
+ * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
+ * overlap test.
+ *
+ * Return: true if CBM overlap detected, false if there is no overlap
+ */
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
+			   unsigned long cbm, int closid, bool exclusive)
+{
+	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
+	struct rdt_resource *r = s->res;
+
+	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
+				    exclusive))
+		return true;
+
+	if (!resctrl_arch_get_cdp_enabled(r->rid))
+		return false;
+	return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
+}
+
+/**
+ * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
+ * @rdtgrp: Resource group identified through its closid.
+ * + * An exclusive resource group implies that there should be no sharing of + * its allocated resources. At the time this group is considered to be + * exclusive this test can determine if its current schemata supports this + * setting by testing for overlap with all other resource groups. + * + * Return: true if resource group can be exclusive, false if there is overlap + * with allocations of other resource groups and thus this resource group + * cannot be exclusive. + */ +static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) +{ + int closid = rdtgrp->closid; + struct resctrl_schema *s; + struct rdt_resource *r; + bool has_cache = false; + struct rdt_domain *d; + u32 ctrl; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) + continue; + has_cache = true; + list_for_each_entry(d, &r->domains, list) { + ctrl = resctrl_arch_get_config(r, d, closid, + s->conf_type); + if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) { + rdt_last_cmd_puts("Schemata overlaps\n"); + return false; + } + } + } + + if (!has_cache) { + rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); + return false; + } + + return true; +} + +/* + * rdtgroup_mode_write - Modify the resource group's mode + */ +static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdtgroup *rdtgrp; + enum rdtgrp_mode mode; + int ret = 0; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + buf[nbytes - 1] = '\0'; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + rdt_last_cmd_clear(); + + mode = rdtgrp->mode; + + if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || + (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || + (!strcmp(buf, "pseudo-locksetup") && + mode == RDT_MODE_PSEUDO_LOCKSETUP) || + (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) + goto out; + + if (mode == RDT_MODE_PSEUDO_LOCKED) { + rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); + ret = -EINVAL; + goto out; + } + + if (!strcmp(buf, "shareable")) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = rdtgroup_locksetup_exit(rdtgrp); + if (ret) + goto out; + } + rdtgrp->mode = RDT_MODE_SHAREABLE; + } else if (!strcmp(buf, "exclusive")) { + if (!rdtgroup_mode_test_exclusive(rdtgrp)) { + ret = -EINVAL; + goto out; + } + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = rdtgroup_locksetup_exit(rdtgrp); + if (ret) + goto out; + } + rdtgrp->mode = RDT_MODE_EXCLUSIVE; + } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) && + !strcmp(buf, "pseudo-locksetup")) { + ret = rdtgroup_locksetup_enter(rdtgrp); + if (ret) + goto out; + rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; + } else { + rdt_last_cmd_puts("Unknown or unsupported mode\n"); + ret = -EINVAL; + } + +out: + rdtgroup_kn_unlock(of->kn); + return ret ?: nbytes; +} + +/** + * rdtgroup_cbm_to_size - Translate CBM to size in bytes + * @r: RDT resource to which @d belongs. + * @d: RDT domain instance. + * @cbm: bitmask for which the size should be computed. + * + * The bitmask provided associated with the RDT domain instance @d will be + * translated into how many bytes it represents. 
The size in bytes is + * computed by first dividing the total cache size by the CBM length to + * determine how many bytes each bit in the bitmask represents. The result + * is multiplied with the number of bits set in the bitmask. + * + * @cbm is unsigned long, even if only 32 bits are used to make the + * bitmap functions work correctly. + */ +unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, + struct rdt_domain *d, unsigned long cbm) +{ + struct cpu_cacheinfo *ci; + unsigned int size = 0; + int num_b, i; + + num_b = bitmap_weight(&cbm, r->cache.cbm_len); + ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); + for (i = 0; i < ci->num_leaves; i++) { + if (ci->info_list[i].level == r->cache_level) { + size = ci->info_list[i].size / r->cache.cbm_len * num_b; + break; + } + } + + return size; +} + +/* + * rdtgroup_size_show - Display size in bytes of allocated regions + * + * The "size" file mirrors the layout of the "schemata" file, printing the + * size in bytes of each region instead of the capacity bitmask. + */ +static int rdtgroup_size_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct resctrl_schema *schema; + enum resctrl_conf_type type; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + struct rdt_domain *d; + unsigned int size; + int ret = 0; + u32 closid; + bool sep; + u32 ctrl; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + seq_printf(s, "%*s:", max_name_width, + rdtgrp->plr->s->name); + size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res, + rdtgrp->plr->d, + rdtgrp->plr->cbm); + seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); + } + goto out; + } + + closid = rdtgrp->closid; + + list_for_each_entry(schema, &resctrl_schema_all, list) { + r = schema->res; + type = schema->conf_type; + sep = false; + seq_printf(s, "%*s:", max_name_width, schema->name); + list_for_each_entry(d, &r->domains, list) { + if (sep) + seq_putc(s, ';'); + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + size = 0; + } else { + if (is_mba_sc(r)) + ctrl = d->mbps_val[closid]; + else + ctrl = resctrl_arch_get_config(r, d, + closid, + type); + if (r->rid == RDT_RESOURCE_MBA || + r->rid == RDT_RESOURCE_SMBA) + size = ctrl; + else + size = rdtgroup_cbm_to_size(r, d, ctrl); + } + seq_printf(s, "%d=%u", d->id, size); + sep = true; + } + seq_putc(s, '\n'); + } + +out: + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static void mondata_config_read(struct resctrl_mon_config_info *mon_info) +{ + smp_call_function_any(&mon_info->d->cpu_mask, + resctrl_arch_mon_event_config_read, mon_info, 1); +} + +static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) +{ + struct resctrl_mon_config_info mon_info = {0}; + struct rdt_domain *dom; + bool sep = false; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + list_for_each_entry(dom, &r->domains, list) { + if (sep) + seq_puts(s, ";"); + + memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info)); + mon_info.r = r; + mon_info.d = dom; + mon_info.evtid = evtid; + mondata_config_read(&mon_info); + + seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config); + sep = true; + } + seq_puts(s, "\n"); + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + return 0; +} + +static int mbm_total_bytes_config_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) 
+{ + struct rdt_resource *r = of->kn->parent->priv; + + mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID); + + return 0; +} + +static int mbm_local_bytes_config_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + + mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID); + + return 0; +} + +static int mbm_config_write_domain(struct rdt_resource *r, + struct rdt_domain *d, u32 evtid, u32 val) +{ + struct resctrl_mon_config_info mon_info = {0}; + + /* + * Read the current config value first. If both are the same then + * no need to write it again. + */ + mon_info.r = r; + mon_info.d = d; + mon_info.evtid = evtid; + mondata_config_read(&mon_info); + if (mon_info.mon_config == val) + return 0; + + mon_info.mon_config = val; + + /* + * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the + * domain. The MSRs offset from MSR MSR_IA32_EVT_CFG_BASE + * are scoped at the domain level. Writing any of these MSRs + * on one CPU is observed by all the CPUs in the domain. + */ + smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_write, + &mon_info, 1); + if (mon_info.err) { + rdt_last_cmd_puts("Invalid event configuration\n"); + return mon_info.err; + } + + /* + * When an Event Configuration is changed, the bandwidth counters + * for all RMIDs and Events will be cleared by the hardware. The + * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for + * every RMID on the next read to any event for every RMID. + * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62) + * cleared while it is tracked by the hardware. Clear the + * mbm_local and mbm_total counts for all the RMIDs. + */ + resctrl_arch_reset_rmid_all(r, d); + + return 0; +} + +static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) +{ + char *dom_str = NULL, *id_str; + unsigned long dom_id, val; + struct rdt_domain *d; + int err; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + +next: + if (!tok || tok[0] == '\0') + return 0; + + /* Start processing the strings for each domain */ + dom_str = strim(strsep(&tok, ";")); + id_str = strsep(&dom_str, "="); + + if (!id_str || kstrtoul(id_str, 10, &dom_id)) { + rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n"); + return -EINVAL; + } + + if (!dom_str || kstrtoul(dom_str, 16, &val)) { + rdt_last_cmd_puts("Non-numeric event configuration value\n"); + return -EINVAL; + } + + /* Value from user cannot be more than the supported set of events */ + if ((val & r->mbm_cfg_mask) != val) { + rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n", + r->mbm_cfg_mask); + return -EINVAL; + } + + list_for_each_entry(d, &r->domains, list) { + if (d->id == dom_id) { + err = mbm_config_write_domain(r, d, evtid, val); + if (err) + return err; + goto next; + } + } + + return -EINVAL; +} + +static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, + loff_t off) +{ + struct rdt_resource *r = of->kn->parent->priv; + int ret; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + rdt_last_cmd_clear(); + + buf[nbytes - 1] = '\0'; + + ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID); + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + return ret ?: nbytes; +} + +static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, 
+ loff_t off) +{ + struct rdt_resource *r = of->kn->parent->priv; + int ret; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + rdt_last_cmd_clear(); + + buf[nbytes - 1] = '\0'; + + ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID); + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + return ret ?: nbytes; +} + +/* rdtgroup information files for one cache resource. */ +static struct rftype res_common_files[] = { + { + .name = "last_cmd_status", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_last_cmd_status_show, + .fflags = RFTYPE_TOP_INFO, + }, + { + .name = "num_closids", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_num_closids_show, + .fflags = RFTYPE_CTRL_INFO, + }, + { + .name = "mon_features", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_mon_features_show, + .fflags = RFTYPE_MON_INFO, + }, + { + .name = "num_rmids", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_num_rmids_show, + .fflags = RFTYPE_MON_INFO, + }, + { + .name = "cbm_mask", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_default_ctrl_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "min_cbm_bits", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_min_cbm_bits_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "shareable_bits", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_shareable_bits_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "bit_usage", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_bit_usage_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "min_bandwidth", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_min_bw_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + { + .name = "bandwidth_gran", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_bw_gran_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + { + .name = "delay_linear", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_delay_linear_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + /* + * Platform specific which (if any) capabilities are provided by + * thread_throttle_mode. Defer "fflags" initialization to platform + * discovery. 
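+	 *
+	 * Once enabled, the file is read-only, e.g. (illustrative output):
+	 *
+	 *	# cat /sys/fs/resctrl/info/MB/thread_throttle_mode
+	 *	max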
+ */ + { + .name = "thread_throttle_mode", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_thread_throttle_mode_show, + }, + { + .name = "max_threshold_occupancy", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = max_threshold_occ_write, + .seq_show = max_threshold_occ_show, + .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "mbm_total_bytes_config", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = mbm_total_bytes_config_show, + .write = mbm_total_bytes_config_write, + }, + { + .name = "mbm_local_bytes_config", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = mbm_local_bytes_config_show, + .write = mbm_local_bytes_config_write, + }, + { + .name = "cpus", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_cpus_write, + .seq_show = rdtgroup_cpus_show, + .fflags = RFTYPE_BASE, + }, + { + .name = "cpus_list", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_cpus_write, + .seq_show = rdtgroup_cpus_show, + .flags = RFTYPE_FLAGS_CPUS_LIST, + .fflags = RFTYPE_BASE, + }, + { + .name = "tasks", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_tasks_write, + .seq_show = rdtgroup_tasks_show, + .fflags = RFTYPE_BASE, + }, + { + .name = "mon_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_rmid_show, + .fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG, + }, + { + .name = "schemata", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_schemata_write, + .seq_show = rdtgroup_schemata_show, + .fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "mode", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_mode_write, + .seq_show = rdtgroup_mode_show, + .fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "size", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_size_show, + .fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "sparse_masks", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_has_sparse_bitmasks_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "ctrl_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_closid_show, + .fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG, + }, + +}; + +static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) +{ + struct rftype *rfts, *rft; + int ret, len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + lockdep_assert_held(&rdtgroup_mutex); + + if (resctrl_debug) + fflags |= RFTYPE_DEBUG; + + for (rft = rfts; rft < rfts + len; rft++) { + if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { + ret = rdtgroup_add_file(kn, rft); + if (ret) + goto error; + } + } + + return 0; +error: + pr_warn("Failed to add %s, err=%d\n", rft->name, ret); + while (--rft >= rfts) { + if ((fflags & rft->fflags) == rft->fflags) + kernfs_remove_by_name(kn, rft->name); + } + return ret; +} + +static struct rftype *rdtgroup_get_rftype_by_name(const char *name) +{ + struct rftype *rfts, *rft; + int len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + for (rft = rfts; rft < rfts + len; rft++) { + if (!strcmp(rft->name, name)) + return rft; + } + + return NULL; +} + +static void thread_throttle_mode_init(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + struct rftype *rft; + + if (!r->alloc_capable || + r->membw.throttle_mode == THREAD_THROTTLE_UNDEFINED) + return; + + rft = 
rdtgroup_get_rftype_by_name("thread_throttle_mode"); + if (!rft) + return; + + rft->fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB; +} + +void mbm_config_rftype_init(const char *config) +{ + struct rftype *rft; + + rft = rdtgroup_get_rftype_by_name(config); + if (rft) + rft->fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE; +} + +/** + * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file + * @r: The resource group with which the file is associated. + * @name: Name of the file + * + * The permissions of named resctrl file, directory, or link are modified + * to not allow read, write, or execute by any user. + * + * WARNING: This function is intended to communicate to the user that the + * resctrl file has been locked down - that it is not relevant to the + * particular state the system finds itself in. It should not be relied + * on to protect from user access because after the file's permissions + * are restricted the user can still change the permissions using chmod + * from the command line. + * + * Return: 0 on success, <0 on failure. + */ +int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) +{ + struct iattr iattr = {.ia_valid = ATTR_MODE,}; + struct kernfs_node *kn; + int ret = 0; + + kn = kernfs_find_and_get_ns(r->kn, name, NULL); + if (!kn) + return -ENOENT; + + switch (kernfs_type(kn)) { + case KERNFS_DIR: + iattr.ia_mode = S_IFDIR; + break; + case KERNFS_FILE: + iattr.ia_mode = S_IFREG; + break; + case KERNFS_LINK: + iattr.ia_mode = S_IFLNK; + break; + } + + ret = kernfs_setattr(kn, &iattr); + kernfs_put(kn); + return ret; +} + +/** + * rdtgroup_kn_mode_restore - Restore user access to named resctrl file + * @r: The resource group with which the file is associated. + * @name: Name of the file + * @mask: Mask of permissions that should be restored + * + * Restore the permissions of the named file. If @name is a directory the + * permissions of its parent will be used. + * + * Return: 0 on success, <0 on failure. 
+ */ +int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, + umode_t mask) +{ + struct iattr iattr = {.ia_valid = ATTR_MODE,}; + struct kernfs_node *kn, *parent; + struct rftype *rfts, *rft; + int ret, len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + for (rft = rfts; rft < rfts + len; rft++) { + if (!strcmp(rft->name, name)) + iattr.ia_mode = rft->mode & mask; + } + + kn = kernfs_find_and_get_ns(r->kn, name, NULL); + if (!kn) + return -ENOENT; + + switch (kernfs_type(kn)) { + case KERNFS_DIR: + parent = kernfs_get_parent(kn); + if (parent) { + iattr.ia_mode |= parent->mode; + kernfs_put(parent); + } + iattr.ia_mode |= S_IFDIR; + break; + case KERNFS_FILE: + iattr.ia_mode |= S_IFREG; + break; + case KERNFS_LINK: + iattr.ia_mode |= S_IFLNK; + break; + } + + ret = kernfs_setattr(kn, &iattr); + kernfs_put(kn); + return ret; +} + +static int rdtgroup_mkdir_info_resdir(void *priv, char *name, + unsigned long fflags) +{ + struct kernfs_node *kn_subdir; + int ret; + + kn_subdir = kernfs_create_dir(kn_info, name, + kn_info->mode, priv); + if (IS_ERR(kn_subdir)) + return PTR_ERR(kn_subdir); + + ret = rdtgroup_kn_set_ugid(kn_subdir); + if (ret) + return ret; + + ret = rdtgroup_add_files(kn_subdir, fflags); + if (!ret) + kernfs_activate(kn_subdir); + + return ret; +} + +static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) +{ + enum resctrl_res_level i; + struct resctrl_schema *s; + struct rdt_resource *r; + unsigned long fflags; + char name[32]; + int ret; + + /* create the directory */ + kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); + if (IS_ERR(kn_info)) + return PTR_ERR(kn_info); + + ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO); + if (ret) + goto out_destroy; + + /* loop over enabled controls, these are all alloc_capable */ + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + fflags = r->fflags | RFTYPE_CTRL_INFO; + ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); + if (ret) + goto out_destroy; + } + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->mon_capable) + continue; + + fflags = r->fflags | RFTYPE_MON_INFO; + sprintf(name, "%s_MON", r->name); + ret = rdtgroup_mkdir_info_resdir(r, name, fflags); + if (ret) + goto out_destroy; + } + + ret = rdtgroup_kn_set_ugid(kn_info); + if (ret) + goto out_destroy; + + kernfs_activate(kn_info); + + return 0; + +out_destroy: + kernfs_remove(kn_info); + return ret; +} + +static int +mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, + char *name, struct kernfs_node **dest_kn) +{ + struct kernfs_node *kn; + int ret; + + /* create the directory */ + kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + if (dest_kn) + *dest_kn = kn; + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) + goto out_destroy; + + kernfs_activate(kn); + + return 0; + +out_destroy: + kernfs_remove(kn); + return ret; +} + +static inline bool is_mba_linear(void) +{ + return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear; +} + +static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d) +{ + u32 num_closid = resctrl_arch_get_num_closid(r); + int cpu = cpumask_any(&d->cpu_mask); + int i; + + d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), + GFP_KERNEL, cpu_to_node(cpu)); + if (!d->mbps_val) + return -ENOMEM; + + for (i = 0; i < num_closid; i++) + d->mbps_val[i] = MBA_MAX_MBPS; + + return 0; +} + +static void 
mba_sc_domain_destroy(struct rdt_resource *r, + struct rdt_domain *d) +{ + kfree(d->mbps_val); + d->mbps_val = NULL; +} + +/* + * MBA software controller is supported only if + * MBM is supported and MBA is in linear scale. + */ +static bool supports_mba_mbps(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + + return (resctrl_arch_is_mbm_local_enabled() && + r->alloc_capable && is_mba_linear()); +} + +/* + * Enable or disable the MBA software controller + * which helps user specify bandwidth in MBps. + */ +static int set_mba_sc(bool mba_sc) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + u32 num_closid = resctrl_arch_get_num_closid(r); + struct rdt_domain *d; + int i; + + if (!supports_mba_mbps() || mba_sc == is_mba_sc(r)) + return -EINVAL; + + r->membw.mba_sc = mba_sc; + + list_for_each_entry(d, &r->domains, list) { + for (i = 0; i < num_closid; i++) + d->mbps_val[i] = MBA_MAX_MBPS; + } + + return 0; +} + +/* + * We don't allow rdtgroup directories to be created anywhere + * except the root directory. Thus when looking for the rdtgroup + * structure for a kernfs node we are either looking at a directory, + * in which case the rdtgroup structure is pointed at by the "priv" + * field, otherwise we have a file, and need only look to the parent + * to find the rdtgroup. + */ +static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) +{ + if (kernfs_type(kn) == KERNFS_DIR) { + /* + * All the resource directories use "kn->priv" + * to point to the "struct rdtgroup" for the + * resource. "info" and its subdirectories don't + * have rdtgroup structures, so return NULL here. + */ + if (kn == kn_info || kn->parent == kn_info) + return NULL; + else + return kn->priv; + } else { + return kn->parent->priv; + } +} + +static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn) +{ + atomic_inc(&rdtgrp->waitcount); + kernfs_break_active_protection(kn); +} + +static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn) +{ + if (atomic_dec_and_test(&rdtgrp->waitcount) && + (rdtgrp->flags & RDT_DELETED)) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) + rdtgroup_pseudo_lock_remove(rdtgrp); + kernfs_unbreak_active_protection(kn); + rdtgroup_remove(rdtgrp); + } else { + kernfs_unbreak_active_protection(kn); + } +} + +struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) +{ + struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); + + if (!rdtgrp) + return NULL; + + rdtgroup_kn_get(rdtgrp, kn); + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + /* Was this group deleted while we waited? 
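+	 * The group may have been removed by rdtgroup_rmdir() while we
+	 * slept waiting for the mutex; RDT_DELETED is left set so waiters
+	 * like us can detect this, and the structure itself stays
+	 * allocated until the last waiter drops its reference in
+	 * rdtgroup_kn_put().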
*/ + if (rdtgrp->flags & RDT_DELETED) + return NULL; + + return rdtgrp; +} + +void rdtgroup_kn_unlock(struct kernfs_node *kn) +{ + struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); + + if (!rdtgrp) + return; + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + rdtgroup_kn_put(rdtgrp, kn); +} + +static int mkdir_mondata_all(struct kernfs_node *parent_kn, + struct rdtgroup *prgrp, + struct kernfs_node **mon_data_kn); + +static void rdt_disable_ctx(void) +{ + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); + set_mba_sc(false); + + resctrl_debug = false; +} + +static int rdt_enable_ctx(struct rdt_fs_context *ctx) +{ + int ret = 0; + + if (ctx->enable_cdpl2) { + ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); + if (ret) + goto out_done; + } + + if (ctx->enable_cdpl3) { + ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); + if (ret) + goto out_cdpl2; + } + + if (ctx->enable_mba_mbps) { + ret = set_mba_sc(true); + if (ret) + goto out_cdpl3; + } + + if (ctx->enable_debug) + resctrl_debug = true; + + return 0; + +out_cdpl3: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); +out_cdpl2: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); +out_done: + return ret; +} + +static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type) +{ + struct resctrl_schema *s; + const char *suffix = ""; + int ret, cl; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->res = r; + s->num_closid = resctrl_arch_get_num_closid(r); + if (resctrl_arch_get_cdp_enabled(r->rid)) + s->num_closid /= 2; + + s->conf_type = type; + switch (type) { + case CDP_CODE: + suffix = "CODE"; + break; + case CDP_DATA: + suffix = "DATA"; + break; + case CDP_NONE: + suffix = ""; + break; + } + + ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); + if (ret >= sizeof(s->name)) { + kfree(s); + return -EINVAL; + } + + cl = strlen(s->name); + + /* + * If CDP is supported by this resource, but not enabled, + * include the suffix. This ensures the tabular format of the + * schemata file does not change between mounts of the filesystem. + */ + if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) + cl += 4; + + if (cl > max_name_width) + max_name_width = cl; + + /* + * Choose a width for the resource data based on the resource that has + * widest name and cbm. 
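+	 * (On x86, for instance, a cache resource's data width is the
+	 * number of hex digits needed to print its CBM, so a 20-bit CBM
+	 * yields a five-character column.)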
+ */ + max_data_width = max(max_data_width, r->data_width); + + INIT_LIST_HEAD(&s->list); + list_add(&s->list, &resctrl_schema_all); + + return 0; +} + +static int schemata_list_create(void) +{ + enum resctrl_res_level i; + struct rdt_resource *r; + int ret = 0; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + + if (resctrl_arch_get_cdp_enabled(r->rid)) { + ret = schemata_list_add(r, CDP_CODE); + if (ret) + break; + + ret = schemata_list_add(r, CDP_DATA); + } else { + ret = schemata_list_add(r, CDP_NONE); + } + + if (ret) + break; + } + + return ret; +} + +static void schemata_list_destroy(void) +{ + struct resctrl_schema *s, *tmp; + + list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { + list_del(&s->list); + kfree(s); + } +} + +static int rdt_get_tree(struct fs_context *fc) +{ + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdt_fs_context *ctx = rdt_fc2context(fc); + unsigned long flags = RFTYPE_CTRL_BASE; + struct rdt_domain *dom; + int ret; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + /* + * resctrl file system can only be mounted once. + */ + if (resctrl_mounted) { + ret = -EBUSY; + goto out; + } + + ret = rdtgroup_setup_root(ctx); + if (ret) + goto out; + + ret = rdt_enable_ctx(ctx); + if (ret) + goto out_root; + + ret = schemata_list_create(); + if (ret) { + schemata_list_destroy(); + goto out_ctx; + } + + closid_init(); + + if (resctrl_arch_mon_capable()) + flags |= RFTYPE_MON; + + ret = rdtgroup_add_files(rdtgroup_default.kn, flags); + if (ret) + goto out_schemata_free; + + kernfs_activate(rdtgroup_default.kn); + + ret = rdtgroup_create_info_dir(rdtgroup_default.kn); + if (ret < 0) + goto out_schemata_free; + + if (resctrl_arch_mon_capable()) { + ret = mongroup_create_dir(rdtgroup_default.kn, + &rdtgroup_default, "mon_groups", + &kn_mongrp); + if (ret < 0) + goto out_info; + + ret = mkdir_mondata_all(rdtgroup_default.kn, + &rdtgroup_default, &kn_mondata); + if (ret < 0) + goto out_mongrp; + rdtgroup_default.mon.mon_data_kn = kn_mondata; + } + + ret = rdt_pseudo_lock_init(); + if (ret) + goto out_mondata; + + ret = kernfs_get_tree(fc); + if (ret < 0) + goto out_psl; + + if (resctrl_arch_alloc_capable()) + resctrl_arch_enable_alloc(); + if (resctrl_arch_mon_capable()) + resctrl_arch_enable_mon(); + + if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) + resctrl_mounted = true; + + if (resctrl_is_mbm_enabled()) { + list_for_each_entry(dom, &l3->domains, list) + mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); + } + + goto out; + +out_psl: + rdt_pseudo_lock_release(); +out_mondata: + if (resctrl_arch_mon_capable()) + kernfs_remove(kn_mondata); +out_mongrp: + if (resctrl_arch_mon_capable()) + kernfs_remove(kn_mongrp); +out_info: + kernfs_remove(kn_info); +out_schemata_free: + schemata_list_destroy(); +out_ctx: + rdt_disable_ctx(); +out_root: + rdtgroup_destroy_root(); +out: + rdt_last_cmd_clear(); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return ret; +} + +enum rdt_param { + Opt_cdp, + Opt_cdpl2, + Opt_mba_mbps, + Opt_debug, + nr__rdt_params +}; + +static const struct fs_parameter_spec rdt_fs_parameters[] = { + fsparam_flag("cdp", Opt_cdp), + fsparam_flag("cdpl2", Opt_cdpl2), + fsparam_flag("mba_MBps", Opt_mba_mbps), + fsparam_flag("debug", Opt_debug), + {} +}; + +static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) +{ + struct rdt_fs_context *ctx = rdt_fc2context(fc); + struct 
fs_parse_result result; + int opt; + + opt = fs_parse(fc, rdt_fs_parameters, param, &result); + if (opt < 0) + return opt; + + switch (opt) { + case Opt_cdp: + ctx->enable_cdpl3 = true; + return 0; + case Opt_cdpl2: + ctx->enable_cdpl2 = true; + return 0; + case Opt_mba_mbps: + if (!supports_mba_mbps()) + return -EINVAL; + ctx->enable_mba_mbps = true; + return 0; + case Opt_debug: + ctx->enable_debug = true; + return 0; + } + + return -EINVAL; +} + +static void rdt_fs_context_free(struct fs_context *fc) +{ + struct rdt_fs_context *ctx = rdt_fc2context(fc); + + kernfs_free_fs_context(fc); + kfree(ctx); +} + +static const struct fs_context_operations rdt_fs_context_ops = { + .free = rdt_fs_context_free, + .parse_param = rdt_parse_param, + .get_tree = rdt_get_tree, +}; + +static int rdt_init_fs_context(struct fs_context *fc) +{ + struct rdt_fs_context *ctx; + + ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; + fc->fs_private = &ctx->kfc; + fc->ops = &rdt_fs_context_ops; + put_user_ns(fc->user_ns); + fc->user_ns = get_user_ns(&init_user_ns); + fc->global = true; + return 0; +} + +/* + * Move tasks from one to the other group. If @from is NULL, then all tasks + * in the systems are moved unconditionally (used for teardown). + * + * If @mask is not NULL the cpus on which moved tasks are running are set + * in that mask so the update smp function call is restricted to affected + * cpus. + */ +static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, + struct cpumask *mask) +{ + struct task_struct *p, *t; + + read_lock(&tasklist_lock); + for_each_process_thread(p, t) { + if (!from || is_closid_match(t, from) || + is_rmid_match(t, from)) { + resctrl_arch_set_closid_rmid(t, to->closid, + to->mon.rmid); + + /* + * Order the closid/rmid stores above before the loads + * in task_curr(). This pairs with the full barrier + * between the rq->curr update and + * resctrl_arch_sched_in() during context switch. + */ + smp_mb(); + + /* + * If the task is on a CPU, set the CPU in the mask. + * The detection is inaccurate as tasks might move or + * schedule before the smp function call takes place. + * In such a case the function call is pointless, but + * there is no other side effect. + */ + if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t)) + cpumask_set_cpu(task_cpu(t), mask); + } + } + read_unlock(&tasklist_lock); +} + +static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) +{ + struct rdtgroup *sentry, *stmp; + struct list_head *head; + + head = &rdtgrp->mon.crdtgrp_list; + list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { + free_rmid(sentry->closid, sentry->mon.rmid); + list_del(&sentry->mon.crdtgrp_list); + + if (atomic_read(&sentry->waitcount) != 0) + sentry->flags = RDT_DELETED; + else + rdtgroup_remove(sentry); + } +} + +/* + * Forcibly remove all of subdirectories under root. + */ +static void rmdir_all_sub(void) +{ + struct rdtgroup *rdtgrp, *tmp; + + /* Move all tasks to the default resource group */ + rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); + + list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { + /* Free any child rmids */ + free_all_child_rdtgrp(rdtgrp); + + /* Remove each rdtgroup other than root */ + if (rdtgrp == &rdtgroup_default) + continue; + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) + rdtgroup_pseudo_lock_remove(rdtgrp); + + /* + * Give any CPUs back to the default group. 
We cannot copy + * cpu_online_mask because a CPU might have executed the + * offline callback already, but is still marked online. + */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); + + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + + kernfs_remove(rdtgrp->kn); + list_del(&rdtgrp->rdtgroup_list); + + if (atomic_read(&rdtgrp->waitcount) != 0) + rdtgrp->flags = RDT_DELETED; + else + rdtgroup_remove(rdtgrp); + } + /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ + update_closid_rmid(cpu_online_mask, &rdtgroup_default); + + kernfs_remove(kn_info); + kernfs_remove(kn_mongrp); + kernfs_remove(kn_mondata); +} + +static void rdt_kill_sb(struct super_block *sb) +{ + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdt_domain *d; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + rdt_disable_ctx(); + + /* Put everything back to default values. */ + resctrl_arch_reset_resources(); + + rmdir_all_sub(); + + /* + * When resctrl is umounted, forcefully cancel delayed works since the + * new mount option may be changed. + */ + list_for_each_entry(d, &l3->domains, list) { + if (resctrl_is_mbm_enabled()) + cancel_delayed_work(&d->mbm_over); + if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { + __check_limbo(d, true); + cancel_delayed_work(&d->cqm_limbo); + } + } + + rdt_pseudo_lock_release(); + rdtgroup_default.mode = RDT_MODE_SHAREABLE; + schemata_list_destroy(); + rdtgroup_destroy_root(); + if (resctrl_arch_alloc_capable()) + resctrl_arch_disable_alloc(); + if (resctrl_arch_mon_capable()) + resctrl_arch_disable_mon(); + resctrl_mounted = false; + kernfs_kill_sb(sb); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +static struct file_system_type rdt_fs_type = { + .name = "resctrl", + .init_fs_context = rdt_init_fs_context, + .parameters = rdt_fs_parameters, + .kill_sb = rdt_kill_sb, +}; + +static int mon_addfile(struct kernfs_node *parent_kn, const char *name, + void *priv) +{ + struct kernfs_node *kn; + int ret = 0; + + kn = __kernfs_create_file(parent_kn, name, 0444, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, + &kf_mondata_ops, priv, NULL, NULL); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) { + kernfs_remove(kn); + return ret; + } + + return ret; +} + +/* + * Remove all subdirectories of mon_data of ctrl_mon groups + * and monitor groups with given domain id. 
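+ * This is used when a domain goes offline: the matching
+ * mon_<resource>_<domain_id> directory is removed from the mon_data
+ * directory of every control and monitor group.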
+ */
+static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
+					    unsigned int dom_id)
+{
+	struct rdtgroup *prgrp, *crgrp;
+	char name[32];
+
+	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
+		sprintf(name, "mon_%s_%02d", r->name, dom_id);
+		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);
+
+		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
+			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
+	}
+}
+
+static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
+				struct rdt_domain *d,
+				struct rdt_resource *r, struct rdtgroup *prgrp)
+{
+	union mon_data_bits priv;
+	struct kernfs_node *kn;
+	struct mon_evt *mevt;
+	struct rmid_read rr;
+	char name[32];
+	int ret;
+
+	sprintf(name, "mon_%s_%02d", r->name, d->id);
+	/* create the directory */
+	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
+	if (IS_ERR(kn))
+		return PTR_ERR(kn);
+
+	ret = rdtgroup_kn_set_ugid(kn);
+	if (ret)
+		goto out_destroy;
+
+	if (WARN_ON(list_empty(&r->evt_list))) {
+		ret = -EPERM;
+		goto out_destroy;
+	}
+
+	priv.u.rid = r->rid;
+	priv.u.domid = d->id;
+	list_for_each_entry(mevt, &r->evt_list, list) {
+		priv.u.evtid = mevt->evtid;
+		ret = mon_addfile(kn, mevt->name, priv.priv);
+		if (ret)
+			goto out_destroy;
+
+		if (resctrl_is_mbm_event(mevt->evtid))
+			mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
+	}
+	kernfs_activate(kn);
+	return 0;
+
+out_destroy:
+	kernfs_remove(kn);
+	return ret;
+}
+
+/*
+ * Add all subdirectories of mon_data for "ctrl_mon" groups
+ * and "monitor" groups with the given domain id.
+ */
+static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
+					   struct rdt_domain *d)
+{
+	struct kernfs_node *parent_kn;
+	struct rdtgroup *prgrp, *crgrp;
+	struct list_head *head;
+
+	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
+		parent_kn = prgrp->mon.mon_data_kn;
+		mkdir_mondata_subdir(parent_kn, d, r, prgrp);
+
+		head = &prgrp->mon.crdtgrp_list;
+		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
+			parent_kn = crgrp->mon.mon_data_kn;
+			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
+		}
+	}
+}
+
+static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
+				       struct rdt_resource *r,
+				       struct rdtgroup *prgrp)
+{
+	struct rdt_domain *dom;
+	int ret;
+
+	/* Walking r->domains, ensure it can't race with cpuhp */
+	lockdep_assert_cpus_held();
+
+	list_for_each_entry(dom, &r->domains, list) {
+		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * This creates a directory mon_data which contains the monitored data.
+ *
+ * mon_data has one directory for each domain, named in the format
+ * mon_<domain_name>_<domain_id>. For example, a mon_data directory on a
+ * system with L3 domains looks as below:
+ *	./mon_data:
+ *	mon_L3_00
+ *	mon_L3_01
+ *	mon_L3_02
+ *	...
+ *
+ * Each domain directory has one file per event:
+ *	./mon_L3_00/:
+ *	llc_occupancy
+ *
+ */
+static int mkdir_mondata_all(struct kernfs_node *parent_kn,
+			     struct rdtgroup *prgrp,
+			     struct kernfs_node **dest_kn)
+{
+	enum resctrl_res_level i;
+	struct rdt_resource *r;
+	struct kernfs_node *kn;
+	int ret;
+
+	/*
+	 * Create the mon_data directory first.
+	 */
+	ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
+	if (ret)
+		return ret;
+
+	if (dest_kn)
+		*dest_kn = kn;
+
+	/*
+	 * Create the subdirectories for each domain.
Note that all events + * in a domain like L3 are grouped into a resource whose domain is L3 + */ + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->mon_capable) + continue; + + ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); + if (ret) + goto out_destroy; + } + + return 0; + +out_destroy: + kernfs_remove(kn); + return ret; +} + +/** + * cbm_ensure_valid - Enforce validity on provided CBM + * @_val: Candidate CBM + * @r: RDT resource to which the CBM belongs + * + * The provided CBM represents all cache portions available for use. This + * may be represented by a bitmap that does not consist of contiguous ones + * and thus be an invalid CBM. + * Here the provided CBM is forced to be a valid CBM by only considering + * the first set of contiguous bits as valid and clearing all bits. + * The intention here is to provide a valid default CBM with which a new + * resource group is initialized. The user can follow this with a + * modification to the CBM if the default does not satisfy the + * requirements. + */ +static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) +{ + unsigned int cbm_len = r->cache.cbm_len; + unsigned long first_bit, zero_bit; + unsigned long val = _val; + + if (!val) + return 0; + + first_bit = find_first_bit(&val, cbm_len); + zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); + + /* Clear any remaining bits to ensure contiguous region */ + bitmap_clear(&val, zero_bit, cbm_len - zero_bit); + return (u32)val; +} + +/* + * Initialize cache resources per RDT domain + * + * Set the RDT domain up to start off with all usable allocations. That is, + * all shareable and unused bits. All-zero CBM is invalid. + */ +static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s, + u32 closid) +{ + enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); + enum resctrl_conf_type t = s->conf_type; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + u32 used_b = 0, unused_b = 0; + unsigned long tmp_cbm; + enum rdtgrp_mode mode; + u32 peer_ctl, ctrl_val; + int i; + + cfg = &d->staged_config[t]; + cfg->have_new_ctrl = false; + cfg->new_ctrl = r->cache.shareable_bits; + used_b = r->cache.shareable_bits; + for (i = 0; i < closids_supported(); i++) { + if (closid_allocated(i) && i != closid) { + mode = rdtgroup_mode_by_closid(i); + if (mode == RDT_MODE_PSEUDO_LOCKSETUP) + /* + * ctrl values for locksetup aren't relevant + * until the schemata is written, and the mode + * becomes RDT_MODE_PSEUDO_LOCKED. + */ + continue; + /* + * If CDP is active include peer domain's + * usage to ensure there is no overlap + * with an exclusive group. + */ + if (resctrl_arch_get_cdp_enabled(r->rid)) + peer_ctl = resctrl_arch_get_config(r, d, i, + peer_type); + else + peer_ctl = 0; + ctrl_val = resctrl_arch_get_config(r, d, i, + s->conf_type); + used_b |= ctrl_val | peer_ctl; + if (mode == RDT_MODE_SHAREABLE) + cfg->new_ctrl |= ctrl_val | peer_ctl; + } + } + if (d->plr && d->plr->cbm > 0) + used_b |= d->plr->cbm; + unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); + unused_b &= BIT_MASK(r->cache.cbm_len) - 1; + cfg->new_ctrl |= unused_b; + /* + * Force the initial CBM to be valid, user can + * modify the CBM based on system availability. + */ + cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); + /* + * Assign the u32 CBM to an unsigned long to ensure that + * bitmap_weight() does not access out-of-bound memory. 
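+	 * (bitmap_weight() walks unsigned long words, so on a 64-bit
+	 * kernel passing the address of a u32 directly would let it read
+	 * past the end of the variable.)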
+ */ + tmp_cbm = cfg->new_ctrl; + if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { + rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id); + return -ENOSPC; + } + cfg->have_new_ctrl = true; + + return 0; +} + +/* + * Initialize cache resources with default values. + * + * A new RDT group is being created on an allocation capable (CAT) + * supporting system. Set this group up to start off with all usable + * allocations. + * + * If there are no more shareable bits available on any domain then + * the entire allocation will fail. + */ +static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) +{ + struct rdt_domain *d; + int ret; + + list_for_each_entry(d, &s->res->domains, list) { + ret = __init_one_rdt_domain(d, s, closid); + if (ret < 0) + return ret; + } + + return 0; +} + +/* Initialize MBA resource with default values. */ +static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid) +{ + struct resctrl_staged_config *cfg; + struct rdt_domain *d; + + list_for_each_entry(d, &r->domains, list) { + if (is_mba_sc(r)) { + d->mbps_val[closid] = MBA_MAX_MBPS; + continue; + } + + cfg = &d->staged_config[CDP_NONE]; + cfg->new_ctrl = r->default_ctrl; + cfg->have_new_ctrl = true; + } +} + +/* Initialize the RDT group's allocations. */ +static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) +{ + struct resctrl_schema *s; + struct rdt_resource *r; + int ret = 0; + + rdt_staged_configs_clear(); + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + if (r->rid == RDT_RESOURCE_MBA || + r->rid == RDT_RESOURCE_SMBA) { + rdtgroup_init_mba(r, rdtgrp->closid); + if (is_mba_sc(r)) + continue; + } else { + ret = rdtgroup_init_cat(s, rdtgrp->closid); + if (ret < 0) + goto out; + } + + ret = resctrl_arch_update_domains(r, rdtgrp->closid); + if (ret < 0) { + rdt_last_cmd_puts("Failed to initialize allocations\n"); + goto out; + } + + } + + rdtgrp->mode = RDT_MODE_SHAREABLE; + +out: + rdt_staged_configs_clear(); + return ret; +} + +static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) +{ + int ret; + + if (!resctrl_arch_mon_capable()) + return 0; + + ret = alloc_rmid(rdtgrp->closid); + if (ret < 0) { + rdt_last_cmd_puts("Out of RMIDs\n"); + return ret; + } + rdtgrp->mon.rmid = ret; + + ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); + if (ret) { + rdt_last_cmd_puts("kernfs subdir error\n"); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + return ret; + } + + return 0; +} + +static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) +{ + if (resctrl_arch_mon_capable()) + free_rmid(rgrp->closid, rgrp->mon.rmid); +} + +static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, + const char *name, umode_t mode, + enum rdt_group_type rtype, struct rdtgroup **r) +{ + struct rdtgroup *prdtgrp, *rdtgrp; + unsigned long files = 0; + struct kernfs_node *kn; + int ret; + + prdtgrp = rdtgroup_kn_lock_live(parent_kn); + if (!prdtgrp) { + ret = -ENODEV; + goto out_unlock; + } + + if (rtype == RDTMON_GROUP && + (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto out_unlock; + } + + /* allocate the rdtgroup. 
*/ + rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); + if (!rdtgrp) { + ret = -ENOSPC; + rdt_last_cmd_puts("Kernel out of memory\n"); + goto out_unlock; + } + *r = rdtgrp; + rdtgrp->mon.parent = prdtgrp; + rdtgrp->type = rtype; + INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); + + /* kernfs creates the directory for rdtgrp */ + kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); + if (IS_ERR(kn)) { + ret = PTR_ERR(kn); + rdt_last_cmd_puts("kernfs create error\n"); + goto out_free_rgrp; + } + rdtgrp->kn = kn; + + /* + * kernfs_remove() will drop the reference count on "kn" which + * will free it. But we still need it to stick around for the + * rdtgroup_kn_unlock(kn) call. Take one extra reference here, + * which will be dropped by kernfs_put() in rdtgroup_remove(). + */ + kernfs_get(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) { + rdt_last_cmd_puts("kernfs perm error\n"); + goto out_destroy; + } + + if (rtype == RDTCTRL_GROUP) { + files = RFTYPE_BASE | RFTYPE_CTRL; + if (resctrl_arch_mon_capable()) + files |= RFTYPE_MON; + } else { + files = RFTYPE_BASE | RFTYPE_MON; + } + + ret = rdtgroup_add_files(kn, files); + if (ret) { + rdt_last_cmd_puts("kernfs fill error\n"); + goto out_destroy; + } + + /* + * The caller unlocks the parent_kn upon success. + */ + return 0; + +out_destroy: + kernfs_put(rdtgrp->kn); + kernfs_remove(rdtgrp->kn); +out_free_rgrp: + kfree(rdtgrp); +out_unlock: + rdtgroup_kn_unlock(parent_kn); + return ret; +} + +static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) +{ + kernfs_remove(rgrp->kn); + rdtgroup_remove(rgrp); +} + +/* + * Create a monitor group under "mon_groups" directory of a control + * and monitor group(ctrl_mon). This is a resource group + * to monitor a subset of tasks and cpus in its parent ctrl_mon group. + */ +static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, + const char *name, umode_t mode) +{ + struct rdtgroup *rdtgrp, *prgrp; + int ret; + + ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); + if (ret) + return ret; + + prgrp = rdtgrp->mon.parent; + rdtgrp->closid = prgrp->closid; + + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) { + mkdir_rdt_prepare_clean(rdtgrp); + goto out_unlock; + } + + kernfs_activate(rdtgrp->kn); + + /* + * Add the rdtgrp to the list of rdtgrps the parent + * ctrl_mon group has to track. + */ + list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); + +out_unlock: + rdtgroup_kn_unlock(parent_kn); + return ret; +} + +/* + * These are rdtgroups created under the root directory. Can be used + * to allocate and monitor resources. + */ +static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, + const char *name, umode_t mode) +{ + struct rdtgroup *rdtgrp; + struct kernfs_node *kn; + u32 closid; + int ret; + + ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); + if (ret) + return ret; + + kn = rdtgrp->kn; + ret = closid_alloc(); + if (ret < 0) { + rdt_last_cmd_puts("Out of CLOSIDs\n"); + goto out_common_fail; + } + closid = ret; + ret = 0; + + rdtgrp->closid = closid; + + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) + goto out_closid_free; + + kernfs_activate(rdtgrp->kn); + + ret = rdtgroup_init_alloc(rdtgrp); + if (ret < 0) + goto out_rmid_free; + + list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); + + if (resctrl_arch_mon_capable()) { + /* + * Create an empty mon_groups directory to hold the subset + * of tasks and cpus to monitor. 
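+		 * Monitor groups later created inside it (via mkdir) share
+		 * this group's CLOSID but are given their own RMID.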
+ */ + ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); + if (ret) { + rdt_last_cmd_puts("kernfs subdir error\n"); + goto out_del_list; + } + } + + goto out_unlock; + +out_del_list: + list_del(&rdtgrp->rdtgroup_list); +out_rmid_free: + mkdir_rdt_prepare_rmid_free(rdtgrp); +out_closid_free: + closid_free(closid); +out_common_fail: + mkdir_rdt_prepare_clean(rdtgrp); +out_unlock: + rdtgroup_kn_unlock(parent_kn); + return ret; +} + +/* + * We allow creating mon groups only with in a directory called "mon_groups" + * which is present in every ctrl_mon group. Check if this is a valid + * "mon_groups" directory. + * + * 1. The directory should be named "mon_groups". + * 2. The mon group itself should "not" be named "mon_groups". + * This makes sure "mon_groups" directory always has a ctrl_mon group + * as parent. + */ +static bool is_mon_groups(struct kernfs_node *kn, const char *name) +{ + return (!strcmp(kn->name, "mon_groups") && + strcmp(name, "mon_groups")); +} + +static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, + umode_t mode) +{ + /* Do not accept '\n' to avoid unparsable situation. */ + if (strchr(name, '\n')) + return -EINVAL; + + /* + * If the parent directory is the root directory and RDT + * allocation is supported, add a control and monitoring + * subdirectory + */ + if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn) + return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); + + /* + * If RDT monitoring is supported and the parent directory is a valid + * "mon_groups" directory, add a monitoring subdirectory. + */ + if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name)) + return rdtgroup_mkdir_mon(parent_kn, name, mode); + + return -EPERM; +} + +static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) +{ + struct rdtgroup *prdtgrp = rdtgrp->mon.parent; + u32 closid, rmid; + int cpu; + + /* Give any tasks back to the parent group */ + rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); + + /* Update per cpu rmid of the moved CPUs first */ + closid = rdtgrp->closid; + rmid = prdtgrp->mon.rmid; + for_each_cpu(cpu, &rdtgrp->cpu_mask) + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); + + /* + * Update the MSR on moved CPUs and CPUs which have moved + * task running on them. 
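+	 * update_closid_rmid() notifies every CPU in @tmpmask so that its
+	 * per-CPU defaults and the PQR_ASSOC MSR pick up the parent's
+	 * CLOSID and RMID.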
+ */ + cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); + update_closid_rmid(tmpmask, NULL); + + rdtgrp->flags = RDT_DELETED; + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + + /* + * Remove the rdtgrp from the parent ctrl_mon group's list + */ + WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); + list_del(&rdtgrp->mon.crdtgrp_list); + + kernfs_remove(rdtgrp->kn); + + return 0; +} + +static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) +{ + rdtgrp->flags = RDT_DELETED; + list_del(&rdtgrp->rdtgroup_list); + + kernfs_remove(rdtgrp->kn); + return 0; +} + +static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) +{ + u32 closid, rmid; + int cpu; + + /* Give any tasks back to the default group */ + rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); + + /* Give any CPUs back to the default group */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); + + /* Update per cpu closid and rmid of the moved CPUs first */ + closid = rdtgroup_default.closid; + rmid = rdtgroup_default.mon.rmid; + for_each_cpu(cpu, &rdtgrp->cpu_mask) + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); + + /* + * Update the MSR on moved CPUs and CPUs which have moved + * task running on them. + */ + cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); + update_closid_rmid(tmpmask, NULL); + + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + closid_free(rdtgrp->closid); + + rdtgroup_ctrl_remove(rdtgrp); + + /* + * Free all the child monitor group rmids. + */ + free_all_child_rdtgrp(rdtgrp); + + return 0; +} + +static int rdtgroup_rmdir(struct kernfs_node *kn) +{ + struct kernfs_node *parent_kn = kn->parent; + struct rdtgroup *rdtgrp; + cpumask_var_t tmpmask; + int ret = 0; + + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) + return -ENOMEM; + + rdtgrp = rdtgroup_kn_lock_live(kn); + if (!rdtgrp) { + ret = -EPERM; + goto out; + } + + /* + * If the rdtgroup is a ctrl_mon group and parent directory + * is the root directory, remove the ctrl_mon group. + * + * If the rdtgroup is a mon group and parent directory + * is a valid "mon_groups" directory, remove the mon group. + */ + if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && + rdtgrp != &rdtgroup_default) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + ret = rdtgroup_ctrl_remove(rdtgrp); + } else { + ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask); + } + } else if (rdtgrp->type == RDTMON_GROUP && + is_mon_groups(parent_kn, kn->name)) { + ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask); + } else { + ret = -EPERM; + } + +out: + rdtgroup_kn_unlock(kn); + free_cpumask_var(tmpmask); + return ret; +} + +/** + * mongrp_reparent() - replace parent CTRL_MON group of a MON group + * @rdtgrp: the MON group whose parent should be replaced + * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp + * @cpus: cpumask provided by the caller for use during this call + * + * Replaces the parent CTRL_MON group for a MON group, resulting in all member + * tasks' CLOSID immediately changing to that of the new parent group. + * Monitoring data for the group is unaffected by this operation. + */ +static void mongrp_reparent(struct rdtgroup *rdtgrp, + struct rdtgroup *new_prdtgrp, + cpumask_var_t cpus) +{ + struct rdtgroup *prdtgrp = rdtgrp->mon.parent; + + WARN_ON(rdtgrp->type != RDTMON_GROUP); + WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); + + /* Nothing to do when simply renaming a MON group. 
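+	 * (i.e. the group keeps the same parent ctrl_mon group, so its
+	 * CLOSID and its crdtgrp_list membership are unchanged.)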
*/ + if (prdtgrp == new_prdtgrp) + return; + + WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); + list_move_tail(&rdtgrp->mon.crdtgrp_list, + &new_prdtgrp->mon.crdtgrp_list); + + rdtgrp->mon.parent = new_prdtgrp; + rdtgrp->closid = new_prdtgrp->closid; + + /* Propagate updated closid to all tasks in this group. */ + rdt_move_group_tasks(rdtgrp, rdtgrp, cpus); + + update_closid_rmid(cpus, NULL); +} + +static int rdtgroup_rename(struct kernfs_node *kn, + struct kernfs_node *new_parent, const char *new_name) +{ + struct rdtgroup *new_prdtgrp; + struct rdtgroup *rdtgrp; + cpumask_var_t tmpmask; + int ret; + + rdtgrp = kernfs_to_rdtgroup(kn); + new_prdtgrp = kernfs_to_rdtgroup(new_parent); + if (!rdtgrp || !new_prdtgrp) + return -ENOENT; + + /* Release both kernfs active_refs before obtaining rdtgroup mutex. */ + rdtgroup_kn_get(rdtgrp, kn); + rdtgroup_kn_get(new_prdtgrp, new_parent); + + mutex_lock(&rdtgroup_mutex); + + rdt_last_cmd_clear(); + + /* + * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if + * either kernfs_node is a file. + */ + if (kernfs_type(kn) != KERNFS_DIR || + kernfs_type(new_parent) != KERNFS_DIR) { + rdt_last_cmd_puts("Source and destination must be directories"); + ret = -EPERM; + goto out; + } + + if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) { + ret = -ENOENT; + goto out; + } + + if (rdtgrp->type != RDTMON_GROUP || !kn->parent || + !is_mon_groups(kn->parent, kn->name)) { + rdt_last_cmd_puts("Source must be a MON group\n"); + ret = -EPERM; + goto out; + } + + if (!is_mon_groups(new_parent, new_name)) { + rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n"); + ret = -EPERM; + goto out; + } + + /* + * If the MON group is monitoring CPUs, the CPUs must be assigned to the + * current parent CTRL_MON group and therefore cannot be assigned to + * the new parent, making the move illegal. + */ + if (!cpumask_empty(&rdtgrp->cpu_mask) && + rdtgrp->mon.parent != new_prdtgrp) { + rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n"); + ret = -EPERM; + goto out; + } + + /* + * Allocate the cpumask for use in mongrp_reparent() to avoid the + * possibility of failing to allocate it after kernfs_rename() has + * succeeded. + */ + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) { + ret = -ENOMEM; + goto out; + } + + /* + * Perform all input validation and allocations needed to ensure + * mongrp_reparent() will succeed before calling kernfs_rename(), + * otherwise it would be necessary to revert this call if + * mongrp_reparent() failed. 
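+	 * mongrp_reparent() itself cannot fail, so a successful
+	 * kernfs_rename() is the point of no return.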
+ */ + ret = kernfs_rename(kn, new_parent, new_name); + if (!ret) + mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask); + + free_cpumask_var(tmpmask); + +out: + mutex_unlock(&rdtgroup_mutex); + rdtgroup_kn_put(rdtgrp, kn); + rdtgroup_kn_put(new_prdtgrp, new_parent); + return ret; +} + +static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) +{ + if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) + seq_puts(seq, ",cdp"); + + if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) + seq_puts(seq, ",cdpl2"); + + if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA))) + seq_puts(seq, ",mba_MBps"); + + if (resctrl_debug) + seq_puts(seq, ",debug"); + + return 0; +} + +static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { + .mkdir = rdtgroup_mkdir, + .rmdir = rdtgroup_rmdir, + .rename = rdtgroup_rename, + .show_options = rdtgroup_show_options, +}; + +static int rdtgroup_setup_root(struct rdt_fs_context *ctx) +{ + rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, + KERNFS_ROOT_CREATE_DEACTIVATED | + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, + &rdtgroup_default); + if (IS_ERR(rdt_root)) + return PTR_ERR(rdt_root); + + ctx->kfc.root = rdt_root; + rdtgroup_default.kn = kernfs_root_to_node(rdt_root); + + return 0; +} + +static void rdtgroup_destroy_root(void) +{ + kernfs_destroy_root(rdt_root); + rdtgroup_default.kn = NULL; +} + +static void rdtgroup_setup_default(void) +{ + mutex_lock(&rdtgroup_mutex); + + rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID; + rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID; + rdtgroup_default.type = RDTCTRL_GROUP; + INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); + + list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); + + mutex_unlock(&rdtgroup_mutex); +} + +static void domain_destroy_mon_state(struct rdt_domain *d) +{ + bitmap_free(d->rmid_busy_llc); + kfree(d->mbm_total); + kfree(d->mbm_local); +} + +void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) +{ + mutex_lock(&rdtgroup_mutex); + + if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) + mba_sc_domain_destroy(r, d); + + if (!r->mon_capable) + goto out_unlock; + + /* + * If resctrl is mounted, remove all the + * per domain monitor data directories. + */ + if (resctrl_mounted && resctrl_arch_mon_capable()) + rmdir_mondata_subdir_allrdtgrp(r, d->id); + + if (resctrl_is_mbm_enabled()) + cancel_delayed_work(&d->mbm_over); + if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { + /* + * When a package is going down, forcefully + * decrement rmid->ebusy. There is no way to know + * that the L3 was flushed and hence may lead to + * incorrect counts in rare scenarios, but leaving + * the RMID as busy creates RMID leaks if the + * package never comes back. 
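+		 * __check_limbo() with force set therefore frees every
+		 * RMID still in limbo on this domain before the domain
+		 * state is destroyed.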
+ */ + __check_limbo(d, true); + cancel_delayed_work(&d->cqm_limbo); + } + + domain_destroy_mon_state(d); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); +} + +static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + size_t tsize; + + if (resctrl_arch_is_llc_occupancy_enabled()) { + d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL); + if (!d->rmid_busy_llc) + return -ENOMEM; + } + if (resctrl_arch_is_mbm_total_enabled()) { + tsize = sizeof(*d->mbm_total); + d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL); + if (!d->mbm_total) { + bitmap_free(d->rmid_busy_llc); + return -ENOMEM; + } + } + if (resctrl_arch_is_mbm_local_enabled()) { + tsize = sizeof(*d->mbm_local); + d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL); + if (!d->mbm_local) { + bitmap_free(d->rmid_busy_llc); + kfree(d->mbm_total); + return -ENOMEM; + } + } + + return 0; +} + +int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) +{ + int err = 0; + + mutex_lock(&rdtgroup_mutex); + + if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) { + /* RDT_RESOURCE_MBA is never mon_capable */ + err = mba_sc_domain_allocate(r, d); + goto out_unlock; + } + + if (!r->mon_capable) + goto out_unlock; + + err = domain_setup_mon_state(r, d); + if (err) + goto out_unlock; + + if (resctrl_is_mbm_enabled()) { + INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); + mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); + } + + if (resctrl_arch_is_llc_occupancy_enabled()) + INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); + + /* + * If the filesystem is not mounted then only the default resource group + * exists. Creation of its directories is deferred until mount time + * by rdt_get_tree() calling mkdir_mondata_all(). + * If resctrl is mounted, add per domain monitor data directories. + */ + if (resctrl_mounted && resctrl_arch_mon_capable()) + mkdir_mondata_subdir_allrdtgrp(r, d); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; +} + +void resctrl_online_cpu(unsigned int cpu) +{ + mutex_lock(&rdtgroup_mutex); + /* The CPU is set in default rdtgroup after online. 
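+	 * It stays there until it is explicitly reassigned by a write to
+	 * some resource group's "cpus" file.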
*/ + cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); + mutex_unlock(&rdtgroup_mutex); +} + +static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) +{ + struct rdtgroup *cr; + + list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { + if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) + break; + } +} + +void resctrl_offline_cpu(unsigned int cpu) +{ + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdtgroup *rdtgrp; + struct rdt_domain *d; + + mutex_lock(&rdtgroup_mutex); + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { + if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { + clear_childcpus(rdtgrp, cpu); + break; + } + } + + if (!l3->mon_capable) + goto out_unlock; + + d = resctrl_get_domain_from_cpu(cpu, l3); + if (d) { + if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) { + cancel_delayed_work(&d->mbm_over); + mbm_setup_overflow_handler(d, 0, cpu); + } + if (resctrl_arch_is_llc_occupancy_enabled() && + cpu == d->cqm_work_cpu && has_busy_rmid(d)) { + cancel_delayed_work(&d->cqm_limbo); + cqm_setup_limbo_handler(d, 0, cpu); + } + } + +out_unlock: + mutex_unlock(&rdtgroup_mutex); +} + +/* + * resctrl_init - resctrl filesystem initialization + * + * Setup resctrl file system including set up root, create mount point, + * register resctrl filesystem, and initialize files under root directory. + * + * Return: 0 on success or -errno + */ +int resctrl_init(void) +{ + int ret = 0; + + seq_buf_init(&last_cmd_status, last_cmd_status_buf, + sizeof(last_cmd_status_buf)); + + rdtgroup_setup_default(); + + thread_throttle_mode_init(); + + ret = resctrl_mon_resource_init(); + if (ret) + return ret; + + ret = sysfs_create_mount_point(fs_kobj, "resctrl"); + if (ret) + return ret; + + ret = register_filesystem(&rdt_fs_type); + if (ret) + goto cleanup_mountpoint; + + /* + * Adding the resctrl debugfs directory here may not be ideal since + * it would let the resctrl debugfs directory appear on the debugfs + * filesystem before the resctrl filesystem is mounted. + * It may also be ok since that would enable debugging of RDT before + * resctrl is mounted. + * The reason why the debugfs directory is created here and not in + * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and + * during the debugfs directory creation also &sb->s_type->i_mutex_key + * (the lockdep class of inode->i_rwsem). Other filesystem + * interactions (eg. SyS_getdents) have the lock ordering: + * &sb->s_type->i_mutex_key --> &mm->mmap_lock + * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex + * is taken, thus creating dependency: + * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause + * issues considering the other two lock dependencies. + * By creating the debugfs directory here we avoid a dependency + * that may cause deadlock (even though file operations cannot + * occur until the filesystem is mounted, but I do not know how to + * tell lockdep that). 
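+	 * In short, creating the directory under rdtgroup_mutex would add
+	 * the edge rdtgroup_mutex -> &sb->s_type->i_mutex_key, closing the
+	 * cycle rdtgroup_mutex -> i_mutex_key -> mmap_lock -> rdtgroup_mutex.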
+ */ + debugfs_resctrl = debugfs_create_dir("resctrl", NULL); + + return 0; + +cleanup_mountpoint: + sysfs_remove_mount_point(fs_kobj, "resctrl"); + + return ret; +} + +void resctrl_exit(void) +{ + debugfs_remove_recursive(debugfs_resctrl); + unregister_filesystem(&rdt_fs_type); + sysfs_remove_mount_point(fs_kobj, "resctrl"); + + resctrl_mon_resource_exit(); +} diff --git a/fs/super.c b/fs/super.c index b142e71eb8dfdd88ad025b26cf1d4974f0016b44..f2cea05a0052315d8db2b1abdb1ac7cd4f101fb4 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1544,13 +1544,14 @@ int setup_bdev_super(struct super_block *sb, int sb_flags, EXPORT_SYMBOL_GPL(setup_bdev_super); /** - * get_tree_bdev - Get a superblock based on a single block device + * get_tree_bdev_flags - Get a superblock based on a single block device * @fc: The filesystem context holding the parameters * @fill_super: Helper to initialise a new superblock + * @flags: GET_TREE_BDEV_* flags */ -int get_tree_bdev(struct fs_context *fc, - int (*fill_super)(struct super_block *, - struct fs_context *)) +int get_tree_bdev_flags(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc), unsigned int flags) { struct super_block *s; int error = 0; @@ -1561,10 +1562,10 @@ int get_tree_bdev(struct fs_context *fc, error = lookup_bdev(fc->source, &dev); if (error) { - errorf(fc, "%s: Can't lookup blockdev", fc->source); + if (!(flags & GET_TREE_BDEV_QUIET_LOOKUP)) + errorf(fc, "%s: Can't lookup blockdev", fc->source); return error; } - fc->sb_flags |= SB_NOSEC; s = sget_dev(fc, dev); if (IS_ERR(s)) @@ -1600,6 +1601,19 @@ int get_tree_bdev(struct fs_context *fc, fc->root = dget(s->s_root); return 0; } +EXPORT_SYMBOL_GPL(get_tree_bdev_flags); + +/** + * get_tree_bdev - Get a superblock based on a single block device + * @fc: The filesystem context holding the parameters + * @fill_super: Helper to initialise a new superblock + */ +int get_tree_bdev(struct fs_context *fc, + int (*fill_super)(struct super_block *, + struct fs_context *)) +{ + return get_tree_bdev_flags(fc, fill_super, 0); +} EXPORT_SYMBOL(get_tree_bdev); static int test_bdev_super(struct super_block *s, void *data) diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index ed966108da806793f6f6991d9dff67d573709907..b477203de531d7965d409b319121014fdcbcdc17 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -11,6 +11,8 @@ #ifndef __UBIFS_DEBUG_H__ #define __UBIFS_DEBUG_H__ +#include + /* Checking helper functions */ typedef int (*dbg_leaf_callback)(struct ubifs_info *c, struct ubifs_zbranch *zbr, void *priv); @@ -115,6 +117,9 @@ struct ubifs_debug_info { struct dentry *dfs_chk_fs; struct dentry *dfs_tst_rcvry; struct dentry *dfs_ro_error; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 3409488d39ba1ccec0d9064bae53b0f4ccf76872..78f31bddff74655d45b12a75f3180bba7e0e8269 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2246,7 +2246,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) goto out_umount; } - import_uuid(&sb->s_uuid, c->uuid); + super_set_uuid(sb, c->uuid, sizeof(c->uuid)); mutex_unlock(&c->umount_mutex); return 0; diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index ebb3ad6b5e7e6bf72f25bd0fad8ce2e16e00862a..eff0e642a52ef238e14e1230aa770e321acdd480 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -428,6 +428,9 @@ struct ubifs_inode { pgoff_t read_in_a_row; int data_len; void *data; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -1522,6 +1525,15 @@ struct 
ubifs_info { struct ubifs_debug_info *dbg; struct ubifs_stats_info *stats; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; extern struct list_head ubifs_infos; diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h index 6360073865dbc3136e3655e1b493a53ca4310dd3..7a38c22787bcd5f548db439244345f34e0cf1565 100644 --- a/fs/xfs/libxfs/xfs_fs.h +++ b/fs/xfs/libxfs/xfs_fs.h @@ -766,6 +766,11 @@ struct xfs_scrub_metadata { # define XFS_XATTR_LIST_MAX 65536 #endif +enum { + XFS_REFLINK_NORMAL = 0, + XFS_REFLINK_PRIMARY = (1 << 0), + XFS_REFLINK_SECONDARY = (1 << 1), +}; /* * ioctl commands that are used by Linux filesystems @@ -840,6 +845,10 @@ struct xfs_scrub_metadata { /* XFS_IOC_GETFSUUID ---------- deprecated 140 */ +#define XFS_IOC_SET_REFLINK_FLAGS _IOW('X', 200, uint32_t) +#define XFS_IOC_GET_REFLINK_FLAGS _IOR('X', 201, uint32_t) +#define XFS_IOC_WAIT_REFLINK_SECONDARY _IOW('X', 202, uint32_t) + #ifndef HAVE_BBMACROS /* * Block I/O parameterization. A basic block (BB) is the lowest size of diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 3b9d43d5c7466259dd4b07c272e607585c1f8375..e4948a481438a21b72cce2ab79ff3a82e533e43b 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -870,9 +870,18 @@ xfs_break_dax_layouts( struct inode *inode, bool *retry) { + struct xfs_inode *ip = XFS_I(inode); struct page *page; - ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL)); + ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL)); + + /* + * For inodes flagged with XFS_REFLINK_{PRIMARY, SECONDARY}, users + * can ensure there are no inflight dio operations on these inodes, + * so we can bypass xfs_break_dax_layouts(BREAK_UNMAP) safely. 
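+	 * Note that the kernel does not verify that guarantee. The flags
+	 * are set from userspace, e.g. (a sketch only, error handling
+	 * omitted):
+	 *
+	 *	uint32_t fl = XFS_REFLINK_PRIMARY;
+	 *	ioctl(fd, XFS_IOC_SET_REFLINK_FLAGS, &fl);
+	 *
+	 * so the absence of inflight dio is purely a userspace contract.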
+	 */
+	if (ip->i_reflink_flags & (XFS_REFLINK_PRIMARY | XFS_REFLINK_SECONDARY))
+		return 0;
 
 	page = dax_layout_busy_page(inode->i_mapping);
 	if (!page)
@@ -1209,6 +1218,19 @@ xfs_file_remap_range(
 	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
 		xfs_log_force_inode(dest);
+
+	if (remapped && (src->i_reflink_flags & XFS_REFLINK_PRIMARY)) {
+		mutex_lock(&mp->m_reflink_opt_lock);
+		src->i_reflink_opt_ip = dest;
+		dest->i_reflink_opt_ip = src;
+		mutex_unlock(&mp->m_reflink_opt_lock);
+
+		if (!xfs_has_rmapbt(mp)) {
+			set_bit(AS_FSDAX_NORMAP, &VFS_I(src)->i_mapping->flags);
+			set_bit(AS_FSDAX_NORMAP, &VFS_I(dest)->i_mapping->flags);
+		}
+	}
+
 out_unlock:
 	xfs_iunlock2_remapping(src, dest);
 	if (ret)
diff --git a/fs/xfs/xfs_globals.c b/fs/xfs/xfs_globals.c
index 9edc1f2bc9399eca73414ffa2f6da89db7f59964..afeeef53a7b026c1aa1436cebbc2cf191884658c 100644
--- a/fs/xfs/xfs_globals.c
+++ b/fs/xfs/xfs_globals.c
@@ -30,6 +30,7 @@ xfs_param_t xfs_params = {
 	.inherit_nodfrg	= {	0,		1,	1	},
 	.fstrm_timer	= {	1,		30*100,	3600*100},
 	.blockgc_timer	= {	1,		300,	3600*24},
+	.reflink_inactive_force_log_period = { 0, 5, 1000 },
 };
 
 struct xfs_globals xfs_globals = {
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 57a9f23175250a239c656f53cb9574ffa96c3290..6f80df4f6bd97977af48b4c131090cc92f55fdad 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -107,6 +107,8 @@ xfs_inode_alloc(
 	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
 	ip->i_nblocks = 0;
 	ip->i_forkoff = 0;
+	ip->i_reflink_flags = 0;
+	ip->i_reflink_opt_ip = NULL;
 	ip->i_sick = 0;
 	ip->i_checked = 0;
 	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
@@ -114,7 +116,7 @@ xfs_inode_alloc(
 	spin_lock_init(&ip->i_ioend_lock);
 	ip->i_next_unlinked = NULLAGINO;
 	ip->i_prev_unlinked = 0;
-
+	INIT_LIST_HEAD(&ip->i_reflink_opt_gclist);
 	return ip;
 }
 
@@ -385,6 +387,8 @@ xfs_iget_recycle(
 	 */
 	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
 	ip->i_flags |= XFS_INEW;
+	ip->i_reflink_flags = 0;
+	ip->i_reflink_opt_ip = NULL;
 	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
 			XFS_ICI_RECLAIM_TAG);
 	inode->i_state = I_NEW;
@@ -1824,7 +1828,7 @@ xfs_inodegc_set_reclaimable(
  * This is the last chance to make changes to an otherwise unreferenced file
  * before incore reclamation happens.
  */
-static int
+int
 xfs_inodegc_inactivate(
 	struct xfs_inode *ip)
 {
@@ -1837,6 +1841,40 @@ xfs_inodegc_inactivate(
 }
 
+void
+xfs_inodegc_reflink_opt_worker(
+	struct work_struct	*work)
+{
+	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
+						   m_reflink_opt_gcwork);
+	struct xfs_inode	*ip;
+
+	while (1) {
+		spin_lock(&mp->m_reflink_opt_gclock);
+		/*
+		 * The foreground wait ioctl may inactivate a specific inode
+		 * itself; in that case we will no longer see it on the list.
+		 */
+		if (list_empty(&mp->m_reflink_opt_gclist)) {
+			spin_unlock(&mp->m_reflink_opt_gclock);
+			break;
+		}
+		ip = list_first_entry(&mp->m_reflink_opt_gclist,
+				struct xfs_inode, i_reflink_opt_gclist);
+		/*
+		 * Otherwise we detach the inode ourselves under the gclock;
+		 * in that case the foreground ioctl sees an empty list (it
+		 * also checks list_empty() under the gclock).
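+		 * Either way each inode leaves the list exactly once,
+		 * because both sides test and detach entries under
+		 * m_reflink_opt_gclock.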
+ */ + list_del_init(&ip->i_reflink_opt_gclist); + spin_unlock(&mp->m_reflink_opt_gclock); + + ASSERT(ip->i_flags & XFS_NEED_INACTIVE); + xfs_iflags_set(ip, XFS_INACTIVATING); + xfs_inodegc_inactivate(ip); + } +} + void xfs_inodegc_worker( struct work_struct *work) @@ -2071,6 +2109,22 @@ xfs_inodegc_queue( unsigned long queue_delay = 1; trace_xfs_inode_set_need_inactive(ip); + + if ((ip->i_reflink_flags & XFS_REFLINK_SECONDARY) && + /* ip->i_reflink_opt_ip won't be changed here since we're the owner */ + READ_ONCE(ip->i_reflink_opt_ip)) { + /* gclist will be attached before marking XFS_NEED_INACTIVE */ + spin_lock(&mp->m_reflink_opt_gclock); + list_add_tail(&ip->i_reflink_opt_gclist, + &mp->m_reflink_opt_gclist); + queue_work(mp->m_inodegc_wq, + &mp->m_reflink_opt_gcwork); + spin_unlock(&mp->m_reflink_opt_gclock); + wake_up_all(&mp->m_reflink_opt_wait); + xfs_iflags_set(ip, XFS_NEED_INACTIVE); + return; + } + spin_lock(&ip->i_flags_lock); ip->i_flags |= XFS_NEED_INACTIVE; spin_unlock(&ip->i_flags_lock); diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h index 905944dafbe539245bc30fe2df2d8681af0c5a04..6646eb2a7654af8ebbfff7ea83328fa8d5ea6b8f 100644 --- a/fs/xfs/xfs_icache.h +++ b/fs/xfs/xfs_icache.h @@ -70,10 +70,12 @@ void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip); void xfs_inode_set_cowblocks_tag(struct xfs_inode *ip); void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip); +int xfs_inodegc_inactivate(struct xfs_inode *ip); void xfs_blockgc_worker(struct work_struct *work); void xfs_blockgc_stop(struct xfs_mount *mp); void xfs_blockgc_start(struct xfs_mount *mp); +void xfs_inodegc_reflink_opt_worker(struct work_struct *work); void xfs_inodegc_worker(struct work_struct *work); void xfs_inodegc_push(struct xfs_mount *mp); int xfs_inodegc_flush(struct xfs_mount *mp); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 6f7dca1c14c75aa1c37561875572f7a514b535e6..73c8b74d9405eae173718079e3698d5670120985 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1356,6 +1356,8 @@ xfs_itruncate_extents_flags( xfs_fileoff_t first_unmap_block; xfs_filblks_t unmap_len; int error = 0; + bool secondary_inactive = false; + int force_count = 0; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(!atomic_read(&VFS_I(ip)->i_count) || @@ -1386,9 +1388,13 @@ xfs_itruncate_extents_flags( return 0; } + if (!new_size && (ip->i_reflink_flags & XFS_REFLINK_SECONDARY)) + secondary_inactive = true; + unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1; while (unmap_len > 0) { ASSERT(tp->t_highest_agno == NULLAGNUMBER); + error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len, flags, XFS_ITRUNC_MAX_EXTENTS); if (error) @@ -1398,6 +1404,14 @@ xfs_itruncate_extents_flags( error = xfs_defer_finish(&tp); if (error) goto out; + + if (secondary_inactive) { + if (xfs_reflink_inactive_force_log_period && + ++force_count >= xfs_reflink_inactive_force_log_period) { + xfs_log_force(mp, 0); + force_count = 0; + } + } } if (whichfork == XFS_DATA_FORK) { @@ -1698,6 +1712,33 @@ xfs_inode_needs_inactive( return xfs_can_free_eofblocks(ip); } +STATIC void +xfs_reflink_opt_disconnect( + struct xfs_mount *mp, + struct xfs_inode *ip, + bool unexpected) +{ + bool valid = false; + + if (!(ip->i_reflink_flags & (XFS_REFLINK_PRIMARY | + XFS_REFLINK_SECONDARY))) + return; + + mutex_lock(&mp->m_reflink_opt_lock); + if (ip->i_reflink_opt_ip) { + ip->i_reflink_opt_ip->i_reflink_opt_ip = NULL; + ip->i_reflink_opt_ip = NULL; + valid = true; + } + mutex_unlock(&mp->m_reflink_opt_lock); + if (valid) { + 
wake_up_all(&mp->m_reflink_opt_wait); + if (unexpected) + xfs_warn(mp, "unexpected early inactivation of paired reflink file, ino %llu", + ip->i_ino); + } +} + /* * xfs_inactive * @@ -1750,6 +1791,7 @@ xfs_inactive( if (xfs_can_free_eofblocks(ip)) error = xfs_free_eofblocks(ip); + xfs_reflink_opt_disconnect(mp, ip, true); goto out; } @@ -1781,6 +1823,8 @@ xfs_inactive( if (error) goto out; + xfs_reflink_opt_disconnect(mp, ip, false); + /* * If there are attributes associated with the file then blow them away * now. The code calls a routine that recursively deconstructs the diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 0f2999b84e7d96d45f08b14255679424ee9bcc3f..1736c10333aadd61f1ffaf5b5e5367b8f3e19430 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -85,6 +85,15 @@ typedef struct xfs_inode { */ xfs_agino_t i_prev_unlinked; + /* flags for controlling reflink cow behavior */ + uint32_t i_reflink_flags; + /* + * Saved reflink ip for the sake of quick unshare; currently we + * only support one paired reflink file under flag XFS_REFLINK_PRIMARY + */ + struct xfs_inode *i_reflink_opt_ip; + struct list_head i_reflink_opt_gclist; + /* VFS inode */ struct inode i_vnode; /* embedded VFS inode */ diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 32e718043e0e2ee9e28ba13e918b3d3727d4059b..458c79ea81d30c17223712308cc85bc2b316c00a 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1892,6 +1892,93 @@ xfs_fs_eofblocks_from_user( #define XFS_IOC_ALLOCSP64 _IOW ('X', 36, struct xfs_flock64) #define XFS_IOC_FREESP64 _IOW ('X', 37, struct xfs_flock64) +static bool +xfs_need_wait_reflink_secondary( + struct xfs_mount *mp, + struct xfs_inode *ip) +{ + struct xfs_inode *sip; + + mutex_lock(&mp->m_reflink_opt_lock); + sip = ip->i_reflink_opt_ip; + if (!sip /* pair no longer valid */ || + (READ_ONCE(sip->i_flags) & XFS_NEED_INACTIVE) /* retry now */) { + mutex_unlock(&mp->m_reflink_opt_lock); + return false; + } + mutex_unlock(&mp->m_reflink_opt_lock); + return true; +} + +int +xfs_wait_reflink_secondary( + struct xfs_mount *mp, + struct xfs_inode *ip, + u32 timeout_sec) +{ + struct xfs_inode *sip; + unsigned long expire = 0; + + if (!(ip->i_reflink_flags & XFS_REFLINK_PRIMARY)) + return -EINVAL; + if (timeout_sec) + expire = jiffies + HZ * timeout_sec; +retry: + mutex_lock(&mp->m_reflink_opt_lock); + sip = ip->i_reflink_opt_ip; + if (!sip) { + mutex_unlock(&mp->m_reflink_opt_lock); + return 0; + } + spin_lock(&sip->i_flags_lock); + /* + * Decide here whether this inode needs to be inactivated + * immediately. + */ + /* already being inactivated by someone else? */ + if ((sip->i_flags & XFS_INACTIVATING) || + /* the inode isn't reclaimable (active or race). */ + !(sip->i_flags & (XFS_NEED_INACTIVE | XFS_INACTIVATING))) { + spin_unlock(&sip->i_flags_lock); + mutex_unlock(&mp->m_reflink_opt_lock); + if (fatal_signal_pending(current)) + return -EINTR; + if (timeout_sec) { + if (time_after(jiffies, expire)) + return -ETIMEDOUT; + wait_event_killable_timeout(mp->m_reflink_opt_wait, + !xfs_need_wait_reflink_secondary(mp, ip), + HZ * timeout_sec); + } else { + wait_event_killable(mp->m_reflink_opt_wait, + !xfs_need_wait_reflink_secondary(mp, ip)); + } + goto retry; + } + spin_unlock(&sip->i_flags_lock); + + /* + * The inode is already on the gc list, since XFS_NEED_INACTIVE is + * only set after it is attached; try to drop it from the gc list.
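
From userspace, the retry loop above collapses into one blocking call on the primary file. A hypothetical caller (the request number comes from the xfs_fs.h hunk earlier; the path and timeout are invented)::

    #include <stdint.h>
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    #define XFS_IOC_WAIT_REFLINK_SECONDARY  _IOW('X', 202, uint32_t)

    int main(void)
    {
        uint32_t timeout_sec = 10;     /* 0 means wait without a deadline */
        int fd = open("/mnt/xfs/primary", O_RDWR);

        if (fd < 0)
            return 1;
        /* Returns 0 once the secondary has been inactivated (or was never
         * paired); errno is ETIMEDOUT or EINTR when the kernel-side loop
         * gives up, mirroring the code above. */
        if (ioctl(fd, XFS_IOC_WAIT_REFLINK_SECONDARY, &timeout_sec))
            perror("wait for secondary");
        close(fd);
        return 0;
    }
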
+ */ + spin_lock(&mp->m_reflink_opt_gclock); + /* if the bg kworker decided to handle it instead, we will hit list_empty */ + if (list_empty(&sip->i_reflink_opt_gclist)) { + spin_unlock(&mp->m_reflink_opt_gclock); + mutex_unlock(&mp->m_reflink_opt_lock); + goto retry; + } + list_del_init(&sip->i_reflink_opt_gclist); + spin_unlock(&mp->m_reflink_opt_gclock); + mutex_unlock(&mp->m_reflink_opt_lock); + + /* XFS_NEED_INACTIVE will be stable here. */ + ASSERT(sip->i_flags & XFS_NEED_INACTIVE); + xfs_iflags_set(sip, XFS_INACTIVATING); + xfs_inodegc_inactivate(sip); + return 0; +} + /* * Note: some of the ioctls return positive numbers as a * byte count indicating success, such as readlink_by_handle. @@ -2171,6 +2258,48 @@ xfs_file_ioctl( return error; } + case XFS_IOC_SET_REFLINK_FLAGS: { + uint32_t in; + + if (get_user(in, (uint32_t __user *)arg)) + return -EFAULT; + + /* invalid values */ + if ((in & ~(XFS_REFLINK_PRIMARY | XFS_REFLINK_SECONDARY)) || + (in & (XFS_REFLINK_PRIMARY | XFS_REFLINK_SECONDARY)) == + (XFS_REFLINK_PRIMARY | XFS_REFLINK_SECONDARY)) + return -EINVAL; + + /* clearing all flags is not allowed */ + if (!in) + return -EINVAL; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + if (!ip->i_reflink_flags) { + ip->i_reflink_flags = in; + } else if (ip->i_reflink_flags != in) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return -EINVAL; + } + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return 0; + } + + case XFS_IOC_GET_REFLINK_FLAGS: { + if (put_user(ip->i_reflink_flags, (uint32_t __user *)arg)) + return -EFAULT; + return 0; + } + + case XFS_IOC_WAIT_REFLINK_SECONDARY: { + u32 timeout_sec; + + if (get_user(timeout_sec, (uint32_t __user *)arg)) + return -EFAULT; + + return xfs_wait_reflink_secondary(mp, ip, timeout_sec); + } + default: return -ENOTTY; } diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h index 38be600b5e1e8391c52f0fdf0db69ff6f498125a..f74bb55133d944687b8db1d760145d491f522ae6 100644 --- a/fs/xfs/xfs_ioctl.h +++ b/fs/xfs/xfs_ioctl.h @@ -69,4 +69,6 @@ int xfs_fsbulkstat_one_fmt(struct xfs_ibulk *breq, const struct xfs_bulkstat *bstat); int xfs_fsinumbers_fmt(struct xfs_ibulk *breq, const struct xfs_inumbers *igrp); +int xfs_wait_reflink_secondary(struct xfs_mount *mp, struct xfs_inode *ip, u32 timeout_sec); + #endif diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index e9d317a3dafe4e673c439e504bc7fc24ca503dbe..41828f4e5d7c31ccefcb9297ced6e1a291c87884 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -103,6 +103,7 @@ typedef __u32 xfs_nlink_t; #define xfs_inherit_nodefrag xfs_params.inherit_nodfrg.val #define xfs_fstrm_centisecs xfs_params.fstrm_timer.val #define xfs_blockgc_secs xfs_params.blockgc_timer.val +#define xfs_reflink_inactive_force_log_period xfs_params.reflink_inactive_force_log_period.val #define current_cpu() (raw_smp_processor_id()) #define current_set_flags_nested(sp, f) \ diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 0a0fd19573d8cea1d253ea412703d82b979b8c34..27141289c9d615f5bb69ca393f9364efe58ae6f9 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -62,7 +62,7 @@ xfs_uuid_mount( int hole, i; /* Publish UUID in struct super_block */ - uuid_copy(&mp->m_super->s_uuid, uuid); + super_set_uuid(mp->m_super, uuid->b, sizeof(*uuid)); if (xfs_has_nouuid(mp)) return 0; diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index d19cca099bc3a701786c3e5e9871f086ac3f290c..348d2eb9f649826a3e2f888256893b2365e0ac4b 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -250,6 +250,12 @@ typedef struct xfs_mount { /* cpus that have inodes queued for
inactivation */ struct cpumask m_inodegc_cpumask; + + struct mutex m_reflink_opt_lock; + spinlock_t m_reflink_opt_gclock; + struct list_head m_reflink_opt_gclist; + struct work_struct m_reflink_opt_gcwork; + struct wait_queue_head m_reflink_opt_wait; } xfs_mount_t; #define M_IGEO(mp) (&(mp)->m_ino_geo) diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c index a7daa522e00fe758039b0811bfc9568b28dd1466..30655551dc756bf6db32a64d00c0d6bb3392146b 100644 --- a/fs/xfs/xfs_notify_failure.c +++ b/fs/xfs/xfs_notify_failure.c @@ -22,6 +22,7 @@ #include #include +#include struct xfs_failure_info { xfs_agblock_t startblock; @@ -173,6 +174,128 @@ xfs_dax_notify_ddev_failure( return error; } +static int +xfs_mf_dax_kill_procs( + struct xfs_mount *mp, + struct address_space *mapping, + pgoff_t pgoff, + unsigned long nrpages, + int mf_flags, + bool share) +{ + int rc, rc2 = 0; + + if (share) { + struct xfs_inode *ip = XFS_I(mapping->host); + + mutex_lock(&mp->m_reflink_opt_lock); + if (ip->i_reflink_opt_ip) { + rc2 = mf_dax_kill_procs(VFS_I(ip->i_reflink_opt_ip)->i_mapping, + pgoff, nrpages, mf_flags); + } else { + xfs_warn(mp, "this mode should be only used with REFLINK_PRIMARY|REFLINK_SECONDARY @ ino %llu", + ip->i_ino); + } + mutex_unlock(&mp->m_reflink_opt_lock); + } + rc = mf_dax_kill_procs(mapping, pgoff, nrpages, mf_flags); + iput(mapping->host); + return rc ? rc : rc2; +} + +static int +xfs_dax_notify_ddev_failure2( + struct dax_device *dax_dev, + struct xfs_mount *mp, + loff_t pos, + size_t size, + int mf_flags) +{ + struct address_space *lmapping = NULL; + bool lshare = false; + pfn_t pfn; + pgoff_t pgoff, lpgoff; + unsigned long nrpages; + long length; + int rc, id; + + rc = bdev_dax_pgoff(mp->m_ddev_targp->bt_bdev, pos >> SECTOR_SHIFT, + size, &pgoff); + if (rc) + return rc; + id = dax_read_lock(); + length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), DAX_ACCESS, + NULL, &pfn); + if (length < 0) { + rc = length; + goto out; + } + + if (PFN_PHYS(length) < size) { + rc = -EINVAL; + goto out; + } + rc = 0; + while (length) { + struct page *page; + struct address_space *mapping; + bool share = false; + + page = pfn_t_to_page(pfn); + pfn.val++; + --length; + +retry: + rcu_read_lock(); + mapping = page ? 
READ_ONCE(page->mapping) : NULL; + if (mapping) { + share = (unsigned long)mapping & PAGE_MAPPING_DAX_SHARED; + mapping = (void *)((unsigned long)mapping & ~PAGE_MAPPING_DAX_SHARED); + if (!igrab(mapping->host)) { + rcu_read_unlock(); + goto retry; + } + /* paired with smp_mb() in dax_page_share_get() to ensure valid index */ + smp_mb(); + if (!share) { + pgoff = READ_ONCE(page->index); + } else { + WARN_ON(!test_bit(AS_FSDAX_NORMAP, &mapping->flags)); + pgoff = READ_ONCE(page->private); + } + } + rcu_read_unlock(); + + if (lmapping) { + if (mapping != lmapping || share != lshare || + lpgoff + nrpages != pgoff) { + rc = xfs_mf_dax_kill_procs(mp, lmapping, lpgoff, + nrpages, mf_flags, lshare); + if (rc) + break; + } else { + nrpages++; + continue; + } + } + lmapping = mapping; + lpgoff = pgoff; + lshare = share; + nrpages = 1; + } + + if (lmapping) { + int rc2; + + rc2 = xfs_mf_dax_kill_procs(mp, lmapping, lpgoff, nrpages, mf_flags, lshare); + if (!rc) + rc = rc2; + } +out: + dax_read_unlock(id); + return rc; +} + static int xfs_dax_notify_failure( struct dax_device *dax_dev, @@ -202,11 +325,6 @@ xfs_dax_notify_failure( return -EFSCORRUPTED; } - if (!xfs_has_rmapbt(mp)) { - xfs_debug(mp, "notify_failure() needs rmapbt enabled!"); - return -EOPNOTSUPP; - } - ddev_start = mp->m_ddev_targp->bt_dax_part_off; ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1; @@ -226,6 +344,9 @@ xfs_dax_notify_failure( if (offset + len - 1 > ddev_end) len = ddev_end - offset + 1; + if (!xfs_has_rmapbt(mp)) + return xfs_dax_notify_ddev_failure2(dax_dev, mp, offset, len, + mf_flags); return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), BTOBB(len), mf_flags); } diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 3431d0d8b6f3a1ed9deba7f741b3913f5cc9dd65..387322536ff336934dd63c51ece1325bb0f1f9d8 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -27,6 +27,8 @@ #include "xfs_quota.h" #include "xfs_reflink.h" #include "xfs_iomap.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" #include "xfs_ag.h" #include "xfs_ag_resv.h" @@ -500,6 +502,139 @@ xfs_reflink_fill_delalloc( return error; } +#ifdef CONFIG_FS_DAX +STATIC int +xfs_reflink_unshare_range( + struct xfs_inode *src, + struct xfs_bmbt_irec *oimap, + bool *secondary_evicting) +{ + struct xfs_mount *mp = src->i_mount; + struct xfs_inode *ip; + xfs_fileoff_t offset_fsb = oimap->br_startoff; + xfs_filblks_t count_fsb = oimap->br_blockcount; + struct xfs_trans *tp; + int nimaps, error = 0; + bool shared, found; + xfs_filblks_t resaligned; + xfs_extlen_t resblks = 0; + uint lockmode = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; + struct xfs_bmbt_irec imap = *oimap; + struct xfs_bmbt_irec cmap; + + mutex_lock(&mp->m_reflink_opt_lock); + ip = src->i_reflink_opt_ip; + if (!ip || !igrab(VFS_I(ip))) { + mutex_unlock(&mp->m_reflink_opt_lock); + *secondary_evicting = true; + return 0; + } + mutex_unlock(&mp->m_reflink_opt_lock); + + xfs_ilock(ip, lockmode); + xfs_flush_unmap_range(ip, XFS_FSB_TO_B(mp, imap.br_startoff), + XFS_FSB_TO_B(mp, imap.br_blockcount)); + + error = xfs_find_trim_cow_extent(ip, &imap, &cmap, &shared, &found); + if (error || !shared) + goto error; + + if (found) + goto convert; + + resaligned = xfs_aligned_fsb_count(imap.br_startoff, + imap.br_blockcount, xfs_get_cowextsz_hint(ip)); + resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp); + if (error) { + lockmode = XFS_IOLOCK_EXCL | 
XFS_MMAPLOCK_EXCL; + goto error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + error = xfs_qm_dqattach_locked(ip, false); + if (error) + goto out_trans_cancel; + + /* + * Check for an overlapping extent again now that we dropped the ilock. + */ + error = xfs_find_trim_cow_extent(ip, &imap, &cmap, &shared, &found); + if (error || !shared) + goto out_trans_cancel; + if (found) { + xfs_trans_cancel(tp); + goto convert; + } + + error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0, + XFS_QMOPT_RES_REGBLKS); + if (error) + goto out_trans_cancel; + + xfs_trans_ijoin(tp, ip, 0); + + /* Allocate the entire reservation as zeroed blocks. */ + nimaps = 1; + error = xfs_bmapi_write(tp, ip, imap.br_startoff, imap.br_blockcount, + XFS_BMAPI_COWFORK | XFS_BMAPI_ZERO, resblks, &cmap, &nimaps); + if (error) + goto out_trans_cancel; + + xfs_inode_set_cowblocks_tag(ip); + error = xfs_trans_commit(tp); + if (error) + goto error; + + /* + * Allocation succeeded but the requested range was not even partially + * satisfied? Bail out! + */ + if (nimaps == 0) { + error = -ENOSPC; + goto error; + } +convert: + xfs_trim_extent(&cmap, offset_fsb, count_fsb); + trace_xfs_reflink_convert_cow(ip, &cmap); + error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb); + if (error) + goto error; + cmap.br_state = XFS_EXT_NORM; + dax_copy_range(xfs_inode_buftarg(ip)->bt_bdev, + xfs_inode_buftarg(ip)->bt_daxdev, + BBTOB(xfs_fsb_to_db(ip, oimap->br_startblock)), + BBTOB(xfs_fsb_to_db(ip, cmap.br_startblock)), + XFS_FSB_TO_B(mp, cmap.br_blockcount)); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_reflink_end_cow(ip, XFS_FSB_TO_B(mp, cmap.br_startoff), + XFS_FSB_TO_B(mp, cmap.br_blockcount)); + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL); + xfs_irele(ip); + return error; + +out_trans_cancel: + xfs_trans_cancel(tp); + +error: + xfs_iunlock(ip, lockmode); + xfs_irele(ip); + return error; +} +#else +STATIC int +xfs_reflink_unshare_range( + struct xfs_inode *src, + struct xfs_bmbt_irec *oimap, + bool *secondary_evicting) +{ + return 0; +} +#endif + /* Allocate all CoW reservations covering a range of blocks in a file. */ int xfs_reflink_allocate_cow( @@ -512,6 +647,7 @@ xfs_reflink_allocate_cow( { int error; bool found; + bool secondary_evicting = false; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (!ip->i_cowfp) { @@ -528,6 +664,26 @@ return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now); + if (ip->i_reflink_flags & XFS_REFLINK_PRIMARY) { + xfs_iunlock(ip, *lockmode); + error = xfs_reflink_unshare_range(ip, imap, + &secondary_evicting); + xfs_ilock(ip, *lockmode); + if (error) { + xfs_warn(ip->i_mount, + "failed to unshare secondary range @ ino %llu", + ip->i_ino); + } else if (secondary_evicting) { + /* + * It's impossible to have another reflink here (racing with + * FICLONE) since ip takes XFS_MMAPLOCK_SHARED lock and FICLONE + * needs XFS_MMAPLOCK_EXCL. + */ + *shared = false; + return 0; + } + } + /* * CoW fork does not have an extent and data extent is shared. * Allocate a real extent in the CoW fork.
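
xfs_reflink_unshare_range() above follows the common XFS shape of dropping the inode lock to allocate a transaction and then revalidating after relocking, since another thread may have created the COW extent in the window. A generic, userspace model of that drop-relock-revalidate pattern, with invented names::

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;
    static bool cow_extent_exists;      /* state guarded by ilock */

    /* Stand-in for a blocking reservation that must not be made while
     * holding the inode lock (xfs_trans_alloc() in the patch). */
    static void alloc_transaction(void) { }

    static void ensure_cow_extent(void)
    {
        pthread_mutex_lock(&ilock);
        if (cow_extent_exists) {        /* "found": nothing to allocate */
            pthread_mutex_unlock(&ilock);
            return;
        }
        pthread_mutex_unlock(&ilock);   /* drop the lock to reserve */

        alloc_transaction();

        pthread_mutex_lock(&ilock);
        /* Revalidate: someone may have raced in while we were unlocked. */
        if (!cow_extent_exists) {
            cow_extent_exists = true;
            printf("allocated COW extent\n");
        } else {
            /* the patch cancels the now-unneeded transaction here */
            printf("lost the race, reusing the existing extent\n");
        }
        pthread_mutex_unlock(&ilock);
    }

    int main(void)
    {
        ensure_cow_extent();
        ensure_cow_extent();
        return 0;
    }
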
@@ -1480,6 +1636,27 @@ xfs_reflink_remap_prep( if (IS_DAX(inode_in) != IS_DAX(inode_out)) goto out_unlock; + if (src->i_reflink_flags & XFS_REFLINK_PRIMARY) { + if (!(dest->i_reflink_flags & XFS_REFLINK_SECONDARY)) + goto out_unlock; + if (pos_in != pos_out) + goto out_unlock; + if (src->i_reflink_opt_ip || dest->i_reflink_opt_ip) { + xfs_warn(src->i_mount, + "src (XFS_REFLINK_PRIMARY) and/or dest (XFS_REFLINK_SECONDARY) already paired by an earlier FICLONE"); + goto out_unlock; + } + } + + /* + * For inodes flagged with XFS_REFLINK_{PRIMARY, SECONDARY}, + * users do not need persistence, so we can apply fast reflink, + * i.e., write-protect without flushing dirty data. + */ + if (src->i_reflink_flags & (XFS_REFLINK_PRIMARY | + XFS_REFLINK_SECONDARY)) + remap_flags |= REMAP_FILE_FAST_REFLINK; + if (!IS_DAX(inode_in)) ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, len, remap_flags); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 13007b6bc9f3378a9dd24820602a829ea10a896f..b6db17213816502c3d27dfa1ee977b09657a543a 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1528,6 +1528,11 @@ xfs_fs_fill_super( #endif sb->s_op = &xfs_super_operations; + spin_lock_init(&mp->m_reflink_opt_gclock); + INIT_LIST_HEAD(&mp->m_reflink_opt_gclist); + INIT_WORK(&mp->m_reflink_opt_gcwork, xfs_inodegc_reflink_opt_worker); + init_waitqueue_head(&mp->m_reflink_opt_wait); + /* * Delay mount work if the debug hook is set. This is debug * instrumentation to coordinate simulation of xfs mount failures with @@ -2002,6 +2007,8 @@ static int xfs_init_fs_context( INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); spin_lock_init(&mp->m_perag_lock); mutex_init(&mp->m_growlock); + mutex_init(&mp->m_reflink_opt_lock); + INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker); INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); mp->m_kobj.kobject.kset = xfs_kset; diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c index fade337353931663ec17ff72a47d74b9053c901c..6b93b230166cbe08202f19d3b83bfb670d392af9 100644 --- a/fs/xfs/xfs_sysctl.c +++ b/fs/xfs/xfs_sysctl.c @@ -113,6 +113,15 @@ static struct ctl_table xfs_table[] = { .extra1 = &xfs_params.syncd_timer.min, .extra2 = &xfs_params.syncd_timer.max }, + { + .procname = "reflink_inactive_force_log_period", + .data = &xfs_params.reflink_inactive_force_log_period.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.reflink_inactive_force_log_period.min, + .extra2 = &xfs_params.reflink_inactive_force_log_period.max + }, { .procname = "inherit_sync", .data = &xfs_params.inherit_sync.val, diff --git a/fs/xfs/xfs_sysctl.h b/fs/xfs/xfs_sysctl.h index f78ad6b10ea58dedb0f93773abfc5f156de08f85..726eb447bb49ff4221280f5630cab288a933b5d2 100644 --- a/fs/xfs/xfs_sysctl.h +++ b/fs/xfs/xfs_sysctl.h @@ -36,6 +36,7 @@ typedef struct xfs_param { xfs_sysctl_val_t inherit_nodfrg;/* Inherit the "nodefrag" inode flag. */ xfs_sysctl_val_t fstrm_timer; /* Filestream dir-AG assoc'n timeout.
*/ xfs_sysctl_val_t blockgc_timer; /* Interval between blockgc scans */ + xfs_sysctl_val_t reflink_inactive_force_log_period; } xfs_param_t; /* diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index d9c20ae23b63267738b11d601b1114eb546aa44e..a867baf7beab0dc28240e256c096e1f0c6486052 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -282,6 +282,8 @@ struct acpi_device_power { struct acpi_device_power_flags flags; struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */ u8 state_for_enumeration; /* Deepest power state for enumeration */ + + CK_KABI_RESERVE(1) }; struct acpi_dep_data { diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 3751ae69432f1285533a61db8e0c31d42ed5fa31..8104c262bbae9b6d517841827b127ce22c584861 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -897,7 +897,8 @@ enum acpi_madt_type { ACPI_MADT_TYPE_APLIC = 26, ACPI_MADT_TYPE_PLIC = 27, ACPI_MADT_TYPE_RESERVED = 28, /* 28 to 0x7F are reserved */ - ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */ + ACPI_MADT_TYPE_OEM_RESERVED = 0x80, /* 0x80 to 0xFF are reserved for OEM use */ + ACPI_MADT_TYPE_PHYTIUM_2500 = 128 }; /* diff --git a/include/acpi/apei.h b/include/acpi/apei.h index dc60f7db5524f2054024f750ad878fe236f20388..808cfa7d16b11f9487be14d6d39d98d5be55612f 100644 --- a/include/acpi/apei.h +++ b/include/acpi/apei.h @@ -52,6 +52,8 @@ int erst_clear(u64 record_id); int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data); void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err); +void arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err); +bool arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err); #endif #endif diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index be1dd4c1a917447c1d20d59ef0de7df423758e1d..82eba57ac423a0ee6822d085b9d0cc7509d72c69 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -133,4 +133,104 @@ static inline int ghes_notify_sea(void) { return -ENOENT; } struct notifier_block; extern void ghes_register_report_chain(struct notifier_block *nb); extern void ghes_unregister_report_chain(struct notifier_block *nb); + +#ifdef CONFIG_YITIAN_CPER_RAWDATA +#pragma pack(1) +struct yitian_raw_data_header { + uint32_t signature; /* 'r' 'a' 'w' 'd' */ + uint8_t type; + uint8_t common_reg_nr; + /* one record may have multiple sub-record (up to 6) */ + uint8_t sub_type[6]; +}; + +struct yitian_ras_common_reg { + uint64_t fr; + uint64_t ctrl; + uint64_t status; + uint64_t addr; + uint64_t misc0; + uint64_t misc1; + uint64_t misc2; + uint64_t misc3; +}; + +enum yitian_ras_type { + ERR_TYPE_GENERIC = 0x40, + ERR_TYPE_CORE = 0x41, + ERR_TYPE_GIC = 0x42, + ERR_TYPE_CMN = 0x43, + ERR_TYPE_SMMU = 0x44, + ERR_TYPE_DDR = 0x50, + ERR_TYPE_PCI = 0x60 +}; + +enum cmn_node_type { + NODE_TYPE_DVM = 0x1, + NODE_TYPE_CFG = 0x2, + NODE_TYPE_DTC = 0x3, + NODE_TYPE_HN_I = 0x4, + NODE_TYPE_HN_F = 0x5, + NODE_TYPE_XP = 0x6, + NODE_TYPE_SBSX = 0x7, + NODE_TYPE_MPAM_S = 0x8, + NODE_TYPE_MPAM_NS = 0x9, + NODE_TYPE_RN_I = 0xA, + NODE_TYPE_RN_D = 0xD, + NODE_TYPE_RN_SAM = 0xF, + NODE_TYPE_HN_P = 0x11, + /* Coherent Multichip Link (CML) node types */ + NODE_TYPE_CML_BASE = 0x100, + NODE_TYPE_CXRA = 0x100, + NODE_TYPE_CXHA = 0x101, + NODE_TYPE_CXLA = 0x102, + NODE_TYPE_CCRA = 0x103, + NODE_TYPE_CCHA = 0x104, + NODE_TYPE_CCLA = 0x105, +}; + +struct yitian_ddr_sys_reg { + uint64_t esr; + uint64_t elr; + uint64_t far; + uint64_t 
scr; + uint64_t sctlr; + uint64_t lr; +}; + +struct yitian_ddr_ecc_data { + uint32_t eccerrcnt; + uint32_t eccstat; + uint32_t adveccstat; + uint32_t eccsymbol; + uint32_t eccerrcntstat; + uint32_t eccerrcnt0; + uint32_t eccerrcnt1; + uint32_t ecccaddr0; + uint32_t ecccaddr1; + uint32_t ecccdata0; + uint32_t ecccdata1; + uint32_t eccuaddr0; + uint32_t eccuaddr1; + uint32_t eccudata0; + uint32_t eccudata1; +}; + +struct yitian_ddr_raw_data { + uint32_t intr; /* interrupt num, valid for interrupt only, for exception intr=0 */ + uint8_t ex_type; /* 1:sync exception 2:interrupt 3:Serror */ + uint8_t el_nr; /* error el, only valid for ex_type==1, 0:el0 1:el1 2:el2 */ + uint8_t err_type; /* 1:ecc 2:CA parity 3:R/W CRC */ + struct yitian_ddr_sys_reg sys_regs; /* Only valid for ex_type==1 */ + struct yitian_ddr_ecc_data ecc_data; /* Only valid for err_type==1 */ +}; + +#pragma pack() + +#define yitian_estatus_for_each_raw_reg_common(header, reg, nr) \ + for (reg = (struct yitian_ras_common_reg *)(header + 1); \ + nr < header->common_reg_nr; \ + reg++, nr++) +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ + #endif /* GHES_H */ diff --git a/include/acpi/pdc_sw64.h b/include/acpi/pdc_sw64.h new file mode 100644 index 0000000000000000000000000000000000000000..4724f10e8c6af2f6c1f9553e00b0d3880fb5b661 --- /dev/null +++ b/include/acpi/pdc_sw64.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_PDC_SW64_H +#define _ASM_PDC_SW64_H + +#define ACPI_PDC_P_FFH (0x0001) +#define ACPI_PDC_C_C1_HALT (0x0002) +#define ACPI_PDC_T_FFH (0x0004) +#define ACPI_PDC_SMP_C1PT (0x0008) +#define ACPI_PDC_SMP_C2C3 (0x0010) +#define ACPI_PDC_SMP_P_SWCOORD (0x0020) +#define ACPI_PDC_SMP_C_SWCOORD (0x0040) +#define ACPI_PDC_SMP_T_SWCOORD (0x0080) +#define ACPI_PDC_C_C1_FFH (0x0100) +#define ACPI_PDC_C_C2C3_FFH (0x0200) +#define ACPI_PDC_SMP_P_HWCOORD (0x0800) + +#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_SMP_P_SWCOORD | \ + ACPI_PDC_SMP_P_HWCOORD | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \ + ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_C_C1_FFH | \ + ACPI_PDC_C_C2C3_FFH) + +#endif /* _ASM_PDC_SW64_H */ diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 941be574bbe000ac801be295e53de00bf82d56ec..ca2be8eaba5eaf28d1d9592f983e1e0a1a182c2f 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -11,6 +11,7 @@ mandatory-y += bitops.h mandatory-y += bug.h mandatory-y += bugs.h mandatory-y += cacheflush.h +mandatory-y += cfi.h mandatory-y += checksum.h mandatory-y += compat.h mandatory-y += current.h diff --git a/include/asm-generic/cfi.h b/include/asm-generic/cfi.h new file mode 100644 index 0000000000000000000000000000000000000000..41fac3537bf94dd863e61fbef3f8f75f13b382c4 --- /dev/null +++ b/include/asm-generic/cfi.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_CFI_H +#define __ASM_GENERIC_CFI_H + +#endif /* __ASM_GENERIC_CFI_H */ diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h index 9d0479f50f97f4c1b9cd717253d342c7ed2cd57e..be1ce406f48189c47eef7192bfd9f82e1ee185e1 100644 --- a/include/asm-generic/early_ioremap.h +++ b/include/asm-generic/early_ioremap.h @@ -32,12 +32,6 @@ extern void early_ioremap_setup(void); */ extern void early_ioremap_reset(void); -/* - * Early copy from unmapped memory to 
kernel mapped memory. - */ -extern void copy_from_early_mem(void *dest, phys_addr_t src, - unsigned long size); - #else static inline void early_ioremap_init(void) { } static inline void early_ioremap_setup(void) { } diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h index 10cd4ffc6ba29563c1f5813c587db69164b23189..f933d99c63e0c73768e3efa82e79e398919203d9 100644 --- a/include/asm-generic/mcs_spinlock.h +++ b/include/asm-generic/mcs_spinlock.h @@ -4,8 +4,8 @@ /* * Architectures can define their own: * - * arch_mcs_spin_lock_contended(l) - * arch_mcs_spin_unlock_contended(l) + * arch_mcs_spin_wait(l) + * arch_mcs_lock_handoff(l, val) * * See kernel/locking/mcs_spinlock.c. */ diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index c75d4a753849398f3c0bae8d60104c7ba3849bd0..f7413f68f8d596f11d5ecee184621d8352f6b1e7 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -4,7 +4,7 @@ #ifdef CONFIG_MMU -#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO) +#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO | __GFP_NOKFENCE) #define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT) /** diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h index c86abf6bc7ba27e10de5292232f37c3ca1b589c0..caad5193913c3a9d5449608bb2a422622d3c1ca1 100644 --- a/include/asm-generic/set_memory.h +++ b/include/asm-generic/set_memory.h @@ -9,5 +9,5 @@ int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); - +int set_memory_np(unsigned long addr, int numpages); #endif diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 129a3a759976598efe88f390847565c7027cecd5..709830274b7565187bed9a427f128d9e94acb3a1 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -69,6 +69,7 @@ * * - tlb_remove_page() / __tlb_remove_page() * - tlb_remove_page_size() / __tlb_remove_page_size() + * - __tlb_remove_folio_pages() * * __tlb_remove_page_size() is the basic primitive that queues a page for * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a @@ -78,6 +79,11 @@ * tlb_remove_page() and tlb_remove_page_size() imply the call to * tlb_flush_mmu() when required and has no return value. * + * __tlb_remove_folio_pages() is similar to __tlb_remove_page(), however, + * instead of removing a single page, remove the given number of consecutive + * pages that are all part of the same (large) folio: just like calling + * __tlb_remove_page() on each page individually. 
+ * + * - tlb_change_page_size() * * call before __tlb_remove_page*() to set the current page-size; implies a @@ -260,9 +266,10 @@ struct mmu_gather_batch { */ #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) -extern bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct encoded_page *page, - int page_size); +extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, + bool delay_rmap, int page_size); +bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page, + unsigned int nr_pages, bool delay_rmap); #ifdef CONFIG_SMP /* @@ -462,13 +469,14 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) static inline void tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) { - if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size)) + if (__tlb_remove_page_size(tlb, page, false, page_size)) tlb_flush_mmu(tlb); } -static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page, unsigned int flags) +static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, + struct page *page, bool delay_rmap) { - return __tlb_remove_page_size(tlb, encode_page(page, flags), PAGE_SIZE); + return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE); } /* tlb_remove_page @@ -592,7 +600,9 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, } #ifndef __tlb_remove_tlb_entry -#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address) +{ +} #endif /** @@ -608,6 +618,26 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) +/** + * tlb_remove_tlb_entries - remember unmapping of multiple consecutive ptes for + * later tlb invalidation. + * + * Similar to tlb_remove_tlb_entry(), but remember unmapping of multiple + * consecutive ptes instead of only a single one. + */ +static inline void tlb_remove_tlb_entries(struct mmu_gather *tlb, + pte_t *ptep, unsigned int nr, unsigned long address) +{ + tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr); + for (;;) { + __tlb_remove_tlb_entry(tlb, ptep, address); + if (--nr == 0) + break; + ptep++; + address += PAGE_SIZE; + } +} + #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ do { \ unsigned long _sz = huge_page_size(h); \ diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 9a022caacf9361a322f84ea6e838541556bfc804..5eb7e205a9bcf96034c384019fe336d160122c76 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -239,6 +239,8 @@ struct drm_private_state_funcs { */ void (*atomic_print_state)(struct drm_printer *p, const struct drm_private_state *state); + + CK_KABI_RESERVE(1) }; /** @@ -338,6 +340,8 @@ struct drm_private_state { * @obj: backpointer to the private object */ struct drm_private_obj *obj; + + CK_KABI_RESERVE(1) }; struct __drm_private_objs_state { diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h index 08e0e3ffad1319d8a32a21b37e1b070ae0a9acaa..667fb0368ef6d5a6a9d987636a72e77aeca8de3a 100644 --- a/include/drm/drm_cache.h +++ b/include/drm/drm_cache.h @@ -74,7 +74,7 @@ static inline bool drm_arch_can_wc_memory(void) * cache coherency mechanism. This means WUC can only be used for write-only * memory regions.
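
Returning to the tlb.h hunk above: tlb_remove_tlb_entries() is documented as equivalent to one flush-range bookkeeping call followed by a per-PTE __tlb_remove_tlb_entry() for each consecutive entry. A tiny standalone model of that expansion (names invented, 4 KiB pages assumed)::

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Stand-ins for the per-range and per-entry hooks in the hunk above. */
    static void flush_pte_range(unsigned long addr, unsigned long size)
    {
        printf("flush  [%#lx, %#lx)\n", addr, addr + size);
    }

    static void remove_tlb_entry(unsigned long addr)
    {
        printf("remove pte at %#lx\n", addr);
    }

    /* Batched form mirroring the loop in tlb_remove_tlb_entries():
     * record the whole range once, then visit each consecutive PTE. */
    static void remove_tlb_entries(unsigned long addr, unsigned int nr)
    {
        flush_pte_range(addr, PAGE_SIZE * nr);
        for (;;) {
            remove_tlb_entry(addr);
            if (--nr == 0)
                break;
            addr += PAGE_SIZE;
        }
    }

    int main(void)
    {
        remove_tlb_entries(0x7f0000000000UL, 3);
        return 0;
    }
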
*/ - return false; + return wc_enabled; #else return true; #endif diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index c0a14b40c039fcff90848d5f3d068452ed070d3a..d3b9612b37da763b525e43e691ccbe3b49301a24 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -153,6 +153,8 @@ struct drm_client_buffer { * @fb: DRM framebuffer */ struct drm_framebuffer *fb; + + CK_KABI_RESERVE(1) }; struct drm_client_buffer * diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index d300fde6c1a47a0330dc17d17725c26fa206235d..baa830c768a303088d3bfe9da5d12d8261afff44 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -321,6 +321,9 @@ struct drm_hdmi_info { /** @dsc_cap: DSC capabilities of the sink */ struct drm_hdmi_dsc_cap dsc_cap; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -383,6 +386,8 @@ enum drm_panel_orientation { struct drm_monitor_range_info { u16 min_vfreq; u16 max_vfreq; + + CK_KABI_RESERVE(1) }; /** @@ -816,6 +821,9 @@ struct drm_display_info { * @quirks: EDID based quirks. Internal to EDID parsing. */ u32 quirks; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; int drm_display_info_set_bus_formats(struct drm_display_info *info, @@ -1335,6 +1343,9 @@ struct drm_connector_funcs { * Allows connectors to create connector-specific debugfs files. */ void (*debugfs_init)(struct drm_connector *connector, struct dentry *root); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -1876,6 +1887,19 @@ struct drm_connector { /** @hdr_sink_metadata: HDR Metadata Information read from sink */ struct hdr_sink_metadata hdr_sink_metadata; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) }; #define obj_to_connector(x) container_of(x, struct drm_connector, base) diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 8b48a1974da3143c7de176e6fe3e01da9c8fc9d8..6fcb4893a5217b67701ab197402174ed142009bf 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -385,6 +385,8 @@ struct drm_crtc_state { /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; + + CK_KABI_RESERVE(1) }; /** @@ -1175,6 +1177,8 @@ struct drm_crtc { * Initialized via drm_self_refresh_helper_init(). */ struct drm_self_refresh_data *self_refresh_data; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 159213786e6e1c549587402b4fd469f4b71d6fcc..190ce62d5dc0e3322690dec42c71edda0a70f134 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1509,6 +1509,8 @@ struct drm_mode_config_helper_funcs { * This hook is optional. */ int (*atomic_commit_setup)(struct drm_atomic_state *state); + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index ec4f543c3d9503d2fa5ac5905e1c5f508355a941..b65d1ff10884ff43398ba119074ae8639b205ff0 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -72,6 +72,8 @@ struct drm_modeset_acquire_ctx { /* Perform interruptible waits on this context. 
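
The CK_KABI_RESERVE() slots threaded through these DRM structures (and the block and BPF structures later in this diff) are padding, so a future patch can add a field without changing the size or field offsets that out-of-tree modules were built against. A simplified sketch of the idea; the real macros live in the tree's kabi header, and the definitions below are stand-ins::

    #include <assert.h>

    /* Simplified stand-ins for the real CK_KABI_* macros. */
    #define CK_KABI_RESERVE(n)     unsigned long ck_reserved##n;
    #define CK_KABI_USE(n, field)  union { unsigned long ck_reserved##n; field; };

    struct widget_v1 {             /* layout shipped to module vendors */
        int state;
        CK_KABI_RESERVE(1)
        CK_KABI_RESERVE(2)
    };

    struct widget_v2 {             /* later: a new field fills slot 1 */
        int state;
        CK_KABI_USE(1, void *new_feature)
        CK_KABI_RESERVE(2)
    };

    int main(void)
    {
        /* Size and offsets, hence the kernel ABI, are unchanged. */
        static_assert(sizeof(struct widget_v1) == sizeof(struct widget_v2),
                      "reserved slot keeps the layout stable");
        return 0;
    }
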
*/ bool interruptible; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index fef775200a81fac8e7d51789f0f2c88067275177..e1ad7ee742dcb79c3d6a7d137ea00e0593c1ccd1 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -758,6 +758,8 @@ struct drm_plane { * scaling. */ struct drm_property *scaling_filter_property; + + CK_KABI_RESERVE(1) }; #define obj_to_plane(x) container_of(x, struct drm_plane, base) diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h index 17e576c80169a820e8d5587b229b2cc2ee369a18..da540bf80762b95478f24b809e26826d19632882 100644 --- a/include/drm/drm_writeback.h +++ b/include/drm/drm_writeback.h @@ -84,6 +84,19 @@ struct drm_writeback_connector { * The name of the connector's fence timeline. */ char timeline_name[32]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) }; /** diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 78a226eba953cc7d58b0e4e33015d33ede086150..279733a10c56f585b68635c03b74de01c1df048e 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -174,6 +174,9 @@ struct ttm_resource_manager { * bdev->lru_lock. */ uint64_t usage; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 1b76d2f83eac6a05058fabec7b913e3fdb644980..487fc3f49e6e335cee8a7cedd13e8165763ef6e0 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -259,7 +259,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); /* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); -#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH) +#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else static inline void @@ -1495,6 +1495,11 @@ int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_cluster(unsigned int cpu); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); +int find_acpi_cache_level_from_id(u32 cache_id); +int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus); +int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus); +int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, u32 cache_level, + cpumask_t *cpus); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { @@ -1516,6 +1521,26 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) { return -EINVAL; } +static inline int find_acpi_cache_level_from_id(u32 cache_id) +{ + return -EINVAL; +} +static inline int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, + cpumask_t *cpus) +{ + return -EINVAL; +} +static inline int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, + cpumask_t *cpus) +{ + return -EINVAL; +} +static inline int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, + u32 cache_level, + cpumask_t *cpus) +{ + return -EINVAL; +} #endif #ifdef CONFIG_ARM64 @@ -1548,4 +1573,8 @@ static inline void acpi_device_notify(struct device *dev) { } static inline void acpi_device_notify_remove(struct device *dev) { } #endif +struct 
acpi_pptt_processor * +acpi_pptt_find_cache_backwards(struct acpi_table_header *table_hdr, + struct acpi_pptt_cache *cache); + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h new file mode 100644 index 0000000000000000000000000000000000000000..6607764919419e7ab5dcbf6680e19da7560919cc --- /dev/null +++ b/include/linux/arm_mpam.h @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd. + +#ifndef __LINUX_ARM_MPAM_H +#define __LINUX_ARM_MPAM_H + +#include +#include +#include + +/* + * The value of the MPAM1_EL1 sysreg when a task is in the default group. + * This is used by the context switch code to use the resctrl CPU property + * instead. The value is modified when CDP is enabled/disabled by mounting + * the resctrl filesystem. + */ +extern u64 mpam_resctrl_default_group; + +#include + +struct mpam_msc; + +enum mpam_msc_iface { + MPAM_IFACE_MMIO, /* a real MPAM MSC */ + MPAM_IFACE_PCC, /* a fake MPAM MSC */ +}; + +enum mpam_class_types { + MPAM_CLASS_CACHE, /* Well known caches, e.g. L2 */ + MPAM_CLASS_MEMORY, /* Main memory */ + MPAM_CLASS_UNKNOWN, /* Everything else, e.g. SMMU */ +}; + +enum mpam_machine_type { + MPAM_DEFAULT_MACHINE, + MPAM_YITIAN710, + + MPAM_NUM_MACHINE_TYPES, +}; + +/* Machine identifier which can be used for vendor-specific MPAM features */ +extern enum mpam_machine_type mpam_current_machine; + +#ifdef CONFIG_ACPI_MPAM +/* Parse the ACPI description of resources entries for this MSC. */ +int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc); +int acpi_mpam_count_msc(void); +enum mpam_machine_type acpi_mpam_get_machine_type(void); +#else +static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc) +{ + return -EINVAL; +} +static inline int acpi_mpam_count_msc(void) { return -EINVAL; } +static inline enum mpam_machine_type acpi_mpam_get_machine_type(void) +{ + return MPAM_DEFAULT_MACHINE; +} +#endif + +int mpam_register_requestor(u16 partid_max, u8 pmg_max); + +int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, int component_id); + +static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) +{ + return val; +} + +/* MPAM counters requires a monitor to be allocated */ +static inline bool resctrl_arch_event_is_free_running(enum resctrl_event_id evt) +{ + return false; +} + +bool resctrl_arch_alloc_capable(void); +bool resctrl_arch_mon_capable(void); +bool resctrl_arch_is_llc_occupancy_enabled(void); +bool resctrl_arch_is_mbm_local_enabled(void); +bool resctrl_arch_is_mbm_total_enabled(void); +bool resctrl_arch_is_mbm_bps_enabled(void); + +/* reset cached configurations, then all devices */ +void resctrl_arch_reset_resources(void); + +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level ignored); +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable); +bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid); +bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid); +void resctrl_arch_set_cpu_default_closid(int cpu, u32 closid); +void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid); +void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 pmg); +void resctrl_arch_sched_in(struct task_struct *tsk); +u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid); +void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid); +u32 
resctrl_arch_system_num_rmid_idx(void); + +struct rdt_resource; +void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid); +void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, void *ctx); + +/* Pseudo lock is not supported by MPAM */ +static inline int resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; } +static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; } +static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; } +static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; } +static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; } + +/* + * The CPU configuration for MPAM is cheap to write, and is only written if it + * has changed. No need for fine grained enables. + */ +static inline void resctrl_arch_enable_mon(void) { } +static inline void resctrl_arch_disable_mon(void) { } +static inline void resctrl_arch_enable_alloc(void) { } +static inline void resctrl_arch_disable_alloc(void) { } + +#endif /* __LINUX_ARM_MPAM_H */ diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index 255701e1251b4ac242456693f998e3940a36851c..28e247dd57731c118f4bdc51a2f1bc6b87aeca30 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -36,6 +36,11 @@ int sdei_event_unregister(u32 event_num); int sdei_event_enable(u32 event_num); int sdei_event_disable(u32 event_num); +int sdei_api_event_interrupt_bind(int hwirq); +int sdei_api_event_disable(u32 event_num); +int sdei_api_event_enable(u32 event_num); +int sdei_api_clear_eoi(int hwirq); +int sdei_api_set_secure_timer_period(int sec); /* GHES register/unregister helpers */ int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 2ad261082bba5f6f0049fa1c642b6ff057f32b5a..edd7b9d621e9f460d0c44b6ea4537127c5017a7a 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -13,6 +13,7 @@ #include #include #include +#include struct page; struct device; @@ -158,6 +159,11 @@ struct bdi_writeback { struct rcu_head rcu; }; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct backing_dev_info { @@ -201,6 +207,11 @@ struct backing_dev_info { #ifdef CONFIG_DEBUG_FS struct dentry *debug_dir; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct wb_lock_cookie { diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 1a97277f99b1b82de9e96eb8b9ca5544f9aa6e3a..81adf07c9637a04512dbb2b7529e0c9de2e9f436 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -159,6 +159,21 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, void wb_memcg_offline(struct mem_cgroup *memcg); void wb_blkcg_offline(struct cgroup_subsys_state *css); +extern bool cgwb_v1; + +static inline bool memcg_blkcg_on_dfl(void) +{ + return cgroup_subsys_on_dfl(memory_cgrp_subsys) && + cgroup_subsys_on_dfl(io_cgrp_subsys); +} + +static inline bool cgroup_writeback_support_v1(void) +{ + return cgwb_v1 && + !cgroup_subsys_on_dfl(memory_cgrp_subsys) && + !cgroup_subsys_on_dfl(io_cgrp_subsys); +} + /** * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode * @inode: inode of interest @@ -174,8 +189,8 @@ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); - return cgroup_subsys_on_dfl(memory_cgrp_subsys) && - 
cgroup_subsys_on_dfl(io_cgrp_subsys) && + return (memcg_blkcg_on_dfl() || + cgroup_writeback_support_v1()) && (bdi->capabilities & BDI_CAP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } @@ -318,6 +333,13 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, rcu_read_unlock(); } +void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset); +int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links); +void free_memcg_blkcg_links(struct list_head *links_to_free); +void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css); #else /* CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) @@ -368,6 +390,30 @@ static inline void wb_blkcg_offline(struct cgroup_subsys_state *css) { } +#ifdef CONFIG_CGROUPS +struct cgroup_subsys; + +static inline void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset) +{ +} + +static inline int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) +{ + return 0; +} + +static inline void free_memcg_blkcg_links(struct list_head *links_to_free) +{ +} + +static inline void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css) +{ +} +#endif + #endif /* CONFIG_CGROUP_WRITEBACK */ const char *bdi_dev_name(struct backing_dev_info *bdi); diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 8d51f69f9f5ef8adcb05ee9eef0ced6818b76161..636c2b5b1bb95cdfbfc9d657501d2a722fd93ba4 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -62,6 +62,9 @@ struct linux_binprm { struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */ char buf[BINPRM_BUF_SIZE]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __randomize_layout; #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 diff --git a/include/linux/bio.h b/include/linux/bio.h index 0286bada25ce724b8bcb54a7faf097b7224b03ee..356c5d2b7895d01da8f996e13b5b541eb56b5c36 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -9,8 +9,17 @@ /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ #include #include +#include +#ifdef CONFIG_THP_SWAP +#if HPAGE_PMD_NR > 256 +#define BIO_MAX_VECS (HPAGE_PMD_NR * 1U) +#else #define BIO_MAX_VECS 256U +#endif +#else +#define BIO_MAX_VECS 256U +#endif struct queue_limits; @@ -242,6 +251,21 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit) bio->bi_flags &= ~(1U << bit); } +static inline bool bio_ext_flagged(struct bio *bio, unsigned int bit) +{ + return (bio->bi_ext_flags & (1U << bit)) != 0; +} + +static inline void bio_set_ext_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_ext_flags |= (1U << bit); +} + +static inline void bio_clear_ext_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_ext_flags &= ~(1U << bit); +} + static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); @@ -346,6 +370,10 @@ struct bio_integrity_payload { struct work_struct bip_work; /* I/O completion */ struct bio_vec *bip_vec; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + struct bio_vec bip_inline_vecs[];/* embedded bvec array */ }; @@ -705,6 +733,11 @@ struct bio_set { * Hot un-plug notifier for the per-cpu cache, if used */ struct hlist_node cpuhp_dead; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline bool bioset_initialized(struct bio_set *bs) diff --git a/include/linux/blk-integrity.h 
b/include/linux/blk-integrity.h index f7cc8080672cc2a20e1936fe1910da89c71218ac..a8d2a0f8faf3a8d9df3d0033973ea7eb076f9547 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -33,6 +33,9 @@ struct blk_integrity_profile { integrity_prepare_fn *prepare_fn; integrity_complete_fn *complete_fn; const char *name; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #ifdef CONFIG_BLK_DEV_INTEGRITY diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 958ed7e89b301e0fa968ed27dd8ae764d47ab68f..991f87788b0529fc804d6d330c953c5faf4736e7 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -8,6 +8,7 @@ #include #include #include +#include struct blk_mq_tags; struct blk_flush_queue; @@ -429,6 +430,11 @@ struct blk_mq_hw_ctx { * q->unused_hctx_list. */ struct list_head hctx_list; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** @@ -515,6 +521,11 @@ struct blk_mq_tag_set { struct mutex tag_list_lock; struct list_head tag_list; struct srcu_struct *srcu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** @@ -526,6 +537,8 @@ struct blk_mq_tag_set { struct blk_mq_queue_data { struct request *rq; bool last; + + CK_KABI_RESERVE(1) }; typedef bool (busy_tag_iter_fn)(struct request *, void *); @@ -645,6 +658,11 @@ struct blk_mq_ops { */ void (*show_rq)(struct seq_file *m, struct request *rq); #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum { @@ -749,6 +767,11 @@ struct blk_mq_tags { * request pool */ spinlock_t lock; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 92c8997b19381660ee68e1cee0cf730c97f69fc6..d8f5999e4748caaafb058807b19438f72da79875 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -10,6 +10,8 @@ #include #include #include +#include +#include struct bio_set; struct bio; @@ -75,6 +77,11 @@ struct block_device { * path */ struct device bd_device; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; #define bdev_whole(_bdev) \ @@ -287,6 +294,12 @@ struct bio { */ struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; +#ifdef CONFIG_BLK_DEV_THROTTLING + unsigned long long start_time_ns; /* when passed to block throttle */ + unsigned long long io_start_time_ns; /* when no more throttle */ + bio_end_io_t *bi_tg_end_io; + void *bi_tg_private; +#endif #ifdef CONFIG_BLK_CGROUP_IOCOST u64 bi_iocost_cost; #endif @@ -316,6 +329,13 @@ struct bio { struct bio_set *bi_pool; + unsigned long bi_ext_flags; /* extend the bi_flags */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. 
This member @@ -349,6 +369,37 @@ enum { BIO_FLAG_LAST }; +/* + * Extend bio flags should be added in here + */ +#define BIO_THROTL_STATED 0 /* bio already stated */ + +#ifdef CONFIG_BLK_DEV_THROTTLING +static inline void bio_set_start_time_ns(struct bio *bio) +{ + preempt_disable(); + bio->start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline void bio_set_io_start_time_ns(struct bio *bio) +{ + preempt_disable(); + bio->io_start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline uint64_t bio_start_time_ns(struct bio *bio) +{ + return bio->start_time_ns; +} + +static inline uint64_t bio_io_start_time_ns(struct bio *bio) +{ + return bio->io_start_time_ns; +} +#endif + typedef __u32 __bitwise blk_mq_req_flags_t; #define REQ_OP_BITS 8 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ef35e9a9878c6acbc34d0855e05767426a30ad1a..4412c7c644688b3e37ab5a082d3b783eff000d30 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -110,6 +110,9 @@ struct blk_integrity { unsigned char tuple_size; unsigned char interval_exp; unsigned char tag_size; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; typedef unsigned int __bitwise blk_mode_t; @@ -207,6 +210,11 @@ struct gendisk { * devices that do not have multiple independent access ranges. */ struct blk_independent_access_ranges *ia_ranges; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline bool disk_live(struct gendisk *disk) @@ -337,6 +345,11 @@ struct queue_limits { * due to possible offsets. */ unsigned int dma_alignment; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, @@ -388,6 +401,12 @@ struct blk_independent_access_ranges { struct blk_independent_access_range ia_range[]; }; +/* + * default request hang threshold, unit is millisecond. If one request does + * not complete in this threashold time, consider this request as hang. + */ +#define BLK_REQ_HANG_THRESHOLD 5000 + struct request_queue { struct request *last_merge; struct elevator_queue *elevator; @@ -464,6 +483,7 @@ struct request_queue { #endif unsigned int rq_timeout; + unsigned int rq_hang_threshold; struct timer_list timeout; struct work_struct timeout_work; @@ -536,6 +556,11 @@ struct request_queue { struct mutex debugfs_mutex; bool mq_sysfs_init_done; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* Keep blk_queue_flag_name[] in sync with the definitions below */ @@ -955,6 +980,8 @@ extern void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features); extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, struct device *dev); +extern void blk_queue_rq_hang_threshold(struct request_queue *q, + unsigned int hang_threshold); bool __must_check blk_get_queue(struct request_queue *); extern void blk_put_queue(struct request_queue *); @@ -986,6 +1013,11 @@ struct blk_plug { bool has_elevator; struct list_head cb_list; /* md requires an unplug callback */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct blk_plug_cb; @@ -1418,6 +1450,11 @@ struct block_device_operations { * driver. 
*/ int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_COMPAT diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h index 7b121bd780eb733bf4cc2165b6bdbc2fc7f0cb1b..cf06058b7381f7916cc8f437d93e6081338294a4 100644 --- a/include/linux/bpf-cgroup-defs.h +++ b/include/linux/bpf-cgroup-defs.h @@ -7,6 +7,7 @@ #include #include #include +#include struct bpf_prog_array; @@ -70,6 +71,9 @@ struct cgroup_bpf { /* cgroup_bpf is released using a work queue */ struct work_struct release_work; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #else /* CONFIG_CGROUP_BPF */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 035e627f94f62d6f10668e297b27d67a8b97c08d..66b263c916856918dc81c3bb2a681bade9879837 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -29,6 +29,7 @@ #include #include #include +#include struct bpf_verifier_env; struct bpf_verifier_log; @@ -172,6 +173,11 @@ struct bpf_map_ops { /* bpf_iter info used to open a seq_file */ const struct bpf_iter_seq_info *iter_seq_info; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum { @@ -300,6 +306,11 @@ struct bpf_map { bool free_after_rcu_gp; atomic64_t sleepable_refcnt; s64 __percpu *elem_count; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline const char *btf_field_type_name(enum btf_field_type type) @@ -918,6 +929,9 @@ struct bpf_insn_access_aux { }; }; struct bpf_verifier_log *log; /* for verbose logs */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline void @@ -1024,6 +1038,9 @@ struct btf_func_model { u8 nr_args; u8 arg_size[MAX_BPF_FUNC_ARGS]; u8 arg_flags[MAX_BPF_FUNC_ARGS]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* Restore arguments before returning from trampoline to let original function @@ -1061,6 +1078,17 @@ struct btf_func_model { */ #define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7) +/* + * Indicate the trampoline should be suitable to receive indirect calls; + * without this indirectly calling the generated code can result in #UD/#CP, + * depending on the CFI options. + * + * Used by bpf_struct_ops. + * + * Incompatible with FENTRY usage, overloads @func_addr argument. + */ +#define BPF_TRAMP_F_INDIRECT BIT(8) + /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 * bytes on x86. 
*/ @@ -1124,6 +1152,8 @@ struct bpf_ksym { struct list_head lnode; struct latch_tree_node tnode; bool prog; + + CK_KABI_RESERVE(1) }; enum bpf_tramp_prog_type { @@ -1144,6 +1174,8 @@ struct bpf_tramp_image { struct rcu_head rcu; struct work_struct work; }; + + CK_KABI_RESERVE(1) }; struct bpf_trampoline { @@ -1172,6 +1204,9 @@ struct bpf_trampoline { /* Executable image of trampoline */ struct bpf_tramp_image *cur_image; struct module *mod; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct bpf_attach_target_info { @@ -1205,7 +1240,11 @@ struct bpf_dispatcher { #endif }; -static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( +#ifndef __bpfcall +#define __bpfcall __nocfi +#endif + +static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func( const void *ctx, const struct bpf_insn *insnsi, bpf_func_t bpf_func) @@ -1295,7 +1334,7 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func #define DEFINE_BPF_DISPATCHER(name) \ __BPF_DISPATCHER_SC(name); \ - noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \ + noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \ const void *ctx, \ const struct bpf_insn *insnsi, \ bpf_func_t bpf_func) \ @@ -1427,7 +1466,6 @@ struct bpf_prog_aux { bool offload_requested; /* Program is bound and offloaded to the netdev. */ bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ bool func_proto_unreliable; - bool sleepable; bool tail_call_reachable; bool xdp_has_frags; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ @@ -1440,6 +1478,9 @@ struct bpf_prog_aux { struct bpf_kfunc_desc_tab *kfunc_tab; struct bpf_kfunc_btf_tab *kfunc_btf_tab; u32 size_poke_tab; +#ifdef CONFIG_FINEIBT + struct bpf_ksym ksym_prefix; +#endif struct bpf_ksym ksym; const struct bpf_prog_ops *ops; struct bpf_map **used_maps; @@ -1488,6 +1529,11 @@ struct bpf_prog_aux { struct work_struct work; struct rcu_head rcu; }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct bpf_prog { @@ -1505,7 +1551,8 @@ struct bpf_prog { enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ call_get_func_ip:1, /* Do we call get_func_ip() */ - tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */ + tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */ + sleepable:1; /* BPF program is sleepable */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ @@ -1522,6 +1569,11 @@ struct bpf_prog { DECLARE_FLEX_ARRAY(struct sock_filter, insns); DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi); }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct bpf_array_aux { @@ -1566,6 +1618,8 @@ struct bpf_link_ops { struct bpf_link_info *info); int (*update_map)(struct bpf_link *link, struct bpf_map *new_map, struct bpf_map *old_map); + + CK_KABI_RESERVE(1) }; struct bpf_tramp_link { @@ -1657,18 +1711,48 @@ struct bpf_struct_ops { void (*unreg)(void *kdata); int (*update)(void *kdata, void *old_kdata); int (*validate)(void *kdata); - const struct btf_type *type; - const struct btf_type *value_type; + void *cfi_stubs; + struct module *owner; const char *name; struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; +}; + +struct bpf_struct_ops_desc { + struct bpf_struct_ops *st_ops; + + const struct btf_type *type; + const struct 
btf_type *value_type; u32 type_id; u32 value_id; }; +enum bpf_struct_ops_state { + BPF_STRUCT_OPS_STATE_INIT, + BPF_STRUCT_OPS_STATE_INUSE, + BPF_STRUCT_OPS_STATE_TOBEFREE, + BPF_STRUCT_OPS_STATE_READY, +}; + +struct bpf_struct_ops_common_value { + refcount_t refcnt; + enum bpf_struct_ops_state state; +}; + #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) +/* This macro helps developer to register a struct_ops type and generate + * type information correctly. Developers should use this macro to register + * a struct_ops type instead of calling __register_bpf_struct_ops() directly. + */ +#define register_bpf_struct_ops(st_ops, type) \ + ({ \ + struct bpf_struct_ops_##type { \ + struct bpf_struct_ops_common_value common; \ + struct type data ____cacheline_aligned_in_smp; \ + }; \ + BTF_TYPE_EMIT(struct bpf_struct_ops_##type); \ + __register_bpf_struct_ops(st_ops); \ + }) #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) -const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); -void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); bool bpf_struct_ops_get(const void *kdata); void bpf_struct_ops_put(const void *kdata); int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, @@ -1676,6 +1760,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, struct bpf_tramp_link *link, const struct btf_func_model *model, + void *stub_func, void *image, void *image_end); static inline bool bpf_try_module_get(const void *data, struct module *owner) { @@ -1709,15 +1794,12 @@ struct bpf_dummy_ops { int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); #endif +int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, + struct btf *btf, + struct bpf_verifier_log *log); +void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map); #else -static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) -{ - return NULL; -} -static inline void bpf_struct_ops_init(struct btf *btf, - struct bpf_verifier_log *log) -{ -} +#define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; }) static inline bool bpf_try_module_get(const void *data, struct module *owner) { return try_module_get(owner); @@ -1736,6 +1818,9 @@ static inline int bpf_struct_ops_link_create(union bpf_attr *attr) { return -EOPNOTSUPP; } +static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map) +{ +} #endif @@ -2010,14 +2095,14 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array *array, old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); item = &array->items[0]; while ((prog = READ_ONCE(item->prog))) { - if (!prog->aux->sleepable) + if (!prog->sleepable) rcu_read_lock(); run_ctx.bpf_cookie = item->bpf_cookie; ret &= run_prog(prog, ctx); item++; - if (!prog->aux->sleepable) + if (!prog->sleepable) rcu_read_unlock(); } bpf_reset_run_ctx(old_run_ctx); @@ -2256,6 +2341,10 @@ struct bpf_iter_aux_info { enum bpf_iter_task_type type; u32 pid; } task; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index 173ec7f43ed1e0213b55da85d16054a23c2a3fc0..f77c2087dd08cd66d4f4e4620219adcaed3c7485 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -94,6 +94,8 
@@ struct bpf_local_storage { */ struct rcu_head rcu; raw_spinlock_t lock; /* Protect adding/removing from the "list" */ + + CK_KABI_RESERVE(1) }; /* U16_MAX is much more than enough for sk local storage diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 92919d52f7e1b2a2676085423d69f90c58bd7297..c7d44c1ee9cfc51c784e2d7a2e2c8719e483bf22 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -317,6 +317,8 @@ struct bpf_func_state { struct bpf_reference_state *refs; int allocated_stack; struct bpf_stack_state *stack; + + CK_KABI_RESERVE(1) }; struct bpf_idx_pair { @@ -522,6 +524,9 @@ struct bpf_insn_aux_data { * accepts callback function as a parameter. */ bool calls_callback; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ @@ -607,6 +612,7 @@ struct bpf_verifier_env { u32 prev_insn_idx; struct bpf_prog *prog; /* eBPF program being verified */ const struct bpf_verifier_ops *ops; + struct module *attach_btf_mod; /* The owner module of prog->aux->attach_btf */ struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ int stack_size; /* number of states to be processed */ bool strict_alignment; /* perform strict pointer alignment checks */ @@ -672,6 +678,11 @@ struct bpf_verifier_env { * e.g., in reg_type_str() to generate reg_type string */ char tmp_str_buf[TMP_STR_BUF_LEN]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, diff --git a/include/linux/btf.h b/include/linux/btf.h index 928113a80a95278e44161ed8a7c559716e237651..ca4c482efded4eba40ee6908e2f1a205db0ee0f2 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -83,6 +83,17 @@ */ #define __bpf_kfunc __used noinline +#define __bpf_kfunc_start_defs() \ + __diag_push(); \ + __diag_ignore_all("-Wmissing-declarations", \ + "Global kfuncs as their definitions will be in BTF");\ + __diag_ignore_all("-Wmissing-prototypes", \ + "Global kfuncs as their definitions will be in BTF") + +#define __bpf_kfunc_end_defs() __diag_pop() +#define __bpf_hook_start() __bpf_kfunc_start_defs() +#define __bpf_hook_end() __bpf_kfunc_end_defs() + /* * Return the name of the passed struct, if exists, or halt the build if for * example the structure gets renamed. 
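The bpf_struct_ops rework above moves the BTF-derived bookkeeping into struct bpf_struct_ops_desc and funnels registration through the register_bpf_struct_ops() macro. A hedged sketch of a registration under the new scheme; struct example_ops, its stub table, and the callback names are assumptions for illustration, not code from this patch::

    /* Illustrative only; callbacks and error handling omitted. */
    static struct example_ops __bpf_example_ops_cfi_stubs = {
    	/* a CFI-safe stub for each function pointer member */
    };

    static struct bpf_struct_ops bpf_example_ops = {
    	.verifier_ops	= &example_verifier_ops,
    	.reg		= example_reg,
    	.unreg		= example_unreg,
    	.cfi_stubs	= &__bpf_example_ops_cfi_stubs,	/* new member */
    	.owner		= THIS_MODULE,			/* new member */
    	.name		= "example_ops",
    };

    static int __init example_struct_ops_init(void)
    {
    	/* emits BTF for the wrapper type, then registers the ops */
    	return register_bpf_struct_ops(&bpf_example_ops, example_ops);
    }
    late_initcall(example_struct_ops_init);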
In this way, developers have to revisit @@ -125,6 +136,7 @@ struct btf_struct_metas { extern const struct file_operations btf_fops; +const char *btf_get_name(const struct btf *btf); void btf_get(struct btf *btf); void btf_put(struct btf *btf); int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz); @@ -482,8 +494,24 @@ static inline void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id) return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func); } +bool btf_param_match_suffix(const struct btf *btf, + const struct btf_param *arg, + const char *suffix); + struct bpf_verifier_log; +#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) +struct bpf_struct_ops; +int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops); +const struct bpf_struct_ops_desc *bpf_struct_ops_find_value(struct btf *btf, u32 value_id); +const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id); +#else +static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id) +{ + return NULL; +} +#endif + #ifdef CONFIG_BPF_SYSCALL const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); const char *btf_name_by_offset(const struct btf *btf, u32 offset); diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index a9cb10b0e2e9bf45c33f4ad39e7a95d8d9b6b1b5..e24aabfe8ecc91fae3a814eccade54b366016bc1 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -8,6 +8,9 @@ struct btf_id_set { u32 ids[]; }; +/* This flag implies BTF_SET8 holds kfunc(s) */ +#define BTF_SET8_KFUNCS (1 << 0) + struct btf_id_set8 { u32 cnt; u32 flags; @@ -21,6 +24,7 @@ struct btf_id_set8 { #include /* for __PASTE */ #include /* for __maybe_unused */ +#include /* * Following macros help to define lists of BTF IDs placed @@ -183,17 +187,18 @@ extern struct btf_id_set name; * .word (1 << 3) | (1 << 1) | (1 << 2) * */ -#define __BTF_SET8_START(name, scope) \ +#define __BTF_SET8_START(name, scope, flags) \ +__BTF_ID_LIST(name, local) \ asm( \ ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ "." 
#scope " __BTF_ID__set8__" #name "; \n" \ "__BTF_ID__set8__" #name ":; \n" \ -".zero 8 \n" \ +".zero 4 \n" \ +".long " __stringify(flags) "\n" \ ".popsection; \n"); #define BTF_SET8_START(name) \ -__BTF_ID_LIST(name, local) \ -__BTF_SET8_START(name, local) +__BTF_SET8_START(name, local, 0) #define BTF_SET8_END(name) \ asm( \ @@ -202,6 +207,12 @@ asm( \ ".popsection; \n"); \ extern struct btf_id_set8 name; +#define BTF_KFUNCS_START(name) \ +__BTF_SET8_START(name, local, BTF_SET8_KFUNCS) + +#define BTF_KFUNCS_END(name) \ +BTF_SET8_END(name) + #else #define BTF_ID_LIST(name) static u32 __maybe_unused name[64]; @@ -216,6 +227,8 @@ extern struct btf_id_set8 name; #define BTF_SET_END(name) #define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 }; #define BTF_SET8_END(name) +#define BTF_KFUNCS_START(name) static struct btf_id_set8 __maybe_unused name = { .flags = BTF_SET8_KFUNCS }; +#define BTF_KFUNCS_END(name) #endif /* CONFIG_DEBUG_INFO_BTF */ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 44e9de51eedfb634aa2a44420a0fc32ee61ff924..9711ae81d988a63ee59c94bcef08a8ff73c8deb3 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -369,8 +369,11 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) static inline void wait_on_buffer(struct buffer_head *bh) { might_sleep(); - if (buffer_locked(bh)) + if (buffer_locked(bh)) { + task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio); __wait_on_buffer(bh); + task_clear_wait_res(); + } } static inline int trylock_buffer(struct buffer_head *bh) @@ -381,8 +384,11 @@ static inline int trylock_buffer(struct buffer_head *bh) static inline void lock_buffer(struct buffer_head *bh) { might_sleep(); - if (!trylock_buffer(bh)) + if (!trylock_buffer(bh)) { + task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio); __lock_buffer(bh); + task_clear_wait_res(); + } } static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, diff --git a/include/linux/buildid.h b/include/linux/buildid.h index 3b7a0ff4642fd96cdca4996f240c63acaa44f426..d093f52d73e0624f27fd8438be5537717b1f5436 100644 --- a/include/linux/buildid.h +++ b/include/linux/buildid.h @@ -6,8 +6,8 @@ #define BUILD_ID_SIZE_MAX 20 -int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, - __u32 *size); +int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); +int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size); #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE) diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index a5cfd44fab45b482cd5505b4cfefa4ba64b3f075..467ec6a765673c1d31a1edbb2cca522eeaf71e82 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -47,7 +47,7 @@ extern unsigned int coherency_max_size; * keeping, the remaining members form the core properties of the cache */ struct cacheinfo { - unsigned int id; + unsigned long id; enum cache_type type; unsigned int level; unsigned int coherency_line_size; @@ -110,12 +110,13 @@ int acpi_get_cache_info(unsigned int cpu, #endif const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); +unsigned long cache_of_get_id(struct device_node *np); /* * Get the id of the cache associated with @cpu at level @level. * cpuhp lock must be held. 
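The __bpf_kfunc_start_defs()/__bpf_kfunc_end_defs() wrappers and the BTF_KFUNCS_START/END set macros (which stamp the set with BTF_SET8_KFUNCS) are meant to be used together when exporting kfuncs. A minimal sketch with made-up names::

    __bpf_kfunc_start_defs();

    __bpf_kfunc int bpf_example_add(int a, int b)
    {
    	return a + b;
    }

    __bpf_kfunc_end_defs();

    BTF_KFUNCS_START(example_kfunc_ids)
    BTF_ID_FLAGS(func, bpf_example_add)
    BTF_KFUNCS_END(example_kfunc_ids)

    static const struct btf_kfunc_id_set example_kfunc_set = {
    	.owner	= THIS_MODULE,
    	.set	= &example_kfunc_ids,
    };

The set would then be registered with register_btf_kfunc_id_set() for the intended program type, as before.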
*/ -static inline int get_cpu_cacheinfo_id(int cpu, int level) +static inline unsigned long get_cpu_cacheinfo_id(int cpu, int level) { struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); int i; @@ -124,11 +125,32 @@ static inline int get_cpu_cacheinfo_id(int cpu, int level) if (ci->info_list[i].level == level) { if (ci->info_list[i].attributes & CACHE_ID) return ci->info_list[i].id; - return -1; + return ~0UL; } } - return -1; + return ~0UL; +} + +/* + * Get the size of the cache associated with @cpu at level @level. + * cpuhp lock must be held. + */ +static inline unsigned int get_cpu_cacheinfo_size(int cpu, int level) +{ + struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); + int i; + + if (!ci->info_list) + return 0; + + for (i = 0; i < ci->num_leaves; i++) { + if (ci->info_list[i].level == level) { + return ci->info_list[i].size; + } + } + + return 0; } #ifdef CONFIG_ARM64 diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 868924dec5a17ba353d6a0e67af8a033fbcf67ff..8e34f05bc6b1681fa11cc89384cd92e555064afb 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -17,6 +17,7 @@ #include #include #include +#include struct ccp_device; struct ccp_cmd; @@ -587,6 +588,202 @@ struct ccp_ecc_engine { u16 ecc_result; }; +/***** SM2 engine *****/ +#define CCP_SM2_VERIFY_SRC_SIZE 160 +#define CCP_SM2_LP_SRC_SIZE 96 +#define CCP_SM2_KG_SRC_SIZE 32 +#define CCP_SM2_SIGN_SRC_SIZE 96 +#define CCP_SM2_MMUL_SRC_SIZE 64 +#define CCP_SM2_DST_SIZE 128 + +/** + * ccp_sm2_mode - SM2 operation mode + * + * @CCP_SM2_MODE_VERIFY: Verify mode + * @CCP_SM2_MODE_LP: LP mode + * @CCP_SM2_MODE_KG: KG mode + * @CCP_SM2_MODE_SIGN: SIGN mode + * @CCP_SM2_MODE_MMUL: MMUL mode + */ +enum ccp_sm2_mode { + CCP_SM2_MODE_VERIFY, + CCP_SM2_MODE_LP, + CCP_SM2_MODE_KG, + CCP_SM2_MODE_SIGN, + CCP_SM2_MODE_MMUL, + CCP_SM2_MODE__LAST, +}; + +/** + * struct ccp_sm2_engine - CCP SM2 operation + * @mode: SM2 operation mode + * @rand: indicates whether operand_k is from the TRNG or not + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @dst_len: length in bytes of data produced by this operation + */ +struct ccp_sm2_engine { + enum ccp_sm2_mode mode; + u32 rand; + + struct scatterlist *src; + u32 src_len; + + struct scatterlist *dst; + u32 dst_len; +}; + +/***** SM3 engine *****/ +/** + * ccp_sm3_type - type of SM3 operation + * + * @CCP_SM3_TYPE_256: SM3 operation + */ +enum ccp_sm3_type { + CCP_SM3_TYPE_256 = 2, + CCP_SM3_TYPE__LAST, +}; + +/** + * struct ccp_sm3_engine - CCP SM3 operation + * @type: Type of SM3 operation + * @ctx: current hash value + * @ctx_len: length in bytes of hash value + * @src: data to be used for this operation + * @src_len: length in bytes of data used for this operation + * @opad: data to be used for final HMAC operation + * @opad_len: length in bytes of data used for final HMAC operation + * @first: indicates first SM3 operation + * @final: indicates final SM3 operation + * @msg_bits: total length of the message in bits used in final SM3 operation + */ +struct ccp_sm3_engine { + enum ccp_sm3_type type; + + struct scatterlist *ctx; + u32 ctx_len; + + struct scatterlist *src; + u64 src_len; + + struct scatterlist *opad; + u32 opad_len; + + u32 first; + u32 final; + u64 msg_bits; +}; + +/***** SM4 engine *****/ +#define SM4_BLOCK_SIZE 16 +#define SM4_KEY_SIZE 16 +#define CCP_SM4_MODE_MASK 0x0F +#define CCP_SM4_MODE_HS_SEL 0x10 + +/** + * ccp_sm4_mode - SM4 operation mode + * + * @CCP_SM4_MODE_ECB:
ECB mode + * @CCP_SM4_MODE_CBC: CBC mode + * @CCP_SM4_MODE_OFB: OFB mode + * @CCP_SM4_MODE_CFB: CFB mode + * @CCP_SM4_MODE_CTR: CTR mode + */ +enum ccp_sm4_mode { + CCP_SM4_MODE_ECB = 0, + CCP_SM4_MODE_CBC, + CCP_SM4_MODE_OFB, + CCP_SM4_MODE_CFB, + CCP_SM4_MODE_CTR, + CCP_SM4_MODE__LAST, +}; + +/** + * ccp_sm4_action - SM4 operation + * + * @CCP_SM4_ACTION_DECRYPT: SM4 decrypt operation + * @CCP_SM4_ACTION_ENCRYPT: SM4 encrypt operation + */ +enum ccp_sm4_action { + CCP_SM4_ACTION_DECRYPT = 0, + CCP_SM4_ACTION_ENCRYPT, + CCP_SM4_ACTION__LAST, +}; + +/** + * struct ccp_sm4_engine - CCP SM4 operation + * @mode: SM4 operation mode + * @action: SM4 operation (decrypt/encrypt) + * @select: Indicating that high-secure engine is selected + * @key: key to be used for this SM4 operation + * @key_len: length in bytes of key + * @iv: IV to be used for this SM4 operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - mode, action, select, key, key_len, src, dst, src_len + * - iv, iv_len for any mode other than ECB + * - key_len and iv_len must be 16B + * - src_len must be multiple of 16B + * - high-secure engine only for ECB and CBC mode + * + * The iv variable is used as both input and output. On completion of the + * SM4 operation the new IV overwrites the old IV. + */ +struct ccp_sm4_engine { + enum ccp_sm4_mode mode; + enum ccp_sm4_action action; + u32 select; /* Indicating that high-secure engine is selected */ + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; + +/***** SM4_CTR engine *****/ +/** + * struct ccp_sm4_ctr_engine - CCP SM4_CTR operation + * @action: SM4_CTR operation (decrypt/encrypt) + * @size: counter bit size + * @step: counter increase step + * @key: key to be used for this SM4 operation + * @key_len: length in bytes of key + * @iv: IV to be used for this SM4 operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - action, size, step, key, key_len, iv, iv_len, src, dst, src_len + * - key_len and iv_len must be 16B + * + * The iv variable is used as both input and output. On completion of the + * SM4_CTR operation the new IV overwrites the old IV. 
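Given the constraints spelled out above (16-byte key and IV, source length a multiple of the block size), filling in a ccp_sm4_engine request might look like the following sketch; example_fill_sm4() is a hypothetical helper, not part of this patch::

    static void example_fill_sm4(struct ccp_sm4_engine *sm4,
    			     struct scatterlist *key, struct scatterlist *iv,
    			     struct scatterlist *src, struct scatterlist *dst,
    			     u64 len)
    {
    	sm4->mode = CCP_SM4_MODE_CBC;
    	sm4->action = CCP_SM4_ACTION_ENCRYPT;
    	sm4->select = 0;			/* standard engine */
    	sm4->key = key;
    	sm4->key_len = SM4_KEY_SIZE;		/* must be 16 bytes */
    	sm4->iv = iv;
    	sm4->iv_len = SM4_BLOCK_SIZE;		/* must be 16 bytes */
    	sm4->src = src;
    	sm4->dst = dst;
    	sm4->src_len = len;			/* multiple of 16 bytes */
    }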
+ */ +struct ccp_sm4_ctr_engine { + enum ccp_sm4_action action; + u32 size; + u32 step; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; /** * ccp_engine - CCP operation identifiers @@ -599,6 +796,8 @@ struct ccp_ecc_engine { * @CCP_ENGINE_PASSTHRU: pass-through operation * @CCP_ENGINE_ZLIB_DECOMPRESS: unused * @CCP_ENGINE_ECC: ECC operation + * @CCP_ENGINE_SM2: SM2 operation + * @CCP_ENGINE_SM3: SM3 operation */ enum ccp_engine { CCP_ENGINE_AES = 0, @@ -609,6 +808,10 @@ enum ccp_engine { CCP_ENGINE_PASSTHRU, CCP_ENGINE_ZLIB_DECOMPRESS, CCP_ENGINE_ECC, + CCP_ENGINE_SM2 = 8, /* fixed value */ + CCP_ENGINE_SM3, + CCP_ENGINE_SM4, + CCP_ENGINE_SM4_CTR, CCP_ENGINE__LAST, }; @@ -657,6 +860,10 @@ struct ccp_cmd { struct ccp_passthru_engine passthru; struct ccp_passthru_nomap_engine passthru_nomap; struct ccp_ecc_engine ecc; + struct ccp_sm2_engine sm2; + struct ccp_sm3_engine sm3; + struct ccp_sm4_engine sm4; + struct ccp_sm4_ctr_engine sm4_ctr; } u; /* Completion callback support */ diff --git a/include/linux/cfi.h b/include/linux/cfi.h index 3552ec82b72561e433047b7cd0f65dae37978026..f0df518e11dd15a63aa3561355f71afe66c45e9f 100644 --- a/include/linux/cfi.h +++ b/include/linux/cfi.h @@ -9,6 +9,14 @@ #include #include +#include + +#ifndef cfi_get_offset +static inline int cfi_get_offset(void) +{ + return 0; +} +#endif #ifdef CONFIG_CFI_CLANG enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr, @@ -38,4 +46,8 @@ static inline void module_cfi_finalize(const Elf_Ehdr *hdr, #endif /* CONFIG_ARCH_USES_CFI_TRAPS */ #endif /* CONFIG_MODULES */ +#ifndef CFI_NOSEAL +#define CFI_NOSEAL(x) +#endif + #endif /* _LINUX_CFI_H */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 6eefe5153a6ff794327389e639e3ab7c05940403..e8d59e792b3a2ad469d4352ec826962d85bb0ae2 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -143,6 +143,9 @@ struct cgroup_file { struct kernfs_node *kn; unsigned long notified_at; struct timer_list notify_timer; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* @@ -175,6 +178,9 @@ struct cgroup_subsys_state { */ int id; + /* number of procs under this css and its descendants */ + int nr_procs; + unsigned int flags; /* @@ -195,6 +201,11 @@ struct cgroup_subsys_state { struct work_struct destroy_work; struct rcu_work destroy_rwork; + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + /* * PI: the parent css. Placed here for cache proximity to following * fields of the containing structure. 
@@ -297,6 +308,11 @@ struct css_set { /* For RCU-protected deletion */ struct rcu_head rcu_head; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct cgroup_base_stat { @@ -304,6 +320,11 @@ struct cgroup_base_stat { #ifdef CONFIG_SCHED_CORE u64 forceidle_sum; + u64 forceidle_task_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + u64 sibidle_sum; + u64 sibidle_task_sum; #endif }; @@ -368,6 +389,11 @@ struct cgroup_rstat_cpu { */ struct cgroup *updated_children; /* terminated by self cgroup */ struct cgroup *updated_next; /* NULL iff not on the list */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct cgroup_freezer_state { @@ -525,6 +551,11 @@ struct cgroup { struct bpf_local_storage __rcu *bpf_cgrp_storage; #endif + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + /* All ancestors including self */ struct cgroup *ancestors[]; }; @@ -568,6 +599,11 @@ struct cgroup_root { /* The name for this hierarchy - may be empty */ char name[MAX_CGROUP_ROOT_NAMELEN]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -660,6 +696,8 @@ struct cftype { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key lockdep_key; #endif + + CK_KABI_RESERVE(1) }; /* @@ -691,6 +729,11 @@ struct cgroup_subsys { void (*release)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + bool early_init:1; /* @@ -812,6 +855,8 @@ struct sock_cgroup_data { #ifdef CONFIG_CGROUP_NET_PRIO u16 prioidx; /* v1 */ #endif + + CK_KABI_RESERVE(1) }; static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b307013b9c6c9a2c892644f7fba94698bb82133b..5eda25d7c3a5ae8df0eec44760c3572683af5a50 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -121,6 +121,8 @@ void cgroup_file_show(struct cgroup_file *cfile, bool show); int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); +extern struct cgroup_subsys_state *global_cgroup_css(struct cgroup *cgrp, + int ssid); void cgroup_fork(struct task_struct *p); extern int cgroup_can_fork(struct task_struct *p, @@ -855,4 +857,16 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {} #endif /* CONFIG_CGROUP_BPF */ +#ifdef CONFIG_SCHED_SLI +void cpuacct_cpuset_changed(struct cgroup *cgrp, + struct cpumask *effective, struct cpumask *new_added); +void cgroup_idle_end(struct sched_entity *se); +void cgroup_idle_start(struct sched_entity *se); +#else +static inline void cpuacct_cpuset_changed(struct cgroup *cgrp, + struct cpumask *effective, struct cpumask *new_added) { } +static inline void cgroup_idle_end(struct sched_entity *se) { } +static inline void cgroup_idle_start(struct sched_entity *se) { } +#endif + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/ck_kabi.h b/include/linux/ck_kabi.h new file mode 100644 index 0000000000000000000000000000000000000000..a2ecc950c93a94c28c9430d103f71d7752158a23 --- /dev/null +++ b/include/linux/ck_kabi.h @@ -0,0 +1,532 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ck_kabi.h - Anolis Cloud-Kernel kABI abstraction header + * + * Copyright (c) 2014 Don Zickus + * Copyright (c) 2015-2018 Jiri Benc + * Copyright (c) 2015 Sabrina 
Dubroca, Hannes Frederic Sowa + * Copyright (c) 2016-2018 Prarit Bhargava + * Copyright (c) 2017 Paolo Abeni, Larry Woodman + * Copyright (c) 2023 Guixin Liu + * + * This file is released under the GPLv2. + * See the file COPYING for more details. + * + * These kabi macros hide the changes from the kabi checker and from the + * process that computes the exported symbols' checksums. + * They have 2 variants: one (defined under __GENKSYMS__) used when + * generating the checksums, and the other used when building the kernel's + * binaries. + * + * The use of these macros does not guarantee that the usage and modification + * of code is correct. As with all Anolis only changes, an engineer must + * explain why the use of the macro is valid in the patch containing the + * changes. + * + */ + +#ifndef _LINUX_CK_KABI_H +#define _LINUX_CK_KABI_H + +#include +#include +#include + +/* + * NOTE + * Unless indicated otherwise, don't use ';' after these macros as it + * messes up the kABI checker by changing what the resulting token string + * looks like. Instead let the macros add the ';' so it can be properly + * hidden from the kABI checker (mainly for CK_KABI_EXTEND, but applied to + * most macros for uniformity). + * + * + * CK_KABI_CONST + * Adds a new const modifier to a function parameter preserving the old + * checksum. + * + * CK_KABI_ADD_MODIFIER + * Adds a new modifier to a function parameter or a typedef, preserving + * the old checksum. Useful e.g. for adding rcu annotations or changing + * int to unsigned. Beware that this may change the semantics; if you're + * sure this is safe, always explain why binary compatibility with 3rd + * party modules is retained. + * + * CK_KABI_DEPRECATE + * Marks the element as deprecated and makes it unusable by modules while + * keeping a hole in its place to preserve binary compatibility. + * + * CK_KABI_DEPRECATE_FN + * Marks the function pointer as deprecated and makes it unusable by modules + * while keeping a hole in its place to preserve binary compatibility. + * + * CK_KABI_EXTEND + * Adds a new field to a struct. This must always be added to the end of + * the struct. Before using this macro, make sure this is actually safe + * to do - there are a number of conditions under which it is *not* safe. + * In particular (but not limited to), this macro cannot be used: + * - if the struct in question is embedded in another struct, or + * - if the struct is allocated by drivers either statically or + * dynamically, or + * - if the struct is allocated together with driver data (an example of + * such behavior is struct net_device or struct request). + * + * CK_KABI_EXTEND_WITH_SIZE + * Adds a new element (usually a struct) to a struct and reserves extra + * space for the new element. The provided 'size' is the total space to + * be added in longs (i.e. it's 8 * 'size' bytes), including the size of + * the added element. It is automatically checked that the new element + * does not overflow the reserved space, either now or in the future. However, + * no attempt is made to check the content of the added element (struct) + * for kABI conformance - kABI checking inside the added element is + * effectively switched off. + * For any struct being added by CK_KABI_EXTEND_WITH_SIZE, it is + * recommended that its content be documented as not covered by the kABI + * guarantee. An illustrative example follows the CK_KABI_FILL_HOLE entry + * below. + * + * CK_KABI_FILL_HOLE + * Fills a hole in a struct. + * + * Warning: only use if a hole exists for _all_ arches. Use pahole to verify.
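Since the CK_KABI_EXTEND_WITH_SIZE entry above is the only one in this stretch without a usage example, a small illustration (struct foo and struct foo_stats are made-up names, not from this header)::

    struct foo_stats {
    	u64 hits;
    	u64 misses;
    };

    struct foo {
    	int a;
    	/* reserves 4 longs (32 bytes) total for the extension */
    	CK_KABI_EXTEND_WITH_SIZE(struct foo_stats stats, 4)
    };

The build fails via __CK_KABI_CHECK_SIZE if struct foo_stats ever outgrows the reserved 32 bytes.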
+ * + * CK_KABI_RENAME + * Renames an element without changing its type. This macro can be used in + * bitfields, for example. + * + * NOTE: this macro does not add the final ';' + * + * CK_KABI_REPLACE + * Replaces the _orig field by the _new field. The size of the occupied + * space is preserved; it's fine if the _new field is smaller than the + * _orig field. If a _new field is larger or has a different alignment, + * compilation will abort. + * + * CK_KABI_REPLACE_SPLIT + * Works the same as CK_KABI_REPLACE but replaces a single _orig field by + * multiple new fields. The checks for size and alignment done by + * CK_KABI_REPLACE are still applied. + * + * CK_KABI_HIDE_INCLUDE + * Hides the given include file from kABI checksum computations. This is + * used when a newly added #include makes a previously opaque struct + * visible. + * + * Example usage: + * #include CK_KABI_HIDE_INCLUDE() + * + * CK_KABI_FAKE_INCLUDE + * Pretends inclusion of the given file for kABI checksum computations. + * This is used when upstream removed a particular #include but that made + * some structures opaque that were previously visible and is causing kABI + * checker failures. + * + * Example usage: + * #include CK_KABI_FAKE_INCLUDE() + * + * CK_KABI_RESERVE + * Adds a reserved field to a struct. This is done prior to kABI freeze + * for structs that cannot be expanded later using CK_KABI_EXTEND (for + * example because they are embedded in another struct or because they are + * allocated by drivers or because they use unusual memory layout). The + * size of the reserved field is 'unsigned long' and is assumed to be + * 8 bytes. + * + * The argument is a number unique for the given struct; usually, multiple + * CK_KABI_RESERVE macros are added to a struct with numbers starting from + * one. + * + * Example usage: + * struct foo { + * int a; + * CK_KABI_RESERVE(1) + * CK_KABI_RESERVE(2) + * CK_KABI_RESERVE(3) + * CK_KABI_RESERVE(4) + * }; + * + * CK_KABI_USE + * Uses a previously reserved field or multiple fields. The arguments are + * one or more numbers assigned to CK_KABI_RESERVE, followed by a field to + * be put in their place. The compiler ensures that the new field is not + * larger than the reserved area. + * + * Example usage: + * struct foo { + * int a; + * CK_KABI_USE(1, int b) + * CK_KABI_USE(2, 3, int c[3]) + * CK_KABI_RESERVE(4) + * }; + * + * CK_KABI_USE_SPLIT + * Works the same as CK_KABI_USE but replaces a single reserved field by + * multiple new fields. + * + * CK_KABI_AUX_EMBED + * CK_KABI_AUX_PTR + * Adds an extension of a struct in the form of "auxiliary structure". + * This is done prior to kABI freeze for structs that cannot be expanded + * later using CK_KABI_EXTEND. See also CK_KABI_RESERVE; these two + * approaches can be (and often are) combined. + * + * To use this for 'struct foo' (the "base structure"), define a new + * structure called 'struct foo_ck_reserved'; this new struct is called "auxiliary + * structure". Then add CK_KABI_AUX_EMBED or CK_KABI_AUX_PTR to the end + * of the base structure. The argument is the name of the base structure, + * without the 'struct' keyword. + * + * CK_KABI_AUX_PTR stores a pointer to the aux structure in the base + * struct. The lifecycle of the aux struct needs to be properly taken + * care of. + * + * CK_KABI_AUX_EMBED embeds the aux struct into the base struct. This + * cannot be used when the base struct is itself embedded into another + * struct, allocated in an array, etc.
+ * + * Both approaches (ptr and embed) work correctly even when the aux struct + * is allocated by modules. To ensure this, the code responsible for + * allocation/assignment of the aux struct has to properly set the size of + * the aux struct; see the CK_KABI_AUX_SET_SIZE and CK_KABI_AUX_INIT_SIZE + * macros. + * + * New fields can be later added to the auxiliary structure, always to its + * end. Note the auxiliary structure cannot be shrunk in size later (i.e., + * fields cannot be removed, only deprecated). Any code accessing fields + * from the aux struct must guard the access using the CK_KABI_AUX macro. + * The access itself is then done via a '_ck_reserved' field in the base struct. + * + * The auxiliary structure is not guaranteed for access by modules unless + * explicitly commented as such in the declaration of the aux struct + * itself or some of its elements. + * + * Example: + * + * struct foo_ck_reserved { + * int newly_added; + * }; + * + * struct foo { + * bool big_hammer; + * CK_KABI_AUX_PTR(foo) + * }; + * + * void use(struct foo *f) + * { + * if (CK_KABI_AUX(f, foo, newly_added)) + * f->_ck_reserved->newly_added = 123; + * else + * // the field 'newly_added' is not present in the passed + * // struct, fall back to old behavior + * f->big_hammer = true; + * } + * + * static struct foo_ck_reserved my_foo_ck_reserved = { + * .newly_added = 0, + * }; + * + * static struct foo my_foo = { + * .big_hammer = false, + * ._ck_reserved = &my_foo_ck_reserved, + * CK_KABI_AUX_INIT_SIZE(foo) + * }; + * + * CK_KABI_USE_AUX_PTR + * Creates an auxiliary structure post kABI freeze. This works by using + * two reserved fields (thus there have to be two reserved fields still + * available) and converting them to CK_KABI_AUX_PTR. + * + * Example: + * + * struct foo_ck_reserved { + * }; + * + * struct foo { + * int a; + * CK_KABI_RESERVE(1) + * CK_KABI_USE_AUX_PTR(2, 3, foo) + * }; + * + * CK_KABI_AUX_SET_SIZE + * CK_KABI_AUX_INIT_SIZE + * Calculates and stores the size of the auxiliary structure. + * + * CK_KABI_AUX_SET_SIZE is for dynamically allocated base structs, + * CK_KABI_AUX_INIT_SIZE is for statically allocated base structs. + * + * These macros must be called from the allocation (CK_KABI_AUX_SET_SIZE) + * or declaration (CK_KABI_AUX_INIT_SIZE) site, regardless of whether + * that happens in the kernel or in a module. Without calling one of + * these macros, the aux struct will appear to have no fields to the + * kernel. + * + * Note: since CK_KABI_AUX_SET_SIZE is intended to be invoked outside of + * a struct definition, it does not add the semicolon and must be + * terminated with a semicolon by the caller. + * + * CK_KABI_AUX + * Verifies that the given field exists in the given auxiliary structure. + * This MUST be called prior to accessing that field; failing to do that + * may lead to invalid memory access. + * + * The first argument is a pointer to the base struct, the second argument + * is the name of the base struct (without the 'struct' keyword), the + * third argument is the field name. + * + * This macro works for structs extended by either of CK_KABI_AUX_EMBED, + * CK_KABI_AUX_PTR and CK_KABI_USE_AUX_PTR. + * + * CK_KABI_FORCE_CHANGE + * Force change of the symbol checksum. The argument of the macro is a + * version for cases we need to do this more than once. + * + * This macro does the opposite of hiding changes: it changes the symbol checksum without + * actually changing anything about the exported symbol.
It is useful for + * symbols that are not whitelisted but that we're changing in an + * incompatible way, where we want to prevent 3rd party modules from silently + * corrupting memory. Instead, by changing the symbol checksum, such modules + * won't be loaded by the kernel. This macro should only be used as a + * last resort when all other KABI workarounds have failed. + * + * CK_KABI_EXCLUDE + * !!! WARNING: DANGEROUS, DO NOT USE unless you are aware of all the !!! + * !!! implications. This should be used ONLY EXCEPTIONALLY and only !!! + * !!! under specific circumstances. Very likely, this macro does not !!! + * !!! do what you expect it to do. Note that any usage of this macro !!! + * !!! MUST be paired with a CK_KABI_FORCE_CHANGE annotation of !!! + * !!! a suitable symbol (or an equivalent safeguard) and the commit !!! + * !!! log MUST explain why the chosen solution is appropriate. !!! + * + * Exclude the element from checksum generation. Any such element is + * considered not to be part of the kABI whitelist and may be changed at + * will. Note however that it's the responsibility of the developer + * changing the element to ensure 3rd party drivers using this element + * won't panic, for example by not allowing them to be loaded. That can + * be achieved by changing another, non-whitelisted symbol they use, + * either by nature of the change or by using CK_KABI_FORCE_CHANGE. + * + * Also note that any change to the element must preserve its size. Change + * of the size is not allowed and would constitute a silent kABI breakage. + * Beware that the CK_KABI_EXCLUDE macro does not do any size checks. + * + * CK_KABI_BROKEN_INSERT + * CK_KABI_BROKEN_REMOVE + * Insert a field into the middle of a struct / delete a field from a struct. + * Note that this breaks kABI! It can be done only when it's certain that + * no 3rd party driver can validly reach into the struct. A typical + * example is a struct that is both (a) referenced only through a long + * chain of pointers from another struct that is part of a whitelisted + * symbol, and (b) kernel internal only; it should never have been visible + * to genksyms in the first place. + * + * Another example is structs that are explicitly exempt from the kABI + * guarantee but we did not have enough foresight to use CK_KABI_EXCLUDE. + * In this case, the warning for CK_KABI_EXCLUDE applies. + * + * A detailed explanation of correctness of every CK_KABI_BROKEN_* macro + * use is especially important. + * + * CK_KABI_BROKEN_INSERT_BLOCK + * CK_KABI_BROKEN_REMOVE_BLOCK + * A version of CK_KABI_BROKEN_INSERT / REMOVE that allows multiple fields + * to be inserted or removed together. All fields need to be terminated + * by ';' inside(!) the macro parameter. The macro itself must not be + * terminated by ';'. + * + * CK_KABI_BROKEN_REPLACE + * Replace a field by a different one without doing any checking. This + * allows replacing a field by another with a different size. Similarly + * to other CK_KABI_BROKEN macros, use of this indicates a kABI breakage. + * + * CK_KABI_BROKEN_INSERT_ENUM + * CK_KABI_BROKEN_REMOVE_ENUM + * Insert a field into the middle of an enumeration type / delete a field from + * an enumeration type. Note that this can break kABI, especially if the + * number of enum fields is used in an array within a structure. It can be + * done only when it is certain that no 3rd party driver will use the + * enumeration type or a structure that embeds an array with size determined + * by an enumeration type.
+ * + * CK_KABI_EXTEND_ENUM + * Adds a new field to an enumeration type. This must always be added to + * the end of the enum. Before using this macro, make sure this is actually + * safe to do. + */ + +#ifdef __GENKSYMS__ + +# define CK_KABI_CONST +# define CK_KABI_ADD_MODIFIER(_new) +# define CK_KABI_EXTEND(_new) +# define CK_KABI_FILL_HOLE(_new) +# define CK_KABI_FORCE_CHANGE(ver) __attribute__((ck_kabi_change ## ver)) +# define CK_KABI_RENAME(_orig, _new) _orig +# define CK_KABI_HIDE_INCLUDE(_file) +# define CK_KABI_FAKE_INCLUDE(_file) _file +# define CK_KABI_BROKEN_INSERT(_new) +# define CK_KABI_BROKEN_REMOVE(_orig) _orig; +# define CK_KABI_BROKEN_INSERT_BLOCK(_new) +# define CK_KABI_BROKEN_REMOVE_BLOCK(_orig) _orig +# define CK_KABI_BROKEN_REPLACE(_orig, _new) _orig; +# define CK_KABI_BROKEN_INSERT_ENUM(_new) +# define CK_KABI_BROKEN_REMOVE_ENUM(_orig) _orig, +# define CK_KABI_EXTEND_ENUM(_new) + +# define _CK_KABI_DEPRECATE(_type, _orig) _type _orig +# define _CK_KABI_DEPRECATE_FN(_type, _orig, _args...) _type (*_orig)(_args) +# define _CK_KABI_REPLACE(_orig, _new) _orig +# define _CK_KABI_EXCLUDE(_elem) + +#else + +# define CK_KABI_ALIGN_WARNING ". Disable CONFIG_CK_KABI_SIZE_ALIGN_CHECKS if debugging." + +# define CK_KABI_CONST const +# define CK_KABI_ADD_MODIFIER(_new) _new +# define CK_KABI_EXTEND(_new) _new; +# define CK_KABI_FILL_HOLE(_new) _new; +# define CK_KABI_FORCE_CHANGE(ver) +# define CK_KABI_RENAME(_orig, _new) _new +# define CK_KABI_HIDE_INCLUDE(_file) _file +# define CK_KABI_FAKE_INCLUDE(_file) +# define CK_KABI_BROKEN_INSERT(_new) _new; +# define CK_KABI_BROKEN_REMOVE(_orig) +# define CK_KABI_BROKEN_INSERT_BLOCK(_new) _new +# define CK_KABI_BROKEN_REMOVE_BLOCK(_orig) +# define CK_KABI_BROKEN_REPLACE(_orig, _new) _new; +# define CK_KABI_BROKEN_INSERT_ENUM(_new) _new, +# define CK_KABI_BROKEN_REMOVE_ENUM(_orig) +# define CK_KABI_EXTEND_ENUM(_new) _new, + +#if IS_BUILTIN(CONFIG_CK_KABI_SIZE_ALIGN_CHECKS) +# define __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new) \ + union { \ + _Static_assert(sizeof(struct{_new;}) <= sizeof(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_new) " is larger than " __stringify(_orig) CK_KABI_ALIGN_WARNING); \ + _Static_assert(__alignof__(struct{_new;}) <= __alignof__(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_orig) " is not aligned the same as " __stringify(_new) CK_KABI_ALIGN_WARNING); \ + } +# define __CK_KABI_CHECK_SIZE(_item, _size) \ + _Static_assert(sizeof(struct{_item;}) <= _size, \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_item) " is larger than the reserved size (" __stringify(_size) " bytes)" CK_KABI_ALIGN_WARNING) +#else +# define __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new) +# define __CK_KABI_CHECK_SIZE(_item, _size) +#endif + +#define CK_KABI_UNIQUE_ID __PASTE(ck_kabi_hidden_, __LINE__) + +# define _CK_KABI_DEPRECATE(_type, _orig) _type ck_reserved_##_orig +# define _CK_KABI_DEPRECATE_FN(_type, _orig, _args...) \ + _type (* ck_reserved_##_orig)(_args) + +#ifdef CONFIG_CK_KABI_RESERVE +# define _CK_KABI_REPLACE(_orig, _new) \ + union { \ + _new; \ + struct { \ + _orig; \ + } CK_KABI_UNIQUE_ID; \ + __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new); \ + } +#else +# define _CK_KABI_REPLACE(_orig, _new) CK_KABI_BROKEN_REPLACE(_orig, _new) +#endif + +# define _CK_KABI_EXCLUDE(_elem) _elem + +#endif /* __GENKSYMS__ */ + +# define CK_KABI_DEPRECATE(_type, _orig) _CK_KABI_DEPRECATE(_type, _orig); +# define CK_KABI_DEPRECATE_FN(_type, _orig, _args...) 
\ + _CK_KABI_DEPRECATE_FN(_type, _orig, _args); +# define CK_KABI_REPLACE(_orig, _new) _CK_KABI_REPLACE(_orig, _new); + +#define _CK_KABI_REPLACE1(_new) _new; +#define _CK_KABI_REPLACE2(_new, ...) _new; _CK_KABI_REPLACE1(__VA_ARGS__) +#define _CK_KABI_REPLACE3(_new, ...) _new; _CK_KABI_REPLACE2(__VA_ARGS__) +#define _CK_KABI_REPLACE4(_new, ...) _new; _CK_KABI_REPLACE3(__VA_ARGS__) +#define _CK_KABI_REPLACE5(_new, ...) _new; _CK_KABI_REPLACE4(__VA_ARGS__) +#define _CK_KABI_REPLACE6(_new, ...) _new; _CK_KABI_REPLACE5(__VA_ARGS__) +#define _CK_KABI_REPLACE7(_new, ...) _new; _CK_KABI_REPLACE6(__VA_ARGS__) +#define _CK_KABI_REPLACE8(_new, ...) _new; _CK_KABI_REPLACE7(__VA_ARGS__) +#define _CK_KABI_REPLACE9(_new, ...) _new; _CK_KABI_REPLACE8(__VA_ARGS__) +#define _CK_KABI_REPLACE10(_new, ...) _new; _CK_KABI_REPLACE9(__VA_ARGS__) +#define _CK_KABI_REPLACE11(_new, ...) _new; _CK_KABI_REPLACE10(__VA_ARGS__) +#define _CK_KABI_REPLACE12(_new, ...) _new; _CK_KABI_REPLACE11(__VA_ARGS__) + +#define CK_KABI_REPLACE_SPLIT(_orig, ...) _CK_KABI_REPLACE(_orig, \ + struct { __PASTE(_CK_KABI_REPLACE, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__) }); + +# define CK_KABI_RESERVE(n) _CK_KABI_RESERVE(n); + +#define _CK_KABI_USE1(n, _new) _CK_KABI_RESERVE(n), _new +#define _CK_KABI_USE2(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE1(__VA_ARGS__) +#define _CK_KABI_USE3(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE2(__VA_ARGS__) +#define _CK_KABI_USE4(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE3(__VA_ARGS__) +#define _CK_KABI_USE5(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE4(__VA_ARGS__) +#define _CK_KABI_USE6(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE5(__VA_ARGS__) +#define _CK_KABI_USE7(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE6(__VA_ARGS__) +#define _CK_KABI_USE8(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE7(__VA_ARGS__) +#define _CK_KABI_USE9(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE8(__VA_ARGS__) +#define _CK_KABI_USE10(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE9(__VA_ARGS__) +#define _CK_KABI_USE11(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE10(__VA_ARGS__) +#define _CK_KABI_USE12(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE11(__VA_ARGS__) + +#define _CK_KABI_USE(...) _CK_KABI_REPLACE(__VA_ARGS__) +#define CK_KABI_USE(n, ...) _CK_KABI_USE(__PASTE(_CK_KABI_USE, COUNT_ARGS(__VA_ARGS__))(n, __VA_ARGS__)); + +# define CK_KABI_USE_SPLIT(n, ...) 
CK_KABI_REPLACE_SPLIT(_CK_KABI_RESERVE(n), __VA_ARGS__) + +#ifdef CONFIG_CK_KABI_RESERVE +# define _CK_KABI_RESERVE(n) unsigned long ck_reserved##n +#else +# define _CK_KABI_RESERVE(n) +#endif + +#define CK_KABI_EXCLUDE(_elem) _CK_KABI_EXCLUDE(_elem); + +#define CK_KABI_EXTEND_WITH_SIZE(_new, _size) \ + CK_KABI_EXTEND(union { \ + _new; \ + unsigned long CK_KABI_UNIQUE_ID[_size]; \ + __CK_KABI_CHECK_SIZE(_new, 8 * (_size)); \ + }) + +#ifdef CONFIG_CK_KABI_RESERVE +#define _CK_KABI_AUX_PTR(_struct) \ + size_t _struct##_size_ck_reserved; \ + _CK_KABI_EXCLUDE(struct _struct##_ck_reserved *_ck_reserved) +#define CK_KABI_AUX_PTR(_struct) \ + _CK_KABI_AUX_PTR(_struct); + +#define _CK_KABI_AUX_EMBED(_struct) \ + size_t _struct##_size_ck_reserved; \ + _CK_KABI_EXCLUDE(struct _struct##_ck_reserved _ck_reserved) +#define CK_KABI_AUX_EMBED(_struct) \ + _CK_KABI_AUX_EMBED(_struct); + +#define CK_KABI_USE_AUX_PTR(n1, n2, _struct) \ + CK_KABI_USE(n1, n2, \ + struct { CK_KABI_AUX_PTR(_struct) }) + +#define CK_KABI_AUX_SET_SIZE(_name, _struct) ({ \ + (_name)->_struct##_size_ck_reserved = sizeof(struct _struct##_ck_reserved); \ +}) + +#define CK_KABI_AUX_INIT_SIZE(_struct) \ + ._struct##_size_ck_reserved = sizeof(struct _struct##_ck_reserved), + +#define CK_KABI_AUX(_ptr, _struct, _field) ({ \ + size_t __off = offsetof(struct _struct##_ck_reserved, _field); \ + (_ptr)->_struct##_size_ck_reserved > __off ? true : false; \ +}) +#else +#define CK_KABI_AUX_PTR(_struct) +#define CK_KABI_AUX_EMBED(_struct) +#define CK_KABI_USE_AUX_PTR(n1, n2, _struct) +#define CK_KABI_AUX_SET_SIZE(_name, _struct) +#define CK_KABI_AUX_INIT_SIZE(_struct) +#define CK_KABI_AUX(_ptr, _struct, _field) (false) +#endif /* CONFIG_CK_KABI_RESERVE */ + +#endif /* _LINUX_CK_KABI_H */ diff --git a/include/linux/cma.h b/include/linux/cma.h index 63873b93deaa62634da9ed3fb19543ba0131abdf..326ec54b8efaabed425be610e681887f6ef1bbd0 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -56,4 +56,6 @@ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); extern void cma_reserve_pages_on_error(struct cma *cma); +extern int __init cma_alloc_areas(unsigned int max_cma_size); +extern void cma_enable_concurrency(struct cma *cma); #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 5a4054f17cbc688e071eedfadd160fa8897547cb..db140f106f3df532eae0563e53255234392c5377 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -116,6 +116,14 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, */ #define __stringify_label(n) #n +#define __annotate_reachable(c) ({ \ + asm volatile(__stringify_label(c) ":\n\t" \ + ".pushsection .discard.reachable\n\t" \ + ".long " __stringify_label(c) "b - .\n\t" \ + ".popsection\n\t"); \ +}) +#define annotate_reachable() __annotate_reachable(__COUNTER__) + #define __annotate_unreachable(c) ({ \ asm volatile(__stringify_label(c) ":\n\t" \ ".pushsection .discard.unreachable\n\t" \ @@ -125,9 +133,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #define annotate_unreachable() __annotate_unreachable(__COUNTER__) /* Annotate a C jump table to allow objtool to follow the code flow */ -#define __annotate_jump_table __section(".rodata..c_jump_table") +#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #") #else /* !CONFIG_OBJTOOL */ +#define annotate_reachable() #define annotate_unreachable() #define __annotate_jump_table 
#endif /* CONFIG_OBJTOOL */ diff --git a/include/linux/coredump.h b/include/linux/coredump.h index d3eba4360150875d3a8f4890b25e985486f9eaf8..52c7c487c9faa0acb332e40438afa41f9e3a2f7b 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -28,6 +28,8 @@ struct coredump_params { int vma_count; size_t vma_data_size; struct core_vma_metadata *vma_meta; + + CK_KABI_RESERVE(1) }; /* diff --git a/include/linux/cper.h b/include/linux/cper.h index c1a7dc3251215a5ba0e982568a746ff5b04602d1..ba5ee2355370eb6f176698e4a186b741d70fbd50 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -578,4 +578,5 @@ void cper_estatus_print(const char *pfx, int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus); int cper_estatus_check(const struct acpi_hest_generic_status *estatus); +const char *cper_zdi_zpi_err_type_str(unsigned int etype); #endif diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 624d4a38c358a08f2ca417523058bc1c6a319a8d..2b5bc17a9ae7f34e00aa99c88cd959100881f7ec 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -152,7 +152,8 @@ enum cpuhp_state { CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_IRQ_RISCV_STARTING, - CPUHP_AP_IRQ_LOONGARCH_STARTING, + CPUHP_AP_IRQ_EIOINTC_STARTING, + CPUHP_AP_IRQ_AVECINTC_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_MICROCODE_LOADER, @@ -245,6 +246,7 @@ enum cpuhp_state { CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE, CPUHP_AP_PERF_CSKY_ONLINE, + CPUHP_AP_ARM_SDEI_ONLINE, CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RANDOM_ONLINE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 3183aeb7f5b4238a2b88349ae4bf8c492475460f..3fd1fe4e6531729640e2fc8724f50ac44e07051a 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -44,6 +44,11 @@ struct cpuidle_state_usage { unsigned long long s2idle_usage; unsigned long long s2idle_time; /* in US */ #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct cpuidle_state { @@ -74,6 +79,11 @@ struct cpuidle_state { int (*enter_s2idle)(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* Idle State Flags */ @@ -111,6 +121,11 @@ struct cpuidle_device { cpumask_t coupled_cpus; struct cpuidle_coupled *coupled; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); @@ -165,6 +180,9 @@ struct cpuidle_driver { /* preferred governor to switch at register time */ const char *governor; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #ifdef CONFIG_CPU_IDLE diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index dbdbf1451cadd753363af69b04c9cc111ee4c020..a354aa5dd57dc8bc4883976908cd3dd72b9f33bc 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -388,7 +388,30 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) } /** - * cpumask_nth - get the first cpu in a cpumask + * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one. + * @mask1: the first input cpumask + * @mask2: the second input cpumask + * @cpu: the cpu to ignore + * + * Returns >= nr_cpu_ids if no cpus set. 
+ */ +static inline +unsigned int cpumask_any_and_but(const struct cpumask *mask1, + const struct cpumask *mask2, + unsigned int cpu) +{ + unsigned int i; + + cpumask_check(cpu); + i = cpumask_first_and(mask1, mask2); + if (i != cpu) + return i; + + return cpumask_next_and(cpu, mask1, mask2); +} + +/** + * cpumask_nth - get the Nth cpu in a cpumask * @srcp: the cpumask pointer * @cpu: the N'th cpu to find, starting from 0 * diff --git a/include/linux/cred.h b/include/linux/cred.h index bb55703e1166413f0c7148ca219a1ec006fdcc32..023712a4ca866c5d2040936237deb6b3a4219bf8 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -143,6 +143,15 @@ struct cred { int non_rcu; /* Can we skip RCU deletion? */ struct rcu_head rcu; /* RCU deletion hook */ }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) } __randomize_layout; extern void __put_cred(struct cred *); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 31f6fee0c36c6448c00358679301db7538bf307a..772a8fcb13bb8f555cd6c38df00cf802e906bb7e 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -430,6 +430,8 @@ struct crypto_tfm { struct crypto_alg *__crt_alg; + CK_KABI_RESERVE(1) + void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; }; diff --git a/include/linux/dax.h b/include/linux/dax.h index b463502b16e17fbc08ff3a982a9597f5d70cb06b..66e663664accaf5ebb462b74dff39fc3224a73e2 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -128,6 +128,8 @@ void set_dax_nocache(struct dax_device *dax_dev); void set_dax_nomc(struct dax_device *dax_dev); struct writeback_control; +int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, + pgoff_t *pgoff); #if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX) int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk); void dax_remove_host(struct gendisk *disk); @@ -248,6 +250,8 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index); +int dax_copy_range(struct block_device *bdev, struct dax_device *dax_dev, + u64 src_addr, u64 dst_addr, size_t size); int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, struct inode *dest, loff_t destoff, loff_t len, bool *is_same, diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 6b351e009f5976db447e8da82e579713d02f20b5..05b9b6e86c3a9a4d836f3d665e8f03ef18d6957b 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -14,6 +14,7 @@ #include #include #include +#include struct path; struct file; @@ -111,6 +112,9 @@ struct dentry { struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */ struct rcu_head d_rcu; } d_u; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __randomize_layout; /* @@ -140,6 +144,11 @@ struct dentry_operations { struct vfsmount *(*d_automount)(struct path *); int (*d_manage)(const struct path *, bool); struct dentry *(*d_real)(struct dentry *, const struct inode *); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned; /* @@ -296,6 +305,7 @@ extern char *d_absolute_path(const struct path *, char *, int); extern char *d_path(const struct path *, char *, int); extern char *dentry_path_raw(const struct dentry *, char *, int); extern char *dentry_path(const struct dentry *, char *, int); +extern char 
*d_absolute_path_locked(const struct path *, char *, int); /* Allocation counts.. */ diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 6639f48dac365a6013a2b11ef4ebc81b5556f4ac..40f9990da3d9db3f9c988c8697be1a65e59c525a 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -55,6 +55,13 @@ struct task_delay_info { u32 compact_count; /* total count of memory compact */ u32 wpcopy_count; /* total count of write-protect copy */ u32 irq_count; /* total count of IRQ/SOFTIRQ */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; #endif diff --git a/include/linux/device.h b/include/linux/device.h index 3627b26b243e61b1809160c12740d8200412ee4d..092bbdb60cdf37fc3760151a910c31c1f76941ce 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -805,6 +805,23 @@ struct device { #ifdef CONFIG_DMA_OPS_BYPASS bool dma_ops_bypass : 1; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; /** diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h index ae10c432275437dec02f431278f976e7059446a0..ccaa3cf10c56469d96a0a241f7970fd2c8513357 100644 --- a/include/linux/device/bus.h +++ b/include/linux/device/bus.h @@ -107,6 +107,9 @@ struct bus_type { const struct iommu_ops *iommu_ops; bool need_parent_lock; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; int __must_check bus_register(const struct bus_type *bus); diff --git a/include/linux/device/class.h b/include/linux/device/class.h index abf3d3bfb6fe4dc3f29e26c599a8b18a676793ba..3048719f63c2dc9a49fb5f9ab9f06932a6474814 100644 --- a/include/linux/device/class.h +++ b/include/linux/device/class.h @@ -69,6 +69,11 @@ struct class { void (*get_ownership)(const struct device *dev, kuid_t *uid, kgid_t *gid); const struct dev_pm_ops *pm; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct class_dev_iter { diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h index 7738f458995fba858e0e3ead9e5d12ceed35730d..1d806b1d593c5affb2ff048552e586f4174bee93 100644 --- a/include/linux/device/driver.h +++ b/include/linux/device/driver.h @@ -119,6 +119,11 @@ struct device_driver { void (*coredump) (struct device *dev); struct driver_private *p; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index b3772edca2e6e0ac742e5d41bd9b2fad6fba1647..f92fc14619775e2f3fc12731c25082618eb93f87 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -277,6 +277,8 @@ struct dma_fence_ops { * This callback is optional. 
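 *
 * Consumers normally reach this via dma_fence_set_deadline(); a sketch of
 * a hypothetical compositor hinting at its next frame deadline::
 *
 *	dma_fence_set_deadline(fence, ktime_add_ms(ktime_get(), 16));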
*/ void (*set_deadline)(struct dma_fence *fence, ktime_t deadline); + + CK_KABI_RESERVE(1) }; void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index f2fc203fb8a1a253d3abad99402d9e7da148d2ea..7b5b7dd2cd954dcb2e11fb6901dc6c1d024e3213 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -82,6 +82,13 @@ struct dma_map_ops { size_t (*max_mapping_size)(struct device *dev); size_t (*opt_mapping_size)(void); unsigned long (*get_merge_boundary)(struct device *dev); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; #ifdef CONFIG_DMA_OPS @@ -509,4 +516,21 @@ pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, } #endif /* CONFIG_PCI_P2PDMA */ +#if defined CONFIG_PCI && defined CONFIG_X86 + +extern bool is_zhaoxin_kh40000; +extern const struct dma_map_ops kh40000_dma_direct_ops; +void kh40000_set_iommu_dma_ops(struct device *dev); + +#else + +bool __weak is_zhaoxin_kh40000; +static inline void kh40000_set_iommu_dma_ops(struct device *dev) +{ + +} + + +#endif + #endif /* _LINUX_DMA_MAP_OPS_H */ diff --git a/include/linux/dmar.h b/include/linux/dmar.h index e34b601b71fd25c4b68c6e6c44075fea16275cd0..543c53e84a7014f2dfba99beb8d1fd790aa69f49 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -112,6 +112,9 @@ extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, void *start, void*end, u16 segment, struct dmar_dev_scope *devices, int devices_cnt); +extern bool dmar_rmrr_acpi_insert_dev_scope(u8 device_number, + struct acpi_device *adev, void *start, void *end, + struct dmar_dev_scope *devices, int devices_cnt); extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment, struct dmar_dev_scope *devices, int count); @@ -144,6 +147,7 @@ extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg); extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg); extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); +extern int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev); extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); #else /* !CONFIG_INTEL_IOMMU: */ static inline int intel_iommu_init(void) { return -ENODEV; } @@ -155,6 +159,11 @@ static inline void intel_iommu_shutdown(void) { } #define dmar_release_one_atsr dmar_res_noop #define dmar_parse_one_satc dmar_res_noop +static inline int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev) +{ + return 0; +} + static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { return 0; diff --git a/include/linux/efi.h b/include/linux/efi.h index 80b21d1c6eafafd825686b8454e66808a83ce470..aab980c98a2360f2252c96bf509946a9db4252a7 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -694,6 +694,11 @@ extern struct efi { extern struct mm_struct efi_mm; +static inline bool mm_is_efi(struct mm_struct *mm) +{ + return IS_ENABLED(CONFIG_EFI) && mm == &efi_mm; +} + static inline int efi_guidcmp (efi_guid_t left, efi_guid_t right) { diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index b9caa01dfac48594e463e954a3bbd65a5b8ad96e..2934c3a4edaeeb15b1801b750d54fb33e4fc6302 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -24,6 +24,8 @@ struct em_perf_state 
{ unsigned long power; unsigned long cost; unsigned long flags; + + CK_KABI_RESERVE(1) }; /* @@ -56,6 +58,9 @@ struct em_perf_domain { struct em_perf_state *table; int nr_perf_states; unsigned long flags; + + CK_KABI_RESERVE(1) + unsigned long cpus[]; }; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 1b523fd48586f6ec19308054711fa50386c33041..a01872f6b49a751e0f7ae9d95d349d359f3c0ff5 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -172,6 +172,8 @@ struct ethtool_link_ksettings { __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); } link_modes; u32 lanes; + + CK_KABI_RESERVE(1) }; /** @@ -388,6 +390,8 @@ struct ethtool_pause_stats { u64 tx_pause_frames; u64 rx_pause_frames; ); + + CK_KABI_RESERVE(1) }; #define ETHTOOL_MAX_LANES 8 @@ -912,6 +916,23 @@ struct ethtool_ops { int (*set_mm)(struct net_device *dev, struct ethtool_mm_cfg *cfg, struct netlink_ext_ack *extack); void (*get_mm_stats)(struct net_device *dev, struct ethtool_mm_stats *stats); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; int ethtool_check_ops(const struct ethtool_ops *ops); diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 11fbd0ee1370809f9f1770aba8e8e3d3cec27be5..14bc03473cc0f8318ef5c362aa49119b4f8b7535 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -3,6 +3,7 @@ #define LINUX_EXPORTFS_H 1 #include +#include struct dentry; struct iattr; @@ -225,6 +226,9 @@ struct export_operations { */ #define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */ unsigned long flags; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid, diff --git a/include/linux/fb.h b/include/linux/fb.h index 322b4d20afa558170bb46b9d9fa2f77786b46df3..6b088a55932c846d7413571dfbb7f28d542618c2 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -218,6 +218,8 @@ struct fb_deferred_io { struct list_head pagereflist; /* list of pagerefs for touched pages */ /* callback */ void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/linux/filelock.h b/include/linux/filelock.h index 95e868e09e298bb70ca23ec760d1c00fdb07201e..2225f53aa55f6751a7f1a756febd6958a031ac91 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -31,6 +31,9 @@ struct file_lock; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct lock_manager_operations { @@ -45,6 +48,11 @@ struct lock_manager_operations { bool (*lm_breaker_owns_lease)(struct file_lock *); bool (*lm_lock_expirable)(struct file_lock *cfl); void (*lm_expire_lock)(void); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct lock_manager { @@ -124,6 +132,8 @@ struct file_lock { struct inode *inode; } ceph; } fl_u; + + CK_KABI_RESERVE(1) } __randomize_layout; struct file_lock_context { diff --git a/include/linux/filter.h b/include/linux/filter.h index 5090e940ba3e46fa9cabd8b8bcea08d719b20b51..dabd2ea9c7ddd14567915a7135752525d43683ae 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ 
-570,6 +570,9 @@ struct bpf_prog_stats { u64_stats_t nsecs; u64_stats_t misses; struct u64_stats_sync syncp; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __aligned(2 * sizeof(u64)); struct sk_filter { diff --git a/include/linux/fs.h b/include/linux/fs.h index 6c3d86532e3f91ce8cee577ea8525521c69e6bfb..4cdaabab293fece863bf31563fcca88961517cfb 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -443,6 +443,11 @@ struct address_space_operations { sector_t *span); void (*swap_deactivate)(struct file *file); int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; extern const struct address_space_operations empty_aops; @@ -484,11 +489,18 @@ struct address_space { pgoff_t writeback_index; const struct address_space_operations *a_ops; unsigned long flags; - struct rw_semaphore i_mmap_rwsem; errseq_t wb_err; spinlock_t private_lock; struct list_head private_list; + struct rw_semaphore i_mmap_rwsem; void *private_data; + + struct fast_reflink_work *fast_reflink_work; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __attribute__((aligned(sizeof(long)))) __randomize_layout; /* * On most architectures that alignment is already the case; but @@ -748,6 +760,9 @@ struct inode { #endif void *i_private; /* fs or device private pointer */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __randomize_layout; struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode); @@ -1249,8 +1264,22 @@ struct super_block { struct fsnotify_mark_connector __rcu *s_fsnotify_marks; #endif + /* + * q: why are s_id and s_sysfs_name not the same? both are human + * readable strings that identify the filesystem + * a: s_id is allowed to change at runtime; it's used in log messages, + * and we want to when a device starts out as single device (s_id is dev + * name) but then a device is hot added and we have to switch to + * identifying it by UUID + * but s_sysfs_name is a handle for programmatic access, and can't + * change at runtime + */ char s_id[32]; /* Informational name */ uuid_t s_uuid; /* UUID */ + u8 s_uuid_len; /* Default 16, possibly smaller for weird filesystems */ + + /* if set, fs shows up under sysfs at /sys/fs/$FSTYP/s_sysfs_name */ + char s_sysfs_name[UUID_STRING_LEN + 1]; unsigned int s_max_links; @@ -1843,6 +1872,8 @@ void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode, extern bool may_open_dev(const struct path *path); umode_t mode_strip_sgid(struct mnt_idmap *idmap, const struct inode *dir, umode_t mode); +bool in_group_or_capable(struct mnt_idmap *idmap, + const struct inode *inode, vfsgid_t vfsgid); /* * This is the "filldir" function type, used by readdir() to let @@ -1899,6 +1930,8 @@ struct dir_context { */ #define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) +#define REMAP_FILE_FAST_REFLINK (1 << 2) + /* * These flags control the behavior of vfs_copy_file_range(). * They are not available to the user via syscall. 
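+
+/*
+ * The s_uuid_len and s_sysfs_name fields introduced above pair with the
+ * super_set_uuid() and super_set_sysfs_name_*() helpers added further down
+ * in this fs.h diff. A minimal fill_super() sketch, assuming a conventional
+ * 16-byte on-disk UUID::
+ *
+ *	super_set_uuid(sb, disk_uuid, 16);	/* sets s_uuid and s_uuid_len */
+ *	super_set_sysfs_name_uuid(sb);		/* s_sysfs_name = "%pU" of s_uuid */
+ */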
@@ -1954,6 +1987,11 @@ struct file_operations { int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags); int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *, unsigned int poll_flags); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; /* Wrap a directory iterator that needs exclusive inode access */ @@ -2004,6 +2042,11 @@ struct inode_operations { struct dentry *dentry, struct fileattr *fa); int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa); struct offset_ctx *(*get_offset_ctx)(struct inode *inode); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned; static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, @@ -2087,6 +2130,11 @@ struct super_operations { long (*free_cached_objects)(struct super_block *, struct shrink_control *); void (*shutdown)(struct super_block *sb); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -2391,6 +2439,11 @@ struct file_system_type { struct lock_class_key i_mutex_key; struct lock_class_key invalidate_lock_key; struct lock_class_key i_mutex_dir_key; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) @@ -2453,6 +2506,44 @@ extern __printf(2, 3) int super_setup_bdi_name(struct super_block *sb, char *fmt, ...); extern int super_setup_bdi(struct super_block *sb); +static inline void super_set_uuid(struct super_block *sb, const u8 *uuid, unsigned len) +{ + if (WARN_ON(len > sizeof(sb->s_uuid))) + len = sizeof(sb->s_uuid); + sb->s_uuid_len = len; + memcpy(&sb->s_uuid, uuid, len); +} + +/* set sb sysfs name based on sb->s_bdev */ +static inline void super_set_sysfs_name_bdev(struct super_block *sb) +{ + snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pg", sb->s_bdev); +} + +/* set sb sysfs name based on sb->s_uuid */ +static inline void super_set_sysfs_name_uuid(struct super_block *sb) +{ + WARN_ON(sb->s_uuid_len != sizeof(sb->s_uuid)); + snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pU", sb->s_uuid.b); +} + +/* set sb sysfs name based on sb->s_id */ +static inline void super_set_sysfs_name_id(struct super_block *sb) +{ + strscpy(sb->s_sysfs_name, sb->s_id, sizeof(sb->s_sysfs_name)); +} + +/* try to use something standard before you use this */ +__printf(2, 3) +static inline void super_set_sysfs_name_generic(struct super_block *sb, const char *fmt, ...) 
+{ + va_list args; + + va_start(args, fmt); + vsnprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), fmt, args); + va_end(args); +} + extern int current_umask(void); extern void ihold(struct inode * inode); diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index c13e99cbbf8162bb395a48f3709cbd69ac009e7d..4b4bfef6f053a49557118c3592ef64fc7ce1ab39 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -160,6 +160,12 @@ extern int get_tree_keyed(struct fs_context *fc, int setup_bdev_super(struct super_block *sb, int sb_flags, struct fs_context *fc); + +#define GET_TREE_BDEV_QUIET_LOOKUP 0x0001 +int get_tree_bdev_flags(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc), unsigned int flags); + extern int get_tree_bdev(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index e8921871ef9aaa4ff9a8c74359bf49ebe95d8b3e..4281d27af70a59816a1f8b276e977c7f64130239 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -340,6 +340,8 @@ struct ftrace_ops { unsigned long direct_call; #endif #endif + + CK_KABI_RESERVE(1) }; extern struct ftrace_ops __rcu *ftrace_ops_list; diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 525cc031596b681b8c9fdad3624a626ff2c3537d..64a0c689361c8ea7850f90ed8e102765b38cead9 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -13,6 +13,7 @@ #include #include #include +#include struct fwnode_operations; struct device; @@ -45,6 +46,13 @@ struct fwnode_handle { struct list_head suppliers; struct list_head consumers; u8 flags; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; /* @@ -166,6 +174,11 @@ struct fwnode_operations { void __iomem *(*iomap)(struct fwnode_handle *fwnode, int index); int (*irq_get)(const struct fwnode_handle *fwnode, unsigned int index); int (*add_links)(struct fwnode_handle *fwnode); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define fwnode_has_op(fwnode, op) \ diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h index dfde1e1e321c381ceec969f3818fd78db169fd7e..ff2899762749a79b7c18470e598c4917e2242d3b 100644 --- a/include/linux/gfp_types.h +++ b/include/linux/gfp_types.h @@ -60,6 +60,11 @@ typedef unsigned int __bitwise gfp_t; #else #define ___GFP_NOLOCKDEP 0 #endif +#ifdef CONFIG_KFENCE +#define ___GFP_NOKFENCE 0x8000000u +#else +#define ___GFP_NOKFENCE 0 +#endif /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* @@ -101,12 +106,15 @@ typedef unsigned int __bitwise gfp_t; * node with no fallbacks or placement policy enforcements. * * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. + * + * %__GFP_NOKFENCE informs DO NOT try to alloc page from kfence pool. 
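+ *
+ * A minimal sketch of an allocation that must bypass the KFENCE pool
+ * (context and caller are illustrative only)::
+ *
+ *	page = alloc_pages(GFP_KERNEL | __GFP_NOKFENCE, 0);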
*/ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) +#define __GFP_NOKFENCE ((__force gfp_t)___GFP_NOKFENCE) /** * DOC: Watermark modifiers @@ -251,7 +259,7 @@ typedef unsigned int __bitwise gfp_t; #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_SHIFT (28) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /** diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 75607d4ba26cb7b75802522445aba6cbb8d2b681..bf4ea9f2f45716ebba8c60b2a4b7c4933a30d954 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -371,19 +371,23 @@ static inline int copy_mc_highpage(struct page *to, struct page *from) return ret; } #else +#ifndef __HAVE_ARCH_COPY_MC_USER_HIGHPAGE static inline int copy_mc_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { copy_user_highpage(to, from, vaddr, vma); return 0; } +#endif +#ifndef __HAVE_ARCH_COPY_MC_HIGHPAGE static inline int copy_mc_highpage(struct page *to, struct page *from) { copy_highpage(to, from); return 0; } #endif +#endif static inline void memcpy_page(struct page *dst_page, size_t dst_off, struct page *src_page, size_t src_off, diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 254d4a898179c0ad9c3a4b41a07b8e2ed9b1741b..461742279f61b355a1670833354ec1eaec954b8d 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -20,6 +20,7 @@ #include #include #include +#include struct hrtimer_clock_base; struct hrtimer_cpu_base; @@ -124,6 +125,10 @@ struct hrtimer { u8 is_rel; u8 is_soft; u8 is_hard; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; /** @@ -165,6 +170,9 @@ struct hrtimer_clock_base { struct timerqueue_head active; ktime_t (*get_time)(void); ktime_t offset; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __hrtimer_clock_base_align; enum hrtimer_base_type { @@ -237,6 +245,9 @@ struct hrtimer_cpu_base { ktime_t softirq_expires_next; struct hrtimer *softirq_next_timer; struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } ____cacheline_aligned; static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index fc789c0ac85b8fc78e6c541c01ece0b94bb5f348..5a7100db2956927bd8b4a8aa93a8a609a9da79e9 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -6,6 +6,7 @@ #include #include /* only for vma_is_dax() */ +#include vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, @@ -63,10 +64,103 @@ ssize_t single_hugepage_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag flag); extern struct kobj_attribute shmem_enabled_attr; +extern struct kobj_attribute thpsize_shmem_enabled_attr; #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) #define HPAGE_PMD_NR (1< PMD_ORDER) + return; + + this_cpu_add(mthp_stats.stats[order][item], delta); +} + +static inline void count_mthp_stat(int order, enum mthp_stat_item item) +{ + mod_mthp_stat(order, item, 1); +} + +#else +static inline void mod_mthp_stat(int order, enum 
mthp_stat_item item, int delta) +{ +} + +static inline void count_mthp_stat(int order, enum mthp_stat_item item) +{ +} +#endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define HPAGE_PMD_SHIFT PMD_SHIFT #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) @@ -77,45 +171,94 @@ extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) extern unsigned long transparent_hugepage_flags; +extern unsigned long huge_anon_orders_always; +extern unsigned long huge_anon_orders_madvise; +extern unsigned long huge_anon_orders_inherit; +extern unsigned long huge_file_orders_always; +extern int huge_file_exec_order; + +static inline unsigned long file_orders_always(void) +{ + return READ_ONCE(huge_file_orders_always); +} -#define hugepage_flags_enabled() \ - (transparent_hugepage_flags & \ - ((1<vm_start >> PAGE_SHIFT) - vma->vm_pgoff, - HPAGE_PMD_NR)) + hpage_size >> PAGE_SHIFT)) return false; } - haddr = addr & HPAGE_PMD_MASK; + haddr = ALIGN_DOWN(addr, hpage_size); - if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) + if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end) return false; return true; } +/* + * Filter the bitfield of input orders to the ones suitable for use in the vma. + * See thp_vma_suitable_order(). + * All orders that pass the checks are returned as a bitfield. + */ +static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, + unsigned long addr, unsigned long orders) +{ + int order; + + /* + * Iterate over orders, highest to lowest, removing orders that don't + * meet alignment requirements from the set. Exit loop at first order + * that meets requirements, since all lower orders must also meet + * requirements. + */ + + order = highest_order(orders); + + while (orders) { + if (thp_vma_suitable_order(vma, addr, order)) + break; + order = next_order(&orders, order); + } + + return orders; +} + static inline bool file_thp_enabled(struct vm_area_struct *vma) { struct inode *inode; @@ -130,8 +273,60 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma) !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); } -bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, - bool smaps, bool in_pf, bool enforce_sysfs); +unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs, + unsigned long orders); + +/** + * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma + * @vma: the vm area to check + * @vm_flags: use these vm_flags instead of vma->vm_flags + * @smaps: whether answer will be used for smaps file + * @in_pf: whether answer will be used by page fault handler + * @enforce_sysfs: whether sysfs config should be taken into account + * @orders: bitfield of all orders to consider + * + * Calculates the intersection of the requested hugepage orders and the allowed + * hugepage orders for the provided vma. Permitted orders are encoded as a set + * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3 + * corresponds to order-3, etc). Order-0 is never considered a hugepage order. + * + * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage + * orders are allowed. + */ +static inline +unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs, + unsigned long orders) +{ + /* Optimization to check if required orders are enabled early. 
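+ * For example, when only the PMD order is enabled via sysfs, @orders is
+ * masked down to at most BIT(PMD_ORDER), and an empty result skips the
+ * __thp_vma_allowable_orders() call entirely.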
*/ + if (enforce_sysfs && vma_is_anonymous(vma)) { + unsigned long mask = READ_ONCE(huge_anon_orders_always); + + if (vm_flags & VM_HUGEPAGE) + mask |= READ_ONCE(huge_anon_orders_madvise); + if (hugepage_global_always() || + ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled())) + mask |= READ_ONCE(huge_anon_orders_inherit); + + orders &= mask; + if (!orders) + return 0; + } + + return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, + enforce_sysfs, orders); +} + +struct thpsize { + struct kobject kobj; + struct list_head node; + int order; +}; + +#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj) #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ @@ -285,17 +480,24 @@ static inline bool folio_test_pmd_mappable(struct folio *folio) return false; } -static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, - unsigned long addr) +static inline bool thp_vma_suitable_order(struct vm_area_struct *vma, + unsigned long addr, int order) { return false; } -static inline bool hugepage_vma_check(struct vm_area_struct *vma, - unsigned long vm_flags, bool smaps, - bool in_pf, bool enforce_sysfs) +static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, + unsigned long addr, unsigned long orders) { - return false; + return 0; +} + +static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs, + unsigned long orders) +{ + return 0; } static inline void folio_prep_large_rmappable(struct folio *folio) {} @@ -405,6 +607,16 @@ static inline bool thp_migration_supported(void) { return false; } + +static inline unsigned long file_orders_always(void) +{ + return 0; +} + +static inline int file_exec_order(void) +{ + return -1; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int split_folio_to_list(struct folio *folio, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 0c50c4fceb95dd6dd2144589496b6efb6b84691e..6f811747d3eebb742e4d35656d38edb51d3ea45d 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -845,6 +845,12 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h) return huge_page_size(h) / 512; } +static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, + struct address_space *mapping, pgoff_t idx) +{ + return filemap_lock_folio(mapping, idx << huge_page_order(h)); +} + #include #ifndef is_hugepage_only_range @@ -1041,6 +1047,12 @@ static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio return NULL; } +static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, + struct address_space *mapping, pgoff_t idx) +{ + return NULL; +} + static inline int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) { diff --git a/include/linux/i2c.h b/include/linux/i2c.h index a3166100f0cce23712a82cb97b313928a2dbec25..68ee918badc61200865ab71a31cc1571e678c19b 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -566,6 +566,9 @@ struct i2c_algorithm { int (*reg_slave)(struct i2c_client *client); int (*unreg_slave)(struct i2c_client *client); #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -655,6 +658,9 @@ struct i2c_bus_recovery_info { struct pinctrl *pinctrl; struct pinctrl_state *pins_default; struct pinctrl_state *pins_gpio; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; int i2c_recover_bus(struct i2c_adapter *adap); @@ -751,6 +757,9 @@ struct i2c_adapter { /* 7bit address space */ 
DECLARE_BITMAP(addrs_in_instantiation, 1 << 7); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 523025106a643eebc2d3f948f8fe380bce376bbd..bbdf70d2166bb25df78aec57a3b0c37351ff0dd9 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -35,6 +35,9 @@ struct macvlan_dev { #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline void macvlan_count_rx(const struct macvlan_dev *vlan, diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index ddb27fc0ee8c8862d62f8c6243c4239ea53374f2..f2fa95b0c0e8a9dbd51d9efdc22205c26680a256 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -18,6 +18,8 @@ struct ipv4_devconf { void *sysctl; int data[IPV4_DEVCONF_MAX]; DECLARE_BITMAP(state, IPV4_DEVCONF_MAX); + + CK_KABI_RESERVE(1) }; #define MC_HASH_SZ_LOG 9 @@ -50,6 +52,9 @@ struct in_device { struct neigh_parms *arp_parms; struct ipv4_devconf cnf; struct rcu_head rcu_head; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1]) diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index f3196f82fd8a14364b09490a6e1ac46a2c55b0e9..c0397423d3a89887edc08fad1c6cd634515bec12 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -158,6 +158,26 @@ struct rapl_if_priv { void *rpi; }; +#ifdef CONFIG_PERF_EVENTS +/** + * struct rapl_package_pmu_data: Per package data for PMU support + * @scale: Scale of 2^-32 Joules for each energy counter increase. + * @lock: Lock to protect n_active and active_list. + * @n_active: Number of active events. + * @active_list: List of active events. + * @timer_interval: Maximum timer expiration time before counter overflow. + * @hrtimer: Periodically update the counter to prevent overflow. 
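+ *
+ * With @scale in 2^-32 Joule units, a counter delta of N corresponds to
+ * N * scale[domain] * 2^-32 Joules, so scale[domain] == 1ULL << 32 would
+ * mean exactly one Joule per count.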
+ */ +struct rapl_package_pmu_data { + u64 scale[RAPL_DOMAIN_MAX]; + raw_spinlock_t lock; + int n_active; + struct list_head active_list; + ktime_t timer_interval; + struct hrtimer hrtimer; +}; +#endif + /* maximum rapl package domain name: package-%d-die-%d */ #define PACKAGE_DOMAIN_NAME_LENGTH 30 @@ -176,6 +196,10 @@ struct rapl_package { struct cpumask cpumask; char name[PACKAGE_DOMAIN_NAME_LENGTH]; struct rapl_if_priv *priv; +#ifdef CONFIG_PERF_EVENTS + bool has_pmu; + struct rapl_package_pmu_data pmu_data; +#endif }; struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv, @@ -188,4 +212,12 @@ struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu); void rapl_remove_package(struct rapl_package *rp); +#ifdef CONFIG_PERF_EVENTS +int rapl_package_add_pmu(struct rapl_package *rp); +void rapl_package_remove_pmu(struct rapl_package *rp); +#else +static inline int rapl_package_add_pmu(struct rapl_package *rp) { return 0; } +static inline void rapl_package_remove_pmu(struct rapl_package *rp) { } +#endif + #endif /* __INTEL_RAPL_H__ */ diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index ee07393445f9f21730ab7bcd40f823fe9168e2df..1e880cb0f4541a1154dd1aa2d2107744a7370f02 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -12,18 +12,37 @@ #define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val) #define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val) +/* + * List of supported TMPI IDs. + * Some TMPI IDs are not used by Linux, so the numbers are not consecutive. + */ +enum intel_tpmi_id { + TPMI_ID_RAPL = 0, /* Running Average Power Limit */ + TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */ + TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */ + TPMI_ID_SST = 5, /* Speed Select Technology */ + TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */ + TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */ +}; + /** * struct intel_tpmi_plat_info - Platform information for a TPMI device instance - * @package_id: CPU Package id - * @bus_number: PCI bus number - * @device_number: PCI device number + * @cdie_mask: Mask of all compute dies in the partition + * @package_id: CPU Package id + * @partition: Package partition id when multiple VSEC PCI devices per package + * @segment: PCI segment ID + * @bus_number: PCI bus number + * @device_number: PCI device number * @function_number: PCI function number * * Structure to store platform data for a TPMI device instance. This * struct is used to return data via tpmi_get_platform_data(). 
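 *
 * A minimal consumer sketch (hypothetical auxiliary driver probe)::
 *
 *	struct intel_tpmi_plat_info *info = tpmi_get_platform_data(auxdev);
 *
 *	if (!info)
 *		return -ENODEV;
 *	dev_dbg(&auxdev->dev, "TPMI package %u\n", info->package_id);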
*/ struct intel_tpmi_plat_info { + u16 cdie_mask; u8 package_id; + u8 partition; + u8 segment; u8 bus_number; u8 device_number; u8 function_number; @@ -32,7 +51,6 @@ struct intel_tpmi_plat_info { struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev); struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index); int tpmi_get_resource_count(struct auxiliary_device *auxdev); - -int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, int *locked, - int *disabled); +int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked, + bool *write_blocked); #endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 4a1dc88ddbff9a6bfec51b1b62f3cd2cc3758463..46612e4aaa361243c55f82e17d2292f70cf75ce5 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -289,6 +289,8 @@ struct irq_affinity { unsigned int set_size[IRQ_AFFINITY_MAX_SETS]; void (*calc_sets)(struct irq_affinity *, unsigned int nvecs); void *priv; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 1b7a44b35616c7d00cb383425c72fe10ee079ff1..25142a0e2fc2c51d4c7807a1fb87cc21b16a163b 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -166,6 +166,10 @@ struct io_pgtable_ops { struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); + int (*read_and_clear_dirty)(struct io_pgtable_ops *ops, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); }; /** diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b6ef263e85c061495d3e506859c8d15f39f46836..eb86d5b80110c99adcf24b3c5498c5131f87df2d 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #define IOMMU_READ (1 << 0) @@ -37,6 +38,7 @@ struct bus_type; struct device; struct iommu_domain; struct iommu_domain_ops; +struct iommu_dirty_ops; struct notifier_block; struct iommu_sva; struct iommu_fault_event; @@ -95,6 +97,8 @@ struct iommu_domain_geometry { struct iommu_domain { unsigned type; const struct iommu_domain_ops *ops; + const struct iommu_dirty_ops *dirty_ops; + unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ struct iommu_domain_geometry geometry; struct iommu_dma_cookie *iova_cookie; @@ -111,6 +115,11 @@ struct iommu_domain { int users; }; }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline bool iommu_is_dma_domain(struct iommu_domain *domain) @@ -133,6 +142,7 @@ enum iommu_cap { * usefully support the non-strict DMA flush queue. 
*/ IOMMU_CAP_DEFERRED_FLUSH, + IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */ }; /* These are the possible reserved region types */ @@ -225,6 +235,38 @@ struct iommu_iotlb_gather { size_t pgsize; struct list_head freelist; bool queued; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) +}; + +/** + * struct iommu_dirty_bitmap - Dirty IOVA bitmap state + * @bitmap: IOVA bitmap + * @gather: Range information for a pending IOTLB flush + */ +struct iommu_dirty_bitmap { + struct iova_bitmap *bitmap; + struct iommu_iotlb_gather *gather; +}; + +/* Read but do not clear any dirty bits */ +#define IOMMU_DIRTY_NO_CLEAR (1 << 0) + +/** + * struct iommu_dirty_ops - domain specific dirty tracking operations + * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain + * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled + * into a bitmap, with a bit represented as a page. + * Reads the dirty PTE bits and clears it from IO + * pagetables. + */ +struct iommu_dirty_ops { + int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled); + int (*read_and_clear_dirty)(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); }; /** @@ -234,7 +276,15 @@ struct iommu_iotlb_gather { * op is allocated in the iommu driver and freed by the caller after * use. The information type is one of enum iommu_hw_info_type defined * in include/uapi/linux/iommufd.h. - * @domain_alloc: allocate iommu domain + * @domain_alloc: allocate and return an iommu domain if success. Otherwise + * NULL is returned. The domain is not fully initialized until + * the caller iommu_domain_alloc() returns. + * @domain_alloc_user: Allocate an iommu domain corresponding to the input + * parameters as defined in include/uapi/linux/iommufd.h. + * Unlike @domain_alloc, it is called only by IOMMUFD and + * must fully initialize the new domain before return. + * Upon success, a domain is returned. Upon failure, + * ERR_PTR must be returned. 
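+ *		A driver can reject unsupported @flags while satisfying
+ *		this contract with, e.g. (sketch, not part of this patch)::
+ *
+ *		  return ERR_PTR(-EOPNOTSUPP);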
* @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU @@ -267,6 +317,7 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); + struct iommu_domain *(*domain_alloc_user)(struct device *dev, u32 flags); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); @@ -294,6 +345,15 @@ struct iommu_ops { const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; struct module *owner; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; /** @@ -541,6 +601,21 @@ void iommu_set_dma_strict(void); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); +static inline bool apply_zhaoxin_dmar_acpi_a_behavior(void) +{ +#if defined(CONFIG_CPU_SUP_ZHAOXIN) || defined(CONFIG_CPU_SUP_CENTAUR) + if (((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) || + (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)) && + ((boot_cpu_data.x86 == 7) && (boot_cpu_data.x86_model == 0x3b))) + return true; +#endif + return false; +} + +extern int iova_reserve_domain_addr(struct iommu_domain *domain, dma_addr_t start, dma_addr_t end); + +int __acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain, struct device *dev); + static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) { if (domain->ops->flush_iotlb_all) @@ -632,6 +707,28 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return gather && gather->queued; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ + if (gather) + iommu_iotlb_gather_init(gather); + + dirty->bitmap = bitmap; + dirty->gather = gather; +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ + if (dirty->bitmap) + iova_bitmap_set(dirty->bitmap, iova, length); + + if (dirty->gather) + iommu_iotlb_gather_add_range(dirty->gather, iova, length); +} + /* PCI device grouping function */ extern struct iommu_group *pci_device_group(struct device *dev); /* Generic device grouping function */ @@ -738,6 +835,8 @@ struct iommu_fwspec {}; struct iommu_device {}; struct iommu_fault_param {}; struct iommu_iotlb_gather {}; +struct iommu_dirty_bitmap {}; +struct iommu_dirty_ops {}; static inline bool iommu_present(const struct bus_type *bus) { @@ -970,6 +1069,18 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return false; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 25d768d4897017c62b1353ac0dce117c4565263c..8144354f6da2028f270cb15ac5bce6273d17b514 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -14,6 +14,8 @@ #include #include #include +#include + /* * Resources are tree-like, allowing * nesting 
etc.. @@ -25,6 +27,11 @@ struct resource { unsigned long flags; unsigned long desc; struct resource *parent, *sibling, *child; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h index c006cf0a25f3daac2ccc39c67c9a3193245a4077..1c338f5e5b7a62027290b44ad47c4a74d84706ac 100644 --- a/include/linux/iova_bitmap.h +++ b/include/linux/iova_bitmap.h @@ -7,6 +7,7 @@ #define _IOVA_BITMAP_H_ #include +#include struct iova_bitmap; @@ -14,6 +15,7 @@ typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap, unsigned long iova, size_t length, void *opaque); +#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER) struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, unsigned long page_size, u64 __user *data); @@ -22,5 +24,29 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, iova_bitmap_fn_t fn); void iova_bitmap_set(struct iova_bitmap *bitmap, unsigned long iova, size_t length); +#else +static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, + size_t length, + unsigned long page_size, + u64 __user *data) +{ + return NULL; +} + +static inline void iova_bitmap_free(struct iova_bitmap *bitmap) +{ +} + +static inline int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, + iova_bitmap_fn_t fn) +{ + return -EOPNOTSUPP; +} + +static inline void iova_bitmap_set(struct iova_bitmap *bitmap, + unsigned long iova, size_t length) +{ +} +#endif #endif diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index af8a771a053c51eed297516f927a5fd003315ef4..443ded24c730004fd2319124e4d6370f5cae9320 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -3,6 +3,7 @@ #define _IPV6_H #include +#include #define ipv6_optlen(p) (((p)->hdrlen+1) << 3) #define ipv6_authlen(p) (((p)->hdrlen+2) << 2) @@ -84,6 +85,11 @@ struct ipv6_devconf { __u8 ndisc_evict_nocarrier; struct ctl_table_header *sysctl_header; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct ipv6_params { diff --git a/include/linux/irq.h b/include/linux/irq.h index 90081afa10ce529f053d97d094f65722c80b9efe..528487d6be71de613e490b807360518f81c47173 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -160,6 +160,9 @@ struct irq_common_data { #ifdef CONFIG_GENERIC_IRQ_IPI unsigned int ipi_offset; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -187,6 +190,11 @@ struct irq_data { struct irq_data *parent_data; #endif void *chip_data; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -548,6 +556,8 @@ struct irq_chip { void (*irq_nmi_teardown)(struct irq_data *data); unsigned long flags; + + CK_KABI_RESERVE(1) }; /* @@ -1023,6 +1033,8 @@ struct irq_chip_type { u32 type; u32 mask_cache_priv; u32 *mask_cache; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 136f2980cba3030adefb65d006758322c44cb36f..e31f999896de5b88f6710a2ad2cc124253cbf9ae 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -18,6 +18,11 @@ struct irq_work { struct __call_single_node node; void (*func)(struct irq_work *); struct rcuwait irqwait; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \ diff --git a/include/linux/irqchip/arm-gic-phytium-2500.h b/include/linux/irqchip/arm-gic-phytium-2500.h new file mode 100644 index 
0000000000000000000000000000000000000000..f212a29390bf654941489686151e7b1e08ff2bf2 --- /dev/null +++ b/include/linux/irqchip/arm-gic-phytium-2500.h @@ -0,0 +1,661 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier + */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H +#define __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H + +/* + * Distributor registers. We assume we're running non-secure, with ARE + * being set. Secure-only and non-ARE registers are not described. + */ +#define GICD_CTLR 0x0000 +#define GICD_TYPER 0x0004 +#define GICD_IIDR 0x0008 +#define GICD_TYPER2 0x000C +#define GICD_STATUSR 0x0010 +#define GICD_SETSPI_NSR 0x0040 +#define GICD_CLRSPI_NSR 0x0048 +#define GICD_SETSPI_SR 0x0050 +#define GICD_CLRSPI_SR 0x0058 +#define GICD_IGROUPR 0x0080 +#define GICD_ISENABLER 0x0100 +#define GICD_ICENABLER 0x0180 +#define GICD_ISPENDR 0x0200 +#define GICD_ICPENDR 0x0280 +#define GICD_ISACTIVER 0x0300 +#define GICD_ICACTIVER 0x0380 +#define GICD_IPRIORITYR 0x0400 +#define GICD_ICFGR 0x0C00 +#define GICD_IGRPMODR 0x0D00 +#define GICD_NSACR 0x0E00 +#define GICD_IGROUPRnE 0x1000 +#define GICD_ISENABLERnE 0x1200 +#define GICD_ICENABLERnE 0x1400 +#define GICD_ISPENDRnE 0x1600 +#define GICD_ICPENDRnE 0x1800 +#define GICD_ISACTIVERnE 0x1A00 +#define GICD_ICACTIVERnE 0x1C00 +#define GICD_IPRIORITYRnE 0x2000 +#define GICD_ICFGRnE 0x3000 +#define GICD_IROUTER 0x6000 +#define GICD_IROUTERnE 0x8000 +#define GICD_IDREGS 0xFFD0 +#define GICD_PIDR2 0xFFE8 + +#define ESPI_BASE_INTID 4096 + +/* + * Those registers are actually from GICv2, but the spec demands that they + * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). + */ +#define GICD_ITARGETSR 0x0800 +#define GICD_SGIR 0x0F00 +#define GICD_CPENDSGIR 0x0F10 +#define GICD_SPENDSGIR 0x0F20 + +#define GICD_CTLR_RWP (1U << 31) +#define GICD_CTLR_nASSGIreq (1U << 8) +#define GICD_CTLR_DS (1U << 6) +#define GICD_CTLR_ARE_NS (1U << 4) +#define GICD_CTLR_ENABLE_G1A (1U << 1) +#define GICD_CTLR_ENABLE_G1 (1U << 0) + +#define GICD_IIDR_IMPLEMENTER_SHIFT 0 +#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) +#define GICD_IIDR_REVISION_SHIFT 12 +#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) +#define GICD_IIDR_VARIANT_SHIFT 16 +#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) +#define GICD_IIDR_PRODUCT_ID_SHIFT 24 +#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) + + +/* + * In systems with a single security state (what we emulate in KVM) + * the meaning of the interrupt group enable bits is slightly different + */ +#define GICD_CTLR_ENABLE_SS_G1 (1U << 1) +#define GICD_CTLR_ENABLE_SS_G0 (1U << 0) + +#define GICD_TYPER_RSS (1U << 26) +#define GICD_TYPER_LPIS (1U << 17) +#define GICD_TYPER_MBIS (1U << 16) +#define GICD_TYPER_ESPI (1U << 8) + +#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) +#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) +#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_ESPIS(typer) \ + (((typer) & GICD_TYPER_ESPI) ? 
GICD_TYPER_SPIS((typer) >> 27) : 0) + +#define GICD_TYPER2_nASSGIcap (1U << 8) +#define GICD_TYPER2_VIL (1U << 7) +#define GICD_TYPER2_VID GENMASK(4, 0) + +#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) +#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) + +#define GIC_PIDR2_ARCH_MASK 0xf0 +#define GIC_PIDR2_ARCH_GICv3 0x30 +#define GIC_PIDR2_ARCH_GICv4 0x40 + +#define GIC_V3_DIST_SIZE 0x10000 + +#define GIC_PAGE_SIZE_4K 0ULL +#define GIC_PAGE_SIZE_16K 1ULL +#define GIC_PAGE_SIZE_64K 2ULL +#define GIC_PAGE_SIZE_MASK 3ULL + +/* + * Re-Distributor registers, offsets from RD_base + */ +#define GICR_CTLR GICD_CTLR +#define GICR_IIDR 0x0004 +#define GICR_TYPER 0x0008 +#define GICR_STATUSR GICD_STATUSR +#define GICR_WAKER 0x0014 +#define GICR_SETLPIR 0x0040 +#define GICR_CLRLPIR 0x0048 +#define GICR_PROPBASER 0x0070 +#define GICR_PENDBASER 0x0078 +#define GICR_INVLPIR 0x00A0 +#define GICR_INVALLR 0x00B0 +#define GICR_SYNCR 0x00C0 +#define GICR_IDREGS GICD_IDREGS +#define GICR_PIDR2 GICD_PIDR2 + +#define GICR_CTLR_ENABLE_LPIS (1UL << 0) +#define GICR_CTLR_CES (1UL << 1) +#define GICR_CTLR_IR (1UL << 2) +#define GICR_CTLR_RWP (1UL << 3) + +#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) + +#define EPPI_BASE_INTID 1056 + +#define GICR_TYPER_NR_PPIS(r) \ + ({ \ + unsigned int __ppinum = ((r) >> 27) & 0x1f; \ + unsigned int __nr_ppis = 16; \ + if (__ppinum == 1 || __ppinum == 2) \ + __nr_ppis += __ppinum * 32; \ + \ + __nr_ppis; \ + }) + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +#define GIC_BASER_CACHE_nCnB 0ULL +#define GIC_BASER_CACHE_SameAsInner 0ULL +#define GIC_BASER_CACHE_nC 1ULL +#define GIC_BASER_CACHE_RaWt 2ULL +#define GIC_BASER_CACHE_RaWb 3ULL +#define GIC_BASER_CACHE_WaWt 4ULL +#define GIC_BASER_CACHE_WaWb 5ULL +#define GIC_BASER_CACHE_RaWaWt 6ULL +#define GIC_BASER_CACHE_RaWaWb 7ULL +#define GIC_BASER_CACHE_MASK 7ULL +#define GIC_BASER_NonShareable 0ULL +#define GIC_BASER_InnerShareable 1ULL +#define GIC_BASER_OuterShareable 2ULL +#define GIC_BASER_SHAREABILITY_MASK 3ULL + +#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \ + (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT) + +#define GIC_BASER_SHAREABILITY(reg, type) \ + (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) + +/* encode a size field of width @w containing @n - 1 units */ +#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) + +#define GICR_PROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK) +#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK) +#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK) +#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_PROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) + +#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) +#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) +#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) +#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) +#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) +#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, 
INNER, WaWb) +#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) +#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) + +#define GICR_PROPBASER_IDBITS_MASK (0x1f) +#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) +#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) + +#define GICR_PENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK) +#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK) +#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK) +#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_PENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable) + +#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) +#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) +#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) +#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) +#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) +#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) +#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) +#define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb) + +#define GICR_PENDBASER_PTZ BIT_ULL(62) + +/* + * Re-Distributor registers, offsets from SGI_base + */ +#define GICR_IGROUPR0 GICD_IGROUPR +#define GICR_ISENABLER0 GICD_ISENABLER +#define GICR_ICENABLER0 GICD_ICENABLER +#define GICR_ISPENDR0 GICD_ISPENDR +#define GICR_ICPENDR0 GICD_ICPENDR +#define GICR_ISACTIVER0 GICD_ISACTIVER +#define GICR_ICACTIVER0 GICD_ICACTIVER +#define GICR_IPRIORITYR0 GICD_IPRIORITYR +#define GICR_ICFGR0 GICD_ICFGR +#define GICR_IGRPMODR0 GICD_IGRPMODR +#define GICR_NSACR GICD_NSACR + +#define GICR_TYPER_PLPIS (1U << 0) +#define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_DIRTY (1U << 2) +#define GICR_TYPER_DirectLPIS (1U << 3) +#define GICR_TYPER_LAST (1U << 4) +#define GICR_TYPER_RVPEID (1U << 7) +#define GICR_TYPER_COMMON_LPI_AFF GENMASK_ULL(25, 24) +#define GICR_TYPER_AFFINITY GENMASK_ULL(63, 32) + +#define GICR_INVLPIR_INTID GENMASK_ULL(31, 0) +#define GICR_INVLPIR_VPEID GENMASK_ULL(47, 32) +#define GICR_INVLPIR_V GENMASK_ULL(63, 63) + +#define GICR_INVALLR_VPEID GICR_INVLPIR_VPEID +#define GICR_INVALLR_V GICR_INVLPIR_V + +#define GIC_V3_REDIST_SIZE 0x20000 + +#define LPI_PROP_GROUP1 (1 << 1) +#define LPI_PROP_ENABLED (1 << 0) + +/* + * Re-Distributor registers, offsets from VLPI_base + */ +#define GICR_VPROPBASER 0x0070 + +#define GICR_VPROPBASER_IDBITS_MASK 0x1f + +#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) + +#define GICR_VPROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) +#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) +#define GICR_VPROPBASER_CACHEABILITY_MASK \ + GICR_VPROPBASER_INNER_CACHEABILITY_MASK + 
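+
+/*
+ * Note on composition: GIC_BASER_CACHEABILITY() pastes the register, field
+ * and attribute names together, so GIC_BASER_CACHEABILITY(GICR_VPROPBASER,
+ * INNER, RaWb) expands to
+ * (GIC_BASER_CACHE_RaWb << GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT),
+ * i.e. 3ULL << 7.
+ */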
+#define GICR_VPROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) + +#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) +#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) +#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) +#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb) +#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) +#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) +#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) +#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) + +/* + * GICv4.1 VPROPBASER reinvention. A subtle mix between the old + * VPROPBASER and ITS_BASER. Just not quite any of the two. + */ +#define GICR_VPROPBASER_4_1_VALID (1ULL << 63) +#define GICR_VPROPBASER_4_1_ENTRY_SIZE GENMASK_ULL(61, 59) +#define GICR_VPROPBASER_4_1_INDIRECT (1ULL << 55) +#define GICR_VPROPBASER_4_1_PAGE_SIZE GENMASK_ULL(54, 53) +#define GICR_VPROPBASER_4_1_Z (1ULL << 52) +#define GICR_VPROPBASER_4_1_ADDR GENMASK_ULL(51, 12) +#define GICR_VPROPBASER_4_1_SIZE GENMASK_ULL(6, 0) + +#define GICR_VPENDBASER 0x0078 + +#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_VPENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) +#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) +#define GICR_VPENDBASER_CACHEABILITY_MASK \ + GICR_VPENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPENDBASER_NonShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) + +#define GICR_VPENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable) + +#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) +#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) +#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) +#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb) +#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) +#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) +#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) +#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) + +#define GICR_VPENDBASER_Dirty (1ULL << 60) +#define GICR_VPENDBASER_PendingLast (1ULL << 61) +#define GICR_VPENDBASER_IDAI (1ULL << 62) +#define GICR_VPENDBASER_Valid (1ULL << 63) + +/* + * GICv4.1 VPENDBASER, used for VPE residency. On top of these fields, + * also use the above Valid, PendingLast and Dirty. 
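+ *
+ * A residency update is typically composed along these lines (sketch; the
+ * VPEID field is defined just below, and vpe_id is illustrative)::
+ *
+ *	val = GICR_VPENDBASER_Valid |
+ *	      FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe_id);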
+ */ +#define GICR_VPENDBASER_4_1_DB (1ULL << 62) +#define GICR_VPENDBASER_4_1_VGRP0EN (1ULL << 59) +#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58) +#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0) + +#define GICR_VSGIR 0x0080 + +#define GICR_VSGIR_VPEID GENMASK(15, 0) + +#define GICR_VSGIPENDR 0x0088 + +#define GICR_VSGIPENDR_BUSY (1U << 31) +#define GICR_VSGIPENDR_PENDING GENMASK(15, 0) + +/* + * ITS registers, offsets from ITS_base + */ +#define GITS_CTLR 0x0000 +#define GITS_IIDR 0x0004 +#define GITS_TYPER 0x0008 +#define GITS_MPIDR 0x0018 +#define GITS_CBASER 0x0080 +#define GITS_CWRITER 0x0088 +#define GITS_CREADR 0x0090 +#define GITS_BASER 0x0100 +#define GITS_IDREGS_BASE 0xffd0 +#define GITS_PIDR0 0xffe0 +#define GITS_PIDR1 0xffe4 +#define GITS_PIDR2 GICR_PIDR2 +#define GITS_PIDR4 0xffd0 +#define GITS_CIDR0 0xfff0 +#define GITS_CIDR1 0xfff4 +#define GITS_CIDR2 0xfff8 +#define GITS_CIDR3 0xfffc + +#define GITS_TRANSLATER 0x10040 + +#define GITS_SGIR 0x20020 + +#define GITS_SGIR_VPEID GENMASK_ULL(47, 32) +#define GITS_SGIR_VINTID GENMASK_ULL(3, 0) + +#define GITS_CTLR_ENABLE (1U << 0) +#define GITS_CTLR_ImDe (1U << 1) +#define GITS_CTLR_ITS_NUMBER_SHIFT 4 +#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) +#define GITS_CTLR_QUIESCENT (1U << 31) + +#define GITS_TYPER_PLPIS (1UL << 0) +#define GITS_TYPER_VLPIS (1UL << 1) +#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 +#define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4) +#define GITS_TYPER_IDBITS_SHIFT 8 +#define GITS_TYPER_DEVBITS_SHIFT 13 +#define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13) +#define GITS_TYPER_PTA (1UL << 19) +#define GITS_TYPER_HCC_SHIFT 24 +#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) +#define GITS_TYPER_VMOVP (1ULL << 37) +#define GITS_TYPER_VMAPP (1ULL << 40) +#define GITS_TYPER_SVPET GENMASK_ULL(42, 41) + +#define GITS_IIDR_REV_SHIFT 12 +#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) +#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) +#define GITS_IIDR_PRODUCTID_SHIFT 24 + +#define GITS_CBASER_VALID (1ULL << 63) +#define GITS_CBASER_SHAREABILITY_SHIFT (10) +#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_CBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK) +#define GITS_CBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK) +#define GITS_CBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK) +#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK + +#define GITS_CBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable) + +#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) +#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) +#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) +#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb) +#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) +#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) +#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) +#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) + +#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12)) + +#define GITS_BASER_NR_REGS 8 + +#define GITS_BASER_VALID (1ULL << 63) +#define GITS_BASER_INDIRECT (1ULL << 62) + +#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) +#define 
GITS_BASER_OUTER_CACHEABILITY_SHIFT (53)
+#define GITS_BASER_INNER_CACHEABILITY_MASK \
+    GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK)
+#define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK
+#define GITS_BASER_OUTER_CACHEABILITY_MASK \
+    GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK)
+#define GITS_BASER_SHAREABILITY_MASK \
+    GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK)
+
+#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB)
+#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC)
+#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt)
+#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)
+#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt)
+#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb)
+#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt)
+#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb)
+
+#define GITS_BASER_TYPE_SHIFT (56)
+#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
+#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
+#define GITS_BASER_PHYS_52_to_48(phys) \
+    (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
+#define GITS_BASER_ADDR_48_to_52(baser) \
+    (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48)
+
+#define GITS_BASER_SHAREABILITY_SHIFT (10)
+#define GITS_BASER_InnerShareable \
+    GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
+#define GITS_BASER_PAGE_SIZE_SHIFT (8)
+#define __GITS_BASER_PSZ(sz) (GIC_PAGE_SIZE_ ## sz << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_4K __GITS_BASER_PSZ(4K)
+#define GITS_BASER_PAGE_SIZE_16K __GITS_BASER_PSZ(16K)
+#define GITS_BASER_PAGE_SIZE_64K __GITS_BASER_PSZ(64K)
+#define GITS_BASER_PAGE_SIZE_MASK __GITS_BASER_PSZ(MASK)
+#define GITS_BASER_PAGES_MAX 256
+#define GITS_BASER_PAGES_SHIFT (0)
+#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1)
+
+#define GITS_BASER_TYPE_NONE 0
+#define GITS_BASER_TYPE_DEVICE 1
+#define GITS_BASER_TYPE_VCPU 2
+#define GITS_BASER_TYPE_RESERVED3 3
+#define GITS_BASER_TYPE_COLLECTION 4
+#define GITS_BASER_TYPE_RESERVED5 5
+#define GITS_BASER_TYPE_RESERVED6 6
+#define GITS_BASER_TYPE_RESERVED7 7
+
+#define GITS_LVL1_ENTRY_SIZE (8UL)
+
+/*
+ * ITS commands
+ */
+#define GITS_CMD_MAPD 0x08
+#define GITS_CMD_MAPC 0x09
+#define GITS_CMD_MAPTI 0x0a
+#define GITS_CMD_MAPI 0x0b
+#define GITS_CMD_MOVI 0x01
+#define GITS_CMD_DISCARD 0x0f
+#define GITS_CMD_INV 0x0c
+#define GITS_CMD_MOVALL 0x0e
+#define GITS_CMD_INVALL 0x0d
+#define GITS_CMD_INT 0x03
+#define GITS_CMD_CLEAR 0x04
+#define GITS_CMD_SYNC 0x05
+
+/*
+ * GICv4 ITS specific commands
+ */
+#define GITS_CMD_GICv4(x) ((x) | 0x20)
+#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL)
+#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC)
+#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
+#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
+#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
+/* VMOVP, VSGI and INVDB are the odd ones, as they don't have a physical counterpart */
+#define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
+#define GITS_CMD_VSGI GITS_CMD_GICv4(3)
+#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe)
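Editor's note, not part of the patch: GITS_CMD_GICv4() derives each virtual opcode by setting bit 5 (0x20) on top of its physical counterpart, which keeps the physical/virtual command pairs visibly related. Worked out from the definitions above::

    GITS_CMD_VMAPTI = GITS_CMD_GICv4(GITS_CMD_MAPTI) = 0x0a | 0x20 = 0x2a
    GITS_CMD_VMOVI  = GITS_CMD_GICv4(GITS_CMD_MOVI)  = 0x01 | 0x20 = 0x21
    GITS_CMD_VSYNC  = GITS_CMD_GICv4(GITS_CMD_SYNC)  = 0x05 | 0x20 = 0x25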
+
+/*
+ * ITS error numbers
+ */
+#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107
+#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109
+#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307
+#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507
+#define E_ITS_MAPD_DEVICE_OOR 0x010801
+#define E_ITS_MAPD_ITTSIZE_OOR 0x010802
+#define E_ITS_MAPC_PROCNUM_OOR 0x010902
+#define E_ITS_MAPC_COLLECTION_OOR 0x010903
+#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04
+#define E_ITS_MAPTI_ID_OOR 0x010a05
+#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06
+#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07
+#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09
+#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01
+#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07
+
+/*
+ * CPU interface registers
+ */
+#define ICC_CTLR_EL1_EOImode_SHIFT (1)
+#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT)
+#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT)
+#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT)
+#define ICC_CTLR_EL1_CBPR_SHIFT 0
+#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT)
+#define ICC_CTLR_EL1_PMHE_SHIFT 6
+#define ICC_CTLR_EL1_PMHE_MASK (1 << ICC_CTLR_EL1_PMHE_SHIFT)
+#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8
+#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT)
+#define ICC_CTLR_EL1_ID_BITS_SHIFT 11
+#define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT)
+#define ICC_CTLR_EL1_SEIS_SHIFT 14
+#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT)
+#define ICC_CTLR_EL1_A3V_SHIFT 15
+#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT)
+#define ICC_CTLR_EL1_RSS (0x1 << 18)
+#define ICC_CTLR_EL1_ExtRange (0x1 << 19)
+#define ICC_PMR_EL1_SHIFT 0
+#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT)
+#define ICC_BPR0_EL1_SHIFT 0
+#define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT)
+#define ICC_BPR1_EL1_SHIFT 0
+#define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT)
+#define ICC_IGRPEN0_EL1_SHIFT 0
+#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT)
+#define ICC_IGRPEN1_EL1_SHIFT 0
+#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT)
+#define ICC_SRE_EL1_DIB (1U << 2)
+#define ICC_SRE_EL1_DFB (1U << 1)
+#define ICC_SRE_EL1_SRE (1U << 0)
+
+/* These are for GICv2 emulation only */
+#define GICH_LR_VIRTUALID (0x3ffUL << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT (10)
+#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
+
+#define ICC_IAR1_EL1_SPURIOUS 0x3ff
+
+#define ICC_SRE_EL2_SRE (1 << 0)
+#define ICC_SRE_EL2_ENABLE (1 << 3)
+
+#define ICC_SGI1R_TARGET_LIST_SHIFT 0
+#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT)
+#define ICC_SGI1R_AFFINITY_1_SHIFT 16
+#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_SGI_ID_SHIFT 24
+#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
+#define ICC_SGI1R_AFFINITY_2_SHIFT 32
+#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
+#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
+#define ICC_SGI1R_RS_SHIFT 44
+#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT)
+#define ICC_SGI1R_AFFINITY_3_SHIFT 48
+#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
+
+#include <asm/arch_gicv3.h>
+
+#ifndef __ASSEMBLY__
+
+/*
+ * We need a value to serve as an irq-type for LPIs. Choose one that will
+ * hopefully pique the interest of the reviewer.
+ */
+#define GIC_IRQ_TYPE_LPI 0xa110c8ed
+
+struct rdists {
+    struct {
+        raw_spinlock_t rd_lock;
+        void __iomem *rd_base;
+        struct page *pend_page;
+        phys_addr_t phys_base;
+        u64 flags;
+        cpumask_t *vpe_table_mask;
+        void *vpe_l1_base;
+    } __percpu *rdist;
+    phys_addr_t prop_table_pa;
+    void *prop_table_va;
+    u64 flags;
+    u32 gicd_typer;
+    u32 gicd_typer2;
+    int cpuhp_memreserve_state;
+    bool has_vlpis;
+    bool has_rvpeid;
+    bool has_direct_lpi;
+    bool has_vpend_valid_dirty;
+};
+
+struct irq_domain;
+struct fwnode_handle;
+int __init its_lpi_memreserve_init(void);
+int phytium_its_cpu_init(void);
+int phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists,
+        struct irq_domain *domain);
+int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
+
+static inline bool gic_enable_sre(void)
+{
+    u32 val;
+
+    val = gic_read_sre();
+    if (val & ICC_SRE_EL1_SRE)
+        return true;
+
+    val |= ICC_SRE_EL1_SRE;
+    gic_write_sre(val);
+    val = gic_read_sre();
+
+    return !!(val & ICC_SRE_EL1_SRE);
+}
+
+#endif
+
+#endif
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index d9451d456a7333fe3b1af852bc3590bb179ce6dd..6a0a883770735d28a2108cc2e68d4dca3cd4b6e8 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -105,6 +105,11 @@ struct irq_desc {
 #ifdef CONFIG_HARDIRQS_SW_RESEND
     struct hlist_node resend_node;
 #endif
+
+    CK_KABI_RESERVE(1)
+    CK_KABI_RESERVE(2)
+    CK_KABI_RESERVE(3)
+    CK_KABI_RESERVE(4)
 } ____cacheline_internodealigned_in_smp;
 
 #ifdef CONFIG_SPARSE_IRQ
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 51c254b7fec273550f17742e53bf28923cc561c8..a57e45f8098641c840fdd914ce79dc85180ef10f 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -174,6 +174,12 @@ struct irq_domain {
     irq_hw_number_t hwirq_max;
     unsigned int revmap_size;
     struct radix_tree_root revmap_tree;
+
+    CK_KABI_RESERVE(1)
+    CK_KABI_RESERVE(2)
+    CK_KABI_RESERVE(3)
+    CK_KABI_RESERVE(4)
+
     struct irq_data __rcu *revmap[];
 };
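Editor's note: the CK_KABI_RESERVE() slots added to struct irq_desc and struct irq_domain above (and to many more structures throughout this series) follow the Cloud Kernel ABI-stability convention: padding is reserved up front so later backports can add fields without changing a structure's size or layout. A rough sketch of the idea, assuming a simplified definition (the real macro in the ck_kabi header is more elaborate, e.g. it also supports replacing a used slot with a same-sized field)::

    /* Illustrative only -- not the macro's actual definition. */
    #define CK_KABI_RESERVE(n)    unsigned long ck_kabi_reserved##n;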
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f0bc9aa5aed3f69c1dd5d5e0ce1c5af3a1979d30..53b3123f8c2a86f80986f0de6a7c66ce485816f2 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -497,7 +497,10 @@ struct jbd2_journal_handle
     unsigned int h_type: 8;
     unsigned int h_line_no: 16;
 
+    unsigned long h_pre_start_jiffies;
     unsigned long h_start_jiffies;
+    u64 h_sched_wait_sum;
+    u64 h_io_wait_sum;
     unsigned int h_requested_credits;
 
     unsigned int saved_alloc_context;
@@ -706,6 +709,9 @@
      * structures associated with the transaction
      */
     struct list_head t_private_list;
+
+    /* When this transaction is locked */
+    unsigned long t_locked_time;
 };
 
 struct transaction_run_stats_s {
@@ -844,6 +850,16 @@
      */
     wait_queue_head_t j_wait_commit;
 
+    /**
+     * @j_wait_done_checkpoint: Wait queue for waiting for checkpoint to complete.
+     */
+    wait_queue_head_t j_wait_done_checkpoint;
+
+    /**
+     * @j_wait_checkpoint: Wait queue to trigger checkpointing.
+     */
+    wait_queue_head_t j_wait_checkpoint;
+
     /**
      * @j_wait_updates: Wait queue to wait for updates to complete.
      */
@@ -1207,6 +1223,13 @@
     int (*j_finish_inode_data_buffers) (struct jbd2_inode *);
 
+    /**
+     * @j_checkpoint_task:
+     *
+     * Pointer to the current checkpoint thread for this journal.
+     */
+    struct task_struct *j_checkpoint_task;
+
     /*
      * Journal statistics
      */
@@ -1226,6 +1249,20 @@
      */
     struct transaction_stats_s j_stats;
 
+    /**
+     * @j_force_copy: if non-zero, force buffer copy-out.
+     */
+    unsigned int j_force_copy;
+
+    /**
+     * @j_stall_thresh: when a transaction is locked and there are still
+     * outstanding handles, those handles prevent the transaction from
+     * committing; trace such handles once they have stalled the transaction
+     * for more than @j_stall_thresh milliseconds (default 100ms).
+     */
+#define JBD2_DEFAULT_TRANS_STALL_THRESH 100
+    unsigned long j_stall_thresh;
+
     /**
      * @j_failed_commit: Failed journal commit ID.
      */
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 9935f7ecbfb9e31a68a6c6d0748aea49e0171344..9e86ee77d335490e7ab3021cdd0ec72b8065a5fd 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -30,6 +30,11 @@ enum cpu_usage_stat {
     CPUTIME_GUEST_NICE,
 #ifdef CONFIG_SCHED_CORE
     CPUTIME_FORCEIDLE,
+    CPUTIME_FORCEIDLE_TASK,
+#endif
+#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE)
+    CPUTIME_SIBIDLE,
+    CPUTIME_SIBIDLE_TASK,
 #endif
     NR_STATS,
 };
@@ -130,8 +135,8 @@ extern void account_process_tick(struct task_struct *, int user);
 
 extern void account_idle_ticks(unsigned long ticks);
 
-#ifdef CONFIG_SCHED_CORE
-extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
+#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE)
+extern void __account_sibidle_time(struct task_struct *tsk, u64 delta, u64 delta_task, bool fi);
 #endif
 
 #endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 5a952d00ea159effb936ec54549e6928896c0b5f..226cada23934ee1e7709167f81e7d264019a362d 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 
 struct file;
 struct dentry;
@@ -165,6 +166,8 @@ struct kernfs_elem_dir {
      * node has changed during negative dentry revalidation.
      */
     unsigned long rev;
+
+    CK_KABI_RESERVE(1)
 };
 
 struct kernfs_elem_symlink {
@@ -244,6 +247,11 @@ struct kernfs_syscall_ops {
              const char *new_name);
     int (*show_path)(struct seq_file *sf, struct kernfs_node *kn,
              struct kernfs_root *root);
+
+    CK_KABI_RESERVE(1)
+    CK_KABI_RESERVE(2)
+    CK_KABI_RESERVE(3)
+    CK_KABI_RESERVE(4)
 };
 
 struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root);
@@ -318,6 +326,9 @@ struct kernfs_ops {
             struct poll_table_struct *pt);
     int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
+
+    CK_KABI_RESERVE(1)
+    CK_KABI_RESERVE(2)
 };
 
 /*
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 5caf3ce823733ab510f53d8dfa4b25cc150dbae4..547cb56c4b8f9d73857467020f87d466af7b0dfb 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -36,6 +36,8 @@ struct key_preparsed_payload {
     size_t datalen;     /* Raw datalen */
     size_t quotalen;    /* Quota length for proposed payload */
     time64_t expiry;    /* Expiry time of key */
+
+    CK_KABI_RESERVE(1)
 } __randomize_layout;
 
 typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);
@@ -56,6 +58,8 @@ struct key_match_data {
     unsigned lookup_type;   /* Type of lookup for this search. */
 #define KEYRING_SEARCH_LOOKUP_DIRECT    0x0000 /* Direct lookup by description. */
 #define KEYRING_SEARCH_LOOKUP_ITERATE   0x0001 /* Iterative search. */
+
+    CK_KABI_RESERVE(1)
 };
 
 /*
diff --git a/include/linux/key.h b/include/linux/key.h
index 938d7ecfb495def7b3e6c5e5a7985b1b134561e7..79207da6e10ae532dc627fb2b8f9b1cf3b4d34ac 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 
 #ifdef __KERNEL__
 #include
@@ -278,6 +279,8 @@ struct key {
      * restriction.
 */
    struct key_restriction *restrict_link;
+
+    CK_KABI_RESERVE(1)
 };
 
 extern struct key *key_alloc(struct key_type *type,
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 401af475751413187fe047f5a5cb69aae11b0be0..6771c6eea7206c0e9f56debb1d5097c9b34d1db6 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -16,19 +16,45 @@
 #include
 #include
+#include
+#include
 
-extern unsigned long kfence_sample_interval;
+extern long kfence_sample_interval;
 
-/*
- * We allocate an even number of pages, as it simplifies calculations to map
- * address to metadata indices; effectively, the very first page serves as an
- * extended guard page, but otherwise has no special purpose.
- */
-#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
-extern char *__kfence_pool;
+struct kfence_pool_area {
+    struct rb_node rb_node;         /* binary tree linked to root */
+    struct kfence_metadata *meta;   /* metadata per area */
+    char *addr;                     /* start address of the kfence pool */
+    unsigned long pool_size;        /* size of the kfence pool of this area */
+    unsigned long nr_objects;       /* max number of objects in this area; 0 marks a zombie area */
+    int node;                       /* the NUMA node (freelist) this area belongs to, usually that of its physical memory */
+    atomic_t _ref;                  /* counts references to the kpa, protecting the kpa itself */
+    struct list_head list;          /* ready to be added to kfence_pool_root */
+    struct percpu_ref refcnt;       /* counts in-use objects, protecting pool, meta, etc. */
+    struct work_struct work;        /* workqueue used to free an unused area */
+};
 
 DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
+DECLARE_STATIC_KEY_FALSE(kfence_skip_interval);
 extern atomic_t kfence_allocation_gate;
+extern unsigned long kfence_num_objects;
+extern char *__kfence_pool_early_init;
+
+/**
+ * is_kfence_address_area() - check if an address belongs to the KFENCE pool of a given area
+ * @addr: address to check
+ * @kpa: area to check
+ *
+ * Return: true or false depending on whether the address is within the KFENCE
+ * object range of the given area.
+ *
+ * This function is used when the caller already knows the nearest area
+ * starting at or below @addr.
+ */
+static __always_inline bool is_kfence_address_area(const void *addr,
+                    const struct kfence_pool_area *kpa)
+{
+    return unlikely(kpa && (unsigned long)((char *)addr - kpa->addr) < kpa->pool_size);
+}
 
 /**
  * is_kfence_address() - check if an address belongs to KFENCE pool
@@ -50,12 +76,17 @@ extern atomic_t kfence_allocation_gate;
  */
 static __always_inline bool is_kfence_address(const void *addr)
 {
+#if defined(CONFIG_KASAN) || defined(CONFIG_DEBUG_KMEMLEAK)
     /*
-     * The __kfence_pool != NULL check is required to deal with the case
-     * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
-     * the slow-path after the range-check!
+     * KASAN functions such as kasan_record_aux_stack(),
+     * kasan_poison_shadow(), or kasan_unpoison_shadow()
+     * may give an invalid kaddr (a direct-mapping kernel address).
+     * We must add a check here.
     */
-    return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
+    return virt_addr_valid(addr) && PageKfence(virt_to_page(addr));
+#else
+    return PageKfence(virt_to_page(addr));
+#endif
 }
 
 /**
@@ -72,6 +103,17 @@
 void __init kfence_alloc_pool_and_metadata(void);
 
 /**
  */
 void __init kfence_init(void);
 
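Editor's sketch, not part of the patch: is_kfence_address_area() expects the caller to have already located the nearest pool area starting at or below the address, typically by walking the rb-tree that links the kfence_pool_area instances. A hedged illustration of such a walk (the root and helper names are assumptions)::

    static bool addr_in_some_kfence_area(struct rb_root *root, const void *addr)
    {
        struct rb_node *node = root->rb_node;
        struct kfence_pool_area *kpa = NULL;

        /* Find the last area whose start address is <= addr. */
        while (node) {
            struct kfence_pool_area *cur =
                rb_entry(node, struct kfence_pool_area, rb_node);

            if ((char *)addr >= cur->addr) {
                kpa = cur;
                node = node->rb_right;
            } else {
                node = node->rb_left;
            }
        }

        return is_kfence_address_area(addr, kpa);
    }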
+/**
+ * update_kfence_booting_max() - parse the maximum num_objects from the boot command line
+ *
+ * Read the config from the boot command line and limit the kfence pool size.
+ * This function is called by kfence itself (e.g., kfence_alloc_pool()) or
+ * by arch-specific allocation code (e.g., arm64_kfence_alloc_pool()).
+ *
+ * Return: 1 if kfence_num_objects is changed, otherwise 0.
+ */
+int __init update_kfence_booting_max(void);
+
 /**
  * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
  * @s: cache being shut down
@@ -97,7 +139,8 @@ void kfence_shutdown_cache(struct kmem_cache *s);
  * Allocate a KFENCE object. Allocators must not call this function directly,
  * use kfence_alloc() instead.
  */
-void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
+void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node);
+struct page *__kfence_alloc_page(int node, gfp_t flags);
 
 /**
  * kfence_alloc() - allocate a KFENCE object with a low probability
@@ -124,9 +167,73 @@ static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
     if (!static_branch_likely(&kfence_allocation_key))
         return NULL;
 #endif
-    if (likely(atomic_read(&kfence_allocation_gate)))
+    if (!static_branch_likely(&kfence_skip_interval) &&
+        likely(atomic_read(&kfence_allocation_gate)))
         return NULL;
-    return __kfence_alloc(s, size, flags);
+    return __kfence_alloc(s, size, flags, NUMA_NO_NODE);
+}
+
+/**
+ * kfence_alloc_node() - allocate a KFENCE object with a low probability
+ * @s: struct kmem_cache with object requirements
+ * @size: exact size of the object to allocate (can be less than @s->size
+ *        e.g. for kmalloc caches)
+ * @flags: GFP flags
+ * @node: alloc from kfence pool on which node
+ *
+ * Return:
+ * * NULL     - must proceed with allocating as usual,
+ * * non-NULL - pointer to a KFENCE object.
+ *
+ * kfence_alloc_node() should be inserted into the heap allocation fast path,
+ * allowing it to transparently return KFENCE-allocated objects with a low
+ * probability using a static branch (the probability is controlled by the
+ * kfence.sample_interval boot parameter).
+ */
+static __always_inline void *kfence_alloc_node(struct kmem_cache *s, size_t size, gfp_t flags,
+                           int node)
+{
+#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
+    if (!static_branch_unlikely(&kfence_allocation_key))
+        return NULL;
+#else
+    if (!static_branch_likely(&kfence_allocation_key))
+        return NULL;
+#endif
+    if (!static_branch_likely(&kfence_skip_interval) &&
+        likely(atomic_read(&kfence_allocation_gate)))
+        return NULL;
+    return __kfence_alloc(s, size, flags, node);
+}
+
+/**
+ * kfence_alloc_page() - allocate a KFENCE page with a low probability
+ * @order: allocation order; only order-0 requests are served
+ * @node: preferred nid
+ * @flags: GFP flags
+ *
+ * Return:
+ * * NULL     - must proceed with allocating as usual,
+ * * non-NULL - pointer to a KFENCE page.
+ *
+ * The order-0 page version of kfence_alloc().
+ */
+static __always_inline struct page *kfence_alloc_page(unsigned int order, int node, gfp_t flags)
+{
+#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
+    if (!static_branch_unlikely(&kfence_allocation_key))
+        return NULL;
+#else
+    if (!static_branch_likely(&kfence_allocation_key))
+        return NULL;
+#endif
+    if (order)
+        return NULL;
+
+    if (!static_branch_likely(&kfence_skip_interval) &&
+        likely(atomic_read(&kfence_allocation_gate)))
+        return NULL;
+
+    return __kfence_alloc_page(node, flags);
 }
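Editor's sketch, not part of the patch: as the kernel-doc above says, the intended call site for kfence_alloc_node() is the very top of an allocator fast path, where the static branch keeps the common case free and only the sampled allocation is diverted. A minimal illustration (the fallback helper is hypothetical)::

    static void *slab_alloc_sketch(struct kmem_cache *s, gfp_t flags, int node)
    {
        void *obj = kfence_alloc_node(s, s->object_size, flags, node);

        if (unlikely(obj))      /* rare: sampled, KFENCE-guarded object */
            return obj;

        return normal_slab_alloc(s, flags, node);   /* hypothetical fallback */
    }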
 
 /**
@@ -166,6 +273,7 @@ void *kfence_object_start(const void *addr);
  * Release a KFENCE object and mark it as freed.
 */
 void __kfence_free(void *addr);
+void __kfence_free_page(struct page *page, void *addr);
 
 /**
  * kfence_free() - try to release an arbitrary heap object to KFENCE pool
@@ -188,6 +296,30 @@ static __always_inline __must_check bool kfence_free(void *addr)
     return true;
 }
 
+/**
+ * kfence_free_page() - try to release a page to KFENCE pool
+ * @page: page to be freed
+ *
+ * Return:
+ * * false - page doesn't belong to KFENCE pool and was ignored,
+ * * true  - page was released to KFENCE pool.
+ *
+ * Release a KFENCE page and mark it as freed. May be called on any page,
+ * even a non-KFENCE page. The allocator must check the return value to
+ * determine whether it was a KFENCE page or not.
+ */
+static __always_inline __must_check bool kfence_free_page(struct page *page)
+{
+    void *addr;
+
+    if (!PageKfence(page))
+        return false;
+
+    addr = page_to_virt(page);
+    __kfence_free_page(page, addr);
+    return true;
+}
+
 /**
  * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
  * @addr: faulting address
@@ -228,10 +360,19 @@ static inline void kfence_alloc_pool_and_metadata(void) { }
 static inline void kfence_init(void) { }
 static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
 static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
+static inline void *kfence_alloc_node(struct kmem_cache *s, size_t size, gfp_t flags, int node)
+{
+    return NULL;
+}
+static inline struct page *kfence_alloc_page(unsigned int order, int node, gfp_t flags)
+{
+    return NULL;
+}
 static inline size_t kfence_ksize(const void *addr) { return 0; }
 static inline void *kfence_object_start(const void *addr) { return NULL; }
 static inline void __kfence_free(void *addr) { }
 static inline bool __must_check kfence_free(void *addr) { return false; }
+static inline bool __must_check kfence_free_page(struct page *page) { return false; }
 static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
                              struct pt_regs *regs)
 {
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index c30affcc43b444cc17cb894b83b17b52e41f8ebc..a4545ddb3d5530db7a5ae2b27175b14766fc4270 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -79,6 +79,11 @@ struct kobject {
 #ifdef CONFIG_DEBUG_KOBJECT_RELEASE
     struct delayed_work release;
 #endif
+
+    CK_KABI_RESERVE(1)
+    CK_KABI_RESERVE(2)
+    CK_KABI_RESERVE(3)
+    CK_KABI_RESERVE(4)
 };
 
 __printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...);
@@ -120,6 +125,11 @@ struct kobj_type {
     const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *kobj);
     const void *(*namespace)(const struct kobject *kobj);
     void (*get_ownership)(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
+
+    CK_KABI_RESERVE(1)
+    CK_KABI_RESERVE(2)
+    CK_KABI_RESERVE(3)
+    CK_KABI_RESERVE(4)
 };
 
 struct kobj_uevent_env {
@@ -170,6 +180,11 @@ struct kset {
     spinlock_t list_lock;
     struct kobject kobj;
     const struct kset_uevent_ops *uevent_ops;
+
+    CK_KABI_RESERVE(1)
+    CK_KABI_RESERVE(2)
+    CK_KABI_RESERVE(3)
+    CK_KABI_RESERVE(4)
 } __randomize_layout;
 
 void kset_init(struct kset *kset);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 45d5b0a76b0bd5369dd05d90d571eae15c4c7d00..5d6adf9ac86df0e2856e7f581e248b1e17bfa05e 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -92,6 +92,8 @@ struct kprobe {
      * Protected by kprobe_mutex after this kprobe is registered.
*/ u32 flags; + + CK_KABI_RESERVE(1) }; /* Kprobe status flags */ diff --git a/include/linux/ksm.h b/include/linux/ksm.h index b9cdeba03668aeb6ddc884ed27083ddf2b74df2b..f701b57fc64bf5632e0021be09965577dca795f8 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -87,8 +87,8 @@ static inline void ksm_exit(struct mm_struct *mm) * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, * but what if the vma was unmerged while the page was swapped out? */ -struct page *ksm_might_need_to_copy(struct page *page, - struct vm_area_struct *vma, unsigned long address); +struct folio *ksm_might_need_to_copy(struct folio *folio, + struct vm_area_struct *vma, unsigned long addr); void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc); void folio_migrate_ksm(struct folio *newfolio, struct folio *folio); @@ -140,10 +140,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, return 0; } -static inline struct page *ksm_might_need_to_copy(struct page *page, - struct vm_area_struct *vma, unsigned long address) +static inline struct folio *ksm_might_need_to_copy(struct folio *folio, + struct vm_area_struct *vma, unsigned long addr) { - return page; + return folio; } static inline void rmap_walk_ksm(struct folio *folio, diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fb6c6109fdcad69f81cd38edf52dce7dc3d7a4e9..f3f8e6112a7ebb1b35d233a4f76415c36e7f0c57 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -216,6 +216,7 @@ enum kvm_bus { KVM_PIO_BUS, KVM_VIRTIO_CCW_NOTIFY_BUS, KVM_FAST_MMIO_BUS, + KVM_IOCSR_BUS, KVM_NR_BUSES }; @@ -392,6 +393,8 @@ struct kvm_vcpu { */ struct kvm_memory_slot *last_used_slot; u64 last_used_slot_gen; + + CK_KABI_RESERVE(1) }; /* @@ -588,6 +591,8 @@ struct kvm_memory_slot { u32 flags; short id; u16 as_id; + + CK_KABI_RESERVE(1) }; static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot) @@ -808,6 +813,8 @@ struct kvm { struct notifier_block pm_notifier; #endif char stats_id[KVM_STATS_NAME_SIZE]; + + CK_KABI_RESERVE(1) }; #define kvm_err(fmt, ...) 
\ @@ -1766,6 +1773,9 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) enum kvm_stat_kind { KVM_STAT_VM, KVM_STAT_VCPU, +#ifdef CONFIG_SW64 + KVM_STAT_DFX_SW64, /* Detail For vcpu stat EXtension */ +#endif }; struct kvm_stat_data { @@ -1895,6 +1905,21 @@ struct _kvm_stats_desc { HALT_POLL_HIST_COUNT), \ STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) +#ifdef CONFIG_SW64 +enum dfx_sw64_stat_kind { + DFX_SW64_STAT_U64, + DFX_SW64_STAT_CPUTIME, +}; + +/* Detail For vcpu stat EXtension debugfs item */ +struct dfx_sw64_kvm_stats_debugfs_item { + const char *name; + int offset; + enum dfx_sw64_stat_kind dfx_kind; + struct dentry *dentry; +}; +extern struct dfx_sw64_kvm_stats_debugfs_item dfx_sw64_debugfs_entries[]; +#endif extern struct dentry *kvm_debugfs_dir; ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index b35968ee9fb50804f015778595db9a3fd4d5d109..069d0515b50c35e6b1cd43cbb76556f4fef55b4a 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -54,6 +54,9 @@ struct list_lru { bool memcg_aware; struct xarray xa; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; void list_lru_destroy(struct list_lru *lru); diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 9b9b38e8956352d6f102ca1e28db51a4d9095d5d..51a258c24ff52014d71701b0335c641b2f3f1738 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -18,9 +18,9 @@ #if IS_ENABLED(CONFIG_LIVEPATCH) /* task patch states */ -#define KLP_UNDEFINED -1 -#define KLP_UNPATCHED 0 -#define KLP_PATCHED 1 +#define KLP_TRANSITION_IDLE -1 +#define KLP_TRANSITION_UNPATCHED 0 +#define KLP_TRANSITION_PATCHED 1 /** * struct klp_func - function structure for live patching diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b1fdb1554f2f9c2ccb7009314d0b4ef3b64555ec..f78f00829dcfb1f3da336597dd80c423c1379756 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -27,6 +27,9 @@ struct obj_cgroup; struct page; struct mm_struct; struct kmem_cache; +struct oom_control; + +#define MEMCG_OOM_PRIORITY 12 /* Cgroup-specific page state, on top of universal node page state */ enum memcg_stat_item { @@ -40,6 +43,20 @@ enum memcg_stat_item { MEMCG_NR_STAT, }; +enum memcg_exstat_item { + MEMCG_WMARK_MIN, + MEMCG_WMARK_RECLAIM, +#ifdef CONFIG_PAGECACHE_LIMIT + MEMCG_PGCACHE_RECLAIM, +#endif + MEMCG_NR_EXSTAT, +}; + +/* Only care about 64bit using "long" */ +struct mem_cgroup_exstat_cpu { + unsigned long item[MEMCG_NR_EXSTAT]; +}; + enum memcg_memory_event { MEMCG_LOW, MEMCG_HIGH, @@ -58,6 +75,35 @@ struct mem_cgroup_reclaim_cookie { unsigned int generation; }; +struct alloc_context; + +enum mem_lat_stat_item { + MEM_LAT_GLOBAL_DIRECT_RECLAIM, /* global direct reclaim latency */ + MEM_LAT_MEMCG_DIRECT_RECLAIM, /* memcg direct reclaim latency */ + MEM_LAT_DIRECT_COMPACT, /* direct compact latency */ + MEM_LAT_GLOBAL_DIRECT_SWAPOUT, /* global direct swapout latency */ + MEM_LAT_MEMCG_DIRECT_SWAPOUT, /* memcg direct swapout latency */ + MEM_LAT_DIRECT_SWAPIN, /* direct swapin latency */ + MEM_LAT_NR_STAT, +}; + +/* Memory latency histogram distribution, in milliseconds */ +enum mem_lat_count_t { + MEM_LAT_0_1, + MEM_LAT_1_5, + MEM_LAT_5_10, + MEM_LAT_10_100, + MEM_LAT_100_500, + MEM_LAT_500_1000, + MEM_LAT_1000_INF, + MEM_LAT_TOTAL, + MEM_LAT_NR_COUNT, +}; + +struct mem_cgroup_lat_stat_cpu { + unsigned long item[MEM_LAT_NR_STAT][MEM_LAT_NR_COUNT]; +}; + #ifdef CONFIG_MEMCG #define 
MEM_CGROUP_ID_SHIFT 16 @@ -65,6 +111,8 @@ struct mem_cgroup_reclaim_cookie { struct mem_cgroup_id { int id; refcount_t ref; + + CK_KABI_RESERVE(1) }; /* @@ -139,6 +187,11 @@ struct mem_cgroup_per_node { bool on_tree; struct mem_cgroup *memcg; /* Back pointer, we cannot */ /* use container_of */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct mem_cgroup_threshold { @@ -244,6 +297,11 @@ struct mem_cgroup { /* protected by memcg_oom_lock */ bool oom_lock; int under_oom; + /* memcg priority */ + bool use_priority_oom; + int priority; + int num_oom_skip; + struct mem_cgroup *next_reset; int swappiness; /* OOM-Killer disable */ @@ -297,6 +355,16 @@ struct mem_cgroup { bool tcpmem_active; int tcpmem_pressure; + /* memory.exstat */ + struct mem_cgroup_exstat_cpu __percpu *exstat_cpu; + + int wmark_min_adj; /* user-set value */ + int wmark_min_eadj; /* value in effect */ + + unsigned int wmark_ratio; + struct work_struct wmark_work; + unsigned int wmark_scale_factor; + #ifdef CONFIG_MEMCG_KMEM int kmemcg_id; struct obj_cgroup __rcu *objcg; @@ -328,11 +396,40 @@ struct mem_cgroup { struct deferred_split deferred_split_queue; #endif +#ifdef CONFIG_MEMSLI + struct mem_cgroup_lat_stat_cpu __percpu *lat_stat_cpu; + struct list_head lat_stat_notify[MEM_LAT_NR_STAT]; + struct mutex lat_stat_notify_lock; +#endif + +#ifdef CONFIG_PAGECACHE_LIMIT + bool allow_pgcache_limit; + unsigned long pgcache_limit_size; + bool pgcache_limit_sync; + struct work_struct pgcache_limit_work; +#endif + #ifdef CONFIG_LRU_GEN /* per-memcg mm_struct list */ struct lru_gen_mm_list mm_list; #endif +#ifdef CONFIG_ASYNC_FORK + unsigned long async_fork; +#endif + + unsigned long offline_jiffies; + unsigned long reap_background; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + struct mem_cgroup_per_node *nodeinfo[]; }; @@ -688,7 +785,8 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); -void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); + +void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); void __mem_cgroup_uncharge(struct folio *folio); @@ -907,6 +1005,21 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) return !!(memcg->css.flags & CSS_ONLINE); } +/* memcg priority*/ +void mem_cgroup_account_oom_skip(struct task_struct *task, + struct oom_control *oc); + +void mem_cgroup_select_bad_process(struct oom_control *oc); + +static inline bool root_memcg_use_priority_oom(void) +{ + if (mem_cgroup_disabled()) + return false; + if (root_mem_cgroup->use_priority_oom) + return true; + return false; +} + void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, int zid, int nr_pages); @@ -921,6 +1034,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, } void mem_cgroup_handle_over_high(gfp_t gfp_mask); +void mem_cgroup_wmark_min_throttle(void); unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); @@ -1150,6 +1264,32 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); +void memcg_meminfo(struct mem_cgroup *memcg, + struct sysinfo *info, struct sysinfo_ext *ext); + +void drain_all_stock(struct mem_cgroup *root_memcg); + +#ifdef CONFIG_RICH_CONTAINER +struct mem_cgroup 
*rich_container_get_memcg(void); +#else +static inline struct mem_cgroup *rich_container_get_memcg(void) +{ + return NULL; +} +#endif + +static inline bool is_wmark_ok(struct mem_cgroup *memcg, bool high) +{ + if (high) + return page_counter_read(&memcg->memory) < memcg->memory.wmark_high; + + return page_counter_read(&memcg->memory) < memcg->memory.wmark_low; +} + +int memcg_get_wmark_min_adj(struct task_struct *curr); +void memcg_check_wmark_min_adj(struct task_struct *curr, + struct alloc_context *ac); + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 @@ -1252,7 +1392,7 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, return 0; } -static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) +static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr) { } @@ -1402,6 +1542,21 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) return true; } +/* memcg priority */ +static inline void mem_cgroup_account_oom_skip(struct task_struct *task, + struct oom_control *oc) +{ +} + +static inline void mem_cgroup_select_bad_process(struct oom_control *oc) +{ +} + +static inline bool root_memcg_use_priority_oom(void) +{ + return false; +} + static inline unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx) @@ -1453,6 +1608,10 @@ static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask) { } +static inline void mem_cgroup_wmark_min_throttle(void) +{ +} + static inline void mem_cgroup_enter_user_fault(void) { } @@ -1566,6 +1725,12 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { } +static inline void +memcg_meminfo(struct mem_cgroup *memcg, + struct sysinfo *info, struct sysinfo_ext *ext) +{ +} + static inline void split_page_memcg(struct page *head, unsigned int nr) { } @@ -1577,8 +1742,60 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, { return 0; } + +static inline bool is_wmark_ok(struct mem_cgroup *memcg, bool low) +{ + return false; +} + +static inline int memcg_get_wmark_min_adj(struct task_struct *curr) +{ + return 0; +} + +static inline void memcg_check_wmark_min_adj(struct task_struct *curr, + struct alloc_context *ac) +{ +} #endif /* CONFIG_MEMCG */ +#ifdef CONFIG_MEMSLI +extern void memcg_lat_stat_start(u64 *start); +extern void memcg_lat_stat_end(enum mem_lat_stat_item sidx, u64 start); +#else +static inline void memcg_lat_stat_start(u64 *start) +{ +} + +static inline void memcg_lat_stat_end(enum mem_lat_stat_item sidx, u64 start) +{ +} +#endif /* CONFIG_MEMSLI */ + +#ifdef CONFIG_ASYNC_FORK +static inline unsigned long task_async_fork(struct task_struct *p) +{ + struct mem_cgroup *task_memcg; + unsigned long async_fork = 0UL; + + if (!async_fork_enabled() || mem_cgroup_disabled()) + return 0UL; + + rcu_read_lock(); + task_memcg = mem_cgroup_from_task(p); + if (task_memcg) + async_fork = task_memcg->async_fork; + rcu_read_unlock(); + + return async_fork; +} +#else +static inline unsigned long task_async_fork(struct task_struct *p) +{ + return 0UL; +} +#endif + static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) { __mod_lruvec_kmem_state(p, idx, 1); diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index d232de7cdc569f88adb5e2fe5d978fcaa68b2cc2..4888f8ee09b31371ce5b575cbf4fee4bf59be48a 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -52,6 +52,10 @@ struct mempolicy { nodemask_t cpuset_mems_allowed; /* relative to these nodes */ nodemask_t 
user_nodemask; /* nodemask passed by user */ } w; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; /* @@ -174,7 +178,7 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ extern bool vma_migratable(struct vm_area_struct *vma); -extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); +int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long); extern void mpol_put_task_policy(struct task_struct *); static inline bool mpol_is_preferred_many(struct mempolicy *pol) @@ -278,7 +282,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol) } #endif -static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, +static inline int mpol_misplaced(struct folio *folio, + struct vm_area_struct *vma, unsigned long address) { return -1; /* no node preference */ diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 4aae6c06c5f283d4abbcf213b309f834af0f03e0..e090794f89e148756c992189b3a0af1a76b8aa0f 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -5,6 +5,7 @@ #ifndef _LINUX_MEMPOOL_H #define _LINUX_MEMPOOL_H +#include #include #include @@ -23,6 +24,10 @@ typedef struct mempool_s { mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) } mempool_t; static inline bool mempool_initialized(mempool_t *pool) diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 1314d9c5f05b0e91b50394f99ab8e1ac93fb6a4b..fae593fa9012e1cd4e55fe66e450d70e0df37545 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -25,6 +25,9 @@ struct vmem_altmap { unsigned long free; unsigned long align; unsigned long alloc; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* @@ -99,6 +102,9 @@ struct dev_pagemap_ops { */ int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn, unsigned long nr_pages, int mf_flags); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define PGMAP_ALTMAP_VALID (1 << 0) @@ -133,6 +139,13 @@ struct dev_pagemap { const struct dev_pagemap_ops *ops; void *owner; int nr_range; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + union { struct range range; DECLARE_FLEX_ARRAY(struct range, ranges); diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 711dd9412561f6291f73e35b6a8f0c75fa7f1b0b..2ce13e8a309bdcc6b9a05448471e62cdf62aae88 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -142,10 +142,10 @@ const struct movable_operations *page_movable_ops(struct page *page) } #ifdef CONFIG_NUMA_BALANCING -int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, +int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node); #else -static inline int migrate_misplaced_page(struct page *page, +static inline int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node) { return -EAGAIN; /* can't migrate now */ diff --git a/include/linux/mm.h b/include/linux/mm.h index b6a4d6471b4a7257de12c82d359dba610fa0bcc2..6b911df2b638625eefb26a8200319480d3961ef6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -397,6 +397,20 @@ extern unsigned int kobjsize(const void *objp); # define VM_UFFD_MINOR VM_NONE #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ +/* + * This flag is used to connect VFIO to arch specific KVM code. 
It + * indicates that the memory under this VMA is safe for use with any + * non-cachable memory type inside KVM. Some VFIO devices, on some + * platforms, are thought to be unsafe and can cause machine crashes + * if KVM does not lock down the memory type. + */ +#ifdef CONFIG_64BIT +#define VM_ALLOW_ANY_UNCACHED_BIT 39 +#define VM_ALLOW_ANY_UNCACHED BIT(VM_ALLOW_ANY_UNCACHED_BIT) +#else +#define VM_ALLOW_ANY_UNCACHED VM_NONE +#endif + /* Bits set in the VMA until the stack is in its final location */ #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY) @@ -553,6 +567,10 @@ struct vm_fault { * page table to avoid allocation from * atomic context. */ + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -632,6 +650,11 @@ struct vm_operations_struct { */ struct page *(*find_special_page)(struct vm_area_struct *vma, unsigned long addr); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_NUMA_BALANCING @@ -1343,7 +1366,6 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio, struct page *page, unsigned int nr, unsigned long addr); vm_fault_t finish_fault(struct vm_fault *vmf); -vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #endif /* @@ -1583,11 +1605,13 @@ static inline void put_page(struct page *page) #define GUP_PIN_COUNTING_BIAS (1U << 10) void unpin_user_page(struct page *page); +void unpin_folio(struct folio *folio); void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool make_dirty); void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, bool make_dirty); void unpin_user_pages(struct page **pages, unsigned long npages); +void unpin_folios(struct folio **folios, unsigned long nfolios); static inline bool is_cow_mapping(vm_flags_t flags) { @@ -1692,26 +1716,26 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid) #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS -static inline int page_cpupid_xchg_last(struct page *page, int cpupid) +static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) { - return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); + return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK); } -static inline int page_cpupid_last(struct page *page) +static inline int folio_last_cpupid(struct folio *folio) { - return page->_last_cpupid; + return folio->_last_cpupid; } static inline void page_cpupid_reset_last(struct page *page) { page->_last_cpupid = -1 & LAST_CPUPID_MASK; } #else -static inline int page_cpupid_last(struct page *page) +static inline int folio_last_cpupid(struct folio *folio) { - return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; + return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; } -extern int page_cpupid_xchg_last(struct page *page, int cpupid); +int folio_xchg_last_cpupid(struct folio *folio, int cpupid); static inline void page_cpupid_reset_last(struct page *page) { @@ -1719,11 +1743,12 @@ static inline void page_cpupid_reset_last(struct page *page) } #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ -static inline int xchg_page_access_time(struct page *page, int time) +static inline int folio_xchg_access_time(struct folio *folio, int time) { int last_time; - last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS); + last_time = folio_xchg_last_cpupid(folio, + time >> PAGE_ACCESS_TIME_BUCKETS); return last_time << 
PAGE_ACCESS_TIME_BUCKETS;
 }
 
@@ -1737,19 +1762,19 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
 {
     }
 }
 #else /* !CONFIG_NUMA_BALANCING */
-static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
 {
-    return page_to_nid(page); /* XXX */
+    return folio_nid(folio); /* XXX */
 }
 
-static inline int xchg_page_access_time(struct page *page, int time)
+static inline int folio_xchg_access_time(struct folio *folio, int time)
 {
     return 0;
 }
 
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
 {
-    return page_to_nid(page); /* XXX */
+    return folio_nid(folio); /* XXX */
 }
 
 static inline int cpupid_to_nid(int cpupid)
@@ -1950,15 +1975,15 @@ static inline bool page_maybe_dma_pinned(struct page *page)
 *
 * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
 */
-static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
-                      struct page *page)
+static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
+                      struct folio *folio)
 {
    VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
 
    if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
        return false;
 
-    return page_maybe_dma_pinned(page);
+    return folio_maybe_dma_pinned(folio);
 }
 
 /**
@@ -2128,21 +2153,49 @@ static inline size_t folio_size(struct folio *folio)
 }
 
 /**
- * folio_estimated_sharers - Estimate the number of sharers of a folio.
+ * folio_likely_mapped_shared - Estimate if the folio is mapped into the page
+ *                              tables of more than one MM
  * @folio: The folio.
  *
- * folio_estimated_sharers() aims to serve as a function to efficiently
- * estimate the number of processes sharing a folio. This is done by
- * looking at the precise mapcount of the first subpage in the folio, and
- * assuming the other subpages are the same. This may not be true for large
- * folios. If you want exact mapcounts for exact calculations, look at
- * page_mapcount() or folio_total_mapcount().
+ * This function checks if the folio is currently mapped into more than one
+ * MM ("mapped shared"), or if the folio is only mapped into a single MM
+ * ("mapped exclusively").
+ *
+ * As precise information is not easily available for all folios, this function
+ * estimates the number of MMs ("sharers") that are currently mapping a folio
+ * using the number of times the first page of the folio is currently mapped
+ * into page tables.
+ *
+ * For small anonymous folios (except KSM folios) and anonymous hugetlb folios,
+ * the return value will be exactly correct, because they can only be mapped
+ * at most once into an MM, and they cannot be partially mapped.
  *
- * Return: The estimated number of processes sharing a folio.
+ * For other folios, the result can be fuzzy:
+ *    #. For partially-mappable large folios (THP), the return value can wrongly
+ *       indicate "mapped exclusively" (false negative) when the folio is
+ *       only partially mapped into at least one MM.
+ *    #. For pagecache folios (including hugetlb), the return value can wrongly
+ *       indicate "mapped shared" (false positive) when two VMAs in the same MM
+ *       cover the same file range.
+ *    #. For (small) KSM folios, the return value can wrongly indicate "mapped
+ *       shared" (false positive), when the folio is mapped multiple times into
+ *       the same MM.
+ *
+ * Further, this function only considers current page table mappings that
+ * are tracked using the folio mapcount(s).
+ * + * This function does not consider: + * #. If the folio might get mapped in the (near) future (e.g., swapcache, + * pagecache, temporary unmapping for migration). + * #. If the folio is mapped differently (VM_PFNMAP). + * #. If hugetlb page table sharing applies. Callers might want to check + * hugetlb_pmd_shared(). + * + * Return: Whether the folio is estimated to be mapped into more than one MM. */ -static inline int folio_estimated_sharers(struct folio *folio) +static inline bool folio_likely_mapped_shared(struct folio *folio) { - return page_mapcount(folio_page(folio, 0)); + return page_mapcount(folio_page(folio, 0)) > 1; } #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE @@ -2333,6 +2386,8 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, pte_t pte); struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t pmd); struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd); @@ -2478,6 +2533,7 @@ struct kvec; struct page *get_dump_page(unsigned long addr); bool folio_mark_dirty(struct folio *folio); +bool folio_mark_dirty_lock(struct folio *folio); bool set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); @@ -3162,6 +3218,7 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn); #endif extern void set_dma_reserve(unsigned long new_dma_reserve); +extern int __meminit init_min_cache_kbytes(void); extern void mem_init(void); extern void __init mmap_init(void); @@ -3467,6 +3524,10 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num); +int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page *page); +int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num); int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num); int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, @@ -3763,8 +3824,9 @@ void *sparse_buffer_alloc(unsigned long size); struct page * __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, struct dev_pagemap *pgmap); -void pmd_init(void *addr); void pud_init(void *addr); +void pmd_init(void *addr); +void kernel_pte_init(void *addr); pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); @@ -4068,4 +4130,115 @@ static inline void accept_memory(phys_addr_t start, phys_addr_t end) #endif +#ifdef CONFIG_ASYNC_FORK +#define ASYNC_FORK_CANDIDATE 0 +DECLARE_STATIC_KEY_FALSE(async_fork_enabled_key); +DECLARE_STATIC_KEY_FALSE(async_fork_staging_key); +static inline bool async_fork_enabled(void) +{ + return static_branch_unlikely(&async_fork_enabled_key); +} +static inline bool async_fork_staging(void) +{ + return static_branch_unlikely(&async_fork_staging_key); +} + +int async_fork_cpr_fast(struct vm_area_struct *vma, struct vm_area_struct *mpnt); +void async_fork_cpr_bind(struct mm_struct *oldmm, struct mm_struct *mm, int err); +void async_fork_cpr_rest(void); +void async_fork_cpr_done(struct mm_struct *mm, bool r, bool l); + +bool 
__is_pmd_async_fork(pmd_t pmd); +void __async_fork_fixup_pmd(struct vm_area_struct *mpnt, pmd_t *pmd, + unsigned long addr); +void __async_fork_fixup_vma(struct vm_area_struct *mpnt); + +static inline bool is_pmd_async_fork(pmd_t pmd) +{ + if (async_fork_staging()) + return __is_pmd_async_fork(pmd); + return false; +} +static inline void async_fork_fixup_pmd(struct vm_area_struct *mpnt, pmd_t *pmd, + unsigned long addr) +{ + if (async_fork_staging()) + __async_fork_fixup_pmd(mpnt, pmd, addr); +} +static inline void async_fork_fixup_vma(struct vm_area_struct *mpnt) +{ + if (async_fork_staging()) + __async_fork_fixup_vma(mpnt); +} +#else +static inline bool async_fork_enabled(void) +{ + return false; +} +static inline bool async_fork_staging(void) +{ + return false; +} + +static inline int async_fork_cpr_fast(struct vm_area_struct *vma, + struct vm_area_struct *mpnt) +{ + return -EOPNOTSUPP; +} +static inline void async_fork_cpr_bind(struct mm_struct *oldmm, + struct mm_struct *mm, int err) +{ +} +static inline void async_fork_cpr_rest(void) +{ +} +static inline void async_fork_cpr_done(struct mm_struct *mm, bool r, bool l) +{ +} + +static inline bool is_pmd_async_fork(pmd_t pmd) +{ + return false; +} +static inline void async_fork_fixup_pmd(struct vm_area_struct *mpnt, + pmd_t *pmd, unsigned long addr) +{ +} +static inline void async_fork_fixup_vma(struct vm_area_struct *mpnt) +{ +} +#endif + +struct fast_reflink_work { + struct work_struct work; + struct address_space *mapping; +}; + +int fast_reflink_apply(struct address_space *mapping, pgoff_t start, + pgoff_t end); +bool is_pmd_fast_reflink(pmd_t pmd); +void fast_reflink_fixup_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr); +void fast_reflink_fixup_vma(struct vm_area_struct *vma); + +static inline bool is_pmd_transient(pmd_t pmd) +{ + if (is_pmd_fast_reflink(pmd)) + return true; + if (is_pmd_async_fork(pmd)) + return true; + return false; +} +static inline void fixup_pmd(struct vm_area_struct *vma, + pmd_t *pmd, unsigned long addr) +{ + fast_reflink_fixup_pmd(vma, pmd, addr); + async_fork_fixup_pmd(vma, pmd, addr); +} +static inline void fixup_vma(struct vm_area_struct *vma) +{ + fast_reflink_fixup_vma(vma); + async_fork_fixup_vma(vma); +} + #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 20c96ce98751a46cb520dc24565023b82a5b09a6..4fd65d52ccfac78e97183628df62c5cf4c8453cc 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -199,6 +199,10 @@ struct page { not kmapped, ie. highmem) */ #endif /* WANT_PAGE_VIRTUAL */ +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS + int _last_cpupid; +#endif + #ifdef CONFIG_KMSAN /* * KMSAN metadata for this page: @@ -210,10 +214,6 @@ struct page { struct page *kmsan_shadow; struct page *kmsan_origin; #endif - -#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS - int _last_cpupid; -#endif } _struct_page_alignment; /* @@ -221,8 +221,8 @@ struct page { * * An 'encoded_page' pointer is a pointer to a regular 'struct page', but * with the low bits of the pointer indicating extra context-dependent - * information. Not super-common, but happens in mmu_gather and mlock - * handling, and this acts as a type system check on that use. + * information. Only used in mmu_gather handling, and this acts as a type + * system check on that use. 
* * We only really have two guaranteed bits in general, although you could * play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE) * * Use the supplied helper functions to encode/decode the pointer and bits. */ struct encoded_page; -#define ENCODE_PAGE_BITS 3ul + +#define ENCODED_PAGE_BITS 3ul + +/* Perform rmap removal after we have flushed the TLB. */ +#define ENCODED_PAGE_BIT_DELAY_RMAP 1ul + +/* + * The next item in an encoded_page array is the "nr_pages" argument, specifying + * the number of consecutive pages starting from this page, that all belong to + * the same folio. For example, "nr_pages" corresponds to the number of folio + * references that must be dropped. If this bit is not set, "nr_pages" is + * implicitly 1. + */ +#define ENCODED_PAGE_BIT_NR_PAGES_NEXT 2ul + static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags) { - BUILD_BUG_ON(flags > ENCODE_PAGE_BITS); + BUILD_BUG_ON(flags > ENCODED_PAGE_BITS); return (struct encoded_page *)(flags | (unsigned long)page); } static inline unsigned long encoded_page_flags(struct encoded_page *page) { - return ENCODE_PAGE_BITS & (unsigned long)page; + return ENCODED_PAGE_BITS & (unsigned long)page; } static inline struct page *encoded_page_ptr(struct encoded_page *page) { - return (struct page *)(~ENCODE_PAGE_BITS & (unsigned long)page); + return (struct page *)(~ENCODED_PAGE_BITS & (unsigned long)page); +} + +static __always_inline struct encoded_page *encode_nr_pages(unsigned long nr) +{ + VM_WARN_ON_ONCE((nr << 2) >> 2 != nr); + return (struct encoded_page *)(nr << 2); +} + +static __always_inline unsigned long encoded_nr_pages(struct encoded_page *page) +{ + return ((unsigned long)page) >> 2; } /* @@ -272,6 +297,8 @@ typedef struct { * @_refcount: Do not access this member directly. Use folio_ref_count() * to find how many references there are to this folio. * @memcg_data: Memory Control Group data. + * @virtual: Virtual address in the kernel direct map. + * @_last_cpupid: IDs of last CPU and last process that accessed the folio. * @_entire_mapcount: Do not use directly, call folio_entire_mapcount(). * @_nr_pages_mapped: Do not use directly, call folio_mapcount(). * @_pincount: Do not use directly, call folio_maybe_dma_pinned(). 
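For context, a minimal sketch (not part of the patch) of how an mmu_gather-style consumer could drive the encoded_page helpers above; the slots array and both functions are hypothetical, only the ENCODED_PAGE_* bits and the encode/decode helpers come from this change::

	/* Hypothetical producer: record "nr" consecutive pages of one folio. */
	static void batch_add(struct encoded_page **slots, int *n,
			      struct page *page, unsigned long nr,
			      bool delay_rmap)
	{
		unsigned long flags = delay_rmap ? ENCODED_PAGE_BIT_DELAY_RMAP : 0;

		if (nr == 1) {
			slots[(*n)++] = encode_page(page, flags);
			return;
		}
		/* Flag that the next slot carries the page count. */
		slots[(*n)++] = encode_page(page, flags | ENCODED_PAGE_BIT_NR_PAGES_NEXT);
		slots[(*n)++] = encode_nr_pages(nr);
	}

	/* Hypothetical consumer: decode one entry, advancing the cursor. */
	static struct page *batch_next(struct encoded_page **slots, int *i,
				       unsigned long *nr)
	{
		struct encoded_page *enc = slots[(*i)++];

		/* "nr_pages" follows only when the bit is set, else it is 1. */
		if (encoded_page_flags(enc) & ENCODED_PAGE_BIT_NR_PAGES_NEXT)
			*nr = encoded_nr_pages(slots[(*i)++]);
		else
			*nr = 1;
		return encoded_page_ptr(enc);
	}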
@@ -317,6 +344,12 @@ struct folio { atomic_t _refcount; #ifdef CONFIG_MEMCG unsigned long memcg_data; +#endif +#if defined(WANT_PAGE_VIRTUAL) + void *virtual; +#endif +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS + int _last_cpupid; #endif /* private: the union with struct page is transitional */ }; @@ -373,6 +406,12 @@ FOLIO_MATCH(_refcount, _refcount); #ifdef CONFIG_MEMCG FOLIO_MATCH(memcg_data, memcg_data); #endif +#if defined(WANT_PAGE_VIRTUAL) +FOLIO_MATCH(virtual, virtual); +#endif +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS +FOLIO_MATCH(_last_cpupid, _last_cpupid); +#endif #undef FOLIO_MATCH #define FOLIO_MATCH(pg, fl) \ static_assert(offsetof(struct folio, fl) == \ @@ -690,6 +729,17 @@ struct vm_area_struct { struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; + +#ifdef CONFIG_ASYNC_FORK + struct vm_area_struct *async_fork_vma; +#endif + + bool fast_reflink; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; #ifdef CONFIG_SCHED_MM_CID @@ -947,8 +997,21 @@ struct mm_struct { #endif } lru_gen; #endif /* CONFIG_LRU_GEN */ +#ifdef CONFIG_ASYNC_FORK + struct mm_struct *async_fork_mm; + unsigned long async_fork_flags; +#endif } __randomize_layout; + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + /* * The mm_cpumask needs to be at the end of mm_struct, because it * is dynamically sized based on nr_cpu_ids. diff --git a/include/linux/mman.h b/include/linux/mman.h index b2e2677ea156ac5eddc55b90f6a133a92249cfe6..4d962ec5b2fd74c073efafe75a2b9d52cbc0c468 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -77,6 +78,16 @@ unsigned long vm_memory_committed(void); static inline void vm_acct_memory(long pages) { + struct rich_container_ext *ext = NULL; + + /* Account pages in current rich container */ + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) + percpu_counter_add_batch(&ext->vm_committed_as, pages, ext->as_batch); + percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch); } @@ -154,14 +165,20 @@ calc_vm_prot_bits(unsigned long prot, unsigned long pkey) static inline unsigned long calc_vm_flag_bits(struct file *file, unsigned long flags) { - return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | - _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | - _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | - arch_calc_vm_flag_bits(file, flags); + return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) | + _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED) | + _calc_vm_trans(flags, MAP_SYNC, VM_SYNC) | + _calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) | + arch_calc_vm_flag_bits(file, flags); } unsigned long vm_commit_limit(void); +#ifdef CONFIG_MEMCG +unsigned long rich_container_vm_commit_limit(struct rich_container_ext *ext, + struct mem_cgroup *memcg); +#endif + #ifndef arch_memory_deny_write_exec_supported static inline bool arch_memory_deny_write_exec_supported(void) { diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 05092c37a430c2ff88f49345524ad51ee8af821b..569df4b25e2e99705d9a230c84ff82a9c667934a 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -313,6 +313,13 @@ enum lruvec_flags { */ LRUVEC_CGROUP_CONGESTED, LRUVEC_NODE_CONGESTED, + LRUVEC_DIRTY, /* reclaim scanning has 
recently found + * many dirty file pages at the tail + * of the LRU. + */ + LRUVEC_WRITEBACK, /* reclaim scanning has recently found + * many pages under writeback + */ }; #endif /* !__GENERATING_BOUNDS_H */ @@ -643,6 +650,7 @@ struct lruvec { #ifdef CONFIG_MEMCG struct pglist_data *pgdat; #endif + CK_KABI_RESERVE(1) }; /* Isolate unmapped pages */ @@ -713,6 +721,8 @@ struct per_cpu_zonestat { struct per_cpu_nodestat { s8 stat_threshold; s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS]; + + CK_KABI_RESERVE(1) }; #endif /* !__GENERATING_BOUNDS.H */ @@ -985,16 +995,14 @@ struct zone { /* Zone statistics */ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_internodealigned_in_smp; enum pgdat_flags { - PGDAT_DIRTY, /* reclaim scanning has recently found - * many dirty file pages at the tail - * of the LRU. - */ - PGDAT_WRITEBACK, /* reclaim scanning has recently found - * many pages under writeback - */ PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ }; @@ -1336,6 +1344,7 @@ typedef struct pglist_data { */ unsigned long totalreserve_pages; + unsigned long min_cache_pages; #ifdef CONFIG_NUMA /* * node reclaim becomes active if more unmapped pages exist. @@ -1403,6 +1412,11 @@ typedef struct pglist_data { #ifdef CONFIG_MEMORY_FAILURE struct memory_failure_stats mf_stats; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } pg_data_t; #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) diff --git a/include/linux/module.h b/include/linux/module.h index a98e188cf37b8182b8654b3be069f93ce1d97174..362258391d38b5eac1ed87db7be326407558b428 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -583,6 +583,11 @@ struct module { #ifdef CONFIG_DYNAMIC_DEBUG_CORE struct _ddebug_info dyndbg_info; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned __randomize_layout; #ifndef MODULE_ARCH_INIT #define MODULE_ARCH_INIT {} diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 962cd41a2cb5aff1dbf16a90f2573d81f064444c..c401b4e975cc05659cddc535af9738e662673a3b 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -276,7 +276,7 @@ struct kparam_array read-only sections (which is part of respective UNIX ABI on these platforms). So 'const' makes no sense and even causes compile failures with some compilers. 
*/ -#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) +#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) || defined(CONFIG_SW64) #define __moduleparam_const #else #define __moduleparam_const const diff --git a/include/linux/mount.h b/include/linux/mount.h index 4f40b40306d0f9679ca3be2615a03656196a501b..802437cd61cbe4204c98862fe775a01db56702ea 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -12,6 +12,7 @@ #include #include +#include struct super_block; struct dentry; @@ -72,6 +73,8 @@ struct vfsmount { struct super_block *mnt_sb; /* pointer to superblock */ int mnt_flags; struct mnt_idmap *mnt_idmap; + + CK_KABI_RESERVE(1) } __randomize_layout; static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt) diff --git a/include/linux/msi.h b/include/linux/msi.h index ddace8c34dcf958edae65de2858bf924adb9d19e..e8205818000468c4a00242412ffa620c5efbbb31 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -203,6 +203,9 @@ struct msi_desc { struct pci_msi_desc pci; struct msi_desc_data data; }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* diff --git a/include/linux/net.h b/include/linux/net.h index c9b4a63791a4594882991a8fd109fe3648bf2997..6b789115b39e972a6d2455ae34da92f5c49b92bb 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -223,6 +223,15 @@ struct proto_ops { int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg, size_t size); int (*set_rcvlowat)(struct sock *sk, int val); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #define DECLARE_SOCKADDR(type, dst, src) \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1576e7443eee50aeeaf8ccadd3b10d27bc97c72e..210abf138efa5f9c912142b2c70558d9382b5814 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -257,6 +257,8 @@ struct netdev_hw_addr_list { /* Auxiliary tree for faster lookup on addition and deletion */ struct rb_root tree; + + CK_KABI_RESERVE(1) }; #define netdev_hw_addr_list_count(l) ((l)->count) @@ -319,6 +321,9 @@ struct header_ops { const unsigned char *haddr); bool (*validate)(const char *ll_header, unsigned int len); __be16 (*parse_protocol)(const struct sk_buff *skb); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* These flag bits are private to the generic network queueing @@ -380,6 +385,11 @@ struct napi_struct { /* control-path-only fields follow */ struct list_head dev_list; struct hlist_node napi_hash_node; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum { @@ -658,6 +668,14 @@ struct netdev_queue { #ifdef CONFIG_BQL struct dql dql; #endif + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) } ____cacheline_aligned_in_smp; extern int sysctl_fb_tunnels_only_for_init_net; @@ -823,6 +841,9 @@ struct xps_dev_maps { struct rcu_head rcu; unsigned int nr_ids; s16 num_tc; + + CK_KABI_RESERVE(1) + struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ }; @@ -1038,6 +1059,11 @@ struct xfrmdev_ops { int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack); void (*xdo_dev_policy_delete) (struct xfrm_policy *x); void (*xdo_dev_policy_free) (struct xfrm_policy *x); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #endif @@ -1645,6 +1671,23 
@@ struct net_device_ops { int (*ndo_hwtstamp_set)(struct net_device *dev, struct kernel_hwtstamp_config *kernel_config, struct netlink_ext_ack *extack); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; /** @@ -2417,6 +2460,22 @@ struct net_device { struct rtnl_hw_stats64 *offload_xstats_l3; struct devlink_port *devlink_port; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2719,6 +2778,11 @@ struct offload_callbacks { struct sk_buff *(*gro_receive)(struct list_head *head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb, int nhoff); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct packet_offload { diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index cc5a2a220af8e39e2ad3d415e57b1e702f0d7066..840254d1dbb38a85697de8b33ce8e7a0d8c3353b 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -179,6 +179,8 @@ struct nf_sockopt_ops { int (*get)(struct sock *sk, int optval, void __user *user, int *len); /* Use the module struct to lock set/get code in place */ struct module *owner; + + CK_KABI_RESERVE(1) }; /* Function to register/unregister hook points. */ diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 2770db2fa080d2925d1e17b1915b4242ce93f7a2..0b65d9b6597d6ad41879a43ba259067392e46a69 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -4,6 +4,7 @@ #include #include +#include struct ip_conntrack_stat { unsigned int found; @@ -19,6 +20,8 @@ struct ip_conntrack_stat { unsigned int expect_delete; unsigned int search_restart; unsigned int chaintoolong; + + CK_KABI_RESERVE(1) }; #define NFCT_INFOMASK 7UL diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 5897f3dbaf7c3e03f0e6d989922641117a1b19fd..743ece7a7bbbf9ddfb4d5d98da42627ab845d00a 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -217,6 +217,9 @@ struct xt_target { unsigned short proto; unsigned short family; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* Furniture shopping... 
*/ diff --git a/include/linux/nmi.h b/include/linux/nmi.h index e92e378df000fb1eca1e082ebc889fe7849a19d2..43dd3a79fdf21ec7143614c30c4bb49a2183273e 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -235,4 +235,18 @@ static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {} static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {} #endif +#ifdef CONFIG_SDEI_WATCHDOG +void sdei_watchdog_hardlockup_enable(unsigned int cpu); +void sdei_watchdog_hardlockup_disable(unsigned int cpu); +void sdei_watchdog_clear_eoi(void); +int sdei_watchdog_hardlockup_probe(void); +extern bool disable_sdei_nmi_watchdog; +#else +static inline void sdei_watchdog_hardlockup_enable(unsigned int cpu) { } +static inline void sdei_watchdog_hardlockup_disable(unsigned int cpu) { } +static inline void sdei_watchdog_clear_eoi(void) { } +static inline int sdei_watchdog_hardlockup_probe(void) { return -ENODEV; } +#define disable_sdei_nmi_watchdog 1 +#endif + #endif diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h index 0f1d024bd9582618a54b601988969694b4dafb47..1a15faeaf768ff465f1092f47ccb88526ea233a7 100644 --- a/include/linux/ns_common.h +++ b/include/linux/ns_common.h @@ -3,6 +3,7 @@ #define _LINUX_NS_COMMON_H #include +#include struct proc_ns_operations; @@ -11,6 +12,8 @@ struct ns_common { const struct proc_ns_operations *ops; unsigned int inum; refcount_t count; + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 771cb028587242a55509e4aa99684f61b0598f03..c9162ab4fd2556637330f87442b47d4cc5092fbf 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -38,6 +38,15 @@ struct nsproxy { struct time_namespace *time_ns; struct time_namespace *time_ns_for_children; struct cgroup_namespace *cgroup_ns; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; extern struct nsproxy init_nsproxy; diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 33212e93f4a6318493342a541894ec0879a884fa..ac959bbbffd55fbd4f9dc90e23bbc1252a6d16be 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -6,7 +6,9 @@ #ifdef CONFIG_OBJTOOL +#ifndef CONFIG_ARM64 #include +#endif #ifndef __ASSEMBLY__ diff --git a/include/linux/objtool_types.h b/include/linux/objtool_types.h index 453a4f4ef39d441d1e047b2a06e6700bf1e51b21..ca51cc50dc8221a513087206e98192af213a4b7b 100644 --- a/include/linux/objtool_types.h +++ b/include/linux/objtool_types.h @@ -43,6 +43,8 @@ struct unwind_hint { * * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain * location so that it can be restored later. + * + * UNWIND_HINT_TYPE_IRQ_STACK: Used to unwind through the IRQ stack. 
*/ #define UNWIND_HINT_TYPE_UNDEFINED 0 #define UNWIND_HINT_TYPE_END_OF_STACK 1 @@ -53,5 +55,6 @@ struct unwind_hint { #define UNWIND_HINT_TYPE_FUNC 5 #define UNWIND_HINT_TYPE_SAVE 6 #define UNWIND_HINT_TYPE_RESTORE 7 +#define UNWIND_HINT_TYPE_IRQ_STACK 8 #endif /* _LINUX_OBJTOOL_TYPES_H */ diff --git a/include/linux/of.h b/include/linux/of.h index 024dda54b9c77638023d18f61409632f1a38f9ba..8917b1e23e521c71ceaa52d0daeed474b6b3a92a 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -65,6 +65,13 @@ struct device_node { unsigned int unique_id; struct of_irq_controller *irq_trans; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; #define MAX_PHANDLE_ARGS 16 diff --git a/include/linux/oom.h b/include/linux/oom.h index 7d0c9c48a0c54e7265e2dc872d7f81c60463788a..3d7ab770308e1c6d794b34031fb5288bf352abb5 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -50,6 +50,11 @@ struct oom_control { struct task_struct *chosen; long chosen_points; + /* Memcg priority */ + struct mem_cgroup *reset_list; + int num_skip; + bool use_priority_oom; + /* Used to print the constraint info. */ enum oom_constraint constraint; }; @@ -110,6 +115,8 @@ extern int unregister_oom_notifier(struct notifier_block *nb); extern bool oom_killer_disable(signed long timeout); extern void oom_killer_enable(void); +extern int oom_evaluate_task(struct task_struct *task, void *arg); + extern struct task_struct *find_lock_task_mm(struct task_struct *p); #endif /* _INCLUDE_LINUX_OOM_H */ diff --git a/include/linux/orc_entry.h b/include/linux/orc_entry.h new file mode 100644 index 0000000000000000000000000000000000000000..194a6c41476ee15009a8942f347df209d5a51cf2 --- /dev/null +++ b/include/linux/orc_entry.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Josh Poimboeuf + */ + +#ifndef _ORC_ENTRY_H +#define _ORC_ENTRY_H + +#ifndef __ASSEMBLY__ +#include + +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. It tells the + * unwinder how to find the previous SP and BP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. 
+ */ +struct orc_entry { + s16 sp_offset; + s16 fp_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned sp_reg:4; + unsigned fp_reg:4; + unsigned type:4; + unsigned signal:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned fp_reg:4; + unsigned sp_reg:4; + unsigned unused:3; + unsigned signal:1; + unsigned type:4; +#endif +} __packed; + +#endif /* __ASSEMBLY__ */ + +#endif /* _ORC_ENTRY_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index a77f3a7d21d12f0d83f59957460391f255a45abc..56052b705f9abe626976de4557d3364866e48552 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -135,6 +135,9 @@ enum pageflags { #ifdef CONFIG_ARCH_USES_PG_ARCH_X PG_arch_2, PG_arch_3, +#endif +#ifdef CONFIG_KFENCE + PG_kfence, /* Page in kfence pool */ #endif __NR_PAGEFLAGS, @@ -623,6 +626,10 @@ PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY) PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) #endif +#ifdef CONFIG_KFENCE +__PAGEFLAG(Kfence, kfence, PF_ANY) +#endif + /* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; @@ -655,7 +662,7 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) * Different with flags above, this flag is used only for fsdax mode. It * indicates that this page->mapping is now under reflink case. */ -#define PAGE_MAPPING_DAX_SHARED ((void *)0x1) +#define PAGE_MAPPING_DAX_SHARED 0x1UL static __always_inline bool folio_mapping_flags(struct folio *folio) { @@ -1070,6 +1077,12 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page) #define __PG_MLOCKED 0 #endif +#ifdef CONFIG_KFENCE +#define __PG_KFENCE (1UL << PG_kfence) +#else +#define __PG_KFENCE 0 +#endif + /* * Flags checked when a page is freed. Pages being freed should not have * these flags set. If they are, there is a problem. 
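As a usage sketch (not part of the patch): __PAGEFLAG(Kfence, kfence, PF_ANY) above generates the non-atomic PageKfence()/__SetPageKfence()/__ClearPageKfence() accessors; a KFENCE pool setup path could use them roughly as follows, where mark_kfence_pool() is a hypothetical helper and the non-atomic set is assumed to run before the pages become visible to other CPUs::

	#ifdef CONFIG_KFENCE
	/* Hypothetical: tag every page backing the KFENCE pool at init time. */
	static void mark_kfence_pool(struct page *pages, unsigned long nr)
	{
		unsigned long i;

		for (i = 0; i < nr; i++)
			__SetPageKfence(&pages[i]);
	}

	/* Cheap containment check, e.g. on a free path. */
	static inline bool page_in_kfence_pool(const struct page *page)
	{
		return PageKfence(page);
	}
	#endif /* CONFIG_KFENCE */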
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index c141ea9a95ef86c9eea582d1872485e73321914e..192326822f4607631541d5a1bdb71d5f4998b210 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -25,6 +25,10 @@ struct page_counter { atomic_long_t low_usage; atomic_long_t children_low_usage; + /* water mark low and high */ + unsigned long wmark_low; + unsigned long wmark_high; + unsigned long watermark; unsigned long failcnt; @@ -65,6 +69,10 @@ bool page_counter_try_charge(struct page_counter *counter, void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages); +void page_counter_set_wmark_high(struct page_counter *counter, + unsigned long nr_pages); +void page_counter_set_wmark_low(struct page_counter *counter, + unsigned long nr_pages); static inline void page_counter_set_high(struct page_counter *counter, unsigned long nr_pages) diff --git a/include/linux/pagecache_limit.h b/include/linux/pagecache_limit.h new file mode 100644 index 0000000000000000000000000000000000000000..c85122603ca36f559718d05f4fcc04e1c0a46e97 --- /dev/null +++ b/include/linux/pagecache_limit.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _PAGECACHE_LIMIT_H +#define _PAGECACHE_LIMIT_H + +#ifdef CONFIG_PAGECACHE_LIMIT + +DECLARE_STATIC_KEY_FALSE(pagecache_limit_enabled_key); +extern struct workqueue_struct *memcg_pgcache_limit_wq; + +enum pgcache_limit_reclaim_type { + /* per-memcg or global pagecache reclaim defaults to async */ + PGCACHE_RECLAIM_ASYNC = 0, + PGCACHE_RECLAIM_DIRECT +}; + +static inline bool pagecache_limit_enabled(void) +{ + return static_branch_unlikely(&pagecache_limit_enabled_key); +} +bool is_memcg_pgcache_limit_enabled(struct mem_cgroup *memcg); +void memcg_add_pgcache_limit_reclaimed(struct mem_cgroup *memcg, + unsigned long nr); +unsigned long memcg_get_pgcache_overflow_size(struct mem_cgroup *memcg); +void __memcg_pagecache_shrink(struct mem_cgroup *memcg, + bool may_unmap, gfp_t gfp_mask); +void memcg_pagecache_shrink(struct mem_cgroup *memcg, gfp_t gfp_mask); +void memcg_pgcache_limit_work_func(struct work_struct *work); + +#else +static inline bool pagecache_limit_enabled(void) +{ + return false; +} +static inline bool is_memcg_pgcache_limit_enabled(struct mem_cgroup *memcg) +{ + return false; +} +static inline void memcg_add_pgcache_limit_reclaimed(struct mem_cgroup *memcg, + unsigned long nr) +{ +} +static inline unsigned long memcg_get_pgcache_overflow_size(struct mem_cgroup *memcg) +{ + return 0; +} +static inline void __memcg_pagecache_shrink(struct mem_cgroup *memcg, + bool may_unmap, gfp_t gfp_mask) +{ +} +static inline void memcg_pagecache_shrink(struct mem_cgroup *memcg, + gfp_t gfp_mask) +{ +} +static inline void memcg_pgcache_limit_work_func(struct work_struct *work) +{ +} +#endif +#endif diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 15793a4af9d4455581641db1298fc45756a83702..f9ca9c252dcaa47f1d8ee1d365b7a05518e57c6b 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -206,6 +206,8 @@ enum mapping_flags { AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */ AS_STABLE_WRITES, /* must wait for writeback before modifying folio contents */ + + AS_FSDAX_NORMAP = 30, }; /** @@ -821,9 +823,6 @@ static inline pgoff_t folio_next_index(struct folio *folio) */ static inline 
struct page *folio_file_page(struct folio *folio, pgoff_t index) { - /* HugeTLBfs indexes the page cache in units of hpage_size */ - if (folio_test_hugetlb(folio)) - return &folio->page; return folio_page(folio, index & (folio_nr_pages(folio) - 1)); } @@ -839,9 +838,6 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index) */ static inline bool folio_contains(struct folio *folio, pgoff_t index) { - /* HugeTLBfs indexes the page cache in units of hpage_size */ - if (folio_test_hugetlb(folio)) - return folio->index == index; return index - folio_index(folio) < folio_nr_pages(folio); } @@ -899,10 +895,9 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping, } /* - * Get index of the page within radix-tree (but not for hugetlb pages). - * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE) + * Get the offset in PAGE_SIZE (even for hugetlb pages). */ -static inline pgoff_t page_to_index(struct page *page) +static inline pgoff_t page_to_pgoff(struct page *page) { struct page *head; @@ -917,19 +912,6 @@ static inline pgoff_t page_to_index(struct page *page) return head->index + page - head; } -extern pgoff_t hugetlb_basepage_index(struct page *page); - -/* - * Get the offset in PAGE_SIZE (even for hugetlb pages). - * (TODO: hugetlb pages should have ->index in PAGE_SIZE) - */ -static inline pgoff_t page_to_pgoff(struct page *page) -{ - if (unlikely(PageHuge(page))) - return hugetlb_basepage_index(page); - return page_to_index(page); -} - /* * Return byte-offset into filesystem object for page. */ @@ -966,24 +948,16 @@ static inline loff_t folio_file_pos(struct folio *folio) /* * Get the offset in PAGE_SIZE (even for hugetlb folios). - * (TODO: hugetlb folios should have ->index in PAGE_SIZE) */ static inline pgoff_t folio_pgoff(struct folio *folio) { - if (unlikely(folio_test_hugetlb(folio))) - return hugetlb_basepage_index(&folio->page); return folio->index; } -extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, - unsigned long address); - static inline pgoff_t linear_page_index(struct vm_area_struct *vma, unsigned long address) { pgoff_t pgoff; - if (unlikely(is_vm_hugetlb_page(vma))) - return linear_hugepage_index(vma, address); pgoff = (address - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; return pgoff; @@ -1161,6 +1135,7 @@ static inline void wait_on_page_locked(struct page *page) folio_wait_locked(page_folio(page)); } +void folio_end_read(struct folio *folio, bool success); void wait_on_page_writeback(struct page *page); void folio_wait_writeback(struct folio *folio); int folio_wait_writeback_killable(struct folio *folio); @@ -1283,6 +1258,11 @@ struct readahead_control { unsigned int _batch_count; bool _workingset; unsigned long _pflags; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define DEFINE_READAHEAD(ractl, f, r, m, i) \ diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index abeba356bc3f5794275e14d83036d7020031c40f..f03f0c0735de8d395372d19b14d6d9f1e110d7cc 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -7,6 +7,7 @@ struct disk_stats { u64 nsecs[NR_STAT_GROUPS]; + u64 d2c_nsecs[NR_STAT_GROUPS]; unsigned long sectors[NR_STAT_GROUPS]; unsigned long ios[NR_STAT_GROUPS]; unsigned long merges[NR_STAT_GROUPS]; diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 6b1301e2498e9e163c6e9a0525e49afa226bffea..863e572202e2d590fbe55cef76dc5ef12193cb22 100644 --- a/include/linux/pci-ecam.h +++ 
b/include/linux/pci-ecam.h @@ -88,6 +88,7 @@ extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */ +extern const struct pci_ecam_ops sw64_pci_ecam_ops; /* SW64 PCIe */ #endif #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 5cb69403107290dc452690730cf6fd55e06dd7ce..f4ce2e9b7e94cfc97363e1ac3460b924b7fc5965 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -89,6 +89,8 @@ struct pci_epc_ops { const struct pci_epc_features* (*get_features)(struct pci_epc *epc, u8 func_no, u8 vfunc_no); struct module *owner; + + CK_KABI_RESERVE(1) }; /** @@ -150,6 +152,8 @@ struct pci_epc { /* mutex to protect against concurrent access of EP controller */ struct mutex lock; unsigned long function_num_map; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 3f44b6aec4770cb5ee5c925f02cca519d08688ac..5e43a8af6e5344d2e79ee600aa1b0f7b663e047f 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -51,6 +51,8 @@ struct pci_epf_header { u16 subsys_vendor_id; u16 subsys_id; enum pci_interrupt_pin interrupt_pin; + + CK_KABI_RESERVE(1) }; /** @@ -121,6 +123,8 @@ struct pci_epf_bar { size_t size; enum pci_barno barno; int flags; + + CK_KABI_RESERVE(1) }; /** @@ -180,6 +184,8 @@ struct pci_epf { unsigned long vfunction_num_map; struct list_head pci_vepf; const struct pci_epc_event_ops *event_ops; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/pci.h b/include/linux/pci.h index 2d1fb935a8c86a273e2c743993454ef9924835d6..5f41f7e2f4e40de2fc6f57c06fbf529e3bf55a9c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -245,6 +245,7 @@ enum pci_dev_flags { PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11), /* Device does honor MSI masking despite saying otherwise */ PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12), + PCI_DEV_FLAGS_NO_LINK_SPEED_CHANGE = (__force pci_dev_flags_t) (1 << 15), }; enum pci_irq_reroute_variant { @@ -312,6 +313,16 @@ struct pci_vpd { u8 cap; }; +/* This structure describes the registers to be saved for the Yitian 710 SoC. */ +struct pci_saved_regs { + u16 dev_ctrl; + u16 dev_ctrl2; + u32 acs_cap_ctrl; + u32 root_err_cmd; + u16 root_ctrl; + u16 slot_ctrl; /* should be the last register to restore */ +}; + struct irq_affinity; struct pcie_link_state; struct pci_sriov; @@ -465,6 +476,7 @@ struct pci_dev { unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */ unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */ unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? 
*/ + unsigned int broken_bus_reset:1; /* Abnormal bus reset */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ @@ -529,6 +541,23 @@ struct pci_dev { /* These methods index pci_reset_fn_methods[] */ u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; static inline struct pci_dev *pci_physfn(struct pci_dev *dev) @@ -678,6 +707,15 @@ struct pci_bus { struct bin_attribute *legacy_mem; /* Legacy mem */ unsigned int is_added:1; unsigned int unsafe_warn:1; /* warned about RW1C config write */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #define to_pci_bus(n) container_of(n, struct pci_bus, dev) @@ -933,6 +971,15 @@ struct pci_driver { struct device_driver driver; struct pci_dynids dynids; bool driver_managed_dma; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; static inline struct pci_driver *to_pci_driver(struct device_driver *drv) @@ -1216,6 +1263,8 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val); int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val); int pci_write_config_word(const struct pci_dev *dev, int where, u16 val); int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val); +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set); int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 3a10d6ec3ee7f9618418a4e8394de3e096c72c1a..256b124d3d743c92a9a5b7f0c9820f3be369a0af 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -45,6 +45,15 @@ struct hotplug_slot_ops { int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); int (*reset_slot) (struct hotplug_slot *slot, bool probe); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; /** @@ -63,6 +72,15 @@ struct hotplug_slot { struct pci_slot *pci_slot; struct module *owner; const char *mod_name; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 3dce2be622e745ebe3b71890dde480f99637f48a..a9bb8aa1112a835d9fdea2cb764221f5694c7814 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2600,6 +2600,10 @@ #define PCI_VENDOR_ID_ZHAOXIN 0x1d17 #define PCI_VENDOR_ID_HYGON 0x1d94 +#define PCI_DEVICE_ID_HYGON_18H_M05H_HDA 0x14a9 +#define PCI_DEVICE_ID_HYGON_18H_M10H_HDA 0x14c9 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3 0x14b3 +#define 
PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3 0x14d3 #define PCI_VENDOR_ID_FUNGIBLE 0x1dad @@ -2608,6 +2612,8 @@ #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 +#define PCI_VENDOR_ID_ALIBABA 0x1ded + #define PCI_VENDOR_ID_TEHUTI 0x1fc9 #define PCI_DEVICE_ID_TEHUTI_3009 0x3009 #define PCI_DEVICE_ID_TEHUTI_3010 0x3010 @@ -3221,4 +3227,6 @@ #define PCI_VENDOR_ID_NCUBE 0x10ff +#define PCI_VENDOR_ID_PHYTIUM 0x1db7 + #endif /* _LINUX_PCI_IDS_H */ diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index d01351b1526f6123199eb54e58c0ead87246877b..3a44dd1e33d241589fce9fb8b629cd6fa32c6a12 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -57,6 +57,8 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); +bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, + s64 amount, s32 batch); void percpu_counter_sync(struct percpu_counter *fbc); static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) @@ -69,6 +71,13 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) percpu_counter_add_batch(fbc, amount, percpu_counter_batch); } +static inline bool +percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) +{ + return __percpu_counter_limited_add(fbc, limit, amount, + percpu_counter_batch); +} + /* * With percpu_counter_add_local() and percpu_counter_sub_local(), counts * are accumulated in local per cpu counter and not in fbc->count until @@ -185,6 +194,27 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount) local_irq_restore(flags); } +static inline bool +percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) +{ + unsigned long flags; + bool good = false; + s64 count; + + if (amount == 0) + return true; + + local_irq_save(flags); + count = fbc->count + amount; + if ((amount > 0 && count <= limit) || + (amount < 0 && count >= limit)) { + fbc->count = count; + good = true; + } + local_irq_restore(flags); + return good; +} + /* non-SMP percpu_counter_add_local is the same with percpu_counter_add */ static inline void percpu_counter_add_local(struct percpu_counter *fbc, s64 amount) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 7a5563ffe61b53bb425582c6d6a1d36f2ad768cb..a6a0b44080160e941f17fdcf685da9188a18561a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -540,6 +540,11 @@ struct pmu { * Check period value for PERF_EVENT_IOC_PERIOD ioctl. */ int (*check_period) (struct perf_event *event, u64 value); /* optional */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum perf_addr_filter_action_t { @@ -841,6 +846,19 @@ struct perf_event { */ __u32 orig_type; #endif /* CONFIG_PERF_EVENTS */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) }; /* @@ -966,6 +984,8 @@ struct perf_event_context { * that until the signal is delivered. 
*/ local_t nr_pending; + + CK_KABI_RESERVE(1) }; /* @@ -1009,6 +1029,8 @@ struct perf_cpu_context { int heap_size; struct perf_event **heap; struct perf_event *heap_default[2]; + + CK_KABI_RESERVE(1) }; struct perf_output_handle { @@ -1144,6 +1166,15 @@ static inline bool branch_sample_priv(const struct perf_event *event) return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE; } +static inline bool branch_sample_counters(const struct perf_event *event) +{ + return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS; +} + +static inline bool branch_sample_call_stack(const struct perf_event *event) +{ + return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK; +} struct perf_sample_data { /* @@ -1178,6 +1209,7 @@ struct perf_sample_data { struct perf_callchain_entry *callchain; struct perf_raw_record *raw; struct perf_branch_stack *br_stack; + u64 *br_stack_cntr; union perf_sample_weight weight; union perf_mem_data_src data_src; u64 txn; @@ -1255,7 +1287,8 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data, static inline void perf_sample_save_brstack(struct perf_sample_data *data, struct perf_event *event, - struct perf_branch_stack *brs) + struct perf_branch_stack *brs, + u64 *brs_cntr) { int size = sizeof(u64); /* nr */ @@ -1263,7 +1296,16 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data, size += sizeof(u64); size += brs->nr * sizeof(struct perf_branch_entry); + /* + * The extension space for counters is appended after the + * struct perf_branch_stack. It is used to store the occurrences + * of events of each branch. + */ + if (brs_cntr) + size += brs->nr * sizeof(u64); + data->br_stack = brs; + data->br_stack_cntr = brs_cntr; data->dyn_size += size; data->sample_flags |= PERF_SAMPLE_BRANCH_STACK; } diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 8b7daccd11bef242e9477687d04639f3f28ed893..4db811acb03076ff46d53115d5cdcbbf827427b0 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -205,15 +205,37 @@ static inline int pmd_young(pmd_t pmd) #define arch_flush_lazy_mmu_mode() do {} while (0) #endif -#ifndef set_ptes +#ifndef pte_batch_hint +/** + * pte_batch_hint - Number of pages that can be added to batch without scanning. + * @ptep: Page table pointer for the entry. + * @pte: Page table entry. + * + * Some architectures know that a set of contiguous ptes all map the same + * contiguous memory with the same permissions. In this case, it can provide a + * hint to aid pte batching without the core code needing to scan every pte. + * + * An architecture implementation may ignore the PTE accessed state. Further, + * the dirty state must apply atomically to all the PTEs described by the hint. + * + * May be overridden by the architecture, else pte_batch_hint is always 1. + */ +static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte) +{ + return 1; +} +#endif -#ifndef pte_next_pfn -static inline pte_t pte_next_pfn(pte_t pte) +#ifndef pte_advance_pfn +static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) { - return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT)); + return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT)); } #endif +#define pte_next_pfn(pte) pte_advance_pfn(pte, 1) + +#ifndef set_ptes /** * set_ptes - Map consecutive pages to a contiguous range of addresses. * @mm: Address space to map the pages into. @@ -222,6 +244,10 @@ static inline pte_t pte_next_pfn(pte_t pte) * @pte: Page table entry for the first page. * @nr: Number of pages to map. 
* + * When nr==1, initial state of pte may be present or not present, and new state + * may be present or not present. When nr>1, initial state of all ptes must be + * not present, and new state must be present. + * * May be overridden by the architecture, or the architecture can define * set_pte() and PFN_PTE_SHIFT. * @@ -328,6 +354,36 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, } #endif +#ifndef mkold_ptes +/** + * mkold_ptes - Mark PTEs that map consecutive pages of the same folio as old. + * @vma: VMA the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to mark old. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over ptep_test_and_clear_young(). + * + * Note that PTE bits in the PTE range besides the PFN can differ. For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. + */ +static inline void mkold_ptes(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + for (;;) { + ptep_test_and_clear_young(vma, addr, ptep); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif + #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, @@ -573,6 +629,76 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, } #endif +#ifndef get_and_clear_full_ptes +/** + * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of + * the same folio, collecting dirty/accessed bits. + * @mm: Address space the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to clear. + * @full: Whether we are clearing a full mm. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the + * returned PTE. + * + * Note that PTE bits in the PTE range besides the PFN can differ. For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. + */ +static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, unsigned int nr, int full) +{ + pte_t pte, tmp_pte; + + pte = ptep_get_and_clear_full(mm, addr, ptep, full); + while (--nr) { + ptep++; + addr += PAGE_SIZE; + tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full); + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } + return pte; +} +#endif + +#ifndef clear_full_ptes +/** + * clear_full_ptes - Clear present PTEs that map consecutive pages of the same + * folio. + * @mm: Address space the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to clear. + * @full: Whether we are clearing a full mm. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over ptep_get_and_clear_full(). + * + * Note that PTE bits in the PTE range besides the PFN can differ. 
For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. + */ +static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr, int full) +{ + for (;;) { + ptep_get_and_clear_full(mm, addr, ptep, full); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif /* * If two threads concurrently fault at the same page, the thread that @@ -582,13 +708,18 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, * fault. This function updates TLB only, do nothing with cache or others. * It is the difference with function update_mmu_cache. */ -#ifndef __HAVE_ARCH_UPDATE_MMU_TLB +#ifndef update_mmu_tlb_range +static inline void update_mmu_tlb_range(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, unsigned int nr) +{ +} +#endif + static inline void update_mmu_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { + update_mmu_tlb_range(vma, address, ptep, 1); } -#define __HAVE_ARCH_UPDATE_MMU_TLB -#endif /* * Some architectures may be able to avoid expensive synchronization @@ -605,6 +736,35 @@ static inline void pte_clear_not_present_full(struct mm_struct *mm, } #endif +#ifndef clear_not_present_full_ptes +/** + * clear_not_present_full_ptes - Clear multiple not present PTEs which are + * consecutive in the pgtable. + * @mm: Address space the ptes represent. + * @addr: Address of the first pte. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to clear. + * @full: Whether we are clearing a full mm. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over pte_clear_not_present_full(). + * + * Context: The caller holds the page table lock. The PTEs are all not present. + * The PTEs are all in the same PMD. + */ +static inline void clear_not_present_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, unsigned int nr, int full) +{ + for (;;) { + pte_clear_not_present_full(mm, addr, ptep, full); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif + #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH extern pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, @@ -643,6 +803,37 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres } #endif +#ifndef wrprotect_ptes +/** + * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same + * folio. + * @mm: Address space the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to write-protect. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over ptep_set_wrprotect(). + * + * Note that PTE bits in the PTE range besides the PFN can differ. For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. + */ +static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + for (;;) { + ptep_set_wrprotect(mm, addr, ptep); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif + /* * On some architectures hardware does not set page access bit when accessing * memory page, it is responsibility of software setting this bit. 
It brings @@ -877,6 +1068,15 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) }) #ifndef __HAVE_ARCH_DO_SWAP_PAGE +static inline void arch_do_swap_page_nr(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t pte, pte_t oldpte, + int nr) +{ + +} +#else /* * Some architectures support metadata associated with a page. When a * page is being swapped out, this metadata must be saved so it can be @@ -885,12 +1085,17 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) * page as metadata for the page. arch_do_swap_page() can restore this * metadata when a page is swapped back in. */ -static inline void arch_do_swap_page(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long addr, - pte_t pte, pte_t oldpte) -{ - +static inline void arch_do_swap_page_nr(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t pte, pte_t oldpte, + int nr) +{ + for (int i = 0; i < nr; i++) { + arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE, + pte_advance_pfn(pte, i), + pte_advance_pfn(oldpte, i)); + } } #endif diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 2b886ea654bb36ede4e06accf13a6f257fd19f42..0798198a09efe172b192843655ef825a0a79d285 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -228,6 +228,7 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed); /** * struct phylink_mac_ops - MAC operations structure. * @validate: Validate and update the link configuration. + * @mac_get_caps: Get MAC capabilities for interface mode. * @mac_select_pcs: Select a PCS for the interface mode. * @mac_prepare: prepare for a major reconfiguration of the interface. * @mac_config: configure the MAC for the selected mode and state. @@ -241,6 +242,8 @@ struct phylink_mac_ops { void (*validate)(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state); + unsigned long (*mac_get_caps)(struct phylink_config *config, + phy_interface_t interface); struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config, phy_interface_t interface); int (*mac_prepare)(struct phylink_config *config, unsigned int mode, @@ -292,6 +295,18 @@ struct phylink_mac_ops { */ void validate(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state); +/** + * mac_get_caps: Get MAC capabilities for interface mode. + * @config: a pointer to a &struct phylink_config. + * @interface: PHY interface mode. + * + * Optional method. When not provided, config->mac_capabilities will be used. + * When implemented, this returns the MAC capabilities for the specified + * interface mode where there is some special handling required by the MAC + * driver (e.g. not supporting half-duplex in certain interface modes.) + */ +unsigned long mac_get_caps(struct phylink_config *config, + phy_interface_t interface); /** * mac_select_pcs: Select a PCS for the interface mode. * @config: a pointer to a &struct phylink_config. 
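A sketch of the driver side (hypothetical "foo" driver, not part of the patch): the callback derives its result from config->mac_capabilities and masks out what the MAC cannot do in a given interface mode, here half-duplex over RGMII; phy_interface_mode_is_rgmii() and the MAC_*HD capability bits are existing kernel definitions::

	/* Hypothetical MAC that cannot do half-duplex over RGMII links. */
	static unsigned long foo_mac_get_caps(struct phylink_config *config,
					      phy_interface_t interface)
	{
		unsigned long caps = config->mac_capabilities;

		if (phy_interface_mode_is_rgmii(interface))
			caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);

		return caps;
	}

	static const struct phylink_mac_ops foo_phylink_mac_ops = {
		.mac_get_caps	= foo_mac_get_caps,
		/* .mac_config, .mac_link_up, ... as usual */
	};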
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index f9f9931e02d6ad51970be627edcc58fbda879f0f..fb3a04dc13327d593505f11ad0d3f778bb18581b 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -23,6 +23,15 @@ struct fs_pin; #define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2 /* same as 1, except MFD_EXEC rejected */ #endif +struct rich_container_ext { + /* overcommit */ + int overcommit_memory; + int overcommit_ratio; + unsigned long overcommit_kbytes; + struct percpu_counter vm_committed_as; + s32 as_batch; +}; + struct pid_namespace { struct idr idr; struct rcu_head rcu; @@ -41,6 +50,7 @@ struct pid_namespace { #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) int memfd_noexec_scope; #endif + struct rich_container_ext *ext; } __randomize_layout; extern struct pid_namespace init_pid_ns; @@ -123,4 +133,69 @@ static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) return task_active_pid_ns(tsk) == &init_pid_ns; } +#ifdef CONFIG_RICH_CONTAINER +extern int sysctl_rich_container_enable; +extern int sysctl_rich_container_source; +extern int sysctl_rich_container_cpuinfo_source; +extern unsigned int sysctl_rich_container_cpuinfo_sharesbase; +extern int sysctl_rich_container_ext_enable; + +static inline struct task_struct *rich_container_get_scenario(void) +{ + if (sysctl_rich_container_source == 1) + return task_active_pid_ns(current)->child_reaper; + + return current; +} + +static inline bool in_rich_container(struct task_struct *tsk) +{ + if (sysctl_rich_container_enable == 0) + return false; + + return !task_is_in_init_pid_ns(tsk) && child_cpuacct(tsk); +} + +void rich_container_get_cpuset_cpus(struct cpumask *pmask); + +static inline struct rich_container_ext *rich_container_get_ext(void) +{ + if (sysctl_rich_container_ext_enable == 0) + return NULL; + + return task_active_pid_ns(current)->ext; +} + +struct rich_container_ext *create_rich_container_ext(void); +void destroy_rich_container_ext(struct rich_container_ext *ext); +#else +static inline bool in_rich_container(struct task_struct *tsk) +{ + return false; +} + +static inline void rich_container_get_cpuset_cpus(struct cpumask *pmask) +{ +} + +static inline struct task_struct *rich_container_get_scenario(void) +{ + return NULL; +} + +static inline struct rich_container_ext *create_rich_container_ext(void) +{ + return NULL; +} + +static inline void destroy_rich_container_ext(struct rich_container_ext *ext) +{ +} + +static inline struct rich_container_ext *rich_container_get_ext(void) +{ + return NULL; +} +#endif + #endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/platform_data/gpio-sunway.h b/include/linux/platform_data/gpio-sunway.h new file mode 100644 index 0000000000000000000000000000000000000000..58b1bddeb409ff46751b829f21f99ef05d330828 --- /dev/null +++ b/include/linux/platform_data/gpio-sunway.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef GPIO_SUNWAY_H +#define GPIO_SUNWAY_H + +struct sunway_port_property { + struct fwnode_handle *fwnode; + unsigned int idx; + unsigned int ngpio; + unsigned int gpio_base; + int irq[32]; + bool has_irq; + bool irq_shared; +}; + +struct sunway_platform_data { + struct sunway_port_property *properties; + unsigned int nports; +}; + +#endif diff --git a/include/linux/pm.h b/include/linux/pm.h index 629c1633bbd00d0b01174287f55fc03e6025288a..8d54915df1af6dd585fde6f81a6efc7d74783a40 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -640,6 +640,13 @@ struct pm_subsys_data { #ifdef CONFIG_PM_GENERIC_DOMAINS struct pm_domain_data *domain_data; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; /* @@ -720,6 +727,9 @@ struct dev_pm_info { struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ void (*set_latency_tolerance)(struct device *, s32); struct dev_pm_qos *qos; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; extern int dev_pm_get_subsys_data(struct device *dev); @@ -746,6 +756,9 @@ struct dev_pm_domain { int (*activate)(struct device *dev); void (*sync)(struct device *dev); void (*dismiss)(struct device *dev); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 4a69d4af3ff8e954c9e15b5879a32ea08d0d980d..444b651f9f84cb9692e7d8637aed3b686e8f4bca 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -112,6 +112,9 @@ struct dev_pm_qos_request { struct freq_qos_request freq; } data; struct device *dev; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct dev_pm_qos { @@ -122,6 +125,9 @@ struct dev_pm_qos { struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* Action requested to pm_qos_update_target */ diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 6eb9adaef52beb72442a06c7cd77ed6182f7a14a..0427a8c37db7bf72cca4f95ffdd95091027bba09 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -14,6 +14,7 @@ #endif #include +#include struct wake_irq; @@ -61,6 +62,9 @@ struct wakeup_source { struct device *dev; bool active:1; bool autosleep_enabled:1; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define for_each_wakeup_source(ws) \ diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 468328b1e1dd583fedfdd67e4f85bfd94d43c31d..4df1db8896e351da2c4cd907444e70ae58dc4113 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -62,6 +62,11 @@ struct posix_clock_operations { ssize_t (*read) (struct posix_clock *pc, uint flags, char __user *buf, size_t cnt); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** @@ -88,6 +93,15 @@ struct posix_clock { struct device *dev; struct rw_semaphore rwsem; bool zombie; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; /** diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index d607f51404fca063e4134bdba93bde6265be7d45..9a9db1da47231b358e7d6c647d0761a3a2e18fd4 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -2,6 +2,7 @@ #ifndef _linux_POSIX_TIMERS_H #define _linux_POSIX_TIMERS_H +#include #include #include #include @@ -145,6 +146,11 @@ struct 
posix_cputimers_work { struct callback_head work; struct mutex mutex; unsigned int scheduled; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline void posix_cputimers_init(struct posix_cputimers *pct) diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index f1fd3a8044e0eca4fcea4487243bd6b0e438aaf4..4730a6bf41a14dbef6b52bb61c88be66e2388071 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -205,6 +205,9 @@ struct psi_group { u64 rtpoll_total[NR_PSI_STATES - 1]; u64 rtpoll_next_update; u64 rtpoll_until; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #else /* CONFIG_PSI */ diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h new file mode 100644 index 0000000000000000000000000000000000000000..f69c1110b57111017a19a89d4726cb349c13f6ec --- /dev/null +++ b/include/linux/psp-hygon.h @@ -0,0 +1,604 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) driver interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef __PSP_HYGON_H__ +#define __PSP_HYGON_H__ + +#include +#include + +/*****************************************************************************/ +/***************************** CSV interface *********************************/ +/*****************************************************************************/ + +#define CSV_FW_MAX_SIZE 0x80000 /* 512KB */ + +#define CSV_EXT_CSV3_MULT_LUP_DATA_BIT 0 +#define CSV_EXT_CSV3_MULT_LUP_DATA (1 << CSV_EXT_CSV3_MULT_LUP_DATA_BIT) +#define CSV_EXT_CSV3_INJ_SECRET_BIT 1 +#define CSV_EXT_CSV3_INJ_SECRET (1 << CSV_EXT_CSV3_INJ_SECRET_BIT) + +/** + * Guest/platform management commands for CSV + */ +enum csv_cmd { + CSV_CMD_RING_BUFFER = 0x00F, + CSV_CMD_HGSC_CERT_IMPORT = 0x300, + CSV_CMD_MAX, +}; + +/** + * Guest/platform management commands for CSV3 + */ +enum csv3_cmd { + /* Guest launch commands */ + CSV3_CMD_SET_GUEST_PRIVATE_MEMORY = 0x200, + CSV3_CMD_LAUNCH_ENCRYPT_DATA = 0x201, + CSV3_CMD_LAUNCH_ENCRYPT_VMCB = 0x202, + /* Guest NPT(Nested Page Table) management commands */ + CSV3_CMD_UPDATE_NPT = 0x203, + + /* Guest migration commands */ + CSV3_CMD_SEND_ENCRYPT_DATA = 0x210, + CSV3_CMD_SEND_ENCRYPT_CONTEXT = 0x211, + CSV3_CMD_RECEIVE_ENCRYPT_DATA = 0x212, + CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT = 0x213, + + /* Guest debug commands */ + CSV3_CMD_DBG_READ_VMSA = 0x220, + CSV3_CMD_DBG_READ_MEM = 0x221, + + /* Platform secure memory management commands */ + CSV3_CMD_SET_SMR = 0x230, + CSV3_CMD_SET_SMCR = 0x231, + + CSV3_CMD_MAX, +}; + +/** + * CSV communication state + */ +enum csv_comm_state { + CSV_COMM_MAILBOX_ON = 0x0, + CSV_COMM_RINGBUFFER_ON = 0x1, + + CSV_COMM_MAX +}; + +/** + * Ring Buffer Mode regions: + * There are 4 regions and every region is a 4K area that must be 4K aligned. + * To accomplish this allocate an amount that is the size of area and the + * required alignment. + * The aligned address will be calculated from the returned address. 
+ */
+#define CSV_RING_BUFFER_SIZE		(32 * 1024)
+#define CSV_RING_BUFFER_ALIGN		(4 * 1024)
+#define CSV_RING_BUFFER_LEN		(CSV_RING_BUFFER_SIZE + CSV_RING_BUFFER_ALIGN)
+#define CSV_RING_BUFFER_ESIZE		16
+
+/**
+ * struct csv_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters
+ *
+ * @hgscsk_cert_address: HGSCSK certificate chain
+ * @hgscsk_cert_len: len of HGSCSK certificate
+ * @hgsc_cert_address: HGSC certificate chain
+ * @hgsc_cert_len: len of HGSC certificate
+ */
+struct csv_data_hgsc_cert_import {
+	u64 hgscsk_cert_address;	/* In */
+	u32 hgscsk_cert_len;		/* In */
+	u32 reserved;			/* In */
+	u64 hgsc_cert_address;		/* In */
+	u32 hgsc_cert_len;		/* In */
+} __packed;
+
+#define CSV_COMMAND_PRIORITY_HIGH	0
+#define CSV_COMMAND_PRIORITY_LOW	1
+#define CSV_COMMAND_PRIORITY_NUM	2
+
+struct csv_cmdptr_entry {
+	u16 cmd_id;
+	u16 cmd_flags;
+	u32 sw_data;
+	u64 cmd_buf_ptr;
+} __packed;
+
+struct csv_statval_entry {
+	u16 status;
+	u16 reserved0;
+	u32 reserved1;
+	u64 reserved2;
+} __packed;
+
+struct csv_queue {
+	u32 head;
+	u32 tail;
+	u32 mask;	/* mask = (size - 1), indicates the max count of elements */
+	u32 esize;	/* size of an element */
+	u64 data;
+	u64 data_align;
+} __packed;
+
+struct csv_ringbuffer_queue {
+	struct csv_queue cmd_ptr;
+	struct csv_queue stat_val;
+} __packed;
+
+/**
+ * struct csv_data_ring_buffer - RING_BUFFER command parameters
+ *
+ * @queue_lo_cmdptr_address: physical address of the region to be used for
+ *	low priority queue's CmdPtr ring buffer
+ * @queue_lo_statval_address: physical address of the region to be used for
+ *	low priority queue's StatVal ring buffer
+ * @queue_hi_cmdptr_address: physical address of the region to be used for
+ *	high priority queue's CmdPtr ring buffer
+ * @queue_hi_statval_address: physical address of the region to be used for
+ *	high priority queue's StatVal ring buffer
+ * @queue_lo_size: size of the low priority queue in 4K pages. Must be 1
+ * @queue_hi_size: size of the high priority queue in 4K pages.
Must be 1 + * @queue_lo_threshold: queue(low) size, below which an interrupt may be generated + * @queue_hi_threshold: queue(high) size, below which an interrupt may be generated + * @int_on_empty: unconditionally interrupt when both queues are found empty + */ +struct csv_data_ring_buffer { + u64 queue_lo_cmdptr_address; /* In */ + u64 queue_lo_statval_address; /* In */ + u64 queue_hi_cmdptr_address; /* In */ + u64 queue_hi_statval_address; /* In */ + u8 queue_lo_size; /* In */ + u8 queue_hi_size; /* In */ + u16 queue_lo_threshold; /* In */ + u16 queue_hi_threshold; /* In */ + u16 int_on_empty; /* In */ +} __packed; + +/** + * struct csv3_data_launch_encrypt_data - CSV3_CMD_LAUNCH_ENCRYPT_DATA command + * + * @handle: handle of the VM to update + * @gpa: guest address where data is copied + * @length: len of memory to be encrypted + * @data_blocks: memory regions to hold data page address + */ +struct csv3_data_launch_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 gpa; /* In */ + u32 length; /* In */ + u32 reserved1; /* In */ + u64 data_blocks[8]; /* In */ +} __packed; + +/** + * struct csv3_data_launch_encrypt_vmcb - CSV3_CMD_LAUNCH_ENCRYPT_VMCB command + * + * @handle: handle of the VM + * @vcpu_id: id of vcpu per vmsa/vmcb + * @vmsa_addr: memory address of initial vmsa data + * @vmsa_len: len of initial vmsa data + * @shadow_vmcb_addr: memory address of shadow vmcb data + * @shadow_vmcb_len: len of shadow vmcb data + * @secure_vmcb_addr: memory address of secure vmcb data + * @secure_vmcb_len: len of secure vmcb data + */ +struct csv3_data_launch_encrypt_vmcb { + u32 handle; /* In */ + u32 reserved; /* In */ + u32 vcpu_id; /* In */ + u32 reserved1; /* In */ + u64 vmsa_addr; /* In */ + u32 vmsa_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_addr; /* In */ + u32 shadow_vmcb_len; /* In */ + u32 reserved3; /* In */ + u64 secure_vmcb_addr; /* Out */ + u32 secure_vmcb_len; /* Out */ +} __packed; + +/** + * struct csv3_data_update_npt - CSV3_CMD_UPDATE_NPT command + * + * @handle: handle assigned to the VM + * @error_code: nested page fault error code + * @gpa: guest page address where npf happens + * @spa: physical address which maps to gpa in host page table + * @level: page level which can be mapped in nested page table + * @page_attr: page attribute for gpa + * @page_attr_mask: which page attribute bit should be set + * @npages: number of pages from gpa is handled. 
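 *
 * As an illustrative sketch only (handle, error_code, gpa and spa are
 * hypothetical caller-provided values, not taken from this patch), a host
 * resolving a guest nested page fault might issue the command like this:
 *
 *	struct csv3_data_update_npt npt = {};
 *	int psp_ret, ret;
 *
 *	npt.handle = handle;
 *	npt.error_code = error_code;
 *	npt.gpa = gpa & PAGE_MASK;
 *	npt.spa = spa;
 *	npt.npages = 1;
 *	ret = psp_do_cmd(CSV3_CMD_UPDATE_NPT, &npt, &psp_ret);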
+ */
+struct csv3_data_update_npt {
+	u32 handle;		/* In */
+	u32 reserved;		/* In */
+	u32 error_code;		/* In */
+	u32 reserved1;		/* In */
+	u64 gpa;		/* In */
+	u64 spa;		/* In */
+	u64 level;		/* In */
+	u64 page_attr;		/* In */
+	u64 page_attr_mask;	/* In */
+	u32 npages;		/* In/Out */
+} __packed;
+
+/**
+ * struct csv3_data_memory_region - define a memory region
+ *
+ * @base_address: base address of a memory region
+ * @size: size of memory region
+ */
+struct csv3_data_memory_region {
+	u64 base_address;	/* In */
+	u64 size;		/* In */
+} __packed;
+
+/**
+ * struct csv3_data_set_guest_private_memory - CSV3_CMD_SET_GUEST_PRIVATE_MEMORY
+ * command parameters
+ *
+ * @handle: handle assigned to the VM
+ * @nregions: number of memory regions
+ * @regions_paddr: address of memory containing multiple memory regions
+ */
+struct csv3_data_set_guest_private_memory {
+	u32 handle;		/* In */
+	u32 nregions;		/* In */
+	u64 regions_paddr;	/* In */
+} __packed;
+
+/**
+ * struct csv3_data_set_smr - CSV3_CMD_SET_SMR command parameters
+ *
+ * @smr_entry_size: size of SMR entry
+ * @nregions: number of memory regions
+ * @regions_paddr: address of memory containing multiple memory regions
+ */
+struct csv3_data_set_smr {
+	u32 smr_entry_size;	/* In */
+	u32 nregions;		/* In */
+	u64 regions_paddr;	/* In */
+} __packed;
+
+/**
+ * struct csv3_data_set_smcr - CSV3_CMD_SET_SMCR command parameters
+ *
+ * @base_address: start address of SMCR memory
+ * @size: size of SMCR memory
+ */
+struct csv3_data_set_smcr {
+	u64 base_address;	/* In */
+	u64 size;		/* In */
+} __packed;
+
+/**
+ * struct csv3_data_dbg_read_vmsa - CSV3_CMD_DBG_READ_VMSA command parameters
+ *
+ * @handle: handle assigned to the VM
+ * @spa: system physical address of memory to get vmsa of the specific vcpu
+ * @size: size of the host memory
+ * @vcpu_id: the specific vcpu
+ */
+struct csv3_data_dbg_read_vmsa {
+	u32 handle;		/* In */
+	u32 reserved;		/* In */
+	u64 spa;		/* In */
+	u32 size;		/* In */
+	u32 vcpu_id;		/* In */
+} __packed;
+
+/**
+ * struct csv3_data_dbg_read_mem - CSV3_CMD_DBG_READ_MEM command parameters
+ *
+ * @handle: handle assigned to the VM
+ * @gpa: guest physical address of the memory to access
+ * @spa: system physical address of memory to get data from gpa
+ * @size: size of guest memory to access
+ */
+struct csv3_data_dbg_read_mem {
+	u32 handle;		/* In */
+	u32 reserved;		/* In */
+	u64 gpa;		/* In */
+	u64 spa;		/* In */
+	u32 size;		/* In */
+} __packed;
+
+/**
+ * struct csv3_data_attestation_report - ATTESTATION secure call command parameters
+ *
+ * @handle: handle of the VM to process
+ * @resp_gpa: guest physical address to save the generated report
+ * @resp_len: length of the generated report
+ * @req_gpa: guest physical address of the input for the report
+ * @req_len: length of the input for the report
+ * @fw_error_code: firmware status code when generating the report
+ */
+struct csv3_data_attestation_report {
+	u32 handle;		/* Out */
+	u32 reserved1;
+	u64 resp_gpa;		/* In */
+	u8 reserved2[16];
+	u32 resp_len;		/* In/Out */
+	u32 reserved3;
+	u64 req_gpa;		/* In */
+	u32 req_len;		/* In/Out */
+	u32 fw_error_code;	/* Out */
+} __packed;
+
+/**
+ * struct csv3_data_send_encrypt_data - SEND_ENCRYPT_DATA command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing packet header
+ * @hdr_len: len of packet header
+ * @guest_block: physical address containing multiple guest address
+ * @guest_len: len of guest block
+ * @flag: flag of send encrypt data
+ *
0x00000000: migrate pages in guest block + * 0x00000001: set readonly of pages in guest block + * others: invalid + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 flag; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_send_encrypt_context - SEND_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_data - RECEIVE_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header blob + * @hdr_len: len of packet header + * @guest_block: system physical address containing multiple guest address + * @guest_len: len of guest block memory region + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_receive_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 reserved2; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_context - RECEIVE_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + * @shadow_vmcb_block: physical address of a page containing multiple shadow vmcb address + * @secure_vmcb_block: physical address of a page containing multiple secure vmcb address + * @vmcb_block_len: len of shadow/secure vmcb block + */ +struct csv3_data_receive_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_block; /* In */ + u64 secure_vmcb_block; /* In */ + u32 vmcb_block_len; /* In */ +} __packed; + +/** + * enum VPSP_CMD_STATUS - virtual psp command status + * + * @VPSP_INIT: the initial command from guest + * @VPSP_RUNNING: the middle command to check and run ringbuffer command + * @VPSP_FINISH: inform the guest that the command ran successfully + */ +enum VPSP_CMD_STATUS { + VPSP_INIT = 0, + VPSP_RUNNING, + VPSP_FINISH, + VPSP_MAX +}; + +/** + * struct vpsp_cmd - virtual psp command + * + * @cmd_id: the command id is used to distinguish different commands + * @is_high_rb: indicates the ringbuffer level in which the command is placed + */ +struct vpsp_cmd { + u32 cmd_id : 31; + u32 is_high_rb : 1; +}; + +/** 
+ * struct vpsp_ret - virtual psp return result + * + * @pret: the return code from device + * @resv: reserved bits + * @format: indicates that the error is a unix error code(is 0) or a psp error(is 1) + * @index: used to distinguish the position of command in the ringbuffer + * @status: indicates the current status of the related command + */ +struct vpsp_ret { + u32 pret : 16; + u32 resv : 1; + u32 format : 1; + u32 index : 12; + u32 status : 2; +}; +#define VPSP_RET_SYS_FORMAT 1 +#define VPSP_RET_PSP_FORMAT 0 + +struct kvm_vpsp { + struct kvm *kvm; + int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); + int (*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); + kvm_pfn_t (*gfn_to_pfn)(struct kvm *kvm, gfn_t gfn); + u32 vm_handle; + u8 is_csv_guest; +}; + +#define PSP_2MB_MASK (2*1024*1024 - 1) +#define PSP_HUGEPAGE_2MB (2*1024*1024) +#define PSP_HUGEPAGE_NUM_MAX 128 +#define TKM_CMD_ID_MIN 0x120 +#define TKM_CMD_ID_MAX 0x12f +#define TKM_PSP_CMDID TKM_CMD_ID_MIN +#define TKM_PSP_CMDID_OFFSET 0x128 +#define PSP_VID_MASK 0xff +#define PSP_VID_SHIFT 56 +#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) +#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) +#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) + +struct vpsp_context { + u32 vid; + pid_t pid; + u64 gpa_start; + u64 gpa_end; + + // `vm_is_bound` indicates whether the binding operation has been performed + u32 vm_is_bound; + u32 vm_handle; // only for csv +}; + +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + +int psp_do_cmd(int cmd, void *data, int *psp_ret); + +int csv_ring_buffer_queue_init(void); +int csv_ring_buffer_queue_free(void); +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); +int csv_check_stat_queue_status(int *psp_ret); + +/** + * csv_issue_ringbuf_cmds_external_user - issue CSV commands into a ring + * buffer. 
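+ *
+ * A sketch of the expected ring-buffer flow, using only helpers declared
+ * in this header (error handling elided; @filep is assumed to be the
+ * caller's open device file, @data a prepared command buffer):
+ *
+ *	csv_ring_buffer_queue_init();
+ *	csv_fill_cmd_queue(CSV_COMMAND_PRIORITY_HIGH,
+ *			   CSV_CMD_HGSC_CERT_IMPORT, data, 0);
+ *	csv_issue_ringbuf_cmds_external_user(filep, &psp_ret);
+ *	csv_check_stat_queue_status(&psp_ret);
+ *	csv_ring_buffer_queue_free();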
+ */
+int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret);
+
+int vpsp_try_get_result(uint8_t prio, uint32_t index,
+			phys_addr_t phy_addr, struct vpsp_ret *psp_ret);
+
+int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret);
+
+int vpsp_get_context(struct vpsp_context **ctx, pid_t pid);
+
+int vpsp_get_default_vid_permission(void);
+
+int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa);
+
+int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd,
+			  gpa_t data_gpa, uint32_t psp_ret);
+
+/**
+ * csv_get_extension_info - collect extension set of the firmware
+ *
+ * @buf: The buffer to save extension set
+ * @size: The size of @buf
+ *
+ * Returns:
+ * 0 if @buf is filled with extension bitflags
+ * -%ENODEV if the CSV device is not available
+ * -%EINVAL if @buf is NULL or @size is too small
+ */
+int csv_get_extension_info(void *buf, size_t *size);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; }
+
+static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; }
+static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; }
+static inline
+int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; }
+static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; }
+
+static inline int
+csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; }
+
+static inline int
+vpsp_try_get_result(uint8_t prio,
+		uint32_t index, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) { return -ENODEV; }
+
+static inline int
+vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr,
+		struct vpsp_ret *psp_ret) { return -ENODEV; }
+
+static inline int
+vpsp_get_context(struct vpsp_context **ctx, pid_t pid) { return -ENODEV; }
+
+static inline int
+vpsp_get_default_vid_permission(void) { return -ENODEV; }
+
+static inline int
+kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa,
+		gpa_t psp_ret_gpa) { return -ENODEV; }
+
+static inline int
+kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd,
+		gpa_t data_gpa, uint32_t psp_ret) { return -ENODEV; }
+
+static inline int csv_get_extension_info(void *buf, size_t *size) { return -ENODEV; }
+
+#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+
+typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data);
+
+#ifdef CONFIG_HYGON_PSP2CPU_CMD
+
+int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier);
+int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier);
+
+#else /* !CONFIG_HYGON_PSP2CPU_CMD */
+
+static inline int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) { return -ENODEV; }
+static inline int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) { return -ENODEV; }
+
+#endif /* CONFIG_HYGON_PSP2CPU_CMD */
+
+#endif /* __PSP_HYGON_H__ */
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 7fd17e82bab43ff4409cf9e4c8827ff35c9df4d9..76ee067a962c93ed72c1c207393673532fe4def6 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -387,6 +387,7 @@ struct sev_data_send_update_data {
 */
 struct sev_data_send_update_vmsa {
 	u32 handle;		/* In */
+	u32 reserved1;
 	u64 hdr_address;	/* In */
 	u32 hdr_len;		/* In/Out */
 	u32 reserved2;
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 1ef4e0f9bd2a5d61352987b4af7240c663a25cf9..bae7ae9f23d9c54eb7ce2c815e6a5e4a228c1026 100644
---
a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -193,6 +193,12 @@ struct ptp_clock_info { int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan); long (*do_aux_work)(struct ptp_clock_info *ptp); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) }; struct ptp_clock; diff --git a/include/linux/qat/qat_mig_dev.h b/include/linux/qat/qat_mig_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..dbbb6a063dd25cb1509a756b5dc437cc7e1439f7 --- /dev/null +++ b/include/linux/qat/qat_mig_dev.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef QAT_MIG_DEV_H_ +#define QAT_MIG_DEV_H_ + +struct pci_dev; + +struct qat_mig_dev { + void *parent_accel_dev; + u8 *state; + u32 setup_size; + u32 remote_setup_size; + u32 state_size; + s32 vf_id; +}; + +struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id); +int qat_vfmig_init(struct qat_mig_dev *mdev); +void qat_vfmig_cleanup(struct qat_mig_dev *mdev); +void qat_vfmig_reset(struct qat_mig_dev *mdev); +int qat_vfmig_open(struct qat_mig_dev *mdev); +void qat_vfmig_close(struct qat_mig_dev *mdev); +int qat_vfmig_suspend(struct qat_mig_dev *mdev); +int qat_vfmig_resume(struct qat_mig_dev *mdev); +int qat_vfmig_save_state(struct qat_mig_dev *mdev); +int qat_vfmig_save_setup(struct qat_mig_dev *mdev); +int qat_vfmig_load_state(struct qat_mig_dev *mdev); +int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size); +void qat_vfmig_destroy(struct qat_mig_dev *mdev); + +#endif /*QAT_MIG_DEV_H_*/ diff --git a/include/linux/quota.h b/include/linux/quota.h index 07071e64abf3d66298680b829a2d3b251ebbde5d..574cc9ad519238535fd5d2823bc7ac937a718a78 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -47,6 +47,7 @@ #include #include #include +#include #undef USRQUOTA #undef GRPQUOTA @@ -318,6 +319,9 @@ struct quota_format_ops { int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */ int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot is being dropped */ int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structure in the quota file */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* Operations working with dquots */ @@ -337,6 +341,9 @@ struct dquot_operations { int (*get_inode_usage) (struct inode *, qsize_t *); /* Get next ID with active quota structure */ int (*get_next_id) (struct super_block *sb, struct kqid *qid); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct path; @@ -440,6 +447,11 @@ struct quotactl_ops { int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct quota_format_type { diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 659d13a7ddaaadf7ab41692af5caf684aef7523b..d1933edbce59ec6b614fbf44abe52527226544ab 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -16,6 +16,7 @@ #include #include +#include /* Simple unsegmented callback lists. 
*/ struct rcu_cblist { @@ -214,6 +215,11 @@ struct rcu_segcblist { #endif long seglen[RCU_CBLIST_NSEGS]; u8 flags; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define RCU_SEGCBLIST_INITIALIZER(n) \ diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 8334eeacfec5257628eca1bcca143796feed496f..dc80ddab26cf2857e02fd60ba48f97b01b19a9f8 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -2,9 +2,21 @@ #ifndef _RESCTRL_H #define _RESCTRL_H +#include #include #include #include +#include + +#ifdef CONFIG_ARCH_HAS_CPU_RESCTRL +#include +#endif + +/* CLOSID, RMID value used by the default control group */ +#define RESCTRL_RESERVED_CLOSID 0 +#define RESCTRL_RESERVED_RMID 0 + +#define RESCTRL_PICK_ANY_CPU -1 #ifdef CONFIG_PROC_CPU_RESCTRL @@ -18,28 +30,52 @@ int proc_resctrl_show(struct seq_file *m, /* max value for struct rdt_domain's mbps_val */ #define MBA_MAX_MBPS U32_MAX -/** - * enum resctrl_conf_type - The type of configuration. - * @CDP_NONE: No prioritisation, both code and data are controlled or monitored. - * @CDP_CODE: Configuration applies to instruction fetches. - * @CDP_DATA: Configuration applies to reads and writes. +/* + * Resctrl uses u32 to hold the user-space config. The maximum bitmap size is + * 32. */ -enum resctrl_conf_type { - CDP_NONE, - CDP_CODE, - CDP_DATA, -}; +#define RESCTRL_MAX_CBM 32 -#define CDP_NUM_TYPES (CDP_DATA + 1) +extern unsigned int resctrl_rmid_realloc_limit; +extern unsigned int resctrl_rmid_realloc_threshold; -/* - * Event IDs, the values match those used to program IA32_QM_EVTSEL before - * reading IA32_QM_CTR on RDT systems. +/** + * struct pseudo_lock_region - pseudo-lock region information + * @s: Resctrl schema for the resource to which this + * pseudo-locked region belongs + * @closid: The closid that this pseudo-locked region uses + * @d: RDT domain to which this pseudo-locked region + * belongs + * @cbm: bitmask of the pseudo-locked region + * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread + * completion + * @thread_done: variable used by waitqueue to test if pseudo-locking + * thread completed + * @cpu: core associated with the cache on which the setup code + * will be run + * @line_size: size of the cache lines + * @size: size of pseudo-locked region in bytes + * @kmem: the kernel memory associated with pseudo-locked region + * @minor: minor number of character device associated with this + * region + * @debugfs_dir: pointer to this region's directory in the debugfs + * filesystem + * @pm_reqs: Power management QoS requests related to this region */ -enum resctrl_event_id { - QOS_L3_OCCUP_EVENT_ID = 0x01, - QOS_L3_MBM_TOTAL_EVENT_ID = 0x02, - QOS_L3_MBM_LOCAL_EVENT_ID = 0x03, +struct pseudo_lock_region { + struct resctrl_schema *s; + u32 closid; + struct rdt_domain *d; + u32 cbm; + wait_queue_head_t lock_thread_wq; + int thread_done; + int cpu; + unsigned int line_size; + unsigned int size; + void *kmem; + unsigned int minor; + struct dentry *debugfs_dir; + struct list_head pm_reqs; }; /** @@ -94,7 +130,7 @@ struct rdt_domain { * zero CBM. * @shareable_bits: Bitmask of shareable resource with other * executing entities - * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid. + * @arch_has_sparse_bitmasks: True if a bitmask like f00f is valid. * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache * level has CPU scope. 
*/ @@ -102,7 +138,7 @@ struct resctrl_cache { unsigned int cbm_len; unsigned int min_cbm_bits; unsigned int shareable_bits; - bool arch_has_sparse_bitmaps; + bool arch_has_sparse_bitmasks; bool arch_has_per_cpu_cfg; }; @@ -141,9 +177,6 @@ struct resctrl_membw { u32 *mb_map; }; -struct rdt_parse_data; -struct resctrl_schema; - /** * struct rdt_resource - attributes of a resctrl resource * @rid: The index of the resource @@ -153,14 +186,15 @@ struct resctrl_schema; * @cache_level: Which cache level defines scope of this resource * @cache: Cache allocation related data * @membw: If the component has bandwidth controls, their properties. - * @domains: All domains for this resource + * @domains: RCU list of all domains for this resource * @name: Name to use in "schemata" file. * @data_width: Character width of data when displaying * @default_ctrl: Specifies default cache cbm or memory B/W percent. * @format_str: Per resource format string to show domain value - * @parse_ctrlval: Per resource function pointer to parse control values * @evt_list: List of monitoring events * @fflags: flags to choose base and info files + * @mbm_cfg_mask: Bandwidth sources that can be tracked when Bandwidth + * Monitoring Event Configuration (BMEC) is supported. * @cdp_capable: Is the CDP feature available on this resource */ struct rdt_resource { @@ -176,14 +210,19 @@ struct rdt_resource { int data_width; u32 default_ctrl; const char *format_str; - int (*parse_ctrlval)(struct rdt_parse_data *data, - struct resctrl_schema *s, - struct rdt_domain *d); struct list_head evt_list; unsigned long fflags; + unsigned int mbm_cfg_mask; bool cdp_capable; }; +/* + * Get the resource that exists at this level. If the level is not supported + * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES + * will return NULL. + */ +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l); + /** * struct resctrl_schema - configuration abilities of a resource presented to * user-space @@ -204,10 +243,68 @@ struct resctrl_schema { u32 num_closid; }; +struct resctrl_cpu_sync { + u32 closid; + u32 rmid; +}; + +struct resctrl_mon_config_info { + struct rdt_resource *r; + struct rdt_domain *d; + u32 evtid; + u32 mon_config; + int err; +}; + +/* + * Update and re-load this CPUs defaults. Called via IPI, takes a pointer to + * struct resctrl_cpu_sync, or NULL. + */ +void resctrl_arch_sync_cpu_defaults(void *info); + /* The number of closid supported by this resource regardless of CDP */ u32 resctrl_arch_get_num_closid(struct rdt_resource *r); + +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id); int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); +bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt); +void resctrl_arch_mon_event_config_write(void *info); +void resctrl_arch_mon_event_config_read(void *info); + +/* For use by arch code to remap resctrl's smaller CDP CLOSID range */ +static inline u32 resctrl_get_config_index(u32 closid, + enum resctrl_conf_type type) +{ + switch (type) { + default: + case CDP_NONE: + return closid; + case CDP_CODE: + return (closid * 2) + 1; + case CDP_DATA: + return (closid * 2); + } +} + +/* + * Caller must hold the cpuhp read lock to prevent the struct rdt_domain being + * freed. 
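+ *
+ * An illustrative call pattern (a sketch, not taken from this patch):
+ * hold the cpuhp read lock across both the lookup and any use of the
+ * returned domain:
+ *
+ *	cpus_read_lock();
+ *	d = resctrl_get_domain_from_cpu(cpu, r);
+ *	if (d)
+ *		cpumask_copy(mask, &d->cpu_mask);
+ *	cpus_read_unlock();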
+ */ +static inline struct rdt_domain * +resctrl_get_domain_from_cpu(int cpu, struct rdt_resource *r) +{ + struct rdt_domain *d; + + list_for_each_entry_rcu(d, &r->domains, list) { + /* Find the domain that contains this CPU */ + if (cpumask_test_cpu(cpu, &d->cpu_mask)) + return d; + } + + return NULL; +} + /* * Update the ctrl_val and apply this config right now. * Must be called on one of the domain's CPUs. @@ -219,36 +316,70 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type); int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d); void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); +void resctrl_online_cpu(unsigned int cpu); +void resctrl_offline_cpu(unsigned int cpu); /** * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid * for this resource and domain. * @r: resource that the counter should be read from. * @d: domain that the counter should be read from. + * @closid: closid that matches the rmid. Depending on the architecture, the + * counter may match traffic of both @closid and @rmid, or @rmid + * only. * @rmid: rmid of the counter to read. * @eventid: eventid to read, e.g. L3 occupancy. * @val: result of the counter read in bytes. + * @arch_mon_ctx: An architecture specific value from + * resctrl_arch_mon_ctx_alloc(), for MPAM this identifies + * the hardware monitor allocated for this read request. * - * Call from process context on a CPU that belongs to domain @d. + * Some architectures need to sleep when first programming some of the counters. + * (specifically: arm64's MPAM cache occupancy counters can return 'not ready' + * for a short period of time). Call from a non-migrateable process context on + * a CPU that belongs to domain @d. e.g. use smp_call_on_cpu() or + * schedule_work_on(). This function can be called with interrupts masked, + * e.g. using smp_call_function_any(), but may consistently return an error. * * Return: * 0 on success, or -EIO, -EINVAL etc on error. */ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid, u64 *val); + u32 closid, u32 rmid, enum resctrl_event_id eventid, + u64 *val, void *arch_mon_ctx); + +/** + * resctrl_arch_rmid_read_context_check() - warn about invalid contexts + * + * When built with CONFIG_DEBUG_ATOMIC_SLEEP generate a warning when + * resctrl_arch_rmid_read() is called with preemption disabled. + * + * The contract with resctrl_arch_rmid_read() is that if interrupts + * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an + * IPI, (and fail if the call needed to sleep), while most of the time + * the work is scheduled, allowing the call to sleep. + */ +static inline void resctrl_arch_rmid_read_context_check(void) +{ + if (!irqs_disabled()) + might_sleep(); +} /** * resctrl_arch_reset_rmid() - Reset any private state associated with rmid * and eventid. * @r: The domain's resource. * @d: The rmid's domain. + * @closid: closid that matches the rmid. Depending on the architecture, the + * counter may match traffic of both @closid and @rmid, or @rmid only. * @rmid: The rmid whose counter values should be reset. * @eventid: The eventid whose counter values should be reset. * * This can be called from any CPU. 
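 *
 * Illustrative pairing with resctrl_arch_rmid_read() (a sketch, not part
 * of the original interface documentation): once an RMID is recycled or a
 * read is abandoned, stale per-RMID state can be discarded with:
 *
 *	resctrl_arch_reset_rmid(r, d, closid, rmid, QOS_L3_MBM_TOTAL_EVENT_ID);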
*/ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid); + u32 closid, u32 rmid, + enum resctrl_event_id eventid); /** * resctrl_arch_reset_rmid_all() - Reset all private state associated with @@ -264,4 +395,7 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d); extern unsigned int resctrl_rmid_realloc_threshold; extern unsigned int resctrl_rmid_realloc_limit; +int resctrl_init(void); +void resctrl_exit(void); + #endif /* _RESCTRL_H */ diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h new file mode 100644 index 0000000000000000000000000000000000000000..a0d8694be783366a48dfab68acbac1fc56ca58ba --- /dev/null +++ b/include/linux/resctrl_types.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Arm Ltd. + * Based on arch/x86/kernel/cpu/resctrl/internal.h + */ + +#ifndef __LINUX_RESCTRL_TYPES_H +#define __LINUX_RESCTRL_TYPES_H + +#define CQM_LIMBOCHECK_INTERVAL 1000 + +#define MBM_CNTR_WIDTH_BASE 24 +#define MBM_OVERFLOW_INTERVAL 1000 +#define MAX_MBA_BW 100u +#define MBA_IS_LINEAR 0x4 + +/* rdtgroup.flags */ +#define RDT_DELETED 1 + +/* rftype.flags */ +#define RFTYPE_FLAGS_CPUS_LIST 1 + +/* + * Define the file type flags for base and info directories. + */ +#define RFTYPE_INFO BIT(0) +#define RFTYPE_BASE BIT(1) +#define RFTYPE_CTRL BIT(4) +#define RFTYPE_MON BIT(5) +#define RFTYPE_TOP BIT(6) +#define RFTYPE_RES_CACHE BIT(8) +#define RFTYPE_RES_MB BIT(9) +#define RFTYPE_DEBUG BIT(10) +#define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) +#define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) +#define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) +#define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) +#define RFTYPE_MON_BASE (RFTYPE_BASE | RFTYPE_MON) + +/* Reads to Local DRAM Memory */ +#define READS_TO_LOCAL_MEM BIT(0) + +/* Reads to Remote DRAM Memory */ +#define READS_TO_REMOTE_MEM BIT(1) + +/* Non-Temporal Writes to Local Memory */ +#define NON_TEMP_WRITE_TO_LOCAL_MEM BIT(2) + +/* Non-Temporal Writes to Remote Memory */ +#define NON_TEMP_WRITE_TO_REMOTE_MEM BIT(3) + +/* Reads to Local Memory the system identifies as "Slow Memory" */ +#define READS_TO_LOCAL_S_MEM BIT(4) + +/* Reads to Remote Memory the system identifies as "Slow Memory" */ +#define READS_TO_REMOTE_S_MEM BIT(5) + +/* Dirty Victims to All Types of Memory */ +#define DIRTY_VICTIMS_TO_ALL_MEM BIT(6) + +/* Max event bits supported */ +#define MAX_EVT_CONFIG_BITS GENMASK(6, 0) + +/** + * enum resctrl_conf_type - The type of configuration. + * @CDP_NONE: No prioritisation, both code and data are controlled or monitored. + * @CDP_CODE: Configuration applies to instruction fetches. + * @CDP_DATA: Configuration applies to reads and writes. + */ +enum resctrl_conf_type { + CDP_NONE, + CDP_CODE, + CDP_DATA, +}; + +enum resctrl_res_level { + RDT_RESOURCE_L3, + RDT_RESOURCE_L2, + RDT_RESOURCE_MBA, + RDT_RESOURCE_SMBA, + + /* Must be the last */ + RDT_NUM_RESOURCES, +}; + +#define CDP_NUM_TYPES (CDP_DATA + 1) + +/* + * Event IDs, the values match those used to program IA32_QM_EVTSEL before + * reading IA32_QM_CTR on RDT systems. 
+ */ +enum resctrl_event_id { + QOS_L3_OCCUP_EVENT_ID = 0x01, + QOS_L3_MBM_TOTAL_EVENT_ID = 0x02, + QOS_L3_MBM_LOCAL_EVENT_ID = 0x03, + QOS_MC_MBM_BPS_EVENT_ID = 0x04, +}; + +#define RESCTRL_MAX_EVENT_NUM 4 + +#endif /* __LINUX_RESCTRL_TYPES_H */ diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h index f8f3e958e9cf2fbf0777fdbf5e3fd993c889cee3..4a63bc7b4bfe1259fdad8c4b69cb75d7573cf7f0 100644 --- a/include/linux/resume_user_mode.h +++ b/include/linux/resume_user_mode.h @@ -56,6 +56,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs) #endif mem_cgroup_handle_over_high(GFP_KERNEL); + mem_cgroup_wmark_min_throttle(); blkcg_maybe_throttle_current(); rseq_handle_notify_resume(NULL, regs); diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b1fb58b435a98dfdb5e4542b25b08860ea289844..fac9366b2dc7433f29cfd3dfa4f34c9bf6099cdb 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -5,6 +5,7 @@ * Declarations for Reverse Mapping functions in mm/rmap.c */ +#include #include #include #include @@ -64,6 +65,8 @@ struct anon_vma { /* Interval tree of private "related" vmas */ struct rb_root_cached rb_root; + + CK_KABI_RESERVE(1) }; /* @@ -172,133 +175,323 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio); typedef int __bitwise rmap_t; /* - * No special request: if the page is a subpage of a compound page, it is - * mapped via a PTE. The mapped (sub)page is possibly shared between processes. + * No special request: A mapped anonymous (sub)page is possibly shared between + * processes. */ #define RMAP_NONE ((__force rmap_t)0) -/* The (sub)page is exclusive to a single process. */ +/* The anonymous (sub)page is exclusive to a single process. */ #define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0)) /* - * The compound page is not mapped via PTEs, but instead via a single PMD and - * should be accounted accordingly. + * Internally, we're using an enum to specify the granularity. We make the + * compiler emit specialized code for each granularity. */ -#define RMAP_COMPOUND ((__force rmap_t)BIT(1)) +enum rmap_level { + RMAP_LEVEL_PTE = 0, + RMAP_LEVEL_PMD, +}; + +static inline void __folio_rmap_sanity_checks(struct folio *folio, + struct page *page, int nr_pages, enum rmap_level level) +{ + /* hugetlb folios are handled separately. */ + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + + /* + * TODO: we get driver-allocated folios that have nothing to do with + * the rmap using vm_insert_page(); therefore, we cannot assume that + * folio_test_large_rmappable() holds for large folios. We should + * handle any desired mapcount+stats accounting for these folios in + * VM_MIXEDMAP VMAs separately, and then sanity-check here that + * we really only get rmappable folios. + */ + + VM_WARN_ON_ONCE(nr_pages <= 0); + VM_WARN_ON_FOLIO(page_folio(page) != folio, folio); + VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio); + + switch (level) { + case RMAP_LEVEL_PTE: + break; + case RMAP_LEVEL_PMD: + /* + * We don't support folios larger than a single PMD yet. So + * when RMAP_LEVEL_PMD is set, we assume that we are creating + * a single "entire" mapping of the folio. 
+ */ + VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio); + VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio); + break; + default: + VM_WARN_ON_ONCE(true); + } +} /* * rmap interfaces called when adding or removing pte of page */ -void page_move_anon_rmap(struct page *, struct vm_area_struct *); -void page_add_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long address, rmap_t flags); -void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long address); +void folio_move_anon_rmap(struct folio *, struct vm_area_struct *); +void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages, + struct vm_area_struct *, unsigned long address, rmap_t flags); +#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \ + folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags) +void folio_add_anon_rmap_pmd(struct folio *, struct page *, + struct vm_area_struct *, unsigned long address, rmap_t flags); void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, - unsigned long address); -void page_add_file_rmap(struct page *, struct vm_area_struct *, - bool compound); -void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr, - struct vm_area_struct *, bool compound); -void page_remove_rmap(struct page *, struct vm_area_struct *, - bool compound); - -void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long address, rmap_t flags); -void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *, +void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages, + struct vm_area_struct *); +#define folio_add_file_rmap_pte(folio, page, vma) \ + folio_add_file_rmap_ptes(folio, page, 1, vma) +void folio_add_file_rmap_pmd(struct folio *, struct page *, + struct vm_area_struct *); +void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages, + struct vm_area_struct *); +#define folio_remove_rmap_pte(folio, page, vma) \ + folio_remove_rmap_ptes(folio, page, 1, vma) +void folio_remove_rmap_pmd(struct folio *, struct page *, + struct vm_area_struct *); + +void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, + unsigned long address, rmap_t flags); +void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); -static inline void __page_dup_rmap(struct page *page, bool compound) +/* See folio_try_dup_anon_rmap_*() */ +static inline int hugetlb_try_dup_anon_rmap(struct folio *folio, + struct vm_area_struct *vma) { - if (compound) { - struct folio *folio = (struct folio *)page; + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); - VM_BUG_ON_PAGE(compound && !PageHead(page), page); - atomic_inc(&folio->_entire_mapcount); - } else { - atomic_inc(&page->_mapcount); + if (PageAnonExclusive(&folio->page)) { + if (unlikely(folio_needs_cow_for_dma(vma, folio))) + return -EBUSY; + ClearPageAnonExclusive(&folio->page); } + atomic_inc(&folio->_entire_mapcount); + return 0; +} + +/* See folio_try_share_anon_rmap_*() */ +static inline int hugetlb_try_share_anon_rmap(struct folio *folio) +{ + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio); + + /* Paired with the memory barrier in try_grab_folio(). 
*/ + if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + smp_mb(); + + if (unlikely(folio_maybe_dma_pinned(folio))) + return -EBUSY; + ClearPageAnonExclusive(&folio->page); + + /* + * This is conceptually a smp_wmb() paired with the smp_rmb() in + * gup_must_unshare(). + */ + if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + smp_mb__after_atomic(); + return 0; +} + +static inline void hugetlb_add_file_rmap(struct folio *folio) +{ + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); + + atomic_inc(&folio->_entire_mapcount); +} + +static inline void hugetlb_remove_rmap(struct folio *folio) +{ + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + + atomic_dec(&folio->_entire_mapcount); } -static inline void page_dup_file_rmap(struct page *page, bool compound) +static __always_inline void __folio_dup_file_rmap(struct folio *folio, + struct page *page, int nr_pages, enum rmap_level level) { - __page_dup_rmap(page, compound); + __folio_rmap_sanity_checks(folio, page, nr_pages, level); + + switch (level) { + case RMAP_LEVEL_PTE: + do { + atomic_inc(&page->_mapcount); + } while (page++, --nr_pages > 0); + break; + case RMAP_LEVEL_PMD: + atomic_inc(&folio->_entire_mapcount); + break; + } } /** - * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped - * anonymous page - * @page: the page to duplicate the mapping for - * @compound: the page is mapped as compound or as a small page - * @vma: the source vma + * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio + * @folio: The folio to duplicate the mappings of + * @page: The first page to duplicate the mappings of + * @nr_pages: The number of pages of which the mapping will be duplicated * - * The caller needs to hold the PT lock and the vma->vma_mm->write_protect_seq. + * The page range of the folio is defined by [page, page + nr_pages) * - * Duplicating the mapping can only fail if the page may be pinned; device - * private pages cannot get pinned and consequently this function cannot fail. + * The caller needs to hold the page table lock. + */ +static inline void folio_dup_file_rmap_ptes(struct folio *folio, + struct page *page, int nr_pages) +{ + __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE); +} +#define folio_dup_file_rmap_pte(folio, page) \ + folio_dup_file_rmap_ptes(folio, page, 1) + +/** + * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio + * @folio: The folio to duplicate the mapping of + * @page: The first page to duplicate the mapping of * - * If duplicating the mapping succeeds, the page has to be mapped R/O into - * the parent and the child. It must *not* get mapped writable after this call. + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) * - * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise. + * The caller needs to hold the page table lock. */ -static inline int page_try_dup_anon_rmap(struct page *page, bool compound, - struct vm_area_struct *vma) +static inline void folio_dup_file_rmap_pmd(struct folio *folio, + struct page *page) { - VM_BUG_ON_PAGE(!PageAnon(page), page); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PTE); +#else + WARN_ON_ONCE(true); +#endif +} - /* - * No need to check+clear for already shared pages, including KSM - * pages. 
- */ - if (!PageAnonExclusive(page)) - goto dup; +static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *src_vma, + enum rmap_level level) +{ + bool maybe_pinned; + int i; + + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + __folio_rmap_sanity_checks(folio, page, nr_pages, level); /* - * If this page may have been pinned by the parent process, - * don't allow to duplicate the mapping but instead require to e.g., - * copy the page immediately for the child so that we'll always - * guarantee the pinned page won't be randomly replaced in the + * If this folio may have been pinned by the parent process, + * don't allow to duplicate the mappings but instead require to e.g., + * copy the subpage immediately for the child so that we'll always + * guarantee the pinned folio won't be randomly replaced in the * future on write faults. */ - if (likely(!is_device_private_page(page)) && - unlikely(page_needs_cow_for_dma(vma, page))) - return -EBUSY; + maybe_pinned = likely(!folio_is_device_private(folio)) && + unlikely(folio_needs_cow_for_dma(src_vma, folio)); - ClearPageAnonExclusive(page); /* - * It's okay to share the anon page between both processes, mapping - * the page R/O into both processes. + * No need to check+clear for already shared PTEs/PMDs of the + * folio. But if any page is PageAnonExclusive, we must fallback to + * copying if the folio maybe pinned. */ -dup: - __page_dup_rmap(page, compound); + switch (level) { + case RMAP_LEVEL_PTE: + if (unlikely(maybe_pinned)) { + for (i = 0; i < nr_pages; i++) + if (PageAnonExclusive(page + i)) + return -EBUSY; + } + do { + if (PageAnonExclusive(page)) + ClearPageAnonExclusive(page); + atomic_inc(&page->_mapcount); + } while (page++, --nr_pages > 0); + break; + case RMAP_LEVEL_PMD: + if (PageAnonExclusive(page)) { + if (unlikely(maybe_pinned)) + return -EBUSY; + ClearPageAnonExclusive(page); + } + atomic_inc(&folio->_entire_mapcount); + break; + } return 0; } /** - * page_try_share_anon_rmap - try marking an exclusive anonymous page possibly - * shared to prepare for KSM or temporary unmapping - * @page: the exclusive anonymous page to try marking possibly shared + * folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range + * of a folio + * @folio: The folio to duplicate the mappings of + * @page: The first page to duplicate the mappings of + * @nr_pages: The number of pages of which the mapping will be duplicated + * @src_vma: The vm area from which the mappings are duplicated + * + * The page range of the folio is defined by [page, page + nr_pages) * - * The caller needs to hold the PT lock and has to have the page table entry - * cleared/invalidated. + * The caller needs to hold the page table lock and the + * vma->vma_mm->write_protect_seq. * - * This is similar to page_try_dup_anon_rmap(), however, not used during fork() - * to duplicate a mapping, but instead to prepare for KSM or temporarily - * unmapping a page (swap, migration) via page_remove_rmap(). + * Duplicating the mappings can only fail if the folio may be pinned; device + * private folios cannot get pinned and consequently this function cannot fail + * for them. * - * Marking the page shared can only fail if the page may be pinned; device - * private pages cannot get pinned and consequently this function cannot fail. + * If duplicating the mappings succeeded, the duplicated PTEs have to be R/O in + * the parent and the child. 
They must *not* be writable after this call + * succeeded. * - * Returns 0 if marking the page possibly shared succeeded. Returns -EBUSY - * otherwise. + * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise. */ -static inline int page_try_share_anon_rmap(struct page *page) +static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *src_vma) { - VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page); + return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma, + RMAP_LEVEL_PTE); +} +#define folio_try_dup_anon_rmap_pte(folio, page, vma) \ + folio_try_dup_anon_rmap_ptes(folio, page, 1, vma) - /* device private pages cannot get pinned via GUP. */ - if (unlikely(is_device_private_page(page))) { +/** + * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range + * of a folio + * @folio: The folio to duplicate the mapping of + * @page: The first page to duplicate the mapping of + * @src_vma: The vm area from which the mapping is duplicated + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock and the + * vma->vma_mm->write_protect_seq. + * + * Duplicating the mapping can only fail if the folio may be pinned; device + * private folios cannot get pinned and consequently this function cannot fail + * for them. + * + * If duplicating the mapping succeeds, the duplicated PMD has to be R/O in + * the parent and the child. They must *not* be writable after this call + * succeeded. + * + * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise. + */ +static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio, + struct page *page, struct vm_area_struct *src_vma) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma, + RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); + return -EBUSY; +#endif +} + +static __always_inline int __folio_try_share_anon_rmap(struct folio *folio, + struct page *page, int nr_pages, enum rmap_level level) +{ + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio); + __folio_rmap_sanity_checks(folio, page, nr_pages, level); + + /* device private folios cannot get pinned via GUP. */ + if (unlikely(folio_is_device_private(folio))) { ClearPageAnonExclusive(page); return 0; } @@ -349,7 +542,7 @@ static inline int page_try_share_anon_rmap(struct page *page) if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) smp_mb(); - if (unlikely(page_maybe_dma_pinned(page))) + if (unlikely(folio_maybe_dma_pinned(folio))) return -EBUSY; ClearPageAnonExclusive(page); @@ -362,6 +555,68 @@ static inline int page_try_share_anon_rmap(struct page *page) return 0; } +/** + * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page + * mapped by a PTE possibly shared to prepare + * for KSM or temporary unmapping + * @folio: The folio to share a mapping of + * @page: The mapped exclusive page + * + * The caller needs to hold the page table lock and has to have the page table + * entries cleared/invalidated. + * + * This is similar to folio_try_dup_anon_rmap_pte(), however, not used during + * fork() to duplicate mappings, but instead to prepare for KSM or temporarily + * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pte(). 
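+ *
+ * A sketch of the intended use when temporarily unmapping a page
+ * (illustrative only; page-table locking, TLB flushing and the swap
+ * details are elided):
+ *
+ *	pteval = ptep_clear_flush(vma, address, ptep);
+ *	if (folio_try_share_anon_rmap_pte(folio, page)) {
+ *		set_pte_at(mm, address, ptep, pteval);
+ *		return false;
+ *	}
+ *	folio_remove_rmap_pte(folio, page, vma);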
+ * + * Marking the mapped page shared can only fail if the folio maybe pinned; + * device private folios cannot get pinned and consequently this function cannot + * fail. + * + * Returns 0 if marking the mapped page possibly shared succeeded. Returns + * -EBUSY otherwise. + */ +static inline int folio_try_share_anon_rmap_pte(struct folio *folio, + struct page *page) +{ + return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE); +} + +/** + * folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page + * range mapped by a PMD possibly shared to + * prepare for temporary unmapping + * @folio: The folio to share the mapping of + * @page: The first page to share the mapping of + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock and has to have the page table + * entries cleared/invalidated. + * + * This is similar to folio_try_dup_anon_rmap_pmd(), however, not used during + * fork() to duplicate a mapping, but instead to prepare for temporarily + * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd(). + * + * Marking the mapped pages shared can only fail if the folio maybe pinned; + * device private folios cannot get pinned and consequently this function cannot + * fail. + * + * Returns 0 if marking the mapped pages possibly shared succeeded. Returns + * -EBUSY otherwise. + */ +static inline int folio_try_share_anon_rmap_pmd(struct folio *folio, + struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR, + RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); + return -EBUSY; +#endif +} + /* * Called from mm/vmscan.c to handle paging out */ diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 189140bf11fc4078a61d6c72c760c4cdebc41ced..bcd203c5d4eaa245f16e18e41864e325cf13e747 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -82,6 +82,8 @@ struct sbitmap { * cachelines until the map is exhausted. */ unsigned int __percpu *alloc_hint; + + CK_KABI_RESERVE(1) }; #define SBQ_WAIT_QUEUES 8 diff --git a/include/linux/sched.h b/include/linux/sched.h index d4f9d82c69e0b0b3a55c93268be7caff1a2c270a..4b92e943de0ce2a36817bb2318834dec7d494a36 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -11,6 +11,7 @@ #include +#include #include #include #include @@ -132,6 +133,10 @@ struct user_event_mm; #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0) #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0) #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0) +#define task_contributes_to_load(task) \ + ((READ_ONCE((task)->__state) & TASK_UNINTERRUPTIBLE) != 0 && \ + (READ_ONCE((task)->__state) & TASK_FROZEN) == 0 && \ + (READ_ONCE((task)->__state) & TASK_NOLOAD) == 0) /* * Special states are those that do not use the normal wait-loop pattern. 
See @@ -392,6 +397,11 @@ struct sched_info { unsigned long long last_queued; #endif /* CONFIG_SCHED_INFO */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -411,6 +421,11 @@ struct sched_info { struct load_weight { unsigned long weight; u32 inv_weight; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** @@ -504,6 +519,11 @@ struct sched_avg { unsigned long runnable_avg; unsigned long util_avg; struct util_est util_est; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned; struct sched_statistics { @@ -512,6 +532,8 @@ struct sched_statistics { u64 wait_max; u64 wait_count; u64 wait_sum; + u64 parent_wait_sum_base; + u64 parent_wait_contrib; u64 iowait_count; u64 iowait_sum; @@ -544,8 +566,19 @@ struct sched_statistics { #ifdef CONFIG_SCHED_CORE u64 core_forceidle_sum; + u64 core_forceidle_task_sum; +#endif +#if defined(CONFIG_SCHED_CORE) || defined(CONFIG_SCHED_ACPU) + u64 core_sibidle_sum; + u64 core_sibidle_task_sum; #endif + #endif /* CONFIG_SCHEDSTATS */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned; struct sched_entity { @@ -565,6 +598,20 @@ struct sched_entity { s64 vlag; u64 slice; + /* irq time is included */ + u64 exec_start_raw; + u64 sum_exec_raw; + u64 cg_idle_start; + u64 cg_idle_sum; + u64 cg_init_time; + u64 cg_nr_iowait; + u64 cg_iowait_sum; + u64 cg_iowait_start; + u64 cg_ineffective_sum; + u64 cg_ineffective_start; + seqlock_t idle_seqlock; + spinlock_t iowait_lock; + u64 nr_migrations; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -587,6 +634,18 @@ struct sched_entity { */ struct sched_avg avg; #endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + unsigned int ht_ratio; +#endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; struct sched_rt_entity { @@ -605,6 +664,11 @@ struct sched_rt_entity { /* rq "owned" by this entity/group: */ struct rt_rq *my_q; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; struct sched_dl_entity { @@ -678,6 +742,11 @@ struct sched_dl_entity { */ struct sched_dl_entity *pi_se; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_UCLAMP_TASK @@ -1443,6 +1512,7 @@ struct task_struct { /* Number of pages to reclaim on returning to userland: */ unsigned int memcg_nr_pages_over_high; + unsigned int wmark_min_throttle_ms; /* Used by memcontrol for targeted memcg charge: */ struct mem_cgroup *active_memcg; @@ -1541,6 +1611,22 @@ struct task_struct { struct user_event_mm *user_event_mm; #endif + int wait_res_type; + union { + struct folio *wait_folio; + struct bio *wait_bio; + }; + unsigned long wait_moment; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. 
@@ -1558,6 +1644,36 @@ struct task_struct { */ }; +enum { + TASK_WAIT_FOLIO = 1, + TASK_WAIT_BIO, +}; + +static inline void task_set_wait_res(int type, void *res) +{ + switch (type) { + case TASK_WAIT_FOLIO: + current->wait_folio = (struct folio *)res; + break; + case TASK_WAIT_BIO: + current->wait_bio = (struct bio *)res; + break; + default: + current->wait_folio = NULL; + break; + } + + current->wait_res_type = type; + current->wait_moment = jiffies; +} + +static inline void task_clear_wait_res(void) +{ + current->wait_folio = NULL; + current->wait_res_type = 0; + current->wait_moment = 0; +} + static inline struct pid *task_pid(struct task_struct *task) { return task->thread_pid; @@ -2463,4 +2579,74 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } extern void sched_set_stop_task(int cpu, struct task_struct *stop); +struct cpuacct_usage_result { + u64 user, nice, system, irq, softirq; + u64 steal, iowait, idle, guest, guest_nice; +}; + +enum rich_container_source { + RICH_CONTAINER_REAPER, + RICH_CONTAINER_CURRENT, +}; + +#ifdef CONFIG_RICH_CONTAINER +void rich_container_source(enum rich_container_source *from); +bool child_cpuacct(struct task_struct *tsk); +void rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, + struct cpuacct_usage_result *res); +unsigned long rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu); +void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, + unsigned long offset, int shift, bool running); +bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total); + +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask); + +#else /* CONFIG_RICH_CONTAINER */ +static inline void +rich_container_source(enum rich_container_source *from) +{ +} + +static inline void +rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, + struct cpuacct_usage_result *res) +{ +} + +static inline unsigned long +rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu) +{ + return 0; +} + +static inline void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, + unsigned long offset, int shift, bool running) +{ +} + +static inline bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total) +{ + return false; +} + +static inline +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask) +{ +} +#endif + +#ifdef CONFIG_SCHED_SLI +void create_rich_container_reaper(struct task_struct *tsk); +#else +static inline void create_rich_container_reaper(struct task_struct *tsk) { } +#endif + #endif diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index fe1a46f30d2409ff0fe0906bd12af567ae225f0e..bf538a280c829cfc195c4275fb42bca870426b20 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -55,6 +55,13 @@ static inline bool housekeeping_test_cpu(int cpu, enum hk_type type) static inline void housekeeping_init(void) { } #endif /* CONFIG_CPU_ISOLATION */ +#if defined(CONFIG_CPU_ISOLATION) && defined(CONFIG_CGROUP_SCHED) +DECLARE_STATIC_KEY_FALSE(dyn_isolcpus_enabled); +extern void wilds_cpus_allowed(struct cpumask *pmask); +#else +static inline void wilds_cpus_allowed(struct cpumask *pmask) {} 
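task_set_wait_res() and task_clear_wait_res() above tag the current task with the resource it is about to block on (a folio or a bio, via the union in task_struct) plus a jiffies timestamp, so stall diagnostics can later report what was being waited on and for how long. A minimal usage sketch; the folio_wait_locked() call site is illustrative, not one mandated by this patch::

    /* Illustrative caller: annotate a folio-lock wait for diagnostics. */
    static void wait_on_locked_folio(struct folio *folio)
    {
    	task_set_wait_res(TASK_WAIT_FOLIO, folio);
    	folio_wait_locked(folio);	/* may sleep */
    	task_clear_wait_res();
    }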
+#endif + static inline bool housekeeping_cpu(int cpu, enum hk_type type) { #ifdef CONFIG_CPU_ISOLATION diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h index 83ec54b65e792f13e87121918dad334af5481578..20165894027d10ebf11d13c76b168f57b9cd9707 100644 --- a/include/linux/sched/loadavg.h +++ b/include/linux/sched/loadavg.h @@ -13,8 +13,18 @@ * 11 bit fractions. */ extern unsigned long avenrun[]; /* Load averages */ +extern unsigned long avenrun_r[]; /* R load averages */ + extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); +#ifdef CONFIG_SCHED_SLI +extern void get_avenrun_r(unsigned long *loads, unsigned long offset, + int shift); +#else +static inline void get_avenrun_r(unsigned long *loads, unsigned long offset, + int shift) { } +#endif + #define FSHIFT 11 /* nr of bits of precision */ #define FIXED_1 (1<mapping & ~PAGE_MAPPING_FLAGS); - - if (!mapping || mapping != folio->mapping) - return false; - return mapping->a_ops == &secretmem_aops; } @@ -38,7 +21,7 @@ static inline bool vma_is_secretmem(struct vm_area_struct *vma) return false; } -static inline bool folio_is_secretmem(struct folio *folio) +static inline bool secretmem_mapping(struct address_space *mapping) { return false; } diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 386ab580b839b66518ed33cda02fd3c201b064d6..234bcdb1fba459916635067154bedd17d8cfd423 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -207,6 +207,21 @@ static const struct file_operations __name ## _fops = { \ .release = single_release, \ } +#define DEFINE_SHOW_STORE_ATTRIBUTE(__name) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __name ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .write = __name ## _write, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + #define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 95ac8398ee72d97015a61c4b1bdf5063d20a9e15..ed93371c944ccea9c2fc48bb2f538816077aadee 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -12,6 +12,7 @@ static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_np(unsigned long addr, int numpages) { return 0; } #endif #ifndef set_memory_rox diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 134c686c8676cf6e0e5315b2259eb836aedc6402..9add4f6f8c5774d7dfdf200e3b729b4e0f8d0a11 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -23,18 +23,22 @@ struct shmem_inode_info { unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ - pgoff_t fallocend; /* highest fallocate endindex */ - struct list_head shrinklist; /* shrinkable hpage inodes */ - struct list_head swaplist; /* chain of maybes on swap */ + union { + struct offset_ctx dir_offsets; /* stable directory offsets */ + struct { + struct list_head shrinklist; /* shrinkable hpage inodes */ + struct list_head 
swaplist; /* chain of maybes on swap */ + }; + }; + struct timespec64 i_crtime; /* file creation time */ struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ + pgoff_t fallocend; /* highest fallocate endindex */ + unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */ atomic_t stop_eviction; /* hold when working on inode */ - struct timespec64 i_crtime; /* file creation time */ - unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */ #ifdef CONFIG_TMPFS_QUOTA struct dquot __rcu *i_dquot[MAXQUOTAS]; #endif - struct offset_ctx dir_offsets; /* stable entry offsets */ struct inode vfs_inode; }; @@ -111,11 +115,19 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); int shmem_unuse(unsigned int type); #ifdef CONFIG_TRANSPARENT_HUGEPAGE -extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, - struct mm_struct *mm, unsigned long vm_flags); +unsigned long shmem_allowable_huge_orders(struct inode *inode, + struct vm_area_struct *vma, pgoff_t index, + bool shmem_huge_force); +bool shmem_hpage_pmd_enabled(void); #else -static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, - struct mm_struct *mm, unsigned long vm_flags) +static inline unsigned long shmem_allowable_huge_orders(struct inode *inode, + struct vm_area_struct *vma, pgoff_t index, + bool shmem_huge_force) +{ + return 0; +} + +static inline bool shmem_hpage_pmd_enabled(void) { return false; } diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 224293b2dd0663ee56777bc20a5c19f02089d569..ac9565dd6f46b1819b5bb074674d6ccc3be09f5c 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -4,6 +4,7 @@ #include #include +#include /* * This struct is used to pass information from page reclaim to the shrinkers. @@ -83,6 +84,12 @@ struct shrinker { #endif /* objs pending delete, per node */ atomic_long_t *nr_deferred; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) }; #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5f11f98733419030d2400657c28147938f783517..5703f351e0a7be16bdc3587a0ccd0899fe81a779 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1043,6 +1043,18 @@ struct sk_buff { ); /* end headers group */ + /* + * kABI: The kmem_caches of struct sk_buff are initialized with + * SLAB_HWCACHE_ALIGN flag, such as skbuff_head_cache and + * skbuff_fclone_cache, which causes each skb to be forcibly + * aligned with cacheline size(64 bytes). + * Reserve 24 bytes, total 256 bytes, this will not break + * cacheline alignment. + */ + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + /* These elements must be at the end, see alloc_skb() for details. 
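The DEFINE_SHOW_STORE_ATTRIBUTE() macro in the seq_file.h hunk above mirrors DEFINE_SHOW_ATTRIBUTE() but also wires up a caller-supplied <name>_write() handler, yielding a read-write single_open() file in a few lines. A hedged sketch of a writable debugfs knob built on it (all names are illustrative)::

    static int foo_enabled;

    static int foo_show(struct seq_file *m, void *v)
    {
    	seq_printf(m, "%d\n", foo_enabled);
    	return 0;
    }

    static ssize_t foo_write(struct file *file, const char __user *ubuf,
    			 size_t count, loff_t *ppos)
    {
    	int ret = kstrtoint_from_user(ubuf, count, 0, &foo_enabled);

    	return ret ? ret : count;
    }

    DEFINE_SHOW_STORE_ATTRIBUTE(foo);

    /* e.g.: debugfs_create_file("foo", 0600, parent, NULL, &foo_fops); */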
*/ sk_buff_data_t tail; sk_buff_data_t end; diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 6ccfd9236387c682ea7b319a4a3ec3019b1f6e14..4d793604e5742de019f6f7ea89584c4c2986e86a 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -108,6 +108,11 @@ struct sk_psock { struct delayed_work work; struct sock *sk_pair; struct rcu_work rwork; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 8f3f72480e78b0d467167c777592e6472e3485e7..2da11d07b547cca7ba315bd425def5f86dbd535d 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -41,6 +41,15 @@ struct srcu_data { /* ->srcu_data_have_cbs[]. */ int cpu; struct srcu_struct *ssp; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; /* diff --git a/include/linux/stat.h b/include/linux/stat.h index 52150570d37a53f79767770308e0745e642c9970..d487187976ff41ec9e2c192f7b7b0f7944972717 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -2,7 +2,7 @@ #ifndef _LINUX_STAT_H #define _LINUX_STAT_H - +#include #include #include @@ -53,6 +53,9 @@ struct kstat { u32 dio_mem_align; u32 dio_offset_align; u64 change_cookie; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* These definitions are internal to the kernel for now. Mainly used by nfsd. */ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 42ff5a4de8ee7633676d525e64e0a8b5697319e0..e286ebd32fe61eea6af9413f3bf1d6bc1161d145 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -100,6 +100,7 @@ struct stmmac_dma_cfg { bool eame; bool multi_msi_en; bool dche; + bool atds; }; #define AXI_BLEN 7 @@ -220,6 +221,7 @@ struct dwmac4_addrs { #define STMMAC_FLAG_RX_CLK_RUNS_IN_LPI BIT(10) #define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(11) #define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(12) +#define STMMAC_FLAG_DISABLE_FORCE_1000 BIT(13) struct plat_stmmacenet_data { int bus_id; diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index ea7a74ea7389318627bae6f5410576e00135ca76..59e41990f8b5339176f22daea53b3a222223067f 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -27,6 +27,8 @@ struct cpu_stop_work { unsigned long caller; void *arg; struct cpu_stop_done *done; + + CK_KABI_RESERVE(1) }; int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); diff --git a/include/linux/string.h b/include/linux/string.h index 5077776e995e0193b3569e0b5fedca4900ef54c0..361294697f8c75f43dd9c1c614c1967e2fde9240 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -219,6 +219,7 @@ extern char *kstrndup(const char *s, size_t len, gfp_t gfp); extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp); +extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp); extern char **argv_split(gfp_t gfp, const char *str, int *argcp); extern void argv_free(char **argv); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 3d8b215f32d5b084c5075f12365ca611bb35eb2f..54673955d10974ea4c40a110b84a110f1722caa1 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -44,6 +44,9 @@ 
struct svc_pool { struct percpu_counter sp_threads_woken; unsigned long sp_flags; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } ____cacheline_aligned_in_smp; /* bits for sp_flags */ @@ -96,6 +99,9 @@ struct svc_serv { * entries in the svc_cb_list */ bool sv_bc_enabled; /* service uses backchannel */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** diff --git a/include/linux/swap.h b/include/linux/swap.h index cb25db2a93dd1bd138479363829bed5ef566155f..5eb47d3554f069b3d382ebe564f2a8df7ea32012 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -243,23 +243,38 @@ enum { * free clusters are organized into a list. We fetch an entry from the list to * get a free cluster. * - * The data field stores next cluster if the cluster is free or cluster usage - * counter otherwise. The flags field determines if a cluster is free. This is - * protected by swap_info_struct.lock. + * The flags field determines if a cluster is free. This is + * protected by the cluster lock. */ struct swap_cluster_info { spinlock_t lock; /* * Protect swap_cluster_info fields - * and swap_info_struct->swap_map - * elements correspond to the swap - * cluster + * other than list, and swap_info_struct->swap_map + * elements corresponding to the swap cluster. */ - unsigned int data:24; - unsigned int flags:8; + u16 count; + u8 flags; + u8 order; + struct list_head list; }; #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ -#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */ -#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */ +#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */ +#define CLUSTER_FLAG_FRAG 4 /* This cluster is on frag list */ +#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */ + +/* + * The first page in the swap file is the swap header, which is always marked + * bad to prevent it from being allocated as an entry. This also prevents the + * cluster to which it belongs from being marked free. Therefore 0 is safe to + * use as a sentinel to indicate next is not valid in percpu_cluster. + */ +#define SWAP_NEXT_INVALID 0 + +#ifdef CONFIG_THP_SWAP +#define SWAP_NR_ORDERS (PMD_ORDER + 1) +#else +#define SWAP_NR_ORDERS 1 +#endif /* * We assign a cluster to each CPU, so each CPU can allocate swap entry from @@ -267,13 +282,7 @@ struct swap_cluster_info { * throughput. */ struct percpu_cluster { - struct swap_cluster_info index; /* Current cluster index */ - unsigned int next; /* Likely next allocation offset */ -}; - -struct swap_cluster_list { - struct swap_cluster_info head; - struct swap_cluster_info tail; + unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; /* @@ -288,7 +297,13 @@ struct swap_info_struct { unsigned int max; /* extent of the swap_map */ unsigned char *swap_map; /* vmalloc'ed array of usage counts */ struct swap_cluster_info *cluster_info; /* cluster info.
Only for SSD */ - struct swap_cluster_list free_clusters; /* free clusters list */ + struct list_head free_clusters; /* free clusters list */ + struct list_head full_clusters; /* full clusters list */ + struct list_head nonfull_clusters[SWAP_NR_ORDERS]; + /* list of clusters that contain at least one free slot */ + struct list_head frag_clusters[SWAP_NR_ORDERS]; + /* list of clusters that are fragmented or contended */ + unsigned int frag_cluster_nr[SWAP_NR_ORDERS]; unsigned int lowest_bit; /* index of first free in swap_map */ unsigned int highest_bit; /* index of last free in swap_map */ unsigned int pages; /* total of usable pages of swap */ @@ -320,7 +335,15 @@ struct swap_info_struct { * list. */ struct work_struct discard_work; /* discard worker */ - struct swap_cluster_list discard_clusters; /* discard clusters list */ + struct list_head discard_clusters; /* discard clusters list */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + struct plist_node avail_lists[]; /* * entries in swap_avail_heads, one * entry per node. @@ -361,6 +384,7 @@ extern struct list_lru shadow_nodes; /* linux/mm/page_alloc.c */ extern unsigned long totalreserve_pages; +extern unsigned long sysctl_min_cache_kbytes; /* Definition of global_zone_page_state not available yet */ #define nr_free_pages() global_zone_page_state(NR_FREE_PAGES) @@ -396,9 +420,6 @@ void folio_deactivate(struct folio *folio); void folio_mark_lazyfree(struct folio *folio); extern void swap_setup(void); -extern void lru_cache_add_inactive_or_unevictable(struct page *page, - struct vm_area_struct *vma); - /* linux/mm/vmscan.c */ extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, @@ -474,14 +495,14 @@ swp_entry_t folio_alloc_swap(struct folio *folio); bool folio_free_swap(struct folio *folio); void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); -extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size); +extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order); extern int add_swap_count_continuation(swp_entry_t, gfp_t); -extern void swap_shmem_alloc(swp_entry_t); +extern void swap_shmem_alloc(swp_entry_t, int); extern int swap_duplicate(swp_entry_t); -extern int swapcache_prepare(swp_entry_t); -extern void swap_free(swp_entry_t); +extern int swapcache_prepare(swp_entry_t entry, int nr); +extern void swap_free_nr(swp_entry_t entry, int nr_pages); extern void swapcache_free_entries(swp_entry_t *entries, int n); -extern int free_swap_and_cache(swp_entry_t); +extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); int swap_type_of(dev_t device, sector_t offset); int find_first_swap(dev_t *device); extern unsigned int count_swap_pages(int, int); @@ -531,8 +552,9 @@ static inline void put_swap_device(struct swap_info_struct *si) #define free_pages_and_swap_cache(pages, nr) \ release_pages((pages), (nr)); -/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */ -#define free_swap_and_cache(e) is_pfn_swap_entry(e) +static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr) +{ +} static inline void free_swap_cache(struct page *page) { @@ -543,7 +565,7 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) return 0; } -static inline void swap_shmem_alloc(swp_entry_t swp) +static inline void swap_shmem_alloc(swp_entry_t swp, int nr) { } @@
-552,12 +574,12 @@ static inline int swap_duplicate(swp_entry_t swp) return 0; } -static inline int swapcache_prepare(swp_entry_t swp) +static inline int swapcache_prepare(swp_entry_t swp, int nr) { return 0; } -static inline void swap_free(swp_entry_t swp) +static inline void swap_free_nr(swp_entry_t entry, int nr_pages) { } @@ -600,14 +622,15 @@ static inline int add_swap_extent(struct swap_info_struct *sis, } #endif /* CONFIG_SWAP */ -#ifdef CONFIG_THP_SWAP -extern int split_swap_cluster(swp_entry_t entry); -#else -static inline int split_swap_cluster(swp_entry_t entry) +static inline void free_swap_and_cache(swp_entry_t entry) { - return 0; + free_swap_and_cache_nr(entry, 1); +} + +static inline void swap_free(swp_entry_t entry) +{ + swap_free_nr(entry, 1); } -#endif #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index fd3fe5c8c17fce0b18111b3b4e48c5f6a5590819..9f10e60e32dbb678591334585f98da1f6064c325 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -183,6 +183,8 @@ struct bin_attribute { char *, loff_t, size_t); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr, struct vm_area_struct *vma); + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h index 6f6acce064dea535b0a4e6fe84d20a99a3c9a9dd..6b9e504b7a6120824c0755d29e761595737ba656 100644 --- a/include/linux/task_io_accounting.h +++ b/include/linux/task_io_accounting.h @@ -43,4 +43,13 @@ struct task_io_accounting { */ u64 cancelled_write_bytes; #endif /* CONFIG_TASK_IO_ACCOUNTING */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 9b371aa7c79623a55371530fb6019b329347edb6..daafa15458182c4619f57f96cbac1ce31d6eb6bd 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -165,6 +165,9 @@ struct tcp_request_sock { * after data-in-SYN. */ u8 syn_tos; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) @@ -460,6 +463,11 @@ struct tcp_sock { */ struct request_sock __rcu *fastopen_rsk; struct saved_syn *saved_syn; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum tsq_enum { @@ -505,6 +513,9 @@ struct tcp_timewait_sock { #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) diff --git a/include/linux/tick.h b/include/linux/tick.h index 9701c571a5cfe917be32a0ec244ccc07f91afb84..39f01f0bef7664d96a275dd692dabc13ca6c8f11 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -174,9 +174,16 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } static inline void tick_nohz_idle_stop_tick_protected(void) { } #endif /* !CONFIG_NO_HZ_COMMON */ +/* + * Mask of CPUs that are nohz_full. + * + * Users should be guarded by CONFIG_NO_HZ_FULL or a tick_nohz_full_cpu() + * check. 
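Note how the swap.h hunks above turn the single-entry primitives into one-entry wrappers: swap_free() now expands to swap_free_nr(entry, 1) and free_swap_and_cache() to free_swap_and_cache_nr(entry, 1), so existing callers compile unchanged while mTHP-aware paths can release a whole run of contiguous entries at once. An illustrative caller (a sketch, not a call site taken from this patch)::

    /* Free nr contiguous swap entries backing one large folio. */
    static void drop_swap_range(swp_entry_t first, int nr)
    {
    	free_swap_and_cache_nr(first, nr);
    }

    /* Single-entry callers keep working via the wrapper. */
    static void drop_one(swp_entry_t entry)
    {
    	swap_free(entry);	/* == swap_free_nr(entry, 1) */
    }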
+ */ +extern cpumask_var_t tick_nohz_full_mask; + #ifdef CONFIG_NO_HZ_FULL extern bool tick_nohz_full_running; -extern cpumask_var_t tick_nohz_full_mask; static inline bool tick_nohz_full_enabled(void) { diff --git a/include/linux/timer.h b/include/linux/timer.h index 9162f275819a780def8efeb98cc4b553e1c44d28..38fbcdc78310367c39a037dab153fa8be435d1d6 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -7,6 +7,7 @@ #include #include #include +#include struct timer_list { /* @@ -21,6 +22,11 @@ struct timer_list { #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_LOCKDEP diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index aa1bc417266208a910f3872615ba357d8a6e3d0b..3a14307c19cb65bb6390040efb8db44bf6d6ba4b 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -126,6 +126,8 @@ struct trace_iterator { long idx; /* All new field here will be zeroed out in pipe_read */ + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; enum trace_iter_flags { diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index 4dc4955f0fbfe6c00f686314ffb96592753479a2..00eb4f999952e0fbbc32a4863c12d06024e967c9 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h @@ -10,6 +10,7 @@ #include #include +#include struct static_call_key; @@ -39,6 +40,8 @@ struct tracepoint { int (*regfunc)(void); void (*unregfunc)(void); struct tracepoint_func __rcu *funcs; + + CK_KABI_RESERVE(1) }; #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h index 6b367eb17979a20436e856311d211859db101c1c..7187b88d706c48a87f47d43494ad5ed35f11839d 100644 --- a/include/linux/tty_port.h +++ b/include/linux/tty_port.h @@ -44,6 +44,8 @@ struct tty_port_client_operations { void (*lookahead_buf)(struct tty_port *port, const u8 *cp, const u8 *fp, size_t count); void (*write_wakeup)(struct tty_port *port); + + CK_KABI_RESERVE(1) }; extern const struct tty_port_client_operations tty_port_default_client_ops; @@ -121,6 +123,10 @@ struct tty_port { int drain_delay; struct kref kref; void *client_data; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; /* tty_port::iflags bits -- use atomic bit ops */ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 3064314f48329cac729b9b4e29fc62072ec2df3b..550287c929906a402b03c65ee81479c2650799c8 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -205,6 +205,15 @@ copy_mc_to_kernel(void *dst, const void *src, size_t cnt) } #endif +#ifndef copy_mc_to_user +static inline unsigned long __must_check +copy_mc_to_user(void *dst, const void *src, size_t cnt) +{ + check_object_size(src, cnt, true); + return raw_copy_to_user(dst, src, cnt); +} +#endif + static __always_inline void pagefault_disabled_inc(void) { current->pagefault_disabled++; diff --git a/include/linux/udp.h b/include/linux/udp.h index 00790bb5cbde666a2a72180d94beae8eb647905d..91b5b63ce6677e67f0f9e0f5a17d23ccd761c1d4 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -56,6 +56,12 @@ struct udp_sock { int pending; /* Any pending frames ? */ __u8 encap_type; /* Is this an Encapsulation socket? */ +#if !IS_ENABLED(CONFIG_BASE_SMALL) + /* For UDP 4-tuple hash */ + __u16 udp_lrpa_hash; + struct hlist_nulls_node udp_lrpa_node; +#endif + /* * Following member retains the information to create a UDP header * when the socket is uncorked. 
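Moving the tick_nohz_full_mask declaration outside #ifdef CONFIG_NO_HZ_FULL lets common code reference the symbol, but, per the comment above, the mask is only allocated when nohz_full is active, so every reader must stay behind a CONFIG_NO_HZ_FULL or tick_nohz_full_enabled()/tick_nohz_full_cpu() guard. A minimal guarded-use sketch::

    /* Copy the nohz_full set into dst; empty when nohz_full is off.
     * The tick_nohz_full_enabled() check keeps us from dereferencing
     * a mask that was never allocated.
     */
    static void collect_nohz_full_cpus(struct cpumask *dst)
    {
    	cpumask_clear(dst);
    #ifdef CONFIG_NO_HZ_FULL
    	if (tick_nohz_full_enabled())
    		cpumask_copy(dst, tick_nohz_full_mask);
    #endif
    }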
@@ -196,6 +202,11 @@ static inline void udp_allow_gso(struct sock *sk) #define udp_portaddr_for_each_entry_rcu(__sk, list) \ hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node) +#if !IS_ENABLED(CONFIG_BASE_SMALL) +#define udp_lrpa_for_each_entry_rcu(__up, node, list) \ + hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node) +#endif + #define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE) #endif /* _LINUX_UDP_H */ diff --git a/include/linux/uio.h b/include/linux/uio.h index 42bce38a8e870506a09e248a32b8dea1434cba84..6ff9a0728486c7c51e4b597d6d00a6cf728538be 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -185,6 +185,12 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset, return copy_page_to_iter(&folio->page, offset, bytes, i); } +static inline size_t copy_folio_from_iter(struct folio *folio, size_t offset, + size_t bytes, struct iov_iter *i) +{ + return copy_page_from_iter(&folio->page, offset, bytes, i); +} + static inline size_t copy_folio_from_iter_atomic(struct folio *folio, size_t offset, size_t bytes, struct iov_iter *i) { diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 47c5962b876b027e2ea935c4003b37f7a1bfdca3..46e2710985e6b906d8ef5b8fc180c7e1dbf30b13 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -16,6 +16,7 @@ #include #include #include +#include struct module; struct uio_map; @@ -74,9 +75,11 @@ struct uio_device { struct fasync_struct *async_queue; wait_queue_head_t wait; struct uio_info *info; - struct mutex info_lock; struct kobject *map_dir; struct kobject *portio_dir; + struct percpu_ref info_ref; + struct completion confirm_done; + struct completion free_done; }; /** @@ -109,6 +112,7 @@ struct uio_info { int (*open)(struct uio_info *info, struct inode *inode); int (*release)(struct uio_info *info, struct inode *inode); int (*irqcontrol)(struct uio_info *info, s32 irq_on); + long (*ioctl)(struct uio_info *info, unsigned int cmd, unsigned long arg); }; extern int __must_check diff --git a/include/linux/units.h b/include/linux/units.h index 2793a41e73a2b6c36a14bd9a245e582e5c4ae560..45110daaf8d3260ced995b66ba62669e8b29ddfa 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -2,6 +2,7 @@ #ifndef _LINUX_UNITS_H #define _LINUX_UNITS_H +#include #include /* Metric prefixes in accordance with Système international (d'unités) */ @@ -31,6 +32,10 @@ #define MICROWATT_PER_MILLIWATT 1000UL #define MICROWATT_PER_WATT 1000000UL +#define BYTES_PER_KBIT (KILO / BITS_PER_BYTE) +#define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE) +#define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE) + #define ABSOLUTE_ZERO_MILLICELSIUS -273150 static inline long milli_kelvin_to_millicelsius(long t) diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 733f2a97589b2604d2f7f62683f03f1ce1834eee..c814a21eff99389c6b2912d39556068d3d51668d 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -102,6 +102,11 @@ struct user_namespace { struct ucounts *ucounts; long ucount_max[UCOUNT_COUNTS]; long rlimit_max[UCOUNT_RLIMIT_COUNTS]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; struct ucounts { diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index 562e8754869da6c6a5c46c2dd55b4775c27b0722..783d414ce7889f5132dd436043e178a48b57135e 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -129,5 +129,13 @@ void 
vfio_pci_core_disable(struct vfio_pci_core_device *vdev); void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev); pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, pci_channel_state_t state); - +ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, + void __iomem *io, char __user *buf, + loff_t off, size_t count, size_t x_start, + size_t x_end, bool iswrite); +bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt, + loff_t reg_start, size_t reg_cnt, + loff_t *buf_offset, + size_t *intersect_count, + size_t *register_offset); #endif /* VFIO_PCI_CORE_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 3219b368db79ccbbba88f56407061ce970132871..b2c512708086cb7e10ea5588fcd827a327be0013 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -34,6 +34,23 @@ struct reclaim_stat { unsigned nr_lazyfree_fail; }; +struct sysinfo_ext { + unsigned long lrupages[NR_LRU_LISTS]; + unsigned long cached; + unsigned long available; + unsigned long file_dirty; + unsigned long writeback; + unsigned long anon_mapped; + unsigned long file_mapped; + unsigned long slab_reclaimable; + unsigned long slab_unreclaimable; + unsigned long kernel_stack_kb; + unsigned long writeback_temp; + unsigned long anon_thps; + unsigned long shmem_thps; + unsigned long shmem_pmd_mapped; +}; + enum writeback_stat_item { NR_DIRTY_THRESHOLD, NR_DIRTY_BG_THRESHOLD, diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 99660197a36cb44099d9a96c4173a5b712e4af8b..7d07a164307602f88fa160062c128d0c6a803616 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -53,6 +53,9 @@ struct watchdog_ops { unsigned int (*get_timeleft)(struct watchdog_device *); int (*restart)(struct watchdog_device *, unsigned long, void *); long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** struct watchdog_device - The structure that defines a watchdog device @@ -119,6 +122,9 @@ struct watchdog_device { #define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */ #define WDOG_NO_PING_ON_SUSPEND 5 /* Ping worker should be stopped on suspend */ struct list_head deferred; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 52c6dd6d80ac0964bc3f4684dc5c7bc1a19b8222..12d830469e6048834edb646ae4b943325b72e482 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -14,6 +14,7 @@ #include #include #include +#include struct workqueue_struct; @@ -102,6 +103,11 @@ struct work_struct { #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) @@ -115,6 +121,11 @@ struct delayed_work { /* target workqueue and CPU ->timer uses to queue ->work */ struct workqueue_struct *wq; int cpu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct rcu_work { diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 083387c00f0c8b8e070705c38da59c97c8170dcb..eba6d07602591567a8baacc51c928417f9250ed9 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -77,6 +77,9 @@ struct writeback_control { */ struct swap_iocb **swap_plug; + /* Target list for splitting a large folio */ + struct 
list_head *list; + #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback *wb; /* wb this writeback is issued under */ struct inode *inode; /* inode being written out */ @@ -89,6 +92,9 @@ struct writeback_control { size_t wb_lcand_bytes; /* bytes written by last candidate */ size_t wb_tcand_bytes; /* bytes written by this candidate */ #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc) diff --git a/include/linux/xattr.h b/include/linux/xattr.h index d20051865800800df5d24a0edd7a485b296ba9fa..25d9bda1177bb145f2ddbc515eef5d650c35fcb8 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -45,6 +45,8 @@ struct xattr_handler { struct mnt_idmap *idmap, struct dentry *dentry, struct inode *inode, const char *name, const void *buffer, size_t size, int flags); + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/zswap.h b/include/linux/zswap.h index 2a60ce39cfde19ac90cebb9b16aaba8b6a99c4ab..f647ee38598a692a8c19876dafcf1b8eae07bc8d 100644 --- a/include/linux/zswap.h +++ b/include/linux/zswap.h @@ -15,6 +15,7 @@ bool zswap_load(struct folio *folio); void zswap_invalidate(int type, pgoff_t offset); void zswap_swapon(int type); void zswap_swapoff(int type); +bool zswap_never_enabled(void); #else @@ -32,6 +33,11 @@ static inline void zswap_invalidate(int type, pgoff_t offset) {} static inline void zswap_swapon(int type) {} static inline void zswap_swapoff(int type) {} +static inline bool zswap_never_enabled(void) +{ + return true; +} + #endif #endif /* _LINUX_ZSWAP_H */ diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h index 42207fc446602df618a6f227c65c90febdf4956c..60da3f65e3e8698dc3b8701a408046e6bb4f7efe 100644 --- a/include/net/dcbnl.h +++ b/include/net/dcbnl.h @@ -9,6 +9,7 @@ #define __NET_DCBNL_H__ #include +#include struct net_device; @@ -131,6 +132,15 @@ struct dcbnl_rtnl_ops { /* rewrite */ int (*dcbnl_setrewr)(struct net_device *dev, struct dcb_app *app); int (*dcbnl_delrewr)(struct net_device *dev, struct dcb_app *app); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #endif /* __NET_DCBNL_H__ */ diff --git a/include/net/devlink.h b/include/net/devlink.h index 29fd1b4ee6548d7cd2fe6dc3066e45d1ab502d1f..d98e0604328fc382e076cd3baf22ef731ce2faba 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1506,6 +1506,15 @@ struct devlink_ops { enum devlink_selftest_status (*selftest_run)(struct devlink *devlink, unsigned int id, struct netlink_ext_ack *extack); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; void *devlink_priv(struct devlink *devlink); diff --git a/include/net/dst.h b/include/net/dst.h index 78884429deed82cc4872ff30404af69aee11a333..c2f211a628b6d36280d6002368b4de2a0fc49df8 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -92,6 +92,15 @@ struct dst_entry { #ifdef CONFIG_64BIT struct lwtunnel_state *lwtstate; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; struct dst_metrics { diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 3a9001a042a5c392a79cfc59af528ef410a28668..b4d97a2e528136650593e96b1efb4eeb0ef37588 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -1,6 +1,7 @@ /* 
SPDX-License-Identifier: GPL-2.0 */ #ifndef _NET_DST_OPS_H #define _NET_DST_OPS_H +#include #include #include #include @@ -41,6 +42,15 @@ struct dst_ops { struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries ____cacheline_aligned_in_smp; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; static inline int dst_entries_get_fast(struct dst_ops *dst) diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 82da359bca035dc0ebfc019cc6d07acb5be77082..7e2adbcaa576968446f3327adf554d1eb116feb5 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -44,6 +44,15 @@ struct fib_rule { struct fib_rule_port_range sport_range; struct fib_rule_port_range dport_range; struct rcu_head rcu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; struct fib_lookup_arg { diff --git a/include/net/flow.h b/include/net/flow.h index 335bbc52171c10eb4b4b7e03a18eb8147902c6ac..f8dfd041541d0900cd220989eb74172dbe193215 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -12,6 +12,7 @@ #include #include #include +#include struct flow_keys; @@ -42,6 +43,9 @@ struct flowi_common { kuid_t flowic_uid; __u32 flowic_multipath_hash; struct flowi_tunnel flowic_tun_key; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; union flowi_uli { @@ -88,6 +92,9 @@ struct flowi4 { #define fl4_icmp_code uli.icmpt.code #define fl4_mh_type uli.mht.type #define fl4_gre_key uli.gre_key + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __attribute__((__aligned__(BITS_PER_LONG/8))); static inline void flowi4_init_output(struct flowi4 *fl4, int oif, @@ -149,6 +156,9 @@ struct flowi6 { #define fl6_mh_type uli.mht.type #define fl6_gre_key uli.gre_key __u32 mp_hash; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __attribute__((__aligned__(BITS_PER_LONG/8))); struct flowi { @@ -168,6 +178,8 @@ struct flowi { #define flowi_secid u.__fl_common.flowic_secid #define flowi_tun_key u.__fl_common.flowic_tun_key #define flowi_uid u.__fl_common.flowic_uid + + CK_KABI_RESERVE(1) } __attribute__((__aligned__(BITS_PER_LONG/8))); static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 1a7131d6cb0e4b8e0e273f33a57c6816dabe899e..fcf79497431186118f469a0ef853ebad3ed382d3 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -6,6 +6,7 @@ #include #include #include +#include #include struct bpf_prog; @@ -382,6 +383,8 @@ struct flow_dissector { unsigned long long used_keys; /* each bit represents presence of one key id */ unsigned short int offset[FLOW_DISSECTOR_KEY_MAX]; + + CK_KABI_RESERVE(1) }; struct flow_keys_basic { diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 9efa9a59e81f67a0acb95e63c6e948abb3bda8b4..8e2ae4b4466869e88b30000dcd3b992d6b76c2a0 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -329,6 +329,11 @@ struct flow_action_entry { } pppoe; }; struct flow_action_cookie *user_cookie; /* user defined action cookie */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct flow_action { diff --git a/include/net/genetlink.h b/include/net/genetlink.h index e8c34aa4a640dbf7401c566d1acf3fae01fe8c32..fc52a9f716ff6044fc37f1470a1f9fca3992e500 100644 --- a/include/net/genetlink.h +++ 
b/include/net/genetlink.h @@ -18,6 +18,8 @@ struct genl_multicast_group { char name[GENL_NAMSIZ]; u8 flags; u8 cap_sys_admin:1; + + CK_KABI_RESERVE(1) }; struct genl_split_ops; @@ -89,6 +91,8 @@ struct genl_family { int id; /* starting number of multicast group IDs in this family */ unsigned int mcgrp_offset; + + CK_KABI_RESERVE(1) }; /** @@ -113,6 +117,8 @@ struct genl_info { possible_net_t _net; void * user_ptr[2]; struct netlink_ext_ack *extack; + + CK_KABI_RESERVE(1) }; static inline struct net *genl_info_net(const struct genl_info *info) @@ -195,6 +201,11 @@ struct genl_ops { u8 internal_flags; u8 flags; u8 validate; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 31bf475eca762ac40c63f2eb46a0be02eb7b298c..9382c06e0bd12cd6d097148c8c203e97e14bf8c8 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -219,6 +219,23 @@ struct inet6_dev { struct rcu_head rcu; unsigned int ra_mtu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 1121d614942c8ae0859274579c28f58e04e57039..e2e9c7109efbb842ea8d39849df628490e2360e1 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -84,6 +84,8 @@ struct fib6_node { int fn_sernum; struct fib6_info __rcu *rr_ptr; struct rcu_head rcu; + + CK_KABI_RESERVE(1) }; struct fib6_gc_args { @@ -203,6 +205,9 @@ struct fib6_info { struct rcu_head rcu; struct nexthop *nh; + + CK_KABI_RESERVE(1) + struct fib6_nh fib6_nh[]; }; @@ -219,6 +224,8 @@ struct rt6_info { /* more non-fragment space at head required */ unsigned short rt6i_nfheader_len; + + CK_KABI_RESERVE(1) }; struct fib6_result { diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 031c661aa14df7148c51f54284ab6e33429352d6..723a63f7cce6a921acf853510eef99d40c41aee0 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -43,6 +43,11 @@ struct l3mdev_ops { /* IPv6 ops */ struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *dev, struct flowi6 *fl6); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_NET_L3_MASTER_DEV diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 53bd2d02a4f0db374b3920386afd12b0d7cbe6a0..c9d552c18f861debc2b2341b37bb6712503ac8eb 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -33,6 +33,12 @@ struct lwtunnel_state { int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*orig_input)(struct sk_buff *); struct rcu_head rcu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + __u8 data[]; }; diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 0d28172193fa634ec08c85007ca851da7f56d7ee..eccce6a78198ecf15e70c378c6cd23a492a42b5e 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -86,6 +86,8 @@ struct neigh_parms { u32 qlen; int data[NEIGH_VAR_DATA_MAX]; DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX); + + CK_KABI_RESERVE(1) }; static inline void neigh_var_set(struct neigh_parms *p, int index, int val) @@ -162,6 +164,10 @@ 
struct neighbour { struct rcu_head rcu; struct net_device *dev; netdevice_tracker dev_tracker; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + u8 primary_key[]; } __randomize_layout; @@ -234,6 +240,15 @@ struct neigh_table { struct neigh_statistics __percpu *stats; struct neigh_hash_table __rcu *nht; struct pneigh_entry **phash_buckets; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; enum { diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 958c805df1915bc9237c9b4f9a8aea86b95792db..4b70533d57cde9289a1b1986f9465b71cede260f 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -191,6 +191,11 @@ struct net { #if IS_ENABLED(CONFIG_SMC) struct netns_smc smc; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; #include diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h index cdcafb30d43784db2769f105d0745a92ae9f7006..49a5dd290587c6aef689b76eb386fbb7420c8985 100644 --- a/include/net/netdev_rx_queue.h +++ b/include/net/netdev_rx_queue.h @@ -21,6 +21,15 @@ struct netdev_rx_queue { #ifdef CONFIG_XDP_SOCKETS struct xsk_buff_pool *pool; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) } ____cacheline_aligned_in_smp; /* diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 4085765c337054b5770043d327653aebb88843db..e1ec22070b4d62c54fc623f1665c36f0bf213e74 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -123,6 +123,9 @@ struct nf_conn { /* Storage reserved for other modules, must be the last member */ union nf_conntrack_proto proto; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline struct nf_conn * diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 0c1dac318e02589287764a17257fda1a74521eb0..908f11a8cf83085b7471e298fecd5b23ec679d66 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -63,6 +63,7 @@ struct nf_exp_event { struct nf_ct_event_notifier { int (*ct_event)(unsigned int events, const struct nf_ct_event *item); int (*exp_event)(unsigned int events, const struct nf_exp_event *item); + CK_KABI_RESERVE(1) }; void nf_conntrack_register_notifier(struct net *net, diff --git a/include/net/netlink.h b/include/net/netlink.h index 8a7cd1170e1f7b1788b7989a63dba5b482e2e777..117837ebcf6ca2c6885eb5cd60eba15a6069fd57 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -359,6 +359,11 @@ struct nla_policy { int (*validate)(const struct nlattr *attr, struct netlink_ext_ack *extack); }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN) diff --git a/include/net/netns/can.h b/include/net/netns/can.h index 48b79f7e6236d9b4cf4bd57655f89e161faff05d..ecee28c5e3728c9c992e00b46a5e873a89dc2090 100644 --- a/include/net/netns/can.h +++ b/include/net/netns/can.h @@ -36,6 +36,8 @@ struct netns_can { /* CAN GW per-net gateway jobs */ struct hlist_head cgw_list; + + CK_KABI_RESERVE(1) }; #endif /* __NETNS_CAN_H__ */ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 
7a41c4791536732005cedbb80c223b86aa43249e..536145eb1228fb0466f46702e12c7f49fcada616 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -239,5 +239,10 @@ struct netns_ipv4 { atomic_t rt_genid; siphash_key_t ip_id_key; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #endif diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 5f2cfd84570aea8dd225208d36065b47b27b6b10..75a669da99b8afcaf396d7f39763929c504413df 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -56,6 +56,8 @@ struct netns_sysctl_ipv6 { u8 skip_notify_on_dev_down; u8 fib_notify_on_flag_change; u8 icmpv6_error_anycast_as_unicast; + + CK_KABI_RESERVE(1) }; struct netns_ipv6 { @@ -119,6 +121,8 @@ struct netns_ipv6 { u32 seq; } ip6addrlbl_table; struct ioam6_pernet_data *ioam6_data; + + CK_KABI_RESERVE(1) }; #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h index a6a0bf4a247e51e95189d98f5438a12cfe3cfb74..5f283ec350af38708c2b60e85b1ca8cf04c48b26 100644 --- a/include/net/netns/netfilter.h +++ b/include/net/netns/netfilter.h @@ -33,5 +33,7 @@ struct netns_nf { #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) unsigned int defrag_ipv6_users; #endif + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index cc8060c017d5fb4d96a7fab85c7140cfbe44332e..221ce4a63eeb0e5dcc4839b9a91a14e136be4f39 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -4,6 +4,8 @@ struct netns_nftables { u8 gencursor; + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index 7eff3d981b893e5fd10c11cae4078369570be848..6322fcb6ab9d3bdff351dd4d70d6b9109997ccfa 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -179,6 +179,9 @@ struct netns_sctp { #ifdef CONFIG_NET_L3_MASTER_DEV int l3mdev_accept; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #endif /* __NETNS_SCTP_H__ */ diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h index 582212ada3ba7441d34fafdd81ae09dae929e8da..d55342489ef5dd6ad6214bd2c85c875b890e8001 100644 --- a/include/net/netns/smc.h +++ b/include/net/netns/smc.h @@ -22,5 +22,38 @@ struct netns_smc { int sysctl_smcr_testlink_time; int sysctl_wmem; int sysctl_rmem; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) + CK_KABI_RESERVE(17) + CK_KABI_RESERVE(18) + CK_KABI_RESERVE(19) + CK_KABI_RESERVE(20) + CK_KABI_RESERVE(21) + CK_KABI_RESERVE(22) + CK_KABI_RESERVE(23) + CK_KABI_RESERVE(24) + CK_KABI_RESERVE(25) + CK_KABI_RESERVE(26) + CK_KABI_RESERVE(27) + CK_KABI_RESERVE(28) + CK_KABI_RESERVE(29) + CK_KABI_RESERVE(30) + CK_KABI_RESERVE(31) + CK_KABI_RESERVE(32) }; #endif diff --git a/include/net/netns/unix.h b/include/net/netns/unix.h index 9859d134d5a89c3c65d8faf943eb611b99938f03..7df05c34e7bec871609a57fccaabbedec979c5b5 100644 --- a/include/net/netns/unix.h +++ b/include/net/netns/unix.h @@ -17,6 +17,9 @@ struct netns_unix { struct unix_table table; int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #endif /* __NETNS_UNIX_H__ */ diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 
423b52eca908d90009889b64764fbd4008a29529..e53dc9e98cec3ac4889fa13432e99b39cf9977d4 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -83,6 +83,8 @@ struct netns_xfrm { spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/net/nexthop.h b/include/net/nexthop.h index 2b12725de9c094f6ac89831576a2556d5dad5e64..73d1071a1b3ae825f42b061ed3fa8f23907aa2a4 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -114,6 +114,10 @@ struct nh_grp_entry { struct list_head nh_list; struct nexthop *nh_parent; /* nexthop of group with this entry */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; struct nh_group { @@ -126,6 +130,9 @@ struct nh_group { bool has_v4; struct nh_res_table __rcu *res_table; + + CK_KABI_RESERVE(1) + struct nh_grp_entry nh_entries[]; }; diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index 887e7946a597494d714f12f9f7fb2e53e32557d1..7835c8ecbda878aec1e182afda9ce3ad92fa8735 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -68,6 +68,9 @@ struct page_pool_params { /* private: used by test code only */ void (*init_callback)(struct page *page, void *arg); void *init_arg; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #ifdef CONFIG_PAGE_POOL_STATS @@ -180,6 +183,8 @@ struct page_pool { refcount_t user_cnt; u64 destroy_cnt; + + CK_KABI_RESERVE(1) }; struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index c1fa6fee0acfa7e885b07f99a45ab06bd0cdc27c..135176bc4eec9e022a16e5f413e22861355b10c7 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -152,6 +152,15 @@ struct rtnl_link_ops { int (*fill_linkxstats)(struct sk_buff *skb, const struct net_device *dev, int *prividx, int attr); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; int __rtnl_link_register(struct rtnl_link_ops *ops); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 326d3a322c109e044cc0f0495bd58d7ea8638db7..0c1718df1ed785722c8cebb65ce958c49ed4c59d 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -127,6 +127,16 @@ struct Qdisc { struct rcu_head rcu; netdevice_tracker dev_tracker; struct lock_class_key root_lock_key; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + /* private data */ long privdata[] ____cacheline_aligned; }; @@ -278,6 +288,8 @@ struct Qdisc_class_ops { struct sk_buff *skb, struct tcmsg*); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); + + CK_KABI_RESERVE(1) }; /* Qdisc_class_ops flag values */ @@ -323,6 +335,8 @@ struct Qdisc_ops { u32 (*egress_block_get)(struct Qdisc *sch); struct module *owner; + + CK_KABI_RESERVE(1) }; @@ -398,6 +412,8 @@ struct tcf_proto_ops { struct module *owner; int flags; + + CK_KABI_RESERVE(1) }; /* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags diff --git a/include/net/snmp.h b/include/net/snmp.h index 468a67836e2f1fcf744b845784312175fab63668..58b51cf331ef417528e9e347159afeb22da34f80 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -91,6 +91,8 @@ struct icmpv6msg_mib_device { #define TCP_MIB_MAX __TCP_MIB_MAX struct tcp_mib { unsigned long mibs[TCP_MIB_MAX]; + + 
CK_KABI_RESERVE(1) }; /* UDP */ diff --git a/include/net/sock.h b/include/net/sock.h index dc625f94ee37b759a762f74bb40c854e5d9a278a..e4fa259c920f5343557cd86ac18d14734d5f216a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -545,6 +545,15 @@ struct sock { struct rcu_head sk_rcu; netns_tracker ns_tracker; struct hlist_node sk_bind2_node; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; enum sk_pacing { @@ -1369,6 +1378,15 @@ struct proto { struct list_head node; int (*diag_destroy)(struct sock *sk, int err); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) } __randomize_layout; int proto_register(struct proto *prot, int alloc_slab); @@ -1532,6 +1550,8 @@ proto_memory_pressure(struct proto *prot) struct prot_inuse { int all; int val[PROTO_INUSE_NR]; + + CK_KABI_RESERVE(1) }; static inline void sock_prot_inuse_add(const struct net *net, diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 6ec140b0a61bf157431aac2068e044e04ab03c8c..d79eab371b3bf0f76ee310afe244bcb440424b84 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -26,6 +26,9 @@ struct sock_reuseport { unsigned int bind_inany:1; unsigned int has_conns:1; struct bpf_prog __rcu *prog; /* optional BPF sock selector */ + + CK_KABI_RESERVE(1) + struct sock *socks[]; /* array of sock pointers */ }; diff --git a/include/net/tcp.h b/include/net/tcp.h index b3917af309e0f1c16423f70378cad5cda6bb08b3..e6399c51acf83a6770271e517ed732b9c2b67d70 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1060,6 +1060,9 @@ struct rate_sample { bool is_app_limited; /* is sample from packet with bubble in pipe? */ bool is_retrans; /* is sample from retransmission? */ bool is_ack_delayed; /* is this (likely) a delayed ACK? 
*/ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct tcp_congestion_ops { @@ -1112,6 +1115,11 @@ struct tcp_congestion_ops { void (*init)(struct sock *sk); /* cleanup private data (optional) */ void (*release)(struct sock *sk); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned_in_smp; int tcp_register_congestion_control(struct tcp_congestion_ops *type); diff --git a/include/net/tls.h b/include/net/tls.h index 6c642ea18050418c77b86cc0f669f9dffbd280cf..f0f8309c2db60279ca47a2ded565968b37ee619f 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -191,6 +191,11 @@ enum tls_context_flags { struct cipher_context { char *iv; char *rec_seq; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; union tls_crypto_context { @@ -261,6 +266,11 @@ struct tls_context { struct list_head list; refcount_t refcount; struct rcu_head rcu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum tls_offload_ctx_dir { @@ -279,6 +289,13 @@ struct tlsdev_ops { int (*tls_dev_resync)(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn, enum tls_offload_ctx_dir direction); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; enum tls_offload_sync_type { diff --git a/include/net/udp.h b/include/net/udp.h index 488a6d2babccf26edfbaecc525f25e03d86b7d62..f2be158ad07a82bc3edd908c62c037c0f81dc606 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -50,29 +50,56 @@ struct udp_skb_cb { #define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb)) /** - * struct udp_hslot - UDP hash slot + * struct udp_hslot - UDP hash slot used by udp_table.hash/hash4 * * @head: head of list of sockets + * @nulls_head: head of list of sockets, only used by hash4 * @count: number of sockets in 'head' list * @lock: spinlock protecting changes to head/count */ struct udp_hslot { - struct hlist_head head; + union { + struct hlist_head head; + /* hash4 uses hlist_nulls to avoid moving wrongly onto another + * hlist, because rehash() can happen with lookup(). 
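+	 *
+	 * A lookup on nulls_head can use the usual hlist_nulls retry
+	 * idiom; a sketch (not part of this header) where match_4tuple()
+	 * stands in for the real 4-tuple comparison and slot_index is the
+	 * value the slot's nulls marker was initialized with:
+	 *
+	 *	begin:
+	 *	sk_nulls_for_each_rcu(sk, node, &hslot4->nulls_head)
+	 *		if (match_4tuple(sk, saddr, sport, daddr, dport))
+	 *			return sk;
+	 *	if (get_nulls_value(node) != slot_index)
+	 *		goto begin;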
+ */ + struct hlist_nulls_head nulls_head; + }; int count; spinlock_t lock; -} __attribute__((aligned(2 * sizeof(long)))); +} __aligned(2 * sizeof(long)); + +/** + * struct udp_hslot_main - UDP hash slot used by udp_table.hash2 + * + * @hslot: basic hash slot + * @hash4_cnt: number of sockets in hslot4 of the same + * (local port, local address) + */ +struct udp_hslot_main { + struct udp_hslot hslot; /* must be the first member */ +#if !IS_ENABLED(CONFIG_BASE_SMALL) + u32 hash4_cnt; +#endif +} __aligned(2 * sizeof(long)); +#define UDP_HSLOT_MAIN(__hslot) ((struct udp_hslot_main *)(__hslot)) /** * struct udp_table - UDP table * * @hash: hash table, sockets are hashed on (local port) * @hash2: hash table, sockets are hashed on (local port, local address) + * @hash4: hash table, connected sockets are hashed on + * (local port, local address, remote port, remote address) * @mask: number of slots in hash tables, minus 1 * @log: log2(number of slots in hash table) */ struct udp_table { struct udp_hslot *hash; - struct udp_hslot *hash2; + struct udp_hslot_main *hash2; +#if !IS_ENABLED(CONFIG_BASE_SMALL) + struct udp_hslot *hash4; +#endif unsigned int mask; unsigned int log; }; @@ -83,6 +110,7 @@ static inline struct udp_hslot *udp_hashslot(struct udp_table *table, { return &table->hash[udp_hashfn(net, num, table->mask)]; } + /* * For secondary hash, net_hash_mix() is performed before calling * udp_hashslot2(), this explains difference with udp_hashslot() @@ -90,8 +118,89 @@ static inline struct udp_hslot *udp_hashslot(struct udp_table *table, static inline struct udp_hslot *udp_hashslot2(struct udp_table *table, unsigned int hash) { - return &table->hash2[hash & table->mask]; + return &table->hash2[hash & table->mask].hslot; +} + +#if IS_ENABLED(CONFIG_BASE_SMALL) +static inline void udp_table_hash4_init(struct udp_table *table) +{ +} + +static inline struct udp_hslot *udp_hashslot4(struct udp_table *table, + unsigned int hash) +{ + BUILD_BUG(); + return NULL; +} + +static inline bool udp_hashed4(const struct sock *sk) +{ + return false; +} + +static inline unsigned int udp_hash4_slot_size(void) +{ + return 0; +} + +static inline bool udp_has_hash4(const struct udp_hslot *hslot2) +{ + return false; +} + +static inline void udp_hash4_inc(struct udp_hslot *hslot2) +{ +} + +static inline void udp_hash4_dec(struct udp_hslot *hslot2) +{ } +#else /* !CONFIG_BASE_SMALL */ + +/* Must be called with table->hash2 initialized */ +static inline void udp_table_hash4_init(struct udp_table *table) +{ + table->hash4 = (void *)(table->hash2 + (table->mask + 1)); + for (int i = 0; i <= table->mask; i++) { + table->hash2[i].hash4_cnt = 0; + + INIT_HLIST_NULLS_HEAD(&table->hash4[i].nulls_head, i); + table->hash4[i].count = 0; + spin_lock_init(&table->hash4[i].lock); + } +} + +static inline struct udp_hslot *udp_hashslot4(struct udp_table *table, + unsigned int hash) +{ + return &table->hash4[hash & table->mask]; +} + +static inline bool udp_hashed4(const struct sock *sk) +{ + return !hlist_nulls_unhashed(&udp_sk(sk)->udp_lrpa_node); +} + +static inline unsigned int udp_hash4_slot_size(void) +{ + return sizeof(struct udp_hslot); +} + +static inline bool udp_has_hash4(const struct udp_hslot *hslot2) +{ + return UDP_HSLOT_MAIN(hslot2)->hash4_cnt; +} + +static inline void udp_hash4_inc(struct udp_hslot *hslot2) +{ + UDP_HSLOT_MAIN(hslot2)->hash4_cnt++; +} + +static inline void udp_hash4_dec(struct udp_hslot *hslot2) +{ + UDP_HSLOT_MAIN(hslot2)->hash4_cnt--; +} +#endif /* CONFIG_BASE_SMALL */ extern struct proto 
udp_prot; @@ -192,13 +301,29 @@ static inline int udp_lib_hash(struct sock *sk) } void udp_lib_unhash(struct sock *sk); -void udp_lib_rehash(struct sock *sk, u16 new_hash); +void udp_lib_rehash(struct sock *sk, u16 new_hash, u16 new_hash4); +u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport, + const __be32 faddr, const __be16 fport); static inline void udp_lib_close(struct sock *sk, long timeout) { sk_common_release(sk); } +/* hash4 routines shared between UDPv4/6 */ +#if IS_ENABLED(CONFIG_BASE_SMALL) +static inline void udp_lib_hash4(struct sock *sk, u16 hash) +{ +} + +static inline void udp4_hash4(struct sock *sk) +{ +} +#else /* !CONFIG_BASE_SMALL */ +void udp_lib_hash4(struct sock *sk, u16 hash); +void udp4_hash4(struct sock *sk); +#endif /* CONFIG_BASE_SMALL */ + int udp_lib_get_port(struct sock *sk, unsigned short snum, unsigned int hash2_nulladdr); diff --git a/include/net/xdp.h b/include/net/xdp.h index de08c8e0d13483a6840877dc585f6e87de07553d..97bfe6ae09aef99aea4a26df52f907080a231c49 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -64,6 +64,11 @@ struct xdp_rxq_info { struct xdp_mem_info mem; unsigned int napi_id; u32 frag_size; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned; /* perf critical, avoid false-sharing */ struct xdp_txq_info { @@ -175,6 +180,8 @@ struct xdp_frame { struct net_device *dev_rx; /* used by cpumap */ u32 frame_sz; u32 flags; /* supported values defined in xdp_buff_flags */ + + CK_KABI_RESERVE(1) }; static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 93a9866ee481fa400a344f3505ecaae2c2757eab..87bf8fa5de590ed1e66b5c04691b7355a916195e 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -124,6 +124,8 @@ struct xfrm_state_walk { u8 proto; u32 seq; struct xfrm_address_filter *filter; + + CK_KABI_RESERVE(1) }; enum { diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index b0bdff26fc882ccafe8bdd3c2727366198eabb94..4d1797ec652602f7fd04742879d2284a9d5ef2ca 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -87,6 +87,10 @@ struct xsk_buff_pool { * sockets share a single cq when the same netdev and queue id is shared. */ spinlock_t cq_lock; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + struct xdp_buff_xsk *free_heads[]; }; diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 6ae00983a6126e7262016182a1b1470917e13129..ccb8e7ae16993782d91c1a6f72c76812ed91620c 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -141,6 +141,11 @@ struct scsi_cmnd { * to be at an address < 16Mb). */ int result; /* Status code from lower level driver */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. 
*/ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index c38f4fe5e64cf4f14b668328ab0cfac76ea5d496..b3d2868c9a7e26ca2cca2eaf742f766a16a3830e 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -280,6 +280,14 @@ struct scsi_device { struct mutex state_mutex; enum scsi_device_state sdev_state; struct task_struct *quiesced_by; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + unsigned long sdev_data[]; } __attribute__((aligned(sizeof(unsigned long)))); @@ -366,6 +374,12 @@ struct scsi_target { char scsi_level; enum scsi_target_state state; void *hostdata; /* available to low-level driver */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + unsigned long starget_data[]; /* for the transport */ /* starget_data must be the last element!!!! */ } __attribute__((aligned(sizeof(unsigned long)))); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index f9d5ce6170a706db7b812bd1c589b3c57989e8ec..9232930d73c43ffc37caa82e6c53efaf923cf96f 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -497,6 +497,11 @@ struct scsi_host_template { /* Delay for runtime autosuspend */ int rpm_autosuspend_delay; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -710,6 +715,13 @@ struct Scsi_Host { */ struct device *dma_dev; + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + /* * We should ensure that this is aligned, both for better performance * and also because some compilers (m68k) don't automatically force diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h index 3a513be502437f991cdb29518a83146e72f48a87..8f421b9f7585ca1714197c8c102d7a19e622d1a2 100644 --- a/include/soc/tegra/fuse.h +++ b/include/soc/tegra/fuse.h @@ -17,6 +17,7 @@ #define TEGRA186 0x18 #define TEGRA194 0x19 #define TEGRA234 0x23 +#define TEGRA241 0x24 #define TEGRA264 0x26 #define TEGRA_FUSE_SKU_CALIB_0 0xf0 diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h index aadb845d281dd7d3c0532111d82cdfae3e742059..c545875d0ff18e6117e1ad0ffc61252240fd16a2 100644 --- a/include/soc/tegra/pmc.h +++ b/include/soc/tegra/pmc.h @@ -148,10 +148,6 @@ enum tegra_io_pad { TEGRA_IO_PAD_AO_HV, }; -/* deprecated, use TEGRA_IO_PAD_{HDMI,LVDS} instead */ -#define TEGRA_IO_RAIL_HDMI TEGRA_IO_PAD_HDMI -#define TEGRA_IO_RAIL_LVDS TEGRA_IO_PAD_LVDS - #ifdef CONFIG_SOC_TEGRA_PMC int tegra_powergate_power_on(unsigned int id); int tegra_powergate_power_off(unsigned int id); @@ -164,10 +160,6 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk, int tegra_io_pad_power_enable(enum tegra_io_pad id); int tegra_io_pad_power_disable(enum tegra_io_pad id); -/* deprecated, use tegra_io_pad_power_{enable,disable}() instead */ -int tegra_io_rail_power_on(unsigned int id); -int tegra_io_rail_power_off(unsigned int id); - void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode); void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode); @@ -211,16 +203,6 @@ static inline int tegra_io_pad_get_voltage(enum tegra_io_pad id) return -ENOSYS; } -static inline int tegra_io_rail_power_on(unsigned int id) -{ - return -ENOSYS; -} - -static inline int tegra_io_rail_power_off(unsigned int id) -{ - return -ENOSYS; -} - static inline void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode) { } diff --git a/include/sound/hdaudio.h 
b/include/sound/hdaudio.h index 32c59053b48edca72dcf57cda55b674c3a9dd5e3..101183b8d3bcd897191fcdae216bbc624248d488 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -350,6 +350,7 @@ struct hdac_bus { bool needs_damn_long_delay:1; bool not_use_interrupts:1; /* prohibiting the RIRB IRQ */ bool access_sdnctl_in_dword:1; /* accessing the sdnctl register by dword */ + bool hygon_dword_access:1; int poll_count; diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index 2b2a975efd20774e178057680f8fc3e36d0a3639..d05759d1853896d60b563a7ccb57282a16750877 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -78,10 +78,10 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_fast_isolate_freepage #ifdef CONFIG_COMPACTION TRACE_EVENT(mm_compaction_migratepages, - TP_PROTO(struct compact_control *cc, + TP_PROTO(unsigned int nr_migratepages, unsigned int nr_succeeded), - TP_ARGS(cc, nr_succeeded), + TP_ARGS(nr_migratepages, nr_succeeded), TP_STRUCT__entry( __field(unsigned long, nr_migrated) @@ -90,7 +90,7 @@ TRACE_EVENT(mm_compaction_migratepages, TP_fast_assign( __entry->nr_migrated = nr_succeeded; - __entry->nr_failed = cc->nr_migratepages - nr_succeeded; + __entry->nr_failed = nr_migratepages - nr_succeeded; ), TP_printk("nr_migrated=%lu nr_failed=%lu", diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h index e18684b02c3dfa0c138da518172a3cb99998cbd4..455ab94a511c877553ec81bed1a110bf3639f3a6 100644 --- a/include/trace/events/erofs.h +++ b/include/trace/events/erofs.h @@ -143,7 +143,8 @@ TRACE_EVENT(erofs_readpages, __entry->raw) ); -DECLARE_EVENT_CLASS(erofs__map_blocks_enter, +TRACE_EVENT(erofs_map_blocks_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, unsigned int flags), @@ -171,21 +172,8 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_enter, __entry->flags ? 
show_map_flags(__entry->flags) : "NULL") ); -DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned flags), - - TP_ARGS(inode, map, flags) -); - -DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags), - - TP_ARGS(inode, map, flags) -); +TRACE_EVENT(erofs_map_blocks_exit, -DECLARE_EVENT_CLASS(erofs__map_blocks_exit, TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, unsigned int flags, int ret), @@ -223,20 +211,6 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_exit, show_mflags(__entry->mflags), __entry->ret) ); -DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned flags, int ret), - - TP_ARGS(inode, map, flags, ret) -); - -DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags, int ret), - - TP_ARGS(inode, map, flags, ret) -); - TRACE_EVENT(erofs_destroy_inode, TP_PROTO(struct inode *inode), diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 37f2443b3cdb0f2e2dcd4588df6841cd3aabbe92..9277524e84ebe73eb2c93db7bc18104aeb4022fb 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -174,10 +174,10 @@ TRACE_EVENT(mm_collapse_huge_page_swapin, TRACE_EVENT(mm_khugepaged_scan_file, - TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file, + TP_PROTO(struct mm_struct *mm, struct folio *folio, struct file *file, int present, int swap, int result), - TP_ARGS(mm, page, file, present, swap, result), + TP_ARGS(mm, folio, file, present, swap, result), TP_STRUCT__entry( __field(struct mm_struct *, mm) @@ -190,7 +190,7 @@ TRACE_EVENT(mm_khugepaged_scan_file, TP_fast_assign( __entry->mm = mm; - __entry->pfn = page ? page_to_pfn(page) : -1; + __entry->pfn = folio ? 
folio_pfn(folio) : -1; __assign_str(filename, file->f_path.dentry->d_iname); __entry->present = present; __entry->swap = swap; diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h index af0af3f1d9b7c081fb61ab582482302094409ff3..70323acde1de2cb174b2e31ac1ff811df1ee2e83 100644 --- a/include/trace/events/intel_ifs.h +++ b/include/trace/events/intel_ifs.h @@ -10,31 +10,58 @@ TRACE_EVENT(ifs_status, - TP_PROTO(int cpu, int start, int stop, u64 status), + TP_PROTO(int batch, int start, int stop, u64 status), - TP_ARGS(cpu, start, stop, status), + TP_ARGS(batch, start, stop, status), TP_STRUCT__entry( + __field( int, batch ) __field( u64, status ) - __field( int, cpu ) __field( u16, start ) __field( u16, stop ) ), TP_fast_assign( - __entry->cpu = cpu; + __entry->batch = batch; __entry->start = start; __entry->stop = stop; __entry->status = status; ), - TP_printk("cpu: %d, start: %.4x, stop: %.4x, status: %.16llx", - __entry->cpu, + TP_printk("batch: 0x%.2x, start: 0x%.4x, stop: 0x%.4x, status: 0x%.16llx", + __entry->batch, __entry->start, __entry->stop, __entry->status) ); +TRACE_EVENT(ifs_sbaf, + + TP_PROTO(int batch, union ifs_sbaf activate, union ifs_sbaf_status status), + + TP_ARGS(batch, activate, status), + + TP_STRUCT__entry( + __field( u64, status ) + __field( int, batch ) + __field( u16, bundle ) + __field( u16, pgm ) + ), + + TP_fast_assign( + __entry->status = status.data; + __entry->batch = batch; + __entry->bundle = activate.bundle_idx; + __entry->pgm = activate.pgm_idx; + ), + + TP_printk("batch: 0x%.2x, bundle_idx: 0x%.4x, pgm_idx: 0x%.4x, status: 0x%.16llx", + __entry->batch, + __entry->bundle, + __entry->pgm, + __entry->status) +); + #endif /* _TRACE_IFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index 5646ae15a957ad90804255c21a486323e8fddbb7..5779ac0df0392e12deda56e3ae98235df2964009 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -245,6 +245,56 @@ TRACE_EVENT(jbd2_handle_stats, __entry->dirtied_blocks) ); +TRACE_EVENT(jbd2_slow_handle_stats, + TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + unsigned int line_no, int interval, int sync, + int requested_blocks, int dirtied_blocks, + unsigned long trans_wait, unsigned long space_wait, + u64 sched_wait, u64 io_wait), + + TP_ARGS(dev, tid, type, line_no, interval, sync, + requested_blocks, dirtied_blocks, trans_wait, space_wait, + sched_wait, io_wait), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long, tid) + __field(unsigned int, type) + __field(unsigned int, line_no) + __field(int, interval) + __field(int, sync) + __field(int, requested_blocks) + __field(int, dirtied_blocks) + __field(unsigned long, trans_wait) + __field(unsigned long, space_wait) + __field(u64, sched_wait) + __field(u64, io_wait) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->tid = tid; + __entry->type = type; + __entry->line_no = line_no; + __entry->interval = interval; + __entry->sync = sync; + __entry->requested_blocks = requested_blocks; + __entry->dirtied_blocks = dirtied_blocks; + __entry->trans_wait = trans_wait; + __entry->space_wait = space_wait; + __entry->sched_wait = sched_wait; + __entry->io_wait = io_wait; + ), + + TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d " + "sync %d requested_blocks %d dirtied_blocks %d " + "trans_wait %lu space_wait %lu sched_wait %llu io_wait %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + __entry->type, 
__entry->line_no, __entry->interval, + __entry->sync, __entry->requested_blocks, + __entry->dirtied_blocks, __entry->trans_wait, + __entry->space_wait, __entry->sched_wait, __entry->io_wait) +); TRACE_EVENT(jbd2_run_stats, TP_PROTO(dev_t dev, tid_t tid, struct transaction_run_stats_s *stats), diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 3bd31ea23fee9e294659d4da406e60d4c6e2f23b..0dc9bb18c7bace31be21f47ca1f658270a4e6db8 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -461,7 +461,7 @@ TRACE_EVENT(kvm_set_spte_hva, TP_ARGS(hva), TP_STRUCT__entry( - __field( unsigned long, hva ) + __field(unsigned long, hva) ), TP_fast_assign( diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index e010618f93264f0d3a303d164e93957fcf0f8f29..4ad037141eb700fb2e8b06250303d37aeea5f952 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -60,9 +60,16 @@ #define __def_gfpflag_names_kasan #endif -#define show_gfp_flags(flags) \ - (flags) ? __print_flags(flags, "|", \ - __def_gfpflag_names __def_gfpflag_names_kasan \ +#ifdef CONFIG_KFENCE +#define __def_gfpflag_names_kfence , \ + gfpflag_string(__GFP_NOKFENCE) +#else +#define __def_gfpflag_names_kfence +#endif + +#define show_gfp_flags(flags) \ + (flags) ? __print_flags(flags, "|", \ + __def_gfpflag_names __def_gfpflag_names_kasan __def_gfpflag_names_kfence \ ) : "none" #ifdef CONFIG_MMU @@ -95,6 +102,12 @@ #define IF_HAVE_PG_ARCH_X(_name) #endif +#ifdef CONFIG_KFENCE +#define IF_HAVE_PG_KFENCE(_name) ,{1UL << PG_##_name, __stringify(_name)} +#else +#define IF_HAVE_PG_KFENCE(_name) +#endif + #define DEF_PAGEFLAG_NAME(_name) { 1UL << PG_##_name, __stringify(_name) } #define __def_pageflag_names \ @@ -125,7 +138,8 @@ IF_HAVE_PG_HWPOISON(hwpoison) \ IF_HAVE_PG_IDLE(idle) \ IF_HAVE_PG_IDLE(young) \ IF_HAVE_PG_ARCH_X(arch_2) \ -IF_HAVE_PG_ARCH_X(arch_3) +IF_HAVE_PG_ARCH_X(arch_3) \ +IF_HAVE_PG_KFENCE(kfence) #define show_page_flags(flags) \ (flags) ? 
__print_flags(flags, "|", \ diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index 7b1ddffa3dfc825f431bc575f4e86a3509dc9426..5c0da806115136813781c172dc9414616e783a70 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -108,6 +108,13 @@ DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb, */ DEFINE_EVENT(tcp_event_sk_skb, tcp_send_reset, + TP_PROTO(const struct sock *sk, const struct sk_buff *skb__nullable), + + TP_ARGS(sk, skb__nullable) +); + +DEFINE_EVENT(tcp_event_sk_skb, tcp_pkt_recv, + TP_PROTO(const struct sock *sk, const struct sk_buff *skb), TP_ARGS(sk, skb) @@ -187,6 +194,20 @@ DEFINE_EVENT(tcp_event_sk, tcp_rcv_space_adjust, TP_ARGS(sk) ); +DEFINE_EVENT(tcp_event_sk, tcp_data_send, + + TP_PROTO(struct sock *sk), + + TP_ARGS(sk) +); + +DEFINE_EVENT(tcp_event_sk, tcp_data_acked, + + TP_PROTO(struct sock *sk), + + TP_ARGS(sk) +); + TRACE_EVENT(tcp_retransmit_synack, TP_PROTO(const struct sock *sk, const struct request_sock *req), diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 54e353c9f919fcfdc70c1cc5c53532f6accd4f29..0e190e112dc4a44be72d24d1bd9db1cea8b9a277 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -441,6 +441,29 @@ TRACE_EVENT(writeback_bdi_register, ) ); +#ifdef CONFIG_CGROUP_WRITEBACK +TRACE_EVENT(insert_memcg_blkcg_link, + TP_PROTO(struct cgroup_subsys_state *memcg_css, + struct cgroup_subsys_state *blkcg_css, + struct cgroup_subsys_state *old_blkcg_css), + TP_ARGS(memcg_css, blkcg_css, old_blkcg_css), + TP_STRUCT__entry( + __field(unsigned int, memcg_ino) + __field(unsigned int, blkcg_ino) + __field(unsigned int, old_blkcg_ino) + ), + TP_fast_assign( + __entry->memcg_ino = kernfs_ino(memcg_css->cgroup->kn); + __entry->blkcg_ino = kernfs_ino(blkcg_css->cgroup->kn); + __entry->old_blkcg_ino = old_blkcg_css ? 
+ kernfs_ino(old_blkcg_css->cgroup->kn) : 0; + ), + TP_printk("memcg_ino=%u blkcg_ino=%u old_blkcg_ino=%u", + __entry->memcg_ino, __entry->blkcg_ino, __entry->old_blkcg_ino + ) +); +#endif + DECLARE_EVENT_CLASS(wbc_class, TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), TP_ARGS(wbc, bdi), diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h index af0630ba5437d4f3f15f4ee0b4ca94843a813a9f..a5375679dd503dbef5acc176442ecd073773aef3 100644 --- a/include/uapi/linux/arm_sdei.h +++ b/include/uapi/linux/arm_sdei.h @@ -24,6 +24,8 @@ #define SDEI_1_0_FN_SDEI_INTERRUPT_RELEASE SDEI_1_0_FN(0x0E) #define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11) #define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12) +#define SDEI_1_0_FN_SDEI_CLEAR_EOI SDEI_1_0_FN(0x18) +#define SDEI_1_0_FN_SET_SECURE_TIMER_PERIOD SDEI_1_0_FN(0x19) #define SDEI_VERSION_MAJOR_SHIFT 48 #define SDEI_VERSION_MAJOR_MASK 0x7fff diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index d676ed2b246ec269a23f005e4b89c46dfb4504b4..f428015e85de916a65c7e150814eb61f7cbcb33a 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -441,6 +441,7 @@ enum { #define AUDIT_ARCH_XTENSA (EM_XTENSA) #define AUDIT_ARCH_LOONGARCH32 (EM_LOONGARCH|__AUDIT_ARCH_LE) #define AUDIT_ARCH_LOONGARCH64 (EM_LOONGARCH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_SW64 (EM_SW64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_PERM_EXEC 1 #define AUDIT_PERM_WRITE 2 diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 431bc700bcfb93629c660043dcfcb05b7f7dd3c1..806c7535537cb5c3e8e77d110e085619e9c0962d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1327,6 +1327,9 @@ enum { /* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */ BPF_F_PATH_FD = (1U << 14), + +/* Flag for value_type_btf_obj_fd, the fd is available */ + BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15), }; /* Flags for BPF_PROG_QUERY. */ @@ -1400,6 +1403,11 @@ union bpf_attr { * to using 5 hash functions). */ __u64 map_extra; + + __s32 value_type_btf_obj_fd; /* fd pointing to a BTF + * type data for + * btf_vmlinux_value_type_id. + */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -6472,7 +6480,7 @@ struct bpf_map_info { __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; - __u32 :32; /* alignment pad */ + __u32 btf_vmlinux_id; __u64 map_extra; } __attribute__((aligned(8))); diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h index ef38c2bc5ab7a58ba03d309654051b25a2ac2933..32458706a403414cc6cb63880b6ee52ee598b63f 100644 --- a/include/uapi/linux/elf-em.h +++ b/include/uapi/linux/elf-em.h @@ -59,6 +59,7 @@ * up with a final number. */ #define EM_ALPHA 0x9026 +#define EM_SW64 0x9916 /* Bogus old m32r magic number, used by old tools. */ #define EM_CYGNUS_M32R 0x9041 diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index b7b56871029c58148cace2e383249cd193e29151..95ae282849976351e908ec3e22efe911b9779149 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -64,6 +64,24 @@ struct fstrim_range { __u64 minlen; }; +/* + * We include a length field because some filesystems (vfat) have an identifier + * that we do want to expose as a UUID, but doesn't have the standard length. + * + * We use a fixed size buffer because this interface will, by fiat, never + * support "UUIDs" longer than 16 bytes; we don't want to force all downstream + * users to have to deal with that.
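+ *
+ * A short usage sketch (illustrative only; fd may be any open file on
+ * the filesystem, and handle_uuid() is a hypothetical consumer of the
+ * first .len bytes of .uuid):
+ *
+ *	struct fsuuid2 u = {};
+ *
+ *	if (ioctl(fd, FS_IOC_GETFSUUID, &u) == 0)
+ *		handle_uuid(u.uuid, u.len);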
+ */ +struct fsuuid2 { + __u8 len; + __u8 uuid[16]; +}; + +struct fs_sysfs_path { + __u8 len; + __u8 name[128]; +}; + /* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */ #define FILE_DEDUPE_RANGE_SAME 0 #define FILE_DEDUPE_RANGE_DIFFERS 1 @@ -215,6 +233,13 @@ struct fsxattr { #define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr) #define FS_IOC_GETFSLABEL _IOR(0x94, 49, char[FSLABEL_MAX]) #define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX]) +/* Returns the external filesystem UUID, the same one blkid returns */ +#define FS_IOC_GETFSUUID _IOR(0x15, 0, struct fsuuid2) +/* + * Returns the path component under /sys/fs/ that refers to this filesystem; + * also /sys/kernel/debug/ for filesystems with debugfs exports + */ +#define FS_IOC_GETFSSYSFSPATH _IOR(0x15, 1, struct fs_sysfs_path) /* * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS) diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index e7418d15fe3906507827025545dcd5348202ff30..730e620286a7ebea94025f89f98640079195908f 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -211,6 +211,10 @@ * 7.39 * - add FUSE_DIRECT_IO_ALLOW_MMAP * - add FUSE_STATX and related structures + * + * 7.40 + * - add FUSE_NO_EXPORT_SUPPORT init flag + * - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag */ #ifndef _LINUX_FUSE_H @@ -410,6 +414,9 @@ struct fuse_file_lock { * symlink and mknod (single group that matches parent) * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. + * FUSE_NO_EXPORT_SUPPORT: explicitly disable export support + * FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit + * of the request ID indicates resend requests */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -449,10 +456,15 @@ struct fuse_file_lock { #define FUSE_CREATE_SUPP_GROUP (1ULL << 34) #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) #define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) +#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38) +#define FUSE_HAS_RESEND (1ULL << 39) /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP +#define FUSE_DELETE_STALE (1ULL << 58) +/* The 59th bit is left to FUSE_DIO_SHARED_MMAP */ + /** * CUSE INIT request/reply flags * @@ -635,6 +647,7 @@ enum fuse_notify_code { FUSE_NOTIFY_STORE = 4, FUSE_NOTIFY_RETRIEVE = 5, FUSE_NOTIFY_DELETE = 6, + FUSE_NOTIFY_RESEND = 7, FUSE_NOTIFY_CODE_MAX, }; @@ -960,6 +973,14 @@ struct fuse_fallocate_in { uint32_t padding; }; +/** + * FUSE request unique ID flag + * + * Indicates whether this is a resend request. The receiver should handle this + * request accordingly. 
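+ *
+ * For example (a sketch; replay_dedup() is a hypothetical helper in a
+ * daemon that tracks in-flight requests by unique id):
+ *
+ *	if (in->unique & FUSE_UNIQUE_RESEND)
+ *		replay_dedup(in->unique & ~FUSE_UNIQUE_RESEND);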
+ */ +#define FUSE_UNIQUE_RESEND (1ULL << 63) + struct fuse_in_header { uint32_t len; uint32_t opcode; diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index 606b52e88ce335b02e3efa5a558fd3bba20481ab..3d1987e1bb2dd65557d11c2fe99c89c2d9b1704c 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -31,6 +31,7 @@ enum idxd_scmd_stat { IDXD_SCMD_WQ_IRQ_ERR = 0x80100000, IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000, IDXD_SCMD_DEV_EVL_ERR = 0x80120000, + IDXD_SCMD_WQ_NO_DRV_NAME = 0x80200000, }; #define IDXD_SCMD_SOFTERR_MASK 0x80000000 diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 8e61f8b7c2ced1221179e3ada854f4bbba73f36f..fdea50f53da39958f8e9f63d18864e297722d9c7 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -165,6 +165,8 @@ enum { * Only one task is allowed to submit requests */ #define IORING_SETUP_SINGLE_ISSUER (1U << 12) +#define IORING_SETUP_IDLE_US (1U << 30) /* unit of thread_idle is microseconds */ +#define IORING_SETUP_SQPOLL_PERCPU (1U << 31) /* percpu SQ poll thread */ /* * Defer running task work to get events. @@ -466,6 +468,7 @@ struct io_cqring_offsets { #define IORING_ENTER_SQ_WAIT (1U << 2) #define IORING_ENTER_EXT_ARG (1U << 3) #define IORING_ENTER_REGISTERED_RING (1U << 4) +#define IORING_ENTER_SQ_SUBMIT_ON_IDLE (1U << 31) /* * Passed in for io_uring_setup(2). Copied back with updated info on success diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index b4ba0c0cbab6b8b1562fa359d34f9835a9cde757..c44eecf5d318e520a3c78c69e60b2d55a4d1618b 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -47,6 +47,8 @@ enum { IOMMUFD_CMD_VFIO_IOAS, IOMMUFD_CMD_HWPT_ALLOC, IOMMUFD_CMD_GET_HW_INFO, + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, }; /** @@ -347,10 +349,22 @@ struct iommu_vfio_ioas { }; #define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS) +/** + * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation + * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as + * the parent HWPT in a nesting configuration. + * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is + * enforced on device attachment + */ +enum iommufd_hwpt_alloc_flags { + IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, +}; + /** * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC) * @size: sizeof(struct iommu_hwpt_alloc) - * @flags: Must be 0 + * @flags: Combination of enum iommufd_hwpt_alloc_flags * @dev_id: The device to allocate this HWPT for * @pt_id: The IOAS to connect this HWPT to * @out_hwpt_id: The ID of the new HWPT @@ -404,6 +418,20 @@ enum iommu_hw_info_type { IOMMU_HW_INFO_TYPE_INTEL_VTD, }; +/** + * enum iommufd_hw_capabilities + * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking + * If available, it means the following APIs + * are supported: + * + * IOMMU_HWPT_GET_DIRTY_BITMAP + * IOMMU_HWPT_SET_DIRTY_TRACKING + * + */ +enum iommufd_hw_capabilities { + IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0, +}; + /** * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO) * @size: sizeof(struct iommu_hw_info) * @flags: Must be 0 * @dev_id: The device bound to the iommufd * @data_len: Input the length of a user buffer in bytes. Output the length of * data that kernel supports * @data_uptr: User pointer to a user-space buffer used by the kernel to fill * the iommu type specific hardware information data * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. + * @out_capabilities: Output the generic iommu capability info type as defined + * in the enum iommufd_hw_capabilities.
+ * @__reserved: Must be 0 * * Query an iommu type specific hardware information data from an iommu behind @@ -439,6 +469,81 @@ struct iommu_hw_info { __aligned_u64 data_uptr; __u32 out_data_type; __u32 __reserved; + __aligned_u64 out_capabilities; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) + +/* + * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty + * tracking + * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking + */ +enum iommufd_hwpt_set_dirty_tracking_flags { + IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1, +}; + +/** + * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING) + * @size: sizeof(struct iommu_hwpt_set_dirty_tracking) + * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @__reserved: Must be 0 + * + * Toggle dirty tracking on an HW pagetable. + */ +struct iommu_hwpt_set_dirty_tracking { + __u32 size; + __u32 flags; + __u32 hwpt_id; + __u32 __reserved; +}; +#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING) + +/** + * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits + * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing + * any dirty bits metadata. This flag + * can be passed in the expectation + * where the next operation is an unmap + * of the same IOVA range. + * + */ +enum iommufd_hwpt_get_dirty_bitmap_flags { + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1, +}; + +/** + * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP) + * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap) + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags + * @__reserved: Must be 0 + * @iova: base IOVA of the bitmap first bit + * @length: IOVA range size + * @page_size: page size granularity of each bit in the bitmap + * @data: bitmap where to set the dirty bits. Each bit in the bitmap + * represents one page_size chunk of the IOVA range, starting at @iova. + * + * Checking whether a given IOVA is dirty: + * + * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64)) + * + * Walk the IOMMU pagetables for a given IOVA range to return a bitmap + * with the dirty IOVAs. In doing so it will also by default clear any + * dirty bit metadata set in the IOPTE. + */ +struct iommu_hwpt_get_dirty_bitmap { + __u32 size; + __u32 hwpt_id; + __u32 flags; + __u32 __reserved; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 data; +}; +#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP) + #endif diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 01766dd839b076029b07d906a01bbf732e94dd5b..3be3e81c67ae26d7d75d2128908b676b85227e98 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -45,6 +45,7 @@ #define KEXEC_ARCH_AARCH64 (183 << 16) #define KEXEC_ARCH_RISCV (243 << 16) #define KEXEC_ARCH_LOONGARCH (258 << 16) +#define KEXEC_ARCH_SW64 (0x9916UL << 16) /* The artificial cap on the number of segments passed to kexec_load.
*/ #define KEXEC_SEGMENT_MAX 16 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 13065dd96132da65beb99f9455659c9b75ed109d..07ea54b36f1d31430f9c7260145ac58806b1676f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -264,6 +264,7 @@ struct kvm_xen_exit { #define KVM_EXIT_RISCV_SBI 35 #define KVM_EXIT_RISCV_CSR 36 #define KVM_EXIT_NOTIFY 37 +#define KVM_EXIT_LOONGARCH_IOCSR 38 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -336,6 +337,13 @@ struct kvm_run { __u32 len; __u8 is_write; } mmio; + /* KVM_EXIT_LOONGARCH_IOCSR */ + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } iocsr_io; /* KVM_EXIT_HYPERCALL */ struct { __u64 nr; @@ -1193,6 +1201,15 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_SEV_ES_GHCB 500 +#define KVM_CAP_HYGON_COCO_EXT 501 +/* support userspace to request firmware to build CSV3 guest's memory space */ +#define KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM (1 << 0) +/* support request to update CSV3 guest's memory region multiple times */ +#define KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA (1 << 1) +/* support request to inject secret to CSV3 guest */ +#define KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET (1 << 2) + #ifdef KVM_CAP_IRQ_ROUTING struct kvm_irq_routing_irqchip { @@ -1362,6 +1379,7 @@ struct kvm_dirty_tlb { #define KVM_REG_ARM64 0x6000000000000000ULL #define KVM_REG_MIPS 0x7000000000000000ULL #define KVM_REG_RISCV 0x8000000000000000ULL +#define KVM_REG_LOONGARCH 0x9000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL @@ -1453,7 +1471,15 @@ enum kvm_device_type { #define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_RISCV_AIA, #define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA + KVM_DEV_TYPE_LA_IOAPIC = 0x100, +#define KVM_DEV_TYPE_LA_IOAPIC KVM_DEV_TYPE_LA_IOAPIC + KVM_DEV_TYPE_LA_IPI, +#define KVM_DEV_TYPE_LA_IPI KVM_DEV_TYPE_LA_IPI + KVM_DEV_TYPE_LA_EXTIOI, +#define KVM_DEV_TYPE_LA_EXTIOI KVM_DEV_TYPE_LA_EXTIOI + KVM_DEV_TYPE_MAX, + }; struct kvm_vfio_spapr_tce { @@ -1563,6 +1589,11 @@ struct kvm_s390_ucas_mapping { /* Available with KVM_CAP_COUNTER_OFFSET */ #define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset) +/* ioctl for SW vcpu init*/ +#define KVM_SW64_VCPU_INIT _IO(KVMIO, 0xba) +#define KVM_SW64_GET_VCB _IO(KVMIO, 0xbc) +#define KVM_SW64_SET_VCB _IO(KVMIO, 0xbd) + /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) @@ -2256,4 +2287,106 @@ struct kvm_s390_zpci_op { /* flags for kvm_s390_zpci_op->u.reg_aen.flags */ #define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) +enum csv_cmd_id { + /* HYGON CSV batch command */ + KVM_CSV_COMMAND_BATCH = 0x18, + + KVM_CSV_NR_MAX, +}; + +struct kvm_csv_batch_list_node { + __u64 cmd_data_addr; + __u64 addr; + __u64 next_cmd_addr; +}; + +struct kvm_csv_command_batch { + __u32 command_id; + __u64 csv_batch_list_uaddr; +}; + +struct kvm_csv_send_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv_receive_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv_init { + __u64 userid_addr; + __u32 len; +}; + +/* ioctls for control vm during system reset, currently only for CSV */ +#define KVM_CONTROL_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) +#define KVM_CONTROL_POST_SYSTEM_RESET 
_IO(KVMIO, 0xe9) + +/* CSV3 command */ +enum csv3_cmd_id { + KVM_CSV3_NR_MIN = 0xc0, + + KVM_CSV3_INIT = KVM_CSV3_NR_MIN, + KVM_CSV3_LAUNCH_ENCRYPT_DATA, + KVM_CSV3_LAUNCH_ENCRYPT_VMCB, + KVM_CSV3_SEND_ENCRYPT_DATA, + KVM_CSV3_SEND_ENCRYPT_CONTEXT, + KVM_CSV3_RECEIVE_ENCRYPT_DATA, + KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT, + + KVM_CSV3_SET_GUEST_PRIVATE_MEMORY = 0xc8, + + KVM_CSV3_NR_MAX, +}; + +struct kvm_csv3_init_data { + __u64 nodemask; +}; + +struct kvm_csv3_launch_encrypt_data { + __u64 gpa; + __u64 uaddr; + __u32 len; +}; + +struct kvm_csv3_send_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv3_send_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv3_receive_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv3_receive_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 960c7e93d1a98a363df4c6e7eeaffee065512f7c..944fe133ae3c1144482a8ccfbc8f9bc77a4f306f 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -30,6 +30,10 @@ #define KVM_HC_SEND_IPI 10 #define KVM_HC_SCHED_YIELD 11 #define KVM_HC_MAP_GPA_RANGE 12 +#define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */ +#define KVM_HC_PSP_OP_OBSOLETE 101 /* Specific to Hygon platform */ +#define KVM_HC_PSP_COPY_FORWARD_OP 102 /* Specific to Hygon platform */ +#define KVM_HC_PSP_FORWARD_OP 103 /* Specific to Hygon platform */ /* * hypercalls use architecture specific diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 39c6a250dd1b92af18e3b4a72a047d2784f89382..3a64499b0f5d63734d632ab03cd1966211473d8c 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */ + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */ + PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -235,6 +237,8 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; @@ -982,6 +986,12 @@ enum perf_event_type { * { u64 nr; * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX * { u64 from, to, flags } lbr[nr]; + * # + * # The format of the counters is decided by the + * # "branch_counter_nr" and "branch_counter_width", + * # which are defined in the ABI. 
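+ * # A hedged example: with branch_counter_nr = 2 and
+ * # branch_counter_width = 3, the count of event i in
+ * # entry k is (cntr[k] >> (i * 3)) & 0x7.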
+ * # + * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi @@ -1427,6 +1437,9 @@ struct perf_branch_entry { reserved:31; }; +/* Size of used info bits in struct perf_branch_entry */ +#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 + union perf_sample_weight { __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) diff --git a/include/uapi/linux/psp-hygon.h b/include/uapi/linux/psp-hygon.h new file mode 100644 index 0000000000000000000000000000000000000000..7bb6a4f8b6e02ca7d359a3e078e4f36bb01595bd --- /dev/null +++ b/include/uapi/linux/psp-hygon.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Userspace interface for HYGON Platform Security Processor (PSP) + * commands. + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef __PSP_HYGON_USER_H__ +#define __PSP_HYGON_USER_H__ + +#include <linux/types.h> + +/*****************************************************************************/ +/***************************** CSV interface *********************************/ +/*****************************************************************************/ + +/** + * CSV guest/platform commands + */ +enum { + CSV_PLATFORM_INIT = 101, + CSV_PLATFORM_SHUTDOWN = 102, + CSV_DOWNLOAD_FIRMWARE = 128, + CSV_HGSC_CERT_IMPORT = 201, + + CSV_MAX, +}; + +/** + * struct csv_user_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters + * + * @hgscsk_cert_address: HGSCSK certificate chain + * @hgscsk_cert_len: length of HGSCSK certificate + * @hgsc_cert_address: HGSC certificate chain + * @hgsc_cert_len: length of HGSC certificate + */ +struct csv_user_data_hgsc_cert_import { + __u64 hgscsk_cert_address; /* In */ + __u32 hgscsk_cert_len; /* In */ + __u64 hgsc_cert_address; /* In */ + __u32 hgsc_cert_len; /* In */ +} __packed; + +/** + * struct csv_user_data_download_firmware - DOWNLOAD_FIRMWARE command parameters + * + * @address: physical address of CSV firmware image + * @length: length of the CSV firmware image + */ +struct csv_user_data_download_firmware { + __u64 address; /* In */ + __u32 length; /* In */ +} __packed; + +/** + * struct csv_guest_user_data_attestation - ATTESTATION command parameters + * + * @user_data: user specified data for the attestation report + * @mnonce: user's random nonce + * @hash: sm3 hash of the @user_data and @mnonce + */ +struct csv_guest_user_data_attestation { + __u8 user_data[64]; /* In */ + __u8 mnonce[16]; /* In */ + __u8 hash[32]; /* In */ +} __packed; + +#endif /* __PSP_HYGON_USER_H__ */ diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index f925a77f19ed1b42ab0626b1b897aefe1328b25d..8931c2bb0afe34263f4438c4dc7385cf45fd3c73 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -73,6 +73,8 @@ enum tcmu_opcode { struct tcmu_cmd_entry_hdr { __u32 len_op; __u16 cmd_id; +#define TCMU_KFLAG_ZERO_COPY 0x1 +#define TCMU_KFLAG_BYPASS_DATA_AREA 0x2 __u8 kflags; #define TCMU_UFLAG_UNKNOWN_OP 0x1 #define TCMU_UFLAG_READ_LEN 0x2 @@ -185,4 +187,21 @@ enum tcmu_genl_attr { }; #define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1) +struct tcmu_data_xfer { + __u16 cmd_id; + __u16 __pad1; + __u32 iov_cnt; + struct iovec __user *iovec; +}; + +#define TCMU_IOCTL_CMD_COPY_TO_SGL _IOW('T', 0xe0, struct tcmu_data_xfer) +#define TCMU_IOCTL_CMD_COPY_FROM_SGL _IOR('T', 0xe1, struct tcmu_data_xfer) +#define TCMU_IOCTL_CMD_ZEROCOPY _IOW('T', 0xe2, struct tcmu_cmd_zerocopy) + 
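+/*
+ * A usage sketch for the copy ioctls (illustrative only; uio_fd and
+ * ent, a completed command entry, are assumptions of the example, not
+ * part of this header):
+ *
+ *	struct iovec iov = { .iov_base = buf, .iov_len = len };
+ *	struct tcmu_data_xfer xfer = {
+ *		.cmd_id = ent->cmd_id,
+ *		.iov_cnt = 1,
+ *		.iovec = &iov,
+ *	};
+ *
+ *	ioctl(uio_fd, TCMU_IOCTL_CMD_COPY_FROM_SGL, &xfer);
+ */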
+struct tcmu_cmd_zerocopy { + struct iovec __user *iov; + __u32 iov_cnt; + __u16 cmd_id; +}; + #endif diff --git a/include/uapi/linux/virtfuse.h b/include/uapi/linux/virtfuse.h new file mode 100644 index 0000000000000000000000000000000000000000..93b7ab200b3293be5c49b33653e37f976d35c515 --- /dev/null +++ b/include/uapi/linux/virtfuse.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_VIRTFUSE_H +#define _LINUX_VIRTFUSE_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/* Maximum number of devices supported. */ +#define VIRT_FUSE_MAX_DEVICES 1024 + +/* + * Clone a fuse device sharing the fuse connection bound to the specified + * virtual device. + */ +#define VIRTFUSE_IOC_CLONE _IO(0x99, 1) + +/* Reset the specified virtual device */ +#define VIRTFUSE_IOC_RESET _IO(0x99, 2) + +/* Print all mountinfo of the specified virtual device. */ +#define VIRTFUSE_IOC_GET_MOUNTS _IO(0x99, 3) + +/* + * @len indicates the size of the buffer indicated by @buf + * @buf indicates a buffer to contain the output mountinfo of the specified + * virtual device. + */ +struct virtfuse_mounts_buf { + __u32 len; + __u8 buf[]; +}; + +#endif diff --git a/init/Kconfig b/init/Kconfig index 60ed7713b5ee2a2e4909c36ec9c907f758f54449..6b458f16330d2eab9786236dc5200935ce041e16 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1133,6 +1133,36 @@ config CGROUP_DEVICE Provides a cgroup controller implementing whitelists for devices which a process in the cgroup can mknod or open. +config SCHED_SLI + bool "cgroup CPU usage and additional scheduler statistics" + depends on CGROUP_CPUACCT + depends on FAIR_GROUP_SCHED + default y + help + This accounts the CPU time spent by tasks in a cgroup, split into + "usr%", "sys%", "idle", "steal%", "irq%", "softirq%" and "guest%", + and exports nr_migrations, nr_running and nr_uninterruptible for + each cgroup. + + The corresponding interface is cpuacct.proc_stat. + + Note that CPU usage accounting requires the cpuacct and cpu cgroup + subsystems to be mounted together to take effect. + +config RICH_CONTAINER + bool "Alibaba rich container" + depends on CGROUP_CPUACCT + depends on CFS_BANDWIDTH + depends on CPUSETS + select SCHED_SLI + default n + help + Make containers feel like VMs. The following interfaces are changed + to reflect per-container information: "/proc/cpuinfo", + "/proc/meminfo" and "/sys/devices/system/cpu/online", so that tools + (e.g. top, free) work in containers as they do in VMs. + Note that this requires a shared "cpu,cpuacct,cpuset" v1 hierarchy + to work properly. + config CGROUP_CPUACCT bool "Simple CPU accounting controller" help @@ -1431,7 +1461,7 @@ config LD_ORPHAN_WARN config LD_ORPHAN_WARN_LEVEL string depends on LD_ORPHAN_WARN - default "error" if WERROR + default "error" if WERROR && !(ARM64 && LIVEPATCH) default "warn" config SYSCTL @@ -2006,3 +2036,21 @@ config ARCH_HAS_SYNC_CORE_BEFORE_USERMODE # . config ARCH_HAS_SYSCALL_WRAPPER def_bool n + +config CK_KABI_RESERVE + bool "Enables KABI and hotfix RESERVE" + default y + help + This option enables KABI and hotfix reserve. + For Anolis Cloud Kernel, the KABI reserve macros and hotfix reserve + macros are the same. + For some embedded systems, KABI and hotfix reserve may not be + necessary. Disable it on demand. + +config CK_KABI_SIZE_ALIGN_CHECKS + bool "Enables more stringent kabi checks in the macros" + default y + depends on CK_KABI_RESERVE + help + This option enables more stringent kabi checks. Those must be disabled + for debug builds, because debug builds are allowed to change struct + sizes.
\ No newline at end of file diff --git a/init/init_task.c b/init/init_task.c index fd9e27185e23a759316ebee3acf60cfe31ae8a75..ea99f5d9d076f5a9d42a102893a220885c563e0e 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -203,7 +203,7 @@ struct task_struct init_task .trace_recursion = 0, #endif #ifdef CONFIG_LIVEPATCH - .patch_state = KLP_UNDEFINED, + .patch_state = KLP_TRANSITION_IDLE, #endif #ifdef CONFIG_SECURITY .security = NULL, diff --git a/init/main.c b/init/main.c index c787e94cc8982b6209bd542744663944e88cc021..a61ac250fdcbe3a8f5f8070ed436c4abdd540272 100644 --- a/init/main.c +++ b/init/main.c @@ -1541,7 +1541,8 @@ static noinline void __init kernel_init_freeable(void) rcu_init_tasks_generic(); do_pre_smp_initcalls(); - lockup_detector_init(); + if (disable_sdei_nmi_watchdog) + lockup_detector_init(); smp_init(); sched_init_smp(); @@ -1552,6 +1553,10 @@ static noinline void __init kernel_init_freeable(void) do_basic_setup(); + /* sdei_watchdog needs to be initialized after sdei_init */ + if (!disable_sdei_nmi_watchdog) + lockup_detector_init(); + kunit_run_all_tests(); wait_for_initramfs(); diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 0122f220ef0d2b6b1850dc43beb5bcff36400746..5087203699b029c38bc151133801a18596e2aa06 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -137,6 +137,8 @@ struct io_defer_entry { u32 seq; }; +extern struct io_sq_data __percpu **percpu_sqd; + /* requests with any of those set should undergo io_disarm_next() */ #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL) #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK) @@ -1736,11 +1738,12 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) if (unlikely(needs_lock)) { /* * If IORING_SETUP_SQPOLL is enabled, sqes are either handle - * in sq thread task context or in io worker task context. If - * current task context is sq thread, we don't need to check - * whether should wake up sq thread. + * in sq thread task context or in io worker task context or + * in original context. If current task context is sq thread, + * we don't need to check whether should wake up sq thread. 
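+	 *
+	 * The "original context" case is a submitter that issues sqes
+	 * itself, e.g. by entering the ring with IORING_ENTER_SQ_WAKEUP |
+	 * IORING_ENTER_SQ_SUBMIT_ON_IDLE; a hedged userspace sketch via
+	 * the raw syscall:
+	 *
+	 *	syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
+	 *		IORING_ENTER_SQ_WAKEUP |
+	 *		IORING_ENTER_SQ_SUBMIT_ON_IDLE, NULL, 0);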
*/ if ((ctx->flags & IORING_SETUP_SQPOLL) && + (current != ctx->sq_data->thread) && wq_has_sleeper(&ctx->sq_data->wait)) wake_up(&ctx->sq_data->wait); @@ -3625,6 +3628,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | + IORING_ENTER_SQ_SUBMIT_ON_IDLE | IORING_ENTER_REGISTERED_RING))) return -EINVAL; @@ -3668,8 +3672,18 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, ret = -EOWNERDEAD; goto out; } - if (flags & IORING_ENTER_SQ_WAKEUP) + if (flags & IORING_ENTER_SQ_WAKEUP) { wake_up(&ctx->sq_data->wait); + if (flags & IORING_ENTER_SQ_SUBMIT_ON_IDLE) { + bool has_lock; + + has_lock = mutex_trylock(&ctx->uring_lock); + if (has_lock) { + io_submit_sqes(ctx, min(to_submit, 8U)); + mutex_unlock(&ctx->uring_lock); + } + } + } if (flags & IORING_ENTER_SQ_WAIT) io_sqpoll_wait_sq(ctx); @@ -4076,7 +4090,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) IORING_SETUP_SQE128 | IORING_SETUP_CQE32 | IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY | - IORING_SETUP_NO_SQARRAY)) + IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQPOLL_PERCPU | + IORING_SETUP_IDLE_US)) return -EINVAL; return io_uring_create(entries, &p, params); @@ -4365,7 +4380,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, if (sqd) { mutex_unlock(&ctx->uring_lock); mutex_unlock(&sqd->lock); - io_put_sq_data(sqd); + io_put_sq_data(ctx, sqd); mutex_lock(&ctx->uring_lock); } @@ -4393,9 +4408,8 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, if (sqd) { mutex_unlock(&ctx->uring_lock); mutex_unlock(&sqd->lock); - io_put_sq_data(sqd); + io_put_sq_data(ctx, sqd); mutex_lock(&ctx->uring_lock); - } return ret; } @@ -4616,6 +4630,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, static int __init io_uring_init(void) { + int cpu; + #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \ BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \ BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \ @@ -4705,6 +4721,9 @@ static int __init io_uring_init(void) sizeof_field(struct io_kiocb, cmd.data), NULL); iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64); + percpu_sqd = alloc_percpu(struct io_sq_data *); + for_each_possible_cpu(cpu) + *per_cpu_ptr(percpu_sqd, cpu) = NULL; #ifdef CONFIG_SYSCTL register_sysctl_init("kernel", kernel_io_uring_disabled_table); diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index 489e66647e07950c1ec71b0935d94e46430f805e..8314d57564f4b555eee4f56001fed1590e6f140b 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -25,6 +25,9 @@ enum { IO_SQ_THREAD_SHOULD_PARK, }; +DEFINE_MUTEX(percpu_sqd_lock); +struct io_sq_data __percpu **percpu_sqd; + void io_sq_thread_unpark(struct io_sq_data *sqd) __releases(&sqd->lock) { @@ -65,14 +68,28 @@ void io_sq_thread_stop(struct io_sq_data *sqd) wait_for_completion(&sqd->exited); } -void io_put_sq_data(struct io_sq_data *sqd) +void io_put_sq_data(struct io_ring_ctx *ctx, struct io_sq_data *sqd) { + int percpu = 0; + + if ((ctx->flags & IORING_SETUP_SQ_AFF) && + (ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) + percpu = 1; + + if (percpu) + mutex_lock(&percpu_sqd_lock); + if (refcount_dec_and_test(&sqd->refs)) { WARN_ON_ONCE(atomic_read(&sqd->park_pending)); io_sq_thread_stop(sqd); + if (percpu) + *per_cpu_ptr(percpu_sqd, sqd->sq_cpu) = NULL; 
kfree(sqd); } + + if (percpu) + mutex_unlock(&percpu_sqd_lock); } static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) @@ -80,11 +97,36 @@ static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) struct io_ring_ctx *ctx; unsigned sq_thread_idle = 0; - list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) - sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle); + sqd->idle_mode_us = false; + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { + bool idle_mode_us = ctx->flags & IORING_SETUP_IDLE_US; + unsigned int tmp_idle = idle_mode_us ? ctx->sq_thread_idle : + jiffies_to_usecs(ctx->sq_thread_idle); + + if (idle_mode_us && !sqd->idle_mode_us) + sqd->idle_mode_us = true; + + if (sq_thread_idle < tmp_idle) + sq_thread_idle = tmp_idle; + } + + if (!sqd->idle_mode_us) + sq_thread_idle = usecs_to_jiffies(sq_thread_idle); sqd->sq_thread_idle = sq_thread_idle; } +static inline u64 io_current_time(bool idle_mode_us) +{ + return idle_mode_us ? (ktime_get_ns() >> 10) : get_jiffies_64(); +} + +static inline bool io_time_after(bool idle_mode_us, u64 timeout) +{ + u64 now = io_current_time(idle_mode_us); + + return time_after64(now, timeout); +} + void io_sq_thread_finish(struct io_ring_ctx *ctx) { struct io_sq_data *sqd = ctx->sq_data; @@ -95,7 +137,7 @@ void io_sq_thread_finish(struct io_ring_ctx *ctx) io_sqd_update_thread_idle(sqd); io_sq_thread_unpark(sqd); - io_put_sq_data(sqd); + io_put_sq_data(ctx, sqd); ctx->sq_data = NULL; } } @@ -131,11 +173,11 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p) } static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, - bool *attached) + bool *attached, bool *percpu_found) { struct io_sq_data *sqd; - *attached = false; + *attached = *percpu_found = false; if (p->flags & IORING_SETUP_ATTACH_WQ) { sqd = io_attach_sq_data(p); if (!IS_ERR(sqd)) { @@ -147,6 +189,27 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, return sqd; } + if ((p->flags & IORING_SETUP_SQ_AFF) && + (p->flags & IORING_SETUP_SQPOLL_PERCPU)) { + int cpu = p->sq_thread_cpu; + + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) + return ERR_PTR(-EINVAL); + mutex_lock(&percpu_sqd_lock); + sqd = *per_cpu_ptr(percpu_sqd, cpu); + if (sqd) { + if (sqd->task_tgid != current->tgid) { + mutex_unlock(&percpu_sqd_lock); + return ERR_PTR(-EPERM); + } + refcount_inc(&sqd->refs); + mutex_unlock(&percpu_sqd_lock); + *percpu_found = true; + return sqd; + } + mutex_unlock(&percpu_sqd_lock); + } + sqd = kzalloc(sizeof(*sqd), GFP_KERNEL); if (!sqd) return ERR_PTR(-ENOMEM); @@ -224,7 +287,7 @@ static int io_sq_thread(void *data) { struct io_sq_data *sqd = data; struct io_ring_ctx *ctx; - unsigned long timeout = 0; + u64 timeout = 0; char buf[TASK_COMM_LEN]; DEFINE_WAIT(wait); @@ -256,7 +319,7 @@ static int io_sq_thread(void *data) if (io_sqd_events_pending(sqd) || signal_pending(current)) { if (io_sqd_handle_event(sqd)) break; - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; } cap_entries = !list_is_singular(&sqd->ctx_list); @@ -269,9 +332,9 @@ static int io_sq_thread(void *data) if (io_run_task_work()) sqt_spin = true; - if (sqt_spin || !time_after(jiffies, timeout)) { + if (sqt_spin || !io_time_after(sqd->idle_mode_us, timeout)) { if (sqt_spin) - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; if (unlikely(need_resched())) { mutex_unlock(&sqd->lock); cond_resched(); @@ -318,7 +381,7 @@ static int 
io_sq_thread(void *data) } finish_wait(&sqd->wait, &wait); - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; } io_uring_cancel_generic(true, sqd); @@ -349,6 +412,8 @@ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx) finish_wait(&ctx->sqo_sq_wait, &wait); } +#define DEFAULT_SQ_IDLE_US 10 + __cold int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p) { @@ -372,13 +437,26 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, if (ctx->flags & IORING_SETUP_SQPOLL) { struct task_struct *tsk; struct io_sq_data *sqd; - bool attached; + bool attached, percpu_found; ret = security_uring_sqpoll(); if (ret) return ret; - sqd = io_get_sq_data(p, &attached); + if ((ctx->flags & IORING_SETUP_ATTACH_WQ) && + (ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) { + /* ATTACH_WQ and SQPOLL_PERCPU are mutually exclusive */ + ret = -EINVAL; + goto err; + } + if ((ctx->flags & IORING_SETUP_SQPOLL_PERCPU) && + !(ctx->flags & IORING_SETUP_SQ_AFF)) { + /* SQPOLL_PERCPU requires SQ_AFF to be set as well */ + ret = -EINVAL; + goto err; + } + + sqd = io_get_sq_data(p, &attached, &percpu_found); if (IS_ERR(sqd)) { ret = PTR_ERR(sqd); goto err; @@ -386,9 +464,22 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, ctx->sq_creds = get_current_cred(); ctx->sq_data = sqd; - ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); - if (!ctx->sq_thread_idle) - ctx->sq_thread_idle = HZ; + if ((ctx->flags & IORING_SETUP_IDLE_US) && + !(ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) { + ret = -EINVAL; + goto err; + } + + /* + * for ms mode: ctx->sq_thread_idle is jiffies + * for us mode: ctx->sq_thread_idle is time in microseconds + */ + if (ctx->flags & IORING_SETUP_IDLE_US) + ctx->sq_thread_idle = p->sq_thread_idle ? + p->sq_thread_idle : DEFAULT_SQ_IDLE_US; + else + ctx->sq_thread_idle = p->sq_thread_idle ?
+ msecs_to_jiffies(p->sq_thread_idle) : HZ; io_sq_thread_park(sqd); list_add(&ctx->sqd_list, &sqd->ctx_list); @@ -399,7 +490,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, if (ret < 0) goto err; - if (attached) + if (attached || percpu_found) return 0; if (p->flags & IORING_SETUP_SQ_AFF) { @@ -434,12 +525,19 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, sqd->thread = tsk; task_to_put = get_task_struct(tsk); + if ((p->flags & IORING_SETUP_SQ_AFF) && + (p->flags & IORING_SETUP_SQPOLL_PERCPU)) { + mutex_lock(&percpu_sqd_lock); + *per_cpu_ptr(percpu_sqd, sqd->sq_cpu) = sqd; + mutex_unlock(&percpu_sqd_lock); + } ret = io_uring_alloc_task_context(tsk, ctx); wake_up_new_task(tsk); if (ret) goto err; - } else if (p->flags & IORING_SETUP_SQ_AFF) { - /* Can't have SQ_AFF without SQPOLL */ + } else if (p->flags & (IORING_SETUP_SQ_AFF | IORING_SETUP_IDLE_US | + IORING_SETUP_SQPOLL_PERCPU)) { + /* Can't have SQ_AFF or IDLE_US or SQPOLL_PERCPU without SQPOLL */ ret = -EINVAL; goto err; } diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h index 8df37e8c914936d777b9d0495796c236f41e5189..60eec0c97864f84e9cec956833a739936b3223d3 100644 --- a/io_uring/sqpoll.h +++ b/io_uring/sqpoll.h @@ -18,6 +18,8 @@ struct io_sq_data { unsigned long state; struct completion exited; + + bool idle_mode_us; }; int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p); @@ -25,6 +27,6 @@ void io_sq_thread_finish(struct io_ring_ctx *ctx); void io_sq_thread_stop(struct io_sq_data *sqd); void io_sq_thread_park(struct io_sq_data *sqd); void io_sq_thread_unpark(struct io_sq_data *sqd); -void io_put_sq_data(struct io_sq_data *sqd); +void io_put_sq_data(struct io_ring_ctx *ctx, struct io_sq_data *sqd); void io_sqpoll_wait_sq(struct io_ring_ctx *ctx); int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask); diff --git a/ipc/shm.c b/ipc/shm.c index 576a543b7cfff9c09187d8605bd3f3086af2a458..c9b9bc8c4fac245f47605b531d0fb2cd54c34f72 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -757,11 +757,21 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) file = hugetlb_file_setup(name, hugesize, acctflag, HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { + struct rich_container_ext *ext = NULL; + /* * Do not allow no accounting for OVERCOMMIT_NEVER, even * if it's asked for. 
*/ - if ((shmflg & SHM_NORESERVE) && + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) { + if ((shmflg & SHM_NORESERVE) && + ext->overcommit_memory != OVERCOMMIT_NEVER) + acctflag = VM_NORESERVE; + } else if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; file = shmem_kernel_file_setup(name, size, acctflag); diff --git a/kernel/audit.h b/kernel/audit.h index a60d2840559e2bda51bd2db82e62474886490996..e71720eb3ebdf62721c5f98784da542b6d40fc1a 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -208,6 +208,8 @@ struct audit_context { }; int fds[2]; struct audit_proctitle proctitle; + + CK_KABI_RESERVE(1) }; extern bool audit_ever_enabled; diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index f526b7573e97c1d14e566b529552769a3c756e0c..a2d0e84e725b5676a6056f45b313d6e734a63b55 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -11,6 +11,9 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_ obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o +ifeq ($(CONFIG_RELAY),y) +obj-$(CONFIG_BPF_SYSCALL) += bpf_relay.o +endif obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o obj-$(CONFIG_BPF_SYSCALL) += disasm.o mprog.o obj-$(CONFIG_BPF_JIT) += trampoline.o diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 96856f130cbff8111fd31fa1007a9fade8dfd15d..ec7118517fa915581ec333ebb316c9859cff8db5 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -548,7 +548,7 @@ int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, return -ENOENT; /* Only allow sleepable program for resched-able iterator */ - if (prog->aux->sleepable && !bpf_iter_target_support_resched(tinfo)) + if (prog->sleepable && !bpf_iter_target_support_resched(tinfo)) return -EINVAL; link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN); @@ -697,7 +697,7 @@ int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx) struct bpf_run_ctx run_ctx, *old_run_ctx; int ret; - if (prog->aux->sleepable) { + if (prog->sleepable) { rcu_read_lock_trace(); migrate_disable(); might_fault(); @@ -782,9 +782,7 @@ struct bpf_iter_num_kern { int end; /* final value, exclusive */ } __aligned(8); -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) { @@ -845,4 +843,4 @@ __bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it) s->cur = s->end = 0; } -__diag_pop(); +__bpf_kfunc_end_defs(); diff --git a/kernel/bpf/bpf_relay.c b/kernel/bpf/bpf_relay.c new file mode 100644 index 0000000000000000000000000000000000000000..6f0004c7541bc23c37bf78e5b6b7feac1ce00ede --- /dev/null +++ b/kernel/bpf/bpf_relay.c @@ -0,0 +1,489 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Alibaba Cloud + * + * relay interface only used by bpf. 
To use it, + * echo the following commands to /sys/kernel/debug/relay_ebpf: + * - Create: + * create bufnum bufsize percpu + * - Remove: + * remove + * + * Also `cat` can be used to show the current relay files, one entry per line + * - Show: cat /sys/kernel/debug/relay_ebpf + * => id dir_name file_name bufnum bufsize percpu + * + * The field "id" is a unique identifier for each relay channel, which is + * needed by the bpf helper to write into the channel. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* reserved indexes used for tcp-rt */ +enum reserve_relay_index { + RELAY_INDEX_TCPRT_LOG = 0, + RELAY_INDEX_TCPRT_STAT, + RELAY_INDEX_BEGIN = 4, +}; + +#define DIR_NAME_TCPRT "tcp-rt" +#define FILE_NAME_TCPRT_LOG "rt-network-log" +#define FILE_NAME_TCPRT_STAT "rt-network-stats" + +/* dynamic array to maintain relay channels, with number limit RCHAN_NUM_MAX */ +static struct rchan **rchan_array; +static size_t array_capacity; +#define RCHAN_NUM_MAX 32 + +/* used to protect relay_ebpf, making sure that relay_ebpf processes one + * command at a time. + */ +static DEFINE_MUTEX(relay_file_lock); + +/* handle the extension of the relay array */ +static int relay_array_extend(size_t new_size) +{ + struct rchan **new_array, **old; + size_t new_capacity; + + /* Calculate new capacity with a simple growth strategy */ + new_capacity = (new_size > array_capacity * 2) ? new_size : (array_capacity * 2); + + /* Compare with RCHAN_NUM_MAX, the max capacity */ + if (new_capacity > RCHAN_NUM_MAX) + new_capacity = RCHAN_NUM_MAX; + + /* Do nothing if new capacity is not larger than old */ + if (new_capacity <= array_capacity) + return -EINVAL; + + /* Allocate and init new array with new capacity */ + new_array = kcalloc(new_capacity, sizeof(*rchan_array), GFP_KERNEL); + if (!new_array) + return -ENOMEM; + + if (rchan_array) + memcpy(new_array, rchan_array, + array_capacity * sizeof(*rchan_array)); + + /* update rchan_array with rcu */ + old = rcu_dereference_protected(rchan_array, + lockdep_is_held(&relay_file_lock)); + rcu_assign_pointer(rchan_array, new_array); + synchronize_rcu(); + + array_capacity = new_capacity; + kfree(old); + + pr_info("bpf-relay: rchan_array extended to size %zu\n", new_capacity); + return 0; +} + +/* return the idx of the target relay channel if it exists, -1 if not */ +static int relay_array_lookup(const char *dirname, const char *filename) +{ + const char *fname, *dname; + int i; + + for (i = 0; i < array_capacity; ++i) { + if (!rchan_array[i]) + continue; + + fname = rchan_array[i]->base_filename; + dname = rchan_array[i]->parent->d_name.name; + + if (strcmp(dname, dirname) == 0 && + strcmp(fname, filename) == 0) + return i; + } + + return -1; +} + +/* return first index if found, else return -1 */ +static int relay_array_lookup_dir(struct dentry *dir) +{ + int i; + + for (i = 0; i < array_capacity; ++i) { + if (!rchan_array[i]) + continue; + + if (rchan_array[i]->parent == dir) + return i; + } + + return -1; +} + +/* must ensure the index is valid */ +static void relay_array_delete(int index) +{ + struct dentry *dir = rchan_array[index]->parent; + struct rchan *rch = rchan_array[index]; + + /* remove the target relay channel */ + rcu_assign_pointer(rchan_array[index], NULL); + synchronize_rcu(); + + relay_close(rch); + + /* check if the parent dir is still in use */ + if (relay_array_lookup_dir(dir) == -1) { + debugfs_remove_recursive(dir); + pr_info("bpf-relay: directory deleted\n"); + } +} + +/* get the next usable id, return -1 if there
is no id left */ +static int relay_array_usable_id(const char *dir_name, const char *file_name) +{ + int i; + + /* first, check the reserved special ids */ + if (strcmp(dir_name, DIR_NAME_TCPRT) == 0 && + strcmp(file_name, FILE_NAME_TCPRT_LOG) == 0) { + pr_info("bpf-relay: prepare to create tcprt log\n"); + i = RELAY_INDEX_TCPRT_LOG; + goto check; + } + if (strcmp(dir_name, DIR_NAME_TCPRT) == 0 && + strcmp(file_name, FILE_NAME_TCPRT_STAT) == 0) { + pr_info("bpf-relay: prepare to create stats\n"); + i = RELAY_INDEX_TCPRT_STAT; + goto check; + } + + /* not a special relay, find the minimal usable id */ + for (i = RELAY_INDEX_BEGIN; i < array_capacity; ++i) { + if (!rchan_array[i]) + return i; + } + +check: + /* if extend needed but fails, return -1 */ + if (i >= array_capacity) { + if (relay_array_extend(i + 1)) + return -1; + } + + return i; +} + +/* relay callbacks used by all relay files */ +static struct dentry *create_buf_file_handler(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global) +{ + char final_fname[NAME_MAX]; + + strscpy(final_fname, filename, sizeof(final_fname)); + if (buf->chan->private_data) { + *is_global = 1; + + /* if it is global, remove the last cpu_id 0 */ + final_fname[strlen(filename) - 1] = '\0'; + } + + return debugfs_create_file(final_fname, mode, parent, buf, + &relay_file_operations); +} + +static int remove_buf_file_handler(struct dentry *dentry) +{ + debugfs_remove(dentry); + return 0; +} + +static int subbuf_start(struct rchan_buf *buf, + void *subbuf, + void *prev_subbuf, + size_t prev_padding) +{ + return 1; +} + +static struct rchan_callbacks relay_callbacks = { + .create_buf_file = create_buf_file_handler, + .remove_buf_file = remove_buf_file_handler, + .subbuf_start = subbuf_start, +}; + +/* print one entry per read */ +static ssize_t relay_ebpf_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + /* use relay_array index as ppos, each read() processes an element and + * *ppos increases by 1, until it reaches array_capacity.
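 *
 * Example round trip, a sketch assuming the reserved tcp-rt log channel is
 * created with an 8 x 64K percpu buffer ("64K" is parsed by memparse();
 * the output columns are id, dir, file, bufnum, bufsize, percpu):
 *
 *   # echo "create tcp-rt rt-network-log bufnum 8 bufsize 64K percpu on" > /sys/kernel/debug/relay_ebpf
 *   # cat /sys/kernel/debug/relay_ebpf
 *   0 tcp-rt rt-network-log 8 65536 1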
+ */ + struct rchan *rch; + char buf[128]; + int size, ret; + loff_t index; + + if (count < sizeof(buf)) + return -EACCES; + + mutex_lock(&relay_file_lock); + index = *ppos; + if (index < 0) { + ret = -EINVAL; + goto out; + } + + /* find the first non-null rchan from ppos */ + while (index < array_capacity && !rchan_array[index]) + index++; + + if (index >= array_capacity) { + ret = 0; + goto out; + } + + /* find a valid entry, rch->is_global==0 means percpu is on */ + rch = rchan_array[index]; + size = snprintf(buf, sizeof(buf), "%lld %s %s %lu %lu %d\n", index, + rch->parent->d_name.name, rch->base_filename, + rch->n_subbufs, rch->subbuf_size, !rch->is_global); + + ret = copy_to_user(user_buf, buf, size); + if (ret) { + ret = -EFAULT; + goto out; + } + + ret = size; + /* current element is processed, increase index */ + *ppos = index + 1; +out: + mutex_unlock(&relay_file_lock); + return ret; +} + +static int bpf_relay_create(const char *dir_name, const char *file_name, + unsigned long bufnum, unsigned long bufsize, + void *is_global, int index) +{ + struct dentry *dir; + struct rchan *rch; + int dir_create = 0; + + if (index >= array_capacity || index < 0) { + pr_info("bpf-relay: create fail, index %d out of range\n", + index); + return -EINVAL; + } + + /* check if this relay channel already exists */ + if (relay_array_lookup(dir_name, file_name) != -1) { + pr_info("bpf-relay: create fail, channel already exists\n"); + return -EEXIST; + } + + /* find if the dir already exists, if not, create it */ + dir = debugfs_lookup(dir_name, NULL); + if (!dir) { + dir = debugfs_create_dir(dir_name, NULL); + if (IS_ERR(dir)) + return PTR_ERR(dir); + dir_create = 1; + + } else if (!S_ISDIR(dir->d_inode->i_mode)) { + pr_info("bpf-relay: create fail, %s is not a directory\n", + dir_name); + return -EINVAL; + } + + rch = relay_open(file_name, dir, bufsize, bufnum, + &relay_callbacks, is_global); + if (!rch) { + if (dir_create) + debugfs_remove_recursive(dir); + pr_info("bpf-relay: create fail, relay_open fail\n"); + return -ENOMEM; + } + + rcu_assign_pointer(rchan_array[index], rch); + pr_info("bpf-relay: create finished, id=%d\n", index); + return 0; +} + +static int handle_create(const char *buf) +{ + char dir_name[NAME_MAX], file_name[NAME_MAX], bsize_str[20], percpu[4]; + unsigned long bufnum, bufsize; + static unsigned char global_flag; + unsigned char *is_global; + int ret; + + ret = sscanf(buf, " create %s %s bufnum %lu bufsize %s percpu %4s", + dir_name, file_name, &bufnum, bsize_str, percpu); + if (ret != 5) { + pr_info("bpf-relay: create fail, get args failed\n"); + return -EINVAL; + } + + /* parse arguments */ + bufsize = (unsigned long)memparse(bsize_str, NULL); + + /* by passing a valid pointer as private_data for relay channel, + * we mark the channel as global, see create_buf_file_handler() + */ + is_global = NULL; + if (strcmp(percpu, "off") == 0) + is_global = &global_flag; + + ret = relay_array_usable_id(dir_name, file_name); + if (ret < 0) { + pr_info("bpf-relay: create fail, no id left\n"); + return -ENOMEM; + } + + /* create common relay chan according to args */ + return bpf_relay_create(dir_name, file_name, bufnum, bufsize, + is_global, ret); +} + +static int handle_remove(const char *buf) +{ + char dir_name[NAME_MAX], file_name[NAME_MAX]; + int ret; + + ret = sscanf(buf, " remove %s %s", dir_name, file_name); + if (ret != 2) { + pr_info("bpf-relay: remove fail, get args failed\n"); + return -EINVAL; + } + + ret = relay_array_lookup(dir_name, file_name); + if (ret >= 0) { + 
relay_array_delete(ret); + pr_info("bpf-relay: remove finished, id=%d\n", ret); + } else { + pr_info("bpf-relay: remove finished, channel does not exist\n"); + } + + return 0; +} + +static ssize_t relay_ebpf_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + char cmd[10], buf[128]; + int ret; + + if (!count || count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, user_buf, count)) + return -EFAULT; + + /* parse cmd */ + buf[count] = '\0'; + ret = sscanf(buf, " %s", cmd); + if (ret != 1) { + pr_info("bpf-relay: write fail, get cmd failed\n"); + return -EINVAL; + } + + mutex_lock(&relay_file_lock); + if (strcmp(cmd, "create") == 0) { + ret = handle_create(buf); + } else if (strcmp(cmd, "remove") == 0) { + ret = handle_remove(buf); + } else { + pr_info("bpf-relay: write fail, invalid cmd\n"); + ret = -EINVAL; + } + + /* create or remove succeeded */ + if (!ret) + ret = count; + + mutex_unlock(&relay_file_lock); + return ret; +} + +static const struct file_operations relay_ebpf_fops = { + .write = relay_ebpf_write, + .read = relay_ebpf_read, +}; + +__bpf_kfunc_start_defs(); + +/* Write data of size size__sz to the relay channel at index. + * WARNING: This kfunc can be deprecated at ANY time in the future. + */ +__bpf_kfunc int bpf_anolis_relay_write(void *data, size_t size__sz, int index) +{ + struct rchan *rch, **rch_arr; + int ret = 0; + + /* capacity does not need to be protected because it is always updated + * after rchan_array and will not decrease. It is safe to use a newer + * rchan_array with an older (and hence smaller) capacity. + */ + if (index >= array_capacity) + return -EINVAL; + + rcu_read_lock(); + + /* rch_arr will not be NULL, because if it were NULL, array_capacity + * would be 0 and the above index check would not pass.
+ */ + rch_arr = rcu_dereference(rchan_array); + rch = rcu_dereference(rch_arr[index]); + if (!rch) { + ret = -ENOENT; + goto out; + } + + relay_write(rch, data, size__sz); +out: + rcu_read_unlock(); + return ret; +} + +__bpf_kfunc_end_defs(); + +BTF_KFUNCS_START(bpf_relay_kfunc_ids) +BTF_ID_FLAGS(func, bpf_anolis_relay_write, KF_TRUSTED_ARGS) +BTF_KFUNCS_END(bpf_relay_kfunc_ids) + +static const struct btf_kfunc_id_set bpf_relay_kfunc_set = { + .owner = THIS_MODULE, + .set = &bpf_relay_kfunc_ids, +}; + +/* create the relay_ebpf file; rchan_array is created with the "create" cmd */ +static int __init bpf_relay_init(void) +{ + int ret; + + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, + &bpf_relay_kfunc_set); + if (ret) { + pr_err("bpf-relay: register kfunc fail\n"); + return ret; + } + + if (!debugfs_create_file("relay_ebpf", 0644, NULL, NULL, + &relay_ebpf_fops)) { + pr_err("bpf-relay: debugfs create relay_ebpf fail\n"); + return -ENOMEM; + } + + return 0; +} +late_initcall(bpf_relay_init); diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index fdc3e8705a3cb838e82409f0c025b1e39ea4dfec..b354c3883e75c7d25e1f0555118378c4a21fe5f6 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -13,26 +13,15 @@ #include #include -enum bpf_struct_ops_state { - BPF_STRUCT_OPS_STATE_INIT, - BPF_STRUCT_OPS_STATE_INUSE, - BPF_STRUCT_OPS_STATE_TOBEFREE, - BPF_STRUCT_OPS_STATE_READY, -}; - -#define BPF_STRUCT_OPS_COMMON_VALUE \ - refcount_t refcnt; \ - enum bpf_struct_ops_state state - struct bpf_struct_ops_value { - BPF_STRUCT_OPS_COMMON_VALUE; + struct bpf_struct_ops_common_value common; char data[] ____cacheline_aligned_in_smp; }; struct bpf_struct_ops_map { struct bpf_map map; struct rcu_head rcu; - const struct bpf_struct_ops *st_ops; + const struct bpf_struct_ops_desc *st_ops_desc; /* protect map_update */ struct mutex lock; /* link has all the bpf_links that is populated @@ -40,12 +29,15 @@ struct bpf_struct_ops_map { * (in kvalue.data). */ struct bpf_link **links; + u32 links_cnt; /* image is a page that has all the trampolines * that stores the func args before calling the bpf_prog. * A PAGE_SIZE "image" is enough to store all trampoline for * "links[]". */ void *image; + /* The owner module's btf. */ + struct btf *btf; /* uvalue->data stores the kernel struct * (e.g. tcp_congestion_ops) that is more useful * to userspace than the kvalue. For example, @@ -70,35 +62,6 @@ static DEFINE_MUTEX(update_mutex); #define VALUE_PREFIX "bpf_struct_ops_" #define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1) -/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is - * the map's value exposed to the userspace and its btf-type-id is - * stored at the map->btf_vmlinux_value_type_id.
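Returning to bpf_relay.c above: a minimal BPF-program-side sketch of calling the new kfunc (a hypothetical libbpf-style program, not part of this patch; channel id 0 is the reserved tcp-rt log channel and must have been created via debugfs first)::

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	/* Declaration of the kfunc registered above via BTF_KFUNCS_START */
	extern int bpf_anolis_relay_write(void *data, size_t size__sz,
					  int index) __ksym;

	SEC("fentry/tcp_sendmsg")
	int BPF_PROG(relay_log_send)
	{
		char evt[8] = "send";

		/* write a fixed-size record into relay channel 0 */
		bpf_anolis_relay_write(evt, sizeof(evt), 0);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";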
- * - */ -#define BPF_STRUCT_OPS_TYPE(_name) \ -extern struct bpf_struct_ops bpf_##_name; \ - \ -struct bpf_struct_ops_##_name { \ - BPF_STRUCT_OPS_COMMON_VALUE; \ - struct _name data ____cacheline_aligned_in_smp; \ -}; -#include "bpf_struct_ops_types.h" -#undef BPF_STRUCT_OPS_TYPE - -enum { -#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name, -#include "bpf_struct_ops_types.h" -#undef BPF_STRUCT_OPS_TYPE - __NR_BPF_STRUCT_OPS_TYPE, -}; - -static struct bpf_struct_ops * const bpf_struct_ops[] = { -#define BPF_STRUCT_OPS_TYPE(_name) \ - [BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name, -#include "bpf_struct_ops_types.h" -#undef BPF_STRUCT_OPS_TYPE -}; - const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = { }; @@ -108,138 +71,139 @@ const struct bpf_prog_ops bpf_struct_ops_prog_ops = { #endif }; -static const struct btf_type *module_type; +BTF_ID_LIST(st_ops_ids) +BTF_ID(struct, module) +BTF_ID(struct, bpf_struct_ops_common_value) + +enum { + IDX_MODULE_ID, + IDX_ST_OPS_COMMON_VALUE_ID, +}; + +extern struct btf *btf_vmlinux; + +static bool is_valid_value_type(struct btf *btf, s32 value_id, + const struct btf_type *type, + const char *value_name) +{ + const struct btf_type *common_value_type; + const struct btf_member *member; + const struct btf_type *vt, *mt; + + vt = btf_type_by_id(btf, value_id); + if (btf_vlen(vt) != 2) { + pr_warn("The number of %s's members should be 2, but we get %d\n", + value_name, btf_vlen(vt)); + return false; + } + member = btf_type_member(vt); + mt = btf_type_by_id(btf, member->type); + common_value_type = btf_type_by_id(btf_vmlinux, + st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]); + if (mt != common_value_type) { + pr_warn("The first member of %s should be bpf_struct_ops_common_value\n", + value_name); + return false; + } + member++; + mt = btf_type_by_id(btf, member->type); + if (mt != type) { + pr_warn("The second member of %s should be %s\n", + value_name, btf_name_by_offset(btf, type->name_off)); + return false; + } + + return true; +} -void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) +int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, + struct btf *btf, + struct bpf_verifier_log *log) { - s32 type_id, value_id, module_id; + struct bpf_struct_ops *st_ops = st_ops_desc->st_ops; const struct btf_member *member; - struct bpf_struct_ops *st_ops; const struct btf_type *t; + s32 type_id, value_id; char value_name[128]; const char *mname; - u32 i, j; + int i; - /* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */ -#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name); -#include "bpf_struct_ops_types.h" -#undef BPF_STRUCT_OPS_TYPE - - module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT); - if (module_id < 0) { - pr_warn("Cannot find struct module in btf_vmlinux\n"); - return; + if (strlen(st_ops->name) + VALUE_PREFIX_LEN >= + sizeof(value_name)) { + pr_warn("struct_ops name %s is too long\n", + st_ops->name); + return -EINVAL; } - module_type = btf_type_by_id(btf, module_id); + sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name); - for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { - st_ops = bpf_struct_ops[i]; + type_id = btf_find_by_name_kind(btf, st_ops->name, + BTF_KIND_STRUCT); + if (type_id < 0) { + pr_warn("Cannot find struct %s in %s\n", + st_ops->name, btf_get_name(btf)); + return -EINVAL; + } + t = btf_type_by_id(btf, type_id); + if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) { + pr_warn("Cannot support #%u members in struct %s\n", + 
btf_type_vlen(t), st_ops->name); + return -EINVAL; + } - if (strlen(st_ops->name) + VALUE_PREFIX_LEN >= - sizeof(value_name)) { - pr_warn("struct_ops name %s is too long\n", - st_ops->name); - continue; - } - sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name); + value_id = btf_find_by_name_kind(btf, value_name, + BTF_KIND_STRUCT); + if (value_id < 0) { + pr_warn("Cannot find struct %s in %s\n", + value_name, btf_get_name(btf)); + return -EINVAL; + } + if (!is_valid_value_type(btf, value_id, t, value_name)) + return -EINVAL; - value_id = btf_find_by_name_kind(btf, value_name, - BTF_KIND_STRUCT); - if (value_id < 0) { - pr_warn("Cannot find struct %s in btf_vmlinux\n", - value_name); - continue; - } + for_each_member(i, t, member) { + const struct btf_type *func_proto; - type_id = btf_find_by_name_kind(btf, st_ops->name, - BTF_KIND_STRUCT); - if (type_id < 0) { - pr_warn("Cannot find struct %s in btf_vmlinux\n", + mname = btf_name_by_offset(btf, member->name_off); + if (!*mname) { + pr_warn("anon member in struct %s is not supported\n", st_ops->name); - continue; - } - t = btf_type_by_id(btf, type_id); - if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) { - pr_warn("Cannot support #%u members in struct %s\n", - btf_type_vlen(t), st_ops->name); - continue; + return -EOPNOTSUPP; } - for_each_member(j, t, member) { - const struct btf_type *func_proto; - - mname = btf_name_by_offset(btf, member->name_off); - if (!*mname) { - pr_warn("anon member in struct %s is not supported\n", - st_ops->name); - break; - } - - if (__btf_member_bitfield_size(t, member)) { - pr_warn("bit field member %s in struct %s is not supported\n", - mname, st_ops->name); - break; - } - - func_proto = btf_type_resolve_func_ptr(btf, - member->type, - NULL); - if (func_proto && - btf_distill_func_proto(log, btf, - func_proto, mname, - &st_ops->func_models[j])) { - pr_warn("Error in parsing func ptr %s in struct %s\n", - mname, st_ops->name); - break; - } + if (__btf_member_bitfield_size(t, member)) { + pr_warn("bit field member %s in struct %s is not supported\n", + mname, st_ops->name); + return -EOPNOTSUPP; } - if (j == btf_type_vlen(t)) { - if (st_ops->init(btf)) { - pr_warn("Error in init bpf_struct_ops %s\n", - st_ops->name); - } else { - st_ops->type_id = type_id; - st_ops->type = t; - st_ops->value_id = value_id; - st_ops->value_type = btf_type_by_id(btf, - value_id); - } + func_proto = btf_type_resolve_func_ptr(btf, + member->type, + NULL); + if (func_proto && + btf_distill_func_proto(log, btf, + func_proto, mname, + &st_ops->func_models[i])) { + pr_warn("Error in parsing func ptr %s in struct %s\n", + mname, st_ops->name); + return -EINVAL; } } -} - -extern struct btf *btf_vmlinux; - -static const struct bpf_struct_ops * -bpf_struct_ops_find_value(u32 value_id) -{ - unsigned int i; - - if (!value_id || !btf_vmlinux) - return NULL; - - for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { - if (bpf_struct_ops[i]->value_id == value_id) - return bpf_struct_ops[i]; - } - - return NULL; -} - -const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) -{ - unsigned int i; - if (!type_id || !btf_vmlinux) - return NULL; - - for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { - if (bpf_struct_ops[i]->type_id == type_id) - return bpf_struct_ops[i]; + if (i == btf_type_vlen(t)) { + if (st_ops->init(btf)) { + pr_warn("Error in init bpf_struct_ops %s\n", + st_ops->name); + return -EINVAL; + } else { + st_ops_desc->type_id = type_id; + st_ops_desc->type = t; + st_ops_desc->value_id = value_id; + st_ops_desc->value_type = 
btf_type_by_id(btf, + value_id); + } } - return NULL; + return 0; } static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key, @@ -265,7 +229,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, kvalue = &st_map->kvalue; /* Pair with smp_store_release() during map_update */ - state = smp_load_acquire(&kvalue->state); + state = smp_load_acquire(&kvalue->common.state); if (state == BPF_STRUCT_OPS_STATE_INIT) { memset(value, 0, map->value_size); return 0; @@ -276,7 +240,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, */ uvalue = value; memcpy(uvalue, st_map->uvalue, map->value_size); - uvalue->state = state; + uvalue->common.state = state; /* This value offers the user space a general estimate of how * many sockets are still utilizing this struct_ops for TCP @@ -284,7 +248,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, * should sufficiently meet our present goals. */ refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt); - refcount_set(&uvalue->refcnt, max_t(s64, refcnt, 0)); + refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0)); return 0; } @@ -296,10 +260,9 @@ static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key) static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map) { - const struct btf_type *t = st_map->st_ops->type; u32 i; - for (i = 0; i < btf_type_vlen(t); i++) { + for (i = 0; i < st_map->links_cnt; i++) { if (st_map->links[i]) { bpf_link_put(st_map->links[i]); st_map->links[i] = NULL; @@ -307,7 +270,7 @@ static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map) } } -static int check_zero_holes(const struct btf_type *t, void *data) +static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data) { const struct btf_member *member; u32 i, moff, msize, prev_mend = 0; @@ -319,8 +282,8 @@ static int check_zero_holes(const struct btf_type *t, void *data) memchr_inv(data + prev_mend, 0, moff - prev_mend)) return -EINVAL; - mtype = btf_type_by_id(btf_vmlinux, member->type); - mtype = btf_resolve_size(btf_vmlinux, mtype, &msize); + mtype = btf_type_by_id(btf, member->type); + mtype = btf_resolve_size(btf, mtype, &msize); if (IS_ERR(mtype)) return PTR_ERR(mtype); prev_mend = moff + msize; @@ -352,28 +315,30 @@ const struct bpf_link_ops bpf_struct_ops_link_lops = { int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, struct bpf_tramp_link *link, const struct btf_func_model *model, - void *image, void *image_end) + void *stub_func, void *image, void *image_end) { - u32 flags; + u32 flags = BPF_TRAMP_F_INDIRECT; tlinks[BPF_TRAMP_FENTRY].links[0] = link; tlinks[BPF_TRAMP_FENTRY].nr_links = 1; - /* BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops, - * and it must be used alone. - */ - flags = model->ret_size > 0 ? 
BPF_TRAMP_F_RET_FENTRY_RET : 0; + + if (model->ret_size > 0) + flags |= BPF_TRAMP_F_RET_FENTRY_RET; + return arch_prepare_bpf_trampoline(NULL, image, image_end, - model, flags, tlinks, NULL); + model, flags, tlinks, stub_func); } static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; - const struct bpf_struct_ops *st_ops = st_map->st_ops; + const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc; + const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops; struct bpf_struct_ops_value *uvalue, *kvalue; + const struct btf_type *module_type; const struct btf_member *member; - const struct btf_type *t = st_ops->type; + const struct btf_type *t = st_ops_desc->type; struct bpf_tramp_links *tlinks; void *udata, *kdata; int prog_fd, err; @@ -386,16 +351,16 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, if (*(u32 *)key != 0) return -E2BIG; - err = check_zero_holes(st_ops->value_type, value); + err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value); if (err) return err; uvalue = value; - err = check_zero_holes(t, uvalue->data); + err = check_zero_holes(st_map->btf, t, uvalue->data); if (err) return err; - if (uvalue->state || refcount_read(&uvalue->refcnt)) + if (uvalue->common.state || refcount_read(&uvalue->common.refcnt)) return -EINVAL; tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL); @@ -407,7 +372,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, mutex_lock(&st_map->lock); - if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) { + if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) { err = -EBUSY; goto unlock; } @@ -419,6 +384,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, image = st_map->image; image_end = st_map->image + PAGE_SIZE; + module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]); for_each_member(i, t, member) { const struct btf_type *mtype, *ptype; struct bpf_prog *prog; @@ -426,7 +392,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, u32 moff; moff = __btf_member_bit_offset(t, member) / 8; - ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL); + ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL); if (ptype == module_type) { if (*(void **)(udata + moff)) goto reset_unlock; @@ -451,8 +417,8 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, if (!ptype || !btf_type_is_func_proto(ptype)) { u32 msize; - mtype = btf_type_by_id(btf_vmlinux, member->type); - mtype = btf_resolve_size(btf_vmlinux, mtype, &msize); + mtype = btf_type_by_id(st_map->btf, member->type); + mtype = btf_resolve_size(st_map->btf, mtype, &msize); if (IS_ERR(mtype)) { err = PTR_ERR(mtype); goto reset_unlock; @@ -478,7 +444,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, } if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || - prog->aux->attach_btf_id != st_ops->type_id || + prog->aux->attach_btf_id != st_ops_desc->type_id || prog->expected_attach_type != i) { bpf_prog_put(prog); err = -EINVAL; @@ -497,11 +463,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, err = bpf_struct_ops_prepare_trampoline(tlinks, link, &st_ops->func_models[i], + *(void **)(st_ops->cfi_stubs + moff), image, image_end); if (err < 0) goto reset_unlock; - *(void **)(kdata + moff) = image; + *(void **)(kdata + moff) = image + cfi_get_offset(); image += err; /* 
put prog_id to udata */ @@ -520,7 +487,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, * * Pair with smp_load_acquire() during lookup_elem(). */ - smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_READY); + smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY); goto unlock; } @@ -538,7 +505,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, * It ensures the above udata updates (e.g. prog->aux->id) * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set. */ - smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE); + smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE); goto unlock; } @@ -569,12 +536,12 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key) if (st_map->map.map_flags & BPF_F_LINK) return -EOPNOTSUPP; - prev_state = cmpxchg(&st_map->kvalue.state, + prev_state = cmpxchg(&st_map->kvalue.common.state, BPF_STRUCT_OPS_STATE_INUSE, BPF_STRUCT_OPS_STATE_TOBEFREE); switch (prev_state) { case BPF_STRUCT_OPS_STATE_INUSE: - st_map->st_ops->unreg(&st_map->kvalue.data); + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data); bpf_map_put(map); return 0; case BPF_STRUCT_OPS_STATE_TOBEFREE: @@ -591,6 +558,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key) static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key, struct seq_file *m) { + struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; void *value; int err; @@ -600,7 +568,8 @@ static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key, err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); if (!err) { - btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id, + btf_type_seq_show(st_map->btf, + map->btf_vmlinux_value_type_id, value, m); seq_puts(m, "\n"); } @@ -615,13 +584,25 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map) if (st_map->links) bpf_struct_ops_map_put_progs(st_map); bpf_map_area_free(st_map->links); - bpf_jit_free_exec(st_map->image); + if (st_map->image) { + bpf_jit_free_exec(st_map->image); + bpf_jit_uncharge_modmem(PAGE_SIZE); + } bpf_map_area_free(st_map->uvalue); bpf_map_area_free(st_map); } static void bpf_struct_ops_map_free(struct bpf_map *map) { + struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; + + /* st_ops->owner was acquired during map_alloc to implicitly hold + * the btf's refcnt. The acquire was only done when btf_is_module(); + * st_map->btf cannot be NULL here. + */ + if (btf_is_module(st_map->btf)) + module_put(st_map->st_ops_desc->st_ops->owner); + /* The struct_ops's function may switch to another struct_ops.
* * For example, bpf_tcp_cc_x->init() may switch to @@ -645,28 +626,61 @@ static void bpf_struct_ops_map_free(struct bpf_map *map) static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr) { if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 || - (attr->map_flags & ~BPF_F_LINK) || !attr->btf_vmlinux_value_type_id + (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) + + !attr->btf_vmlinux_value_type_id) return -EINVAL; return 0; } static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) { - const struct bpf_struct_ops *st_ops; + const struct bpf_struct_ops_desc *st_ops_desc; size_t st_map_size; struct bpf_struct_ops_map *st_map; const struct btf_type *t, *vt; + struct module *mod = NULL; struct bpf_map *map; + struct btf *btf; + int ret; + + if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) { + /* The map holds btf for its whole lifetime. */ + btf = btf_get_by_fd(attr->value_type_btf_obj_fd); + if (IS_ERR(btf)) + return ERR_CAST(btf); + if (!btf_is_module(btf)) { + btf_put(btf); + return ERR_PTR(-EINVAL); + } - st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id); - if (!st_ops) - return ERR_PTR(-ENOTSUPP); + mod = btf_try_get_module(btf); + /* mod holds a refcnt to btf. We don't need an extra refcnt + * here. + */ + btf_put(btf); + if (!mod) + return ERR_PTR(-EINVAL); + } else { + btf = bpf_get_btf_vmlinux(); + if (IS_ERR(btf)) + return ERR_CAST(btf); + if (!btf) + return ERR_PTR(-ENOTSUPP); + } + + st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id); + if (!st_ops_desc) { + ret = -ENOTSUPP; + goto errout; + } - vt = st_ops->value_type; - if (attr->value_size != vt->size) - return ERR_PTR(-EINVAL); + vt = st_ops_desc->value_type; + if (attr->value_size != vt->size) { + ret = -EINVAL; + goto errout; + } - t = st_ops->type; + t = st_ops_desc->type; st_map_size = sizeof(*st_map) + /* kvalue stores the @@ -675,34 +689,58 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) (vt->size - sizeof(struct bpf_struct_ops_value)); st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE); - if (!st_map) - return ERR_PTR(-ENOMEM); + if (!st_map) { + ret = -ENOMEM; + goto errout; + } - st_map->st_ops = st_ops; + st_map->st_ops_desc = st_ops_desc; map = &st_map->map; + ret = bpf_jit_charge_modmem(PAGE_SIZE); + if (ret) + goto errout_free; + + st_map->image = bpf_jit_alloc_exec(PAGE_SIZE); + if (!st_map->image) { + /* __bpf_struct_ops_map_free() uses st_map->image as a flag + * for "charged or not". In this case, we need to uncharge + * here.
+ */ + bpf_jit_uncharge_modmem(PAGE_SIZE); + ret = -ENOMEM; + goto errout_free; + } st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE); + st_map->links_cnt = btf_type_vlen(t); st_map->links = - bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_links *), + bpf_map_area_alloc(st_map->links_cnt * sizeof(struct bpf_links *), NUMA_NO_NODE); - st_map->image = bpf_jit_alloc_exec(PAGE_SIZE); - if (!st_map->uvalue || !st_map->links || !st_map->image) { - __bpf_struct_ops_map_free(map); - return ERR_PTR(-ENOMEM); + if (!st_map->uvalue || !st_map->links) { + ret = -ENOMEM; + goto errout_free; } + st_map->btf = btf; mutex_init(&st_map->lock); set_vm_flush_reset_perms(st_map->image); bpf_map_init_from_attr(map, attr); return map; + +errout_free: + __bpf_struct_ops_map_free(map); +errout: + module_put(mod); + + return ERR_PTR(ret); } static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map) { struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; - const struct bpf_struct_ops *st_ops = st_map->st_ops; - const struct btf_type *vt = st_ops->value_type; + const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc; + const struct btf_type *vt = st_ops_desc->value_type; u64 usage; usage = sizeof(*st_map) + @@ -761,7 +799,7 @@ static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map) return map->map_type == BPF_MAP_TYPE_STRUCT_OPS && map->map_flags & BPF_F_LINK && /* Pair with smp_store_release() during map_update */ - smp_load_acquire(&st_map->kvalue.state) == BPF_STRUCT_OPS_STATE_READY; + smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY; } static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link) @@ -776,7 +814,7 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link) /* st_link->map can be NULL if * bpf_struct_ops_link_create() fails to register. */ - st_map->st_ops->unreg(&st_map->kvalue.data); + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data); bpf_map_put(&st_map->map); } kfree(st_link); @@ -823,7 +861,7 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map if (!bpf_struct_ops_valid_to_reg(new_map)) return -EINVAL; - if (!st_map->st_ops->update) + if (!st_map->st_ops_desc->st_ops->update) return -EOPNOTSUPP; mutex_lock(&update_mutex); @@ -836,12 +874,12 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map old_st_map = container_of(old_map, struct bpf_struct_ops_map, map); /* The new and old struct_ops must be the same type. 
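For orientation, the value-type shape that is_valid_value_type() earlier in this file enforces, sketched for a hypothetical module-defined "my_ops" (exactly two members: the common header first, then the wrapped ops struct; "my_ops" is a placeholder, not part of this patch)::

	/* BTF name must be VALUE_PREFIX plus st_ops->name,
	 * i.e. "bpf_struct_ops_my_ops" for this example.
	 */
	struct bpf_struct_ops_my_ops {
		struct bpf_struct_ops_common_value common;
		struct my_ops data ____cacheline_aligned_in_smp;
	};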
*/ - if (st_map->st_ops != old_st_map->st_ops) { + if (st_map->st_ops_desc != old_st_map->st_ops_desc) { err = -EINVAL; goto err_out; } - err = st_map->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data); + err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data); if (err) goto err_out; @@ -892,7 +930,7 @@ int bpf_struct_ops_link_create(union bpf_attr *attr) if (err) goto err_out; - err = st_map->st_ops->reg(st_map->kvalue.data); + err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data); if (err) { bpf_link_cleanup(&link_primer); link = NULL; @@ -908,3 +946,9 @@ int bpf_struct_ops_link_create(union bpf_attr *attr) return err; } +void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map) +{ + struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; + + info->btf_vmlinux_id = btf_obj_id(st_map->btf); +} diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h deleted file mode 100644 index 5678a9ddf8178c2d222bb84c612097eb382f9139..0000000000000000000000000000000000000000 --- a/kernel/bpf/bpf_struct_ops_types.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* internal file - do not include directly */ - -#ifdef CONFIG_BPF_JIT -#ifdef CONFIG_NET -BPF_STRUCT_OPS_TYPE(bpf_dummy_ops) -#endif -#ifdef CONFIG_INET -#include -BPF_STRUCT_OPS_TYPE(tcp_congestion_ops) -#endif -#endif diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 14361b3b9edd00b20d38ab71706491a2ffac87e1..0f80f38cd4f7538c87ec399e87ba9c56bf558f53 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -241,6 +242,12 @@ struct btf_id_dtor_kfunc_tab { struct btf_id_dtor_kfunc dtors[]; }; +struct btf_struct_ops_tab { + u32 cnt; + u32 capacity; + struct bpf_struct_ops_desc ops[]; +}; + struct btf { void *data; struct btf_type **types; @@ -258,6 +265,7 @@ struct btf { struct btf_kfunc_set_tab *kfunc_set_tab; struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab; struct btf_struct_metas *struct_meta_tab; + struct btf_struct_ops_tab *struct_ops_tab; /* split BTF support */ struct btf *base_btf; @@ -1688,11 +1696,20 @@ static void btf_free_struct_meta_tab(struct btf *btf) btf->struct_meta_tab = NULL; } +static void btf_free_struct_ops_tab(struct btf *btf) +{ + struct btf_struct_ops_tab *tab = btf->struct_ops_tab; + + kfree(tab); + btf->struct_ops_tab = NULL; +} + static void btf_free(struct btf *btf) { btf_free_struct_meta_tab(btf); btf_free_dtor_kfunc_tab(btf); btf_free_kfunc_set_tab(btf); + btf_free_struct_ops_tab(btf); kvfree(btf->types); kvfree(btf->resolved_sizes); kvfree(btf->resolved_ids); @@ -1707,6 +1724,11 @@ static void btf_free_rcu(struct rcu_head *rcu) btf_free(btf); } +const char *btf_get_name(const struct btf *btf) +{ + return btf->name; +} + void btf_get(struct btf *btf) { refcount_inc(&btf->refcnt); @@ -5764,8 +5786,6 @@ struct btf *btf_parse_vmlinux(void) /* btf_parse_vmlinux() runs under bpf_verifier_lock */ bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); - bpf_struct_ops_init(btf, log); - refcount_set(&btf->refcnt, 1); err = btf_alloc_id(btf); @@ -6075,6 +6095,9 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, if (prog_args_trusted(prog)) info->reg_type |= PTR_TRUSTED; + if (btf_param_match_suffix(btf, &args[arg], "__nullable")) + info->reg_type |= PTR_MAYBE_NULL; + if (tgt_prog) { enum bpf_prog_type tgt_type; @@ -6121,6 +6144,7 @@ bool btf_ctx_access(int off, int size, enum 
bpf_access_type type, __btf_name_by_offset(btf, t->name_off)); return true; } +EXPORT_SYMBOL_GPL(btf_ctx_access); enum bpf_struct_walk_result { /* < 0 error */ @@ -7534,6 +7558,17 @@ static struct btf *btf_get_module_btf(const struct module *module) return btf; } +static int check_btf_kconfigs(const struct module *module, const char *feature) +{ + if (!module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { + pr_err("missing vmlinux BTF, cannot register %s\n", feature); + return -ENOENT; + } + if (module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) + pr_warn("missing module BTF, cannot register %s\n", feature); + return 0; +} + BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags) { struct btf *btf = NULL; @@ -7894,15 +7929,8 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook, int ret, i; btf = btf_get_module_btf(kset->owner); - if (!btf) { - if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { - pr_err("missing vmlinux BTF, cannot register kfuncs\n"); - return -ENOENT; - } - if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) - pr_warn("missing module BTF, cannot register kfuncs\n"); - return 0; - } + if (!btf) + return check_btf_kconfigs(kset->owner, "kfunc"); if (IS_ERR(btf)) return PTR_ERR(btf); @@ -7926,6 +7954,14 @@ int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, { enum btf_kfunc_hook hook; + /* All kfuncs need to be tagged as such in BTF. + * WARN() for initcall registrations that do not check errors. + */ + if (!(kset->set->flags & BTF_SET8_KFUNCS)) { + WARN_ON(!kset->owner); + return -EINVAL; + } + hook = bpf_prog_type_to_kfunc_hook(prog_type); return __register_btf_kfunc_id_set(hook, kset); } @@ -8002,17 +8038,8 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c int ret; btf = btf_get_module_btf(owner); - if (!btf) { - if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { - pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n"); - return -ENOENT; - } - if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) { - pr_err("missing module BTF, cannot register dtor kfuncs\n"); - return -ENOENT; - } - return 0; - } + if (!btf) + return check_btf_kconfigs(owner, "dtor kfuncs"); if (IS_ERR(btf)) return PTR_ERR(btf); @@ -8599,3 +8626,141 @@ bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, return !strncmp(reg_name, arg_name, cmp_len); } + +#ifdef CONFIG_BPF_JIT +static int +btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops, + struct bpf_verifier_log *log) +{ + struct btf_struct_ops_tab *tab, *new_tab; + int i, err; + + tab = btf->struct_ops_tab; + if (!tab) { + tab = kzalloc(offsetof(struct btf_struct_ops_tab, ops[4]), + GFP_KERNEL); + if (!tab) + return -ENOMEM; + tab->capacity = 4; + btf->struct_ops_tab = tab; + } + + for (i = 0; i < tab->cnt; i++) + if (tab->ops[i].st_ops == st_ops) + return -EEXIST; + + if (tab->cnt == tab->capacity) { + new_tab = krealloc(tab, + offsetof(struct btf_struct_ops_tab, + ops[tab->capacity * 2]), + GFP_KERNEL); + if (!new_tab) + return -ENOMEM; + tab = new_tab; + tab->capacity *= 2; + btf->struct_ops_tab = tab; + } + + tab->ops[btf->struct_ops_tab->cnt].st_ops = st_ops; + + err = bpf_struct_ops_desc_init(&tab->ops[btf->struct_ops_tab->cnt], btf, log); + if (err) + return err; + + btf->struct_ops_tab->cnt++; + + return 0; +} + +const struct bpf_struct_ops_desc * +bpf_struct_ops_find_value(struct btf *btf, u32 value_id) +{ + const struct bpf_struct_ops_desc *st_ops_list; + unsigned int i; + u32 cnt; + + if (!value_id) + return 
NULL; + if (!btf->struct_ops_tab) + return NULL; + + cnt = btf->struct_ops_tab->cnt; + st_ops_list = btf->struct_ops_tab->ops; + for (i = 0; i < cnt; i++) { + if (st_ops_list[i].value_id == value_id) + return &st_ops_list[i]; + } + + return NULL; +} + +const struct bpf_struct_ops_desc * +bpf_struct_ops_find(struct btf *btf, u32 type_id) +{ + const struct bpf_struct_ops_desc *st_ops_list; + unsigned int i; + u32 cnt; + + if (!type_id) + return NULL; + if (!btf->struct_ops_tab) + return NULL; + + cnt = btf->struct_ops_tab->cnt; + st_ops_list = btf->struct_ops_tab->ops; + for (i = 0; i < cnt; i++) { + if (st_ops_list[i].type_id == type_id) + return &st_ops_list[i]; + } + + return NULL; +} + +int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops) +{ + struct bpf_verifier_log *log; + struct btf *btf; + int err = 0; + + btf = btf_get_module_btf(st_ops->owner); + if (!btf) + return check_btf_kconfigs(st_ops->owner, "struct_ops"); + if (IS_ERR(btf)) + return PTR_ERR(btf); + + log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN); + if (!log) { + err = -ENOMEM; + goto errout; + } + + log->level = BPF_LOG_KERNEL; + + err = btf_add_struct_ops(btf, st_ops, log); + +errout: + kfree(log); + btf_put(btf); + + return err; +} +EXPORT_SYMBOL_GPL(__register_bpf_struct_ops); +#endif + +bool btf_param_match_suffix(const struct btf *btf, + const struct btf_param *arg, + const char *suffix) +{ + int suffix_len = strlen(suffix), len; + const char *param_name; + + /* In the future, this can be ported to use BTF tagging */ + param_name = btf_name_by_offset(btf, arg->name_off); + if (str_is_empty(param_name)) + return false; + len = strlen(param_name); + if (len <= suffix_len) + return false; + param_name += len - suffix_len; + return !strncmp(param_name, suffix, suffix_len); +} diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 02f327f05fd6195938ac74801df29114d18d5ace..49e6430c1fec58ccc8293969f1f3cf1bba0ef6c1 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -121,6 +121,9 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag #endif INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); +#ifdef CONFIG_FINEIBT + INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode); +#endif mutex_init(&fp->aux->used_maps_mutex); mutex_init(&fp->aux->dst_mutex); @@ -691,6 +694,23 @@ void bpf_prog_kallsyms_add(struct bpf_prog *fp) fp->aux->ksym.prog = true; bpf_ksym_add(&fp->aux->ksym); + +#ifdef CONFIG_FINEIBT + /* + * When FineIBT, code in the __cfi_foo() symbols can get executed + * and hence unwinder needs help. 
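Back to __register_bpf_struct_ops() above, a module-side registration sketch; every "my_ops" identifier is a placeholder, and the callback set mirrors the fields this series actually dereferences (init, reg, unreg, name, owner)::

	static struct bpf_struct_ops bpf_my_ops = {
		.init	= bpf_my_ops_init,
		.reg	= bpf_my_ops_reg,
		.unreg	= bpf_my_ops_unreg,
		.name	= "my_ops",
		.owner	= THIS_MODULE,
	};

	static int __init my_ops_module_init(void)
	{
		/* Adds my_ops to the owning module's BTF struct_ops table;
		 * per check_btf_kconfigs(), missing module BTF yields only
		 * a warning and a 0 return.
		 */
		return __register_bpf_struct_ops(&bpf_my_ops);
	}
	module_init(my_ops_module_init);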
+ */ + if (cfi_mode != CFI_FINEIBT) + return; + + snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN, + "__cfi_%s", fp->aux->ksym.name); + + fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16; + fp->aux->ksym_prefix.end = (unsigned long) fp->bpf_func; + + bpf_ksym_add(&fp->aux->ksym_prefix); +#endif } void bpf_prog_kallsyms_del(struct bpf_prog *fp) @@ -699,6 +719,11 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp) return; bpf_ksym_del(&fp->aux->ksym); +#ifdef CONFIG_FINEIBT + if (cfi_mode != CFI_FINEIBT) + return; + bpf_ksym_del(&fp->aux->ksym_prefix); +#endif } static struct bpf_ksym *bpf_ksym_find(unsigned long addr) @@ -2697,7 +2722,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux, bool sleepable; u32 i; - sleepable = aux->sleepable; + sleepable = aux->prog->sleepable; for (i = 0; i < len; i++) { map = used_maps[i]; if (map->ops->map_poke_untrack) diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c index 6983af8e093c4b0260f64d62d7c5d040e67e647f..317f748953709a95961e30559a7ec118fa5fec7d 100644 --- a/kernel/bpf/cpumask.c +++ b/kernel/bpf/cpumask.c @@ -34,9 +34,7 @@ static bool cpu_valid(u32 cpu) return cpu < nr_cpu_ids; } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global kfuncs as their definitions will be in BTF"); +__bpf_kfunc_start_defs(); /** * bpf_cpumask_create() - Create a mutable BPF cpumask. @@ -98,6 +96,12 @@ __bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask) migrate_enable(); } +__bpf_kfunc void bpf_cpumask_release_dtor(void *cpumask) +{ + bpf_cpumask_release(cpumask); +} +CFI_NOSEAL(bpf_cpumask_release_dtor); + /** * bpf_cpumask_first() - Get the index of the first nonzero bit in the cpumask. * @cpumask: The cpumask being queried. @@ -407,9 +411,9 @@ __bpf_kfunc u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, return cpumask_any_and_distribute(src1, src2); } -__diag_pop(); +__bpf_kfunc_end_defs(); -BTF_SET8_START(cpumask_kfunc_btf_ids) +BTF_KFUNCS_START(cpumask_kfunc_btf_ids) BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS) @@ -434,7 +438,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_full, KF_RCU) BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU) BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU) BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU) -BTF_SET8_END(cpumask_kfunc_btf_ids) +BTF_KFUNCS_END(cpumask_kfunc_btf_ids) static const struct btf_kfunc_id_set cpumask_kfunc_set = { .owner = THIS_MODULE, @@ -443,7 +447,7 @@ static const struct btf_kfunc_id_set cpumask_kfunc_set = { BTF_ID_LIST(cpumask_dtor_ids) BTF_ID(struct, bpf_cpumask) -BTF_ID(func, bpf_cpumask_release) +BTF_ID(func, bpf_cpumask_release_dtor) static int __init cpumask_kfunc_init(void) { diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 5f2356b47b2ddc9aba5680082721bbbadb86bb47..ebe0176900b849d7cf90f0f3a5b39fe7754b7121 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -60,6 +60,8 @@ struct xdp_dev_bulk_queue { struct net_device *dev_rx; struct bpf_prog *xdp_prog; unsigned int count; + + CK_KABI_RESERVE(1) }; struct bpf_dtab_netdev { diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 41d62405c85214cd3a4a6bbe9b2c2eaa0bbef331..ce958e21a7de398de1859331f6c5491e736ef050 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1973,9 +1973,7 @@ void bpf_rb_root_free(const struct btf_field *field, void *rb_root, } } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", 
- "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) { @@ -2212,6 +2210,12 @@ __bpf_kfunc void bpf_task_release(struct task_struct *p) put_task_struct_rcu_user(p); } +__bpf_kfunc void bpf_task_release_dtor(void *p) +{ + put_task_struct_rcu_user(p); +} +CFI_NOSEAL(bpf_task_release_dtor); + #ifdef CONFIG_CGROUPS /** * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by @@ -2236,6 +2240,12 @@ __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp) cgroup_put(cgrp); } +__bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp) +{ + cgroup_put(cgrp); +} +CFI_NOSEAL(bpf_cgroup_release_dtor); + /** * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor * array. A cgroup returned by this kfunc which is not subsequently stored in a @@ -2529,9 +2539,9 @@ __bpf_kfunc void bpf_rcu_read_unlock(void) rcu_read_unlock(); } -__diag_pop(); +__bpf_kfunc_end_defs(); -BTF_SET8_START(generic_btf_ids) +BTF_KFUNCS_START(generic_btf_ids) #ifdef CONFIG_KEXEC_CORE BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) #endif @@ -2556,7 +2566,7 @@ BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU) #endif BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) -BTF_SET8_END(generic_btf_ids) +BTF_KFUNCS_END(generic_btf_ids) static const struct btf_kfunc_id_set generic_kfunc_set = { .owner = THIS_MODULE, @@ -2566,13 +2576,13 @@ static const struct btf_kfunc_id_set generic_kfunc_set = { BTF_ID_LIST(generic_dtor_ids) BTF_ID(struct, task_struct) -BTF_ID(func, bpf_task_release) +BTF_ID(func, bpf_task_release_dtor) #ifdef CONFIG_CGROUPS BTF_ID(struct, cgroup) -BTF_ID(func, bpf_cgroup_release) +BTF_ID(func, bpf_cgroup_release_dtor) #endif -BTF_SET8_START(common_btf_ids) +BTF_KFUNCS_START(common_btf_ids) BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx) BTF_ID_FLAGS(func, bpf_rdonly_cast) BTF_ID_FLAGS(func, bpf_rcu_read_lock) @@ -2587,7 +2597,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_is_null) BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) BTF_ID_FLAGS(func, bpf_dynptr_size) BTF_ID_FLAGS(func, bpf_dynptr_clone) -BTF_SET8_END(common_btf_ids) +BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { .owner = THIS_MODULE, diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index 6fc9dae9edc81cad457b3e193ce671a3b0723aa8..9575314f40a69236267f4d52abe2b886e40441f2 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -193,9 +193,7 @@ static int __init bpf_map_iter_init(void) late_initcall(bpf_map_iter_init); -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map) { @@ -213,11 +211,11 @@ __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map) return ret; } -__diag_pop(); +__bpf_kfunc_end_defs(); -BTF_SET8_START(bpf_map_iter_kfunc_ids) +BTF_KFUNCS_START(bpf_map_iter_kfunc_ids) BTF_ID_FLAGS(func, bpf_map_sum_elem_count, KF_TRUSTED_ARGS) -BTF_SET8_END(bpf_map_iter_kfunc_ids) +BTF_KFUNCS_END(bpf_map_iter_kfunc_ids) static const struct btf_kfunc_id_set bpf_map_iter_kfunc_set = { .owner = THIS_MODULE, diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index a330f38ae7335ef5d40e0db347c480f97f1ac5e5..0ed48217b2089794ed873ff679c1f89c4a3f519b 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -156,7 +156,7 @@ 
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, goto build_id_valid; } vma = find_vma(current->mm, ips[i]); - if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) { + if (!vma || build_id_parse_nofault(vma, id_offs[i].build_id, NULL)) { /* per entry fall back to ips */ id_offs[i].status = BPF_STACK_BUILD_ID_IP; id_offs[i].ip = ips[i]; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ba38c08a9a059a9e4630241802af0fdbbbe87bad..5a2e8cdce19e86351f58dea3f991872ebd74dda6 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1122,7 +1122,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf, return ret; } -#define BPF_MAP_CREATE_LAST_FIELD map_extra +#define BPF_MAP_CREATE_LAST_FIELD value_type_btf_obj_fd /* called via syscall */ static int map_create(union bpf_attr *attr) { @@ -2156,7 +2156,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) btf_put(prog->aux->attach_btf); if (deferred) { - if (prog->aux->sleepable) + if (prog->sleepable) call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); else call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); @@ -2681,11 +2681,11 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) } prog->expected_attach_type = attr->expected_attach_type; + prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE); prog->aux->attach_btf = attach_btf; prog->aux->attach_btf_id = attr->attach_btf_id; prog->aux->dst_prog = dst_prog; prog->aux->dev_bound = !!attr->prog_ifindex; - prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; err = security_bpf_prog_alloc(prog->aux); @@ -2906,7 +2906,7 @@ static void bpf_link_free(struct bpf_link *link) bpf_link_free_id(link->id); if (link->prog) { - sleepable = link->prog->aux->sleepable; + sleepable = link->prog->sleepable; /* detach BPF program, clean up used resources */ ops->release(link); } @@ -4701,6 +4701,8 @@ static int bpf_map_get_info_by_fd(struct file *file, info.btf_value_type_id = map->btf_value_type_id; } info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; + if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) + bpf_map_struct_ops_info_fill(&info, map); if (bpf_map_is_offloaded(map)) { err = bpf_map_offload_info_fill(&info, map); @@ -5402,7 +5404,7 @@ static int bpf_prog_bind_map(union bpf_attr *attr) /* The bpf program will not access the bpf map, but for the sake of * simplicity, increase sleepable_refcnt for sleepable program as well. */ - if (prog->aux->sleepable) + if (prog->sleepable) atomic64_inc(&map->sleepable_refcnt); memcpy(used_maps_new, used_maps_old, sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index e97aeda3a86b55522a73e53279543cb5b4df6919..45477c604a1ace06b711934451ae1f12788a240b 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -1003,7 +1003,7 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr) bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog) { - bool sleepable = prog->aux->sleepable; + bool sleepable = prog->sleepable; if (bpf_prog_check_recur(prog)) return sleepable ? 
__bpf_prog_enter_sleepable_recur : @@ -1018,7 +1018,7 @@ bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog) bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog) { - bool sleepable = prog->aux->sleepable; + bool sleepable = prog->sleepable; if (bpf_prog_check_recur(prog)) return sleepable ? __bpf_prog_exit_sleepable_recur : diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d6a4102312fadd63e9fd467c61cdb41fc40a27ef..4948ef414d67edfdd94283e3d2a2239cc6615d1c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include "disasm.h" @@ -5314,6 +5316,11 @@ static int map_kptr_match_type(struct bpf_verifier_env *env, return -EINVAL; } +static bool in_sleepable(struct bpf_verifier_env *env) +{ + return env->prog->sleepable; +} + /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock() * can dereference RCU protected pointers and result is PTR_TRUSTED. */ @@ -5321,7 +5328,7 @@ static bool in_rcu_cs(struct bpf_verifier_env *env) { return env->cur_state->active_rcu_lock || env->cur_state->active_lock.ptr || - !env->prog->aux->sleepable; + !in_sleepable(env); } /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */ @@ -9981,7 +9988,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn return -EINVAL; } - if (!env->prog->aux->sleepable && fn->might_sleep) { + if (!in_sleepable(env) && fn->might_sleep) { verbose(env, "helper call might sleep in a non-sleepable prog\n"); return -EINVAL; } @@ -10011,7 +10018,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn return -EINVAL; } - if (env->prog->aux->sleepable && is_storage_get_function(func_id)) + if (in_sleepable(env) && is_storage_get_function(func_id)) env->insn_aux_data[insn_idx].storage_get_func_atomic = true; } @@ -10456,24 +10463,6 @@ static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta) return meta->kfunc_flags & KF_RCU; } -static bool __kfunc_param_match_suffix(const struct btf *btf, - const struct btf_param *arg, - const char *suffix) -{ - int suffix_len = strlen(suffix), len; - const char *param_name; - - /* In the future, this can be ported to use BTF tagging */ - param_name = btf_name_by_offset(btf, arg->name_off); - if (str_is_empty(param_name)) - return false; - len = strlen(param_name); - if (len < suffix_len) - return false; - param_name += len - suffix_len; - return !strncmp(param_name, suffix, suffix_len); -} - static bool is_kfunc_arg_mem_size(const struct btf *btf, const struct btf_param *arg, const struct bpf_reg_state *reg) @@ -10484,7 +10473,7 @@ static bool is_kfunc_arg_mem_size(const struct btf *btf, if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) return false; - return __kfunc_param_match_suffix(btf, arg, "__sz"); + return btf_param_match_suffix(btf, arg, "__sz"); } static bool is_kfunc_arg_const_mem_size(const struct btf *btf, @@ -10497,37 +10486,37 @@ static bool is_kfunc_arg_const_mem_size(const struct btf *btf, if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) return false; - return __kfunc_param_match_suffix(btf, arg, "__szk"); + return btf_param_match_suffix(btf, arg, "__szk"); } static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__opt"); + return btf_param_match_suffix(btf, arg, "__opt"); } static bool is_kfunc_arg_constant(const struct btf *btf, const struct 
btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__k"); + return btf_param_match_suffix(btf, arg, "__k"); } static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__ign"); + return btf_param_match_suffix(btf, arg, "__ign"); } static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__alloc"); + return btf_param_match_suffix(btf, arg, "__alloc"); } static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__uninit"); + return btf_param_match_suffix(btf, arg, "__uninit"); } static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr"); + return btf_param_match_suffix(btf, arg, "__refcounted_kptr"); } static bool is_kfunc_arg_scalar_with_name(const struct btf *btf, @@ -11747,7 +11736,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, } sleepable = is_kfunc_sleepable(&meta); - if (sleepable && !env->prog->aux->sleepable) { + if (sleepable && !in_sleepable(env)) { verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name); return -EACCES; } @@ -17554,7 +17543,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, return -EINVAL; } - if (prog->aux->sleepable) + if (prog->sleepable) switch (map->map_type) { case BPF_MAP_TYPE_HASH: case BPF_MAP_TYPE_LRU_HASH: @@ -17738,7 +17727,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) return -E2BIG; } - if (env->prog->aux->sleepable) + if (env->prog->sleepable) atomic64_inc(&map->sleepable_refcnt); /* hold the map. 
If the program is rejected by verifier, * the map will be released by release_maps() or it @@ -19148,7 +19137,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) } if (is_storage_get_function(insn->imm)) { - if (!env->prog->aux->sleepable || + if (!in_sleepable(env) || env->insn_aux_data[i + delta].storage_get_func_atomic) insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC); else @@ -19737,10 +19726,12 @@ static void print_verification_stats(struct bpf_verifier_env *env) static int check_struct_ops_btf_id(struct bpf_verifier_env *env) { const struct btf_type *t, *func_proto; + const struct bpf_struct_ops_desc *st_ops_desc; const struct bpf_struct_ops *st_ops; const struct btf_member *member; struct bpf_prog *prog = env->prog; u32 btf_id, member_idx; + struct btf *btf; const char *mname; if (!prog->gpl_compatible) { @@ -19748,15 +19739,30 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) return -EINVAL; } + if (!prog->aux->attach_btf_id) + return -ENOTSUPP; + + btf = prog->aux->attach_btf; + if (btf_is_module(btf)) { + /* Make sure st_ops is valid through the lifetime of env */ + env->attach_btf_mod = btf_try_get_module(btf); + if (!env->attach_btf_mod) { + verbose(env, "struct_ops module %s is not found\n", + btf_get_name(btf)); + return -ENOTSUPP; + } + } + btf_id = prog->aux->attach_btf_id; - st_ops = bpf_struct_ops_find(btf_id); - if (!st_ops) { + st_ops_desc = bpf_struct_ops_find(btf, btf_id); + if (!st_ops_desc) { verbose(env, "attach_btf_id %u is not a supported struct\n", btf_id); return -ENOTSUPP; } + st_ops = st_ops_desc->st_ops; - t = st_ops->type; + t = st_ops_desc->type; member_idx = prog->expected_attach_type; if (member_idx >= btf_type_vlen(t)) { verbose(env, "attach to invalid member idx %u of struct %s\n", @@ -19765,8 +19771,8 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) } member = &btf_type_member(t)[member_idx]; - mname = btf_name_by_offset(btf_vmlinux, member->name_off); - func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, + mname = btf_name_by_offset(btf, member->name_off); + func_proto = btf_type_resolve_func_ptr(btf, member->type, NULL); if (!func_proto) { verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", @@ -19825,11 +19831,13 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, struct bpf_attach_target_info *tgt_info) { bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; + char trace_symbol[KSYM_SYMBOL_LEN]; const char prefix[] = "btf_trace_"; + struct bpf_raw_event_map *btp; int ret = 0, subprog = -1, i; const struct btf_type *t; bool conservative = true; - const char *tname; + const char *tname, *fname; struct btf *btf; long addr = 0; struct module *mod = NULL; @@ -19944,10 +19952,34 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, return -EINVAL; } tname += sizeof(prefix) - 1; - t = btf_type_by_id(btf, t->type); - if (!btf_type_is_ptr(t)) - /* should never happen in valid vmlinux build */ + + /* The func_proto of "btf_trace_##tname" is generated from typedef without argument + * names. Thus using bpf_raw_event_map to get argument names. 
+ */ + btp = bpf_get_raw_tracepoint(tname); + if (!btp) return -EINVAL; + fname = kallsyms_lookup((unsigned long)btp->bpf_func, NULL, NULL, NULL, + trace_symbol); + bpf_put_raw_tracepoint(btp); + + if (fname) + ret = btf_find_by_name_kind(btf, fname, BTF_KIND_FUNC); + + if (!fname || ret < 0) { + bpf_log(log, "Cannot find btf of tracepoint template, fall back to %s%s.\n", + prefix, tname); + t = btf_type_by_id(btf, t->type); + if (!btf_type_is_ptr(t)) + /* should never happen in valid vmlinux build */ + return -EINVAL; + } else { + t = btf_type_by_id(btf, ret); + if (!btf_type_is_func(t)) + /* should never happen in valid vmlinux build */ + return -EINVAL; + } + t = btf_type_by_id(btf, t->type); if (!btf_type_is_func_proto(t)) /* should never happen in valid vmlinux build */ @@ -20024,7 +20056,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, } } - if (prog->aux->sleepable) { + if (prog->sleepable) { ret = -EINVAL; switch (prog->type) { case BPF_PROG_TYPE_TRACING: @@ -20135,14 +20167,14 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) u64 key; if (prog->type == BPF_PROG_TYPE_SYSCALL) { - if (prog->aux->sleepable) + if (prog->sleepable) /* attach_btf_id checked to be zero already */ return 0; verbose(env, "Syscall programs can only be sleepable\n"); return -EINVAL; } - if (prog->aux->sleepable && !can_be_sleepable(prog)) { + if (prog->sleepable && !can_be_sleepable(prog)) { verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n"); return -EINVAL; } @@ -20449,6 +20481,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 env->prog->expected_attach_type = 0; *prog = env->prog; + + module_put(env->attach_btf_mod); err_unlock: if (!is_priv) mutex_unlock(&bpf_verifier_lock); diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index 5e17f01ced9fd2345f3f7332c65e276d09e9ec67..b5394a68fb4f93580366639a8abfff94799ad3c5 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -108,6 +108,7 @@ struct cgroup_taskset { /* the src and dst cset list running through cset->mg_node */ struct list_head src_csets; struct list_head dst_csets; + int dst_count; /* the number of tasks in the set */ int nr_tasks; @@ -152,6 +153,7 @@ struct cgroup_mgctx { .src_csets = LIST_HEAD_INIT(tset.src_csets), \ .dst_csets = LIST_HEAD_INIT(tset.dst_csets), \ .csets = &tset.src_csets, \ + .dst_count = 0, \ } #define CGROUP_MGCTX_INIT(name) \ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 36097e8c904fe5df2c218bdf7a15c600c8729da8..28420b1d6c9b08e41268ad6a9f0d4f176fcf1c0c 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -59,6 +59,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -492,6 +493,12 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, return &cgrp->self; } +struct cgroup_subsys_state *global_cgroup_css(struct cgroup *cgrp, + int ssid) +{ + return cgroup_css(cgrp, cgroup_subsys[(ssid)]); +} + /** * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss * @cgrp: the cgroup of interest @@ -2404,6 +2411,28 @@ void cgroup_attach_unlock(bool lock_threadgroup) cpus_read_unlock(); } +static void css_account_procs(struct task_struct *task, + struct css_set *cset, int num) +{ + struct cgroup_subsys *ss; + int ssid; + + if (!thread_group_leader(task)) + return; + + for_each_subsys(ss, ssid) { + struct cgroup_subsys_state *css = cset->subsys[ssid]; + + if (!css) + 
continue; + css->nr_procs += num; + while (css->parent) { + css = css->parent; + css->nr_procs += num; + } + } +} + /** * cgroup_migrate_add_task - add a migration target task to a migration context * @task: target task @@ -2438,9 +2467,11 @@ static void cgroup_migrate_add_task(struct task_struct *task, if (list_empty(&cset->mg_node)) list_add_tail(&cset->mg_node, &mgctx->tset.src_csets); - if (list_empty(&cset->mg_dst_cset->mg_node)) + if (list_empty(&cset->mg_dst_cset->mg_node)) { list_add_tail(&cset->mg_dst_cset->mg_node, &mgctx->tset.dst_csets); + mgctx->tset.dst_count++; + } } /** @@ -2521,9 +2552,14 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) struct task_struct *task, *tmp_task; struct css_set *cset, *tmp_cset; int ssid, failed_ssid, ret; + LIST_HEAD(tmp_links); /* check that we can legitimately attach to the cgroup */ if (tset->nr_tasks) { + ret = allocate_memcg_blkcg_links(tset->dst_count*2, &tmp_links); + if (ret) + goto out_release_tset; + do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { if (ss->can_attach) { tset->ssid = ssid; @@ -2549,8 +2585,10 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) get_css_set(to_cset); to_cset->nr_tasks++; + css_account_procs(task, to_cset, 1); css_set_move_task(task, from_cset, to_cset, true); from_cset->nr_tasks--; + css_account_procs(task, from_cset, -1); /* * If the source or destination cgroup is frozen, * the task might require to change its state. @@ -2576,6 +2614,8 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) tset->ssid = ssid; ss->attach(tset); } + list_for_each_entry(cset, &tset->dst_csets, mg_node) + insert_memcg_blkcg_link(ss, &tmp_links, cset); } while_each_subsys_mask(); } @@ -2602,6 +2642,8 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) } spin_unlock_irq(&css_set_lock); + free_memcg_blkcg_links(&tmp_links); + /* * Re-initialize the cgroup_taskset structure in case it is reused * again in another cgroup_migrate_add_task()/cgroup_migrate_execute() @@ -5352,6 +5394,8 @@ static void css_free_rwork_fn(struct work_struct *work) struct cgroup_subsys_state *parent = css->parent; int id = css->id; + delete_memcg_blkcg_link(ss, css); + ss->css_free(css); cgroup_idr_remove(&ss->css_idr, id); cgroup_put(cgrp); @@ -6602,6 +6646,7 @@ void cgroup_post_fork(struct task_struct *child, WARN_ON_ONCE(!list_empty(&child->cg_list)); cset->nr_tasks++; + css_account_procs(child, cset, 1); css_set_move_task(child, NULL, cset, false); } else { put_css_set(cset); @@ -6683,6 +6728,7 @@ void cgroup_exit(struct task_struct *tsk) css_set_move_task(tsk, cset, NULL, false); list_add_tail(&tsk->cg_list, &cset->dying_tasks); cset->nr_tasks--; + css_account_procs(tsk, cset, -1); if (dl_task(tsk)) dec_dl_tasks_cs(tsk); diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 3646426c69e253d02daf2a1aa47ba2b90c879c80..72050e6c0224a5e7b5b6e121480844ffe85dc6be 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -44,6 +44,7 @@ #include #include #include +#include DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); @@ -1601,6 +1602,8 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd, return 0; } +static struct cpumask added, deleted, old_cpus; + /* * update_cpumasks_hier() flags */ @@ -1657,6 +1660,11 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, parent->child_ecpus_count--; } + if (cpumask_empty(cp->effective_cpus)) + cpumask_copy(&old_cpus, parent->effective_cpus); + else + 
cpumask_copy(&old_cpus, cp->effective_cpus); + /* * Skip the whole subtree if * 1) the cpumask remains the same, @@ -1748,8 +1756,16 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, WARN_ON(!is_in_v2_mode() && !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); + /* add = new - old = new & (~old) */ + cpumask_andnot(&added, tmp->new_cpus, &old_cpus); + cpuacct_cpuset_changed(cs->css.cgroup, NULL, &added); + update_tasks_cpumask(cp, tmp->new_cpus); + /* deleted = old - new = old & (~new) */ + cpumask_andnot(&deleted, &old_cpus, tmp->new_cpus); + cpuacct_cpuset_changed(cs->css.cgroup, &deleted, NULL); + /* * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE * from parent if current cpuset isn't a valid partition root @@ -2646,9 +2662,10 @@ static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) { lockdep_assert_held(&cpuset_mutex); - if (cs != &top_cpuset) + if (cs != &top_cpuset) { guarantee_online_cpus(task, cpus_attach); - else + wilds_cpus_allowed(cpus_attach); + } else cpumask_andnot(cpus_attach, task_cpu_possible_mask(task), cs->subparts_cpus); /* @@ -3328,6 +3345,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) cs->effective_mems = parent->mems_allowed; cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); cpumask_copy(cs->effective_cpus, parent->cpus_allowed); + cpuacct_cpuset_changed(cs->css.cgroup, NULL, cs->effective_cpus); spin_unlock_irq(&callback_lock); out_unlock: mutex_unlock(&cpuset_mutex); @@ -3477,8 +3495,24 @@ static void cpuset_fork(struct task_struct *task) rcu_read_unlock(); if (same_cs) { - if (cs == &top_cpuset) + if (cs == &top_cpuset) { + /* + * This is necessary since update_wilds_cpumask() + * could have missed the 'task', if its parent is + * the last one on the iteration list, like: + * + * 1. 'task' dups old dyn_allowed from parent + * 2. update_wilds_cpumask() begins + * 3. new dyn_allowed applied to parent + * 4. update_wilds_cpumask() ends + * 5. 'task' added into the iteration list + * + * Fix this by re-duplicating current's allowed mask here if it changed. + */ + if (!cpumask_equal(task->cpus_ptr, current->cpus_ptr)) + set_cpus_allowed_ptr(task, current->cpus_ptr); return; + } set_cpus_allowed_ptr(task, current->cpus_ptr); task->mems_allowed = current->mems_allowed; @@ -3970,6 +4004,29 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) spin_unlock_irqrestore(&callback_lock, flags); } +#ifdef CONFIG_RICH_CONTAINER +void rich_container_get_cpuset_cpus(struct cpumask *pmask) +{ + unsigned long flags; + struct task_struct *p; + + rcu_read_lock(); + if (sysctl_rich_container_source == 1) { + read_lock(&tasklist_lock); + p = task_active_pid_ns(current)->child_reaper; + read_unlock(&tasklist_lock); + + } else { + p = current; + } + + spin_lock_irqsave(&callback_lock, flags); + guarantee_online_cpus(p, pmask); + spin_unlock_irqrestore(&callback_lock, flags); + rcu_read_unlock(); +} +#endif + /** * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe. * @tsk: pointer to task_struct with which the scheduler is struggling diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index d80d7a6081412994582e2a3686442226b582cd50..d426513994bb18e065c6e7adcfb43d3164610633 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -156,19 +156,16 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos, * optimize away the callsite.
Therefore, __weak is needed to ensure that the * call is still emitted, by telling the compiler that we don't know what the * function might eventually be. - * - * __diag_* below are needed to dismiss the missing prototype warning. */ -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "kfuncs which will be used in BPF programs"); + +__bpf_hook_start(); __weak noinline void bpf_rstat_flush(struct cgroup *cgrp, struct cgroup *parent, int cpu) { } -__diag_pop(); +__bpf_hook_end(); /* see cgroup_rstat_flush() */ static void cgroup_rstat_flush_locked(struct cgroup *cgrp) @@ -326,6 +323,11 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat, dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime; #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum += src_bstat->forceidle_sum; + dst_bstat->forceidle_task_sum += src_bstat->forceidle_task_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + dst_bstat->sibidle_sum += src_bstat->sibidle_sum; + dst_bstat->sibidle_task_sum += src_bstat->sibidle_task_sum; #endif } @@ -337,6 +339,11 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime; #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum -= src_bstat->forceidle_sum; + dst_bstat->forceidle_task_sum -= src_bstat->forceidle_task_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + dst_bstat->sibidle_sum -= src_bstat->sibidle_sum; + dst_bstat->sibidle_task_sum -= src_bstat->sibidle_task_sum; #endif } @@ -430,6 +437,17 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp, case CPUTIME_FORCEIDLE: rstatc->bstat.forceidle_sum += delta_exec; break; + case CPUTIME_FORCEIDLE_TASK: + rstatc->bstat.forceidle_task_sum += delta_exec; + break; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + case CPUTIME_SIBIDLE: + rstatc->bstat.sibidle_sum += delta_exec; + break; + case CPUTIME_SIBIDLE_TASK: + rstatc->bstat.sibidle_task_sum += delta_exec; + break; #endif default: break; @@ -473,6 +491,11 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat) #ifdef CONFIG_SCHED_CORE bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE]; + bstat->forceidle_task_sum += cpustat[CPUTIME_FORCEIDLE_TASK]; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + bstat->sibidle_sum += cpustat[CPUTIME_SIBIDLE]; + bstat->sibidle_task_sum += cpustat[CPUTIME_SIBIDLE_TASK]; #endif } } @@ -484,6 +507,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) struct cgroup_base_stat bstat; #ifdef CONFIG_SCHED_CORE u64 forceidle_time; + u64 forceidle_task_time; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + u64 sibidle_time; + u64 sibidle_task_time; #endif if (cgroup_parent(cgrp)) { @@ -493,6 +521,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) &utime, &stime); #ifdef CONFIG_SCHED_CORE forceidle_time = cgrp->bstat.forceidle_sum; + forceidle_task_time = cgrp->bstat.forceidle_task_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + sibidle_time = cgrp->bstat.sibidle_sum; + sibidle_task_time = cgrp->bstat.sibidle_task_sum; #endif cgroup_rstat_flush_release(); } else { @@ -502,6 +535,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) stime = bstat.cputime.stime; #ifdef CONFIG_SCHED_CORE forceidle_time = bstat.forceidle_sum; + forceidle_task_time = bstat.forceidle_task_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) 
+ sibidle_time = bstat.sibidle_sum; + sibidle_task_time = bstat.sibidle_task_sum; #endif } @@ -510,6 +548,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) do_div(stime, NSEC_PER_USEC); #ifdef CONFIG_SCHED_CORE do_div(forceidle_time, NSEC_PER_USEC); + do_div(forceidle_task_time, NSEC_PER_USEC); +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + do_div(sibidle_time, NSEC_PER_USEC); + do_div(sibidle_task_time, NSEC_PER_USEC); #endif seq_printf(seq, "usage_usec %llu\n" @@ -519,14 +562,19 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) #ifdef CONFIG_SCHED_CORE seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time); + seq_printf(seq, "core_sched.force_idle_task_usec %llu\n", forceidle_task_time); +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + seq_printf(seq, "sibidle_usec %llu\n", sibidle_time); + seq_printf(seq, "sibidle_task_usec %llu\n", sibidle_task_time); #endif } /* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */ -BTF_SET8_START(bpf_rstat_kfunc_ids) +BTF_KFUNCS_START(bpf_rstat_kfunc_ids) BTF_ID_FLAGS(func, cgroup_rstat_updated) BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE) -BTF_SET8_END(bpf_rstat_kfunc_ids) +BTF_KFUNCS_END(bpf_rstat_kfunc_ids) static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = { .owner = THIS_MODULE, diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index f005c66f378c32e9bae2d84b0ca80d1c126f0c15..8b860c7ecabc54edbaaea2bedf8802cd0840bc64 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -224,6 +224,9 @@ void __init dma_contiguous_reserve(phys_addr_t limit) dma_numa_cma_reserve(); + if (is_zhaoxin_kh40000) + return; + pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); if (size_cmdline != -1) { diff --git a/kernel/events/core.c b/kernel/events/core.c index ec0fae49a0dd9ad69b38fcfc5c1dcc2286e9f05e..a1deaf7e420512f2ff834a5810b86579f9e5a69f 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7440,6 +7440,14 @@ void perf_output_sample(struct perf_output_handle *handle, if (branch_sample_hw_index(event)) perf_output_put(handle, data->br_stack->hw_idx); perf_output_copy(handle, data->br_stack->entries, size); + /* + * Add the extension space which is appended + * right after the struct perf_branch_stack. 
+ */ + if (data->br_stack_cntr) { + size = data->br_stack->nr * sizeof(u64); + perf_output_copy(handle, data->br_stack_cntr, size); + } } else { /* * we always store at least the value of nr @@ -8836,7 +8844,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; if (atomic_read(&nr_build_id_events)) - build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size); + build_id_parse_nofault(vma, mmap_event->build_id, &mmap_event->build_id_size); perf_iterate_sb(perf_event_mmap_output, mmap_event, @@ -10590,7 +10598,7 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) return -EINVAL; - if (prog->type == BPF_PROG_TYPE_KPROBE && prog->aux->sleepable && !is_uprobe) + if (prog->type == BPF_PROG_TYPE_KPROBE && prog->sleepable && !is_uprobe) /* only uprobe programs are allowed to be sleepable */ return -EINVAL; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 6dac0b5798213bb643a747461aba6131e0fab578..b58356719fa62afadf3c92ed2b37243fc353cde2 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -181,14 +181,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, if (new_page) { folio_get(new_folio); - page_add_new_anon_rmap(new_page, vma, addr); + folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(new_folio, vma); } else /* no new page, just dec_mm_counter for old_page */ dec_mm_counter(mm, MM_ANONPAGES); if (!folio_test_anon(old_folio)) { - dec_mm_counter(mm, mm_counter_file(old_page)); + if (!is_zero_page(old_page)) + dec_mm_counter(mm, mm_counter_file(old_page)); inc_mm_counter(mm, MM_ANONPAGES); } @@ -198,11 +199,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, set_pte_at_notify(mm, addr, pvmw.pte, mk_pte(new_page, vma->vm_page_prot)); - page_remove_rmap(old_page, vma, false); - if (!folio_mapped(old_folio)) - folio_free_swap(old_folio); + /* zero page won't be added to rmap, skip, see do_anonymous_page() */ + if (!is_zero_page(old_page)) { + folio_remove_rmap_pte(old_folio, old_page, vma); + if (!folio_mapped(old_folio)) + folio_free_swap(old_folio); + } page_vma_mapped_walk_done(&pvmw); - folio_put(old_folio); + if (!is_zero_page(old_page)) + folio_put(old_folio); err = 0; unlock: @@ -537,7 +542,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, } } - ret = __replace_page(vma, vaddr, old_page, new_page); + ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page); if (new_page) put_page(new_page); put_old: diff --git a/kernel/fork.c b/kernel/fork.c index 23efaa2c42e4f8144310435d929de4edaee9c69f..23325fefb0eae491b0dd5b0e20aea87acc006c50 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -111,6 +111,11 @@ #define CREATE_TRACE_POINTS #include +#ifdef CONFIG_USER_NS +extern int unprivileged_userns_clone; +#else +#define unprivileged_userns_clone 0 +#endif /* * Minimum number of threads to boot the kernel @@ -500,6 +505,8 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) { struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + fixup_vma(orig); + if (!new) return NULL; @@ -653,12 +660,23 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, LIST_HEAD(uf); VMA_ITERATOR(old_vmi, oldmm, 0); VMA_ITERATOR(vmi, mm, 0); +#ifdef CONFIG_ASYNC_FORK + unsigned long async_fork; +#endif uprobe_start_dup_mmap(); if 
(mmap_write_lock_killable(oldmm)) { retval = -EINTR; goto fail_uprobe_end; } +#ifdef CONFIG_ASYNC_FORK + /* Get task_async_fork with oldmm's mmap write lock held. */ + rcu_read_lock(); + async_fork = task_async_fork(current); + if (async_fork) + set_bit(ASYNC_FORK_CANDIDATE, &oldmm->async_fork_flags); + rcu_read_unlock(); +#endif flush_cache_dup_mm(oldmm); uprobe_dup_mmap(oldmm, mm); /* @@ -755,8 +773,16 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, goto fail_nomem_vmi_store; mm->map_count++; - if (!(tmp->vm_flags & VM_WIPEONFORK)) + if (!(tmp->vm_flags & VM_WIPEONFORK)) { +#ifdef CONFIG_ASYNC_FORK + if (async_fork) + retval = async_fork_cpr_fast(tmp, mpnt); + else + retval = copy_page_range(tmp, mpnt); +#else retval = copy_page_range(tmp, mpnt); +#endif + } if (tmp->vm_ops && tmp->vm_ops->open) tmp->vm_ops->open(tmp); @@ -773,6 +799,10 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, out: mmap_write_unlock(mm); flush_tlb_mm(oldmm); +#ifdef CONFIG_ASYNC_FORK + if (async_fork) + async_fork_cpr_bind(oldmm, mm, retval); +#endif mmap_write_unlock(oldmm); dup_userfaultfd_complete(&uf); fail_uprobe_end: @@ -917,6 +947,9 @@ void __mmdrop(struct mm_struct *mm) cleanup_lazy_tlbs(mm); WARN_ON_ONCE(mm == current->active_mm); +#ifdef CONFIG_ASYNC_FORK + BUG_ON(mm->async_fork_mm); +#endif mm_free_pgd(mm); destroy_context(mm); mmu_notifier_subscriptions_destroy(mm); @@ -1288,6 +1321,11 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm_init_uprobes_state(mm); hugetlb_count_init(mm); +#ifdef CONFIG_ASYNC_FORK + mm->async_fork_mm = NULL; + mm->async_fork_flags = 0; +#endif + if (current->mm) { mm->flags = mmf_init_flags(current->mm->flags); mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; @@ -2259,6 +2297,10 @@ __latent_entropy struct task_struct *copy_process( if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); + if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) + if (!capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group.
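The CLONE_NEWUSER gate added to copy_process() (and mirrored in ksys_unshare() further down) consults the unprivileged_userns_clone toggle declared earlier in this file. Distributions carrying this out-of-tree restriction usually expose the toggle as a sysctl; assuming the conventional kernel.unprivileged_userns_clone name, the effect can be observed like this::

	# cat /proc/sys/kernel/unprivileged_userns_clone
	0
	$ unshare --user true
	unshare: unshare failed: Operation not permitted
	# echo 1 > /proc/sys/kernel/unprivileged_userns_clone
	$ unshare --user true    # now succeeds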
@@ -2724,6 +2766,8 @@ __latent_entropy struct task_struct *copy_process( proc_fork_connector(p); sched_post_fork(p); cgroup_post_fork(p, args); + if (likely(p->pid) && is_child_reaper(pid)) + create_rich_container_reaper(p); perf_event_fork(p); trace_task_newtask(p, clone_flags); @@ -2756,6 +2800,12 @@ __latent_entropy struct task_struct *copy_process( exit_task_namespaces(p); bad_fork_cleanup_mm: if (p->mm) { +#ifdef CONFIG_ASYNC_FORK + if (p->mm->async_fork_mm) { + WARN_ON_ONCE(clone_flags & CLONE_VM); + async_fork_cpr_done(p->mm, true, false); + } +#endif mm_clear_owner(p->mm, p); mmput(p->mm); } @@ -3412,6 +3462,12 @@ int ksys_unshare(unsigned long unshare_flags) if (unshare_flags & CLONE_NEWNS) unshare_flags |= CLONE_FS; + if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) { + err = -EPERM; + if (!capable(CAP_SYS_ADMIN)) + goto bad_unshare_out; + } + err = check_unshare_flags(unshare_flags); if (err) goto bad_unshare_out; diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 2531f3496ab6d73a7570c91ad47198bc9622e1fc..8a65b0c34b5ac096794894c12b5168d7066fcbec 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -148,5 +148,5 @@ config GENERIC_IRQ_MULTI_HANDLER # Do not even think of enabling this on any new platform config DEPRECATED_IRQ_CPU_ONOFFLINE bool - depends on CAVIUM_OCTEON_SOC - default CAVIUM_OCTEON_SOC + depends on CAVIUM_OCTEON_SOC || LOONGARCH + default CAVIUM_OCTEON_SOC || LOONGARCH diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index ecbc9b6aba3a10bcadecac960bf6ee09f2419d20..52426665eecc1ada13f28a8a0598371f32e1f578 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -973,7 +973,7 @@ static int __klp_disable_patch(struct klp_patch *patch) if (klp_transition_patch) return -EBUSY; - klp_init_transition(patch, KLP_UNPATCHED); + klp_init_transition(patch, KLP_TRANSITION_UNPATCHED); klp_for_each_object(patch, obj) if (obj->patched) @@ -1008,7 +1008,7 @@ static int __klp_enable_patch(struct klp_patch *patch) pr_notice("enabling patch '%s'\n", patch->mod->name); - klp_init_transition(patch, KLP_PATCHED); + klp_init_transition(patch, KLP_TRANSITION_PATCHED); /* * Enforce the order of the func->transition writes in diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index 4152c71507e249e6e679aa6755749e486482d815..90408500e5a38a9e323cada04a02df9c2e67302c 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c @@ -95,9 +95,9 @@ static void notrace klp_ftrace_handler(unsigned long ip, patch_state = current->patch_state; - WARN_ON_ONCE(patch_state == KLP_UNDEFINED); + WARN_ON_ONCE(patch_state == KLP_TRANSITION_IDLE); - if (patch_state == KLP_UNPATCHED) { + if (patch_state == KLP_TRANSITION_UNPATCHED) { /* * Use the previously patched version of the function. * If no previous patches exist, continue with the diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index e54c3d60a90450649a15ac472fcc022845dc3c8e..ba069459c1017b815101cc3aeb186d9407f7bd10 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -23,7 +23,7 @@ static DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries); struct klp_patch *klp_transition_patch; -static int klp_target_state = KLP_UNDEFINED; +static int klp_target_state = KLP_TRANSITION_IDLE; static unsigned int klp_signals_cnt; @@ -96,16 +96,16 @@ static void klp_complete_transition(void) pr_debug("'%s': completing %s transition\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? 
"patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching"); - if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) { + if (klp_transition_patch->replace && klp_target_state == KLP_TRANSITION_PATCHED) { klp_unpatch_replaced_patches(klp_transition_patch); klp_discard_nops(klp_transition_patch); } - if (klp_target_state == KLP_UNPATCHED) { + if (klp_target_state == KLP_TRANSITION_UNPATCHED) { /* - * All tasks have transitioned to KLP_UNPATCHED so we can now + * All tasks have transitioned to KLP_TRANSITION_UNPATCHED so we can now * remove the new functions from the func_stack. */ klp_unpatch_objects(klp_transition_patch); @@ -123,36 +123,36 @@ static void klp_complete_transition(void) klp_for_each_func(obj, func) func->transition = false; - /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ - if (klp_target_state == KLP_PATCHED) + /* Prevent klp_ftrace_handler() from seeing KLP_TRANSITION_IDLE state */ + if (klp_target_state == KLP_TRANSITION_PATCHED) klp_synchronize_transition(); read_lock(&tasklist_lock); for_each_process_thread(g, task) { WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING)); - task->patch_state = KLP_UNDEFINED; + task->patch_state = KLP_TRANSITION_IDLE; } read_unlock(&tasklist_lock); for_each_possible_cpu(cpu) { task = idle_task(cpu); WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING)); - task->patch_state = KLP_UNDEFINED; + task->patch_state = KLP_TRANSITION_IDLE; } klp_for_each_object(klp_transition_patch, obj) { if (!klp_is_object_loaded(obj)) continue; - if (klp_target_state == KLP_PATCHED) + if (klp_target_state == KLP_TRANSITION_PATCHED) klp_post_patch_callback(obj); - else if (klp_target_state == KLP_UNPATCHED) + else if (klp_target_state == KLP_TRANSITION_UNPATCHED) klp_post_unpatch_callback(obj); } pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching"); - klp_target_state = KLP_UNDEFINED; + klp_target_state = KLP_TRANSITION_IDLE; klp_transition_patch = NULL; } @@ -164,13 +164,13 @@ static void klp_complete_transition(void) */ void klp_cancel_transition(void) { - if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED)) + if (WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_PATCHED)) return; pr_debug("'%s': canceling patching transition, going to unpatch\n", klp_transition_patch->mod->name); - klp_target_state = KLP_UNPATCHED; + klp_target_state = KLP_TRANSITION_UNPATCHED; klp_complete_transition(); } @@ -218,7 +218,7 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries, struct klp_ops *ops; int i; - if (klp_target_state == KLP_UNPATCHED) { + if (klp_target_state == KLP_TRANSITION_UNPATCHED) { /* * Check for the to-be-unpatched function * (the func itself). @@ -455,7 +455,7 @@ void klp_try_complete_transition(void) struct klp_patch *patch; bool complete = true; - WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED); + WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE); /* * Try to switch the tasks to the target patch state by walking their @@ -532,11 +532,11 @@ void klp_start_transition(void) struct task_struct *g, *task; unsigned int cpu; - WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED); + WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE); pr_notice("'%s': starting %s transition\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? 
"patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching"); /* * Mark all normal tasks as needing a patch state update. They'll @@ -578,7 +578,7 @@ void klp_init_transition(struct klp_patch *patch, int state) struct klp_func *func; int initial_state = !state; - WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED); + WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_IDLE); klp_transition_patch = patch; @@ -589,7 +589,7 @@ void klp_init_transition(struct klp_patch *patch, int state) klp_target_state = state; pr_debug("'%s': initializing %s transition\n", patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching"); /* * Initialize all tasks to the initial patch state to prepare them for @@ -597,7 +597,7 @@ void klp_init_transition(struct klp_patch *patch, int state) */ read_lock(&tasklist_lock); for_each_process_thread(g, task) { - WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED); + WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE); task->patch_state = initial_state; } read_unlock(&tasklist_lock); @@ -607,19 +607,19 @@ void klp_init_transition(struct klp_patch *patch, int state) */ for_each_possible_cpu(cpu) { task = idle_task(cpu); - WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED); + WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE); task->patch_state = initial_state; } /* * Enforce the order of the task->patch_state initializations and the * func->transition updates to ensure that klp_ftrace_handler() doesn't - * see a func in transition with a task->patch_state of KLP_UNDEFINED. + * see a func in transition with a task->patch_state of KLP_TRANSITION_IDLE. * * Also enforce the order of the klp_target_state write and future * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and * __klp_sched_try_switch() don't set a task->patch_state to - * KLP_UNDEFINED. + * KLP_TRANSITION_IDLE. */ smp_wmb(); @@ -652,7 +652,7 @@ void klp_reverse_transition(void) pr_debug("'%s': reversing transition from %s\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching to unpatching" : + klp_target_state == KLP_TRANSITION_PATCHED ? "patching to unpatching" : "unpatching to patching"); /* @@ -741,7 +741,7 @@ void klp_force_transition(void) klp_update_patch_state(idle_task(cpu)); /* Set forced flag for patches being removed. */ - if (klp_target_state == KLP_UNPATCHED) + if (klp_target_state == KLP_TRANSITION_UNPATCHED) klp_transition_patch->forced = true; else if (klp_transition_patch->replace) { klp_for_each_patch(patch) { diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index 85251d8771d91904446d085cf6992da4b33c3e0c..3926aad129eda3f49d8454547f03a0c694280367 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h @@ -17,11 +17,11 @@ struct mcs_spinlock { struct mcs_spinlock *next; - int locked; /* 1 if lock acquired */ + unsigned int locked; /* 1 if lock acquired */ int count; /* nesting count, see qspinlock.c */ }; -#ifndef arch_mcs_spin_lock_contended +#ifndef arch_mcs_spin_wait /* * Using smp_cond_load_acquire() provides the acquire semantics * required so that subsequent operations happen after the @@ -29,20 +29,20 @@ struct mcs_spinlock { * ARM64 would like to do spin-waiting instead of purely * spinning, and smp_cond_load_acquire() provides that behavior. 
*/ -#define arch_mcs_spin_lock_contended(l) \ -do { \ - smp_cond_load_acquire(l, VAL); \ +#define arch_mcs_spin_wait(l) \ +do { \ + smp_cond_load_acquire(l, VAL); \ } while (0) #endif -#ifndef arch_mcs_spin_unlock_contended +#ifndef arch_mcs_lock_handoff /* * smp_store_release() provides a memory barrier to ensure all * operations in the critical section has been completed before * unlocking. */ -#define arch_mcs_spin_unlock_contended(l) \ - smp_store_release((l), 1) +#define arch_mcs_lock_handoff(l, val) \ + smp_store_release((l), (val)) #endif /* @@ -91,7 +91,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node) WRITE_ONCE(prev->next, node); /* Wait until the lock holder passes the lock down. */ - arch_mcs_spin_lock_contended(&node->locked); + arch_mcs_spin_wait(&node->locked); } /* @@ -115,7 +115,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) } /* Pass lock to next waiter. */ - arch_mcs_spin_unlock_contended(&next->locked); + arch_mcs_lock_handoff(&next->locked, 1); } #endif /* __LINUX_MCS_SPINLOCK_H */ diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index ebe6b8ec7cb380da9d62e09f1b7ab98f339d35a8..d3f99060b60f1cab0961c94e154153e7604b2594 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -11,7 +11,7 @@ * Peter Zijlstra */ -#ifndef _GEN_PV_LOCK_SLOWPATH +#if !defined(_GEN_PV_LOCK_SLOWPATH) && !defined(_GEN_CNA_LOCK_SLOWPATH) #include #include @@ -72,7 +72,8 @@ /* * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in * size and four of them will fit nicely in one 64-byte cacheline. For - * pvqspinlock, however, we need more space for extra data. To accommodate + * pvqspinlock, however, we need more space for extra data. The same also + * applies for the NUMA-aware variant of spinlocks (CNA). To accommodate * that, we insert two more long words to pad it up to 32 bytes. IOW, only * two of them can fit in a cacheline in this case. That is OK as it is rare * to have more than 2 levels of slowpath nesting in actual use. We don't @@ -81,7 +82,7 @@ */ struct qnode { struct mcs_spinlock mcs; -#ifdef CONFIG_PARAVIRT_SPINLOCKS +#if defined(CONFIG_PARAVIRT_SPINLOCKS) || defined(CONFIG_NUMA_AWARE_SPINLOCKS) long reserved[2]; #endif }; @@ -105,6 +106,8 @@ struct qnode { * Exactly fits one 64-byte cacheline on a 64-bit architecture. * * PV doubles the storage and uses the second cacheline for PV state. + * CNA also doubles the storage and uses the second cacheline for + * CNA-specific state. */ static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]); @@ -290,7 +293,35 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock, #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath #endif -#endif /* _GEN_PV_LOCK_SLOWPATH */ +/* + * __try_clear_tail - try to clear tail by setting the lock value to + * _Q_LOCKED_VAL. 
+ * @lock: Pointer to the queued spinlock structure + * @val: Current value of the lock + * @node: Pointer to the MCS node of the lock holder + */ +static __always_inline bool __try_clear_tail(struct qspinlock *lock, + u32 val, + struct mcs_spinlock *node) +{ + return atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL); +} + +/* + * __mcs_lock_handoff - pass the MCS lock to the next waiter + * @node: Pointer to the MCS node of the lock holder + * @next: Pointer to the MCS node of the first waiter in the MCS queue + */ +static __always_inline void __mcs_lock_handoff(struct mcs_spinlock *node, + struct mcs_spinlock *next) +{ + arch_mcs_lock_handoff(&next->locked, 1); +} + +#define try_clear_tail __try_clear_tail +#define mcs_lock_handoff __mcs_lock_handoff + +#endif /* _GEN_PV_LOCK_SLOWPATH && _GEN_CNA_LOCK_SLOWPATH */ /** * queued_spin_lock_slowpath - acquire the queued spinlock @@ -474,7 +505,7 @@ void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) WRITE_ONCE(prev->next, node); pv_wait_node(node, prev); - arch_mcs_spin_lock_contended(&node->locked); + arch_mcs_spin_wait(&node->locked); /* * While waiting for the MCS lock, the next pointer may have @@ -536,7 +567,7 @@ void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) * PENDING will make the uncontended transition fail. */ if ((val & _Q_TAIL_MASK) == tail) { - if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL)) + if (try_clear_tail(lock, val, node)) goto release; /* No contention */ } @@ -553,7 +584,7 @@ void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) if (!next) next = smp_cond_load_relaxed(&node->next, (VAL)); - arch_mcs_spin_unlock_contended(&next->locked); + mcs_lock_handoff(node, next); pv_kick_node(lock, next); release: @@ -566,6 +597,37 @@ void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) } EXPORT_SYMBOL(queued_spin_lock_slowpath); +/* + * Generate the code for NUMA-aware spinlocks + */ +#if !defined(_GEN_CNA_LOCK_SLOWPATH) && defined(CONFIG_NUMA_AWARE_SPINLOCKS) +#define _GEN_CNA_LOCK_SLOWPATH + +#undef pv_init_node +#define pv_init_node cna_init_node + +#undef pv_wait_head_or_lock +#define pv_wait_head_or_lock cna_wait_head_or_lock + +#undef try_clear_tail +#define try_clear_tail cna_try_clear_tail + +#undef mcs_lock_handoff +#define mcs_lock_handoff cna_lock_handoff + +#undef queued_spin_lock_slowpath +/* + * defer defining queued_spin_lock_slowpath until after the include to + * avoid a name clash with the identically named field in pv_ops.lock + * (see cna_configure_spin_lock_slowpath()) + */ +#include "qspinlock_cna.h" +#define queued_spin_lock_slowpath __cna_queued_spin_lock_slowpath + +#include "qspinlock.c" + +#endif + /* * Generate the paravirt code for queued_spin_unlock_slowpath(). 
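The #undef/#define block above generates the CNA slow path the same way the paravirt variant has long been produced: qspinlock.c is written against overridable hook macros, and including the file a second time under a generation guard emits a second, differently named copy with the hooks swapped. A stripped-down illustration of the technique, with all names hypothetical::

	/* toy.c -- compile once, get two slow paths */
	#if !defined(_GEN_VARIANT)
	#define hook_prepare()		/* default hook: no-op */
	#define slowpath		generic_slowpath
	#endif

	int slowpath(int v)
	{
		hook_prepare();		/* resolves per generation pass */
		return v + 1;
	}

	#if !defined(_GEN_VARIANT)
	#define _GEN_VARIANT

	static void variant_prepare(void);

	#undef hook_prepare
	#define hook_prepare()		variant_prepare()
	#undef slowpath
	#define slowpath		variant_slowpath

	#include "toy.c"		/* second pass emits variant_slowpath() */

	static void variant_prepare(void) { /* variant-specific setup */ }
	#endif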
*/ @@ -580,6 +642,12 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath); #undef pv_kick_node #undef pv_wait_head_or_lock +#undef try_clear_tail +#define try_clear_tail __try_clear_tail + +#undef mcs_lock_handoff +#define mcs_lock_handoff __mcs_lock_handoff + #undef queued_spin_lock_slowpath #define queued_spin_lock_slowpath __pv_queued_spin_lock_slowpath diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h new file mode 100644 index 0000000000000000000000000000000000000000..2d834bc8d7dd141f88a33acf8ed84c4e9aaae44c --- /dev/null +++ b/kernel/locking/qspinlock_cna.h @@ -0,0 +1,425 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _GEN_CNA_LOCK_SLOWPATH +#error "do not include this file" +#endif + +#include +#include +#include +#include +#include + +/* + * Implement a NUMA-aware version of MCS (aka CNA, or compact NUMA-aware lock). + * + * In CNA, spinning threads are organized in two queues, a primary queue for + * threads running on the same NUMA node as the current lock holder, and a + * secondary queue for threads running on other nodes. Schematically, it + * looks like this: + * + * cna_node + * +----------+ +--------+ +--------+ + * |mcs:next | --> |mcs:next| --> ... |mcs:next| --> NULL [Primary queue] + * |mcs:locked| -. +--------+ +--------+ + * +----------+ | + * `----------------------. + * v + * +--------+ +--------+ + * |mcs:next| --> ... |mcs:next| [Secondary queue] + * +--------+ +--------+ + * ^ | + * `--------------------' + * + * N.B. locked := 1 if secondary queue is absent. Otherwise, it contains the + * encoded pointer to the tail of the secondary queue, which is organized as a + * circular list. + * + * After acquiring the MCS lock and before acquiring the spinlock, the MCS lock + * holder checks whether the next waiter in the primary queue (if exists) is + * running on the same NUMA node. If it is not, that waiter is detached from the + * main queue and moved into the tail of the secondary queue. This way, we + * gradually filter the primary queue, leaving only waiters running on the same + * preferred NUMA node. Note that certain priortized waiters (e.g., in + * irq and nmi contexts) are excluded from being moved to the secondary queue. + * + * We change the NUMA node preference after a waiter at the head of the + * secondary queue spins for a certain amount of time (1ms, by default). + * We do that by flushing the secondary queue into the head of the primary queue, + * effectively changing the preference to the NUMA node of the waiter at the head + * of the secondary queue at the time of the flush. + * + * For more details, see https://arxiv.org/abs/1810.05600. + * + * Authors: Alex Kogan + * Dave Dice + */ + +#define FLUSH_SECONDARY_QUEUE 1 + +#define CNA_PRIORITY_NODE 0xffff + +struct cna_node { + struct mcs_spinlock mcs; + u16 numa_node; + u16 real_numa_node; + u32 encoded_tail; /* self */ + u64 start_time; +}; + +static ulong numa_spinlock_threshold_ns = 1000000; /* 1ms, by default */ +module_param(numa_spinlock_threshold_ns, ulong, 0644); + +static inline bool intra_node_threshold_reached(struct cna_node *cn) +{ + u64 current_time = local_clock(); + u64 threshold = cn->start_time + numa_spinlock_threshold_ns; + + return current_time > threshold; +} + +/* + * Controls the probability for enabling the ordering of the main queue + * when the secondary queue is empty. 
The chosen value reduces the amount + * of unnecessary shuffling of threads between the two waiting queues + * when the contention is low, while responding fast enough and enabling + * the shuffling when the contention is high. + */ +#define SHUFFLE_REDUCTION_PROB_ARG (7) + +/* Per-CPU pseudo-random number seed */ +static DEFINE_PER_CPU(u32, seed); + +/* + * Return false with probability 1 / 2^@num_bits. + * Intuitively, the larger @num_bits the less likely false is to be returned. + * @num_bits must be a number between 0 and 31. + */ +static bool probably(unsigned int num_bits) +{ + u32 s; + + s = this_cpu_read(seed); + s = next_pseudo_random32(s); + this_cpu_write(seed, s); + + return s & ((1 << num_bits) - 1); +} + +static void __init cna_init_nodes_per_cpu(unsigned int cpu) +{ + struct mcs_spinlock *base = per_cpu_ptr(&qnodes[0].mcs, cpu); + int numa_node = cpu_to_node(cpu); + int i; + + for (i = 0; i < MAX_NODES; i++) { + struct cna_node *cn = (struct cna_node *)grab_mcs_node(base, i); + + cn->real_numa_node = numa_node; + cn->encoded_tail = encode_tail(cpu, i); + /* + * make sure @encoded_tail is not confused with other valid + * values for @locked (0 or 1) + */ + WARN_ON(cn->encoded_tail <= 1); + } +} + +static int __init cna_init_nodes(void) +{ + unsigned int cpu; + + /* + * this will break on 32bit architectures, so we restrict + * the use of CNA to 64bit only (see arch/x86/Kconfig) + */ + BUILD_BUG_ON(sizeof(struct cna_node) > sizeof(struct qnode)); + /* we store an encoded tail word in the node's @locked field */ + BUILD_BUG_ON(sizeof(u32) > sizeof(unsigned int)); + + for_each_possible_cpu(cpu) + cna_init_nodes_per_cpu(cpu); + + return 0; +} + +static __always_inline void cna_init_node(struct mcs_spinlock *node) +{ + bool priority = !in_task() || irqs_disabled() || rt_task(current); + struct cna_node *cn = (struct cna_node *)node; + + cn->numa_node = priority ? CNA_PRIORITY_NODE : cn->real_numa_node; + cn->start_time = 0; +} + +/* + * cna_splice_head -- splice the entire secondary queue onto the head of the + * primary queue. + * + * Returns the new primary head node or NULL on failure. + */ +static struct mcs_spinlock * +cna_splice_head(struct qspinlock *lock, u32 val, + struct mcs_spinlock *node, struct mcs_spinlock *next) +{ + struct mcs_spinlock *head_2nd, *tail_2nd; + u32 new; + + tail_2nd = decode_tail(node->locked); + head_2nd = tail_2nd->next; + + if (next) { + /* + * If the primary queue is not empty, the primary tail doesn't + * need to change and we can simply link the secondary tail to + * the old primary head. + */ + tail_2nd->next = next; + } else { + /* + * When the primary queue is empty, the secondary tail becomes + * the primary tail. + */ + + /* + * Speculatively break the secondary queue's circular link such + * that when the secondary tail becomes the primary tail it all + * works out. + */ + tail_2nd->next = NULL; + + /* + * tail_2nd->next = NULL; old = xchg_tail(lock, tail); + * prev = decode_tail(old); + * try_cmpxchg_release(...); WRITE_ONCE(prev->next, node); + * + * If the following cmpxchg() succeeds, our stores will not + * collide. + */ + new = ((struct cna_node *)tail_2nd)->encoded_tail | + _Q_LOCKED_VAL; + if (!atomic_try_cmpxchg_release(&lock->val, &val, new)) { + /* Restore the secondary queue's circular link. */ + tail_2nd->next = head_2nd; + return NULL; + } + } + + /* The primary queue head now is what was the secondary queue head.
*/ + return head_2nd; +} + +static inline bool cna_try_clear_tail(struct qspinlock *lock, u32 val, + struct mcs_spinlock *node) +{ + /* + * We're here because the primary queue is empty; check the secondary + * queue for remote waiters. + */ + if (node->locked > 1) { + struct mcs_spinlock *next; + + /* + * When there are waiters on the secondary queue, try to move + * them back onto the primary queue and let them rip. + */ + next = cna_splice_head(lock, val, node, NULL); + if (next) { + arch_mcs_lock_handoff(&next->locked, 1); + return true; + } + + return false; + } + + /* Both queues are empty. Do what MCS does. */ + return __try_clear_tail(lock, val, node); +} + +/* + * cna_splice_next -- splice the next node from the primary queue onto + * the secondary queue. + */ +static void cna_splice_next(struct mcs_spinlock *node, + struct mcs_spinlock *next, + struct mcs_spinlock *nnext) +{ + /* remove 'next' from the main queue */ + node->next = nnext; + + /* stick `next` on the secondary queue tail */ + if (node->locked <= 1) { /* if secondary queue is empty */ + struct cna_node *cn = (struct cna_node *)node; + + /* create secondary queue */ + next->next = next; + + cn->start_time = local_clock(); + /* secondary queue is not empty iff start_time != 0 */ + WARN_ON(!cn->start_time); + } else { + /* add to the tail of the secondary queue */ + struct mcs_spinlock *tail_2nd = decode_tail(node->locked); + struct mcs_spinlock *head_2nd = tail_2nd->next; + + tail_2nd->next = next; + next->next = head_2nd; + } + + node->locked = ((struct cna_node *)next)->encoded_tail; +} + +/* + * cna_order_queue - check whether the next waiter in the main queue is on + * the same NUMA node as the lock holder; if not, and it has a waiter behind + * it in the main queue, move the former onto the secondary queue. + * Returns 1 if the next waiter runs on the same NUMA node; 0 otherwise. + */ +static int cna_order_queue(struct mcs_spinlock *node) +{ + struct mcs_spinlock *next = READ_ONCE(node->next); + struct cna_node *cn = (struct cna_node *)node; + int numa_node, next_numa_node; + + if (!next) + return 0; + + numa_node = cn->numa_node; + next_numa_node = ((struct cna_node *)next)->numa_node; + + if (next_numa_node != numa_node && next_numa_node != CNA_PRIORITY_NODE) { + struct mcs_spinlock *nnext = READ_ONCE(next->next); + + if (nnext) + cna_splice_next(node, next, nnext); + + return 0; + } + return 1; +} + +#define LOCK_IS_BUSY(lock) (atomic_read(&(lock)->val) & _Q_LOCKED_PENDING_MASK) + +/* Abuse the pv_wait_head_or_lock() hook to get some work done */ +static __always_inline u32 cna_wait_head_or_lock(struct qspinlock *lock, + struct mcs_spinlock *node) +{ + struct cna_node *cn = (struct cna_node *)node; + + if (node->locked <= 1 && probably(SHUFFLE_REDUCTION_PROB_ARG)) { + /* + * When the secondary queue is empty, skip the calls to + * cna_order_queue() below with high probability. This optimization + * reduces the overhead of unnecessary shuffling of threads + * between waiting queues when the lock is only lightly contended. + */ + return 0; + } + + if (!cn->start_time || !intra_node_threshold_reached(cn)) { + /* + * We are at the head of the wait queue, no need to use + * the fake NUMA node ID. + */ + if (cn->numa_node == CNA_PRIORITY_NODE) + cn->numa_node = cn->real_numa_node; + + /* + * Try and put the time otherwise spent spin waiting on + * _Q_LOCKED_PENDING_MASK to use by sorting our lists. 
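The secondary queue manipulated by cna_splice_next() above stays circular, with the MCS node's @locked word caching the encoded tail so both the head (tail->next) and the tail remain reachable in O(1). A user-space sketch of the same tail-insert step, using plain pointers in place of encoded tails::

        #include <stdio.h>
        #include <stddef.h>

        struct waiter {
                struct waiter *next;
                int node;                       /* NUMA node, for illustration only */
        };

        /*
         * Model of the cna_splice_next() tail insert: 'tail' designates the
         * circular secondary queue (tail->next is its head), or NULL if empty.
         * Returns the new tail, mirroring node->locked being updated to the
         * encoded tail in the real code.
         */
        static struct waiter *splice_to_secondary(struct waiter *tail, struct waiter *w)
        {
                if (!tail) {
                        w->next = w;            /* create a one-entry circular list */
                } else {
                        w->next = tail->next;   /* new tail points at the old head */
                        tail->next = w;
                }
                return w;
        }

        int main(void)
        {
                struct waiter a = { .node = 1 }, b = { .node = 2 }, *tail = NULL;

                tail = splice_to_secondary(tail, &a);
                tail = splice_to_secondary(tail, &b);
                printf("head %d, tail %d\n", tail->next->node, tail->node); /* head 1, tail 2 */
                return 0;
        }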
+ */ + while (LOCK_IS_BUSY(lock) && !cna_order_queue(node)) + cpu_relax(); + } else { + cn->start_time = FLUSH_SECONDARY_QUEUE; + } + + return 0; /* we lied; we didn't wait, go do so now */ +} + +static inline void cna_lock_handoff(struct mcs_spinlock *node, + struct mcs_spinlock *next) +{ + struct cna_node *cn = (struct cna_node *)node; + u32 val = 1; + + if (cn->start_time != FLUSH_SECONDARY_QUEUE) { + if (node->locked > 1) { + val = node->locked; /* preserve secondary queue */ + + /* + * We have a local waiter, either real or fake one; + * reload @next in case it was changed by cna_order_queue(). + */ + next = node->next; + + /* + * Pass over NUMA node id of primary queue, to maintain the + * preference even if the next waiter is on a different node. + */ + ((struct cna_node *)next)->numa_node = cn->numa_node; + + ((struct cna_node *)next)->start_time = cn->start_time; + } + } else { + /* + * We decided to flush the secondary queue; + * this can only happen if that queue is not empty. + */ + WARN_ON(node->locked <= 1); + /* + * Splice the secondary queue onto the primary queue and pass the lock + * to the longest waiting remote waiter. + */ + next = cna_splice_head(NULL, 0, node, next); + } + + arch_mcs_lock_handoff(&next->locked, val); +} + +/* + * Constant (boot-param configurable) flag selecting the NUMA-aware variant + * of spinlock. Possible values: -1 (off, default) / 0 (auto) / 1 (on). + */ +static int numa_spinlock_flag = -1; + +static int __init numa_spinlock_setup(char *str) +{ + if (!strcmp(str, "auto")) { + numa_spinlock_flag = 0; + return 1; + } else if (!strcmp(str, "on")) { + numa_spinlock_flag = 1; + return 1; + } else if (!strcmp(str, "off")) { + numa_spinlock_flag = -1; + return 1; + } + + return 0; +} +__setup("numa_spinlock=", numa_spinlock_setup); + +void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); + +/* + * Switch to the NUMA-friendly slow path for spinlocks when we have + * multiple NUMA nodes in native environment, unless the user has + * overridden this default behavior by setting the numa_spinlock flag. + */ +void __init cna_configure_spin_lock_slowpath(void) +{ + + if (numa_spinlock_flag < 0) + return; + + if (numa_spinlock_flag == 0 && (nr_node_ids < 2 || + pv_ops.lock.queued_spin_lock_slowpath != + native_queued_spin_lock_slowpath)) + return; + + cna_init_nodes(); + + pv_ops.lock.queued_spin_lock_slowpath = __cna_queued_spin_lock_slowpath; + + pr_info("Enabling CNA spinlock\n"); +} diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 6a0184e9c2348e3b51a60d42a1e4e911e2209dd5..b280d4d1f58670c992040c219fe858ed02cb01d6 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -368,7 +368,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node) * * Matches with smp_store_mb() and cmpxchg() in pv_wait_node() * - * The write to next->locked in arch_mcs_spin_unlock_contended() + * The write to next->locked in arch_mcs_lock_handoff() * must be ordered before the read of pn->state in the cmpxchg() * below for the code to work correctly.
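As initialized above, numa_spinlock_flag defaults to -1, so the CNA slow path stays off unless requested on the kernel command line; "auto" additionally requires more than one NUMA node and the native (non-paravirt) slow path::

        numa_spinlock=on        # switch to CNA unconditionally
        numa_spinlock=auto      # switch to CNA on multi-node bare-metal systems
        numa_spinlock=off       # keep the generic qspinlock slow path (default)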
To guarantee full ordering * irrespective of the success or failure of the cmpxchg(), diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index 1162e07cdaea13197389df3d8572570c47fe8c60..ce79c4dfe987527ab31045d6a5f6deae18ee5942 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -56,6 +56,8 @@ struct rt_mutex_waiter { struct rt_mutex_base *lock; unsigned int wake_state; struct ww_acquire_ctx *ww_ctx; + + CK_KABI_RESERVE(1) }; /** diff --git a/kernel/module/internal.h b/kernel/module/internal.h index c8b7b4dcf7820dcfea57c5ea5003ac2094285855..bf1b643ef970427a177d30d841d0bddb25bff940 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -82,6 +82,8 @@ struct load_info { struct { unsigned int sym, str, mod, vers, info, pcpu; } index; + + unsigned long subsys; }; enum mod_license { @@ -330,11 +332,20 @@ int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, #ifdef CONFIG_MODULE_SIG int module_sig_check(struct load_info *info, int flags); +int force_subsys_sig_check(struct load_info *info); +void set_module_subsys(struct load_info *info, const char *name); #else /* !CONFIG_MODULE_SIG */ static inline int module_sig_check(struct load_info *info, int flags) { return 0; } + +static inline int force_subsys_sig_check(struct load_info *info) +{ + return 0; +} + +static inline void set_module_subsys(struct load_info *info, const char *name) { } #endif /* !CONFIG_MODULE_SIG */ #ifdef CONFIG_DEBUG_KMEMLEAK diff --git a/kernel/module/main.c b/kernel/module/main.c index b00e31721a73e398c9bce71164139e92766b7c1c..a2cfac94394a67a00693ec7c96ad0c8e446d42d3 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1364,7 +1364,7 @@ static bool ignore_undef_symbol(Elf_Half emachine, const char *name) } /* Change all symbols so that st_value encodes the pointer directly. */ -static int simplify_symbols(struct module *mod, const struct load_info *info) +static int simplify_symbols(struct module *mod, struct load_info *info) { Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; Elf_Sym *sym = (void *)symsec->sh_addr; @@ -1406,6 +1406,7 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) ksym = resolve_symbol_wait(mod, info, name); /* Ok if resolved. 
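set_module_subsys() in the following signing.c hunk classifies a module by prefix-matching the undefined symbols resolved here. A quick user-space check of the same strncmp() idiom shows why the single "register_netdev" key covers both register_netdev() and register_netdevice(), while "drm_" matches any DRM entry point::

        #include <stdio.h>
        #include <string.h>

        /* Same prefix test used by set_module_subsys() below. */
        static int matches(const char *sym, const char *key)
        {
                return !strncmp(sym, key, strlen(key));
        }

        int main(void)
        {
                printf("%d\n", matches("drm_dev_register", "drm_"));                /* 1 */
                printf("%d\n", matches("register_netdevice", "register_netdev"));   /* 1 */
                printf("%d\n", matches("scsi_host_alloc", "device_add_disk"));      /* 0 */
                return 0;
        }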
*/ if (ksym && !IS_ERR(ksym)) { + set_module_subsys(info, name); sym[i].st_value = kernel_symbol_value(ksym); break; } @@ -2921,6 +2922,10 @@ static int load_module(struct load_info *info, const char __user *uargs, if (err < 0) goto free_modinfo; + err = force_subsys_sig_check(info); + if (err < 0) + goto free_modinfo; + err = apply_relocations(mod, info); if (err < 0) goto free_modinfo; diff --git a/kernel/module/signing.c b/kernel/module/signing.c index a2ff4242e623d5d4e87d2f3d139d8620fb937579..67919f8b20377c788857309db05476110e00c0f5 100644 --- a/kernel/module/signing.c +++ b/kernel/module/signing.c @@ -22,6 +22,62 @@ static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); module_param(sig_enforce, bool_enable_only, 0644); +static char *sig_enforce_subsys = ""; +module_param(sig_enforce_subsys, charp, 0644); +MODULE_PARM_DESC(sig_enforce_subsys, "Enforce subsys modules signature check"); + +enum modules_subsys { + MODULE_SUBSYS_GPU, + MODULE_SUBSYS_BLOCK, + MODULE_SUBSYS_NET, +}; + +void set_module_subsys(struct load_info *info, const char *name) +{ + char *key_intf_blk = "device_add_disk"; + char *key_intf_scsi = "scsi_host_alloc"; + char *key_intf_net = "register_netdev"; + char *key_intf_gpu = "drm_"; + + if (info->subsys) + return; + + if (!strncmp(name, key_intf_gpu, strlen(key_intf_gpu))) + set_bit(MODULE_SUBSYS_GPU, &info->subsys); + + if (!strncmp(name, key_intf_blk, strlen(key_intf_blk)) || + !strncmp(name, key_intf_scsi, strlen(key_intf_scsi))) + set_bit(MODULE_SUBSYS_BLOCK, &info->subsys); + + /* register_netdev or register_netdevice */ + if (!strncmp(name, key_intf_net, strlen(key_intf_net))) + set_bit(MODULE_SUBSYS_NET, &info->subsys); +} + +int force_subsys_sig_check(struct load_info *info) +{ + if (info->sig_ok) + return 0; + + if (test_bit(MODULE_SUBSYS_GPU, &info->subsys) && + parse_option_str(sig_enforce_subsys, "gpu")) + goto err; + + if (test_bit(MODULE_SUBSYS_BLOCK, &info->subsys) && + parse_option_str(sig_enforce_subsys, "block")) + goto err; + + if (test_bit(MODULE_SUBSYS_NET, &info->subsys) && + parse_option_str(sig_enforce_subsys, "net")) + goto err; + + return 0; +err: + pr_notice("%s: Loading is rejected, because of wrong signature or key missing!\n", + info->name); + return -EKEYREJECTED; +} + /* * Export sig_enforce kernel cmdline parameter to allow other subsystems rely * on that instead of directly to CONFIG_MODULE_SIG_FORCE config. diff --git a/kernel/module/sysfs.c b/kernel/module/sysfs.c index c921bf044050ff5650394372d400517c83e87bc8..a5c59142f2e651d4f1cd652f1b6ec0438fcbc131 100644 --- a/kernel/module/sysfs.c +++ b/kernel/module/sysfs.c @@ -22,6 +22,8 @@ struct module_sect_attr { struct bin_attribute battr; unsigned long address; + + CK_KABI_RESERVE(1) }; struct module_sect_attrs { diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index e9b2bb260ee6c83ef146779b1599a5380c3eb481..18cf494b9bfa833f859aa03be9bd619a2096377a 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -23,13 +23,54 @@ #include #include #include +#include #include "pid_sysctl.h" +#ifdef CONFIG_RICH_CONTAINER +int sysctl_rich_container_enable; +int sysctl_rich_container_source; /* 0 - current; 1 - child_reaper */ +int sysctl_rich_container_ext_enable; + +static struct kmem_cache *ext_cachep; +#endif + static DEFINE_MUTEX(pid_caches_mutex); static struct kmem_cache *pid_ns_cachep; /* Write once array, filled from the beginning. 
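With the above, an unsigned module is rejected only when force_subsys_sig_check() finds both a matching subsystem bit and that subsystem named in sig_enforce_subsys, a comma-separated list parsed by parse_option_str(). Assuming the parameter picks up the same "module." prefix as sig_enforce defined next to it, enforcement could be configured like::

        module.sig_enforce_subsys=block,net    # on the kernel command line
        echo gpu > /sys/module/module/parameters/sig_enforce_subsys    # at runtime (mode 0644)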
*/ static struct kmem_cache *pid_cache[MAX_PID_NS_LEVEL]; +#ifdef CONFIG_RICH_CONTAINER +struct rich_container_ext *create_rich_container_ext(void) +{ + struct rich_container_ext *ext; + + ext = kmem_cache_zalloc(ext_cachep, GFP_KERNEL); + if (!ext) + return NULL; + + ext->overcommit_memory = sysctl_overcommit_memory; + ext->overcommit_ratio = sysctl_overcommit_ratio; + ext->overcommit_kbytes = sysctl_overcommit_kbytes; + if (percpu_counter_init(&ext->vm_committed_as, 0, GFP_KERNEL)) + goto out; + ext->as_batch = vm_committed_as_batch; + + return ext; + +out: + kmem_cache_free(ext_cachep, ext); + return NULL; +} + +void destroy_rich_container_ext(struct rich_container_ext *ext) +{ + if (!ext) + return; + + kmem_cache_free(ext_cachep, ext); +} +#endif + /* * creates the kmem cache to allocate pids from. * @level: pid namespace level @@ -113,6 +154,7 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) ns->memfd_noexec_scope = pidns_memfd_noexec_scope(parent_pid_ns); #endif + ns->ext = create_rich_container_ext(); return ns; out_free_idr: @@ -131,6 +173,7 @@ static void delayed_free_pidns(struct rcu_head *p) dec_pid_namespaces(ns->ucounts); put_user_ns(ns->user_ns); + destroy_rich_container_ext(ns->ext); kmem_cache_free(pid_ns_cachep, ns); } @@ -477,6 +520,9 @@ static __init int pid_namespaces_init(void) #endif register_pid_ns_sysctl_table_vm(); +#ifdef CONFIG_RICH_CONTAINER + ext_cachep = KMEM_CACHE(rich_container_ext, SLAB_PANIC | SLAB_ACCOUNT); +#endif return 0; } diff --git a/kernel/power/swap.c b/kernel/power/swap.c index d71c590550d282a90309d774b38d7c98797ffda5..b1896346fde159168da56bab18af5409ea6f0ee5 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -201,12 +201,11 @@ void free_all_swap_pages(int swap) while ((node = swsusp_extents.rb_node)) { struct swsusp_extent *ext; - unsigned long offset; ext = rb_entry(node, struct swsusp_extent, node); rb_erase(node, &swsusp_extents); - for (offset = ext->start; offset <= ext->end; offset++) - swap_free(swp_entry(swap, offset)); + swap_free_nr(swp_entry(swap, ext->start), + ext->end - ext->start + 1); kfree(ext); } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 0fca282c0a25473efceb1722d1f0b5fab3728467..b4e390e0b4bd897cc9d2eea326cf9893947b6bea 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2202,10 +2202,17 @@ int vprintk_store(int facility, int level, u16 text_len; int ret = 0; u64 ts_nsec; +#ifdef CONFIG_SW64_RRK + extern int sw64_printk(const char *fmt, va_list args); +#endif if (!printk_enter_irqsave(recursion_ptr, irqflags)) return 0; +#ifdef CONFIG_SW64_RRK + sw64_printk(fmt, args); +#endif + /* * Since the duration of printk() can vary depending on the message * and state of the ringbuffer, grab the timestamp now so that it is diff --git a/kernel/profile.c b/kernel/profile.c index 984f819b701c9d2ea947bddac33add827c43e392..fe754a85635c090f555d2a3c2659d4a82dfe24d7 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -124,6 +124,9 @@ int __ref profile_init(void) return 0; free_cpumask_var(prof_cpu_mask); +#ifdef CONFIG_CPUMASK_OFFSTACK + prof_cpu_mask = NULL; +#endif return -ENOMEM; } diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index 991fc90025357964edf8d64a361a843e926160c1..e2c2d4e5735f084cc22d86beaac8ab9412eba1c5 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -4,7 +4,7 @@ * Auto-group scheduling implementation: */ -unsigned int __read_mostly 
sysctl_sched_autogroup_enabled = 1; +unsigned int __read_mostly sysctl_sched_autogroup_enabled; static struct autogroup autogroup_default; static atomic_t autogroup_seq_nr; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 86606fb9e6bc6cd7925a66b59fd65f3e3b0cd82e..72f9d0f8a14dbd73431dd316d7da54aa3a605282 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -151,6 +151,19 @@ const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK; __read_mostly int scheduler_running; +#ifdef CONFIG_SCHED_ACPU +DEFINE_STATIC_KEY_FALSE(acpu_enabled); +unsigned int sysctl_sched_acpu_enabled; +#endif + +#ifdef CONFIG_CFS_BANDWIDTH +/* + * Percent of burst assigned to cfs_b->runtime on tg_set_cfs_bandwidth, + * 0 by default. + */ +unsigned int sysctl_sched_cfs_bw_burst_onset_percent; +#endif + #ifdef CONFIG_SCHED_CORE DEFINE_STATIC_KEY_FALSE(__sched_core_enabled); @@ -368,7 +381,8 @@ static void __sched_core_flip(bool enabled) for_each_cpu(t, smt_mask) cpu_rq(t)->core_enabled = enabled; - cpu_rq(cpu)->core->core_forceidle_start = 0; + cpu_rq(cpu)->core->core_sibidle_start = 0; + cpu_rq(cpu)->core->core_sibidle_start_task = 0; sched_core_unlock(cpu, &flags); @@ -2118,6 +2132,12 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) p->sched_class->dequeue_task(rq, p, flags); } +static void update_nr_uninterruptible(struct task_struct *tsk, long inc) +{ + if (tsk->sched_class->update_nr_uninterruptible) + tsk->sched_class->update_nr_uninterruptible(tsk, inc); +} + void activate_task(struct rq *rq, struct task_struct *p, int flags) { if (task_on_rq_migrating(p)) @@ -3387,6 +3407,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) p->se.nr_migrations++; rseq_migrate(p); sched_mm_cid_migrate_from(p); + task_ca_increase_nr_migrations(p); perf_event_task_migrate(p); } @@ -3771,8 +3792,10 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, lockdep_assert_rq_held(rq); - if (p->sched_contributes_to_load) + if (p->sched_contributes_to_load) { + update_nr_uninterruptible(p, -1); rq->nr_uninterruptible--; + } #ifdef CONFIG_SMP if (wake_flags & WF_MIGRATED) @@ -3782,6 +3805,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, if (p->in_iowait) { delayacct_blkio_end(p); atomic_dec(&task_rq(p)->nr_iowait); + update_nr_iowait(p, -1); } activate_task(rq, p, en_flags); @@ -3956,6 +3980,18 @@ bool cpus_share_cache(int this_cpu, int that_cpu) return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); } +/* + * Whether CPUs share cache resources, which means LLC on non-cluster + * machines and LLC tag or L2 on machines with clusters. + */ +bool cpus_share_resources(int this_cpu, int that_cpu) +{ + if (this_cpu == that_cpu) + return true; + + return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu); +} + static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) { /* @@ -4333,6 +4369,7 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) if (p->in_iowait) { delayacct_blkio_end(p); atomic_dec(&task_rq(p)->nr_iowait); + update_nr_iowait(p, -1); } wake_flags |= WF_MIGRATED; @@ -4971,6 +5008,137 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr, #endif /* CONFIG_PREEMPT_NOTIFIERS */ +#ifdef CONFIG_SCHED_ACPU +static void acpu_enable(void) +{ + int i; + + for_each_possible_cpu(i) { + struct rq *rq = cpu_rq(i); + + /* It may not be that accurate, but useful enough.
*/ + rq->last_acpu_update_time = rq->clock; + } + static_branch_enable(&acpu_enabled); +} + +static void acpu_disable(void) +{ + static_branch_disable(&acpu_enabled); +} + +int sched_acpu_enable_handler(struct ctl_table *table, int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int ret; + unsigned int old, new; + + if (!write) { + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + return ret; + } + + old = sysctl_sched_acpu_enabled; + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + new = sysctl_sched_acpu_enabled; + if (!ret && write && (old != new)) { + if (new) + acpu_enable(); + else + acpu_disable(); + } + + return ret; +} + +static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next) +{ + const int cpu = cpu_of(rq); + const struct cpumask *smt_mask = cpu_smt_mask(cpu); + u64 now = rq_clock(rq); + u64 now_task = rq_clock_task(rq); + u64 sibidle_sum, sibidle_task_sum, last_update_time, last_update_time_task; + s64 delta, delta_task, last, last_task; + int i; + + if (!static_branch_likely(&acpu_enabled) || !schedstat_enabled()) + return; + + /* + * If core sched is enabled and core_sibidle_count is not zero, we update sibidle + * time in function __sched_core_account_sibidle(). + */ +#ifdef CONFIG_SCHED_CORE + if (rq->core->core_sibidle_count) + goto out; +#endif + + /* Update idle sum and busy sum for current rq. */ + delta = now - rq->last_acpu_update_time; + if (prev == rq->idle) + rq->acpu_idle_sum += delta; + + /* + * Be careful, smt_mask may be NULL. + * We only consider the case of two SMT siblings at this stage. + */ + if (unlikely(!smt_mask) || unlikely(cpumask_weight(smt_mask) != 2)) + goto out; + + for_each_cpu(i, smt_mask) { + if (i != cpu) { + struct rq *rq_i = cpu_rq(i); + struct task_struct *curr_i = rq_i->curr; + + last = (s64)(rq->last_acpu_update_time - + rq_i->last_acpu_update_time); + last_update_time = last >= 0 ? rq->last_acpu_update_time : + rq_i->last_acpu_update_time; + last_task = (s64)(rq->last_acpu_update_time_task - + rq_i->last_acpu_update_time_task); + last_update_time_task = last_task >= 0 ? + rq->last_acpu_update_time_task : + rq_i->last_acpu_update_time_task; + /* + * The sibling may update acpu at the same time, and its + * timestamp may be newer than this rq's. + */ + delta = now - last_update_time; + delta = delta > 0 ? delta : 0; + delta_task = now_task - last_update_time_task; + delta_task = delta_task > 0 ? delta_task : 0; + + /* Add the delta to improve accuracy. */ + sibidle_sum = last >= 0 ? rq->sibidle_sum : rq_i->acpu_idle_sum; + sibidle_task_sum = last_task >= 0 ? rq->sibidle_task_sum : + rq_i->acpu_idle_sum; + if (curr_i == rq_i->idle) { + sibidle_sum += delta; + sibidle_task_sum += delta_task; + } + } + } + + if (prev != rq->idle) { + delta = sibidle_sum - rq->sibidle_sum; + delta = delta > 0 ? delta : 0; + delta_task = sibidle_task_sum - rq->sibidle_task_sum; + delta_task = delta_task > 0 ? 
delta_task : 0; + __account_sibidle_time(prev, delta, delta_task, false); + } + + rq->sibidle_sum = sibidle_sum; + rq->sibidle_task_sum = sibidle_task_sum; +out: + rq->last_acpu_update_time = now; + rq->last_acpu_update_time_task = now_task; +} +#else +static inline void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next) +{ +} +#endif /* CONFIG_SCHED_ACPU */ + static inline void prepare_task(struct task_struct *next) { #ifdef CONFIG_SMP @@ -5179,6 +5347,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, { kcov_prepare_switch(prev); sched_info_switch(rq, prev, next); + update_acpu(rq, prev, next); perf_event_task_sched_out(prev, next); rseq_preempt(prev); fire_sched_out_preempt_notifiers(prev, next); @@ -5309,6 +5478,8 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) finish_task_switch(prev); preempt_enable(); + async_fork_cpr_rest(); + if (current->set_child_tid) put_user(task_pid_vnr(current), current->set_child_tid); @@ -5654,6 +5825,7 @@ void scheduler_tick(void) thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); curr->sched_class->task_tick(rq, curr, 0); + update_acpu(rq, curr, curr); if (sched_feat(LATENCY_WARN)) resched_latency = cpu_resched_latency(rq); calc_global_load_tick(rq); @@ -6121,18 +6293,22 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) /* reset state */ rq->core->core_cookie = 0UL; - if (rq->core->core_forceidle_count) { + if (rq->core->core_sibidle_count) { if (!core_clock_updated) { update_rq_clock(rq->core); core_clock_updated = true; } - sched_core_account_forceidle(rq); + sched_core_account_sibidle(rq); /* reset after accounting force idle */ - rq->core->core_forceidle_start = 0; - rq->core->core_forceidle_count = 0; - rq->core->core_forceidle_occupation = 0; - need_sync = true; - fi_before = true; + rq->core->core_sibidle_start = 0; + rq->core->core_sibidle_start_task = 0; + rq->core->core_sibidle_count = 0; + rq->core->core_sibidle_occupation = 0; + if (rq->core->core_forceidle_count) { + rq->core->core_forceidle_count = 0; + need_sync = true; + fi_before = true; + } } /* @@ -6208,6 +6384,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) rq_i->core_pick = p; if (p == rq_i->idle) { + rq->core->core_sibidle_count++; if (rq_i->nr_running) { rq->core->core_forceidle_count++; if (!fi_before) @@ -6218,9 +6395,10 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } } - if (schedstat_enabled() && rq->core->core_forceidle_count) { - rq->core->core_forceidle_start = rq_clock(rq->core); - rq->core->core_forceidle_occupation = occ; + if (schedstat_enabled() && rq->core->core_sibidle_count) { + rq->core->core_sibidle_start = rq_clock(rq->core); + rq->core->core_sibidle_start_task = rq_clock_task(rq->core); + rq->core->core_sibidle_occupation = occ; } rq->core->core_pick_seq = rq->core->core_task_seq; @@ -6262,7 +6440,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (!(fi_before && rq->core->core_forceidle_count)) task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); - rq_i->core_pick->core_occupation = occ; + if (rq->core->core_forceidle_count) + rq_i->core_pick->core_occupation = occ; if (i == cpu) { rq_i->core_pick = NULL; @@ -6477,14 +6656,16 @@ static void sched_core_cpu_deactivate(unsigned int cpu) core_rq->core_cookie = rq->core_cookie; core_rq->core_forceidle_count = 
rq->core_forceidle_count; core_rq->core_forceidle_seq = rq->core_forceidle_seq; - core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; + core_rq->core_sibidle_occupation = rq->core_sibidle_occupation; + core_rq->core_sibidle_count = rq->core_sibidle_count; /* * Accounting edge for forced idle is handled in pick_next_task(). * Don't need another one here, since the hotplug thread shouldn't * have a cookie. */ - core_rq->core_forceidle_start = 0; + core_rq->core_sibidle_start = 0; + core_rq->core_sibidle_start_task = 0; /* install new leader */ for_each_cpu(t, smt_mask) { @@ -6632,8 +6813,10 @@ static void __sched notrace __schedule(unsigned int sched_mode) !(prev_state & TASK_NOLOAD) && !(prev_state & TASK_FROZEN); - if (prev->sched_contributes_to_load) + if (prev->sched_contributes_to_load) { + update_nr_uninterruptible(prev, 1); rq->nr_uninterruptible++; + } /* * __schedule() ttwu() @@ -6650,6 +6833,7 @@ static void __sched notrace __schedule(unsigned int sched_mode) if (prev->in_iowait) { atomic_inc(&rq->nr_iowait); + update_nr_iowait(prev, 1); delayacct_blkio_start(); } } @@ -9828,6 +10012,11 @@ static void calc_load_migrate(struct rq *rq) if (delta) atomic_long_add(delta, &calc_load_tasks); +#ifdef CONFIG_SCHED_SLI + delta = calc_load_fold_active_r(rq, 1); + if (delta) + atomic_long_add(delta, &calc_load_tasks_r); +#endif } static void dump_rq_tasks(struct rq *rq, const char *loglvl) @@ -9919,11 +10108,19 @@ int in_sched_functions(unsigned long addr) } #ifdef CONFIG_CGROUP_SCHED +#ifdef CONFIG_SCHED_SLI +static DEFINE_PER_CPU(struct sched_cgroup_lat_stat_cpu, root_lat_stat_cpu); +#endif + /* * Default task group. * Every task in system belongs to this group at bootup. */ -struct task_group root_task_group; +struct task_group root_task_group = { +#ifdef CONFIG_SCHED_SLI + .lat_stat_cpu = &root_lat_stat_cpu, +#endif +}; LIST_HEAD(task_groups); /* Cacheline aligned slab cache for task_group */ @@ -10001,6 +10198,9 @@ void __init sched_init(void) raw_spin_lock_init(&rq->__lock); rq->nr_running = 0; rq->calc_load_active = 0; +#ifdef CONFIG_SCHED_SLI + rq->calc_load_active_r = 0; +#endif rq->calc_load_update = jiffies + LOAD_FREQ; init_cfs_rq(&rq->cfs); init_rt_rq(&rq->rt); @@ -10063,6 +10263,12 @@ void __init sched_init(void) rcuwait_init(&rq->hotplug_wait); #endif #endif /* CONFIG_SMP */ + +#ifdef CONFIG_SCHED_ACPU + rq->acpu_idle_sum = 0; + rq->sibidle_sum = 0; + rq->last_acpu_update_time = rq->clock; +#endif hrtick_rq_init(rq); atomic_set(&rq->nr_iowait, 0); @@ -10072,8 +10278,10 @@ void __init sched_init(void) rq->core_enabled = 0; rq->core_tree = RB_ROOT; rq->core_forceidle_count = 0; - rq->core_forceidle_occupation = 0; - rq->core_forceidle_start = 0; + rq->core_sibidle_count = 0; + rq->core_sibidle_occupation = 0; + rq->core_sibidle_start = 0; + rq->core_sibidle_start_task = 0; rq->core_cookie = 0UL; #endif @@ -10386,6 +10594,12 @@ static void sched_free_group(struct task_group *tg) free_fair_sched_group(tg); free_rt_sched_group(tg); autogroup_free(tg); + +#ifdef CONFIG_SCHED_SLI + if (tg->lat_stat_cpu) + free_percpu(tg->lat_stat_cpu); +#endif + kmem_cache_free(task_group_cache, tg); } @@ -10420,8 +10634,17 @@ struct task_group *sched_create_group(struct task_group *parent) if (!alloc_rt_sched_group(tg, parent)) goto err; +#ifdef CONFIG_SCHED_SLI + tg->lat_stat_cpu = alloc_percpu(struct sched_cgroup_lat_stat_cpu); + if (!tg->lat_stat_cpu) + goto err; +#endif + alloc_uclamp_sched_group(tg, parent); +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + 
tg->ht_ratio = 100; +#endif return tg; err: @@ -10545,8 +10768,20 @@ void sched_move_task(struct task_struct *tsk) if (running) put_prev_task(rq, tsk); + /* decrease old group */ + if ((!queued && task_contributes_to_load(tsk)) || + (READ_ONCE(tsk->__state) == TASK_WAKING && + tsk->sched_contributes_to_load)) + update_nr_uninterruptible(tsk, -1); + sched_change_group(tsk, group); + /* increase new group after change */ + if ((!queued && task_contributes_to_load(tsk)) || + (READ_ONCE(tsk->__state) == TASK_WAKING && + tsk->sched_contributes_to_load)) + update_nr_uninterruptible(tsk, 1); + if (queued) enqueue_task(rq, tsk, queue_flags); if (running) { @@ -10847,15 +11082,16 @@ static DEFINE_MUTEX(cfs_constraints_mutex); const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ /* More than 203 days if BW_SHIFT equals 20. */ -static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; +const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, - u64 burst) + u64 burst, u64 init_buffer) { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; + u64 buffer, burst_onset; if (tg == &root_task_group) return -EINVAL; @@ -10882,10 +11118,16 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, if (quota != RUNTIME_INF && quota > max_cfs_runtime) return -EINVAL; - if (quota != RUNTIME_INF && (burst > quota || - burst + quota > max_cfs_runtime)) + /* + * Bound burst to defend burst against overflow during bandwidth shift. + */ + if (burst > max_cfs_runtime || init_buffer > max_cfs_runtime) return -EINVAL; + if (quota == RUNTIME_INF) + buffer = RUNTIME_INF; + else + buffer = min(max_cfs_runtime, quota + burst); /* * Prevent race between setting of cfs_rq->runtime_enabled and * unthrottle_offline_cfs_rqs(). 
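Putting the new knobs together: buffer above caps how much runtime a group may accumulate (quota + burst, bounded by max_cfs_runtime), while the burst-onset percentage in the next hunk pre-fills part of the burst when bandwidth is (re)configured. An illustrative calculation mirroring tg_set_cfs_bandwidth(), as a standalone sketch with example values and a stand-in bound::

        #include <stdio.h>
        #include <stdint.h>

        #define NSEC_PER_MSEC   1000000ULL
        /* Stand-in; the kernel derives max_cfs_runtime from MAX_BW. */
        #define MAX_CFS_RUNTIME (1ULL << 44)

        static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

        int main(void)
        {
                /* cpu.cfs_quota_us = 100000, cpu.cfs_burst_us = 50000, onset = 20% */
                uint64_t quota = 100 * NSEC_PER_MSEC;
                uint64_t burst = 50 * NSEC_PER_MSEC;
                uint64_t buffer = min_u64(MAX_CFS_RUNTIME, quota + burst);
                uint64_t burst_onset = burst / 100 * 20;
                uint64_t runtime = min_u64(MAX_CFS_RUNTIME, quota + burst_onset);

                /* 150ms of headroom; each reconfigured period opens with 110ms. */
                printf("buffer %llums, initial runtime %llums\n",
                       (unsigned long long)(buffer / NSEC_PER_MSEC),
                       (unsigned long long)(runtime / NSEC_PER_MSEC));
                return 0;
        }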
@@ -10910,15 +11152,29 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; cfs_b->burst = burst; + cfs_b->buffer = buffer; + cfs_b->init_buffer = init_buffer; - __refill_cfs_bandwidth_runtime(cfs_b); + cfs_b->max_overrun = DIV_ROUND_UP_ULL(max_cfs_runtime, quota); + cfs_b->runtime = cfs_b->quota; - /* - * Restart the period timer (if active) to handle new - * period expiry: - */ + /* burst_onset needed */ + if (cfs_b->quota != RUNTIME_INF && sysctl_sched_cfs_bw_burst_onset_percent > 0) { + + burst_onset = div_u64(burst, 100) * + sysctl_sched_cfs_bw_burst_onset_percent; + + cfs_b->runtime += burst_onset; + cfs_b->runtime = min(max_cfs_runtime, cfs_b->runtime); + } + + cfs_b->runtime = max(cfs_b->runtime, init_buffer); + cfs_b->current_buffer = max(cfs_b->buffer, init_buffer); + cfs_b->runtime_snap = cfs_b->runtime; + + /* Restart the period timer (if active) to handle new period expiry: */ if (runtime_enabled) - start_cfs_bandwidth(cfs_b); + start_cfs_bandwidth(cfs_b, 1); } for_each_online_cpu(i) { @@ -10941,10 +11197,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; period = ktime_to_ns(tg->cfs_bandwidth.period); burst = tg->cfs_bandwidth.burst; + init_buffer = tg->cfs_bandwidth.init_buffer; if (cfs_quota_us < 0) quota = RUNTIME_INF; else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) @@ -10952,10 +11209,10 @@ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) else return -EINVAL; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } -static long tg_get_cfs_quota(struct task_group *tg) +long tg_get_cfs_quota(struct task_group *tg) { u64 quota_us; @@ -10970,7 +11227,7 @@ static long tg_get_cfs_quota(struct task_group *tg) static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) return -EINVAL; @@ -10978,11 +11235,12 @@ static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) period = (u64)cfs_period_us * NSEC_PER_USEC; quota = tg->cfs_bandwidth.quota; burst = tg->cfs_bandwidth.burst; + init_buffer = tg->cfs_bandwidth.init_buffer; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } -static long tg_get_cfs_period(struct task_group *tg) +long tg_get_cfs_period(struct task_group *tg) { u64 cfs_period_us; @@ -10994,28 +11252,66 @@ static long tg_get_cfs_period(struct task_group *tg) static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; - if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) + if (cfs_burst_us < 0) + burst = RUNTIME_INF; + else if ((u64)cfs_burst_us <= U64_MAX / NSEC_PER_USEC) + burst = (u64)cfs_burst_us * NSEC_PER_USEC; + else return -EINVAL; - burst = (u64)cfs_burst_us * NSEC_PER_USEC; period = ktime_to_ns(tg->cfs_bandwidth.period); quota = tg->cfs_bandwidth.quota; + init_buffer = tg->cfs_bandwidth.init_buffer; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } static long tg_get_cfs_burst(struct task_group *tg) { u64 burst_us; + if 
(tg->cfs_bandwidth.burst == RUNTIME_INF) + return -1; + burst_us = tg->cfs_bandwidth.burst; do_div(burst_us, NSEC_PER_USEC); return burst_us; } +static int tg_set_cfs_init_buffer(struct task_group *tg, long cfs_init_buffer_us) +{ + u64 quota, period, burst, init_buffer; + + period = ktime_to_ns(tg->cfs_bandwidth.period); + quota = tg->cfs_bandwidth.quota; + burst = tg->cfs_bandwidth.burst; + if (cfs_init_buffer_us < 0) + init_buffer = RUNTIME_INF; + else if ((u64)cfs_init_buffer_us <= U64_MAX / NSEC_PER_USEC) + init_buffer = (u64)cfs_init_buffer_us * NSEC_PER_USEC; + else + return -EINVAL; + + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); +} + +static long tg_get_cfs_init_buffer(struct task_group *tg) +{ + u64 init_buffer_us; + + if (tg->cfs_bandwidth.init_buffer == RUNTIME_INF) + return -1; + + init_buffer_us = tg->cfs_bandwidth.init_buffer; + do_div(init_buffer_us, NSEC_PER_USEC); + + return init_buffer_us; +} + static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { @@ -11040,18 +11336,30 @@ static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, return tg_set_cfs_period(css_tg(css), cfs_period_us); } -static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, +static s64 cpu_cfs_burst_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { return tg_get_cfs_burst(css_tg(css)); } -static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, - struct cftype *cftype, u64 cfs_burst_us) +static int cpu_cfs_burst_write_s64(struct cgroup_subsys_state *css, + struct cftype *cftype, s64 cfs_burst_us) { return tg_set_cfs_burst(css_tg(css), cfs_burst_us); } +static s64 cpu_cfs_init_buffer_read_s64(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return tg_get_cfs_init_buffer(css_tg(css)); +} + +static int cpu_cfs_init_buffer_write_s64(struct cgroup_subsys_state *css, + struct cftype *cftype, s64 cfs_init_buffer_us) +{ + return tg_set_cfs_init_buffer(css_tg(css), cfs_init_buffer_us); +} + struct cfs_schedulable_data { struct task_group *tg; u64 period, quota; @@ -11162,6 +11470,7 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v) seq_printf(sf, "wait_sum %llu\n", ws); } + seq_printf(sf, "current_bw %llu\n", cfs_b->runtime); seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); @@ -11231,6 +11540,38 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css, } #endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) +static int cpu_ht_ratio_write(struct cgroup_subsys_state *css, + struct cftype *cftype, u64 ht_ratio) +{ + struct task_group *tg = css_tg(css); + int cpu; + + if (ht_ratio < 100 || ht_ratio > 200) + return -1; + + if (tg == &root_task_group) + return -1; + + tg->ht_ratio = ht_ratio; + for_each_online_cpu(cpu) { + struct sched_entity *se = tg->se[cpu]; + + se->ht_ratio = ht_ratio; + } + + return 0; +} + +static u64 cpu_ht_ratio_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct task_group *tg = css_tg(css); + + return tg->ht_ratio; +} +#endif + static struct cftype cpu_legacy_files[] = { #ifdef CONFIG_FAIR_GROUP_SCHED { @@ -11257,8 +11598,13 @@ static struct cftype cpu_legacy_files[] = { }, { .name = "cfs_burst_us", - .read_u64 = cpu_cfs_burst_read_u64, - .write_u64 = cpu_cfs_burst_write_u64, + .read_s64 = cpu_cfs_burst_read_s64, + .write_s64 = cpu_cfs_burst_write_s64, + }, + { + .name = "cfs_init_buffer_us", + .read_s64 = cpu_cfs_init_buffer_read_s64, + .write_s64 = 
cpu_cfs_init_buffer_write_s64, }, { .name = "stat", @@ -11294,6 +11640,13 @@ static struct cftype cpu_legacy_files[] = { .seq_show = cpu_uclamp_max_show, .write = cpu_uclamp_max_write, }, +#endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + { + .name = "ht_ratio", + .read_u64 = cpu_ht_ratio_read, + .write_u64 = cpu_ht_ratio_write, + }, #endif { } /* Terminate */ }; @@ -11305,20 +11658,24 @@ static int cpu_extra_stat_show(struct seq_file *sf, { struct task_group *tg = css_tg(css); struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; - u64 throttled_usec, burst_usec; + u64 throttled_usec, current_bw_usec, burst_usec; throttled_usec = cfs_b->throttled_time; do_div(throttled_usec, NSEC_PER_USEC); + current_bw_usec = cfs_b->runtime; + do_div(current_bw_usec, NSEC_PER_USEC); burst_usec = cfs_b->burst_time; do_div(burst_usec, NSEC_PER_USEC); seq_printf(sf, "nr_periods %d\n" "nr_throttled %d\n" "throttled_usec %llu\n" + "current_bw_usec %llu\n" "nr_bursts %d\n" "burst_usec %llu\n", cfs_b->nr_periods, cfs_b->nr_throttled, - throttled_usec, cfs_b->nr_burst, burst_usec); + throttled_usec, current_bw_usec, cfs_b->nr_burst, + burst_usec); } #endif return 0; @@ -11450,6 +11807,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct task_group *tg = css_tg(of_css(of)); + u64 init_buffer = tg_get_cfs_init_buffer(tg); u64 period = tg_get_cfs_period(tg); u64 burst = tg->cfs_bandwidth.burst; u64 quota; @@ -11457,11 +11815,304 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, ret = cpu_period_quota_parse(buf, &period, "a); if (!ret) - ret = tg_set_cfs_bandwidth(tg, period, quota, burst); + ret = tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); return ret ?: nbytes; } #endif +#ifdef CONFIG_SCHED_SLI +static DEFINE_STATIC_KEY_TRUE(cpu_no_sched_lat); +static int cpu_sched_lat_enabled_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", !static_key_enabled(&cpu_no_sched_lat)); + return 0; +} + +static int cpu_sched_lat_enabled_open(struct inode *inode, + struct file *file) +{ + return single_open(file, cpu_sched_lat_enabled_show, NULL); +} + +static ssize_t cpu_sched_lat_enabled_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + char val = -1; + int ret = count; + + if (count < 1 || *ppos) { + ret = -EINVAL; + goto out; + } + + if (copy_from_user(&val, ubuf, 1)) { + ret = -EFAULT; + goto out; + } + + switch (val) { + case '0': + static_branch_enable(&cpu_no_sched_lat); + break; + case '1': + static_branch_disable(&cpu_no_sched_lat); + break; + default: + ret = -EINVAL; + } + +out: + return ret; +} + +static const struct proc_ops cpu_sched_lat_enabled_fops = { + .proc_open = cpu_sched_lat_enabled_open, + .proc_read = seq_read, + .proc_write = cpu_sched_lat_enabled_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int __init init_cpu_sched_lat_enabled(void) +{ + struct proc_dir_entry *ca_dir, *sched_lat_enabled_file; + + ca_dir = proc_mkdir("cpusli", NULL); + if (!ca_dir) + return -ENOMEM; + + sched_lat_enabled_file = proc_create("sched_lat_enabled", 0600, + ca_dir, &cpu_sched_lat_enabled_fops); + if (!sched_lat_enabled_file) { + remove_proc_entry("cpusli", NULL); + return -ENOMEM; + } + + return 0; +} +device_initcall(init_cpu_sched_lat_enabled); + +static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs) +{ + if (msecs < 1) + return SCHED_LAT_0_1; + if (msecs < 10) + return SCHED_LAT_0_1 + (msecs + 2) / 3; + if (msecs < 50) + 
return SCHED_LAT_7_10 + msecs / 10; + if (msecs < 100) + return SCHED_LAT_50_100; + if (msecs < 1000) + return SCHED_LAT_100_500 + (msecs / 500); + if (msecs < 10000) + return SCHED_LAT_1000_5000 + (msecs / 5000); + + return SCHED_LAT_10000_INF; +} + +struct task_group *cgroup_tg(struct cgroup *cgrp) +{ + return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), + struct task_group, css); +} + +void task_cpu_update_block(struct task_struct *tsk, u64 runtime) +{ + int idx; + enum sched_lat_stat_item s; + struct task_group *tg; + unsigned int msecs; + + if (static_branch_likely(&cpu_no_sched_lat)) + return; + + rcu_read_lock(); + tg = css_tg(task_css(tsk, cpu_cgrp_id)); + if (!tg) { + rcu_read_unlock(); + return; + } + if (tsk->in_iowait) + s = SCHED_LAT_IOBLOCK; + else + s = SCHED_LAT_BLOCK; + + msecs = runtime >> 20; /* approximate ns -> ms, to speed up */ + idx = get_sched_lat_count_idx(msecs); + this_cpu_inc(tg->lat_stat_cpu->item[s][idx]); + this_cpu_inc(tg->lat_stat_cpu->item[s][SCHED_LAT_NR]); + this_cpu_add(tg->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], runtime); + rcu_read_unlock(); +} + +void cpu_update_latency(struct sched_entity *se, u64 delta) +{ + int idx; + enum sched_lat_stat_item s; + unsigned int msecs; + struct task_group *tg; + + if (static_branch_likely(&cpu_no_sched_lat)) + return; + + rcu_read_lock(); + tg = se->cfs_rq->tg; + if (!tg) { + rcu_read_unlock(); + return; + } + if (entity_is_task(se)) + s = SCHED_LAT_WAIT; + else + s = SCHED_LAT_CGROUP_WAIT; + + msecs = delta >> 20; /* approximate ns -> ms, to speed up */ + idx = get_sched_lat_count_idx(msecs); + this_cpu_inc(tg->lat_stat_cpu->item[s][idx]); + this_cpu_inc(tg->lat_stat_cpu->item[s][SCHED_LAT_NR]); + this_cpu_add(tg->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], delta); + rcu_read_unlock(); +} + +#define SCHED_LAT_STAT_SMP_WRITE(name, sidx) \ +static void smp_write_##name(void *info) \ +{ \ + struct task_group *tg = (struct task_group *)info; \ + int i; \ + \ + for (i = SCHED_LAT_0_1; i < SCHED_LAT_NR_COUNT; i++) \ + this_cpu_write(tg->lat_stat_cpu->item[sidx][i], 0); \ +} \ + +SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT); +SCHED_LAT_STAT_SMP_WRITE(sched_wait_cgroup_latency, SCHED_LAT_CGROUP_WAIT); +SCHED_LAT_STAT_SMP_WRITE(sched_block_latency, SCHED_LAT_BLOCK); +SCHED_LAT_STAT_SMP_WRITE(sched_ioblock_latency, SCHED_LAT_IOBLOCK); + +smp_call_func_t smp_sched_lat_write_funcs[] = { + smp_write_sched_wait_latency, + smp_write_sched_block_latency, + smp_write_sched_ioblock_latency, + smp_write_sched_wait_cgroup_latency +}; + +int sched_lat_stat_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct cgroup *cgrp = css->cgroup; + struct task_group *tg = cgroup_tg(cgrp); + enum sched_lat_stat_item idx = cft->private; + smp_call_func_t func = smp_sched_lat_write_funcs[idx]; + + if (unlikely(!tg)) { + WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not bound together"); + return -EOPNOTSUPP; + } + + if (val != 0) + return -EINVAL; + + func((void *)tg); + smp_call_function(func, (void *)tg, 1); + + return 0; +} + +static u64 sched_lat_stat_gather(struct task_group *tg, + enum sched_lat_stat_item sidx, + enum sched_lat_count_t cidx) +{ + u64 sum = 0; + int cpu; + + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(tg->lat_stat_cpu, cpu)->item[sidx][cidx]; + + return sum; +} + +int sched_lat_stat_show(struct seq_file *sf, void *v) +{ + struct task_group *tg = cgroup_tg(seq_css(sf)->cgroup); + enum sched_lat_stat_item s = seq_cft(sf)->private; + + if (unlikely(!tg)) { + WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not 
bound together"); + return -EOPNOTSUPP; + } + + /* CFS scheduling latency cgroup and task histgrams */ + seq_printf(sf, "0-1ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_0_1)); + seq_printf(sf, "1-4ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_1_4)); + seq_printf(sf, "4-7ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_4_7)); + seq_printf(sf, "7-10ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_7_10)); + seq_printf(sf, "10-20ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_10_20)); + seq_printf(sf, "20-30ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_20_30)); + seq_printf(sf, "30-40ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_30_40)); + seq_printf(sf, "40-50ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_40_50)); + seq_printf(sf, "50-100ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_50_100)); + seq_printf(sf, "100-500ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_100_500)); + seq_printf(sf, "500-1000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_500_1000)); + seq_printf(sf, "1000-5000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_1000_5000)); + seq_printf(sf, "5000-10000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_5000_10000)); + seq_printf(sf, ">=10000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_10000_INF)); + seq_printf(sf, "total(ms): \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_TOTAL) / 1000000); + seq_printf(sf, "nr: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_NR)); + + return 0; +} + +static int cpu_sched_cfs_show(struct seq_file *sf, void *v) +{ + struct task_group *tg = css_tg(seq_css(sf)); + struct sched_entity *se; + struct sched_statistics *stats; + int cpu; + u64 wait_max = 0, wait_sum = 0, wait_sum_other = 0, exec_sum = 0; + + if (!schedstat_enabled()) + goto out_show; + + rcu_read_lock(); + for_each_online_cpu(cpu) { + se = tg->se[cpu]; + if (!se) + continue; + stats = __schedstats_from_se(se); + exec_sum += schedstat_val(se->sum_exec_runtime); + wait_sum_other += + schedstat_val(stats->parent_wait_contrib); + wait_sum += schedstat_val(stats->wait_sum); + wait_max = max(wait_max, schedstat_val(stats->wait_max)); + } + rcu_read_unlock(); +out_show: + /* [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time] */ + seq_printf(sf, "%lld %lld %lld %lld %lld\n", + exec_sum + wait_sum, exec_sum, wait_sum_other, + wait_sum - wait_sum_other, wait_max); + + return 0; +} +#endif + static struct cftype cpu_files[] = { #ifdef CONFIG_FAIR_GROUP_SCHED { @@ -11493,8 +12144,14 @@ static struct cftype cpu_files[] = { { .name = "max.burst", .flags = CFTYPE_NOT_ON_ROOT, - .read_u64 = cpu_cfs_burst_read_u64, - .write_u64 = cpu_cfs_burst_write_u64, + .read_s64 = cpu_cfs_burst_read_s64, + .write_s64 = cpu_cfs_burst_write_s64, + }, + { + .name = "max.init_buffer", + .flags = CFTYPE_NOT_ON_ROOT, + .read_s64 = cpu_cfs_init_buffer_read_s64, + .write_s64 = cpu_cfs_init_buffer_write_s64, }, #endif #ifdef CONFIG_UCLAMP_TASK_GROUP @@ -11510,6 +12167,43 @@ static struct cftype cpu_files[] = { .seq_show = cpu_uclamp_max_show, .write = cpu_uclamp_max_write, }, +#endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + { + .name = "ht_ratio", + .read_u64 = cpu_ht_ratio_read, + .write_u64 = cpu_ht_ratio_write, + }, +#endif +#ifdef CONFIG_SCHED_SLI + { + .name = "sched_cfs_statistics", + .seq_show = cpu_sched_cfs_show, + }, + { + .name = "wait_latency", + .private = SCHED_LAT_WAIT, + .write_u64 = sched_lat_stat_write, + 
.seq_show = sched_lat_stat_show + }, + { + .name = "cgroup_wait_latency", + .private = SCHED_LAT_CGROUP_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "block_latency", + .private = SCHED_LAT_BLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "ioblock_latency", + .private = SCHED_LAT_IOBLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, #endif { } /* terminate */ }; diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c index a57fd8f27498f60eefc9b617a57712e1eaae9c4c..924859051b5f14f3c8645c48e49167edac7c1f13 100644 --- a/kernel/sched/core_sched.c +++ b/kernel/sched/core_sched.c @@ -237,38 +237,46 @@ int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, #ifdef CONFIG_SCHEDSTATS /* REQUIRES: rq->core's clock recently updated. */ -void __sched_core_account_forceidle(struct rq *rq) +void __sched_core_account_sibidle(struct rq *rq) { const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); u64 delta, now = rq_clock(rq->core); + u64 delta_task, now_task = rq_clock_task(rq->core); struct rq *rq_i; struct task_struct *p; int i; lockdep_assert_rq_held(rq); - WARN_ON_ONCE(!rq->core->core_forceidle_count); + WARN_ON_ONCE(!rq->core->core_sibidle_count); - if (rq->core->core_forceidle_start == 0) - return; + /* can't be forced idle without a running task */ + WARN_ON_ONCE(!rq->core->core_sibidle_occupation && + rq->core->core_forceidle_count); + + if (rq->core->core_sibidle_start == 0 || + rq->core->core_sibidle_occupation == 0) + goto out; - delta = now - rq->core->core_forceidle_start; + delta = now - rq->core->core_sibidle_start; + delta_task = now_task - rq->core->core_sibidle_start_task; if (unlikely((s64)delta <= 0)) - return; + goto out; - rq->core->core_forceidle_start = now; + rq->core->core_sibidle_start = now; + rq->core->core_sibidle_start_task = now_task; - if (WARN_ON_ONCE(!rq->core->core_forceidle_occupation)) { - /* can't be forced idle without a running task */ - } else if (rq->core->core_forceidle_count > 1 || - rq->core->core_forceidle_occupation > 1) { + if (rq->core->core_sibidle_count > 1 || + rq->core->core_sibidle_occupation > 1) { /* * For larger SMT configurations, we need to scale the charged * forced idle amount since there can be more than one forced * idle sibling and more than one running cookied task. */ - delta *= rq->core->core_forceidle_count; - delta = div_u64(delta, rq->core->core_forceidle_occupation); + delta *= rq->core->core_sibidle_count; + delta = div_u64(delta, rq->core->core_sibidle_occupation); + delta_task *= rq->core->core_sibidle_count; + delta_task = div_u64(delta_task, rq->core->core_sibidle_occupation); } for_each_cpu(i, smt_mask) { @@ -279,22 +287,32 @@ void __sched_core_account_forceidle(struct rq *rq) continue; /* - * Note: this will account forceidle to the current cpu, even + * Note: this will account sibidle to the current cpu, even * if it comes from our SMT sibling. 
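The scaling above keeps the total charge equal to the sibling idle time actually observed: with core_sibidle_count idle siblings and core_sibidle_occupation running cookied tasks, each task is charged delta * count / occupation. For example, one running task next to two forced-idle siblings on an SMT-4 core is charged 2 * delta, while two running tasks next to a single idle sibling are charged delta / 2 each; in both cases the charges sum to the idle time accumulated across the siblings.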
*/ - __account_forceidle_time(p, delta); + __account_sibidle_time(p, delta, delta_task, + !!rq->core->core_forceidle_count); + account_ht_aware_quota(p, delta_task); + } + +out:; +#ifdef CONFIG_SCHED_ACPU + for_each_cpu(i, smt_mask) { + rq_i = cpu_rq(i); + rq->last_acpu_update_time = now; } +#endif } void __sched_core_tick(struct rq *rq) { - if (!rq->core->core_forceidle_count) + if (!rq->core->core_sibidle_count) return; if (rq != rq->core) update_rq_clock(rq->core); - __sched_core_account_forceidle(rq); + __sched_core_account_sibidle(rq); } #endif /* CONFIG_SCHEDSTATS */ diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 0de9dda099496fddb804f6ebb5a7fa0f275a72e5..3322061596a2accd2db95affb91f48d578c51663 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -7,6 +7,8 @@ * (balbir@in.ibm.com). */ +#include + /* Time spent by the tasks of the CPU accounting group executing in ... */ enum cpuacct_stat_index { CPUACCT_STAT_USER, /* ... user mode */ @@ -20,14 +22,49 @@ static const char * const cpuacct_stat_desc[] = { [CPUACCT_STAT_SYSTEM] = "system", }; +struct cpuacct_prev_cputime { + struct prev_cputime prev_cputime1; /* utime and stime */ + struct prev_cputime prev_cputime2; /* user and nice */ +} ____cacheline_aligned; + +#ifdef CONFIG_SCHED_SLI +/* Maintain various statistics */ +struct cpuacct_alistats { + u64 nr_migrations; +} ____cacheline_aligned; +#endif + + /* track CPU usage of a group of tasks and its child groups */ struct cpuacct { struct cgroup_subsys_state css; /* cpuusage holds pointer to a u64-type object on every CPU */ u64 __percpu *cpuusage; + struct cpuacct_prev_cputime __percpu *prev_cputime; struct kernel_cpustat __percpu *cpustat; +#ifdef CONFIG_SCHED_SLI + struct cpuacct_alistats __percpu *alistats; + struct list_head sli_list; + bool sli_enabled; + u64 next_load_update; +#endif + unsigned long avenrun[3]; +#ifdef CONFIG_SCHED_SLI + unsigned long avenrun_r[3]; +#endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; +static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) +{ + return container_of(global_cgroup_css(cgrp, cpuacct_cgrp_id), + struct cpuacct, css); +} + static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) { return css ? 
container_of(css, struct cpuacct, css) : NULL; @@ -45,16 +82,92 @@ static inline struct cpuacct *parent_ca(struct cpuacct *ca) } static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage); +static DEFINE_PER_CPU(struct cpuacct_prev_cputime, root_cpuacct_prev_cputime); +#ifdef CONFIG_SCHED_SLI +static DEFINE_PER_CPU(struct cpuacct_alistats, root_alistats); +#endif + static struct cpuacct root_cpuacct = { .cpustat = &kernel_cpustat, + .prev_cputime = &root_cpuacct_prev_cputime, .cpuusage = &root_cpuacct_cpuusage, +#ifdef CONFIG_SCHED_SLI + .alistats = &root_alistats, +#endif }; +#ifdef CONFIG_SCHED_SLI + +void task_ca_increase_nr_migrations(struct task_struct *tsk) +{ + struct cpuacct *ca; + + rcu_read_lock(); + ca = task_ca(tsk); + if (ca) + this_cpu_ptr(ca->alistats)->nr_migrations++; + rcu_read_unlock(); +} + +#endif + +#ifdef CONFIG_SCHED_SLI +static DEFINE_SPINLOCK(sli_ca_lock); +LIST_HEAD(sli_ca_list); + +static void ca_enable_sli(struct cpuacct *ca, bool val) +{ + spin_lock(&sli_ca_lock); + if (val && !READ_ONCE(ca->sli_enabled)) + list_add_tail_rcu(&ca->sli_list, &sli_ca_list); + else if (!val && READ_ONCE(ca->sli_enabled)) + list_del_rcu(&ca->sli_list); + WRITE_ONCE(ca->sli_enabled, val); + spin_unlock(&sli_ca_lock); +} + +void create_rich_container_reaper(struct task_struct *tsk) +{ + struct cpuacct *ca; + struct cpuacct *parent_ca; + struct cgroup_subsys_state *css; + + if (thread_group_leader(tsk)) { + rcu_read_lock(); + css = task_css(tsk, cpuacct_cgrp_id); + ca = css_ca(css); + if (!ca || !in_rich_container(tsk)) { + rcu_read_unlock(); + return; + } + + ca_enable_sli(ca, true); + parent_ca = css_ca(css->parent); + if (parent_ca && parent_ca != &root_cpuacct) + ca_enable_sli(parent_ca, true); + rcu_read_unlock(); + } +} + +static int enable_sli_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + ca_enable_sli(css_ca(css), !!val); + return 0; +} + +static u64 enable_sli_read(struct cgroup_subsys_state *css, struct cftype *cft) +{ + return READ_ONCE(css_ca(css)->sli_enabled); +} +#endif + /* Create a new CPU accounting group */ static struct cgroup_subsys_state * cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) { struct cpuacct *ca; + int i; if (!parent_css) return &root_cpuacct.css; @@ -71,8 +184,37 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) if (!ca->cpustat) goto out_free_cpuusage; + ca->prev_cputime = alloc_percpu(struct cpuacct_prev_cputime); + if (!ca->prev_cputime) + goto out_free_cpustat; + +#ifdef CONFIG_SCHED_SLI + INIT_LIST_HEAD(&ca->sli_list); + + ca->alistats = alloc_percpu(struct cpuacct_alistats); + if (!ca->alistats) + goto out_free_pre_cputime; +#endif + + for_each_possible_cpu(i) { + prev_cputime_init( + &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime1); + prev_cputime_init( + &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime2); + } + + ca->avenrun[0] = ca->avenrun[1] = ca->avenrun[2] = 0; +#ifdef CONFIG_SCHED_SLI + ca->avenrun_r[0] = ca->avenrun_r[1] = ca->avenrun_r[2] = 0; +#endif return &ca->css; +#ifdef CONFIG_SCHED_SLI +out_free_pre_cputime: + free_percpu(ca->prev_cputime); +#endif +out_free_cpustat: + free_percpu(ca->cpustat); out_free_cpuusage: free_percpu(ca->cpuusage); out_free_ca: @@ -81,13 +223,24 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) return ERR_PTR(-ENOMEM); } +#ifdef CONFIG_SCHED_SLI +static void cpuacct_css_offline(struct cgroup_subsys_state *css) +{ + ca_enable_sli(css_ca(css), false); +} +#endif + /* Destroy an existing CPU accounting group */ static void cpuacct_css_free(struct 
cgroup_subsys_state *css) { struct cpuacct *ca = css_ca(css); + free_percpu(ca->prev_cputime); free_percpu(ca->cpustat); free_percpu(ca->cpuusage); +#ifdef CONFIG_SCHED_SLI + free_percpu(ca->alistats); +#endif kfree(ca); } @@ -289,6 +442,495 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) return 0; } +#ifdef CONFIG_SCHED_SLI +#ifndef arch_idle_time +#define arch_idle_time(cpu) 0 +#endif + +static unsigned long ca_running(struct cpuacct *ca, int cpu); + +static void __get_cgroup_avenrun(struct cpuacct *ca, unsigned long *loads, + unsigned long offset, int shift, bool running) +{ + unsigned long *avenrun; + + if (running) + avenrun = ca->avenrun_r; + else + avenrun = ca->avenrun; + + loads[0] = (avenrun[0] + offset) << shift; + loads[1] = (avenrun[1] + offset) << shift; + loads[2] = (avenrun[2] + offset) << shift; +} + +static inline unsigned long nr_uninterruptible(void) +{ + unsigned long i, sum = 0; + + for_each_possible_cpu(i) + sum += cpu_rq(i)->nr_uninterruptible; + + /* + * Since we read the counters lockless, it might be slightly + * inaccurate. Do not allow it to go below zero though: + */ + if (unlikely((long)sum < 0)) + sum = 0; + + return sum; +} + +#ifdef CONFIG_CFS_BANDWIDTH +static inline bool tg_cfs_throttled(struct task_group *tg, int cpu) +{ + return tg->cfs_rq[cpu]->throttle_count; +} +#else +static inline bool tg_cfs_throttled(struct task_group *tg, int cpu) +{ + return false; +} +#endif + +#ifdef CONFIG_RT_GROUP_SCHED +static inline bool tg_rt_throttled(struct task_group *tg, int cpu) +{ + return tg->rt_rq[cpu]->rt_throttled && !tg->rt_rq[cpu]->rt_nr_boosted; +} +#endif + +static unsigned long ca_running(struct cpuacct *ca, int cpu) +{ + unsigned long nr_running = 0; + struct cgroup *cgrp = ca->css.cgroup; + struct task_group *tg; + + /* Make sure it is only called for non-root cpuacct */ + if (ca == &root_cpuacct) + return 0; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + if (unlikely(!tg)) + goto out; + + if (!tg_cfs_throttled(tg, cpu)) + nr_running += tg->cfs_rq[cpu]->h_nr_running; +#ifdef CONFIG_RT_GROUP_SCHED + if (!tg_rt_throttled(tg, cpu)) + nr_running += tg->rt_rq[cpu]->rt_nr_running; +#endif + /* SCHED_DEADLINE doesn't support cgroup yet */ + +out: + rcu_read_unlock(); + return nr_running; +} + +static unsigned long ca_uninterruptible(struct cpuacct *ca, int cpu) +{ + unsigned long nr = 0; + struct cgroup *cgrp = ca->css.cgroup; + struct task_group *tg; + + /* Make sure it is only called for non-root cpuacct */ + if (ca == &root_cpuacct) + return nr; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + if (unlikely(!tg)) + goto out_rcu_unlock; + + nr = tg->cfs_rq[cpu]->nr_uninterruptible; +#ifdef CONFIG_RT_GROUP_SCHED + nr += tg->rt_rq[cpu]->nr_uninterruptible; +#endif + +out_rcu_unlock: + rcu_read_unlock(); + return nr; +} + +void cgroup_idle_start(struct sched_entity *se) +{ + unsigned long flags; + u64 clock; + + if (!schedstat_enabled()) + return; + + clock = __rq_clock_broken(se->cfs_rq->rq); + + local_irq_save(flags); + + write_seqlock(&se->idle_seqlock); + __schedstat_set(se->cg_idle_start, clock); + write_sequnlock(&se->idle_seqlock); + + spin_lock(&se->iowait_lock); + if (schedstat_val(se->cg_nr_iowait)) + __schedstat_set(se->cg_iowait_start, clock); + spin_unlock(&se->iowait_lock); + + local_irq_restore(flags); +} + +void cgroup_idle_end(struct sched_entity *se) +{ + unsigned long flags; + u64 clock; + u64 idle_start, iowait_start; + + if (!schedstat_enabled()) + return; + + clock = __rq_clock_broken(se->cfs_rq->rq); + + local_irq_save(flags); 
+ + write_seqlock(&se->idle_seqlock); + idle_start = schedstat_val(se->cg_idle_start); + __schedstat_add(se->cg_idle_sum, clock - idle_start); + __schedstat_set(se->cg_idle_start, 0); + write_sequnlock(&se->idle_seqlock); + + spin_lock(&se->iowait_lock); + if (schedstat_val(se->cg_nr_iowait)) { + iowait_start = schedstat_val(se->cg_iowait_start); + __schedstat_add(se->cg_iowait_sum, clock - iowait_start); + __schedstat_set(se->cg_iowait_start, 0); + } + spin_unlock(&se->iowait_lock); + + local_irq_restore(flags); +} + +void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, + struct cpumask *added) +{ + struct task_group *tg; + struct sched_entity *se; + int cpu; + + if (!schedstat_enabled()) + return; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + + if (!tg) { + rcu_read_unlock(); + return; + } + + if (added) { + /* Mark newly added cpus as newly-idle */ + for_each_cpu(cpu, added) { + se = tg->se[cpu]; + cgroup_idle_start(se); + __schedstat_add(se->cg_ineffective_sum, + __rq_clock_broken(cpu_rq(cpu)) - + se->cg_ineffective_start); + __schedstat_set(se->cg_ineffective_start, 0); + } + } + + if (deleted) { + /* Mark ineffective_cpus as idle-invalid */ + for_each_cpu(cpu, deleted) { + se = tg->se[cpu]; + cgroup_idle_end(se); + /* Use __rq_clock_broken to avoid warning */ + __schedstat_set(se->cg_ineffective_start, + __rq_clock_broken(cpu_rq(cpu))); + } + } + + rcu_read_unlock(); +} + +static void cpuacct_calc_load(struct cpuacct *acct) +{ + long active = 0, active_r = 0, nr_r; + int cpu; + + if (acct != &root_cpuacct) { + for_each_possible_cpu(cpu) { + nr_r = ca_running(acct, cpu); + active += nr_r; + active_r += nr_r; + active += ca_uninterruptible(acct, cpu); + } + active = active > 0 ? active * FIXED_1 : 0; + acct->avenrun[0] = calc_load(acct->avenrun[0], EXP_1, active); + acct->avenrun[1] = calc_load(acct->avenrun[1], EXP_5, active); + acct->avenrun[2] = calc_load(acct->avenrun[2], EXP_15, active); + + active_r = active_r > 0 ? active_r * FIXED_1 : 0; + acct->avenrun_r[0] = calc_load(acct->avenrun_r[0], + EXP_1, active_r); + acct->avenrun_r[1] = calc_load(acct->avenrun_r[1], + EXP_5, active_r); + acct->avenrun_r[2] = calc_load(acct->avenrun_r[2], + EXP_15, active_r); + } else { + acct->avenrun[0] = avenrun[0]; + acct->avenrun[1] = avenrun[1]; + acct->avenrun[2] = avenrun[2]; + + acct->avenrun_r[0] = avenrun_r[0]; + acct->avenrun_r[1] = avenrun_r[1]; + acct->avenrun_r[2] = avenrun_r[2]; + } +} + +/* + * We walk each cpuacct whose SLI is enabled to perform the per-cgroup load + * calculation; the overhead is acceptable as long as SLI is not enabled for + * most of the cgroups.
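For reference, the ``calc_load()`` step used by ``cpuacct_calc_load()`` above is the kernel's classic fixed-point exponential moving average over 5-second windows. A minimal userspace sketch of that update, using the standard FSHIFT/FIXED_1/EXP_* constants (the two-task workload in main() is purely illustrative)::

    #include <stdio.h>

    #define FSHIFT  11                 /* bits of fraction */
    #define FIXED_1 (1 << FSHIFT)      /* 1.0 in fixed point */
    #define EXP_1   1884               /* 1/exp(5s/1min) */
    #define EXP_5   2014               /* 1/exp(5s/5min) */
    #define EXP_15  2037               /* 1/exp(5s/15min) */

    /* One LOAD_FREQ decay step, as in kernel/sched/loadavg.c. */
    static unsigned long calc_load(unsigned long load, unsigned long exp,
                                   unsigned long active)
    {
            unsigned long newload;

            newload = load * exp + active * (FIXED_1 - exp);
            if (active >= load)
                    newload += FIXED_1 - 1;    /* round up while rising */

            return newload / FIXED_1;
    }

    int main(void)
    {
            unsigned long avenrun1 = 0;

            /* Twelve 5-second samples (one minute) of 2 runnable tasks. */
            for (int i = 0; i < 12; i++)
                    avenrun1 = calc_load(avenrun1, EXP_1, 2 * FIXED_1);

            /* Same integer/centi-fraction split as LOAD_INT/LOAD_FRAC. */
            printf("load1 = %lu.%02lu\n", avenrun1 >> FSHIFT,
                   ((avenrun1 & (FIXED_1 - 1)) * 100) >> FSHIFT);
            return 0;
    }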
+ */ +void calc_cgroup_load(void) +{ + struct cpuacct *ca; + + rcu_read_lock(); + list_for_each_entry_rcu(ca, &sli_ca_list, sli_list) + cpuacct_calc_load(ca); + rcu_read_unlock(); +} + +static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, + struct task_group *tg, struct cpuacct_usage_result *res) +{ + struct kernel_cpustat *kcpustat; + u64 *cpuusage; + struct cpuacct_prev_cputime *prev_cputime; + struct task_cputime cputime; + u64 tick_user, tick_nice, tick_sys, left, right; + struct sched_entity *se; + + kcpustat = per_cpu_ptr(ca->cpustat, cpu); + if (unlikely(!tg)) { + memset(res, 0, sizeof(*res)); + return; + } + + cpuusage = per_cpu_ptr(ca->cpuusage, cpu); + + se = tg->se[cpu]; + prev_cputime = per_cpu_ptr(ca->prev_cputime, cpu); + tick_user = kcpustat->cpustat[CPUTIME_USER]; + tick_nice = kcpustat->cpustat[CPUTIME_NICE]; + tick_sys = kcpustat->cpustat[CPUTIME_SYSTEM]; + + /* Calculate system run time */ + cputime.sum_exec_runtime = *cpuusage; + cputime.utime = tick_user + tick_nice; + cputime.stime = tick_sys; + cputime_adjust(&cputime, &prev_cputime->prev_cputime1, &left, &right); + res->system = right; + + /* Calculate user and nice run time */ + cputime.sum_exec_runtime = left; /* user + nice */ + cputime.utime = tick_user; + cputime.stime = tick_nice; + cputime_adjust(&cputime, &prev_cputime->prev_cputime2, &left, &right); + res->user = left; + res->nice = right; + + res->irq = kcpustat->cpustat[CPUTIME_IRQ]; + res->softirq = kcpustat->cpustat[CPUTIME_SOFTIRQ]; + + if (se && schedstat_enabled()) { + unsigned int seq; + unsigned long flags; + u64 idle_start, ineff, ineff_start, elapse, complement; + u64 clock, iowait_start; + + do { + seq = read_seqbegin(&se->idle_seqlock); + res->idle = schedstat_val(se->cg_idle_sum); + idle_start = schedstat_val(se->cg_idle_start); + clock = cpu_clock(cpu); + if (idle_start && clock > idle_start) + res->idle += clock - idle_start; + } while (read_seqretry(&se->idle_seqlock, seq)); + + ineff = schedstat_val(se->cg_ineffective_sum); + ineff_start = schedstat_val(se->cg_ineffective_start); + if (ineff_start) + __schedstat_add(ineff, clock - ineff_start); + + spin_lock_irqsave(&se->iowait_lock, flags); + res->iowait = schedstat_val(se->cg_iowait_sum); + iowait_start = schedstat_val(se->cg_iowait_start); + if (iowait_start) + __schedstat_add(res->iowait, clock - iowait_start); + spin_unlock_irqrestore(&se->iowait_lock, flags); + + res->steal = 0; + + elapse = clock - schedstat_val(se->cg_init_time); + complement = res->idle + se->sum_exec_raw + ineff; + if (elapse > complement) + res->steal = elapse - complement; + + res->idle -= res->iowait; + } else { + res->idle = res->iowait = res->steal = 0; + } + + res->guest = kcpustat->cpustat[CPUTIME_GUEST]; + res->guest_nice = kcpustat->cpustat[CPUTIME_GUEST_NICE]; +} + +static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) +{ + struct cpuacct *ca = css_ca(seq_css(sf)); + struct cgroup *cgrp = seq_css(sf)->cgroup; + u64 user, nice, system, idle, iowait, irq, softirq, steal, guest; + u64 nr_migrations = 0; + struct cpuacct_alistats *alistats; + unsigned long load, avnrun[3], avnrun_r[3]; + unsigned long nr_run = 0, nr_uninter = 0; + int cpu; + + user = nice = system = idle = iowait = + irq = softirq = steal = guest = 0; + + if (ca != &root_cpuacct) { + struct cpuacct_usage_result res; + + for_each_possible_cpu(cpu) { + if (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN)) + continue; + + rcu_read_lock(); + __cpuacct_get_usage_result(ca, cpu, + cgroup_tg(cgrp), &res); + rcu_read_unlock(); + + 
user += res.user; + nice += res.nice; + system += res.system; + irq += res.irq; + softirq += res.softirq; + steal += res.steal; + guest += res.guest; + guest += res.guest_nice; + iowait += res.iowait; + idle += res.idle; + + alistats = per_cpu_ptr(ca->alistats, cpu); + nr_migrations += alistats->nr_migrations; + nr_run += ca_running(ca, cpu); + nr_uninter += ca_uninterruptible(ca, cpu); + } + + __get_cgroup_avenrun(ca, avnrun, FIXED_1/200, 0, false); + __get_cgroup_avenrun(ca, avnrun_r, FIXED_1/200, 0, true); + } else { + struct kernel_cpustat *kcpustat; + + for_each_possible_cpu(cpu) { + kcpustat = per_cpu_ptr(ca->cpustat, cpu); + user += kcpustat->cpustat[CPUTIME_USER]; + nice += kcpustat->cpustat[CPUTIME_NICE]; + system += kcpustat->cpustat[CPUTIME_SYSTEM]; + irq += kcpustat->cpustat[CPUTIME_IRQ]; + softirq += kcpustat->cpustat[CPUTIME_SOFTIRQ]; + guest += kcpustat->cpustat[CPUTIME_GUEST]; + guest += kcpustat->cpustat[CPUTIME_GUEST_NICE]; + idle += get_idle_time(kcpustat, cpu); + iowait += get_iowait_time(kcpustat, cpu); + steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + alistats = per_cpu_ptr(ca->alistats, cpu); + nr_migrations += alistats->nr_migrations; + } + + nr_run = nr_running(); + nr_uninter = nr_uninterruptible(); + + get_avenrun(avnrun, FIXED_1/200, 0); + get_avenrun_r(avnrun_r, FIXED_1/200, 0); + } + + seq_printf(sf, "user %lld\n", nsec_to_clock_t(user)); + seq_printf(sf, "nice %lld\n", nsec_to_clock_t(nice)); + seq_printf(sf, "system %lld\n", nsec_to_clock_t(system)); + seq_printf(sf, "idle %lld\n", nsec_to_clock_t(idle)); + seq_printf(sf, "iowait %lld\n", nsec_to_clock_t(iowait)); + seq_printf(sf, "irq %lld\n", nsec_to_clock_t(irq)); + seq_printf(sf, "softirq %lld\n", nsec_to_clock_t(softirq)); + seq_printf(sf, "steal %lld\n", nsec_to_clock_t(steal)); + seq_printf(sf, "guest %lld\n", nsec_to_clock_t(guest)); + + load = LOAD_INT(avnrun[0]) * 100 + LOAD_FRAC(avnrun[0]); + seq_printf(sf, "load average(1min) %lld\n", (u64)load); + load = LOAD_INT(avnrun[1]) * 100 + LOAD_FRAC(avnrun[1]); + seq_printf(sf, "load average(5min) %lld\n", (u64)load); + load = LOAD_INT(avnrun[2]) * 100 + LOAD_FRAC(avnrun[2]); + seq_printf(sf, "load average(15min) %lld\n", (u64)load); + + seq_printf(sf, "nr_running %lld\n", (u64)nr_run); + if ((long) nr_uninter < 0) + nr_uninter = 0; + seq_printf(sf, "nr_uninterruptible %lld\n", (u64)nr_uninter); + seq_printf(sf, "nr_migrations %lld\n", (u64)nr_migrations); + + load = LOAD_INT(avnrun_r[0]) * 100 + LOAD_FRAC(avnrun_r[0]); + seq_printf(sf, "running load average(1min) %lld\n", (u64)load); + load = LOAD_INT(avnrun_r[1]) * 100 + LOAD_FRAC(avnrun_r[1]); + seq_printf(sf, "running load average(5min) %lld\n", (u64)load); + load = LOAD_INT(avnrun_r[2]) * 100 + LOAD_FRAC(avnrun_r[2]); + seq_printf(sf, "running load average(15min) %lld\n", (u64)load); + + return 0; +} + +static int cpuacct_sched_cfs_show(struct seq_file *sf, void *v) +{ + struct cgroup *cgrp = seq_css(sf)->cgroup; + struct task_group *tg = cgroup_tg(cgrp); + struct sched_entity *se; + struct sched_statistics *stats; + int cpu; + u64 wait_max = 0, wait_sum = 0, wait_sum_other = 0, exec_sum = 0; + + if (!schedstat_enabled()) + goto out_show; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + if (unlikely(!tg)) { + WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not bound together"); + goto rcu_unlock_show; + } + + for_each_online_cpu(cpu) { + se = tg->se[cpu]; + if (!se) + continue; + stats = __schedstats_from_se(se); + exec_sum += schedstat_val(se->sum_exec_runtime); + wait_sum_other += + 
schedstat_val(stats->parent_wait_contrib); + wait_sum += schedstat_val(stats->wait_sum); + wait_max = max(wait_max, schedstat_val(stats->wait_max)); + } +rcu_unlock_show: + rcu_read_unlock(); +out_show: + /* [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time] */ + seq_printf(sf, "%lld %lld %lld %lld %lld\n", + exec_sum + wait_sum, exec_sum, wait_sum_other, + wait_sum - wait_sum_other, wait_max); + + return 0; +} +#endif + static struct cftype files[] = { { .name = "usage", @@ -323,6 +965,45 @@ static struct cftype files[] = { .name = "stat", .seq_show = cpuacct_stats_show, }, +#ifdef CONFIG_SCHED_SLI + { + .name = "proc_stat", + .seq_show = cpuacct_proc_stats_show, + }, + { + .name = "enable_sli", + .read_u64 = enable_sli_read, + .write_u64 = enable_sli_write + }, + { + .name = "sched_cfs_statistics", + .seq_show = cpuacct_sched_cfs_show, + }, + { + .name = "wait_latency", + .private = SCHED_LAT_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "cgroup_wait_latency", + .private = SCHED_LAT_CGROUP_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "block_latency", + .private = SCHED_LAT_BLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "ioblock_latency", + .private = SCHED_LAT_IOBLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, +#endif { } /* terminate */ }; @@ -355,9 +1036,364 @@ void cpuacct_account_field(struct task_struct *tsk, int index, u64 val) __this_cpu_add(ca->cpustat->cpustat[index], val); } +static void cpuacct_cgroup_attach(struct cgroup_taskset *tset) +{ + struct task_struct *task; + struct cgroup_subsys_state *css; + + cgroup_taskset_for_each(task, css, tset) + if (task->pid && is_child_reaper(task_pid(task))) + create_rich_container_reaper(task); +} + struct cgroup_subsys cpuacct_cgrp_subsys = { .css_alloc = cpuacct_css_alloc, .css_free = cpuacct_css_free, +#ifdef CONFIG_SCHED_SLI + .css_offline = cpuacct_css_offline, +#endif + .attach = cpuacct_cgroup_attach, .legacy_cftypes = files, .early_init = true, }; + +#ifdef CONFIG_SCHED_SLI +static DEFINE_STATIC_KEY_FALSE(async_load_calc); + +bool async_load_calc_enabled(void) +{ + return static_branch_likely(&async_load_calc); +} + +static int async_load_calc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", async_load_calc_enabled()); + return 0; +} + +static int async_load_calc_open(struct inode *inode, struct file *file) +{ + return single_open(file, async_load_calc_show, NULL); +} + +static void async_calc_cgroup_load(void) +{ + int cnt; + struct cpuacct *ca; + +again: + cnt = 1; + rcu_read_lock(); + list_for_each_entry_rcu(ca, &sli_ca_list, sli_list) { + unsigned long next_update = ca->next_load_update; + + /* + * Need per ca check since after break the list + * could have been changed, otherwise the loop + * will be endless. 
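The restart pattern above deserves a closer look: because every ca carries its own ``next_load_update`` stamp, the walk can drop the RCU read lock, reschedule, and start over from the list head without recalculating anything twice. A toy userspace model of that idempotent restart (a plain array stands in for the RCU list; the BATCH and LOAD_FREQ values are illustrative)::

    #include <stdio.h>

    #define BATCH     3        /* the patch uses 100 */
    #define LOAD_FREQ 5000     /* ms, illustrative */

    struct ca { unsigned long next_update; int id; };

    static void walk(struct ca *cas, int n, unsigned long now)
    {
            int cnt;

    again:
            cnt = 1;
            for (int i = 0; i < n; i++) {   /* list_for_each_entry_rcu() */
                    if (now < cas[i].next_update)
                            continue;       /* done on an earlier pass */

                    printf("recalc ca %d\n", cas[i].id);
                    cas[i].next_update = now + LOAD_FREQ;

                    if (cnt++ >= BATCH)     /* rcu_read_unlock() plus  */
                            goto again;     /* cond_resched() upstream */
            }
    }

    int main(void)
    {
            struct ca cas[5] = { {0, 0}, {0, 1}, {0, 2}, {0, 3}, {0, 4} };

            walk(cas, 5, 1);   /* each ca is recalculated exactly once */
            return 0;
    }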
+ */ + if (time_before(jiffies, next_update + 10)) + continue; + + cpuacct_calc_load(ca); + ca->next_load_update = jiffies + LOAD_FREQ; + + /* Take a break after every 100 cpuacct groups */ + if (cnt++ >= 100) { + rcu_read_unlock(); + cond_resched(); + goto again; + } + } + rcu_read_unlock(); +} + +int load_calc_func(void *unused) +{ + unsigned long next_update = jiffies + LOAD_FREQ; + + while (!kthread_should_stop()) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ/5); + set_current_state(TASK_RUNNING); + + if (time_before(jiffies, next_update + 10)) + continue; + + async_calc_cgroup_load(); + next_update += LOAD_FREQ; + } + + return 0; +} + +static struct task_struct *load_calc_p; + +static int mod_async_load_calc(bool enable) +{ + if (enable == async_load_calc_enabled()) + return 0; + + if (enable) { + load_calc_p = kthread_create(load_calc_func, NULL, "load_calc"); + if (IS_ERR(load_calc_p)) { + int err = PTR_ERR(load_calc_p); + + load_calc_p = NULL; + return err; + } + + wake_up_process(load_calc_p); + static_branch_enable(&async_load_calc); + } else { + kthread_stop(load_calc_p); + load_calc_p = NULL; + + static_branch_disable(&async_load_calc); + } + + return 0; +} + +static DEFINE_MUTEX(load_calc_mutex); + +static ssize_t async_load_calc_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + char val = -1; + int ret = 0; + + if (count < 1 || *ppos) { + ret = -EINVAL; + goto out; + } + + if (copy_from_user(&val, ubuf, 1)) { + ret = -EFAULT; + goto out; + } + + mutex_lock(&load_calc_mutex); + + switch (val) { + case '0': + ret = mod_async_load_calc(false); + break; + case '1': + ret = mod_async_load_calc(true); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&load_calc_mutex); +out: + return ret ? ret : count; +} + +static const struct proc_ops async_load_calc_opt = { + .proc_open = async_load_calc_open, + .proc_read = seq_read, + .proc_write = async_load_calc_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int __init async_load_calc_init(void) +{ + if (!proc_create("async_load_calc", 0600, NULL, + &async_load_calc_opt)) { + pr_err("Failed to register async_load_calc interface\n"); + return 0; + } + + if (mod_async_load_calc(true)) + pr_err("Failed to enable async_load_calc\n"); + + return 0; +} +late_initcall_sync(async_load_calc_init); +#endif + +#ifdef CONFIG_RICH_CONTAINER + +/* 0 - cpu quota; 1 - cpuset.cpus; 2 - cpu.shares */ +int sysctl_rich_container_cpuinfo_source; +/* base shares per CPU when using the cpu.shares source */ +unsigned int sysctl_rich_container_cpuinfo_sharesbase = 1024; + +static inline struct task_group *css_tg(struct cgroup_subsys_state *css) +{ + return css ?
container_of(css, struct task_group, css) : NULL; +} + +static inline struct task_group *task_tg(struct task_struct *tsk) +{ + return css_tg(task_css(tsk, cpu_cgrp_id)); +} + +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask) +{ + struct task_group *tg; + int i, cpus; + + /* cfs quota source */ + if (sysctl_rich_container_cpuinfo_source == 0) { + long quota, period; + + rcu_read_lock(); + tg = task_tg(tsk); + quota = tg_get_cfs_quota(tg); + period = tg_get_cfs_period(tg); + rcu_read_unlock(); + + if (quota == -1) { + /* Fallback to use cpuset.cpus if quota not set */ + goto cpuset_source; + } else { + /* period can't be 0 */ + cpus = (quota + period - 1) / period; + cpus = clamp(cpus, 1, (int)num_online_cpus()); + cpumask_clear(pmask); + for (i = 0; i < cpus; i++) + cpumask_set_cpu(i, pmask); + } + + return; + } + + /* cpu.shares source */ + if (sysctl_rich_container_cpuinfo_source == 2) { + unsigned long shares; + + rcu_read_lock(); + tg = task_tg(tsk); + shares = scale_load_down(tg->shares); + rcu_read_unlock(); + + /* sysctl_rich_container_cpuinfo_sharesbase can't be 0 */ + cpus = (shares + sysctl_rich_container_cpuinfo_sharesbase - 1) / + sysctl_rich_container_cpuinfo_sharesbase; + cpus = clamp(cpus, 1, (int)num_online_cpus()); + cpumask_clear(pmask); + for (i = 0; i < cpus; i++) + cpumask_set_cpu(i, pmask); + + return; + } + +cpuset_source: + /* cpuset.cpus source */ + cpuset_cpus_allowed(tsk, pmask); +} + +bool child_cpuacct(struct task_struct *tsk) +{ + struct cpuacct *ca = task_ca(tsk); + + if (ca && ca != &root_cpuacct) + return true; + + return false; +} + + +bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total) +{ + struct cpumask cpuset_allowed; + struct task_struct __maybe_unused *scenario; + bool in_rich; + int i, id = 0; + + rcu_read_lock(); + in_rich = in_rich_container(current); + rcu_read_unlock(); + if (!in_rich) + return false; + + *rich_container = true; + + read_lock(&tasklist_lock); + scenario = rich_container_get_scenario(); + get_task_struct(scenario); + read_unlock(&tasklist_lock); + rich_container_get_cpus(scenario, &cpuset_allowed); + put_task_struct(scenario); + + *total = cpumask_weight(&cpuset_allowed); + if (cpumask_test_cpu(cpu, &cpuset_allowed)) { + for_each_cpu(i, &cpuset_allowed) { + if (i == cpu) + break; + id++; + } + *index = id; + return false; + } + + /* Hide this cpu in the container */ + return true; +} + +void rich_container_source(enum rich_container_source *from) +{ + if (sysctl_rich_container_source == 1) + *from = RICH_CONTAINER_REAPER; + else + *from = RICH_CONTAINER_CURRENT; +} + +void rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, + struct cpuacct_usage_result *res) +{ + struct cpuacct *ca_src; + struct task_group *tg; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + tg = cgroup_tg(ca_src->css.cgroup); + __cpuacct_get_usage_result(ca_src, cpu, tg, res); + rcu_read_unlock(); +} + +unsigned long rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu) +{ + struct cpuacct *ca_src; + unsigned long nr; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + nr = ca_running(ca_src, cpu); + rcu_read_unlock(); + + return nr; +} 
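The CPU renumbering performed by ``check_rich_container()`` earlier in this hunk is easy to model: CPUs inside the container's allowed mask are reported under a dense index starting at 0, and every other CPU is hidden. A small standalone sketch (a bool array stands in for struct cpumask, and ``remap_cpu()`` is a made-up helper name)::

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 8

    /* Return true and set *index if 'cpu' is visible in the container. */
    static bool remap_cpu(unsigned int cpu, const bool *allowed,
                          unsigned int *index)
    {
            unsigned int i, id = 0;

            if (!allowed[cpu])
                    return false;            /* hidden from the container */

            for (i = 0; i < cpu; i++)
                    if (allowed[i])
                            id++;            /* count visible CPUs below us */
            *index = id;
            return true;
    }

    int main(void)
    {
            bool allowed[NR_CPUS] = { [2] = true, [3] = true, [6] = true };
            unsigned int cpu, idx;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    if (remap_cpu(cpu, allowed, &idx))
                            printf("cpu%u -> container cpu%u\n", cpu, idx);
            return 0;   /* prints: cpu2 -> 0, cpu3 -> 1, cpu6 -> 2 */
    }

Note that the kernel function's return value is inverted relative to this model: it returns true when the CPU must be hidden, so callers can bail out early.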
+ +void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, + unsigned long offset, int shift, bool running) +{ + struct cpuacct *ca_src; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + __get_cgroup_avenrun(ca_src, loads, offset, shift, running); + rcu_read_unlock(); +} + +#endif diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h index 0adeda93b5fb56e3086a3a059338ab2cc8fc58ba..aad1c1c3e19afc99ec00f1f4214bc8f0dfbb1ef7 100644 --- a/kernel/sched/cpudeadline.h +++ b/kernel/sched/cpudeadline.h @@ -13,6 +13,11 @@ struct cpudl { int size; cpumask_var_t free_cpus; struct cpudl_item *elements; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_SMP diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index b453f8a6a7c76473ae8bdfb1b4ee668b3d110244..e3e3bf3c39d4965a028620c980f51ffbb292c35f 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -24,14 +24,28 @@ DEFINE_PER_CPU(struct irqtime, cpu_irqtime); static int sched_clock_irqtime; +static int no_sched_clock_irqtime; + +static int __init irqtime_account_setup(char *str) +{ + if (!strcmp(str, "off")) { + no_sched_clock_irqtime = 1; + pr_info("The irqtime account is currently disabled!"); + } + return 1; +} +__setup("irqtime_account=", irqtime_account_setup); + void enable_sched_clock_irqtime(void) { - sched_clock_irqtime = 1; + if (!no_sched_clock_irqtime) + sched_clock_irqtime = 1; } void disable_sched_clock_irqtime(void) { - sched_clock_irqtime = 0; + if (!no_sched_clock_irqtime) + sched_clock_irqtime = 0; } static void irqtime_account_delta(struct irqtime *irqtime, u64 delta, @@ -142,8 +156,6 @@ void account_user_time(struct task_struct *p, u64 cputime) */ void account_guest_time(struct task_struct *p, u64 cputime) { - u64 *cpustat = kcpustat_this_cpu->cpustat; - /* Add guest time to process. */ p->utime += cputime; account_group_user_time(p, cputime); @@ -152,10 +164,10 @@ void account_guest_time(struct task_struct *p, u64 cputime) /* Add guest time to cpustat. */ if (task_nice(p) > 0) { task_group_account_field(p, CPUTIME_NICE, cputime); - cpustat[CPUTIME_GUEST_NICE] += cputime; + task_group_account_field(p, CPUTIME_GUEST_NICE, cputime); } else { task_group_account_field(p, CPUTIME_USER, cputime); - cpustat[CPUTIME_GUEST] += cputime; + task_group_account_field(p, CPUTIME_GUEST, cputime); } } @@ -231,17 +243,32 @@ void account_idle_time(u64 cputime) } -#ifdef CONFIG_SCHED_CORE +#if defined(CONFIG_SCHED_CORE) || defined(CONFIG_SCHED_ACPU) /* - * Account for forceidle time due to core scheduling. + * Account for sibidle, and for forceidle time due to core scheduling. * * REQUIRES: schedstat is enabled. 
*/ -void __account_forceidle_time(struct task_struct *p, u64 delta) +void __account_sibidle_time(struct task_struct *p, u64 delta, u64 delta_task, bool fi) { - __schedstat_add(p->stats.core_forceidle_sum, delta); - - task_group_account_field(p, CPUTIME_FORCEIDLE, delta); + unsigned int cpu = task_cpu(p); + + __schedstat_add(p->stats.core_sibidle_sum, delta); + __schedstat_add(p->stats.core_sibidle_task_sum, delta_task); + kcpustat_cpu(cpu).cpustat[CPUTIME_SIBIDLE] += delta; + kcpustat_cpu(cpu).cpustat[CPUTIME_SIBIDLE_TASK] += delta_task; + cgroup_account_cputime_field(p, CPUTIME_SIBIDLE, delta); + cgroup_account_cputime_field(p, CPUTIME_SIBIDLE_TASK, delta_task); +#ifdef CONFIG_SCHED_CORE + if (fi) { + __schedstat_add(p->stats.core_forceidle_sum, delta); + __schedstat_add(p->stats.core_forceidle_task_sum, delta_task); + kcpustat_cpu(cpu).cpustat[CPUTIME_FORCEIDLE] += delta; + kcpustat_cpu(cpu).cpustat[CPUTIME_FORCEIDLE_TASK] += delta_task; + cgroup_account_cputime_field(p, CPUTIME_FORCEIDLE, delta); + cgroup_account_cputime_field(p, CPUTIME_FORCEIDLE_TASK, delta_task); + } +#endif } #endif diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 4c3d0d9f3db6326703f92aab707771c39921a543..075750fbf7b1831a5950a9c8692952f9a2185995 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -519,6 +519,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group PN_SCHEDSTAT(slice_max); PN_SCHEDSTAT(wait_max); PN_SCHEDSTAT(wait_sum); + PN_SCHEDSTAT(parent_wait_contrib); P_SCHEDSTAT(wait_count); } @@ -1022,6 +1023,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, PN_SCHEDSTAT(slice_max); PN_SCHEDSTAT(wait_max); PN_SCHEDSTAT(wait_sum); + PN_SCHEDSTAT(parent_wait_contrib); P_SCHEDSTAT(wait_count); PN_SCHEDSTAT(iowait_sum); P_SCHEDSTAT(iowait_count); @@ -1059,6 +1061,11 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, #ifdef CONFIG_SCHED_CORE PN_SCHEDSTAT(core_forceidle_sum); + PN_SCHEDSTAT(core_forceidle_task_sum); +#endif +#ifdef CONFIG_SCHED_ACPU + PN_SCHEDSTAT(core_sibidle_sum); + PN_SCHEDSTAT(core_sibidle_task_sum); #endif } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3b2cfdb8d788d96bbbb0ddc6683edacfac317ac7..98abc91a9a56ea13cdfd91728dadcb1dd011c53c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -161,6 +161,15 @@ static struct ctl_table sched_fair_sysctls[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ONE, }, + { + .procname = "sched_cfs_bw_burst_onset_percent", + .data = &sysctl_sched_cfs_bw_burst_onset_percent, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE_HUNDRED, + }, #endif #ifdef CONFIG_NUMA_BALANCING { @@ -1150,6 +1159,18 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq) } #endif /* CONFIG_SMP */ +static inline void +update_exec_raw(struct cfs_rq *cfs_rq, struct sched_entity *curr) +{ + u64 now = rq_clock(rq_of(cfs_rq)); + + curr->sum_exec_raw += now - curr->exec_start_raw; + curr->exec_start_raw = now; +} + +/* + * Update the current task's runtime statistics. 
+ */ static s64 update_curr_se(struct rq *rq, struct sched_entity *curr) { u64 now = rq_clock_task(rq); @@ -1218,6 +1239,7 @@ static void update_curr(struct cfs_rq *cfs_rq) update_curr_task(task_of(curr), delta_exec); account_cfs_rq_runtime(cfs_rq, delta_exec); + update_exec_raw(cfs_rq, curr); } static void update_curr_fair(struct rq *rq) @@ -1228,8 +1250,10 @@ static void update_curr_fair(struct rq *rq) static inline void update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) { - struct sched_statistics *stats; + struct sched_statistics *stats, *pstats; struct task_struct *p = NULL; + u64 parent_wait_sum, delta, clock = rq_clock(rq_of(cfs_rq)); + struct sched_entity *pse = parent_entity(se); if (!schedstat_enabled()) return; @@ -1240,19 +1264,36 @@ update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) p = task_of(se); __update_stats_wait_start(rq_of(cfs_rq), p, stats); + + if (!pse) + return; + + pstats = __schedstats_from_se(pse); + + if (schedstat_val(pstats->wait_start)) + delta = clock - schedstat_val(pstats->wait_start); + else + delta = 0; + parent_wait_sum = schedstat_val(pstats->wait_sum) + delta; + __schedstat_set(stats->parent_wait_sum_base, parent_wait_sum); } static inline void update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) { - struct sched_statistics *stats; + struct sched_statistics *stats, *pstats; struct task_struct *p = NULL; + struct sched_entity *pse = parent_entity(se); + u64 parent_wait_sum, clock = rq_clock(rq_of(cfs_rq)); + u64 delta; if (!schedstat_enabled()) return; stats = __schedstats_from_se(se); + delta = clock - schedstat_val(stats->wait_start); + /* * When the sched_schedstat changes from 0 to 1, some sched se * maybe already in the runqueue, the se->statistics.wait_start @@ -1265,7 +1306,23 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) if (entity_is_task(se)) p = task_of(se); + cpu_update_latency(se, delta); + __update_stats_wait_end(rq_of(cfs_rq), p, stats); + + if (!pse) + return; + + pstats = __schedstats_from_se(pse); + + /* pick_next_task_fair() can update parent wait_start to 0 */ + if (schedstat_val(pstats->wait_start)) + delta = clock - schedstat_val(pstats->wait_start); + else + delta = 0; + parent_wait_sum = schedstat_val(pstats->wait_sum) + delta; + delta = parent_wait_sum - schedstat_val(stats->parent_wait_sum_base); + __schedstat_add(stats->parent_wait_contrib, delta); } static inline void @@ -1344,6 +1401,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) * We are starting a new run period: */ se->exec_start = rq_clock_task(rq_of(cfs_rq)); + se->exec_start_raw = rq_clock(rq_of(cfs_rq)); } /************************************************** @@ -1423,6 +1481,12 @@ struct numa_group { struct rcu_head rcu; unsigned long total_faults; unsigned long max_faults_cpu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + /* * faults[] array is split into two regions: faults_mem and faults_cpu. * @@ -1806,12 +1870,12 @@ static bool pgdat_free_space_enough(struct pglist_data *pgdat) * The smaller the hint page fault latency, the higher the possibility * for the page to be hot. 
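The masked subtraction in ``numa_hint_fault_latency()`` below is what keeps the latency meaningful across wraparound: the stored access time is truncated to a few bits, so the delta has to be reduced by the same mask. A toy model with a 16-bit mask (illustrative, standing in for the kernel's PAGE_ACCESS_TIME_MASK)::

    #include <stdio.h>

    #define ACCESS_TIME_MASK ((1u << 16) - 1)   /* illustrative width */

    static unsigned int hint_fault_latency(unsigned int now,
                                           unsigned int *last_time)
    {
            unsigned int last = *last_time;

            *last_time = now & ACCESS_TIME_MASK;    /* xchg in the kernel */
            return (now - last) & ACCESS_TIME_MASK; /* wraparound-safe */
    }

    int main(void)
    {
            unsigned int last = ACCESS_TIME_MASK - 5;  /* just before wrap */
            unsigned int lat = hint_fault_latency(10, &last);

            printf("latency = %u\n", lat);  /* 16, despite the wrap */
            return 0;
    }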
*/ -static int numa_hint_fault_latency(struct page *page) +static int numa_hint_fault_latency(struct folio *folio) { int last_time, time; time = jiffies_to_msecs(jiffies); - last_time = xchg_page_access_time(page, time); + last_time = folio_xchg_access_time(folio, time); return (time - last_time) & PAGE_ACCESS_TIME_MASK; } @@ -1868,7 +1932,7 @@ static void numa_promotion_adjust_threshold(struct pglist_data *pgdat, } } -bool should_numa_migrate_memory(struct task_struct *p, struct page * page, +bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio, int src_nid, int dst_cpu) { struct numa_group *ng = deref_curr_numa_group(p); @@ -1898,16 +1962,16 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page, numa_promotion_adjust_threshold(pgdat, rate_limit, def_th); th = pgdat->nbp_threshold ? : def_th; - latency = numa_hint_fault_latency(page); + latency = numa_hint_fault_latency(folio); if (latency >= th) return false; return !numa_promotion_rate_limit(pgdat, rate_limit, - thp_nr_pages(page)); + folio_nr_pages(folio)); } this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); - last_cpupid = page_cpupid_xchg_last(page, this_cpupid); + last_cpupid = folio_xchg_last_cpupid(folio, this_cpupid); if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && !node_is_toptier(src_nid) && !cpupid_valid(last_cpupid)) @@ -5594,21 +5658,27 @@ static inline u64 sched_cfs_bandwidth_slice(void) * * requires cfs_b->lock */ -void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) +static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b, + u64 overrun) { - s64 runtime; + u64 refill, runtime; if (unlikely(cfs_b->quota == RUNTIME_INF)) return; - cfs_b->runtime += cfs_b->quota; - runtime = cfs_b->runtime_snap - cfs_b->runtime; - if (runtime > 0) { - cfs_b->burst_time += runtime; - cfs_b->nr_burst++; + if (cfs_b->runtime_snap > cfs_b->runtime) { + runtime = cfs_b->runtime_snap - cfs_b->runtime; + if (runtime > cfs_b->quota) { + cfs_b->burst_time += runtime - cfs_b->quota; + cfs_b->nr_burst++; + } } - cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst); + cfs_b->current_buffer = max(cfs_b->runtime, cfs_b->buffer); + overrun = min(overrun, cfs_b->max_overrun); + refill = cfs_b->quota * overrun; + cfs_b->runtime += refill; + cfs_b->runtime = min(cfs_b->runtime, cfs_b->current_buffer); cfs_b->runtime_snap = cfs_b->runtime; } @@ -5631,7 +5701,7 @@ static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, if (cfs_b->quota == RUNTIME_INF) amount = min_amount; else { - start_cfs_bandwidth(cfs_b); + start_cfs_bandwidth(cfs_b, 0); if (cfs_b->runtime > 0) { amount = min(cfs_b->runtime, min_amount); @@ -5804,6 +5874,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) if (!se->on_rq) goto done; + if (se->my_q != cfs_rq) + cgroup_idle_start(se); + dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); if (cfs_rq_is_idle(group_cfs_rq(se))) @@ -5852,6 +5925,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) { + struct cfs_rq *bottom_cfs_rq = cfs_rq; struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; @@ -5895,6 +5969,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) if (se->on_rq) break; + + if (se->my_q != bottom_cfs_rq) + cgroup_idle_end(se); enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); if (cfs_rq_is_idle(group_cfs_rq(se))) @@ -6101,7 +6178,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u 
cfs_b->nr_periods += overrun; /* Refill extra burst quota even if cfs_b->idle */ - __refill_cfs_bandwidth_runtime(cfs_b); + __refill_cfs_bandwidth_runtime(cfs_b, overrun); /* * idle depends on !throttled (for the case of a large deficit), and if @@ -6356,8 +6433,17 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) new = old * 2; if (new < max_cfs_quota_period) { cfs_b->period = ns_to_ktime(new); - cfs_b->quota *= 2; - cfs_b->burst *= 2; + cfs_b->quota = min(cfs_b->quota * 2, + max_cfs_runtime); + + cfs_b->burst = min(cfs_b->burst * 2, + max_cfs_runtime); + + cfs_b->buffer = min(max_cfs_runtime, + cfs_b->quota + cfs_b->burst); + /* Add 1 in case max_overrun becomes 0. */ + cfs_b->max_overrun >>= 1; + cfs_b->max_overrun++; pr_warn_ratelimited( "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", @@ -6390,6 +6476,8 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *paren cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); cfs_b->burst = 0; + cfs_b->init_buffer = 0; + cfs_b->buffer = RUNTIME_INF; cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF; INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); @@ -6413,16 +6501,26 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) #endif } -void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, int init) { + u64 overrun; + lockdep_assert_held(&cfs_b->lock); if (cfs_b->period_active) return; cfs_b->period_active = 1; - hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); + overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); + + /* + * When period timer stops, quota for the following period is not + * refilled, however period timer is already forwarded. We should + * accumulate quota once more than overrun here. 
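The refill arithmetic invoked just below is small enough to model directly: each missed period contributes one quota, the overrun is clamped by ``max_overrun``, and the accumulated runtime is capped at the current buffer (conceptually quota + burst). A self-contained sketch with made-up numbers (field names follow the patch; the burst-statistics bookkeeping is omitted)::

    #include <stdio.h>

    typedef unsigned long long u64;

    struct bw { u64 quota, runtime, buffer, max_overrun; };

    static void refill(struct bw *b, u64 overrun)
    {
            /* cap = max(runtime, buffer), the patch's current_buffer */
            u64 cap = b->runtime > b->buffer ? b->runtime : b->buffer;

            if (overrun > b->max_overrun)
                    overrun = b->max_overrun;
            b->runtime += b->quota * overrun;
            if (b->runtime > cap)
                    b->runtime = cap;
    }

    int main(void)
    {
            struct bw b = { .quota = 10, .runtime = 2,
                            .buffer = 15, .max_overrun = 3 };

            refill(&b, 5);  /* 5 missed periods, clamped to 3 */
            printf("runtime = %llu\n", b.runtime);  /* min(2 + 30, 15) = 15 */
            return 0;
    }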
+ */ + if (!init) + __refill_cfs_bandwidth_runtime(cfs_b, overrun + 1); } static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) @@ -6760,6 +6858,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, flags); + if (!entity_is_task(se)) + cgroup_idle_end(se); + cfs_rq->h_nr_running++; cfs_rq->idle_h_nr_running += idle_h_nr_running; @@ -6767,8 +6868,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) idle_h_nr_running = 1; /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) + if (cfs_rq_throttled(cfs_rq)) { +#ifdef CONFIG_FAIR_GROUP_SCHED + if (cfs_rq->nr_running == 1) + cgroup_idle_end(se->parent); +#endif goto enqueue_throttle; + } flags = ENQUEUE_WAKEUP; } @@ -6838,6 +6944,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); + if (!entity_is_task(se)) + cgroup_idle_start(se); + cfs_rq->h_nr_running--; cfs_rq->idle_h_nr_running -= idle_h_nr_running; @@ -6845,8 +6954,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) idle_h_nr_running = 1; /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) + if (cfs_rq_throttled(cfs_rq)) { +#ifdef CONFIG_FAIR_GROUP_SCHED + if (!cfs_rq->nr_running) + cgroup_idle_start(se->parent); +#endif goto dequeue_throttle; + } /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { @@ -7444,6 +7558,30 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool } } + if (static_branch_unlikely(&sched_cluster_active)) { + struct sched_group *sg = sd->groups; + + if (sg->flags & SD_CLUSTER) { + for_each_cpu_wrap(cpu, sched_group_span(sg), target + 1) { + if (!cpumask_test_cpu(cpu, cpus)) + continue; + + if (has_idle_core) { + i = select_idle_core(p, cpu, cpus, &idle_cpu); + if ((unsigned int)i < nr_cpumask_bits) + return i; + } else { + if (--nr <= 0) + return -1; + idle_cpu = __select_idle_cpu(cpu, p); + if ((unsigned int)idle_cpu < nr_cpumask_bits) + return idle_cpu; + } + } + cpumask_andnot(cpus, cpus, sched_group_span(sg)); + } + } + for_each_cpu_wrap(cpu, cpus, target + 1) { if (has_idle_core) { i = select_idle_core(p, cpu, cpus, &idle_cpu); @@ -7451,7 +7589,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool return i; } else { - if (!--nr) + if (--nr <= 0) return -1; idle_cpu = __select_idle_cpu(cpu, p); if ((unsigned int)idle_cpu < nr_cpumask_bits) @@ -7553,7 +7691,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) bool has_idle_core = false; struct sched_domain *sd; unsigned long task_util, util_min, util_max; - int i, recent_used_cpu; + int i, recent_used_cpu, prev_aff = -1; /* * On asymmetric system, update task utilization because we will check @@ -7580,8 +7718,14 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) */ if (prev != target && cpus_share_cache(prev, target) && (available_idle_cpu(prev) || sched_idle_cpu(prev)) && - asym_fits_cpu(task_util, util_min, util_max, prev)) - return prev; + asym_fits_cpu(task_util, util_min, util_max, prev)) { + + if (!static_branch_unlikely(&sched_cluster_active) || + cpus_share_resources(prev, target)) + return prev; + + prev_aff = prev; + } /* * Allow a per-cpu kthread to stack with the wakee if the @@ -7608,7 +7752,13 @@ static int select_idle_sibling(struct task_struct *p, int prev, int 
target) (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) && asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) { - return recent_used_cpu; + + if (!static_branch_unlikely(&sched_cluster_active) || + cpus_share_resources(recent_used_cpu, target)) + return recent_used_cpu; + + } else { + recent_used_cpu = -1; } /* @@ -7649,6 +7799,17 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) if ((unsigned)i < nr_cpumask_bits) return i; + /* + * For cluster machines which have lower sharing cache like L2 or + * LLC Tag, we tend to find an idle CPU in the target's cluster + * first. But prev_cpu or recent_used_cpu may also be a good candidate, + * use them if possible when no idle CPU found in select_idle_cpu(). + */ + if ((unsigned int)prev_aff < nr_cpumask_bits) + return prev_aff; + if ((unsigned int)recent_used_cpu < nr_cpumask_bits) + return recent_used_cpu; + return target; } @@ -12591,6 +12752,32 @@ static int task_is_throttled_fair(struct task_struct *p, int cpu) #endif return throttled_hierarchy(cfs_rq); } + +#ifdef CONFIG_CFS_BANDWIDTH +void account_ht_aware_quota(struct task_struct *p, u64 delta) +{ + struct sched_entity *se; + unsigned int ht_ratio; + struct cfs_rq *cfs_rq; + + /* We only account ht_aware_quota for cookied task. */ + if (sched_feat(SCHED_CORE_HT_AWARE_QUOTA) && p->core_cookie) { + se = &p->se; + cfs_rq = task_cfs_rq(p); + + if (se->parent) { + ht_ratio = se->parent->ht_ratio; + if (ht_ratio > 100 && ht_ratio <= 200) { + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); + account_cfs_rq_runtime(cfs_rq, + delta * (ht_ratio - 100) / 100); + } + } + } + } +} +#endif #else static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {} #endif @@ -12811,6 +12998,46 @@ void init_cfs_rq(struct cfs_rq *cfs_rq) } #ifdef CONFIG_FAIR_GROUP_SCHED + +#ifdef CONFIG_SCHED_SLI +static void update_nr_iowait_fair(struct task_struct *p, long inc) +{ + unsigned long flags; + struct sched_entity *se = p->se.parent; + u64 clock; + + if (!schedstat_enabled()) + return; + + clock = __rq_clock_broken(cpu_rq(task_cpu(p))); + + for_each_sched_entity(se) { + /* + * Avoid locking rq->lock from try_to_wakeup hot path, in + * the price of poor consistency among cgroup hierarchy, + * which we can tolerate. + * While accessing se->on_rq does need to hold rq->lock. We + * already do, because when inc==1, the caller is __schedule + * and task_move_group_fair + */ + spin_lock_irqsave(&se->iowait_lock, flags); + if (!se->on_rq && !schedstat_val(se->cg_nr_iowait) && inc > 0) + __schedstat_set(se->cg_iowait_start, clock); + if (schedstat_val(se->cg_iowait_start) > 0 && + schedstat_val(se->cg_nr_iowait) + inc == 0) { + __schedstat_add(se->cg_iowait_sum, clock - + schedstat_val(se->cg_iowait_start)); + __schedstat_set(se->cg_iowait_start, 0); + } + __schedstat_add(se->cg_nr_iowait, inc); + spin_unlock_irqrestore(&se->iowait_lock, flags); + } +} +#else +static void update_nr_iowait_fair(struct task_struct *p, long inc) {} +#endif + + static void task_change_group_fair(struct task_struct *p) { /* @@ -12820,6 +13047,22 @@ static void task_change_group_fair(struct task_struct *p) if (READ_ONCE(p->__state) == TASK_NEW) return; + /* + * p->in_iowait is obvious. 
If p is in_iowait, we should transfer + * iowait to the new cgroup, otherwise try_to_wake_up will decrease + * from the new cgroup, leaving old cgroup's nr_iowait to be 1, and + * new cgroup's nr_iowait to be -1 + * + * !p->on_rq is necessary too, because iowait and on_rq are not + * updated at the same time. After try_to_wake_up, p->in_iowait + * remains 1, while on_rq becomes 1. In this case, p is not at all + * in_iowait already, so don't be stupid to transfer nr_iowait. + * Similarly, when io_schedule, there's a window between setting + * p->in_iowait to 1 and setting p->on_rq to 0, don't either. + */ + if (p->in_iowait && !p->on_rq) + update_nr_iowait_fair(p, -1); + detach_task_cfs_rq(p); #ifdef CONFIG_SMP @@ -12828,6 +13071,9 @@ static void task_change_group_fair(struct task_struct *p) #endif set_task_rq(p, task_cpu(p)); attach_task_cfs_rq(p); + /* Same as above */ + if (p->in_iowait && !p->on_rq) + update_nr_iowait_fair(p, 1); } void free_fair_sched_group(struct task_group *tg) @@ -12876,6 +13122,9 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) init_cfs_rq(cfs_rq); init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); init_entity_runnable_average(se); +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + se->ht_ratio = 100; +#endif } return 1; @@ -12960,6 +13209,9 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, /* guarantee group entities always have weight */ update_load_set(&se->load, NICE_0_LOAD); se->parent = parent; + seqlock_init(&se->idle_seqlock); + spin_lock_init(&se->iowait_lock); + se->cg_idle_start = se->cg_init_time = cpu_clock(cpu); } static DEFINE_MUTEX(shares_mutex); @@ -13118,6 +13370,16 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task return rr_interval; } +#ifdef CONFIG_SCHED_SLI +static void update_nr_uninterruptible_fair(struct task_struct *p, long inc) +{ + struct sched_entity *se = &p->se; + + for_each_sched_entity(se) + cfs_rq_of(se)->nr_uninterruptible += inc; +} +#endif + /* * All the scheduling class methods: */ @@ -13165,6 +13427,10 @@ DEFINE_SCHED_CLASS(fair) = { #ifdef CONFIG_SCHED_CORE .task_is_throttled = task_is_throttled_fair, #endif +#ifdef CONFIG_SCHED_SLI + .update_nr_uninterruptible = update_nr_uninterruptible_fair, + .update_nr_iowait = update_nr_iowait_fair, +#endif #ifdef CONFIG_UCLAMP_TASK .uclamp_enabled = 1, diff --git a/kernel/sched/features.h b/kernel/sched/features.h index f770168230ae4a09dd0f240957c0c7d749001a50..ee7fb7220ed8a9b711abd7364321092f52217e1a 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -89,3 +89,7 @@ SCHED_FEAT(UTIL_EST_FASTUP, true) SCHED_FEAT(LATENCY_WARN, false) SCHED_FEAT(HZ_BW, true) + +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) +SCHED_FEAT(SCHED_CORE_HT_AWARE_QUOTA, false) +#endif diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 82e2f7fc7c267d841450cbf0bf270110d4f3f4a0..33dcd35605abc8ee462a60db7aeba852df007212 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -53,8 +53,32 @@ int housekeeping_any_cpu(enum hk_type type) } EXPORT_SYMBOL_GPL(housekeeping_any_cpu); +#ifdef CONFIG_CGROUP_SCHED +/* + * dyn_allowed -- allowed CPUs for wild tasks. + * + * dyn_isolated -- isolated CPUs for wild tasks. + * + * dyn_possible -- possible CPUs for dynamical isolation. 
+ */ +static cpumask_var_t dyn_allowed; +static cpumask_var_t dyn_isolated; +static cpumask_var_t dyn_possible; + +static bool dyn_isolcpus_ready; + +DEFINE_STATIC_KEY_FALSE(dyn_isolcpus_enabled); +EXPORT_SYMBOL_GPL(dyn_isolcpus_enabled); +#endif + const struct cpumask *housekeeping_cpumask(enum hk_type type) { +#ifdef CONFIG_CGROUP_SCHED + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + if (BIT(type) & HK_FLAG_DOMAIN) + return dyn_allowed; +#endif + if (static_branch_unlikely(&housekeeping_overridden)) if (housekeeping.flags & BIT(type)) return housekeeping.cpumasks[type]; @@ -72,6 +96,12 @@ EXPORT_SYMBOL_GPL(housekeeping_affine); bool housekeeping_test_cpu(int cpu, enum hk_type type) { +#ifdef CONFIG_CGROUP_SCHED + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + if (BIT(type) & HK_FLAG_DOMAIN) + return cpumask_test_cpu(cpu, dyn_allowed); +#endif + if (static_branch_unlikely(&housekeeping_overridden)) if (housekeeping.flags & BIT(type)) return cpumask_test_cpu(cpu, housekeeping.cpumasks[type]); @@ -79,10 +109,30 @@ bool housekeeping_test_cpu(int cpu, enum hk_type type) } EXPORT_SYMBOL_GPL(housekeeping_test_cpu); +#ifdef CONFIG_CGROUP_SCHED +static inline void free_dyn_masks(void) +{ + free_cpumask_var(dyn_allowed); + free_cpumask_var(dyn_isolated); + free_cpumask_var(dyn_possible); +} +#endif + void __init housekeeping_init(void) { enum hk_type type; +#ifdef CONFIG_CGROUP_SCHED + if (zalloc_cpumask_var(&dyn_allowed, GFP_KERNEL) && + zalloc_cpumask_var(&dyn_isolated, GFP_KERNEL) && + zalloc_cpumask_var(&dyn_possible, GFP_KERNEL)) { + cpumask_copy(dyn_allowed, cpu_possible_mask); + cpumask_copy(dyn_possible, cpu_possible_mask); + dyn_isolcpus_ready = true; + } else + free_dyn_masks(); +#endif + if (!housekeeping.flags) return; @@ -95,6 +145,13 @@ void __init housekeeping_init(void) /* We need at least one CPU to handle housekeeping work */ WARN_ON_ONCE(cpumask_empty(housekeeping.cpumasks[type])); } +#ifdef CONFIG_CGROUP_SCHED + if (dyn_isolcpus_ready && (housekeeping.flags & HK_FLAG_DOMAIN) && + type < HK_TYPE_MAX) { + cpumask_copy(dyn_allowed, housekeeping.cpumasks[type]); + cpumask_copy(dyn_possible, housekeeping.cpumasks[type]); + } +#endif } static void __init housekeeping_setup_type(enum hk_type type, @@ -244,3 +301,134 @@ static int __init housekeeping_isolcpus_setup(char *str) return housekeeping_setup(str, flags); } __setup("isolcpus=", housekeeping_isolcpus_setup); + +#ifdef CONFIG_CGROUP_SCHED +static int dyn_isolcpus_show(struct seq_file *s, void *p) +{ + seq_printf(s, "%*pbl\n", cpumask_pr_args(dyn_isolated)); + + return 0; +} + +static int dyn_isolcpus_open(struct inode *inode, struct file *file) +{ + return single_open(file, dyn_isolcpus_show, NULL); +} + +void wilds_cpus_allowed(struct cpumask *pmask) +{ + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + cpumask_and(pmask, pmask, dyn_allowed); +} + +void update_wilds_cpumask(cpumask_var_t new_allowed, cpumask_var_t old_allowed) +{ + struct task_struct *g, *task; + + rcu_read_lock(); + for_each_process_thread(g, task) { + if (task->flags & PF_KTHREAD) + continue; + + if (!cpumask_equal(task->cpus_ptr, old_allowed)) + continue; + + set_cpus_allowed_ptr(task, new_allowed); + } + rcu_read_unlock(); +} + +static DEFINE_MUTEX(dyn_isolcpus_mutex); + +static ssize_t write_dyn_isolcpus(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int ret = count; + cpumask_var_t isolated; + cpumask_var_t new_allowed; + cpumask_var_t old_allowed; + + mutex_lock(&dyn_isolcpus_mutex); + + if 
(!zalloc_cpumask_var(&isolated, GFP_KERNEL)) { + ret = -ENOMEM; + goto out; + } + + if (!zalloc_cpumask_var(&new_allowed, GFP_KERNEL)) { + ret = -ENOMEM; + goto free_isolated; + } + + if (!zalloc_cpumask_var(&old_allowed, GFP_KERNEL)) { + ret = -ENOMEM; + goto free_new_allowed; + } + + if (cpumask_parselist_user(buf, count, isolated)) { + ret = -EINVAL; + goto free_all; + } + + if (!cpumask_subset(isolated, dyn_possible)) { + ret = -EINVAL; + goto free_all; + } + + /* At least reserve one for wild tasks to run */ + cpumask_andnot(new_allowed, dyn_possible, isolated); + if (!cpumask_intersects(new_allowed, cpu_online_mask)) { + ret = -EINVAL; + goto free_all; + } + + cpumask_copy(old_allowed, dyn_allowed); + cpumask_copy(dyn_allowed, new_allowed); + cpumask_copy(dyn_isolated, isolated); + + if (cpumask_empty(dyn_isolated)) + static_branch_disable(&dyn_isolcpus_enabled); + else + static_branch_enable(&dyn_isolcpus_enabled); + + update_wilds_cpumask(new_allowed, old_allowed); + + rebuild_sched_domains(); + workqueue_set_unbound_cpumask(new_allowed); + +free_all: + free_cpumask_var(old_allowed); +free_new_allowed: + free_cpumask_var(new_allowed); +free_isolated: + free_cpumask_var(isolated); +out: + mutex_unlock(&dyn_isolcpus_mutex); + + return ret; +} + +static const struct proc_ops proc_dyn_isolcpus_operations = { + .proc_open = dyn_isolcpus_open, + .proc_read = seq_read, + .proc_write = write_dyn_isolcpus, + .proc_lseek = noop_llseek, + .proc_release = single_release, +}; + +static int __init dyn_isolcpus_init(void) +{ + if (dyn_isolcpus_ready && + !proc_create("dyn_isolcpus", 0200, NULL, + &proc_dyn_isolcpus_operations)) { + dyn_isolcpus_ready = false; + free_dyn_masks(); + } + + if (!dyn_isolcpus_ready) + pr_err("Initialize Dynamical Isolation Failed\n"); + + return 0; +} +early_initcall(dyn_isolcpus_init); +#endif diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index 52c8f8226b0d3590425ec8237634b5d8e0148b99..13761c5d1bfbacf06fd604e92209679faf48d075 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -56,8 +56,10 @@ /* Variables and functions for calc_load */ atomic_long_t calc_load_tasks; -unsigned long calc_load_update; +atomic_long_t calc_load_tasks_r; unsigned long avenrun[3]; +unsigned long avenrun_r[3]; +unsigned long calc_load_update; EXPORT_SYMBOL(avenrun); /* should be removed */ /** @@ -90,6 +92,29 @@ long calc_load_fold_active(struct rq *this_rq, long adjust) return delta; } +#ifdef CONFIG_SCHED_SLI +void get_avenrun_r(unsigned long *loads, unsigned long offset, int shift) +{ + loads[0] = (avenrun_r[0] + offset) << shift; + loads[1] = (avenrun_r[1] + offset) << shift; + loads[2] = (avenrun_r[2] + offset) << shift; +} + +long calc_load_fold_active_r(struct rq *this_rq, long adjust) +{ + long nr_active, delta = 0; + + nr_active = this_rq->nr_running - adjust; + + if (nr_active != this_rq->calc_load_active_r) { + delta = nr_active - this_rq->calc_load_active_r; + this_rq->calc_load_active_r = nr_active; + } + + return delta; +} +#endif + /** * fixed_power_int - compute: x^n, in O(log n) time * @@ -203,6 +228,9 @@ calc_load_n(unsigned long load, unsigned long exp, * When making the ILB scale, we should try to pull this in as well. 
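The ``calc_load_nohz[]`` double buffering declared just below (and mirrored by ``calc_load_nohz_r[]`` in this patch) can be modeled in a few lines: a CPU going NO_HZ-idle after the sample window has closed folds its delta into the *next* bucket, so the sampler can drain the current one without locking. A toy single-threaded model (plain longs instead of atomics, so all memory-ordering details are elided)::

    #include <stdio.h>

    static long bucket[2];
    static int idx;              /* models calc_load_idx */
    static int window_closed;    /* models an expired calc_load_update */

    static void fold_on_idle(long delta)
    {
            int w = (idx + window_closed) & 1;  /* calc_load_write_idx() */

            bucket[w] += delta;                 /* atomic_long_add() upstream */
    }

    static long sample(void)
    {
            long delta = bucket[idx & 1];       /* calc_load_read_idx() */

            bucket[idx & 1] = 0;                /* atomic_long_xchg() upstream */
            idx++;                              /* open the next window */
            window_closed = 0;
            return delta;
    }

    int main(void)
    {
            fold_on_idle(2);        /* CPU sleeps inside the window */
            window_closed = 1;
            fold_on_idle(3);        /* CPU sleeps after it closed */
            printf("%ld %ld\n", sample(), sample());  /* 2, then 3 */
            return 0;
    }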
*/ static atomic_long_t calc_load_nohz[2]; +#ifdef CONFIG_SCHED_SLI +static atomic_long_t calc_load_nohz_r[2]; +#endif static int calc_load_idx; static inline int calc_load_write_idx(void) @@ -233,13 +261,17 @@ static inline int calc_load_read_idx(void) static void calc_load_nohz_fold(struct rq *rq) { long delta; + int idx = calc_load_write_idx(); delta = calc_load_fold_active(rq, 0); - if (delta) { - int idx = calc_load_write_idx(); - + if (delta) atomic_long_add(delta, &calc_load_nohz[idx]); - } + +#ifdef CONFIG_SCHED_SLI + delta = calc_load_fold_active_r(rq, 0); + if (delta) + atomic_long_add(delta, &calc_load_nohz_r[idx]); +#endif } void calc_load_nohz_start(void) @@ -291,6 +323,19 @@ static long calc_load_nohz_read(void) return delta; } +#ifdef CONFIG_SCHED_SLI +static long calc_load_nohz_r_read(void) +{ + int idx = calc_load_read_idx(); + long delta = 0; + + if (atomic_long_read(&calc_load_nohz_r[idx])) + delta = atomic_long_xchg(&calc_load_nohz_r[idx], 0); + + return delta; +} +#endif + /* * NO_HZ can leave us missing all per-CPU ticks calling * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into @@ -320,6 +365,16 @@ static void calc_global_nohz(void) avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); +#ifdef CONFIG_SCHED_SLI + /* Calc avenrun_r */ + active = atomic_long_read(&calc_load_tasks_r); + active = active > 0 ? active * FIXED_1 : 0; + + avenrun_r[0] = calc_load_n(avenrun_r[0], EXP_1, active, n); + avenrun_r[1] = calc_load_n(avenrun_r[1], EXP_5, active, n); + avenrun_r[2] = calc_load_n(avenrun_r[2], EXP_15, active, n); +#endif + WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ); } @@ -336,6 +391,7 @@ static void calc_global_nohz(void) #else /* !CONFIG_NO_HZ_COMMON */ static inline long calc_load_nohz_read(void) { return 0; } +static inline long calc_load_nohz_r_read(void) { return 0; } static inline void calc_global_nohz(void) { } #endif /* CONFIG_NO_HZ_COMMON */ @@ -369,8 +425,28 @@ void calc_global_load(void) avenrun[1] = calc_load(avenrun[1], EXP_5, active); avenrun[2] = calc_load(avenrun[2], EXP_15, active); +#ifdef CONFIG_SCHED_SLI + /* + * Calculate load 1/5/15 for running tasks only. We do not + * invent common functions to keep the same layout as upstream. + */ + delta = calc_load_nohz_r_read(); + if (delta) + atomic_long_add(delta, &calc_load_tasks_r); + + active = atomic_long_read(&calc_load_tasks_r); + active = active > 0 ? active * FIXED_1 : 0; + + avenrun_r[0] = calc_load(avenrun_r[0], EXP_1, active); + avenrun_r[1] = calc_load(avenrun_r[1], EXP_5, active); + avenrun_r[2] = calc_load(avenrun_r[2], EXP_15, active); +#endif + WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ); + if (!async_load_calc_enabled()) + calc_cgroup_load(); + /* * In case we went to NO_HZ for multiple LOAD_FREQ intervals * catch up in bulk. 
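That bulk catch-up is what ``calc_load_n()`` is for: it decays the averages over n missed windows in one step by raising the per-window factor to the n-th power with ``fixed_power_int()``, the O(log n) helper documented earlier in this file. A standalone sketch (standard constants; the printout is a sanity check that one idle minute decays the 1-minute average by roughly 1/e)::

    #include <stdio.h>

    #define FSHIFT  11
    #define FIXED_1 (1 << FSHIFT)
    #define EXP_1   1884    /* per-window decay of the 1-minute average */

    /* x^n in fixed point by repeated squaring. */
    static unsigned long fixed_power_int(unsigned long x,
                                         unsigned int frac_bits,
                                         unsigned int n)
    {
            unsigned long result = 1UL << frac_bits;

            while (n) {
                    if (n & 1) {
                            result *= x;
                            result += 1UL << (frac_bits - 1);  /* round */
                            result >>= frac_bits;
                    }
                    n >>= 1;
                    x *= x;
                    x += 1UL << (frac_bits - 1);
                    x >>= frac_bits;
            }
            return result;
    }

    int main(void)
    {
            /* 12 missed 5-second windows = 1 minute of idling. */
            unsigned long f = fixed_power_int(EXP_1, FSHIFT, 12);

            printf("EXP_1^12 = %lu/%d (~%lu%%)\n", f, FIXED_1,
                   f * 100 / FIXED_1);   /* ~36%, i.e. about 1/e */
            return 0;
    }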
@@ -393,5 +469,11 @@ void calc_global_load_tick(struct rq *this_rq) if (delta) atomic_long_add(delta, &calc_load_tasks); +#ifdef CONFIG_SCHED_SLI + delta = calc_load_fold_active_r(this_rq, 0); + if (delta) + atomic_long_add(delta, &calc_load_tasks_r); +#endif + this_rq->calc_load_update += LOAD_FREQ; } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index b89223a973168f6aa4664b158205a5360bb4fecc..ec6d87169794fbcf7799af0e4bc7cad72ce7d96a 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2695,6 +2695,16 @@ static int task_is_throttled_rt(struct task_struct *p, int cpu) } #endif +#ifdef CONFIG_SCHED_SLI +static void update_nr_uninterruptible_rt(struct task_struct *p, long inc) +{ + struct sched_rt_entity *se = &p->rt; + + for_each_sched_rt_entity(se) + rt_rq_of_se(se)->nr_uninterruptible += inc; +} +#endif + DEFINE_SCHED_CLASS(rt) = { .enqueue_task = enqueue_task_rt, @@ -2731,6 +2741,9 @@ DEFINE_SCHED_CLASS(rt) = { #ifdef CONFIG_SCHED_CORE .task_is_throttled = task_is_throttled_rt, #endif +#ifdef CONFIG_SCHED_SLI + .update_nr_uninterruptible = update_nr_uninterruptible_rt, +#endif #ifdef CONFIG_UCLAMP_TASK .uclamp_enabled = 1, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d48c6a292a83dbcd79d873a60fb5543ce3d9fcd6..ab688805a67412ba777b6c8ff06f293dd733e3ea 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -89,6 +89,7 @@ #endif #include +#include #include "cpupri.h" #include "cpudeadline.h" @@ -106,16 +107,31 @@ struct cpuidle_state; #define TASK_ON_RQ_QUEUED 1 #define TASK_ON_RQ_MIGRATING 2 +#ifdef CONFIG_CFS_BANDWIDTH +extern const u64 max_cfs_runtime; +extern unsigned int sysctl_sched_cfs_bw_burst_onset_percent; +#endif + extern __read_mostly int scheduler_running; extern unsigned long calc_load_update; extern atomic_long_t calc_load_tasks; +extern atomic_long_t calc_load_tasks_r; extern unsigned int sysctl_sched_child_runs_first; extern void calc_global_load_tick(struct rq *this_rq); extern long calc_load_fold_active(struct rq *this_rq, long adjust); +#ifdef CONFIG_SCHED_SLI +extern long calc_load_fold_active_r(struct rq *this_rq, long adjust); +#else +static inline long calc_load_fold_active_r(struct rq *this_rq, long adjust) +{ + return 0; +} +#endif + extern void call_trace_sched_update_nr_running(struct rq *rq, int count); extern unsigned int sysctl_sched_rt_period; @@ -284,6 +300,11 @@ struct rt_bandwidth { u64 rt_runtime; struct hrtimer rt_period_timer; unsigned int rt_period_active; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline int dl_bandwidth_enabled(void) @@ -313,6 +334,11 @@ struct dl_bw { raw_spinlock_t lock; u64 bw; u64 total_bw; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; extern void init_dl_bw(struct dl_bw *dl_b); @@ -333,6 +359,52 @@ struct rt_rq; extern struct list_head task_groups; +enum sched_lat_stat_item { + SCHED_LAT_WAIT, + SCHED_LAT_BLOCK, + SCHED_LAT_IOBLOCK, + SCHED_LAT_CGROUP_WAIT, + SCHED_LAT_NR_STAT +}; + +/* + * Scheduler latency histogram distribution, in milliseconds: + * [0, 1ms) + * [1, 4ms) + * [4, 7ms) + * [7, 10ms) + * [10, 20ms) + * [20, 30ms) + * [30, 40ms) + * [40, 50ms) + * [50, 100ms) + * [100, 500ms) + * [500, 1000ms) + * [1000, 5000ms) + * [5000, 10000ms) + * [10000ms, INF) + * total(ms) + */ +enum sched_lat_count_t { + SCHED_LAT_0_1, + SCHED_LAT_1_4, + SCHED_LAT_4_7, + SCHED_LAT_7_10, + SCHED_LAT_10_20, + SCHED_LAT_20_30, + SCHED_LAT_30_40, + SCHED_LAT_40_50, + SCHED_LAT_50_100, + SCHED_LAT_100_500, + SCHED_LAT_500_1000, + SCHED_LAT_1000_5000, +
SCHED_LAT_5000_10000, + SCHED_LAT_10000_INF, + SCHED_LAT_TOTAL, + SCHED_LAT_NR, + SCHED_LAT_NR_COUNT, +}; + +struct sched_cgroup_lat_stat_cpu { + unsigned long item[SCHED_LAT_NR_STAT][SCHED_LAT_NR_COUNT]; +}; + struct cfs_bandwidth { #ifdef CONFIG_CFS_BANDWIDTH raw_spinlock_t lock; @@ -340,6 +412,10 @@ struct cfs_bandwidth { u64 quota; u64 runtime; u64 burst; + u64 init_buffer; + u64 current_buffer; + u64 buffer; + u64 max_overrun; u64 runtime_snap; s64 hierarchical_quota; @@ -356,6 +432,11 @@ struct cfs_bandwidth { int nr_burst; u64 throttled_time; u64 burst_time; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) #endif }; @@ -411,7 +492,22 @@ struct task_group { /* Effective clamp values used for a task group */ struct uclamp_se uclamp[UCLAMP_CNT]; #endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + unsigned int ht_ratio; +#endif +#ifdef CONFIG_SCHED_SLI + struct sched_cgroup_lat_stat_cpu __percpu *lat_stat_cpu; +#endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -456,8 +552,7 @@ extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *parent); extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent); -extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); -extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); +extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, int init); extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); extern bool cfs_task_bw_constrained(struct task_struct *p); @@ -649,6 +744,17 @@ struct cfs_rq { #endif #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ + + unsigned long nr_uninterruptible; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; static inline int rt_bandwidth_enabled(void) @@ -695,6 +801,13 @@ struct rt_rq { struct rq *rq; struct task_group *tg; #endif + + unsigned long nr_uninterruptible; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) @@ -763,6 +876,11 @@ struct dl_rq { * by the GRUB algorithm. */ u64 bw_ratio; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -889,6 +1007,11 @@ struct root_domain { * CPUs of the rd. Protected by RCU. */ struct perf_domain __rcu *pd; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; extern void init_defrootdomain(void); @@ -1017,7 +1140,14 @@ struct rq { struct task_struct *idle; struct task_struct *stop; unsigned long next_balance; - struct mm_struct *prev_mm; + + /* + * Frequent writing to prev_mm and clock_update_flags on local + * CPU causes cacheline containing idle to be invalidated on + * other CPUs. Put prev_mm and sequential fields on a new + * cacheline to fix it. 
+ */ + struct mm_struct *prev_mm ____cacheline_aligned; unsigned int clock_update_flags; u64 clock; @@ -1104,6 +1234,9 @@ struct rq { /* calc_load related fields */ unsigned long calc_load_update; long calc_load_active; +#ifdef CONFIG_SCHED_SLI + long calc_load_active_r; +#endif #ifdef CONFIG_SCHED_HRTICK #ifdef CONFIG_SMP @@ -1156,8 +1289,10 @@ struct rq { unsigned long core_cookie; unsigned int core_forceidle_count; unsigned int core_forceidle_seq; - unsigned int core_forceidle_occupation; - u64 core_forceidle_start; + unsigned int core_sibidle_occupation; + u64 core_sibidle_start; + u64 core_sibidle_start_task; + unsigned int core_sibidle_count; #endif /* Scratch cpumask to be temporarily used under rq_lock */ @@ -1167,6 +1302,23 @@ struct rq { call_single_data_t cfsb_csd; struct list_head cfsb_csd_list; #endif + +#ifdef CONFIG_SCHED_ACPU + u64 acpu_idle_sum; + u64 sibidle_sum; + u64 sibidle_task_sum; + u64 last_acpu_update_time; + u64 last_acpu_update_time_task; +#endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -1319,6 +1471,11 @@ extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags); extern void sched_core_get(void); extern void sched_core_put(void); +#ifdef CONFIG_CFS_BANDWIDTH +extern void account_ht_aware_quota(struct task_struct *p, u64 delta); +#else +static inline void account_ht_aware_quota(struct task_struct *p, u64 delta) {} +#endif #else /* !CONFIG_SCHED_CORE */ static inline bool sched_core_enabled(struct rq *rq) @@ -1467,6 +1624,11 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) extern void update_rq_clock(struct rq *rq); +static inline u64 __rq_clock_broken(struct rq *rq) +{ + return READ_ONCE(rq->clock); +} + /* * rq::clock_update_flags bits * @@ -1869,11 +2031,13 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); DECLARE_PER_CPU(int, sd_llc_size); DECLARE_PER_CPU(int, sd_llc_id); +DECLARE_PER_CPU(int, sd_share_id); DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); extern struct static_key_false sched_asym_cpucapacity; +extern struct static_key_false sched_cluster_active; static __always_inline bool sched_asym_cpucap_active(void) { @@ -1896,6 +2060,11 @@ struct sched_group_capacity { int id; #endif + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + unsigned long cpumask[]; /* Balance mask */ }; @@ -1909,6 +2078,11 @@ struct sched_group { int asym_prefer_cpu; /* CPU of highest priority in group */ int flags; + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + /* * The CPUs this group covers.
* @@ -1960,12 +2134,12 @@ static inline const struct cpumask *task_user_cpus(struct task_struct *p) #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS) -extern void __sched_core_account_forceidle(struct rq *rq); +extern void __sched_core_account_sibidle(struct rq *rq); -static inline void sched_core_account_forceidle(struct rq *rq) +static inline void sched_core_account_sibidle(struct rq *rq) { if (schedstat_enabled()) - __sched_core_account_forceidle(rq); + __sched_core_account_sibidle(rq); } extern void __sched_core_tick(struct rq *rq); @@ -1978,7 +2152,7 @@ static inline void sched_core_tick(struct rq *rq) #else -static inline void sched_core_account_forceidle(struct rq *rq) {} +static inline void sched_core_account_sibidle(struct rq *rq) {} static inline void sched_core_tick(struct rq *rq) {} @@ -2299,6 +2473,13 @@ struct sched_class { #ifdef CONFIG_SCHED_CORE int (*task_is_throttled)(struct task_struct *p, int cpu); #endif + void (*update_nr_uninterruptible)(struct task_struct *p, long inc); + void (*update_nr_iowait)(struct task_struct *p, long inc); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline void put_prev_task(struct rq *rq, struct task_struct *prev) @@ -2312,6 +2493,11 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next) next->sched_class->set_next_task(rq, next, false); } +static inline void update_nr_iowait(struct task_struct *p, long inc) +{ + if (p->sched_class->update_nr_iowait) + p->sched_class->update_nr_iowait(p, inc); +} /* * Helper to define a sched_class instance; each one is placed in a separate @@ -3532,4 +3718,32 @@ static inline void init_sched_mm_cid(struct task_struct *t) { } extern u64 avg_vruntime(struct cfs_rq *cfs_rq); extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); +#ifdef CONFIG_SCHED_SLI +extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); +extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); +extern void task_ca_increase_nr_migrations(struct task_struct *tsk); +void cpu_update_latency(struct sched_entity *se, u64 delta); +void task_cpu_update_block(struct task_struct *tsk, u64 runtime); +void calc_cgroup_load(void); +bool async_load_calc_enabled(void); +struct task_group *cgroup_tg(struct cgroup *cgrp); +int sched_lat_stat_show(struct seq_file *sf, void *v); +int sched_lat_stat_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val); +#else +static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { } +static inline void cpu_update_latency(struct sched_entity *se, + u64 delta) { } +static inline void task_cpu_update_block(struct task_struct *tsk, + u64 runtime) { } +static inline void calc_cgroup_load(void) { } +static inline bool async_load_calc_enabled(void) +{ + return false; +} +#endif + +long tg_get_cfs_quota(struct task_group *tg); +long tg_get_cfs_period(struct task_group *tg); + #endif /* _KERNEL_SCHED_SCHED_H */ diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index 966f4eacfe51d67b15c9cfae3da39d0a4a77c2ed..6fd147039ebdd8ec841633a8d56f9a9b60273ee2 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c @@ -85,6 +85,7 @@ void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p, if (p) { if (p->in_iowait) { + task_cpu_update_block(p, delta); __schedstat_add(stats->iowait_sum, delta); __schedstat_inc(stats->iowait_count); trace_sched_stat_iowait(p, delta); diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 
3a13cecf177402a15b7fc8d3447909165ee9cc65..cc53396fb0c77464bb1e84283ea44db2e6752792 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -666,11 +666,14 @@ static void destroy_sched_domains(struct sched_domain *sd) DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc); DEFINE_PER_CPU(int, sd_llc_size); DEFINE_PER_CPU(int, sd_llc_id); +DEFINE_PER_CPU(int, sd_share_id); DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); + DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity); +DEFINE_STATIC_KEY_FALSE(sched_cluster_active); static void update_top_cache_domain(int cpu) { @@ -691,6 +694,17 @@ static void update_top_cache_domain(int cpu) per_cpu(sd_llc_id, cpu) = id; rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); + sd = lowest_flag_domain(cpu, SD_CLUSTER); + if (sd) + id = cpumask_first(sched_domain_span(sd)); + + /* + * This assignment should be placed after the sd_llc_id as + * we want this id equals to cluster id on cluster machines + * but equals to LLC id on non-Cluster machines. + */ + per_cpu(sd_share_id, cpu) = id; + sd = lowest_flag_domain(cpu, SD_NUMA); rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); @@ -1548,6 +1562,7 @@ static struct cpumask ***sched_domains_numa_masks; */ #define TOPOLOGY_SD_FLAGS \ (SD_SHARE_CPUCAPACITY | \ + SD_CLUSTER | \ SD_SHARE_PKG_RESOURCES | \ SD_NUMA | \ SD_ASYM_PACKING) @@ -2369,6 +2384,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att struct rq *rq = NULL; int i, ret = -ENOMEM; bool has_asym = false; + bool has_cluster = false; if (WARN_ON(cpumask_empty(cpu_map))) goto error; @@ -2472,6 +2488,17 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att } } +#if IS_ENABLED(CONFIG_X86) + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (boot_cpu_data.x86 == 7 && boot_cpu_data.x86_model == 0x5b)) { + for_each_cpu(i, cpu_map) { + for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) + sd->flags |= SD_ASYM_PACKING; + } + } +#endif + /* Calculate CPU capacity for physical packages and nodes */ for (i = nr_cpumask_bits-1; i >= 0; i--) { if (!cpumask_test_cpu(i, cpu_map)) @@ -2494,12 +2521,18 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); cpu_attach_domain(sd, d.rd, i); + + if (lowest_flag_domain(i, SD_CLUSTER)) + has_cluster = true; } rcu_read_unlock(); if (has_asym) static_branch_inc_cpuslocked(&sched_asym_cpucapacity); + if (has_cluster) + static_branch_inc_cpuslocked(&sched_cluster_active); + if (rq && sched_debug_verbose) { pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n", cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); @@ -2599,6 +2632,9 @@ static void detach_destroy_domains(const struct cpumask *cpu_map) if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) static_branch_dec_cpuslocked(&sched_asym_cpucapacity); + if (static_branch_unlikely(&sched_cluster_active)) + static_branch_dec_cpuslocked(&sched_cluster_active); + rcu_read_lock(); for_each_cpu(i, cpu_map) cpu_attach_domain(NULL, &def_root_domain, i); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 354a2d294f526ad6688168443913385eda101fa1..d46492e60ca257876e8a884c2425ffb61f3cb26a 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -63,6 +63,7 @@ 
#include #include #include +#include #include "../lib/kstrtox.h" @@ -96,6 +97,11 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals); static const int six_hundred_forty_kb = 640 * 1024; #endif +#ifdef CONFIG_USER_NS +extern int unprivileged_userns_clone; +extern int userns_max_level; +extern int userns_max_level_max; +#endif static const int ngroups_max = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; @@ -130,6 +136,8 @@ enum sysctl_writes_mode { static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT; #endif /* CONFIG_PROC_SYSCTL */ +extern int sysctl_enable_context_readahead; + #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) int sysctl_legacy_va_layout; @@ -2042,6 +2050,82 @@ static struct ctl_table kern_table[] = { .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_INT_MAX, }, +#endif +#ifdef CONFIG_USER_NS + { + .procname = "unprivileged_userns_clone", + .data = &unprivileged_userns_clone, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "userns_max_level", + .data = &userns_max_level, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &userns_max_level_max, + }, +#endif +#ifdef CONFIG_SCHED_ACPU + { + .procname = "sched_acpu", + .data = &sysctl_sched_acpu_enabled, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_acpu_enable_handler, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif /* CONFIG_SCHED_ACPU*/ +#ifdef CONFIG_RICH_CONTAINER + { + .procname = "rich_container_enable", + .data = &sysctl_rich_container_enable, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { + .procname = "rich_container_source", + .data = &sysctl_rich_container_source, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { + .procname = "rich_container_cpuinfo_source", + .data = &sysctl_rich_container_cpuinfo_source, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_TWO, + }, + { + .procname = "rich_container_cpuinfo_sharesbase", + .data = &sysctl_rich_container_cpuinfo_sharesbase, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_TWO, + }, + { + .procname = "rich_container_ext_enable", + .data = &sysctl_rich_container_ext_enable, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + #endif { } }; @@ -2249,6 +2333,15 @@ static struct ctl_table vm_table[] = { .extra2 = (void *)&mmap_rnd_compat_bits_max, }, #endif + { + .procname = "enable_context_readahead", + .data = &sysctl_enable_context_readahead, + .maxlen = sizeof(sysctl_enable_context_readahead), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, { } }; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index aab43ba3daeb515f2e9c72c04b08e97dc3e7e847..c4dd9325861e8d6d92d428dc5de14a7cf61171a6 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1254,9 +1254,7 @@ static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = { }; #ifdef CONFIG_KEYS -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "kfuncs which will be used in BPF programs"); +__bpf_kfunc_start_defs(); /** 
* bpf_lookup_user_key - lookup a key by its serial @@ -1406,16 +1404,16 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, } #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ -__diag_pop(); +__bpf_kfunc_end_defs(); -BTF_SET8_START(key_sig_kfunc_set) +BTF_KFUNCS_START(key_sig_kfunc_set) BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE) #ifdef CONFIG_SYSTEM_DATA_VERIFICATION BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE) #endif -BTF_SET8_END(key_sig_kfunc_set) +BTF_KFUNCS_END(key_sig_kfunc_set) static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = { .owner = THIS_MODULE, @@ -3107,7 +3105,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe, .uprobe = uprobe, }; struct bpf_prog *prog = link->link.prog; - bool sleepable = prog->aux->sleepable; + bool sleepable = prog->sleepable; struct bpf_run_ctx *old_run_ctx; if (link->task && current->mm != link->task->mm) diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 1d8e47bed3f118ae071b4c675a6812001c78e238..8846049c8fa3c210edd814ee09a49f191cfb3191 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -22,6 +22,19 @@ #include #include +/* + * sysctl determining whether unprivileged users may unshare a new + * userns. Allowed by default + */ +int unprivileged_userns_clone = 1; + +/* + * sysctl determining the maximum of nested level. + * Default to 33 to keep compatible with upstream. + */ +int userns_max_level = 33; +int userns_max_level_max = 33; + static struct kmem_cache *user_ns_cachep __read_mostly; static DEFINE_MUTEX(userns_state_mutex); @@ -88,7 +101,7 @@ int create_user_ns(struct cred *new) int ret, i; ret = -ENOSPC; - if (parent_ns->level > 32) + if (parent_ns->level >= userns_max_level) goto fail; ucounts = inc_user_namespaces(parent_ns, owner); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 5cd6d4e269157973acecd00afc91646720fee2be..616b21b4ca6a33db1ddbafeb93dcf14f4e6c21e5 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -127,6 +128,7 @@ static bool is_hardlockup(unsigned int cpu) return false; } +NOKPROBE_SYMBOL(is_hardlockup); static void watchdog_hardlockup_kick(void) { @@ -184,6 +186,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs) per_cpu(watchdog_hardlockup_warned, cpu) = false; } } +NOKPROBE_SYMBOL(watchdog_hardlockup_check); #else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */ @@ -559,8 +562,12 @@ static void watchdog_enable(unsigned int cpu) /* Initialize timestamp */ update_touch_ts(); /* Enable the hardlockup detector */ - if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED) - watchdog_hardlockup_enable(cpu); + if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED) { + if (disable_sdei_nmi_watchdog) + watchdog_hardlockup_enable(cpu); + else + sdei_watchdog_hardlockup_enable(cpu); + } } static void watchdog_disable(unsigned int cpu) @@ -574,7 +581,10 @@ static void watchdog_disable(unsigned int cpu) * delay between disabling the timer and disabling the hardlockup * detector causes a false positive. 
*/ - watchdog_hardlockup_disable(cpu); + if (disable_sdei_nmi_watchdog) + watchdog_hardlockup_disable(cpu); + else + sdei_watchdog_hardlockup_disable(cpu); hrtimer_cancel(hrtimer); wait_for_completion(this_cpu_ptr(&softlockup_completion)); } @@ -843,7 +853,7 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write, return err; } -static const int sixty = 60; +static int one_hundred_fifty = 150; static struct ctl_table watchdog_sysctls[] = { { @@ -862,7 +872,7 @@ static struct ctl_table watchdog_sysctls[] = { .mode = 0644, .proc_handler = proc_watchdog_thresh, .extra1 = SYSCTL_ZERO, - .extra2 = (void *)&sixty, + .extra2 = &one_hundred_fifty, }, { .procname = "watchdog_cpumask", @@ -1019,7 +1029,8 @@ void __init lockup_detector_init(void) cpumask_copy(&watchdog_cpumask, housekeeping_cpumask(HK_TYPE_TIMER)); - if (!watchdog_hardlockup_probe()) + if ((!disable_sdei_nmi_watchdog && !sdei_watchdog_hardlockup_probe()) || + (disable_sdei_nmi_watchdog && !watchdog_hardlockup_probe())) watchdog_hardlockup_available = true; else allow_lockup_detector_init_retry = true; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7fa1c7c9151aef4eecefc132b6266ad7952272b0..bb8c6520225ecf79cdbbb40e97f6db1ae3eb3590 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -199,6 +199,9 @@ struct worker_pool { * from get_work_pool(). */ struct rcu_head rcu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* @@ -323,6 +326,9 @@ struct workqueue_struct { /* hot fields used during command issue, aligned to cacheline */ unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */ struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static struct kmem_cache *pwq_cache; diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h index f6275944ada776814787ac5a1cfa01f891bfe934..f8158ed6637ae7e707f252e5ec5e95d1f0ea3951 100644 --- a/kernel/workqueue_internal.h +++ b/kernel/workqueue_internal.h @@ -59,6 +59,11 @@ struct worker { /* used only by rescuers to point to the target workqueue */ struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** diff --git a/lib/.gitignore b/lib/.gitignore index 54596b634ecbfff06dff6f6e10a535b0982e6a3a..101a4aa92fb537086e37ee12581ba7a190fa08af 100644 --- a/lib/.gitignore +++ b/lib/.gitignore @@ -5,5 +5,3 @@ /gen_crc32table /gen_crc64table /oid_registry_data.c -/test_fortify.log -/test_fortify/*.log diff --git a/lib/Kconfig b/lib/Kconfig index ee365b7402f193ef510ebd46cabae9c004da11b5..c8e32e86b84805a7959d74b93059c15806dd1634 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -532,7 +532,7 @@ config CHECK_SIGNATURE bool config CPUMASK_OFFSTACK - bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS + bool "Force CPU masks off stack" help Use dynamic allocation for cpumask_var_t, instead of putting them on the stack. This is a bit more expensive, but avoids diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f94c3e957b8298bb14662d398f3ce9e3a57437ca..32843f12dae85f1929849024ab8ea9ba1b958702 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1046,6 +1046,12 @@ config HAVE_HARDLOCKUP_DETECTOR_BUDDY depends on SMP default y +config SDEI_WATCHDOG + bool "SDEI NMI Watchdog support" + depends on ARM_SDE_INTERFACE + depends on HARDLOCKUP_DETECTOR + select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER + # # Global switch whether to build a hardlockup detector at all. 
It is available # only when the architecture supports at least one implementation. There are @@ -1062,6 +1068,7 @@ config HARDLOCKUP_DETECTOR depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_BUDDY || HAVE_HARDLOCKUP_DETECTOR_ARCH imply HARDLOCKUP_DETECTOR_PERF imply HARDLOCKUP_DETECTOR_BUDDY + imply SDEI_WATCHDOG imply HARDLOCKUP_DETECTOR_ARCH select LOCKUP_DETECTOR @@ -1256,6 +1263,13 @@ config SCHEDSTATS application, you can say N to avoid the very slight overhead this adds. +config SCHED_ACPU + bool "ACPU info: account idle time of smt to task" + depends on DEBUG_KERNEL && PROC_FS && SMP && SCHED_SMT + default y + help + Add ACPU info in /proc//sched. + endmenu config DEBUG_TIMEKEEPING diff --git a/lib/Makefile b/lib/Makefile index 740109b6e2c89f0748d4ed5013139db0803c9bb2..7ab8f09de8ece38def2a383a86de21b33e339c9d 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -405,36 +405,4 @@ obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o -# FORTIFY_SOURCE compile-time behavior tests -TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c) -TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS)) -TEST_FORTIFY_LOG = test_fortify.log - -quiet_cmd_test_fortify = TEST $@ - cmd_test_fortify = $(CONFIG_SHELL) $(srctree)/scripts/test_fortify.sh \ - $< $@ "$(NM)" $(CC) $(c_flags) \ - $(call cc-disable-warning,fortify-source) \ - -DKBUILD_EXTRA_WARN1 - -targets += $(TEST_FORTIFY_LOGS) -clean-files += $(TEST_FORTIFY_LOGS) -clean-files += $(addsuffix .o, $(TEST_FORTIFY_LOGS)) -$(obj)/test_fortify/%.log: $(src)/test_fortify/%.c \ - $(src)/test_fortify/test_fortify.h \ - $(srctree)/include/linux/fortify-string.h \ - $(srctree)/scripts/test_fortify.sh \ - FORCE - $(call if_changed,test_fortify) - -quiet_cmd_gen_fortify_log = GEN $@ - cmd_gen_fortify_log = cat /dev/null > $@ || true - -targets += $(TEST_FORTIFY_LOG) -clean-files += $(TEST_FORTIFY_LOG) -$(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE - $(call if_changed,gen_fortify_log) - -# Fake dependency to trigger the fortify tests. 
-ifeq ($(CONFIG_FORTIFY_SOURCE),y) -$(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG) -endif +subdir-$(CONFIG_FORTIFY_SOURCE) += test_fortify diff --git a/lib/buildid.c b/lib/buildid.c index 9fc46366597e78803a74f8dc9a9c33e93c10585b..853b8388df80299ecad9b3cfa9ddd1da6f8ac50b 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -5,27 +5,177 @@ #include #include #include +#include #define BUILD_ID 3 +#define MAX_PHDR_CNT 256 + +struct freader { + void *buf; + u32 buf_sz; + int err; + union { + struct { + struct file *file; + struct folio *folio; + void *addr; + loff_t folio_off; + bool may_fault; + }; + struct { + const char *data; + u64 data_sz; + }; + }; +}; + +static void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz, + struct file *file, bool may_fault) +{ + memset(r, 0, sizeof(*r)); + r->buf = buf; + r->buf_sz = buf_sz; + r->file = file; + r->may_fault = may_fault; +} + +static void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz) +{ + memset(r, 0, sizeof(*r)); + r->data = data; + r->data_sz = data_sz; +} + +static void freader_put_folio(struct freader *r) +{ + if (!r->folio) + return; + kunmap_local(r->addr); + folio_put(r->folio); + r->folio = NULL; +} + +static int freader_get_folio(struct freader *r, loff_t file_off) +{ + /* check if we can just reuse current folio */ + if (r->folio && file_off >= r->folio_off && + file_off < r->folio_off + folio_size(r->folio)) + return 0; + + freader_put_folio(r); + + /* reject secretmem folios created with memfd_secret() */ + if (secretmem_mapping(r->file->f_mapping)) + return -EFAULT; + + r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT); + + /* if sleeping is allowed, wait for the page, if necessary */ + if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) { + filemap_invalidate_lock_shared(r->file->f_mapping); + r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT, + NULL, r->file); + filemap_invalidate_unlock_shared(r->file->f_mapping); + } + + if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) { + if (!IS_ERR(r->folio)) + folio_put(r->folio); + r->folio = NULL; + return -EFAULT; + } + + r->folio_off = folio_pos(r->folio); + r->addr = kmap_local_folio(r->folio, 0); + + return 0; +} + +static const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz) +{ + size_t folio_sz; + + /* provided internal temporary buffer should be sized correctly */ + if (WARN_ON(r->buf && sz > r->buf_sz)) { + r->err = -E2BIG; + return NULL; + } + + if (unlikely(file_off + sz < file_off)) { + r->err = -EOVERFLOW; + return NULL; + } + + /* working with memory buffer is much more straightforward */ + if (!r->buf) { + if (file_off + sz > r->data_sz) { + r->err = -ERANGE; + return NULL; + } + return r->data + file_off; + } + + /* fetch or reuse folio for given file offset */ + r->err = freader_get_folio(r, file_off); + if (r->err) + return NULL; + + /* if requested data is crossing folio boundaries, we have to copy + * everything into our local buffer to keep a simple linear memory + * access interface + */ + folio_sz = folio_size(r->folio); + if (file_off + sz > r->folio_off + folio_sz) { + int part_sz = r->folio_off + folio_sz - file_off; + + /* copy the part that resides in the current folio */ + memcpy(r->buf, r->addr + (file_off - r->folio_off), part_sz); + + /* fetch next folio */ + r->err = freader_get_folio(r, r->folio_off + folio_sz); + if (r->err) + return NULL; + + /* copy the rest of requested data */ + memcpy(r->buf + part_sz, r->addr, sz - part_sz); 
+ + return r->buf; + } + + /* if data fits in a single folio, just return direct pointer */ + return r->addr + (file_off - r->folio_off); +} + +static void freader_cleanup(struct freader *r) +{ + if (!r->buf) + return; /* non-file-backed mode */ + + freader_put_folio(r); +} + /* * Parse build id from the note segment. This logic can be shared between * 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are * identical. */ -static int parse_build_id_buf(unsigned char *build_id, - __u32 *size, - const void *note_start, - Elf32_Word note_size) +static int parse_build_id(struct freader *r, unsigned char *build_id, __u32 *size, + loff_t note_off, Elf32_Word note_size) { const char note_name[] = "GNU"; const size_t note_name_sz = sizeof(note_name); - u64 note_off = 0, new_off, name_sz, desc_sz; + u32 build_id_off, new_off, note_end, name_sz, desc_sz; + const Elf32_Nhdr *nhdr; const char *data; - while (note_off + sizeof(Elf32_Nhdr) < note_size && - note_off + sizeof(Elf32_Nhdr) > note_off /* overflow */) { - Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_off); + if (check_add_overflow(note_off, note_size, ¬e_end)) + return -EINVAL; + + while (note_end - note_off > sizeof(Elf32_Nhdr) + note_name_sz) { + nhdr = freader_fetch(r, note_off, sizeof(Elf32_Nhdr) + note_name_sz); + if (!nhdr) + return r->err; name_sz = READ_ONCE(nhdr->n_namesz); desc_sz = READ_ONCE(nhdr->n_descsz); @@ -33,14 +183,20 @@ static int parse_build_id_buf(unsigned char *build_id, new_off = note_off + sizeof(Elf32_Nhdr); if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) || check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) || - new_off > note_size) + new_off > note_end) break; if (nhdr->n_type == BUILD_ID && name_sz == note_name_sz && memcmp(nhdr + 1, note_name, note_name_sz) == 0 && desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) { - data = note_start + note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4); + build_id_off = note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4); + + /* freader_fetch() will invalidate nhdr pointer */ + data = freader_fetch(r, build_id_off, desc_sz); + if (!data) + return r->err; + memcpy(build_id, data, desc_sz); memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz); if (size) @@ -54,120 +210,105 @@ static int parse_build_id_buf(unsigned char *build_id, return -EINVAL; } -static inline int parse_build_id(const void *page_addr, - unsigned char *build_id, - __u32 *size, - const void *note_start, - Elf32_Word note_size) +/* Parse build ID from 32-bit ELF */ +static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *size) { - /* check for overflow */ - if (note_start < page_addr || note_start + note_size < note_start) - return -EINVAL; + const Elf32_Ehdr *ehdr; + const Elf32_Phdr *phdr; + __u32 phnum, phoff, i; - /* only supports note that fits in the first page */ - if (note_start + note_size > page_addr + PAGE_SIZE) - return -EINVAL; + ehdr = freader_fetch(r, 0, sizeof(Elf32_Ehdr)); + if (!ehdr) + return r->err; - return parse_build_id_buf(build_id, size, note_start, note_size); -} + /* subsequent freader_fetch() calls invalidate pointers, so remember locally */ + phnum = READ_ONCE(ehdr->e_phnum); + phoff = READ_ONCE(ehdr->e_phoff); -/* Parse build ID from 32-bit ELF */ -static int get_build_id_32(const void *page_addr, unsigned char *build_id, - __u32 *size) -{ - Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr; - Elf32_Phdr *phdr; - __u32 i, phnum; - - /* - * FIXME - * Neither ELF spec nor ELF loader require that program headers - * start 
immediately after ELF header. - */ - if (ehdr->e_phoff != sizeof(Elf32_Ehdr)) - return -EINVAL; + /* set upper bound on amount of segments (phdrs) we iterate */ + if (phnum > MAX_PHDR_CNT) + phnum = MAX_PHDR_CNT; - phnum = READ_ONCE(ehdr->e_phnum); - /* only supports phdr that fits in one page */ - if (phnum > (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr)) + /* check that phoff is not large enough to cause an overflow */ + if (phoff + phnum * sizeof(Elf32_Phdr) < phoff) return -EINVAL; - phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr)); for (i = 0; i < phnum; ++i) { - if (phdr[i].p_type == PT_NOTE && - !parse_build_id(page_addr, build_id, size, - page_addr + READ_ONCE(phdr[i].p_offset), - READ_ONCE(phdr[i].p_filesz))) + phdr = freader_fetch(r, phoff + i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr)); + if (!phdr) + return r->err; + + if (phdr->p_type == PT_NOTE && + !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset), + READ_ONCE(phdr->p_filesz))) return 0; } return -EINVAL; } /* Parse build ID from 64-bit ELF */ -static int get_build_id_64(const void *page_addr, unsigned char *build_id, - __u32 *size) +static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *size) { - Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr; - Elf64_Phdr *phdr; - __u32 i, phnum; - - /* - * FIXME - * Neither ELF spec nor ELF loader require that program headers - * start immediately after ELF header. - */ - if (ehdr->e_phoff != sizeof(Elf64_Ehdr)) - return -EINVAL; + const Elf64_Ehdr *ehdr; + const Elf64_Phdr *phdr; + __u32 phnum, i; + __u64 phoff; + ehdr = freader_fetch(r, 0, sizeof(Elf64_Ehdr)); + if (!ehdr) + return r->err; + + /* subsequent freader_fetch() calls invalidate pointers, so remember locally */ phnum = READ_ONCE(ehdr->e_phnum); - /* only supports phdr that fits in one page */ - if (phnum > (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr)) - return -EINVAL; + phoff = READ_ONCE(ehdr->e_phoff); - phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr)); + /* set upper bound on amount of segments (phdrs) we iterate */ + if (phnum > MAX_PHDR_CNT) + phnum = MAX_PHDR_CNT; + + /* check that phoff is not large enough to cause an overflow */ + if (phoff + phnum * sizeof(Elf64_Phdr) < phoff) + return -EINVAL; for (i = 0; i < phnum; ++i) { - if (phdr[i].p_type == PT_NOTE && - !parse_build_id(page_addr, build_id, size, - page_addr + READ_ONCE(phdr[i].p_offset), - READ_ONCE(phdr[i].p_filesz))) + phdr = freader_fetch(r, phoff + i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr)); + if (!phdr) + return r->err; + + if (phdr->p_type == PT_NOTE && + !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset), + READ_ONCE(phdr->p_filesz))) return 0; } + return -EINVAL; } -/* - * Parse build ID of ELF file mapped to vma - * @vma: vma object - * @build_id: buffer to store build id, at least BUILD_ID_SIZE long - * @size: returns actual build id size in case of success - * - * Return: 0 on success, -EINVAL otherwise - */ -int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, - __u32 *size) +/* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */ +#define MAX_FREADER_BUF_SZ 64 + +static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, + __u32 *size, bool may_fault) { - Elf32_Ehdr *ehdr; - struct page *page; - void *page_addr; + const Elf32_Ehdr *ehdr; + struct freader r; + char buf[MAX_FREADER_BUF_SZ]; int ret; /* only works for page backed storage */ if (!vma->vm_file) return -EINVAL; - page = find_get_page(vma->vm_file->f_mapping, 0); -
if (!page) - return -EFAULT; /* page not mapped */ - if (!PageUptodate(page)) { - put_page(page); - return -EFAULT; + freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault); + + /* fetch first 18 bytes of ELF header for checks */ + ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type)); + if (!ehdr) { + ret = r.err; + goto out; } ret = -EINVAL; - page_addr = kmap_atomic(page); - ehdr = (Elf32_Ehdr *)page_addr; /* compare magic x7f "ELF" */ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) @@ -178,15 +319,46 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, goto out; if (ehdr->e_ident[EI_CLASS] == ELFCLASS32) - ret = get_build_id_32(page_addr, build_id, size); + ret = get_build_id_32(&r, build_id, size); else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) - ret = get_build_id_64(page_addr, build_id, size); + ret = get_build_id_64(&r, build_id, size); out: - kunmap_atomic(page_addr); - put_page(page); + freader_cleanup(&r); return ret; } +/* + * Parse build ID of ELF file mapped to vma + * @vma: vma object + * @build_id: buffer to store build id, at least BUILD_ID_SIZE long + * @size: returns actual build id size in case of success + * + * Assumes no page fault can be taken, so if relevant portions of ELF file are + * not already paged in, fetching of build ID fails. + * + * Return: 0 on success; negative error, otherwise + */ +int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) +{ + return __build_id_parse(vma, build_id, size, false /* !may_fault */); +} + +/* + * Parse build ID of ELF file mapped to VMA + * @vma: vma object + * @build_id: buffer to store build id, at least BUILD_ID_SIZE long + * @size: returns actual build id size in case of success + * + * Assumes faultable context and can cause page faults to bring in file data + * into page cache. + * + * Return: 0 on success; negative error, otherwise + */ +int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) +{ + return __build_id_parse(vma, build_id, size, true /* may_fault */); +} + /** * build_id_parse_buf - Get build ID from a buffer * @buf: ELF note section(s) to parse @@ -197,7 +369,15 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, */ int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size) { - return parse_build_id_buf(build_id, NULL, buf, buf_size); + struct freader r; + int err; + + freader_init_from_mem(&r, buf, buf_size); + + err = parse_build_id(&r, build_id, NULL, 0, buf_size); + + freader_cleanup(&r); + return err; } #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE) diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 9073430dc8659c7e37aa262874b991c3677d6b06..44dd133594d4d6a005321d27acbf76f9aa1d4c95 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -278,6 +278,85 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) } EXPORT_SYMBOL(__percpu_counter_compare); +/* + * Compare counter, and add amount if total is: less than or equal to limit if + * amount is positive, or greater than or equal to limit if amount is negative. + * Return true if amount is added, or false if total would be beyond the limit. + * + * Negative limit is allowed, but unusual. + * When negative amounts (subs) are given to percpu_counter_limited_add(), + * the limit would most naturally be 0 - but other limits are also allowed. 
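+ * For example, with limit 100 and a counter currently summing to 98, an + * amount of 2 is added and true is returned, while an amount of 3 returns + * false and leaves the counter unchanged.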
+ * + * Overflow beyond S64_MAX is not allowed for: counter, limit and amount + * are all assumed to be sane (far from S64_MIN and S64_MAX). + */ +bool __percpu_counter_limited_add(struct percpu_counter *fbc, + s64 limit, s64 amount, s32 batch) +{ + s64 count; + s64 unknown; + unsigned long flags; + bool good = false; + + if (amount == 0) + return true; + + local_irq_save(flags); + unknown = batch * num_online_cpus(); + count = __this_cpu_read(*fbc->counters); + + /* Skip taking the lock when safe */ + if (abs(count + amount) <= batch && + ((amount > 0 && fbc->count + unknown <= limit) || + (amount < 0 && fbc->count - unknown >= limit))) { + this_cpu_add(*fbc->counters, amount); + local_irq_restore(flags); + return true; + } + + raw_spin_lock(&fbc->lock); + count = fbc->count + amount; + + /* Skip percpu_counter_sum() when safe */ + if (amount > 0) { + if (count - unknown > limit) + goto out; + if (count + unknown <= limit) + good = true; + } else { + if (count + unknown < limit) + goto out; + if (count - unknown >= limit) + good = true; + } + + if (!good) { + s32 *pcount; + int cpu; + + for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) { + pcount = per_cpu_ptr(fbc->counters, cpu); + count += *pcount; + } + if (amount > 0) { + if (count > limit) + goto out; + } else { + if (count < limit) + goto out; + } + good = true; + } + + count = __this_cpu_read(*fbc->counters); + fbc->count += count + amount; + __this_cpu_sub(*fbc->counters, count); +out: + raw_spin_unlock(&fbc->lock); + local_irq_restore(flags); + return good; +} + static int __init percpu_counter_startup(void) { int ret; diff --git a/lib/test_fortify/.gitignore b/lib/test_fortify/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..c1ba37d14b50e30fca202b624a6bef5b7bc5b400 --- /dev/null +++ b/lib/test_fortify/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +/*.log diff --git a/lib/test_fortify/Makefile b/lib/test_fortify/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..1c3f82ad8bb24f227290ee1184f46e1e8789f4c5 --- /dev/null +++ b/lib/test_fortify/Makefile @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0 + +ccflags-y := $(call cc-disable-warning,fortify-source) + +quiet_cmd_test_fortify = TEST $@ + cmd_test_fortify = $(CONFIG_SHELL) $(src)/test_fortify.sh \ + $< $@ "$(NM)" $(CC) $(c_flags) -DKBUILD_EXTRA_WARN1 + +$(obj)/%.log: $(src)/%.c $(src)/test_fortify.sh FORCE + $(call if_changed_dep,test_fortify) + +logs = $(patsubst $(src)/%.c, %.log, $(wildcard $(src)/*-*.c)) +targets += $(logs) + +quiet_cmd_gen_fortify_log = CAT $@ + cmd_gen_fortify_log = cat $(or $(real-prereqs),/dev/null) > $@ + +$(obj)/test_fortify.log: $(addprefix $(obj)/, $(logs)) FORCE + $(call if_changed,gen_fortify_log) + +# GCC<=7 does not always produce *.d files. +# Run the tests only for GCC>=8 or Clang. +always-$(call gcc-min-version, 80000) += test_fortify.log +always-$(CONFIG_CC_IS_CLANG) += test_fortify.log + +# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined. +# Pass CFLAGS_KASAN to avoid warnings. 
+KASAN_SANITIZE := y diff --git a/scripts/test_fortify.sh b/lib/test_fortify/test_fortify.sh similarity index 100% rename from scripts/test_fortify.sh rename to lib/test_fortify/test_fortify.sh diff --git a/mm/Kconfig b/mm/Kconfig index c11cd01169e8d1b28ba7ed5b7847f833c534e52f..4d1b98f6bb5e6181a003269c602ec313a928ff9e 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -1164,6 +1164,15 @@ config DMAPOOL_TEST config ARCH_HAS_PTE_SPECIAL bool +config MEMSLI + bool "Support memory latency histogram (memsli)" + depends on MEMCG + default y + help + This probes various kinds of memory latency and accumulates the + samples into the corresponding latency histogram of every memory + cgroup. + # # Some architectures require a special hugepage directory format that is # required to support multiple hugepage sizes. For example a4fe3ce76 @@ -1282,6 +1291,30 @@ config LOCK_MM_AND_FIND_VMA bool depends on !STACK_GROWSUP +config ASYNC_FORK + bool "Copy the mm asynchronously on fork" + depends on MMU && MEMCG + default y + help + This introduces a mechanism that reduces how long the parent process + blocks in fork(2). It is most effective for processes with large + memory usage. For example, a database may use fork(2) to take a + memory snapshot and want fork(2) to return as soon as possible to + shorten the unserviceable window. Note that it does not speed up the + child's return from fork(2). + source "mm/damon/Kconfig" +config PAGECACHE_LIMIT + bool "Enable pagecache limit function" + depends on MMU && MEMCG + default n + help + A system can accumulate a large amount of page cache, leaving too + little free memory and hurting service performance. In such + scenarios, this feature lets you limit page cache usage so that + enough memory remains available to services. + + If unsure, say N.
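For reference, a config fragment exercising the three new mm options might look like the following (a sketch only; MEMSLI and ASYNC_FORK already default to y once MEMCG is enabled)::

    CONFIG_MEMCG=y
    CONFIG_MEMSLI=y
    CONFIG_ASYNC_FORK=y
    CONFIG_PAGECACHE_LIMIT=y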
+ endmenu diff --git a/mm/Makefile b/mm/Makefile index ec65984e2adeee6719c54932280cbe0a9857d55c..8ecb638c9cc49f508652e8ee2384f34e1b6ff9a2 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -95,7 +95,7 @@ obj-$(CONFIG_NUMA) += memory-tiers.o obj-$(CONFIG_DEVICE_MIGRATION) += migrate_device.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o obj-$(CONFIG_PAGE_COUNTER) += page_counter.o -obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o +obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o memcg_zombie_reaper.o ifdef CONFIG_SWAP obj-$(CONFIG_MEMCG) += swap_cgroup.o endif @@ -138,3 +138,5 @@ obj-$(CONFIG_IO_MAPPING) += io-mapping.o obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o +obj-$(CONFIG_ASYNC_FORK) += async_fork.o +obj-$(CONFIG_PAGECACHE_LIMIT) += pagecache_limit.o diff --git a/mm/async_fork.c b/mm/async_fork.c new file mode 100644 index 0000000000000000000000000000000000000000..0a2981086429cd889db18d06b7f1552b4ced5746 --- /dev/null +++ b/mm/async_fork.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +DEFINE_STATIC_KEY_FALSE(async_fork_enabled_key); +DEFINE_STATIC_KEY_FALSE(async_fork_staging_key); + +noinline int async_fork_cpr_fast(struct vm_area_struct *vma, + struct vm_area_struct *mpnt) +{ + return -EOPNOTSUPP; +} + +noinline void async_fork_cpr_bind(struct mm_struct *oldmm, + struct mm_struct *mm, int err) +{ +} + +noinline void async_fork_cpr_rest(void) +{ +} + +noinline void async_fork_cpr_done(struct mm_struct *mm, bool r, bool l) +{ +} + +noinline bool __is_pmd_async_fork(pmd_t pmd) +{ + return false; +} + +noinline void __async_fork_fixup_pmd(struct vm_area_struct *mpnt, pmd_t *pmd, + unsigned long addr) +{ +} + +noinline void __async_fork_fixup_vma(struct vm_area_struct *mpnt) +{ +} diff --git a/mm/backing-dev.c b/mm/backing-dev.c index e039d05304dd9ca52da735962c0ef951fb448ec5..67d71ce4472dc13a5eb3cb5f35e6fcd5ee1fceea 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -41,9 +41,25 @@ struct workqueue_struct *bdi_wq; static struct dentry *bdi_debug_root; +#ifdef CONFIG_CGROUP_WRITEBACK +static struct dentry *memcg_blkcg_file; +static const struct file_operations memcg_blkcg_debug_fops; +#endif + static void bdi_debug_init(void) { bdi_debug_root = debugfs_create_dir("bdi", NULL); + +#ifdef CONFIG_CGROUP_WRITEBACK + if (!bdi_debug_root) + return; + + if (!cgwb_v1) + return; + + memcg_blkcg_file = debugfs_create_file("bdi_wb_link", 0444, bdi_debug_root, + NULL, &memcg_blkcg_debug_fops); +#endif } static int bdi_debug_stats_show(struct seq_file *m, void *v) @@ -499,6 +515,210 @@ static void wb_exit(struct bdi_writeback *wb) #include +struct memcg_blkcg_link { + struct list_head list; + struct rcu_head rcu; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; +}; + +static RADIX_TREE(memcg_blkcg_tree, GFP_ATOMIC); +static DEFINE_SPINLOCK(memcg_blkcg_tree_lock); + +static int memcg_blkcg_link_show(struct seq_file *m, void *v) +{ + struct memcg_blkcg_link *link; + struct radix_tree_iter iter; + void **slot; + + seq_puts(m, "memory <---> blkio\n"); + rcu_read_lock(); + radix_tree_for_each_slot(slot, &memcg_blkcg_tree, &iter, 0) { + link = *slot; + seq_printf(m, "%s:%5lu <---> %s:%5lu\n", + link->memcg_css->cgroup->kn->name, + kernfs_ino(link->memcg_css->cgroup->kn), + (link->blkcg_css == blkcg_root_css) ? 
+ "root" : link->blkcg_css->cgroup->kn->name, + kernfs_ino(link->blkcg_css->cgroup->kn)); + } + rcu_read_unlock(); + + return 0; +} + +static int memcg_blkcg_link_open(struct inode *inode, struct file *file) +{ + return single_open(file, memcg_blkcg_link_show, inode->i_private); +} + +static const struct file_operations memcg_blkcg_debug_fops = { + .open = memcg_blkcg_link_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) +{ + struct memcg_blkcg_link *link; + int i; + + if (!cgroup_writeback_support_v1()) + return 0; + + for (i = 0; i < count; i++) { + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + free_memcg_blkcg_links(tmp_links); + return -ENOMEM; + } + list_add(&link->list, tmp_links); + } + return 0; +} + +static void link_free(struct rcu_head *head) +{ + struct memcg_blkcg_link *link = container_of(head, + struct memcg_blkcg_link, rcu); + kfree(link); +} + +void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset) +{ + struct memcg_blkcg_link *link; + struct cgroup_subsys_state *blkcg_css; + struct cgroup_subsys_state *memcg_css; + int err; + + if (!cgroup_writeback_support_v1()) + return; + + if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) + return; + + WARN_ON(list_empty(tmp_links)); + + memcg_css = cset->subsys[memory_cgrp_id]; + blkcg_css = cset->subsys[io_cgrp_id]; + + if ((memcg_css == &root_mem_cgroup->css) || + (blkcg_css == blkcg_root_css)) + return; + + rcu_read_lock(); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link && ((link->blkcg_css == blkcg_css) || + (link->blkcg_css == blkcg_root_css))) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + trace_insert_memcg_blkcg_link(memcg_css, blkcg_css, + link ? 
link->blkcg_css : NULL); + + spin_lock(&memcg_blkcg_tree_lock); + if (link) { + radix_tree_delete(&memcg_blkcg_tree, memcg_css->id); + call_rcu(&link->rcu, link_free); + blkcg_css = blkcg_root_css; + } + + link = list_first_entry(tmp_links, struct memcg_blkcg_link, list); + list_del_init(&link->list); + + link->memcg_css = memcg_css; + link->blkcg_css = blkcg_css; + err = radix_tree_insert(&memcg_blkcg_tree, memcg_css->id, link); + WARN_ON(err); + + spin_unlock(&memcg_blkcg_tree_lock); +} + +void free_memcg_blkcg_links(struct list_head *links_to_free) +{ + struct memcg_blkcg_link *link, *tmp_link; + + list_for_each_entry_safe(link, tmp_link, links_to_free, list) { + list_del(&link->list); + kfree(link); + } +} + +static void delete_memcg_link(struct cgroup_subsys_state *memcg_css) +{ + struct memcg_blkcg_link *link; + + spin_lock(&memcg_blkcg_tree_lock); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link) { + radix_tree_delete(&memcg_blkcg_tree, memcg_css->id); + call_rcu(&link->rcu, link_free); + } + spin_unlock(&memcg_blkcg_tree_lock); +} + +static void delete_blkcg_link(struct cgroup_subsys_state *blkcg_css) +{ + struct memcg_blkcg_link *link; + struct radix_tree_iter iter; + void **slot; + + spin_lock(&memcg_blkcg_tree_lock); + radix_tree_for_each_slot(slot, &memcg_blkcg_tree, &iter, 0) { + link = *slot; + if (link->blkcg_css == blkcg_css) { + radix_tree_delete(&memcg_blkcg_tree, link->memcg_css->id); + call_rcu(&link->rcu, link_free); + } + } + spin_unlock(&memcg_blkcg_tree_lock); +} + +void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css) +{ + if (!cgroup_writeback_support_v1()) + return; + + if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) + return; + + if (ss->id == io_cgrp_id) + delete_blkcg_link(css); + if (ss->id == memory_cgrp_id) + delete_memcg_link(css); +} + +static struct cgroup_subsys_state *find_blkcg_css(struct cgroup_subsys_state *memcg_css) +{ + struct memcg_blkcg_link *link; + struct cgroup_subsys_state *blkcg_css; + + rcu_read_lock(); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link) { + blkcg_css = link->blkcg_css; + if (css_tryget_online(blkcg_css)) + goto out; + } + + /* + * If not blkcg_root_css and tryget failed, + * get a reference of blkcg_root_css and return. + */ + blkcg_css = blkcg_root_css; + css_get(blkcg_css); + +out: + rcu_read_unlock(); + return blkcg_css; +} + /* * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and * memcg->cgwb_list. bdi->cgwb_tree is also RCU protected. 
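The memcg/blkcg associations tracked in memcg_blkcg_tree can be inspected at runtime through the bdi_wb_link file registered in bdi_debug_init() above; on a cgroup v1 system with cgwb_v1 enabled, the output follows the format strings in memcg_blkcg_link_show() (the cgroup names and inode numbers below are illustrative)::

    # cat /sys/kernel/debug/bdi/bdi_wb_link
    memory <---> blkio
    mem_a:  1234 <---> blk_a:  5678
    mem_b:  2345 <--->  root:     1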
@@ -583,7 +803,10 @@ static int cgwb_create(struct backing_dev_info *bdi, int ret = 0; memcg = mem_cgroup_from_css(memcg_css); - blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + else + blkcg_css = find_blkcg_css(memcg_css); memcg_cgwb_list = &memcg->cgwb_list; blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css); @@ -704,7 +927,10 @@ struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, struct cgroup_subsys_state *blkcg_css; /* see whether the blkcg association has changed */ - blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + else + blkcg_css = find_blkcg_css(memcg_css); if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb))) wb = NULL; css_put(blkcg_css); diff --git a/mm/cma.c b/mm/cma.c index ac363f16d3923c81356e846485bfd84cc7d76846..304a4e69180c98cb52787d3ef648417aff28517f 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -36,7 +36,10 @@ #include "internal.h" #include "cma.h" -struct cma cma_areas[MAX_CMA_AREAS]; +static struct cma cma_areas_data[MAX_CMA_AREAS]; +static unsigned int cma_areas_size = MAX_CMA_AREAS; +struct cma *cma_areas = cma_areas_data; + unsigned cma_area_count; static DEFINE_MUTEX(cma_mutex); @@ -159,6 +162,25 @@ void __init cma_reserve_pages_on_error(struct cma *cma) cma->reserve_pages_on_error = true; } +int __init cma_alloc_areas(unsigned int max_cma_size) +{ + struct cma *data; + + if (max_cma_size <= MAX_CMA_AREAS) + return 0; + + if (cma_area_count || cma_areas != cma_areas_data) + return -EPERM; + + data = memblock_alloc(max_cma_size * sizeof(*cma_areas), SMP_CACHE_BYTES); + if (!data) + return -ENOMEM; + + cma_areas = data; + cma_areas_size = max_cma_size; + return 0; +} + /** * cma_init_reserved_mem() - create custom contiguous area from reserved memory * @base: Base address of the reserved area @@ -179,7 +201,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, struct cma *cma; /* Sanity checks */ - if (cma_area_count == ARRAY_SIZE(cma_areas)) { + if (cma_area_count == cma_areas_size) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } @@ -252,7 +274,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", __func__, &size, &base, &limit, &alignment); - if (cma_area_count == ARRAY_SIZE(cma_areas)) { + if (cma_area_count == cma_areas_size) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } @@ -470,10 +492,12 @@ struct page *cma_alloc(struct cma *cma, unsigned long count, spin_unlock_irq(&cma->lock); pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); - mutex_lock(&cma_mutex); + if (!cma->no_mutex) + mutex_lock(&cma_mutex); ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, GFP_KERNEL | (no_warn ? 
__GFP_NOWARN : 0)); - mutex_unlock(&cma_mutex); + if (!cma->no_mutex) + mutex_unlock(&cma_mutex); if (ret == 0) { page = pfn_to_page(pfn); break; @@ -587,3 +611,11 @@ int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) return 0; } + +void cma_enable_concurrency(struct cma *cma) +{ + if (!cma) + return; + + cma->no_mutex = true; +} diff --git a/mm/cma.h b/mm/cma.h index 88a0595670b766cd9780476f7dacb71fd9979e34..50275c1d98cc656497018577da57bb9417e47846 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -16,6 +16,7 @@ struct cma { unsigned long *bitmap; unsigned int order_per_bit; /* Order of pages represented by one bit */ spinlock_t lock; + bool no_mutex; #ifdef CONFIG_CMA_DEBUGFS struct hlist_head mem_head; spinlock_t mem_head_lock; @@ -33,7 +34,7 @@ struct cma { bool reserve_pages_on_error; }; -extern struct cma cma_areas[MAX_CMA_AREAS]; +extern struct cma *cma_areas; extern unsigned cma_area_count; static inline unsigned long cma_bitmap_maxno(struct cma *cma) diff --git a/mm/compaction.c b/mm/compaction.c index 61c741f11e9bb3fd2229ad2f704ef0d67b317d03..219e6b48f5fc45981c155e8c701411b639936b0f 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -40,9 +40,22 @@ static inline void count_compact_events(enum vm_event_item item, long delta) { count_vm_events(item, delta); } + +/* + * order == -1 is expected when compacting proactively via + * 1. /proc/sys/vm/compact_memory + * 2. /sys/devices/system/node/nodex/compact + * 3. /proc/sys/vm/compaction_proactiveness + */ +static inline bool is_via_compact_memory(int order) +{ + return order == -1; +} + #else #define count_compact_event(item) do { } while (0) #define count_compact_events(item, delta) do { } while (0) +static inline bool is_via_compact_memory(int order) { return false; } #endif #if defined CONFIG_COMPACTION || defined CONFIG_CMA @@ -66,45 +79,56 @@ static inline void count_compact_events(enum vm_event_item item, long delta) #define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT) #endif -static unsigned long release_freepages(struct list_head *freelist) +static void split_map_pages(struct list_head *freepages) { + unsigned int i, order; struct page *page, *next; - unsigned long high_pfn = 0; + LIST_HEAD(tmp_list); - list_for_each_entry_safe(page, next, freelist, lru) { - unsigned long pfn = page_to_pfn(page); - list_del(&page->lru); - __free_page(page); - if (pfn > high_pfn) - high_pfn = pfn; - } + for (order = 0; order < NR_PAGE_ORDERS; order++) { + list_for_each_entry_safe(page, next, &freepages[order], lru) { + unsigned int nr_pages; - return high_pfn; + list_del(&page->lru); + + nr_pages = 1 << order; + + post_alloc_hook(page, order, __GFP_MOVABLE); + if (order) + split_page(page, order); + + for (i = 0; i < nr_pages; i++) { + list_add(&page->lru, &tmp_list); + page++; + } + } + list_splice_init(&tmp_list, &freepages[0]); + } } -static void split_map_pages(struct list_head *list) +static unsigned long release_free_list(struct list_head *freepages) { - unsigned int i, order, nr_pages; - struct page *page, *next; - LIST_HEAD(tmp_list); - - list_for_each_entry_safe(page, next, list, lru) { - list_del(&page->lru); + int order; + unsigned long high_pfn = 0; - order = page_private(page); - nr_pages = 1 << order; + for (order = 0; order < NR_PAGE_ORDERS; order++) { + struct page *page, *next; - post_alloc_hook(page, order, __GFP_MOVABLE); - if (order) - split_page(page, order); + list_for_each_entry_safe(page, next, &freepages[order], lru) { + unsigned long pfn = page_to_pfn(page); - for (i = 0; i < nr_pages; i++) { - 
list_add(&page->lru, &tmp_list); - page++; + list_del(&page->lru); + /* + * Convert free pages into post allocation pages, so + * that we can free them via __free_page. + */ + post_alloc_hook(page, order, __GFP_MOVABLE); + __free_pages(page, order); + if (pfn > high_pfn) + high_pfn = pfn; } } - - list_splice(&tmp_list, list); + return high_pfn; } #ifdef CONFIG_COMPACTION @@ -657,7 +681,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, nr_scanned += isolated - 1; total_isolated += isolated; cc->nr_freepages += isolated; - list_add_tail(&page->lru, freelist); + list_add_tail(&page->lru, &freelist[order]); if (!strict && cc->nr_migratepages <= cc->nr_freepages) { blockpfn += isolated; @@ -723,7 +747,11 @@ isolate_freepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn) { unsigned long isolated, pfn, block_start_pfn, block_end_pfn; - LIST_HEAD(freelist); + int order; + struct list_head tmp_freepages[NR_PAGE_ORDERS]; + + for (order = 0; order < NR_PAGE_ORDERS; order++) + INIT_LIST_HEAD(&tmp_freepages[order]); pfn = start_pfn; block_start_pfn = pageblock_start_pfn(pfn); @@ -754,7 +782,7 @@ isolate_freepages_range(struct compact_control *cc, break; isolated = isolate_freepages_block(cc, &isolate_start_pfn, - block_end_pfn, &freelist, 0, true); + block_end_pfn, tmp_freepages, 0, true); /* * In strict mode, isolate_freepages_block() returns 0 if @@ -771,15 +799,15 @@ isolate_freepages_range(struct compact_control *cc, */ } - /* __isolate_free_page() does not map the pages */ - split_map_pages(&freelist); - if (pfn < end_pfn) { /* Loop terminated early, cleanup. */ - release_freepages(&freelist); + release_free_list(tmp_freepages); return 0; } + /* __isolate_free_page() does not map the pages */ + split_map_pages(tmp_freepages); + /* We don't use freelists for anything. */ return pfn; } @@ -817,6 +845,32 @@ static bool too_many_isolated(struct compact_control *cc) return too_many; } +/** + * skip_isolation_on_order() - determine when to skip folio isolation based on + * folio order and compaction target order + * @order: to-be-isolated folio order + * @target_order: compaction target order + * + * This avoids unnecessary folio isolations during compaction. + */ +static bool skip_isolation_on_order(int order, int target_order) +{ + /* + * Unless we are performing global compaction (i.e., + * is_via_compact_memory), skip any folios that are larger than the + * target order: we wouldn't be here if we'd have a free folio with + * the desired target_order, so migrating this folio would likely fail + * later. + */ + if (!is_via_compact_memory(target_order) && order >= target_order) + return true; + /* + * We limit memory compaction to pageblocks and won't try + * creating free blocks of memory that are larger than that. + */ + return order >= pageblock_order; +} + /** * isolate_migratepages_block() - isolate all migrate-able pages within * a single pageblock @@ -947,7 +1001,22 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, valid_page = page; } - if (PageHuge(page) && cc->alloc_contig) { + if (PageHuge(page)) { + /* + * skip hugetlbfs if we are not compacting for pages + * bigger than its order. THPs and other compound pages + * are handled below. 
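+ * For example, when the scanner lands on the head page of an order-9 (2MB) hugetlb folio, low_pfn is advanced past all 512 pages in one step instead of failing on every tail page.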
+ */ + if (!cc->alloc_contig) { + const unsigned int order = compound_order(page); + + if (order <= MAX_ORDER) { + low_pfn += (1UL << order) - 1; + nr_scanned += (1UL << order) - 1; + } + goto isolate_fail; + } + /* for alloc_contig case */ if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; @@ -1008,21 +1077,24 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, } /* - * Regardless of being on LRU, compound pages such as THP and - * hugetlbfs are not to be compacted unless we are attempting - * an allocation much larger than the huge page size (eg CMA). - * We can potentially save a lot of iterations if we skip them - * at once. The check is racy, but we can consider only valid - * values and the only danger is skipping too much. + * Regardless of being on LRU, compound pages such as THP + * (hugetlbfs is handled above) are not to be compacted unless + * we are attempting an allocation larger than the compound + * page size. We can potentially save a lot of iterations if we + * skip them at once. The check is racy, but we can consider + * only valid values and the only danger is skipping too much. */ if (PageCompound(page) && !cc->alloc_contig) { const unsigned int order = compound_order(page); - if (likely(order <= MAX_ORDER)) { - low_pfn += (1UL << order) - 1; - nr_scanned += (1UL << order) - 1; + /* Skip based on page order and compaction target order. */ + if (skip_isolation_on_order(order, cc->order)) { + if (order <= MAX_ORDER) { + low_pfn += (1UL << order) - 1; + nr_scanned += (1UL << order) - 1; + } + goto isolate_fail; } - goto isolate_fail; } /* @@ -1147,10 +1219,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, } /* - * folio become large since the non-locked check, - * and it's on LRU. + * Check LRU folio order under the lock */ - if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) { + if (unlikely(skip_isolation_on_order(folio_order(folio), + cc->order) && + !cc->alloc_contig)) { low_pfn += folio_nr_pages(folio) - 1; nr_scanned += folio_nr_pages(folio) - 1; folio_set_lru(folio); @@ -1347,12 +1420,14 @@ static bool suitable_migration_target(struct compact_control *cc, { /* If the page is a large free page, then disallow migration */ if (PageBuddy(page)) { + int order = cc->order > 0 ? cc->order : pageblock_order; + /* * We are checking page_order without zone->lock taken. But * the only small danger is that we skip a potentially suitable * pageblock, so it's not worth to check order for valid range. 
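* The cc->order based threshold below additionally rejects blocks that already contain a free buddy of at least the requested order, so compaction does not break up exactly the kind of free page it is trying to produce.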
*/ - if (buddy_order_unsafe(page) >= pageblock_order) + if (buddy_order_unsafe(page) >= order) return false; } @@ -1440,7 +1515,7 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn) if (!page) return; - isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); + isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false); /* Skip this pageblock in the future as it's full or nearly full */ if (start_pfn == end_pfn && !cc->no_set_skip_hint) @@ -1569,7 +1644,7 @@ static void fast_isolate_freepages(struct compact_control *cc) nr_scanned += nr_isolated - 1; total_isolated += nr_isolated; cc->nr_freepages += nr_isolated; - list_add_tail(&page->lru, &cc->freepages); + list_add_tail(&page->lru, &cc->freepages[order]); count_compact_events(COMPACTISOLATED, nr_isolated); } else { /* If isolation fails, abort the search */ @@ -1643,13 +1718,12 @@ static void isolate_freepages(struct compact_control *cc) unsigned long isolate_start_pfn; /* exact pfn we start at */ unsigned long block_end_pfn; /* end of current pageblock */ unsigned long low_pfn; /* lowest pfn scanner is able to scan */ - struct list_head *freelist = &cc->freepages; unsigned int stride; /* Try a small search of the free lists for a candidate */ fast_isolate_freepages(cc); if (cc->nr_freepages) - goto splitmap; + return; /* * Initialise the free scanner. The starting point is where we last @@ -1709,7 +1783,7 @@ static void isolate_freepages(struct compact_control *cc) /* Found a block suitable for isolating free pages from. */ nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn, - block_end_pfn, freelist, stride, false); + block_end_pfn, cc->freepages, stride, false); /* Update the skip hint if the full pageblock was scanned */ if (isolate_start_pfn == block_end_pfn) @@ -1750,10 +1824,6 @@ static void isolate_freepages(struct compact_control *cc) * and the loop terminated due to isolate_start_pfn < low_pfn */ cc->free_pfn = isolate_start_pfn; - -splitmap: - /* __isolate_free_page() does not map the pages */ - split_map_pages(freelist); } /* @@ -1764,19 +1834,47 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; struct folio *dst; + int order = folio_order(src); + bool has_isolated_pages = false; + int start_order; + struct page *freepage; + unsigned long size; + +again: + for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++) + if (!list_empty(&cc->freepages[start_order])) + break; - if (list_empty(&cc->freepages)) { - isolate_freepages(cc); - - if (list_empty(&cc->freepages)) + /* no free pages in the list */ + if (start_order == NR_PAGE_ORDERS) { + if (has_isolated_pages) return NULL; + isolate_freepages(cc); + has_isolated_pages = true; + goto again; } - dst = list_entry(cc->freepages.next, struct folio, lru); - list_del(&dst->lru); - cc->nr_freepages--; + freepage = list_first_entry(&cc->freepages[start_order], struct page, + lru); + size = 1 << start_order; + + list_del(&freepage->lru); + + while (start_order > order) { + start_order--; + size >>= 1; + + list_add(&freepage[size].lru, &cc->freepages[start_order]); + set_page_private(&freepage[size], start_order); + } + dst = (struct folio *)freepage; - return dst; + post_alloc_hook(&dst->page, order, __GFP_MOVABLE); + if (order) + prep_compound_page(&dst->page, order); + cc->nr_freepages -= 1 << order; + cc->nr_migratepages -= 1 << order; + return page_rmappable_folio(&dst->page); } /* @@ -1787,9 +1885,19 @@ static 
struct folio *compaction_alloc(struct folio *src, unsigned long data) static void compaction_free(struct folio *dst, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; + int order = folio_order(dst); + struct page *page = &dst->page; - list_add(&dst->lru, &cc->freepages); - cc->nr_freepages++; + if (folio_put_testzero(dst)) { + free_pages_prepare(page, order); + list_add(&dst->lru, &cc->freepages[order]); + cc->nr_freepages += 1 << order; + } + cc->nr_migratepages += 1 << order; + /* + * someone else has referenced the page, we cannot take it back to our + * free list. + */ } /* possible outcome of isolate_migratepages */ @@ -1809,7 +1917,7 @@ static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNE * aggressively the kernel should compact memory in the * background. It takes values in the range [0, 100]. */ -static unsigned int __read_mostly sysctl_compaction_proactiveness = 20; +static unsigned int __read_mostly sysctl_compaction_proactiveness; static int sysctl_extfrag_threshold = 500; static int __read_mostly sysctl_compact_memory; @@ -2065,15 +2173,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; } -/* - * order == -1 is expected when compacting via - * /proc/sys/vm/compact_memory - */ -static inline bool is_via_compact_memory(int order) -{ - return order == -1; -} - /* * Determine whether kswapd is (or recently was!) running on this node. * @@ -2386,7 +2485,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) unsigned long last_migrated_pfn; const bool sync = cc->mode != MIGRATE_ASYNC; bool update_cached; - unsigned int nr_succeeded = 0; + unsigned int nr_succeeded = 0, nr_migratepages; + int order; /* * These counters track activities during zone compaction. Initialize @@ -2396,7 +2496,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) cc->total_free_scanned = 0; cc->nr_migratepages = 0; cc->nr_freepages = 0; - INIT_LIST_HEAD(&cc->freepages); + for (order = 0; order < NR_PAGE_ORDERS; order++) + INIT_LIST_HEAD(&cc->freepages[order]); INIT_LIST_HEAD(&cc->migratepages); cc->migratetype = gfp_migratetype(cc->gfp_mask); @@ -2512,11 +2613,17 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) pageblock_start_pfn(cc->migrate_pfn - 1)); } + /* + * Record the number of pages to migrate since the + * compaction_alloc/free() will update cc->nr_migratepages + * properly. + */ + nr_migratepages = cc->nr_migratepages; err = migrate_pages(&cc->migratepages, compaction_alloc, compaction_free, (unsigned long)cc, cc->mode, MR_COMPACTION, &nr_succeeded); - trace_mm_compaction_migratepages(cc, nr_succeeded); + trace_mm_compaction_migratepages(nr_migratepages, nr_succeeded); /* All pages were either migrated or will be released */ cc->nr_migratepages = 0; @@ -2590,7 +2697,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) * so we don't leave any returned pages behind in the next attempt. 
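* release_free_list() returns the highest pfn it handed back to the buddy allocator, which is used below to rewind the cached free-scanner position so those pages can be found again.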
*/ if (cc->nr_freepages > 0) { - unsigned long free_pfn = release_freepages(&cc->freepages); + unsigned long free_pfn = release_free_list(cc->freepages); cc->nr_freepages = 0; VM_BUG_ON(free_pfn == 0); @@ -2609,7 +2716,6 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret); - VM_BUG_ON(!list_empty(&cc->freepages)); VM_BUG_ON(!list_empty(&cc->migratepages)); return ret; diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index ce06b2884789a2bd97016b93916c8aa81dc288b7..9d4d27399f801530501e9bfb9fddf9cb973d505c 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -243,27 +243,6 @@ early_memremap_prot(resource_size_t phys_addr, unsigned long size, } #endif -#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) - -void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) -{ - unsigned long slop, clen; - char *p; - - while (size) { - slop = offset_in_page(src); - clen = size; - if (clen > MAX_MAP_CHUNK - slop) - clen = MAX_MAP_CHUNK - slop; - p = early_memremap(src & PAGE_MASK, clen + slop); - memcpy(dest, p + slop, clen); - early_memunmap(p, clen + slop); - dest += clen; - src += clen; - size -= clen; - } -} - #else /* CONFIG_MMU */ void __init __iomem * diff --git a/mm/filemap.c b/mm/filemap.c index 2c308413387ffbf8058e701a495d3ff4706bacca..92c9cbb8823be870db873e88eda238a774886cc3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -47,6 +47,9 @@ #include #include #include +#ifdef CONFIG_PAGECACHE_LIMIT +#include +#endif #include "internal.h" #define CREATE_TRACE_POINTS @@ -113,11 +116,11 @@ * ->i_pages lock (try_to_unmap_one) * ->lruvec->lru_lock (follow_page->mark_page_accessed) * ->lruvec->lru_lock (check_pte_range->isolate_lru_page) - * ->private_lock (page_remove_rmap->set_page_dirty) - * ->i_pages lock (page_remove_rmap->set_page_dirty) - * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) - * ->inode->i_lock (page_remove_rmap->set_page_dirty) - * ->memcg->move_lock (page_remove_rmap->folio_memcg_lock) + * ->private_lock (folio_remove_rmap_pte->set_page_dirty) + * ->i_pages lock (folio_remove_rmap_pte->set_page_dirty) + * bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty) + * ->inode->i_lock (folio_remove_rmap_pte->set_page_dirty) + * ->memcg->move_lock (folio_remove_rmap_pte->folio_memcg_lock) * bdi.wb->list_lock (zap_pte_range->set_page_dirty) * ->inode->i_lock (zap_pte_range->set_page_dirty) * ->private_lock (zap_pte_range->block_dirty_folio) @@ -131,11 +134,8 @@ static void page_cache_delete(struct address_space *mapping, mapping_set_update(&xas, mapping); - /* hugetlb pages are represented by a single entry in the xarray */ - if (!folio_test_hugetlb(folio)) { - xas_set_order(&xas, folio->index, folio_order(folio)); - nr = folio_nr_pages(folio); - } + xas_set_order(&xas, folio->index, folio_order(folio)); + nr = folio_nr_pages(folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); @@ -234,7 +234,7 @@ void filemap_free_folio(struct address_space *mapping, struct folio *folio) if (free_folio) free_folio(folio); - if (folio_test_large(folio) && !folio_test_hugetlb(folio)) + if (folio_test_large(folio)) refs = folio_nr_pages(folio); folio_put_refs(folio, refs); } @@ -857,14 +857,18 @@ noinline int __filemap_add_folio(struct address_space *mapping, if (!huge) { int error = mem_cgroup_charge(folio, NULL, gfp); - VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); if (error) return error; charged = true; - xas_set_order(&xas, index, folio_order(folio)); - nr = 
folio_nr_pages(folio); +#ifdef CONFIG_PAGECACHE_LIMIT + memcg_pagecache_shrink(folio_memcg(folio), gfp); +#endif } + VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); + xas_set_order(&xas, index, folio_order(folio)); + nr = folio_nr_pages(folio); + gfp &= GFP_RECLAIM_MASK; folio_ref_add(folio, nr); folio->mapping = mapping; @@ -999,9 +1003,15 @@ struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) folio = __folio_alloc_node(gfp, order, n); } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie)); + if (folio) + count_mthp_stat(order, MTHP_STAT_FILE_ALLOC); return folio; } - return folio_alloc(gfp, order); + + folio = folio_alloc(gfp, order); + if (folio) + count_mthp_stat(order, MTHP_STAT_FILE_ALLOC); + return folio; } EXPORT_SYMBOL(filemap_alloc_folio); #endif @@ -1555,6 +1565,28 @@ void folio_unlock(struct folio *folio) } EXPORT_SYMBOL(folio_unlock); +/** + * folio_end_read - End read on a folio. + * @folio: The folio. + * @success: True if all reads completed successfully. + * + * When all reads against a folio have completed, filesystems should + * call this function to let the pagecache know that no more reads + * are outstanding. This will unlock the folio and wake up any thread + * sleeping on the lock. The folio will also be marked uptodate if all + * reads succeeded. + * + * Context: May be called from interrupt or process context. May not be + * called from NMI context. + */ +void folio_end_read(struct folio *folio, bool success) +{ + if (likely(success)) + folio_mark_uptodate(folio); + folio_unlock(folio); +} +EXPORT_SYMBOL(folio_end_read); + /** * folio_end_private_2 - Clear PG_private_2 and wake any waiters. * @folio: The folio. @@ -1654,8 +1686,10 @@ EXPORT_SYMBOL(folio_end_writeback); */ void __folio_lock(struct folio *folio) { + task_set_wait_res(TASK_WAIT_FOLIO, folio); folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE, EXCLUSIVE); + task_clear_wait_res(); } EXPORT_SYMBOL(__folio_lock); @@ -1932,6 +1966,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, no_page: if (!folio && (fgp_flags & FGP_CREAT)) { unsigned order = FGF_GET_ORDER(fgp_flags); + unsigned long orders; int err; if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) @@ -1947,13 +1982,15 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, if (!mapping_large_folio_support(mapping)) order = 0; - if (order > MAX_PAGECACHE_ORDER) - order = MAX_PAGECACHE_ORDER; + + orders = file_orders_always() | BIT(0); + orders &= BIT(order + 1) - 1; /* If we're not aligned, allocate a smaller folio */ if (index & ((1UL << order) - 1)) - order = __ffs(index); + orders &= BIT(__ffs(index) + 1) - 1; + order = highest_order(orders); - do { + while (orders) { gfp_t alloc_gfp = gfp; err = -ENOMEM; @@ -1972,7 +2009,9 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, break; folio_put(folio); folio = NULL; - } while (order-- > 0); + + order = next_order(&orders, order); + }; if (err == -EEXIST) goto repeat; @@ -2059,17 +2098,20 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start, if (!folio_batch_add(fbatch, folio)) break; } - rcu_read_unlock(); if (folio_batch_count(fbatch)) { - unsigned long nr = 1; + unsigned long nr; int idx = folio_batch_count(fbatch) - 1; folio = fbatch->folios[idx]; - if (!xa_is_value(folio) && !folio_test_hugetlb(folio)) + if (!xa_is_value(folio)) nr = folio_nr_pages(folio); - *start = indices[idx] + nr; + else + nr = 1 << 
xa_get_order(&mapping->i_pages, indices[idx]); + *start = round_down(indices[idx] + nr, nr); } + rcu_read_unlock(); + return folio_batch_count(fbatch); } @@ -2101,10 +2143,17 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, rcu_read_lock(); while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { + unsigned long base; + unsigned long nr; + if (!xa_is_value(folio)) { - if (folio->index < *start) + nr = folio_nr_pages(folio); + base = folio->index; + /* Omit large folio which begins before the start */ + if (base < *start) goto put; - if (folio_next_index(folio) - 1 > end) + /* Omit large folio which extends beyond the end */ + if (base + nr - 1 > end) goto put; if (!folio_trylock(folio)) goto put; @@ -2113,7 +2162,19 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, goto unlock; VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), folio); + } else { + nr = 1 << xa_get_order(&mapping->i_pages, xas.xa_index); + base = xas.xa_index & ~(nr - 1); + /* Omit order>0 value which begins before the start */ + if (base < *start) + continue; + /* Omit order>0 value which extends beyond the end */ + if (base + nr - 1 > end) + break; } + + /* Update start now so that last update is correct on return */ + *start = base + nr; indices[fbatch->nr] = xas.xa_index; if (!folio_batch_add(fbatch, folio)) break; @@ -2125,15 +2186,6 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, } rcu_read_unlock(); - if (folio_batch_count(fbatch)) { - unsigned long nr = 1; - int idx = folio_batch_count(fbatch) - 1; - - folio = fbatch->folios[idx]; - if (!xa_is_value(folio) && !folio_test_hugetlb(folio)) - nr = folio_nr_pages(folio); - *start = indices[idx] + nr; - } return folio_batch_count(fbatch); } @@ -2171,9 +2223,6 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, continue; if (!folio_batch_add(fbatch, folio)) { unsigned long nr = folio_nr_pages(folio); - - if (folio_test_hugetlb(folio)) - nr = 1; *start = folio->index + nr; goto out; } @@ -2239,9 +2288,6 @@ unsigned filemap_get_folios_contig(struct address_space *mapping, if (!folio_batch_add(fbatch, folio)) { nr = folio_nr_pages(folio); - - if (folio_test_hugetlb(folio)) - nr = 1; *start = folio->index + nr; goto out; } @@ -2258,10 +2304,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping, if (nr) { folio = fbatch->folios[nr - 1]; - if (folio_test_hugetlb(folio)) - *start = folio->index + 1; - else - *start = folio_next_index(folio); + *start = folio->index + folio_nr_pages(folio); } out: rcu_read_unlock(); @@ -2299,9 +2342,6 @@ unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, continue; if (!folio_batch_add(fbatch, folio)) { unsigned long nr = folio_nr_pages(folio); - - if (folio_test_hugetlb(folio)) - nr = 1; *start = folio->index + nr; goto out; } @@ -3180,6 +3220,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) struct file *fpin = NULL; unsigned long vm_flags = vmf->vma->vm_flags; unsigned int mmap_miss; + int exec_order = file_exec_order(); #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* Use the readahead code, even if readahead is disabled */ @@ -3199,6 +3240,16 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) } #endif + /* If explicit order is set for exec mappings, use it. 
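+ * e.g. with exec_order == 4, executable text is read in naturally aligned 16-page (64KB with 4KB pages) chunks: ra->size becomes 16, async readahead is disabled and the fault index is rounded down to the chunk boundary.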
*/ + if ((vm_flags & VM_EXEC) && exec_order >= 0) { + fpin = maybe_unlock_mmap_for_io(vmf, fpin); + ra->size = 1UL << exec_order; + ra->async_size = 0; + ractl._index &= ~((unsigned long)ra->size - 1); + page_cache_ra_order(&ractl, ra, exec_order); + return fpin; + } + /* If we don't want any read-ahead, don't bother */ if (vm_flags & VM_RAND_READ) return fpin; @@ -3511,7 +3562,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas, static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, struct folio *folio, unsigned long start, unsigned long addr, unsigned int nr_pages, - unsigned int *mmap_miss) + unsigned long *rss, unsigned int *mmap_miss) { vm_fault_t ret = 0; struct page *page = folio_page(folio, start); @@ -3537,6 +3588,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, skip: if (count) { set_pte_range(vmf, folio, page, count, addr); + *rss += count; folio_ref_add(folio, count); if (in_range(vmf->address, addr, count * PAGE_SIZE)) ret = VM_FAULT_NOPAGE; @@ -3551,6 +3603,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, if (count) { set_pte_range(vmf, folio, page, count, addr); + *rss += count; folio_ref_add(folio, count); if (in_range(vmf->address, addr, count * PAGE_SIZE)) ret = VM_FAULT_NOPAGE; @@ -3563,7 +3616,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, struct folio *folio, unsigned long addr, - unsigned int *mmap_miss) + unsigned long *rss, unsigned int *mmap_miss) { vm_fault_t ret = 0; struct page *page = &folio->page; @@ -3585,6 +3638,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, ret = VM_FAULT_NOPAGE; set_pte_range(vmf, folio, page, 1, addr); + (*rss)++; folio_ref_inc(folio); return ret; @@ -3601,7 +3655,8 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, XA_STATE(xas, &mapping->i_pages, start_pgoff); struct folio *folio; vm_fault_t ret = 0; - unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved; + unsigned long rss = 0; + unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved, folio_type; rcu_read_lock(); folio = next_uptodate_folio(&xas, mapping, end_pgoff); @@ -3620,6 +3675,8 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, folio_put(folio); goto out; } + + folio_type = mm_counter_file(&folio->page); do { unsigned long end; @@ -3631,15 +3688,16 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, if (!folio_test_large(folio)) ret |= filemap_map_order0_folio(vmf, - folio, addr, &mmap_miss); + folio, addr, &rss, &mmap_miss); else ret |= filemap_map_folio_range(vmf, folio, xas.xa_index - folio->index, addr, - nr_pages, &mmap_miss); + nr_pages, &rss, &mmap_miss); folio_unlock(folio); folio_put(folio); } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL); + add_mm_counter(vma->vm_mm, folio_type, rss); pte_unmap_unlock(vmf->pte, vmf->ptl); out: rcu_read_unlock(); diff --git a/mm/folio-compat.c b/mm/folio-compat.c index 10c3247542cbefc18871f109c1c70a2ff2bfb12f..cde4a40f6645030cf50390ec4542594d4711a5ea 100644 --- a/mm/folio-compat.c +++ b/mm/folio-compat.c @@ -64,6 +64,12 @@ int __set_page_dirty_nobuffers(struct page *page) } EXPORT_SYMBOL(__set_page_dirty_nobuffers); +int set_page_dirty_lock(struct page *page) +{ + return folio_mark_dirty_lock(page_folio(page)); +} +EXPORT_SYMBOL(set_page_dirty_lock); + bool clear_page_dirty_for_io(struct page *page) { return folio_clear_dirty_for_io(page_folio(page)); @@ -77,12 +83,6 @@ bool redirty_page_for_writepage(struct 
writeback_control *wbc, } EXPORT_SYMBOL(redirty_page_for_writepage); -void lru_cache_add_inactive_or_unevictable(struct page *page, - struct vm_area_struct *vma) -{ - folio_add_lru_vma(page_folio(page), vma); -} - int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp) { @@ -122,13 +122,3 @@ void putback_lru_page(struct page *page) { folio_putback_lru(page_folio(page)); } - -#ifdef CONFIG_MMU -void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, - unsigned long address) -{ - VM_BUG_ON_PAGE(PageTail(page), page); - - return folio_add_new_anon_rmap((struct folio *)page, vma, address); -} -#endif diff --git a/mm/gup.c b/mm/gup.c index fdd75384160d8dcdd411a746bae696ff48ae6255..fb07b00460d774f47a2304f3dc133be7d158968f 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -188,6 +188,19 @@ void unpin_user_page(struct page *page) } EXPORT_SYMBOL(unpin_user_page); +/** + * unpin_folio() - release a dma-pinned folio + * @folio: pointer to folio to be released + * + * Folios that were pinned via memfd_pin_folios() or other similar routines + * must be released either using unpin_folio() or unpin_folios(). + */ +void unpin_folio(struct folio *folio) +{ + gup_put_folio(folio, 1, FOLL_PIN); +} +EXPORT_SYMBOL_GPL(unpin_folio); + /** * folio_add_pin - Try to get an additional pin on a pinned folio * @folio: The folio to be pinned @@ -400,6 +413,40 @@ void unpin_user_pages(struct page **pages, unsigned long npages) } EXPORT_SYMBOL(unpin_user_pages); +/** + * unpin_folios() - release an array of gup-pinned folios. + * @folios: array of folios to be released. + * @nfolios: number of folios in the @folios array. + * + * For each folio in the @folios array, release the folio using gup_put_folio. + * + * Please see the unpin_folio() documentation for details. + */ +void unpin_folios(struct folio **folios, unsigned long nfolios) +{ + unsigned long i = 0, j; + + /* + * If this WARN_ON() fires, then the system *might* be leaking folios + * (by leaving them pinned), but probably not. More likely, gup/pup + * returned a hard -ERRNO error to the caller, who erroneously passed + * it here. + */ + if (WARN_ON(IS_ERR_VALUE(nfolios))) + return; + + while (i < nfolios) { + for (j = i + 1; j < nfolios; j++) + if (folios[i] != folios[j]) + break; + + if (folios[i]) + gup_put_folio(folios[i], j - i, FOLL_PIN); + i = j; + } +} +EXPORT_SYMBOL_GPL(unpin_folios); + /* * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's * lifecycle. Avoid setting the bit unless necessary, or it might cause write @@ -503,6 +550,12 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, (FOLL_PIN | FOLL_GET))) return ERR_PTR(-EINVAL); + if (is_pmd_transient(*pmd)) { + fixup_pmd(vma, pmd, address); + if (is_pmd_transient(*pmd)) + return no_page_table(vma, flags); + } + ptep = pte_offset_map_lock(mm, pmd, address, &ptl); if (!ptep) return no_page_table(vma, flags); @@ -2362,12 +2415,14 @@ EXPORT_SYMBOL(get_user_pages_unlocked); #ifdef CONFIG_HAVE_FAST_GUP /* - * Used in the GUP-fast path to determine whether a pin is permitted for a - * specific folio. + * Used in the GUP-fast path to determine whether GUP is permitted to work on + * a specific folio. * * This call assumes the caller has pinned the folio, that the lowest page table * level still points to this folio, and that interrupts have been disabled. * + * GUP-fast must reject all secretmem folios.
+ * * Writing to pinned file-backed dirty tracked folios is inherently problematic * (see comment describing the writable_file_mapping_allowed() function). We * therefore try to avoid the most egregious case of a long-term mapping doing @@ -2377,25 +2432,34 @@ EXPORT_SYMBOL(get_user_pages_unlocked); * in the fast path, so instead we whitelist known good cases and if in doubt, * fall back to the slow path. */ -static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) +static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags) { + bool reject_file_backed = false; struct address_space *mapping; + bool check_secretmem = false; unsigned long mapping_flags; /* * If we aren't pinning then no problematic write can occur. A long term * pin is the most egregious case so this is the one we disallow. */ - if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) != + if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) == (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) - return true; + reject_file_backed = true; - /* The folio is pinned, so we can safely access folio fields. */ + /* We hold a folio reference, so we can safely access folio fields. */ + + /* secretmem folios are always order-0 folios. */ + if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio)) + check_secretmem = true; + + if (!reject_file_backed && !check_secretmem) + return true; if (WARN_ON_ONCE(folio_test_slab(folio))) return false; - /* hugetlb mappings do not require dirty-tracking. */ + /* hugetlb neither requires dirty-tracking nor can be secretmem. */ if (folio_test_hugetlb(folio)) return true; @@ -2431,10 +2495,12 @@ static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) /* * At this point, we know the mapping is non-null and points to an - * address_space object. The only remaining whitelisted file system is - * shmem. + * address_space object. */ - return shmem_mapping(mapping); + if (check_secretmem && secretmem_mapping(mapping)) + return false; + /* The only remaining allowed file system is shmem. 
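+ * A long-term writable pin therefore succeeds only on shmem; for any other file-backed mapping GUP-fast bails out to the slow path.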
*/ + return !reject_file_backed || shmem_mapping(mapping); } static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, @@ -2576,6 +2642,9 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, int nr_start = *nr, ret = 0; pte_t *ptep, *ptem; + if (is_pmd_transient(pmd)) + return 0; + ptem = ptep = pte_offset_map(&pmd, addr); if (!ptep) return 0; @@ -2616,18 +2685,13 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, if (!folio) goto pte_unmap; - if (unlikely(folio_is_secretmem(folio))) { - gup_put_folio(folio, 1, flags); - goto pte_unmap; - } - if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { gup_put_folio(folio, 1, flags); goto pte_unmap; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, 1, flags); goto pte_unmap; } @@ -2824,7 +2888,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } @@ -2895,7 +2959,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } @@ -2939,7 +3003,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } @@ -2984,7 +3048,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 635f0f0f6860e8afa71a40fd2da642aa2ebb69cf..7611126e04a1c62ecbf88a21199baf9bb1665f72 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -71,37 +72,72 @@ static struct shrinker deferred_split_shrinker; static atomic_t huge_zero_refcount; struct page *huge_zero_page __read_mostly; unsigned long huge_zero_pfn __read_mostly = ~0UL; +unsigned long huge_anon_orders_always __read_mostly; +unsigned long huge_anon_orders_madvise __read_mostly; +unsigned long huge_anon_orders_inherit __read_mostly; +unsigned long huge_file_orders_always __read_mostly; +int huge_file_exec_order __read_mostly = -1; +static bool anon_orders_configured __initdata; +static bool file_orders_configured; + +unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs, + unsigned long orders) +{ + unsigned long supported_orders; + + /* Check the intersection of requested and supported orders. */ + if (vma_is_anonymous(vma)) + supported_orders = THP_ORDERS_ALL_ANON; + else if (vma_is_dax(vma)) + supported_orders = THP_ORDERS_ALL_FILE_DAX; + else + supported_orders = THP_ORDERS_ALL_FILE_DEFAULT; + + orders &= supported_orders; + if (!orders) + return 0; -bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, - bool smaps, bool in_pf, bool enforce_sysfs) -{ if (!vma->vm_mm) /* vdso */ - return false; + return 0; if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags)) - return false; + return 0; /* khugepaged doesn't collapse DAX vma, but page fault is fine.
*/ if (vma_is_dax(vma)) - return in_pf; + return in_pf ? orders : 0; /* - * Special VMA and hugetlb VMA. + * khugepaged special VMA and hugetlb VMA. * Must be checked after dax since some dax mappings may have * VM_MIXEDMAP set. */ - if (vm_flags & VM_NO_KHUGEPAGED) - return false; + if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED)) + return 0; /* - * Check alignment for file vma and size for both file and anon vma. + * Check alignment for file vma and size for both file and anon vma by + * filtering out the unsuitable orders. * * Skip the check for page fault. Huge fault does the check in fault - * handlers. And this check is not suitable for huge PUD fault. + * handlers. */ - if (!in_pf && - !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE))) - return false; + if (!in_pf) { + int order = highest_order(orders); + unsigned long addr; + + while (orders) { + addr = vma->vm_end - (PAGE_SIZE << order); + if (thp_vma_suitable_order(vma, addr, order)) + break; + order = next_order(&orders, order); + } + + if (!orders) + return 0; + } /* * Enabled via shmem mount options or sysfs settings. @@ -109,24 +145,34 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, * own flags. */ if (!in_pf && shmem_file(vma->vm_file)) - return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff, - !enforce_sysfs, vma->vm_mm, vm_flags); + return shmem_allowable_huge_orders(file_inode(vma->vm_file), + vma, vma->vm_pgoff, + !enforce_sysfs); - /* Enforce sysfs THP requirements as necessary */ - if (enforce_sysfs && - (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) && - !hugepage_flags_always()))) - return false; - - /* Only regular file is valid */ - if (!in_pf && file_thp_enabled(vma)) - return true; + if (!vma_is_anonymous(vma)) { + /* + * Enforce sysfs THP requirements as necessary. Anonymous vmas + * were already handled in thp_vma_allowable_orders(). + */ + if (enforce_sysfs && + (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) && + !hugepage_global_always()))) + return 0; - if (!vma_is_anonymous(vma)) - return false; + /* + * Trust that ->huge_fault() handlers know what they are doing + * in fault path. + */ + if (((in_pf || smaps)) && vma->vm_ops->huge_fault) + return orders; + /* Only regular file is valid in collapse path */ + if (((!in_pf || smaps)) && file_thp_enabled(vma)) + return orders; + return 0; + } if (vma_is_temporary_stack(vma)) - return false; + return 0; /* * THPeligible bit of smaps should show 1 for proper VMAs even @@ -136,9 +182,9 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, * the first page fault. */ if (!vma->anon_vma) - return (smaps || in_pf); + return (smaps || in_pf) ? 
orders : 0; - return true; + return orders; } static bool get_huge_zero_page(void) @@ -396,9 +442,354 @@ static const struct attribute_group hugepage_attr_group = { .attrs = hugepage_attr, }; +static void hugepage_exit_sysfs(struct kobject *hugepage_kobj); +static void thpsize_release(struct kobject *kobj); +static DEFINE_SPINLOCK(huge_anon_orders_lock); +static DEFINE_SPINLOCK(huge_file_orders_lock); +static LIST_HEAD(thpsize_list); + +static ssize_t anon_enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int order = to_thpsize(kobj)->order; + const char *output; + + if (test_bit(order, &huge_anon_orders_always)) + output = "[always] inherit madvise never"; + else if (test_bit(order, &huge_anon_orders_inherit)) + output = "always [inherit] madvise never"; + else if (test_bit(order, &huge_anon_orders_madvise)) + output = "always inherit [madvise] never"; + else + output = "always inherit madvise [never]"; + + return sysfs_emit(buf, "%s\n", output); +} + +static ssize_t anon_enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int order = to_thpsize(kobj)->order; + ssize_t ret = count; + + if (sysfs_streq(buf, "always")) { + spin_lock(&huge_anon_orders_lock); + clear_bit(order, &huge_anon_orders_inherit); + clear_bit(order, &huge_anon_orders_madvise); + set_bit(order, &huge_anon_orders_always); + spin_unlock(&huge_anon_orders_lock); + } else if (sysfs_streq(buf, "inherit")) { + spin_lock(&huge_anon_orders_lock); + clear_bit(order, &huge_anon_orders_always); + clear_bit(order, &huge_anon_orders_madvise); + set_bit(order, &huge_anon_orders_inherit); + spin_unlock(&huge_anon_orders_lock); + } else if (sysfs_streq(buf, "madvise")) { + spin_lock(&huge_anon_orders_lock); + clear_bit(order, &huge_anon_orders_always); + clear_bit(order, &huge_anon_orders_inherit); + set_bit(order, &huge_anon_orders_madvise); + spin_unlock(&huge_anon_orders_lock); + } else if (sysfs_streq(buf, "never")) { + spin_lock(&huge_anon_orders_lock); + clear_bit(order, &huge_anon_orders_always); + clear_bit(order, &huge_anon_orders_inherit); + clear_bit(order, &huge_anon_orders_madvise); + spin_unlock(&huge_anon_orders_lock); + } else + ret = -EINVAL; + + if (ret > 0) { + int err; + + err = start_stop_khugepaged(); + if (err) + ret = err; + } + return ret; +} + +static ssize_t file_enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int order = to_thpsize(kobj)->order; + const char *output; + bool exec; + + if (test_bit(order, &huge_file_orders_always)) { + exec = READ_ONCE(huge_file_exec_order) == order; + output = exec ? 
"always [always+exec] never" : + "[always] always+exec never"; + } else { + output = "always always+exec [never]"; + } + + return sysfs_emit(buf, "%s\n", output); +} + +static ssize_t file_enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int order = to_thpsize(kobj)->order; + ssize_t ret = count; + + spin_lock(&huge_file_orders_lock); + + if (sysfs_streq(buf, "always")) { + set_bit(order, &huge_file_orders_always); + if (huge_file_exec_order == order) + huge_file_exec_order = -1; + } else if (sysfs_streq(buf, "always+exec")) { + set_bit(order, &huge_file_orders_always); + huge_file_exec_order = order; + } else if (sysfs_streq(buf, "never")) { + clear_bit(order, &huge_file_orders_always); + if (huge_file_exec_order == order) + huge_file_exec_order = -1; + } else { + ret = -EINVAL; + } + + spin_unlock(&huge_file_orders_lock); + return ret; +} + +static struct kobj_attribute anon_enabled_attr = + __ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store); + +static struct attribute *anon_ctrl_attrs[] = { + &anon_enabled_attr.attr, + NULL, +}; + +static const struct attribute_group anon_ctrl_attr_grp = { + .attrs = anon_ctrl_attrs, +}; + +static struct kobj_attribute file_enabled_attr = + __ATTR(file_enabled, 0644, file_enabled_show, file_enabled_store); + +static struct attribute *file_ctrl_attrs[] = { + &file_enabled_attr.attr, +#ifdef CONFIG_SHMEM + &thpsize_shmem_enabled_attr.attr, +#endif + NULL, +}; + +static const struct attribute_group file_ctrl_attr_grp = { + .attrs = file_ctrl_attrs, +}; + +static struct attribute *any_ctrl_attrs[] = { + NULL, +}; + +static const struct attribute_group any_ctrl_attr_grp = { + .attrs = any_ctrl_attrs, +}; + +static const struct kobj_type thpsize_ktype = { + .release = &thpsize_release, + .sysfs_ops = &kobj_sysfs_ops, +}; + +DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}}; + +static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item) +{ + unsigned long sum = 0; + int cpu; + + for_each_possible_cpu(cpu) { + struct mthp_stat *this = &per_cpu(mthp_stats, cpu); + + sum += this->stats[order][item]; + } + + return sum; +} + +#define DEFINE_MTHP_STAT_ATTR(_name, _index) \ +static ssize_t _name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ +{ \ + int order = to_thpsize(kobj)->order; \ + \ + return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index)); \ +} \ +static struct kobj_attribute _name##_attr = __ATTR_RO(_name) + +DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC); +DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); +DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); +DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT); +DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK); +#ifdef CONFIG_SHMEM +DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC); +DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK); +DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE); +#endif +DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT); +DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED); +DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED); +DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON); +DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED); +DEFINE_MTHP_STAT_ATTR(file_alloc, MTHP_STAT_FILE_ALLOC); + +static struct attribute *anon_stats_attrs[] = { + &anon_fault_alloc_attr.attr, + 
&anon_fault_fallback_attr.attr, + &anon_fault_fallback_charge_attr.attr, +#ifndef CONFIG_SHMEM + &swpout_attr.attr, + &swpout_fallback_attr.attr, +#endif + &split_deferred_attr.attr, + &nr_anon_attr.attr, + &nr_anon_partially_mapped_attr.attr, + NULL, +}; + +static struct attribute_group anon_stats_attr_grp = { + .name = "stats", + .attrs = anon_stats_attrs, +}; + +static struct attribute *file_stats_attrs[] = { + &file_alloc_attr.attr, +#ifdef CONFIG_SHMEM + &shmem_alloc_attr.attr, + &shmem_fallback_attr.attr, + &shmem_fallback_charge_attr.attr, +#endif + NULL, +}; + +static struct attribute_group file_stats_attr_grp = { + .name = "stats", + .attrs = file_stats_attrs, +}; + +static struct attribute *any_stats_attrs[] = { +#ifdef CONFIG_SHMEM + &swpout_attr.attr, + &swpout_fallback_attr.attr, +#endif + &split_attr.attr, + &split_failed_attr.attr, + NULL, +}; + +static struct attribute_group any_stats_attr_grp = { + .name = "stats", + .attrs = any_stats_attrs, +}; + +static int sysfs_add_group(struct kobject *kobj, + const struct attribute_group *grp) +{ + int ret = -ENOENT; + + /* + * If the group is named, try to merge first, assuming the subdirectory + * was already created. This avoids the warning emitted by + * sysfs_create_group() if the directory already exists. + */ + if (grp->name) + ret = sysfs_merge_group(kobj, grp); + if (ret) + ret = sysfs_create_group(kobj, grp); + + return ret; +} + +static struct thpsize *thpsize_create(int order, struct kobject *parent) +{ + unsigned long size = (PAGE_SIZE << order) / SZ_1K; + struct thpsize *thpsize; + int ret = -ENOMEM; + + thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL); + if (!thpsize) + goto err; + + thpsize->order = order; + + ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent, + "hugepages-%lukB", size); + if (ret) { + kfree(thpsize); + goto err; + } + + + ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp); + if (ret) + goto err_put; + + ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp); + if (ret) + goto err_put; + + if (BIT(order) & THP_ORDERS_ALL_ANON) { + ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp); + if (ret) + goto err_put; + + ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp); + if (ret) + goto err_put; + } + + if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) { + ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp); + if (ret) + goto err_put; + + ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp); + if (ret) + goto err_put; + } + + return thpsize; +err_put: + kobject_put(&thpsize->kobj); +err: + return ERR_PTR(ret); +} + +static void thpsize_release(struct kobject *kobj) +{ + kfree(to_thpsize(kobj)); +} + static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) { int err; + struct thpsize *thpsize; + unsigned long orders; + int order; + + /* + * Default to setting PMD-sized THP to inherit the global setting and + * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time + * constant so we have to do this here. + */ + if (!anon_orders_configured) + huge_anon_orders_inherit = BIT(PMD_ORDER); + + /* + * For pagecache, default to enabling all orders. powerpc's PMD_ORDER + * (and therefore THP_ORDERS_ALL_FILE_DEFAULT) isn't a compile-time + * constant so we have to do this here. 
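+ * e.g. this enables every pagecache order from 1 up to MAX_PAGECACHE_ORDER, unless a thp_file= boot parameter already configured them.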
+ */ + if (!file_orders_configured) { + huge_file_orders_always = THP_ORDERS_ALL_FILE_DEFAULT; + file_orders_configured = true; + } *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); if (unlikely(!*hugepage_kobj)) { @@ -418,8 +809,24 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) goto remove_hp_group; } + orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT; + order = highest_order(orders); + while (orders) { + thpsize = thpsize_create(order, *hugepage_kobj); + if (IS_ERR(thpsize)) { + pr_err("failed to create thpsize for order %d\n", order); + err = PTR_ERR(thpsize); + goto remove_all; + } + list_add(&thpsize->node, &thpsize_list); + order = next_order(&orders, order); + } + return 0; +remove_all: + hugepage_exit_sysfs(*hugepage_kobj); + return err; remove_hp_group: sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); delete_obj: @@ -429,6 +836,13 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) { + struct thpsize *thpsize, *tmp; + + list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) { + list_del(&thpsize->node); + kobject_put(&thpsize->kobj); + } + sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); kobject_put(hugepage_kobj); @@ -538,6 +952,173 @@ static int __init setup_transparent_hugepage(char *str) } __setup("transparent_hugepage=", setup_transparent_hugepage); +static char str_dup[PAGE_SIZE] __initdata; +static int __init setup_thp_anon(char *str) +{ + char *token, *range, *policy, *subtoken; + unsigned long always, inherit, madvise; + char *start_size, *end_size; + int start, end, nr; + char *p; + + if (!str || strlen(str) + 1 > PAGE_SIZE) + goto err; + strcpy(str_dup, str); + + always = huge_anon_orders_always; + madvise = huge_anon_orders_madvise; + inherit = huge_anon_orders_inherit; + p = str_dup; + while ((token = strsep(&p, ";")) != NULL) { + range = strsep(&token, ":"); + policy = token; + + if (!policy) + goto err; + + while ((subtoken = strsep(&range, ",")) != NULL) { + if (strchr(subtoken, '-')) { + start_size = strsep(&subtoken, "-"); + end_size = subtoken; + + start = get_order_from_str(start_size, THP_ORDERS_ALL_ANON); + end = get_order_from_str(end_size, THP_ORDERS_ALL_ANON); + } else { + start_size = end_size = subtoken; + start = end = get_order_from_str(subtoken, + THP_ORDERS_ALL_ANON); + } + + if (start == -EINVAL) { + pr_err("invalid size %s in thp_anon boot parameter\n", start_size); + goto err; + } + + if (end == -EINVAL) { + pr_err("invalid size %s in thp_anon boot parameter\n", end_size); + goto err; + } + + if (start < 0 || end < 0 || start > end) + goto err; + + nr = end - start + 1; + if (!strcmp(policy, "always")) { + bitmap_set(&always, start, nr); + bitmap_clear(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + } else if (!strcmp(policy, "madvise")) { + bitmap_set(&madvise, start, nr); + bitmap_clear(&inherit, start, nr); + bitmap_clear(&always, start, nr); + } else if (!strcmp(policy, "inherit")) { + bitmap_set(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&always, start, nr); + } else if (!strcmp(policy, "never")) { + bitmap_clear(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&always, start, nr); + } else { + pr_err("invalid policy %s in thp_anon boot parameter\n", policy); + goto err; + } + } + } + + huge_anon_orders_always = always; + huge_anon_orders_madvise 
= madvise; + huge_anon_orders_inherit = inherit; + anon_orders_configured = true; + return 1; + +err: + pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str); + return 0; +} +__setup("thp_anon=", setup_thp_anon); + +static int __init setup_thp_file(char *str) +{ + char *token, *range, *policy, *subtoken; + unsigned long always; + char *start_size, *end_size; + int start, end, nr, exec; + char *p; + + if (!str || strlen(str) + 1 > PAGE_SIZE) + goto err; + strcpy(str_dup, str); + + always = huge_file_orders_always; + exec = huge_file_exec_order; + p = str_dup; + while ((token = strsep(&p, ";")) != NULL) { + range = strsep(&token, ":"); + policy = token; + + if (!policy) + goto err; + + while ((subtoken = strsep(&range, ",")) != NULL) { + if (strchr(subtoken, '-')) { + start_size = strsep(&subtoken, "-"); + end_size = subtoken; + + start = get_order_from_str(start_size, + THP_ORDERS_ALL_FILE_DEFAULT); + end = get_order_from_str(end_size, + THP_ORDERS_ALL_FILE_DEFAULT); + } else { + start_size = end_size = subtoken; + start = end = get_order_from_str(subtoken, + THP_ORDERS_ALL_FILE_DEFAULT); + } + + if (start == -EINVAL) { + pr_err("invalid size %s in thp_file boot parameter\n", + start_size); + goto err; + } + + if (end == -EINVAL) { + pr_err("invalid size %s in thp_file boot parameter\n", + end_size); + goto err; + } + + if (start < 0 || end < 0 || start > end) + goto err; + + nr = end - start + 1; + if (!strcmp(policy, "always")) { + bitmap_set(&always, start, nr); + } else if (!strcmp(policy, "always+exec")) { + if (nr != 1) + goto err; + bitmap_set(&always, start, nr); + exec = start; + } else if (!strcmp(policy, "never")) { + bitmap_clear(&always, start, nr); + if (exec != -1 && !test_bit(exec, &always)) + exec = -1; + } else { + pr_err("invalid policy %s in thp_file boot parameter\n", policy); + goto err; + } + } + } + + huge_file_orders_always = always; + huge_file_exec_order = exec; + file_orders_configured = true; + return 1; +err: + pr_warn("thp_file=%s: error parsing string, ignoring setting\n", str); + return 0; +} +__setup("thp_file=", setup_thp_file); + pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) { if (likely(vma->vm_flags & VM_WRITE)) @@ -589,7 +1170,7 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, { loff_t off_end = off + len; loff_t off_align = round_up(off, size); - unsigned long len_pad, ret; + unsigned long len_pad, ret, off_sub; if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()) return 0; @@ -618,7 +1199,13 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, if (ret == addr) return addr; - ret += (off - ret) & (size - 1); + off_sub = (off - ret) & (size - 1); + + if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown && + !off_sub) + return ret + size; + + ret += off_sub; return ret; } @@ -651,6 +1238,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, folio_put(folio); count_vm_event(THP_FAULT_FALLBACK); count_vm_event(THP_FAULT_FALLBACK_CHARGE); + count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK); + count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); return VM_FAULT_FALLBACK; } folio_throttle_swaprate(folio, gfp); @@ -691,7 +1280,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, entry = mk_huge_pmd(page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); - folio_add_new_anon_rmap(folio, vma, haddr); + folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma);
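+ /* Deposit the preallocated PTE table so a later PMD split or zap can withdraw it without allocating. */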
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); @@ -700,6 +1289,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, mm_inc_nr_ptes(vma->vm_mm); spin_unlock(vmf->ptl); count_vm_event(THP_FAULT_ALLOC); + count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC); count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); } @@ -771,7 +1361,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) struct folio *folio; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; - if (!transhuge_vma_suitable(vma, haddr)) + if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) return VM_FAULT_FALLBACK; if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; @@ -820,6 +1410,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true); if (unlikely(!folio)) { count_vm_event(THP_FAULT_FALLBACK); + count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK); return VM_FAULT_FALLBACK; } return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp); @@ -1058,6 +1649,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, { spinlock_t *dst_ptl, *src_ptl; struct page *src_page; + struct folio *src_folio; pmd_t pmd; pgtable_t pgtable = NULL; int ret = -ENOMEM; @@ -1124,11 +1716,12 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, src_page = pmd_page(pmd); VM_BUG_ON_PAGE(!PageHead(src_page), src_page); + src_folio = page_folio(src_page); - get_page(src_page); - if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) { + folio_get(src_folio); + if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) { /* Page maybe pinned: split and retry the fault on PTEs. */ - put_page(src_page); + folio_put(src_folio); pte_free(dst_mm, pgtable); spin_unlock(src_ptl); spin_unlock(dst_ptl); @@ -1237,8 +1830,8 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, } /* - * TODO: once we support anonymous pages, use page_try_dup_anon_rmap() - * and split if duplicating fails. + * TODO: once we support anonymous pages, use + * folio_try_dup_anon_rmap_*() and split if duplicating fails. */ pudp_set_wrprotect(src_mm, addr, src_pud); pud = pud_mkold(pud_wrprotect(pud)); @@ -1342,7 +1935,8 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) if (folio_ref_count(folio) == 1) { pmd_t entry; - page_move_anon_rmap(page, vma); + folio_move_anon_rmap(folio, vma); + SetPageAnonExclusive(page); folio_unlock(folio); reuse: if (unlikely(unshare)) { @@ -1483,9 +2077,9 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; pmd_t oldpmd = vmf->orig_pmd; pmd_t pmd; - struct page *page; + struct folio *folio; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; - int page_nid = NUMA_NO_NODE; + int nid = NUMA_NO_NODE; int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK); bool migrated = false, writable = false; int flags = 0; @@ -1507,37 +2101,35 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) can_change_pmd_writable(vma, vmf->address, pmd)) writable = true; - page = vm_normal_page_pmd(vma, haddr, pmd); - if (!page) + folio = vm_normal_folio_pmd(vma, haddr, pmd); + if (!folio) goto out_map; /* See similar comment in do_numa_page for explanation */ if (!writable) flags |= TNF_NO_GROUP; - page_nid = page_to_nid(page); + nid = folio_nid(folio); /* * For memory tiering mode, cpupid of slow memory page is used * to record page access time. So use default value. 
*/ - if (node_is_toptier(page_nid)) - last_cpupid = page_cpupid_last(page); - target_nid = numa_migrate_prep(page, vma, haddr, page_nid, - &flags); - + if (node_is_toptier(nid)) + last_cpupid = folio_last_cpupid(folio); + target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags); if (target_nid == NUMA_NO_NODE) { - put_page(page); + folio_put(folio); goto out_map; } spin_unlock(vmf->ptl); writable = false; - migrated = migrate_misplaced_page(page, vma, target_nid); + migrated = migrate_misplaced_folio(folio, vma, target_nid); if (migrated) { flags |= TNF_MIGRATED; - page_nid = target_nid; - task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); + nid = target_nid; + task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); return 0; } @@ -1557,8 +2149,8 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); spin_unlock(vmf->ptl); - if (page_nid != NUMA_NO_NODE) - task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); + if (nid != NUMA_NO_NODE) + task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); return 0; } @@ -1596,7 +2188,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, * If other processes are mapping this folio, we couldn't discard * the folio unless they all do MADV_FREE so let's skip the folio. */ - if (folio_estimated_sharers(folio) != 1) + if (folio_likely_mapped_shared(folio)) goto out; if (!folio_trylock(folio)) @@ -1679,7 +2271,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, if (pmd_present(orig_pmd)) { page = pmd_page(orig_pmd); - page_remove_rmap(page, vma, true); + folio_remove_rmap_pmd(page_folio(page), page, vma); VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); VM_BUG_ON_PAGE(!PageHead(page), page); } else if (thp_migration_supported()) { @@ -1814,7 +2406,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION if (is_swap_pmd(*pmd)) { swp_entry_t entry = pmd_to_swp_entry(*pmd); - struct page *page = pfn_swap_entry_to_page(entry); + struct folio *folio = page_folio(pfn_swap_entry_to_page(entry)); pmd_t newpmd; VM_BUG_ON(!is_pmd_migration_entry(*pmd)); @@ -1823,7 +2415,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, * A protection check is difficult so * just be safe and disable write */ - if (PageAnon(page)) + if (folio_test_anon(folio)) entry = make_readable_exclusive_migration_entry(swp_offset(entry)); else entry = make_readable_migration_entry(swp_offset(entry)); @@ -1845,7 +2437,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, #endif if (prot_numa) { - struct page *page; + struct folio *folio; bool toptier; /* * Avoid trapping faults against the zero page. The read-only @@ -1858,8 +2450,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, if (pmd_protnone(*pmd)) goto unlock; - page = pmd_page(*pmd); - toptier = node_is_toptier(page_to_nid(page)); + folio = page_folio(pmd_page(*pmd)); + toptier = node_is_toptier(folio_nid(folio)); /* * Skip scanning top tier node if normal numa * balancing is disabled @@ -1870,7 +2462,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING && !toptier) - xchg_page_access_time(page, jiffies_to_msecs(jiffies)); + folio_xchg_access_time(folio, + jiffies_to_msecs(jiffies)); } /* * In case prot_numa, we are under mmap_read_lock(mm). 
It's critical @@ -2056,6 +2649,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, unsigned long haddr, bool freeze) { struct mm_struct *mm = vma->vm_mm; + struct folio *folio; struct page *page; pgtable_t pgtable; pmd_t old_pmd, _pmd; @@ -2090,12 +2684,13 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, page = pfn_swap_entry_to_page(entry); } else { page = pmd_page(old_pmd); - if (!PageDirty(page) && pmd_dirty(old_pmd)) - set_page_dirty(page); - if (!PageReferenced(page) && pmd_young(old_pmd)) - SetPageReferenced(page); - page_remove_rmap(page, vma, true); - put_page(page); + folio = page_folio(page); + if (!folio_test_dirty(folio) && pmd_dirty(old_pmd)) + folio_mark_dirty(folio); + if (!folio_test_referenced(folio) && pmd_young(old_pmd)) + folio_set_referenced(folio); + folio_remove_rmap_pmd(folio, page, vma); + folio_put(folio); } add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); return; @@ -2154,16 +2749,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, */ old_pmd = pmdp_invalidate(vma, haddr, pmd); page = pmd_page(old_pmd); + folio = page_folio(page); if (pmd_dirty(old_pmd)) { dirty = true; - SetPageDirty(page); + folio_set_dirty(folio); } write = pmd_write(old_pmd); young = pmd_young(old_pmd); soft_dirty = pmd_soft_dirty(old_pmd); uffd_wp = pmd_uffd_wp(old_pmd); - VM_BUG_ON_PAGE(!page_count(page), page); + VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); /* * Without "freeze", we'll simply split the PMD, propagating the @@ -2178,13 +2775,21 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, * In case we cannot clear PageAnonExclusive(), split the PMD * only and let try_to_migrate_one() fail later. * - * See page_try_share_anon_rmap(): invalidate PMD first. + * See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */ - anon_exclusive = PageAnon(page) && PageAnonExclusive(page); - if (freeze && anon_exclusive && page_try_share_anon_rmap(page)) + anon_exclusive = PageAnonExclusive(page); + if (freeze && anon_exclusive && + folio_try_share_anon_rmap_pmd(folio, page)) freeze = false; - if (!freeze) - page_ref_add(page, HPAGE_PMD_NR - 1); + if (!freeze) { + rmap_t rmap_flags = RMAP_NONE; + + folio_ref_add(folio, HPAGE_PMD_NR - 1); + if (anon_exclusive) + rmap_flags |= RMAP_EXCLUSIVE; + folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR, + vma, haddr, rmap_flags); + } } /* @@ -2196,15 +2801,16 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, pte = pte_offset_map(&_pmd, haddr); VM_BUG_ON(!pte); - for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { - pte_t entry; - /* - * Note that NUMA hinting access restrictions are not - * transferred to avoid any possibility of altering - * permissions across VMAs. - */ - if (freeze || pmd_migration) { + + /* + * Note that NUMA hinting access restrictions are not transferred to + * avoid any possibility of altering permissions across VMAs. 
+ */ + if (freeze || pmd_migration) { + for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { + pte_t entry; swp_entry_t swp_entry; + if (write) swp_entry = make_writable_migration_entry( page_to_pfn(page + i)); @@ -2223,31 +2829,35 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = pte_swp_mksoft_dirty(entry); if (uffd_wp) entry = pte_swp_mkuffd_wp(entry); - } else { - entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); - if (write) - entry = pte_mkwrite(entry, vma); - if (anon_exclusive) - SetPageAnonExclusive(page + i); - if (!young) - entry = pte_mkold(entry); - /* NOTE: this may set soft-dirty too on some archs */ - if (dirty) - entry = pte_mkdirty(entry); - if (soft_dirty) - entry = pte_mksoft_dirty(entry); - if (uffd_wp) - entry = pte_mkuffd_wp(entry); - page_add_anon_rmap(page + i, vma, addr, RMAP_NONE); + + VM_WARN_ON(!pte_none(ptep_get(pte + i))); + set_pte_at(mm, addr, pte + i, entry); } - VM_BUG_ON(!pte_none(ptep_get(pte))); - set_pte_at(mm, addr, pte, entry); - pte++; + } else { + pte_t entry; + + entry = mk_pte(page, READ_ONCE(vma->vm_page_prot)); + if (write) + entry = pte_mkwrite(entry, vma); + if (!young) + entry = pte_mkold(entry); + /* NOTE: this may set soft-dirty too on some archs */ + if (dirty) + entry = pte_mkdirty(entry); + if (soft_dirty) + entry = pte_mksoft_dirty(entry); + if (uffd_wp) + entry = pte_mkuffd_wp(entry); + + for (i = 0; i < HPAGE_PMD_NR; i++) + VM_WARN_ON(!pte_none(ptep_get(pte + i))); + + set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR); } - pte_unmap(pte - 1); + pte_unmap(pte); if (!pmd_migration) - page_remove_rmap(page, vma, true); + folio_remove_rmap_pmd(folio, page, vma); if (freeze) put_page(page); @@ -2475,7 +3085,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail, if (page_is_idle(head)) set_page_idle(page_tail); - page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); + folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio)); /* * always add to the tail because some iterators expect new @@ -2558,9 +3168,6 @@ static void __split_huge_page(struct page *page, struct list_head *list, shmem_uncharge(head->mapping->host, nr_dropped); remap_page(folio, nr); - if (folio_test_swapcache(folio)) - split_swap_cluster(folio->swap); - for (i = 0; i < nr; i++) { struct page *subpage = head + i; if (subpage == page) @@ -2618,8 +3225,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) struct folio *folio = page_folio(page); struct deferred_split *ds_queue = get_deferred_split_queue(folio); XA_STATE(xas, &folio->mapping->i_pages, folio->index); - struct anon_vma *anon_vma = NULL; + bool is_anon = folio_test_anon(folio); struct address_space *mapping = NULL; + struct anon_vma *anon_vma = NULL; + int order = folio_order(folio); int extra_pins, ret; pgoff_t end; bool is_hzp; @@ -2636,7 +3245,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) if (folio_test_writeback(folio)) return -EBUSY; - if (folio_test_anon(folio)) { + if (is_anon) { /* * The caller does not necessarily hold an mmap_lock that would * prevent the anon_vma disappearing so we first take a @@ -2723,6 +3332,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) if (folio_order(folio) > 1 && !list_empty(&folio->_deferred_list)) { ds_queue->split_queue_len--; + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); list_del_init(&folio->_deferred_list); } spin_unlock(&ds_queue->split_queue_lock); @@ -2742,6 +3352,9 @@ int
split_huge_page_to_list(struct page *page, struct list_head *list) } } + if (is_anon) + mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); + __split_huge_page(page, list, end); ret = 0; } else { @@ -2763,7 +3376,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) i_mmap_unlock_read(mapping); out: xas_destroy(&xas); - count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); + if (order == HPAGE_PMD_ORDER) + count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); + count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED); return ret; } @@ -2793,6 +3408,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio) spin_lock_irqsave(&ds_queue->split_queue_lock, flags); if (!list_empty(&folio->_deferred_list)) { ds_queue->split_queue_len--; + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); list_del_init(&folio->_deferred_list); unqueued = true; } @@ -2831,7 +3447,10 @@ void deferred_split_folio(struct folio *folio) spin_lock_irqsave(&ds_queue->split_queue_lock, flags); if (list_empty(&folio->_deferred_list)) { - count_vm_event(THP_DEFERRED_SPLIT_PAGE); + if (folio_test_pmd_mappable(folio)) + count_vm_event(THP_DEFERRED_SPLIT_PAGE); + count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED); + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1); list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); ds_queue->split_queue_len++; #ifdef CONFIG_MEMCG @@ -2879,6 +3498,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink, list_move(&folio->_deferred_list, &list); } else { /* We lost race with folio_put() */ + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); list_del_init(&folio->_deferred_list); ds_queue->split_queue_len--; } @@ -3211,6 +3831,7 @@ late_initcall(split_huge_pages_debugfs); int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, struct page *page) { + struct folio *folio = page_folio(page); struct vm_area_struct *vma = pvmw->vma; struct mm_struct *mm = vma->vm_mm; unsigned long address = pvmw->address; @@ -3225,15 +3846,15 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); pmdval = pmdp_invalidate(vma, address, pvmw->pmd); - /* See page_try_share_anon_rmap(): invalidate PMD first. */ - anon_exclusive = PageAnon(page) && PageAnonExclusive(page); - if (anon_exclusive && page_try_share_anon_rmap(page)) { + /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. 
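+ * Invalidating first ensures that a concurrent GUP-fast walker either + * sees the cleared PMD and bails out, or made its reference visible + * before the exclusive flag is re-checked; see the barrier pairing in + * gup_must_unshare().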
*/ + anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page); + if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) { set_pmd_at(mm, address, pvmw->pmd, pmdval); return -EBUSY; } if (pmd_dirty(pmdval)) - set_page_dirty(page); + folio_mark_dirty(folio); if (pmd_write(pmdval)) entry = make_writable_migration_entry(page_to_pfn(page)); else if (anon_exclusive) @@ -3250,8 +3871,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, if (pmd_uffd_wp(pmdval)) pmdswp = pmd_swp_mkuffd_wp(pmdswp); set_pmd_at(mm, address, pvmw->pmd, pmdswp); - page_remove_rmap(page, vma, true); - put_page(page); + folio_remove_rmap_pmd(folio, page, vma); + folio_put(folio); trace_set_migration_pmd(address, pmd_val(pmdswp)); return 0; @@ -3259,6 +3880,7 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) { + struct folio *folio = page_folio(new); struct vm_area_struct *vma = pvmw->vma; struct mm_struct *mm = vma->vm_mm; unsigned long address = pvmw->address; @@ -3270,7 +3892,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) return; entry = pmd_to_swp_entry(*pvmw->pmd); - get_page(new); + folio_get(folio); pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); if (pmd_swp_soft_dirty(*pvmw->pmd)) pmde = pmd_mksoft_dirty(pmde); @@ -3281,20 +3903,20 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) if (!is_migration_entry_young(entry)) pmde = pmd_mkold(pmde); /* NOTE: this may contain setting soft-dirty on some archs */ - if (PageDirty(new) && is_migration_entry_dirty(entry)) + if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) pmde = pmd_mkdirty(pmde); - if (PageAnon(new)) { - rmap_t rmap_flags = RMAP_COMPOUND; + if (folio_test_anon(folio)) { + rmap_t rmap_flags = RMAP_NONE; if (!is_readable_migration_entry(entry)) rmap_flags |= RMAP_EXCLUSIVE; - page_add_anon_rmap(new, vma, haddr, rmap_flags); + folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags); } else { - page_add_file_rmap(new, vma, true); + folio_add_file_rmap_pmd(folio, new, vma); } - VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new)); + VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new)); set_pmd_at(mm, haddr, pvmw->pmd, pmde); /* No need to invalidate - it was non-present before */ diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 92b955cc5a41d38cd74a7de4597681aa3eef8bf1..d55fd1203ccbb556549d7e17fe7404c99240e689 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -984,7 +984,7 @@ static long region_count(struct resv_map *resv, long f, long t) /* * Convert the address within this vma to the page offset within - * the mapping, in pagecache page units; huge pages here. + * the mapping, in huge page units. */ static pgoff_t vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address) @@ -993,13 +993,6 @@ static pgoff_t vma_hugecache_offset(struct hstate *h, (vma->vm_pgoff >> huge_page_order(h)); } -pgoff_t linear_hugepage_index(struct vm_area_struct *vma, - unsigned long address) -{ - return vma_hugecache_offset(hstate_vma(vma), vma, address); -} -EXPORT_SYMBOL_GPL(linear_hugepage_index); - /** * vma_kernel_pagesize - Page size granularity for this VMA. * @vma: The user mapping.
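The huge-page offset conversion above is the VMA-relative byte offset scaled down by huge_page_shift(), plus the VMA's base file offset scaled from base-page to huge-page units. A minimal standalone sketch of the same arithmetic (the helper name and the 2 MB / 4 KiB page sizes below are illustrative assumptions, not part of the patch)::

	#include <assert.h>

	#define PAGE_SHIFT	12UL	/* assumed 4 KiB base pages */
	#define HP_SHIFT	21UL	/* assumed 2 MB huge pages */
	#define HP_ORDER	(HP_SHIFT - PAGE_SHIFT)

	/* Mirrors vma_hugecache_offset(): offset into the mapping, in huge pages. */
	static unsigned long hugecache_offset(unsigned long vm_start,
					      unsigned long vm_pgoff,
					      unsigned long address)
	{
		return ((address - vm_start) >> HP_SHIFT) + (vm_pgoff >> HP_ORDER);
	}

	int main(void)
	{
		/* Hypothetical VMA at 0x40000000 mapping file offset 0. */
		assert(hugecache_offset(0x40000000UL, 0,
					0x40000000UL + (5UL << HP_SHIFT)) == 5);
		return 0;
	}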
@@ -2093,20 +2086,6 @@ struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage) return NULL; } -pgoff_t hugetlb_basepage_index(struct page *page) -{ - struct page *page_head = compound_head(page); - pgoff_t index = page_index(page_head); - unsigned long compound_idx; - - if (compound_order(page_head) > MAX_ORDER) - compound_idx = page_to_pfn(page) - page_to_pfn(page_head); - else - compound_idx = page - page_head; - - return (index << compound_order(page_head)) + compound_idx; -} - static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) @@ -5019,7 +4998,7 @@ hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long add pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); __folio_mark_uptodate(new_folio); - hugepage_add_new_anon_rmap(new_folio, vma, addr); + hugetlb_add_new_anon_rmap(new_folio, vma, addr); if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) newpte = huge_pte_mkuffd_wp(newpte); set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); @@ -5142,9 +5121,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, * sleep during the process. */ if (!folio_test_anon(pte_folio)) { - page_dup_file_rmap(&pte_folio->page, true); - } else if (page_try_dup_anon_rmap(&pte_folio->page, - true, src_vma)) { + hugetlb_add_file_rmap(pte_folio); + } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) { pte_t src_pte_old = entry; struct folio *new_folio; @@ -5410,7 +5388,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, make_pte_marker(PTE_MARKER_UFFD_WP), sz); hugetlb_count_sub(pages_per_huge_page(h), mm); - page_remove_rmap(page, vma, true); + hugetlb_remove_rmap(page_folio(page)); spin_unlock(ptl); tlb_remove_page_size(tlb, page, huge_page_size(h)); @@ -5607,8 +5585,10 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, * owner and can reuse this page. 
*/ if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { - if (!PageAnonExclusive(&old_folio->page)) - page_move_anon_rmap(&old_folio->page, vma); + if (!PageAnonExclusive(&old_folio->page)) { + folio_move_anon_rmap(old_folio, vma); + SetPageAnonExclusive(&old_folio->page); + } if (likely(!unshare)) set_huge_ptep_writable(vma, haddr, ptep); @@ -5719,8 +5699,8 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, /* Break COW or unshare */ huge_ptep_clear_flush(vma, haddr, ptep); - page_remove_rmap(&old_folio->page, vma, true); - hugepage_add_new_anon_rmap(new_folio, vma, haddr); + hugetlb_remove_rmap(old_folio); + hugetlb_add_new_anon_rmap(new_folio, vma, haddr); if (huge_pte_uffd_wp(pte)) newpte = huge_pte_mkuffd_wp(newpte); set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h)); @@ -5754,7 +5734,7 @@ static bool hugetlbfs_pagecache_present(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { struct address_space *mapping = vma->vm_file->f_mapping; - pgoff_t idx = vma_hugecache_offset(h, vma, address); + pgoff_t idx = linear_page_index(vma, address); struct folio *folio; folio = filemap_get_folio(mapping, idx); @@ -5771,6 +5751,7 @@ int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping struct hstate *h = hstate_inode(inode); int err; + idx <<= huge_page_order(h); __folio_set_locked(folio); err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); @@ -5878,7 +5859,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, * before we get page_table_lock. */ new_folio = false; - folio = filemap_lock_folio(mapping, idx); + folio = filemap_lock_hugetlb_folio(h, mapping, idx); if (IS_ERR(folio)) { size = i_size_read(mapping->host) >> huge_page_shift(h); if (idx >= size) @@ -6008,9 +5989,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, goto backout; if (anon_rmap) - hugepage_add_new_anon_rmap(folio, vma, haddr); + hugetlb_add_new_anon_rmap(folio, vma, haddr); else - page_dup_file_rmap(&folio->page, true); + hugetlb_add_file_rmap(folio); new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED))); /* @@ -6187,7 +6168,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, /* Just decrements count, does not deallocate */ vma_end_reservation(h, vma, haddr); - pagecache_folio = filemap_lock_folio(mapping, idx); + pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, idx); if (IS_ERR(pagecache_folio)) pagecache_folio = NULL; } @@ -6320,7 +6301,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, if (is_continue) { ret = -EFAULT; - folio = filemap_lock_folio(mapping, idx); + folio = filemap_lock_hugetlb_folio(h, mapping, idx); if (IS_ERR(folio)) goto out; folio_in_pagecache = true; @@ -6434,9 +6415,9 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, goto out_release_unlock; if (folio_in_pagecache) - page_dup_file_rmap(&folio->page, true); + hugetlb_add_file_rmap(folio); else - hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr); + hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr); /* * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY diff --git a/mm/internal.h b/mm/internal.h index f773db493a99d039129a593dfa6f217710e8cd6b..c11dd833b58a50d232f339807e5713f062157eb9 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -8,9 +8,12 @@ #define __MM_INTERNAL_H #include +#include #include #include #include +#include +#include #include struct folio_batch; @@ -54,12 +57,12 @@ void page_writeback_init(void); /* * If a 
16GB hugetlb folio were mapped by PTEs of all of its 4kB pages, - * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit + * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently * leaves nr_pages_mapped at 0, but avoid surprise if it participates later. */ -#define COMPOUND_MAPPED 0x800000 -#define FOLIO_PAGES_MAPPED (COMPOUND_MAPPED - 1) +#define ENTIRELY_MAPPED 0x800000 +#define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1) /* * Flags passed to __show_mem() and show_free_areas() to suppress output in @@ -127,6 +130,185 @@ static inline void vma_close(struct vm_area_struct *vma) vma->vm_ops = &vma_dummy_vm_ops; } } +#ifdef CONFIG_MMU + +/* Flags for folio_pte_batch(). */ +typedef int __bitwise fpb_t; + +/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */ +#define FPB_IGNORE_DIRTY ((__force fpb_t)BIT(0)) + +/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */ +#define FPB_IGNORE_SOFT_DIRTY ((__force fpb_t)BIT(1)) + +static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags) +{ + if (flags & FPB_IGNORE_DIRTY) + pte = pte_mkclean(pte); + if (likely(flags & FPB_IGNORE_SOFT_DIRTY)) + pte = pte_clear_soft_dirty(pte); + return pte_wrprotect(pte_mkold(pte)); +} + +/** + * folio_pte_batch - detect a PTE batch for a large folio + * @folio: The large folio to detect a PTE batch for. + * @addr: The user virtual address the first page is mapped at. + * @start_ptep: Page table pointer for the first entry. + * @pte: Page table entry for the first page. + * @max_nr: The maximum number of table entries to consider. + * @flags: Flags to modify the PTE batch semantics. + * @any_writable: Optional pointer to indicate whether any entry except the + * first one is writable. + * @any_young: Optional pointer to indicate whether any entry except the + * first one is young. + * + * Detect a PTE batch: consecutive (present) PTEs that map consecutive + * pages of the same large folio. + * + * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN, + * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and + * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY). + * + * start_ptep must map any page of the folio. max_nr must be at least one and + * must be limited by the caller so scanning cannot exceed a single page table. + * + * Return: the number of table entries in the batch. 
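+ * + * Illustrative example: a 16-page folio mapped read-only at 16 consecutive + * PFNs forms a single batch of 16. Differences in the accessed and writable + * bits (and, with the FPB_IGNORE_* flags, the dirty and soft-dirty bits) + * do not end a batch; a non-present PTE or a PFN outside the folio does.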
+ */ +static inline int folio_pte_batch(struct folio *folio, unsigned long addr, + pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags, + bool *any_writable, bool *any_young) +{ + unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio); + const pte_t *end_ptep = start_ptep + max_nr; + pte_t expected_pte, *ptep; + bool writable, young; + int nr; + + if (any_writable) + *any_writable = false; + if (any_young) + *any_young = false; + + VM_WARN_ON_FOLIO(!pte_present(pte), folio); + VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio); + VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio); + + nr = pte_batch_hint(start_ptep, pte); + expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags); + ptep = start_ptep + nr; + + while (ptep < end_ptep) { + pte = ptep_get(ptep); + if (any_writable) + writable = !!pte_write(pte); + if (any_young) + young = !!pte_young(pte); + pte = __pte_batch_clear_ignored(pte, flags); + + if (!pte_same(pte, expected_pte)) + break; + + /* + * Stop immediately once we reached the end of the folio. In + * corner cases the next PFN might fall into a different + * folio. + */ + if (pte_pfn(pte) >= folio_end_pfn) + break; + + if (any_writable) + *any_writable |= writable; + if (any_young) + *any_young |= young; + + nr = pte_batch_hint(ptep, pte); + expected_pte = pte_advance_pfn(expected_pte, nr); + ptep += nr; + } + + return min(ptep - start_ptep, max_nr); +} + +/** + * pte_move_swp_offset - Move the swap entry offset field of a swap pte + * forward or backward by delta + * @pte: The initial pte state; is_swap_pte(pte) must be true and + * non_swap_entry() must be false. + * @delta: The direction and the offset we are moving; forward if delta + * is positive; backward if delta is negative + * + * Moves the swap offset, while maintaining all other fields, including + * swap type, and any swp pte bits. The resulting pte is returned. + */ +static inline pte_t pte_move_swp_offset(pte_t pte, long delta) +{ + swp_entry_t entry = pte_to_swp_entry(pte); + pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry), + (swp_offset(entry) + delta))); + + if (pte_swp_soft_dirty(pte)) + new = pte_swp_mksoft_dirty(new); + if (pte_swp_exclusive(pte)) + new = pte_swp_mkexclusive(new); + if (pte_swp_uffd_wp(pte)) + new = pte_swp_mkuffd_wp(new); + + return new; +} + + +/** + * pte_next_swp_offset - Increment the swap entry offset field of a swap pte. + * @pte: The initial pte state; is_swap_pte(pte) must be true and + * non_swap_entry() must be false. + * + * Increments the swap offset, while maintaining all other fields, including + * swap type, and any swp pte bits. The resulting pte is returned. + */ +static inline pte_t pte_next_swp_offset(pte_t pte) +{ + return pte_move_swp_offset(pte, 1); +} + +/** + * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries + * @start_ptep: Page table pointer for the first entry. + * @max_nr: The maximum number of table entries to consider. + * @pte: Page table entry for the first entry. + * + * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs + * containing swap entries all with consecutive offsets and targeting the same + * swap type, all with matching swp pte bits. + * + * max_nr must be at least one and must be limited by the caller so scanning + * cannot exceed a single page table. + * + * Return: the number of table entries in the batch. 
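+ * + * Illustrative example: three PTEs holding swap offsets 42, 43 and 44 of + * the same swap type, with identical swp pte bits, form a batch of 3; a + * present PTE, a different type, or a non-consecutive offset ends the batch.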
+ */ +static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte) +{ + pte_t expected_pte = pte_next_swp_offset(pte); + const pte_t *end_ptep = start_ptep + max_nr; + pte_t *ptep = start_ptep + 1; + + VM_WARN_ON(max_nr < 1); + VM_WARN_ON(!is_swap_pte(pte)); + VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte))); + + while (ptep < end_ptep) { + pte = ptep_get(ptep); + + if (!pte_same(pte, expected_pte)) + break; + + expected_pte = pte_next_swp_offset(expected_pte); + ptep++; + } + + return ptep - start_ptep; +} +#endif /* CONFIG_MMU */ void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, int nr_throttled); @@ -508,6 +690,8 @@ extern void prep_compound_page(struct page *page, unsigned int order); extern void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags); +extern bool free_pages_prepare(struct page *page, unsigned int order); + extern int user_min_free_kbytes; extern void free_unref_page(struct page *page, unsigned int order); @@ -542,7 +726,7 @@ int split_free_page(struct page *free_page, * completes when free_pfn <= migrate_pfn */ struct compact_control { - struct list_head freepages; /* List of free pages to migrate to */ + struct list_head freepages[NR_PAGE_ORDERS]; /* List of free pages to migrate to */ struct list_head migratepages; /* List of pages being migrated */ unsigned int nr_freepages; /* Number of isolated free pages */ unsigned int nr_migratepages; /* Number of pages to migrate */ @@ -579,6 +763,9 @@ struct compact_control { * ensure forward progress. */ bool alloc_contig; /* alloc_contig_range allocation */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* @@ -655,22 +842,68 @@ extern long faultin_page_range(struct mm_struct *mm, unsigned long start, unsigned long end, bool write, int *locked); extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, unsigned long bytes); + +/* + * NOTE: This function can't tell whether the folio is "fully mapped" in the + * range. + * "fully mapped" means all pages of the folio are associated with the page + * table of the range, while this function just checks whether the folio range + * is within the range [start, end). The caller needs to do a page table check + * if it cares about the page table association. + * + * Typical usage (like mlock or madvise) is: + * The caller knows at least one page of the folio is associated with the page + * table of the VMA and the range [start, end) intersects the VMA range. The + * caller wants to know whether the folio is fully associated with the range. + * It calls this function to check whether the folio is in the range first. + * Then it checks the page table to know whether the folio is fully mapped to + * the range.
+ */ +static inline bool +folio_within_range(struct folio *folio, struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + pgoff_t pgoff, addr; + unsigned long vma_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + + VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio); + if (start > end) + return false; + + if (start < vma->vm_start) + start = vma->vm_start; + + if (end > vma->vm_end) + end = vma->vm_end; + + pgoff = folio_pgoff(folio); + + /* if folio start address is not in vma range */ + if (!in_range(pgoff, vma->vm_pgoff, vma_pglen)) + return false; + + addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); + + return !(addr < start || end - addr < folio_size(folio)); +} + +static inline bool +folio_within_vma(struct folio *folio, struct vm_area_struct *vma) +{ + return folio_within_range(folio, vma, vma->vm_start, vma->vm_end); +} + /* * mlock_vma_folio() and munlock_vma_folio(): * should be called with vma's mmap_lock held for read or write, * under page table lock for the pte/pmd being added or removed. * - * mlock is usually called at the end of page_add_*_rmap(), munlock at - * the end of page_remove_rmap(); but new anon folios are managed by + * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at + * the end of folio_remove_rmap_*(); but new anon folios are managed by * folio_add_lru_vma() calling mlock_new_folio(). - * - * @compound is used to include pmd mappings of THPs, but filter out - * pte mappings of THPs, which cannot be consistently counted: a pte - * mapping of the THP head cannot be distinguished by the page alone. */ void mlock_folio(struct folio *folio); static inline void mlock_vma_folio(struct folio *folio, - struct vm_area_struct *vma, bool compound) + struct vm_area_struct *vma) { /* * The VM_SPECIAL check here serves two purposes. @@ -680,17 +913,24 @@ static inline void mlock_vma_folio(struct folio *folio, * file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may * still be set while VM_SPECIAL bits are added: so ignore it then. */ - if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) && - (compound || !folio_test_large(folio))) + if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED)) mlock_folio(folio); } void munlock_folio(struct folio *folio); static inline void munlock_vma_folio(struct folio *folio, - struct vm_area_struct *vma, bool compound) + struct vm_area_struct *vma) { - if (unlikely(vma->vm_flags & VM_LOCKED) && - (compound || !folio_test_large(folio))) + /* + * Always munlock when this function is called. Ideally, we should + * only munlock if some page of the folio was unmapped from the VMA, + * leaving the folio no longer fully mapped to it. + * + * But it's not easy to confirm that's the situation, so we always + * munlock the folio and let page reclaim correct it if that's wrong. + */ + if (unlikely(vma->vm_flags & VM_LOCKED)) munlock_folio(folio); } @@ -999,7 +1239,7 @@ void vunmap_range_noflush(unsigned long start, unsigned long end); void __vunmap_range_noflush(unsigned long start, unsigned long end); -int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, +int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, int page_nid, int *flags); void free_zone_device_page(struct page *page); @@ -1018,6 +1258,28 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags); +/* + * Parses a string with mem suffixes into its order. Useful to parse kernel + * parameters.
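+ * + * For example (illustrative), "2M" with a 4 KiB base page size parses to + * order 9; a size that is not a power of two, or whose order is not set + * in @valid_orders, yields -EINVAL.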
+ */ +static inline int get_order_from_str(const char *size_str, + unsigned long valid_orders) +{ + unsigned long size; + char *endptr; + int order; + + size = memparse(size_str, &endptr); + + if (!is_power_of_2(size)) + return -EINVAL; + order = get_order(size); + if (BIT(order) & ~valid_orders) + return -EINVAL; + + return order; +} + enum { /* mark page accessed */ FOLL_TOUCH = 1 << 16, @@ -1050,7 +1312,7 @@ enum { * * Ordinary GUP: Using the PT lock * * GUP-fast and fork(): mm->write_protect_seq * * GUP-fast and KSM or temporary unmapping (swap, migration): see - * page_try_share_anon_rmap() + * folio_try_share_anon_rmap_*() * * Must be called with the (sub)page that's actually referenced via the * page table entry, which might not necessarily be the head page for a @@ -1093,7 +1355,7 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma, return is_cow_mapping(vma->vm_flags); } - /* Paired with a memory barrier in page_try_share_anon_rmap(). */ + /* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) smp_rmb(); diff --git a/mm/kasan/init.c b/mm/kasan/init.c index 89895f38f722423c02744f359799db4bb7f12dc2..ac607c306292f406bfdc88dcb379b3c7ecda0036 100644 --- a/mm/kasan/init.c +++ b/mm/kasan/init.c @@ -106,6 +106,10 @@ static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr, } } +void __weak __meminit kernel_pte_init(void *addr) +{ +} + static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end) { @@ -126,8 +130,10 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr, if (slab_is_available()) p = pte_alloc_one_kernel(&init_mm); - else + else { p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); + kernel_pte_init(p); + } if (!p) return -ENOMEM; diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 3872528d096380b0c412897e7cc7b56b4cb59eb9..8e6c412ca38023a382807d49231b51c87910cdbb 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #include #include #include +#include #include #include @@ -40,8 +42,9 @@ ({ \ const bool __cond = WARN_ON(cond); \ if (unlikely(__cond)) { \ - WRITE_ONCE(kfence_enabled, false); \ disabled_by_warn = true; \ + WRITE_ONCE(kfence_enabled, false); \ + static_branch_disable(&kfence_allocation_key); \ } \ __cond; \ }) @@ -50,8 +53,27 @@ static bool kfence_enabled __read_mostly; static bool disabled_by_warn __read_mostly; +/* true = node mode, false = global mode. */ +static bool kfence_pool_node_mode __read_mostly; +static DEFINE_MUTEX(kfence_mutex); +unsigned long kfence_num_objects __read_mostly = CONFIG_KFENCE_NUM_OBJECTS; +EXPORT_SYMBOL_GPL(kfence_num_objects); +static unsigned long kfence_num_objects_snap __read_mostly; /* Used to record the upstream version's object count. */ +static int *kfence_node_map __read_mostly; /* Map real node to "virtual kfence node". */ +bool kfence_panic_on_fault __read_mostly; +struct kfence_alloc_node_cond { + long need; + long allocated; +}; +/* + * An array to record how many objects need to be allocated + * and how many have been allocated on each node.
+ */ +static struct kfence_alloc_node_cond *kfence_num_objects_stat; +/* Only used during boot; records partition info about __kfence_pool_area[] */ +static unsigned long kfence_nr_areas_per_node; -unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL; +long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL; EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ #ifdef MODULE_PARAM_PREFIX @@ -59,25 +81,41 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ #endif #define MODULE_PARAM_PREFIX "kfence." -static int kfence_enable_late(void); +DEFINE_STATIC_KEY_FALSE(kfence_short_canary); +DEFINE_STATIC_KEY_FALSE(kfence_skip_interval); +static DEFINE_STATIC_KEY_FALSE(kfence_once_enabled); +DEFINE_STATIC_KEY_TRUE(kfence_order0_page); + +#define KFENCE_MAX_OBJECTS_PER_AREA (PUD_SIZE / PAGE_SIZE / 2 - 1) + +static void kfence_enable_late(void); static int param_set_sample_interval(const char *val, const struct kernel_param *kp) { - unsigned long num; - int ret = kstrtoul(val, 0, &num); + long num; + int ret = kstrtol(val, 0, &num); if (ret < 0) return ret; - /* Using 0 to indicate KFENCE is disabled. */ - if (!num && READ_ONCE(kfence_enabled)) { - pr_info("disabled\n"); - WRITE_ONCE(kfence_enabled, false); + if (system_state == SYSTEM_BOOTING) { + *((long *)kp->arg) = num; + return 0; } - *((unsigned long *)kp->arg) = num; + /* Don't allow the sample interval to switch between positive and negative */ + if ((kfence_sample_interval > 0 && num < 0) || + (kfence_sample_interval < 0 && num > 0)) { + return -EINVAL; + } + + if (!num) /* Using 0 to indicate KFENCE is disabled. */ + kfence_disable(); + + *((long *)kp->arg) = num; + + if (num && !READ_ONCE(kfence_enabled)) + return disabled_by_warn ? -EINVAL : (kfence_enable_late(), 0); - if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING) - return disabled_by_warn ?
-EINVAL : kfence_enable_late(); return 0; } @@ -86,7 +124,7 @@ static int param_get_sample_interval(char *buffer, const struct kernel_param *kp if (!READ_ONCE(kfence_enabled)) return sprintf(buffer, "0\n"); - return param_get_ulong(buffer, kp); + return param_get_long(buffer, kp); } static const struct kernel_param_ops sample_interval_param_ops = { @@ -95,6 +133,134 @@ static const struct kernel_param_ops sample_interval_param_ops = { }; module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600); +static int param_set_num_objects(const char *val, const struct kernel_param *kp) +{ + unsigned long num; + int ret = kstrtoul(val, 0, &num); + + if (ret < 0) + return ret; + +#ifdef CONFIG_ARM64 + if (system_state == SYSTEM_BOOTING) + return 0; +#endif + + if (!num) + return -EINVAL; + + mutex_lock(&kfence_mutex); + + if (READ_ONCE(kfence_enabled)) { + ret = -EBUSY; /* can not change num_objects when enabled */ + goto out_unlock; + } + + *((unsigned long *)kp->arg) = num; + ret = 0; + +out_unlock: + mutex_unlock(&kfence_mutex); + return ret; +} + +static int param_get_num_objects(char *buffer, const struct kernel_param *kp) +{ + return param_get_ulong(buffer, kp); +} + +static const struct kernel_param_ops num_objects_param_ops = { + .set = param_set_num_objects, + .get = param_get_num_objects, +}; +module_param_cb(num_objects, &num_objects_param_ops, &kfence_num_objects, 0600); + +static int param_set_pool_mode(const char *val, const struct kernel_param *kp) +{ + bool mode; + char *s = strstrip((char *)val); + + if (READ_ONCE(kfence_enabled)) + return -EINVAL; /* can not change mode when enabled */ + + if (!strcmp(s, "global")) + mode = false; + else if (!strcmp(s, "node")) + mode = true; + else + return -EINVAL; + + *((bool *)kp->arg) = mode; + + return 0; +} + +static int param_get_pool_mode(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "%s\n", *(bool *)kp->arg ? "node" : "global"); +} + +static const struct kernel_param_ops pool_mode_param_ops = { + .set = param_set_pool_mode, + .get = param_get_pool_mode, +}; +module_param_cb(pool_mode, &pool_mode_param_ops, &kfence_pool_node_mode, 0600); + +static int param_set_order0_page(const char *val, const struct kernel_param *kp) +{ + bool res; + int ret = kstrtobool(val, &res); + + if (ret < 0) + return ret; + + if (res) + static_branch_enable(&kfence_order0_page); + else + static_branch_disable(&kfence_order0_page); + + return 0; +} + +static int param_get_order0_page(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "%d\n", static_branch_likely(&kfence_order0_page) ? 1 : 0); +} + +static const struct kernel_param_ops order0_page_param_ops = { + .set = param_set_order0_page, + .get = param_get_order0_page, +}; +module_param_cb(order0_page, &order0_page_param_ops, NULL, 0600); + +static int param_set_fault(const char *val, const struct kernel_param *kp) +{ + bool mode; + char *s = strstrip((char *)val); + + if (!strcmp(s, "report")) + mode = false; + else if (!strcmp(s, "panic")) + mode = true; + else + return -EINVAL; + + *((bool *)kp->arg) = mode; + + return 0; +} + +static int param_get_fault(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "%s\n", *(bool *)kp->arg ? 
"panic" : "report"); +} + +static const struct kernel_param_ops fault_param_ops = { + .set = param_set_fault, + .get = param_get_fault, +}; +module_param_cb(fault, &fault_param_ops, &kfence_panic_on_fault, 0600); + /* Pool usage% threshold when currently covered allocations are skipped. */ static unsigned long kfence_skip_covered_thresh __read_mostly = 75; module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644); @@ -107,28 +273,39 @@ module_param_named(deferrable, kfence_deferrable, bool, 0444); static bool kfence_check_on_panic __read_mostly; module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444); -/* The pool of pages used for guard pages and objects. */ -char *__kfence_pool __read_mostly; -EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */ - /* - * Per-object metadata, with one-to-one mapping of object metadata to - * backing pages (in __kfence_pool). + * The pool of pages used for guard pages and objects. + * Only used in booting init state. Will be cleared after that. */ -static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0); -struct kfence_metadata *kfence_metadata __read_mostly; +char **__kfence_pool_area; /* - * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache(). - * So introduce kfence_metadata_init to initialize metadata, and then make - * kfence_metadata visible after initialization is successful. This prevents - * potential UAF or access to uninitialized metadata. + * The pool of pages should be reserved earlier than kfence initialization. It's + * only assigned in arm64 architecture. */ -static struct kfence_metadata *kfence_metadata_init __read_mostly; +char *__kfence_pool_early_init; + +/* The binary tree maintaining all kfence pool areas */ +struct rb_root kfence_pool_root = RB_ROOT; +EXPORT_SYMBOL_GPL(kfence_pool_root); /* Freelist with available objects. */ -static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist); -static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */ +struct kfence_freelist_node { + struct list_head freelist; + raw_spinlock_t lock; +}; + +struct kfence_freelist_cpu { + struct list_head freelist; + unsigned long count; +}; + +struct kfence_freelist { + struct kfence_freelist_node *node; + struct kfence_freelist_cpu __percpu *cpu; +}; +static struct kfence_freelist freelist; +static atomic_t kfence_flush_res, kfence_refkill_res; /* * The static key to set up a KFENCE allocation; or if static keys are not used @@ -150,11 +327,11 @@ atomic_t kfence_allocation_gate = ATOMIC_INIT(1); * P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)) ^ HNUM */ #define ALLOC_COVERED_HNUM 2 -#define ALLOC_COVERED_ORDER (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2) -#define ALLOC_COVERED_SIZE (1 << ALLOC_COVERED_ORDER) -#define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER) +static unsigned long alloc_covered_order __ro_after_init; +#define ALLOC_COVERED_HNEXT(h) hash_32(h, alloc_covered_order) +#define ALLOC_COVERED_SIZE (1 << alloc_covered_order) #define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1) -static atomic_t alloc_covered[ALLOC_COVERED_SIZE]; +static atomic_t *alloc_covered __read_mostly; /* Stack depth used to determine uniqueness of an allocation. 
*/ #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8) @@ -171,18 +348,27 @@ enum kfence_counter_id { KFENCE_COUNTER_ALLOCS, KFENCE_COUNTER_FREES, KFENCE_COUNTER_ZOMBIES, + KFENCE_COUNTER_ALLOCATED_PAGE, + KFENCE_COUNTER_ALLOCS_PAGE, + KFENCE_COUNTER_FREES_PAGE, KFENCE_COUNTER_BUGS, KFENCE_COUNTER_SKIP_INCOMPAT, KFENCE_COUNTER_SKIP_CAPACITY, KFENCE_COUNTER_SKIP_COVERED, KFENCE_COUNTER_COUNT, }; -static atomic_long_t counters[KFENCE_COUNTER_COUNT]; +struct kfence_counter { + s64 counter[KFENCE_COUNTER_COUNT]; +}; +static struct kfence_counter __percpu *counters; static const char *const counter_names[] = { - [KFENCE_COUNTER_ALLOCATED] = "currently allocated", - [KFENCE_COUNTER_ALLOCS] = "total allocations", - [KFENCE_COUNTER_FREES] = "total frees", - [KFENCE_COUNTER_ZOMBIES] = "zombie allocations", + [KFENCE_COUNTER_ALLOCATED] = "currently slab allocated", + [KFENCE_COUNTER_ALLOCS] = "total slab allocations", + [KFENCE_COUNTER_FREES] = "total slab frees", + [KFENCE_COUNTER_ZOMBIES] = "zombie slab allocations", + [KFENCE_COUNTER_ALLOCATED_PAGE] = "currently page allocated", + [KFENCE_COUNTER_ALLOCS_PAGE] = "total page allocations", + [KFENCE_COUNTER_FREES_PAGE] = "total page frees", [KFENCE_COUNTER_BUGS] = "total bugs", [KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)", [KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)", @@ -194,13 +380,28 @@ static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT); static inline bool should_skip_covered(void) { - unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100; + unsigned long thresh; + s64 sum; + int cpu; + + /* Only use this feature in upstream mode */ + if (!kfence_num_objects_snap) + return false; - return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh; + thresh = (kfence_num_objects_snap * kfence_skip_covered_thresh) / 100; + sum = 0; + /* This may take some time but should be acceptable in sampling mode. */ + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(counters, cpu)->counter[KFENCE_COUNTER_ALLOCATED]; + + return sum > thresh; } static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries) { + if (!kfence_num_objects_snap) + return 0; + num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH); num_entries = filter_irq_stacks(stack_entries, num_entries); return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed); @@ -210,10 +411,14 @@ static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries * Adds (or subtracts) count @val for allocation stack trace hash * @alloc_stack_hash from Counting Bloom filter. 
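 * * With ALLOC_COVERED_HNUM == 2, each hash is folded into two counter slots; * an allocation is treated as already covered only if every slot it hashes * to is non-zero, trading occasional false positives for O(1) lookups.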
*/ -static void alloc_covered_add(u32 alloc_stack_hash, int val) +static inline void alloc_covered_add(u32 alloc_stack_hash, int val) { int i; + /* Only use this feature in upstream mode */ + if (!kfence_num_objects_snap) + return; + for (i = 0; i < ALLOC_COVERED_HNUM; i++) { atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]); alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash); @@ -249,14 +454,14 @@ static bool kfence_unprotect(unsigned long addr) static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta) { - unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2; - unsigned long pageaddr = (unsigned long)&__kfence_pool[offset]; + struct kfence_pool_area *kpa = meta->kpa; + unsigned long offset = (meta - kpa->meta + 1) * PAGE_SIZE * 2; + unsigned long pageaddr = (unsigned long)&kpa->addr[offset]; /* The checks do not affect performance; only called from slow-paths. */ /* Only call with a pointer into kfence_metadata. */ - if (KFENCE_WARN_ON(meta < kfence_metadata || - meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS)) + if (KFENCE_WARN_ON(meta < kpa->meta || meta >= kpa->meta + kpa->nr_objects)) return 0; /* @@ -269,6 +474,13 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m return pageaddr; } +static inline bool kfence_obj_allocated(const struct kfence_metadata *meta) +{ + enum kfence_object_state state = READ_ONCE(meta->state); + + return state == KFENCE_OBJECT_ALLOCATED || state == KFENCE_OBJECT_RCU_FREEING; +} + /* * Update the object's metadata state, including updating the alloc/free stacks * depending on the state transition. @@ -278,10 +490,14 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex unsigned long *stack_entries, size_t num_stack_entries) { struct kfence_track *track = - next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track; + next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track; lockdep_assert_held(&meta->lock); + /* The stack was already saved when the RCU free was queued; skip. */ + if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING) + goto out; + if (stack_entries) { memcpy(track->stack_entries, stack_entries, num_stack_entries * sizeof(stack_entries[0])); @@ -297,6 +513,7 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex track->cpu = raw_smp_processor_id(); track->ts_nsec = local_clock(); /* Same source as printk timestamps. */ +out: /* * Pairs with READ_ONCE() in * kfence_shutdown_cache(), @@ -314,7 +531,7 @@ static inline bool check_canary_byte(u8 *addr) if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr))) return true; - atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_BUGS]++; meta = addr_to_metadata((unsigned long)addr); raw_spin_lock_irqsave(&meta->lock, flags); @@ -327,24 +544,36 @@ static inline bool check_canary_byte(u8 *addr) static inline void set_canary(const struct kfence_metadata *meta) { const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE); - unsigned long addr = pageaddr; + unsigned long addr, start = pageaddr, end = pageaddr + PAGE_SIZE; + + /* This is the most expensive part, so shrink the canary range when there is no sample interval limit. */ + if (static_branch_likely(&kfence_short_canary)) { + start = max(ALIGN_DOWN(meta->addr - 1, L1_CACHE_BYTES), start); + end = min(ALIGN(meta->addr + meta->size + 1, L1_CACHE_BYTES), end); + } /* * The canary may be written to part of the object memory, but it does * not affect it.
The user should initialize the object before using it. */ - for (; addr < meta->addr; addr += sizeof(u64)) + for (addr = start; addr < meta->addr; addr += sizeof(u64)) *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64; addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64)); - for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) + for (; addr < end; addr += sizeof(u64)) *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64; } static inline void check_canary(const struct kfence_metadata *meta) { const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE); - unsigned long addr = pageaddr; + unsigned long addr, start = pageaddr, end = pageaddr + PAGE_SIZE; + + /* This is the most expensive part, so shrink the canary range when there is no sample interval limit. */ + if (static_branch_likely(&kfence_short_canary)) { + start = max(ALIGN_DOWN(meta->addr - 1, L1_CACHE_BYTES), start); + end = min(ALIGN(meta->addr + meta->size + 1, L1_CACHE_BYTES), end); + } /* * We'll iterate over each canary byte per-side until a corrupted byte @@ -356,7 +585,7 @@ static inline void check_canary(const struct kfence_metadata *meta) */ /* Apply to left of object. */ - for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) { + for (addr = start; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) { if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) break; } @@ -376,7 +605,7 @@ static inline void check_canary(const struct kfence_metadata *meta) if (unlikely(!check_canary_byte((u8 *)addr))) return; } - for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) { + for (; addr < end; addr += sizeof(u64)) { if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) { for (; addr - pageaddr < PAGE_SIZE; addr++) { @@ -387,47 +616,103 @@ static inline void check_canary(const struct kfence_metadata *meta) } } -static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp, - unsigned long *stack_entries, size_t num_stack_entries, - u32 alloc_stack_hash) +static inline struct kfence_metadata * +get_free_meta_from_node(struct kfence_freelist_node *kfence_freelist) { - struct kfence_metadata *meta = NULL; + struct kfence_metadata *object = NULL; unsigned long flags; - struct slab *slab; - void *addr; - const bool random_right_allocate = get_random_u32_below(2); - const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS && - !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS); - /* Try to obtain a free object. */ - raw_spin_lock_irqsave(&kfence_freelist_lock, flags); - if (!list_empty(&kfence_freelist)) { - meta = list_entry(kfence_freelist.next, struct kfence_metadata, list); - list_del_init(&meta->list); - } - raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); - if (!meta) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]); - return NULL; + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + if (!list_empty(&kfence_freelist->freelist)) { + object = list_entry(kfence_freelist->freelist.next, struct kfence_metadata, list); + list_del_init(&object->list); } + if (object) + percpu_ref_get(&object->kpa->refcnt); + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); - if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { - /* - * This is extremely unlikely -- we are reporting on a - * use-after-free, which locked meta->lock, and the reporting - * code via printk calls kmalloc() which ends up in - * kfence_alloc() and tries to grab the same object that we're - * reporting on. While it has never been observed, lockdep does - * report that there is a possibility of deadlock.
Fix it by - * using trylock and bailing out gracefully. - */ - raw_spin_lock_irqsave(&kfence_freelist_lock, flags); - /* Put the object back on the freelist. */ - list_add_tail(&meta->list, &kfence_freelist); - raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); + return object; +} - return NULL; +#define KFENCE_FREELIST_PERCPU_SIZE 100 + +static struct kfence_metadata * +get_free_meta_slowpath(struct kfence_freelist_cpu *c, + struct kfence_freelist_node *kfence_freelist) +{ + struct kfence_metadata *object = NULL; + struct list_head *entry = &kfence_freelist->freelist; + + KFENCE_WARN_ON(!list_empty(&c->freelist)); + + raw_spin_lock(&kfence_freelist->lock); + + if (list_empty(&kfence_freelist->freelist)) + goto out; + + object = list_first_entry(entry, struct kfence_metadata, list); + list_del_init(&object->list); + + do { + entry = READ_ONCE(entry->next); + + if (entry == &kfence_freelist->freelist) { + entry = entry->prev; + break; + } + + c->count++; + } while (c->count < KFENCE_FREELIST_PERCPU_SIZE); + + list_cut_position(&c->freelist, &kfence_freelist->freelist, entry); + +out: + raw_spin_unlock(&kfence_freelist->lock); + + return object; +} + +static struct kfence_metadata *get_free_meta(int real_node) +{ + unsigned long flags; + struct kfence_freelist_cpu *c; + struct kfence_freelist_node *kfence_freelist; + struct kfence_metadata *object; + int node = kfence_node_map[real_node]; + + if (node >= 0) + kfence_freelist = &freelist.node[node]; + else + kfence_freelist = &freelist.node[real_node]; + + /* If target page not on current node, directly get from its nodelist */ + if (unlikely(node != kfence_node_map[numa_node_id()] || kfence_num_objects_snap)) + return get_free_meta_from_node(kfence_freelist); + + local_irq_save(flags); + c = get_cpu_ptr(freelist.cpu); + + if (unlikely(!c->count)) { + object = get_free_meta_slowpath(c, kfence_freelist); + } else { + object = list_first_entry(&c->freelist, struct kfence_metadata, list); + list_del_init(&object->list); + c->count--; } + if (object) + percpu_ref_get(&object->kpa->refcnt); + + put_cpu_ptr(c); + local_irq_restore(flags); + + return object; +} + +static inline void __init_meta(struct kfence_metadata *meta, size_t size, struct kmem_cache *cache, + unsigned long *stack_entries, size_t num_stack_entries, + u32 alloc_stack_hash) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); meta->addr = metadata_to_pageaddr(meta); /* Unprotect if we're reusing this page. */ @@ -442,27 +727,72 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g * is that the out-of-bounds accesses detected are deterministic for * such allocations. */ - if (random_right_allocate) { + if (cache && this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS] % 2) { /* Allocate on the "right" side, re-calculate address. */ meta->addr += PAGE_SIZE - size; meta->addr = ALIGN_DOWN(meta->addr, cache->align); } - addr = (void *)meta->addr; - /* Update remaining metadata. */ metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries); /* Pairs with READ_ONCE() in kfence_shutdown_cache(). 
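+	 * (Illustrative pairing, per kfence_shutdown_cache_area() below: it
+	 *  checks "READ_ONCE(meta->cache) != s" without first taking
+	 *  meta->lock, so this store must be a WRITE_ONCE().)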
*/ WRITE_ONCE(meta->cache, cache); meta->size = size; meta->alloc_stack_hash = alloc_stack_hash; +} + +static void put_free_meta(struct kfence_metadata *object); +static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp, + unsigned long *stack_entries, size_t num_stack_entries, + u32 alloc_stack_hash, int node) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + struct kfence_metadata *meta; + unsigned long flags; + struct page *page; + struct slab *slab; + void *addr; + const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS && + !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS); + + /* Try to obtain a free object. */ + meta = get_free_meta(node); + if (!meta) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_CAPACITY]++; + return NULL; + } + + if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { + /* + * This is extremely unlikely -- we are reporting on a + * use-after-free, which locked meta->lock, and the reporting + * code via printk calls kmalloc() which ends up in + * kfence_alloc() and tries to grab the same object that we're + * reporting on. While it has never been observed, lockdep does + * report that there is a possibility of deadlock. Fix it by + * using trylock and bailing out gracefully. + */ + /* Put the object back on the freelist. */ + put_free_meta(meta); + + return NULL; + } + + __init_meta(meta, size, cache, stack_entries, num_stack_entries, alloc_stack_hash); + raw_spin_unlock_irqrestore(&meta->lock, flags); + addr = (void *)meta->addr; alloc_covered_add(alloc_stack_hash, 1); /* Set required slab fields. */ - slab = virt_to_slab((void *)meta->addr); + page = virt_to_page(addr); + slab = page_slab(page); + __SetPageSlab(page); slab->slab_cache = cache; +#ifdef CONFIG_MEMCG + slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS; +#endif #if defined(CONFIG_SLUB) slab->objects = 1; #elif defined(CONFIG_SLAB) @@ -485,81 +815,238 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g if (random_fault) kfence_protect(meta->addr); /* Random "faults" by protecting the object. */ - atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]); - atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]); + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED]++; + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS]++; return addr; } -static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie) +static struct page *kfence_guarded_alloc_page(int node, unsigned long *stack_entries, + size_t num_stack_entries, u32 alloc_stack_hash) { - struct kcsan_scoped_access assert_page_exclusive; + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + struct kfence_metadata *meta; unsigned long flags; - bool init; - - raw_spin_lock_irqsave(&meta->lock, flags); + struct page *page; + void *addr; + const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS && + !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS); - if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) { - /* Invalid or double-free, bail out. */ - atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); - kfence_report_error((unsigned long)addr, false, NULL, meta, - KFENCE_ERROR_INVALID_FREE); - raw_spin_unlock_irqrestore(&meta->lock, flags); - return; + /* Try to obtain a free object. 
*/ + meta = get_free_meta(node); + if (!meta) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_CAPACITY]++; + return NULL; } - /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */ - kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE, - KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, - &assert_page_exclusive); - - if (CONFIG_KFENCE_STRESS_TEST_FAULTS) - kfence_unprotect((unsigned long)addr); /* To check canary bytes. */ + if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { + /* + * This is extremely unlikely -- we are reporting on a + * use-after-free, which locked meta->lock, and the reporting + * code via printk calls kmalloc() which ends up in + * kfence_alloc() and tries to grab the same object that we're + * reporting on. While it has never been observed, lockdep does + * report that there is a possibility of deadlock. Fix it by + * using trylock and bailing out gracefully. + * Put the object back on the freelist. + */ + put_free_meta(meta); - /* Restore page protection if there was an OOB access. */ - if (meta->unprotected_page) { - memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE); - kfence_protect(meta->unprotected_page); - meta->unprotected_page = 0; + return NULL; } - /* Mark the object as freed. */ - metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0); - init = slab_want_init_on_free(meta->cache); + __init_meta(meta, PAGE_SIZE, NULL, stack_entries, num_stack_entries, alloc_stack_hash); + raw_spin_unlock_irqrestore(&meta->lock, flags); - alloc_covered_add(meta->alloc_stack_hash, -1); + addr = (void *)meta->addr; + alloc_covered_add(alloc_stack_hash, 1); - /* Check canary bytes for memory corruption. */ - check_canary(meta); + page = virt_to_page(addr); + if (PageSlab(page)) { + struct slab *slab = page_slab(page); - /* - * Clear memory if init-on-free is set. While we protect the page, the - * data is still there, and after a use-after-free is detected, we - * unprotect the page, so the data is still accessible. - */ - if (!zombie && unlikely(init)) - memzero_explicit(addr, meta->size); + /* + * For performance considerations, + * we clean slab info here (when allocating pages). + * So that slabs can reuse their flags and obj_cgroups + * without being cleared or freed if the previous user + * is slab too. + */ + slab->slab_cache = NULL; +#ifdef CONFIG_MEMCG + page->memcg_data = 0; +#endif + __ClearPageSlab(page); + } + page->mapping = NULL; +#ifdef CONFIG_DEBUG_VM + atomic_set(&page->_refcount, 0); +#endif + + if (random_fault) + kfence_protect(meta->addr); /* Random "faults" by protecting the object. 
*/ + + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED_PAGE]++; + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS_PAGE]++; + + return page; +} + +static inline void put_free_meta_to_node(struct kfence_metadata *object, + struct kfence_freelist_node *kfence_freelist) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + list_add_tail(&object->list, &kfence_freelist->freelist); + percpu_ref_put(&object->kpa->refcnt); + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); +} + +static void put_free_meta_slowpath(struct kfence_freelist_cpu *c, + struct kfence_freelist_node *kfence_freelist) +{ + struct list_head *entry = &c->freelist, new_list; + + do { + entry = entry->next; + c->count--; + } while (c->count > KFENCE_FREELIST_PERCPU_SIZE); + + list_cut_position(&new_list, &c->freelist, entry); + raw_spin_lock(&kfence_freelist->lock); + list_splice_tail(&new_list, &kfence_freelist->freelist); + raw_spin_unlock(&kfence_freelist->lock); +} + +static void put_free_meta(struct kfence_metadata *object) +{ + int node = object->kpa->node; + unsigned long flags; + struct kfence_freelist_cpu *c; + struct kfence_freelist_node *kfence_freelist = &freelist.node[node]; + + KFENCE_WARN_ON(!list_empty(&object->list)); + + /* If meta not on current node, just return it to its own nodelist */ + if (unlikely(!kfence_node_map || node != kfence_node_map[numa_node_id()] || + kfence_num_objects_snap)) { + put_free_meta_to_node(object, kfence_freelist); + return; + } + + local_irq_save(flags); + c = get_cpu_ptr(freelist.cpu); + + list_add_tail(&object->list, &c->freelist); + c->count++; + + if (unlikely(c->count == KFENCE_FREELIST_PERCPU_SIZE * 2)) + put_free_meta_slowpath(c, kfence_freelist); + + percpu_ref_put(&object->kpa->refcnt); + + put_cpu_ptr(c); + local_irq_restore(flags); +} + +static inline bool __free_meta(void *addr, struct kfence_metadata *meta, bool zombie, bool is_page) +{ + struct kcsan_scoped_access assert_page_exclusive; + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + unsigned long flags; + bool init; + + raw_spin_lock_irqsave(&meta->lock, flags); + + if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) { + /* Invalid or double-free, bail out. */ + this_cpu_counter->counter[KFENCE_COUNTER_BUGS]++; + kfence_report_error((unsigned long)addr, false, NULL, meta, + KFENCE_ERROR_INVALID_FREE); + raw_spin_unlock_irqrestore(&meta->lock, flags); + return false; + } + + /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */ + kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE, + KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, + &assert_page_exclusive); + + if (CONFIG_KFENCE_STRESS_TEST_FAULTS) + kfence_unprotect((unsigned long)addr); /* To check canary bytes. */ + + /* Restore page protection if there was an OOB access. */ + if (meta->unprotected_page) { + memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE); + kfence_protect(meta->unprotected_page); + meta->unprotected_page = 0; + } + + /* Mark the object as freed. */ + metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0); + if (!is_page) + init = slab_want_init_on_free(meta->cache); + + raw_spin_unlock_irqrestore(&meta->lock, flags); + + alloc_covered_add(meta->alloc_stack_hash, -1); + + if (!is_page) { + /* Check canary bytes for memory corruption. */ + check_canary(meta); + + /* + * Clear memory if init-on-free is set. 
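+	 * (init was sampled via slab_want_init_on_free() above while
+	 * meta->lock was still held, because meta->cache may be cleared
+	 * once the object is freed; see kfence_shutdown_cache().)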
While we protect the page, the + * data is still there, and after a use-after-free is detected, we + * unprotect the page, so the data is still accessible. + */ + if (!zombie && unlikely(init)) + memzero_explicit(addr, meta->size); + } /* Protect to detect use-after-frees. */ kfence_protect((unsigned long)addr); kcsan_end_scoped_access(&assert_page_exclusive); + + return true; +} + +static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + + if (!__free_meta(addr, meta, zombie, false)) + return; + if (!zombie) { /* Add it to the tail of the freelist for reuse. */ - raw_spin_lock_irqsave(&kfence_freelist_lock, flags); - KFENCE_WARN_ON(!list_empty(&meta->list)); - list_add_tail(&meta->list, &kfence_freelist); - raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); + put_free_meta(meta); - atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]); - atomic_long_inc(&counters[KFENCE_COUNTER_FREES]); + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED]--; + this_cpu_counter->counter[KFENCE_COUNTER_FREES]++; } else { /* See kfence_shutdown_cache(). */ - atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]); + this_cpu_counter->counter[KFENCE_COUNTER_ZOMBIES]++; } } +static void kfence_guarded_free_page(struct page *page, void *addr, struct kfence_metadata *meta) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + + if (!__free_meta(addr, meta, false, true)) + return; + + put_free_meta(meta); + + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED_PAGE]--; + this_cpu_counter->counter[KFENCE_COUNTER_FREES_PAGE]++; + +} + static void rcu_guarded_free(struct rcu_head *h) { struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head); @@ -567,22 +1054,39 @@ static void rcu_guarded_free(struct rcu_head *h) kfence_guarded_free((void *)meta->addr, meta, false); } -/* - * Initialization of the KFENCE pool after its allocation. - * Returns 0 on success; otherwise returns the address up to - * which partial initialization succeeded. - */ -static unsigned long kfence_init_pool(void) +static void kfence_clear_page_info(unsigned long addr, unsigned long size) { - unsigned long addr; + unsigned long i; + + for (i = addr; i < addr + size; i += PAGE_SIZE) { + struct page *page = virt_to_page((void *)i); + + if (PageSlab(page)) { +#ifdef CONFIG_MEMCG + page->memcg_data = 0; +#endif + __ClearPageSlab(page); + } + __ClearPageKfence(page); + page->mapping = NULL; + atomic_set(&page->_refcount, 1); + kfence_unprotect(i); + } +} + +static bool __kfence_init_pool_area(struct kfence_pool_area *kpa) +{ + char *__kfence_pool = kpa->addr; + struct kfence_metadata *kfence_metadata = kpa->meta; + struct kfence_freelist_node *kfence_freelist = &freelist.node[kpa->node]; + unsigned long addr = (unsigned long)__kfence_pool, flags; struct page *pages; int i; - if (!arch_kfence_init_pool()) - return (unsigned long)__kfence_pool; + if (!__kfence_pool_early_init && !arch_kfence_init_pool(kpa)) + goto err; - addr = (unsigned long)__kfence_pool; - pages = virt_to_page(__kfence_pool); + pages = virt_to_page((void *)addr); /* * Set up object pages: they must have PG_slab set, to avoid freeing @@ -592,17 +1096,10 @@ static unsigned long kfence_init_pool(void) * fast-path in SLUB, and therefore need to ensure kfree() correctly * enters __slab_free() slow-path. 
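+	 *
+	 * Pool layout, as established below (one cell per page): two leading
+	 * guard pages, then alternating object and guard pages:
+	 *
+	 *   +-------+-------+-------+-------+-------+-------+-- ...
+	 *   | guard | guard | obj 0 | guard | obj 1 | guard |
+	 *   +-------+-------+-------+-------+-------+-------+-- ...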
*/ - for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { - struct slab *slab = page_slab(nth_page(pages, i)); - - if (!i || (i % 2)) - continue; + for (i = 0; i < kpa->pool_size / PAGE_SIZE; i++) { + struct page *page = nth_page(pages, i); - __folio_set_slab(slab_folio(slab)); -#ifdef CONFIG_MEMCG - slab->memcg_data = (unsigned long)&kfence_metadata_init[i / 2 - 1].objcg | - MEMCG_DATA_OBJCGS; -#endif + __SetPageKfence(page); } /* @@ -613,96 +1110,676 @@ static unsigned long kfence_init_pool(void) */ for (i = 0; i < 2; i++) { if (unlikely(!kfence_protect(addr))) - return addr; + goto err; addr += PAGE_SIZE; } - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { - struct kfence_metadata *meta = &kfence_metadata_init[i]; + /* Protect the right redzone. */ + for (i = 0; i < kpa->nr_objects; i++) { + if (unlikely(!kfence_protect(addr + PAGE_SIZE))) + goto err; + addr += 2 * PAGE_SIZE; + } + + addr = (unsigned long)__kfence_pool + 2 * PAGE_SIZE; + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + for (i = 0; i < kpa->nr_objects; i++) { + struct kfence_metadata *meta = &kfence_metadata[i]; /* Initialize metadata. */ INIT_LIST_HEAD(&meta->list); raw_spin_lock_init(&meta->lock); meta->state = KFENCE_OBJECT_UNUSED; meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */ - list_add_tail(&meta->list, &kfence_freelist); + meta->kpa = kpa; + list_add_tail(&meta->list, &kfence_freelist->freelist); + /* No fail after here, since we've added this pool to freelist. */ + + addr += 2 * PAGE_SIZE; + } + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); + + /* + * The pool is live and will never be deallocated from this point on. + * Remove the pool object from the kmemleak object tree, as it would + * otherwise overlap with allocations returned by kfence_alloc(), which + * are registered with kmemleak through the slab post-alloc hook. 
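+	 * (PageReserved() below tells boot-time memblock pools apart from
+	 * pools added at runtime via alloc_contig_pages(); compare the two
+	 * release paths chosen in kfence_free_area().)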
+ */ + if (PageReserved(pages)) + kmemleak_ignore_phys(__pa(__kfence_pool)); + + return true; + +err: + kfence_clear_page_info((unsigned long)kpa->addr, kpa->pool_size); + return false; +} + +static bool kfence_rb_less(struct rb_node *a, const struct rb_node *b) +{ + return (unsigned long)kfence_rbentry(a)->addr < (unsigned long)kfence_rbentry(b)->addr; +} + +static void __init kfence_alloc_pool_node(int node) +{ + unsigned long nr_need = kfence_num_objects_stat[node].need; + unsigned long nr_request = min(nr_need, KFENCE_MAX_OBJECTS_PER_AREA); + unsigned long index = kfence_nr_areas_per_node * node; + + while (nr_need) { + unsigned long kfence_pool_size = (nr_request + 1) * 2 * PAGE_SIZE; + + __kfence_pool_area[index] = memblock_alloc_node(kfence_pool_size, PUD_SIZE, node); + if (!__kfence_pool_area[index]) { + pr_err("kfence alloc pool on node %d failed\n", node); + break; + } + index++; + nr_need -= nr_request; + nr_request = min(nr_request, nr_need); + } +} + +static void kpa_release(struct percpu_ref *ref); +static void kfence_free_area(struct work_struct *work); +static inline bool init_kpa(struct kfence_pool_area *kpa, char *__kfence_pool, int node, + unsigned long nr_objects, unsigned long pool_size) +{ + kpa->meta = vzalloc_node(sizeof(struct kfence_metadata) * nr_objects, node); + if (!kpa->meta) + goto fail; + if (percpu_ref_init(&kpa->refcnt, kpa_release, PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) + goto fail; + INIT_WORK(&kpa->work, kfence_free_area); + kpa->addr = __kfence_pool; + kpa->pool_size = pool_size; + kpa->nr_objects = nr_objects; + kpa->node = node; + atomic_set(&kpa->_ref, 1); /* held by rb tree */ + + if (!__kfence_init_pool_area(kpa)) + goto fail; + + return true; + +fail: + vfree(kpa->meta); + percpu_ref_exit(&kpa->refcnt); + + return false; +} + +static bool __init kfence_init_pool_area(int node, int area) +{ + int index = node * kfence_nr_areas_per_node + area; + char *__kfence_pool = __kfence_pool_area[index]; + struct kfence_pool_area *kpa; + unsigned long nr_objects, pool_size; + + if (!__kfence_pool) + return false; + + nr_objects = min(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + pool_size = (nr_objects + 1) * 2 * PAGE_SIZE; + + kpa = kzalloc_node(sizeof(struct kfence_pool_area), GFP_KERNEL, node); + if (!kpa) + goto fail; + + if (!init_kpa(kpa, __kfence_pool, node, nr_objects, pool_size)) + goto fail; + + rb_add(&kpa->rb_node, &kfence_pool_root, kfence_rb_less); + __kfence_pool_area[index] = NULL; + kfence_num_objects_stat[node].allocated += nr_objects; + + return true; + +fail: + memblock_free_late(__pa(__kfence_pool), pool_size); + __kfence_pool_area[index] = NULL; + kfree(kpa); + + return false; +} + +static bool __init kfence_init_pool(void) +{ + int area, node; + bool success_once = false; + + for_each_node(node) { + for (area = 0; area < kfence_nr_areas_per_node; area++) { + if (kfence_init_pool_area(node, area)) + success_once = true; + } + } + + return success_once; +} + +static void kfence_alloc_pool_late_node(int node, struct list_head *ready, bool fallback) +{ + unsigned long nr_need, nr_request; + struct kfence_alloc_node_cond *knos = &kfence_num_objects_stat[node]; + gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO; + + if (knos->allocated >= knos->need) + return; + + nr_need = roundup(knos->need - knos->allocated, KFENCE_MAX_OBJECTS_PER_AREA); + nr_request = KFENCE_MAX_OBJECTS_PER_AREA; + if (!fallback) + gfp_mask |= __GFP_THISNODE; + + while (nr_need) { + struct page *page; + struct kfence_pool_area *kpa; + unsigned long nr_pages = (nr_request + 
1) * 2;
+#ifdef CONFIG_CONTIG_ALLOC
+		page = alloc_contig_pages(nr_pages, gfp_mask, node, NULL);
+#else
+		pr_warn("anolis kfence can only be enabled at runtime with CONFIG_CONTIG_ALLOC\n");
+		page = NULL;
+#endif
+		if (!page) {
+			pr_err("kfence alloc pool on node %d failed\n", node);
+			return;
+		}
+		kpa = kzalloc_node(sizeof(struct kfence_pool_area), GFP_KERNEL, node);
+		if (!kpa)
+			goto fail;
+
+		if (!init_kpa(kpa, page_to_virt(page), node, nr_request, nr_pages * PAGE_SIZE))
+			goto fail;
+
+		list_add(&kpa->list, ready);
+		nr_need -= nr_request;
+		knos->allocated += nr_request;
+		nr_request = min(nr_request, nr_need);
+
+		continue;
+
+fail:
+#ifdef CONFIG_CONTIG_ALLOC
+		free_contig_range(page_to_pfn(page), nr_pages);
+#endif
+		kfree(kpa);
+
+		return;
+	}
+}
+
+static void kfence_free_pool_area(struct kfence_pool_area *kpa)
+{
+	phys_addr_t base = __pa(kpa->addr), size = kpa->pool_size;
+	phys_addr_t cursor = PFN_UP(base);
+	phys_addr_t end = PFN_DOWN(base + size);
+
+	kmemleak_free_part_phys(base, size);
+	for (; cursor < end; cursor++) {
+		__free_pages_core(pfn_to_page(cursor), 0);
+		totalram_pages_inc();
+	}
+}
+
+static void kfence_free_pool_late_area(struct kfence_pool_area *kpa)
+{
+#ifdef CONFIG_CONTIG_ALLOC
+	free_contig_range(page_to_pfn(virt_to_page(kpa->addr)), kpa->pool_size / PAGE_SIZE);
+#endif
+}
+
+static void get_kpa(struct kfence_pool_area *kpa)
+{
+	atomic_inc(&kpa->_ref);
+}
+
+static void put_kpa(struct kfence_pool_area *kpa)
+{
+	if (atomic_dec_and_test(&kpa->_ref))
+		kfree(kpa);
+}
+
+static int kfence_update_pool_root(void *info)
+{
+	struct list_head *ready_list = info;
+	struct kfence_pool_area *kpa;
+	struct rb_node *cur, *next;
+
+	for (cur = rb_first(&kfence_pool_root); cur; cur = next) {
+		kpa = kfence_rbentry(cur);
+		next = rb_next(cur);
+		if (!kpa->nr_objects) {
+			rb_erase(&kpa->rb_node, &kfence_pool_root);
+			put_kpa(kpa);
+		} else {
+			percpu_ref_resurrect(&kpa->refcnt);
+		}
+	}
+
+	while (!list_empty(ready_list)) {
+		kpa = list_first_entry(ready_list, struct kfence_pool_area, list);
+		rb_add(&kpa->rb_node, &kfence_pool_root, kfence_rb_less);
+		list_del(&kpa->list);
+	}
+
+	return 0;
+}
+
+/*
+ * Flush this cpu's per cpu freelist to the per node freelist.
+ *
+ * No extra synchronization is needed to prevent races, because we can
+ * only reach this point via two routes (in both of them KFENCE is
+ * disabled, so no new allocations can occur):
+ *
+ * 1) from update_kfence_node_map() when enabling kfence
+ *    Since kfence_node_map is set to NULL, the objects
+ *    will be directly freed to the per node freelist.
+ *
+ * 2) from kfence_free_area() when a kpa is being released
+ *    Since the refcnt of this kpa is down to 0, no objects
+ *    from this kpa will be freed to a per cpu freelist.
+ *    If some objects from other kpas are freed after this
+ *    check, that is fine, because we will only free the space
+ *    of our target kpa. Objects from other kpas may simply
+ *    remain in the per cpu freelist.
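+ *
+ * Illustrative call chain for route 2):
+ *
+ *   percpu_ref hits zero
+ *     -> kpa_release()
+ *     -> queue_work(system_long_wq, &kpa->work)
+ *     -> kfence_free_area()
+ *     -> kfence_flush_all_and_wait()
+ *     -> kfence_flush() for every possible cpu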
+ */
+static void kfence_flush(struct kfence_freelist_cpu *c)
+{
+	struct kfence_freelist_node *kfence_freelist;
+	struct kfence_metadata *meta;
+	unsigned long flags;
+
+	if (list_empty(&c->freelist)) {
+		if (KFENCE_WARN_ON(c->count))
+			c->count = 0;
+		return;
+	}
+
+	meta = list_first_entry(&c->freelist, struct kfence_metadata, list);
+	kfence_freelist = &freelist.node[meta->kpa->node];
+
+	raw_spin_lock_irqsave(&kfence_freelist->lock, flags);
+	list_splice_tail_init(&c->freelist, &kfence_freelist->freelist);
+	c->count = 0;
+	raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(kfence_flush_wait);
+static void kfence_flush_call(void *info)
+{
+	struct kfence_freelist_cpu *c = get_cpu_ptr(freelist.cpu);
+
+	kfence_flush(c);
+	put_cpu_ptr(c);
+
+	if (!atomic_dec_return(&kfence_flush_res))
+		wake_up(&kfence_flush_wait);
+}
+
+/* Flush percpu freelists on all cpus and wait for them to finish. */
+static void kfence_flush_all_and_wait(void)
+{
+	int cpu;
+
+	cpus_read_lock();
+	atomic_set(&kfence_flush_res, num_online_cpus());
+	on_each_cpu(kfence_flush_call, NULL, 0);
+
+	/* Flush offline cpus. */
+	preempt_disable();
+	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask) {
+		kfence_flush(per_cpu_ptr(freelist.cpu, cpu));
+	}
+	preempt_enable();
+	cpus_read_unlock();
+
+	wait_event_idle(kfence_flush_wait, !atomic_read(&kfence_flush_res));
+}
+
+static bool kfence_can_recover_tlb(struct kfence_pool_area *kpa)
+{
+#ifdef CONFIG_X86_64
+	/* Only recover the TLB for 1GiB-aligned, PUD-sized pools. */
+	return kpa->pool_size == PUD_SIZE;
+#else
+	/*
+	 * On arm64, the direct mapping area is already split to page granularity
+	 * with CONFIG_RODATA_FULL_DEFAULT_ENABLED=y, or CONFIG_KFENCE=y, so we
+	 * do not recover the TLB to PUD-sized huge mappings. See upstream commit
+	 * 840b23986344 ("arm64, kfence: enable KFENCE for ARM64") for details.
+	 */
+	return false;
+#endif
+}
+
+static inline void __kfence_recover_tlb(unsigned long addr)
+{
+	if (!arch_kfence_free_pool(addr))
+		pr_warn("failed to recover TLB to 1GiB at 0x%p-0x%p\n",
+			(void *)addr, (void *)(addr + PUD_SIZE));
+}
+
+static inline void kfence_recover_tlb(struct kfence_pool_area *kpa)
+{
+	unsigned long base = ALIGN_DOWN((unsigned long)kpa->addr, PUD_SIZE);
+
+	if (kfence_can_recover_tlb(kpa))
+		__kfence_recover_tlb(base);
+}
+
+/* Free a specific area. Its refcnt has already dropped to 0.
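+ * Teardown order below: flush all percpu freelists, unlink this area's
+ * metadata from its node freelist, clear per-page state, recover the
+ * direct-map TLB, then return the pages via __free_pages_core() or
+ * free_contig_range() depending on PageReserved().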
*/ +static void kfence_free_area(struct work_struct *work) +{ + unsigned long flags, i; + struct page *page; + struct kfence_pool_area *kpa = container_of(work, struct kfence_pool_area, work); + struct kfence_freelist_node *kfence_freelist; + + mutex_lock(&kfence_mutex); + if (!kpa->nr_objects || !percpu_ref_is_zero(&kpa->refcnt)) + goto out_unlock; + + kfence_flush_all_and_wait(); + + kfence_freelist = &freelist.node[kpa->node]; + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + for (i = 0; i < kpa->nr_objects; i++) + list_del(&kpa->meta[i].list); + + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); + + pr_info("freed %lu bytes for %lu objects on node %d at 0x%p-0x%p\n", + kpa->pool_size, kpa->nr_objects, kpa->node, (void *)kpa->addr, + (void *)(kpa->addr + kpa->pool_size)); + + kfence_clear_page_info((unsigned long)kpa->addr, kpa->pool_size); + kfence_recover_tlb(kpa); + page = virt_to_page(kpa->addr); + + if (PageReserved(page)) + kfence_free_pool_area(kpa); + else + kfence_free_pool_late_area(kpa); + + vfree(kpa->meta); + kpa->meta = NULL; + percpu_ref_exit(&kpa->refcnt); + kpa->nr_objects = 0; + kpa->pool_size = 0; + +out_unlock: + mutex_unlock(&kfence_mutex); + put_kpa(kpa); +} + +static void kpa_release(struct percpu_ref *ref) +{ + struct kfence_pool_area *kpa = container_of(ref, struct kfence_pool_area, refcnt); + + get_kpa(kpa); + if (!queue_work(system_long_wq, &kpa->work)) + put_kpa(kpa); +} + +static void calculate_need_alloc(void) +{ + int node, nr_kpas, base, remain, nr_node_has_cpu; + enum node_states node_stat = N_CPU; + + if (!kfence_num_objects_stat) + return; + + if (kfence_pool_node_mode) { + for_each_node(node) { + kfence_num_objects_stat[node].need = kfence_num_objects; + } + return; + } + + if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA) { + kfence_num_objects_stat[first_online_node].need = kfence_num_objects; + return; + } + + /* In global mode, we only alloc on nodes with cpus (i.e., not on pmem nodes) */ + nr_node_has_cpu = num_node_state(node_stat); + if (!nr_node_has_cpu) { + node_stat = N_ONLINE; + nr_node_has_cpu = num_node_state(node_stat); + } + nr_kpas = kfence_num_objects / KFENCE_MAX_OBJECTS_PER_AREA; + base = nr_kpas / nr_node_has_cpu; + remain = nr_kpas - base * nr_node_has_cpu; + for_each_node_state(node, node_stat) { + kfence_num_objects_stat[node].need = (base + (!!remain)) * + KFENCE_MAX_OBJECTS_PER_AREA; + if (remain) + remain--; + } +} + +static inline bool __check_map_change(int *new_node_map) +{ + int node; + + for_each_node(node) { + if (kfence_node_map[node] != new_node_map[node]) + return true; + } + + return false; +} + +static void update_kfence_node_map(int *new_node_map) +{ + int *old_node_map; + int node; + enum node_states node_stat = N_CPU; + struct zonelist *zonelist; + struct zone *zone; + struct zoneref *z; + + memset(new_node_map, -1, sizeof(int) * nr_node_ids); + + if (!num_node_state(node_stat)) + node_stat = N_ONLINE; + + for_each_node_state(node, node_stat) { + if (kfence_num_objects_stat[node].allocated) { + new_node_map[node] = node; + continue; + } + + /* We borrow from zonelist to get the nearest node to map. 
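+		 * For example (illustrative): a node without a pool of its
+		 * own maps to the first node in its zonelist that has objects
+		 * allocated, i.e. typically its nearest neighbour with a pool.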
*/
+		zonelist = node_zonelist(node, GFP_KERNEL);
+		for_each_zone_zonelist_nodemask(zone, z, zonelist, ZONE_NORMAL, NULL) {
+			if (kfence_num_objects_stat[zone_to_nid(zone)].allocated) {
+				new_node_map[node] = zone_to_nid(zone);
+				break;
+			}
+		}
+	}
+
+	/* This is the first initialization. */
+	if (!kfence_node_map) {
+		kfence_node_map = new_node_map;
+		return;
+	}
+
+	if (!__check_map_change(new_node_map)) {
+		kfree(new_node_map);
+		return;
+	}
+
+	old_node_map = kfence_node_map;
+	kfence_node_map = NULL;
+	synchronize_rcu();
+
+	kfence_flush_all_and_wait();
+
+	kfence_node_map = new_node_map;
+	kfree(old_node_map);
+}
+
+/*
+ * Get the last kfence.booting_max= from the boot cmdline.
+ * Mainly copied from get_last_crashkernel().
+ */
+static __init char *get_last_kfence_booting_max(char *name)
+{
+	char *p = boot_command_line, *ck_cmdline = NULL;
+
+	/* find kfence.booting_max and use the last one if there are more */
+	p = strstr(p, name);
+	while (p) {
+		char *end_p = strchr(p, ' ');
+
+		if (!end_p)
+			end_p = p + strlen(p);
+		ck_cmdline = p;
+		p = strstr(p+1, name);
+	}
+
+	if (!ck_cmdline)
+		return NULL;
+
+	ck_cmdline += strlen(name);
+	return ck_cmdline;
+}
+
+/*
+ * This function parses command lines in the format
+ *
+ *	kfence.booting_max=ramsize-range:size[,...]
+ *
+ * The function returns 0 on success and -EINVAL on failure.
+ * Mainly copied from parse_crashkernel_mem().
+ */
+static int __init parse_kfence_booting_max(char *cmdline,
+					   unsigned long long system_ram,
+					   unsigned long long *reserve_max)
+{
+	char *cur = cmdline, *tmp;
+
+	/* for each entry of the comma-separated list */
+	do {
+		unsigned long long start, end = ULLONG_MAX, size;
+
+		/* get the start of the range */
+		start = memparse(cur, &tmp);
+		if (cur == tmp) {
+			pr_warn("kfence.booting_max: Memory value expected\n");
+			return -EINVAL;
+		}
+		cur = tmp;
+		if (*cur != '-') {
+			pr_warn("kfence.booting_max: '-' expected\n");
+			return -EINVAL;
+		}
+		cur++;
+
+		/* if no ':' is here, then we read the end of the range */
+		if (*cur != ':') {
+			end = memparse(cur, &tmp);
+			if (cur == tmp) {
+				pr_warn("kfence.booting_max: Memory value expected\n");
+				return -EINVAL;
+			}
+			cur = tmp;
+			if (end <= start) {
+				pr_warn("kfence.booting_max: end <= start\n");
+				return -EINVAL;
+			}
+		}
+
+		if (*cur != ':') {
+			pr_warn("kfence.booting_max: ':' expected\n");
+			return -EINVAL;
+		}
+		cur++;
 
-	/* Protect the right redzone. */
-	if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
-		goto reset_slab;
+		size = memparse(cur, &tmp);
+		if (cur == tmp) {
+			pr_warn("kfence.booting_max: Memory value expected\n");
+			return -EINVAL;
+		}
+		cur = tmp;
 
-		addr += 2 * PAGE_SIZE;
-	}
+		/* match? */
+		if (system_ram >= start && system_ram < end) {
+			*reserve_max = size;
+			break;
+		}
+	} while (*cur++ == ',');
 
-	/*
-	 * Make kfence_metadata visible only when initialization is successful.
-	 * Otherwise, if the initialization fails and kfence_metadata is freed,
-	 * it may cause UAF in kfence_shutdown_cache().
-	 */
-	smp_store_release(&kfence_metadata, kfence_metadata_init);
-	return 0;
+	if (!*reserve_max)
+		pr_info("kfence.booting_max size resulted in zero bytes, disabled\n");
 
-reset_slab:
-	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab = page_slab(nth_page(pages, i));
+	return 0;
+}
 
-		if (!i || (i % 2))
-			continue;
-#ifdef CONFIG_MEMCG
-		slab->memcg_data = 0;
-#endif
-		__folio_clear_slab(slab_folio(slab));
-	}
+/* === DebugFS Interface ==================================================== */
 
-	return addr;
+static void print_pool_size(struct seq_file *seq, unsigned long byte)
+{
+	if (byte < SZ_1K)
+		seq_printf(seq, "%lu B\n", byte);
+	else if (byte < SZ_1M)
+		seq_printf(seq, "%lu KB\n", byte / SZ_1K);
+	else if (byte < SZ_1G)
+		seq_printf(seq, "%lu MB\n", byte / SZ_1M);
+	else
+		seq_printf(seq, "%lu GB\n", byte / SZ_1G);
 }
 
-static bool __init kfence_init_pool_early(void)
+static int stats_show(struct seq_file *seq, void *v)
 {
-	unsigned long addr;
+	int i, cpu;
+	struct kfence_pool_area *kpa;
+	struct rb_node *iter;
+	unsigned long *size_count;
 
-	if (!__kfence_pool)
-		return false;
+	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
 
-	addr = kfence_init_pool();
+	if (!counters)
+		return 0;
 
-	if (!addr) {
+	for (i = 0; i < KFENCE_COUNTER_COUNT; i++) {
+		s64 sum = 0;
 		/*
-		 * The pool is live and will never be deallocated from this point on.
-		 * Ignore the pool object from the kmemleak phys object tree, as it would
-		 * otherwise overlap with allocations returned by kfence_alloc(), which
-		 * are registered with kmemleak through the slab post-alloc hook.
+		 * This calculation may not be accurate, but that is fine, since
+		 * we are mostly interested in bugs and zombies. Those are rare
+		 * and unlikely to change while the counters are being summed.
 		 */
-		kmemleak_ignore_phys(__pa(__kfence_pool));
-		return true;
+		for_each_possible_cpu(cpu)
+			sum += per_cpu_ptr(counters, cpu)->counter[i];
+		seq_printf(seq, "%-35s:%20lld\n", counter_names[i], sum);
 	}
 
-	/*
-	 * Only release unprotected pages, and do not try to go back and change
-	 * page attributes due to risk of failing to do so as well. If changing
-	 * page attributes for some pages fails, it is very likely that it also
-	 * fails for the first page, and therefore expect addr==__kfence_pool in
-	 * most failure cases.
- */ - memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); - __kfence_pool = NULL; - - memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE); - kfence_metadata_init = NULL; - - return false; -} + size_count = kmalloc_array(nr_node_ids * 2, sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO); + if (!size_count) + return 0; -/* === DebugFS Interface ==================================================== */ + mutex_lock(&kfence_mutex); + kfence_for_each_area(kpa, iter) { + if (!kpa->nr_objects) + continue; + size_count[kpa->node] += kpa->nr_objects; + size_count[kpa->node + nr_node_ids] += kpa->pool_size; + } + mutex_unlock(&kfence_mutex); -static int stats_show(struct seq_file *seq, void *v) -{ - int i; + seq_puts(seq, "\nnode\tobject_size\tpool_size\n"); + for_each_node(i) { + seq_printf(seq, "%-8d%-16lu", i, size_count[i]); + print_pool_size(seq, size_count[i + nr_node_ids]); + } - seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled)); - for (i = 0; i < KFENCE_COUNTER_COUNT; i++) - seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i])); + kfree(size_count); return 0; } @@ -715,28 +1792,59 @@ DEFINE_SHOW_ATTRIBUTE(stats); */ static void *start_object(struct seq_file *seq, loff_t *pos) { - if (*pos < CONFIG_KFENCE_NUM_OBJECTS) - return (void *)((long)*pos + 1); + loff_t index = *pos; + struct kfence_pool_area *kpa; + struct rb_node *iter; + + mutex_lock(&kfence_mutex); + kfence_for_each_area(kpa, iter) { + if (index >= kpa->nr_objects) { + index -= kpa->nr_objects; + continue; + } + return &kpa->meta[index]; + } return NULL; } static void stop_object(struct seq_file *seq, void *v) { + mutex_unlock(&kfence_mutex); } static void *next_object(struct seq_file *seq, void *v, loff_t *pos) { + struct kfence_metadata *meta = (struct kfence_metadata *)v; + struct kfence_pool_area *kpa = meta->kpa; + struct rb_node *cur = &kpa->rb_node; + ++*pos; - if (*pos < CONFIG_KFENCE_NUM_OBJECTS) - return (void *)((long)*pos + 1); - return NULL; + ++meta; + if (meta - kpa->meta < kpa->nr_objects) + return meta; + seq_puts(seq, "---------------------------------\n"); +next_meta: + cur = rb_next(cur); + if (!cur) + return NULL; + kpa = kfence_rbentry(cur); + if (!kpa->nr_objects) + goto next_meta; + + return kpa->meta; } static int show_object(struct seq_file *seq, void *v) { - struct kfence_metadata *meta = &kfence_metadata[(long)v - 1]; + struct kfence_metadata *meta = (struct kfence_metadata *)v; unsigned long flags; + char buf[20]; + + if (!meta) + return 0; + sprintf(buf, "node %d:\n", meta->kpa->node); + seq_puts(seq, buf); raw_spin_lock_irqsave(&meta->lock, flags); kfence_print_object(seq, meta); raw_spin_unlock_irqrestore(&meta->lock, flags); @@ -753,14 +1861,10 @@ static const struct seq_operations objects_sops = { }; DEFINE_SEQ_ATTRIBUTE(objects); -static int kfence_debugfs_init(void) +static int __init kfence_debugfs_init(void) { - struct dentry *kfence_dir; - - if (!READ_ONCE(kfence_enabled)) - return 0; + struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL); - kfence_dir = debugfs_create_dir("kfence", NULL); debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops); debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops); return 0; @@ -772,13 +1876,17 @@ late_initcall(kfence_debugfs_init); static void kfence_check_all_canary(void) { + struct kfence_pool_area *kpa; + struct rb_node *iter; int i; - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { - struct kfence_metadata *meta = 
&kfence_metadata[i]; + kfence_for_each_area(kpa, iter) { + for (i = 0; i < kpa->nr_objects; i++) { + struct kfence_metadata *meta = &kpa->meta[i]; - if (meta->state == KFENCE_OBJECT_ALLOCATED) - check_canary(meta); + if (kfence_obj_allocated(meta)) + check_canary(meta); + } } } @@ -840,36 +1948,79 @@ static void toggle_allocation_gate(struct work_struct *work) /* === Public interface ===================================================== */ -void __init kfence_alloc_pool_and_metadata(void) +int __init update_kfence_booting_max(void) { - if (!kfence_sample_interval) - return; + static bool done __initdata; + + unsigned long long parse_mem = PUD_SIZE; + unsigned long nr_pages, nr_obj_max; + char *cmdline; + int ret; /* - * If the pool has already been initialized by arch, there is no need to - * re-allocate the memory pool. + * We may reach here twice because some arch like aarch64 + * will call this function first. */ - if (!__kfence_pool) - __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); + if (done) + return 0; + done = true; - if (!__kfence_pool) { - pr_err("failed to allocate pool\n"); - return; - } + /* Boot cmdline is not set. Just leave. */ + cmdline = get_last_kfence_booting_max("kfence.booting_max="); + if (!cmdline) + return 0; - /* The memory allocated by memblock has been zeroed out. */ - kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE); - if (!kfence_metadata_init) { - pr_err("failed to allocate metadata\n"); - memblock_free(__kfence_pool, KFENCE_POOL_SIZE); - __kfence_pool = NULL; + ret = parse_kfence_booting_max(cmdline, memblock_phys_mem_size(), &parse_mem); + /* disable booting kfence on parsing fail. */ + if (ret) + goto nokfence; + + nr_pages = min_t(unsigned long, parse_mem, PUD_SIZE) / PAGE_SIZE; + /* We need at least 4 pages to enable KFENCE. */ + if (nr_pages < 4) + goto nokfence; + + nr_obj_max = nr_pages / 2 - 1; + if (kfence_num_objects > nr_obj_max) { + kfence_num_objects = nr_obj_max; + return 1; } + + return 0; + +nokfence: + kfence_num_objects = 0; + return 1; } -static void kfence_init_enable(void) +/* Only run for the first time. */ +static bool kfence_setup_once(void) { - if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS)) - static_branch_enable(&kfence_allocation_key); + int i; + + /* + * freelist.node, freelist.cpu, counters are inited together, + * we only need to check one of them and know whether + * we are now in re-enabling. 
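+	 * (None of them are freed when KFENCE is disabled; kfence_disable()
+	 * only kills the per-area refcounts, so a later re-enable can reuse
+	 * them as-is.)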
+	 */
+	if (counters)
+		return true;
+
+	freelist.node = kmalloc_array(nr_node_ids, sizeof(struct kfence_freelist_node),
+				      GFP_KERNEL);
+	freelist.cpu = alloc_percpu(struct kfence_freelist_cpu);
+	counters = alloc_percpu(struct kfence_counter);
+
+	if (!freelist.node || !freelist.cpu || !counters)
+		goto fail;
+
+	for_each_node(i) {
+		INIT_LIST_HEAD(&freelist.node[i].freelist);
+		raw_spin_lock_init(&freelist.node[i].lock);
+	}
+
+	for_each_possible_cpu(i)
+		INIT_LIST_HEAD(&per_cpu_ptr(freelist.cpu, i)->freelist);
 
 	if (kfence_deferrable)
 		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
@@ -879,119 +2030,328 @@ static void kfence_init_enable(void)
 	if (kfence_check_on_panic)
 		atomic_notifier_chain_register(&panic_notifier_list,
 					       &kfence_check_canary_notifier);
 
+	return true;
+
+fail:
+	kfree(freelist.node);
+	freelist.node = NULL;
+	free_percpu(freelist.cpu);
+	freelist.cpu = NULL;
+	free_percpu(counters);
+	counters = NULL;
+	return false;
+}
+
+static void start_kfence(void)
+{
+	unsigned long total_nr_objects = 0;
+	struct kfence_pool_area *kpa;
+	struct rb_node *iter;
+
+	kfence_for_each_area(kpa, iter) {
+		pr_info("initialized - using %lu bytes for %lu objects on node %d at 0x%p-0x%p\n",
+			kpa->pool_size, kpa->nr_objects, kpa->node, (void *)kpa->addr,
+			(void *)(kpa->addr + kpa->pool_size));
+		total_nr_objects += kpa->nr_objects;
+	}
+
+	/* Update kfence_num_objects to export it via /sys/module/ */
+	if (total_nr_objects > KFENCE_MAX_OBJECTS_PER_AREA)
+		kfence_num_objects = rounddown(total_nr_objects, KFENCE_MAX_OBJECTS_PER_AREA);
+	else
+		kfence_num_objects = total_nr_objects;
+
+	/* Forget upstream mode. */
+	if (kfence_num_objects_snap && total_nr_objects > kfence_num_objects_snap) {
+		kfence_num_objects_snap = 0;
+		kvfree(alloc_covered);
+		alloc_covered = NULL;
+	}
+
 	WRITE_ONCE(kfence_enabled, true);
-	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	static_branch_enable(&kfence_once_enabled);
+	static_branch_enable(&kfence_allocation_key);
+	if (kfence_sample_interval < 0) {
+		static_branch_enable(&kfence_short_canary);
+		static_branch_enable(&kfence_skip_interval);
+	} else {
+		static_branch_disable(&kfence_skip_interval);
+		queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	}
+}
+
+void __init kfence_alloc_pool_and_metadata(void)
+{
+	int node;
+
+	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
+	if (!READ_ONCE(kfence_sample_interval))
+		return;
+
+	if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA) {
+		/*
+		 * A pool smaller than 1GiB is allowed neither together with
+		 * node mode nor with non-interval allocation.
+		 */
+		if (kfence_pool_node_mode || kfence_sample_interval < 0)
+			goto fail;
+
+		/*
+		 * Only limit upstream mode in production environments;
+		 * limiting a debug setup makes no sense.
+		 */
+		update_kfence_booting_max();
+		if (!kfence_num_objects)
+			goto fail;
+	}
+
+	kfence_num_objects_stat = memblock_alloc(sizeof(struct kfence_alloc_node_cond) *
+						 nr_node_ids, PAGE_SIZE);
+	if (!kfence_num_objects_stat)
+		goto fail;
+
+	/*
+	 * If the pool size is less than 1GiB, use upstream mode; otherwise,
+	 * align the pool size up to 1GiB, so the TLB can be split and later
+	 * recovered.
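+	 *
+	 * Example arithmetic (illustrative, assuming 4KiB pages and
+	 * KFENCE_MAX_OBJECTS_PER_AREA sized for one 1GiB area): since
+	 * pool_size = (nr_objects + 1) * 2 * PAGE_SIZE, one area holds
+	 * 1GiB / 8KiB - 1 = 131071 objects, and a request for e.g. 200000
+	 * objects is rounded up to two full areas.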
+ */ + if (kfence_num_objects >= KFENCE_MAX_OBJECTS_PER_AREA) + kfence_num_objects = roundup(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + else + kfence_num_objects_snap = kfence_num_objects; + + calculate_need_alloc(); + + for_each_node(node) { + if (kfence_nr_areas_per_node < kfence_num_objects_stat[node].need) + kfence_nr_areas_per_node = kfence_num_objects_stat[node].need; + } + kfence_nr_areas_per_node /= KFENCE_MAX_OBJECTS_PER_AREA; + if (!kfence_nr_areas_per_node) + kfence_nr_areas_per_node = 1; + + __kfence_pool_area = memblock_alloc(sizeof(char *) * nr_node_ids * + kfence_nr_areas_per_node, PAGE_SIZE); + if (!__kfence_pool_area) + goto fail; + + if (__kfence_pool_early_init) { + __kfence_pool_area[first_online_node] = __kfence_pool_early_init; + return; + } - pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, - CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool, - (void *)(__kfence_pool + KFENCE_POOL_SIZE)); + for_each_node(node) + kfence_alloc_pool_node(node); + + return; + +fail: + if (kfence_num_objects_stat) { + memblock_free(kfence_num_objects_stat, + sizeof(struct kfence_alloc_node_cond) * nr_node_ids); + kfence_num_objects_stat = NULL; + } + WRITE_ONCE(kfence_sample_interval, 0); } void __init kfence_init(void) { + unsigned long nr_objects = min(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + unsigned long kfence_pool_size = (nr_objects + 1) * 2 * PAGE_SIZE; + int node, area, index; + int *new_node_map; + stack_hash_seed = get_random_u32(); /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */ - if (!kfence_sample_interval) + if (!READ_ONCE(kfence_sample_interval)) return; - if (!kfence_init_pool_early()) { - pr_err("%s failed\n", __func__); - return; + if (!kfence_setup_once()) + goto fail_alloc; + + if (kfence_num_objects_snap) { + alloc_covered_order = ilog2(kfence_num_objects_snap) + 2; + alloc_covered = kvmalloc_array(ALLOC_COVERED_SIZE, sizeof(atomic_t), + GFP_KERNEL | __GFP_ZERO); + if (!alloc_covered) + goto fail_alloc; + } + + /* pre-alloc here for update_kfence_node_map() to avoid complex error handling later. 
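+	 * (update_kfence_node_map() itself returns nothing and cannot fail,
+	 *  so allocating its buffer up front keeps the failure paths simple.)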
 */
+	new_node_map = kmalloc_array(nr_node_ids, sizeof(int), GFP_KERNEL | __GFP_ZERO);
+	if (!new_node_map)
+		goto fail_covered;
+
+	if (!kfence_init_pool()) {
+		pr_err("%s failed on all nodes!\n", __func__);
+		goto fail_node_map;
+	}
+
+	update_kfence_node_map(new_node_map);
+
+	start_kfence();
+	goto out;
+
+fail_node_map:
+	kfree(new_node_map);
+fail_covered:
+	kvfree(alloc_covered);
+	alloc_covered = NULL;
+fail_alloc:
+	for_each_node(node) {
+		for (area = 0; area < kfence_nr_areas_per_node; area++) {
+			index = kfence_nr_areas_per_node * node + area;
+			if (__kfence_pool_area[index]) {
+				memblock_free_late(__pa(__kfence_pool_area[index]),
+						   kfence_pool_size);
+				__kfence_pool_area[index] = NULL;
+			}
+		}
 	}
 
-	kfence_init_enable();
+out:
+	memblock_free_late(__pa(__kfence_pool_area), sizeof(char *) * nr_node_ids *
+			   kfence_nr_areas_per_node);
+	__kfence_pool_area = NULL;
+	memblock_free_late(__pa(kfence_num_objects_stat),
+			   sizeof(struct kfence_alloc_node_cond) * nr_node_ids);
+	kfence_num_objects_stat = NULL;
+
 }
 
-static int kfence_init_late(void)
+static DECLARE_WAIT_QUEUE_HEAD(kfence_refkill_wait);
+static void kfence_kill_confirm(struct percpu_ref *ref)
 {
-	const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
-	const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
-	unsigned long addr = (unsigned long)__kfence_pool;
-	unsigned long free_size = KFENCE_POOL_SIZE;
-	int err = -ENOMEM;
+	if (!atomic_dec_return(&kfence_refkill_res))
+		wake_up(&kfence_refkill_wait);
+}
 
-#ifdef CONFIG_CONTIG_ALLOC
-	struct page *pages;
+static void kfence_enable_late(void)
+{
+	struct kfence_pool_area *kpa;
+	LIST_HEAD(ready_list);
+	struct rb_node *iter;
+	int *new_node_map;
+	int node;
 
-	pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
-				   NULL);
-	if (!pages)
-		return -ENOMEM;
+	if (!READ_ONCE(kfence_sample_interval))
+		return;
 
-	__kfence_pool = page_to_virt(pages);
-	pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
-				   NULL);
-	if (pages)
-		kfence_metadata_init = page_to_virt(pages);
-#else
-	if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
-	    nr_pages_meta > MAX_ORDER_NR_PAGES) {
-		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
-		return -EINVAL;
-	}
+	/*
+	 * If the kfence pool is initialized late, the early-init kfence pool
+	 * has already been released; reset the pointer here to avoid
+	 * re-initialization if split_linear_mapping is disabled.
+	 */
+	__kfence_pool_early_init = NULL;
 
-	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
-	if (!__kfence_pool)
-		return -ENOMEM;
+	mutex_lock(&kfence_mutex);
 
-	kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
-#endif
+	if (READ_ONCE(kfence_enabled))
+		goto out;
+
+	/*
+	 * Keep upstream mode as it is. Otherwise we would "forget" the
+	 * upstream configuration, whose pool size is < 1GiB.
+	 */
+	if (kfence_num_objects > kfence_num_objects_snap || kfence_pool_node_mode)
+		kfence_num_objects = roundup(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA);
 
-	if (!kfence_metadata_init)
-		goto free_pool;
+	if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA && kfence_sample_interval < 0)
+		goto fail;
 
-	memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
-	addr = kfence_init_pool();
-	if (!addr) {
-		kfence_init_enable();
-		kfence_debugfs_init();
-		return 0;
+	if (!kfence_setup_once())
+		goto fail;
+
+	/* Pre-allocate here for update_kfence_node_map() to avoid complex error handling later.
*/ + new_node_map = kmalloc_array(nr_node_ids, sizeof(int), GFP_KERNEL | __GFP_ZERO); + if (!new_node_map) + goto fail; + + kfence_num_objects_stat = kmalloc_array(nr_node_ids, sizeof(struct kfence_alloc_node_cond), + GFP_KERNEL | __GFP_ZERO); + if (!kfence_num_objects_stat) + goto fail_node_map; + + calculate_need_alloc(); + + kfence_for_each_area(kpa, iter) { + if (kpa->nr_objects >= KFENCE_MAX_OBJECTS_PER_AREA || kfence_num_objects_snap) + kfence_num_objects_stat[kpa->node].allocated += kpa->nr_objects; } - pr_err("%s failed\n", __func__); - free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool); - err = -EBUSY; + for_each_node(node) + kfence_alloc_pool_late_node(node, &ready_list, false); -#ifdef CONFIG_CONTIG_ALLOC - free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)), - nr_pages_meta); -free_pool: - free_contig_range(page_to_pfn(virt_to_page((void *)addr)), - free_size / PAGE_SIZE); -#else - free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE); -free_pool: - free_pages_exact((void *)addr, free_size); -#endif + /* + * Try to alloc again if there exists some nodes we fail to alloc on. + * These nodes may have no enough contig memory, so fallback to find on + * other nodes. + */ + for_each_node(node) + kfence_alloc_pool_late_node(node, &ready_list, true); - kfence_metadata_init = NULL; - __kfence_pool = NULL; - return err; + update_kfence_node_map(new_node_map); + kfree(kfence_num_objects_stat); + kfence_num_objects_stat = NULL; + + stop_machine(kfence_update_pool_root, &ready_list, NULL); + + if (RB_EMPTY_ROOT(&kfence_pool_root)) + goto fail; + + start_kfence(); + goto out; + +fail_node_map: + kfree(new_node_map); +fail: + WRITE_ONCE(kfence_sample_interval, 0); +out: + mutex_unlock(&kfence_mutex); } -static int kfence_enable_late(void) +void kfence_disable(void) { - if (!__kfence_pool) - return kfence_init_late(); + struct kfence_pool_area *kpa; + struct rb_node *iter; - WRITE_ONCE(kfence_enabled, true); - queue_delayed_work(system_unbound_wq, &kfence_timer, 0); - pr_info("re-enabled\n"); - return 0; + mutex_lock(&kfence_mutex); + + if (!xchg(&kfence_enabled, false)) + goto out_unlock; + + synchronize_rcu(); + + atomic_set(&kfence_allocation_gate, 1); +#ifdef CONFIG_KFENCE_STATIC_KEYS + wake_up(&allocation_wait); +#endif + static_branch_disable(&kfence_allocation_key); + + atomic_set(&kfence_refkill_res, 0); + kfence_for_each_area(kpa, iter) { + atomic_inc(&kfence_refkill_res); + percpu_ref_kill_and_confirm(&kpa->refcnt, kfence_kill_confirm); + } + + /* + * We must wait here until all percpu_ref being killed. + * After all tasks finished, then release the mutex lock. + */ + wait_event_idle(kfence_refkill_wait, !atomic_read(&kfence_refkill_res)); + +out_unlock: + mutex_unlock(&kfence_mutex); } -void kfence_shutdown_cache(struct kmem_cache *s) +static void kfence_shutdown_cache_area(struct kmem_cache *s, struct kfence_pool_area *kpa) { unsigned long flags; - struct kfence_metadata *meta; + struct kfence_metadata *meta, *kfence_metadata = kpa->meta; int i; - /* Pairs with release in kfence_init_pool(). */ - if (!smp_load_acquire(&kfence_metadata)) - return; - - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + for (i = 0; i < kpa->nr_objects; i++) { bool in_use; meta = &kfence_metadata[i]; @@ -1003,12 +2363,11 @@ void kfence_shutdown_cache(struct kmem_cache *s) * the lock will not help, as different critical section * serialization will have the same outcome. 
*/ - if (READ_ONCE(meta->cache) != s || - READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED) + if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta)) continue; raw_spin_lock_irqsave(&meta->lock, flags); - in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED; + in_use = meta->cache == s && kfence_obj_allocated(meta); raw_spin_unlock_irqrestore(&meta->lock, flags); if (in_use) { @@ -1030,7 +2389,7 @@ void kfence_shutdown_cache(struct kmem_cache *s) } } - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + for (i = 0; i < kpa->nr_objects; i++) { meta = &kfence_metadata[i]; /* See above. */ @@ -1044,7 +2403,19 @@ void kfence_shutdown_cache(struct kmem_cache *s) } } -void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) +void kfence_shutdown_cache(struct kmem_cache *s) +{ + struct kfence_pool_area *kpa; + struct rb_node *iter; + + if (!static_branch_unlikely(&kfence_once_enabled)) + return; + + kfence_for_each_area(kpa, iter) + kfence_shutdown_cache_area(s, kpa); +} + +void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node) { unsigned long stack_entries[KFENCE_STACK_DEPTH]; size_t num_stack_entries; @@ -1055,7 +2426,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) * we don't disable KFENCE without making an allocation. */ if (size > PAGE_SIZE) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; return NULL; } @@ -1066,7 +2437,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) */ if ((flags & GFP_ZONEMASK) || (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; return NULL; } @@ -1077,6 +2448,9 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) if (s->flags & SLAB_SKIP_KFENCE) return NULL; + if (static_branch_likely(&kfence_skip_interval)) + goto alloc; + if (atomic_inc_return(&kfence_allocation_gate) > 1) return NULL; #ifdef CONFIG_KFENCE_STATIC_KEYS @@ -1093,28 +2467,94 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) } #endif +alloc: if (!READ_ONCE(kfence_enabled)) return NULL; num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0); + if (!static_branch_likely(&kfence_skip_interval)) { + /* + * Do expensive check for coverage of allocation in slow-path after + * allocation_gate has already become non-zero, even though it might + * mean not making any allocation within a given sample interval. + * + * This ensures reasonable allocation coverage when the pool is almost + * full, including avoiding long-lived allocations of the same source + * filling up the pool (e.g. pagecache allocations). 
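+	 * alloc_covered behaves like a small counting Bloom filter (see
+	 * alloc_covered_add() above): the stack hash indexes
+	 * ALLOC_COVERED_HNUM counters, and a stack is treated as covered
+	 * once all of them are non-zero.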
+ */ + alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); + if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_COVERED]++; + return NULL; + } + } + + if (node == NUMA_NO_NODE) + node = numa_node_id(); + + return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries, + alloc_stack_hash, node); +} + +#define GFP_KFENCE_NOT_ALLOC ((GFP_ZONEMASK & ~__GFP_HIGHMEM) | __GFP_NOKFENCE | __GFP_THISNODE) +struct page *__kfence_alloc_page(int node, gfp_t flags) +{ + unsigned long stack_entries[KFENCE_STACK_DEPTH]; + size_t num_stack_entries; + u32 alloc_stack_hash; + + if (!static_branch_likely(&kfence_order0_page)) + return NULL; + + if ((flags & GFP_KFENCE_NOT_ALLOC) || (flags & GFP_USER) == GFP_USER) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; + return NULL; + } + + if (static_branch_likely(&kfence_skip_interval)) + goto alloc; + + if (atomic_inc_return(&kfence_allocation_gate) > 1) + return NULL; +#ifdef CONFIG_KFENCE_STATIC_KEYS /* - * Do expensive check for coverage of allocation in slow-path after - * allocation_gate has already become non-zero, even though it might - * mean not making any allocation within a given sample interval. - * - * This ensures reasonable allocation coverage when the pool is almost - * full, including avoiding long-lived allocations of the same source - * filling up the pool (e.g. pagecache allocations). + * waitqueue_active() is fully ordered after the update of + * kfence_allocation_gate per atomic_inc_return(). */ - alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); - if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]); + if (waitqueue_active(&allocation_wait)) { + /* + * Calling wake_up() here may deadlock when allocations happen + * from within timer code. Use an irq_work to defer it. + */ + irq_work_queue(&wake_up_kfence_timer_work); + } +#endif + +alloc: + if (!READ_ONCE(kfence_enabled)) return NULL; + + num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0); + + if (!static_branch_likely(&kfence_skip_interval)) { + /* + * Do expensive check for coverage of allocation in slow-path after + * allocation_gate has already become non-zero, even though it might + * mean not making any allocation within a given sample interval. + * + * This ensures reasonable allocation coverage when the pool is almost + * full, including avoiding long-lived allocations of the same source + * filling up the pool (e.g. pagecache allocations). 
+ */ + alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); + if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_COVERED]++; + return NULL; + } } - return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries, - alloc_stack_hash); + return kfence_guarded_alloc_page(node, stack_entries, num_stack_entries, alloc_stack_hash); } size_t kfence_ksize(const void *addr) @@ -1130,7 +2570,12 @@ size_t kfence_ksize(const void *addr) void *kfence_object_start(const void *addr) { - const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); + struct kfence_metadata *meta; + + if (!static_branch_unlikely(&kfence_once_enabled)) + return NULL; + + meta = addr_to_metadata((unsigned long)addr); /* * Read locklessly -- if there is a race with __kfence_alloc(), this is @@ -1151,27 +2596,49 @@ void __kfence_free(void *addr) * the object, as the object page may be recycled for other-typed * objects once it has been freed. meta->cache may be NULL if the cache * was destroyed. + * Save the stack trace here so that reports show where the user freed + * the object. */ - if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) + if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) { + unsigned long flags; + + raw_spin_lock_irqsave(&meta->lock, flags); + metadata_update_state(meta, KFENCE_OBJECT_RCU_FREEING, NULL, 0); + raw_spin_unlock_irqrestore(&meta->lock, flags); call_rcu(&meta->rcu_head, rcu_guarded_free); - else + } else { kfence_guarded_free(addr, meta, false); + } +} + +void __kfence_free_page(struct page *page, void *addr) +{ + struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); + + kfence_guarded_free_page(page, addr, meta); } bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs) { - const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE; struct kfence_metadata *to_report = NULL; enum kfence_error_type error_type; + struct kfence_pool_area *kpa; unsigned long flags; + int page_index; + + if (!static_branch_unlikely(&kfence_once_enabled)) + return false; - if (!is_kfence_address((void *)addr)) + kpa = get_kfence_pool_area((void *)addr); + if (!kpa) return false; if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */ return kfence_unprotect(addr); /* ... unprotect and proceed. */ - atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_BUGS]++; + + page_index = (addr - (unsigned long)kpa->addr) / PAGE_SIZE; if (page_index % 2) { /* This is a redzone, report a buffer overflow. */ @@ -1179,14 +2646,14 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs int distance = 0; meta = addr_to_metadata(addr - PAGE_SIZE); - if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) { + if (meta && kfence_obj_allocated(meta)) { to_report = meta; /* Data race ok; distance calculation approximate. */ distance = addr - data_race(meta->addr + meta->size); } meta = addr_to_metadata(addr + PAGE_SIZE); - if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) { + if (meta && kfence_obj_allocated(meta)) { /* Data race ok; distance calculation approximate. 
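* E.g. (illustrative numbers): for a fault 16 bytes past the end of the * left-hand object but a full page before the start of the right-hand one, * 16 is the smaller distance, so the report is attributed to the left-hand * object.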
*/ if (!to_report || distance > data_race(meta->addr) - addr) to_report = meta; diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h index f46fbb03062b9c050e1a3fb698743b5b4d46b912..e30b7578c52f2b2d1f448c58f5fe79167bc6cb28 100644 --- a/mm/kfence/kfence.h +++ b/mm/kfence/kfence.h @@ -38,6 +38,7 @@ enum kfence_object_state { KFENCE_OBJECT_UNUSED, /* Object is unused. */ KFENCE_OBJECT_ALLOCATED, /* Object is currently allocated. */ + KFENCE_OBJECT_RCU_FREEING, /* Object was allocated, and then being freed by rcu. */ KFENCE_OBJECT_FREED, /* Object was allocated, and then freed. */ }; @@ -100,46 +101,83 @@ struct kfence_metadata { #ifdef CONFIG_MEMCG struct obj_cgroup *objcg; #endif + struct kfence_pool_area *kpa; }; -#define KFENCE_METADATA_SIZE PAGE_ALIGN(sizeof(struct kfence_metadata) * \ - CONFIG_KFENCE_NUM_OBJECTS) +extern bool kfence_panic_on_fault; +DECLARE_STATIC_KEY_FALSE(kfence_short_canary); -extern struct kfence_metadata *kfence_metadata; +/* KFENCE error types for report generation. */ +enum kfence_error_type { + KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */ + KFENCE_ERROR_UAF, /* Detected a use-after-free access. */ + KFENCE_ERROR_CORRUPTION, /* Detected a memory corruption on free. */ + KFENCE_ERROR_INVALID, /* Invalid access of unknown type. */ + KFENCE_ERROR_INVALID_FREE, /* Invalid free. */ +}; + +void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs, + const struct kfence_metadata *meta, enum kfence_error_type type); + +void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta); +void kfence_disable(void); +extern void __free_pages_core(struct page *page, unsigned int order); + +extern struct rb_root kfence_pool_root; +#define kfence_rbentry(cur) rb_entry((cur), struct kfence_pool_area, rb_node) +#define kfence_for_each_area(kpa, iter) \ + for ((iter) = rb_first(&kfence_pool_root); \ + (iter) && ((kpa) = kfence_rbentry((iter)));\ + (iter) = rb_next((iter))) + +/** + * get_kfence_pool_area() - find the kfence pool area of the address + * @addr: address to check + * + * Return: the kfence pool area, NULL if not a kfence address + */ +static inline struct kfence_pool_area *get_kfence_pool_area(const void *addr) +{ + struct rb_node *cur; + struct kfence_pool_area *res = NULL; + + for (cur = kfence_pool_root.rb_node; cur;) { + struct kfence_pool_area *kpa = kfence_rbentry(cur); + + if ((unsigned long)addr < (unsigned long)kpa->addr) + cur = cur->rb_left; + else { + res = kpa; + cur = cur->rb_right; + } + } + + return is_kfence_address_area(addr, res) ? res : NULL; +} static inline struct kfence_metadata *addr_to_metadata(unsigned long addr) { long index; + struct kfence_metadata *kfence_metadata; + struct kfence_pool_area *kpa = get_kfence_pool_area((void *)addr); /* The checks do not affect performance; only called from slow-paths. */ - if (!is_kfence_address((void *)addr)) + if (!kpa) return NULL; + kfence_metadata = kpa->meta; + /* * May be an invalid index if called with an address at the edge of * __kfence_pool, in which case we would report an "invalid access" * error. */ - index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1; - if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS) + index = (addr - (unsigned long)kpa->addr) / (PAGE_SIZE * 2) - 1; + if (index < 0 || index >= kpa->nr_objects) return NULL; return &kfence_metadata[index]; } -/* KFENCE error types for report generation. */ -enum kfence_error_type { - KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. 
*/ - KFENCE_ERROR_UAF, /* Detected a use-after-free access. */ - KFENCE_ERROR_CORRUPTION, /* Detected a memory corruption on free. */ - KFENCE_ERROR_INVALID, /* Invalid access of unknown type. */ - KFENCE_ERROR_INVALID_FREE, /* Invalid free. */ -}; - -void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs, - const struct kfence_metadata *meta, enum kfence_error_type type); - -void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta); - #endif /* MM_KFENCE_KFENCE_H */ diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c index 95b2b84c296d08d3b3a989f1da1df1e55fa951fe..6e26b9b7ed4a71a8349675befab767c1098b2bbd 100644 --- a/mm/kfence/kfence_test.c +++ b/mm/kfence/kfence_test.c @@ -226,6 +226,8 @@ static __always_inline void test_free(void *ptr) kfree(ptr); } +#define test_free_page(addr) free_page((unsigned long)addr) + /* * If this should be a KFENCE allocation, and on which side the allocation and * the closest guard page should be. @@ -243,6 +245,7 @@ enum allocation_policy { */ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy) { + long _kfence_sample_interval = kfence_sample_interval; void *alloc; unsigned long timeout, resched_after; const char *policy_name; @@ -269,13 +272,15 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat * 100x the sample interval should be more than enough to ensure we get * a KFENCE allocation eventually. */ - timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); /* * Especially for non-preemption kernels, ensure the allocation-gate * timer can catch up: after @resched_after, every failed allocation * attempt yields, to ensure the allocation-gate timer is scheduled. */ - resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval); + resched_after = jiffies + msecs_to_jiffies(_kfence_sample_interval); do { if (test_cache) alloc = kmem_cache_alloc(test_cache, gfp); @@ -305,6 +310,9 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat } else if (policy == ALLOCATE_NONE) return alloc; + if (kfence_sample_interval < 0 && policy == ALLOCATE_NONE) + return alloc; + test_free(alloc); if (time_after(jiffies, resched_after)) @@ -315,6 +323,50 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat return NULL; /* Unreachable. */ } +static struct page *test_alloc_page(struct kunit *test, bool is_vmalloc) +{ + long _kfence_sample_interval = kfence_sample_interval; + struct page *alloc; + void *addr; + unsigned long timeout, resched_after; + + kunit_info(test, "%s: size=%zu vmalloc=%d\n", __func__, PAGE_SIZE, is_vmalloc); + + /* + * 100x the sample interval should be more than enough to ensure we get + * a KFENCE allocation eventually. + */ + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); + /* + * Especially for non-preemption kernels, ensure the allocation-gate + * timer can catch up: after @resched_after, every failed allocation + * attempt yields, to ensure the allocation-gate timer is scheduled. 
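+ * + * Illustrative arithmetic: with the default sample_interval of 100 ms, the + * give-up timeout above is msecs_to_jiffies(100 * 100), roughly 10 seconds; + * a negative sample_interval (apparently the always-sample mode of this + * series) falls back to 100 above, keeping the same ~10s bound.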
+ */ + resched_after = jiffies + msecs_to_jiffies(_kfence_sample_interval); + do { + if (is_vmalloc) { + addr = vmalloc(PAGE_SIZE); + alloc = vmalloc_to_page(addr); + if (is_kfence_address(page_to_virt(alloc))) + return alloc; + vfree(addr); + } else { + alloc = alloc_page(GFP_KERNEL); + if (is_kfence_address(page_to_virt(alloc))) + return alloc; + __free_page(alloc); + } + + if (time_after(jiffies, resched_after)) + cond_resched(); + } while (time_before(jiffies, timeout)); + + KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate page from KFENCE"); + return NULL; /* Unreachable. */ +} + static void test_out_of_bounds_read(struct kunit *test) { size_t size = 32; @@ -349,6 +401,33 @@ static void test_out_of_bounds_read(struct kunit *test) test_free(buf); } +static void test_out_of_bounds_read_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_OOB, + .fn = test_out_of_bounds_read_page, + .is_write = false, + }; + char *buf; + struct page *page; + + /* Test both sides. */ + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf - 1; + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf + PAGE_SIZE; + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); +} + static void test_out_of_bounds_write(struct kunit *test) { size_t size = 32; @@ -367,6 +446,24 @@ static void test_out_of_bounds_write(struct kunit *test) test_free(buf); } +static void test_out_of_bounds_write_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_OOB, + .fn = test_out_of_bounds_write_page, + .is_write = true, + }; + char *buf; + struct page *page; + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf - 1; + WRITE_ONCE(*expect.addr, 42); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); +} + static void test_use_after_free_read(struct kunit *test) { const size_t size = 32; @@ -383,6 +480,22 @@ static void test_use_after_free_read(struct kunit *test) KUNIT_EXPECT_TRUE(test, report_matches(&expect)); } +static void test_use_after_free_read_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_UAF, + .fn = test_use_after_free_read_page, + .is_write = false, + }; + struct page *page; + + page = test_alloc_page(test, false); + expect.addr = page_address(page); + test_free_page(expect.addr); + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); +} + static void test_double_free(struct kunit *test) { const size_t size = 32; @@ -609,7 +722,7 @@ static void test_gfpzero(struct kunit *test) int i; /* Skip if we think it'd take too long. */ - KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100); + KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100 && kfence_num_objects <= 255); setup_test_cache(test, size, 0, NULL); buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); @@ -624,7 +737,7 @@ static void test_gfpzero(struct kunit *test) break; test_free(buf2); - if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) { + if (kthread_should_stop() || (i == kfence_num_objects)) { kunit_warn(test, "giving up ... 
cannot get same object back\n"); return; } @@ -641,12 +754,19 @@ static void test_gfpzero(struct kunit *test) static void test_invalid_access(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .type = KFENCE_ERROR_INVALID, .fn = test_invalid_access, - .addr = &__kfence_pool[10], .is_write = false, }; + struct rb_node *cur = kfence_pool_root.rb_node; + char *__kfence_pool; + + if (!cur) + return; + + __kfence_pool = kfence_rbentry(cur)->addr; + expect.addr = &__kfence_pool[10]; READ_ONCE(__kfence_pool[10]); KUNIT_EXPECT_TRUE(test, report_matches(&expect)); @@ -731,6 +851,7 @@ static void test_krealloc(struct kunit *test) /* Test that some objects from a bulk allocation belong to KFENCE pool. */ static void test_memcache_alloc_bulk(struct kunit *test) { + long _kfence_sample_interval = kfence_sample_interval; const size_t size = 32; bool pass = false; unsigned long timeout; @@ -741,7 +862,9 @@ static void test_memcache_alloc_bulk(struct kunit *test) * 100x the sample interval should be more than enough to ensure we get * a KFENCE allocation eventually. */ - timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); do { void *objects[100]; int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects), @@ -767,6 +890,37 @@ static void test_memcache_alloc_bulk(struct kunit *test) KUNIT_EXPECT_FALSE(test, report_available()); } +static void test_kernel_stack(struct kunit *test) +{ + unsigned long vaddr = (unsigned long)current->stack; + struct page *page; + int i; + + KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_VMAP_STACK) && kfence_sample_interval < 0); + + for (i = 0 ; i < 1<pid, + show_alloc ? "allocated" : meta->state == KFENCE_OBJECT_RCU_FREEING ? + "rcu freeing" : "freed", track->pid, track->cpu, (unsigned long)ts_sec, rem_nsec / 1000); if (track->num_stack_entries) { @@ -128,9 +129,11 @@ static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadat void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta) { + struct kfence_metadata *kfence_metadata = meta->kpa->meta; const int size = abs(meta->size); const unsigned long start = meta->addr; const struct kmem_cache *const cache = meta->cache; + struct page *page = virt_to_page((void *)start); lockdep_assert_held(&meta->lock); @@ -141,11 +144,12 @@ void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *met seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n", meta - kfence_metadata, (void *)start, (void *)(start + size - 1), - size, (cache && cache->name) ? cache->name : ""); + size, (cache && cache->name) ? cache->name : PageSlab(page) ? + "" : "PAGE"); kfence_print_stack(seq, meta, true); - if (meta->state == KFENCE_OBJECT_FREED) { + if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING) { seq_con_printf(seq, "\n"); kfence_print_stack(seq, meta, false); } @@ -163,7 +167,11 @@ static void print_diff_canary(unsigned long address, size_t bytes_to_show, /* Do not show contents of object nor read into following guard page. */ end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr) - : min(show_until_addr, PAGE_ALIGN(address))); + : static_branch_likely(&kfence_short_canary) ? 
+ min(show_until_addr, + ALIGN(meta->addr + meta->size + 1, + L1_CACHE_BYTES)) : + min(show_until_addr, PAGE_ALIGN(address))); pr_cont("["); for (cur = (const u8 *)address; cur < end; cur++) { @@ -186,7 +194,7 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r const struct kfence_metadata *meta, enum kfence_error_type type) { unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 }; - const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1; + ptrdiff_t object_index = -1; int num_stack_entries; int skipnr = 0; @@ -201,8 +209,11 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta)) return; - if (meta) + if (meta) { lockdep_assert_held(&meta->lock); + object_index = meta - meta->kpa->meta; + } + /* * Because we may generate reports in printk-unfriendly parts of the * kernel, such as scheduler code, the use of printk() could deadlock. @@ -272,7 +283,8 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r lockdep_on(); - check_panic_on_warn("KFENCE"); + if (kfence_panic_on_fault) + panic("kfence.fault=panic set ...\n"); /* We encountered a memory safety error, taint the kernel! */ add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK); @@ -314,7 +326,7 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla kpp->kp_slab_cache = meta->cache; kpp->kp_objp = (void *)meta->addr; kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack); - if (meta->state == KFENCE_OBJECT_FREED) + if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING) kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack); /* get_stack_skipnr() ensures the first entry is outside allocator. */ kpp->kp_ret = kpp->kp_stack[0]; diff --git a/mm/khugepaged.c b/mm/khugepaged.c index a87cfe1d4b7beb71ad471ad7ed46a6f64378d7ac..11cb513add0c1b408e27eb99a0b7685e5186f67f 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -409,6 +409,30 @@ static inline int hpage_collapse_test_exit(struct mm_struct *mm) return atomic_read(&mm->mm_users) == 0; } +static bool hugepage_pmd_enabled(void) +{ + /* + * We cover the anon, shmem and the file-backed case here; file-backed + * hugepages, when configured in, are determined by the global control. + * Anon pmd-sized hugepages are determined by the pmd-size control. + * Shmem pmd-sized hugepages are also determined by its pmd-size control, + * except when the global shmem_huge is set to SHMEM_HUGE_DENY. 
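+ * + * Example (a sketch; the sysfs paths are the upstream per-size controls): + * with the global transparent_hugepage/enabled set to "never" but + * hugepages-2048kB/enabled set to "always", PMD_ORDER is set in + * huge_anon_orders_always, so this returns true and khugepaged keeps + * running for anonymous VMAs despite the global control being off.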
+ */ + if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && + hugepage_global_enabled()) + return true; + if (test_bit(PMD_ORDER, &huge_anon_orders_always)) + return true; + if (test_bit(PMD_ORDER, &huge_anon_orders_madvise)) + return true; + if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) && + hugepage_global_enabled()) + return true; + if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled()) + return true; + return false; +} + void __khugepaged_enter(struct mm_struct *mm) { struct khugepaged_mm_slot *mm_slot; @@ -445,8 +469,9 @@ void khugepaged_enter_vma(struct vm_area_struct *vma, unsigned long vm_flags) { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && - hugepage_flags_enabled()) { - if (hugepage_vma_check(vma, vm_flags, false, false, true)) + hugepage_pmd_enabled()) { + if (thp_vma_allowable_order(vma, vm_flags, false, false, true, + PMD_ORDER)) __khugepaged_enter(vma->vm_mm); } } @@ -493,11 +518,6 @@ static void release_pte_folio(struct folio *folio) folio_putback_lru(folio); } -static void release_pte_page(struct page *page) -{ - release_pte_folio(page_folio(page)); -} - static void release_pte_pages(pte_t *pte, pte_t *_pte, struct list_head *compound_pagelist) { @@ -524,15 +544,17 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte, } } -static bool is_refcount_suitable(struct page *page) +static bool is_refcount_suitable(struct folio *folio) { - int expected_refcount; + int expected_refcount = folio_mapcount(folio); + + if (!folio_test_anon(folio) || folio_test_swapcache(folio)) + expected_refcount += folio_nr_pages(folio); - expected_refcount = total_mapcount(page); - if (PageSwapCache(page)) - expected_refcount += compound_nr(page); + if (folio_test_private(folio)) + expected_refcount++; - return page_count(page) == expected_refcount; + return folio_ref_count(folio) == expected_refcount; } static int __collapse_huge_page_isolate(struct vm_area_struct *vma, @@ -542,6 +564,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, struct list_head *compound_pagelist) { struct page *page = NULL; + struct folio *folio = NULL; pte_t *_pte; int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0; bool writable = false; @@ -576,7 +599,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, goto out; } - VM_BUG_ON_PAGE(!PageAnon(page), page); + folio = page_folio(page); + VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio); if (page_mapcount(page) > 1) { ++shared; @@ -588,16 +612,15 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, } } - if (PageCompound(page)) { - struct page *p; - page = compound_head(page); + if (folio_test_large(folio)) { + struct folio *f; /* * Check if we have dealt with the compound page * already */ - list_for_each_entry(p, compound_pagelist, lru) { - if (page == p) + list_for_each_entry(f, compound_pagelist, lru) { + if (folio == f) goto next; } } @@ -608,7 +631,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, * is needed to serialize against split_huge_page * when invoked from the VM. */ - if (!trylock_page(page)) { + if (!folio_trylock(folio)) { result = SCAN_PAGE_LOCK; goto out; } @@ -624,8 +647,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, * but not from this process. The other process cannot write to * the page, only trigger CoW. 
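* E.g. (illustrative): an order-0 anon folio mapped by a single PTE and not * in the swap cache is expected to have folio_ref_count() == * folio_mapcount() == 1; any surplus reference, such as a GUP pin, makes * the check below fail with SCAN_PAGE_COUNT.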
*/ - if (!is_refcount_suitable(page)) { - unlock_page(page); + if (!is_refcount_suitable(folio)) { + folio_unlock(folio); result = SCAN_PAGE_COUNT; goto out; } @@ -634,27 +657,27 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, * Isolate the page to avoid collapsing an hugepage * currently in use by the VM. */ - if (!isolate_lru_page(page)) { - unlock_page(page); + if (!folio_isolate_lru(folio)) { + folio_unlock(folio); result = SCAN_DEL_PAGE_LRU; goto out; } - mod_node_page_state(page_pgdat(page), - NR_ISOLATED_ANON + page_is_file_lru(page), - compound_nr(page)); - VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE(PageLRU(page), page); - - if (PageCompound(page)) - list_add_tail(&page->lru, compound_pagelist); + node_stat_mod_folio(folio, + NR_ISOLATED_ANON + folio_is_file_lru(folio), + folio_nr_pages(folio)); + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); + + if (folio_test_large(folio)) + list_add_tail(&folio->lru, compound_pagelist); next: /* * If collapse was initiated by khugepaged, check that there is * enough young pte to justify collapsing the page */ if (cc->is_khugepaged && - (pte_young(pteval) || page_is_young(page) || - PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, + (pte_young(pteval) || folio_test_young(folio) || + folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm, address))) referenced++; @@ -668,13 +691,13 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, result = SCAN_LACK_REFERENCED_PAGE; } else { result = SCAN_SUCCEED; - trace_mm_collapse_huge_page_isolate(page, none_or_zero, + trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero, referenced, writable, result); return result; } out: release_pte_pages(pte, _pte, compound_pagelist); - trace_mm_collapse_huge_page_isolate(page, none_or_zero, + trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero, referenced, writable, result); return result; } @@ -685,6 +708,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte, spinlock_t *ptl, struct list_head *compound_pagelist) { + struct folio *src_folio; struct page *src_page; struct page *tmp; pte_t *_pte; @@ -706,16 +730,17 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte, } } else { src_page = pte_page(pteval); - if (!PageCompound(src_page)) - release_pte_page(src_page); + src_folio = page_folio(src_page); + if (!folio_test_large(src_folio)) + release_pte_folio(src_folio); /* * ptl mostly unnecessary, but preempt has to * be disabled to update the per-cpu stats - * inside page_remove_rmap(). + * inside folio_remove_rmap_pte(). */ spin_lock(ptl); ptep_clear(vma->vm_mm, address, _pte); - page_remove_rmap(src_page, vma, false); + folio_remove_rmap_pte(src_folio, src_page, vma); spin_unlock(ptl); free_page_and_swap_cache(src_page); } @@ -763,7 +788,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte, * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC. 
* * @pte: starting of the PTEs to copy from - * @page: the new hugepage to copy contents to + * @folio: the new hugepage to copy contents to * @pmd: pointer to the new hugepage's PMD * @orig_pmd: the original raw pages' PMD * @vma: the original raw pages' virtual memory area @@ -771,33 +796,29 @@ static void __collapse_huge_page_copy_failed(pte_t *pte, * @ptl: lock on raw pages' PTEs * @compound_pagelist: list that stores compound pages */ -static int __collapse_huge_page_copy(pte_t *pte, - struct page *page, - pmd_t *pmd, - pmd_t orig_pmd, - struct vm_area_struct *vma, - unsigned long address, - spinlock_t *ptl, - struct list_head *compound_pagelist) +static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio, + pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma, + unsigned long address, spinlock_t *ptl, + struct list_head *compound_pagelist) { - struct page *src_page; - pte_t *_pte; - pte_t pteval; - unsigned long _address; + unsigned int i; int result = SCAN_SUCCEED; /* * Copying pages' contents is subject to memory poison at any iteration. */ - for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR; - _pte++, page++, _address += PAGE_SIZE) { - pteval = ptep_get(_pte); + for (i = 0; i < HPAGE_PMD_NR; i++) { + pte_t pteval = ptep_get(pte + i); + struct page *page = folio_page(folio, i); + unsigned long src_addr = address + i * PAGE_SIZE; + struct page *src_page; + if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { - clear_user_highpage(page, _address); + clear_user_highpage(page, src_addr); continue; } src_page = pte_page(pteval); - if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) { + if (copy_mc_user_highpage(page, src_page, src_addr, vma) > 0) { result = SCAN_COPY_MC; break; } @@ -907,16 +928,16 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, if (!vma) return SCAN_VMA_NULL; - if (!transhuge_vma_suitable(vma, address)) + if (!thp_vma_suitable_order(vma, address, PMD_ORDER)) return SCAN_ADDRESS_RANGE; - if (!hugepage_vma_check(vma, vma->vm_flags, false, false, - cc->is_khugepaged)) + if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, + cc->is_khugepaged, PMD_ORDER)) return SCAN_VMA_CHECK; /* * Anon VMA expected, the address may be unmapped then * remapped to file after khugepaged reaquired the mmap_lock. * - * hugepage_vma_check may return true for qualified file + * thp_vma_allowable_order may return true for qualified file * vmas. 
*/ if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap))) @@ -1078,7 +1099,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, pte_t *pte; pgtable_t pgtable; struct folio *folio; - struct page *hpage; spinlock_t *pmd_ptl, *pte_ptl; int result = SCAN_FAIL; struct vm_area_struct *vma; @@ -1095,7 +1115,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, mmap_read_unlock(mm); result = alloc_charge_folio(&folio, mm, cc); - hpage = &folio->page; if (result != SCAN_SUCCEED) goto out_nolock; @@ -1191,7 +1210,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, */ anon_vma_unlock_write(vma->anon_vma); - result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd, + result = __collapse_huge_page_copy(pte, folio, pmd, _pmd, vma, address, pte_ptl, &compound_pagelist); pte_unmap(pte); @@ -1206,26 +1225,26 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, __folio_mark_uptodate(folio); pgtable = pmd_pgtable(_pmd); - _pmd = mk_huge_pmd(hpage, vma->vm_page_prot); + _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot); _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); - folio_add_new_anon_rmap(folio, vma, address); + folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); pgtable_trans_huge_deposit(mm, pmd, pgtable); set_pmd_at(mm, address, pmd, _pmd); update_mmu_cache_pmd(vma, address, pmd); spin_unlock(pmd_ptl); - hpage = NULL; + folio = NULL; result = SCAN_SUCCEED; out_up_write: mmap_write_unlock(mm); out_nolock: - if (hpage) - put_page(hpage); + if (folio) + folio_put(folio); trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result); return result; } @@ -1240,6 +1259,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, int result = SCAN_FAIL, referenced = 0; int none_or_zero = 0, shared = 0; struct page *page = NULL; + struct folio *folio = NULL; unsigned long _address; spinlock_t *ptl; int node = NUMA_NO_NODE, unmapped = 0; @@ -1326,29 +1346,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, } } - page = compound_head(page); - + folio = page_folio(page); /* * Record which node the original page is from and save this * information to cc->node_load[]. * Khugepaged will allocate hugepage from the node has the max * hit record. */ - node = page_to_nid(page); + node = folio_nid(folio); if (hpage_collapse_scan_abort(node, cc)) { result = SCAN_SCAN_ABORT; goto out_unmap; } cc->node_load[node]++; - if (!PageLRU(page)) { + if (!folio_test_lru(folio)) { result = SCAN_PAGE_LRU; goto out_unmap; } - if (PageLocked(page)) { + if (folio_test_locked(folio)) { result = SCAN_PAGE_LOCK; goto out_unmap; } - if (!PageAnon(page)) { + if (!folio_test_anon(folio)) { result = SCAN_PAGE_ANON; goto out_unmap; } @@ -1363,7 +1382,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, * has excessive GUP pins (i.e. 512). Anyway the same check * will be done again later the risk seems low. 
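* (For scale: on x86-64 with 4KiB base pages, HPAGE_PMD_NR = 2MiB / 4KiB = * 512, which is where the figure above comes from.)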
*/ - if (!is_refcount_suitable(page)) { + if (!is_refcount_suitable(folio)) { result = SCAN_PAGE_COUNT; goto out_unmap; } @@ -1373,8 +1392,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, * enough young pte to justify collapsing the page */ if (cc->is_khugepaged && - (pte_young(pteval) || page_is_young(page) || - PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, + (pte_young(pteval) || folio_test_young(folio) || + folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm, address))) referenced++; } @@ -1396,7 +1415,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, *mmap_locked = false; } out: - trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, + trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced, none_or_zero, result, unmapped); return result; } @@ -1466,7 +1485,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, bool notified = false; unsigned long haddr = addr & HPAGE_PMD_MASK; struct vm_area_struct *vma = vma_lookup(mm, haddr); - struct page *hpage; + struct folio *folio; pte_t *start_pte, *pte; pmd_t *pmd, pgt_pmd; spinlock_t *pml = NULL, *ptl; @@ -1492,26 +1511,22 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, * and map it by a PMD, regardless of sysfs THP settings. As such, let's * analogously elide sysfs THP settings here. */ - if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) + if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false, + PMD_ORDER)) return SCAN_VMA_CHECK; /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */ if (userfaultfd_wp(vma)) return SCAN_PTE_UFFD_WP; - hpage = find_lock_page(vma->vm_file->f_mapping, + folio = filemap_lock_folio(vma->vm_file->f_mapping, linear_page_index(vma, haddr)); - if (!hpage) + if (IS_ERR(folio)) return SCAN_PAGE_NULL; - if (!PageHead(hpage)) { - result = SCAN_FAIL; - goto drop_hpage; - } - - if (compound_order(hpage) != HPAGE_PMD_ORDER) { + if (folio_order(folio) != HPAGE_PMD_ORDER) { result = SCAN_PAGE_COMPOUND; - goto drop_hpage; + goto drop_folio; } result = find_pmd_or_thp_or_none(mm, haddr, &pmd); @@ -1525,13 +1540,13 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, */ goto maybe_install_pmd; default: - goto drop_hpage; + goto drop_folio; } result = SCAN_FAIL; start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); if (!start_pte) /* mmap_lock + page lock should prevent this */ - goto drop_hpage; + goto drop_folio; /* step 1: check all mapped PTEs are to the right huge page */ for (i = 0, addr = haddr, pte = start_pte; @@ -1556,7 +1571,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, * Note that uprobe, debugger, or MAP_PRIVATE may change the * page table, but the new page will not be a subpage of hpage. */ - if (hpage + i != page) + if (folio_page(folio, i) != page) goto abort; } @@ -1571,7 +1586,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, * page_table_lock) ptl nests inside pml. The less time we hold pml, * the better; but userfaultfd's mfill_atomic_pte() on a private VMA * inserts a valid as-if-COWed PTE without even looking up page cache. - * So page lock of hpage does not protect from it, so we must not drop + * So page lock of folio does not protect from it, so we must not drop * ptl before pgt_pmd is removed, so uffd private needs pml taken now. 
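+ * (Illustrative race this ordering prevents: without pml held, a concurrent + * UFFDIO_COPY on this private VMA could install a fresh PTE into the range + * being collapsed after our scan but before pgt_pmd is detached.)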
*/ if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED)) @@ -1595,7 +1610,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, continue; /* * We dropped ptl after the first scan, to do the mmu_notifier: - * page lock stops more PTEs of the hpage being faulted in, but + * page lock stops more PTEs of the folio being faulted in, but * does not stop write faults COWing anon copies from existing * PTEs; and does not stop those being swapped out or migrated. */ @@ -1604,7 +1619,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, goto abort; } page = vm_normal_page(vma, addr, ptent); - if (hpage + i != page) + if (folio_page(folio, i) != page) goto abort; /* @@ -1613,7 +1628,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, * PTE dirty? Shmem page is already dirty; file is read-only. */ ptep_clear(mm, addr, pte); - page_remove_rmap(page, vma, false); + folio_remove_rmap_pte(folio, page, vma); nr_ptes++; } @@ -1623,8 +1638,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, /* step 3: set proper refcount and mm_counters. */ if (nr_ptes) { - page_ref_sub(hpage, nr_ptes); - add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes); + folio_ref_sub(folio, nr_ptes); + add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes); } /* step 4: remove empty page table */ @@ -1648,14 +1663,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, maybe_install_pmd: /* step 5: install pmd entry */ result = install_pmd - ? set_huge_pmd(vma, haddr, pmd, hpage) + ? set_huge_pmd(vma, haddr, pmd, &folio->page) : SCAN_SUCCEED; - goto drop_hpage; + goto drop_folio; abort: if (nr_ptes) { flush_tlb_mm(mm); - page_ref_sub(hpage, nr_ptes); - add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes); + folio_ref_sub(folio, nr_ptes); + add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes); } if (start_pte) pte_unmap_unlock(start_pte, ptl); @@ -1663,9 +1678,9 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, spin_unlock(pml); if (notified) mmu_notifier_invalidate_range_end(&range); -drop_hpage: - unlock_page(hpage); - put_page(hpage); +drop_folio: + folio_unlock(folio); + folio_put(folio); return result; } @@ -1783,9 +1798,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, struct collapse_control *cc) { struct address_space *mapping = file->f_mapping; - struct page *page; - struct page *tmp, *dst; - struct folio *folio, *new_folio; + struct page *dst; + struct folio *folio, *tmp, *new_folio; pgoff_t index = 0, end = start + HPAGE_PMD_NR; LIST_HEAD(pagelist); XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); @@ -1821,13 +1835,13 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, } } while (1); - for (index = start; index < end; index++) { + for (index = start; index < end;) { xas_set(&xas, index); - page = xas_load(&xas); + folio = xas_load(&xas); VM_BUG_ON(index != xas.xa_index); if (is_shmem) { - if (!page) { + if (!folio) { /* * Stop if extent has been truncated or * hole-punched, and is now completely @@ -1840,10 +1854,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, } } nr_none++; + index++; continue; } - if (xa_is_value(page) || !PageUptodate(page)) { + if (xa_is_value(folio) || !folio_test_uptodate(folio)) { xas_unlock_irq(&xas); /* swap in or instantiate fallocated page */ if (shmem_get_folio(mapping->host, index, @@ -1853,28 +1868,27 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, 
} /* drain lru cache to help isolate_lru_page() */ lru_add_drain(); - page = folio_file_page(folio, index); - } else if (trylock_page(page)) { - get_page(page); + } else if (folio_trylock(folio)) { + folio_get(folio); xas_unlock_irq(&xas); } else { result = SCAN_PAGE_LOCK; goto xa_locked; } } else { /* !is_shmem */ - if (!page || xa_is_value(page)) { + if (!folio || xa_is_value(folio)) { xas_unlock_irq(&xas); page_cache_sync_readahead(mapping, &file->f_ra, file, index, end - index); /* drain lru cache to help isolate_lru_page() */ lru_add_drain(); - page = find_lock_page(mapping, index); - if (unlikely(page == NULL)) { + folio = filemap_lock_folio(mapping, index); + if (IS_ERR(folio)) { result = SCAN_FAIL; goto xa_unlocked; } - } else if (PageDirty(page)) { + } else if (folio_test_dirty(folio)) { /* * khugepaged only works on read-only fd, * so this page is dirty because it hasn't @@ -1892,12 +1906,12 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, filemap_flush(mapping); result = SCAN_FAIL; goto xa_unlocked; - } else if (PageWriteback(page)) { + } else if (folio_test_writeback(folio)) { xas_unlock_irq(&xas); result = SCAN_FAIL; goto xa_unlocked; - } else if (trylock_page(page)) { - get_page(page); + } else if (folio_trylock(folio)) { + folio_get(folio); xas_unlock_irq(&xas); } else { result = SCAN_PAGE_LOCK; @@ -1906,35 +1920,29 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, } /* - * The page must be locked, so we can drop the i_pages lock + * The folio must be locked, so we can drop the i_pages lock * without racing with truncate. */ - VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); - /* make sure the page is up to date */ - if (unlikely(!PageUptodate(page))) { + /* make sure the folio is up to date */ + if (unlikely(!folio_test_uptodate(folio))) { result = SCAN_FAIL; goto out_unlock; } /* * If file was truncated then extended, or hole-punched, before - * we locked the first page, then a THP might be there already. + * we locked the first folio, then a THP might be there already. * This will be discovered on the first iteration. */ - if (PageTransCompound(page)) { - struct page *head = compound_head(page); - - result = compound_order(head) == HPAGE_PMD_ORDER && - head->index == start - /* Maybe PMD-mapped */ - ? SCAN_PTE_MAPPED_HUGEPAGE - : SCAN_PAGE_COMPOUND; + if (folio_order(folio) == HPAGE_PMD_ORDER && + folio->index == start) { + /* Maybe PMD-mapped */ + result = SCAN_PTE_MAPPED_HUGEPAGE; goto out_unlock; } - folio = page_folio(page); - if (folio_mapping(folio) != mapping) { result = SCAN_TRUNCATED; goto out_unlock; @@ -1944,7 +1952,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, folio_test_writeback(folio))) { /* * khugepaged only works on read-only fd, so this - * page is dirty because it hasn't been flushed + * folio is dirty because it hasn't been flushed * since first write. */ result = SCAN_FAIL; @@ -1968,33 +1976,35 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, xas_lock_irq(&xas); - VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page); + VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio); /* - * We control three references to the page: + * We control 2 + nr_pages references to the folio: * - we hold a pin on it; - * - one reference from page cache; - * - one from isolate_lru_page; - * If those are the only references, then any new usage of the - * page will have to fetch it from the page cache. 
That requires - * locking the page to handle truncate, so any new usage will be - * blocked until we unlock page after collapse/during rollback. + * - nr_pages reference from page cache; + * - one from lru_isolate_folio; + * If those are the only references, then any new usage + * of the folio will have to fetch it from the page + * cache. That requires locking the folio to handle + * truncate, so any new usage will be blocked until we + * unlock folio after collapse/during rollback. */ - if (page_count(page) != 3) { + if (folio_ref_count(folio) != 2 + folio_nr_pages(folio)) { result = SCAN_PAGE_COUNT; xas_unlock_irq(&xas); - putback_lru_page(page); + folio_putback_lru(folio); goto out_unlock; } /* - * Accumulate the pages that are being collapsed. + * Accumulate the folios that are being collapsed. */ - list_add_tail(&page->lru, &pagelist); + list_add_tail(&folio->lru, &pagelist); + index += folio_nr_pages(folio); continue; out_unlock: - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); goto xa_unlocked; } @@ -2033,22 +2043,27 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, } /* - * The old pages are locked, so they won't change anymore. + * The old folios are locked, so they won't change anymore. */ index = start; dst = folio_page(new_folio, 0); - list_for_each_entry(page, &pagelist, lru) { - while (index < page->index) { + list_for_each_entry(folio, &pagelist, lru) { + int i, nr_pages = folio_nr_pages(folio); + + while (index < folio->index) { clear_highpage(dst); index++; dst++; } - if (copy_mc_highpage(dst, page) > 0) { - result = SCAN_COPY_MC; - goto rollback; + + for (i = 0; i < nr_pages; i++) { + if (copy_mc_highpage(dst, folio_page(folio, i)) > 0) { + result = SCAN_COPY_MC; + goto rollback; + } + index++; + dst++; } - index++; - dst++; } while (index < end) { clear_highpage(dst); @@ -2155,15 +2170,15 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, folio_unlock(new_folio); /* - * The collapse has succeeded, so free the old pages. + * The collapse has succeeded, so free the old folios. 
*/ - list_for_each_entry_safe(page, tmp, &pagelist, lru) { - list_del(&page->lru); - page->mapping = NULL; - ClearPageActive(page); - ClearPageUnevictable(page); - unlock_page(page); - folio_put_refs(page_folio(page), 3); + list_for_each_entry_safe(folio, tmp, &pagelist, lru) { + list_del(&folio->lru); + folio->mapping = NULL; + folio_clear_active(folio); + folio_clear_unevictable(folio); + folio_unlock(folio); + folio_put_refs(folio, 2 + folio_nr_pages(folio)); } goto out; @@ -2177,11 +2192,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, shmem_uncharge(mapping->host, nr_none); } - list_for_each_entry_safe(page, tmp, &pagelist, lru) { - list_del(&page->lru); - unlock_page(page); - putback_lru_page(page); - put_page(page); + list_for_each_entry_safe(folio, tmp, &pagelist, lru) { + list_del(&folio->lru); + folio_unlock(folio); + folio_putback_lru(folio); + folio_put(folio); } /* * Undo the updates of filemap_nr_thps_inc for non-SHMEM @@ -2211,7 +2226,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, struct file *file, pgoff_t start, struct collapse_control *cc) { - struct page *page = NULL; + struct folio *folio = NULL; struct address_space *mapping = file->f_mapping; XA_STATE(xas, &mapping->i_pages, start); int present, swap; @@ -2223,12 +2238,12 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, memset(cc->node_load, 0, sizeof(cc->node_load)); nodes_clear(cc->alloc_nmask); rcu_read_lock(); - xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { - if (xas_retry(&xas, page)) + xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) { + if (xas_retry(&xas, folio)) continue; - if (xa_is_value(page)) { - ++swap; + if (xa_is_value(folio)) { + swap += 1 << xas_get_order(&xas); if (cc->is_khugepaged && swap > khugepaged_max_ptes_swap) { result = SCAN_EXCEED_SWAP_PTE; @@ -2238,18 +2253,10 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, continue; } - /* - * TODO: khugepaged should compact smaller compound pages - * into a PMD sized page - */ - if (PageTransCompound(page)) { - struct page *head = compound_head(page); - - result = compound_order(head) == HPAGE_PMD_ORDER && - head->index == start - /* Maybe PMD-mapped */ - ? SCAN_PTE_MAPPED_HUGEPAGE - : SCAN_PAGE_COMPOUND; + if (folio_order(folio) == HPAGE_PMD_ORDER && + folio->index == start) { + /* Maybe PMD-mapped */ + result = SCAN_PTE_MAPPED_HUGEPAGE; /* * For SCAN_PTE_MAPPED_HUGEPAGE, further processing * by the caller won't touch the page cache, and so @@ -2259,31 +2266,31 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, break; } - node = page_to_nid(page); + node = folio_nid(folio); if (hpage_collapse_scan_abort(node, cc)) { result = SCAN_SCAN_ABORT; break; } cc->node_load[node]++; - if (!PageLRU(page)) { + if (!folio_test_lru(folio)) { result = SCAN_PAGE_LRU; break; } - if (page_count(page) != - 1 + page_mapcount(page) + page_has_private(page)) { + if (!is_refcount_suitable(folio)) { result = SCAN_PAGE_COUNT; break; } /* - * We probably should check if the page is referenced here, but - * nobody would transfer pte_young() to PageReferenced() for us. - * And rmap walk here is just too costly... + * We probably should check if the folio is referenced + * here, but nobody would transfer pte_young() to + * folio_test_referenced() for us. And rmap walk here + * is just too costly... 
*/ - present++; + present += folio_nr_pages(folio); if (need_resched()) { xas_pause(&xas); @@ -2302,7 +2309,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, } } - trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result); + trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result); return result; } #else @@ -2351,6 +2358,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, if (unlikely(!mmap_read_trylock(mm))) goto breakouterloop_mmap_lock; +#ifdef CONFIG_ASYNC_FORK + /* Don't scan processes in the state of async fork. */ + if (mm->async_fork_mm) + vma = NULL; +#endif + progress++; if (unlikely(hpage_collapse_test_exit(mm))) goto breakouterloop; @@ -2364,7 +2377,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, progress++; break; } - if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) { + if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, + true, PMD_ORDER)) { skip: progress++; continue; @@ -2466,8 +2480,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, static int khugepaged_has_work(void) { - return !list_empty(&khugepaged_scan.mm_head) && - hugepage_flags_enabled(); + return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled(); } static int khugepaged_wait_event(void) @@ -2540,7 +2553,7 @@ static void khugepaged_wait_work(void) return; } - if (hugepage_flags_enabled()) + if (hugepage_pmd_enabled()) wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); } @@ -2571,7 +2584,7 @@ static void set_recommended_min_free_kbytes(void) int nr_zones = 0; unsigned long recommended_min; - if (!hugepage_flags_enabled()) { + if (!hugepage_pmd_enabled()) { calculate_min_free_kbytes(); goto update_wmarks; } @@ -2621,7 +2634,7 @@ int start_stop_khugepaged(void) int err = 0; mutex_lock(&khugepaged_mutex); - if (hugepage_flags_enabled()) { + if (hugepage_pmd_enabled()) { if (!khugepaged_thread) khugepaged_thread = kthread_run(khugepaged, NULL, "khugepaged"); @@ -2647,7 +2660,7 @@ int start_stop_khugepaged(void) void khugepaged_min_free_kbytes_update(void) { mutex_lock(&khugepaged_mutex); - if (hugepage_flags_enabled() && khugepaged_thread) + if (hugepage_pmd_enabled() && khugepaged_thread) set_recommended_min_free_kbytes(); mutex_unlock(&khugepaged_mutex); } @@ -2701,7 +2714,8 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, *prev = vma; - if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) + if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false, + PMD_ORDER)) return -EINVAL; cc = kmalloc(sizeof(*cc), GFP_KERNEL); diff --git a/mm/ksm.c b/mm/ksm.c index 2e4cd681622defa97d7bca09dd47548891e3aed5..0bc985c93f5d6ef39c499f4a3d950c965c3ec8d0 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -140,6 +140,9 @@ struct ksm_scan { unsigned long address; struct ksm_rmap_item **rmap_list; unsigned long seqnr; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -1148,8 +1151,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, goto out_unlock; } - /* See page_try_share_anon_rmap(): clear PTE first. */ - if (anon_exclusive && page_try_share_anon_rmap(page)) { + /* See folio_try_share_anon_rmap_pte(): clear PTE first. 
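+ * (Ordering note: clearing the PTE before the share attempt closes the + * window in which a concurrent GUP-fast could take a new pin on the + * exclusive anon page; on failure, the PTE is simply restored below.)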
*/ + if (anon_exclusive && + folio_try_share_anon_rmap_pte(page_folio(page), page)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } @@ -1186,6 +1190,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, static int replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte) { + struct folio *kfolio = page_folio(kpage); struct mm_struct *mm = vma->vm_mm; struct folio *folio; pmd_t *pmd; @@ -1225,15 +1230,16 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, goto out_mn; } VM_BUG_ON_PAGE(PageAnonExclusive(page), page); - VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage); + VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage), + kfolio); /* * No need to check ksm_use_zero_pages here: we can only have a * zero_page here if ksm_use_zero_pages was enabled already. */ if (!is_zero_pfn(page_to_pfn(kpage))) { - get_page(kpage); - page_add_anon_rmap(kpage, vma, addr, RMAP_NONE); + folio_get(kfolio); + folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE); newpte = mk_pte(kpage, vma->vm_page_prot); } else { /* @@ -1263,7 +1269,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, set_pte_at_notify(mm, addr, ptep, newpte); folio = page_folio(page); - page_remove_rmap(page, vma, false); + folio_remove_rmap_pte(folio, page, vma); if (!folio_mapped(folio)) folio_free_swap(folio); folio_put(folio); @@ -2787,49 +2793,53 @@ void __ksm_exit(struct mm_struct *mm) trace_ksm_exit(mm); } -struct page *ksm_might_need_to_copy(struct page *page, - struct vm_area_struct *vma, unsigned long address) +struct folio *ksm_might_need_to_copy(struct folio *folio, + struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = page_folio(page); + struct page *page = folio_page(folio, 0); struct anon_vma *anon_vma = folio_anon_vma(folio); - struct page *new_page; + struct folio *new_folio; + + if (folio_test_large(folio)) + return folio; - if (PageKsm(page)) { - if (page_stable_node(page) && + if (folio_test_ksm(folio)) { + if (folio_stable_node(folio) && !(ksm_run & KSM_RUN_UNMERGE)) - return page; /* no need to copy it */ + return folio; /* no need to copy it */ } else if (!anon_vma) { - return page; /* no need to copy it */ - } else if (page->index == linear_page_index(vma, address) && + return folio; /* no need to copy it */ + } else if (folio->index == linear_page_index(vma, addr) && anon_vma->root == vma->anon_vma->root) { - return page; /* still no need to copy it */ + return folio; /* still no need to copy it */ } if (PageHWPoison(page)) return ERR_PTR(-EHWPOISON); - if (!PageUptodate(page)) - return page; /* let do_swap_page report the error */ - - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); - if (new_page && - mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) { - put_page(new_page); - new_page = NULL; - } - if (new_page) { - if (copy_mc_user_highpage(new_page, page, address, vma)) { - put_page(new_page); - memory_failure_queue(page_to_pfn(page), 0); + if (!folio_test_uptodate(folio)) + return folio; /* let do_swap_page report the error */ + + new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false); + if (new_folio && + mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) { + folio_put(new_folio); + new_folio = NULL; + } + if (new_folio) { + if (copy_mc_user_highpage(folio_page(new_folio, 0), page, + addr, vma)) { + folio_put(new_folio); + memory_failure_queue(folio_pfn(folio), 0); return 
ERR_PTR(-EHWPOISON); } - SetPageDirty(new_page); - __SetPageUptodate(new_page); - __SetPageLocked(new_page); + folio_set_dirty(new_folio); + __folio_mark_uptodate(new_folio); + __folio_set_locked(new_folio); #ifdef CONFIG_SWAP count_vm_event(KSM_SWPIN_COPY); #endif } - return new_page; + return new_folio; } void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) diff --git a/mm/madvise.c b/mm/madvise.c index 98fdb9288a68a8537b60f80c3da69e0d6d7be3d3..9dce9cb038835a1f2a4af0e8150245dab7b4acdc 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -353,6 +353,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, struct folio *folio = NULL; LIST_HEAD(folio_list); bool pageout_anon_only_filter; + int nr; if (fatal_signal_pending(current)) return -EINTR; @@ -383,7 +384,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, folio = pfn_folio(pmd_pfn(orig_pmd)); /* Do not interfere with other mappings of this folio */ - if (folio_estimated_sharers(folio) != 1) + if (folio_likely_mapped_shared(folio)) goto huge_unlock; if (pageout_anon_only_filter && !folio_test_anon(folio)) @@ -439,7 +440,8 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); - for (; addr < end; pte++, addr += PAGE_SIZE) { + for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) { + nr = 1; ptent = ptep_get(pte); if (pte_none(ptent)) @@ -453,55 +455,66 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, continue; /* - * Creating a THP page is expensive so split it only if we - * are sure it's worth. Split it if we are only owner. + * If we encounter a large folio, only split it if it is not + * fully mapped within the range we are operating on. Otherwise + * leave it as is so that it can be swapped out whole. If we + * fail to split a folio, leave it in place and advance to the + * next pte in the range. */ if (folio_test_large(folio)) { - int err; - - if (folio_estimated_sharers(folio) != 1) - break; - if (pageout_anon_only_filter && !folio_test_anon(folio)) - break; - if (!folio_trylock(folio)) - break; - folio_get(folio); - arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(start_pte, ptl); - start_pte = NULL; - err = split_folio(folio); - folio_unlock(folio); - folio_put(folio); - if (err) - break; - start_pte = pte = - pte_offset_map_lock(mm, pmd, addr, &ptl); - if (!start_pte) - break; - arch_enter_lazy_mmu_mode(); - pte--; - addr -= PAGE_SIZE; - continue; + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | + FPB_IGNORE_SOFT_DIRTY; + int max_nr = (end - addr) / PAGE_SIZE; + bool any_young; + + nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, + fpb_flags, NULL, &any_young); + if (any_young) + ptent = pte_mkyoung(ptent); + + if (nr < folio_nr_pages(folio)) { + int err; + + if (folio_likely_mapped_shared(folio)) + continue; + if (pageout_anon_only_filter && !folio_test_anon(folio)) + continue; + if (!folio_trylock(folio)) + continue; + folio_get(folio); + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); + start_pte = NULL; + err = split_folio(folio); + folio_unlock(folio); + folio_put(folio); + start_pte = pte = + pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!start_pte) + break; + arch_enter_lazy_mmu_mode(); + if (!err) + nr = 0; + continue; + } } /* * Do not interfere with other mappings of this folio and - * non-LRU folio. + * non-LRU folio. If we have a large folio at this point, we + * know it is fully mapped so if its mapcount is the same as its + * number of pages, it must be exclusive. 
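+ * E.g. (illustrative): an order-4 folio spans 16 pages; if the whole folio + * is mapped within this range and folio_mapcount() == folio_nr_pages() == 16, + * each page is mapped exactly once, so no other process shares it.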
*/ - if (!folio_test_lru(folio) || folio_mapcount(folio) != 1) + if (!folio_test_lru(folio) || + folio_mapcount(folio) != folio_nr_pages(folio)) continue; if (pageout_anon_only_filter && !folio_test_anon(folio)) continue; - VM_BUG_ON_FOLIO(folio_test_large(folio), folio); - if (pte_young(ptent)) { - ptent = ptep_get_and_clear_full(mm, addr, pte, - tlb->fullmm); - ptent = pte_mkold(ptent); - set_pte_at(mm, addr, pte, ptent); - tlb_remove_tlb_entry(tlb, pte, addr); + mkold_ptes(vma, addr, pte, nr); + tlb_remove_tlb_entries(tlb, pte, nr, addr); } /* @@ -634,6 +647,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, struct folio *folio; int nr_swap = 0; unsigned long next; + int nr, max_nr; next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd)) @@ -646,7 +660,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); - for (; addr != end; pte++, addr += PAGE_SIZE) { + for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) { + nr = 1; ptent = ptep_get(pte); if (pte_none(ptent)) @@ -661,9 +676,11 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, entry = pte_to_swp_entry(ptent); if (!non_swap_entry(entry)) { - nr_swap--; - free_swap_and_cache(entry); - pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); + max_nr = (end - addr) / PAGE_SIZE; + nr = swap_pte_batch(pte, max_nr, ptent); + nr_swap -= nr; + free_swap_and_cache_nr(entry, nr); + clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm); } else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) { pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); @@ -683,7 +700,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, if (folio_test_large(folio)) { int err; - if (folio_estimated_sharers(folio) != 1) + if (folio_likely_mapped_shared(folio)) break; if (!folio_trylock(folio)) break; @@ -1017,6 +1034,8 @@ static int madvise_vma_behavior(struct vm_area_struct *vma, struct anon_vma_name *anon_name; unsigned long new_flags = vma->vm_flags; + fixup_vma(vma); + switch (behavior) { case MADV_REMOVE: return madvise_remove(vma, prev, start, end); diff --git a/mm/memcg_zombie_reaper.c b/mm/memcg_zombie_reaper.c new file mode 100644 index 0000000000000000000000000000000000000000..995b819d028c29b3a4b9287ad43e0c2e1c39fde6 --- /dev/null +++ b/mm/memcg_zombie_reaper.c @@ -0,0 +1,322 @@ +/* + * Reap zombie memcgs: + * - reap at background periodically + * echo 1 > /sys/kernel/mm/memcg_reaper/reap_background + * - one-shot reap triggerred by users + * echo 1 > /sys/kernel/mm/memcg_reaper/reap + * + * Copyright (C) 2019 Alibaba + * Author: Xunlei Pang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
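madvise_free_pte_range() gets the same batching treatment for swap entries: swap_pte_batch() counts a run of ptes holding consecutive swap slots, so free_swap_and_cache_nr() can release them in one call and clear_not_present_full_ptes() can wipe the ptes together. A toy model of the run detection (the negative-value encoding of swap slot ids is purely an artifact of this sketch)::

    #include <stdio.h>

    /* toy pte: 0 = none, negative = swap slot id, positive = present pfn */
    static int swap_pte_batch_stub(const int *pte, int max_nr)
    {
        int nr = 1;

        while (nr < max_nr && pte[nr] == pte[0] - nr)   /* consecutive slots */
            nr++;
        return nr;
    }

    int main(void)
    {
        int ptes[] = { -10, -11, -12, 5, 0, -20, -21 };
        int n = sizeof(ptes) / sizeof(ptes[0]);

        for (int i = 0; i < n; ) {
            int nr = 1;

            if (ptes[i] < 0) {
                nr = swap_pte_batch_stub(&ptes[i], n - i);
                printf("free_swap_and_cache_nr(entry=%d, nr=%d)\n",
                       -ptes[i], nr);
            }
            i += nr;   /* the loop advances past the whole batch */
        }
        return 0;
    }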
+ */ +#include <linux/module.h> +#include <linux/kthread.h> +#include <linux/freezer.h> +#include <linux/delay.h> +#include <linux/kobject.h> +#include <linux/sysfs.h> +#include <linux/printk.h> +#include <linux/memcontrol.h> +#include <linux/swap.h> /* try_to_free_mem_cgroup_pages */ + +#define for_each_mem_cgroup_tree(iter, root) \ + for (iter = mem_cgroup_iter(root, NULL, NULL); \ + iter != NULL; \ + iter = mem_cgroup_iter(root, iter, NULL)) + +/* Reap by kthread at background, off by default */ +#define REAP_BACKGROUND_GLOBAL (1 << 0) +#define REAP_BACKGROUND_MEMCG (1 << 1) +static unsigned int reaper_kthread_on; +static unsigned int reaper_verbose; +static unsigned int reaper_scan_interval = 5; /* in seconds */ +/* pages one scan, 5GiB for 4KiB page size */ +static unsigned int reaper_pages_scan = 1310720; + +static DECLARE_WAIT_QUEUE_HEAD(reaper_waitq); + +void memcg_reap_background_set(void) +{ + reaper_kthread_on |= REAP_BACKGROUND_MEMCG; + wake_up_interruptible(&reaper_waitq); +} + +void memcg_reap_background_clear(void) +{ + reaper_kthread_on &= ~REAP_BACKGROUND_MEMCG; +} + +#ifdef CONFIG_SYSFS +static void reap_zombie_memcgs(bool background); + +#define REAPER_ATTR(_name) \ + static struct kobj_attribute _name##_attr = \ + __ATTR(_name, 0644, _name##_show, _name##_store) + +static ssize_t pages_scan_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", reaper_pages_scan); +} + +static ssize_t pages_scan_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long pages; + int err; + + err = kstrtoul(buf, 10, &pages); + if (err || pages > UINT_MAX) + return -EINVAL; + + reaper_pages_scan = pages; + + return count; +} +REAPER_ATTR(pages_scan); + +static ssize_t scan_interval_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", reaper_scan_interval); +} + +static ssize_t scan_interval_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long interval; + + err = kstrtoul(buf, 10, &interval); + if (err || interval > UINT_MAX || interval == 0) + return -EINVAL; + + reaper_scan_interval = interval; + + return count; +} +REAPER_ATTR(scan_interval); + +static ssize_t verbose_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", reaper_verbose); +} + +static ssize_t verbose_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long verbose; + + err = kstrtoul(buf, 10, &verbose); + if (err || (verbose != 0 && verbose != 1)) + return -EINVAL; + + reaper_verbose = verbose; + + return count; +} +REAPER_ATTR(verbose); + +static ssize_t reap_background_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", reaper_kthread_on); +} + +static ssize_t reap_background_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long enable; + + err = kstrtoul(buf, 10, &enable); + if (err || (enable != 0 && enable != 1)) + return -EINVAL; + + reaper_kthread_on &= ~REAP_BACKGROUND_GLOBAL; + + if (enable) { + reaper_kthread_on |= REAP_BACKGROUND_GLOBAL; + wake_up_interruptible(&reaper_waitq); + } + + return count; +} +REAPER_ATTR(reap_background); + +static ssize_t reap_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", 0); +} + +static ssize_t reap_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long enable; + + err =
kstrtoul(buf, 10, &enable); + if (err || enable != 1) + return -EINVAL; + + reap_zombie_memcgs(false); + + return count; +} +REAPER_ATTR(reap); + +static struct attribute *reaper_attrs[] = { + &pages_scan_attr.attr, + &scan_interval_attr.attr, + &verbose_attr.attr, + &reap_background_attr.attr, + &reap_attr.attr, + NULL, +}; + +static struct attribute_group reaper_attr_group = { + .attrs = reaper_attrs, + .name = "memcg_reaper", +}; +#endif + +static char name_buf[1024]; +static unsigned long +do_reap_zombie_memcg(struct mem_cgroup *memcg, bool background) +{ + unsigned long did_some = 0; + bool drained = false; + unsigned int jiffies_thresh = dirty_expire_interval * HZ / 100; + + /* Let dirty dying memcgs be controlled a while by writeback */ + if (background && + time_before(jiffies, memcg->offline_jiffies + jiffies_thresh) && + (memcg_page_state(memcg, NR_FILE_DIRTY) + + memcg_page_state(memcg, NR_WRITEBACK))) + return 0; + + /* try to free all pages in this cgroup */ + while (page_counter_read(&memcg->memory)) { + unsigned int ret; + + ret = try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); + did_some += ret; + if (ret) + continue; + + if (drained == false) { + drain_all_stock(memcg); + drained = true; + } else { + break; + } + } + + if (reaper_verbose) { + cgroup_name(memcg->css.cgroup, name_buf, sizeof(name_buf)); + if (page_counter_read(&memcg->memory) == 0) { + printk_ratelimited("empty zombie memcg: 0x%lx: %s\n", + (unsigned long)memcg, name_buf); + } else { + printk_ratelimited("non-empty zombie memcg: 0x%lx, counter %ld, %s\n", + (unsigned long)memcg, + page_counter_read(&memcg->memory), + name_buf); + } + } + + return did_some; +} + +static void reap_zombie_memcgs(bool background) +{ + unsigned long reclaimed; + unsigned long reclaimed_threshold; + struct mem_cgroup *iter; + + reclaimed = 0; + reclaimed_threshold = reaper_pages_scan; + for_each_mem_cgroup_tree(iter, NULL) { + if (background && + (reclaimed >= reclaimed_threshold)) { + mem_cgroup_iter_break(NULL, iter); + break; + } + if (mem_cgroup_online(iter)) + continue; + if (background && + !(reaper_kthread_on & REAP_BACKGROUND_GLOBAL) && + !((reaper_kthread_on & REAP_BACKGROUND_MEMCG) && + (iter->reap_background))) + continue; + reclaimed += do_reap_zombie_memcg(iter, background); + cond_resched(); + } + + if (background && reaper_scan_interval) + msleep_interruptible(reaper_scan_interval*1000); +} + +static int zombie_reaper_thread(void *unused) +{ + set_freezable(); + + /* Lower its priority to avoid hogging too much cpu */ + set_user_nice(current, 19); + + while (!kthread_should_stop()) { + if (reaper_kthread_on) { + reap_zombie_memcgs(true); + } else { + wait_event_freezable(reaper_waitq, + kthread_should_stop() || reaper_kthread_on); + } + + try_to_freeze(); + } + + return 0; +} + +static int __init memcg_zombie_reaper_init(void) +{ + static struct task_struct *zombie_reaper; + int err; + + zombie_reaper = kthread_run(zombie_reaper_thread, + NULL, "zombie_memcg_reaper"); + if (IS_ERR(zombie_reaper)) { + pr_err("%s: Unable to start reaper kthread\n", __func__); + return PTR_ERR(zombie_reaper); + } + +#ifdef CONFIG_SYSFS + err = sysfs_create_group(mm_kobj, &reaper_attr_group); + if (err) { + kthread_stop(zombie_reaper); + pr_err("%s: Unable to populate sysfs files\n", __func__); + return err; + } +#endif + + return 0; +} + +module_init(memcg_zombie_reaper_init); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d2ceadd11b1004f2166b699da1d240aa70ee455a..28dbcb0625d8d297b8a8258865994d24a3d952f1 100644 --- 
a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -64,6 +65,8 @@ #include #include #include +#include +#include #include "internal.h" #include #include @@ -71,6 +74,9 @@ #include "swap.h" #include +#ifdef CONFIG_PAGECACHE_LIMIT +#include +#endif #include @@ -89,6 +95,11 @@ static bool cgroup_memory_nosocket __ro_after_init; /* Kernel memory accounting disabled? */ static bool cgroup_memory_nokmem __ro_after_init; +#ifdef CONFIG_MEMSLI +/* Cgroup memory SLI disabled? */ +static DEFINE_STATIC_KEY_FALSE(cgroup_memory_nosli); +#endif /* CONFIG_MEMSLI */ + /* BPF memory accounting disabled? */ static bool cgroup_memory_nobpf __ro_after_init; @@ -96,6 +107,8 @@ static bool cgroup_memory_nobpf __ro_after_init; static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq); #endif +static struct workqueue_struct *memcg_wmark_wq; + /* Whether legacy memory+swap accounting is active */ static bool do_memsw_account(void) { @@ -122,9 +135,10 @@ struct mem_cgroup_tree { static struct mem_cgroup_tree soft_limit_tree __read_mostly; -/* for OOM */ +/* for OOM and MEMSLI */ struct mem_cgroup_eventfd_list { struct list_head list; + struct rcu_head rcu; struct eventfd_ctx *eventfd; }; @@ -371,7 +385,12 @@ struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio) { struct mem_cgroup *memcg = folio_memcg(folio); +#ifdef CONFIG_CGROUP_WRITEBACK + if (!memcg || + (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgwb_v1)) +#else if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) +#endif memcg = root_mem_cgroup; return &memcg->css; @@ -704,6 +723,8 @@ static const unsigned int memcg_vm_event_stat[] = { #ifdef CONFIG_TRANSPARENT_HUGEPAGE THP_FAULT_ALLOC, THP_COLLAPSE_ALLOC, + THP_SWPOUT, + THP_SWPOUT_FALLBACK, #endif }; @@ -735,6 +756,9 @@ struct memcg_vmstats_percpu { /* Cgroup1: threshold notifications & softlimit tree updates */ unsigned long nr_page_events; unsigned long targets[MEM_CGROUP_NTARGETS]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct memcg_vmstats { @@ -1248,6 +1272,133 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) dead_memcg); } +/* memcg oom priority */ +/* + * do_mem_cgroup_account_oom_skip - account the memcg with OOM-unkillable task + * @memcg: mem_cgroup struct with OOM-unkillable task + * @oc: oom_control struct + * + * Account OOM-unkillable task to its cgroup and up to the OOMing cgroup's + * @num_oom_skip, if all the tasks of one cgroup hierarchy are OOM-unkillable + * we skip this cgroup hierarchy when select the victim cgroup. + * + * The @num_oom_skip must be reset when bad process selection has finished, + * since before the next round bad process selection, these OOM-unkillable + * tasks might become killable. + * + */ +static void do_mem_cgroup_account_oom_skip(struct mem_cgroup *memcg, + struct oom_control *oc) +{ + struct mem_cgroup *root; + struct cgroup_subsys_state *css; + + if (!oc->use_priority_oom) + return; + if (unlikely(!memcg)) + return; + root = oc->memcg; + if (!root) + root = root_mem_cgroup; + + css = &memcg->css; + while (css) { + struct mem_cgroup *tmp; + + tmp = mem_cgroup_from_css(css); + tmp->num_oom_skip++; + /* + * Put these cgroups into a list to + * reduce the iteration time when reset + * the @num_oom_skip. 
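do_mem_cgroup_account_oom_skip(), whose comment ends just below, charges an OOM-unkillable task to every memcg from its own up to the OOMing root and strings each touched cgroup on a singly linked reset list, so the counters can be zeroed in one pass once victim selection finishes. A self-contained model of that bookkeeping (css reference counting is omitted; the on_reset_list flag stands in for the kernel's !next_reset emptiness test)::

    #include <stdio.h>
    #include <stddef.h>

    struct cg {
        const char *name;
        struct cg *parent;
        int num_oom_skip;
        int on_reset_list;      /* models the kernel's !next_reset test */
        struct cg *next_reset;
    };

    static struct cg *reset_list;

    static void account_oom_skip(struct cg *cg, struct cg *root)
    {
        for (; cg; cg = cg->parent) {
            cg->num_oom_skip++;
            if (!cg->on_reset_list) {
                cg->on_reset_list = 1;
                cg->next_reset = reset_list;
                reset_list = cg;
            }
            if (cg == root)
                break;
        }
    }

    int main(void)
    {
        struct cg root = {"root", NULL}, a = {"A", &root}, b = {"B", &a};

        account_oom_skip(&b, &root);    /* one unkillable task in B */
        account_oom_skip(&b, &root);    /* ...and a second one */

        /* after victim selection: one pass resets all touched counters */
        while (reset_list) {
            struct cg *it = reset_list;

            printf("%s: num_oom_skip=%d -> 0\n", it->name, it->num_oom_skip);
            it->num_oom_skip = 0;
            it->on_reset_list = 0;
            reset_list = it->next_reset;
            it->next_reset = NULL;
        }
        return 0;
    }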
+ */ + if (!tmp->next_reset) { + css_get(&tmp->css); + tmp->next_reset = oc->reset_list; + oc->reset_list = tmp; + } + + if (mem_cgroup_from_css(css) == root) + break; + + css = css->parent; + } +} + +void mem_cgroup_account_oom_skip(struct task_struct *task, + struct oom_control *oc) +{ + rcu_read_lock(); + do_mem_cgroup_account_oom_skip(mem_cgroup_from_task(task), oc); + rcu_read_unlock(); +} + +static struct mem_cgroup * +mem_cgroup_select_victim_cgroup(struct mem_cgroup *memcg) +{ + struct cgroup_subsys_state *chosen, *parent; + struct cgroup_subsys_state *victim; + int chosen_priority; + +again: + victim = NULL; + parent = &memcg->css; + rcu_read_lock(); + while (parent) { + struct cgroup_subsys_state *pos; + struct mem_cgroup *parent_mem; + + parent_mem = mem_cgroup_from_css(parent); + + if (parent->nr_procs <= parent_mem->num_oom_skip) + break; + victim = parent; + chosen = NULL; + chosen_priority = DEF_PRIORITY + 1; + list_for_each_entry_rcu(pos, &parent->children, sibling) { + struct mem_cgroup *tmp, *chosen_mem; + + tmp = mem_cgroup_from_css(pos); + + if (pos->nr_procs <= tmp->num_oom_skip) + continue; + if (tmp->priority > chosen_priority) + continue; + if (tmp->priority < chosen_priority) { + chosen_priority = tmp->priority; + chosen = pos; + continue; + } + + chosen_mem = mem_cgroup_from_css(chosen); + + if (do_memsw_account()) { + if (page_counter_read(&tmp->memsw) > + page_counter_read(&chosen_mem->memsw)) + chosen = pos; + } else if (page_counter_read(&tmp->memory) > + page_counter_read(&chosen_mem->memory)) { + chosen = pos; + } + } + parent = chosen; + } + + if (likely(victim)) { + if (!css_tryget(victim)) { + rcu_read_unlock(); + goto again; + } + } + + rcu_read_unlock(); + + if (likely(victim)) + return mem_cgroup_from_css(victim); + + return NULL; +} + /** * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy * @memcg: hierarchy root @@ -1259,7 +1410,6 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) * value, the function breaks the iteration loop. Otherwise, it will iterate * over all tasks and return 0. * - * This function must not be called for the root memory cgroup. 
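mem_cgroup_select_victim_cgroup() above descends the hierarchy one level at a time: among the children of the current parent it skips subtrees whose every task is already marked unkillable (nr_procs <= num_oom_skip), prefers the smallest priority value, and breaks ties by larger usage (memsw usage when memory+swap accounting is active). A userspace model of one level of that scan::

    #include <stdio.h>

    #define DEF_PRIORITY 12   /* priorities are written in 0..MEMCG_OOM_PRIORITY */

    struct child {
        const char *name;
        int priority;          /* lower value = preferred OOM victim */
        long usage;            /* page_counter_read(&memcg->memory) */
        int procs, oom_skip;   /* css nr_procs vs. num_oom_skip */
    };

    int main(void)
    {
        struct child kids[] = {
            {"web",     5, 50000, 8, 0},
            {"batch-a", 2,  4000, 3, 0},
            {"batch-b", 2,  9000, 4, 0},
            {"drained", 1,   100, 2, 2},  /* all tasks OOM-unkillable */
        };
        struct child *chosen = NULL;
        int chosen_priority = DEF_PRIORITY + 1;

        for (int i = 0; i < 4; i++) {
            struct child *c = &kids[i];

            if (c->procs <= c->oom_skip)      /* nothing killable below */
                continue;
            if (c->priority > chosen_priority)
                continue;
            if (c->priority < chosen_priority || c->usage > chosen->usage) {
                chosen_priority = c->priority;
                chosen = c;
            }
        }
        printf("victim subtree: %s\n", chosen ? chosen->name : "(none)");
        return 0;
    }

Here batch-b wins: drained has the best priority but no killable task, and batch-b out-consumes batch-a at equal priority.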
*/ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, int (*fn)(struct task_struct *, void *), void *arg) @@ -1267,8 +1417,6 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, struct mem_cgroup *iter; int ret = 0; - BUG_ON(mem_cgroup_is_root(memcg)); - for_each_mem_cgroup_tree(iter, memcg) { struct css_task_iter it; struct task_struct *task; @@ -1284,6 +1432,49 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, } } +void mem_cgroup_select_bad_process(struct oom_control *oc) +{ + struct mem_cgroup *memcg, *victim, *iter; + + memcg = oc->memcg; + + if (!memcg) + memcg = root_mem_cgroup; + + oc->use_priority_oom = memcg->use_priority_oom; + victim = memcg; + +retry: + if (oc->use_priority_oom) { + victim = mem_cgroup_select_victim_cgroup(memcg); + if (!victim) { + if (mem_cgroup_is_root(memcg) && oc->num_skip) + oc->chosen = (void *)-1UL; + goto out; + } + } + + mem_cgroup_scan_tasks(victim, oom_evaluate_task, oc); + if (oc->use_priority_oom) { + css_put(&victim->css); + if (oc->chosen == (void *)-1UL) + goto out; + if (!oc->chosen && victim != memcg) { + do_mem_cgroup_account_oom_skip(victim, oc); + goto retry; + } + } +out: + /* See comments in mem_cgroup_account_oom_skip() */ + while (oc->reset_list) { + iter = oc->reset_list; + iter->num_oom_skip = 0; + oc->reset_list = iter->next_reset; + iter->next_reset = NULL; + css_put(&iter->css); + } +} + #ifdef CONFIG_DEBUG_VM void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) { @@ -2047,9 +2238,6 @@ struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, struct mem_cgroup *oom_group = NULL; struct mem_cgroup *memcg; - if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) - return NULL; - if (!oom_domain) oom_domain = root_mem_cgroup; @@ -2329,7 +2517,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) * Drains all per-CPU charge caches for given root_memcg resp. subtree * of the hierarchy under it. */ -static void drain_all_stock(struct mem_cgroup *root_memcg) +void drain_all_stock(struct mem_cgroup *root_memcg) { int cpu, curcpu; @@ -2380,6 +2568,53 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu) return 0; } +static void reclaim_wmark(struct mem_cgroup *memcg) +{ + long nr_pages; + unsigned long pflags; + struct mem_cgroup *iter; + u64 start, duration; + + if (is_wmark_ok(memcg, false)) + return; + + nr_pages = page_counter_read(&memcg->memory) - + memcg->memory.wmark_low; + if (nr_pages <= 0) + return; + + nr_pages = max_t(unsigned long, SWAP_CLUSTER_MAX, nr_pages); + + /* + * Typically, we would like to record the actual cpu% of reclaim_wmark + * work, excluding any sleep/resched time. However, currently we + * simply record the whole duration of reclaim_wmark work for the + * overhead-accuracy trade-off.
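mem_cgroup_select_bad_process() wraps task scanning in a retry protocol: with priority OOM enabled it picks a victim subtree, scans it, and if no killable task turned up it charges the whole subtree as skipped and retries with the next candidate; (void *)-1UL in oc->chosen is the existing sentinel for "abort this OOM". A compact simulation of that protocol::

    #include <stdio.h>

    struct subtree { const char *name; int killable; int skipped; };

    static struct subtree trees[] = {
        {"batch", 0, 0},   /* preferred victim, but no killable task */
        {"web",   1, 0},
    };

    static struct subtree *select_victim(void)
    {
        for (int i = 0; i < 2; i++)
            if (!trees[i].skipped)
                return &trees[i];
        return NULL;
    }

    int main(void)
    {
        struct subtree *victim;

    retry:
        victim = select_victim();
        if (!victim) {
            /* kernel: oc->chosen = (void *)-1UL, abort this OOM */
            printf("no victim subtree left: abort\n");
            return 0;
        }
        if (!victim->killable) {
            victim->skipped = 1;   /* do_mem_cgroup_account_oom_skip() */
            goto retry;
        }
        printf("kill a task in %s\n", victim->name);
        return 0;
    }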
+ */ + start = ktime_get_ns(); + psi_memstall_enter(&pflags); + try_to_free_mem_cgroup_pages(memcg, nr_pages, GFP_KERNEL, true); + psi_memstall_leave(&pflags); + duration = ktime_get_ns() - start; + + css_get(&memcg->css); + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) + this_cpu_add(iter->exstat_cpu->item[MEMCG_WMARK_RECLAIM], + duration); + css_put(&memcg->css); +} + +static void wmark_work_func(struct work_struct *work) +{ + struct mem_cgroup *memcg; + + memcg = container_of(work, struct mem_cgroup, wmark_work); + + current->flags |= PF_MEMALLOC | PF_KSWAPD; + reclaim_wmark(memcg); + current->flags &= ~(PF_MEMALLOC | PF_KSWAPD); +} + static unsigned long reclaim_high(struct mem_cgroup *memcg, unsigned int nr_pages, gfp_t gfp_mask) @@ -2564,12 +2799,14 @@ void mem_cgroup_handle_over_high(gfp_t gfp_mask) int nr_retries = MAX_RECLAIM_RETRIES; struct mem_cgroup *memcg; bool in_retry = false; + u64 start; if (likely(!nr_pages)) return; memcg = get_mem_cgroup_from_mm(current->mm); current->memcg_nr_pages_over_high = 0; + memcg_lat_stat_start(&start); retry_reclaim: /* @@ -2631,6 +2868,7 @@ void mem_cgroup_handle_over_high(gfp_t gfp_mask) psi_memstall_leave(&pflags); out: + memcg_lat_stat_end(MEM_LAT_MEMCG_DIRECT_RECLAIM, start); css_put(&memcg->css); } @@ -2647,6 +2885,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, bool drained = false; bool raised_max_event = false; unsigned long pflags; + u64 start; retry: if (consume_stock(memcg, nr_pages)) @@ -2687,10 +2926,12 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, memcg_memory_event(mem_over_limit, MEMCG_MAX); raised_max_event = true; + memcg_lat_stat_start(&start); psi_memstall_enter(&pflags); nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, gfp_mask, reclaim_options); psi_memstall_leave(&pflags); + memcg_lat_stat_end(MEM_LAT_MEMCG_DIRECT_RECLAIM, start); if (mem_cgroup_margin(mem_over_limit) >= nr_pages) goto retry; @@ -2786,6 +3027,11 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, do { bool mem_high, swap_high; + if (!is_wmark_ok(memcg, true)) { + queue_work(memcg_wmark_wq, &memcg->wmark_work); + break; + } + mem_high = page_counter_read(&memcg->memory) > READ_ONCE(memcg->memory.high); swap_high = page_counter_read(&memcg->swap) > @@ -3467,6 +3713,34 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry, } #endif +static void setup_memcg_wmark(struct mem_cgroup *memcg) +{ + unsigned long high_wmark; + unsigned long low_wmark; + unsigned long max = memcg->memory.high > memcg->memory.max ? + memcg->memory.max : memcg->memory.high; + unsigned int wmark_ratio = memcg->wmark_ratio; + unsigned int wmark_scale_factor = memcg->wmark_scale_factor; + unsigned long gap; + + if (wmark_ratio) { + high_wmark = (max * wmark_ratio) / 100; + + /* + * Set the memcg watermark distance according to the + * scale factor in proportion to max limit. 
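reclaim_wmark(), which straddles this hunk boundary, only reclaims when usage exceeds wmark_low, and then asks for exactly the excess, rounded up to at least one SWAP_CLUSTER_MAX batch. The target computation in isolation::

    #include <stdio.h>

    #define SWAP_CLUSTER_MAX 32L

    /* pages reclaim_wmark() asks for: the excess over wmark_low,
     * but at least one reclaim batch */
    static long wmark_reclaim_target(long usage, long wmark_low)
    {
        long nr = usage - wmark_low;

        if (nr <= 0)
            return 0;               /* already below the low watermark */
        return nr > SWAP_CLUSTER_MAX ? nr : SWAP_CLUSTER_MAX;
    }

    int main(void)
    {
        printf("%ld\n", wmark_reclaim_target(262144, 262100)); /* 44 */
        printf("%ld\n", wmark_reclaim_target(262144, 262140)); /* -> 32 */
        printf("%ld\n", wmark_reclaim_target(262144, 300000)); /* 0 */
        return 0;
    }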
+ */ + gap = mult_frac(max, wmark_scale_factor, 10000); + low_wmark = high_wmark - gap; + + page_counter_set_wmark_low(&memcg->memory, low_wmark); + page_counter_set_wmark_high(&memcg->memory, high_wmark); + } else { + page_counter_set_wmark_low(&memcg->memory, PAGE_COUNTER_MAX); + page_counter_set_wmark_high(&memcg->memory, PAGE_COUNTER_MAX); + } +} + static DEFINE_MUTEX(memcg_max_mutex); static int mem_cgroup_resize_max(struct mem_cgroup *memcg, @@ -3517,8 +3791,15 @@ static int mem_cgroup_resize_max(struct mem_cgroup *memcg, } } while (true); - if (!ret && enlarge) - memcg_oom_recover(memcg); + if (!ret) { + setup_memcg_wmark(memcg); + + if (!is_wmark_ok(memcg, true)) + queue_work(memcg_wmark_wq, &memcg->wmark_work); + + if (enlarge) + memcg_oom_recover(memcg); + } return ret; } @@ -3662,6 +3943,27 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, return -EINVAL; } +static u64 mem_cgroup_priority_oom_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return memcg->use_priority_oom; +} + +static int mem_cgroup_priority_oom_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + if (val > 1) + return -EINVAL; + + memcg->use_priority_oom = val; + + return 0; +} + static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) { unsigned long val; @@ -3690,6 +3992,8 @@ enum { RES_MAX_USAGE, RES_FAILCNT, RES_SOFT_LIMIT, + WMARK_HIGH_LIMIT, + WMARK_LOW_LIMIT, }; static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, @@ -3730,6 +4034,10 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, return counter->failcnt; case RES_SOFT_LIMIT: return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE; + case WMARK_HIGH_LIMIT: + return (u64)counter->wmark_high * PAGE_SIZE; + case WMARK_LOW_LIMIT: + return (u64)counter->wmark_low * PAGE_SIZE; default: BUG(); } @@ -4173,6 +4481,34 @@ static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) #endif } +static u64 memcg_exstat_gather(struct mem_cgroup *memcg, + enum memcg_exstat_item idx) +{ + u64 sum = 0; + int cpu; + + for_each_online_cpu(cpu) + sum += per_cpu_ptr(memcg->exstat_cpu, cpu)->item[idx]; + + return sum; +} + +static int memcg_exstat_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + + seq_printf(m, "wmark_min_throttled_ms %llu\n", + memcg_exstat_gather(memcg, MEMCG_WMARK_MIN)); + seq_printf(m, "wmark_reclaim_work_ms %llu\n", + memcg_exstat_gather(memcg, MEMCG_WMARK_RECLAIM) >> 20); + +#ifdef CONFIG_PAGECACHE_LIMIT + seq_printf(m, "pagecache_limit_reclaimed_kb %llu\n", + memcg_exstat_gather(memcg, MEMCG_PGCACHE_RECLAIM) * PAGE_SIZE >> 10); +#endif + return 0; +} + static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, struct cftype *cft) { @@ -4197,120 +4533,617 @@ static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, return 0; } -static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) -{ - struct mem_cgroup_threshold_ary *t; - unsigned long usage; - int i; - - rcu_read_lock(); - if (!swap) - t = rcu_dereference(memcg->thresholds.primary); - else - t = rcu_dereference(memcg->memsw_thresholds.primary); +#ifdef CONFIG_MEMSLI +#define MEMCG_LAT_STAT_SMP_WRITE(name, sidx) \ +static void smp_write_##name(void *info) \ +{ \ + struct mem_cgroup *memcg = (struct mem_cgroup *)info; \ + int i; \ + \ + for (i = MEM_LAT_0_1; i < MEM_LAT_NR_COUNT; 
i++) \ + this_cpu_write(memcg->lat_stat_cpu->item[sidx][i], 0); \ +} + +MEMCG_LAT_STAT_SMP_WRITE(global_direct_reclaim, MEM_LAT_GLOBAL_DIRECT_RECLAIM) +MEMCG_LAT_STAT_SMP_WRITE(memcg_direct_reclaim, MEM_LAT_MEMCG_DIRECT_RECLAIM) +MEMCG_LAT_STAT_SMP_WRITE(direct_compact, MEM_LAT_DIRECT_COMPACT) +MEMCG_LAT_STAT_SMP_WRITE(global_direct_swapout, MEM_LAT_GLOBAL_DIRECT_SWAPOUT) +MEMCG_LAT_STAT_SMP_WRITE(memcg_direct_swapout, MEM_LAT_MEMCG_DIRECT_SWAPOUT) +MEMCG_LAT_STAT_SMP_WRITE(direct_swapin, MEM_LAT_DIRECT_SWAPIN) + +smp_call_func_t smp_memcg_lat_write_funcs[] = { + smp_write_global_direct_reclaim, + smp_write_memcg_direct_reclaim, + smp_write_direct_compact, + smp_write_global_direct_swapout, + smp_write_memcg_direct_swapout, + smp_write_direct_swapin, +}; - if (!t) - goto unlock; +static int memcg_lat_stat_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + enum mem_lat_stat_item idx = cft->private; + smp_call_func_t func = smp_memcg_lat_write_funcs[idx]; - usage = mem_cgroup_usage(memcg, swap); + if (val != 0) + return -EINVAL; - /* - * current_threshold points to threshold just below or equal to usage. - * If it's not true, a threshold was crossed after last - * call of __mem_cgroup_threshold(). - */ - i = t->current_threshold; + func((void *)memcg); + smp_call_function(func, (void *)memcg, 1); - /* - * Iterate backward over array of thresholds starting from - * current_threshold and check if a threshold is crossed. - * If none of thresholds below usage is crossed, we read - * only one element of the array here. - */ - for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) - eventfd_signal(t->entries[i].eventfd, 1); + return 0; +} - /* i = current_threshold + 1 */ - i++; +static u64 memcg_lat_stat_gather(struct mem_cgroup *memcg, + enum mem_lat_stat_item sidx, + enum mem_lat_count_t cidx) +{ + u64 sum = 0; + int cpu; - /* - * Iterate forward over array of thresholds starting from - * current_threshold+1 and check if a threshold is crossed. - * If none of thresholds above usage is crossed, we read - * only one element of the array here. 
- */ - for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) - eventfd_signal(t->entries[i].eventfd, 1); + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(memcg->lat_stat_cpu, cpu)->item[sidx][cidx]; - /* Update current_threshold */ - t->current_threshold = i - 1; -unlock: - rcu_read_unlock(); + return sum; } -static void mem_cgroup_threshold(struct mem_cgroup *memcg) +static int memcg_lat_stat_show(struct seq_file *m, void *v) { - while (memcg) { - __mem_cgroup_threshold(memcg, false); - if (do_memsw_account()) - __mem_cgroup_threshold(memcg, true); + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + enum mem_lat_stat_item idx = seq_cft(m)->private; - memcg = parent_mem_cgroup(memcg); - } + seq_printf(m, "0-1ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_0_1)); + seq_printf(m, "1-5ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_1_5)); + seq_printf(m, "5-10ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_5_10)); + seq_printf(m, "10-100ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_10_100)); + seq_printf(m, "100-500ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_100_500)); + seq_printf(m, "500-1000ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_500_1000)); + seq_printf(m, ">=1000ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_1000_INF)); + seq_printf(m, "total(ms): \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_TOTAL) >> 20); + + return 0; } -static int compare_thresholds(const void *a, const void *b) +static int __memcg_lat_stat_register_event(struct mem_cgroup *memcg, + struct eventfd_ctx *eventfd, const char *args, + enum mem_lat_stat_item sidx) { - const struct mem_cgroup_threshold *_a = a; - const struct mem_cgroup_threshold *_b = b; + struct mem_cgroup_eventfd_list *evt; - if (_a->threshold > _b->threshold) - return 1; + evt = kmalloc(sizeof(*evt), GFP_KERNEL); + if (!evt) + return -ENOMEM; - if (_a->threshold < _b->threshold) - return -1; + mutex_lock(&memcg->lat_stat_notify_lock); + + evt->eventfd = eventfd; + list_add_rcu(&evt->list, &memcg->lat_stat_notify[sidx]); + + mutex_unlock(&memcg->lat_stat_notify_lock); return 0; } -static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) +static void __memcg_lat_stat_unregister_event(struct mem_cgroup *memcg, + struct eventfd_ctx *eventfd, enum mem_lat_stat_item sidx) { - struct mem_cgroup_eventfd_list *ev; + struct mem_cgroup_eventfd_list *evt, *tmp; - spin_lock(&memcg_oom_lock); + mutex_lock(&memcg->lat_stat_notify_lock); - list_for_each_entry(ev, &memcg->oom_notify, list) - eventfd_signal(ev->eventfd, 1); + list_for_each_entry_safe(evt, tmp, &memcg->lat_stat_notify[sidx], + list) { + if (evt->eventfd == eventfd) { + list_del_rcu(&evt->list); + kfree_rcu(evt, rcu); + } + } - spin_unlock(&memcg_oom_lock); - return 0; + mutex_unlock(&memcg->lat_stat_notify_lock); } -static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) -{ - struct mem_cgroup *iter; - - for_each_mem_cgroup_tree(iter, memcg) - mem_cgroup_oom_notify_cb(iter); +#define MEMCG_LAT_STAT_REGISTER_EVENT(name, sidx) \ +static int register_event_##name(struct mem_cgroup *memcg, \ + struct eventfd_ctx *eventfd, const char *args) \ +{ \ + return __memcg_lat_stat_register_event(memcg, eventfd, args, sidx); \ +} \ +static void unregister_event_##name(struct mem_cgroup *memcg, \ + struct eventfd_ctx *eventfd) \ +{ \ + return __memcg_lat_stat_unregister_event(memcg, eventfd, sidx); \ } -static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, - struct 
eventfd_ctx *eventfd, const char *args, enum res_type type) +MEMCG_LAT_STAT_REGISTER_EVENT(global_direct_reclaim, + MEM_LAT_GLOBAL_DIRECT_RECLAIM) +MEMCG_LAT_STAT_REGISTER_EVENT(memcg_direct_reclaim, + MEM_LAT_MEMCG_DIRECT_RECLAIM) +MEMCG_LAT_STAT_REGISTER_EVENT(direct_compact, + MEM_LAT_DIRECT_COMPACT) +MEMCG_LAT_STAT_REGISTER_EVENT(global_direct_swapout, + MEM_LAT_GLOBAL_DIRECT_SWAPOUT) +MEMCG_LAT_STAT_REGISTER_EVENT(memcg_direct_swapout, + MEM_LAT_MEMCG_DIRECT_SWAPOUT) +MEMCG_LAT_STAT_REGISTER_EVENT(direct_swapin, + MEM_LAT_DIRECT_SWAPIN) + +static void memcg_lat_stat_notify_event(struct mem_cgroup *memcg, + enum mem_lat_stat_item sidx) { - struct mem_cgroup_thresholds *thresholds; - struct mem_cgroup_threshold_ary *new; - unsigned long threshold; - unsigned long usage; - int i, size, ret; + struct mem_cgroup_eventfd_list *evt; - ret = page_counter_memparse(args, "-1", &threshold); - if (ret) - return ret; + rcu_read_lock(); - mutex_lock(&memcg->thresholds_lock); + list_for_each_entry_rcu(evt, &memcg->lat_stat_notify[sidx], list) + eventfd_signal(evt->eventfd, 1); - if (type == _MEM) { - thresholds = &memcg->thresholds; + rcu_read_unlock(); +} + +static enum mem_lat_count_t get_mem_lat_count_idx(u64 duration) +{ + enum mem_lat_count_t idx; + + duration = duration >> 20; + if (duration < 1) + idx = MEM_LAT_0_1; + else if (duration < 5) + idx = MEM_LAT_1_5; + else if (duration < 10) + idx = MEM_LAT_5_10; + else if (duration < 100) + idx = MEM_LAT_10_100; + else if (duration < 500) + idx = MEM_LAT_100_500; + else if (duration < 1000) + idx = MEM_LAT_500_1000; + else + idx = MEM_LAT_1000_INF; + + return idx; +} + +void memcg_lat_stat_start(u64 *start) +{ + if (!static_branch_unlikely(&cgroup_memory_nosli) && + !mem_cgroup_disabled()) + *start = ktime_get_ns(); + else + *start = 0; +} + +void memcg_lat_stat_end(enum mem_lat_stat_item sidx, u64 start) +{ + struct mem_cgroup *memcg, *iter; + enum mem_lat_count_t cidx; + u64 duration; + + if (static_branch_unlikely(&cgroup_memory_nosli) || + mem_cgroup_disabled()) + return; + + if (start == 0) + return; + + duration = ktime_get_ns() - start; + cidx = get_mem_lat_count_idx(duration); + memcg = get_mem_cgroup_from_mm(current->mm); + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) { + this_cpu_inc(iter->lat_stat_cpu->item[sidx][cidx]); + this_cpu_add(iter->lat_stat_cpu->item[sidx][MEM_LAT_TOTAL], + duration); + memcg_lat_stat_notify_event(iter, sidx); + } + css_put(&memcg->css); +} +#endif /* CONFIG_MEMSLI */ + +static u64 mem_cgroup_priority_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return memcg->priority; +} + +static int mem_cgroup_priority_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + if (val > MEMCG_OOM_PRIORITY) + return -EINVAL; + + memcg->priority = val; + + return 0; +} + +#ifdef CONFIG_ASYNC_FORK +static u64 mem_cgroup_async_fork_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return memcg->async_fork; +} + +static int mem_cgroup_async_fork_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + memcg->async_fork = val; + return 0; +} +#endif + +static int memory_wmark_ratio_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + unsigned int wmark_ratio = 
READ_ONCE(memcg->wmark_ratio); + + seq_printf(m, "%d\n", wmark_ratio); + + return 0; +} + +static ssize_t memory_wmark_ratio_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + int ret, wmark_ratio; + + buf = strstrip(buf); + if (!buf) + return -EINVAL; + + ret = kstrtouint(buf, 0, &wmark_ratio); + if (ret) + return ret; + + if (wmark_ratio > 100) + return -EINVAL; + + xchg(&memcg->wmark_ratio, wmark_ratio); + + setup_memcg_wmark(memcg); + + if (!is_wmark_ok(memcg, true)) + queue_work(memcg_wmark_wq, &memcg->wmark_work); + + return nbytes; +} + +static int memory_wmark_scale_factor_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + unsigned int wmark_scale_factor; + + wmark_scale_factor = READ_ONCE(memcg->wmark_scale_factor); + + seq_printf(m, "%d\n", wmark_scale_factor); + + return 0; +} + +static ssize_t memory_wmark_scale_factor_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + int ret, wmark_scale_factor; + + buf = strstrip(buf); + if (!buf) + return -EINVAL; + + ret = kstrtouint(buf, 0, &wmark_scale_factor); + if (ret) + return ret; + + if (wmark_scale_factor > 1000 || wmark_scale_factor < 1) + return -EINVAL; + + xchg(&memcg->wmark_scale_factor, wmark_scale_factor); + + setup_memcg_wmark(memcg); + + return nbytes; +} + +/* + * Figure out the maximal(most conservative) @wmark_min_adj along + * the hierarchy but excluding intermediate default zero, as the + * effective one. Example: + * root + * / \ + * A D + * / \ + * B C + * / \ + * E F + * + * wmark_min_adj: A -10, B -25, C 0, D 50, E -25, F 50 + * wmark_min_eadj: A -10, B -10, C 0, D 50, E -10, F 50 + */ +static void memcg_update_wmark_min_adj(struct mem_cgroup *memcg, int val) +{ + struct mem_cgroup *p; + struct mem_cgroup *iter; + + mutex_lock(&cgroup_mutex); + memcg->wmark_min_adj = val; + /* update hierarchical wmark_min_eadj, pre-order iteration */ + for_each_mem_cgroup_tree(iter, memcg) { + if (!mem_cgroup_online(iter)) + continue; + val = iter->wmark_min_adj; + p = parent_mem_cgroup(iter); + if (p && p->wmark_min_eadj && p->wmark_min_eadj > val) + val = p->wmark_min_eadj; + iter->wmark_min_eadj = val; + } + mutex_unlock(&cgroup_mutex); +} + +static int memory_wmark_min_adj_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + + /* show the final effective value */ + seq_printf(m, "%d\n", memcg->wmark_min_eadj); + + return 0; +} + +static ssize_t memory_wmark_min_adj_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + int ret, wmark_min_adj; + + buf = strstrip(buf); + ret = kstrtoint(buf, 0, &wmark_min_adj); + if (ret) + return ret; + + if (wmark_min_adj < -25 || wmark_min_adj > 50) + return -EINVAL; + + memcg_update_wmark_min_adj(memcg, wmark_min_adj); + + return nbytes; +} + +int memcg_get_wmark_min_adj(struct task_struct *curr) +{ + struct mem_cgroup *memcg; + int val; + + if (mem_cgroup_disabled()) + return 0; + + rcu_read_lock(); + memcg = mem_cgroup_from_css(task_css(curr, memory_cgrp_id)); + if (mem_cgroup_is_root(memcg)) + val = 0; + else + val = memcg->wmark_min_eadj; + rcu_read_unlock(); + + return val; +} + +/* + * Scheduled by global page allocation to be executed from the userland + * return path and throttle when free is under memcg's global 
WMARK_MIN. + */ +void mem_cgroup_wmark_min_throttle(void) +{ + unsigned int msec = current->wmark_min_throttle_ms; + unsigned long pflags; + struct mem_cgroup *memcg, *iter; + + if (likely(!msec)) + return; + psi_memstall_enter(&pflags); + msleep_interruptible(msec); + psi_memstall_leave(&pflags); + current->wmark_min_throttle_ms = 0; + + /* Account throttled time hierarchically, ignore premature sleep */ + memcg = get_mem_cgroup_from_mm(current->mm); + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) + __this_cpu_add(iter->exstat_cpu->item[MEMCG_WMARK_MIN], msec); + css_put(&memcg->css); +} + +#define WMARK_MIN_THROTTLE_MS 100UL +/* + * Tasks in memcg having positive memory.wmark_min_adj has its + * own global min watermark higher than the global WMARK_MIN: + * "WMARK_MIN + (WMARK_LOW - WMARK_MIN) * memory.wmark_min_adj" + * + * Positive memory.wmark_min_adj means low QoS requirements. When + * allocation broke memcg min watermark, it should trigger direct + * reclaim traditionally, here trigger throttle instead to further + * prevent them from disturbing others. + * + * The throttle time is simply linearly proportional to the pages + * consumed below memcg's min watermark. + * + * The base throttle time is WMARK_MIN_THROTTLE_MS, and the maximal + * throttle time is ten times WMARK_MIN_THROTTLE_MS. + * + * The actual throttling will be executed from the userland return + * path, see mem_cgroup_wmark_min_throttle(). + */ +void memcg_check_wmark_min_adj(struct task_struct *curr, + struct alloc_context *ac) +{ + struct zoneref *z; + struct zone *zone; + unsigned long wmark_min, wmark, min_low_gap, free_pages; + int wmark_min_adj = memcg_get_wmark_min_adj(curr); + + if (wmark_min_adj <= 0) + return; + + if (curr->wmark_min_throttle_ms) + return; + + z = first_zones_zonelist(ac->zonelist, ac->highest_zoneidx, ac->nodemask); + for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, + ac->nodemask) { + if (cpusets_enabled() && + !__cpuset_zone_allowed(zone, __GFP_HARDWALL)) + continue; + + wmark_min = min_wmark_pages(zone); + min_low_gap = low_wmark_pages(zone) - wmark_min; + free_pages = zone_page_state(zone, NR_FREE_PAGES); + wmark = wmark_min + min_low_gap * wmark_min_adj / 100; + if (free_pages < wmark && wmark > wmark_min) { + unsigned long msec; + + /* + * The throttle time is simply linearly proportional + * to the pages consumed below memcg's min watermark. + */ + msec = (wmark - free_pages) * WMARK_MIN_THROTTLE_MS / + (wmark - wmark_min); + msec = clamp(msec, 1UL, 10 * WMARK_MIN_THROTTLE_MS); + curr->wmark_min_throttle_ms = msec; + set_notify_resume(curr); + break; + } + } +} + +static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) +{ + struct mem_cgroup_threshold_ary *t; + unsigned long usage; + int i; + + rcu_read_lock(); + if (!swap) + t = rcu_dereference(memcg->thresholds.primary); + else + t = rcu_dereference(memcg->memsw_thresholds.primary); + + if (!t) + goto unlock; + + usage = mem_cgroup_usage(memcg, swap); + + /* + * current_threshold points to threshold just below or equal to usage. + * If it's not true, a threshold was crossed after last + * call of __mem_cgroup_threshold(). + */ + i = t->current_threshold; + + /* + * Iterate backward over array of thresholds starting from + * current_threshold and check if a threshold is crossed. + * If none of thresholds below usage is crossed, we read + * only one element of the array here. 
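memcg_check_wmark_min_adj() above gives tasks with a positive wmark_min_adj a private min watermark between the zone's min and low marks, and converts any shortfall below it into a sleep: 100 ms per full depletion, clamped to [1, 1000] ms, executed on the return to userland. The arithmetic by itself::

    #include <stdio.h>

    #define WMARK_MIN_THROTTLE_MS 100UL

    static unsigned long clampul(unsigned long v, unsigned long lo,
                                 unsigned long hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    /* sleep time grows linearly with the shortfall below the task's
     * private watermark: min + (low - min) * wmark_min_adj / 100 */
    static unsigned long throttle_ms(unsigned long wmark_min,
                                     unsigned long wmark_low,
                                     unsigned long free_pages, int min_adj)
    {
        unsigned long wmark;

        if (min_adj <= 0)
            return 0;
        wmark = wmark_min + (wmark_low - wmark_min) * min_adj / 100;
        if (free_pages >= wmark || wmark <= wmark_min)
            return 0;
        return clampul((wmark - free_pages) * WMARK_MIN_THROTTLE_MS /
                       (wmark - wmark_min), 1, 10 * WMARK_MIN_THROTTLE_MS);
    }

    int main(void)
    {
        /* min=1000 low=2000 pages, adj=50 -> private wmark 1500 */
        printf("%lu ms\n", throttle_ms(1000, 2000, 1400, 50)); /* 20 */
        printf("%lu ms\n", throttle_ms(1000, 2000, 1000, 50)); /* 100 */
        printf("%lu ms\n", throttle_ms(1000, 2000, 1800, 50)); /* 0 */
        return 0;
    }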
+ */ + for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) + eventfd_signal(t->entries[i].eventfd, 1); + + /* i = current_threshold + 1 */ + i++; + + /* + * Iterate forward over array of thresholds starting from + * current_threshold+1 and check if a threshold is crossed. + * If none of thresholds above usage is crossed, we read + * only one element of the array here. + */ + for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) + eventfd_signal(t->entries[i].eventfd, 1); + + /* Update current_threshold */ + t->current_threshold = i - 1; +unlock: + rcu_read_unlock(); +} + +static void mem_cgroup_threshold(struct mem_cgroup *memcg) +{ + while (memcg) { + __mem_cgroup_threshold(memcg, false); + if (do_memsw_account()) + __mem_cgroup_threshold(memcg, true); + + memcg = parent_mem_cgroup(memcg); + } +} + +static int compare_thresholds(const void *a, const void *b) +{ + const struct mem_cgroup_threshold *_a = a; + const struct mem_cgroup_threshold *_b = b; + + if (_a->threshold > _b->threshold) + return 1; + + if (_a->threshold < _b->threshold) + return -1; + + return 0; +} + +static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) +{ + struct mem_cgroup_eventfd_list *ev; + + spin_lock(&memcg_oom_lock); + + list_for_each_entry(ev, &memcg->oom_notify, list) + eventfd_signal(ev->eventfd, 1); + + spin_unlock(&memcg_oom_lock); + return 0; +} + +static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) +{ + struct mem_cgroup *iter; + + for_each_mem_cgroup_tree(iter, memcg) + mem_cgroup_oom_notify_cb(iter); +} + +static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, + struct eventfd_ctx *eventfd, const char *args, enum res_type type) +{ + struct mem_cgroup_thresholds *thresholds; + struct mem_cgroup_threshold_ary *new; + unsigned long threshold; + unsigned long usage; + int i, size, ret; + + ret = page_counter_memparse(args, "-1", &threshold); + if (ret) + return ret; + + mutex_lock(&memcg->thresholds_lock); + + if (type == _MEM) { + thresholds = &memcg->thresholds; usage = mem_cgroup_usage(memcg, false); } else if (type == _MEMSWAP) { thresholds = &memcg->memsw_thresholds; @@ -4549,6 +5382,37 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, return 0; } +static int memory_oom_group_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + + seq_printf(m, "%d\n", memcg->oom_group); + + return 0; +} + +static ssize_t memory_oom_group_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + int ret, oom_group; + + buf = strstrip(buf); + if (!buf) + return -EINVAL; + + ret = kstrtoint(buf, 0, &oom_group); + if (ret) + return ret; + + if (oom_group != 0 && oom_group != 1) + return -EINVAL; + + memcg->oom_group = oom_group; + + return nbytes; +} + #ifdef CONFIG_CGROUP_WRITEBACK #include @@ -4953,6 +5817,26 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { event->register_event = memsw_cgroup_usage_register_event; event->unregister_event = memsw_cgroup_usage_unregister_event; +#ifdef CONFIG_MEMSLI + } else if (!strcmp(name, "memory.direct_reclaim_global_latency")) { + event->register_event = register_event_global_direct_reclaim; + event->unregister_event = unregister_event_global_direct_reclaim; + } else if (!strcmp(name, "memory.direct_reclaim_memcg_latency")) { + event->register_event = 
register_event_memcg_direct_reclaim; + event->unregister_event = unregister_event_memcg_direct_reclaim; + } else if (!strcmp(name, "memory.direct_compact_latency")) { + event->register_event = register_event_direct_compact; + event->unregister_event = unregister_event_direct_compact; + } else if (!strcmp(name, "memory.direct_swapout_global_latency")) { + event->register_event = register_event_global_direct_swapout; + event->unregister_event = unregister_event_global_direct_swapout; + } else if (!strcmp(name, "memory.direct_swapout_memcg_latency")) { + event->register_event = register_event_memcg_direct_swapout; + event->unregister_event = unregister_event_memcg_direct_swapout; + } else if (!strcmp(name, "memory.direct_swapin_latency")) { + event->register_event = register_event_direct_swapin; + event->unregister_event = unregister_event_direct_swapin; +#endif } else { ret = -EINVAL; goto out_put_cfile; @@ -4999,21 +5883,127 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, out_kfree: kfree(event); - return ret; + return ret; +} + +static u64 memcg_reap_background_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return mem_cgroup_from_css(css)->reap_background; +} + +extern void memcg_reap_background_set(void); +extern void memcg_reap_background_clear(void); +static int memcg_reap_background_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *iter, *memcg = mem_cgroup_from_css(css); + + /* Only 0 and 1 are allowed */ + if (val > 1) + return -EINVAL; + + for_each_mem_cgroup_tree(iter, memcg) + iter->reap_background = val; + + if (val) + memcg_reap_background_set(); + else if (mem_cgroup_is_root(memcg)) + memcg_reap_background_clear(); + + return 0; +} + +#if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) +static int mem_cgroup_slab_show(struct seq_file *m, void *p) +{ + /* + * Deprecated. + * Please, take a look at tools/cgroup/memcg_slabinfo.py . 
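The new MEMSLI latency files plug into the existing cgroup-v1 event_control mechanism above, so userspace can receive an eventfd notification whenever a latency sample is recorded. A sketch of the registration, assuming a v1 memory controller mounted at /sys/fs/cgroup/memory and a group named mygroup (both paths are illustrative)::

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <sys/eventfd.h>

    int main(void)
    {
        const char *dir = "/sys/fs/cgroup/memory/mygroup";   /* assumed */
        char path[256], cmd[64];
        uint64_t hits;
        int efd = eventfd(0, 0);

        snprintf(path, sizeof(path),
                 "%s/memory.direct_reclaim_memcg_latency", dir);
        int lfd = open(path, O_RDONLY);

        snprintf(path, sizeof(path), "%s/cgroup.event_control", dir);
        int cfd = open(path, O_WRONLY);

        if (efd < 0 || lfd < 0 || cfd < 0) {
            perror("open");
            return 1;
        }
        /* "<event_fd> <target_fd>" arms the notification */
        snprintf(cmd, sizeof(cmd), "%d %d", efd, lfd);
        if (write(cfd, cmd, strlen(cmd)) < 0) {
            perror("event_control");
            return 1;
        }
        /* blocks until the next direct reclaim latency sample */
        if (read(efd, &hits, sizeof(hits)) == sizeof(hits))
            printf("memcg direct reclaim events: %llu\n",
                   (unsigned long long)hits);
        return 0;
    }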
+ */ + return 0; +} +#endif + +static int memory_stat_show(struct seq_file *m, void *v); + +#ifdef CONFIG_PAGECACHE_LIMIT +static u64 mem_cgroup_allow_pgcache_limit_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return READ_ONCE(memcg->allow_pgcache_limit); +} + +static int mem_cgroup_allow_pgcache_limit_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + if (val > 1) + return -EINVAL; + + memcg->allow_pgcache_limit = val; + + return 0; +} + +static u64 mem_cgroup_pgcache_limit_size_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + unsigned long size; + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + size = READ_ONCE(memcg->pgcache_limit_size); + + return size; +} + +static ssize_t mem_cgroup_pgcache_limit_size_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, + loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + struct page_counter *counter = &memcg->memory; + unsigned long size, max = counter->max * PAGE_SIZE; + + buf = strstrip(buf); + size = (unsigned long)memparse(buf, NULL); + if (size > max) + memcg->pgcache_limit_size = max; + else + memcg->pgcache_limit_size = size; + + return nbytes; +} + +static u64 mem_cgroup_allow_pgcache_sync_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return READ_ONCE(memcg->pgcache_limit_sync); } -#if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) -static int mem_cgroup_slab_show(struct seq_file *m, void *p) +static int mem_cgroup_allow_pgcache_sync_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) { - /* - * Deprecated. - * Please, take a look at tools/cgroup/memcg_slabinfo.py . 
- */ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + if (val > 1) + return -EINVAL; + if (memcg->pgcache_limit_sync == val) + return 0; + + if (val) + memcg->pgcache_limit_sync = PGCACHE_RECLAIM_DIRECT; + else + memcg->pgcache_limit_sync = PGCACHE_RECLAIM_ASYNC; + return 0; } -#endif - -static int memory_stat_show(struct seq_file *m, void *v); +#endif /* CONFIG_PAGECACHE_LIMIT */ static struct cftype mem_cgroup_legacy_files[] = { { @@ -5049,6 +6039,78 @@ static struct cftype mem_cgroup_legacy_files[] = { .name = "stat", .seq_show = memory_stat_show, }, +#ifdef CONFIG_MEMSLI + { + .name = "direct_reclaim_global_latency", + .private = MEM_LAT_GLOBAL_DIRECT_RECLAIM, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_reclaim_memcg_latency", + .private = MEM_LAT_MEMCG_DIRECT_RECLAIM, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_compact_latency", + .private = MEM_LAT_DIRECT_COMPACT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapout_global_latency", + .private = MEM_LAT_GLOBAL_DIRECT_SWAPOUT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapout_memcg_latency", + .private = MEM_LAT_MEMCG_DIRECT_SWAPOUT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapin_latency", + .private = MEM_LAT_DIRECT_SWAPIN, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, +#endif /* CONFIG_MEMSLI */ + { + .name = "exstat", + .seq_show = memcg_exstat_show, + }, + { + .name = "wmark_ratio", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_ratio_show, + .write = memory_wmark_ratio_write, + }, + { + .name = "wmark_high", + .flags = CFTYPE_NOT_ON_ROOT, + .private = MEMFILE_PRIVATE(_MEM, WMARK_HIGH_LIMIT), + .read_u64 = mem_cgroup_read_u64, + }, + { + .name = "wmark_low", + .flags = CFTYPE_NOT_ON_ROOT, + .private = MEMFILE_PRIVATE(_MEM, WMARK_LOW_LIMIT), + .read_u64 = mem_cgroup_read_u64, + }, + { + .name = "wmark_scale_factor", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_scale_factor_show, + .write = memory_wmark_scale_factor_write, + }, + { + .name = "wmark_min_adj", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_min_adj_show, + .write = memory_wmark_min_adj_write, + }, { .name = "force_empty", .write = mem_cgroup_force_empty_write, @@ -5058,6 +6120,11 @@ static struct cftype mem_cgroup_legacy_files[] = { .write_u64 = mem_cgroup_hierarchy_write, .read_u64 = mem_cgroup_hierarchy_read, }, + { + .name = "use_priority_oom", + .write_u64 = mem_cgroup_priority_oom_write, + .read_u64 = mem_cgroup_priority_oom_read, + }, { .name = "cgroup.event_control", /* XXX: for compat */ .write = memcg_write_event_control, @@ -5068,6 +6135,12 @@ static struct cftype mem_cgroup_legacy_files[] = { .read_u64 = mem_cgroup_swappiness_read, .write_u64 = mem_cgroup_swappiness_write, }, + { + .name = "priority", + .read_u64 = mem_cgroup_priority_read, + .write_u64 = mem_cgroup_priority_write, + .flags = CFTYPE_NOT_ON_ROOT, + }, { .name = "move_charge_at_immigrate", .read_u64 = mem_cgroup_move_charge_read, @@ -5078,6 +6151,12 @@ static struct cftype mem_cgroup_legacy_files[] = { .seq_show = mem_cgroup_oom_control_read, .write_u64 = mem_cgroup_oom_control_write, }, + { + .name = "oom.group", + .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, + .seq_show = memory_oom_group_show, + .write = memory_oom_group_write, + }, 
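mem_cgroup_pgcache_limit_size_write() above parses its input with memparse() (a byte count with an optional size suffix; the kernel version also understands T, P and E) and silently clamps the result to the memory limit. A userspace model of the parse-and-clamp step, with memparse_stub() as an illustrative reimplementation::

    #include <stdio.h>
    #include <stdlib.h>

    /* userspace model of memparse(): number plus optional K/M/G suffix */
    static unsigned long long memparse_stub(const char *s)
    {
        char *end;
        unsigned long long v = strtoull(s, &end, 0);

        switch (*end) {
        case 'G': case 'g': v <<= 10; /* fall through */
        case 'M': case 'm': v <<= 10; /* fall through */
        case 'K': case 'k': v <<= 10;
        }
        return v;
    }

    int main(void)
    {
        unsigned long long max = 1ULL << 30;     /* memory limit in bytes */
        const char *inputs[] = {"100M", "2G", "4096"};

        for (int i = 0; i < 3; i++) {
            unsigned long long size = memparse_stub(inputs[i]);

            if (size > max)
                size = max;                      /* clamped to the limit */
            printf("%-6s -> %llu bytes\n", inputs[i], size);
        }
        return 0;
    }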
{ .name = "pressure_level", .seq_show = mem_cgroup_dummy_seq_show, @@ -5141,6 +6220,35 @@ static struct cftype mem_cgroup_legacy_files[] = { .write = mem_cgroup_reset, .read_u64 = mem_cgroup_read_u64, }, +#ifdef CONFIG_ASYNC_FORK + { + .name = "async_fork", + .read_u64 = mem_cgroup_async_fork_read, + .write_u64 = mem_cgroup_async_fork_write, + }, +#endif + { + .name = "reap_background", + .read_u64 = memcg_reap_background_read, + .write_u64 = memcg_reap_background_write, + }, +#ifdef CONFIG_PAGECACHE_LIMIT + { + .name = "pagecache_limit.enable", + .read_u64 = mem_cgroup_allow_pgcache_limit_read, + .write_u64 = mem_cgroup_allow_pgcache_limit_write, + }, + { + .name = "pagecache_limit.size", + .read_u64 = mem_cgroup_pgcache_limit_size_read, + .write = mem_cgroup_pgcache_limit_size_write, + }, + { + .name = "pagecache_limit.sync", + .read_u64 = mem_cgroup_allow_pgcache_sync_read, + .write_u64 = mem_cgroup_allow_pgcache_sync_write, + }, +#endif { }, /* terminate */ }; @@ -5293,6 +6401,10 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) free_mem_cgroup_per_node_info(memcg, node); kfree(memcg->vmstats); free_percpu(memcg->vmstats_percpu); + free_percpu(memcg->exstat_cpu); +#ifdef CONFIG_MEMSLI + free_percpu(memcg->lat_stat_cpu); +#endif kfree(memcg); } @@ -5329,6 +6441,20 @@ static struct mem_cgroup *mem_cgroup_alloc(void) if (!memcg->vmstats_percpu) goto fail; +#ifdef CONFIG_MEMSLI + memcg->lat_stat_cpu = alloc_percpu_gfp(struct mem_cgroup_lat_stat_cpu, + GFP_KERNEL_ACCOUNT); + if (!memcg->lat_stat_cpu) + goto fail; + for (i = 0; i < MEM_LAT_NR_STAT; i++) + INIT_LIST_HEAD(&memcg->lat_stat_notify[i]); + mutex_init(&memcg->lat_stat_notify_lock); +#endif + + memcg->exstat_cpu = alloc_percpu(struct mem_cgroup_exstat_cpu); + if (!memcg->exstat_cpu) + goto fail; + for_each_node(node) if (alloc_mem_cgroup_per_node_info(memcg, node)) goto fail; @@ -5337,6 +6463,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void) goto fail; INIT_WORK(&memcg->high_work, high_work_func); + INIT_WORK(&memcg->wmark_work, wmark_work_func); +#ifdef CONFIG_PAGECACHE_LIMIT + INIT_WORK(&memcg->pgcache_limit_work, memcg_pgcache_limit_work_func); +#endif INIT_LIST_HEAD(&memcg->oom_notify); mutex_init(&memcg->thresholds_lock); spin_lock_init(&memcg->move_lock); @@ -5388,7 +6518,20 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) if (parent) { WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); - + WRITE_ONCE(memcg->wmark_ratio, READ_ONCE(parent->wmark_ratio)); + WRITE_ONCE(memcg->wmark_min_adj, READ_ONCE(parent->wmark_min_adj)); + WRITE_ONCE(memcg->wmark_min_eadj, READ_ONCE(parent->wmark_min_eadj)); + memcg->reap_background = parent->reap_background; + /* Default gap is 0.5% max limit */ + memcg->wmark_scale_factor = parent->wmark_scale_factor ? 
+ : 50; +#ifdef CONFIG_ASYNC_FORK + memcg->async_fork = parent->async_fork; +#endif +#ifdef CONFIG_PAGECACHE_LIMIT + memcg->allow_pgcache_limit = parent->allow_pgcache_limit; + memcg->pgcache_limit_sync = parent->pgcache_limit_sync; +#endif page_counter_init(&memcg->memory, &parent->memory); page_counter_init(&memcg->swap, &parent->swap); page_counter_init(&memcg->kmem, &parent->kmem); @@ -5400,9 +6543,13 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) page_counter_init(&memcg->kmem, NULL); page_counter_init(&memcg->tcpmem, NULL); + /* initializing memcg wmark */ + setup_memcg_wmark(memcg); + root_mem_cgroup = memcg; return &memcg->css; } + setup_memcg_wmark(memcg); if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) static_branch_inc(&memcg_sockets_enabled_key); @@ -5466,6 +6613,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_event *event, *tmp; + memcg->offline_jiffies = jiffies; + /* * Unregister events and notify userspace. * Notify userspace about cgroup removing only after rmdir of cgroup @@ -5481,6 +6630,9 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) page_counter_set_min(&memcg->memory, 0); page_counter_set_low(&memcg->memory, 0); + page_counter_set_wmark_low(&memcg->memory, PAGE_COUNTER_MAX); + page_counter_set_wmark_high(&memcg->memory, PAGE_COUNTER_MAX); + memcg_offline_kmem(memcg); reparent_shrinker_deferred(memcg); wb_memcg_offline(memcg); @@ -5521,6 +6673,10 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) vmpressure_cleanup(&memcg->vmpressure); cancel_work_sync(&memcg->high_work); + cancel_work_sync(&memcg->wmark_work); +#ifdef CONFIG_PAGECACHE_LIMIT + cancel_work_sync(&memcg->pgcache_limit_work); +#endif mem_cgroup_remove_from_trees(memcg); free_shrinker_info(memcg); mem_cgroup_free(memcg); @@ -5549,6 +6705,8 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); page_counter_set_min(&memcg->memory, 0); page_counter_set_low(&memcg->memory, 0); + page_counter_set_wmark_low(&memcg->memory, PAGE_COUNTER_MAX); + page_counter_set_wmark_high(&memcg->memory, PAGE_COUNTER_MAX); page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); @@ -6567,10 +7725,41 @@ static ssize_t memory_high_write(struct kernfs_open_file *of, break; } + setup_memcg_wmark(memcg); + + if (!is_wmark_ok(memcg, true)) + queue_work(memcg_wmark_wq, &memcg->wmark_work); + memcg_wb_domain_size_changed(memcg); return nbytes; } +static int memory_wmark_low_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + unsigned long wmark_low = READ_ONCE(memcg->memory.wmark_low); + + if (wmark_low == PAGE_COUNTER_MAX) + seq_puts(m, "max\n"); + else + seq_printf(m, "%llu\n", (u64)wmark_low * PAGE_SIZE); + + return 0; +} + +static int memory_wmark_high_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + unsigned long wmark_high = READ_ONCE(memcg->memory.wmark_high); + + if (wmark_high == PAGE_COUNTER_MAX) + seq_puts(m, "max\n"); + else + seq_printf(m, "%llu\n", (u64)wmark_high * PAGE_SIZE); + + return 0; +} + static int memory_max_show(struct seq_file *m, void *v) { return seq_puts_memcg_tunable(m, @@ -6620,6 +7809,11 @@ static ssize_t memory_max_write(struct kernfs_open_file *of, break; } 
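Child cgroups inherit the watermark configuration here; "parent->wmark_scale_factor ? : 50" uses the GNU ?: extension (take the left operand unless it is zero), so children default to a scale factor of 50, i.e. a gap of 50/10000 = 0.5% of the limit. setup_memcg_wmark() then derives both marks: wmark_high = max * wmark_ratio / 100 and wmark_low = wmark_high - mult_frac(max, scale, 10000). The arithmetic, with the kernel's overflow-safe mult_frac() spelled out::

    #include <stdio.h>

    /* the kernel's mult_frac(): x * n / d without intermediate overflow */
    static unsigned long mult_frac(unsigned long x, unsigned long n,
                                   unsigned long d)
    {
        unsigned long q = x / d, r = x % d;

        return q * n + r * n / d;
    }

    int main(void)
    {
        unsigned long max = 262144;      /* 1 GiB limit in 4 KiB pages */
        unsigned int wmark_ratio = 80;   /* memory.wmark_ratio */
        unsigned int scale = 50;         /* inherited default: 0.5% of max */
        unsigned long high = max * wmark_ratio / 100;
        unsigned long gap = mult_frac(max, scale, 10000);

        printf("wmark_high=%lu wmark_low=%lu (gap %lu pages)\n",
               high, high - gap, gap);
        return 0;
    }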
+ setup_memcg_wmark(memcg); + + if (!is_wmark_ok(memcg, true)) + queue_work(memcg_wmark_wq, &memcg->wmark_work); + memcg_wb_domain_size_changed(memcg); return nbytes; } @@ -6704,37 +7898,6 @@ static int memory_numa_stat_show(struct seq_file *m, void *v) } #endif -static int memory_oom_group_show(struct seq_file *m, void *v) -{ - struct mem_cgroup *memcg = mem_cgroup_from_seq(m); - - seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); - - return 0; -} - -static ssize_t memory_oom_group_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); - int ret, oom_group; - - buf = strstrip(buf); - if (!buf) - return -EINVAL; - - ret = kstrtoint(buf, 0, &oom_group); - if (ret) - return ret; - - if (oom_group != 0 && oom_group != 1) - return -EINVAL; - - WRITE_ONCE(memcg->oom_group, oom_group); - - return nbytes; -} - static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { @@ -6812,6 +7975,87 @@ static struct cftype memory_files[] = { .seq_show = memory_max_show, .write = memory_max_write, }, + { + .name = "wmark_min_adj", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_min_adj_show, + .write = memory_wmark_min_adj_write, + }, + { + .name = "priority", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = mem_cgroup_priority_read, + .write_u64 = mem_cgroup_priority_write, + }, + { + .name = "use_priority_oom", + .write_u64 = mem_cgroup_priority_oom_write, + .read_u64 = mem_cgroup_priority_oom_read, + }, + { + .name = "wmark_ratio", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_ratio_show, + .write = memory_wmark_ratio_write, + }, + { + .name = "wmark_high", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_high_show, + }, + { + .name = "wmark_low", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_low_show, + }, + { + .name = "wmark_scale_factor", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_scale_factor_show, + .write = memory_wmark_scale_factor_write, + }, +#ifdef CONFIG_MEMSLI + { + .name = "direct_reclaim_global_latency", + .private = MEM_LAT_GLOBAL_DIRECT_RECLAIM, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_reclaim_memcg_latency", + .private = MEM_LAT_MEMCG_DIRECT_RECLAIM, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_compact_latency", + .private = MEM_LAT_DIRECT_COMPACT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapout_global_latency", + .private = MEM_LAT_GLOBAL_DIRECT_SWAPOUT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapout_memcg_latency", + .private = MEM_LAT_MEMCG_DIRECT_SWAPOUT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapin_latency", + .private = MEM_LAT_DIRECT_SWAPIN, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, +#endif /* CONFIG_MEMSLI */ + { + .name = "exstat", + .seq_show = memcg_exstat_show, + }, { .name = "events", .flags = CFTYPE_NOT_ON_ROOT, @@ -6845,6 +8089,35 @@ static struct cftype memory_files[] = { .flags = CFTYPE_NS_DELEGATABLE, .write = memory_reclaim, }, +#ifdef CONFIG_ASYNC_FORK + { + .name = "async_fork", + .read_u64 = mem_cgroup_async_fork_read, + .write_u64 = mem_cgroup_async_fork_write, + }, +#endif + { + .name = "reap_background", + .read_u64 = memcg_reap_background_read, + .write_u64 = 
memcg_reap_background_write, + }, +#ifdef CONFIG_PAGECACHE_LIMIT + { + .name = "pagecache_limit.enable", + .read_u64 = mem_cgroup_allow_pgcache_limit_read, + .write_u64 = mem_cgroup_allow_pgcache_limit_write, + }, + { + .name = "pagecache_limit.size", + .read_u64 = mem_cgroup_pgcache_limit_size_read, + .write = mem_cgroup_pgcache_limit_size_write, + }, + { + .name = "pagecache_limit.sync", + .read_u64 = mem_cgroup_allow_pgcache_sync_read, + .write_u64 = mem_cgroup_allow_pgcache_sync_write, + }, +#endif { } /* terminate */ }; @@ -7106,14 +8379,15 @@ int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, /* * mem_cgroup_swapin_uncharge_swap - uncharge swap slot - * @entry: swap entry for which the page is charged + * @entry: the first swap entry for which the pages are charged + * @nr_pages: number of pages which will be uncharged * * Call this function after successfully adding the charged page to swapcache. * * Note: This function assumes the page for which swap slot is being uncharged * is order 0 page. */ -void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) +void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) { /* * Cgroup1's unified memory+swap counter has been charged with the @@ -7133,7 +8407,7 @@ void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) * let's not wait for it. The page already received a * memory+swap charge, drop the swap entry duplicate. */ - mem_cgroup_uncharge_swap(entry, 1); + mem_cgroup_uncharge_swap(entry, nr_pages); } } @@ -7416,6 +8690,70 @@ static int __init cgroup_memory(char *s) } __setup("cgroup.memory=", cgroup_memory); +#ifdef CONFIG_CGROUP_WRITEBACK +bool cgwb_v1; + +static int __init enable_cgroup_writeback_v1(char *s) +{ + cgwb_v1 = true; + + return 0; +} +__setup("cgwb_v1", enable_cgroup_writeback_v1); +#endif + +#ifdef CONFIG_MEMSLI +static int memsli_enabled_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", !static_key_enabled(&cgroup_memory_nosli)); + return 0; +} + +static int memsli_enabled_open(struct inode *inode, struct file *file) +{ + return single_open(file, memsli_enabled_show, NULL); +} + +static ssize_t memsli_enabled_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + char val = -1; + int ret = count; + + if (count < 1 || *ppos) { + ret = -EINVAL; + goto out; + } + + if (copy_from_user(&val, ubuf, 1)) { + ret = -EFAULT; + goto out; + } + + switch (val) { + case '0': + static_branch_enable(&cgroup_memory_nosli); + break; + case '1': + static_branch_disable(&cgroup_memory_nosli); + break; + default: + ret = -EINVAL; + } + +out: + return ret; +} + +static const struct proc_ops memsli_enabled_proc_ops = { + .proc_open = memsli_enabled_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_write = memsli_enabled_write, + .proc_release = single_release, +}; +#endif /* CONFIG_MEMSLI */ + /* * subsys_initcall() for memory controller. 
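The memsli/enabled write handler in this hunk toggles the inverted static key cgroup_memory_nosli: writing '0' enables the key (accounting off) and '1' disables it. A sketch of how a latency-accounting site would consume that switch (the helper name is hypothetical; the real entry points are the memcg_lat_stat_* handlers wired into the cftype table above):

static inline u64 memsli_sample_start_sketch(void)
{
	/*
	 * Key enabled means "nosli": the common disabled-key case is a
	 * single patched branch, so accounting costs nothing when off.
	 */
	if (static_branch_unlikely(&cgroup_memory_nosli))
		return 0;
	return ktime_get_ns();
}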
* @@ -7427,6 +8765,10 @@ __setup("cgroup.memory=", cgroup_memory); static int __init mem_cgroup_init(void) { int cpu, node; +#ifdef CONFIG_MEMSLI + proc_mkdir("memsli", NULL); + proc_create("memsli/enabled", 0600, NULL, &memsli_enabled_proc_ops); +#endif /* CONFIG_MEMSLI */ /* * Currently s32 type (can refer to struct batched_lruvec_stat) is @@ -7436,6 +8778,22 @@ static int __init mem_cgroup_init(void) */ BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); + memcg_wmark_wq = alloc_workqueue("memcg_wmark", WQ_MEM_RECLAIM | + WQ_UNBOUND | WQ_FREEZABLE, + WQ_UNBOUND_MAX_ACTIVE); + + if (!memcg_wmark_wq) + return -ENOMEM; +#ifdef CONFIG_PAGECACHE_LIMIT + memcg_pgcache_limit_wq = alloc_workqueue("memcg_pgcache_limit", + WQ_FREEZABLE | + WQ_UNBOUND | WQ_MEM_RECLAIM, + WQ_UNBOUND_MAX_ACTIVE); + + if (!memcg_pgcache_limit_wq) + return -ENOMEM; +#endif + cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, memcg_hotplug_cpu_dead); @@ -7965,4 +9323,113 @@ static int __init mem_cgroup_swap_init(void) } subsys_initcall(mem_cgroup_swap_init); +#endif /* CONFIG_MEMCG_SWAP */ + +#ifdef CONFIG_RICH_CONTAINER +static inline struct mem_cgroup *css_memcg(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct mem_cgroup, css) : NULL; +} + +/* with rcu lock held */ +struct mem_cgroup *rich_container_get_memcg(void) +{ + struct cgroup_subsys_state *css; + struct mem_cgroup *memcg_src; + + if (sysctl_rich_container_source == 1) + css = NULL; + else + css = task_css(current, memory_cgrp_id); + + if (css) { + memcg_src = css_memcg(css); + } else { + read_lock(&tasklist_lock); + memcg_src = mem_cgroup_from_task(task_active_pid_ns(current)->child_reaper); + read_unlock(&tasklist_lock); + } + + if (css_tryget(&memcg_src->css)) + return memcg_src; + else + return NULL; +} + +void memcg_meminfo(struct mem_cgroup *memcg, + struct sysinfo *info, struct sysinfo_ext *ext) +{ + struct mem_cgroup *iter; + unsigned long limit, memsw_limit, usage, totalram_pages_tmp; + unsigned long pagecache, memcg_wmark, swap_size; + int i; + + ext->file_dirty = memcg_page_state(memcg, NR_FILE_DIRTY); + ext->writeback = memcg_page_state(memcg, NR_WRITEBACK); + ext->anon_mapped = memcg_page_state(memcg, NR_ANON_MAPPED); + ext->file_mapped = memcg_page_state(memcg, NR_FILE_MAPPED); + ext->slab_reclaimable = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT; + ext->slab_unreclaimable = + memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT; + ext->kernel_stack_kb = memcg_page_state(memcg, NR_KERNEL_STACK_KB); + ext->writeback_temp = 0; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + ext->anon_thps = memcg_page_state(memcg, NR_ANON_THPS); +#endif + ext->shmem_thps = 0; + ext->shmem_pmd_mapped = 0; + + swap_size = memcg_page_state(memcg, MEMCG_SWAP); + limit = memsw_limit = PAGE_COUNTER_MAX; + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) { + limit = min(limit, iter->memory.max); + memsw_limit = min(memsw_limit, iter->memsw.max); + } + usage = mem_cgroup_usage(memcg, false); + totalram_pages_tmp = totalram_pages(); + info->totalram = limit > totalram_pages_tmp ? totalram_pages_tmp : limit; + info->sharedram = memcg_page_state(memcg, NR_SHMEM); + info->freeram = info->totalram - usage; + /* these are not accounted by memcg yet */ + /* if give bufferram the global value, free may show a quite + * large number in the ±buffers/caches row, the reason is + * it's equal to group_used - global_buffer - group_cached, + * if global_buffer > group_used, we get a rewind large value. 
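rich_container_get_memcg() above encodes a lookup-then-tryget pattern: the css lookup is only valid under RCU, and a non-NULL return transfers a css reference to the caller. A hypothetical consumer, for illustration only:

static unsigned long rich_container_usage_sketch(void)
{
	struct mem_cgroup *memcg;
	unsigned long usage;

	rcu_read_lock();
	memcg = rich_container_get_memcg();
	rcu_read_unlock();
	if (!memcg)
		return 0;

	usage = page_counter_read(&memcg->memory);
	css_put(&memcg->css);	/* drop the reference css_tryget() took */
	return usage;
}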
+ */ + info->bufferram = 0; + info->totalhigh = totalhigh_pages(); + info->freehigh = nr_free_highpages(); + info->mem_unit = PAGE_SIZE; + + /* fill in swinfo */ + si_swapinfo(info); + if (memsw_limit < info->totalswap) + info->totalswap = memsw_limit; + info->freeswap = info->totalswap - swap_size; + + for (i = 0; i < NR_LRU_LISTS; i++) + ext->lrupages[i] = memcg_page_state(memcg, NR_LRU_BASE + i); + + /* Like what si_mem_available() does */ + + // TODO: memcg_wmark depends on background async page reclaim, waiting + // for it. + + //memcg_wmark = memcg->memory.wmark_high; + //if (memcg->wmark_ratio && info->totalram > memcg_wmark) + // memcg_wmark = info->totalram - memcg_wmark; + //else + // memcg_wmark = 0; + memcg_wmark = 0; + + pagecache = ext->lrupages[LRU_ACTIVE_FILE] + + ext->lrupages[LRU_INACTIVE_FILE]; + pagecache -= min(pagecache / 2, memcg_wmark); + ext->available = info->freeram + pagecache; + ext->available += ext->slab_reclaimable - + min(ext->slab_reclaimable / 2, memcg_wmark); + ext->cached = usage - ext->lrupages[LRU_INACTIVE_ANON] - + ext->lrupages[LRU_ACTIVE_ANON]; +} + #endif /* CONFIG_SWAP */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 9018a1162efc9d165e353353962080bf8f57c412..60b9b55821a3b0cb9fbe9cfd3a9fd8c62e93610b 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -60,6 +60,7 @@ #include #include #include +#include #include "swap.h" #include "internal.h" #include "ras/ras_event.h" @@ -988,7 +989,7 @@ static bool has_extra_refcount(struct page_state *ps, struct page *p, int count = page_count(p) - 1; if (extra_pins) - count -= 1; + count -= folio_nr_pages(page_folio(p)); if (count > 0) { pr_err("%#lx: %s still referenced by %d users\n", @@ -1383,6 +1384,9 @@ void ClearPageHWPoisonTakenOff(struct page *page) */ static inline bool HWPoisonHandlable(struct page *page, unsigned long flags) { + if (PageSlab(page)) + return false; + /* Soft offline could migrate non-LRU movable pages */ if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page)) return true; @@ -2328,8 +2332,8 @@ int memory_failure(unsigned long pfn, int flags) * We use page flags to determine what action should be taken, but * the flags can be modified by the error containment action. One * example is an mlocked page, where PG_mlocked is cleared by - * page_remove_rmap() in try_to_unmap_one(). So to determine page status - * correctly, we save a copy of the page flags at this time. + * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page + * status correctly, we save a copy of the page flags at this time. */ page_flags = p->flags; @@ -2454,10 +2458,17 @@ static void memory_failure_work_func(struct work_struct *work) raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); if (!gotten) break; - if (entry.flags & MF_SOFT_OFFLINE) + if (entry.flags & MF_SOFT_OFFLINE) { soft_offline_page(entry.pfn, entry.flags); - else - memory_failure(entry.pfn, entry.flags); + } else if (!memory_failure(entry.pfn, entry.flags)) { + /* + * If the pfn reported by ghes can not be recovered, set + * the corresponding page table of linear mapping range + * to be non-present, which avoids the speculative + * access of corrupted memory. 
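The available-memory estimate assembled above mirrors si_mem_available(): for both the file LRU and reclaimable slab it discounts the smaller of half the pool and the per-memcg watermark (which the TODO above pins to zero for now, so no discount is applied yet). Restated as a standalone helper, hypothetical and for clarity only:

static unsigned long memcg_available_sketch(unsigned long freeram,
					    unsigned long file_lru,
					    unsigned long slab_reclaimable,
					    unsigned long wmark)
{
	unsigned long avail = freeram;

	avail += file_lru - min(file_lru / 2, wmark);
	avail += slab_reclaimable - min(slab_reclaimable / 2, wmark);
	return avail;
}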
+			 */
+			set_memory_np((unsigned long)page_to_virt(pfn_to_page(entry.pfn)), 1);
+		}
 	}
 }
diff --git a/mm/memory.c b/mm/memory.c
index 742c2f65c2c85780c8dfa270c676e642f2c8a209..3a32fa031a0b787e4441f662f6e1b36da9467f0e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -76,6 +76,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -691,12 +692,23 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 out:
 	return pfn_to_page(pfn);
 }
+
+struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
+				  unsigned long addr, pmd_t pmd)
+{
+	struct page *page = vm_normal_page_pmd(vma, addr, pmd);
+
+	if (page)
+		return page_folio(page);
+	return NULL;
+}
 #endif
 
 static void restore_exclusive_pte(struct vm_area_struct *vma,
 				  struct page *page, unsigned long address,
 				  pte_t *ptep)
 {
+	struct folio *folio = page_folio(page);
 	pte_t orig_pte;
 	pte_t pte;
 	swp_entry_t entry;
@@ -712,14 +724,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
 	else if (is_writable_device_exclusive_entry(entry))
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 
-	VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));
+	VM_BUG_ON_FOLIO(pte_write(pte) && !(folio_test_anon(folio) &&
+					    PageAnonExclusive(page)), folio);
 
 	/*
 	 * No need to take a page reference as one was already
 	 * created when the swap entry was made.
 	 */
-	if (PageAnon(page))
-		page_add_anon_rmap(page, vma, address, RMAP_NONE);
+	if (folio_test_anon(folio))
+		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
 	else
 		/*
 		 * Currently device exclusive access only supports anonymous
@@ -770,6 +783,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	unsigned long vm_flags = dst_vma->vm_flags;
 	pte_t orig_pte = ptep_get(src_pte);
 	pte_t pte = orig_pte;
+	struct folio *folio;
 	struct page *page;
 	swp_entry_t entry = pte_to_swp_entry(orig_pte);
 
@@ -814,6 +828,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		}
 	} else if (is_device_private_entry(entry)) {
 		page = pfn_swap_entry_to_page(entry);
+		folio = page_folio(page);
 
 		/*
 		 * Update rss count even for unaddressable pages, as
@@ -824,10 +839,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		 * for unaddressable pages, at some point. But for now
 		 * keep things as they are.
 		 */
-		get_page(page);
+		folio_get(folio);
 		rss[mm_counter(page)]++;
 		/* Cannot fail as these pages cannot get pinned. */
-		BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
+		folio_try_dup_anon_rmap_pte(folio, page, src_vma);
 
 		/*
 		 * We do not preserve soft-dirty information, because so
@@ -901,7 +916,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 	*prealloc = NULL;
 	copy_user_highpage(&new_folio->page, page, addr, src_vma);
 	__folio_mark_uptodate(new_folio);
-	folio_add_new_anon_rmap(new_folio, dst_vma, addr);
+	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
 	folio_add_lru_vma(new_folio, dst_vma);
 	rss[MM_ANONPAGES]++;
 
@@ -915,76 +930,124 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 	return 0;
 }
 
+static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
+		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
+		pte_t pte, unsigned long addr, int nr)
+{
+	struct mm_struct *src_mm = src_vma->vm_mm;
+
+	/* If it's a COW mapping, write-protect it in both processes.
*/ + if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) { + wrprotect_ptes(src_mm, addr, src_pte, nr); + pte = pte_wrprotect(pte); + } + + /* If it's a shared mapping, mark it clean in the child. */ + if (src_vma->vm_flags & VM_SHARED) + pte = pte_mkclean(pte); + pte = pte_mkold(pte); + + if (!userfaultfd_wp(dst_vma)) + pte = pte_clear_uffd_wp(pte); + + set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr); +} + /* - * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page - * is required to copy this pte. + * Copy one present PTE, trying to batch-process subsequent PTEs that map + * consecutive pages of the same folio by copying them as well. + * + * Returns -EAGAIN if one preallocated page is required to copy the next PTE. + * Otherwise, returns the number of copied PTEs (at least 1). */ static inline int -copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, - pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss, - struct folio **prealloc) +copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, + pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr, + int max_nr, int *rss, struct folio **prealloc) { - struct mm_struct *src_mm = src_vma->vm_mm; - unsigned long vm_flags = src_vma->vm_flags; - pte_t pte = ptep_get(src_pte); struct page *page; struct folio *folio; + bool any_writable; + fpb_t flags = 0; + int err, nr; page = vm_normal_page(src_vma, addr, pte); - if (page) - folio = page_folio(page); - if (page && folio_test_anon(folio)) { + if (unlikely(!page)) + goto copy_pte; + + folio = page_folio(page); + + /* + * If we likely have to copy, just don't bother with batching. Make + * sure that the common "small folio" case is as fast as possible + * by keeping the batching logic separate. + */ + if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) { + if (src_vma->vm_flags & VM_SHARED) + flags |= FPB_IGNORE_DIRTY; + if (!vma_soft_dirty_enabled(src_vma)) + flags |= FPB_IGNORE_SOFT_DIRTY; + + nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags, + &any_writable, NULL); + folio_ref_add(folio, nr); + if (folio_test_anon(folio)) { + if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, + nr, src_vma))) { + folio_ref_sub(folio, nr); + return -EAGAIN; + } + rss[MM_ANONPAGES] += nr; + VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); + } else { + folio_dup_file_rmap_ptes(folio, page, nr); + rss[mm_counter_file(page)] += nr; + } + if (any_writable) + pte = pte_mkwrite(pte, src_vma); + __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, + addr, nr); + return nr; + } + + folio_get(folio); + if (folio_test_anon(folio)) { /* * If this page may have been pinned by the parent process, * copy the page immediately for the child so that we'll always * guarantee the pinned page won't be randomly replaced in the * future. */ - folio_get(folio); - if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) { + if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) { /* Page may be pinned, we have to copy. */ folio_put(folio); - return copy_present_page(dst_vma, src_vma, dst_pte, src_pte, - addr, rss, prealloc, page); + err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte, + addr, rss, prealloc, page); + return err ? 
err : 1; } rss[MM_ANONPAGES]++; - } else if (page) { - folio_get(folio); - page_dup_file_rmap(page, false); + VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); + } else { + folio_dup_file_rmap_pte(folio, page); rss[mm_counter_file(page)]++; } - /* - * If it's a COW mapping, write protect it both - * in the parent and the child - */ - if (is_cow_mapping(vm_flags) && pte_write(pte)) { - ptep_set_wrprotect(src_mm, addr, src_pte); - pte = pte_wrprotect(pte); - } - VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page)); - - /* - * If it's a shared mapping, mark it clean in - * the child - */ - if (vm_flags & VM_SHARED) - pte = pte_mkclean(pte); - pte = pte_mkold(pte); - - if (!userfaultfd_wp(dst_vma)) - pte = pte_clear_uffd_wp(pte); - - set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); - return 0; +copy_pte: + __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1); + return 1; } -static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm, - struct vm_area_struct *vma, unsigned long addr) +static inline struct folio *folio_prealloc(struct mm_struct *src_mm, + struct vm_area_struct *vma, unsigned long addr, bool need_zero) { struct folio *new_folio; - new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false); + if (need_zero) + new_folio = vma_alloc_zeroed_movable_folio(vma, addr); + else + new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, + addr, false); + if (!new_folio) return NULL; @@ -1008,10 +1071,11 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pte_t *src_pte, *dst_pte; pte_t ptent; spinlock_t *src_ptl, *dst_ptl; - int progress, ret = 0; + int progress, max_nr, ret = 0; int rss[NR_MM_COUNTERS]; swp_entry_t entry = (swp_entry_t){0}; struct folio *prealloc = NULL; + int nr; again: progress = 0; @@ -1042,6 +1106,8 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, arch_enter_lazy_mmu_mode(); do { + nr = 1; + /* * We are holding two locks at this point - either of them * could generate latencies in another task on another CPU. @@ -1071,6 +1137,8 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, progress += 8; continue; } + ptent = ptep_get(src_pte); + VM_WARN_ON_ONCE(!pte_present(ptent)); /* * Device exclusive entry restored, continue by copying @@ -1078,9 +1146,10 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, */ WARN_ON_ONCE(ret != -ENOENT); } - /* copy_present_pte() will clear `*prealloc' if consumed */ - ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, - addr, rss, &prealloc); + /* copy_present_ptes() will clear `*prealloc' if consumed */ + max_nr = (end - addr) / PAGE_SIZE; + ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, + ptent, addr, max_nr, rss, &prealloc); /* * If we need a pre-allocated page for this pte, drop the * locks, allocate, and try again. 
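copy_present_ptes() above leans on folio_pte_batch() to learn how many consecutive PTEs can be copied as one unit. An open-coded approximation of the question it answers, simplified: this sketch ignores the FPB_IGNORE_* attribute masking and the any_writable reporting that the real helper provides:

static int pte_batch_sketch(struct folio *folio, pte_t *start, pte_t pte,
			    int max_nr)
{
	unsigned long pfn = pte_pfn(pte);
	int nr = 1;

	while (nr < max_nr) {
		pte_t next = ptep_get(start + nr);

		/* Stop at the first hole or non-consecutive PFN. */
		if (!pte_present(next) || pte_pfn(next) != pfn + nr)
			break;
		/* Stop when the next page belongs to a different folio. */
		if (page_folio(pfn_to_page(pfn + nr)) != folio)
			break;
		nr++;
	}
	return nr;
}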
@@ -1097,8 +1166,10 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, folio_put(prealloc); prealloc = NULL; } - progress += 8; - } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); + nr = ret; + progress += 8 * nr; + } while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr, + addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(orig_src_pte, src_ptl); @@ -1116,10 +1187,10 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, } else if (ret == -EBUSY) { goto out; } else if (ret == -EAGAIN) { - prealloc = page_copy_prealloc(src_mm, src_vma, addr); + prealloc = folio_prealloc(src_mm, src_vma, addr, false); if (!prealloc) return -ENOMEM; - } else if (ret) { + } else if (ret < 0) { VM_WARN_ON_ONCE(1); } @@ -1349,19 +1420,16 @@ static inline bool should_zap_cows(struct zap_details *details) return details->even_cows; } -/* Decides whether we should zap this page with the page pointer specified */ -static inline bool should_zap_page(struct zap_details *details, struct page *page) +/* Decides whether we should zap this folio with the folio pointer specified */ +static inline bool should_zap_folio(struct zap_details *details, + struct folio *folio) { - /* If we can make a decision without *page.. */ + /* If we can make a decision without *folio.. */ if (should_zap_cows(details)) return true; - /* E.g. the caller passes NULL for the case of a zero page */ - if (!page) - return true; - - /* Otherwise we should only zap non-anon pages */ - return !PageAnon(page); + /* Otherwise we should only zap non-anon folios */ + return !folio_test_anon(folio); } static inline bool zap_drop_file_uffd_wp(struct zap_details *details) @@ -1378,7 +1446,7 @@ static inline bool zap_drop_file_uffd_wp(struct zap_details *details) */ static inline void zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, - unsigned long addr, pte_t *pte, + unsigned long addr, pte_t *pte, int nr, struct zap_details *details, pte_t pteval) { /* Zap on anonymous always means dropping everything */ @@ -1388,7 +1456,113 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, if (zap_drop_file_uffd_wp(details)) return; - pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); + for (;;) { + /* the PFN in the PTE is irrelevant. */ + pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); + if (--nr == 0) + break; + pte++; + addr += PAGE_SIZE; + } +} + +static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb, + struct vm_area_struct *vma, struct folio *folio, + struct page *page, pte_t *pte, pte_t ptent, unsigned int nr, + unsigned long addr, struct zap_details *details, int *rss, + bool *force_flush, bool *force_break) +{ + struct mm_struct *mm = tlb->mm; + bool delay_rmap = false; + + if (!folio_test_anon(folio)) { + ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm); + if (pte_dirty(ptent)) { + folio_mark_dirty(folio); + if (tlb_delay_rmap(tlb)) { + delay_rmap = true; + *force_flush = true; + } + } + if (pte_young(ptent) && likely(vma_has_recency(vma))) + folio_mark_accessed(folio); + rss[mm_counter(page)] -= nr; + } else { + /* We don't need up-to-date accessed/dirty bits. */ + clear_full_ptes(mm, addr, pte, nr, tlb->fullmm); + rss[MM_ANONPAGES] -= nr; + } + /* Checking a single PTE in a batch is sufficient. 
*/ + arch_check_zapped_pte(vma, ptent); + tlb_remove_tlb_entries(tlb, pte, nr, addr); + if (unlikely(userfaultfd_pte_wp(vma, ptent))) + zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, + ptent); + + if (!delay_rmap) { + folio_remove_rmap_ptes(folio, page, nr, vma); + + /* Only sanity-check the first page in a batch. */ + if (unlikely(page_mapcount(page) < 0)) + print_bad_pte(vma, addr, ptent, page); + } + if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) { + *force_flush = true; + *force_break = true; + } +} + +/* + * Zap or skip at least one present PTE, trying to batch-process subsequent + * PTEs that map consecutive pages of the same folio. + * + * Returns the number of processed (skipped or zapped) PTEs (at least 1). + */ +static inline int zap_present_ptes(struct mmu_gather *tlb, + struct vm_area_struct *vma, pte_t *pte, pte_t ptent, + unsigned int max_nr, unsigned long addr, + struct zap_details *details, int *rss, bool *force_flush, + bool *force_break) +{ + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY; + struct mm_struct *mm = tlb->mm; + struct folio *folio; + struct page *page; + int nr; + + page = vm_normal_page(vma, addr, ptent); + if (!page) { + /* We don't need up-to-date accessed/dirty bits. */ + ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); + arch_check_zapped_pte(vma, ptent); + tlb_remove_tlb_entry(tlb, pte, addr); + if (userfaultfd_pte_wp(vma, ptent)) + zap_install_uffd_wp_if_needed(vma, addr, pte, 1, + details, ptent); + ksm_might_unmap_zero_page(mm, ptent); + return 1; + } + + folio = page_folio(page); + if (unlikely(!should_zap_folio(details, folio))) + return 1; + + /* + * Make sure that the common "small folio" case is as fast as possible + * by keeping the batching logic separate. 
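zap_present_folio_ptes() above splits on folio type before clearing the entries. The core of that decision, with the delayed-rmap and force-flush plumbing dropped (sketch only):

static void zap_clear_choice_sketch(struct mmu_gather *tlb,
				    struct folio *folio, unsigned long addr,
				    pte_t *pte, unsigned int nr)
{
	struct mm_struct *mm = tlb->mm;
	pte_t ptent;

	if (folio_test_anon(folio)) {
		/* Folio is going away; accessed/dirty bits are unused. */
		clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
		return;
	}

	/* File-backed: read the bits back so folio state stays coherent. */
	ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
	if (pte_dirty(ptent))
		folio_mark_dirty(folio);
	if (pte_young(ptent))
		folio_mark_accessed(folio);
}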
+ */ + if (unlikely(folio_test_large(folio) && max_nr != 1)) { + nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags, + NULL, NULL); + + zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr, + addr, details, rss, force_flush, + force_break); + return nr; + } + zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr, + details, rss, force_flush, force_break); + return 1; } static unsigned long zap_pte_range(struct mmu_gather *tlb, @@ -1396,13 +1570,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, struct zap_details *details) { + bool force_flush = false, force_break = false; struct mm_struct *mm = tlb->mm; - int force_flush = 0; int rss[NR_MM_COUNTERS]; spinlock_t *ptl; pte_t *start_pte; pte_t *pte; swp_entry_t entry; + int nr; tlb_change_page_size(tlb, PAGE_SIZE); init_rss_vec(rss); @@ -1414,8 +1589,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, arch_enter_lazy_mmu_mode(); do { pte_t ptent = ptep_get(pte); + struct folio *folio; struct page *page; + int max_nr; + nr = 1; if (pte_none(ptent)) continue; @@ -1423,43 +1601,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, break; if (pte_present(ptent)) { - unsigned int delay_rmap; - - page = vm_normal_page(vma, addr, ptent); - if (unlikely(!should_zap_page(details, page))) - continue; - ptent = ptep_get_and_clear_full(mm, addr, pte, - tlb->fullmm); - arch_check_zapped_pte(vma, ptent); - tlb_remove_tlb_entry(tlb, pte, addr); - zap_install_uffd_wp_if_needed(vma, addr, pte, details, - ptent); - if (unlikely(!page)) { - ksm_might_unmap_zero_page(mm, ptent); - continue; - } - - delay_rmap = 0; - if (!PageAnon(page)) { - if (pte_dirty(ptent)) { - set_page_dirty(page); - if (tlb_delay_rmap(tlb)) { - delay_rmap = 1; - force_flush = 1; - } - } - if (pte_young(ptent) && likely(vma_has_recency(vma))) - mark_page_accessed(page); - } - rss[mm_counter(page)]--; - if (!delay_rmap) { - page_remove_rmap(page, vma, false); - if (unlikely(page_mapcount(page) < 0)) - print_bad_pte(vma, addr, ptent, page); - } - if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) { - force_flush = 1; - addr += PAGE_SIZE; + max_nr = (end - addr) / PAGE_SIZE; + nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr, + addr, details, rss, &force_flush, + &force_break); + if (unlikely(force_break)) { + addr += nr * PAGE_SIZE; break; } continue; @@ -1469,7 +1616,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, if (is_device_private_entry(entry) || is_device_exclusive_entry(entry)) { page = pfn_swap_entry_to_page(entry); - if (unlikely(!should_zap_page(details, page))) + folio = page_folio(page); + if (unlikely(!should_zap_folio(details, folio))) continue; /* * Both device private/exclusive mappings should only @@ -1480,20 +1628,22 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, WARN_ON_ONCE(!vma_is_anonymous(vma)); rss[mm_counter(page)]--; if (is_device_private_entry(entry)) - page_remove_rmap(page, vma, false); - put_page(page); + folio_remove_rmap_pte(folio, page, vma); + folio_put(folio); } else if (!non_swap_entry(entry)) { - /* Genuine swap entry, hence a private anon page */ + max_nr = (end - addr) / PAGE_SIZE; + nr = swap_pte_batch(pte, max_nr, ptent); + /* Genuine swap entries, hence a private anon pages */ if (!should_zap_cows(details)) continue; - rss[MM_SWAPENTS]--; - if (unlikely(!free_swap_and_cache(entry))) - print_bad_pte(vma, addr, ptent, NULL); + rss[MM_SWAPENTS] -= nr; + free_swap_and_cache_nr(entry, nr); } else if 
(is_migration_entry(entry)) { page = pfn_swap_entry_to_page(entry); - if (!should_zap_page(details, page)) + folio = page_folio(page); + if (!should_zap_folio(details, folio)) continue; - rss[mm_counter(page)]--; + rss[mm_counter(&folio->page)]--; } else if (pte_marker_entry_uffd_wp(entry)) { /* * For anon: always drop the marker; for file: only @@ -1510,9 +1660,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, /* We should have covered all the swap entry types */ WARN_ON_ONCE(1); } - pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); - zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); - } while (pte++, addr += PAGE_SIZE, addr != end); + clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm); + zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); + } while (pte += nr, addr += PAGE_SIZE * nr, addr != end); add_mm_rss_vec(mm, rss); arch_leave_lazy_mmu_mode(); @@ -1566,6 +1716,9 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, */ spin_unlock(ptl); } + + fixup_pmd(vma, pmd, addr); + if (pmd_none(*pmd)) { addr = next; continue; @@ -1770,6 +1923,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, tlb_finish_mmu(&tlb); hugetlb_zap_end(vma, details); } +EXPORT_SYMBOL_GPL(zap_page_range_single); /** * zap_vma_ptes - remove ptes mapping the vma @@ -1827,21 +1981,26 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, static int validate_page_before_insert(struct page *page) { - if (PageAnon(page) || PageSlab(page) || page_has_type(page)) + struct folio *folio = page_folio(page); + + if (folio_test_anon(folio) || folio_test_slab(folio) || + page_has_type(page)) return -EINVAL; - flush_dcache_page(page); + flush_dcache_folio(folio); return 0; } static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) { + struct folio *folio = page_folio(page); + if (!pte_none(ptep_get(pte))) return -EBUSY; /* Ok, finally just insert the thing.. */ - get_page(page); + folio_get(folio); inc_mm_counter(vma->vm_mm, mm_counter_file(page)); - page_add_file_rmap(page, vma, false); + folio_add_file_rmap_pte(folio, page, vma); set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot)); return 0; } @@ -2575,6 +2734,176 @@ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long } EXPORT_SYMBOL(vm_iomap_memory); +#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL +static int insert_page_into_pte_locked_mkspecial(struct mm_struct *mm, pte_t *pte, + unsigned long addr, struct page *page, pgprot_t prot) +{ + /* + * The page to be inserted should be either anonymous page or file page. + * + * In general, the anonymous page used in dio should be pinned, while + * the file page used in buffer IO is either locked (read) or writeback + * (sync). On the other hand, file page used in IO metadata read (e.g., + * ext4_get_inode_loc) can be unlocked, and the buffer_head is locked + * instead. + * + * Finally, it is the caller's responsibility to ensure the validity of + * pages to be inserted, i.e., such pages are used for IO requests. 
+ */ + if (!PageAnon(page) && !page_is_file_lru(page)) + return -EINVAL; + + flush_dcache_page(page); + + if (!pte_none(*pte)) + return -EBUSY; + set_pte_at(mm, addr, pte, pte_mkspecial(mk_pte(page, prot))); + return 0; +} + +static int insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page *page, pgprot_t prot) +{ + struct mm_struct *mm = vma->vm_mm; + int retval; + pte_t *pte; + spinlock_t *ptl; + + retval = -ENOMEM; + pte = get_locked_pte(mm, addr, &ptl); + if (!pte) + goto out; + retval = insert_page_into_pte_locked_mkspecial(mm, pte, addr, page, prot); + pte_unmap_unlock(pte, ptl); +out: + return retval; +} + +int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, struct page *page) +{ + if (addr < vma->vm_start || addr >= vma->vm_end) + return -EFAULT; + if (!(vma->vm_flags & VM_MIXEDMAP)) { + BUG_ON(mmap_read_trylock(vma->vm_mm)); + BUG_ON(vma->vm_flags & VM_PFNMAP); + vm_flags_set(vma, VM_MIXEDMAP); + } + return insert_page_mkspecial(vma, addr, page, vma->vm_page_prot); +} +EXPORT_SYMBOL(vm_insert_page_mkspecial); + +#ifdef pte_index +/* + * insert_pages_mkspecial() amortizes the cost of spinlock operations + * when inserting pages in a loop. Arch *must* define pte_index. + */ +static int insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num, pgprot_t prot) +{ + pmd_t *pmd = NULL; + pte_t *start_pte, *pte; + spinlock_t *pte_lock; + struct mm_struct *const mm = vma->vm_mm; + unsigned long curr_page_idx = 0; + unsigned long remaining_pages_total = *num; + unsigned long pages_to_write_in_pmd; + int ret; +more: + ret = -EFAULT; + pmd = walk_to_pmd(mm, addr); + if (!pmd) + goto out; + + pages_to_write_in_pmd = min_t(unsigned long, + remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); + + /* Allocate the PTE if necessary; takes PMD lock once only. */ + ret = -ENOMEM; + if (pte_alloc(mm, pmd)) + goto out; + + while (pages_to_write_in_pmd) { + int pte_idx = 0; + const int batch_size = min_t(int, pages_to_write_in_pmd, 8); + + start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); + for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { + int err = insert_page_into_pte_locked_mkspecial(mm, pte, + addr, pages[curr_page_idx], prot); + if (unlikely(err)) { + pte_unmap_unlock(start_pte, pte_lock); + ret = err; + remaining_pages_total -= pte_idx; + goto out; + } + addr += PAGE_SIZE; + ++curr_page_idx; + } + pte_unmap_unlock(start_pte, pte_lock); + pages_to_write_in_pmd -= batch_size; + remaining_pages_total -= batch_size; + } + if (remaining_pages_total) + goto more; + ret = 0; +out: + *num = remaining_pages_total; + return ret; +} +#endif /* pte_index */ + +/* + * vm_insert_pages_mkspecial - variant of vm_insert_pages using insert_pfn. + * + * The main purpose of vm_insert_pages_mkspecial is to combine the advantages of + * vm_insert_pages (batching the pmd lock) and remap_pfn_range_notrack (skipping + * track_pfn_insert). + * + * The caller should ensure the isolation (refcounted, PG_locked, PG_writeback, etc.) + * of @pages, and account for error case where a subset of @pages are mapped. 
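A hypothetical caller of vm_insert_pages_mkspecial() (defined just below), such as an mmap() handler exposing already-pinned DIO pages; note that *num reports the pages left unmapped on return, so the mapped count is the difference:

static int demo_mmap_pinned_sketch(struct vm_area_struct *vma,
				   struct page **pages, unsigned long nr)
{
	unsigned long remaining = nr;
	int err;

	err = vm_insert_pages_mkspecial(vma, vma->vm_start, pages, &remaining);
	if (err)
		pr_warn("mapped only %lu of %lu pages (err %d)\n",
			nr - remaining, nr, err);
	return err;
}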
+ */ +int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num) +{ +#ifdef pte_index + const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; + + if (addr < vma->vm_start || end_addr >= vma->vm_end) + return -EFAULT; + if (!(vma->vm_flags & VM_MIXEDMAP)) { + BUG_ON(mmap_read_trylock(vma->vm_mm)); + BUG_ON(vma->vm_flags & VM_PFNMAP); + vm_flags_set(vma, VM_MIXEDMAP); + } + return insert_pages_mkspecial(vma, addr, pages, num, vma->vm_page_prot); +#else + unsigned long idx = 0, pgcount = *num; + int err = -EINVAL; + + for (; idx < pgcount; ++idx) { + err = vm_insert_page_mkspecial(vma, addr + (PAGE_SIZE * idx), pages[idx]); + if (err) + break; + } + *num = pgcount - idx; + return err; +#endif /* pte_index */ +} +EXPORT_SYMBOL(vm_insert_pages_mkspecial); +#else +int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, struct page *page) +{ + return -EINVAL; +} +EXPORT_SYMBOL(vm_insert_page_mkspecial); +int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num) +{ + return -EINVAL; +} +EXPORT_SYMBOL(vm_insert_pages_mkspecial); +#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ + static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, @@ -3023,23 +3352,24 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) * case, all we need to do here is to mark the page as writable and update * any related book-keeping. */ -static inline void wp_page_reuse(struct vm_fault *vmf) +static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; - struct page *page = vmf->page; pte_t entry; VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); - VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page)); - /* - * Clear the pages cpupid information as the existing - * information potentially belongs to a now completely - * unrelated process. - */ - if (page) - page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); + if (folio) { + VM_BUG_ON(folio_test_anon(folio) && + !PageAnonExclusive(vmf->page)); + /* + * Clear the folio's cpupid information as the existing + * information potentially belongs to a now completely + * unrelated process. 
+ */ + folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1); + } flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); entry = pte_mkyoung(vmf->orig_pte); @@ -3078,6 +3408,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) int page_copied = 0; struct mmu_notifier_range range; int ret; + bool pfn_is_zero; delayacct_wpcopy_start(); @@ -3086,16 +3417,12 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) if (unlikely(anon_vma_prepare(vma))) goto oom; - if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { - new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); - if (!new_folio) - goto oom; - } else { - new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, - vmf->address, false); - if (!new_folio) - goto oom; + pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte)); + new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); + if (!new_folio) + goto oom; + if (!pfn_is_zero) { ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); if (ret) { /* @@ -3115,10 +3442,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) kmsan_copy_page_meta(&new_folio->page, vmf->page); } - if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL)) - goto oom_free_new; - folio_throttle_swaprate(new_folio, GFP_KERNEL); - __folio_mark_uptodate(new_folio); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, @@ -3160,7 +3483,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * some TLBs while the old PTE remains in others. */ ptep_clear_flush(vma, vmf->address, vmf->pte); - folio_add_new_anon_rmap(new_folio, vma, vmf->address); + folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE); folio_add_lru_vma(new_folio, vma); /* * We call the notify macro here because, when using secondary @@ -3181,10 +3504,10 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * threads. * * The critical issue is to order this - * page_remove_rmap with the ptp_clear_flush above. - * Those stores are ordered by (if nothing else,) + * folio_remove_rmap_pte() with the ptp_clear_flush + * above. Those stores are ordered by (if nothing else,) * the barrier present in the atomic_add_negative - * in page_remove_rmap. + * in folio_remove_rmap_pte(); * * Then the TLB flush in ptep_clear_flush ensures that * no process can access the old page before the @@ -3193,7 +3516,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * mapcount is visible. So transitively, TLBs to * old page will be flushed before it can be reused. */ - page_remove_rmap(vmf->page, vma, false); + folio_remove_rmap_pte(old_folio, vmf->page, vma); } /* Free the old page.. */ @@ -3217,8 +3540,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) delayacct_wpcopy_end(); return 0; -oom_free_new: - folio_put(new_folio); oom: if (old_folio) folio_put(old_folio); @@ -3232,6 +3553,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * writeable once the page is prepared * * @vmf: structure describing the fault + * @folio: the folio of vmf->page * * This function handles all that is needed to finish a write page fault in a * shared mapping due to PTE being read-only once the mapped page is prepared. @@ -3243,7 +3565,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before * we acquired PTE lock. 
*/ -vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) +static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio) { WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, @@ -3259,7 +3581,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return VM_FAULT_NOPAGE; } - wp_page_reuse(vmf); + wp_page_reuse(vmf, folio); return 0; } @@ -3284,9 +3606,9 @@ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) ret = vma->vm_ops->pfn_mkwrite(vmf); if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) return ret; - return finish_mkwrite_fault(vmf); + return finish_mkwrite_fault(vmf, NULL); } - wp_page_reuse(vmf); + wp_page_reuse(vmf, NULL); return 0; } @@ -3314,14 +3636,14 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) folio_put(folio); return tmp; } - tmp = finish_mkwrite_fault(vmf); + tmp = finish_mkwrite_fault(vmf, folio); if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { folio_unlock(folio); folio_put(folio); return tmp; } } else { - wp_page_reuse(vmf); + wp_page_reuse(vmf, folio); folio_lock(folio); } ret |= fault_dirty_shared_page(vmf); @@ -3330,20 +3652,58 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) return ret; } -/* - * This routine handles present pages, when - * * users try to write to a shared page (FAULT_FLAG_WRITE) - * * GUP wants to take a R/O pin on a possibly shared anonymous page - * (FAULT_FLAG_UNSHARE) - * - * It is done by copying the page to a new address and decrementing the - * shared-page counter for the old page. - * - * Note that this routine assumes that the protection checks have been - * done by the caller (the low-level page fault routine in most cases). - * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've - * done any necessary COW. - * +static bool wp_can_reuse_anon_folio(struct folio *folio, + struct vm_area_struct *vma) +{ + /* + * We have to verify under folio lock: these early checks are + * just an optimization to avoid locking the folio and freeing + * the swapcache if there is little hope that we can reuse. + * + * KSM doesn't necessarily raise the folio refcount. + */ + if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) + return false; + if (!folio_test_lru(folio)) + /* + * We cannot easily detect+handle references from + * remote LRU caches or references to LRU folios. + */ + lru_add_drain(); + if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) + return false; + if (!folio_trylock(folio)) + return false; + if (folio_test_swapcache(folio)) + folio_free_swap(folio); + if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { + folio_unlock(folio); + return false; + } + /* + * Ok, we've got the only folio reference from our mapping + * and the folio is locked, it's dark out, and we're wearing + * sunglasses. Hit it. + */ + folio_move_anon_rmap(folio, vma); + folio_unlock(folio); + return true; +} + +/* + * This routine handles present pages, when + * * users try to write to a shared page (FAULT_FLAG_WRITE) + * * GUP wants to take a R/O pin on a possibly shared anonymous page + * (FAULT_FLAG_UNSHARE) + * + * It is done by copying the page to a new address and decrementing the + * shared-page counter for the old page. + * + * Note that this routine assumes that the protection checks have been + * done by the caller (the low-level page fault routine in most cases). 
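The unlocked pre-check in wp_can_reuse_anon_folio() above condenses to the heuristic below (restated for clarity; the authoritative test re-runs under folio lock, after dropping any swap cache copy, and insists on exactly one remaining reference):

static bool anon_reuse_heuristic_sketch(struct folio *folio)
{
	/* One ref for our mapping, plus one if the swap cache holds it. */
	return !folio_test_ksm(folio) &&
	       folio_ref_count(folio) <= 1 + folio_test_swapcache(folio);
}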
+ * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've + * done any necessary COW. + * * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even * though the page will change only once the write actually happens. This * avoids a few races, and potentially makes it more efficient. @@ -3399,56 +3759,22 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) /* * Private mapping: create an exclusive anonymous page copy if reuse * is impossible. We might miss VM_WRITE for FOLL_FORCE handling. + * + * If we encounter a page that is marked exclusive, we must reuse + * the page without further checks. */ - if (folio && folio_test_anon(folio)) { - /* - * If the page is exclusive to this process we must reuse the - * page without further checks. - */ - if (PageAnonExclusive(vmf->page)) - goto reuse; - - /* - * We have to verify under folio lock: these early checks are - * just an optimization to avoid locking the folio and freeing - * the swapcache if there is little hope that we can reuse. - * - * KSM doesn't necessarily raise the folio refcount. - */ - if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) - goto copy; - if (!folio_test_lru(folio)) - /* - * We cannot easily detect+handle references from - * remote LRU caches or references to LRU folios. - */ - lru_add_drain(); - if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) - goto copy; - if (!folio_trylock(folio)) - goto copy; - if (folio_test_swapcache(folio)) - folio_free_swap(folio); - if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { - folio_unlock(folio); - goto copy; - } - /* - * Ok, we've got the only folio reference from our mapping - * and the folio is locked, it's dark out, and we're wearing - * sunglasses. Hit it. - */ - page_move_anon_rmap(vmf->page, vma); - folio_unlock(folio); -reuse: + if (folio && folio_test_anon(folio) && + (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { + if (!PageAnonExclusive(vmf->page)) + SetPageAnonExclusive(vmf->page); if (unlikely(unshare)) { pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } - wp_page_reuse(vmf); + wp_page_reuse(vmf, folio); return 0; } -copy: + if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma->anon_vma) { pte_unmap_unlock(vmf->pte, vmf->ptl); vma_end_read(vmf->vma); @@ -3658,7 +3984,7 @@ static inline bool should_try_to_free_swap(struct folio *folio, * reference only in case it's likely that we'll be the exlusive user. 
*/ return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) && - folio_ref_count(folio) == 2; + folio_ref_count(folio) == (1 + folio_nr_pages(folio)); } static vm_fault_t pte_marker_clear(struct vm_fault *vmf) @@ -3728,6 +4054,192 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } +static struct folio *__alloc_swap_folio(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct folio *folio; + swp_entry_t entry; + + folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, + vmf->address, false); + if (!folio) + return NULL; + + entry = pte_to_swp_entry(vmf->orig_pte); + if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, + GFP_KERNEL, entry)) { + folio_put(folio); + return NULL; + } + + return folio; +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int non_swapcache_batch(swp_entry_t entry, int max_nr) +{ + struct swap_info_struct *si = swp_swap_info(entry); + pgoff_t offset = swp_offset(entry); + int i; + + /* + * While allocating a large folio and doing swap_read_folio, which is + * the case the being faulted pte doesn't have swapcache. We need to + * ensure all PTEs have no cache as well, otherwise, we might go to + * swap devices while the content is in swapcache. + */ + for (i = 0; i < max_nr; i++) { + if ((si->swap_map[offset + i] & SWAP_HAS_CACHE)) + return i; + } + + return i; +} + +/* + * Check if the PTEs within a range are contiguous swap entries + * and have consistent swapcache, zeromap. + */ +static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages) +{ + unsigned long addr; + swp_entry_t entry; + int idx; + pte_t pte; + + addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); + idx = (vmf->address - addr) / PAGE_SIZE; + pte = ptep_get(ptep); + + if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx))) + return false; + entry = pte_to_swp_entry(pte); + if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages) + return false; + + /* + * swap_read_folio() can't handle the case a large folio is hybridly + * from different backends. And they are likely corner cases. Similar + * things might be added once zswap support large folios. + */ + if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages)) + return false; + + return true; +} + +static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset, + unsigned long addr, + unsigned long orders) +{ + int order, nr; + + order = highest_order(orders); + + /* + * To swap in a THP with nr pages, we require that its first swap_offset + * is aligned with that number, as it was when the THP was swapped out. + * This helps filter out most invalid entries. + */ + while (orders) { + nr = 1 << order; + if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr) + break; + order = next_order(&orders, order); + } + + return orders; +} + +static struct folio *alloc_swap_folio(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + unsigned long orders; + struct folio *folio; + unsigned long addr; + swp_entry_t entry; + spinlock_t *ptl; + pte_t *pte; + gfp_t gfp; + int order; + + /* + * If uffd is active for the vma we need per-page fault fidelity to + * maintain the uffd semantics. + */ + if (unlikely(userfaultfd_armed(vma))) + goto fallback; + + /* + * A large swapped out folio could be partially or fully in zswap. We + * lack handling for such cases, so fallback to swapping in order-0 + * folio. 
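thp_swap_suitable_orders() above relies on the invariant that a THP swapped out as one unit keeps its in-folio offset, so the virtual page number and the swap offset must agree modulo the folio size. The rule in isolation, with a worked case: faulting virtual page 19 with swap offset 43 passes order 2 and order 3 (both values are 3 mod 4 and mod 8) but fails order 4, since 19 % 16 == 3 while 43 % 16 == 11:

static bool swap_order_suitable_sketch(unsigned long vaddr_pfn,
				       pgoff_t offset, int order)
{
	unsigned long nr = 1UL << order;

	return (vaddr_pfn % nr) == (offset % nr);
}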
+ */ + if (!zswap_never_enabled()) + goto fallback; + + entry = pte_to_swp_entry(vmf->orig_pte); + /* + * Get a list of all the (large) orders below PMD_ORDER that are enabled + * and suitable for swapping THP. + */ + orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true, + BIT(PMD_ORDER) - 1); + orders = thp_vma_suitable_orders(vma, vmf->address, orders); + orders = thp_swap_suitable_orders(swp_offset(entry), + vmf->address, orders); + + if (!orders) + goto fallback; + + pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, + vmf->address & PMD_MASK, &ptl); + if (unlikely(!pte)) + goto fallback; + + /* + * For do_swap_page, find the highest order where the aligned range is + * completely swap entries with contiguous swap offsets. + */ + order = highest_order(orders); + while (orders) { + addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); + if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order)) + break; + order = next_order(&orders, order); + } + + pte_unmap_unlock(pte, ptl); + + /* Try allocating the highest of the remaining orders. */ + gfp = vma_thp_gfp_mask(vma); + while (orders) { + addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); + folio = vma_alloc_folio(gfp, order, vma, addr, true); + if (folio) { + if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, + gfp, entry)) + return folio; + folio_put(folio); + } + order = next_order(&orders, order); + } + +fallback: + return __alloc_swap_folio(vmf); +} +#else /* !CONFIG_TRANSPARENT_HUGEPAGE */ +static inline bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages) +{ + return false; +} + +static struct folio *alloc_swap_folio(struct vm_fault *vmf) +{ + return __alloc_swap_folio(vmf); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + /* * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. @@ -3749,6 +4261,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) pte_t pte; vm_fault_t ret = 0; void *shadow = NULL; + int nr_pages; + unsigned long page_idx; + unsigned long address; + pte_t *ptep; if (!pte_unmap_same(vmf)) goto out; @@ -3812,35 +4328,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) if (!folio) { if (data_race(si->flags & SWP_SYNCHRONOUS_IO) && __swap_count(entry) == 1) { - /* - * Prevent parallel swapin from proceeding with - * the cache flag. Otherwise, another thread may - * finish swapin first, free the entry, and swapout - * reusing the same entry. It's undetectable as - * pte_same() returns true due to entry reuse. - */ - if (swapcache_prepare(entry)) { - /* Relax a bit to prevent rapid repeated page faults */ - schedule_timeout_uninterruptible(1); - goto out; - } - need_clear_cache = true; - /* skip swapcache */ - folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, - vma, vmf->address, false); + folio = alloc_swap_folio(vmf); page = &folio->page; if (folio) { __folio_set_locked(folio); __folio_set_swapbacked(folio); - if (mem_cgroup_swapin_charge_folio(folio, - vma->vm_mm, GFP_KERNEL, - entry)) { - ret = VM_FAULT_OOM; + nr_pages = folio_nr_pages(folio); + if (folio_test_large(folio)) + entry.val = ALIGN_DOWN(entry.val, nr_pages); + /* + * Prevent parallel swapin from proceeding with + * the cache flag. Otherwise, another thread + * may finish swapin first, free the entry, and + * swapout reusing the same entry. It's + * undetectable as pte_same() returns true due + * to entry reuse. + */ + if (swapcache_prepare(entry, nr_pages)) { + /* + * Relax a bit to prevent rapid + * repeated page faults. 
+ */ + schedule_timeout_uninterruptible(1); goto out_page; } - mem_cgroup_swapin_uncharge_swap(entry); + need_clear_cache = true; + + mem_cgroup_swapin_uncharge_swap(entry, nr_pages); shadow = get_shadow_from_swap_cache(entry); if (shadow) @@ -3908,15 +4424,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * page->index of !PageKSM() pages would be nonlinear inside the * anon VMA -- PageKSM() is lost on actual swapout. */ - page = ksm_might_need_to_copy(page, vma, vmf->address); - if (unlikely(!page)) { + folio = ksm_might_need_to_copy(folio, vma, vmf->address); + if (unlikely(!folio)) { ret = VM_FAULT_OOM; + folio = swapcache; goto out_page; - } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) { + } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { ret = VM_FAULT_HWPOISON; + folio = swapcache; goto out_page; } - folio = page_folio(page); + if (folio != swapcache) + page = folio_page(folio, 0); /* * If we want to map a page that's in the swapcache writable, we @@ -3944,6 +4463,56 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) goto out_nomap; } + /* allocated large folios for SWP_SYNCHRONOUS_IO */ + if (folio_test_large(folio) && !folio_test_swapcache(folio)) { + unsigned long nr = folio_nr_pages(folio); + unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE); + unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE; + pte_t *folio_ptep = vmf->pte - idx; + pte_t folio_pte = ptep_get(folio_ptep); + + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || + swap_pte_batch(folio_ptep, nr, folio_pte) != nr) + goto out_nomap; + + page_idx = idx; + address = folio_start; + ptep = folio_ptep; + goto check_folio; + } + + nr_pages = 1; + page_idx = 0; + address = vmf->address; + ptep = vmf->pte; + if (folio_test_large(folio) && folio_test_swapcache(folio)) { + int nr = folio_nr_pages(folio); + unsigned long idx = folio_page_idx(folio, page); + unsigned long folio_start = address - idx * PAGE_SIZE; + unsigned long folio_end = folio_start + nr * PAGE_SIZE; + pte_t *folio_ptep; + pte_t folio_pte; + + if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start))) + goto check_folio; + if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end))) + goto check_folio; + + folio_ptep = vmf->pte - idx; + folio_pte = ptep_get(folio_ptep); + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || + swap_pte_batch(folio_ptep, nr, folio_pte) != nr) + goto check_folio; + + page_idx = idx; + address = folio_start; + ptep = folio_ptep; + nr_pages = nr; + entry = folio->swap; + page = &folio->page; + } + +check_folio: /* * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte * must never point at an anonymous page in the swapcache that is @@ -4003,12 +4572,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * We're already holding a reference on the page but haven't mapped it * yet. 
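Both large-folio paths in do_swap_page() above validate a candidate batch the same way before mapping nr pages at once; extracted here for clarity (sketch; assumes pte_move_swp_offset() slides a swap PTE's offset by a delta while preserving its soft-dirty/uffd-wp bits, matching its use in the hunk):

static bool swapin_batch_ok_sketch(struct vm_fault *vmf, pte_t *folio_ptep,
				   unsigned long idx, int nr)
{
	pte_t folio_pte = ptep_get(folio_ptep);

	/* PTE at folio index 0 must be the faulting PTE slid back by idx. */
	if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
		return false;
	/* And all nr slots must form one contiguous swap batch. */
	return swap_pte_batch(folio_ptep, nr, folio_pte) == nr;
}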
*/ - swap_free(entry); + swap_free_nr(entry, nr_pages); if (should_try_to_free_swap(folio, vma, vmf->flags)) folio_free_swap(folio); - inc_mm_counter(vma->vm_mm, MM_ANONPAGES); - dec_mm_counter(vma->vm_mm, MM_SWAPENTS); + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages); pte = mk_pte(page, vma->vm_page_prot); /* @@ -4025,25 +4594,38 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) } rmap_flags |= RMAP_EXCLUSIVE; } - flush_icache_page(vma, page); + folio_ref_add(folio, nr_pages - 1); + flush_icache_pages(vma, page, nr_pages); if (pte_swp_soft_dirty(vmf->orig_pte)) pte = pte_mksoft_dirty(pte); if (pte_swp_uffd_wp(vmf->orig_pte)) pte = pte_mkuffd_wp(pte); - vmf->orig_pte = pte; + vmf->orig_pte = pte_advance_pfn(pte, page_idx); /* ksm created a completely new copy */ if (unlikely(folio != swapcache && swapcache)) { - page_add_new_anon_rmap(page, vma, vmf->address); + folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); + } else if (!folio_test_anon(folio)) { + /* + * We currently only expect small !anon folios which are either + * fully exclusive or fully shared, or newly allocated large + * folios which are fully exclusive. If we ever get large + * folios within swapcache here, we have to be careful. + */ + VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio)); + VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + folio_add_new_anon_rmap(folio, vma, address, rmap_flags); } else { - page_add_anon_rmap(page, vma, vmf->address, rmap_flags); + folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, + rmap_flags); } VM_BUG_ON(!folio_test_anon(folio) || (pte_write(pte) && !PageAnonExclusive(page))); - set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); - arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); + set_ptes(vma->vm_mm, address, ptep, pte, nr_pages); + arch_do_swap_page_nr(vma->vm_mm, vma, address, + pte, pte, nr_pages); folio_unlock(folio); if (folio != swapcache && swapcache) { @@ -4067,14 +4649,14 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) } /* No need to invalidate - it was non-present before */ - update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); + update_mmu_cache_range(vmf, vma, address, ptep, nr_pages); unlock: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); out: /* Clear the swap cache pin for direct swapin after PTL unlock */ if (need_clear_cache) - swapcache_clear(si, entry); + swapcache_clear(si, entry, nr_pages); if (si) put_swap_device(si); return ret; @@ -4090,12 +4672,98 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) folio_put(swapcache); } if (need_clear_cache) - swapcache_clear(si, entry); + swapcache_clear(si, entry, nr_pages); if (si) put_swap_device(si); return ret; } +static bool pte_range_none(pte_t *pte, int nr_pages) +{ + int i; + + for (i = 0; i < nr_pages; i++) { + if (!pte_none(ptep_get_lockless(pte + i))) + return false; + } + + return true; +} + +static struct folio *alloc_anon_folio(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + unsigned long orders; + struct folio *folio; + unsigned long addr; + pte_t *pte; + gfp_t gfp; + int order; + + /* + * If uffd is active for the vma we need per-page fault fidelity to + * maintain the uffd semantics. + */ + if (unlikely(userfaultfd_armed(vma))) + goto fallback; + + /* + * Get a list of all the (large) orders below PMD_ORDER that are enabled + * for this vma. 
Then filter out the orders that can't be allocated over + * the faulting address and still be fully contained in the vma. + */ + orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true, + BIT(PMD_ORDER) - 1); + orders = thp_vma_suitable_orders(vma, vmf->address, orders); + + if (!orders) + goto fallback; + + pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK); + if (!pte) + return ERR_PTR(-EAGAIN); + + /* + * Find the highest order where the aligned range is completely + * pte_none(). Note that all remaining orders will be completely + * pte_none(). + */ + order = highest_order(orders); + while (orders) { + addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); + if (pte_range_none(pte + pte_index(addr), 1 << order)) + break; + order = next_order(&orders, order); + } + + pte_unmap(pte); + + /* Try allocating the highest of the remaining orders. */ + gfp = vma_thp_gfp_mask(vma); + while (orders) { + addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); + folio = vma_alloc_folio(gfp, order, vma, addr, true); + if (folio) { + if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { + count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); + folio_put(folio); + goto next; + } + folio_throttle_swaprate(folio, gfp); + clear_huge_page(&folio->page, vmf->address, 1 << order); + return folio; + } +next: + count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK); + order = next_order(&orders, order); + } + +fallback: +#endif + return folio_prealloc(vma->vm_mm, vma, vmf->address, true); +} + /* * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. @@ -4105,8 +4773,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) { bool uffd_wp = vmf_orig_pte_uffd_wp(vmf); struct vm_area_struct *vma = vmf->vma; + unsigned long addr = vmf->address; struct folio *folio; vm_fault_t ret = 0; + int nr_pages = 1; pte_t entry; /* File mapping without ->vm_ops ? */ @@ -4147,13 +4817,15 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) /* Allocate our own private page. 
*/ if (unlikely(anon_vma_prepare(vma))) goto oom; - folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); + /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */ + folio = alloc_anon_folio(vmf); + if (IS_ERR(folio)) + return 0; if (!folio) goto oom; - if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) - goto oom_free_page; - folio_throttle_swaprate(folio, GFP_KERNEL); + nr_pages = folio_nr_pages(folio); + addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); /* * The memory barrier inside __folio_mark_uptodate makes sure that @@ -4167,12 +4839,14 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) if (vma->vm_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry), vma); - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, - &vmf->ptl); + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); if (!vmf->pte) goto release; - if (vmf_pte_changed(vmf)) { - update_mmu_tlb(vma, vmf->address, vmf->pte); + if (nr_pages == 1 && vmf_pte_changed(vmf)) { + update_mmu_tlb(vma, addr, vmf->pte); + goto release; + } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { + update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); goto release; } @@ -4187,16 +4861,18 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) return handle_userfault(vmf, VM_UFFD_MISSING); } - inc_mm_counter(vma->vm_mm, MM_ANONPAGES); - folio_add_new_anon_rmap(folio, vma, vmf->address); + folio_ref_add(folio, nr_pages - 1); + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); + count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC); + folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); setpte: if (uffd_wp) entry = pte_mkuffd_wp(entry); - set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); + set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); /* No need to invalidate - it was non-present before */ - update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); + update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages); unlock: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); @@ -4204,8 +4880,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) release: folio_put(folio); goto unlock; -oom_free_page: - folio_put(folio); oom: return VM_FAULT_OOM; } @@ -4287,6 +4961,7 @@ static void deposit_prealloc_pte(struct vm_fault *vmf) vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) { + struct folio *folio = page_folio(page); struct vm_area_struct *vma = vmf->vma; bool write = vmf->flags & FAULT_FLAG_WRITE; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; @@ -4302,12 +4977,12 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags)) return ret; - if (!transhuge_vma_suitable(vma, haddr)) + if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) return ret; - page = compound_head(page); - if (compound_order(page) != HPAGE_PMD_ORDER) + if (folio_order(folio) != HPAGE_PMD_ORDER) return ret; + page = &folio->page; /* * Just backoff if any subpage of a THP is corrupted otherwise @@ -4315,7 +4990,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) * check. This kind of THP just can be PTE mapped. Access to * the corrupted subpage should trigger SIGBUS as expected. 
*/ - if (unlikely(PageHasHWPoisoned(page))) + if (unlikely(folio_test_has_hwpoisoned(folio))) return ret; /* @@ -4339,7 +5014,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); - page_add_file_rmap(page, vma, true); + folio_add_file_rmap_pmd(folio, page, vma); /* * deposit and withdraw with pmd lock held @@ -4396,13 +5071,11 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio, entry = pte_mkuffd_wp(entry); /* copy-on-write page */ if (write && !(vma->vm_flags & VM_SHARED)) { - add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr); VM_BUG_ON_FOLIO(nr != 1, folio); - folio_add_new_anon_rmap(folio, vma, addr); + folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); } else { - add_mm_counter(vma->vm_mm, mm_counter_file(page), nr); - folio_add_file_rmap_range(folio, page, nr, vma, false); + folio_add_file_rmap_ptes(folio, page, nr, vma); } set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); @@ -4437,10 +5110,15 @@ vm_fault_t finish_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *page; + struct folio *folio; vm_fault_t ret; + int type, nr_pages; + unsigned long addr = vmf->address; + bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) && + !(vma->vm_flags & VM_SHARED); /* Did we COW the page? */ - if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) + if (is_cow) page = vmf->cow_page; else page = vmf->page; @@ -4468,22 +5146,62 @@ vm_fault_t finish_fault(struct vm_fault *vmf) return VM_FAULT_OOM; } + folio = page_folio(page); + nr_pages = folio_nr_pages(folio); + + /* + * Use per-page faults to maintain the uffd semantics; the same + * approach also applies to non-anonymous-shmem faults to avoid + * inflating the RSS of the process. + */ + if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) { + nr_pages = 1; + } else if (nr_pages > 1) { + pgoff_t idx = folio_page_idx(folio, page); + /* The page offset of vmf->address within the VMA. */ + pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; + /* The index of the entry in the pagetable for fault page. */ + pgoff_t pte_off = pte_index(vmf->address); + + /* + * Fall back to per-page fault if the folio size in the page + * cache is beyond the VMA limits or the PMD pagetable limits. + */ + if (unlikely(vma_off < idx || + vma_off + (nr_pages - idx) > vma_pages(vma) || + pte_off < idx || + pte_off + (nr_pages - idx) > PTRS_PER_PTE)) { + nr_pages = 1; + } else { + /* Now we can set mappings for the whole large folio. */ + addr = vmf->address - idx * PAGE_SIZE; + page = &folio->page; + } + } + + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); + addr, &vmf->ptl); if (!vmf->pte) return VM_FAULT_NOPAGE; /* Re-check under ptl */ - if (likely(!vmf_pte_changed(vmf))) { - struct folio *folio = page_folio(page); - - set_pte_range(vmf, folio, page, 1, vmf->address); - ret = 0; - } else { - update_mmu_tlb(vma, vmf->address, vmf->pte); + if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) { + update_mmu_tlb(vma, addr, vmf->pte); + ret = VM_FAULT_NOPAGE; + goto unlock; + } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { + update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); ret = VM_FAULT_NOPAGE; + goto unlock; } + folio_ref_add(folio, nr_pages - 1); + set_pte_range(vmf, folio, page, nr_pages, addr); + type = is_cow ? 
MM_ANONPAGES : mm_counter_file(page); + add_mm_counter(vma->vm_mm, type, nr_pages); + ret = 0; + +unlock: pte_unmap_unlock(vmf->pte, vmf->ptl); return ret; } @@ -4629,6 +5347,7 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf) static vm_fault_t do_cow_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; + struct folio *folio; vm_fault_t ret; if (vmf->flags & FAULT_FLAG_VMA_LOCK) { @@ -4639,16 +5358,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf) if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; - vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); - if (!vmf->cow_page) + folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); + if (!folio) return VM_FAULT_OOM; - if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, - GFP_KERNEL)) { - put_page(vmf->cow_page); - return VM_FAULT_OOM; - } - folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL); + vmf->cow_page = &folio->page; ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) @@ -4657,7 +5371,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf) return ret; copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); - __SetPageUptodate(vmf->cow_page); + __folio_mark_uptodate(folio); ret |= finish_fault(vmf); unlock_page(vmf->page); @@ -4666,7 +5380,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf) goto uncharge_out; return ret; uncharge_out: - put_page(vmf->cow_page); + folio_put(folio); return ret; } @@ -4765,10 +5479,10 @@ static vm_fault_t do_fault(struct vm_fault *vmf) return ret; } -int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, +int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, int page_nid, int *flags) { - get_page(page); + folio_get(folio); /* Record the current PID accessing VMA */ vma_set_access_pid_bit(vma); @@ -4779,19 +5493,74 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, *flags |= TNF_FAULT_LOCAL; } - return mpol_misplaced(page, vma, addr); + return mpol_misplaced(folio, vma, addr); +} + +static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, + unsigned long fault_addr, pte_t *fault_pte, + bool writable) +{ + pte_t pte, old_pte; + + old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte); + pte = pte_modify(old_pte, vma->vm_page_prot); + pte = pte_mkyoung(pte); + if (writable) + pte = pte_mkwrite(pte, vma); + ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte); + update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1); +} + +static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, + struct folio *folio, pte_t fault_pte, + bool ignore_writable, bool pte_write_upgrade) +{ + int nr = pte_pfn(fault_pte) - folio_pfn(folio); + unsigned long start, end, addr = vmf->address; + unsigned long addr_start = addr - (nr << PAGE_SHIFT); + unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE); + pte_t *start_ptep; + + /* Stay within the VMA and within the page table. 
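+ * The folio may have been mapped through a since-split VMA or + * may cross a page-table boundary, so clamp the restore range + * to both with max3()/min3() below.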
*/ + start = max3(addr_start, pt_start, vma->vm_start); + end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE, + vma->vm_end); + start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT); + + /* Restore all PTEs' mapping of the large folio */ + for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) { + pte_t ptent = ptep_get(start_ptep); + bool writable = false; + + if (!pte_present(ptent) || !pte_protnone(ptent)) + continue; + + if (pfn_folio(pte_pfn(ptent)) != folio) + continue; + + if (!ignore_writable) { + ptent = pte_modify(ptent, vma->vm_page_prot); + writable = pte_write(ptent); + if (!writable && pte_write_upgrade && + can_change_pte_writable(vma, addr, ptent)) + writable = true; + } + + numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable); + } } static vm_fault_t do_numa_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - struct page *page = NULL; - int page_nid = NUMA_NO_NODE; - bool writable = false; + struct folio *folio = NULL; + int nid = NUMA_NO_NODE; + bool writable = false, ignore_writable = false; + bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma); int last_cpupid; int target_nid; pte_t pte, old_pte; - int flags = 0; + int flags = 0, nr_pages; /* * The "pte" at this point cannot be used safely without @@ -4813,16 +5582,12 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * is only valid while holding the PT lock. */ writable = pte_write(pte); - if (!writable && vma_wants_manual_pte_write_upgrade(vma) && + if (!writable && pte_write_upgrade && can_change_pte_writable(vma, vmf->address, pte)) writable = true; - page = vm_normal_page(vma, vmf->address, pte); - if (!page || is_zone_device_page(page)) - goto out_map; - - /* TODO: handle PTE-mapped THP */ - if (PageCompound(page)) + folio = vm_normal_folio(vma, vmf->address, pte); + if (!folio || folio_is_zone_device(folio)) goto out_map; /* @@ -4837,36 +5602,37 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) flags |= TNF_NO_GROUP; /* - * Flag if the page is shared between multiple address spaces. This + * Flag if the folio is shared between multiple address spaces. This * is later used when determining whether to group tasks together */ - if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) + if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) flags |= TNF_SHARED; - page_nid = page_to_nid(page); + nid = folio_nid(folio); + nr_pages = folio_nr_pages(folio); /* * For memory tiering mode, cpupid of slow memory page is used * to record page access time. So use default value. 
*/ if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && - !node_is_toptier(page_nid)) + !node_is_toptier(nid)) last_cpupid = (-1 & LAST_CPUPID_MASK); else - last_cpupid = page_cpupid_last(page); - target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, - &flags); + last_cpupid = folio_last_cpupid(folio); + target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags); if (target_nid == NUMA_NO_NODE) { - put_page(page); + folio_put(folio); goto out_map; } pte_unmap_unlock(vmf->pte, vmf->ptl); writable = false; + ignore_writable = true; /* Migrate to the requested node */ - if (migrate_misplaced_page(page, vma, target_nid)) { - page_nid = target_nid; + if (migrate_misplaced_folio(folio, vma, target_nid)) { + nid = target_nid; flags |= TNF_MIGRATED; - task_numa_fault(last_cpupid, page_nid, 1, flags); + task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0; } @@ -4879,22 +5645,22 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } + out_map: /* * Make it present again, depending on how arch implements * non-accessible ptes, some can allow access by kernel mode. */ - old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); - pte = pte_modify(old_pte, vma->vm_page_prot); - pte = pte_mkyoung(pte); - if (writable) - pte = pte_mkwrite(pte, vma); - ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); - update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); + if (folio && folio_test_large(folio)) + numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, + pte_write_upgrade); + else + numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, + writable); pte_unmap_unlock(vmf->pte, vmf->ptl); - if (page_nid != NUMA_NO_NODE) - task_numa_fault(last_cpupid, page_nid, 1, flags); + if (nid != NUMA_NO_NODE) + task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0; } @@ -5025,8 +5791,16 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) if (!vmf->pte) return do_pte_missing(vmf); - if (!pte_present(vmf->orig_pte)) - return do_swap_page(vmf); + if (!pte_present(vmf->orig_pte)) { + vm_fault_t retval; + u64 start; + + memcg_lat_stat_start(&start); + retval = do_swap_page(vmf); + if (retval & (VM_FAULT_MAJOR | VM_FAULT_OOM)) + memcg_lat_stat_end(MEM_LAT_DIRECT_SWAPIN, start); + return retval; + } if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) return do_numa_page(vmf); @@ -5100,7 +5874,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, return VM_FAULT_OOM; retry_pud: if (pud_none(*vmf.pud) && - hugepage_vma_check(vma, vm_flags, false, true, true)) { + thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) { ret = create_huge_pud(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; @@ -5134,7 +5908,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, goto retry_pud; if (pmd_none(*vmf.pmd) && - hugepage_vma_check(vma, vm_flags, false, true, true)) { + thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) { ret = create_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; @@ -5162,6 +5936,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, return 0; } } + + fixup_pmd(vma, vmf.pmd, address); } return handle_pte_fault(&vmf); @@ -6167,3 +6943,336 @@ void ptlock_free(struct ptdesc *ptdesc) kmem_cache_free(page_ptl_cachep, ptdesc->ptl); } #endif + +/* Fast reflink */ +static inline bool is_pmd_tbl_wrprotect(pmd_t pmd) +{ +#if defined(CONFIG_ARM64) +#define 
PMD_SECT_AP_WRPROTECT (_AT(pmdval_t, 2) << 61) /* APTable[1:0] */ + return (pmd_val(pmd) & PMD_TABLE_BIT) && + (pmd_val(pmd) & PMD_SECT_AP_WRPROTECT); +#elif defined(CONFIG_X86) + return (pmd_flags(pmd) & ~_PAGE_USER) == (_KERNPG_TABLE & ~_PAGE_RW); +#else + return false; +#endif +} + +static inline void pmdp_set_tbl_wrprotect(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ +#if defined(CONFIG_ARM64) + set_pmd(pmdp, __pmd(pmd_val(*pmdp) | PMD_SECT_AP_WRPROTECT)); +#elif defined(CONFIG_X86) + pmdp_set_wrprotect(mm, addr, pmdp); +#endif +} + +static inline void pmdp_clear_tbl_wrprotect(pmd_t *pmdp, + struct vm_area_struct *vma) +{ +#if defined(CONFIG_ARM64) + set_pmd(pmdp, __pmd(pmd_val(*pmdp) & ~PMD_SECT_AP_WRPROTECT)); +#elif defined(CONFIG_X86) + set_pmd(pmdp, pmd_mkwrite(*pmdp, vma)); +#endif +} + +bool is_pmd_fast_reflink(pmd_t pmd) +{ + return !is_swap_pmd(pmd) && !pmd_trans_huge(pmd) && + !pmd_devmap(pmd) && is_pmd_tbl_wrprotect(pmd); +} + +static int follow_pmd(struct mm_struct *mm, unsigned long address, + pmd_t **pmdp) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset(mm, address); + if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) + goto out; + + p4d = p4d_offset(pgd, address); + if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) + goto out; + + pud = pud_offset(p4d, address); + if (pud_none(*pud) || unlikely(pud_bad(*pud))) + goto out; + + pmd = pmd_offset(pud, address); + if (pmd_huge(*pmd)) + goto found; + + if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) + goto out; + +found: + *pmdp = pmd; + return 0; +out: + return -EINVAL; +} + +static void fr_apply_pte_range(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long start, unsigned long end) +{ + pte_t *start_pte; + pte_t *ptep, pte; + spinlock_t *ptl; + + start_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); + ptep = start_pte; + + do { + pte = *ptep; + if (pte_none(pte)) + continue; + + if (!pte_dirty(pte) && !pte_write(pte)) + continue; + + /* The caller is responsible for tlb flush. 
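+ * Clear each entry atomically before installing the clean, + * write-protected copy so that concurrent hardware updates of + * the access/dirty bits cannot race with the rewrite.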
*/ + pte = ptep_get_and_clear(vma->vm_mm, start, ptep); + pte = pte_wrprotect(pte); + pte = pte_mkclean(pte); + set_pte_at(vma->vm_mm, start, ptep, pte); + } while (ptep++, start += PAGE_SIZE, start != end); + + pte_unmap_unlock(start_pte, ptl); +} + +static void fr_apply_vma(struct vm_area_struct *vma) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long start = vma->vm_start; + unsigned long end = vma->vm_end; + unsigned long next; + spinlock_t *pml; + pmd_t *pmdp = NULL; + pmd_t pmd; + bool applied = false; + + do { + next = pmd_addr_end(start, end); + if (follow_pmd(mm, start, &pmdp)) + continue; + + pml = pmd_lock(mm, pmdp); + if (pmd_huge(*pmdp)) { +#ifdef CONFIG_FS_DAX_PMD + if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp)) + goto unlock_pmd; + + pmd = pmdp_invalidate(vma, start, pmdp); + pmd = pmd_wrprotect(pmd); + pmd = pmd_mkclean(pmd); + set_pmd_at(mm, start, pmdp, pmd); +unlock_pmd: +#endif + spin_unlock(pml); + continue; + } + + if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp))) { + spin_unlock(pml); + continue; + } + + if (IS_ALIGNED(start, PMD_SIZE) && (start + PMD_SIZE <= end)) { + pmdp_set_tbl_wrprotect(mm, start, pmdp); + flush_tlb_range(vma, start, start + PMD_SIZE); + applied = true; + spin_unlock(pml); + continue; + } else { + spin_unlock(pml); + fr_apply_pte_range(vma, pmdp, start, next); + flush_tlb_range(vma, start, next); + continue; + } + } while (start = next, start != end); + + if (applied) + vma->fast_reflink = applied; +} + +static void fast_reflink_fixup(struct work_struct *work); +int fast_reflink_apply(struct address_space *mapping, pgoff_t start, + pgoff_t end) +{ + struct vm_area_struct *vma; + + i_mmap_lock_read(mapping); + vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) { + if (!(vma->vm_flags & VM_SHARED)) + continue; + + fr_apply_vma(vma); + } + i_mmap_unlock_read(mapping); + + if (!mapping->fast_reflink_work) { + struct fast_reflink_work *fr_work; + + fr_work = kmalloc(sizeof(*fr_work), GFP_KERNEL|__GFP_NOFAIL); + INIT_WORK(&fr_work->work, fast_reflink_fixup); + fr_work->mapping = mapping; + mapping->fast_reflink_work = fr_work; + } + schedule_work(&mapping->fast_reflink_work->work); + + return 0; +} + +static void fr_fixup_pte_range(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long start, unsigned long end) +{ + pte_t *start_pte; + pte_t *ptep, pte; + spinlock_t *ptl; + + start_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); + ptep = start_pte; + + /* Already fixed up */ + if (unlikely(!is_pmd_fast_reflink(*pmd))) + goto out; + + do { + pte = *ptep; + if (pte_none(pte)) + continue; + + if (!pte_dirty(pte) && !pte_write(pte)) + continue; + + /* The caller is responsible for tlb flush. 
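+ * Same clear-then-set sequence as in fr_apply_pte_range(), for + * the same reason: the atomic clear keeps hardware access/dirty + * bit updates from racing with the rewrite.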
*/ + pte = ptep_get_and_clear(vma->vm_mm, start, ptep); + pte = pte_wrprotect(pte); + pte = pte_mkclean(pte); + set_pte_at(vma->vm_mm, start, ptep, pte); + } while (ptep++, start += PAGE_SIZE, start != end); + +out: + pte_unmap_unlock(start_pte, ptl); +} + +static void fr_fixup_pmd_range(struct vm_area_struct *vma, pud_t *pud, + unsigned long start, unsigned long end) +{ + pmd_t *pmd; + unsigned long next; + spinlock_t *pml; + + pmd = pmd_offset(pud, start); + do { + next = pmd_addr_end(start, end); + if (pmd_none(*pmd)) + continue; + + pml = pmd_lock(vma->vm_mm, pmd); + if (is_pmd_fast_reflink(*pmd)) { + spin_unlock(pml); + fr_fixup_pte_range(vma, pmd, start, next); + + pml = pmd_lock(vma->vm_mm, pmd); + if (is_pmd_fast_reflink(*pmd)) + pmdp_clear_tbl_wrprotect(pmd, vma); + } + spin_unlock(pml); + } while (pmd++, start = next, start != end); +} + +static void fr_fixup_pud_range(struct vm_area_struct *vma, p4d_t *p4d, + unsigned long start, unsigned long end) +{ + pud_t *pud; + unsigned long next; + + pud = pud_offset(p4d, start); + do { + next = pud_addr_end(start, end); + if (pud_none_or_clear_bad(pud)) + continue; + fr_fixup_pmd_range(vma, pud, start, next); + } while (pud++, start = next, start != end); +} + +static void fr_fixup_p4d_range(struct vm_area_struct *vma, pgd_t *pgd, + unsigned long start, unsigned long end) +{ + p4d_t *p4d; + unsigned long next; + + p4d = p4d_offset(pgd, start); + do { + next = p4d_addr_end(start, end); + if (p4d_none_or_clear_bad(p4d)) + continue; + fr_fixup_pud_range(vma, p4d, start, next); + } while (p4d++, start = next, start != end); +} + +static void fr_fixup_page_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + pgd_t *pgd; + unsigned long next; + + pgd = pgd_offset(vma->vm_mm, start); + do { + next = pgd_addr_end(start, end); + if (pgd_none_or_clear_bad(pgd)) + continue; + fr_fixup_p4d_range(vma, pgd, start, next); + } while (pgd++, start = next, start != end); +} + +/* The mmap_lock (read/write) of vma->vm_mm is held */ +void fast_reflink_fixup_vma(struct vm_area_struct *vma) +{ + if (!vma->fast_reflink) + return; + + fr_fixup_page_range(vma, vma->vm_start, vma->vm_end); + vma->fast_reflink = false; +#ifdef CONFIG_ARM64 + flush_tlb_range(vma, vma->vm_start, vma->vm_end); +#endif +} + +/* The mmap_lock (read) of vma->vm_mm is held */ +void fast_reflink_fixup_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr) +{ + if (!is_pmd_fast_reflink(*pmd) || !vma->fast_reflink) + return; + + addr &= PMD_MASK; + fr_fixup_page_range(vma, addr, addr + PMD_SIZE); + VM_WARN_ON_ONCE(is_pmd_fast_reflink(*pmd)); + +#ifdef CONFIG_ARM64 + flush_tlb_range(vma, addr & PMD_MASK, (addr & PMD_MASK) + PMD_SIZE); +#endif +} + +static void fast_reflink_fixup(struct work_struct *work) +{ + struct fast_reflink_work *fr_work; + struct address_space *mapping; + struct vm_area_struct *vma; + + fr_work = container_of(work, struct fast_reflink_work, work); + mapping = fr_work->mapping; + + i_mmap_lock_read(mapping); + vma_interval_tree_foreach(vma, &mapping->i_mmap, 0, ULONG_MAX) + fast_reflink_fixup_vma(vma); + i_mmap_unlock_read(mapping); +} diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9beed7c71a8e91391a9301f21cc3f8a926c9d753..6384603b61c13c9e900ff1fc293de21798b19356 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1205,6 +1205,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, /* reinitialise watermarks and update pcp limits */ init_per_zone_wmark_min(); + init_min_cache_kbytes(); 
kswapd_run(nid); kcompactd_run(nid); @@ -2014,6 +2015,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages, /* reinitialise watermarks and update pcp limits */ init_per_zone_wmark_min(); + init_min_cache_kbytes(); if (!populated_zone(zone)) { zone_pcp_reset(zone); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 94c74c594d102af9b7a5e8000ddcca2c06465b63..39d703a41a5c4c900ca77b0b50e6e52686feb966 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -607,7 +607,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask, * expensive, so check the estimated sharers of the folio instead. */ if ((flags & MPOL_MF_MOVE_ALL) || - (folio_estimated_sharers(folio) == 1 && !hugetlb_pmd_shared(pte))) + (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte))) if (!isolate_hugetlb(folio, qp->pagelist)) qp->nr_failed++; unlock: @@ -634,6 +634,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, struct mmu_gather tlb; long nr_updated; + fixup_vma(vma); + tlb_gather_mmu(&tlb, vma->vm_mm); nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA); @@ -1026,11 +1028,10 @@ static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist, * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio. * Choosing not to migrate a shared folio is not counted as a failure. * - * To check if the folio is shared, ideally we want to make sure - * every page is mapped to the same process. Doing that is very - * expensive, so check the estimated sharers of the folio instead. + * See folio_likely_mapped_shared() on possible imprecision when we + * cannot easily detect if a folio is shared. */ - if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) { + if ((flags & MPOL_MF_MOVE_ALL) || !folio_likely_mapped_shared(folio)) { if (folio_isolate_lru(folio)) { list_add_tail(&folio->lru, foliolist); node_stat_mod_folio(folio, @@ -2543,24 +2544,25 @@ static void sp_free(struct sp_node *n) } /** - * mpol_misplaced - check whether current page node is valid in policy + * mpol_misplaced - check whether current folio node is valid in policy * - * @page: page to be checked - * @vma: vm area where page mapped - * @addr: virtual address where page mapped + * @folio: folio to be checked + * @vma: vm area where folio mapped + * @addr: virtual address in @vma for shared policy lookup and interleave policy * - * Lookup current policy node id for vma,addr and "compare to" page's + * Lookup current policy node id for vma,addr and "compare to" folio's * node id. Policy determination "mimics" alloc_page_vma(). * Called from fault path where we know the vma and faulting address. * * Return: NUMA_NO_NODE if the page is in a node that is valid for this - * policy, or a suitable node ID to allocate a replacement page from. + * policy, or a suitable node ID to allocate a replacement folio from. 
*/ -int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) +int mpol_misplaced(struct folio *folio, struct vm_area_struct *vma, + unsigned long addr) { struct mempolicy *pol; struct zoneref *z; - int curnid = page_to_nid(page); + int curnid = folio_nid(folio); unsigned long pgoff; int thiscpu = raw_smp_processor_id(); int thisnid = cpu_to_node(thiscpu); @@ -2616,11 +2618,12 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long BUG(); } - /* Migrate the page towards the node whose CPU is referencing it */ + /* Migrate the folio towards the node whose CPU is referencing it */ if (pol->flags & MPOL_F_MORON) { polnid = thisnid; - if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) + if (!should_numa_migrate_memory(current, folio, curnid, + thiscpu)) goto out; } diff --git a/mm/memremap.c b/mm/memremap.c index bee85560a243403006cddaa5759dc5156f746de8..19ed6855f96ffa371bec80007ee2d8082102ae05 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -485,21 +485,11 @@ void free_zone_device_page(struct page *page) __ClearPageAnonExclusive(page); /* - * When a device managed page is freed, the page->mapping field + * When a device managed page is freed, the folio->mapping field * may still contain a (stale) mapping value. For example, the - * lower bits of page->mapping may still identify the page as an - * anonymous page. Ultimately, this entire field is just stale - * and wrong, and it will cause errors if not cleared. One - * example is: - * - * migrate_vma_pages() - * migrate_vma_insert_page() - * page_add_new_anon_rmap() - * __page_set_anon_rmap() - * ...checks page->mapping, via PageAnon(page) call, - * and incorrectly concludes that the page is an - * anonymous page. Therefore, it incorrectly, - * silently fails to set up the new anon rmap. + * lower bits of folio->mapping may still identify the folio as an + * anonymous folio. Ultimately, this entire field is just stale + * and wrong, and it will cause errors if not cleared. * * For other types of ZONE_DEVICE pages, migration is either * handled differently or not done at all, so there is no need diff --git a/mm/migrate.c b/mm/migrate.c index c5ed8caf6a406a26437bad724ef5f3405acef767..33bdbe29ba4037494ee7dd7173690a082f9e85b5 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -157,8 +157,8 @@ void putback_movable_pages(struct list_head *l) list_del(&folio->lru); /* * We isolated non-lru movable folio so here we can use - * __PageMovable because LRU folio's mapping cannot have - * PAGE_MAPPING_MOVABLE. + * __folio_test_movable because LRU folio's mapping cannot + * have PAGE_MAPPING_MOVABLE. 
*/ if (unlikely(__folio_test_movable(folio))) { VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio); @@ -249,20 +249,20 @@ static bool remove_migration_pte(struct folio *folio, pte = arch_make_huge_pte(pte, shift, vma->vm_flags); if (folio_test_anon(folio)) - hugepage_add_anon_rmap(new, vma, pvmw.address, - rmap_flags); + hugetlb_add_anon_rmap(folio, vma, pvmw.address, + rmap_flags); else - page_dup_file_rmap(new, true); + hugetlb_add_file_rmap(folio); set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte, psize); } else #endif { if (folio_test_anon(folio)) - page_add_anon_rmap(new, vma, pvmw.address, - rmap_flags); + folio_add_anon_rmap_pte(folio, new, vma, + pvmw.address, rmap_flags); else - page_add_file_rmap(new, vma, false); + folio_add_file_rmap_pte(folio, new, vma); set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); } if (vma->vm_flags & VM_LOCKED) @@ -415,6 +415,8 @@ int folio_migrate_mapping(struct address_space *mapping, /* No turning back from here */ newfolio->index = folio->index; newfolio->mapping = folio->mapping; + if (folio_test_anon(folio) && folio_test_large(folio)) + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); if (folio_test_swapbacked(folio)) __folio_set_swapbacked(newfolio); @@ -436,6 +438,8 @@ int folio_migrate_mapping(struct address_space *mapping, */ newfolio->index = folio->index; newfolio->mapping = folio->mapping; + if (folio_test_anon(folio) && folio_test_large(folio)) + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); folio_ref_add(newfolio, nr); /* add cache reference */ if (folio_test_swapbacked(folio)) { __folio_set_swapbacked(newfolio); @@ -531,7 +535,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, int expected_count; xas_lock_irq(&xas); - expected_count = 2 + folio_has_private(src); + expected_count = folio_expected_refs(mapping, src); if (!folio_ref_freeze(src, expected_count)) { xas_unlock_irq(&xas); return -EAGAIN; @@ -540,11 +544,11 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, dst->index = src->index; dst->mapping = src->mapping; - folio_get(dst); + folio_ref_add(dst, folio_nr_pages(dst)); xas_store(&xas, dst); - folio_ref_unfreeze(src, expected_count - 1); + folio_ref_unfreeze(src, expected_count - folio_nr_pages(src)); xas_unlock_irq(&xas); @@ -595,20 +599,20 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio) * Copy NUMA information to the new page, to prevent over-eager * future migrations of this same page. */ - cpupid = page_cpupid_xchg_last(&folio->page, -1); + cpupid = folio_xchg_last_cpupid(folio, -1); /* * For memory tiering mode, when migrate between slow and fast * memory node, reset cpupid, because that is used to record * page access time in slow memory node. 
*/ if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) { - bool f_toptier = node_is_toptier(page_to_nid(&folio->page)); - bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page)); + bool f_toptier = node_is_toptier(folio_nid(folio)); + bool t_toptier = node_is_toptier(folio_nid(newfolio)); if (f_toptier != t_toptier) cpupid = -1; } - page_cpupid_xchg_last(&newfolio->page, cpupid); + folio_xchg_last_cpupid(newfolio, cpupid); folio_migrate_ksm(newfolio, folio); /* @@ -953,7 +957,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src, enum migrate_mode mode) { int rc = -EAGAIN; - bool is_lru = !__PageMovable(&src->page); + bool is_lru = !__folio_test_movable(src); VM_BUG_ON_FOLIO(!folio_test_locked(src), src); VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst); @@ -1000,7 +1004,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src, * src is freed; but stats require that PageAnon be left as PageAnon. */ if (rc == MIGRATEPAGE_SUCCESS) { - if (__PageMovable(&src->page)) { + if (__folio_test_movable(src)) { VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); /* @@ -1091,7 +1095,7 @@ static void migrate_folio_done(struct folio *src, /* * Compaction can migrate also non-LRU pages which are * not accounted to NR_ISOLATED_*. They can be recognized - * as __PageMovable + * as __folio_test_movable */ if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION) mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON + @@ -1112,7 +1116,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio, int rc = -EAGAIN; int old_page_state = 0; struct anon_vma *anon_vma = NULL; - bool is_lru = !__PageMovable(&src->page); + bool is_lru = !__folio_test_movable(src); bool locked = false; bool dst_locked = false; @@ -1273,7 +1277,7 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private, int rc; int old_page_state = 0; struct anon_vma *anon_vma = NULL; - bool is_lru = !__PageMovable(&src->page); + bool is_lru = !__folio_test_movable(src); struct list_head *prev; __migrate_folio_extract(dst, &old_page_state, &anon_vma); @@ -2070,8 +2074,8 @@ static int add_page_for_migration(struct mm_struct *mm, const void __user *p, struct vm_area_struct *vma; unsigned long addr; struct page *page; + struct folio *folio; int err; - bool isolated; mmap_read_lock(mm); addr = (unsigned long)untagged_addr_remote(mm, p); @@ -2092,45 +2096,38 @@ static int add_page_for_migration(struct mm_struct *mm, const void __user *p, if (!page) goto out; - if (is_zone_device_page(page)) - goto out_putpage; + folio = page_folio(page); + if (folio_is_zone_device(folio)) + goto out_putfolio; err = 0; - if (page_to_nid(page) == node) - goto out_putpage; + if (folio_nid(folio) == node) + goto out_putfolio; err = -EACCES; if (page_mapcount(page) > 1 && !migrate_all) - goto out_putpage; + goto out_putfolio; - if (PageHuge(page)) { - if (PageHead(page)) { - isolated = isolate_hugetlb(page_folio(page), pagelist); - err = isolated ? 
1 : -EBUSY; - } + err = -EBUSY; + if (folio_test_hugetlb(folio)) { + if (isolate_hugetlb(folio, pagelist)) + err = 1; } else { - struct page *head; - - head = compound_head(page); - isolated = isolate_lru_page(head); - if (!isolated) { - err = -EBUSY; - goto out_putpage; - } + if (!folio_isolate_lru(folio)) + goto out_putfolio; err = 1; - list_add_tail(&head->lru, pagelist); - mod_node_page_state(page_pgdat(head), - NR_ISOLATED_ANON + page_is_file_lru(head), - thp_nr_pages(head)); + list_add_tail(&folio->lru, pagelist); + node_stat_mod_folio(folio, + NR_ISOLATED_ANON + folio_is_file_lru(folio), + folio_nr_pages(folio)); } -out_putpage: +out_putfolio: /* - * Either remove the duplicate refcount from - * isolate_lru_page() or drop the page ref if it was - * not isolated. + * Either remove the duplicate refcount from folio_isolate_lru() + * or drop the folio ref if it was not isolated. */ - put_page(page); + folio_put(folio); out: mmap_read_unlock(mm); return err; @@ -2501,16 +2498,9 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src, return __folio_alloc_node(gfp, order, nid); } -static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) +static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio) { - int nr_pages = thp_nr_pages(page); - int order = compound_order(page); - - VM_BUG_ON_PAGE(order && !PageTransHuge(page), page); - - /* Do not migrate THP mapped by multiple processes */ - if (PageTransHuge(page) && total_mapcount(page) > 1) - return 0; + int nr_pages = folio_nr_pages(folio); /* Avoid migrating to a node that is nearly full */ if (!migrate_balanced_pgdat(pgdat, nr_pages)) { @@ -2530,75 +2520,79 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) if (z < 0) return 0; - wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE); + wakeup_kswapd(pgdat->node_zones + z, 0, + folio_order(folio), ZONE_MOVABLE); return 0; } - if (!isolate_lru_page(page)) + if (!folio_isolate_lru(folio)) return 0; - mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page), + node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), nr_pages); /* - * Isolating the page has taken another reference, so the - * caller's reference can be safely dropped without the page + * Isolating the folio has taken another reference, so the + * caller's reference can be safely dropped without the folio * disappearing underneath us during migration. */ - put_page(page); + folio_put(folio); return 1; } /* - * Attempt to migrate a misplaced page to the specified destination + * Attempt to migrate a misplaced folio to the specified destination * node. Caller is expected to have an elevated reference count on - * the page that will be dropped by this function before returning. + * the folio that will be dropped by this function before returning. */ -int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, - int node) +int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, + int node) { pg_data_t *pgdat = NODE_DATA(node); int isolated; int nr_remaining; unsigned int nr_succeeded; LIST_HEAD(migratepages); - int nr_pages = thp_nr_pages(page); + int nr_pages = folio_nr_pages(folio); /* - * Don't migrate file pages that are mapped in multiple processes + * Don't migrate file folios that are mapped in multiple processes * with execute permissions as they are probably shared libraries. 
+ * + * See folio_likely_mapped_shared() on possible imprecision when we + * cannot easily detect if a folio is shared. */ - if (page_mapcount(page) != 1 && page_is_file_lru(page) && + if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) && (vma->vm_flags & VM_EXEC)) goto out; /* - * Also do not migrate dirty pages as not all filesystems can move - * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles. + * Also do not migrate dirty folios as not all filesystems can move + * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles. */ - if (page_is_file_lru(page) && PageDirty(page)) + if (folio_is_file_lru(folio) && folio_test_dirty(folio)) goto out; - isolated = numamigrate_isolate_page(pgdat, page); + isolated = numamigrate_isolate_folio(pgdat, folio); if (!isolated) goto out; - list_add(&page->lru, &migratepages); + list_add(&folio->lru, &migratepages); nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio, NULL, node, MIGRATE_ASYNC, MR_NUMA_MISPLACED, &nr_succeeded); if (nr_remaining) { if (!list_empty(&migratepages)) { - list_del(&page->lru); - mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + - page_is_file_lru(page), -nr_pages); - putback_lru_page(page); + list_del(&folio->lru); + node_stat_mod_folio(folio, NR_ISOLATED_ANON + + folio_is_file_lru(folio), -nr_pages); + folio_putback_lru(folio); } isolated = 0; } if (nr_succeeded) { count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded); - if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node)) + if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node)) mod_node_page_state(pgdat, PGPROMOTE_SUCCESS, nr_succeeded); } @@ -2606,7 +2600,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, return isolated; out: - put_page(page); + folio_put(folio); return 0; } #endif /* CONFIG_NUMA_BALANCING */ diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 8ac1f79f754a290445a9a40c0e71f7e5b5853491..1bebdfae2286f643ba6e278281e48992e157e70f 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -107,6 +107,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, for (; addr < end; addr += PAGE_SIZE, ptep++) { unsigned long mpfn = 0, pfn; + struct folio *folio; struct page *page; swp_entry_t entry; pte_t pte; @@ -168,41 +169,43 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, } /* - * By getting a reference on the page we pin it and that blocks + * By getting a reference on the folio we pin it and that blocks * any kind of migration. Side effect is that it "freezes" the * pte. * - * We drop this reference after isolating the page from the lru - * for non device page (device page are not on the lru and thus + * We drop this reference after isolating the folio from the lru + * for non device folio (device folio are not on the lru and thus * can't be dropped from it). */ - get_page(page); + folio = page_folio(page); + folio_get(folio); /* - * We rely on trylock_page() to avoid deadlock between + * We rely on folio_trylock() to avoid deadlock between * concurrent migrations where each is waiting on the others - * page lock. If we can't immediately lock the page we fail this + * folio lock. If we can't immediately lock the folio we fail this * migration as it is only best effort anyway. * - * If we can lock the page it's safe to set up a migration entry - * now. In the common case where the page is mapped once in a + * If we can lock the folio it's safe to set up a migration entry + * now. 
In the common case where the folio is mapped once in a * single process setting up the migration entry now is an * optimisation to avoid walking the rmap later with * try_to_migrate(). */ - if (trylock_page(page)) { + if (folio_trylock(folio)) { bool anon_exclusive; pte_t swp_pte; flush_cache_page(vma, addr, pte_pfn(pte)); - anon_exclusive = PageAnon(page) && PageAnonExclusive(page); + anon_exclusive = folio_test_anon(folio) && + PageAnonExclusive(page); if (anon_exclusive) { pte = ptep_clear_flush(vma, addr, ptep); - if (page_try_share_anon_rmap(page)) { + if (folio_try_share_anon_rmap_pte(folio, page)) { set_pte_at(mm, addr, ptep, pte); - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); mpfn = 0; goto next; } @@ -214,7 +217,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pte)) - folio_mark_dirty(page_folio(page)); + folio_mark_dirty(folio); /* Setup special migration page table entry */ if (mpfn & MIGRATE_PFN_WRITE) @@ -248,16 +251,16 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, /* * This is like regular unmap: we remove the rmap and - * drop page refcount. Page won't be freed, as we took - * a reference just above. + * drop the folio refcount. The folio won't be freed, as + * we took a reference just above. */ - page_remove_rmap(page, vma, false); - put_page(page); + folio_remove_rmap_pte(folio, page, vma); + folio_put(folio); if (pte_present(pte)) unmapped++; } else { - put_page(page); + folio_put(folio); mpfn = 0; } @@ -564,6 +567,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, struct page *page, unsigned long *src) { + struct folio *folio = page_folio(page); struct vm_area_struct *vma = migrate->vma; struct mm_struct *mm = vma->vm_mm; bool flush = false; @@ -596,17 +600,17 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, goto abort; if (unlikely(anon_vma_prepare(vma))) goto abort; - if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) + if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) goto abort; /* - * The memory barrier inside __SetPageUptodate makes sure that - * preceding stores to the page contents become visible before + * The memory barrier inside __folio_mark_uptodate makes sure that + * preceding stores to the folio contents become visible before * the set_pte_at() write. 
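+ * Otherwise a thread observing the new PTE could still read + * stale folio contents.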
*/ - __SetPageUptodate(page); + __folio_mark_uptodate(folio); - if (is_device_private_page(page)) { + if (folio_is_device_private(folio)) { swp_entry_t swp_entry; if (vma->vm_flags & VM_WRITE) @@ -617,8 +621,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, page_to_pfn(page)); entry = swp_entry_to_pte(swp_entry); } else { - if (is_zone_device_page(page) && - !is_device_coherent_page(page)) { + if (folio_is_zone_device(folio) && + !folio_is_device_coherent(folio)) { pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); goto abort; } @@ -652,10 +656,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, goto unlock_abort; inc_mm_counter(mm, MM_ANONPAGES); - page_add_new_anon_rmap(page, vma, addr); - if (!is_zone_device_page(page)) - lru_cache_add_inactive_or_unevictable(page, vma); - get_page(page); + folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); + if (!folio_is_zone_device(folio)) + folio_add_lru_vma(folio, vma); + folio_get(folio); if (flush) { flush_cache_page(vma, addr, pte_pfn(orig_pte)); diff --git a/mm/mlock.c b/mm/mlock.c index 06bdfab83b58af92f0abd43d068567de52d7f57d..7e984843d7332a83759a06b604f019bebd251d51 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -305,6 +305,62 @@ void munlock_folio(struct folio *folio) local_unlock(&mlock_fbatch.lock); } +static inline unsigned int folio_mlock_step(struct folio *folio, + pte_t *pte, unsigned long addr, unsigned long end) +{ + unsigned int count, i, nr = folio_nr_pages(folio); + unsigned long pfn = folio_pfn(folio); + pte_t ptent = ptep_get(pte); + + if (!folio_test_large(folio)) + return 1; + + count = pfn + nr - pte_pfn(ptent); + count = min_t(unsigned int, count, (end - addr) >> PAGE_SHIFT); + + for (i = 0; i < count; i++, pte++) { + pte_t entry = ptep_get(pte); + + if (!pte_present(entry)) + break; + if (pte_pfn(entry) - pfn >= nr) + break; + } + + return i; +} + +static inline bool allow_mlock_munlock(struct folio *folio, + struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned int step) +{ + /* + * For munlock, allow munlocking a large folio that is only + * partially mapped to the VMA, since the folio may have been + * mlocked first and the VMA split later. + * + * Under memory pressure such a large folio can be split, and + * the pages that are no longer in a VM_LOCKED VMA can then be + * reclaimed. 
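+ * + * mlock, by contrast, is only applied when the folio is small, + * or lies fully inside [start, end) and is fully mapped, as + * checked below.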
+ */ + if (!(vma->vm_flags & VM_LOCKED)) + return true; + + /* folio_within_range() cannot take KSM, but any small folio is OK */ + if (!folio_test_large(folio)) + return true; + + /* folio not in range [start, end), skip mlock */ + if (!folio_within_range(folio, vma, start, end)) + return false; + + /* folio is not fully mapped, skip mlock */ + if (step != folio_nr_pages(folio)) + return false; + + return true; +} + static int mlock_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -314,6 +370,8 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, pte_t *start_pte, *pte; pte_t ptent; struct folio *folio; + unsigned int step = 1; + unsigned long start = addr; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { @@ -334,6 +392,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, walk->action = ACTION_AGAIN; return 0; } + for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) { ptent = ptep_get(pte); if (!pte_present(ptent)) @@ -341,12 +400,19 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; - if (folio_test_large(folio)) - continue; + + step = folio_mlock_step(folio, pte, addr, end); + if (!allow_mlock_munlock(folio, vma, start, end, step)) + goto next_entry; + if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else munlock_folio(folio); + +next_entry: + pte += step - 1; + addr += (step - 1) << PAGE_SHIFT; } pte_unmap(start_pte); out: @@ -425,6 +491,8 @@ static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma, /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */ goto out; + fixup_vma(vma); + pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(vmi, mm, *prev, start, end, newflags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), diff --git a/mm/mmap.c b/mm/mmap.c index e4dfeaef668a8e30aea8cd4bf26dfa9b075654ac..39ede193a189879110fb4aa9093ffc9a020c7ded 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -350,6 +350,10 @@ anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; +#ifdef CONFIG_ASYNC_FORK + WARN_ON_ONCE(vma->async_fork_vma); +#endif + list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); } @@ -888,6 +892,9 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, if (vm_flags & VM_SPECIAL) return NULL; + if (prev) + fixup_vma(prev); + /* Does the input range span an existing VMA? (cases 5 - 8) */ curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end); @@ -897,6 +904,9 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, else next = NULL; /* case 5 */ + if (next) + fixup_vma(next); + if (prev) { vma_start = prev->vm_start; vma_pgoff = prev->vm_pgoff; @@ -1369,8 +1379,17 @@ unsigned long do_mmap(struct file *file, unsigned long addr, * memory use of this mapping. 
*/ if (flags & MAP_NORESERVE) { + struct rich_container_ext *ext = NULL; + /* We honor MAP_NORESERVE if allowed to overcommit */ - if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) { + if (ext->overcommit_memory != OVERCOMMIT_NEVER) + vm_flags |= VM_NORESERVE; + } else if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) vm_flags |= VM_NORESERVE; /* hugetlb applies strict overcommit unless MAP_NORESERVE */ @@ -1827,12 +1846,17 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, /* * mmap_region() will call shmem_zero_setup() to create a file, * so use shmem's get_unmapped_area in case it can be huge. - * do_mmap() will clear pgoff, so match alignment. */ - pgoff = 0; get_area = shmem_get_unmapped_area; + } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { + /* Ensures that larger anonymous mappings are THP aligned. */ + get_area = thp_get_unmapped_area; } + /* Always treat pgoff as zero for anonymous memory. */ + if (!file) + pgoff = 0; + addr = get_area(file, addr, len, pgoff, flags); if (IS_ERR_VALUE(addr)) return addr; @@ -2000,6 +2024,8 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address) return -ENOMEM; } + fixup_vma(vma); + /* Lock the VMA before expanding to prevent concurrent page faults */ vma_start_write(vma); /* @@ -2093,6 +2119,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) return -ENOMEM; } + fixup_vma(vma); + /* Lock the VMA before expanding to prevent concurrent page faults */ vma_start_write(vma); /* @@ -2357,6 +2385,8 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct *new; int err; + fixup_vma(vma); + WARN_ON(vma->vm_start >= addr); WARN_ON(vma->vm_end <= addr); @@ -2505,6 +2535,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, if (error) goto end_split_failed; } + fixup_vma(next); vma_start_write(next); mas_set(&mas_detach, count); error = mas_store_gfp(&mas_detach, next, GFP_KERNEL); diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 4f559f4ddd217180362b9af41727998aad3c6d46..99b3e9408aa0fb8961c980fe7cf18162fde1d427 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -50,12 +50,21 @@ static bool tlb_next_batch(struct mmu_gather *tlb) #ifdef CONFIG_SMP static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma) { + struct encoded_page **pages = batch->encoded_pages; + for (int i = 0; i < batch->nr; i++) { - struct encoded_page *enc = batch->encoded_pages[i]; + struct encoded_page *enc = pages[i]; - if (encoded_page_flags(enc)) { + if (encoded_page_flags(enc) & ENCODED_PAGE_BIT_DELAY_RMAP) { struct page *page = encoded_page_ptr(enc); - page_remove_rmap(page, vma, false); + unsigned int nr_pages = 1; + + if (unlikely(encoded_page_flags(enc) & + ENCODED_PAGE_BIT_NR_PAGES_NEXT)) + nr_pages = encoded_nr_pages(pages[++i]); + + folio_remove_rmap_ptes(page_folio(page), page, nr_pages, + vma); } } } @@ -82,26 +91,62 @@ void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) } #endif -static void tlb_batch_pages_flush(struct mmu_gather *tlb) +/* + * We might end up freeing a lot of pages. Reschedule on a regular + * basis to avoid soft lockups in configurations without full + * preemption enabled. The magic number of 512 folios seems to work. 
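+ * Each round of the freeing loop below caps its work at that + * figure (folios, or pages when poisoning/init_on_free is + * enabled) before calling cond_resched().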
+ */ +#define MAX_NR_FOLIOS_PER_FREE 512 + +static void __tlb_batch_free_encoded_pages(struct mmu_gather_batch *batch) { - struct mmu_gather_batch *batch; + struct encoded_page **pages = batch->encoded_pages; + unsigned int nr, nr_pages; - for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { - struct encoded_page **pages = batch->encoded_pages; + while (batch->nr) { + if (!page_poisoning_enabled_static() && !want_init_on_free()) { + nr = min(MAX_NR_FOLIOS_PER_FREE, batch->nr); - do { /* - * limit free batch count when PAGE_SIZE > 4K + * Make sure we cover page + nr_pages, and don't leave + * nr_pages behind when capping the number of entries. + */ + if (unlikely(encoded_page_flags(pages[nr - 1]) & + ENCODED_PAGE_BIT_NR_PAGES_NEXT)) + nr++; + } else { + /* + * With page poisoning and init_on_free, the time it + * takes to free memory grows proportionally with the + * actual memory size. Therefore, limit based on the + * actual memory size and not the number of involved + * folios. */ - unsigned int nr = min(512U, batch->nr); + for (nr = 0, nr_pages = 0; + nr < batch->nr && nr_pages < MAX_NR_FOLIOS_PER_FREE; + nr++) { + if (unlikely(encoded_page_flags(pages[nr]) & + ENCODED_PAGE_BIT_NR_PAGES_NEXT)) + nr_pages += encoded_nr_pages(pages[++nr]); + else + nr_pages++; + } + } - free_pages_and_swap_cache(pages, nr); - pages += nr; - batch->nr -= nr; + free_pages_and_swap_cache(pages, nr); + pages += nr; + batch->nr -= nr; - cond_resched(); - } while (batch->nr); + cond_resched(); } +} + +static void tlb_batch_pages_flush(struct mmu_gather *tlb) +{ + struct mmu_gather_batch *batch; + + for (batch = &tlb->local; batch && batch->nr; batch = batch->next) + __tlb_batch_free_encoded_pages(batch); tlb->active = &tlb->local; } @@ -116,14 +161,19 @@ static void tlb_batch_list_free(struct mmu_gather *tlb) tlb->local.next = NULL; } -bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size) +static bool __tlb_remove_folio_pages_size(struct mmu_gather *tlb, + struct page *page, unsigned int nr_pages, bool delay_rmap, + int page_size) { + int flags = delay_rmap ? ENCODED_PAGE_BIT_DELAY_RMAP : 0; struct mmu_gather_batch *batch; VM_BUG_ON(!tlb->end); #ifdef CONFIG_MMU_GATHER_PAGE_SIZE VM_WARN_ON(tlb->page_size != page_size); + VM_WARN_ON_ONCE(nr_pages != 1 && page_size != PAGE_SIZE); + VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1)); #endif batch = tlb->active; @@ -131,17 +181,40 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, i * Add the page and check if we are full. If so * force a flush. */ - batch->encoded_pages[batch->nr++] = page; - if (batch->nr == batch->max) { + if (likely(nr_pages == 1)) { + batch->encoded_pages[batch->nr++] = encode_page(page, flags); + } else { + flags |= ENCODED_PAGE_BIT_NR_PAGES_NEXT; + batch->encoded_pages[batch->nr++] = encode_page(page, flags); + batch->encoded_pages[batch->nr++] = encode_nr_pages(nr_pages); + } + /* + * Make sure that we can always add another "page" + "nr_pages", + * requiring two entries instead of only a single one. 
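The cap applied in __tlb_batch_free_encoded_pages() has one subtlety worth spelling out: a capped batch must never end on an entry whose companion count entry would be left behind. A user-space sketch of just that rule, with a simplified flag mimicking ENCODED_PAGE_BIT_NR_PAGES_NEXT (names and values illustrative)::

  #include <stdio.h>

  #define NR_PAGES_NEXT 0x1u      /* "next entry is a count, not a page" */
  #define CAP 4                   /* stand-in for MAX_NR_FOLIOS_PER_FREE */

  struct entry { unsigned int flags; unsigned int val; };

  static unsigned int cap_batch(const struct entry *e, unsigned int batch_nr)
  {
          unsigned int nr = batch_nr < CAP ? batch_nr : CAP;

          /* never split a (page, nr_pages) pair across two frees */
          if (nr < batch_nr && (e[nr - 1].flags & NR_PAGES_NEXT))
                  nr++;
          return nr;
  }

  int main(void)
  {
          struct entry batch[] = {
                  { 0, 10 }, { 0, 11 }, { 0, 12 },
                  { NR_PAGES_NEXT, 13 }, { 0, 8 /* count for 13 */ }, { 0, 14 },
          };
          unsigned int n = (unsigned int)(sizeof(batch) / sizeof(batch[0]));

          printf("free %u of %u entries this round\n", cap_batch(batch, n), n);
          return 0;
  }

Here the cap of 4 would land on a flagged entry, so it is extended to 5 to keep the pair together.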
+ */ + if (batch->nr >= batch->max - 1) { if (!tlb_next_batch(tlb)) return true; batch = tlb->active; } - VM_BUG_ON_PAGE(batch->nr > batch->max, encoded_page_ptr(page)); + VM_BUG_ON_PAGE(batch->nr > batch->max - 1, page); return false; } +bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page, + unsigned int nr_pages, bool delay_rmap) +{ + return __tlb_remove_folio_pages_size(tlb, page, nr_pages, delay_rmap, + PAGE_SIZE); +} + +bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, + bool delay_rmap, int page_size) +{ + return __tlb_remove_folio_pages_size(tlb, page, 1, delay_rmap, page_size); +} + #endif /* MMU_GATHER_NO_GATHER */ #ifdef CONFIG_MMU_GATHER_TABLE_FREE diff --git a/mm/mmzone.c b/mm/mmzone.c index 68e1511be12de6052b91eed6c86287042dd69d73..b594d3f268fe6e1fe2e6b8b9144ec44194416fbd 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c @@ -93,19 +93,19 @@ void lruvec_init(struct lruvec *lruvec) } #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) -int page_cpupid_xchg_last(struct page *page, int cpupid) +int folio_xchg_last_cpupid(struct folio *folio, int cpupid) { unsigned long old_flags, flags; int last_cpupid; - old_flags = READ_ONCE(page->flags); + old_flags = READ_ONCE(folio->flags); do { flags = old_flags; last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT); flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT; - } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); + } while (unlikely(!try_cmpxchg(&folio->flags, &old_flags, flags))); return last_cpupid; } diff --git a/mm/mprotect.c b/mm/mprotect.c index 7e870a8c9402aa50bb516a7912fefc36b03b4db6..c295085068ed2ba84db5c28beba593aefd0994a5 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -114,7 +114,7 @@ static long change_pte_range(struct mmu_gather *tlb, * pages. See similar comment in change_huge_pmd. */ if (prot_numa) { - struct page *page; + struct folio *folio; int nid; bool toptier; @@ -122,13 +122,15 @@ static long change_pte_range(struct mmu_gather *tlb, if (pte_protnone(oldpte)) continue; - page = vm_normal_page(vma, addr, oldpte); - if (!page || is_zone_device_page(page) || PageKsm(page)) + folio = vm_normal_folio(vma, addr, oldpte); + if (!folio || folio_is_zone_device(folio) || + folio_test_ksm(folio)) continue; /* Also skip shared copy-on-write pages */ if (is_cow_mapping(vma->vm_flags) && - page_count(page) != 1) + (folio_maybe_dma_pinned(folio) || + folio_likely_mapped_shared(folio))) continue; /* @@ -136,14 +138,15 @@ static long change_pte_range(struct mmu_gather *tlb, * it cannot move them all from MIGRATE_ASYNC * context. */ - if (page_is_file_lru(page) && PageDirty(page)) + if (folio_is_file_lru(folio) && + folio_test_dirty(folio)) continue; /* * Don't mess with PTEs if page is already on the node * a single-threaded process is running on. 
*/ - nid = page_to_nid(page); + nid = folio_nid(folio); if (target_node == nid) continue; toptier = node_is_toptier(nid); @@ -157,7 +160,7 @@ static long change_pte_range(struct mmu_gather *tlb, continue; if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING && !toptier) - xchg_page_access_time(page, + folio_xchg_access_time(folio, jiffies_to_msecs(jiffies)); } @@ -765,6 +768,8 @@ static int do_mprotect_pkey(unsigned long start, size_t len, unsigned long newflags; int new_vma_pkey; + fixup_vma(vma); + if (vma->vm_start != tmp) { error = -ENOMEM; break; diff --git a/mm/mremap.c b/mm/mremap.c index df71010baabe7ec61557e8e19a4f20b52eaf9417..b688b238b61f18634e4474302c1f256605e2f722 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -761,6 +761,8 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, if (!vma) return ERR_PTR(-EFAULT); + fixup_vma(vma); + /* * !old_len is a special case where an attempt is made to 'duplicate' * a mapping. This makes no sense for private mappings as it will diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 44bde56ecd025a5ba90ca501adf00fb0b5e52091..ca42526c4573222bfd3fb4feddb5b25b716be74b 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -305,17 +305,21 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc) return CONSTRAINT_NONE; } -static int oom_evaluate_task(struct task_struct *task, void *arg) +int oom_evaluate_task(struct task_struct *task, void *arg) { struct oom_control *oc = arg; long points; - if (oom_unkillable_task(task)) + if (oom_unkillable_task(task)) { + mem_cgroup_account_oom_skip(task, oc); goto next; + } /* p may not have freeable memory in nodemask */ - if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc)) + if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc)) { + mem_cgroup_account_oom_skip(task, oc); goto next; + } /* * This task already has access to memory reserves and is being killed. @@ -324,8 +328,11 @@ static int oom_evaluate_task(struct task_struct *task, void *arg) * any memory is quite low. */ if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) { - if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) + if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) { + mem_cgroup_account_oom_skip(task, oc); + oc->num_skip++; goto next; + } goto abort; } @@ -339,7 +346,11 @@ static int oom_evaluate_task(struct task_struct *task, void *arg) } points = oom_badness(task, oc->totalpages); - if (points == LONG_MIN || points < oc->chosen_points) + if (points == LONG_MIN) { + mem_cgroup_account_oom_skip(task, oc); + goto next; + } + if (points < oc->chosen_points) goto next; select: @@ -365,8 +376,8 @@ static void select_bad_process(struct oom_control *oc) { oc->chosen_points = LONG_MIN; - if (is_memcg_oom(oc)) - mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); + if (is_memcg_oom(oc) || root_memcg_use_priority_oom()) + mem_cgroup_select_bad_process(oc); else { struct task_struct *p; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index e632ec9b642109e4fd7a626c677680c1d65cc5c4..ed27e185d95f8da8580694bbbf3d610157c357e0 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2808,25 +2808,25 @@ bool folio_mark_dirty(struct folio *folio) EXPORT_SYMBOL(folio_mark_dirty); /* - * set_page_dirty() is racy if the caller has no reference against - * page->mapping->host, and if the page is unlocked. This is because another - * CPU could truncate the page off the mapping and then free the mapping. 
+ * folio_mark_dirty() is racy if the caller has no reference against + * folio->mapping->host, and if the folio is unlocked. This is because another + * CPU could truncate the folio off the mapping and then free the mapping. * - * Usually, the page _is_ locked, or the caller is a user-space process which + * Usually, the folio _is_ locked, or the caller is a user-space process which * holds a reference on the inode by having an open file. * - * In other cases, the page should be locked before running set_page_dirty(). + * In other cases, the folio should be locked before running folio_mark_dirty(). */ -int set_page_dirty_lock(struct page *page) +bool folio_mark_dirty_lock(struct folio *folio) { - int ret; + bool ret; - lock_page(page); - ret = set_page_dirty(page); - unlock_page(page); + folio_lock(folio); + ret = folio_mark_dirty(folio); + folio_unlock(folio); return ret; } -EXPORT_SYMBOL(set_page_dirty_lock); +EXPORT_SYMBOL(folio_mark_dirty_lock); /* * This cancels just the dirty bit on the kernel page itself, it does NOT diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f47439e0ef1085b3cc7740c04f2c5f239c1215be..fccb6908192ac754ea6ce6886ed88b519be376e2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include "internal.h" #include "shuffle.h" @@ -523,7 +524,7 @@ static inline unsigned int order_to_pindex(int migratetype, int order) #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (order > PAGE_ALLOC_COSTLY_ORDER) { - VM_BUG_ON(order != pageblock_order); + VM_BUG_ON(order != HPAGE_PMD_ORDER); movable = migratetype == MIGRATE_MOVABLE; @@ -542,7 +543,7 @@ static inline int pindex_to_order(unsigned int pindex) #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (pindex >= NR_LOWORDER_PCP_LISTS) - order = pageblock_order; + order = HPAGE_PMD_ORDER; #else VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); #endif @@ -555,7 +556,7 @@ static inline bool pcp_allowed_order(unsigned int order) if (order <= PAGE_ALLOC_COSTLY_ORDER) return true; #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (order == pageblock_order) + if (order == HPAGE_PMD_ORDER) return true; #endif return false; @@ -958,6 +959,12 @@ static inline bool free_page_is_bad(struct page *page) if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) return false; +#ifdef CONFIG_KFENCE + /* It's not performance sensitive when reaching here */ + if (PageKfence(page)) + return false; +#endif + /* Something has gone sideways, find it */ free_page_is_bad_report(page); return true; @@ -1057,7 +1064,7 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) * on-demand allocation and then freed again before the deferred pages * initialization is done, but this is not likely to happen. 
*/ -static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) +static inline bool should_skip_kasan_poison(struct page *page) { if (IS_ENABLED(CONFIG_KASAN_GENERIC)) return deferred_pages_enabled(); @@ -1076,11 +1083,11 @@ static void kernel_init_pages(struct page *page, int numpages) kasan_enable_current(); } -static __always_inline bool free_pages_prepare(struct page *page, - unsigned int order, fpi_t fpi_flags) +__always_inline bool free_pages_prepare(struct page *page, + unsigned int order) { int bad = 0; - bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags); + bool skip_kasan_poison = should_skip_kasan_poison(page); bool init = want_init_on_free(); struct folio *folio = page_folio(page); @@ -1139,8 +1146,11 @@ static __always_inline bool free_pages_prepare(struct page *page, (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; } } - if (PageMappingFlags(page)) + if (PageMappingFlags(page)) { + if (PageAnon(page)) + mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); page->mapping = NULL; + } if (memcg_kmem_online() && PageMemcgKmem(page)) __memcg_kmem_uncharge_page(page, order); if (is_check_pages_enabled()) { @@ -1151,7 +1161,7 @@ static __always_inline bool free_pages_prepare(struct page *page, } page_cpupid_reset_last(page); - page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; + page->flags &= ~(PAGE_FLAGS_CHECK_AT_PREP | __PG_KFENCE); reset_page_owner(page, order); page_table_check_free(page, order); @@ -1282,7 +1292,10 @@ static void __free_pages_ok(struct page *page, unsigned int order, unsigned long pfn = page_to_pfn(page); struct zone *zone = page_zone(page); - if (!free_pages_prepare(page, order, fpi_flags)) + if (!free_pages_prepare(page, order)) + return; + + if (unlikely(!order && kfence_free_page(page))) return; /* @@ -2334,7 +2347,7 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn, { int migratetype; - if (!free_pages_prepare(page, order, FPI_NONE)) + if (!free_pages_prepare(page, order)) return false; migratetype = get_pfnblock_migratetype(page, pfn); @@ -2398,6 +2411,10 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, bool free_high; __count_vm_events(PGFREE, 1 << order); + + if (unlikely(!order && kfence_free_page(page))) + return; + pindex = order_to_pindex(migratetype, order); list_add(&page->pcp_list, &pcp->lists[pindex]); pcp->count += 1 << order; @@ -2861,6 +2878,14 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, long min = mark; int o; + /* apply negative memory.wmark_min_adj */ + if ((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) { + int min_adj = memcg_get_wmark_min_adj(current); + + if (min_adj < 0) + min -= mark * (-min_adj) / 100; + } + /* free_pages may go negative - that's OK */ free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); @@ -2893,6 +2918,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, min -= min / 2; } + /* + * Only memory.wmark_min_adj can bring us here; guarantee a + * safe floor of mark/4 after the adjustment. + */ + if (min < mark / 4) + min = mark / 4; +
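A small user-space model of the watermark adjustment above (values and the helper name are illustrative)::

  #include <stdio.h>

  static long effective_min(long mark, int min_adj)
  {
          long min = mark;

          /* negative memory.wmark_min_adj lowers min by that percentage */
          if (min_adj < 0)
                  min -= mark * (-min_adj) / 100;
          /* never drop below a quarter of the original mark */
          if (min < mark / 4)
                  min = mark / 4;
          return min;
  }

  int main(void)
  {
          printf("mark=1000 adj=-25 -> min=%ld\n", effective_min(1000, -25));
          printf("mark=1000 adj=-90 -> min=%ld\n", effective_min(1000, -90));
          return 0;
  }

An adjustment of -25 yields 750; -90 would yield 100, but the floor clamps it to 250.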
/* * Check watermarks for an order-0 allocation request. If these * are not met, then a high-order request also cannot go ahead @@ -3392,10 +3424,12 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct page *page = NULL; unsigned long pflags; unsigned int noreclaim_flag; + u64 start; if (!order) return NULL; + memcg_lat_stat_start(&start); psi_memstall_enter(&pflags); delayacct_compact_start(); noreclaim_flag = memalloc_noreclaim_save(); @@ -3405,6 +3439,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, memalloc_noreclaim_restore(noreclaim_flag); psi_memstall_leave(&pflags); + memcg_lat_stat_end(MEM_LAT_DIRECT_COMPACT, start); delayacct_compact_end(); if (*compact_result == COMPACT_SKIPPED) @@ -3636,11 +3671,13 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, { unsigned int noreclaim_flag; unsigned long progress; + u64 start; cond_resched(); /* We now go into synchronous reclaim */ cpuset_memory_pressure_bump(); + memcg_lat_stat_start(&start); fs_reclaim_acquire(gfp_mask); noreclaim_flag = memalloc_noreclaim_save(); @@ -3649,6 +3686,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, memalloc_noreclaim_restore(noreclaim_flag); fs_reclaim_release(gfp_mask); + memcg_lat_stat_end(MEM_LAT_GLOBAL_DIRECT_RECLAIM, start); cond_resched(); @@ -4191,6 +4229,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, warn_alloc(gfp_mask, ac->nodemask, "page allocation failure: order:%u", order); got_pg: + + if (ac->migratetype == MIGRATE_MOVABLE) + memcg_check_wmark_min_adj(current, ac); + return page; } @@ -4361,7 +4403,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, continue; } - page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, + page = kfence_alloc_page(0, preferred_nid, gfp); + if (likely(!page)) + page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, pcp, pcp_list); if (unlikely(!page)) { /* Try and allocate at least one page */ @@ -4445,6 +4489,12 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, */ alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); + page = kfence_alloc_page(order, preferred_nid, alloc_gfp); + if (unlikely(page)) { + prep_new_page(page, 0, alloc_gfp, alloc_flags); + goto out; + } + /* First allocation attempt */ page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); if (likely(page)) @@ -5834,6 +5884,96 @@ static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int wr return 0; } +static void setup_min_cache_kbytes(void) +{ + pg_data_t *pgdat; + struct zone *zone; + unsigned long lowmem_pages = 0; + unsigned long min_cache_pages = sysctl_min_cache_kbytes >> (PAGE_SHIFT - 10); + + for_each_online_pgdat(pgdat) + pgdat->min_cache_pages = 0; + + for_each_zone(zone) { + if (!is_highmem(zone)) + lowmem_pages += zone_managed_pages(zone); + } + + for_each_zone(zone) { + u64 tmp; + + /* + * Make sure that each lowmem zone reserves an amount of file + * pages to avoid thrashing. Highmem zones are allowed to eat + * up memory as soon as possible. + */ + if (!is_highmem(zone)) { + tmp = zone_managed_pages(zone) * min_cache_pages; + do_div(tmp, lowmem_pages); + zone->zone_pgdat->min_cache_pages += tmp; + } + } +}
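The proportional split performed by setup_min_cache_kbytes() above can be sketched in plain C, without the kernel's do_div() and zone iterators (zone sizes are made up)::

  #include <stdio.h>

  int main(void)
  {
          /* managed pages per lowmem zone (made-up sizes) */
          unsigned long zones[] = { 262144, 786432, 524288 };
          unsigned long lowmem = 0, min_cache_pages = 38400; /* 150M in 4K pages */
          unsigned long i, n = sizeof(zones) / sizeof(zones[0]);

          for (i = 0; i < n; i++)
                  lowmem += zones[i];

          /* each zone contributes proportionally to its managed pages */
          for (i = 0; i < n; i++)
                  printf("zone %lu reserves %llu pages of page cache\n", i,
                         (unsigned long long)zones[i] * min_cache_pages / lowmem);
          return 0;
  }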
+ +/* + * Initialise min_cache_kbytes. + * + * 0 < total memory <= 4G, min_cache_kbytes: 150M + * 4G < total memory <= 8G, min_cache_kbytes: 300M + * 8G < total memory <= 16G, min_cache_kbytes: 400M + * 16G < total memory <= 128G, min_cache_kbytes: 500M + * total memory > 128G, min_cache_kbytes: 1024M + */ + +int __meminit init_min_cache_kbytes(void) +{ + unsigned long total_ram_bytes = totalram_pages() << PAGE_SHIFT; + + if (total_ram_bytes <= 4UL * SZ_1G) { + /* limit min_cache_kbytes to 1/2 of total memory at most */ + if (total_ram_bytes / 2 < 150 * SZ_1M) + sysctl_min_cache_kbytes = total_ram_bytes / 2 / SZ_1K; + else + sysctl_min_cache_kbytes = 150 * SZ_1K; + } else if (total_ram_bytes <= 8UL * SZ_1G) + sysctl_min_cache_kbytes = 300 * SZ_1K; + else if (total_ram_bytes <= 16UL * SZ_1G) + sysctl_min_cache_kbytes = 400 * SZ_1K; + else if (total_ram_bytes <= 128UL * SZ_1G) + sysctl_min_cache_kbytes = 500 * SZ_1K; + else + sysctl_min_cache_kbytes = 1024 * SZ_1K; + + setup_min_cache_kbytes(); + + return 0; +} +postcore_initcall(init_min_cache_kbytes); + +static int sysctl_min_cache_kbytes_sysctl_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) +{ + int rc; + unsigned long min_cache_pages; + unsigned long old_min_cache_kbytes = sysctl_min_cache_kbytes; + + rc = proc_doulongvec_minmax(table, write, buffer, length, ppos); + if (rc) + return rc; + + if (write) { + min_cache_pages = sysctl_min_cache_kbytes >> (PAGE_SHIFT - 10); + if (min_cache_pages > totalram_pages() / 2) { + sysctl_min_cache_kbytes = old_min_cache_kbytes; + return -EINVAL; + } + + setup_min_cache_kbytes(); + } + + return 0; +} + #ifdef CONFIG_NUMA static void setup_min_unmapped_ratio(void) { @@ -5995,6 +6135,14 @@ static struct ctl_table page_alloc_sysctl_table[] = { .mode = 0644, .proc_handler = lowmem_reserve_ratio_sysctl_handler, }, + { + .procname = "min_cache_kbytes", + .data = &sysctl_min_cache_kbytes, + .maxlen = sizeof(sysctl_min_cache_kbytes), + .mode = 0644, + .proc_handler = sysctl_min_cache_kbytes_sysctl_handler, + .extra1 = SYSCTL_LONG_ZERO, + }, #ifdef CONFIG_NUMA { .procname = "numa_zonelist_order", diff --git a/mm/page_counter.c b/mm/page_counter.c index db20d6452b715cdfd143f8e1bdf085a406a8f094..ce6f551ca7d2ba13863c81fbd671214108e33bb7 100644 --- a/mm/page_counter.c +++ b/mm/page_counter.c @@ -234,6 +234,18 @@ void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages) propagate_protected_usage(c, atomic_long_read(&c->usage)); } +void page_counter_set_wmark_high(struct page_counter *counter, + unsigned long nr_pages) +{ + xchg(&counter->wmark_high, nr_pages); +} + +void page_counter_set_wmark_low(struct page_counter *counter, + unsigned long nr_pages) +{ + xchg(&counter->wmark_low, nr_pages); +} + /** * page_counter_memparse - memparse() for page counter limits * @buf: string to parse diff --git a/mm/page_io.c b/mm/page_io.c index fe4c21af23f269a6bdc913e967f855007f8ccada..42a11cee4a46cbe989f3ebfd9f29661c8cc296f4 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -208,8 +208,11 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) static inline void count_swpout_vm_event(struct folio *folio) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (unlikely(folio_test_pmd_mappable(folio))) + if (unlikely(folio_test_pmd_mappable(folio))) { + count_memcg_folio_events(folio, THP_SWPOUT, 1); count_vm_event(THP_SWPOUT); + } + count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT); #endif count_vm_events(PSWPOUT, folio_nr_pages(folio)); }
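The sizing tiers of init_min_cache_kbytes() earlier in this hunk reduce to a small table-driven function. A stand-in mirroring that logic, including the half-of-RAM cap on the smallest tier (illustrative only)::

  #include <stdio.h>

  #define SZ_1K 1024UL
  #define SZ_1M (1024UL * SZ_1K)
  #define SZ_1G (1024UL * SZ_1M)

  static unsigned long default_min_cache_kbytes(unsigned long long ram_bytes)
  {
          if (ram_bytes <= 4ULL * SZ_1G) {
                  /* limit to half of total memory at most */
                  if (ram_bytes / 2 < 150 * SZ_1M)
                          return ram_bytes / 2 / SZ_1K;
                  return 150 * SZ_1K;     /* 150M expressed in kbytes */
          }
          if (ram_bytes <= 8ULL * SZ_1G)
                  return 300 * SZ_1K;
          if (ram_bytes <= 16ULL * SZ_1G)
                  return 400 * SZ_1K;
          if (ram_bytes <= 128ULL * SZ_1G)
                  return 500 * SZ_1K;
          return 1024 * SZ_1K;
  }

  int main(void)
  {
          printf("2G  RAM -> %lu kB\n", default_min_cache_kbytes(2ULL * SZ_1G));
          printf("64G RAM -> %lu kB\n", default_min_cache_kbytes(64ULL * SZ_1G));
          return 0;
  }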
@@ -278,9 +281,6 @@ static void sio_write_complete(struct kiocb *iocb, long ret) set_page_dirty(page); ClearPageReclaim(page); } - } else { - for (p = 0; p < sio->pages; p++) - count_swpout_vm_event(page_folio(sio->bvec[p].bv_page)); } for (p = 0; p < sio->pages; p++) @@ -296,6 +296,7 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc) struct file *swap_file = sis->swap_file; loff_t pos = page_file_offset(page); + count_swpout_vm_event(page_folio(page)); set_page_writeback(page); unlock_page(page); if (wbc->swap_plug) @@ -514,7 +515,6 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug) delayacct_swapin_start(); if (zswap_load(folio)) { - folio_mark_uptodate(folio); folio_unlock(folio); } else if (data_race(sis->flags & SWP_FS_OPS)) { swap_readpage_fs(page, plug); diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index e0b368e545ed00149e6756cad6d55c3c40da85f0..74d2de15fb5e09324bf265d06d4e48aac3cd803e 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -268,7 +268,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) * cleared *pmd but not decremented compound_mapcount(). */ if ((pvmw->flags & PVMW_SYNC) && - transhuge_vma_suitable(vma, pvmw->address) && + thp_vma_suitable_order(vma, pvmw->address, + PMD_ORDER) && (pvmw->nr_pages >= HPAGE_PMD_NR)) { spinlock_t *ptl = pmd_lock(mm, pvmw->pmd); diff --git a/mm/pagecache_limit.c b/mm/pagecache_limit.c new file mode 100644 index 0000000000000000000000000000000000000000..bf5b75b34381fd4a7f450ed8ef60314b771a2fda --- /dev/null +++ b/mm/pagecache_limit.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "pagecache_limit: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +DEFINE_STATIC_KEY_FALSE(pagecache_limit_enabled_key); +struct workqueue_struct *memcg_pgcache_limit_wq; + +static int __init setup_pagecache_limit(char *s) +{ + if (!strcmp(s, "1")) + static_branch_enable(&pagecache_limit_enabled_key); + else if (!strcmp(s, "0")) + static_branch_disable(&pagecache_limit_enabled_key); + return 1; +} +__setup("pagecache_limit=", setup_pagecache_limit); + +bool is_memcg_pgcache_limit_enabled(struct mem_cgroup *memcg) +{ + if (!pagecache_limit_enabled()) + return false; + + return READ_ONCE(memcg->allow_pgcache_limit); +} + +static inline unsigned long memcg_get_pgcache_nr_pages(struct mem_cgroup *memcg) +{ + /* + * We use 'NR_INACTIVE_FILE' + 'NR_ACTIVE_FILE' + * to represent the page cache. + * Due to changes in the memcg state update strategy, + * we need to proactively perform a refresh so that + * we can read accurate per-memcg lruvec stats.
+ */ + cgroup_rstat_flush(memcg->css.cgroup); + + return memcg_page_state(memcg, NR_INACTIVE_FILE) + + memcg_page_state(memcg, NR_ACTIVE_FILE); +} + +unsigned long memcg_get_pgcache_overflow_size(struct mem_cgroup *memcg) +{ + unsigned long limit_pgcache, total_pgcache; + + limit_pgcache = READ_ONCE(memcg->pgcache_limit_size) / PAGE_SIZE; + if (!limit_pgcache) + return 0; + + total_pgcache = memcg_get_pgcache_nr_pages(memcg); + if (total_pgcache > limit_pgcache) + return total_pgcache - limit_pgcache; + + return 0; +} + +void memcg_add_pgcache_limit_reclaimed(struct mem_cgroup *memcg, + unsigned long nr) +{ + struct mem_cgroup *iter; + + preempt_disable(); + + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) + __this_cpu_add(iter->exstat_cpu->item[MEMCG_PGCACHE_RECLAIM], + nr); + + preempt_enable(); +} + +void memcg_pgcache_limit_work_func(struct work_struct *work) +{ + struct mem_cgroup *memcg; + + memcg = container_of(work, struct mem_cgroup, pgcache_limit_work); + if (!is_memcg_pgcache_limit_enabled(memcg)) + return; + + current->flags |= PF_MEMALLOC | PF_KSWAPD; + __memcg_pagecache_shrink(memcg, true, GFP_KERNEL); + current->flags &= ~(PF_MEMALLOC | PF_KSWAPD); +}
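A sketch of the overflow computation in memcg_get_pgcache_overflow_size() above, with made-up numbers::

  #include <stdio.h>

  #define PAGE_SIZE 4096UL

  static unsigned long pgcache_overflow(unsigned long long limit_bytes,
                                        unsigned long total_pgcache_pages)
  {
          unsigned long limit_pages = limit_bytes / PAGE_SIZE;

          if (!limit_pages)
                  return 0;       /* limit unset: nothing to reclaim */
          if (total_pgcache_pages > limit_pages)
                  return total_pgcache_pages - limit_pages;
          return 0;
  }

  int main(void)
  {
          /* 1G limit, 300000 pages (~1.14G) of page cache -> excess */
          printf("overflow = %lu pages\n",
                 pgcache_overflow(1ULL << 30, 300000));
          return 0;
  }

Only the excess over the configured limit is handed to reclaim.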
+ +void memcg_pagecache_shrink(struct mem_cgroup *memcg, gfp_t gfp_mask) +{ + struct mem_cgroup *tmp_memcg = memcg; + + if (!memcg || !is_memcg_pgcache_limit_enabled(memcg)) + return; + + /* + * Check the page cache not only for the current memcg but also for + * its parent memcgs, to prevent a parent group from accumulating a + * large amount of page cache without releasing it in time. + */ + do { + if (!memcg_get_pgcache_overflow_size(tmp_memcg)) + continue; + /* + * In the direct memory reclaim path, only unmapped file page cache + * is reclaimed by default. But if most of the page cache is mapped, + * the "pagecache limit" would have no effect, so across the + * "sc.priority" traversal we pick an appropriate time to allow + * mapped page cache to be reclaimed as well. + */ + if (tmp_memcg->pgcache_limit_sync == PGCACHE_RECLAIM_DIRECT) + __memcg_pagecache_shrink(tmp_memcg, false, gfp_mask); + else + queue_work(memcg_pgcache_limit_wq, + &tmp_memcg->pgcache_limit_work); + } while ((tmp_memcg = parent_mem_cgroup(tmp_memcg)) && + is_memcg_pgcache_limit_enabled(tmp_memcg)); +} + +#ifdef CONFIG_SYSFS +static ssize_t pagecache_limit_enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", !!static_branch_unlikely(&pagecache_limit_enabled_key)); +} + +static ssize_t pagecache_limit_enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + static DEFINE_MUTEX(mutex); + ssize_t ret = count; + + mutex_lock(&mutex); + + if (!strncmp(buf, "1", 1)) + static_branch_enable(&pagecache_limit_enabled_key); + else if (!strncmp(buf, "0", 1)) + static_branch_disable(&pagecache_limit_enabled_key); + else + ret = -EINVAL; + + mutex_unlock(&mutex); + return ret; +} + +static struct kobj_attribute pagecache_limit_enabled_attr = + __ATTR(enabled, 0644, pagecache_limit_enabled_show, + pagecache_limit_enabled_store); + +static struct attribute *pagecache_limit_attrs[] = { + &pagecache_limit_enabled_attr.attr, + NULL, +}; + +static struct attribute_group pagecache_limit_attr_group = { + .attrs = pagecache_limit_attrs, +}; + +static int __init pagecache_limit_init_sysfs(void) +{ + int err; + struct kobject *pagecache_limit_kobj; + + pagecache_limit_kobj = kobject_create_and_add("pagecache_limit", mm_kobj); + if (!pagecache_limit_kobj) { + pr_err("failed to create pagecache_limit kobject\n"); + return -ENOMEM; + } + err = sysfs_create_group(pagecache_limit_kobj, &pagecache_limit_attr_group); + if (err) { + pr_err("failed to register pagecache_limit group\n"); + goto delete_obj; + } + + return 0; + +delete_obj: + kobject_put(pagecache_limit_kobj); + return err; +} +#endif /* CONFIG_SYSFS */ + +static int __init pagecache_limit_init(void) +{ + int ret = -EINVAL; + +#ifdef CONFIG_SYSFS + ret = pagecache_limit_init_sysfs(); +#endif + + return ret; +} +module_init(pagecache_limit_init); diff --git a/mm/readahead.c b/mm/readahead.c index f1595c032ce7e3abf20d64127bb5560cd512abdd..8eaabf7c91f40609cb436194ffd74e8bfd43c015 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -131,6 +131,9 @@ #include "internal.h" +/* enable context readahead by default */ +int sysctl_enable_context_readahead = 1; + /* * Initialise a struct file's readahead state. Assumes that the caller has * memset *ra to zero. @@ -483,6 +486,34 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index, return 0; } +static int select_new_order(int old_order, int max_order, unsigned long orders) +{ + unsigned long hi_orders, lo_orders; + + /* + * Select the next order to use from the set in `orders`, while ensuring + * we don't go above max_order. Prefer the next + 1 highest allowed + * order after old_order, unless there isn't one, in which case return + * the closest allowed order, which is either the next highest allowed + * order or less than or equal to old_order. The "next + 1" skip + * behaviour is intended to allow ramping up to large folios quickly.
+ */ + + orders &= BIT(max_order + 1) - 1; + VM_WARN_ON(!orders); + hi_orders = orders & ~(BIT(old_order + 1) - 1); + + if (hi_orders) { + old_order = lowest_order(hi_orders); + hi_orders &= ~BIT(old_order); + if (hi_orders) + return lowest_order(hi_orders); + } + + lo_orders = orders & (BIT(old_order + 1) - 1); + return highest_order(lo_orders); +} + void page_cache_ra_order(struct readahead_control *ractl, struct file_ra_state *ra, unsigned int new_order) { @@ -493,19 +524,15 @@ void page_cache_ra_order(struct readahead_control *ractl, unsigned int nofs; int err = 0; gfp_t gfp = readahead_gfp_mask(mapping); + unsigned long orders; - if (!mapping_large_folio_support(mapping) || ra->size < 4) + if (!mapping_large_folio_support(mapping)) goto fallback; limit = min(limit, index + ra->size - 1); - if (new_order < MAX_PAGECACHE_ORDER) { - new_order += 2; - if (new_order > MAX_PAGECACHE_ORDER) - new_order = MAX_PAGECACHE_ORDER; - while ((1 << new_order) > ra->size) - new_order--; - } + orders = file_orders_always() | BIT(0); + new_order = select_new_order(new_order, ilog2(ra->size), orders); /* See comment in page_cache_ra_unbounded() */ nofs = memalloc_nofs_save(); @@ -515,9 +542,10 @@ void page_cache_ra_order(struct readahead_control *ractl, /* Align with smaller pages if needed */ if (index & ((1UL << order) - 1)) - order = __ffs(index); + order = select_new_order(order, __ffs(index), orders); /* Don't allocate pages past EOF */ - while (index + (1UL << order) - 1 > limit) + while (index + (1UL << order) - 1 > limit && + (BIT(order) & orders) == 0) order--; err = ra_alloc_folio(ractl, index, mark, order, gfp); if (err) @@ -633,9 +661,11 @@ static void ondemand_readahead(struct readahead_control *ractl, * Query the page cache and look for the traces(cached history pages) * that a sequential stream would leave behind. */ - if (try_context_readahead(ractl->mapping, ra, index, req_size, - max_pages)) + if (sysctl_enable_context_readahead && + try_context_readahead(ractl->mapping, ra, index, req_size, + max_pages)) { goto readit; + } /* * standalone, small random read diff --git a/mm/rmap.c b/mm/rmap.c index 9f795b93cf40f5fa57c3dc38f7f18c4d4020d17d..4bb0ef91d69af4395f4476b792b4cd521fd62138 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -470,7 +470,7 @@ void __init anon_vma_init(void) /* * Getting a lock on a stable anon_vma from a page off the LRU is tricky! * - * Since there is no serialization what so ever against page_remove_rmap() + * Since there is no serialization what so ever against folio_remove_rmap_*() * the best this function can do is return a refcount increased anon_vma * that might have been relevant to this page. * @@ -487,7 +487,7 @@ void __init anon_vma_init(void) * [ something equivalent to page_mapped_in_vma() ]. * * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from - * page_remove_rmap() that the anon_vma pointer from page->mapping is valid + * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid * if there is a mapcount, we can dereference the anon_vma after observing * those. 
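Stepping back to select_new_order() in mm/readahead.c above: its bitmask arithmetic can be modelled in user space, with __builtin_ctzl()/__builtin_clzl() standing in for the lowest_order()/highest_order() helpers the patch uses (assumes 64-bit long; values illustrative)::

  #include <stdio.h>

  #define BIT(n) (1UL << (n))

  static int lowest_order(unsigned long x)  { return __builtin_ctzl(x); }
  static int highest_order(unsigned long x) { return 63 - __builtin_clzl(x); }

  static int select_new_order(int old_order, int max_order, unsigned long orders)
  {
          unsigned long hi, lo;

          orders &= BIT(max_order + 1) - 1;
          hi = orders & ~(BIT(old_order + 1) - 1);
          if (hi) {
                  old_order = lowest_order(hi);
                  hi &= ~BIT(old_order);
                  if (hi)
                          return lowest_order(hi);        /* skip one step up */
          }
          lo = orders & (BIT(old_order + 1) - 1);
          return highest_order(lo);
  }

  int main(void)
  {
          /* allowed orders: 0, 2, 4, 6 */
          unsigned long orders = BIT(0) | BIT(2) | BIT(4) | BIT(6);

          printf("from order 0 -> %d\n", select_new_order(0, 6, orders));
          printf("from order 4 -> %d\n", select_new_order(4, 6, orders));
          return 0;
  }

From order 0 this jumps straight to order 4, skipping order 2, which is the "next + 1" ramp-up described in the comment.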
*/ @@ -798,6 +798,7 @@ struct folio_referenced_arg { unsigned long vm_flags; struct mem_cgroup *memcg; }; + /* * arg: folio_referenced_arg will be passed */ @@ -807,17 +808,33 @@ static bool folio_referenced_one(struct folio *folio, struct folio_referenced_arg *pra = arg; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); int referenced = 0; + unsigned long start = address, ptes = 0; while (page_vma_mapped_walk(&pvmw)) { address = pvmw.address; - if ((vma->vm_flags & VM_LOCKED) && - (!folio_test_large(folio) || !pvmw.pte)) { - /* Restore the mlock which got missed */ - mlock_vma_folio(folio, vma, !pvmw.pte); - page_vma_mapped_walk_done(&pvmw); - pra->vm_flags |= VM_LOCKED; - return false; /* To break the loop */ + if (vma->vm_flags & VM_LOCKED) { + if (!folio_test_large(folio) || !pvmw.pte) { + /* Restore the mlock which got missed */ + mlock_vma_folio(folio, vma); + page_vma_mapped_walk_done(&pvmw); + pra->vm_flags |= VM_LOCKED; + return false; /* To break the loop */ + } + /* + * A large folio that is fully mapped to the VMA is + * handled after the pvmw loop. + * + * A large folio that crosses VMA boundaries is expected + * to be picked up by page reclaim, which should skip + * references to pages inside the VM_LOCKED VMA range + * and only count references to pages outside it. + */ + ptes++; + pra->mapcount--; + continue; } if (pvmw.pte) { @@ -842,6 +859,23 @@ static bool folio_referenced_one(struct folio *folio, pra->mapcount--; } + if ((vma->vm_flags & VM_LOCKED) && + folio_test_large(folio) && + folio_within_vma(folio, vma)) { + unsigned long s_align, e_align; + + s_align = ALIGN_DOWN(start, PMD_SIZE); + e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE); + + /* folio doesn't cross a page table boundary and is fully mapped */ + if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) { + /* Restore the mlock which got missed */ + mlock_vma_folio(folio, vma); + pra->vm_flags |= VM_LOCKED; + return false; /* To break the loop */ + } + } + if (referenced) folio_clear_idle(folio); if (folio_test_clear_young(folio)) @@ -1093,20 +1127,60 @@ int folio_total_mapcount(struct folio *folio) return mapcount; } +static __always_inline unsigned int __folio_add_rmap(struct folio *folio, + struct page *page, int nr_pages, enum rmap_level level, + int *nr_pmdmapped) +{ + atomic_t *mapped = &folio->_nr_pages_mapped; + int first, nr = 0; + + __folio_rmap_sanity_checks(folio, page, nr_pages, level); + + switch (level) { + case RMAP_LEVEL_PTE: + do { + first = atomic_inc_and_test(&page->_mapcount); + if (first && folio_test_large(folio)) { + first = atomic_inc_return_relaxed(mapped); + first = (first < ENTIRELY_MAPPED); + } + + if (first) + nr++; + } while (page++, --nr_pages > 0); + break; + case RMAP_LEVEL_PMD: + first = atomic_inc_and_test(&folio->_entire_mapcount); + if (first) { + nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); + if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { + *nr_pmdmapped = folio_nr_pages(folio); + nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); + /* Raced ahead of a remove and another add?
*/ + if (unlikely(nr < 0)) + nr = 0; + } else { + /* Raced ahead of a remove of ENTIRELY_MAPPED */ + nr = 0; + } + } + break; + } + return nr; +} + /** - * page_move_anon_rmap - move a page to our anon_vma - * @page: the page to move to our anon_vma - * @vma: the vma the page belongs to + * folio_move_anon_rmap - move a folio to our anon_vma + * @folio: The folio to move to our anon_vma + * @vma: The vma the folio belongs to * - * When a page belongs exclusively to one process after a COW event, - * that page can be moved into the anon_vma that belongs to just that - * process, so the rmap code will not search the parent or sibling - * processes. + * When a folio belongs exclusively to one process after a COW event, + * that folio can be moved into the anon_vma that belongs to just that + * process, so the rmap code will not search the parent or sibling processes. */ -void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) +void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) { void *anon_vma = vma->anon_vma; - struct folio *folio = page_folio(page); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_VMA(!anon_vma, vma); @@ -1118,31 +1192,25 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) * folio_test_anon()) will not see one without the other. */ WRITE_ONCE(folio->mapping, anon_vma); - SetPageAnonExclusive(page); } /** - * __page_set_anon_rmap - set up new anonymous rmap - * @folio: Folio which contains page. - * @page: Page to add to rmap. - * @vma: VM area to add page to. + * __folio_set_anon - set up a new anonymous rmap for a folio + * @folio: The folio to set up the new anonymous rmap for. + * @vma: VM area to add the folio to. * @address: User virtual address of the mapping - * @exclusive: the page is exclusively owned by the current process + * @exclusive: Whether the folio is exclusive to the process. */ -static void __page_set_anon_rmap(struct folio *folio, struct page *page, - struct vm_area_struct *vma, unsigned long address, int exclusive) +static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, + unsigned long address, bool exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); - if (folio_test_anon(folio)) - goto out; - /* - * If the page isn't exclusively mapped into this vma, - * we must use the _oldest_ possible anon_vma for the - * page mapping! + * If the folio isn't exclusive to this vma, we must use the _oldest_ + * possible anon_vma for the folio mapping! */ if (!exclusive) anon_vma = anon_vma->root; @@ -1156,9 +1224,6 @@ static void __page_set_anon_rmap(struct folio *folio, struct page *page, anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); folio->index = linear_page_index(vma, address); -out: - if (exclusive) - SetPageAnonExclusive(page); } /** @@ -1175,12 +1240,12 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page, * The page's anon-rmap details (mapping and index) are guaranteed to * be set up correctly at this point. * - * We have exclusion against page_add_anon_rmap because the caller + * We have exclusion against folio_add_anon_rmap_*() because the caller * always holds the page locked. * - * We have exclusion against page_add_new_anon_rmap because those pages + * We have exclusion against folio_add_new_anon_rmap because those pages * are initially only visible via the pagetables, and the pte is locked - * over the call to page_add_new_anon_rmap. 
+ * over the call to folio_add_new_anon_rmap. */ VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, folio); @@ -1188,72 +1253,100 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page, page); } -/** - * page_add_anon_rmap - add pte mapping to an anonymous page - * @page: the page to add the mapping to - * @vma: the vm area in which the mapping is added - * @address: the user virtual address mapped - * @flags: the rmap flags - * - * The caller needs to hold the pte lock, and the page must be locked in - * the anon_vma case: to serialize mapping,index checking after setting, - * and to ensure that PageAnon is not being upgraded racily to PageKsm - * (but PageKsm is never downgraded to PageAnon). - */ -void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, - unsigned long address, rmap_t flags) +static __always_inline void __folio_add_anon_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *vma, + unsigned long address, rmap_t flags, enum rmap_level level) { - struct folio *folio = page_folio(page); - atomic_t *mapped = &folio->_nr_pages_mapped; - int nr = 0, nr_pmdmapped = 0; - bool compound = flags & RMAP_COMPOUND; - bool first = true; - - /* Is page being mapped by PTE? Is this its first map to be added? */ - if (likely(!compound)) { - first = atomic_inc_and_test(&page->_mapcount); - nr = first; - if (first && folio_test_large(folio)) { - nr = atomic_inc_return_relaxed(mapped); - nr = (nr < COMPOUND_MAPPED); - } - } else if (folio_test_pmd_mappable(folio)) { - /* That test is redundant: it's for safety or to optimize out */ - - first = atomic_inc_and_test(&folio->_entire_mapcount); - if (first) { - nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); - if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { - nr_pmdmapped = folio_nr_pages(folio); - nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); - /* Raced ahead of a remove and another add? */ - if (unlikely(nr < 0)) - nr = 0; - } else { - /* Raced ahead of a remove of COMPOUND_MAPPED */ - nr = 0; - } - } - } + int i, nr, nr_pmdmapped = 0; - VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); - VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); if (nr_pmdmapped) __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); if (nr) __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); - if (likely(!folio_test_ksm(folio))) { - /* address might be in next vma when migration races vma_merge */ - if (first) - __page_set_anon_rmap(folio, page, vma, address, - !!(flags & RMAP_EXCLUSIVE)); - else - __page_check_anon_rmap(folio, page, vma, address); + if (likely(!folio_test_ksm(folio))) + __page_check_anon_rmap(folio, page, vma, address); + + if (flags & RMAP_EXCLUSIVE) { + switch (level) { + case RMAP_LEVEL_PTE: + for (i = 0; i < nr_pages; i++) + SetPageAnonExclusive(page + i); + break; + case RMAP_LEVEL_PMD: + SetPageAnonExclusive(page); + break; + } + } + for (i = 0; i < nr_pages; i++) { + struct page *cur_page = page + i; + + /* While PTE-mapping a THP we have a PMD and a PTE mapping. */ + VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 || + (folio_test_large(folio) && + folio_entire_mapcount(folio) > 1)) && + PageAnonExclusive(cur_page), folio); } - mlock_vma_folio(folio, vma, compound); + /* + * For large folio, only mlock it if it's fully mapped to VMA. 
It's + * not easy to check whether the large folio is fully mapped to VMA + * here. Only mlock normal 4K folio and leave page reclaim to handle + * large folio. + */ + if (!folio_test_large(folio)) + mlock_vma_folio(folio, vma); +} + +/** + * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio + * @folio: The folio to add the mappings to + * @page: The first page to add + * @nr_pages: The number of pages which will be mapped + * @vma: The vm area in which the mappings are added + * @address: The user virtual address of the first page to map + * @flags: The rmap flags + * + * The page range of folio is defined by [first_page, first_page + nr_pages) + * + * The caller needs to hold the page table lock, and the page must be locked in + * the anon_vma case: to serialize mapping,index checking after setting, + * and to ensure that an anon folio is not being upgraded racily to a KSM folio + * (but KSM folios are never downgraded). + */ +void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, + int nr_pages, struct vm_area_struct *vma, unsigned long address, + rmap_t flags) +{ + __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, + RMAP_LEVEL_PTE); +} + +/** + * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio + * @folio: The folio to add the mapping to + * @page: The first page to add + * @vma: The vm area in which the mapping is added + * @address: The user virtual address of the first page to map + * @flags: The rmap flags + * + * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock, and the page must be locked in + * the anon_vma case: to serialize mapping,index checking after setting. + */ +void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, + struct vm_area_struct *vma, unsigned long address, rmap_t flags) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, + RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); +#endif } /** @@ -1261,209 +1354,232 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, * @folio: The folio to add the mapping to. * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped + * @flags: The rmap flags * - * Like page_add_anon_rmap() but must only be called on *new* folios. + * Like folio_add_anon_rmap_*() but must only be called on *new* folios. * This means the inc-and-test can be bypassed. - * The folio does not have to be locked. + * The folio doesn't necessarily need to be locked while it's exclusive + * unless two threads map it concurrently. However, the folio must be + * locked if it's shared. * - * If the folio is large, it is accounted as a THP. As the folio - * is new, it's assumed to be mapped exclusively by a single process. + * If the folio is pmd-mappable, it is accounted as a THP. 
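The _nr_pages_mapped encoding that these rmap helpers rely on can be shown with a toy model; the constants mirror the kernel's ENTIRELY_MAPPED and FOLIO_PAGES_MAPPED values, but the code is illustrative only::

  #include <stdio.h>

  #define ENTIRELY_MAPPED    0x800000
  #define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)

  int main(void)
  {
          int mapped = 0, nr_folio_pages = 512;

          mapped += 3;                  /* three pages PTE-mapped first */
          mapped += ENTIRELY_MAPPED;    /* then the whole folio PMD-mapped */

          printf("PTE-mapped pages:  %d\n", mapped & FOLIO_PAGES_MAPPED);
          printf("entire (PMD) maps: %d\n", mapped / ENTIRELY_MAPPED);
          /* mirrors "nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED)" */
          printf("pages newly mapped by the PMD: %d\n",
                 nr_folio_pages - (mapped & FOLIO_PAGES_MAPPED));
          return 0;
  }

The low bits count PTE-mapped pages while the high bias records entire mappings, which is why the PMD path subtracts the masked low bits when updating the stats.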
*/ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, - unsigned long address) + unsigned long address, rmap_t flags) { - int nr; + const int nr = folio_nr_pages(folio); + const bool exclusive = flags & RMAP_EXCLUSIVE; - VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); - __folio_set_swapbacked(folio); + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); + VM_BUG_ON_VMA(address < vma->vm_start || + address + (nr << PAGE_SHIFT) > vma->vm_end, vma); - if (likely(!folio_test_pmd_mappable(folio))) { + if (!folio_test_swapbacked(folio)) + __folio_set_swapbacked(folio); + __folio_set_anon(folio, vma, address, exclusive); + + if (likely(!folio_test_large(folio))) { /* increment count (starts at -1) */ atomic_set(&folio->_mapcount, 0); - nr = 1; + if (exclusive) + SetPageAnonExclusive(&folio->page); + } else if (!folio_test_pmd_mappable(folio)) { + int i; + + for (i = 0; i < nr; i++) { + struct page *page = folio_page(folio, i); + + /* increment count (starts at -1) */ + atomic_set(&page->_mapcount, 0); + if (exclusive) + SetPageAnonExclusive(page); + } + + atomic_set(&folio->_nr_pages_mapped, nr); } else { /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); - atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); - nr = folio_nr_pages(folio); + atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); + if (exclusive) + SetPageAnonExclusive(&folio->page); __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); } __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); - __page_set_anon_rmap(folio, &folio->page, vma, address, 1); + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); } -/** - * folio_add_file_rmap_range - add pte mapping to page range of a folio - * @folio: The folio to add the mapping to - * @page: The first page to add - * @nr_pages: The number of pages which will be mapped - * @vma: the vm area in which the mapping is added - * @compound: charge the page as compound or small page - * - * The page range of folio is defined by [first_page, first_page + nr_pages) - * - * The caller needs to hold the pte lock. - */ -void folio_add_file_rmap_range(struct folio *folio, struct page *page, - unsigned int nr_pages, struct vm_area_struct *vma, - bool compound) +static __always_inline void __folio_add_file_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *vma, + enum rmap_level level) { - atomic_t *mapped = &folio->_nr_pages_mapped; - unsigned int nr_pmdmapped = 0, first; - int nr = 0; + pg_data_t *pgdat = folio_pgdat(folio); + int nr, nr_pmdmapped = 0; - VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio); - - /* Is page being mapped by PTE? Is this its first map to be added? */ - if (likely(!compound)) { - do { - first = atomic_inc_and_test(&page->_mapcount); - if (first && folio_test_large(folio)) { - first = atomic_inc_return_relaxed(mapped); - first = (first < COMPOUND_MAPPED); - } - - if (first) - nr++; - } while (page++, --nr_pages > 0); - } else if (folio_test_pmd_mappable(folio)) { - /* That test is redundant: it's for safety or to optimize out */ - - first = atomic_inc_and_test(&folio->_entire_mapcount); - if (first) { - nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); - if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { - nr_pmdmapped = folio_nr_pages(folio); - nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); - /* Raced ahead of a remove and another add? 
*/ - if (unlikely(nr < 0)) - nr = 0; - } else { - /* Raced ahead of a remove of COMPOUND_MAPPED */ - nr = 0; - } - } - } + VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); + nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); if (nr_pmdmapped) - __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? + __mod_node_page_state(pgdat, folio_test_swapbacked(folio) ? NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); if (nr) __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); - mlock_vma_folio(folio, vma, compound); + /* See comments in folio_add_anon_rmap_*() */ + if (!folio_test_large(folio)) + mlock_vma_folio(folio, vma); } /** - * page_add_file_rmap - add pte mapping to a file page - * @page: the page to add the mapping to - * @vma: the vm area in which the mapping is added - * @compound: charge the page as compound or small page + * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio + * @folio: The folio to add the mappings to + * @page: The first page to add + * @nr_pages: The number of pages that will be mapped using PTEs + * @vma: The vm area in which the mappings are added + * + * The page range of the folio is defined by [page, page + nr_pages) * - * The caller needs to hold the pte lock. + * The caller needs to hold the page table lock. */ -void page_add_file_rmap(struct page *page, struct vm_area_struct *vma, - bool compound) +void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, + int nr_pages, struct vm_area_struct *vma) { - struct folio *folio = page_folio(page); - unsigned int nr_pages; - - VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page); - - if (likely(!compound)) - nr_pages = 1; - else - nr_pages = folio_nr_pages(folio); - - folio_add_file_rmap_range(folio, page, nr_pages, vma, compound); + __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); } /** - * page_remove_rmap - take down pte mapping from a page - * @page: page to remove mapping from - * @vma: the vm area from which the mapping is removed - * @compound: uncharge the page as compound or small page + * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio + * @folio: The folio to add the mapping to + * @page: The first page to add + * @vma: The vm area in which the mapping is added * - * The caller needs to hold the pte lock. + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock. 
*/ -void page_remove_rmap(struct page *page, struct vm_area_struct *vma, - bool compound) +void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, + struct vm_area_struct *vma) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); +#endif +} + +static __always_inline void __folio_remove_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *vma, + enum rmap_level level) { - struct folio *folio = page_folio(page); atomic_t *mapped = &folio->_nr_pages_mapped; - int nr = 0, nr_pmdmapped = 0; - bool last; + pg_data_t *pgdat = folio_pgdat(folio); + int last, nr = 0, nr_pmdmapped = 0; enum node_stat_item idx; - VM_BUG_ON_PAGE(compound && !PageHead(page), page); + __folio_rmap_sanity_checks(folio, page, nr_pages, level); - /* Hugetlb pages are not counted in NR_*MAPPED */ - if (unlikely(folio_test_hugetlb(folio))) { - /* hugetlb pages are always mapped with pmds */ - atomic_dec(&folio->_entire_mapcount); - return; - } - - /* Is page being unmapped by PTE? Is this its last map to be removed? */ - if (likely(!compound)) { - last = atomic_add_negative(-1, &page->_mapcount); - nr = last; - if (last && folio_test_large(folio)) { - nr = atomic_dec_return_relaxed(mapped); - nr = (nr < COMPOUND_MAPPED); - } - } else if (folio_test_pmd_mappable(folio)) { - /* That test is redundant: it's for safety or to optimize out */ + switch (level) { + case RMAP_LEVEL_PTE: + do { + last = atomic_add_negative(-1, &page->_mapcount); + if (last && folio_test_large(folio)) { + last = atomic_dec_return_relaxed(mapped); + last = (last < ENTIRELY_MAPPED); + } + if (last) + nr++; + } while (page++, --nr_pages > 0); + break; + case RMAP_LEVEL_PMD: last = atomic_add_negative(-1, &folio->_entire_mapcount); if (last) { - nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped); - if (likely(nr < COMPOUND_MAPPED)) { + nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); + if (likely(nr < ENTIRELY_MAPPED)) { nr_pmdmapped = folio_nr_pages(folio); nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of another remove and an add? */ if (unlikely(nr < 0)) nr = 0; } else { - /* An add of COMPOUND_MAPPED raced ahead */ + /* An add of ENTIRELY_MAPPED raced ahead */ nr = 0; } } + break; } if (nr_pmdmapped) { + /* NR_{FILE/SHMEM}_PMDMAPPED are not maintained per-memcg */ if (folio_test_anon(folio)) - idx = NR_ANON_THPS; - else if (folio_test_swapbacked(folio)) - idx = NR_SHMEM_PMDMAPPED; + __lruvec_stat_mod_folio(folio, NR_ANON_THPS, -nr_pmdmapped); else - idx = NR_FILE_PMDMAPPED; - __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); + __mod_node_page_state(pgdat, + folio_test_swapbacked(folio) ? + NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, + -nr_pmdmapped); } if (nr) { idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; __lruvec_stat_mod_folio(folio, idx, -nr); /* - * Queue anon THP for deferred split if at least one + * Queue anon large folio for deferred split if at least one * page of the folio is unmapped and at least one page * is still mapped. 
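The deferred-split trigger in __folio_remove_rmap() above reduces to a small predicate; a user-space sketch of just that decision, detached from the real folio state::

  #include <stdio.h>
  #include <stdbool.h>

  enum rmap_level { RMAP_LEVEL_PTE, RMAP_LEVEL_PMD };

  static bool should_defer_split(bool large, bool anon, enum rmap_level level,
                                 int nr, int nr_pmdmapped)
  {
          if (!nr || !large || !anon)
                  return false;
          /* some pages were just unmapped while part remains mapped */
          return level == RMAP_LEVEL_PTE || nr < nr_pmdmapped;
  }

  int main(void)
  {
          /* one PTE of a still-mapped anon THP unmapped: queue for split */
          printf("%d\n", should_defer_split(true, true, RMAP_LEVEL_PTE, 1, 0));
          /* full PMD unmap with nothing left mapped: no split needed */
          printf("%d\n", should_defer_split(true, true, RMAP_LEVEL_PMD, 512, 512));
          return 0;
  }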
*/ - if (folio_test_pmd_mappable(folio) && folio_test_anon(folio)) - if (!compound || nr < nr_pmdmapped) + if (folio_test_large(folio) && folio_test_anon(folio)) + if (level == RMAP_LEVEL_PTE || nr < nr_pmdmapped) deferred_split_folio(folio); } /* * It would be tidy to reset folio_test_anon mapping when fully - * unmapped, but that might overwrite a racing page_add_anon_rmap + * unmapped, but that might overwrite a racing folio_add_anon_rmap_*() * which increments mapcount after us but sets mapping before us: * so leave the reset to free_pages_prepare, and remember that * it's only reliable while mapped. */ - munlock_vma_folio(folio, vma, compound); + munlock_vma_folio(folio, vma); +} + +/** + * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio + * @folio: The folio to remove the mappings from + * @page: The first page to remove + * @nr_pages: The number of pages that will be removed from the mapping + * @vma: The vm area from which the mappings are removed + * + * The page range of the folio is defined by [page, page + nr_pages) + * + * The caller needs to hold the page table lock. + */ +void folio_remove_rmap_ptes(struct folio *folio, struct page *page, + int nr_pages, struct vm_area_struct *vma) +{ + __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); +} + +/** + * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio + * @folio: The folio to remove the mapping from + * @page: The first page to remove + * @vma: The vm area from which the mapping is removed + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock. + */ +void folio_remove_rmap_pmd(struct folio *folio, struct page *page, + struct vm_area_struct *vma) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); +#endif } /* @@ -1484,7 +1600,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, /* * When racing against e.g. zap_pte_range() on another cpu, - * in between its ptep_get_and_clear_full() and page_remove_rmap(), + * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), * try_to_unmap() may return before page_mapped() has become false, * if page table locking is skipped: use TTU_SYNC to wait for that. */ @@ -1528,7 +1644,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, if (!(flags & TTU_IGNORE_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Restore the mlock which got missed */ - mlock_vma_folio(folio, vma, false); + if (!folio_test_large(folio)) + mlock_vma_folio(folio, vma); page_vma_mapped_walk_done(&pvmw); ret = false; break; @@ -1721,9 +1838,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, break; } - /* See page_try_share_anon_rmap(): clear PTE first. */ + /* See folio_try_share_anon_rmap(): clear PTE first. 
*/ if (anon_exclusive && - page_try_share_anon_rmap(subpage)) { + folio_try_share_anon_rmap_pte(folio, subpage)) { swap_free(entry); set_pte_at(mm, address, pvmw.pte, pteval); ret = false; @@ -1761,7 +1878,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, dec_mm_counter(mm, mm_counter_file(&folio->page)); } discard: - page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); + if (unlikely(folio_test_hugetlb(folio))) + hugetlb_remove_rmap(folio); + else + folio_remove_rmap_pte(folio, subpage, vma); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); @@ -1829,7 +1949,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, /* * When racing against e.g. zap_pte_range() on another cpu, - * in between its ptep_get_and_clear_full() and page_remove_rmap(), + * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), * try_to_migrate() may return before page_mapped() has become false, * if page table locking is skipped: use TTU_SYNC to wait for that. */ @@ -1994,7 +2114,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, pte_t swp_pte; if (anon_exclusive) - BUG_ON(page_try_share_anon_rmap(subpage)); + WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio, + subpage)); /* * Store the pfn of the page in a special migration @@ -2065,14 +2186,19 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && !anon_exclusive, subpage); - /* See page_try_share_anon_rmap(): clear PTE first. */ - if (anon_exclusive && - page_try_share_anon_rmap(subpage)) { - if (folio_test_hugetlb(folio)) + /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ + if (folio_test_hugetlb(folio)) { + if (anon_exclusive && + hugetlb_try_share_anon_rmap(folio)) { set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); - else - set_pte_at(mm, address, pvmw.pte, pteval); + ret = false; + page_vma_mapped_walk_done(&pvmw); + break; + } + } else if (anon_exclusive && + folio_try_share_anon_rmap_pte(folio, subpage)) { + set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; @@ -2114,7 +2240,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, */ } - page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); + if (unlikely(folio_test_hugetlb(folio))) + hugetlb_remove_rmap(folio); + else + folio_remove_rmap_pte(folio, subpage, vma); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); @@ -2253,7 +2382,7 @@ static bool page_make_device_exclusive_one(struct folio *folio, * There is a reference on the page for the swap entry which has * been removed, so shouldn't take another. */ - page_remove_rmap(subpage, vma, false); + folio_remove_rmap_pte(folio, subpage, vma); } mmu_notifier_invalidate_range_end(&range); @@ -2537,34 +2666,30 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) * The following two functions are for anonymous (private mapped) hugepages. * Unlike common anonymous pages, anonymous hugepages have no accounting code * and no lru code, because we handle hugepages differently from common pages. - * - * RMAP_COMPOUND is ignored. 
*/ -void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, - unsigned long address, rmap_t flags) +void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, + unsigned long address, rmap_t flags) { - struct folio *folio = page_folio(page); - struct anon_vma *anon_vma = vma->anon_vma; - int first; - - BUG_ON(!folio_test_locked(folio)); - BUG_ON(!anon_vma); - /* address might be in next vma when migration races vma_merge */ - first = atomic_inc_and_test(&folio->_entire_mapcount); - VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); - VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); - if (first) - __page_set_anon_rmap(folio, page, vma, address, - !!(flags & RMAP_EXCLUSIVE)); + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + + atomic_inc(&folio->_entire_mapcount); + if (flags & RMAP_EXCLUSIVE) + SetPageAnonExclusive(&folio->page); + VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && + PageAnonExclusive(&folio->page), folio); } -void hugepage_add_new_anon_rmap(struct folio *folio, - struct vm_area_struct *vma, unsigned long address) +void hugetlb_add_new_anon_rmap(struct folio *folio, + struct vm_area_struct *vma, unsigned long address) { + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + BUG_ON(address < vma->vm_start || address >= vma->vm_end); /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); folio_clear_hugetlb_restore_reserve(folio); - __page_set_anon_rmap(folio, &folio->page, vma, address, 1); + __folio_set_anon(folio, vma, address, true); + SetPageAnonExclusive(&folio->page); } #endif /* CONFIG_HUGETLB_PAGE */ diff --git a/mm/shmem.c b/mm/shmem.c index db7dd45c918158e8302e3bb8b44c6a6cf64e084e..fbf6ff38a0a3c658f70e347e7968e069d7392d53 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -130,6 +130,14 @@ struct shmem_options { #define SHMEM_SEEN_QUOTA 32 }; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static unsigned long huge_shmem_orders_always __read_mostly; +static unsigned long huge_shmem_orders_madvise __read_mostly; +static unsigned long huge_shmem_orders_inherit __read_mostly; +static unsigned long huge_shmem_orders_within_size __read_mostly; +static bool shmem_orders_configured __initdata; +#endif + #ifdef CONFIG_TMPFS static unsigned long shmem_default_max_blocks(void) { @@ -146,9 +154,8 @@ static unsigned long shmem_default_max_inodes(void) #endif static int shmem_swapin_folio(struct inode *inode, pgoff_t index, - struct folio **foliop, enum sgp_type sgp, - gfp_t gfp, struct vm_area_struct *vma, - vm_fault_t *fault_type); + struct folio **foliop, enum sgp_type sgp, gfp_t gfp, + struct vm_area_struct *vma, vm_fault_t *fault_type); static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) { @@ -189,10 +196,10 @@ static inline int shmem_reacct_size(unsigned long flags, /* * ... whereas tmpfs objects are accounted incrementally as * pages are allocated, in order to allow large sparse files. - * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM, + * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM, * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM. 
*/ -static inline int shmem_acct_block(unsigned long flags, long pages) +static inline int shmem_acct_blocks(unsigned long flags, long pages) { if (!(flags & VM_NORESERVE)) return 0; @@ -207,26 +214,26 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages) vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE)); } -static int shmem_inode_acct_block(struct inode *inode, long pages) +static int shmem_inode_acct_blocks(struct inode *inode, long pages) { struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); int err = -ENOSPC; - if (shmem_acct_block(info->flags, pages)) + if (shmem_acct_blocks(info->flags, pages)) return err; might_sleep(); /* when quotas */ if (sbinfo->max_blocks) { - if (percpu_counter_compare(&sbinfo->used_blocks, - sbinfo->max_blocks - pages) > 0) + if (!percpu_counter_limited_add(&sbinfo->used_blocks, + sbinfo->max_blocks, pages)) goto unacct; err = dquot_alloc_block_nodirty(inode, pages); - if (err) + if (err) { + percpu_counter_sub(&sbinfo->used_blocks, pages); goto unacct; - - percpu_counter_add(&sbinfo->used_blocks, pages); + } } else { err = dquot_alloc_block_nodirty(inode, pages); if (err) @@ -447,7 +454,7 @@ bool shmem_charge(struct inode *inode, long pages) { struct address_space *mapping = inode->i_mapping; - if (shmem_inode_acct_block(inode, pages)) + if (shmem_inode_acct_blocks(inode, pages)) return false; /* nrpages adjustment first, then shmem_recalc_inode() when balanced */ @@ -535,10 +542,11 @@ static bool shmem_confirm_swap(struct address_space *mapping, static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER; -static bool __shmem_is_huge(struct inode *inode, pgoff_t index, - bool shmem_huge_force, struct mm_struct *mm, - unsigned long vm_flags) +static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index, + bool shmem_huge_force, struct vm_area_struct *vma, + unsigned long vm_flags) { + struct mm_struct *mm = vma ? 
vma->vm_mm : NULL; loff_t i_size; if (!S_ISREG(inode->i_mode)) @@ -568,34 +576,50 @@ static bool __shmem_is_huge(struct inode *inode, pgoff_t index, } } -bool shmem_is_huge(struct inode *inode, pgoff_t index, - bool shmem_huge_force, struct mm_struct *mm, +static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, + bool shmem_huge_force, struct vm_area_struct *vma, unsigned long vm_flags) { if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER) return false; - return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags); + return __shmem_huge_global_enabled(inode, index, shmem_huge_force, + vma, vm_flags); } -#if defined(CONFIG_SYSFS) static int shmem_parse_huge(const char *str) { + int huge; + + if (!str) + return -EINVAL; + if (!strcmp(str, "never")) - return SHMEM_HUGE_NEVER; - if (!strcmp(str, "always")) - return SHMEM_HUGE_ALWAYS; - if (!strcmp(str, "within_size")) - return SHMEM_HUGE_WITHIN_SIZE; - if (!strcmp(str, "advise")) - return SHMEM_HUGE_ADVISE; - if (!strcmp(str, "deny")) - return SHMEM_HUGE_DENY; - if (!strcmp(str, "force")) - return SHMEM_HUGE_FORCE; - return -EINVAL; + huge = SHMEM_HUGE_NEVER; + else if (!strcmp(str, "always")) + huge = SHMEM_HUGE_ALWAYS; + else if (!strcmp(str, "within_size")) + huge = SHMEM_HUGE_WITHIN_SIZE; + else if (!strcmp(str, "advise")) + huge = SHMEM_HUGE_ADVISE; + else if (!strcmp(str, "deny")) + huge = SHMEM_HUGE_DENY; + else if (!strcmp(str, "force")) + huge = SHMEM_HUGE_FORCE; + else + return -EINVAL; + + if (!has_transparent_hugepage() && + huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) + return -EINVAL; + + /* Do not override huge allocation policy with non-PMD sized mTHP */ + if (huge == SHMEM_HUGE_FORCE && + huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER)) + return -EINVAL; + + return huge; } -#endif #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS) static const char *shmem_format_huge(int huge) @@ -758,39 +782,34 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, { return 0; } + +static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, + bool shmem_huge_force, struct vm_area_struct *vma, + unsigned long vm_flags) +{ + return false; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* - * Like filemap_add_folio, but error if expected item has gone. + * Somewhat like filemap_add_folio, but error if expected item has gone. 
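+ * Unlike filemap_add_folio(), no memcg charging is done here: the
+ * callers (shmem_alloc_and_add_folio(), shmem_mfill_atomic_pte())
+ * charge the folio before adding it, and @expected lets a swapin
+ * verify that it is replacing the very swap entry it looked up.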
*/ static int shmem_add_to_page_cache(struct folio *folio, struct address_space *mapping, - pgoff_t index, void *expected, gfp_t gfp, - struct mm_struct *charge_mm) + pgoff_t index, void *expected, gfp_t gfp) { XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); long nr = folio_nr_pages(folio); - int error; VM_BUG_ON_FOLIO(index != round_down(index, nr), folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio); - VM_BUG_ON(expected && folio_test_large(folio)); folio_ref_add(folio, nr); folio->mapping = mapping; folio->index = index; - if (!folio_test_swapcache(folio)) { - error = mem_cgroup_charge(folio, charge_mm, gfp); - if (error) { - if (folio_test_pmd_mappable(folio)) { - count_vm_event(THP_FILE_FALLBACK); - count_vm_event(THP_FILE_FALLBACK_CHARGE); - } - goto error; - } - } + gfp &= GFP_RECLAIM_MASK; folio_throttle_swaprate(folio, gfp); do { @@ -806,31 +825,26 @@ static int shmem_add_to_page_cache(struct folio *folio, xas_store(&xas, folio); if (xas_error(&xas)) goto unlock; - if (folio_test_pmd_mappable(folio)) { - count_vm_event(THP_FILE_ALLOC); + if (folio_test_pmd_mappable(folio)) __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr); - } - mapping->nrpages += nr; __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); __lruvec_stat_mod_folio(folio, NR_SHMEM, nr); + mapping->nrpages += nr; unlock: xas_unlock_irq(&xas); } while (xas_nomem(&xas, gfp)); if (xas_error(&xas)) { - error = xas_error(&xas); - goto error; + folio->mapping = NULL; + folio_ref_sub(folio, nr); + return xas_error(&xas); } return 0; -error: - folio->mapping = NULL; - folio_ref_sub(folio, nr); - return error; } /* - * Like delete_from_page_cache, but substitutes swap for @folio. + * Somewhat like filemap_remove_folio, but substitutes swap for @folio. */ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap) { @@ -845,23 +859,27 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap) __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); xa_unlock_irq(&mapping->i_pages); - folio_put(folio); + folio_put_refs(folio, nr); BUG_ON(error); } /* - * Remove swap entry from page cache, free the swap and its page cache. + * Remove swap entry from page cache, free the swap and its page cache. Returns + * the number of pages being freed. 0 means entry not found in XArray (0 pages + * being freed). 
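+ * For example, freeing an order-9 (PMD-sized) swap entry returns
+ * 512, which shmem_undo_range() adds straight into nr_swaps_freed.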
*/ -static int shmem_free_swap(struct address_space *mapping, - pgoff_t index, void *radswap) +static long shmem_free_swap(struct address_space *mapping, + pgoff_t index, void *radswap) { + int order = xa_get_order(&mapping->i_pages, index); void *old; old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); if (old != radswap) - return -ENOENT; - free_swap_and_cache(radix_to_swp_entry(radswap)); - return 0; + return 0; + free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order); + + return 1 << order; } /* @@ -884,7 +902,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping, if (xas_retry(&xas, page)) continue; if (xa_is_value(page)) - swapped++; + swapped += 1 << xa_get_order(xas.xa, xas.xa_index); if (xas.xa_index == max) break; if (need_resched()) { @@ -892,7 +910,6 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping, cond_resched_rcu(); } } - rcu_read_unlock(); return swapped << PAGE_SHIFT; @@ -1014,7 +1031,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, if (xa_is_value(folio)) { if (unfalloc) continue; - nr_swaps_freed += !shmem_free_swap(mapping, + nr_swaps_freed += shmem_free_swap(mapping, indices[i], folio); continue; } @@ -1081,14 +1098,17 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, folio = fbatch.folios[i]; if (xa_is_value(folio)) { + long swaps_freed; + if (unfalloc) continue; - if (shmem_free_swap(mapping, indices[i], folio)) { + swaps_freed = shmem_free_swap(mapping, indices[i], folio); + if (!swaps_freed) { /* Swap was replaced by page: retry */ index = indices[i]; break; } - nr_swaps_freed++; + nr_swaps_freed += swaps_freed; continue; } @@ -1160,7 +1180,7 @@ static int shmem_getattr(struct mnt_idmap *idmap, STATX_ATTR_NODUMP); generic_fillattr(idmap, request_mask, inode, stat); - if (shmem_is_huge(inode, 0, false, NULL, 0)) + if (shmem_huge_global_enabled(inode, 0, false, NULL, 0)) stat->blksize = HPAGE_PMD_SIZE; if (request_mask & STATX_BTIME) { @@ -1235,7 +1255,6 @@ static int shmem_setattr(struct mnt_idmap *idmap, if (i_uid_needs_update(idmap, attr, inode) || i_gid_needs_update(idmap, attr, inode)) { error = dquot_transfer(idmap, inode, attr); - if (error) return error; } @@ -1348,10 +1367,8 @@ static int shmem_unuse_swap_entries(struct inode *inode, if (!xa_is_value(folio)) continue; - error = shmem_swapin_folio(inode, indices[i], - &folio, SGP_CACHE, - mapping_gfp_mask(mapping), - NULL, NULL); + error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE, + mapping_gfp_mask(mapping), NULL, NULL); if (error == 0) { folio_unlock(folio); folio_put(folio); @@ -1450,6 +1467,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); swp_entry_t swap; pgoff_t index; + int nr_pages; + bool split = false; /* * Our capabilities prevent regular writeback or sync from ever calling @@ -1468,20 +1487,33 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) goto redirty; /* - * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or - * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages, - * and its shmem_writeback() needs them to be split when swapping. + * If CONFIG_THP_SWAP is not enabled, the large folio should be + * split when swapping. 
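+ * (Without CONFIG_THP_SWAP the swap layer cannot back a large
+ * folio with a contiguous multi-page swap entry, hence the
+ * unconditional split in that configuration.)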
+ * + * And shrinkage of pages beyond i_size does not split swap, so + * swapout of a large folio crossing i_size needs to split too + * (unless fallocate has been used to preallocate beyond EOF). */ if (folio_test_large(folio)) { + index = shmem_fallocend(inode, + DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE)); + if ((index > folio->index && index < folio_next_index(folio)) || + !IS_ENABLED(CONFIG_THP_SWAP)) + split = true; + } + + if (split) { +try_split: /* Ensure the subpages are still dirty */ folio_test_set_dirty(folio); - if (split_huge_page(page) < 0) + if (split_huge_page_to_list(page, wbc->list)) goto redirty; folio = page_folio(page); folio_clear_dirty(folio); } index = folio->index; + nr_pages = folio_nr_pages(folio); /* * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC @@ -1516,8 +1548,12 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) } swap = folio_alloc_swap(folio); - if (!swap.val) + if (!swap.val) { + if (nr_pages > 1) + goto try_split; + goto redirty; + } /* * Add inode to shmem_unuse()'s list of swapped-out inodes, @@ -1534,8 +1570,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) if (add_to_swap_cache(folio, swap, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, NULL) == 0) { - shmem_recalc_inode(inode, 0, 1); - swap_shmem_alloc(swap); + shmem_recalc_inode(inode, 0, nr_pages); + swap_shmem_alloc(swap, nr_pages); shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); mutex_unlock(&shmem_swaplist_mutex); @@ -1649,70 +1685,246 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) return result; } -static struct folio *shmem_alloc_hugefolio(gfp_t gfp, - struct shmem_inode_info *info, pgoff_t index) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +bool shmem_hpage_pmd_enabled(void) { - struct vm_area_struct pvma; - struct address_space *mapping = info->vfs_inode.i_mapping; - pgoff_t hindex; - struct folio *folio; + if (shmem_huge == SHMEM_HUGE_DENY) + return false; + if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always)) + return true; + if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise)) + return true; + if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size)) + return true; + if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) && + shmem_huge != SHMEM_HUGE_NEVER) + return true; - hindex = round_down(index, HPAGE_PMD_NR); - if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, - XA_PRESENT)) - return NULL; + return false; +} - shmem_pseudo_vma_init(&pvma, info, hindex); - folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true); - shmem_pseudo_vma_destroy(&pvma); - if (!folio) - count_vm_event(THP_FILE_FALLBACK); - return folio; +unsigned long shmem_allowable_huge_orders(struct inode *inode, + struct vm_area_struct *vma, pgoff_t index, + bool shmem_huge_force) +{ + unsigned long mask = READ_ONCE(huge_shmem_orders_always); + unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size); + unsigned long vm_flags = vma ? vma->vm_flags : 0; + bool global_huge; + loff_t i_size; + int order; + + if (vma && ((vm_flags & VM_NOHUGEPAGE) || + test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))) + return 0; + + /* If the hardware/firmware marked hugepage support disabled. 
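+ * (TRANSPARENT_HUGEPAGE_UNSUPPORTED is set at boot by
+ * hugepage_init() when has_transparent_hugepage() is false.)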
*/ + if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED)) + return 0; + + global_huge = shmem_huge_global_enabled(inode, index, shmem_huge_force, + vma, vm_flags); + if (!vma || !vma_is_anon_shmem(vma)) { + /* + * For tmpfs, we now only support PMD sized THP if huge page + * is enabled, otherwise fallback to order 0. + */ + return global_huge ? BIT(HPAGE_PMD_ORDER) : 0; + } + + /* + * Following the 'deny' semantics of the top level, force the huge + * option off from all mounts. + */ + if (shmem_huge == SHMEM_HUGE_DENY) + return 0; + + /* + * Only allow inherit orders if the top-level value is 'force', which + * means non-PMD sized THP can not override 'huge' mount option now. + */ + if (shmem_huge == SHMEM_HUGE_FORCE) + return READ_ONCE(huge_shmem_orders_inherit); + + /* Allow mTHP that will be fully within i_size. */ + order = highest_order(within_size_orders); + while (within_size_orders) { + index = round_up(index + 1, order); + i_size = round_up(i_size_read(inode), PAGE_SIZE); + if (i_size >> PAGE_SHIFT >= index) { + mask |= within_size_orders; + break; + } + + order = next_order(&within_size_orders, order); + } + + if (vm_flags & VM_HUGEPAGE) + mask |= READ_ONCE(huge_shmem_orders_madvise); + + if (global_huge) + mask |= READ_ONCE(huge_shmem_orders_inherit); + + return THP_ORDERS_ALL_FILE_DEFAULT & mask; } -static struct folio *shmem_alloc_folio(gfp_t gfp, - struct shmem_inode_info *info, pgoff_t index) +static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf, + struct address_space *mapping, pgoff_t index, + unsigned long orders) +{ + struct vm_area_struct *vma = vmf ? vmf->vma : NULL; + pgoff_t aligned_index; + unsigned long pages; + int order; + + if (vma) { + orders = thp_vma_suitable_orders(vma, vmf->address, orders); + if (!orders) + return 0; + } + + /* Find the highest order that can add into the page cache */ + order = highest_order(orders); + while (orders) { + pages = 1UL << order; + aligned_index = round_down(index, pages); + /* + * Check for conflict before waiting on a huge allocation. + * Conflict might be that a huge page has just been allocated + * and added to page cache by a racing thread, or that there + * is already at least one small page in the huge extent. + * Be careful to retry when appropriate, but not forever! + * Elsewhere -EEXIST would be the right code, but not here. 
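+ * (For instance, if a racing fault has already inserted one order-0
+ * page into the naturally aligned extent, xa_find() sees it and this
+ * order is skipped in favour of the next lower allowed order.)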
+ */ + if (!xa_find(&mapping->i_pages, &aligned_index, + aligned_index + pages - 1, XA_PRESENT)) + break; + order = next_order(&orders, order); + } + + return orders; +} +#else +static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf, + struct address_space *mapping, pgoff_t index, + unsigned long orders) +{ + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +static struct folio *shmem_alloc_folio(gfp_t gfp, int order, + struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; struct folio *folio; shmem_pseudo_vma_init(&pvma, info, index); - folio = vma_alloc_folio(gfp, 0, &pvma, 0, false); + folio = vma_alloc_folio(gfp, order, &pvma, 0, order > 0); shmem_pseudo_vma_destroy(&pvma); return folio; } -static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode, - pgoff_t index, bool huge) +static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, + gfp_t gfp, struct inode *inode, pgoff_t index, + struct mm_struct *fault_mm, unsigned long orders) { + struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); - struct folio *folio; - int nr; - int err; + unsigned long suitable_orders = 0; + struct folio *folio = NULL; + long pages; + int error, order; if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) - huge = false; - nr = huge ? HPAGE_PMD_NR : 1; + orders = 0; - err = shmem_inode_acct_block(inode, nr); - if (err) - goto failed; + if (orders > 0) { + suitable_orders = shmem_suitable_orders(inode, vmf, + mapping, index, orders); - if (huge) - folio = shmem_alloc_hugefolio(gfp, info, index); - else - folio = shmem_alloc_folio(gfp, info, index); - if (folio) { - __folio_set_locked(folio); - __folio_set_swapbacked(folio); - return folio; + order = highest_order(suitable_orders); + while (suitable_orders) { + pages = 1UL << order; + index = round_down(index, pages); + folio = shmem_alloc_folio(gfp, order, info, index); + if (folio) + goto allocated; + + if (pages == HPAGE_PMD_NR) + count_vm_event(THP_FILE_FALLBACK); + count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK); + order = next_order(&suitable_orders, order); + } + } else { + pages = 1; + folio = shmem_alloc_folio(gfp, 0, info, index); } + if (!folio) + return ERR_PTR(-ENOMEM); - err = -ENOMEM; - shmem_inode_unacct_blocks(inode, nr); -failed: - return ERR_PTR(err); +allocated: + __folio_set_locked(folio); + __folio_set_swapbacked(folio); + + gfp &= GFP_RECLAIM_MASK; + error = mem_cgroup_charge(folio, fault_mm, gfp); + if (error) { + if (xa_find(&mapping->i_pages, &index, + index + pages - 1, XA_PRESENT)) { + error = -EEXIST; + } else if (pages > 1) { + if (pages == HPAGE_PMD_NR) { + count_vm_event(THP_FILE_FALLBACK); + count_vm_event(THP_FILE_FALLBACK_CHARGE); + } + count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK); + count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE); + } + goto unlock; + } + + error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp); + if (error) + goto unlock; + + error = shmem_inode_acct_blocks(inode, pages); + if (error) { + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + long freed; + /* + * Try to reclaim some space by splitting a few + * large folios beyond i_size on the filesystem. + */ + shmem_unused_huge_shrink(sbinfo, NULL, 2); + /* + * And do a shmem_recalc_inode() to account for freed pages: + * except our folio is there in cache, so not quite balanced. 
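+ * ('pages' is added back in because the new folio already counts
+ * in mapping->nrpages but not yet in info->alloced (that happens
+ * in shmem_recalc_inode() below), so the difference would
+ * otherwise be understated by the folio's own size.)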
+ */ + spin_lock(&info->lock); + freed = pages + info->alloced - info->swapped - + READ_ONCE(mapping->nrpages); + if (freed > 0) + info->alloced -= freed; + spin_unlock(&info->lock); + if (freed > 0) + shmem_inode_unacct_blocks(inode, freed); + error = shmem_inode_acct_blocks(inode, pages); + if (error) { + filemap_remove_folio(folio); + goto unlock; + } + } + + shmem_recalc_inode(inode, pages, 0); + folio_add_lru(folio); + return folio; + +unlock: + folio_unlock(folio); + folio_put(folio); + return ERR_PTR(error); } /* @@ -1733,30 +1945,35 @@ static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp) } static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, - struct shmem_inode_info *info, pgoff_t index) + struct shmem_inode_info *info, pgoff_t index, + struct vm_area_struct *vma) { - struct folio *old, *new; - struct address_space *swap_mapping; - swp_entry_t entry; - pgoff_t swap_index; - int error; - - old = *foliop; - entry = old->swap; - swap_index = swp_offset(entry); - swap_mapping = swap_address_space(entry); + struct folio *new, *old = *foliop; + swp_entry_t entry = old->swap; + struct address_space *swap_mapping = swap_address_space(entry); + pgoff_t swap_index = swp_offset(entry); + XA_STATE(xas, &swap_mapping->i_pages, swap_index); + int nr_pages = folio_nr_pages(old); + int error = 0, i; /* * We have arrived here because our zones are constrained, so don't * limit chance of success by further cpuset and node constraints. */ gfp &= ~GFP_CONSTRAINT_MASK; - VM_BUG_ON_FOLIO(folio_test_large(old), old); - new = shmem_alloc_folio(gfp, info, index); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (nr_pages > 1) { + gfp_t huge_gfp = vma_thp_gfp_mask(vma); + + gfp = limit_gfp_mask(huge_gfp, gfp); + } +#endif + + new = shmem_alloc_folio(gfp, folio_order(old), info, index); if (!new) return -ENOMEM; - folio_get(new); + folio_ref_add(new, nr_pages); folio_copy(new, old); flush_dcache_folio(new); @@ -1766,18 +1983,25 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, new->swap = entry; folio_set_swapcache(new); - /* - * Our caller will very soon move newpage out of swapcache, but it's - * a nice clean interface for us to replace oldpage by newpage there. - */ + /* Swap cache still stores N entries instead of a high-order entry */ xa_lock_irq(&swap_mapping->i_pages); - error = shmem_replace_entry(swap_mapping, swap_index, old, new); + for (i = 0; i < nr_pages; i++) { + void *item = xas_load(&xas); + + if (item != old) { + error = -ENOENT; + break; + } + + xas_store(&xas, new); + xas_next(&xas); + } if (!error) { mem_cgroup_migrate(old, new); - __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1); - __lruvec_stat_mod_folio(new, NR_SHMEM, 1); - __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1); - __lruvec_stat_mod_folio(old, NR_SHMEM, -1); + __lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages); + __lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages); + __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages); + __lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages); } xa_unlock_irq(&swap_mapping->i_pages); @@ -1797,7 +2021,12 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, old->private = NULL; folio_unlock(old); - folio_put_refs(old, 2); + /* + * The old folio is removed from the swap cache: drop the 'nr_pages' + * references, as well as the one temporary reference taken from the + * swap cache.
+ */ + folio_put_refs(old, nr_pages + 1); return error; } @@ -1807,6 +2036,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, struct address_space *mapping = inode->i_mapping; swp_entry_t swapin_error; void *old; + int nr_pages; swapin_error = make_poisoned_swp_entry(); old = xa_cmpxchg_irq(&mapping->i_pages, index, @@ -1815,6 +2045,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, if (old != swp_to_radix_entry(swap)) return; + nr_pages = folio_nr_pages(folio); folio_wait_writeback(folio); delete_from_swap_cache(folio); /* @@ -1822,8 +2053,86 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks) * in shmem_evict_inode(). */ - shmem_recalc_inode(inode, -1, -1); - swap_free(swap); + shmem_recalc_inode(inode, -nr_pages, -nr_pages); + swap_free_nr(swap, nr_pages); +} + +static int shmem_split_large_entry(struct inode *inode, pgoff_t index, + swp_entry_t swap, gfp_t gfp) +{ + struct address_space *mapping = inode->i_mapping; + XA_STATE_ORDER(xas, &mapping->i_pages, index, 0); + void *alloced_shadow = NULL; + int alloced_order = 0, i; + + /* Convert user data gfp flags to xarray node gfp flags */ + gfp &= GFP_RECLAIM_MASK; + + for (;;) { + int order = -1, split_order = 0; + void *old = NULL; + + xas_lock_irq(&xas); + old = xas_load(&xas); + if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) { + xas_set_err(&xas, -EEXIST); + goto unlock; + } + + order = xas_get_order(&xas); + + /* Swap entry may have changed before we re-acquire the lock */ + if (alloced_order && + (old != alloced_shadow || order != alloced_order)) { + xas_destroy(&xas); + alloced_order = 0; + } + + /* Try to split large swap entry in pagecache */ + if (order > 0) { + if (!alloced_order) { + split_order = order; + goto unlock; + } + xas_split(&xas, old, order); + + /* + * Re-set the swap entry after splitting, and the swap + * offset of the original large entry must be continuous. + */ + for (i = 0; i < 1 << order; i++) { + pgoff_t aligned_index = round_down(index, 1 << order); + swp_entry_t tmp; + + tmp = swp_entry(swp_type(swap), swp_offset(swap) + i); + __xa_store(&mapping->i_pages, aligned_index + i, + swp_to_radix_entry(tmp), 0); + } + } + +unlock: + xas_unlock_irq(&xas); + + /* split needed, alloc here and retry. */ + if (split_order) { + xas_split_alloc(&xas, old, split_order, gfp); + if (xas_error(&xas)) + goto error; + alloced_shadow = old; + alloced_order = split_order; + xas_reset(&xas); + continue; + } + + if (!xas_nomem(&xas, gfp)) + break; + } + +error: + if (xas_error(&xas)) + return xas_error(&xas); + + return alloced_order; } /* @@ -1838,12 +2147,13 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, vm_fault_t *fault_type) { struct address_space *mapping = inode->i_mapping; + struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL; struct shmem_inode_info *info = SHMEM_I(inode); - struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL; struct swap_info_struct *si; struct folio *folio = NULL; swp_entry_t swap; - int error; + int error, nr_pages; + u64 start; VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); swap = radix_to_swp_entry(*foliop); @@ -1863,14 +2173,41 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, /* Look it up and read it in.. */ folio = swap_cache_get_folio(swap, NULL, 0); if (!folio) { + int split_order; + /* Or update major stats only when swapin succeeds?? 
*/ if (fault_type) { *fault_type |= VM_FAULT_MAJOR; count_vm_event(PGMAJFAULT); - count_memcg_event_mm(charge_mm, PGMAJFAULT); + count_memcg_event_mm(fault_mm, PGMAJFAULT); + } + + /* + * Now swap device can only swap in order 0 folio, then we + * should split the large swap entry stored in the pagecache + * if necessary. + */ + split_order = shmem_split_large_entry(inode, index, swap, gfp); + if (split_order < 0) { + error = split_order; + goto failed; + } + + /* + * If the large swap entry has already been split, it is + * necessary to recalculate the new swap entry based on + * the old order alignment. + */ + if (split_order > 0) { + pgoff_t offset = index - round_down(index, 1 << split_order); + + swap = swp_entry(swp_type(swap), swp_offset(swap) + offset); } + /* Here we actually start the io */ + memcg_lat_stat_start(&start); folio = shmem_swapin(swap, gfp, info, index); + memcg_lat_stat_end(MEM_LAT_DIRECT_SWAPIN, start); if (!folio) { error = -ENOMEM; goto failed; @@ -1890,6 +2227,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, goto failed; } folio_wait_writeback(folio); + nr_pages = folio_nr_pages(folio); /* * Some architectures may have to restore extra metadata to the @@ -1898,25 +2236,25 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, arch_swap_restore(swap, folio); if (shmem_should_replace_folio(folio, gfp)) { - error = shmem_replace_folio(&folio, gfp, info, index); + error = shmem_replace_folio(&folio, gfp, info, index, vma); if (error) goto failed; } - error = shmem_add_to_page_cache(folio, mapping, index, - swp_to_radix_entry(swap), gfp, - charge_mm); + error = shmem_add_to_page_cache(folio, mapping, + round_down(index, nr_pages), + swp_to_radix_entry(swap), gfp); if (error) goto failed; - shmem_recalc_inode(inode, 0, -1); + shmem_recalc_inode(inode, 0, -nr_pages); if (sgp == SGP_WRITE) folio_mark_accessed(folio); delete_from_swap_cache(folio); folio_mark_dirty(folio); - swap_free(swap); + swap_free_nr(swap, nr_pages); put_swap_device(si); *foliop = folio; @@ -1943,37 +2281,30 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, * vm. If we swap it in we mark it dirty since we also free the swap * entry since a page cannot live in both the swap and page cache. * - * vma, vmf, and fault_type are only supplied by shmem_fault: - * otherwise they are NULL. + * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL. */ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, gfp_t gfp, - struct vm_area_struct *vma, struct vm_fault *vmf, - vm_fault_t *fault_type) + struct vm_fault *vmf, vm_fault_t *fault_type) { - struct address_space *mapping = inode->i_mapping; - struct shmem_inode_info *info = SHMEM_I(inode); - struct shmem_sb_info *sbinfo; - struct mm_struct *charge_mm; + struct vm_area_struct *vma = vmf ? vmf->vma : NULL; + struct mm_struct *fault_mm; struct folio *folio; - pgoff_t hindex; - gfp_t huge_gfp; int error; - int once = 0; - int alloced = 0; + bool alloced; + unsigned long orders = 0; if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) return -EFBIG; repeat: if (sgp <= SGP_CACHE && - ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { + ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) return -EINVAL; - } - sbinfo = SHMEM_SB(inode->i_sb); - charge_mm = vma ? vma->vm_mm : NULL; + alloced = false; + fault_mm = vma ? 
vma->vm_mm : NULL; - folio = filemap_get_entry(mapping, index); + folio = filemap_get_entry(inode->i_mapping, index); if (folio && vma && userfaultfd_minor(vma)) { if (!xa_is_value(folio)) folio_put(folio); @@ -1983,7 +2314,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, if (xa_is_value(folio)) { error = shmem_swapin_folio(inode, index, &folio, - sgp, gfp, vma, fault_type); + sgp, gfp, vma, fault_type); if (error == -EEXIST) goto repeat; @@ -1995,7 +2326,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, folio_lock(folio); /* Has the folio been truncated or swapped out? */ - if (unlikely(folio->mapping != mapping)) { + if (unlikely(folio->mapping != inode->i_mapping)) { folio_unlock(folio); folio_put(folio); goto repeat; @@ -2030,58 +2361,41 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, return 0; } - if (!shmem_is_huge(inode, index, false, - vma ? vma->vm_mm : NULL, vma ? vma->vm_flags : 0)) - goto alloc_nohuge; - - huge_gfp = vma_thp_gfp_mask(vma); - huge_gfp = limit_gfp_mask(huge_gfp, gfp); - folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true); - if (IS_ERR(folio)) { -alloc_nohuge: - folio = shmem_alloc_and_acct_folio(gfp, inode, index, false); + /* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */ + orders = shmem_allowable_huge_orders(inode, vma, index, false); + if (orders > 0) { + gfp_t huge_gfp; + + huge_gfp = vma_thp_gfp_mask(vma); + huge_gfp = limit_gfp_mask(huge_gfp, gfp); + folio = shmem_alloc_and_add_folio(vmf, huge_gfp, + inode, index, fault_mm, orders); + if (!IS_ERR(folio)) { + if (folio_test_pmd_mappable(folio)) + count_vm_event(THP_FILE_ALLOC); + count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC); + goto alloced; + } + if (PTR_ERR(folio) == -EEXIST) + goto repeat; } - if (IS_ERR(folio)) { - int retry = 5; + folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0); + if (IS_ERR(folio)) { error = PTR_ERR(folio); + if (error == -EEXIST) + goto repeat; folio = NULL; - if (error != -ENOSPC) - goto unlock; - /* - * Try to reclaim some space by splitting a large folio - * beyond i_size on the filesystem. - */ - while (retry--) { - int ret; - - ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); - if (ret == SHRINK_STOP) - break; - if (ret) - goto alloc_nohuge; - } goto unlock; } - hindex = round_down(index, folio_nr_pages(folio)); - - if (sgp == SGP_WRITE) - __folio_set_referenced(folio); - - error = shmem_add_to_page_cache(folio, mapping, hindex, - NULL, gfp & GFP_RECLAIM_MASK, - charge_mm); - if (error) - goto unacct; - - folio_add_lru(folio); - shmem_recalc_inode(inode, folio_nr_pages(folio), 0); +alloced: alloced = true; - - if (folio_test_pmd_mappable(folio) && + if (folio_test_large(folio) && DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < folio_next_index(folio) - 1) { + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + struct shmem_inode_info *info = SHMEM_I(inode); /* * Part of the large folio is beyond i_size: subject * to shrink under memory pressure. @@ -2099,6 +2413,8 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, spin_unlock(&sbinfo->shrinklist_lock); } + if (sgp == SGP_WRITE) + folio_set_referenced(folio); /* * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio. 
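 * (The optimization being that a new folio is not zeroed here when
 * the caller is expected to fill it; fallocated folios are likewise
 * left !uptodate, so an error rollback can still recognize them.)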
*/ @@ -2122,11 +2438,6 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, /* Perhaps the file has been truncated since we checked */ if (sgp <= SGP_CACHE && ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { - if (alloced) { - folio_clear_dirty(folio); - filemap_remove_folio(folio); - shmem_recalc_inode(inode, 0, 0); - } error = -EINVAL; goto unlock; } @@ -2137,25 +2448,14 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, /* * Error recovery. */ -unacct: - shmem_inode_unacct_blocks(inode, folio_nr_pages(folio)); - - if (folio_test_large(folio)) { - folio_unlock(folio); - folio_put(folio); - goto alloc_nohuge; - } unlock: + if (alloced) + filemap_remove_folio(folio); + shmem_recalc_inode(inode, 0, 0); if (folio) { folio_unlock(folio); folio_put(folio); } - if (error == -ENOSPC && !once++) { - shmem_recalc_inode(inode, 0, 0); - goto repeat; - } - if (error == -EEXIST) - goto repeat; return error; } @@ -2163,7 +2463,7 @@ int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp) { return shmem_get_folio_gfp(inode, index, foliop, sgp, - mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL); + mapping_gfp_mask(inode->i_mapping), NULL, NULL); } /* @@ -2171,87 +2471,99 @@ int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, * entry unconditionally - even if something else had already woken the * target. */ -static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) +static int synchronous_wake_function(wait_queue_entry_t *wait, + unsigned int mode, int sync, void *key) { int ret = default_wake_function(wait, mode, sync, key); list_del_init(&wait->entry); return ret; } +/* + * Trinity finds that probing a hole which tmpfs is punching can + * prevent the hole-punch from ever completing: which in turn + * locks writers out with its hold on i_rwsem. So refrain from + * faulting pages into the hole while it's being punched. Although + * shmem_undo_range() does remove the additions, it may be unable to + * keep up, as each new page needs its own unmap_mapping_range() call, + * and the i_mmap tree grows ever slower to scan if new vmas are added. + * + * It does not matter if we sometimes reach this check just before the + * hole-punch begins, so that one fault then races with the punch: + * we just need to make racing faults a rare case. + * + * The implementation below would be much simpler if we just used a + * standard mutex or completion: but we cannot take i_rwsem in fault, + * and bloating every shmem inode for this unlikely case would be sad. 
+ */ +static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode) +{ + struct shmem_falloc *shmem_falloc; + struct file *fpin = NULL; + vm_fault_t ret = 0; + + spin_lock(&inode->i_lock); + shmem_falloc = inode->i_private; + if (shmem_falloc && + shmem_falloc->waitq && + vmf->pgoff >= shmem_falloc->start && + vmf->pgoff < shmem_falloc->next) { + wait_queue_head_t *shmem_falloc_waitq; + DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); + + ret = VM_FAULT_NOPAGE; + fpin = maybe_unlock_mmap_for_io(vmf, NULL); + shmem_falloc_waitq = shmem_falloc->waitq; + prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, + TASK_UNINTERRUPTIBLE); + spin_unlock(&inode->i_lock); + schedule(); + + /* + * shmem_falloc_waitq points into the shmem_fallocate() + * stack of the hole-punching task: shmem_falloc_waitq + * is usually invalid by the time we reach here, but + * finish_wait() does not dereference it in that case; + * though i_lock needed lest racing with wake_up_all(). + */ + spin_lock(&inode->i_lock); + finish_wait(shmem_falloc_waitq, &shmem_fault_wait); + } + spin_unlock(&inode->i_lock); + if (fpin) { + fput(fpin); + ret = VM_FAULT_RETRY; + } + return ret; +} + static vm_fault_t shmem_fault(struct vm_fault *vmf) { - struct vm_area_struct *vma = vmf->vma; - struct inode *inode = file_inode(vma->vm_file); + struct inode *inode = file_inode(vmf->vma->vm_file); gfp_t gfp = mapping_gfp_mask(inode->i_mapping); struct folio *folio = NULL; + vm_fault_t ret = 0; int err; - vm_fault_t ret = VM_FAULT_LOCKED; /* * Trinity finds that probing a hole which tmpfs is punching can - * prevent the hole-punch from ever completing: which in turn - * locks writers out with its hold on i_rwsem. So refrain from - * faulting pages into the hole while it's being punched. Although - * shmem_undo_range() does remove the additions, it may be unable to - * keep up, as each new page needs its own unmap_mapping_range() call, - * and the i_mmap tree grows ever slower to scan if new vmas are added. - * - * It does not matter if we sometimes reach this check just before the - * hole-punch begins, so that one fault then races with the punch: - * we just need to make racing faults a rare case. - * - * The implementation below would be much simpler if we just used a - * standard mutex or completion: but we cannot take i_rwsem in fault, - * and bloating every shmem inode for this unlikely case would be sad. + * prevent the hole-punch from ever completing: noted in i_private. */ if (unlikely(inode->i_private)) { - struct shmem_falloc *shmem_falloc; - - spin_lock(&inode->i_lock); - shmem_falloc = inode->i_private; - if (shmem_falloc && - shmem_falloc->waitq && - vmf->pgoff >= shmem_falloc->start && - vmf->pgoff < shmem_falloc->next) { - struct file *fpin; - wait_queue_head_t *shmem_falloc_waitq; - DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); - - ret = VM_FAULT_NOPAGE; - fpin = maybe_unlock_mmap_for_io(vmf, NULL); - if (fpin) - ret = VM_FAULT_RETRY; - - shmem_falloc_waitq = shmem_falloc->waitq; - prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, - TASK_UNINTERRUPTIBLE); - spin_unlock(&inode->i_lock); - schedule(); - - /* - * shmem_falloc_waitq points into the shmem_fallocate() - * stack of the hole-punching task: shmem_falloc_waitq - * is usually invalid by the time we reach here, but - * finish_wait() does not dereference it in that case; - * though i_lock needed lest racing with wake_up_all(). 
- */ - spin_lock(&inode->i_lock); - finish_wait(shmem_falloc_waitq, &shmem_fault_wait); - spin_unlock(&inode->i_lock); - - if (fpin) - fput(fpin); + ret = shmem_falloc_wait(vmf, inode); + if (ret) return ret; - } - spin_unlock(&inode->i_lock); } + WARN_ON_ONCE(vmf->page != NULL); err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, - gfp, vma, vmf, &ret); + gfp, vmf, &ret); if (err) return vmf_error(err); - if (folio) + if (folio) { vmf->page = folio_file_page(folio, vmf->pgoff); + ret |= VM_FAULT_LOCKED; + } return ret; } @@ -2266,6 +2578,7 @@ unsigned long shmem_get_unmapped_area(struct file *file, unsigned long inflated_len; unsigned long inflated_addr; unsigned long inflated_offset; + unsigned long hpage_size; if (len > TASK_SIZE) return -ENOMEM; @@ -2284,8 +2597,6 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (shmem_huge == SHMEM_HUGE_DENY) return addr; - if (len < HPAGE_PMD_SIZE) - return addr; if (flags & MAP_FIXED) return addr; /* @@ -2297,8 +2608,11 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (uaddr == addr) return addr; + hpage_size = HPAGE_PMD_SIZE; if (shmem_huge != SHMEM_HUGE_FORCE) { struct super_block *sb; + unsigned long __maybe_unused hpage_orders; + int order = 0; if (file) { VM_BUG_ON(file->f_op != &shmem_file_operations); @@ -2311,18 +2625,38 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (IS_ERR(shm_mnt)) return addr; sb = shm_mnt->mnt_sb; + + /* + * Find the highest mTHP order used for anonymous shmem to + * provide a suitable alignment address. + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + hpage_orders = READ_ONCE(huge_shmem_orders_always); + hpage_orders |= READ_ONCE(huge_shmem_orders_within_size); + hpage_orders |= READ_ONCE(huge_shmem_orders_madvise); + if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER) + hpage_orders |= READ_ONCE(huge_shmem_orders_inherit); + + if (hpage_orders > 0) { + order = highest_order(hpage_orders); + hpage_size = PAGE_SIZE << order; + } +#endif } - if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) + if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order) return addr; } - offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); - if (offset && offset + len < 2 * HPAGE_PMD_SIZE) + if (len < hpage_size) return addr; - if ((addr & (HPAGE_PMD_SIZE-1)) == offset) + + offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1); + if (offset && offset + len < 2 * hpage_size) + return addr; + if ((addr & (hpage_size - 1)) == offset) return addr; - inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; + inflated_len = len + hpage_size - PAGE_SIZE; if (inflated_len > TASK_SIZE) return addr; if (inflated_len < len) @@ -2334,10 +2668,10 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (inflated_addr & ~PAGE_MASK) return addr; - inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); + inflated_offset = inflated_addr & (hpage_size - 1); inflated_addr += offset - inflated_offset; if (inflated_offset > offset) - inflated_addr += HPAGE_PMD_SIZE; + inflated_addr += hpage_size; if (inflated_addr > TASK_SIZE - len) return addr; @@ -2464,7 +2798,6 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap, if (err) return ERR_PTR(err); - inode = new_inode(sb); if (!inode) { shmem_free_inode(sb, 0); @@ -2489,11 +2822,10 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap, shmem_set_inode_flags(inode, info->fsflags); INIT_LIST_HEAD(&info->shrinklist); INIT_LIST_HEAD(&info->swaplist); - INIT_LIST_HEAD(&info->swaplist); - if (sbinfo->noswap) - mapping_set_unevictable(inode->i_mapping); 
simple_xattrs_init(&info->xattrs); cache_no_acl(inode); + if (sbinfo->noswap) + mapping_set_unevictable(inode->i_mapping); mapping_set_large_folios(inode->i_mapping); switch (mode & S_IFMT) { @@ -2584,7 +2916,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd, int ret; pgoff_t max_off; - if (shmem_inode_acct_block(inode, 1)) { + if (shmem_inode_acct_blocks(inode, 1)) { /* * We may have got a page, returned -ENOENT triggering a retry, * and now we find ourselves with -ENOMEM. Release the page, to @@ -2599,7 +2931,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd, if (!*foliop) { ret = -ENOMEM; - folio = shmem_alloc_folio(gfp, info, pgoff); + folio = shmem_alloc_folio(gfp, 0, info, pgoff); if (!folio) goto out_unacct_blocks; @@ -2656,8 +2988,10 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd, if (unlikely(pgoff >= max_off)) goto out_release; - ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, - gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm); + ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp); + if (ret) + goto out_release; + ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp); if (ret) goto out_release; @@ -2705,7 +3039,6 @@ shmem_write_begin(struct file *file, struct address_space *mapping, } ret = shmem_get_folio(inode, index, &folio, SGP_WRITE); - if (ret) return ret; @@ -2755,27 +3088,19 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) unsigned long offset; int error = 0; ssize_t retval = 0; - loff_t *ppos = &iocb->ki_pos; - - index = *ppos >> PAGE_SHIFT; - offset = *ppos & ~PAGE_MASK; for (;;) { struct folio *folio = NULL; struct page *page = NULL; - pgoff_t end_index; unsigned long nr, ret; - loff_t i_size = i_size_read(inode); + loff_t end_offset, i_size = i_size_read(inode); + bool fallback_page_copy = false; + size_t fsize; - end_index = i_size >> PAGE_SHIFT; - if (index > end_index) + if (unlikely(iocb->ki_pos >= i_size)) break; - if (index == end_index) { - nr = i_size & ~PAGE_MASK; - if (nr <= offset) - break; - } + index = iocb->ki_pos >> PAGE_SHIFT; error = shmem_get_folio(inode, index, &folio, SGP_READ); if (error) { if (error == -EINVAL) @@ -2791,24 +3116,29 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) error = -EIO; break; } + + if (folio_test_large(folio) && + folio_test_has_hwpoisoned(folio)) + fallback_page_copy = true; } /* * We must evaluate after, since reads (unlike writes) * are called without i_rwsem protection against truncate */ - nr = PAGE_SIZE; i_size = i_size_read(inode); - end_index = i_size >> PAGE_SHIFT; - if (index == end_index) { - nr = i_size & ~PAGE_MASK; - if (nr <= offset) { - if (folio) - folio_put(folio); - break; - } + if (unlikely(iocb->ki_pos >= i_size)) { + if (folio) + folio_put(folio); + break; } - nr -= offset; + end_offset = min_t(loff_t, i_size, iocb->ki_pos + to->count); + if (folio && likely(!fallback_page_copy)) + fsize = folio_size(folio); + else + fsize = PAGE_SIZE; + offset = iocb->ki_pos & (fsize - 1); + nr = min_t(loff_t, end_offset - iocb->ki_pos, fsize - offset); if (folio) { /* @@ -2816,10 +3146,15 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) * virtual addresses, take care about potential aliasing * before reading the page on the kernel side. */ - if (mapping_writably_mapped(mapping)) - flush_dcache_page(page); + if (mapping_writably_mapped(mapping)) { + if (likely(!fallback_page_copy)) + flush_dcache_folio(folio); + else + flush_dcache_page(page); + } + /* - * Mark the page accessed if we read the beginning. 
+ * Mark the folio accessed if we read the beginning. */ if (!offset) folio_mark_accessed(folio); @@ -2827,9 +3162,11 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) * Ok, we have the page, and it's up-to-date, so * now we can copy it to user space... */ - ret = copy_page_to_iter(page, offset, nr, to); + if (likely(!fallback_page_copy)) + ret = copy_folio_to_iter(folio, offset, nr, to); + else + ret = copy_page_to_iter(page, offset, nr, to); folio_put(folio); - } else if (user_backed_iter(to)) { /* * Copy to user tends to be so well optimized, but @@ -2847,9 +3184,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) } retval += ret; - offset += ret; - index += offset >> PAGE_SHIFT; - offset &= ~PAGE_MASK; + iocb->ki_pos += ret; if (!iov_iter_count(to)) break; @@ -2860,7 +3195,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) cond_resched(); } - *ppos = ((loff_t) index << PAGE_SHIFT) + offset; file_accessed(file); return retval ? retval : error; } @@ -3237,8 +3571,7 @@ shmem_mknod(struct mnt_idmap *idmap, struct inode *dir, error = simple_acl_create(dir, inode); if (error) goto out_iput; - error = security_inode_init_security(inode, dir, - &dentry->d_name, + error = security_inode_init_security(inode, dir, &dentry->d_name, shmem_initxattrs, NULL); if (error && error != -EOPNOTSUPP) goto out_iput; @@ -3267,14 +3600,11 @@ shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir, int error; inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE); - if (IS_ERR(inode)) { error = PTR_ERR(inode); goto err_out; } - - error = security_inode_init_security(inode, dir, - NULL, + error = security_inode_init_security(inode, dir, NULL, shmem_initxattrs, NULL); if (error && error != -EOPNOTSUPP) goto out_iput; @@ -3311,7 +3641,8 @@ static int shmem_create(struct mnt_idmap *idmap, struct inode *dir, /* * Link a file.. */ -static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) +static int shmem_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); int ret = 0; @@ -3342,7 +3673,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr inode_inc_iversion(dir); inc_nlink(inode); ihold(inode); /* New dentry reference */ - dget(dentry); /* Extra pinning count for the created dentry */ + dget(dentry); /* Extra pinning count for the created dentry */ d_instantiate(dentry, inode); out: return ret; @@ -3362,7 +3693,7 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry) inode_set_ctime_current(inode)); inode_inc_iversion(dir); drop_nlink(inode); - dput(dentry); /* Undo the count from "create" - this does all the work */ + dput(dentry); /* Undo the count from "create" - does all the work */ return 0; } @@ -3472,7 +3803,6 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir, inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0, VM_NORESERVE); - if (IS_ERR(inode)) return PTR_ERR(inode); @@ -3526,8 +3856,7 @@ static void shmem_put_link(void *arg) folio_put(arg); } -static const char *shmem_get_link(struct dentry *dentry, - struct inode *inode, +static const char *shmem_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { struct folio *folio = NULL; @@ -3601,8 +3930,7 @@ static int shmem_fileattr_set(struct mnt_idmap *idmap, * Callback for security_inode_init_security() for acquiring xattrs. 
*/ static int shmem_initxattrs(struct inode *inode, - const struct xattr *xattr_array, - void *fs_info) + const struct xattr *xattr_array, void *fs_info) { struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); @@ -3786,7 +4114,6 @@ static struct dentry *shmem_find_alias(struct inode *inode) return alias ?: d_find_any_alias(inode); } - static struct dentry *shmem_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { @@ -4353,7 +4680,9 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) #ifdef CONFIG_TMPFS_POSIX_ACL sb->s_flags |= SB_POSIXACL; #endif - uuid_gen(&sb->s_uuid); + uuid_t uuid; + uuid_gen(&uuid); + super_set_uuid(sb, uuid.b, sizeof(uuid)); #ifdef CONFIG_TMPFS_QUOTA if (ctx->seen & SHMEM_SEEN_QUOTA) { @@ -4370,8 +4699,8 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) } #endif /* CONFIG_TMPFS_QUOTA */ - inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0, - VM_NORESERVE); + inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, + S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); if (IS_ERR(inode)) { error = PTR_ERR(inode); goto failed; @@ -4643,6 +4972,13 @@ void __init shmem_init(void) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; else shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */ + + /* + * Default to setting PMD-sized THP to inherit the global setting and + * disable all other multi-size THPs. + */ + if (!shmem_orders_configured) + huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER); #endif return; @@ -4674,11 +5010,9 @@ static ssize_t shmem_enabled_show(struct kobject *kobj, for (i = 0; i < ARRAY_SIZE(values); i++) { len += sysfs_emit_at(buf, len, - shmem_huge == values[i] ? "%s[%s]" : "%s%s", - i ? " " : "", - shmem_format_huge(values[i])); + shmem_huge == values[i] ? "%s[%s]" : "%s%s", + i ? " " : "", shmem_format_huge(values[i])); } - len += sysfs_emit_at(buf, len, "\n"); return len; @@ -4688,7 +5022,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { char tmp[16]; - int huge; + int huge, err; if (count + 1 > sizeof(tmp)) return -EINVAL; @@ -4699,20 +5033,223 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, huge = shmem_parse_huge(tmp); if (huge == -EINVAL) - return -EINVAL; - if (!has_transparent_hugepage() && - huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) - return -EINVAL; + return huge; shmem_huge = huge; if (shmem_huge > SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; - return count; + + err = start_stop_khugepaged(); + return err ? 
err : count; } struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled); +static DEFINE_SPINLOCK(huge_shmem_orders_lock); + +static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int order = to_thpsize(kobj)->order; + const char *output; + + if (test_bit(order, &huge_shmem_orders_always)) + output = "[always] inherit within_size advise never"; + else if (test_bit(order, &huge_shmem_orders_inherit)) + output = "always [inherit] within_size advise never"; + else if (test_bit(order, &huge_shmem_orders_within_size)) + output = "always inherit [within_size] advise never"; + else if (test_bit(order, &huge_shmem_orders_madvise)) + output = "always inherit within_size [advise] never"; + else + output = "always inherit within_size advise [never]"; + + return sysfs_emit(buf, "%s\n", output); +} + +static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int order = to_thpsize(kobj)->order; + ssize_t ret = count; + + if (sysfs_streq(buf, "always")) { + spin_lock(&huge_shmem_orders_lock); + clear_bit(order, &huge_shmem_orders_inherit); + clear_bit(order, &huge_shmem_orders_madvise); + clear_bit(order, &huge_shmem_orders_within_size); + set_bit(order, &huge_shmem_orders_always); + spin_unlock(&huge_shmem_orders_lock); + } else if (sysfs_streq(buf, "inherit")) { + /* Do not override huge allocation policy with non-PMD sized mTHP */ + if (shmem_huge == SHMEM_HUGE_FORCE && + order != HPAGE_PMD_ORDER) + return -EINVAL; + + spin_lock(&huge_shmem_orders_lock); + clear_bit(order, &huge_shmem_orders_always); + clear_bit(order, &huge_shmem_orders_madvise); + clear_bit(order, &huge_shmem_orders_within_size); + set_bit(order, &huge_shmem_orders_inherit); + spin_unlock(&huge_shmem_orders_lock); + } else if (sysfs_streq(buf, "within_size")) { + spin_lock(&huge_shmem_orders_lock); + clear_bit(order, &huge_shmem_orders_always); + clear_bit(order, &huge_shmem_orders_inherit); + clear_bit(order, &huge_shmem_orders_madvise); + set_bit(order, &huge_shmem_orders_within_size); + spin_unlock(&huge_shmem_orders_lock); + } else if (sysfs_streq(buf, "advise")) { + spin_lock(&huge_shmem_orders_lock); + clear_bit(order, &huge_shmem_orders_always); + clear_bit(order, &huge_shmem_orders_inherit); + clear_bit(order, &huge_shmem_orders_within_size); + set_bit(order, &huge_shmem_orders_madvise); + spin_unlock(&huge_shmem_orders_lock); + } else if (sysfs_streq(buf, "never")) { + spin_lock(&huge_shmem_orders_lock); + clear_bit(order, &huge_shmem_orders_always); + clear_bit(order, &huge_shmem_orders_inherit); + clear_bit(order, &huge_shmem_orders_within_size); + clear_bit(order, &huge_shmem_orders_madvise); + spin_unlock(&huge_shmem_orders_lock); + } else { + ret = -EINVAL; + } + + if (ret > 0) { + int err = start_stop_khugepaged(); + + if (err) + ret = err; + } + return ret; +} + +struct kobj_attribute thpsize_shmem_enabled_attr = + __ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store); #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) + +static int __init setup_transparent_hugepage_shmem(char *str) +{ + int huge; + + huge = shmem_parse_huge(str); + if (huge == -EINVAL) { + pr_warn("transparent_hugepage_shmem= cannot parse, ignored\n"); + return huge; + } + + shmem_huge = huge; + return 1; +} +__setup("transparent_hugepage_shmem=", setup_transparent_hugepage_shmem); + +static char str_dup[PAGE_SIZE] __initdata; 
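The boot-time parser that follows accepts a semicolon-separated list of `<size>[KMG][-<size>[KMG]]:<policy>` groups, e.g. `thp_shmem=16K-64K:inherit;2M:advise`. A minimal userspace model of its strsep()-based tokenization, useful for eyeballing a command line before booting (a standalone sketch, not part of the patch; size-to-order mapping and policy validation are left out)::

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char str[] = "16K-64K:inherit;2M:advise";
		char *p = str, *token;

		/* Groups are split on ';', then "<range>:<policy>" on ':'. */
		while ((token = strsep(&p, ";")) != NULL) {
			char *range = strsep(&token, ":");
			char *policy = token;	/* what remains after the ':' */
			char *subtoken;

			if (!policy)
				return 1;	/* a group without a policy is invalid */
			/* A range list may hold several comma-separated items. */
			while ((subtoken = strsep(&range, ",")) != NULL) {
				char *start = strsep(&subtoken, "-");
				char *end = subtoken ? subtoken : start;

				printf("sizes %s..%s -> policy %s\n",
				       start, end, policy);
			}
		}
		return 0;
	}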
+static int __init setup_thp_shmem(char *str) +{ + char *token, *range, *policy, *subtoken; + unsigned long always, inherit, madvise, within_size; + char *start_size, *end_size; + int start, end, nr; + char *p; + + if (!str || strlen(str) + 1 > PAGE_SIZE) + goto err; + strcpy(str_dup, str); + + always = huge_shmem_orders_always; + inherit = huge_shmem_orders_inherit; + madvise = huge_shmem_orders_madvise; + within_size = huge_shmem_orders_within_size; + p = str_dup; + while ((token = strsep(&p, ";")) != NULL) { + range = strsep(&token, ":"); + policy = token; + + if (!policy) + goto err; + + while ((subtoken = strsep(&range, ",")) != NULL) { + if (strchr(subtoken, '-')) { + start_size = strsep(&subtoken, "-"); + end_size = subtoken; + + start = get_order_from_str(start_size, + THP_ORDERS_ALL_FILE_DEFAULT); + end = get_order_from_str(end_size, + THP_ORDERS_ALL_FILE_DEFAULT); + } else { + start_size = end_size = subtoken; + start = end = get_order_from_str(subtoken, + THP_ORDERS_ALL_FILE_DEFAULT); + } + + if (start == -EINVAL) { + pr_err("invalid size %s in thp_shmem boot parameter\n", + start_size); + goto err; + } + + if (end == -EINVAL) { + pr_err("invalid size %s in thp_shmem boot parameter\n", + end_size); + goto err; + } + + if (start < 0 || end < 0 || start > end) + goto err; + + nr = end - start + 1; + if (!strcmp(policy, "always")) { + bitmap_set(&always, start, nr); + bitmap_clear(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&within_size, start, nr); + } else if (!strcmp(policy, "advise")) { + bitmap_set(&madvise, start, nr); + bitmap_clear(&inherit, start, nr); + bitmap_clear(&always, start, nr); + bitmap_clear(&within_size, start, nr); + } else if (!strcmp(policy, "inherit")) { + bitmap_set(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&always, start, nr); + bitmap_clear(&within_size, start, nr); + } else if (!strcmp(policy, "within_size")) { + bitmap_set(&within_size, start, nr); + bitmap_clear(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&always, start, nr); + } else if (!strcmp(policy, "never")) { + bitmap_clear(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&always, start, nr); + bitmap_clear(&within_size, start, nr); + } else { + pr_err("invalid policy %s in thp_shmem boot parameter\n", policy); + goto err; + } + } + } + + huge_shmem_orders_always = always; + huge_shmem_orders_madvise = madvise; + huge_shmem_orders_inherit = inherit; + huge_shmem_orders_within_size = within_size; + shmem_orders_configured = true; + return 1; + +err: + pr_warn("thp_shmem=%s: error parsing string, ignoring setting\n", str); + return 0; +} +__setup("thp_shmem=", setup_thp_shmem); + +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + #else /* !CONFIG_SHMEM */ /* @@ -4775,8 +5312,9 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range); #define shmem_acct_size(flags, size) 0 #define shmem_unacct_size(flags, size) do {} while (0) -static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir, - umode_t mode, dev_t dev, unsigned long flags) +static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, + struct super_block *sb, struct inode *dir, + umode_t mode, dev_t dev, unsigned long flags) { struct inode *inode = ramfs_get_inode(sb, dir, mode, dev); return inode ? 
inode : ERR_PTR(-ENOSPC); @@ -4786,8 +5324,8 @@ static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct supe /* common code */ -static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size, - unsigned long flags, unsigned int i_flags) +static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, + loff_t size, unsigned long flags, unsigned int i_flags) { struct inode *inode; struct file *res; @@ -4806,7 +5344,6 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, l inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); - if (IS_ERR(inode)) { shmem_unacct_size(flags, size); return ERR_CAST(inode); @@ -4916,7 +5453,7 @@ struct folio *shmem_read_folio_gfp(struct address_space *mapping, BUG_ON(!shmem_mapping(mapping)); error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE, - gfp, NULL, NULL, NULL); + gfp, NULL, NULL); if (error) return ERR_PTR(error); diff --git a/mm/slab.c b/mm/slab.c index 9ad3d0f2d1a5e0b32dcad0c544975cdb6431f648..dba95fd61ffb7bf011964247132fe046f5bf39e6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3222,7 +3222,7 @@ slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags, if (unlikely(!cachep)) return NULL; - objp = kfence_alloc(cachep, orig_size, flags); + objp = kfence_alloc_node(cachep, orig_size, flags, nodeid); if (unlikely(objp)) goto out; diff --git a/mm/slub.c b/mm/slub.c index d2544c88a5c43c8d4f3d9589df5b8703515699cd..801abe7a206ff43e7359c54b040ef294c8fbfe06 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1872,6 +1872,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node, struct slab *slab; unsigned int order = oo_order(oo); + flags |= __GFP_NOKFENCE; if (node == NUMA_NO_NODE) folio = (struct folio *)alloc_pages(flags, order); else @@ -3468,7 +3469,7 @@ static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list if (!s) return NULL; - object = kfence_alloc(s, orig_size, gfpflags); + object = kfence_alloc_node(s, orig_size, gfpflags, node); if (unlikely(object)) goto out; diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index a2cbe44c48e10f2257503dfdcc3026c5622a4b50..2628fc02be08b96c2bb802d8be7c0585f2de2668 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -184,6 +184,10 @@ static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node) return p; } +void __weak __meminit kernel_pte_init(void *addr) +{ +} + pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) { pmd_t *pmd = pmd_offset(pud, addr); @@ -191,6 +195,7 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node); if (!p) return NULL; + kernel_pte_init(p); pmd_populate_kernel(&init_mm, pmd, p); } return pmd; diff --git a/mm/swap.c b/mm/swap.c index 42082eba42de3e72516b40e0edd6bb6e107f98a9..ddc765b4288fbe776b81a0db5705efdd5bfa9e27 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -201,10 +201,6 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn) for (i = 0; i < folio_batch_count(fbatch); i++) { struct folio *folio = fbatch->folios[i]; - /* block memcg migration while the folio moves between lru */ - if (move_fn != lru_add_fn && !folio_test_clear_lru(folio)) - continue; - lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags); move_fn(lruvec, folio); @@ -246,11 +242,16 @@ static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio) void 
folio_rotate_reclaimable(struct folio *folio) { if (!folio_test_locked(folio) && !folio_test_dirty(folio) && - !folio_test_unevictable(folio) && folio_test_lru(folio)) { + !folio_test_unevictable(folio)) { struct folio_batch *fbatch; unsigned long flags; folio_get(folio); + if (!folio_test_clear_lru(folio)) { + folio_put(folio); + return; + } + local_lock_irqsave(&lru_rotate.lock, flags); fbatch = this_cpu_ptr(&lru_rotate.fbatch); folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn); @@ -343,11 +344,15 @@ static void folio_activate_drain(int cpu) void folio_activate(struct folio *folio) { - if (folio_test_lru(folio) && !folio_test_active(folio) && - !folio_test_unevictable(folio)) { + if (!folio_test_active(folio) && !folio_test_unevictable(folio)) { struct folio_batch *fbatch; folio_get(folio); + if (!folio_test_clear_lru(folio)) { + folio_put(folio); + return; + } + local_lock(&cpu_fbatches.lock); fbatch = this_cpu_ptr(&cpu_fbatches.activate); folio_batch_add_and_move(fbatch, folio, folio_activate_fn); @@ -688,6 +693,11 @@ void deactivate_file_folio(struct folio *folio) return; folio_get(folio); + if (!folio_test_clear_lru(folio)) { + folio_put(folio); + return; + } + local_lock(&cpu_fbatches.lock); fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file); folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn); @@ -704,11 +714,16 @@ void deactivate_file_folio(struct folio *folio) */ void folio_deactivate(struct folio *folio) { - if (folio_test_lru(folio) && !folio_test_unevictable(folio) && - (folio_test_active(folio) || lru_gen_enabled())) { + if (!folio_test_unevictable(folio) && (folio_test_active(folio) || + lru_gen_enabled())) { struct folio_batch *fbatch; folio_get(folio); + if (!folio_test_clear_lru(folio)) { + folio_put(folio); + return; + } + local_lock(&cpu_fbatches.lock); fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate); folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn); @@ -725,12 +740,16 @@ void folio_deactivate(struct folio *folio) */ void folio_mark_lazyfree(struct folio *folio) { - if (folio_test_lru(folio) && folio_test_anon(folio) && - folio_test_swapbacked(folio) && !folio_test_swapcache(folio) && - !folio_test_unevictable(folio)) { + if (folio_test_anon(folio) && folio_test_swapbacked(folio) && + !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) { struct folio_batch *fbatch; folio_get(folio); + if (!folio_test_clear_lru(folio)) { + folio_put(folio); + return; + } + local_lock(&cpu_fbatches.lock); fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree); folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn); @@ -959,11 +978,17 @@ void release_pages(release_pages_arg arg, int nr) unsigned int lock_batch; for (i = 0; i < nr; i++) { + unsigned int nr_refs = 1; struct folio *folio; /* Turn any of the argument types into a folio */ folio = page_folio(encoded_page_ptr(encoded[i])); + /* Is our next entry actually "nr_pages" -> "nr_refs" ? 
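+		 * (If so, the next array slot encodes a reference count for
+		 * this folio rather than another page pointer, and the two
+		 * slots are consumed together.)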
*/ + if (unlikely(encoded_page_flags(encoded[i]) & + ENCODED_PAGE_BIT_NR_PAGES_NEXT)) + nr_refs = encoded_nr_pages(encoded[++i]); + /* * Make sure the IRQ-safe lock-holding time does not get * excessive with a continuous string of pages from the @@ -982,14 +1007,14 @@ void release_pages(release_pages_arg arg, int nr) unlock_page_lruvec_irqrestore(lruvec, flags); lruvec = NULL; } - if (put_devmap_managed_page(&folio->page)) + if (put_devmap_managed_page_refs(&folio->page, nr_refs)) continue; - if (folio_put_testzero(folio)) + if (folio_ref_sub_and_test(folio, nr_refs)) free_zone_device_page(&folio->page); continue; } - if (!folio_put_testzero(folio)) + if (!folio_ref_sub_and_test(folio, nr_refs)) continue; if (folio_test_large(folio)) { diff --git a/mm/swap.h b/mm/swap.h index 693d1b2815598f67ac28bdd6c16e14b02612c703..500f99202776627be33e61be98d30931872e7c2b 100644 --- a/mm/swap.h +++ b/mm/swap.h @@ -38,7 +38,7 @@ void __delete_from_swap_cache(struct folio *folio, void delete_from_swap_cache(struct folio *folio); void clear_shadow_from_swap_cache(int type, unsigned long begin, unsigned long end); -void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry); +void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr); struct folio *swap_cache_get_folio(swp_entry_t entry, struct vm_area_struct *vma, unsigned long addr); struct folio *filemap_get_incore_folio(struct address_space *mapping, @@ -97,7 +97,7 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc) return 0; } -static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry) +static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr) { } @@ -149,4 +149,5 @@ static inline unsigned int folio_swap_flags(struct folio *folio) return 0; } #endif /* CONFIG_SWAP */ + #endif /* _MM_SWAP_H */ diff --git a/mm/swap_slots.c b/mm/swap_slots.c index 0bec1f705f8e09313e1fcdcf87568cd5bf68da38..dceef07c7a5d5fab08876f194a69378954b8582f 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c @@ -264,7 +264,7 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache) cache->cur = 0; if (swap_slot_cache_active) cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, - cache->slots, 1); + cache->slots, 0); return cache->nr; } @@ -308,7 +308,7 @@ swp_entry_t folio_alloc_swap(struct folio *folio) if (folio_test_large(folio)) { if (IS_ENABLED(CONFIG_THP_SWAP) && arch_thp_swp_supported()) - get_swap_pages(1, &entry, folio_nr_pages(folio)); + get_swap_pages(1, &entry, folio_order(folio)); goto out; } @@ -340,7 +340,7 @@ swp_entry_t folio_alloc_swap(struct folio *folio) goto out; } - get_swap_pages(1, &entry, 1); + get_swap_pages(1, &entry, 0); out: if (mem_cgroup_try_charge_swap(folio, entry)) { put_swap_folio(folio, entry); diff --git a/mm/swap_state.c b/mm/swap_state.c index b3b14bd0dd6447f47aea2df42e8348f15660c73a..d331f21749f8e562d58784bf1f7e165c12f126c4 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -310,8 +310,19 @@ void free_page_and_swap_cache(struct page *page) void free_pages_and_swap_cache(struct encoded_page **pages, int nr) { lru_add_drain(); - for (int i = 0; i < nr; i++) - free_swap_cache(encoded_page_ptr(pages[i])); + for (int i = 0; i < nr; i++) { + struct page *page = encoded_page_ptr(pages[i]); + + /* + * Skip over the "nr_pages" entry. It's sufficient to call + * free_swap_cache() only once per folio. 
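+		 * E.g. a 4-page folio F followed by an order-0 page P arrives
+		 * as { F, <nr = 4>, P }: three array slots covering five pages.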
+ */ + if (unlikely(encoded_page_flags(pages[i]) & + ENCODED_PAGE_BIT_NR_PAGES_NEXT)) + i++; + + free_swap_cache(page); + } release_pages(pages, nr); } @@ -460,7 +471,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, /* * Swap entry may have been freed since our caller observed it. */ - err = swapcache_prepare(entry); + err = swapcache_prepare(entry, 1); if (!err) break; @@ -492,7 +503,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) goto fail_unlock; - mem_cgroup_swapin_uncharge_swap(entry); + mem_cgroup_swapin_uncharge_swap(entry, 1); if (shadow) workingset_refault(folio, shadow); diff --git a/mm/swapfile.c b/mm/swapfile.c index c856d6bb2daf3cc04961f8bacb82418994f40696..7ec4cfe8002b70f3d17ac288e11d8902e4ccd786 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -52,6 +52,15 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t, unsigned char); static void free_swap_count_continuations(struct swap_info_struct *); +static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry, + unsigned int nr_pages); +static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, + unsigned int nr_entries); +static bool folio_swapcache_freeable(struct folio *folio); +static struct swap_cluster_info *lock_cluster_or_swap_info( + struct swap_info_struct *si, unsigned long offset); +static void unlock_cluster_or_swap_info(struct swap_info_struct *si, + struct swap_cluster_info *ci); static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; @@ -126,20 +135,51 @@ static inline unsigned char swap_count(unsigned char ent) * corresponding page */ #define TTRS_UNMAPPED 0x2 -/* Reclaim the swap entry if swap is getting full*/ +/* Reclaim the swap entry if swap is getting full */ #define TTRS_FULL 0x4 +/* Reclaim directly, bypass the slot cache and don't touch device lock */ +#define TTRS_DIRECT 0x8 + +static bool swap_is_has_cache(struct swap_info_struct *si, + unsigned long offset, int nr_pages) +{ + unsigned char *map = si->swap_map + offset; + unsigned char *map_end = map + nr_pages; + + do { + VM_BUG_ON(!(*map & SWAP_HAS_CACHE)); + if (*map != SWAP_HAS_CACHE) + return false; + } while (++map < map_end); + + return true; +} -/* returns 1 if swap entry is freed */ +/* + * returns number of pages in the folio that backs the swap entry. If positive, + * the folio was reclaimed. If negative, the folio was not reclaimed. If 0, no + * folio was associated with the swap entry. + */ static int __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset, unsigned long flags) { swp_entry_t entry = swp_entry(si->type, offset); + struct address_space *address_space = swap_address_space(entry); + struct swap_cluster_info *ci; struct folio *folio; - int ret = 0; + int ret, nr_pages; + bool need_reclaim; - folio = filemap_get_folio(swap_address_space(entry), offset); + folio = filemap_get_folio(address_space, offset); if (IS_ERR(folio)) return 0; + + /* offset could point to the middle of a large folio */ + entry = folio->swap; + offset = swp_offset(entry); + nr_pages = folio_nr_pages(folio); + ret = -nr_pages; + /* * When this function is called from scan_swap_map_slots() and it's * called by vmscan.c at reclaiming folios. So we hold a folio lock @@ -147,13 +187,50 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, * case and you should use folio_free_swap() with explicit folio_lock() * in usual operations. 
 */
-	if (folio_trylock(folio)) {
-		if ((flags & TTRS_ANYWAY) ||
-		    ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
-		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)))
-			ret = folio_free_swap(folio);
-		folio_unlock(folio);
+	if (!folio_trylock(folio))
+		goto out;
+
+	need_reclaim = ((flags & TTRS_ANYWAY) ||
+			((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
+			((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)));
+	if (!need_reclaim || !folio_swapcache_freeable(folio))
+		goto out_unlock;
+
+	/*
+	 * It's safe to delete the folio from swap cache only if the folio's
+	 * swap_map is HAS_CACHE only, which means the slots have no page table
+	 * reference or pending writeback, and can't be allocated to others.
+	 */
+	ci = lock_cluster_or_swap_info(si, offset);
+	need_reclaim = swap_is_has_cache(si, offset, nr_pages);
+	unlock_cluster_or_swap_info(si, ci);
+	if (!need_reclaim)
+		goto out_unlock;
+
+	if (!(flags & TTRS_DIRECT)) {
+		/* Free through the slot cache */
+		delete_from_swap_cache(folio);
+		folio_set_dirty(folio);
+		ret = nr_pages;
+		goto out_unlock;
 	}
+
+	xa_lock_irq(&address_space->i_pages);
+	__delete_from_swap_cache(folio, entry, NULL);
+	xa_unlock_irq(&address_space->i_pages);
+	folio_ref_sub(folio, nr_pages);
+	folio_set_dirty(folio);
+
+	spin_lock(&si->lock);
+	/* Only a single page folio can be backed by zswap */
+	if (nr_pages == 1)
+		zswap_invalidate(si->type, offset);
+	swap_entry_range_free(si, entry, nr_pages);
+	spin_unlock(&si->lock);
+	ret = nr_pages;
+out_unlock:
+	folio_unlock(folio);
+out:
 	folio_put(folio);
 	return ret;
 }
@@ -272,86 +349,33 @@ static void discard_swap_cluster(struct swap_info_struct *si,
 
 #ifdef CONFIG_THP_SWAP
 #define SWAPFILE_CLUSTER	HPAGE_PMD_NR
-#define swap_entry_size(size)	(size)
+#define swap_entry_order(order)	(order)
 #else
 #define SWAPFILE_CLUSTER	256
 
 /*
- * Define swap_entry_size() as constant to let compiler to optimize
+ * Define swap_entry_order() as a constant to let the compiler optimize
  * out some code if !CONFIG_THP_SWAP
  */
-#define swap_entry_size(size)	1
+#define swap_entry_order(order)	0
 #endif
 #define LATENCY_LIMIT		256
 
-static inline void cluster_set_flag(struct swap_cluster_info *info,
-	unsigned int flag)
-{
-	info->flags = flag;
-}
-
-static inline unsigned int cluster_count(struct swap_cluster_info *info)
-{
-	return info->data;
-}
-
-static inline void cluster_set_count(struct swap_cluster_info *info,
-				     unsigned int c)
-{
-	info->data = c;
-}
-
-static inline void cluster_set_count_flag(struct swap_cluster_info *info,
-					  unsigned int c, unsigned int f)
-{
-	info->flags = f;
-	info->data = c;
-}
-
-static inline unsigned int cluster_next(struct swap_cluster_info *info)
-{
-	return info->data;
-}
-
-static inline void cluster_set_next(struct swap_cluster_info *info,
-				    unsigned int n)
-{
-	info->data = n;
-}
-
-static inline void cluster_set_next_flag(struct swap_cluster_info *info,
-					 unsigned int n, unsigned int f)
-{
-	info->flags = f;
-	info->data = n;
-}
-
 static inline bool cluster_is_free(struct swap_cluster_info *info)
 {
 	return info->flags & CLUSTER_FLAG_FREE;
 }
 
-static inline bool cluster_is_null(struct swap_cluster_info *info)
-{
-	return info->flags & CLUSTER_FLAG_NEXT_NULL;
-}
-
-static inline void cluster_set_null(struct swap_cluster_info *info)
+static inline unsigned int cluster_index(struct swap_info_struct *si,
+					 struct swap_cluster_info *ci)
 {
-	info->flags = CLUSTER_FLAG_NEXT_NULL;
-	info->data = 0;
+	return ci - si->cluster_info;
 }
 
-static inline bool cluster_is_huge(struct swap_cluster_info *info)
+static inline
unsigned int cluster_offset(struct swap_info_struct *si, + struct swap_cluster_info *ci) { - if (IS_ENABLED(CONFIG_THP_SWAP)) - return info->flags & CLUSTER_FLAG_HUGE; - return false; -} - -static inline void cluster_clear_huge(struct swap_cluster_info *info) -{ - info->flags &= ~CLUSTER_FLAG_HUGE; + return cluster_index(si, ci) * SWAPFILE_CLUSTER; } static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, @@ -400,65 +424,11 @@ static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si, spin_unlock(&si->lock); } -static inline bool cluster_list_empty(struct swap_cluster_list *list) -{ - return cluster_is_null(&list->head); -} - -static inline unsigned int cluster_list_first(struct swap_cluster_list *list) -{ - return cluster_next(&list->head); -} - -static void cluster_list_init(struct swap_cluster_list *list) -{ - cluster_set_null(&list->head); - cluster_set_null(&list->tail); -} - -static void cluster_list_add_tail(struct swap_cluster_list *list, - struct swap_cluster_info *ci, - unsigned int idx) -{ - if (cluster_list_empty(list)) { - cluster_set_next_flag(&list->head, idx, 0); - cluster_set_next_flag(&list->tail, idx, 0); - } else { - struct swap_cluster_info *ci_tail; - unsigned int tail = cluster_next(&list->tail); - - /* - * Nested cluster lock, but both cluster locks are - * only acquired when we held swap_info_struct->lock - */ - ci_tail = ci + tail; - spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING); - cluster_set_next(ci_tail, idx); - spin_unlock(&ci_tail->lock); - cluster_set_next_flag(&list->tail, idx, 0); - } -} - -static unsigned int cluster_list_del_first(struct swap_cluster_list *list, - struct swap_cluster_info *ci) -{ - unsigned int idx; - - idx = cluster_next(&list->head); - if (cluster_next(&list->tail) == idx) { - cluster_set_null(&list->head); - cluster_set_null(&list->tail); - } else - cluster_set_next_flag(&list->head, - cluster_next(&ci[idx]), 0); - - return idx; -} - /* Add a cluster to discard list and schedule it to do discard */ static void swap_cluster_schedule_discard(struct swap_info_struct *si, - unsigned int idx) + struct swap_cluster_info *ci) { + unsigned int idx = cluster_index(si, ci); /* * If scan_swap_map_slots() can't find a free cluster, it will check * si->swap_map directly. 
To make sure the discarding cluster isn't @@ -468,17 +438,23 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si, memset(si->swap_map + idx * SWAPFILE_CLUSTER, SWAP_MAP_BAD, SWAPFILE_CLUSTER); - cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx); - + VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE); + list_move_tail(&ci->list, &si->discard_clusters); + ci->flags = 0; schedule_work(&si->discard_work); } -static void __free_cluster(struct swap_info_struct *si, unsigned long idx) +static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { - struct swap_cluster_info *ci = si->cluster_info; + lockdep_assert_held(&si->lock); + lockdep_assert_held(&ci->lock); - cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE); - cluster_list_add_tail(&si->free_clusters, ci, idx); + if (ci->flags) + list_move_tail(&ci->list, &si->free_clusters); + else + list_add_tail(&ci->list, &si->free_clusters); + ci->flags = CLUSTER_FLAG_FREE; + ci->order = 0; } /* @@ -487,24 +463,24 @@ static void __free_cluster(struct swap_info_struct *si, unsigned long idx) */ static void swap_do_scheduled_discard(struct swap_info_struct *si) { - struct swap_cluster_info *info, *ci; + struct swap_cluster_info *ci; unsigned int idx; - info = si->cluster_info; - - while (!cluster_list_empty(&si->discard_clusters)) { - idx = cluster_list_del_first(&si->discard_clusters, info); + while (!list_empty(&si->discard_clusters)) { + ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list); + list_del(&ci->list); + idx = cluster_index(si, ci); spin_unlock(&si->lock); discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, SWAPFILE_CLUSTER); spin_lock(&si->lock); - ci = lock_cluster(si, idx * SWAPFILE_CLUSTER); - __free_cluster(si, idx); + spin_lock(&ci->lock); + __free_cluster(si, ci); memset(si->swap_map + idx * SWAPFILE_CLUSTER, 0, SWAPFILE_CLUSTER); - unlock_cluster(ci); + spin_unlock(&ci->lock); } } @@ -527,20 +503,15 @@ static void swap_users_ref_free(struct percpu_ref *ref) complete(&si->comp); } -static void alloc_cluster(struct swap_info_struct *si, unsigned long idx) +static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { - struct swap_cluster_info *ci = si->cluster_info; - - VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx); - cluster_list_del_first(&si->free_clusters, ci); - cluster_set_count_flag(ci + idx, 0, 0); -} + VM_BUG_ON(ci->count != 0); + lockdep_assert_held(&si->lock); + lockdep_assert_held(&ci->lock); -static void free_cluster(struct swap_info_struct *si, unsigned long idx) -{ - struct swap_cluster_info *ci = si->cluster_info + idx; + if (ci->flags & CLUSTER_FLAG_FRAG) + si->frag_cluster_nr[ci->order]--; - VM_BUG_ON(cluster_count(ci) != 0); /* * If the swap is discardable, prepare discard the cluster * instead of free it immediately. The cluster will be freed @@ -548,133 +519,356 @@ static void free_cluster(struct swap_info_struct *si, unsigned long idx) */ if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == (SWP_WRITEOK | SWP_PAGE_DISCARD)) { - swap_cluster_schedule_discard(si, idx); + swap_cluster_schedule_discard(si, ci); return; } - __free_cluster(si, idx); + __free_cluster(si, ci); } /* - * The cluster corresponding to page_nr will be used. The cluster will be - * removed from free cluster list and its usage counter will be increased. + * The cluster corresponding to page_nr will be used. The cluster will not be + * added to free cluster list and its usage counter will be increased by 1. 
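+ * The cluster is not put on any list here; setup_swap_map_and_extents()
+ * later places partially counted clusters on the nonfull list.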
+ * Only used for initialization.
  */
 static void inc_cluster_info_page(struct swap_info_struct *p,
 	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 {
 	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+	struct swap_cluster_info *ci;
 
 	if (!cluster_info)
 		return;
-	if (cluster_is_free(&cluster_info[idx]))
-		alloc_cluster(p, idx);
-	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
-	cluster_set_count(&cluster_info[idx],
-		cluster_count(&cluster_info[idx]) + 1);
+	ci = cluster_info + idx;
+	ci->count++;
+
+	VM_BUG_ON(ci->count > SWAPFILE_CLUSTER);
+	VM_BUG_ON(ci->flags);
 }
 
 /*
- * The cluster corresponding to page_nr decreases one usage. If the usage
- * counter becomes 0, which means no page in the cluster is in using, we can
- * optionally discard the cluster and add it to free cluster list.
+ * The usage counter of the cluster @ci decreases by @nr_pages. If the counter
+ * becomes 0, which means no page in the cluster is in use, we can optionally
+ * discard the cluster and add it to the free cluster list.
  */
 static void dec_cluster_info_page(struct swap_info_struct *p,
-	struct swap_cluster_info *cluster_info, unsigned long page_nr)
+	struct swap_cluster_info *ci, int nr_pages)
 {
-	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+	if (!p->cluster_info)
+		return;
 
-	if (!cluster_info)
+	VM_BUG_ON(ci->count < nr_pages);
+	VM_BUG_ON(cluster_is_free(ci));
+	lockdep_assert_held(&p->lock);
+	lockdep_assert_held(&ci->lock);
+	ci->count -= nr_pages;
+
+	if (!ci->count) {
+		free_cluster(p, ci);
 		return;
+	}
 
-	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
-	cluster_set_count(&cluster_info[idx],
-		cluster_count(&cluster_info[idx]) - 1);
+	if (!(ci->flags & CLUSTER_FLAG_NONFULL)) {
+		VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
+		if (ci->flags & CLUSTER_FLAG_FRAG)
+			p->frag_cluster_nr[ci->order]--;
+		list_move_tail(&ci->list, &p->nonfull_clusters[ci->order]);
+		ci->flags = CLUSTER_FLAG_NONFULL;
+	}
+}
 
-	if (cluster_count(&cluster_info[idx]) == 0)
-		free_cluster(p, idx);
+static bool cluster_reclaim_range(struct swap_info_struct *si,
+				  struct swap_cluster_info *ci,
+				  unsigned long start, unsigned long end)
+{
+	unsigned char *map = si->swap_map;
+	unsigned long offset;
+
+	spin_unlock(&ci->lock);
+	spin_unlock(&si->lock);
+
+	for (offset = start; offset < end; offset++) {
+		switch (READ_ONCE(map[offset])) {
+		case 0:
+			continue;
+		case SWAP_HAS_CACHE:
+			if (__try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT) > 0)
+				continue;
+			goto out;
+		default:
+			goto out;
+		}
+	}
+out:
+	spin_lock(&si->lock);
+	spin_lock(&ci->lock);
+
+	/*
+	 * Recheck the range no matter whether reclaim succeeded or not; the
+	 * slot could have been freed while we were not holding the lock.
+	 */
+	for (offset = start; offset < end; offset++)
+		if (READ_ONCE(map[offset]))
+			return false;
+
+	return true;
 }
 
-/*
- * It's possible scan_swap_map_slots() uses a free cluster in the middle of free
- * cluster list. Avoiding such abuse to avoid list corruption.
- */ -static bool -scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, - unsigned long offset) +static bool cluster_scan_range(struct swap_info_struct *si, + struct swap_cluster_info *ci, + unsigned long start, unsigned int nr_pages) { - struct percpu_cluster *percpu_cluster; - bool conflict; + unsigned long offset, end = start + nr_pages; + unsigned char *map = si->swap_map; + bool need_reclaim = false; - offset /= SWAPFILE_CLUSTER; - conflict = !cluster_list_empty(&si->free_clusters) && - offset != cluster_list_first(&si->free_clusters) && - cluster_is_free(&si->cluster_info[offset]); + for (offset = start; offset < end; offset++) { + switch (READ_ONCE(map[offset])) { + case 0: + continue; + case SWAP_HAS_CACHE: + if (!vm_swap_full()) + return false; + need_reclaim = true; + continue; + default: + return false; + } + } - if (!conflict) - return false; + if (need_reclaim) + return cluster_reclaim_range(si, ci, start, end); - percpu_cluster = this_cpu_ptr(si->percpu_cluster); - cluster_set_null(&percpu_cluster->index); return true; } +static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci, + unsigned int start, unsigned char usage, + unsigned int order) +{ + unsigned int nr_pages = 1 << order; + + if (cluster_is_free(ci)) { + if (nr_pages < SWAPFILE_CLUSTER) { + list_move_tail(&ci->list, &si->nonfull_clusters[order]); + ci->flags = CLUSTER_FLAG_NONFULL; + } + ci->order = order; + } + + memset(si->swap_map + start, usage, nr_pages); + swap_range_alloc(si, start, nr_pages); + ci->count += nr_pages; + + if (ci->count == SWAPFILE_CLUSTER) { + VM_BUG_ON(!(ci->flags & + (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG))); + if (ci->flags & CLUSTER_FLAG_FRAG) + si->frag_cluster_nr[ci->order]--; + list_move_tail(&ci->list, &si->full_clusters); + ci->flags = CLUSTER_FLAG_FULL; + } +} + +static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigned long offset, + unsigned int *foundp, unsigned int order, + unsigned char usage) +{ + unsigned long start = offset & ~(SWAPFILE_CLUSTER - 1); + unsigned long end = min(start + SWAPFILE_CLUSTER, si->max); + unsigned int nr_pages = 1 << order; + struct swap_cluster_info *ci; + + if (end < nr_pages) + return SWAP_NEXT_INVALID; + end -= nr_pages; + + ci = lock_cluster(si, offset); + if (ci->count + nr_pages > SWAPFILE_CLUSTER) { + offset = SWAP_NEXT_INVALID; + goto done; + } + + while (offset <= end) { + if (cluster_scan_range(si, ci, offset, nr_pages)) { + cluster_alloc_range(si, ci, offset, usage, order); + *foundp = offset; + if (ci->count == SWAPFILE_CLUSTER) { + offset = SWAP_NEXT_INVALID; + goto done; + } + offset += nr_pages; + break; + } + offset += nr_pages; + } + if (offset > end) + offset = SWAP_NEXT_INVALID; +done: + unlock_cluster(ci); + return offset; +} + +static void swap_reclaim_full_clusters(struct swap_info_struct *si) +{ + long to_scan = 1; + unsigned long offset, end; + struct swap_cluster_info *ci; + unsigned char *map = si->swap_map; + int nr_reclaim, total_reclaimed = 0; + + if (atomic_long_read(&nr_swap_pages) <= SWAPFILE_CLUSTER) + to_scan = si->inuse_pages / SWAPFILE_CLUSTER; + + while (!list_empty(&si->full_clusters)) { + ci = list_first_entry(&si->full_clusters, struct swap_cluster_info, list); + list_move_tail(&ci->list, &si->full_clusters); + offset = cluster_offset(si, ci); + end = min(si->max, offset + SWAPFILE_CLUSTER); + to_scan--; + + while (offset < end) { + if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) { + spin_unlock(&si->lock); + nr_reclaim = 
__try_to_reclaim_swap(si, offset,
+							      TTRS_ANYWAY | TTRS_DIRECT);
+				spin_lock(&si->lock);
+				if (nr_reclaim > 0) {
+					offset += nr_reclaim;
+					total_reclaimed += nr_reclaim;
+					continue;
+				} else if (nr_reclaim < 0) {
+					offset += -nr_reclaim;
+					continue;
+				}
+			}
+			offset++;
+		}
+		if (to_scan <= 0 || total_reclaimed)
+			break;
+	}
+}
+
 /*
- * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
- * might involve allocating a new cluster for current CPU too.
+ * Try to get swap entries with the specified order from the current CPU's
+ * swap entry pool (a cluster). This might involve allocating a new cluster
+ * for the current CPU too.
  */
-static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
-	unsigned long *offset, unsigned long *scan_base)
+static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
+					      unsigned char usage)
 {
 	struct percpu_cluster *cluster;
 	struct swap_cluster_info *ci;
-	unsigned long tmp, max;
+	unsigned int offset, found = 0;
 
 new_cluster:
+	lockdep_assert_held(&si->lock);
 	cluster = this_cpu_ptr(si->percpu_cluster);
-	if (cluster_is_null(&cluster->index)) {
-		if (!cluster_list_empty(&si->free_clusters)) {
-			cluster->index = si->free_clusters.head;
-			cluster->next = cluster_next(&cluster->index) *
-					SWAPFILE_CLUSTER;
-		} else if (!cluster_list_empty(&si->discard_clusters)) {
-			/*
-			 * we don't have free cluster but have some clusters in
-			 * discarding, do discard now and reclaim them, then
-			 * reread cluster_next_cpu since we dropped si->lock
-			 */
-			swap_do_scheduled_discard(si);
-			*scan_base = this_cpu_read(*si->cluster_next_cpu);
-			*offset = *scan_base;
-			goto new_cluster;
-		} else
-			return false;
+	offset = cluster->next[order];
+	if (offset) {
+		offset = alloc_swap_scan_cluster(si, offset, &found, order, usage);
+		if (found)
+			goto done;
 	}
 
-	/*
-	 * Other CPUs can use our cluster if they can't find a free cluster,
-	 * check if there is still free entry in the cluster
-	 */
-	tmp = cluster->next;
-	max = min_t(unsigned long, si->max,
-		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
-	if (tmp < max) {
-		ci = lock_cluster(si, tmp);
-		while (tmp < max) {
-			if (!si->swap_map[tmp])
+	if (!list_empty(&si->free_clusters)) {
+		ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list);
+		offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage);
+		VM_BUG_ON(!found);
+		goto done;
+	}
+
+	if (order < PMD_ORDER) {
+		unsigned int frags = 0;
+
+		while (!list_empty(&si->nonfull_clusters[order])) {
+			ci = list_first_entry(&si->nonfull_clusters[order],
+					      struct swap_cluster_info, list);
+			list_move_tail(&ci->list, &si->frag_clusters[order]);
+			ci->flags = CLUSTER_FLAG_FRAG;
+			si->frag_cluster_nr[order]++;
+			offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+							 &found, order, usage);
+			frags++;
+			if (found)
 				break;
-			tmp++;
 		}
-		unlock_cluster(ci);
+
+		if (!found) {
+			/*
+			 * Nonfull clusters were moved to the frag list tail if
+			 * we reached here; count them too, and don't over-scan
+			 * the frag list.
+			 */
+			while (frags < si->frag_cluster_nr[order]) {
+				ci = list_first_entry(&si->frag_clusters[order],
+						      struct swap_cluster_info, list);
+				/*
+				 * Rotate the frag list to iterate: these clusters all
+				 * failed a high-order allocation or were moved here due
+				 * to per-CPU usage; rotating helps keep usable clusters
+				 * ahead.
+				 */
+				list_move_tail(&ci->list, &si->frag_clusters[order]);
+				offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+								 &found, order, usage);
+				frags++;
+				if (found)
+					break;
+			}
+		}
+	}
+
+	if (found)
+		goto done;
+
+	if (!list_empty(&si->discard_clusters)) {
+		/*
+		 * We don't have a free cluster, but some clusters are being
+		 * discarded; do the discard now and reclaim them, then scan
+		 * for a usable cluster again since we dropped si->lock.
+		 */
+		swap_do_scheduled_discard(si);
+		goto new_cluster;
+	}
+
+	if (order)
+		goto done;
+
+	/* Order 0 stealing from higher order */
+	for (int o = 1; o < SWAP_NR_ORDERS; o++) {
+		/*
+		 * Clusters here have at least one usable slot and can't fail order 0
+		 * allocation, but reclaim may drop si->lock and race with another user.
+		 */
+		while (!list_empty(&si->frag_clusters[o])) {
+			ci = list_first_entry(&si->frag_clusters[o],
+					      struct swap_cluster_info, list);
+			offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+							 &found, 0, usage);
+			if (found)
+				goto done;
+		}
+
+		while (!list_empty(&si->nonfull_clusters[o])) {
+			ci = list_first_entry(&si->nonfull_clusters[o],
+					      struct swap_cluster_info, list);
+			offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+							 &found, 0, usage);
+			if (found)
+				goto done;
+		}
+	}
+
+done:
+	/* Try reclaim from full clusters if the device is nearly full */
+	if (vm_swap_full() && (!found || (si->pages - si->inuse_pages) < SWAPFILE_CLUSTER)) {
+		swap_reclaim_full_clusters(si);
+		if (!found && !order && si->pages != si->inuse_pages)
+			goto new_cluster;
+	}
+
+	cluster->next[order] = offset;
+	return found;
 }
 
 static void __del_from_avail_list(struct swap_info_struct *p)
@@ -796,15 +990,34 @@ static bool swap_offset_available_and_locked(struct swap_info_struct *si,
 	return false;
 }
 
+static int cluster_alloc_swap(struct swap_info_struct *si,
+			      unsigned char usage, int nr,
+			      swp_entry_t slots[], int order)
+{
+	int n_ret = 0;
+
+	VM_BUG_ON(!si->cluster_info);
+
+	while (n_ret < nr) {
+		unsigned long offset = cluster_alloc_swap_entry(si, order, usage);
+
+		if (!offset)
+			break;
+		slots[n_ret++] = swp_entry(si->type, offset);
+	}
+
+	return n_ret;
+}
+
 static int scan_swap_map_slots(struct swap_info_struct *si,
 			       unsigned char usage, int nr,
-			       swp_entry_t slots[])
+			       swp_entry_t slots[], int order)
 {
-	struct swap_cluster_info *ci;
 	unsigned long offset;
 	unsigned long scan_base;
 	unsigned long last_in_cluster = 0;
 	int latency_ration = LATENCY_LIMIT;
+	unsigned int nr_pages = 1 << order;
 	int n_ret = 0;
 	bool scanned_many = false;
 
@@ -819,23 +1032,35 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
 	 * And we let swap pages go all over an SSD partition.  Hugh
 	 */
 
+	if (order > 0) {
+		/*
+		 * Should not even be attempting large allocations when huge
+		 * page swap is disabled. Warn and fail the allocation.
+		 */
+		if (!IS_ENABLED(CONFIG_THP_SWAP) ||
+		    nr_pages > SWAPFILE_CLUSTER) {
+			VM_WARN_ON_ONCE(1);
+			return 0;
+		}
+
+		/*
+		 * The swapfile is not a block device, or is not using clusters,
+		 * so it is unable to allocate large entries.
+		 */
+		if (!(si->flags & SWP_BLKDEV) || !si->cluster_info)
+			return 0;
+	}
+
+	if (si->cluster_info)
+		return cluster_alloc_swap(si, usage, nr, slots, order);
+
 	si->flags += SWP_SCANNING;
-	/*
-	 * Use percpu scan base for SSD to reduce lock contention on
-	 * cluster and swap cache. For HDD, sequential access is more
-	 * important.
- */ - if (si->flags & SWP_SOLIDSTATE) - scan_base = this_cpu_read(*si->cluster_next_cpu); - else - scan_base = si->cluster_next; + + /* For HDD, sequential access is more important. */ + scan_base = si->cluster_next; offset = scan_base; - /* SSD algorithm */ - if (si->cluster_info) { - if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) - goto scan; - } else if (unlikely(!si->cluster_nr--)) { + if (unlikely(!si->cluster_nr--)) { if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { si->cluster_nr = SWAPFILE_CLUSTER - 1; goto checks; @@ -846,8 +1071,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si, /* * If seek is expensive, start searching for new cluster from * start of partition, to minimize the span of allocated swap. - * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info - * case, just handled by scan_swap_map_try_ssd_cluster() above. */ scan_base = offset = si->lowest_bit; last_in_cluster = offset + SWAPFILE_CLUSTER - 1; @@ -875,16 +1098,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si, } checks: - if (si->cluster_info) { - while (scan_swap_map_ssd_cluster_conflict(si, offset)) { - /* take a break if we already got some slots */ - if (n_ret) - goto done; - if (!scan_swap_map_try_ssd_cluster(si, &offset, - &scan_base)) - goto scan; - } - } if (!(si->flags & SWP_WRITEOK)) goto no_page; if (!si->highest_bit) @@ -892,32 +1105,27 @@ static int scan_swap_map_slots(struct swap_info_struct *si, if (offset > si->highest_bit) scan_base = offset = si->lowest_bit; - ci = lock_cluster(si, offset); /* reuse swap entry of cache-only swap if not busy. */ if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { int swap_was_freed; - unlock_cluster(ci); spin_unlock(&si->lock); - swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); + swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT); spin_lock(&si->lock); /* entry was freed successfully, try to use this again */ - if (swap_was_freed) + if (swap_was_freed > 0) goto checks; goto scan; /* check next one */ } if (si->swap_map[offset]) { - unlock_cluster(ci); if (!n_ret) goto scan; else goto done; } - WRITE_ONCE(si->swap_map[offset], usage); - inc_cluster_info_page(si, si->cluster_info, offset); - unlock_cluster(ci); + memset(si->swap_map + offset, usage, nr_pages); - swap_range_alloc(si, offset, 1); + swap_range_alloc(si, offset, nr_pages); slots[n_ret++] = swp_entry(si->type, offset); /* got enough slots or reach max slots? */ @@ -936,11 +1144,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si, latency_ration = LATENCY_LIMIT; } - /* try to get more slots in cluster */ - if (si->cluster_info) { - if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) - goto checks; - } else if (si->cluster_nr && !si->swap_map[++offset]) { + if (si->cluster_nr && !si->swap_map[++offset]) { /* non-ssd case, still more slots in cluster? 
*/
 		--si->cluster_nr;
 		goto checks;
@@ -966,11 +1170,13 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
 	}
 
 done:
-	set_cluster_next(si, offset + 1);
+	if (order == 0)
+		set_cluster_next(si, offset + 1);
 	si->flags -= SWP_SCANNING;
 	return n_ret;
 
 scan:
+	VM_WARN_ON(order > 0);
 	spin_unlock(&si->lock);
 	while (++offset <= READ_ONCE(si->highest_bit)) {
 		if (unlikely(--latency_ration < 0)) {
@@ -999,62 +1205,15 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
 	return n_ret;
 }
 
-static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
-{
-	unsigned long idx;
-	struct swap_cluster_info *ci;
-	unsigned long offset;
-
-	/*
-	 * Should not even be attempting cluster allocations when huge
-	 * page swap is disabled. Warn and fail the allocation.
-	 */
-	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
-		VM_WARN_ON_ONCE(1);
-		return 0;
-	}
-
-	if (cluster_list_empty(&si->free_clusters))
-		return 0;
-
-	idx = cluster_list_first(&si->free_clusters);
-	offset = idx * SWAPFILE_CLUSTER;
-	ci = lock_cluster(si, offset);
-	alloc_cluster(si, idx);
-	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
-
-	memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
-	unlock_cluster(ci);
-	swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
-	*slot = swp_entry(si->type, offset);
-
-	return 1;
-}
-
-static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
-{
-	unsigned long offset = idx * SWAPFILE_CLUSTER;
-	struct swap_cluster_info *ci;
-
-	ci = lock_cluster(si, offset);
-	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
-	cluster_set_count_flag(ci, 0, 0);
-	free_cluster(si, idx);
-	unlock_cluster(ci);
-	swap_range_free(si, offset, SWAPFILE_CLUSTER);
-}
-
-int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
+int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order)
 {
-	unsigned long size = swap_entry_size(entry_size);
+	int order = swap_entry_order(entry_order);
+	unsigned long size = 1 << order;
 	struct swap_info_struct *si, *next;
 	long avail_pgs;
 	int n_ret = 0;
 	int node;
 
-	/* Only single cluster request supported */
-	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
-
 	spin_lock(&swap_avail_lock);
 
 	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
@@ -1090,14 +1249,10 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 			spin_unlock(&si->lock);
 			goto nextsi;
 		}
-		if (size == SWAPFILE_CLUSTER) {
-			if (si->flags & SWP_BLKDEV)
-				n_ret = swap_alloc_cluster(si, swp_entries);
-		} else
-			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
-						    n_goal, swp_entries);
+		n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
+					    n_goal, swp_entries, order);
 		spin_unlock(&si->lock);
-		if (n_ret || size == SWAPFILE_CLUSTER)
+		if (n_ret || size > 1)
 			goto check_out;
 		cond_resched();
@@ -1309,34 +1464,80 @@ static unsigned char __swap_entry_free(struct swap_info_struct *p,
 	return usage;
 }
 
-static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
+/*
+ * Drop the last HAS_CACHE flag of swap entries; callers have to
+ * ensure all entries belong to the same cgroup.
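+ * The whole range is then uncharged with a single
+ * mem_cgroup_uncharge_swap() call.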
+ */ +static void swap_entry_range_free(struct swap_info_struct *p, swp_entry_t entry, + unsigned int nr_pages) { - struct swap_cluster_info *ci; unsigned long offset = swp_offset(entry); - unsigned char count; + unsigned char *map = p->swap_map + offset; + unsigned char *map_end = map + nr_pages; + struct swap_cluster_info *ci; ci = lock_cluster(p, offset); - count = p->swap_map[offset]; - VM_BUG_ON(count != SWAP_HAS_CACHE); - p->swap_map[offset] = 0; - dec_cluster_info_page(p, p->cluster_info, offset); + do { + VM_BUG_ON(*map != SWAP_HAS_CACHE); + *map = 0; + } while (++map < map_end); + dec_cluster_info_page(p, ci, nr_pages); unlock_cluster(ci); - mem_cgroup_uncharge_swap(entry, 1); - swap_range_free(p, offset, 1); + mem_cgroup_uncharge_swap(entry, nr_pages); + swap_range_free(p, offset, nr_pages); +} + +static void cluster_swap_free_nr(struct swap_info_struct *sis, + unsigned long offset, int nr_pages, + unsigned char usage) +{ + struct swap_cluster_info *ci; + DECLARE_BITMAP(to_free, BITS_PER_LONG) = { 0 }; + int i, nr; + + ci = lock_cluster_or_swap_info(sis, offset); + while (nr_pages) { + nr = min(BITS_PER_LONG, nr_pages); + for (i = 0; i < nr; i++) { + if (!__swap_entry_free_locked(sis, offset + i, usage)) + bitmap_set(to_free, i, 1); + } + if (!bitmap_empty(to_free, BITS_PER_LONG)) { + unlock_cluster_or_swap_info(sis, ci); + for_each_set_bit(i, to_free, BITS_PER_LONG) + free_swap_slot(swp_entry(sis->type, offset + i)); + if (nr == nr_pages) + return; + bitmap_clear(to_free, 0, BITS_PER_LONG); + ci = lock_cluster_or_swap_info(sis, offset); + } + offset += nr; + nr_pages -= nr; + } + unlock_cluster_or_swap_info(sis, ci); } /* * Caller has made sure that the swap device corresponding to entry * is still around or has not been recycled. */ -void swap_free(swp_entry_t entry) +void swap_free_nr(swp_entry_t entry, int nr_pages) { - struct swap_info_struct *p; + int nr; + struct swap_info_struct *sis; + unsigned long offset = swp_offset(entry); - p = _swap_info_get(entry); - if (p) - __swap_entry_free(p, entry); + sis = _swap_info_get(entry); + if (!sis) + return; + + while (nr_pages) { + nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER); + cluster_swap_free_nr(sis, offset, nr, 1); + offset += nr; + nr_pages -= nr; + } } /* @@ -1345,39 +1546,23 @@ void swap_free(swp_entry_t entry) void put_swap_folio(struct folio *folio, swp_entry_t entry) { unsigned long offset = swp_offset(entry); - unsigned long idx = offset / SWAPFILE_CLUSTER; struct swap_cluster_info *ci; struct swap_info_struct *si; - unsigned char *map; - unsigned int i, free_entries = 0; - unsigned char val; - int size = swap_entry_size(folio_nr_pages(folio)); + int size = 1 << swap_entry_order(folio_order(folio)); si = _swap_info_get(entry); if (!si) return; ci = lock_cluster_or_swap_info(si, offset); - if (size == SWAPFILE_CLUSTER) { - VM_BUG_ON(!cluster_is_huge(ci)); - map = si->swap_map + offset; - for (i = 0; i < SWAPFILE_CLUSTER; i++) { - val = map[i]; - VM_BUG_ON(!(val & SWAP_HAS_CACHE)); - if (val == SWAP_HAS_CACHE) - free_entries++; - } - cluster_clear_huge(ci); - if (free_entries == SWAPFILE_CLUSTER) { - unlock_cluster_or_swap_info(si, ci); - spin_lock(&si->lock); - mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER); - swap_free_cluster(si, idx); - spin_unlock(&si->lock); - return; - } + if (size > 1 && swap_is_has_cache(si, offset, size)) { + unlock_cluster_or_swap_info(si, ci); + spin_lock(&si->lock); + swap_entry_range_free(si, entry, size); + spin_unlock(&si->lock); + return; } - for (i = 0; i < 
size; i++, entry.val++) { + for (int i = 0; i < size; i++, entry.val++) { if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) { unlock_cluster_or_swap_info(si, ci); free_swap_slot(entry); @@ -1389,23 +1574,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) unlock_cluster_or_swap_info(si, ci); } -#ifdef CONFIG_THP_SWAP -int split_swap_cluster(swp_entry_t entry) -{ - struct swap_info_struct *si; - struct swap_cluster_info *ci; - unsigned long offset = swp_offset(entry); - - si = _swap_info_get(entry); - if (!si) - return -EBUSY; - ci = lock_cluster(si, offset); - cluster_clear_huge(ci); - unlock_cluster(ci); - return 0; -} -#endif - static int swp_entry_cmp(const void *ent1, const void *ent2) { const swp_entry_t *e1 = ent1, *e2 = ent2; @@ -1434,7 +1602,7 @@ void swapcache_free_entries(swp_entry_t *entries, int n) for (i = 0; i < n; ++i) { p = swap_info_get_cont(entries[i], prev); if (p) - swap_entry_free(p, entries[i]); + swap_entry_range_free(p, entries[i], 1); prev = p; } if (p) @@ -1513,22 +1681,23 @@ int swp_swapcount(swp_entry_t entry) } static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, - swp_entry_t entry) + swp_entry_t entry, int order) { struct swap_cluster_info *ci; unsigned char *map = si->swap_map; + unsigned int nr_pages = 1 << order; unsigned long roffset = swp_offset(entry); - unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER); + unsigned long offset = round_down(roffset, nr_pages); int i; bool ret = false; ci = lock_cluster_or_swap_info(si, offset); - if (!ci || !cluster_is_huge(ci)) { + if (!ci || nr_pages == 1) { if (swap_count(map[roffset])) ret = true; goto unlock_out; } - for (i = 0; i < SWAPFILE_CLUSTER; i++) { + for (i = 0; i < nr_pages; i++) { if (swap_count(map[offset + i])) { ret = true; break; @@ -1550,19 +1719,10 @@ static bool folio_swapped(struct folio *folio) if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio))) return swap_swapcount(si, entry) != 0; - return swap_page_trans_huge_swapped(si, entry); + return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); } -/** - * folio_free_swap() - Free the swap space used for this folio. - * @folio: The folio to remove. - * - * If swap is getting full, or if there are no more mappings of this folio, - * then call folio_free_swap to free its swap space. - * - * Return: true if we were able to release the swap space. - */ -bool folio_free_swap(struct folio *folio) +static bool folio_swapcache_freeable(struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); @@ -1570,8 +1730,6 @@ bool folio_free_swap(struct folio *folio) return false; if (folio_test_writeback(folio)) return false; - if (folio_swapped(folio)) - return false; /* * Once hibernation has begun to create its image of memory, @@ -1591,38 +1749,112 @@ bool folio_free_swap(struct folio *folio) if (pm_suspended_storage()) return false; + return true; +} + +/** + * folio_free_swap() - Free the swap space used for this folio. + * @folio: The folio to remove. + * + * If swap is getting full, or if there are no more mappings of this folio, + * then call folio_free_swap to free its swap space. + * + * Return: true if we were able to release the swap space. 
+ */ +bool folio_free_swap(struct folio *folio) +{ + if (!folio_swapcache_freeable(folio)) + return false; + if (folio_swapped(folio)) + return false; + delete_from_swap_cache(folio); folio_set_dirty(folio); return true; } -/* - * Free the swap entry like above, but also try to - * free the page cache entry if it is the last user. +/** + * free_swap_and_cache_nr() - Release reference on range of swap entries and + * reclaim their cache if no more references remain. + * @entry: First entry of range. + * @nr: Number of entries in range. + * + * For each swap entry in the contiguous range, release a reference. If any swap + * entries become free, try to reclaim their underlying folios, if present. The + * offset range is defined by [entry.offset, entry.offset + nr). */ -int free_swap_and_cache(swp_entry_t entry) +void free_swap_and_cache_nr(swp_entry_t entry, int nr) { - struct swap_info_struct *p; + const unsigned long start_offset = swp_offset(entry); + const unsigned long end_offset = start_offset + nr; + unsigned int type = swp_type(entry); + struct swap_info_struct *si; + bool any_only_cache = false; + unsigned long offset; unsigned char count; if (non_swap_entry(entry)) - return 1; + return; - p = get_swap_device(entry); - if (p) { - if (WARN_ON(data_race(!p->swap_map[swp_offset(entry)]))) { - put_swap_device(p); - return 0; + si = get_swap_device(entry); + if (!si) + return; + + if (WARN_ON(end_offset > si->max)) + goto out; + + /* + * First free all entries in the range. + */ + for (offset = start_offset; offset < end_offset; offset++) { + if (data_race(si->swap_map[offset])) { + count = __swap_entry_free(si, swp_entry(type, offset)); + if (count == SWAP_HAS_CACHE) + any_only_cache = true; + } else { + WARN_ON_ONCE(1); } + } - count = __swap_entry_free(p, entry); - if (count == SWAP_HAS_CACHE && - !swap_page_trans_huge_swapped(p, entry)) - __try_to_reclaim_swap(p, swp_offset(entry), - TTRS_UNMAPPED | TTRS_FULL); - put_swap_device(p); + /* + * Short-circuit the below loop if none of the entries had their + * reference drop to zero. + */ + if (!any_only_cache) + goto out; + + /* + * Now go back over the range trying to reclaim the swap cache. This is + * more efficient for large folios because we will only try to reclaim + * the swap once per folio in the common case. If we do + * __swap_entry_free() and __try_to_reclaim_swap() in the same loop, the + * latter will get a reference and lock the folio for every individual + * page but will only succeed once the swap slot for every subpage is + * zero. + */ + for (offset = start_offset; offset < end_offset; offset += nr) { + nr = 1; + if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { + /* + * Folios are always naturally aligned in swap so + * advance forward to the next boundary. Zero means no + * folio was found for the swap entry, so advance by 1 + * in this case. Negative value means folio was found + * but could not be reclaimed. Here we can still advance + * to the next boundary. 
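+			 * For example: for a 16-page folio at offsets 32..47,
+			 * a failed reclaim probed at offset 35 returns -16, and
+			 * ALIGN(35 + 1, 16) - 35 = 13 advances to the folio
+			 * end at offset 48.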
+ */ + nr = __try_to_reclaim_swap(si, offset, + TTRS_UNMAPPED | TTRS_FULL); + if (nr == 0) + nr = 1; + else if (nr < 0) + nr = -nr; + nr = ALIGN(offset + 1, nr) - offset; + } } - return p != NULL; + +out: + put_swap_device(si); } #ifdef CONFIG_HIBERNATION @@ -1637,7 +1869,7 @@ swp_entry_t get_swap_page_of_type(int type) /* This is called for allocating swap entry, not cache */ spin_lock(&si->lock); - if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry)) + if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0)) atomic_long_dec(&nr_swap_pages); spin_unlock(&si->lock); fail: @@ -1752,18 +1984,24 @@ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte) static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, swp_entry_t entry, struct folio *folio) { - struct page *page = folio_file_page(folio, swp_offset(entry)); - struct page *swapcache; + struct page *page; + struct folio *swapcache; spinlock_t *ptl; pte_t *pte, new_pte, old_pte; - bool hwpoisoned = PageHWPoison(page); + bool hwpoisoned = false; int ret = 1; - swapcache = page; - page = ksm_might_need_to_copy(page, vma, addr); - if (unlikely(!page)) + swapcache = folio; + folio = ksm_might_need_to_copy(folio, vma, addr); + if (unlikely(!folio)) return -ENOMEM; - else if (unlikely(PTR_ERR(page) == -EHWPOISON)) + else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { + hwpoisoned = true; + folio = swapcache; + } + + page = folio_file_page(folio, swp_offset(entry)); + if (PageHWPoison(page)) hwpoisoned = true; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); @@ -1775,13 +2013,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, old_pte = ptep_get(pte); - if (unlikely(hwpoisoned || !PageUptodate(page))) { + if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) { swp_entry_t swp_entry; dec_mm_counter(vma->vm_mm, MM_SWAPENTS); if (hwpoisoned) { - swp_entry = make_hwpoison_entry(swapcache); - page = swapcache; + swp_entry = make_hwpoison_entry(page); } else { swp_entry = make_poisoned_swp_entry(); } @@ -1795,31 +2032,37 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, * when reading from swap. This metadata may be indexed by swap entry * so this must be called before swap_free(). */ - arch_swap_restore(entry, page_folio(page)); - - /* See do_swap_page() */ - BUG_ON(!PageAnon(page) && PageMappedToDisk(page)); - BUG_ON(PageAnon(page) && PageAnonExclusive(page)); + arch_swap_restore(entry, folio); dec_mm_counter(vma->vm_mm, MM_SWAPENTS); inc_mm_counter(vma->vm_mm, MM_ANONPAGES); - get_page(page); - if (page == swapcache) { + folio_get(folio); + if (folio == swapcache) { rmap_t rmap_flags = RMAP_NONE; /* - * See do_swap_page(): PageWriteback() would be problematic. - * However, we do a wait_on_page_writeback() just before this - * call and have the page locked. + * See do_swap_page(): writeback would be problematic. + * However, we do a folio_wait_writeback() just before this + * call and have the folio locked. */ - VM_BUG_ON_PAGE(PageWriteback(page), page); + VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio); if (pte_swp_exclusive(old_pte)) rmap_flags |= RMAP_EXCLUSIVE; - - page_add_anon_rmap(page, vma, addr, rmap_flags); + /* + * We currently only expect small !anon folios, which are either + * fully exclusive or fully shared. If we ever get large folios + * here, we have to be careful. 
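The advance logic in free_swap_and_cache_nr() above is subtle: __try_to_reclaim_swap() reports the folio size in pages, and the loop then jumps to the next naturally aligned folio boundary. A minimal userspace sketch of just that arithmetic, with a hypothetical try_reclaim() stub standing in for __try_to_reclaim_swap() and an invented 16-page folio at offsets 16..31::

    /* Sketch of the offset-advance logic described above; not kernel code. */
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    /* >0: folio of that many pages; 0: no folio; <0: folio found but unreclaimable */
    static int try_reclaim(unsigned long offset)
    {
            return (offset >= 16 && offset < 32) ? 16 : 0;
    }

    int main(void)
    {
            unsigned long offset;
            int nr;

            for (offset = 10; offset < 40; offset += nr) {
                    nr = try_reclaim(offset);
                    if (nr == 0)
                            nr = 1;         /* no folio: step one entry */
                    else if (nr < 0)
                            nr = -nr;       /* unreclaimable: still skip it whole */
                    /* folios are naturally aligned, so jump to the next boundary */
                    nr = ALIGN(offset + 1, nr) - offset;
                    printf("offset %lu -> advance %d\n", offset, nr);
            }
            return 0;
    }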
+ */ + if (!folio_test_anon(folio)) { + VM_WARN_ON_ONCE(folio_test_large(folio)); + VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + folio_add_new_anon_rmap(folio, vma, addr, rmap_flags); + } else { + folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags); + } } else { /* ksm created a completely new copy */ - page_add_new_anon_rmap(page, vma, addr); - lru_cache_add_inactive_or_unevictable(page, vma); + folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); + folio_add_lru_vma(folio, vma); } new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot)); if (pte_swp_soft_dirty(old_pte)) @@ -1832,9 +2075,9 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, out: if (pte) pte_unmap_unlock(pte, ptl); - if (page != swapcache) { - unlock_page(page); - put_page(page); + if (folio != swapcache) { + folio_unlock(folio); + folio_put(folio); } return ret; } @@ -2921,8 +3164,15 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, nr_good_pages = maxpages - 1; /* omit header page */ - cluster_list_init(&p->free_clusters); - cluster_list_init(&p->discard_clusters); + INIT_LIST_HEAD(&p->free_clusters); + INIT_LIST_HEAD(&p->full_clusters); + INIT_LIST_HEAD(&p->discard_clusters); + + for (i = 0; i < SWAP_NR_ORDERS; i++) { + INIT_LIST_HEAD(&p->nonfull_clusters[i]); + INIT_LIST_HEAD(&p->frag_clusters[i]); + p->frag_cluster_nr[i] = 0; + } for (i = 0; i < swap_header->info.nr_badpages; i++) { unsigned int page_nr = swap_header->info.badpages[i]; @@ -2965,7 +3215,6 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, if (!cluster_info) return nr_extents; - /* * Reduce false cache line sharing between cluster_info and * sharing same address space. @@ -2973,14 +3222,18 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, for (k = 0; k < SWAP_CLUSTER_COLS; k++) { j = (k + col) % SWAP_CLUSTER_COLS; for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) { + struct swap_cluster_info *ci; idx = i * SWAP_CLUSTER_COLS + j; + ci = cluster_info + idx; if (idx >= nr_clusters) continue; - if (cluster_count(&cluster_info[idx])) + if (ci->count) { + ci->flags = CLUSTER_FLAG_NONFULL; + list_add_tail(&ci->list, &p->nonfull_clusters[0]); continue; - cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE); - cluster_list_add_tail(&p->free_clusters, cluster_info, - idx); + } + ci->flags = CLUSTER_FLAG_FREE; + list_add_tail(&ci->list, &p->free_clusters); } } return nr_extents; @@ -3086,7 +3339,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->flags |= SWP_SYNCHRONOUS_IO; if (p->bdev && bdev_nonrot(p->bdev)) { - int cpu; + int cpu, i; unsigned long ci, nr_cluster; p->flags |= SWP_SOLIDSTATE; @@ -3122,8 +3375,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) } for_each_possible_cpu(cpu) { struct percpu_cluster *cluster; + cluster = per_cpu_ptr(p->percpu_cluster, cpu); - cluster_set_null(&cluster->index); + for (i = 0; i < SWAP_NR_ORDERS; i++) + cluster->next[i] = SWAP_NEXT_INVALID; } } else { atomic_inc(&nr_rotate_swap); @@ -3266,7 +3521,7 @@ void si_swapinfo(struct sysinfo *val) } /* - * Verify that a swap entry is valid and increment its swap map count. + * Verify that nr swap entries are valid and increment their swap map counts. * * Returns error code in following case. * - success -> 0 @@ -3276,59 +3531,73 @@ void si_swapinfo(struct sysinfo *val) * - swap-cache reference is requested but the entry is not used. -> ENOENT * - swap-mapped reference requested but needs continued swap count. 
-> ENOMEM */ -static int __swap_duplicate(swp_entry_t entry, unsigned char usage) +static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr) { struct swap_info_struct *p; struct swap_cluster_info *ci; unsigned long offset; unsigned char count; unsigned char has_cache; - int err; + int err, i; p = swp_swap_info(entry); offset = swp_offset(entry); + VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER); + VM_WARN_ON(usage == 1 && nr > 1); ci = lock_cluster_or_swap_info(p, offset); - count = p->swap_map[offset]; - - /* - * swapin_readahead() doesn't check if a swap entry is valid, so the - * swap entry could be SWAP_MAP_BAD. Check here with lock held. - */ - if (unlikely(swap_count(count) == SWAP_MAP_BAD)) { - err = -ENOENT; - goto unlock_out; - } - - has_cache = count & SWAP_HAS_CACHE; - count &= ~SWAP_HAS_CACHE; err = 0; + for (i = 0; i < nr; i++) { + count = p->swap_map[offset + i]; - if (usage == SWAP_HAS_CACHE) { + /* + * swapin_readahead() doesn't check if a swap entry is valid, so the + * swap entry could be SWAP_MAP_BAD. Check here with lock held. + */ + if (unlikely(swap_count(count) == SWAP_MAP_BAD)) { + err = -ENOENT; + goto unlock_out; + } - /* set SWAP_HAS_CACHE if there is no cache and entry is used */ - if (!has_cache && count) - has_cache = SWAP_HAS_CACHE; - else if (has_cache) /* someone else added cache */ - err = -EEXIST; - else /* no users remaining */ + has_cache = count & SWAP_HAS_CACHE; + count &= ~SWAP_HAS_CACHE; + + if (!count && !has_cache) { err = -ENOENT; + } else if (usage == SWAP_HAS_CACHE) { + if (has_cache) + err = -EEXIST; + } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) { + err = -EINVAL; + } - } else if (count || has_cache) { + if (err) + goto unlock_out; + } + + for (i = 0; i < nr; i++) { + count = p->swap_map[offset + i]; + has_cache = count & SWAP_HAS_CACHE; + count &= ~SWAP_HAS_CACHE; - if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) + if (usage == SWAP_HAS_CACHE) + has_cache = SWAP_HAS_CACHE; + else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) count += usage; - else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) - err = -EINVAL; - else if (swap_count_continued(p, offset, count)) + else if (swap_count_continued(p, offset + i, count)) count = COUNT_CONTINUED; - else + else { + /* + * Don't need to rollback changes, because if + * usage == 1, there must be nr == 1. + */ err = -ENOMEM; - } else - err = -ENOENT; /* unused swap entry */ + goto unlock_out; + } - WRITE_ONCE(p->swap_map[offset], count | has_cache); + WRITE_ONCE(p->swap_map[offset + i], count | has_cache); + } unlock_out: unlock_cluster_or_swap_info(p, ci); @@ -3339,9 +3608,9 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage) * Help swapoff by noting that swap entry belongs to shmem/tmpfs * (in which case its reference count is never incremented). */ -void swap_shmem_alloc(swp_entry_t entry) +void swap_shmem_alloc(swp_entry_t entry, int nr) { - __swap_duplicate(entry, SWAP_MAP_SHMEM); + __swap_duplicate(entry, SWAP_MAP_SHMEM, nr); } /* @@ -3355,35 +3624,29 @@ int swap_duplicate(swp_entry_t entry) { int err = 0; - while (!err && __swap_duplicate(entry, 1) == -ENOMEM) + while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM) err = add_swap_count_continuation(entry, GFP_ATOMIC); return err; } /* - * @entry: swap entry for which we allocate swap cache. + * @entry: first swap entry from which we allocate nr swap cache. 
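The batched __swap_duplicate() above is deliberately two-pass: the first loop only validates every entry in the range, the second only commits, so an error in the middle of the batch never requires rolling back earlier writes. A minimal userspace sketch of that validate-then-commit shape (the map encoding and dup_batch() helper are invented)::

    /* Validate-then-commit over a batch: reject the whole range before
     * mutating anything, so no rollback path is needed. Not kernel code. */
    #include <errno.h>
    #include <stdio.h>

    static int dup_batch(unsigned char *map, int off, int nr)
    {
            int i;

            for (i = 0; i < nr; i++)        /* pass 1: validate only */
                    if (map[off + i] == 0)
                            return -ENOENT; /* unused entry: fail the whole batch */
            for (i = 0; i < nr; i++)        /* pass 2: commit only */
                    map[off + i]++;
            return 0;
    }

    int main(void)
    {
            unsigned char map[8] = { 1, 1, 1, 0, 1, 1, 1, 1 };

            printf("dup [0,3): %d\n", dup_batch(map, 0, 3)); /* 0: committed */
            printf("dup [2,5): %d\n", dup_batch(map, 2, 3)); /* -ENOENT: untouched */
            return 0;
    }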
* - * Called when allocating swap cache for existing swap entry, + * Called when allocating swap cache for existing swap entries, * This can return error codes. Returns 0 at success. * -EEXIST means there is a swap cache. * Note: return code is different from swap_duplicate(). */ -int swapcache_prepare(swp_entry_t entry) +int swapcache_prepare(swp_entry_t entry, int nr) { - return __swap_duplicate(entry, SWAP_HAS_CACHE); + return __swap_duplicate(entry, SWAP_HAS_CACHE, nr); } -void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry) +void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr) { - struct swap_cluster_info *ci; unsigned long offset = swp_offset(entry); - unsigned char usage; - ci = lock_cluster_or_swap_info(si, offset); - usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE); - unlock_cluster_or_swap_info(si, ci); - if (!usage) - free_swap_slot(entry); + cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE); } struct swap_info_struct *swp_swap_info(swp_entry_t entry) diff --git a/mm/truncate.c b/mm/truncate.c index 70c09213bb9200cd8aa569d4b4e99afb49324ff1..daaaf558db60756c738fa08bbe2943ca345e31d9 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -469,6 +469,14 @@ void truncate_inode_pages_final(struct address_space *mapping) */ mapping_set_exiting(mapping); + /* Flush fast reflink work if any. */ + if (unlikely(mapping->fast_reflink_work)) { + flush_work(&mapping->fast_reflink_work->work); + + kfree(mapping->fast_reflink_work); + mapping->fast_reflink_work = NULL; + } + if (!mapping_empty(mapping)) { /* * As truncation uses a lockless tree lookup, cycle diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 92fe2a76f4b512a3231a5b331d7fccdbf6223f05..79ec6c3387bb9630fa442caf5c6df6c3bdfe3902 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -114,9 +114,9 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, /* Usually, cache pages are already added to LRU */ if (newly_allocated) folio_add_lru(folio); - page_add_file_rmap(page, dst_vma, false); + folio_add_file_rmap_pte(folio, page, dst_vma); } else { - page_add_new_anon_rmap(page, dst_vma, dst_addr); + folio_add_new_anon_rmap(folio, dst_vma, dst_addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, dst_vma); } diff --git a/mm/util.c b/mm/util.c index 08d49489655221f3cd79258b19fb2a6c741b5df0..a0e1a2939ed7948870beaaa2d3c30f51653ec492 100644 --- a/mm/util.c +++ b/mm/util.c @@ -23,6 +23,8 @@ #include #include #include +#include +#include #include @@ -135,6 +137,23 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp) } EXPORT_SYMBOL(kmemdup); +/** + * kmemdup_array - duplicate a given array. + * + * @src: array to duplicate. + * @element_size: size of each element of array. + * @count: number of elements to duplicate from array. + * @gfp: GFP mask to use. + * + * Return: duplicated array of @src or %NULL in case of error, + * result is physically contiguous. Use kfree() to free. 
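kmemdup_array(), whose body follows, sizes the copy with size_mul() from linux/overflow.h, which saturates to SIZE_MAX on overflow so a huge element_size * count makes the allocation fail cleanly instead of silently truncating. A userspace sketch of that saturating multiply (size_mul_sat() is a hypothetical stand-in for the kernel helper)::

    /* Mirrors the behaviour of size_mul(): saturate to SIZE_MAX on overflow
     * so a later allocation of the result fails instead of being undersized. */
    #include <stdint.h>
    #include <stdio.h>

    static size_t size_mul_sat(size_t a, size_t b)
    {
            size_t r;

            if (__builtin_mul_overflow(a, b, &r))
                    return SIZE_MAX;
            return r;
    }

    int main(void)
    {
            printf("%zu\n", size_mul_sat(8, 4));            /* 32 */
            printf("%zu\n", size_mul_sat(SIZE_MAX / 2, 3)); /* SIZE_MAX */
            return 0;
    }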
+ */ +void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp) +{ + return kmemdup(src, size_mul(element_size, count), gfp); +} +EXPORT_SYMBOL(kmemdup_array); + /** * kvmemdup - duplicate region of memory * @@ -809,6 +828,7 @@ void folio_copy(struct folio *dst, struct folio *src) cond_resched(); } } +EXPORT_SYMBOL(folio_copy); int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; int sysctl_overcommit_ratio __read_mostly = 50; @@ -821,10 +841,26 @@ int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; + struct ctl_table t; + struct rich_container_ext *ext = NULL; + + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) { + t = *table; + t.data = &ext->overcommit_ratio; + table = &t; + } ret = proc_dointvec(table, write, buffer, lenp, ppos); - if (ret == 0 && write) - sysctl_overcommit_kbytes = 0; + if (ret == 0 && write) { + if (ext) + ext->overcommit_kbytes = 0; + else + sysctl_overcommit_kbytes = 0; + } return ret; } @@ -833,12 +869,66 @@ static void sync_overcommit_as(struct work_struct *dummy) percpu_counter_sync(&vm_committed_as); } +#ifdef CONFIG_SMP +/* Sync overcommit as manually, since schedule_on_each_cpu + * cannot pass rich_container_ext directly + */ +static void rich_container_sync_overcommit_as(struct rich_container_ext *ext) +{ + struct percpu_counter *fbc = &ext->vm_committed_as; + unsigned long flags; + int cpu; + s32 *pcount; + s32 count; + + raw_spin_lock_irqsave(&fbc->lock, flags); + for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) { + pcount = per_cpu_ptr(fbc->counters, cpu); + count = *pcount; + fbc->count += count; + *pcount -= count; + } + raw_spin_unlock_irqrestore(&fbc->lock, flags); +} + +void rich_container_mm_compute_batch(struct rich_container_ext *ext, + int overcommit_policy) +{ + u64 memsized_batch; + s32 nr = num_present_cpus(); + s32 batch = max_t(s32, nr*2, 32); + unsigned long ram_pages = totalram_pages(); + + if (overcommit_policy == OVERCOMMIT_NEVER) + memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX); + else + memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX); + + ext->as_batch = max_t(s32, memsized_batch, batch); +} +#else +static void rich_container_sync_overcommit_as(struct rich_container_ext *ext) +{ +} + +void rich_container_mm_compute_batch(struct rich_container_ext *ext, + int overcommit_policy) +{ +} +#endif + int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; int new_policy = -1; int ret; + struct rich_container_ext *ext = NULL; + + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); /* * The deviation of sync_overcommit_as could be big with loose policy @@ -858,11 +948,23 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer, if (ret || new_policy == -1) return ret; + if (ext) { + rich_container_mm_compute_batch(ext, new_policy); + if (new_policy == OVERCOMMIT_NEVER) + rich_container_sync_overcommit_as(ext); + ext->overcommit_memory = new_policy; + return ret; + } mm_compute_batch(new_policy); if (new_policy == OVERCOMMIT_NEVER) schedule_on_each_cpu(sync_overcommit_as); sysctl_overcommit_memory = new_policy; } else { + if (ext) { + t = *table; + t.data = &ext->overcommit_memory; + table = &t; + } ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); } @@ -873,10 +975,26 @@ int 
overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; + struct ctl_table t; + struct rich_container_ext *ext = NULL; + + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) { + t = *table; + t.data = &ext->overcommit_kbytes; + table = &t; + } ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); - if (ret == 0 && write) - sysctl_overcommit_ratio = 0; + if (ret == 0 && write) { + if (ext) + ext->overcommit_ratio = 0; + else + sysctl_overcommit_ratio = 0; + } return ret; } @@ -897,6 +1015,28 @@ unsigned long vm_commit_limit(void) return allowed; } +#ifdef CONFIG_MEMCG +unsigned long rich_container_vm_commit_limit(struct rich_container_ext *ext, + struct mem_cgroup *memcg) +{ + unsigned long allowed; + struct mem_cgroup *iter; + unsigned long limit; + + if (ext->overcommit_kbytes) + allowed = ext->overcommit_kbytes >> (PAGE_SHIFT - 10); + else { + limit = totalram_pages() - hugetlb_total_pages(); + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) + limit = min(limit, iter->memory.max); + allowed = (limit * ext->overcommit_ratio / 100); + } + allowed += min_t(unsigned long, total_swap_pages, memcg->swap.max); + + return allowed; +} +#endif + /* * Make sure vm_committed_as in one cacheline and not cacheline shared with * other variables. It can be updated by several CPUs frequently. @@ -918,6 +1058,14 @@ struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp; */ unsigned long vm_memory_committed(void) { + struct rich_container_ext *ext = NULL; + + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) + return percpu_counter_sum_positive(&ext->vm_committed_as); return percpu_counter_sum_positive(&vm_committed_as); } EXPORT_SYMBOL_GPL(vm_memory_committed); @@ -941,16 +1089,33 @@ EXPORT_SYMBOL_GPL(vm_memory_committed); int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) { long allowed; + int overcommit = sysctl_overcommit_memory; +#ifdef CONFIG_MEMCG + struct rich_container_ext *ext = NULL; + struct mem_cgroup *memcg = NULL; + long memcg_allowed; + + rcu_read_lock(); + if (in_rich_container(current)) { + ext = rich_container_get_ext(); + memcg = rich_container_get_memcg(); + } + rcu_read_unlock(); + if (ext) { + overcommit = ext->overcommit_memory; + memcg_allowed = rich_container_vm_commit_limit(ext, memcg); + } +#endif vm_acct_memory(pages); /* * Sometimes we want to use more memory than we have */ - if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) + if (overcommit == OVERCOMMIT_ALWAYS) return 0; - if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { + if (overcommit == OVERCOMMIT_GUESS) { if (pages > totalram_pages() + total_swap_pages) goto error; return 0; @@ -972,6 +1137,10 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) allowed -= min_t(long, mm->total_vm / 32, reserve); } +#ifdef CONFIG_MEMCG + if (ext && percpu_counter_read_positive(&ext->vm_committed_as) < memcg_allowed) + return 0; +#endif if (percpu_counter_read_positive(&vm_committed_as) < allowed) return 0; error: diff --git a/mm/vmscan.c b/mm/vmscan.c index 49456b72575529faddab50caa449ffa9dc642195..f49a349fe3a29726bb322887da8d5deba7e0956e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -57,6 +57,9 @@ #include #include #include +#ifdef CONFIG_PAGECACHE_LIMIT +#include +#endif #include #include @@ -137,6 +140,9 @@ struct scan_control { /* Always discard instead of 
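rich_container_vm_commit_limit() above follows the same shape as vm_commit_limit(): an absolute overcommit_kbytes override converted to pages, or an overcommit ratio applied to usable memory, plus allowed swap. A self-contained sketch of that arithmetic with invented numbers (the walk that clamps by each ancestor memcg's memory.max is omitted here)::

    #include <stdio.h>

    static unsigned long commit_limit(unsigned long ram_pages,
                                      unsigned long hugetlb_pages,
                                      unsigned long swap_pages,
                                      unsigned long overcommit_kbytes,
                                      int ratio, int page_shift)
    {
            unsigned long allowed;

            if (overcommit_kbytes)
                    allowed = overcommit_kbytes >> (page_shift - 10);
            else
                    allowed = (ram_pages - hugetlb_pages) * ratio / 100;
            return allowed + swap_pages;
    }

    int main(void)
    {
            /* 4 GiB of 4 KiB pages, 1 GiB swap, ratio 50%: expect 786432 pages */
            printf("%lu pages\n", commit_limit(1048576, 0, 262144, 0, 50, 12));
            return 0;
    }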
demoting to lower tier memory */ unsigned int no_demotion:1; + /* The file pages on the current node are not allowed to be reclaimed */ + unsigned int file_is_reserved:1; + /* Allocation order */ s8 order; @@ -187,6 +193,44 @@ struct scan_control { * From 0 .. 200. Higher means more swappy. */ int vm_swappiness = 60; +/* The minimum page cache that should be reserved in the system */ +unsigned long sysctl_min_cache_kbytes; + +/* + * Even if vm_swappiness is set to 0, swapout can happen in + * global reclaim when there is little page cache. When + * strict_swappiness is set, such global swapout can be + * completely disabled. + */ +static int strict_swappiness; + +#ifdef CONFIG_DEBUG_FS +static int strict_swappiness_get(void *data, u64 *val) +{ + *val = strict_swappiness; + return 0; +} + +static int strict_swappiness_set(void *data, u64 val) +{ + if (val > 1) + return -EINVAL; + + strict_swappiness = val; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(strict_swappiness_fops, + strict_swappiness_get, strict_swappiness_set, "%llu\n"); + +static int __init strict_swappiness_debugfs(void) +{ + debugfs_create_file_unsafe("strict_swappiness", 0644, NULL, NULL, + &strict_swappiness_fops); + return 0; +} +late_initcall(strict_swappiness_debugfs); +#endif LIST_HEAD(shrinker_list); DECLARE_RWSEM(shrinker_rwsem); @@ -1310,7 +1354,7 @@ typedef enum { * Calls ->writepage(). */ static pageout_t pageout(struct folio *folio, struct address_space *mapping, - struct swap_iocb **plug) + struct swap_iocb **plug, struct list_head *folio_list) { /* * If the folio is dirty, only perform writeback if that write @@ -1358,6 +1402,14 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping, .swap_plug = plug, }; + + /* + * The large shmem folio can be split if CONFIG_THP_SWAP is + * not enabled or contiguous swap entries cannot be + * allocated. + */ + if (shmem_mapping(mapping) && folio_test_large(folio)) + wbc.list = folio_list; + folio_set_reclaim(folio); res = mapping->a_ops->writepage(&folio->page, &wbc); if (res < 0) @@ -1718,8 +1770,12 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, LIST_HEAD(demote_folios); unsigned int nr_reclaimed = 0; unsigned int pgactivate = 0; + u64 start = 0; bool do_demote_pass; struct swap_iocb *plug = NULL; + struct lruvec *target_lruvec; + + target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); memset(stat, 0, sizeof(*stat)); cond_resched(); @@ -1828,7 +1884,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, /* Case 1 above */ if (current_is_kswapd() && folio_test_reclaim(folio) && - test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { + test_bit(LRUVEC_WRITEBACK, &target_lruvec->flags)) { stat->nr_immediate += nr_pages; goto activate_locked; @@ -1905,34 +1961,32 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, if (!can_split_folio(folio, NULL)) goto activate_locked; /* - * Split folios without a PMD map right - * away. Chances are some or all of the - * tail pages can be freed without IO. + * Split partially mapped folios right away. + * We can free the unmapped pages without IO.
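The strict_swappiness knob registered above is a plain debugfs boolean at the debugfs root, and its setter rejects any value other than 0 or 1. Assuming debugfs is mounted at the usual location, toggling it would look like::

    # echo 1 > /sys/kernel/debug/strict_swappiness
    # cat /sys/kernel/debug/strict_swappiness
    1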
*/ - if (!folio_entire_mapcount(folio) && - split_folio_to_list(folio, - folio_list)) + if (data_race(!list_empty(&folio->_deferred_list)) && + split_folio_to_list(folio, folio_list)) goto activate_locked; } if (!add_to_swap(folio)) { + int __maybe_unused order = folio_order(folio); + if (!folio_test_large(folio)) goto activate_locked_split; /* Fallback to swap normal pages */ - if (split_folio_to_list(folio, - folio_list)) + if (split_folio_to_list(folio, folio_list)) goto activate_locked; #ifdef CONFIG_TRANSPARENT_HUGEPAGE - count_vm_event(THP_SWPOUT_FALLBACK); + if (nr_pages >= HPAGE_PMD_NR) { + count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1); + count_vm_event(THP_SWPOUT_FALLBACK); + } + count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK); #endif if (!add_to_swap(folio)) goto activate_locked_split; } } - } else if (folio_test_swapbacked(folio) && - folio_test_large(folio)) { - /* Split shmem folio */ - if (split_folio_to_list(folio, folio_list)) - goto keep_locked; } /* @@ -1992,7 +2046,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, if (folio_is_file_lru(folio) && (!current_is_kswapd() || !folio_test_reclaim(folio) || - !test_bit(PGDAT_DIRTY, &pgdat->flags))) { + !test_bit(LRUVEC_DIRTY, &target_lruvec->flags))) { /* * Immediately reclaim when written back. * Similar in principle to folio_deactivate() @@ -2019,12 +2073,32 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, * starts and then write it out here. */ try_to_unmap_flush_dirty(); - switch (pageout(folio, mapping, &plug)) { + if (!current_is_kswapd()) + memcg_lat_stat_start(&start); + switch (pageout(folio, mapping, &plug, folio_list)) { case PAGE_KEEP: goto keep_locked; case PAGE_ACTIVATE: + /* + * If a shmem folio is split when written back to swap, + * the tail pages will make their own pass through + * this function and be accounted then. + */ + if (nr_pages > 1 && !folio_test_large(folio)) { + sc->nr_scanned -= (nr_pages - 1); + nr_pages = 1; + } goto activate_locked; case PAGE_SUCCESS: + if (!current_is_kswapd()) + memcg_lat_stat_end(cgroup_reclaim(sc) ? + MEM_LAT_MEMCG_DIRECT_SWAPOUT : + MEM_LAT_GLOBAL_DIRECT_SWAPOUT, + start); + if (nr_pages > 1 && !folio_test_large(folio)) { + sc->nr_scanned -= (nr_pages - 1); + nr_pages = 1; + } stat->nr_pageout += nr_pages; if (folio_test_writeback(folio)) @@ -2973,6 +3047,7 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) if (!cgroup_reclaim(sc)) { unsigned long total_high_wmark = 0; unsigned long free, anon; + unsigned long min_cache_kbytes; int z; free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); @@ -2999,6 +3074,15 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) file + free <= total_high_wmark && !(sc->may_deactivate & DEACTIVATE_ANON) && anon >> sc->priority; + + /* + * Reserve a specified amount of page cache in case of thrashing. + * OOM killer is preferred when the system page cache is below the + * given watermark. + */ + min_cache_kbytes = READ_ONCE(sysctl_min_cache_kbytes); + if (min_cache_kbytes) + sc->file_is_reserved = file <= pgdat->min_cache_pages; } } @@ -3053,7 +3137,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, /* * If the system is almost out of file pages, force-scan anon.
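When pageout() ends up splitting a large shmem folio, the tail pages re-enter shrink_folio_list() later and are accounted on their own pass, so the hunks above rewind sc->nr_scanned to charge only the head page. A toy illustration of that accounting, with invented numbers::

    /* Rewind the scan count when a folio charged as nr_pages was split
     * and only the head page is handled in this pass. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long nr_scanned = 20;  /* charged a 16-page folio plus 4 pages */
            int nr_pages = 16;
            int folio_still_large = 0;      /* split: now a single page */

            if (nr_pages > 1 && !folio_still_large) {
                    nr_scanned -= (nr_pages - 1);
                    nr_pages = 1;
            }
            printf("nr_scanned=%lu nr_pages=%d\n", nr_scanned, nr_pages);
            return 0;
    }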
*/ - if (sc->file_is_tiny) { + if (sc->file_is_tiny && !strict_swappiness) { scan_balance = SCAN_ANON; goto out; } @@ -3202,6 +3286,9 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, BUG(); } + if (sc->file_is_reserved && file) + scan = 0; + nr[lru] = scan; } } @@ -4958,7 +5045,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c } /* ineligible */ - if (zone > sc->reclaim_idx) { + if (!folio_test_lru(folio) || zone > sc->reclaim_idx) { gen = folio_inc_gen(lruvec, folio, false); list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); return true; } @@ -6531,6 +6618,15 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) sc->nr_scanned - scanned, sc->nr_reclaimed - reclaimed); + /* + * Memcg background reclaim breaks the iteration once the + * watermark is satisfied. + */ + if (cgroup_reclaim(sc) && current_is_kswapd() && + is_wmark_ok(target_memcg, false)) { + mem_cgroup_iter_break(target_memcg, memcg); + break; + } } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL))); } @@ -6588,11 +6684,11 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) * in the nr_immediate check below. */ if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) - set_bit(PGDAT_WRITEBACK, &pgdat->flags); + set_bit(LRUVEC_WRITEBACK, &target_lruvec->flags); /* Allow kswapd to start writing pages during reclaim.*/ if (sc->nr.unqueued_dirty == sc->nr.file_taken) - set_bit(PGDAT_DIRTY, &pgdat->flags); + set_bit(LRUVEC_DIRTY, &target_lruvec->flags); /* * If kswapd scans pages marked for immediate @@ -6616,7 +6712,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) if (cgroup_reclaim(sc) && writeback_throttling_sane(sc)) set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); - if (current_is_kswapd()) + if (current_is_kswapd() && !cgroup_reclaim(sc)) set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); } @@ -6855,6 +6951,10 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); do { + if (current_is_kswapd() && cgroup_reclaim(sc) && + is_wmark_ok(sc->target_mem_cgroup, false)) + break; + if (!sc->proactive) vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, sc->priority); @@ -6890,6 +6990,10 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, zone->zone_pgdat); clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); + if (current_is_kswapd()) { + clear_bit(LRUVEC_DIRTY, &lruvec->flags); + clear_bit(LRUVEC_WRITEBACK, &lruvec->flags); + } } } @@ -6918,8 +7022,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, goto retry; } - /* Untapped cgroup reserves? Don't OOM, retry. */ - if (sc->memcg_low_skipped) { + /* + * Untapped cgroup reserves? Don't OOM, retry. + * + * Memcg kswapd should not break low protection.
+ */ + if (sc->memcg_low_skipped && !current_is_kswapd()) { sc->priority = initial_priority; sc->force_deactivate = 0; sc->memcg_low_reclaim = 1; @@ -7280,8 +7388,8 @@ static void clear_pgdat_congested(pg_data_t *pgdat) clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); - clear_bit(PGDAT_DIRTY, &pgdat->flags); - clear_bit(PGDAT_WRITEBACK, &pgdat->flags); + clear_bit(LRUVEC_DIRTY, &lruvec->flags); + clear_bit(LRUVEC_WRITEBACK, &lruvec->flags); } /* @@ -8169,3 +8277,118 @@ void check_move_unevictable_folios(struct folio_batch *fbatch) } } EXPORT_SYMBOL_GPL(check_move_unevictable_folios); + +#ifdef CONFIG_PAGECACHE_LIMIT +static int __pagecache_shrink(struct mem_cgroup *memcg, + struct scan_control *sc) +{ + unsigned long has_reclaimed = sc->nr_reclaimed; + struct mem_cgroup *new = memcg, *tmp; + struct lruvec *lruvec; + pg_data_t *pgdat; + int ret = 0, nid, reserved_nid = -1, current_nid = numa_node_id(); + + for_each_online_node(nid) { + /* here we first select the local NUMA node */ + if (reserved_nid < 0) { + reserved_nid = nid; + pgdat = NODE_DATA(current_nid); + } else if (nid == current_nid) { + pgdat = NODE_DATA(reserved_nid); + } else { + pgdat = NODE_DATA(nid); + } + + /* handle sc->may_deactivate etc. */ + prepare_scan_count(pgdat, sc); + + tmp = mem_cgroup_iter(new, NULL, NULL); + do { + + /* + * This loop can become CPU-bound when target memcgs + * aren't eligible for reclaim - either because they + * don't have any reclaimable pages, or because their + * memory is explicitly protected. Avoid soft lockups. + */ + cond_resched(); + + /* + * In case the pagecache limit is suddenly disabled, but + * the reclaim operation is still being performed. + */ + if (!is_memcg_pgcache_limit_enabled(memcg)) { + mem_cgroup_iter_break(new, tmp); + ret = -1; + goto out; + } + + lruvec = mem_cgroup_lruvec(tmp, pgdat); + shrink_lruvec(lruvec, sc); + if (sc->nr_reclaimed >= sc->nr_to_reclaim) { + mem_cgroup_iter_break(new, tmp); + goto out; + } + } while ((tmp = mem_cgroup_iter(new, tmp, NULL))); + } + +out: + memcg_add_pgcache_limit_reclaimed(memcg, + sc->nr_reclaimed - has_reclaimed); + return ret; +} + +void __memcg_pagecache_shrink(struct mem_cgroup *memcg, + bool may_unmap, gfp_t gfp_mask) +{ + unsigned long nr_should_reclaim; + struct scan_control sc = { + .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | + (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), + .reclaim_idx = ZONE_MOVABLE, + .may_swap = 0, + .may_unmap = may_unmap, + .may_writepage = 0, + .priority = DEF_PRIORITY, + .target_mem_cgroup = memcg, + }; + + /* + * We recheck here mainly in case the pagecache limit is already + * satisfied, especially in asynchronous scenarios. + */ + nr_should_reclaim = memcg_get_pgcache_overflow_size(memcg); + if (!nr_should_reclaim) + return; + + sc.nr_to_reclaim = max(nr_should_reclaim, SWAP_CLUSTER_MAX); + do { + if (!is_memcg_pgcache_limit_enabled(memcg)) + break; + + if (sc.nr_reclaimed >= sc.nr_to_reclaim) + break; + /* + * In case there is not enough pagecache to be reclaimed during + * direct reclaim, we only enable mapped pages to be reclaimed + * when the priority value is smaller than DEF_PRIORITY - 4. + */ + if (memcg->pgcache_limit_sync && + (sc.priority < DEF_PRIORITY - 4)) + sc.may_unmap = 1; + + /* + * We only enable dirty pages to be reclaimed when the priority + * value is smaller than DEF_PRIORITY - 2, and the reclaim + * must be in an asynchronous scenario, in order to minimize the + * performance jitter when dirty pages are to be reclaimed.
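__memcg_pagecache_shrink() above relaxes its reclaim constraints as the scan priority drops: mapped pages become eligible below DEF_PRIORITY - 4 and dirty-page writeback below DEF_PRIORITY - 2. A small userspace sketch of that staging (DEF_PRIORITY is 12 upstream; the sync/async distinction on each knob is dropped here)::

    /* Priority-staged reclaim knobs: start gentle, allow unmapping and
     * writeback only after several failed passes. Not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    #define DEF_PRIORITY 12

    int main(void)
    {
            bool may_unmap = false, may_writepage = false;
            int priority;

            for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                    if (priority < DEF_PRIORITY - 4)
                            may_unmap = true;
                    if (priority < DEF_PRIORITY - 2)
                            may_writepage = true;
                    printf("prio %2d: unmap=%d writepage=%d\n",
                           priority, may_unmap, may_writepage);
            }
            return 0;
    }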
+ */ + if (current_is_kswapd() && !memcg->pgcache_limit_sync && + (sc.priority < DEF_PRIORITY - 2)) + sc.may_writepage = 1; + + if (__pagecache_shrink(memcg, &sc) < 0) + break; + } while (--sc.priority >= 0); +} +#endif diff --git a/mm/zswap.c b/mm/zswap.c index 69681b9173fdcbf9f90967e21b5159760a16c601..25ee19e45be8f2aae52e665e349fb8880edf3ff0 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -84,6 +84,7 @@ static bool zswap_pool_reached_full; static int zswap_setup(void); /* Enable/disable zswap */ +static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled); static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON); static int zswap_enabled_param_set(const char *, const struct kernel_param *); @@ -144,6 +145,11 @@ module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644); /* Number of zpools in zswap_pool (empirically determined for scalability) */ #define ZSWAP_NR_ZPOOLS 32 +bool zswap_never_enabled(void) +{ + return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled); +} + /********************************* * data structures **********************************/ @@ -1410,6 +1416,20 @@ bool zswap_load(struct folio *folio) VM_WARN_ON_ONCE(!folio_test_locked(folio)); + if (zswap_never_enabled()) + return false; + + /* + * Large folios should not be swapped in while zswap is being used, as + * they are not properly handled. Zswap does not properly load large + * folios, and a large folio may only be partially in zswap. + * + * Return true without marking the folio uptodate so that an IO error is + * emitted (e.g. do_swap_page() will sigbus). + */ + if (WARN_ON_ONCE(folio_test_large(folio))) + return true; + /* find */ spin_lock(&tree->lock); entry = zswap_entry_find_get(&tree->rbroot, offset); @@ -1479,6 +1499,8 @@ bool zswap_load(struct folio *folio) zswap_entry_put(tree, entry); spin_unlock(&tree->lock); + if (ret) + folio_mark_uptodate(folio); return ret; } @@ -1611,6 +1633,7 @@ static int zswap_setup(void) zpool_get_type(pool->zpools[0])); list_add(&pool->list, &zswap_pools); zswap_has_pool = true; + static_branch_enable(&zswap_ever_enabled); } else { pr_err("pool creation failed\n"); zswap_enabled = false; diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index 5918d1b32e196005330fe51b0dabcb5b7709b910..c931905ca4d53437c15fcecb9dc8f27e8b1dc163 100644 --- a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -7,16 +7,23 @@ #include #include -extern struct bpf_struct_ops bpf_bpf_dummy_ops; +static struct bpf_struct_ops bpf_bpf_dummy_ops; /* A common type for test_N with return value in bpf_dummy_ops */ typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...); +static int dummy_ops_test_ret_function(struct bpf_dummy_ops_state *state, ...) 
+{ + return 0; +} + struct bpf_dummy_ops_test_args { u64 args[MAX_BPF_FUNC_ARGS]; struct bpf_dummy_ops_state state; }; +static struct btf *bpf_dummy_ops_btf; + static struct bpf_dummy_ops_test_args * dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr) { @@ -62,7 +69,7 @@ static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args) static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args) { - dummy_ops_test_ret_fn test = (void *)image; + dummy_ops_test_ret_fn test = (void *)image + cfi_get_offset(); struct bpf_dummy_ops_state *state = NULL; /* state needs to be NULL if args[0] is 0 */ @@ -85,9 +92,15 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, void *image = NULL; unsigned int op_idx; int prog_ret; + s32 type_id; int err; - if (prog->aux->attach_btf_id != st_ops->type_id) + type_id = btf_find_by_name_kind(bpf_dummy_ops_btf, + bpf_bpf_dummy_ops.name, + BTF_KIND_STRUCT); + if (type_id < 0) + return -EINVAL; + if (prog->aux->attach_btf_id != type_id) return -EOPNOTSUPP; func_proto = prog->aux->attach_func_proto; @@ -120,6 +133,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, op_idx = prog->expected_attach_type; err = bpf_struct_ops_prepare_trampoline(tlinks, link, &st_ops->func_models[op_idx], + &dummy_ops_test_ret_function, image, image + PAGE_SIZE); if (err < 0) goto out; @@ -143,6 +157,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, static int bpf_dummy_init(struct btf *btf) { + bpf_dummy_ops_btf = btf; return 0; } @@ -164,7 +179,7 @@ static int bpf_dummy_ops_check_member(const struct btf_type *t, case offsetof(struct bpf_dummy_ops, test_sleepable): break; default: - if (prog->aux->sleepable) + if (prog->sleepable) return -EINVAL; } @@ -220,7 +235,29 @@ static void bpf_dummy_unreg(void *kdata) { } -struct bpf_struct_ops bpf_bpf_dummy_ops = { +static int bpf_dummy_test_1(struct bpf_dummy_ops_state *cb) +{ + return 0; +} + +static int bpf_dummy_test_2(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2, + char a3, unsigned long a4) +{ + return 0; +} + +static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb) +{ + return 0; +} + +static struct bpf_dummy_ops __bpf_bpf_dummy_ops = { + .test_1 = bpf_dummy_test_1, + .test_2 = bpf_dummy_test_2, + .test_sleepable = bpf_dummy_test_sleepable, +}; + +static struct bpf_struct_ops bpf_bpf_dummy_ops = { .verifier_ops = &bpf_dummy_verifier_ops, .init = bpf_dummy_init, .check_member = bpf_dummy_ops_check_member, @@ -228,4 +265,12 @@ struct bpf_struct_ops bpf_bpf_dummy_ops = { .reg = bpf_dummy_reg, .unreg = bpf_dummy_unreg, .name = "bpf_dummy_ops", + .cfi_stubs = &__bpf_bpf_dummy_ops, + .owner = THIS_MODULE, }; + +static int __init bpf_dummy_struct_ops_init(void) +{ + return register_bpf_struct_ops(&bpf_bpf_dummy_ops, bpf_dummy_ops); +} +late_initcall(bpf_dummy_struct_ops_init); diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 905de361f8623f90e1594532300d458b3e480a86..07589fdc64456952a44686fd80491f6b1f7c7e67 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -504,9 +504,8 @@ static int bpf_test_finish(const union bpf_attr *kattr, * architecture dependent calling conventions. 7+ can be supported in the * future. 
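The no-op functions above give every bpf_dummy_ops member a real native implementation for the new cfi_stubs table, so an indirect call through the ops table always lands on a genuine, prototype-matching function. A generic userspace sketch of that "every slot gets a real stub" pattern (types and names invented)::

    #include <stdio.h>

    struct dummy_ops {
            int (*test_1)(int a);
            int (*test_2)(int a, int b);
    };

    static int stub_test_1(int a) { return 0; }
    static int stub_test_2(int a, int b) { return 0; }

    static const struct dummy_ops stubs = {
            .test_1 = stub_test_1,
            .test_2 = stub_test_2,
    };

    int main(void)
    {
            /* indirect calls always hit a real, prototype-matching target */
            printf("%d %d\n", stubs.test_1(1), stubs.test_2(1, 2));
            return 0;
    }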
*/ -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); + __bpf_kfunc int bpf_fentry_test1(int a) { return a + 1; @@ -602,27 +601,38 @@ __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) refcount_dec(&p->cnt); } +__bpf_kfunc void bpf_kfunc_call_test_release_dtor(void *p) +{ + bpf_kfunc_call_test_release(p); +} +CFI_NOSEAL(bpf_kfunc_call_test_release_dtor); + __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p) { } -__diag_pop(); +__bpf_kfunc void bpf_kfunc_call_memb_release_dtor(void *p) +{ +} +CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor); + +__bpf_kfunc_end_defs(); -BTF_SET8_START(bpf_test_modify_return_ids) +BTF_KFUNCS_START(bpf_test_modify_return_ids) BTF_ID_FLAGS(func, bpf_modify_return_test) BTF_ID_FLAGS(func, bpf_modify_return_test2) BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE) -BTF_SET8_END(bpf_test_modify_return_ids) +BTF_KFUNCS_END(bpf_test_modify_return_ids) static const struct btf_kfunc_id_set bpf_test_modify_return_set = { .owner = THIS_MODULE, .set = &bpf_test_modify_return_ids, }; -BTF_SET8_START(test_sk_check_kfunc_ids) +BTF_KFUNCS_START(test_sk_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE) -BTF_SET8_END(test_sk_check_kfunc_ids) +BTF_KFUNCS_END(test_sk_check_kfunc_ids) static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, u32 size, u32 headroom, u32 tailroom) @@ -1679,9 +1689,9 @@ static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = { BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids) BTF_ID(struct, prog_test_ref_kfunc) -BTF_ID(func, bpf_kfunc_call_test_release) +BTF_ID(func, bpf_kfunc_call_test_release_dtor) BTF_ID(struct, prog_test_member) -BTF_ID(func, bpf_kfunc_call_memb_release) +BTF_ID(func, bpf_kfunc_call_memb_release_dtor) static int __init bpf_prog_test_run_init(void) { diff --git a/net/core/filter.c b/net/core/filter.c index 34320ce70096ac65cebe1103e8a82a0403b6d787..a9c8582ede943b0daf9f4debca79ffd6bbd6f854 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -11844,9 +11844,7 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id) return func; } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags, struct bpf_dynptr_kern *ptr__uninit) { @@ -11893,7 +11891,7 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern, return 0; } -__diag_pop(); +__bpf_kfunc_end_defs(); int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, struct bpf_dynptr_kern *ptr__uninit) @@ -11909,17 +11907,17 @@ int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, return 0; } -BTF_SET8_START(bpf_kfunc_check_set_skb) -BTF_ID_FLAGS(func, bpf_dynptr_from_skb) -BTF_SET8_END(bpf_kfunc_check_set_skb) +BTF_KFUNCS_START(bpf_kfunc_check_set_skb) +BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS) +BTF_KFUNCS_END(bpf_kfunc_check_set_skb) -BTF_SET8_START(bpf_kfunc_check_set_xdp) +BTF_KFUNCS_START(bpf_kfunc_check_set_xdp) BTF_ID_FLAGS(func, bpf_dynptr_from_xdp) -BTF_SET8_END(bpf_kfunc_check_set_xdp) +BTF_KFUNCS_END(bpf_kfunc_check_set_xdp) -BTF_SET8_START(bpf_kfunc_check_set_sock_addr) +BTF_KFUNCS_START(bpf_kfunc_check_set_sock_addr) BTF_ID_FLAGS(func, bpf_sock_addr_set_sun_path) -BTF_SET8_END(bpf_kfunc_check_set_sock_addr) 
+BTF_KFUNCS_END(bpf_kfunc_check_set_sock_addr) static const struct btf_kfunc_id_set bpf_kfunc_set_skb = { .owner = THIS_MODULE, @@ -11950,16 +11948,14 @@ static int __init bpf_kfunc_init(void) ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_XMIT, &bpf_kfunc_set_skb); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_NETFILTER, &bpf_kfunc_set_skb); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_kfunc_set_skb); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp); return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, &bpf_kfunc_set_sock_addr); } late_initcall(bpf_kfunc_init); -/* Disables missing prototype warnings */ -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); /* bpf_sock_destroy: Destroy the given socket with ECONNABORTED error code. * @@ -11993,11 +11989,11 @@ __bpf_kfunc int bpf_sock_destroy(struct sock_common *sock) return sk->sk_prot->diag_destroy(sk, ECONNABORTED); } -__diag_pop() +__bpf_kfunc_end_defs(); -BTF_SET8_START(bpf_sk_iter_kfunc_ids) +BTF_KFUNCS_START(bpf_sk_iter_kfunc_ids) BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS) -BTF_SET8_END(bpf_sk_iter_kfunc_ids) +BTF_KFUNCS_END(bpf_sk_iter_kfunc_ids) static int tracing_iter_filter(const struct bpf_prog *prog, u32 kfunc_id) { diff --git a/net/core/xdp.c b/net/core/xdp.c index 5ee3f8f165e5aa68bcc1134087a25956ea685a33..39738e00e732408c670d2dc317312ab3e5112fab 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -692,9 +692,7 @@ struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf) return nxdpf; } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); /** * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp. @@ -734,13 +732,13 @@ __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash, return -EOPNOTSUPP; } -__diag_pop(); +__bpf_kfunc_end_defs(); -BTF_SET8_START(xdp_metadata_kfunc_ids) +BTF_KFUNCS_START(xdp_metadata_kfunc_ids) #define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS) XDP_METADATA_KFUNC_xxx #undef XDP_METADATA_KFUNC -BTF_SET8_END(xdp_metadata_kfunc_ids) +BTF_KFUNCS_END(xdp_metadata_kfunc_ids) static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/devlink/devl_internal.h b/net/devlink/devl_internal.h index f6b5fea2e13cddfc8207a61aab3f6165db848621..4c5b1d13bcfa6ec08c51a6ae76e7b6e85e9e3ea5 100644 --- a/net/devlink/devl_internal.h +++ b/net/devlink/devl_internal.h @@ -52,6 +52,10 @@ struct devlink { */ struct mutex lock; struct lock_class_key lock_key; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + u8 reload_failed:1; refcount_t refcount; struct rcu_work rwork; diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index 39dcccf0f174b95f2ec4cd7bed3cc86a73dede3b..5d83d7b058faffe722916888635cb4c4690f1e23 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -12,7 +12,7 @@ #include /* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. 
*/ -extern struct bpf_struct_ops bpf_tcp_congestion_ops; +static struct bpf_struct_ops bpf_tcp_congestion_ops; static u32 unsupported_ops[] = { offsetof(struct tcp_congestion_ops, get_info), @@ -20,6 +20,7 @@ static u32 unsupported_ops[] = { static const struct btf_type *tcp_sock_type; static u32 tcp_sock_id, sock_id; +static const struct btf_type *tcp_congestion_ops_type; static int bpf_tcp_ca_init(struct btf *btf) { @@ -36,6 +37,11 @@ static int bpf_tcp_ca_init(struct btf *btf) tcp_sock_id = type_id; tcp_sock_type = btf_type_by_id(btf, tcp_sock_id); + type_id = btf_find_by_name_kind(btf, "tcp_congestion_ops", BTF_KIND_STRUCT); + if (type_id < 0) + return -EINVAL; + tcp_congestion_ops_type = btf_type_by_id(btf, type_id); + return 0; } @@ -149,7 +155,7 @@ static u32 prog_ops_moff(const struct bpf_prog *prog) u32 midx; midx = prog->expected_attach_type; - t = bpf_tcp_congestion_ops.type; + t = tcp_congestion_ops_type; m = &btf_type_member(t)[midx]; return __btf_member_bit_offset(t, m) / 8; @@ -195,13 +201,13 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id, } } -BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids) +BTF_KFUNCS_START(bpf_tcp_ca_check_kfunc_ids) BTF_ID_FLAGS(func, tcp_reno_ssthresh) BTF_ID_FLAGS(func, tcp_reno_cong_avoid) BTF_ID_FLAGS(func, tcp_reno_undo_cwnd) BTF_ID_FLAGS(func, tcp_slow_start) BTF_ID_FLAGS(func, tcp_cong_avoid_ai) -BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids) +BTF_KFUNCS_END(bpf_tcp_ca_check_kfunc_ids) static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = { .owner = THIS_MODULE, @@ -271,7 +277,75 @@ static int bpf_tcp_ca_validate(void *kdata) return tcp_validate_congestion_control(kdata); } -struct bpf_struct_ops bpf_tcp_congestion_ops = { +static u32 bpf_tcp_ca_ssthresh(struct sock *sk) +{ + return 0; +} + +static void bpf_tcp_ca_cong_avoid(struct sock *sk, u32 ack, u32 acked) +{ +} + +static void bpf_tcp_ca_set_state(struct sock *sk, u8 new_state) +{ +} + +static void bpf_tcp_ca_cwnd_event(struct sock *sk, enum tcp_ca_event ev) +{ +} + +static void bpf_tcp_ca_in_ack_event(struct sock *sk, u32 flags) +{ +} + +static void bpf_tcp_ca_pkts_acked(struct sock *sk, const struct ack_sample *sample) +{ +} + +static u32 bpf_tcp_ca_min_tso_segs(struct sock *sk) +{ + return 0; +} + +static void bpf_tcp_ca_cong_control(struct sock *sk, const struct rate_sample *rs) +{ +} + +static u32 bpf_tcp_ca_undo_cwnd(struct sock *sk) +{ + return 0; +} + +static u32 bpf_tcp_ca_sndbuf_expand(struct sock *sk) +{ + return 0; +} + +static void __bpf_tcp_ca_init(struct sock *sk) +{ +} + +static void __bpf_tcp_ca_release(struct sock *sk) +{ +} + +static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = { + .ssthresh = bpf_tcp_ca_ssthresh, + .cong_avoid = bpf_tcp_ca_cong_avoid, + .set_state = bpf_tcp_ca_set_state, + .cwnd_event = bpf_tcp_ca_cwnd_event, + .in_ack_event = bpf_tcp_ca_in_ack_event, + .pkts_acked = bpf_tcp_ca_pkts_acked, + .min_tso_segs = bpf_tcp_ca_min_tso_segs, + .cong_control = bpf_tcp_ca_cong_control, + .undo_cwnd = bpf_tcp_ca_undo_cwnd, + .sndbuf_expand = bpf_tcp_ca_sndbuf_expand, + + .init = __bpf_tcp_ca_init, + .release = __bpf_tcp_ca_release, +}; + +static struct bpf_struct_ops bpf_tcp_congestion_ops = { .verifier_ops = &bpf_tcp_ca_verifier_ops, .reg = bpf_tcp_ca_reg, .unreg = bpf_tcp_ca_unreg, @@ -281,10 +355,17 @@ struct bpf_struct_ops bpf_tcp_congestion_ops = { .init = bpf_tcp_ca_init, .validate = bpf_tcp_ca_validate, .name = "tcp_congestion_ops", + .cfi_stubs = &__bpf_ops_tcp_congestion_ops, + .owner = THIS_MODULE, }; static int __init 
bpf_tcp_ca_kfunc_init(void) { - return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set); + int ret; + + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set); + ret = ret ?: register_bpf_struct_ops(&bpf_tcp_congestion_ops, tcp_congestion_ops); + + return ret; } late_initcall(bpf_tcp_ca_kfunc_init); diff --git a/net/ipv4/fou_bpf.c b/net/ipv4/fou_bpf.c index 3760a14b6b576b07c60879bf58831c382cd103bc..06e5572f296f1e70d49e10cd343ab09d3aa30009 100644 --- a/net/ipv4/fou_bpf.c +++ b/net/ipv4/fou_bpf.c @@ -22,9 +22,7 @@ enum bpf_fou_encap_type { FOU_BPF_ENCAP_GUE, }; -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in BTF"); +__bpf_kfunc_start_defs(); /* bpf_skb_set_fou_encap - Set FOU encap parameters * @@ -100,12 +98,12 @@ __bpf_kfunc int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx, return 0; } -__diag_pop() +__bpf_kfunc_end_defs(); -BTF_SET8_START(fou_kfunc_set) +BTF_KFUNCS_START(fou_kfunc_set) BTF_ID_FLAGS(func, bpf_skb_set_fou_encap) BTF_ID_FLAGS(func, bpf_skb_get_fou_encap) -BTF_SET8_END(fou_kfunc_set) +BTF_KFUNCS_END(fou_kfunc_set) static const struct btf_kfunc_id_set fou_bpf_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 285482060082f84b650ab3be88c2dc63c54bcccd..2eaa8c1ba92f14478062fd7d128590acdc4aaf2a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1494,6 +1494,9 @@ struct uncached_list { spinlock_t lock; struct list_head head; struct list_head quarantine; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list); diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 146792cd26fed4e61cd72a5d85263b2c7c7b2636..56bb7a9621ab4d042b16c600e066a80b46991a8a 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -1154,7 +1154,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = { .set_state = bbr_set_state, }; -BTF_SET8_START(tcp_bbr_check_kfunc_ids) +BTF_KFUNCS_START(tcp_bbr_check_kfunc_ids) #ifdef CONFIG_X86 #ifdef CONFIG_DYNAMIC_FTRACE BTF_ID_FLAGS(func, bbr_init) @@ -1167,7 +1167,7 @@ BTF_ID_FLAGS(func, bbr_min_tso_segs) BTF_ID_FLAGS(func, bbr_set_state) #endif #endif -BTF_SET8_END(tcp_bbr_check_kfunc_ids) +BTF_KFUNCS_END(tcp_bbr_check_kfunc_ids) static const struct btf_kfunc_id_set tcp_bbr_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 0fd78ecb67e756c2869430b720cbd24bd66b86b7..44869ea089e34696ff03b5e35a4ca52e9f120e0a 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -485,7 +485,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = { .name = "cubic", }; -BTF_SET8_START(tcp_cubic_check_kfunc_ids) +BTF_KFUNCS_START(tcp_cubic_check_kfunc_ids) #ifdef CONFIG_X86 #ifdef CONFIG_DYNAMIC_FTRACE BTF_ID_FLAGS(func, cubictcp_init) @@ -496,7 +496,7 @@ BTF_ID_FLAGS(func, cubictcp_cwnd_event) BTF_ID_FLAGS(func, cubictcp_acked) #endif #endif -BTF_SET8_END(tcp_cubic_check_kfunc_ids) +BTF_KFUNCS_END(tcp_cubic_check_kfunc_ids) static const struct btf_kfunc_id_set tcp_cubic_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 8ad62713b0ba2b2561ded013057a73900cba51f0..b004280855f878ee1cb90e576c9e302e8a8762ef 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -271,7 +271,7 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = { .name = "dctcp-reno", }; -BTF_SET8_START(tcp_dctcp_check_kfunc_ids) 
+BTF_KFUNCS_START(tcp_dctcp_check_kfunc_ids) #ifdef CONFIG_X86 #ifdef CONFIG_DYNAMIC_FTRACE BTF_ID_FLAGS(func, dctcp_init) @@ -282,7 +282,7 @@ BTF_ID_FLAGS(func, dctcp_cwnd_undo) BTF_ID_FLAGS(func, dctcp_state) #endif #endif -BTF_SET8_END(tcp_dctcp_check_kfunc_ids) +BTF_KFUNCS_END(tcp_dctcp_check_kfunc_ids) static const struct btf_kfunc_id_set tcp_dctcp_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f6a213bae5cccb2a3d970283897d0f931481de21..70b67ea8017f419a8bb48d0e7413c43f522f631e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -809,6 +809,8 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) now = tcp_jiffies32; + trace_tcp_pkt_recv(sk, skb); + if (!icsk->icsk_ack.ato) { /* The _first_ data packet received, initialize * delayed ACK engine. @@ -3498,6 +3500,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb, flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ } + trace_tcp_data_acked(sk); + if (icsk->icsk_ca_ops->pkts_acked) { struct ack_sample sample = { .pkts_acked = pkts_acked, .rtt_us = sack->rate->rtt_us }; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index cfddc94508f0b7cd0ddce46cfc513c35e0f5f506..fc6902b14e23ae3582033589d40e01d239ff777a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2793,6 +2793,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, /* Send one loss probe per tail loss episode. */ if (push_one != 2) tcp_schedule_loss_probe(sk, false); + + trace_tcp_data_send(sk); return false; } return !tp->packets_out && !tcp_write_queue_empty(sk); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 2e4e5356039480749a42d6a8dd46c01b084fc0f4..61b70ebaa17019c5a451a2cf0551e5098f4910c2 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -409,7 +409,6 @@ static int compute_score(struct sock *sk, struct net *net, return score; } -INDIRECT_CALLABLE_SCOPE u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport, const __be32 faddr, const __be16 fport) { @@ -420,6 +419,7 @@ u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport, return __inet_ehashfn(laddr, lport, faddr, fport, udp_ehash_secret + net_hash_mix(net)); } +EXPORT_SYMBOL(udp_ehashfn); /* called with rcu_read_lock() */ static struct sock *udp4_lib_lookup2(struct net *net, @@ -479,6 +479,159 @@ static struct sock *udp4_lib_lookup2(struct net *net, return result; } +#if IS_ENABLED(CONFIG_BASE_SMALL) +static struct sock *udp4_lib_lookup4(struct net *net, + __be32 saddr, __be16 sport, + __be32 daddr, unsigned int hnum, + int dif, int sdif, + struct udp_table *udptable) +{ + return NULL; +} + +static void udp_rehash4(struct udp_table *udptable, struct sock *sk, + u16 newhash4) +{ +} + +static void udp_unhash4(struct udp_table *udptable, struct sock *sk) +{ +} +#else /* !CONFIG_BASE_SMALL */ +static struct sock *udp4_lib_lookup4(struct net *net, + __be32 saddr, __be16 sport, + __be32 daddr, unsigned int hnum, + int dif, int sdif, + struct udp_table *udptable) +{ + const __portpair ports = INET_COMBINED_PORTS(sport, hnum); + const struct hlist_nulls_node *node; + struct udp_hslot *hslot4; + unsigned int hash4, slot; + struct udp_sock *up; + struct sock *sk; + + hash4 = udp_ehashfn(net, daddr, hnum, saddr, sport); + slot = hash4 & udptable->mask; + hslot4 = &udptable->hash4[slot]; + INET_ADDR_COOKIE(acookie, saddr, daddr); + +begin: + /* SLAB_TYPESAFE_BY_RCU not used, so we don't need to touch sk_refcnt */ + 
udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) { + sk = (struct sock *)up; + if (inet_match(net, sk, acookie, ports, dif, sdif)) + return sk; + } + + /* if the nulls value we got at the end of this lookup is not the + * expected one, we must restart the lookup. We probably met an item that + * was moved to another chain due to rehash. + */ + if (get_nulls_value(node) != slot) + goto begin; + + return NULL; +} + +/* In hash4, rehash can happen in connect(), where hash4_cnt remains unchanged. */ +static void udp_rehash4(struct udp_table *udptable, struct sock *sk, + u16 newhash4) +{ + struct udp_hslot *hslot4, *nhslot4; + + hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash); + nhslot4 = udp_hashslot4(udptable, newhash4); + udp_sk(sk)->udp_lrpa_hash = newhash4; + + if (hslot4 != nhslot4) { + spin_lock_bh(&hslot4->lock); + hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node); + hslot4->count--; + spin_unlock_bh(&hslot4->lock); + + spin_lock_bh(&nhslot4->lock); + hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node, + &nhslot4->nulls_head); + nhslot4->count++; + spin_unlock_bh(&nhslot4->lock); + } +} + +static void udp_unhash4(struct udp_table *udptable, struct sock *sk) +{ + struct udp_hslot *hslot2, *hslot4; + + if (udp_hashed4(sk)) { + hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); + hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash); + + spin_lock(&hslot4->lock); + hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node); + hslot4->count--; + spin_unlock(&hslot4->lock); + + spin_lock(&hslot2->lock); + udp_hash4_dec(hslot2); + spin_unlock(&hslot2->lock); + } +} + +void udp_lib_hash4(struct sock *sk, u16 hash) +{ + struct udp_hslot *hslot, *hslot2, *hslot4; + struct net *net = sock_net(sk); + struct udp_table *udptable; + + /* A connected UDP socket can re-connect to another remote address, + * so rehash4 is needed. + */ + udptable = net->ipv4.udp_table; + if (udp_hashed4(sk)) { + udp_rehash4(udptable, sk, hash); + return; + } + + hslot = udp_hashslot(udptable, net, udp_sk(sk)->udp_port_hash); + hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); + hslot4 = udp_hashslot4(udptable, hash); + udp_sk(sk)->udp_lrpa_hash = hash; + + spin_lock_bh(&hslot->lock); + if (rcu_access_pointer(sk->sk_reuseport_cb)) + reuseport_detach_sock(sk); + + spin_lock(&hslot4->lock); + hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node, + &hslot4->nulls_head); + hslot4->count++; + spin_unlock(&hslot4->lock); + + spin_lock(&hslot2->lock); + udp_hash4_inc(hslot2); + spin_unlock(&hslot2->lock); + + spin_unlock_bh(&hslot->lock); +} +EXPORT_SYMBOL(udp_lib_hash4); + +/* called with the socket lock held */ +void udp4_hash4(struct sock *sk) +{ + struct net *net = sock_net(sk); + unsigned int hash; + + if (sk_unhashed(sk) || sk->sk_rcv_saddr == htonl(INADDR_ANY)) + return; + + hash = udp_ehashfn(net, sk->sk_rcv_saddr, sk->sk_num, + sk->sk_daddr, sk->sk_dport); + + udp_lib_hash4(sk, hash); +} +EXPORT_SYMBOL(udp4_hash4); +#endif /* CONFIG_BASE_SMALL */ + /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this.
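udp4_lib_lookup4() above walks an RCU "nulls" list: the chain terminator encodes the slot it belongs to, so a walker that got moved onto another chain by a concurrent rehash notices the mismatch and restarts. A single-threaded userspace sketch of that restart check (node layout and macros are invented; under RCU the retry is what makes lockless rehash safe)::

    #include <stdio.h>

    struct node {
            int key;
            struct node *next;      /* low bit set => "nulls" end marker */
    };

    #define NULLS_MARKER(slot) ((struct node *)((((unsigned long)(slot)) << 1) | 1UL))
    #define IS_NULLS(p)        ((unsigned long)(p) & 1UL)
    #define NULLS_VALUE(p)     ((unsigned long)(p) >> 1)

    static struct node *lookup(struct node *head, unsigned long slot, int key)
    {
            struct node *p;
    begin:
            for (p = head; !IS_NULLS(p); p = p->next)
                    if (p->key == key)
                            return p;
            /* ended on another chain's marker: a concurrent rehash moved us */
            if (NULLS_VALUE(p) != slot)
                    goto begin;
            return NULL;
    }

    int main(void)
    {
            struct node b = { 2, NULLS_MARKER(0) };
            struct node a = { 1, &b };

            printf("key 2: %s\n", lookup(&a, 0, 2) ? "found" : "miss");
            printf("key 3: %s\n", lookup(&a, 0, 3) ? "found" : "miss");
            return 0;
    }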
-DaveM */ @@ -487,13 +640,19 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, int sdif, struct udp_table *udptable, struct sk_buff *skb) { unsigned short hnum = ntohs(dport); - unsigned int hash2, slot2; struct udp_hslot *hslot2; struct sock *result, *sk; + unsigned int hash2; hash2 = ipv4_portaddr_hash(net, daddr, hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); + + if (udp_has_hash4(hslot2)) { + result = udp4_lib_lookup4(net, saddr, sport, daddr, hnum, + dif, sdif, udptable); + if (result) /* udp4_lib_lookup4 returns sk or NULL */ + return result; + } /* Lookup connected or non-wildcard socket */ result = udp4_lib_lookup2(net, saddr, sport, @@ -520,8 +679,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, /* Lookup wildcard sockets */ hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); result = udp4_lib_lookup2(net, saddr, sport, htonl(INADDR_ANY), hnum, dif, sdif, @@ -1933,6 +2091,18 @@ int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) } EXPORT_SYMBOL(udp_pre_connect); +static int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +{ + int res; + + lock_sock(sk); + res = __ip4_datagram_connect(sk, uaddr, addr_len); + if (!res) + udp4_hash4(sk); + release_sock(sk); + return res; +} + int __udp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); @@ -1992,6 +2162,8 @@ void udp_lib_unhash(struct sock *sk) hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); + + udp_unhash4(udptable, sk); } spin_unlock_bh(&hslot->lock); } @@ -2001,7 +2173,7 @@ EXPORT_SYMBOL(udp_lib_unhash); /* * inet_rcv_saddr was changed, we must rehash secondary hash */ -void udp_lib_rehash(struct sock *sk, u16 newhash) +void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4) { if (sk_hashed(sk)) { struct udp_table *udptable = udp_get_table_prot(sk); @@ -2033,6 +2205,19 @@ void udp_lib_rehash(struct sock *sk, u16 newhash) spin_unlock(&nhslot2->lock); } + if (udp_hashed4(sk)) { + udp_rehash4(udptable, sk, newhash4); + + if (hslot2 != nhslot2) { + spin_lock(&hslot2->lock); + udp_hash4_dec(hslot2); + spin_unlock(&hslot2->lock); + + spin_lock(&nhslot2->lock); + udp_hash4_inc(nhslot2); + spin_unlock(&nhslot2->lock); + } + } spin_unlock_bh(&hslot->lock); } } @@ -2044,7 +2229,11 @@ void udp_v4_rehash(struct sock *sk) u16 new_hash = ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, inet_sk(sk)->inet_num); - udp_lib_rehash(sk, new_hash); + u16 new_hash4 = udp_ehashfn(sock_net(sk), + sk->sk_rcv_saddr, sk->sk_num, + sk->sk_daddr, sk->sk_dport); + + udp_lib_rehash(sk, new_hash, new_hash4); } static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) @@ -2266,7 +2455,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, udptable->mask; hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask; start_lookup: - hslot = &udptable->hash2[hash2]; + hslot = &udptable->hash2[hash2].hslot; offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); } @@ -2537,14 +2726,13 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net, struct udp_table *udptable = net->ipv4.udp_table; INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr); unsigned short hnum = ntohs(loc_port); - unsigned int hash2, slot2; struct udp_hslot *hslot2; + unsigned int hash2; __portpair
ports; struct sock *sk; hash2 = ipv4_portaddr_hash(net, loc_addr, hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); ports = INET_COMBINED_PORTS(rmt_port, hnum); udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { @@ -2923,7 +3111,7 @@ struct proto udp_prot = { .owner = THIS_MODULE, .close = udp_lib_close, .pre_connect = udp_pre_connect, - .connect = ip4_datagram_connect, + .connect = udp_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .init = udp_init_sock, @@ -3170,7 +3358,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq) batch_sks = 0; for (; state->bucket <= udptable->mask; state->bucket++) { - struct udp_hslot *hslot2 = &udptable->hash2[state->bucket]; + struct udp_hslot *hslot2 = &udptable->hash2[state->bucket].hslot; if (hlist_empty(&hslot2->head)) continue; @@ -3411,10 +3599,12 @@ __setup("uhash_entries=", set_uhash_entries); void __init udp_table_init(struct udp_table *table, const char *name) { - unsigned int i; + unsigned int i, slot_size; + slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) + + udp_hash4_slot_size(); table->hash = alloc_large_system_hash(name, - 2 * sizeof(struct udp_hslot), + slot_size, uhash_entries, 21, /* one slot per 2 MB */ 0, @@ -3423,17 +3613,18 @@ void __init udp_table_init(struct udp_table *table, const char *name) UDP_HTABLE_SIZE_MIN, UDP_HTABLE_SIZE_MAX); - table->hash2 = table->hash + (table->mask + 1); + table->hash2 = (void *)(table->hash + (table->mask + 1)); for (i = 0; i <= table->mask; i++) { INIT_HLIST_HEAD(&table->hash[i].head); table->hash[i].count = 0; spin_lock_init(&table->hash[i].lock); } for (i = 0; i <= table->mask; i++) { - INIT_HLIST_HEAD(&table->hash2[i].head); - table->hash2[i].count = 0; - spin_lock_init(&table->hash2[i].lock); + INIT_HLIST_HEAD(&table->hash2[i].hslot.head); + table->hash2[i].hslot.count = 0; + spin_lock_init(&table->hash2[i].hslot.lock); } + udp_table_hash4_init(table); } u32 udp_flow_hashrnd(void) @@ -3459,18 +3650,21 @@ static void __net_init udp_sysctl_init(struct net *net) static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_entries) { struct udp_table *udptable; + unsigned int slot_size; int i; udptable = kmalloc(sizeof(*udptable), GFP_KERNEL); if (!udptable) goto out; - udptable->hash = vmalloc_huge(hash_entries * 2 * sizeof(struct udp_hslot), + slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) + + udp_hash4_slot_size(); + udptable->hash = vmalloc_huge(hash_entries * slot_size, GFP_KERNEL_ACCOUNT); if (!udptable->hash) goto free_table; - udptable->hash2 = udptable->hash + hash_entries; + udptable->hash2 = (void *)(udptable->hash + hash_entries); udptable->mask = hash_entries - 1; udptable->log = ilog2(hash_entries); @@ -3479,10 +3673,11 @@ static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_ent udptable->hash[i].count = 0; spin_lock_init(&udptable->hash[i].lock); - INIT_HLIST_HEAD(&udptable->hash2[i].head); - udptable->hash2[i].count = 0; - spin_lock_init(&udptable->hash2[i].lock); + INIT_HLIST_HEAD(&udptable->hash2[i].hslot.head); + udptable->hash2[i].hslot.count = 0; + spin_lock_init(&udptable->hash2[i].hslot.lock); } + udp_table_hash4_init(udptable); return udptable; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index fc5c5346202530ba38d2f944ae11e5c5078b4ae0..c214af56c126bcdd09a58b3f1134bd8acfd2830e 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -132,6 +132,9 @@ struct uncached_list { spinlock_t lock; struct 
list_head head; struct list_head quarantine; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 954afe6ba883e7fc00fe01d098469bd783956fa4..2590ef4437338bb737a0342964e5e5be29af22dc 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -113,8 +113,19 @@ void udp_v6_rehash(struct sock *sk) u16 new_hash = ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, inet_sk(sk)->inet_num); + u16 new_hash4; - udp_lib_rehash(sk, new_hash); + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) { + new_hash4 = udp_ehashfn(sock_net(sk), + sk->sk_rcv_saddr, sk->sk_num, + sk->sk_daddr, sk->sk_dport); + } else { + new_hash4 = udp6_ehashfn(sock_net(sk), + &sk->sk_v6_rcv_saddr, sk->sk_num, + &sk->sk_v6_daddr, sk->sk_dport); + } + + udp_lib_rehash(sk, new_hash, new_hash4); } static int compute_score(struct sock *sk, struct net *net, @@ -219,6 +230,74 @@ static struct sock *udp6_lib_lookup2(struct net *net, return result; } +#if IS_ENABLED(CONFIG_BASE_SMALL) +static struct sock *udp6_lib_lookup4(struct net *net, + const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, + unsigned int hnum, int dif, int sdif, + struct udp_table *udptable) +{ + return NULL; +} + +static void udp6_hash4(struct sock *sk) +{ +} +#else /* !CONFIG_BASE_SMALL */ +static struct sock *udp6_lib_lookup4(struct net *net, + const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, + unsigned int hnum, int dif, int sdif, + struct udp_table *udptable) +{ + const __portpair ports = INET_COMBINED_PORTS(sport, hnum); + const struct hlist_nulls_node *node; + struct udp_hslot *hslot4; + unsigned int hash4, slot; + struct udp_sock *up; + struct sock *sk; + + hash4 = udp6_ehashfn(net, daddr, hnum, saddr, sport); + slot = hash4 & udptable->mask; + hslot4 = &udptable->hash4[slot]; + +begin: + udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) { + sk = (struct sock *)up; + if (inet6_match(net, sk, saddr, daddr, ports, dif, sdif)) + return sk; + } + + /* if the nulls value we got at the end of this lookup is not the + * expected one, we must restart lookup. We probably met an item that + * was moved to another chain due to rehash. 
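+ * The nulls value at the end of a chain encodes the slot index, so a + * mismatch means the walk drifted onto another chain and must be + * restarted from the head.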
+ */ + if (get_nulls_value(node) != slot) + goto begin; + + return NULL; +} + +static void udp6_hash4(struct sock *sk) +{ + struct net *net = sock_net(sk); + unsigned int hash; + + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) { + udp4_hash4(sk); + return; + } + + if (sk_unhashed(sk) || ipv6_addr_any(&sk->sk_v6_rcv_saddr)) + return; + + hash = udp6_ehashfn(net, &sk->sk_v6_rcv_saddr, sk->sk_num, + &sk->sk_v6_daddr, sk->sk_dport); + + udp_lib_hash4(sk, hash); +} +#endif /* CONFIG_BASE_SMALL */ + /* rcu_read_lock() must be held */ struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, @@ -227,13 +306,19 @@ struct sock *__udp6_lib_lookup(struct net *net, struct sk_buff *skb) { unsigned short hnum = ntohs(dport); - unsigned int hash2, slot2; struct udp_hslot *hslot2; struct sock *result, *sk; + unsigned int hash2; hash2 = ipv6_portaddr_hash(net, daddr, hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); + + if (udp_has_hash4(hslot2)) { + result = udp6_lib_lookup4(net, saddr, sport, daddr, hnum, + dif, sdif, udptable); + if (result) /* udp6_lib_lookup4 returns sk or NULL */ + return result; + } /* Lookup connected or non-wildcard sockets */ result = udp6_lib_lookup2(net, saddr, sport, @@ -260,8 +345,7 @@ struct sock *__udp6_lib_lookup(struct net *net, /* Lookup wildcard sockets */ hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); result = udp6_lib_lookup2(net, saddr, sport, &in6addr_any, hnum, dif, sdif, @@ -862,7 +946,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, udptable->mask; hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask; start_lookup: - hslot = &udptable->hash2[hash2]; + hslot = &udptable->hash2[hash2].hslot; offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); } @@ -1068,14 +1152,13 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net, { struct udp_table *udptable = net->ipv4.udp_table; unsigned short hnum = ntohs(loc_port); - unsigned int hash2, slot2; struct udp_hslot *hslot2; + unsigned int hash2; __portpair ports; struct sock *sk; hash2 = ipv6_portaddr_hash(net, loc_addr, hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); ports = INET_COMBINED_PORTS(rmt_port, hnum); udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { @@ -1171,6 +1254,18 @@ static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len); } +static int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +{ + int res; + + lock_sock(sk); + res = __ip6_datagram_connect(sk, uaddr, addr_len); + if (!res) + udp6_hash4(sk); + release_sock(sk); + return res; +} + /** * udp6_hwcsum_outgoing - handle outgoing HW checksumming * @sk: socket we are sending on @@ -1770,7 +1865,7 @@ struct proto udpv6_prot = { .owner = THIS_MODULE, .close = udp_lib_close, .pre_connect = udpv6_pre_connect, - .connect = ip6_datagram_connect, + .connect = udpv6_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .init = udpv6_init_sock, diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c index b21799d468d2815413cc1334cb6c5e5df7fe231d..d2492d050fe601fbb1ce8e3cb598603914712a83 100644 --- a/net/netfilter/nf_conntrack_bpf.c +++ b/net/netfilter/nf_conntrack_bpf.c @@ -230,9 +230,7 @@
static int _nf_conntrack_btf_struct_access(struct bpf_verifier_log *log, return 0; } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in nf_conntrack BTF"); +__bpf_kfunc_start_defs(); /* bpf_xdp_ct_alloc - Allocate a new CT entry * @@ -467,9 +465,9 @@ __bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status) return nf_ct_change_status_common(nfct, status); } -__diag_pop() +__bpf_kfunc_end_defs(); -BTF_SET8_START(nf_ct_kfunc_set) +BTF_KFUNCS_START(nf_ct_kfunc_set) BTF_ID_FLAGS(func, bpf_xdp_ct_alloc, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_xdp_ct_lookup, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_skb_ct_alloc, KF_ACQUIRE | KF_RET_NULL) @@ -480,7 +478,7 @@ BTF_ID_FLAGS(func, bpf_ct_set_timeout, KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_ct_change_timeout, KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_ct_set_status, KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_ct_change_status, KF_TRUSTED_ARGS) -BTF_SET8_END(nf_ct_kfunc_set) +BTF_KFUNCS_END(nf_ct_kfunc_set) static const struct btf_kfunc_id_set nf_conntrack_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/netfilter/nf_nat_bpf.c b/net/netfilter/nf_nat_bpf.c index 141ee7783223dad0e8511565c233873223c3e2e2..481be15609b16ab892aa1ba891d6906477623891 100644 --- a/net/netfilter/nf_nat_bpf.c +++ b/net/netfilter/nf_nat_bpf.c @@ -12,9 +12,7 @@ #include #include -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in nf_nat BTF"); +__bpf_kfunc_start_defs(); /* bpf_ct_set_nat_info - Set source or destination nat address * @@ -54,11 +52,11 @@ __bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct, return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0; } -__diag_pop() +__bpf_kfunc_end_defs(); -BTF_SET8_START(nf_nat_kfunc_set) +BTF_KFUNCS_START(nf_nat_kfunc_set) BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS) -BTF_SET8_END(nf_nat_kfunc_set) +BTF_KFUNCS_END(nf_nat_kfunc_set) static const struct btf_kfunc_id_set nf_bpf_nat_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/socket.c b/net/socket.c index bad58f23f307229e398354efc0024f058e843eb0..0c3e8a8153475b682a96369dc130a72e27ce3b44 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1692,20 +1692,16 @@ struct file *__sys_socket_file(int family, int type, int protocol) * Therefore, __weak is needed to ensure that the call is still * emitted, by telling the compiler that we don't know what the * function might eventually be. - * - * __diag_* below are needed to dismiss the missing prototype warning. 
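+ * The __bpf_hook_start()/__bpf_hook_end() pair below now takes care of + * dismissing the missing-prototype warning for this fmod_ret hook.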
*/ -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "A fmod_ret entry point for BPF programs"); +__bpf_hook_start(); __weak noinline int update_socket_protocol(int family, int type, int protocol) { return protocol; } -__diag_pop(); +__bpf_hook_end(); int __sys_socket(int family, int type, int protocol) { diff --git a/net/xfrm/xfrm_interface_bpf.c b/net/xfrm/xfrm_interface_bpf.c index d74f3fd20f2bf25f0166b293fc378823b2612e8b..5ea15037ebd1049ee1938a0bac0a335d04dd87c5 100644 --- a/net/xfrm/xfrm_interface_bpf.c +++ b/net/xfrm/xfrm_interface_bpf.c @@ -27,9 +27,7 @@ struct bpf_xfrm_info { int link; }; -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in xfrm_interface BTF"); +__bpf_kfunc_start_defs(); /* bpf_skb_get_xfrm_info - Get XFRM metadata * @@ -93,12 +91,12 @@ __bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bp return 0; } -__diag_pop() +__bpf_kfunc_end_defs(); -BTF_SET8_START(xfrm_ifc_kfunc_set) +BTF_KFUNCS_START(xfrm_ifc_kfunc_set) BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info) BTF_ID_FLAGS(func, bpf_skb_set_xfrm_info) -BTF_SET8_END(xfrm_ifc_kfunc_set) +BTF_KFUNCS_END(xfrm_ifc_kfunc_set) static const struct btf_kfunc_id_set xfrm_interface_kfunc_set = { .owner = THIS_MODULE, diff --git a/scripts/Makefile b/scripts/Makefile index 576cf64be6677c28e1a048e082594e303a4c7f78..72af39a5945c555ebd7fb8d3422e8bba95c6e5c1 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -32,8 +32,10 @@ HOSTLDLIBS_sign-file = $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null | ifdef CONFIG_UNWINDER_ORC ifeq ($(ARCH),x86_64) ARCH := x86 -endif HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/x86/include +else +HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/$(ARCH)/include +endif HOSTCFLAGS_sorttable.o += -DUNWINDER_ORC_ENABLED endif diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index e702552fb131af438af4d36c3ddb78a3a46ca403..f614b138b04668d164dd9df36994e00c562c4104 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -252,6 +252,13 @@ ifdef CONFIG_OBJTOOL objtool := $(objtree)/tools/objtool/objtool +ifdef CONFIG_FRAME_POINTER_VALIDATION + +objtool-args-$(CONFIG_STACK_VALIDATION) += --stackval +objtool-args-$(CONFIG_UNWINDER_ORC) += --orc + +else + objtool-args-$(CONFIG_HAVE_JUMP_LABEL_HACK) += --hacks=jump_label objtool-args-$(CONFIG_HAVE_NOINSTR_HACK) += --hacks=noinstr objtool-args-$(CONFIG_CALL_DEPTH_TRACKING) += --hacks=skylake @@ -271,6 +278,8 @@ objtool-args-$(CONFIG_HAVE_UACCESS_VALIDATION) += --uaccess objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable objtool-args-$(CONFIG_PREFIX_SYMBOLS) += --prefix=$(CONFIG_FUNCTION_PADDING_BYTES) +endif + objtool-args = $(objtool-args-y) \ $(if $(delay-objtool), --link) \ $(if $(part-of-module), --module) diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 4eee155121a8b37c201e70c8327c3ea556d5cd94..6e97230421d4bc264acbc0dc6068dd738b3e7595 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -90,6 +90,14 @@ else $(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG) endif +anolis_defconfig: $(obj)/conf + $(Q)DIST_DO_GENERATE_DOT_CONFIG=Y DIST_CONFIG_KERNEL_NAME=ANCK $(MAKE) -C $(srctree)/anolis/ dist-defconfig + $(Q)$(MAKE) -C $(srctree) olddefconfig + +anolis-debug_defconfig: $(obj)/conf + $(Q)DIST_DO_GENERATE_DOT_CONFIG=Y DIST_CONFIG_KERNEL_NAME=ANCK $(MAKE) -C $(srctree)/anolis/ dist-debug-defconfig + $(Q)$(MAKE) -C $(srctree) olddefconfig + %_defconfig: $(obj)/conf $(Q)$< $(silent) 
--defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig) @@ -118,6 +126,8 @@ clean-files += tests/.cache # Help text used by make help help: @echo 'Configuration targets:' + @echo ' anolis_defconfig - Generate anolis config for production environment' + @echo ' anolis-debug_defconfig - Generate anolis config for testing environment' @echo ' config - Update current config utilising a line-oriented program' @echo ' nconfig - Update current config utilising a ncurses menu based program' @echo ' menuconfig - Update current config utilising a menu based program' diff --git a/scripts/package/buildtar b/scripts/package/buildtar index 65b4ea50296219e2cfed406dddd3cb4eac0737ea..93158943a4f73d868660e80eb488a3475512e7ac 100755 --- a/scripts/package/buildtar +++ b/scripts/package/buildtar @@ -64,6 +64,9 @@ case "${ARCH}" in alpha) [ -f "${objtree}/arch/alpha/boot/vmlinux.gz" ] && cp -v -- "${objtree}/arch/alpha/boot/vmlinux.gz" "${tmpdir}/boot/vmlinuz-${KERNELRELEASE}" ;; + sw_64) + [ -f "${objtree}/arch/sw_64/boot/vmlinux.bin" ] && cp -v -- "${objtree}/arch/sw_64/boot/vmlinux.bin" "${tmpdir}/boot/vmlinux-bin-${KERNELRELEASE}" + ;; parisc*) [ -f "${KBUILD_IMAGE}" ] && cp -v -- "${KBUILD_IMAGE}" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}" [ -f "${objtree}/lifimage" ] && cp -v -- "${objtree}/lifimage" "${tmpdir}/boot/lifimage-${KERNELRELEASE}" diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian index 5044224cf6714b3e5738f1e6d30dda05c589e3ff..2586bcd5f43a3ab28b3ac512e6136e7c1bede7fd 100755 --- a/scripts/package/mkdebian +++ b/scripts/package/mkdebian @@ -26,7 +26,7 @@ set_debarch() { # Attempt to find the correct Debian architecture case "$UTS_MACHINE" in - i386|ia64|alpha|m68k|riscv*) + i386|ia64|alpha|m68k|riscv*|sw_64) debarch="$UTS_MACHINE" ;; x86_64) debarch=amd64 ;; diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 40ae6b2c7a6da590f36d33caa543fd1376ba4945..73558f7eb690543f6da7eff1b6c93c4c5d4b0bad 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -52,6 +52,12 @@ #define R_AARCH64_CALL26 283 +#ifndef EM_SW64 +#define EM_SW64 0x9916 +#define R_SW64_NONE 0 +#define R_SW64_REFQUAD 2 /* Direct 64 bit */ +#endif + static int fd_map; /* File descriptor for file being modified. */ static int mmap_failed; /* Boolean flag. */ static char gpfx; /* prefix for global symbol name (sometimes '_') */ @@ -326,6 +332,16 @@ static int make_nop_arm64(void *map, size_t const offset) return 0; } +static unsigned char ideal_nop4_sw64[4] = {0x5f, 0x07, 0xff, 0x43}; + +static int make_nop_sw64(void *map, size_t const offset) +{ + /* Convert to nop */ + ulseek(offset, SEEK_SET); + uwrite(ideal_nop, 4); + return 0; +} + static int write_file(const char *fname) { char tmp_file[strlen(fname) + 4]; @@ -475,6 +491,21 @@ static int LARCH64_is_fake_mcount(Elf64_Rel const *rp) return 1; } +#define SW64_FAKEMCOUNT_OFFSET 4 + +static int sw64_is_fake_mcount(Elf64_Rel const *rp) +{ + static Elf64_Addr old_r_offset = ~(Elf64_Addr)0; + Elf64_Addr current_r_offset = _w(rp->r_offset); + int is_fake; + + is_fake = (old_r_offset != ~(Elf64_Addr)0) && + (current_r_offset - old_r_offset == SW64_FAKEMCOUNT_OFFSET); + old_r_offset = current_r_offset; + + return is_fake; +} + /* 64-bit EM_MIPS has weird ELF64_Rela.r_info. 
* http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40] @@ -598,6 +629,14 @@ static int do_file(char const *const fname) case EM_S390: /* reltype: e_class */ break; case EM_SH: reltype = R_SH_DIR32; gpfx = 0; break; case EM_SPARCV9: reltype = R_SPARC_64; break; + case EM_SW64: + reltype = R_SW64_REFQUAD; + make_nop = make_nop_sw64; + rel_type_nop = R_SW64_NONE; + ideal_nop = ideal_nop4_sw64; + mcount_adjust_64 = -12; + is_fake_mcount64 = sw64_is_fake_mcount; + break; case EM_X86_64: make_nop = make_nop_x86; ideal_nop = ideal_nop5_x86_64; diff --git a/scripts/remove-stale-files b/scripts/remove-stale-files index 8b1a636f854342154a54884ad47f743174ebf40a..38eb84eb605b64f1a064a50a6dd20628049e8a83 100755 --- a/scripts/remove-stale-files +++ b/scripts/remove-stale-files @@ -39,3 +39,5 @@ rm -rf include/ksym find . -name '*.usyms' | xargs rm -f rm -f binkernel.spec + +rm -f lib/test_fortify.log diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index a6bd817efc1a699b2500a555f2f1d9c901407aa5..cebba0ab51bc7530bf20ad32e23df584c56ceb00 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -11,6 +11,7 @@ config IMA select TCG_TPM if HAS_IOMEM select TCG_TIS if TCG_TPM && X86 select TCG_CRB if TCG_TPM && ACPI + select TCG_HYGON if TCG_TPM && ACPI && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES select INTEGRITY_AUDIT if AUDIT help diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 7f3a000fab0ce0f9827f215156fb35cffc6c49b6..df37a85cf27cc79d938406c2b54a778d3945b66f 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -410,7 +410,10 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus) { unsigned long timeout; - snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); + if (bus->hygon_dword_access) + snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); + else + snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); timeout = jiffies + msecs_to_jiffies(100); while (!snd_hdac_chip_readb(bus, GCTL) && time_before(jiffies, timeout)) @@ -475,7 +478,10 @@ static void azx_int_disable(struct hdac_bus *bus) /* disable interrupts in stream descriptor */ list_for_each_entry(azx_dev, &bus->stream_list, list) - snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0); + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL, SD_INT_MASK, 0); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0); /* disable SIE for all streams & disable controller CIE and GIE */ snd_hdac_chip_writel(bus, INTCTL, 0); diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c index fe0958f9969c307138a4272676723d5c02b89377..2312266939b2121b824b6d7bd612a13c6d5e8740 100644 --- a/sound/hda/hdac_stream.c +++ b/sound/hda/hdac_stream.c @@ -146,11 +146,15 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev) stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream); else stripe_ctl = 0; - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, + stripe_ctl); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, stripe_ctl); } /* set DMA start and interrupt mask */ - if (bus->access_sdnctl_in_dword) + if (bus->access_sdnctl_in_dword || bus->hygon_dword_access) snd_hdac_stream_updatel(azx_dev, SD_CTL, 
0, SD_CTL_DMA_START | SD_INT_MASK); else @@ -166,11 +170,21 @@ EXPORT_SYMBOL_GPL(snd_hdac_stream_start); */ static void snd_hdac_stream_clear(struct hdac_stream *azx_dev) { - snd_hdac_stream_updateb(azx_dev, SD_CTL, - SD_CTL_DMA_START | SD_INT_MASK, 0); - snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ - if (azx_dev->stripe) - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + struct hdac_bus *bus = azx_dev->bus; + + if (bus->hygon_dword_access) { + snd_hdac_stream_updatel(azx_dev, SD_CTL, + SD_CTL_DMA_START | SD_INT_MASK, 0); + snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ + if (azx_dev->stripe) + snd_hdac_stream_updatel(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + } else { + snd_hdac_stream_updateb(azx_dev, SD_CTL, + SD_CTL_DMA_START | SD_INT_MASK, 0); + snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ + if (azx_dev->stripe) + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + } azx_dev->running = false; } @@ -225,12 +239,16 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev) { unsigned char val; int dma_run_state; + struct hdac_bus *bus = azx_dev->bus; snd_hdac_stream_clear(azx_dev); dma_run_state = snd_hdac_stream_readb(azx_dev, SD_CTL) & SD_CTL_DMA_START; - snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); /* wait for hardware to report that the stream entered reset */ snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, (val & SD_CTL_STREAM_RESET), 3, 300); @@ -238,7 +256,10 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev) if (azx_dev->bus->dma_stop_delay && dma_run_state) udelay(azx_dev->bus->dma_stop_delay); - snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); /* wait for hardware to report that the stream is out of reset */ snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, !(val & SD_CTL_STREAM_RESET), 3, 300); diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 406779625fb5935cf54b34036bea3f48c03407e5..b69e7b94673c6a8130a007af3e8e9524bf894081 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1061,6 +1061,16 @@ static void stream_update(struct hdac_bus *bus, struct hdac_stream *s) } } +static void azx_rirb_zxdelay(struct azx *chip, int enable) +{ + if (chip->remap_diu_addr) { + if (!enable) + writel(0x0, (char *)chip->remap_diu_addr + 0x490a8); + else + writel(0x1000000, (char *)chip->remap_diu_addr + 0x490a8); + } +} + irqreturn_t azx_interrupt(int irq, void *dev_id) { struct azx *chip = dev_id; @@ -1103,9 +1113,14 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); active = true; if (status & RIRB_INT_RESPONSE) { - if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) + if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) || + (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)) { + azx_rirb_zxdelay(chip, 1); udelay(80); + } snd_hdac_bus_update_rirb(bus); + if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY) + azx_rirb_zxdelay(chip, 0); } } } while (active && ++repeat < 10); diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index 
8556031bcd68e48c4ad1c6d4d4a094fce0b2a3fb..9db89f4c7b3f84300c6e654c70ea02bdd7c8422b 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -45,6 +45,7 @@ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */ +#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 31) /* Put a delay before read */ enum { AZX_SNOOP_TYPE_NONE, @@ -143,6 +144,8 @@ struct azx { unsigned int disabled:1; /* disabled by vga_switcheroo */ unsigned int pm_prepared:1; + void __iomem *remap_diu_addr; + /* GTS present */ unsigned int gts_present:1; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 134c6f6e0959ae6356f49e60bd96fa21590fb059..950288f20522ed3bce8385e04a01f2b3f5c8aba3 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -237,7 +237,9 @@ enum { AZX_DRIVER_CTHDA, AZX_DRIVER_CMEDIA, AZX_DRIVER_ZHAOXIN, + AZX_DRIVER_ZXHDMI, AZX_DRIVER_LOONGSON, + AZX_DRIVER_HYGON, AZX_DRIVER_GENERIC, AZX_NUM_DRIVERS, /* keep this as last entry */ }; @@ -349,7 +351,9 @@ static const char * const driver_short_names[] = { [AZX_DRIVER_CTHDA] = "HDA Creative", [AZX_DRIVER_CMEDIA] = "HDA C-Media", [AZX_DRIVER_ZHAOXIN] = "HDA Zhaoxin", + [AZX_DRIVER_ZXHDMI] = "HDA Zhaoxin HDMI", [AZX_DRIVER_LOONGSON] = "HDA Loongson", + [AZX_DRIVER_HYGON] = "HDA Hygon", [AZX_DRIVER_GENERIC] = "HD-Audio Generic", }; @@ -371,6 +375,31 @@ static void update_pci_byte(struct pci_dev *pci, unsigned int reg, pci_write_config_byte(pci, reg, data); } +static int azx_init_pci_zx(struct azx *chip) +{ + struct snd_card *card = chip->card; + unsigned int diu_reg; + struct pci_dev *diu_pci = NULL; + + azx_bus(chip)->polling_mode = 1; + diu_pci = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x3a03, NULL); + if (!diu_pci) { + dev_info(card->dev, "zx_hda no KX-5000 device.\n"); + return -ENXIO; + } + pci_read_config_dword(diu_pci, PCI_BASE_ADDRESS_0, &diu_reg); + chip->remap_diu_addr = ioremap(diu_reg, 0x50000); + pci_dev_put(diu_pci); + dev_info(card->dev, "zx_hda %x %p\n", diu_reg, chip->remap_diu_addr); + return 0; +} + +static void azx_free_pci_zx(struct azx *chip) +{ + if (chip->remap_diu_addr) + iounmap(chip->remap_diu_addr); +} + static void azx_init_pci(struct azx *chip) { int snoop_type = azx_get_snoop_type(chip); @@ -1360,6 +1389,9 @@ static void azx_free(struct azx *chip) hda->init_failed = 1; /* to be sure */ complete_all(&hda->probe_wait); + if (chip->driver_type == AZX_DRIVER_ZXHDMI) + azx_free_pci_zx(chip); + if (use_vga_switcheroo(hda)) { if (chip->disabled && hda->probe_continued) snd_hda_unlock_devices(&chip->bus); @@ -1547,7 +1579,8 @@ static int check_position_fix(struct azx *chip, int fix) } /* Check VIA/ATI HD Audio Controller exist */ - if (chip->driver_type == AZX_DRIVER_VIA) { + if (chip->driver_type == AZX_DRIVER_VIA || + chip->driver_type == AZX_DRIVER_ZHAOXIN) { dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n"); return POS_FIX_VIACOMBO; } @@ -1701,7 +1734,7 @@ static void azx_check_snoop_available(struct azx *chip) snoop = true; if (azx_get_snoop_type(chip) == AZX_SNOOP_TYPE_NONE && - chip->driver_type == AZX_DRIVER_VIA) { + (chip->driver_type == AZX_DRIVER_VIA || chip->driver_type == AZX_DRIVER_ZHAOXIN)) { /* force to non-snoop mode for a new VIA controller * when BIOS is set */ @@ -1753,6 +1786,8 @@ static int default_bdl_pos_adj(struct azx *chip) case AZX_DRIVER_ICH: case AZX_DRIVER_PCH: 
return 1; + case AZX_DRIVER_ZXHDMI: + return 128; default: return 32; } @@ -1878,6 +1913,15 @@ static int azx_first_init(struct azx *chip) bus->access_sdnctl_in_dword = 1; } + if (chip->driver_type == AZX_DRIVER_HYGON && + chip->pci->device == PCI_DEVICE_ID_HYGON_18H_M05H_HDA) + bus->hygon_dword_access = 1; + + chip->remap_diu_addr = NULL; + + if (chip->driver_type == AZX_DRIVER_ZXHDMI) + azx_init_pci_zx(chip); + err = pcim_iomap_regions(pci, 1 << 0, "ICH HD audio"); if (err < 0) return err; @@ -1979,6 +2023,7 @@ static int azx_first_init(struct azx *chip) chip->capture_streams = ATIHDMI_NUM_CAPTURE; break; case AZX_DRIVER_GFHDMI: + case AZX_DRIVER_ZXHDMI: case AZX_DRIVER_GENERIC: default: chip->playback_streams = ICH6_NUM_PLAYBACK; @@ -2693,6 +2738,15 @@ static const struct pci_device_id azx_ids[] = { { PCI_VDEVICE(VIA, 0x9170), .driver_data = AZX_DRIVER_GENERIC }, /* VIA GFX VT6122/VX11 */ { PCI_VDEVICE(VIA, 0x9140), .driver_data = AZX_DRIVER_GENERIC }, + { PCI_VDEVICE(VIA, 0x9141), .driver_data = AZX_DRIVER_GENERIC }, + { PCI_VDEVICE(VIA, 0x9142), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(VIA, 0x9144), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(VIA, 0x9145), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(VIA, 0x9146), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, /* SIS966 */ { PCI_VDEVICE(SI, 0x7502), .driver_data = AZX_DRIVER_SIS }, /* ULI M5461 */ @@ -2748,11 +2802,25 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI }, /* Zhaoxin */ { PCI_VDEVICE(ZHAOXIN, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN }, + { PCI_VDEVICE(ZHAOXIN, 0x9141), .driver_data = AZX_DRIVER_GENERIC }, + { PCI_VDEVICE(ZHAOXIN, 0x9142), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9144), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9145), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9146), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, /* Loongson HDAudio*/ { PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDA), .driver_data = AZX_DRIVER_LOONGSON }, { PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDMI), .driver_data = AZX_DRIVER_LOONGSON }, + /* Hygon HDAudio */ + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_HDA), + .driver_data = AZX_DRIVER_HYGON | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_NO_MSI }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_HDA), + .driver_data = AZX_DRIVER_HYGON }, { 0, } }; MODULE_DEVICE_TABLE(pci, azx_ids); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index f030700cd60d75f034514e3743fdcba4de5082d9..81c3a7ed34823d0d9583bee794711b0198b77eb2 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4496,6 +4496,20 @@ static int patch_via_hdmi(struct hda_codec *codec) return patch_simple_hdmi(codec, 
VIAHDMI_CVT_NID, VIAHDMI_PIN_NID); } +/* Zhaoxin HDMI Implementation */ +static int patch_zhaoxin_hdmi(struct hda_codec *codec) +{ + int err; + + err = patch_generic_hdmi(codec); + codec->no_sticky_stream = 1; + + if (err) + return err; + + return 0; +} + static int patch_gf_hdmi(struct hda_codec *codec) { int err; @@ -4618,6 +4632,15 @@ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi), HDA_CODEC_ENTRY(0x11069f85, "VX11 HDMI/DP", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x11069f88, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f89, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8a, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8b, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8c, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8d, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8e, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8f, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f90, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI", patch_i915_cpt_hdmi), HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x80862801, "Bearlake HDMI", patch_generic_hdmi), @@ -4651,6 +4674,15 @@ HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x808629fb, "Crestline HDMI", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x1d179f88, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f89, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8a, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8b, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8c, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8d, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8e, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8f, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f90, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), /* special ID for generic HDMI */ HDA_CODEC_ENTRY(HDA_CODEC_ID_GENERIC_HDMI, "Generic HDMI", patch_generic_hdmi), {} /* terminator */ diff --git a/tools/arch/arm64/include/asm/orc_types.h b/tools/arch/arm64/include/asm/orc_types.h new file mode 100644 index 0000000000000000000000000000000000000000..e18971fdf867fcadb2ae0c9e34d36bf77c33bbf9 --- /dev/null +++ b/tools/arch/arm64/include/asm/orc_types.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Author: Madhavan T. Venkataraman (madvenka@linux.microsoft.com) + * + * Copyright (C) 2022 Microsoft Corporation + */ + +#ifndef _ORC_TYPES_H +#define _ORC_TYPES_H + +#include +#include + +/* + * The ORC_REG_* registers are base registers which are used to find other + * registers on the stack. + * + * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the + * address of the previous frame: the caller's SP before it called the current + * function. + * + * ORC_REG_UNDEFINED means the corresponding register's value didn't change in + * the current frame. + * + * We only use base registers SP and FP -- which the previous SP is based on -- + * and PREV_SP and UNDEFINED -- which the previous FP is based on. 
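+ * arm64 frame records save the previous FP and LR as an adjacent pair, + * so these four base-register encodings are enough to walk the stack.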
+ */ +#define ORC_REG_UNDEFINED 0 +#define ORC_REG_PREV_SP 1 +#define ORC_REG_SP 2 +#define ORC_REG_FP 3 +#define ORC_REG_MAX 4 + +#define ORC_TYPE_UNDEFINED 0 +#define ORC_TYPE_END_OF_STACK 1 +#define ORC_TYPE_CALL 2 +#define ORC_TYPE_REGS 3 +#define ORC_TYPE_REGS_PARTIAL 4 + +#ifndef __ASSEMBLY__ +#include + +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. It tells the + * unwinder how to find the previous SP and BP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. + */ +struct orc_entry { + s16 sp_offset; + s16 fp_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned sp_reg:4; + unsigned fp_reg:4; + unsigned type:4; + unsigned signal:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned fp_reg:4; + unsigned sp_reg:4; + unsigned unused:3; + unsigned signal:1; + unsigned type:4; +#endif +} __packed; + +#endif /* __ASSEMBLY__ */ + +#endif /* _ORC_TYPES_H */ diff --git a/tools/arch/arm64/include/asm/unwind_hints.h b/tools/arch/arm64/include/asm/unwind_hints.h new file mode 100644 index 0000000000000000000000000000000000000000..e11a0586b434c436203cf1e8b430ba051e0f84ec --- /dev/null +++ b/tools/arch/arm64/include/asm/unwind_hints.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_ARM64_UNWIND_HINTS_H +#define _ASM_ARM64_UNWIND_HINTS_H + +#include + +#include "orc_types.h" + +#ifdef CONFIG_STACK_VALIDATION + +#ifndef __ASSEMBLY__ + +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ + "987: \n\t" \ + ".pushsection .discard.unwind_hints\n\t" \ + /* struct unwind_hint */ \ + ".long 987b - .\n\t" \ + ".short " __stringify(sp_offset) "\n\t" \ + ".byte " __stringify(sp_reg) "\n\t" \ + ".byte " __stringify(type) "\n\t" \ + ".byte " __stringify(signal) "\n\t" \ + ".balign 4 \n\t" \ + ".popsection\n\t" + +#else /* __ASSEMBLY__ */ + +/* + * In asm, there are two kinds of code: normal C-type callable functions and + * the rest. The normal callable functions can be called by other code, and + * don't do anything unusual with the stack. Such normal callable functions + * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this + * category. In this case, no special debugging annotations are needed because + * objtool can automatically generate the ORC data for the ORC unwinder to read + * at runtime. + * + * Anything which doesn't fall into the above category, such as syscall and + * interrupt handlers, tends to not be called directly by other functions, and + * often does unusual non-C-function-type things with the stack pointer. Such + * code needs to be annotated such that objtool can understand it. The + * following CFI hint macros are for this type of code. + * + * These macros provide hints to objtool about the state of the stack at each + * instruction. Objtool starts from the hints and follows the code flow, + * making automatic CFI adjustments when it sees pushes and pops, filling out + * the debuginfo as necessary. It will also warn if it sees any + * inconsistencies. + */ +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.Lhere_\@: + .pushsection .discard.unwind_hints + /* struct unwind_hint */ + .long .Lhere_\@ - . 
+ .short \sp_offset + .byte \sp_reg + .byte \type + .byte \signal + .balign 4 + .popsection +.endm + +#endif /* __ASSEMBLY__ */ + +#else /* !CONFIG_STACK_VALIDATION */ + +#ifndef __ASSEMBLY__ + +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" +#else +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.endm +#endif + +#endif /* CONFIG_STACK_VALIDATION */ +#ifdef __ASSEMBLY__ + +.macro UNWIND_HINT_FTRACE, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_FTRACE + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +.macro UNWIND_HINT_REGS, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_REGS + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +.macro UNWIND_HINT_IRQ, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_IRQ_STACK + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_ARM64_UNWIND_HINTS_H */ diff --git a/tools/arch/loongarch/include/asm/inst.h b/tools/arch/loongarch/include/asm/inst.h new file mode 100644 index 0000000000000000000000000000000000000000..c25b5853181dba4cb087c85af0e025cb18c53272 --- /dev/null +++ b/tools/arch/loongarch/include/asm/inst.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_INST_H +#define _ASM_INST_H + +#include + +#define LOONGARCH_INSN_NOP 0x03400000 + +enum reg0i15_op { + break_op = 0x54, +}; + +enum reg0i26_op { + b_op = 0x14, + bl_op = 0x15, +}; + +enum reg1i21_op { + beqz_op = 0x10, + bnez_op = 0x11, + bceqz_op = 0x12, /* bits[9:8] = 0x00 */ + bcnez_op = 0x12, /* bits[9:8] = 0x01 */ +}; + +enum reg2_op { + ertn_op = 0x1920e, +}; + +enum reg2i12_op { + addid_op = 0x0b, + andi_op = 0x0d, + ldd_op = 0xa3, + std_op = 0xa7, +}; + +enum reg2i14_op { + ldptrd_op = 0x26, + stptrd_op = 0x27, +}; + +enum reg2i16_op { + jirl_op = 0x13, + beq_op = 0x16, + bne_op = 0x17, + blt_op = 0x18, + bge_op = 0x19, + bltu_op = 0x1a, + bgeu_op = 0x1b, +}; + +struct reg0i15_format { + unsigned int immediate : 15; + unsigned int opcode : 17; +}; + +struct reg0i26_format { + unsigned int immediate_h : 10; + unsigned int immediate_l : 16; + unsigned int opcode : 6; +}; + +struct reg1i21_format { + unsigned int immediate_h : 5; + unsigned int rj : 5; + unsigned int immediate_l : 16; + unsigned int opcode : 6; +}; + +struct reg2_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int opcode : 22; +}; + +struct reg2i12_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 12; + unsigned int opcode : 10; +}; + +struct reg2i14_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 14; + unsigned int opcode : 8; +}; + +struct reg2i16_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 16; + unsigned int opcode : 6; +}; + +union loongarch_instruction { + unsigned int word; + struct reg0i15_format reg0i15_format; + struct reg0i26_format reg0i26_format; + struct reg1i21_format reg1i21_format; + struct reg2_format reg2_format; + struct reg2i12_format reg2i12_format; + struct reg2i14_format reg2i14_format; + struct reg2i16_format reg2i16_format; +}; + +#define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction) + +enum loongarch_gpr { + LOONGARCH_GPR_ZERO = 0, + LOONGARCH_GPR_RA = 1, + LOONGARCH_GPR_TP = 2, + LOONGARCH_GPR_SP = 3, + LOONGARCH_GPR_A0 = 4, /* Reused as 
V0 for return value */ + LOONGARCH_GPR_A1, /* Reused as V1 for return value */ + LOONGARCH_GPR_A2, + LOONGARCH_GPR_A3, + LOONGARCH_GPR_A4, + LOONGARCH_GPR_A5, + LOONGARCH_GPR_A6, + LOONGARCH_GPR_A7, + LOONGARCH_GPR_T0 = 12, + LOONGARCH_GPR_T1, + LOONGARCH_GPR_T2, + LOONGARCH_GPR_T3, + LOONGARCH_GPR_T4, + LOONGARCH_GPR_T5, + LOONGARCH_GPR_T6, + LOONGARCH_GPR_T7, + LOONGARCH_GPR_T8, + LOONGARCH_GPR_FP = 22, + LOONGARCH_GPR_S0 = 23, + LOONGARCH_GPR_S1, + LOONGARCH_GPR_S2, + LOONGARCH_GPR_S3, + LOONGARCH_GPR_S4, + LOONGARCH_GPR_S5, + LOONGARCH_GPR_S6, + LOONGARCH_GPR_S7, + LOONGARCH_GPR_S8, + LOONGARCH_GPR_MAX +}; + +#define DEF_EMIT_REG2I16_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rj, \ + enum loongarch_gpr rd, \ + int offset) \ +{ \ + insn->reg2i16_format.opcode = OP; \ + insn->reg2i16_format.immediate = offset; \ + insn->reg2i16_format.rj = rj; \ + insn->reg2i16_format.rd = rd; \ +} + +DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op) + +#endif /* _ASM_INST_H */ diff --git a/tools/arch/loongarch/include/asm/orc_types.h b/tools/arch/loongarch/include/asm/orc_types.h new file mode 100644 index 0000000000000000000000000000000000000000..caf1f71a1057b699887873c0973c1fe5d832f0c8 --- /dev/null +++ b/tools/arch/loongarch/include/asm/orc_types.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ORC_TYPES_H +#define _ORC_TYPES_H + +#include + +/* + * The ORC_REG_* registers are base registers which are used to find other + * registers on the stack. + * + * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the + * address of the previous frame: the caller's SP before it called the current + * function. + * + * ORC_REG_UNDEFINED means the corresponding register's value didn't change in + * the current frame. + * + * The most commonly used base registers are SP and FP -- which the previous SP + * is usually based on -- and PREV_SP and UNDEFINED -- which the previous FP is + * usually based on. + * + * The rest of the base registers are needed for special cases like entry code + * and GCC realigned stacks. + */ +#define ORC_REG_UNDEFINED 0 +#define ORC_REG_PREV_SP 1 +#define ORC_REG_SP 2 +#define ORC_REG_FP 3 +#define ORC_REG_MAX 4 + +#define ORC_TYPE_UNDEFINED 0 +#define ORC_TYPE_END_OF_STACK 1 +#define ORC_TYPE_CALL 2 +#define ORC_TYPE_REGS 3 +#define ORC_TYPE_REGS_PARTIAL 4 + +#ifndef __ASSEMBLY__ +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. It tells the + * unwinder how to find the previous SP and FP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. 
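+ * Unlike the x86 layout, this entry also records how to recover the + * return address (ra_reg/ra_offset), since LoongArch call instructions + * leave it in $ra instead of pushing it on the stack.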
+ */ +struct orc_entry { + s16 sp_offset; + s16 fp_offset; + s16 ra_offset; + unsigned int sp_reg:4; + unsigned int fp_reg:4; + unsigned int ra_reg:4; + unsigned int type:3; + unsigned int signal:1; +}; +#endif /* __ASSEMBLY__ */ + +#endif /* _ORC_TYPES_H */ diff --git a/tools/arch/sw_64/include/asm/barrier.h b/tools/arch/sw_64/include/asm/barrier.h new file mode 100644 index 0000000000000000000000000000000000000000..bc4aeffeb6819026d3760f38d237477dd6d62709 --- /dev/null +++ b/tools/arch/sw_64/include/asm/barrier.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _TOOLS_LINUX_ASM_SW64_BARRIER_H +#define _TOOLS_LINUX_ASM_SW64_BARRIER_H + +#define mb() __asm__ __volatile__("mb" : : : "memory") +#define rmb() __asm__ __volatile__("mb" : : : "memory") +#define wmb() __asm__ __volatile__("mb" : : : "memory") + +#endif /* _TOOLS_LINUX_ASM_SW64_BARRIER_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/bitsperlong.h b/tools/arch/sw_64/include/uapi/asm/bitsperlong.h new file mode 100644 index 0000000000000000000000000000000000000000..f6a510c2823340fb47701a6066989456bde5bca4 --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/bitsperlong.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_SW64_BITSPERLONG_H +#define __ASM_SW64_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* __ASM_SW64_BITSPERLONG_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/errno.h b/tools/arch/sw_64/include/uapi/asm/errno.h new file mode 100644 index 0000000000000000000000000000000000000000..2a43a943581a9b80dd4c7549aff13c14f2479213 --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/errno.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _SW64_ERRNO_H +#define _SW64_ERRNO_H + +#include + +#undef EAGAIN /* 11 in errno-base.h */ + +#define EDEADLK 11 /* Resource deadlock would occur */ + +#define EAGAIN 35 /* Try again */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define EINPROGRESS 36 /* Operation now in progress */ +#define EALREADY 37 /* Operation already in progress */ +#define ENOTSOCK 38 /* Socket operation on non-socket */ +#define EDESTADDRREQ 39 /* Destination address required */ +#define EMSGSIZE 40 /* Message too long */ +#define EPROTOTYPE 41 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 42 /* Protocol not available */ +#define EPROTONOSUPPORT 43 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ +#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 46 /* Protocol family not supported */ +#define EAFNOSUPPORT 47 /* Address family not supported by protocol */ +#define EADDRINUSE 48 /* Address already in use */ +#define EADDRNOTAVAIL 49 /* Cannot assign requested address */ +#define ENETDOWN 50 /* Network is down */ +#define ENETUNREACH 51 /* Network is unreachable */ +#define ENETRESET 52 /* Network dropped connection because of reset */ +#define ECONNABORTED 53 /* Software caused connection abort */ +#define ECONNRESET 54 /* Connection reset by peer */ +#define ENOBUFS 55 /* No buffer space available */ +#define EISCONN 56 /* Transport endpoint is already connected */ +#define ENOTCONN 57 /* Transport endpoint is not connected */ +#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 59 /* Too many references: cannot splice */ +#define ETIMEDOUT 60 /* Connection timed out */ +#define ECONNREFUSED 61 /* Connection refused */ +#define ELOOP 
62 /* Too many symbolic links encountered */ +#define ENAMETOOLONG 63 /* File name too long */ +#define EHOSTDOWN 64 /* Host is down */ +#define EHOSTUNREACH 65 /* No route to host */ +#define ENOTEMPTY 66 /* Directory not empty */ + +#define EUSERS 68 /* Too many users */ +#define EDQUOT 69 /* Quota exceeded */ +#define ESTALE 70 /* Stale file handle */ +#define EREMOTE 71 /* Object is remote */ + +#define ENOLCK 77 /* No record locks available */ +#define ENOSYS 78 /* Function not implemented */ + +#define ENOMSG 80 /* No message of desired type */ +#define EIDRM 81 /* Identifier removed */ +#define ENOSR 82 /* Out of streams resources */ +#define ETIME 83 /* Timer expired */ +#define EBADMSG 84 /* Not a data message */ +#define EPROTO 85 /* Protocol error */ +#define ENODATA 86 /* No data available */ +#define ENOSTR 87 /* Device not a stream */ + +#define ENOPKG 92 /* Package not installed */ + +#define EILSEQ 116 /* Illegal byte sequence */ + +/* The following are just random noise.. */ +#define ECHRNG 88 /* Channel number out of range */ +#define EL2NSYNC 89 /* Level 2 not synchronized */ +#define EL3HLT 90 /* Level 3 halted */ +#define EL3RST 91 /* Level 3 reset */ + +#define ELNRNG 93 /* Link number out of range */ +#define EUNATCH 94 /* Protocol driver not attached */ +#define ENOCSI 95 /* No CSI structure available */ +#define EL2HLT 96 /* Level 2 halted */ +#define EBADE 97 /* Invalid exchange */ +#define EBADR 98 /* Invalid request descriptor */ +#define EXFULL 99 /* Exchange full */ +#define ENOANO 100 /* No anode */ +#define EBADRQC 101 /* Invalid request code */ +#define EBADSLT 102 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 104 /* Bad font file format */ +#define ENONET 105 /* Machine is not on the network */ +#define ENOLINK 106 /* Link has been severed */ +#define EADV 107 /* Advertise error */ +#define ESRMNT 108 /* Srmount error */ +#define ECOMM 109 /* Communication error on send */ +#define EMULTIHOP 110 /* Multihop attempted */ +#define EDOTDOT 111 /* RFS specific error */ +#define EOVERFLOW 112 /* Value too large for defined data type */ +#define ENOTUNIQ 113 /* Name not unique on network */ +#define EBADFD 114 /* File descriptor in bad state */ +#define EREMCHG 115 /* Remote address changed */ + +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ + +#define ELIBACC 122 /* Can not access a needed shared library */ +#define ELIBBAD 123 /* Accessing a corrupted shared library */ +#define ELIBSCN 124 /* .lib section in a.out corrupted */ +#define ELIBMAX 125 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 126 /* Cannot exec a shared library directly */ +#define ERESTART 127 /* Interrupted system call should be restarted */ +#define ESTRPIPE 128 /* Streams pipe error */ + +#define ENOMEDIUM 129 /* No medium found */ +#define EMEDIUMTYPE 130 /* Wrong medium type */ +#define ECANCELED 131 /* Operation Cancelled */ +#define ENOKEY 132 /* Required key not available */ +#define EKEYEXPIRED 133 /* Key has expired */ +#define EKEYREVOKED 134 /* Key has been revoked */ +#define EKEYREJECTED 135 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 136 /* Owner died */ +#define ENOTRECOVERABLE 137 /* State not recoverable */ + +#define ERFKILL 138 /* Operation not possible due to RF-kill */ + +#define 
EHWPOISON 139 /* Memory page has hardware error */ + +#endif /* _SW64_ERRNO_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/mman.h b/tools/arch/sw_64/include/uapi/asm/mman.h new file mode 100644 index 0000000000000000000000000000000000000000..a9603c93a34bf4416c6db36701263ce678325e1c --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/mman.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H +#define TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H +#define MADV_DODUMP 17 +#define MADV_DOFORK 11 +#define MADV_DONTDUMP 16 +#define MADV_DONTFORK 10 +#define MADV_DONTNEED 6 +#define MADV_FREE 8 +#define MADV_HUGEPAGE 14 +#define MADV_MERGEABLE 12 +#define MADV_NOHUGEPAGE 15 +#define MADV_NORMAL 0 +#define MADV_RANDOM 1 +#define MADV_REMOVE 9 +#define MADV_SEQUENTIAL 2 +#define MADV_UNMERGEABLE 13 +#define MADV_WILLNEED 3 +#define MAP_ANONYMOUS 0x10 +#define MAP_DENYWRITE 0x02000 +#define MAP_EXECUTABLE 0x04000 +#define MAP_FILE 0 +#define MAP_FIXED 0x100 +#define MAP_GROWSDOWN 0x01000 +#define MAP_HUGETLB 0x100000 +#define MAP_LOCKED 0x08000 +#define MAP_NONBLOCK 0x40000 +#define MAP_NORESERVE 0x10000 +#define MAP_POPULATE 0x20000 +#define MAP_STACK 0x80000 +#define PROT_EXEC 0x4 +#define PROT_GROWSDOWN 0x01000000 +#define PROT_GROWSUP 0x02000000 +#define PROT_NONE 0x0 +#define PROT_READ 0x1 +#define PROT_SEM 0x8 +#define PROT_WRITE 0x2 +/* MADV_HWPOISON is undefined on alpha, fix it for perf */ +#define MADV_HWPOISON 100 +/* MADV_SOFT_OFFLINE is undefined on alpha, fix it for perf */ +#define MADV_SOFT_OFFLINE 101 +/* MAP_32BIT is undefined on alpha, fix it for perf */ +#define MAP_32BIT 0 +/* MAP_UNINITIALIZED is undefined on alpha, fix it for perf */ +#define MAP_UNINITIALIZED 0 +#endif /* TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/perf_regs.h b/tools/arch/sw_64/include/uapi/asm/perf_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..892be52610265f85cab2fff62d256c093e5593ff --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/perf_regs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _ASM_SW64_PERF_REGS_H +#define _ASM_SW64_PERF_REGS_H + +enum perf_event_sw64_regs { + PERF_REG_SW64_R0, + PERF_REG_SW64_R1, + PERF_REG_SW64_R2, + PERF_REG_SW64_R3, + PERF_REG_SW64_R4, + PERF_REG_SW64_R5, + PERF_REG_SW64_R6, + PERF_REG_SW64_R7, + PERF_REG_SW64_R8, + PERF_REG_SW64_R9, + PERF_REG_SW64_R10, + PERF_REG_SW64_R11, + PERF_REG_SW64_R12, + PERF_REG_SW64_R13, + PERF_REG_SW64_R14, + PERF_REG_SW64_R15, + PERF_REG_SW64_R16, + PERF_REG_SW64_R17, + PERF_REG_SW64_R18, + PERF_REG_SW64_R19, + PERF_REG_SW64_R20, + PERF_REG_SW64_R21, + PERF_REG_SW64_R22, + PERF_REG_SW64_R23, + PERF_REG_SW64_R24, + PERF_REG_SW64_R25, + PERF_REG_SW64_R26, + PERF_REG_SW64_R27, + PERF_REG_SW64_R28, + PERF_REG_SW64_GP, + PERF_REG_SW64_SP, + PERF_REG_SW64_PC, + PERF_REG_SW64_MAX, +}; +#endif /* _ASM_SW64_PERF_REGS_H */ diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 845a4023ba44e2330d3d794e5a96c437f635bb92..bc82ca1bb3462d70205e048fdae15c444946191d 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 21 /* N 32-bit words worth of info */ +#define NCAPINTS 23 /* N 32-bit words worth of info */ #define NBUGINTS 2 /* N 32-bit bug flags */ /* @@ -443,6 +443,9 @@ #define X86_FEATURE_AUTOIBRS 
(20*32+ 8) /* "" Automatic IBRS */ #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */ +/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000006, word 23 */ +#define X86_FEATURE_ZXPAUSE (23*32 + 0) /* ZHAOXIN ZXPAUSE */ + /* * BUG word(s) */ diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index fafe9be7a6f4ff6b7adc0ae3ea34d30b3d9ba79d..be3fef5e80ba38f7b56a1364cb2d2c4a549d4486 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h @@ -131,6 +131,9 @@ #define DISABLED_MASK18 0 #define DISABLED_MASK19 0 #define DISABLED_MASK20 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) +#define DISABLED_MASK21 0 +#define DISABLED_MASK22 0 +#define DISABLED_MASK23 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 24) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 1d111350197f3169a8eec402d77980dd617c6b95..3456f6deca51684feb87ebcd6ed21116565b6e2e 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -72,12 +72,23 @@ #define MSR_IA32_UMWAIT_CONTROL 0xe1 #define MSR_IA32_UMWAIT_CONTROL_C02_DISABLE BIT(0) #define MSR_IA32_UMWAIT_CONTROL_RESERVED BIT(1) + +#define MSR_ZX_PAUSE_CONTROL 0x187f +#define MSR_ZX_PAUSE_CONTROL_C02_DISABLE BIT(0) +#define MSR_ZX_PAUSE_CONTROL_RESERVED BIT(1) + /* * The time field is bit[31:2], but representing a 32bit value with * bit[1:0] zero. */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U) +/* + * The time field is bit[31:2], but representing a 32bit value with + * bit[1:0] zero. + */ +#define MSR_ZX_PAUSE_CONTROL_TIME_MASK (~0x03U) + /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 @@ -753,6 +764,13 @@ #define MSR_TMTA_LRTI_READOUT 0x80868018 #define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a +/* + * Zhaoxin extend VMCS capabilities: + * bit 0: exec-cntl3 VMCS field. + */ +#define MSR_ZX_EXT_VMCS_CAPS 0x1675 +#define MSR_ZX_VMCS_EXEC_CTL3 BIT(0) + /* Intel defined MSRs. 
*/ #define MSR_IA32_P5_MC_ADDR 0x00000000 #define MSR_IA32_P5_MC_TYPE 0x00000001 diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index 7ba1726b71c7b8bfc95888dc78508998bba263fe..6a3de575bec6a06ef41e3ac281cffbbd9248f1a5 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h @@ -99,6 +99,9 @@ #define REQUIRED_MASK18 0 #define REQUIRED_MASK19 0 #define REQUIRED_MASK20 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) +#define REQUIRED_MASK21 0 +#define REQUIRED_MASK22 0 +#define REQUIRED_MASK23 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 24) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 90ae2ea61324cc7e94a020499d2501b8b4009073..1a49233761e960035fc72226b5a836f2c46cc845 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -442,7 +442,7 @@ static void print_prog_header_json(struct bpf_prog_info *info, int fd) jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses); } -static void print_prog_json(struct bpf_prog_info *info, int fd) +static void print_prog_json(struct bpf_prog_info *info, int fd, bool orphaned) { char *memlock; @@ -461,6 +461,7 @@ static void print_prog_json(struct bpf_prog_info *info, int fd) jsonw_uint_field(json_wtr, "uid", info->created_by_uid); } + jsonw_bool_field(json_wtr, "orphaned", orphaned); jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len); if (info->jited_prog_len) { @@ -527,7 +528,7 @@ static void print_prog_header_plain(struct bpf_prog_info *info, int fd) printf("\n"); } -static void print_prog_plain(struct bpf_prog_info *info, int fd) +static void print_prog_plain(struct bpf_prog_info *info, int fd, bool orphaned) { char *memlock; @@ -554,6 +555,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd) printf(" memlock %sB", memlock); free(memlock); + if (orphaned) + printf(" orphaned"); + if (info->nr_map_ids) show_prog_maps(fd, info->nr_map_ids); @@ -581,15 +585,15 @@ static int show_prog(int fd) int err; err = bpf_prog_get_info_by_fd(fd, &info, &len); - if (err) { + if (err && err != -ENODEV) { p_err("can't get prog info: %s", strerror(errno)); return -1; } if (json_output) - print_prog_json(&info, fd); + print_prog_json(&info, fd, err == -ENODEV); else - print_prog_plain(&info, fd); + print_prog_plain(&info, fd, err == -ENODEV); return 0; } diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 934e2777a2dbcd9062f873c039f1853867937ed3..fb290a90b263d10b94d23d359e0a49498c4b5602 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -54,6 +54,7 @@ FEATURE_TESTS_BASIC := \ libtracefs \ libcrypto \ libunwind \ + libunwind-sw_64 \ pthread-attr-setaffinity-np \ pthread-barrier \ reallocarray \ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index dad79ede4e0ae0030ee401a1daf5d59ff871dcb6..cb57e46cec4bd4a9c07dab7d8977e1763b39a361 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 include ../../scripts/Makefile.include +ARCH ?= $(shell uname -m) FILES= \ test-all.bin \ test-backtrace.bin \ @@ -45,6 +46,7 @@ FILES= \ test-libunwind-x86_64.bin \ test-libunwind-arm.bin \ test-libunwind-aarch64.bin \ + test-libunwind-sw_64.bin \ test-libunwind-debug-frame-arm.bin \ test-libunwind-debug-frame-aarch64.bin \ test-pthread-attr-setaffinity-np.bin \ @@ -86,7 
+88,11 @@ all: $(FILES) __BUILD = $(CC) $(CFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS) BUILD = $(__BUILD) > $(@:.bin=.make.output) 2>&1 +ifeq ($(ARCH),sw_64) + BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz +else BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl +endif BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd -lcap __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(@F)) $(LDFLAGS) @@ -189,6 +195,9 @@ $(OUTPUT)test-libunwind-arm.bin: $(OUTPUT)test-libunwind-aarch64.bin: $(BUILD) -lelf -lunwind-aarch64 +$(OUTPUT)test-libunwind-sw_64.bin: + $(BUILD) -lelf -lunwind-sw_64 + $(OUTPUT)test-libunwind-debug-frame-arm.bin: $(BUILD) -lelf -lunwind-arm diff --git a/tools/build/feature/test-libunwind-sw_64.c b/tools/build/feature/test-libunwind-sw_64.c new file mode 100644 index 0000000000000000000000000000000000000000..274948b961f424dee01016f16b7f72475d9f5880 --- /dev/null +++ b/tools/build/feature/test-libunwind-sw_64.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, + unw_word_t ip, + unw_dyn_info_t *di, + unw_proc_info_t *pi, + int need_unwind_info, void *arg); + +#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) + +static unw_accessors_t accessors; + +int main(void) +{ + unw_addr_space_t addr_space; + + addr_space = unw_create_addr_space(&accessors, 0); + if (addr_space) + return 0; + + unw_init_remote(NULL, addr_space, NULL); + dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL); + + return 0; +} diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h index f18683b95ea67301e8c9cd6d5bc631074f468e5d..7319f6ced10860e3f0c30c599086384a5268740f 100644 --- a/tools/include/linux/bitops.h +++ b/tools/include/linux/bitops.h @@ -87,4 +87,15 @@ static inline __u32 rol32(__u32 word, unsigned int shift) return (word << shift) | (word >> ((-shift) & 31)); } +/** + * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit + * @value: value to sign extend + * @index: 0 based bit index (0<=index<64) to sign bit + */ +static __always_inline __s64 sign_extend64(__u64 value, int index) +{ + __u8 shift = 63 - index; + return (__s64)(value << shift) >> shift; +} + #endif diff --git a/tools/include/linux/objtool_types.h b/tools/include/linux/objtool_types.h index 453a4f4ef39d441d1e047b2a06e6700bf1e51b21..ca51cc50dc8221a513087206e98192af213a4b7b 100644 --- a/tools/include/linux/objtool_types.h +++ b/tools/include/linux/objtool_types.h @@ -43,6 +43,8 @@ struct unwind_hint { * * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain * location so that it can be restored later. + * + * UNWIND_HINT_TYPE_IRQ_STACK: Used to unwind through the IRQ stack. 
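+ *
+ * (Added alongside the arm64 dynamic checker in this patch; arm64 handles
+ * IRQs on a separate stack, so the unwinder presumably needs an explicit
+ * hint at the point of transition back to the task stack.)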
*/ #define UNWIND_HINT_TYPE_UNDEFINED 0 #define UNWIND_HINT_TYPE_END_OF_STACK 1 @@ -53,5 +55,6 @@ struct unwind_hint { #define UNWIND_HINT_TYPE_FUNC 5 #define UNWIND_HINT_TYPE_SAVE 6 #define UNWIND_HINT_TYPE_RESTORE 7 +#define UNWIND_HINT_TYPE_IRQ_STACK 8 #endif /* _LINUX_OBJTOOL_TYPES_H */ diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h index c65267afc3415b194b9f60f8c56024daf6e8a535..786eae853257d714da1f43c02484ce83e12505ac 100644 --- a/tools/include/uapi/asm/bitsperlong.h +++ b/tools/include/uapi/asm/bitsperlong.h @@ -13,6 +13,10 @@ #include "../../../arch/ia64/include/uapi/asm/bitsperlong.h" #elif defined(__alpha__) #include "../../../arch/alpha/include/uapi/asm/bitsperlong.h" +#elif defined(__loongarch__) +#include "../../../arch/loongarch/include/uapi/asm/bitsperlong.h" +#elif defined(__sw_64__) +#include "../../../arch/sw_64/include/uapi/asm/bitsperlong.h" #else #include #endif diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h index 869379f91fe487ad3611966b09bda47462da0b70..bcfa3d742933d923aa241d5c7aff35b60515e595 100644 --- a/tools/include/uapi/asm/errno.h +++ b/tools/include/uapi/asm/errno.h @@ -11,6 +11,8 @@ #include "../../../arch/mips/include/uapi/asm/errno.h" #elif defined(__hppa__) #include "../../../arch/parisc/include/uapi/asm/errno.h" +#elif defined(__sw_64__) +#include "../../../arch/sw_64/include/uapi/asm/errno.h" #else #include #endif diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 977ec094bc2a6c75dd8d37d037ff89a39c8d3492..e3c9d5c7d9b8241b396ea36b51287a6ac9bb024b 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1327,6 +1327,9 @@ enum { /* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */ BPF_F_PATH_FD = (1U << 14), + +/* Flag for value_type_btf_obj_fd, the fd is available */ + BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15), }; /* Flags for BPF_PROG_QUERY. */ @@ -1400,6 +1403,11 @@ union bpf_attr { * to using 5 hash functions). */ __u64 map_extra; + + __s32 value_type_btf_obj_fd; /* fd pointing to a BTF + * type data for + * btf_vmlinux_value_type_id. + */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -6475,7 +6483,7 @@ struct bpf_map_info { __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; - __u32 :32; /* alignment pad */ + __u32 btf_vmlinux_id; __u64 map_extra; } __attribute__((aligned(8))); diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 39e659c83cfd21a4c6f7ff5fa5235d01ba20dc19..a0aa05a28cf29c435b272e4d77e8677417b559e1 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -211,6 +211,9 @@ struct rtnl_link_stats { * @rx_nohandler: Number of packets received on the interface * but dropped by the networking stack because the device is * not designated to receive packets (e.g. backup link in a bond). + * + * @rx_otherhost_dropped: Number of packets dropped due to mismatch + * in destination MAC address. */ struct rtnl_link_stats64 { __u64 rx_packets; @@ -243,6 +246,23 @@ struct rtnl_link_stats64 { __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; + + __u64 rx_otherhost_dropped; +}; + +/* Subset of link stats useful for in-HW collection. Meaning of the fields is as + * for struct rtnl_link_stats64. 
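+ * These stats are carried in the IFLA_OFFLOAD_XSTATS_L3_STATS attribute
+ * defined further below.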
+ */ +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; }; /* The struct should be in sync with struct ifmap */ @@ -350,7 +370,13 @@ enum { IFLA_GRO_MAX_SIZE, IFLA_TSO_MAX_SIZE, IFLA_TSO_MAX_SEGS, + IFLA_ALLMULTI, /* Allmulti count: > 0 means acts ALLMULTI */ + + IFLA_DEVLINK_PORT, + IFLA_GSO_IPV4_MAX_SIZE, + IFLA_GRO_IPV4_MAX_SIZE, + IFLA_DPLL_PIN, __IFLA_MAX }; @@ -539,6 +565,12 @@ enum { IFLA_BRPORT_MRP_IN_OPEN, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, + IFLA_BRPORT_LOCKED, + IFLA_BRPORT_MAB, + IFLA_BRPORT_MCAST_N_GROUPS, + IFLA_BRPORT_MCAST_MAX_GROUPS, + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS, + IFLA_BRPORT_BACKUP_NHID, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -716,7 +748,79 @@ enum ipvlan_mode { #define IPVLAN_F_PRIVATE 0x01 #define IPVLAN_F_VEPA 0x02 +/* Tunnel RTM header */ +struct tunnel_msg { + __u8 family; + __u8 flags; + __u16 reserved2; + __u32 ifindex; +}; + +/* netkit section */ +enum netkit_action { + NETKIT_NEXT = -1, + NETKIT_PASS = 0, + NETKIT_DROP = 2, + NETKIT_REDIRECT = 7, +}; + +enum netkit_mode { + NETKIT_L2, + NETKIT_L3, +}; + +enum { + IFLA_NETKIT_UNSPEC, + IFLA_NETKIT_PEER_INFO, + IFLA_NETKIT_PRIMARY, + IFLA_NETKIT_POLICY, + IFLA_NETKIT_PEER_POLICY, + IFLA_NETKIT_MODE, + __IFLA_NETKIT_MAX, +}; +#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) + /* VXLAN section */ + +/* include statistics in the dump */ +#define TUNNEL_MSG_FLAG_STATS 0x01 + +#define TUNNEL_MSG_VALID_USER_FLAGS TUNNEL_MSG_FLAG_STATS + +/* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */ +enum { + VNIFILTER_ENTRY_STATS_UNSPEC, + VNIFILTER_ENTRY_STATS_RX_BYTES, + VNIFILTER_ENTRY_STATS_RX_PKTS, + VNIFILTER_ENTRY_STATS_RX_DROPS, + VNIFILTER_ENTRY_STATS_RX_ERRORS, + VNIFILTER_ENTRY_STATS_TX_BYTES, + VNIFILTER_ENTRY_STATS_TX_PKTS, + VNIFILTER_ENTRY_STATS_TX_DROPS, + VNIFILTER_ENTRY_STATS_TX_ERRORS, + VNIFILTER_ENTRY_STATS_PAD, + __VNIFILTER_ENTRY_STATS_MAX +}; +#define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1) + +enum { + VXLAN_VNIFILTER_ENTRY_UNSPEC, + VXLAN_VNIFILTER_ENTRY_START, + VXLAN_VNIFILTER_ENTRY_END, + VXLAN_VNIFILTER_ENTRY_GROUP, + VXLAN_VNIFILTER_ENTRY_GROUP6, + VXLAN_VNIFILTER_ENTRY_STATS, + __VXLAN_VNIFILTER_ENTRY_MAX +}; +#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1) + +enum { + VXLAN_VNIFILTER_UNSPEC, + VXLAN_VNIFILTER_ENTRY, + __VXLAN_VNIFILTER_MAX +}; +#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1) + enum { IFLA_VXLAN_UNSPEC, IFLA_VXLAN_ID, @@ -748,6 +852,8 @@ enum { IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, IFLA_VXLAN_DF, + IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */ + IFLA_VXLAN_LOCALBYPASS, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -781,6 +887,7 @@ enum { IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, + IFLA_GENEVE_INNER_PROTO_INHERIT, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) @@ -826,6 +933,8 @@ enum { IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, + IFLA_GTP_CREATE_SOCKETS, + IFLA_GTP_RESTART_COUNT, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) @@ -1162,6 +1271,17 @@ enum { #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) +enum { + IFLA_STATS_GETSET_UNSPEC, + IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with + * a filter mask for the corresponding group. 
+ */ + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */ + __IFLA_STATS_GETSET_MAX, +}; + +#define IFLA_STATS_GETSET_MAX (__IFLA_STATS_GETSET_MAX - 1) + /* These are embedded into IFLA_STATS_LINK_XSTATS: * [IFLA_STATS_LINK_XSTATS] * -> [LINK_XSTATS_TYPE_xxx] @@ -1179,10 +1299,21 @@ enum { enum { IFLA_OFFLOAD_XSTATS_UNSPEC, IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO, /* HW stats info. A nest */ + IFLA_OFFLOAD_XSTATS_L3_STATS, /* struct rtnl_hw_stats64 */ __IFLA_OFFLOAD_XSTATS_MAX }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, /* u8 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, /* u8 */ + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX, +}; +#define IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX \ + (__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX - 1) + /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) @@ -1281,4 +1412,14 @@ enum { #define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1) +/* DSA section */ + +enum { + IFLA_DSA_UNSPEC, + IFLA_DSA_MASTER, + __IFLA_DSA_MAX, +}; + +#define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1) + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index b0f1913763a33250987aaf4112682df4fdc6895e..af46488e4ea9530b1478ae4932c5b89275e90f7c 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -169,7 +169,8 @@ int bpf_map_create(enum bpf_map_type map_type, __u32 max_entries, const struct bpf_map_create_opts *opts) { - const size_t attr_sz = offsetofend(union bpf_attr, map_extra); + const size_t attr_sz = offsetofend(union bpf_attr, + value_type_btf_obj_fd); union bpf_attr attr; int fd; @@ -191,6 +192,7 @@ int bpf_map_create(enum bpf_map_type map_type, attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0); attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0); attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0); + attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0); attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0); attr.map_flags = OPTS_GET(opts, map_flags, 0); diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 107fef7488682946f89ccd578ee38dddbcf96b75..db0ff8ade19a44227f7ea951ae5d8b7c07ba85c1 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -51,8 +51,10 @@ struct bpf_map_create_opts { __u32 numa_node; __u32 map_ifindex; + __s32 value_type_btf_obj_fd; + size_t:0; }; -#define bpf_map_create_opts__last_field map_ifindex +#define bpf_map_create_opts__last_field value_type_btf_obj_fd LIBBPF_API int bpf_map_create(enum bpf_map_type map_type, const char *map_name, diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 2fad178949efe9ede8880afac6c1a0df7440a7f7..7f78edee5a437494fb2c32ec65f2b917a0660bde 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -518,6 +518,7 @@ struct bpf_map { struct bpf_map_def def; __u32 numa_node; __u32 btf_var_idx; + int mod_btf_fd; __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 btf_vmlinux_value_type_id; @@ -918,22 +919,29 @@ find_member_by_name(const struct btf *btf, const struct btf_type *t, return NULL; } +static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, + __u16 kind, struct btf **res_btf, + struct module_btf **res_mod_btf); + #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_" static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, const char *name, __u32 kind); static int -find_struct_ops_kern_types(const struct btf 
*btf, const char *tname, +find_struct_ops_kern_types(struct bpf_object *obj, const char *tname, + struct module_btf **mod_btf, const struct btf_type **type, __u32 *type_id, const struct btf_type **vtype, __u32 *vtype_id, const struct btf_member **data_member) { const struct btf_type *kern_type, *kern_vtype; const struct btf_member *kern_data_member; + struct btf *btf; __s32 kern_vtype_id, kern_type_id; __u32 i; - kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT); + kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT, + &btf, mod_btf); if (kern_type_id < 0) { pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", tname); @@ -987,14 +995,16 @@ static bool bpf_map__is_struct_ops(const struct bpf_map *map) } /* Init the map's fields that depend on kern_btf */ -static int bpf_map__init_kern_struct_ops(struct bpf_map *map, - const struct btf *btf, - const struct btf *kern_btf) +static int bpf_map__init_kern_struct_ops(struct bpf_map *map) { const struct btf_member *member, *kern_member, *kern_data_member; const struct btf_type *type, *kern_type, *kern_vtype; __u32 i, kern_type_id, kern_vtype_id, kern_data_off; + struct bpf_object *obj = map->obj; + const struct btf *btf = obj->btf; struct bpf_struct_ops *st_ops; + const struct btf *kern_btf; + struct module_btf *mod_btf; void *data, *kern_data; const char *tname; int err; @@ -1002,16 +1012,19 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map, st_ops = map->st_ops; type = st_ops->type; tname = st_ops->tname; - err = find_struct_ops_kern_types(kern_btf, tname, + err = find_struct_ops_kern_types(obj, tname, &mod_btf, &kern_type, &kern_type_id, &kern_vtype, &kern_vtype_id, &kern_data_member); if (err) return err; + kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux; + pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n", map->name, st_ops->type_id, kern_type_id, kern_vtype_id); + map->mod_btf_fd = mod_btf ? 
mod_btf->fd : -1; map->def.value_size = kern_vtype->size; map->btf_vmlinux_value_type_id = kern_vtype_id; @@ -1087,6 +1100,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map, return -ENOTSUP; } + if (mod_btf) + prog->attach_btf_obj_fd = mod_btf->fd; prog->attach_btf_id = kern_type_id; prog->expected_attach_type = kern_member_idx; @@ -1129,8 +1144,7 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj) if (!bpf_map__is_struct_ops(map)) continue; - err = bpf_map__init_kern_struct_ops(map, obj->btf, - obj->btf_vmlinux); + err = bpf_map__init_kern_struct_ops(map); if (err) return err; } @@ -5111,8 +5125,13 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b create_attr.numa_node = map->numa_node; create_attr.map_extra = map->map_extra; - if (bpf_map__is_struct_ops(map)) + if (bpf_map__is_struct_ops(map)) { create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; + if (map->mod_btf_fd >= 0) { + create_attr.value_type_btf_obj_fd = map->mod_btf_fd; + create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD; + } + } if (obj->btf && btf__fd(obj->btf) >= 0) { create_attr.btf_fd = btf__fd(obj->btf); @@ -9423,7 +9442,9 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac *btf_obj_fd = 0; *btf_type_id = 1; } else { - err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id); + err = find_kernel_btf_id(prog->obj, attach_name, + attach_type, btf_obj_fd, + btf_type_id); } if (err) { pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n", diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 9c4db90b92b6b55bb143d80c308085003becc20b..98373d126d9d77a7313c452c20f1a95974248b9d 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -326,6 +326,7 @@ static int probe_map_create(enum bpf_map_type map_type) case BPF_MAP_TYPE_STRUCT_OPS: /* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */ opts.btf_vmlinux_value_type_id = 1; + opts.value_type_btf_obj_fd = -1; exp_err = -524; /* -ENOTSUPP */ break; case BPF_MAP_TYPE_BLOOM_FILTER: diff --git a/tools/mm/Makefile b/tools/mm/Makefile index 1c5606cc33346bd29b523deb466cc739a0c6bb33..7bb03606b9eaa2e3763962f240dfb653d2b6b8f7 100644 --- a/tools/mm/Makefile +++ b/tools/mm/Makefile @@ -3,7 +3,8 @@ # include ../scripts/Makefile.include -TARGETS=page-types slabinfo page_owner_sort +BUILD_TARGETS=page-types slabinfo page_owner_sort +INSTALL_TARGETS = $(BUILD_TARGETS) thpmaps LIB_DIR = ../lib/api LIBS = $(LIB_DIR)/libapi.a @@ -11,9 +12,9 @@ LIBS = $(LIB_DIR)/libapi.a CFLAGS += -Wall -Wextra -I../lib/ -pthread LDFLAGS += $(LIBS) -pthread -all: $(TARGETS) +all: $(BUILD_TARGETS) -$(TARGETS): $(LIBS) +$(BUILD_TARGETS): $(LIBS) $(LIBS): make -C $(LIB_DIR) @@ -29,4 +30,4 @@ sbindir ?= /usr/sbin install: all install -d $(DESTDIR)$(sbindir) - install -m 755 -p $(TARGETS) $(DESTDIR)$(sbindir) + install -m 755 -p $(INSTALL_TARGETS) $(DESTDIR)$(sbindir) diff --git a/tools/mm/thpmaps b/tools/mm/thpmaps new file mode 100644 index 0000000000000000000000000000000000000000..803e0318f2fea1c5cc19e036316c6d00fb407783 --- /dev/null +++ b/tools/mm/thpmaps @@ -0,0 +1,675 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 ARM Ltd. +# +# Utility providing smaps-like output detailing transparent hugepage usage. 
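+#
+# Example invocation (values are illustrative only; must be run as root to
+# read pagemap and kpageflags):
+#   ./thpmaps --pid 1 --rollup --cont 64K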
+# For more info, run: +# ./thpmaps --help +# +# Requires numpy: +# pip3 install numpy + + +import argparse +import collections +import math +import os +import re +import resource +import shutil +import sys +import textwrap +import time +import numpy as np + + +with open('/sys/kernel/mm/transparent_hugepage/hpage_pmd_size') as f: + PAGE_SIZE = resource.getpagesize() + PAGE_SHIFT = int(math.log2(PAGE_SIZE)) + PMD_SIZE = int(f.read()) + PMD_ORDER = int(math.log2(PMD_SIZE / PAGE_SIZE)) + + +def align_forward(v, a): + return (v + (a - 1)) & ~(a - 1) + + +def align_offset(v, a): + return v & (a - 1) + + +def kbnr(kb): + # Convert KB to number of pages. + return (kb << 10) >> PAGE_SHIFT + + +def nrkb(nr): + # Convert number of pages to KB. + return (nr << PAGE_SHIFT) >> 10 + + +def odkb(order): + # Convert page order to KB. + return (PAGE_SIZE << order) >> 10 + + +def cont_ranges_all(search, index): + # Given a list of arrays, find the ranges for which values are monotonically + # incrementing in all arrays. all arrays in search and index must be the + # same size. + sz = len(search[0]) + r = np.full(sz, 2) + d = np.diff(search[0]) == 1 + for dd in [np.diff(arr) == 1 for arr in search[1:]]: + d &= dd + r[1:] -= d + r[:-1] -= d + return [np.repeat(arr, r).reshape(-1, 2) for arr in index] + + +class ArgException(Exception): + pass + + +class FileIOException(Exception): + pass + + +class BinArrayFile: + # Base class used to read /proc//pagemap and /proc/kpageflags into a + # numpy array. Use inherrited class in a with clause to ensure file is + # closed when it goes out of scope. + def __init__(self, filename, element_size): + self.element_size = element_size + self.filename = filename + self.fd = os.open(self.filename, os.O_RDONLY) + + def cleanup(self): + os.close(self.fd) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.cleanup() + + def _readin(self, offset, buffer): + length = os.preadv(self.fd, (buffer,), offset) + if len(buffer) != length: + raise FileIOException('error: {} failed to read {} bytes at {:x}' + .format(self.filename, len(buffer), offset)) + + def _toarray(self, buf): + assert(self.element_size == 8) + return np.frombuffer(buf, dtype=np.uint64) + + def getv(self, vec): + vec *= self.element_size + offsets = vec[:, 0] + lengths = (np.diff(vec) + self.element_size).reshape(len(vec)) + buf = bytearray(int(np.sum(lengths))) + view = memoryview(buf) + pos = 0 + for offset, length in zip(offsets, lengths): + offset = int(offset) + length = int(length) + self._readin(offset, view[pos:pos+length]) + pos += length + return self._toarray(buf) + + def get(self, index, nr=1): + offset = index * self.element_size + length = nr * self.element_size + buf = bytearray(length) + self._readin(offset, buf) + return self._toarray(buf) + + +PM_PAGE_PRESENT = 1 << 63 +PM_PFN_MASK = (1 << 55) - 1 + +class PageMap(BinArrayFile): + # Read ranges of a given pid's pagemap into a numpy array. + def __init__(self, pid='self'): + super().__init__(f'/proc/{pid}/pagemap', 8) + + +KPF_ANON = 1 << 12 +KPF_COMPOUND_HEAD = 1 << 15 +KPF_COMPOUND_TAIL = 1 << 16 +KPF_THP = 1 << 22 + +class KPageFlags(BinArrayFile): + # Read ranges of /proc/kpageflags into a numpy array. 
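+    # Each entry is one u64 of KPF_* flag bits for the corresponding page
+    # frame (see Documentation/admin-guide/mm/pagemap.rst); root is needed
+    # to read it.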
+ def __init__(self): + super().__init__(f'/proc/kpageflags', 8) + + +vma_all_stats = set([ + "Size", + "Rss", + "Pss", + "Pss_Dirty", + "Shared_Clean", + "Shared_Dirty", + "Private_Clean", + "Private_Dirty", + "Referenced", + "Anonymous", + "KSM", + "LazyFree", + "AnonHugePages", + "ShmemPmdMapped", + "FilePmdMapped", + "Shared_Hugetlb", + "Private_Hugetlb", + "Swap", + "SwapPss", + "Locked", +]) + +vma_min_stats = set([ + "Rss", + "Anonymous", + "AnonHugePages", + "ShmemPmdMapped", + "FilePmdMapped", +]) + +VMA = collections.namedtuple('VMA', [ + 'name', + 'start', + 'end', + 'read', + 'write', + 'execute', + 'private', + 'pgoff', + 'major', + 'minor', + 'inode', + 'stats', +]) + +class VMAList: + # A container for VMAs, parsed from /proc//smaps. Iterate over the + # instance to receive VMAs. + def __init__(self, pid='self', stats=[]): + self.vmas = [] + with open(f'/proc/{pid}/smaps', 'r') as file: + for line in file: + elements = line.split() + if '-' in elements[0]: + start, end = map(lambda x: int(x, 16), elements[0].split('-')) + major, minor = map(lambda x: int(x, 16), elements[3].split(':')) + self.vmas.append(VMA( + name=elements[5] if len(elements) == 6 else '', + start=start, + end=end, + read=elements[1][0] == 'r', + write=elements[1][1] == 'w', + execute=elements[1][2] == 'x', + private=elements[1][3] == 'p', + pgoff=int(elements[2], 16), + major=major, + minor=minor, + inode=int(elements[4], 16), + stats={}, + )) + else: + param = elements[0][:-1] + if param in stats: + value = int(elements[1]) + self.vmas[-1].stats[param] = {'type': None, 'value': value} + + def __iter__(self): + yield from self.vmas + + +def thp_parse(vma, kpageflags, ranges, indexes, vfns, pfns, anons, heads): + # Given 4 same-sized arrays representing a range within a page table backed + # by THPs (vfns: virtual frame numbers, pfns: physical frame numbers, anons: + # True if page is anonymous, heads: True if page is head of a THP), return a + # dictionary of statistics describing the mapped THPs. + stats = { + 'file': { + 'partial': 0, + 'aligned': [0] * (PMD_ORDER + 1), + 'unaligned': [0] * (PMD_ORDER + 1), + }, + 'anon': { + 'partial': 0, + 'aligned': [0] * (PMD_ORDER + 1), + 'unaligned': [0] * (PMD_ORDER + 1), + }, + } + + for rindex, rpfn in zip(ranges[0], ranges[2]): + index_next = int(rindex[0]) + index_end = int(rindex[1]) + 1 + pfn_end = int(rpfn[1]) + 1 + + folios = indexes[index_next:index_end][heads[index_next:index_end]] + + # Account pages for any partially mapped THP at the front. In that case, + # the first page of the range is a tail. + nr = (int(folios[0]) if len(folios) else index_end) - index_next + stats['anon' if anons[index_next] else 'file']['partial'] += nr + + # Account pages for any partially mapped THP at the back. In that case, + # the next page after the range is a tail. + if len(folios): + flags = int(kpageflags.get(pfn_end)[0]) + if flags & KPF_COMPOUND_TAIL: + nr = index_end - int(folios[-1]) + folios = folios[:-1] + index_end -= nr + stats['anon' if anons[index_end - 1] else 'file']['partial'] += nr + + # Account fully mapped THPs in the middle of the range. 
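+        # The gap between successive remaining head pages is one folio's size
+        # in pages; log2 of that gives the folio order (sizes are powers of 2).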
+        if len(folios):
+            folio_nrs = np.append(np.diff(folios), np.uint64(index_end - folios[-1]))
+            folio_orders = np.log2(folio_nrs).astype(np.uint64)
+            for index, order in zip(folios, folio_orders):
+                index = int(index)
+                order = int(order)
+                nr = 1 << order
+                vfn = int(vfns[index])
+                align = 'aligned' if align_forward(vfn, nr) == vfn else 'unaligned'
+                anon = 'anon' if anons[index] else 'file'
+                stats[anon][align][order] += nr
+
+    # Account PMD-mapped THPs separately, so filter them out of the stats.
+    # There is a race between acquiring the smaps stats and reading pagemap,
+    # where memory could be deallocated. So clamp to zero in case it would
+    # have gone negative.
+    anon_pmd_mapped = vma.stats['AnonHugePages']['value']
+    file_pmd_mapped = vma.stats['ShmemPmdMapped']['value'] + \
+                      vma.stats['FilePmdMapped']['value']
+    stats['anon']['aligned'][PMD_ORDER] = max(0, stats['anon']['aligned'][PMD_ORDER] - kbnr(anon_pmd_mapped))
+    stats['file']['aligned'][PMD_ORDER] = max(0, stats['file']['aligned'][PMD_ORDER] - kbnr(file_pmd_mapped))
+
+    rstats = {
+        f"anon-thp-pmd-aligned-{odkb(PMD_ORDER)}kB": {'type': 'anon', 'value': anon_pmd_mapped},
+        f"file-thp-pmd-aligned-{odkb(PMD_ORDER)}kB": {'type': 'file', 'value': file_pmd_mapped},
+    }
+
+    def flatten_sub(type, subtype, stats):
+        param = f"{type}-thp-pte-{subtype}-{{}}kB"
+        for od, nr in enumerate(stats[2:], 2):
+            rstats[param.format(odkb(od))] = {'type': type, 'value': nrkb(nr)}
+
+    def flatten_type(type, stats):
+        flatten_sub(type, 'aligned', stats['aligned'])
+        flatten_sub(type, 'unaligned', stats['unaligned'])
+        rstats[f"{type}-thp-pte-partial"] = {'type': type, 'value': nrkb(stats['partial'])}
+
+    flatten_type('anon', stats['anon'])
+    flatten_type('file', stats['file'])
+
+    return rstats
+
+
+def cont_parse(vma, order, ranges, anons, heads):
+    # Given the index/vfn/pfn ranges for a page-table region backed by THPs,
+    # plus per-page anon/head flag arrays, return a dictionary of statistics
+    # describing the naturally aligned, contiguously mapped blocks of
+    # 2^order pages.
+    nr_cont = 1 << order
+    nr_anon = 0
+    nr_file = 0
+
+    for rindex, rvfn, rpfn in zip(*ranges):
+        index_next = int(rindex[0])
+        index_end = int(rindex[1]) + 1
+        vfn_start = int(rvfn[0])
+        pfn_start = int(rpfn[0])
+
+        if align_offset(pfn_start, nr_cont) != align_offset(vfn_start, nr_cont):
+            continue
+
+        off = align_forward(vfn_start, nr_cont) - vfn_start
+        index_next += off
+
+        while index_next + nr_cont <= index_end:
+            folio_boundary = heads[index_next+1:index_next+nr_cont].any()
+            if not folio_boundary:
+                if anons[index_next]:
+                    nr_anon += nr_cont
+                else:
+                    nr_file += nr_cont
+            index_next += nr_cont
+
+    # Account blocks that are PMD-mapped separately, so filter them out of
+    # the stats. There is a race between acquiring the smaps stats and
+    # reading pagemap, where memory could be deallocated. So clamp to zero
+    # in case it would have gone negative.
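+    # The PMD-mapped amounts below come from smaps in kB; kbnr() converts
+    # them to page counts before they are subtracted from the tallies above.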
+ anon_pmd_mapped = vma.stats['AnonHugePages']['value'] + file_pmd_mapped = vma.stats['ShmemPmdMapped']['value'] + \ + vma.stats['FilePmdMapped']['value'] + nr_anon = max(0, nr_anon - kbnr(anon_pmd_mapped)) + nr_file = max(0, nr_file - kbnr(file_pmd_mapped)) + + rstats = { + f"anon-cont-pmd-aligned-{nrkb(nr_cont)}kB": {'type': 'anon', 'value': anon_pmd_mapped}, + f"file-cont-pmd-aligned-{nrkb(nr_cont)}kB": {'type': 'file', 'value': file_pmd_mapped}, + } + + rstats[f"anon-cont-pte-aligned-{nrkb(nr_cont)}kB"] = {'type': 'anon', 'value': nrkb(nr_anon)} + rstats[f"file-cont-pte-aligned-{nrkb(nr_cont)}kB"] = {'type': 'file', 'value': nrkb(nr_file)} + + return rstats + + +def vma_print(vma, pid): + # Prints a VMA instance in a format similar to smaps. The main difference is + # that the pid is included as the first value. + print("{:010d}: {:016x}-{:016x} {}{}{}{} {:08x} {:02x}:{:02x} {:08x} {}" + .format( + pid, vma.start, vma.end, + 'r' if vma.read else '-', 'w' if vma.write else '-', + 'x' if vma.execute else '-', 'p' if vma.private else 's', + vma.pgoff, vma.major, vma.minor, vma.inode, vma.name + )) + + +def stats_print(stats, tot_anon, tot_file, inc_empty): + # Print a statistics dictionary. + label_field = 32 + for label, stat in stats.items(): + type = stat['type'] + value = stat['value'] + if value or inc_empty: + pad = max(0, label_field - len(label) - 1) + if type == 'anon' and tot_anon > 0: + percent = f' ({value / tot_anon:3.0%})' + elif type == 'file' and tot_file > 0: + percent = f' ({value / tot_file:3.0%})' + else: + percent = '' + print(f"{label}:{' ' * pad}{value:8} kB{percent}") + + +def vma_parse(vma, pagemap, kpageflags, contorders): + # Generate thp and cont statistics for a single VMA. + start = vma.start >> PAGE_SHIFT + end = vma.end >> PAGE_SHIFT + + pmes = pagemap.get(start, end - start) + present = pmes & PM_PAGE_PRESENT != 0 + pfns = pmes & PM_PFN_MASK + pfns = pfns[present] + vfns = np.arange(start, end, dtype=np.uint64) + vfns = vfns[present] + + pfn_vec = cont_ranges_all([pfns], [pfns])[0] + flags = kpageflags.getv(pfn_vec) + anons = flags & KPF_ANON != 0 + heads = flags & KPF_COMPOUND_HEAD != 0 + thps = flags & KPF_THP != 0 + + vfns = vfns[thps] + pfns = pfns[thps] + anons = anons[thps] + heads = heads[thps] + + indexes = np.arange(len(vfns), dtype=np.uint64) + ranges = cont_ranges_all([vfns, pfns], [indexes, vfns, pfns]) + + thpstats = thp_parse(vma, kpageflags, ranges, indexes, vfns, pfns, anons, heads) + contstats = [cont_parse(vma, order, ranges, anons, heads) for order in contorders] + + tot_anon = vma.stats['Anonymous']['value'] + tot_file = vma.stats['Rss']['value'] - tot_anon + + return { + **thpstats, + **{k: v for s in contstats for k, v in s.items()} + }, tot_anon, tot_file + + +def do_main(args): + pids = set() + rollup = {} + rollup_anon = 0 + rollup_file = 0 + + if args.cgroup: + strict = False + for walk_info in os.walk(args.cgroup): + cgroup = walk_info[0] + with open(f'{cgroup}/cgroup.procs') as pidfile: + for line in pidfile.readlines(): + pids.add(int(line.strip())) + elif args.pid: + strict = True + pids = pids.union(args.pid) + else: + strict = False + for pid in os.listdir('/proc'): + if pid.isdigit(): + pids.add(int(pid)) + + if not args.rollup: + print(" PID START END PROT OFFSET DEV INODE OBJECT") + + for pid in pids: + try: + with PageMap(pid) as pagemap: + with KPageFlags() as kpageflags: + for vma in VMAList(pid, vma_all_stats if args.inc_smaps else vma_min_stats): + if (vma.read or vma.write or vma.execute) and 
vma.stats['Rss']['value'] > 0:
+                            stats, vma_anon, vma_file = vma_parse(vma, pagemap, kpageflags, args.cont)
+                        else:
+                            stats = {}
+                            vma_anon = 0
+                            vma_file = 0
+                        if args.inc_smaps:
+                            stats = {**vma.stats, **stats}
+                        if args.rollup:
+                            for k, v in stats.items():
+                                if k in rollup:
+                                    assert(rollup[k]['type'] == v['type'])
+                                    rollup[k]['value'] += v['value']
+                                else:
+                                    rollup[k] = v
+                            rollup_anon += vma_anon
+                            rollup_file += vma_file
+                        else:
+                            vma_print(vma, pid)
+                            stats_print(stats, vma_anon, vma_file, args.inc_empty)
+        except (FileNotFoundError, ProcessLookupError, FileIOException):
+            if strict:
+                raise
+
+    if args.rollup:
+        stats_print(rollup, rollup_anon, rollup_file, args.inc_empty)
+
+
+def main():
+    docs_width = shutil.get_terminal_size().columns
+    docs_width -= 2
+    docs_width = min(80, docs_width)
+
+    def format(string):
+        text = re.sub(r'\s+', ' ', string)
+        text = re.sub(r'\s*\\n\s*', '\n', text)
+        paras = text.split('\n')
+        paras = [textwrap.fill(p, width=docs_width) for p in paras]
+        return '\n'.join(paras)
+
+    def formatter(prog):
+        return argparse.RawDescriptionHelpFormatter(prog, width=docs_width)
+
+    def size2order(human):
+        units = {
+            "K": 2**10, "M": 2**20, "G": 2**30,
+            "k": 2**10, "m": 2**20, "g": 2**30,
+        }
+        unit = 1
+        if human[-1] in units:
+            unit = units[human[-1]]
+            human = human[:-1]
+        try:
+            size = int(human)
+        except ValueError:
+            raise ArgException('error: --cont value must be integer size with optional KMG unit')
+        size *= unit
+        order = int(math.log2(size / PAGE_SIZE))
+        if order < 1:
+            raise ArgException('error: --cont value must be size of at least 2 pages')
+        if (1 << order) * PAGE_SIZE != size:
+            raise ArgException('error: --cont value must be size of power-of-2 pages')
+        if order > PMD_ORDER:
+            raise ArgException('error: --cont value must be less than or equal to PMD order')
+        return order
+
+    parser = argparse.ArgumentParser(formatter_class=formatter,
+        description=format("""Prints information about how transparent huge
+                    pages are mapped, either system-wide, or for a specified
+                    process or cgroup.\\n
+                    \\n
+                    When run with --pid, the user explicitly specifies the set
+                    of pids to scan. e.g. "--pid 10 [--pid 134 ...]". When run
+                    with --cgroup, the user passes either a v1 or v2 cgroup and
+                    all pids that belong to the cgroup subtree are scanned. When
+                    run with neither --pid nor --cgroup, the full set of pids on
+                    the system is gathered from /proc and scanned as if the user
+                    had provided "--pid 1 --pid 2 ...".\\n
+                    \\n
+                    A default set of statistics is always generated for THP
+                    mappings. However, it is also possible to generate
+                    additional statistics for "contiguous block mappings" where
+                    the block size is user-defined.\\n
+                    \\n
+                    Statistics are maintained independently for anonymous and
+                    file-backed (pagecache) memory and are shown both in kB and
+                    as a percentage of either total anonymous or total
+                    file-backed memory as appropriate.\\n
+                    \\n
+                    THP Statistics\\n
+                    --------------\\n
+                    \\n
+                    Statistics are always generated for fully- and
+                    contiguously-mapped THPs whose mapping address is aligned to
+                    their size, for each <size> supported by the system.
+                    Separate counters describe THPs mapped by PTE vs those
+                    mapped by PMD. (Although note a THP can only be mapped by
+                    PMD if it is PMD-sized):\\n
+                    \\n
+                    - anon-thp-pte-aligned-<size>kB\\n
+                    - file-thp-pte-aligned-<size>kB\\n
+                    - anon-thp-pmd-aligned-<size>kB\\n
+                    - file-thp-pmd-aligned-<size>kB\\n
+                    \\n
+                    Similarly, statistics are always generated for fully- and
+                    contiguously-mapped THPs whose mapping address is *not*
+                    aligned to their size, for each <size> supported by the
+                    system. Due to the unaligned mapping, it is impossible to
+                    map by PMD, so there are only PTE counters for this case:\\n
+                    \\n
+                    - anon-thp-pte-unaligned-<size>kB\\n
+                    - file-thp-pte-unaligned-<size>kB\\n
+                    \\n
+                    Statistics are also always generated for mapped pages that
+                    belong to a THP but where the THP is *not* fully- and
+                    contiguously-mapped. These "partial" mappings are all
+                    counted in the same counter regardless of the size of the
+                    THP that is partially mapped:\\n
+                    \\n
+                    - anon-thp-pte-partial\\n
+                    - file-thp-pte-partial\\n
+                    \\n
+                    Contiguous Block Statistics\\n
+                    ---------------------------\\n
+                    \\n
+                    An optional, additional set of statistics is generated for
+                    every contiguous block size specified with `--cont <size>`.
+                    These statistics show how much memory is mapped in
+                    contiguous blocks of <size> and also aligned to <size>. A
+                    given contiguous block must all belong to the same THP, but
+                    there is no requirement for it to be the *whole* THP.
+                    Separate counters describe contiguous blocks mapped by PTE
+                    vs those mapped by PMD:\\n
+                    \\n
+                    - anon-cont-pte-aligned-<size>kB\\n
+                    - file-cont-pte-aligned-<size>kB\\n
+                    - anon-cont-pmd-aligned-<size>kB\\n
+                    - file-cont-pmd-aligned-<size>kB\\n
+                    \\n
+                    As an example, if monitoring 64K contiguous blocks (--cont
+                    64K), there are a number of sources that could provide such
+                    blocks: a fully- and contiguously-mapped 64K THP that is
+                    aligned to a 64K boundary would provide 1 block. A fully-
+                    and contiguously-mapped 128K THP that is aligned to at least
+                    a 64K boundary would provide 2 blocks. Or a 128K THP that
+                    maps its first 100K, but contiguously and starting at a 64K
+                    boundary would provide 1 block. A fully- and
+                    contiguously-mapped 2M THP would provide 32 blocks. There
+                    are many other possible permutations.\\n"""),
+        epilog=format("""Requires root privilege to access pagemap and
+                    kpageflags."""))
+
+    group = parser.add_mutually_exclusive_group(required=False)
+    group.add_argument('--pid',
+        metavar='pid', required=False, type=int, default=[], action='append',
+        help="""Process id of the target process. May be issued multiple times
+            to scan multiple processes. --pid and --cgroup are mutually
+            exclusive. If neither is provided, all processes are scanned to
+            provide system-wide information.""")
+
+    group.add_argument('--cgroup',
+        metavar='path', required=False,
+        help="""Path to the target cgroup in sysfs. Iterates over every pid in
+            the cgroup and its children. --pid and --cgroup are mutually
+            exclusive. If neither is provided, all processes are scanned to
+            provide system-wide information.""")
+
+    parser.add_argument('--rollup',
+        required=False, default=False, action='store_true',
+        help="""Sum the per-vma statistics to provide a summary over the whole
+            system, process or cgroup.""")
+
+    parser.add_argument('--cont',
+        metavar='size[KMG]', required=False, default=[], action='append',
+        help="""Adds stats for memory that is mapped in contiguous blocks of
+            <size> and also aligned to <size>. May be issued multiple times to
+            track multiple sized blocks. Useful to infer e.g. arm64 contpte and
+            hpa mappings.
Size must be a power-of-2 number of pages.""") + + parser.add_argument('--inc-smaps', + required=False, default=False, action='store_true', + help="""Include all numerical, additive /proc//smaps stats in the + output.""") + + parser.add_argument('--inc-empty', + required=False, default=False, action='store_true', + help="""Show all statistics including those whose value is 0.""") + + parser.add_argument('--periodic', + metavar='sleep_ms', required=False, type=int, + help="""Run in a loop, polling every sleep_ms milliseconds.""") + + args = parser.parse_args() + + try: + args.cont = [size2order(cont) for cont in args.cont] + except ArgException as e: + parser.print_usage() + raise + + if args.periodic: + while True: + do_main(args) + print() + time.sleep(args.periodic / 1000) + else: + do_main(args) + + +if __name__ == "__main__": + try: + main() + except Exception as e: + prog = os.path.basename(sys.argv[0]) + print(f'{prog}: {e}') + exit(1) diff --git a/tools/objtool/Build b/tools/objtool/Build index a3cdf8af6635a81d6e7c48037394ccefd351602c..b71547b660cedba1fc6c89714bd203bfb784328a 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -2,8 +2,9 @@ objtool-y += arch/$(SRCARCH)/ objtool-y += weak.o -objtool-y += check.o -objtool-y += special.o +objtool-$(STATIC_CHECK) += check.o +objtool-$(STATIC_CHECK) += special.o +objtool-$(DYNAMIC_CHECK) += dcheck.o objtool-y += builtin-check.o objtool-y += elf.o objtool-y += objtool.o diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 83b100c1e7f6840c7c33caae8c804e5b5d9f2d46..90f0826cc3efc593b69694b4803d210d55354a63 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -55,9 +55,20 @@ BUILD_ORC := n ifeq ($(SRCARCH),x86) BUILD_ORC := y + STATIC_CHECK := y endif -export BUILD_ORC +ifeq ($(SRCARCH),arm64) + BUILD_ORC := y + DYNAMIC_CHECK := y +endif + +ifeq ($(SRCARCH),loongarch) + BUILD_ORC := y + STATIC_CHECK := y +endif + +export BUILD_ORC STATIC_CHECK DYNAMIC_CHECK export srctree OUTPUT CFLAGS SRCARCH AWK include $(srctree)/tools/build/Makefile.include diff --git a/tools/objtool/arch/arm64/Build b/tools/objtool/arch/arm64/Build new file mode 100644 index 0000000000000000000000000000000000000000..8d2f99a5b1ab70ee9229692906fa605619b31a87 --- /dev/null +++ b/tools/objtool/arch/arm64/Build @@ -0,0 +1,5 @@ +objtool-y += decode.o +objtool-y += orc.o +objtool-y += unwind_hints.o +objtool-y += insn.o +objtool-y += cfi.o diff --git a/tools/objtool/arch/arm64/cfi.c b/tools/objtool/arch/arm64/cfi.c new file mode 100644 index 0000000000000000000000000000000000000000..753aa82d20c84bb399756d663b98d95a38cab9b3 --- /dev/null +++ b/tools/objtool/arch/arm64/cfi.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#include +#include + +#include +#include +#include +#include +#include + +unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; + +struct cfi_init_state initial_func_cfi; +struct cfi_state init_cfi; +struct cfi_state func_cfi; +struct cfi_state force_undefined_cfi; + +void init_cfi_state(struct cfi_state *cfi) +{ + int i; + + for (i = 0; i < CFI_NUM_REGS; i++) { + cfi->regs[i].base = CFI_UNDEFINED; + cfi->vals[i].base = CFI_UNDEFINED; + } + cfi->cfa.base = CFI_UNDEFINED; + cfi->drap_reg = CFI_UNDEFINED; + cfi->drap_offset = -1; +} + +static struct cfi_state *cfi_alloc(void) +{ + struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); + + if (!cfi) { + WARN("calloc failed"); + exit(1); + } + nr_cfi++; + return cfi; +} + +static int cfi_bits; +static struct 
hlist_head *cfi_hash; + +inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) +{ + return memcmp((void *)cfi1 + sizeof(cfi1->hash), + (void *)cfi2 + sizeof(cfi2->hash), + sizeof(struct cfi_state) - sizeof(struct hlist_node)); +} + +static inline u32 cfi_key(struct cfi_state *cfi) +{ + return jhash((void *)cfi + sizeof(cfi->hash), + sizeof(*cfi) - sizeof(cfi->hash), 0); +} + +struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) +{ + struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; + struct cfi_state *obj; + + hlist_for_each_entry(obj, head, hash) { + if (!cficmp(cfi, obj)) { + nr_cfi_cache++; + return obj; + } + } + + obj = cfi_alloc(); + *obj = *cfi; + hlist_add_head(&obj->hash, head); + + return obj; +} + +void cfi_hash_add(struct cfi_state *cfi) +{ + struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; + + hlist_add_head(&cfi->hash, head); +} + +void *cfi_hash_alloc(unsigned long size) +{ + cfi_bits = max(10, ilog2(size)); + cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, + PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANON, -1, 0); + if (cfi_hash == (void *)-1L) { + WARN("mmap fail cfi_hash"); + cfi_hash = NULL; + } else if (opts.stats) { + printf("cfi_bits: %d\n", cfi_bits); + } + + return cfi_hash; +} + +void set_func_state(struct cfi_state *state) +{ + state->cfa = initial_func_cfi.cfa; + memcpy(&state->regs, &initial_func_cfi.regs, + CFI_NUM_REGS * sizeof(struct cfi_reg)); + state->stack_size = initial_func_cfi.cfa.offset; + state->type = UNWIND_HINT_TYPE_CALL; +} diff --git a/tools/objtool/arch/arm64/decode.c b/tools/objtool/arch/arm64/decode.c new file mode 100644 index 0000000000000000000000000000000000000000..49d46feb6a8a22b2526dc0f57f7e381e585608e0 --- /dev/null +++ b/tools/objtool/arch/arm64/decode.c @@ -0,0 +1,709 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * decode.c - ARM64 instruction decoder for dynamic FP validation. Only a + * small subset of the instructions need to be decoded. The rest + * only need to be sanity checked. + * + * Author: Madhavan T. Venkataraman (madvenka@linux.microsoft.com) + * + * Copyright (C) 2022 Microsoft Corporation + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include + +unsigned long nr_insns; + +/* ARM64 instructions are all 4 bytes wide. 
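+ * The decoder can therefore walk the text section in fixed 4-byte steps,
+ * with no need for instruction-length decoding.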
*/ +#define INSN_SIZE 4 + +/* --------------------- arch support functions ------------------------- */ + +void arch_initial_func_cfi_state(struct cfi_init_state *state) +{ + int i; + + for (i = 0; i < CFI_NUM_REGS; i++) { + state->regs[i].base = CFI_UNDEFINED; + state->regs[i].offset = 0; + } + state->regs[CFI_FP].base = CFI_CFA; + + /* initial CFA (call frame address) */ + state->cfa.base = CFI_SP; + state->cfa.offset = 0; +} + +unsigned long arch_dest_reloc_offset(int addend) +{ + return addend; +} + +unsigned long arch_jump_destination(struct instruction *insn) +{ + return insn->offset + insn->immediate; +} + +int arch_decode_hint_reg(u8 sp_reg, int *base) +{ + switch (sp_reg) { + case ORC_REG_UNDEFINED: + *base = CFI_UNDEFINED; + break; + case ORC_REG_SP: + *base = CFI_SP; + break; + case ORC_REG_FP: + *base = CFI_FP; + break; + default: + return -1; + } + + return 0; +} + +/* --------------------- instruction decode structs ------------------------ */ + +struct decode_var { + u32 insn; + enum insn_type type; + s64 imm; + unsigned int mode1; + unsigned int mode2; + unsigned int check_reg; + struct stack_op **ops_list; +}; + +struct decode { + unsigned long opmask; + unsigned long op; + unsigned int width; + unsigned int shift; + unsigned int bits; + unsigned int sign_extend; + unsigned int mult; + unsigned int mode1; + unsigned int mode2; + void (*func)(struct decode *decode, struct decode_var *var); +}; + +struct class { + unsigned long opmask; + unsigned long op; + void (*check)(struct decode_var *var); +}; + +/* ------------------------ stack operations ------------------------------- */ + +static void add_stack_op(unsigned char src_reg, enum op_src_type src_type, + s64 src_offset, + unsigned char dest_reg, enum op_dest_type dest_type, + s64 dest_offset, + struct stack_op **ops_list) +{ + struct stack_op *op, *tmp; + + op = calloc(1, sizeof(*op)); + if (!op) { + WARN("calloc failed"); + return; + } + + op->src.reg = src_reg; + op->src.type = src_type; + op->src.offset = src_offset; + op->dest.reg = dest_reg; + op->dest.type = dest_type; + op->dest.offset = dest_offset; + + op->next = NULL; + + if (*ops_list == NULL) + *ops_list = op; + else { + tmp = *ops_list; + while (tmp->next) + tmp = tmp->next; + tmp->next = op; + } +} + +static void add_op(struct decode_var *var, + unsigned char rn, s64 offset, unsigned char rd) +{ + add_stack_op(rn, OP_SRC_ADD, offset, rd, OP_DEST_REG, 0, + var->ops_list); +} + +static void load_op(struct decode_var *var, s64 offset, unsigned char rd) +{ + add_stack_op(CFI_SP, OP_SRC_REG_INDIRECT, offset, rd, OP_DEST_REG, 0, + var->ops_list); +} + +static void store_op(struct decode_var *var, s64 offset, unsigned char rd) +{ + add_stack_op(CFI_SP, OP_SRC_REG, 0, rd, OP_DEST_REG_INDIRECT, offset, + var->ops_list); +} + +/* ------------------------ decode functions ------------------------------- */ + +#define is_saved_reg(rt) ((rt) == CFI_FP || (rt) == CFI_RA) +#define is_frame_reg(rt) ((rt) == CFI_FP || (rt) == CFI_SP) + +/* ----- Add/Subtract instructions. 
----- */ + +#define CMN_OP 0x31000000 /* Alias of ADDS imm */ +#define CMP_OP 0x71000000 /* Alias of SUBS imm */ + +static void add(struct decode *decode, struct decode_var *var) +{ + unsigned int rd = var->insn & 0x1F; + unsigned int rn = (var->insn >> 5) & 0x1F; + unsigned int shift = (var->insn >> 22) & 1; + + if (decode->op == CMN_OP || decode->op == CMP_OP) + return; + + if (!is_frame_reg(rd)) + return; + + if (is_frame_reg(rn)) { + if (shift) + var->imm <<= 12; + add_op(var, rn, var->imm, rd); + } else { + var->type = INSN_UNRELIABLE; + } +} + +#define CMN_EXT_OP 0x2B200000 /* Alias of ADDS ext */ +#define CMP_EXT_OP 0x6B200000 /* Alias of SUBS ext */ + +static void addc(struct decode *decode, struct decode_var *var) +{ + unsigned int rd = var->insn & 0x1F; + + if (decode->op == CMN_EXT_OP || decode->op == CMP_EXT_OP) + return; + + if (is_frame_reg(rd)) + var->type = INSN_UNRELIABLE; +} + +static void sub(struct decode *decode, struct decode_var *var) +{ + var->imm = -var->imm; + return add(decode, var); +} + +/* ----- Load instructions. ----- */ + +/* + * For some instructions, the target register cannot be FP. There are 3 cases: + * + * - The register width is 32 bits. FP cannot be 32 bits. + * - The register is loaded from one that is not the SP. We do not track + * the value of other registers in static analysis. + * - The instruction does not make sense for the FP to be the target. + */ +static void check_reg(unsigned int reg, struct decode_var *var) +{ + if (reg == CFI_FP) + var->type = INSN_UNRELIABLE; +} + +static void ldp(struct decode *decode, struct decode_var *var) +{ + unsigned int rt1 = var->insn & 0x1F; + unsigned int rt2 = (var->insn >> 10) & 0x1F; + unsigned int rn = (var->insn >> 5) & 0x1F; + s64 imm; + + if (rn != CFI_SP || var->check_reg) { + check_reg(rt1, var); + check_reg(rt2, var); + } + + if (rn == CFI_SP) { + if (var->mode1 && var->mode2) /* Pre-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + + imm = var->mode1 ? 0 : var->imm; + if (is_saved_reg(rt1)) + load_op(var, imm, rt1); + if (is_saved_reg(rt2)) + load_op(var, imm + 8, rt2); + + if (var->mode1 && !var->mode2) /* Post-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + } +} + +static void ldpc(struct decode *decode, struct decode_var *var) +{ + var->check_reg = 1; + ldp(decode, var); +} + +static void ldr(struct decode *decode, struct decode_var *var) +{ + unsigned int rd = var->insn & 0x1F; + unsigned int rn = (var->insn >> 5) & 0x1F; + s64 imm; + + if (rn != CFI_SP || var->check_reg) + check_reg(rd, var); + + if (rn == CFI_SP) { + if (var->mode1 && var->mode2) /* Pre-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + + imm = var->mode1 ? 0 : var->imm; + if (is_saved_reg(rd)) + load_op(var, imm, rd); + + if (var->mode1 && !var->mode2) /* Post-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + } +} + +/* ----- Store instructions. ----- */ + +static void stp(struct decode *decode, struct decode_var *var) +{ + unsigned int rt1 = var->insn & 0x1F; + unsigned int rt2 = (var->insn >> 10) & 0x1F; + unsigned int rn = (var->insn >> 5) & 0x1F; + s64 imm; + + if (var->check_reg) { + check_reg(rt1, var); + check_reg(rt2, var); + } + + if (rn == CFI_SP) { + if (var->mode1 && var->mode2) /* Pre-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + + imm = var->mode1 ? 
0 : var->imm; + if (is_saved_reg(rt1)) + store_op(var, imm, rt1); + if (is_saved_reg(rt2)) + store_op(var, imm + 8, rt2); + + if (var->mode1 && !var->mode2) /* Post-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + } +} + +static void stpc(struct decode *decode, struct decode_var *var) +{ + var->check_reg = 1; + stp(decode, var); +} + +static void str(struct decode *decode, struct decode_var *var) +{ + unsigned int rd = var->insn & 0x1F; + unsigned int rn = (var->insn >> 5) & 0x1F; + s64 imm; + + if (var->check_reg) + check_reg(rd, var); + + if (rn == CFI_SP) { + if (var->mode1 && var->mode2) /* Pre-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + + imm = var->mode1 ? 0 : var->imm; + if (is_saved_reg(rd)) + store_op(var, imm, rd); + + if (var->mode1 && !var->mode2) /* Post-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + } +} + +static void strc(struct decode *decode, struct decode_var *var) +{ + var->check_reg = 1; + str(decode, var); +} + +/* ----- Control transfer instructions. ----- */ + +#define BR_UNCONDITIONAL 0x14000000 + +static void bra(struct decode *decode, struct decode_var *var) +{ + if (var->imm) { + if (decode->op == BR_UNCONDITIONAL) + var->type = INSN_JUMP_UNCONDITIONAL; + else + var->type = INSN_JUMP_CONDITIONAL; + } else { + var->type = INSN_JUMP_DYNAMIC; + } +} + +static void call(struct decode *decode, struct decode_var *var) +{ + var->type = var->imm ? INSN_CALL : INSN_CALL_DYNAMIC; +} + +static void ret(struct decode *decode, struct decode_var *var) +{ + var->type = INSN_RETURN; +} + +/* ----- Miscellaneous instructions. ----- */ + +static void bug(struct decode *decode, struct decode_var *var) +{ + var->type = INSN_BUG; +} + +static void pac(struct decode *decode, struct decode_var *var) +{ + var->type = INSN_START; +} + +/* ------------------------ Instruction decode ----------------------------- */ + +struct decode decode_array[] = { +/* + * mask OP code mask + * opcode OP code + * width Target register width. 
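It also selects the default scale for the immediate when mult is 0 (4 for 32-bit, 8 for 64-bit).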
Values can be: + * 64 (64-bit) + * 32 (32-bit), + * X (64-bit if bit X in the instruction is set) + * -X (32-bit if bit X in the instruction is set) + * shift Shift for the immediate value + * bits Number of bits in the immediate value + * sign Sign extend the immediate value + * mult Multiplier for the immediate value + * am1 Addressing mode bit 1 + * am2 Addressing mode bit 2 + * func Decode function + * + * =============================== INSTRUCTIONS =============================== + * mask opcode width shift bits sign mult am1 am2 func + * ============================================================================ + */ +{ 0x7E400000, 0x28400000, 31, 15, 7, 1, 0, 23, 24, ldp /* LDP */}, +{ 0x7E400000, 0x68400000, 32, 15, 7, 1, 4, 23, 24, ldp /* LDPSW */}, +{ 0x7FC00000, 0x28400000, 31, 15, 7, 1, 0, 0, 0, ldpc /* LDNP */}, +{ 0xBFE00000, 0xB8400000, 30, 12, 9, 1, 1, 10, 11, ldr /* LDR */}, +{ 0xBFC00000, 0xB9400000, 30, 10, 12, 0, 0, 0, 0, ldr /* LDR off */}, +{ 0xFF200400, 0xF8200400, 64, 12, 9, 1, 8, 11, 11, ldr /* LDRA */}, +{ 0xFFC00000, 0x39400000, 32, 10, 12, 0, 1, 0, 0, ldr /* LDRB off */}, +{ 0xFFE00000, 0x38400000, 32, 12, 9, 1, 1, 10, 11, ldr /* LDRB */}, +{ 0xFFC00000, 0x79400000, 32, 10, 12, 0, 2, 0, 0, ldr /* LDRH off */}, +{ 0xFFE00000, 0x78400000, 32, 12, 9, 1, 1, 10, 11, ldr /* LDRH */}, +{ 0xFF800000, 0x39800000, -22, 10, 12, 0, 1, 0, 0, ldr /* LDRSB off */}, +{ 0xFFA00000, 0x38800000, -22, 12, 9, 1, 1, 10, 11, ldr /* LDRSB */}, +{ 0xFF800000, 0x79800000, -22, 10, 12, 0, 2, 0, 0, ldr /* LDRSH off */}, +{ 0xFFA00000, 0x78800000, -22, 12, 9, 1, 1, 10, 11, ldr /* LDRSH */}, +{ 0xFFC00000, 0xB9800000, 32, 10, 12, 0, 4, 0, 0, ldr /* LDRSW off */}, +{ 0xFFE00000, 0xB8800000, 32, 12, 9, 1, 1, 10, 11, ldr /* LDRSW */}, +{ 0x7E000000, 0x28000000, 31, 15, 7, 1, 0, 23, 24, stp /* STP */}, +{ 0x7E400000, 0x28000000, 31, 15, 7, 1, 0, 23, 24, stp /* STG */}, +{ 0xFE400000, 0x68000000, 64, 15, 7, 1, 16, 23, 24, stpc /* STGP */}, +{ 0x7FC00000, 0x28000000, 31, 15, 7, 1, 0, 0, 0, stpc /* STNP */}, +{ 0xBFC00000, 0xB9000000, 30, 10, 12, 0, 0, 0, 0, str /* STR off */}, +{ 0xBFE00000, 0xB8000000, 30, 12, 9, 1, 1, 10, 11, str /* STR */}, +{ 0xFFE00000, 0xD9200000, 64, 12, 9, 1, 16, 10, 11, strc /* STG */}, +{ 0xFFE00000, 0xD9A00000, 64, 12, 9, 1, 16, 10, 11, strc /* ST2G */}, +{ 0x7F800000, 0x11000000, 31, 10, 12, 0, 1, 0, 0, add /* ADD imm */}, +{ 0x7FE00000, 0x0B200000, 31, 10, 3, 0, 1, 0, 0, addc /* ADD ext */}, +{ 0x7F800000, 0x31000000, 31, 10, 12, 0, 1, 0, 0, add /* ADDS imm */}, +{ 0x7FE00000, 0x2B200000, 31, 10, 3, 0, 1, 0, 0, addc /* ADDS ext */}, +{ 0x7F800000, 0x51000000, 31, 10, 12, 0, 1, 0, 0, sub /* SUB imm */}, +{ 0x7FE00000, 0x4B200000, 31, 10, 3, 0, 1, 0, 0, addc /* SUB ext */}, +{ 0x7F800000, 0x71000000, 31, 10, 12, 0, 1, 0, 0, sub /* SUBS imm */}, +{ 0x7FE00000, 0x6B200000, 31, 10, 3, 0, 1, 0, 0, addc /* SUBS ext */}, +{ 0xFC000000, 0x14000000, 64, 0, 26, 1, 4, 0, 0, bra /* B */}, +{ 0xFF000010, 0x54000000, 64, 5, 19, 1, 4, 0, 0, bra /* B.cond */}, +{ 0xFF000010, 0x54000010, 64, 5, 19, 1, 4, 0, 0, bra /* BC.cond */}, +{ 0xFFFFFC1F, 0xD61F0000, 64, 0, 0, 0, 0, 0, 0, bra /* BR */}, +{ 0xFEFFF800, 0xD61F0800, 64, 0, 0, 0, 0, 0, 0, bra /* BRA */}, +{ 0x7E000000, 0x34000000, 31, 5, 19, 1, 4, 0, 0, bra /* CBZ/CBNZ */}, +{ 0x7E000000, 0x36000000, 31, 5, 14, 1, 4, 0, 0, bra /* TBZ/TBNZ */}, +{ 0xFC000000, 0x94000000, 64, 0, 26, 1, 4, 0, 0, call /* BL */}, +{ 0xFFFFFC1F, 0xD63F0000, 64, 0, 0, 0, 0, 0, 0, call /* BLR */}, +{ 0xFEFFF800, 0xD63F0800, 64, 0, 0, 0, 0, 0, 0, call /* 
BLRA */},
+{ 0xFFFFFC1F, 0xD65F0000, 64, 0, 0, 0, 0, 0, 0, ret /* RET */},
+{ 0xFFFFFBFF, 0xD65F0BFF, 64, 0, 0, 0, 0, 0, 0, ret /* RETA */},
+{ 0xFFFFFFFF, 0xD69F03E0, 64, 0, 0, 0, 0, 0, 0, ret /* ERET */},
+{ 0xFFFFFBFF, 0xD69F0BFF, 64, 0, 0, 0, 0, 0, 0, ret /* ERETA */},
+{ 0xFFE00000, 0xD4200000, 64, 5, 16, 0, 1, 0, 0, bug /* BRK */},
+{ 0xFFFFFFFF, 0xD503233F, 64, 0, 0, 0, 1, 0, 0, pac /* PACIASP */},
+};
+unsigned int ndecode = ARRAY_SIZE(decode_array);
+
+static void ignore(struct decode_var *var)
+{
+}
+
+static void check_target(struct decode_var *var)
+{
+	unsigned int rd = var->insn & 0x1F;
+
+	check_reg(rd, var);
+}
+
+struct class class_array[] = {
+/*
+ * mask	Class OP mask
+ * opcode	Class OP code
+ * check	Function to perform checks
+ *
+ * ========================== INSTRUCTION CLASSES =============================
+ * mask	opcode	check
+ * ============================================================================
+ */
+{ 0x1E000000, 0x00000000, ignore /* RSVD_00 */ },
+{ 0x1E000000, 0x02000000, ignore /* UNALLOC_01 */ },
+{ 0x1E000000, 0x04000000, ignore /* SVE_02 */ },
+{ 0x1E000000, 0x06000000, ignore /* UNALLOC_03 */ },
+{ 0x1E000000, 0x08000000, check_target /* LOAD_STORE_04 */ },
+{ 0x1E000000, 0x0A000000, check_target /* DP_REGISTER_05 */ },
+{ 0x1E000000, 0x0C000000, ignore /* LOAD_STORE_06 */ },
+{ 0x1E000000, 0x0E000000, ignore /* SIMD_FP_07 */ },
+{ 0x1E000000, 0x10000000, check_target /* DP_IMMEDIATE_08 */ },
+{ 0x1E000000, 0x12000000, check_target /* DP_IMMEDIATE_09 */ },
+{ 0x1E000000, 0x14000000, check_target /* BR_SYS_10 */ },
+{ 0x1E000000, 0x16000000, check_target /* BR_SYS_11 */ },
+{ 0x1E000000, 0x18000000, check_target /* LOAD_STORE_12 */ },
+{ 0x1E000000, 0x1A000000, ignore /* DP_REGISTER_13 */ },
+{ 0x1E000000, 0x1C000000, check_target /* LOAD_STORE_14 */ },
+{ 0x1E000000, 0x1E000000, ignore /* SIMD_FP_15 */ },
+};
+unsigned int nclass = ARRAY_SIZE(class_array);
+
+static inline s64 sign_extend(s64 imm, unsigned int bits)
+{
+	return (imm << (64 - bits)) >> (64 - bits);
+}
+
+int arch_decode_instruction(struct objtool_file *file,
+			    const struct section *sec,
+			    unsigned long offset, unsigned int maxlen,
+			    struct instruction *insn)
+{
+	struct decode *decode;
+	struct decode_var var;
+	struct class *class;
+	int width;
+	unsigned int mask, mult, i;
+
+	if (maxlen < INSN_SIZE)
+		return -1;
+	insn->len = INSN_SIZE;
+
+	var.insn = *(u32 *)(sec->data->d_buf + offset);
+	var.type = INSN_OTHER;
+	var.imm = 0;
+	var.ops_list = &insn->stack_ops;
+
+	insn->type = INSN_OTHER;
+
+	/* Decode the instruction, if listed. */
+	for (i = 0; i < ndecode; i++) {
+		decode = &decode_array[i];
+
+		if ((var.insn & decode->opmask) != decode->op)
+			continue;
+
+		/* Extract addressing mode (for some instructions). */
+		var.mode1 = 0;
+		var.mode2 = 0;
+		if (decode->mode1)
+			var.mode1 = (var.insn >> decode->mode1) & 1;
+		if (decode->mode2)
+			var.mode2 = (var.insn >> decode->mode2) & 1;
+
+		/*
+		 * Determine target register width. The width column can hold
+		 * a literal width or a (possibly negated) bit number, so it
+		 * must be evaluated as a signed value.
+		 */
+		width = (int)decode->width;
+		if (width < 0)
+			width = (var.insn & (1U << -width)) ? 32 : 64;
+		else if (width < 32)
+			width = (var.insn & (1U << width)) ? 64 : 32;
+
+		/*
+		 * If the target register width is 32 bits, set the check flag
+		 * so that the target registers are checked to make sure that
+		 * none of them is the FP. We should not be using 32-bit
+		 * values in that register.
+		 */
+		var.check_reg = (width == 32);
+
+		/*
+		 * Extract the immediate value.
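+		 * For example, "stp x29, x30, [sp, #-16]!" (0xA9BF7BFD)
+		 * matches the STP entry (shift = 15, bits = 7): the raw field
+		 * is (0xA9BF7BFD >> 15) & 0x7F = 0x7E, which sign-extends to
+		 * -2 and is then scaled by 8 (64-bit registers, mult = 0),
+		 * giving an immediate of -16.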
+		 */
+		mask = (1 << decode->bits) - 1;
+		var.imm = (var.insn >> decode->shift) & mask;
+		if (decode->sign_extend)
+			var.imm = sign_extend(var.imm, decode->bits);
+
+		/* Scale the immediate value. */
+		mult = decode->mult;
+		if (!mult)
+			mult = (width == 32) ? 4 : 8;
+		var.imm *= mult;
+
+		/* Decode the instruction. */
+		decode->func(decode, &var);
+		goto out;
+	}
+
+	/*
+	 * Sanity check to make sure that the compiler has not generated
+	 * code that modifies the FP or the RA in an unexpected way.
+	 */
+	for (i = 0; i < nclass; i++) {
+		class = &class_array[i];
+		if ((var.insn & class->opmask) == class->op) {
+			class->check(&var);
+			goto out;
+		}
+	}
+out:
+	insn->immediate = var.imm;
+	insn->type = var.type;
+	return 0;
+}
+
+/*
+ * Call the arch-specific instruction decoder for all the instructions and add
+ * them to the global instruction list.
+ */
+int decode_instructions(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *func;
+	unsigned long offset;
+	struct instruction *insn;
+	int ret;
+
+	for_each_sec(file, sec) {
+		struct instruction *insns = NULL;
+		u8 prev_len = 0;
+		u8 idx = 0;
+
+		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+			continue;
+
+		if (strcmp(sec->name, ".altinstr_replacement") &&
+		    strcmp(sec->name, ".altinstr_aux") &&
+		    strncmp(sec->name, ".discard.", 9))
+			sec->text = true;
+
+		if (!strcmp(sec->name, ".noinstr.text") ||
+		    !strcmp(sec->name, ".entry.text") ||
+		    !strcmp(sec->name, ".cpuidle.text") ||
+		    !strncmp(sec->name, ".text..__x86.", 13))
+			sec->noinstr = true;
+
+		/*
+		 * .init.text code runs before userspace and thus doesn't
+		 * strictly need retpolines, except for modules: they are
+		 * loaded late and very much do need retpolines in their
+		 * .init.text.
+		 */
+		if (!strcmp(sec->name, ".init.text") && !opts.module)
+			sec->init = true;
+
+		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
+			if (!insns || idx == INSN_CHUNK_MAX) {
+				insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
+				if (!insns) {
+					WARN("calloc failed");
+					return -1;
+				}
+				idx = 0;
+			} else {
+				idx++;
+			}
+			insn = &insns[idx];
+			insn->idx = idx;
+
+			INIT_LIST_HEAD(&insn->call_node);
+			insn->sec = sec;
+			insn->offset = offset;
+			insn->prev_len = prev_len;
+
+			ret = arch_decode_instruction(file, sec, offset,
+						      sec->sh.sh_size - offset,
+						      insn);
+			if (ret)
+				return ret;
+
+			prev_len = insn->len;
+
+			/*
+			 * By default, "ud2" is a dead end unless otherwise
+			 * annotated, because GCC 7 inserts it for certain
+			 * divide-by-zero cases.
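+			 * On arm64, the equivalent is BRK, which the decode
+			 * table maps to INSN_BUG; it is treated the same way
+			 * here.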
+ */ + if (insn->type == INSN_BUG) + insn->dead_end = true; + + hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); + nr_insns++; + } + +// printf("%s: last chunk used: %d\n", sec->name, (int)idx); + + sec_for_each_sym(sec, func) { + if (func->type != STT_NOTYPE && func->type != STT_FUNC) + continue; + + if (func->offset == sec->sh.sh_size) { + /* Heuristic: likely an "end" symbol */ + if (func->type == STT_NOTYPE) + continue; + WARN("%s(): STT_FUNC at end of section", + func->name); + return -1; + } + + if (func->embedded_insn || func->alias != func) + continue; + + if (!find_insn(file, sec, func->offset)) { + WARN("%s(): can't find starting instruction", + func->name); + return -1; + } + + sym_for_each_insn(file, func, insn) { + insn->sym = func; + if (func->type == STT_FUNC && + insn->type == INSN_ENDBR && + list_empty(&insn->call_node)) { + if (insn->offset == func->offset) { + list_add_tail(&insn->call_node, &file->endbr_list); + file->nr_endbr++; + } else { + file->nr_endbr_int++; + } + } + } + } + } + + if (opts.stats) + printf("nr_insns: %lu\n", nr_insns); + + return 0; +} diff --git a/tools/objtool/arch/arm64/include/arch/cfi.h b/tools/objtool/arch/arm64/include/arch/cfi.h new file mode 100644 index 0000000000000000000000000000000000000000..55f7a988d824cfa7407b9db0bc0480d45640376d --- /dev/null +++ b/tools/objtool/arch/arm64/include/arch/cfi.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#ifndef _OBJTOOL_ARCH_CFI_H +#define _OBJTOOL_ARCH_CFI_H + +#include + +#include + +void init_cfi_state(struct cfi_state *cfi); +bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2); +struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi); +void cfi_hash_add(struct cfi_state *cfi); +void *cfi_hash_alloc(unsigned long size); +void set_func_state(struct cfi_state *state); + +extern unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; +extern struct cfi_init_state initial_func_cfi; +extern struct cfi_state init_cfi; +extern struct cfi_state func_cfi; +extern struct cfi_state force_undefined_cfi; + +#endif /* _OBJTOOL_ARCH_CFI_H */ diff --git a/tools/objtool/arch/arm64/include/arch/cfi_regs.h b/tools/objtool/arch/arm64/include/arch/cfi_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..3b36ddeedddacb5c5b970638c5fc314dc51c5f91 --- /dev/null +++ b/tools/objtool/arch/arm64/include/arch/cfi_regs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _OBJTOOL_CFI_REGS_H +#define _OBJTOOL_CFI_REGS_H + +#define CFI_FP 29 +#define CFI_BP CFI_FP +#define CFI_RA 30 +#define CFI_SP 31 + +#define CFI_NUM_REGS 32 + +#endif /* _OBJTOOL_CFI_REGS_H */ diff --git a/tools/objtool/arch/arm64/include/arch/elf.h b/tools/objtool/arch/arm64/include/arch/elf.h new file mode 100644 index 0000000000000000000000000000000000000000..9f75e8a3210cd22891b8c1fa0cadddbbc1a6525f --- /dev/null +++ b/tools/objtool/arch/arm64/include/arch/elf.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ + +#ifndef _OBJTOOL_ARCH_ELF +#define _OBJTOOL_ARCH_ELF + +#define R_NONE R_AARCH64_NONE +#define R_ABS32 R_AARCH64_ABS32 +#define R_ABS64 R_AARCH64_ABS64 +#define R_DATA32 R_AARCH64_PREL32 +#define R_DATA64 R_AARCH64_PREL32 +#define R_TEXT32 R_AARCH64_PREL32 +#define R_TEXT64 R_AARCH64_PREL32 +#define R_PCREL R_AARCH64_PREL32 + +#endif /* _OBJTOOL_ARCH_ELF */ diff --git a/tools/objtool/arch/arm64/include/arch/endianness.h 
b/tools/objtool/arch/arm64/include/arch/endianness.h new file mode 100644 index 0000000000000000000000000000000000000000..092401687c3c30af85770b905355c5bedd13e82a --- /dev/null +++ b/tools/objtool/arch/arm64/include/arch/endianness.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ARCH_ENDIANNESS_H +#define _ARCH_ENDIANNESS_H + +#include + +#define __TARGET_BYTE_ORDER __LITTLE_ENDIAN + +#endif /* _ARCH_ENDIANNESS_H */ diff --git a/tools/objtool/arch/arm64/include/arch/orc.h b/tools/objtool/arch/arm64/include/arch/orc.h new file mode 100644 index 0000000000000000000000000000000000000000..24fc9cf4de974c575e74ada0f0dbf2115a38fa19 --- /dev/null +++ b/tools/objtool/arch/arm64/include/arch/orc.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#ifndef _OBJTOOL_ORC_H +#define _OBJTOOL_ORC_H + +#include + +int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, + struct instruction *insn); +void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i); +int write_orc_entry(struct elf *elf, struct section *orc_sec, + struct section *ip_sec, unsigned int idx, + struct section *insn_sec, unsigned long insn_off, + struct orc_entry *o); +const char *orc_type_name(unsigned int type); +void orc_print_reg(unsigned int reg, int offset); +void orc_print_sp(void); +void orc_print_fp(void); + +#endif /* _OBJTOOL_ORC_H */ diff --git a/tools/objtool/arch/arm64/insn.c b/tools/objtool/arch/arm64/insn.c new file mode 100644 index 0000000000000000000000000000000000000000..b205868690c2f6fb6dc7ef030c7de1f5e8326c8a --- /dev/null +++ b/tools/objtool/arch/arm64/insn.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#include + +#include +#include +#include +#include + +struct instruction *find_insn(struct objtool_file *file, + struct section *sec, unsigned long offset) +{ + struct instruction *insn; + + hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) { + if (insn->sec == sec && insn->offset == offset) + return insn; + } + + return NULL; +} + +struct instruction *next_insn_same_sec(struct objtool_file *file, + struct instruction *insn) +{ + if (insn->idx == INSN_CHUNK_MAX) + return find_insn(file, insn->sec, insn->offset + insn->len); + + insn++; + if (!insn->len) + return NULL; + + return insn; +} + +struct instruction *next_insn_same_func(struct objtool_file *file, + struct instruction *insn) +{ + struct instruction *next = next_insn_same_sec(file, insn); + struct symbol *func = insn_func(insn); + + if (!func) + return NULL; + + if (next && insn_func(next) == func) + return next; + + /* Check if we're already in the subfunction: */ + if (func == func->cfunc) + return NULL; + + /* Move to the subfunction: */ + return find_insn(file, func->cfunc->sec, func->cfunc->offset); +} + +struct instruction *prev_insn_same_sec(struct objtool_file *file, + struct instruction *insn) +{ + if (insn->idx == 0) { + if (insn->prev_len) + return find_insn(file, insn->sec, insn->offset - insn->prev_len); + return NULL; + } + + return insn - 1; +} + +struct instruction *prev_insn_same_sym(struct objtool_file *file, + struct instruction *insn) +{ + struct instruction *prev = prev_insn_same_sec(file, insn); + + if (prev && insn_func(prev) == insn_func(insn)) + return prev; + + return NULL; +} + +void init_insn_state(struct objtool_file *file, struct insn_state *state, + struct section *sec) +{ + memset(state, 0, sizeof(*state)); + 
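	/* Start from a clean CFI state. */
+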
init_cfi_state(&state->cfi); + + /* + * We need the full vmlinux for noinstr validation, otherwise we can + * not correctly determine insn_call_dest(insn)->sec (external symbols + * do not have a section). + */ + if (opts.link && opts.noinstr && sec) + state->noinstr = sec->noinstr; +} + +struct instruction *find_last_insn(struct objtool_file *file, + struct section *sec) +{ + struct instruction *insn = NULL; + unsigned int offset; + unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0; + + for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--) + insn = find_insn(file, sec, offset); + + return insn; +} + +struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) +{ + struct reloc *reloc; + + if (insn->no_reloc) + return NULL; + + if (!file) + return NULL; + + reloc = find_reloc_by_dest_range(file->elf, insn->sec, + insn->offset, insn->len); + if (!reloc) { + insn->no_reloc = 1; + return NULL; + } + + return reloc; +} + +bool is_first_func_insn(struct objtool_file *file, + struct instruction *insn, struct symbol *sym) +{ + if (insn->offset == sym->offset) + return true; + + /* Allow direct CALL/JMP past ENDBR */ + if (opts.ibt) { + struct instruction *prev = prev_insn_same_sym(file, insn); + + if (prev && prev->type == INSN_ENDBR && + insn->offset == sym->offset + prev->len) + return true; + } + + return false; +} + +bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2, + bool print) +{ + struct cfi_state *cfi1 = insn->cfi; + int i; + + if (!cfi1) { + WARN("CFI missing"); + return false; + } + + if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) { + if (print) { + WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d", + cfi1->cfa.base, cfi1->cfa.offset, + cfi2->cfa.base, cfi2->cfa.offset); + } + } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) { + for (i = 0; i < CFI_NUM_REGS; i++) { + if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], + sizeof(struct cfi_reg))) + continue; + if (print) { + WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", + i, cfi1->regs[i].base, cfi1->regs[i].offset, + i, cfi2->regs[i].base, cfi2->regs[i].offset); + } + break; + } + + } else if (cfi1->type != cfi2->type) { + if (print) { + WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d", + cfi1->type, cfi2->type); + } + + } else if (cfi1->drap != cfi2->drap || + (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) || + (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) { + if (print) { + WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", + cfi1->drap, cfi1->drap_reg, cfi1->drap_offset, + cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); + } + + } else + return true; + + return false; +} + +/* + * This is a hack for Clang. Clang is aggressive about removing section + * symbols and then some. If we cannot find something to relocate an + * instruction against, we must not generate CFI for it or the ORC + * generation will fail later. + */ +bool insn_can_reloc(struct instruction *insn) +{ + struct section *insn_sec = insn->sec; + unsigned long insn_off = insn->offset; + + if (insn_sec->sym || + find_symbol_containing(insn_sec, insn_off) || + find_symbol_containing(insn_sec, insn_off - 1)) { + /* See elf_add_reloc_to_insn(). 
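It relies on one of these symbols being present to anchor the relocation.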
*/ + return true; + } + return false; +} diff --git a/tools/objtool/arch/arm64/orc.c b/tools/objtool/arch/arm64/orc.c new file mode 100644 index 0000000000000000000000000000000000000000..4febbca42ddcde1ed37e85e16e700ab8ae92b396 --- /dev/null +++ b/tools/objtool/arch/arm64/orc.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Author: Madhavan T. Venkataraman (madvenka@linux.microsoft.com) + * + * Copyright (C) 2022 Microsoft Corporation + */ +#include + +#include + +#include +#include +#include +#include + +void arch_init_orc_entry(struct orc_entry *entry) +{ + entry->fp_reg = ORC_REG_UNDEFINED; + entry->type = UNWIND_HINT_TYPE_CALL; +} + +int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, + struct instruction *insn) +{ + struct cfi_reg *fp = &cfi->regs[CFI_FP]; + + memset(orc, 0, sizeof(*orc)); + + orc->sp_reg = ORC_REG_SP; + orc->fp_reg = ORC_REG_PREV_SP; + orc->type = UNWIND_HINT_TYPE_CALL; + + if (!cfi || cfi->cfa.base == CFI_UNDEFINED || + (cfi->type == UNWIND_HINT_TYPE_CALL && !fp->offset)) { + /* + * The frame pointer has not been set up. This instruction is + * unreliable from an unwind perspective. + */ + return 0; + } + + orc->sp_offset = cfi->cfa.offset; + orc->fp_offset = fp->offset; + orc->type = cfi->type; + orc->signal = cfi->end; + + return 0; +} + +int write_orc_entry(struct elf *elf, struct section *orc_sec, + struct section *ip_sec, unsigned int idx, + struct section *insn_sec, unsigned long insn_off, + struct orc_entry *o) +{ + struct orc_entry *orc; + + /* populate ORC data */ + orc = (struct orc_entry *)orc_sec->data->d_buf + idx; + memcpy(orc, o, sizeof(*orc)); + orc->sp_offset = bswap_if_needed(elf, orc->sp_offset); + orc->fp_offset = bswap_if_needed(elf, orc->fp_offset); + + /* populate reloc for ip */ + if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx, + insn_sec, insn_off)) + return -1; + + return 0; +} + +static const char *reg_name(unsigned int reg) +{ + switch (reg) { + case ORC_REG_PREV_SP: + return "cfa"; + case ORC_REG_FP: + return "x29"; + case ORC_REG_SP: + return "sp"; + default: + return "?"; + } +} + +const char *orc_type_name(unsigned int type) +{ + switch (type) { + case UNWIND_HINT_TYPE_CALL: + return "call"; + case UNWIND_HINT_TYPE_REGS: + return "regs"; + case UNWIND_HINT_TYPE_IRQ_STACK: + return "irqstack"; + default: + return "?"; + } +} + +void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i) +{ + printf("type:%s", orc_type_name(orc[i].type)); + + printf(" sp:"); + + orc_print_reg(orc[i].sp_reg, bswap_if_needed(dummy_elf, orc[i].sp_offset)); + + printf(" fp:"); + + orc_print_reg(orc[i].fp_reg, bswap_if_needed(dummy_elf, orc[i].fp_offset)); + + printf(" signal:%d\n", orc[i].signal); +} + +void orc_print_reg(unsigned int reg, int offset) +{ + if (reg == ORC_REG_UNDEFINED) + printf("(und)"); + else + printf("%s%+d", reg_name(reg), offset); +} + +void orc_print_sp(void) +{ + printf(" cfa:"); +} + +void orc_print_fp(void) +{ + printf(" x29:"); +} + +bool orc_ignore_section(struct section *sec) +{ + return !strcmp(sec->name, ".head.text"); +} diff --git a/tools/objtool/arch/arm64/unwind_hints.c b/tools/objtool/arch/arm64/unwind_hints.c new file mode 100644 index 0000000000000000000000000000000000000000..3e5364623defcdbda6014d66740b0f5e4a32c132 --- /dev/null +++ b/tools/objtool/arch/arm64/unwind_hints.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ +#include + +#include +#include +#include +#include +#include + +int 
read_unwind_hints(struct objtool_file *file) +{ + struct cfi_state cfi = init_cfi; + struct section *sec; + struct unwind_hint *hint; + struct instruction *insn; + struct reloc *reloc; + u8 sp_reg, type; + int i; + + sec = find_section_by_name(file->elf, ".discard.unwind_hints"); + if (!sec) + return 0; + + if (!sec->rsec) { + WARN("missing .rela.discard.unwind_hints section"); + return -1; + } + + if (sec->sh.sh_size % sizeof(struct unwind_hint)) { + WARN("struct unwind_hint size mismatch"); + return -1; + } + + file->hints = true; + + for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { + hint = (struct unwind_hint *)sec->data->d_buf + i; + + sp_reg = bswap_if_needed(file->elf, hint->sp_reg); + type = bswap_if_needed(file->elf, hint->type); + + reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); + if (!reloc) { + WARN("can't find reloc for unwind_hints[%d]", i); + return -1; + } + + insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc)); + if (!insn) { + WARN("can't find insn for unwind_hints[%d]", i); + return -1; + } + + insn->hint = true; + + if (type == UNWIND_HINT_TYPE_UNDEFINED) { + insn->cfi = &force_undefined_cfi; + continue; + } + + if (type == UNWIND_HINT_TYPE_SAVE) { + insn->hint = false; + insn->save = true; + continue; + } + + if (type == UNWIND_HINT_TYPE_RESTORE) { + insn->restore = true; + continue; + } + + if (type == UNWIND_HINT_TYPE_REGS_PARTIAL) { + struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); + + if (sym && sym->bind == STB_GLOBAL) { + if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { + WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR"); + } + } + } + + if (type == UNWIND_HINT_TYPE_FUNC) { + insn->cfi = &func_cfi; + continue; + } + + if (insn->cfi) + cfi = *(insn->cfi); + + if (arch_decode_hint_reg(sp_reg, &cfi.cfa.base)) { + WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", sp_reg); + return -1; + } + + cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset); + cfi.type = type; + cfi.signal = hint->signal; + + insn->cfi = cfi_hash_find_or_add(&cfi); + } + + return 0; +} diff --git a/tools/objtool/arch/loongarch/Build b/tools/objtool/arch/loongarch/Build new file mode 100644 index 0000000000000000000000000000000000000000..1d4b784b6887abeaeeb08e1fb5ef0f8e70473f53 --- /dev/null +++ b/tools/objtool/arch/loongarch/Build @@ -0,0 +1,3 @@ +objtool-y += decode.o +objtool-y += special.o +objtool-y += orc.o diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c new file mode 100644 index 0000000000000000000000000000000000000000..69b66994f2a1557275212bf470b413af299f74f2 --- /dev/null +++ b/tools/objtool/arch/loongarch/decode.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include +#include +#include +#include +#include + +#ifndef EM_LOONGARCH +#define EM_LOONGARCH 258 +#endif + +int arch_ftrace_match(char *name) +{ + return !strcmp(name, "_mcount"); +} + +unsigned long arch_jump_destination(struct instruction *insn) +{ + return insn->offset + (insn->immediate << 2); +} + +unsigned long arch_dest_reloc_offset(int addend) +{ + return addend; +} + +bool arch_pc_relative_reloc(struct reloc *reloc) +{ + return false; +} + +bool arch_callee_saved_reg(unsigned char reg) +{ + switch (reg) { + case CFI_RA: + case CFI_FP: + case CFI_S0 ... 
CFI_S8: + return true; + default: + return false; + } +} + +int arch_decode_hint_reg(u8 sp_reg, int *base) +{ + switch (sp_reg) { + case ORC_REG_UNDEFINED: + *base = CFI_UNDEFINED; + break; + case ORC_REG_SP: + *base = CFI_SP; + break; + case ORC_REG_FP: + *base = CFI_FP; + break; + default: + return -1; + } + + return 0; +} + +static bool is_loongarch(const struct elf *elf) +{ + if (elf->ehdr.e_machine == EM_LOONGARCH) + return true; + + WARN("unexpected ELF machine type %d", elf->ehdr.e_machine); + return false; +} + +#define ADD_OP(op) \ + if (!(op = calloc(1, sizeof(*op)))) \ + return -1; \ + else for (*ops_list = op, ops_list = &op->next; op; op = NULL) + +static bool decode_insn_reg0i26_fomat(union loongarch_instruction inst, + struct instruction *insn) +{ + switch (inst.reg0i26_format.opcode) { + case b_op: + insn->type = INSN_JUMP_UNCONDITIONAL; + insn->immediate = sign_extend64(inst.reg0i26_format.immediate_h << 16 | + inst.reg0i26_format.immediate_l, 25); + break; + case bl_op: + insn->type = INSN_CALL; + insn->immediate = sign_extend64(inst.reg0i26_format.immediate_h << 16 | + inst.reg0i26_format.immediate_l, 25); + break; + default: + return false; + } + + return true; +} + +static bool decode_insn_reg1i21_fomat(union loongarch_instruction inst, + struct instruction *insn) +{ + switch (inst.reg1i21_format.opcode) { + case beqz_op: + case bnez_op: + case bceqz_op: + insn->type = INSN_JUMP_CONDITIONAL; + insn->immediate = sign_extend64(inst.reg1i21_format.immediate_h << 16 | + inst.reg1i21_format.immediate_l, 20); + break; + default: + return false; + } + + return true; +} + +static bool decode_insn_reg2i12_fomat(union loongarch_instruction inst, + struct instruction *insn, + struct stack_op **ops_list, + struct stack_op *op) +{ + switch (inst.reg2i12_format.opcode) { + case addid_op: + if ((inst.reg2i12_format.rd == CFI_SP) || (inst.reg2i12_format.rj == CFI_SP)) { + /* addi.d sp,sp,si12 or addi.d fp,sp,si12 or addi.d sp,fp,si12 */ + insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11); + ADD_OP(op) { + op->src.type = OP_SRC_ADD; + op->src.reg = inst.reg2i12_format.rj; + op->src.offset = insn->immediate; + op->dest.type = OP_DEST_REG; + op->dest.reg = inst.reg2i12_format.rd; + } + } + if ((inst.reg2i12_format.rd == CFI_SP) && (inst.reg2i12_format.rj == CFI_FP)) { + /* addi.d sp,fp,si12 */ + struct symbol *func = find_func_containing(insn->sec, insn->offset); + + if (!func) + return false; + + func->frame_pointer = true; + } + break; + case ldd_op: + if (inst.reg2i12_format.rj == CFI_SP) { + /* ld.d rd,sp,si12 */ + insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11); + ADD_OP(op) { + op->src.type = OP_SRC_REG_INDIRECT; + op->src.reg = CFI_SP; + op->src.offset = insn->immediate; + op->dest.type = OP_DEST_REG; + op->dest.reg = inst.reg2i12_format.rd; + } + } + break; + case std_op: + if (inst.reg2i12_format.rj == CFI_SP) { + /* st.d rd,sp,si12 */ + insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11); + ADD_OP(op) { + op->src.type = OP_SRC_REG; + op->src.reg = inst.reg2i12_format.rd; + op->dest.type = OP_DEST_REG_INDIRECT; + op->dest.reg = CFI_SP; + op->dest.offset = insn->immediate; + } + } + break; + case andi_op: + if (inst.reg2i12_format.rd == 0 && + inst.reg2i12_format.rj == 0 && + inst.reg2i12_format.immediate == 0) + /* andi r0,r0,0 */ + insn->type = INSN_NOP; + break; + default: + return false; + } + + return true; +} + +static bool decode_insn_reg2i14_fomat(union loongarch_instruction inst, + struct instruction *insn, + struct 
stack_op **ops_list, + struct stack_op *op) +{ + switch (inst.reg2i14_format.opcode) { + case ldptrd_op: + if (inst.reg2i14_format.rj == CFI_SP) { + /* ldptr.d rd,sp,si14 */ + insn->immediate = sign_extend64(inst.reg2i14_format.immediate, 13); + ADD_OP(op) { + op->src.type = OP_SRC_REG_INDIRECT; + op->src.reg = CFI_SP; + op->src.offset = insn->immediate; + op->dest.type = OP_DEST_REG; + op->dest.reg = inst.reg2i14_format.rd; + } + } + break; + case stptrd_op: + if (inst.reg2i14_format.rj == CFI_SP) { + /* stptr.d ra,sp,0 */ + if (inst.reg2i14_format.rd == LOONGARCH_GPR_RA && + inst.reg2i14_format.immediate == 0) + break; + + /* stptr.d rd,sp,si14 */ + insn->immediate = sign_extend64(inst.reg2i14_format.immediate, 13); + ADD_OP(op) { + op->src.type = OP_SRC_REG; + op->src.reg = inst.reg2i14_format.rd; + op->dest.type = OP_DEST_REG_INDIRECT; + op->dest.reg = CFI_SP; + op->dest.offset = insn->immediate; + } + } + break; + default: + return false; + } + + return true; +} + +static bool decode_insn_reg2i16_fomat(union loongarch_instruction inst, + struct instruction *insn) +{ + switch (inst.reg2i16_format.opcode) { + case jirl_op: + if (inst.reg2i16_format.rd == 0 && + inst.reg2i16_format.rj == CFI_RA && + inst.reg2i16_format.immediate == 0) { + /* jirl r0,ra,0 */ + insn->type = INSN_RETURN; + } else if (inst.reg2i16_format.rd == CFI_RA) { + /* jirl ra,rj,offs16 */ + insn->type = INSN_CALL_DYNAMIC; + } else if (inst.reg2i16_format.rd == CFI_A0 && + inst.reg2i16_format.immediate == 0) { + /* + * jirl a0,t0,0 + * this is a special case in loongarch_suspend_enter, + * just treat it as a call instruction. + */ + insn->type = INSN_CALL_DYNAMIC; + } else if (inst.reg2i16_format.rd == 0 && + inst.reg2i16_format.immediate == 0) { + /* jirl r0,rj,0 */ + insn->type = INSN_JUMP_DYNAMIC; + } else if (inst.reg2i16_format.rd == 0 && + inst.reg2i16_format.immediate != 0) { + /* + * jirl r0,t0,12 + * this is a rare case in JUMP_VIRT_ADDR, + * just ignore it due to it is harmless for tracing. 
+ */ + break; + } else { + /* jirl rd,rj,offs16 */ + insn->type = INSN_JUMP_UNCONDITIONAL; + insn->immediate = sign_extend64(inst.reg2i16_format.immediate, 15); + } + break; + case beq_op: + case bne_op: + case blt_op: + case bge_op: + case bltu_op: + case bgeu_op: + insn->type = INSN_JUMP_CONDITIONAL; + insn->immediate = sign_extend64(inst.reg2i16_format.immediate, 15); + break; + default: + return false; + } + + return true; +} + +int arch_decode_instruction(struct objtool_file *file, const struct section *sec, + unsigned long offset, unsigned int maxlen, + struct instruction *insn) +{ + struct stack_op **ops_list = &insn->stack_ops; + const struct elf *elf = file->elf; + struct stack_op *op = NULL; + union loongarch_instruction inst; + + if (!is_loongarch(elf)) + return -1; + + if (maxlen < LOONGARCH_INSN_SIZE) + return 0; + + insn->len = LOONGARCH_INSN_SIZE; + insn->type = INSN_OTHER; + insn->immediate = 0; + + inst = *(union loongarch_instruction *)(sec->data->d_buf + offset); + + if (decode_insn_reg0i26_fomat(inst, insn)) + return 0; + if (decode_insn_reg1i21_fomat(inst, insn)) + return 0; + if (decode_insn_reg2i12_fomat(inst, insn, ops_list, op)) + return 0; + if (decode_insn_reg2i14_fomat(inst, insn, ops_list, op)) + return 0; + if (decode_insn_reg2i16_fomat(inst, insn)) + return 0; + + if (inst.word == 0) + insn->type = INSN_NOP; + else if (inst.reg0i15_format.opcode == break_op) { + /* break */ + insn->type = INSN_BUG; + } else if (inst.reg2_format.opcode == ertn_op) { + /* ertn */ + insn->type = INSN_RETURN; + } + + return 0; +} + +const char *arch_nop_insn(int len) +{ + static u32 nop; + + if (len != LOONGARCH_INSN_SIZE) + WARN("invalid NOP size: %d\n", len); + + nop = LOONGARCH_INSN_NOP; + + return (const char *)&nop; +} + +const char *arch_ret_insn(int len) +{ + static u32 ret; + + if (len != LOONGARCH_INSN_SIZE) + WARN("invalid RET size: %d\n", len); + + emit_jirl((union loongarch_instruction *)&ret, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0); + + return (const char *)&ret; +} + +void arch_initial_func_cfi_state(struct cfi_init_state *state) +{ + int i; + + for (i = 0; i < CFI_NUM_REGS; i++) { + state->regs[i].base = CFI_UNDEFINED; + state->regs[i].offset = 0; + } + + /* initial CFA (call frame address) */ + state->cfa.base = CFI_SP; + state->cfa.offset = 0; +} diff --git a/tools/objtool/arch/loongarch/include/arch/cfi_regs.h b/tools/objtool/arch/loongarch/include/arch/cfi_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..d183cc8f43bf3a70190315bdc623e8cf30f05246 --- /dev/null +++ b/tools/objtool/arch/loongarch/include/arch/cfi_regs.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _OBJTOOL_ARCH_CFI_REGS_H +#define _OBJTOOL_ARCH_CFI_REGS_H + +#define CFI_RA 1 +#define CFI_SP 3 +#define CFI_A0 4 +#define CFI_FP 22 +#define CFI_S0 23 +#define CFI_S1 24 +#define CFI_S2 25 +#define CFI_S3 26 +#define CFI_S4 27 +#define CFI_S5 28 +#define CFI_S6 29 +#define CFI_S7 30 +#define CFI_S8 31 +#define CFI_NUM_REGS 32 + +#define CFI_BP CFI_FP + +#endif /* _OBJTOOL_ARCH_CFI_REGS_H */ diff --git a/tools/objtool/arch/loongarch/include/arch/elf.h b/tools/objtool/arch/loongarch/include/arch/elf.h new file mode 100644 index 0000000000000000000000000000000000000000..9623d663220effd0422b0d9372aba6a6c5c00a19 --- /dev/null +++ b/tools/objtool/arch/loongarch/include/arch/elf.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _OBJTOOL_ARCH_ELF_H +#define _OBJTOOL_ARCH_ELF_H + +/* + * See the following link for more info 
about ELF Relocation types: + * https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html#_relocations + */ +#ifndef R_LARCH_NONE +#define R_LARCH_NONE 0 +#endif +#ifndef R_LARCH_32 +#define R_LARCH_32 1 +#endif +#ifndef R_LARCH_64 +#define R_LARCH_64 2 +#endif +#ifndef R_LARCH_32_PCREL +#define R_LARCH_32_PCREL 99 +#endif + +#define R_NONE R_LARCH_NONE +#define R_ABS32 R_LARCH_32 +#define R_ABS64 R_LARCH_64 +#define R_DATA32 R_LARCH_32_PCREL +#define R_DATA64 R_LARCH_32_PCREL +#define R_TEXT32 R_LARCH_32_PCREL +#define R_TEXT64 R_LARCH_32_PCREL + +#endif /* _OBJTOOL_ARCH_ELF_H */ diff --git a/tools/objtool/arch/loongarch/include/arch/special.h b/tools/objtool/arch/loongarch/include/arch/special.h new file mode 100644 index 0000000000000000000000000000000000000000..35fc979b550ab5a3c44460af584d08617482dddf --- /dev/null +++ b/tools/objtool/arch/loongarch/include/arch/special.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _OBJTOOL_ARCH_SPECIAL_H +#define _OBJTOOL_ARCH_SPECIAL_H + +/* + * See more info about struct exception_table_entry + * in arch/loongarch/include/asm/extable.h + */ +#define EX_ENTRY_SIZE 12 +#define EX_ORIG_OFFSET 0 +#define EX_NEW_OFFSET 4 + +/* + * See more info about struct jump_entry + * in include/linux/jump_label.h + */ +#define JUMP_ENTRY_SIZE 16 +#define JUMP_ORIG_OFFSET 0 +#define JUMP_NEW_OFFSET 4 +#define JUMP_KEY_OFFSET 8 + +/* + * See more info about struct alt_instr + * in arch/loongarch/include/asm/alternative.h + */ +#define ALT_ENTRY_SIZE 12 +#define ALT_ORIG_OFFSET 0 +#define ALT_NEW_OFFSET 4 +#define ALT_FEATURE_OFFSET 8 +#define ALT_ORIG_LEN_OFFSET 10 +#define ALT_NEW_LEN_OFFSET 11 + +#endif /* _OBJTOOL_ARCH_SPECIAL_H */ diff --git a/tools/objtool/arch/loongarch/orc.c b/tools/objtool/arch/loongarch/orc.c new file mode 100644 index 0000000000000000000000000000000000000000..873536d009d91d93c6cc8abd3255de983e6f9aa2 --- /dev/null +++ b/tools/objtool/arch/loongarch/orc.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include + +#include +#include +#include +#include + +int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn) +{ + struct cfi_reg *fp = &cfi->regs[CFI_FP]; + struct cfi_reg *ra = &cfi->regs[CFI_RA]; + + memset(orc, 0, sizeof(*orc)); + + if (!cfi) { + /* + * This is usually either unreachable nops/traps (which don't + * trigger unreachable instruction warnings), or + * STACK_FRAME_NON_STANDARD functions. 
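+		 * The latter are explicitly opted out of stack validation.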
+ */ + orc->type = ORC_TYPE_UNDEFINED; + return 0; + } + + switch (cfi->type) { + case UNWIND_HINT_TYPE_UNDEFINED: + orc->type = ORC_TYPE_UNDEFINED; + return 0; + case UNWIND_HINT_TYPE_END_OF_STACK: + orc->type = ORC_TYPE_END_OF_STACK; + return 0; + case UNWIND_HINT_TYPE_CALL: + orc->type = ORC_TYPE_CALL; + break; + case UNWIND_HINT_TYPE_REGS: + orc->type = ORC_TYPE_REGS; + break; + case UNWIND_HINT_TYPE_REGS_PARTIAL: + orc->type = ORC_TYPE_REGS_PARTIAL; + break; + default: + WARN_INSN(insn, "unknown unwind hint type %d", cfi->type); + return -1; + } + + orc->signal = cfi->signal; + + switch (cfi->cfa.base) { + case CFI_SP: + orc->sp_reg = ORC_REG_SP; + break; + case CFI_FP: + orc->sp_reg = ORC_REG_FP; + break; + default: + WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base); + return -1; + } + + switch (fp->base) { + case CFI_UNDEFINED: + orc->fp_reg = ORC_REG_UNDEFINED; + orc->fp_offset = 0; + break; + case CFI_CFA: + orc->fp_reg = ORC_REG_PREV_SP; + orc->fp_offset = fp->offset; + break; + case CFI_FP: + orc->fp_reg = ORC_REG_FP; + break; + default: + WARN_INSN(insn, "unknown FP base reg %d", fp->base); + return -1; + } + + switch (ra->base) { + case CFI_UNDEFINED: + orc->ra_reg = ORC_REG_UNDEFINED; + orc->ra_offset = 0; + break; + case CFI_CFA: + orc->ra_reg = ORC_REG_PREV_SP; + orc->ra_offset = ra->offset; + break; + case CFI_FP: + orc->ra_reg = ORC_REG_FP; + break; + default: + WARN_INSN(insn, "unknown RA base reg %d", ra->base); + return -1; + } + + orc->sp_offset = cfi->cfa.offset; + + return 0; +} + +int write_orc_entry(struct elf *elf, struct section *orc_sec, + struct section *ip_sec, unsigned int idx, + struct section *insn_sec, unsigned long insn_off, + struct orc_entry *o) +{ + struct orc_entry *orc; + + /* populate ORC data */ + orc = (struct orc_entry *)orc_sec->data->d_buf + idx; + memcpy(orc, o, sizeof(*orc)); + + /* populate reloc for ip */ + if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx, + insn_sec, insn_off)) + return -1; + + return 0; +} + +static const char *reg_name(unsigned int reg) +{ + switch (reg) { + case ORC_REG_SP: + return "sp"; + case ORC_REG_FP: + return "fp"; + case ORC_REG_PREV_SP: + return "prevsp"; + default: + return "?"; + } +} + +static const char *orc_type_name(unsigned int type) +{ + switch (type) { + case UNWIND_HINT_TYPE_CALL: + return "call"; + case UNWIND_HINT_TYPE_REGS: + return "regs"; + case UNWIND_HINT_TYPE_REGS_PARTIAL: + return "regs (partial)"; + default: + return "?"; + } +} + +static void print_reg(unsigned int reg, int offset) +{ + if (reg == ORC_REG_UNDEFINED) + printf(" (und) "); + else + printf("%s + %3d", reg_name(reg), offset); + +} + +void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i) +{ + printf("type:%s", orc_type_name(orc[i].type)); + + printf(" sp:"); + print_reg(orc[i].sp_reg, orc[i].sp_offset); + + printf(" fp:"); + print_reg(orc[i].fp_reg, orc[i].fp_offset); + + printf(" ra:"); + print_reg(orc[i].ra_reg, orc[i].ra_offset); + + printf(" signal:%d\n", orc[i].signal); +} diff --git a/tools/objtool/arch/loongarch/special.c b/tools/objtool/arch/loongarch/special.c new file mode 100644 index 0000000000000000000000000000000000000000..9bba1e9318e0ba9ba2c7b79c9c980c8309a507e1 --- /dev/null +++ b/tools/objtool/arch/loongarch/special.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include + +bool arch_support_alt_relocation(struct special_alt *special_alt, + struct instruction *insn, + struct reloc *reloc) +{ + return false; +} + +struct reloc 
*arch_find_switch_table(struct objtool_file *file, + struct instruction *insn) +{ + return NULL; +} diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build index 9f7869b5c5e0caf1c0128e0f177d98e931017583..3dedb2fd8f3a0c3d7502e2536167f5e895fe1d44 100644 --- a/tools/objtool/arch/x86/Build +++ b/tools/objtool/arch/x86/Build @@ -1,5 +1,6 @@ objtool-y += special.o objtool-y += decode.o +objtool-y += orc.o inat_tables_script = ../arch/x86/tools/gen-insn-attr-x86.awk inat_tables_maps = ../arch/x86/lib/x86-opcode-map.txt diff --git a/tools/objtool/arch/x86/orc.c b/tools/objtool/arch/x86/orc.c new file mode 100644 index 0000000000000000000000000000000000000000..b6cd943e87f936ef93ce03609d4596b6fd96553a --- /dev/null +++ b/tools/objtool/arch/x86/orc.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include + +#include +#include +#include +#include + +int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn) +{ + struct cfi_reg *bp = &cfi->regs[CFI_BP]; + + memset(orc, 0, sizeof(*orc)); + + if (!cfi) { + /* + * This is usually either unreachable nops/traps (which don't + * trigger unreachable instruction warnings), or + * STACK_FRAME_NON_STANDARD functions. + */ + orc->type = ORC_TYPE_UNDEFINED; + return 0; + } + + switch (cfi->type) { + case UNWIND_HINT_TYPE_UNDEFINED: + orc->type = ORC_TYPE_UNDEFINED; + return 0; + case UNWIND_HINT_TYPE_END_OF_STACK: + orc->type = ORC_TYPE_END_OF_STACK; + return 0; + case UNWIND_HINT_TYPE_CALL: + orc->type = ORC_TYPE_CALL; + break; + case UNWIND_HINT_TYPE_REGS: + orc->type = ORC_TYPE_REGS; + break; + case UNWIND_HINT_TYPE_REGS_PARTIAL: + orc->type = ORC_TYPE_REGS_PARTIAL; + break; + default: + WARN_INSN(insn, "unknown unwind hint type %d", cfi->type); + return -1; + } + + orc->signal = cfi->signal; + + switch (cfi->cfa.base) { + case CFI_SP: + orc->sp_reg = ORC_REG_SP; + break; + case CFI_SP_INDIRECT: + orc->sp_reg = ORC_REG_SP_INDIRECT; + break; + case CFI_BP: + orc->sp_reg = ORC_REG_BP; + break; + case CFI_BP_INDIRECT: + orc->sp_reg = ORC_REG_BP_INDIRECT; + break; + case CFI_R10: + orc->sp_reg = ORC_REG_R10; + break; + case CFI_R13: + orc->sp_reg = ORC_REG_R13; + break; + case CFI_DI: + orc->sp_reg = ORC_REG_DI; + break; + case CFI_DX: + orc->sp_reg = ORC_REG_DX; + break; + default: + WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base); + return -1; + } + + switch (bp->base) { + case CFI_UNDEFINED: + orc->bp_reg = ORC_REG_UNDEFINED; + break; + case CFI_CFA: + orc->bp_reg = ORC_REG_PREV_SP; + break; + case CFI_BP: + orc->bp_reg = ORC_REG_BP; + break; + default: + WARN_INSN(insn, "unknown BP base reg %d", bp->base); + return -1; + } + + orc->sp_offset = cfi->cfa.offset; + orc->bp_offset = bp->offset; + + return 0; +} + +int write_orc_entry(struct elf *elf, struct section *orc_sec, + struct section *ip_sec, unsigned int idx, + struct section *insn_sec, unsigned long insn_off, + struct orc_entry *o) +{ + struct orc_entry *orc; + + /* populate ORC data */ + orc = (struct orc_entry *)orc_sec->data->d_buf + idx; + memcpy(orc, o, sizeof(*orc)); + orc->sp_offset = bswap_if_needed(elf, orc->sp_offset); + orc->bp_offset = bswap_if_needed(elf, orc->bp_offset); + + /* populate reloc for ip */ + if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx, + insn_sec, insn_off)) + return -1; + + return 0; +} + +static const char *reg_name(unsigned int reg) +{ + switch (reg) { + case ORC_REG_PREV_SP: + return "prevsp"; + case ORC_REG_DX: + return "dx"; + case ORC_REG_DI: + return "di"; + case 
ORC_REG_BP: + return "bp"; + case ORC_REG_SP: + return "sp"; + case ORC_REG_R10: + return "r10"; + case ORC_REG_R13: + return "r13"; + case ORC_REG_BP_INDIRECT: + return "bp(ind)"; + case ORC_REG_SP_INDIRECT: + return "sp(ind)"; + default: + return "?"; + } +} + +static const char *orc_type_name(unsigned int type) +{ + switch (type) { + case ORC_TYPE_UNDEFINED: + return "(und)"; + case ORC_TYPE_END_OF_STACK: + return "end"; + case ORC_TYPE_CALL: + return "call"; + case ORC_TYPE_REGS: + return "regs"; + case ORC_TYPE_REGS_PARTIAL: + return "regs (partial)"; + default: + return "?"; + } +} + +static void print_reg(unsigned int reg, int offset) +{ + if (reg == ORC_REG_BP_INDIRECT) + printf("(bp%+d)", offset); + else if (reg == ORC_REG_SP_INDIRECT) + printf("(sp)%+d", offset); + else if (reg == ORC_REG_UNDEFINED) + printf("(und)"); + else + printf("%s%+d", reg_name(reg), offset); +} + +void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i) +{ + printf("type:%s", orc_type_name(orc[i].type)); + + printf(" sp:"); + print_reg(orc[i].sp_reg, bswap_if_needed(dummy_elf, orc[i].sp_offset)); + + printf(" bp:"); + print_reg(orc[i].bp_reg, bswap_if_needed(dummy_elf, orc[i].bp_offset)); + + printf(" signal:%d\n", orc[i].signal); +} diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 1b242c3c2d45156dc01a14483197a219faddcac8..6e9dd6d6db37a2733de2ba584b00c19a1285256d 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -20,6 +20,7 @@ #include #include #include +#include struct alternative { struct alternative *next; @@ -584,7 +585,7 @@ static int add_dead_ends(struct objtool_file *file) struct section *rsec; struct reloc *reloc; struct instruction *insn; - s64 addend; + unsigned long offset; /* * Check for manually annotated dead ends. 
@@ -594,27 +595,28 @@ static int add_dead_ends(struct objtool_file *file) goto reachable; for_each_reloc(rsec, reloc) { - - if (reloc->sym->type != STT_SECTION) { + if (reloc->sym->type == STT_SECTION) { + offset = reloc_addend(reloc); + } else if (reloc->sym->local_label) { + offset = reloc->sym->offset; + } else { WARN("unexpected relocation symbol type in %s", rsec->name); return -1; } - addend = reloc_addend(reloc); - - insn = find_insn(file, reloc->sym->sec, addend); + insn = find_insn(file, reloc->sym->sec, offset); if (insn) insn = prev_insn_same_sec(file, insn); - else if (addend == reloc->sym->sec->sh.sh_size) { + else if (offset == reloc->sym->sec->sh.sh_size) { insn = find_last_insn(file, reloc->sym->sec); if (!insn) { WARN("can't find unreachable insn at %s+0x%" PRIx64, - reloc->sym->sec->name, addend); + reloc->sym->sec->name, offset); return -1; } } else { WARN("can't find unreachable insn at %s+0x%" PRIx64, - reloc->sym->sec->name, addend); + reloc->sym->sec->name, offset); return -1; } @@ -633,27 +635,28 @@ static int add_dead_ends(struct objtool_file *file) return 0; for_each_reloc(rsec, reloc) { - - if (reloc->sym->type != STT_SECTION) { + if (reloc->sym->type == STT_SECTION) { + offset = reloc_addend(reloc); + } else if (reloc->sym->local_label) { + offset = reloc->sym->offset; + } else { WARN("unexpected relocation symbol type in %s", rsec->name); return -1; } - addend = reloc_addend(reloc); - - insn = find_insn(file, reloc->sym->sec, addend); + insn = find_insn(file, reloc->sym->sec, offset); if (insn) insn = prev_insn_same_sec(file, insn); - else if (addend == reloc->sym->sec->sh.sh_size) { + else if (offset == reloc->sym->sec->sh.sh_size) { insn = find_last_insn(file, reloc->sym->sec); if (!insn) { WARN("can't find reachable insn at %s+0x%" PRIx64, - reloc->sym->sec->name, addend); + reloc->sym->sec->name, offset); return -1; } } else { WARN("can't find reachable insn at %s+0x%" PRIx64, - reloc->sym->sec->name, addend); + reloc->sym->sec->name, offset); return -1; } @@ -2208,6 +2211,7 @@ static int read_unwind_hints(struct objtool_file *file) struct unwind_hint *hint; struct instruction *insn; struct reloc *reloc; + unsigned long offset; int i; sec = find_section_by_name(file->elf, ".discard.unwind_hints"); @@ -2235,7 +2239,16 @@ static int read_unwind_hints(struct objtool_file *file) return -1; } - insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc)); + if (reloc->sym->type == STT_SECTION) { + offset = reloc_addend(reloc); + } else if (reloc->sym->local_label) { + offset = reloc->sym->offset; + } else { + WARN("unexpected relocation symbol type in %s", sec->rsec->name); + return -1; + } + + insn = find_insn(file, reloc->sym->sec, offset); if (!insn) { WARN("can't find insn for unwind_hints[%d]", i); return -1; @@ -2506,6 +2519,9 @@ static int classify_symbols(struct objtool_file *file) struct symbol *func; for_each_sym(file, func) { + if (func->type == STT_NOTYPE && strstarts(func->name, ".L")) + func->local_label = true; + if (func->bind != STB_GLOBAL) continue; @@ -2959,10 +2975,27 @@ static int update_cfi_state(struct instruction *insn, break; } - if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { + if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP && + insn->sym->frame_pointer) { + /* addi.d fp,sp,imm on LoongArch */ + if (cfa->base == CFI_SP && cfa->offset == op->src.offset) { + cfa->base = CFI_BP; + cfa->offset = 0; + } + break; + } - /* lea disp(%rbp), %rsp */ - cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); + if (op->dest.reg == 
CFI_SP && op->src.reg == CFI_BP) { + /* addi.d sp,fp,imm on LoongArch */ + if (cfa->base == CFI_BP && cfa->offset == 0) { + if (insn->sym->frame_pointer) { + cfa->base = CFI_SP; + cfa->offset = -op->src.offset; + } + } else { + /* lea disp(%rbp), %rsp */ + cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); + } break; } diff --git a/tools/objtool/dcheck.c b/tools/objtool/dcheck.c new file mode 100644 index 0000000000000000000000000000000000000000..a4c342bf697da0615294d212cd4bc08386a66a03 --- /dev/null +++ b/tools/objtool/dcheck.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Find the destination instructions for all jumps. + */ +static void add_jump_destinations(struct objtool_file *file) +{ + struct instruction *insn; + struct reloc *reloc; + struct section *dest_sec; + unsigned long dest_off; + + for_each_insn(file, insn) { + if (insn->type != INSN_CALL && + insn->type != INSN_JUMP_CONDITIONAL && + insn->type != INSN_JUMP_UNCONDITIONAL) { + continue; + } + + reloc = insn_reloc(file, insn); + if (!reloc) { + dest_sec = insn->sec; + dest_off = arch_jump_destination(insn); + } else if (reloc->sym->type == STT_SECTION) { + dest_sec = reloc->sym->sec; + dest_off = arch_dest_reloc_offset(reloc_addend(reloc)); + } else if (reloc->sym->sec->idx) { + dest_sec = reloc->sym->sec; + dest_off = reloc->sym->sym.st_value + + arch_dest_reloc_offset(reloc_addend(reloc)); + } else { + /* non-func asm code jumping to another file */ + continue; + } + + insn->jump_dest = find_insn(file, dest_sec, dest_off); + } +} + +static bool update_cfi_state(struct cfi_state *cfi, struct stack_op *op) +{ + struct cfi_reg *cfa = &cfi->cfa; + struct cfi_reg *fp_reg = &cfi->regs[CFI_FP]; + struct cfi_reg *fp_val = &cfi->vals[CFI_FP]; + struct cfi_reg *ra_val = &cfi->vals[CFI_RA]; + enum op_src_type src_type = op->src.type; + enum op_dest_type dest_type = op->dest.type; + unsigned char dest_reg = op->dest.reg; + int offset; + + if (src_type == OP_SRC_ADD && dest_type == OP_DEST_REG) { + + if (op->src.reg == CFI_SP) { + if (op->dest.reg == CFI_SP) { + cfa->offset -= op->src.offset; + } else { + if (fp_reg->offset) { + /* FP is already set. */ + return false; + } + fp_reg->offset = -cfa->offset + op->src.offset; + if (fp_reg->offset != fp_val->offset) { + /* + * FP does not match the location + * where FP is stored on stack. + */ + return false; + } + } + } else { + if (op->dest.reg == CFI_SP) { + cfa->offset = + -(fp_reg->offset + op->src.offset); + } else { + /* Setting the FP from itself is unreliable. */ + return false; + } + } + /* + * When the stack pointer is restored in the frame pointer + * epilog, forget where the FP and RA were stored. + */ + if (cfa->offset < -fp_val->offset) + fp_val->offset = 0; + if (cfa->offset < -ra_val->offset) + ra_val->offset = 0; + goto out; + } + + if (src_type == OP_SRC_REG_INDIRECT && dest_type == OP_DEST_REG) { + offset = -cfa->offset + op->src.offset; + if (dest_reg == CFI_FP) { + if (!fp_val->offset || fp_val->offset != offset) { + /* + * Loading the FP from a different place than + * where it is stored. + */ + return false; + } + if (!ra_val->offset || + (ra_val->offset - fp_val->offset) != 8) { + /* FP and RA must be adjacent in a frame. 
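+				 * The typical arm64 prologue stores the pair
+				 * with "stp x29, x30, [sp, #-N]!", which puts
+				 * the RA slot 8 bytes above the FP slot.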
*/ + return false; + } + fp_reg->offset = 0; + } + goto out; + } + + if (src_type == OP_SRC_REG && dest_type == OP_DEST_REG_INDIRECT) { + offset = -cfa->offset + op->dest.offset; + if (dest_reg == CFI_FP) { + /* Record where the FP is stored on the stack. */ + fp_val->offset = offset; + } else { + /* Record where the RA is stored on the stack. */ + if (fp_val->offset && (offset - fp_val->offset) == 8) + ra_val->offset = offset; + } + goto out; + } + return false; +out: + if (cfa->offset < 0 || fp_reg->offset > 0 || + fp_val->offset > 0 || ra_val->offset > 0) { + /* Unexpected SP and FP offset values. */ + return false; + } + return true; +} + +static bool do_stack_ops(struct instruction *insn, struct insn_state *state) +{ + struct stack_op *op; + + for (op = insn->stack_ops; op; op = op->next) { + if (!update_cfi_state(&state->cfi, op)) + return false; + } + return true; +} + +static bool validate_branch(struct objtool_file *file, struct section *sec, + struct symbol *func, struct instruction *insn, + struct insn_state *state) +{ + struct symbol *insn_func = insn->sym; + struct instruction *dest; + struct cfi_state save_cfi; + struct cfi_reg *cfa; + struct cfi_reg *regs; + unsigned long start, end; + + for (; insn; insn = next_insn_same_sec(file, insn)) { + + if (insn->sym != insn_func) + return true; + + if (insn->cfi) + return insn_cfi_match(insn, &state->cfi, false); + + insn->cfi = cfi_hash_find_or_add(&state->cfi); + dest = insn->jump_dest; + + if (!do_stack_ops(insn, state)) + return false; + + switch (insn->type) { + case INSN_BUG: + return true; + + case INSN_UNRELIABLE: + return false; + + case INSN_RETURN: + cfa = &state->cfi.cfa; + regs = state->cfi.regs; + if (cfa->offset || regs[CFI_FP].offset) { + /* SP and FP offsets should be 0 on return. */ + return false; + } + return true; + + case INSN_CALL: + case INSN_CALL_DYNAMIC: + start = func->offset; + end = start + func->len; + /* Treat intra-function calls as jumps. */ + if (!dest || dest->sec != sec || + dest->offset <= start || dest->offset >= end) { + break; + } + + case INSN_JUMP_UNCONDITIONAL: + case INSN_JUMP_CONDITIONAL: + case INSN_JUMP_DYNAMIC: + if (dest) { + save_cfi = state->cfi; + if (!validate_branch(file, sec, func, dest, + state)) { + return false; + } + state->cfi = save_cfi; + } + if (insn->type == INSN_JUMP_UNCONDITIONAL || + insn->type == INSN_JUMP_DYNAMIC) { + return true; + } + break; + + default: + break; + } + } + return true; +} + +static bool walk_reachable(struct objtool_file *file, struct section *sec, + struct symbol *func) +{ + struct instruction *insn = find_insn(file, sec, func->offset); + struct insn_state state; + + func_for_each_insn(file, func, insn) { + + if (insn->offset != func->offset && + (insn->type != INSN_START || insn->cfi)) { + continue; + } + + init_insn_state(file, &state, sec); + set_func_state(&state.cfi); + + if (!validate_branch(file, sec, func, insn, &state)) + return false; + } + return true; +} + +static void remove_cfi(struct objtool_file *file, struct symbol *func) +{ + struct instruction *insn; + + func_for_each_insn(file, func, insn) { + insn->cfi = NULL; + } +} + +/* + * Instructions that were not visited by walk_reachable() would not have a + * CFI. Try to initialize their CFI. For instance, there could be a table of + * unconditional branches like for a switch statement. Or, code can be patched + * by the kernel at runtime. After patching, some of the previously unreachable + * code may become reachable. 
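+ * The CFI for such code is seeded from the immediately preceding
+ * instruction, provided it ends a block (an unconditional or dynamic
+ * jump, or a trap) and its CFI is known.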
+ * + * This follows the same pattern as the DWARF info generated by the compiler. + */ +static bool walk_unreachable(struct objtool_file *file, struct section *sec, + struct symbol *func) +{ + struct instruction *insn, *prev; + struct insn_state state; + + func_for_each_insn(file, func, insn) { + + if (insn->cfi) + continue; + + prev = prev_insn_same_sec(file, insn); + if (!prev || prev->sym != insn->sym || !prev->cfi) + continue; + + if (prev->type != INSN_JUMP_UNCONDITIONAL && + prev->type != INSN_JUMP_DYNAMIC && + prev->type != INSN_BUG) { + continue; + } + + /* Propagate the CFI. */ + state.cfi = *prev->cfi; + if (!validate_branch(file, sec, func, insn, &state)) + return false; + } + return true; +} + +static void walk_section(struct objtool_file *file, struct section *sec) +{ + struct symbol *func; + + list_for_each_entry(func, &sec->symbol_list, list) { + + if (func->type != STT_FUNC || !func->len || + func->pfunc != func || func->alias != func) { + /* No CFI generated for this function. */ + continue; + } + + if (!walk_reachable(file, sec, func) || + !walk_unreachable(file, sec, func)) { + remove_cfi(file, func); + continue; + } + } +} + +static void walk_sections(struct objtool_file *file) +{ + struct section *sec; + + for_each_sec(file, sec) { + if (sec->sh.sh_flags & SHF_EXECINSTR) + walk_section(file, sec); + } +} + +int check(struct objtool_file *file) +{ + int ret; + + if (!opts.stackval) + return 1; + + arch_initial_func_cfi_state(&initial_func_cfi); + + if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) + return -1; + + ret = decode_instructions(file); + if (ret) + return ret; + + add_jump_destinations(file); + + walk_sections(file); + + ret = read_unwind_hints(file); + if (ret) + return ret; + + if (opts.orc) + ret = orc_create(file); + + return ret; +} diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h index 0b303eba660e4625757682544a3b4a555e288511..aad28c7a6de91d2f888af46d341d6a1a39b608d8 100644 --- a/tools/objtool/include/objtool/arch.h +++ b/tools/objtool/include/objtool/arch.h @@ -29,6 +29,8 @@ enum insn_type { INSN_TRAP, INSN_ENDBR, INSN_OTHER, + INSN_START, + INSN_UNRELIABLE, }; enum op_dest_type { diff --git a/tools/objtool/include/objtool/check.h b/tools/objtool/include/objtool/check.h index daa46f1f0965ad7f390bef7c6dc2e1e9eb2fd2cd..f389816e3fa265da1be8ff16b36ebe47bc06c253 100644 --- a/tools/objtool/include/objtool/check.h +++ b/tools/objtool/include/objtool/check.h @@ -6,6 +6,9 @@ #ifndef _CHECK_H #define _CHECK_H +#ifdef __aarch64__ +#include +#else #include #include #include @@ -121,4 +124,10 @@ struct instruction *next_insn_same_sec(struct objtool_file *file, struct instruc insn && insn->sec == _sec; \ insn = next_insn_same_sec(file, insn)) +static inline bool insn_can_reloc(struct instruction *insn) +{ + return true; +} + +#endif /* endof !__aarch64__ */ #endif /* _CHECK_H */ diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index 9f71e988eca45fd6bc81734b9a52d76b895e9517..d7e815c2fd1567bdd86fdbb78e26b5ab68238398 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -67,6 +67,8 @@ struct symbol { u8 profiling_func : 1; u8 warned : 1; u8 embedded_insn : 1; + u8 local_label : 1; + u8 frame_pointer : 1; struct list_head pv_target; struct reloc *relocs; }; diff --git a/tools/objtool/include/objtool/endianness.h b/tools/objtool/include/objtool/endianness.h index 4d2aa9b0fe2fd5351691395d11b28270f7efc5df..8ea0818bd9a06f509f2c912ed395f641f480da17 
100644 --- a/tools/objtool/include/objtool/endianness.h +++ b/tools/objtool/include/objtool/endianness.h @@ -29,6 +29,8 @@ static inline bool need_bswap(struct elf *elf) __ret = __need_bswap ? bswap_32(val) : (val); break; \ case 2: \ __ret = __need_bswap ? bswap_16(val) : (val); break; \ + case 1: \ + __ret = (val); break; \ default: \ BUILD_BUG(); break; \ } \ diff --git a/tools/objtool/include/objtool/insn.h b/tools/objtool/include/objtool/insn.h new file mode 100644 index 0000000000000000000000000000000000000000..36922da5ccc700d54c6d3c5af86e64ccb87bb4fb --- /dev/null +++ b/tools/objtool/include/objtool/insn.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Josh Poimboeuf + */ + +#ifndef _INSN_H +#define _INSN_H + +/* This is an arm64 specific version for check.h */ +#ifdef __aarch64__ +#include +#include + +struct insn_state { + struct cfi_state cfi; + unsigned int uaccess_stack; + bool uaccess; + bool df; + bool noinstr; + s8 instr; +}; + +#define INSN_CHUNK_BITS 8 +#define INSN_CHUNK_SIZE (1 << INSN_CHUNK_BITS) +#define INSN_CHUNK_MAX (INSN_CHUNK_SIZE - 1) + +struct instruction { + struct hlist_node hash; + struct list_head call_node; + struct section *sec; + unsigned long offset; + unsigned long immediate; + + u8 len; + u8 prev_len; + u8 type; + s8 instr; + + u32 idx : INSN_CHUNK_BITS, + dead_end : 1, + ignore : 1, + ignore_alts : 1, + hint : 1, + save : 1, + restore : 1, + retpoline_safe : 1, + noendbr : 1, + unret : 1, + visited : 4, + no_reloc : 1; + /* 10 bit hole */ + + struct alt_group *alt_group; + struct instruction *jump_dest; + struct instruction *first_jump_src; + union { + struct symbol *_call_dest; + struct reloc *_jump_table; + }; + struct alternative *alts; + struct symbol *sym; + struct stack_op *stack_ops; + struct cfi_state *cfi; +}; + +struct alt_group { + /* + * Pointer from a replacement group to the original group. NULL if it + * *is* the original group. + */ + struct alt_group *orig_group; + + /* First and last instructions in the group */ + struct instruction *first_insn, *last_insn, *nop; + + /* + * Byte-offset-addressed len-sized array of pointers to CFI structs. + * This is shared with the other alt_groups in the same alternative. 
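+ * Indexing by byte offset lets original and replacement groups whose + * instructions do not line up one-to-one share the same table.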
+ */ + struct cfi_state **cfi; +}; + +static inline struct symbol *insn_func(struct instruction *insn) +{ + struct symbol *sym = insn->sym; + + if (sym && sym->type != STT_FUNC) + sym = NULL; + + return sym; +} + +static inline bool is_static_jump(struct instruction *insn) +{ + return insn->type == INSN_JUMP_CONDITIONAL || + insn->type == INSN_JUMP_UNCONDITIONAL; +} + +static inline bool is_dynamic_jump(struct instruction *insn) +{ + return insn->type == INSN_JUMP_DYNAMIC || + insn->type == INSN_JUMP_DYNAMIC_CONDITIONAL; +} + +static inline bool is_jump(struct instruction *insn) +{ + return is_static_jump(insn) || is_dynamic_jump(insn); +} + +void init_insn_state(struct objtool_file *file, struct insn_state *state, + struct section *sec); +struct instruction *find_insn(struct objtool_file *file, + struct section *sec, unsigned long offset); +struct instruction *find_last_insn(struct objtool_file *file, + struct section *sec); +struct instruction *next_insn_same_sec(struct objtool_file *file, + struct instruction *insn); +struct instruction *next_insn_same_func(struct objtool_file *file, + struct instruction *insn); +struct instruction *prev_insn_same_sec(struct objtool_file *file, + struct instruction *insn); +struct instruction *prev_insn_same_sym(struct objtool_file *file, + struct instruction *insn); + +struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn); +bool insn_can_reloc(struct instruction *insn); +bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2, + bool print); +bool is_first_func_insn(struct objtool_file *file, + struct instruction *insn, struct symbol *sym); + +int read_unwind_hints(struct objtool_file *file); +int decode_instructions(struct objtool_file *file); + +#define sec_for_each_insn(file, _sec, insn) \ + for (insn = find_insn(file, _sec, 0); \ + insn && insn->sec == _sec; \ + insn = next_insn_same_sec(file, insn)) + +#define for_each_insn(file, insn) \ + for (struct section *__sec, *__fake = (struct section *)1; \ + __fake; __fake = NULL) \ + for_each_sec(file, __sec) \ + sec_for_each_insn(file, __sec, insn) + +#define func_for_each_insn(file, func, insn) \ + for (insn = find_insn(file, func->sec, func->offset); \ + insn; \ + insn = next_insn_same_func(file, insn)) + +#define sym_for_each_insn(file, sym, insn) \ + for (insn = find_insn(file, sym->sec, sym->offset); \ + insn && insn->offset < sym->offset + sym->len; \ + insn = next_insn_same_sec(file, insn)) + +#define sym_for_each_insn_continue_reverse(file, sym, insn) \ + for (insn = prev_insn_same_sec(file, insn); \ + insn && insn->offset >= sym->offset; \ + insn = prev_insn_same_sec(file, insn)) + +#define sec_for_each_insn_from(file, insn) \ + for (; insn; insn = next_insn_same_sec(file, insn)) + +#define sec_for_each_insn_continue(file, insn) \ + for (insn = next_insn_same_sec(file, insn); insn; \ + insn = next_insn_same_sec(file, insn)) + +extern unsigned long nr_insns; +#endif /* __aarch64__ */ +#endif /* _INSN_H */ diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h index 94a33ee7b36306f85530efe149277c0c8ddf9061..059abeff5066bb0eff08dfe87dba0d7b6cc99cfd 100644 --- a/tools/objtool/include/objtool/objtool.h +++ b/tools/objtool/include/objtool/objtool.h @@ -12,6 +12,8 @@ #include +#include + #define __weak __attribute__((weak)) struct pv_state { @@ -46,5 +48,7 @@ void objtool_pv_add(struct objtool_file *file, int idx, struct symbol *func); int check(struct objtool_file *file); int orc_dump(const char *objname); int 
orc_create(struct objtool_file *file); +bool orc_ignore_section(struct section *sec); +void arch_init_orc_entry(struct orc_entry *entry); #endif /* _OBJTOOL_H */ diff --git a/tools/objtool/include/objtool/orc.h b/tools/objtool/include/objtool/orc.h new file mode 100644 index 0000000000000000000000000000000000000000..88dc98a2b8a400ab6597ad891c3ed98cdbb9ca2a --- /dev/null +++ b/tools/objtool/include/objtool/orc.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#ifndef _OBJTOOL_ORC_H +#define _OBJTOOL_ORC_H + +#include +#include + +int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, + struct instruction *insn); +void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i); +int write_orc_entry(struct elf *elf, struct section *orc_sec, + struct section *ip_sec, unsigned int idx, + struct section *insn_sec, unsigned long insn_off, + struct orc_entry *o); + +#endif /* _OBJTOOL_ORC_H */ diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c index 0e183bb1c72051157cb1e6daf30fa12ff3f0ce7c..a62247efb64f2e06c293e129ade3abae4fd456c1 100644 --- a/tools/objtool/orc_dump.c +++ b/tools/objtool/orc_dump.c @@ -6,65 +6,10 @@ #include #include #include +#include #include #include -static const char *reg_name(unsigned int reg) -{ - switch (reg) { - case ORC_REG_PREV_SP: - return "prevsp"; - case ORC_REG_DX: - return "dx"; - case ORC_REG_DI: - return "di"; - case ORC_REG_BP: - return "bp"; - case ORC_REG_SP: - return "sp"; - case ORC_REG_R10: - return "r10"; - case ORC_REG_R13: - return "r13"; - case ORC_REG_BP_INDIRECT: - return "bp(ind)"; - case ORC_REG_SP_INDIRECT: - return "sp(ind)"; - default: - return "?"; - } -} - -static const char *orc_type_name(unsigned int type) -{ - switch (type) { - case ORC_TYPE_UNDEFINED: - return "(und)"; - case ORC_TYPE_END_OF_STACK: - return "end"; - case ORC_TYPE_CALL: - return "call"; - case ORC_TYPE_REGS: - return "regs"; - case ORC_TYPE_REGS_PARTIAL: - return "regs (partial)"; - default: - return "?"; - } -} - -static void print_reg(unsigned int reg, int offset) -{ - if (reg == ORC_REG_BP_INDIRECT) - printf("(bp%+d)", offset); - else if (reg == ORC_REG_SP_INDIRECT) - printf("(sp)%+d", offset); - else if (reg == ORC_REG_UNDEFINED) - printf("(und)"); - else - printf("%s%+d", reg_name(reg), offset); -} - int orc_dump(const char *_objname) { int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0; @@ -205,17 +150,7 @@ int orc_dump(const char *_objname) printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i])); } - printf("type:%s", orc_type_name(orc[i].type)); - - printf(" sp:"); - - print_reg(orc[i].sp_reg, bswap_if_needed(&dummy_elf, orc[i].sp_offset)); - - printf(" bp:"); - - print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset)); - - printf(" signal:%d\n", orc[i].signal); + orc_print_dump(&dummy_elf, orc, i); } elf_end(elf); diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index bae343908867105f25ac35c61083e4d0ecfeca73..6674f816d0256119f7a871eaed3f50d4bbca8e38 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -10,119 +10,17 @@ #include #include +#include #include #include -static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, - struct instruction *insn) +bool __weak orc_ignore_section(struct section *sec) { - struct cfi_reg *bp = &cfi->regs[CFI_BP]; - - memset(orc, 0, sizeof(*orc)); - - if (!cfi) { - /* - * This is usually either unreachable nops/traps (which don't - * trigger 
unreachable instruction warnings), or - * STACK_FRAME_NON_STANDARD functions. - */ - orc->type = ORC_TYPE_UNDEFINED; - return 0; - } - - switch (cfi->type) { - case UNWIND_HINT_TYPE_UNDEFINED: - orc->type = ORC_TYPE_UNDEFINED; - return 0; - case UNWIND_HINT_TYPE_END_OF_STACK: - orc->type = ORC_TYPE_END_OF_STACK; - return 0; - case UNWIND_HINT_TYPE_CALL: - orc->type = ORC_TYPE_CALL; - break; - case UNWIND_HINT_TYPE_REGS: - orc->type = ORC_TYPE_REGS; - break; - case UNWIND_HINT_TYPE_REGS_PARTIAL: - orc->type = ORC_TYPE_REGS_PARTIAL; - break; - default: - WARN_INSN(insn, "unknown unwind hint type %d", cfi->type); - return -1; - } - - orc->signal = cfi->signal; - - switch (cfi->cfa.base) { - case CFI_SP: - orc->sp_reg = ORC_REG_SP; - break; - case CFI_SP_INDIRECT: - orc->sp_reg = ORC_REG_SP_INDIRECT; - break; - case CFI_BP: - orc->sp_reg = ORC_REG_BP; - break; - case CFI_BP_INDIRECT: - orc->sp_reg = ORC_REG_BP_INDIRECT; - break; - case CFI_R10: - orc->sp_reg = ORC_REG_R10; - break; - case CFI_R13: - orc->sp_reg = ORC_REG_R13; - break; - case CFI_DI: - orc->sp_reg = ORC_REG_DI; - break; - case CFI_DX: - orc->sp_reg = ORC_REG_DX; - break; - default: - WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base); - return -1; - } - - switch (bp->base) { - case CFI_UNDEFINED: - orc->bp_reg = ORC_REG_UNDEFINED; - break; - case CFI_CFA: - orc->bp_reg = ORC_REG_PREV_SP; - break; - case CFI_BP: - orc->bp_reg = ORC_REG_BP; - break; - default: - WARN_INSN(insn, "unknown BP base reg %d", bp->base); - return -1; - } - - orc->sp_offset = cfi->cfa.offset; - orc->bp_offset = bp->offset; - - return 0; + return false; } -static int write_orc_entry(struct elf *elf, struct section *orc_sec, - struct section *ip_sec, unsigned int idx, - struct section *insn_sec, unsigned long insn_off, - struct orc_entry *o) +void __weak arch_init_orc_entry(struct orc_entry *entry) { - struct orc_entry *orc; - - /* populate ORC data */ - orc = (struct orc_entry *)orc_sec->data->d_buf + idx; - memcpy(orc, o, sizeof(*orc)); - orc->sp_offset = bswap_if_needed(elf, orc->sp_offset); - orc->bp_offset = bswap_if_needed(elf, orc->bp_offset); - - /* populate reloc for ip */ - if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx, - insn_sec, insn_off)) - return -1; - - return 0; } struct orc_list_entry { @@ -166,6 +64,9 @@ int orc_create(struct objtool_file *file) struct orc_entry null = { .type = ORC_TYPE_UNDEFINED }; + /* Override orc_entry initialization for arch specific definition*/ + arch_init_orc_entry(&null); + /* Build a deduplicated list of ORC entries: */ INIT_LIST_HEAD(&orc_list); for_each_sec(file, sec) { @@ -173,13 +74,16 @@ int orc_create(struct objtool_file *file) struct instruction *insn; bool empty = true; - if (!sec->text) + if (!sec->text || orc_ignore_section(sec)) continue; sec_for_each_insn(file, sec, insn) { struct alt_group *alt_group = insn->alt_group; int i; + if (!insn_can_reloc(insn)) + continue; + if (!alt_group) { if (init_orc_entry(&orc, insn->cfi, insn)) return -1; @@ -223,7 +127,7 @@ int orc_create(struct objtool_file *file) } /* Add a section terminator */ - if (!empty) { + if (!empty && sec->sym) { orc_list_add(&orc_list, &null, sec, sec->sh.sh_size); nr++; } diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh index 81d120d0544255a0806013f3f32fcd4c98f8b297..53a12a0f9d8c44091e06dcd5b2a1e09cf9e4cbd9 100755 --- a/tools/objtool/sync-check.sh +++ b/tools/objtool/sync-check.sh @@ -27,6 +27,13 @@ arch/x86/lib/insn.c ' fi +if [ "$SRCARCH" = "arm64" ]; then +FILES="$FILES 
+arch/arm64/include/asm/unwind_hints.h +arch/arm64/include/asm/orc_types.h +" +fi + check_2 () { file1=$1 file2=$2 diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt index 856f0dfb8e5a30f228460911bd0f297aaa291ef6..192ab0415ee9e855ff61c74f88d15632ee3364b2 100644 --- a/tools/perf/Documentation/perf-c2c.txt +++ b/tools/perf/Documentation/perf-c2c.txt @@ -21,9 +21,9 @@ you to track down the cacheline contentions. On Intel, the tool is based on load latency and precise store facility events provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling -with thresholding feature. On AMD, the tool uses IBS op pmu (due to hardware -limitations, perf c2c is not supported on Zen3 cpus). On Arm64 it uses SPE to -sample load and store operations, therefore hardware and kernel support is +with thresholding feature. On AMD and Hygon, the tool uses IBS op pmu (due to +hardware limitations, perf c2c is not supported on Zen3 cpus). On Arm64 it uses +SPE to sample load and store operations, therefore hardware and kernel support is required. See linkperf:perf-arm-spe[1] for a setup guide. Due to the statistical nature of Arm SPE sampling, not every memory operation will be sampled. @@ -152,7 +152,7 @@ default on Intel: cpu/mem-loads,ldlat=30/P cpu/mem-stores/P -following on AMD: +following on AMD and Hygon: ibs_op// diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index d5217be012d79492efdf34868b2db49e3d955294..1889f66addf2aa936bafea132aed55ab0908ff8d 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -374,6 +374,9 @@ comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0- In per-thread mode with inheritance mode on (default), samples are captured only when the thread executes on the designated CPUs. Default is to monitor all CPUs. +User space tasks can migrate between CPUs, so when tracing selected CPUs, +a dummy event is created to track sideband for all CPUs. + -B:: --no-buildid:: Do not save the build ids of binaries in the perf.data files. This skips diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index d66b52407e19c6f4dde9fded0da2d8dcf0a95d45..a48258330cc0e5f6c39401540e13280b05162d6e 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -94,6 +94,12 @@ ifeq ($(SRCARCH),csky) NO_PERF_REGS := 0 endif +ifeq ($(SRCARCH),sw_64) + NO_PERF_REGS := 0 + CFLAGS += -mieee + LIBUNWIND_LIBS = -lunwind -lunwind-sw_64 +endif + ifeq ($(ARCH),s390) NO_PERF_REGS := 0 CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated @@ -640,6 +646,13 @@ ifndef NO_LIBUNWIND CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME_AARCH64 endif endif + ifeq ($(feature-libunwind-sw_64), 1) + $(call detected,CONFIG_LIBUNWIND_SW64) + CFLAGS += -DHAVE_LIBUNWIND_SW_64_SUPPORT + LDFLAGS += -lunwind-sw_64 + EXTLIBS_LIBUNWIND += -lunwind-sw_64 + have_libunwind = 1 + endif ifneq ($(feature-libunwind), 1) msg := $(warning No libunwind found. 
Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR); diff --git a/tools/perf/arch/loongarch/Makefile b/tools/perf/arch/loongarch/Makefile index c392e7af474332e2c6ebd5a2e108207fc08e4786..c8be64c5cdb445514a8be026efec93da3b41b295 100644 --- a/tools/perf/arch/loongarch/Makefile +++ b/tools/perf/arch/loongarch/Makefile @@ -4,6 +4,7 @@ PERF_HAVE_DWARF_REGS := 1 endif PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1 PERF_HAVE_JITDUMP := 1 +HAVE_KVM_STAT_SUPPORT := 1 # # Syscall table generation for perf diff --git a/tools/perf/arch/loongarch/util/Build b/tools/perf/arch/loongarch/util/Build index d776125a2d06832b5841c798bf39460357737ce9..b12d374d70964929ef39c211dfd5edd9cd51f01e 100644 --- a/tools/perf/arch/loongarch/util/Build +++ b/tools/perf/arch/loongarch/util/Build @@ -1,5 +1,7 @@ +perf-y += header.o perf-y += perf_regs.o perf-$(CONFIG_DWARF) += dwarf-regs.o perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o +perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o diff --git a/tools/perf/arch/loongarch/util/header.c b/tools/perf/arch/loongarch/util/header.c new file mode 100644 index 0000000000000000000000000000000000000000..d962dff55512b39c9d6adfb4f5ad76f9857df404 --- /dev/null +++ b/tools/perf/arch/loongarch/util/header.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implementation of get_cpuid(). + * + * Author: Nikita Shubin + * Bibo Mao + * Huacai Chen + */ + +#include +#include +#include +#include +#include "util/debug.h" +#include "util/header.h" + +/* + * Output example from /proc/cpuinfo + * CPU Family : Loongson-64bit + * Model Name : Loongson-3C5000 + * CPU Revision : 0x10 + * FPU Revision : 0x01 + */ +#define CPUINFO_MODEL "Model Name" +#define CPUINFO "/proc/cpuinfo" + +static char *_get_field(const char *line) +{ + char *line2, *nl; + + line2 = strrchr(line, ' '); + if (!line2) + return NULL; + + line2++; + nl = strrchr(line, '\n'); + if (!nl) + return NULL; + + return strndup(line2, nl - line2); +} + +static char *_get_cpuid(void) +{ + unsigned long line_sz; + char *line, *model, *cpuid; + FILE *file; + + file = fopen(CPUINFO, "r"); + if (file == NULL) + return NULL; + + line = model = cpuid = NULL; + while (getline(&line, &line_sz, file) != -1) { + if (strncmp(line, CPUINFO_MODEL, strlen(CPUINFO_MODEL))) + continue; + + model = _get_field(line); + if (!model) + goto out_free; + break; + } + + if (model && (asprintf(&cpuid, "%s", model) < 0)) + cpuid = NULL; + +out_free: + fclose(file); + free(model); + return cpuid; +} + +int get_cpuid(char *buffer, size_t sz) +{ + int ret = 0; + char *cpuid = _get_cpuid(); + + if (!cpuid) + return EINVAL; + + if (sz < strlen(cpuid)) { + ret = ENOBUFS; + goto out_free; + } + + scnprintf(buffer, sz, "%s", cpuid); + +out_free: + free(cpuid); + return ret; +} + +char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused) +{ + return _get_cpuid(); +} diff --git a/tools/perf/arch/loongarch/util/kvm-stat.c b/tools/perf/arch/loongarch/util/kvm-stat.c new file mode 100644 index 0000000000000000000000000000000000000000..a7859a3a9a51b37e9581c3ae3a8a9b32a77bb8e8 --- /dev/null +++ b/tools/perf/arch/loongarch/util/kvm-stat.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include "util/kvm-stat.h" +#include "util/parse-events.h" +#include "util/debug.h" +#include "util/evsel.h" +#include "util/evlist.h" +#include "util/pmus.h" + +#define LOONGARCH_EXCEPTION_INT 0 +#define LOONGARCH_EXCEPTION_PIL 1 +#define LOONGARCH_EXCEPTION_PIS 2 +#define 
LOONGARCH_EXCEPTION_PIF 3 +#define LOONGARCH_EXCEPTION_PME 4 +#define LOONGARCH_EXCEPTION_FPD 15 +#define LOONGARCH_EXCEPTION_SXD 16 +#define LOONGARCH_EXCEPTION_ASXD 17 +#define LOONGARCH_EXCEPTION_GSPR 22 +#define LOONGARCH_EXCEPTION_CPUCFG 100 +#define LOONGARCH_EXCEPTION_CSR 101 +#define LOONGARCH_EXCEPTION_IOCSR 102 +#define LOONGARCH_EXCEPTION_IDLE 103 +#define LOONGARCH_EXCEPTION_OTHERS 104 +#define LOONGARCH_EXCEPTION_HVC 23 + +#define loongarch_exception_type \ + {LOONGARCH_EXCEPTION_INT, "Interrupt" }, \ + {LOONGARCH_EXCEPTION_PIL, "Mem Read" }, \ + {LOONGARCH_EXCEPTION_PIS, "Mem Store" }, \ + {LOONGARCH_EXCEPTION_PIF, "Inst Fetch" }, \ + {LOONGARCH_EXCEPTION_PME, "Mem Modify" }, \ + {LOONGARCH_EXCEPTION_FPD, "FPU" }, \ + {LOONGARCH_EXCEPTION_SXD, "LSX" }, \ + {LOONGARCH_EXCEPTION_ASXD, "LASX" }, \ + {LOONGARCH_EXCEPTION_GSPR, "Privilege Error" }, \ + {LOONGARCH_EXCEPTION_HVC, "Hypercall" }, \ + {LOONGARCH_EXCEPTION_CPUCFG, "CPUCFG" }, \ + {LOONGARCH_EXCEPTION_CSR, "CSR" }, \ + {LOONGARCH_EXCEPTION_IOCSR, "IOCSR" }, \ + {LOONGARCH_EXCEPTION_IDLE, "Idle" }, \ + {LOONGARCH_EXCEPTION_OTHERS, "Others" } + +define_exit_reasons_table(loongarch_exit_reasons, loongarch_exception_type); + +const char *vcpu_id_str = "vcpu_id"; +const char *kvm_exit_reason = "reason"; +const char *kvm_entry_trace = "kvm:kvm_enter"; +const char *kvm_reenter_trace = "kvm:kvm_reenter"; +const char *kvm_exit_trace = "kvm:kvm_exit"; +const char *kvm_events_tp[] = { + "kvm:kvm_enter", + "kvm:kvm_reenter", + "kvm:kvm_exit", + "kvm:kvm_exit_gspr", + NULL, +}; + +static bool event_begin(struct evsel *evsel, + struct perf_sample *sample, struct event_key *key) +{ + return exit_event_begin(evsel, sample, key); +} + +static bool event_end(struct evsel *evsel, + struct perf_sample *sample __maybe_unused, + struct event_key *key __maybe_unused) +{ + /* + * LoongArch kvm is different from other architectures. + * + * A kvm:kvm_reenter or kvm:kvm_enter event arrives adjacent to each + * kvm:kvm_exit event. 
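+ * Either event therefore marks the end of a vmexit interval: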
+ * kvm:kvm_enter means returning to vmm and then to guest + * kvm:kvm_reenter means returning to guest immediately + */ + return evsel__name_is(evsel, kvm_entry_trace) || evsel__name_is(evsel, kvm_reenter_trace); +} + +static void event_gspr_get_key(struct evsel *evsel, + struct perf_sample *sample, struct event_key *key) +{ + unsigned int insn; + + key->key = LOONGARCH_EXCEPTION_OTHERS; + insn = evsel__intval(evsel, sample, "inst_word"); + + switch (insn >> 24) { + case 0: + /* CPUCFG inst trap */ + if ((insn >> 10) == 0x1b) + key->key = LOONGARCH_EXCEPTION_CPUCFG; + break; + case 4: + /* CSR inst trap */ + key->key = LOONGARCH_EXCEPTION_CSR; + break; + case 6: + /* IOCSR inst trap */ + if ((insn >> 15) == 0xc90) + key->key = LOONGARCH_EXCEPTION_IOCSR; + else if ((insn >> 15) == 0xc91) + /* Idle inst trap */ + key->key = LOONGARCH_EXCEPTION_IDLE; + break; + default: + key->key = LOONGARCH_EXCEPTION_OTHERS; + break; + } +} + +static struct child_event_ops child_events[] = { + { .name = "kvm:kvm_exit_gspr", .get_key = event_gspr_get_key }, + { NULL, NULL }, +}; + +static struct kvm_events_ops exit_events = { + .is_begin_event = event_begin, + .is_end_event = event_end, + .child_ops = child_events, + .decode_key = exit_event_decode_key, + .name = "VM-EXIT" +}; + +struct kvm_reg_events_ops kvm_reg_events_ops[] = { + { .name = "vmexit", .ops = &exit_events, }, + { NULL, NULL }, +}; + +const char * const kvm_skip_events[] = { + NULL, +}; + +int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused) +{ + kvm->exit_reasons_isa = "loongarch64"; + kvm->exit_reasons = loongarch_exit_reasons; + return 0; +} diff --git a/tools/perf/arch/sw_64/Build b/tools/perf/arch/sw_64/Build new file mode 100644 index 0000000000000000000000000000000000000000..36222e64bbf7d00c5701c4f6fed009da02782cca --- /dev/null +++ b/tools/perf/arch/sw_64/Build @@ -0,0 +1,2 @@ +perf-y += util/ +perf-$(CONFIG_DWARF_UNWIND) += tests/ diff --git a/tools/perf/arch/sw_64/Makefile b/tools/perf/arch/sw_64/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..1aa9dd772489a6d1c5ccc03f9bbe9539251ff744 --- /dev/null +++ b/tools/perf/arch/sw_64/Makefile @@ -0,0 +1,4 @@ +ifndef NO_DWARF +PERF_HAVE_DWARF_REGS := 1 +endif +PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1 diff --git a/tools/perf/arch/sw_64/include/arch-tests.h b/tools/perf/arch/sw_64/include/arch-tests.h new file mode 100644 index 0000000000000000000000000000000000000000..90ec4c8cb8802d22e7780a826adcbc94b7cfd17a --- /dev/null +++ b/tools/perf/arch/sw_64/include/arch-tests.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_TESTS_H +#define ARCH_TESTS_H + +#ifdef HAVE_DWARF_UNWIND_SUPPORT +struct thread; +struct perf_sample; +#endif + +extern struct test arch_tests[]; + +#endif diff --git a/tools/perf/arch/sw_64/include/perf_regs.h b/tools/perf/arch/sw_64/include/perf_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..e0c1b15375b5c2c7e975df45adeb644d1eed2ccf --- /dev/null +++ b/tools/perf/arch/sw_64/include/perf_regs.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_PERF_REGS_H +#define ARCH_PERF_REGS_H + +#include +#include +#include + +void perf_regs_load(u64 *regs); + +#define PERF_REGS_MASK ((1ULL << PERF_REG_SW64_MAX) - 1) +#define PERF_REGS_MAX PERF_REG_SW64_MAX +#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64 + +#define PERF_REG_IP PERF_REG_SW64_PC +#define PERF_REG_SP PERF_REG_SW64_SP + +static inline const char *perf_reg_name(int id) +{ + switch (id) { + 
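/* r0-r28 keep their architectural r-names; gp, sp and pc follow. */ +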
case PERF_REG_SW64_R0: + return "r0"; + case PERF_REG_SW64_R1: + return "r1"; + case PERF_REG_SW64_R2: + return "r2"; + case PERF_REG_SW64_R3: + return "r3"; + case PERF_REG_SW64_R4: + return "r4"; + case PERF_REG_SW64_R5: + return "r5"; + case PERF_REG_SW64_R6: + return "r6"; + case PERF_REG_SW64_R7: + return "r7"; + case PERF_REG_SW64_R8: + return "r8"; + case PERF_REG_SW64_R9: + return "r9"; + case PERF_REG_SW64_R10: + return "r10"; + case PERF_REG_SW64_R11: + return "r11"; + case PERF_REG_SW64_R12: + return "r12"; + case PERF_REG_SW64_R13: + return "r13"; + case PERF_REG_SW64_R14: + return "r14"; + case PERF_REG_SW64_R15: + return "r15"; + case PERF_REG_SW64_R16: + return "r16"; + case PERF_REG_SW64_R17: + return "r17"; + case PERF_REG_SW64_R18: + return "r18"; + case PERF_REG_SW64_R19: + return "r19"; + case PERF_REG_SW64_R20: + return "r20"; + case PERF_REG_SW64_R21: + return "r21"; + case PERF_REG_SW64_R22: + return "r22"; + case PERF_REG_SW64_R23: + return "r23"; + case PERF_REG_SW64_R24: + return "r24"; + case PERF_REG_SW64_R25: + return "r25"; + case PERF_REG_SW64_R26: + return "r26"; + case PERF_REG_SW64_R27: + return "r27"; + case PERF_REG_SW64_R28: + return "r28"; + case PERF_REG_SW64_GP: + return "gp"; + case PERF_REG_SW64_SP: + return "sp"; + case PERF_REG_SW64_PC: + return "pc"; + default: + return NULL; + } + + return NULL; +} + +#endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/sw_64/tests/Build b/tools/perf/arch/sw_64/tests/Build new file mode 100644 index 0000000000000000000000000000000000000000..b8a38eadfb3562723278e2e6855e56109e77213e --- /dev/null +++ b/tools/perf/arch/sw_64/tests/Build @@ -0,0 +1,3 @@ +perf-y += regs_load.o +perf-y += dwarf-unwind.o +perf-y += arch-tests.o diff --git a/tools/perf/arch/sw_64/tests/arch-tests.c b/tools/perf/arch/sw_64/tests/arch-tests.c new file mode 100644 index 0000000000000000000000000000000000000000..5b1543c980223d2a969976e0259becd6cd583bd9 --- /dev/null +++ b/tools/perf/arch/sw_64/tests/arch-tests.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "tests/tests.h" +#include "arch-tests.h" + +struct test arch_tests[] = { +#ifdef HAVE_DWARF_UNWIND_SUPPORT + { + .desc = "DWARF unwind", + .func = test__dwarf_unwind, + }, +#endif + { + .func = NULL, + }, +}; diff --git a/tools/perf/arch/sw_64/tests/dwarf-unwind.c b/tools/perf/arch/sw_64/tests/dwarf-unwind.c new file mode 100644 index 0000000000000000000000000000000000000000..cd7047b7a54683c8577b2086ccff6b44865fbd7e --- /dev/null +++ b/tools/perf/arch/sw_64/tests/dwarf-unwind.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "perf_regs.h" +#include "thread.h" +#include "map.h" +#include "maps.h" +#include "event.h" +#include "debug.h" +#include "tests/tests.h" + +#define STACK_SIZE 8192 + +static int sample_ustack(struct perf_sample *sample, + struct thread *thread, u64 *regs) +{ + struct stack_dump *stack = &sample->user_stack; + struct map *map; + unsigned long sp; + u64 stack_size, *buf; + + buf = malloc(STACK_SIZE); + if (!buf) { + printf("failed to allocate sample uregs data\n"); + return -1; + } + + sp = (unsigned long) regs[PERF_REG_SW64_SP]; + + map = maps__find(thread->maps, (u64)sp); + if (!map) { + printf("failed to get stack map\n"); + free(buf); + return -1; + } + + stack_size = map->end - sp; + stack_size = stack_size > STACK_SIZE ? 
STACK_SIZE : stack_size; + + memcpy(buf, (void *) sp, stack_size); + stack->data = (char *) buf; + stack->size = stack_size; + return 0; +} + +int test__arch_unwind_sample(struct perf_sample *sample, + struct thread *thread) +{ + struct regs_dump *regs = &sample->user_regs; + u64 *buf; + + buf = calloc(1, sizeof(u64) * PERF_REGS_MAX); + if (!buf) { + printf("failed to allocate sample uregs data\n"); + return -1; + } + + perf_regs_load(buf); + regs->abi = PERF_SAMPLE_REGS_ABI; + regs->regs = buf; + regs->mask = PERF_REGS_MASK; + + return sample_ustack(sample, thread, buf); +} diff --git a/tools/perf/arch/sw_64/tests/regs_load.S b/tools/perf/arch/sw_64/tests/regs_load.S new file mode 100644 index 0000000000000000000000000000000000000000..8c5aabc2c6fbde125064ecbcf85787bdd09cbcaf --- /dev/null +++ b/tools/perf/arch/sw_64/tests/regs_load.S @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include + +.text +.set noat +.type perf_regs_load,%function +#define STL_REG(r) stl $r, (8 * r)($16) +#define LDL_REG(r) ldl $r, (8 * r)($16) +#define SP (8 * 30) +#define PC (8 * 31) +SYM_FUNC_START(perf_regs_load) + STL_REG(0) + STL_REG(1) + STL_REG(2) + STL_REG(3) + STL_REG(4) + STL_REG(5) + STL_REG(6) + STL_REG(7) + STL_REG(8) + STL_REG(9) + STL_REG(10) + STL_REG(11) + STL_REG(12) + STL_REG(13) + STL_REG(14) + STL_REG(15) + STL_REG(16) + STL_REG(17) + STL_REG(18) + STL_REG(19) + STL_REG(20) + STL_REG(21) + STL_REG(22) + STL_REG(23) + STL_REG(24) + STL_REG(25) + STL_REG(26) + STL_REG(27) + STL_REG(28) + STL_REG(29) + mov $30, $17 + stl $17, (SP)($16) + stl $26, (PC)($16) + LDL_REG(17) + ret +SYM_FUNC_END(perf_regs_load) diff --git a/tools/perf/arch/sw_64/util/Build b/tools/perf/arch/sw_64/util/Build new file mode 100644 index 0000000000000000000000000000000000000000..39f459b636a0cf4ef76b07ac9d09a02fcb095a5a --- /dev/null +++ b/tools/perf/arch/sw_64/util/Build @@ -0,0 +1,4 @@ +perf-y += perf_regs.o +perf-$(CONFIG_DWARF) += dwarf-regs.o +perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o +perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o diff --git a/tools/perf/arch/sw_64/util/dwarf-regs.c b/tools/perf/arch/sw_64/util/dwarf-regs.c new file mode 100644 index 0000000000000000000000000000000000000000..11c1ee5444dab96427749688549a6529a89d83d8 --- /dev/null +++ b/tools/perf/arch/sw_64/util/dwarf-regs.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Mapping of DWARF debug register numbers into register names. + * + * Copyright (C) 2010 Will Deacon, ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include /* for struct user_pt_regs */ +#include +#include "util.h" + +struct pt_regs_dwarfnum { + const char *name; + unsigned int dwarfnum; +}; + +#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num} +#define GPR_DWARFNUM_NAME(num) \ + {.name = __stringify(%x##num), .dwarfnum = num} +#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0} +#define DWARFNUM2OFFSET(index) \ + (index * sizeof((struct user_pt_regs *)0)->regs[0]) + +static const struct pt_regs_dwarfnum regdwarfnum_table[] = { + GPR_DWARFNUM_NAME(0), + GPR_DWARFNUM_NAME(1), + GPR_DWARFNUM_NAME(2), + GPR_DWARFNUM_NAME(3), + GPR_DWARFNUM_NAME(4), + GPR_DWARFNUM_NAME(5), + GPR_DWARFNUM_NAME(6), + GPR_DWARFNUM_NAME(7), + GPR_DWARFNUM_NAME(8), + GPR_DWARFNUM_NAME(9), + GPR_DWARFNUM_NAME(10), + GPR_DWARFNUM_NAME(11), + GPR_DWARFNUM_NAME(12), + GPR_DWARFNUM_NAME(13), + GPR_DWARFNUM_NAME(14), + GPR_DWARFNUM_NAME(15), + REG_DWARFNUM_NAME("%fp", 15), + GPR_DWARFNUM_NAME(16), + GPR_DWARFNUM_NAME(17), + GPR_DWARFNUM_NAME(18), + GPR_DWARFNUM_NAME(19), + GPR_DWARFNUM_NAME(20), + GPR_DWARFNUM_NAME(21), + GPR_DWARFNUM_NAME(22), + GPR_DWARFNUM_NAME(23), + GPR_DWARFNUM_NAME(24), + GPR_DWARFNUM_NAME(25), + GPR_DWARFNUM_NAME(26), + GPR_DWARFNUM_NAME(27), + GPR_DWARFNUM_NAME(28), + REG_DWARFNUM_NAME("%gp", 29), + REG_DWARFNUM_NAME("%sp", 30), + REG_DWARFNUM_END, +}; + +/** + * get_arch_regstr() - lookup register name from its DWARF register number + * @n: the DWARF register number + * + * get_arch_regstr() returns the name of the register in struct + * regdwarfnum_table from its DWARF register number. If the register is not + * found in the table, this returns NULL. + */ +const char *get_arch_regstr(unsigned int n) +{ + const struct pt_regs_dwarfnum *roff; + + for (roff = regdwarfnum_table; roff->name != NULL; roff++) + if (roff->dwarfnum == n) + return roff->name; + return NULL; +} + +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_dwarfnum *roff; + + for (roff = regdwarfnum_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return DWARFNUM2OFFSET(roff->dwarfnum); + return -EINVAL; +} diff --git a/tools/perf/arch/sw_64/util/perf_regs.c b/tools/perf/arch/sw_64/util/perf_regs.c new file mode 100644 index 0000000000000000000000000000000000000000..2833e101a7c6407263130e9948a06a2caa32bc4b --- /dev/null +++ b/tools/perf/arch/sw_64/util/perf_regs.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "../../../util/perf_regs.h" + +const struct sample_reg sample_reg_masks[] = { + SMPL_REG_END +}; diff --git a/tools/perf/arch/sw_64/util/unwind-libdw.c b/tools/perf/arch/sw_64/util/unwind-libdw.c new file mode 100644 index 0000000000000000000000000000000000000000..3e2b6acc40ac3336273f662c1f790adbe8df9a27 --- /dev/null +++ b/tools/perf/arch/sw_64/util/unwind-libdw.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "../../util/unwind-libdw.h" +#include "../../util/perf_regs.h" +#include "../../util/event.h" + +bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg) +{ + struct unwind_info *ui = arg; + struct regs_dump *user_regs = &ui->sample->user_regs; + Dwarf_Word dwarf_regs[PERF_REG_SW64_MAX], dwarf_pc; + +#define REG(r) ({ \ + Dwarf_Word val = 0; \ + perf_reg_value(&val, user_regs, PERF_REG_SW64_##r); \ + val; \ +}) + + dwarf_regs[0] = REG(R0); + dwarf_regs[1] = REG(R1); + dwarf_regs[2] = REG(R2); + dwarf_regs[3] = REG(R3); + dwarf_regs[4] = REG(R4); + dwarf_regs[5] = REG(R5); + dwarf_regs[6] = 
REG(R6); + dwarf_regs[7] = REG(R7); + dwarf_regs[8] = REG(R8); + dwarf_regs[9] = REG(R9); + dwarf_regs[10] = REG(R10); + dwarf_regs[11] = REG(R11); + dwarf_regs[12] = REG(R12); + dwarf_regs[13] = REG(R13); + dwarf_regs[14] = REG(R14); + dwarf_regs[15] = REG(R15); + dwarf_regs[16] = REG(R16); + dwarf_regs[17] = REG(R17); + dwarf_regs[18] = REG(R18); + dwarf_regs[19] = REG(R19); + dwarf_regs[20] = REG(R20); + dwarf_regs[21] = REG(R21); + dwarf_regs[22] = REG(R22); + dwarf_regs[23] = REG(R23); + dwarf_regs[24] = REG(R24); + dwarf_regs[25] = REG(R25); + dwarf_regs[26] = REG(R26); + dwarf_regs[27] = REG(R27); + dwarf_regs[28] = REG(R28); + dwarf_regs[29] = REG(R29); + dwarf_regs[30] = REG(R30); + dwarf_regs[31] = REG(R31); + + if (!dwfl_thread_state_registers(thread, 0, PERF_REG_SW64_MAX, + dwarf_regs)) + return false; + + dwarf_pc = REG(PC); + dwfl_thread_state_register_pc(thread, dwarf_pc); + + return true; +} diff --git a/tools/perf/arch/sw_64/util/unwind-libunwind.c b/tools/perf/arch/sw_64/util/unwind-libunwind.c new file mode 100644 index 0000000000000000000000000000000000000000..134e3c2280d297b3374ee4d7a6c471bc4d923b14 --- /dev/null +++ b/tools/perf/arch/sw_64/util/unwind-libunwind.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#ifndef REMOTE_UNWIND_LIBUNWIND +#include +#include "perf_regs.h" +#include "../../util/unwind.h" +#include "../../util/debug.h" +#endif + +int LIBUNWIND__ARCH_REG_ID(int regnum) +{ + switch (regnum) { + case UNW_SW_64_R0: + return PERF_REG_SW64_R0; + case UNW_SW_64_R1: + return PERF_REG_SW64_R1; + case UNW_SW_64_R2: + return PERF_REG_SW64_R2; + case UNW_SW_64_R3: + return PERF_REG_SW64_R3; + case UNW_SW_64_R4: + return PERF_REG_SW64_R4; + case UNW_SW_64_R5: + return PERF_REG_SW64_R5; + case UNW_SW_64_R6: + return PERF_REG_SW64_R6; + case UNW_SW_64_R7: + return PERF_REG_SW64_R7; + case UNW_SW_64_R8: + return PERF_REG_SW64_R8; + case UNW_SW_64_R9: + return PERF_REG_SW64_R9; + case UNW_SW_64_R10: + return PERF_REG_SW64_R10; + case UNW_SW_64_R11: + return PERF_REG_SW64_R11; + case UNW_SW_64_R12: + return PERF_REG_SW64_R12; + case UNW_SW_64_R13: + return PERF_REG_SW64_R13; + case UNW_SW_64_R14: + return PERF_REG_SW64_R14; + case UNW_SW_64_R15: + return PERF_REG_SW64_R15; + case UNW_SW_64_R16: + return PERF_REG_SW64_R16; + case UNW_SW_64_R17: + return PERF_REG_SW64_R17; + case UNW_SW_64_R18: + return PERF_REG_SW64_R18; + case UNW_SW_64_R19: + return PERF_REG_SW64_R19; + case UNW_SW_64_R20: + return PERF_REG_SW64_R20; + case UNW_SW_64_R21: + return PERF_REG_SW64_R21; + case UNW_SW_64_R22: + return PERF_REG_SW64_R22; + case UNW_SW_64_R23: + return PERF_REG_SW64_R23; + case UNW_SW_64_R24: + return PERF_REG_SW64_R24; + case UNW_SW_64_R25: + return PERF_REG_SW64_R25; + case UNW_SW_64_R26: + return PERF_REG_SW64_R26; + case UNW_SW_64_R27: + return PERF_REG_SW64_R27; + case UNW_SW_64_R28: + return PERF_REG_SW64_R28; + case UNW_SW_64_R29: + return PERF_REG_SW64_GP; + case UNW_SW_64_R30: + return PERF_REG_SW64_SP; + case UNW_SW_64_PC: + return PERF_REG_SW64_PC; + default: + pr_err("unwind: invalid reg id %d\n", regnum); + return -EINVAL; + } + + return -EINVAL; +} diff --git a/tools/perf/arch/x86/util/env.c b/tools/perf/arch/x86/util/env.c index 3e537ffb1353aab2595496ef0848501c1fea42b3..f1de12d20b2ae6bf79216406935dd4735a0c855e 100644 --- a/tools/perf/arch/x86/util/env.c +++ b/tools/perf/arch/x86/util/env.c @@ -17,3 +17,18 @@ bool x86__is_amd_cpu(void) ret: return is_amd >= 1 ? 
true : false; } + +bool x86__is_hygon_cpu(void) +{ + struct perf_env env = { .total_mem = 0, }; + static int is_hygon; /* 0: Uninitialized, 1: Yes, -1: No */ + + if (is_hygon) + goto ret; + + perf_env__cpuid(&env); + is_hygon = env.cpuid && strstarts(env.cpuid, "HygonGenuine") ? 1 : -1; + perf_env__exit(&env); +ret: + return is_hygon >= 1 ? true : false; +} diff --git a/tools/perf/arch/x86/util/env.h b/tools/perf/arch/x86/util/env.h index d78f080b6b3f889a3b5921d4ce5e3295cf340dcf..904d5e2283606a6ddc0d8be9a0538c74c7f63889 100644 --- a/tools/perf/arch/x86/util/env.h +++ b/tools/perf/arch/x86/util/env.h @@ -3,5 +3,6 @@ #define _X86_ENV_H bool x86__is_amd_cpu(void); +bool x86__is_hygon_cpu(void); #endif /* _X86_ENV_H */ diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c index 191b372f9a2d3630a849a32a52e49c10cd9fd353..f8d9aecbf2f28763b5361d48b03dba54022affa3 100644 --- a/tools/perf/arch/x86/util/mem-events.c +++ b/tools/perf/arch/x86/util/mem-events.c @@ -33,7 +33,7 @@ struct perf_mem_event *perf_mem_events__ptr(int i) if (i >= PERF_MEM_EVENTS__MAX) return NULL; - if (x86__is_amd_cpu()) + if (x86__is_amd_cpu() || x86__is_hygon_cpu()) return &perf_mem_events_amd[i]; return &perf_mem_events_intel[i]; diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c index f428cffb037818fb908f61c325aeef88a97471cc..0af2562364665d6405b9fcb4913190513b999bba 100644 --- a/tools/perf/arch/x86/util/pmu.c +++ b/tools/perf/arch/x86/util/pmu.c @@ -174,7 +174,7 @@ const char *pmu_find_alias_name(const char *name) int perf_pmus__num_mem_pmus(void) { /* AMD uses IBS OP pmu and not a core PMU for perf mem/c2c */ - if (x86__is_amd_cpu()) + if (x86__is_amd_cpu() || x86__is_hygon_cpu()) return 1; /* Intel uses core pmus for perf mem/c2c */ diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index b94ae33a343c2a321f603e192082d9f2658d5d07..47406ca3ce4f21601c6e0e26cd000c83ac846541 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -906,10 +906,30 @@ static int record__config_off_cpu(struct record *rec) return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts); } +static bool record__tracking_system_wide(struct record *rec) +{ + struct evlist *evlist = rec->evlist; + struct evsel *evsel; + + /* + * If a non-dummy evsel exists, system_wide sideband is needed to + * help parse sample information. + * For example, a PERF_EVENT_MMAP event helps parse symbols, + * and a PERF_EVENT_COMM event helps parse the task executable name. + */ + evlist__for_each_entry(evlist, evsel) { + if (!evsel__is_dummy_event(evsel)) + return true; + } + + return false; +} + static int record__config_tracking_events(struct record *rec) { struct record_opts *opts = &rec->opts; struct evlist *evlist = rec->evlist; + bool system_wide = false; struct evsel *evsel; /* @@ -919,7 +939,15 @@ static int record__config_tracking_events(struct record *rec) */ if (opts->target.initial_delay || target__has_cpu(&opts->target) || perf_pmus__num_core_pmus() > 1) { - evsel = evlist__findnew_tracking_event(evlist, false); + + /* + * User space tasks can migrate between CPUs, so when tracing + * selected CPUs, sideband for all CPUs is still needed. 
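+ * Otherwise mmap and comm records emitted while a task runs on a + * CPU outside the traced set would be missed.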
+ */ + if (!!opts->target.cpu_list && record__tracking_system_wide(rec)) + system_wide = true; + + evsel = evlist__findnew_tracking_event(evlist, system_wide); if (!evsel) return -ENOMEM; diff --git a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json new file mode 100644 index 0000000000000000000000000000000000000000..428605c37d10bcb5aef284aaa5b8279085c0002d --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json @@ -0,0 +1,266 @@ +[ + { + "EventName": "hnf_cache_miss", + "EventidCode": "0x1", + "NodeType": "0x5", + "BriefDescription": "Counts total cache misses in first lookup result (high priority).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_slc_sf_cache_access", + "EventidCode": "0x2", + "NodeType": "0x5", + "BriefDescription": "Counts number of cache accesses in first access (high priority).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_cache_fill", + "EventidCode": "0x3", + "NodeType": "0x5", + "BriefDescription": "Counts total allocations in HN SLC (all cache line allocations to SLC).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_pocq_retry", + "EventidCode": "0x4", + "NodeType": "0x5", + "BriefDescription": "Counts number of retried requests.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_pocq_reqs_recvd", + "EventidCode": "0x5", + "NodeType": "0x5", + "BriefDescription": "Counts number of requests that HN receives.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_sf_hit", + "EventidCode": "0x6", + "NodeType": "0x5", + "BriefDescription": "Counts number of SF hits.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_sf_evictions", + "EventidCode": "0x7", + "NodeType": "0x5", + "BriefDescription": "Counts number of SF eviction cache invalidations initiated.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_dir_snoops_sent", + "EventidCode": "0x8", + "NodeType": "0x5", + "BriefDescription": "Counts number of directed snoops sent (not including SF back invalidation).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_brd_snoops_sent", + "EventidCode": "0x9", + "NodeType": "0x5", + "BriefDescription": "Counts number of multicast snoops sent (not including SF back invalidation).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_slc_eviction", + "EventidCode": "0xa", + "NodeType": "0x5", + "BriefDescription": "Counts number of SLC evictions (dirty only).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_slc_fill_invalid_way", + "EventidCode": "0xb", + "NodeType": "0x5", + "BriefDescription": "Counts number of SLC fills to an invalid way.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_mc_retries", + "EventidCode": "0xc", + "NodeType": "0x5", + "BriefDescription": "Counts number of retried transactions by the MC.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_mc_reqs", + "EventidCode": "0xd", + "NodeType": "0x5", + "BriefDescription": "Counts number of requests that are sent to MC.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_qos_hh_retry", + "EventidCode": "0xe", + "NodeType": "0x5", + "BriefDescription": "Counts number of 
times a HighHigh priority request is protocol retried at the HN-F.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s0_rdata_beats", + "EventidCode": "0x1", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 0. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s1_rdata_beats", + "EventidCode": "0x2", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 1. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s2_rdata_beats", + "EventidCode": "0x3", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 2. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_rxdat_flits", + "EventidCode": "0x4", + "NodeType": "0xa", + "BriefDescription": "Number of RXDAT flits received. This event measures the true read data bandwidth, excluding CMOs.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txdat_flits", + "EventidCode": "0x5", + "NodeType": "0xa", + "BriefDescription": "Number of TXDAT flits dispatched. This event measures the write bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txreq_flits_total", + "EventidCode": "0x6", + "NodeType": "0xa", + "BriefDescription": "Number of TXREQ flits dispatched. This event measures the total request bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txreq_flits_retried", + "EventidCode": "0x7", + "NodeType": "0xa", + "BriefDescription": "Number of retried TXREQ flits dispatched. This event measures the retry rate.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_txrsp_retryack", + "EventidCode": "0x4", + "NodeType": "0x7", + "BriefDescription": "Number of RXREQ flits dispatched. This event is a measure of the retry rate.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_txdat_flitv", + "EventidCode": "0x5", + "NodeType": "0x7", + "BriefDescription": "Number of TXDAT flits dispatched from XP to SBSX. This event is a measure of the write bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_arvalid_no_arready", + "EventidCode": "0x21", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on AR channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_awvalid_no_awready", + "EventidCode": "0x22", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on AW channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_wvalid_no_wready", + "EventidCode": "0x23", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on W channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_txrsp_retryack", + "EventidCode": "0x2a", + "NodeType": "0x4", + "BriefDescription": "Number of RXREQ flits dispatched. 
This event is a measure of the retry rate.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_arvalid_no_arready", + "EventidCode": "0x2b", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on AR channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_arready_no_arvalid", + "EventidCode": "0x2c", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the AR channel is waiting for new requests from HN-I bridge.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_awvalid_no_awready", + "EventidCode": "0x2d", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on AW channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_awready_no_awvalid", + "EventidCode": "0x2e", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the AW channel is waiting for new requests from HN-I bridge.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_wvalid_no_wready", + "EventidCode": "0x2f", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on W channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_txdat_stall", + "EventidCode": "0x30", + "NodeType": "0x4", + "BriefDescription": "TXDAT valid but no link credit available.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json new file mode 100644 index 0000000000000000000000000000000000000000..f7823bd265db26b218aa886a32c7a9fc12481ac8 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json @@ -0,0 +1,74 @@ +[ + { + "MetricName": "slc_miss_rate", + "BriefDescription": "The system level cache miss rate.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_cache_miss / hnf_slc_sf_cache_access", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "hnf_message_retry_rate", + "BriefDescription": "HN-F message retry rate indicates whether a lack of credits is causing the bottleneck.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_pocq_retry / hnf_pocq_reqs_recvd", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "sf_hit_rate", + "BriefDescription": "Snoop filter hit rate can be used to measure the snoop filter efficiency.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_sf_hit / hnf_slc_sf_cache_access", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "mc_message_retry_rate", + "BriefDescription": "The memory controller request retry rate indicates whether the memory controller is the bottleneck.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_mc_retries / hnf_mc_reqs", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "rni_actual_read_bandwidth.all", + "BriefDescription": "This event measures the actual bandwidth that the RN-I bridge sends to the interconnect.", + "MetricGroup": "cmn", + "MetricExpr": "rnid_rxdat_flits * 32 / 1e6 / duration_time", + "ScaleUnit": "1MB/s", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "rni_actual_write_bandwidth.all", + "BriefDescription": 
"This event measures the actual write bandwidth at RN-I bridges.", + "MetricGroup": "cmn", + "MetricExpr": "rnid_txdat_flits * 32 / 1e6 / duration_time", + "ScaleUnit": "1MB/s", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "rni_retry_rate", + "BriefDescription": "RN-I bridge retry rate indicates whether the memory controller is the bottleneck.", + "MetricGroup": "cmn", + "MetricExpr": "rnid_txreq_flits_retried / rnid_txreq_flits_total", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "sbsx_actual_write_bandwidth.all", + "BriefDescription": "sbsx actual write bandwidth.", + "MetricGroup": "cmn", + "MetricExpr": "sbsx_txdat_flitv * 32 / 1e6 / duration_time", + "ScaleUnit": "1MB/s", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + } +] diff --git a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json index c7e7528db3158f2caecbea67059efbf347a97dc3..4d423b149ad128f64e82fc94483d6b764a66ef52 100644 --- a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json +++ b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json @@ -12,5 +12,13 @@ "EventName": "sys_ccn_pmu.read_cycles", "Unit": "sys_ccn_pmu", "Compat": "0x01" + }, + { + "BriefDescription": "Counts total cache misses in first lookup result (high priority)", + "EventidCode": "0x1", + "NodeType": "0x5", + "EventName": "sys_cmn_pmu.hnf_cache_miss", + "Unit": "sys_cmn_pmu", + "Compat": "(434|436|43c|43a).*" } ] diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c index 12bd043a05e3579d090ffbc252a8852afad3036f..13727421d424b1e37972d38f90fce4c3d60181c9 100644 --- a/tools/perf/pmu-events/empty-pmu-events.c +++ b/tools/perf/pmu-events/empty-pmu-events.c @@ -244,6 +244,14 @@ static const struct pmu_event pmu_events__test_soc_sys[] = { .topic = "uncore", .pmu = "uncore_sys_ccn_pmu", }, + { + .name = "sys_cmn_pmu.hnf_cache_miss", + .event = "eventid=0x1,type=0x5", + .desc = "Counts total cache misses in first lookup result (high priority). 
Unit: uncore_sys_cmn_pmu ", + .compat = "(434|436|43c|43a).*", + .topic = "uncore", + .pmu = "uncore_sys_cmn_pmu", + }, { .name = 0, .event = 0, diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py index 72ba4a9239c6bff6da2685c311e60c19e3b85562..ae2bd49e880562553c2c4bad56d53d8dc2f681bc 100755 --- a/tools/perf/pmu-events/jevents.py +++ b/tools/perf/pmu-events/jevents.py @@ -289,6 +289,7 @@ class JsonEvent: 'cpu_core': 'cpu_core', 'cpu_atom': 'cpu_atom', 'ali_drw': 'ali_drw', + 'arm_cmn': 'arm_cmn', } return table[unit] if unit in table else f'uncore_{unit.lower()}' @@ -298,6 +299,7 @@ class JsonEvent: if 'ExtSel' in jd: eventcode |= int(jd['ExtSel']) << 8 configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None + eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None self.name = jd['EventName'].lower() if 'EventName' in jd else None self.topic = '' self.compat = jd.get('Compat') @@ -335,7 +337,13 @@ class JsonEvent: if precise and self.desc and '(Precise Event)' not in self.desc: extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise ' 'event)') - event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}' + event = None + if configcode is not None: + event = f'config={llx(configcode)}' + elif eventidcode is not None: + event = f'eventid={llx(eventidcode)}' + else: + event = f'event={llx(eventcode)}' event_fields = [ ('AnyThread', 'any='), ('PortMask', 'ch_mask='), @@ -345,6 +353,7 @@ ('Invert', 'inv='), ('SampleAfterValue', 'period='), ('UMask', 'umask='), + ('NodeType', 'type='), ] for key, value in event_fields: if key in jd and jd[key] != '0': diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index 2b45ffa462a6c4b9b29629dde2fa8262620bbe61..29c065768a8bda62bfd69ebb7eabdd238d9442fe 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build @@ -68,7 +68,7 @@ perf-y += event_groups.o perf-y += symbols.o perf-y += util.o -ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc)) +ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc sw_64)) perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o endif diff --git a/tools/perf/tests/attr/test-record-dummy-C0 b/tools/perf/tests/attr/test-record-dummy-C0 new file mode 100644 index 0000000000000000000000000000000000000000..576ec48b3aafaa6a783e3e58f3ecd72cc927521b --- /dev/null +++ b/tools/perf/tests/attr/test-record-dummy-C0 @@ -0,0 +1,55 @@ +[config] +command = record +args = --no-bpf-event -e dummy -C 0 kill >/dev/null 2>&1 +ret = 1 + +[event] +fd=1 +group_fd=-1 +cpu=0 +pid=-1 +flags=8 +type=1 +size=136 +config=9 +sample_period=4000 +# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | +# PERF_SAMPLE_PERIOD +# + PERF_SAMPLE_CPU added by -C 0 +sample_type=391 +read_format=4|20 +disabled=0 +inherit=1 +pinned=0 +exclusive=0 +exclude_user=0 +exclude_kernel=0 +exclude_hv=0 +exclude_idle=0 +mmap=1 +comm=1 +freq=1 +inherit_stat=0 +enable_on_exec=0 +task=1 +watermark=0 +precise_ip=0 +mmap_data=0 +sample_id_all=1 +exclude_host=0 +exclude_guest=1 +exclude_callchain_kernel=0 +exclude_callchain_user=0 +mmap2=1 +comm_exec=1 +context_switch=0 +write_backward=0 +namespaces=0 +use_clockid=0 +wakeup_events=0 +bp_type=0 +config1=0 +config2=0 +branch_sample_type=0 +sample_regs_user=0 +sample_stack_user=0 diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index d47f1f8711641c77fffde3523b6a19f8e7f112b5..2b66ffba3bb0901db2aa3888babb008ec26e4132 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c
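With the JSON and jevents.py pieces above in place, CMN system events are encoded as `eventid=...,type=...' strings and perf picks the PMU instance whose identifier matches the "Compat" regular expression. A minimal usage sketch, not part of this series (assumptions: a CMN mesh whose PMU identifier matches "(434|436|43c|43a).*", and an instance named arm_cmn_0)::

	# Count HN-F SLC/SF cache misses through the generated alias
	perf stat -e hnf_cache_miss -a -- sleep 1

	# The equivalent raw encoding, as emitted by jevents.py
	perf stat -e arm_cmn_0/eventid=0x1,type=0x5/ -a -- sleep 1

	# Evaluate one of the metrics added in metric.json above
	perf stat -M slc_miss_rate -a -- sleep 1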
@@ -2514,9 +2514,14 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest while ((pmu = perf_pmus__scan(pmu)) != NULL) { struct stat st; char path[PATH_MAX]; + char pmu_event[PATH_MAX]; + char *buf = NULL; + FILE *file; struct dirent *ent; + size_t len = 0; DIR *dir; int err; + int n; snprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/events/", sysfs__mountpoint(), pmu->name); @@ -2538,11 +2543,45 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest struct evlist_test e = { .name = NULL, }; char name[2 * NAME_MAX + 1 + 12 + 3]; int test_ret; + bool is_event_parameterized = false; /* Names containing . are special and cannot be used directly */ if (strchr(ent->d_name, '.')) continue; + /* Exclude parameterized events (the event string contains '?') */ + n = snprintf(pmu_event, sizeof(pmu_event), "%s%s", path, ent->d_name); + if (n >= PATH_MAX) { + pr_err("pmu event file path exceeds PATH_MAX (%d)\n", PATH_MAX); + continue; + } + + file = fopen(pmu_event, "r"); + if (!file) { + pr_debug("can't open pmu event file for '%s'\n", ent->d_name); + ret = combine_test_results(ret, TEST_FAIL); + continue; + } + + if (getline(&buf, &len, file) < 0) { + pr_debug(" pmu event: %s is a null event\n", ent->d_name); + ret = combine_test_results(ret, TEST_FAIL); + fclose(file); + continue; + } + + if (strchr(buf, '?')) + is_event_parameterized = true; + + free(buf); + buf = NULL; + fclose(file); + + if (is_event_parameterized) { + pr_debug("skipping parameterized PMU event: %s (contains '?')\n", pmu_event); + continue; + } + snprintf(name, sizeof(name), "%s/event=%s/u", pmu->name, ent->d_name); e.name = name; diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c index f5321fbdee79d1e591bfd643e73b9b09360123e2..a56d32905743a00aa49d62fde542b426ee96ab5d 100644 --- a/tools/perf/tests/pmu-events.c +++ b/tools/perf/tests/pmu-events.c @@ -245,7 +245,7 @@ static const struct perf_pmu_test_event sys_ddr_pmu_write_cycles = { }, .alias_str = "event=0x2b", .alias_long_desc = "ddr write-cycles event", - .matching_pmu = "uncore_sys_ddr_pmu", + .matching_pmu = "uncore_sys_ddr_pmu0", }; static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = { @@ -259,12 +259,27 @@ static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = { }, .alias_str = "config=0x2c", .alias_long_desc = "ccn read-cycles event", - .matching_pmu = "uncore_sys_ccn_pmu", + .matching_pmu = "uncore_sys_ccn_pmu4", +}; + +static const struct perf_pmu_test_event sys_cmn_pmu_hnf_cache_miss = { + .event = { + .name = "sys_cmn_pmu.hnf_cache_miss", + .event = "eventid=0x1,type=0x5", + .desc = "Counts total cache misses in first lookup result (high priority)", + .topic = "uncore", + .pmu = "uncore_sys_cmn_pmu", + .compat = "(434|436|43c|43a).*", + }, + .alias_str = "eventid=0x1,type=0x5", + .alias_long_desc = "Counts total cache misses in first lookup result (high priority)", + .matching_pmu = "uncore_sys_cmn_pmu0", }; static const struct perf_pmu_test_event *sys_events[] = { &sys_ddr_pmu_write_cycles, &sys_ccn_pmu_read_cycles, + &sys_cmn_pmu_hnf_cache_miss, NULL }; @@ -615,6 +630,12 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu) .count = &matched_count, }; + if (strcmp(pmu_name, test_event.matching_pmu)) { + pr_debug("testing aliases uncore PMU %s: mismatched matching_pmu, %s vs %s\n", + pmu_name, test_event.matching_pmu, pmu_name); + return -1; + } + err = perf_pmu__find_event(pmu, event->name, &args, 
test_core_pmu_event_aliases_cb); if (err) { @@ -701,6 +722,46 @@ static struct perf_pmu_test_pmu test_pmus[] = { &sys_ccn_pmu_read_cycles, }, }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43401", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43602", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43c03", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43a01", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + } }; /* Test that aliases generated are as expected */ diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh new file mode 100644 index 0000000000000000000000000000000000000000..561c93b75d77a57a28531a60d91d833cd689f24a --- /dev/null +++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +perf_has_symbol() +{ + if perf test -vv -F "Symbols" 2>&1 | grep "[[:space:]]$1$"; then + echo "perf does have symbol '$1'" + return 0 + fi + echo "perf does not have symbol '$1'" + return 1 +} + +skip_test_missing_symbol() +{ + if ! perf_has_symbol "$1" ; then + echo "perf is missing symbols - skipping test" + exit 2 + fi + return 0 +} diff --git a/tools/perf/tests/shell/pipe_test.sh b/tools/perf/tests/shell/pipe_test.sh index 8dd115dd35a7e1d0e57b5907459d0e995221e9c2..a78d35d2cff070d731769e1f8c73cb54354d7835 100755 --- a/tools/perf/tests/shell/pipe_test.sh +++ b/tools/perf/tests/shell/pipe_test.sh @@ -2,10 +2,17 @@ # perf pipe recording and injection test # SPDX-License-Identifier: GPL-2.0 +shelldir=$(dirname "$0") +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + +sym="noploop" + +skip_test_missing_symbol ${sym} + data=$(mktemp /tmp/perf.data.XXXXXX) prog="perf test -w noploop" task="perf" -sym="noploop" if ! perf record -e task-clock:u -o - ${prog} | perf report -i - --task | grep ${task}; then echo "cannot find the test file in the perf report" diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 89214a6d9951f1d2c173a638037d928f706cc2f9..1dcb91f8a847a43c224192518dd1ba73aee02b85 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -43,7 +43,10 @@ trace_libc_inet_pton_backtrace() { ;; ppc64|ppc64le) eventattr='max-stack=4' - echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected + # Add gaih_inet to expected backtrace only if it is part of libc. + if nm $libc | grep -F -q gaih_inet.; then + echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected + fi echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh index 4fbc74805d526a6b54df154fcf5fc59900aabe28..3988b10b3f598f5e1bb32df5c9f99054f4228d56 100755 --- a/tools/perf/tests/shell/record.sh +++ b/tools/perf/tests/shell/record.sh @@ -7,10 +7,15 @@ set -e shelldir=$(dirname "$0") . "${shelldir}"/lib/waiting.sh +# shellcheck source=lib/perf_has_symbol.sh +. 
"${shelldir}"/lib/perf_has_symbol.sh +testsym="test_loop" + +skip_test_missing_symbol ${testsym} + err=0 perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX) testprog="perf test -w thloop" -testsym="test_loop" cleanup() { rm -rf "${perfdata}" diff --git a/tools/perf/tests/shell/record_sideband.sh b/tools/perf/tests/shell/record_sideband.sh new file mode 100755 index 0000000000000000000000000000000000000000..5024a7ce0c517a2c6053154cbabf4ad809a0b407 --- /dev/null +++ b/tools/perf/tests/shell/record_sideband.sh @@ -0,0 +1,58 @@ +#!/bin/sh +# perf record sideband tests +# SPDX-License-Identifier: GPL-2.0 + +set -e + +err=0 +perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX) + +cleanup() +{ + rm -rf ${perfdata} + trap - EXIT TERM INT +} + +trap_cleanup() +{ + cleanup + exit 1 +} +trap trap_cleanup EXIT TERM INT + +can_cpu_wide() +{ + if ! perf record -o ${perfdata} -BN --no-bpf-event -C $1 true 2>&1 >/dev/null + then + echo "record sideband test [Skipped cannot record cpu$1]" + err=2 + fi + + rm -f ${perfdata} + return $err +} + +test_system_wide_tracking() +{ + # Need CPU 0 and CPU 1 + can_cpu_wide 0 || return 0 + can_cpu_wide 1 || return 0 + + # Record on CPU 0 a task running on CPU 1 + perf record -BN --no-bpf-event -o ${perfdata} -C 0 -- taskset --cpu-list 1 true + + # Should get MMAP events from CPU 1 + mmap_cnt=`perf script -i ${perfdata} --show-mmap-events -C 1 2>/dev/null | grep MMAP | wc -l` + + if [ ${mmap_cnt} -gt 0 ] ; then + return 0 + fi + + echo "Failed to record MMAP events on CPU 1 when tracing CPU 0" + return 1 +} + +test_system_wide_tracking + +cleanup +exit $err diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh index fb2b10547a113d79fd87aa1891f9ae31589716b9..25f8f8cf048582f1d3936ff8f75cc7f728da9587 100755 --- a/tools/perf/tests/shell/stat+std_output.sh +++ b/tools/perf/tests/shell/stat+std_output.sh @@ -12,7 +12,7 @@ stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX) event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses) event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches") -skip_metric=("stalled cycles per insn" "tma_") +skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound") cleanup() { rm -f "${stat_output}" diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh index 60cd35c73e47dbd820884f66cc70a54a7c9056cb..730526c632ceec6a147a57fbd2a7ec3671a6e945 100755 --- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh +++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh @@ -2,8 +2,14 @@ # Check Arm64 callgraphs are complete in fp mode # SPDX-License-Identifier: GPL-2.0 +shelldir=$(dirname "$0") +# shellcheck source=lib/perf_has_symbol.sh +. 
"${shelldir}"/lib/perf_has_symbol.sh + lscpu | grep -q "aarch64" || exit 2 +skip_test_missing_symbol leafloop + PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX) TEST_PROGRAM="perf test -w leafloop" diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh index 09908d71c9941d3c4e3cb1d81cc08617581e13d8..5f14d0cb013f838629446abc4f15484edb2cd7d8 100755 --- a/tools/perf/tests/shell/test_brstack.sh +++ b/tools/perf/tests/shell/test_brstack.sh @@ -4,6 +4,10 @@ # SPDX-License-Identifier: GPL-2.0 # German Gomez , 2022 +shelldir=$(dirname "$0") +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + # skip the test if the hardware doesn't support branch stack sampling # and if the architecture doesn't support filter types: any,save_type,u if ! perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev/null 2>&1 ; then @@ -11,6 +15,8 @@ if ! perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev exit 2 fi +skip_test_missing_symbol brstack_bench + TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX) TESTPROG="perf test -w brstack" diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh index 69bb6fe86c5078a8325dedbcc2ca045a6d552f2a..3dfa91832aa87f89b8f0ef0c1ba51df6d130d2d2 100755 --- a/tools/perf/tests/shell/test_data_symbol.sh +++ b/tools/perf/tests/shell/test_data_symbol.sh @@ -4,6 +4,13 @@ # SPDX-License-Identifier: GPL-2.0 # Leo Yan , 2022 +shelldir=$(dirname "$0") +# shellcheck source=lib/waiting.sh +. "${shelldir}"/lib/waiting.sh + +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + skip_if_no_mem_event() { perf mem record -e list 2>&1 | grep -E -q 'available' && return 0 return 2 @@ -11,8 +18,11 @@ skip_if_no_mem_event() { skip_if_no_mem_event || exit 2 +skip_test_missing_symbol buf1 + TEST_PROGRAM="perf test -w datasym" PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX) +ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX) check_result() { # The memory report format is as below: @@ -50,13 +60,15 @@ echo "Recording workload..." # specific CPU and test in per-CPU mode. is_amd=$(grep -E -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo) if (($is_amd >= 1)); then - perf mem record -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM & + perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}" & else - perf mem record --all-user -o ${PERF_DATA} -- $TEST_PROGRAM & + perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}" & fi PERFPID=$! 
+wait_for_perf_to_start ${PERFPID} "${ERR_FILE}" + sleep 1 kill $PERFPID diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 6d657c9927f7db2e72dc966ff067732115911414..89a051732e87d52feb2eee7a8b9b03e9025ca005 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -199,6 +199,7 @@ perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o perf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o perf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o +perf-$(CONFIG_LIBUNWIND_SW64) += libunwind/sw64.o ifeq ($(CONFIG_LIBTRACEEVENT),y) perf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 6dfe11cbf30e2b8f9ab966a576a6313985d65af8..51625af5f85df212f398ef8ebc3a21187fac8ed5 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -183,6 +183,9 @@ static struct arch architectures[] = { .comment_char = '#', }, }, + { + .name = "sw_64", + }, { .name = "x86", .init = x86__annotate_init, diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index d2c7b6e6eae51b723313b90e567bc26bd7cc095d..8175df5df556b57b5b891d0edb5316d04bf2866b 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -436,6 +436,8 @@ static const char *normalize_arch(char *arch) return "arm64"; if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110")) return "arm"; + if (!strncmp(arch, "sw_64", 5)) + return "sw_64"; if (!strncmp(arch, "s390", 4)) return "s390"; if (!strncmp(arch, "parisc", 6)) diff --git a/tools/perf/util/libunwind/sw64.c b/tools/perf/util/libunwind/sw64.c new file mode 100644 index 0000000000000000000000000000000000000000..12452bf2ab8b8539a317329969fffa8801e38082 --- /dev/null +++ b/tools/perf/util/libunwind/sw64.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file sets up defines to compile the arch-specific binary from the + * generic one. + * + * The function 'LIBUNWIND__ARCH_REG_ID' name is set according to the arch + * name, and the definition of this function is included directly from + * 'arch/sw_64/util/unwind-libunwind.c', to make sure that this function + * is defined no matter what arch the host is. + * + * Finally, the arch specific unwind methods are exported which will + * be assigned to each sw_64 thread. 
+ */ + +#define REMOTE_UNWIND_LIBUNWIND + +/* Define arch specific functions & regs for libunwind, should be + * defined before including "unwind.h" + */ +#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__sw_64_reg_id(regnum) +#define LIBUNWIND__ARCH_REG_IP PERF_REG_SW64_PC +#define LIBUNWIND__ARCH_REG_SP PERF_REG_SW64_SP + +#include "unwind.h" +#include "debug.h" +#include "libunwind-sw_64.h" +#include <../../../arch/sw_64/include/uapi/asm/perf_regs.h> +#include "../../arch/sw_64/util/unwind-libunwind.c" + +#include "util/unwind-libunwind-local.c" + +struct unwind_libunwind_ops * +sw64_unwind_libunwind_ops = &_unwind_libunwind_ops; diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index bb5faaa25d510f7ceba173e07ca77d4805310e09..ca3e0404f18720d7a3cc2376896195f55cf1192d 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -498,7 +498,7 @@ static int metricgroup__sys_event_iter(const struct pmu_metric *pm, while ((pmu = perf_pmus__scan(pmu))) { - if (!pmu->id || strcmp(pmu->id, pm->compat)) + if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id)) continue; return d->fn(pm, table, d->data); diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c index 76cd63de80a8efef85cf91645b4d77a746f6c32b..c2e84a827e33d698f3f19fde7a411a0f36916e88 100644 --- a/tools/perf/util/unwind-libunwind.c +++ b/tools/perf/util/unwind-libunwind.c @@ -11,6 +11,7 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops; +struct unwind_libunwind_ops __weak *sw64_unwind_libunwind_ops; static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops) { @@ -53,6 +54,9 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized } else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) { if (dso_type == DSO__TYPE_64BIT) ops = arm64_unwind_libunwind_ops; + } else if (!strcmp(arch, "sw_64")) { + if (dso_type == DSO__TYPE_64BIT) + ops = sw64_unwind_libunwind_ops; } if (!ops) { diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 5b61b8bb29f846a504992da2baaa952a3085bcf0..5c77c4e00443c83ac4351bd02ca59b2db9b77dd7 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -18,6 +18,7 @@ TARGETS += drivers/dma-buf TARGETS += drivers/s390x/uvdevice TARGETS += drivers/net/bonding TARGETS += drivers/net/team +TARGETS += drivers/platform/x86/intel/ifs TARGETS += efivarfs TARGETS += exec TARGETS += fchmodat2 diff --git a/tools/testing/selftests/arm64/fp/vec-syscfg.c b/tools/testing/selftests/arm64/fp/vec-syscfg.c index 5f648b97a06fea4df891e3e998c997b85bbc200a..ea9c7d47790f9daf44c0f49e789e72ee2200e46e 100644 --- a/tools/testing/selftests/arm64/fp/vec-syscfg.c +++ b/tools/testing/selftests/arm64/fp/vec-syscfg.c @@ -66,6 +66,11 @@ static struct vec_data vec_data[] = { }, }; +static bool vec_type_supported(struct vec_data *data) +{ + return getauxval(data->hwcap_type) & data->hwcap; +} + static int stdio_read_integer(FILE *f, const char *what, int *val) { int n = 0; @@ -564,8 +569,11 @@ static void prctl_set_all_vqs(struct vec_data *data) return; } - for (i = 0; i < ARRAY_SIZE(vec_data); i++) + for (i = 0; i < ARRAY_SIZE(vec_data); i++) { + if (!vec_type_supported(&vec_data[i])) + continue; orig_vls[i] = vec_data[i].rdvl(); + } for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) { vl = sve_vl_from_vq(vq); @@ -594,7 
+602,7 @@ static void prctl_set_all_vqs(struct vec_data *data) if (&vec_data[i] == data) continue; - if (!(getauxval(vec_data[i].hwcap_type) & vec_data[i].hwcap)) + if (!vec_type_supported(&vec_data[i])) continue; if (vec_data[i].rdvl() != orig_vls[i]) { @@ -765,7 +773,7 @@ int main(void) struct vec_data *data = &vec_data[i]; unsigned long supported; - supported = getauxval(data->hwcap_type) & data->hwcap; + supported = vec_type_supported(data); if (!supported) all_supported = false; diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h index 11ee801e75e7e048d6c1ea8e3dd4d252f274c35b..6c3b4d4f173ac62783db42894c20b9d9655bd5c8 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h @@ -34,6 +34,12 @@ DECLARE_TRACE(bpf_testmod_test_write_bare, TP_ARGS(task, ctx) ); +/* Used in bpf_testmod_test_read() to test __nullable suffix */ +DECLARE_TRACE(bpf_testmod_test_nullable_bare, + TP_PROTO(struct bpf_testmod_test_read_ctx *ctx__nullable), + TP_ARGS(ctx__nullable) +); + #undef BPF_TESTMOD_DECLARE_TRACE #ifdef DECLARE_TRACE_WRITABLE #define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \ diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 2e8adf059fa3b867f977255d3be13412c1c7c42e..77527d866b9feeae9bd6d7bdc2d5952140d9dfb2 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ +#include <linux/bpf.h> #include <linux/btf.h> #include <linux/btf_ids.h> #include <linux/error-injection.h> @@ -40,9 +41,7 @@ struct bpf_testmod_struct_arg_4 { int b; }; -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in bpf_testmod.ko BTF"); +__bpf_hook_start(); noinline int bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) { @@ -282,6 +281,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, if (bpf_testmod_loop_test(101) > 100) trace_bpf_testmod_test_read(current, &ctx); + trace_bpf_testmod_test_nullable_bare(NULL); + /* Magic number to enable writable tp */ if (len == 64) { struct bpf_testmod_test_writable_ctx writable = { @@ -332,7 +333,7 @@ noinline int bpf_fentry_shadow_test(int a) } EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test); -__diag_pop(); +__bpf_hook_end(); static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { .attr = { .name = "bpf_testmod", .mode = 0666, }, @@ -340,11 +341,11 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { .write = bpf_testmod_test_write, }; -BTF_SET8_START(bpf_testmod_common_kfunc_ids) +BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY) -BTF_SET8_END(bpf_testmod_common_kfunc_ids) +BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids) static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = { .owner = THIS_MODULE, @@ -490,7 +491,7 @@ __bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused return arg; } -BTF_SET8_START(bpf_testmod_check_kfunc_ids) +BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) BTF_ID_FLAGS(func, bpf_kfunc_call_test1) BTF_ID_FLAGS(func, 
bpf_kfunc_call_test2) @@ -516,13 +517,76 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU) BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE) BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg) BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset) -BTF_SET8_END(bpf_testmod_check_kfunc_ids) +BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids) + +static int bpf_testmod_ops_init(struct btf *btf) +{ + return 0; +} + +static bool bpf_testmod_ops_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + return bpf_tracing_btf_ctx_access(off, size, type, prog, info); +} + +static int bpf_testmod_ops_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + return 0; +} static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { .owner = THIS_MODULE, .set = &bpf_testmod_check_kfunc_ids, }; +static const struct bpf_verifier_ops bpf_testmod_verifier_ops = { + .is_valid_access = bpf_testmod_ops_is_valid_access, +}; + +static int bpf_dummy_reg(void *kdata) +{ + struct bpf_testmod_ops *ops = kdata; + + ops->test_2(4, 3); + + return 0; +} + +static void bpf_dummy_unreg(void *kdata) +{ +} + +static int bpf_testmod_test_1(void) +{ + return 0; +} + +static int bpf_testmod_test_2(int a, int b) +{ + return 0; +} + +static struct bpf_testmod_ops __bpf_testmod_ops = { + .test_1 = bpf_testmod_test_1, + .test_2 = bpf_testmod_test_2, +}; + +struct bpf_struct_ops bpf_bpf_testmod_ops = { + .verifier_ops = &bpf_testmod_verifier_ops, + .init = bpf_testmod_ops_init, + .init_member = bpf_testmod_ops_init_member, + .reg = bpf_dummy_reg, + .unreg = bpf_dummy_unreg, + .cfi_stubs = &__bpf_testmod_ops, + .name = "bpf_testmod_ops", + .owner = THIS_MODULE, +}; + extern int bpf_fentry_test1(int a); static int bpf_testmod_init(void) @@ -533,6 +598,7 @@ static int bpf_testmod_init(void) ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); + ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops); if (ret < 0) return ret; if (bpf_fentry_test1(0) < 0) diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h index f32793efe095ba68bc1a388b46486dbb68d4c6a4..ca5435751c79460381892924b13f8befb34b58a7 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h @@ -28,4 +28,9 @@ struct bpf_iter_testmod_seq { int cnt; }; +struct bpf_testmod_ops { + int (*test_1)(void); + int (*test_2)(int a, int b); +}; + #endif /* _BPF_TESTMOD_H */ diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c index d3d94596ab79cf0f4b1d78a60a03b69231da17f4..f43fcb13d2c460bb5761ef083b3d8ad72a83b737 100644 --- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c +++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c @@ -98,8 +98,7 @@ static void test_dummy_init_ptr_arg(void) static void test_dummy_multiple_args(void) { - struct bpf_dummy_ops_state st = { 7 }; - __u64 args[5] = {(__u64)&st, -100, 0x8a5f, 'c', 0x1234567887654321ULL}; + __u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL}; LIBBPF_OPTS(bpf_test_run_opts, attr, .ctx_in = args, 
.ctx_size_in = sizeof(args), @@ -116,7 +115,6 @@ static void test_dummy_multiple_args(void) fd = bpf_program__fd(skel->progs.test_2); err = bpf_prog_test_run_opts(fd, &attr); ASSERT_OK(err, "test_run"); - args[0] = 7; for (i = 0; i < ARRAY_SIZE(args); i++) { snprintf(name, sizeof(name), "arg %zu", i); ASSERT_EQ(skel->bss->test_2_args[i], args[i], name); @@ -126,32 +124,6 @@ static void test_dummy_multiple_args(void) } static void test_dummy_sleepable(void) -{ - struct bpf_dummy_ops_state st; - __u64 args[1] = {(__u64)&st}; - LIBBPF_OPTS(bpf_test_run_opts, attr, - .ctx_in = args, - .ctx_size_in = sizeof(args), - ); - struct dummy_st_ops_success *skel; - int fd, err; - - skel = dummy_st_ops_success__open_and_load(); - if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) - return; - - fd = bpf_program__fd(skel->progs.test_sleepable); - err = bpf_prog_test_run_opts(fd, &attr); - ASSERT_OK(err, "test_run"); - - dummy_st_ops_success__destroy(skel); -} - -/* dummy_st_ops.test_sleepable() parameter is not marked as nullable, - * thus bpf_prog_test_run_opts() below should be rejected as it tries - * to pass NULL for this parameter. - */ -static void test_dummy_sleepable_reject_null(void) { __u64 args[1] = {0}; LIBBPF_OPTS(bpf_test_run_opts, attr, @@ -167,7 +139,7 @@ static void test_dummy_sleepable_reject_null(void) fd = bpf_program__fd(skel->progs.test_sleepable); err = bpf_prog_test_run_opts(fd, &attr); - ASSERT_EQ(err, -EINVAL, "test_run"); + ASSERT_OK(err, "test_run"); dummy_st_ops_success__destroy(skel); } @@ -184,8 +156,6 @@ void test_dummy_st_ops(void) test_dummy_multiple_args(); if (test__start_subtest("dummy_sleepable")) test_dummy_sleepable(); - if (test__start_subtest("dummy_sleepable_reject_null")) - test_dummy_sleepable_reject_null(); RUN_TESTS(dummy_st_ops_fail); } diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c index 7cfac53c0d58dba9068ab3169873edded40ad18d..b614a5272dfd6486e287181270a0bcf63f638344 100644 --- a/tools/testing/selftests/bpf/prog_tests/dynptr.c +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -9,6 +9,7 @@ enum test_setup_type { SETUP_SYSCALL_SLEEP, SETUP_SKB_PROG, + SETUP_SKB_PROG_TP, }; static struct { @@ -28,6 +29,7 @@ static struct { {"test_dynptr_clone", SETUP_SKB_PROG}, {"test_dynptr_skb_no_buff", SETUP_SKB_PROG}, {"test_dynptr_skb_strcmp", SETUP_SKB_PROG}, + {"test_dynptr_skb_tp_btf", SETUP_SKB_PROG_TP}, }; static void verify_success(const char *prog_name, enum test_setup_type setup_type) @@ -35,7 +37,7 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ struct dynptr_success *skel; struct bpf_program *prog; struct bpf_link *link; - int err; + int err; skel = dynptr_success__open(); if (!ASSERT_OK_PTR(skel, "dynptr_success__open")) @@ -47,7 +49,7 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) goto cleanup; - bpf_program__set_autoload(prog, true); + bpf_program__set_autoload(prog, true); err = dynptr_success__load(skel); if (!ASSERT_OK(err, "dynptr_success__load")) @@ -87,6 +89,37 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ break; } + case SETUP_SKB_PROG_TP: + { + struct __sk_buff skb = {}; + struct bpf_object *obj; + int aux_prog_fd; + + /* Just use its test_run to trigger kfree_skb tracepoint */ + err = bpf_prog_test_load("./test_pkt_access.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &aux_prog_fd); + if (!ASSERT_OK(err, "prog_load 
sched cls")) + goto cleanup; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + ); + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "bpf_program__attach")) + goto cleanup; + + err = bpf_prog_test_run_opts(aux_prog_fd, &topts); + bpf_link__destroy(link); + + if (!ASSERT_OK(err, "test_run")) + goto cleanup; + + break; + } } ASSERT_EQ(skel->bss->err, 0, "err"); diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c new file mode 100644 index 0000000000000000000000000000000000000000..ae98a48775ece999b03423fdcc8ef5f35106aad9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include +#include + +#include "struct_ops_module.skel.h" + +static void check_map_info(struct bpf_map_info *info) +{ + struct bpf_btf_info btf_info; + char btf_name[256]; + u32 btf_info_len = sizeof(btf_info); + int err, fd; + + fd = bpf_btf_get_fd_by_id(info->btf_vmlinux_id); + if (!ASSERT_GE(fd, 0, "get_value_type_btf_obj_fd")) + return; + + memset(&btf_info, 0, sizeof(btf_info)); + btf_info.name = ptr_to_u64(btf_name); + btf_info.name_len = sizeof(btf_name); + err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_info_len); + if (!ASSERT_OK(err, "get_value_type_btf_obj_info")) + goto cleanup; + + if (!ASSERT_EQ(strcmp(btf_name, "bpf_testmod"), 0, "get_value_type_btf_obj_name")) + goto cleanup; + +cleanup: + close(fd); +} + +static void test_struct_ops_load(void) +{ + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); + struct struct_ops_module *skel; + struct bpf_map_info info = {}; + struct bpf_link *link; + int err; + u32 len; + + skel = struct_ops_module__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "struct_ops_module_open")) + return; + + err = struct_ops_module__load(skel); + if (!ASSERT_OK(err, "struct_ops_module_load")) + goto cleanup; + + len = sizeof(info); + err = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.testmod_1), &info, + &len); + if (!ASSERT_OK(err, "bpf_map_get_info_by_fd")) + goto cleanup; + + link = bpf_map__attach_struct_ops(skel->maps.testmod_1); + ASSERT_OK_PTR(link, "attach_test_mod_1"); + + /* test_2() will be called from bpf_dummy_reg() in bpf_testmod.c */ + ASSERT_EQ(skel->bss->test_2_result, 7, "test_2_result"); + + bpf_link__destroy(link); + + check_map_info(&info); + +cleanup: + struct_ops_module__destroy(skel); +} + +void serial_test_struct_ops_module(void) +{ + if (test__start_subtest("test_struct_ops_load")) + test_struct_ops_load(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c b/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c new file mode 100644 index 0000000000000000000000000000000000000000..accc42e01f8a88f0ee27f4336149f7613b7ca94b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include "test_tp_btf_nullable.skel.h" + +void test_tp_btf_nullable(void) +{ + if (!env.has_testmod) { + test__skip(); + return; + } + + RUN_TESTS(test_tp_btf_nullable); +} diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c index ec0c595d47af8439ba15636c3000236ca4b678e9..1efa746c25dc7767dfeb1d0ca6d9d28aa1ddabf9 100644 --- 
a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c +++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c @@ -11,17 +11,8 @@ int BPF_PROG(test_1, struct bpf_dummy_ops_state *state) { int ret; - /* Check that 'state' nullable status is detected correctly. - * If 'state' argument would be assumed non-null by verifier - * the code below would be deleted as dead (which it shouldn't). - * Hide it from the compiler behind 'asm' block to avoid - * unnecessary optimizations. - */ - asm volatile ( - "if %[state] != 0 goto +2;" - "r0 = 0xf2f3f4f5;" - "exit;" - ::[state]"p"(state)); + if (!state) + return 0xf2f3f4f5; ret = state->val; state->val = 0x5a; @@ -34,7 +25,7 @@ SEC("struct_ops/test_2") int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2, char a3, unsigned long a4) { - test_2_args[0] = state->val; + test_2_args[0] = (unsigned long)state; test_2_args[1] = a1; test_2_args[2] = a2; test_2_args[3] = a3; diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 66a60bfb58672f98266d237d6c4f8a3226d8aa50..9791cd8d48a43e1b07efb6916dc79a0ad1917098 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -6,6 +6,7 @@ #include <stdbool.h> #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> #include <linux/if_ether.h> #include "bpf_misc.h" #include "bpf_kfuncs.h" @@ -1254,6 +1255,30 @@ int skb_invalid_ctx(void *ctx) return 0; } +SEC("fentry/skb_tx_error") +__failure __msg("must be referenced or trusted") +int BPF_PROG(skb_invalid_ctx_fentry, void *skb) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_from_skb(skb, 0, &ptr); + + return 0; +} + +SEC("fexit/skb_tx_error") +__failure __msg("must be referenced or trusted") +int BPF_PROG(skb_invalid_ctx_fexit, void *skb) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_from_skb(skb, 0, &ptr); + + return 0; +} + /* Reject writes to dynptr slot for uninit arg */ SEC("?raw_tp") __failure __msg("potential write to dynptr at off=-16") diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index 5985920d162e7be608c4412c855dcf6520672cb5..bfcc85686cf046361b451f97f4cd310a6ccdb1ed 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -5,6 +5,7 @@ #include <stdbool.h> #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> #include "bpf_misc.h" #include "bpf_kfuncs.h" #include "errno.h" @@ -544,3 +545,25 @@ int test_dynptr_skb_strcmp(struct __sk_buff *skb) return 1; } + +SEC("tp_btf/kfree_skb") +int BPF_PROG(test_dynptr_skb_tp_btf, void *skb, void *location) +{ + __u8 write_data[2] = {1, 2}; + struct bpf_dynptr ptr; + int ret; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* since tp_btf skbs are read only, writes should fail */ + ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0); + if (ret != -EINVAL) { + err = 2; + return 1; + } + + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c b/tools/testing/selftests/bpf/progs/struct_ops_module.c new file mode 100644 index 0000000000000000000000000000000000000000..5a411db986cddc892b8c1bff3ac8627c3f46a7cd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +int test_2_result = 0; + +SEC("struct_ops/test_1") +int BPF_PROG(test_1) +{ + return 0xdeadbeef; +} + +SEC("struct_ops/test_2") +int BPF_PROG(test_2, int a, int b) +{ + test_2_result = a + b; + return a + b; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_1 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2, +}; diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c new file mode 100644 index 0000000000000000000000000000000000000000..bba3e37f749b866b9d1af47958ed88b9193cab30 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" +#include "bpf_misc.h" + +SEC("tp_btf/bpf_testmod_test_nullable_bare") +__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'") +int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx) +{ + return nullable_ctx->len; +} + +SEC("tp_btf/bpf_testmod_test_nullable_bare") +int BPF_PROG(handle_tp_btf_nullable_bare2, struct bpf_testmod_test_read_ctx *nullable_ctx) +{ + if (nullable_ctx) + return nullable_ctx->len; + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index 40cba8d368d9fb2c67529d6c2819d80a3425129c..6157f884d0916962d838f44f859fd9ffcb286420 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -169,12 +169,14 @@ def bpftool(args, JSON=True, ns="", fail=True, include_stderr=False): return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail, include_stderr=include_stderr) -def bpftool_prog_list(expected=None, ns=""): +def bpftool_prog_list(expected=None, ns="", exclude_orphaned=True): _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True) # Remove the base progs for p in base_progs: if p in progs: progs.remove(p) + if exclude_orphaned: + progs = [ p for p in progs if not p['orphaned'] ] if expected is not None: if len(progs) != expected: fail(True, "%d BPF programs loaded, expected %d" % @@ -612,11 +614,9 @@ def pin_map(file_name, idx=0, expected=1): def check_dev_info_removed(prog_file=None, map_file=None): bpftool_prog_list(expected=0) + bpftool_prog_list(expected=1, exclude_orphaned=False) ret, err = bpftool("prog show pin %s" % (prog_file), fail=False) - fail(ret == 0, "Showing prog with removed device did not fail") - fail(err["error"].find("No such device") == -1, - "Showing prog with removed device expected ENODEV, error is %s" % - (err["error"])) + fail(ret != 0, "failed to show prog with removed device") bpftool_map_list(expected=0) ret, err = bpftool("map show pin %s" % (map_file), fail=False) @@ -1395,10 +1395,7 @@ try: start_test("Test multi-dev ASIC cross-dev destruction - orphaned...") ret, out = bpftool("prog show %s" % (progB), fail=False) - fail(ret == 0, "got information about orphaned program") - fail("error" not in out, "no error reported for get info on orphaned") - fail(out["error"] != "can't get prog info: No such device", - "wrong error for get info on orphaned") + fail(ret != 0, "couldn't get information about orphaned program") print("%s: OK" % (os.path.basename(__file__)))
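The testmod pieces above add a struct_ops implementor ("bpf_testmod_ops") and a tracepoint whose context argument carries the __nullable suffix, and the new programs exercise both, together with read-only skb dynptrs under tp_btf. A sketch of how to run just the new suites (assumes the selftests and the matching bpf_testmod are built from this tree; test_progs loads the module itself when it is available)::

	cd tools/testing/selftests/bpf
	make
	sudo ./test_progs -t struct_ops_module,tp_btf_nullable,dynptr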
diff --git a/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile b/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..03d0449d307c2492df57abb8d62e7b6a200121f7 --- /dev/null +++ b/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# Makefile for IFS (In Field Scan) selftests + +TEST_PROGS := test_ifs.sh + +include ../../../../../lib.mk diff --git a/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh new file mode 100755 index 0000000000000000000000000000000000000000..8b68964b29f468465d4ce69172d075ede8fcffa4 --- /dev/null +++ b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh @@ -0,0 +1,495 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test the functionality of the Intel IFS (In Field Scan) driver. +# + +# Matched with kselftest framework: tools/testing/selftests/kselftest.h +readonly KSFT_PASS=0 +readonly KSFT_FAIL=1 +readonly KSFT_XFAIL=2 +readonly KSFT_SKIP=4 + +readonly CPU_SYSFS="/sys/devices/system/cpu" +readonly CPU_OFFLINE_SYSFS="${CPU_SYSFS}/offline" +readonly IMG_PATH="/lib/firmware/intel/ifs_0" +readonly IFS_SCAN_MODE="0" +readonly IFS_ARRAY_BIST_SCAN_MODE="1" +readonly IFS_PATH="/sys/devices/virtual/misc/intel_ifs" +readonly IFS_SCAN_SYSFS_PATH="${IFS_PATH}_${IFS_SCAN_MODE}" +readonly IFS_ARRAY_BIST_SYSFS_PATH="${IFS_PATH}_${IFS_ARRAY_BIST_SCAN_MODE}" +readonly RUN_TEST="run_test" +readonly STATUS="status" +readonly DETAILS="details" +readonly STATUS_PASS="pass" +readonly PASS="PASS" +readonly FAIL="FAIL" +readonly INFO="INFO" +readonly XFAIL="XFAIL" +readonly SKIP="SKIP" +readonly IFS_NAME="intel_ifs" +readonly ALL="all" +readonly SIBLINGS="siblings" + +# Matches arch/x86/include/asm/intel-family.h and +# drivers/platform/x86/intel/ifs/core.c requirement as follows +readonly SAPPHIRERAPIDS_X="8f" +readonly EMERALDRAPIDS_X="cf" + +readonly INTEL_FAM6="06" + +LOOP_TIMES=3 +FML="" +MODEL="" +STEPPING="" +CPU_FMS="" +TRUE="true" +FALSE="false" +RESULT=$KSFT_PASS +IMAGE_NAME="" +INTERVAL_TIME=1 +OFFLINE_CPUS="" +# For IFS cleanup tags +ORIGIN_IFS_LOADED="" +IFS_IMAGE_NEED_RESTORE=$FALSE +IFS_LOG="/tmp/ifs_logs.$$" +RANDOM_CPU="" +DEFAULT_IMG_ID="" + +append_log() +{ + echo -e "$1" | tee -a "$IFS_LOG" +} + +online_offline_cpu_list() +{ + local on_off=$1 + local target_cpus=$2 + local cpu="" + local cpu_start="" + local cpu_end="" + local i="" + + if [[ -n "$target_cpus" ]]; then + for cpu in $(echo "$target_cpus" | tr ',' ' '); do + if [[ "$cpu" == *"-"* ]]; then + cpu_start="" + cpu_end="" + i="" + cpu_start=$(echo "$cpu" | cut -d "-" -f 1) + cpu_end=$(echo "$cpu" | cut -d "-" -f 2) + for((i=cpu_start;i<=cpu_end;i++)); do + append_log "[$INFO] echo $on_off > \ +${CPU_SYSFS}/cpu${i}/online" + echo "$on_off" > "$CPU_SYSFS"/cpu"$i"/online + done + else + append_log "[$INFO] echo $on_off > ${CPU_SYSFS}/cpu${cpu}/online" + echo "$on_off" > "$CPU_SYSFS"/cpu"$cpu"/online + fi + done + fi +} + +ifs_scan_result_summary() +{ + local failed_info pass_num skip_num fail_num + + if [[ -e "$IFS_LOG" ]]; then + failed_info=$(grep ^"\[${FAIL}\]" "$IFS_LOG") + fail_num=$(grep -c ^"\[${FAIL}\]" "$IFS_LOG") + skip_num=$(grep -c ^"\[${SKIP}\]" "$IFS_LOG") + pass_num=$(grep -c ^"\[${PASS}\]" "$IFS_LOG") + + if [[ "$fail_num" -ne 0 ]]; then + RESULT=$KSFT_FAIL + echo "[$INFO] IFS test failure summary:" + echo "$failed_info" + elif [[ "$skip_num" -ne 0 ]]; then + RESULT=$KSFT_SKIP + fi + echo "[$INFO] IFS test pass:$pass_num, 
skip:$skip_num, fail:$fail_num" + else + echo "[$INFO] No file $IFS_LOG for IFS scan summary" + fi +} + +ifs_cleanup() +{ + echo "[$INFO] Restore environment after IFS test" + + # Restore the original IFS image if a backup was taken + [[ "$IFS_IMAGE_NEED_RESTORE" == "$TRUE" ]] && { + mv -f "$IMG_PATH"/"$IMAGE_NAME"_origin "$IMG_PATH"/"$IMAGE_NAME" + } + + # Restore the CPUs to the state before testing + [[ -z "$OFFLINE_CPUS" ]] || online_offline_cpu_list "0" "$OFFLINE_CPUS" + + lsmod | grep -q "$IFS_NAME" && [[ "$ORIGIN_IFS_LOADED" == "$FALSE" ]] && { + echo "[$INFO] modprobe -r $IFS_NAME" + modprobe -r "$IFS_NAME" + } + + ifs_scan_result_summary + [[ -e "$IFS_LOG" ]] && rm -rf "$IFS_LOG" + + echo "[RESULT] IFS test exit with $RESULT" + exit "$RESULT" +} + +do_cmd() +{ + local cmd=$* + local ret="" + + append_log "[$INFO] $cmd" + eval "$cmd" + ret=$? + if [[ $ret -ne 0 ]]; then + append_log "[$FAIL] $cmd failed. Return code is $ret" + RESULT=$KSFT_XFAIL + ifs_cleanup + fi +} + +test_exit() +{ + local info=$1 + RESULT=$2 + + declare -A EXIT_MAP + EXIT_MAP[$KSFT_PASS]=$PASS + EXIT_MAP[$KSFT_FAIL]=$FAIL + EXIT_MAP[$KSFT_XFAIL]=$XFAIL + EXIT_MAP[$KSFT_SKIP]=$SKIP + + append_log "[${EXIT_MAP[$RESULT]}] $info" + ifs_cleanup +} + +online_all_cpus() +{ + local off_cpus="" + + OFFLINE_CPUS=$(cat "$CPU_OFFLINE_SYSFS") + online_offline_cpu_list "1" "$OFFLINE_CPUS" + + off_cpus=$(cat "$CPU_OFFLINE_SYSFS") + if [[ -z "$off_cpus" ]]; then + append_log "[$INFO] All CPUs are online." + else + append_log "[$XFAIL] There are offline CPUs ($off_cpus) after onlining all CPUs!" + RESULT=$KSFT_XFAIL + ifs_cleanup + fi +} + +get_cpu_fms() +{ + FML=$(grep -m 1 "family" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}') + MODEL=$(grep -m 1 "model" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}') + STEPPING=$(grep -m 1 "stepping" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}') + CPU_FMS="${FML}-${MODEL}-${STEPPING}" +} + +check_cpu_ifs_support_interval_time() +{ + get_cpu_fms + + if [[ "$FML" != "$INTEL_FAM6" ]]; then + test_exit "CPU family:$FML does not support IFS" "$KSFT_SKIP" + fi + + # The microcode requires a minimum time interval between IFS scans on the same CPU: + case $MODEL in + "$SAPPHIRERAPIDS_X") + INTERVAL_TIME=180; + ;; + "$EMERALDRAPIDS_X") + INTERVAL_TIME=30; + ;; + *) + # Set default interval time for other platforms + INTERVAL_TIME=1; + append_log "[$INFO] CPU FML:$FML model:0x$MODEL, default: 1s interval time" + ;; + esac +} + +check_ifs_loaded() +{ + local ifs_info="" + + ifs_info=$(lsmod | grep "$IFS_NAME") + if [[ -z "$ifs_info" ]]; then + append_log "[$INFO] modprobe $IFS_NAME" + modprobe "$IFS_NAME" || { + test_exit "Check if CONFIG_INTEL_IFS is set to m or the \ +platform doesn't support IFS" "$KSFT_SKIP" + } + ifs_info=$(lsmod | grep "$IFS_NAME") + [[ -n "$ifs_info" ]] || test_exit "No ifs module listed by lsmod" "$KSFT_FAIL" + fi +} + +test_ifs_scan_entry() +{ + local ifs_info="" + + ifs_info=$(lsmod | grep "$IFS_NAME") + + if [[ -z "$ifs_info" ]]; then + ORIGIN_IFS_LOADED="$FALSE" + check_ifs_loaded + else + ORIGIN_IFS_LOADED="$TRUE" + append_log "[$INFO] Module $IFS_NAME is already loaded" + fi + + if [[ -d "$IFS_SCAN_SYSFS_PATH" ]]; then + append_log "[$PASS] IFS sysfs $IFS_SCAN_SYSFS_PATH entry is created\n" + else + test_exit "No sysfs entry in $IFS_SCAN_SYSFS_PATH" "$KSFT_FAIL" + fi +} + +load_image() +{ + local image_id=$1 + local image_info="" + local ret="" + + check_ifs_loaded + if [[ -e "${IMG_PATH}/${IMAGE_NAME}" ]]; then + append_log "[$INFO] echo 0x$image_id > 
${IFS_SCAN_SYSFS_PATH}/current_batch" + echo "0x$image_id" > "$IFS_SCAN_SYSFS_PATH"/current_batch 2>/dev/null + ret=$? + [[ "$ret" -eq 0 ]] || { + append_log "[$FAIL] Load ifs image $image_id failed with ret:$ret\n" + return "$ret" + } + image_info=$(cat ${IFS_SCAN_SYSFS_PATH}/current_batch) + if [[ "$image_info" == 0x"$image_id" ]]; then + append_log "[$PASS] load IFS current_batch:$image_info" + else + append_log "[$FAIL] current_batch:$image_info is not expected:$image_id" + return "$KSFT_FAIL" + fi + else + append_log "[$FAIL] No IFS image file ${IMG_PATH}/${IMAGE_NAME}" + return "$KSFT_FAIL" + fi + return 0 +} + +test_load_origin_ifs_image() +{ + local image_id=$1 + + IMAGE_NAME="${CPU_FMS}-${image_id}.scan" + + load_image "$image_id" || return $? + return 0 +} + +test_load_bad_ifs_image() +{ + local image_id=$1 + + IMAGE_NAME="${CPU_FMS}-${image_id}.scan" + + do_cmd "mv -f ${IMG_PATH}/${IMAGE_NAME} ${IMG_PATH}/${IMAGE_NAME}_origin" + + # Set IFS_IMAGE_NEED_RESTORE to true before corrupting the original IFS image file + IFS_IMAGE_NEED_RESTORE=$TRUE + do_cmd "dd if=/dev/urandom of=${IMG_PATH}/${IMAGE_NAME} bs=1K count=6 2>/dev/null" + + # The write is expected to fail for the corrupted image (negative test) + append_log "[$INFO] echo 0x$image_id > ${IFS_SCAN_SYSFS_PATH}/current_batch" + echo "0x$image_id" > "$IFS_SCAN_SYSFS_PATH"/current_batch 2>/dev/null + ret=$? + if [[ "$ret" -ne 0 ]]; then + append_log "[$PASS] Load invalid ifs image failed with ret:$ret not 0 as expected" + else + append_log "[$FAIL] Load of invalid ifs image unexpectedly succeeded with ret:$ret" + fi + + do_cmd "mv -f ${IMG_PATH}/${IMAGE_NAME}_origin ${IMG_PATH}/${IMAGE_NAME}" + IFS_IMAGE_NEED_RESTORE=$FALSE +} + +test_bad_and_origin_ifs_image() +{ + local image_id=$1 + + append_log "[$INFO] Test loading bad and then loading original IFS image:" + test_load_origin_ifs_image "$image_id" || return $? + test_load_bad_ifs_image "$image_id" + # Load the original image again and make sure it still works + test_load_origin_ifs_image "$image_id" || return $? + append_log "[$INFO] Loading invalid IFS image and then loading initial image passed.\n" +} + +ifs_test_cpu() +{ + local ifs_mode=$1 + local cpu_num=$2 + local image_id status details ret result result_info + + echo "$cpu_num" > "$IFS_PATH"_"$ifs_mode"/"$RUN_TEST" + ret=$? 
+ + status=$(cat "${IFS_PATH}_${ifs_mode}/${STATUS}") + details=$(cat "${IFS_PATH}_${ifs_mode}/${DETAILS}") + + if [[ "$ret" -eq 0 && "$status" == "$STATUS_PASS" ]]; then + result="$PASS" + else + result="$FAIL" + fi + + cpu_num=$(cat "${CPU_SYSFS}/cpu${cpu_num}/topology/thread_siblings_list") + + # There is no image file for IFS ARRAY BIST scan + if [[ -e "${IFS_PATH}_${ifs_mode}/current_batch" ]]; then + image_id=$(cat "${IFS_PATH}_${ifs_mode}/current_batch") + result_info=$(printf "[%s] ifs_%1d cpu(s):%s, current_batch:0x%02x, \ +ret:%2d, status:%s, details:0x%016x" \ + "$result" "$ifs_mode" "$cpu_num" "$image_id" "$ret" \ + "$status" "$details") + else + result_info=$(printf "[%s] ifs_%1d cpu(s):%s, ret:%2d, status:%s, details:0x%016x" \ + "$result" "$ifs_mode" "$cpu_num" "$ret" "$status" "$details") + fi + + append_log "$result_info" +} + +ifs_test_cpus() +{ + local cpus_type=$1 + local ifs_mode=$2 + local image_id=$3 + local cpu_max_num="" + local cpu_num="" + + case "$cpus_type" in + "$ALL") + cpu_max_num=$(($(nproc) - 1)) + cpus=$(seq 0 $cpu_max_num) + ;; + "$SIBLINGS") + cpus=$(cat ${CPU_SYSFS}/cpu*/topology/thread_siblings_list \ + | sed -e 's/,.*//' \ + | sed -e 's/-.*//' \ + | sort -n \ + | uniq) + ;; + *) + test_exit "Invalid cpus_type:$cpus_type" "$KSFT_XFAIL" + ;; + esac + + for cpu_num in $cpus; do + ifs_test_cpu "$ifs_mode" "$cpu_num" + done + + if [[ -z "$image_id" ]]; then + append_log "[$INFO] ifs_$ifs_mode test $cpus_type cpus completed\n" + else + append_log "[$INFO] ifs_$ifs_mode $cpus_type cpus with $CPU_FMS-$image_id.scan \ +completed\n" + fi +} + +test_ifs_same_cpu_loop() +{ + local ifs_mode=$1 + local cpu_num=$2 + local loop_times=$3 + + append_log "[$INFO] Test ifs mode $ifs_mode on CPU:$cpu_num for $loop_times rounds:" + [[ "$ifs_mode" == "$IFS_SCAN_MODE" ]] && { + load_image "$DEFAULT_IMG_ID" || return $? + } + for (( i=1; i<=loop_times; i++ )); do + append_log "[$INFO] Loop iteration $i of $loop_times" + # Only IFS scan needs the interval time + if [[ "$ifs_mode" == "$IFS_SCAN_MODE" ]]; then + do_cmd "sleep $INTERVAL_TIME" + elif [[ "$ifs_mode" == "$IFS_ARRAY_BIST_SCAN_MODE" ]]; then + true + else + test_exit "Invalid ifs_mode:$ifs_mode" "$KSFT_XFAIL" + fi + + ifs_test_cpu "$ifs_mode" "$cpu_num" + done + append_log "[$INFO] $loop_times rounds of ifs_$ifs_mode test on CPU:$cpu_num completed.\n" +} + +test_ifs_scan_available_imgs() +{ + local image_ids="" + local image_id="" + + append_log "[$INFO] Test ifs scan with available images:" + image_ids=$(find "$IMG_PATH" -maxdepth 1 -name "${CPU_FMS}-[0-9a-fA-F][0-9a-fA-F].scan" \ + 2>/dev/null \ + | sort \ + | awk -F "-" '{print $NF}' \ + | cut -d "." -f 1) + + for image_id in $image_ids; do + load_image "$image_id" || return $? + + ifs_test_cpus "$SIBLINGS" "$IFS_SCAN_MODE" "$image_id" + # IFS scan requires time interval for the scan on the same CPU + do_cmd "sleep $INTERVAL_TIME" + done +} + +prepare_ifs_test_env() +{ + local max_cpu="" + + check_cpu_ifs_support_interval_time + + online_all_cpus + max_cpu=$(($(nproc) - 1)) + RANDOM_CPU=$(shuf -i 0-$max_cpu -n 1) + + DEFAULT_IMG_ID=$(find $IMG_PATH -maxdepth 1 -name "${CPU_FMS}-[0-9a-fA-F][0-9a-fA-F].scan" \ + 2>/dev/null \ + | sort \ + | head -n 1 \ + | awk -F "-" '{print $NF}' \ + | cut -d "." -f 1) +}
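prepare_ifs_test_env() above picks one random online CPU and the first matching scan image before the main flow below runs. A sketch of the typical invocation, using the kselftest target added to tools/testing/selftests/Makefile earlier in this series (root is required for the sysfs writes)::

	# Through the kselftest harness
	sudo make -C tools/testing/selftests TARGETS=drivers/platform/x86/intel/ifs run_tests

	# Or directly
	cd tools/testing/selftests/drivers/platform/x86/intel/ifs
	sudo ./test_ifs.sh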
+ +test_ifs() +{ + prepare_ifs_test_env + + test_ifs_scan_entry + + if [[ -z "$DEFAULT_IMG_ID" ]]; then + append_log "[$SKIP] No proper ${IMG_PATH}/${CPU_FMS}-*.scan, skip ifs_0 scan" + else + test_bad_and_origin_ifs_image "$DEFAULT_IMG_ID" + test_ifs_scan_available_imgs + test_ifs_same_cpu_loop "$IFS_SCAN_MODE" "$RANDOM_CPU" "$LOOP_TIMES" + fi + + if [[ -d "$IFS_ARRAY_BIST_SYSFS_PATH" ]]; then + ifs_test_cpus "$SIBLINGS" "$IFS_ARRAY_BIST_SCAN_MODE" + test_ifs_same_cpu_loop "$IFS_ARRAY_BIST_SCAN_MODE" "$RANDOM_CPU" "$LOOP_TIMES" + else + append_log "[$SKIP] No $IFS_ARRAY_BIST_SYSFS_PATH, skip IFS ARRAY BIST scan" + fi +} + +trap ifs_cleanup SIGTERM SIGINT +test_ifs +ifs_cleanup diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc index ce5d2e62731f38d7dace318d5f01a897c1636a34..cd89d9ddfecd076668dec9c064136b1761cbbb07 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc @@ -34,7 +34,9 @@ mips*) esac : "Test get argument (1)" -if grep -q eventfs_add_dir available_filter_functions; then +if grep -q eventfs_create_dir available_filter_functions; then + DIR_NAME="eventfs_create_dir" +elif grep -q eventfs_add_dir available_filter_functions; then DIR_NAME="eventfs_add_dir" else DIR_NAME="tracefs_create_dir" diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc index 4f72c2875f6b9cf4addcb7875251f650ae446281..07707e81c1524a7fed42ce19eaa5dc3ca37c6ae7 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc @@ -31,13 +31,18 @@ mips*) loongarch*) ARG1=%r4 ;; +sw_64) + ARG1=%r16 +;; *) echo "Please implement other architecture here" exit_untested esac : "Test get argument (1)" -if grep -q eventfs_add_dir available_filter_functions; then +if grep -q eventfs_create_dir available_filter_functions; then + DIR_NAME="eventfs_create_dir" +elif grep -q eventfs_add_dir available_filter_functions; then DIR_NAME="eventfs_add_dir" else DIR_NAME="tracefs_create_dir" diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc index 1df61e13a812b789d11a06cc9a3ddc517a72a359..8de38fb00baef9a5659cbb0e60ae3059a3feb62f 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc @@ -44,6 +44,10 @@ loongarch*) GOODREG=%r4 BADREG=%r12 ;; +sw_64) + GOODREG=%r16 + BADREG=%ps +;; *) echo "Please implement other architecture here" exit_untested diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 890a81f4ff6184837079e0206a0be6b5e2eace5f..ae2101c6d6403ce6a49e9df59ea03a5330ba7ca5 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -86,12 +86,13 @@ TEST_F(iommufd, cmd_fail) TEST_F(iommufd, cmd_length) { -#define TEST_LENGTH(_struct, _ioctl) \ +#define TEST_LENGTH(_struct, _ioctl, _last) \ { \ + size_t min_size = offsetofend(struct _struct, _last); \ struct { \ struct _struct cmd; \ uint8_t extra; \ - } cmd = { .cmd = { .size = sizeof(struct _struct) - 1 }, \ + } cmd = { .cmd = { .size = min_size - 1 }, \ .extra = UINT8_MAX }; \ int old_errno; \ int rc; \ @@ -112,16 +113,19 @@ 
TEST_F(iommufd, cmd_length) } \ } - TEST_LENGTH(iommu_destroy, IOMMU_DESTROY); - TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO); - TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC); - TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES); - TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS); - TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP); - TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY); - TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP); - TEST_LENGTH(iommu_option, IOMMU_OPTION); - TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS); + TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id); + TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved); + TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved); + TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id); + TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES, + out_iova_alignment); + TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS, + allowed_iovas); + TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova); + TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova); + TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length); + TEST_LENGTH(iommu_option, IOMMU_OPTION, val64); + TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved); #undef TEST_LENGTH } @@ -1404,16 +1408,242 @@ TEST_F(iommufd_mock_domain, alloc_hwpt) int i; for (i = 0; i != variant->mock_domains; i++) { + uint32_t hwpt_id[2]; uint32_t stddev_id; - uint32_t hwpt_id; - test_cmd_hwpt_alloc(self->idev_ids[0], self->ioas_id, &hwpt_id); - test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_err_hwpt_alloc(EOPNOTSUPP, + self->idev_ids[i], self->ioas_id, + ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]); + test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id, + 0, &hwpt_id[0]); + test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id, + IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]); + + /* Do a hw_pagetable rotation test */ + test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]); + EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0])); + test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]); + EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1])); + test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id); + test_ioctl_destroy(hwpt_id[1]); + + test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL); test_ioctl_destroy(stddev_id); - test_ioctl_destroy(hwpt_id); + test_ioctl_destroy(hwpt_id[0]); } } +FIXTURE(iommufd_dirty_tracking) +{ + int fd; + uint32_t ioas_id; + uint32_t hwpt_id; + uint32_t stdev_id; + uint32_t idev_id; + unsigned long page_size; + unsigned long bitmap_size; + void *bitmap; + void *buffer; +}; + +FIXTURE_VARIANT(iommufd_dirty_tracking) +{ + unsigned long buffer_size; +}; + +FIXTURE_SETUP(iommufd_dirty_tracking) +{ + void *vrc; + int rc; + + self->fd = open("/dev/iommu", O_RDWR); + ASSERT_NE(-1, self->fd); + + rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size); + if (rc || !self->buffer) { + SKIP(return, "Skipping buffer_size=%lu due to errno=%d", + variant->buffer_size, rc); + } + + assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0); + vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + assert(vrc == self->buffer); + + self->page_size = MOCK_PAGE_SIZE; + self->bitmap_size = + variant->buffer_size / self->page_size / BITS_PER_BYTE; + + /* Provision with an extra (MOCK_PAGE_SIZE) for the unaligned case */ + rc = posix_memalign(&self->bitmap, PAGE_SIZE, + self->bitmap_size + MOCK_PAGE_SIZE); + assert(!rc); + 
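The fixture above sizes the user bitmap at one dirty bit per tracked page: bitmap_size = buffer_size / page_size / BITS_PER_BYTE. A worked sketch of that arithmetic, assuming a 4 KiB tracking granule (an assumption here, though it matches the u32/u64 notes on the variants below): a 128 KiB buffer covers 32 pages, so 32 bits, i.e. 4 bytes::

    #include <stdio.h>

    #define BITS_PER_BYTE 8

    /* One dirty bit per page: bitmap bytes needed to cover a buffer. */
    static unsigned long bitmap_bytes(unsigned long buffer_size,
                                      unsigned long page_size)
    {
            return buffer_size / page_size / BITS_PER_BYTE;
    }

    int main(void)
    {
            /* Buffer sizes mirror the fixture variants; 4 KiB is assumed. */
            unsigned long sizes[] = { 128UL << 10, 256UL << 10, 640UL << 10,
                                      128UL << 20, 256UL << 20 };
            size_t i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("%10lu buffer bytes -> %5lu bitmap bytes\n",
                           sizes[i], bitmap_bytes(sizes[i], 4096));
            return 0;
    }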
assert(self->bitmap); + assert((uintptr_t)self->bitmap % PAGE_SIZE == 0); + + test_ioctl_ioas_alloc(&self->ioas_id); + test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id, + &self->idev_id); +} + +FIXTURE_TEARDOWN(iommufd_dirty_tracking) +{ + munmap(self->buffer, variant->buffer_size); + munmap(self->bitmap, self->bitmap_size); + teardown_iommufd(self->fd, _metadata); +} + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k) +{ + /* one u32 index bitmap */ + .buffer_size = 128UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k) +{ + /* one u64 index bitmap */ + .buffer_size = 256UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k) +{ + /* two u64 index and trailing end bitmap */ + .buffer_size = 640UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M) +{ + /* 4K bitmap (128M IOVA range) */ + .buffer_size = 128UL * 1024UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M) +{ + /* 8K bitmap (256M IOVA range) */ + .buffer_size = 256UL * 1024UL * 1024UL, +}; + +TEST_F(iommufd_dirty_tracking, enforce_dirty) +{ + uint32_t ioas_id, stddev_id, idev_id; + uint32_t hwpt_id, _hwpt_id; + uint32_t dev_flags; + + /* Regular case */ + dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY; + test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id, + NULL); + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); + + /* IOMMU device does not support dirty tracking */ + test_ioctl_ioas_alloc(&ioas_id); + test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id, + &idev_id); + test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_ioctl_destroy(stddev_id); +} + +TEST_F(iommufd_dirty_tracking, set_dirty_tracking) +{ + uint32_t stddev_id; + uint32_t hwpt_id; + + test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_cmd_set_dirty_tracking(hwpt_id, true); + test_cmd_set_dirty_tracking(hwpt_id, false); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + +TEST_F(iommufd_dirty_tracking, device_dirty_capability) +{ + uint32_t caps = 0; + uint32_t stddev_id; + uint32_t hwpt_id; + + test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_cmd_get_hw_capabilities(self->idev_id, caps, + IOMMU_HW_CAP_DIRTY_TRACKING); + ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING, + caps & IOMMU_HW_CAP_DIRTY_TRACKING); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + +TEST_F(iommufd_dirty_tracking, get_dirty_bitmap) +{ + uint32_t stddev_id; + uint32_t hwpt_id; + uint32_t ioas_id; + + test_ioctl_ioas_alloc(&ioas_id); + test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer, + variant->buffer_size, MOCK_APERTURE_START); + + test_cmd_hwpt_alloc(self->idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + + test_cmd_set_dirty_tracking(hwpt_id, true); + + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, + self->bitmap, self->bitmap_size, 0, _metadata); + + /* PAGE_SIZE unaligned bitmap */ + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, 
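The two bitmap tests here differ only in IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR. The contract: a default read reports and clears the IOPTE dirty bits, so a second read sees zeroes, while NO_CLEAR leaves them set. A toy model of that contract, with the ioctl replaced by a plain function (0x5555 mimics the even-bit pattern the tests use)::

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy dirty state: a read reports the bits and, by default, clears. */
    static unsigned long read_dirty(unsigned long *iopte_dirty, bool no_clear)
    {
            unsigned long snapshot = *iopte_dirty;

            if (!no_clear)
                    *iopte_dirty = 0;
            return snapshot;
    }

    int main(void)
    {
            unsigned long dirty = 0x5555;   /* even bits, as in the test */
            unsigned long first, second;

            first = read_dirty(&dirty, false);
            second = read_dirty(&dirty, false);
            printf("clear:    %#lx then %#lx\n", first, second);

            dirty = 0x5555;
            first = read_dirty(&dirty, true);
            second = read_dirty(&dirty, true);
            printf("no_clear: %#lx then %#lx\n", first, second);
            return 0;
    }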
self->page_size, + self->bitmap + MOCK_PAGE_SIZE, + self->bitmap_size, 0, _metadata); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + +TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear) +{ + uint32_t stddev_id; + uint32_t hwpt_id; + uint32_t ioas_id; + + test_ioctl_ioas_alloc(&ioas_id); + test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer, + variant->buffer_size, MOCK_APERTURE_START); + + test_cmd_hwpt_alloc(self->idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + + test_cmd_set_dirty_tracking(hwpt_id, true); + + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, + self->bitmap, self->bitmap_size, + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, + _metadata); + + /* Unaligned bitmap */ + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, + self->bitmap + MOCK_PAGE_SIZE, + self->bitmap_size, + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, + _metadata); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + /* VFIO compatibility IOCTLs */ TEST_F(iommufd, simple_ioctls) diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c index a220ca2a689d160c95129d0bc281ed63dfb38c73..1fcd69cb0e416718c85657f5278e75824a0d7da4 100644 --- a/tools/testing/selftests/iommu/iommufd_fail_nth.c +++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c @@ -612,10 +612,10 @@ TEST_FAIL_NTH(basic_fail_nth, device) &idev_id)) return -1; - if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info))) + if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL)) return -1; - if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, &hwpt_id)) + if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, &hwpt_id)) return -1; if (_test_cmd_mock_domain_replace(self->fd, stdev_id, ioas_id2, NULL)) diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index e0753d03ecaa8576005120cf86dd47a529df1f94..70d558e0f0c747ea887d33805c2e7ff78fc6b8fb 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -16,6 +16,25 @@ /* Hack to make assertions more readable */ #define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD +/* Imported from include/asm-generic/bitops/generic-non-atomic.h */ +#define BITS_PER_BYTE 8 +#define BITS_PER_LONG __BITS_PER_LONG +#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG) + +static inline void set_bit(unsigned int nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p |= mask; +} + +static inline bool test_bit(unsigned int nr, unsigned long *addr) +{ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1))); +} + static void *buffer; static unsigned long BUFFER_SIZE; @@ -74,6 +93,38 @@ static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id, EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \ stdev_id, hwpt_id, NULL)) +static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id, + __u32 stdev_flags, __u32 *stdev_id, + __u32 *hwpt_id, __u32 *idev_id) +{ + struct iommu_test_cmd cmd = { + .size = sizeof(cmd), + .op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS, + .id = ioas_id, + .mock_domain_flags = { .dev_flags = stdev_flags }, + }; + int ret; + + ret = ioctl(fd, IOMMU_TEST_CMD, &cmd); + if (ret) + 
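The set_bit()/test_bit() helpers imported into iommufd_utils.h above address the bitmap as an array of longs: BIT_WORD() selects the word and BIT_MASK() the bit within it. A small self-contained illustration of that indexing, with BITS_PER_LONG computed from sizeof rather than __BITS_PER_LONG so it builds outside the kernel tree::

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))
    #define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

    int main(void)
    {
            unsigned long bitmap[2] = { 0, 0 };
            unsigned int nr = 70;   /* word 1, bit 6 on a 64-bit build */

            bitmap[BIT_WORD(nr)] |= BIT_MASK(nr);            /* set_bit() */
            printf("word %zu mask %#lx set %d\n",
                   (size_t)BIT_WORD(nr), BIT_MASK(nr),
                   !!(bitmap[BIT_WORD(nr)] & BIT_MASK(nr))); /* test_bit() */
            return 0;
    }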
return ret; + if (stdev_id) + *stdev_id = cmd.mock_domain_flags.out_stdev_id; + assert(cmd.id != 0); + if (hwpt_id) + *hwpt_id = cmd.mock_domain_flags.out_hwpt_id; + if (idev_id) + *idev_id = cmd.mock_domain_flags.out_idev_id; + return 0; +} +#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \ + ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \ + stdev_id, hwpt_id, idev_id)) +#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \ + EXPECT_ERRNO(_errno, \ + _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \ + stdev_id, hwpt_id, NULL)) + static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id, __u32 *hwpt_id) { @@ -103,10 +154,11 @@ static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id, pt_id, NULL)) static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, - __u32 *hwpt_id) + __u32 flags, __u32 *hwpt_id) { struct iommu_hwpt_alloc cmd = { .size = sizeof(cmd), + .flags = flags, .dev_id = device_id, .pt_id = pt_id, }; @@ -120,8 +172,12 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, return 0; } -#define test_cmd_hwpt_alloc(device_id, pt_id, hwpt_id) \ - ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, hwpt_id)) +#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id) \ + ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, \ + pt_id, flags, hwpt_id)) +#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id) \ + EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(self->fd, device_id, \ + pt_id, flags, hwpt_id)) static int _test_cmd_access_replace_ioas(int fd, __u32 access_id, unsigned int ioas_id) @@ -142,6 +198,125 @@ static int _test_cmd_access_replace_ioas(int fd, __u32 access_id, #define test_cmd_access_replace_ioas(access_id, ioas_id) \ ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id)) +static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled) +{ + struct iommu_hwpt_set_dirty_tracking cmd = { + .size = sizeof(cmd), + .flags = enabled ? 
IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0, + .hwpt_id = hwpt_id, + }; + int ret; + + ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd); + if (ret) + return -errno; + return 0; +} +#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \ + ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled)) + +static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, + __u64 *bitmap, __u32 flags) +{ + struct iommu_hwpt_get_dirty_bitmap cmd = { + .size = sizeof(cmd), + .hwpt_id = hwpt_id, + .flags = flags, + .iova = iova, + .length = length, + .page_size = page_size, + .data = (uintptr_t)bitmap, + }; + int ret; + + ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd); + if (ret) + return ret; + return 0; +} + +#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, \ + bitmap, flags) \ + ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \ + page_size, bitmap, flags)) + +static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, + __u64 *bitmap, __u64 *dirty) +{ + struct iommu_test_cmd cmd = { + .size = sizeof(cmd), + .op = IOMMU_TEST_OP_DIRTY, + .id = hwpt_id, + .dirty = { + .iova = iova, + .length = length, + .page_size = page_size, + .uptr = (uintptr_t)bitmap, + } + }; + int ret; + + ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd); + if (ret) + return -ret; + if (dirty) + *dirty = cmd.dirty.out_nr_dirty; + return 0; +} + +#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \ + bitmap, nr) \ + ASSERT_EQ(0, \ + _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \ + page_size, bitmap, nr)) + +static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, __u64 *bitmap, + __u64 bitmap_size, __u32 flags, + struct __test_metadata *_metadata) +{ + unsigned long i, nbits = bitmap_size * BITS_PER_BYTE; + unsigned long nr = nbits / 2; + __u64 out_dirty = 0; + + /* Mark all even bits as dirty in the mock domain */ + for (i = 0; i < nbits; i += 2) + set_bit(i, (unsigned long *)bitmap); + + test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, + bitmap, &out_dirty); + ASSERT_EQ(nr, out_dirty); + + /* Expect all even bits as dirty in the user bitmap */ + memset(bitmap, 0, bitmap_size); + test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap, + flags); + /* Beware ASSERT_EQ() is two statements -- braces are not redundant! 
*/ + for (i = 0; i < nbits; i++) { + ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *)bitmap)); + } + + memset(bitmap, 0, bitmap_size); + test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap, + flags); + + /* It was read already -- expect all zeroes, unless NO_CLEAR kept the dirty bits set */ + for (i = 0; i < nbits; i++) { + ASSERT_EQ(!(i % 2) && (flags & + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR), + test_bit(i, (unsigned long *)bitmap)); + } + + return 0; +} +#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, bitmap, \ + bitmap_size, flags, _metadata) \ + ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \ + page_size, bitmap, bitmap_size, \ + flags, _metadata)) + static int _test_cmd_create_access(int fd, unsigned int ioas_id, __u32 *access_id, unsigned int flags) { @@ -266,6 +441,17 @@ static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer, IOMMU_IOAS_MAP_READABLE)); \ }) +#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova) \ + ({ \ + __u64 __iova = iova; \ + ASSERT_EQ(0, \ + _test_ioctl_ioas_map( \ + self->fd, ioas_id, buffer, length, &__iova, \ + IOMMU_IOAS_MAP_FIXED_IOVA | \ + IOMMU_IOAS_MAP_WRITEABLE | \ + IOMMU_IOAS_MAP_READABLE)); \ + }) + #define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova) \ ({ \ __u64 __iova = iova; \ @@ -354,8 +540,8 @@ static void teardown_iommufd(int fd, struct __test_metadata *_metadata) #endif /* @data can be NULL */ -static int _test_cmd_get_hw_info(int fd, __u32 device_id, - void *data, size_t data_len) +static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data, + size_t data_len, uint32_t *capabilities) { struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data; struct iommu_hw_info cmd = { @@ -363,6 +549,7 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, .dev_id = device_id, .data_len = data_len, .data_uptr = (uint64_t)data, + .out_capabilities = 0, }; int ret; @@ -399,14 +586,19 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, assert(!info->flags); } + if (capabilities) + *capabilities = cmd.out_capabilities; + return 0; } -#define test_cmd_get_hw_info(device_id, data, data_len) \ - ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, \ - data, data_len)) +#define test_cmd_get_hw_info(device_id, data, data_len) \ + ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \ + data_len, NULL)) + +#define test_err_get_hw_info(_errno, device_id, data, data_len) \ + EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \ + data_len, NULL)) -#define test_err_get_hw_info(_errno, device_id, data, data_len) \ - EXPECT_ERRNO(_errno, \ - _test_cmd_get_hw_info(self->fd, device_id, \ - data, data_len)) +#define test_cmd_get_hw_capabilities(device_id, caps, mask) \ + ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps)) diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 25bc61dac5fbe69bcc62195dde70ef53f56868b0..490a0a7efb3c4b7c7d389708a0d82d2127d44558 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -272,6 +272,7 @@ struct kvm_x86_cpu_property { #define X86_PROPERTY_MAX_EXT_LEAF KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31) #define X86_PROPERTY_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7) #define X86_PROPERTY_MAX_VIRT_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15) +#define X86_PROPERTY_GUEST_MAX_PHY_ADDR
KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23) #define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11) #define X86_PROPERTY_MAX_CENTAUR_LEAF KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31) diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 7a8af1821f5dae2993995c60e0ef08faa6431919..bc1ef536b640c6a4528c98afefbc9ef0b5ee9284 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -685,9 +685,6 @@ static void __vm_mem_region_delete(struct kvm_vm *vm, hash_del(®ion->slot_node); } - region->region.memory_size = 0; - vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region); - sparsebit_free(®ion->unused_phy_pages); ret = munmap(region->mmap_start, region->mmap_size); TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); @@ -1178,7 +1175,12 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) */ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) { - __vm_mem_region_delete(vm, memslot2region(vm, slot), true); + struct userspace_mem_region *region = memslot2region(vm, slot); + + region->region.memory_size = 0; + vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region); + + __vm_mem_region_delete(vm, region, true); } /* Returns the size of a vCPU's kvm_run structure. */ diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index d8288374078e4b3ce888bed569c7e59192d43e7c..5a035bc2b9c1c0829ee23696b6d22b194565ee2a 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1248,9 +1248,20 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm) { const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ unsigned long ht_gfn, max_gfn, max_pfn; - uint8_t maxphyaddr; + uint8_t maxphyaddr, guest_maxphyaddr; - max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1; + /* + * Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR + * enumerates the max _mappable_ GPA, which can be less than the raw + * MAXPHYADDR, e.g. if MAXPHYADDR=52, KVM is using TDP, and the CPU + * doesn't support 5-level TDP. + */ + guest_maxphyaddr = kvm_cpu_property(X86_PROPERTY_GUEST_MAX_PHY_ADDR); + guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits; + TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits, + "Guest MAXPHYADDR should never be greater than raw MAXPHYADDR"); + + max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1; /* Avoid reserved HyperTransport region on AMD processors. 
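vm_compute_max_gfn() above trusts the newly added X86_PROPERTY_GUEST_MAX_PHY_ADDR, i.e. CPUID 0x80000008.EAX[23:16], and falls back to the raw MAXPHYADDR when that field reads as zero. A sketch of the bitfield extraction and the resulting max_gfn; the eax value below is fabricated for illustration::

    #include <stdint.h>
    #include <stdio.h>

    /* CPUID 0x80000008.EAX[23:16]: the guest (mappable) MAXPHYADDR. */
    static uint8_t guest_maxphyaddr(uint32_t eax)
    {
            return (eax >> 16) & 0xff;
    }

    int main(void)
    {
            uint32_t eax = 0x00300034;      /* fabricated: bits 23:16 = 48 */
            unsigned int pa_bits = 52;      /* raw MAXPHYADDR */
            unsigned int page_shift = 12;   /* 4 KiB pages */
            unsigned int bits = guest_maxphyaddr(eax);

            /* Zero means "not enumerated": fall back to the raw value. */
            bits = bits ? bits : pa_bits;
            printf("max_gfn = %#llx\n", (1ULL << (bits - page_shift)) - 1);
            return 0;
    }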
*/ if (!host_cpu_is_amd) diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index c9fcbc6e5121e64a1170d086643d199b944b4385..f64ec79d772ee279947c0000ac6de7663a0caa64 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -117,8 +117,8 @@ TEST_FILES += va_high_addr_switch.sh include ../lib.mk -$(TEST_GEN_PROGS): vm_util.c -$(TEST_GEN_FILES): vm_util.c +$(TEST_GEN_PROGS): vm_util.c thp_settings.c +$(TEST_GEN_FILES): vm_util.c thp_settings.c $(OUTPUT)/uffd-stress: uffd-common.c $(OUTPUT)/uffd-unit-tests: uffd-common.c diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c index 6f2f839904416c4d1dce542b2dd04250e2bddaa2..363bf5f801be5f579dc6e32683401d9009ae002d 100644 --- a/tools/testing/selftests/mm/cow.c +++ b/tools/testing/selftests/mm/cow.c @@ -29,15 +29,49 @@ #include "../../../../mm/gup_test.h" #include "../kselftest.h" #include "vm_util.h" +#include "thp_settings.h" static size_t pagesize; static int pagemap_fd; -static size_t thpsize; +static size_t pmdsize; +static int nr_thpsizes; +static size_t thpsizes[20]; static int nr_hugetlbsizes; static size_t hugetlbsizes[10]; static int gup_fd; static bool has_huge_zeropage; +static int sz2ord(size_t size) +{ + return __builtin_ctzll(size / pagesize); +} + +static int detect_thp_sizes(size_t sizes[], int max) +{ + int count = 0; + unsigned long orders; + size_t kb; + int i; + + /* thp not supported at all. */ + if (!pmdsize) + return 0; + + orders = 1UL << sz2ord(pmdsize); + orders |= thp_supported_orders(); + + for (i = 0; orders && count < max; i++) { + if (!(orders & (1UL << i))) + continue; + orders &= ~(1UL << i); + kb = (pagesize >> 10) << i; + sizes[count++] = kb * 1024; + ksft_print_msg("[INFO] detected THP size: %zu KiB\n", kb); + } + + return count; +} + static void detect_huge_zeropage(void) { int fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page", @@ -734,7 +768,7 @@ enum thp_run { THP_RUN_PARTIAL_SHARED, }; -static void do_run_with_thp(test_fn fn, enum thp_run thp_run) +static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize) { char *mem, *mmap_mem, *tmp, *mremap_mem = MAP_FAILED; size_t size, mmap_size, mremap_size; @@ -759,11 +793,11 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run) } /* - * Try to populate a THP. Touch the first sub-page and test if we get - * another sub-page populated automatically. + * Try to populate a THP. Touch the first sub-page and test if + * we get the last sub-page populated automatically. */ mem[0] = 0; - if (!pagemap_is_populated(pagemap_fd, mem + pagesize)) { + if (!pagemap_is_populated(pagemap_fd, mem + thpsize - pagesize)) { ksft_test_result_skip("Did not get a THP populated\n"); goto munmap; } @@ -773,12 +807,14 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run) switch (thp_run) { case THP_RUN_PMD: case THP_RUN_PMD_SWAPOUT: + assert(thpsize == pmdsize); break; case THP_RUN_PTE: case THP_RUN_PTE_SWAPOUT: /* * Trigger PTE-mapping the THP by temporarily mapping a single - * subpage R/O. + * subpage R/O. This is a noop if the THP is not pmdsize (and + * therefore already PTE-mapped). */ ret = mprotect(mem + pagesize, pagesize, PROT_READ); if (ret) { @@ -875,52 +911,60 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run) munmap(mremap_mem, mremap_size); } -static void run_with_thp(test_fn fn, const char *desc) +static void run_with_thp(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... 
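sz2ord() above turns a THP size into a page order by counting the trailing zero bits of size / pagesize, and detect_thp_sizes() walks a bitmask of supported orders back into byte sizes. A worked round trip of that conversion, assuming 4 KiB base pages::

    #include <stdio.h>

    int main(void)
    {
            unsigned long pagesize = 4096;          /* assumed base page */
            unsigned long thpsize = 2UL << 20;      /* 2 MiB PMD THP */

            /* size -> order: 2M / 4K = 512 pages = 2^9, so order 9 */
            int order = __builtin_ctzll(thpsize / pagesize);

            /* order -> size in KiB, as detect_thp_sizes() computes it */
            unsigned long kb = (pagesize >> 10) << order;

            printf("order %d <-> %lu KiB\n", order, kb);
            return 0;
    }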
with THP\n", desc); - do_run_with_thp(fn, THP_RUN_PMD); + ksft_print_msg("[RUN] %s ... with THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PMD, size); } -static void run_with_thp_swap(test_fn fn, const char *desc) +static void run_with_thp_swap(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with swapped-out THP\n", desc); - do_run_with_thp(fn, THP_RUN_PMD_SWAPOUT); + ksft_print_msg("[RUN] %s ... with swapped-out THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PMD_SWAPOUT, size); } -static void run_with_pte_mapped_thp(test_fn fn, const char *desc) +static void run_with_pte_mapped_thp(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with PTE-mapped THP\n", desc); - do_run_with_thp(fn, THP_RUN_PTE); + ksft_print_msg("[RUN] %s ... with PTE-mapped THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PTE, size); } -static void run_with_pte_mapped_thp_swap(test_fn fn, const char *desc) +static void run_with_pte_mapped_thp_swap(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with swapped-out, PTE-mapped THP\n", desc); - do_run_with_thp(fn, THP_RUN_PTE_SWAPOUT); + ksft_print_msg("[RUN] %s ... with swapped-out, PTE-mapped THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PTE_SWAPOUT, size); } -static void run_with_single_pte_of_thp(test_fn fn, const char *desc) +static void run_with_single_pte_of_thp(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with single PTE of THP\n", desc); - do_run_with_thp(fn, THP_RUN_SINGLE_PTE); + ksft_print_msg("[RUN] %s ... with single PTE of THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_SINGLE_PTE, size); } -static void run_with_single_pte_of_thp_swap(test_fn fn, const char *desc) +static void run_with_single_pte_of_thp_swap(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with single PTE of swapped-out THP\n", desc); - do_run_with_thp(fn, THP_RUN_SINGLE_PTE_SWAPOUT); + ksft_print_msg("[RUN] %s ... with single PTE of swapped-out THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_SINGLE_PTE_SWAPOUT, size); } -static void run_with_partial_mremap_thp(test_fn fn, const char *desc) +static void run_with_partial_mremap_thp(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with partially mremap()'ed THP\n", desc); - do_run_with_thp(fn, THP_RUN_PARTIAL_MREMAP); + ksft_print_msg("[RUN] %s ... with partially mremap()'ed THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PARTIAL_MREMAP, size); } -static void run_with_partial_shared_thp(test_fn fn, const char *desc) +static void run_with_partial_shared_thp(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with partially shared THP\n", desc); - do_run_with_thp(fn, THP_RUN_PARTIAL_SHARED); + ksft_print_msg("[RUN] %s ... 
with partially shared THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PARTIAL_SHARED, size); } static void run_with_hugetlb(test_fn fn, const char *desc, size_t hugetlbsize) @@ -1091,15 +1135,27 @@ static void run_anon_test_case(struct test_case const *test_case) run_with_base_page(test_case->fn, test_case->desc); run_with_base_page_swap(test_case->fn, test_case->desc); - if (thpsize) { - run_with_thp(test_case->fn, test_case->desc); - run_with_thp_swap(test_case->fn, test_case->desc); - run_with_pte_mapped_thp(test_case->fn, test_case->desc); - run_with_pte_mapped_thp_swap(test_case->fn, test_case->desc); - run_with_single_pte_of_thp(test_case->fn, test_case->desc); - run_with_single_pte_of_thp_swap(test_case->fn, test_case->desc); - run_with_partial_mremap_thp(test_case->fn, test_case->desc); - run_with_partial_shared_thp(test_case->fn, test_case->desc); + for (i = 0; i < nr_thpsizes; i++) { + size_t size = thpsizes[i]; + struct thp_settings settings = *thp_current_settings(); + + settings.hugepages[sz2ord(pmdsize)].enabled = THP_NEVER; + settings.hugepages[sz2ord(size)].enabled = THP_ALWAYS; + thp_push_settings(&settings); + + if (size == pmdsize) { + run_with_thp(test_case->fn, test_case->desc, size); + run_with_thp_swap(test_case->fn, test_case->desc, size); + } + + run_with_pte_mapped_thp(test_case->fn, test_case->desc, size); + run_with_pte_mapped_thp_swap(test_case->fn, test_case->desc, size); + run_with_single_pte_of_thp(test_case->fn, test_case->desc, size); + run_with_single_pte_of_thp_swap(test_case->fn, test_case->desc, size); + run_with_partial_mremap_thp(test_case->fn, test_case->desc, size); + run_with_partial_shared_thp(test_case->fn, test_case->desc, size); + + thp_pop_settings(); } for (i = 0; i < nr_hugetlbsizes; i++) run_with_hugetlb(test_case->fn, test_case->desc, @@ -1120,8 +1176,9 @@ static int tests_per_anon_test_case(void) { int tests = 2 + nr_hugetlbsizes; - if (thpsize) - tests += 8; + tests += 6 * nr_thpsizes; + if (pmdsize) + tests += 2; return tests; } @@ -1329,7 +1386,7 @@ static void run_anon_thp_test_cases(void) { int i; - if (!thpsize) + if (!pmdsize) return; ksft_print_msg("[INFO] Anonymous THP tests\n"); @@ -1338,13 +1395,13 @@ static void run_anon_thp_test_cases(void) struct test_case const *test_case = &anon_thp_test_cases[i]; ksft_print_msg("[RUN] %s\n", test_case->desc); - do_run_with_thp(test_case->fn, THP_RUN_PMD); + do_run_with_thp(test_case->fn, THP_RUN_PMD, pmdsize); } } static int tests_per_anon_thp_test_case(void) { - return thpsize ? 1 : 0; + return pmdsize ? 1 : 0; } typedef void (*non_anon_test_fn)(char *mem, const char *smem, size_t size); @@ -1419,7 +1476,7 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc) } /* For alignment purposes, we need twice the thp size. */ - mmap_size = 2 * thpsize; + mmap_size = 2 * pmdsize; mmap_mem = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (mmap_mem == MAP_FAILED) { @@ -1434,11 +1491,11 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc) } /* We need a THP-aligned memory area. 
*/ - mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1)); - smem = (char *)(((uintptr_t)mmap_smem + thpsize) & ~(thpsize - 1)); + mem = (char *)(((uintptr_t)mmap_mem + pmdsize) & ~(pmdsize - 1)); + smem = (char *)(((uintptr_t)mmap_smem + pmdsize) & ~(pmdsize - 1)); - ret = madvise(mem, thpsize, MADV_HUGEPAGE); - ret |= madvise(smem, thpsize, MADV_HUGEPAGE); + ret = madvise(mem, pmdsize, MADV_HUGEPAGE); + ret |= madvise(smem, pmdsize, MADV_HUGEPAGE); if (ret) { ksft_test_result_fail("MADV_HUGEPAGE failed\n"); goto munmap; @@ -1457,7 +1514,7 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc) goto munmap; } - fn(mem, smem, thpsize); + fn(mem, smem, pmdsize); munmap: munmap(mmap_mem, mmap_size); if (mmap_smem != MAP_FAILED) @@ -1650,7 +1707,7 @@ static void run_non_anon_test_case(struct non_anon_test_case const *test_case) run_with_zeropage(test_case->fn, test_case->desc); run_with_memfd(test_case->fn, test_case->desc); run_with_tmpfile(test_case->fn, test_case->desc); - if (thpsize) + if (pmdsize) run_with_huge_zeropage(test_case->fn, test_case->desc); for (i = 0; i < nr_hugetlbsizes; i++) run_with_memfd_hugetlb(test_case->fn, test_case->desc, @@ -1671,7 +1728,7 @@ static int tests_per_non_anon_test_case(void) { int tests = 3 + nr_hugetlbsizes; - if (thpsize) + if (pmdsize) tests += 1; return tests; } @@ -1679,14 +1736,23 @@ static int tests_per_non_anon_test_case(void) int main(int argc, char **argv) { int err; + struct thp_settings default_settings; ksft_print_header(); pagesize = getpagesize(); - thpsize = read_pmd_pagesize(); - if (thpsize) - ksft_print_msg("[INFO] detected THP size: %zu KiB\n", - thpsize / 1024); + pmdsize = read_pmd_pagesize(); + if (pmdsize) { + /* Only if THP is supported. */ + thp_read_settings(&default_settings); + default_settings.hugepages[sz2ord(pmdsize)].enabled = THP_INHERIT; + thp_save_settings(); + thp_push_settings(&default_settings); + + ksft_print_msg("[INFO] detected PMD size: %zu KiB\n", + pmdsize / 1024); + nr_thpsizes = detect_thp_sizes(thpsizes, ARRAY_SIZE(thpsizes)); + } nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes, ARRAY_SIZE(hugetlbsizes)); detect_huge_zeropage(); @@ -1704,6 +1770,11 @@ int main(int argc, char **argv) run_anon_thp_test_cases(); run_non_anon_test_cases(); + if (pmdsize) { + /* Only if THP is supported. 
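run_with_huge_zeropage() above carves a pmdsize-aligned pointer out of a doubled mapping with (addr + pmdsize) & ~(pmdsize - 1); the mask trick requires pmdsize to be a power of two, and because the expression always advances by a full pmdsize the mapping must be 2 * pmdsize long. A standalone sketch with a fabricated mmap address::

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uintptr_t pmdsize = 2UL << 20;          /* 2 MiB, power of two */
            uintptr_t addr = 0x7f1234567000UL;      /* fabricated mmap result */

            /* Round up to the next pmdsize boundary. */
            uintptr_t aligned = (addr + pmdsize) & ~(pmdsize - 1);

            printf("%#lx -> %#lx\n", (unsigned long)addr,
                   (unsigned long)aligned);
            return 0;
    }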
*/ + thp_restore_settings(); + } + err = ksft_get_fail_cnt(); if (err) ksft_exit_fail_msg("%d out of %d tests failed\n", diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c index 030667cb55337735355bb0bb833f4a5a56cfbdee..56d4480e8d3cbfc4c20af9c48026e2c34f0fa39e 100644 --- a/tools/testing/selftests/mm/khugepaged.c +++ b/tools/testing/selftests/mm/khugepaged.c @@ -22,13 +22,14 @@ #include "linux/magic.h" #include "vm_util.h" +#include "thp_settings.h" #define BASE_ADDR ((void *)(1UL << 30)) static unsigned long hpage_pmd_size; static unsigned long page_size; static int hpage_pmd_nr; +static int anon_order; -#define THP_SYSFS "/sys/kernel/mm/transparent_hugepage/" #define PID_SMAPS "/proc/self/smaps" #define TEST_FILE "collapse_test_file" @@ -71,78 +72,7 @@ struct file_info { }; static struct file_info finfo; - -enum thp_enabled { - THP_ALWAYS, - THP_MADVISE, - THP_NEVER, -}; - -static const char *thp_enabled_strings[] = { - "always", - "madvise", - "never", - NULL -}; - -enum thp_defrag { - THP_DEFRAG_ALWAYS, - THP_DEFRAG_DEFER, - THP_DEFRAG_DEFER_MADVISE, - THP_DEFRAG_MADVISE, - THP_DEFRAG_NEVER, -}; - -static const char *thp_defrag_strings[] = { - "always", - "defer", - "defer+madvise", - "madvise", - "never", - NULL -}; - -enum shmem_enabled { - SHMEM_ALWAYS, - SHMEM_WITHIN_SIZE, - SHMEM_ADVISE, - SHMEM_NEVER, - SHMEM_DENY, - SHMEM_FORCE, -}; - -static const char *shmem_enabled_strings[] = { - "always", - "within_size", - "advise", - "never", - "deny", - "force", - NULL -}; - -struct khugepaged_settings { - bool defrag; - unsigned int alloc_sleep_millisecs; - unsigned int scan_sleep_millisecs; - unsigned int max_ptes_none; - unsigned int max_ptes_swap; - unsigned int max_ptes_shared; - unsigned long pages_to_scan; -}; - -struct settings { - enum thp_enabled thp_enabled; - enum thp_defrag thp_defrag; - enum shmem_enabled shmem_enabled; - bool use_zero_page; - struct khugepaged_settings khugepaged; - unsigned long read_ahead_kb; -}; - -static struct settings saved_settings; static bool skip_settings_restore; - static int exit_status; static void success(const char *msg) @@ -161,260 +91,34 @@ static void skip(const char *msg) printf(" \e[33m%s\e[0m\n", msg); } -static int read_file(const char *path, char *buf, size_t buflen) -{ - int fd; - ssize_t numread; - - fd = open(path, O_RDONLY); - if (fd == -1) - return 0; - - numread = read(fd, buf, buflen - 1); - if (numread < 1) { - close(fd); - return 0; - } - - buf[numread] = '\0'; - close(fd); - - return (unsigned int) numread; -} - -static int write_file(const char *path, const char *buf, size_t buflen) -{ - int fd; - ssize_t numwritten; - - fd = open(path, O_WRONLY); - if (fd == -1) { - printf("open(%s)\n", path); - exit(EXIT_FAILURE); - return 0; - } - - numwritten = write(fd, buf, buflen - 1); - close(fd); - if (numwritten < 1) { - printf("write(%s)\n", buf); - exit(EXIT_FAILURE); - return 0; - } - - return (unsigned int) numwritten; -} - -static int read_string(const char *name, const char *strings[]) +static void restore_settings_atexit(void) { - char path[PATH_MAX]; - char buf[256]; - char *c; - int ret; - - ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); - if (ret >= PATH_MAX) { - printf("%s: Pathname is too long\n", __func__); - exit(EXIT_FAILURE); - } - - if (!read_file(path, buf, sizeof(buf))) { - perror(path); - exit(EXIT_FAILURE); - } - - c = strchr(buf, '['); - if (!c) { - printf("%s: Parse failure\n", __func__); - exit(EXIT_FAILURE); - } - - c++; - memmove(buf, c, sizeof(buf) - (c 
- buf)); - - c = strchr(buf, ']'); - if (!c) { - printf("%s: Parse failure\n", __func__); - exit(EXIT_FAILURE); - } - *c = '\0'; - - ret = 0; - while (strings[ret]) { - if (!strcmp(strings[ret], buf)) - return ret; - ret++; - } - - printf("Failed to parse %s\n", name); - exit(EXIT_FAILURE); -} - -static void write_string(const char *name, const char *val) -{ - char path[PATH_MAX]; - int ret; - - ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); - if (ret >= PATH_MAX) { - printf("%s: Pathname is too long\n", __func__); - exit(EXIT_FAILURE); - } - - if (!write_file(path, val, strlen(val) + 1)) { - perror(path); - exit(EXIT_FAILURE); - } -} - -static const unsigned long _read_num(const char *path) -{ - char buf[21]; - - if (read_file(path, buf, sizeof(buf)) < 0) { - perror("read_file(read_num)"); - exit(EXIT_FAILURE); - } - - return strtoul(buf, NULL, 10); -} - -static const unsigned long read_num(const char *name) -{ - char path[PATH_MAX]; - int ret; - - ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); - if (ret >= PATH_MAX) { - printf("%s: Pathname is too long\n", __func__); - exit(EXIT_FAILURE); - } - return _read_num(path); -} - -static void _write_num(const char *path, unsigned long num) -{ - char buf[21]; - - sprintf(buf, "%ld", num); - if (!write_file(path, buf, strlen(buf) + 1)) { - perror(path); - exit(EXIT_FAILURE); - } -} - -static void write_num(const char *name, unsigned long num) -{ - char path[PATH_MAX]; - int ret; - - ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); - if (ret >= PATH_MAX) { - printf("%s: Pathname is too long\n", __func__); - exit(EXIT_FAILURE); - } - _write_num(path, num); -} - -static void write_settings(struct settings *settings) -{ - struct khugepaged_settings *khugepaged = &settings->khugepaged; - - write_string("enabled", thp_enabled_strings[settings->thp_enabled]); - write_string("defrag", thp_defrag_strings[settings->thp_defrag]); - write_string("shmem_enabled", - shmem_enabled_strings[settings->shmem_enabled]); - write_num("use_zero_page", settings->use_zero_page); - - write_num("khugepaged/defrag", khugepaged->defrag); - write_num("khugepaged/alloc_sleep_millisecs", - khugepaged->alloc_sleep_millisecs); - write_num("khugepaged/scan_sleep_millisecs", - khugepaged->scan_sleep_millisecs); - write_num("khugepaged/max_ptes_none", khugepaged->max_ptes_none); - write_num("khugepaged/max_ptes_swap", khugepaged->max_ptes_swap); - write_num("khugepaged/max_ptes_shared", khugepaged->max_ptes_shared); - write_num("khugepaged/pages_to_scan", khugepaged->pages_to_scan); - - if (file_ops && finfo.type == VMA_FILE) - _write_num(finfo.dev_queue_read_ahead_path, - settings->read_ahead_kb); -} - -#define MAX_SETTINGS_DEPTH 4 -static struct settings settings_stack[MAX_SETTINGS_DEPTH]; -static int settings_index; - -static struct settings *current_settings(void) -{ - if (!settings_index) { - printf("Fail: No settings set"); - exit(EXIT_FAILURE); - } - return settings_stack + settings_index - 1; -} + if (skip_settings_restore) + return; -static void push_settings(struct settings *settings) -{ - if (settings_index >= MAX_SETTINGS_DEPTH) { - printf("Fail: Settings stack exceeded"); - exit(EXIT_FAILURE); - } - settings_stack[settings_index++] = *settings; - write_settings(current_settings()); -} + printf("Restore THP and khugepaged settings..."); + thp_restore_settings(); + success("OK"); -static void pop_settings(void) -{ - if (settings_index <= 0) { - printf("Fail: Settings stack empty"); - exit(EXIT_FAILURE); - } - --settings_index; - 
write_settings(current_settings()); + skip_settings_restore = true; } static void restore_settings(int sig) { - if (skip_settings_restore) - goto out; - - printf("Restore THP and khugepaged settings..."); - write_settings(&saved_settings); - success("OK"); - if (sig) - exit(EXIT_FAILURE); -out: - exit(exit_status); + /* exit() will invoke the restore_settings_atexit handler. */ + exit(sig ? EXIT_FAILURE : exit_status); } static void save_settings(void) { printf("Save THP and khugepaged settings..."); - saved_settings = (struct settings) { - .thp_enabled = read_string("enabled", thp_enabled_strings), - .thp_defrag = read_string("defrag", thp_defrag_strings), - .shmem_enabled = - read_string("shmem_enabled", shmem_enabled_strings), - .use_zero_page = read_num("use_zero_page"), - }; - saved_settings.khugepaged = (struct khugepaged_settings) { - .defrag = read_num("khugepaged/defrag"), - .alloc_sleep_millisecs = - read_num("khugepaged/alloc_sleep_millisecs"), - .scan_sleep_millisecs = - read_num("khugepaged/scan_sleep_millisecs"), - .max_ptes_none = read_num("khugepaged/max_ptes_none"), - .max_ptes_swap = read_num("khugepaged/max_ptes_swap"), - .max_ptes_shared = read_num("khugepaged/max_ptes_shared"), - .pages_to_scan = read_num("khugepaged/pages_to_scan"), - }; if (file_ops && finfo.type == VMA_FILE) - saved_settings.read_ahead_kb = - _read_num(finfo.dev_queue_read_ahead_path); + thp_set_read_ahead_path(finfo.dev_queue_read_ahead_path); + thp_save_settings(); success("OK"); + atexit(restore_settings_atexit); signal(SIGTERM, restore_settings); signal(SIGINT, restore_settings); signal(SIGHUP, restore_settings); @@ -793,7 +497,7 @@ static void __madvise_collapse(const char *msg, char *p, int nr_hpages, struct mem_ops *ops, bool expect) { int ret; - struct settings settings = *current_settings(); + struct thp_settings settings = *thp_current_settings(); printf("%s...", msg); @@ -803,7 +507,7 @@ static void __madvise_collapse(const char *msg, char *p, int nr_hpages, */ settings.thp_enabled = THP_NEVER; settings.shmem_enabled = SHMEM_NEVER; - push_settings(&settings); + thp_push_settings(&settings); /* Clear VM_NOHUGEPAGE */ madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE); @@ -815,7 +519,7 @@ static void __madvise_collapse(const char *msg, char *p, int nr_hpages, else success("OK"); - pop_settings(); + thp_pop_settings(); } static void madvise_collapse(const char *msg, char *p, int nr_hpages, @@ -845,13 +549,13 @@ static bool wait_for_scan(const char *msg, char *p, int nr_hpages, madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE); /* Wait until the second full_scan completed */ - full_scans = read_num("khugepaged/full_scans") + 2; + full_scans = thp_read_num("khugepaged/full_scans") + 2; printf("%s...", msg); while (timeout--) { if (ops->check_huge(p, nr_hpages)) break; - if (read_num("khugepaged/full_scans") >= full_scans) + if (thp_read_num("khugepaged/full_scans") >= full_scans) break; printf("."); usleep(TICK); @@ -904,13 +608,18 @@ static bool is_tmpfs(struct mem_ops *ops) return ops == &__file_ops && finfo.type == VMA_SHMEM; } +static bool is_anon(struct mem_ops *ops) +{ + return ops == &__anon_ops; +} + static void alloc_at_fault(void) { - struct settings settings = *current_settings(); + struct thp_settings settings = *thp_current_settings(); char *p; settings.thp_enabled = THP_ALWAYS; - push_settings(&settings); + thp_push_settings(&settings); p = alloc_mapping(1); *p = 1; @@ -920,7 +629,7 @@ static void alloc_at_fault(void) else fail("Fail"); - pop_settings(); + 
thp_pop_settings(); madvise(p, page_size, MADV_DONTNEED); printf("Split huge PMD on MADV_DONTNEED..."); @@ -968,11 +677,12 @@ static void collapse_single_pte_entry(struct collapse_context *c, struct mem_ops static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *ops) { int max_ptes_none = hpage_pmd_nr / 2; - struct settings settings = *current_settings(); + struct thp_settings settings = *thp_current_settings(); void *p; + int fault_nr_pages = is_anon(ops) ? 1 << anon_order : 1; settings.khugepaged.max_ptes_none = max_ptes_none; - push_settings(&settings); + thp_push_settings(&settings); p = ops->setup_area(1); @@ -983,10 +693,10 @@ static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *o goto skip; } - ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); + ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none - fault_nr_pages) * page_size); c->collapse("Maybe collapse with max_ptes_none exceeded", p, 1, ops, !c->enforce_pte_scan_limits); - validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); + validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - fault_nr_pages) * page_size); if (c->enforce_pte_scan_limits) { ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size); @@ -997,7 +707,7 @@ static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *o } skip: ops->cleanup_area(p, hpage_pmd_size); - pop_settings(); + thp_pop_settings(); } static void collapse_swapin_single_pte(struct collapse_context *c, struct mem_ops *ops) @@ -1028,7 +738,7 @@ static void collapse_swapin_single_pte(struct collapse_context *c, struct mem_op static void collapse_max_ptes_swap(struct collapse_context *c, struct mem_ops *ops) { - int max_ptes_swap = read_num("khugepaged/max_ptes_swap"); + int max_ptes_swap = thp_read_num("khugepaged/max_ptes_swap"); void *p; p = ops->setup_area(1); @@ -1245,11 +955,11 @@ static void collapse_fork_compound(struct collapse_context *c, struct mem_ops *o fail("Fail"); ops->fault(p, 0, page_size); - write_num("khugepaged/max_ptes_shared", hpage_pmd_nr - 1); + thp_write_num("khugepaged/max_ptes_shared", hpage_pmd_nr - 1); c->collapse("Collapse PTE table full of compound pages in child", p, 1, ops, true); - write_num("khugepaged/max_ptes_shared", - current_settings()->khugepaged.max_ptes_shared); + thp_write_num("khugepaged/max_ptes_shared", + thp_current_settings()->khugepaged.max_ptes_shared); validate_memory(p, 0, hpage_pmd_size); ops->cleanup_area(p, hpage_pmd_size); @@ -1270,7 +980,7 @@ static void collapse_fork_compound(struct collapse_context *c, struct mem_ops *o static void collapse_max_ptes_shared(struct collapse_context *c, struct mem_ops *ops) { - int max_ptes_shared = read_num("khugepaged/max_ptes_shared"); + int max_ptes_shared = thp_read_num("khugepaged/max_ptes_shared"); int wstatus; void *p; @@ -1373,7 +1083,7 @@ static void madvise_retracted_page_tables(struct collapse_context *c, static void usage(void) { - fprintf(stderr, "\nUsage: ./khugepaged [dir]\n\n"); + fprintf(stderr, "\nUsage: ./khugepaged [OPTIONS] [dir]\n\n"); fprintf(stderr, "\t\t: :\n"); fprintf(stderr, "\t\t: [all|khugepaged|madvise]\n"); fprintf(stderr, "\t\t: [all|anon|file|shmem]\n"); @@ -1382,15 +1092,34 @@ static void usage(void) fprintf(stderr, "\tCONFIG_READ_ONLY_THP_FOR_FS=y\n"); fprintf(stderr, "\n\tif [dir] is a (sub)directory of a tmpfs mount, tmpfs must be\n"); fprintf(stderr, "\tmounted with huge=madvise option for khugepaged tests to work\n"); + fprintf(stderr, "\n\tSupported Options:\n"); + 
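With the new -s option, collapse_max_ptes_none() has to account for anonymous faults populating 1 << anon_order pages at a time, so it backs off by fault_nr_pages rather than a single page when it wants to sit exactly at the max_ptes_none limit. A quick arithmetic sketch, assuming hpage_pmd_nr = 512 (a 2 MiB PMD on 4 KiB pages)::

    #include <stdio.h>

    int main(void)
    {
            int hpage_pmd_nr = 512;                 /* assumed: 2M / 4K */
            int max_ptes_none = hpage_pmd_nr / 2;   /* as set by the test */
            int anon_order;

            for (anon_order = 0; anon_order <= 4; anon_order += 2) {
                    int fault_nr_pages = 1 << anon_order;

                    /* Pages faulted in so the none-PTEs equal the limit. */
                    printf("order %d: fault %d pages\n", anon_order,
                           hpage_pmd_nr - max_ptes_none - fault_nr_pages);
            }
            return 0;
    }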
fprintf(stderr, "\t\t-h: This help message.\n"); + fprintf(stderr, "\t\t-s: mTHP size, expressed as page order.\n"); + fprintf(stderr, "\t\t Defaults to 0. Use this size for anon or shmem allocations.\n"); exit(1); } -static void parse_test_type(int argc, const char **argv) +static void parse_test_type(int argc, char **argv) { + int opt; char *buf; const char *token; - if (argc == 1) { + while ((opt = getopt(argc, argv, "s:h")) != -1) { + switch (opt) { + case 's': + anon_order = atoi(optarg); + break; + case 'h': + default: + usage(); + } + } + + argv += optind; + argc -= optind; + + if (argc == 0) { /* Backwards compatibility */ khugepaged_context = &__khugepaged_context; madvise_context = &__madvise_context; @@ -1398,7 +1127,7 @@ static void parse_test_type(int argc, const char **argv) return; } - buf = strdup(argv[1]); + buf = strdup(argv[0]); token = strsep(&buf, ":"); if (!strcmp(token, "all")) { @@ -1432,13 +1161,16 @@ static void parse_test_type(int argc, const char **argv) if (!file_ops) return; - if (argc != 3) + if (argc != 2) usage(); + + get_finfo(argv[1]); } -int main(int argc, const char **argv) +int main(int argc, char **argv) { - struct settings default_settings = { + int hpage_pmd_order; + struct thp_settings default_settings = { .thp_enabled = THP_MADVISE, .thp_defrag = THP_DEFRAG_ALWAYS, .shmem_enabled = SHMEM_ADVISE, @@ -1460,9 +1192,6 @@ int main(int argc, const char **argv) parse_test_type(argc, argv); - if (file_ops) - get_finfo(argv[2]); - setbuf(stdout, NULL); page_size = getpagesize(); @@ -1472,14 +1201,19 @@ int main(int argc, const char **argv) exit(EXIT_FAILURE); } hpage_pmd_nr = hpage_pmd_size / page_size; + hpage_pmd_order = __builtin_ctz(hpage_pmd_nr); default_settings.khugepaged.max_ptes_none = hpage_pmd_nr - 1; default_settings.khugepaged.max_ptes_swap = hpage_pmd_nr / 8; default_settings.khugepaged.max_ptes_shared = hpage_pmd_nr / 2; default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8; + default_settings.hugepages[hpage_pmd_order].enabled = THP_INHERIT; + default_settings.hugepages[anon_order].enabled = THP_ALWAYS; + default_settings.shmem_hugepages[hpage_pmd_order].enabled = SHMEM_INHERIT; + default_settings.shmem_hugepages[anon_order].enabled = SHMEM_ALWAYS; save_settings(); - push_settings(&default_settings); + thp_push_settings(&default_settings); alloc_at_fault(); diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c index 9b298f6a04b371a6521ac8c9cca069be6a08a4ae..9a0597310a76511c8f01d08b58fc780730d01a63 100644 --- a/tools/testing/selftests/mm/memfd_secret.c +++ b/tools/testing/selftests/mm/memfd_secret.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "../kselftest.h" @@ -83,6 +84,45 @@ static void test_mlock_limit(int fd) pass("mlock limit is respected\n"); } +static void test_vmsplice(int fd, const char *desc) +{ + ssize_t transferred; + struct iovec iov; + int pipefd[2]; + char *mem; + + if (pipe(pipefd)) { + fail("pipe failed: %s\n", strerror(errno)); + return; + } + + mem = mmap(NULL, page_size, prot, mode, fd, 0); + if (mem == MAP_FAILED) { + fail("Unable to mmap secret memory\n"); + goto close_pipe; + } + + /* + * vmsplice() may use GUP-fast, which must also fail. Prefault the + * page table, so GUP-fast could find it. 
+ */ + memset(mem, PATTERN, page_size); + + iov.iov_base = mem; + iov.iov_len = page_size; + transferred = vmsplice(pipefd[1], &iov, 1, 0); + + if (transferred < 0 && errno == EFAULT) + pass("vmsplice is blocked as expected with %s\n", desc); + else + fail("vmsplice: unexpected memory access with %s\n", desc); + + munmap(mem, page_size); +close_pipe: + close(pipefd[0]); + close(pipefd[1]); +} + static void try_process_vm_read(int fd, int pipefd[2]) { struct iovec liov, riov; @@ -187,7 +227,6 @@ static void test_remote_access(int fd, const char *name, return; } - ftruncate(fd, page_size); memset(mem, PATTERN, page_size); if (write(pipefd[1], &mem, sizeof(mem)) < 0) { @@ -258,7 +297,7 @@ static void prepare(void) strerror(errno)); } -#define NUM_TESTS 4 +#define NUM_TESTS 6 int main(int argc, char *argv[]) { @@ -277,9 +316,17 @@ int main(int argc, char *argv[]) ksft_exit_fail_msg("memfd_secret failed: %s\n", strerror(errno)); } + if (ftruncate(fd, page_size)) + ksft_exit_fail_msg("ftruncate failed: %s\n", strerror(errno)); test_mlock_limit(fd); test_file_apis(fd); + /* + * We have to run the first vmsplice test before any secretmem page was + * allocated for this fd. + */ + test_vmsplice(fd, "fresh page"); + test_vmsplice(fd, "existing page"); test_process_vm_read(fd); test_ptrace(fd); diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index d7b2c9d07eec5313ce874d30141c09333167c2af..8ec99d704d06e663983e26e9b7e7f2beefbbc2ba 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -377,6 +377,8 @@ CATEGORY="cow" run_test ./cow CATEGORY="thp" run_test ./khugepaged +CATEGORY="thp" run_test ./khugepaged -s 2 + CATEGORY="thp" run_test ./transhuge-stress -d 20 CATEGORY="thp" run_test ./split_huge_page_test diff --git a/tools/testing/selftests/mm/thp_settings.c b/tools/testing/selftests/mm/thp_settings.c new file mode 100644 index 0000000000000000000000000000000000000000..577eaab6266fd94528b445959b0fa8d08cd840ac --- /dev/null +++ b/tools/testing/selftests/mm/thp_settings.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +#include "thp_settings.h" + +#define THP_SYSFS "/sys/kernel/mm/transparent_hugepage/" +#define MAX_SETTINGS_DEPTH 4 +static struct thp_settings settings_stack[MAX_SETTINGS_DEPTH]; +static int settings_index; +static struct thp_settings saved_settings; +static char dev_queue_read_ahead_path[PATH_MAX]; + +static const char * const thp_enabled_strings[] = { + "never", + "always", + "inherit", + "madvise", + NULL +}; + +static const char * const thp_defrag_strings[] = { + "always", + "defer", + "defer+madvise", + "madvise", + "never", + NULL +}; + +static const char * const shmem_enabled_strings[] = { + "never", + "always", + "within_size", + "advise", + "inherit", + "deny", + "force", + NULL +}; + +int read_file(const char *path, char *buf, size_t buflen) +{ + int fd; + ssize_t numread; + + fd = open(path, O_RDONLY); + if (fd == -1) + return 0; + + numread = read(fd, buf, buflen - 1); + if (numread < 1) { + close(fd); + return 0; + } + + buf[numread] = '\0'; + close(fd); + + return (unsigned int) numread; +} + +int write_file(const char *path, const char *buf, size_t buflen) +{ + int fd; + ssize_t numwritten; + + fd = open(path, O_WRONLY); + if (fd == -1) { + printf("open(%s)\n", path); + exit(EXIT_FAILURE); + return 0; + } + + numwritten = write(fd, buf, buflen - 1); + close(fd); + if (numwritten < 1) { + 
printf("write(%s)\n", buf); + exit(EXIT_FAILURE); + return 0; + } + + return (unsigned int) numwritten; +} + +const unsigned long read_num(const char *path) +{ + char buf[21]; + + if (read_file(path, buf, sizeof(buf)) < 0) { + perror("read_file()"); + exit(EXIT_FAILURE); + } + + return strtoul(buf, NULL, 10); +} + +void write_num(const char *path, unsigned long num) +{ + char buf[21]; + + sprintf(buf, "%ld", num); + if (!write_file(path, buf, strlen(buf) + 1)) { + perror(path); + exit(EXIT_FAILURE); + } +} + +int thp_read_string(const char *name, const char * const strings[]) +{ + char path[PATH_MAX]; + char buf[256]; + char *c; + int ret; + + ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); + if (ret >= PATH_MAX) { + printf("%s: Pathname is too long\n", __func__); + exit(EXIT_FAILURE); + } + + if (!read_file(path, buf, sizeof(buf))) { + perror(path); + exit(EXIT_FAILURE); + } + + c = strchr(buf, '['); + if (!c) { + printf("%s: Parse failure\n", __func__); + exit(EXIT_FAILURE); + } + + c++; + memmove(buf, c, sizeof(buf) - (c - buf)); + + c = strchr(buf, ']'); + if (!c) { + printf("%s: Parse failure\n", __func__); + exit(EXIT_FAILURE); + } + *c = '\0'; + + ret = 0; + while (strings[ret]) { + if (!strcmp(strings[ret], buf)) + return ret; + ret++; + } + + printf("Failed to parse %s\n", name); + exit(EXIT_FAILURE); +} + +void thp_write_string(const char *name, const char *val) +{ + char path[PATH_MAX]; + int ret; + + ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); + if (ret >= PATH_MAX) { + printf("%s: Pathname is too long\n", __func__); + exit(EXIT_FAILURE); + } + + if (!write_file(path, val, strlen(val) + 1)) { + perror(path); + exit(EXIT_FAILURE); + } +} + +const unsigned long thp_read_num(const char *name) +{ + char path[PATH_MAX]; + int ret; + + ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); + if (ret >= PATH_MAX) { + printf("%s: Pathname is too long\n", __func__); + exit(EXIT_FAILURE); + } + return read_num(path); +} + +void thp_write_num(const char *name, unsigned long num) +{ + char path[PATH_MAX]; + int ret; + + ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); + if (ret >= PATH_MAX) { + printf("%s: Pathname is too long\n", __func__); + exit(EXIT_FAILURE); + } + write_num(path, num); +} + +void thp_read_settings(struct thp_settings *settings) +{ + unsigned long orders = thp_supported_orders(); + unsigned long shmem_orders = thp_shmem_supported_orders(); + char path[PATH_MAX]; + int i; + + *settings = (struct thp_settings) { + .thp_enabled = thp_read_string("enabled", thp_enabled_strings), + .thp_defrag = thp_read_string("defrag", thp_defrag_strings), + .shmem_enabled = + thp_read_string("shmem_enabled", shmem_enabled_strings), + .use_zero_page = thp_read_num("use_zero_page"), + }; + settings->khugepaged = (struct khugepaged_settings) { + .defrag = thp_read_num("khugepaged/defrag"), + .alloc_sleep_millisecs = + thp_read_num("khugepaged/alloc_sleep_millisecs"), + .scan_sleep_millisecs = + thp_read_num("khugepaged/scan_sleep_millisecs"), + .max_ptes_none = thp_read_num("khugepaged/max_ptes_none"), + .max_ptes_swap = thp_read_num("khugepaged/max_ptes_swap"), + .max_ptes_shared = thp_read_num("khugepaged/max_ptes_shared"), + .pages_to_scan = thp_read_num("khugepaged/pages_to_scan"), + }; + if (dev_queue_read_ahead_path[0]) + settings->read_ahead_kb = read_num(dev_queue_read_ahead_path); + + for (i = 0; i < NR_ORDERS; i++) { + if (!((1 << i) & orders)) { + settings->hugepages[i].enabled = THP_NEVER; + continue; + } + snprintf(path, PATH_MAX, "hugepages-%ukB/enabled", + 
(getpagesize() >> 10) << i); + settings->hugepages[i].enabled = + thp_read_string(path, thp_enabled_strings); + } + + for (i = 0; i < NR_ORDERS; i++) { + if (!((1 << i) & shmem_orders)) { + settings->shmem_hugepages[i].enabled = SHMEM_NEVER; + continue; + } + snprintf(path, PATH_MAX, "hugepages-%ukB/shmem_enabled", + (getpagesize() >> 10) << i); + settings->shmem_hugepages[i].enabled = + thp_read_string(path, shmem_enabled_strings); + } +} + +void thp_write_settings(struct thp_settings *settings) +{ + struct khugepaged_settings *khugepaged = &settings->khugepaged; + unsigned long orders = thp_supported_orders(); + unsigned long shmem_orders = thp_shmem_supported_orders(); + char path[PATH_MAX]; + int enabled; + int i; + + thp_write_string("enabled", thp_enabled_strings[settings->thp_enabled]); + thp_write_string("defrag", thp_defrag_strings[settings->thp_defrag]); + thp_write_string("shmem_enabled", + shmem_enabled_strings[settings->shmem_enabled]); + thp_write_num("use_zero_page", settings->use_zero_page); + + thp_write_num("khugepaged/defrag", khugepaged->defrag); + thp_write_num("khugepaged/alloc_sleep_millisecs", + khugepaged->alloc_sleep_millisecs); + thp_write_num("khugepaged/scan_sleep_millisecs", + khugepaged->scan_sleep_millisecs); + thp_write_num("khugepaged/max_ptes_none", khugepaged->max_ptes_none); + thp_write_num("khugepaged/max_ptes_swap", khugepaged->max_ptes_swap); + thp_write_num("khugepaged/max_ptes_shared", khugepaged->max_ptes_shared); + thp_write_num("khugepaged/pages_to_scan", khugepaged->pages_to_scan); + + if (dev_queue_read_ahead_path[0]) + write_num(dev_queue_read_ahead_path, settings->read_ahead_kb); + + for (i = 0; i < NR_ORDERS; i++) { + if (!((1 << i) & orders)) + continue; + snprintf(path, PATH_MAX, "hugepages-%ukB/enabled", + (getpagesize() >> 10) << i); + enabled = settings->hugepages[i].enabled; + thp_write_string(path, thp_enabled_strings[enabled]); + } + + for (i = 0; i < NR_ORDERS; i++) { + if (!((1 << i) & shmem_orders)) + continue; + snprintf(path, PATH_MAX, "hugepages-%ukB/shmem_enabled", + (getpagesize() >> 10) << i); + enabled = settings->shmem_hugepages[i].enabled; + thp_write_string(path, shmem_enabled_strings[enabled]); + } +} + +struct thp_settings *thp_current_settings(void) +{ + if (!settings_index) { + printf("Fail: No settings set"); + exit(EXIT_FAILURE); + } + return settings_stack + settings_index - 1; +} + +void thp_push_settings(struct thp_settings *settings) +{ + if (settings_index >= MAX_SETTINGS_DEPTH) { + printf("Fail: Settings stack exceeded"); + exit(EXIT_FAILURE); + } + settings_stack[settings_index++] = *settings; + thp_write_settings(thp_current_settings()); +} + +void thp_pop_settings(void) +{ + if (settings_index <= 0) { + printf("Fail: Settings stack empty"); + exit(EXIT_FAILURE); + } + --settings_index; + thp_write_settings(thp_current_settings()); +} + +void thp_restore_settings(void) +{ + thp_write_settings(&saved_settings); +} + +void thp_save_settings(void) +{ + thp_read_settings(&saved_settings); +} + +void thp_set_read_ahead_path(char *path) +{ + if (!path) { + dev_queue_read_ahead_path[0] = '\0'; + return; + } + + strncpy(dev_queue_read_ahead_path, path, + sizeof(dev_queue_read_ahead_path)); + dev_queue_read_ahead_path[sizeof(dev_queue_read_ahead_path) - 1] = '\0'; +} + +static unsigned long __thp_supported_orders(bool is_shmem) +{ + unsigned long orders = 0; + char path[PATH_MAX]; + char buf[256]; + int ret, i; + char anon_dir[] = "enabled"; + char shmem_dir[] = "shmem_enabled"; + + for (i = 0; i < NR_ORDERS; i++) 
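+	/*
+	 * Probe one order per iteration: the kernel creates a
+	 * hugepages-<size>kB/ sysfs directory only for the orders it
+	 * supports, so a successful read of that directory's enabled
+	 * (or shmem_enabled) file sets bit i in the returned mask.
+	 */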
{
+		ret = snprintf(path, PATH_MAX, THP_SYSFS "hugepages-%ukB/%s",
+			       (getpagesize() >> 10) << i, is_shmem ? shmem_dir : anon_dir);
+		if (ret >= PATH_MAX) {
+			printf("%s: Pathname is too long\n", __func__);
+			exit(EXIT_FAILURE);
+		}
+
+		ret = read_file(path, buf, sizeof(buf));
+		if (ret)
+			orders |= 1UL << i;
+	}
+
+	return orders;
+}
+
+unsigned long thp_supported_orders(void)
+{
+	return __thp_supported_orders(false);
+}
+
+unsigned long thp_shmem_supported_orders(void)
+{
+	return __thp_supported_orders(true);
+}
diff --git a/tools/testing/selftests/mm/thp_settings.h b/tools/testing/selftests/mm/thp_settings.h
new file mode 100644
index 0000000000000000000000000000000000000000..876235a23460ca2c050d99700e78d5b15abba2ed
--- /dev/null
+++ b/tools/testing/selftests/mm/thp_settings.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __THP_SETTINGS_H__
+#define __THP_SETTINGS_H__
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+enum thp_enabled {
+	THP_NEVER,
+	THP_ALWAYS,
+	THP_INHERIT,
+	THP_MADVISE,
+};
+
+enum thp_defrag {
+	THP_DEFRAG_ALWAYS,
+	THP_DEFRAG_DEFER,
+	THP_DEFRAG_DEFER_MADVISE,
+	THP_DEFRAG_MADVISE,
+	THP_DEFRAG_NEVER,
+};
+
+enum shmem_enabled {
+	SHMEM_NEVER,
+	SHMEM_ALWAYS,
+	SHMEM_WITHIN_SIZE,
+	SHMEM_ADVISE,
+	SHMEM_INHERIT,
+	SHMEM_DENY,
+	SHMEM_FORCE,
+};
+
+#define NR_ORDERS 20
+
+struct hugepages_settings {
+	enum thp_enabled enabled;
+};
+
+struct khugepaged_settings {
+	bool defrag;
+	unsigned int alloc_sleep_millisecs;
+	unsigned int scan_sleep_millisecs;
+	unsigned int max_ptes_none;
+	unsigned int max_ptes_swap;
+	unsigned int max_ptes_shared;
+	unsigned long pages_to_scan;
+};
+
+struct shmem_hugepages_settings {
+	enum shmem_enabled enabled;
+};
+
+struct thp_settings {
+	enum thp_enabled thp_enabled;
+	enum thp_defrag thp_defrag;
+	enum shmem_enabled shmem_enabled;
+	bool use_zero_page;
+	struct khugepaged_settings khugepaged;
+	unsigned long read_ahead_kb;
+	struct hugepages_settings hugepages[NR_ORDERS];
+	struct shmem_hugepages_settings shmem_hugepages[NR_ORDERS];
+};
+
+int read_file(const char *path, char *buf, size_t buflen);
+int write_file(const char *path, const char *buf, size_t buflen);
+const unsigned long read_num(const char *path);
+void write_num(const char *path, unsigned long num);
+
+int thp_read_string(const char *name, const char * const strings[]);
+void thp_write_string(const char *name, const char *val);
+const unsigned long thp_read_num(const char *name);
+void thp_write_num(const char *name, unsigned long num);
+
+void thp_write_settings(struct thp_settings *settings);
+void thp_read_settings(struct thp_settings *settings);
+struct thp_settings *thp_current_settings(void);
+void thp_push_settings(struct thp_settings *settings);
+void thp_pop_settings(void);
+void thp_restore_settings(void);
+void thp_save_settings(void);
+
+void thp_set_read_ahead_path(char *path);
+unsigned long thp_supported_orders(void);
+unsigned long thp_shmem_supported_orders(void);
+
+#endif /* __THP_SETTINGS_H__ */
diff --git a/tools/testing/selftests/mm/virtual_address_range.c b/tools/testing/selftests/mm/virtual_address_range.c
index bae0ceaf95b13baeb997c720d6b4244948b98d51..76efbd5637cb78262884fce4c0c9eab077b277c9 100644
--- a/tools/testing/selftests/mm/virtual_address_range.c
+++ b/tools/testing/selftests/mm/virtual_address_range.c
@@ -54,6 +54,11 @@
 #define HIGH_ADDR_SHIFT 49
 #define NR_CHUNKS_LOW NR_CHUNKS_256TB
 #define NR_CHUNKS_HIGH NR_CHUNKS_3840TB
+#elif defined __sw_64__
+#define HIGH_ADDR_MARK ADDR_MARK_128TB * 32UL
+#define HIGH_ADDR_SHIFT 53
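+/*
+ * Assuming a 53-bit sw_64 user virtual address space: the low region
+ * below HIGH_ADDR_MARK spans 128 TiB * 32 = 4 PiB (1UL << 52) and is
+ * covered by NR_CHUNKS_128TB * 32 chunks of 16 GiB each; no separate
+ * high-region chunks are mapped, hence NR_CHUNKS_HIGH is 0 below.
+ */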
+#define NR_CHUNKS_LOW NR_CHUNKS_128TB * 32UL +#define NR_CHUNKS_HIGH 0 #else #define HIGH_ADDR_MARK ADDR_MARK_128TB #define HIGH_ADDR_SHIFT 48 diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh index 53341c8135e8895e5de7d5a2b9b091c6862d8023..d5ffd8c9172e1d9e36dd180fa2de561637bab5ad 100755 --- a/tools/testing/selftests/net/udpgro.sh +++ b/tools/testing/selftests/net/udpgro.sh @@ -7,8 +7,6 @@ source net_helper.sh readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)" -BPF_FILE="xdp_dummy.o" - # set global exit status, but never reset nonzero one. check_err() { @@ -38,7 +36,7 @@ cfg_veth() { ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24 ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad ip -netns "${PEER_NS}" link set dev veth1 up - ip -n "${PEER_NS}" link set veth1 xdp object ${BPF_FILE} section xdp + ip netns exec "${PEER_NS}" ethtool -K veth1 gro on } run_one() { @@ -206,11 +204,6 @@ run_all() { return $ret } -if [ ! -f ${BPF_FILE} ]; then - echo "Missing ${BPF_FILE}. Run 'make' first" - exit -1 -fi - if [[ $# -eq 0 ]]; then run_all elif [[ $1 == "__subprocess" ]]; then diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh index f4549e6894dd9a8ff3bf4362f1ee173cc7be928f..83ed987cff340eb018f5296dd4f983ebac4a6245 100755 --- a/tools/testing/selftests/net/udpgro_fwd.sh +++ b/tools/testing/selftests/net/udpgro_fwd.sh @@ -217,6 +217,7 @@ for family in 4 6; do cleanup create_ns + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on run_test "GRO frag list" $BM_NET$DST 1 0 cleanup @@ -227,6 +228,7 @@ for family in 4 6; do # use NAT to circumvent GRO FWD check create_ns ip -n $NS_DST addr add dev veth$DST $BM_NET$DST_NAT/$SUFFIX + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on ip netns exec $NS_DST $IPT -t nat -I PREROUTING -d $BM_NET$DST_NAT \ -j DNAT --to-destination $BM_NET$DST @@ -240,6 +242,7 @@ for family in 4 6; do cleanup create_vxlan_pair + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on run_test "GRO frag list over UDP tunnel" $OL_NET$DST 10 10 cleanup @@ -247,6 +250,7 @@ for family in 4 6; do # use NAT to circumvent GRO FWD check create_vxlan_pair ip -n $NS_DST addr add dev $VXDEV$DST $OL_NET$DST_NAT/$SUFFIX + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on ip netns exec $NS_DST $IPT -t nat -I PREROUTING -d $OL_NET$DST_NAT \ -j DNAT --to-destination $OL_NET$DST diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh index 27574bbf2d6386f770673b82684edf07b586c79a..3a394b43e274bdf51e735e60349b44d7e66e5dc2 100755 --- a/tools/testing/selftests/net/veth.sh +++ b/tools/testing/selftests/net/veth.sh @@ -246,6 +246,35 @@ ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on chk_gro " - aggregation with TSO off" 1 cleanup +create_ns +ip -n $NS_DST link set dev veth$DST up +ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp +chk_gro_flag "gro vs xdp while down - gro flag off" $DST off +ip -n $NS_DST link set dev veth$DST down +chk_gro_flag " - after down" $DST off +ip -n $NS_DST link set dev veth$DST xdp off +chk_gro_flag " - after xdp off" $DST off +ip -n $NS_DST link set dev veth$DST up +chk_gro_flag " - after 
up" $DST off +ip -n $NS_SRC link set dev veth$SRC xdp object ${BPF_FILE} section xdp +chk_gro_flag " - after peer xdp" $DST off +cleanup + +create_ns +ip -n $NS_DST link set dev veth$DST up +ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp +ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on +chk_gro_flag "gro vs xdp while down - gro flag on" $DST on +ip -n $NS_DST link set dev veth$DST down +chk_gro_flag " - after down" $DST on +ip -n $NS_DST link set dev veth$DST xdp off +chk_gro_flag " - after xdp off" $DST on +ip -n $NS_DST link set dev veth$DST up +chk_gro_flag " - after up" $DST on +ip -n $NS_SRC link set dev veth$SRC xdp object ${BPF_FILE} section xdp +chk_gro_flag " - after peer xdp" $DST on +cleanup + create_ns chk_channels "default channels" $DST 1 1 @@ -313,11 +342,14 @@ if [ $CPUS -gt 2 ]; then fi ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp 2>/dev/null -chk_gro_flag "with xdp attached - gro flag" $DST on +chk_gro_flag "with xdp attached - gro flag" $DST off chk_gro_flag " - peer gro flag" $SRC off chk_tso_flag " - tso flag" $SRC off chk_tso_flag " - peer tso flag" $DST on ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on +chk_gro " - no aggregation" 10 +ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on +chk_gro_flag " - gro flag with GRO on" $DST on chk_gro " - aggregation" 1 diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index 5b9772cdf2651bb924e1c68d50eea83d9c1a3e96..96e812bdf8a45c7ad7cc0f8d1e8b2b26d6570722 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -60,6 +60,12 @@ unsigned int rseq_size = -1U; /* Flags used during rseq registration. */ unsigned int rseq_flags; +/* + * rseq feature size supported by the kernel. 0 if the registration was + * unsuccessful. + */ +unsigned int rseq_feature_size = -1U; + static int rseq_ownership; static int rseq_reg_success; /* At least one rseq registration has succeded. */ @@ -105,43 +111,6 @@ int rseq_available(void) } } -/* The rseq areas need to be at least 32 bytes. */ -static -unsigned int get_rseq_min_alloc_size(void) -{ - unsigned int alloc_size = rseq_size; - - if (alloc_size < ORIG_RSEQ_ALLOC_SIZE) - alloc_size = ORIG_RSEQ_ALLOC_SIZE; - return alloc_size; -} - -/* - * Return the feature size supported by the kernel. - * - * Depending on the value returned by getauxval(AT_RSEQ_FEATURE_SIZE): - * - * 0: Return ORIG_RSEQ_FEATURE_SIZE (20) - * > 0: Return the value from getauxval(AT_RSEQ_FEATURE_SIZE). - * - * It should never return a value below ORIG_RSEQ_FEATURE_SIZE. - */ -static -unsigned int get_rseq_kernel_feature_size(void) -{ - unsigned long auxv_rseq_feature_size, auxv_rseq_align; - - auxv_rseq_align = getauxval(AT_RSEQ_ALIGN); - assert(!auxv_rseq_align || auxv_rseq_align <= RSEQ_THREAD_AREA_ALLOC_SIZE); - - auxv_rseq_feature_size = getauxval(AT_RSEQ_FEATURE_SIZE); - assert(!auxv_rseq_feature_size || auxv_rseq_feature_size <= RSEQ_THREAD_AREA_ALLOC_SIZE); - if (auxv_rseq_feature_size) - return auxv_rseq_feature_size; - else - return ORIG_RSEQ_FEATURE_SIZE; -} - int rseq_register_current_thread(void) { int rc; @@ -150,7 +119,7 @@ int rseq_register_current_thread(void) /* Treat libc's ownership as a successful registration. 
*/ return 0; } - rc = sys_rseq(&__rseq_abi, get_rseq_min_alloc_size(), 0, RSEQ_SIG); + rc = sys_rseq(&__rseq_abi, rseq_size, 0, RSEQ_SIG); if (rc) { if (RSEQ_READ_ONCE(rseq_reg_success)) { /* Incoherent success/failure within process. */ @@ -171,12 +140,28 @@ int rseq_unregister_current_thread(void) /* Treat libc's ownership as a successful unregistration. */ return 0; } - rc = sys_rseq(&__rseq_abi, get_rseq_min_alloc_size(), RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG); + rc = sys_rseq(&__rseq_abi, rseq_size, RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG); if (rc) return -1; return 0; } +static +unsigned int get_rseq_feature_size(void) +{ + unsigned long auxv_rseq_feature_size, auxv_rseq_align; + + auxv_rseq_align = getauxval(AT_RSEQ_ALIGN); + assert(!auxv_rseq_align || auxv_rseq_align <= RSEQ_THREAD_AREA_ALLOC_SIZE); + + auxv_rseq_feature_size = getauxval(AT_RSEQ_FEATURE_SIZE); + assert(!auxv_rseq_feature_size || auxv_rseq_feature_size <= RSEQ_THREAD_AREA_ALLOC_SIZE); + if (auxv_rseq_feature_size) + return auxv_rseq_feature_size; + else + return ORIG_RSEQ_FEATURE_SIZE; +} + static __attribute__((constructor)) void rseq_init(void) { @@ -193,54 +178,28 @@ void rseq_init(void) } if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p && *libc_rseq_size_p != 0) { - unsigned int libc_rseq_size; - /* rseq registration owned by glibc */ rseq_offset = *libc_rseq_offset_p; - libc_rseq_size = *libc_rseq_size_p; + rseq_size = *libc_rseq_size_p; rseq_flags = *libc_rseq_flags_p; - - /* - * Previous versions of glibc expose the value - * 32 even though the kernel only supported 20 - * bytes initially. Therefore treat 32 as a - * special-case. glibc 2.40 exposes a 20 bytes - * __rseq_size without using getauxval(3) to - * query the supported size, while still allocating a 32 - * bytes area. Also treat 20 as a special-case. - * - * Special-cases are handled by using the following - * value as active feature set size: - * - * rseq_size = min(32, get_rseq_kernel_feature_size()) - */ - switch (libc_rseq_size) { - case ORIG_RSEQ_FEATURE_SIZE: - fallthrough; - case ORIG_RSEQ_ALLOC_SIZE: - { - unsigned int rseq_kernel_feature_size = get_rseq_kernel_feature_size(); - - if (rseq_kernel_feature_size < ORIG_RSEQ_ALLOC_SIZE) - rseq_size = rseq_kernel_feature_size; - else - rseq_size = ORIG_RSEQ_ALLOC_SIZE; - break; - } - default: - /* Otherwise just use the __rseq_size from libc as rseq_size. */ - rseq_size = libc_rseq_size; - break; - } + rseq_feature_size = get_rseq_feature_size(); + if (rseq_feature_size > rseq_size) + rseq_feature_size = rseq_size; return; } rseq_ownership = 1; if (!rseq_available()) { rseq_size = 0; + rseq_feature_size = 0; return; } rseq_offset = (void *)&__rseq_abi - rseq_thread_pointer(); rseq_flags = 0; + rseq_feature_size = get_rseq_feature_size(); + if (rseq_feature_size == ORIG_RSEQ_FEATURE_SIZE) + rseq_size = ORIG_RSEQ_ALLOC_SIZE; + else + rseq_size = RSEQ_THREAD_AREA_ALLOC_SIZE; } static __attribute__((destructor)) @@ -250,6 +209,7 @@ void rseq_exit(void) return; rseq_offset = 0; rseq_size = -1U; + rseq_feature_size = -1U; rseq_ownership = 0; } diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h index 4e217b620e0c7a9743b0db06dfd22e3cbdc72d08..d7364ea4d201d206709af51c49d8c7b8e4936c81 100644 --- a/tools/testing/selftests/rseq/rseq.h +++ b/tools/testing/selftests/rseq/rseq.h @@ -68,6 +68,12 @@ extern unsigned int rseq_size; /* Flags used during rseq registration. */ extern unsigned int rseq_flags; +/* + * rseq feature size supported by the kernel. 
0 if the registration was + * unsuccessful. + */ +extern unsigned int rseq_feature_size; + enum rseq_mo { RSEQ_MO_RELAXED = 0, RSEQ_MO_CONSUME = 1, /* Unused */ @@ -187,7 +193,7 @@ static inline uint32_t rseq_current_cpu(void) static inline bool rseq_node_id_available(void) { - return (int) rseq_size >= rseq_offsetofend(struct rseq_abi, node_id); + return (int) rseq_feature_size >= rseq_offsetofend(struct rseq_abi, node_id); } /* @@ -201,7 +207,7 @@ static inline uint32_t rseq_current_node_id(void) static inline bool rseq_mm_cid_available(void) { - return (int) rseq_size >= rseq_offsetofend(struct rseq_abi, mm_cid); + return (int) rseq_feature_size >= rseq_offsetofend(struct rseq_abi, mm_cid); } static inline uint32_t rseq_current_mm_cid(void) diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c index 5b5c9d558dee07bc1f7afd7df280e1189858451e..7004099ce11bdf20247044f08facd87669cd65b8 100644 --- a/tools/testing/selftests/seccomp/seccomp_benchmark.c +++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c @@ -20,6 +20,11 @@ #include "../kselftest.h" +#ifdef __sw_64__ +#define __NR_getpid 174 +#define __NR_getppid 175 +#endif + unsigned long long timing(clockid_t clk_id, unsigned long long samples) { struct timespec start, finish; diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index cacf6507f6905519e88a262e8398323c53b2c34b..e5b1c58721acf76aa013c38a14afeec4bbd52ec8 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -66,6 +66,11 @@ # define PR_SET_PTRACER 0x59616d61 #endif +#ifdef __sw_64__ +#define __NR_getpid 174 +#define __NR_getppid 175 +#endif + #ifndef PR_SET_NO_NEW_PRIVS #define PR_SET_NO_NEW_PRIVS 38 #define PR_GET_NO_NEW_PRIVS 39 @@ -142,6 +147,8 @@ struct seccomp_data { # define __NR_seccomp 372 # elif defined(__mc68000__) # define __NR_seccomp 380 +# elif defined(__sw_64__) +# define __NR_seccomp 514 # else # warning "seccomp syscall number unknown for this architecture" # define __NR_seccomp 0xffff @@ -1850,6 +1857,12 @@ TEST_F(TRACE_poke, getpid_runs_normally) # define ARCH_REGS struct user_regs_struct # define SYSCALL_NUM(_regs) (_regs).orig_d0 # define SYSCALL_RET(_regs) (_regs).d0 +#elif defined(__sw_64__) +# define ARCH_REGS struct user_pt_regs +# define SYSCALL_NUM(_regs) (_regs).regs[0] +# define SYSCALL_RET(_regs) (_regs).regs[0] +# define SYSCALL_RET_SET(_regs, _val) \ + TH_LOG("Can't modify syscall return on this architecture") #else # error "Do not know how to find your architecture's registers and syscalls" #endif @@ -1914,7 +1927,7 @@ const bool ptrace_entry_set_syscall_ret = * Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux). 
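 *
 * As a rough sketch (using the wrappers defined just below, with
 * `tracee` in scope), the whole-register-set path moves the complete
 * ARCH_REGS in a single ptrace() call each way:
 *
 *	ARCH_REGS regs;
 *
 *	ARCH_GETREGS(regs);
 *	... inspect SYSCALL_NUM(regs), patch the return value ...
 *	ARCH_SETREGS(regs);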
*/ -#if defined(__x86_64__) || defined(__i386__) || defined(__mips__) || defined(__mc68000__) +#if defined(__x86_64__) || defined(__i386__) || defined(__mips__) || defined(__mc68000__) || defined(__sw_64__) # define ARCH_GETREGS(_regs) ptrace(PTRACE_GETREGS, tracee, 0, &(_regs)) # define ARCH_SETREGS(_regs) ptrace(PTRACE_SETREGS, tracee, 0, &(_regs)) #else diff --git a/tools/testing/selftests/x86/amx.c b/tools/testing/selftests/x86/amx.c index d884fd69dd510bce52c684f496eb68b9d5ba866b..95aad6d8849beb70b7b50afda143587e33fd3e62 100644 --- a/tools/testing/selftests/x86/amx.c +++ b/tools/testing/selftests/x86/amx.c @@ -103,21 +103,6 @@ static void clearhandler(int sig) #define CPUID_LEAF1_ECX_XSAVE_MASK (1 << 26) #define CPUID_LEAF1_ECX_OSXSAVE_MASK (1 << 27) -static inline void check_cpuid_xsave(void) -{ - uint32_t eax, ebx, ecx, edx; - - /* - * CPUID.1:ECX.XSAVE[bit 26] enumerates general - * support for the XSAVE feature set, including - * XGETBV. - */ - __cpuid_count(1, 0, eax, ebx, ecx, edx); - if (!(ecx & CPUID_LEAF1_ECX_XSAVE_MASK)) - fatal_error("cpuid: no CPU xsave support"); - if (!(ecx & CPUID_LEAF1_ECX_OSXSAVE_MASK)) - fatal_error("cpuid: no OS xsave support"); -} static uint32_t xbuf_size; @@ -350,6 +335,7 @@ enum expected_result { FAIL_EXPECTED, SUCCESS_EXPECTED }; /* arch_prctl() and sigaltstack() test */ +#define ARCH_GET_XCOMP_SUPP 0x1021 #define ARCH_GET_XCOMP_PERM 0x1022 #define ARCH_REQ_XCOMP_PERM 0x1023 @@ -928,8 +914,15 @@ static void test_ptrace(void) int main(void) { - /* Check hardware availability at first */ - check_cpuid_xsave(); + unsigned long features; + long rc; + + rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &features); + if (rc || (features & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE) { + ksft_print_msg("no AMX support\n"); + return KSFT_SKIP; + } + check_cpuid_xtiledata(); init_stashed_xsave(); diff --git a/tools/testing/selftests/x86/lam.c b/tools/testing/selftests/x86/lam.c index 8f9b06d9ce039ab61a8c9ba2d5a3a33c02a85828..edc14b15da34f7c505c1a38f3456544a31bf07fe 100644 --- a/tools/testing/selftests/x86/lam.c +++ b/tools/testing/selftests/x86/lam.c @@ -1183,7 +1183,7 @@ int main(int argc, char **argv) if (!cpu_has_lam()) { ksft_print_msg("Unsupported LAM feature!\n"); - return -1; + return KSFT_SKIP; } while ((c = getopt(argc, argv, "ht:")) != -1) { diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c index c1cd7dfe4a9088fc046c73e3e24dca4293ff99f5..27e50190d419be6c7c657a3dd41914f150891027 100644 --- a/virt/kvm/dirty_ring.c +++ b/virt/kvm/dirty_ring.c @@ -55,6 +55,9 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask) struct kvm_memory_slot *memslot; int as_id, id; + if (!mask) + return; + as_id = slot >> 16; id = (u16)slot; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 44c228bcd699d9ad1cb812d3686fca893afb4b67..3a14fe4910508be8a9ba1d6ed1479d95f01a79e8 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -154,6 +154,11 @@ static unsigned long long kvm_active_vms; static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask); +#ifdef CONFIG_SW64 +#define DFX_SW64_MAX_VCPU 1024 +#define DFX_SW64_MAX_VCPU_STAT_SIZE 1024 +#endif + __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) { } @@ -4158,6 +4163,9 @@ static long kvm_vcpu_ioctl(struct file *filp, if (oldpid) synchronize_rcu(); put_pid(oldpid); +#ifdef CONFIG_SW64 + vcpu->stat.pid = current->pid; +#endif } r = kvm_arch_vcpu_ioctl_run(vcpu); trace_kvm_userspace_exit(vcpu->run->exit_reason, r); @@ -5750,6 +5758,10 @@ static int 
kvm_stat_data_get(void *data, u64 *val) r = kvm_get_stat_per_vcpu(stat_data->kvm, stat_data->desc->desc.offset, val); break; +#ifdef CONFIG_SW64 + case KVM_STAT_DFX_SW64: + break; +#endif } return r; @@ -5772,6 +5784,10 @@ static int kvm_stat_data_clear(void *data, u64 val) r = kvm_clear_stat_per_vcpu(stat_data->kvm, stat_data->desc->desc.offset); break; +#ifdef CONFIG_SW64 + case KVM_STAT_DFX_SW64: + break; +#endif } return r; @@ -5866,6 +5882,116 @@ DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, "%llu\n"); DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); +#ifdef CONFIG_SW64 +void __weak kvm_arch_vcpu_stat_reset(struct kvm_vcpu_stat *vcpu_stat) +{ +} + +/* + * copy of seq_buf_alloc of kernel, kernel not export it + */ +static void *dfx_sw64_seq_buf_alloc(unsigned long size) +{ + return kvmalloc(size, GFP_KERNEL_ACCOUNT); +} + +static void dfx_sw64_seq_buf_free(const void *buf) +{ + kvfree(buf); +} + +static int dfx_sw64_seq_buf_alloc_vcpu(struct seq_file *p, int vcpu_nr) +{ + char *buf; + size_t size; + + size = (vcpu_nr + 1) * DFX_SW64_MAX_VCPU_STAT_SIZE; + buf = dfx_sw64_seq_buf_alloc(size); + if (!buf) + return -ENOMEM; + if (p->buf) + dfx_sw64_seq_buf_free(p->buf); + p->buf = buf; + p->size = size; + return 0; +} + +static int __dfx_sw64_vcpu_stats_get(struct seq_file *p, void *v) +{ + struct kvm *kvm; + struct kvm_vcpu *vcpu; + struct kvm_vcpu_stat *vcpu_stats; + struct dfx_sw64_kvm_stats_debugfs_item *dp; + int vcpu_nr = 0; + int index = 0; + unsigned long i; + + mutex_lock(&kvm_lock); + list_for_each_entry(kvm, &vm_list, vm_list) + kvm_for_each_vcpu(i, vcpu, kvm) { + vcpu_nr++; + } + mutex_unlock(&kvm_lock); + vcpu_nr = min(vcpu_nr, DFX_SW64_MAX_VCPU); + if (!vcpu_nr) { + seq_putc(p, '\n'); + return 0; + } + + if (dfx_sw64_seq_buf_alloc_vcpu(p, vcpu_nr)) + return -ENOMEM; + + vcpu_stats = vmalloc(vcpu_nr * sizeof(struct kvm_vcpu_stat)); + if (!vcpu_stats) + return -ENOMEM; + + mutex_lock(&kvm_lock); + list_for_each_entry(kvm, &vm_list, vm_list) { + kvm_for_each_vcpu(i, vcpu, kvm) { + if (index >= vcpu_nr) + break; + memcpy(vcpu_stats + index, &(vcpu->stat), + sizeof(struct kvm_vcpu_stat)); + kvm_arch_vcpu_stat_reset(&vcpu->stat); + ++index; + } + } + mutex_unlock(&kvm_lock); + for (i = 0; i < vcpu_nr; i++) { + for (dp = dfx_sw64_debugfs_entries; dp->name; ++dp) { + switch (dp->dfx_kind) { + case DFX_SW64_STAT_U64: + seq_put_decimal_ull(p, " ", + *(u64 *)((void *)&vcpu_stats[i] + dp->offset)); + break; + case DFX_SW64_STAT_CPUTIME: + pr_warn("DFX_SW64_STAT_CPUTIME not supported currently!"); + break; + default: + pr_warn("Bad dfx_sw64_kind in dfx_debugfs_entries!"); + break; + } + } + seq_putc(p, '\n'); + } + + vfree(vcpu_stats); + return 0; +} + +static int dfx_sw64_vcpu_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, __dfx_sw64_vcpu_stats_get, NULL); +} + +static const struct file_operations dfx_sw64_stat_fops = { + .open = dfx_sw64_vcpu_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) { struct kobj_uevent_env *env;
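 	/*
 	 * env carries the uevent environment strings for the
 	 * KVM_EVENT_CREATE_VM/KVM_EVENT_DESTROY_VM notifications built
 	 * below and handed to kobject_uevent_env().
 	 */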